Clone of mesa.

pb_buffer_fenced.c 27KB

/**************************************************************************
 *
 * Copyright 2007-2010 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * \file
 * Implementation of fenced buffers.
 *
 * \author Jose Fonseca <jfonseca-at-vmware-dot-com>
 * \author Thomas Hellström <thellstrom-at-vmware-dot-com>
 */

#include "pipe/p_config.h"

#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
#include <unistd.h>
#include <sched.h>
#endif

#include "pipe/p_compiler.h"
#include "pipe/p_defines.h"
#include "util/u_debug.h"
#include "os/os_thread.h"
#include "util/u_memory.h"
#include "util/u_double_list.h"

#include "pb_buffer.h"
#include "pb_buffer_fenced.h"
#include "pb_bufmgr.h"

/**
 * Convenience macro (type safe).
 */
#define SUPER(__derived) (&(__derived)->base)


struct fenced_manager
{
   struct pb_manager base;
   struct pb_manager *provider;
   struct pb_fence_ops *ops;

   /**
    * Maximum buffer size that can be safely allocated.
    */
   pb_size max_buffer_size;

   /**
    * Maximum cpu memory we can allocate before we start waiting for the
    * GPU to idle.
    */
   pb_size max_cpu_total_size;

   /**
    * Following members are mutable and protected by this mutex.
    */
   pipe_mutex mutex;

   /**
    * Fenced buffer list.
    *
    * All fenced buffers are placed in this list, ordered from the oldest
    * fence to the newest fence.
    */
   struct list_head fenced;
   pb_size num_fenced;

   struct list_head unfenced;
   pb_size num_unfenced;

   /**
    * How much temporary CPU memory is being used to hold unvalidated buffers.
    */
   pb_size cpu_total_size;
};

/**
 * Fenced buffer.
 *
 * Wrapper around a pipe buffer which adds fencing and reference counting.
 */
struct fenced_buffer
{
   /*
    * Immutable members.
    */

   struct pb_buffer base;
   struct fenced_manager *mgr;

   /*
    * Following members are mutable and protected by fenced_manager::mutex.
    */

   struct list_head head;

   /**
    * Buffer with storage.
    */
   struct pb_buffer *buffer;
   pb_size size;
   struct pb_desc desc;

   /**
    * Temporary CPU storage data. Used when there isn't enough GPU memory to
    * store the buffer.
    */
   void *data;

   /**
    * A bitmask of PIPE_BUFFER_USAGE_CPU/GPU_READ/WRITE describing the current
    * buffer usage.
    */
   unsigned flags;

   unsigned mapcount;

   struct pb_validate *vl;
   unsigned validation_flags;

   struct pipe_fence_handle *fence;
};


static INLINE struct fenced_manager *
fenced_manager(struct pb_manager *mgr)
{
   assert(mgr);
   return (struct fenced_manager *)mgr;
}


static INLINE struct fenced_buffer *
fenced_buffer(struct pb_buffer *buf)
{
   assert(buf);
   return (struct fenced_buffer *)buf;
}


static void
fenced_buffer_destroy_cpu_storage_locked(struct fenced_buffer *fenced_buf);

static enum pipe_error
fenced_buffer_create_cpu_storage_locked(struct fenced_manager *fenced_mgr,
                                        struct fenced_buffer *fenced_buf);

static void
fenced_buffer_destroy_gpu_storage_locked(struct fenced_buffer *fenced_buf);

static enum pipe_error
fenced_buffer_create_gpu_storage_locked(struct fenced_manager *fenced_mgr,
                                        struct fenced_buffer *fenced_buf,
                                        boolean wait);

static enum pipe_error
fenced_buffer_copy_storage_to_gpu_locked(struct fenced_buffer *fenced_buf);

static enum pipe_error
fenced_buffer_copy_storage_to_cpu_locked(struct fenced_buffer *fenced_buf);

/**
 * Dump the fenced buffer list.
 *
 * Useful to understand failures to allocate buffers.
 */
static void
fenced_manager_dump_locked(struct fenced_manager *fenced_mgr)
{
#ifdef DEBUG
   struct pb_fence_ops *ops = fenced_mgr->ops;
   struct list_head *curr, *next;
   struct fenced_buffer *fenced_buf;

   debug_printf("%10s %7s %8s %7s %10s %s\n",
                "buffer", "size", "refcount", "storage", "fence", "signalled");

   curr = fenced_mgr->unfenced.next;
   next = curr->next;
   while(curr != &fenced_mgr->unfenced) {
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
      assert(!fenced_buf->fence);
      debug_printf("%10p %7u %8u %7s\n",
                   (void *) fenced_buf,
                   fenced_buf->base.base.width0,
                   p_atomic_read(&fenced_buf->base.base.reference.count),
                   fenced_buf->buffer ? "gpu" : (fenced_buf->data ? "cpu" : "none"));
      curr = next;
      next = curr->next;
   }

   curr = fenced_mgr->fenced.next;
   next = curr->next;
   while(curr != &fenced_mgr->fenced) {
      int signaled;
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
      assert(fenced_buf->buffer);
      signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
      debug_printf("%10p %7u %8u %7s %10p %s\n",
                   (void *) fenced_buf,
                   fenced_buf->base.base.width0,
                   p_atomic_read(&fenced_buf->base.base.reference.count),
                   "gpu",
                   (void *) fenced_buf->fence,
                   signaled == 0 ? "y" : "n");
      curr = next;
      next = curr->next;
   }
#else
   (void)fenced_mgr;
#endif
}


static INLINE void
fenced_buffer_destroy_locked(struct fenced_manager *fenced_mgr,
                             struct fenced_buffer *fenced_buf)
{
   assert(!pipe_is_referenced(&fenced_buf->base.base.reference));

   assert(!fenced_buf->fence);
   assert(fenced_buf->head.prev);
   assert(fenced_buf->head.next);
   LIST_DEL(&fenced_buf->head);
   assert(fenced_mgr->num_unfenced);
   --fenced_mgr->num_unfenced;

   fenced_buffer_destroy_gpu_storage_locked(fenced_buf);
   fenced_buffer_destroy_cpu_storage_locked(fenced_buf);

   FREE(fenced_buf);
}


/**
 * Add the buffer to the fenced list.
 *
 * Reference count should be incremented before calling this function.
 */
static INLINE void
fenced_buffer_add_locked(struct fenced_manager *fenced_mgr,
                         struct fenced_buffer *fenced_buf)
{
   assert(pipe_is_referenced(&fenced_buf->base.base.reference));
   assert(fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE);
   assert(fenced_buf->fence);

   p_atomic_inc(&fenced_buf->base.base.reference.count);

   LIST_DEL(&fenced_buf->head);
   assert(fenced_mgr->num_unfenced);
   --fenced_mgr->num_unfenced;
   LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->fenced);
   ++fenced_mgr->num_fenced;
}

/**
 * Remove the buffer from the fenced list, and potentially destroy the buffer
 * if the reference count reaches zero.
 *
 * Returns TRUE if the buffer was destroyed.
 */
static INLINE boolean
fenced_buffer_remove_locked(struct fenced_manager *fenced_mgr,
                            struct fenced_buffer *fenced_buf)
{
   struct pb_fence_ops *ops = fenced_mgr->ops;

   assert(fenced_buf->fence);
   assert(fenced_buf->mgr == fenced_mgr);

   ops->fence_reference(ops, &fenced_buf->fence, NULL);
   fenced_buf->flags &= ~PIPE_BUFFER_USAGE_GPU_READ_WRITE;

   assert(fenced_buf->head.prev);
   assert(fenced_buf->head.next);

   LIST_DEL(&fenced_buf->head);
   assert(fenced_mgr->num_fenced);
   --fenced_mgr->num_fenced;

   LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->unfenced);
   ++fenced_mgr->num_unfenced;

   if (p_atomic_dec_zero(&fenced_buf->base.base.reference.count)) {
      fenced_buffer_destroy_locked(fenced_mgr, fenced_buf);
      return TRUE;
   }

   return FALSE;
}


/**
 * Wait for the fence to expire, and remove it from the fenced list.
 *
 * This function will release and re-acquire the mutex, so any copy of mutable
 * state must be discarded after calling it.
 */
static INLINE enum pipe_error
fenced_buffer_finish_locked(struct fenced_manager *fenced_mgr,
                            struct fenced_buffer *fenced_buf)
{
   struct pb_fence_ops *ops = fenced_mgr->ops;
   enum pipe_error ret = PIPE_ERROR;

#if 0
   debug_warning("waiting for GPU");
#endif

   assert(pipe_is_referenced(&fenced_buf->base.base.reference));
   assert(fenced_buf->fence);

   if(fenced_buf->fence) {
      struct pipe_fence_handle *fence = NULL;
      int finished;
      boolean proceed;

      ops->fence_reference(ops, &fence, fenced_buf->fence);

      pipe_mutex_unlock(fenced_mgr->mutex);

      finished = ops->fence_finish(ops, fenced_buf->fence, 0);

      pipe_mutex_lock(fenced_mgr->mutex);

      assert(pipe_is_referenced(&fenced_buf->base.base.reference));

      /*
       * Only proceed if the fence object didn't change in the meanwhile.
       * Otherwise assume the work has already been carried out by another
       * thread that re-acquired the lock before us.
       */
      proceed = fence == fenced_buf->fence ? TRUE : FALSE;

      ops->fence_reference(ops, &fence, NULL);

      if(proceed && finished == 0) {
         /*
          * Remove from the fenced list.
          */

         boolean destroyed;

         destroyed = fenced_buffer_remove_locked(fenced_mgr, fenced_buf);

         /* TODO: remove subsequent buffers with the same fence? */

         assert(!destroyed);

         fenced_buf->flags &= ~PIPE_BUFFER_USAGE_GPU_READ_WRITE;

         ret = PIPE_OK;
      }
   }

   return ret;
}

/**
 * Remove as many fenced buffers from the fenced list as possible.
 *
 * Returns TRUE if at least one buffer was removed.
 */
static boolean
fenced_manager_check_signalled_locked(struct fenced_manager *fenced_mgr,
                                      boolean wait)
{
   struct pb_fence_ops *ops = fenced_mgr->ops;
   struct list_head *curr, *next;
   struct fenced_buffer *fenced_buf;
   struct pipe_fence_handle *prev_fence = NULL;
   boolean ret = FALSE;

   curr = fenced_mgr->fenced.next;
   next = curr->next;
   while(curr != &fenced_mgr->fenced) {
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);

      if(fenced_buf->fence != prev_fence) {
         int signaled;

         if (wait) {
            signaled = ops->fence_finish(ops, fenced_buf->fence, 0);

            /*
             * Don't return just now. Instead preemptively check if the
             * following buffers' fences already expired, without further waits.
             */
            wait = FALSE;
         }
         else {
            signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
         }

         if (signaled != 0) {
            return ret;
         }

         prev_fence = fenced_buf->fence;
      }
      else {
         /* This buffer's fence object is identical to the previous buffer's
          * fence object, so no need to check the fence again.
          */
         assert(ops->fence_signalled(ops, fenced_buf->fence, 0) == 0);
      }

      fenced_buffer_remove_locked(fenced_mgr, fenced_buf);

      ret = TRUE;

      curr = next;
      next = curr->next;
   }

   return ret;
}


/**
 * Try to free some GPU memory by backing it up into CPU memory.
 *
 * Returns TRUE if at least one buffer was freed.
 */
static boolean
fenced_manager_free_gpu_storage_locked(struct fenced_manager *fenced_mgr)
{
   struct list_head *curr, *next;
   struct fenced_buffer *fenced_buf;

   curr = fenced_mgr->unfenced.next;
   next = curr->next;
   while(curr != &fenced_mgr->unfenced) {
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);

      /*
       * We can only move storage if the buffer is not mapped and not
       * validated.
       */
      if(fenced_buf->buffer &&
         !fenced_buf->mapcount &&
         !fenced_buf->vl) {
         enum pipe_error ret;

         ret = fenced_buffer_create_cpu_storage_locked(fenced_mgr, fenced_buf);
         if(ret == PIPE_OK) {
            ret = fenced_buffer_copy_storage_to_cpu_locked(fenced_buf);
            if(ret == PIPE_OK) {
               fenced_buffer_destroy_gpu_storage_locked(fenced_buf);
               return TRUE;
            }
            fenced_buffer_destroy_cpu_storage_locked(fenced_buf);
         }
      }

      curr = next;
      next = curr->next;
   }

   return FALSE;
}

/**
 * Destroy CPU storage for this buffer.
 */
static void
fenced_buffer_destroy_cpu_storage_locked(struct fenced_buffer *fenced_buf)
{
   if(fenced_buf->data) {
      align_free(fenced_buf->data);
      fenced_buf->data = NULL;
      assert(fenced_buf->mgr->cpu_total_size >= fenced_buf->size);
      fenced_buf->mgr->cpu_total_size -= fenced_buf->size;
   }
}


/**
 * Create CPU storage for this buffer.
 */
static enum pipe_error
fenced_buffer_create_cpu_storage_locked(struct fenced_manager *fenced_mgr,
                                        struct fenced_buffer *fenced_buf)
{
   assert(!fenced_buf->data);
   if(fenced_buf->data)
      return PIPE_OK;

   if (fenced_mgr->cpu_total_size + fenced_buf->size > fenced_mgr->max_cpu_total_size)
      return PIPE_ERROR_OUT_OF_MEMORY;

   fenced_buf->data = align_malloc(fenced_buf->size, fenced_buf->desc.alignment);
   if(!fenced_buf->data)
      return PIPE_ERROR_OUT_OF_MEMORY;

   fenced_mgr->cpu_total_size += fenced_buf->size;

   return PIPE_OK;
}


/**
 * Destroy the GPU storage.
 */
static void
fenced_buffer_destroy_gpu_storage_locked(struct fenced_buffer *fenced_buf)
{
   if(fenced_buf->buffer) {
      pb_reference(&fenced_buf->buffer, NULL);
   }
}


/**
 * Try to create GPU storage for this buffer.
 *
 * This function is a shorthand around pb_manager::create_buffer for
 * fenced_buffer_create_gpu_storage_locked()'s benefit.
 */
static INLINE boolean
fenced_buffer_try_create_gpu_storage_locked(struct fenced_manager *fenced_mgr,
                                            struct fenced_buffer *fenced_buf)
{
   struct pb_manager *provider = fenced_mgr->provider;

   assert(!fenced_buf->buffer);

   fenced_buf->buffer = provider->create_buffer(fenced_mgr->provider,
                                                fenced_buf->size,
                                                &fenced_buf->desc);
   return fenced_buf->buffer ? TRUE : FALSE;
}

/**
 * Create GPU storage for this buffer.
 */
static enum pipe_error
fenced_buffer_create_gpu_storage_locked(struct fenced_manager *fenced_mgr,
                                        struct fenced_buffer *fenced_buf,
                                        boolean wait)
{
   assert(!fenced_buf->buffer);

   /*
    * Check for signaled buffers before trying to allocate.
    */
   fenced_manager_check_signalled_locked(fenced_mgr, FALSE);

   fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf);

   /*
    * Keep trying while there is some sort of progress:
    * - fences are expiring,
    * - or buffers are being swapped out from GPU memory into CPU memory.
    */
   while(!fenced_buf->buffer &&
         (fenced_manager_check_signalled_locked(fenced_mgr, FALSE) ||
          fenced_manager_free_gpu_storage_locked(fenced_mgr))) {
      fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf);
   }

   if(!fenced_buf->buffer && wait) {
      /*
       * Same as before, but this time around, wait to free buffers if
       * necessary.
       */
      while(!fenced_buf->buffer &&
            (fenced_manager_check_signalled_locked(fenced_mgr, TRUE) ||
             fenced_manager_free_gpu_storage_locked(fenced_mgr))) {
         fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf);
      }
   }

   if(!fenced_buf->buffer) {
      if(0)
         fenced_manager_dump_locked(fenced_mgr);

      /* give up */
      return PIPE_ERROR_OUT_OF_MEMORY;
   }

   return PIPE_OK;
}

static enum pipe_error
fenced_buffer_copy_storage_to_gpu_locked(struct fenced_buffer *fenced_buf)
{
   uint8_t *map;

   assert(fenced_buf->data);
   assert(fenced_buf->buffer);

   map = pb_map(fenced_buf->buffer, PIPE_BUFFER_USAGE_CPU_WRITE);
   if(!map)
      return PIPE_ERROR;

   memcpy(map, fenced_buf->data, fenced_buf->size);

   pb_unmap(fenced_buf->buffer);

   return PIPE_OK;
}


static enum pipe_error
fenced_buffer_copy_storage_to_cpu_locked(struct fenced_buffer *fenced_buf)
{
   const uint8_t *map;

   assert(fenced_buf->data);
   assert(fenced_buf->buffer);

   map = pb_map(fenced_buf->buffer, PIPE_BUFFER_USAGE_CPU_READ);
   if(!map)
      return PIPE_ERROR;

   memcpy(fenced_buf->data, map, fenced_buf->size);

   pb_unmap(fenced_buf->buffer);

   return PIPE_OK;
}


static void
fenced_buffer_destroy(struct pb_buffer *buf)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;

   assert(!pipe_is_referenced(&fenced_buf->base.base.reference));

   pipe_mutex_lock(fenced_mgr->mutex);

   fenced_buffer_destroy_locked(fenced_mgr, fenced_buf);

   pipe_mutex_unlock(fenced_mgr->mutex);
}

static void *
fenced_buffer_map(struct pb_buffer *buf,
                  unsigned flags)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;
   struct pb_fence_ops *ops = fenced_mgr->ops;
   void *map = NULL;

   pipe_mutex_lock(fenced_mgr->mutex);

   assert(!(flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE));

   /*
    * Serialize writes.
    */
   while((fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_WRITE) ||
         ((fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_READ) &&
          (flags & PIPE_BUFFER_USAGE_CPU_WRITE))) {

      /*
       * Don't wait for the GPU to finish accessing it, if blocking is forbidden.
       */
      if((flags & PIPE_BUFFER_USAGE_DONTBLOCK) &&
         ops->fence_signalled(ops, fenced_buf->fence, 0) != 0) {
         goto done;
      }

      if (flags & PIPE_BUFFER_USAGE_UNSYNCHRONIZED) {
         break;
      }

      /*
       * Wait for the GPU to finish accessing. This will release and re-acquire
       * the mutex, so all copies of mutable state must be discarded.
       */
      fenced_buffer_finish_locked(fenced_mgr, fenced_buf);
   }

   if(fenced_buf->buffer) {
      map = pb_map(fenced_buf->buffer, flags);
   }
   else {
      assert(fenced_buf->data);
      map = fenced_buf->data;
   }

   if(map) {
      ++fenced_buf->mapcount;
      fenced_buf->flags |= flags & PIPE_BUFFER_USAGE_CPU_READ_WRITE;
   }

done:
   pipe_mutex_unlock(fenced_mgr->mutex);

   return map;
}


static void
fenced_buffer_unmap(struct pb_buffer *buf)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;

   pipe_mutex_lock(fenced_mgr->mutex);

   assert(fenced_buf->mapcount);
   if(fenced_buf->mapcount) {
      if (fenced_buf->buffer)
         pb_unmap(fenced_buf->buffer);
      --fenced_buf->mapcount;
      if(!fenced_buf->mapcount)
         fenced_buf->flags &= ~PIPE_BUFFER_USAGE_CPU_READ_WRITE;
   }

   pipe_mutex_unlock(fenced_mgr->mutex);
}
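
/*
 * Illustrative sketch (not part of the original file): how a caller might
 * map a fenced buffer for writing through the pb_map()/pb_unmap() wrappers,
 * which dispatch to fenced_buffer_map()/fenced_buffer_unmap() above.  It
 * first asks for a non-blocking map; fenced_buffer_map() returns NULL in
 * that case if the GPU is still using the buffer, and the caller then falls
 * back to a blocking map.  The helper name example_write_buffer is
 * hypothetical.
 */
#if 0
static enum pipe_error
example_write_buffer(struct pb_buffer *buf, const void *src, pb_size size)
{
   void *map;

   /* Try not to stall: fail immediately if the buffer is still busy. */
   map = pb_map(buf, PIPE_BUFFER_USAGE_CPU_WRITE |
                     PIPE_BUFFER_USAGE_DONTBLOCK);
   if(!map) {
      /* The buffer is busy on the GPU; this time wait for the fence. */
      map = pb_map(buf, PIPE_BUFFER_USAGE_CPU_WRITE);
      if(!map)
         return PIPE_ERROR;
   }

   memcpy(map, src, size);
   pb_unmap(buf);

   return PIPE_OK;
}
#endif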

static enum pipe_error
fenced_buffer_validate(struct pb_buffer *buf,
                       struct pb_validate *vl,
                       unsigned flags)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;
   enum pipe_error ret;

   pipe_mutex_lock(fenced_mgr->mutex);

   if(!vl) {
      /* invalidate */
      fenced_buf->vl = NULL;
      fenced_buf->validation_flags = 0;
      ret = PIPE_OK;
      goto done;
   }

   assert(flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE);
   assert(!(flags & ~PIPE_BUFFER_USAGE_GPU_READ_WRITE));
   flags &= PIPE_BUFFER_USAGE_GPU_READ_WRITE;

   /* Buffer cannot be validated in two different lists */
   if(fenced_buf->vl && fenced_buf->vl != vl) {
      ret = PIPE_ERROR_RETRY;
      goto done;
   }

   if(fenced_buf->vl == vl &&
      (fenced_buf->validation_flags & flags) == flags) {
      /* Nothing to do -- buffer already validated */
      ret = PIPE_OK;
      goto done;
   }

   /*
    * Create and update GPU storage.
    */
   if(!fenced_buf->buffer) {
      assert(!fenced_buf->mapcount);

      ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf, TRUE);
      if(ret != PIPE_OK) {
         goto done;
      }

      ret = fenced_buffer_copy_storage_to_gpu_locked(fenced_buf);
      if(ret != PIPE_OK) {
         fenced_buffer_destroy_gpu_storage_locked(fenced_buf);
         goto done;
      }

      if(fenced_buf->mapcount) {
         debug_printf("warning: validating a buffer while it is still mapped\n");
      }
      else {
         fenced_buffer_destroy_cpu_storage_locked(fenced_buf);
      }
   }

   ret = pb_validate(fenced_buf->buffer, vl, flags);
   if (ret != PIPE_OK)
      goto done;

   fenced_buf->vl = vl;
   fenced_buf->validation_flags |= flags;

done:
   pipe_mutex_unlock(fenced_mgr->mutex);

   return ret;
}


static void
fenced_buffer_fence(struct pb_buffer *buf,
                    struct pipe_fence_handle *fence)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;
   struct pb_fence_ops *ops = fenced_mgr->ops;

   pipe_mutex_lock(fenced_mgr->mutex);

   assert(pipe_is_referenced(&fenced_buf->base.base.reference));
   assert(fenced_buf->buffer);

   if(fence != fenced_buf->fence) {
      assert(fenced_buf->vl);
      assert(fenced_buf->validation_flags);

      if (fenced_buf->fence) {
         boolean destroyed;
         destroyed = fenced_buffer_remove_locked(fenced_mgr, fenced_buf);
         assert(!destroyed);
      }
      if (fence) {
         ops->fence_reference(ops, &fenced_buf->fence, fence);
         fenced_buf->flags |= fenced_buf->validation_flags;
         fenced_buffer_add_locked(fenced_mgr, fenced_buf);
      }

      pb_fence(fenced_buf->buffer, fence);

      fenced_buf->vl = NULL;
      fenced_buf->validation_flags = 0;
   }

   pipe_mutex_unlock(fenced_mgr->mutex);
}


static void
fenced_buffer_get_base_buffer(struct pb_buffer *buf,
                              struct pb_buffer **base_buf,
                              pb_size *offset)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;

   pipe_mutex_lock(fenced_mgr->mutex);

   /*
    * This should only be called when the buffer is validated. Typically
    * when processing relocations.
    */
   assert(fenced_buf->vl);
   assert(fenced_buf->buffer);

   if(fenced_buf->buffer)
      pb_get_base_buffer(fenced_buf->buffer, base_buf, offset);
   else {
      *base_buf = buf;
      *offset = 0;
   }

   pipe_mutex_unlock(fenced_mgr->mutex);
}


static const struct pb_vtbl
fenced_buffer_vtbl = {
   fenced_buffer_destroy,
   fenced_buffer_map,
   fenced_buffer_unmap,
   fenced_buffer_validate,
   fenced_buffer_fence,
   fenced_buffer_get_base_buffer
};

/**
 * Wrap a buffer in a fenced buffer.
 */
static struct pb_buffer *
fenced_bufmgr_create_buffer(struct pb_manager *mgr,
                            pb_size size,
                            const struct pb_desc *desc)
{
   struct fenced_manager *fenced_mgr = fenced_manager(mgr);
   struct fenced_buffer *fenced_buf;
   enum pipe_error ret;

   /*
    * Don't stall the GPU, waste time evicting buffers, or waste memory
    * trying to create a buffer that will most likely never fit into the
    * graphics aperture.
    */
   if(size > fenced_mgr->max_buffer_size) {
      goto no_buffer;
   }

   fenced_buf = CALLOC_STRUCT(fenced_buffer);
   if(!fenced_buf)
      goto no_buffer;

   pipe_reference_init(&fenced_buf->base.base.reference, 1);
   fenced_buf->base.base.alignment = desc->alignment;
   fenced_buf->base.base.usage = desc->usage;
   fenced_buf->base.base.size = size;
   fenced_buf->size = size;
   fenced_buf->desc = *desc;

   fenced_buf->base.vtbl = &fenced_buffer_vtbl;
   fenced_buf->mgr = fenced_mgr;

   pipe_mutex_lock(fenced_mgr->mutex);

   /*
    * Try to create GPU storage without stalling.
    */
   ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf, FALSE);

   /*
    * Attempt to use CPU memory to avoid stalling the GPU.
    */
   if(ret != PIPE_OK) {
      ret = fenced_buffer_create_cpu_storage_locked(fenced_mgr, fenced_buf);
   }

   /*
    * Create GPU storage, waiting for some to be available.
    */
   if(ret != PIPE_OK) {
      ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf, TRUE);
   }

   /*
    * Give up.
    */
   if(ret != PIPE_OK) {
      goto no_storage;
   }

   assert(fenced_buf->buffer || fenced_buf->data);

   LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->unfenced);
   ++fenced_mgr->num_unfenced;
   pipe_mutex_unlock(fenced_mgr->mutex);

   return &fenced_buf->base;

no_storage:
   pipe_mutex_unlock(fenced_mgr->mutex);
   FREE(fenced_buf);
no_buffer:
   return NULL;
}

static void
fenced_bufmgr_flush(struct pb_manager *mgr)
{
   struct fenced_manager *fenced_mgr = fenced_manager(mgr);

   pipe_mutex_lock(fenced_mgr->mutex);
   while(fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
      ;
   pipe_mutex_unlock(fenced_mgr->mutex);

   assert(fenced_mgr->provider->flush);
   if(fenced_mgr->provider->flush)
      fenced_mgr->provider->flush(fenced_mgr->provider);
}


static void
fenced_bufmgr_destroy(struct pb_manager *mgr)
{
   struct fenced_manager *fenced_mgr = fenced_manager(mgr);

   pipe_mutex_lock(fenced_mgr->mutex);

   /* Wait on outstanding fences */
   while (fenced_mgr->num_fenced) {
      pipe_mutex_unlock(fenced_mgr->mutex);
#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
      sched_yield();
#endif
      pipe_mutex_lock(fenced_mgr->mutex);
      while(fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
         ;
   }

#ifdef DEBUG
   /*assert(!fenced_mgr->num_unfenced);*/
#endif

   pipe_mutex_unlock(fenced_mgr->mutex);
   pipe_mutex_destroy(fenced_mgr->mutex);

   if(fenced_mgr->provider)
      fenced_mgr->provider->destroy(fenced_mgr->provider);

   fenced_mgr->ops->destroy(fenced_mgr->ops);

   FREE(fenced_mgr);
}


struct pb_manager *
fenced_bufmgr_create(struct pb_manager *provider,
                     struct pb_fence_ops *ops,
                     pb_size max_buffer_size,
                     pb_size max_cpu_total_size)
{
   struct fenced_manager *fenced_mgr;

   if(!provider)
      return NULL;

   fenced_mgr = CALLOC_STRUCT(fenced_manager);
   if (!fenced_mgr)
      return NULL;

   fenced_mgr->base.destroy = fenced_bufmgr_destroy;
   fenced_mgr->base.create_buffer = fenced_bufmgr_create_buffer;
   fenced_mgr->base.flush = fenced_bufmgr_flush;

   fenced_mgr->provider = provider;
   fenced_mgr->ops = ops;
   fenced_mgr->max_buffer_size = max_buffer_size;
   fenced_mgr->max_cpu_total_size = max_cpu_total_size;

   LIST_INITHEAD(&fenced_mgr->fenced);
   fenced_mgr->num_fenced = 0;

   LIST_INITHEAD(&fenced_mgr->unfenced);
   fenced_mgr->num_unfenced = 0;

   pipe_mutex_init(fenced_mgr->mutex);

   return &fenced_mgr->base;
}
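
/*
 * Illustrative sketch (not part of the original file): how a winsys might
 * stack this manager on top of its hardware buffer provider via
 * fenced_bufmgr_create().  The provider and fence ops are assumed to come
 * from winsys-specific constructors, and the size limits below are
 * arbitrary example values, not recommendations.
 */
#if 0
static struct pb_manager *
example_winsys_create_fenced_manager(struct pb_manager *provider,
                                     struct pb_fence_ops *ops)
{
   /* Refuse single allocations above 32 MiB and allow at most 16 MiB of
    * temporary CPU backing store before stalling on the GPU. */
   return fenced_bufmgr_create(provider, ops,
                               32 * 1024 * 1024,
                               16 * 1024 * 1024);
}
#endif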