- /**************************************************************************
- *
- * Copyright 2007-2010 VMware, Inc.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
- /**
- * \file
- * Implementation of fenced buffers.
- *
- * \author Jose Fonseca <jfonseca-at-vmware-dot-com>
- * \author Thomas Hellström <thellstrom-at-vmware-dot-com>
- */
-
-
- #include "pipe/p_config.h"
-
- #if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
- #include <unistd.h>
- #include <sched.h>
- #endif
-
- #include "pipe/p_compiler.h"
- #include "pipe/p_defines.h"
- #include "util/u_debug.h"
- #include "os/os_thread.h"
- #include "util/u_memory.h"
- #include "util/u_double_list.h"
-
- #include "pb_buffer.h"
- #include "pb_buffer_fenced.h"
- #include "pb_bufmgr.h"
-
-
-
- /**
- * Convenience macro for upcasting to the base struct (type safe).
- */
- #define SUPER(__derived) (&(__derived)->base)
-
-
- struct fenced_manager
- {
- struct pb_manager base;
- struct pb_manager *provider;
- struct pb_fence_ops *ops;
-
- /**
- * Maximum buffer size that can be safely allocated.
- */
- pb_size max_buffer_size;
-
- /**
- * Maximum CPU memory we can allocate before we start waiting for the
- * GPU to go idle.
- */
- pb_size max_cpu_total_size;
-
- /**
- * Following members are mutable and protected by this mutex.
- */
- pipe_mutex mutex;
-
- /**
- * Fenced buffer list.
- *
- * All fenced buffers are placed in this list, ordered from the oldest
- * fence to the newest fence.
- */
- struct list_head fenced;
- pb_size num_fenced;
-
- struct list_head unfenced;
- pb_size num_unfenced;
-
- /**
- * How much temporary CPU memory is being used to hold buffers that have
- * no GPU storage.
- */
- pb_size cpu_total_size;
- };
-
-
- /**
- * Fenced buffer.
- *
- * Wrapper around a pipe buffer which adds fencing and reference counting.
- */
- struct fenced_buffer
- {
- /*
- * Immutable members.
- */
-
- struct pb_buffer base;
- struct fenced_manager *mgr;
-
- /*
- * Following members are mutable and protected by fenced_manager::mutex.
- */
-
- struct list_head head;
-
- /**
- * Buffer with storage.
- */
- struct pb_buffer *buffer;
- pb_size size;
- struct pb_desc desc;
-
- /**
- * Temporary CPU storage data. Used when there isn't enough GPU memory to
- * store the buffer.
- */
- void *data;
-
- /**
- * A bitmask of PIPE_BUFFER_USAGE_CPU/GPU_READ/WRITE describing the current
- * buffer usage.
- */
- unsigned flags;
-
- unsigned mapcount;
-
- struct pb_validate *vl;
- unsigned validation_flags;
-
- struct pipe_fence_handle *fence;
- };
-
-
- static INLINE struct fenced_manager *
- fenced_manager(struct pb_manager *mgr)
- {
- assert(mgr);
- return (struct fenced_manager *)mgr;
- }
-
-
- static INLINE struct fenced_buffer *
- fenced_buffer(struct pb_buffer *buf)
- {
- assert(buf);
- return (struct fenced_buffer *)buf;
- }
-
-
- static void
- fenced_buffer_destroy_cpu_storage_locked(struct fenced_buffer *fenced_buf);
-
- static enum pipe_error
- fenced_buffer_create_cpu_storage_locked(struct fenced_manager *fenced_mgr,
- struct fenced_buffer *fenced_buf);
-
- static void
- fenced_buffer_destroy_gpu_storage_locked(struct fenced_buffer *fenced_buf);
-
- static enum pipe_error
- fenced_buffer_create_gpu_storage_locked(struct fenced_manager *fenced_mgr,
- struct fenced_buffer *fenced_buf,
- boolean wait);
-
- static enum pipe_error
- fenced_buffer_copy_storage_to_gpu_locked(struct fenced_buffer *fenced_buf);
-
- static enum pipe_error
- fenced_buffer_copy_storage_to_cpu_locked(struct fenced_buffer *fenced_buf);
-
-
- /**
- * Dump the fenced buffer list.
- *
- * Useful for understanding buffer allocation failures.
- */
- static void
- fenced_manager_dump_locked(struct fenced_manager *fenced_mgr)
- {
- #ifdef DEBUG
- struct pb_fence_ops *ops = fenced_mgr->ops;
- struct list_head *curr, *next;
- struct fenced_buffer *fenced_buf;
-
- debug_printf("%10s %7s %8s %7s %10s %s\n",
- "buffer", "size", "refcount", "storage", "fence", "signalled");
-
- curr = fenced_mgr->unfenced.next;
- next = curr->next;
- while(curr != &fenced_mgr->unfenced) {
- fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
- assert(!fenced_buf->fence);
- debug_printf("%10p %7u %8u %7s\n",
- (void *) fenced_buf,
- fenced_buf->base.base.width0,
- p_atomic_read(&fenced_buf->base.base.reference.count),
- fenced_buf->buffer ? "gpu" : (fenced_buf->data ? "cpu" : "none"));
- curr = next;
- next = curr->next;
- }
-
- curr = fenced_mgr->fenced.next;
- next = curr->next;
- while(curr != &fenced_mgr->fenced) {
- int signaled;
- fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
- assert(fenced_buf->buffer);
- signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
- debug_printf("%10p %7u %8u %7s %10p %s\n",
- (void *) fenced_buf,
- fenced_buf->base.base.width0,
- p_atomic_read(&fenced_buf->base.base.reference.count),
- "gpu",
- (void *) fenced_buf->fence,
- signaled == 0 ? "y" : "n");
- curr = next;
- next = curr->next;
- }
- #else
- (void)fenced_mgr;
- #endif
- }
-
-
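- /**
- * Destroy the buffer, removing it from the unfenced list and freeing both
- * its GPU and CPU storage.
- *
- * The buffer must no longer be referenced nor fenced when this is called.
- */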
- static INLINE void
- fenced_buffer_destroy_locked(struct fenced_manager *fenced_mgr,
- struct fenced_buffer *fenced_buf)
- {
- assert(!pipe_is_referenced(&fenced_buf->base.base.reference));
-
- assert(!fenced_buf->fence);
- assert(fenced_buf->head.prev);
- assert(fenced_buf->head.next);
- LIST_DEL(&fenced_buf->head);
- assert(fenced_mgr->num_unfenced);
- --fenced_mgr->num_unfenced;
-
- fenced_buffer_destroy_gpu_storage_locked(fenced_buf);
- fenced_buffer_destroy_cpu_storage_locked(fenced_buf);
-
- FREE(fenced_buf);
- }
-
-
- /**
- * Add the buffer to the fenced list.
- *
- * The caller must already hold a reference; this function takes an
- * additional reference on behalf of the fenced list.
- */
- static INLINE void
- fenced_buffer_add_locked(struct fenced_manager *fenced_mgr,
- struct fenced_buffer *fenced_buf)
- {
- assert(pipe_is_referenced(&fenced_buf->base.base.reference));
- assert(fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE);
- assert(fenced_buf->fence);
-
- p_atomic_inc(&fenced_buf->base.base.reference.count);
-
- LIST_DEL(&fenced_buf->head);
- assert(fenced_mgr->num_unfenced);
- --fenced_mgr->num_unfenced;
- LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->fenced);
- ++fenced_mgr->num_fenced;
- }
-
-
- /**
- * Remove the buffer from the fenced list, and potentially destroy the buffer
- * if the reference count reaches zero.
- *
- * Returns TRUE if the buffer was destroyed.
- */
- static INLINE boolean
- fenced_buffer_remove_locked(struct fenced_manager *fenced_mgr,
- struct fenced_buffer *fenced_buf)
- {
- struct pb_fence_ops *ops = fenced_mgr->ops;
-
- assert(fenced_buf->fence);
- assert(fenced_buf->mgr == fenced_mgr);
-
- ops->fence_reference(ops, &fenced_buf->fence, NULL);
- fenced_buf->flags &= ~PIPE_BUFFER_USAGE_GPU_READ_WRITE;
-
- assert(fenced_buf->head.prev);
- assert(fenced_buf->head.next);
-
- LIST_DEL(&fenced_buf->head);
- assert(fenced_mgr->num_fenced);
- --fenced_mgr->num_fenced;
-
- LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->unfenced);
- ++fenced_mgr->num_unfenced;
-
- if (p_atomic_dec_zero(&fenced_buf->base.base.reference.count)) {
- fenced_buffer_destroy_locked(fenced_mgr, fenced_buf);
- return TRUE;
- }
-
- return FALSE;
- }
-
-
- /**
- * Wait for the fence to expire, and remove the buffer from the fenced list.
- *
- * This function will release and re-acquire the mutex, so any copy of mutable
- * state must be discarded after calling it.
- */
- static INLINE enum pipe_error
- fenced_buffer_finish_locked(struct fenced_manager *fenced_mgr,
- struct fenced_buffer *fenced_buf)
- {
- struct pb_fence_ops *ops = fenced_mgr->ops;
- enum pipe_error ret = PIPE_ERROR;
-
- #if 0
- debug_warning("waiting for GPU");
- #endif
-
- assert(pipe_is_referenced(&fenced_buf->base.base.reference));
- assert(fenced_buf->fence);
-
- if(fenced_buf->fence) {
- struct pipe_fence_handle *fence = NULL;
- int finished;
- boolean proceed;
-
- ops->fence_reference(ops, &fence, fenced_buf->fence);
-
- pipe_mutex_unlock(fenced_mgr->mutex);
-
- finished = ops->fence_finish(ops, fenced_buf->fence, 0);
-
- pipe_mutex_lock(fenced_mgr->mutex);
-
- assert(pipe_is_referenced(&fenced_buf->base.base.reference));
-
- /*
- * Only proceed if the fence object didn't change in the meantime.
- * Otherwise assume the work has already been carried out by another
- * thread that re-acquired the lock before us.
- */
- proceed = fence == fenced_buf->fence ? TRUE : FALSE;
-
- ops->fence_reference(ops, &fence, NULL);
-
- if(proceed && finished == 0) {
- /*
- * Remove from the fenced list
- */
-
- boolean destroyed;
-
- destroyed = fenced_buffer_remove_locked(fenced_mgr, fenced_buf);
-
- /* TODO: also remove subsequent buffers with the same fence? */
-
- assert(!destroyed);
-
- fenced_buf->flags &= ~PIPE_BUFFER_USAGE_GPU_READ_WRITE;
-
- ret = PIPE_OK;
- }
- }
-
- return ret;
- }
-
-
- /**
- * Remove as many fenced buffers from the fenced list as possible.
- *
- * If wait is TRUE, block until the first unexpired fence expires.
- *
- * Returns TRUE if at least one buffer was removed.
- */
- static boolean
- fenced_manager_check_signalled_locked(struct fenced_manager *fenced_mgr,
- boolean wait)
- {
- struct pb_fence_ops *ops = fenced_mgr->ops;
- struct list_head *curr, *next;
- struct fenced_buffer *fenced_buf;
- struct pipe_fence_handle *prev_fence = NULL;
- boolean ret = FALSE;
-
- curr = fenced_mgr->fenced.next;
- next = curr->next;
- while(curr != &fenced_mgr->fenced) {
- fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
-
- if(fenced_buf->fence != prev_fence) {
- int signaled;
-
- if (wait) {
- signaled = ops->fence_finish(ops, fenced_buf->fence, 0);
-
- /*
- * Don't return just yet. Instead, preemptively check whether the
- * following buffers' fences have already expired, without waiting
- * further.
- */
- wait = FALSE;
- }
- else {
- signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
- }
-
- if (signaled != 0) {
- return ret;
- }
-
- prev_fence = fenced_buf->fence;
- }
- else {
- /* This buffer's fence object is identical to the previous buffer's
- * fence object, so no need to check the fence again.
- */
- assert(ops->fence_signalled(ops, fenced_buf->fence, 0) == 0);
- }
-
- fenced_buffer_remove_locked(fenced_mgr, fenced_buf);
-
- ret = TRUE;
-
- curr = next;
- next = curr->next;
- }
-
- return ret;
- }
-
-
- /**
- * Try to free some GPU memory by backing it up into CPU memory.
- *
- * Returns TRUE if at least one buffer was freed.
- */
- static boolean
- fenced_manager_free_gpu_storage_locked(struct fenced_manager *fenced_mgr)
- {
- struct list_head *curr, *next;
- struct fenced_buffer *fenced_buf;
-
- curr = fenced_mgr->unfenced.next;
- next = curr->next;
- while(curr != &fenced_mgr->unfenced) {
- fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
-
- /*
- * We can only move storage if the buffer is not mapped and not
- * validated.
- */
- if(fenced_buf->buffer &&
- !fenced_buf->mapcount &&
- !fenced_buf->vl) {
- enum pipe_error ret;
-
- ret = fenced_buffer_create_cpu_storage_locked(fenced_mgr, fenced_buf);
- if(ret == PIPE_OK) {
- ret = fenced_buffer_copy_storage_to_cpu_locked(fenced_buf);
- if(ret == PIPE_OK) {
- fenced_buffer_destroy_gpu_storage_locked(fenced_buf);
- return TRUE;
- }
- fenced_buffer_destroy_cpu_storage_locked(fenced_buf);
- }
- }
-
- curr = next;
- next = curr->next;
- }
-
- return FALSE;
- }
-
-
- /**
- * Destroy CPU storage for this buffer.
- */
- static void
- fenced_buffer_destroy_cpu_storage_locked(struct fenced_buffer *fenced_buf)
- {
- if(fenced_buf->data) {
- align_free(fenced_buf->data);
- fenced_buf->data = NULL;
- assert(fenced_buf->mgr->cpu_total_size >= fenced_buf->size);
- fenced_buf->mgr->cpu_total_size -= fenced_buf->size;
- }
- }
-
-
- /**
- * Create CPU storage for this buffer.
- */
- static enum pipe_error
- fenced_buffer_create_cpu_storage_locked(struct fenced_manager *fenced_mgr,
- struct fenced_buffer *fenced_buf)
- {
- assert(!fenced_buf->data);
- if(fenced_buf->data)
- return PIPE_OK;
-
- if (fenced_mgr->cpu_total_size + fenced_buf->size > fenced_mgr->max_cpu_total_size)
- return PIPE_ERROR_OUT_OF_MEMORY;
-
- fenced_buf->data = align_malloc(fenced_buf->size, fenced_buf->desc.alignment);
- if(!fenced_buf->data)
- return PIPE_ERROR_OUT_OF_MEMORY;
-
- fenced_mgr->cpu_total_size += fenced_buf->size;
-
- return PIPE_OK;
- }
-
-
- /**
- * Destroy the GPU storage.
- */
- static void
- fenced_buffer_destroy_gpu_storage_locked(struct fenced_buffer *fenced_buf)
- {
- if(fenced_buf->buffer) {
- pb_reference(&fenced_buf->buffer, NULL);
- }
- }
-
-
- /**
- * Try to create GPU storage for this buffer.
- *
- * This function is a convenience wrapper around pb_manager::create_buffer,
- * used by fenced_buffer_create_gpu_storage_locked().
- */
- static INLINE boolean
- fenced_buffer_try_create_gpu_storage_locked(struct fenced_manager *fenced_mgr,
- struct fenced_buffer *fenced_buf)
- {
- struct pb_manager *provider = fenced_mgr->provider;
-
- assert(!fenced_buf->buffer);
-
- fenced_buf->buffer = provider->create_buffer(fenced_mgr->provider,
- fenced_buf->size,
- &fenced_buf->desc);
- return fenced_buf->buffer ? TRUE : FALSE;
- }
-
-
- /**
- * Create GPU storage for this buffer.
- */
- static enum pipe_error
- fenced_buffer_create_gpu_storage_locked(struct fenced_manager *fenced_mgr,
- struct fenced_buffer *fenced_buf,
- boolean wait)
- {
- assert(!fenced_buf->buffer);
-
- /*
- * Check for signaled buffers before trying to allocate.
- */
- fenced_manager_check_signalled_locked(fenced_mgr, FALSE);
-
- fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf);
-
- /*
- * Keep trying while there is some sort of progress:
- * - fences are expiring,
- * - or buffers are being swapped out from GPU memory into CPU memory.
- */
- while(!fenced_buf->buffer &&
- (fenced_manager_check_signalled_locked(fenced_mgr, FALSE) ||
- fenced_manager_free_gpu_storage_locked(fenced_mgr))) {
- fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf);
- }
-
- if(!fenced_buf->buffer && wait) {
- /*
- * Same as before, but this time wait on fences to free buffers if
- * necessary.
- */
- while(!fenced_buf->buffer &&
- (fenced_manager_check_signalled_locked(fenced_mgr, TRUE) ||
- fenced_manager_free_gpu_storage_locked(fenced_mgr))) {
- fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf);
- }
- }
-
- if(!fenced_buf->buffer) {
- if(0)
- fenced_manager_dump_locked(fenced_mgr);
-
- /* give up */
- return PIPE_ERROR_OUT_OF_MEMORY;
- }
-
- return PIPE_OK;
- }
-
-
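- /**
- * Copy the contents of the temporary CPU storage to the GPU storage.
- */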
- static enum pipe_error
- fenced_buffer_copy_storage_to_gpu_locked(struct fenced_buffer *fenced_buf)
- {
- uint8_t *map;
-
- assert(fenced_buf->data);
- assert(fenced_buf->buffer);
-
- map = pb_map(fenced_buf->buffer, PIPE_BUFFER_USAGE_CPU_WRITE);
- if(!map)
- return PIPE_ERROR;
-
- memcpy(map, fenced_buf->data, fenced_buf->size);
-
- pb_unmap(fenced_buf->buffer);
-
- return PIPE_OK;
- }
-
-
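- /**
- * Copy the contents of the GPU storage to the temporary CPU storage.
- */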
- static enum pipe_error
- fenced_buffer_copy_storage_to_cpu_locked(struct fenced_buffer *fenced_buf)
- {
- const uint8_t *map;
-
- assert(fenced_buf->data);
- assert(fenced_buf->buffer);
-
- map = pb_map(fenced_buf->buffer, PIPE_BUFFER_USAGE_CPU_READ);
- if(!map)
- return PIPE_ERROR;
-
- memcpy(fenced_buf->data, map, fenced_buf->size);
-
- pb_unmap(fenced_buf->buffer);
-
- return PIPE_OK;
- }
-
-
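- /**
- * pb_vtbl::destroy callback.
- *
- * Called once the reference count has reached zero.
- */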
- static void
- fenced_buffer_destroy(struct pb_buffer *buf)
- {
- struct fenced_buffer *fenced_buf = fenced_buffer(buf);
- struct fenced_manager *fenced_mgr = fenced_buf->mgr;
-
- assert(!pipe_is_referenced(&fenced_buf->base.base.reference));
-
- pipe_mutex_lock(fenced_mgr->mutex);
-
- fenced_buffer_destroy_locked(fenced_mgr, fenced_buf);
-
- pipe_mutex_unlock(fenced_mgr->mutex);
- }
-
-
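- /**
- * pb_vtbl::map callback.
- *
- * Serializes CPU access against pending GPU access, honoring the
- * DONTBLOCK and UNSYNCHRONIZED flags, then maps either the GPU storage
- * or the temporary CPU storage.
- */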
- static void *
- fenced_buffer_map(struct pb_buffer *buf,
- unsigned flags)
- {
- struct fenced_buffer *fenced_buf = fenced_buffer(buf);
- struct fenced_manager *fenced_mgr = fenced_buf->mgr;
- struct pb_fence_ops *ops = fenced_mgr->ops;
- void *map = NULL;
-
- pipe_mutex_lock(fenced_mgr->mutex);
-
- assert(!(flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE));
-
- /*
- * Serialize writes.
- */
- while((fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_WRITE) ||
- ((fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_READ) &&
- (flags & PIPE_BUFFER_USAGE_CPU_WRITE))) {
-
- /*
- * If blocking is forbidden, don't wait for the GPU to finish accessing it.
- */
- if((flags & PIPE_BUFFER_USAGE_DONTBLOCK) &&
- ops->fence_signalled(ops, fenced_buf->fence, 0) != 0) {
- goto done;
- }
-
- if (flags & PIPE_BUFFER_USAGE_UNSYNCHRONIZED) {
- break;
- }
-
- /*
- * Wait for the GPU to finish accessing the buffer. This will release and
- * re-acquire the mutex, so all copies of mutable state must be discarded.
- */
- fenced_buffer_finish_locked(fenced_mgr, fenced_buf);
- }
-
- if(fenced_buf->buffer) {
- map = pb_map(fenced_buf->buffer, flags);
- }
- else {
- assert(fenced_buf->data);
- map = fenced_buf->data;
- }
-
- if(map) {
- ++fenced_buf->mapcount;
- fenced_buf->flags |= flags & PIPE_BUFFER_USAGE_CPU_READ_WRITE;
- }
-
- done:
- pipe_mutex_unlock(fenced_mgr->mutex);
-
- return map;
- }
-
-
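- /**
- * pb_vtbl::unmap callback.
- */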
- static void
- fenced_buffer_unmap(struct pb_buffer *buf)
- {
- struct fenced_buffer *fenced_buf = fenced_buffer(buf);
- struct fenced_manager *fenced_mgr = fenced_buf->mgr;
-
- pipe_mutex_lock(fenced_mgr->mutex);
-
- assert(fenced_buf->mapcount);
- if(fenced_buf->mapcount) {
- if (fenced_buf->buffer)
- pb_unmap(fenced_buf->buffer);
- --fenced_buf->mapcount;
- if(!fenced_buf->mapcount)
- fenced_buf->flags &= ~PIPE_BUFFER_USAGE_CPU_READ_WRITE;
- }
-
- pipe_mutex_unlock(fenced_mgr->mutex);
- }
-
-
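- /**
- * pb_vtbl::validate callback.
- *
- * Ensures the buffer has GPU storage, copying the temporary CPU storage
- * into it if necessary, before validating the underlying buffer. A NULL
- * validation list invalidates the buffer.
- */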
- static enum pipe_error
- fenced_buffer_validate(struct pb_buffer *buf,
- struct pb_validate *vl,
- unsigned flags)
- {
- struct fenced_buffer *fenced_buf = fenced_buffer(buf);
- struct fenced_manager *fenced_mgr = fenced_buf->mgr;
- enum pipe_error ret;
-
- pipe_mutex_lock(fenced_mgr->mutex);
-
- if(!vl) {
- /* invalidate */
- fenced_buf->vl = NULL;
- fenced_buf->validation_flags = 0;
- ret = PIPE_OK;
- goto done;
- }
-
- assert(flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE);
- assert(!(flags & ~PIPE_BUFFER_USAGE_GPU_READ_WRITE));
- flags &= PIPE_BUFFER_USAGE_GPU_READ_WRITE;
-
- /* Buffer cannot be validated in two different lists */
- if(fenced_buf->vl && fenced_buf->vl != vl) {
- ret = PIPE_ERROR_RETRY;
- goto done;
- }
-
- if(fenced_buf->vl == vl &&
- (fenced_buf->validation_flags & flags) == flags) {
- /* Nothing to do -- buffer already validated */
- ret = PIPE_OK;
- goto done;
- }
-
- /*
- * Create and update GPU storage.
- */
- if(!fenced_buf->buffer) {
- assert(!fenced_buf->mapcount);
-
- ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf, TRUE);
- if(ret != PIPE_OK) {
- goto done;
- }
-
- ret = fenced_buffer_copy_storage_to_gpu_locked(fenced_buf);
- if(ret != PIPE_OK) {
- fenced_buffer_destroy_gpu_storage_locked(fenced_buf);
- goto done;
- }
-
- if(fenced_buf->mapcount) {
- debug_printf("warning: validating a buffer while it is still mapped\n");
- }
- else {
- fenced_buffer_destroy_cpu_storage_locked(fenced_buf);
- }
- }
-
- ret = pb_validate(fenced_buf->buffer, vl, flags);
- if (ret != PIPE_OK)
- goto done;
-
- fenced_buf->vl = vl;
- fenced_buf->validation_flags |= flags;
-
- done:
- pipe_mutex_unlock(fenced_mgr->mutex);
-
- return ret;
- }
-
-
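- /**
- * pb_vtbl::fence callback.
- *
- * Associates a new fence with the buffer, moving it between the unfenced
- * and fenced lists as needed.
- */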
- static void
- fenced_buffer_fence(struct pb_buffer *buf,
- struct pipe_fence_handle *fence)
- {
- struct fenced_buffer *fenced_buf = fenced_buffer(buf);
- struct fenced_manager *fenced_mgr = fenced_buf->mgr;
- struct pb_fence_ops *ops = fenced_mgr->ops;
-
- pipe_mutex_lock(fenced_mgr->mutex);
-
- assert(pipe_is_referenced(&fenced_buf->base.base.reference));
- assert(fenced_buf->buffer);
-
- if(fence != fenced_buf->fence) {
- assert(fenced_buf->vl);
- assert(fenced_buf->validation_flags);
-
- if (fenced_buf->fence) {
- boolean destroyed;
- destroyed = fenced_buffer_remove_locked(fenced_mgr, fenced_buf);
- assert(!destroyed);
- }
- if (fence) {
- ops->fence_reference(ops, &fenced_buf->fence, fence);
- fenced_buf->flags |= fenced_buf->validation_flags;
- fenced_buffer_add_locked(fenced_mgr, fenced_buf);
- }
-
- pb_fence(fenced_buf->buffer, fence);
-
- fenced_buf->vl = NULL;
- fenced_buf->validation_flags = 0;
- }
-
- pipe_mutex_unlock(fenced_mgr->mutex);
- }
-
-
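- /**
- * pb_vtbl::get_base_buffer callback.
- */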
- static void
- fenced_buffer_get_base_buffer(struct pb_buffer *buf,
- struct pb_buffer **base_buf,
- pb_size *offset)
- {
- struct fenced_buffer *fenced_buf = fenced_buffer(buf);
- struct fenced_manager *fenced_mgr = fenced_buf->mgr;
-
- pipe_mutex_lock(fenced_mgr->mutex);
-
- /*
- * This should only be called when the buffer is validated, typically
- * when processing relocations.
- */
- assert(fenced_buf->vl);
- assert(fenced_buf->buffer);
-
- if(fenced_buf->buffer)
- pb_get_base_buffer(fenced_buf->buffer, base_buf, offset);
- else {
- *base_buf = buf;
- *offset = 0;
- }
-
- pipe_mutex_unlock(fenced_mgr->mutex);
- }
-
-
- static const struct pb_vtbl
- fenced_buffer_vtbl = {
- fenced_buffer_destroy,
- fenced_buffer_map,
- fenced_buffer_unmap,
- fenced_buffer_validate,
- fenced_buffer_fence,
- fenced_buffer_get_base_buffer
- };
-
-
- /**
- * pb_manager::create_buffer implementation.
- *
- * Creates a fenced buffer, backed by GPU storage when available, or by
- * temporary CPU storage otherwise.
- */
- static struct pb_buffer *
- fenced_bufmgr_create_buffer(struct pb_manager *mgr,
- pb_size size,
- const struct pb_desc *desc)
- {
- struct fenced_manager *fenced_mgr = fenced_manager(mgr);
- struct fenced_buffer *fenced_buf;
- enum pipe_error ret;
-
- /*
- * Don't stall the GPU, waste time evicting buffers, or waste memory
- * trying to create a buffer that will most likely never fit into the
- * graphics aperture.
- */
- if(size > fenced_mgr->max_buffer_size) {
- goto no_buffer;
- }
-
- fenced_buf = CALLOC_STRUCT(fenced_buffer);
- if(!fenced_buf)
- goto no_buffer;
-
- pipe_reference_init(&fenced_buf->base.base.reference, 1);
- fenced_buf->base.base.alignment = desc->alignment;
- fenced_buf->base.base.usage = desc->usage;
- fenced_buf->base.base.size = size;
- fenced_buf->size = size;
- fenced_buf->desc = *desc;
-
- fenced_buf->base.vtbl = &fenced_buffer_vtbl;
- fenced_buf->mgr = fenced_mgr;
-
- pipe_mutex_lock(fenced_mgr->mutex);
-
- /*
- * Try to create GPU storage without stalling.
- */
- ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf, FALSE);
-
- /*
- * Attempt to use CPU memory to avoid stalling the GPU.
- */
- if(ret != PIPE_OK) {
- ret = fenced_buffer_create_cpu_storage_locked(fenced_mgr, fenced_buf);
- }
-
- /*
- * Create GPU storage, waiting for some to be available.
- */
- if(ret != PIPE_OK) {
- ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf, TRUE);
- }
-
- /*
- * Give up.
- */
- if(ret != PIPE_OK) {
- goto no_storage;
- }
-
- assert(fenced_buf->buffer || fenced_buf->data);
-
- LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->unfenced);
- ++fenced_mgr->num_unfenced;
- pipe_mutex_unlock(fenced_mgr->mutex);
-
- return &fenced_buf->base;
-
- no_storage:
- pipe_mutex_unlock(fenced_mgr->mutex);
- FREE(fenced_buf);
- no_buffer:
- return NULL;
- }
-
-
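- /**
- * pb_manager::flush implementation.
- *
- * Waits for all outstanding fences to expire before flushing the provider.
- */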
- static void
- fenced_bufmgr_flush(struct pb_manager *mgr)
- {
- struct fenced_manager *fenced_mgr = fenced_manager(mgr);
-
- pipe_mutex_lock(fenced_mgr->mutex);
- while(fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
- ;
- pipe_mutex_unlock(fenced_mgr->mutex);
-
- assert(fenced_mgr->provider->flush);
- if(fenced_mgr->provider->flush)
- fenced_mgr->provider->flush(fenced_mgr->provider);
- }
-
-
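- /**
- * pb_manager::destroy implementation.
- *
- * Waits for all fenced buffers to expire, then destroys the provider and
- * the fence ops.
- */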
- static void
- fenced_bufmgr_destroy(struct pb_manager *mgr)
- {
- struct fenced_manager *fenced_mgr = fenced_manager(mgr);
-
- pipe_mutex_lock(fenced_mgr->mutex);
-
- /* Wait on outstanding fences */
- while (fenced_mgr->num_fenced) {
- pipe_mutex_unlock(fenced_mgr->mutex);
- #if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
- sched_yield();
- #endif
- pipe_mutex_lock(fenced_mgr->mutex);
- while(fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
- ;
- }
-
- #ifdef DEBUG
- /*assert(!fenced_mgr->num_unfenced);*/
- #endif
-
- pipe_mutex_unlock(fenced_mgr->mutex);
- pipe_mutex_destroy(fenced_mgr->mutex);
-
- if(fenced_mgr->provider)
- fenced_mgr->provider->destroy(fenced_mgr->provider);
-
- fenced_mgr->ops->destroy(fenced_mgr->ops);
-
- FREE(fenced_mgr);
- }
-
-
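- /**
- * Create a fenced buffer manager on top of the given provider.
- *
- * max_buffer_size is the largest buffer that may be allocated, and
- * max_cpu_total_size bounds the temporary CPU memory used to back buffers
- * while GPU memory is exhausted.
- */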
- struct pb_manager *
- fenced_bufmgr_create(struct pb_manager *provider,
- struct pb_fence_ops *ops,
- pb_size max_buffer_size,
- pb_size max_cpu_total_size)
- {
- struct fenced_manager *fenced_mgr;
-
- if(!provider)
- return NULL;
-
- fenced_mgr = CALLOC_STRUCT(fenced_manager);
- if (!fenced_mgr)
- return NULL;
-
- fenced_mgr->base.destroy = fenced_bufmgr_destroy;
- fenced_mgr->base.create_buffer = fenced_bufmgr_create_buffer;
- fenced_mgr->base.flush = fenced_bufmgr_flush;
-
- fenced_mgr->provider = provider;
- fenced_mgr->ops = ops;
- fenced_mgr->max_buffer_size = max_buffer_size;
- fenced_mgr->max_cpu_total_size = max_cpu_total_size;
-
- LIST_INITHEAD(&fenced_mgr->fenced);
- fenced_mgr->num_fenced = 0;
-
- LIST_INITHEAD(&fenced_mgr->unfenced);
- fenced_mgr->num_unfenced = 0;
-
- pipe_mutex_init(fenced_mgr->mutex);
-
- return &fenced_mgr->base;
- }
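-
-
- /*
- * Minimal usage sketch. The mydrv_* constructors below are hypothetical
- * placeholders for whatever the driver's winsys actually provides, and the
- * size limits are arbitrary:
- *
- *    struct pb_manager *provider = mydrv_bufmgr_create(screen);
- *    struct pb_fence_ops *ops = mydrv_fence_ops_create(screen);
- *    pb_size max_buffer_size = 16 * 1024 * 1024;
- *    pb_size max_cpu_total_size = 32 * 1024 * 1024;
- *    struct pb_manager *mgr =
- *       fenced_bufmgr_create(provider, ops,
- *                            max_buffer_size, max_cpu_total_size);
- *
- *    struct pb_desc desc;
- *    desc.alignment = 64;
- *    desc.usage = PIPE_BUFFER_USAGE_GPU_READ_WRITE;
- *    struct pb_buffer *buf = mgr->create_buffer(mgr, 4096, &desc);
- *
- *    pb_reference(&buf, NULL);
- *
- * Destroying the manager also destroys the provider and the fence ops:
- *
- *    mgr->destroy(mgr);
- */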