
i965: Drop global bufmgr lock from brw_bo_map_* functions.

After removing the unusable debugging code in the previous commit, we
can also entirely remove the global mutex around mapping the buffer for
the first time and replace it with a single atomic operation to update
the cache once we retrieve the mmap.

v2 (Ken): Split out from Chris's original commit.

Reviewed-by: Kenneth Graunke <kenneth@whitecape.org>
Reviewed-by: Matt Turner <mattst88@gmail.com>
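
For reference, here is a minimal standalone sketch (not driver code) of the publish-once pattern the diff below adopts: create the mapping without holding any lock, then install the pointer with a single compare-and-swap, with the loser of a race discarding its redundant mapping. It uses C11 atomics and an anonymous mmap in place of Mesa's p_atomic_cmpxchg() and the real GEM mmap ioctl; the struct and function names are hypothetical.

/* Hypothetical illustration of the lock-free map-caching pattern. */
#include <stdatomic.h>
#include <stddef.h>
#include <sys/mman.h>

struct fake_bo {
   size_t size;
   void *_Atomic map_cpu;   /* NULL until a mapping has been published */
};

static void *
fake_bo_map(struct fake_bo *bo)
{
   void *map = atomic_load(&bo->map_cpu);

   if (!map) {
      /* Map without taking any lock ... */
      map = mmap(NULL, bo->size, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (map == MAP_FAILED)
         return NULL;

      /* ... then publish it atomically.  If another thread beat us to
       * it, drop our redundant mapping and use the cached pointer.
       */
      void *expected = NULL;
      if (!atomic_compare_exchange_strong(&bo->map_cpu, &expected, map)) {
         munmap(map, bo->size);
         map = expected;
      }
   }

   return map;
}

Whichever thread loses the compare-and-swap simply unmaps its own mapping and uses the published one, so the worst case on a race is one extra mmap/munmap pair rather than any serialization on a global lock.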
Chris Wilson, 8 years ago
commit 314647c4c2
1 changed file with 15 additions and 14 deletions
src/mesa/drivers/dri/i965/brw_bufmgr.c

@@ -650,10 +650,9 @@ brw_bo_map_cpu(struct brw_context *brw, struct brw_bo *bo, unsigned flags)
 {
    struct brw_bufmgr *bufmgr = bo->bufmgr;

-   pthread_mutex_lock(&bufmgr->lock);
-
    if (!bo->map_cpu) {
       struct drm_i915_gem_mmap mmap_arg;
+      void *map;

       DBG("brw_bo_map_cpu: %d (%s)\n", bo->gem_handle, bo->name);

@@ -665,11 +664,15 @@ brw_bo_map_cpu(struct brw_context *brw, struct brw_bo *bo, unsigned flags)
          ret = -errno;
          DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
              __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
-         pthread_mutex_unlock(&bufmgr->lock);
          return NULL;
       }
       VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
-      bo->map_cpu = (void *) (uintptr_t) mmap_arg.addr_ptr;
+      map = (void *) (uintptr_t) mmap_arg.addr_ptr;
+
+      if (p_atomic_cmpxchg(&bo->map_cpu, NULL, map)) {
+         VG(VALGRIND_FREELIKE_BLOCK(map, 0));
+         drm_munmap(map, bo->size);
+      }
    }
    DBG("brw_bo_map_cpu: %d (%s) -> %p\n", bo->gem_handle, bo->name,
        bo->map_cpu);
@@ -679,8 +682,6 @@ brw_bo_map_cpu(struct brw_context *brw, struct brw_bo *bo, unsigned flags)
                 flags & MAP_WRITE ? I915_GEM_DOMAIN_CPU : 0);
    }

-   pthread_mutex_unlock(&bufmgr->lock);
-
    return bo->map_cpu;
 }

@@ -689,11 +690,10 @@ brw_bo_map_gtt(struct brw_context *brw, struct brw_bo *bo, unsigned flags)
 {
    struct brw_bufmgr *bufmgr = bo->bufmgr;

-   pthread_mutex_lock(&bufmgr->lock);
-
    /* Get a mapping of the buffer if we haven't before. */
    if (bo->map_gtt == NULL) {
       struct drm_i915_gem_mmap_gtt mmap_arg;
+      void *map;

       DBG("bo_map_gtt: mmap %d (%s)\n", bo->gem_handle, bo->name);

@@ -712,15 +712,18 @@ brw_bo_map_gtt(struct brw_context *brw, struct brw_bo *bo, unsigned flags)
       /* and mmap it. We don't need to use VALGRIND_MALLOCLIKE_BLOCK
        * because Valgrind will already intercept this mmap call.
        */
-      bo->map_gtt = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
-                             MAP_SHARED, bufmgr->fd, mmap_arg.offset);
-      if (bo->map_gtt == MAP_FAILED) {
+      map = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
+                     MAP_SHARED, bufmgr->fd, mmap_arg.offset);
+      if (map == MAP_FAILED) {
          bo->map_gtt = NULL;
          DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
              __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
-         pthread_mutex_unlock(&bufmgr->lock);
          return NULL;
       }
+
+      if (p_atomic_cmpxchg(&bo->map_gtt, NULL, map)) {
+         drm_munmap(map, bo->size);
+      }
    }

    DBG("bo_map_gtt: %d (%s) -> %p\n", bo->gem_handle, bo->name,
@@ -731,8 +734,6 @@ brw_bo_map_gtt(struct brw_context *brw, struct brw_bo *bo, unsigned flags)
                 I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
    }

-   pthread_mutex_unlock(&bufmgr->lock);
-
    return bo->map_gtt;
 }

