瀏覽代碼

iris: precompute hashes for cache tracking

saves a touch of CPU overhead in the new resolve tracking
tags/19.1-branchpoint
Kenneth Graunke 7 年之前
父節點
當前提交
04e8c5bb43

+ 14
- 4
src/gallium/drivers/iris/iris_bufmgr.c 查看文件

@@ -481,6 +481,16 @@ iris_bo_cache_purge_bucket(struct iris_bufmgr *bufmgr,
}
}

/* Allocate a zero-initialized iris_bo and precompute its pointer hash.
 *
 * The hash is computed once here with _mesa_hash_pointer() so the
 * cache-tracking hash table/set lookups can use the *_pre_hashed
 * variants instead of rehashing the pointer on every access.
 *
 * Returns NULL on allocation failure.
 */
static struct iris_bo *
bo_calloc(void)
{
   struct iris_bo *bo = calloc(1, sizeof(*bo));

   if (!bo)
      return NULL;

   bo->hash = _mesa_hash_pointer(bo);

   return bo;
}

static struct iris_bo *
bo_alloc_internal(struct iris_bufmgr *bufmgr,
const char *name,
@@ -559,7 +569,7 @@ retry:
bo->gtt_offset = 0ull;
}
} else {
bo = calloc(1, sizeof(*bo));
bo = bo_calloc();
if (!bo)
goto err;

@@ -656,7 +666,7 @@ iris_bo_create_userptr(struct iris_bufmgr *bufmgr, const char *name,
{
struct iris_bo *bo;

bo = calloc(1, sizeof(*bo));
bo = bo_calloc();
if (!bo)
return NULL;

@@ -744,7 +754,7 @@ iris_bo_gem_create_from_name(struct iris_bufmgr *bufmgr,
goto out;
}

bo = calloc(1, sizeof(*bo));
bo = bo_calloc();
if (!bo)
goto out;

@@ -1338,7 +1348,7 @@ iris_bo_import_dmabuf(struct iris_bufmgr *bufmgr, int prime_fd)
goto out;
}

bo = calloc(1, sizeof(*bo));
bo = bo_calloc();
if (!bo)
goto out;


+ 3
- 0
src/gallium/drivers/iris/iris_bufmgr.h 查看文件

@@ -187,6 +187,9 @@ struct iris_bo {
* Boolean of whether this buffer points into user memory
*/
bool userptr;

/** Pre-computed hash using _mesa_hash_pointer for cache tracking sets */
uint32_t hash;
};

#define BO_ALLOC_ZEROED (1<<0)

+ 11
- 9
src/gallium/drivers/iris/iris_resolve.c 查看文件

@@ -193,8 +193,8 @@ void
iris_cache_flush_for_read(struct iris_batch *batch,
struct iris_bo *bo)
{
if (_mesa_hash_table_search(batch->cache.render, bo) ||
_mesa_set_search(batch->cache.depth, bo))
if (_mesa_hash_table_search_pre_hashed(batch->cache.render, bo->hash, bo) ||
_mesa_set_search_pre_hashed(batch->cache.depth, bo->hash, bo))
iris_flush_depth_and_render_caches(batch);
}

@@ -210,7 +210,7 @@ iris_cache_flush_for_render(struct iris_batch *batch,
enum isl_format format,
enum isl_aux_usage aux_usage)
{
if (_mesa_set_search(batch->cache.depth, bo))
if (_mesa_set_search_pre_hashed(batch->cache.depth, bo->hash, bo))
iris_flush_depth_and_render_caches(batch);

/* Check to see if this bo has been used by a previous rendering operation
@@ -236,7 +236,8 @@ iris_cache_flush_for_render(struct iris_batch *batch,
* and flush on format changes too. We can always relax this later if we
* find it to be a performance problem.
*/
struct hash_entry *entry = _mesa_hash_table_search(batch->cache.render, bo);
struct hash_entry *entry =
_mesa_hash_table_search_pre_hashed(batch->cache.render, bo->hash, bo);
if (entry && entry->data != format_aux_tuple(format, aux_usage))
iris_flush_depth_and_render_caches(batch);
}
@@ -248,7 +249,8 @@ iris_render_cache_add_bo(struct iris_batch *batch,
enum isl_aux_usage aux_usage)
{
#ifndef NDEBUG
struct hash_entry *entry = _mesa_hash_table_search(batch->cache.render, bo);
struct hash_entry *entry =
_mesa_hash_table_search_pre_hashed(batch->cache.render, bo->hash, bo);
if (entry) {
/* Otherwise, someone didn't do a flush_for_render and that would be
* very bad indeed.
@@ -257,20 +259,20 @@ iris_render_cache_add_bo(struct iris_batch *batch,
}
#endif

_mesa_hash_table_insert(batch->cache.render, bo,
format_aux_tuple(format, aux_usage));
_mesa_hash_table_insert_pre_hashed(batch->cache.render, bo->hash, bo,
format_aux_tuple(format, aux_usage));
}

/**
 * Flush the render cache if \p bo is tracked there, before the depth
 * engine accesses it.
 *
 * Uses the BO's precomputed hash (bo->hash) with the pre-hashed search
 * variant to avoid rehashing the pointer on every lookup.
 */
void
iris_cache_flush_for_depth(struct iris_batch *batch,
                           struct iris_bo *bo)
{
   if (_mesa_hash_table_search_pre_hashed(batch->cache.render, bo->hash, bo))
      iris_flush_depth_and_render_caches(batch);
}

/**
 * Record that \p bo has been written by the depth engine, so later
 * render/sampler accesses know to flush.
 *
 * Inserts with the BO's precomputed hash (bo->hash) to skip rehashing.
 */
void
iris_depth_cache_add_bo(struct iris_batch *batch, struct iris_bo *bo)
{
   _mesa_set_add_pre_hashed(batch->cache.depth, bo->hash, bo);
}

Loading…
取消
儲存