These happen to work because their values are the same as the equivalent
PIPE_TRANSFER_* flags, but it's still misleading.

Signed-off-by: Henri Verbeet <hverbeet@gmail.com>

Tag: mesa-8.0-rc1
@@ -62,7 +62,7 @@ static struct r600_fence *r600_create_fence(struct r600_pipe_context *ctx)
 		R600_ERR("r600: failed to create bo for fence objects\n");
 		return NULL;
 	}
-	ctx->fences.data = r600_bo_map(ctx->radeon, ctx->fences.bo, PB_USAGE_UNSYNCHRONIZED, NULL);
+	ctx->fences.data = r600_bo_map(ctx->radeon, ctx->fences.bo, PIPE_TRANSFER_UNSYNCHRONIZED, NULL);
 	}

 	if (!LIST_IS_EMPTY(&ctx->fences.pool)) {
@@ -689,7 +689,6 @@ void* r600_texture_transfer_map(struct pipe_context *ctx,
 	enum pipe_format format = transfer->resource->format;
 	struct radeon *radeon = (struct radeon *)ctx->screen->winsys;
 	unsigned offset = 0;
-	unsigned usage = 0;
 	char *map;

 	if (rtransfer->staging_texture) {
@@ -707,30 +706,7 @@ void* r600_texture_transfer_map(struct pipe_context *ctx,
 			transfer->box.x / util_format_get_blockwidth(format) * util_format_get_blocksize(format);
 	}
-	if (transfer->usage & PIPE_TRANSFER_WRITE) {
-		usage |= PB_USAGE_CPU_WRITE;
-		if (transfer->usage & PIPE_TRANSFER_DISCARD) {
-		}
-		if (transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT) {
-		}
-	}
-	if (transfer->usage & PIPE_TRANSFER_READ) {
-		usage |= PB_USAGE_CPU_READ;
-	}
-	if (transfer->usage & PIPE_TRANSFER_DONTBLOCK) {
-		usage |= PB_USAGE_DONTBLOCK;
-	}
-	if (transfer->usage & PIPE_TRANSFER_UNSYNCHRONIZED) {
-		usage |= PB_USAGE_UNSYNCHRONIZED;
-	}
-	map = r600_bo_map(radeon, bo, usage, ctx);
-	if (!map) {
+	if (!(map = r600_bo_map(radeon, bo, transfer->usage, ctx))) {
 		return NULL;
 	}
@@ -126,13 +126,13 @@ void *r600_bo_map(struct radeon *radeon, struct r600_bo *bo, unsigned usage, void *ctx)
 {
 	struct pipe_context *pctx = ctx;

-	if (usage & PB_USAGE_UNSYNCHRONIZED) {
+	if (usage & PIPE_TRANSFER_UNSYNCHRONIZED) {
 		radeon_bo_map(radeon, bo->bo);
 		return (uint8_t *) bo->bo->data + bo->offset;
 	}

 	if (p_atomic_read(&bo->bo->reference.count) > 1) {
-		if (usage & PB_USAGE_DONTBLOCK) {
+		if (usage & PIPE_TRANSFER_DONTBLOCK) {
 			return NULL;
 		}

 		if (ctx) {
@@ -140,7 +140,7 @@ void *r600_bo_map(struct radeon *radeon, struct r600_bo *bo, unsigned usage, void *ctx)
 		}
 	}

-	if (usage & PB_USAGE_DONTBLOCK) {
+	if (usage & PIPE_TRANSFER_DONTBLOCK) {
 		uint32_t domain;

 		if (radeon_bo_busy(radeon, bo->bo, &domain))
@@ -249,7 +249,7 @@ static int radeon_init_fence(struct radeon *radeon)
 	if (radeon->fence_bo == NULL) {
 		return -ENOMEM;
 	}
-	radeon->cfence = r600_bo_map(radeon, radeon->fence_bo, PB_USAGE_UNSYNCHRONIZED, NULL);
+	radeon->cfence = r600_bo_map(radeon, radeon->fence_bo, PIPE_TRANSFER_UNSYNCHRONIZED, NULL);
 	*radeon->cfence = 0;
 	return 0;
 }
@@ -1704,9 +1704,9 @@ static boolean r600_query_result(struct r600_context *ctx, struct r600_query *query)
 	u32 *results, *current_result;

 	if (wait)
-		results = r600_bo_map(ctx->radeon, query->buffer, PB_USAGE_CPU_READ, NULL);
+		results = r600_bo_map(ctx->radeon, query->buffer, PIPE_TRANSFER_READ, NULL);
 	else
-		results = r600_bo_map(ctx->radeon, query->buffer, PB_USAGE_DONTBLOCK | PB_USAGE_CPU_READ, NULL);
+		results = r600_bo_map(ctx->radeon, query->buffer, PIPE_TRANSFER_DONTBLOCK | PIPE_TRANSFER_READ, NULL);

 	if (!results)
 		return FALSE;
@@ -1777,7 +1777,7 @@ void r600_query_begin(struct r600_context *ctx, struct r600_query *query)
 	u32 *results;
 	int i;

-	results = r600_bo_map(ctx->radeon, query->buffer, PB_USAGE_CPU_WRITE, NULL);
+	results = r600_bo_map(ctx->radeon, query->buffer, PIPE_TRANSFER_WRITE, NULL);
 	if (results) {
 		results = (u32*)((char*)results + query->results_end);
 		memset(results, 0, query->result_size);