The idea is not to use the pb_map and pb_unmap wrappers, but to call straight into the winsys.
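For context, here is a minimal sketch of the call-site pattern this series converts. It is not part of the patch; the type and field names follow the hunks below, where each driver resource keeps both a pb_buffer (buf) and a winsys CS handle (cs_buf):

    /* Hedged sketch: upload bytes through the reworked interface by mapping
     * the winsys CS handle directly instead of going through pb_map().
     * "res" stands in for the drivers' r300_resource/r600_resource. */
    static void upload_bytes(struct radeon_winsys *ws,
                             struct radeon_winsys_cs *cs,
                             struct r600_resource *res,
                             const void *data, unsigned size)
    {
        char *map = ws->buffer_map(res->cs_buf, cs, PIPE_TRANSFER_WRITE);
        if (!map)
            return;
        memcpy(map, data, size);   /* CPU write goes straight to the BO */
        ws->buffer_unmap(res->cs_buf);
    }

The point is that buffer_map/buffer_unmap no longer bounce through the pipebuffer vtbl or translate PIPE_TRANSFER_* flags into PB_USAGE_* flags; the winsys consumes the Gallium flags directly.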
@@ -152,7 +152,7 @@ static boolean r300_get_query_result(struct pipe_context* pipe,
         return vresult->b;
     }
-    map = r300->rws->buffer_map(q->buf, r300->cs,
+    map = r300->rws->buffer_map(q->cs_buf, r300->cs,
                                 PIPE_TRANSFER_READ |
                                 (!wait ? PIPE_TRANSFER_DONTBLOCK : 0));
     if (!map)
@@ -166,7 +166,7 @@ static boolean r300_get_query_result(struct pipe_context* pipe,
         map++;
     }
-    r300->rws->buffer_unmap(q->buf);
+    r300->rws->buffer_unmap(q->cs_buf);
     if (q->type == PIPE_QUERY_OCCLUSION_PREDICATE) {
         vresult->b = temp != 0;
@@ -373,7 +373,7 @@ static void r300_draw_arrays_immediate(struct r300_context *r300,
         /* Map the buffer. */
         if (!map[vbi]) {
             map[vbi] = (uint32_t*)r300->rws->buffer_map(
-                r300_resource(vbuf->buffer)->buf,
+                r300_resource(vbuf->buffer)->cs_buf,
                 r300->cs, PIPE_TRANSFER_READ | PIPE_TRANSFER_UNSYNCHRONIZED);
             map[vbi] += (vbuf->buffer_offset / 4) + stride[i] * info->start;
         }
@@ -401,7 +401,7 @@ static void r300_draw_arrays_immediate(struct r300_context *r300,
         vbi = r300->velems->velem[i].vertex_buffer_index;
         if (map[vbi]) {
-            r300->rws->buffer_unmap(r300_resource(r300->vertex_buffer[vbi].buffer)->buf);
+            r300->rws->buffer_unmap(r300_resource(r300->vertex_buffer[vbi].buffer)->cs_buf);
             map[vbi] = NULL;
         }
     }
@@ -616,7 +616,7 @@ static void r300_draw_elements(struct r300_context *r300,
     if (indexSize == 2 && (start & 1) &&
         !indexBuffer->user_ptr) {
         /* If we got here, then orgIndexBuffer == indexBuffer. */
-        uint16_t *ptr = r300->rws->buffer_map(r300_resource(orgIndexBuffer)->buf,
+        uint16_t *ptr = r300->rws->buffer_map(r300_resource(orgIndexBuffer)->cs_buf,
                                               r300->cs,
                                               PIPE_TRANSFER_READ |
                                               PIPE_TRANSFER_UNSYNCHRONIZED);
@@ -630,7 +630,7 @@ static void r300_draw_elements(struct r300_context *r300,
             r300_upload_index_buffer(r300, &indexBuffer, indexSize, &start,
                                      count, (uint8_t*)ptr);
         }
-        r300->rws->buffer_unmap(r300_resource(orgIndexBuffer)->buf);
+        r300->rws->buffer_unmap(r300_resource(orgIndexBuffer)->cs_buf);
     } else {
         if (indexBuffer->user_ptr)
             r300_upload_index_buffer(r300, &indexBuffer, indexSize,
@@ -120,7 +120,7 @@ r300_buffer_transfer_map( struct pipe_context *pipe,
         usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
     }
-    map = rws->buffer_map(rbuf->buf, r300->cs, usage);
+    map = rws->buffer_map(rbuf->cs_buf, r300->cs, usage);
     if (map == NULL)
         return NULL;
@@ -135,8 +135,8 @@ static void r300_buffer_transfer_unmap( struct pipe_context *pipe,
     struct radeon_winsys *rws = r300screen->rws;
     struct r300_resource *rbuf = r300_resource(transfer->resource);
-    if (rbuf->buf) {
-        rws->buffer_unmap(rbuf->buf);
+    if (rbuf->cs_buf) {
+        rws->buffer_unmap(rbuf->cs_buf);
     }
 }
@@ -160,12 +160,12 @@ static void r300_buffer_transfer_inline_write(struct pipe_context *pipe,
     }
     assert(rbuf->b.b.user_ptr == NULL);
-    map = rws->buffer_map(rbuf->buf, r300->cs,
+    map = rws->buffer_map(rbuf->cs_buf, r300->cs,
                           PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD_RANGE | usage);
     memcpy(map + box->x, data, box->width);
-    rws->buffer_unmap(rbuf->buf);
+    rws->buffer_unmap(rbuf->cs_buf);
 }
 static const struct u_resource_vtbl r300_buffer_vtbl =
@@ -235,11 +235,11 @@ void* r300_texture_transfer_map(struct pipe_context *ctx,
     if (r300transfer->linear_texture) {
         /* The detiled texture is of the same size as the region being mapped
          * (no offset needed). */
-        return r300->rws->buffer_map(r300transfer->linear_texture->buf,
+        return r300->rws->buffer_map(r300transfer->linear_texture->cs_buf,
                                      r300->cs, transfer->usage);
     } else {
         /* Tiling is disabled. */
-        map = r300->rws->buffer_map(tex->buf, r300->cs, transfer->usage);
+        map = r300->rws->buffer_map(tex->cs_buf, r300->cs, transfer->usage);
         if (!map) {
             return NULL;
@@ -259,8 +259,8 @@ void r300_texture_transfer_unmap(struct pipe_context *ctx,
     struct r300_resource *tex = r300_resource(transfer->resource);
     if (r300transfer->linear_texture) {
-        rws->buffer_unmap(r300transfer->linear_texture->buf);
+        rws->buffer_unmap(r300transfer->linear_texture->cs_buf);
     } else {
-        rws->buffer_unmap(tex->buf);
+        rws->buffer_unmap(tex->cs_buf);
     }
 }
@@ -2810,7 +2810,7 @@ int r600_vertex_elements_build_fetch_shader(struct r600_context *rctx, struct r6
         return -ENOMEM;
     }
-    bytecode = rctx->ws->buffer_map(ve->fetch_shader->buf, rctx->cs, PIPE_TRANSFER_WRITE);
+    bytecode = rctx->ws->buffer_map(ve->fetch_shader->cs_buf, rctx->cs, PIPE_TRANSFER_WRITE);
     if (bytecode == NULL) {
         r600_bytecode_clear(&bc);
         pipe_resource_reference((struct pipe_resource**)&ve->fetch_shader, NULL);
@@ -2825,7 +2825,7 @@ int r600_vertex_elements_build_fetch_shader(struct r600_context *rctx, struct r6
         memcpy(bytecode, bc.bytecode, ve->fs_size);
     }
-    rctx->ws->buffer_unmap(ve->fetch_shader->buf);
+    rctx->ws->buffer_unmap(ve->fetch_shader->cs_buf);
     r600_bytecode_clear(&bc);
     if (rctx->chip_class >= EVERGREEN)
@@ -129,7 +129,7 @@ static void *r600_buffer_transfer_map(struct pipe_context *pipe,
     if (rbuffer->b.b.user_ptr)
         return rbuffer->b.b.user_ptr + transfer->box.x;
-    data = rctx->ws->buffer_map(rbuffer->buf, rctx->cs, transfer->usage);
+    data = rctx->ws->buffer_map(rbuffer->cs_buf, rctx->cs, transfer->usage);
     if (!data)
         return NULL;
@@ -145,7 +145,7 @@ static void r600_buffer_transfer_unmap(struct pipe_context *pipe,
     if (rbuffer->b.b.user_ptr)
         return;
-    rctx->ws->buffer_unmap(rbuffer->buf);
+    rctx->ws->buffer_unmap(rbuffer->cs_buf);
 }
 static void r600_transfer_destroy(struct pipe_context *ctx,
@@ -75,10 +75,10 @@ void r600_get_backend_mask(struct r600_context *ctx)
     va = r600_resource_va(&ctx->screen->screen, (void*)buffer);
     /* initialize buffer with zeroes */
-    results = ctx->ws->buffer_map(buffer->buf, ctx->cs, PIPE_TRANSFER_WRITE);
+    results = ctx->ws->buffer_map(buffer->cs_buf, ctx->cs, PIPE_TRANSFER_WRITE);
     if (results) {
         memset(results, 0, ctx->max_db * 4 * 4);
-        ctx->ws->buffer_unmap(buffer->buf);
+        ctx->ws->buffer_unmap(buffer->cs_buf);
         /* emit EVENT_WRITE for ZPASS_DONE */
         cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 2, 0);
@@ -90,14 +90,14 @@ void r600_get_backend_mask(struct r600_context *ctx)
         cs->buf[cs->cdw++] = r600_context_bo_reloc(ctx, buffer, RADEON_USAGE_WRITE);
         /* analyze results */
-        results = ctx->ws->buffer_map(buffer->buf, ctx->cs, PIPE_TRANSFER_READ);
+        results = ctx->ws->buffer_map(buffer->cs_buf, ctx->cs, PIPE_TRANSFER_READ);
         if (results) {
             for(i = 0; i < ctx->max_db; i++) {
                 /* at least highest bit will be set if backend is used */
                 if (results[i*4 + 1])
                     mask |= (1<<i);
             }
-            ctx->ws->buffer_unmap(buffer->buf);
+            ctx->ws->buffer_unmap(buffer->cs_buf);
         }
     }
@@ -52,7 +52,7 @@ static struct r600_fence *r600_create_fence(struct r600_context *rctx)
         R600_ERR("r600: failed to create bo for fence objects\n");
         goto out;
     }
-    rscreen->fences.data = rctx->ws->buffer_map(rscreen->fences.bo->buf,
+    rscreen->fences.data = rctx->ws->buffer_map(rscreen->fences.bo->cs_buf,
                                                 rctx->cs,
                                                 PIPE_TRANSFER_READ_WRITE);
 }
@@ -599,7 +599,7 @@ static void r600_destroy_screen(struct pipe_screen* pscreen)
             FREE(entry);
         }
-        rscreen->ws->buffer_unmap(rscreen->fences.bo->buf);
+        rscreen->ws->buffer_unmap(rscreen->fences.bo->cs_buf);
         pipe_resource_reference((struct pipe_resource**)&rscreen->fences.bo, NULL);
     }
     pipe_mutex_destroy(rscreen->fences.mutex);
@@ -52,7 +52,7 @@ static struct r600_resource *r600_new_query_buffer(struct r600_context *ctx, uns
     switch (type) {
     case PIPE_QUERY_OCCLUSION_COUNTER:
     case PIPE_QUERY_OCCLUSION_PREDICATE:
-        results = ctx->ws->buffer_map(buf->buf, ctx->cs, PIPE_TRANSFER_WRITE);
+        results = ctx->ws->buffer_map(buf->cs_buf, ctx->cs, PIPE_TRANSFER_WRITE);
         memset(results, 0, buf_size);
         /* Set top bits for unused backends. */
@@ -66,7 +66,7 @@ static struct r600_resource *r600_new_query_buffer(struct r600_context *ctx, uns
             }
             results += 4 * ctx->max_db;
         }
-        ctx->ws->buffer_unmap(buf->buf);
+        ctx->ws->buffer_unmap(buf->cs_buf);
         break;
     case PIPE_QUERY_TIME_ELAPSED:
         break;
@@ -74,9 +74,9 @@ static struct r600_resource *r600_new_query_buffer(struct r600_context *ctx, uns
     case PIPE_QUERY_PRIMITIVES_GENERATED:
     case PIPE_QUERY_SO_STATISTICS:
    case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
-        results = ctx->ws->buffer_map(buf->buf, ctx->cs, PIPE_TRANSFER_WRITE);
+        results = ctx->ws->buffer_map(buf->cs_buf, ctx->cs, PIPE_TRANSFER_WRITE);
         memset(results, 0, buf_size);
-        ctx->ws->buffer_unmap(buf->buf);
+        ctx->ws->buffer_unmap(buf->cs_buf);
         break;
     default:
         assert(0);
@@ -406,7 +406,7 @@ static boolean r600_get_query_buffer_result(struct r600_context *ctx,
     unsigned results_base = 0;
     char *map;
-    map = ctx->ws->buffer_map(qbuf->buf->buf, ctx->cs,
+    map = ctx->ws->buffer_map(qbuf->buf->cs_buf, ctx->cs,
                               PIPE_TRANSFER_READ |
                               (wait ? 0 : PIPE_TRANSFER_DONTBLOCK));
     if (!map)
@@ -477,7 +477,7 @@ static boolean r600_get_query_buffer_result(struct r600_context *ctx,
         assert(0);
     }
-    ctx->ws->buffer_unmap(qbuf->buf->buf);
+    ctx->ws->buffer_unmap(qbuf->buf->cs_buf);
     return TRUE;
 }
@@ -71,7 +71,7 @@ static int r600_pipe_shader(struct pipe_context *ctx, struct r600_pipe_shader *s
     if (shader->bo == NULL) {
         return -ENOMEM;
     }
-    ptr = (uint32_t*)rctx->ws->buffer_map(shader->bo->buf, rctx->cs, PIPE_TRANSFER_WRITE);
+    ptr = (uint32_t*)rctx->ws->buffer_map(shader->bo->cs_buf, rctx->cs, PIPE_TRANSFER_WRITE);
     if (R600_BIG_ENDIAN) {
         for (i = 0; i < rshader->bc.ndw; ++i) {
             ptr[i] = bswap_32(rshader->bc.bytecode[i]);
@@ -79,7 +79,7 @@ static int r600_pipe_shader(struct pipe_context *ctx, struct r600_pipe_shader *s
     } else {
         memcpy(ptr, rshader->bc.bytecode, rshader->bc.ndw * sizeof(*ptr));
     }
-    rctx->ws->buffer_unmap(shader->bo->buf);
+    rctx->ws->buffer_unmap(shader->bo->cs_buf);
 }
     /* build state */
     switch (rshader->processor_type) {
@@ -617,9 +617,9 @@ r600_create_so_target(struct pipe_context *ctx,
     t->filled_size = (struct r600_resource*)
         pipe_buffer_create(ctx->screen, PIPE_BIND_CUSTOM, PIPE_USAGE_STATIC, 4);
-    ptr = rctx->ws->buffer_map(t->filled_size->buf, rctx->cs, PIPE_TRANSFER_WRITE);
+    ptr = rctx->ws->buffer_map(t->filled_size->cs_buf, rctx->cs, PIPE_TRANSFER_WRITE);
     memset(ptr, 0, t->filled_size->buf->size);
-    rctx->ws->buffer_unmap(t->filled_size->buf);
+    rctx->ws->buffer_unmap(t->filled_size->cs_buf);
     return &t->b;
 }
@@ -898,20 +898,20 @@ void* r600_texture_transfer_map(struct pipe_context *ctx,
 {
     struct r600_context *rctx = (struct r600_context *)ctx;
     struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
-    struct pb_buffer *buf;
+    struct radeon_winsys_cs_handle *buf;
     enum pipe_format format = transfer->resource->format;
     unsigned offset = 0;
     char *map;
     if (rtransfer->staging) {
-        buf = ((struct r600_resource *)rtransfer->staging)->buf;
+        buf = ((struct r600_resource *)rtransfer->staging)->cs_buf;
     } else {
         struct r600_resource_texture *rtex = (struct r600_resource_texture*)transfer->resource;
         if (rtex->flushed_depth_texture)
-            buf = ((struct r600_resource *)rtex->flushed_depth_texture)->buf;
+            buf = ((struct r600_resource *)rtex->flushed_depth_texture)->cs_buf;
         else
-            buf = ((struct r600_resource *)transfer->resource)->buf;
+            buf = ((struct r600_resource *)transfer->resource)->cs_buf;
         offset = rtransfer->offset +
                  transfer->box.y / util_format_get_blockheight(format) * transfer->stride +
@@ -930,17 +930,17 @@ void r600_texture_transfer_unmap(struct pipe_context *ctx,
 {
     struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
     struct r600_context *rctx = (struct r600_context*)ctx;
-    struct pb_buffer *buf;
+    struct radeon_winsys_cs_handle *buf;
     if (rtransfer->staging) {
-        buf = ((struct r600_resource *)rtransfer->staging)->buf;
+        buf = ((struct r600_resource *)rtransfer->staging)->cs_buf;
     } else {
         struct r600_resource_texture *rtex = (struct r600_resource_texture*)transfer->resource;
         if (rtex->flushed_depth_texture) {
-            buf = ((struct r600_resource *)rtex->flushed_depth_texture)->buf;
+            buf = ((struct r600_resource *)rtex->flushed_depth_texture)->cs_buf;
         } else {
-            buf = ((struct r600_resource *)transfer->resource)->buf;
+            buf = ((struct r600_resource *)transfer->resource)->cs_buf;
         }
     }
     rctx->ws->buffer_unmap(buf);
@@ -1306,7 +1306,7 @@ static void evergreen_set_ps_sampler_view(struct pipe_context *ctx, unsigned cou
     bo = (struct r600_resource*)
         pipe_buffer_create(ctx->screen, PIPE_BIND_CUSTOM, PIPE_USAGE_IMMUTABLE,
                            count * sizeof(resource[0]->state));
-    ptr = rctx->ws->buffer_map(bo->buf, rctx->cs, PIPE_TRANSFER_WRITE);
+    ptr = rctx->ws->buffer_map(bo->cs_buf, rctx->cs, PIPE_TRANSFER_WRITE);
     for (i = 0; i < count; i++, ptr += sizeof(resource[0]->state)) {
         pipe_sampler_view_reference(
@@ -1322,7 +1322,7 @@ static void evergreen_set_ps_sampler_view(struct pipe_context *ctx, unsigned cou
         memset(ptr, 0, sizeof(resource[0]->state));
     }
-    rctx->ws->buffer_unmap(bo->buf);
+    rctx->ws->buffer_unmap(bo->cs_buf);
     for (i = count; i < NUM_TEX_UNITS; i++) {
         if (rctx->ps_samplers.views[i])
@@ -1357,13 +1357,13 @@ static void evergreen_bind_ps_sampler(struct pipe_context *ctx, unsigned count,
     bo = (struct r600_resource*)
         pipe_buffer_create(ctx->screen, PIPE_BIND_CUSTOM, PIPE_USAGE_IMMUTABLE,
                            count * sizeof(rstates[0]->val));
-    ptr = rctx->ws->buffer_map(bo->buf, rctx->cs, PIPE_TRANSFER_WRITE);
+    ptr = rctx->ws->buffer_map(bo->cs_buf, rctx->cs, PIPE_TRANSFER_WRITE);
     for (i = 0; i < count; i++, ptr += sizeof(rstates[0]->val)) {
         memcpy(ptr, rstates[i]->val, sizeof(rstates[0]->val));
     }
-    rctx->ws->buffer_unmap(bo->buf);
+    rctx->ws->buffer_unmap(bo->cs_buf);
     va = r600_resource_va(ctx->screen, (void *)bo);
     r600_pipe_state_add_reg(rstate, R_00B038_SPI_SHADER_USER_DATA_PS_2, va, bo, RADEON_USAGE_READ);
@@ -79,7 +79,7 @@ static void *r600_buffer_transfer_map(struct pipe_context *pipe,
     if (rbuffer->b.b.user_ptr)
         return (uint8_t*)rbuffer->b.b.user_ptr + transfer->box.x;
-    data = rctx->ws->buffer_map(rbuffer->buf, rctx->cs, transfer->usage);
+    data = rctx->ws->buffer_map(rbuffer->cs_buf, rctx->cs, transfer->usage);
     if (!data)
         return NULL;
@@ -95,7 +95,7 @@ static void r600_buffer_transfer_unmap(struct pipe_context *pipe,
     if (rbuffer->b.b.user_ptr)
         return;
-    rctx->ws->buffer_unmap(rbuffer->buf);
+    rctx->ws->buffer_unmap(rbuffer->cs_buf);
 }
 static void r600_buffer_transfer_flush_region(struct pipe_context *pipe,
@@ -126,12 +126,12 @@ static void r600_buffer_transfer_inline_write(struct pipe_context *pipe,
     assert(rbuffer->b.b.user_ptr == NULL);
-    map = rctx->ws->buffer_map(rbuffer->buf, rctx->cs,
+    map = rctx->ws->buffer_map(rbuffer->cs_buf, rctx->cs,
                                PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD_RANGE | usage);
     memcpy(map + box->x, data, box->width);
-    rctx->ws->buffer_unmap(rbuffer->buf);
+    rctx->ws->buffer_unmap(rbuffer->cs_buf);
 }
 static const struct u_resource_vtbl r600_buffer_vtbl =
@@ -72,12 +72,12 @@ void r600_get_backend_mask(struct r600_context *ctx)
         goto err;
     /* initialize buffer with zeroes */
-    results = ctx->ws->buffer_map(buffer->buf, ctx->cs, PIPE_TRANSFER_WRITE);
+    results = ctx->ws->buffer_map(buffer->cs_buf, ctx->cs, PIPE_TRANSFER_WRITE);
     if (results) {
         uint64_t va = 0;
         memset(results, 0, ctx->max_db * 4 * 4);
-        ctx->ws->buffer_unmap(buffer->buf);
+        ctx->ws->buffer_unmap(buffer->cs_buf);
         /* emit EVENT_WRITE for ZPASS_DONE */
         va = r600_resource_va(&ctx->screen->screen, (void *)buffer);
@@ -90,14 +90,14 @@ void r600_get_backend_mask(struct r600_context *ctx)
         cs->buf[cs->cdw++] = r600_context_bo_reloc(ctx, buffer, RADEON_USAGE_WRITE);
         /* analyze results */
-        results = ctx->ws->buffer_map(buffer->buf, ctx->cs, PIPE_TRANSFER_READ);
+        results = ctx->ws->buffer_map(buffer->cs_buf, ctx->cs, PIPE_TRANSFER_READ);
         if (results) {
             for(i = 0; i < ctx->max_db; i++) {
                 /* at least highest bit will be set if backend is used */
                 if (results[i*4 + 1])
                     mask |= (1<<i);
             }
-            ctx->ws->buffer_unmap(buffer->buf);
+            ctx->ws->buffer_unmap(buffer->cs_buf);
         }
     }
@@ -608,7 +608,7 @@ static boolean r600_query_result(struct r600_context *ctx, struct r600_query *qu
     unsigned results_base = query->results_start;
     char *map;
-    map = ctx->ws->buffer_map(query->buffer->buf, ctx->cs,
+    map = ctx->ws->buffer_map(query->buffer->cs_buf, ctx->cs,
                               PIPE_TRANSFER_READ |
                               (wait ? 0 : PIPE_TRANSFER_DONTBLOCK));
     if (!map)
@@ -680,7 +680,7 @@ static boolean r600_query_result(struct r600_context *ctx, struct r600_query *qu
     }
     query->results_start = query->results_end;
-    ctx->ws->buffer_unmap(query->buffer->buf);
+    ctx->ws->buffer_unmap(query->buffer->cs_buf);
     return TRUE;
 }
@@ -703,7 +703,7 @@ void r600_query_begin(struct r600_context *ctx, struct r600_query *query)
     switch (query->type) {
     case PIPE_QUERY_OCCLUSION_COUNTER:
     case PIPE_QUERY_OCCLUSION_PREDICATE:
-        results = ctx->ws->buffer_map(query->buffer->buf, ctx->cs, PIPE_TRANSFER_WRITE);
+        results = ctx->ws->buffer_map(query->buffer->cs_buf, ctx->cs, PIPE_TRANSFER_WRITE);
         if (results) {
             results = (uint32_t*)((char*)results + query->results_end);
             memset(results, 0, query->result_size);
@@ -715,7 +715,7 @@ void r600_query_begin(struct r600_context *ctx, struct r600_query *query)
                 results[(i * 4)+3] = 0x80000000;
             }
         }
-        ctx->ws->buffer_unmap(query->buffer->buf);
+        ctx->ws->buffer_unmap(query->buffer->cs_buf);
         break;
     case PIPE_QUERY_TIME_ELAPSED:
@@ -724,10 +724,10 @@ void r600_query_begin(struct r600_context *ctx, struct r600_query *query)
     case PIPE_QUERY_PRIMITIVES_GENERATED:
     case PIPE_QUERY_SO_STATISTICS:
     case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
-        results = ctx->ws->buffer_map(query->buffer->buf, ctx->cs, PIPE_TRANSFER_WRITE);
+        results = ctx->ws->buffer_map(query->buffer->cs_buf, ctx->cs, PIPE_TRANSFER_WRITE);
         results = (uint32_t*)((char*)results + query->results_end);
         memset(results, 0, query->result_size);
-        ctx->ws->buffer_unmap(query->buffer->buf);
+        ctx->ws->buffer_unmap(query->buffer->cs_buf);
         break;
     default:
         assert(0);
@@ -1089,9 +1089,9 @@ void r600_context_streamout_end(struct r600_context *ctx)
         if (!t[i])
             continue;
-        uint32_t *ptr = ctx->ws->buffer_map(t[i]->filled_size->buf, ctx->cs, RADEON_USAGE_READ);
+        uint32_t *ptr = ctx->ws->buffer_map(t[i]->filled_size->cs_buf, ctx->cs, RADEON_USAGE_READ);
         printf("FILLED_SIZE%i: %u\n", i, *ptr);
-        ctx->ws->buffer_unmap(t[i]->filled_size->buf);
+        ctx->ws->buffer_unmap(t[i]->filled_size->cs_buf);
     }
 }
@@ -501,9 +501,9 @@ r600_create_so_target(struct pipe_context *ctx,
     t->filled_size = (struct r600_resource*)
         pipe_buffer_create(ctx->screen, PIPE_BIND_CUSTOM, PIPE_USAGE_STATIC, 4);
-    ptr = rctx->ws->buffer_map(t->filled_size->buf, rctx->cs, PIPE_TRANSFER_WRITE);
+    ptr = rctx->ws->buffer_map(t->filled_size->cs_buf, rctx->cs, PIPE_TRANSFER_WRITE);
     memset(ptr, 0, t->filled_size->buf->size);
-    rctx->ws->buffer_unmap(t->filled_size->buf);
+    rctx->ws->buffer_unmap(t->filled_size->cs_buf);
     return &t->b;
 }
@@ -570,7 +570,7 @@ static void r600_vertex_buffer_update(struct r600_context *rctx)
     if (t_list_buffer == NULL)
         return;
-    ptr = (uint32_t*)rctx->ws->buffer_map(t_list_buffer->buf,
+    ptr = (uint32_t*)rctx->ws->buffer_map(t_list_buffer->cs_buf,
                                           rctx->cs,
                                           PIPE_TRANSFER_WRITE);
@@ -770,20 +770,20 @@ void* r600_texture_transfer_map(struct pipe_context *ctx,
 {
     struct r600_context *rctx = (struct r600_context *)ctx;
     struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
-    struct pb_buffer *buf;
+    struct radeon_winsys_cs_handle *buf;
     enum pipe_format format = transfer->resource->format;
     unsigned offset = 0;
     char *map;
     if (rtransfer->staging_texture) {
-        buf = ((struct r600_resource *)rtransfer->staging_texture)->buf;
+        buf = ((struct r600_resource *)rtransfer->staging_texture)->cs_buf;
     } else {
         struct r600_resource_texture *rtex = (struct r600_resource_texture*)transfer->resource;
         if (rtex->flushed_depth_texture)
-            buf = ((struct r600_resource *)rtex->flushed_depth_texture)->buf;
+            buf = ((struct r600_resource *)rtex->flushed_depth_texture)->cs_buf;
         else
-            buf = ((struct r600_resource *)transfer->resource)->buf;
+            buf = ((struct r600_resource *)transfer->resource)->cs_buf;
         offset = rtransfer->offset +
                  transfer->box.y / util_format_get_blockheight(format) * transfer->stride +
@@ -802,17 +802,17 @@ void r600_texture_transfer_unmap(struct pipe_context *ctx,
 {
     struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
     struct r600_context *rctx = (struct r600_context*)ctx;
-    struct pb_buffer *buf;
+    struct radeon_winsys_cs_handle *buf;
     if (rtransfer->staging_texture) {
-        buf = ((struct r600_resource *)rtransfer->staging_texture)->buf;
+        buf = ((struct r600_resource *)rtransfer->staging_texture)->cs_buf;
     } else {
         struct r600_resource_texture *rtex = (struct r600_resource_texture*)transfer->resource;
         if (rtex->flushed_depth_texture) {
-            buf = ((struct r600_resource *)rtex->flushed_depth_texture)->buf;
+            buf = ((struct r600_resource *)rtex->flushed_depth_texture)->cs_buf;
         } else {
-            buf = ((struct r600_resource *)transfer->resource)->buf;
+            buf = ((struct r600_resource *)transfer->resource)->cs_buf;
         }
     }
     rctx->ws->buffer_unmap(buf);
@@ -67,7 +67,7 @@ static struct r600_fence *r600_create_fence(struct r600_context *rctx)
         R600_ERR("r600: failed to create bo for fence objects\n");
         goto out;
     }
-    rscreen->fences.data = rctx->ws->buffer_map(rscreen->fences.bo->buf,
+    rscreen->fences.data = rctx->ws->buffer_map(rscreen->fences.bo->cs_buf,
                                                 rctx->cs,
                                                 PIPE_TRANSFER_READ_WRITE);
 }
@@ -516,7 +516,7 @@ static void r600_destroy_screen(struct pipe_screen* pscreen)
             FREE(entry);
         }
-        rscreen->ws->buffer_unmap(rscreen->fences.bo->buf);
+        rscreen->ws->buffer_unmap(rscreen->fences.bo->cs_buf);
         pipe_resource_reference((struct pipe_resource**)&rscreen->fences.bo, NULL);
     }
     pipe_mutex_destroy(rscreen->fences.mutex);
@@ -545,7 +545,7 @@ int si_pipe_shader_create(
     if (shader->bo == NULL) {
         return -ENOMEM;
     }
-    ptr = (uint32_t*)rctx->ws->buffer_map(shader->bo->buf, rctx->cs, PIPE_TRANSFER_WRITE);
+    ptr = (uint32_t*)rctx->ws->buffer_map(shader->bo->cs_buf, rctx->cs, PIPE_TRANSFER_WRITE);
     if (0 /*R600_BIG_ENDIAN*/) {
         for (i = 0; i < (inst_byte_count-12)/4; ++i) {
             ptr[i] = util_bswap32(*(uint32_t*)(inst_bytes+12 + i*4));
@@ -553,7 +553,7 @@ int si_pipe_shader_create(
     } else {
         memcpy(ptr, inst_bytes + 12, inst_byte_count - 12);
     }
-    rctx->ws->buffer_unmap(shader->bo->buf);
+    rctx->ws->buffer_unmap(shader->bo->cs_buf);
 }
 free(inst_bytes);
@@ -330,37 +330,20 @@ static void radeon_bo_destroy(struct pb_buffer *_buf)
     FREE(bo);
 }
-static unsigned get_pb_usage_from_transfer_flags(enum pipe_transfer_usage usage)
-{
-    unsigned res = 0;
-    if (usage & PIPE_TRANSFER_WRITE)
-        res |= PB_USAGE_CPU_WRITE;
-    if (usage & PIPE_TRANSFER_DONTBLOCK)
-        res |= PB_USAGE_DONTBLOCK;
-    if (usage & PIPE_TRANSFER_UNSYNCHRONIZED)
-        res |= PB_USAGE_UNSYNCHRONIZED;
-    return res;
-}
-static void *radeon_bo_map_internal(struct pb_buffer *_buf,
-                                    unsigned flags, void *flush_ctx)
+static void *radeon_bo_map(struct radeon_winsys_cs_handle *buf,
+                           struct radeon_winsys_cs *rcs,
+                           enum pipe_transfer_usage usage)
 {
-    struct radeon_bo *bo = radeon_bo(_buf);
-    struct radeon_drm_cs *cs = flush_ctx;
-    struct drm_radeon_gem_mmap args;
+    struct radeon_bo *bo = (struct radeon_bo*)buf;
+    struct radeon_drm_cs *cs = (struct radeon_drm_cs*)rcs;
+    struct drm_radeon_gem_mmap args = {0};
     void *ptr;
-    memset(&args, 0, sizeof(args));
     /* If it's not unsynchronized bo_map, flush CS if needed and then wait. */
-    if (!(flags & PB_USAGE_UNSYNCHRONIZED)) {
+    if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
         /* DONTBLOCK doesn't make sense with UNSYNCHRONIZED. */
-        if (flags & PB_USAGE_DONTBLOCK) {
-            if (!(flags & PB_USAGE_CPU_WRITE)) {
+        if (usage & PIPE_TRANSFER_DONTBLOCK) {
+            if (!(usage & PIPE_TRANSFER_WRITE)) {
                 /* Mapping for read.
                  *
                  * Since we are mapping for read, we don't need to wait
@@ -389,7 +372,7 @@ static void *radeon_bo_map_internal(struct pb_buffer *_buf,
             }
         }
     } else {
-        if (!(flags & PB_USAGE_CPU_WRITE)) {
+        if (!(usage & PIPE_TRANSFER_WRITE)) {
            /* Mapping for read.
             *
             * Since we are mapping for read, we don't need to wait
@@ -454,7 +437,7 @@ static void *radeon_bo_map_internal(struct pb_buffer *_buf,
     return bo->ptr;
 }
-static void radeon_bo_unmap_internal(struct pb_buffer *_buf)
+static void radeon_bo_unmap(struct radeon_winsys_cs_handle *_buf)
 {
     /* NOP */
 }
@@ -482,8 +465,8 @@ static void radeon_bo_fence(struct pb_buffer *buf,
 const struct pb_vtbl radeon_bo_vtbl = {
     radeon_bo_destroy,
-    radeon_bo_map_internal,
-    radeon_bo_unmap_internal,
+    NULL, /* never called */
+    NULL, /* never called */
     radeon_bo_validate,
     radeon_bo_fence,
     radeon_bo_get_base_buffer,
@@ -634,13 +617,6 @@ struct pb_manager *radeon_bomgr_create(struct radeon_drm_winsys *rws)
     return &mgr->base;
 }
-static void *radeon_bo_map(struct pb_buffer *buf,
-                           struct radeon_winsys_cs *cs,
-                           enum pipe_transfer_usage usage)
-{
-    return pb_map(buf, get_pb_usage_from_transfer_flags(usage), cs);
-}
 static unsigned eg_tile_split(unsigned tile_split)
 {
     switch (tile_split) {
@@ -909,7 +885,7 @@ void radeon_bomgr_init_functions(struct radeon_drm_winsys *ws)
     ws->base.buffer_set_tiling = radeon_bo_set_tiling;
     ws->base.buffer_get_tiling = radeon_bo_get_tiling;
     ws->base.buffer_map = radeon_bo_map;
-    ws->base.buffer_unmap = pb_unmap;
+    ws->base.buffer_unmap = radeon_bo_unmap;
     ws->base.buffer_wait = radeon_bo_wait;
     ws->base.buffer_is_busy = radeon_bo_is_busy;
     ws->base.buffer_create = radeon_winsys_bo_create;
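Worth noting: radeon_bo_unmap() is deliberately a no-op, and radeon_bo_map() returns a cached bo->ptr (see the "return bo->ptr;" context above), so the CPU mapping persists for the lifetime of the BO and is only torn down in radeon_bo_destroy(). A hedged, illustrative sketch of what that means for callers; the helper name is invented for this example:

    /* Illustrative only: repeated map/unmap cycles on the same BO are cheap
     * because the winsys caches the mapping and unmap does nothing. */
    static void *map_twice(struct radeon_winsys *ws,
                           struct radeon_winsys_cs *cs,
                           struct radeon_winsys_cs_handle *buf)
    {
        void *a = ws->buffer_map(buf, cs, PIPE_TRANSFER_WRITE);
        ws->buffer_unmap(buf);          /* NOP in this winsys */
        void *b = ws->buffer_map(buf, cs, PIPE_TRANSFER_WRITE |
                                          PIPE_TRANSFER_UNSYNCHRONIZED);
        assert(a == b);                 /* same cached pointer */
        ws->buffer_unmap(buf);
        return b;
    }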
@@ -158,7 +158,7 @@ struct radeon_winsys {
      * \param usage A bitmask of the PIPE_TRANSFER_* flags.
      * \return The pointer at the beginning of the buffer.
      */
-    void *(*buffer_map)(struct pb_buffer *buf,
+    void *(*buffer_map)(struct radeon_winsys_cs_handle *buf,
                         struct radeon_winsys_cs *cs,
                         enum pipe_transfer_usage usage);
@@ -167,7 +167,7 @@ struct radeon_winsys {
      *
      * \param buf A winsys buffer object to unmap.
      */
-    void (*buffer_unmap)(struct pb_buffer *buf);
+    void (*buffer_unmap)(struct radeon_winsys_cs_handle *buf);
     /**
      * Return TRUE if a buffer object is being used by the GPU.
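Finally, a short usage sketch of the updated interface, mirroring the non-blocking query read-back pattern in the driver hunks above. The helper itself is illustrative, not part of the patch:

    /* Illustrative only: poll a result buffer without stalling the CPU.
     * Under DONTBLOCK, buffer_map may flush the CS and returns NULL if the
     * BO is still busy, so the caller can retry later. */
    static boolean try_read_first_dword(struct radeon_winsys *ws,
                                        struct radeon_winsys_cs *cs,
                                        struct radeon_winsys_cs_handle *buf,
                                        uint32_t *out)
    {
        uint32_t *map = ws->buffer_map(buf, cs,
                                       PIPE_TRANSFER_READ |
                                       PIPE_TRANSFER_DONTBLOCK);
        if (!map)
            return FALSE;               /* still busy; try again later */
        *out = map[0];
        ws->buffer_unmap(buf);
        return TRUE;
    }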