This will be used by common code in the next commit.

Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>
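The change itself is mechanical: the driver-specific invalidate functions become static and are exposed through a new function pointer on the shared r600_common_context, so code common to r600g and radeonsi can invalidate a buffer without knowing which driver owns the context. A minimal, self-contained sketch of that pattern (types and names here are simplified stand-ins, not the actual Mesa declarations):

/* Simplified illustration of the hook introduced by this commit; the
 * real declarations live in r600_pipe_common.h and are more involved. */
#include <stdio.h>

struct pipe_resource { const char *name; };

/* The context struct shared by both drivers. */
struct r600_common_context {
	/* Installed by each driver at context-init time. */
	void (*invalidate_buffer)(struct r600_common_context *ctx,
				  struct pipe_resource *buf);
};

/* Stand-in for the driver-specific si_invalidate_buffer /
 * r600_invalidate_buffer, now static within its own driver. */
static void si_invalidate_buffer(struct r600_common_context *ctx,
				 struct pipe_resource *buf)
{
	(void)ctx;
	printf("radeonsi: reallocating %s\n", buf->name);
}

/* Common code calls through the hook, as the next commit will do. */
static void common_map_busy_buffer(struct r600_common_context *ctx,
				   struct pipe_resource *buf)
{
	ctx->invalidate_buffer(ctx, buf);
}

int main(void)
{
	struct r600_common_context ctx = {
		.invalidate_buffer = si_invalidate_buffer,
	};
	struct pipe_resource buf = { "constant buffer" };

	common_map_busy_buffer(&ctx, &buf);
	return 0;
}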
@@ -91,7 +91,7 @@ static void *r600_buffer_transfer_map(struct pipe_context *ctx,
 		/* Check if mapping this buffer would cause waiting for the GPU. */
 		if (r600_rings_is_buffer_referenced(&rctx->b, rbuffer->cs_buf, RADEON_USAGE_READWRITE) ||
 		    rctx->b.ws->buffer_is_busy(rbuffer->buf, RADEON_USAGE_READWRITE)) {
-			r600_invalidate_buffer(&rctx->b.b, &rbuffer->b.b);
+			rctx->b.invalidate_buffer(&rctx->b.b, &rbuffer->b.b);
 		}
 	}
 	else if ((usage & PIPE_TRANSFER_DISCARD_RANGE) &&
@@ -717,7 +717,6 @@ unsigned r600_get_swizzle_combined(const unsigned char *swizzle_format,
 uint32_t r600_translate_texformat(struct pipe_screen *screen, enum pipe_format format,
				   const unsigned char *swizzle_view,
				   uint32_t *word4_p, uint32_t *yuv_format_p);
-void r600_invalidate_buffer(struct pipe_context *ctx, struct pipe_resource *buf);

 /* r600_uvd.c */
 struct pipe_video_codec *r600_uvd_create_decoder(struct pipe_context *context,
@@ -2072,7 +2072,7 @@ out_unknown:
 	return ~0;
 }

-void r600_invalidate_buffer(struct pipe_context *ctx, struct pipe_resource *buf)
+static void r600_invalidate_buffer(struct pipe_context *ctx, struct pipe_resource *buf)
 {
 	struct r600_context *rctx = (struct r600_context*)ctx;
 	struct r600_resource *rbuffer = r600_resource(buf);
@@ -2162,6 +2162,7 @@ void r600_init_common_state_functions(struct r600_context *rctx)
 	rctx->b.b.create_surface = r600_create_surface;
 	rctx->b.b.surface_destroy = r600_surface_destroy;
 	rctx->b.b.draw_vbo = r600_draw_vbo;
+	rctx->b.invalidate_buffer = r600_invalidate_buffer;
 }

 void r600_trace_emit(struct r600_context *rctx)
@@ -256,6 +256,10 @@ struct r600_common_context {
			        unsigned first_level, unsigned last_level,
			        unsigned first_layer, unsigned last_layer,
			        unsigned first_sample, unsigned last_sample);
+
+	/* Reallocate the buffer and update all resource bindings where
+	 * the buffer is bound, including all resource descriptors. */
+	void (*invalidate_buffer)(struct pipe_context *ctx, struct pipe_resource *buf);
 };

 /* r600_buffer.c */
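The bodies of the invalidate functions are not part of this diff; as a rough, hypothetical sketch of the contract the new comment describes (swap in fresh idle storage, then fix up every binding that still points at the old storage), with all types and helper names invented for illustration:

/* Hypothetical sketch of the invalidate_buffer contract.  Every name
 * here is invented; the real r600/si code updates GPU descriptors and
 * reference counts rather than a plain binding array. */
#include <stdlib.h>

struct storage { void *mem; };

struct buffer {
	struct storage *storage;
	size_t size;
};

struct binding {
	struct buffer *buffer;
	struct storage *bound;  /* what the (virtual) descriptor points at */
};

struct context {
	struct binding bindings[16];
	unsigned num_bindings;
};

static void invalidate_buffer(struct context *ctx, struct buffer *buf)
{
	/* Reallocate: give the resource fresh, idle storage.  The old
	 * storage would stay alive (via refcounting, elided here) until
	 * the GPU stops referencing it, so no stall is needed. */
	struct storage *fresh = malloc(sizeof(*fresh));
	fresh->mem = malloc(buf->size);
	buf->storage = fresh;

	/* Rebind: update every place the buffer is bound so later draws
	 * read the new allocation instead of the orphaned one. */
	for (unsigned i = 0; i < ctx->num_bindings; i++) {
		if (ctx->bindings[i].buffer == buf)
			ctx->bindings[i].bound = buf->storage;
	}
}

int main(void)
{
	struct context ctx = { .num_bindings = 0 };
	struct buffer buf = { .storage = NULL, .size = 64 };

	invalidate_buffer(&ctx, &buf);
	free(buf.storage->mem);
	free(buf.storage);
	return 0;
}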
@@ -63,7 +63,7 @@ static void *r600_buffer_transfer_map(struct pipe_context *ctx,
 		/* Check if mapping this buffer would cause waiting for the GPU. */
 		if (r600_rings_is_buffer_referenced(&rctx->b, rbuffer->cs_buf, RADEON_USAGE_READWRITE) ||
 		    rctx->b.ws->buffer_is_busy(rbuffer->buf, RADEON_USAGE_READWRITE)) {
-			si_invalidate_buffer(&rctx->b.b, &rbuffer->b.b);
+			rctx->b.invalidate_buffer(&rctx->b.b, &rbuffer->b.b);
 		}
 	}

@@ -551,7 +551,7 @@ static void si_desc_reset_buffer_offset(struct pipe_context *ctx,
  * idle by discarding its contents. Apps usually tell us when to do this using
  * map_buffer flags, for example.
  */
-void si_invalidate_buffer(struct pipe_context *ctx, struct pipe_resource *buf)
+static void si_invalidate_buffer(struct pipe_context *ctx, struct pipe_resource *buf)
 {
 	struct r600_context *rctx = (struct r600_context*)ctx;
 	struct r600_resource *rbuffer = r600_resource(buf);
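The "map_buffer flags" mentioned in the comment above are the PIPE_TRANSFER_* usage bits passed to pipe_context::transfer_map. A hedged sketch of how a caller typically reaches this path, assuming the Gallium interface of the Mesa 10.x era (the helper name map_discard is invented; error handling omitted):

/* Sketch: mapping with DISCARD_WHOLE_RESOURCE lets the driver swap in
 * fresh storage (via the invalidate_buffer hook) instead of stalling
 * on the GPU when the buffer is still in flight. */
#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "util/u_box.h"

static void *map_discard(struct pipe_context *pipe,
			 struct pipe_resource *buf, unsigned size,
			 struct pipe_transfer **transfer)
{
	struct pipe_box box;

	u_box_1d(0, size, &box);
	return pipe->transfer_map(pipe, buf, 0,
				  PIPE_TRANSFER_WRITE |
				  PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE,
				  &box, transfer);
}

When the driver sees this flag on a busy buffer, it can take the invalidate path shown in the transfer_map hunks above rather than waiting for the GPU.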
@@ -811,6 +811,7 @@ void si_init_all_descriptors(struct r600_context *rctx)
 	rctx->b.b.set_constant_buffer = si_set_constant_buffer;
 	rctx->b.b.set_stream_output_targets = si_set_streamout_targets;
 	rctx->b.clear_buffer = si_clear_buffer;
+	rctx->b.invalidate_buffer = si_invalidate_buffer;
 }

 void si_release_all_descriptors(struct r600_context *rctx)
@@ -200,7 +200,6 @@ void si_all_descriptors_begin_new_cs(struct r600_context *rctx);
 void si_copy_buffer(struct r600_context *rctx,
		     struct pipe_resource *dst, struct pipe_resource *src,
		     uint64_t dst_offset, uint64_t src_offset, unsigned size);
-void si_invalidate_buffer(struct pipe_context *ctx, struct pipe_resource *buf);

 /* si_state.c */
 struct si_pipe_shader_selector;