
r600g: enable thread offloading

Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
tags/mesa-8.0-rc1
Marek Olšák, 14 years ago
commit c79e9f0ed5

src/gallium/drivers/r600/r600_pipe.c  (+2, -3)

@@ -117,9 +117,8 @@ static struct r600_fence *r600_create_fence(struct r600_pipe_context *ctx)
}


- static void r600_flush(struct pipe_context *ctx,
- struct pipe_fence_handle **fence,
- unsigned flags)
+ void r600_flush(struct pipe_context *ctx, struct pipe_fence_handle **fence,
+ unsigned flags)
{
struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
struct r600_fence **rfence = (struct r600_fence**)fence;

src/gallium/drivers/r600/r600_pipe.h  (+5, -0)

@@ -273,6 +273,11 @@ struct pipe_resource *r600_buffer_from_handle(struct pipe_screen *screen,
struct winsys_handle *whandle);
void r600_upload_index_buffer(struct r600_pipe_context *rctx, struct r600_drawl *draw);


+ /* r600_pipe.c */
+ void r600_flush(struct pipe_context *ctx, struct pipe_fence_handle **fence,
+ unsigned flags);
+
/* r600_query.c */
void r600_init_query_functions(struct r600_pipe_context *rctx);
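These two hunks work as a pair: removing the static qualifier in r600_pipe.c and adding the prototype to r600_pipe.h give r600_flush external linkage, so other parts of the driver (r600_texture.c below) can call it directly and choose the flush flags themselves. A minimal, self-contained sketch of that export pattern follows; my_flush and FLUSH_ASYNC are invented stand-ins, not the driver's names.

/* Sketch of giving a formerly file-local function external linkage so other
 * translation units can call it with an explicit flag, the way r600_flush is
 * exported via r600_pipe.h above.  Illustrative names only. */
#include <stdio.h>

#define FLUSH_ASYNC 0x1                /* stands in for RADEON_FLUSH_ASYNC */

/* In the driver this prototype now lives in r600_pipe.h; before the patch
 * the function was static and reachable only from within r600_pipe.c. */
void my_flush(unsigned flags);

void my_flush(unsigned flags)
{
	printf("flush, async=%d\n", (flags & FLUSH_ASYNC) != 0);
}

int main(void)
{
	/* Any file that sees the prototype can now do this, which is what
	 * the r600_texture.c hunks below do with r600_flush(). */
	my_flush(FLUSH_ASYNC);
	my_flush(0);
	return 0;
}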


src/gallium/drivers/r600/r600_texture.c  (+2, -2)

@@ -66,7 +66,7 @@ static void r600_copy_from_staging_texture(struct pipe_context *ctx, struct r600
rtransfer->staging_texture,
0, &sbox);

- ctx->flush(ctx, NULL);
+ r600_flush(ctx, NULL, RADEON_FLUSH_ASYNC);
}

unsigned r600_texture_get_offset(struct r600_resource_texture *rtex,
@@ -645,7 +645,7 @@ struct pipe_transfer* r600_texture_get_transfer(struct pipe_context *ctx,
if (usage & PIPE_TRANSFER_READ) {
r600_copy_to_staging_texture(ctx, trans);
/* Always referenced in the blit. */
- ctx->flush(ctx, NULL);
+ r600_flush(ctx, NULL, 0);
}
return &trans->transfer;
}
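The two call sites above pick different flags: after copying into the staging texture the flush is requested with RADEON_FLUSH_ASYNC, since nothing needs the result immediately, while the PIPE_TRANSFER_READ path flushes with flags 0 because the CPU is about to map and read the staging data. Below is a toy model of that distinction, with a worker thread standing in for the winsys submission thread; it illustrates the idea only and is not the r600 winsys code.

/* Toy model of an "offloaded" flush: a worker thread plays the part of the
 * winsys submission thread.  toy_flush(TOY_FLUSH_ASYNC) hands work off and
 * returns immediately; toy_flush(0) waits until the worker has drained the
 * queue, which is the safe choice right before the CPU reads results. */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

#define TOY_FLUSH_ASYNC 1

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static int pending;                    /* buffers handed to the "GPU" */

static void *worker(void *arg)
{
	(void)arg;
	for (;;) {
		pthread_mutex_lock(&lock);
		while (pending == 0)
			pthread_cond_wait(&cond, &lock);
		pthread_mutex_unlock(&lock);

		usleep(1000);          /* pretend to submit to the kernel */

		pthread_mutex_lock(&lock);
		pending--;
		pthread_cond_broadcast(&cond);
		pthread_mutex_unlock(&lock);
	}
	return NULL;
}

static void toy_flush(unsigned flags)
{
	pthread_mutex_lock(&lock);
	pending++;
	pthread_cond_broadcast(&cond);
	if (!(flags & TOY_FLUSH_ASYNC)) {
		/* Synchronous flush: wait until the worker is done. */
		while (pending > 0)
			pthread_cond_wait(&cond, &lock);
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t t;
	pthread_create(&t, NULL, worker, NULL);

	toy_flush(TOY_FLUSH_ASYNC);    /* copy to staging: fire and forget */
	toy_flush(0);                  /* about to read back: wait */
	printf("safe to map the staging buffer now\n");
	return 0;
}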

src/gallium/winsys/r600/drm/evergreen_hw_context.c  (+1, -1)

@@ -1156,7 +1156,7 @@ void evergreen_context_draw(struct r600_context *ctx, const struct r600_draw *dr

if ((ctx->pm4_dirty_cdwords + ndwords + ctx->pm4_cdwords) > ctx->pm4_ndwords) {
/* need to flush */
- r600_context_flush(ctx, 0);
+ r600_context_flush(ctx, RADEON_FLUSH_ASYNC);
}
/* at that point everythings is flushed and ctx->pm4_cdwords = 0 */
if ((ctx->pm4_dirty_cdwords + ndwords) > ctx->pm4_ndwords) {

src/gallium/winsys/r600/drm/r600_drm.c  (+0, -3)

@@ -249,9 +249,6 @@ struct radeon *radeon_create(struct radeon_winsys *ws)
if (radeon_drm_get_tiling(radeon))
return NULL;

- /* XXX disable ioctl thread offloading until the porting is done. */
- setenv("RADEON_THREAD", "0", 0);
-
return radeon;
}
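This removed hunk is what actually enables the feature: setenv("RADEON_THREAD", "0", 0) pinned the thread-offloading switch to off by default (the final 0 means a value already exported by the user is left alone), so deleting it lets the winsys fall back to its own default. The diff does not show how the variable is read; below is a plausible standalone sketch of such a getenv-based toggle, where everything except the RADEON_THREAD name is an assumption.

/* Minimal sketch of an environment-variable kill switch like the one the
 * removed setenv() was targeting.  The parsing below is an assumption for
 * illustration; the real check lives in the r600 winsys, not in this diff. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int thread_offload_enabled(void)
{
	const char *v = getenv("RADEON_THREAD");

	if (!v)
		return 1;               /* unset: default to enabled */
	return strcmp(v, "0") != 0;     /* "0" disables, anything else enables */
}

int main(void)
{
	printf("thread offloading: %s\n",
	       thread_offload_enabled() ? "on" : "off");

	/* This is what the removed hunk did: force the default to "off"
	 * without clobbering a value the user already exported. */
	setenv("RADEON_THREAD", "0", 0);
	printf("after setenv(..., 0): %s\n",
	       thread_offload_enabled() ? "on" : "off");
	return 0;
}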


src/gallium/winsys/r600/drm/r600_hw_context.c  (+7, -7)

@@ -938,7 +938,7 @@ void r600_context_flush_all(struct r600_context *ctx, unsigned flush_flags)

if ((ctx->pm4_dirty_cdwords + ndwords + ctx->pm4_cdwords) > ctx->pm4_ndwords) {
/* need to flush */
- r600_context_flush(ctx, 0);
+ r600_context_flush(ctx, RADEON_FLUSH_ASYNC);
}

ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_SURFACE_SYNC, 3, ctx->predicate_drawing);
@@ -1436,7 +1436,7 @@ void r600_context_draw(struct r600_context *ctx, const struct r600_draw *draw)

if ((ctx->pm4_dirty_cdwords + ndwords + ctx->pm4_cdwords) > ctx->pm4_ndwords) {
/* need to flush */
- r600_context_flush(ctx, 0);
+ r600_context_flush(ctx, RADEON_FLUSH_ASYNC);
}
/* at that point everythings is flushed and ctx->pm4_cdwords = 0 */
if ((ctx->pm4_dirty_cdwords + ndwords) > ctx->pm4_ndwords) {
@@ -1549,7 +1549,7 @@ void r600_context_emit_fence(struct r600_context *ctx, struct r600_bo *fence_bo,

if ((ctx->pm4_dirty_cdwords + ndwords + ctx->pm4_cdwords) > ctx->pm4_ndwords) {
/* need to flush */
- r600_context_flush(ctx, 0);
+ r600_context_flush(ctx, RADEON_FLUSH_ASYNC);
}

ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
@@ -1611,7 +1611,7 @@ void r600_query_begin(struct r600_context *ctx, struct r600_query *query)

if ((required_space + ctx->pm4_cdwords) > ctx->pm4_ndwords) {
/* need to flush */
- r600_context_flush(ctx, 0);
+ r600_context_flush(ctx, RADEON_FLUSH_ASYNC);
}

if (query->type == PIPE_QUERY_OCCLUSION_COUNTER) {
@@ -1622,7 +1622,7 @@ void r600_query_begin(struct r600_context *ctx, struct r600_query *query)
query->queries_emitted = 1;
} else {
if (++query->queries_emitted > query->buffer_size / query->result_size / 2)
- r600_context_flush(ctx, 0);
+ r600_context_flush(ctx, RADEON_FLUSH_ASYNC);
}
}

@@ -1714,7 +1714,7 @@ void r600_query_predication(struct r600_context *ctx, struct r600_query *query,
{
if (operation == PREDICATION_OP_CLEAR) {
if (ctx->pm4_cdwords + 3 > ctx->pm4_ndwords)
- r600_context_flush(ctx, 0);
+ r600_context_flush(ctx, RADEON_FLUSH_ASYNC);

ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_SET_PREDICATION, 1, 0);
ctx->pm4[ctx->pm4_cdwords++] = 0;
@@ -1730,7 +1730,7 @@ void r600_query_predication(struct r600_context *ctx, struct r600_query *query,
count /= query->result_size;

if (ctx->pm4_cdwords + 5 * count > ctx->pm4_ndwords)
- r600_context_flush(ctx, 0);
+ r600_context_flush(ctx, RADEON_FLUSH_ASYNC);

op = PRED_OP(operation) | PREDICATION_DRAW_VISIBLE |
(flag_wait ? PREDICATION_HINT_WAIT : PREDICATION_HINT_NOWAIT_DRAW);
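Every hunk in this file, like the one in evergreen_hw_context.c above, applies the same guard: if appending the next packets would overflow the pm4 command buffer, flush first, and the flush now carries RADEON_FLUSH_ASYNC so submission can be handed off instead of stalling the caller. A standalone sketch of that overflow check follows; the buffer size, packet contents and toy_flush are invented, and only the shape of the test mirrors the driver.

/* Sketch of the "flush when the command buffer would overflow" guard seen in
 * the hunks above.  Buffer size, packet size and the flush itself are toy
 * stand-ins. */
#include <stdio.h>

#define CS_NDWORDS 16                  /* toy pm4_ndwords */
#define FLUSH_ASYNC 1                  /* stands in for RADEON_FLUSH_ASYNC */

static unsigned cs[CS_NDWORDS];
static unsigned cs_cdwords;            /* toy pm4_cdwords */

static void toy_flush(unsigned flags)
{
	printf("flush %u dwords (%s)\n", cs_cdwords,
	       (flags & FLUSH_ASYNC) ? "async" : "sync");
	cs_cdwords = 0;                /* buffer is empty again */
}

static void emit_packet(const unsigned *dwords, unsigned ndwords)
{
	/* need to flush?  (mirrors: pm4_cdwords + ndwords > pm4_ndwords) */
	if (cs_cdwords + ndwords > CS_NDWORDS)
		toy_flush(FLUSH_ASYNC);

	for (unsigned i = 0; i < ndwords; i++)
		cs[cs_cdwords++] = dwords[i];
}

int main(void)
{
	unsigned pkt[5] = { 0 };

	/* Overflows the toy buffer periodically, forcing flushes. */
	for (int i = 0; i < 10; i++)
		emit_packet(pkt, 5);
	return 0;
}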
