Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>

 	util_blitter_save_viewport(rctx->blitter, &rctx->queued.named.viewport->viewport);
 	}

 	util_blitter_save_vertex_buffer_slot(rctx->blitter, rctx->vertex_buffer);
-	util_blitter_save_so_targets(rctx->blitter, rctx->num_so_targets,
-				     (struct pipe_stream_output_target**)rctx->so_targets);
+	util_blitter_save_so_targets(rctx->blitter, rctx->b.streamout.num_targets,
+				     (struct pipe_stream_output_target**)rctx->b.streamout.targets);

 	if (op & R600_SAVE_FRAMEBUFFER)
 		util_blitter_save_framebuffer(rctx->blitter, &rctx->framebuffer);

 	num_dw += ctx->num_cs_dw_nontimer_queries_suspend;

 	/* Count in streamout_end at the end of CS. */
-	num_dw += ctx->num_cs_dw_streamout_end;
+	if (ctx->b.streamout.begin_emitted) {
+		num_dw += ctx->b.streamout.num_dw_for_end;
+	}

 	/* Count in render_condition(NULL) at the end of CS. */
 	if (ctx->predicate_drawing) {

 	struct radeon_winsys_cs *cs = ctx->b.rings.gfx.cs;
 	bool queries_suspended = false;
-#if 0
-	bool streamout_suspended = false;
-#endif

 	if (!cs->cdw)
 		return;

 		queries_suspended = true;
 	}

-#if 0
-	if (ctx->num_cs_dw_streamout_end) {
-		r600_context_streamout_end(ctx);
-		streamout_suspended = true;
+	ctx->b.streamout.suspended = false;
+	if (ctx->b.streamout.begin_emitted) {
+		r600_emit_streamout_end(&ctx->b);
+		ctx->b.streamout.suspended = true;
 	}
-#endif

 	ctx->b.flags |= R600_CONTEXT_FLUSH_AND_INV_CB |
 			R600_CONTEXT_FLUSH_AND_INV_CB_META |

 	si_pm4_emit(ctx, ctx->queued.named.init);
 	ctx->emitted.named.init = ctx->queued.named.init;

-#if 0
-	if (streamout_suspended) {
-		ctx->streamout_start = TRUE;
-		ctx->streamout_append_bitmask = ~0;
+	if (ctx->b.streamout.suspended) {
+		ctx->b.streamout.append_bitmask = ctx->b.streamout.enabled_mask;
+		r600_streamout_buffers_dirty(&ctx->b);
 	}
-#endif

 	/* resume queries */
 	if (queries_suspended) {
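
Note on the streamout changes in this hunk: before the command stream is flushed, streamout has to be ended so the buffer-filled sizes are written back to memory, and once the new CS has re-emitted persistent state it is resumed in append mode. A minimal sketch of that flow, using only the helpers named above (ctx stands for the common context embedded as ctx->b in the diff; this is an illustration, not the literal driver code):

	/* Sketch: suspend/resume streamout around a CS flush. */
	static void streamout_suspend(struct r600_common_context *ctx)
	{
		ctx->streamout.suspended = false;
		if (ctx->streamout.begin_emitted) {
			r600_emit_streamout_end(ctx);	/* writes back buffer-filled sizes */
			ctx->streamout.suspended = true;
		}
	}

	static void streamout_resume(struct r600_common_context *ctx)
	{
		if (ctx->streamout.suspended) {
			/* append to what was written before the flush */
			ctx->streamout.append_bitmask = ctx->streamout.enabled_mask;
			r600_streamout_buffers_dirty(ctx);	/* re-queues the begin atom */
		}
	}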

 	rctx->cache_flush = si_atom_cache_flush;
 	rctx->atoms.cache_flush = &rctx->cache_flush;
+	rctx->atoms.streamout_begin = &rctx->b.streamout.begin_atom;

 	switch (rctx->b.chip_class) {
 	case SI:
 	case CIK:
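
The streamout-begin atom is registered next to cache_flush so the generic atom walk emits it whenever it is marked dirty. A hypothetical illustration of that walk; the context type name, the loop bound, and the emit() callback signature are assumptions, not taken from this diff:

	/* Hypothetical: iterate the named atom pointers through the array[]
	 * overlay and emit whichever ones are dirty. */
	static void emit_dirty_atoms(struct si_context *rctx, unsigned num_atoms)
	{
		for (unsigned i = 0; i < num_atoms; i++) {
			struct r600_atom *atom = rctx->atoms.array[i];
			if (atom && atom->dirty) {
				atom->emit(&rctx->b, atom);	/* signature assumed */
				atom->dirty = false;
			}
		}
	}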

 			/* Caches must be flushed after resource descriptors are
 			 * updated in memory. */
 			struct r600_atom *cache_flush;
+			struct r600_atom *streamout_begin;
 		};
 		struct r600_atom *array[0];
 	} atoms;

 	/* The list of active queries. Only one query of each type can be active. */
 	struct list_head	active_nontimer_query_list;
 	unsigned		num_cs_dw_nontimer_queries_suspend;
-	unsigned		num_cs_dw_streamout_end;

 	unsigned		backend_mask;
 	unsigned		max_db; /* for OQ */
 	boolean			predicate_drawing;

-	unsigned		num_so_targets;
-	struct r600_so_target	*so_targets[PIPE_MAX_SO_BUFFERS];
-	boolean			streamout_start;
-	unsigned		streamout_append_bitmask;
-	unsigned		*vs_so_stride_in_dw;
-	unsigned		*vs_shader_so_strides;

 	/* Vertex and index buffers. */
 	bool			vertex_buffers_dirty;
 	struct pipe_index_buffer	index_buffer;
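
For orientation, a rough sketch of the shared streamout state that absorbs the per-context fields deleted above. The field names are the ones used throughout this diff; types, comments, and ordering are assumptions, and the authoritative definition lives in the common r600 code:

	struct r600_streamout {
		struct r600_atom	begin_atom;	/* emits the streamout-begin packets */
		bool			begin_emitted;
		unsigned		num_dw_for_end;	/* CS space needed to end streamout */

		unsigned		enabled_mask;
		unsigned		num_targets;	/* was num_so_targets */
		struct r600_so_target	*targets[PIPE_MAX_SO_BUFFERS];	/* was so_targets */

		unsigned		append_bitmask;	/* was streamout_append_bitmask */
		bool			suspended;	/* replaces the streamout_start flag */

		/* set on VS bind; replaces vs_so_stride_in_dw / vs_shader_so_strides */
		unsigned		*stride_in_dw;
	};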

 	unsigned		cb_shader_mask;
 	bool			cb0_is_integer;
 	unsigned		sprite_coord_enable;
-	unsigned		so_strides[4];
 	union si_shader_key	key;
 };

 	rctx->vs_shader = sel;

-	if (sel && sel->current)
+	if (sel && sel->current) {
 		si_pm4_bind_state(rctx, vs, sel->current->pm4);
-	else
+		rctx->b.streamout.stride_in_dw = sel->so.stride;
+	} else {
 		si_pm4_bind_state(rctx, vs, rctx->dummy_pixel_shader->pm4);
+	}

 	rctx->b.flags |= R600_CONTEXT_INV_SHADER_CACHE;
 }
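
The per-shader so_strides / vs_shader_so_strides plumbing can go away because the selector's generic stream-output info already carries the strides in dwords, so the bind hook above only has to point the common state at it. A trimmed sketch of the Gallium struct involved, reduced to the fields relevant here (see p_state.h for the full definition):

	struct pipe_stream_output_info {
		unsigned num_outputs;
		unsigned stride[PIPE_MAX_SO_BUFFERS];	/* per-buffer stride, in dwords */
	};

With that, rctx->b.streamout.stride_in_dw = sel->so.stride; is all the driver needs to do when a vertex shader is bound.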

 		}
 	}

-	rctx->vs_shader_so_strides = rctx->vs_shader->current->so_strides;

 	if (!si_update_draw_info_state(rctx, info))
 		return;
 	}
 #endif

-#if 0
-	/* Enable stream out if needed. */
-	if (rctx->streamout_start) {
-		r600_context_streamout_begin(rctx);
-		rctx->streamout_start = FALSE;
-	}
-#endif

 	/* Set the depth buffer as dirty. */
 	if (rctx->framebuffer.zsbuf) {
 		struct pipe_surface *surf = rctx->framebuffer.zsbuf;