This should do all the things that MI_FLUSH did, but it can be pipelined so that further rendering isn't blocked on the flush completion unless necessary.
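In short: each driver previously supplied a flush_cmd() vtbl hook that returned a single MI_FLUSH dword, which stalls the ring until all earlier rendering has completed. The diff below deletes those hooks (i830, i915, brw) and routes every call site through one shared helper, intel_batchbuffer_emit_mi_flush(), which still emits the one-dword MI_FLUSH before gen4 but emits a four-dword PIPE_CONTROL with PIPE_CONTROL_NO_WRITE on gen4+, flushing the caches without blocking later commands. The standalone sketch below mirrors that dispatch into a plain dword buffer; the opcode and flag values are illustrative placeholders, not the driver's real register definitions.

#include <stdint.h>
#include <stdio.h>

/* Placeholder encodings for illustration only -- the real values live in
 * the driver's intel_reg.h / brw_defines.h headers. */
#define MI_FLUSH                       (0x04u << 23)
#define _3DSTATE_PIPE_CONTROL          (0x7au << 24 | 2u)
#define PIPE_CONTROL_INSTRUCTION_FLUSH (1u << 11)
#define PIPE_CONTROL_WRITE_FLUSH       (1u << 12)
#define PIPE_CONTROL_NO_WRITE          0u

/* Same dispatch as the new helper: returns the number of dwords written. */
static int
emit_mi_flush(uint32_t *out, int gen)
{
   if (gen >= 4) {
      /* Pipelined: flushes the render/instruction caches, requests no
       * post-sync write, and later commands need not wait for completion. */
      out[0] = _3DSTATE_PIPE_CONTROL |
               PIPE_CONTROL_INSTRUCTION_FLUSH |
               PIPE_CONTROL_WRITE_FLUSH |
               PIPE_CONTROL_NO_WRITE;
      out[1] = 0; /* write address (unused with NO_WRITE) */
      out[2] = 0; /* write data */
      out[3] = 0; /* write data */
      return 4;
   } else {
      /* Stalling: the ring waits for the flush to finish. */
      out[0] = MI_FLUSH;
      return 1;
   }
}

int
main(void)
{
   uint32_t buf[4];
   for (int gen = 3; gen <= 4; gen++)
      printf("gen%d emits %d dword(s)\n", gen, emit_mi_flush(buf, gen));
   return 0;
}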
@@ -719,15 +719,6 @@ i830_new_batch(struct intel_context *intel)
    assert(!intel->no_batch_wrap);
 }
 
-static GLuint
-i830_flush_cmd(void)
-{
-   return MI_FLUSH | FLUSH_MAP_CACHE;
-}
-
 static void
 i830_assert_not_dirty( struct intel_context *intel )
 {
@@ -753,7 +744,6 @@ i830InitVtbl(struct i830_context *i830)
    i830->intel.vtbl.reduced_primitive_state = i830_reduced_primitive_state;
    i830->intel.vtbl.set_draw_region = i830_set_draw_region;
    i830->intel.vtbl.update_texture_state = i830UpdateTextureState;
-   i830->intel.vtbl.flush_cmd = i830_flush_cmd;
    i830->intel.vtbl.render_start = i830_render_start;
    i830->intel.vtbl.render_prevalidate = i830_render_prevalidate;
    i830->intel.vtbl.assert_not_dirty = i830_assert_not_dirty;
@@ -672,12 +672,6 @@ i915_new_batch(struct intel_context *intel)
    assert(!intel->no_batch_wrap);
 }
 
-static GLuint
-i915_flush_cmd(void)
-{
-   return MI_FLUSH | FLUSH_MAP_CACHE;
-}
-
 static void
 i915_assert_not_dirty( struct intel_context *intel )
 {
@@ -699,7 +693,6 @@ i915InitVtbl(struct i915_context *i915)
    i915->intel.vtbl.render_prevalidate = i915_render_prevalidate;
    i915->intel.vtbl.set_draw_region = i915_set_draw_region;
    i915->intel.vtbl.update_texture_state = i915UpdateTextureState;
-   i915->intel.vtbl.flush_cmd = i915_flush_cmd;
    i915->intel.vtbl.assert_not_dirty = i915_assert_not_dirty;
    i915->intel.vtbl.finish_batch = intel_finish_vb;
 }
@@ -153,18 +153,14 @@ static void brw_emit_prim(struct brw_context *brw,
     * the besides the draw code.
     */
    if (intel->always_flush_cache) {
-      BEGIN_BATCH(1, IGNORE_CLIPRECTS);
-      OUT_BATCH(intel->vtbl.flush_cmd());
-      ADVANCE_BATCH();
+      intel_batchbuffer_emit_mi_flush(intel->batch);
    }
 
    if (prim_packet.verts_per_instance) {
       intel_batchbuffer_data( brw->intel.batch, &prim_packet,
                               sizeof(prim_packet), LOOP_CLIPRECTS);
    }
    if (intel->always_flush_cache) {
-      BEGIN_BATCH(1, IGNORE_CLIPRECTS);
-      OUT_BATCH(intel->vtbl.flush_cmd());
-      ADVANCE_BATCH();
+      intel_batchbuffer_emit_mi_flush(intel->batch);
    }
    brw->no_batch_wrap = GL_FALSE;
@@ -175,20 +175,6 @@ static void brw_note_fence( struct intel_context *intel, GLuint fence )
    brw_context(&intel->ctx)->state.dirty.brw |= BRW_NEW_FENCE;
 }
 
-/* called from intelWaitForIdle() and intelFlush()
- *
- * For now, just flush everything. Could be smarter later.
- */
-static GLuint brw_flush_cmd( void )
-{
-   struct brw_mi_flush flush;
-
-   flush.opcode = CMD_MI_FLUSH;
-   flush.pad = 0;
-   flush.flags = BRW_FLUSH_STATE_CACHE;
-   return *(GLuint *)&flush;
-}
-
 static void brw_invalidate_state( struct intel_context *intel, GLuint new_state )
 {
    /* nothing */
@@ -209,6 +195,5 @@ void brwInitVtbl( struct brw_context *brw )
    brw->intel.vtbl.finish_batch = brw_finish_batch;
    brw->intel.vtbl.destroy = brw_destroy_context;
    brw->intel.vtbl.set_draw_region = brw_set_draw_region;
-   brw->intel.vtbl.flush_cmd = brw_flush_cmd;
    brw->intel.vtbl.debug_batch = brw_debug_batch;
 }
@@ -210,10 +210,10 @@ _intel_batchbuffer_flush(struct intel_batchbuffer *batch, const char *file,
       fprintf(stderr, "%s:%d: Batchbuffer flush with %db used\n", file, line,
               used);
 
+   batch->reserved_space = 0;
    /* Emit a flush if the bufmgr doesn't do it for us. */
    if (intel->always_flush_cache || !intel->ttm) {
-      *(GLuint *) (batch->ptr) = intel->vtbl.flush_cmd();
-      batch->ptr += 4;
+      intel_batchbuffer_emit_mi_flush(batch);
       used = batch->ptr - batch->map;
    }
 
@@ -244,6 +244,8 @@ _intel_batchbuffer_flush(struct intel_batchbuffer *batch, const char *file,
    if (intel->vtbl.finish_batch)
       intel->vtbl.finish_batch(intel);
 
+   batch->reserved_space = BATCH_RESERVED;
+
    /* TODO: Just pass the relocation list and dma buffer up to the
     * kernel.
     */
@@ -299,3 +301,31 @@ intel_batchbuffer_data(struct intel_batchbuffer *batch,
    __memcpy(batch->ptr, data, bytes);
    batch->ptr += bytes;
 }
+
+/* Emit a pipelined flush to either flush render and texture cache for
+ * reading from a FBO-drawn texture, or flush so that frontbuffer
+ * render appears on the screen in DRI1.
+ *
+ * This is also used for the always_flush_cache driconf debug option.
+ */
+void
+intel_batchbuffer_emit_mi_flush(struct intel_batchbuffer *batch)
+{
+   struct intel_context *intel = batch->intel;
+
+   if (intel->gen >= 4) {
+      BEGIN_BATCH(4, IGNORE_CLIPRECTS);
+      OUT_BATCH(_3DSTATE_PIPE_CONTROL |
+                PIPE_CONTROL_INSTRUCTION_FLUSH |
+                PIPE_CONTROL_WRITE_FLUSH |
+                PIPE_CONTROL_NO_WRITE);
+      OUT_BATCH(0); /* write address */
+      OUT_BATCH(0); /* write data */
+      OUT_BATCH(0); /* write data */
+      ADVANCE_BATCH();
+   } else {
+      BEGIN_BATCH(1, IGNORE_CLIPRECTS);
+      OUT_BATCH(MI_FLUSH);
+      ADVANCE_BATCH();
+   }
+}
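For reference, every call site shrinks from an open-coded packet to a single call. This fragment restates the change from the brw_emit_prim() hunk above; it is not a standalone compilable unit, since it assumes the driver's BEGIN_BATCH/OUT_BATCH/ADVANCE_BATCH macros and an in-scope struct intel_context *intel:

/* Before: ask the per-driver hook for a single stalling MI_FLUSH dword. */
BEGIN_BATCH(1, IGNORE_CLIPRECTS);
OUT_BATCH(intel->vtbl.flush_cmd());
ADVANCE_BATCH();

/* After: one shared call; the helper sizes the packet itself
 * (1 dword of MI_FLUSH pre-gen4, 4 dwords of PIPE_CONTROL on gen4+). */
intel_batchbuffer_emit_mi_flush(intel->batch);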
@@ -62,6 +62,7 @@ struct intel_batchbuffer
    } emit;
 
    GLuint dirty_state;
+   GLuint reserved_space;
 };
 
 struct intel_batchbuffer *intel_batchbuffer_alloc(struct intel_context
@@ -95,6 +96,7 @@ GLboolean intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch,
                                        uint32_t read_domains,
                                        uint32_t write_domain,
                                        uint32_t offset);
+void intel_batchbuffer_emit_mi_flush(struct intel_batchbuffer *batch);
 
 /* Inline functions - might actually be better off with these
  * non-inlined. Certainly better off switching all command packets to
@@ -104,7 +106,7 @@ GLboolean intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch,
 static INLINE GLint
 intel_batchbuffer_space(struct intel_batchbuffer *batch)
 {
-   return (batch->size - BATCH_RESERVED) - (batch->ptr - batch->map);
+   return (batch->size - batch->reserved_space) - (batch->ptr - batch->map);
 }
 
@@ -173,12 +175,4 @@ intel_batchbuffer_require_space(struct intel_batchbuffer *batch,
    intel->batch->emit.start_ptr = NULL;			\
 } while(0)
 
-static INLINE void
-intel_batchbuffer_emit_mi_flush(struct intel_batchbuffer *batch)
-{
-   intel_batchbuffer_require_space(batch, 4, IGNORE_CLIPRECTS);
-   intel_batchbuffer_emit_dword(batch, MI_FLUSH);
-}
-
 #endif
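The new reserved_space field turns the batch headroom into per-batch state: intel_batchbuffer_space() keeps the reserved tail off-limits during normal emission, and _intel_batchbuffer_flush() zeroes the reservation right before emitting the closing flush (which can now be four dwords on gen4+ instead of one), then re-arms it for the next batch. A compilable toy model of that accounting, with simplified types and an assumed BATCH_RESERVED value:

#include <assert.h>
#include <stdint.h>

#define BATCH_SIZE     4096
#define BATCH_RESERVED 16   /* assumed value, not taken from the patch */

struct batchbuffer {
   uint8_t  map[BATCH_SIZE];
   uint8_t *ptr;
   unsigned size;
   unsigned reserved_space;
};

/* Mirrors the patched intel_batchbuffer_space(): ordinary commands may
 * not spill into the reserved tail. */
static int
batch_space(const struct batchbuffer *b)
{
   return (int)(b->size - b->reserved_space) - (int)(b->ptr - b->map);
}

/* Mirrors the flush-time sequence in _intel_batchbuffer_flush(). */
static void
batch_flush(struct batchbuffer *b)
{
   b->reserved_space = 0;   /* open the tail for the closing flush */
   /* ... intel_batchbuffer_emit_mi_flush() + submission happen here ... */
   b->ptr = b->map;         /* simplified: start the next batch */
   b->reserved_space = BATCH_RESERVED;
}

int
main(void)
{
   struct batchbuffer b;
   b.ptr = b.map;
   b.size = BATCH_SIZE;
   b.reserved_space = BATCH_RESERVED;
   assert(batch_space(&b) == BATCH_SIZE - BATCH_RESERVED);
   batch_flush(&b);
   assert(batch_space(&b) == BATCH_SIZE - BATCH_RESERVED);
   return 0;
}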
@@ -117,8 +117,6 @@ struct intel_context
                             struct intel_region * depth_region,
                             GLuint num_regions);
 
-   GLuint (*flush_cmd) (void);
-
    void (*reduced_primitive_state) (struct intel_context * intel,
                                     GLenum rprim);