
i965: Un-virtualize brw_finish_batch().

Since the i915/i965 split, there's only one implementation of this
virtual function.  We may as well just call it directly.

Signed-off-by: Kenneth Graunke <kenneth@whitecape.org>
Reviewed-by: Eric Anholt <eric@anholt.net>
tags/mesa-10.1-devel
Kenneth Graunke
parent commit 6613f346ac
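The change below is a straightforward de-virtualization: brw_finish_batch() moves out of the driver vtbl and becomes an ordinary static function that the flush path calls directly. As a rough standalone sketch of the same pattern (hypothetical ctx/vtbl names for illustration only, not Mesa's actual structures): a function-pointer hook with a single remaining implementation and a NULL guard at the call site can simply become a direct call.

#include <stdio.h>

/* Illustrative only -- hypothetical names, not Mesa code. */
struct ctx;

struct vtbl {
   void (*finish)(struct ctx *c);   /* before: per-backend hook */
};

struct ctx {
   struct vtbl vtbl;
   int pending;
};

/* The single remaining implementation. */
static void finish_impl(struct ctx *c)
{
   c->pending = 0;
   printf("batch finished\n");
}

/* Before: guarded, indirect call through the vtbl. */
static void flush_virtual(struct ctx *c)
{
   if (c->vtbl.finish)
      c->vtbl.finish(c);
}

/* After: only one implementation exists, so call it directly. */
static void flush_direct(struct ctx *c)
{
   finish_impl(c);
}

int main(void)
{
   struct ctx c = { .vtbl = { .finish = finish_impl }, .pending = 1 };
   flush_virtual(&c);
   c.pending = 1;
   flush_direct(&c);
   return 0;
}

Behavior is unchanged as long as only one implementation ever existed; the gain is clarity plus one less indirect call and NULL check per flush.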

src/mesa/drivers/dri/i965/brw_context.h  (+0, -1)

@@ -923,7 +923,6 @@ struct brw_context
    struct
    {
       void (*destroy) (struct brw_context * brw);
-      void (*finish_batch) (struct brw_context * brw);
       void (*new_batch) (struct brw_context * brw);
 
       void (*update_texture_surface)(struct gl_context *ctx,

src/mesa/drivers/dri/i965/brw_vtbl.c  (+0, -22)

@@ -90,27 +90,6 @@ brw_destroy_context(struct brw_context *brw)
       drm_intel_gem_context_destroy(brw->hw_ctx);
 }
 
-/**
- * called from intel_batchbuffer_flush and children before sending a
- * batchbuffer off.
- *
- * Note that ALL state emitted here must fit in the reserved space
- * at the end of a batchbuffer.  If you add more GPU state, increase
- * the BATCH_RESERVED macro.
- */
-static void
-brw_finish_batch(struct brw_context *brw)
-{
-   brw_emit_query_end(brw);
-
-   if (brw->curbe.curbe_bo) {
-      drm_intel_gem_bo_unmap_gtt(brw->curbe.curbe_bo);
-      drm_intel_bo_unreference(brw->curbe.curbe_bo);
-      brw->curbe.curbe_bo = NULL;
-   }
-}
-
-
 /**
  * called from intelFlushBatchLocked
  */
@@ -155,7 +134,6 @@ brw_new_batch(struct brw_context *brw)
 void brwInitVtbl( struct brw_context *brw )
 {
    brw->vtbl.new_batch = brw_new_batch;
-   brw->vtbl.finish_batch = brw_finish_batch;
    brw->vtbl.destroy = brw_destroy_context;
 
    assert(brw->gen >= 4);

src/mesa/drivers/dri/i965/intel_batchbuffer.c  (+22, -2)

@@ -172,6 +172,27 @@ do_batch_dump(struct brw_context *brw)
    }
 }
 
+/**
+ * Called from intel_batchbuffer_flush before emitting MI_BATCHBUFFER_END and
+ * sending it off.
+ *
+ * This function can emit state (say, to preserve registers that aren't saved
+ * between batches).  All of this state MUST fit in the reserved space at the
+ * end of the batchbuffer.  If you add more GPU state, increase the reserved
+ * space by updating the BATCH_RESERVED macro.
+ */
+static void
+brw_finish_batch(struct brw_context *brw)
+{
+   brw_emit_query_end(brw);
+
+   if (brw->curbe.curbe_bo) {
+      drm_intel_gem_bo_unmap_gtt(brw->curbe.curbe_bo);
+      drm_intel_bo_unreference(brw->curbe.curbe_bo);
+      brw->curbe.curbe_bo = NULL;
+   }
+}
+
 /* TODO: Push this whole function into bufmgr.
  */
 static int
@@ -256,8 +277,7 @@ _intel_batchbuffer_flush(struct brw_context *brw,
 
    brw->batch.reserved_space = 0;
 
-   if (brw->vtbl.finish_batch)
-      brw->vtbl.finish_batch(brw);
+   brw_finish_batch(brw);
 
    /* Mark the end of the buffer. */
    intel_batchbuffer_emit_dword(brw, MI_BATCH_BUFFER_END);

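The doc comment carried over above restates the contract behind brw->batch.reserved_space: whatever brw_finish_batch() emits, plus the final MI_BATCH_BUFFER_END, must fit in the tail that BATCH_RESERVED keeps free, which is why the flush path zeroes reserved_space immediately before the call. A minimal toy sketch of that ordering follows (invented names and sizes, not Mesa's actual batchbuffer code):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Toy model of the reserved-space contract, not Mesa's real batchbuffer. */
#define TOY_BATCH_DWORDS    16
#define TOY_RESERVED_DWORDS  4   /* analogous in spirit to BATCH_RESERVED */

struct toy_batch {
   uint32_t map[TOY_BATCH_DWORDS];
   unsigned used;            /* dwords written so far */
   unsigned reserved_space;  /* dwords kept free for finish + end marker */
};

static void toy_emit_dword(struct toy_batch *b, uint32_t dw)
{
   /* Every emission must stay out of whatever space is currently reserved. */
   assert(b->used + b->reserved_space < TOY_BATCH_DWORDS);
   b->map[b->used++] = dw;
}

static void toy_finish_batch(struct toy_batch *b)
{
   /* End-of-batch state; everything emitted here must fit in the old reserve. */
   toy_emit_dword(b, 0xdeadbeef);
}

static void toy_flush(struct toy_batch *b)
{
   /* Mirror the flush ordering: release the reserve, emit the end-of-batch
    * state, then the end-of-buffer marker. */
   b->reserved_space = 0;
   toy_finish_batch(b);
   toy_emit_dword(b, 0x05000000 /* stand-in for MI_BATCH_BUFFER_END */);
   printf("flushed %u dwords\n", b->used);
}

int main(void)
{
   struct toy_batch b = { .used = 0, .reserved_space = TOY_RESERVED_DWORDS };

   /* Ordinary emission fills the batch up to, but never into, the reserve. */
   while (b.used + b.reserved_space < TOY_BATCH_DWORDS)
      toy_emit_dword(&b, b.used);

   toy_flush(&b);
   return 0;
}

The point is only the ordering: ordinary emission can never eat into the reserve, and the reserve is released exactly once, right before the fixed-size end-of-batch emissions.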