There's no reason to release the gen_mipmap vertex buffer at the end of each frame. The buffer being used for rendering is always mapped as unsynchronized, so CPU writes to it never force a stall on earlier rendering.
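For context, this is roughly the pattern the helper relies on, and why dropping util_gen_mipmap_flush() is safe: each quad is written into the next free slot of the vertex buffer through an unsynchronized mapping, and once all slots are used the local reference is simply dropped while the driver keeps the buffer alive until queued draws have consumed it. The sketch below is illustrative only; the field names follow gen_mipmap_state as it appears in the patch, but write_one_slot(), the pipe_buffer_map_range()/pipe_buffer_unmap() wrappers and the exact PIPE_TRANSFER_* flags are assumptions about this tree, not a quote of u_gen_mipmap.c.

#include <string.h>
#include "util/u_inlines.h"   /* pipe_buffer_map_range(), pipe_buffer_unmap() */

/* Hypothetical helper: write one slot's worth of vertex data without
 * ever waiting on the GPU.  Each slot is written exactly once, so an
 * unsynchronized mapping is safe.
 */
static void
write_one_slot(struct gen_mipmap_state *ctx, unsigned slot)
{
   struct pipe_transfer *transfer;
   void *map;

   map = pipe_buffer_map_range(ctx->pipe, ctx->vbuf,
                               slot * sizeof ctx->vertices,  /* byte offset */
                               sizeof ctx->vertices,         /* length */
                               PIPE_TRANSFER_WRITE |
                               PIPE_TRANSFER_UNSYNCHRONIZED,
                               &transfer);
   if (map) {
      memcpy(map, ctx->vertices, sizeof ctx->vertices);
      pipe_buffer_unmap(ctx->pipe, transfer);
   }
}

Because the CPU never waits on the GPU here, the only cleanup the utility needs is the one the first hunk adds inline: when the slots run out, pipe_resource_reference(&ctx->vbuf, NULL) drops the local reference and a fresh buffer is allocated, while reference counting keeps the old buffer valid for any draws still in flight. That is also why st_flush no longer has to call into the mipmap code at all.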
@@ -1361,8 +1361,10 @@ get_next_slot(struct gen_mipmap_state *ctx)
 {
    const unsigned max_slots = 4096 / sizeof ctx->vertices;
 
-   if (ctx->vbuf_slot >= max_slots)
-      util_gen_mipmap_flush( ctx );
+   if (ctx->vbuf_slot >= max_slots) {
+      pipe_resource_reference(&ctx->vbuf, NULL);
+      ctx->vbuf_slot = 0;
+   }
 
    if (!ctx->vbuf) {
       ctx->vbuf = pipe_buffer_create(ctx->pipe->screen,
@@ -1475,17 +1477,6 @@ util_destroy_gen_mipmap(struct gen_mipmap_state *ctx)
 }
 
 
-/* Release vertex buffer at end of frame to avoid synchronous
- * rendering.
- */
-void util_gen_mipmap_flush( struct gen_mipmap_state *ctx )
-{
-   pipe_resource_reference(&ctx->vbuf, NULL);
-   ctx->vbuf_slot = 0;
-}
-
-
-
 /**
  * Generate mipmap images.  It's assumed all needed texture memory is
  * already allocated.
@@ -50,12 +50,6 @@ util_create_gen_mipmap(struct pipe_context *pipe, struct cso_context *cso);
 extern void
 util_destroy_gen_mipmap(struct gen_mipmap_state *ctx);
 
 
-/* Release vertex buffer at end of frame to avoid synchronous
- * rendering.
- */
-extern void
-util_gen_mipmap_flush( struct gen_mipmap_state *ctx );
-
 extern void
 util_gen_mipmap(struct gen_mipmap_state *ctx,
@@ -81,11 +81,7 @@ void st_flush( struct st_context *st,
 {
    FLUSH_CURRENT(st->ctx, 0);
 
-   /* Release any vertex buffers that might potentially be accessed in
-    * successive frames:
-    */
    st_flush_bitmap(st);
-   util_gen_mipmap_flush(st->gen_mipmap);
 
    st->pipe->flush( st->pipe, fence );
 }