
virgl: modify how we handle GL_MAP_FLUSH_EXPLICIT_BIT

Previously, we ignored the glUnmap(..) operation and only flushed
the data right before flushing the cbuf.  Now, let's just flush
the data when we unmap.

Neither method is optimal.  For example:

glMapBufferRange(.., 0, 100, GL_MAP_FLUSH_EXPLICIT_BIT)
glFlushMappedBufferRange(.., 25, 30)
glFlushMappedBufferRange(.., 65, 70)

We'll end up flushing 25 --> 70.  Maybe we can fix this later.
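
For reference, the application-side pattern being described looks roughly
like this (illustrative sketch only; the helper name, the sizes, and the
libepoxy loader are assumptions, not part of this change):

#include <epoxy/gl.h>   /* any loader exposing GL 3.0+ entry points works */
#include <string.h>

/* Write two disjoint sub-ranges of a 100-byte buffer and flush them
 * explicitly; assumes a current GL context and a valid buffer object. */
static void update_buffer(GLuint bo, const char *data)
{
   glBindBuffer(GL_ARRAY_BUFFER, bo);

   char *map = glMapBufferRange(GL_ARRAY_BUFFER, 0, 100,
                                GL_MAP_WRITE_BIT |
                                GL_MAP_FLUSH_EXPLICIT_BIT);

   memcpy(map + 25, data, 5);
   glFlushMappedBufferRange(GL_ARRAY_BUFFER, 25, 5);

   memcpy(map + 65, data, 5);
   glFlushMappedBufferRange(GL_ARRAY_BUFFER, 65, 5);

   /* With this patch, the accumulated range is uploaded here. */
   glUnmapBuffer(GL_ARRAY_BUFFER);
}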

v2: Add fixme comment in the code (Elie)

Reviewed-by: Elie Tournier <elie.tournier@collabora.com>
Gurchetan Singh · 6 years ago · commit db77573d7b

src/gallium/drivers/virgl/virgl_buffer.c  (+24, -22)

@@ -33,7 +33,6 @@ static void virgl_buffer_destroy(struct pipe_screen *screen,
    struct virgl_screen *vs = virgl_screen(screen);
    struct virgl_buffer *vbuf = virgl_buffer(buf);

-   util_range_destroy(&vbuf->valid_buffer_range);
    vs->vws->resource_unref(vs->vws, vbuf->base.hw_res);
    FREE(vbuf);
 }
@@ -53,7 +52,7 @@ static void *virgl_buffer_transfer_map(struct pipe_context *ctx,
    bool readback;
    bool doflushwait = false;

-   if ((usage & PIPE_TRANSFER_READ) && (vbuf->on_list == TRUE))
+   if (usage & PIPE_TRANSFER_READ)
       doflushwait = true;
    else
       doflushwait = virgl_res_needs_flush_wait(vctx, &vbuf->base, usage);
@@ -92,13 +91,19 @@ static void virgl_buffer_transfer_unmap(struct pipe_context *ctx,
    struct virgl_buffer *vbuf = virgl_buffer(transfer->resource);

    if (trans->base.usage & PIPE_TRANSFER_WRITE) {
-      if (!(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT)) {
-         struct virgl_screen *vs = virgl_screen(ctx->screen);
-         vctx->num_transfers++;
-         vs->vws->transfer_put(vs->vws, vbuf->base.hw_res,
-                               &transfer->box, trans->base.stride, trans->l_stride, trans->offset, transfer->level);
-
+      struct virgl_screen *vs = virgl_screen(ctx->screen);
+      if (transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT) {
+         transfer->box.x += trans->range.start;
+         transfer->box.width = trans->range.end - trans->range.start;
+         trans->offset = transfer->box.x;
       }
+
+      vctx->num_transfers++;
+      vs->vws->transfer_put(vs->vws, vbuf->base.hw_res,
+                            &transfer->box, trans->base.stride,
+                            trans->l_stride, trans->offset,
+                            transfer->level);
+
    }

    virgl_resource_destroy_transfer(vctx, trans);
@@ -108,20 +113,19 @@ static void virgl_buffer_transfer_flush_region(struct pipe_context *ctx,
                                                struct pipe_transfer *transfer,
                                                const struct pipe_box *box)
 {
-   struct virgl_context *vctx = virgl_context(ctx);
    struct virgl_buffer *vbuf = virgl_buffer(transfer->resource);
+   struct virgl_transfer *trans = virgl_transfer(transfer);

-   if (!vbuf->on_list) {
-      struct pipe_resource *res = NULL;
-
-      list_addtail(&vbuf->flush_list, &vctx->to_flush_bufs);
-      vbuf->on_list = TRUE;
-      pipe_resource_reference(&res, &vbuf->base.u.b);
-   }
-
-   util_range_add(&vbuf->valid_buffer_range, transfer->box.x + box->x,
-                  transfer->box.x + box->x + box->width);
-
+   /*
+    * FIXME: This is not optimal. For example,
+    *
+    * glMapBufferRange(.., 0, 100, GL_MAP_FLUSH_EXPLICIT_BIT)
+    * glFlushMappedBufferRange(.., 25, 30)
+    * glFlushMappedBufferRange(.., 65, 70)
+    *
+    * We'll end up flushing 25 --> 70.
+    */
+   util_range_add(&trans->range, box->x, box->x + box->width);
    vbuf->base.clean = FALSE;
 }

@@ -145,7 +149,6 @@ struct pipe_resource *virgl_buffer_create(struct virgl_screen *vs,
    buf->base.u.b.screen = &vs->base;
    buf->base.u.vtbl = &virgl_buffer_vtbl;
    pipe_reference_init(&buf->base.u.b.reference, 1);
-   util_range_init(&buf->valid_buffer_range);
    virgl_resource_layout(&buf->base.u.b, &buf->metadata);

    vbind = pipe_to_virgl_bind(template->bind);
@@ -155,6 +158,5 @@ struct pipe_resource *virgl_buffer_create(struct virgl_screen *vs,
                                               template->width0, 1, 1, 1, 0, 0,
                                               buf->metadata.total_size);

-   util_range_set_empty(&buf->valid_buffer_range);
    return &buf->base.u.b;
 }
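
To illustrate the bookkeeping above (util_range_add in transfer_flush_region,
then the box adjustment in transfer_unmap), here is a minimal standalone model
of how multiple explicit flushes collapse into a single upload; struct range
and range_add are simplified stand-ins, not the driver's util_range API:

#include <stdio.h>

/* Simplified stand-in for util_range: a [start, end) interval. */
struct range {
   unsigned start, end;
};

static void range_add(struct range *r, unsigned start, unsigned end)
{
   if (start < r->start)
      r->start = start;
   if (end > r->end)
      r->end = end;
}

int main(void)
{
   /* Empty range, as after glMapBufferRange(.., 0, 100, ..FLUSH_EXPLICIT..) */
   struct range flushed = { ~0u, 0 };

   /* Each glFlushMappedBufferRange() reaches
    * virgl_buffer_transfer_flush_region(), which accumulates the range
    * (numbers taken from the commit message example). */
   range_add(&flushed, 25, 30);
   range_add(&flushed, 65, 70);

   /* At unmap time the transfer box is narrowed to the accumulated range
    * and sent with one transfer_put(): a single upload of [25, 70). */
   printf("flush %u --> %u\n", flushed.start, flushed.end);
   return 0;
}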

src/gallium/drivers/virgl/virgl_context.c  (+1, -33)

@@ -60,29 +60,6 @@ uint32_t virgl_object_assign_handle(void)
    return ++next_handle;
 }

-static void virgl_buffer_flush(struct virgl_context *vctx,
-                               struct virgl_buffer *vbuf)
-{
-   struct virgl_screen *rs = virgl_screen(vctx->base.screen);
-   struct pipe_box box;
-
-   assert(vbuf->on_list);
-
-   box.height = 1;
-   box.depth = 1;
-   box.y = 0;
-   box.z = 0;
-
-   box.x = vbuf->valid_buffer_range.start;
-   box.width = MIN2(vbuf->valid_buffer_range.end - vbuf->valid_buffer_range.start, vbuf->base.u.b.width0);
-
-   vctx->num_transfers++;
-   rs->vws->transfer_put(rs->vws, vbuf->base.hw_res,
-                         &box, 0, 0, box.x, 0);
-
-   util_range_set_empty(&vbuf->valid_buffer_range);
-}
-
 static void virgl_attach_res_framebuffer(struct virgl_context *vctx)
 {
    struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
@@ -774,19 +751,11 @@ static void virgl_flush_from_st(struct pipe_context *ctx,
                                 enum pipe_flush_flags flags)
 {
    struct virgl_context *vctx = virgl_context(ctx);
-   struct virgl_buffer *buf, *tmp;
    struct virgl_screen *rs = virgl_screen(ctx->screen);

    if (flags & PIPE_FLUSH_FENCE_FD)
       vctx->cbuf->needs_out_fence_fd = true;

-   LIST_FOR_EACH_ENTRY_SAFE(buf, tmp, &vctx->to_flush_bufs, flush_list) {
-      struct pipe_resource *res = &buf->base.u.b;
-      virgl_buffer_flush(vctx, buf);
-      list_del(&buf->flush_list);
-      buf->on_list = FALSE;
-      pipe_resource_reference(&res, NULL);
-
-   }
    virgl_flush_eq(vctx, vctx, fence);

    if (vctx->cbuf->in_fence_fd != -1) {
@@ -1329,7 +1298,6 @@ struct pipe_context *virgl_context_create(struct pipe_screen *pscreen,
    virgl_init_query_functions(vctx);
    virgl_init_so_functions(vctx);

-   list_inithead(&vctx->to_flush_bufs);
    slab_create_child(&vctx->transfer_pool, &rs->transfer_pool);

    vctx->primconvert = util_primconvert_create(&vctx->base, rs->caps.caps.v1.prim_mask);

src/gallium/drivers/virgl/virgl_context.h  (+0, -1)

@@ -75,7 +75,6 @@ struct virgl_context {
    struct pipe_resource *images[PIPE_SHADER_TYPES][PIPE_MAX_SHADER_BUFFERS];
    int num_transfers;
    int num_draws;
-   struct list_head to_flush_bufs;

    struct pipe_resource *atomic_buffers[PIPE_MAX_HW_ATOMIC_BUFFERS];


src/gallium/drivers/virgl/virgl_resource.h  (+0, -13)

@@ -52,19 +52,6 @@ struct virgl_resource {

 struct virgl_buffer {
    struct virgl_resource base;

-   struct list_head flush_list;
-   boolean on_list;
-
-   /* The buffer range which is initialized (with a write transfer,
-    * streamout, DMA, or as a random access target). The rest of
-    * the buffer is considered invalid and can be mapped unsynchronized.
-    *
-    * This allows unsychronized mapping of a buffer range which hasn't
-    * been used yet. It's for applications which forget to use
-    * the unsynchronized map flag and expect the driver to figure it out.
-    */
-   struct util_range valid_buffer_range;
    struct virgl_resource_metadata metadata;
 };

