
vmware/core: Update for pipebuffer changes.

In particular, delay patching GMR relocations until the buffers are
validated, since the buffers' relative offsets can only be established then.
tags/mesa-7.7-2
José Fonseca, 15 years ago
commit 383f460cf7
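
The change described above can be pictured as moving from patch-at-emit to
patch-at-flush. Below is an illustrative sketch only, with simplified
hypothetical types standing in for SVGAGuestPtr and pb_buffer; it is not the
driver's actual code:

   #include <stdint.h>

   /* Hypothetical, simplified stand-ins for SVGAGuestPtr and pb_buffer. */
   struct guest_ptr { uint32_t gmr_id; uint32_t offset; };
   struct buffer { struct guest_ptr placement; /* final only after validation */ };

   struct relocation {
      struct guest_ptr *where; /* location in the command buffer to patch */
      struct buffer *buffer;   /* region buffer the pointer must refer to */
      uint32_t offset;         /* offset relative to the buffer's start */
   };

   /* Before: the guest pointer was written immediately at emit time, which
    * assumes the buffer's placement is already final. */
   static void emit_immediate(struct guest_ptr *where,
                              struct buffer *buf, uint32_t offset)
   {
      *where = buf->placement;
      where->offset += offset;
   }

   /* After: emit time only records a relocation... */
   static void emit_deferred(struct relocation *reloc, struct guest_ptr *where,
                             struct buffer *buf, uint32_t offset)
   {
      reloc->where = where;
      reloc->buffer = buf;
      reloc->offset = offset;
   }

   /* ...and flush patches all recorded locations once validation has fixed
    * each buffer's placement, so the relative offsets are finally known. */
   static void apply_relocations(struct relocation *relocs, unsigned n)
   {
      unsigned i;
      for (i = 0; i < n; ++i) {
         struct guest_ptr ptr = relocs[i].buffer->placement;
         ptr.offset += relocs[i].offset;
         *relocs[i].where = ptr;
      }
   }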

src/gallium/winsys/drm/vmware/core/vmw_context.c (+66 −12)



#define VMW_COMMAND_SIZE (64*1024)
#define VMW_SURFACE_RELOCS (1024)
#define VMW_REGION_RELOCS (512)

#define VMW_MUST_FLUSH_STACK 8


struct vmw_region_relocation
{
   struct SVGAGuestPtr *where;
   struct pb_buffer *buffer;
   /* TODO: put offset info inside where */
   uint32 offset;
};

struct vmw_svga_winsys_context
{
   struct svga_winsys_context base;

      uint32_t staged;
      uint32_t reserved;
   } surface;

   struct {
      struct vmw_region_relocation relocs[VMW_REGION_RELOCS];
      uint32_t size;
      uint32_t used;
      uint32_t staged;
      uint32_t reserved;
   } region;


   struct pb_validate *validate;


   ret = pb_validate_validate(vswc->validate);
   assert(ret == PIPE_OK);
   if(ret == PIPE_OK) {
      /* Apply relocations */
      for(i = 0; i < vswc->region.used; ++i) {
         struct vmw_region_relocation *reloc = &vswc->region.relocs[i];
         struct SVGAGuestPtr ptr;

         if(!vmw_gmr_bufmgr_region_ptr(reloc->buffer, &ptr))
            assert(0);

         ptr.offset += reloc->offset;

         *reloc->where = ptr;
      }


      if (vswc->command.used)
         vmw_ioctl_command(vswc->vws,

   vswc->surface.used = 0;
   vswc->surface.reserved = 0;

   for(i = 0; i < vswc->region.used + vswc->region.staged; ++i) {
      pb_reference(&vswc->region.relocs[i].buffer, NULL);
   }

   vswc->region.used = 0;
   vswc->region.reserved = 0;

#ifdef DEBUG
   vswc->must_flush = FALSE;
#endif


   if(vswc->preemptive_flush ||
      vswc->command.used + nr_bytes > vswc->command.size ||
      vswc->surface.used + nr_relocs > vswc->surface.size ||
      vswc->region.used + nr_relocs > vswc->region.size) {
#ifdef DEBUG
      vswc->must_flush = TRUE;
      debug_backtrace_capture(vswc->must_flush_stack, 1,

   assert(vswc->command.used + nr_bytes <= vswc->command.size);
   assert(vswc->surface.used + nr_relocs <= vswc->surface.size);
   assert(vswc->region.used + nr_relocs <= vswc->region.size);

   vswc->command.reserved = nr_bytes;

   vswc->surface.reserved = nr_relocs;
   vswc->surface.staged = 0;

   vswc->region.reserved = nr_relocs;
   vswc->region.staged = 0;

   return vswc->command.buffer + vswc->command.used;
}
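
The command, surface and region sub-structs all share the same
size/used/staged/reserved accounting. A minimal standalone sketch of that
lifecycle, with hypothetical names (not from the commit):

   #include <assert.h>
   #include <stdint.h>

   struct pool {
      uint32_t size;     /* total capacity */
      uint32_t used;     /* entries committed by previous commands */
      uint32_t staged;   /* entries added while emitting the current command */
      uint32_t reserved; /* upper bound promised to the current command */
   };

   /* Before emitting a command: promise room for up to n entries.
    * The caller is expected to flush first if this would overflow. */
   static void pool_reserve(struct pool *p, uint32_t n)
   {
      assert(p->used + n <= p->size);
      p->reserved = n;
      p->staged = 0;
   }

   /* While emitting: claim one of the reserved entries. */
   static uint32_t pool_stage(struct pool *p)
   {
      assert(p->staged < p->reserved);
      return p->used + p->staged++;
   }

   /* After the command is emitted: make the staged entries permanent. */
   static void pool_commit(struct pool *p)
   {
      assert(p->used + p->staged <= p->size);
      p->used += p->staged;
      p->staged = 0;
      p->reserved = 0;
   }

A flush then consumes everything in used and resets it to zero, mirroring
the resets seen in the hunks above.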


                               unsigned flags)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   struct vmw_region_relocation *reloc;
   enum pipe_error ret;

   assert(vswc->region.staged < vswc->region.reserved);

   reloc = &vswc->region.relocs[vswc->region.used + vswc->region.staged];
   reloc->where = where;
   pb_reference(&reloc->buffer, vmw_pb_buffer(buffer));
   reloc->offset = offset;

   ++vswc->region.staged;

   ret = pb_validate_add_buffer(vswc->validate, reloc->buffer, flags);
   /* TODO: Update pipebuffer to reserve buffers and not fail here */
   assert(ret == PIPE_OK);


    * SVGA virtual device it's not a performance issue since flushing commands
    * to the FIFO won't cause flushing in the host.
    */
   vswc->seen_regions += reloc->buffer->base.size;
   if(vswc->seen_regions >= VMW_GMR_POOL_SIZE/2)
      vswc->preemptive_flush = TRUE;
}

   vswc->surface.used += vswc->surface.staged;
   vswc->surface.staged = 0;
   vswc->surface.reserved = 0;

   assert(vswc->region.staged <= vswc->region.reserved);
   assert(vswc->region.used + vswc->region.staged <= vswc->region.size);
   vswc->region.used += vswc->region.staged;
   vswc->region.staged = 0;
   vswc->region.reserved = 0;
}




{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   unsigned i;

   for(i = 0; i < vswc->region.used; ++i) {
      pb_reference(&vswc->region.relocs[i].buffer, NULL);
   }

   for(i = 0; i < vswc->surface.used; ++i) {
      p_atomic_dec(&vswc->surface.handles[i]->validated);
      vmw_svga_winsys_surface_reference(&vswc->surface.handles[i], NULL);

   vswc->command.size = VMW_COMMAND_SIZE;
   vswc->surface.size = VMW_SURFACE_RELOCS;
   vswc->region.size = VMW_REGION_RELOCS;

   vswc->validate = pb_validate_create();
   if(!vswc->validate) {

src/gallium/winsys/drm/vmware/core/vmw_screen_pools.c (+18 −2)

   if(!vws->pools.gmr_mm)
      goto error;


   /*
    * GMR buffers are typically short-lived, but it's possible that at any
    * given instant a buffer is mapped. So to avoid stalling we tell
    * pipebuffer to forbid creation of buffers beyond half the GMR pool size.
    *
    * XXX: It is unclear whether we want to limit the total amount of
    * temporary malloc memory used to back up unvalidated GMR buffers. On one
    * hand it is preferable to fail an allocation than to exhaust the guest
    * memory with temporary data, but on the other hand it is possible that a
    * stupid application creates large vertex buffers and does not use them
    * for a long time -- since the svga pipe driver only emits the DMA uploads
    * when a buffer is used for drawing, this would effectively disable
    * swapping GMR buffers to memory. So far the preemptive flush already
    * seems to keep total allocated memory within relatively small bounds, so
    * we don't limit it.
    */
   vws->pools.gmr_fenced = fenced_bufmgr_create(
         vws->pools.gmr_mm,
         vmw_fence_ops_create(vws),
         VMW_GMR_POOL_SIZE/2,
         ~0);
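
For reference, an annotated reading of the call above; the meaning of the two
new arguments is inferred from the comment that precedes it (a per-buffer size
cap, and a cap on temporary CPU-side backing where ~0 means unlimited) and
should be checked against the pipebuffer headers:

   vws->pools.gmr_fenced = fenced_bufmgr_create(
         vws->pools.gmr_mm,         /* provider: the GMR memory manager */
         vmw_fence_ops_create(vws), /* fence handling for the SVGA device */
         VMW_GMR_POOL_SIZE/2,       /* refuse buffers beyond half the pool */
         ~0);                       /* don't limit temporary malloc backing */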


#ifdef DEBUG
   vws->pools.gmr_fenced = pb_debug_manager_create(vws->pools.gmr_fenced,