Please see the diff for further info. This paves the way for moving user buffer uploads out of drivers and should allow cleaning up the mess in u_upload_mgr in the meantime. For now only allowed for buffers on r300 and r600. Acked-by: Christian König <deathsimple@vodafone.de>
tags/mesa-8.0-rc1
@@ -68,8 +68,13 @@ i915_get_transfer(struct pipe_context *pipe, | |||
const struct pipe_box *box) | |||
{ | |||
struct i915_context *i915 = i915_context(pipe); | |||
struct pipe_transfer *transfer = util_slab_alloc(&i915->transfer_pool); | |||
struct pipe_transfer *transfer; | |||
if (usage & PIPE_TRANSFER_MAP_PERMANENTLY) { | |||
return NULL; | |||
} | |||
transfer = util_slab_alloc(&i915->transfer_pool); | |||
if (transfer == NULL) | |||
return NULL; | |||
@@ -720,9 +720,14 @@ i915_texture_get_transfer(struct pipe_context *pipe, | |||
{ | |||
struct i915_context *i915 = i915_context(pipe); | |||
struct i915_texture *tex = i915_texture(resource); | |||
struct i915_transfer *transfer = util_slab_alloc(&i915->texture_transfer_pool); | |||
struct i915_transfer *transfer; | |||
boolean use_staging_texture = FALSE; | |||
if (usage & PIPE_TRANSFER_MAP_PERMANENTLY) { | |||
return NULL; | |||
} | |||
transfer = util_slab_alloc(&i915->texture_transfer_pool); | |||
if (transfer == NULL) | |||
return NULL; | |||
@@ -587,6 +587,10 @@ llvmpipe_get_transfer(struct pipe_context *pipe, | |||
assert(resource); | |||
assert(level <= resource->last_level); | |||
if (usage & PIPE_TRANSFER_MAP_PERMANENTLY) { | |||
return NULL; | |||
} | |||
/* | |||
* Transfers, like other pipe operations, must happen in order, so flush the | |||
* context if necessary. |
@@ -172,7 +172,13 @@ nouveau_buffer_transfer_get(struct pipe_context *pipe, | |||
{ | |||
struct nv04_resource *buf = nv04_resource(resource); | |||
struct nouveau_context *nv = nouveau_context(pipe); | |||
struct nouveau_transfer *xfr = CALLOC_STRUCT(nouveau_transfer); | |||
struct nouveau_transfer *xfr; | |||
if (usage & PIPE_TRANSFER_MAP_PERMANENTLY) { | |||
return NULL; | |||
} | |||
xfr = CALLOC_STRUCT(nouveau_transfer); | |||
if (!xfr) | |||
return NULL; | |||
@@ -243,7 +243,7 @@ nv50_miptree_transfer_new(struct pipe_context *pctx, | |||
uint32_t size; | |||
int ret; | |||
if (usage & PIPE_TRANSFER_MAP_DIRECTLY) | |||
if (usage & (PIPE_TRANSFER_MAP_DIRECTLY | PIPE_TRANSFER_MAP_PERMANENTLY)) | |||
return NULL; | |||
tx = CALLOC_STRUCT(nv50_transfer); |
@@ -243,7 +243,7 @@ nvc0_miptree_transfer_new(struct pipe_context *pctx, | |||
uint32_t size; | |||
int ret; | |||
if (usage & PIPE_TRANSFER_MAP_DIRECTLY) | |||
if (usage & (PIPE_TRANSFER_MAP_DIRECTLY | PIPE_TRANSFER_MAP_PERMANENTLY)) | |||
return NULL; | |||
tx = CALLOC_STRUCT(nvc0_transfer); |
@@ -26,6 +26,9 @@ nvfx_transfer_new(struct pipe_context *pipe, | |||
unsigned usage, | |||
const struct pipe_box *box) | |||
{ | |||
if (usage & PIPE_TRANSFER_MAP_PERMANENTLY) { | |||
return NULL; | |||
} | |||
if((usage & (PIPE_TRANSFER_UNSYNCHRONIZED | PIPE_TRANSFER_DONTBLOCK)) == PIPE_TRANSFER_DONTBLOCK) | |||
{ | |||
struct nouveau_bo* bo = ((struct nvfx_resource*)pt)->bo; |
@@ -89,6 +89,10 @@ r300_texture_get_transfer(struct pipe_context *ctx, | |||
struct pipe_resource base; | |||
boolean referenced_cs, referenced_hw; | |||
if (usage & (PIPE_TRANSFER_MAP_DIRECTLY | PIPE_TRANSFER_MAP_PERMANENTLY)) { | |||
return NULL; | |||
} | |||
referenced_cs = | |||
r300->rws->cs_is_buffer_referenced(r300->cs, tex->cs_buf); | |||
if (referenced_cs) { |
@@ -630,6 +630,10 @@ struct pipe_transfer* r600_texture_get_transfer(struct pipe_context *ctx, | |||
int r; | |||
boolean use_staging_texture = FALSE; | |||
if (usage & PIPE_TRANSFER_MAP_PERMANENTLY) { | |||
return NULL; | |||
} | |||
/* We cannot map a tiled texture directly because the data is | |||
* in a different order, therefore we do detiling using a blit. | |||
* |
@@ -74,6 +74,10 @@ svga_buffer_get_transfer(struct pipe_context *pipe, | |||
struct svga_buffer *sbuf = svga_buffer(resource); | |||
struct pipe_transfer *transfer; | |||
if (usage & PIPE_TRANSFER_MAP_PERMANENTLY) { | |||
return NULL; | |||
} | |||
transfer = CALLOC_STRUCT(pipe_transfer); | |||
if (transfer == NULL) { | |||
return NULL; |
@@ -259,7 +259,7 @@ svga_texture_get_transfer(struct pipe_context *pipe, | |||
unsigned nblocksy = util_format_get_nblocksy(texture->format, box->height); | |||
/* We can't map texture storage directly */ | |||
if (usage & PIPE_TRANSFER_MAP_DIRECTLY) | |||
if (usage & (PIPE_TRANSFER_MAP_DIRECTLY | PIPE_TRANSFER_MAP_PERMANENTLY)) | |||
return NULL; | |||
assert(box->depth == 1); |
@@ -225,6 +225,22 @@ enum pipe_transfer_usage { | |||
*/ | |||
PIPE_TRANSFER_MAP_DIRECTLY = (1 << 2), | |||
/** | |||
* The transfer should map the resource storage directly and the GPU should | |||
* be able to see what the CPU has written. Such a storage may stay mapped | |||
* while issuing draw commands which use it. The only allowed usage is | |||
* non-overlapping writes which are suballocated out of a big buffer. | |||
* The minimum allowed alignment of suballocations is 256 bytes (this is | |||
* a subject to change). | |||
* The flag is intended to be used to avoid mapping and unmapping | |||
* resources repeatedly when doing uploads and draws intermixed. | |||
* | |||
* The driver may return NULL if that isn't possible, and the state | |||
* tracker needs to cope with that and use an alternative path | |||
* without this flag. | |||
*/ | |||
PIPE_TRANSFER_MAP_PERMANENTLY = (1 << 3), | |||
/** | |||
* Discards the memory within the mapped region. | |||
* |