
Cope with memory pool fragmentation by allowing a second attempt at
rendering operations to take place after evicting all resident buffers.

Cope better with memory allocation failures throughout the driver and
improve tracking of failures.
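
In outline, the recovery path added here is: attempt the draw once; if it fails while the buffer manager reports an error, evict every resident buffer and retry once against empty, unfragmented pools. The following is a minimal self-contained sketch of that pattern, not the driver code itself; bm_error(), bm_evict_all() and try_draw() are hypothetical stand-ins for the real bmError(), bmEvictAll() and brw_try_draw_prims() introduced in the diff below.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the buffer manager's sticky failure flag (bm->fail). */
static bool pool_fragmented = true;

static bool bm_error(void)     { return pool_fragmented; }
static void bm_evict_all(void) { pool_fragmented = false; }

/* One rendering attempt; fails while the pool is fragmented. */
static bool try_draw(void)
{
   return !pool_fragmented;
}

/* The retry pattern: on failure, evict everything and try exactly once
 * more.  In the driver the eviction happens via the contended-lock path
 * in LOCK_HARDWARE rather than by a direct call.
 */
static bool draw(void)
{
   bool ok = try_draw();

   if (!ok && bm_error()) {
      bm_evict_all();
      ok = try_draw();
   }
   return ok;
}

int main(void)
{
   printf("draw %s\n", draw() ? "succeeded" : "failed");
   return 0;
}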
tags/mesa_6_5_1
Keith Whitwell, 19 years ago
commit 493b2ddecb

src/mesa/drivers/dri/i965/brw_draw.c (+52 / -10)

@@ -265,14 +265,14 @@ static GLboolean check_fallbacks( struct brw_context *brw,
}


-GLboolean brw_draw_prims( GLcontext *ctx,
-                          const struct gl_client_array *arrays[],
-                          const struct brw_draw_prim *prim,
-                          GLuint nr_prims,
-                          const struct brw_draw_index_buffer *ib,
-                          GLuint min_index,
-                          GLuint max_index,
-                          GLuint flags )
+static GLboolean brw_try_draw_prims( GLcontext *ctx,
+                                     const struct gl_client_array *arrays[],
+                                     const struct brw_draw_prim *prim,
+                                     GLuint nr_prims,
+                                     const struct brw_draw_index_buffer *ib,
+                                     GLuint min_index,
+                                     GLuint max_index,
+                                     GLuint flags )
{
struct intel_context *intel = intel_context(ctx);
struct brw_context *brw = brw_context(ctx);
@@ -338,9 +338,12 @@ GLboolean brw_draw_prims( GLcontext *ctx,
* way around this, as not every flush is due to a buffer filling
* up.
*/
-   intel_batchbuffer_flush( brw->intel.batch );
+   if (!intel_batchbuffer_flush( brw->intel.batch )) {
+      DBG("%s intel_batchbuffer_flush failed\n", __FUNCTION__);
+      retval = GL_FALSE;
+   }

-   if (intel->thrashing) {
+   if (retval && intel->thrashing) {
bmSetFence(intel);
}

@@ -359,9 +362,48 @@ GLboolean brw_draw_prims( GLcontext *ctx,
}

UNLOCK_HARDWARE(intel);

+   if (!retval)
+      _mesa_printf("%s failed\n", __FUNCTION__);

return retval;
}


+GLboolean brw_draw_prims( GLcontext *ctx,
+                          const struct gl_client_array *arrays[],
+                          const struct brw_draw_prim *prim,
+                          GLuint nr_prims,
+                          const struct brw_draw_index_buffer *ib,
+                          GLuint min_index,
+                          GLuint max_index,
+                          GLuint flags )
+{
+   struct intel_context *intel = intel_context(ctx);
+   GLboolean retval;
+
+   retval = brw_try_draw_prims(ctx, arrays, prim, nr_prims, ib, min_index, max_index, flags);
+
+   if (!retval && bmError(intel)) {
+
+      DBG("retrying\n");
+      /* This looks like out-of-memory but potentially we have
+       * situation where there is enough memory but it has become
+       * fragmented.  Clear out all heaps and start from scratch by
+       * faking a contended lock event: (done elsewhere)
+       */
+
+      /* Then try a second time only to upload textures and draw the
+       * primitives:
+       */
+      retval = brw_try_draw_prims(ctx, arrays, prim, nr_prims, ib, min_index, max_index, flags);
+   }
+
+   return retval;
+}


static void brw_invalidate_vbo_cb( struct intel_context *intel, void *ptr )
{
/* nothing to do, we don't rely on the contents being preserved */

src/mesa/drivers/dri/i965/brw_state_pool.c (+1 / -0)

@@ -47,6 +47,7 @@ GLboolean brw_pool_alloc( struct brw_mem_pool *pool,
size = (size + 3) & ~3;

if (pool->offset + fixup + size >= pool->size) {
+      _mesa_printf("%s failed\n", __FUNCTION__);
assert(0);
exit(0);
}

src/mesa/drivers/dri/i965/brw_vs_tnl.c (+1 / -1)

@@ -340,7 +340,7 @@ static struct ureg get_temp( struct tnl_program *p )
int bit = ffs( ~p->temp_in_use );
if (!bit) {
fprintf(stderr, "%s: out of temporaries\n", __FILE__);
-      abort();
+      assert(0);
}

if (bit > p->program->Base.NumTemporaries)

src/mesa/drivers/dri/i965/bufmgr.h (+7 / -4)

@@ -118,20 +118,20 @@ void bmBufferSetInvalidateCB(struct intel_context *,
* client would, so flags here is more proscriptive than the usage
* values in the ARB_vbo interface:
*/
-void bmBufferData(struct intel_context *,
+int bmBufferData(struct intel_context *,
struct buffer *buf,
unsigned size,
const void *data,
unsigned flags );

-void bmBufferSubData(struct intel_context *,
+int bmBufferSubData(struct intel_context *,
struct buffer *buf,
unsigned offset,
unsigned size,
const void *data );


-void bmBufferDataAUB(struct intel_context *,
+int bmBufferDataAUB(struct intel_context *,
struct buffer *buf,
unsigned size,
const void *data,
@@ -139,7 +139,7 @@ void bmBufferDataAUB(struct intel_context *,
unsigned aubtype,
unsigned aubsubtype );

-void bmBufferSubDataAUB(struct intel_context *,
+int bmBufferSubDataAUB(struct intel_context *,
struct buffer *buf,
unsigned offset,
unsigned size,
@@ -183,6 +183,9 @@ int bmValidateBuffers( struct intel_context * );
void bmReleaseBuffers( struct intel_context * );


+GLboolean bmError( struct intel_context * );
+void bmEvictAll( struct intel_context * );

/* This functionality is used by the buffer manager, not really sure
* if we need to be exposing it in this way, probably libdrm will
* offer equivalent calls.

src/mesa/drivers/dri/i965/bufmgr_fake.c (+124 / -32)

@@ -268,7 +268,7 @@ static void set_dirty( struct intel_context *intel,
}


-static int evict_lru( struct intel_context *intel, GLuint max_fence )
+static int evict_lru( struct intel_context *intel, GLuint max_fence, GLuint *pool )
{
struct bufmgr *bm = intel->bm;
struct block *block, *tmp;
@@ -292,6 +292,7 @@ static int evict_lru( struct intel_context *intel, GLuint max_fence )
block->buf->block = NULL;

free_block(intel, block);
+         *pool = i;
return 1;
}
}
@@ -305,7 +306,7 @@ static int evict_lru( struct intel_context *intel, GLuint max_fence )
#define foreach_s_rev(ptr, t, list) \
for(ptr=(list)->prev,t=(ptr)->prev; list != ptr; ptr=t, t=(t)->prev)

-static int evict_mru( struct intel_context *intel)
+static int evict_mru( struct intel_context *intel, GLuint *pool )
{
struct bufmgr *bm = intel->bm;
struct block *block, *tmp;
@@ -325,6 +326,7 @@ static int evict_mru( struct intel_context *intel)
block->buf->block = NULL;

free_block(intel, block);
+         *pool = i;
return 1;
}
}
@@ -432,6 +434,8 @@ static GLboolean alloc_block( struct intel_context *intel,
struct bufmgr *bm = intel->bm;
int i;

+   assert(intel->locked);

DBG("%s 0x%x bytes (%s)\n", __FUNCTION__, buf->size, buf->name);

for (i = 0; i < bm->nr_pools; i++) {
@@ -453,6 +457,7 @@ static GLboolean alloc_block( struct intel_context *intel,
static GLboolean evict_and_alloc_block( struct intel_context *intel,
struct buffer *buf )
{
+   GLuint pool;
struct bufmgr *bm = intel->bm;

assert(buf->block == NULL);
@@ -478,16 +483,16 @@ static GLboolean evict_and_alloc_block( struct intel_context *intel,

/* Look for memory blocks not used for >1 frame:
*/
-   while (evict_lru(intel, intel->second_last_swap_fence))
-      if (alloc_block(intel, buf))
+   while (evict_lru(intel, intel->second_last_swap_fence, &pool))
+      if (alloc_from_pool(intel, pool, buf))
return GL_TRUE;

/* If we're not thrashing, allow lru eviction to dig deeper into
* recently used textures. We'll probably be thrashing soon:
*/
if (!intel->thrashing) {
-      while (evict_lru(intel, 0))
-         if (alloc_block(intel, buf))
+      while (evict_lru(intel, 0, &pool))
+         if (alloc_from_pool(intel, pool, buf))
return GL_TRUE;
}

@@ -514,7 +519,7 @@ static GLboolean evict_and_alloc_block( struct intel_context *intel,
if (!is_empty_list(&bm->on_hardware)) {
bmSetFence(intel);

-      if (!is_empty_list(&bm->fenced)) {
+      while (!is_empty_list(&bm->fenced)) {
GLuint fence = bm->fenced.next->fence;
bmFinishFence(intel, fence);
}
@@ -528,10 +533,15 @@ static GLboolean evict_and_alloc_block( struct intel_context *intel,
return GL_TRUE;
}

-   while (evict_mru(intel))
-      if (alloc_block(intel, buf))
+   while (evict_mru(intel, &pool))
+      if (alloc_from_pool(intel, pool, buf))
return GL_TRUE;

+   DBG("%s 0x%x bytes failed\n", __FUNCTION__, buf->size);
+
+   assert(is_empty_list(&bm->on_hardware));
+   assert(is_empty_list(&bm->fenced));

return GL_FALSE;
}

@@ -742,13 +752,14 @@ static void wait_quiescent(struct intel_context *intel,
/* If buffer size changes, free and reallocate. Otherwise update in
* place.
*/
-void bmBufferData(struct intel_context *intel,
-                  struct buffer *buf,
-                  unsigned size,
-                  const void *data,
-                  unsigned flags )
+int bmBufferData(struct intel_context *intel,
+                 struct buffer *buf,
+                 unsigned size,
+                 const void *data,
+                 unsigned flags )
{
struct bufmgr *bm = intel->bm;
+   int retval = 0;

LOCK(bm);
{
@@ -780,13 +791,19 @@ void bmBufferData(struct intel_context *intel,

buf->size = size;
if (buf->block) {
-      assert (buf->block->mem->size == size);
+      assert (buf->block->mem->size >= size);
}

if (buf->flags & (BM_NO_BACKING_STORE|BM_NO_EVICT)) {
-      if (data != NULL) {
-         if (!buf->block && !evict_and_alloc_block(intel, buf))
-            assert(0);
+      assert(intel->locked || data == NULL);
+
+      if (data != NULL) {
+         if (!buf->block && !evict_and_alloc_block(intel, buf)) {
+            bm->fail = 1;
+            retval = -1;
+            goto out;
+         }

wait_quiescent(intel, buf->block);

@@ -810,22 +827,25 @@ void bmBufferData(struct intel_context *intel,
}
}
}
+ out:
   UNLOCK(bm);
+   return retval;
}


/* Update the buffer in place, in whatever space it is currently resident:
*/
-void bmBufferSubData(struct intel_context *intel,
+int bmBufferSubData(struct intel_context *intel,
struct buffer *buf,
unsigned offset,
unsigned size,
const void *data )
{
struct bufmgr *bm = intel->bm;
+   int retval = 0;

if (size == 0)
-      return;
+      return 0;

LOCK(bm);
{
@@ -834,8 +854,14 @@ void bmBufferSubData(struct intel_context *intel,
assert(offset+size <= buf->size);

if (buf->flags & (BM_NO_EVICT|BM_NO_BACKING_STORE)) {
-         if (!buf->block && !evict_and_alloc_block(intel, buf))
-            assert(0);
+         assert(intel->locked);
+
+         if (!buf->block && !evict_and_alloc_block(intel, buf)) {
+            bm->fail = 1;
+            retval = -1;
+            goto out;
+         }
if (!(buf->flags & BM_NO_FENCE_SUBDATA))
wait_quiescent(intel, buf->block);
@@ -854,12 +880,14 @@ void bmBufferSubData(struct intel_context *intel,
do_memcpy(buf->backing_store + offset, data, size);
}
}
+ out:
   UNLOCK(bm);
+   return retval;
}



-void bmBufferDataAUB(struct intel_context *intel,
+int bmBufferDataAUB(struct intel_context *intel,
struct buffer *buf,
unsigned size,
const void *data,
@@ -867,14 +895,14 @@ void bmBufferDataAUB(struct intel_context *intel,
unsigned aubtype,
unsigned aubsubtype )
{
-   bmBufferData(intel, buf, size, data, flags);
+   int retval = bmBufferData(intel, buf, size, data, flags);

/* This only works because in this version of the buffer manager we
* allocate all buffers statically in agp space and so can emit the
* uploads to the aub file with the correct offsets as they happen.
*/
-   if (data && intel->aub_file) {
+   if (retval == 0 && data && intel->aub_file) {

if (buf->block && !buf->dirty) {
intel->vtbl.aub_gtt_data(intel,
@@ -886,10 +914,12 @@ void bmBufferDataAUB(struct intel_context *intel,
buf->aub_dirty = 0;
}
}
+   return retval;
}

-void bmBufferSubDataAUB(struct intel_context *intel,
+int bmBufferSubDataAUB(struct intel_context *intel,
struct buffer *buf,
unsigned offset,
unsigned size,
@@ -897,7 +927,7 @@ void bmBufferSubDataAUB(struct intel_context *intel,
unsigned aubtype,
unsigned aubsubtype )
{
-   bmBufferSubData(intel, buf, offset, size, data);
+   int retval = bmBufferSubData(intel, buf, offset, size, data);

/* This only works because in this version of the buffer manager we
@@ -905,7 +935,7 @@ void bmBufferSubDataAUB(struct intel_context *intel,
* uploads to the aub file with the correct offsets as they happen.
*/
if (intel->aub_file) {
-      if (buf->block && !buf->dirty)
+      if (retval == 0 && buf->block && !buf->dirty)
intel->vtbl.aub_gtt_data(intel,
buf->block->mem->ofs + offset,
((const char *)buf->block->virtual) + offset,
@@ -913,6 +943,8 @@ void bmBufferSubDataAUB(struct intel_context *intel,
aubtype,
aubsubtype);
}

+   return retval;
}

void bmUnmapBufferAUB( struct intel_context *intel,
@@ -1016,8 +1048,12 @@ void *bmMapBuffer( struct intel_context *intel,
retval = NULL;
}
else if (buf->flags & (BM_NO_BACKING_STORE|BM_NO_EVICT)) {

+      assert(intel->locked);

if (!buf->block && !evict_and_alloc_block(intel, buf)) {
+         _mesa_printf("%s: alloc failed\n", __FUNCTION__);
bm->fail = 1;
retval = NULL;
}
else {
@@ -1116,6 +1152,7 @@ int bmValidateBuffers( struct intel_context *intel )
LOCK(bm);
{
DBG("%s fail %d\n", __FUNCTION__, bm->fail);
+      assert(intel->locked);

if (!bm->fail) {
struct block *block, *tmp;
@@ -1170,11 +1207,13 @@ int bmValidateBuffers( struct intel_context *intel )
}

retval = !bm->fail;
-      bm->fail = 0;
assert(is_empty_list(&bm->referenced));
}
UNLOCK(bm);


+   if (!retval)
+      _mesa_printf("%s failed\n", __FUNCTION__);

return retval;
}

@@ -1188,6 +1227,7 @@ void bmReleaseBuffers( struct intel_context *intel )
LOCK(bm);
{
struct block *block, *tmp;
+      assert(intel->locked);

foreach_s (block, tmp, &bm->referenced) {

@@ -1220,8 +1260,6 @@ void bmReleaseBuffers( struct intel_context *intel )

block->referenced = 0;
}

-      bm->fail = 0;
}
UNLOCK(bm);
}
@@ -1310,13 +1348,54 @@ void bm_fake_NotifyContendedLockTake( struct intel_context *intel )
assert(is_empty_list(&bm->referenced));

bm->need_fence = 1;
+      bm->fail = 0;
bmFinishFence(intel, bmSetFence(intel));

+      assert(is_empty_list(&bm->fenced));
+      assert(is_empty_list(&bm->on_hardware));

for (i = 0; i < bm->nr_pools; i++) {
if (!(bm->pool[i].flags & BM_NO_EVICT)) {
foreach_s(block, tmp, &bm->pool[i].lru) {
assert(bmTestFence(intel, block->fence));
set_dirty(intel, block->buf);
}
}
}
}
UNLOCK(bm);
}



+void bmEvictAll( struct intel_context *intel )
+{
+   struct bufmgr *bm = intel->bm;
+
+   LOCK(bm);
+   {
+      struct block *block, *tmp;
+      GLuint i;
+
+      DBG("%s\n", __FUNCTION__);
+
+      assert(is_empty_list(&bm->referenced));
+
+      bm->need_fence = 1;
+      bm->fail = 0;
+      bmFinishFence(intel, bmSetFence(intel));
+
+      assert(is_empty_list(&bm->fenced));
+      assert(is_empty_list(&bm->on_hardware));
+
+      for (i = 0; i < bm->nr_pools; i++) {
+         if (!(bm->pool[i].flags & BM_NO_EVICT)) {
+            foreach_s(block, tmp, &bm->pool[i].lru) {
+               assert(bmTestFence(intel, block->fence));
+               set_dirty(intel, block->buf);
+               block->buf->block = NULL;
+
+               free_block(intel, block);
+            }
+         }
+      }
@@ -1325,3 +1404,16 @@ void bm_fake_NotifyContendedLockTake( struct intel_context *intel )
}


+GLboolean bmError( struct intel_context *intel )
+{
+   struct bufmgr *bm = intel->bm;
+   GLboolean retval;
+
+   LOCK(bm);
+   {
+      retval = bm->fail;
+   }
+   UNLOCK(bm);
+
+   return retval;
+}

src/mesa/drivers/dri/i965/intel_batchbuffer.c (+17 / -6)

@@ -62,14 +62,15 @@ static void intel_batchbuffer_reset_cb( struct intel_context *intel,
batch->ptr = NULL;
}

-void intel_batchbuffer_map( struct intel_batchbuffer *batch )
+GLubyte *intel_batchbuffer_map( struct intel_batchbuffer *batch )
{
if (!batch->map) {
batch->map = bmMapBuffer(batch->intel, batch->buffer,
BM_MEM_AGP|BM_MEM_LOCAL|BM_CLIENT|BM_WRITE);
batch->ptr += (unsigned long)batch->map;
}
-   assert(batch->map);
+
+   return batch->map;
}

void intel_batchbuffer_unmap( struct intel_batchbuffer *batch )
@@ -122,18 +123,19 @@ void intel_batchbuffer_free( struct intel_batchbuffer *batch )
#define MI_BATCH_BUFFER_END (0xA<<23)


-void intel_batchbuffer_flush( struct intel_batchbuffer *batch )
+GLboolean intel_batchbuffer_flush( struct intel_batchbuffer *batch )
{
struct intel_context *intel = batch->intel;
GLuint used = batch->ptr - (batch->map + batch->offset);
GLuint offset;
GLboolean ignore_cliprects = (batch->flags & INTEL_BATCH_CLIPRECTS) ? GL_FALSE : GL_TRUE;
+   GLint retval = GL_TRUE;

assert(intel->locked);

if (used == 0) {
bmReleaseBuffers( batch->intel );
-      return;
+      return GL_TRUE;
}

/* Throw away non-effective packets.
@@ -148,7 +150,7 @@ void intel_batchbuffer_flush( struct intel_batchbuffer *batch )
sched_yield();
LOCK_HARDWARE(intel);

-      return;
+      return GL_TRUE;
}


@@ -177,7 +179,10 @@ void intel_batchbuffer_flush( struct intel_batchbuffer *batch )
offset = bmBufferOffset(batch->intel, batch->buffer);

if (!bmValidateBuffers( batch->intel )) {
-      assert(0);
+      assert(intel->locked);
+      bmReleaseBuffers( batch->intel );
+      retval = GL_FALSE;
+      goto out;
}


@@ -208,8 +213,14 @@ void intel_batchbuffer_flush( struct intel_batchbuffer *batch )

/* Reset the buffer:
*/
+ out:
intel_batchbuffer_reset( batch );
intel_batchbuffer_map( batch );

+   if (!retval)
+      DBG("%s failed\n", __FUNCTION__);
+
+   return retval;
}



src/mesa/drivers/dri/i965/intel_batchbuffer.h (+2 / -2)

@@ -57,10 +57,10 @@ struct intel_batchbuffer *intel_batchbuffer_alloc( struct intel_context *intel )
void intel_batchbuffer_free( struct intel_batchbuffer *batch );


-void intel_batchbuffer_flush( struct intel_batchbuffer *batch );
+GLboolean intel_batchbuffer_flush( struct intel_batchbuffer *batch );

void intel_batchbuffer_unmap( struct intel_batchbuffer *batch );
-void intel_batchbuffer_map( struct intel_batchbuffer *batch );
+GLubyte *intel_batchbuffer_map( struct intel_batchbuffer *batch );


/* Unlike bmBufferData, this currently requires the buffer be mapped.

src/mesa/drivers/dri/i965/intel_context.c (+24 / -18)

@@ -536,6 +536,13 @@ GLboolean intelMakeCurrent(__DRIcontextPrivate *driContextPriv,
return GL_TRUE;
}


+static void lost_hardware( struct intel_context *intel )
+{
+   bm_fake_NotifyContendedLockTake( intel );
+   intel->vtbl.lost_hardware( intel );
+}

static void intelContendedLock( struct intel_context *intel, GLuint flags )
{
__DRIdrawablePrivate *dPriv = intel->driDrawable;
@@ -560,16 +567,7 @@ static void intelContendedLock( struct intel_context *intel, GLuint flags )
*/
if (sarea->ctxOwner != me) {
sarea->ctxOwner = me;

-      /* Should also fence the frontbuffer even if ctxOwner doesn't
-       * change:
-       */
-      bm_fake_NotifyContendedLockTake( intel );
-
-
-      /*
-       */
-      intel->vtbl.lost_hardware( intel );
+      lost_hardware(intel);
}

/* Drawable changed?
@@ -606,19 +604,16 @@ void LOCK_HARDWARE( struct intel_context *intel )
intel->locked = 1;

if (intel->aub_wrap) {
-      /* Should also fence the frontbuffer even if ctxOwner doesn't
-       * change:
-       */
-      bm_fake_NotifyContendedLockTake( intel );
-
-      /*
-       */
-      intel->vtbl.lost_hardware( intel );
intel->vtbl.aub_wrap(intel);

intel->aub_wrap = 0;
}

+   if (bmError(intel)) {
+      bmEvictAll(intel);
+      intel->vtbl.lost_hardware( intel );
+   }

/* Make sure nothing has been emitted prior to getting the lock:
*/
@@ -626,7 +621,18 @@ void LOCK_HARDWARE( struct intel_context *intel )

/* XXX: postpone, may not be needed:
*/
-   intel_batchbuffer_map(intel->batch);
+   if (!intel_batchbuffer_map(intel->batch)) {
+      bmEvictAll(intel);
+      intel->vtbl.lost_hardware( intel );
+
+      /* This could only fail if the batchbuffer was greater in size
+       * than the available texture memory:
+       */
+      if (!intel_batchbuffer_map(intel->batch)) {
+         _mesa_printf("double failure to map batchbuffer\n");
+         assert(0);
+      }
+   }
}

src/mesa/drivers/dri/i965/intel_mipmap_tree.c (+19 / -17)

@@ -214,13 +214,13 @@ GLuint intel_miptree_image_offset(struct intel_mipmap_tree *mt,

/* Upload data for a particular image.
*/
-void intel_miptree_image_data(struct intel_context *intel,
-                              struct intel_mipmap_tree *dst,
-                              GLuint face,
-                              GLuint level,
-                              const void *src,
-                              GLuint src_row_pitch,
-                              GLuint src_image_pitch)
+GLboolean intel_miptree_image_data(struct intel_context *intel,
+                                   struct intel_mipmap_tree *dst,
+                                   GLuint face,
+                                   GLuint level,
+                                   const void *src,
+                                   GLuint src_row_pitch,
+                                   GLuint src_image_pitch)
{
GLuint depth = dst->level[level].depth;
GLuint dst_offset = intel_miptree_image_offset(dst, face, level);
@@ -229,17 +229,19 @@ void intel_miptree_image_data(struct intel_context *intel,

DBG("%s\n", __FUNCTION__);
for (i = 0; i < depth; i++) {
-      intel_region_data(intel,
-                        dst->region,
-                        dst_offset + dst_depth_offset[i],
-                        0,
-                        0,
-                        src,
-                        src_row_pitch,
-                        0, 0, /* source x,y */
-                        dst->level[level].width,
-                        dst->level[level].height);
+      if (!intel_region_data(intel,
+                             dst->region,
+                             dst_offset + dst_depth_offset[i],
+                             0,
+                             0,
+                             src,
+                             src_row_pitch,
+                             0, 0, /* source x,y */
+                             dst->level[level].width,
+                             dst->level[level].height))
+         return GL_FALSE;
src += src_image_pitch;
}
+   return GL_TRUE;
}


src/mesa/drivers/dri/i965/intel_mipmap_tree.h (+7 / -7)

@@ -147,13 +147,13 @@ void intel_miptree_set_image_offset(struct intel_mipmap_tree *mt,

/* Upload an image into a tree
*/
-void intel_miptree_image_data(struct intel_context *intel,
-                              struct intel_mipmap_tree *dst,
-                              GLuint face,
-                              GLuint level,
-                              const void *src,
-                              GLuint src_row_pitch,
-                              GLuint src_image_pitch);
+GLboolean intel_miptree_image_data(struct intel_context *intel,
+                                   struct intel_mipmap_tree *dst,
+                                   GLuint face,
+                                   GLuint level,
+                                   const void *src,
+                                   GLuint src_row_pitch,
+                                   GLuint src_image_pitch);

/* i915_mipmap_tree.c:
*/

src/mesa/drivers/dri/i965/intel_regions.c (+34 / -29)

@@ -52,6 +52,8 @@ GLubyte *intel_region_map(struct intel_context *intel, struct intel_region *regi
DBG("%s\n", __FUNCTION__);
if (!region->map_refcount++) {
region->map = bmMapBuffer(intel, region->buffer, 0);
+      if (!region->map)
+         region->map_refcount--;
}

return region->map;
@@ -198,15 +200,14 @@ void _mesa_copy_rect( GLubyte *dst,
*
* Currently always memcpy.
*/
-void intel_region_data(struct intel_context *intel,
-                       struct intel_region *dst,
-                       GLuint dst_offset,
-                       GLuint dstx, GLuint dsty,
-                       const void *src, GLuint src_pitch,
-                       GLuint srcx, GLuint srcy,
-                       GLuint width, GLuint height)
+GLboolean intel_region_data(struct intel_context *intel,
+                            struct intel_region *dst,
+                            GLuint dst_offset,
+                            GLuint dstx, GLuint dsty,
+                            const void *src, GLuint src_pitch,
+                            GLuint srcx, GLuint srcy,
+                            GLuint width, GLuint height)
{
DBG("%s\n", __FUNCTION__);

if (width == dst->pitch &&
@@ -216,29 +217,33 @@ void intel_region_data(struct intel_context *intel,
srcx == 0 &&
srcy == 0)
{
-      bmBufferDataAUB(intel,
-                      dst->buffer,
-                      dst->cpp * width * dst->height,
-                      src,
-                      0,
-                      0, /* DW_NOTYPE */
-                      0);
+      return (bmBufferDataAUB(intel,
+                              dst->buffer,
+                              dst->cpp * width * dst->height,
+                              src, 0, 0, 0) == 0);
}
else {
-      assert (dst_offset + dstx + width +
-              (dsty + height - 1) * dst->pitch * dst->cpp <=
-              dst->pitch * dst->cpp * dst->height);
-
-      _mesa_copy_rect(intel_region_map(intel, dst) + dst_offset,
-                      dst->cpp,
-                      dst->pitch,
-                      dstx, dsty,
-                      width, height,
-                      src,
-                      src_pitch,
-                      srcx, srcy);
-
-      intel_region_unmap(intel, dst);
+      GLubyte *map = intel_region_map(intel, dst);
+
+      if (map) {
+         assert (dst_offset + dstx + width +
+                 (dsty + height - 1) * dst->pitch * dst->cpp <=
+                 dst->pitch * dst->cpp * dst->height);
+         _mesa_copy_rect(map + dst_offset,
+                         dst->cpp,
+                         dst->pitch,
+                         dstx, dsty,
+                         width, height,
+                         src,
+                         src_pitch,
+                         srcx, srcy);
+         intel_region_unmap(intel, dst);
+         return GL_TRUE;
+      }
+      else
+         return GL_FALSE;
}
}

src/mesa/drivers/dri/i965/intel_regions.h (+7 / -7)

@@ -91,13 +91,13 @@ void intel_region_unmap(struct intel_context *intel,

/* Upload data to a rectangular sub-region
*/
-void intel_region_data(struct intel_context *intel,
-                       struct intel_region *dest,
-                       GLuint dest_offset,
-                       GLuint destx, GLuint desty,
-                       const void *src, GLuint src_stride,
-                       GLuint srcx, GLuint srcy,
-                       GLuint width, GLuint height);
+GLboolean intel_region_data(struct intel_context *intel,
+                            struct intel_region *dest,
+                            GLuint dest_offset,
+                            GLuint destx, GLuint desty,
+                            const void *src, GLuint src_stride,
+                            GLuint srcx, GLuint srcy,
+                            GLuint width, GLuint height);
/* Copy rectangular sub-regions
*/

src/mesa/drivers/dri/i965/intel_tex_validate.c (+26 / -21)

@@ -89,21 +89,21 @@ static void intel_calculate_first_last_level( struct intel_texture_object *intel
intelObj->lastLevel = lastLevel;
}

-static void copy_image_data_to_tree( struct intel_context *intel,
-                                     struct intel_texture_object *intelObj,
-                                     struct gl_texture_image *texImage,
-                                     GLuint face,
-                                     GLuint level)
+static GLboolean copy_image_data_to_tree( struct intel_context *intel,
+                                          struct intel_texture_object *intelObj,
+                                          struct gl_texture_image *texImage,
+                                          GLuint face,
+                                          GLuint level)
{
-   intel_miptree_image_data(intel,
-                            intelObj->mt,
-                            face,
-                            level,
-                            texImage->Data,
-                            texImage->RowStride,
-                            (texImage->RowStride *
-                             texImage->Height *
-                             texImage->TexFormat->TexelBytes));
+   return intel_miptree_image_data(intel,
+                                   intelObj->mt,
+                                   face,
+                                   level,
+                                   texImage->Data,
+                                   texImage->RowStride,
+                                   (texImage->RowStride *
+                                    texImage->Height *
+                                    texImage->TexFormat->TexelBytes));
}

static void intel_texture_invalidate( struct intel_texture_object *intelObj )
@@ -129,7 +129,6 @@ GLuint intel_finalize_mipmap_tree( struct intel_context *intel,
struct gl_texture_object *tObj )
{
struct intel_texture_object *intelObj = intel_texture_object(tObj);

GLuint face, i;
GLuint nr_faces = 0;
struct gl_texture_image *firstImage;
@@ -226,18 +225,24 @@ GLuint intel_finalize_mipmap_tree( struct intel_context *intel,
i,
texImage->Data);

-         copy_image_data_to_tree(intel,
-                                 intelObj,
-                                 texImage,
-                                 face,
-                                 i);
+         if (!copy_image_data_to_tree(intel,
+                                      intelObj,
+                                      texImage,
+                                      face,
+                                      i))
+            return GL_FALSE;

}
}
-      intelObj->dirty_images[face] = 0;
}
}

+   /* Only clear the dirty flags if everything went ok:
+    */
+   for (face = 0; face < nr_faces; face++) {
+      intelObj->dirty_images[face] = 0;
+   }

intelObj->dirty = 0;
}

