@@ -18,6 +18,7 @@
struct _mesa_HashTable;
static int delayed_free( struct bufmgr *bm );
/* Maximum number of buffers to pass to bmValidateBufferList:
*/
@@ -96,6 +97,7 @@ static struct block *alloc_from_pool( struct bufmgr *bm,
return NULL;
DBG("alloc_from_pool %d sz 0x%x\n", pool_nr, size);
assert(align >= 7);
block->mem = mmAllocMem(pool->heap, size, align, 0);
if (!block->mem) {
@@ -143,13 +145,17 @@ static struct block *alloc_block( struct bufmgr *bm,
GLuint i;
for (i = 0; i < bm->nr_pools; i++) {
struct block *block;
if (bm->pool[i].flags & BM_NO_ALLOC)
continue;
if ((bm->pool[i].flags & flags & BM_MEM_MASK) == 0)
continue;
return alloc_from_pool(bm, i, size, align);
block = alloc_from_pool(bm, i, size, align);
if (block)
return block;
}
if (flags & BM_MEM_LOCAL)
@@ -161,12 +167,21 @@ static int bmAllocMem( struct bufmgr *bm,
static int bmAllocMem( struct bufmgr *bm,
struct buffer *buf )
{
buf->block = alloc_block(bm, buf->size, 4, buf->flags);
delayed_free(bm);
buf->block = alloc_block(bm,
buf->size,
buf->alignment,
buf->flags);
if (buf->block)
buf->block->buf = buf;
else
_mesa_printf("bmAllocMem failed memflags %x\n", buf->flags & BM_MEM_MASK);
assert(buf->block);
/* Sleep here or fail???
*/
/* assert(buf->block); */
return buf->block != NULL;
}
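bmAllocMem() now sweeps the delayed-free list before allocating, honours the buffer's requested alignment instead of a hard-coded 4, and reports failure to its caller rather than asserting. That return value is what makes the "sleep here or fail" question above answerable later: a caller can reclaim memory and retry. A minimal sketch of such a caller, with evict_and_retry() as a purely hypothetical helper, not part of bufmgr_fake.c:

static int try_alloc_with_fallback( struct bufmgr *bm, struct buffer *buf )
{
   if (bmAllocMem(bm, buf))
      return 1;

   /* Nothing fit: hypothetically wait on outstanding fences or evict
    * idle buffers, then try once more before giving up.
    */
   if (evict_and_retry(bm))
      return bmAllocMem(bm, buf);

   return 0;
}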
@@ -178,6 +193,8 @@ static void free_block( struct bufmgr *bm, struct block *block )
if (!block)
return;
remove_from_list(block);
switch (block->mem_type) {
case BM_MEM_AGP:
case BM_MEM_VRAM:
@@ -187,7 +204,7 @@ static void free_block( struct bufmgr *bm, struct block *block )
}
else {
block->buf = NULL;
move_to_tail(&block->pool->freed, block);
insert_at_tail(&block->pool->freed, block);
}
break;
@@ -219,6 +236,7 @@ static int delayed_free( struct bufmgr *bm )
}
}
DBG("%s: %d\n", __FUNCTION__, ret);
return ret;
}
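free_block() above now parks fenced AGP/VRAM blocks on the pool's freed list (insert_at_tail) instead of releasing them immediately, and delayed_free() later reclaims whatever the hardware has finished with, returning the number of bytes recovered. The general shape of such a sweep looks roughly like the sketch below; this is an illustration of the technique, not the actual bufmgr_fake.c body, and the fence test plus the list-walk macro are assumed:

static int delayed_free_sketch( struct bufmgr *bm )
{
   int freed = 0;
   GLuint i;

   for (i = 0; i < bm->nr_pools; i++) {
      struct block *block, *tmp;

      /* foreach_s and bmTestFence are assumed here for illustration. */
      foreach_s (block, tmp, &bm->pool[i].freed) {
         if (bmTestFence(bm, block->fence)) {   /* hardware done with it */
            freed += block->mem->size;
            remove_from_list(block);
            mmFreeMem(block->mem);
            free(block);
         }
      }
   }
   return freed;
}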
@@ -256,7 +274,11 @@ static int move_buffers( struct bufmgr *bm,
if (flags & BM_NO_UPLOAD)
goto cleanup;
assert(!buffers[i]->mapped);
/* Known issue: this assert will get hit on texture swapping.
* There's not much to do about that at this stage - it's
* tbd.
*/
assert(!buffers[i]->mapped);
DBG("try to move buffer %d size 0x%x to pools 0x%x\n",
buffers[i]->id, buffers[i]->size, flags & BM_MEM_MASK);
@@ -477,6 +499,7 @@ void bmDeleteBuffers(struct bufmgr *bm, unsigned n, unsigned *buffers)
free(buf);
_mesa_HashRemove(bm->hash, buffers[i]);
}
assert(_mesa_HashLookup(bm->hash, buffers[i]) == NULL);
}
}
@@ -501,7 +524,7 @@ unsigned bmBufferStatic(struct bufmgr *bm,
buf->size = size;
buf->flags = bm->pool[pool].flags;
buf->alignment = 0;
buf->alignment = 12;
buf->block = alloc_from_pool(bm, pool, buf->size, buf->alignment);
if (!buf->block)
return 0;
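The alignment values passed around here look like log2 exponents rather than byte counts, which matches Mesa's mm.c allocator where the alignment argument to mmAllocMem() means "2^align2 bytes". Under that assumption, bumping the static-buffer alignment from 0 to 12 requests page (4096-byte) alignment, and the assert(align >= 7) earlier enforces a 128-byte minimum:

/* Assuming log2-style alignment as in Mesa's mm.c: */
unsigned align_log2  = 12;
unsigned align_bytes = 1u << align_log2;   /* 4096 bytes, i.e. page aligned */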
@@ -43,6 +43,7 @@
#include "bufmgr.h"
#include "intel_regions.h"
#include "intel_batchbuffer.h"
/***************************************
* Mesa's Driver Functions
@@ -172,9 +173,9 @@ GLboolean i915CreateContext( const __GLcontextModes *mesaVis,
/* Advertise the full hardware capabilities. The new memory
* manager should cope much better with overload situations:
*/
ctx->Const.MaxTextureLevels = 11;
ctx->Const.Max3DTextureLevels = 8;
ctx->Const.MaxCubeTextureLevels = 11;
ctx->Const.MaxTextureLevels = 12;
ctx->Const.Max3DTextureLevels = 9;
ctx->Const.MaxCubeTextureLevels = 12;
ctx->Const.MaxTextureRectSize = (1<<11);
ctx->Const.MaxTextureUnits = I915_TEX_UNITS;
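Raising MaxTextureLevels and MaxCubeTextureLevels from 11 to 12 (and Max3DTextureLevels from 8 to 9) doubles the advertised maximum texture dimensions, since in Mesa a level count of n corresponds to a largest base level of 2^(n-1) texels per side:

/* Largest base-level sizes implied by the advertised level counts: */
GLuint max_2d_size   = 1 << (12 - 1);   /* 2048x2048 for MaxTextureLevels = 12    */
GLuint max_3d_size   = 1 << (9 - 1);    /* 256x256x256 for Max3DTextureLevels = 9 */
GLuint max_cube_size = 1 << (12 - 1);   /* 2048x2048 per cube face                */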
@@ -259,14 +259,14 @@ static GLboolean i915_update_tex_unit( GLcontext *ctx,
memcpy(i915->state.Tex[unit], state, sizeof(state));
}
if (INTEL_DEBUG & DEBUG_TEXTURE) {
_mesa_printf("state[I915_TEXREG_SS2] = 0x%x\n", state[I915_TEXREG_SS2]);
_mesa_printf("state[I915_TEXREG_SS3] = 0x%x\n", state[I915_TEXREG_SS3]);
_mesa_printf("state[I915_TEXREG_SS4] = 0x%x\n", state[I915_TEXREG_SS4]);
_mesa_printf("state[I915_TEXREG_MS2] = 0x%x\n", state[I915_TEXREG_MS2]);
_mesa_printf("state[I915_TEXREG_MS3] = 0x%x\n", state[I915_TEXREG_MS3]);
_mesa_printf("state[I915_TEXREG_MS4] = 0x%x\n", state[I915_TEXREG_MS4]);
}
#if 0
DBG(TEXTURE, "state[I915_TEXREG_SS2] = 0x%x\n", state[I915_TEXREG_SS2]);
DBG(TEXTURE, "state[I915_TEXREG_SS3] = 0x%x\n", state[I915_TEXREG_SS3]);
DBG(TEXTURE, "state[I915_TEXREG_SS4] = 0x%x\n", state[I915_TEXREG_SS4]);
DBG(TEXTURE, "state[I915_TEXREG_MS2] = 0x%x\n", state[I915_TEXREG_MS2]);
DBG(TEXTURE, "state[I915_TEXREG_MS3] = 0x%x\n", state[I915_TEXREG_MS3]);
DBG(TEXTURE, "state[I915_TEXREG_MS4] = 0x%x\n", state[I915_TEXREG_MS4]);
#endif
return GL_TRUE;
}
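The #if 0 block converts these dumps to a category-style DBG(TEXTURE, ...) form that is evidently not hooked up yet. A macro of that style is usually gated on a bit in INTEL_DEBUG; the definition below is a sketch of that pattern, not the driver's actual DBG:

/* Sketch of a category-gated debug macro; the real definition may differ. */
#define DBG(category, ...)                      \
   do {                                         \
      if (INTEL_DEBUG & DEBUG_##category)       \
         _mesa_printf(__VA_ARGS__);             \
   } while (0)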
@@ -403,15 +403,6 @@ void intelInstallBatchBuffer( struct intel_context *intel )
BM_READ,
NULL,
&intel->batch.start_offset);
if (0) {
static int foo;
if (foo++ > 10) {
_mesa_printf("foo\n");
exit(1);
}
}
}
void intelInitBatchBuffer( struct intel_context *intel )
@@ -75,6 +75,7 @@ struct intel_mipmap_tree *intel_miptree_create( struct intel_context *intel,
mt->depth0 = depth0;
mt->cpp = cpp;
mt->compressed = compressed;
mt->refcount = 1;
switch (intel->intelScreen->deviceID) {
case PCI_CHIP_I945_G:
@@ -104,19 +105,25 @@ struct intel_mipmap_tree *intel_miptree_create( struct intel_context *intel,
}
struct intel_mipmap_tree *intel_miptree_reference( struct intel_mipmap_tree *mt )
void intel_miptree_reference( struct intel_mipmap_tree **dst,
struct intel_mipmap_tree *src )
{
mt->refcount++;
return mt;
src->refcount++;
*dst = src;
}
void intel_miptree_release( struct intel_context *intel,
struct intel_mipmap_tree *mt )
struct intel_mipmap_tree **mt )
{
if (--mt->refcount) {
intel_region_release(intel, mt->region);
free(mt);
if (!*mt)
return;
DBG("%s %d\n", __FUNCTION__, (*mt)->refcount-1);
if (--(*mt)->refcount == 0) {
intel_region_release(intel, &((*mt)->region));
free(*mt);
}
*mt = NULL;
}
@@ -107,10 +107,11 @@ struct intel_mipmap_tree *intel_miptree_create( struct intel_context *intel,
GLuint cpp,
GLboolean compressed);
struct intel_mipmap_tree *intel_miptree_reference( struct intel_mipmap_tree * );
void intel_miptree_reference( struct intel_mipmap_tree **dst,
struct intel_mipmap_tree *src );
void intel_miptree_release( struct intel_context *intel,
struct intel_mipmap_tree *mt );
struct intel_mipmap_tree **mt );
/* Check if an image fits an existing mipmap tree layout
*/
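Both reference() and release() now operate on the address of the holding pointer: reference() bumps src->refcount and stores src into *dst, while release() drops the count, frees the tree (and its region) when the count reaches zero, is a no-op on a NULL holder, and always leaves the caller's pointer NULL. A short usage sketch under the new signatures; swap_tree() itself is a hypothetical helper, not driver code:

static void swap_tree( struct intel_context *intel,
                       struct intel_texture_image *intelImage,
                       struct intel_mipmap_tree *src )
{
   /* Drop whatever the image held before; safe even if it held nothing,
    * and intelImage->mt is guaranteed to be NULL afterwards.
    */
   intel_miptree_release(intel, &intelImage->mt);

   /* Take a new reference: bumps src->refcount and points the image at it. */
   intel_miptree_reference(&intelImage->mt, src);
}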
@@ -328,6 +328,10 @@ intelTryDrawPixels( GLcontext *ctx,
GLuint cpp = intel->intelScreen->cpp;
GLint size = width * pitch * cpp;
/* XXX: Need to adjust pixels pointer for unpack->skip pixels/rows
* offsets.
*/
if (INTEL_DEBUG & DEBUG_PIXEL)
fprintf(stderr, "%s\n", __FUNCTION__);
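The XXX above refers to GL_UNPACK_SKIP_ROWS / GL_UNPACK_SKIP_PIXELS: before handing client memory to the blitter, the source pointer has to be advanced past the skipped rows and pixels. A rough sketch of that adjustment for a tightly packed source (GL_UNPACK_ALIGNMENT padding ignored; Mesa's _mesa_image_address() is the complete version), assuming unpack, pixels, width and cpp are the locals already in scope here:

GLuint src_pitch = unpack->RowLength ? unpack->RowLength : width;   /* in pixels */
pixels = (const GLubyte *) pixels
   + (unpack->SkipRows * src_pitch + unpack->SkipPixels) * cpp;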
@@ -437,6 +441,7 @@ struct intel_region *intel_readbuf_region( struct intel_context *intel )
case BUFFER_BACK_LEFT:
return intel->back_region;
default:
assert(0);
return NULL;
}
}
@@ -85,20 +85,27 @@ struct intel_region *intel_region_alloc( struct intel_context *intel,
return region;
}
struct intel_region *intel_region_reference( struct intel_region *region )
void intel_region_reference( struct intel_region **dst,
struct intel_region *src)
{
region->refcount++;
return region;
src->refcount++;
*dst = src;
}
void intel_region_release( struct intel_context *intel,
struct intel_region *region )
struct intel_region **region )
{
if (--region->refcount) {
assert(region->map_refcount == 0);
bmDeleteBuffers(intel->bm, 1, &region->buffer);
free(region);
if (!*region)
return;
DBG("%s %d\n", __FUNCTION__, (*region)->refcount-1);
if (--(*region)->refcount == 0) {
assert((*region)->map_refcount == 0);
bmDeleteBuffers(intel->bm, 1, &(*region)->buffer);
free(*region);
}
*region = NULL;
}
@@ -56,9 +56,12 @@ struct intel_region *intel_region_alloc( struct intel_context *intel,
GLuint cpp,
GLuint pitch,
GLuint height );
struct intel_region *intel_region_reference( struct intel_region *ib );
void intel_region_reference( struct intel_region **dst,
struct intel_region *src );
void intel_region_release(struct intel_context *intel,
struct intel_region *ib );
struct intel_region **ib );
struct intel_region *intel_region_create_static( struct intel_context *intel,
@@ -111,7 +111,7 @@ static GLboolean intelInitDriver(__DRIscreenPrivate *sPriv)
intelScreen->front.pitch = gDRIPriv->fbStride;
intelScreen->front.offset = gDRIPriv->fbOffset;
intelScreen->front.map = sPriv->pFB;
intelScreen->front.map = (char *)sPriv->pFB;
intelScreen->back.offset = gDRIPriv->backOffset;
intelScreen->back.pitch = gDRIPriv->backPitch;
@@ -34,6 +34,7 @@
#include "intel_screen.h"
#include "intel_context.h"
#include "intel_regions.h"
#include "swrast/swrast.h"
int intel_translate_compare_func( GLenum func )
@@ -192,17 +193,19 @@ static void intelDrawBuffer(GLcontext *ctx, GLenum mode )
intelSetFrontClipRects( intel );
/* if (intel->draw_region) */
/* intel_region_release(intel, intel->draw_region); */
if (front) {
intel->drawOffset = screen->front.offset;
/* intel->draw_region = intel_region_reference(intel->front_region); */
intel->draw_region = intel->front_region;
if (intel->draw_region != intel->front_region) {
intel_region_release(intel, &intel->draw_region);
intel_region_reference(&intel->draw_region, intel->front_region);
}
} else {
intel->drawOffset = screen->back.offset;
/* intel->draw_region = intel_region_reference(intel->back_region); */
intel->draw_region = intel->back_region;
if (intel->draw_region != intel->back_region) {
intel_region_release(intel, &intel->draw_region);
intel_region_reference(&intel->draw_region, intel->back_region);
}
}
intel->vtbl.set_draw_offset( intel, intel->drawOffset );
@@ -112,7 +112,7 @@ static void guess_and_alloc_mipmap_tree( struct intel_context *intel,
lastLevel = firstLevel + MAX2(MAX2(l2width,l2height),l2depth);
}
assert(!intelObj->mt);
intelObj->mt = intel_miptree_create( intel,
intelObj->base.Target,
intelImage->base.InternalFormat,
@@ -207,44 +207,63 @@ static void intelTexImage(GLcontext *ctx,
* Release any old malloced memory.
*/
if (intelImage->mt) {
intel_miptree_release(intel, intelImage->mt);
intelImage->mt = NULL;
intel_miptree_release(intel, &intelImage->mt);
assert(!texImage->Data);
}
else if (texImage->Data) {
free(texImage->Data);
}
/* XXX: If this is the only texture image in the tree, could call
/* If this is the only texture image in the tree, could call
* bmBufferData with NULL data to free the old block and avoid
* waiting on any outstanding fences.
*
* XXX: Better to do this internally to intel_mipmap_tree.c,
* somehow?
*
* XXX: this hits a malloc/free problem. fixme.
*/
#if 0
if (intelObj->mt &&
intelObj->mt->first_level == level &&
intelObj->mt->last_level == level &&
intelObj->mt->target != GL_TEXTURE_CUBE_MAP_ARB) {
DBG("release it 2\n");
intel_miptree_release(intel, &intelObj->mt);
}
#endif
if (intelObj->mt &&
intelObj->mt->first_level == level &&
intelObj->mt->last_level == level &&
intelObj->mt->target != GL_TEXTURE_CUBE_MAP_ARB &&
!intel_miptree_match_image(intelObj->mt, &intelImage->base,
intelImage->face, intelImage->level)) {
DBG("release it\n");
intel_miptree_release(intel, &intelObj->mt);
assert(!intelObj->mt);
}
if (!intelObj->mt) {
guess_and_alloc_mipmap_tree(intel, intelObj, intelImage);
if (!intelObj->mt)
_mesa_printf("guess_and_alloc_mipmap_tree: failed\n");
}
if (intelObj->mt &&
intelObj->mt != intelImage->mt &&
intel_miptree_match_image(intelObj->mt, &intelImage->base,
intelImage->face, intelImage->level)) {
if (intelImage->mt)
intel_miptree_release(intel, intelImage->mt);
if (intelImage->mt) {
intel_miptree_release(intel, &intelImage->mt);
}
intelImage->mt = intel_miptree_reference(intelObj->mt);
intel_miptree_reference(&intelImage->mt, intelObj->mt);
assert(intelImage->mt);
}
if (!intelImage->mt)
_mesa_printf("XXX: Image did not fit into tree - storing in local memory!\n");
/* intelCopyTexImage calls this function with pixels == NULL, with
* the expectation that the mipmap tree will be set up but nothing
@@ -278,7 +297,6 @@ static void intelTexImage(GLcontext *ctx,
sizeInBytes = postConvWidth * postConvHeight * texelBytes;
dstRowStride = postConvWidth * texImage->TexFormat->TexelBytes;
}
texImage->Data = malloc(sizeInBytes);
}
@@ -60,19 +60,19 @@ static void intel_calculate_first_last_level( struct intel_texture_object *intel
}
static void copy_image_data_to_tree( struct intel_context *intel,
struct intel_mipmap_tree *mt,
struct intel_texture_object *intelObj,
struct intel_texture_image *intelImage )
{
if (intelImage->mt) {
/* Copy potentially with the blitter:
*/
intel_miptree_image_copy(intel,
mt,
intelObj->mt,
intelImage->face,
intelImage->level,
intelImage->mt);
intel_miptree_release(intel, intelImage->mt);
intel_miptree_release(intel, &intelImage->mt);
}
else {
assert(intelImage->base.Data != NULL);
@@ -80,7 +80,7 @@ static void copy_image_data_to_tree( struct intel_context *intel,
/* More straightforward upload.
*/
intel_miptree_image_data(intel,
mt,
intelObj->mt,
intelImage->face,
intelImage->level,
intelImage->base.Data,
@@ -90,7 +90,7 @@ static void copy_image_data_to_tree( struct intel_context *intel,
intelImage->base.Data = NULL;
}
intelImage->mt = intel_miptree_reference(mt);
intel_miptree_reference(&intelImage->mt, intelObj->mt);
}
@@ -118,8 +118,7 @@ static GLuint intel_finalize_mipmap_tree( struct intel_context *intel, GLuint un
*/
if (firstImage->base.Border) {
if (intelObj->mt) {
intel_miptree_release(intel, intelObj->mt);
intelObj->mt = NULL;
intel_miptree_release(intel, &intelObj->mt);
}
return 0;
}
@@ -134,9 +133,9 @@ static GLuint intel_finalize_mipmap_tree( struct intel_context *intel, GLuint un
firstImage->mt->last_level >= intelObj->lastLevel) {
if (intelObj->mt)
intel_miptree_release(intel, intelObj->mt);
intel_miptree_release(intel, &intelObj->mt);
intelObj->mt = intel_miptree_reference(firstImage->mt);
intel_miptree_reference(&intelObj->mt, firstImage->mt);
}
/* Check tree can hold all active levels. Check tree matches
@@ -152,8 +151,7 @@ static GLuint intel_finalize_mipmap_tree( struct intel_context *intel, GLuint un
((intelObj->mt->first_level > intelObj->firstLevel) ||
(intelObj->mt->last_level < intelObj->lastLevel) ||
(intelObj->mt->internal_format != firstImage->base.InternalFormat))) {
intel_miptree_release(intel, intelObj->mt);
intelObj->mt = NULL;
intel_miptree_release(intel, &intelObj->mt);
}
@@ -184,7 +182,7 @@ static GLuint intel_finalize_mipmap_tree( struct intel_context *intel, GLuint un
*/
if (intelObj->mt != intelImage->mt) {
copy_image_data_to_tree(intel,
intelObj->mt,
intelObj,
intelImage);
}
}