Browse Source

fix handling of textures with a base internal format that does not have all four rgba values set for radeon and r200 (discovered with a modified glean pixelFormats test; no one ever noticed in over 2 years). For radeon, use hw format I8 as previously, and change tex env to make the correct default values appear for both GL_ALPHA and GL_LUMINANCE textures. For r200, which supports GL_LUMINANCE just fine, use the AL88 hw format for GL_ALPHA textures, since it seems like it's probably not worth the effort to fix up the texture environment (certainly complicated in the case of ATI_fragment_shader programs).

tags/mesa_6_5_2
Roland Scheidegger 19 years ago
parent
commit
97f47f771a
2 changed files with 30 additions and 11 deletions
  1. 3
    1
      src/mesa/drivers/dri/r200/r200_tex.c
  2. 27
    10
      src/mesa/drivers/dri/radeon/radeon_texstate.c

+ 3
- 1
src/mesa/drivers/dri/r200/r200_tex.c View File

@@ -405,7 +405,9 @@ r200ChooseTextureFormat( GLcontext *ctx, GLint internalFormat,
case GL_ALPHA12:
case GL_ALPHA16:
case GL_COMPRESSED_ALPHA:
return _dri_texformat_a8;
/* can't use a8 format since interpreting hw I8 as a8 would result
in wrong rgb values (same as alpha value instead of 0). */
return _dri_texformat_al88;

case 1:
case GL_LUMINANCE:

+ 27
- 10
src/mesa/drivers/dri/radeon/radeon_texstate.c View File

@@ -523,9 +523,10 @@ static GLboolean radeonUpdateTextureEnv( GLcontext *ctx, int unit )

/* Set the texture environment state. Isn't this nice and clean?
* The chip will automagically set the texture alpha to 0xff when
* the texture format does not include an alpha component. This
* the texture format does not include an alpha component. This
* reduces the amount of special-casing we have to do, alpha-only
* textures being a notable exception.
* textures being a notable exception. Doesn't work for luminance
* textures realized with I8 and ALPHA_IN_MAP not set neither (on r100).
*/
/* Don't cache these results.
*/
@@ -555,7 +556,10 @@ static GLboolean radeonUpdateTextureEnv( GLcontext *ctx, int unit )
assert(op <= 3);
switch ( srcRGBi ) {
case GL_TEXTURE:
color_arg[i] = radeon_texture_color[op][unit];
if (texUnit->_Current->Image[0][0]->_BaseFormat == GL_ALPHA)
color_arg[i] = radeon_zero_color[op];
else
color_arg[i] = radeon_texture_color[op][unit];
break;
case GL_CONSTANT:
color_arg[i] = radeon_tfactor_color[op];
@@ -574,12 +578,17 @@ static GLboolean radeonUpdateTextureEnv( GLcontext *ctx, int unit )
break;
case GL_TEXTURE0:
case GL_TEXTURE1:
case GL_TEXTURE2:
case GL_TEXTURE2: {
GLuint txunit = srcRGBi - GL_TEXTURE0;
if (ctx->Texture.Unit[txunit]._Current->Image[0][0]->_BaseFormat == GL_ALPHA)
color_arg[i] = radeon_zero_color[op];
else
/* implement ogl 1.4/1.5 core spec here, not specification of
* GL_ARB_texture_env_crossbar (which would require disabling blending
* instead of undefined results when referencing not enabled texunit) */
color_arg[i] = radeon_texture_color[op][srcRGBi - GL_TEXTURE0];
break;
color_arg[i] = radeon_texture_color[op][txunit];
}
break;
default:
return GL_FALSE;
}
@@ -592,7 +601,10 @@ static GLboolean radeonUpdateTextureEnv( GLcontext *ctx, int unit )
assert(op <= 1);
switch ( srcAi ) {
case GL_TEXTURE:
alpha_arg[i] = radeon_texture_alpha[op][unit];
if (texUnit->_Current->Image[0][0]->_BaseFormat == GL_LUMINANCE)
alpha_arg[i] = radeon_zero_alpha[op+1];
else
alpha_arg[i] = radeon_texture_alpha[op][unit];
break;
case GL_CONSTANT:
alpha_arg[i] = radeon_tfactor_alpha[op];
@@ -611,9 +623,14 @@ static GLboolean radeonUpdateTextureEnv( GLcontext *ctx, int unit )
break;
case GL_TEXTURE0:
case GL_TEXTURE1:
case GL_TEXTURE2:
alpha_arg[i] = radeon_texture_alpha[op][srcAi - GL_TEXTURE0];
break;
case GL_TEXTURE2: {
GLuint txunit = srcAi - GL_TEXTURE0;
if (ctx->Texture.Unit[txunit]._Current->Image[0][0]->_BaseFormat == GL_LUMINANCE)
alpha_arg[i] = radeon_zero_alpha[op+1];
else
alpha_arg[i] = radeon_texture_alpha[op][txunit];
}
break;
default:
return GL_FALSE;
}

Loading…
Cancel
Save