It's past time, and it was going to get in the way of the renderbuffer mapping refactor. We dropped all the other DRI1 drivers for this release, and I can't imagine anybody supporting DRI1 radeon classic in a new release of Mesa.

Diff produced by treating kernel_mm as true, deleting the DRI1 paths that produce kernel_mm false, and deleting code.
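For readers skimming the diff: the change pattern is uniform. Every branch keyed on radeonScreen->kernel_mm keeps only its KMS side, and the DRI1 side (plus the helpers only it called) goes away. A toy, self-contained C sketch of that collapse, using made-up names rather than the driver's real symbols:

#include <stdio.h>

struct screen { int kernel_mm; };

/* Before: both memory-manager paths were selected at run time. */
static void emit_before(const struct screen *s)
{
        if (s->kernel_mm)
                printf("emit through the kernel memory manager (kept)\n");
        else
                printf("emit through the DRI1 user-space manager (deleted)\n");
}

/* After: kernel_mm is treated as always true, so only the KMS side survives. */
static void emit_after(const struct screen *s)
{
        (void)s;
        printf("emit through the kernel memory manager (kept)\n");
}

int main(void)
{
        struct screen s = { 1 };
        emit_before(&s);
        emit_after(&s);
        return 0;
}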
@@ -893,12 +893,6 @@ struct radeon_bo_manager *radeon_bo_manager_legacy_ctor(struct radeon_screen *sc | |||
return (struct radeon_bo_manager*)bom; | |||
} | |||
void radeon_bo_legacy_texture_age(struct radeon_bo_manager *bom) | |||
{ | |||
struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bom; | |||
DRI_AGE_TEXTURES(boml->texture_heap); | |||
} | |||
unsigned radeon_bo_legacy_relocs_size(struct radeon_bo *bo) | |||
{ | |||
struct radeon_bo_int *boi = (struct radeon_bo_int *)bo; |
@@ -62,8 +62,6 @@ void rcommonBeginBatch(radeonContextPtr rmesa, | |||
radeon_cs_write_dword(b_l_rmesa->cmdbuf.cs, __offset); \ | |||
radeon_cs_write_reloc(b_l_rmesa->cmdbuf.cs, \ | |||
bo, rd, wd, flags); \ | |||
if (!b_l_rmesa->radeonScreen->kernel_mm) \ | |||
b_l_rmesa->cmdbuf.cs->section_cdw += 2; \ | |||
} while(0) | |||
@@ -231,18 +231,6 @@ void radeonUpdateScissor( struct gl_context *ctx ) | |||
y2 = y + h - 1; | |||
} | |||
if (!rmesa->radeonScreen->kernel_mm) { | |||
/* Fix scissors for dri 1 */ | |||
__DRIdrawable *dPriv = radeon_get_drawable(rmesa); | |||
x1 += dPriv->x; | |||
x2 += dPriv->x + 1; | |||
min_x += dPriv->x; | |||
max_x += dPriv->x + 1; | |||
y1 += dPriv->y; | |||
y2 += dPriv->y + 1; | |||
min_y += dPriv->y; | |||
max_y += dPriv->y + 1; | |||
} | |||
rmesa->state.scissor.rect.x1 = CLAMP(x1, min_x, max_x); | |||
rmesa->state.scissor.rect.y1 = CLAMP(y1, min_y, max_y); | |||
@@ -389,15 +377,6 @@ void radeonWaitForIdleLocked(radeonContextPtr radeon) | |||
} | |||
} | |||
static void radeonWaitForIdle(radeonContextPtr radeon) | |||
{ | |||
if (!radeon->radeonScreen->driScreen->dri2.enabled) { | |||
LOCK_HARDWARE(radeon); | |||
radeonWaitForIdleLocked(radeon); | |||
UNLOCK_HARDWARE(radeon); | |||
} | |||
} | |||
static void radeon_flip_renderbuffers(struct radeon_framebuffer *rfb) | |||
{ | |||
int current_page = rfb->pf_current_page; | |||
@@ -913,34 +892,6 @@ void radeon_viewport(struct gl_context *ctx, GLint x, GLint y, GLsizei width, GL | |||
ctx->Driver.Viewport = old_viewport; | |||
} | |||
static void radeon_print_state_atom_prekmm(radeonContextPtr radeon, struct radeon_state_atom *state) | |||
{ | |||
int i, j, reg; | |||
int dwords = (*state->check) (radeon->glCtx, state); | |||
drm_r300_cmd_header_t cmd; | |||
fprintf(stderr, " emit %s %d/%d\n", state->name, dwords, state->cmd_size); | |||
if (radeon_is_debug_enabled(RADEON_STATE, RADEON_TRACE)) { | |||
if (dwords > state->cmd_size) | |||
dwords = state->cmd_size; | |||
for (i = 0; i < dwords;) { | |||
cmd = *((drm_r300_cmd_header_t *) &state->cmd[i]); | |||
reg = (cmd.packet0.reghi << 8) | cmd.packet0.reglo; | |||
fprintf(stderr, " %s[%d]: cmdpacket0 (first reg=0x%04x, count=%d)\n", | |||
state->name, i, reg, cmd.packet0.count); | |||
++i; | |||
for (j = 0; j < cmd.packet0.count && i < dwords; j++) { | |||
fprintf(stderr, " %s[%d]: 0x%04x = %08x\n", | |||
state->name, i, reg, state->cmd[i]); | |||
reg += 4; | |||
++i; | |||
} | |||
} | |||
} | |||
} | |||
static void radeon_print_state_atom(radeonContextPtr radeon, struct radeon_state_atom *state) | |||
{ | |||
int i, j, reg, count; | |||
@@ -949,11 +900,6 @@ static void radeon_print_state_atom(radeonContextPtr radeon, struct radeon_state | |||
if (!radeon_is_debug_enabled(RADEON_STATE, RADEON_VERBOSE) ) | |||
return; | |||
if (!radeon->radeonScreen->kernel_mm) { | |||
radeon_print_state_atom_prekmm(radeon, state); | |||
return; | |||
} | |||
dwords = (*state->check) (radeon->glCtx, state); | |||
fprintf(stderr, " emit %s %d/%d\n", state->name, dwords, state->cmd_size); | |||
@@ -1145,32 +1091,21 @@ void radeonFinish(struct gl_context * ctx) | |||
{ | |||
radeonContextPtr radeon = RADEON_CONTEXT(ctx); | |||
struct gl_framebuffer *fb = ctx->DrawBuffer; | |||
struct radeon_renderbuffer *rrb; | |||
int i; | |||
if (ctx->Driver.Flush) | |||
ctx->Driver.Flush(ctx); /* +r6/r7 */ | |||
if (radeon->radeonScreen->kernel_mm) { | |||
for (i = 0; i < fb->_NumColorDrawBuffers; i++) { | |||
struct radeon_renderbuffer *rrb; | |||
rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[i]); | |||
if (rrb && rrb->bo) | |||
radeon_bo_wait(rrb->bo); | |||
} | |||
{ | |||
struct radeon_renderbuffer *rrb; | |||
rrb = radeon_get_depthbuffer(radeon); | |||
if (rrb && rrb->bo) | |||
radeon_bo_wait(rrb->bo); | |||
} | |||
} else if (radeon->do_irqs) { | |||
LOCK_HARDWARE(radeon); | |||
radeonEmitIrqLocked(radeon); | |||
UNLOCK_HARDWARE(radeon); | |||
radeonWaitIrq(radeon); | |||
} else { | |||
radeonWaitForIdle(radeon); | |||
for (i = 0; i < fb->_NumColorDrawBuffers; i++) { | |||
struct radeon_renderbuffer *rrb; | |||
rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[i]); | |||
if (rrb && rrb->bo) | |||
radeon_bo_wait(rrb->bo); | |||
} | |||
rrb = radeon_get_depthbuffer(radeon); | |||
if (rrb && rrb->bo) | |||
radeon_bo_wait(rrb->bo); | |||
} | |||
/* cmdbuffer */ | |||
@@ -1249,6 +1184,8 @@ GLboolean rcommonEnsureCmdBufSpace(radeonContextPtr rmesa, int dwords, const cha | |||
void rcommonInitCmdBuf(radeonContextPtr rmesa) | |||
{ | |||
GLuint size; | |||
struct drm_radeon_gem_info mminfo = { 0 }; | |||
/* Initialize command buffer */ | |||
size = 256 * driQueryOptioni(&rmesa->optionCache, | |||
"command_buffer_size"); | |||
@@ -1266,12 +1203,8 @@ void rcommonInitCmdBuf(radeonContextPtr rmesa) | |||
"Allocating %d bytes command buffer (max state is %d bytes)\n", | |||
size * 4, rmesa->hw.max_state_size * 4); | |||
if (rmesa->radeonScreen->kernel_mm) { | |||
int fd = rmesa->radeonScreen->driScreen->fd; | |||
rmesa->cmdbuf.csm = radeon_cs_manager_gem_ctor(fd); | |||
} else { | |||
rmesa->cmdbuf.csm = radeon_cs_manager_legacy_ctor(rmesa); | |||
} | |||
rmesa->cmdbuf.csm = | |||
radeon_cs_manager_gem_ctor(rmesa->radeonScreen->driScreen->fd); | |||
if (rmesa->cmdbuf.csm == NULL) { | |||
/* FIXME: fatal error */ | |||
return; | |||
@@ -1283,31 +1216,23 @@ void rcommonInitCmdBuf(radeonContextPtr rmesa) | |||
radeon_cs_space_set_flush(rmesa->cmdbuf.cs, | |||
(void (*)(void *))rmesa->glCtx->Driver.Flush, rmesa->glCtx); | |||
if (!rmesa->radeonScreen->kernel_mm) { | |||
radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM, rmesa->radeonScreen->texSize[0]); | |||
radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT, rmesa->radeonScreen->gartTextures.size); | |||
} else { | |||
struct drm_radeon_gem_info mminfo = { 0 }; | |||
if (!drmCommandWriteRead(rmesa->dri.fd, DRM_RADEON_GEM_INFO, &mminfo, sizeof(mminfo))) | |||
{ | |||
radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM, mminfo.vram_visible); | |||
radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT, mminfo.gart_size); | |||
} | |||
if (!drmCommandWriteRead(rmesa->dri.fd, DRM_RADEON_GEM_INFO, | |||
&mminfo, sizeof(mminfo))) { | |||
radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM, | |||
mminfo.vram_visible); | |||
radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT, | |||
mminfo.gart_size); | |||
} | |||
} | |||
/** | |||
* Destroy the command buffer | |||
*/ | |||
void rcommonDestroyCmdBuf(radeonContextPtr rmesa) | |||
{ | |||
radeon_cs_destroy(rmesa->cmdbuf.cs); | |||
if (rmesa->radeonScreen->driScreen->dri2.enabled || rmesa->radeonScreen->kernel_mm) { | |||
radeon_cs_manager_gem_dtor(rmesa->cmdbuf.csm); | |||
} else { | |||
radeon_cs_manager_legacy_dtor(rmesa->cmdbuf.csm); | |||
} | |||
radeon_cs_manager_gem_dtor(rmesa->cmdbuf.csm); | |||
} | |||
void rcommonBeginBatch(radeonContextPtr rmesa, int n, |
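The surviving rcommonInitCmdBuf() path above sizes the CS VRAM/GTT limits from the kernel's GEM info ioctl rather than the old SAREA-derived texture heap sizes. A standalone sketch of that query, assuming the libdrm headers and a radeon DRM node at /dev/dri/card0 (the device path and the printing are illustrative only, not driver code):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <xf86drm.h>
#include <radeon_drm.h>

int main(void)
{
        struct drm_radeon_gem_info mminfo;
        int fd = open("/dev/dri/card0", O_RDWR);   /* device path is an assumption */

        if (fd < 0)
                return 1;

        memset(&mminfo, 0, sizeof(mminfo));
        /* Same ioctl the driver issues; on success the kernel reports the
         * CPU-visible VRAM size and the GTT size used for the CS limits. */
        if (drmCommandWriteRead(fd, DRM_RADEON_GEM_INFO,
                                &mminfo, sizeof(mminfo)) == 0) {
                printf("vram_visible: %llu bytes\n",
                       (unsigned long long)mminfo.vram_visible);
                printf("gart_size:    %llu bytes\n",
                       (unsigned long long)mminfo.gart_size);
        }

        close(fd);
        return 0;
}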
@@ -372,7 +372,7 @@ GLboolean radeonUnbindContext(__DRIcontext * driContextPriv) | |||
static void | |||
radeon_make_kernel_renderbuffer_current(radeonContextPtr radeon, | |||
radeon_make_renderbuffer_current(radeonContextPtr radeon, | |||
struct gl_framebuffer *draw) | |||
{ | |||
/* if radeon->fake */ | |||
@@ -428,74 +428,6 @@ radeon_make_kernel_renderbuffer_current(radeonContextPtr radeon, | |||
} | |||
} | |||
static void | |||
radeon_make_renderbuffer_current(radeonContextPtr radeon, | |||
struct gl_framebuffer *draw) | |||
{ | |||
int size = 4096*4096*4; | |||
/* if radeon->fake */ | |||
struct radeon_renderbuffer *rb; | |||
if (radeon->radeonScreen->kernel_mm) { | |||
radeon_make_kernel_renderbuffer_current(radeon, draw); | |||
return; | |||
} | |||
if ((rb = (void *)draw->Attachment[BUFFER_FRONT_LEFT].Renderbuffer)) { | |||
if (!rb->bo) { | |||
rb->bo = radeon_bo_open(radeon->radeonScreen->bom, | |||
radeon->radeonScreen->frontOffset + | |||
radeon->radeonScreen->fbLocation, | |||
size, | |||
4096, | |||
RADEON_GEM_DOMAIN_VRAM, | |||
0); | |||
} | |||
rb->cpp = radeon->radeonScreen->cpp; | |||
rb->pitch = radeon->radeonScreen->frontPitch * rb->cpp; | |||
} | |||
if ((rb = (void *)draw->Attachment[BUFFER_BACK_LEFT].Renderbuffer)) { | |||
if (!rb->bo) { | |||
rb->bo = radeon_bo_open(radeon->radeonScreen->bom, | |||
radeon->radeonScreen->backOffset + | |||
radeon->radeonScreen->fbLocation, | |||
size, | |||
4096, | |||
RADEON_GEM_DOMAIN_VRAM, | |||
0); | |||
} | |||
rb->cpp = radeon->radeonScreen->cpp; | |||
rb->pitch = radeon->radeonScreen->backPitch * rb->cpp; | |||
} | |||
if ((rb = (void *)draw->Attachment[BUFFER_DEPTH].Renderbuffer)) { | |||
if (!rb->bo) { | |||
rb->bo = radeon_bo_open(radeon->radeonScreen->bom, | |||
radeon->radeonScreen->depthOffset + | |||
radeon->radeonScreen->fbLocation, | |||
size, | |||
4096, | |||
RADEON_GEM_DOMAIN_VRAM, | |||
0); | |||
} | |||
rb->cpp = radeon->radeonScreen->cpp; | |||
rb->pitch = radeon->radeonScreen->depthPitch * rb->cpp; | |||
} | |||
if ((rb = (void *)draw->Attachment[BUFFER_STENCIL].Renderbuffer)) { | |||
if (!rb->bo) { | |||
rb->bo = radeon_bo_open(radeon->radeonScreen->bom, | |||
radeon->radeonScreen->depthOffset + | |||
radeon->radeonScreen->fbLocation, | |||
size, | |||
4096, | |||
RADEON_GEM_DOMAIN_VRAM, | |||
0); | |||
} | |||
rb->cpp = radeon->radeonScreen->cpp; | |||
rb->pitch = radeon->radeonScreen->depthPitch * rb->cpp; | |||
} | |||
} | |||
static unsigned | |||
radeon_bits_per_pixel(const struct radeon_renderbuffer *rb) | |||
{ |
@@ -105,9 +105,6 @@ static void r100_get_lock(radeonContextPtr radeon) | |||
if (sarea->ctx_owner != rmesa->radeon.dri.hwContext) { | |||
sarea->ctx_owner = rmesa->radeon.dri.hwContext; | |||
if (!radeon->radeonScreen->kernel_mm) | |||
radeon_bo_legacy_texture_age(radeon->radeonScreen->bom); | |||
} | |||
} | |||
@@ -331,8 +328,7 @@ r100CreateContext( gl_api api, | |||
ctx->Extensions.OES_EGL_image = true; | |||
#endif | |||
ctx->Extensions.EXT_framebuffer_object = | |||
rmesa->radeon.radeonScreen->kernel_mm; | |||
ctx->Extensions.EXT_framebuffer_object = true; | |||
ctx->Extensions.ARB_texture_cube_map = | |||
rmesa->radeon.radeonScreen->drmSupportsCubeMapsR100; | |||
@@ -345,16 +341,14 @@ r100CreateContext( gl_api api, | |||
ctx->Extensions.EXT_texture_compression_s3tc = true; | |||
} | |||
ctx->Extensions.NV_texture_rectangle = rmesa->radeon.radeonScreen->kernel_mm | |||
|| rmesa->radeon.dri.drmMinor >= 9; | |||
ctx->Extensions.ARB_occlusion_query = rmesa->radeon.radeonScreen->kernel_mm; | |||
ctx->Extensions.NV_texture_rectangle = true; | |||
ctx->Extensions.ARB_occlusion_query = true; | |||
/* XXX these should really go right after _mesa_init_driver_functions() */ | |||
radeon_fbo_init(&rmesa->radeon); | |||
radeonInitSpanFuncs( ctx ); | |||
radeonInitIoctlFuncs( ctx ); | |||
radeonInitStateFuncs( ctx , rmesa->radeon.radeonScreen->kernel_mm ); | |||
radeonInitStateFuncs( ctx ); | |||
radeonInitState( rmesa ); | |||
radeonInitSwtcl( ctx ); | |||
@@ -91,8 +91,7 @@ void radeonSetUpAtomList( r100ContextPtr rmesa ) | |||
insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.lit[i]); | |||
for (i = 0; i < 6; ++i) | |||
insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.ucp[i]); | |||
if (rmesa->radeon.radeonScreen->kernel_mm) | |||
insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.stp); | |||
insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.stp); | |||
insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.eye); | |||
insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.grd); | |||
insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.fog); | |||
@@ -102,9 +101,6 @@ void radeonSetUpAtomList( r100ContextPtr rmesa ) | |||
static void radeonEmitScissor(r100ContextPtr rmesa) | |||
{ | |||
BATCH_LOCALS(&rmesa->radeon); | |||
if (!rmesa->radeon.radeonScreen->kernel_mm) { | |||
return; | |||
} | |||
if (rmesa->radeon.state.scissor.enabled) { | |||
BEGIN_BATCH(6); | |||
OUT_BATCH(CP_PACKET0(RADEON_PP_CNTL, 0)); | |||
@@ -142,11 +138,7 @@ extern void radeonEmitVbufPrim( r100ContextPtr rmesa, | |||
#if RADEON_OLD_PACKETS | |||
BEGIN_BATCH(8); | |||
OUT_BATCH_PACKET3_CLIP(RADEON_CP_PACKET3_3D_RNDR_GEN_INDX_PRIM, 3); | |||
if (!rmesa->radeon.radeonScreen->kernel_mm) { | |||
OUT_BATCH_RELOC(rmesa->ioctl.vertex_offset, rmesa->ioctl.bo, rmesa->ioctl.vertex_offset, RADEON_GEM_DOMAIN_GTT, 0, 0); | |||
} else { | |||
OUT_BATCH(rmesa->ioctl.vertex_offset); | |||
} | |||
OUT_BATCH(rmesa->ioctl.vertex_offset); | |||
OUT_BATCH(vertex_nr); | |||
OUT_BATCH(vertex_format); | |||
@@ -155,12 +147,10 @@ extern void radeonEmitVbufPrim( r100ContextPtr rmesa, | |||
RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE | | |||
(vertex_nr << RADEON_CP_VC_CNTL_NUM_SHIFT)); | |||
if (rmesa->radeon.radeonScreen->kernel_mm) { | |||
radeon_cs_write_reloc(rmesa->radeon.cmdbuf.cs, | |||
rmesa->ioctl.bo, | |||
RADEON_GEM_DOMAIN_GTT, | |||
0, 0); | |||
} | |||
radeon_cs_write_reloc(rmesa->radeon.cmdbuf.cs, | |||
rmesa->ioctl.bo, | |||
RADEON_GEM_DOMAIN_GTT, | |||
0, 0); | |||
END_BATCH(); | |||
@@ -195,9 +185,7 @@ void radeonFlushElts( struct gl_context *ctx ) | |||
nr = rmesa->tcl.elt_used; | |||
#if RADEON_OLD_PACKETS | |||
if (rmesa->radeon.radeonScreen->kernel_mm) { | |||
dwords -= 2; | |||
} | |||
dwords -= 2; | |||
#endif | |||
#if RADEON_OLD_PACKETS | |||
@@ -212,12 +200,10 @@ void radeonFlushElts( struct gl_context *ctx ) | |||
rmesa->radeon.cmdbuf.cs->section_cdw += dwords; | |||
#if RADEON_OLD_PACKETS | |||
if (rmesa->radeon.radeonScreen->kernel_mm) { | |||
radeon_cs_write_reloc(rmesa->radeon.cmdbuf.cs, | |||
rmesa->ioctl.bo, | |||
RADEON_GEM_DOMAIN_GTT, | |||
0, 0); | |||
} | |||
radeon_cs_write_reloc(rmesa->radeon.cmdbuf.cs, | |||
rmesa->ioctl.bo, | |||
RADEON_GEM_DOMAIN_GTT, | |||
0, 0); | |||
#endif | |||
END_BATCH(); | |||
@@ -254,11 +240,7 @@ GLushort *radeonAllocEltsOpenEnded( r100ContextPtr rmesa, | |||
#if RADEON_OLD_PACKETS | |||
BEGIN_BATCH_NO_AUTOSTATE(2+ELTS_BUFSZ(align_min_nr)/4); | |||
OUT_BATCH_PACKET3_CLIP(RADEON_CP_PACKET3_3D_RNDR_GEN_INDX_PRIM, 0); | |||
if (!rmesa->radeon.radeonScreen->kernel_mm) { | |||
OUT_BATCH_RELOC(rmesa->ioctl.vertex_offset, rmesa->ioctl.bo, rmesa->ioctl.vertex_offset, RADEON_GEM_DOMAIN_GTT, 0, 0); | |||
} else { | |||
OUT_BATCH(rmesa->ioctl.vertex_offset); | |||
} | |||
OUT_BATCH(rmesa->ioctl.vertex_offset); | |||
OUT_BATCH(rmesa->ioctl.vertex_max); | |||
OUT_BATCH(vertex_format); | |||
OUT_BATCH(primitive | | |||
@@ -343,41 +325,7 @@ void radeonEmitAOS( r100ContextPtr rmesa, | |||
OUT_BATCH_PACKET3(RADEON_CP_PACKET3_3D_LOAD_VBPNTR, sz - 1); | |||
OUT_BATCH(nr); | |||
if (!rmesa->radeon.radeonScreen->kernel_mm) { | |||
for (i = 0; i + 1 < nr; i += 2) { | |||
OUT_BATCH((rmesa->radeon.tcl.aos[i].components << 0) | | |||
(rmesa->radeon.tcl.aos[i].stride << 8) | | |||
(rmesa->radeon.tcl.aos[i + 1].components << 16) | | |||
(rmesa->radeon.tcl.aos[i + 1].stride << 24)); | |||
voffset = rmesa->radeon.tcl.aos[i + 0].offset + | |||
offset * 4 * rmesa->radeon.tcl.aos[i + 0].stride; | |||
OUT_BATCH_RELOC(voffset, | |||
rmesa->radeon.tcl.aos[i].bo, | |||
voffset, | |||
RADEON_GEM_DOMAIN_GTT, | |||
0, 0); | |||
voffset = rmesa->radeon.tcl.aos[i + 1].offset + | |||
offset * 4 * rmesa->radeon.tcl.aos[i + 1].stride; | |||
OUT_BATCH_RELOC(voffset, | |||
rmesa->radeon.tcl.aos[i+1].bo, | |||
voffset, | |||
RADEON_GEM_DOMAIN_GTT, | |||
0, 0); | |||
} | |||
if (nr & 1) { | |||
OUT_BATCH((rmesa->radeon.tcl.aos[nr - 1].components << 0) | | |||
(rmesa->radeon.tcl.aos[nr - 1].stride << 8)); | |||
voffset = rmesa->radeon.tcl.aos[nr - 1].offset + | |||
offset * 4 * rmesa->radeon.tcl.aos[nr - 1].stride; | |||
OUT_BATCH_RELOC(voffset, | |||
rmesa->radeon.tcl.aos[nr - 1].bo, | |||
voffset, | |||
RADEON_GEM_DOMAIN_GTT, | |||
0, 0); | |||
} | |||
} else { | |||
{ | |||
for (i = 0; i + 1 < nr; i += 2) { | |||
OUT_BATCH((rmesa->radeon.tcl.aos[i].components << 0) | | |||
(rmesa->radeon.tcl.aos[i].stride << 8) | | |||
@@ -432,129 +380,6 @@ void radeonEmitAOS( r100ContextPtr rmesa, | |||
*/ | |||
#define RADEON_MAX_CLEARS 256 | |||
static void radeonKernelClear(struct gl_context *ctx, GLuint flags) | |||
{ | |||
r100ContextPtr rmesa = R100_CONTEXT(ctx); | |||
__DRIdrawable *dPriv = radeon_get_drawable(&rmesa->radeon); | |||
drm_radeon_sarea_t *sarea = rmesa->radeon.sarea; | |||
uint32_t clear; | |||
GLint ret, i; | |||
GLint cx, cy, cw, ch; | |||
radeonEmitState(&rmesa->radeon); | |||
LOCK_HARDWARE( &rmesa->radeon ); | |||
/* compute region after locking: */ | |||
cx = ctx->DrawBuffer->_Xmin; | |||
cy = ctx->DrawBuffer->_Ymin; | |||
cw = ctx->DrawBuffer->_Xmax - cx; | |||
ch = ctx->DrawBuffer->_Ymax - cy; | |||
/* Flip top to bottom */ | |||
cx += dPriv->x; | |||
cy = dPriv->y + dPriv->h - cy - ch; | |||
/* Throttle the number of clear ioctls we do. | |||
*/ | |||
while ( 1 ) { | |||
int ret; | |||
drm_radeon_getparam_t gp; | |||
gp.param = RADEON_PARAM_LAST_CLEAR; | |||
gp.value = (int *)&clear; | |||
ret = drmCommandWriteRead( rmesa->radeon.dri.fd, | |||
DRM_RADEON_GETPARAM, &gp, sizeof(gp) ); | |||
if ( ret ) { | |||
fprintf( stderr, "%s: drm_radeon_getparam_t: %d\n", __FUNCTION__, ret ); | |||
exit(1); | |||
} | |||
if ( sarea->last_clear - clear <= RADEON_MAX_CLEARS ) { | |||
break; | |||
} | |||
if ( rmesa->radeon.do_usleeps ) { | |||
UNLOCK_HARDWARE( &rmesa->radeon ); | |||
DO_USLEEP( 1 ); | |||
LOCK_HARDWARE( &rmesa->radeon ); | |||
} | |||
} | |||
/* Send current state to the hardware */ | |||
rcommonFlushCmdBufLocked( &rmesa->radeon, __FUNCTION__ ); | |||
for ( i = 0 ; i < dPriv->numClipRects ; ) { | |||
GLint nr = MIN2( i + RADEON_NR_SAREA_CLIPRECTS, dPriv->numClipRects ); | |||
drm_clip_rect_t *box = dPriv->pClipRects; | |||
drm_clip_rect_t *b = rmesa->radeon.sarea->boxes; | |||
drm_radeon_clear_t clear; | |||
drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS]; | |||
GLint n = 0; | |||
if (cw != dPriv->w || ch != dPriv->h) { | |||
/* clear subregion */ | |||
for ( ; i < nr ; i++ ) { | |||
GLint x = box[i].x1; | |||
GLint y = box[i].y1; | |||
GLint w = box[i].x2 - x; | |||
GLint h = box[i].y2 - y; | |||
if ( x < cx ) w -= cx - x, x = cx; | |||
if ( y < cy ) h -= cy - y, y = cy; | |||
if ( x + w > cx + cw ) w = cx + cw - x; | |||
if ( y + h > cy + ch ) h = cy + ch - y; | |||
if ( w <= 0 ) continue; | |||
if ( h <= 0 ) continue; | |||
b->x1 = x; | |||
b->y1 = y; | |||
b->x2 = x + w; | |||
b->y2 = y + h; | |||
b++; | |||
n++; | |||
} | |||
} else { | |||
/* clear whole buffer */ | |||
for ( ; i < nr ; i++ ) { | |||
*b++ = box[i]; | |||
n++; | |||
} | |||
} | |||
rmesa->radeon.sarea->nbox = n; | |||
clear.flags = flags; | |||
clear.clear_color = rmesa->radeon.state.color.clear; | |||
clear.clear_depth = rmesa->radeon.state.depth.clear; | |||
clear.color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK]; | |||
clear.depth_mask = rmesa->radeon.state.stencil.clear; | |||
clear.depth_boxes = depth_boxes; | |||
n--; | |||
b = rmesa->radeon.sarea->boxes; | |||
for ( ; n >= 0 ; n-- ) { | |||
depth_boxes[n].f[CLEAR_X1] = (float)b[n].x1; | |||
depth_boxes[n].f[CLEAR_Y1] = (float)b[n].y1; | |||
depth_boxes[n].f[CLEAR_X2] = (float)b[n].x2; | |||
depth_boxes[n].f[CLEAR_Y2] = (float)b[n].y2; | |||
depth_boxes[n].f[CLEAR_DEPTH] = | |||
(float)rmesa->radeon.state.depth.clear; | |||
} | |||
ret = drmCommandWrite( rmesa->radeon.dri.fd, DRM_RADEON_CLEAR, | |||
&clear, sizeof(drm_radeon_clear_t)); | |||
if ( ret ) { | |||
UNLOCK_HARDWARE( &rmesa->radeon ); | |||
fprintf( stderr, "DRM_RADEON_CLEAR: return = %d\n", ret ); | |||
exit( 1 ); | |||
} | |||
} | |||
UNLOCK_HARDWARE( &rmesa->radeon ); | |||
} | |||
static void radeonClear( struct gl_context *ctx, GLbitfield mask ) | |||
{ | |||
r100ContextPtr rmesa = R100_CONTEXT(ctx); | |||
@@ -618,12 +443,7 @@ static void radeonClear( struct gl_context *ctx, GLbitfield mask ) | |||
} | |||
} | |||
if (rmesa->radeon.radeonScreen->kernel_mm) | |||
radeonUserClear(ctx, orig_mask); | |||
else { | |||
radeonKernelClear(ctx, flags); | |||
rmesa->radeon.hw.all_dirty = GL_TRUE; | |||
} | |||
radeonUserClear(ctx, orig_mask); | |||
} | |||
void radeonInitIoctlFuncs( struct gl_context *ctx ) |
@@ -158,23 +158,13 @@ static inline uint32_t cmdpacket3(int cmd_type) | |||
} | |||
#define OUT_BATCH_PACKET3(packet, num_extra) do { \ | |||
if (!b_l_rmesa->radeonScreen->kernel_mm) { \ | |||
OUT_BATCH(cmdpacket3(RADEON_CMD_PACKET3)); \ | |||
OUT_BATCH(CP_PACKET3((packet), (num_extra))); \ | |||
} else { \ | |||
OUT_BATCH(CP_PACKET2); \ | |||
OUT_BATCH(CP_PACKET3((packet), (num_extra))); \ | |||
} \ | |||
OUT_BATCH(CP_PACKET2); \ | |||
OUT_BATCH(CP_PACKET3((packet), (num_extra))); \ | |||
} while(0) | |||
#define OUT_BATCH_PACKET3_CLIP(packet, num_extra) do { \ | |||
if (!b_l_rmesa->radeonScreen->kernel_mm) { \ | |||
OUT_BATCH(cmdpacket3(RADEON_CMD_PACKET3_CLIP)); \ | |||
OUT_BATCH(CP_PACKET3((packet), (num_extra))); \ | |||
} else { \ | |||
OUT_BATCH(CP_PACKET2); \ | |||
OUT_BATCH(CP_PACKET3((packet), (num_extra))); \ | |||
} \ | |||
OUT_BATCH(CP_PACKET2); \ | |||
OUT_BATCH(CP_PACKET3((packet), (num_extra))); \ | |||
} while(0) | |||
@@ -188,25 +188,21 @@ static void radeonEndQuery(struct gl_context *ctx, struct gl_query_object *q) | |||
static void radeonCheckQuery(struct gl_context *ctx, struct gl_query_object *q) | |||
{ | |||
radeon_print(RADEON_STATE, RADEON_TRACE, "%s: query id %d\n", __FUNCTION__, q->Id); | |||
\ | |||
#ifdef DRM_RADEON_GEM_BUSY | |||
radeonContextPtr radeon = RADEON_CONTEXT(ctx); | |||
if (radeon->radeonScreen->kernel_mm) { | |||
struct radeon_query_object *query = (struct radeon_query_object *)q; | |||
uint32_t domain; | |||
struct radeon_query_object *query = (struct radeon_query_object *)q; | |||
uint32_t domain; | |||
/* Need to perform a flush, as per ARB_occlusion_query spec */ | |||
if (radeon_bo_is_referenced_by_cs(query->bo, radeon->cmdbuf.cs)) { | |||
ctx->Driver.Flush(ctx); | |||
} | |||
/* Need to perform a flush, as per ARB_occlusion_query spec */ | |||
if (radeon_bo_is_referenced_by_cs(query->bo, radeon->cmdbuf.cs)) { | |||
ctx->Driver.Flush(ctx); | |||
} | |||
if (radeon_bo_is_busy(query->bo, &domain) == 0) { | |||
radeonQueryGetResult(ctx, q); | |||
query->Base.Ready = GL_TRUE; | |||
} | |||
} else { | |||
radeonWaitQuery(ctx, q); | |||
if (radeon_bo_is_busy(query->bo, &domain) == 0) { | |||
radeonQueryGetResult(ctx, q); | |||
query->Base.Ready = GL_TRUE; | |||
} | |||
#else | |||
radeonWaitQuery(ctx, q); |
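The occlusion-query chunk above now always takes the non-blocking GEM path: flush if the query's buffer is still referenced by the command stream, then poll it instead of stalling. A minimal sketch of that check, assuming libdrm_radeon's radeon_bo_is_referenced_by_cs() and radeon_bo_is_busy(); the flush callback here is a stand-in for ctx->Driver.Flush, not the driver's actual plumbing:

#include <stdbool.h>
#include <stdint.h>
#include <radeon_bo.h>
#include <radeon_cs.h>

static bool query_result_ready(struct radeon_cs *cs, struct radeon_bo *bo,
                               void (*flush)(void *), void *flush_data)
{
        uint32_t domain;

        /* Per ARB_occlusion_query, the commands writing the ZPASS count must
         * be submitted before the result can ever become available. */
        if (radeon_bo_is_referenced_by_cs(bo, cs))
                flush(flush_data);

        /* Zero means the kernel no longer considers the BO busy, so the
         * result can be read back without blocking. */
        return radeon_bo_is_busy(bo, &domain) == 0;
}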
@@ -109,7 +109,6 @@ typedef struct radeon_screen { | |||
int num_gb_pipes; | |||
int num_z_pipes; | |||
int kernel_mm; | |||
drm_radeon_sarea_t *sarea; /* Private SAREA data */ | |||
struct radeon_bo_manager *bom; | |||
@@ -550,31 +550,6 @@ static void radeonPolygonOffset( struct gl_context *ctx, | |||
rmesa->hw.zbs.cmd[ZBS_SE_ZBIAS_CONSTANT] = constant.ui32; | |||
} | |||
static void radeonPolygonStipplePreKMS( struct gl_context *ctx, const GLubyte *mask ) | |||
{ | |||
r100ContextPtr rmesa = R100_CONTEXT(ctx); | |||
GLuint i; | |||
drm_radeon_stipple_t stipple; | |||
/* Must flip pattern upside down. | |||
*/ | |||
for ( i = 0 ; i < 32 ; i++ ) { | |||
rmesa->state.stipple.mask[31 - i] = ((GLuint *) mask)[i]; | |||
} | |||
/* TODO: push this into cmd mechanism | |||
*/ | |||
radeon_firevertices(&rmesa->radeon); | |||
LOCK_HARDWARE( &rmesa->radeon ); | |||
/* FIXME: Use window x,y offsets into stipple RAM. | |||
*/ | |||
stipple.mask = rmesa->state.stipple.mask; | |||
drmCommandWrite( rmesa->radeon.dri.fd, DRM_RADEON_STIPPLE, | |||
&stipple, sizeof(drm_radeon_stipple_t) ); | |||
UNLOCK_HARDWARE( &rmesa->radeon ); | |||
} | |||
static void radeonPolygonMode( struct gl_context *ctx, GLenum face, GLenum mode ) | |||
{ | |||
r100ContextPtr rmesa = R100_CONTEXT(ctx); | |||
@@ -2244,7 +2219,7 @@ static void radeonPolygonStipple( struct gl_context *ctx, const GLubyte *mask ) | |||
* Many of the ctx->Driver functions might have been initialized to | |||
* software defaults in the earlier _mesa_init_driver_functions() call. | |||
*/ | |||
void radeonInitStateFuncs( struct gl_context *ctx , GLboolean dri2 ) | |||
void radeonInitStateFuncs( struct gl_context *ctx ) | |||
{ | |||
ctx->Driver.UpdateState = radeonInvalidateState; | |||
ctx->Driver.LightingSpaceChange = radeonLightingSpaceChange; | |||
@@ -2253,8 +2228,7 @@ void radeonInitStateFuncs( struct gl_context *ctx , GLboolean dri2 ) | |||
ctx->Driver.ReadBuffer = radeonReadBuffer; | |||
ctx->Driver.CopyPixels = _mesa_meta_CopyPixels; | |||
ctx->Driver.DrawPixels = _mesa_meta_DrawPixels; | |||
if (dri2) | |||
ctx->Driver.ReadPixels = radeonReadPixels; | |||
ctx->Driver.ReadPixels = radeonReadPixels; | |||
ctx->Driver.AlphaFunc = radeonAlphaFunc; | |||
ctx->Driver.BlendEquationSeparate = radeonBlendEquationSeparate; | |||
@@ -2279,10 +2253,7 @@ void radeonInitStateFuncs( struct gl_context *ctx , GLboolean dri2 ) | |||
ctx->Driver.LogicOpcode = radeonLogicOpCode; | |||
ctx->Driver.PolygonMode = radeonPolygonMode; | |||
ctx->Driver.PolygonOffset = radeonPolygonOffset; | |||
if (dri2) | |||
ctx->Driver.PolygonStipple = radeonPolygonStipple; | |||
else | |||
ctx->Driver.PolygonStipple = radeonPolygonStipplePreKMS; | |||
ctx->Driver.PolygonStipple = radeonPolygonStipple; | |||
ctx->Driver.RenderMode = radeonRenderMode; | |||
ctx->Driver.Scissor = radeonScissor; | |||
ctx->Driver.ShadeModel = radeonShadeModel; |
@@ -40,7 +40,7 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | |||
#include "radeon_context.h" | |||
extern void radeonInitState( r100ContextPtr rmesa ); | |||
extern void radeonInitStateFuncs( struct gl_context *ctx , GLboolean dri2); | |||
extern void radeonInitStateFuncs( struct gl_context *ctx ); | |||
extern void radeonUpdateMaterial( struct gl_context *ctx ); | |||
@@ -160,16 +160,7 @@ static struct { | |||
*/ | |||
static int cmdpkt( r100ContextPtr rmesa, int id ) | |||
{ | |||
drm_radeon_cmd_header_t h; | |||
if (rmesa->radeon.radeonScreen->kernel_mm) { | |||
return CP_PACKET0(packet[id].start, packet[id].len - 1); | |||
} else { | |||
h.i = 0; | |||
h.packet.cmd_type = RADEON_CMD_PACKET; | |||
h.packet.packet_id = id; | |||
} | |||
return h.i; | |||
return CP_PACKET0(packet[id].start, packet[id].len - 1); | |||
} | |||
static int cmdvec( int offset, int stride, int count ) | |||
@@ -216,37 +207,17 @@ CHECK( tex0_mm, ctx->Texture.Unit[0]._ReallyEnabled, 3 ) | |||
CHECK( tex1_mm, ctx->Texture.Unit[1]._ReallyEnabled, 3 ) | |||
/* need this for the cubic_map on disabled unit 2 bug, maybe r100 only? */ | |||
CHECK( tex2_mm, ctx->Texture._EnabledUnits, 3 ) | |||
CHECK( tex0, ctx->Texture.Unit[0]._ReallyEnabled, 2 ) | |||
CHECK( tex1, ctx->Texture.Unit[1]._ReallyEnabled, 2 ) | |||
CHECK( tex2, ctx->Texture._EnabledUnits, 2 ) | |||
CHECK( cube0, (ctx->Texture.Unit[0]._ReallyEnabled & TEXTURE_CUBE_BIT), 3 + 3*5 - CUBE_STATE_SIZE ) | |||
CHECK( cube1, (ctx->Texture.Unit[1]._ReallyEnabled & TEXTURE_CUBE_BIT), 3 + 3*5 - CUBE_STATE_SIZE ) | |||
CHECK( cube2, (ctx->Texture.Unit[2]._ReallyEnabled & TEXTURE_CUBE_BIT), 3 + 3*5 - CUBE_STATE_SIZE ) | |||
CHECK( cube0_mm, (ctx->Texture.Unit[0]._ReallyEnabled & TEXTURE_CUBE_BIT), 2 + 4*5 - CUBE_STATE_SIZE ) | |||
CHECK( cube1_mm, (ctx->Texture.Unit[1]._ReallyEnabled & TEXTURE_CUBE_BIT), 2 + 4*5 - CUBE_STATE_SIZE ) | |||
CHECK( cube2_mm, (ctx->Texture.Unit[2]._ReallyEnabled & TEXTURE_CUBE_BIT), 2 + 4*5 - CUBE_STATE_SIZE ) | |||
CHECK( fog, ctx->Fog.Enabled, 0 ) | |||
CHECK( fog_add4, ctx->Fog.Enabled, 4 ) | |||
TCL_CHECK( tcl, GL_TRUE, 0 ) | |||
TCL_CHECK( tcl_add4, GL_TRUE, 4 ) | |||
TCL_CHECK( tcl_tex0, ctx->Texture.Unit[0]._ReallyEnabled, 0 ) | |||
TCL_CHECK( tcl_tex1, ctx->Texture.Unit[1]._ReallyEnabled, 0 ) | |||
TCL_CHECK( tcl_tex2, ctx->Texture.Unit[2]._ReallyEnabled, 0 ) | |||
TCL_CHECK( tcl_tex0_add4, ctx->Texture.Unit[0]._ReallyEnabled, 4 ) | |||
TCL_CHECK( tcl_tex1_add4, ctx->Texture.Unit[1]._ReallyEnabled, 4 ) | |||
TCL_CHECK( tcl_tex2_add4, ctx->Texture.Unit[2]._ReallyEnabled, 4 ) | |||
TCL_CHECK( tcl_lighting, ctx->Light.Enabled, 0 ) | |||
TCL_CHECK( tcl_lighting_add4, ctx->Light.Enabled, 4 ) | |||
TCL_CHECK( tcl_eyespace_or_lighting, ctx->_NeedEyeCoords || ctx->Light.Enabled, 0 ) | |||
TCL_CHECK( tcl_eyespace_or_lighting_add4, ctx->_NeedEyeCoords || ctx->Light.Enabled, 4 ) | |||
TCL_CHECK( tcl_lit0, ctx->Light.Enabled && ctx->Light.Light[0].Enabled, 0 ) | |||
TCL_CHECK( tcl_lit1, ctx->Light.Enabled && ctx->Light.Light[1].Enabled, 0 ) | |||
TCL_CHECK( tcl_lit2, ctx->Light.Enabled && ctx->Light.Light[2].Enabled, 0 ) | |||
TCL_CHECK( tcl_lit3, ctx->Light.Enabled && ctx->Light.Light[3].Enabled, 0 ) | |||
TCL_CHECK( tcl_lit4, ctx->Light.Enabled && ctx->Light.Light[4].Enabled, 0 ) | |||
TCL_CHECK( tcl_lit5, ctx->Light.Enabled && ctx->Light.Light[5].Enabled, 0 ) | |||
TCL_CHECK( tcl_lit6, ctx->Light.Enabled && ctx->Light.Light[6].Enabled, 0 ) | |||
TCL_CHECK( tcl_lit7, ctx->Light.Enabled && ctx->Light.Light[7].Enabled, 0 ) | |||
TCL_CHECK( tcl_lit0_add6, ctx->Light.Enabled && ctx->Light.Light[0].Enabled, 6 ) | |||
TCL_CHECK( tcl_lit1_add6, ctx->Light.Enabled && ctx->Light.Light[1].Enabled, 6 ) | |||
TCL_CHECK( tcl_lit2_add6, ctx->Light.Enabled && ctx->Light.Light[2].Enabled, 6 ) | |||
@@ -255,19 +226,12 @@ TCL_CHECK( tcl_lit4_add6, ctx->Light.Enabled && ctx->Light.Light[4].Enabled, 6 ) | |||
TCL_CHECK( tcl_lit5_add6, ctx->Light.Enabled && ctx->Light.Light[5].Enabled, 6 ) | |||
TCL_CHECK( tcl_lit6_add6, ctx->Light.Enabled && ctx->Light.Light[6].Enabled, 6 ) | |||
TCL_CHECK( tcl_lit7_add6, ctx->Light.Enabled && ctx->Light.Light[7].Enabled, 6 ) | |||
TCL_CHECK( tcl_ucp0, (ctx->Transform.ClipPlanesEnabled & 0x1), 0 ) | |||
TCL_CHECK( tcl_ucp1, (ctx->Transform.ClipPlanesEnabled & 0x2), 0 ) | |||
TCL_CHECK( tcl_ucp2, (ctx->Transform.ClipPlanesEnabled & 0x4), 0 ) | |||
TCL_CHECK( tcl_ucp3, (ctx->Transform.ClipPlanesEnabled & 0x8), 0 ) | |||
TCL_CHECK( tcl_ucp4, (ctx->Transform.ClipPlanesEnabled & 0x10), 0 ) | |||
TCL_CHECK( tcl_ucp5, (ctx->Transform.ClipPlanesEnabled & 0x20), 0 ) | |||
TCL_CHECK( tcl_ucp0_add4, (ctx->Transform.ClipPlanesEnabled & 0x1), 4 ) | |||
TCL_CHECK( tcl_ucp1_add4, (ctx->Transform.ClipPlanesEnabled & 0x2), 4 ) | |||
TCL_CHECK( tcl_ucp2_add4, (ctx->Transform.ClipPlanesEnabled & 0x4), 4 ) | |||
TCL_CHECK( tcl_ucp3_add4, (ctx->Transform.ClipPlanesEnabled & 0x8), 4 ) | |||
TCL_CHECK( tcl_ucp4_add4, (ctx->Transform.ClipPlanesEnabled & 0x10), 4 ) | |||
TCL_CHECK( tcl_ucp5_add4, (ctx->Transform.ClipPlanesEnabled & 0x20), 4 ) | |||
TCL_CHECK( tcl_eyespace_or_fog, ctx->_NeedEyeCoords || ctx->Fog.Enabled, 0 ) | |||
TCL_CHECK( tcl_eyespace_or_fog_add4, ctx->_NeedEyeCoords || ctx->Fog.Enabled, 4 ) | |||
CHECK( txr0, (ctx->Texture.Unit[0]._ReallyEnabled & TEXTURE_RECT_BIT), 0 ) | |||
@@ -330,71 +294,6 @@ static void lit_emit(struct gl_context *ctx, struct radeon_state_atom *atom) | |||
END_BATCH(); | |||
} | |||
static void ctx_emit(struct gl_context *ctx, struct radeon_state_atom *atom) | |||
{ | |||
r100ContextPtr r100 = R100_CONTEXT(ctx); | |||
BATCH_LOCALS(&r100->radeon); | |||
struct radeon_renderbuffer *rrb; | |||
uint32_t cbpitch; | |||
uint32_t zbpitch, depth_fmt; | |||
uint32_t dwords = atom->check(ctx, atom); | |||
/* output the first 7 bytes of context */ | |||
BEGIN_BATCH_NO_AUTOSTATE(dwords); | |||
OUT_BATCH_TABLE(atom->cmd, 5); | |||
rrb = radeon_get_depthbuffer(&r100->radeon); | |||
if (!rrb) { | |||
OUT_BATCH(0); | |||
OUT_BATCH(0); | |||
} else { | |||
zbpitch = (rrb->pitch / rrb->cpp); | |||
if (r100->using_hyperz) | |||
zbpitch |= RADEON_DEPTH_HYPERZ; | |||
OUT_BATCH_RELOC(0, rrb->bo, 0, 0, RADEON_GEM_DOMAIN_VRAM, 0); | |||
OUT_BATCH(zbpitch); | |||
if (rrb->cpp == 4) | |||
depth_fmt = RADEON_DEPTH_FORMAT_24BIT_INT_Z; | |||
else | |||
depth_fmt = RADEON_DEPTH_FORMAT_16BIT_INT_Z; | |||
atom->cmd[CTX_RB3D_ZSTENCILCNTL] &= ~RADEON_DEPTH_FORMAT_MASK; | |||
atom->cmd[CTX_RB3D_ZSTENCILCNTL] |= depth_fmt; | |||
} | |||
OUT_BATCH(atom->cmd[CTX_RB3D_ZSTENCILCNTL]); | |||
OUT_BATCH(atom->cmd[CTX_CMD_1]); | |||
OUT_BATCH(atom->cmd[CTX_PP_CNTL]); | |||
rrb = radeon_get_colorbuffer(&r100->radeon); | |||
if (!rrb || !rrb->bo) { | |||
OUT_BATCH(atom->cmd[CTX_RB3D_CNTL]); | |||
OUT_BATCH(atom->cmd[CTX_RB3D_COLOROFFSET]); | |||
} else { | |||
atom->cmd[CTX_RB3D_CNTL] &= ~(0xf << 10); | |||
if (rrb->cpp == 4) | |||
atom->cmd[CTX_RB3D_CNTL] |= RADEON_COLOR_FORMAT_ARGB8888; | |||
else | |||
atom->cmd[CTX_RB3D_CNTL] |= RADEON_COLOR_FORMAT_RGB565; | |||
OUT_BATCH(atom->cmd[CTX_RB3D_CNTL]); | |||
OUT_BATCH_RELOC(0, rrb->bo, 0, 0, RADEON_GEM_DOMAIN_VRAM, 0); | |||
} | |||
OUT_BATCH(atom->cmd[CTX_CMD_2]); | |||
if (!rrb || !rrb->bo) { | |||
OUT_BATCH(atom->cmd[CTX_RB3D_COLORPITCH]); | |||
} else { | |||
cbpitch = (rrb->pitch / rrb->cpp); | |||
if (rrb->bo->flags & RADEON_BO_FLAGS_MACRO_TILE) | |||
cbpitch |= RADEON_COLOR_TILE_ENABLE; | |||
OUT_BATCH(cbpitch); | |||
} | |||
END_BATCH(); | |||
} | |||
static int check_always_ctx( struct gl_context *ctx, struct radeon_state_atom *atom) | |||
{ | |||
r100ContextPtr r100 = R100_CONTEXT(ctx); | |||
@@ -512,34 +411,6 @@ static void ctx_emit_cs(struct gl_context *ctx, struct radeon_state_atom *atom) | |||
END_BATCH(); | |||
} | |||
static void cube_emit(struct gl_context *ctx, struct radeon_state_atom *atom) | |||
{ | |||
r100ContextPtr r100 = R100_CONTEXT(ctx); | |||
BATCH_LOCALS(&r100->radeon); | |||
uint32_t dwords = atom->check(ctx, atom); | |||
int i = atom->idx, j; | |||
radeonTexObj *t = r100->state.texture.unit[i].texobj; | |||
radeon_mipmap_level *lvl; | |||
if (!(ctx->Texture.Unit[i]._ReallyEnabled & TEXTURE_CUBE_BIT)) | |||
return; | |||
if (!t) | |||
return; | |||
if (!t->mt) | |||
return; | |||
BEGIN_BATCH_NO_AUTOSTATE(dwords); | |||
OUT_BATCH_TABLE(atom->cmd, 3); | |||
lvl = &t->mt->levels[0]; | |||
for (j = 0; j < 5; j++) { | |||
OUT_BATCH_RELOC(lvl->faces[j].offset, t->mt->bo, lvl->faces[j].offset, | |||
RADEON_GEM_DOMAIN_VRAM, 0, 0); | |||
} | |||
END_BATCH(); | |||
} | |||
static void cube_emit_cs(struct gl_context *ctx, struct radeon_state_atom *atom) | |||
{ | |||
r100ContextPtr r100 = R100_CONTEXT(ctx); | |||
@@ -576,41 +447,6 @@ static void cube_emit_cs(struct gl_context *ctx, struct radeon_state_atom *atom) | |||
END_BATCH(); | |||
} | |||
static void tex_emit(struct gl_context *ctx, struct radeon_state_atom *atom) | |||
{ | |||
r100ContextPtr r100 = R100_CONTEXT(ctx); | |||
BATCH_LOCALS(&r100->radeon); | |||
uint32_t dwords = atom->cmd_size; | |||
int i = atom->idx; | |||
radeonTexObj *t = r100->state.texture.unit[i].texobj; | |||
radeon_mipmap_level *lvl; | |||
if (t && t->mt && !t->image_override) | |||
dwords += 2; | |||
BEGIN_BATCH_NO_AUTOSTATE(dwords); | |||
OUT_BATCH_TABLE(atom->cmd, 3); | |||
if (t && t->mt && !t->image_override) { | |||
if ((ctx->Texture.Unit[i]._ReallyEnabled & TEXTURE_CUBE_BIT)) { | |||
lvl = &t->mt->levels[0]; | |||
OUT_BATCH_RELOC(lvl->faces[5].offset, t->mt->bo, lvl->faces[5].offset, | |||
RADEON_GEM_DOMAIN_GTT|RADEON_GEM_DOMAIN_VRAM, 0, 0); | |||
} else { | |||
OUT_BATCH_RELOC(t->tile_bits, t->mt->bo, 0, | |||
RADEON_GEM_DOMAIN_GTT|RADEON_GEM_DOMAIN_VRAM, 0, 0); | |||
} | |||
} else if (!t) { | |||
/* workaround for old CS mechanism */ | |||
OUT_BATCH(r100->radeon.radeonScreen->texOffset[RADEON_LOCAL_TEX_HEAP]); | |||
// OUT_BATCH(r100->radeon.radeonScreen); | |||
} else { | |||
OUT_BATCH(t->override_offset); | |||
} | |||
OUT_BATCH_TABLE((atom->cmd+4), 5); | |||
END_BATCH(); | |||
} | |||
static void tex_emit_cs(struct gl_context *ctx, struct radeon_state_atom *atom) | |||
{ | |||
r100ContextPtr r100 = R100_CONTEXT(ctx); | |||
@@ -708,11 +544,8 @@ void radeonInitState( r100ContextPtr rmesa ) | |||
/* Allocate state buffers: | |||
*/ | |||
ALLOC_STATE( ctx, always_add4, CTX_STATE_SIZE, "CTX/context", 0 ); | |||
if (rmesa->radeon.radeonScreen->kernel_mm) { | |||
rmesa->hw.ctx.emit = ctx_emit_cs; | |||
rmesa->hw.ctx.check = check_always_ctx; | |||
} else | |||
rmesa->hw.ctx.emit = ctx_emit; | |||
rmesa->hw.ctx.emit = ctx_emit_cs; | |||
rmesa->hw.ctx.check = check_always_ctx; | |||
ALLOC_STATE( lin, always, LIN_STATE_SIZE, "LIN/line", 0 ); | |||
ALLOC_STATE( msk, always, MSK_STATE_SIZE, "MSK/mask", 0 ); | |||
ALLOC_STATE( vpt, always, VPT_STATE_SIZE, "VPT/viewport", 0 ); | |||
@@ -721,89 +554,45 @@ void radeonInitState( r100ContextPtr rmesa ) | |||
ALLOC_STATE( zbs, always, ZBS_STATE_SIZE, "ZBS/zbias", 0 ); | |||
ALLOC_STATE( tcl, always, TCL_STATE_SIZE, "TCL/tcl", 1 ); | |||
ALLOC_STATE( mtl, tcl_lighting, MTL_STATE_SIZE, "MTL/material", 1 ); | |||
if (rmesa->radeon.radeonScreen->kernel_mm) { | |||
ALLOC_STATE( grd, always_add2, GRD_STATE_SIZE, "GRD/guard-band", 1 ); | |||
ALLOC_STATE( fog, fog_add4, FOG_STATE_SIZE, "FOG/fog", 1 ); | |||
ALLOC_STATE( glt, tcl_lighting_add4, GLT_STATE_SIZE, "GLT/light-global", 1 ); | |||
ALLOC_STATE( eye, tcl_lighting_add4, EYE_STATE_SIZE, "EYE/eye-vector", 1 ); | |||
ALLOC_STATE_IDX( tex[0], tex0_mm, TEX_STATE_SIZE, "TEX/tex-0", 0, 0); | |||
ALLOC_STATE_IDX( tex[1], tex1_mm, TEX_STATE_SIZE, "TEX/tex-1", 0, 1); | |||
ALLOC_STATE_IDX( tex[2], tex2_mm, TEX_STATE_SIZE, "TEX/tex-2", 0, 2); | |||
ALLOC_STATE( mat[0], tcl_add4, MAT_STATE_SIZE, "MAT/modelproject", 1 ); | |||
ALLOC_STATE( mat[1], tcl_eyespace_or_fog_add4, MAT_STATE_SIZE, "MAT/modelview", 1 ); | |||
ALLOC_STATE( mat[2], tcl_eyespace_or_lighting_add4, MAT_STATE_SIZE, "MAT/it-modelview", 1 ); | |||
ALLOC_STATE( mat[3], tcl_tex0_add4, MAT_STATE_SIZE, "MAT/texmat0", 1 ); | |||
ALLOC_STATE( mat[4], tcl_tex1_add4, MAT_STATE_SIZE, "MAT/texmat1", 1 ); | |||
ALLOC_STATE( mat[5], tcl_tex2_add4, MAT_STATE_SIZE, "MAT/texmat2", 1 ); | |||
ALLOC_STATE( lit[0], tcl_lit0_add6, LIT_STATE_SIZE, "LIT/light-0", 1 ); | |||
ALLOC_STATE( lit[1], tcl_lit1_add6, LIT_STATE_SIZE, "LIT/light-1", 1 ); | |||
ALLOC_STATE( lit[2], tcl_lit2_add6, LIT_STATE_SIZE, "LIT/light-2", 1 ); | |||
ALLOC_STATE( lit[3], tcl_lit3_add6, LIT_STATE_SIZE, "LIT/light-3", 1 ); | |||
ALLOC_STATE( lit[4], tcl_lit4_add6, LIT_STATE_SIZE, "LIT/light-4", 1 ); | |||
ALLOC_STATE( lit[5], tcl_lit5_add6, LIT_STATE_SIZE, "LIT/light-5", 1 ); | |||
ALLOC_STATE( lit[6], tcl_lit6_add6, LIT_STATE_SIZE, "LIT/light-6", 1 ); | |||
ALLOC_STATE( lit[7], tcl_lit7_add6, LIT_STATE_SIZE, "LIT/light-7", 1 ); | |||
ALLOC_STATE( ucp[0], tcl_ucp0_add4, UCP_STATE_SIZE, "UCP/userclip-0", 1 ); | |||
ALLOC_STATE( ucp[1], tcl_ucp1_add4, UCP_STATE_SIZE, "UCP/userclip-1", 1 ); | |||
ALLOC_STATE( ucp[2], tcl_ucp2_add4, UCP_STATE_SIZE, "UCP/userclip-2", 1 ); | |||
ALLOC_STATE( ucp[3], tcl_ucp3_add4, UCP_STATE_SIZE, "UCP/userclip-3", 1 ); | |||
ALLOC_STATE( ucp[4], tcl_ucp4_add4, UCP_STATE_SIZE, "UCP/userclip-4", 1 ); | |||
ALLOC_STATE( ucp[5], tcl_ucp5_add4, UCP_STATE_SIZE, "UCP/userclip-5", 1 ); | |||
} else { | |||
ALLOC_STATE( grd, always, GRD_STATE_SIZE, "GRD/guard-band", 1 ); | |||
ALLOC_STATE( fog, fog, FOG_STATE_SIZE, "FOG/fog", 1 ); | |||
ALLOC_STATE( glt, tcl_lighting, GLT_STATE_SIZE, "GLT/light-global", 1 ); | |||
ALLOC_STATE( eye, tcl_lighting, EYE_STATE_SIZE, "EYE/eye-vector", 1 ); | |||
ALLOC_STATE_IDX( tex[0], tex0, TEX_STATE_SIZE, "TEX/tex-0", 0, 0); | |||
ALLOC_STATE_IDX( tex[1], tex1, TEX_STATE_SIZE, "TEX/tex-1", 0, 1); | |||
ALLOC_STATE_IDX( tex[2], tex2, TEX_STATE_SIZE, "TEX/tex-2", 0, 2); | |||
ALLOC_STATE( mat[0], tcl, MAT_STATE_SIZE, "MAT/modelproject", 1 ); | |||
ALLOC_STATE( mat[1], tcl_eyespace_or_fog, MAT_STATE_SIZE, "MAT/modelview", 1 ); | |||
ALLOC_STATE( mat[2], tcl_eyespace_or_lighting, MAT_STATE_SIZE, "MAT/it-modelview", 1 ); | |||
ALLOC_STATE( mat[3], tcl_tex0, MAT_STATE_SIZE, "MAT/texmat0", 1 ); | |||
ALLOC_STATE( mat[4], tcl_tex1, MAT_STATE_SIZE, "MAT/texmat1", 1 ); | |||
ALLOC_STATE( mat[5], tcl_tex2, MAT_STATE_SIZE, "MAT/texmat2", 1 ); | |||
ALLOC_STATE( lit[0], tcl_lit0, LIT_STATE_SIZE, "LIT/light-0", 1 ); | |||
ALLOC_STATE( lit[1], tcl_lit1, LIT_STATE_SIZE, "LIT/light-1", 1 ); | |||
ALLOC_STATE( lit[2], tcl_lit2, LIT_STATE_SIZE, "LIT/light-2", 1 ); | |||
ALLOC_STATE( lit[3], tcl_lit3, LIT_STATE_SIZE, "LIT/light-3", 1 ); | |||
ALLOC_STATE( lit[4], tcl_lit4, LIT_STATE_SIZE, "LIT/light-4", 1 ); | |||
ALLOC_STATE( lit[5], tcl_lit5, LIT_STATE_SIZE, "LIT/light-5", 1 ); | |||
ALLOC_STATE( lit[6], tcl_lit6, LIT_STATE_SIZE, "LIT/light-6", 1 ); | |||
ALLOC_STATE( lit[7], tcl_lit7, LIT_STATE_SIZE, "LIT/light-7", 1 ); | |||
ALLOC_STATE( ucp[0], tcl_ucp0, UCP_STATE_SIZE, "UCP/userclip-0", 1 ); | |||
ALLOC_STATE( ucp[1], tcl_ucp1, UCP_STATE_SIZE, "UCP/userclip-1", 1 ); | |||
ALLOC_STATE( ucp[2], tcl_ucp2, UCP_STATE_SIZE, "UCP/userclip-2", 1 ); | |||
ALLOC_STATE( ucp[3], tcl_ucp3, UCP_STATE_SIZE, "UCP/userclip-3", 1 ); | |||
ALLOC_STATE( ucp[4], tcl_ucp4, UCP_STATE_SIZE, "UCP/userclip-4", 1 ); | |||
ALLOC_STATE( ucp[5], tcl_ucp5, UCP_STATE_SIZE, "UCP/userclip-5", 1 ); | |||
} | |||
ALLOC_STATE( grd, always_add2, GRD_STATE_SIZE, "GRD/guard-band", 1 ); | |||
ALLOC_STATE( fog, fog_add4, FOG_STATE_SIZE, "FOG/fog", 1 ); | |||
ALLOC_STATE( glt, tcl_lighting_add4, GLT_STATE_SIZE, "GLT/light-global", 1 ); | |||
ALLOC_STATE( eye, tcl_lighting_add4, EYE_STATE_SIZE, "EYE/eye-vector", 1 ); | |||
ALLOC_STATE_IDX( tex[0], tex0_mm, TEX_STATE_SIZE, "TEX/tex-0", 0, 0); | |||
ALLOC_STATE_IDX( tex[1], tex1_mm, TEX_STATE_SIZE, "TEX/tex-1", 0, 1); | |||
ALLOC_STATE_IDX( tex[2], tex2_mm, TEX_STATE_SIZE, "TEX/tex-2", 0, 2); | |||
ALLOC_STATE( mat[0], tcl_add4, MAT_STATE_SIZE, "MAT/modelproject", 1 ); | |||
ALLOC_STATE( mat[1], tcl_eyespace_or_fog_add4, MAT_STATE_SIZE, "MAT/modelview", 1 ); | |||
ALLOC_STATE( mat[2], tcl_eyespace_or_lighting_add4, MAT_STATE_SIZE, "MAT/it-modelview", 1 ); | |||
ALLOC_STATE( mat[3], tcl_tex0_add4, MAT_STATE_SIZE, "MAT/texmat0", 1 ); | |||
ALLOC_STATE( mat[4], tcl_tex1_add4, MAT_STATE_SIZE, "MAT/texmat1", 1 ); | |||
ALLOC_STATE( mat[5], tcl_tex2_add4, MAT_STATE_SIZE, "MAT/texmat2", 1 ); | |||
ALLOC_STATE( lit[0], tcl_lit0_add6, LIT_STATE_SIZE, "LIT/light-0", 1 ); | |||
ALLOC_STATE( lit[1], tcl_lit1_add6, LIT_STATE_SIZE, "LIT/light-1", 1 ); | |||
ALLOC_STATE( lit[2], tcl_lit2_add6, LIT_STATE_SIZE, "LIT/light-2", 1 ); | |||
ALLOC_STATE( lit[3], tcl_lit3_add6, LIT_STATE_SIZE, "LIT/light-3", 1 ); | |||
ALLOC_STATE( lit[4], tcl_lit4_add6, LIT_STATE_SIZE, "LIT/light-4", 1 ); | |||
ALLOC_STATE( lit[5], tcl_lit5_add6, LIT_STATE_SIZE, "LIT/light-5", 1 ); | |||
ALLOC_STATE( lit[6], tcl_lit6_add6, LIT_STATE_SIZE, "LIT/light-6", 1 ); | |||
ALLOC_STATE( lit[7], tcl_lit7_add6, LIT_STATE_SIZE, "LIT/light-7", 1 ); | |||
ALLOC_STATE( ucp[0], tcl_ucp0_add4, UCP_STATE_SIZE, "UCP/userclip-0", 1 ); | |||
ALLOC_STATE( ucp[1], tcl_ucp1_add4, UCP_STATE_SIZE, "UCP/userclip-1", 1 ); | |||
ALLOC_STATE( ucp[2], tcl_ucp2_add4, UCP_STATE_SIZE, "UCP/userclip-2", 1 ); | |||
ALLOC_STATE( ucp[3], tcl_ucp3_add4, UCP_STATE_SIZE, "UCP/userclip-3", 1 ); | |||
ALLOC_STATE( ucp[4], tcl_ucp4_add4, UCP_STATE_SIZE, "UCP/userclip-4", 1 ); | |||
ALLOC_STATE( ucp[5], tcl_ucp5_add4, UCP_STATE_SIZE, "UCP/userclip-5", 1 ); | |||
ALLOC_STATE( stp, always, STP_STATE_SIZE, "STP/stp", 0 ); | |||
if (rmesa->radeon.radeonScreen->kernel_mm) { | |||
ALLOC_STATE( stp, always, STP_STATE_SIZE, "STP/stp", 0 ); | |||
} | |||
for (i = 0; i < 3; i++) { | |||
if (rmesa->radeon.radeonScreen->kernel_mm) | |||
rmesa->hw.tex[i].emit = tex_emit_cs; | |||
else | |||
rmesa->hw.tex[i].emit = tex_emit; | |||
rmesa->hw.tex[i].emit = tex_emit_cs; | |||
} | |||
if (rmesa->radeon.radeonScreen->drmSupportsCubeMapsR100) | |||
{ | |||
if (rmesa->radeon.radeonScreen->kernel_mm) { | |||
ALLOC_STATE_IDX( cube[0], cube0_mm, CUBE_STATE_SIZE, "CUBE/cube-0", 0, 0 ); | |||
ALLOC_STATE_IDX( cube[1], cube1_mm, CUBE_STATE_SIZE, "CUBE/cube-1", 0, 1 ); | |||
ALLOC_STATE_IDX( cube[2], cube2_mm, CUBE_STATE_SIZE, "CUBE/cube-2", 0, 2 ); | |||
for (i = 0; i < 3; i++) | |||
rmesa->hw.cube[i].emit = cube_emit_cs; | |||
} else { | |||
ALLOC_STATE_IDX( cube[0], cube0, CUBE_STATE_SIZE, "CUBE/cube-0", 0, 0 ); | |||
ALLOC_STATE_IDX( cube[1], cube1, CUBE_STATE_SIZE, "CUBE/cube-1", 0, 1 ); | |||
ALLOC_STATE_IDX( cube[2], cube2, CUBE_STATE_SIZE, "CUBE/cube-2", 0, 2 ); | |||
for (i = 0; i < 3; i++) | |||
rmesa->hw.cube[i].emit = cube_emit; | |||
} | |||
ALLOC_STATE_IDX( cube[0], cube0_mm, CUBE_STATE_SIZE, "CUBE/cube-0", 0, 0 ); | |||
ALLOC_STATE_IDX( cube[1], cube1_mm, CUBE_STATE_SIZE, "CUBE/cube-1", 0, 1 ); | |||
ALLOC_STATE_IDX( cube[2], cube2_mm, CUBE_STATE_SIZE, "CUBE/cube-2", 0, 2 ); | |||
for (i = 0; i < 3; i++) | |||
rmesa->hw.cube[i].emit = cube_emit_cs; | |||
} | |||
else | |||
{ | |||
@@ -874,25 +663,22 @@ void radeonInitState( r100ContextPtr rmesa ) | |||
cmdvec( RADEON_VS_UCP_ADDR + i, 1, 4 ); | |||
} | |||
if (rmesa->radeon.radeonScreen->kernel_mm) { | |||
rmesa->hw.stp.cmd[STP_CMD_0] = CP_PACKET0(RADEON_RE_STIPPLE_ADDR, 0); | |||
rmesa->hw.stp.cmd[STP_DATA_0] = 0; | |||
rmesa->hw.stp.cmd[STP_CMD_1] = CP_PACKET0_ONE(RADEON_RE_STIPPLE_DATA, 31); | |||
rmesa->hw.stp.cmd[STP_CMD_0] = CP_PACKET0(RADEON_RE_STIPPLE_ADDR, 0); | |||
rmesa->hw.stp.cmd[STP_DATA_0] = 0; | |||
rmesa->hw.stp.cmd[STP_CMD_1] = CP_PACKET0_ONE(RADEON_RE_STIPPLE_DATA, 31); | |||
rmesa->hw.grd.emit = scl_emit; | |||
rmesa->hw.fog.emit = vec_emit; | |||
rmesa->hw.glt.emit = vec_emit; | |||
rmesa->hw.eye.emit = vec_emit; | |||
for (i = 0; i < 6; i++) | |||
rmesa->hw.mat[i].emit = vec_emit; | |||
rmesa->hw.grd.emit = scl_emit; | |||
rmesa->hw.fog.emit = vec_emit; | |||
rmesa->hw.glt.emit = vec_emit; | |||
rmesa->hw.eye.emit = vec_emit; | |||
for (i = 0; i < 6; i++) | |||
rmesa->hw.mat[i].emit = vec_emit; | |||
for (i = 0; i < 8; i++) | |||
rmesa->hw.lit[i].emit = lit_emit; | |||
for (i = 0; i < 8; i++) | |||
rmesa->hw.lit[i].emit = lit_emit; | |||
for (i = 0; i < 6; i++) | |||
rmesa->hw.ucp[i].emit = vec_emit; | |||
} | |||
for (i = 0; i < 6; i++) | |||
rmesa->hw.ucp[i].emit = vec_emit; | |||
rmesa->last_ReallyEnabled = -1; | |||
@@ -1150,11 +936,9 @@ void radeonInitState( r100ContextPtr rmesa ) | |||
rmesa->hw.eye.cmd[EYE_Z] = IEEE_ONE; | |||
rmesa->hw.eye.cmd[EYE_RESCALE_FACTOR] = IEEE_ONE; | |||
if (rmesa->radeon.radeonScreen->kernel_mm) { | |||
radeon_init_query_stateobj(&rmesa->radeon, R100_QUERYOBJ_CMDSIZE); | |||
rmesa->radeon.query.queryobj.cmd[R100_QUERYOBJ_CMD_0] = CP_PACKET0(RADEON_RB3D_ZPASS_DATA, 0); | |||
rmesa->radeon.query.queryobj.cmd[R100_QUERYOBJ_DATA_0] = 0; | |||
} | |||
radeon_init_query_stateobj(&rmesa->radeon, R100_QUERYOBJ_CMDSIZE); | |||
rmesa->radeon.query.queryobj.cmd[R100_QUERYOBJ_CMD_0] = CP_PACKET0(RADEON_RB3D_ZPASS_DATA, 0); | |||
rmesa->radeon.query.queryobj.cmd[R100_QUERYOBJ_DATA_0] = 0; | |||
rmesa->radeon.hw.all_dirty = GL_TRUE; | |||
@@ -1126,9 +1126,7 @@ radeon_init_common_texture_funcs(radeonContextPtr radeon, | |||
functions->GenerateMipmap = radeonGenerateMipmap; | |||
if (radeon->radeonScreen->kernel_mm) { | |||
functions->CopyTexSubImage2D = radeonCopyTexSubImage2D; | |||
} | |||
functions->CopyTexSubImage2D = radeonCopyTexSubImage2D; | |||
#if FEATURE_OES_EGL_image | |||
functions->EGLImageTargetTexture2D = radeon_image_target_texture_2d; |