Clone of mesa.
您最多选择25个主题 主题必须以字母或数字开头,可以包含连字符 (-),并且长度不得超过35个字符

r200_ioctl.c 23KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934
  1. /* $XFree86$ */
  2. /*
  3. Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
  4. The Weather Channel (TM) funded Tungsten Graphics to develop the
  5. initial release of the Radeon 8500 driver under the XFree86 license.
  6. This notice must be preserved.
  7. Permission is hereby granted, free of charge, to any person obtaining
  8. a copy of this software and associated documentation files (the
  9. "Software"), to deal in the Software without restriction, including
  10. without limitation the rights to use, copy, modify, merge, publish,
  11. distribute, sublicense, and/or sell copies of the Software, and to
  12. permit persons to whom the Software is furnished to do so, subject to
  13. the following conditions:
  14. The above copyright notice and this permission notice (including the
  15. next paragraph) shall be included in all copies or substantial
  16. portions of the Software.
  17. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  18. EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  19. MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
  20. IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
  21. LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
  22. OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
  23. WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  24. */
  25. /*
  26. * Authors:
  27. * Keith Whitwell <keith@tungstengraphics.com>
  28. */
  29. #include "glheader.h"
  30. #include "imports.h"
  31. #include "macros.h"
  32. #include "context.h"
  33. #include "swrast/swrast.h"
  34. #include "r200_context.h"
  35. #include "r200_state.h"
  36. #include "r200_ioctl.h"
  37. #include "r200_tcl.h"
  38. #include "r200_sanity.h"
  39. #include "radeon_reg.h"
  40. #include <unistd.h> /* for usleep() */
  41. #include <errno.h>
  42. #define R200_TIMEOUT 512
  43. #define R200_IDLE_RETRY 16
  44. static void do_usleep( int nr, const char *caller )
  45. {
  46. //if (1) fprintf(stderr, "usleep %d in %s\n", nr, caller );
  47. if (1) usleep( nr );
  48. }
  49. static void r200WaitForIdle( r200ContextPtr rmesa );
/* Submit the accumulated command buffer to the kernel via the
 * DRM_RADEON_CMDBUF ioctl.  Must be called with the hardware lock held.
 * Returns 0 on success, or the error from the sanity checker /
 * drmCommandWrite.  The command-buffer bookkeeping is reset either way.
 */
int r200FlushCmdBufLocked( r200ContextPtr rmesa, const char * caller )
{
   int ret, i;
   drmRadeonCmdBuffer cmd;

   if (R200_DEBUG & DEBUG_IOCTL) {
      fprintf(stderr, "%s from %s\n", __FUNCTION__, caller);

      /* The leading '0 &' permanently disables this very verbose dump. */
      if (0 & R200_DEBUG & DEBUG_VERBOSE)
	 for (i = 0 ; i < rmesa->store.cmd_used ; i += 4 )
	    fprintf(stderr, "%d: %x\n", i/4,
		    *(int *)(&rmesa->store.cmd_buf[i]));
   }

   if (R200_DEBUG & DEBUG_DMA)
      fprintf(stderr, "%s: Releasing %d buffers\n", __FUNCTION__,
	      rmesa->dma.nr_released_bufs);

   /* Optional client-side validation of the buffer before handing it to
    * the kernel; on failure the buffer is dropped, not submitted.
    */
   if (R200_DEBUG & DEBUG_SANITY) {
      if (rmesa->state.scissor.enabled)
	 ret = r200SanityCmdBuffer( rmesa,
				    rmesa->state.scissor.numClipRects,
				    rmesa->state.scissor.pClipRects);
      else
	 ret = r200SanityCmdBuffer( rmesa,
				    rmesa->numClipRects,
				    rmesa->pClipRects);
      if (ret) {
	 fprintf(stderr, "drmSanityCommandWrite: %d\n", ret);
	 goto out;
      }
   }

   if (R200_DEBUG & DEBUG_MEMORY) {
      if (!r200ValidateTexObjs( rmesa )) {
	 fprintf(stderr, " -- tex memory is inconsistent - expect mangled textures\n");
      }
   }

   cmd.bufsz = rmesa->store.cmd_used;
   cmd.buf = rmesa->store.cmd_buf;

   /* Use the scissored clip list when scissoring is on, the drawable's
    * full clip list otherwise.
    */
   if (rmesa->state.scissor.enabled) {
      cmd.nbox = rmesa->state.scissor.numClipRects;
      cmd.boxes = (drmClipRect *)rmesa->state.scissor.pClipRects;
   } else {
      cmd.nbox = rmesa->numClipRects;
      cmd.boxes = (drmClipRect *)rmesa->pClipRects;
   }

   ret = drmCommandWrite( rmesa->dri.fd,
			  DRM_RADEON_CMDBUF,
			  &cmd, sizeof(cmd) );

   if (ret)
      fprintf(stderr, "drmCommandWrite: %d\n", ret);

   if (R200_DEBUG & DEBUG_SYNC) {
      fprintf(stderr, "\nSyncing in %s\n\n", __FUNCTION__);
      r200WaitForIdleLocked( rmesa );
   }

 out:
   /* Reset buffer bookkeeping even when the submit failed. */
   rmesa->store.primnr = 0;
   rmesa->store.statenr = 0;
   rmesa->store.cmd_used = 0;
   rmesa->dma.nr_released_bufs = 0;
   /* rmesa->lost_context = 0; */
   /* NOTE(review): context is marked lost after every flush; the
    * commented-out line above suggests this was once conditional.
    */
   rmesa->lost_context = 1;
   return ret;
}
  110. /* Note: does not emit any commands to avoid recursion on
  111. * r200AllocCmdBuf.
  112. */
  113. void r200FlushCmdBuf( r200ContextPtr rmesa, const char *caller )
  114. {
  115. int ret;
  116. LOCK_HARDWARE( rmesa );
  117. ret = r200FlushCmdBufLocked( rmesa, caller );
  118. UNLOCK_HARDWARE( rmesa );
  119. if (ret) {
  120. fprintf(stderr, "drmRadeonCmdBuffer: %d (exiting)\n", ret);
  121. exit(ret);
  122. }
  123. }
  124. /* =============================================================
  125. * Hardware vertex buffer handling
  126. */
  127. void r200RefillCurrentDmaRegion( r200ContextPtr rmesa )
  128. {
  129. struct r200_dma_buffer *dmabuf;
  130. int fd = rmesa->dri.fd;
  131. int index = 0;
  132. int size = 0;
  133. drmDMAReq dma;
  134. int ret;
  135. if (R200_DEBUG & (DEBUG_IOCTL|DEBUG_DMA))
  136. fprintf(stderr, "%s\n", __FUNCTION__);
  137. if (rmesa->dma.flush) {
  138. rmesa->dma.flush( rmesa );
  139. }
  140. if (rmesa->dma.current.buf)
  141. r200ReleaseDmaRegion( rmesa, &rmesa->dma.current, __FUNCTION__ );
  142. if (rmesa->dma.nr_released_bufs > 4)
  143. r200FlushCmdBuf( rmesa, __FUNCTION__ );
  144. dma.context = rmesa->dri.hwContext;
  145. dma.send_count = 0;
  146. dma.send_list = NULL;
  147. dma.send_sizes = NULL;
  148. dma.flags = 0;
  149. dma.request_count = 1;
  150. dma.request_size = RADEON_BUFFER_SIZE;
  151. dma.request_list = &index;
  152. dma.request_sizes = &size;
  153. dma.granted_count = 0;
  154. LOCK_HARDWARE(rmesa); /* no need to validate */
  155. while (1) {
  156. ret = drmDMA( fd, &dma );
  157. if (ret == 0)
  158. break;
  159. if (rmesa->dma.nr_released_bufs) {
  160. r200FlushCmdBufLocked( rmesa, __FUNCTION__ );
  161. }
  162. if (rmesa->do_usleeps) {
  163. UNLOCK_HARDWARE( rmesa );
  164. do_usleep(1, __FUNCTION__);
  165. LOCK_HARDWARE( rmesa );
  166. }
  167. }
  168. UNLOCK_HARDWARE(rmesa);
  169. if (R200_DEBUG & DEBUG_DMA)
  170. fprintf(stderr, "Allocated buffer %d\n", index);
  171. dmabuf = CALLOC_STRUCT( r200_dma_buffer );
  172. dmabuf->buf = &rmesa->r200Screen->buffers->list[index];
  173. dmabuf->refcount = 1;
  174. rmesa->dma.current.buf = dmabuf;
  175. rmesa->dma.current.address = dmabuf->buf->address;
  176. rmesa->dma.current.end = dmabuf->buf->total;
  177. rmesa->dma.current.start = 0;
  178. rmesa->dma.current.ptr = 0;
  179. }
/* Drop a reference on the DMA region's underlying buffer.  When the last
 * reference goes away, queue a DMA_DISCARD command so the kernel can
 * recycle the buffer once the hardware has consumed it.
 */
void r200ReleaseDmaRegion( r200ContextPtr rmesa,
			   struct r200_dma_region *region,
			   const char *caller )
{
   if (R200_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s from %s\n", __FUNCTION__, caller);

   if (!region->buf)
      return;

   /* Run the pending-flush callback (if any) before releasing. */
   if (rmesa->dma.flush)
      rmesa->dma.flush( rmesa );

   if (--region->buf->refcount == 0) {
      drmRadeonCmdHeader *cmd;

      if (R200_DEBUG & (DEBUG_IOCTL|DEBUG_DMA))
	 fprintf(stderr, "%s -- DISCARD BUF %d\n", __FUNCTION__,
		 region->buf->buf->idx);

      /* The discard travels through the command stream so it is ordered
       * after any commands that reference the buffer.
       */
      cmd = (drmRadeonCmdHeader *)r200AllocCmdBuf( rmesa, sizeof(*cmd),
						   __FUNCTION__ );
      cmd->dma.cmd_type = RADEON_CMD_DMA_DISCARD;
      cmd->dma.buf_idx = region->buf->buf->idx;
      FREE(region->buf);
      rmesa->dma.nr_released_bufs++;
   }

   region->buf = 0;
   region->start = 0;
}
/* Allocates a region from rmesa->dma.current.  If there isn't enough
 * space in current, grab a new buffer (and discard what was left of current)
 *
 * 'alignment' must be a power of two (it is turned into a mask below).
 */
void r200AllocDmaRegion( r200ContextPtr rmesa,
			 struct r200_dma_region *region,
			 int bytes,
			 int alignment )
{
   if (R200_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s %d\n", __FUNCTION__, bytes);

   if (rmesa->dma.flush)
      rmesa->dma.flush( rmesa );

   if (region->buf)
      r200ReleaseDmaRegion( rmesa, region, __FUNCTION__ );

   /* Round the current pointer up to the requested alignment. */
   alignment--;
   rmesa->dma.current.start = rmesa->dma.current.ptr =
      (rmesa->dma.current.ptr + alignment) & ~alignment;

   if ( rmesa->dma.current.ptr + bytes > rmesa->dma.current.end )
      r200RefillCurrentDmaRegion( rmesa );

   region->start = rmesa->dma.current.start;
   region->ptr = rmesa->dma.current.start;
   region->end = rmesa->dma.current.start + bytes;
   region->address = rmesa->dma.current.address;
   region->buf = rmesa->dma.current.buf;
   region->buf->refcount++;

   /* NOTE(review): the realignment below always uses an 8-byte mask, so
    * a requested alignment greater than 8 is not preserved for the next
    * allocation from this buffer -- that appears to be what the original
    * "bug" comment refers to.  TODO confirm before changing.
    */
   rmesa->dma.current.ptr += bytes; /* bug - if alignment > 7 */
   rmesa->dma.current.start =
      rmesa->dma.current.ptr = (rmesa->dma.current.ptr + 0x7) & ~0x7;
}
  234. void r200AllocDmaRegionVerts( r200ContextPtr rmesa,
  235. struct r200_dma_region *region,
  236. int numverts,
  237. int vertsize,
  238. int alignment )
  239. {
  240. r200AllocDmaRegion( rmesa, region, vertsize * numverts, alignment );
  241. }
  242. /* ================================================================
  243. * SwapBuffers with client-side throttling
  244. */
  245. static GLuint r200GetLastFrame(r200ContextPtr rmesa)
  246. {
  247. drmRadeonGetParam gp;
  248. int ret;
  249. GLuint frame;
  250. gp.param = RADEON_PARAM_LAST_FRAME;
  251. gp.value = (int *)&frame;
  252. ret = drmCommandWriteRead( rmesa->dri.fd, DRM_RADEON_GETPARAM,
  253. &gp, sizeof(gp) );
  254. if ( ret ) {
  255. fprintf( stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__, ret );
  256. exit(1);
  257. }
  258. return frame;
  259. }
  260. static void r200EmitIrqLocked( r200ContextPtr rmesa )
  261. {
  262. drmRadeonIrqEmit ie;
  263. int ret;
  264. ie.irq_seq = &rmesa->iw.irq_seq;
  265. ret = drmCommandWriteRead( rmesa->dri.fd, DRM_RADEON_IRQ_EMIT,
  266. &ie, sizeof(ie) );
  267. if ( ret ) {
  268. fprintf( stderr, "%s: drmRadeonIrqEmit: %d\n", __FUNCTION__, ret );
  269. exit(1);
  270. }
  271. }
  272. static void r200WaitIrq( r200ContextPtr rmesa )
  273. {
  274. int ret;
  275. do {
  276. ret = drmCommandWrite( rmesa->dri.fd, DRM_RADEON_IRQ_WAIT,
  277. &rmesa->iw, sizeof(rmesa->iw) );
  278. } while (ret && (errno == EINTR || errno == EAGAIN));
  279. if ( ret ) {
  280. fprintf( stderr, "%s: drmRadeonIrqWait: %d\n", __FUNCTION__, ret );
  281. exit(1);
  282. }
  283. }
/* Block until the hardware has completed the previously requested frame,
 * so at most one swap is outstanding at a time.  Called with the
 * hardware lock held; may drop and re-take it while waiting.
 */
static void r200WaitForFrameCompletion( r200ContextPtr rmesa )
{
   RADEONSAREAPrivPtr sarea = rmesa->sarea;

   if (rmesa->do_irqs) {
      if (r200GetLastFrame(rmesa) < sarea->last_frame) {
	 if (!rmesa->irqsEmitted) {
	    /* No IRQ in flight to wait on: busy-poll the frame counter
	     * (with the lock still held).
	     */
	    while (r200GetLastFrame (rmesa) < sarea->last_frame)
	       ;
	 }
	 else {
	    /* Drop the lock while sleeping on the IRQ. */
	    UNLOCK_HARDWARE( rmesa );
	    r200WaitIrq( rmesa );
	    LOCK_HARDWARE( rmesa );
	 }
	 /* NOTE(review): the IRQ budget is re-armed only on the path
	  * where we had to wait; presumably intentional -- confirm.
	  */
	 rmesa->irqsEmitted = 10;
      }

      if (rmesa->irqsEmitted) {
	 r200EmitIrqLocked( rmesa );
	 rmesa->irqsEmitted--;
      }
   }
   else {
      /* No IRQ support: poll the last-frame counter, sleeping briefly
       * with the lock dropped so other clients can make progress.
       */
      while (r200GetLastFrame (rmesa) < sarea->last_frame) {
	 UNLOCK_HARDWARE( rmesa );
	 if (rmesa->do_usleeps)
	    do_usleep(1, __FUNCTION__);
	 LOCK_HARDWARE( rmesa );
      }
   }
}
/* Copy the back color buffer to the front color buffer.
 *
 * Issues one DRM_RADEON_SWAP ioctl per batch of up to
 * RADEON_NR_SAREA_CLIPRECTS cliprects, since the SAREA box array holds
 * at most that many entries.  A failed swap is fatal.
 */
void r200CopyBuffer( const __DRIdrawablePrivate *dPriv )
{
   r200ContextPtr rmesa;
   GLint nbox, i, ret;

   assert(dPriv);
   assert(dPriv->driContextPriv);
   assert(dPriv->driContextPriv->driverPrivate);

   rmesa = (r200ContextPtr) dPriv->driContextPriv->driverPrivate;

   if ( R200_DEBUG & DEBUG_IOCTL ) {
      fprintf( stderr, "\n%s( %p )\n\n", __FUNCTION__, rmesa->glCtx );
   }

   /* Flush any buffered rendering before swapping. */
   R200_FIREVERTICES( rmesa );
   LOCK_HARDWARE( rmesa );

   /* Throttle the frame rate -- only allow one pending swap buffers
    * request at a time.
    */
   r200WaitForFrameCompletion( rmesa );
   r200WaitForVBlank( rmesa );

   nbox = rmesa->dri.drawable->numClipRects; /* must be in locked region */

   for ( i = 0 ; i < nbox ; ) {
      GLint nr = MIN2( i + RADEON_NR_SAREA_CLIPRECTS , nbox );
      XF86DRIClipRectPtr box = rmesa->dri.drawable->pClipRects;
      XF86DRIClipRectPtr b = rmesa->sarea->boxes;
      GLint n = 0;

      /* Copy this batch of cliprects into the shared SAREA. */
      for ( ; i < nr ; i++ ) {
	 *b++ = box[i];
	 n++;
      }
      rmesa->sarea->nbox = n;

      ret = drmCommandNone( rmesa->dri.fd, DRM_RADEON_SWAP );

      if ( ret ) {
	 fprintf( stderr, "DRM_R200_SWAP_BUFFERS: return = %d\n", ret );
	 UNLOCK_HARDWARE( rmesa );
	 exit( 1 );
      }
   }

   UNLOCK_HARDWARE( rmesa );
   /* Presumably forces full state re-emission on the next draw --
    * see other writers of lost_context in this file.
    */
   rmesa->lost_context = 1;
}
/* Swap front/back by scheduling a page flip (DRM_RADEON_FLIP), then
 * retarget color rendering at whichever page became the back buffer.
 * A failed flip ioctl is fatal.
 */
void r200PageFlip( const __DRIdrawablePrivate *dPriv )
{
   r200ContextPtr rmesa;
   GLint ret;

   assert(dPriv);
   assert(dPriv->driContextPriv);
   assert(dPriv->driContextPriv->driverPrivate);

   rmesa = (r200ContextPtr) dPriv->driContextPriv->driverPrivate;

   if ( R200_DEBUG & DEBUG_IOCTL ) {
      fprintf(stderr, "%s: pfCurrentPage: %d\n", __FUNCTION__,
	      rmesa->sarea->pfCurrentPage);
   }

   R200_FIREVERTICES( rmesa );
   LOCK_HARDWARE( rmesa );

   /* A fully obscured drawable has nothing to flip; back off so we
    * don't spin at full speed.
    */
   if (!rmesa->dri.drawable->numClipRects) {
      UNLOCK_HARDWARE( rmesa );
      usleep( 10000 );	/* throttle invisible client 10ms */
      return;
   }

   /* Need to do this for the perf box placement:
    */
   {
      XF86DRIClipRectPtr box = rmesa->dri.drawable->pClipRects;
      XF86DRIClipRectPtr b = rmesa->sarea->boxes;
      b[0] = box[0];
      rmesa->sarea->nbox = 1;
   }

   /* Throttle the frame rate -- only allow a few pending swap buffers
    * request at a time.
    */
   r200WaitForFrameCompletion( rmesa );
   r200WaitForVBlank( rmesa );

   ret = drmCommandNone( rmesa->dri.fd, DRM_RADEON_FLIP );

   UNLOCK_HARDWARE( rmesa );

   if ( ret ) {
      fprintf( stderr, "DRM_R200_FLIP: return = %d\n", ret );
      exit( 1 );
   }

   /* pfCurrentPage was updated by the kernel; draw to the other page. */
   if ( rmesa->sarea->pfCurrentPage == 1 ) {
      rmesa->state.color.drawOffset = rmesa->r200Screen->frontOffset;
      rmesa->state.color.drawPitch = rmesa->r200Screen->frontPitch;
   } else {
      rmesa->state.color.drawOffset = rmesa->r200Screen->backOffset;
      rmesa->state.color.drawPitch = rmesa->r200Screen->backPitch;
   }

   /* Push the new render target into the hardware state atoms. */
   R200_STATECHANGE( rmesa, ctx );
   rmesa->hw.ctx.cmd[CTX_RB3D_COLOROFFSET] = rmesa->state.color.drawOffset;
   rmesa->hw.ctx.cmd[CTX_RB3D_COLORPITCH] = rmesa->state.color.drawPitch;
}
/* ================================================================
 * Buffer clear
 */

/* GL clear entry point.  'mask' selects the buffers; bits the hardware
 * can handle (front/back color, depth, stencil) are cleared via the
 * DRM_RADEON_CLEAR ioctl, everything else falls back to swrast.
 * (cx,cy,cw,ch) is the scissored clear rect; 'all' means full drawable.
 *
 * NOTE(review): 'color_mask' is assigned below but never read in this
 * function -- the ioctl uses the plane mask register directly.
 */
static void r200Clear( GLcontext *ctx, GLbitfield mask, GLboolean all,
		       GLint cx, GLint cy, GLint cw, GLint ch )
{
   r200ContextPtr rmesa = R200_CONTEXT(ctx);
   __DRIdrawablePrivate *dPriv = rmesa->dri.drawable;
   GLuint flags = 0;
   GLuint color_mask = 0;
   GLint ret, i;

   if ( R200_DEBUG & DEBUG_IOCTL ) {
      fprintf( stderr, "%s: all=%d cx=%d cy=%d cw=%d ch=%d\n",
	       __FUNCTION__, all, cx, cy, cw, ch );
   }

   /* Lock/unlock presumably refreshes the cliprect list before it is
    * examined -- TODO confirm against LOCK_HARDWARE semantics.
    */
   {
      LOCK_HARDWARE( rmesa );
      UNLOCK_HARDWARE( rmesa );
      if ( dPriv->numClipRects == 0 )
	 return;
   }

   r200EmitState( rmesa );

   /* Need to cope with lostcontext here as kernel relies on
    * some residual state:
    */
   R200_FIREVERTICES( rmesa );

   /* Translate GL buffer bits into RADEON_* clear flags, removing each
    * bit we can handle from 'mask'.
    */
   if ( mask & DD_FRONT_LEFT_BIT ) {
      flags |= RADEON_FRONT;
      color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
      mask &= ~DD_FRONT_LEFT_BIT;
   }

   if ( mask & DD_BACK_LEFT_BIT ) {
      flags |= RADEON_BACK;
      color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
      mask &= ~DD_BACK_LEFT_BIT;
   }

   if ( mask & DD_DEPTH_BIT ) {
      if ( ctx->Depth.Mask ) flags |= RADEON_DEPTH; /* FIXME: ??? */
      mask &= ~DD_DEPTH_BIT;
   }

   if ( (mask & DD_STENCIL_BIT) && rmesa->state.stencil.hwBuffer ) {
      flags |= RADEON_STENCIL;
      mask &= ~DD_STENCIL_BIT;
   }

   /* Anything left over goes to the software rasterizer. */
   if ( mask )
      _swrast_Clear( ctx, mask, all, cx, cy, cw, ch );

   if ( !flags )
      return;

   /* Flip top to bottom */
   cx += dPriv->x;
   cy  = dPriv->y + dPriv->h - cy - ch;

   LOCK_HARDWARE( rmesa );

   /* Throttle the number of clear ioctls we do.
    */
   while ( 1 ) {
      drmRadeonGetParam gp;
      int ret;
      int clear;

      gp.param = RADEON_PARAM_LAST_CLEAR;
      gp.value = (int *)&clear;
      ret = drmCommandWriteRead( rmesa->dri.fd,
				 DRM_RADEON_GETPARAM, &gp, sizeof(gp) );

      if ( ret ) {
	 fprintf( stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__, ret );
	 exit(1);
      }

      /* Clear throttling needs more thought.
       */
      if ( rmesa->sarea->last_clear - clear <= 25 ) {
	 break;
      }

      if (rmesa->do_usleeps) {
	 UNLOCK_HARDWARE( rmesa );
	 do_usleep(1, __FUNCTION__);
	 LOCK_HARDWARE( rmesa );
      }
   }

   /* Issue one clear ioctl per batch of up to RADEON_NR_SAREA_CLIPRECTS
    * boxes, intersecting each box with the clear rect unless 'all'.
    */
   for ( i = 0 ; i < dPriv->numClipRects ; ) {
      GLint nr = MIN2( i + RADEON_NR_SAREA_CLIPRECTS, dPriv->numClipRects );
      XF86DRIClipRectPtr box = dPriv->pClipRects;
      XF86DRIClipRectPtr b = rmesa->sarea->boxes;
      drmRadeonClearType clear;
      drmRadeonClearRect depth_boxes[RADEON_NR_SAREA_CLIPRECTS];
      GLint n = 0;

      if ( !all ) {
	 for ( ; i < nr ; i++ ) {
	    GLint x = box[i].x1;
	    GLint y = box[i].y1;
	    GLint w = box[i].x2 - x;
	    GLint h = box[i].y2 - y;

	    /* Clip the box against the clear rect; skip empty results. */
	    if ( x < cx ) w -= cx - x, x = cx;
	    if ( y < cy ) h -= cy - y, y = cy;
	    if ( x + w > cx + cw ) w = cx + cw - x;
	    if ( y + h > cy + ch ) h = cy + ch - y;
	    if ( w <= 0 ) continue;
	    if ( h <= 0 ) continue;

	    b->x1 = x;
	    b->y1 = y;
	    b->x2 = x + w;
	    b->y2 = y + h;
	    b++;
	    n++;
	 }
      } else {
	 for ( ; i < nr ; i++ ) {
	    *b++ = box[i];
	    n++;
	 }
      }

      rmesa->sarea->nbox = n;

      clear.flags = flags;
      clear.clear_color = rmesa->state.color.clear;
      clear.clear_depth = 0; /* not used */
      clear.color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
      clear.depth_mask = rmesa->state.stencil.clear;
      clear.depth_boxes = depth_boxes;

      /* Mirror the boxes as floats (plus clear depth) for the kernel. */
      n--;
      b = rmesa->sarea->boxes;
      for ( ; n >= 0 ; n-- ) {
	 depth_boxes[n].f[RADEON_CLEAR_X1] = (float)b[n].x1;
	 depth_boxes[n].f[RADEON_CLEAR_Y1] = (float)b[n].y1;
	 depth_boxes[n].f[RADEON_CLEAR_X2] = (float)b[n].x2;
	 depth_boxes[n].f[RADEON_CLEAR_Y2] = (float)b[n].y2;
	 depth_boxes[n].f[RADEON_CLEAR_DEPTH] = ctx->Depth.Clear;
      }

      ret = drmCommandWrite( rmesa->dri.fd, DRM_RADEON_CLEAR,
			     &clear, sizeof(drmRadeonClearType));

      if ( ret ) {
	 UNLOCK_HARDWARE( rmesa );
	 fprintf( stderr, "DRM_RADEON_CLEAR: return = %d\n", ret );
	 exit( 1 );
      }
   }

   UNLOCK_HARDWARE( rmesa );

   rmesa->lost_context = 1;
}
  540. void r200WaitForIdleLocked( r200ContextPtr rmesa )
  541. {
  542. int ret;
  543. int i = 0;
  544. do {
  545. ret = drmCommandNone( rmesa->dri.fd, DRM_RADEON_CP_IDLE);
  546. if (ret)
  547. do_usleep( 1, __FUNCTION__ );
  548. } while (ret && ++i < 100);
  549. if ( ret < 0 ) {
  550. UNLOCK_HARDWARE( rmesa );
  551. fprintf( stderr, "Error: R200 timed out... exiting\n" );
  552. exit( -1 );
  553. }
  554. }
/* Wait for engine idle, taking the hardware lock around the wait. */
static void r200WaitForIdle( r200ContextPtr rmesa )
{
   LOCK_HARDWARE(rmesa);
   r200WaitForIdleLocked( rmesa );
   UNLOCK_HARDWARE(rmesa);
}
/* Optionally synchronize with vertical blank, controlled by the
 * LIBGL_SYNC_REFRESH / LIBGL_THROTTLE_REFRESH environment variables.
 *
 * NOTE(review): the entire implementation is compiled out (#if 0), so
 * this function is currently a no-op.  The error message inside also
 * misspells "LIBGL_SYNC_REFRESH" as "LIBL_SYNC_REFRESH"; fix when the
 * code is re-enabled.
 */
void r200WaitForVBlank( r200ContextPtr rmesa )
{
#if 0
   drmVBlank vbl;
   int ret;

   if ( !rmesa->r200Screen->irq )
      return;

   if ( getenv("LIBGL_SYNC_REFRESH") ) {
      /* Wait for until the next vertical blank */
      vbl.request.type = DRM_VBLANK_RELATIVE;
      vbl.request.sequence = 1;
   } else if ( getenv("LIBGL_THROTTLE_REFRESH") ) {
      /* Wait for at least one vertical blank since the last call */
      vbl.request.type = DRM_VBLANK_ABSOLUTE;
      vbl.request.sequence = rmesa->vbl_seq + 1;
   } else {
      return;
   }

   UNLOCK_HARDWARE( rmesa );

   if ((ret = drmWaitVBlank( rmesa->dri.fd, &vbl ))) {
      fprintf(stderr, "%s: drmWaitVBlank returned %d, IRQs don't seem to be"
	      " working correctly.\nTry running with LIBGL_THROTTLE_REFRESH"
	      " and LIBL_SYNC_REFRESH unset.\n", __FUNCTION__, ret);
      exit(1);
   } else if (R200_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s: drmWaitVBlank returned %d\n", __FUNCTION__, ret);

   rmesa->vbl_seq = vbl.reply.sequence;

   LOCK_HARDWARE( rmesa );
#endif
}
/* glFlush entry point: run the DMA flush callback, emit any dirty state
 * atoms, and submit the command buffer if it is non-empty.  The three
 * steps are order-dependent (each may append to the command buffer).
 */
void r200Flush( GLcontext *ctx )
{
   r200ContextPtr rmesa = R200_CONTEXT( ctx );

   if (R200_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s\n", __FUNCTION__);

   if (rmesa->dma.flush)
      rmesa->dma.flush( rmesa );

   if (!is_empty_list(&rmesa->hw.dirty))
      r200EmitState( rmesa );

   if (rmesa->store.cmd_used)
      r200FlushCmdBuf( rmesa, __FUNCTION__ );
}
  603. /* Make sure all commands have been sent to the hardware and have
  604. * completed processing.
  605. */
  606. void r200Finish( GLcontext *ctx )
  607. {
  608. r200ContextPtr rmesa = R200_CONTEXT(ctx);
  609. r200Flush( ctx );
  610. if (rmesa->do_irqs) {
  611. LOCK_HARDWARE( rmesa );
  612. r200EmitIrqLocked( rmesa );
  613. UNLOCK_HARDWARE( rmesa );
  614. r200WaitIrq( rmesa );
  615. }
  616. else
  617. r200WaitForIdle( rmesa );
  618. }
  619. /* This version of AllocateMemoryNV allocates only agp memory, and
  620. * only does so after the point at which the driver has been
  621. * initialized.
  622. *
  623. * Theoretically a valid context isn't required. However, in this
  624. * implementation, it is, as I'm using the hardware lock to protect
  625. * the kernel data structures, and the current context to get the
  626. * device fd.
  627. */
  628. void *r200AllocateMemoryNV(GLsizei size, GLfloat readfreq,
  629. GLfloat writefreq, GLfloat priority)
  630. {
  631. GET_CURRENT_CONTEXT(ctx);
  632. r200ContextPtr rmesa;
  633. int region_offset;
  634. drmRadeonMemAlloc alloc;
  635. int ret;
  636. if (R200_DEBUG & DEBUG_IOCTL)
  637. fprintf(stderr, "%s sz %d %f/%f/%f\n", __FUNCTION__, size, readfreq,
  638. writefreq, priority);
  639. if (!ctx || !(rmesa = R200_CONTEXT(ctx)) || rmesa->r200Screen->IsPCI )
  640. return NULL;
  641. if (getenv("R200_NO_ALLOC"))
  642. return NULL;
  643. if (rmesa->dri.drmMinor < 6)
  644. return NULL;
  645. alloc.region = RADEON_MEM_REGION_AGP;
  646. alloc.alignment = 0;
  647. alloc.size = size;
  648. alloc.region_offset = &region_offset;
  649. ret = drmCommandWriteRead( rmesa->r200Screen->driScreen->fd,
  650. DRM_RADEON_ALLOC,
  651. &alloc, sizeof(alloc));
  652. if (ret) {
  653. fprintf(stderr, "%s: DRM_RADEON_ALLOC ret %d\n", __FUNCTION__, ret);
  654. return NULL;
  655. }
  656. {
  657. char *region_start = (char *)rmesa->r200Screen->agpTextures.map;
  658. return (void *)(region_start + region_offset);
  659. }
  660. }
  661. /* Called via glXFreeMemoryNV() */
  662. void r200FreeMemoryNV(GLvoid *pointer)
  663. {
  664. GET_CURRENT_CONTEXT(ctx);
  665. r200ContextPtr rmesa;
  666. int region_offset;
  667. drmRadeonMemFree memfree;
  668. int ret;
  669. if (R200_DEBUG & DEBUG_IOCTL)
  670. fprintf(stderr, "%s %p\n", __FUNCTION__, pointer);
  671. if (!ctx || !(rmesa = R200_CONTEXT(ctx)) || rmesa->r200Screen->IsPCI ) {
  672. fprintf(stderr, "%s: no context\n", __FUNCTION__);
  673. return;
  674. }
  675. if (rmesa->dri.drmMinor < 6)
  676. return;
  677. region_offset = (char *)pointer - (char *)rmesa->r200Screen->agpTextures.map;
  678. if (region_offset < 0 ||
  679. region_offset > rmesa->r200Screen->agpTextures.size) {
  680. fprintf(stderr, "offset %d outside range 0..%d\n", region_offset,
  681. rmesa->r200Screen->agpTextures.size);
  682. return;
  683. }
  684. memfree.region = RADEON_MEM_REGION_AGP;
  685. memfree.region_offset = region_offset;
  686. ret = drmCommandWrite( rmesa->r200Screen->driScreen->fd,
  687. DRM_RADEON_FREE,
  688. &memfree, sizeof(memfree));
  689. if (ret)
  690. fprintf(stderr, "%s: DRM_RADEON_FREE ret %d\n", __FUNCTION__, ret);
  691. }
  692. /* Called via glXGetAGPOffsetMESA() */
  693. GLuint r200GetAGPOffset(const GLvoid *pointer)
  694. {
  695. GET_CURRENT_CONTEXT(ctx);
  696. r200ContextPtr rmesa;
  697. GLuint card_offset;
  698. if (!ctx || !(rmesa = R200_CONTEXT(ctx)) ) {
  699. fprintf(stderr, "%s: no context\n", __FUNCTION__);
  700. return ~0;
  701. }
  702. if (!r200IsAgpMemory( rmesa, pointer, 0 ))
  703. return ~0;
  704. if (rmesa->dri.drmMinor < 6)
  705. return ~0;
  706. card_offset = r200AgpOffsetFromVirtual( rmesa, pointer );
  707. return card_offset - rmesa->r200Screen->agp_base;
  708. }
  709. GLboolean r200IsAgpMemory( r200ContextPtr rmesa, const GLvoid *pointer,
  710. GLint size )
  711. {
  712. int offset = (char *)pointer - (char *)rmesa->r200Screen->agpTextures.map;
  713. int valid = (size >= 0 &&
  714. offset >= 0 &&
  715. offset + size < rmesa->r200Screen->agpTextures.size);
  716. if (R200_DEBUG & DEBUG_IOCTL)
  717. fprintf(stderr, "r200IsAgpMemory( %p ) : %d\n", pointer, valid );
  718. return valid;
  719. }
  720. GLuint r200AgpOffsetFromVirtual( r200ContextPtr rmesa, const GLvoid *pointer )
  721. {
  722. int offset = (char *)pointer - (char *)rmesa->r200Screen->agpTextures.map;
  723. if (offset < 0 || offset > rmesa->r200Screen->agpTextures.size)
  724. return ~0;
  725. else
  726. return rmesa->r200Screen->agp_texture_offset + offset;
  727. }
  728. void r200InitIoctlFuncs( GLcontext *ctx )
  729. {
  730. ctx->Driver.Clear = r200Clear;
  731. ctx->Driver.Finish = r200Finish;
  732. ctx->Driver.Flush = r200Flush;
  733. }