Clone of mesa.
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

r300_render.c 42KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234
  1. /*
  2. * Copyright 2009 Corbin Simpson <MostAwesomeDude@gmail.com>
  3. * Copyright 2010 Marek Olšák <maraeo@gmail.com>
  4. *
  5. * Permission is hereby granted, free of charge, to any person obtaining a
  6. * copy of this software and associated documentation files (the "Software"),
  7. * to deal in the Software without restriction, including without limitation
  8. * on the rights to use, copy, modify, merge, publish, distribute, sub
  9. * license, and/or sell copies of the Software, and to permit persons to whom
  10. * the Software is furnished to do so, subject to the following conditions:
  11. *
  12. * The above copyright notice and this permission notice (including the next
  13. * paragraph) shall be included in all copies or substantial portions of the
  14. * Software.
  15. *
  16. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18. * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  19. * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
  20. * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  21. * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  22. * USE OR OTHER DEALINGS IN THE SOFTWARE. */
  23. /* r300_render: Vertex and index buffer primitive emission. Contains both
  24. * HW TCL fastpath rendering, and SW TCL Draw-assisted rendering. */
  25. #include "draw/draw_context.h"
  26. #include "draw/draw_vbuf.h"
  27. #include "util/u_inlines.h"
  28. #include "util/u_format.h"
  29. #include "util/u_memory.h"
  30. #include "util/u_upload_mgr.h"
  31. #include "util/u_prim.h"
  32. #include "r300_cs.h"
  33. #include "r300_context.h"
  34. #include "r300_screen_buffer.h"
  35. #include "r300_emit.h"
  36. #include "r300_reg.h"
  37. #include <limits.h>
  38. #define IMMD_DWORDS 32
  39. static uint32_t r300_translate_primitive(unsigned prim)
  40. {
  41. switch (prim) {
  42. case PIPE_PRIM_POINTS:
  43. return R300_VAP_VF_CNTL__PRIM_POINTS;
  44. case PIPE_PRIM_LINES:
  45. return R300_VAP_VF_CNTL__PRIM_LINES;
  46. case PIPE_PRIM_LINE_LOOP:
  47. return R300_VAP_VF_CNTL__PRIM_LINE_LOOP;
  48. case PIPE_PRIM_LINE_STRIP:
  49. return R300_VAP_VF_CNTL__PRIM_LINE_STRIP;
  50. case PIPE_PRIM_TRIANGLES:
  51. return R300_VAP_VF_CNTL__PRIM_TRIANGLES;
  52. case PIPE_PRIM_TRIANGLE_STRIP:
  53. return R300_VAP_VF_CNTL__PRIM_TRIANGLE_STRIP;
  54. case PIPE_PRIM_TRIANGLE_FAN:
  55. return R300_VAP_VF_CNTL__PRIM_TRIANGLE_FAN;
  56. case PIPE_PRIM_QUADS:
  57. return R300_VAP_VF_CNTL__PRIM_QUADS;
  58. case PIPE_PRIM_QUAD_STRIP:
  59. return R300_VAP_VF_CNTL__PRIM_QUAD_STRIP;
  60. case PIPE_PRIM_POLYGON:
  61. return R300_VAP_VF_CNTL__PRIM_POLYGON;
  62. default:
  63. return 0;
  64. }
  65. }
  66. static uint32_t r300_provoking_vertex_fixes(struct r300_context *r300,
  67. unsigned mode)
  68. {
  69. struct r300_rs_state* rs = (struct r300_rs_state*)r300->rs_state.state;
  70. uint32_t color_control = rs->color_control;
  71. /* By default (see r300_state.c:r300_create_rs_state) color_control is
  72. * initialized to provoking the first vertex.
  73. *
  74. * Triangle fans must be reduced to the second vertex, not the first, in
  75. * Gallium flatshade-first mode, as per the GL spec.
  76. * (http://www.opengl.org/registry/specs/ARB/provoking_vertex.txt)
  77. *
  78. * Quads never provoke correctly in flatshade-first mode. The first
  79. * vertex is never considered as provoking, so only the second, third,
  80. * and fourth vertices can be selected, and both "third" and "last" modes
  81. * select the fourth vertex. This is probably due to D3D lacking quads.
  82. *
  83. * Similarly, polygons reduce to the first, not the last, vertex, when in
  84. * "last" mode, and all other modes start from the second vertex.
  85. *
  86. * ~ C.
  87. */
  88. if (rs->rs.flatshade_first) {
  89. switch (mode) {
  90. case PIPE_PRIM_TRIANGLE_FAN:
  91. color_control |= R300_GA_COLOR_CONTROL_PROVOKING_VERTEX_SECOND;
  92. break;
  93. case PIPE_PRIM_QUADS:
  94. case PIPE_PRIM_QUAD_STRIP:
  95. case PIPE_PRIM_POLYGON:
  96. color_control |= R300_GA_COLOR_CONTROL_PROVOKING_VERTEX_LAST;
  97. break;
  98. default:
  99. color_control |= R300_GA_COLOR_CONTROL_PROVOKING_VERTEX_FIRST;
  100. break;
  101. }
  102. } else {
  103. color_control |= R300_GA_COLOR_CONTROL_PROVOKING_VERTEX_LAST;
  104. }
  105. return color_control;
  106. }
/* Emit the R500 hardware index-offset register used to apply an index bias.
 * The low 24 bits carry the offset; bit 24 marks a negative bias. */
void r500_emit_index_bias(struct r300_context *r300, int index_bias)
{
    CS_LOCALS(r300);

    BEGIN_CS(2);
    OUT_CS_REG(R500_VAP_INDEX_OFFSET,
               (index_bias & 0xFFFFFF) | (index_bias < 0 ? 1<<24 : 0));
    END_CS;
}
/* Emit per-draw setup shared by all HW draw paths: the provoking-vertex
 * fixups for this primitive mode and the min/max vertex index range. */
static void r300_emit_draw_init(struct r300_context *r300, unsigned mode,
                                unsigned min_index, unsigned max_index)
{
    CS_LOCALS(r300);

    BEGIN_CS(5);
    OUT_CS_REG(R300_GA_COLOR_CONTROL,
               r300_provoking_vertex_fixes(r300, mode));
    /* Register pair: MAX_VTX_INDX is written first, then MIN_VTX_INDX. */
    OUT_CS_REG_SEQ(R300_VAP_VF_MAX_VTX_INDX, 2);
    OUT_CS(max_index);
    OUT_CS(min_index);
    END_CS;
}
  127. /* This function splits the index bias value into two parts:
  128. * - buffer_offset: the value that can be safely added to buffer offsets
  129. * in r300_emit_vertex_arrays (it must yield a positive offset when added to
  130. * a vertex buffer offset)
  131. * - index_offset: the value that must be manually subtracted from indices
  132. * in an index buffer to achieve negative offsets. */
  133. static void r300_split_index_bias(struct r300_context *r300, int index_bias,
  134. int *buffer_offset, int *index_offset)
  135. {
  136. struct pipe_vertex_buffer *vb, *vbufs = r300->vbuf_mgr->vertex_buffer;
  137. struct pipe_vertex_element *velem = r300->velems->velem;
  138. unsigned i, size;
  139. int max_neg_bias;
  140. if (index_bias < 0) {
  141. /* See how large index bias we may subtract. We must be careful
  142. * here because negative buffer offsets are not allowed
  143. * by the DRM API. */
  144. max_neg_bias = INT_MAX;
  145. for (i = 0; i < r300->velems->count; i++) {
  146. vb = &vbufs[velem[i].vertex_buffer_index];
  147. size = (vb->buffer_offset + velem[i].src_offset) / vb->stride;
  148. max_neg_bias = MIN2(max_neg_bias, size);
  149. }
  150. /* Now set the minimum allowed value. */
  151. *buffer_offset = MAX2(-max_neg_bias, index_bias);
  152. } else {
  153. /* A positive index bias is OK. */
  154. *buffer_offset = index_bias;
  155. }
  156. *index_offset = index_bias - *buffer_offset;
  157. }
/* Flags telling r300_reserve_cs_dwords/r300_emit_states what work has to be
 * done before a draw call can be emitted. */
enum r300_prepare_flags {
    PREP_FIRST_DRAW = (1 << 0), /* call emit_dirty_state and friends? */
    PREP_VALIDATE_VBOS = (1 << 1), /* validate VBOs? */
    PREP_EMIT_AOS = (1 << 2), /* call emit_vertex_arrays? */
    PREP_EMIT_AOS_SWTCL = (1 << 3), /* call emit_vertex_arrays_swtcl? */
    PREP_INDEXED = (1 << 4) /* is this draw_elements? */
};
  165. /**
  166. * Check if the requested number of dwords is available in the CS and
  167. * if not, flush.
  168. * \param r300 The context.
  169. * \param flags See r300_prepare_flags.
  170. * \param cs_dwords The number of dwords to reserve in CS.
  171. * \return TRUE if the CS was flushed
  172. */
  173. static boolean r300_reserve_cs_dwords(struct r300_context *r300,
  174. enum r300_prepare_flags flags,
  175. unsigned cs_dwords)
  176. {
  177. boolean flushed = FALSE;
  178. boolean first_draw = flags & PREP_FIRST_DRAW;
  179. boolean emit_vertex_arrays = flags & PREP_EMIT_AOS;
  180. boolean emit_vertex_arrays_swtcl = flags & PREP_EMIT_AOS_SWTCL;
  181. /* Add dirty state, index offset, and AOS. */
  182. if (first_draw) {
  183. cs_dwords += r300_get_num_dirty_dwords(r300);
  184. if (r300->screen->caps.index_bias_supported)
  185. cs_dwords += 2; /* emit_index_offset */
  186. if (emit_vertex_arrays)
  187. cs_dwords += 55; /* emit_vertex_arrays */
  188. if (emit_vertex_arrays_swtcl)
  189. cs_dwords += 7; /* emit_vertex_arrays_swtcl */
  190. }
  191. cs_dwords += r300_get_num_cs_end_dwords(r300);
  192. /* Reserve requested CS space. */
  193. if (cs_dwords > (R300_MAX_CMDBUF_DWORDS - r300->cs->cdw)) {
  194. r300->context.flush(&r300->context, 0, NULL);
  195. flushed = TRUE;
  196. }
  197. return flushed;
  198. }
/**
 * Validate buffers and emit dirty state.
 * \param r300 The context.
 * \param flags See r300_prepare_flags.
 * \param index_buffer The index buffer to validate. The parameter may be NULL.
 * \param buffer_offset The offset passed to emit_vertex_arrays.
 * \param index_bias The index bias to emit.
 * \param user_buffers Whether user vertex buffers were uploaded for this draw.
 * \return FALSE if rendering should be skipped (validation failed),
 *         TRUE otherwise.
 */
static boolean r300_emit_states(struct r300_context *r300,
                                enum r300_prepare_flags flags,
                                struct pipe_resource *index_buffer,
                                int buffer_offset,
                                int index_bias,
                                boolean user_buffers)
{
    boolean first_draw = flags & PREP_FIRST_DRAW;
    boolean emit_vertex_arrays = flags & PREP_EMIT_AOS;
    boolean emit_vertex_arrays_swtcl = flags & PREP_EMIT_AOS_SWTCL;
    boolean indexed = flags & PREP_INDEXED;
    boolean validate_vbos = flags & PREP_VALIDATE_VBOS;

    /* Validate buffers and emit dirty state if needed. */
    if (first_draw) {
        if (r300->validate_buffers) {
            if (!r300_emit_buffer_validate(r300, validate_vbos,
                                           index_buffer)) {
                fprintf(stderr, "r300: CS space validation failed. "
                        "(not enough memory?) Skipping rendering.\n");
                return FALSE;
            }

            /* Consider the validation done only if everything was validated. */
            if (validate_vbos) {
                r300->validate_buffers = FALSE;
                if (user_buffers)
                    r300->upload_vb_validated = TRUE;
                if (r300->index_buffer.buffer &&
                    r300_resource(r300->index_buffer.buffer)->b.user_ptr) {
                    r300->upload_ib_validated = TRUE;
                }
            }
        }

        r300_emit_dirty_state(r300);

        /* Without HW TCL the bias register is meaningless; write 0. */
        if (r300->screen->caps.index_bias_supported) {
            if (r300->screen->caps.has_tcl)
                r500_emit_index_bias(r300, index_bias);
            else
                r500_emit_index_bias(r300, 0);
        }

        /* Re-emit vertex arrays only if something relevant changed since
         * the last draw (dirtiness, indexed-ness or the buffer offset). */
        if (emit_vertex_arrays &&
            (r300->vertex_arrays_dirty ||
             r300->vertex_arrays_indexed != indexed ||
             r300->vertex_arrays_offset != buffer_offset)) {
            r300_emit_vertex_arrays(r300, buffer_offset, indexed);
            r300->vertex_arrays_dirty = FALSE;
            r300->vertex_arrays_indexed = indexed;
            r300->vertex_arrays_offset = buffer_offset;
        }

        if (emit_vertex_arrays_swtcl)
            r300_emit_vertex_arrays_swtcl(r300, indexed);
    }

    return TRUE;
}
  261. /**
  262. * Check if the requested number of dwords is available in the CS and
  263. * if not, flush. Then validate buffers and emit dirty state.
  264. * \param r300 The context.
  265. * \param flags See r300_prepare_flags.
  266. * \param index_buffer The index buffer to validate. The parameter may be NULL.
  267. * \param cs_dwords The number of dwords to reserve in CS.
  268. * \param buffer_offset The offset passed to emit_vertex_arrays.
  269. * \param index_bias The index bias to emit.
  270. * \return TRUE if rendering should be skipped
  271. */
  272. static boolean r300_prepare_for_rendering(struct r300_context *r300,
  273. enum r300_prepare_flags flags,
  274. struct pipe_resource *index_buffer,
  275. unsigned cs_dwords,
  276. int buffer_offset,
  277. int index_bias,
  278. boolean user_buffers)
  279. {
  280. if (r300_reserve_cs_dwords(r300, flags, cs_dwords))
  281. flags |= PREP_FIRST_DRAW;
  282. return r300_emit_states(r300, flags, index_buffer, buffer_offset,
  283. index_bias, user_buffers);
  284. }
  285. static boolean immd_is_good_idea(struct r300_context *r300,
  286. unsigned count)
  287. {
  288. struct pipe_vertex_element* velem;
  289. struct pipe_resource *buf;
  290. boolean checked[PIPE_MAX_ATTRIBS] = {0};
  291. unsigned vertex_element_count = r300->velems->count;
  292. unsigned i, vbi;
  293. if (DBG_ON(r300, DBG_NO_IMMD)) {
  294. return FALSE;
  295. }
  296. if (r300->draw) {
  297. return FALSE;
  298. }
  299. if (count * r300->velems->vertex_size_dwords > IMMD_DWORDS) {
  300. return FALSE;
  301. }
  302. /* We shouldn't map buffers referenced by CS, busy buffers,
  303. * and ones placed in VRAM. */
  304. for (i = 0; i < vertex_element_count; i++) {
  305. velem = &r300->velems->velem[i];
  306. vbi = velem->vertex_buffer_index;
  307. if (!checked[vbi]) {
  308. buf = r300->vbuf_mgr->real_vertex_buffer[vbi];
  309. if ((r300_resource(buf)->domain != R300_DOMAIN_GTT)) {
  310. return FALSE;
  311. }
  312. checked[vbi] = TRUE;
  313. }
  314. }
  315. return TRUE;
  316. }
  317. /*****************************************************************************
  318. * The HWTCL draw functions. *
  319. ****************************************************************************/
/* Draw a small non-indexed primitive with the vertex data embedded directly
 * in the command stream (immediate mode). Only used when immd_is_good_idea()
 * approved the draw: all source buffers are in GTT and the total payload
 * fits in IMMD_DWORDS. */
static void r300_draw_arrays_immediate(struct r300_context *r300,
                                       unsigned mode, unsigned start,
                                       unsigned count)
{
    struct pipe_vertex_element* velem;
    struct pipe_vertex_buffer* vbuf;
    unsigned vertex_element_count = r300->velems->count;
    unsigned i, v, vbi;

    /* Size of the vertex, in dwords. */
    unsigned vertex_size = r300->velems->vertex_size_dwords;

    /* The number of dwords for this draw operation:
     * 2 for VAP_VTX_SIZE + 1 packet header + 1 VF_CNTL + the vertex data. */
    unsigned dwords = 4 + count * vertex_size;

    /* Size of the vertex element, in dwords. */
    unsigned size[PIPE_MAX_ATTRIBS];

    /* Stride to the same attrib in the next vertex in the vertex buffer,
     * in dwords. */
    unsigned stride[PIPE_MAX_ATTRIBS];

    /* Mapped vertex buffers. */
    uint32_t* map[PIPE_MAX_ATTRIBS];
    uint32_t* mapelem[PIPE_MAX_ATTRIBS];
    struct pipe_transfer* transfer[PIPE_MAX_ATTRIBS] = {0};

    CS_LOCALS(r300);

    if (!r300_prepare_for_rendering(r300, PREP_FIRST_DRAW, NULL, dwords, 0, 0,
                                    FALSE))
        return;

    /* Calculate the vertex size, offsets, strides etc. and map the buffers. */
    for (i = 0; i < vertex_element_count; i++) {
        velem = &r300->velems->velem[i];
        size[i] = r300->velems->format_size[i] / 4;
        vbi = velem->vertex_buffer_index;
        vbuf = &r300->vbuf_mgr->vertex_buffer[vbi];
        stride[i] = vbuf->stride / 4;

        /* Map the buffer (each buffer is mapped at most once, then shared by
         * all elements that source from it). */
        if (!transfer[vbi]) {
            map[vbi] = (uint32_t*)pipe_buffer_map(&r300->context,
                                                  r300->vbuf_mgr->real_vertex_buffer[vbi],
                                                  PIPE_TRANSFER_READ |
                                                  PIPE_TRANSFER_UNSYNCHRONIZED,
                                                  &transfer[vbi]);
            /* Advance to the first vertex of this draw. */
            map[vbi] += (vbuf->buffer_offset / 4) + stride[i] * start;
        }
        mapelem[i] = map[vbi] + (velem->src_offset / 4);
    }

    r300_emit_draw_init(r300, mode, 0, count-1);

    BEGIN_CS(dwords);
    OUT_CS_REG(R300_VAP_VTX_SIZE, vertex_size);
    OUT_CS_PKT3(R300_PACKET3_3D_DRAW_IMMD_2, count * vertex_size);
    OUT_CS(R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_EMBEDDED | (count << 16) |
           r300_translate_primitive(mode));

    /* Emit vertices, interleaving the elements of each vertex. */
    for (v = 0; v < count; v++) {
        for (i = 0; i < vertex_element_count; i++) {
            OUT_CS_TABLE(&mapelem[i][stride[i] * v], size[i]);
        }
    }
    END_CS;

    /* Unmap buffers. */
    for (i = 0; i < vertex_element_count; i++) {
        vbi = r300->velems->velem[i].vertex_buffer_index;

        if (transfer[vbi]) {
            pipe_buffer_unmap(&r300->context, transfer[vbi]);
            transfer[vbi] = NULL;
        }
    }
}
  385. static void r300_emit_draw_arrays(struct r300_context *r300,
  386. unsigned mode,
  387. unsigned count)
  388. {
  389. boolean alt_num_verts = count > 65535;
  390. CS_LOCALS(r300);
  391. if (count >= (1 << 24)) {
  392. fprintf(stderr, "r300: Got a huge number of vertices: %i, "
  393. "refusing to render.\n", count);
  394. return;
  395. }
  396. r300_emit_draw_init(r300, mode, 0, count-1);
  397. BEGIN_CS(2 + (alt_num_verts ? 2 : 0));
  398. if (alt_num_verts) {
  399. OUT_CS_REG(R500_VAP_ALT_NUM_VERTICES, count);
  400. }
  401. OUT_CS_PKT3(R300_PACKET3_3D_DRAW_VBUF_2, 0);
  402. OUT_CS(R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_LIST | (count << 16) |
  403. r300_translate_primitive(mode) |
  404. (alt_num_verts ? R500_VAP_VF_CNTL__USE_ALT_NUM_VERTS : 0));
  405. END_CS;
  406. }
  407. static void r300_emit_draw_elements(struct r300_context *r300,
  408. struct pipe_resource* indexBuffer,
  409. unsigned indexSize,
  410. unsigned minIndex,
  411. unsigned maxIndex,
  412. unsigned mode,
  413. unsigned start,
  414. unsigned count,
  415. uint16_t *imm_indices3)
  416. {
  417. uint32_t count_dwords, offset_dwords;
  418. boolean alt_num_verts = count > 65535;
  419. CS_LOCALS(r300);
  420. if (count >= (1 << 24) || maxIndex >= (1 << 24)) {
  421. fprintf(stderr, "r300: Got a huge number of vertices: %i, "
  422. "refusing to render (maxIndex: %i).\n", count, maxIndex);
  423. return;
  424. }
  425. DBG(r300, DBG_DRAW, "r300: Indexbuf of %u indices, min %u max %u\n",
  426. count, minIndex, maxIndex);
  427. r300_emit_draw_init(r300, mode, minIndex, maxIndex);
  428. /* If start is odd, render the first triangle with indices embedded
  429. * in the command stream. This will increase start by 3 and make it
  430. * even. We can then proceed without a fallback. */
  431. if (indexSize == 2 && (start & 1) &&
  432. mode == PIPE_PRIM_TRIANGLES) {
  433. BEGIN_CS(4);
  434. OUT_CS_PKT3(R300_PACKET3_3D_DRAW_INDX_2, 2);
  435. OUT_CS(R300_VAP_VF_CNTL__PRIM_WALK_INDICES | (3 << 16) |
  436. R300_VAP_VF_CNTL__PRIM_TRIANGLES);
  437. OUT_CS(imm_indices3[1] << 16 | imm_indices3[0]);
  438. OUT_CS(imm_indices3[2]);
  439. END_CS;
  440. start += 3;
  441. count -= 3;
  442. if (!count)
  443. return;
  444. }
  445. offset_dwords = indexSize * start / sizeof(uint32_t);
  446. BEGIN_CS(8 + (alt_num_verts ? 2 : 0));
  447. if (alt_num_verts) {
  448. OUT_CS_REG(R500_VAP_ALT_NUM_VERTICES, count);
  449. }
  450. OUT_CS_PKT3(R300_PACKET3_3D_DRAW_INDX_2, 0);
  451. if (indexSize == 4) {
  452. count_dwords = count;
  453. OUT_CS(R300_VAP_VF_CNTL__PRIM_WALK_INDICES | (count << 16) |
  454. R300_VAP_VF_CNTL__INDEX_SIZE_32bit |
  455. r300_translate_primitive(mode) |
  456. (alt_num_verts ? R500_VAP_VF_CNTL__USE_ALT_NUM_VERTS : 0));
  457. } else {
  458. count_dwords = (count + 1) / 2;
  459. OUT_CS(R300_VAP_VF_CNTL__PRIM_WALK_INDICES | (count << 16) |
  460. r300_translate_primitive(mode) |
  461. (alt_num_verts ? R500_VAP_VF_CNTL__USE_ALT_NUM_VERTS : 0));
  462. }
  463. OUT_CS_PKT3(R300_PACKET3_INDX_BUFFER, 2);
  464. OUT_CS(R300_INDX_BUFFER_ONE_REG_WR | (R300_VAP_PORT_IDX0 >> 2) |
  465. (0 << R300_INDX_BUFFER_SKIP_SHIFT));
  466. OUT_CS(offset_dwords << 2);
  467. OUT_CS(count_dwords);
  468. OUT_CS_RELOC(r300_resource(indexBuffer));
  469. END_CS;
  470. }
/* Draw an indexed primitive range. Handles index-bias emulation for pre-R500
 * chips, the misaligned-ushort-start fallback, user index buffer uploads and
 * splitting draws that exceed the 16-bit vertex count limit. */
static void r300_draw_elements(struct r300_context *r300, int indexBias,
                               unsigned minIndex, unsigned maxIndex,
                               unsigned mode, unsigned start, unsigned count,
                               boolean user_buffers)
{
    struct pipe_resource *indexBuffer = r300->index_buffer.buffer;
    unsigned indexSize = r300->index_buffer.index_size;
    struct pipe_resource* orgIndexBuffer = indexBuffer;
    boolean alt_num_verts = r300->screen->caps.is_r500 &&
                            count > 65536 &&
                            r300->rws->get_value(r300->rws, R300_VID_DRM_2_3_0);
    unsigned short_count;
    int buffer_offset = 0, index_offset = 0; /* for index bias emulation */
    uint16_t indices3[3];

    /* Without HW index bias support, fold the bias into buffer offsets and
     * (for the remainder) into the indices themselves. */
    if (indexBias && !r300->screen->caps.index_bias_supported) {
        r300_split_index_bias(r300, indexBias, &buffer_offset, &index_offset);
    }

    r300_translate_index_buffer(r300, &indexBuffer, &indexSize, index_offset,
                                &start, count);

    /* Fallback for misaligned ushort indices. */
    if (indexSize == 2 && (start & 1) &&
        !r300_resource(indexBuffer)->b.user_ptr) {
        struct pipe_transfer *transfer;
        uint16_t *ptr = pipe_buffer_map(&r300->context, indexBuffer,
                                        PIPE_TRANSFER_READ |
                                        PIPE_TRANSFER_UNSYNCHRONIZED,
                                        &transfer);

        if (mode == PIPE_PRIM_TRIANGLES) {
            /* Save the first triangle; emit_draw_elements will emit it
             * inline, leaving an even start index. */
            memcpy(indices3, ptr + start, 6);
        } else {
            /* Copy the mapped index buffer directly to the upload buffer.
             * The start index will be aligned simply from the fact that
             * every sub-buffer in the upload buffer is aligned. */
            r300_upload_index_buffer(r300, &indexBuffer, indexSize, &start,
                                     count, (uint8_t*)ptr);
        }
        pipe_buffer_unmap(&r300->context, transfer);
    } else {
        /* User-memory index buffers must be uploaded before use. */
        if (r300_resource(indexBuffer)->b.user_ptr)
            r300_upload_index_buffer(r300, &indexBuffer, indexSize,
                                     &start, count,
                                     r300_resource(indexBuffer)->b.user_ptr);
    }

    /* 19 dwords for emit_draw_elements. Give up if the function fails. */
    if (!r300_prepare_for_rendering(r300,
            PREP_FIRST_DRAW | PREP_VALIDATE_VBOS | PREP_EMIT_AOS |
            PREP_INDEXED, indexBuffer, 19, buffer_offset, indexBias,
            user_buffers))
        goto done;

    if (alt_num_verts || count <= 65535) {
        r300_emit_draw_elements(r300, indexBuffer, indexSize,
                                minIndex, maxIndex, mode, start, count, indices3);
    } else {
        /* Split the draw into <=16-bit chunks; keep start even across
         * chunks so the odd-start fallback isn't re-triggered. */
        do {
            if (indexSize == 2 && (start & 1))
                short_count = MIN2(count, 65535);
            else
                short_count = MIN2(count, 65534);

            r300_emit_draw_elements(r300, indexBuffer, indexSize,
                                    minIndex, maxIndex,
                                    mode, start, short_count, indices3);

            start += short_count;
            count -= short_count;

            /* 19 dwords for emit_draw_elements */
            if (count) {
                if (!r300_prepare_for_rendering(r300,
                        PREP_VALIDATE_VBOS | PREP_EMIT_AOS | PREP_INDEXED,
                        indexBuffer, 19, buffer_offset, indexBias, user_buffers))
                    goto done;
            }
        } while (count);
    }

done:
    /* Drop the reference created by translation/upload, if any. */
    if (indexBuffer != orgIndexBuffer) {
        pipe_resource_reference( &indexBuffer, NULL );
    }
}
  548. static void r300_draw_arrays(struct r300_context *r300, unsigned mode,
  549. unsigned start, unsigned count,
  550. boolean user_buffers)
  551. {
  552. boolean alt_num_verts = r300->screen->caps.is_r500 &&
  553. count > 65536 &&
  554. r300->rws->get_value(r300->rws, R300_VID_DRM_2_3_0);
  555. unsigned short_count;
  556. /* 9 spare dwords for emit_draw_arrays. Give up if the function fails. */
  557. if (!r300_prepare_for_rendering(r300,
  558. PREP_FIRST_DRAW | PREP_VALIDATE_VBOS | PREP_EMIT_AOS,
  559. NULL, 9, start, 0, user_buffers))
  560. return;
  561. if (alt_num_verts || count <= 65535) {
  562. r300_emit_draw_arrays(r300, mode, count);
  563. } else {
  564. do {
  565. short_count = MIN2(count, 65535);
  566. r300_emit_draw_arrays(r300, mode, short_count);
  567. start += short_count;
  568. count -= short_count;
  569. /* 9 spare dwords for emit_draw_arrays. Give up if the function fails. */
  570. if (count) {
  571. if (!r300_prepare_for_rendering(r300,
  572. PREP_VALIDATE_VBOS | PREP_EMIT_AOS, NULL, 9,
  573. start, 0, user_buffers))
  574. return;
  575. }
  576. } while (count);
  577. }
  578. }
/* pipe_context::draw_vbo entry point for the HW TCL path. Trims degenerate
 * primitives, runs the vbuf manager, then dispatches to the indexed,
 * immediate or array draw path. */
static void r300_draw_vbo(struct pipe_context* pipe,
                          const struct pipe_draw_info *info)
{
    struct r300_context* r300 = r300_context(pipe);
    unsigned count = info->count;
    boolean buffers_updated, uploader_flushed;
    boolean indexed = info->indexed && r300->index_buffer.buffer;
    unsigned start_indexed = info->start + r300->index_buffer.offset;
    int max_index = MIN2(r300->vbuf_mgr->max_index, info->max_index);

    /* Skip if rendering is impossible or the trimmed count is zero. */
    if (r300->skip_rendering ||
        !u_trim_pipe_prim(info->mode, &count)) {
        return;
    }

    /* Start the vbuf manager and update buffers if needed. */
    u_vbuf_mgr_draw_begin(r300->vbuf_mgr, info,
                          &buffers_updated, &uploader_flushed);
    if (buffers_updated) {
        r300->vertex_arrays_dirty = TRUE;

        if (uploader_flushed || !r300->upload_vb_validated) {
            r300->upload_vb_validated = FALSE;
            r300->validate_buffers = TRUE;
        }
    } else {
        r300->upload_vb_validated = FALSE;
    }

    /* Draw. */
    r300_update_derived_state(r300);

    if (indexed) {
        r300_draw_elements(r300, info->index_bias, info->min_index,
                           max_index, info->mode, start_indexed, count,
                           buffers_updated);
    } else {
        if (immd_is_good_idea(r300, count)) {
            r300_draw_arrays_immediate(r300, info->mode, info->start, count);
        } else {
            r300_draw_arrays(r300, info->mode, info->start, count,
                             buffers_updated);
        }
    }

    u_vbuf_mgr_draw_end(r300->vbuf_mgr);
}
  620. /****************************************************************************
  621. * The rest of this file is for SW TCL rendering only. Please be polite and *
  622. * keep these functions separated so that they are easier to locate. ~C. *
  623. ***************************************************************************/
  624. /* SW TCL elements, using Draw. */
/* pipe_context::draw_vbo entry point for the SW TCL path: maps the vertex
 * (and index) buffers and hands the draw to the Draw module. */
static void r300_swtcl_draw_vbo(struct pipe_context* pipe,
                                const struct pipe_draw_info *info)
{
    struct r300_context* r300 = r300_context(pipe);
    struct pipe_transfer *vb_transfer[PIPE_MAX_ATTRIBS];
    struct pipe_transfer *ib_transfer = NULL;
    unsigned count = info->count;
    int i;
    void *indices = NULL;
    boolean indexed = info->indexed && r300->index_buffer.buffer;

    if (r300->skip_rendering) {
        return;
    }

    if (!u_trim_pipe_prim(info->mode, &count)) {
        return;
    }

    r300_update_derived_state(r300);

    /* Reserve CS space up front; Draw emits through the vbuf_render
     * callbacks below. */
    r300_reserve_cs_dwords(r300,
            PREP_FIRST_DRAW | PREP_EMIT_AOS_SWTCL |
            (indexed ? PREP_INDEXED : 0),
            indexed ? 256 : 6);

    /* Map all bound vertex buffers and pass the pointers to Draw. */
    for (i = 0; i < r300->vbuf_mgr->nr_vertex_buffers; i++) {
        if (r300->vbuf_mgr->vertex_buffer[i].buffer) {
            void *buf = pipe_buffer_map(pipe,
                                        r300->vbuf_mgr->vertex_buffer[i].buffer,
                                        PIPE_TRANSFER_READ |
                                        PIPE_TRANSFER_UNSYNCHRONIZED,
                                        &vb_transfer[i]);
            draw_set_mapped_vertex_buffer(r300->draw, i, buf);
        }
    }

    if (indexed) {
        indices = pipe_buffer_map(pipe, r300->index_buffer.buffer,
                                  PIPE_TRANSFER_READ |
                                  PIPE_TRANSFER_UNSYNCHRONIZED, &ib_transfer);
    }

    draw_set_mapped_index_buffer(r300->draw, indices);

    r300->draw_vbo_locked = TRUE;
    r300->draw_first_emitted = FALSE;
    draw_vbo(r300->draw, info);
    draw_flush(r300->draw);
    r300->draw_vbo_locked = FALSE;

    /* Unmap everything and detach the pointers from Draw. */
    for (i = 0; i < r300->vbuf_mgr->nr_vertex_buffers; i++) {
        if (r300->vbuf_mgr->vertex_buffer[i].buffer) {
            pipe_buffer_unmap(pipe, vb_transfer[i]);
            draw_set_mapped_vertex_buffer(r300->draw, i, NULL);
        }
    }

    if (indexed) {
        pipe_buffer_unmap(pipe, ib_transfer);
        draw_set_mapped_index_buffer(r300->draw, NULL);
    }
}
/* Object for rendering using Draw. Subclasses vbuf_render so that the Draw
 * module can emit SW-TCL'd vertices through our callbacks. */
struct r300_render {
    /* Parent class */
    struct vbuf_render base;

    /* Pipe context */
    struct r300_context* r300;

    /* Vertex information */
    size_t vertex_size;      /* size of one vertex, in bytes */
    unsigned prim;           /* Gallium primitive type */
    unsigned hwprim;         /* translated hardware primitive code */

    /* VBO */
    size_t vbo_max_used;     /* high-water mark of bytes written to the VBO */
    void * vbo_ptr;          /* mapped pointer while the VBO is mapped */
    struct pipe_transfer *vbo_transfer;  /* active map, NULL when unmapped */
};
/* Downcast the generic vbuf_render object to our r300_render subclass.
 * Valid because r300_render embeds vbuf_render as its first member. */
static INLINE struct r300_render*
r300_render(struct vbuf_render* render)
{
    return (struct r300_render*)render;
}
  698. static const struct vertex_info*
  699. r300_render_get_vertex_info(struct vbuf_render* render)
  700. {
  701. struct r300_render* r300render = r300_render(render);
  702. struct r300_context* r300 = r300render->r300;
  703. return &r300->vertex_info;
  704. }
  705. static boolean r300_render_allocate_vertices(struct vbuf_render* render,
  706. ushort vertex_size,
  707. ushort count)
  708. {
  709. struct r300_render* r300render = r300_render(render);
  710. struct r300_context* r300 = r300render->r300;
  711. struct pipe_screen* screen = r300->context.screen;
  712. size_t size = (size_t)vertex_size * (size_t)count;
  713. DBG(r300, DBG_DRAW, "r300: render_allocate_vertices (size: %d)\n", size);
  714. if (size + r300->draw_vbo_offset > r300->draw_vbo_size)
  715. {
  716. pipe_resource_reference(&r300->vbo, NULL);
  717. r300->vbo = pipe_buffer_create(screen,
  718. PIPE_BIND_VERTEX_BUFFER,
  719. R300_MAX_DRAW_VBO_SIZE);
  720. r300->draw_vbo_offset = 0;
  721. r300->draw_vbo_size = R300_MAX_DRAW_VBO_SIZE;
  722. r300->validate_buffers = TRUE;
  723. }
  724. r300render->vertex_size = vertex_size;
  725. return (r300->vbo) ? TRUE : FALSE;
  726. }
/* vbuf_render::map_vertices callback.
 * Maps the software-TCL VBO for writing and returns a pointer to the
 * current append offset. Must be balanced by r300_render_unmap_vertices. */
static void* r300_render_map_vertices(struct vbuf_render* render)
{
    struct r300_render* r300render = r300_render(render);
    struct r300_context* r300 = r300render->r300;

    /* A previous map must have been unmapped first. */
    assert(!r300render->vbo_transfer);

    DBG(r300, DBG_DRAW, "r300: render_map_vertices\n");

    /* NOTE(review): UNSYNCHRONIZED presumably relies on the offset-based
     * suballocation in allocate_vertices never reusing a region still
     * referenced by queued commands — confirm. */
    r300render->vbo_ptr = pipe_buffer_map(&r300render->r300->context,
                                          r300->vbo,
                                          PIPE_TRANSFER_WRITE |
                                          PIPE_TRANSFER_UNSYNCHRONIZED,
                                          &r300render->vbo_transfer);

    assert(r300render->vbo_ptr);

    /* Skip past the part of the VBO already consumed by earlier draws. */
    return ((uint8_t*)r300render->vbo_ptr + r300->draw_vbo_offset);
}
  741. static void r300_render_unmap_vertices(struct vbuf_render* render,
  742. ushort min,
  743. ushort max)
  744. {
  745. struct r300_render* r300render = r300_render(render);
  746. struct pipe_context* context = &r300render->r300->context;
  747. struct r300_context* r300 = r300render->r300;
  748. assert(r300render->vbo_transfer);
  749. DBG(r300, DBG_DRAW, "r300: render_unmap_vertices\n");
  750. r300render->vbo_max_used = MAX2(r300render->vbo_max_used,
  751. r300render->vertex_size * (max + 1));
  752. pipe_buffer_unmap(context, r300render->vbo_transfer);
  753. r300render->vbo_transfer = NULL;
  754. }
  755. static void r300_render_release_vertices(struct vbuf_render* render)
  756. {
  757. struct r300_render* r300render = r300_render(render);
  758. struct r300_context* r300 = r300render->r300;
  759. DBG(r300, DBG_DRAW, "r300: render_release_vertices\n");
  760. r300->draw_vbo_offset += r300render->vbo_max_used;
  761. r300render->vbo_max_used = 0;
  762. }
  763. static boolean r300_render_set_primitive(struct vbuf_render* render,
  764. unsigned prim)
  765. {
  766. struct r300_render* r300render = r300_render(render);
  767. r300render->prim = prim;
  768. r300render->hwprim = r300_translate_primitive(prim);
  769. return TRUE;
  770. }
/* vbuf_render::draw_arrays callback.
 * Emits a non-indexed DRAW_VBUF_2 packet for 'count' vertices previously
 * written into the software-TCL VBO. */
static void r300_render_draw_arrays(struct vbuf_render* render,
                                    unsigned start,
                                    unsigned count)
{
    struct r300_render* r300render = r300_render(render);
    struct r300_context* r300 = r300render->r300;
    uint8_t* ptr;
    unsigned i;
    unsigned dwords = 6;
    CS_LOCALS(r300);
    (void) i; (void) ptr;

    DBG(r300, DBG_DRAW, "r300: render_draw_arrays (count: %d)\n", count);

    /* The very first draw (draw_first_emitted == FALSE) goes through
     * r300_emit_states; later draws only validate CS space via
     * r300_prepare_for_rendering. */
    if (r300->draw_first_emitted) {
        if (!r300_prepare_for_rendering(r300,
                                        PREP_FIRST_DRAW | PREP_EMIT_AOS_SWTCL,
                                        NULL, dwords, 0, 0, FALSE))
            return;
    } else {
        if (!r300_emit_states(r300,
                              PREP_FIRST_DRAW | PREP_EMIT_AOS_SWTCL,
                              NULL, 0, 0, FALSE))
            return;
    }

    BEGIN_CS(dwords);
    OUT_CS_REG(R300_GA_COLOR_CONTROL,
            r300_provoking_vertex_fixes(r300, r300render->prim));
    OUT_CS_REG(R300_VAP_VF_MAX_VTX_INDX, count - 1);
    OUT_CS_PKT3(R300_PACKET3_3D_DRAW_VBUF_2, 0);
    /* Vertex count goes into the high 16 bits of VAP_VF_CNTL. */
    OUT_CS(R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_LIST | (count << 16) |
           r300render->hwprim);
    END_CS;

    r300->draw_first_emitted = TRUE;
}
/* vbuf_render::draw_elements callback.
 * Emits one or more DRAW_INDX_2 packets with the indices embedded in the
 * command stream, splitting the draw whenever the CS runs out of space. */
static void r300_render_draw_elements(struct vbuf_render* render,
                                      const ushort* indices,
                                      uint count)
{
    struct r300_render* r300render = r300_render(render);
    struct r300_context* r300 = r300render->r300;
    int i;
    unsigned end_cs_dwords;
    /* Highest vertex index addressable in the portion of the VBO that
     * belongs to this draw, given the current vertex size (in dwords * 4
     * bytes). */
    unsigned max_index = (r300->draw_vbo_size - r300->draw_vbo_offset) /
                         (r300render->r300->vertex_info.size * 4) - 1;
    unsigned short_count;
    unsigned free_dwords;
    CS_LOCALS(r300);

    DBG(r300, DBG_DRAW, "r300: render_draw_elements (count: %d)\n", count);

    /* First draw emits full state; later draws only validate CS space. */
    if (r300->draw_first_emitted) {
        if (!r300_prepare_for_rendering(r300,
                                        PREP_FIRST_DRAW | PREP_EMIT_AOS_SWTCL | PREP_INDEXED,
                                        NULL, 256, 0, 0, FALSE))
            return;
    } else {
        if (!r300_emit_states(r300,
                              PREP_FIRST_DRAW | PREP_EMIT_AOS_SWTCL | PREP_INDEXED,
                              NULL, 0, 0, FALSE))
            return;
    }

    /* Below we manage the CS space manually because there may be more
     * indices than it can fit in CS. */

    end_cs_dwords = r300_get_num_cs_end_dwords(r300);

    while (count) {
        free_dwords = R300_MAX_CMDBUF_DWORDS - r300->cs->cdw;

        /* Two 16-bit indices fit per dword; 6 dwords are reserved for the
         * register writes and packet header emitted below. */
        short_count = MIN2(count, (free_dwords - end_cs_dwords - 6) * 2);

        BEGIN_CS(6 + (short_count+1)/2);
        OUT_CS_REG(R300_GA_COLOR_CONTROL,
                r300_provoking_vertex_fixes(r300, r300render->prim));
        OUT_CS_REG(R300_VAP_VF_MAX_VTX_INDX, max_index);
        OUT_CS_PKT3(R300_PACKET3_3D_DRAW_INDX_2, (short_count+1)/2);
        OUT_CS(R300_VAP_VF_CNTL__PRIM_WALK_INDICES | (short_count << 16) |
               r300render->hwprim);
        /* Pack pairs of indices into dwords, low index first. */
        for (i = 0; i < short_count-1; i += 2) {
            OUT_CS(indices[i+1] << 16 | indices[i]);
        }
        if (short_count % 2) {
            /* Odd trailing index occupies the low half of the last dword. */
            OUT_CS(indices[short_count-1]);
        }
        END_CS;

        /* OK now subtract the emitted indices and see if we need to emit
         * another draw packet. */
        indices += short_count;
        count -= short_count;

        if (count) {
            /* Re-validate CS space for the remainder (this may flush). */
            if (!r300_prepare_for_rendering(r300,
                                            PREP_EMIT_AOS_SWTCL | PREP_INDEXED,
                                            NULL, 256, 0, 0, FALSE))
                return;

            end_cs_dwords = r300_get_num_cs_end_dwords(r300);
        }
    }

    r300->draw_first_emitted = TRUE;
}
/* vbuf_render::destroy callback. Only frees the render object itself;
 * the VBO is owned by r300_context (see r300_draw_flush_vbuf). */
static void r300_render_destroy(struct vbuf_render* render)
{
    FREE(render);
}
  867. static struct vbuf_render* r300_render_create(struct r300_context* r300)
  868. {
  869. struct r300_render* r300render = CALLOC_STRUCT(r300_render);
  870. r300render->r300 = r300;
  871. r300render->base.max_vertex_buffer_bytes = 1024 * 1024;
  872. r300render->base.max_indices = 16 * 1024;
  873. r300render->base.get_vertex_info = r300_render_get_vertex_info;
  874. r300render->base.allocate_vertices = r300_render_allocate_vertices;
  875. r300render->base.map_vertices = r300_render_map_vertices;
  876. r300render->base.unmap_vertices = r300_render_unmap_vertices;
  877. r300render->base.set_primitive = r300_render_set_primitive;
  878. r300render->base.draw_elements = r300_render_draw_elements;
  879. r300render->base.draw_arrays = r300_render_draw_arrays;
  880. r300render->base.release_vertices = r300_render_release_vertices;
  881. r300render->base.destroy = r300_render_destroy;
  882. return &r300render->base;
  883. }
  884. struct draw_stage* r300_draw_stage(struct r300_context* r300)
  885. {
  886. struct vbuf_render* render;
  887. struct draw_stage* stage;
  888. render = r300_render_create(r300);
  889. if (!render) {
  890. return NULL;
  891. }
  892. stage = draw_vbuf_stage(r300->draw, render);
  893. if (!stage) {
  894. render->destroy(render);
  895. return NULL;
  896. }
  897. draw_set_render(r300->draw, render);
  898. return stage;
  899. }
/* Drop the software-TCL vertex buffer.
 * Setting draw_vbo_size to 0 forces r300_render_allocate_vertices to
 * allocate a fresh VBO (and reset the offset) on the next draw. */
void r300_draw_flush_vbuf(struct r300_context *r300)
{
    pipe_resource_reference(&r300->vbo, NULL);
    r300->draw_vbo_size = 0;
}
  905. /****************************************************************************
  906. * End of SW TCL functions *
  907. ***************************************************************************/
908. /* This function is used to draw a rectangle for the blitter module.
  909. *
  910. * If we rendered a quad, the pixels on the main diagonal
  911. * would be computed and stored twice, which makes the clear/copy codepaths
  912. * somewhat inefficient. Instead we use a rectangular point sprite. */
/* Draw the blitter rectangle as a single oversized point sprite.
 * x1/y1/x2/y2 are the rectangle corners; 'attrib' carries either a color
 * or texcoords depending on 'type', and may be NULL. */
static void r300_blitter_draw_rectangle(struct blitter_context *blitter,
                                        unsigned x1, unsigned y1,
                                        unsigned x2, unsigned y2,
                                        float depth,
                                        enum blitter_attrib_type type,
                                        const float attrib[4])
{
    struct r300_context *r300 = r300_context(util_blitter_get_pipe(blitter));
    unsigned last_sprite_coord_enable = r300->sprite_coord_enable;
    unsigned width = x2 - x1;
    unsigned height = y2 - y1;
    /* Vertex size in dwords: 8 when a color attribute is appended (or when
     * there is no Draw context), 4 for the bare position+depth+w vertex. */
    unsigned vertex_size =
            type == UTIL_BLITTER_ATTRIB_COLOR || !r300->draw ? 8 : 4;
    unsigned dwords = 13 + vertex_size +
                      (type == UTIL_BLITTER_ATTRIB_TEXCOORD ? 7 : 0);
    const float zeros[4] = {0, 0, 0, 0};
    CS_LOCALS(r300);

    r300->context.set_vertex_buffers(&r300->context, 0, NULL);

    if (type == UTIL_BLITTER_ATTRIB_TEXCOORD)
        r300->sprite_coord_enable = 1;

    r300_update_derived_state(r300);

    /* Mark some states we don't care about as non-dirty. */
    r300->clip_state.dirty = FALSE;
    r300->viewport_state.dirty = FALSE;

    if (!r300_prepare_for_rendering(r300, PREP_FIRST_DRAW, NULL, dwords, 0, 0,
                                    FALSE))
        goto done;

    DBG(r300, DBG_DRAW, "r300: draw_rectangle\n");

    BEGIN_CS(dwords);
    /* Set up GA. Point size is in 1/6 pixel units — TODO confirm units. */
    OUT_CS_REG(R300_GA_POINT_SIZE, (height * 6) | ((width * 6) << 16));

    if (type == UTIL_BLITTER_ATTRIB_TEXCOORD) {
        /* Set up the GA to generate texcoords. */
        OUT_CS_REG(R300_GB_ENABLE, R300_GB_POINT_STUFF_ENABLE |
                   (R300_GB_TEX_STR << R300_GB_TEX0_SOURCE_SHIFT));
        OUT_CS_REG_SEQ(R300_GA_POINT_S0, 4);
        OUT_CS_32F(attrib[0]);
        OUT_CS_32F(attrib[3]);
        OUT_CS_32F(attrib[2]);
        OUT_CS_32F(attrib[1]);
    }

    /* Set up VAP controls. */
    OUT_CS_REG(R300_VAP_CLIP_CNTL, R300_CLIP_DISABLE);
    OUT_CS_REG(R300_VAP_VTE_CNTL, R300_VTX_XY_FMT | R300_VTX_Z_FMT);
    OUT_CS_REG(R300_VAP_VTX_SIZE, vertex_size);
    OUT_CS_REG_SEQ(R300_VAP_VF_MAX_VTX_INDX, 2);
    OUT_CS(1);
    OUT_CS(0);

    /* Draw: one embedded-vertex point at the rectangle center. */
    OUT_CS_PKT3(R300_PACKET3_3D_DRAW_IMMD_2, vertex_size);
    OUT_CS(R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_EMBEDDED | (1 << 16) |
           R300_VAP_VF_CNTL__PRIM_POINTS);
    OUT_CS_32F(x1 + width * 0.5f);
    OUT_CS_32F(y1 + height * 0.5f);
    OUT_CS_32F(depth);
    OUT_CS_32F(1);

    if (vertex_size == 8) {
        /* A color attribute is expected; default to transparent black. */
        if (!attrib)
            attrib = zeros;
        OUT_CS_TABLE(attrib, 4);
    }
    END_CS;

done:
    /* Restore the state. */
    r300_mark_atom_dirty(r300, &r300->clip_state);
    r300_mark_atom_dirty(r300, &r300->rs_state);
    r300_mark_atom_dirty(r300, &r300->viewport_state);

    r300->sprite_coord_enable = last_sprite_coord_enable;
}
/* pipe_context::resource_resolve callback.
 * Resolves an MSAA 'src' layer into 'dest' by enabling the hardware
 * AA-resolve path and then running a clear over the source surface. */
static void r300_resource_resolve(struct pipe_context* pipe,
                                  struct pipe_resource* dest,
                                  unsigned dst_layer,
                                  struct pipe_resource* src,
                                  unsigned src_layer)
{
    struct r300_context* r300 = r300_context(pipe);
    struct pipe_surface* srcsurf, surf_tmpl;
    struct r300_aa_state *aa = (struct r300_aa_state*)r300->aa_state.state;
    float color[] = {0, 0, 0, 0};

    memset(&surf_tmpl, 0, sizeof(surf_tmpl));
    surf_tmpl.format = src->format;
    surf_tmpl.usage = 0; /* not really a surface hence no bind flags */
    surf_tmpl.u.tex.level = 0; /* msaa resources cannot have mipmaps */
    surf_tmpl.u.tex.first_layer = src_layer;
    surf_tmpl.u.tex.last_layer = src_layer;
    srcsurf = pipe->create_surface(pipe, src, &surf_tmpl);
    /* Reuse the same template for the destination surface. */
    surf_tmpl.format = dest->format;
    surf_tmpl.u.tex.first_layer = dst_layer;
    surf_tmpl.u.tex.last_layer = dst_layer;

    DBG(r300, DBG_DRAW, "r300: Resolving resource...\n");

    /* Enable AA resolve. */
    aa->dest = r300_surface(pipe->create_surface(pipe, dest, &surf_tmpl));
    aa->aaresolve_ctl =
        R300_RB3D_AARESOLVE_CTL_AARESOLVE_MODE_RESOLVE |
        R300_RB3D_AARESOLVE_CTL_AARESOLVE_ALPHA_AVERAGE;
    r300->aa_state.size = 10;
    r300_mark_atom_dirty(r300, &r300->aa_state);

    /* Resolve the surface.
     * NOTE(review): the clear appears to exist only to trigger a rendering
     * pass while aaresolve_ctl is set — confirm against the clear path. */
    r300->context.clear_render_target(pipe,
        srcsurf, color, 0, 0, src->width0, src->height0);

    /* Disable AA resolve. */
    aa->aaresolve_ctl = 0;
    r300->aa_state.size = 4;
    r300_mark_atom_dirty(r300, &r300->aa_state);

    pipe_surface_reference((struct pipe_surface**)&srcsurf, NULL);
    pipe_surface_reference((struct pipe_surface**)&aa->dest, NULL);
}
  1020. void r300_init_render_functions(struct r300_context *r300)
  1021. {
  1022. /* Set draw functions based on presence of HW TCL. */
  1023. if (r300->screen->caps.has_tcl) {
  1024. r300->context.draw_vbo = r300_draw_vbo;
  1025. } else {
  1026. r300->context.draw_vbo = r300_swtcl_draw_vbo;
  1027. }
  1028. r300->context.resource_resolve = r300_resource_resolve;
  1029. r300->blitter->draw_rectangle = r300_blitter_draw_rectangle;
  1030. /* Plug in the two-sided stencil reference value fallback if needed. */
  1031. if (!r300->screen->caps.is_r500)
  1032. r300_plug_in_stencil_ref_fallback(r300);
  1033. }