
nv50_tgsi_to_nc.c

/*
 * Copyright 2010 Christoph Bumiller
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <unistd.h>

#include "nv50_context.h"
#include "nv50_pc.h"

#include "pipe/p_shader_tokens.h"
#include "tgsi/tgsi_parse.h"
#include "tgsi/tgsi_util.h"
#include "tgsi/tgsi_dump.h"

#define BLD_MAX_TEMPS 64
#define BLD_MAX_ADDRS 4
#define BLD_MAX_PREDS 4
#define BLD_MAX_IMMDS 128

#define BLD_MAX_COND_NESTING 8
#define BLD_MAX_LOOP_NESTING 4
#define BLD_MAX_CALL_NESTING 2
/* collects all values assigned to the same TGSI register */
struct bld_value_stack {
   struct nv_value *top;
   struct nv_value **body;
   unsigned size;
   uint16_t loop_use; /* 1 bit per loop level, indicates if used/defd */
   uint16_t loop_def;
};

static INLINE void
bld_vals_push_val(struct bld_value_stack *stk, struct nv_value *val)
{
   assert(!stk->size || (stk->body[stk->size - 1] != val));

   if (!(stk->size % 8)) {
      unsigned old_sz = (stk->size + 0) * sizeof(struct nv_value *);
      unsigned new_sz = (stk->size + 8) * sizeof(struct nv_value *);
      stk->body = (struct nv_value **)REALLOC(stk->body, old_sz, new_sz);
   }
   stk->body[stk->size++] = val;
}

static INLINE boolean
bld_vals_del_val(struct bld_value_stack *stk, struct nv_value *val)
{
   unsigned i;

   for (i = stk->size; i > 0; --i)
      if (stk->body[i - 1] == val)
         break;
   if (!i)
      return FALSE;

   if (i != stk->size)
      stk->body[i - 1] = stk->body[stk->size - 1];

   --stk->size; /* XXX: old size in REALLOC */
   return TRUE;
}

static INLINE void
bld_vals_push(struct bld_value_stack *stk)
{
   bld_vals_push_val(stk, stk->top);
   stk->top = NULL;
}

static INLINE void
bld_push_values(struct bld_value_stack *stacks, int n)
{
   int i, c;

   for (i = 0; i < n; ++i)
      for (c = 0; c < 4; ++c)
         if (stacks[i * 4 + c].top)
            bld_vals_push(&stacks[i * 4 + c]);
}
struct bld_context {
   struct nv50_translation_info *ti;

   struct nv_pc *pc;
   struct nv_basic_block *b;

   struct tgsi_parse_context parse[BLD_MAX_CALL_NESTING];
   int call_lvl;

   struct nv_basic_block *cond_bb[BLD_MAX_COND_NESTING];
   struct nv_basic_block *join_bb[BLD_MAX_COND_NESTING];
   struct nv_basic_block *else_bb[BLD_MAX_COND_NESTING];
   int cond_lvl;
   struct nv_basic_block *loop_bb[BLD_MAX_LOOP_NESTING];
   struct nv_basic_block *brkt_bb[BLD_MAX_LOOP_NESTING];
   int loop_lvl;

   ubyte out_kind; /* CFG_EDGE_FORWARD, or FAKE in case of BREAK/CONT */

   struct bld_value_stack tvs[BLD_MAX_TEMPS][4]; /* TGSI_FILE_TEMPORARY */
   struct bld_value_stack avs[BLD_MAX_ADDRS][4]; /* TGSI_FILE_ADDRESS */
   struct bld_value_stack pvs[BLD_MAX_PREDS][4]; /* TGSI_FILE_PREDICATE */
   struct bld_value_stack ovs[PIPE_MAX_SHADER_OUTPUTS][4];

   uint32_t outputs_written[(PIPE_MAX_SHADER_OUTPUTS + 7) / 8];

   struct nv_value *frgcrd[4];
   struct nv_value *sysval[4];

   /* wipe on new BB */
   struct nv_value *saved_addr[4][2];
   struct nv_value *saved_inputs[128];
   struct nv_value *saved_immd[BLD_MAX_IMMDS];
   uint num_immds;
};

static INLINE ubyte
bld_stack_file(struct bld_context *bld, struct bld_value_stack *stk)
{
   if (stk < &bld->avs[0][0])
      return NV_FILE_GPR;
   else
   if (stk < &bld->pvs[0][0])
      return NV_FILE_ADDR;
   else
   if (stk < &bld->ovs[0][0])
      return NV_FILE_FLAGS;
   else
      return NV_FILE_OUT;
}

static INLINE struct nv_value *
bld_fetch(struct bld_context *bld, struct bld_value_stack *stk, int i, int c)
{
   stk[i * 4 + c].loop_use |= 1 << bld->loop_lvl;
   return stk[i * 4 + c].top;
}

static struct nv_value *
bld_loop_phi(struct bld_context *, struct bld_value_stack *, struct nv_value *);

/* If a variable is defined in a loop without prior use, we don't need
 * a phi in the loop header to account for backwards flow.
 *
 * However, if this variable is then also used outside the loop, we do
 * need a phi after all. But we must not use this phi's def inside the
 * loop, so we can eliminate the phi if it is unused later.
 */
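
/* Illustrative sketch (not from the original source): in
 *    BGNLOOP  MOV TEMP[0], IMM[0]  ...  ENDLOOP  MOV OUT[0], TEMP[0]
 * the write to TEMP[0] needs no phi in the loop header, since no
 * iteration reads a value from a previous one; bld_store below still
 * creates one speculatively for the use after the loop, and it is
 * eliminated again if its def is never referenced.
 */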
static INLINE void
bld_store(struct bld_context *bld, struct bld_value_stack *stk, int i, int c,
          struct nv_value *val)
{
   const uint16_t m = 1 << bld->loop_lvl;

   stk = &stk[i * 4 + c];

   if (bld->loop_lvl && !(m & (stk->loop_def | stk->loop_use)))
      bld_loop_phi(bld, stk, val);

   stk->top = val;
   stk->loop_def |= 1 << bld->loop_lvl;
}

static INLINE void
bld_clear_def_use(struct bld_value_stack *stk, int n, int lvl)
{
   int i;
   const uint16_t mask = ~(1 << lvl);

   for (i = 0; i < n * 4; ++i) {
      stk[i].loop_def &= mask;
      stk[i].loop_use &= mask;
   }
}

#define FETCH_TEMP(i, c)    bld_fetch(bld, &bld->tvs[0][0], i, c)
#define STORE_TEMP(i, c, v) bld_store(bld, &bld->tvs[0][0], i, c, (v))
#define FETCH_ADDR(i, c)    bld_fetch(bld, &bld->avs[0][0], i, c)
#define STORE_ADDR(i, c, v) bld_store(bld, &bld->avs[0][0], i, c, (v))
#define FETCH_PRED(i, c)    bld_fetch(bld, &bld->pvs[0][0], i, c)
#define STORE_PRED(i, c, v) bld_store(bld, &bld->pvs[0][0], i, c, (v))

#define STORE_OUTR(i, c, v)                                         \
   do {                                                             \
      bld->ovs[i][c].top = (v);                                     \
      bld->outputs_written[(i) / 8] |= 1 << (((i) * 4 + (c)) % 32); \
   } while (0)

static INLINE void
bld_warn_uninitialized(struct bld_context *bld, int kind,
                       struct bld_value_stack *stk, struct nv_basic_block *b)
{
#if NV50_DEBUG & NV50_DEBUG_PROG_IR
   long i = (stk - &bld->tvs[0][0]) / 4;
   long c = (stk - &bld->tvs[0][0]) & 3;

   if (c == 3)
      c = -1;

   debug_printf("WARNING: TEMP[%li].%c %s used uninitialized in BB:%i\n",
                i, (int)('x' + c), kind ? "may be" : "is", b->id);
#endif
}

static INLINE struct nv_value *
bld_def(struct nv_instruction *i, int c, struct nv_value *value)
{
   i->def[c] = value;
   value->insn = i;
   return value;
}

static INLINE struct nv_value *
find_by_bb(struct bld_value_stack *stack, struct nv_basic_block *b)
{
   int i;

   if (stack->top && stack->top->insn->bb == b)
      return stack->top;

   for (i = stack->size - 1; i >= 0; --i)
      if (stack->body[i]->insn->bb == b)
         return stack->body[i];
   return NULL;
}

/* fetch value from stack that was defined in the specified basic block,
 * or search for first definitions in all of its predecessors
 */
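
/* Illustrative sketch (not from the original source): for a diamond CFG
 *    BB:0 -> { BB:1, BB:2 } -> BB:3
 * with TEMP[0] written in both BB:1 and BB:2, a fetch in BB:3 finds no
 * local def and recurses into both predecessors, collecting two values
 * that the caller (bld_phi) then merges with a phi function in BB:3.
 */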
static void
fetch_by_bb(struct bld_value_stack *stack,
            struct nv_value **vals, int *n,
            struct nv_basic_block *b)
{
   int i;
   struct nv_value *val;

   assert(*n < 16); /* MAX_COND_NESTING */

   val = find_by_bb(stack, b);
   if (val) {
      for (i = 0; i < *n; ++i)
         if (vals[i] == val)
            return;
      vals[(*n)++] = val;
      return;
   }
   for (i = 0; i < b->num_in; ++i)
      if (!IS_WALL_EDGE(b->in_kind[i]))
         fetch_by_bb(stack, vals, n, b->in[i]);
}

static INLINE boolean
nvbb_is_terminated(struct nv_basic_block *bb)
{
   return bb->exit && bb->exit->is_terminator;
}

static INLINE struct nv_value *
bld_load_imm_u32(struct bld_context *bld, uint32_t u);

static INLINE struct nv_value *
bld_undef(struct bld_context *bld, ubyte file)
{
   struct nv_instruction *nvi = new_instruction(bld->pc, NV_OP_UNDEF);

   return bld_def(nvi, 0, new_value(bld->pc, file, NV_TYPE_U32));
}

static struct nv_value *
bld_phi(struct bld_context *bld, struct nv_basic_block *b,
        struct bld_value_stack *stack)
{
   struct nv_basic_block *in;
   struct nv_value *vals[16] = { 0 };
   struct nv_value *val;
   struct nv_instruction *phi;
   int i, j, n;

   do {
      i = n = 0;
      fetch_by_bb(stack, vals, &n, b);

      if (!n) {
         bld_warn_uninitialized(bld, 0, stack, b);
         return NULL;
      }

      if (n == 1) {
         if (nvbb_dominated_by(b, vals[0]->insn->bb))
            break;

         bld_warn_uninitialized(bld, 1, stack, b);

         /* back-tracking to insert missing value of other path */
         in = b;
         while (in->in[0]) {
            if (in->num_in == 1) {
               in = in->in[0];
            } else {
               if (!nvbb_reachable_by(in->in[0], vals[0]->insn->bb, b))
                  in = in->in[0];
               else
               if (!nvbb_reachable_by(in->in[1], vals[0]->insn->bb, b))
                  in = in->in[1];
               else
                  in = in->in[0];
            }
         }
         bld->pc->current_block = in;

         /* should make this a no-op */
         bld_vals_push_val(stack, bld_undef(bld, vals[0]->reg.file));
         continue;
      }

      for (i = 0; i < n; ++i) {
         /* if value dominates b, continue to the redefinitions */
         if (nvbb_dominated_by(b, vals[i]->insn->bb))
            continue;

         /* if value dominates any in-block, b should be the dom frontier */
         for (j = 0; j < b->num_in; ++j)
            if (nvbb_dominated_by(b->in[j], vals[i]->insn->bb))
               break;
         /* otherwise, find the dominance frontier and put the phi there */
         if (j == b->num_in) {
            in = nvbb_dom_frontier(vals[i]->insn->bb);
            val = bld_phi(bld, in, stack);
            bld_vals_push_val(stack, val);
            break;
         }
      }
   } while (i < n);

   bld->pc->current_block = b;

   if (n == 1)
      return vals[0];

   phi = new_instruction(bld->pc, NV_OP_PHI);

   bld_def(phi, 0, new_value(bld->pc, vals[0]->reg.file, vals[0]->reg.type));
   for (i = 0; i < n; ++i)
      phi->src[i] = new_ref(bld->pc, vals[i]);

   return phi->def[0];
}
/* Insert a phi function in the loop header.
 * For nested loops, we need to insert phi functions in all the outer
 * loop headers if they don't have one yet.
 *
 * @def: redefinition from inside loop, or NULL if to be replaced later
 */
static struct nv_value *
bld_loop_phi(struct bld_context *bld, struct bld_value_stack *stack,
             struct nv_value *def)
{
   struct nv_instruction *phi;
   struct nv_basic_block *bb = bld->pc->current_block;
   struct nv_value *val = NULL;

   if (bld->loop_lvl > 1) {
      --bld->loop_lvl;
      if (!((stack->loop_def | stack->loop_use) & (1 << bld->loop_lvl)))
         val = bld_loop_phi(bld, stack, NULL);
      ++bld->loop_lvl;
   }

   if (!val)
      val = bld_phi(bld, bld->pc->current_block, stack); /* old definition */
   if (!val) {
      bld->pc->current_block = bld->loop_bb[bld->loop_lvl - 1]->in[0];
      val = bld_undef(bld, bld_stack_file(bld, stack));
   }

   bld->pc->current_block = bld->loop_bb[bld->loop_lvl - 1];

   phi = new_instruction(bld->pc, NV_OP_PHI);

   bld_def(phi, 0, new_value_like(bld->pc, val));
   if (!def)
      def = phi->def[0];

   bld_vals_push_val(stack, phi->def[0]);

   phi->target = (struct nv_basic_block *)stack; /* cheat */

   nv_reference(bld->pc, &phi->src[0], val);
   nv_reference(bld->pc, &phi->src[1], def);

   bld->pc->current_block = bb;

   return phi->def[0];
}

static INLINE struct nv_value *
bld_fetch_global(struct bld_context *bld, struct bld_value_stack *stack)
{
   const uint16_t m = 1 << bld->loop_lvl;
   const uint16_t use = stack->loop_use;

   stack->loop_use |= m;

   /* If neither used nor def'd inside the loop, build a phi in foresight,
    * so we don't have to replace stuff later on, which requires tracking.
    */
   if (bld->loop_lvl && !((use | stack->loop_def) & m))
      return bld_loop_phi(bld, stack, NULL);

   return bld_phi(bld, bld->pc->current_block, stack);
}

static INLINE struct nv_value *
bld_imm_u32(struct bld_context *bld, uint32_t u)
{
   int i;
   unsigned n = bld->num_immds;

   for (i = 0; i < n; ++i)
      if (bld->saved_immd[i]->reg.imm.u32 == u)
         return bld->saved_immd[i];
   assert(n < BLD_MAX_IMMDS);

   bld->num_immds++;

   bld->saved_immd[n] = new_value(bld->pc, NV_FILE_IMM, NV_TYPE_U32);
   bld->saved_immd[n]->reg.imm.u32 = u;
   return bld->saved_immd[n];
}

static void
bld_replace_value(struct nv_pc *, struct nv_basic_block *, struct nv_value *,
                  struct nv_value *);

/* Replace the source of the phi in the loop header by the last assignment,
 * or eliminate the phi function if there is no assignment inside the loop.
 *
 * Redundancy situation 1 - (used) but (not redefined) value:
 *   %3 = phi %0, %3 = %3 is used
 *   %3 = phi %0, %4 = is new definition
 *
 * Redundancy situation 2 - (not used) but (redefined) value:
 *   %3 = phi %0, %2 = %2 is used, %3 could be used outside, deleted by DCE
 */
static void
bld_loop_end(struct bld_context *bld, struct nv_basic_block *bb)
{
   struct nv_basic_block *save = bld->pc->current_block;
   struct nv_instruction *phi, *next;
   struct nv_value *val;
   struct bld_value_stack *stk;
   int i, s, n;

   for (phi = bb->phi; phi && phi->opcode == NV_OP_PHI; phi = next) {
      next = phi->next;

      stk = (struct bld_value_stack *)phi->target;
      phi->target = NULL;

      /* start with s == 1, src[0] is from outside the loop */
      for (s = 1, n = 0; n < bb->num_in; ++n) {
         if (bb->in_kind[n] != CFG_EDGE_BACK)
            continue;
         assert(s < 4);

         bld->pc->current_block = bb->in[n];
         val = bld_fetch_global(bld, stk);

         for (i = 0; i < 4; ++i)
            if (phi->src[i] && phi->src[i]->value == val)
               break;
         if (i == 4) {
            /* skip values we do not want to replace */
            for (; phi->src[s] && phi->src[s]->value != phi->def[0]; ++s);
            nv_reference(bld->pc, &phi->src[s++], val);
         }
      }
      bld->pc->current_block = save;

      if (phi->src[0]->value == phi->def[0] ||
          phi->src[0]->value == phi->src[1]->value)
         s = 1;
      else
      if (phi->src[1]->value == phi->def[0])
         s = 0;
      else
         continue;

      if (s >= 0) {
         /* eliminate the phi */
         bld_vals_del_val(stk, phi->def[0]);

         ++bld->pc->pass_seq;
         bld_replace_value(bld->pc, bb, phi->def[0], phi->src[s]->value);

         nv_nvi_delete(phi);
      }
   }
}
static INLINE struct nv_value *
bld_imm_f32(struct bld_context *bld, float f)
{
   return bld_imm_u32(bld, fui(f));
}

#define SET_TYPE(v, t) ((v)->reg.type = (v)->reg.as_type = (t))

static struct nv_value *
bld_insn_1(struct bld_context *bld, uint opcode, struct nv_value *src0)
{
   struct nv_instruction *insn = new_instruction(bld->pc, opcode);

   nv_reference(bld->pc, &insn->src[0], src0);

   return bld_def(insn, 0, new_value(bld->pc, NV_FILE_GPR, src0->reg.as_type));
}

static struct nv_value *
bld_insn_2(struct bld_context *bld, uint opcode,
           struct nv_value *src0, struct nv_value *src1)
{
   struct nv_instruction *insn = new_instruction(bld->pc, opcode);

   nv_reference(bld->pc, &insn->src[0], src0);
   nv_reference(bld->pc, &insn->src[1], src1);

   return bld_def(insn, 0, new_value(bld->pc, NV_FILE_GPR, src0->reg.as_type));
}

static struct nv_value *
bld_insn_3(struct bld_context *bld, uint opcode,
           struct nv_value *src0, struct nv_value *src1,
           struct nv_value *src2)
{
   struct nv_instruction *insn = new_instruction(bld->pc, opcode);

   nv_reference(bld->pc, &insn->src[0], src0);
   nv_reference(bld->pc, &insn->src[1], src1);
   nv_reference(bld->pc, &insn->src[2], src2);

   return bld_def(insn, 0, new_value(bld->pc, NV_FILE_GPR, src0->reg.as_type));
}

static struct nv_value *
bld_duplicate_insn(struct bld_context *bld, struct nv_instruction *nvi)
{
   struct nv_instruction *dupi = new_instruction(bld->pc, nvi->opcode);
   int c;

   if (nvi->def[0])
      bld_def(dupi, 0, new_value_like(bld->pc, nvi->def[0]));

   if (nvi->flags_def) {
      dupi->flags_def = new_value_like(bld->pc, nvi->flags_def);
      dupi->flags_def->insn = dupi;
   }

   for (c = 0; c < 5; ++c)
      if (nvi->src[c])
         nv_reference(bld->pc, &dupi->src[c], nvi->src[c]->value);
   if (nvi->flags_src)
      nv_reference(bld->pc, &dupi->flags_src, nvi->flags_src->value);

   dupi->cc = nvi->cc;
   dupi->saturate = nvi->saturate;
   dupi->centroid = nvi->centroid;
   dupi->flat = nvi->flat;

   return dupi->def[0];
}

static void
bld_lmem_store(struct bld_context *bld, struct nv_value *ptr, int ofst,
               struct nv_value *val)
{
   struct nv_instruction *insn = new_instruction(bld->pc, NV_OP_STA);
   struct nv_value *loc;

   loc = new_value(bld->pc, NV_FILE_MEM_L, NV_TYPE_U32);

   loc->reg.id = ofst * 4;

   nv_reference(bld->pc, &insn->src[0], loc);
   nv_reference(bld->pc, &insn->src[1], val);
   nv_reference(bld->pc, &insn->src[4], ptr);
}

static struct nv_value *
bld_lmem_load(struct bld_context *bld, struct nv_value *ptr, int ofst)
{
   struct nv_value *loc, *val;

   loc = new_value(bld->pc, NV_FILE_MEM_L, NV_TYPE_U32);

   loc->reg.id = ofst * 4;

   val = bld_insn_1(bld, NV_OP_LDA, loc);

   nv_reference(bld->pc, &val->insn->src[4], ptr);

   return val;
}

#define BLD_INSN_1_EX(d, op, dt, s0, s0t)          \
   do {                                            \
      (d) = bld_insn_1(bld, (NV_OP_##op), (s0));   \
      SET_TYPE(d, NV_TYPE_##dt);                   \
      (d)->insn->src[0]->typecast = NV_TYPE_##s0t; \
   } while (0)

#define BLD_INSN_2_EX(d, op, dt, s0, s0t, s1, s1t)     \
   do {                                                \
      (d) = bld_insn_2(bld, (NV_OP_##op), (s0), (s1)); \
      SET_TYPE(d, NV_TYPE_##dt);                       \
      (d)->insn->src[0]->typecast = NV_TYPE_##s0t;     \
      (d)->insn->src[1]->typecast = NV_TYPE_##s1t;     \
   } while (0)
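
/* Note (not from the original source): bld_pow below computes pow(x, e)
 * as 2^(e * log2(x)); PREEX2, like PRESIN for SIN/COS further down,
 * appears to be the argument pre-processing step the hardware requires
 * before the EX2 proper.
 */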
static struct nv_value *
bld_pow(struct bld_context *bld, struct nv_value *x, struct nv_value *e)
{
   struct nv_value *val;

   BLD_INSN_1_EX(val, LG2, F32, x, F32);
   BLD_INSN_2_EX(val, MUL, F32, e, F32, val, F32);
   val = bld_insn_1(bld, NV_OP_PREEX2, val);
   val = bld_insn_1(bld, NV_OP_EX2, val);

   return val;
}

static INLINE struct nv_value *
bld_load_imm_f32(struct bld_context *bld, float f)
{
   struct nv_value *imm = bld_insn_1(bld, NV_OP_MOV, bld_imm_f32(bld, f));

   SET_TYPE(imm, NV_TYPE_F32);
   return imm;
}

static INLINE struct nv_value *
bld_load_imm_u32(struct bld_context *bld, uint32_t u)
{
   return bld_insn_1(bld, NV_OP_MOV, bld_imm_u32(bld, u));
}

static struct nv_value *
bld_get_address(struct bld_context *bld, int id, struct nv_value *indirect)
{
   int i;
   struct nv_instruction *nvi;
   struct nv_value *val;

   for (i = 0; i < 4; ++i) {
      if (!bld->saved_addr[i][0])
         break;
      if (bld->saved_addr[i][1] == indirect) {
         nvi = bld->saved_addr[i][0]->insn;
         if (nvi->src[0]->value->reg.imm.u32 == id)
            return bld->saved_addr[i][0];
      }
   }
   i &= 3;

   val = bld_imm_u32(bld, id);
   if (indirect)
      val = bld_insn_2(bld, NV_OP_ADD, indirect, val);
   else
      val = bld_insn_1(bld, NV_OP_MOV, val);

   bld->saved_addr[i][0] = val;
   bld->saved_addr[i][0]->reg.file = NV_FILE_ADDR;
   bld->saved_addr[i][0]->reg.type = NV_TYPE_U16;
   bld->saved_addr[i][1] = indirect;

   return bld->saved_addr[i][0];
}

static struct nv_value *
bld_predicate(struct bld_context *bld, struct nv_value *src, boolean bool_only)
{
   struct nv_instruction *s0i, *nvi = src->insn;

   if (!nvi) {
      nvi = bld_insn_1(bld,
                       (src->reg.file == NV_FILE_IMM) ? NV_OP_MOV : NV_OP_LDA,
                       src)->insn;
      src = nvi->def[0];
   } else
   if (bool_only) {
      while (nvi->opcode == NV_OP_ABS || nvi->opcode == NV_OP_NEG ||
             nvi->opcode == NV_OP_CVT) {
         s0i = nvi->src[0]->value->insn;
         if (!s0i || !nv50_op_can_write_flags(s0i->opcode))
            break;
         nvi = s0i;
         assert(!nvi->flags_src);
      }
   }

   if (!nv50_op_can_write_flags(nvi->opcode) ||
       nvi->bb != bld->pc->current_block) {
      nvi = new_instruction(bld->pc, NV_OP_CVT);
      nv_reference(bld->pc, &nvi->src[0], src);
   }

   if (!nvi->flags_def) {
      nvi->flags_def = new_value(bld->pc, NV_FILE_FLAGS, NV_TYPE_U16);
      nvi->flags_def->insn = nvi;
   }
   return nvi->flags_def;
}
static void
bld_kil(struct bld_context *bld, struct nv_value *src)
{
   struct nv_instruction *nvi;

   src = bld_predicate(bld, src, FALSE);
   nvi = new_instruction(bld->pc, NV_OP_KIL);
   nvi->fixed = 1;
   nvi->flags_src = new_ref(bld->pc, src);
   nvi->cc = NV_CC_LT;
}

static void
bld_flow(struct bld_context *bld, uint opcode, ubyte cc,
         struct nv_value *src, struct nv_basic_block *target,
         boolean plan_reconverge)
{
   struct nv_instruction *nvi;

   if (plan_reconverge)
      new_instruction(bld->pc, NV_OP_JOINAT)->fixed = 1;

   nvi = new_instruction(bld->pc, opcode);
   nvi->is_terminator = 1;
   nvi->cc = cc;
   nvi->target = target;
   if (src)
      nvi->flags_src = new_ref(bld->pc, src);
}

static ubyte
translate_setcc(unsigned opcode)
{
   switch (opcode) {
   case TGSI_OPCODE_SLT: return NV_CC_LT;
   case TGSI_OPCODE_SGE: return NV_CC_GE;
   case TGSI_OPCODE_SEQ: return NV_CC_EQ;
   case TGSI_OPCODE_SGT: return NV_CC_GT;
   case TGSI_OPCODE_SLE: return NV_CC_LE;
   case TGSI_OPCODE_SNE: return NV_CC_NE | NV_CC_U;
   case TGSI_OPCODE_STR: return NV_CC_TR;
   case TGSI_OPCODE_SFL: return NV_CC_FL;

   case TGSI_OPCODE_ISLT: return NV_CC_LT;
   case TGSI_OPCODE_ISGE: return NV_CC_GE;
   case TGSI_OPCODE_USEQ: return NV_CC_EQ;
   case TGSI_OPCODE_USGE: return NV_CC_GE;
   case TGSI_OPCODE_USLT: return NV_CC_LT;
   case TGSI_OPCODE_USNE: return NV_CC_NE;
   default:
      assert(0);
      return NV_CC_FL;
   }
}

static uint
translate_opcode(uint opcode)
{
   switch (opcode) {
   case TGSI_OPCODE_ABS: return NV_OP_ABS;
   case TGSI_OPCODE_ADD:
   case TGSI_OPCODE_SUB:
   case TGSI_OPCODE_UADD: return NV_OP_ADD;
   case TGSI_OPCODE_AND: return NV_OP_AND;
   case TGSI_OPCODE_EX2: return NV_OP_EX2;
   case TGSI_OPCODE_CEIL: return NV_OP_CEIL;
   case TGSI_OPCODE_FLR: return NV_OP_FLOOR;
   case TGSI_OPCODE_TRUNC: return NV_OP_TRUNC;
   case TGSI_OPCODE_ROUND: return NV_OP_ROUND;
   case TGSI_OPCODE_COS: return NV_OP_COS;
   case TGSI_OPCODE_SIN: return NV_OP_SIN;
   case TGSI_OPCODE_DDX: return NV_OP_DFDX;
   case TGSI_OPCODE_DDY: return NV_OP_DFDY;
   case TGSI_OPCODE_F2I:
   case TGSI_OPCODE_F2U:
   case TGSI_OPCODE_I2F:
   case TGSI_OPCODE_U2F: return NV_OP_CVT;
   case TGSI_OPCODE_INEG: return NV_OP_NEG;
   case TGSI_OPCODE_LG2: return NV_OP_LG2;
   case TGSI_OPCODE_ISHR:
   case TGSI_OPCODE_USHR: return NV_OP_SHR;
   case TGSI_OPCODE_MAD:
   case TGSI_OPCODE_UMAD: return NV_OP_MAD;
   case TGSI_OPCODE_MAX:
   case TGSI_OPCODE_IMAX:
   case TGSI_OPCODE_UMAX: return NV_OP_MAX;
   case TGSI_OPCODE_MIN:
   case TGSI_OPCODE_IMIN:
   case TGSI_OPCODE_UMIN: return NV_OP_MIN;
   case TGSI_OPCODE_MUL:
   case TGSI_OPCODE_UMUL: return NV_OP_MUL;
   case TGSI_OPCODE_OR: return NV_OP_OR;
   case TGSI_OPCODE_RCP: return NV_OP_RCP;
   case TGSI_OPCODE_RSQ: return NV_OP_RSQ;
   case TGSI_OPCODE_SAD: return NV_OP_SAD;
   case TGSI_OPCODE_SHL: return NV_OP_SHL;
   case TGSI_OPCODE_SLT:
   case TGSI_OPCODE_SGE:
   case TGSI_OPCODE_SEQ:
   case TGSI_OPCODE_SGT:
   case TGSI_OPCODE_SLE:
   case TGSI_OPCODE_SNE:
   case TGSI_OPCODE_ISLT:
   case TGSI_OPCODE_ISGE:
   case TGSI_OPCODE_USEQ:
   case TGSI_OPCODE_USGE:
   case TGSI_OPCODE_USLT:
   case TGSI_OPCODE_USNE: return NV_OP_SET;
   case TGSI_OPCODE_TEX: return NV_OP_TEX;
   case TGSI_OPCODE_TXP: return NV_OP_TEX;
   case TGSI_OPCODE_TXB: return NV_OP_TXB;
   case TGSI_OPCODE_TXL: return NV_OP_TXL;
   case TGSI_OPCODE_TXD: return NV_OP_TEX;
   case TGSI_OPCODE_XOR: return NV_OP_XOR;
   default:
      return NV_OP_NOP;
   }
}
static ubyte
infer_src_type(unsigned opcode)
{
   switch (opcode) {
   case TGSI_OPCODE_MOV:
   case TGSI_OPCODE_AND:
   case TGSI_OPCODE_OR:
   case TGSI_OPCODE_XOR:
   case TGSI_OPCODE_SAD:
   case TGSI_OPCODE_U2F:
   case TGSI_OPCODE_UADD:
   case TGSI_OPCODE_UDIV:
   case TGSI_OPCODE_UMOD:
   case TGSI_OPCODE_UMAD:
   case TGSI_OPCODE_UMUL:
   case TGSI_OPCODE_UMAX:
   case TGSI_OPCODE_UMIN:
   case TGSI_OPCODE_USEQ:
   case TGSI_OPCODE_USGE:
   case TGSI_OPCODE_USLT:
   case TGSI_OPCODE_USNE:
   case TGSI_OPCODE_USHR:
      return NV_TYPE_U32;
   case TGSI_OPCODE_I2F:
   case TGSI_OPCODE_IDIV:
   case TGSI_OPCODE_IMAX:
   case TGSI_OPCODE_IMIN:
   case TGSI_OPCODE_INEG:
   case TGSI_OPCODE_ISGE:
   case TGSI_OPCODE_ISHR:
   case TGSI_OPCODE_ISLT:
      return NV_TYPE_S32;
   default:
      return NV_TYPE_F32;
   }
}

static ubyte
infer_dst_type(unsigned opcode)
{
   switch (opcode) {
   case TGSI_OPCODE_MOV:
   case TGSI_OPCODE_F2U:
   case TGSI_OPCODE_AND:
   case TGSI_OPCODE_OR:
   case TGSI_OPCODE_XOR:
   case TGSI_OPCODE_SAD:
   case TGSI_OPCODE_UADD:
   case TGSI_OPCODE_UDIV:
   case TGSI_OPCODE_UMOD:
   case TGSI_OPCODE_UMAD:
   case TGSI_OPCODE_UMUL:
   case TGSI_OPCODE_UMAX:
   case TGSI_OPCODE_UMIN:
   case TGSI_OPCODE_USEQ:
   case TGSI_OPCODE_USGE:
   case TGSI_OPCODE_USLT:
   case TGSI_OPCODE_USNE:
   case TGSI_OPCODE_USHR:
      return NV_TYPE_U32;
   case TGSI_OPCODE_F2I:
   case TGSI_OPCODE_IDIV:
   case TGSI_OPCODE_IMAX:
   case TGSI_OPCODE_IMIN:
   case TGSI_OPCODE_INEG:
   case TGSI_OPCODE_ISGE:
   case TGSI_OPCODE_ISHR:
   case TGSI_OPCODE_ISLT:
      return NV_TYPE_S32;
   default:
      return NV_TYPE_F32;
   }
}
static void
emit_store(struct bld_context *bld, const struct tgsi_full_instruction *inst,
           unsigned chan, struct nv_value *value)
{
   struct nv_value *ptr;
   const struct tgsi_full_dst_register *reg = &inst->Dst[0];

   if (reg->Register.Indirect) {
      ptr = FETCH_ADDR(reg->Indirect.Index,
                       tgsi_util_get_src_register_swizzle(&reg->Indirect, 0));
   } else {
      ptr = NULL;
   }

   assert(chan < 4);

   if (inst->Instruction.Opcode != TGSI_OPCODE_MOV)
      value->reg.type = infer_dst_type(inst->Instruction.Opcode);

   switch (inst->Instruction.Saturate) {
   case TGSI_SAT_NONE:
      break;
   case TGSI_SAT_ZERO_ONE:
      BLD_INSN_1_EX(value, SAT, F32, value, F32);
      break;
   case TGSI_SAT_MINUS_PLUS_ONE:
      value->reg.as_type = NV_TYPE_F32;
      value = bld_insn_2(bld, NV_OP_MAX, value, bld_load_imm_f32(bld, -1.0f));
      value = bld_insn_2(bld, NV_OP_MIN, value, bld_load_imm_f32(bld, 1.0f));
      break;
   }

   switch (reg->Register.File) {
   case TGSI_FILE_OUTPUT:
      if (!value->insn && (bld->ti->output_file == NV_FILE_OUT))
         value = bld_insn_1(bld, NV_OP_MOV, value);
      value = bld_insn_1(bld, NV_OP_MOV, value);
      value->reg.file = bld->ti->output_file;

      if (bld->ti->p->type == PIPE_SHADER_FRAGMENT) {
         STORE_OUTR(reg->Register.Index, chan, value);
      } else {
         value->insn->fixed = 1;
         value->reg.id = bld->ti->output_map[reg->Register.Index][chan];
      }
      break;
   case TGSI_FILE_TEMPORARY:
      assert(reg->Register.Index < BLD_MAX_TEMPS);
      if (!value->insn || (value->insn->bb != bld->pc->current_block))
         value = bld_insn_1(bld, NV_OP_MOV, value);
      value->reg.file = NV_FILE_GPR;

      if (bld->ti->store_to_memory)
         bld_lmem_store(bld, ptr, reg->Register.Index * 4 + chan, value);
      else
         STORE_TEMP(reg->Register.Index, chan, value);
      break;
   case TGSI_FILE_ADDRESS:
      assert(reg->Register.Index < BLD_MAX_ADDRS);
      value->reg.file = NV_FILE_ADDR;
      value->reg.type = NV_TYPE_U16;
      STORE_ADDR(reg->Register.Index, chan, value);
      break;
   }
}

static INLINE uint32_t
bld_is_output_written(struct bld_context *bld, int i, int c)
{
   if (c < 0)
      return bld->outputs_written[i / 8] & (0xf << ((i * 4) % 32));
   return bld->outputs_written[i / 8] & (1 << ((i * 4 + c) % 32));
}
static void
bld_export_outputs(struct bld_context *bld)
{
   struct nv_value *vals[4];
   struct nv_instruction *nvi;
   int i, c, n;

   bld_push_values(&bld->ovs[0][0], PIPE_MAX_SHADER_OUTPUTS);

   for (i = 0; i < PIPE_MAX_SHADER_OUTPUTS; ++i) {
      if (!bld_is_output_written(bld, i, -1))
         continue;
      for (n = 0, c = 0; c < 4; ++c) {
         if (!bld_is_output_written(bld, i, c))
            continue;
         vals[n] = bld_fetch_global(bld, &bld->ovs[i][c]);
         assert(vals[n]);
         vals[n] = bld_insn_1(bld, NV_OP_MOV, vals[n]);
         vals[n++]->reg.id = bld->ti->output_map[i][c];
      }
      assert(n);

      (nvi = new_instruction(bld->pc, NV_OP_EXPORT))->fixed = 1;

      for (c = 0; c < n; ++c)
         nvi->src[c] = new_ref(bld->pc, vals[c]);
   }
}

static void
bld_new_block(struct bld_context *bld, struct nv_basic_block *b)
{
   int i;

   bld_push_values(&bld->tvs[0][0], BLD_MAX_TEMPS);
   bld_push_values(&bld->avs[0][0], BLD_MAX_ADDRS);
   bld_push_values(&bld->pvs[0][0], BLD_MAX_PREDS);
   bld_push_values(&bld->ovs[0][0], PIPE_MAX_SHADER_OUTPUTS);

   bld->pc->current_block = b;

   for (i = 0; i < 4; ++i)
      bld->saved_addr[i][0] = NULL;

   for (i = 0; i < 128; ++i)
      bld->saved_inputs[i] = NULL;

   bld->out_kind = CFG_EDGE_FORWARD;
}

static struct nv_value *
bld_saved_input(struct bld_context *bld, unsigned i, unsigned c)
{
   unsigned idx = bld->ti->input_map[i][c];

   if (bld->ti->p->type != PIPE_SHADER_FRAGMENT)
      return NULL;
   if (bld->saved_inputs[idx])
      return bld->saved_inputs[idx];
   return NULL;
}

static struct nv_value *
bld_interpolate(struct bld_context *bld, unsigned mode, struct nv_value *val)
{
   if (val->reg.id == 255) {
      /* gl_FrontFacing: 0/~0 to -1.0/+1.0 */
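      /* Note (not from the original source): SHL by 31 turns 0 / ~0 into
       * 0x00000000 / 0x80000000; XOR with -1.0f (0xbf800000) then yields
       * 0xbf800000 (-1.0f) or 0x3f800000 (+1.0f).
       */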
      val = bld_insn_1(bld, NV_OP_LINTERP, val);
      val = bld_insn_2(bld, NV_OP_SHL, val, bld_imm_u32(bld, 31));
      val->insn->src[0]->typecast = NV_TYPE_U32;
      val = bld_insn_2(bld, NV_OP_XOR, val, bld_imm_f32(bld, -1.0f));
      val->insn->src[0]->typecast = NV_TYPE_U32;
   } else
   if (mode & (NV50_INTERP_LINEAR | NV50_INTERP_FLAT))
      val = bld_insn_1(bld, NV_OP_LINTERP, val);
   else
      val = bld_insn_2(bld, NV_OP_PINTERP, val, bld->frgcrd[3]);

   val->insn->flat = (mode & NV50_INTERP_FLAT) ? 1 : 0;
   val->insn->centroid = (mode & NV50_INTERP_CENTROID) ? 1 : 0;
   return val;
}
static struct nv_value *
emit_fetch(struct bld_context *bld, const struct tgsi_full_instruction *insn,
           const unsigned s, const unsigned chan)
{
   const struct tgsi_full_src_register *src = &insn->Src[s];
   struct nv_value *res;
   struct nv_value *ptr = NULL;
   unsigned idx, swz, dim_idx, ind_idx, ind_swz, sgn;
   ubyte type = infer_src_type(insn->Instruction.Opcode);

   idx = src->Register.Index;
   swz = tgsi_util_get_full_src_register_swizzle(src, chan);
   dim_idx = -1;
   ind_idx = -1;
   ind_swz = 0;

   if (src->Register.Indirect) {
      ind_idx = src->Indirect.Index;
      ind_swz = tgsi_util_get_src_register_swizzle(&src->Indirect, 0);

      ptr = FETCH_ADDR(ind_idx, ind_swz);
   }
   if (idx >= (128 / 4) && src->Register.File == TGSI_FILE_CONSTANT)
      ptr = bld_get_address(bld, (idx * 16) & ~0x1ff, ptr);

   switch (src->Register.File) {
   case TGSI_FILE_CONSTANT:
      dim_idx = src->Dimension.Index;
      assert(dim_idx < 15);

      res = new_value(bld->pc, NV_FILE_MEM_C(dim_idx), type);
      SET_TYPE(res, type);
      res->reg.id = (idx * 4 + swz) & 127;
      res = bld_insn_1(bld, NV_OP_LDA, res);

      if (ptr)
         res->insn->src[4] = new_ref(bld->pc, ptr);
      break;
   case TGSI_FILE_IMMEDIATE:
      assert(idx < bld->ti->immd32_nr);
      res = bld_load_imm_u32(bld, bld->ti->immd32[idx * 4 + swz]);

      switch (bld->ti->immd32_ty[idx]) {
      case TGSI_IMM_FLOAT32: SET_TYPE(res, NV_TYPE_F32); break;
      case TGSI_IMM_UINT32: SET_TYPE(res, NV_TYPE_U32); break;
      case TGSI_IMM_INT32: SET_TYPE(res, NV_TYPE_S32); break;
      default:
         SET_TYPE(res, type);
         break;
      }
      break;
   case TGSI_FILE_INPUT:
      res = bld_saved_input(bld, idx, swz);
      if (res && (insn->Instruction.Opcode != TGSI_OPCODE_TXP))
         break;

      res = new_value(bld->pc, bld->ti->input_file, type);
      res->reg.id = bld->ti->input_map[idx][swz];

      if (res->reg.file == NV_FILE_MEM_V) {
         res = bld_interpolate(bld, bld->ti->interp_mode[idx], res);
      } else {
         assert(src->Dimension.Dimension == 0);
         res = bld_insn_1(bld, NV_OP_LDA, res);
         assert(res->reg.type == type);
      }
      bld->saved_inputs[bld->ti->input_map[idx][swz]] = res;
      break;
   case TGSI_FILE_TEMPORARY:
      if (bld->ti->store_to_memory)
         res = bld_lmem_load(bld, ptr, idx * 4 + swz);
      else
         res = bld_fetch_global(bld, &bld->tvs[idx][swz]);
      break;
   case TGSI_FILE_ADDRESS:
      res = bld_fetch_global(bld, &bld->avs[idx][swz]);
      break;
   case TGSI_FILE_PREDICATE:
      res = bld_fetch_global(bld, &bld->pvs[idx][swz]);
      break;
   case TGSI_FILE_SYSTEM_VALUE:
      res = new_value(bld->pc, bld->ti->input_file, NV_TYPE_U32);
      res->reg.id = bld->ti->sysval_map[idx];
      res = bld_insn_1(bld, NV_OP_LDA, res);
      res = bld_insn_1(bld, NV_OP_CVT, res);
      res->reg.type = NV_TYPE_F32;
      break;
   default:
      NOUVEAU_ERR("illegal/unhandled src reg file: %d\n", src->Register.File);
      abort();
      break;
   }
   if (!res)
      return bld_undef(bld, NV_FILE_GPR);

   sgn = tgsi_util_get_full_src_register_sign_mode(src, chan);

   if (insn->Instruction.Opcode != TGSI_OPCODE_MOV)
      res->reg.as_type = type;
   else
   if (sgn != TGSI_UTIL_SIGN_KEEP) /* apparently "MOV A, -B" assumes float */
      res->reg.as_type = NV_TYPE_F32;

   switch (sgn) {
   case TGSI_UTIL_SIGN_KEEP:
      break;
   case TGSI_UTIL_SIGN_CLEAR:
      res = bld_insn_1(bld, NV_OP_ABS, res);
      break;
   case TGSI_UTIL_SIGN_TOGGLE:
      res = bld_insn_1(bld, NV_OP_NEG, res);
      break;
   case TGSI_UTIL_SIGN_SET:
      res = bld_insn_1(bld, NV_OP_ABS, res);
      res = bld_insn_1(bld, NV_OP_NEG, res);
      break;
   default:
      NOUVEAU_ERR("illegal/unhandled src reg sign mode\n");
      abort();
      break;
   }
   return res;
}
static void
bld_lit(struct bld_context *bld, struct nv_value *dst0[4],
        const struct tgsi_full_instruction *insn)
{
   struct nv_value *val0 = NULL;
   struct nv_value *zero = NULL;
   unsigned mask = insn->Dst[0].Register.WriteMask;

   if (mask & ((1 << 0) | (1 << 3)))
      dst0[3] = dst0[0] = bld_load_imm_f32(bld, 1.0f);

   if (mask & (3 << 1)) {
      zero = bld_load_imm_f32(bld, 0.0f);
      val0 = bld_insn_2(bld, NV_OP_MAX, emit_fetch(bld, insn, 0, 0), zero);

      if (mask & (1 << 1))
         dst0[1] = val0;
   }

   if (mask & (1 << 2)) {
      struct nv_value *val1, *val3, *src1, *src3;
      struct nv_value *pos128 = bld_load_imm_f32(bld, 127.999999f);
      struct nv_value *neg128 = bld_load_imm_f32(bld, -127.999999f);

      src1 = emit_fetch(bld, insn, 0, 1);
      src3 = emit_fetch(bld, insn, 0, 3);

      val0->insn->flags_def = new_value(bld->pc, NV_FILE_FLAGS, NV_TYPE_U16);
      val0->insn->flags_def->insn = val0->insn;

      val1 = bld_insn_2(bld, NV_OP_MAX, src1, zero);
      val3 = bld_insn_2(bld, NV_OP_MAX, src3, neg128);
      val3 = bld_insn_2(bld, NV_OP_MIN, val3, pos128);
      val3 = bld_pow(bld, val1, val3);

      dst0[2] = bld_insn_1(bld, NV_OP_MOV, zero);
      dst0[2]->insn->cc = NV_CC_LE;
      dst0[2]->insn->flags_src = new_ref(bld->pc, val0->insn->flags_def);

      dst0[2] = bld_insn_2(bld, NV_OP_SELECT, val3, dst0[2]);
   }
}
static INLINE void
get_tex_dim(const struct tgsi_full_instruction *insn, int *dim, int *arg)
{
   switch (insn->Texture.Texture) {
   case TGSI_TEXTURE_1D:
      *arg = *dim = 1;
      break;
   case TGSI_TEXTURE_SHADOW1D:
      *dim = 1;
      *arg = 2;
      break;
   case TGSI_TEXTURE_UNKNOWN:
   case TGSI_TEXTURE_2D:
   case TGSI_TEXTURE_RECT:
      *arg = *dim = 2;
      break;
   case TGSI_TEXTURE_SHADOW2D:
   case TGSI_TEXTURE_SHADOWRECT:
      *dim = 2;
      *arg = 3;
      break;
   case TGSI_TEXTURE_3D:
   case TGSI_TEXTURE_CUBE:
      *dim = *arg = 3;
      break;
   default:
      assert(0);
      break;
   }
}
static void
load_proj_tex_coords(struct bld_context *bld,
                     struct nv_value *t[4], int dim, int arg,
                     const struct tgsi_full_instruction *insn)
{
   int c, mask;

   mask = (1 << dim) - 1;
   if (arg != dim)
      mask |= 4; /* depth comparison value */

   t[3] = emit_fetch(bld, insn, 0, 3);

   if (t[3]->insn->opcode == NV_OP_PINTERP) {
      t[3] = bld_duplicate_insn(bld, t[3]->insn);
      t[3]->insn->opcode = NV_OP_LINTERP;
      nv_reference(bld->pc, &t[3]->insn->src[1], NULL);
   }

   t[3] = bld_insn_1(bld, NV_OP_RCP, t[3]);

   for (c = 0; c < 4; ++c) {
      if (!(mask & (1 << c)))
         continue;
      t[c] = emit_fetch(bld, insn, 0, c);

      if (t[c]->insn->opcode != NV_OP_LINTERP &&
          t[c]->insn->opcode != NV_OP_PINTERP)
         continue;
      t[c] = bld_duplicate_insn(bld, t[c]->insn);
      t[c]->insn->opcode = NV_OP_PINTERP;
      nv_reference(bld->pc, &t[c]->insn->src[1], t[3]);

      mask &= ~(1 << c);
   }
   for (c = 0; mask; ++c, mask >>= 1) {
      if (!(mask & 1))
         continue;
      t[c] = bld_insn_2(bld, NV_OP_MUL, t[c], t[3]);
   }
}

/* For a quad of threads / top left, top right, bottom left, bottom right
 * pixels, do a different operation, and take src0 from a specific thread.
 */
#define QOP_ADD  0
#define QOP_SUBR 1
#define QOP_SUB  2
#define QOP_MOV1 3

#define QOP(a, b, c, d) \
   ((QOP_##a << 0) | (QOP_##b << 2) | (QOP_##c << 4) | (QOP_##d << 6))
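
/* Note (not from the original source): QOP(SUBR, SUBR, SUBR, SUBR) with
 * lane l, as used in bld_texbias_sequence below, appears to subtract each
 * thread's own value from the value held by thread l, so threads whose
 * bias equals that of thread l produce zero and set the EQ condition.
 */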
static INLINE struct nv_value *
bld_quadop(struct bld_context *bld, ubyte qop, struct nv_value *src0, int lane,
           struct nv_value *src1, boolean wp)
{
   struct nv_value *val = bld_insn_2(bld, NV_OP_QUADOP, src0, src1);
   val->insn->lanes = lane;
   val->insn->quadop = qop;
   if (wp) {
      val->insn->flags_def = new_value(bld->pc, NV_FILE_FLAGS, NV_TYPE_U16);
      val->insn->flags_def->insn = val->insn;
   }
   return val;
}

static INLINE struct nv_value *
bld_cmov(struct bld_context *bld,
         struct nv_value *src, ubyte cc, struct nv_value *cr)
{
   src = bld_insn_1(bld, NV_OP_MOV, src);

   src->insn->cc = cc;
   src->insn->flags_src = new_ref(bld->pc, cr);

   return src;
}

static struct nv_instruction *
emit_tex(struct bld_context *bld, uint opcode,
         struct nv_value *dst[4], struct nv_value *t_in[4],
         int argc, int tic, int tsc, int cube)
{
   struct nv_value *t[4];
   struct nv_instruction *nvi;
   int c;

   /* the inputs to a tex instruction must be separate values */
   for (c = 0; c < argc; ++c) {
      t[c] = bld_insn_1(bld, NV_OP_MOV, t_in[c]);
      SET_TYPE(t[c], NV_TYPE_F32);
      t[c]->insn->fixed = 1;
   }

   nvi = new_instruction(bld->pc, opcode);

   for (c = 0; c < 4; ++c)
      dst[c] = bld_def(nvi, c, new_value(bld->pc, NV_FILE_GPR, NV_TYPE_F32));
   for (c = 0; c < argc; ++c)
      nvi->src[c] = new_ref(bld->pc, t[c]);

   nvi->tex_t = tic;
   nvi->tex_s = tsc;
   nvi->tex_mask = 0xf;
   nvi->tex_cube = cube;
   nvi->tex_live = 0;
   nvi->tex_argc = argc;

   return nvi;
}

static void
bld_texlod_sequence(struct bld_context *bld,
                    struct nv_value *dst[4], struct nv_value *t[4], int arg,
                    int tic, int tsc, int cube)
{
   emit_tex(bld, NV_OP_TXL, dst, t, arg, tic, tsc, cube); /* TODO */
}

/* The lanes of a quad are grouped by the bit in the condition register
 * they have set, which is selected by differing bias values.
 * Move the input values for TEX into a new register set for each group
 * and execute TEX only for a specific group.
 * We always need to use 4 new registers for the inputs/outputs because
 * the implicitly calculated derivatives must be correct.
 */
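
/* Illustrative sketch (not from the original source) of the sequence
 * built below, per output channel c:
 *    flags  = select(cr[0], cr[1], cr[2], cr[3]);
 *    res[l] = TXB(t), predicated on group l's condition code;  (4x)
 *    dst[c] = select(res[0][c], res[1][c], res[2][c], res[3][c]);
 */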
static void
bld_texbias_sequence(struct bld_context *bld,
                     struct nv_value *dst[4], struct nv_value *t[4], int arg,
                     int tic, int tsc, int cube)
{
   struct nv_instruction *sel, *tex;
   struct nv_value *bit[4], *cr[4], *res[4][4], *val;
   int l, c;
   const ubyte cc[4] = { NV_CC_EQ, NV_CC_S, NV_CC_C, NV_CC_O };

   for (l = 0; l < 4; ++l) {
      bit[l] = bld_load_imm_u32(bld, 1 << l);

      val = bld_quadop(bld, QOP(SUBR, SUBR, SUBR, SUBR),
                       t[arg - 1], l, t[arg - 1], TRUE);

      cr[l] = bld_cmov(bld, bit[l], NV_CC_EQ, val->insn->flags_def);

      cr[l]->reg.file = NV_FILE_FLAGS;
      SET_TYPE(cr[l], NV_TYPE_U16);
   }

   sel = new_instruction(bld->pc, NV_OP_SELECT);

   for (l = 0; l < 4; ++l)
      sel->src[l] = new_ref(bld->pc, cr[l]);
   bld_def(sel, 0, new_value(bld->pc, NV_FILE_FLAGS, NV_TYPE_U16));

   for (l = 0; l < 4; ++l) {
      tex = emit_tex(bld, NV_OP_TXB, dst, t, arg, tic, tsc, cube);
      tex->cc = cc[l];
      tex->flags_src = new_ref(bld->pc, sel->def[0]);

      for (c = 0; c < 4; ++c)
         res[l][c] = tex->def[c];
   }

   for (l = 0; l < 4; ++l)
      for (c = 0; c < 4; ++c)
         res[l][c] = bld_cmov(bld, res[l][c], cc[l], sel->def[0]);

   for (c = 0; c < 4; ++c) {
      sel = new_instruction(bld->pc, NV_OP_SELECT);

      for (l = 0; l < 4; ++l)
         sel->src[l] = new_ref(bld->pc, res[l][c]);
      bld_def(sel, 0, (dst[c] = new_value(bld->pc, NV_FILE_GPR, NV_TYPE_F32)));
   }
}
static boolean
bld_is_constant(struct nv_value *val)
{
   if (val->reg.file == NV_FILE_IMM)
      return TRUE;
   return val->insn && nvcg_find_constant(val->insn->src[0]);
}

static void
bld_tex(struct bld_context *bld, struct nv_value *dst0[4],
        const struct tgsi_full_instruction *insn)
{
   struct nv_value *t[4], *s[3];
   uint opcode = translate_opcode(insn->Instruction.Opcode);
   int arg, dim, c;
   const int tic = insn->Src[1].Register.Index;
   const int tsc = tic;
   const int cube = (insn->Texture.Texture == TGSI_TEXTURE_CUBE) ? 1 : 0;

   get_tex_dim(insn, &dim, &arg);

   if (!cube && insn->Instruction.Opcode == TGSI_OPCODE_TXP)
      load_proj_tex_coords(bld, t, dim, arg, insn);
   else {
      for (c = 0; c < dim; ++c)
         t[c] = emit_fetch(bld, insn, 0, c);
      if (arg != dim)
         t[dim] = emit_fetch(bld, insn, 0, 2);
   }

   if (cube) {
      assert(dim >= 3);
      for (c = 0; c < 3; ++c)
         s[c] = bld_insn_1(bld, NV_OP_ABS, t[c]);

      s[0] = bld_insn_2(bld, NV_OP_MAX, s[0], s[1]);
      s[0] = bld_insn_2(bld, NV_OP_MAX, s[0], s[2]);
      s[0] = bld_insn_1(bld, NV_OP_RCP, s[0]);

      for (c = 0; c < 3; ++c)
         t[c] = bld_insn_2(bld, NV_OP_MUL, t[c], s[0]);
   }

   if (opcode == NV_OP_TXB || opcode == NV_OP_TXL) {
      t[arg++] = emit_fetch(bld, insn, 0, 3);

      if ((bld->ti->p->type == PIPE_SHADER_FRAGMENT) &&
          !bld_is_constant(t[arg - 1])) {
         if (opcode == NV_OP_TXB)
            bld_texbias_sequence(bld, dst0, t, arg, tic, tsc, cube);
         else
            bld_texlod_sequence(bld, dst0, t, arg, tic, tsc, cube);
         return;
      }
   }

   emit_tex(bld, opcode, dst0, t, arg, tic, tsc, cube);
}

static INLINE struct nv_value *
bld_dot(struct bld_context *bld, const struct tgsi_full_instruction *insn,
        int n)
{
   struct nv_value *dotp, *src0, *src1;
   int c;

   src0 = emit_fetch(bld, insn, 0, 0);
   src1 = emit_fetch(bld, insn, 1, 0);
   dotp = bld_insn_2(bld, NV_OP_MUL, src0, src1);

   for (c = 1; c < n; ++c) {
      src0 = emit_fetch(bld, insn, 0, c);
      src1 = emit_fetch(bld, insn, 1, c);
      dotp = bld_insn_3(bld, NV_OP_MAD, src0, src1, dotp);
   }
   return dotp;
}
  1297. #define FOR_EACH_DST0_ENABLED_CHANNEL(chan, inst) \
  1298. for (chan = 0; chan < 4; ++chan) \
  1299. if ((inst)->Dst[0].Register.WriteMask & (1 << chan))
  1300. static void
  1301. bld_instruction(struct bld_context *bld,
  1302. const struct tgsi_full_instruction *insn)
  1303. {
  1304. struct nv50_program *prog = bld->ti->p;
  1305. const struct tgsi_full_dst_register *dreg = &insn->Dst[0];
  1306. struct nv_value *src0;
  1307. struct nv_value *src1;
  1308. struct nv_value *src2;
  1309. struct nv_value *dst0[4] = { 0 };
  1310. struct nv_value *temp;
  1311. int c;
  1312. uint opcode = translate_opcode(insn->Instruction.Opcode);
  1313. #if NV50_DEBUG & NV50_DEBUG_PROG_IR
  1314. debug_printf("bld_instruction:"); tgsi_dump_instruction(insn, 1);
  1315. #endif
  1316. switch (insn->Instruction.Opcode) {
  1317. case TGSI_OPCODE_ADD:
  1318. case TGSI_OPCODE_MAX:
  1319. case TGSI_OPCODE_MIN:
  1320. case TGSI_OPCODE_MUL:
  1321. FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
  1322. src0 = emit_fetch(bld, insn, 0, c);
  1323. src1 = emit_fetch(bld, insn, 1, c);
  1324. dst0[c] = bld_insn_2(bld, opcode, src0, src1);
  1325. }
  1326. break;
  1327. case TGSI_OPCODE_ARL:
      src1 = bld_imm_u32(bld, 4);
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
         src0 = emit_fetch(bld, insn, 0, c);
         temp = bld_insn_1(bld, NV_OP_FLOOR, src0);
         SET_TYPE(temp, NV_TYPE_S32);
         dst0[c] = bld_insn_2(bld, NV_OP_SHL, temp, src1);
      }
      break;
   case TGSI_OPCODE_CMP:
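      /* dst = (src0 < 0) ? src1 : src2 -- move both candidates under
       * complementary condition codes, then join them with SELECT
       */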
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
         src0 = emit_fetch(bld, insn, 0, c);
         src1 = emit_fetch(bld, insn, 1, c);
         src2 = emit_fetch(bld, insn, 2, c);
         src0 = bld_predicate(bld, src0, FALSE);

         src1 = bld_insn_1(bld, NV_OP_MOV, src1);
         src1->insn->flags_src = new_ref(bld->pc, src0);
         src1->insn->cc = NV_CC_LT;

         src2 = bld_insn_1(bld, NV_OP_MOV, src2);
         src2->insn->flags_src = new_ref(bld->pc, src0);
         src2->insn->cc = NV_CC_GE;

         dst0[c] = bld_insn_2(bld, NV_OP_SELECT, src1, src2);
      }
      break;
   case TGSI_OPCODE_COS:
   case TGSI_OPCODE_SIN:
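      /* PRESIN reduces the argument into the range the hardware SIN/COS
       * unit accepts; channels x/y/z share one scalar result, while a
       * w write gets its own computation from source channel 3
       */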
      src0 = emit_fetch(bld, insn, 0, 0);
      temp = bld_insn_1(bld, NV_OP_PRESIN, src0);
      if (insn->Dst[0].Register.WriteMask & 7)
         temp = bld_insn_1(bld, opcode, temp);
      for (c = 0; c < 3; ++c)
         if (insn->Dst[0].Register.WriteMask & (1 << c))
            dst0[c] = temp;
      if (!(insn->Dst[0].Register.WriteMask & (1 << 3)))
         break;
      src0 = emit_fetch(bld, insn, 0, 3);
      temp = bld_insn_1(bld, NV_OP_PRESIN, src0);
      dst0[3] = bld_insn_1(bld, opcode, temp);
      break;
   case TGSI_OPCODE_DP2:
      temp = bld_dot(bld, insn, 2);
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
         dst0[c] = temp;
      break;
   case TGSI_OPCODE_DP3:
      temp = bld_dot(bld, insn, 3);
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
         dst0[c] = temp;
      break;
   case TGSI_OPCODE_DP4:
      temp = bld_dot(bld, insn, 4);
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
         dst0[c] = temp;
      break;
   case TGSI_OPCODE_DPH:
      src0 = bld_dot(bld, insn, 3);
      src1 = emit_fetch(bld, insn, 1, 3);
      temp = bld_insn_2(bld, NV_OP_ADD, src0, src1);
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
         dst0[c] = temp;
      break;
   case TGSI_OPCODE_DST:
      if (insn->Dst[0].Register.WriteMask & 1)
         dst0[0] = bld_imm_f32(bld, 1.0f);
      if (insn->Dst[0].Register.WriteMask & 2) {
         src0 = emit_fetch(bld, insn, 0, 1);
         src1 = emit_fetch(bld, insn, 1, 1);
         dst0[1] = bld_insn_2(bld, NV_OP_MUL, src0, src1);
      }
      if (insn->Dst[0].Register.WriteMask & 4)
         dst0[2] = emit_fetch(bld, insn, 0, 2);
      if (insn->Dst[0].Register.WriteMask & 8)
         dst0[3] = emit_fetch(bld, insn, 1, 3);
      break;
   case TGSI_OPCODE_EXP:
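      /* EXP partial results: x = 2^floor(src), y = src - floor(src),
       * z = 2^src, w = 1.0
       */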
      src0 = emit_fetch(bld, insn, 0, 0);
      temp = bld_insn_1(bld, NV_OP_FLOOR, src0);

      if (insn->Dst[0].Register.WriteMask & 2)
         dst0[1] = bld_insn_2(bld, NV_OP_SUB, src0, temp);
      if (insn->Dst[0].Register.WriteMask & 1) {
         temp = bld_insn_1(bld, NV_OP_PREEX2, temp);
         dst0[0] = bld_insn_1(bld, NV_OP_EX2, temp);
      }
      if (insn->Dst[0].Register.WriteMask & 4) {
         temp = bld_insn_1(bld, NV_OP_PREEX2, src0);
         dst0[2] = bld_insn_1(bld, NV_OP_EX2, temp);
      }
      if (insn->Dst[0].Register.WriteMask & 8)
         dst0[3] = bld_imm_f32(bld, 1.0f);
      break;
   case TGSI_OPCODE_EX2:
      src0 = emit_fetch(bld, insn, 0, 0);
      temp = bld_insn_1(bld, NV_OP_PREEX2, src0);
      temp = bld_insn_1(bld, NV_OP_EX2, temp);
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
         dst0[c] = temp;
      break;
   case TGSI_OPCODE_FRC:
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
         src0 = emit_fetch(bld, insn, 0, c);
         dst0[c] = bld_insn_1(bld, NV_OP_FLOOR, src0);
         dst0[c] = bld_insn_2(bld, NV_OP_SUB, src0, dst0[c]);
      }
      break;
   case TGSI_OPCODE_KIL:
      for (c = 0; c < 4; ++c) {
         src0 = emit_fetch(bld, insn, 0, c);
         bld_kil(bld, src0);
      }
      break;
   case TGSI_OPCODE_KILP:
      (new_instruction(bld->pc, NV_OP_KIL))->fixed = 1;
      break;
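   /* Structured control flow: IF records the current block as both the
    * potential join point and the block whose conditional branch must be
    * patched; ELSE/ENDIF retarget that branch as the CFG is built.
    */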
   case TGSI_OPCODE_IF:
   {
      struct nv_basic_block *b = new_basic_block(bld->pc);

      assert(bld->cond_lvl < BLD_MAX_COND_NESTING);

      nvbb_attach_block(bld->pc->current_block, b, CFG_EDGE_FORWARD);

      bld->join_bb[bld->cond_lvl] = bld->pc->current_block;
      bld->cond_bb[bld->cond_lvl] = bld->pc->current_block;

      src1 = bld_predicate(bld, emit_fetch(bld, insn, 0, 0), TRUE);

      bld_flow(bld, NV_OP_BRA, NV_CC_EQ, src1, NULL, (bld->cond_lvl == 0));

      ++bld->cond_lvl;
      bld_new_block(bld, b);
   }
      break;
   case TGSI_OPCODE_ELSE:
   {
      struct nv_basic_block *b = new_basic_block(bld->pc);

      --bld->cond_lvl;
      nvbb_attach_block(bld->join_bb[bld->cond_lvl], b, CFG_EDGE_FORWARD);

      bld->cond_bb[bld->cond_lvl]->exit->target = b;
      bld->cond_bb[bld->cond_lvl] = bld->pc->current_block;

      new_instruction(bld->pc, NV_OP_BRA)->is_terminator = 1;

      ++bld->cond_lvl;
      bld_new_block(bld, b);
   }
      break;
   case TGSI_OPCODE_ENDIF:
   {
      struct nv_basic_block *b = new_basic_block(bld->pc);

      if (!nvbb_is_terminated(bld->pc->current_block))
         bld_flow(bld, NV_OP_BRA, NV_CC_TR, NULL, b, FALSE);

      --bld->cond_lvl;
      nvbb_attach_block(bld->pc->current_block, b, bld->out_kind);
      nvbb_attach_block(bld->cond_bb[bld->cond_lvl], b, CFG_EDGE_FORWARD);

      bld->cond_bb[bld->cond_lvl]->exit->target = b;

      bld_new_block(bld, b);

      if (!bld->cond_lvl && bld->join_bb[bld->cond_lvl]) {
         bld->join_bb[bld->cond_lvl]->exit->prev->target = b;
         new_instruction(bld->pc, NV_OP_JOIN)->is_join = TRUE;
      }
   }
      break;
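   /* Loops: allocate the loop header (target of CONT and of the ENDLOOP
    * back-edge) and the break target up front; BREAKADDR records where
    * BREAK instructions inside the loop body should land.
    */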
   case TGSI_OPCODE_BGNLOOP:
   {
      struct nv_basic_block *bl = new_basic_block(bld->pc);
      struct nv_basic_block *bb = new_basic_block(bld->pc);

      assert(bld->loop_lvl < BLD_MAX_LOOP_NESTING);

      bld->loop_bb[bld->loop_lvl] = bl;
      bld->brkt_bb[bld->loop_lvl] = bb;

      bld_flow(bld, NV_OP_BREAKADDR, NV_CC_TR, NULL, bb, FALSE);

      nvbb_attach_block(bld->pc->current_block, bl, CFG_EDGE_LOOP_ENTER);

      bld_new_block(bld, bld->loop_bb[bld->loop_lvl++]);

      if (bld->loop_lvl == bld->pc->loop_nesting_bound)
         bld->pc->loop_nesting_bound++;

      bld_clear_def_use(&bld->tvs[0][0], BLD_MAX_TEMPS, bld->loop_lvl);
      bld_clear_def_use(&bld->avs[0][0], BLD_MAX_ADDRS, bld->loop_lvl);
      bld_clear_def_use(&bld->pvs[0][0], BLD_MAX_PREDS, bld->loop_lvl);
   }
      break;
   case TGSI_OPCODE_BRK:
   {
      struct nv_basic_block *bb = bld->brkt_bb[bld->loop_lvl - 1];

      bld_flow(bld, NV_OP_BREAK, NV_CC_TR, NULL, bb, FALSE);

      if (bld->out_kind == CFG_EDGE_FORWARD) /* else we already had BRK/CONT */
         nvbb_attach_block(bld->pc->current_block, bb, CFG_EDGE_LOOP_LEAVE);

      bld->out_kind = CFG_EDGE_FAKE;
   }
      break;
   case TGSI_OPCODE_CONT:
   {
      struct nv_basic_block *bb = bld->loop_bb[bld->loop_lvl - 1];

      bld_flow(bld, NV_OP_BRA, NV_CC_TR, NULL, bb, FALSE);

      nvbb_attach_block(bld->pc->current_block, bb, CFG_EDGE_BACK);

      if ((bb = bld->join_bb[bld->cond_lvl - 1])) {
         bld->join_bb[bld->cond_lvl - 1] = NULL;
         nv_nvi_delete(bb->exit->prev);
      }
      bld->out_kind = CFG_EDGE_FAKE;
   }
      break;
   case TGSI_OPCODE_ENDLOOP:
   {
      struct nv_basic_block *bb = bld->loop_bb[bld->loop_lvl - 1];

      if (!nvbb_is_terminated(bld->pc->current_block))
         bld_flow(bld, NV_OP_BRA, NV_CC_TR, NULL, bb, FALSE);

      nvbb_attach_block(bld->pc->current_block, bb, CFG_EDGE_BACK);

      bld_loop_end(bld, bb); /* replace loop-side operand of the phis */

      bld_new_block(bld, bld->brkt_bb[--bld->loop_lvl]);
   }
      break;
   case TGSI_OPCODE_ABS:
   case TGSI_OPCODE_CEIL:
   case TGSI_OPCODE_FLR:
   case TGSI_OPCODE_TRUNC:
   case TGSI_OPCODE_ROUND:
   case TGSI_OPCODE_DDX:
   case TGSI_OPCODE_DDY:
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
         src0 = emit_fetch(bld, insn, 0, c);
         dst0[c] = bld_insn_1(bld, opcode, src0);
      }
      break;
   case TGSI_OPCODE_LIT:
      bld_lit(bld, dst0, insn);
      break;
   case TGSI_OPCODE_LRP:
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
         src0 = emit_fetch(bld, insn, 0, c);
         src1 = emit_fetch(bld, insn, 1, c);
         src2 = emit_fetch(bld, insn, 2, c);
         dst0[c] = bld_insn_2(bld, NV_OP_SUB, src1, src2);
         dst0[c] = bld_insn_3(bld, NV_OP_MAD, dst0[c], src0, src2);
      }
      break;
   case TGSI_OPCODE_MOV:
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
         dst0[c] = emit_fetch(bld, insn, 0, c);
      break;
   case TGSI_OPCODE_MAD:
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
         src0 = emit_fetch(bld, insn, 0, c);
         src1 = emit_fetch(bld, insn, 1, c);
         src2 = emit_fetch(bld, insn, 2, c);
         dst0[c] = bld_insn_3(bld, opcode, src0, src1, src2);
      }
      break;
   case TGSI_OPCODE_POW:
      src0 = emit_fetch(bld, insn, 0, 0);
      src1 = emit_fetch(bld, insn, 1, 0);
      temp = bld_pow(bld, src0, src1);
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
         dst0[c] = temp;
      break;
   case TGSI_OPCODE_LOG:
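      /* LOG partial results: x = floor(lg2(|src|)),
       * y = |src| / 2^floor(lg2(|src|)), z = lg2(|src|), w = 1.0;
       * the x test uses mask 3 because y's computation also needs the floor
       */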
      src0 = emit_fetch(bld, insn, 0, 0);
      src0 = bld_insn_1(bld, NV_OP_ABS, src0);
      temp = bld_insn_1(bld, NV_OP_LG2, src0);
      dst0[2] = temp;
      if (insn->Dst[0].Register.WriteMask & 3) {
         temp = bld_insn_1(bld, NV_OP_FLOOR, temp);
         dst0[0] = temp;
      }
      if (insn->Dst[0].Register.WriteMask & 2) {
         temp = bld_insn_1(bld, NV_OP_PREEX2, temp);
         temp = bld_insn_1(bld, NV_OP_EX2, temp);
         temp = bld_insn_1(bld, NV_OP_RCP, temp);
         dst0[1] = bld_insn_2(bld, NV_OP_MUL, src0, temp);
      }
      if (insn->Dst[0].Register.WriteMask & 8)
         dst0[3] = bld_imm_f32(bld, 1.0f);
      break;
   case TGSI_OPCODE_RCP:
   case TGSI_OPCODE_LG2:
      src0 = emit_fetch(bld, insn, 0, 0);
      temp = bld_insn_1(bld, opcode, src0);
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
         dst0[c] = temp;
      break;
   case TGSI_OPCODE_RSQ:
      src0 = emit_fetch(bld, insn, 0, 0);
      temp = bld_insn_1(bld, NV_OP_ABS, src0);
      temp = bld_insn_1(bld, NV_OP_RSQ, temp);
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
         dst0[c] = temp;
      break;
   case TGSI_OPCODE_SLT:
   case TGSI_OPCODE_SGE:
   case TGSI_OPCODE_SEQ:
   case TGSI_OPCODE_SGT:
   case TGSI_OPCODE_SLE:
   case TGSI_OPCODE_SNE:
   case TGSI_OPCODE_ISLT:
   case TGSI_OPCODE_ISGE:
   case TGSI_OPCODE_USEQ:
   case TGSI_OPCODE_USGE:
   case TGSI_OPCODE_USLT:
   case TGSI_OPCODE_USNE:
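      /* emit SET with the condition and source/result types implied by the
       * TGSI opcode; a float result needs the integer TRUE (-1) converted
       * to 1.0f, hence the ABS + CVT tail
       */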
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
         src0 = emit_fetch(bld, insn, 0, c);
         src1 = emit_fetch(bld, insn, 1, c);
         dst0[c] = bld_insn_2(bld, NV_OP_SET, src0, src1);
         dst0[c]->insn->set_cond = translate_setcc(insn->Instruction.Opcode);
         SET_TYPE(dst0[c], infer_dst_type(insn->Instruction.Opcode));

         dst0[c]->insn->src[0]->typecast =
         dst0[c]->insn->src[1]->typecast =
            infer_src_type(insn->Instruction.Opcode);

         if (dst0[c]->reg.type != NV_TYPE_F32)
            continue; /* integer results need no conversion */
         dst0[c]->reg.as_type = NV_TYPE_S32;
         dst0[c] = bld_insn_1(bld, NV_OP_ABS, dst0[c]);
         dst0[c] = bld_insn_1(bld, NV_OP_CVT, dst0[c]);
         SET_TYPE(dst0[c], NV_TYPE_F32);
      }
      break;
   case TGSI_OPCODE_SCS:
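      /* SCS: cosine to x and sine to y of the same (pre-reduced) angle;
       * z and w are the constants 0.0 and 1.0
       */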
      if (insn->Dst[0].Register.WriteMask & 0x3) {
         src0 = emit_fetch(bld, insn, 0, 0);
         temp = bld_insn_1(bld, NV_OP_PRESIN, src0);
         if (insn->Dst[0].Register.WriteMask & 0x1)
            dst0[0] = bld_insn_1(bld, NV_OP_COS, temp);
         if (insn->Dst[0].Register.WriteMask & 0x2)
            dst0[1] = bld_insn_1(bld, NV_OP_SIN, temp);
      }
      if (insn->Dst[0].Register.WriteMask & 0x4)
         dst0[2] = bld_imm_f32(bld, 0.0f);
      if (insn->Dst[0].Register.WriteMask & 0x8)
         dst0[3] = bld_imm_f32(bld, 1.0f);
      break;
   case TGSI_OPCODE_SSG:
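      /* SSG: copy the sign bit of src onto 1.0f to get +/-1.0, then
       * select 0.0 (temp XOR temp) where the predicate flags src == 0
       */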
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
         src0 = emit_fetch(bld, insn, 0, c);
         src1 = bld_predicate(bld, src0, FALSE);

         temp = bld_insn_2(bld, NV_OP_AND, src0, bld_imm_u32(bld, 0x80000000));
         temp = bld_insn_2(bld, NV_OP_OR, temp, bld_imm_f32(bld, 1.0f));

         dst0[c] = bld_insn_2(bld, NV_OP_XOR, temp, temp);
         dst0[c]->insn->cc = NV_CC_EQ;
         nv_reference(bld->pc, &dst0[c]->insn->flags_src, src1);

         dst0[c] = bld_insn_2(bld, NV_OP_SELECT, dst0[c], temp);
      }
      break;
   case TGSI_OPCODE_SUB:
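      /* SUB is ADD with the NEG modifier toggled on the second source */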
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
         src0 = emit_fetch(bld, insn, 0, c);
         src1 = emit_fetch(bld, insn, 1, c);
         dst0[c] = bld_insn_2(bld, NV_OP_ADD, src0, src1);
         dst0[c]->insn->src[1]->mod ^= NV_MOD_NEG;
      }
      break;
   case TGSI_OPCODE_TEX:
   case TGSI_OPCODE_TXB:
   case TGSI_OPCODE_TXL:
   case TGSI_OPCODE_TXP:
   case TGSI_OPCODE_TXD: // fake
      bld_tex(bld, dst0, insn);
      break;
   case TGSI_OPCODE_XPD:
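      /* cross product: dst.c = src0[c+1] * src1[c+2] - src1[c+1] * src0[c+2]
       * (channel indices mod 3); the MAD negates its accumulator source
       */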
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
         if (c == 3) {
            dst0[3] = bld_imm_f32(bld, 1.0f);
            break;
         }
         src0 = emit_fetch(bld, insn, 1, (c + 1) % 3);
         src1 = emit_fetch(bld, insn, 0, (c + 2) % 3);
         dst0[c] = bld_insn_2(bld, NV_OP_MUL, src0, src1);

         src0 = emit_fetch(bld, insn, 0, (c + 1) % 3);
         src1 = emit_fetch(bld, insn, 1, (c + 2) % 3);
         dst0[c] = bld_insn_3(bld, NV_OP_MAD, src0, src1, dst0[c]);

         dst0[c]->insn->src[2]->mod ^= NV_MOD_NEG;
      }
      break;
   case TGSI_OPCODE_RET:
      (new_instruction(bld->pc, NV_OP_RET))->fixed = 1;
      break;
   case TGSI_OPCODE_END:
      if (bld->ti->p->type == PIPE_SHADER_FRAGMENT)
         bld_export_outputs(bld);
      break;
   default:
      NOUVEAU_ERR("unhandled opcode %u\n", insn->Instruction.Opcode);
      abort();
      break;
   }

   FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
      emit_store(bld, insn, c, dst0[c]);
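   /* if this wrote the vertex position and user clip planes are enabled,
    * also emit the clip distances: dot(position, plane[p]), with the plane
    * coefficients loaded from constant buffer 15
    */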
   if (prog->type == PIPE_SHADER_VERTEX && prog->vp.clpd_nr &&
       dreg->Register.File == TGSI_FILE_OUTPUT && !dreg->Register.Indirect &&
       prog->out[dreg->Register.Index].sn == TGSI_SEMANTIC_POSITION) {
      int p;
      for (p = 0; p < prog->vp.clpd_nr; p++) {
         struct nv_value *clipd = NULL;

         for (c = 0; c < 4; c++) {
            temp = new_value(bld->pc, NV_FILE_MEM_C(15), NV_TYPE_F32);
            temp->reg.id = p * 4 + c;
            temp = bld_insn_1(bld, NV_OP_LDA, temp);

            clipd = clipd ?
                bld_insn_3(bld, NV_OP_MAD, dst0[c], temp, clipd) :
                bld_insn_2(bld, NV_OP_MUL, dst0[c], temp);
         }

         temp = bld_insn_1(bld, NV_OP_MOV, clipd);
         temp->reg.file = NV_FILE_OUT;
         temp->reg.id = bld->ti->p->vp.clpd + p;
         temp->insn->fixed = 1;
      }
   }
}

static INLINE void
bld_free_value_trackers(struct bld_value_stack *base, int n)
{
   int i, c;

   for (i = 0; i < n; ++i)
      for (c = 0; c < 4; ++c)
         if (base[i * 4 + c].body)
            FREE(base[i * 4 + c].body);
}

int
nv50_tgsi_to_nc(struct nv_pc *pc, struct nv50_translation_info *ti)
{
   struct bld_context *bld = CALLOC_STRUCT(bld_context);
   int c;
   unsigned ip;

   pc->root[0] = pc->current_block = new_basic_block(pc);

   bld->pc = pc;
   bld->ti = ti;

   pc->loop_nesting_bound = 1;
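   /* fragment shaders using perspective interpolation need 1/w of the
    * fragment position; preload it once via LINTERP + RCP
    */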
   c = util_bitcount(bld->ti->p->fp.interp >> 24);
   if (c && ti->p->type == PIPE_SHADER_FRAGMENT) {
      bld->frgcrd[3] = new_value(pc, NV_FILE_MEM_V, NV_TYPE_F32);
      bld->frgcrd[3]->reg.id = c - 1;
      bld->frgcrd[3] = bld_insn_1(bld, NV_OP_LINTERP, bld->frgcrd[3]);
      bld->frgcrd[3] = bld_insn_1(bld, NV_OP_RCP, bld->frgcrd[3]);
   }

   for (ip = 0; ip < ti->inst_nr; ++ip)
      bld_instruction(bld, &ti->insns[ip]);

   bld_free_value_trackers(&bld->tvs[0][0], BLD_MAX_TEMPS);
   bld_free_value_trackers(&bld->avs[0][0], BLD_MAX_ADDRS);
   bld_free_value_trackers(&bld->pvs[0][0], BLD_MAX_PREDS);

   bld_free_value_trackers(&bld->ovs[0][0], PIPE_MAX_SHADER_OUTPUTS);

   FREE(bld);
   return 0;
}

/* If a variable is assigned in a loop, replace all references to the value
 * from outside the loop with a phi value.
 */
static void
bld_replace_value(struct nv_pc *pc, struct nv_basic_block *b,
                  struct nv_value *old_val,
                  struct nv_value *new_val)
{
   struct nv_instruction *nvi;

   for (nvi = b->phi ? b->phi : b->entry; nvi; nvi = nvi->next) {
      int s;
      for (s = 0; s < 5; ++s) {
         if (!nvi->src[s])
            continue;
         if (nvi->src[s]->value == old_val)
            nv_reference(pc, &nvi->src[s], new_val);
      }
      if (nvi->flags_src && nvi->flags_src->value == old_val)
         nv_reference(pc, &nvi->flags_src, new_val);
   }
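   /* mark this block visited for the current pass, then recurse into any
    * successor not yet seen (pass_seq guards against CFG cycles)
    */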
   b->pass_seq = pc->pass_seq;

   if (b->out[0] && b->out[0]->pass_seq < pc->pass_seq)
      bld_replace_value(pc, b->out[0], old_val, new_val);
   if (b->out[1] && b->out[1]->pass_seq < pc->pass_seq)
      bld_replace_value(pc, b->out[1], old_val, new_val);
}