Clone of mesa.
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

nvc0_query.c 30KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918
  1. /*
  2. * Copyright 2011 Nouveau Project
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a
  5. * copy of this software and associated documentation files (the "Software"),
  6. * to deal in the Software without restriction, including without limitation
  7. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8. * and/or sell copies of the Software, and to permit persons to whom the
  9. * Software is furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice shall be included in
  12. * all copies or substantial portions of the Software.
  13. *
  14. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  17. * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
  18. * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
  19. * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  20. * SOFTWARE.
  21. *
  22. * Authors: Christoph Bumiller
  23. */
  24. #define NVC0_PUSH_EXPLICIT_SPACE_CHECKING
  25. #include "nvc0_context.h"
  26. #include "nouveau/nv_object.xml.h"
  27. #include "nve4_compute.xml.h"
  28. #define NVC0_QUERY_STATE_READY 0
  29. #define NVC0_QUERY_STATE_ACTIVE 1
  30. #define NVC0_QUERY_STATE_ENDED 2
  31. #define NVC0_QUERY_STATE_FLUSHED 3
/* Driver-private query object backing a pipe_query.
 * Result data lives in a GART buffer written by the GPU. */
struct nvc0_query {
   uint32_t *data;             /* CPU mapping of the result area (bo + base) */
   uint16_t type;              /* PIPE_QUERY_* or driver-specific type */
   uint16_t index;             /* stream / buffer index for SO-style queries */
   int8_t ctr[4];              /* assigned HW MP-PM counter slots (NVE4 PM queries) */
   uint32_t sequence;          /* incremented per begin/end; written by GPU for sync */
   struct nouveau_bo *bo;      /* buffer object holding the results */
   uint32_t base;              /* offset of this query's region inside bo */
   uint32_t offset;            /* current write position: base + i * rotate */
   uint8_t state;              /* NVC0_QUERY_STATE_* */
   boolean is64bit;            /* TRUE if results are 64-bit value/timestamp pairs */
   uint8_t rotate;             /* bytes to advance per begin (occlusion queries) */
   int nesting;                /* only used for occlusion queries */
   struct nouveau_mm_allocation *mm; /* suballocation the bo came from */
};
#define NVC0_QUERY_ALLOC_SPACE 256

/* NVE4 MP performance-monitor query implementation (defined below). */
static void nve4_mp_pm_query_begin(struct nvc0_context *, struct nvc0_query *);
static void nve4_mp_pm_query_end(struct nvc0_context *, struct nvc0_query *);
static boolean nve4_mp_pm_query_result(struct nvc0_context *,
                                       struct nvc0_query *, void *, boolean);

/* Downcast the opaque gallium query handle to our private type. */
static INLINE struct nvc0_query *
nvc0_query(struct pipe_query *pipe)
{
   return (struct nvc0_query *)pipe;
}
/* (Re)allocate @size bytes of GART-visible storage for query @q.
 * Any previous storage is released first; calling with size == 0 acts as a
 * pure free.  Returns FALSE on allocation or mapping failure. */
static boolean
nvc0_query_allocate(struct nvc0_context *nvc0, struct nvc0_query *q, int size)
{
   struct nvc0_screen *screen = nvc0->screen;
   int ret;

   if (q->bo) {
      nouveau_bo_ref(NULL, &q->bo);
      if (q->mm) {
         /* If the query already completed we can recycle the suballocation
          * right away; otherwise defer the free until the current fence
          * signals, since the GPU may still write to it. */
         if (q->state == NVC0_QUERY_STATE_READY)
            nouveau_mm_free(q->mm);
         else
            nouveau_fence_work(screen->base.fence.current,
                               nouveau_mm_free_work, q->mm);
      }
   }
   if (size) {
      q->mm = nouveau_mm_allocate(screen->base.mm_GART, size, &q->bo, &q->base);
      if (!q->bo)
         return FALSE;
      q->offset = q->base;

      ret = nouveau_bo_map(q->bo, 0, screen->base.client);
      if (ret) {
         /* undo the allocation above on map failure */
         nvc0_query_allocate(nvc0, q, 0);
         return FALSE;
      }
      q->data = (uint32_t *)((uint8_t *)q->bo->map + q->base);
   }
   return TRUE;
}
  86. static void
  87. nvc0_query_destroy(struct pipe_context *pipe, struct pipe_query *pq)
  88. {
  89. nvc0_query_allocate(nvc0_context(pipe), nvc0_query(pq), 0);
  90. FREE(nvc0_query(pq));
  91. }
/* Create a query object of the given pipe (or driver-specific) type and
 * allocate result storage sized for that type.  Returns NULL for unknown
 * types or on allocation failure. */
static struct pipe_query *
nvc0_query_create(struct pipe_context *pipe, unsigned type)
{
   struct nvc0_context *nvc0 = nvc0_context(pipe);
   struct nvc0_query *q;
   unsigned space = NVC0_QUERY_ALLOC_SPACE;

   q = CALLOC_STRUCT(nvc0_query);
   if (!q)
      return NULL;

   switch (type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_OCCLUSION_PREDICATE:
      /* occlusion queries rotate through fresh 32-byte slots per begin,
       * see the comment in nvc0_query_begin() */
      q->rotate = 32;
      space = NVC0_QUERY_ALLOC_SPACE;
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      q->is64bit = TRUE;
      space = 512; /* 10 counters, begin + end snapshots, 64-bit pairs */
      break;
   case PIPE_QUERY_SO_STATISTICS:
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
      q->is64bit = TRUE;
      space = 64;
      break;
   case PIPE_QUERY_TIME_ELAPSED:
   case PIPE_QUERY_TIMESTAMP:
   case PIPE_QUERY_TIMESTAMP_DISJOINT:
   case PIPE_QUERY_GPU_FINISHED:
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      space = 32;
      break;
   case NVC0_QUERY_TFB_BUFFER_OFFSET:
      space = 16;
      break;
   default:
      /* NVE4 MP performance counters need new-enough kernel support */
      if (nvc0->screen->base.class_3d >= NVE4_3D_CLASS &&
          nvc0->screen->base.device->drm_version >= 0x01000101) {
         if (type >= NVE4_PM_QUERY(0) &&
             type <= NVE4_PM_QUERY_MAX) {
            /* 8 counters per MP + clock */
            space = 12 * nvc0->screen->mp_count * sizeof(uint32_t);
            break;
         }
      }
      debug_printf("invalid query type: %u\n", type);
      FREE(q);
      return NULL;
   }
   if (!nvc0_query_allocate(nvc0, q, space)) {
      FREE(q);
      return NULL;
   }

   q->type = type;

   if (q->rotate) {
      /* we advance before query_begin ! */
      q->offset -= q->rotate;
      q->data -= q->rotate / sizeof(*q->data);
   } else
   if (!q->is64bit)
      q->data[0] = 0; /* initialize sequence */

   return (struct pipe_query *)q;
}
/* Emit a 3D-class QUERY_GET: the GPU writes the selected report (encoded in
 * @get) plus the query sequence number at bo->offset + q->offset + @offset. */
static void
nvc0_query_get(struct nouveau_pushbuf *push, struct nvc0_query *q,
               unsigned offset, uint32_t get)
{
   offset += q->offset;

   PUSH_SPACE(push, 5);
   PUSH_REFN (push, q->bo, NOUVEAU_BO_GART | NOUVEAU_BO_WR);
   BEGIN_NVC0(push, NVC0_3D(QUERY_ADDRESS_HIGH), 4);
   PUSH_DATAh(push, q->bo->offset + offset);
   PUSH_DATA (push, q->bo->offset + offset);
   PUSH_DATA (push, q->sequence);
   PUSH_DATA (push, get);
}
/* Advance an occlusion query to its next storage slot, reallocating a fresh
 * buffer once all slots of the current allocation have been used. */
static void
nvc0_query_rotate(struct nvc0_context *nvc0, struct nvc0_query *q)
{
   q->offset += q->rotate;
   q->data += q->rotate / sizeof(*q->data);
   if (q->offset - q->base == NVC0_QUERY_ALLOC_SPACE)
      nvc0_query_allocate(nvc0, q, NVC0_QUERY_ALLOC_SPACE);
}
/* pipe_context::begin_query — record the "begin" snapshot for the query.
 * For most types this emits QUERY_GETs of the relevant counters at offsets
 * past 0; the matching "end" snapshots land at offset 0 (see query_end),
 * and results are computed as end - begin. */
static void
nvc0_query_begin(struct pipe_context *pipe, struct pipe_query *pq)
{
   struct nvc0_context *nvc0 = nvc0_context(pipe);
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct nvc0_query *q = nvc0_query(pq);

   /* For occlusion queries we have to change the storage, because a previous
    * query might set the initial render conition to FALSE even *after* we re-
    * initialized it to TRUE.
    */
   if (q->rotate) {
      nvc0_query_rotate(nvc0, q);

      /* XXX: can we do this with the GPU, and sync with respect to a previous
       * query ?
       */
      q->data[0] = q->sequence; /* initialize sequence */
      q->data[1] = 1; /* initial render condition = TRUE */
      q->data[4] = q->sequence + 1; /* for comparison COND_MODE */
      q->data[5] = 0;
   }
   q->sequence++;

   switch (q->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_OCCLUSION_PREDICATE:
      /* nesting level 0 owns the HW sample counter and resets it;
       * nested queries just snapshot the running count */
      q->nesting = nvc0->screen->num_occlusion_queries_active++;
      if (q->nesting) {
         nvc0_query_get(push, q, 0x10, 0x0100f002);
      } else {
         PUSH_SPACE(push, 3);
         BEGIN_NVC0(push, NVC0_3D(COUNTER_RESET), 1);
         PUSH_DATA (push, NVC0_3D_COUNTER_RESET_SAMPLECNT);
         IMMED_NVC0(push, NVC0_3D(SAMPLECNT_ENABLE), 1);
      }
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
      nvc0_query_get(push, q, 0x10, 0x06805002 | (q->index << 5));
      break;
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      nvc0_query_get(push, q, 0x10, 0x05805002 | (q->index << 5));
      break;
   case PIPE_QUERY_SO_STATISTICS:
      nvc0_query_get(push, q, 0x20, 0x05805002 | (q->index << 5));
      nvc0_query_get(push, q, 0x30, 0x06805002 | (q->index << 5));
      break;
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
      nvc0_query_get(push, q, 0x10, 0x03005002 | (q->index << 5));
      break;
   case PIPE_QUERY_TIMESTAMP_DISJOINT:
   case PIPE_QUERY_TIME_ELAPSED:
      nvc0_query_get(push, q, 0x10, 0x00005002);
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      /* begin snapshots of all 10 pipeline counters at 0xc0.. */
      nvc0_query_get(push, q, 0xc0 + 0x00, 0x00801002); /* VFETCH, VERTICES */
      nvc0_query_get(push, q, 0xc0 + 0x10, 0x01801002); /* VFETCH, PRIMS */
      nvc0_query_get(push, q, 0xc0 + 0x20, 0x02802002); /* VP, LAUNCHES */
      nvc0_query_get(push, q, 0xc0 + 0x30, 0x03806002); /* GP, LAUNCHES */
      nvc0_query_get(push, q, 0xc0 + 0x40, 0x04806002); /* GP, PRIMS_OUT */
      nvc0_query_get(push, q, 0xc0 + 0x50, 0x07804002); /* RAST, PRIMS_IN */
      nvc0_query_get(push, q, 0xc0 + 0x60, 0x08804002); /* RAST, PRIMS_OUT */
      nvc0_query_get(push, q, 0xc0 + 0x70, 0x0980a002); /* ROP, PIXELS */
      nvc0_query_get(push, q, 0xc0 + 0x80, 0x0d808002); /* TCP, LAUNCHES */
      nvc0_query_get(push, q, 0xc0 + 0x90, 0x0e809002); /* TEP, LAUNCHES */
      break;
   default:
      if (q->type >= NVE4_PM_QUERY(0) && q->type <= NVE4_PM_QUERY_MAX)
         nve4_mp_pm_query_begin(nvc0, q);
      break;
   }
   q->state = NVC0_QUERY_STATE_ACTIVE;
}
/* pipe_context::end_query — record the "end" snapshot at offset 0 of the
 * query's storage.  Some query types (e.g. GPU_FINISHED, TIMESTAMP) are
 * legal without a preceding begin_query. */
static void
nvc0_query_end(struct pipe_context *pipe, struct pipe_query *pq)
{
   struct nvc0_context *nvc0 = nvc0_context(pipe);
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct nvc0_query *q = nvc0_query(pq);

   if (q->state != NVC0_QUERY_STATE_ACTIVE) {
      /* some queries don't require 'begin' to be called (e.g. GPU_FINISHED) */
      if (q->rotate)
         nvc0_query_rotate(nvc0, q);
      q->sequence++;
   }
   q->state = NVC0_QUERY_STATE_ENDED;

   switch (q->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_OCCLUSION_PREDICATE:
      nvc0_query_get(push, q, 0, 0x0100f002);
      /* last active occlusion query turns the sample counter back off */
      if (--nvc0->screen->num_occlusion_queries_active == 0) {
         PUSH_SPACE(push, 1);
         IMMED_NVC0(push, NVC0_3D(SAMPLECNT_ENABLE), 0);
      }
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
      nvc0_query_get(push, q, 0, 0x06805002 | (q->index << 5));
      break;
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      nvc0_query_get(push, q, 0, 0x05805002 | (q->index << 5));
      break;
   case PIPE_QUERY_SO_STATISTICS:
      nvc0_query_get(push, q, 0x00, 0x05805002 | (q->index << 5));
      nvc0_query_get(push, q, 0x10, 0x06805002 | (q->index << 5));
      break;
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
      /* TODO: How do we sum over all streams for render condition ? */
      /* PRIMS_DROPPED doesn't write sequence, use a ZERO query to sync on */
      nvc0_query_get(push, q, 0x00, 0x03005002 | (q->index << 5));
      nvc0_query_get(push, q, 0x20, 0x00005002);
      break;
   case PIPE_QUERY_TIMESTAMP:
   case PIPE_QUERY_TIMESTAMP_DISJOINT:
   case PIPE_QUERY_TIME_ELAPSED:
      nvc0_query_get(push, q, 0, 0x00005002);
      break;
   case PIPE_QUERY_GPU_FINISHED:
      nvc0_query_get(push, q, 0, 0x1000f010);
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      /* end snapshots; begin snapshots were written at 0xc0.. */
      nvc0_query_get(push, q, 0x00, 0x00801002); /* VFETCH, VERTICES */
      nvc0_query_get(push, q, 0x10, 0x01801002); /* VFETCH, PRIMS */
      nvc0_query_get(push, q, 0x20, 0x02802002); /* VP, LAUNCHES */
      nvc0_query_get(push, q, 0x30, 0x03806002); /* GP, LAUNCHES */
      nvc0_query_get(push, q, 0x40, 0x04806002); /* GP, PRIMS_OUT */
      nvc0_query_get(push, q, 0x50, 0x07804002); /* RAST, PRIMS_IN */
      nvc0_query_get(push, q, 0x60, 0x08804002); /* RAST, PRIMS_OUT */
      nvc0_query_get(push, q, 0x70, 0x0980a002); /* ROP, PIXELS */
      nvc0_query_get(push, q, 0x80, 0x0d808002); /* TCP, LAUNCHES */
      nvc0_query_get(push, q, 0x90, 0x0e809002); /* TEP, LAUNCHES */
      break;
   case NVC0_QUERY_TFB_BUFFER_OFFSET:
      /* indexed by TFB buffer instead of by vertex stream */
      nvc0_query_get(push, q, 0x00, 0x0d005002 | (q->index << 5));
      break;
   default:
      if (q->type >= NVE4_PM_QUERY(0) && q->type <= NVE4_PM_QUERY_MAX)
         nve4_mp_pm_query_end(nvc0, q);
      break;
   }
}
/* Poll whether the query result has landed in memory and, if so, move the
 * query to the READY state.  64-bit queries are checked with a non-blocking
 * map; 32-bit queries compare the GPU-written sequence number. */
static INLINE void
nvc0_query_update(struct nouveau_client *cli, struct nvc0_query *q)
{
   if (q->is64bit) {
      if (!nouveau_bo_map(q->bo, NOUVEAU_BO_RD | NOUVEAU_BO_NOBLOCK, cli))
         q->state = NVC0_QUERY_STATE_READY;
   } else {
      if (q->data[0] == q->sequence)
         q->state = NVC0_QUERY_STATE_READY;
   }
}
/* pipe_context::get_query_result — fetch/compute the result of a query.
 * If @wait is FALSE and the result is not ready yet, kick the push buffer
 * once (so the GPU makes progress) and return FALSE.  Results are generally
 * computed as end-snapshot minus begin-snapshot from the layouts written in
 * query_begin/query_end. */
static boolean
nvc0_query_result(struct pipe_context *pipe, struct pipe_query *pq,
                  boolean wait, union pipe_query_result *result)
{
   struct nvc0_context *nvc0 = nvc0_context(pipe);
   struct nvc0_query *q = nvc0_query(pq);
   uint64_t *res64 = (uint64_t*)result;
   uint32_t *res32 = (uint32_t*)result;
   boolean *res8 = (boolean*)result;
   uint64_t *data64 = (uint64_t *)q->data;
   unsigned i;

   if (q->type >= NVE4_PM_QUERY(0) && q->type <= NVE4_PM_QUERY_MAX)
      return nve4_mp_pm_query_result(nvc0, q, result, wait);

   if (q->state != NVC0_QUERY_STATE_READY)
      nvc0_query_update(nvc0->screen->base.client, q);

   if (q->state != NVC0_QUERY_STATE_READY) {
      if (!wait) {
         if (q->state != NVC0_QUERY_STATE_FLUSHED) {
            q->state = NVC0_QUERY_STATE_FLUSHED;
            /* flush for silly apps that spin on GL_QUERY_RESULT_AVAILABLE */
            PUSH_KICK(nvc0->base.pushbuf);
         }
         return FALSE;
      }
      if (nouveau_bo_wait(q->bo, NOUVEAU_BO_RD, nvc0->screen->base.client))
         return FALSE;
   }
   q->state = NVC0_QUERY_STATE_READY;

   switch (q->type) {
   case PIPE_QUERY_GPU_FINISHED:
      res8[0] = TRUE;
      break;
   case PIPE_QUERY_OCCLUSION_COUNTER: /* u32 sequence, u32 count, u64 time */
      res64[0] = q->data[1] - q->data[5];
      break;
   case PIPE_QUERY_OCCLUSION_PREDICATE:
      res8[0] = q->data[1] != q->data[5];
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED: /* u64 count, u64 time */
   case PIPE_QUERY_PRIMITIVES_EMITTED: /* u64 count, u64 time */
      res64[0] = data64[0] - data64[2];
      break;
   case PIPE_QUERY_SO_STATISTICS:
      res64[0] = data64[0] - data64[4];
      res64[1] = data64[2] - data64[6];
      break;
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
      res8[0] = data64[0] != data64[2];
      break;
   case PIPE_QUERY_TIMESTAMP:
      res64[0] = data64[1];
      break;
   case PIPE_QUERY_TIMESTAMP_DISJOINT: /* u32 sequence, u32 0, u64 time */
      res64[0] = 1000000000; /* frequency in Hz */
      /* disjoint flag lives at byte 8 of the result union */
      res8[8] = (data64[1] == data64[3]) ? FALSE : TRUE;
      break;
   case PIPE_QUERY_TIME_ELAPSED:
      res64[0] = data64[1] - data64[3];
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      /* 10 counters; begin snapshots start at data64[24] (byte 0xc0) */
      for (i = 0; i < 10; ++i)
         res64[i] = data64[i * 2] - data64[24 + i * 2];
      break;
   case NVC0_QUERY_TFB_BUFFER_OFFSET:
      res32[0] = q->data[1];
      break;
   default:
      assert(0); /* can't happen, we don't create queries with invalid type */
      return FALSE;
   }

   return TRUE;
}
/* Make the FIFO wait (semaphore ACQUIRE_EQUAL) until the query's sequence
 * number has been written by the GPU, i.e. until the query completed. */
void
nvc0_query_fifo_wait(struct nouveau_pushbuf *push, struct pipe_query *pq)
{
   struct nvc0_query *q = nvc0_query(pq);
   unsigned offset = q->offset;

   /* the sync-capable ZERO report of this query lives at +0x20 */
   if (q->type == PIPE_QUERY_SO_OVERFLOW_PREDICATE) offset += 0x20;

   PUSH_SPACE(push, 5);
   PUSH_REFN (push, q->bo, NOUVEAU_BO_GART | NOUVEAU_BO_RD);
   BEGIN_NVC0(push, SUBC_3D(NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH), 4);
   PUSH_DATAh(push, q->bo->offset + offset);
   PUSH_DATA (push, q->bo->offset + offset);
   PUSH_DATA (push, q->sequence);
   PUSH_DATA (push, (1 << 12) |
              NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL);
}
  412. static void
  413. nvc0_render_condition(struct pipe_context *pipe,
  414. struct pipe_query *pq, uint mode)
  415. {
  416. struct nvc0_context *nvc0 = nvc0_context(pipe);
  417. struct nouveau_pushbuf *push = nvc0->base.pushbuf;
  418. struct nvc0_query *q;
  419. uint32_t cond;
  420. boolean negated = FALSE;
  421. boolean wait =
  422. mode != PIPE_RENDER_COND_NO_WAIT &&
  423. mode != PIPE_RENDER_COND_BY_REGION_NO_WAIT;
  424. nvc0->cond_query = pq;
  425. nvc0->cond_mode = mode;
  426. if (!pq) {
  427. PUSH_SPACE(push, 1);
  428. IMMED_NVC0(push, NVC0_3D(COND_MODE), NVC0_3D_COND_MODE_ALWAYS);
  429. return;
  430. }
  431. q = nvc0_query(pq);
  432. /* NOTE: comparison of 2 queries only works if both have completed */
  433. switch (q->type) {
  434. case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
  435. cond = negated ? NVC0_3D_COND_MODE_EQUAL :
  436. NVC0_3D_COND_MODE_NOT_EQUAL;
  437. wait = TRUE;
  438. break;
  439. case PIPE_QUERY_OCCLUSION_COUNTER:
  440. case PIPE_QUERY_OCCLUSION_PREDICATE:
  441. if (likely(!negated)) {
  442. if (unlikely(q->nesting))
  443. cond = wait ? NVC0_3D_COND_MODE_NOT_EQUAL :
  444. NVC0_3D_COND_MODE_ALWAYS;
  445. else
  446. cond = NVC0_3D_COND_MODE_RES_NON_ZERO;
  447. } else {
  448. cond = wait ? NVC0_3D_COND_MODE_EQUAL : NVC0_3D_COND_MODE_ALWAYS;
  449. }
  450. break;
  451. default:
  452. assert(!"render condition query not a predicate");
  453. mode = NVC0_3D_COND_MODE_ALWAYS;
  454. break;
  455. }
  456. if (wait)
  457. nvc0_query_fifo_wait(push, pq);
  458. PUSH_SPACE(push, 4);
  459. PUSH_REFN (push, q->bo, NOUVEAU_BO_GART | NOUVEAU_BO_RD);
  460. BEGIN_NVC0(push, NVC0_3D(COND_ADDRESS_HIGH), 3);
  461. PUSH_DATAh(push, q->bo->offset + q->offset);
  462. PUSH_DATA (push, q->bo->offset + q->offset);
  463. PUSH_DATA (push, cond);
  464. }
/* Append the query's result dword directly into the command stream as an
 * indirect-buffer entry (used e.g. for draw-auto style parameters). */
void
nvc0_query_pushbuf_submit(struct nouveau_pushbuf *push,
                          struct pipe_query *pq, unsigned result_offset)
{
   struct nvc0_query *q = nvc0_query(pq);

/* the entry must not be prefetched before the GPU has written the result */
#define NVC0_IB_ENTRY_1_NO_PREFETCH (1 << (31 - 8))

   nouveau_pushbuf_space(push, 0, 0, 1);
   nouveau_pushbuf_data(push, q->bo, q->offset + result_offset, 4 |
                        NVC0_IB_ENTRY_1_NO_PREFETCH);
}
/* Save the current transform-feedback buffer offset of @ptarg into its
 * attached query (targ->pq), optionally serializing first.  *serialize is
 * cleared once the SERIALIZE method has been emitted. */
void
nvc0_so_target_save_offset(struct pipe_context *pipe,
                           struct pipe_stream_output_target *ptarg,
                           unsigned index, boolean *serialize)
{
   struct nvc0_so_target *targ = nvc0_so_target(ptarg);

   if (*serialize) {
      *serialize = FALSE;
      PUSH_SPACE(nvc0_context(pipe)->base.pushbuf, 1);
      IMMED_NVC0(nvc0_context(pipe)->base.pushbuf, NVC0_3D(SERIALIZE), 0);
   }

   nvc0_query(targ->pq)->index = index;

   nvc0_query_end(pipe, targ->pq);
}
/* === PERFORMANCE MONITORING COUNTERS === */

/* Code to read out MP counters: They are accessible via mmio, too, but let's
 * just avoid mapping registers in userspace. We'd have to know which MPs are
 * enabled/present, too, and that information is not presently exposed.
 * We could add a kernel interface for it, but reading the counters like this
 * has the advantage of being async (if get_result isn't called immediately).
 */
/* Pre-assembled NVE4 compute shader (one 64-bit word per instruction; the
 * disassembly is in the trailing comments).  Each MP's thread 0 stores its
 * 8 PM counters, the 64-bit clock, the sequence word (c0[8]) and physid to
 * g[] at base + physid-derived * 48 bytes (12 dwords per MP). */
static const uint64_t nve4_read_mp_pm_counters_code[] =
{
   0x2042004270420047ULL, /* sched */
   0x2800400000001de4ULL, /* mov b32 $r0 c0[0] (04) */
   0x2c0000000c009c04ULL, /* mov b32 $r2 $physid (20) */
   0x2800400010005de4ULL, /* mov b32 $r1 c0[4] (04) */
   0x2c0000008400dc04ULL, /* mov b32 $r3 $tidx (27) */
   0x7000c01050209c03ULL, /* ext u32 $r2 $r2 0x0414 (04) */
   0x2c00000010011c04ULL, /* mov b32 $r4 $pm0 (20) */
   0x190e0000fc33dc03ULL, /* set $p1 eq u32 $r3 0 (04) */
   0x2280428042804277ULL, /* sched */
   0x2c00000014015c04ULL, /* mov b32 $r5 $pm1 (27) */
   0x10000000c0209c02ULL, /* mul $r2 u32 $r2 u32 48 (04) */
   0x2c00000018019c04ULL, /* mov b32 $r6 $pm2 (28) */
   0x4801000008001c03ULL, /* add b32 ($r0 $c) $r0 $r2 (04) */
   0x2c0000001c01dc04ULL, /* mov b32 $r7 $pm3 (28) */
   0x0800000000105c42ULL, /* add b32 $r1 $r1 0 $c (04) */
   0x2c00000140009c04ULL, /* mov b32 $r2 $clock (28) */
   0x2042804200420047ULL, /* sched */
   0x94000000000107c5ULL, /* $p1 st b128 wt g[$r0d] $r4q (04) */
   0x2c00000020011c04ULL, /* mov b32 $r4 $pm4 (20) */
   0x2c00000024015c04ULL, /* mov b32 $r5 $pm5 (04) */
   0x2c00000028019c04ULL, /* mov b32 $r6 $pm6 (20) */
   0x2c0000002c01dc04ULL, /* mov b32 $r7 $pm7 (04) */
   0x2c0000014400dc04ULL, /* mov b32 $r3 $clockhi (28) */
   0x94000000400107c5ULL, /* $p1 st b128 wt g[$r0d+16] $r4q (04) */
   0x200002e042804207ULL, /* sched */
   0x2800400020011de4ULL, /* mov b32 $r4 c0[8] (20) */
   0x2c0000000c015c04ULL, /* mov b32 $r5 $physid (04) */
   0x94000000800087a5ULL, /* $p1 st b64 wt g[$r0d+32] $r2d (28) */
   0x94000000a00107a5ULL, /* $p1 st b64 wt g[$r0d+40] $r4d (04) */
   0x8000000000001de7ULL  /* exit (2e) */
};
/* NOTE: intentionally using the same names as NV */
/* Display names for the NVE4 MP performance counters, indexed by
 * NVE4_PM_QUERY id (must stay in sync with nve4_mp_pm_queries below). */
static const char *nve4_pm_query_names[] =
{
   /* MP counters */
   "prof_trigger_00",
   "prof_trigger_01",
   "prof_trigger_02",
   "prof_trigger_03",
   "prof_trigger_04",
   "prof_trigger_05",
   "prof_trigger_06",
   "prof_trigger_07",
   "warps_launched",
   "threads_launched",
   "sm_cta_launched",
   "inst_issued1",
   "inst_issued2",
   "inst_executed",
   "local_load",
   "local_store",
   "shared_load",
   "shared_store",
   "l1_local_load_hit",
   "l1_local_load_miss",
   "l1_local_store_hit",
   "l1_local_store_miss",
   "gld_request",
   "gst_request",
   "l1_global_load_hit",
   "l1_global_load_miss",
   "uncached_global_load_transaction",
   "global_store_transaction",
   "branch",
   "divergent_branch",
   "active_warps",
   "active_cycles"
};
/* For simplicity, we will allocate as many group slots as we allocate counter
 * slots. This means that a single counter which wants to source from 2 groups
 * will have to be declared as using 2 counter slots. This shouldn't really be
 * a problem because such queries don't make much sense ... (unless someone is
 * really creative).
 */
/* Configuration of one HW MP counter slot. */
struct nve4_mp_counter_cfg
{
   uint32_t func    : 16; /* mask or 4-bit logic op (depending on mode) */
   uint32_t mode    : 4;  /* LOGOP,B6,LOGOP_B6(_PULSE) */
   uint32_t pad     : 3;
   uint32_t sig_dom : 1;  /* if 0, MP_PM_A, if 1, MP_PM_B */
   uint32_t sig_sel : 8;  /* signal group */
   uint32_t src_sel : 32; /* signal selection for up to 5 sources */
};

/* A PM query: up to 4 counter slots combined with @op. */
struct nve4_mp_pm_query_cfg
{
   struct nve4_mp_counter_cfg ctr[4];
   uint8_t num_counters;
   uint8_t op; /* PIPE_LOGICOP_CLEAR(for ADD),OR,AND */
};

/* Single-counter query on signal domain A resp. B. */
#define _Q1A(n, f, m, g, s) [NVE4_PM_QUERY_##n] = { { { f, NVE4_COMPUTE_MP_PM_FUNC_MODE_##m, 0, 0, NVE4_COMPUTE_MP_PM_A_SIGSEL_##g, s }, {}, {}, {} }, 1, PIPE_LOGICOP_CLEAR }
#define _Q1B(n, f, m, g, s) [NVE4_PM_QUERY_##n] = { { { f, NVE4_COMPUTE_MP_PM_FUNC_MODE_##m, 0, 1, NVE4_COMPUTE_MP_PM_B_SIGSEL_##g, s }, {}, {}, {} }, 1, PIPE_LOGICOP_CLEAR }
/* Counter configuration table, indexed by NVE4_PM_QUERY id.
 * Order must match nve4_pm_query_names above. */
static const struct nve4_mp_pm_query_cfg nve4_mp_pm_queries[] =
{
   _Q1A(PROF_TRIGGER_0, 0x0001, B6, USER, 0x00000000),
   _Q1A(PROF_TRIGGER_1, 0x0001, B6, USER, 0x00000004),
   _Q1A(PROF_TRIGGER_2, 0x0001, B6, USER, 0x00000008),
   _Q1A(PROF_TRIGGER_3, 0x0001, B6, USER, 0x0000000c),
   _Q1A(PROF_TRIGGER_4, 0x0001, B6, USER, 0x00000010),
   _Q1A(PROF_TRIGGER_5, 0x0001, B6, USER, 0x00000014),
   _Q1A(PROF_TRIGGER_6, 0x0001, B6, USER, 0x00000018),
   _Q1A(PROF_TRIGGER_7, 0x0001, B6, USER, 0x0000001c),
   _Q1A(LAUNCHED_WARPS,    0x0001, B6, LAUNCH, 0x00000004),
   _Q1A(LAUNCHED_THREADS,  0x003f, B6, LAUNCH, 0x398a4188),
   _Q1B(LAUNCHED_CTA,      0x0001, B6, WARP, 0x0000001c),
   _Q1A(INST_ISSUED1,  0x0001, B6, ISSUE, 0x00000004),
   _Q1A(INST_ISSUED2,  0x0001, B6, ISSUE, 0x00000008),
   _Q1A(INST_EXECUTED, 0x0003, B6, EXEC,  0x00000398),
   _Q1A(LD_SHARED,   0x0001, B6, LDST, 0x00000000),
   _Q1A(ST_SHARED,   0x0001, B6, LDST, 0x00000004),
   _Q1A(LD_LOCAL,    0x0001, B6, LDST, 0x00000008),
   _Q1A(ST_LOCAL,    0x0001, B6, LDST, 0x0000000c),
   _Q1A(GLD_REQUEST, 0x0001, B6, LDST, 0x00000010),
   _Q1A(GST_REQUEST, 0x0001, B6, LDST, 0x00000014),
   _Q1B(L1_LOCAL_LOAD_HIT,   0x0001, B6, L1, 0x00000000),
   _Q1B(L1_LOCAL_LOAD_MISS,  0x0001, B6, L1, 0x00000004),
   _Q1B(L1_LOCAL_STORE_HIT,  0x0001, B6, L1, 0x00000008),
   _Q1B(L1_LOCAL_STORE_MISS, 0x0001, B6, L1, 0x0000000c),
   _Q1B(L1_GLOBAL_LOAD_HIT,  0x0001, B6, L1, 0x00000010),
   _Q1B(L1_GLOBAL_LOAD_MISS, 0x0001, B6, L1, 0x00000014),
   _Q1B(GLD_TRANSACTIONS_UNCACHED, 0x0001, B6, MEM, 0x00000000),
   _Q1B(GST_TRANSACTIONS,          0x0001, B6, MEM, 0x00000004),
   _Q1A(BRANCH,           0x0001, B6, BRANCH, 0x0000000c),
   _Q1A(BRANCH_DIVERGENT, 0x0001, B6, BRANCH, 0x00000010),
   _Q1B(ACTIVE_WARPS,  0x003f, B6, WARP, 0x398a4188),
   _Q1B(ACTIVE_CYCLES, 0x0001, B6, WARP, 0x00000004)
};

#undef _Q1A
#undef _Q1B
  626. void
  627. nve4_mp_pm_query_begin(struct nvc0_context *nvc0, struct nvc0_query *q)
  628. {
  629. struct nvc0_screen *screen = nvc0->screen;
  630. struct nouveau_pushbuf *push = nvc0->base.pushbuf;
  631. const struct nve4_mp_pm_query_cfg *cfg;
  632. unsigned i, c;
  633. unsigned num_ab[2] = { 0, 0 };
  634. cfg = &nve4_mp_pm_queries[q->type - PIPE_QUERY_DRIVER_SPECIFIC];
  635. /* check if we have enough free counter slots */
  636. for (i = 0; i < cfg->num_counters; ++i)
  637. num_ab[cfg->ctr[i].sig_dom]++;
  638. if (screen->pm.num_mp_pm_active[0] + num_ab[0] > 4 ||
  639. screen->pm.num_mp_pm_active[1] + num_ab[1] > 4) {
  640. NOUVEAU_ERR("Not enough free MP counter slots !\n");
  641. return;
  642. }
  643. assert(cfg->num_counters <= 4);
  644. PUSH_SPACE(push, 4 * 8 + 6);
  645. if (!screen->pm.mp_counters_enabled) {
  646. screen->pm.mp_counters_enabled = TRUE;
  647. BEGIN_NVC0(push, SUBC_SW(0x06ac), 1);
  648. PUSH_DATA (push, 0x1fcb);
  649. }
  650. /* set sequence field to 0 (used to check if result is available) */
  651. for (i = 0; i < screen->mp_count; ++i)
  652. q->data[i * 10 + 10] = 0;
  653. for (i = 0; i < cfg->num_counters; ++i) {
  654. const unsigned d = cfg->ctr[i].sig_dom;
  655. if (!screen->pm.num_mp_pm_active[d]) {
  656. uint32_t m = (1 << 22) | (1 << (7 + (8 * !d)));
  657. if (screen->pm.num_mp_pm_active[!d])
  658. m |= 1 << (7 + (8 * d));
  659. BEGIN_NVC0(push, SUBC_SW(0x0600), 1);
  660. PUSH_DATA (push, m);
  661. }
  662. screen->pm.num_mp_pm_active[d]++;
  663. for (c = d * 4; c < (d * 4 + 4); ++c) {
  664. if (!screen->pm.mp_counter[c]) {
  665. q->ctr[i] = c;
  666. screen->pm.mp_counter[c] = (struct pipe_query *)q;
  667. break;
  668. }
  669. }
  670. assert(c <= (d * 4 + 3)); /* must succeed, already checked for space */
  671. /* configure and reset the counter(s) */
  672. if (d == 0)
  673. BEGIN_NVC0(push, NVE4_COMPUTE(MP_PM_A_SIGSEL(c & 3)), 1);
  674. else
  675. BEGIN_NVC0(push, NVE4_COMPUTE(MP_PM_B_SIGSEL(c & 3)), 1);
  676. PUSH_DATA (push, cfg->ctr[i].sig_sel);
  677. BEGIN_NVC0(push, NVE4_COMPUTE(MP_PM_SRCSEL(c)), 1);
  678. PUSH_DATA (push, cfg->ctr[i].src_sel + 0x2108421 * (c & 3));
  679. BEGIN_NVC0(push, NVE4_COMPUTE(MP_PM_FUNC(c)), 1);
  680. PUSH_DATA (push, (cfg->ctr[i].func << 4) | cfg->ctr[i].mode);
  681. BEGIN_NVC0(push, NVE4_COMPUTE(MP_PM_SET(c)), 1);
  682. PUSH_DATA (push, 0);
  683. }
  684. }
/* Stop counting for @q: freeze all counters, launch the read-out compute
 * shader to copy per-MP counter values into the query buffer, release this
 * query's counter slots, then re-enable the counters still owned by other
 * active PM queries. */
static void
nve4_mp_pm_query_end(struct nvc0_context *nvc0, struct nvc0_query *q)
{
   struct nvc0_screen *screen = nvc0->screen;
   struct pipe_context *pipe = &nvc0->base.pipe;
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   uint32_t mask;
   uint32_t input[3];
   const uint block[3] = { 32, 1, 1 };
   const uint grid[3] = { screen->mp_count, 1, 1 };
   unsigned c;
   const struct nve4_mp_pm_query_cfg *cfg;

   cfg = &nve4_mp_pm_queries[q->type - PIPE_QUERY_DRIVER_SPECIFIC];

   /* lazily create the compute program wrapping the read-out shader blob */
   if (unlikely(!screen->pm.prog)) {
      struct nvc0_program *prog = CALLOC_STRUCT(nvc0_program);
      prog->type = PIPE_SHADER_COMPUTE;
      prog->translated = TRUE;
      prog->num_gprs = 8;
      prog->code = (uint32_t *)nve4_read_mp_pm_counters_code;
      prog->code_size = sizeof(nve4_read_mp_pm_counters_code);
      prog->parm_size = 12;
      screen->pm.prog = prog;
   }

   /* disable all counting */
   PUSH_SPACE(push, 8);
   for (c = 0; c < 8; ++c)
      if (screen->pm.mp_counter[c])
         IMMED_NVC0(push, NVE4_COMPUTE(MP_PM_FUNC(c)), 0);
   /* release counters for this query */
   for (c = 0; c < 8; ++c) {
      if (nvc0_query(screen->pm.mp_counter[c]) == q) {
         screen->pm.num_mp_pm_active[c / 4]--;
         screen->pm.mp_counter[c] = NULL;
      }
   }

   BCTX_REFN_bo(nvc0->bufctx_cp, CP_QUERY, NOUVEAU_BO_GART | NOUVEAU_BO_WR,
                q->bo);

   pipe->bind_compute_state(pipe, screen->pm.prog);
   /* shader inputs: c0[0..7] = 64-bit destination address, c0[8] = sequence */
   input[0] = (q->bo->offset + q->base);
   input[1] = (q->bo->offset + q->base) >> 32;
   input[2] = q->sequence;
   pipe->launch_grid(pipe, block, grid, 0, input);

   nouveau_bufctx_reset(nvc0->bufctx_cp, NVC0_BIND_CP_QUERY);

   /* re-activate other counters */
   PUSH_SPACE(push, 16);
   mask = 0;
   for (c = 0; c < 8; ++c) {
      unsigned i;
      q = nvc0_query(screen->pm.mp_counter[c]);
      if (!q)
         continue;
      cfg = &nve4_mp_pm_queries[q->type - PIPE_QUERY_DRIVER_SPECIFIC];
      for (i = 0; i < cfg->num_counters; ++i) {
         /* each counter slot only needs to be re-enabled once */
         if (mask & (1 << q->ctr[i]))
            break;
         mask |= 1 << q->ctr[i];
         BEGIN_NVC0(push, NVE4_COMPUTE(MP_PM_FUNC(q->ctr[i])), 1);
         PUSH_DATA (push, (cfg->ctr[i].func << 4) | cfg->ctr[i].mode);
      }
   }
}
/* Accumulate the per-MP counter values of @q into a single 64-bit result.
 * The buffer layout is 12 dwords per MP: 8 counters, 64-bit clock (dwords
 * 8-9), sequence (dword 10), physid (dword 11), as written by the read-out
 * shader. */
static boolean
nve4_mp_pm_query_result(struct nvc0_context *nvc0, struct nvc0_query *q,
                        void *result, boolean wait)
{
   uint32_t count[4];
   uint64_t value = 0;
   unsigned p, c;
   const struct nve4_mp_pm_query_cfg *cfg;

   cfg = &nve4_mp_pm_queries[q->type - PIPE_QUERY_DRIVER_SPECIFIC];

   for (p = 0; p < nvc0->screen->mp_count_compute; ++p) {
      uint64_t clock;
      const unsigned b = p * 12;

      clock = *(uint64_t *)&q->data[b + 8];
      (void)clock; /* might be interesting one day */

      /* sequence mismatch => this MP's data hasn't been written yet */
      if (q->data[b + 10] != q->sequence) {
         /* WARNING: This will spin forever if you loop with wait == FALSE and
          * the push buffer hasn't been flushed !
          */
         if (!wait)
            return FALSE;
         if (nouveau_bo_wait(q->bo, NOUVEAU_BO_RD, nvc0->base.client))
            return FALSE;
      }
      for (c = 0; c < cfg->num_counters; ++c)
         count[c] = q->data[b + q->ctr[c]];
      for (; c < 4; ++c)
         count[c] = 0;

      /* combine this MP's counters into the running value */
      switch (cfg->op) {
      case PIPE_LOGICOP_AND:
         value &= count[0] & count[1] & count[2] & count[3];
         break;
      case PIPE_LOGICOP_OR:
         value |= count[0] | count[1] | count[2] | count[3];
         break;
      case PIPE_LOGICOP_CLEAR: /* abused as ADD */
      default:
         value += count[0] + count[1] + count[2] + count[3];
         break;
      }
   }
   *(uint64_t *)result = value;
   return TRUE;
}
/* pipe_screen::get_driver_query_info — enumerate the driver-specific
 * (NVE4 PM) queries.  With info == NULL, returns the number of queries;
 * otherwise fills *info for query @id and returns 1 on success, 0 if @id
 * is out of range. */
int
nvc0_screen_get_driver_query_info(struct pipe_screen *pscreen,
                                  unsigned id,
                                  struct pipe_driver_query_info *info)
{
   struct nvc0_screen *screen = nvc0_screen(pscreen);

   if (screen->base.class_3d >= NVE4_3D_CLASS) {
      unsigned count = 0;
      /* PM queries additionally require kernel support */
      if (screen->base.device->drm_version >= 0x01000101)
         count = NVE4_PM_QUERY_COUNT;
      if (!info)
         return count;
      if (id < count) {
         info->name = nve4_pm_query_names[id];
         info->query_type = NVE4_PM_QUERY(id);
         info->max_value = ~0ULL;
         info->uses_byte_units = FALSE;
         return 1;
      }
   } else {
      if (!info)
         return 0;
   }
   /* user asked for info about non-existing query */
   info->name = "this_is_not_the_query_you_are_looking_for";
   info->query_type = 0xdeadd01d;
   info->max_value = 0;
   info->uses_byte_units = FALSE;
   return 0;
}
  819. void
  820. nvc0_init_query_functions(struct nvc0_context *nvc0)
  821. {
  822. struct pipe_context *pipe = &nvc0->base.pipe;
  823. pipe->create_query = nvc0_query_create;
  824. pipe->destroy_query = nvc0_query_destroy;
  825. pipe->begin_query = nvc0_query_begin;
  826. pipe->end_query = nvc0_query_end;
  827. pipe->get_query_result = nvc0_query_result;
  828. pipe->render_condition = nvc0_render_condition;
  829. }