Clone of mesa.

bufmgr_fake.c 27KB

/* Fake version of the buffer manager so that we can prototype the
 * changes in a driver fairly quickly.  Basically wraps the old style
 * memory management in the new programming interface.
 *
 * This version imports code from the via memory manager to more closely
 * approximate the behaviour of a true memory manager.  In particular,
 * in this version we do not expect to lose texture memory contents on
 * context switches.
 */

#include "bufmgr.h"
#include "intel_context.h"
#include "intel_ioctl.h"
#include "hash.h"
#include "simple_list.h"
#include "mm.h"
#include "imports.h"

#include <sys/ioctl.h>
#include <unistd.h>

static int ttmcount = 0;

/*
 * Define this if the texture TTMs should be cached.  If it is not defined,
 * texture downloads will be slow since the TTM pages are not write-combined.
 * Backdoor mapping will very probably fix this. (texdown-pool)
 */
#define CACHED_TTMS

/*
 * Batchbuffer memory location:
 * 0 is the global texture pool (as without TTMs).
 * 1 is a memory-managed large pinned uncached TTM.  Should work as 0, but
 *   doesn't.  The only difference is that the TTM memory is accessed
 *   directly instead of through the aperture.  Runs for a while.
 * 2 is dynamic TTMs.  This is what we want.  Doesn't work either, but runs
 *   for a while, depending on the application.  multiarb works fine, for example.
 */
#define BATCH_LOCATION 0

#if (BATCH_LOCATION == 2)
#warning Batch buffers using dynamic TTMs.  Making TTMs uncached.
#undef CACHED_TTMS
#endif

struct _mesa_HashTable;

static int delayed_free( struct bufmgr *bm );

#define BM_POOL_MAX 8

/* Wrapper around mm.c's mem_block, which understands that you must
 * wait for fences to expire before memory can be freed.  This is
 * specific to our use of memcpy for uploads - an upload that was
 * processed through the command queue wouldn't need to care about
 * fences.
 */
struct block {
   struct block *next, *prev;
   int mem_type;
   struct pool *pool;           /* BM_MEM_AGP */
   struct mem_block *mem;       /* BM_MEM_AGP */
   unsigned fence;              /* BM_MEM_AGP, split to read_fence, write_fence */
   void *virtual;
   struct buffer *buf;

   drm_ttm_arg_t drm_ttm;
   drm_ttm_buf_arg_t drm_buf;
   int has_ttm;
};

struct buffer {
   unsigned id;                 /* debug only */
   unsigned size;
   unsigned alignment;
   unsigned mapped;
   unsigned flags;
   struct block *block;
};

struct pool {
   unsigned flags;
   struct mem_block *heap;
   void *virtual;
   struct block lru;
   struct block freed;

   drm_ttm_arg_t drm_ttm;
   drm_ttm_buf_arg_t drm_buf;
};

struct bufmgr {
   struct intel_context *intel;

   struct pool pool[BM_POOL_MAX];
   unsigned nr_pools;

   struct _mesa_HashTable *hash;
   unsigned buf_nr;             /* for generating ids */
};

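/* Allocate a block of card memory from pool 'pool_nr' using mm.c's
 * range allocator.  'align' is a log2 alignment.  Returns NULL if the
 * pool's heap is exhausted.
 */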
static struct block *alloc_from_pool( struct bufmgr *bm,
                                      unsigned pool_nr,
                                      unsigned size,
                                      unsigned align )
{
   struct pool *pool = &bm->pool[pool_nr];
   struct block *block = (struct block *)calloc(sizeof *block, 1);
   if (!block)
      return NULL;

   DBG_BM("alloc_from_pool %d sz 0x%x\n", pool_nr, size);

   assert(align >= 7);

   block->mem = mmAllocMem(pool->heap, size, align, 0);
   if (!block->mem) {
      DBG_BM("\t- failed\n");
      free(block);
      return NULL;
   }

   make_empty_list(block);

   block->pool = pool;
   block->mem_type = pool->flags & BM_MEM_MASK;
   block->virtual = pool->virtual + block->mem->ofs;
   block->has_ttm = 0;

   DBG_BM("\t- offset 0x%x\n", block->mem->ofs);
   return block;
}

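/* Allocate 'size' bytes of plain system memory (BM_MEM_LOCAL),
 * 128-byte aligned.
 */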
static struct block *alloc_local( unsigned size )
{
   struct block *block = (struct block *)calloc(sizeof *block, 1);
   if (!block)
      return NULL;

   DBG_BM("alloc_local 0x%x\n", size);

   block->mem_type = BM_MEM_LOCAL;
   block->virtual = ALIGN_MALLOC(size, 1<<7);
   if (!block->virtual) {
      free(block);
      return NULL;
   }

   return block;
}

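/* Central allocation routine.  Unless BM_NO_TTM (or, with dynamic
 * batch buffers disabled, BM_CLIENT) is set, back the block with a
 * freshly created dynamic TTM.  Otherwise fall back to the static
 * pools, and finally to local system memory if BM_MEM_LOCAL is
 * allowed.
 */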
static struct block *alloc_block( struct bufmgr *bm,
                                  unsigned size,
                                  unsigned align,
                                  int flags )
{
   GLuint i;
   int ret;
   struct block *block;
   unsigned alignment = (1 << align);

   if (!(flags & BM_NO_TTM)
#if (BATCH_LOCATION != 2)
#warning Disabling dynamic batch buffers
       && !(flags & BM_CLIENT)
#endif
      ) {
      block = (struct block *)calloc(sizeof *block, 1);
      if (!block) return NULL;

      make_empty_list(block);
      block->pool = bm->pool + 0;
      block->mem_type = flags;
      block->has_ttm = 0;

      block->drm_ttm.op = ttm_add;
      block->drm_ttm.size = ((size + alignment - 1) >> align) << align;
      ret = ioctl(bm->intel->driFd, DRM_IOCTL_TTM, &block->drm_ttm);
      assert(ret == 0);

      block->drm_buf.ttm_handle = block->drm_ttm.handle;
      block->drm_buf.ttm_page_offset = 0;
      block->drm_buf.num_pages = block->drm_ttm.size / getpagesize();
      block->drm_buf.next = NULL;
#ifdef CACHED_TTMS
      block->drm_buf.flags = DRM_TTM_FLAG_NEW | DRM_TTM_FLAG_CACHED;
#else
      block->drm_buf.flags = DRM_TTM_FLAG_NEW;
#endif
      block->has_ttm = 2;
      if (block->has_ttm > 1)
         block->virtual = NULL;

      ttmcount += block->drm_buf.num_pages;
      DBG_BM("ttmcount pages is %d\n", ttmcount);
      DBG_BM("ttm handle is 0x%x\n", block->drm_ttm.handle);
      return block;
   }

   if (!(flags & (BM_CLIENT))) {
      for (i = 0; i < bm->nr_pools; i++) {
         struct block *block;

         if (bm->pool[i].flags & BM_NO_ALLOC)
            continue;

         if ((bm->pool[i].flags & flags & BM_MEM_MASK) == 0)
            continue;

         block = alloc_from_pool(bm, i, size, align);
         if (block) return block;
      }
   }

   if (flags & BM_MEM_LOCAL)
      return alloc_local(size);

   return NULL;
}

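/* Allocate backing storage for 'buf', reclaiming any fence-retired
 * blocks first.  Returns nonzero on success.
 */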
static int bmAllocMem( struct bufmgr *bm,
                       struct buffer *buf,
                       GLuint flags )
{
   delayed_free(bm);

   buf->block = alloc_block(bm,
                            buf->size,
                            buf->alignment,
                            buf->flags | flags);
   if (buf->block)
      buf->block->buf = buf;
   else
      _mesa_printf("bmAllocMem failed memflags %x\n", buf->flags & BM_MEM_MASK);

   /* Sleep here or fail???
    */
   /* assert(buf->block); */

   return buf->block != NULL;
}

/* Release the card storage associated with buf:
 */
static void free_block( struct bufmgr *bm, struct block *block )
{
   int ret;

   if (!block)
      return;

   DBG_BM("free block (mem: %d, sz %d) from buf %d\n",
          block->mem_type,
          block->buf->size,
          block->buf->id);

   switch (block->mem_type) {
   case BM_MEM_AGP:
   case BM_MEM_VRAM:
      remove_from_list(block);

      if (!block->has_ttm)
         DBG_BM(" - offset %x\n", block->mem->ofs);
      else
         DBG_BM(" - offset %x\n", block->drm_buf.aper_offset * getpagesize());

      if (bmTestFence(bm, block->fence)) {
         if (!block->has_ttm) {
            mmFreeMem(block->mem);
         } else {
            block->drm_ttm.op = ttm_remove;
            ret = ioctl(bm->intel->driFd, DRM_IOCTL_TTM, &block->drm_ttm);
            ttmcount -= block->drm_buf.num_pages;
            DBG_BM("ttmcount pages is %d\n", ttmcount);
            assert(ret == 0);
         }
         free(block);
      }
      else {
         DBG_BM(" - place on delayed_free list\n");
         block->buf = NULL;
         insert_at_tail(&block->pool->freed, block);
      }
      break;

   case BM_MEM_LOCAL:
      DBG_BM(" - free local memory\n");
      ALIGN_FREE(block->virtual);
      free(block);
      break;

   default:
      DBG_BM(" - unknown memory type\n");
      free(block);
      break;
   }
}

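/* Walk each pool's freed list and release blocks whose fences have
 * retired.  Returns the number of bytes actually reclaimed.
 */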
static int delayed_free( struct bufmgr *bm )
{
   struct block *block, *tmp;
   int ret = 0;
   int rettm;
   int i;

   for (i = 0; i < bm->nr_pools; i++) {
      foreach_s(block, tmp, &bm->pool[i].freed) {
         if (bmTestFence(bm, block->fence)) {
            remove_from_list(block);

            if (!block->has_ttm) {
               ret += block->mem->size;
               mmFreeMem(block->mem);
            } else {
               block->drm_ttm.op = ttm_remove;
               rettm = ioctl(bm->intel->driFd, DRM_IOCTL_TTM, &block->drm_ttm);
               ttmcount -= block->drm_buf.num_pages;
               DBG_BM("ttmcount pages is %d\n", ttmcount);
               assert(rettm == 0);
               ret += block->drm_buf.num_pages * getpagesize();
            }

            free(block);
         }
      }
   }

   DBG_BM("%s: %d\n", __FUNCTION__, ret);
   return ret;
}

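/* Core validation routine: make sure each buffer has backing storage,
 * ask the kernel to bind/unbind the dynamic TTMs for the requested
 * memory type, then memcpy any data that still has to migrate and
 * flush the read caches.  Returns 1 on success, 0 on failure (after
 * releasing any partial allocations).
 */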
static int move_buffers( struct bufmgr *bm,
                         struct buffer *buffers[],
                         int nr,
                         int flags )
{
   struct block *newMem[BM_LIST_MAX];
   GLint i;
   GLuint nr_uploads = 0;
   drm_ttm_arg_t arg;
   struct block *block, *last_block;
   int ret;

   DBG_BM("%s\n", __FUNCTION__);

   memset(newMem, 0, sizeof(newMem));

   /* First do all the allocations (or fail):
    */
   for (i = 0; i < nr; i++) {
      if (!buffers[i]->block) {
         if (flags & BM_NO_ALLOC)
            goto cleanup;

         newMem[i] = alloc_block(bm,
                                 buffers[i]->size,
                                 buffers[i]->alignment,
                                 flags & BM_MEM_MASK);
         if (!newMem[i])
            goto cleanup;
      }
   }

   /*
    * Tell the kernel where the TTMs should be.
    */
   arg.num_bufs = 0;
   last_block = NULL;

   for (i = 0; i < nr; ++i) {
      if (newMem[i] && newMem[i]->has_ttm) {
         buffers[i]->block = newMem[i];
         newMem[i] = NULL;
      }

      block = buffers[i]->block;
      if (block->has_ttm) {
         if ((flags & BM_MEM_MASK) == BM_MEM_AGP ||
             (((flags & BM_MEM_MASK) == BM_MEM_LOCAL) &&
              (block->mem_type == BM_MEM_AGP))) {
            if (arg.num_bufs == 0)
               arg.first = &block->drm_buf;
            else
               last_block->drm_buf.next = &block->drm_buf;

            arg.num_bufs++;
            last_block = block;
            block->drm_buf.op = ((flags & BM_MEM_MASK) == BM_MEM_AGP) ?
               ttm_validate : ttm_unbind;
            block->mem_type = flags & BM_MEM_MASK;
         }
      }
   }

   arg.op = ttm_bufs;
   arg.do_fence = 0;

   DBG_BM("Num validated TTM bufs is %d\n", arg.num_bufs);
   if (arg.num_bufs) {
      ret = ioctl(bm->intel->driFd, DRM_IOCTL_TTM, &arg);
      assert(ret == 0);
   }

   /*
    * End TTM code.
    */

   for (i = 0; i < nr; ++i) {
      if (!(buffers[i]->block->mem_type & flags)) {
         if (flags & BM_NO_UPLOAD)
            goto cleanup;

         /* Known issue: this assert will get hit on texture swapping.
          * There's not much to do about that at this stage - it's a
          * todo item.
          */
         assert(!buffers[i]->mapped);

         DBG_BM("try to move buffer %d size 0x%x to pools 0x%x\n",
                buffers[i]->id, buffers[i]->size, flags & BM_MEM_MASK);

         newMem[i] = alloc_block(bm,
                                 buffers[i]->size,
                                 buffers[i]->alignment,
                                 (flags & BM_MEM_MASK) | BM_NO_TTM);
         if (!newMem[i])
            goto cleanup;
      }
   }

   /* Now copy all the image data and free the old texture memory.
    */
   for (i = 0; i < nr; i++) {
      if (newMem[i]) {
         if (buffers[i]->block) {
            /* XXX: To be replaced with DMA, GTT bind, and other
             * mechanisms in the final version.  Memcpy (or sse_memcpy) is
             * probably pretty good for local->agp uploads.
             */
            DBG_BM("memcpy %d bytes\n", buffers[i]->size);
            memcpy(newMem[i]->virtual,
                   buffers[i]->block->virtual,
                   buffers[i]->size);

            free_block(bm, buffers[i]->block);
            nr_uploads++;
         }

         buffers[i]->block = newMem[i];
         buffers[i]->block->buf = buffers[i];
      }
   }

   /* Tell hardware that its texture and other caches may be invalid:
    */
   if (nr_uploads && (flags & (BM_MEM_AGP|BM_MEM_VRAM)))
      bmFlushReadCaches(bm);

   DBG_BM("%s - success\n", __FUNCTION__);
   return 1;

 cleanup:
   /* Release any allocations made prior to failure:
    */
   for (i = 0; i < nr; i++) {
      if (newMem[i])
         free_block(bm, newMem[i]);
   }

   _mesa_printf("%s - fail\n", __FUNCTION__);
   return 0;
}

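/* Evict the least-recently-used, fence-retired buffer from the first
 * matching pool back to local memory.  Returns the number of bytes
 * freed, or 0 if nothing could be evicted.
 */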
static unsigned evict_lru( struct bufmgr *bm,
                           unsigned flags )
{
   int i;

   DBG_BM("%s\n", __FUNCTION__);

   if (flags & BM_NO_EVICT)
      return 0;

   /* XXX: this is broken with >1 active pool - all of the first pool
    * will be evicted before starting on the second.  Actually, maybe
    * you want that in some situations...
    */
   for (i = 0; i < bm->nr_pools; i++) {
      if ((bm->pool[i].flags & flags & BM_MEM_MASK) &&
          !(bm->pool[i].flags & BM_NO_EVICT)) {
         struct block *block = bm->pool[i].lru.next;
         unsigned size;

         if (block == &bm->pool[i].lru ||
             !bmTestFence(bm, block->fence))
            return 0;

         size = block->buf->size;
         move_buffers(bm, &block->buf, 1, BM_MEM_LOCAL);
         return size;
      }
   }

   return 0;
}

#if 0
/* Speculatively move texture images which haven't been used in a
 * while back to local memory.
 */
static void viaSwapOutWork( struct bufmgr *bm )
{
   unsigned total = 0;
   unsigned target;

   if (bm->thrashing) {
      target = 1*1024*1024;
   }
   else if (bmIsTexMemLow(bm)) {
      target = 64*1024;
   }
   else {
      return;
   }

   while (1) {
      unsigned size = evict_lru(bm);
      if (!size)
         return;

      total += size;
      if (total >= target)
         return;
   }
}
#endif

/***********************************************************************
 * Public functions
 */

/* The initialization functions are skewed in the fake implementation.
 * This call would be to attach to an existing manager, rather than to
 * create a local one.
 */
struct bufmgr *bm_fake_intel_Attach( struct intel_context *intel )
{
   struct bufmgr *bm = (struct bufmgr *)calloc(sizeof(*bm), 1);

   bm->intel = intel;
   bm->hash = _mesa_NewHashTable();

   return bm;
}

void bmInitMemType( struct bufmgr *bm,
                    unsigned mem_type,
                    unsigned long size )
{
   /* Nothing really to do.  Could store and use to validate
    * bmInitPool requests.
    */
}

/* The virtual pointer would go away in a true implementation.
 */
int bmInitPool( struct bufmgr *bm,
                unsigned long low_offset,
                void *low_virtual,
                unsigned long size,
                unsigned flags )
{
   GLuint i;
   struct pool *pool;

   if (bm->nr_pools >= BM_POOL_MAX)
      return -1;

   i = bm->nr_pools++;

   DBG_BM("bmInitPool %d low_offset %lx sz %lx\n",
          i, low_offset, size);

   pool = bm->pool + i;
   pool->flags = flags;
   make_empty_list(&bm->pool[i].lru);
   make_empty_list(&bm->pool[i].freed);

   if (flags == BM_MEM_AGP) {
#if (BATCH_LOCATION == 1)
#warning Replacing pool 0 with a large uncached pinned TTM.
      int ret;
      drmAddress ttmAddress;

      DBG_BM("Creating pinned TTM.\n");
      pool->drm_ttm.op = ttm_add;
      pool->drm_ttm.size = size;
      ret = ioctl(bm->intel->driFd, DRM_IOCTL_TTM, &pool->drm_ttm);
      if (ret) return -1;

      ret = drmMap(bm->intel->driFd, pool->drm_ttm.handle, size, &ttmAddress);
      DBG_BM("Virtual is 0x%lx\n", (unsigned long) ttmAddress);
      if (ret) {
         pool->drm_ttm.op = ttm_remove;
         ioctl(bm->intel->driFd, DRM_IOCTL_TTM, &pool->drm_ttm);
         return -1;
      }

      pool->drm_buf.ttm_handle = pool->drm_ttm.handle;
      pool->drm_buf.ttm_page_offset = 0;
      pool->drm_buf.num_pages = pool->drm_ttm.size / getpagesize();
      pool->drm_buf.next = NULL;
      pool->drm_buf.flags = DRM_TTM_FLAG_NEW | DRM_TTM_FLAG_PINNED;
      pool->drm_buf.op = ttm_validate;
      pool->drm_ttm.op = ttm_bufs;
      pool->drm_ttm.num_bufs = 1;
      pool->drm_ttm.first = &pool->drm_buf;

      drmGetLock(bm->intel->driFd, bm->intel->hHWContext, 0);
      ret = ioctl(bm->intel->driFd, DRM_IOCTL_TTM, &pool->drm_ttm);
      drmUnlock(bm->intel->driFd, bm->intel->hHWContext);

      low_offset = pool->drm_buf.aper_offset * getpagesize();
      pool->heap = mmInit( low_offset, size );
      pool->virtual = (char *) ttmAddress - low_offset;
      DBG_BM("Pinned buf offset is 0x%lx\n", low_offset);
#else
      pool->heap = mmInit( low_offset, size );
      pool->virtual = low_virtual - low_offset;
#endif
   } else {
      pool->heap = mmInit( low_offset, size );
      pool->virtual = low_virtual - low_offset;
   }

   return i;
}

#if 0
void bmAssertTTM(struct bufmgr *bm, unsigned n, unsigned *buffers)
{
   unsigned i;

   for (i = 0; i < n; i++) {
      struct buffer *buf = _mesa_HashLookup(bm->hash, buffers[i]);
      DBG_BM("0x%x\n", buf->flags);
      assert(buf->block);
      assert(buf->block->has_ttm);
   }
}
#endif

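/* Create 'n' buffer objects (page-aligned, allowed to live in AGP,
 * VRAM or local memory) and return their ids in 'buffers'.
 */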
void bmGenBuffers(struct bufmgr *bm, unsigned n, unsigned *buffers)
{
   unsigned i;

   for (i = 0; i < n; i++) {
      struct buffer *buf = calloc(sizeof(*buf), 1);

      buf->id = ++bm->buf_nr;
      buf->alignment = 12;      /* page-alignment to fit in with AGP swapping */
      buf->flags = BM_MEM_AGP|BM_MEM_VRAM|BM_MEM_LOCAL;

      buffers[i] = buf->id;
      _mesa_HashInsert(bm->hash, buffers[i], buf);
   }
}

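/* Free the backing storage and the buffer objects for the given ids.
 */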
void bmDeleteBuffers(struct bufmgr *bm, unsigned n, unsigned *buffers)
{
   unsigned i;

   for (i = 0; i < n; i++) {
      struct buffer *buf = _mesa_HashLookup(bm->hash, buffers[i]);

      if (buf) {
         free_block(bm, buf->block);
         free(buf);
         _mesa_HashRemove(bm->hash, buffers[i]);
      }

      assert(_mesa_HashLookup(bm->hash, buffers[i]) == NULL);
   }
}

/* Hook to inform faked buffer manager about fixed-position
 * front, depth, back buffers.  These may move to a fully memory-managed
 * scheme, or they may continue to be managed as is.  It will probably
 * be useful to pass a fixed offset here one day.
 */
unsigned bmBufferStatic(struct bufmgr *bm,
                        unsigned buffer,
                        unsigned size,
                        unsigned pool )
{
   struct buffer *buf = (struct buffer *)_mesa_HashLookup( bm->hash, buffer );

   assert(!buf->block);
   assert(bm->pool[pool].flags & BM_NO_EVICT);
   assert(bm->pool[pool].flags & BM_NO_MOVE);

   buf->size = size;
   buf->flags = bm->pool[pool].flags;
   buf->alignment = 12;

   buf->block = alloc_from_pool(bm, pool, buf->size, buf->alignment);
   if (!buf->block)
      return 0;

   buf->block->buf = buf;
   return buf->block->mem->ofs;
}

/* If buffer size changes, free and reallocate.  Otherwise update in
 * place.
 */
void bmBufferData(struct bufmgr *bm,
                  unsigned buffer,
                  unsigned size,
                  const void *data,
                  unsigned flags )
{
   struct buffer *buf = (struct buffer *)_mesa_HashLookup( bm->hash, buffer );

   DBG_BM("bmBufferData %d sz 0x%x data: %p\n", buffer, size, data);

   assert(!buf->mapped);

   if (buf->block) {
      if ((buf->block->mem_type != BM_MEM_LOCAL && !bmTestFence(bm, buf->block->fence)) ||
          (buf->size && buf->size != size) ||
          (data == NULL)) {
         free_block(bm, buf->block);
         buf->block = NULL;
      }
   }

   buf->size = size;

   if (data != NULL) {
      bmAllocMem(bm, buf, buf->flags | flags);
      memcpy(buf->block->virtual, data, size);
   }
}

/* Update the buffer in place, in whatever space it is currently resident:
 */
void bmBufferSubData(struct bufmgr *bm,
                     unsigned buffer,
                     unsigned offset,
                     unsigned size,
                     const void *data )
{
   struct buffer *buf = (struct buffer *)_mesa_HashLookup( bm->hash, buffer );

   DBG_BM("bmBufferSubdata %d offset 0x%x sz 0x%x\n", buffer, offset, size);

   if (buf->block == 0)
      bmAllocMem(bm, buf, buf->flags);

   if (buf->block->mem_type != BM_MEM_LOCAL)
      bmFinishFence(bm, buf->block->fence);

   if (size)
      memcpy(buf->block->virtual + offset, data, size);
}

/* Return a pointer to whatever space the buffer is currently resident in:
 */
void *bmMapBuffer( struct bufmgr *bm,
                   unsigned buffer,
                   unsigned flags )
{
   struct buffer *buf = (struct buffer *)_mesa_HashLookup( bm->hash, buffer );
   int ret;

   DBG_BM("bmMapBuffer %d\n", buffer);
   DBG_BM("Map: Block is %p\n", (void *) buf->block);

   if (buf->mapped)
      return NULL;

   /*
    * Hack to recognize batchbuffers.
    */
   if (buf->block == 0)
      bmAllocMem(bm, buf, flags);

   if (buf->block == 0)
      return NULL;

   buf->mapped = 1;

   /* Finish any outstanding operations to/from this memory:
    */
   if (buf->block->mem_type != BM_MEM_LOCAL)
      bmFinishFence(bm, buf->block->fence);

   if (buf->block->has_ttm > 0) {
      ret = drmMap(bm->intel->driFd, buf->block->drm_ttm.handle,
                   buf->block->drm_ttm.size, &buf->block->virtual);
      if (ret) {
         fprintf(stderr, "TTM Map failed. Handle is 0x%x, size is %lu\n",
                 buf->block->drm_ttm.handle, buf->block->drm_ttm.size);
         assert(0);
      }
   }

   DBG_BM("Mapped buf %u %p\n", buffer, buf->block->virtual);
   return buf->block->virtual;
}

void bmUnmapBuffer( struct bufmgr *bm, unsigned buffer )
{
   struct buffer *buf = (struct buffer *)_mesa_HashLookup( bm->hash, buffer );

   if (!buf)
      return;

   DBG_BM("bmUnmapBuffer %d\n", buffer);

   if (buf->block->has_ttm > 0) {
      drmUnmap(buf->block->virtual, buf->size);
      DBG_BM("Unmapped buf %u %p\n", buffer, buf->block->virtual);
      buf->block->virtual = NULL;
   }

   buf->mapped = 0;
}

/* Add a mechanism to tell the manager about some fixed buffers such
 * as the (fixed) front, back and depth buffers.  Something like this
 * may be needed even in a finalized version if we keep the static
 * management of these buffers.
 *
 * These are excluded from the buffer memory management in this file,
 * but are presented to the driver by the same interface.  In the
 * future they may become managed.
 */
#if 0
void bm_fake_SetFixedBufferParams( struct bufmgr *bm,
                                   unsigned buffer,
                                   unsigned offset,
                                   unsigned size )
{
}
#endif

/* Build the list of buffers to validate:
 */
struct bm_buffer_list *bmNewBufferList( void )
{
   struct bm_buffer_list *list = calloc(sizeof(*list), 1);
   DBG_BM("bmNewBufferList\n");
   return list;
}

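/* Append a buffer to the validation list, recording where to write
 * back its memory type and aperture offset after validation.
 */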
void bmAddBuffer( struct bm_buffer_list *list,
                  unsigned buffer,
                  unsigned flags,
                  unsigned *memtype_return,
                  unsigned *offset_return )
{
   assert(list->nr < BM_LIST_MAX);

   list->elem[list->nr].buffer = buffer;
   list->elem[list->nr].memtype_return = memtype_return;
   list->elem[list->nr].offset_return = offset_return;

   DBG_BM("bmAddBuffer nr %d buf %d\n",
          list->nr, buffer);

   list->nr++;
}

void bmFreeBufferList( struct bm_buffer_list *list )
{
   free(list);
}

/* To be called prior to emitting commands to hardware which reference
 * these buffers.  The buffer_usage list provides information on where
 * the buffers should be placed and whether their contents need to be
 * preserved on copying.  The offset and pool data elements are return
 * values from this function telling the driver exactly where the
 * buffers are currently located.
 */
int bmValidateBufferList( struct bufmgr *bm,
                          struct bm_buffer_list *list,
                          unsigned flags )
{
   struct buffer *bufs[BM_LIST_MAX];
   unsigned i;

   DBG_BM("%s\n", __FUNCTION__);

   if (list->nr > BM_LIST_MAX)
      return 0;

   for (i = 0; i < list->nr; i++) {
      bufs[i] = _mesa_HashLookup(bm->hash, list->elem[i].buffer);
   }

   /* The old story: evict one texture after another until allocation
    * succeeds.  This is a pretty poor strategy but really hard to do
    * better without more infrastructure... Which is coming - hooray!
    */
   while (!move_buffers(bm, bufs, list->nr, flags)) {
      /*
       * We should never get here.  The kernel handles this.
       */
      assert(0);

      if (!delayed_free(bm) &&
          !evict_lru(bm, flags))
         return 0;

      _mesa_printf("couldn't allocate sufficient texture memory\n");
      exit(1);
   }

   for (i = 0; i < list->nr; i++) {
      if (bufs[i]->block->has_ttm > 1) {
         if (list->elem[i].offset_return)
            list->elem[i].offset_return[0] =
               bufs[i]->block->drm_buf.aper_offset * getpagesize();
         DBG_BM("TTM OFFS 0x%x\n",
                bufs[i]->block->drm_buf.aper_offset * getpagesize());
      } else {
         if (list->elem[i].offset_return)
            list->elem[i].offset_return[0] = bufs[i]->block->mem->ofs;
         DBG_BM("Pinned Offs 0x%x\n", bufs[i]->block->mem->ofs);
      }

      if (list->elem[i].memtype_return)
         list->elem[i].memtype_return[0] = bufs[i]->block->mem_type;
   }

   return 1;
}

/* After commands are emitted but before unlocking, this must be
 * called so that the buffer manager can correctly age the buffers.
 * The buffer manager keeps track of the list of validated buffers, so
 * already knows what to apply the fence to.
 *
 * The buffer manager knows how to emit and test fences directly
 * through the drm and without callbacks or whatever into the driver.
 */
unsigned bmFenceBufferList( struct bufmgr *bm, struct bm_buffer_list *list )
{
   drm_ttm_arg_t arg;
   int ret;

   arg.op = ttm_bufs;
   arg.do_fence = 1;
   arg.num_bufs = 0;

   ret = ioctl(bm->intel->driFd, DRM_IOCTL_TTM, &arg);
   assert(ret == 0);

   DBG_BM("%s (%d bufs)\n", __FUNCTION__, list->nr);

   if (list->nr) {
      unsigned i;
      unsigned fence = bmSetFence( bm );

      /* Move all buffers to head of resident list and set their fences
       */
      for (i = 0; i < list->nr; i++) {
         struct buffer *buf = _mesa_HashLookup(bm->hash, list->elem[i].buffer);

         if (!buf->block->has_ttm) {
            move_to_head(&buf->block->pool->lru, buf->block);
         }
         buf->block->fence = fence;
      }

      return fence;
   }
   else
      return 0;
}

/* This functionality is used by the buffer manager, not really sure
 * if we need to be exposing it in this way, probably libdrm will
 * offer equivalent calls.
 *
 * For now they can stay, but will likely change/move before final:
 */
unsigned bmSetFence( struct bufmgr *bm )
{
   assert(bm->intel->locked);
   return intelEmitIrqLocked( bm->intel );
}

int bmTestFence( struct bufmgr *bm, unsigned fence )
{
/*    if (fence % 1024 == 0) */
/*       _mesa_printf("%d %d\n", fence, bm->intel->sarea->last_dispatch); */
   DBG_BM("fence: %d %d\n", fence, bm->intel->sarea->last_dispatch);
   return fence <= bm->intel->sarea->last_dispatch;
}

void bmFinishFence( struct bufmgr *bm, unsigned fence )
{
   if (!bmTestFence(bm, fence))
      intelWaitIrq( bm->intel, fence );
}

/* There is a need to tell the hardware to flush various caches
 * before we can start reading and writing video memory.
 *
 * TODO: Need a flag value to tell hardware which caches have changed?
 * Who would we rely on to populate the flag?
 */

/* If new data is uploaded/mapped to video or agp memory, need to
 * flush the texture and other read caches to ensure the new version
 * is picked up.  Can be done immediately after the upload (ie. within
 * ValidateBuffers).
 */
void bmFlushReadCaches( struct bufmgr *bm )
{
}

/* If a buffer which has been written to is going to be evicted, read
 * by bmGetBufferData or mapped with bmMapBuffer, we need to flush the
 * write cache first.  Probably want to make sure this happens
 * immediately after the last write and before the fence (how to
 * tell?).  If we wait until just prior to the evict/read/map, we would
 * then have to emit another fence and wait for the hw queue to drain
 * to be sure the caches had flushed.
 *
 * A possible strategy:
 *   - every once in a while, when there is no last_draw_flush_fence
 *     outstanding, emit a draw-cache flush just prior to the fence.
 *   - note the fence (last_draw_flush_fence).
 *   - note the most recently retired value of last_draw_flush_fence in
 *     last_retired_draw_flush_fence.
 *   - keep track of which fence each buffer was last written to in
 *     buffer.last_write_fence.
 *   - on evict/read/map, check:
 *
 *        if buffer.last_write_fence > last_draw_flush_fence {
 *           emit_flush
 *           last_draw_flush_fence = emit fence
 *        }
 *        if last_write_fence > last_retired_draw_flush_fence {
 *           finish_fence(last_draw_flush_fence)
 *           last_retired_draw_flush_fence = last_draw_flush_fence
 *        }
 */
void bmFlushDrawCache( struct bufmgr *bm )
{
}

/* Specifically ignore texture memory sharing.
 */
void bm_fake_NotifyContendedLockTake( struct bufmgr *bm )
{
   fprintf(stderr, "did we just lose texture memory? oh well, never mind\n");
}