Clone of mesa.

tu_device.c (65 KB)

/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "tu_private.h"

#include <fcntl.h>
#include <libsync.h>
#include <stdbool.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/sysinfo.h>
#include <unistd.h>
#include <xf86drm.h>

#include "util/debug.h"
#include "util/disk_cache.h"
#include "util/strtod.h"
#include "vk_format.h"
#include "vk_util.h"

#include "drm-uapi/msm_drm.h"

static int
tu_device_get_cache_uuid(uint16_t family, void *uuid)
{
   uint32_t mesa_timestamp;
   uint16_t f = family;
   memset(uuid, 0, VK_UUID_SIZE);
   if (!disk_cache_get_function_timestamp(tu_device_get_cache_uuid,
                                          &mesa_timestamp))
      return -1;

   memcpy(uuid, &mesa_timestamp, 4);
   memcpy((char *) uuid + 4, &f, 2);
   snprintf((char *) uuid + 6, VK_UUID_SIZE - 10, "tu");
   return 0;
}
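
/* Resulting cache-UUID layout (from the function above): bytes 0-3 hold the
 * mesa build timestamp, bytes 4-5 the GPU family, and byte 6 onward the
 * string "tu"; the remaining bytes stay zero from the memset.
 */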

static void
tu_get_driver_uuid(void *uuid)
{
   memset(uuid, 0, VK_UUID_SIZE);
   snprintf(uuid, VK_UUID_SIZE, "freedreno");
}

static void
tu_get_device_uuid(void *uuid)
{
   memset(uuid, 0, VK_UUID_SIZE);
}

static VkResult
tu_bo_init(struct tu_device *dev,
           struct tu_bo *bo,
           uint32_t gem_handle,
           uint64_t size)
{
   uint64_t iova = tu_gem_info_iova(dev, gem_handle);
   if (!iova)
      return VK_ERROR_OUT_OF_DEVICE_MEMORY;

   *bo = (struct tu_bo) {
      .gem_handle = gem_handle,
      .size = size,
      .iova = iova,
   };

   return VK_SUCCESS;
}

VkResult
tu_bo_init_new(struct tu_device *dev, struct tu_bo *bo, uint64_t size)
{
   /* TODO: Choose better flags. As of 2018-11-12, freedreno/drm/msm_bo.c
    * always sets `flags = MSM_BO_WC`, and we copy that behavior here.
    */
   uint32_t gem_handle = tu_gem_new(dev, size, MSM_BO_WC);
   if (!gem_handle)
      return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);

   VkResult result = tu_bo_init(dev, bo, gem_handle, size);
   if (result != VK_SUCCESS) {
      tu_gem_close(dev, gem_handle);
      return vk_error(dev->instance, result);
   }

   return VK_SUCCESS;
}

VkResult
tu_bo_init_dmabuf(struct tu_device *dev,
                  struct tu_bo *bo,
                  uint64_t size,
                  int fd)
{
   uint32_t gem_handle = tu_gem_import_dmabuf(dev, fd, size);
   if (!gem_handle)
      return vk_error(dev->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE);

   VkResult result = tu_bo_init(dev, bo, gem_handle, size);
   if (result != VK_SUCCESS) {
      tu_gem_close(dev, gem_handle);
      return vk_error(dev->instance, result);
   }

   return VK_SUCCESS;
}

int
tu_bo_export_dmabuf(struct tu_device *dev, struct tu_bo *bo)
{
   return tu_gem_export_dmabuf(dev, bo->gem_handle);
}

VkResult
tu_bo_map(struct tu_device *dev, struct tu_bo *bo)
{
   if (bo->map)
      return VK_SUCCESS;

   uint64_t offset = tu_gem_info_offset(dev, bo->gem_handle);
   if (!offset)
      return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);

   /* TODO: Should we use the wrapper os_mmap() like Freedreno does? */
   void *map = mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
                    dev->physical_device->local_fd, offset);
   if (map == MAP_FAILED)
      return vk_error(dev->instance, VK_ERROR_MEMORY_MAP_FAILED);

   bo->map = map;
   return VK_SUCCESS;
}

void
tu_bo_finish(struct tu_device *dev, struct tu_bo *bo)
{
   assert(bo->gem_handle);

   if (bo->map)
      munmap(bo->map, bo->size);

   tu_gem_close(dev, bo->gem_handle);
}
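
/* A minimal usage sketch for the BO helpers above (illustrative only; error
 * handling elided, `dev` is assumed to be a valid struct tu_device *):
 *
 *    struct tu_bo bo;
 *    if (tu_bo_init_new(dev, &bo, 4096) == VK_SUCCESS) {
 *       if (tu_bo_map(dev, &bo) == VK_SUCCESS)
 *          memset(bo.map, 0, bo.size);  // CPU access through the mapping
 *       // bo.iova is the GPU-visible address for command streams
 *       tu_bo_finish(dev, &bo);         // unmaps and closes the GEM handle
 *    }
 */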

static VkResult
tu_physical_device_init(struct tu_physical_device *device,
                        struct tu_instance *instance,
                        drmDevicePtr drm_device)
{
   const char *path = drm_device->nodes[DRM_NODE_RENDER];
   VkResult result = VK_SUCCESS;
   drmVersionPtr version;
   int fd;
   int master_fd = -1;

   fd = open(path, O_RDWR | O_CLOEXEC);
   if (fd < 0) {
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "failed to open device %s", path);
   }

   /* Version 1.3 added MSM_INFO_IOVA. */
   const int min_version_major = 1;
   const int min_version_minor = 3;

   version = drmGetVersion(fd);
   if (!version) {
      close(fd);
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "failed to query kernel driver version for device %s",
                       path);
   }

   if (strcmp(version->name, "msm")) {
      drmFreeVersion(version);
      close(fd);
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "device %s does not use the msm kernel driver", path);
   }

   if (version->version_major != min_version_major ||
       version->version_minor < min_version_minor) {
      result = vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                         "kernel driver for device %s has version %d.%d, "
                         "but Vulkan requires version >= %d.%d",
                         path, version->version_major, version->version_minor,
                         min_version_major, min_version_minor);
      drmFreeVersion(version);
      close(fd);
      return result;
   }
   drmFreeVersion(version);

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      tu_logi("Found compatible device '%s'.", path);

   device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   device->instance = instance;
   assert(strlen(path) < ARRAY_SIZE(device->path));
   strncpy(device->path, path, ARRAY_SIZE(device->path));

   if (instance->enabled_extensions.KHR_display) {
      master_fd =
         open(drm_device->nodes[DRM_NODE_PRIMARY], O_RDWR | O_CLOEXEC);
      if (master_fd >= 0) {
         /* TODO: free master_fd if accel is not working? */
      }
   }

   device->master_fd = master_fd;
   device->local_fd = fd;

   if (tu_drm_get_gpu_id(device, &device->gpu_id)) {
      if (instance->debug_flags & TU_DEBUG_STARTUP)
         tu_logi("Could not query the GPU ID");
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "could not get GPU ID");
      goto fail;
   }

   if (tu_drm_get_gmem_size(device, &device->gmem_size)) {
      if (instance->debug_flags & TU_DEBUG_STARTUP)
         tu_logi("Could not query the GMEM size");
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "could not get GMEM size");
      goto fail;
   }

   memset(device->name, 0, sizeof(device->name));
   sprintf(device->name, "FD%d", device->gpu_id);

   switch (device->gpu_id) {
   case 630:
      device->tile_align_w = 32;
      device->tile_align_h = 32;
      break;
   default:
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "device %s is unsupported", device->name);
      goto fail;
   }

   if (tu_device_get_cache_uuid(device->gpu_id, device->cache_uuid)) {
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "cannot generate UUID");
      goto fail;
   }

   /* The gpu id is already embedded in the uuid so we just pass "tu"
    * when creating the cache.
    */
   char buf[VK_UUID_SIZE * 2 + 1];
   disk_cache_format_hex_id(buf, device->cache_uuid, VK_UUID_SIZE * 2);
   device->disk_cache = disk_cache_create(device->name, buf, 0);

   fprintf(stderr, "WARNING: tu is not a conformant vulkan implementation, "
                   "testing use only.\n");

   tu_get_driver_uuid(&device->driver_uuid);
   tu_get_device_uuid(&device->device_uuid);

   tu_fill_device_extension_table(device, &device->supported_extensions);

   if (result != VK_SUCCESS) {
      vk_error(instance, result);
      goto fail;
   }

   result = tu_wsi_init(device);
   if (result != VK_SUCCESS) {
      vk_error(instance, result);
      goto fail;
   }

   return VK_SUCCESS;

fail:
   close(fd);
   if (master_fd != -1)
      close(master_fd);
   return result;
}

static void
tu_physical_device_finish(struct tu_physical_device *device)
{
   tu_wsi_finish(device);

   disk_cache_destroy(device->disk_cache);
   close(device->local_fd);
   if (device->master_fd != -1)
      close(device->master_fd);
}

static void *
default_alloc_func(void *pUserData,
                   size_t size,
                   size_t align,
                   VkSystemAllocationScope allocationScope)
{
   return malloc(size);
}

static void *
default_realloc_func(void *pUserData,
                     void *pOriginal,
                     size_t size,
                     size_t align,
                     VkSystemAllocationScope allocationScope)
{
   return realloc(pOriginal, size);
}

static void
default_free_func(void *pUserData, void *pMemory)
{
   free(pMemory);
}

static const VkAllocationCallbacks default_alloc = {
   .pUserData = NULL,
   .pfnAllocation = default_alloc_func,
   .pfnReallocation = default_realloc_func,
   .pfnFree = default_free_func,
};
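
/* Note: the default callbacks above ignore the requested alignment and rely
 * on malloc's natural alignment, which covers the 8-byte alignment this file
 * requests; they are a fallback, not a general-purpose aligned allocator.
 */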

static const struct debug_control tu_debug_options[] = {
   { "startup", TU_DEBUG_STARTUP },
   { "nir", TU_DEBUG_NIR },
   { "ir3", TU_DEBUG_IR3 },
   { NULL, 0 }
};

const char *
tu_get_debug_option_name(int id)
{
   assert(id < ARRAY_SIZE(tu_debug_options) - 1);
   return tu_debug_options[id].string;
}

static int
tu_get_instance_extension_index(const char *name)
{
   for (unsigned i = 0; i < TU_INSTANCE_EXTENSION_COUNT; ++i) {
      if (strcmp(name, tu_instance_extensions[i].extensionName) == 0)
         return i;
   }
   return -1;
}

VkResult
tu_CreateInstance(const VkInstanceCreateInfo *pCreateInfo,
                  const VkAllocationCallbacks *pAllocator,
                  VkInstance *pInstance)
{
   struct tu_instance *instance;
   VkResult result;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);

   uint32_t client_version;
   if (pCreateInfo->pApplicationInfo &&
       pCreateInfo->pApplicationInfo->apiVersion != 0) {
      client_version = pCreateInfo->pApplicationInfo->apiVersion;
   } else {
      tu_EnumerateInstanceVersion(&client_version);
   }

   instance = vk_zalloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
                         VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!instance)
      return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);

   instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;

   if (pAllocator)
      instance->alloc = *pAllocator;
   else
      instance->alloc = default_alloc;

   instance->api_version = client_version;
   instance->physical_device_count = -1;

   instance->debug_flags =
      parse_debug_string(getenv("TU_DEBUG"), tu_debug_options);

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      tu_logi("Created an instance");

   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
      const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
      int index = tu_get_instance_extension_index(ext_name);

      if (index < 0 || !tu_supported_instance_extensions.extensions[index]) {
         vk_free2(&default_alloc, pAllocator, instance);
         return vk_error(instance, VK_ERROR_EXTENSION_NOT_PRESENT);
      }

      instance->enabled_extensions.extensions[index] = true;
   }

   result = vk_debug_report_instance_init(&instance->debug_report_callbacks);
   if (result != VK_SUCCESS) {
      vk_free2(&default_alloc, pAllocator, instance);
      return vk_error(instance, result);
   }

   _mesa_locale_init();

   VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));

   *pInstance = tu_instance_to_handle(instance);

   return VK_SUCCESS;
}

void
tu_DestroyInstance(VkInstance _instance,
                   const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);

   if (!instance)
      return;

   for (int i = 0; i < instance->physical_device_count; ++i) {
      tu_physical_device_finish(instance->physical_devices + i);
   }

   VG(VALGRIND_DESTROY_MEMPOOL(instance));

   _mesa_locale_fini();

   vk_debug_report_instance_destroy(&instance->debug_report_callbacks);

   vk_free(&instance->alloc, instance);
}

static VkResult
tu_enumerate_devices(struct tu_instance *instance)
{
   /* TODO: Check for more devices? */
   drmDevicePtr devices[8];
   VkResult result = VK_ERROR_INCOMPATIBLE_DRIVER;
   int max_devices;

   instance->physical_device_count = 0;

   max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      tu_logi("Found %d drm nodes", max_devices);

   if (max_devices < 1)
      return vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER);

   for (unsigned i = 0; i < (unsigned) max_devices; i++) {
      if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
          devices[i]->bustype == DRM_BUS_PLATFORM) {

         result = tu_physical_device_init(
            instance->physical_devices + instance->physical_device_count,
            instance, devices[i]);
         if (result == VK_SUCCESS)
            ++instance->physical_device_count;
         else if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
            break;
      }
   }
   drmFreeDevices(devices, max_devices);

   return result;
}

VkResult
tu_EnumeratePhysicalDevices(VkInstance _instance,
                            uint32_t *pPhysicalDeviceCount,
                            VkPhysicalDevice *pPhysicalDevices)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   VK_OUTARRAY_MAKE(out, pPhysicalDevices, pPhysicalDeviceCount);

   VkResult result;

   if (instance->physical_device_count < 0) {
      result = tu_enumerate_devices(instance);
      if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
         return result;
   }

   for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
      vk_outarray_append(&out, p)
      {
         *p = tu_physical_device_to_handle(instance->physical_devices + i);
      }
   }

   return vk_outarray_status(&out);
}

VkResult
tu_EnumeratePhysicalDeviceGroups(
   VkInstance _instance,
   uint32_t *pPhysicalDeviceGroupCount,
   VkPhysicalDeviceGroupProperties *pPhysicalDeviceGroupProperties)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   VK_OUTARRAY_MAKE(out, pPhysicalDeviceGroupProperties,
                    pPhysicalDeviceGroupCount);
   VkResult result;

   if (instance->physical_device_count < 0) {
      result = tu_enumerate_devices(instance);
      if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
         return result;
   }

   for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
      vk_outarray_append(&out, p)
      {
         p->physicalDeviceCount = 1;
         p->physicalDevices[0] =
            tu_physical_device_to_handle(instance->physical_devices + i);
         p->subsetAllocation = false;
      }
   }

   return vk_outarray_status(&out);
}

void
tu_GetPhysicalDeviceFeatures(VkPhysicalDevice physicalDevice,
                             VkPhysicalDeviceFeatures *pFeatures)
{
   memset(pFeatures, 0, sizeof(*pFeatures));

   *pFeatures = (VkPhysicalDeviceFeatures) {
      .robustBufferAccess = false,
      .fullDrawIndexUint32 = false,
      .imageCubeArray = false,
      .independentBlend = false,
      .geometryShader = false,
      .tessellationShader = false,
      .sampleRateShading = false,
      .dualSrcBlend = false,
      .logicOp = false,
      .multiDrawIndirect = false,
      .drawIndirectFirstInstance = false,
      .depthClamp = false,
      .depthBiasClamp = false,
      .fillModeNonSolid = false,
      .depthBounds = false,
      .wideLines = false,
      .largePoints = false,
      .alphaToOne = false,
      .multiViewport = false,
      .samplerAnisotropy = false,
      .textureCompressionETC2 = false,
      .textureCompressionASTC_LDR = false,
      .textureCompressionBC = false,
      .occlusionQueryPrecise = false,
      .pipelineStatisticsQuery = false,
      .vertexPipelineStoresAndAtomics = false,
      .fragmentStoresAndAtomics = false,
      .shaderTessellationAndGeometryPointSize = false,
      .shaderImageGatherExtended = false,
      .shaderStorageImageExtendedFormats = false,
      .shaderStorageImageMultisample = false,
      .shaderUniformBufferArrayDynamicIndexing = false,
      .shaderSampledImageArrayDynamicIndexing = false,
      .shaderStorageBufferArrayDynamicIndexing = false,
      .shaderStorageImageArrayDynamicIndexing = false,
      .shaderStorageImageReadWithoutFormat = false,
      .shaderStorageImageWriteWithoutFormat = false,
      .shaderClipDistance = false,
      .shaderCullDistance = false,
      .shaderFloat64 = false,
      .shaderInt64 = false,
      .shaderInt16 = false,
      .sparseBinding = false,
      .variableMultisampleRate = false,
      .inheritedQueries = false,
   };
}

void
tu_GetPhysicalDeviceFeatures2(VkPhysicalDevice physicalDevice,
                              VkPhysicalDeviceFeatures2 *pFeatures)
{
   vk_foreach_struct(ext, pFeatures->pNext)
   {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES: {
         VkPhysicalDeviceVariablePointersFeatures *features = (void *) ext;
         features->variablePointersStorageBuffer = false;
         features->variablePointers = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES: {
         VkPhysicalDeviceMultiviewFeatures *features =
            (VkPhysicalDeviceMultiviewFeatures *) ext;
         features->multiview = false;
         features->multiviewGeometryShader = false;
         features->multiviewTessellationShader = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES: {
         VkPhysicalDeviceShaderDrawParametersFeatures *features =
            (VkPhysicalDeviceShaderDrawParametersFeatures *) ext;
         features->shaderDrawParameters = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES: {
         VkPhysicalDeviceProtectedMemoryFeatures *features =
            (VkPhysicalDeviceProtectedMemoryFeatures *) ext;
         features->protectedMemory = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES: {
         VkPhysicalDevice16BitStorageFeatures *features =
            (VkPhysicalDevice16BitStorageFeatures *) ext;
         features->storageBuffer16BitAccess = false;
         features->uniformAndStorageBuffer16BitAccess = false;
         features->storagePushConstant16 = false;
         features->storageInputOutput16 = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES: {
         VkPhysicalDeviceSamplerYcbcrConversionFeatures *features =
            (VkPhysicalDeviceSamplerYcbcrConversionFeatures *) ext;
         features->samplerYcbcrConversion = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT: {
         VkPhysicalDeviceDescriptorIndexingFeaturesEXT *features =
            (VkPhysicalDeviceDescriptorIndexingFeaturesEXT *) ext;
         features->shaderInputAttachmentArrayDynamicIndexing = false;
         features->shaderUniformTexelBufferArrayDynamicIndexing = false;
         features->shaderStorageTexelBufferArrayDynamicIndexing = false;
         features->shaderUniformBufferArrayNonUniformIndexing = false;
         features->shaderSampledImageArrayNonUniformIndexing = false;
         features->shaderStorageBufferArrayNonUniformIndexing = false;
         features->shaderStorageImageArrayNonUniformIndexing = false;
         features->shaderInputAttachmentArrayNonUniformIndexing = false;
         features->shaderUniformTexelBufferArrayNonUniformIndexing = false;
         features->shaderStorageTexelBufferArrayNonUniformIndexing = false;
         features->descriptorBindingUniformBufferUpdateAfterBind = false;
         features->descriptorBindingSampledImageUpdateAfterBind = false;
         features->descriptorBindingStorageImageUpdateAfterBind = false;
         features->descriptorBindingStorageBufferUpdateAfterBind = false;
         features->descriptorBindingUniformTexelBufferUpdateAfterBind = false;
         features->descriptorBindingStorageTexelBufferUpdateAfterBind = false;
         features->descriptorBindingUpdateUnusedWhilePending = false;
         features->descriptorBindingPartiallyBound = false;
         features->descriptorBindingVariableDescriptorCount = false;
         features->runtimeDescriptorArray = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT: {
         VkPhysicalDeviceConditionalRenderingFeaturesEXT *features =
            (VkPhysicalDeviceConditionalRenderingFeaturesEXT *) ext;
         features->conditionalRendering = false;
         features->inheritedConditionalRendering = false;
         break;
      }
      default:
         break;
      }
   }
   return tu_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);
}

void
tu_GetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice,
                               VkPhysicalDeviceProperties *pProperties)
{
   TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
   VkSampleCountFlags sample_counts = 0xf;

   /* Make sure that the entire descriptor set is addressable with a signed
    * 32-bit int, i.e. the sum of all limits scaled by descriptor size must
    * be at most 2 GiB. A combined image & sampler object counts toward both
    * the sampler and sampled-image limits. This limit really belongs to the
    * pipeline layout, not the set layout, but there is no per-set limit, so
    * we just impose a pipeline-level limit. No app is likely to hit it soon.
    */
   size_t max_descriptor_set_size =
      ((1ull << 31) - 16 * MAX_DYNAMIC_BUFFERS) /
      (32 /* uniform buffer, 32 due to potential space wasted on alignment */ +
       32 /* storage buffer, 32 due to potential space wasted on alignment */ +
       32 /* sampler, largest when combined with image */ +
       64 /* sampled image */ + 64 /* storage image */);
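
   /* Worked out: the denominator above sums to 224 bytes per descriptor, so
    * this is roughly (2 GiB - dynamic-buffer reserve) / 224, on the order of
    * 9.5 million descriptors per stage.
    */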

   VkPhysicalDeviceLimits limits = {
      .maxImageDimension1D = (1 << 14),
      .maxImageDimension2D = (1 << 14),
      .maxImageDimension3D = (1 << 11),
      .maxImageDimensionCube = (1 << 14),
      .maxImageArrayLayers = (1 << 11),
      .maxTexelBufferElements = 128 * 1024 * 1024,
      .maxUniformBufferRange = UINT32_MAX,
      .maxStorageBufferRange = UINT32_MAX,
      .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
      .maxMemoryAllocationCount = UINT32_MAX,
      .maxSamplerAllocationCount = 64 * 1024,
      .bufferImageGranularity = 64, /* A cache line */
      .sparseAddressSpaceSize = 0xffffffffu, /* buffer max size */
      .maxBoundDescriptorSets = MAX_SETS,
      .maxPerStageDescriptorSamplers = max_descriptor_set_size,
      .maxPerStageDescriptorUniformBuffers = max_descriptor_set_size,
      .maxPerStageDescriptorStorageBuffers = max_descriptor_set_size,
      .maxPerStageDescriptorSampledImages = max_descriptor_set_size,
      .maxPerStageDescriptorStorageImages = max_descriptor_set_size,
      .maxPerStageDescriptorInputAttachments = max_descriptor_set_size,
      .maxPerStageResources = max_descriptor_set_size,
      .maxDescriptorSetSamplers = max_descriptor_set_size,
      .maxDescriptorSetUniformBuffers = max_descriptor_set_size,
      .maxDescriptorSetUniformBuffersDynamic = MAX_DYNAMIC_UNIFORM_BUFFERS,
      .maxDescriptorSetStorageBuffers = max_descriptor_set_size,
      .maxDescriptorSetStorageBuffersDynamic = MAX_DYNAMIC_STORAGE_BUFFERS,
      .maxDescriptorSetSampledImages = max_descriptor_set_size,
      .maxDescriptorSetStorageImages = max_descriptor_set_size,
      .maxDescriptorSetInputAttachments = max_descriptor_set_size,
      .maxVertexInputAttributes = 32,
      .maxVertexInputBindings = 32,
      .maxVertexInputAttributeOffset = 2047,
      .maxVertexInputBindingStride = 2048,
      .maxVertexOutputComponents = 128,
      .maxTessellationGenerationLevel = 64,
      .maxTessellationPatchSize = 32,
      .maxTessellationControlPerVertexInputComponents = 128,
      .maxTessellationControlPerVertexOutputComponents = 128,
      .maxTessellationControlPerPatchOutputComponents = 120,
      .maxTessellationControlTotalOutputComponents = 4096,
      .maxTessellationEvaluationInputComponents = 128,
      .maxTessellationEvaluationOutputComponents = 128,
      .maxGeometryShaderInvocations = 127,
      .maxGeometryInputComponents = 64,
      .maxGeometryOutputComponents = 128,
      .maxGeometryOutputVertices = 256,
      .maxGeometryTotalOutputComponents = 1024,
      .maxFragmentInputComponents = 128,
      .maxFragmentOutputAttachments = 8,
      .maxFragmentDualSrcAttachments = 1,
      .maxFragmentCombinedOutputResources = 8,
      .maxComputeSharedMemorySize = 32768,
      .maxComputeWorkGroupCount = { 65535, 65535, 65535 },
      .maxComputeWorkGroupInvocations = 2048,
      .maxComputeWorkGroupSize = { 2048, 2048, 2048 },
      .subPixelPrecisionBits = 4 /* FIXME */,
      .subTexelPrecisionBits = 4 /* FIXME */,
      .mipmapPrecisionBits = 4 /* FIXME */,
      .maxDrawIndexedIndexValue = UINT32_MAX,
      .maxDrawIndirectCount = UINT32_MAX,
      .maxSamplerLodBias = 16,
      .maxSamplerAnisotropy = 16,
      .maxViewports = MAX_VIEWPORTS,
      .maxViewportDimensions = { (1 << 14), (1 << 14) },
      .viewportBoundsRange = { INT16_MIN, INT16_MAX },
      .viewportSubPixelBits = 8,
      .minMemoryMapAlignment = 4096, /* A page */
      .minTexelBufferOffsetAlignment = 1,
      .minUniformBufferOffsetAlignment = 4,
      .minStorageBufferOffsetAlignment = 4,
      .minTexelOffset = -32,
      .maxTexelOffset = 31,
      .minTexelGatherOffset = -32,
      .maxTexelGatherOffset = 31,
      .minInterpolationOffset = -2,
      .maxInterpolationOffset = 2,
      .subPixelInterpolationOffsetBits = 8,
      .maxFramebufferWidth = (1 << 14),
      .maxFramebufferHeight = (1 << 14),
      .maxFramebufferLayers = (1 << 10),
      .framebufferColorSampleCounts = sample_counts,
      .framebufferDepthSampleCounts = sample_counts,
      .framebufferStencilSampleCounts = sample_counts,
      .framebufferNoAttachmentsSampleCounts = sample_counts,
      .maxColorAttachments = MAX_RTS,
      .sampledImageColorSampleCounts = sample_counts,
      .sampledImageIntegerSampleCounts = VK_SAMPLE_COUNT_1_BIT,
      .sampledImageDepthSampleCounts = sample_counts,
      .sampledImageStencilSampleCounts = sample_counts,
      .storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
      .maxSampleMaskWords = 1,
      .timestampComputeAndGraphics = true,
      .timestampPeriod = 1,
      .maxClipDistances = 8,
      .maxCullDistances = 8,
      .maxCombinedClipAndCullDistances = 8,
      .discreteQueuePriorities = 1,
      .pointSizeRange = { 0.125, 255.875 },
      .lineWidthRange = { 0.0, 7.9921875 },
      .pointSizeGranularity = (1.0 / 8.0),
      .lineWidthGranularity = (1.0 / 128.0),
      .strictLines = false, /* FINISHME */
      .standardSampleLocations = true,
      .optimalBufferCopyOffsetAlignment = 128,
      .optimalBufferCopyRowPitchAlignment = 128,
      .nonCoherentAtomSize = 64,
   };

   *pProperties = (VkPhysicalDeviceProperties) {
      .apiVersion = tu_physical_device_api_version(pdevice),
      .driverVersion = vk_get_driver_version(),
      .vendorID = 0, /* TODO */
      .deviceID = 0,
      .deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
      .limits = limits,
      .sparseProperties = { 0 },
   };

   strcpy(pProperties->deviceName, pdevice->name);
   memcpy(pProperties->pipelineCacheUUID, pdevice->cache_uuid, VK_UUID_SIZE);
}

void
tu_GetPhysicalDeviceProperties2(VkPhysicalDevice physicalDevice,
                                VkPhysicalDeviceProperties2 *pProperties)
{
   TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);

   tu_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);

   vk_foreach_struct(ext, pProperties->pNext)
   {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: {
         VkPhysicalDevicePushDescriptorPropertiesKHR *properties =
            (VkPhysicalDevicePushDescriptorPropertiesKHR *) ext;
         properties->maxPushDescriptors = MAX_PUSH_DESCRIPTORS;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES: {
         VkPhysicalDeviceIDProperties *properties =
            (VkPhysicalDeviceIDProperties *) ext;
         memcpy(properties->driverUUID, pdevice->driver_uuid, VK_UUID_SIZE);
         memcpy(properties->deviceUUID, pdevice->device_uuid, VK_UUID_SIZE);
         properties->deviceLUIDValid = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES: {
         VkPhysicalDeviceMultiviewProperties *properties =
            (VkPhysicalDeviceMultiviewProperties *) ext;
         properties->maxMultiviewViewCount = MAX_VIEWS;
         properties->maxMultiviewInstanceIndex = INT_MAX;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES: {
         VkPhysicalDevicePointClippingProperties *properties =
            (VkPhysicalDevicePointClippingProperties *) ext;
         properties->pointClippingBehavior =
            VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES: {
         VkPhysicalDeviceMaintenance3Properties *properties =
            (VkPhysicalDeviceMaintenance3Properties *) ext;
         /* Make sure everything is addressable by a signed 32-bit int, and
          * our largest descriptors are 96 bytes. */
         properties->maxPerSetDescriptors = (1ull << 31) / 96;
         /* Our buffer size fields allow only this much */
         properties->maxMemoryAllocationSize = 0xFFFFFFFFull;
         break;
      }
      default:
         break;
      }
   }
}

static const VkQueueFamilyProperties tu_queue_family_properties = {
   .queueFlags =
      VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT,
   .queueCount = 1,
   .timestampValidBits = 64,
   .minImageTransferGranularity = (VkExtent3D) { 1, 1, 1 },
};
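
/* turnip exposes a single universal queue family (graphics + compute +
 * transfer); both queue-family query entry points below emit just this one
 * record.
 */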

void
tu_GetPhysicalDeviceQueueFamilyProperties(
   VkPhysicalDevice physicalDevice,
   uint32_t *pQueueFamilyPropertyCount,
   VkQueueFamilyProperties *pQueueFamilyProperties)
{
   VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);

   vk_outarray_append(&out, p) { *p = tu_queue_family_properties; }
}

void
tu_GetPhysicalDeviceQueueFamilyProperties2(
   VkPhysicalDevice physicalDevice,
   uint32_t *pQueueFamilyPropertyCount,
   VkQueueFamilyProperties2 *pQueueFamilyProperties)
{
   VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);

   vk_outarray_append(&out, p)
   {
      p->queueFamilyProperties = tu_queue_family_properties;
   }
}

static uint64_t
tu_get_system_heap_size(void)
{
   struct sysinfo info;
   sysinfo(&info);

   uint64_t total_ram = (uint64_t) info.totalram * (uint64_t) info.mem_unit;

   /* We don't want to burn too much ram with the GPU. If the user has 4GiB
    * or less, we use at most half. If they have more than 4GiB, we use 3/4.
    */
   uint64_t available_ram;
   if (total_ram <= 4ull * 1024ull * 1024ull * 1024ull)
      available_ram = total_ram / 2;
   else
      available_ram = total_ram * 3 / 4;

   return available_ram;
}
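
/* For example: a 4 GiB machine advertises a 2 GiB heap, while an 8 GiB
 * machine advertises 6 GiB (3/4 of total RAM).
 */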

void
tu_GetPhysicalDeviceMemoryProperties(
   VkPhysicalDevice physicalDevice,
   VkPhysicalDeviceMemoryProperties *pMemoryProperties)
{
   pMemoryProperties->memoryHeapCount = 1;
   pMemoryProperties->memoryHeaps[0].size = tu_get_system_heap_size();
   pMemoryProperties->memoryHeaps[0].flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT;

   pMemoryProperties->memoryTypeCount = 1;
   pMemoryProperties->memoryTypes[0].propertyFlags =
      VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
      VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
      VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
   pMemoryProperties->memoryTypes[0].heapIndex = 0;
}

void
tu_GetPhysicalDeviceMemoryProperties2(
   VkPhysicalDevice physicalDevice,
   VkPhysicalDeviceMemoryProperties2 *pMemoryProperties)
{
   return tu_GetPhysicalDeviceMemoryProperties(
      physicalDevice, &pMemoryProperties->memoryProperties);
}

static VkResult
tu_queue_init(struct tu_device *device,
              struct tu_queue *queue,
              uint32_t queue_family_index,
              int idx,
              VkDeviceQueueCreateFlags flags)
{
   queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   queue->device = device;
   queue->queue_family_index = queue_family_index;
   queue->queue_idx = idx;
   queue->flags = flags;

   int ret = tu_drm_submitqueue_new(device, 0, &queue->msm_queue_id);
   if (ret)
      return VK_ERROR_INITIALIZATION_FAILED;

   tu_fence_init(&queue->submit_fence, false);

   return VK_SUCCESS;
}

static void
tu_queue_finish(struct tu_queue *queue)
{
   tu_fence_finish(&queue->submit_fence);
   tu_drm_submitqueue_close(queue->device, queue->msm_queue_id);
}

static int
tu_get_device_extension_index(const char *name)
{
   for (unsigned i = 0; i < TU_DEVICE_EXTENSION_COUNT; ++i) {
      if (strcmp(name, tu_device_extensions[i].extensionName) == 0)
         return i;
   }
   return -1;
}

VkResult
tu_CreateDevice(VkPhysicalDevice physicalDevice,
                const VkDeviceCreateInfo *pCreateInfo,
                const VkAllocationCallbacks *pAllocator,
                VkDevice *pDevice)
{
   TU_FROM_HANDLE(tu_physical_device, physical_device, physicalDevice);
   VkResult result;
   struct tu_device *device;

   /* Check enabled features */
   if (pCreateInfo->pEnabledFeatures) {
      VkPhysicalDeviceFeatures supported_features;
      tu_GetPhysicalDeviceFeatures(physicalDevice, &supported_features);
      VkBool32 *supported_feature = (VkBool32 *) &supported_features;
      VkBool32 *enabled_feature = (VkBool32 *) pCreateInfo->pEnabledFeatures;
      unsigned num_features =
         sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
      for (uint32_t i = 0; i < num_features; i++) {
         if (enabled_feature[i] && !supported_feature[i])
            return vk_error(physical_device->instance,
                            VK_ERROR_FEATURE_NOT_PRESENT);
      }
   }

   device = vk_zalloc2(&physical_device->instance->alloc, pAllocator,
                       sizeof(*device), 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   if (!device)
      return vk_error(physical_device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   device->instance = physical_device->instance;
   device->physical_device = physical_device;

   if (pAllocator)
      device->alloc = *pAllocator;
   else
      device->alloc = physical_device->instance->alloc;

   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
      const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
      int index = tu_get_device_extension_index(ext_name);
      if (index < 0 ||
          !physical_device->supported_extensions.extensions[index]) {
         vk_free(&device->alloc, device);
         return vk_error(physical_device->instance,
                         VK_ERROR_EXTENSION_NOT_PRESENT);
      }

      device->enabled_extensions.extensions[index] = true;
   }

   for (unsigned i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
      const VkDeviceQueueCreateInfo *queue_create =
         &pCreateInfo->pQueueCreateInfos[i];
      uint32_t qfi = queue_create->queueFamilyIndex;
      device->queues[qfi] = vk_alloc(
         &device->alloc, queue_create->queueCount * sizeof(struct tu_queue),
         8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
      if (!device->queues[qfi]) {
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
         goto fail;
      }

      memset(device->queues[qfi], 0,
             queue_create->queueCount * sizeof(struct tu_queue));

      device->queue_count[qfi] = queue_create->queueCount;

      for (unsigned q = 0; q < queue_create->queueCount; q++) {
         result = tu_queue_init(device, &device->queues[qfi][q], qfi, q,
                                queue_create->flags);
         if (result != VK_SUCCESS)
            goto fail;
      }
   }

   device->compiler = ir3_compiler_create(NULL, physical_device->gpu_id);
   if (!device->compiler) {
      result = VK_ERROR_INITIALIZATION_FAILED;
      goto fail;
   }

   VkPipelineCacheCreateInfo ci;
   ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
   ci.pNext = NULL;
   ci.flags = 0;
   ci.pInitialData = NULL;
   ci.initialDataSize = 0;
   VkPipelineCache pc;
   result =
      tu_CreatePipelineCache(tu_device_to_handle(device), &ci, NULL, &pc);
   if (result != VK_SUCCESS)
      goto fail;

   device->mem_cache = tu_pipeline_cache_from_handle(pc);

   *pDevice = tu_device_to_handle(device);
   return VK_SUCCESS;

fail:
   for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
      for (unsigned q = 0; q < device->queue_count[i]; q++)
         tu_queue_finish(&device->queues[i][q]);
      if (device->queue_count[i])
         vk_free(&device->alloc, device->queues[i]);
   }

   if (device->compiler)
      ralloc_free(device->compiler);

   vk_free(&device->alloc, device);
   return result;
}

void
tu_DestroyDevice(VkDevice _device, const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   if (!device)
      return;

   for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
      for (unsigned q = 0; q < device->queue_count[i]; q++)
         tu_queue_finish(&device->queues[i][q]);
      if (device->queue_count[i])
         vk_free(&device->alloc, device->queues[i]);
   }

   /* the compiler does not use pAllocator */
   ralloc_free(device->compiler);

   VkPipelineCache pc = tu_pipeline_cache_to_handle(device->mem_cache);
   tu_DestroyPipelineCache(tu_device_to_handle(device), pc, NULL);

   vk_free(&device->alloc, device);
}

VkResult
tu_EnumerateInstanceLayerProperties(uint32_t *pPropertyCount,
                                    VkLayerProperties *pProperties)
{
   *pPropertyCount = 0;
   return VK_SUCCESS;
}

VkResult
tu_EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice,
                                  uint32_t *pPropertyCount,
                                  VkLayerProperties *pProperties)
{
   *pPropertyCount = 0;
   return VK_SUCCESS;
}

void
tu_GetDeviceQueue2(VkDevice _device,
                   const VkDeviceQueueInfo2 *pQueueInfo,
                   VkQueue *pQueue)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_queue *queue;

   queue =
      &device->queues[pQueueInfo->queueFamilyIndex][pQueueInfo->queueIndex];
   if (pQueueInfo->flags != queue->flags) {
      /* From the Vulkan 1.1.70 spec:
       *
       * "The queue returned by vkGetDeviceQueue2 must have the same
       * flags value from this structure as that used at device
       * creation time in a VkDeviceQueueCreateInfo instance. If no
       * matching flags were specified at device creation time then
       * pQueue will return VK_NULL_HANDLE."
       */
      *pQueue = VK_NULL_HANDLE;
      return;
   }

   *pQueue = tu_queue_to_handle(queue);
}

void
tu_GetDeviceQueue(VkDevice _device,
                  uint32_t queueFamilyIndex,
                  uint32_t queueIndex,
                  VkQueue *pQueue)
{
   const VkDeviceQueueInfo2 info =
      (VkDeviceQueueInfo2) { .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
                             .queueFamilyIndex = queueFamilyIndex,
                             .queueIndex = queueIndex };

   tu_GetDeviceQueue2(_device, &info, pQueue);
}

VkResult
tu_QueueSubmit(VkQueue _queue,
               uint32_t submitCount,
               const VkSubmitInfo *pSubmits,
               VkFence _fence)
{
   TU_FROM_HANDLE(tu_queue, queue, _queue);

   for (uint32_t i = 0; i < submitCount; ++i) {
      const VkSubmitInfo *submit = pSubmits + i;
      const bool last_submit = (i == submitCount - 1);
      struct tu_bo_list bo_list;
      tu_bo_list_init(&bo_list);

      uint32_t entry_count = 0;
      for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
         TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
         entry_count += cmdbuf->cs.entry_count;
      }
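
      /* Flatten every command-stream entry of every command buffer into one
       * drm_msm_gem_submit_cmd; submit_idx is the index of the entry's BO in
       * the bo_list handed to the kernel below.
       */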
      struct drm_msm_gem_submit_cmd cmds[entry_count];
      uint32_t entry_idx = 0;
      for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
         TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
         struct tu_cs *cs = &cmdbuf->cs;
         for (unsigned i = 0; i < cs->entry_count; ++i, ++entry_idx) {
            cmds[entry_idx].type = MSM_SUBMIT_CMD_BUF;
            cmds[entry_idx].submit_idx =
               tu_bo_list_add(&bo_list, cs->entries[i].bo,
                              MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
            cmds[entry_idx].submit_offset = cs->entries[i].offset;
            cmds[entry_idx].size = cs->entries[i].size;
            cmds[entry_idx].pad = 0;
            cmds[entry_idx].nr_relocs = 0;
            cmds[entry_idx].relocs = 0;
         }

         tu_bo_list_merge(&bo_list, &cmdbuf->bo_list);
      }

      uint32_t flags = MSM_PIPE_3D0;
      if (last_submit) {
         flags |= MSM_SUBMIT_FENCE_FD_OUT;
      }

      struct drm_msm_gem_submit req = {
         .flags = flags,
         .queueid = queue->msm_queue_id,
         .bos = (uint64_t)(uintptr_t) bo_list.bo_infos,
         .nr_bos = bo_list.count,
         .cmds = (uint64_t)(uintptr_t) cmds,
         .nr_cmds = entry_count,
      };

      int ret = drmCommandWriteRead(queue->device->physical_device->local_fd,
                                    DRM_MSM_GEM_SUBMIT,
                                    &req, sizeof(req));
      if (ret) {
         fprintf(stderr, "submit failed: %s\n", strerror(errno));
         abort();
      }

      tu_bo_list_destroy(&bo_list);

      if (last_submit) {
         /* no need to merge fences as queue execution is serialized */
         tu_fence_update_fd(&queue->submit_fence, req.fence_fd);
      }
   }

   if (_fence != VK_NULL_HANDLE) {
      TU_FROM_HANDLE(tu_fence, fence, _fence);
      tu_fence_copy(fence, &queue->submit_fence);
   }

   return VK_SUCCESS;
}

VkResult
tu_QueueWaitIdle(VkQueue _queue)
{
   TU_FROM_HANDLE(tu_queue, queue, _queue);

   tu_fence_wait_idle(&queue->submit_fence);

   return VK_SUCCESS;
}

VkResult
tu_DeviceWaitIdle(VkDevice _device)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
      for (unsigned q = 0; q < device->queue_count[i]; q++) {
         tu_QueueWaitIdle(tu_queue_to_handle(&device->queues[i][q]));
      }
   }
   return VK_SUCCESS;
}

VkResult
tu_EnumerateInstanceExtensionProperties(const char *pLayerName,
                                        uint32_t *pPropertyCount,
                                        VkExtensionProperties *pProperties)
{
   VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);

   /* We support no layers */
   if (pLayerName)
      return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);

   for (int i = 0; i < TU_INSTANCE_EXTENSION_COUNT; i++) {
      if (tu_supported_instance_extensions.extensions[i]) {
         vk_outarray_append(&out, prop) { *prop = tu_instance_extensions[i]; }
      }
   }

   return vk_outarray_status(&out);
}

VkResult
tu_EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
                                      const char *pLayerName,
                                      uint32_t *pPropertyCount,
                                      VkExtensionProperties *pProperties)
{
   TU_FROM_HANDLE(tu_physical_device, device, physicalDevice);
   VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);

   /* We support no layers */
  1159. if (pLayerName)
  1160. return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
  1161. for (int i = 0; i < TU_DEVICE_EXTENSION_COUNT; i++) {
  1162. if (device->supported_extensions.extensions[i]) {
  1163. vk_outarray_append(&out, prop) { *prop = tu_device_extensions[i]; }
  1164. }
  1165. }
  1166. return vk_outarray_status(&out);
  1167. }
  1168. PFN_vkVoidFunction
  1169. tu_GetInstanceProcAddr(VkInstance _instance, const char *pName)
  1170. {
  1171. TU_FROM_HANDLE(tu_instance, instance, _instance);
  1172. return tu_lookup_entrypoint_checked(
  1173. pName, instance ? instance->api_version : 0,
  1174. instance ? &instance->enabled_extensions : NULL, NULL);
  1175. }
  1176. /* The loader wants us to expose a second GetInstanceProcAddr function
  1177. * to work around certain LD_PRELOAD issues seen in apps.
  1178. */
  1179. PUBLIC
  1180. VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
  1181. vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName);
  1182. PUBLIC
  1183. VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
  1184. vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName)
  1185. {
  1186. return tu_GetInstanceProcAddr(instance, pName);
  1187. }
  1188. PFN_vkVoidFunction
  1189. tu_GetDeviceProcAddr(VkDevice _device, const char *pName)
  1190. {
  1191. TU_FROM_HANDLE(tu_device, device, _device);
  1192. return tu_lookup_entrypoint_checked(pName, device->instance->api_version,
  1193. &device->instance->enabled_extensions,
  1194. &device->enabled_extensions);
  1195. }
static VkResult
tu_alloc_memory(struct tu_device *device,
                const VkMemoryAllocateInfo *pAllocateInfo,
                const VkAllocationCallbacks *pAllocator,
                VkDeviceMemory *pMem)
{
   struct tu_device_memory *mem;
   VkResult result;

   assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);

   if (pAllocateInfo->allocationSize == 0) {
      /* Apparently, this is allowed */
      *pMem = VK_NULL_HANDLE;
      return VK_SUCCESS;
   }

   mem = vk_alloc2(&device->alloc, pAllocator, sizeof(*mem), 8,
                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (mem == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   const VkImportMemoryFdInfoKHR *fd_info =
      vk_find_struct_const(pAllocateInfo->pNext, IMPORT_MEMORY_FD_INFO_KHR);
   if (fd_info && !fd_info->handleType)
      fd_info = NULL;

   if (fd_info) {
      assert(fd_info->handleType ==
                VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
             fd_info->handleType ==
                VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);

      /*
       * TODO Importing the same fd twice gives us the same handle without
       * reference counting. We need to maintain a per-instance handle-to-bo
       * table and add a reference count to tu_bo (a sketch follows this
       * function).
       */
      result = tu_bo_init_dmabuf(device, &mem->bo,
                                 pAllocateInfo->allocationSize, fd_info->fd);
      if (result == VK_SUCCESS) {
         /* take ownership and close the fd */
         close(fd_info->fd);
      }
   } else {
      result =
         tu_bo_init_new(device, &mem->bo, pAllocateInfo->allocationSize);
   }

   if (result != VK_SUCCESS) {
      vk_free2(&device->alloc, pAllocator, mem);
      return result;
   }

   mem->size = pAllocateInfo->allocationSize;
   mem->type_index = pAllocateInfo->memoryTypeIndex;
   mem->map = NULL;
   mem->user_ptr = NULL;

   *pMem = tu_device_memory_to_handle(mem);

   return VK_SUCCESS;
}

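/* A hedged sketch of the handle-to-bo table the TODO above calls for,
 * assuming mesa's util/hash_table.h; tu_bo_cache and the refcount field
 * are hypothetical, nothing below exists in this tree:
 *
 *    struct tu_bo_cache {
 *       struct hash_table *ht;   // gem_handle -> struct tu_bo *
 *    };
 *
 *    // On import: if the kernel returns a gem_handle we already track,
 *    // bump the refcount instead of wrapping it in a second tu_bo.
 *    struct tu_bo *
 *    tu_bo_cache_lookup_or_insert(struct tu_bo_cache *cache,
 *                                 uint32_t gem_handle, struct tu_bo *bo)
 *    {
 *       struct hash_entry *entry =
 *          _mesa_hash_table_search(cache->ht, (void *)(uintptr_t)gem_handle);
 *       if (entry) {
 *          struct tu_bo *cached = entry->data;
 *          cached->refcount++;   // assumes a refcount field added to tu_bo
 *          return cached;
 *       }
 *       bo->refcount = 1;
 *       _mesa_hash_table_insert(cache->ht, (void *)(uintptr_t)gem_handle, bo);
 *       return bo;
 *    }
 *
 * Freeing would then decrement and only close the GEM handle at zero.
 */
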
VkResult
tu_AllocateMemory(VkDevice _device,
                  const VkMemoryAllocateInfo *pAllocateInfo,
                  const VkAllocationCallbacks *pAllocator,
                  VkDeviceMemory *pMem)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   return tu_alloc_memory(device, pAllocateInfo, pAllocator, pMem);
}

void
tu_FreeMemory(VkDevice _device,
              VkDeviceMemory _mem,
              const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_device_memory, mem, _mem);

   if (mem == NULL)
      return;

   tu_bo_finish(device, &mem->bo);
   vk_free2(&device->alloc, pAllocator, mem);
}

VkResult
tu_MapMemory(VkDevice _device,
             VkDeviceMemory _memory,
             VkDeviceSize offset,
             VkDeviceSize size,
             VkMemoryMapFlags flags,
             void **ppData)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_device_memory, mem, _memory);
   VkResult result;

   if (mem == NULL) {
      *ppData = NULL;
      return VK_SUCCESS;
   }

   if (mem->user_ptr) {
      *ppData = mem->user_ptr;
   } else if (!mem->map) {
      result = tu_bo_map(device, &mem->bo);
      if (result != VK_SUCCESS)
         return result;
      *ppData = mem->map = mem->bo.map;
   } else
      *ppData = mem->map;

   if (*ppData) {
      *ppData += offset;
      return VK_SUCCESS;
   }

   return vk_error(device->instance, VK_ERROR_MEMORY_MAP_FAILED);
}

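/* App-side usage, for context (a minimal sketch; the pointer handed back
 * already includes `offset`, and the `*ppData += offset` above relies on
 * GNU void-pointer arithmetic, which mesa builds with):
 *
 *    void *data;
 *    if (vkMapMemory(device, memory, 0, VK_WHOLE_SIZE, 0, &data) ==
 *        VK_SUCCESS) {
 *       memcpy(data, src, size);
 *       vkUnmapMemory(device, memory);
 *    }
 */
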
void
tu_UnmapMemory(VkDevice _device, VkDeviceMemory _memory)
{
   /* I do not see any unmapping done by the freedreno Gallium driver. */
}

VkResult
tu_FlushMappedMemoryRanges(VkDevice _device,
                           uint32_t memoryRangeCount,
                           const VkMappedMemoryRange *pMemoryRanges)
{
   return VK_SUCCESS;
}

VkResult
tu_InvalidateMappedMemoryRanges(VkDevice _device,
                                uint32_t memoryRangeCount,
                                const VkMappedMemoryRange *pMemoryRanges)
{
   return VK_SUCCESS;
}

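/* Treating flush/invalidate as no-ops above is presumably safe only as
 * long as every memory type the driver advertises is HOST_COHERENT; a
 * non-coherent type would require real cache maintenance here.
 */
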
void
tu_GetBufferMemoryRequirements(VkDevice _device,
                               VkBuffer _buffer,
                               VkMemoryRequirements *pMemoryRequirements)
{
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);

   pMemoryRequirements->memoryTypeBits = 1;
   pMemoryRequirements->alignment = 16;
   pMemoryRequirements->size =
      align64(buffer->size, pMemoryRequirements->alignment);
}

void
tu_GetBufferMemoryRequirements2(
   VkDevice device,
   const VkBufferMemoryRequirementsInfo2 *pInfo,
   VkMemoryRequirements2 *pMemoryRequirements)
{
   tu_GetBufferMemoryRequirements(device, pInfo->buffer,
                                  &pMemoryRequirements->memoryRequirements);
}

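/* A fuller *2 implementation would also walk the output pNext chain,
 * e.g. for VkMemoryDedicatedRequirements; a hedged sketch using mesa's
 * vk_find_struct() (not present in this function today):
 *
 *    VkMemoryDedicatedRequirements *dedicated =
 *       vk_find_struct(pMemoryRequirements->pNext,
 *                      MEMORY_DEDICATED_REQUIREMENTS);
 *    if (dedicated) {
 *       dedicated->prefersDedicatedAllocation = false;
 *       dedicated->requiresDedicatedAllocation = false;
 *    }
 */
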
void
tu_GetImageMemoryRequirements(VkDevice _device,
                              VkImage _image,
                              VkMemoryRequirements *pMemoryRequirements)
{
   TU_FROM_HANDLE(tu_image, image, _image);

   pMemoryRequirements->memoryTypeBits = 1;
   pMemoryRequirements->size = image->size;
   pMemoryRequirements->alignment = image->alignment;
}

void
tu_GetImageMemoryRequirements2(VkDevice device,
                               const VkImageMemoryRequirementsInfo2 *pInfo,
                               VkMemoryRequirements2 *pMemoryRequirements)
{
   tu_GetImageMemoryRequirements(device, pInfo->image,
                                 &pMemoryRequirements->memoryRequirements);
}

void
tu_GetImageSparseMemoryRequirements(
   VkDevice device,
   VkImage image,
   uint32_t *pSparseMemoryRequirementCount,
   VkSparseImageMemoryRequirements *pSparseMemoryRequirements)
{
   tu_stub();
}

void
tu_GetImageSparseMemoryRequirements2(
   VkDevice device,
   const VkImageSparseMemoryRequirementsInfo2 *pInfo,
   uint32_t *pSparseMemoryRequirementCount,
   VkSparseImageMemoryRequirements2 *pSparseMemoryRequirements)
{
   tu_stub();
}

void
tu_GetDeviceMemoryCommitment(VkDevice device,
                             VkDeviceMemory memory,
                             VkDeviceSize *pCommittedMemoryInBytes)
{
   *pCommittedMemoryInBytes = 0;
}

VkResult
tu_BindBufferMemory2(VkDevice device,
                     uint32_t bindInfoCount,
                     const VkBindBufferMemoryInfo *pBindInfos)
{
   for (uint32_t i = 0; i < bindInfoCount; ++i) {
      TU_FROM_HANDLE(tu_device_memory, mem, pBindInfos[i].memory);
      TU_FROM_HANDLE(tu_buffer, buffer, pBindInfos[i].buffer);

      if (mem) {
         buffer->bo = &mem->bo;
         buffer->bo_offset = pBindInfos[i].memoryOffset;
      } else {
         buffer->bo = NULL;
      }
   }
   return VK_SUCCESS;
}

VkResult
tu_BindBufferMemory(VkDevice device,
                    VkBuffer buffer,
                    VkDeviceMemory memory,
                    VkDeviceSize memoryOffset)
{
   const VkBindBufferMemoryInfo info = {
      .sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO,
      .buffer = buffer,
      .memory = memory,
      .memoryOffset = memoryOffset
   };

   return tu_BindBufferMemory2(device, 1, &info);
}

VkResult
tu_BindImageMemory2(VkDevice device,
                    uint32_t bindInfoCount,
                    const VkBindImageMemoryInfo *pBindInfos)
{
   for (uint32_t i = 0; i < bindInfoCount; ++i) {
      TU_FROM_HANDLE(tu_image, image, pBindInfos[i].image);
      TU_FROM_HANDLE(tu_device_memory, mem, pBindInfos[i].memory);

      if (mem) {
         image->bo = &mem->bo;
         image->bo_offset = pBindInfos[i].memoryOffset;
      } else {
         image->bo = NULL;
         image->bo_offset = 0;
      }
   }

   return VK_SUCCESS;
}

VkResult
tu_BindImageMemory(VkDevice device,
                   VkImage image,
                   VkDeviceMemory memory,
                   VkDeviceSize memoryOffset)
{
   const VkBindImageMemoryInfo info = {
      .sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO,
      .image = image,
      .memory = memory,
      .memoryOffset = memoryOffset
   };

   return tu_BindImageMemory2(device, 1, &info);
}

VkResult
tu_QueueBindSparse(VkQueue _queue,
                   uint32_t bindInfoCount,
                   const VkBindSparseInfo *pBindInfo,
                   VkFence _fence)
{
   return VK_SUCCESS;
}

// Queue semaphore functions

VkResult
tu_CreateSemaphore(VkDevice _device,
                   const VkSemaphoreCreateInfo *pCreateInfo,
                   const VkAllocationCallbacks *pAllocator,
                   VkSemaphore *pSemaphore)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   struct tu_semaphore *sem =
      vk_alloc2(&device->alloc, pAllocator, sizeof(*sem), 8,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!sem)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   *pSemaphore = tu_semaphore_to_handle(sem);
   return VK_SUCCESS;
}

void
tu_DestroySemaphore(VkDevice _device,
                    VkSemaphore _semaphore,
                    const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_semaphore, sem, _semaphore);

   if (!_semaphore)
      return;

   vk_free2(&device->alloc, pAllocator, sem);
}

VkResult
tu_CreateEvent(VkDevice _device,
               const VkEventCreateInfo *pCreateInfo,
               const VkAllocationCallbacks *pAllocator,
               VkEvent *pEvent)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_event *event =
      vk_alloc2(&device->alloc, pAllocator, sizeof(*event), 8,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

   if (!event)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   *pEvent = tu_event_to_handle(event);

   return VK_SUCCESS;
}

void
tu_DestroyEvent(VkDevice _device,
                VkEvent _event,
                const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_event, event, _event);

   if (!event)
      return;

   vk_free2(&device->alloc, pAllocator, event);
}

VkResult
tu_GetEventStatus(VkDevice _device, VkEvent _event)
{
   TU_FROM_HANDLE(tu_event, event, _event);

   if (*event->map == 1)
      return VK_EVENT_SET;
   return VK_EVENT_RESET;
}

VkResult
tu_SetEvent(VkDevice _device, VkEvent _event)
{
   TU_FROM_HANDLE(tu_event, event, _event);
   *event->map = 1;

   return VK_SUCCESS;
}

VkResult
tu_ResetEvent(VkDevice _device, VkEvent _event)
{
   TU_FROM_HANDLE(tu_event, event, _event);
   *event->map = 0;

   return VK_SUCCESS;
}

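/* The events above are a single host-visible word: 1 == set, 0 == reset.
 * Note that tu_CreateEvent never initializes event->map in this version,
 * so Get/Set/ResetEvent would dereference an unset pointer; a complete
 * implementation would back the event with a small mapped BO at create
 * time and have the device-side vkCmdSetEvent/vkCmdResetEvent paths write
 * the same word.
 */
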
VkResult
tu_CreateBuffer(VkDevice _device,
                const VkBufferCreateInfo *pCreateInfo,
                const VkAllocationCallbacks *pAllocator,
                VkBuffer *pBuffer)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_buffer *buffer;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);

   buffer = vk_alloc2(&device->alloc, pAllocator, sizeof(*buffer), 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (buffer == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   buffer->size = pCreateInfo->size;
   buffer->usage = pCreateInfo->usage;
   buffer->flags = pCreateInfo->flags;

   *pBuffer = tu_buffer_to_handle(buffer);

   return VK_SUCCESS;
}

void
tu_DestroyBuffer(VkDevice _device,
                 VkBuffer _buffer,
                 const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);

   if (!buffer)
      return;

   vk_free2(&device->alloc, pAllocator, buffer);
}

static uint32_t
tu_surface_max_layer_count(struct tu_image_view *iview)
{
   return iview->type == VK_IMAGE_VIEW_TYPE_3D
             ? iview->extent.depth
             : (iview->base_layer + iview->layer_count);
}

VkResult
tu_CreateFramebuffer(VkDevice _device,
                     const VkFramebufferCreateInfo *pCreateInfo,
                     const VkAllocationCallbacks *pAllocator,
                     VkFramebuffer *pFramebuffer)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_framebuffer *framebuffer;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);

   size_t size = sizeof(*framebuffer) + sizeof(struct tu_attachment_info) *
                                           pCreateInfo->attachmentCount;
   framebuffer = vk_alloc2(&device->alloc, pAllocator, size, 8,
                           VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (framebuffer == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   framebuffer->attachment_count = pCreateInfo->attachmentCount;
   framebuffer->width = pCreateInfo->width;
   framebuffer->height = pCreateInfo->height;
   framebuffer->layers = pCreateInfo->layers;
   for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
      VkImageView _iview = pCreateInfo->pAttachments[i];
      struct tu_image_view *iview = tu_image_view_from_handle(_iview);
      framebuffer->attachments[i].attachment = iview;

      framebuffer->width = MIN2(framebuffer->width, iview->extent.width);
      framebuffer->height = MIN2(framebuffer->height, iview->extent.height);
      framebuffer->layers =
         MIN2(framebuffer->layers, tu_surface_max_layer_count(iview));
   }

   *pFramebuffer = tu_framebuffer_to_handle(framebuffer);
   return VK_SUCCESS;
}

void
tu_DestroyFramebuffer(VkDevice _device,
                      VkFramebuffer _fb,
                      const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_framebuffer, fb, _fb);

   if (!fb)
      return;

   vk_free2(&device->alloc, pAllocator, fb);
}

static void
tu_init_sampler(struct tu_device *device,
                struct tu_sampler *sampler,
                const VkSamplerCreateInfo *pCreateInfo)
{
}

VkResult
tu_CreateSampler(VkDevice _device,
                 const VkSamplerCreateInfo *pCreateInfo,
                 const VkAllocationCallbacks *pAllocator,
                 VkSampler *pSampler)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_sampler *sampler;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);

   sampler = vk_alloc2(&device->alloc, pAllocator, sizeof(*sampler), 8,
                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!sampler)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   tu_init_sampler(device, sampler, pCreateInfo);
   *pSampler = tu_sampler_to_handle(sampler);

   return VK_SUCCESS;
}

void
tu_DestroySampler(VkDevice _device,
                  VkSampler _sampler,
                  const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_sampler, sampler, _sampler);

   if (!sampler)
      return;

   vk_free2(&device->alloc, pAllocator, sampler);
}

/* vk_icd.h does not declare this function, so we declare it here to
 * suppress Wmissing-prototypes.
 */
PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion);

PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion)
{
   /* For the full details on loader interface versioning, see
    * <https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/blob/master/loader/LoaderAndLayerInterface.md>.
    * What follows is a condensed summary, to help you navigate the large and
    * confusing official doc.
    *
    * - Loader interface v0 is incompatible with later versions. We don't
    *   support it.
    *
    * - In loader interface v1:
    *    - The first ICD entrypoint called by the loader is
    *      vk_icdGetInstanceProcAddr(). The ICD must statically expose this
    *      entrypoint.
    *    - The ICD must statically expose no other Vulkan symbol unless it
    *      is linked with -Bsymbolic.
    *    - Each dispatchable Vulkan handle created by the ICD must be
    *      a pointer to a struct whose first member is VK_LOADER_DATA. The
    *      ICD must initialize VK_LOADER_DATA.loadMagic to
    *      ICD_LOADER_MAGIC.
    *    - The loader implements vkCreate{PLATFORM}SurfaceKHR() and
    *      vkDestroySurfaceKHR(). The ICD must be capable of working with
    *      such loader-managed surfaces.
    *
    * - Loader interface v2 differs from v1 in:
    *    - The first ICD entrypoint called by the loader is
    *      vk_icdNegotiateLoaderICDInterfaceVersion(). The ICD must
    *      statically expose this entrypoint.
    *
    * - Loader interface v3 differs from v2 in:
    *    - The ICD must implement vkCreate{PLATFORM}SurfaceKHR(),
    *      vkDestroySurfaceKHR(), and other API that uses VkSurfaceKHR,
    *      because the loader no longer does so.
    */
   *pSupportedVersion = MIN2(*pSupportedVersion, 3u);
   return VK_SUCCESS;
}

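/* Worked example of the negotiation: a loader that itself supports
 * interface v4 calls this with *pSupportedVersion == 4; we clamp it to 3
 * and both sides then follow the v3 contract. A v0/v1 loader never calls
 * this entrypoint at all, which is why vk_icdGetInstanceProcAddr() above
 * must still be exposed statically.
 */
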
VkResult
tu_GetMemoryFdKHR(VkDevice _device,
                  const VkMemoryGetFdInfoKHR *pGetFdInfo,
                  int *pFd)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_device_memory, memory, pGetFdInfo->memory);

   assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR);

   /* At the moment, we support only the below handle types. */
   assert(pGetFdInfo->handleType ==
             VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
          pGetFdInfo->handleType ==
             VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);

   int prime_fd = tu_bo_export_dmabuf(device, &memory->bo);
   if (prime_fd < 0)
      return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);

   *pFd = prime_fd;
   return VK_SUCCESS;
}

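/* App-side export sketch, for context (error handling elided; the caller
 * owns the returned fd):
 *
 *    VkMemoryGetFdInfoKHR get_fd = {
 *       .sType = VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR,
 *       .memory = memory,
 *       .handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,
 *    };
 *    int fd = -1;
 *    vkGetMemoryFdKHR(device, &get_fd, &fd);
 *    // fd can now be passed to another process or API; importing it back
 *    // via VkImportMemoryFdInfoKHR transfers ownership to the driver
 *    // (see tu_alloc_memory(), which closes it on success).
 */
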
VkResult
tu_GetMemoryFdPropertiesKHR(VkDevice _device,
                            VkExternalMemoryHandleTypeFlagBits handleType,
                            int fd,
                            VkMemoryFdPropertiesKHR *pMemoryFdProperties)
{
   assert(handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
   pMemoryFdProperties->memoryTypeBits = 1;
   return VK_SUCCESS;
}

void
tu_GetPhysicalDeviceExternalSemaphoreProperties(
   VkPhysicalDevice physicalDevice,
   const VkPhysicalDeviceExternalSemaphoreInfo *pExternalSemaphoreInfo,
   VkExternalSemaphoreProperties *pExternalSemaphoreProperties)
{
   pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
   pExternalSemaphoreProperties->compatibleHandleTypes = 0;
   pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
}

void
tu_GetPhysicalDeviceExternalFenceProperties(
   VkPhysicalDevice physicalDevice,
   const VkPhysicalDeviceExternalFenceInfo *pExternalFenceInfo,
   VkExternalFenceProperties *pExternalFenceProperties)
{
   pExternalFenceProperties->exportFromImportedHandleTypes = 0;
   pExternalFenceProperties->compatibleHandleTypes = 0;
   pExternalFenceProperties->externalFenceFeatures = 0;
}

VkResult
tu_CreateDebugReportCallbackEXT(
   VkInstance _instance,
   const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
   const VkAllocationCallbacks *pAllocator,
   VkDebugReportCallbackEXT *pCallback)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   return vk_create_debug_report_callback(&instance->debug_report_callbacks,
                                          pCreateInfo, pAllocator,
                                          &instance->alloc, pCallback);
}

void
tu_DestroyDebugReportCallbackEXT(VkInstance _instance,
                                 VkDebugReportCallbackEXT _callback,
                                 const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   vk_destroy_debug_report_callback(&instance->debug_report_callbacks,
                                    _callback, pAllocator, &instance->alloc);
}

void
tu_DebugReportMessageEXT(VkInstance _instance,
                         VkDebugReportFlagsEXT flags,
                         VkDebugReportObjectTypeEXT objectType,
                         uint64_t object,
                         size_t location,
                         int32_t messageCode,
                         const char *pLayerPrefix,
                         const char *pMessage)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   vk_debug_report(&instance->debug_report_callbacks, flags, objectType,
                   object, location, messageCode, pLayerPrefix, pMessage);
}

void
tu_GetDeviceGroupPeerMemoryFeatures(
   VkDevice device,
   uint32_t heapIndex,
   uint32_t localDeviceIndex,
   uint32_t remoteDeviceIndex,
   VkPeerMemoryFeatureFlags *pPeerMemoryFeatures)
{
   assert(localDeviceIndex == remoteDeviceIndex);

   *pPeerMemoryFeatures = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT |
                          VK_PEER_MEMORY_FEATURE_COPY_DST_BIT |
                          VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT |
                          VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT;
}