Clone of mesa.

anv_device.c (102 KB)

  1. /*
  2. * Copyright © 2015 Intel Corporation
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a
  5. * copy of this software and associated documentation files (the "Software"),
  6. * to deal in the Software without restriction, including without limitation
  7. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8. * and/or sell copies of the Software, and to permit persons to whom the
  9. * Software is furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice (including the next
  12. * paragraph) shall be included in all copies or substantial portions of the
  13. * Software.
  14. *
  15. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  18. * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20. * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  21. * IN THE SOFTWARE.
  22. */
  23. #include <assert.h>
  24. #include <stdbool.h>
  25. #include <string.h>
  26. #include <sys/mman.h>
  27. #include <sys/sysinfo.h>
  28. #include <unistd.h>
  29. #include <fcntl.h>
  30. #include <xf86drm.h>
  31. #include <drm_fourcc.h>
  32. #include "anv_private.h"
  33. #include "util/strtod.h"
  34. #include "util/debug.h"
  35. #include "util/build_id.h"
  36. #include "util/mesa-sha1.h"
  37. #include "vk_util.h"
  38. #include "common/gen_defines.h"
  39. #include "genxml/gen7_pack.h"
  40. static void
  41. compiler_debug_log(void *data, const char *fmt, ...)
  42. { }
  43. static void
  44. compiler_perf_log(void *data, const char *fmt, ...)
  45. {
  46. va_list args;
  47. va_start(args, fmt);
  48. if (unlikely(INTEL_DEBUG & DEBUG_PERF))
  49. intel_logd_v(fmt, args);
  50. va_end(args);
  51. }
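/* A rough worked example of the heap-size computation below (hypothetical
 * numbers): with 16 GiB of system RAM (> 4 GiB), available_ram is
 * 16 * 3/4 = 12 GiB; with a 4 GiB GTT, available_gtt is 3 GiB, so the
 * advertised heap is MIN2(12 GiB, 3 GiB) = 3 GiB. On kernels exposing a
 * full 48-bit PPGTT the GTT term is effectively unbounded and the heap is
 * limited by RAM alone.
 */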
  52. static VkResult
  53. anv_compute_heap_size(int fd, uint64_t gtt_size, uint64_t *heap_size)
  54. {
  55. /* Query the total ram from the system */
  56. struct sysinfo info;
  57. sysinfo(&info);
  58. uint64_t total_ram = (uint64_t)info.totalram * (uint64_t)info.mem_unit;
  59. /* We don't want to burn too much ram with the GPU. If the user has 4GiB
  60. * or less, we use at most half. If they have more than 4GiB, we use 3/4.
  61. */
  62. uint64_t available_ram;
  63. if (total_ram <= 4ull * 1024ull * 1024ull * 1024ull)
  64. available_ram = total_ram / 2;
  65. else
  66. available_ram = total_ram * 3 / 4;
  67. /* We also want to leave some padding for things we allocate in the driver,
  68. * so don't go over 3/4 of the GTT either.
  69. */
  70. uint64_t available_gtt = gtt_size * 3 / 4;
  71. *heap_size = MIN2(available_ram, available_gtt);
  72. return VK_SUCCESS;
  73. }
  74. static VkResult
  75. anv_physical_device_init_heaps(struct anv_physical_device *device, int fd)
  76. {
  77. uint64_t gtt_size;
  78. if (anv_gem_get_context_param(fd, 0, I915_CONTEXT_PARAM_GTT_SIZE,
  79. &gtt_size) == -1) {
  80. /* If, for whatever reason, we can't actually get the GTT size from the
  81. * kernel (too old?), fall back to the aperture size.
  82. */
  83. anv_perf_warn(NULL, NULL,
  84. "Failed to get I915_CONTEXT_PARAM_GTT_SIZE: %m");
  85. if (anv_gem_get_aperture(fd, &gtt_size) == -1) {
  86. return vk_errorf(NULL, NULL, VK_ERROR_INITIALIZATION_FAILED,
  87. "failed to get aperture size: %m");
  88. }
  89. }
  90. device->supports_48bit_addresses = (device->info.gen >= 8) &&
  91. gtt_size > (4ULL << 30 /* GiB */);
  92. uint64_t heap_size = 0;
  93. VkResult result = anv_compute_heap_size(fd, gtt_size, &heap_size);
  94. if (result != VK_SUCCESS)
  95. return result;
  96. if (heap_size > (2ull << 30) && !device->supports_48bit_addresses) {
  97. /* When running with an overridden PCI ID, we may get a GTT size from
  98. * the kernel that is greater than 2 GiB but the execbuf check for 48bit
  99. * address support can still fail. Just clamp the address space size to
  100. * 2 GiB if we don't have 48-bit support.
  101. */
  102. intel_logw("%s:%d: The kernel reported a GTT size larger than 2 GiB but "
  103. "not support for 48-bit addresses",
  104. __FILE__, __LINE__);
  105. heap_size = 2ull << 30;
  106. }
  107. if (heap_size <= 3ull * (1ull << 30)) {
  108. /* In this case, everything fits nicely into the 32-bit address space,
  109. * so there's no need for supporting 48bit addresses on client-allocated
  110. * memory objects.
  111. */
  112. device->memory.heap_count = 1;
  113. device->memory.heaps[0] = (struct anv_memory_heap) {
  114. .size = heap_size,
  115. .flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
  116. .supports_48bit_addresses = false,
  117. };
  118. } else {
  119. /* Not everything will fit nicely into a 32-bit address space. In this
  120. * case we need a 64-bit heap. Advertise a small 32-bit heap and a
  121. * larger 48-bit heap. If we're in this case, then we have a total heap
  122. * size larger than 3GiB which most likely means they have 8 GiB of
  123. * video memory and so carving off 1 GiB for the 32-bit heap should be
  124. * reasonable.
  125. */
  126. const uint64_t heap_size_32bit = 1ull << 30;
  127. const uint64_t heap_size_48bit = heap_size - heap_size_32bit;
  128. assert(device->supports_48bit_addresses);
  129. device->memory.heap_count = 2;
  130. device->memory.heaps[0] = (struct anv_memory_heap) {
  131. .size = heap_size_48bit,
  132. .flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
  133. .supports_48bit_addresses = true,
  134. };
  135. device->memory.heaps[1] = (struct anv_memory_heap) {
  136. .size = heap_size_32bit,
  137. .flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
  138. .supports_48bit_addresses = false,
  139. };
  140. }
  141. uint32_t type_count = 0;
  142. for (uint32_t heap = 0; heap < device->memory.heap_count; heap++) {
  143. uint32_t valid_buffer_usage = ~0;
  144. /* There appears to be a hardware issue in the VF cache where it only
  145. * considers the bottom 32 bits of memory addresses. If you happen to
  146. * have two vertex buffers which get placed exactly 4 GiB apart and use
  147. * them in back-to-back draw calls, you can get collisions. In order to
  148. * solve this problem, we require vertex and index buffers be bound to
  149. * memory allocated out of the 32-bit heap.
  150. */
  151. if (device->memory.heaps[heap].supports_48bit_addresses) {
  152. valid_buffer_usage &= ~(VK_BUFFER_USAGE_INDEX_BUFFER_BIT |
  153. VK_BUFFER_USAGE_VERTEX_BUFFER_BIT);
  154. }
  155. if (device->info.has_llc) {
  156. /* Big core GPUs share LLC with the CPU and thus one memory type can be
  157. * both cached and coherent at the same time.
  158. */
  159. device->memory.types[type_count++] = (struct anv_memory_type) {
  160. .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
  161. VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
  162. VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
  163. VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
  164. .heapIndex = heap,
  165. .valid_buffer_usage = valid_buffer_usage,
  166. };
  167. } else {
  168. /* The spec requires that we expose a host-visible, coherent memory
  169. * type, but Atom GPUs don't share LLC. Thus we offer two memory types
  170. * to give the application a choice between cached, but not coherent and
  171. * coherent but uncached (WC though).
  172. */
  173. device->memory.types[type_count++] = (struct anv_memory_type) {
  174. .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
  175. VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
  176. VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
  177. .heapIndex = heap,
  178. .valid_buffer_usage = valid_buffer_usage,
  179. };
  180. device->memory.types[type_count++] = (struct anv_memory_type) {
  181. .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
  182. VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
  183. VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
  184. .heapIndex = heap,
  185. .valid_buffer_usage = valid_buffer_usage,
  186. };
  187. }
  188. }
  189. device->memory.type_count = type_count;
  190. return VK_SUCCESS;
  191. }
  192. static VkResult
  193. anv_physical_device_init_uuids(struct anv_physical_device *device)
  194. {
  195. const struct build_id_note *note =
  196. build_id_find_nhdr_for_addr(anv_physical_device_init_uuids);
  197. if (!note) {
  198. return vk_errorf(device->instance, device,
  199. VK_ERROR_INITIALIZATION_FAILED,
  200. "Failed to find build-id");
  201. }
  202. unsigned build_id_len = build_id_length(note);
  203. if (build_id_len < 20) {
  204. return vk_errorf(device->instance, device,
  205. VK_ERROR_INITIALIZATION_FAILED,
  206. "build-id too short. It needs to be a SHA");
  207. }
  208. struct mesa_sha1 sha1_ctx;
  209. uint8_t sha1[20];
  210. STATIC_ASSERT(VK_UUID_SIZE <= sizeof(sha1));
  211. /* The pipeline cache UUID is used for determining when a pipeline cache is
  212. * invalid. It needs both a driver build and the PCI ID of the device.
  213. */
  214. _mesa_sha1_init(&sha1_ctx);
  215. _mesa_sha1_update(&sha1_ctx, build_id_data(note), build_id_len);
  216. _mesa_sha1_update(&sha1_ctx, &device->chipset_id,
  217. sizeof(device->chipset_id));
  218. _mesa_sha1_final(&sha1_ctx, sha1);
  219. memcpy(device->pipeline_cache_uuid, sha1, VK_UUID_SIZE);
  220. /* The driver UUID is used for determining sharability of images and memory
  221. * between two Vulkan instances in separate processes. People who want to
  222. * share memory need to also check the device UUID (below) so all this
  223. * needs to be is the build-id.
  224. */
  225. memcpy(device->driver_uuid, build_id_data(note), VK_UUID_SIZE);
  226. /* The device UUID uniquely identifies the given device within the machine.
  227. * Since we never have more than one device, this doesn't need to be a real
  228. * UUID. However, on the off-chance that someone tries to use this to
  229. * cache pre-tiled images or something of the like, we use the PCI ID and
  230. * some bits of ISL info to ensure that this is safe.
  231. */
  232. _mesa_sha1_init(&sha1_ctx);
  233. _mesa_sha1_update(&sha1_ctx, &device->chipset_id,
  234. sizeof(device->chipset_id));
  235. _mesa_sha1_update(&sha1_ctx, &device->isl_dev.has_bit6_swizzling,
  236. sizeof(device->isl_dev.has_bit6_swizzling));
  237. _mesa_sha1_final(&sha1_ctx, sha1);
  238. memcpy(device->device_uuid, sha1, VK_UUID_SIZE);
  239. return VK_SUCCESS;
  240. }
  241. static VkResult
  242. anv_physical_device_init(struct anv_physical_device *device,
  243. struct anv_instance *instance,
  244. const char *primary_path,
  245. const char *path)
  246. {
  247. VkResult result;
  248. int fd;
  249. int master_fd = -1;
  250. brw_process_intel_debug_variable();
  251. fd = open(path, O_RDWR | O_CLOEXEC);
  252. if (fd < 0)
  253. return vk_error(VK_ERROR_INCOMPATIBLE_DRIVER);
  254. device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
  255. device->instance = instance;
  256. assert(strlen(path) < ARRAY_SIZE(device->path));
  257. strncpy(device->path, path, ARRAY_SIZE(device->path));
  258. device->no_hw = getenv("INTEL_NO_HW") != NULL;
  259. const int pci_id_override = gen_get_pci_device_id_override();
  260. if (pci_id_override < 0) {
  261. device->chipset_id = anv_gem_get_param(fd, I915_PARAM_CHIPSET_ID);
  262. if (!device->chipset_id) {
  263. result = vk_error(VK_ERROR_INCOMPATIBLE_DRIVER);
  264. goto fail;
  265. }
  266. } else {
  267. device->chipset_id = pci_id_override;
  268. device->no_hw = true;
  269. }
  270. device->name = gen_get_device_name(device->chipset_id);
  271. if (!gen_get_device_info(device->chipset_id, &device->info)) {
  272. result = vk_error(VK_ERROR_INCOMPATIBLE_DRIVER);
  273. goto fail;
  274. }
  275. if (device->info.is_haswell) {
  276. intel_logw("Haswell Vulkan support is incomplete");
  277. } else if (device->info.gen == 7 && !device->info.is_baytrail) {
  278. intel_logw("Ivy Bridge Vulkan support is incomplete");
  279. } else if (device->info.gen == 7 && device->info.is_baytrail) {
  280. intel_logw("Bay Trail Vulkan support is incomplete");
  281. } else if (device->info.gen >= 8 && device->info.gen <= 10) {
  282. /* Gen8-10 fully supported */
  283. } else if (device->info.gen == 11) {
  284. intel_logw("Vulkan is not yet fully supported on gen11.");
  285. } else {
  286. result = vk_errorf(device->instance, device,
  287. VK_ERROR_INCOMPATIBLE_DRIVER,
  288. "Vulkan not yet supported on %s", device->name);
  289. goto fail;
  290. }
  291. device->cmd_parser_version = -1;
  292. if (device->info.gen == 7) {
  293. device->cmd_parser_version =
  294. anv_gem_get_param(fd, I915_PARAM_CMD_PARSER_VERSION);
  295. if (device->cmd_parser_version == -1) {
  296. result = vk_errorf(device->instance, device,
  297. VK_ERROR_INITIALIZATION_FAILED,
  298. "failed to get command parser version");
  299. goto fail;
  300. }
  301. }
  302. if (!anv_gem_get_param(fd, I915_PARAM_HAS_WAIT_TIMEOUT)) {
  303. result = vk_errorf(device->instance, device,
  304. VK_ERROR_INITIALIZATION_FAILED,
  305. "kernel missing gem wait");
  306. goto fail;
  307. }
  308. if (!anv_gem_get_param(fd, I915_PARAM_HAS_EXECBUF2)) {
  309. result = vk_errorf(device->instance, device,
  310. VK_ERROR_INITIALIZATION_FAILED,
  311. "kernel missing execbuf2");
  312. goto fail;
  313. }
  314. if (!device->info.has_llc &&
  315. anv_gem_get_param(fd, I915_PARAM_MMAP_VERSION) < 1) {
  316. result = vk_errorf(device->instance, device,
  317. VK_ERROR_INITIALIZATION_FAILED,
  318. "kernel missing wc mmap");
  319. goto fail;
  320. }
  321. result = anv_physical_device_init_heaps(device, fd);
  322. if (result != VK_SUCCESS)
  323. goto fail;
  324. device->has_exec_async = anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_ASYNC);
  325. device->has_exec_capture = anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_CAPTURE);
  326. device->has_exec_fence = anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_FENCE);
  327. device->has_syncobj = anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_FENCE_ARRAY);
  328. device->has_syncobj_wait = device->has_syncobj &&
  329. anv_gem_supports_syncobj_wait(fd);
  330. device->has_context_priority = anv_gem_has_context_priority(fd);
  331. device->use_softpin = anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_SOFTPIN)
  332. && device->supports_48bit_addresses;
  333. device->has_context_isolation =
  334. anv_gem_get_param(fd, I915_PARAM_HAS_CONTEXT_ISOLATION);
  335. bool swizzled = anv_gem_get_bit6_swizzle(fd, I915_TILING_X);
  336. /* Starting with Gen10, the timestamp frequency of the command streamer may
  337. * vary from one part to another. We can query the value from the kernel.
  338. */
  339. if (device->info.gen >= 10) {
  340. int timestamp_frequency =
  341. anv_gem_get_param(fd, I915_PARAM_CS_TIMESTAMP_FREQUENCY);
  342. if (timestamp_frequency < 0)
  343. intel_logw("Kernel 4.16-rc1+ required to properly query CS timestamp frequency");
  344. else
  345. device->info.timestamp_frequency = timestamp_frequency;
  346. }
  347. /* GENs prior to 8 do not support EU/Subslice info */
  348. if (device->info.gen >= 8) {
  349. device->subslice_total = anv_gem_get_param(fd, I915_PARAM_SUBSLICE_TOTAL);
  350. device->eu_total = anv_gem_get_param(fd, I915_PARAM_EU_TOTAL);
  351. /* Without this information, we cannot get the right Braswell
  352. * brandstrings, and we have to use conservative numbers for GPGPU on
  353. * many platforms, but otherwise, things will just work.
  354. */
  355. if (device->subslice_total < 1 || device->eu_total < 1) {
  356. intel_logw("Kernel 4.1 required to properly query GPU properties");
  357. }
  358. } else if (device->info.gen == 7) {
  359. device->subslice_total = 1 << (device->info.gt - 1);
  360. }
  361. if (device->info.is_cherryview &&
  362. device->subslice_total > 0 && device->eu_total > 0) {
  363. /* Logical CS threads = EUs per subslice * num threads per EU */
  364. uint32_t max_cs_threads =
  365. device->eu_total / device->subslice_total * device->info.num_thread_per_eu;
  366. /* Fuse configurations may give more threads than expected, never less. */
  367. if (max_cs_threads > device->info.max_cs_threads)
  368. device->info.max_cs_threads = max_cs_threads;
  369. }
  370. device->compiler = brw_compiler_create(NULL, &device->info);
  371. if (device->compiler == NULL) {
  372. result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
  373. goto fail;
  374. }
  375. device->compiler->shader_debug_log = compiler_debug_log;
  376. device->compiler->shader_perf_log = compiler_perf_log;
  377. device->compiler->supports_pull_constants = false;
  378. device->compiler->constant_buffer_0_is_relative =
  379. device->info.gen < 8 || !device->has_context_isolation;
  380. isl_device_init(&device->isl_dev, &device->info, swizzled);
  381. result = anv_physical_device_init_uuids(device);
  382. if (result != VK_SUCCESS)
  383. goto fail;
  384. if (instance->enabled_extensions.KHR_display) {
  385. master_fd = open(primary_path, O_RDWR | O_CLOEXEC);
  386. if (master_fd >= 0) {
  387. /* prod the device with a GETPARAM call which will fail if
  388. * we don't have permission to even render on this device
  389. */
  390. if (anv_gem_get_param(master_fd, I915_PARAM_CHIPSET_ID) == 0) {
  391. close(master_fd);
  392. master_fd = -1;
  393. }
  394. }
  395. }
  396. device->master_fd = master_fd;
  397. result = anv_init_wsi(device);
  398. if (result != VK_SUCCESS) {
  399. ralloc_free(device->compiler);
  400. goto fail;
  401. }
  402. anv_physical_device_get_supported_extensions(device,
  403. &device->supported_extensions);
  404. device->local_fd = fd;
  405. return VK_SUCCESS;
  406. fail:
  407. close(fd);
  408. if (master_fd != -1)
  409. close(master_fd);
  410. return result;
  411. }
  412. static void
  413. anv_physical_device_finish(struct anv_physical_device *device)
  414. {
  415. anv_finish_wsi(device);
  416. ralloc_free(device->compiler);
  417. close(device->local_fd);
  418. if (device->master_fd >= 0)
  419. close(device->master_fd);
  420. }
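/* Default host allocation callbacks, used when the application passes a NULL
 * VkAllocationCallbacks pointer (vk_alloc2() later in this file falls back to
 * these when pAllocator is NULL). Note that the requested alignment is
 * ignored; the assumption is that malloc's natural alignment is sufficient
 * for the 8-byte alignments requested here.
 */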
  421. static void *
  422. default_alloc_func(void *pUserData, size_t size, size_t align,
  423. VkSystemAllocationScope allocationScope)
  424. {
  425. return malloc(size);
  426. }
  427. static void *
  428. default_realloc_func(void *pUserData, void *pOriginal, size_t size,
  429. size_t align, VkSystemAllocationScope allocationScope)
  430. {
  431. return realloc(pOriginal, size);
  432. }
  433. static void
  434. default_free_func(void *pUserData, void *pMemory)
  435. {
  436. free(pMemory);
  437. }
  438. static const VkAllocationCallbacks default_alloc = {
  439. .pUserData = NULL,
  440. .pfnAllocation = default_alloc_func,
  441. .pfnReallocation = default_realloc_func,
  442. .pfnFree = default_free_func,
  443. };
  444. VkResult anv_EnumerateInstanceExtensionProperties(
  445. const char* pLayerName,
  446. uint32_t* pPropertyCount,
  447. VkExtensionProperties* pProperties)
  448. {
  449. VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
  450. for (int i = 0; i < ANV_INSTANCE_EXTENSION_COUNT; i++) {
  451. if (anv_instance_extensions_supported.extensions[i]) {
  452. vk_outarray_append(&out, prop) {
  453. *prop = anv_instance_extensions[i];
  454. }
  455. }
  456. }
  457. return vk_outarray_status(&out);
  458. }
  459. VkResult anv_CreateInstance(
  460. const VkInstanceCreateInfo* pCreateInfo,
  461. const VkAllocationCallbacks* pAllocator,
  462. VkInstance* pInstance)
  463. {
  464. struct anv_instance *instance;
  465. VkResult result;
  466. assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);
  467. struct anv_instance_extension_table enabled_extensions = {};
  468. for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
  469. int idx;
  470. for (idx = 0; idx < ANV_INSTANCE_EXTENSION_COUNT; idx++) {
  471. if (strcmp(pCreateInfo->ppEnabledExtensionNames[i],
  472. anv_instance_extensions[idx].extensionName) == 0)
  473. break;
  474. }
  475. if (idx >= ANV_INSTANCE_EXTENSION_COUNT)
  476. return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);
  477. if (!anv_instance_extensions_supported.extensions[idx])
  478. return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);
  479. enabled_extensions.extensions[idx] = true;
  480. }
  481. instance = vk_alloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
  482. VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
  483. if (!instance)
  484. return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
  485. instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
  486. if (pAllocator)
  487. instance->alloc = *pAllocator;
  488. else
  489. instance->alloc = default_alloc;
  490. if (pCreateInfo->pApplicationInfo &&
  491. pCreateInfo->pApplicationInfo->apiVersion != 0) {
  492. instance->apiVersion = pCreateInfo->pApplicationInfo->apiVersion;
  493. } else {
  494. anv_EnumerateInstanceVersion(&instance->apiVersion);
  495. }
  496. instance->enabled_extensions = enabled_extensions;
  497. for (unsigned i = 0; i < ARRAY_SIZE(instance->dispatch.entrypoints); i++) {
  498. /* Vulkan requires that entrypoints for extensions which have not been
  499. * enabled must not be advertised.
  500. */
  501. if (!anv_entrypoint_is_enabled(i, instance->apiVersion,
  502. &instance->enabled_extensions, NULL)) {
  503. instance->dispatch.entrypoints[i] = NULL;
  504. } else if (anv_dispatch_table.entrypoints[i] != NULL) {
  505. instance->dispatch.entrypoints[i] = anv_dispatch_table.entrypoints[i];
  506. } else {
  507. instance->dispatch.entrypoints[i] =
  508. anv_tramp_dispatch_table.entrypoints[i];
  509. }
  510. }
  511. instance->physicalDeviceCount = -1;
  512. result = vk_debug_report_instance_init(&instance->debug_report_callbacks);
  513. if (result != VK_SUCCESS) {
  514. vk_free2(&default_alloc, pAllocator, instance);
  515. return vk_error(result);
  516. }
  517. _mesa_locale_init();
  518. VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));
  519. *pInstance = anv_instance_to_handle(instance);
  520. return VK_SUCCESS;
  521. }
  522. void anv_DestroyInstance(
  523. VkInstance _instance,
  524. const VkAllocationCallbacks* pAllocator)
  525. {
  526. ANV_FROM_HANDLE(anv_instance, instance, _instance);
  527. if (!instance)
  528. return;
  529. if (instance->physicalDeviceCount > 0) {
  530. /* We support at most one physical device. */
  531. assert(instance->physicalDeviceCount == 1);
  532. anv_physical_device_finish(&instance->physicalDevice);
  533. }
  534. VG(VALGRIND_DESTROY_MEMPOOL(instance));
  535. vk_debug_report_instance_destroy(&instance->debug_report_callbacks);
  536. _mesa_locale_fini();
  537. vk_free(&instance->alloc, instance);
  538. }
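/* Scan the DRM devices for an Intel GPU (PCI vendor id 0x8086) with a render
 * node and initialize the first one that works; the driver advertises at most
 * one physical device per instance (see physicalDeviceCount above).
 */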
  539. static VkResult
  540. anv_enumerate_devices(struct anv_instance *instance)
  541. {
  542. /* TODO: Check for more devices ? */
  543. drmDevicePtr devices[8];
  544. VkResult result = VK_ERROR_INCOMPATIBLE_DRIVER;
  545. int max_devices;
  546. instance->physicalDeviceCount = 0;
  547. max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));
  548. if (max_devices < 1)
  549. return VK_ERROR_INCOMPATIBLE_DRIVER;
  550. for (unsigned i = 0; i < (unsigned)max_devices; i++) {
  551. if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
  552. devices[i]->bustype == DRM_BUS_PCI &&
  553. devices[i]->deviceinfo.pci->vendor_id == 0x8086) {
  554. result = anv_physical_device_init(&instance->physicalDevice,
  555. instance,
  556. devices[i]->nodes[DRM_NODE_PRIMARY],
  557. devices[i]->nodes[DRM_NODE_RENDER]);
  558. if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
  559. break;
  560. }
  561. }
  562. drmFreeDevices(devices, max_devices);
  563. if (result == VK_SUCCESS)
  564. instance->physicalDeviceCount = 1;
  565. return result;
  566. }
  567. static VkResult
  568. anv_instance_ensure_physical_device(struct anv_instance *instance)
  569. {
  570. if (instance->physicalDeviceCount < 0) {
  571. VkResult result = anv_enumerate_devices(instance);
  572. if (result != VK_SUCCESS &&
  573. result != VK_ERROR_INCOMPATIBLE_DRIVER)
  574. return result;
  575. }
  576. return VK_SUCCESS;
  577. }
  578. VkResult anv_EnumeratePhysicalDevices(
  579. VkInstance _instance,
  580. uint32_t* pPhysicalDeviceCount,
  581. VkPhysicalDevice* pPhysicalDevices)
  582. {
  583. ANV_FROM_HANDLE(anv_instance, instance, _instance);
  584. VK_OUTARRAY_MAKE(out, pPhysicalDevices, pPhysicalDeviceCount);
  585. VkResult result = anv_instance_ensure_physical_device(instance);
  586. if (result != VK_SUCCESS)
  587. return result;
  588. if (instance->physicalDeviceCount == 0)
  589. return VK_SUCCESS;
  590. assert(instance->physicalDeviceCount == 1);
  591. vk_outarray_append(&out, i) {
  592. *i = anv_physical_device_to_handle(&instance->physicalDevice);
  593. }
  594. return vk_outarray_status(&out);
  595. }
  596. VkResult anv_EnumeratePhysicalDeviceGroups(
  597. VkInstance _instance,
  598. uint32_t* pPhysicalDeviceGroupCount,
  599. VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties)
  600. {
  601. ANV_FROM_HANDLE(anv_instance, instance, _instance);
  602. VK_OUTARRAY_MAKE(out, pPhysicalDeviceGroupProperties,
  603. pPhysicalDeviceGroupCount);
  604. VkResult result = anv_instance_ensure_physical_device(instance);
  605. if (result != VK_SUCCESS)
  606. return result;
  607. if (instance->physicalDeviceCount == 0)
  608. return VK_SUCCESS;
  609. assert(instance->physicalDeviceCount == 1);
  610. vk_outarray_append(&out, p) {
  611. p->physicalDeviceCount = 1;
  612. memset(p->physicalDevices, 0, sizeof(p->physicalDevices));
  613. p->physicalDevices[0] =
  614. anv_physical_device_to_handle(&instance->physicalDevice);
  615. p->subsetAllocation = VK_FALSE;
  616. vk_foreach_struct(ext, p->pNext)
  617. anv_debug_ignored_stype(ext->sType);
  618. }
  619. return vk_outarray_status(&out);
  620. }
  621. void anv_GetPhysicalDeviceFeatures(
  622. VkPhysicalDevice physicalDevice,
  623. VkPhysicalDeviceFeatures* pFeatures)
  624. {
  625. ANV_FROM_HANDLE(anv_physical_device, pdevice, physicalDevice);
  626. *pFeatures = (VkPhysicalDeviceFeatures) {
  627. .robustBufferAccess = true,
  628. .fullDrawIndexUint32 = true,
  629. .imageCubeArray = true,
  630. .independentBlend = true,
  631. .geometryShader = true,
  632. .tessellationShader = true,
  633. .sampleRateShading = true,
  634. .dualSrcBlend = true,
  635. .logicOp = true,
  636. .multiDrawIndirect = true,
  637. .drawIndirectFirstInstance = true,
  638. .depthClamp = true,
  639. .depthBiasClamp = true,
  640. .fillModeNonSolid = true,
  641. .depthBounds = false,
  642. .wideLines = true,
  643. .largePoints = true,
  644. .alphaToOne = true,
  645. .multiViewport = true,
  646. .samplerAnisotropy = true,
  647. .textureCompressionETC2 = pdevice->info.gen >= 8 ||
  648. pdevice->info.is_baytrail,
  649. .textureCompressionASTC_LDR = pdevice->info.gen >= 9, /* FINISHME CHV */
  650. .textureCompressionBC = true,
  651. .occlusionQueryPrecise = true,
  652. .pipelineStatisticsQuery = true,
  653. .fragmentStoresAndAtomics = true,
  654. .shaderTessellationAndGeometryPointSize = true,
  655. .shaderImageGatherExtended = true,
  656. .shaderStorageImageExtendedFormats = true,
  657. .shaderStorageImageMultisample = false,
  658. .shaderStorageImageReadWithoutFormat = false,
  659. .shaderStorageImageWriteWithoutFormat = true,
  660. .shaderUniformBufferArrayDynamicIndexing = true,
  661. .shaderSampledImageArrayDynamicIndexing = true,
  662. .shaderStorageBufferArrayDynamicIndexing = true,
  663. .shaderStorageImageArrayDynamicIndexing = true,
  664. .shaderClipDistance = true,
  665. .shaderCullDistance = true,
  666. .shaderFloat64 = pdevice->info.gen >= 8 &&
  667. pdevice->info.has_64bit_types,
  668. .shaderInt64 = pdevice->info.gen >= 8 &&
  669. pdevice->info.has_64bit_types,
  670. .shaderInt16 = pdevice->info.gen >= 8,
  671. .shaderResourceMinLod = false,
  672. .variableMultisampleRate = true,
  673. .inheritedQueries = true,
  674. };
  675. /* We can't do image stores in vec4 shaders */
  676. pFeatures->vertexPipelineStoresAndAtomics =
  677. pdevice->compiler->scalar_stage[MESA_SHADER_VERTEX] &&
  678. pdevice->compiler->scalar_stage[MESA_SHADER_GEOMETRY];
  679. }
  680. void anv_GetPhysicalDeviceFeatures2(
  681. VkPhysicalDevice physicalDevice,
  682. VkPhysicalDeviceFeatures2* pFeatures)
  683. {
  684. anv_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);
  685. vk_foreach_struct(ext, pFeatures->pNext) {
  686. switch (ext->sType) {
  687. case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES: {
  688. VkPhysicalDeviceProtectedMemoryFeatures *features = (void *)ext;
  689. features->protectedMemory = VK_FALSE;
  690. break;
  691. }
  692. case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES: {
  693. VkPhysicalDeviceMultiviewFeatures *features =
  694. (VkPhysicalDeviceMultiviewFeatures *)ext;
  695. features->multiview = true;
  696. features->multiviewGeometryShader = true;
  697. features->multiviewTessellationShader = true;
  698. break;
  699. }
  700. case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES: {
  701. VkPhysicalDeviceVariablePointerFeatures *features = (void *)ext;
  702. features->variablePointersStorageBuffer = true;
  703. features->variablePointers = true;
  704. break;
  705. }
  706. case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES: {
  707. VkPhysicalDeviceSamplerYcbcrConversionFeatures *features =
  708. (VkPhysicalDeviceSamplerYcbcrConversionFeatures *) ext;
  709. features->samplerYcbcrConversion = true;
  710. break;
  711. }
  712. case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES: {
  713. VkPhysicalDeviceShaderDrawParameterFeatures *features = (void *)ext;
  714. features->shaderDrawParameters = true;
  715. break;
  716. }
  717. case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES_KHR: {
  718. VkPhysicalDevice16BitStorageFeaturesKHR *features =
  719. (VkPhysicalDevice16BitStorageFeaturesKHR *)ext;
  720. ANV_FROM_HANDLE(anv_physical_device, pdevice, physicalDevice);
  721. features->storageBuffer16BitAccess = pdevice->info.gen >= 8;
  722. features->uniformAndStorageBuffer16BitAccess = pdevice->info.gen >= 8;
  723. features->storagePushConstant16 = pdevice->info.gen >= 8;
  724. features->storageInputOutput16 = false;
  725. break;
  726. }
  727. default:
  728. anv_debug_ignored_stype(ext->sType);
  729. break;
  730. }
  731. }
  732. }
  733. void anv_GetPhysicalDeviceProperties(
  734. VkPhysicalDevice physicalDevice,
  735. VkPhysicalDeviceProperties* pProperties)
  736. {
  737. ANV_FROM_HANDLE(anv_physical_device, pdevice, physicalDevice);
  738. const struct gen_device_info *devinfo = &pdevice->info;
  739. /* See assertions made when programming the buffer surface state. */
  740. const uint32_t max_raw_buffer_sz = devinfo->gen >= 7 ?
  741. (1ul << 30) : (1ul << 27);
  742. const uint32_t max_samplers = (devinfo->gen >= 8 || devinfo->is_haswell) ?
  743. 128 : 16;
  744. VkSampleCountFlags sample_counts =
  745. isl_device_get_sample_counts(&pdevice->isl_dev);
  746. VkPhysicalDeviceLimits limits = {
  747. .maxImageDimension1D = (1 << 14),
  748. .maxImageDimension2D = (1 << 14),
  749. .maxImageDimension3D = (1 << 11),
  750. .maxImageDimensionCube = (1 << 14),
  751. .maxImageArrayLayers = (1 << 11),
  752. .maxTexelBufferElements = 128 * 1024 * 1024,
  753. .maxUniformBufferRange = (1ul << 27),
  754. .maxStorageBufferRange = max_raw_buffer_sz,
  755. .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
  756. .maxMemoryAllocationCount = UINT32_MAX,
  757. .maxSamplerAllocationCount = 64 * 1024,
  758. .bufferImageGranularity = 64, /* A cache line */
  759. .sparseAddressSpaceSize = 0,
  760. .maxBoundDescriptorSets = MAX_SETS,
  761. .maxPerStageDescriptorSamplers = max_samplers,
  762. .maxPerStageDescriptorUniformBuffers = 64,
  763. .maxPerStageDescriptorStorageBuffers = 64,
  764. .maxPerStageDescriptorSampledImages = max_samplers,
  765. .maxPerStageDescriptorStorageImages = 64,
  766. .maxPerStageDescriptorInputAttachments = 64,
  767. .maxPerStageResources = 250,
  768. .maxDescriptorSetSamplers = 6 * max_samplers, /* number of stages * maxPerStageDescriptorSamplers */
  769. .maxDescriptorSetUniformBuffers = 6 * 64, /* number of stages * maxPerStageDescriptorUniformBuffers */
  770. .maxDescriptorSetUniformBuffersDynamic = MAX_DYNAMIC_BUFFERS / 2,
  771. .maxDescriptorSetStorageBuffers = 6 * 64, /* number of stages * maxPerStageDescriptorStorageBuffers */
  772. .maxDescriptorSetStorageBuffersDynamic = MAX_DYNAMIC_BUFFERS / 2,
  773. .maxDescriptorSetSampledImages = 6 * max_samplers, /* number of stages * maxPerStageDescriptorSampledImages */
  774. .maxDescriptorSetStorageImages = 6 * 64, /* number of stages * maxPerStageDescriptorStorageImages */
  775. .maxDescriptorSetInputAttachments = 256,
  776. .maxVertexInputAttributes = MAX_VBS,
  777. .maxVertexInputBindings = MAX_VBS,
  778. .maxVertexInputAttributeOffset = 2047,
  779. .maxVertexInputBindingStride = 2048,
  780. .maxVertexOutputComponents = 128,
  781. .maxTessellationGenerationLevel = 64,
  782. .maxTessellationPatchSize = 32,
  783. .maxTessellationControlPerVertexInputComponents = 128,
  784. .maxTessellationControlPerVertexOutputComponents = 128,
  785. .maxTessellationControlPerPatchOutputComponents = 128,
  786. .maxTessellationControlTotalOutputComponents = 2048,
  787. .maxTessellationEvaluationInputComponents = 128,
  788. .maxTessellationEvaluationOutputComponents = 128,
  789. .maxGeometryShaderInvocations = 32,
  790. .maxGeometryInputComponents = 64,
  791. .maxGeometryOutputComponents = 128,
  792. .maxGeometryOutputVertices = 256,
  793. .maxGeometryTotalOutputComponents = 1024,
  794. .maxFragmentInputComponents = 112, /* 128 components - (POS, PSIZ, CLIP_DIST0, CLIP_DIST1) */
  795. .maxFragmentOutputAttachments = 8,
  796. .maxFragmentDualSrcAttachments = 1,
  797. .maxFragmentCombinedOutputResources = 8,
  798. .maxComputeSharedMemorySize = 32768,
  799. .maxComputeWorkGroupCount = { 65535, 65535, 65535 },
  800. .maxComputeWorkGroupInvocations = 16 * devinfo->max_cs_threads,
  801. .maxComputeWorkGroupSize = {
  802. 16 * devinfo->max_cs_threads,
  803. 16 * devinfo->max_cs_threads,
  804. 16 * devinfo->max_cs_threads,
  805. },
  806. .subPixelPrecisionBits = 4 /* FIXME */,
  807. .subTexelPrecisionBits = 4 /* FIXME */,
  808. .mipmapPrecisionBits = 4 /* FIXME */,
  809. .maxDrawIndexedIndexValue = UINT32_MAX,
  810. .maxDrawIndirectCount = UINT32_MAX,
  811. .maxSamplerLodBias = 16,
  812. .maxSamplerAnisotropy = 16,
  813. .maxViewports = MAX_VIEWPORTS,
  814. .maxViewportDimensions = { (1 << 14), (1 << 14) },
  815. .viewportBoundsRange = { INT16_MIN, INT16_MAX },
  816. .viewportSubPixelBits = 13, /* We take a float? */
  817. .minMemoryMapAlignment = 4096, /* A page */
  818. .minTexelBufferOffsetAlignment = 1,
  819. /* We need 16 for UBO block reads to work and 32 for push UBOs */
  820. .minUniformBufferOffsetAlignment = 32,
  821. .minStorageBufferOffsetAlignment = 4,
  822. .minTexelOffset = -8,
  823. .maxTexelOffset = 7,
  824. .minTexelGatherOffset = -32,
  825. .maxTexelGatherOffset = 31,
  826. .minInterpolationOffset = -0.5,
  827. .maxInterpolationOffset = 0.4375,
  828. .subPixelInterpolationOffsetBits = 4,
  829. .maxFramebufferWidth = (1 << 14),
  830. .maxFramebufferHeight = (1 << 14),
  831. .maxFramebufferLayers = (1 << 11),
  832. .framebufferColorSampleCounts = sample_counts,
  833. .framebufferDepthSampleCounts = sample_counts,
  834. .framebufferStencilSampleCounts = sample_counts,
  835. .framebufferNoAttachmentsSampleCounts = sample_counts,
  836. .maxColorAttachments = MAX_RTS,
  837. .sampledImageColorSampleCounts = sample_counts,
  838. .sampledImageIntegerSampleCounts = VK_SAMPLE_COUNT_1_BIT,
  839. .sampledImageDepthSampleCounts = sample_counts,
  840. .sampledImageStencilSampleCounts = sample_counts,
  841. .storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
  842. .maxSampleMaskWords = 1,
  843. .timestampComputeAndGraphics = false,
  844. .timestampPeriod = 1000000000.0 / devinfo->timestamp_frequency,
  845. .maxClipDistances = 8,
  846. .maxCullDistances = 8,
  847. .maxCombinedClipAndCullDistances = 8,
  848. .discreteQueuePriorities = 1,
  849. .pointSizeRange = { 0.125, 255.875 },
  850. .lineWidthRange = { 0.0, 7.9921875 },
  851. .pointSizeGranularity = (1.0 / 8.0),
  852. .lineWidthGranularity = (1.0 / 128.0),
  853. .strictLines = false, /* FINISHME */
  854. .standardSampleLocations = true,
  855. .optimalBufferCopyOffsetAlignment = 128,
  856. .optimalBufferCopyRowPitchAlignment = 128,
  857. .nonCoherentAtomSize = 64,
  858. };
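/* As a rough example of the timestampPeriod value above: a command-streamer
 * timestamp frequency of 12 MHz (common on several gens) yields
 * 1000000000.0 / 12000000 ≈ 83.3 ns per timestamp tick.
 */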
  859. *pProperties = (VkPhysicalDeviceProperties) {
  860. .apiVersion = anv_physical_device_api_version(pdevice),
  861. .driverVersion = vk_get_driver_version(),
  862. .vendorID = 0x8086,
  863. .deviceID = pdevice->chipset_id,
  864. .deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
  865. .limits = limits,
  866. .sparseProperties = {0}, /* Broadwell doesn't do sparse. */
  867. };
  868. snprintf(pProperties->deviceName, sizeof(pProperties->deviceName),
  869. "%s", pdevice->name);
  870. memcpy(pProperties->pipelineCacheUUID,
  871. pdevice->pipeline_cache_uuid, VK_UUID_SIZE);
  872. }
  873. void anv_GetPhysicalDeviceProperties2(
  874. VkPhysicalDevice physicalDevice,
  875. VkPhysicalDeviceProperties2* pProperties)
  876. {
  877. ANV_FROM_HANDLE(anv_physical_device, pdevice, physicalDevice);
  878. anv_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);
  879. vk_foreach_struct(ext, pProperties->pNext) {
  880. switch (ext->sType) {
  881. case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: {
  882. VkPhysicalDevicePushDescriptorPropertiesKHR *properties =
  883. (VkPhysicalDevicePushDescriptorPropertiesKHR *) ext;
  884. properties->maxPushDescriptors = MAX_PUSH_DESCRIPTORS;
  885. break;
  886. }
  887. case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES: {
  888. VkPhysicalDeviceIDProperties *id_props =
  889. (VkPhysicalDeviceIDProperties *)ext;
  890. memcpy(id_props->deviceUUID, pdevice->device_uuid, VK_UUID_SIZE);
  891. memcpy(id_props->driverUUID, pdevice->driver_uuid, VK_UUID_SIZE);
  892. /* The LUID is for Windows. */
  893. id_props->deviceLUIDValid = false;
  894. break;
  895. }
  896. case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES: {
  897. VkPhysicalDeviceMaintenance3Properties *props =
  898. (VkPhysicalDeviceMaintenance3Properties *)ext;
  899. /* This value doesn't matter for us today as our per-stage
  900. * descriptors are the real limit.
  901. */
  902. props->maxPerSetDescriptors = 1024;
  903. props->maxMemoryAllocationSize = MAX_MEMORY_ALLOCATION_SIZE;
  904. break;
  905. }
  906. case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES: {
  907. VkPhysicalDeviceMultiviewProperties *properties =
  908. (VkPhysicalDeviceMultiviewProperties *)ext;
  909. properties->maxMultiviewViewCount = 16;
  910. properties->maxMultiviewInstanceIndex = UINT32_MAX / 16;
  911. break;
  912. }
  913. case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES: {
  914. VkPhysicalDevicePointClippingProperties *properties =
  915. (VkPhysicalDevicePointClippingProperties *) ext;
  916. properties->pointClippingBehavior = VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES;
  917. anv_finishme("Implement pop-free point clipping");
  918. break;
  919. }
  920. case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES: {
  921. VkPhysicalDeviceSubgroupProperties *properties = (void *)ext;
  922. properties->subgroupSize = BRW_SUBGROUP_SIZE;
  923. VkShaderStageFlags scalar_stages = 0;
  924. for (unsigned stage = 0; stage < MESA_SHADER_STAGES; stage++) {
  925. if (pdevice->compiler->scalar_stage[stage])
  926. scalar_stages |= mesa_to_vk_shader_stage(stage);
  927. }
  928. properties->supportedStages = scalar_stages;
  929. properties->supportedOperations = VK_SUBGROUP_FEATURE_BASIC_BIT |
  930. VK_SUBGROUP_FEATURE_VOTE_BIT |
  931. VK_SUBGROUP_FEATURE_ARITHMETIC_BIT |
  932. VK_SUBGROUP_FEATURE_BALLOT_BIT |
  933. VK_SUBGROUP_FEATURE_SHUFFLE_BIT |
  934. VK_SUBGROUP_FEATURE_SHUFFLE_RELATIVE_BIT |
  935. VK_SUBGROUP_FEATURE_CLUSTERED_BIT |
  936. VK_SUBGROUP_FEATURE_QUAD_BIT;
  937. properties->quadOperationsInAllStages = VK_TRUE;
  938. break;
  939. }
  940. default:
  941. anv_debug_ignored_stype(ext->sType);
  942. break;
  943. }
  944. }
  945. }
  946. /* We support exactly one queue family. */
  947. static const VkQueueFamilyProperties
  948. anv_queue_family_properties = {
  949. .queueFlags = VK_QUEUE_GRAPHICS_BIT |
  950. VK_QUEUE_COMPUTE_BIT |
  951. VK_QUEUE_TRANSFER_BIT,
  952. .queueCount = 1,
  953. .timestampValidBits = 36, /* XXX: Real value here */
  954. .minImageTransferGranularity = { 1, 1, 1 },
  955. };
  956. void anv_GetPhysicalDeviceQueueFamilyProperties(
  957. VkPhysicalDevice physicalDevice,
  958. uint32_t* pCount,
  959. VkQueueFamilyProperties* pQueueFamilyProperties)
  960. {
  961. VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pCount);
  962. vk_outarray_append(&out, p) {
  963. *p = anv_queue_family_properties;
  964. }
  965. }
  966. void anv_GetPhysicalDeviceQueueFamilyProperties2(
  967. VkPhysicalDevice physicalDevice,
  968. uint32_t* pQueueFamilyPropertyCount,
  969. VkQueueFamilyProperties2* pQueueFamilyProperties)
  970. {
  971. VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);
  972. vk_outarray_append(&out, p) {
  973. p->queueFamilyProperties = anv_queue_family_properties;
  974. vk_foreach_struct(s, p->pNext) {
  975. anv_debug_ignored_stype(s->sType);
  976. }
  977. }
  978. }
  979. void anv_GetPhysicalDeviceMemoryProperties(
  980. VkPhysicalDevice physicalDevice,
  981. VkPhysicalDeviceMemoryProperties* pMemoryProperties)
  982. {
  983. ANV_FROM_HANDLE(anv_physical_device, physical_device, physicalDevice);
  984. pMemoryProperties->memoryTypeCount = physical_device->memory.type_count;
  985. for (uint32_t i = 0; i < physical_device->memory.type_count; i++) {
  986. pMemoryProperties->memoryTypes[i] = (VkMemoryType) {
  987. .propertyFlags = physical_device->memory.types[i].propertyFlags,
  988. .heapIndex = physical_device->memory.types[i].heapIndex,
  989. };
  990. }
  991. pMemoryProperties->memoryHeapCount = physical_device->memory.heap_count;
  992. for (uint32_t i = 0; i < physical_device->memory.heap_count; i++) {
  993. pMemoryProperties->memoryHeaps[i] = (VkMemoryHeap) {
  994. .size = physical_device->memory.heaps[i].size,
  995. .flags = physical_device->memory.heaps[i].flags,
  996. };
  997. }
  998. }
  999. void anv_GetPhysicalDeviceMemoryProperties2(
  1000. VkPhysicalDevice physicalDevice,
  1001. VkPhysicalDeviceMemoryProperties2* pMemoryProperties)
  1002. {
  1003. anv_GetPhysicalDeviceMemoryProperties(physicalDevice,
  1004. &pMemoryProperties->memoryProperties);
  1005. vk_foreach_struct(ext, pMemoryProperties->pNext) {
  1006. switch (ext->sType) {
  1007. default:
  1008. anv_debug_ignored_stype(ext->sType);
  1009. break;
  1010. }
  1011. }
  1012. }
  1013. void
  1014. anv_GetDeviceGroupPeerMemoryFeatures(
  1015. VkDevice device,
  1016. uint32_t heapIndex,
  1017. uint32_t localDeviceIndex,
  1018. uint32_t remoteDeviceIndex,
  1019. VkPeerMemoryFeatureFlags* pPeerMemoryFeatures)
  1020. {
  1021. assert(localDeviceIndex == 0 && remoteDeviceIndex == 0);
  1022. *pPeerMemoryFeatures = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT |
  1023. VK_PEER_MEMORY_FEATURE_COPY_DST_BIT |
  1024. VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT |
  1025. VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT;
  1026. }
  1027. PFN_vkVoidFunction anv_GetInstanceProcAddr(
  1028. VkInstance _instance,
  1029. const char* pName)
  1030. {
  1031. ANV_FROM_HANDLE(anv_instance, instance, _instance);
  1032. /* The Vulkan 1.0 spec for vkGetInstanceProcAddr has a table of exactly
  1033. * when we have to return valid function pointers, NULL, or it's left
  1034. * undefined. See the table for exact details.
  1035. */
  1036. if (pName == NULL)
  1037. return NULL;
  1038. #define LOOKUP_ANV_ENTRYPOINT(entrypoint) \
  1039. if (strcmp(pName, "vk" #entrypoint) == 0) \
  1040. return (PFN_vkVoidFunction)anv_##entrypoint
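/* For example, LOOKUP_ANV_ENTRYPOINT(CreateInstance) expands to:
 *
 *    if (strcmp(pName, "vkCreateInstance") == 0)
 *       return (PFN_vkVoidFunction)anv_CreateInstance;
 */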
  1041. LOOKUP_ANV_ENTRYPOINT(EnumerateInstanceExtensionProperties);
  1042. LOOKUP_ANV_ENTRYPOINT(EnumerateInstanceLayerProperties);
  1043. LOOKUP_ANV_ENTRYPOINT(EnumerateInstanceVersion);
  1044. LOOKUP_ANV_ENTRYPOINT(CreateInstance);
  1045. #undef LOOKUP_ANV_ENTRYPOINT
  1046. if (instance == NULL)
  1047. return NULL;
  1048. int idx = anv_get_entrypoint_index(pName);
  1049. if (idx < 0)
  1050. return NULL;
  1051. return instance->dispatch.entrypoints[idx];
  1052. }
  1053. /* With version 1+ of the loader interface the ICD should expose
  1054. * vk_icdGetInstanceProcAddr to work around certain LD_PRELOAD issues seen in apps.
  1055. */
  1056. PUBLIC
  1057. VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetInstanceProcAddr(
  1058. VkInstance instance,
  1059. const char* pName);
  1060. PUBLIC
  1061. VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetInstanceProcAddr(
  1062. VkInstance instance,
  1063. const char* pName)
  1064. {
  1065. return anv_GetInstanceProcAddr(instance, pName);
  1066. }
  1067. PFN_vkVoidFunction anv_GetDeviceProcAddr(
  1068. VkDevice _device,
  1069. const char* pName)
  1070. {
  1071. ANV_FROM_HANDLE(anv_device, device, _device);
  1072. if (!device || !pName)
  1073. return NULL;
  1074. int idx = anv_get_entrypoint_index(pName);
  1075. if (idx < 0)
  1076. return NULL;
  1077. return device->dispatch.entrypoints[idx];
  1078. }
  1079. VkResult
  1080. anv_CreateDebugReportCallbackEXT(VkInstance _instance,
  1081. const VkDebugReportCallbackCreateInfoEXT* pCreateInfo,
  1082. const VkAllocationCallbacks* pAllocator,
  1083. VkDebugReportCallbackEXT* pCallback)
  1084. {
  1085. ANV_FROM_HANDLE(anv_instance, instance, _instance);
  1086. return vk_create_debug_report_callback(&instance->debug_report_callbacks,
  1087. pCreateInfo, pAllocator, &instance->alloc,
  1088. pCallback);
  1089. }
  1090. void
  1091. anv_DestroyDebugReportCallbackEXT(VkInstance _instance,
  1092. VkDebugReportCallbackEXT _callback,
  1093. const VkAllocationCallbacks* pAllocator)
  1094. {
  1095. ANV_FROM_HANDLE(anv_instance, instance, _instance);
  1096. vk_destroy_debug_report_callback(&instance->debug_report_callbacks,
  1097. _callback, pAllocator, &instance->alloc);
  1098. }
  1099. void
  1100. anv_DebugReportMessageEXT(VkInstance _instance,
  1101. VkDebugReportFlagsEXT flags,
  1102. VkDebugReportObjectTypeEXT objectType,
  1103. uint64_t object,
  1104. size_t location,
  1105. int32_t messageCode,
  1106. const char* pLayerPrefix,
  1107. const char* pMessage)
  1108. {
  1109. ANV_FROM_HANDLE(anv_instance, instance, _instance);
  1110. vk_debug_report(&instance->debug_report_callbacks, flags, objectType,
  1111. object, location, messageCode, pLayerPrefix, pMessage);
  1112. }
  1113. static void
  1114. anv_queue_init(struct anv_device *device, struct anv_queue *queue)
  1115. {
  1116. queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
  1117. queue->device = device;
  1118. queue->flags = 0;
  1119. }
  1120. static void
  1121. anv_queue_finish(struct anv_queue *queue)
  1122. {
  1123. }
  1124. static struct anv_state
  1125. anv_state_pool_emit_data(struct anv_state_pool *pool, size_t size, size_t align, const void *p)
  1126. {
  1127. struct anv_state state;
  1128. state = anv_state_pool_alloc(pool, size, align);
  1129. memcpy(state.map, p, size);
  1130. anv_state_flush(pool->block_pool.device, state);
  1131. return state;
  1132. }
  1133. struct gen8_border_color {
  1134. union {
  1135. float float32[4];
  1136. uint32_t uint32[4];
  1137. };
  1138. /* Pad out to 64 bytes */
  1139. uint32_t _pad[12];
  1140. };
  1141. static void
  1142. anv_device_init_border_colors(struct anv_device *device)
  1143. {
  1144. static const struct gen8_border_color border_colors[] = {
  1145. [VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK] = { .float32 = { 0.0, 0.0, 0.0, 0.0 } },
  1146. [VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK] = { .float32 = { 0.0, 0.0, 0.0, 1.0 } },
  1147. [VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE] = { .float32 = { 1.0, 1.0, 1.0, 1.0 } },
  1148. [VK_BORDER_COLOR_INT_TRANSPARENT_BLACK] = { .uint32 = { 0, 0, 0, 0 } },
  1149. [VK_BORDER_COLOR_INT_OPAQUE_BLACK] = { .uint32 = { 0, 0, 0, 1 } },
  1150. [VK_BORDER_COLOR_INT_OPAQUE_WHITE] = { .uint32 = { 1, 1, 1, 1 } },
  1151. };
  1152. device->border_colors = anv_state_pool_emit_data(&device->dynamic_state_pool,
  1153. sizeof(border_colors), 64,
  1154. border_colors);
  1155. }
  1156. static void
  1157. anv_device_init_trivial_batch(struct anv_device *device)
  1158. {
  1159. anv_bo_init_new(&device->trivial_batch_bo, device, 4096);
  1160. if (device->instance->physicalDevice.has_exec_async)
  1161. device->trivial_batch_bo.flags |= EXEC_OBJECT_ASYNC;
  1162. if (device->instance->physicalDevice.use_softpin)
  1163. device->trivial_batch_bo.flags |= EXEC_OBJECT_PINNED;
  1164. anv_vma_alloc(device, &device->trivial_batch_bo);
  1165. void *map = anv_gem_mmap(device, device->trivial_batch_bo.gem_handle,
  1166. 0, 4096, 0);
  1167. struct anv_batch batch = {
  1168. .start = map,
  1169. .next = map,
  1170. .end = map + 4096,
  1171. };
  1172. anv_batch_emit(&batch, GEN7_MI_BATCH_BUFFER_END, bbe);
  1173. anv_batch_emit(&batch, GEN7_MI_NOOP, noop);
  1174. if (!device->info.has_llc)
  1175. gen_clflush_range(map, batch.next - map);
  1176. anv_gem_munmap(map, device->trivial_batch_bo.size);
  1177. }
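/* Sketch of what the trivial batch is (illustrative): a 4 KiB BO whose only
 * payload is MI_BATCH_BUFFER_END followed by MI_NOOP, so submitting it is a
 * GPU no-op. It is handy when the driver needs an execbuf purely for its side
 * effects (for example to signal a fence) without running real commands; the
 * exact call sites live elsewhere in the driver and are not shown here.
 */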
  1178. VkResult anv_EnumerateDeviceExtensionProperties(
  1179. VkPhysicalDevice physicalDevice,
  1180. const char* pLayerName,
  1181. uint32_t* pPropertyCount,
  1182. VkExtensionProperties* pProperties)
  1183. {
  1184. ANV_FROM_HANDLE(anv_physical_device, device, physicalDevice);
  1185. VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
  1186. (void)device;
  1187. for (int i = 0; i < ANV_DEVICE_EXTENSION_COUNT; i++) {
  1188. if (device->supported_extensions.extensions[i]) {
  1189. vk_outarray_append(&out, prop) {
  1190. *prop = anv_device_extensions[i];
  1191. }
  1192. }
  1193. }
  1194. return vk_outarray_status(&out);
  1195. }
  1196. static void
  1197. anv_device_init_dispatch(struct anv_device *device)
  1198. {
  1199. const struct anv_dispatch_table *genX_table;
  1200. switch (device->info.gen) {
  1201. case 11:
  1202. genX_table = &gen11_dispatch_table;
  1203. break;
  1204. case 10:
  1205. genX_table = &gen10_dispatch_table;
  1206. break;
  1207. case 9:
  1208. genX_table = &gen9_dispatch_table;
  1209. break;
  1210. case 8:
  1211. genX_table = &gen8_dispatch_table;
  1212. break;
  1213. case 7:
  1214. if (device->info.is_haswell)
  1215. genX_table = &gen75_dispatch_table;
  1216. else
  1217. genX_table = &gen7_dispatch_table;
  1218. break;
  1219. default:
  1220. unreachable("unsupported gen\n");
  1221. }
  1222. for (unsigned i = 0; i < ARRAY_SIZE(device->dispatch.entrypoints); i++) {
  1223. /* Vulkan requires that entrypoints for extensions which have not been
  1224. * enabled must not be advertised.
  1225. */
  1226. if (!anv_entrypoint_is_enabled(i, device->instance->apiVersion,
  1227. &device->instance->enabled_extensions,
  1228. &device->enabled_extensions)) {
  1229. device->dispatch.entrypoints[i] = NULL;
  1230. } else if (genX_table->entrypoints[i]) {
  1231. device->dispatch.entrypoints[i] = genX_table->entrypoints[i];
  1232. } else {
  1233. device->dispatch.entrypoints[i] = anv_dispatch_table.entrypoints[i];
  1234. }
  1235. }
  1236. }
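/* Resolution-order sketch (illustrative) for the loop above: for each
 * entrypoint index i the device table effectively becomes
 *
 *    entrypoints[i] = !enabled(i)                 ? NULL
 *                   : genX_table->entrypoints[i]  ? genX_table->entrypoints[i]
 *                   : anv_dispatch_table.entrypoints[i];
 *
 * so a gen-specific implementation (e.g. a gen9_* variant) wins over the
 * generic anv_* one, and entrypoints of extensions that were not enabled are
 * never advertised.
 */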
  1237. static int
  1238. vk_priority_to_gen(int priority)
  1239. {
  1240. switch (priority) {
  1241. case VK_QUEUE_GLOBAL_PRIORITY_LOW_EXT:
  1242. return GEN_CONTEXT_LOW_PRIORITY;
  1243. case VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_EXT:
  1244. return GEN_CONTEXT_MEDIUM_PRIORITY;
  1245. case VK_QUEUE_GLOBAL_PRIORITY_HIGH_EXT:
  1246. return GEN_CONTEXT_HIGH_PRIORITY;
  1247. case VK_QUEUE_GLOBAL_PRIORITY_REALTIME_EXT:
  1248. return GEN_CONTEXT_REALTIME_PRIORITY;
  1249. default:
  1250. unreachable("Invalid priority");
  1251. }
  1252. }
  1253. static void
  1254. anv_device_init_hiz_clear_batch(struct anv_device *device)
  1255. {
  1256. anv_bo_init_new(&device->hiz_clear_bo, device, 4096);
  1257. uint32_t *map = anv_gem_mmap(device, device->hiz_clear_bo.gem_handle,
  1258. 0, 4096, 0);
  1259. union isl_color_value hiz_clear = { .u32 = { 0, } };
  1260. hiz_clear.f32[0] = ANV_HZ_FC_VAL;
  1261. memcpy(map, hiz_clear.u32, sizeof(hiz_clear.u32));
  1262. anv_gem_munmap(map, device->hiz_clear_bo.size);
  1263. }
  1264. VkResult anv_CreateDevice(
  1265. VkPhysicalDevice physicalDevice,
  1266. const VkDeviceCreateInfo* pCreateInfo,
  1267. const VkAllocationCallbacks* pAllocator,
  1268. VkDevice* pDevice)
  1269. {
  1270. ANV_FROM_HANDLE(anv_physical_device, physical_device, physicalDevice);
  1271. VkResult result;
  1272. struct anv_device *device;
  1273. assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO);
  1274. struct anv_device_extension_table enabled_extensions = { };
  1275. for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
  1276. int idx;
  1277. for (idx = 0; idx < ANV_DEVICE_EXTENSION_COUNT; idx++) {
  1278. if (strcmp(pCreateInfo->ppEnabledExtensionNames[i],
  1279. anv_device_extensions[idx].extensionName) == 0)
  1280. break;
  1281. }
  1282. if (idx >= ANV_DEVICE_EXTENSION_COUNT)
  1283. return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);
  1284. if (!physical_device->supported_extensions.extensions[idx])
  1285. return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);
  1286. enabled_extensions.extensions[idx] = true;
  1287. }
  1288. /* Check enabled features */
  1289. if (pCreateInfo->pEnabledFeatures) {
  1290. VkPhysicalDeviceFeatures supported_features;
  1291. anv_GetPhysicalDeviceFeatures(physicalDevice, &supported_features);
  1292. VkBool32 *supported_feature = (VkBool32 *)&supported_features;
  1293. VkBool32 *enabled_feature = (VkBool32 *)pCreateInfo->pEnabledFeatures;
  1294. unsigned num_features = sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
  1295. for (uint32_t i = 0; i < num_features; i++) {
  1296. if (enabled_feature[i] && !supported_feature[i])
  1297. return vk_error(VK_ERROR_FEATURE_NOT_PRESENT);
  1298. }
  1299. }
  1300. /* Check requested queues and fail if we are requested to create any
  1301. * queues with flags we don't support.
  1302. */
  1303. assert(pCreateInfo->queueCreateInfoCount > 0);
  1304. for (uint32_t i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
  1305. if (pCreateInfo->pQueueCreateInfos[i].flags != 0)
  1306. return vk_error(VK_ERROR_INITIALIZATION_FAILED);
  1307. }
  1308. /* Check if client specified queue priority. */
  1309. const VkDeviceQueueGlobalPriorityCreateInfoEXT *queue_priority =
  1310. vk_find_struct_const(pCreateInfo->pQueueCreateInfos[0].pNext,
  1311. DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT);
  1312. VkQueueGlobalPriorityEXT priority =
  1313. queue_priority ? queue_priority->globalPriority :
  1314. VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_EXT;
  1315. device = vk_alloc2(&physical_device->instance->alloc, pAllocator,
  1316. sizeof(*device), 8,
  1317. VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
  1318. if (!device)
  1319. return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
  1320. device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
  1321. device->instance = physical_device->instance;
  1322. device->chipset_id = physical_device->chipset_id;
  1323. device->no_hw = physical_device->no_hw;
  1324. device->lost = false;
  1325. if (pAllocator)
  1326. device->alloc = *pAllocator;
  1327. else
  1328. device->alloc = physical_device->instance->alloc;
  1329. /* XXX(chadv): Can we dup() physicalDevice->fd here? */
  1330. device->fd = open(physical_device->path, O_RDWR | O_CLOEXEC);
  1331. if (device->fd == -1) {
  1332. result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
  1333. goto fail_device;
  1334. }
  1335. device->context_id = anv_gem_create_context(device);
  1336. if (device->context_id == -1) {
  1337. result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
  1338. goto fail_fd;
  1339. }
  1340. if (physical_device->use_softpin) {
  1341. if (pthread_mutex_init(&device->vma_mutex, NULL) != 0) {
  1342. result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
  1343. goto fail_fd;
  1344. }
  1345. /* keep the page with address zero out of the allocator */
  1346. util_vma_heap_init(&device->vma_lo, LOW_HEAP_MIN_ADDRESS, LOW_HEAP_SIZE);
  1347. device->vma_lo_available =
  1348. physical_device->memory.heaps[physical_device->memory.heap_count - 1].size;
  1349. /* Leave the last 4GiB out of the high vma range, so that no state base
  1350. * address + size can overflow 48 bits. For more information see the
  1351. * comment about Wa32bitGeneralStateOffset in anv_allocator.c
  1352. */
  1353. util_vma_heap_init(&device->vma_hi, HIGH_HEAP_MIN_ADDRESS,
  1354. HIGH_HEAP_SIZE);
  1355. device->vma_hi_available = physical_device->memory.heap_count == 1 ? 0 :
  1356. physical_device->memory.heaps[0].size;
  1357. }
  1358. /* As per spec, the driver implementation may deny requests to acquire
  1359. * a priority above the default priority (MEDIUM) if the caller does not
  1360. * have sufficient privileges. In this scenario VK_ERROR_NOT_PERMITTED_EXT
  1361. * is returned.
  1362. */
  1363. if (physical_device->has_context_priority) {
  1364. int err = anv_gem_set_context_param(device->fd, device->context_id,
  1365. I915_CONTEXT_PARAM_PRIORITY,
  1366. vk_priority_to_gen(priority));
  1367. if (err != 0 && priority > VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_EXT) {
  1368. result = vk_error(VK_ERROR_NOT_PERMITTED_EXT);
  1369. goto fail_fd;
  1370. }
  1371. }
  1372. device->info = physical_device->info;
  1373. device->isl_dev = physical_device->isl_dev;
  1374. /* On Broadwell and later, we can use batch chaining to more efficiently
  1375. * implement growing command buffers. Prior to Haswell, the kernel
  1376. * command parser gets in the way and we have to fall back to growing
  1377. * the batch.
  1378. */
  1379. device->can_chain_batches = device->info.gen >= 8;
  1380. device->robust_buffer_access = pCreateInfo->pEnabledFeatures &&
  1381. pCreateInfo->pEnabledFeatures->robustBufferAccess;
  1382. device->enabled_extensions = enabled_extensions;
  1383. anv_device_init_dispatch(device);
  1384. if (pthread_mutex_init(&device->mutex, NULL) != 0) {
  1385. result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
  1386. goto fail_context_id;
  1387. }
  1388. pthread_condattr_t condattr;
  1389. if (pthread_condattr_init(&condattr) != 0) {
  1390. result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
  1391. goto fail_mutex;
  1392. }
  1393. if (pthread_condattr_setclock(&condattr, CLOCK_MONOTONIC) != 0) {
  1394. pthread_condattr_destroy(&condattr);
  1395. result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
  1396. goto fail_mutex;
  1397. }
1398. if (pthread_cond_init(&device->queue_submit, &condattr) != 0) {
  1399. pthread_condattr_destroy(&condattr);
  1400. result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
  1401. goto fail_mutex;
  1402. }
  1403. pthread_condattr_destroy(&condattr);
  1404. uint64_t bo_flags =
  1405. (physical_device->supports_48bit_addresses ? EXEC_OBJECT_SUPPORTS_48B_ADDRESS : 0) |
  1406. (physical_device->has_exec_async ? EXEC_OBJECT_ASYNC : 0) |
  1407. (physical_device->has_exec_capture ? EXEC_OBJECT_CAPTURE : 0) |
  1408. (physical_device->use_softpin ? EXEC_OBJECT_PINNED : 0);
  1409. anv_bo_pool_init(&device->batch_bo_pool, device, bo_flags);
  1410. result = anv_bo_cache_init(&device->bo_cache);
  1411. if (result != VK_SUCCESS)
  1412. goto fail_batch_bo_pool;
  1413. if (!physical_device->use_softpin)
  1414. bo_flags &= ~EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
  1415. result = anv_state_pool_init(&device->dynamic_state_pool, device,
  1416. DYNAMIC_STATE_POOL_MIN_ADDRESS,
  1417. 16384,
  1418. bo_flags);
  1419. if (result != VK_SUCCESS)
  1420. goto fail_bo_cache;
  1421. result = anv_state_pool_init(&device->instruction_state_pool, device,
  1422. INSTRUCTION_STATE_POOL_MIN_ADDRESS,
  1423. 16384,
  1424. bo_flags);
  1425. if (result != VK_SUCCESS)
  1426. goto fail_dynamic_state_pool;
  1427. result = anv_state_pool_init(&device->surface_state_pool, device,
  1428. SURFACE_STATE_POOL_MIN_ADDRESS,
  1429. 4096,
  1430. bo_flags);
  1431. if (result != VK_SUCCESS)
  1432. goto fail_instruction_state_pool;
  1433. if (physical_device->use_softpin) {
  1434. result = anv_state_pool_init(&device->binding_table_pool, device,
  1435. BINDING_TABLE_POOL_MIN_ADDRESS,
  1436. 4096,
  1437. bo_flags);
  1438. if (result != VK_SUCCESS)
  1439. goto fail_surface_state_pool;
  1440. }
  1441. result = anv_bo_init_new(&device->workaround_bo, device, 1024);
  1442. if (result != VK_SUCCESS)
  1443. goto fail_binding_table_pool;
  1444. if (physical_device->use_softpin)
  1445. device->workaround_bo.flags |= EXEC_OBJECT_PINNED;
  1446. if (!anv_vma_alloc(device, &device->workaround_bo))
  1447. goto fail_workaround_bo;
  1448. anv_device_init_trivial_batch(device);
  1449. if (device->info.gen >= 10)
  1450. anv_device_init_hiz_clear_batch(device);
  1451. anv_scratch_pool_init(device, &device->scratch_pool);
  1452. anv_queue_init(device, &device->queue);
  1453. switch (device->info.gen) {
  1454. case 7:
  1455. if (!device->info.is_haswell)
  1456. result = gen7_init_device_state(device);
  1457. else
  1458. result = gen75_init_device_state(device);
  1459. break;
  1460. case 8:
  1461. result = gen8_init_device_state(device);
  1462. break;
  1463. case 9:
  1464. result = gen9_init_device_state(device);
  1465. break;
  1466. case 10:
  1467. result = gen10_init_device_state(device);
  1468. break;
  1469. case 11:
  1470. result = gen11_init_device_state(device);
  1471. break;
  1472. default:
  1473. /* Shouldn't get here as we don't create physical devices for any other
  1474. * gens. */
  1475. unreachable("unhandled gen");
  1476. }
  1477. if (result != VK_SUCCESS)
  1478. goto fail_workaround_bo;
  1479. anv_device_init_blorp(device);
  1480. anv_device_init_border_colors(device);
  1481. *pDevice = anv_device_to_handle(device);
  1482. return VK_SUCCESS;
  1483. fail_workaround_bo:
  1484. anv_queue_finish(&device->queue);
  1485. anv_scratch_pool_finish(device, &device->scratch_pool);
  1486. anv_gem_munmap(device->workaround_bo.map, device->workaround_bo.size);
  1487. anv_gem_close(device, device->workaround_bo.gem_handle);
  1488. fail_binding_table_pool:
  1489. if (physical_device->use_softpin)
  1490. anv_state_pool_finish(&device->binding_table_pool);
  1491. fail_surface_state_pool:
  1492. anv_state_pool_finish(&device->surface_state_pool);
  1493. fail_instruction_state_pool:
  1494. anv_state_pool_finish(&device->instruction_state_pool);
  1495. fail_dynamic_state_pool:
  1496. anv_state_pool_finish(&device->dynamic_state_pool);
  1497. fail_bo_cache:
  1498. anv_bo_cache_finish(&device->bo_cache);
  1499. fail_batch_bo_pool:
  1500. anv_bo_pool_finish(&device->batch_bo_pool);
  1501. pthread_cond_destroy(&device->queue_submit);
  1502. fail_mutex:
  1503. pthread_mutex_destroy(&device->mutex);
  1504. fail_context_id:
  1505. anv_gem_destroy_context(device, device->context_id);
  1506. fail_fd:
  1507. close(device->fd);
  1508. fail_device:
  1509. vk_free(&device->alloc, device);
  1510. return result;
  1511. }
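/* Note on the failure paths above: the fail_* labels are ordered as the exact
 * reverse of the initialization sequence, so jumping to any label tears down
 * only the resources that had already been created at that point (the usual
 * C goto-unwind ladder).
 */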
  1512. void anv_DestroyDevice(
  1513. VkDevice _device,
  1514. const VkAllocationCallbacks* pAllocator)
  1515. {
  1516. ANV_FROM_HANDLE(anv_device, device, _device);
1517. if (!device)
1518. return;
1519. struct anv_physical_device *physical_device = &device->instance->physicalDevice;
  1520. anv_device_finish_blorp(device);
  1521. anv_queue_finish(&device->queue);
  1522. #ifdef HAVE_VALGRIND
  1523. /* We only need to free these to prevent valgrind errors. The backing
  1524. * BO will go away in a couple of lines so we don't actually leak.
  1525. */
  1526. anv_state_pool_free(&device->dynamic_state_pool, device->border_colors);
  1527. #endif
  1528. anv_scratch_pool_finish(device, &device->scratch_pool);
  1529. anv_gem_munmap(device->workaround_bo.map, device->workaround_bo.size);
  1530. anv_vma_free(device, &device->workaround_bo);
  1531. anv_gem_close(device, device->workaround_bo.gem_handle);
  1532. anv_vma_free(device, &device->trivial_batch_bo);
  1533. anv_gem_close(device, device->trivial_batch_bo.gem_handle);
  1534. if (device->info.gen >= 10)
  1535. anv_gem_close(device, device->hiz_clear_bo.gem_handle);
  1536. if (physical_device->use_softpin)
  1537. anv_state_pool_finish(&device->binding_table_pool);
  1538. anv_state_pool_finish(&device->surface_state_pool);
  1539. anv_state_pool_finish(&device->instruction_state_pool);
  1540. anv_state_pool_finish(&device->dynamic_state_pool);
  1541. anv_bo_cache_finish(&device->bo_cache);
  1542. anv_bo_pool_finish(&device->batch_bo_pool);
  1543. pthread_cond_destroy(&device->queue_submit);
  1544. pthread_mutex_destroy(&device->mutex);
  1545. anv_gem_destroy_context(device, device->context_id);
  1546. close(device->fd);
  1547. vk_free(&device->alloc, device);
  1548. }
  1549. VkResult anv_EnumerateInstanceLayerProperties(
  1550. uint32_t* pPropertyCount,
  1551. VkLayerProperties* pProperties)
  1552. {
  1553. if (pProperties == NULL) {
  1554. *pPropertyCount = 0;
  1555. return VK_SUCCESS;
  1556. }
  1557. /* None supported at this time */
  1558. return vk_error(VK_ERROR_LAYER_NOT_PRESENT);
  1559. }
  1560. VkResult anv_EnumerateDeviceLayerProperties(
  1561. VkPhysicalDevice physicalDevice,
  1562. uint32_t* pPropertyCount,
  1563. VkLayerProperties* pProperties)
  1564. {
  1565. if (pProperties == NULL) {
  1566. *pPropertyCount = 0;
  1567. return VK_SUCCESS;
  1568. }
  1569. /* None supported at this time */
  1570. return vk_error(VK_ERROR_LAYER_NOT_PRESENT);
  1571. }
  1572. void anv_GetDeviceQueue(
  1573. VkDevice _device,
  1574. uint32_t queueNodeIndex,
  1575. uint32_t queueIndex,
  1576. VkQueue* pQueue)
  1577. {
  1578. ANV_FROM_HANDLE(anv_device, device, _device);
  1579. assert(queueIndex == 0);
  1580. *pQueue = anv_queue_to_handle(&device->queue);
  1581. }
  1582. void anv_GetDeviceQueue2(
  1583. VkDevice _device,
  1584. const VkDeviceQueueInfo2* pQueueInfo,
  1585. VkQueue* pQueue)
  1586. {
  1587. ANV_FROM_HANDLE(anv_device, device, _device);
  1588. assert(pQueueInfo->queueIndex == 0);
  1589. if (pQueueInfo->flags == device->queue.flags)
  1590. *pQueue = anv_queue_to_handle(&device->queue);
  1591. else
  1592. *pQueue = NULL;
  1593. }
  1594. VkResult
  1595. anv_device_query_status(struct anv_device *device)
  1596. {
  1597. /* This isn't likely as most of the callers of this function already check
  1598. * for it. However, it doesn't hurt to check and it potentially lets us
  1599. * avoid an ioctl.
  1600. */
  1601. if (unlikely(device->lost))
  1602. return VK_ERROR_DEVICE_LOST;
  1603. uint32_t active, pending;
  1604. int ret = anv_gem_gpu_get_reset_stats(device, &active, &pending);
  1605. if (ret == -1) {
  1606. /* We don't know the real error. */
  1607. device->lost = true;
  1608. return vk_errorf(device->instance, device, VK_ERROR_DEVICE_LOST,
  1609. "get_reset_stats failed: %m");
  1610. }
  1611. if (active) {
  1612. device->lost = true;
  1613. return vk_errorf(device->instance, device, VK_ERROR_DEVICE_LOST,
  1614. "GPU hung on one of our command buffers");
  1615. } else if (pending) {
  1616. device->lost = true;
  1617. return vk_errorf(device->instance, device, VK_ERROR_DEVICE_LOST,
  1618. "GPU hung with commands in-flight");
  1619. }
  1620. return VK_SUCCESS;
  1621. }
  1622. VkResult
  1623. anv_device_bo_busy(struct anv_device *device, struct anv_bo *bo)
  1624. {
  1625. /* Note: This only returns whether or not the BO is in use by an i915 GPU.
  1626. * Other usages of the BO (such as on different hardware) will not be
  1627. * flagged as "busy" by this ioctl. Use with care.
  1628. */
  1629. int ret = anv_gem_busy(device, bo->gem_handle);
  1630. if (ret == 1) {
  1631. return VK_NOT_READY;
  1632. } else if (ret == -1) {
  1633. /* We don't know the real error. */
  1634. device->lost = true;
  1635. return vk_errorf(device->instance, device, VK_ERROR_DEVICE_LOST,
  1636. "gem wait failed: %m");
  1637. }
  1638. /* Query for device status after the busy call. If the BO we're checking
  1639. * got caught in a GPU hang we don't want to return VK_SUCCESS to the
  1640. * client because it clearly doesn't have valid data. Yes, this most
  1641. * likely means an ioctl, but we just did an ioctl to query the busy status
  1642. * so it's no great loss.
  1643. */
  1644. return anv_device_query_status(device);
  1645. }
  1646. VkResult
  1647. anv_device_wait(struct anv_device *device, struct anv_bo *bo,
  1648. int64_t timeout)
  1649. {
  1650. int ret = anv_gem_wait(device, bo->gem_handle, &timeout);
  1651. if (ret == -1 && errno == ETIME) {
  1652. return VK_TIMEOUT;
  1653. } else if (ret == -1) {
  1654. /* We don't know the real error. */
  1655. device->lost = true;
  1656. return vk_errorf(device->instance, device, VK_ERROR_DEVICE_LOST,
  1657. "gem wait failed: %m");
  1658. }
  1659. /* Query for device status after the wait. If the BO we're waiting on got
  1660. * caught in a GPU hang we don't want to return VK_SUCCESS to the client
  1661. * because it clearly doesn't have valid data. Yes, this most likely means
  1662. * an ioctl, but we just did an ioctl to wait so it's no great loss.
  1663. */
  1664. return anv_device_query_status(device);
  1665. }
  1666. VkResult anv_DeviceWaitIdle(
  1667. VkDevice _device)
  1668. {
  1669. ANV_FROM_HANDLE(anv_device, device, _device);
  1670. if (unlikely(device->lost))
  1671. return VK_ERROR_DEVICE_LOST;
  1672. struct anv_batch batch;
  1673. uint32_t cmds[8];
  1674. batch.start = batch.next = cmds;
  1675. batch.end = (void *) cmds + sizeof(cmds);
  1676. anv_batch_emit(&batch, GEN7_MI_BATCH_BUFFER_END, bbe);
  1677. anv_batch_emit(&batch, GEN7_MI_NOOP, noop);
  1678. return anv_device_submit_simple_batch(device, &batch);
  1679. }
  1680. bool
  1681. anv_vma_alloc(struct anv_device *device, struct anv_bo *bo)
  1682. {
  1683. if (!(bo->flags & EXEC_OBJECT_PINNED))
  1684. return true;
  1685. pthread_mutex_lock(&device->vma_mutex);
  1686. bo->offset = 0;
  1687. if (bo->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS &&
  1688. device->vma_hi_available >= bo->size) {
  1689. uint64_t addr = util_vma_heap_alloc(&device->vma_hi, bo->size, 4096);
  1690. if (addr) {
  1691. bo->offset = gen_canonical_address(addr);
  1692. assert(addr == gen_48b_address(bo->offset));
  1693. device->vma_hi_available -= bo->size;
  1694. }
  1695. }
  1696. if (bo->offset == 0 && device->vma_lo_available >= bo->size) {
  1697. uint64_t addr = util_vma_heap_alloc(&device->vma_lo, bo->size, 4096);
  1698. if (addr) {
  1699. bo->offset = gen_canonical_address(addr);
  1700. assert(addr == gen_48b_address(bo->offset));
  1701. device->vma_lo_available -= bo->size;
  1702. }
  1703. }
  1704. pthread_mutex_unlock(&device->vma_mutex);
  1705. return bo->offset != 0;
  1706. }
  1707. void
  1708. anv_vma_free(struct anv_device *device, struct anv_bo *bo)
  1709. {
  1710. if (!(bo->flags & EXEC_OBJECT_PINNED))
  1711. return;
  1712. const uint64_t addr_48b = gen_48b_address(bo->offset);
  1713. pthread_mutex_lock(&device->vma_mutex);
  1714. if (addr_48b >= LOW_HEAP_MIN_ADDRESS &&
  1715. addr_48b <= LOW_HEAP_MAX_ADDRESS) {
  1716. util_vma_heap_free(&device->vma_lo, addr_48b, bo->size);
  1717. device->vma_lo_available += bo->size;
  1718. } else {
  1719. assert(addr_48b >= HIGH_HEAP_MIN_ADDRESS &&
  1720. addr_48b <= HIGH_HEAP_MAX_ADDRESS);
  1721. util_vma_heap_free(&device->vma_hi, addr_48b, bo->size);
  1722. device->vma_hi_available += bo->size;
  1723. }
  1724. pthread_mutex_unlock(&device->vma_mutex);
  1725. bo->offset = 0;
  1726. }
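/* Address-handling sketch (illustrative values): softpin BOs land either in
 * the low heap (32-bit-safe addresses) or in the high heap (full 48-bit
 * addresses). The offset stored in the BO is the canonical form of the 48-bit
 * address, i.e. bit 47 sign-extended into bits 48..63, for example
 *
 *    addr_48b = 0x0000800000001000  ->  bo->offset = 0xffff800000001000
 *
 * and gen_48b_address() strips that sign extension again before the heap
 * free above.
 */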
  1727. VkResult
  1728. anv_bo_init_new(struct anv_bo *bo, struct anv_device *device, uint64_t size)
  1729. {
  1730. uint32_t gem_handle = anv_gem_create(device, size);
  1731. if (!gem_handle)
  1732. return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
  1733. anv_bo_init(bo, gem_handle, size);
  1734. return VK_SUCCESS;
  1735. }
  1736. VkResult anv_AllocateMemory(
  1737. VkDevice _device,
  1738. const VkMemoryAllocateInfo* pAllocateInfo,
  1739. const VkAllocationCallbacks* pAllocator,
  1740. VkDeviceMemory* pMem)
  1741. {
  1742. ANV_FROM_HANDLE(anv_device, device, _device);
  1743. struct anv_physical_device *pdevice = &device->instance->physicalDevice;
  1744. struct anv_device_memory *mem;
  1745. VkResult result = VK_SUCCESS;
  1746. assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);
  1747. /* The Vulkan 1.0.33 spec says "allocationSize must be greater than 0". */
  1748. assert(pAllocateInfo->allocationSize > 0);
  1749. if (pAllocateInfo->allocationSize > MAX_MEMORY_ALLOCATION_SIZE)
  1750. return VK_ERROR_OUT_OF_DEVICE_MEMORY;
  1751. /* FINISHME: Fail if allocation request exceeds heap size. */
  1752. mem = vk_alloc2(&device->alloc, pAllocator, sizeof(*mem), 8,
  1753. VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
  1754. if (mem == NULL)
  1755. return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
  1756. assert(pAllocateInfo->memoryTypeIndex < pdevice->memory.type_count);
  1757. mem->type = &pdevice->memory.types[pAllocateInfo->memoryTypeIndex];
  1758. mem->map = NULL;
  1759. mem->map_size = 0;
  1760. uint64_t bo_flags = 0;
  1761. assert(mem->type->heapIndex < pdevice->memory.heap_count);
  1762. if (pdevice->memory.heaps[mem->type->heapIndex].supports_48bit_addresses)
  1763. bo_flags |= EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
  1764. const struct wsi_memory_allocate_info *wsi_info =
  1765. vk_find_struct_const(pAllocateInfo->pNext, WSI_MEMORY_ALLOCATE_INFO_MESA);
  1766. if (wsi_info && wsi_info->implicit_sync) {
  1767. /* We need to set the WRITE flag on window system buffers so that GEM
  1768. * will know we're writing to them and synchronize uses on other rings
  1769. * (eg if the display server uses the blitter ring).
  1770. */
  1771. bo_flags |= EXEC_OBJECT_WRITE;
  1772. } else if (pdevice->has_exec_async) {
  1773. bo_flags |= EXEC_OBJECT_ASYNC;
  1774. }
  1775. if (pdevice->use_softpin)
  1776. bo_flags |= EXEC_OBJECT_PINNED;
  1777. const VkImportMemoryFdInfoKHR *fd_info =
  1778. vk_find_struct_const(pAllocateInfo->pNext, IMPORT_MEMORY_FD_INFO_KHR);
  1779. /* The Vulkan spec permits handleType to be 0, in which case the struct is
  1780. * ignored.
  1781. */
  1782. if (fd_info && fd_info->handleType) {
  1783. /* At the moment, we support only the below handle types. */
  1784. assert(fd_info->handleType ==
  1785. VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
  1786. fd_info->handleType ==
  1787. VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
  1788. result = anv_bo_cache_import(device, &device->bo_cache,
  1789. fd_info->fd, bo_flags, &mem->bo);
  1790. if (result != VK_SUCCESS)
  1791. goto fail;
  1792. VkDeviceSize aligned_alloc_size =
  1793. align_u64(pAllocateInfo->allocationSize, 4096);
  1794. /* For security purposes, we reject importing the bo if it's smaller
  1795. * than the requested allocation size. This prevents a malicious client
  1796. * from passing a buffer to a trusted client, lying about the size, and
  1797. * telling the trusted client to try and texture from an image that goes
  1798. * out-of-bounds. This sort of thing could lead to GPU hangs or worse
  1799. * in the trusted client. The trusted client can protect itself against
  1800. * this sort of attack but only if it can trust the buffer size.
  1801. */
  1802. if (mem->bo->size < aligned_alloc_size) {
  1803. result = vk_errorf(device->instance, device,
  1804. VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR,
  1805. "aligned allocationSize too large for "
  1806. "VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR: "
  1807. "%"PRIu64"B > %"PRIu64"B",
  1808. aligned_alloc_size, mem->bo->size);
  1809. anv_bo_cache_release(device, &device->bo_cache, mem->bo);
  1810. goto fail;
  1811. }
  1812. /* From the Vulkan spec:
  1813. *
  1814. * "Importing memory from a file descriptor transfers ownership of
  1815. * the file descriptor from the application to the Vulkan
  1816. * implementation. The application must not perform any operations on
  1817. * the file descriptor after a successful import."
  1818. *
  1819. * If the import fails, we leave the file descriptor open.
  1820. */
  1821. close(fd_info->fd);
  1822. } else {
  1823. result = anv_bo_cache_alloc(device, &device->bo_cache,
  1824. pAllocateInfo->allocationSize, bo_flags,
  1825. &mem->bo);
  1826. if (result != VK_SUCCESS)
  1827. goto fail;
  1828. const VkMemoryDedicatedAllocateInfoKHR *dedicated_info =
  1829. vk_find_struct_const(pAllocateInfo->pNext, MEMORY_DEDICATED_ALLOCATE_INFO_KHR);
  1830. if (dedicated_info && dedicated_info->image != VK_NULL_HANDLE) {
  1831. ANV_FROM_HANDLE(anv_image, image, dedicated_info->image);
  1832. /* Some legacy (non-modifiers) consumers need the tiling to be set on
  1833. * the BO. In this case, we have a dedicated allocation.
  1834. */
  1835. if (image->needs_set_tiling) {
  1836. const uint32_t i915_tiling =
  1837. isl_tiling_to_i915_tiling(image->planes[0].surface.isl.tiling);
  1838. int ret = anv_gem_set_tiling(device, mem->bo->gem_handle,
  1839. image->planes[0].surface.isl.row_pitch,
  1840. i915_tiling);
  1841. if (ret) {
  1842. anv_bo_cache_release(device, &device->bo_cache, mem->bo);
  1843. return vk_errorf(device->instance, NULL,
  1844. VK_ERROR_OUT_OF_DEVICE_MEMORY,
  1845. "failed to set BO tiling: %m");
  1846. }
  1847. }
  1848. }
  1849. }
  1850. *pMem = anv_device_memory_to_handle(mem);
  1851. return VK_SUCCESS;
  1852. fail:
  1853. vk_free2(&device->alloc, pAllocator, mem);
  1854. return result;
  1855. }
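/* Worked example for the imported-BO size check above (numbers are
 * illustrative): with allocationSize = 5000 the driver requires the imported
 * BO to be at least align_u64(5000, 4096) = 8192 bytes; an imported BO of only
 * 4096 bytes is rejected with VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR instead of
 * risking out-of-bounds GPU access in the importing client.
 */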
  1856. VkResult anv_GetMemoryFdKHR(
  1857. VkDevice device_h,
  1858. const VkMemoryGetFdInfoKHR* pGetFdInfo,
  1859. int* pFd)
  1860. {
  1861. ANV_FROM_HANDLE(anv_device, dev, device_h);
  1862. ANV_FROM_HANDLE(anv_device_memory, mem, pGetFdInfo->memory);
  1863. assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR);
  1864. assert(pGetFdInfo->handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
  1865. pGetFdInfo->handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
  1866. return anv_bo_cache_export(dev, &dev->bo_cache, mem->bo, pFd);
  1867. }
  1868. VkResult anv_GetMemoryFdPropertiesKHR(
  1869. VkDevice _device,
  1870. VkExternalMemoryHandleTypeFlagBitsKHR handleType,
  1871. int fd,
  1872. VkMemoryFdPropertiesKHR* pMemoryFdProperties)
  1873. {
  1874. ANV_FROM_HANDLE(anv_device, device, _device);
  1875. struct anv_physical_device *pdevice = &device->instance->physicalDevice;
  1876. switch (handleType) {
  1877. case VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT:
  1878. /* dma-buf can be imported as any memory type */
  1879. pMemoryFdProperties->memoryTypeBits =
  1880. (1 << pdevice->memory.type_count) - 1;
  1881. return VK_SUCCESS;
  1882. default:
  1883. /* The valid usage section for this function says:
  1884. *
  1885. * "handleType must not be one of the handle types defined as
  1886. * opaque."
  1887. *
  1888. * So opaque handle types fall into the default "unsupported" case.
  1889. */
  1890. return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
  1891. }
  1892. }
  1893. void anv_FreeMemory(
  1894. VkDevice _device,
  1895. VkDeviceMemory _mem,
  1896. const VkAllocationCallbacks* pAllocator)
  1897. {
  1898. ANV_FROM_HANDLE(anv_device, device, _device);
  1899. ANV_FROM_HANDLE(anv_device_memory, mem, _mem);
  1900. if (mem == NULL)
  1901. return;
  1902. if (mem->map)
  1903. anv_UnmapMemory(_device, _mem);
  1904. anv_bo_cache_release(device, &device->bo_cache, mem->bo);
  1905. vk_free2(&device->alloc, pAllocator, mem);
  1906. }
  1907. VkResult anv_MapMemory(
  1908. VkDevice _device,
  1909. VkDeviceMemory _memory,
  1910. VkDeviceSize offset,
  1911. VkDeviceSize size,
  1912. VkMemoryMapFlags flags,
  1913. void** ppData)
  1914. {
  1915. ANV_FROM_HANDLE(anv_device, device, _device);
  1916. ANV_FROM_HANDLE(anv_device_memory, mem, _memory);
  1917. if (mem == NULL) {
  1918. *ppData = NULL;
  1919. return VK_SUCCESS;
  1920. }
  1921. if (size == VK_WHOLE_SIZE)
  1922. size = mem->bo->size - offset;
  1923. /* From the Vulkan spec version 1.0.32 docs for MapMemory:
  1924. *
  1925. * * If size is not equal to VK_WHOLE_SIZE, size must be greater than 0
  1926. * assert(size != 0);
  1927. * * If size is not equal to VK_WHOLE_SIZE, size must be less than or
  1928. * equal to the size of the memory minus offset
  1929. */
  1930. assert(size > 0);
  1931. assert(offset + size <= mem->bo->size);
  1932. /* FIXME: Is this supposed to be thread safe? Since vkUnmapMemory() only
  1933. * takes a VkDeviceMemory pointer, it seems like only one map of the memory
  1934. * at a time is valid. We could just mmap up front and return an offset
  1935. * pointer here, but that may exhaust virtual memory on 32 bit
  1936. * userspace. */
  1937. uint32_t gem_flags = 0;
  1938. if (!device->info.has_llc &&
  1939. (mem->type->propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT))
  1940. gem_flags |= I915_MMAP_WC;
  1941. /* GEM will fail to map if the offset isn't 4k-aligned. Round down. */
  1942. uint64_t map_offset = offset & ~4095ull;
  1943. assert(offset >= map_offset);
  1944. uint64_t map_size = (offset + size) - map_offset;
  1945. /* Let's map whole pages */
  1946. map_size = align_u64(map_size, 4096);
  1947. void *map = anv_gem_mmap(device, mem->bo->gem_handle,
  1948. map_offset, map_size, gem_flags);
  1949. if (map == MAP_FAILED)
  1950. return vk_error(VK_ERROR_MEMORY_MAP_FAILED);
  1951. mem->map = map;
  1952. mem->map_size = map_size;
  1953. *ppData = mem->map + (offset - map_offset);
  1954. return VK_SUCCESS;
  1955. }
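/* Worked example of the page rounding above (illustrative numbers):
 * offset = 5000, size = 100 gives
 *    map_offset = 5000 & ~4095                         = 4096
 *    map_size   = align_u64((5000 + 100) - 4096, 4096) = 4096
 *    *ppData    = map + (5000 - 4096)                  = map + 904
 * so GEM always maps whole 4 KiB-aligned pages while the client still gets a
 * pointer to exactly the byte range it asked for.
 */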
  1956. void anv_UnmapMemory(
  1957. VkDevice _device,
  1958. VkDeviceMemory _memory)
  1959. {
  1960. ANV_FROM_HANDLE(anv_device_memory, mem, _memory);
  1961. if (mem == NULL)
  1962. return;
  1963. anv_gem_munmap(mem->map, mem->map_size);
  1964. mem->map = NULL;
  1965. mem->map_size = 0;
  1966. }
  1967. static void
  1968. clflush_mapped_ranges(struct anv_device *device,
  1969. uint32_t count,
  1970. const VkMappedMemoryRange *ranges)
  1971. {
  1972. for (uint32_t i = 0; i < count; i++) {
  1973. ANV_FROM_HANDLE(anv_device_memory, mem, ranges[i].memory);
  1974. if (ranges[i].offset >= mem->map_size)
  1975. continue;
  1976. gen_clflush_range(mem->map + ranges[i].offset,
  1977. MIN2(ranges[i].size, mem->map_size - ranges[i].offset));
  1978. }
  1979. }
  1980. VkResult anv_FlushMappedMemoryRanges(
  1981. VkDevice _device,
  1982. uint32_t memoryRangeCount,
  1983. const VkMappedMemoryRange* pMemoryRanges)
  1984. {
  1985. ANV_FROM_HANDLE(anv_device, device, _device);
  1986. if (device->info.has_llc)
  1987. return VK_SUCCESS;
  1988. /* Make sure the writes we're flushing have landed. */
  1989. __builtin_ia32_mfence();
  1990. clflush_mapped_ranges(device, memoryRangeCount, pMemoryRanges);
  1991. return VK_SUCCESS;
  1992. }
  1993. VkResult anv_InvalidateMappedMemoryRanges(
  1994. VkDevice _device,
  1995. uint32_t memoryRangeCount,
  1996. const VkMappedMemoryRange* pMemoryRanges)
  1997. {
  1998. ANV_FROM_HANDLE(anv_device, device, _device);
  1999. if (device->info.has_llc)
  2000. return VK_SUCCESS;
  2001. clflush_mapped_ranges(device, memoryRangeCount, pMemoryRanges);
  2002. /* Make sure no reads get moved up above the invalidate. */
  2003. __builtin_ia32_mfence();
  2004. return VK_SUCCESS;
  2005. }
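/* Ordering note for the two functions above (non-LLC platforms only): Flush
 * issues the mfence before clflush so that CPU writes are globally visible
 * before their cache lines are written back for the GPU, while Invalidate
 * issues clflush before the mfence so that stale lines are dropped before any
 * later CPU read may proceed. On LLC platforms both calls return early
 * because the GPU snoops the CPU caches.
 */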
  2006. void anv_GetBufferMemoryRequirements(
  2007. VkDevice _device,
  2008. VkBuffer _buffer,
  2009. VkMemoryRequirements* pMemoryRequirements)
  2010. {
  2011. ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
  2012. ANV_FROM_HANDLE(anv_device, device, _device);
  2013. struct anv_physical_device *pdevice = &device->instance->physicalDevice;
  2014. /* The Vulkan spec (git aaed022) says:
  2015. *
  2016. * memoryTypeBits is a bitfield and contains one bit set for every
  2017. * supported memory type for the resource. The bit `1<<i` is set if and
  2018. * only if the memory type `i` in the VkPhysicalDeviceMemoryProperties
  2019. * structure for the physical device is supported.
  2020. */
  2021. uint32_t memory_types = 0;
  2022. for (uint32_t i = 0; i < pdevice->memory.type_count; i++) {
  2023. uint32_t valid_usage = pdevice->memory.types[i].valid_buffer_usage;
  2024. if ((valid_usage & buffer->usage) == buffer->usage)
  2025. memory_types |= (1u << i);
  2026. }
  2027. /* Base alignment requirement of a cache line */
  2028. uint32_t alignment = 16;
  2029. /* We need an alignment of 32 for pushing UBOs */
  2030. if (buffer->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT)
  2031. alignment = MAX2(alignment, 32);
  2032. pMemoryRequirements->size = buffer->size;
  2033. pMemoryRequirements->alignment = alignment;
  2034. /* Storage and Uniform buffers should have their size aligned to
2035. * 32 bits to avoid boundary checks when the last DWord is not complete.
2036. * This ensures that no internal padding is needed for
  2037. * 16-bit types.
  2038. */
  2039. if (device->robust_buffer_access &&
  2040. (buffer->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT ||
  2041. buffer->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT))
  2042. pMemoryRequirements->size = align_u64(buffer->size, 4);
  2043. pMemoryRequirements->memoryTypeBits = memory_types;
  2044. }
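/* Worked example (illustrative): with three memory types of which only types
 * 0 and 2 have valid_buffer_usage covering this buffer's usage flags, the
 * loop above yields memoryTypeBits = (1u << 0) | (1u << 2) = 0b101, i.e. the
 * buffer may only be bound to memory allocated from type 0 or type 2.
 */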
  2045. void anv_GetBufferMemoryRequirements2(
  2046. VkDevice _device,
  2047. const VkBufferMemoryRequirementsInfo2* pInfo,
  2048. VkMemoryRequirements2* pMemoryRequirements)
  2049. {
  2050. anv_GetBufferMemoryRequirements(_device, pInfo->buffer,
  2051. &pMemoryRequirements->memoryRequirements);
  2052. vk_foreach_struct(ext, pMemoryRequirements->pNext) {
  2053. switch (ext->sType) {
  2054. case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS: {
  2055. VkMemoryDedicatedRequirements *requirements = (void *)ext;
  2056. requirements->prefersDedicatedAllocation = VK_FALSE;
  2057. requirements->requiresDedicatedAllocation = VK_FALSE;
  2058. break;
  2059. }
  2060. default:
  2061. anv_debug_ignored_stype(ext->sType);
  2062. break;
  2063. }
  2064. }
  2065. }
  2066. void anv_GetImageMemoryRequirements(
  2067. VkDevice _device,
  2068. VkImage _image,
  2069. VkMemoryRequirements* pMemoryRequirements)
  2070. {
  2071. ANV_FROM_HANDLE(anv_image, image, _image);
  2072. ANV_FROM_HANDLE(anv_device, device, _device);
  2073. struct anv_physical_device *pdevice = &device->instance->physicalDevice;
  2074. /* The Vulkan spec (git aaed022) says:
  2075. *
  2076. * memoryTypeBits is a bitfield and contains one bit set for every
  2077. * supported memory type for the resource. The bit `1<<i` is set if and
  2078. * only if the memory type `i` in the VkPhysicalDeviceMemoryProperties
  2079. * structure for the physical device is supported.
  2080. *
  2081. * All types are currently supported for images.
  2082. */
  2083. uint32_t memory_types = (1ull << pdevice->memory.type_count) - 1;
  2084. pMemoryRequirements->size = image->size;
  2085. pMemoryRequirements->alignment = image->alignment;
  2086. pMemoryRequirements->memoryTypeBits = memory_types;
  2087. }
  2088. void anv_GetImageMemoryRequirements2(
  2089. VkDevice _device,
  2090. const VkImageMemoryRequirementsInfo2* pInfo,
  2091. VkMemoryRequirements2* pMemoryRequirements)
  2092. {
  2093. ANV_FROM_HANDLE(anv_device, device, _device);
  2094. ANV_FROM_HANDLE(anv_image, image, pInfo->image);
  2095. anv_GetImageMemoryRequirements(_device, pInfo->image,
  2096. &pMemoryRequirements->memoryRequirements);
  2097. vk_foreach_struct_const(ext, pInfo->pNext) {
  2098. switch (ext->sType) {
  2099. case VK_STRUCTURE_TYPE_IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO: {
  2100. struct anv_physical_device *pdevice = &device->instance->physicalDevice;
  2101. const VkImagePlaneMemoryRequirementsInfoKHR *plane_reqs =
  2102. (const VkImagePlaneMemoryRequirementsInfoKHR *) ext;
  2103. uint32_t plane = anv_image_aspect_to_plane(image->aspects,
  2104. plane_reqs->planeAspect);
  2105. assert(image->planes[plane].offset == 0);
  2106. /* The Vulkan spec (git aaed022) says:
  2107. *
  2108. * memoryTypeBits is a bitfield and contains one bit set for every
  2109. * supported memory type for the resource. The bit `1<<i` is set
  2110. * if and only if the memory type `i` in the
  2111. * VkPhysicalDeviceMemoryProperties structure for the physical
  2112. * device is supported.
  2113. *
  2114. * All types are currently supported for images.
  2115. */
  2116. pMemoryRequirements->memoryRequirements.memoryTypeBits =
  2117. (1ull << pdevice->memory.type_count) - 1;
  2118. pMemoryRequirements->memoryRequirements.size = image->planes[plane].size;
  2119. pMemoryRequirements->memoryRequirements.alignment =
  2120. image->planes[plane].alignment;
  2121. break;
  2122. }
  2123. default:
  2124. anv_debug_ignored_stype(ext->sType);
  2125. break;
  2126. }
  2127. }
  2128. vk_foreach_struct(ext, pMemoryRequirements->pNext) {
  2129. switch (ext->sType) {
  2130. case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS: {
  2131. VkMemoryDedicatedRequirements *requirements = (void *)ext;
  2132. if (image->needs_set_tiling) {
  2133. /* If we need to set the tiling for external consumers, we need a
  2134. * dedicated allocation.
  2135. *
  2136. * See also anv_AllocateMemory.
  2137. */
  2138. requirements->prefersDedicatedAllocation = VK_TRUE;
  2139. requirements->requiresDedicatedAllocation = VK_TRUE;
  2140. } else {
  2141. requirements->prefersDedicatedAllocation = VK_FALSE;
  2142. requirements->requiresDedicatedAllocation = VK_FALSE;
  2143. }
  2144. break;
  2145. }
  2146. default:
  2147. anv_debug_ignored_stype(ext->sType);
  2148. break;
  2149. }
  2150. }
  2151. }
  2152. void anv_GetImageSparseMemoryRequirements(
  2153. VkDevice device,
  2154. VkImage image,
  2155. uint32_t* pSparseMemoryRequirementCount,
  2156. VkSparseImageMemoryRequirements* pSparseMemoryRequirements)
  2157. {
  2158. *pSparseMemoryRequirementCount = 0;
  2159. }
  2160. void anv_GetImageSparseMemoryRequirements2(
  2161. VkDevice device,
  2162. const VkImageSparseMemoryRequirementsInfo2* pInfo,
  2163. uint32_t* pSparseMemoryRequirementCount,
  2164. VkSparseImageMemoryRequirements2* pSparseMemoryRequirements)
  2165. {
  2166. *pSparseMemoryRequirementCount = 0;
  2167. }
  2168. void anv_GetDeviceMemoryCommitment(
  2169. VkDevice device,
  2170. VkDeviceMemory memory,
  2171. VkDeviceSize* pCommittedMemoryInBytes)
  2172. {
  2173. *pCommittedMemoryInBytes = 0;
  2174. }
  2175. static void
  2176. anv_bind_buffer_memory(const VkBindBufferMemoryInfo *pBindInfo)
  2177. {
  2178. ANV_FROM_HANDLE(anv_device_memory, mem, pBindInfo->memory);
  2179. ANV_FROM_HANDLE(anv_buffer, buffer, pBindInfo->buffer);
  2180. assert(pBindInfo->sType == VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO);
  2181. if (mem) {
  2182. assert((buffer->usage & mem->type->valid_buffer_usage) == buffer->usage);
  2183. buffer->address = (struct anv_address) {
  2184. .bo = mem->bo,
  2185. .offset = pBindInfo->memoryOffset,
  2186. };
  2187. } else {
  2188. buffer->address = ANV_NULL_ADDRESS;
  2189. }
  2190. }
  2191. VkResult anv_BindBufferMemory(
  2192. VkDevice device,
  2193. VkBuffer buffer,
  2194. VkDeviceMemory memory,
  2195. VkDeviceSize memoryOffset)
  2196. {
  2197. anv_bind_buffer_memory(
  2198. &(VkBindBufferMemoryInfo) {
  2199. .sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO,
  2200. .buffer = buffer,
  2201. .memory = memory,
  2202. .memoryOffset = memoryOffset,
  2203. });
  2204. return VK_SUCCESS;
  2205. }
  2206. VkResult anv_BindBufferMemory2(
  2207. VkDevice device,
  2208. uint32_t bindInfoCount,
  2209. const VkBindBufferMemoryInfo* pBindInfos)
  2210. {
  2211. for (uint32_t i = 0; i < bindInfoCount; i++)
  2212. anv_bind_buffer_memory(&pBindInfos[i]);
  2213. return VK_SUCCESS;
  2214. }
  2215. VkResult anv_QueueBindSparse(
  2216. VkQueue _queue,
  2217. uint32_t bindInfoCount,
  2218. const VkBindSparseInfo* pBindInfo,
  2219. VkFence fence)
  2220. {
  2221. ANV_FROM_HANDLE(anv_queue, queue, _queue);
  2222. if (unlikely(queue->device->lost))
  2223. return VK_ERROR_DEVICE_LOST;
  2224. return vk_error(VK_ERROR_FEATURE_NOT_PRESENT);
  2225. }
  2226. // Event functions
  2227. VkResult anv_CreateEvent(
  2228. VkDevice _device,
  2229. const VkEventCreateInfo* pCreateInfo,
  2230. const VkAllocationCallbacks* pAllocator,
  2231. VkEvent* pEvent)
  2232. {
  2233. ANV_FROM_HANDLE(anv_device, device, _device);
  2234. struct anv_state state;
  2235. struct anv_event *event;
  2236. assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_EVENT_CREATE_INFO);
  2237. state = anv_state_pool_alloc(&device->dynamic_state_pool,
  2238. sizeof(*event), 8);
  2239. event = state.map;
  2240. event->state = state;
  2241. event->semaphore = VK_EVENT_RESET;
  2242. if (!device->info.has_llc) {
  2243. /* Make sure the writes we're flushing have landed. */
  2244. __builtin_ia32_mfence();
  2245. __builtin_ia32_clflush(event);
  2246. }
  2247. *pEvent = anv_event_to_handle(event);
  2248. return VK_SUCCESS;
  2249. }
  2250. void anv_DestroyEvent(
  2251. VkDevice _device,
  2252. VkEvent _event,
  2253. const VkAllocationCallbacks* pAllocator)
  2254. {
  2255. ANV_FROM_HANDLE(anv_device, device, _device);
  2256. ANV_FROM_HANDLE(anv_event, event, _event);
  2257. if (!event)
  2258. return;
  2259. anv_state_pool_free(&device->dynamic_state_pool, event->state);
  2260. }
  2261. VkResult anv_GetEventStatus(
  2262. VkDevice _device,
  2263. VkEvent _event)
  2264. {
  2265. ANV_FROM_HANDLE(anv_device, device, _device);
  2266. ANV_FROM_HANDLE(anv_event, event, _event);
  2267. if (unlikely(device->lost))
  2268. return VK_ERROR_DEVICE_LOST;
  2269. if (!device->info.has_llc) {
  2270. /* Invalidate read cache before reading event written by GPU. */
  2271. __builtin_ia32_clflush(event);
  2272. __builtin_ia32_mfence();
  2273. }
  2274. return event->semaphore;
  2275. }
  2276. VkResult anv_SetEvent(
  2277. VkDevice _device,
  2278. VkEvent _event)
  2279. {
  2280. ANV_FROM_HANDLE(anv_device, device, _device);
  2281. ANV_FROM_HANDLE(anv_event, event, _event);
  2282. event->semaphore = VK_EVENT_SET;
  2283. if (!device->info.has_llc) {
  2284. /* Make sure the writes we're flushing have landed. */
  2285. __builtin_ia32_mfence();
  2286. __builtin_ia32_clflush(event);
  2287. }
  2288. return VK_SUCCESS;
  2289. }
  2290. VkResult anv_ResetEvent(
  2291. VkDevice _device,
  2292. VkEvent _event)
  2293. {
  2294. ANV_FROM_HANDLE(anv_device, device, _device);
  2295. ANV_FROM_HANDLE(anv_event, event, _event);
  2296. event->semaphore = VK_EVENT_RESET;
  2297. if (!device->info.has_llc) {
  2298. /* Make sure the writes we're flushing have landed. */
  2299. __builtin_ia32_mfence();
  2300. __builtin_ia32_clflush(event);
  2301. }
  2302. return VK_SUCCESS;
  2303. }
  2304. // Buffer functions
  2305. VkResult anv_CreateBuffer(
  2306. VkDevice _device,
  2307. const VkBufferCreateInfo* pCreateInfo,
  2308. const VkAllocationCallbacks* pAllocator,
  2309. VkBuffer* pBuffer)
  2310. {
  2311. ANV_FROM_HANDLE(anv_device, device, _device);
  2312. struct anv_buffer *buffer;
  2313. assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
  2314. buffer = vk_alloc2(&device->alloc, pAllocator, sizeof(*buffer), 8,
  2315. VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
  2316. if (buffer == NULL)
  2317. return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
  2318. buffer->size = pCreateInfo->size;
  2319. buffer->usage = pCreateInfo->usage;
  2320. buffer->address = ANV_NULL_ADDRESS;
  2321. *pBuffer = anv_buffer_to_handle(buffer);
  2322. return VK_SUCCESS;
  2323. }
  2324. void anv_DestroyBuffer(
  2325. VkDevice _device,
  2326. VkBuffer _buffer,
  2327. const VkAllocationCallbacks* pAllocator)
  2328. {
  2329. ANV_FROM_HANDLE(anv_device, device, _device);
  2330. ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
  2331. if (!buffer)
  2332. return;
  2333. vk_free2(&device->alloc, pAllocator, buffer);
  2334. }
  2335. void
  2336. anv_fill_buffer_surface_state(struct anv_device *device, struct anv_state state,
  2337. enum isl_format format,
  2338. struct anv_address address,
  2339. uint32_t range, uint32_t stride)
  2340. {
  2341. isl_buffer_fill_state(&device->isl_dev, state.map,
  2342. .address = anv_address_physical(address),
  2343. .mocs = device->default_mocs,
  2344. .size = range,
  2345. .format = format,
  2346. .stride = stride);
  2347. anv_state_flush(device, state);
  2348. }
  2349. void anv_DestroySampler(
  2350. VkDevice _device,
  2351. VkSampler _sampler,
  2352. const VkAllocationCallbacks* pAllocator)
  2353. {
  2354. ANV_FROM_HANDLE(anv_device, device, _device);
  2355. ANV_FROM_HANDLE(anv_sampler, sampler, _sampler);
  2356. if (!sampler)
  2357. return;
  2358. vk_free2(&device->alloc, pAllocator, sampler);
  2359. }
  2360. VkResult anv_CreateFramebuffer(
  2361. VkDevice _device,
  2362. const VkFramebufferCreateInfo* pCreateInfo,
  2363. const VkAllocationCallbacks* pAllocator,
  2364. VkFramebuffer* pFramebuffer)
  2365. {
  2366. ANV_FROM_HANDLE(anv_device, device, _device);
  2367. struct anv_framebuffer *framebuffer;
  2368. assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);
  2369. size_t size = sizeof(*framebuffer) +
  2370. sizeof(struct anv_image_view *) * pCreateInfo->attachmentCount;
  2371. framebuffer = vk_alloc2(&device->alloc, pAllocator, size, 8,
  2372. VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
  2373. if (framebuffer == NULL)
  2374. return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
  2375. framebuffer->attachment_count = pCreateInfo->attachmentCount;
  2376. for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
  2377. VkImageView _iview = pCreateInfo->pAttachments[i];
  2378. framebuffer->attachments[i] = anv_image_view_from_handle(_iview);
  2379. }
  2380. framebuffer->width = pCreateInfo->width;
  2381. framebuffer->height = pCreateInfo->height;
  2382. framebuffer->layers = pCreateInfo->layers;
  2383. *pFramebuffer = anv_framebuffer_to_handle(framebuffer);
  2384. return VK_SUCCESS;
  2385. }
  2386. void anv_DestroyFramebuffer(
  2387. VkDevice _device,
  2388. VkFramebuffer _fb,
  2389. const VkAllocationCallbacks* pAllocator)
  2390. {
  2391. ANV_FROM_HANDLE(anv_device, device, _device);
  2392. ANV_FROM_HANDLE(anv_framebuffer, fb, _fb);
  2393. if (!fb)
  2394. return;
  2395. vk_free2(&device->alloc, pAllocator, fb);
  2396. }
  2397. /* vk_icd.h does not declare this function, so we declare it here to
2398. * suppress -Wmissing-prototypes.
  2399. */
  2400. PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
  2401. vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t* pSupportedVersion);
  2402. PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
  2403. vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t* pSupportedVersion)
  2404. {
  2405. /* For the full details on loader interface versioning, see
  2406. * <https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/blob/master/loader/LoaderAndLayerInterface.md>.
  2407. * What follows is a condensed summary, to help you navigate the large and
  2408. * confusing official doc.
  2409. *
  2410. * - Loader interface v0 is incompatible with later versions. We don't
  2411. * support it.
  2412. *
  2413. * - In loader interface v1:
  2414. * - The first ICD entrypoint called by the loader is
  2415. * vk_icdGetInstanceProcAddr(). The ICD must statically expose this
  2416. * entrypoint.
  2417. * - The ICD must statically expose no other Vulkan symbol unless it is
  2418. * linked with -Bsymbolic.
  2419. * - Each dispatchable Vulkan handle created by the ICD must be
  2420. * a pointer to a struct whose first member is VK_LOADER_DATA. The
2421. * ICD must initialize VK_LOADER_DATA.loaderMagic to ICD_LOADER_MAGIC.
  2422. * - The loader implements vkCreate{PLATFORM}SurfaceKHR() and
  2423. * vkDestroySurfaceKHR(). The ICD must be capable of working with
  2424. * such loader-managed surfaces.
  2425. *
  2426. * - Loader interface v2 differs from v1 in:
  2427. * - The first ICD entrypoint called by the loader is
  2428. * vk_icdNegotiateLoaderICDInterfaceVersion(). The ICD must
  2429. * statically expose this entrypoint.
  2430. *
  2431. * - Loader interface v3 differs from v2 in:
  2432. * - The ICD must implement vkCreate{PLATFORM}SurfaceKHR(),
2433. * vkDestroySurfaceKHR(), and other API which uses VkSurfaceKHR,
  2434. * because the loader no longer does so.
  2435. */
  2436. *pSupportedVersion = MIN2(*pSupportedVersion, 3u);
  2437. return VK_SUCCESS;
  2438. }
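/* Negotiation example (illustrative): a loader supporting interface version 5
 * calls this with *pSupportedVersion = 5 and the driver clamps it to
 * MIN2(5, 3) = 3, so both sides speak version 3; a loader that only supports
 * version 2 passes 2, MIN2(2, 3) = 2, and the driver must then behave as a v2
 * ICD (no vkCreate{PLATFORM}SurfaceKHR requirement on the ICD side).
 */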