@@ -1158,6 +1158,10 @@ AC_ARG_ENABLE([driglx-direct], | |||
[driglx_direct="$enableval"], | |||
[driglx_direct="yes"]) | |||
# Check for libcaca | |||
PKG_CHECK_EXISTS([caca], [have_libcaca=yes], [have_libcaca=no]) | |||
AM_CONDITIONAL([HAVE_LIBCACA], [test x$have_libcaca = xyes]) | |||
dnl | |||
dnl libGL configuration per driver | |||
dnl | |||
@@ -1535,6 +1539,8 @@ GBM_PC_LIB_PRIV="$DLOPEN_LIBS" | |||
AC_SUBST([GBM_PC_REQ_PRIV]) | |||
AC_SUBST([GBM_PC_LIB_PRIV]) | |||
AM_CONDITIONAL(HAVE_VULKAN, true) | |||
dnl | |||
dnl EGL configuration | |||
dnl | |||
@@ -2311,6 +2317,13 @@ AC_SUBST([XA_MINOR], $XA_MINOR) | |||
AC_SUBST([XA_TINY], $XA_TINY) | |||
AC_SUBST([XA_VERSION], "$XA_MAJOR.$XA_MINOR.$XA_TINY") | |||
PKG_CHECK_MODULES(VALGRIND, [valgrind], | |||
[have_valgrind=yes], [have_valgrind=no]) | |||
if test "x$have_valgrind" = "xyes"; then | |||
AC_DEFINE([HAVE_VALGRIND], 1, | |||
[Use valgrind intrinsics to suppress false warnings]) | |||
fi | |||
dnl Restore LDFLAGS and CPPFLAGS | |||
LDFLAGS="$_SAVE_LDFLAGS" | |||
CPPFLAGS="$_SAVE_CPPFLAGS" | |||
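For reference, a hedged sketch of how the new HAVE_VALGRIND define is typically consumed from C (VALGRIND_MAKE_MEM_DEFINED is the standard client request from valgrind/memcheck.h; the function name is illustrative):

#include <stddef.h>

#ifdef HAVE_VALGRIND
#include <valgrind/memcheck.h>
#else
#define VALGRIND_MAKE_MEM_DEFINED(addr, len) ((void)0)
#endif

/* Mark a range as initialized so memcheck does not report false
 * "uninitialised value" errors, e.g. for memory written by the GPU. */
static void
mark_gpu_written(void *ptr, size_t size)
{
   VALGRIND_MAKE_MEM_DEFINED(ptr, size);
}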
@@ -2419,6 +2432,7 @@ AC_CONFIG_FILES([Makefile | |||
src/mesa/drivers/osmesa/osmesa.pc | |||
src/mesa/drivers/x11/Makefile | |||
src/mesa/main/tests/Makefile | |||
src/vulkan/Makefile | |||
src/util/Makefile | |||
src/util/tests/hash_table/Makefile]) | |||
@@ -0,0 +1,90 @@ | |||
// | |||
// File: vk_platform.h | |||
// | |||
/* | |||
** Copyright (c) 2014-2015 The Khronos Group Inc. | |||
** | |||
** Permission is hereby granted, free of charge, to any person obtaining a | |||
** copy of this software and/or associated documentation files (the | |||
** "Materials"), to deal in the Materials without restriction, including | |||
** without limitation the rights to use, copy, modify, merge, publish, | |||
** distribute, sublicense, and/or sell copies of the Materials, and to | |||
** permit persons to whom the Materials are furnished to do so, subject to | |||
** the following conditions: | |||
** | |||
** The above copyright notice and this permission notice shall be included | |||
** in all copies or substantial portions of the Materials. | |||
** | |||
** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |||
** EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |||
** MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. | |||
** IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY | |||
** CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, | |||
** TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE | |||
** MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS. | |||
*/ | |||
#ifndef __VK_PLATFORM_H__ | |||
#define __VK_PLATFORM_H__ | |||
#ifdef __cplusplus | |||
extern "C" | |||
{ | |||
#endif // __cplusplus | |||
/* | |||
*************************************************************************************************** | |||
* Platform-specific directives and type declarations | |||
*************************************************************************************************** | |||
*/ | |||
#if defined(_WIN32) | |||
// On Windows, VKAPI should equate to the __stdcall convention | |||
#define VKAPI __stdcall | |||
#elif defined(__GNUC__) | |||
// On other platforms using GCC, VKAPI expands to nothing
#define VKAPI | |||
#else | |||
// Unsupported Platform! | |||
#error "Unsupported OS Platform detected!" | |||
#endif | |||
#include <stddef.h> | |||
#if !defined(VK_NO_STDINT_H) | |||
#if defined(_MSC_VER) && (_MSC_VER < 1600) | |||
typedef signed __int8 int8_t; | |||
typedef unsigned __int8 uint8_t; | |||
typedef signed __int16 int16_t; | |||
typedef unsigned __int16 uint16_t; | |||
typedef signed __int32 int32_t; | |||
typedef unsigned __int32 uint32_t; | |||
typedef signed __int64 int64_t; | |||
typedef unsigned __int64 uint64_t; | |||
#else | |||
#include <stdint.h> | |||
#endif | |||
#endif // !defined(VK_NO_STDINT_H) | |||
typedef uint64_t VkDeviceSize; | |||
typedef uint32_t bool32_t; | |||
typedef uint32_t VkSampleMask; | |||
typedef uint32_t VkFlags; | |||
#if (UINTPTR_MAX >= UINT64_MAX) | |||
#define VK_UINTPTRLEAST64_MAX UINTPTR_MAX | |||
typedef uintptr_t VkUintPtrLeast64; | |||
#else | |||
#define VK_UINTPTRLEAST64_MAX UINT64_MAX | |||
typedef uint64_t VkUintPtrLeast64; | |||
#endif | |||
#ifdef __cplusplus | |||
} // extern "C" | |||
#endif // __cplusplus | |||
#endif // __VK_PLATFORM_H__ |
@@ -0,0 +1,55 @@ | |||
/* | |||
* Copyright © 2015 Intel Corporation | |||
* | |||
* Permission is hereby granted, free of charge, to any person obtaining a | |||
* copy of this software and associated documentation files (the "Software"), | |||
* to deal in the Software without restriction, including without limitation | |||
* the rights to use, copy, modify, merge, publish, distribute, sublicense, | |||
* and/or sell copies of the Software, and to permit persons to whom the | |||
* Software is furnished to do so, subject to the following conditions: | |||
* | |||
* The above copyright notice and this permission notice (including the next | |||
* paragraph) shall be included in all copies or substantial portions of the | |||
* Software. | |||
* | |||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | |||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | |||
* IN THE SOFTWARE. | |||
*/ | |||
#ifndef __VULKAN_INTEL_H__ | |||
#define __VULKAN_INTEL_H__ | |||
#include "vulkan.h" | |||
#ifdef __cplusplus | |||
extern "C" | |||
{ | |||
#endif // __cplusplus | |||
#define VK_STRUCTURE_TYPE_DMA_BUF_IMAGE_CREATE_INFO_INTEL 1024 | |||
typedef struct VkDmaBufImageCreateInfo_ | |||
{ | |||
VkStructureType sType; // Must be VK_STRUCTURE_TYPE_DMA_BUF_IMAGE_CREATE_INFO_INTEL | |||
const void* pNext; // Pointer to next structure. | |||
int fd; | |||
VkFormat format; | |||
VkExtent3D extent; // Depth must be 1 | |||
uint32_t strideInBytes; | |||
} VkDmaBufImageCreateInfo; | |||
VkResult VKAPI vkCreateDmaBufImageINTEL( | |||
VkDevice _device, | |||
const VkDmaBufImageCreateInfo* pCreateInfo, | |||
VkDeviceMemory* pMem, | |||
VkImage* pImage); | |||
#ifdef __cplusplus | |||
} // extern "C" | |||
#endif // __cplusplus | |||
#endif // __VULKAN_INTEL_H__ |
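A hedged usage sketch of the extension entry point above; the device, dma-buf fd, dimensions, stride, and format are placeholders, not values taken from this patch:

static void
example_import_dma_buf(VkDevice device, int dma_buf_fd, uint32_t width,
                       uint32_t height, uint32_t stride, VkFormat format)
{
   VkDmaBufImageCreateInfo create_info = {
      .sType = VK_STRUCTURE_TYPE_DMA_BUF_IMAGE_CREATE_INFO_INTEL,
      .pNext = NULL,
      .fd = dma_buf_fd,
      .format = format,
      .extent = { width, height, 1 },   /* depth must be 1 */
      .strideInBytes = stride,
   };
   VkDeviceMemory mem;
   VkImage image;

   VkResult result = vkCreateDmaBufImageINTEL(device, &create_info,
                                              &mem, &image);
   (void)result;   /* compare against VK_SUCCESS in real code */
}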
@@ -61,6 +61,10 @@ EXTRA_DIST = \ | |||
AM_CFLAGS = $(VISIBILITY_CFLAGS) | |||
AM_CXXFLAGS = $(VISIBILITY_CXXFLAGS) | |||
if HAVE_VULKAN | |||
SUBDIRS += vulkan | |||
endif | |||
AM_CPPFLAGS = \ | |||
-I$(top_srcdir)/include/ \ | |||
-I$(top_srcdir)/src/mapi/ \ |
@@ -514,6 +514,10 @@ struct ast_type_qualifier { | |||
unsigned stream:1; /**< Has stream value assigned */ | |||
unsigned explicit_stream:1; /**< stream value assigned explicitly by shader code */ | |||
/** \} */ | |||
/** \name Vulkan qualifiers */ | |||
unsigned vk_set:1; | |||
} | |||
/** \brief Set of flags, accessed by name. */ | |||
q; | |||
@@ -595,6 +599,11 @@ struct ast_type_qualifier { | |||
*/ | |||
glsl_base_type image_base_type; | |||
/** | |||
* Vulkan descriptor set | |||
*/ | |||
int set; | |||
/** | |||
* Return true if and only if an interpolation qualifier is present. | |||
*/ |
@@ -2645,7 +2645,16 @@ apply_type_qualifier_to_variable(const struct ast_type_qualifier *qual, | |||
state->fs_redeclares_gl_fragcoord_with_no_layout_qualifiers; | |||
} | |||
if (qual->flags.q.vk_set) { | |||
if (!qual->flags.q.explicit_index) | |||
_mesa_glsl_error(loc, state, | |||
"Vulkan descriptor set layout requires both group and index " | |||
"qualifiers"); | |||
var->data.vk_set = true; | |||
var->data.set = qual->set; | |||
var->data.index = qual->index; | |||
} else if (qual->flags.q.explicit_location) { | |||
validate_explicit_location(qual, var, state, loc); | |||
} else if (qual->flags.q.explicit_index) { | |||
_mesa_glsl_error(loc, state, "explicit index requires explicit location"); | |||
@@ -5782,6 +5791,10 @@ ast_interface_block::hir(exec_list *instructions, | |||
var->data.explicit_binding = this->layout.flags.q.explicit_binding; | |||
var->data.binding = this->layout.binding; | |||
var->data.vk_set = this->layout.flags.q.vk_set; | |||
var->data.set = this->layout.set; | |||
var->data.index = this->layout.index; | |||
state->symbols->add_variable(var); | |||
instructions->push_tail(var); | |||
} | |||
@@ -5854,6 +5867,10 @@ ast_interface_block::hir(exec_list *instructions, | |||
var->data.explicit_binding = this->layout.flags.q.explicit_binding; | |||
var->data.binding = this->layout.binding; | |||
var->data.vk_set = this->layout.flags.q.vk_set; | |||
var->data.set = this->layout.set; | |||
var->data.index = this->layout.index; | |||
state->symbols->add_variable(var); | |||
instructions->push_tail(var); | |||
} |
@@ -252,6 +252,11 @@ ast_type_qualifier::merge_qualifier(YYLTYPE *loc, | |||
this->image_base_type = q.image_base_type; | |||
} | |||
if (q.flags.q.vk_set) { | |||
this->set = q.set; | |||
this->index = q.index; | |||
} | |||
return true; | |||
} | |||
@@ -1431,6 +1431,11 @@ layout_qualifier_id: | |||
$$.binding = $3; | |||
} | |||
if (match_layout_qualifier("set", $1, state) == 0) { | |||
$$.flags.q.vk_set = 1; | |||
$$.set = $3; | |||
} | |||
if (state->has_atomic_counters() && | |||
match_layout_qualifier("offset", $1, state) == 0) { | |||
$$.flags.q.explicit_offset = 1; |
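With this rule, the Vulkan compilation path accepts a descriptor set number in a layout qualifier. A sketch of the accepted syntax (identifiers are illustrative; per the check added in apply_type_qualifier_to_variable, a set must be paired with an index):

static const char *example_vk_glsl =
   "layout(set = 0, index = 2) uniform sampler2D uTexture;\n"
   "layout(set = 1, index = 0) uniform Block { vec4 color; } uBlock;\n";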
@@ -682,6 +682,11 @@ public: | |||
unsigned explicit_location:1; | |||
unsigned explicit_index:1; | |||
/** | |||
* Do we have a Vulkan (set, index) qualifier for this variable?
*/ | |||
unsigned vk_set:1; | |||
/** | |||
* Was an initial binding explicitly set in the shader? | |||
* | |||
@@ -751,8 +756,10 @@ public: | |||
* \note | |||
* The GLSL spec only allows the values 0 or 1 for the index in \b dual | |||
* source blending. | |||
* | |||
* This is now also used for the Vulkan descriptor set index. | |||
*/ | |||
int16_t index; | |||
/** | |||
* \brief Layout qualifier for gl_FragDepth. | |||
@@ -800,6 +807,11 @@ public: | |||
*/ | |||
int16_t binding; | |||
/** | |||
* Vulkan descriptor set for the resource. | |||
*/ | |||
int16_t set; | |||
/** | |||
* Storage location of the base of this variable | |||
* |
@@ -53,6 +53,11 @@ process_block(void *mem_ctx, struct hash_table *ht, ir_variable *var) | |||
b->binding = 0; | |||
} | |||
if (var->data.vk_set) { | |||
b->set = var->data.set; | |||
b->index = var->data.index; | |||
} | |||
_mesa_hash_table_insert(ht, var->get_interface_type()->name, (void *) b); | |||
return b; | |||
} else { |
@@ -35,6 +35,8 @@ struct link_uniform_block_active { | |||
unsigned num_array_elements; | |||
unsigned binding; | |||
unsigned set; | |||
unsigned index; | |||
bool has_instance_name; | |||
bool has_binding; |
@@ -293,6 +293,9 @@ link_uniform_blocks(void *mem_ctx, | |||
blocks[i].NumUniforms = | |||
(unsigned)(ptrdiff_t)(&variables[parcel.index] - blocks[i].Uniforms); | |||
blocks[i].Set = b->set; | |||
blocks[i].Index = b->index; | |||
i++; | |||
} | |||
} else { | |||
@@ -311,6 +314,9 @@ link_uniform_blocks(void *mem_ctx, | |||
blocks[i].NumUniforms = | |||
(unsigned)(ptrdiff_t)(&variables[parcel.index] - blocks[i].Uniforms); | |||
blocks[i].Set = b->set; | |||
blocks[i].Index = b->index; | |||
i++; | |||
} | |||
} |
@@ -1,3 +1,4 @@ | |||
#include <stdlib.h> | |||
#include "main/glheader.h" | |||
#include "main/compiler.h" | |||
#include "glapi/glapi.h" | |||
@@ -33,12 +34,14 @@ _glapi_check_multithread(void) | |||
PUBLIC void | |||
_glapi_set_context(void *context) | |||
{ | |||
_glapi_Context = context; | |||
} | |||
PUBLIC void * | |||
_glapi_get_context(void) | |||
{ | |||
return _glapi_Context; | |||
} | |||
PUBLIC void | |||
@@ -84,7 +87,7 @@ _glapi_set_nop_handler(_glapi_nop_handler_proc func) | |||
PUBLIC struct _glapi_table * | |||
_glapi_new_nop_table(unsigned num_entries) | |||
{ | |||
   /* Stub for this standalone shim: hand back a small dummy allocation
    * instead of building a real nop dispatch table. */
   return malloc(16);
} | |||
#ifndef NO_MAIN |
@@ -41,7 +41,7 @@ AM_CFLAGS = \ | |||
AM_CXXFLAGS = $(AM_CFLAGS) | |||
noinst_LTLIBRARIES = libi965_dri.la libi965_compiler.la | |||
libi965_dri_la_SOURCES = $(i965_FILES) | |||
libi965_dri_la_LIBADD = $(INTEL_LIBS) | |||
@@ -54,6 +54,15 @@ TEST_LIBS = \ | |||
$(CLOCK_LIB) \ | |||
../common/libdri_test_stubs.la | |||
libi965_compiler_la_SOURCES = $(i965_FILES) | |||
libi965_compiler_la_LIBADD = $(INTEL_LIBS) \ | |||
../common/libdricommon.la \ | |||
../common/libmegadriver_stub.la \ | |||
../../../libmesa.la \ | |||
$(DRI_LIB_DEPS) \ | |||
$(CLOCK_LIB) \ | |||
../common/libdri_test_stubs.la -lm | |||
TESTS = \ | |||
test_fs_cmod_propagation \ | |||
test_fs_saturate_propagation \ |
@@ -830,6 +830,7 @@ brwCreateContext(gl_api api, | |||
intel_batchbuffer_init(brw); | |||
#if 0 | |||
if (brw->gen >= 6) { | |||
/* Create a new hardware context. Using a hardware context means that | |||
* our GPU state will be saved/restored on context switch, allowing us | |||
@@ -848,6 +849,7 @@ brwCreateContext(gl_api api, | |||
} | |||
brw_init_state(brw); | |||
#endif | |||
intelInitExtensions(ctx); | |||
@@ -909,8 +911,10 @@ brwCreateContext(gl_api api, | |||
_mesa_compute_version(ctx); | |||
#if 0 | |||
_mesa_initialize_dispatch_tables(ctx); | |||
_mesa_initialize_vbo_vtxfmt(ctx); | |||
#endif | |||
if (ctx->Extensions.AMD_performance_monitor) { | |||
brw_init_performance_monitors(brw); |
@@ -359,6 +359,9 @@ struct brw_stage_prog_data { | |||
/** @} */ | |||
} binding_table; | |||
uint32_t *map_entries; | |||
uint32_t *bind_map[4]; | |||
GLuint nr_params; /**< number of float params/constants */ | |||
GLuint nr_pull_params; | |||
@@ -1976,6 +1979,12 @@ gen6_upload_push_constants(struct brw_context *brw, | |||
struct brw_stage_state *stage_state, | |||
enum aub_state_struct_type type); | |||
struct intel_screen *intel_screen_create(int fd); | |||
void intel_screen_destroy(struct intel_screen *screen); | |||
struct brw_context *intel_context_create(struct intel_screen *screen); | |||
void intel_context_destroy(struct brw_context *brw); | |||
#ifdef __cplusplus | |||
} | |||
#endif |
@@ -55,6 +55,7 @@ | |||
# define GEN7_3DPRIM_VERTEXBUFFER_ACCESS_SEQUENTIAL (0 << 8) | |||
# define GEN7_3DPRIM_VERTEXBUFFER_ACCESS_RANDOM (1 << 8) | |||
#ifndef _3DPRIM_POINTLIST /* FIXME: Avoid clashing with defines from bdw_pack.h */ | |||
#define _3DPRIM_POINTLIST 0x01 | |||
#define _3DPRIM_LINELIST 0x02 | |||
#define _3DPRIM_LINESTRIP 0x03 | |||
@@ -76,6 +77,7 @@ | |||
#define _3DPRIM_LINESTRIP_BF 0x13 | |||
#define _3DPRIM_LINESTRIP_CONT_BF 0x14 | |||
#define _3DPRIM_TRIFAN_NOSTIPPLE 0x15 | |||
#endif | |||
/* We use this offset to be able to pass native primitive types in struct | |||
* _mesa_prim::mode. Native primitive types are BRW_PRIM_OFFSET + |
@@ -353,3 +353,15 @@ brw_get_device_info(int devid, int revision) | |||
return devinfo; | |||
} | |||
const char * | |||
brw_get_device_name(int devid) | |||
{ | |||
switch (devid) { | |||
#undef CHIPSET | |||
#define CHIPSET(id, family, name) case id: return name; | |||
#include "pci_ids/i965_pci_ids.h" | |||
default: | |||
return NULL; | |||
} | |||
} |
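brw_get_device_name() works by re-#including the PCI-ID list with CHIPSET redefined, the classic X-macro pattern. A self-contained sketch of the same technique (the IDs and names here are made up, not the real i965_pci_ids.h contents):

#include <stdio.h>

/* The ID table would normally live in its own header, one
 * CHIPSET(id, family, name) entry per device. */
#define EXAMPLE_IDS \
   CHIPSET(0x1234, example, "Example GPU A") \
   CHIPSET(0x5678, example, "Example GPU B")

static const char *
example_get_name(int devid)
{
   switch (devid) {
#define CHIPSET(id, family, name) case id: return name;
   EXAMPLE_IDS
#undef CHIPSET
   default:
      return NULL;
   }
}

int
main(void)
{
   printf("%s\n", example_get_name(0x1234));   /* prints "Example GPU A" */
   return 0;
}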
@@ -85,3 +85,4 @@ struct brw_device_info | |||
}; | |||
const struct brw_device_info *brw_get_device_info(int devid, int revision); | |||
const char *brw_get_device_name(int devid); |
@@ -1910,6 +1910,10 @@ fs_visitor::assign_vs_urb_setup() | |||
unsigned vue_entries = | |||
MAX2(count, vs_prog_data->base.vue_map.num_slots); | |||
/* URB entry size is counted in units of 64 bytes (for the 3DSTATE_URB_VS | |||
* command). Each attribute is 16 bytes (4 floats/dwords), so each unit | |||
* fits four attributes. | |||
*/ | |||
vs_prog_data->base.urb_entry_size = ALIGN(vue_entries, 4) / 4; | |||
vs_prog_data->base.urb_read_length = (count + 1) / 2; | |||
@@ -3033,9 +3037,22 @@ fs_visitor::emit_repclear_shader() | |||
brw_wm_prog_key *key = (brw_wm_prog_key*) this->key; | |||
int base_mrf = 1; | |||
int color_mrf = base_mrf + 2; | |||
fs_inst *mov; | |||
if (uniforms == 1) { | |||
mov = emit(MOV(vec4(brw_message_reg(color_mrf)), | |||
fs_reg(UNIFORM, 0, BRW_REGISTER_TYPE_F))); | |||
} else { | |||
struct brw_reg reg = | |||
brw_reg(BRW_GENERAL_REGISTER_FILE, | |||
2, 3, 0, 0, BRW_REGISTER_TYPE_F, | |||
BRW_VERTICAL_STRIDE_8, | |||
BRW_WIDTH_2, | |||
BRW_HORIZONTAL_STRIDE_4, BRW_SWIZZLE_XYZW, WRITEMASK_XYZW); | |||
mov = emit(MOV(vec4(brw_message_reg(color_mrf)), fs_reg(reg))); | |||
} | |||
mov->force_writemask_all = true; | |||
fs_inst *write; | |||
@@ -3065,8 +3082,10 @@ fs_visitor::emit_repclear_shader() | |||
assign_curb_setup(); | |||
/* Now that we have the uniform assigned, go ahead and force it to a vec4. */ | |||
if (uniforms == 1) { | |||
assert(mov->src[0].file == HW_REG); | |||
mov->src[0] = brw_vec4_grf(mov->src[0].fixed_hw_reg.nr, 0); | |||
} | |||
} | |||
/** | |||
@@ -4081,7 +4100,8 @@ fs_visitor::run_vs() | |||
{ | |||
assert(stage == MESA_SHADER_VERTEX); | |||
if (prog_data->map_entries == NULL) | |||
assign_common_binding_table_offsets(0); | |||
setup_vs_payload(); | |||
if (INTEL_DEBUG & DEBUG_SHADER_TIME) | |||
@@ -4129,7 +4149,8 @@ fs_visitor::run_fs() | |||
sanity_param_count = prog->Parameters->NumParameters; | |||
if (prog_data->map_entries == NULL) | |||
assign_binding_table_offsets(); | |||
if (devinfo->gen >= 6) | |||
setup_payload_gen6(); |
@@ -1198,14 +1198,20 @@ fs_visitor::visit(ir_expression *ir) | |||
ir_constant *const_uniform_block = ir->operands[0]->as_constant(); | |||
ir_constant *const_offset = ir->operands[1]->as_constant(); | |||
fs_reg surf_index; | |||
uint32_t binding, set, index, set_index; | |||
if (const_uniform_block) { | |||
/* The block index is a constant, so just emit the binding table entry | |||
* as an immediate. | |||
*/ | |||
index = const_uniform_block->value.u[0]; | |||
set = shader->base.UniformBlocks[index].Set; | |||
set_index = shader->base.UniformBlocks[index].Index; | |||
binding = stage_prog_data->bind_map[set][set_index]; | |||
surf_index = fs_reg(binding); | |||
} else { | |||
assert(0 && "need more info from the ir for this."); | |||
/* The block index is not a constant. Evaluate the index expression | |||
* per-channel and add the base UBO index; we have to select a value | |||
* from any live channel. | |||
@@ -2289,8 +2295,13 @@ fs_visitor::emit_texture(ir_texture_opcode op, | |||
void | |||
fs_visitor::visit(ir_texture *ir) | |||
{ | |||
uint32_t sampler; | |||
ir_dereference_variable *deref_var = ir->sampler->as_dereference_variable(); | |||
assert(deref_var); | |||
ir_variable *var = deref_var->var; | |||
sampler = stage_prog_data->bind_map[var->data.set][var->data.index]; | |||
ir_rvalue *nonconst_sampler_index = | |||
_mesa_get_sampler_array_nonconst_index(ir->sampler); |
@@ -33,19 +33,23 @@ | |||
#include "brw_state.h" | |||
#include "brw_ff_gs.h" | |||
bool | |||
brw_compile_gs_prog(struct brw_context *brw, | |||
struct gl_shader_program *prog, | |||
struct brw_geometry_program *gp, | |||
struct brw_gs_prog_key *key, | |||
struct brw_gs_compile_output *output) | |||
{ | |||
struct brw_stage_state *stage_state = &brw->gs.base; | |||
struct brw_gs_compile c; | |||
memset(&c, 0, sizeof(c)); | |||
c.key = *key; | |||
c.gp = gp; | |||
/* The caller passes the bind map in via the output struct. */
c.prog_data.base.base.map_entries = output->prog_data.base.base.map_entries; | |||
memcpy(c.prog_data.base.base.bind_map, output->prog_data.base.base.bind_map, | |||
sizeof(c.prog_data.base.base.bind_map)); | |||
c.prog_data.include_primitive_id = | |||
(gp->program.Base.InputsRead & VARYING_BIT_PRIMITIVE_ID) != 0; | |||
@@ -274,18 +278,41 @@ brw_codegen_gs_prog(struct brw_context *brw, | |||
c.prog_data.base.base.total_scratch | |||
= brw_get_scratch_size(c.base.last_scratch*REG_SIZE); | |||
} | |||
output->mem_ctx = mem_ctx; | |||
output->program = program; | |||
output->program_size = program_size; | |||
memcpy(&output->prog_data, &c.prog_data, | |||
sizeof(output->prog_data)); | |||
return true; | |||
} | |||
bool | |||
brw_codegen_gs_prog(struct brw_context *brw, | |||
struct gl_shader_program *prog, | |||
struct brw_geometry_program *gp, | |||
struct brw_gs_prog_key *key) | |||
{ | |||
struct brw_gs_compile_output output; | |||
struct brw_stage_state *stage_state = &brw->gs.base; | |||
if (!brw_compile_gs_prog(brw, prog, gp, key, &output))
return false; | |||
if (output.prog_data.base.base.total_scratch) { | |||
brw_get_scratch_bo(brw, &stage_state->scratch_bo, | |||
output.prog_data.base.base.total_scratch * | |||
brw->max_gs_threads); | |||
} | |||
brw_upload_cache(&brw->cache, BRW_CACHE_GS_PROG, | |||
key, sizeof(*key), | |||
output.program, output.program_size, | |||
&output.prog_data, sizeof(output.prog_data), | |||
&stage_state->prog_offset, &brw->gs.prog_data); | |||
ralloc_free(output.mem_ctx); | |||
return true; | |||
} |
@@ -37,6 +37,22 @@ struct gl_context; | |||
struct gl_shader_program; | |||
struct gl_program; | |||
struct brw_gs_compile_output { | |||
void *mem_ctx; | |||
const void *program; | |||
uint32_t program_size; | |||
struct brw_gs_prog_data prog_data; | |||
}; | |||
struct brw_gs_prog_key; | |||
bool | |||
brw_compile_gs_prog(struct brw_context *brw, | |||
struct gl_shader_program *prog, | |||
struct brw_geometry_program *gp, | |||
struct brw_gs_prog_key *key, | |||
struct brw_gs_compile_output *output); | |||
bool brw_gs_prog_data_compare(const void *a, const void *b); | |||
void |
@@ -427,6 +427,9 @@ brw_destroy_cache(struct brw_context *brw, struct brw_cache *cache) | |||
DBG("%s\n", __func__); | |||
if (cache->bo == NULL) | |||
return; | |||
if (brw->has_llc) | |||
drm_intel_bo_unmap(cache->bo); | |||
drm_intel_bo_unreference(cache->bo); |
@@ -46,7 +46,7 @@ | |||
* Return a bitfield where bit n is set if barycentric interpolation mode n | |||
* (see enum brw_wm_barycentric_interp_mode) is needed by the fragment shader. | |||
*/ | |||
unsigned | |||
brw_compute_barycentric_interp_modes(struct brw_context *brw, | |||
bool shade_model_flat, | |||
bool persample_shading, |
@@ -90,6 +90,12 @@ bool brw_wm_prog_data_compare(const void *a, const void *b); | |||
void | |||
brw_upload_wm_prog(struct brw_context *brw); | |||
unsigned | |||
brw_compute_barycentric_interp_modes(struct brw_context *brw, | |||
bool shade_model_flat, | |||
bool persample_shading, | |||
const struct gl_fragment_program *fprog); | |||
#ifdef __cplusplus | |||
} // extern "C" | |||
#endif |
@@ -60,7 +60,7 @@ static const struct dri_debug_control debug_control[] = { | |||
{ "urb", DEBUG_URB }, | |||
{ "vs", DEBUG_VS }, | |||
{ "clip", DEBUG_CLIP }, | |||
{ "aub", DEBUG_AUB }, | |||
{ "foob", DEBUG_AUB }, /* disable aub dumbing in the dri driver */ | |||
{ "shader_time", DEBUG_SHADER_TIME }, | |||
{ "no16", DEBUG_NO16 }, | |||
{ "blorp", DEBUG_BLORP }, |
@@ -1416,6 +1416,78 @@ __DRIconfig **intelInitScreen2(__DRIscreen *psp) | |||
return (const __DRIconfig**) intel_screen_make_configs(psp); | |||
} | |||
struct intel_screen * | |||
intel_screen_create(int fd) | |||
{ | |||
__DRIscreen *psp; | |||
__DRIconfig **configs; | |||
int i; | |||
psp = malloc(sizeof(*psp)); | |||
if (psp == NULL) | |||
return NULL; | |||
psp->image.loader = (void *) 1; /* Don't complain about this being NULL */ | |||
psp->fd = fd; | |||
psp->dri2.useInvalidate = (void *) 1; | |||
configs = (__DRIconfig **) intelInitScreen2(psp); | |||
for (i = 0; configs[i]; i++) | |||
free(configs[i]); | |||
free(configs); | |||
return psp->driverPrivate; | |||
} | |||
void | |||
intel_screen_destroy(struct intel_screen *screen) | |||
{ | |||
__DRIscreen *psp; | |||
psp = screen->driScrnPriv; | |||
intelDestroyScreen(screen->driScrnPriv); | |||
free(psp); | |||
} | |||
struct brw_context * | |||
intel_context_create(struct intel_screen *screen) | |||
{ | |||
__DRIcontext *driContextPriv; | |||
struct brw_context *brw; | |||
unsigned error; | |||
driContextPriv = malloc(sizeof(*driContextPriv)); | |||
if (driContextPriv == NULL) | |||
return NULL; | |||
driContextPriv->driScreenPriv = screen->driScrnPriv; | |||
brwCreateContext(API_OPENGL_CORE, | |||
NULL, /* visual */ | |||
driContextPriv, | |||
3, 0, | |||
0, /* flags */ | |||
false, /* notify_reset */ | |||
&error, | |||
NULL); | |||
brw = driContextPriv->driverPrivate; | |||
brw->ctx.FirstTimeCurrent = false; | |||
return driContextPriv->driverPrivate; | |||
} | |||
void | |||
intel_context_destroy(struct brw_context *brw) | |||
{ | |||
__DRIcontext *driContextPriv; | |||
driContextPriv = brw->driContext; | |||
intelDestroyContext(driContextPriv); | |||
free(driContextPriv); | |||
} | |||
struct intel_buffer { | |||
__DRIbuffer base; | |||
drm_intel_bo *bo; |
@@ -2570,6 +2570,12 @@ struct gl_uniform_block | |||
*/ | |||
GLuint Binding; | |||
/** | |||
* Vulkan descriptor set and index qualifiers for this block. | |||
*/ | |||
GLuint Set; | |||
GLuint Index; | |||
/** | |||
* Minimum size (in bytes) of a buffer object to back this uniform buffer | |||
* (GL_UNIFORM_BLOCK_DATA_SIZE). |
@@ -0,0 +1,67 @@ | |||
# Copyright © 2015 Intel Corporation | |||
# | |||
# Permission is hereby granted, free of charge, to any person obtaining a | |||
# copy of this software and associated documentation files (the "Software"), | |||
# to deal in the Software without restriction, including without limitation | |||
# the rights to use, copy, modify, merge, publish, distribute, sublicense, | |||
# and/or sell copies of the Software, and to permit persons to whom the | |||
# Software is furnished to do so, subject to the following conditions: | |||
# | |||
# The above copyright notice and this permission notice (including the next | |||
# paragraph) shall be included in all copies or substantial portions of the | |||
# Software. | |||
# | |||
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |||
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |||
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |||
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |||
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | |||
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | |||
# IN THE SOFTWARE. | |||
lib_LTLIBRARIES = libvulkan.la | |||
# The gallium includes are for the util/u_math.h include from main/macros.h | |||
AM_CPPFLAGS = \ | |||
$(INTEL_CFLAGS) \ | |||
$(VALGRIND_CFLAGS) \ | |||
$(DEFINES) \ | |||
-I$(top_srcdir)/include \ | |||
-I$(top_srcdir)/src \ | |||
-I$(top_srcdir)/src/mapi \ | |||
-I$(top_srcdir)/src/mesa \ | |||
-I$(top_srcdir)/src/mesa/drivers/dri/common \ | |||
-I$(top_srcdir)/src/mesa/drivers/dri/i965 \ | |||
-I$(top_srcdir)/src/gallium/auxiliary \ | |||
-I$(top_srcdir)/src/gallium/include | |||
libvulkan_la_CFLAGS = \ | |||
-Wall -Wextra -Wno-unused-parameter -fvisibility=hidden -O0 -g \ | |||
-Wstrict-prototypes -Wmissing-prototypes -Wno-override-init | |||
libvulkan_la_CXXFLAGS = \ | |||
-Wall -Wextra -Wno-unused-parameter -fvisibility=hidden -O0 -g | |||
libvulkan_la_SOURCES = \ | |||
private.h \ | |||
gem.c \ | |||
device.c \ | |||
aub.c \ | |||
allocator.c \ | |||
util.c \ | |||
pipeline.c \ | |||
image.c \ | |||
meta.c \ | |||
intel.c \ | |||
compiler.cpp | |||
bin_PROGRAMS = vk | |||
vk_SOURCES = vk.c | |||
vk_LDADD = libvulkan.la -lpng16 | |||
libvulkan_la_LIBADD = -lxcb -lxcb-dri3 \ | |||
$(top_builddir)/src/mesa/drivers/dri/i965/libi965_compiler.la | |||
include $(top_srcdir)/install-lib-links.mk |
@@ -0,0 +1,499 @@ | |||
/* | |||
* Copyright © 2015 Intel Corporation | |||
* | |||
* Permission is hereby granted, free of charge, to any person obtaining a | |||
* copy of this software and associated documentation files (the "Software"), | |||
* to deal in the Software without restriction, including without limitation | |||
* the rights to use, copy, modify, merge, publish, distribute, sublicense, | |||
* and/or sell copies of the Software, and to permit persons to whom the | |||
* Software is furnished to do so, subject to the following conditions: | |||
* | |||
* The above copyright notice and this permission notice (including the next | |||
* paragraph) shall be included in all copies or substantial portions of the | |||
* Software. | |||
* | |||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | |||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | |||
* IN THE SOFTWARE. | |||
*/ | |||
#define _DEFAULT_SOURCE | |||
#include <stdint.h> | |||
#include <stdlib.h> | |||
#include <unistd.h> | |||
#include <values.h> | |||
#include <assert.h> | |||
#include <linux/futex.h> | |||
#include <linux/memfd.h> | |||
#include <sys/time.h> | |||
#include <sys/mman.h> | |||
#include <sys/syscall.h> | |||
#include "private.h" | |||
/* Design goals: | |||
* | |||
* - Lock free (except when resizing underlying bos) | |||
* | |||
* - Constant time allocation with typically only one atomic | |||
* | |||
* - Multiple allocation sizes without fragmentation | |||
* | |||
* - Can grow while keeping addresses and offset of contents stable | |||
* | |||
* - All allocations within one bo so we can point one of the | |||
* STATE_BASE_ADDRESS pointers at it. | |||
* | |||
* The overall design is a two-level allocator: top level is a fixed size, big | |||
* block (8k) allocator, which operates out of a bo. Allocation is done by | |||
* either pulling a block from the free list or growing the used range of the | |||
* bo. Growing the range may run out of space in the bo which we then need to | |||
* grow. Growing the bo is tricky in a multi-threaded, lockless environment: | |||
* we need to keep all pointers and contents in the old map valid. GEM bos in | |||
* general can't grow, but we use a trick: we create a memfd and use ftruncate | |||
* to grow it as necessary. We mmap the new size and then create a gem bo for | |||
* it using the new gem userptr ioctl. Without heavy-handed locking around | |||
* our allocation fast-path, there isn't really a way to munmap the old mmap, | |||
* so we just keep it around until garbage collection time. While the block | |||
* allocator is lockless for normal operations, we block other threads trying | |||
* to allocate while we're growing the map. It shouldn't happen often, and
* growing is fast anyway. | |||
* | |||
* At the next level we can use various sub-allocators. The state pool is a | |||
* pool of smaller, fixed size objects, which operates much like the block | |||
* pool. It uses a free list for freeing objects, but when it runs out of | |||
* space it just allocates a new block from the block pool. This allocator is | |||
* intended for longer lived state objects such as SURFACE_STATE and most | |||
* other persistent state objects in the API. We may need to track more info | |||
* with these object and a pointer back to the CPU object (eg VkImage). In | |||
* those cases we just allocate a slightly bigger object and put the extra | |||
* state after the GPU state object. | |||
* | |||
* The state stream allocator works similar to how the i965 DRI driver streams | |||
* all its state. Even with Vulkan, we need to emit transient state (whether | |||
* surface state base or dynamic state base), and for that we can just get a | |||
* block and fill it up. These cases are local to a command buffer and the | |||
* sub-allocator need not be thread safe. The streaming allocator gets a new | |||
* block when it runs out of space and chains them together so they can be | |||
* easily freed. | |||
*/ | |||
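/* A minimal sketch of the memfd growth trick described above, using the
 * memfd_create() wrapper defined below (Linux-only; MFD_CLOEXEC comes from
 * <linux/memfd.h>, already included): */
static void
example_memfd_grow(void)
{
   int fd = memfd_create("example pool", MFD_CLOEXEC);
   if (fd == -1 || ftruncate(fd, 4096) == -1)
      return;

   /* Hand out a pointer into the initial mapping... */
   char *old_map = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                        MAP_SHARED, fd, 0);
   old_map[0] = 42;

   /* ...then grow the file and map the larger size. The old mapping stays
    * valid because both mappings alias the same file pages. */
   if (ftruncate(fd, 8192) == -1)
      return;
   char *new_map = mmap(NULL, 8192, PROT_READ | PROT_WRITE,
                        MAP_SHARED, fd, 0);
   assert(new_map != MAP_FAILED && new_map[0] == 42);
}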
/* Allocations are always at least 64-byte aligned, so 1 is an invalid value.
* We use it to indicate the free list is empty. */ | |||
#define EMPTY 1 | |||
struct anv_mmap_cleanup { | |||
void *map; | |||
size_t size; | |||
uint32_t gem_handle; | |||
}; | |||
#define ANV_MMAP_CLEANUP_INIT ((struct anv_mmap_cleanup){0}) | |||
static inline long | |||
sys_futex(void *addr1, int op, int val1, | |||
struct timespec *timeout, void *addr2, int val3) | |||
{ | |||
return syscall(SYS_futex, addr1, op, val1, timeout, addr2, val3); | |||
} | |||
static inline int | |||
futex_wake(uint32_t *addr, int count) | |||
{ | |||
return sys_futex(addr, FUTEX_WAKE, count, NULL, NULL, 0); | |||
} | |||
static inline int | |||
futex_wait(uint32_t *addr, int32_t value) | |||
{ | |||
return sys_futex(addr, FUTEX_WAIT, value, NULL, NULL, 0); | |||
} | |||
static inline int | |||
memfd_create(const char *name, unsigned int flags) | |||
{ | |||
return syscall(SYS_memfd_create, name, flags); | |||
} | |||
static inline uint32_t | |||
ilog2_round_up(uint32_t value) | |||
{ | |||
assert(value != 0); | |||
return 32 - __builtin_clz(value - 1); | |||
} | |||
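/* Note: value == 1 would evaluate __builtin_clz(0), which is undefined
 * behavior; callers are expected to pass values greater than 1. Worked
 * example: ilog2_round_up(96) = 32 - clz(95) = 32 - 25 = 7, i.e. 96 rounds
 * up to 2^7 = 128. */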
static inline uint32_t | |||
round_to_power_of_two(uint32_t value) | |||
{ | |||
return 1 << ilog2_round_up(value); | |||
} | |||
static bool | |||
anv_free_list_pop(union anv_free_list *list, void **map, uint32_t *offset) | |||
{ | |||
union anv_free_list current, next, old; | |||
current = *list; | |||
while (current.offset != EMPTY) { | |||
/* We have to add a memory barrier here so that the list head (and | |||
* offset) gets read before we read the map pointer. This way we | |||
* know that the map pointer is valid for the given offset at the | |||
* point where we read it. | |||
*/ | |||
__sync_synchronize(); | |||
next.offset = *(uint32_t *)(*map + current.offset); | |||
next.count = current.count + 1; | |||
old.u64 = __sync_val_compare_and_swap(&list->u64, current.u64, next.u64); | |||
if (old.u64 == current.u64) { | |||
*offset = current.offset; | |||
return true; | |||
} | |||
current = old; | |||
} | |||
return false; | |||
} | |||
static void | |||
anv_free_list_push(union anv_free_list *list, void *map, uint32_t offset) | |||
{ | |||
union anv_free_list current, old, new; | |||
uint32_t *next_ptr = map + offset; | |||
old = *list; | |||
do { | |||
current = old; | |||
*next_ptr = current.offset; | |||
new.offset = offset; | |||
new.count = current.count + 1; | |||
old.u64 = __sync_val_compare_and_swap(&list->u64, current.u64, new.u64); | |||
} while (old.u64 != current.u64); | |||
} | |||
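/* The count field doubles as an ABA guard for anv_free_list_pop() above: if
 * another thread pops and re-pushes the same offset between our read and our
 * CAS, the compare on the full 64-bit value still fails because count has
 * advanced. */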
static int | |||
anv_block_pool_grow(struct anv_block_pool *pool); | |||
void | |||
anv_block_pool_init(struct anv_block_pool *pool, | |||
struct anv_device *device, uint32_t block_size) | |||
{ | |||
assert(is_power_of_two(block_size)); | |||
pool->device = device; | |||
pool->bo.gem_handle = 0; | |||
pool->bo.offset = 0; | |||
pool->size = 0; | |||
pool->block_size = block_size; | |||
pool->next_block = 0; | |||
pool->free_list = ANV_FREE_LIST_EMPTY; | |||
anv_vector_init(&pool->mmap_cleanups, | |||
round_to_power_of_two(sizeof(struct anv_mmap_cleanup)), 128); | |||
/* Immediately grow the pool so we'll have a backing bo. */ | |||
anv_block_pool_grow(pool); | |||
} | |||
/* The memfd path lets us create a map for an fd and lets us grow and remap | |||
* without copying. It breaks valgrind, however, so we have a MAP_ANONYMOUS
* path we can take for valgrind debugging. */ | |||
#define USE_MEMFD 0 | |||
void | |||
anv_block_pool_finish(struct anv_block_pool *pool) | |||
{ | |||
struct anv_mmap_cleanup *cleanup; | |||
anv_vector_foreach(cleanup, &pool->mmap_cleanups) { | |||
if (cleanup->map) | |||
munmap(cleanup->map, cleanup->size); | |||
if (cleanup->gem_handle) | |||
anv_gem_close(pool->device, cleanup->gem_handle); | |||
} | |||
anv_vector_finish(&pool->mmap_cleanups); | |||
#if USE_MEMFD | |||
close(pool->fd); | |||
#endif | |||
} | |||
static int | |||
anv_block_pool_grow(struct anv_block_pool *pool) | |||
{ | |||
size_t size; | |||
void *map; | |||
int gem_handle; | |||
struct anv_mmap_cleanup *cleanup; | |||
if (pool->size == 0) { | |||
size = 32 * pool->block_size; | |||
} else { | |||
size = pool->size * 2; | |||
} | |||
cleanup = anv_vector_add(&pool->mmap_cleanups); | |||
if (!cleanup) | |||
return -1; | |||
*cleanup = ANV_MMAP_CLEANUP_INIT; | |||
#if USE_MEMFD | |||
if (pool->size == 0) | |||
pool->fd = memfd_create("block pool", MFD_CLOEXEC); | |||
if (pool->fd == -1) | |||
return -1; | |||
if (ftruncate(pool->fd, size) == -1) | |||
return -1; | |||
/* First try to see if mremap can grow the map in place. */ | |||
map = MAP_FAILED; | |||
if (pool->size > 0) | |||
map = mremap(pool->map, pool->size, size, 0); | |||
if (map == MAP_FAILED) { | |||
/* Just leak the old map until we destroy the pool. We can't munmap it | |||
* without races or imposing locking on the block allocate fast path. On | |||
* the whole, the leaked maps add up to less than the size of the
* current map. MAP_POPULATE seems like the right thing to do, but we | |||
* should try to get some numbers. | |||
*/ | |||
map = mmap(NULL, size, PROT_READ | PROT_WRITE, | |||
MAP_SHARED | MAP_POPULATE, pool->fd, 0); | |||
cleanup->map = map; | |||
cleanup->size = size; | |||
} | |||
if (map == MAP_FAILED) | |||
return -1; | |||
#else | |||
/* The MAP_ANONYMOUS fallback can't grow without races, so just bail here | |||
* if we're trying to grow the pool. */ | |||
assert(pool->size == 0); | |||
map = mmap(NULL, size, PROT_READ | PROT_WRITE, | |||
MAP_PRIVATE | MAP_ANONYMOUS | MAP_POPULATE, -1, 0); | |||
if (map == MAP_FAILED) | |||
return -1; | |||
cleanup->map = map; | |||
cleanup->size = size; | |||
#endif | |||
gem_handle = anv_gem_userptr(pool->device, map, size); | |||
if (gem_handle == 0) | |||
return -1; | |||
cleanup->gem_handle = gem_handle; | |||
   /* Now that we've successfully allocated everything, we can write the new
    * values back into the pool. */
pool->map = map; | |||
pool->bo.gem_handle = gem_handle; | |||
pool->bo.size = size; | |||
pool->bo.map = map; | |||
pool->bo.index = 0; | |||
/* Write size last and after the memory barrier here. We need the memory | |||
* barrier to make sure map and gem_handle are written before other threads | |||
* see the new size. A thread could allocate a block and then go try using | |||
* the old pool->map and access out of bounds. */ | |||
__sync_synchronize(); | |||
pool->size = size; | |||
return 0; | |||
} | |||
uint32_t | |||
anv_block_pool_alloc(struct anv_block_pool *pool) | |||
{ | |||
uint32_t offset, block, size; | |||
/* Try free list first. */ | |||
if (anv_free_list_pop(&pool->free_list, &pool->map, &offset)) | |||
return offset; | |||
restart: | |||
size = pool->size; | |||
block = __sync_fetch_and_add(&pool->next_block, pool->block_size); | |||
if (block < size) { | |||
return block; | |||
} else if (block == size) { | |||
      /* We allocated the first block outside the pool, so we have to grow
       * it. pool->next_block acts as a mutex: threads that try to allocate
       * now will get block indexes above the current limit and hit the
       * futex_wait below. */
anv_block_pool_grow(pool); | |||
futex_wake(&pool->size, INT_MAX); | |||
} else { | |||
futex_wait(&pool->size, size); | |||
__sync_fetch_and_add(&pool->next_block, -pool->block_size); | |||
goto restart; | |||
} | |||
return block; | |||
} | |||
void | |||
anv_block_pool_free(struct anv_block_pool *pool, uint32_t offset) | |||
{ | |||
anv_free_list_push(&pool->free_list, pool->map, offset); | |||
} | |||
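/* A hedged usage sketch of the block pool API above; "device" stands in for
 * a valid struct anv_device obtained elsewhere in the driver. */
static void
example_block_pool_usage(struct anv_device *device)
{
   struct anv_block_pool pool;

   anv_block_pool_init(&pool, device, 8192);   /* 8k blocks, per the notes */

   uint32_t a = anv_block_pool_alloc(&pool);   /* byte offset into pool.map */
   uint32_t b = anv_block_pool_alloc(&pool);
   (void)b;

   anv_block_pool_free(&pool, a);   /* back onto the lock-free free list */
   anv_block_pool_finish(&pool);
}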
static void | |||
anv_fixed_size_state_pool_init(struct anv_fixed_size_state_pool *pool, | |||
size_t state_size) | |||
{ | |||
/* At least a cache line and must divide the block size. */ | |||
assert(state_size >= 64 && is_power_of_two(state_size)); | |||
pool->state_size = state_size; | |||
pool->free_list = ANV_FREE_LIST_EMPTY; | |||
pool->block.next = 0; | |||
pool->block.end = 0; | |||
} | |||
static uint32_t | |||
anv_fixed_size_state_pool_alloc(struct anv_fixed_size_state_pool *pool, | |||
struct anv_block_pool *block_pool) | |||
{ | |||
uint32_t offset; | |||
struct anv_block_state block, old, new; | |||
/* Try free list first. */ | |||
if (anv_free_list_pop(&pool->free_list, &block_pool->map, &offset)) | |||
return offset; | |||
   /* If the free list was empty (or somebody raced us and took the items),
    * we allocate a new item from the end of the block. */
restart: | |||
block.u64 = __sync_fetch_and_add(&pool->block.u64, pool->state_size); | |||
if (block.next < block.end) { | |||
return block.next; | |||
} else if (block.next == block.end) { | |||
new.next = anv_block_pool_alloc(block_pool); | |||
new.end = new.next + block_pool->block_size; | |||
old.u64 = __sync_fetch_and_add(&pool->block.u64, new.u64 - block.u64); | |||
if (old.next != block.next) | |||
futex_wake(&pool->block.end, INT_MAX); | |||
return new.next; | |||
} else { | |||
futex_wait(&pool->block.end, block.end); | |||
__sync_fetch_and_add(&pool->block.u64, -pool->state_size); | |||
goto restart; | |||
} | |||
} | |||
static void | |||
anv_fixed_size_state_pool_free(struct anv_fixed_size_state_pool *pool, | |||
struct anv_block_pool *block_pool, | |||
uint32_t offset) | |||
{ | |||
anv_free_list_push(&pool->free_list, block_pool->map, offset); | |||
} | |||
void | |||
anv_state_pool_init(struct anv_state_pool *pool, | |||
struct anv_block_pool *block_pool) | |||
{ | |||
pool->block_pool = block_pool; | |||
for (unsigned i = 0; i < ANV_STATE_BUCKETS; i++) { | |||
size_t size = 1 << (ANV_MIN_STATE_SIZE_LOG2 + i); | |||
anv_fixed_size_state_pool_init(&pool->buckets[i], size); | |||
} | |||
} | |||
struct anv_state | |||
anv_state_pool_alloc(struct anv_state_pool *pool, size_t size, size_t align) | |||
{ | |||
unsigned size_log2 = ilog2_round_up(size < align ? align : size); | |||
assert(size_log2 <= ANV_MAX_STATE_SIZE_LOG2); | |||
if (size_log2 < ANV_MIN_STATE_SIZE_LOG2) | |||
size_log2 = ANV_MIN_STATE_SIZE_LOG2; | |||
unsigned bucket = size_log2 - ANV_MIN_STATE_SIZE_LOG2; | |||
struct anv_state state; | |||
state.alloc_size = 1 << size_log2; | |||
state.offset = anv_fixed_size_state_pool_alloc(&pool->buckets[bucket], | |||
pool->block_pool); | |||
state.map = pool->block_pool->map + state.offset; | |||
return state; | |||
} | |||
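/* Worked example, assuming ANV_MIN_STATE_SIZE_LOG2 is 6 (the 64-byte minimum
 * matching the assert in anv_fixed_size_state_pool_init): size = 96 with
 * align = 64 gives size_log2 = 7, so the state comes from bucket 1 with an
 * alloc_size of 128. */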
void | |||
anv_state_pool_free(struct anv_state_pool *pool, struct anv_state state) | |||
{ | |||
assert(is_power_of_two(state.alloc_size)); | |||
unsigned size_log2 = ilog2_round_up(state.alloc_size); | |||
assert(size_log2 >= ANV_MIN_STATE_SIZE_LOG2 && | |||
size_log2 <= ANV_MAX_STATE_SIZE_LOG2); | |||
unsigned bucket = size_log2 - ANV_MIN_STATE_SIZE_LOG2; | |||
anv_fixed_size_state_pool_free(&pool->buckets[bucket], | |||
pool->block_pool, state.offset); | |||
} | |||
#define NULL_BLOCK 1 | |||
struct stream_block { | |||
uint32_t next; | |||
}; | |||
/* The state stream allocator is a one-shot, single-threaded allocator for
 * variable-sized blocks. We use it for allocating dynamic state.
*/ | |||
void | |||
anv_state_stream_init(struct anv_state_stream *stream, | |||
struct anv_block_pool *block_pool) | |||
{ | |||
stream->block_pool = block_pool; | |||
stream->next = 0; | |||
stream->end = 0; | |||
stream->current_block = NULL_BLOCK; | |||
} | |||
void | |||
anv_state_stream_finish(struct anv_state_stream *stream) | |||
{ | |||
struct stream_block *sb; | |||
uint32_t block, next_block; | |||
block = stream->current_block; | |||
   while (block != NULL_BLOCK) {
sb = stream->block_pool->map + block; | |||
next_block = sb->next; | |||
anv_block_pool_free(stream->block_pool, block); | |||
block = next_block; | |||
} | |||
} | |||
struct anv_state | |||
anv_state_stream_alloc(struct anv_state_stream *stream, | |||
uint32_t size, uint32_t alignment) | |||
{ | |||
struct stream_block *sb; | |||
struct anv_state state; | |||
uint32_t block; | |||
state.offset = ALIGN_U32(stream->next, alignment); | |||
if (state.offset + size > stream->end) { | |||
block = anv_block_pool_alloc(stream->block_pool); | |||
sb = stream->block_pool->map + block; | |||
sb->next = stream->current_block; | |||
stream->current_block = block; | |||
stream->next = block + sizeof(*sb); | |||
stream->end = block + stream->block_pool->block_size; | |||
state.offset = ALIGN_U32(stream->next, alignment); | |||
assert(state.offset + size <= stream->end); | |||
} | |||
stream->next = state.offset + size; | |||
state.alloc_size = size; | |||
state.map = stream->block_pool->map + state.offset; | |||
return state; | |||
} |
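/* A hedged sketch of the stream allocator's intended use in a command
 * buffer; block_pool stands in for an initialized anv_block_pool. */
static void
example_state_stream_usage(struct anv_block_pool *block_pool)
{
   struct anv_state_stream stream;

   anv_state_stream_init(&stream, block_pool);

   /* Transient state; the size and alignment here are illustrative. */
   struct anv_state s = anv_state_stream_alloc(&stream, 64, 32);
   (void)s;   /* s.map and s.offset would receive packet data here */

   /* One call frees every block the stream chained together. */
   anv_state_stream_finish(&stream);
}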
@@ -0,0 +1,292 @@ | |||
/* | |||
* Copyright © 2015 Intel Corporation | |||
* | |||
* Permission is hereby granted, free of charge, to any person obtaining a | |||
* copy of this software and associated documentation files (the "Software"), | |||
* to deal in the Software without restriction, including without limitation | |||
* the rights to use, copy, modify, merge, publish, distribute, sublicense, | |||
* and/or sell copies of the Software, and to permit persons to whom the | |||
* Software is furnished to do so, subject to the following conditions: | |||
* | |||
* The above copyright notice and this permission notice (including the next | |||
* paragraph) shall be included in all copies or substantial portions of the | |||
* Software. | |||
* | |||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | |||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | |||
* IN THE SOFTWARE. | |||
*/ | |||
#include <stdlib.h> | |||
#include <stdio.h> | |||
#include <stdint.h> | |||
#include <string.h> | |||
#include <unistd.h> | |||
#include <assert.h> | |||
#include <sys/types.h> | |||
#include <sys/mman.h> | |||
#include <drm.h> | |||
#include <i915_drm.h> | |||
#include "private.h" | |||
#include "aub.h" | |||
struct anv_aub_writer { | |||
FILE *file; | |||
uint32_t offset; | |||
int gen; | |||
}; | |||
static void | |||
aub_out(struct anv_aub_writer *writer, uint32_t data) | |||
{ | |||
fwrite(&data, 1, 4, writer->file); | |||
} | |||
static void | |||
aub_out_data(struct anv_aub_writer *writer, const void *data, size_t size) | |||
{ | |||
fwrite(data, 1, size, writer->file); | |||
} | |||
static struct anv_aub_writer * | |||
get_anv_aub_writer(struct anv_device *device) | |||
{ | |||
struct anv_aub_writer *writer = device->aub_writer; | |||
int entry = 0x200003; | |||
int i; | |||
int gtt_size = 0x10000; | |||
const char *filename; | |||
if (geteuid() != getuid()) | |||
return NULL; | |||
if (writer) | |||
return writer; | |||
writer = malloc(sizeof(*writer)); | |||
if (writer == NULL) | |||
return NULL; | |||
filename = "intel.aub"; | |||
writer->gen = device->info.gen; | |||
writer->file = fopen(filename, "w+"); | |||
if (!writer->file) { | |||
free(writer); | |||
return NULL; | |||
} | |||
/* Start allocating objects from just after the GTT. */ | |||
writer->offset = gtt_size; | |||
/* Start with a (required) version packet. */ | |||
aub_out(writer, CMD_AUB_HEADER | (13 - 2)); | |||
aub_out(writer, | |||
(4 << AUB_HEADER_MAJOR_SHIFT) | | |||
(0 << AUB_HEADER_MINOR_SHIFT)); | |||
for (i = 0; i < 8; i++) { | |||
aub_out(writer, 0); /* app name */ | |||
} | |||
aub_out(writer, 0); /* timestamp */ | |||
aub_out(writer, 0); /* timestamp */ | |||
aub_out(writer, 0); /* comment len */ | |||
/* Set up the GTT. The max we can handle is 256M */ | |||
aub_out(writer, CMD_AUB_TRACE_HEADER_BLOCK | ((writer->gen >= 8 ? 6 : 5) - 2)); | |||
aub_out(writer, | |||
AUB_TRACE_MEMTYPE_GTT_ENTRY | | |||
AUB_TRACE_TYPE_NOTYPE | AUB_TRACE_OP_DATA_WRITE); | |||
aub_out(writer, 0); /* subtype */ | |||
aub_out(writer, 0); /* offset */ | |||
aub_out(writer, gtt_size); /* size */ | |||
if (writer->gen >= 8) | |||
aub_out(writer, 0); | |||
for (i = 0x000; i < gtt_size; i += 4, entry += 0x1000) { | |||
aub_out(writer, entry); | |||
} | |||
return device->aub_writer = writer; | |||
} | |||
void | |||
anv_aub_writer_destroy(struct anv_aub_writer *writer) | |||
{ | |||
fclose(writer->file); | |||
free(writer); | |||
} | |||
/** | |||
* Break up large objects into multiple writes. Otherwise a 128kb VBO | |||
* would overflow the 16 bits of size field in the packet header and | |||
* everything goes badly after that. | |||
*/ | |||
static void | |||
aub_write_trace_block(struct anv_aub_writer *writer, uint32_t type, | |||
void *virtual, uint32_t size, uint32_t gtt_offset) | |||
{ | |||
uint32_t block_size; | |||
uint32_t offset; | |||
uint32_t subtype = 0; | |||
static const char null_block[8 * 4096]; | |||
for (offset = 0; offset < size; offset += block_size) { | |||
block_size = size - offset; | |||
if (block_size > 8 * 4096) | |||
block_size = 8 * 4096; | |||
aub_out(writer, | |||
CMD_AUB_TRACE_HEADER_BLOCK | | |||
((writer->gen >= 8 ? 6 : 5) - 2)); | |||
aub_out(writer, | |||
AUB_TRACE_MEMTYPE_GTT | | |||
type | AUB_TRACE_OP_DATA_WRITE); | |||
aub_out(writer, subtype); | |||
aub_out(writer, gtt_offset + offset); | |||
aub_out(writer, ALIGN_U32(block_size, 4)); | |||
if (writer->gen >= 8) | |||
aub_out(writer, 0); | |||
if (virtual) | |||
aub_out_data(writer, (char *) virtual + offset, block_size); | |||
else | |||
aub_out_data(writer, null_block, block_size); | |||
/* Pad to a multiple of 4 bytes. */ | |||
aub_out_data(writer, null_block, -block_size & 3); | |||
} | |||
} | |||
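/* The "-block_size & 3" pad above is the distance to the next multiple of 4:
 * block_size = 6 gives (-6) & 3 = 2 pad bytes; any multiple of 4 gives 0. */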
/* | |||
* Make a ringbuffer on the fly and dump it
*/ | |||
static void | |||
aub_build_dump_ringbuffer(struct anv_aub_writer *writer, | |||
uint32_t batch_offset, uint32_t offset, | |||
int ring_flag) | |||
{ | |||
uint32_t ringbuffer[4096]; | |||
int ring = AUB_TRACE_TYPE_RING_PRB0; /* The default ring */ | |||
int ring_count = 0; | |||
if (ring_flag == I915_EXEC_BSD) | |||
ring = AUB_TRACE_TYPE_RING_PRB1; | |||
else if (ring_flag == I915_EXEC_BLT) | |||
ring = AUB_TRACE_TYPE_RING_PRB2; | |||
/* Make a ring buffer to execute our batchbuffer. */ | |||
memset(ringbuffer, 0, sizeof(ringbuffer)); | |||
if (writer->gen >= 8) { | |||
ringbuffer[ring_count++] = AUB_MI_BATCH_BUFFER_START | (3 - 2); | |||
ringbuffer[ring_count++] = batch_offset; | |||
ringbuffer[ring_count++] = 0; | |||
} else { | |||
ringbuffer[ring_count++] = AUB_MI_BATCH_BUFFER_START; | |||
ringbuffer[ring_count++] = batch_offset; | |||
} | |||
/* Write out the ring. This appears to trigger execution of | |||
* the ring in the simulator. | |||
*/ | |||
aub_out(writer, | |||
CMD_AUB_TRACE_HEADER_BLOCK | | |||
((writer->gen >= 8 ? 6 : 5) - 2)); | |||
aub_out(writer, | |||
AUB_TRACE_MEMTYPE_GTT | ring | AUB_TRACE_OP_COMMAND_WRITE); | |||
aub_out(writer, 0); /* general/surface subtype */ | |||
aub_out(writer, offset); | |||
aub_out(writer, ring_count * 4); | |||
if (writer->gen >= 8) | |||
aub_out(writer, 0); | |||
/* FIXME: Need some flush operations here? */ | |||
aub_out_data(writer, ringbuffer, ring_count * 4); | |||
} | |||
struct aub_bo { | |||
uint32_t offset; | |||
void *map; | |||
void *relocated; | |||
}; | |||
static void | |||
relocate_bo(struct anv_bo *bo, struct anv_reloc_list *list, struct aub_bo *bos) | |||
{ | |||
struct aub_bo *aub_bo = &bos[bo->index]; | |||
struct drm_i915_gem_relocation_entry *reloc; | |||
uint32_t *dw; | |||
aub_bo->relocated = malloc(bo->size); | |||
memcpy(aub_bo->relocated, aub_bo->map, bo->size); | |||
for (size_t i = 0; i < list->num_relocs; i++) { | |||
reloc = &list->relocs[i]; | |||
assert(reloc->offset < bo->size); | |||
dw = aub_bo->relocated + reloc->offset; | |||
*dw = bos[reloc->target_handle].offset + reloc->delta; | |||
} | |||
} | |||
void | |||
anv_cmd_buffer_dump(struct anv_cmd_buffer *cmd_buffer) | |||
{ | |||
struct anv_device *device = cmd_buffer->device; | |||
struct anv_batch *batch = &cmd_buffer->batch; | |||
struct anv_aub_writer *writer; | |||
struct anv_bo *bo; | |||
uint32_t ring_flag = 0; | |||
uint32_t offset, length; | |||
struct aub_bo *aub_bos; | |||
writer = get_anv_aub_writer(device); | |||
if (writer == NULL) | |||
return; | |||
aub_bos = malloc(cmd_buffer->bo_count * sizeof(aub_bos[0])); | |||
offset = writer->offset; | |||
for (uint32_t i = 0; i < cmd_buffer->bo_count; i++) { | |||
bo = cmd_buffer->exec2_bos[i]; | |||
if (bo->map) | |||
aub_bos[i].map = bo->map; | |||
else | |||
aub_bos[i].map = anv_gem_mmap(device, bo->gem_handle, 0, bo->size); | |||
aub_bos[i].relocated = aub_bos[i].map; | |||
aub_bos[i].offset = offset; | |||
offset = ALIGN_U32(offset + bo->size + 4095, 4096); | |||
} | |||
relocate_bo(&batch->bo, &batch->cmd_relocs, aub_bos); | |||
relocate_bo(&device->surface_state_block_pool.bo, | |||
&batch->surf_relocs, aub_bos); | |||
for (uint32_t i = 0; i < cmd_buffer->bo_count; i++) { | |||
bo = cmd_buffer->exec2_bos[i]; | |||
if (i == cmd_buffer->bo_count - 1) { | |||
length = batch->next - batch->bo.map; | |||
aub_write_trace_block(writer, AUB_TRACE_TYPE_BATCH, | |||
aub_bos[i].relocated, | |||
length, aub_bos[i].offset); | |||
} else { | |||
aub_write_trace_block(writer, AUB_TRACE_TYPE_NOTYPE, | |||
aub_bos[i].relocated, | |||
bo->size, aub_bos[i].offset); | |||
} | |||
if (aub_bos[i].relocated != aub_bos[i].map) | |||
free(aub_bos[i].relocated); | |||
if (aub_bos[i].map != bo->map) | |||
anv_gem_munmap(aub_bos[i].map, bo->size); | |||
} | |||
/* Dump ring buffer */ | |||
aub_build_dump_ringbuffer(writer, aub_bos[batch->bo.index].offset, | |||
offset, ring_flag); | |||
free(aub_bos); | |||
fflush(writer->file); | |||
} |
@@ -0,0 +1,153 @@ | |||
/* | |||
* Copyright © 2015 Intel Corporation | |||
* | |||
* Permission is hereby granted, free of charge, to any person obtaining a | |||
* copy of this software and associated documentation files (the "Software"), | |||
* to deal in the Software without restriction, including without limitation | |||
* the rights to use, copy, modify, merge, publish, distribute, sublicense, | |||
* and/or sell copies of the Software, and to permit persons to whom the | |||
* Software is furnished to do so, subject to the following conditions: | |||
* | |||
* The above copyright notice and this permission notice (including the next | |||
* paragraph) shall be included in all copies or substantial portions of the | |||
* Software. | |||
* | |||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | |||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | |||
* IN THE SOFTWARE. | |||
* | |||
* Authors: | |||
* Eric Anholt <eric@anholt.net> | |||
* | |||
*/ | |||
/** @file intel_aub.h | |||
* | |||
* The AUB file is a file format used by Intel's internal simulation | |||
* and other validation tools. It can be used at various levels by a | |||
* driver to input state to the simulated hardware or a replaying | |||
* debugger. | |||
* | |||
* We choose to dump AUB files using the trace block format for ease | |||
* of implementation -- dump out the blocks of memory as plain blobs | |||
* and insert ring commands to execute the batchbuffer blob. | |||
*/ | |||
#ifndef _INTEL_AUB_H | |||
#define _INTEL_AUB_H | |||
#define AUB_MI_NOOP (0) | |||
#define AUB_MI_BATCH_BUFFER_START (0x31 << 23) | |||
#define AUB_PIPE_CONTROL (0x7a000002) | |||
/* DW0: instruction type. */ | |||
#define CMD_AUB (7 << 29) | |||
#define CMD_AUB_HEADER (CMD_AUB | (1 << 23) | (0x05 << 16)) | |||
/* DW1 */ | |||
# define AUB_HEADER_MAJOR_SHIFT 24 | |||
# define AUB_HEADER_MINOR_SHIFT 16 | |||
#define CMD_AUB_TRACE_HEADER_BLOCK (CMD_AUB | (1 << 23) | (0x41 << 16)) | |||
#define CMD_AUB_DUMP_BMP (CMD_AUB | (1 << 23) | (0x9e << 16)) | |||
/* DW1 */ | |||
#define AUB_TRACE_OPERATION_MASK 0x000000ff | |||
#define AUB_TRACE_OP_COMMENT 0x00000000 | |||
#define AUB_TRACE_OP_DATA_WRITE 0x00000001 | |||
#define AUB_TRACE_OP_COMMAND_WRITE 0x00000002 | |||
#define AUB_TRACE_OP_MMIO_WRITE 0x00000003 | |||
// operation = AUB_TRACE_OP_DATA_WRITE; the type field is one of:
#define AUB_TRACE_TYPE_MASK 0x0000ff00 | |||
#define AUB_TRACE_TYPE_NOTYPE (0 << 8) | |||
#define AUB_TRACE_TYPE_BATCH (1 << 8) | |||
#define AUB_TRACE_TYPE_VERTEX_BUFFER (5 << 8) | |||
#define AUB_TRACE_TYPE_2D_MAP (6 << 8) | |||
#define AUB_TRACE_TYPE_CUBE_MAP (7 << 8) | |||
#define AUB_TRACE_TYPE_VOLUME_MAP (9 << 8) | |||
#define AUB_TRACE_TYPE_1D_MAP (10 << 8) | |||
#define AUB_TRACE_TYPE_CONSTANT_BUFFER (11 << 8) | |||
#define AUB_TRACE_TYPE_CONSTANT_URB (12 << 8) | |||
#define AUB_TRACE_TYPE_INDEX_BUFFER (13 << 8) | |||
#define AUB_TRACE_TYPE_GENERAL (14 << 8) | |||
#define AUB_TRACE_TYPE_SURFACE (15 << 8) | |||
// operation = AUB_TRACE_OP_COMMAND_WRITE; the type field is one of:
#define AUB_TRACE_TYPE_RING_HWB (1 << 8) | |||
#define AUB_TRACE_TYPE_RING_PRB0 (2 << 8) | |||
#define AUB_TRACE_TYPE_RING_PRB1 (3 << 8) | |||
#define AUB_TRACE_TYPE_RING_PRB2 (4 << 8) | |||
// Address space | |||
#define AUB_TRACE_ADDRESS_SPACE_MASK 0x00ff0000 | |||
#define AUB_TRACE_MEMTYPE_GTT (0 << 16) | |||
#define AUB_TRACE_MEMTYPE_LOCAL (1 << 16) | |||
#define AUB_TRACE_MEMTYPE_NONLOCAL (2 << 16) | |||
#define AUB_TRACE_MEMTYPE_PCI (3 << 16) | |||
#define AUB_TRACE_MEMTYPE_GTT_ENTRY (4 << 16) | |||
/* DW2 */ | |||
/** | |||
* aub_state_struct_type enum values are encoded with the top 16 bits | |||
* representing the type to be delivered to the .aub file, and the bottom 16 | |||
* bits representing the subtype. This macro performs the encoding. | |||
*/ | |||
#define ENCODE_SS_TYPE(type, subtype) (((type) << 16) | (subtype)) | |||
enum aub_state_struct_type { | |||
AUB_TRACE_VS_STATE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 1), | |||
AUB_TRACE_GS_STATE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 2), | |||
AUB_TRACE_CLIP_STATE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 3), | |||
AUB_TRACE_SF_STATE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 4), | |||
AUB_TRACE_WM_STATE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 5), | |||
AUB_TRACE_CC_STATE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 6), | |||
AUB_TRACE_CLIP_VP_STATE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 7), | |||
AUB_TRACE_SF_VP_STATE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 8), | |||
AUB_TRACE_CC_VP_STATE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 0x9), | |||
AUB_TRACE_SAMPLER_STATE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 0xa), | |||
AUB_TRACE_KERNEL_INSTRUCTIONS = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 0xb), | |||
AUB_TRACE_SCRATCH_SPACE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 0xc), | |||
AUB_TRACE_SAMPLER_DEFAULT_COLOR = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 0xd), | |||
AUB_TRACE_SCISSOR_STATE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 0x15), | |||
AUB_TRACE_BLEND_STATE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 0x16), | |||
AUB_TRACE_DEPTH_STENCIL_STATE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 0x17), | |||
AUB_TRACE_VERTEX_BUFFER = ENCODE_SS_TYPE(AUB_TRACE_TYPE_VERTEX_BUFFER, 0), | |||
AUB_TRACE_BINDING_TABLE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_SURFACE, 0x100), | |||
AUB_TRACE_SURFACE_STATE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_SURFACE, 0x200), | |||
AUB_TRACE_VS_CONSTANTS = ENCODE_SS_TYPE(AUB_TRACE_TYPE_CONSTANT_BUFFER, 0), | |||
AUB_TRACE_WM_CONSTANTS = ENCODE_SS_TYPE(AUB_TRACE_TYPE_CONSTANT_BUFFER, 1), | |||
}; | |||
#undef ENCODE_SS_TYPE | |||
/** | |||
* Decode an aub_state_struct_type value to determine the type that should be
* stored in the .aub file. | |||
*/ | |||
static inline uint32_t AUB_TRACE_TYPE(enum aub_state_struct_type ss_type) | |||
{ | |||
return (ss_type & 0xFFFF0000) >> 16; | |||
} | |||
/** | |||
* Decode an aub_state_struct_type value to determine the subtype that should be
* stored in the .aub file. | |||
*/ | |||
static inline uint32_t AUB_TRACE_SUBTYPE(enum aub_state_struct_type ss_type) | |||
{ | |||
return ss_type & 0xFFFF; | |||
} | |||
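/* A quick round-trip illustration using values from the table above:
 * AUB_TRACE_BLEND_STATE packs AUB_TRACE_TYPE_GENERAL (14 << 8) in the top
 * half and 0x16 in the bottom half, so
 *
 *    AUB_TRACE_TYPE(AUB_TRACE_BLEND_STATE)    == AUB_TRACE_TYPE_GENERAL
 *    AUB_TRACE_SUBTYPE(AUB_TRACE_BLEND_STATE) == 0x16
 */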
/* DW3: address */ | |||
/* DW4: len */ | |||
#endif /* _INTEL_AUB_H */ |
@@ -0,0 +1,931 @@ | |||
/* | |||
* Copyright © 2015 Intel Corporation | |||
* | |||
* Permission is hereby granted, free of charge, to any person obtaining a | |||
* copy of this software and associated documentation files (the "Software"), | |||
* to deal in the Software without restriction, including without limitation | |||
* the rights to use, copy, modify, merge, publish, distribute, sublicense, | |||
* and/or sell copies of the Software, and to permit persons to whom the | |||
* Software is furnished to do so, subject to the following conditions: | |||
* | |||
* The above copyright notice and this permission notice (including the next | |||
* paragraph) shall be included in all copies or substantial portions of the | |||
* Software. | |||
* | |||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | |||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | |||
* DEALINGS IN THE SOFTWARE. | |||
*/ | |||
#include <sys/stat.h> | |||
#include <unistd.h> | |||
#include <fcntl.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <brw_context.h> | |||
#include <brw_wm.h> /* brw_new_shader_program is here */ | |||
#include <brw_vs.h> | |||
#include <brw_gs.h> | |||
#include <mesa/main/shaderobj.h> | |||
#include <mesa/main/fbobject.h> | |||
#include <mesa/program/program.h> | |||
#include <glsl/program.h> | |||
#include "private.h" | |||
static void | |||
fail_if(int cond, const char *format, ...) | |||
{ | |||
va_list args; | |||
if (!cond) | |||
return; | |||
va_start(args, format); | |||
vfprintf(stderr, format, args); | |||
va_end(args); | |||
exit(1); | |||
} | |||
static VkResult | |||
set_binding_table_layout(struct brw_stage_prog_data *prog_data, | |||
struct anv_pipeline *pipeline, uint32_t stage) | |||
{ | |||
uint32_t count, bias, set, *map; | |||
struct anv_pipeline_layout_entry *entries; | |||
if (stage == VK_SHADER_STAGE_FRAGMENT) | |||
bias = MAX_RTS; | |||
else | |||
bias = 0; | |||
count = pipeline->layout->stage[stage].count; | |||
entries = pipeline->layout->stage[stage].entries; | |||
prog_data->map_entries = | |||
(uint32_t *) malloc(count * sizeof(prog_data->map_entries[0])); | |||
if (prog_data->map_entries == NULL) | |||
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY); | |||
set = 0; | |||
map = prog_data->map_entries; | |||
for (uint32_t i = 0; i < count; i++) { | |||
if (entries[i].set == set) { | |||
prog_data->bind_map[set] = map; | |||
set++; | |||
} | |||
*map++ = bias + i; | |||
} | |||
return VK_SUCCESS; | |||
} | |||
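/* A sketch of the layout this produces, assuming the entries are sorted by
 * set: for a fragment stage (bias = MAX_RTS) with three entries in sets
 * {0, 0, 1},
 *
 *    map_entries = { MAX_RTS + 0, MAX_RTS + 1, MAX_RTS + 2 }
 *    bind_map[0] = &map_entries[0]   // first binding of set 0
 *    bind_map[1] = &map_entries[2]   // first binding of set 1
 *
 * keeping binding table slots below MAX_RTS reserved for render targets.
 */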
static void | |||
brw_vs_populate_key(struct brw_context *brw, | |||
struct brw_vertex_program *vp, | |||
struct brw_vs_prog_key *key) | |||
{ | |||
struct gl_context *ctx = &brw->ctx; | |||
/* BRW_NEW_VERTEX_PROGRAM */ | |||
struct gl_program *prog = (struct gl_program *) vp; | |||
memset(key, 0, sizeof(*key)); | |||
/* Just upload the program verbatim for now. Always send it all | |||
* the inputs it asks for, whether they are varying or not. | |||
*/ | |||
key->base.program_string_id = vp->id; | |||
brw_setup_vue_key_clip_info(brw, &key->base, | |||
vp->program.Base.UsesClipDistanceOut); | |||
/* _NEW_POLYGON */ | |||
if (brw->gen < 6) { | |||
key->copy_edgeflag = (ctx->Polygon.FrontMode != GL_FILL || | |||
ctx->Polygon.BackMode != GL_FILL); | |||
} | |||
if (prog->OutputsWritten & (VARYING_BIT_COL0 | VARYING_BIT_COL1 | | |||
VARYING_BIT_BFC0 | VARYING_BIT_BFC1)) { | |||
/* _NEW_LIGHT | _NEW_BUFFERS */ | |||
key->clamp_vertex_color = ctx->Light._ClampVertexColor; | |||
} | |||
/* _NEW_POINT */ | |||
if (brw->gen < 6 && ctx->Point.PointSprite) { | |||
for (int i = 0; i < 8; i++) { | |||
if (ctx->Point.CoordReplace[i]) | |||
key->point_coord_replace |= (1 << i); | |||
} | |||
} | |||
/* _NEW_TEXTURE */ | |||
brw_populate_sampler_prog_key_data(ctx, prog, brw->vs.base.sampler_count, | |||
&key->base.tex); | |||
} | |||
static bool | |||
really_do_vs_prog(struct brw_context *brw, | |||
struct gl_shader_program *prog, | |||
struct brw_vertex_program *vp, | |||
struct brw_vs_prog_key *key, struct anv_pipeline *pipeline) | |||
{ | |||
GLuint program_size; | |||
const GLuint *program; | |||
struct brw_vs_compile c; | |||
struct brw_vs_prog_data *prog_data = &pipeline->vs_prog_data; | |||
struct brw_stage_prog_data *stage_prog_data = &prog_data->base.base; | |||
void *mem_ctx; | |||
struct gl_shader *vs = NULL; | |||
if (prog) | |||
vs = prog->_LinkedShaders[MESA_SHADER_VERTEX]; | |||
memset(&c, 0, sizeof(c)); | |||
memcpy(&c.key, key, sizeof(*key)); | |||
memset(prog_data, 0, sizeof(*prog_data)); | |||
mem_ctx = ralloc_context(NULL); | |||
c.vp = vp; | |||
/* Allocate the references to the uniforms that will end up in the | |||
* prog_data associated with the compiled program, and which will be freed | |||
* by the state cache. | |||
*/ | |||
int param_count; | |||
if (vs) { | |||
/* We add padding around uniform values below vec4 size, with the worst | |||
* case being a float value that gets blown up to a vec4, so be | |||
* conservative here. | |||
*/ | |||
param_count = vs->num_uniform_components * 4; | |||
} else { | |||
param_count = vp->program.Base.Parameters->NumParameters * 4; | |||
} | |||
/* vec4_visitor::setup_uniform_clipplane_values() also uploads user clip | |||
* planes as uniforms. | |||
*/ | |||
param_count += c.key.base.nr_userclip_plane_consts * 4; | |||
/* We set nr_params here NOT to the size of the param and pull_param
* arrays, but to the number of uniform components vec4_visitor
* needs; vec4_visitor::setup_uniforms() will set it back to a proper value.
*/ | |||
stage_prog_data->nr_params = ALIGN(param_count, 4) / 4; | |||
if (vs) { | |||
stage_prog_data->nr_params += vs->num_samplers; | |||
} | |||
GLbitfield64 outputs_written = vp->program.Base.OutputsWritten; | |||
prog_data->inputs_read = vp->program.Base.InputsRead; | |||
if (c.key.copy_edgeflag) { | |||
outputs_written |= BITFIELD64_BIT(VARYING_SLOT_EDGE); | |||
prog_data->inputs_read |= VERT_BIT_EDGEFLAG; | |||
} | |||
if (brw->gen < 6) { | |||
/* Put dummy slots into the VUE for the SF to put the replaced | |||
* point sprite coords in. We shouldn't need these dummy slots, | |||
* which take up precious URB space, but it would mean that the SF | |||
* doesn't get nice aligned pairs of input coords into output | |||
* coords, which would be a pain to handle. | |||
*/ | |||
for (int i = 0; i < 8; i++) { | |||
if (c.key.point_coord_replace & (1 << i)) | |||
outputs_written |= BITFIELD64_BIT(VARYING_SLOT_TEX0 + i); | |||
} | |||
/* if back colors are written, allocate slots for front colors too */ | |||
if (outputs_written & BITFIELD64_BIT(VARYING_SLOT_BFC0)) | |||
outputs_written |= BITFIELD64_BIT(VARYING_SLOT_COL0); | |||
if (outputs_written & BITFIELD64_BIT(VARYING_SLOT_BFC1)) | |||
outputs_written |= BITFIELD64_BIT(VARYING_SLOT_COL1); | |||
} | |||
/* In order for legacy clipping to work, we need to populate the clip | |||
* distance varying slots whenever clipping is enabled, even if the vertex | |||
* shader doesn't write to gl_ClipDistance. | |||
*/ | |||
if (c.key.base.userclip_active) { | |||
outputs_written |= BITFIELD64_BIT(VARYING_SLOT_CLIP_DIST0); | |||
outputs_written |= BITFIELD64_BIT(VARYING_SLOT_CLIP_DIST1); | |||
} | |||
brw_compute_vue_map(brw->intelScreen->devinfo, | |||
&prog_data->base.vue_map, outputs_written); | |||
set_binding_table_layout(&prog_data->base.base, pipeline, | |||
VK_SHADER_STAGE_VERTEX); | |||
/* Emit GEN4 code. | |||
*/ | |||
program = brw_vs_emit(brw, prog, &c, prog_data, mem_ctx, &program_size); | |||
if (program == NULL) { | |||
ralloc_free(mem_ctx); | |||
return false; | |||
} | |||
pipeline->vs_simd8 = pipeline->program_next; | |||
memcpy((char *) pipeline->device->instruction_block_pool.map + | |||
pipeline->vs_simd8, program, program_size); | |||
pipeline->program_next = align(pipeline->program_next + program_size, 64); | |||
ralloc_free(mem_ctx); | |||
if (stage_prog_data->total_scratch > 0) | |||
if (!anv_bo_init_new(&pipeline->vs_scratch_bo, | |||
pipeline->device, | |||
stage_prog_data->total_scratch)) | |||
return false; | |||
return true; | |||
} | |||
void brw_wm_populate_key(struct brw_context *brw, | |||
struct brw_fragment_program *fp, | |||
struct brw_wm_prog_key *key) | |||
{ | |||
struct gl_context *ctx = &brw->ctx; | |||
struct gl_program *prog = (struct gl_program *) brw->fragment_program; | |||
GLuint lookup = 0; | |||
GLuint line_aa; | |||
bool program_uses_dfdy = fp->program.UsesDFdy; | |||
struct gl_framebuffer draw_buffer; | |||
bool multisample_fbo; | |||
memset(key, 0, sizeof(*key)); | |||
for (int i = 0; i < MAX_SAMPLERS; i++) { | |||
/* Assume color sampler, no swizzling. */ | |||
key->tex.swizzles[i] = SWIZZLE_XYZW; | |||
} | |||
/* A non-zero framebuffer name indicates that the framebuffer was created by | |||
* the user rather than the window system. */ | |||
draw_buffer.Name = 1; | |||
draw_buffer.Visual.samples = 1; | |||
draw_buffer._NumColorDrawBuffers = 1; | |||
draw_buffer.Width = 400; | |||
draw_buffer.Height = 400; | |||
ctx->DrawBuffer = &draw_buffer; | |||
multisample_fbo = ctx->DrawBuffer->Visual.samples > 1; | |||
/* Build the index for table lookup | |||
*/ | |||
if (brw->gen < 6) { | |||
/* _NEW_COLOR */ | |||
if (fp->program.UsesKill || ctx->Color.AlphaEnabled) | |||
lookup |= IZ_PS_KILL_ALPHATEST_BIT; | |||
if (fp->program.Base.OutputsWritten & BITFIELD64_BIT(FRAG_RESULT_DEPTH)) | |||
lookup |= IZ_PS_COMPUTES_DEPTH_BIT; | |||
/* _NEW_DEPTH */ | |||
if (ctx->Depth.Test) | |||
lookup |= IZ_DEPTH_TEST_ENABLE_BIT; | |||
if (ctx->Depth.Test && ctx->Depth.Mask) /* ?? */ | |||
lookup |= IZ_DEPTH_WRITE_ENABLE_BIT; | |||
/* _NEW_STENCIL | _NEW_BUFFERS */ | |||
if (ctx->Stencil._Enabled) { | |||
lookup |= IZ_STENCIL_TEST_ENABLE_BIT; | |||
if (ctx->Stencil.WriteMask[0] || | |||
ctx->Stencil.WriteMask[ctx->Stencil._BackFace]) | |||
lookup |= IZ_STENCIL_WRITE_ENABLE_BIT; | |||
} | |||
key->iz_lookup = lookup; | |||
} | |||
line_aa = AA_NEVER; | |||
/* _NEW_LINE, _NEW_POLYGON, BRW_NEW_REDUCED_PRIMITIVE */ | |||
if (ctx->Line.SmoothFlag) { | |||
if (brw->reduced_primitive == GL_LINES) { | |||
line_aa = AA_ALWAYS; | |||
} | |||
else if (brw->reduced_primitive == GL_TRIANGLES) { | |||
if (ctx->Polygon.FrontMode == GL_LINE) { | |||
line_aa = AA_SOMETIMES; | |||
if (ctx->Polygon.BackMode == GL_LINE || | |||
(ctx->Polygon.CullFlag && | |||
ctx->Polygon.CullFaceMode == GL_BACK)) | |||
line_aa = AA_ALWAYS; | |||
} | |||
else if (ctx->Polygon.BackMode == GL_LINE) { | |||
line_aa = AA_SOMETIMES; | |||
if ((ctx->Polygon.CullFlag && | |||
ctx->Polygon.CullFaceMode == GL_FRONT)) | |||
line_aa = AA_ALWAYS; | |||
} | |||
} | |||
} | |||
key->line_aa = line_aa; | |||
/* _NEW_HINT */ | |||
key->high_quality_derivatives = | |||
ctx->Hint.FragmentShaderDerivative == GL_NICEST; | |||
if (brw->gen < 6) | |||
key->stats_wm = brw->stats_wm; | |||
/* _NEW_LIGHT */ | |||
key->flat_shade = (ctx->Light.ShadeModel == GL_FLAT); | |||
/* _NEW_FRAG_CLAMP | _NEW_BUFFERS */ | |||
key->clamp_fragment_color = ctx->Color._ClampFragmentColor; | |||
/* _NEW_TEXTURE */ | |||
brw_populate_sampler_prog_key_data(ctx, prog, brw->wm.base.sampler_count, | |||
&key->tex); | |||
/* _NEW_BUFFERS */ | |||
/* | |||
* Include the draw buffer origin and height so that we can calculate | |||
* fragment position values relative to the bottom left of the drawable, | |||
* from the incoming screen origin relative position we get as part of our | |||
* payload. | |||
* | |||
* This is only needed for the WM_WPOSXY opcode when the fragment program | |||
* uses the gl_FragCoord input. | |||
* | |||
* We could avoid recompiling by including this as a constant referenced by | |||
* our program, but if we were to do that it would also be nice to handle | |||
* getting that constant updated at batchbuffer submit time (when we | |||
* hold the lock and know where the buffer really is) rather than at emit | |||
* time when we don't hold the lock and are just guessing. We could also | |||
* just avoid using this as key data if the program doesn't use | |||
* fragment.position. | |||
* | |||
* For DRI2 the origin_x/y will always be (0,0) but we still need the | |||
* drawable height in order to invert the Y axis. | |||
*/ | |||
if (fp->program.Base.InputsRead & VARYING_BIT_POS) { | |||
key->drawable_height = ctx->DrawBuffer->Height; | |||
} | |||
if ((fp->program.Base.InputsRead & VARYING_BIT_POS) || program_uses_dfdy) { | |||
key->render_to_fbo = _mesa_is_user_fbo(ctx->DrawBuffer); | |||
} | |||
/* _NEW_BUFFERS */ | |||
key->nr_color_regions = ctx->DrawBuffer->_NumColorDrawBuffers; | |||
/* _NEW_MULTISAMPLE, _NEW_COLOR, _NEW_BUFFERS */ | |||
key->replicate_alpha = ctx->DrawBuffer->_NumColorDrawBuffers > 1 && | |||
(ctx->Multisample.SampleAlphaToCoverage || ctx->Color.AlphaEnabled); | |||
/* _NEW_BUFFERS _NEW_MULTISAMPLE */ | |||
/* Ignore sample qualifier while computing this flag. */ | |||
key->persample_shading = | |||
_mesa_get_min_invocations_per_fragment(ctx, &fp->program, true) > 1; | |||
if (key->persample_shading) | |||
key->persample_2x = ctx->DrawBuffer->Visual.samples == 2; | |||
key->compute_pos_offset = | |||
_mesa_get_min_invocations_per_fragment(ctx, &fp->program, false) > 1 && | |||
fp->program.Base.SystemValuesRead & SYSTEM_BIT_SAMPLE_POS; | |||
key->compute_sample_id = | |||
multisample_fbo && | |||
ctx->Multisample.Enabled && | |||
(fp->program.Base.SystemValuesRead & SYSTEM_BIT_SAMPLE_ID); | |||
/* BRW_NEW_VUE_MAP_GEOM_OUT */ | |||
if (brw->gen < 6 || _mesa_bitcount_64(fp->program.Base.InputsRead & | |||
BRW_FS_VARYING_INPUT_MASK) > 16) | |||
key->input_slots_valid = brw->vue_map_geom_out.slots_valid; | |||
/* _NEW_COLOR | _NEW_BUFFERS */ | |||
/* Pre-gen6, the hardware alpha test always used each render | |||
* target's alpha to do alpha test, as opposed to render target 0's alpha | |||
* like GL requires. Fix that by building the alpha test into the | |||
* shader, and we'll skip enabling the fixed function alpha test. | |||
*/ | |||
if (brw->gen < 6 && ctx->DrawBuffer->_NumColorDrawBuffers > 1 && ctx->Color.AlphaEnabled) { | |||
key->alpha_test_func = ctx->Color.AlphaFunc; | |||
key->alpha_test_ref = ctx->Color.AlphaRef; | |||
} | |||
/* The unique fragment program ID */ | |||
key->program_string_id = fp->id; | |||
ctx->DrawBuffer = NULL; | |||
} | |||
static uint8_t | |||
computed_depth_mode(struct gl_fragment_program *fp) | |||
{ | |||
if (fp->Base.OutputsWritten & BITFIELD64_BIT(FRAG_RESULT_DEPTH)) { | |||
switch (fp->FragDepthLayout) { | |||
case FRAG_DEPTH_LAYOUT_NONE: | |||
case FRAG_DEPTH_LAYOUT_ANY: | |||
return BRW_PSCDEPTH_ON; | |||
case FRAG_DEPTH_LAYOUT_GREATER: | |||
return BRW_PSCDEPTH_ON_GE; | |||
case FRAG_DEPTH_LAYOUT_LESS: | |||
return BRW_PSCDEPTH_ON_LE; | |||
case FRAG_DEPTH_LAYOUT_UNCHANGED: | |||
return BRW_PSCDEPTH_OFF; | |||
} | |||
} | |||
return BRW_PSCDEPTH_OFF; | |||
} | |||
static bool | |||
really_do_wm_prog(struct brw_context *brw, | |||
struct gl_shader_program *prog, | |||
struct brw_fragment_program *fp, | |||
struct brw_wm_prog_key *key, struct anv_pipeline *pipeline) | |||
{ | |||
struct gl_context *ctx = &brw->ctx; | |||
void *mem_ctx = ralloc_context(NULL); | |||
struct brw_wm_prog_data *prog_data = &pipeline->wm_prog_data; | |||
struct gl_shader *fs = NULL; | |||
unsigned int program_size; | |||
const uint32_t *program; | |||
uint32_t offset; | |||
if (prog) | |||
fs = prog->_LinkedShaders[MESA_SHADER_FRAGMENT]; | |||
memset(prog_data, 0, sizeof(*prog_data)); | |||
/* key->alpha_test_func means simulating alpha testing via discards, | |||
* so the shader definitely kills pixels. | |||
*/ | |||
prog_data->uses_kill = fp->program.UsesKill || key->alpha_test_func; | |||
prog_data->computed_depth_mode = computed_depth_mode(&fp->program); | |||
/* Allocate the references to the uniforms that will end up in the | |||
* prog_data associated with the compiled program, and which will be freed | |||
* by the state cache. | |||
*/ | |||
int param_count; | |||
if (fs) { | |||
param_count = fs->num_uniform_components; | |||
} else { | |||
param_count = fp->program.Base.Parameters->NumParameters * 4; | |||
} | |||
/* The backend also sometimes adds params for texture size. */ | |||
param_count += 2 * ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits; | |||
prog_data->base.param = | |||
rzalloc_array(NULL, const gl_constant_value *, param_count); | |||
prog_data->base.pull_param = | |||
rzalloc_array(NULL, const gl_constant_value *, param_count); | |||
prog_data->base.nr_params = param_count; | |||
prog_data->barycentric_interp_modes = | |||
brw_compute_barycentric_interp_modes(brw, key->flat_shade, | |||
key->persample_shading, | |||
&fp->program); | |||
set_binding_table_layout(&prog_data->base, pipeline, | |||
VK_SHADER_STAGE_FRAGMENT); | |||
/* This needs to come after shader time and pull constant entries, but we | |||
* don't have those set up now, so just put it after the layout entries. | |||
*/ | |||
prog_data->binding_table.render_target_start = 0; | |||
program = brw_wm_fs_emit(brw, mem_ctx, key, prog_data, | |||
&fp->program, prog, &program_size); | |||
if (program == NULL) { | |||
ralloc_free(mem_ctx); | |||
return false; | |||
} | |||
offset = pipeline->program_next; | |||
pipeline->program_next = align(pipeline->program_next + program_size, 64); | |||
if (prog_data->no_8) | |||
pipeline->ps_simd8 = NO_KERNEL; | |||
else | |||
pipeline->ps_simd8 = offset; | |||
if (prog_data->no_8 || prog_data->prog_offset_16) | |||
pipeline->ps_simd16 = offset + prog_data->prog_offset_16; | |||
else | |||
pipeline->ps_simd16 = NO_KERNEL; | |||
memcpy((char *) pipeline->device->instruction_block_pool.map + | |||
offset, program, program_size); | |||
ralloc_free(mem_ctx); | |||
if (prog_data->base.total_scratch > 0) | |||
if (!anv_bo_init_new(&pipeline->ps_scratch_bo, | |||
pipeline->device, | |||
prog_data->base.total_scratch)) | |||
return false; | |||
return true; | |||
} | |||
static void | |||
brw_gs_populate_key(struct brw_context *brw, | |||
struct anv_pipeline *pipeline, | |||
struct brw_geometry_program *gp, | |||
struct brw_gs_prog_key *key) | |||
{ | |||
struct gl_context *ctx = &brw->ctx; | |||
struct brw_stage_state *stage_state = &brw->gs.base; | |||
struct gl_program *prog = &gp->program.Base; | |||
memset(key, 0, sizeof(*key)); | |||
key->base.program_string_id = gp->id; | |||
brw_setup_vue_key_clip_info(brw, &key->base, | |||
gp->program.Base.UsesClipDistanceOut); | |||
/* _NEW_TEXTURE */ | |||
brw_populate_sampler_prog_key_data(ctx, prog, stage_state->sampler_count, | |||
&key->base.tex); | |||
struct brw_vs_prog_data *prog_data = &pipeline->vs_prog_data; | |||
/* BRW_NEW_VUE_MAP_VS */ | |||
key->input_varyings = prog_data->base.vue_map.slots_valid; | |||
} | |||
static bool | |||
really_do_gs_prog(struct brw_context *brw, | |||
struct gl_shader_program *prog, | |||
struct brw_geometry_program *gp, | |||
struct brw_gs_prog_key *key, struct anv_pipeline *pipeline) | |||
{ | |||
struct brw_gs_compile_output output; | |||
uint32_t offset; | |||
/* FIXME: We pass the bind map to the compile in the output struct. Need | |||
* something better. */ | |||
set_binding_table_layout(&output.prog_data.base.base, | |||
pipeline, VK_SHADER_STAGE_GEOMETRY); | |||
brw_compile_gs_prog(brw, prog, gp, key, &output); | |||
offset = pipeline->program_next; | |||
pipeline->program_next = align(pipeline->program_next + output.program_size, 64); | |||
pipeline->gs_vec4 = offset; | |||
pipeline->gs_vertex_count = gp->program.VerticesIn; | |||
memcpy((char *) pipeline->device->instruction_block_pool.map + | |||
offset, output.program, output.program_size); | |||
ralloc_free(output.mem_ctx); | |||
if (output.prog_data.base.base.total_scratch) { | |||
if (!anv_bo_init_new(&pipeline->gs_scratch_bo, | |||
pipeline->device, | |||
output.prog_data.base.base.total_scratch)) | |||
return false; | |||
} | |||
memcpy(&pipeline->gs_prog_data, &output.prog_data, sizeof pipeline->gs_prog_data); | |||
return true; | |||
} | |||
static void | |||
fail_on_compile_error(int status, const char *msg) | |||
{ | |||
int source, line, column; | |||
char error[256]; | |||
if (status) | |||
return; | |||
if (sscanf(msg, "%d:%d(%d): error: %255[^\n]", &source, &line, &column, error) == 4) | |||
fail_if(!status, "%d:%s\n", line, error); | |||
else | |||
fail_if(!status, "%s\n", msg); | |||
} | |||
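/* The sscanf pattern above matches the GLSL compiler's
 * "<source>:<line>(<column>): error: <message>" log format. For example,
 * an info log of
 *
 *    0:12(7): error: `foo' undeclared
 *
 * aborts with "12:`foo' undeclared" instead of dumping the whole log.
 */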
struct anv_compiler { | |||
struct intel_screen *screen; | |||
struct brw_context *brw; | |||
}; | |||
extern "C" { | |||
struct anv_compiler * | |||
anv_compiler_create(int fd) | |||
{ | |||
struct anv_compiler *compiler; | |||
compiler = (struct anv_compiler *) malloc(sizeof *compiler); | |||
if (compiler == NULL) | |||
return NULL; | |||
compiler->screen = intel_screen_create(fd); | |||
if (compiler->screen == NULL) { | |||
free(compiler); | |||
return NULL; | |||
} | |||
compiler->brw = intel_context_create(compiler->screen); | |||
if (compiler->brw == NULL) {
   intel_screen_destroy(compiler->screen);
   free(compiler);
   return NULL;
}
compiler->brw->precompile = false; | |||
return compiler; | |||
} | |||
void | |||
anv_compiler_destroy(struct anv_compiler *compiler) | |||
{ | |||
intel_context_destroy(compiler->brw); | |||
intel_screen_destroy(compiler->screen); | |||
free(compiler); | |||
} | |||
/* From gen7_urb.c */ | |||
/* FIXME: Add to struct intel_device_info */ | |||
static const int gen8_push_size = 32 * 1024; | |||
static void | |||
gen7_compute_urb_partition(struct anv_pipeline *pipeline) | |||
{ | |||
const struct brw_device_info *devinfo = &pipeline->device->info; | |||
unsigned vs_size = pipeline->vs_prog_data.base.urb_entry_size; | |||
unsigned vs_entry_size_bytes = vs_size * 64; | |||
bool gs_present = pipeline->gs_vec4 != NO_KERNEL; | |||
unsigned gs_size = gs_present ? pipeline->gs_prog_data.base.urb_entry_size : 1; | |||
unsigned gs_entry_size_bytes = gs_size * 64; | |||
/* From p35 of the Ivy Bridge PRM (section 1.7.1: 3DSTATE_URB_GS): | |||
* | |||
* VS Number of URB Entries must be divisible by 8 if the VS URB Entry | |||
* Allocation Size is less than 9 512-bit URB entries. | |||
* | |||
* Similar text exists for GS. | |||
*/ | |||
unsigned vs_granularity = (vs_size < 9) ? 8 : 1; | |||
unsigned gs_granularity = (gs_size < 9) ? 8 : 1; | |||
/* URB allocations must be done in 8k chunks. */ | |||
unsigned chunk_size_bytes = 8192; | |||
/* Determine the size of the URB in chunks. */ | |||
unsigned urb_chunks = devinfo->urb.size * 1024 / chunk_size_bytes; | |||
/* Reserve space for push constants */ | |||
unsigned push_constant_bytes = gen8_push_size; | |||
unsigned push_constant_chunks = | |||
push_constant_bytes / chunk_size_bytes; | |||
/* Initially, assign each stage the minimum amount of URB space it needs, | |||
* and make a note of how much additional space it "wants" (the amount of | |||
* additional space it could actually make use of). | |||
*/ | |||
/* VS has a lower limit on the number of URB entries */ | |||
unsigned vs_chunks = | |||
ALIGN(devinfo->urb.min_vs_entries * vs_entry_size_bytes, | |||
chunk_size_bytes) / chunk_size_bytes; | |||
unsigned vs_wants = | |||
ALIGN(devinfo->urb.max_vs_entries * vs_entry_size_bytes, | |||
chunk_size_bytes) / chunk_size_bytes - vs_chunks; | |||
unsigned gs_chunks = 0; | |||
unsigned gs_wants = 0; | |||
if (gs_present) { | |||
/* There are two constraints on the minimum amount of URB space we can | |||
* allocate: | |||
* | |||
* (1) We need room for at least 2 URB entries, since we always operate | |||
* the GS in DUAL_OBJECT mode. | |||
* | |||
* (2) We can't allocate fewer than gs_granularity entries.
*/ | |||
gs_chunks = ALIGN(MAX2(gs_granularity, 2) * gs_entry_size_bytes, | |||
chunk_size_bytes) / chunk_size_bytes; | |||
gs_wants = | |||
ALIGN(devinfo->urb.max_gs_entries * gs_entry_size_bytes, | |||
chunk_size_bytes) / chunk_size_bytes - gs_chunks; | |||
} | |||
/* There should always be enough URB space to satisfy the minimum | |||
* requirements of each stage. | |||
*/ | |||
unsigned total_needs = push_constant_chunks + vs_chunks + gs_chunks; | |||
assert(total_needs <= urb_chunks); | |||
/* Mete out remaining space (if any) in proportion to "wants". */ | |||
unsigned total_wants = vs_wants + gs_wants; | |||
unsigned remaining_space = urb_chunks - total_needs; | |||
if (remaining_space > total_wants) | |||
remaining_space = total_wants; | |||
if (remaining_space > 0) { | |||
unsigned vs_additional = (unsigned) | |||
round(vs_wants * (((double) remaining_space) / total_wants)); | |||
vs_chunks += vs_additional; | |||
remaining_space -= vs_additional; | |||
gs_chunks += remaining_space; | |||
} | |||
/* Sanity check that we haven't over-allocated. */ | |||
assert(push_constant_chunks + vs_chunks + gs_chunks <= urb_chunks); | |||
/* Finally, compute the number of entries that can fit in the space | |||
* allocated to each stage. | |||
*/ | |||
unsigned nr_vs_entries = vs_chunks * chunk_size_bytes / vs_entry_size_bytes; | |||
unsigned nr_gs_entries = gs_chunks * chunk_size_bytes / gs_entry_size_bytes; | |||
/* Since we rounded up when computing *_wants, this may be slightly more | |||
* than the maximum allowed amount, so correct for that. | |||
*/ | |||
nr_vs_entries = MIN2(nr_vs_entries, devinfo->urb.max_vs_entries); | |||
nr_gs_entries = MIN2(nr_gs_entries, devinfo->urb.max_gs_entries); | |||
/* Ensure that we program a multiple of the granularity. */ | |||
nr_vs_entries = ROUND_DOWN_TO(nr_vs_entries, vs_granularity); | |||
nr_gs_entries = ROUND_DOWN_TO(nr_gs_entries, gs_granularity); | |||
/* Finally, sanity check to make sure we have at least the minimum number | |||
* of entries needed for each stage. | |||
*/ | |||
assert(nr_vs_entries >= devinfo->urb.min_vs_entries); | |||
if (gs_present) | |||
assert(nr_gs_entries >= 2); | |||
/* Lay out the URB in the following order: | |||
* - push constants | |||
* - VS | |||
* - GS | |||
*/ | |||
pipeline->urb.vs_start = push_constant_chunks; | |||
pipeline->urb.vs_size = vs_size; | |||
pipeline->urb.nr_vs_entries = nr_vs_entries; | |||
pipeline->urb.gs_start = push_constant_chunks + vs_chunks; | |||
pipeline->urb.gs_size = gs_size; | |||
pipeline->urb.nr_gs_entries = nr_gs_entries; | |||
} | |||
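/* A worked example with assumed (hypothetical) hardware limits: a 384 KB
 * URB, no GS, vs_size = 2 (128-byte VS entries), min_vs_entries = 64,
 * max_vs_entries = 2560:
 *
 *    urb_chunks           = 384 * 1024 / 8192            = 48
 *    push_constant_chunks = 32768 / 8192                 = 4
 *    vs_chunks            = ALIGN(64 * 128, 8192) / 8192 = 1
 *    vs_wants             = 2560 * 128 / 8192 - 1        = 39
 *
 * The 43 leftover chunks are clamped to the 39 the VS wants, so vs_chunks
 * becomes 40 and nr_vs_entries = 40 * 8192 / 128 = 2560: push constants
 * occupy chunks [0, 4) and VS entries chunks [4, 44).
 */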
static const struct { | |||
uint32_t token; | |||
const char *name; | |||
} stage_info[] = { | |||
{ GL_VERTEX_SHADER, "vertex" }, | |||
{ GL_TESS_CONTROL_SHADER, "tess control" }, | |||
{ GL_TESS_EVALUATION_SHADER, "tess evaluation" }, | |||
{ GL_GEOMETRY_SHADER, "geometry" }, | |||
{ GL_FRAGMENT_SHADER, "fragment" }, | |||
{ GL_COMPUTE_SHADER, "compute" }, | |||
}; | |||
static void | |||
anv_compile_shader(struct anv_compiler *compiler, | |||
struct gl_shader_program *program, | |||
struct anv_pipeline *pipeline, uint32_t stage) | |||
{ | |||
struct brw_context *brw = compiler->brw; | |||
struct gl_shader *shader; | |||
int name = 0; | |||
shader = brw_new_shader(&brw->ctx, name, stage_info[stage].token); | |||
fail_if(shader == NULL, "failed to create %s shader\n", stage_info[stage].name); | |||
shader->Source = strdup(pipeline->shaders[stage]->data); | |||
_mesa_glsl_compile_shader(&brw->ctx, shader, false, false); | |||
fail_on_compile_error(shader->CompileStatus, shader->InfoLog); | |||
program->Shaders[program->NumShaders] = shader; | |||
program->NumShaders++; | |||
} | |||
int | |||
anv_compiler_run(struct anv_compiler *compiler, struct anv_pipeline *pipeline) | |||
{ | |||
struct gl_shader_program *program; | |||
int name = 0; | |||
struct brw_context *brw = compiler->brw; | |||
struct anv_device *device = pipeline->device; | |||
brw->use_rep_send = pipeline->use_repclear; | |||
brw->no_simd8 = pipeline->use_repclear; | |||
program = brw->ctx.Driver.NewShaderProgram(name);
fail_if(program == NULL, "failed to create program\n");
program->Shaders = (struct gl_shader **)
   calloc(VK_NUM_SHADER_STAGE, sizeof(struct gl_shader *));
fail_if(program->Shaders == NULL, "failed to allocate shader array\n");
/* FIXME: Only supports vs and fs combo at the moment */ | |||
assert(pipeline->shaders[VK_SHADER_STAGE_VERTEX]); | |||
assert(pipeline->shaders[VK_SHADER_STAGE_FRAGMENT]); | |||
anv_compile_shader(compiler, program, pipeline, VK_SHADER_STAGE_VERTEX); | |||
anv_compile_shader(compiler, program, pipeline, VK_SHADER_STAGE_FRAGMENT); | |||
if (pipeline->shaders[VK_SHADER_STAGE_GEOMETRY]) | |||
anv_compile_shader(compiler, program, pipeline, VK_SHADER_STAGE_GEOMETRY); | |||
_mesa_glsl_link_shader(&brw->ctx, program); | |||
fail_on_compile_error(program->LinkStatus, | |||
program->InfoLog); | |||
pipeline->program_block = | |||
anv_block_pool_alloc(&device->instruction_block_pool); | |||
pipeline->program_next = pipeline->program_block; | |||
bool success; | |||
struct brw_wm_prog_key wm_key; | |||
struct gl_fragment_program *fp = (struct gl_fragment_program *) | |||
program->_LinkedShaders[MESA_SHADER_FRAGMENT]->Program; | |||
struct brw_fragment_program *bfp = brw_fragment_program(fp); | |||
brw_wm_populate_key(brw, bfp, &wm_key); | |||
success = really_do_wm_prog(brw, program, bfp, &wm_key, pipeline); | |||
fail_if(!success, "do_wm_prog failed\n"); | |||
pipeline->prog_data[VK_SHADER_STAGE_FRAGMENT] = &pipeline->wm_prog_data.base; | |||
struct brw_vs_prog_key vs_key; | |||
struct gl_vertex_program *vp = (struct gl_vertex_program *) | |||
program->_LinkedShaders[MESA_SHADER_VERTEX]->Program; | |||
struct brw_vertex_program *bvp = brw_vertex_program(vp); | |||
brw_vs_populate_key(brw, bvp, &vs_key); | |||
success = really_do_vs_prog(brw, program, bvp, &vs_key, pipeline); | |||
fail_if(!success, "do_wm_prog failed\n"); | |||
pipeline->prog_data[VK_SHADER_STAGE_VERTEX] = &pipeline->vs_prog_data.base.base; | |||
if (pipeline->shaders[VK_SHADER_STAGE_GEOMETRY]) { | |||
struct brw_gs_prog_key gs_key; | |||
struct gl_geometry_program *gp = (struct gl_geometry_program *) | |||
program->_LinkedShaders[MESA_SHADER_GEOMETRY]->Program; | |||
struct brw_geometry_program *bgp = brw_geometry_program(gp); | |||
brw_gs_populate_key(brw, pipeline, bgp, &gs_key); | |||
success = really_do_gs_prog(brw, program, bgp, &gs_key, pipeline); | |||
fail_if(!success, "do_gs_prog failed\n"); | |||
pipeline->active_stages = VK_SHADER_STAGE_VERTEX_BIT | | |||
VK_SHADER_STAGE_GEOMETRY_BIT | VK_SHADER_STAGE_FRAGMENT_BIT; | |||
pipeline->prog_data[VK_SHADER_STAGE_GEOMETRY] = &pipeline->gs_prog_data.base.base; | |||
} else { | |||
pipeline->gs_vec4 = NO_KERNEL; | |||
pipeline->active_stages = | |||
VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT; | |||
} | |||
/* FIXME: Allocate more blocks if we fill up this one and, worst case,
 * allocate multiple contiguous blocks from the end of the pool to hold
 * really big programs. */
assert(pipeline->program_next - pipeline->program_block < 8192); | |||
brw->ctx.Driver.DeleteShaderProgram(&brw->ctx, program); | |||
gen7_compute_urb_partition(pipeline); | |||
return 0; | |||
} | |||
/* This badly named function frees the struct anv_pipeline data that the compiler | |||
* allocates. Currently just the prog_data structs. | |||
*/ | |||
void | |||
anv_compiler_free(struct anv_pipeline *pipeline) | |||
{ | |||
struct anv_device *device = pipeline->device; | |||
for (uint32_t stage = 0; stage < VK_NUM_SHADER_STAGE; stage++) | |||
if (pipeline->prog_data[stage]) | |||
free(pipeline->prog_data[stage]->map_entries); | |||
anv_block_pool_free(&device->instruction_block_pool, | |||
pipeline->program_block); | |||
} | |||
} |
@@ -0,0 +1,283 @@ | |||
/* | |||
* Copyright © 2015 Intel Corporation | |||
* | |||
* Permission is hereby granted, free of charge, to any person obtaining a | |||
* copy of this software and associated documentation files (the "Software"), | |||
* to deal in the Software without restriction, including without limitation | |||
* the rights to use, copy, modify, merge, publish, distribute, sublicense, | |||
* and/or sell copies of the Software, and to permit persons to whom the | |||
* Software is furnished to do so, subject to the following conditions: | |||
* | |||
* The above copyright notice and this permission notice (including the next | |||
* paragraph) shall be included in all copies or substantial portions of the | |||
* Software. | |||
* | |||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | |||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | |||
* IN THE SOFTWARE. | |||
*/ | |||
#define _DEFAULT_SOURCE | |||
#include <sys/ioctl.h> | |||
#include <sys/mman.h> | |||
#include <string.h> | |||
#include <errno.h> | |||
#include <unistd.h> | |||
#include <fcntl.h> | |||
#include "private.h" | |||
#ifdef HAVE_VALGRIND | |||
#include <valgrind.h> | |||
#include <memcheck.h> | |||
#define VG(x) x | |||
#else | |||
#define VG(x) | |||
#endif | |||
#define VG_CLEAR(s) VG(memset(&s, 0, sizeof(s))) | |||
static int | |||
anv_ioctl(int fd, unsigned long request, void *arg) | |||
{ | |||
int ret; | |||
do { | |||
ret = ioctl(fd, request, arg); | |||
} while (ret == -1 && (errno == EINTR || errno == EAGAIN)); | |||
return ret; | |||
} | |||
/** | |||
* Wrapper around DRM_IOCTL_I915_GEM_CREATE. | |||
* | |||
* Return gem handle, or 0 on failure. Gem handles are never 0. | |||
*/ | |||
uint32_t | |||
anv_gem_create(struct anv_device *device, size_t size) | |||
{ | |||
struct drm_i915_gem_create gem_create; | |||
int ret; | |||
VG_CLEAR(gem_create); | |||
gem_create.size = size; | |||
ret = anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_CREATE, &gem_create); | |||
if (ret != 0) { | |||
/* FIXME: What do we do if this fails? */ | |||
return 0; | |||
} | |||
return gem_create.handle; | |||
} | |||
void | |||
anv_gem_close(struct anv_device *device, int gem_handle) | |||
{ | |||
struct drm_gem_close close; | |||
VG_CLEAR(close); | |||
close.handle = gem_handle; | |||
anv_ioctl(device->fd, DRM_IOCTL_GEM_CLOSE, &close); | |||
} | |||
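/* A minimal usage sketch (hypothetical 'device' variable):
 *
 *    uint32_t handle = anv_gem_create(device, 4096);
 *    if (handle == 0) {
 *       // creation failed; gem handles are never 0
 *    }
 *    ...
 *    anv_gem_close(device, handle);
 */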
/** | |||
* Wrapper around DRM_IOCTL_I915_GEM_MMAP. | |||
*/ | |||
void* | |||
anv_gem_mmap(struct anv_device *device, uint32_t gem_handle, | |||
uint64_t offset, uint64_t size) | |||
{ | |||
struct drm_i915_gem_mmap gem_mmap; | |||
int ret; | |||
VG_CLEAR(gem_mmap); | |||
gem_mmap.handle = gem_handle; | |||
gem_mmap.offset = offset; | |||
gem_mmap.size = size; | |||
ret = anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_MMAP, &gem_mmap); | |||
if (ret != 0) { | |||
/* FIXME: Is NULL the right error return? Cf MAP_INVALID */ | |||
return NULL; | |||
} | |||
VG(VALGRIND_MALLOCLIKE_BLOCK(gem_mmap.addr_ptr, gem_mmap.size, 0, 1)); | |||
return (void *)(uintptr_t) gem_mmap.addr_ptr; | |||
} | |||
/* This is just a wrapper around munmap, but it also notifies valgrind that | |||
* this map is no longer valid. Pair this with anv_gem_mmap(). | |||
*/ | |||
void | |||
anv_gem_munmap(void *p, uint64_t size) | |||
{ | |||
munmap(p, size); | |||
VG(VALGRIND_FREELIKE_BLOCK(p, 0)); | |||
} | |||
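/* Sketch of the intended pairing (hypothetical 'bo', 'data' variables):
 * upload through a CPU map, then drop the mapping so valgrind's view of
 * the address space stays accurate:
 *
 *    void *map = anv_gem_mmap(device, bo->gem_handle, 0, bo->size);
 *    if (map != NULL) {
 *       memcpy(map, data, data_size);
 *       anv_gem_munmap(map, bo->size);
 *    }
 */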
int | |||
anv_gem_userptr(struct anv_device *device, void *mem, size_t size) | |||
{ | |||
struct drm_i915_gem_userptr userptr; | |||
int ret; | |||
VG_CLEAR(userptr); | |||
userptr.user_ptr = (__u64)((unsigned long) mem); | |||
userptr.user_size = size; | |||
userptr.flags = 0; | |||
ret = anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_USERPTR, &userptr); | |||
if (ret == -1) | |||
return 0; | |||
return userptr.handle; | |||
} | |||
/** | |||
* On error, \a timeout_ns holds the remaining time. | |||
*/ | |||
int | |||
anv_gem_wait(struct anv_device *device, int gem_handle, int64_t *timeout_ns) | |||
{ | |||
struct drm_i915_gem_wait wait; | |||
int ret; | |||
VG_CLEAR(wait); | |||
wait.bo_handle = gem_handle; | |||
wait.timeout_ns = *timeout_ns; | |||
wait.flags = 0; | |||
ret = anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_WAIT, &wait); | |||
*timeout_ns = wait.timeout_ns; | |||
if (ret == -1) | |||
return -errno; | |||
return ret; | |||
} | |||
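/* Usage sketch (hypothetical 'bo'): block for at most one second, then
 * inspect how much of the budget remains:
 *
 *    int64_t timeout_ns = 1000000000;
 *    int ret = anv_gem_wait(device, bo->gem_handle, &timeout_ns);
 *    if (ret < 0) {
 *       // typically -ETIME on timeout, another -errno otherwise;
 *       // timeout_ns holds the remaining time either way
 *    }
 */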
int | |||
anv_gem_execbuffer(struct anv_device *device, | |||
struct drm_i915_gem_execbuffer2 *execbuf) | |||
{ | |||
return anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, execbuf); | |||
} | |||
int | |||
anv_gem_set_tiling(struct anv_device *device, | |||
int gem_handle, uint32_t stride, uint32_t tiling) | |||
{ | |||
struct drm_i915_gem_set_tiling set_tiling; | |||
int ret; | |||
/* set_tiling overwrites the input on the error path, so we have to open | |||
* code anv_ioctl. | |||
*/ | |||
do { | |||
VG_CLEAR(set_tiling); | |||
set_tiling.handle = gem_handle; | |||
set_tiling.tiling_mode = tiling;
set_tiling.stride = stride; | |||
ret = ioctl(device->fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling); | |||
} while (ret == -1 && (errno == EINTR || errno == EAGAIN)); | |||
return ret; | |||
} | |||
int | |||
anv_gem_get_param(int fd, uint32_t param) | |||
{ | |||
drm_i915_getparam_t gp; | |||
int ret, tmp; | |||
VG_CLEAR(gp); | |||
gp.param = param; | |||
gp.value = &tmp; | |||
ret = anv_ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp); | |||
if (ret == 0) | |||
return tmp; | |||
return 0; | |||
} | |||
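/* Usage sketch: 'param' is one of the kernel's I915_PARAM_* values, with 0
 * doubling as the error return, e.g.
 *
 *    int devid = anv_gem_get_param(fd, I915_PARAM_CHIPSET_ID);
 *    if (devid == 0) {
 *       // query failed
 *    }
 */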
int | |||
anv_gem_create_context(struct anv_device *device) | |||
{ | |||
struct drm_i915_gem_context_create create; | |||
int ret; | |||
VG_CLEAR(create); | |||
ret = anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create); | |||
if (ret == -1) | |||
return -1; | |||
return create.ctx_id; | |||
} | |||
int | |||
anv_gem_destroy_context(struct anv_device *device, int context) | |||
{ | |||
struct drm_i915_gem_context_destroy destroy; | |||
VG_CLEAR(destroy); | |||
destroy.ctx_id = context; | |||
return anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy); | |||
} | |||
int | |||
anv_gem_get_aperture(struct anv_device *device, uint64_t *size) | |||
{ | |||
struct drm_i915_gem_get_aperture aperture; | |||
int ret; | |||
VG_CLEAR(aperture); | |||
ret = anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture); | |||
if (ret == -1) | |||
return -1; | |||
*size = aperture.aper_available_size; | |||
return 0; | |||
} | |||
int | |||
anv_gem_handle_to_fd(struct anv_device *device, int gem_handle) | |||
{ | |||
struct drm_prime_handle args; | |||
int ret; | |||
VG_CLEAR(args); | |||
args.handle = gem_handle; | |||
args.flags = DRM_CLOEXEC; | |||
ret = anv_ioctl(device->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args); | |||
if (ret == -1) | |||
return -1; | |||
return args.fd; | |||
} | |||
int | |||
anv_gem_fd_to_handle(struct anv_device *device, int fd) | |||
{ | |||
struct drm_prime_handle args; | |||
int ret; | |||
VG_CLEAR(args); | |||
args.fd = fd; | |||
ret = anv_ioctl(device->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args); | |||
if (ret == -1) | |||
return 0; | |||
return args.handle; | |||
} |
@@ -0,0 +1,404 @@ | |||
/* | |||
* Copyright © 2015 Intel Corporation | |||
* | |||
* Permission is hereby granted, free of charge, to any person obtaining a | |||
* copy of this software and associated documentation files (the "Software"), | |||
* to deal in the Software without restriction, including without limitation | |||
* the rights to use, copy, modify, merge, publish, distribute, sublicense, | |||
* and/or sell copies of the Software, and to permit persons to whom the | |||
* Software is furnished to do so, subject to the following conditions: | |||
* | |||
* The above copyright notice and this permission notice (including the next | |||
* paragraph) shall be included in all copies or substantial portions of the | |||
* Software. | |||
* | |||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | |||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | |||
* IN THE SOFTWARE. | |||
*/ | |||
#include <assert.h> | |||
#include <stdbool.h> | |||
#include <string.h> | |||
#include <unistd.h> | |||
#include <fcntl.h> | |||
#include "private.h" | |||
// Image functions | |||
static const struct anv_format anv_formats[] = { | |||
[VK_FORMAT_UNDEFINED] = { .format = RAW }, | |||
// [VK_FORMAT_R4G4_UNORM] = { .format = R4G4_UNORM }, | |||
// [VK_FORMAT_R4G4_USCALED] = { .format = R4G4_USCALED }, | |||
// [VK_FORMAT_R4G4B4A4_UNORM] = { .format = R4G4B4A4_UNORM }, | |||
// [VK_FORMAT_R4G4B4A4_USCALED] = { .format = R4G4B4A4_USCALED }, | |||
// [VK_FORMAT_R5G6B5_UNORM] = { .format = R5G6B5_UNORM }, | |||
// [VK_FORMAT_R5G6B5_USCALED] = { .format = R5G6B5_USCALED }, | |||
// [VK_FORMAT_R5G5B5A1_UNORM] = { .format = R5G5B5A1_UNORM }, | |||
// [VK_FORMAT_R5G5B5A1_USCALED] = { .format = R5G5B5A1_USCALED }, | |||
[VK_FORMAT_R8_UNORM] = { .format = R8_UNORM, .cpp = 1, .channels = 1 }, | |||
[VK_FORMAT_R8_SNORM] = { .format = R8_SNORM, .cpp = 1, .channels = 1 }, | |||
[VK_FORMAT_R8_USCALED] = { .format = R8_USCALED, .cpp = 1, .channels = 1 }, | |||
[VK_FORMAT_R8_SSCALED] = { .format = R8_SSCALED, .cpp = 1, .channels = 1 }, | |||
[VK_FORMAT_R8_UINT] = { .format = R8_UINT, .cpp = 1, .channels = 1 }, | |||
[VK_FORMAT_R8_SINT] = { .format = R8_SINT, .cpp = 1, .channels = 1 }, | |||
// [VK_FORMAT_R8_SRGB] = { .format = R8_SRGB, .cpp = 1 }, | |||
[VK_FORMAT_R8G8_UNORM] = { .format = R8G8_UNORM, .cpp = 2, .channels = 2 }, | |||
[VK_FORMAT_R8G8_SNORM] = { .format = R8G8_SNORM, .cpp = 2, .channels = 2 }, | |||
[VK_FORMAT_R8G8_USCALED] = { .format = R8G8_USCALED, .cpp = 2, .channels = 2 }, | |||
[VK_FORMAT_R8G8_SSCALED] = { .format = R8G8_SSCALED, .cpp = 2, .channels = 2 }, | |||
[VK_FORMAT_R8G8_UINT] = { .format = R8G8_UINT, .cpp = 2, .channels = 2 }, | |||
[VK_FORMAT_R8G8_SINT] = { .format = R8G8_SINT, .cpp = 2, .channels = 2 }, | |||
// [VK_FORMAT_R8G8_SRGB] = { .format = R8G8_SRGB }, | |||
[VK_FORMAT_R8G8B8_UNORM] = { .format = R8G8B8X8_UNORM, .cpp = 3, .channels = 3 }, | |||
// [VK_FORMAT_R8G8B8_SNORM] = { .format = R8G8B8X8_SNORM, .cpp = 4 }, | |||
[VK_FORMAT_R8G8B8_USCALED] = { .format = R8G8B8_USCALED, .cpp = 3, .channels = 3 }, | |||
[VK_FORMAT_R8G8B8_SSCALED] = { .format = R8G8B8_SSCALED, .cpp = 3, .channels = 3 }, | |||
[VK_FORMAT_R8G8B8_UINT] = { .format = R8G8B8_UINT, .cpp = 3, .channels = 3 }, | |||
[VK_FORMAT_R8G8B8_SINT] = { .format = R8G8B8_SINT, .cpp = 3, .channels = 3 }, | |||
// [VK_FORMAT_R8G8B8_SRGB] = { .format = R8G8B8_SRGB }, | |||
[VK_FORMAT_R8G8B8A8_UNORM] = { .format = R8G8B8A8_UNORM, .cpp = 4, .channels = 4 }, | |||
[VK_FORMAT_R8G8B8A8_SNORM] = { .format = R8G8B8A8_SNORM, .cpp = 4, .channels = 4 }, | |||
[VK_FORMAT_R8G8B8A8_USCALED] = { .format = R8G8B8A8_USCALED, .cpp = 4, .channels = 4 }, | |||
[VK_FORMAT_R8G8B8A8_SSCALED] = { .format = R8G8B8A8_SSCALED, .cpp = 4, .channels = 4 }, | |||
[VK_FORMAT_R8G8B8A8_UINT] = { .format = R8G8B8A8_UINT, .cpp = 4, .channels = 4 }, | |||
[VK_FORMAT_R8G8B8A8_SINT] = { .format = R8G8B8A8_SINT, .cpp = 4, .channels = 4 }, | |||
// [VK_FORMAT_R8G8B8A8_SRGB] = { .format = R8G8B8A8_SRGB }, | |||
// [VK_FORMAT_R10G10B10A2_UNORM] = { .format = R10G10B10A2_UNORM }, | |||
// [VK_FORMAT_R10G10B10A2_SNORM] = { .format = R10G10B10A2_SNORM }, | |||
// [VK_FORMAT_R10G10B10A2_USCALED] = { .format = R10G10B10A2_USCALED }, | |||
// [VK_FORMAT_R10G10B10A2_SSCALED] = { .format = R10G10B10A2_SSCALED }, | |||
// [VK_FORMAT_R10G10B10A2_UINT] = { .format = R10G10B10A2_UINT }, | |||
// [VK_FORMAT_R10G10B10A2_SINT] = { .format = R10G10B10A2_SINT }, | |||
// [VK_FORMAT_R16_UNORM] = { .format = R16_UNORM }, | |||
// [VK_FORMAT_R16_SNORM] = { .format = R16_SNORM }, | |||
// [VK_FORMAT_R16_USCALED] = { .format = R16_USCALED }, | |||
// [VK_FORMAT_R16_SSCALED] = { .format = R16_SSCALED }, | |||
// [VK_FORMAT_R16_UINT] = { .format = R16_UINT }, | |||
// [VK_FORMAT_R16_SINT] = { .format = R16_SINT }, | |||
[VK_FORMAT_R16_SFLOAT] = { .format = R16_FLOAT, .cpp = 2, .channels = 1 }, | |||
// [VK_FORMAT_R16G16_UNORM] = { .format = R16G16_UNORM }, | |||
// [VK_FORMAT_R16G16_SNORM] = { .format = R16G16_SNORM }, | |||
// [VK_FORMAT_R16G16_USCALED] = { .format = R16G16_USCALED }, | |||
// [VK_FORMAT_R16G16_SSCALED] = { .format = R16G16_SSCALED }, | |||
// [VK_FORMAT_R16G16_UINT] = { .format = R16G16_UINT }, | |||
// [VK_FORMAT_R16G16_SINT] = { .format = R16G16_SINT }, | |||
[VK_FORMAT_R16G16_SFLOAT] = { .format = R16G16_FLOAT, .cpp = 4, .channels = 2 }, | |||
// [VK_FORMAT_R16G16B16_UNORM] = { .format = R16G16B16_UNORM }, | |||
// [VK_FORMAT_R16G16B16_SNORM] = { .format = R16G16B16_SNORM }, | |||
// [VK_FORMAT_R16G16B16_USCALED] = { .format = R16G16B16_USCALED }, | |||
// [VK_FORMAT_R16G16B16_SSCALED] = { .format = R16G16B16_SSCALED }, | |||
// [VK_FORMAT_R16G16B16_UINT] = { .format = R16G16B16_UINT }, | |||
// [VK_FORMAT_R16G16B16_SINT] = { .format = R16G16B16_SINT }, | |||
[VK_FORMAT_R16G16B16_SFLOAT] = { .format = R16G16B16_FLOAT, .cpp = 6, .channels = 3 }, | |||
// [VK_FORMAT_R16G16B16A16_UNORM] = { .format = R16G16B16A16_UNORM }, | |||
// [VK_FORMAT_R16G16B16A16_SNORM] = { .format = R16G16B16A16_SNORM }, | |||
// [VK_FORMAT_R16G16B16A16_USCALED] = { .format = R16G16B16A16_USCALED }, | |||
// [VK_FORMAT_R16G16B16A16_SSCALED] = { .format = R16G16B16A16_SSCALED }, | |||
// [VK_FORMAT_R16G16B16A16_UINT] = { .format = R16G16B16A16_UINT }, | |||
// [VK_FORMAT_R16G16B16A16_SINT] = { .format = R16G16B16A16_SINT }, | |||
[VK_FORMAT_R16G16B16A16_SFLOAT] = { .format = R16G16B16A16_FLOAT, .cpp = 8, .channels = 4 }, | |||
// [VK_FORMAT_R32_UINT] = { .format = R32_UINT }, | |||
// [VK_FORMAT_R32_SINT] = { .format = R32_SINT }, | |||
[VK_FORMAT_R32_SFLOAT] = { .format = R32_FLOAT, .cpp = 4, .channels = 1 }, | |||
// [VK_FORMAT_R32G32_UINT] = { .format = R32G32_UINT }, | |||
// [VK_FORMAT_R32G32_SINT] = { .format = R32G32_SINT }, | |||
[VK_FORMAT_R32G32_SFLOAT] = { .format = R32G32_FLOAT, .cpp = 8, .channels = 2 }, | |||
// [VK_FORMAT_R32G32B32_UINT] = { .format = R32G32B32_UINT }, | |||
// [VK_FORMAT_R32G32B32_SINT] = { .format = R32G32B32_SINT }, | |||
[VK_FORMAT_R32G32B32_SFLOAT] = { .format = R32G32B32_FLOAT, .cpp = 12, .channels = 3 }, | |||
// [VK_FORMAT_R32G32B32A32_UINT] = { .format = R32G32B32A32_UINT }, | |||
// [VK_FORMAT_R32G32B32A32_SINT] = { .format = R32G32B32A32_SINT }, | |||
[VK_FORMAT_R32G32B32A32_SFLOAT] = { .format = R32G32B32A32_FLOAT, .cpp = 16, .channels = 4 }, | |||
[VK_FORMAT_R64_SFLOAT] = { .format = R64_FLOAT, .cpp = 8, .channels = 1 }, | |||
[VK_FORMAT_R64G64_SFLOAT] = { .format = R64G64_FLOAT, .cpp = 16, .channels = 2 }, | |||
[VK_FORMAT_R64G64B64_SFLOAT] = { .format = R64G64B64_FLOAT, .cpp = 24, .channels = 3 }, | |||
[VK_FORMAT_R64G64B64A64_SFLOAT] = { .format = R64G64B64A64_FLOAT, .cpp = 32, .channels = 4 }, | |||
// [VK_FORMAT_R11G11B10_UFLOAT] = { .format = R11G11B10_UFLOAT }, | |||
// [VK_FORMAT_R9G9B9E5_UFLOAT] = { .format = R9G9B9E5_UFLOAT }, | |||
// [VK_FORMAT_D16_UNORM] = { .format = D16_UNORM }, | |||
// [VK_FORMAT_D24_UNORM] = { .format = D24_UNORM }, | |||
// [VK_FORMAT_D32_SFLOAT] = { .format = D32_SFLOAT }, | |||
// [VK_FORMAT_S8_UINT] = { .format = S8_UINT }, | |||
// [VK_FORMAT_D16_UNORM_S8_UINT] = { .format = D16_UNORM }, | |||
// [VK_FORMAT_D24_UNORM_S8_UINT] = { .format = D24_UNORM }, | |||
// [VK_FORMAT_D32_SFLOAT_S8_UINT] = { .format = D32_SFLOAT }, | |||
// [VK_FORMAT_BC1_RGB_UNORM] = { .format = BC1_RGB }, | |||
// [VK_FORMAT_BC1_RGB_SRGB] = { .format = BC1_RGB }, | |||
// [VK_FORMAT_BC1_RGBA_UNORM] = { .format = BC1_RGBA }, | |||
// [VK_FORMAT_BC1_RGBA_SRGB] = { .format = BC1_RGBA }, | |||
// [VK_FORMAT_BC2_UNORM] = { .format = BC2_UNORM }, | |||
// [VK_FORMAT_BC2_SRGB] = { .format = BC2_SRGB }, | |||
// [VK_FORMAT_BC3_UNORM] = { .format = BC3_UNORM }, | |||
// [VK_FORMAT_BC3_SRGB] = { .format = BC3_SRGB }, | |||
// [VK_FORMAT_BC4_UNORM] = { .format = BC4_UNORM }, | |||
// [VK_FORMAT_BC4_SNORM] = { .format = BC4_SNORM }, | |||
// [VK_FORMAT_BC5_UNORM] = { .format = BC5_UNORM }, | |||
// [VK_FORMAT_BC5_SNORM] = { .format = BC5_SNORM }, | |||
// [VK_FORMAT_BC6H_UFLOAT] = { .format = BC6H_UFLOAT }, | |||
// [VK_FORMAT_BC6H_SFLOAT] = { .format = BC6H_SFLOAT }, | |||
// [VK_FORMAT_BC7_UNORM] = { .format = BC7_UNORM }, | |||
// [VK_FORMAT_BC7_SRGB] = { .format = BC7_SRGB }, | |||
// [VK_FORMAT_ETC2_R8G8B8_UNORM] = { .format = ETC2_R8G8B8 }, | |||
// [VK_FORMAT_ETC2_R8G8B8_SRGB] = { .format = ETC2_R8G8B8 }, | |||
// [VK_FORMAT_ETC2_R8G8B8A1_UNORM] = { .format = ETC2_R8G8B8A1 }, | |||
// [VK_FORMAT_ETC2_R8G8B8A1_SRGB] = { .format = ETC2_R8G8B8A1 }, | |||
// [VK_FORMAT_ETC2_R8G8B8A8_UNORM] = { .format = ETC2_R8G8B8A8 }, | |||
// [VK_FORMAT_ETC2_R8G8B8A8_SRGB] = { .format = ETC2_R8G8B8A8 }, | |||
// [VK_FORMAT_EAC_R11_UNORM] = { .format = EAC_R11 }, | |||
// [VK_FORMAT_EAC_R11_SNORM] = { .format = EAC_R11 }, | |||
// [VK_FORMAT_EAC_R11G11_UNORM] = { .format = EAC_R11G11 }, | |||
// [VK_FORMAT_EAC_R11G11_SNORM] = { .format = EAC_R11G11 }, | |||
// [VK_FORMAT_ASTC_4x4_UNORM] = { .format = ASTC_4x4 }, | |||
// [VK_FORMAT_ASTC_4x4_SRGB] = { .format = ASTC_4x4 }, | |||
// [VK_FORMAT_ASTC_5x4_UNORM] = { .format = ASTC_5x4 }, | |||
// [VK_FORMAT_ASTC_5x4_SRGB] = { .format = ASTC_5x4 }, | |||
// [VK_FORMAT_ASTC_5x5_UNORM] = { .format = ASTC_5x5 }, | |||
// [VK_FORMAT_ASTC_5x5_SRGB] = { .format = ASTC_5x5 }, | |||
// [VK_FORMAT_ASTC_6x5_UNORM] = { .format = ASTC_6x5 }, | |||
// [VK_FORMAT_ASTC_6x5_SRGB] = { .format = ASTC_6x5 }, | |||
// [VK_FORMAT_ASTC_6x6_UNORM] = { .format = ASTC_6x6 }, | |||
// [VK_FORMAT_ASTC_6x6_SRGB] = { .format = ASTC_6x6 }, | |||
// [VK_FORMAT_ASTC_8x5_UNORM] = { .format = ASTC_8x5 }, | |||
// [VK_FORMAT_ASTC_8x5_SRGB] = { .format = ASTC_8x5 }, | |||
// [VK_FORMAT_ASTC_8x6_UNORM] = { .format = ASTC_8x6 }, | |||
// [VK_FORMAT_ASTC_8x6_SRGB] = { .format = ASTC_8x6 }, | |||
// [VK_FORMAT_ASTC_8x8_UNORM] = { .format = ASTC_8x8 }, | |||
// [VK_FORMAT_ASTC_8x8_SRGB] = { .format = ASTC_8x8 }, | |||
// [VK_FORMAT_ASTC_10x5_UNORM] = { .format = ASTC_10x5 }, | |||
// [VK_FORMAT_ASTC_10x5_SRGB] = { .format = ASTC_10x5 }, | |||
// [VK_FORMAT_ASTC_10x6_UNORM] = { .format = ASTC_10x6 }, | |||
// [VK_FORMAT_ASTC_10x6_SRGB] = { .format = ASTC_10x6 }, | |||
// [VK_FORMAT_ASTC_10x8_UNORM] = { .format = ASTC_10x8 }, | |||
// [VK_FORMAT_ASTC_10x8_SRGB] = { .format = ASTC_10x8 }, | |||
// [VK_FORMAT_ASTC_10x10_UNORM] = { .format = ASTC_10x10 }, | |||
// [VK_FORMAT_ASTC_10x10_SRGB] = { .format = ASTC_10x10 }, | |||
// [VK_FORMAT_ASTC_12x10_UNORM] = { .format = ASTC_12x10 }, | |||
// [VK_FORMAT_ASTC_12x10_SRGB] = { .format = ASTC_12x10 }, | |||
// [VK_FORMAT_ASTC_12x12_UNORM] = { .format = ASTC_12x12 }, | |||
// [VK_FORMAT_ASTC_12x12_SRGB] = { .format = ASTC_12x12 }, | |||
// [VK_FORMAT_B4G4R4A4_UNORM] = { .format = B4G4R4A4_UNORM }, | |||
// [VK_FORMAT_B5G5R5A1_UNORM] = { .format = B5G5R5A1_UNORM }, | |||
// [VK_FORMAT_B5G6R5_UNORM] = { .format = B5G6R5_UNORM }, | |||
// [VK_FORMAT_B5G6R5_USCALED] = { .format = B5G6R5_USCALED }, | |||
// [VK_FORMAT_B8G8R8_UNORM] = { .format = B8G8R8_UNORM }, | |||
// [VK_FORMAT_B8G8R8_SNORM] = { .format = B8G8R8_SNORM }, | |||
// [VK_FORMAT_B8G8R8_USCALED] = { .format = B8G8R8_USCALED }, | |||
// [VK_FORMAT_B8G8R8_SSCALED] = { .format = B8G8R8_SSCALED }, | |||
// [VK_FORMAT_B8G8R8_UINT] = { .format = B8G8R8_UINT }, | |||
// [VK_FORMAT_B8G8R8_SINT] = { .format = B8G8R8_SINT }, | |||
// [VK_FORMAT_B8G8R8_SRGB] = { .format = B8G8R8_SRGB }, | |||
[VK_FORMAT_B8G8R8A8_UNORM] = { .format = B8G8R8A8_UNORM, .cpp = 4, .channels = 4 }, | |||
// [VK_FORMAT_B8G8R8A8_SNORM] = { .format = B8G8R8A8_SNORM }, | |||
// [VK_FORMAT_B8G8R8A8_USCALED] = { .format = B8G8R8A8_USCALED }, | |||
// [VK_FORMAT_B8G8R8A8_SSCALED] = { .format = B8G8R8A8_SSCALED }, | |||
// [VK_FORMAT_B8G8R8A8_UINT] = { .format = B8G8R8A8_UINT }, | |||
// [VK_FORMAT_B8G8R8A8_SINT] = { .format = B8G8R8A8_SINT }, | |||
// [VK_FORMAT_B8G8R8A8_SRGB] = { .format = B8G8R8A8_SRGB }, | |||
// [VK_FORMAT_B10G10R10A2_UNORM] = { .format = B10G10R10A2_UNORM }, | |||
// [VK_FORMAT_B10G10R10A2_SNORM] = { .format = B10G10R10A2_SNORM }, | |||
// [VK_FORMAT_B10G10R10A2_USCALED] = { .format = B10G10R10A2_USCALED }, | |||
// [VK_FORMAT_B10G10R10A2_SSCALED] = { .format = B10G10R10A2_SSCALED }, | |||
// [VK_FORMAT_B10G10R10A2_UINT] = { .format = B10G10R10A2_UINT }, | |||
// [VK_FORMAT_B10G10R10A2_SINT] = { .format = B10G10R10A2_SINT } | |||
}; | |||
const struct anv_format * | |||
anv_format_for_vk_format(VkFormat format) | |||
{ | |||
return &anv_formats[format]; | |||
} | |||
static const struct anv_tile_mode_info { | |||
int32_t tile_width; | |||
int32_t tile_height; | |||
} tile_mode_info[] = { | |||
[LINEAR] = { 1, 1 }, | |||
[XMAJOR] = { 512, 8 }, | |||
[YMAJOR] = { 128, 32 }, | |||
[WMAJOR] = { 128, 32 } | |||
}; | |||
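/* Worked example of the sizing math in vkCreateImage() below (a sketch,
 * assuming a 100x100 RGBA8 image, cpp = 4, created with
 * VK_IMAGE_TILING_OPTIMAL and hence YMAJOR):
 *
 *    stride         = ALIGN_I32(100 * 4, 128) = 512 bytes
 *    aligned_height = ALIGN_I32(100, 32)      = 128 rows
 *    size           = 512 * 128               = 64 KiB
 *
 * LINEAR uses { 1, 1 }, i.e. no padding beyond the row itself.
 */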
VkResult VKAPI vkCreateImage( | |||
VkDevice _device, | |||
const VkImageCreateInfo* pCreateInfo, | |||
VkImage* pImage) | |||
{ | |||
struct anv_device *device = (struct anv_device *) _device; | |||
struct anv_image *image; | |||
const struct anv_format *format; | |||
int32_t aligned_height; | |||
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO); | |||
image = anv_device_alloc(device, sizeof(*image), 8, | |||
VK_SYSTEM_ALLOC_TYPE_API_OBJECT); | |||
if (image == NULL) | |||
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY); | |||
image->mem = NULL; | |||
image->offset = 0; | |||
image->type = pCreateInfo->imageType; | |||
image->extent = pCreateInfo->extent; | |||
assert(image->extent.width > 0); | |||
assert(image->extent.height > 0); | |||
assert(image->extent.depth > 0); | |||
switch (pCreateInfo->tiling) { | |||
case VK_IMAGE_TILING_LINEAR: | |||
image->tile_mode = LINEAR; | |||
/* Linear depth buffers must be 64 byte aligned, which is the strictest | |||
* requirement for all kinds of linear surfaces. | |||
*/ | |||
image->alignment = 64; | |||
break; | |||
case VK_IMAGE_TILING_OPTIMAL: | |||
image->tile_mode = YMAJOR; | |||
image->alignment = 4096; | |||
break; | |||
   default:
      /* tile_mode and alignment would be left uninitialized otherwise. */
      assert(!"Invalid image tiling mode");
      break;
   }
format = anv_format_for_vk_format(pCreateInfo->format); | |||
image->stride = ALIGN_I32(image->extent.width * format->cpp, | |||
tile_mode_info[image->tile_mode].tile_width); | |||
aligned_height = ALIGN_I32(image->extent.height, | |||
tile_mode_info[image->tile_mode].tile_height); | |||
image->size = image->stride * aligned_height; | |||
*pImage = (VkImage) image; | |||
return VK_SUCCESS; | |||
} | |||
VkResult VKAPI vkGetImageSubresourceInfo( | |||
VkDevice device, | |||
VkImage image, | |||
const VkImageSubresource* pSubresource, | |||
VkSubresourceInfoType infoType, | |||
size_t* pDataSize, | |||
void* pData) | |||
{ | |||
return VK_UNSUPPORTED; | |||
} | |||
// Image view functions | |||
static struct anv_state | |||
create_surface_state(struct anv_device *device, | |||
struct anv_image *image, const struct anv_format *format) | |||
{ | |||
struct anv_state state = | |||
anv_state_pool_alloc(&device->surface_state_pool, 64, 64); | |||
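   /* RENDER_SURFACE_STATE encodes sizes zero-based, hence the "- 1" on
    * Width, Height, Depth and SurfacePitch below. */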
struct GEN8_RENDER_SURFACE_STATE surface_state = { | |||
.SurfaceType = SURFTYPE_2D, | |||
.SurfaceArray = false, | |||
.SurfaceFormat = format->format, | |||
.SurfaceVerticalAlignment = VALIGN4, | |||
.SurfaceHorizontalAlignment = HALIGN4, | |||
.TileMode = image->tile_mode, | |||
.VerticalLineStride = 0, | |||
.VerticalLineStrideOffset = 0, | |||
.SamplerL2BypassModeDisable = true, | |||
.RenderCacheReadWriteMode = WriteOnlyCache, | |||
.MemoryObjectControlState = 0, /* FIXME: MOCS */ | |||
.BaseMipLevel = 0, | |||
.SurfaceQPitch = 0, | |||
.Height = image->extent.height - 1, | |||
.Width = image->extent.width - 1, | |||
.Depth = image->extent.depth - 1, | |||
.SurfacePitch = image->stride - 1, | |||
.MinimumArrayElement = 0, | |||
.NumberofMultisamples = MULTISAMPLECOUNT_1, | |||
.XOffset = 0, | |||
.YOffset = 0, | |||
.SurfaceMinLOD = 0, | |||
.MIPCountLOD = 0, | |||
.AuxiliarySurfaceMode = AUX_NONE, | |||
.RedClearColor = 0, | |||
.GreenClearColor = 0, | |||
.BlueClearColor = 0, | |||
.AlphaClearColor = 0, | |||
.ShaderChannelSelectRed = SCS_RED, | |||
.ShaderChannelSelectGreen = SCS_GREEN, | |||
.ShaderChannelSelectBlue = SCS_BLUE, | |||
.ShaderChannelSelectAlpha = SCS_ALPHA, | |||
.ResourceMinLOD = 0, | |||
/* FIXME: We assume that the image must be bound at this time. */ | |||
.SurfaceBaseAddress = { NULL, image->offset }, | |||
}; | |||
GEN8_RENDER_SURFACE_STATE_pack(NULL, state.map, &surface_state); | |||
return state; | |||
} | |||
VkResult VKAPI vkCreateImageView( | |||
VkDevice _device, | |||
const VkImageViewCreateInfo* pCreateInfo, | |||
VkImageView* pView) | |||
{ | |||
struct anv_device *device = (struct anv_device *) _device; | |||
struct anv_image_view *view; | |||
const struct anv_format *format = | |||
anv_format_for_vk_format(pCreateInfo->format); | |||
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO); | |||
view = anv_device_alloc(device, sizeof(*view), 8, | |||
VK_SYSTEM_ALLOC_TYPE_API_OBJECT); | |||
if (view == NULL) | |||
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY); | |||
view->image = (struct anv_image *) pCreateInfo->image; | |||
view->surface_state = create_surface_state(device, view->image, format); | |||
*pView = (VkImageView) view; | |||
return VK_SUCCESS; | |||
} | |||
VkResult VKAPI vkCreateColorAttachmentView( | |||
VkDevice _device, | |||
const VkColorAttachmentViewCreateInfo* pCreateInfo, | |||
VkColorAttachmentView* pView) | |||
{ | |||
struct anv_device *device = (struct anv_device *) _device; | |||
struct anv_color_attachment_view *view; | |||
struct anv_image *image; | |||
const struct anv_format *format = | |||
anv_format_for_vk_format(pCreateInfo->format); | |||
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_COLOR_ATTACHMENT_VIEW_CREATE_INFO); | |||
view = anv_device_alloc(device, sizeof(*view), 8, | |||
VK_SYSTEM_ALLOC_TYPE_API_OBJECT); | |||
if (view == NULL) | |||
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY); | |||
view->image = (struct anv_image *) pCreateInfo->image; | |||
image = view->image; | |||
view->surface_state = create_surface_state(device, image, format); | |||
*pView = (VkColorAttachmentView) view; | |||
return VK_SUCCESS; | |||
} | |||
VkResult VKAPI vkCreateDepthStencilView( | |||
VkDevice device, | |||
const VkDepthStencilViewCreateInfo* pCreateInfo, | |||
VkDepthStencilView* pView) | |||
{ | |||
return VK_UNSUPPORTED; | |||
} |
@@ -0,0 +1,93 @@ | |||
/* | |||
* Copyright © 2015 Intel Corporation | |||
* | |||
* Permission is hereby granted, free of charge, to any person obtaining a | |||
* copy of this software and associated documentation files (the "Software"), | |||
* to deal in the Software without restriction, including without limitation | |||
* the rights to use, copy, modify, merge, publish, distribute, sublicense, | |||
* and/or sell copies of the Software, and to permit persons to whom the | |||
* Software is furnished to do so, subject to the following conditions: | |||
* | |||
* The above copyright notice and this permission notice (including the next | |||
* paragraph) shall be included in all copies or substantial portions of the | |||
* Software. | |||
* | |||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | |||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | |||
* IN THE SOFTWARE. | |||
*/ | |||
#include <assert.h> | |||
#include <stdbool.h> | |||
#include <string.h> | |||
#include <unistd.h> | |||
#include <fcntl.h> | |||
#include "private.h" | |||
#include <vulkan/vulkan_intel.h> | |||
VkResult VKAPI vkCreateDmaBufImageINTEL( | |||
VkDevice _device, | |||
const VkDmaBufImageCreateInfo* pCreateInfo, | |||
VkDeviceMemory* pMem, | |||
VkImage* pImage) | |||
{ | |||
struct anv_device *device = (struct anv_device *) _device; | |||
struct anv_device_memory *mem; | |||
struct anv_image *image; | |||
VkResult result; | |||
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DMA_BUF_IMAGE_CREATE_INFO_INTEL); | |||
mem = anv_device_alloc(device, sizeof(*mem), 8, | |||
VK_SYSTEM_ALLOC_TYPE_API_OBJECT); | |||
if (mem == NULL) | |||
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY); | |||
mem->bo.gem_handle = anv_gem_fd_to_handle(device, pCreateInfo->fd); | |||
if (!mem->bo.gem_handle) { | |||
result = vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY); | |||
goto fail; | |||
} | |||
mem->bo.map = NULL; | |||
mem->bo.index = 0; | |||
mem->bo.offset = 0; | |||
mem->bo.size = pCreateInfo->strideInBytes * pCreateInfo->extent.height; | |||
image = anv_device_alloc(device, sizeof(*image), 8, | |||
VK_SYSTEM_ALLOC_TYPE_API_OBJECT); | |||
if (image == NULL) { | |||
result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY); | |||
goto fail_mem; | |||
} | |||
image->mem = mem; | |||
image->offset = 0; | |||
image->type = VK_IMAGE_TYPE_2D; | |||
image->extent = pCreateInfo->extent; | |||
image->tile_mode = XMAJOR; | |||
image->stride = pCreateInfo->strideInBytes; | |||
image->size = mem->bo.size; | |||
assert(image->extent.width > 0); | |||
assert(image->extent.height > 0); | |||
assert(image->extent.depth == 1); | |||
*pMem = (VkDeviceMemory) mem; | |||
*pImage = (VkImage) image; | |||
return VK_SUCCESS; | |||
fail_mem: | |||
anv_gem_close(device, mem->bo.gem_handle); | |||
fail: | |||
anv_device_free(device, mem); | |||
return result; | |||
} |
@@ -0,0 +1,140 @@ | |||
/* | |||
* Copyright © 2015 Intel Corporation | |||
* | |||
* Permission is hereby granted, free of charge, to any person obtaining a | |||
* copy of this software and associated documentation files (the "Software"), | |||
* to deal in the Software without restriction, including without limitation | |||
* the rights to use, copy, modify, merge, publish, distribute, sublicense, | |||
* and/or sell copies of the Software, and to permit persons to whom the | |||
* Software is furnished to do so, subject to the following conditions: | |||
* | |||
* The above copyright notice and this permission notice (including the next | |||
* paragraph) shall be included in all copies or substantial portions of the | |||
* Software. | |||
* | |||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | |||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | |||
* IN THE SOFTWARE. | |||
*/ | |||
#include <assert.h> | |||
#include <stdbool.h> | |||
#include <string.h> | |||
#include <unistd.h> | |||
#include <fcntl.h> | |||
#include "private.h" | |||
void VKAPI vkCmdCopyBuffer( | |||
VkCmdBuffer cmdBuffer, | |||
VkBuffer srcBuffer, | |||
VkBuffer destBuffer, | |||
uint32_t regionCount, | |||
const VkBufferCopy* pRegions) | |||
{ | |||
} | |||
void VKAPI vkCmdCopyImage( | |||
VkCmdBuffer cmdBuffer, | |||
VkImage srcImage, | |||
VkImageLayout srcImageLayout, | |||
VkImage destImage, | |||
VkImageLayout destImageLayout, | |||
uint32_t regionCount, | |||
const VkImageCopy* pRegions) | |||
{ | |||
} | |||
void VKAPI vkCmdBlitImage( | |||
VkCmdBuffer cmdBuffer, | |||
VkImage srcImage, | |||
VkImageLayout srcImageLayout, | |||
VkImage destImage, | |||
VkImageLayout destImageLayout, | |||
uint32_t regionCount, | |||
const VkImageBlit* pRegions) | |||
{ | |||
} | |||
void VKAPI vkCmdCopyBufferToImage( | |||
VkCmdBuffer cmdBuffer, | |||
VkBuffer srcBuffer, | |||
VkImage destImage, | |||
VkImageLayout destImageLayout, | |||
uint32_t regionCount, | |||
const VkBufferImageCopy* pRegions) | |||
{ | |||
} | |||
void VKAPI vkCmdCopyImageToBuffer( | |||
VkCmdBuffer cmdBuffer, | |||
VkImage srcImage, | |||
VkImageLayout srcImageLayout, | |||
VkBuffer destBuffer, | |||
uint32_t regionCount, | |||
const VkBufferImageCopy* pRegions) | |||
{ | |||
} | |||
void VKAPI vkCmdCloneImageData( | |||
VkCmdBuffer cmdBuffer, | |||
VkImage srcImage, | |||
VkImageLayout srcImageLayout, | |||
VkImage destImage, | |||
VkImageLayout destImageLayout) | |||
{ | |||
} | |||
void VKAPI vkCmdUpdateBuffer( | |||
VkCmdBuffer cmdBuffer, | |||
VkBuffer destBuffer, | |||
VkDeviceSize destOffset, | |||
VkDeviceSize dataSize, | |||
const uint32_t* pData) | |||
{ | |||
} | |||
void VKAPI vkCmdFillBuffer( | |||
VkCmdBuffer cmdBuffer, | |||
VkBuffer destBuffer, | |||
VkDeviceSize destOffset, | |||
VkDeviceSize fillSize, | |||
uint32_t data) | |||
{ | |||
} | |||
void VKAPI vkCmdClearColorImage( | |||
VkCmdBuffer cmdBuffer, | |||
VkImage image, | |||
VkImageLayout imageLayout, | |||
const VkClearColor* color, | |||
uint32_t rangeCount, | |||
const VkImageSubresourceRange* pRanges) | |||
{ | |||
} | |||
void VKAPI vkCmdClearDepthStencil( | |||
VkCmdBuffer cmdBuffer, | |||
VkImage image, | |||
VkImageLayout imageLayout, | |||
float depth, | |||
uint32_t stencil, | |||
uint32_t rangeCount, | |||
const VkImageSubresourceRange* pRanges) | |||
{ | |||
} | |||
void VKAPI vkCmdResolveImage( | |||
VkCmdBuffer cmdBuffer, | |||
VkImage srcImage, | |||
VkImageLayout srcImageLayout, | |||
VkImage destImage, | |||
VkImageLayout destImageLayout, | |||
uint32_t regionCount, | |||
const VkImageResolve* pRegions) | |||
{ | |||
} |
@@ -0,0 +1,565 @@ | |||
/* | |||
* Copyright © 2015 Intel Corporation | |||
* | |||
* Permission is hereby granted, free of charge, to any person obtaining a | |||
* copy of this software and associated documentation files (the "Software"), | |||
* to deal in the Software without restriction, including without limitation | |||
* the rights to use, copy, modify, merge, publish, distribute, sublicense, | |||
* and/or sell copies of the Software, and to permit persons to whom the | |||
* Software is furnished to do so, subject to the following conditions: | |||
* | |||
* The above copyright notice and this permission notice (including the next | |||
* paragraph) shall be included in all copies or substantial portions of the | |||
* Software. | |||
* | |||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | |||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | |||
* IN THE SOFTWARE. | |||
*/ | |||
#include <assert.h> | |||
#include <stdbool.h> | |||
#include <string.h> | |||
#include <unistd.h> | |||
#include <fcntl.h> | |||
#include "private.h" | |||
// Shader functions | |||
VkResult VKAPI vkCreateShader( | |||
VkDevice _device, | |||
const VkShaderCreateInfo* pCreateInfo, | |||
VkShader* pShader) | |||
{ | |||
struct anv_device *device = (struct anv_device *) _device; | |||
struct anv_shader *shader; | |||
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_CREATE_INFO); | |||
shader = anv_device_alloc(device, sizeof(*shader) + pCreateInfo->codeSize, 8, | |||
VK_SYSTEM_ALLOC_TYPE_API_OBJECT); | |||
if (shader == NULL) | |||
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY); | |||
shader->size = pCreateInfo->codeSize; | |||
memcpy(shader->data, pCreateInfo->pCode, shader->size); | |||
*pShader = (VkShader) shader; | |||
return VK_SUCCESS; | |||
} | |||
// Pipeline functions | |||
static void | |||
emit_vertex_input(struct anv_pipeline *pipeline, VkPipelineVertexInputCreateInfo *info) | |||
{ | |||
const uint32_t num_dwords = 1 + info->attributeCount * 2; | |||
uint32_t *p; | |||
bool instancing_enable[32]; | |||
for (uint32_t i = 0; i < info->bindingCount; i++) { | |||
const VkVertexInputBindingDescription *desc = | |||
&info->pVertexBindingDescriptions[i]; | |||
pipeline->binding_stride[desc->binding] = desc->strideInBytes; | |||
/* Step rate is programmed per vertex element (attribute), not | |||
* binding. Set up a map of which bindings step per instance, for | |||
* reference by vertex element setup. */ | |||
switch (desc->stepRate) { | |||
default: | |||
case VK_VERTEX_INPUT_STEP_RATE_VERTEX: | |||
instancing_enable[desc->binding] = false; | |||
break; | |||
case VK_VERTEX_INPUT_STEP_RATE_INSTANCE: | |||
instancing_enable[desc->binding] = true; | |||
break; | |||
} | |||
} | |||
p = anv_batch_emitn(&pipeline->batch, num_dwords, | |||
GEN8_3DSTATE_VERTEX_ELEMENTS); | |||
for (uint32_t i = 0; i < info->attributeCount; i++) { | |||
const VkVertexInputAttributeDescription *desc = | |||
&info->pVertexAttributeDescriptions[i]; | |||
const struct anv_format *format = anv_format_for_vk_format(desc->format); | |||
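      /* Components the source format doesn't supply are synthesized:
       * missing Y/Z read back 0 and a missing W reads back 1.0, the usual
       * (0, 0, 0, 1) expansion. */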
struct GEN8_VERTEX_ELEMENT_STATE element = { | |||
.VertexBufferIndex = desc->location, | |||
.Valid = true, | |||
.SourceElementFormat = format->format, | |||
.EdgeFlagEnable = false, | |||
.SourceElementOffset = desc->offsetInBytes, | |||
.Component0Control = VFCOMP_STORE_SRC, | |||
.Component1Control = format->channels >= 2 ? VFCOMP_STORE_SRC : VFCOMP_STORE_0, | |||
.Component2Control = format->channels >= 3 ? VFCOMP_STORE_SRC : VFCOMP_STORE_0, | |||
.Component3Control = format->channels >= 4 ? VFCOMP_STORE_SRC : VFCOMP_STORE_1_FP | |||
}; | |||
GEN8_VERTEX_ELEMENT_STATE_pack(NULL, &p[1 + i * 2], &element); | |||
anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_VF_INSTANCING, | |||
.InstancingEnable = instancing_enable[desc->binding], | |||
.VertexElementIndex = i, | |||
/* Vulkan so far doesn't have an instance divisor, so | |||
* this is always 1 (ignored if not instancing). */ | |||
.InstanceDataStepRate = 1); | |||
} | |||
anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_VF_SGVS, | |||
.VertexIDEnable = pipeline->vs_prog_data.uses_vertexid, | |||
.VertexIDComponentNumber = 2, | |||
.VertexIDElementOffset = info->bindingCount, | |||
.InstanceIDEnable = pipeline->vs_prog_data.uses_instanceid, | |||
.InstanceIDComponentNumber = 3, | |||
.InstanceIDElementOffset = info->bindingCount); | |||
} | |||
static void | |||
emit_ia_state(struct anv_pipeline *pipeline, VkPipelineIaStateCreateInfo *info) | |||
{ | |||
static const uint32_t vk_to_gen_primitive_type[] = { | |||
[VK_PRIMITIVE_TOPOLOGY_POINT_LIST] = _3DPRIM_POINTLIST, | |||
[VK_PRIMITIVE_TOPOLOGY_LINE_LIST] = _3DPRIM_LINELIST, | |||
[VK_PRIMITIVE_TOPOLOGY_LINE_STRIP] = _3DPRIM_LINESTRIP, | |||
[VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST] = _3DPRIM_TRILIST, | |||
[VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP] = _3DPRIM_TRISTRIP, | |||
[VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN] = _3DPRIM_TRIFAN, | |||
[VK_PRIMITIVE_TOPOLOGY_LINE_LIST_ADJ] = _3DPRIM_LINELIST_ADJ, | |||
[VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_ADJ] = _3DPRIM_LISTSTRIP_ADJ, | |||
[VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_ADJ] = _3DPRIM_TRILIST_ADJ, | |||
[VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_ADJ] = _3DPRIM_TRISTRIP_ADJ, | |||
[VK_PRIMITIVE_TOPOLOGY_PATCH] = _3DPRIM_PATCHLIST_1 | |||
}; | |||
anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_VF, | |||
.IndexedDrawCutIndexEnable = info->primitiveRestartEnable, | |||
.CutIndex = info->primitiveRestartIndex); | |||
anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_VF_TOPOLOGY, | |||
.PrimitiveTopologyType = vk_to_gen_primitive_type[info->topology]); | |||
} | |||
static void | |||
emit_rs_state(struct anv_pipeline *pipeline, VkPipelineRsStateCreateInfo *info) | |||
{ | |||
static const uint32_t vk_to_gen_cullmode[] = { | |||
[VK_CULL_MODE_NONE] = CULLMODE_NONE, | |||
[VK_CULL_MODE_FRONT] = CULLMODE_FRONT, | |||
[VK_CULL_MODE_BACK] = CULLMODE_BACK, | |||
[VK_CULL_MODE_FRONT_AND_BACK] = CULLMODE_BOTH | |||
}; | |||
static const uint32_t vk_to_gen_fillmode[] = { | |||
[VK_FILL_MODE_POINTS] = RASTER_POINT, | |||
[VK_FILL_MODE_WIREFRAME] = RASTER_WIREFRAME, | |||
[VK_FILL_MODE_SOLID] = RASTER_SOLID | |||
}; | |||
static const uint32_t vk_to_gen_front_face[] = { | |||
[VK_FRONT_FACE_CCW] = CounterClockwise, | |||
[VK_FRONT_FACE_CW] = Clockwise | |||
}; | |||
static const uint32_t vk_to_gen_coordinate_origin[] = { | |||
[VK_COORDINATE_ORIGIN_UPPER_LEFT] = UPPERLEFT, | |||
[VK_COORDINATE_ORIGIN_LOWER_LEFT] = LOWERLEFT | |||
}; | |||
struct GEN8_3DSTATE_SF sf = { | |||
GEN8_3DSTATE_SF_header, | |||
.ViewportTransformEnable = true, | |||
.TriangleStripListProvokingVertexSelect = | |||
info->provokingVertex == VK_PROVOKING_VERTEX_FIRST ? 0 : 2, | |||
.LineStripListProvokingVertexSelect = | |||
info->provokingVertex == VK_PROVOKING_VERTEX_FIRST ? 0 : 1, | |||
.TriangleFanProvokingVertexSelect = | |||
info->provokingVertex == VK_PROVOKING_VERTEX_FIRST ? 0 : 2, | |||
.PointWidthSource = info->programPointSize ? Vertex : State, | |||
}; | |||
   /* FIXME: info->rasterizerDiscardEnable (bool32_t) is not hooked up yet. */
GEN8_3DSTATE_SF_pack(NULL, pipeline->state_sf, &sf); | |||
anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_RASTER, | |||
.FrontWinding = vk_to_gen_front_face[info->frontFace], | |||
.CullMode = vk_to_gen_cullmode[info->cullMode], | |||
.FrontFaceFillMode = vk_to_gen_fillmode[info->fillMode], | |||
.BackFaceFillMode = vk_to_gen_fillmode[info->fillMode], | |||
.ScissorRectangleEnable = true, | |||
.ViewportZClipTestEnable = info->depthClipEnable); | |||
anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_SBE, | |||
.ForceVertexURBEntryReadLength = false, | |||
.ForceVertexURBEntryReadOffset = false, | |||
.PointSpriteTextureCoordinateOrigin = | |||
vk_to_gen_coordinate_origin[info->pointOrigin], | |||
.NumberofSFOutputAttributes = | |||
pipeline->wm_prog_data.num_varying_inputs); | |||
} | |||
VkResult VKAPI vkCreateGraphicsPipeline( | |||
VkDevice _device, | |||
const VkGraphicsPipelineCreateInfo* pCreateInfo, | |||
VkPipeline* pPipeline) | |||
{ | |||
struct anv_device *device = (struct anv_device *) _device; | |||
struct anv_pipeline *pipeline; | |||
const struct anv_common *common; | |||
   VkPipelineShaderStageCreateInfo *shader_create_info;
   VkPipelineIaStateCreateInfo *ia_info = NULL;
   VkPipelineRsStateCreateInfo *rs_info = NULL;
   VkPipelineVertexInputCreateInfo *vi_info = NULL;
VkResult result; | |||
uint32_t offset, length; | |||
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO); | |||
pipeline = anv_device_alloc(device, sizeof(*pipeline), 8, | |||
VK_SYSTEM_ALLOC_TYPE_API_OBJECT); | |||
if (pipeline == NULL) | |||
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY); | |||
pipeline->device = device; | |||
pipeline->layout = (struct anv_pipeline_layout *) pCreateInfo->layout; | |||
memset(pipeline->shaders, 0, sizeof(pipeline->shaders)); | |||
result = anv_batch_init(&pipeline->batch, device); | |||
if (result != VK_SUCCESS) | |||
goto fail; | |||
for (common = pCreateInfo->pNext; common; common = common->pNext) { | |||
switch (common->sType) { | |||
case VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_CREATE_INFO: | |||
vi_info = (VkPipelineVertexInputCreateInfo *) common; | |||
break; | |||
case VK_STRUCTURE_TYPE_PIPELINE_IA_STATE_CREATE_INFO: | |||
ia_info = (VkPipelineIaStateCreateInfo *) common; | |||
break; | |||
case VK_STRUCTURE_TYPE_PIPELINE_TESS_STATE_CREATE_INFO: | |||
case VK_STRUCTURE_TYPE_PIPELINE_VP_STATE_CREATE_INFO: | |||
break; | |||
case VK_STRUCTURE_TYPE_PIPELINE_RS_STATE_CREATE_INFO: | |||
rs_info = (VkPipelineRsStateCreateInfo *) common; | |||
break; | |||
case VK_STRUCTURE_TYPE_PIPELINE_MS_STATE_CREATE_INFO: | |||
case VK_STRUCTURE_TYPE_PIPELINE_CB_STATE_CREATE_INFO: | |||
case VK_STRUCTURE_TYPE_PIPELINE_DS_STATE_CREATE_INFO: | |||
case VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO: | |||
shader_create_info = (VkPipelineShaderStageCreateInfo *) common; | |||
pipeline->shaders[shader_create_info->shader.stage] = | |||
(struct anv_shader *) shader_create_info->shader.shader; | |||
break; | |||
default: | |||
break; | |||
} | |||
} | |||
   /* The emit helpers below dereference all three; catch a create-info
    * chain that's missing any of them. */
   assert(vi_info != NULL && ia_info != NULL && rs_info != NULL);
   pipeline->use_repclear = false;
anv_compiler_run(device->compiler, pipeline); | |||
emit_vertex_input(pipeline, vi_info); | |||
emit_ia_state(pipeline, ia_info); | |||
emit_rs_state(pipeline, rs_info); | |||
anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_WM, | |||
.StatisticsEnable = true, | |||
.LineEndCapAntialiasingRegionWidth = _05pixels, | |||
.LineAntialiasingRegionWidth = _10pixels, | |||
.EarlyDepthStencilControl = NORMAL, | |||
.ForceThreadDispatchEnable = NORMAL, | |||
.PointRasterizationRule = RASTRULE_UPPER_RIGHT, | |||
.BarycentricInterpolationMode = | |||
pipeline->wm_prog_data.barycentric_interp_modes); | |||
uint32_t samples = 1; | |||
uint32_t log2_samples = __builtin_ffs(samples) - 1; | |||
   bool enable_sampling = samples > 1;
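   /* NumberofMultisamples is log2-encoded (1 -> 0, 2 -> 1, 4 -> 2, 8 -> 3),
    * which __builtin_ffs(samples) - 1 yields for power-of-two counts. */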
anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_MULTISAMPLE, | |||
.PixelPositionOffsetEnable = enable_sampling, | |||
.PixelLocation = CENTER, | |||
.NumberofMultisamples = log2_samples); | |||
anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_URB_VS, | |||
.VSURBStartingAddress = pipeline->urb.vs_start, | |||
.VSURBEntryAllocationSize = pipeline->urb.vs_size - 1, | |||
.VSNumberofURBEntries = pipeline->urb.nr_vs_entries); | |||
anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_URB_GS, | |||
.GSURBStartingAddress = pipeline->urb.gs_start, | |||
.GSURBEntryAllocationSize = pipeline->urb.gs_size - 1, | |||
.GSNumberofURBEntries = pipeline->urb.nr_gs_entries); | |||
anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_URB_HS, | |||
.HSURBStartingAddress = pipeline->urb.vs_start, | |||
.HSURBEntryAllocationSize = 0, | |||
.HSNumberofURBEntries = 0); | |||
anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_URB_DS, | |||
.DSURBStartingAddress = pipeline->urb.vs_start, | |||
.DSURBEntryAllocationSize = 0, | |||
.DSNumberofURBEntries = 0); | |||
const struct brw_gs_prog_data *gs_prog_data = &pipeline->gs_prog_data; | |||
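   /* As for the VS below, skip the VUE header and position slots; slots
    * pack two to a URB read, hence the (num_slots + 1) / 2 rounding. */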
offset = 1; | |||
length = (gs_prog_data->base.vue_map.num_slots + 1) / 2 - offset; | |||
if (pipeline->gs_vec4 == NO_KERNEL) | |||
anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_GS, .Enable = false); | |||
else | |||
anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_GS, | |||
.SingleProgramFlow = false, | |||
.KernelStartPointer = pipeline->gs_vec4, | |||
.VectorMaskEnable = Vmask, | |||
.SamplerCount = 0, | |||
.BindingTableEntryCount = 0, | |||
.ExpectedVertexCount = pipeline->gs_vertex_count, | |||
.PerThreadScratchSpace = 0, | |||
.ScratchSpaceBasePointer = 0, | |||
.OutputVertexSize = gs_prog_data->output_vertex_size_hwords * 2 - 1, | |||
.OutputTopology = gs_prog_data->output_topology, | |||
.VertexURBEntryReadLength = gs_prog_data->base.urb_read_length, | |||
.DispatchGRFStartRegisterForURBData = | |||
gs_prog_data->base.base.dispatch_grf_start_reg, | |||
.MaximumNumberofThreads = device->info.max_gs_threads, | |||
.ControlDataHeaderSize = gs_prog_data->control_data_header_size_hwords, | |||
//pipeline->gs_prog_data.dispatch_mode | | |||
.StatisticsEnable = true, | |||
.IncludePrimitiveID = gs_prog_data->include_primitive_id, | |||
.ReorderMode = TRAILING, | |||
.Enable = true, | |||
.ControlDataFormat = gs_prog_data->control_data_format, | |||
/* FIXME: mesa sets this based on ctx->Transform.ClipPlanesEnabled: | |||
* UserClipDistanceClipTestEnableBitmask_3DSTATE_GS(v) | |||
* UserClipDistanceCullTestEnableBitmask(v) | |||
*/ | |||
.VertexURBEntryOutputReadOffset = offset, | |||
.VertexURBEntryOutputLength = length); | |||
//trp_generate_blend_hw_cmds(batch, pipeline); | |||
const struct brw_vue_prog_data *vue_prog_data = &pipeline->vs_prog_data.base; | |||
/* Skip the VUE header and position slots */ | |||
offset = 1; | |||
length = (vue_prog_data->vue_map.num_slots + 1) / 2 - offset; | |||
anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_VS, | |||
.KernelStartPointer = pipeline->vs_simd8, | |||
.SingleVertexDispatch = Multiple, | |||
.VectorMaskEnable = Dmask, | |||
.SamplerCount = 0, | |||
.BindingTableEntryCount = | |||
vue_prog_data->base.binding_table.size_bytes / 4, | |||
.ThreadDispatchPriority = Normal, | |||
.FloatingPointMode = IEEE754, | |||
.IllegalOpcodeExceptionEnable = false, | |||
.AccessesUAV = false, | |||
.SoftwareExceptionEnable = false, | |||
/* FIXME: pointer needs to be assigned outside as it aliases | |||
* PerThreadScratchSpace. | |||
*/ | |||
.ScratchSpaceBasePointer = 0, | |||
.PerThreadScratchSpace = 0, | |||
.DispatchGRFStartRegisterForURBData = | |||
vue_prog_data->base.dispatch_grf_start_reg, | |||
.VertexURBEntryReadLength = vue_prog_data->urb_read_length, | |||
.VertexURBEntryReadOffset = 0, | |||
.MaximumNumberofThreads = device->info.max_vs_threads - 1, | |||
.StatisticsEnable = false, | |||
.SIMD8DispatchEnable = true, | |||
.VertexCacheDisable = ia_info->disableVertexReuse, | |||
.FunctionEnable = true, | |||
.VertexURBEntryOutputReadOffset = offset, | |||
.VertexURBEntryOutputLength = length, | |||
.UserClipDistanceClipTestEnableBitmask = 0, | |||
.UserClipDistanceCullTestEnableBitmask = 0); | |||
const struct brw_wm_prog_data *wm_prog_data = &pipeline->wm_prog_data; | |||
uint32_t ksp0, ksp2, grf_start0, grf_start2; | |||
ksp2 = 0; | |||
grf_start2 = 0; | |||
if (pipeline->ps_simd8 != NO_KERNEL) { | |||
ksp0 = pipeline->ps_simd8; | |||
grf_start0 = wm_prog_data->base.dispatch_grf_start_reg; | |||
if (pipeline->ps_simd16 != NO_KERNEL) { | |||
ksp2 = pipeline->ps_simd16; | |||
grf_start2 = wm_prog_data->dispatch_grf_start_reg_16; | |||
} | |||
} else if (pipeline->ps_simd16 != NO_KERNEL) { | |||
ksp0 = pipeline->ps_simd16; | |||
grf_start0 = wm_prog_data->dispatch_grf_start_reg_16; | |||
} else { | |||
unreachable("no ps shader"); | |||
} | |||
anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_PS, | |||
.KernelStartPointer0 = ksp0, | |||
.SingleProgramFlow = false, | |||
.VectorMaskEnable = true, | |||
.SamplerCount = 0, | |||
.ScratchSpaceBasePointer = 0, | |||
.PerThreadScratchSpace = 0, | |||
.MaximumNumberofThreadsPerPSD = 64 - 2, | |||
.PositionXYOffsetSelect = wm_prog_data->uses_pos_offset ? | |||
POSOFFSET_SAMPLE: POSOFFSET_NONE, | |||
.PushConstantEnable = wm_prog_data->base.nr_params > 0, | |||
._8PixelDispatchEnable = pipeline->ps_simd8 != NO_KERNEL, | |||
._16PixelDispatchEnable = pipeline->ps_simd16 != NO_KERNEL, | |||
._32PixelDispatchEnable = false, | |||
.DispatchGRFStartRegisterForConstantSetupData0 = grf_start0, | |||
.DispatchGRFStartRegisterForConstantSetupData1 = 0, | |||
.DispatchGRFStartRegisterForConstantSetupData2 = grf_start2, | |||
.KernelStartPointer1 = 0, | |||
.KernelStartPointer2 = ksp2); | |||
bool per_sample_ps = false; | |||
anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_PS_EXTRA, | |||
.PixelShaderValid = true, | |||
.PixelShaderKillsPixel = wm_prog_data->uses_kill, | |||
.PixelShaderComputedDepthMode = wm_prog_data->computed_depth_mode, | |||
.AttributeEnable = wm_prog_data->num_varying_inputs > 0, | |||
.oMaskPresenttoRenderTarget = wm_prog_data->uses_omask, | |||
.PixelShaderIsPerSample = per_sample_ps); | |||
*pPipeline = (VkPipeline) pipeline; | |||
return VK_SUCCESS; | |||
fail: | |||
anv_device_free(device, pipeline); | |||
return result; | |||
} | |||
VkResult | |||
anv_pipeline_destroy(struct anv_pipeline *pipeline) | |||
{ | |||
anv_compiler_free(pipeline); | |||
anv_batch_finish(&pipeline->batch, pipeline->device); | |||
anv_device_free(pipeline->device, pipeline); | |||
return VK_SUCCESS; | |||
} | |||
VkResult VKAPI vkCreateGraphicsPipelineDerivative( | |||
VkDevice device, | |||
const VkGraphicsPipelineCreateInfo* pCreateInfo, | |||
VkPipeline basePipeline, | |||
VkPipeline* pPipeline) | |||
{ | |||
return VK_UNSUPPORTED; | |||
} | |||
VkResult VKAPI vkCreateComputePipeline( | |||
VkDevice device, | |||
const VkComputePipelineCreateInfo* pCreateInfo, | |||
VkPipeline* pPipeline) | |||
{ | |||
return VK_UNSUPPORTED; | |||
} | |||
VkResult VKAPI vkStorePipeline( | |||
VkDevice device, | |||
VkPipeline pipeline, | |||
size_t* pDataSize, | |||
void* pData) | |||
{ | |||
return VK_UNSUPPORTED; | |||
} | |||
VkResult VKAPI vkLoadPipeline( | |||
VkDevice device, | |||
size_t dataSize, | |||
const void* pData, | |||
VkPipeline* pPipeline) | |||
{ | |||
return VK_UNSUPPORTED; | |||
} | |||
VkResult VKAPI vkLoadPipelineDerivative( | |||
VkDevice device, | |||
size_t dataSize, | |||
const void* pData, | |||
VkPipeline basePipeline, | |||
VkPipeline* pPipeline) | |||
{ | |||
return VK_UNSUPPORTED; | |||
} | |||
// Pipeline layout functions | |||
VkResult VKAPI vkCreatePipelineLayout( | |||
VkDevice _device, | |||
const VkPipelineLayoutCreateInfo* pCreateInfo, | |||
VkPipelineLayout* pPipelineLayout) | |||
{ | |||
struct anv_device *device = (struct anv_device *) _device; | |||
struct anv_pipeline_layout *layout; | |||
struct anv_pipeline_layout_entry *entry; | |||
uint32_t total; | |||
size_t size; | |||
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO); | |||
   total = 0;
   for (uint32_t i = 0; i < pCreateInfo->descriptorSetCount; i++) {
      struct anv_descriptor_set_layout *set_layout =
         (struct anv_descriptor_set_layout *) pCreateInfo->pSetLayouts[i];
      /* set_layout->total already counts the bindings of every stage, so
       * summing it once per set sizes the entries array exactly. */
      total += set_layout->total;
   }
size = sizeof(*layout) + total * sizeof(layout->entries[0]); | |||
layout = anv_device_alloc(device, size, 8, | |||
VK_SYSTEM_ALLOC_TYPE_API_OBJECT); | |||
if (layout == NULL) | |||
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY); | |||
entry = layout->entries; | |||
for (uint32_t s = 0; s < VK_NUM_SHADER_STAGE; s++) { | |||
layout->stage[s].entries = entry; | |||
for (uint32_t i = 0; i < pCreateInfo->descriptorSetCount; i++) { | |||
struct anv_descriptor_set_layout *set_layout = | |||
(struct anv_descriptor_set_layout *) pCreateInfo->pSetLayouts[i]; | |||
for (uint32_t j = 0; j < set_layout->count; j++) | |||
if (set_layout->bindings[j].mask & (1 << s)) { | |||
entry->type = set_layout->bindings[j].type; | |||
entry->set = i; | |||
entry->index = j; | |||
entry++; | |||
} | |||
} | |||
layout->stage[s].count = entry - layout->stage[s].entries; | |||
} | |||
*pPipelineLayout = (VkPipelineLayout) layout; | |||
return VK_SUCCESS; | |||
} |
@@ -0,0 +1,594 @@ | |||
/* | |||
* Copyright © 2015 Intel Corporation | |||
* | |||
* Permission is hereby granted, free of charge, to any person obtaining a | |||
* copy of this software and associated documentation files (the "Software"), | |||
* to deal in the Software without restriction, including without limitation | |||
* the rights to use, copy, modify, merge, publish, distribute, sublicense, | |||
* and/or sell copies of the Software, and to permit persons to whom the | |||
* Software is furnished to do so, subject to the following conditions: | |||
* | |||
* The above copyright notice and this permission notice (including the next | |||
* paragraph) shall be included in all copies or substantial portions of the | |||
* Software. | |||
* | |||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | |||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | |||
* IN THE SOFTWARE. | |||
*/ | |||
#pragma once | |||
#include <stdlib.h> | |||
#include <stdio.h> | |||
#include <stdbool.h> | |||
#include <pthread.h> | |||
#include <assert.h> | |||
#include <i915_drm.h> | |||
#include "brw_device_info.h" | |||
#include "util/macros.h" | |||
#define VK_PROTOTYPES | |||
#include <vulkan/vulkan.h> | |||
#undef VKAPI | |||
#define VKAPI __attribute__ ((visibility ("default"))) | |||
#include "brw_context.h" | |||
#ifdef __cplusplus | |||
extern "C" { | |||
#endif | |||
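/* Round v up to the next multiple of a. Both helpers assume a is a power
 * of two, as every alignment used in this driver is. */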
static inline uint32_t | |||
ALIGN_U32(uint32_t v, uint32_t a) | |||
{ | |||
return (v + a - 1) & ~(a - 1); | |||
} | |||
static inline int32_t | |||
ALIGN_I32(int32_t v, int32_t a) | |||
{ | |||
return (v + a - 1) & ~(a - 1); | |||
} | |||
#define for_each_bit(b, dword)                            \
   for (uint32_t __dword = (dword);                       \
        (b) = __builtin_ffs(__dword) - 1, __dword;        \
        __dword &= ~(1 << (b)))
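/* Usage sketch (flush_vertex_buffer() is a hypothetical helper): visits
 * the set bits of the dword, lowest first, clearing its private copy as
 * it goes.
 *
 *    uint32_t b;
 *    for_each_bit(b, cmd_buffer->vb_dirty)
 *       flush_vertex_buffer(cmd_buffer, b);
 */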
/* Define no kernel as 1, since that's an illegal offset for a kernel */ | |||
#define NO_KERNEL 1 | |||
struct anv_common { | |||
VkStructureType sType; | |||
const void* pNext; | |||
}; | |||
/* Whenever we generate an error, pass it through this function. Useful for | |||
* debugging, where we can break on it. Only call at error site, not when | |||
* propagating errors. Might be useful to plug in a stack trace here. | |||
*/ | |||
static inline VkResult | |||
vk_error(VkResult error) | |||
{ | |||
#ifdef DEBUG | |||
fprintf(stderr, "vk_error: %x\n", error); | |||
#endif | |||
return error; | |||
} | |||
/** | |||
* A dynamically growable, circular buffer. Elements are added at head and | |||
* removed from tail. head and tail are free-running uint32_t indices and we | |||
* only compute the modulo with size when accessing the array. This way, | |||
* number of bytes in the queue is always head - tail, even in case of | |||
* wraparound. | |||
*/ | |||
struct anv_vector { | |||
uint32_t head; | |||
uint32_t tail; | |||
uint32_t element_size; | |||
uint32_t size; | |||
void *data; | |||
}; | |||
int anv_vector_init(struct anv_vector *queue, uint32_t element_size, uint32_t size); | |||
void *anv_vector_add(struct anv_vector *queue); | |||
void *anv_vector_remove(struct anv_vector *queue); | |||
static inline int | |||
anv_vector_length(struct anv_vector *queue) | |||
{ | |||
return (queue->head - queue->tail) / queue->element_size; | |||
} | |||
static inline void | |||
anv_vector_finish(struct anv_vector *queue) | |||
{ | |||
free(queue->data); | |||
} | |||
#define anv_vector_foreach(elem, queue)                                     \
   static_assert(__builtin_types_compatible_p(__typeof__(queue),            \
                                              struct anv_vector *), "");    \
   for (uint32_t __anv_vector_offset = (queue)->tail;                       \
        elem = (queue)->data + (__anv_vector_offset & ((queue)->size - 1)), \
           __anv_vector_offset < (queue)->head;                             \
        __anv_vector_offset += (queue)->element_size)
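/* Usage sketch, with sizes chosen to satisfy the power-of-two asserts in
 * anv_vector_init() (16-byte elements, a 64-byte ring, i.e. 4 slots):
 *
 *    struct anv_vector v;
 *    anv_vector_init(&v, 16, 64);
 *    void *in = anv_vector_add(&v);      // slot at head, NULL if OOM
 *    void *out = anv_vector_remove(&v);  // oldest slot, NULL if empty
 *
 * head == tail means empty and head - tail == size means full; both hold
 * even after the free-running indices wrap past UINT32_MAX.
 */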
struct anv_bo { | |||
int gem_handle; | |||
uint32_t index; | |||
uint64_t offset; | |||
uint64_t size; | |||
/* This field is here for the benefit of the aub dumper. It can (and for | |||
* userptr bos it must) be set to the cpu map of the buffer. Destroying | |||
* the bo won't clean up the mmap, it's still the responsibility of the bo | |||
* user to do that. */ | |||
void *map; | |||
}; | |||
/* Represents a lock-free linked list of "free" things. This is used by | |||
* both the block pool and the state pools. Unfortunately, in order to | |||
* solve the ABA problem, we can't use a single uint32_t head. | |||
*/ | |||
union anv_free_list { | |||
struct { | |||
uint32_t offset; | |||
/* A simple count that is incremented every time the head changes. */ | |||
uint32_t count; | |||
}; | |||
uint64_t u64; | |||
}; | |||
#define ANV_FREE_LIST_EMPTY ((union anv_free_list) { { 1, 0 } }) | |||
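/* Sketch of the intended lock-free pop (illustrative only; next_of() is a
 * hypothetical helper that reads the link stored in the freed block):
 *
 *    union anv_free_list current, next, old;
 *    current.u64 = list->u64;
 *    for (;;) {
 *       next.offset = next_of(pool, current.offset);
 *       next.count = current.count + 1;
 *       old.u64 = __sync_val_compare_and_swap(&list->u64,
 *                                             current.u64, next.u64);
 *       if (old.u64 == current.u64)
 *          break;       // won the race: head is now next
 *       current = old;  // lost: retry against the fresh head
 *    }
 *
 * Bumping count on every successful swap makes a re-pushed head compare
 * unequal as a 64-bit value, which is what defeats the ABA problem.
 */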
struct anv_block_pool { | |||
struct anv_device *device; | |||
struct anv_bo bo; | |||
void *map; | |||
int fd; | |||
uint32_t size; | |||
/** | |||
* Array of mmaps and gem handles owned by the block pool, reclaimed when | |||
* the block pool is destroyed. | |||
*/ | |||
struct anv_vector mmap_cleanups; | |||
uint32_t block_size; | |||
uint32_t next_block; | |||
union anv_free_list free_list; | |||
}; | |||
struct anv_block_state { | |||
union { | |||
struct { | |||
uint32_t next; | |||
uint32_t end; | |||
}; | |||
uint64_t u64; | |||
}; | |||
}; | |||
struct anv_state { | |||
uint32_t offset; | |||
uint32_t alloc_size; | |||
void *map; | |||
}; | |||
struct anv_fixed_size_state_pool { | |||
size_t state_size; | |||
union anv_free_list free_list; | |||
struct anv_block_state block; | |||
}; | |||
#define ANV_MIN_STATE_SIZE_LOG2 6 | |||
#define ANV_MAX_STATE_SIZE_LOG2 10 | |||
#define ANV_STATE_BUCKETS (ANV_MAX_STATE_SIZE_LOG2 - ANV_MIN_STATE_SIZE_LOG2) | |||
struct anv_state_pool { | |||
struct anv_block_pool *block_pool; | |||
struct anv_fixed_size_state_pool buckets[ANV_STATE_BUCKETS]; | |||
}; | |||
struct anv_state_stream { | |||
struct anv_block_pool *block_pool; | |||
uint32_t next; | |||
uint32_t current_block; | |||
uint32_t end; | |||
}; | |||
void anv_block_pool_init(struct anv_block_pool *pool, | |||
struct anv_device *device, uint32_t block_size); | |||
void anv_block_pool_finish(struct anv_block_pool *pool); | |||
uint32_t anv_block_pool_alloc(struct anv_block_pool *pool); | |||
void anv_block_pool_free(struct anv_block_pool *pool, uint32_t offset); | |||
void anv_state_pool_init(struct anv_state_pool *pool, | |||
struct anv_block_pool *block_pool); | |||
struct anv_state anv_state_pool_alloc(struct anv_state_pool *pool, | |||
size_t state_size, size_t alignment); | |||
void anv_state_pool_free(struct anv_state_pool *pool, struct anv_state state); | |||
void anv_state_stream_init(struct anv_state_stream *stream, | |||
struct anv_block_pool *block_pool); | |||
void anv_state_stream_finish(struct anv_state_stream *stream); | |||
struct anv_state anv_state_stream_alloc(struct anv_state_stream *stream, | |||
uint32_t size, uint32_t alignment); | |||
struct anv_physical_device { | |||
struct anv_instance * instance; | |||
uint32_t chipset_id; | |||
bool no_hw; | |||
const char * path; | |||
const char * name; | |||
const struct brw_device_info * info; | |||
}; | |||
struct anv_instance { | |||
void * pAllocUserData; | |||
PFN_vkAllocFunction pfnAlloc; | |||
PFN_vkFreeFunction pfnFree; | |||
uint32_t apiVersion; | |||
uint32_t physicalDeviceCount; | |||
struct anv_physical_device physicalDevice; | |||
}; | |||
struct anv_device { | |||
struct anv_instance * instance; | |||
uint32_t chipset_id; | |||
struct brw_device_info info; | |||
int context_id; | |||
int fd; | |||
bool no_hw; | |||
bool dump_aub; | |||
struct anv_block_pool dyn_state_block_pool; | |||
struct anv_state_pool dyn_state_pool; | |||
struct anv_block_pool instruction_block_pool; | |||
struct anv_block_pool surface_state_block_pool; | |||
struct anv_state_pool surface_state_pool; | |||
struct anv_compiler * compiler; | |||
struct anv_aub_writer * aub_writer; | |||
pthread_mutex_t mutex; | |||
}; | |||
struct anv_queue { | |||
struct anv_device * device; | |||
struct anv_state_pool * pool; | |||
/** | |||
* Serial number of the most recently completed batch executed on the | |||
* engine. | |||
*/ | |||
struct anv_state completed_serial; | |||
/** | |||
* The next batch submitted to the engine will be assigned this serial | |||
* number. | |||
*/ | |||
uint32_t next_serial; | |||
uint32_t last_collected_serial; | |||
}; | |||
void * | |||
anv_device_alloc(struct anv_device * device, | |||
size_t size, | |||
size_t alignment, | |||
VkSystemAllocType allocType); | |||
void | |||
anv_device_free(struct anv_device * device, | |||
void * mem); | |||
void* anv_gem_mmap(struct anv_device *device, | |||
uint32_t gem_handle, uint64_t offset, uint64_t size); | |||
void anv_gem_munmap(void *p, uint64_t size); | |||
uint32_t anv_gem_create(struct anv_device *device, size_t size); | |||
void anv_gem_close(struct anv_device *device, int gem_handle); | |||
int anv_gem_userptr(struct anv_device *device, void *mem, size_t size); | |||
int anv_gem_wait(struct anv_device *device, int gem_handle, int64_t *timeout_ns); | |||
int anv_gem_execbuffer(struct anv_device *device, | |||
struct drm_i915_gem_execbuffer2 *execbuf); | |||
int anv_gem_set_tiling(struct anv_device *device, int gem_handle, | |||
uint32_t stride, uint32_t tiling); | |||
int anv_gem_create_context(struct anv_device *device); | |||
int anv_gem_destroy_context(struct anv_device *device, int context); | |||
int anv_gem_get_param(int fd, uint32_t param); | |||
int anv_gem_get_aperture(struct anv_device *device, uint64_t *size); | |||
int anv_gem_handle_to_fd(struct anv_device *device, int gem_handle); | |||
int anv_gem_fd_to_handle(struct anv_device *device, int fd); | |||
VkResult anv_bo_init_new(struct anv_bo *bo, struct anv_device *device, uint64_t size); | |||
/* TODO: Remove hardcoded reloc limit. */ | |||
#define ANV_BATCH_MAX_RELOCS 256 | |||
struct anv_reloc_list { | |||
size_t num_relocs; | |||
struct drm_i915_gem_relocation_entry relocs[ANV_BATCH_MAX_RELOCS]; | |||
struct anv_bo * reloc_bos[ANV_BATCH_MAX_RELOCS]; | |||
}; | |||
struct anv_batch { | |||
struct anv_bo bo; | |||
void * next; | |||
struct anv_reloc_list cmd_relocs; | |||
struct anv_reloc_list surf_relocs; | |||
}; | |||
VkResult anv_batch_init(struct anv_batch *batch, struct anv_device *device); | |||
void anv_batch_finish(struct anv_batch *batch, struct anv_device *device); | |||
void anv_batch_reset(struct anv_batch *batch); | |||
void *anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords); | |||
void anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other); | |||
uint64_t anv_batch_emit_reloc(struct anv_batch *batch, | |||
void *location, struct anv_bo *bo, uint32_t offset); | |||
struct anv_address { | |||
struct anv_bo *bo; | |||
uint32_t offset; | |||
}; | |||
#define __gen_address_type struct anv_address | |||
#define __gen_user_data struct anv_batch | |||
static inline uint64_t | |||
__gen_combine_address(struct anv_batch *batch, void *location, | |||
const struct anv_address address, uint32_t delta) | |||
{ | |||
if (address.bo == NULL) { | |||
return delta; | |||
} else { | |||
assert(batch->bo.map <= location && | |||
(char *) location < (char *) batch->bo.map + batch->bo.size); | |||
return anv_batch_emit_reloc(batch, location, address.bo, address.offset + delta); | |||
} | |||
} | |||
#undef GEN8_3DSTATE_MULTISAMPLE | |||
#include "gen8_pack.h" | |||
#define anv_batch_emit(batch, cmd, ...) do {                      \
      struct cmd __template = {                                   \
         cmd ## _header,                                          \
         __VA_ARGS__                                              \
      };                                                          \
      void *__dst = anv_batch_emit_dwords(batch, cmd ## _length); \
      cmd ## _pack(batch, __dst, &__template);                    \
   } while (0)
#define anv_batch_emitn(batch, n, cmd, ...) ({                    \
      struct cmd __template = {                                   \
         cmd ## _header,                                          \
         .DwordLength = (n) - cmd ## _length_bias,                \
         __VA_ARGS__                                              \
      };                                                          \
      void *__dst = anv_batch_emit_dwords(batch, n);              \
      cmd ## _pack(batch, __dst, &__template);                    \
      __dst;                                                      \
   })
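/* Usage sketch: fields are set with designated initializers and anything
 * unnamed packs as zero; anv_batch_emitn() is the variable-length variant
 * and hands back the destination for fixing up trailing dwords.
 *
 *    anv_batch_emit(batch, GEN8_3DSTATE_DRAWING_RECTANGLE,
 *                   .ClippedDrawingRectangleXMax = width - 1,
 *                   .ClippedDrawingRectangleYMax = height - 1);
 */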
struct anv_device_memory { | |||
struct anv_bo bo; | |||
VkDeviceSize map_size; | |||
void *map; | |||
}; | |||
struct anv_dynamic_vp_state { | |||
struct anv_state sf_clip_vp; | |||
struct anv_state cc_vp; | |||
struct anv_state scissor; | |||
}; | |||
struct anv_dynamic_rs_state { | |||
uint32_t state_sf[GEN8_3DSTATE_SF_length]; | |||
}; | |||
struct anv_dynamic_cb_state { | |||
uint32_t blend_offset; | |||
}; | |||
struct anv_descriptor_set_layout { | |||
uint32_t total; /* total number of entries in all stages */ | |||
uint32_t count; | |||
struct { | |||
VkDescriptorType type; | |||
uint32_t mask; | |||
} bindings[0]; | |||
}; | |||
struct anv_descriptor_set { | |||
void *descriptors[0]; | |||
}; | |||
struct anv_pipeline_layout_entry { | |||
VkDescriptorType type; | |||
uint32_t set; | |||
uint32_t index; | |||
}; | |||
struct anv_pipeline_layout { | |||
struct { | |||
uint32_t count; | |||
struct anv_pipeline_layout_entry *entries; | |||
} stage[VK_NUM_SHADER_STAGE]; | |||
struct anv_pipeline_layout_entry entries[0]; | |||
}; | |||
struct anv_buffer { | |||
struct anv_device * device; | |||
VkDeviceSize size; | |||
/* Set when bound */ | |||
struct anv_device_memory * mem; | |||
VkDeviceSize offset; | |||
}; | |||
#define MAX_VBS 32 | |||
#define MAX_SETS 8 | |||
#define MAX_RTS 8 | |||
#define ANV_CMD_BUFFER_PIPELINE_DIRTY (1 << 0) | |||
#define ANV_CMD_BUFFER_DESCRIPTOR_SET_DIRTY (1 << 1) | |||
#define ANV_CMD_BUFFER_RS_DIRTY (1 << 2) | |||
struct anv_cmd_buffer { | |||
struct anv_device * device; | |||
struct drm_i915_gem_execbuffer2 execbuf; | |||
struct drm_i915_gem_exec_object2 * exec2_objects; | |||
struct anv_bo ** exec2_bos; | |||
bool need_reloc; | |||
uint32_t serial; | |||
uint32_t bo_count; | |||
struct anv_batch batch; | |||
struct anv_state_stream surface_state_stream; | |||
/* State required while building cmd buffer */ | |||
struct { | |||
struct anv_buffer *buffer; | |||
VkDeviceSize offset; | |||
} vb[MAX_VBS]; | |||
uint32_t vb_dirty; | |||
uint32_t num_descriptor_sets; | |||
struct anv_descriptor_set * descriptor_sets[MAX_SETS]; | |||
uint32_t dirty; | |||
struct anv_pipeline * pipeline; | |||
struct anv_framebuffer * framebuffer; | |||
struct anv_dynamic_rs_state * rs_state; | |||
}; | |||
void anv_cmd_buffer_dump(struct anv_cmd_buffer *cmd_buffer); | |||
void anv_aub_writer_destroy(struct anv_aub_writer *writer); | |||
struct anv_shader { | |||
uint32_t size; | |||
char data[0]; | |||
}; | |||
struct anv_pipeline { | |||
struct anv_device * device; | |||
struct anv_batch batch; | |||
struct anv_shader * shaders[VK_NUM_SHADER_STAGE]; | |||
struct anv_pipeline_layout * layout; | |||
bool use_repclear; | |||
struct brw_vs_prog_data vs_prog_data; | |||
struct brw_wm_prog_data wm_prog_data; | |||
struct brw_gs_prog_data gs_prog_data; | |||
struct brw_stage_prog_data * prog_data[VK_NUM_SHADER_STAGE]; | |||
struct { | |||
uint32_t vs_start; | |||
uint32_t vs_size; | |||
uint32_t nr_vs_entries; | |||
uint32_t gs_start; | |||
uint32_t gs_size; | |||
uint32_t nr_gs_entries; | |||
} urb; | |||
struct anv_bo vs_scratch_bo; | |||
struct anv_bo ps_scratch_bo; | |||
struct anv_bo gs_scratch_bo; | |||
uint32_t active_stages; | |||
uint32_t program_block; | |||
uint32_t program_next; | |||
uint32_t vs_simd8; | |||
uint32_t ps_simd8; | |||
uint32_t ps_simd16; | |||
uint32_t gs_vec4; | |||
uint32_t gs_vertex_count; | |||
uint32_t binding_stride[MAX_VBS]; | |||
uint32_t state_sf[GEN8_3DSTATE_SF_length]; | |||
uint32_t state_raster[GEN8_3DSTATE_RASTER_length]; | |||
}; | |||
VkResult anv_pipeline_destroy(struct anv_pipeline *pipeline); | |||
struct anv_compiler *anv_compiler_create(int fd); | |||
void anv_compiler_destroy(struct anv_compiler *compiler); | |||
int anv_compiler_run(struct anv_compiler *compiler, struct anv_pipeline *pipeline); | |||
void anv_compiler_free(struct anv_pipeline *pipeline); | |||
struct anv_format { | |||
uint32_t format; | |||
int32_t cpp; | |||
int32_t channels; | |||
}; | |||
const struct anv_format * | |||
anv_format_for_vk_format(VkFormat format); | |||
struct anv_image { | |||
VkImageType type; | |||
VkExtent3D extent; | |||
uint32_t tile_mode; | |||
VkDeviceSize size; | |||
uint32_t alignment; | |||
int32_t stride; | |||
/* Set when bound */ | |||
struct anv_device_memory * mem; | |||
VkDeviceSize offset; | |||
}; | |||
struct anv_buffer_view { | |||
struct anv_buffer * buffer; | |||
struct anv_state surface_state; | |||
uint32_t offset; | |||
}; | |||
struct anv_color_attachment_view { | |||
struct anv_image * image; | |||
struct anv_state surface_state; | |||
}; | |||
struct anv_image_view { | |||
struct anv_image * image; | |||
struct anv_state surface_state; | |||
}; | |||
struct anv_depth_stencil_view { | |||
}; | |||
struct anv_framebuffer { | |||
uint32_t color_attachment_count; | |||
struct anv_color_attachment_view * color_attachments[MAX_RTS]; | |||
struct anv_depth_stencil_view * depth_stencil; | |||
uint32_t sample_count; | |||
uint32_t width; | |||
uint32_t height; | |||
uint32_t layers; | |||
}; | |||
struct anv_render_pass { | |||
VkRect render_area; | |||
}; | |||
#ifdef __cplusplus | |||
} | |||
#endif |
@@ -0,0 +1,99 @@ | |||
/* | |||
* Copyright © 2015 Intel Corporation | |||
* | |||
* Permission is hereby granted, free of charge, to any person obtaining a | |||
* copy of this software and associated documentation files (the "Software"), | |||
* to deal in the Software without restriction, including without limitation | |||
* the rights to use, copy, modify, merge, publish, distribute, sublicense, | |||
* and/or sell copies of the Software, and to permit persons to whom the | |||
* Software is furnished to do so, subject to the following conditions: | |||
* | |||
* The above copyright notice and this permission notice (including the next | |||
* paragraph) shall be included in all copies or substantial portions of the | |||
* Software. | |||
* | |||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | |||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | |||
* IN THE SOFTWARE. | |||
*/ | |||
#include <stdarg.h> | |||
#include <stdio.h> | |||
#include <stdlib.h> | |||
#include <string.h> | |||
#include <errno.h> | |||
#include <assert.h> | |||
#include "private.h" | |||
int | |||
anv_vector_init(struct anv_vector *vector, uint32_t element_size, uint32_t size) | |||
{ | |||
assert(is_power_of_two(size)); | |||
assert(element_size < size && is_power_of_two(element_size)); | |||
vector->head = 0; | |||
vector->tail = 0; | |||
vector->element_size = element_size; | |||
vector->size = size; | |||
vector->data = malloc(size); | |||
return vector->data != NULL; | |||
} | |||
void * | |||
anv_vector_add(struct anv_vector *vector) | |||
{ | |||
uint32_t offset, size, split, tail; | |||
void *data; | |||
if (vector->head - vector->tail == vector->size) { | |||
size = vector->size * 2; | |||
data = malloc(size); | |||
if (data == NULL) | |||
return NULL; | |||
split = ALIGN_U32(vector->tail, vector->size); | |||
tail = vector->tail & (vector->size - 1); | |||
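      /* The live bytes may wrap around the end of the old buffer. If they
       * do (first branch), copy back in two pieces: the run from the
       * masked tail to the end of the old buffer keeps its offset, and the
       * wrapped prefix re-lands just past the old size. Otherwise the run
       * is contiguous and one memcpy at the masked tail offset suffices. */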
if (vector->head - split < vector->size) { | |||
memcpy(data + tail, | |||
vector->data + tail, | |||
split - vector->tail); | |||
memcpy(data + vector->size, | |||
vector->data, vector->head - split); | |||
} else { | |||
memcpy(data + tail, | |||
vector->data + tail, | |||
vector->head - vector->tail); | |||
} | |||
free(vector->data); | |||
vector->data = data; | |||
vector->size = size; | |||
} | |||
assert(vector->head - vector->tail < vector->size); | |||
offset = vector->head & (vector->size - 1); | |||
vector->head += vector->element_size; | |||
return vector->data + offset; | |||
} | |||
void * | |||
anv_vector_remove(struct anv_vector *vector) | |||
{ | |||
uint32_t offset; | |||
if (vector->head == vector->tail) | |||
return NULL; | |||
assert(vector->head - vector->tail <= vector->size); | |||
offset = vector->tail & (vector->size - 1); | |||
vector->tail += vector->element_size; | |||
return vector->data + offset; | |||
} |
@@ -0,0 +1,723 @@ | |||
#include <stdlib.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>
#include <stdarg.h>
#include <string.h>
#include <poll.h>
#define VK_PROTOTYPES
#include <vulkan/vulkan.h>
#include <libpng16/png.h>
static void | |||
fail_if(int cond, const char *format, ...) | |||
{ | |||
va_list args; | |||
if (!cond) | |||
return; | |||
va_start(args, format); | |||
vfprintf(stderr, format, args); | |||
va_end(args); | |||
exit(1); | |||
} | |||
static void | |||
write_png(char *path, int32_t width, int32_t height, int32_t stride, void *pixels) | |||
{ | |||
FILE *f = NULL; | |||
png_structp png_writer = NULL; | |||
png_infop png_info = NULL; | |||
uint8_t *rows[height]; | |||
for (int32_t y = 0; y < height; y++) | |||
rows[y] = pixels + y * stride; | |||
f = fopen(path, "wb"); | |||
fail_if(!f, "failed to open file for writing: %s", path); | |||
png_writer = png_create_write_struct(PNG_LIBPNG_VER_STRING, | |||
NULL, NULL, NULL); | |||
   fail_if(!png_writer, "failed to create png writer");
png_info = png_create_info_struct(png_writer); | |||
fail_if(!png_info, "failed to create png writer info"); | |||
png_init_io(png_writer, f); | |||
png_set_IHDR(png_writer, png_info, | |||
width, height, | |||
8, PNG_COLOR_TYPE_RGBA, | |||
PNG_INTERLACE_NONE, PNG_COMPRESSION_TYPE_DEFAULT, | |||
PNG_FILTER_TYPE_DEFAULT); | |||
png_write_info(png_writer, png_info); | |||
png_set_rows(png_writer, png_info, rows); | |||
png_write_png(png_writer, png_info, PNG_TRANSFORM_IDENTITY, NULL); | |||
png_destroy_write_struct(&png_writer, &png_info); | |||
fclose(f); | |||
} | |||
static void * | |||
test_alloc(void* pUserData, | |||
size_t size, | |||
size_t alignment, | |||
VkSystemAllocType allocType) | |||
{ | |||
return malloc(size); | |||
} | |||
static void | |||
test_free(void* pUserData, | |||
void* pMem) | |||
{ | |||
free(pMem); | |||
} | |||
#define GLSL(src) "#version 330\n" #src | |||

static void
create_pipeline(VkDevice device, VkPipeline *pipeline,
                VkPipelineLayout pipeline_layout)
{
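   /* In this revision of the API the per-stage create infos are chained
    * through pNext and handed to vkCreateGraphicsPipeline as one list:
    * graphics info -> rs -> vi -> fs -> vs -> ia (built innermost-first
    * below). */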
   VkPipelineIaStateCreateInfo ia_create_info = {
      .sType = VK_STRUCTURE_TYPE_PIPELINE_IA_STATE_CREATE_INFO,
      .topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP,
      .disableVertexReuse = false,
      .primitiveRestartEnable = false,
      .primitiveRestartIndex = 0
   };

   static const char vs_source[] = GLSL(
      layout(location = 0) in vec4 a_position;
      layout(location = 1) in vec4 a_color;
      layout(set = 0, index = 0) uniform block1 {
         vec4 color;
      } u1;
      layout(set = 0, index = 1) uniform block2 {
         vec4 color;
      } u2;
      layout(set = 1, index = 0) uniform block3 {
         vec4 color;
      } u3;
      out vec4 v_color;
      void main()
      {
         gl_Position = a_position;
         v_color = a_color + u1.color + u2.color + u3.color;
      });

   static const char fs_source[] = GLSL(
      out vec4 f_color;
      in vec4 v_color;
      layout(set = 0, index = 0) uniform sampler2D tex;
      void main()
      {
         f_color = v_color + texture2D(tex, vec2(0.1, 0.1));
      });

   VkShader vs;
   vkCreateShader(device,
                  &(VkShaderCreateInfo) {
                     .sType = VK_STRUCTURE_TYPE_SHADER_CREATE_INFO,
                     .codeSize = sizeof(vs_source),
                     .pCode = vs_source,
                     .flags = 0
                  },
                  &vs);

   VkShader fs;
   vkCreateShader(device,
                  &(VkShaderCreateInfo) {
                     .sType = VK_STRUCTURE_TYPE_SHADER_CREATE_INFO,
                     .codeSize = sizeof(fs_source),
                     .pCode = fs_source,
                     .flags = 0
                  },
                  &fs);

   VkPipelineShaderStageCreateInfo vs_create_info = {
      .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
      .pNext = &ia_create_info,
      .shader = {
         .stage = VK_SHADER_STAGE_VERTEX,
         .shader = vs,
         .linkConstBufferCount = 0,
         .pLinkConstBufferInfo = NULL,
         .pSpecializationInfo = NULL
      }
   };

   VkPipelineShaderStageCreateInfo fs_create_info = {
      .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
      .pNext = &vs_create_info,
      .shader = {
         .stage = VK_SHADER_STAGE_FRAGMENT,
         .shader = fs,
         .linkConstBufferCount = 0,
         .pLinkConstBufferInfo = NULL,
         .pSpecializationInfo = NULL
      }
   };

   VkPipelineVertexInputCreateInfo vi_create_info = {
      .sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_CREATE_INFO,
      .pNext = &fs_create_info,
      .bindingCount = 2,
      .pVertexBindingDescriptions = (VkVertexInputBindingDescription[]) {
         {
            .binding = 0,
            .strideInBytes = 16,
            .stepRate = VK_VERTEX_INPUT_STEP_RATE_VERTEX
         },
         {
            .binding = 1,
            /* Stride 0: the same 16 bytes are fetched for every vertex,
             * so the single color in the vertex data is broadcast across
             * the triangle. */
            .strideInBytes = 0,
            .stepRate = VK_VERTEX_INPUT_STEP_RATE_VERTEX
         }
      },
      .attributeCount = 2,
      .pVertexAttributeDescriptions = (VkVertexInputAttributeDescription[]) {
         {
            .location = 0,
            .binding = 0,
            .format = VK_FORMAT_R32G32B32A32_SFLOAT,
            .offsetInBytes = 0
         },
         {
            .location = 1,
            .binding = 1,
            .format = VK_FORMAT_R32G32B32A32_SFLOAT,
            .offsetInBytes = 0
         }
      }
   };

   VkPipelineRsStateCreateInfo rs_create_info = {
      .sType = VK_STRUCTURE_TYPE_PIPELINE_RS_STATE_CREATE_INFO,
      .pNext = &vi_create_info,
      .depthClipEnable = true,
      .rasterizerDiscardEnable = false,
      .fillMode = VK_FILL_MODE_SOLID,
      .cullMode = VK_CULL_MODE_NONE,
      .frontFace = VK_FRONT_FACE_CCW
   };

   vkCreateGraphicsPipeline(device,
                            &(VkGraphicsPipelineCreateInfo) {
                               .sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
                               .pNext = &rs_create_info,
                               .flags = 0,
                               .layout = pipeline_layout
                            },
                            pipeline);

   vkDestroyObject(device, VK_OBJECT_TYPE_SHADER, fs);
   vkDestroyObject(device, VK_OBJECT_TYPE_SHADER, vs);
}

int main(int argc, char *argv[])
{
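   /* Smoke test: bring up an instance and device, draw one triangle into
    * a 256x256 linear RGBA8 render target backed by host-visible memory,
    * and dump the result to vk.png. */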
   VkInstance instance;
   vkCreateInstance(&(VkInstanceCreateInfo) {
         .sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,
         .pAllocCb = &(VkAllocCallbacks) {
            .pUserData = NULL,
            .pfnAlloc = test_alloc,
            .pfnFree = test_free
         },
         .pAppInfo = &(VkApplicationInfo) {
            .sType = VK_STRUCTURE_TYPE_APPLICATION_INFO,
            .pAppName = "vk",
            .apiVersion = 1
         }
      },
      &instance);

   uint32_t count = 1;
   VkPhysicalDevice physicalDevices[1];
   vkEnumeratePhysicalDevices(instance, &count, physicalDevices);
   printf("%u physical devices\n", count);

   VkPhysicalDeviceProperties properties;
   size_t size = sizeof(properties);
   vkGetPhysicalDeviceInfo(physicalDevices[0],
                           VK_PHYSICAL_DEVICE_INFO_TYPE_PROPERTIES,
                           &size, &properties);
   printf("vendor id %04x, device name %s\n",
          properties.vendorId, properties.deviceName);

   VkDevice device;
   vkCreateDevice(physicalDevices[0],
                  &(VkDeviceCreateInfo) {
                     .sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,
                     .queueRecordCount = 1,
                     .pRequestedQueues = &(VkDeviceQueueCreateInfo) {
                        .queueNodeIndex = 0,
                        .queueCount = 1
                     }
                  },
                  &device);

   VkQueue queue;
   vkGetDeviceQueue(device, 0, 0, &queue);

   VkCmdBuffer cmdBuffer;
   vkCreateCommandBuffer(device,
                         &(VkCmdBufferCreateInfo) {
                            .sType = VK_STRUCTURE_TYPE_CMD_BUFFER_CREATE_INFO,
                            .queueNodeIndex = 0,
                            .flags = 0
                         },
                         &cmdBuffer);

   VkDescriptorSetLayout set_layout[2];
   vkCreateDescriptorSetLayout(device,
                               &(VkDescriptorSetLayoutCreateInfo) {
                                  .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
                                  .count = 2,
                                  .pBinding = (VkDescriptorSetLayoutBinding[]) {
                                     {
                                        .descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
                                        .count = 2,
                                        .stageFlags = VK_SHADER_STAGE_VERTEX_BIT,
                                        .pImmutableSamplers = NULL
                                     },
                                     {
                                        .descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE,
                                        .count = 1,
                                        .stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT,
                                        .pImmutableSamplers = NULL
                                     }
                                  }
                               },
                               &set_layout[0]);

   vkCreateDescriptorSetLayout(device,
                               &(VkDescriptorSetLayoutCreateInfo) {
                                  .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
                                  .count = 1,
                                  .pBinding = (VkDescriptorSetLayoutBinding[]) {
                                     {
                                        .descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
                                        .count = 1,
                                        .stageFlags = VK_SHADER_STAGE_VERTEX_BIT,
                                        .pImmutableSamplers = NULL
                                     }
                                  }
                               },
                               &set_layout[1]);

   VkPipelineLayout pipeline_layout;
   vkCreatePipelineLayout(device,
                          &(VkPipelineLayoutCreateInfo) {
                             .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
                             .descriptorSetCount = 2,
                             .pSetLayouts = set_layout,
                          },
                          &pipeline_layout);

   VkPipeline pipeline;
   create_pipeline(device, &pipeline, pipeline_layout);

   VkDescriptorSet set[2];
   vkAllocDescriptorSets(device, 0 /* pool */,
                         VK_DESCRIPTOR_SET_USAGE_STATIC,
                         2, set_layout, set, &count);

   VkBuffer buffer;
   vkCreateBuffer(device,
                  &(VkBufferCreateInfo) {
                     .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
                     .size = 1024,
                     .usage = VK_BUFFER_USAGE_GENERAL,
                     .flags = 0
                  },
                  &buffer);

   VkMemoryRequirements buffer_requirements;
   size = sizeof(buffer_requirements);
   vkGetObjectInfo(device, VK_OBJECT_TYPE_BUFFER, buffer,
                   VK_OBJECT_INFO_TYPE_MEMORY_REQUIREMENTS,
                   &size, &buffer_requirements);

   int32_t width = 256, height = 256;

   VkImage rt;
   vkCreateImage(device,
                 &(VkImageCreateInfo) {
                    .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
                    .imageType = VK_IMAGE_TYPE_2D,
                    .format = VK_FORMAT_R8G8B8A8_UNORM,
                    .extent = { .width = width, .height = height, .depth = 1 },
                    .mipLevels = 1,
                    .arraySize = 1,
                    .samples = 1,
                    .tiling = VK_IMAGE_TILING_LINEAR,
                    .usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
                    .flags = 0,
                 },
                 &rt);

   VkMemoryRequirements rt_requirements;
   size = sizeof(rt_requirements);
   vkGetObjectInfo(device, VK_OBJECT_TYPE_IMAGE, rt,
                   VK_OBJECT_INFO_TYPE_MEMORY_REQUIREMENTS,
                   &size, &rt_requirements);

   VkBuffer vertex_buffer;
   vkCreateBuffer(device,
                  &(VkBufferCreateInfo) {
                     .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
                     .size = 1024,
                     .usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT,
                     .flags = 0
                  },
                  &vertex_buffer);

   VkMemoryRequirements vb_requirements;
   size = sizeof(vb_requirements);
   vkGetObjectInfo(device, VK_OBJECT_TYPE_BUFFER, vertex_buffer,
                   VK_OBJECT_INFO_TYPE_MEMORY_REQUIREMENTS,
                   &size, &vb_requirements);

   printf("buffer size: %lu, buffer alignment: %lu\n",
          buffer_requirements.size, buffer_requirements.alignment);
   printf("rt size: %lu, rt alignment: %lu\n",
          rt_requirements.size, rt_requirements.alignment);
   printf("vb size: %lu, vb alignment: %lu\n",
          vb_requirements.size, vb_requirements.alignment);
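
   /* Sub-allocate everything from a single host-visible allocation: the
    * uniform/timestamp buffer lives at offset 128, the vertex buffer at
    * 1024, the render target at 2048, and the 16x16 texture directly
    * after the render target (2048 + 256 * 256 * 4).  mem_size budgets
    * rt_requirements.size for the render target plus those extras. */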
   size_t mem_size = rt_requirements.size + 2048 + 16 * 16 * 4;
   VkDeviceMemory mem;
   vkAllocMemory(device,
                 &(VkMemoryAllocInfo) {
                    .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOC_INFO,
                    .allocationSize = mem_size,
                    .memProps = VK_MEMORY_PROPERTY_HOST_DEVICE_COHERENT_BIT,
                    .memPriority = VK_MEMORY_PRIORITY_NORMAL
                 },
                 &mem);

   void *map;
   vkMapMemory(device, mem, 0, mem_size, 0, &map);
   memset(map, 192, mem_size);

   vkQueueBindObjectMemory(queue, VK_OBJECT_TYPE_BUFFER,
                           buffer,
                           0, /* allocation index; for objects which need to bind to multiple mems */
                           mem, 128);

   float color[12] = {
      0.0, 0.2, 0.0, 0.0,
      0.0, 0.0, 0.5, 0.0,
      0.0, 0.0, 0.5, 0.5
   };
   /* Uniform data for the three buffer views below, which window the
    * buffer at offsets 16, 32 and 48 (the buffer itself sits at offset
    * 128 in mem). */
   memcpy(map + 128 + 16, color, sizeof(color));

   VkBufferView buffer_view[3];
   vkCreateBufferView(device,
                      &(VkBufferViewCreateInfo) {
                         .sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
                         .buffer = buffer,
                         .viewType = VK_BUFFER_VIEW_TYPE_RAW,
                         .format = VK_FORMAT_R32G32B32A32_SFLOAT,
                         .offset = 16,
                         .range = 64
                      },
                      &buffer_view[0]);

   vkCreateBufferView(device,
                      &(VkBufferViewCreateInfo) {
                         .sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
                         .buffer = buffer,
                         .viewType = VK_BUFFER_VIEW_TYPE_RAW,
                         .format = VK_FORMAT_R32G32B32A32_SFLOAT,
                         .offset = 32,
                         .range = 64
                      },
                      &buffer_view[1]);

   vkCreateBufferView(device,
                      &(VkBufferViewCreateInfo) {
                         .sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
                         .buffer = buffer,
                         .viewType = VK_BUFFER_VIEW_TYPE_RAW,
                         .format = VK_FORMAT_R32G32B32A32_SFLOAT,
                         .offset = 48,
                         .range = 64
                      },
                      &buffer_view[2]);

   vkQueueBindObjectMemory(queue, VK_OBJECT_TYPE_BUFFER,
                           vertex_buffer,
                           0, /* allocation index; for objects which need to bind to multiple mems */
                           mem, 1024);
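
   /* Three positions for binding 0 plus a single color for binding 1;
    * binding 1 has stride 0 in the pipeline's vertex input state, so the
    * one color is re-read for every vertex. */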
   static const float vertex_data[] = {
      /* Triangle coordinates */
      -0.5, -0.5, 0.0, 1.0,
       0.5, -0.5, 0.0, 1.0,
       0.0,  0.5, 0.0, 1.0,
      /* Color */
       1.0,  0.0, 0.0, 0.2,
   };
   memcpy(map + 1024, vertex_data, sizeof(vertex_data));

   VkDynamicVpState vp_state;
   vkCreateDynamicViewportState(device,
                                &(VkDynamicVpStateCreateInfo) {
                                   .sType = VK_STRUCTURE_TYPE_DYNAMIC_VP_STATE_CREATE_INFO,
                                   .viewportAndScissorCount = 2,
                                   .pViewports = (VkViewport[]) {
                                      {
                                         .originX = 0,
                                         .originY = 0,
                                         .width = width,
                                         .height = height,
                                         .minDepth = 0,
                                         .maxDepth = 1
                                      },
                                      {
                                         .originX = -10,
                                         .originY = -10,
                                         .width = 20,
                                         .height = 20,
                                         .minDepth = -1,
                                         .maxDepth = 1
                                      },
                                   },
                                   .pScissors = (VkRect[]) {
                                      { { 0, 0 }, { width, height } },
                                      { { 10, 10 }, { 236, 236 } }
                                   }
                                },
                                &vp_state);

   VkDynamicRsState rs_state;
   vkCreateDynamicRasterState(device,
                              &(VkDynamicRsStateCreateInfo) {
                                 .sType = VK_STRUCTURE_TYPE_DYNAMIC_RS_STATE_CREATE_INFO,
                              },
                              &rs_state);

   /* FIXME: Need to query memory info before binding to memory */
   vkQueueBindObjectMemory(queue, VK_OBJECT_TYPE_IMAGE,
                           rt,
                           0, /* allocation index; for objects which need to bind to multiple mems */
                           mem, 2048);

   const uint32_t texture_width = 16, texture_height = 16;

   VkImage texture;
   vkCreateImage(device,
                 &(VkImageCreateInfo) {
                    .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
                    .imageType = VK_IMAGE_TYPE_2D,
                    .format = VK_FORMAT_R8G8B8A8_UNORM,
                    .extent = { .width = texture_width, .height = texture_height, .depth = 1 },
                    .mipLevels = 1,
                    .arraySize = 1,
                    .samples = 1,
                    .tiling = VK_IMAGE_TILING_LINEAR,
                    .usage = VK_IMAGE_USAGE_SAMPLED_BIT,
                    .flags = 0,
                 },
                 &texture);

   VkImageView image_view;
   vkCreateImageView(device,
                     &(VkImageViewCreateInfo) {
                        .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
                        .image = texture,
                        .viewType = VK_IMAGE_VIEW_TYPE_2D,
                        .format = VK_FORMAT_R8G8B8A8_UNORM,
                        .channels = {
                           VK_CHANNEL_SWIZZLE_R,
                           VK_CHANNEL_SWIZZLE_G,
                           VK_CHANNEL_SWIZZLE_B,
                           VK_CHANNEL_SWIZZLE_A
                        },
                        .subresourceRange = {
                           .aspect = VK_IMAGE_ASPECT_COLOR,
                           .baseMipLevel = 0,
                           .mipLevels = 1,
                           .baseArraySlice = 0,
                           .arraySize = 1
                        },
                        .minLod = 0
                     },
                     &image_view);

   /* Place the texture directly after the 256x256x4-byte render target. */
   vkQueueBindObjectMemory(queue, VK_OBJECT_TYPE_IMAGE,
                           texture,
                           0, /* allocation index; for objects which need to bind to multiple mems */
                           mem, 2048 + 256 * 256 * 4);
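
   /* Fill the two descriptor sets to match the shaders' (set, index)
    * declarations: set 0 gets u1/u2 from buffer_view[0..1] plus the
    * sampled image, set 1 gets u3 from buffer_view[2]. */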
   vkUpdateDescriptors(device, set[0], 2,
                       (const void * []) {
                          &(VkUpdateBuffers) {
                             .sType = VK_STRUCTURE_TYPE_UPDATE_BUFFERS,
                             .descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
                             .arrayIndex = 0,
                             .binding = 0,
                             .count = 2,
                             .pBufferViews = (VkBufferViewAttachInfo[]) {
                                {
                                   .sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_ATTACH_INFO,
                                   .view = buffer_view[0]
                                },
                                {
                                   .sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_ATTACH_INFO,
                                   .view = buffer_view[1]
                                }
                             }
                          },
                          &(VkUpdateImages) {
                             .sType = VK_STRUCTURE_TYPE_UPDATE_IMAGES,
                             .descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE,
                             .binding = 2,
                             .count = 1,
                             .pImageViews = (VkImageViewAttachInfo[]) {
                                {
                                   .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_ATTACH_INFO,
                                   .view = image_view,
                                   .layout = VK_IMAGE_LAYOUT_GENERAL,
                                }
                             }
                          }
                       });

   vkUpdateDescriptors(device, set[1], 1,
                       (const void * []) {
                          &(VkUpdateBuffers) {
                             .sType = VK_STRUCTURE_TYPE_UPDATE_BUFFERS,
                             .descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
                             .arrayIndex = 0,
                             .count = 1,
                             .pBufferViews = (VkBufferViewAttachInfo[]) {
                                {
                                   .sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_ATTACH_INFO,
                                   .view = buffer_view[2]
                                }
                             }
                          }
                       });

   VkColorAttachmentView view;
   vkCreateColorAttachmentView(device,
                               &(VkColorAttachmentViewCreateInfo) {
                                  .sType = VK_STRUCTURE_TYPE_COLOR_ATTACHMENT_VIEW_CREATE_INFO,
                                  .image = rt,
                                  .format = VK_FORMAT_R8G8B8A8_UNORM,
                                  .mipLevel = 0,
                                  .baseArraySlice = 0,
                                  .arraySize = 1,
                                  .msaaResolveImage = 0,
                                  .msaaResolveSubResource = { 0, }
                               },
                               &view);

   VkFramebuffer framebuffer;
   vkCreateFramebuffer(device,
                       &(VkFramebufferCreateInfo) {
                          .sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
                          .colorAttachmentCount = 1,
                          .pColorAttachments = (VkColorAttachmentBindInfo[]) {
                             {
                                .view = view,
                                .layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL
                             }
                          },
                          .pDepthStencilAttachment = NULL,
                          .sampleCount = 1,
                          .width = width,
                          .height = height,
                          .layers = 1
                       },
                       &framebuffer);

   VkRenderPass pass;
   vkCreateRenderPass(device,
                      &(VkRenderPassCreateInfo) {
                         .sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
                         .renderArea = { { 0, 0 }, { width, height } },
                         .colorAttachmentCount = 1,
                         .extent = { },
                         .sampleCount = 1,
                         .layers = 1,
                         .pColorFormats = (VkFormat[]) { VK_FORMAT_R8G8B8A8_UNORM },
                         .pColorLayouts = (VkImageLayout[]) { VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL },
                         .pColorLoadOps = (VkAttachmentLoadOp[]) { VK_ATTACHMENT_LOAD_OP_CLEAR },
                         .pColorStoreOps = (VkAttachmentStoreOp[]) { VK_ATTACHMENT_STORE_OP_STORE },
                         .pColorLoadClearValues = (VkClearColor[]) {
                            { .color = { .floatColor = { 1.0, 0.0, 0.0, 1.0 } }, .useRawValue = false }
                         },
                         .depthStencilFormat = VK_FORMAT_UNDEFINED,
                      },
                      &pass);

   vkBeginCommandBuffer(cmdBuffer,
                        &(VkCmdBufferBeginInfo) {
                           .sType = VK_STRUCTURE_TYPE_CMD_BUFFER_BEGIN_INFO,
                           .flags = 0
                        });

   vkCmdBeginRenderPass(cmdBuffer,
                        &(VkRenderPassBegin) {
                           .renderPass = pass,
                           .framebuffer = framebuffer
                        });
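
   /* Both vertex bindings come from vertex_buffer: binding 0 starts at
    * the positions (offset 0) and binding 1 at the single color that
    * follows them (offset 3 * 4 * sizeof(float)). */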
   vkCmdBindVertexBuffers(cmdBuffer, 0, 2,
                          (VkBuffer[]) { vertex_buffer, vertex_buffer },
                          (VkDeviceSize[]) { 0, 3 * 4 * sizeof(float) });

   vkCmdBindPipeline(cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);

   vkCmdBindDescriptorSets(cmdBuffer,
                           VK_PIPELINE_BIND_POINT_GRAPHICS, 0, 1,
                           &set[0], 0, NULL);
   vkCmdBindDescriptorSets(cmdBuffer,
                           VK_PIPELINE_BIND_POINT_GRAPHICS, 1, 1,
                           &set[1], 0, NULL);

   vkCmdBindDynamicStateObject(cmdBuffer,
                               VK_STATE_BIND_POINT_VIEWPORT, vp_state);
   vkCmdBindDynamicStateObject(cmdBuffer,
                               VK_STATE_BIND_POINT_RASTER, rs_state);

   /* Top- and bottom-of-pipe timestamps land at offsets 0 and 8 of
    * buffer (offsets 128 and 136 of the mapped allocation). */
   vkCmdWriteTimestamp(cmdBuffer, VK_TIMESTAMP_TYPE_TOP, buffer, 0);
   vkCmdWriteTimestamp(cmdBuffer, VK_TIMESTAMP_TYPE_BOTTOM, buffer, 8);

   vkCmdDraw(cmdBuffer, 0, 3, 0, 1);

   vkCmdEndRenderPass(cmdBuffer, pass);

   vkEndCommandBuffer(cmdBuffer);

   vkQueueSubmit(queue, 1, &cmdBuffer, 0);

   vkQueueWaitIdle(queue);
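
   /* The render target is linear and was bound at offset 2048 of the
    * mapped allocation, so the rendered pixels can be read straight from
    * the CPU map; the stride is 256 pixels * 4 bytes = 1024. */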
   write_png("vk.png", width, height, 1024, map + 2048);

   vkDestroyObject(device, VK_OBJECT_TYPE_IMAGE, texture);
   vkDestroyObject(device, VK_OBJECT_TYPE_IMAGE, rt);
   vkDestroyObject(device, VK_OBJECT_TYPE_BUFFER, buffer);
   vkDestroyObject(device, VK_OBJECT_TYPE_COMMAND_BUFFER, cmdBuffer);
   vkDestroyObject(device, VK_OBJECT_TYPE_PIPELINE, pipeline);

   vkDestroyDevice(device);
   vkDestroyInstance(instance);

   return 0;
}