Acked-by: Kristian Høgsberg <krh@bitplanet.net>
@@ -455,7 +455,7 @@ emit_batch_buffer_start(struct anv_cmd_buffer *cmd_buffer,
 const uint32_t gen8_length =
 GEN8_MI_BATCH_BUFFER_START_length - GEN8_MI_BATCH_BUFFER_START_length_bias;
-anv_batch_emit_blk(&cmd_buffer->batch, GEN8_MI_BATCH_BUFFER_START, bbs) {
+anv_batch_emit(&cmd_buffer->batch, GEN8_MI_BATCH_BUFFER_START, bbs) {
 bbs.DWordLength = cmd_buffer->device->info.gen < 8 ?
 gen7_length : gen8_length;
 bbs._2ndLevelBatchBuffer = _1stlevelbatch;
@@ -712,11 +712,11 @@ anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer *cmd_buffer)
 cmd_buffer->batch.end += GEN8_MI_BATCH_BUFFER_START_length * 4;
 assert(cmd_buffer->batch.end == batch_bo->bo.map + batch_bo->bo.size);
-anv_batch_emit_blk(&cmd_buffer->batch, GEN7_MI_BATCH_BUFFER_END, bbe);
+anv_batch_emit(&cmd_buffer->batch, GEN7_MI_BATCH_BUFFER_END, bbe);
 /* Round batch up to an even number of dwords. */
 if ((cmd_buffer->batch.next - cmd_buffer->batch.start) & 4)
-anv_batch_emit_blk(&cmd_buffer->batch, GEN7_MI_NOOP, noop);
+anv_batch_emit(&cmd_buffer->batch, GEN7_MI_NOOP, noop);
 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_PRIMARY;
 }
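A note on the MI_NOOP padding above: every command is a whole number of 4-byte dwords, so testing bit 2 of the batch's byte length (the "& 4" above) is the same as asking whether an odd number of dwords has been emitted, and one MI_NOOP pads the batch back to an even dword count. A minimal standalone restatement of that test (the helper name is illustrative, not part of the driver):

#include <stdbool.h>
#include <stddef.h>

/* True when a dword-aligned byte length holds an odd number of dwords,
 * i.e. one pad MI_NOOP is needed to reach an even dword boundary. */
static inline bool
batch_needs_pad_noop(size_t batch_bytes)
{
   return (batch_bytes & 4) != 0;
}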
@@ -1081,8 +1081,8 @@ VkResult anv_DeviceWaitIdle(
 batch.start = batch.next = cmds;
 batch.end = (void *) cmds + sizeof(cmds);
-anv_batch_emit_blk(&batch, GEN7_MI_BATCH_BUFFER_END, bbe);
-anv_batch_emit_blk(&batch, GEN7_MI_NOOP, noop);
+anv_batch_emit(&batch, GEN7_MI_BATCH_BUFFER_END, bbe);
+anv_batch_emit(&batch, GEN7_MI_NOOP, noop);
 return anv_device_submit_simple_batch(device, &batch);
 }
@@ -1423,8 +1423,8 @@ VkResult anv_CreateFence(
 const uint32_t batch_offset = align_u32(sizeof(*fence), CACHELINE_SIZE);
 batch.next = batch.start = fence->bo.map + batch_offset;
 batch.end = fence->bo.map + fence->bo.size;
-anv_batch_emit_blk(&batch, GEN7_MI_BATCH_BUFFER_END, bbe);
-anv_batch_emit_blk(&batch, GEN7_MI_NOOP, noop);
+anv_batch_emit(&batch, GEN7_MI_BATCH_BUFFER_END, bbe);
+anv_batch_emit(&batch, GEN7_MI_NOOP, noop);
 if (!device->info.has_llc) {
 assert(((uintptr_t) batch.start & CACHELINE_MASK) == 0);
@@ -848,7 +848,7 @@ __gen_combine_address(struct anv_batch *batch, void *location,
 VG(VALGRIND_CHECK_MEM_IS_DEFINED(dw, ARRAY_SIZE(dwords0) * 4));\
 } while (0)
-#define anv_batch_emit_blk(batch, cmd, name) \
+#define anv_batch_emit(batch, cmd, name) \
 for (struct cmd name = { __anv_cmd_header(cmd) }, \
 *_dst = anv_batch_emit_dwords(batch, __anv_cmd_length(cmd)); \
 __builtin_expect(_dst != NULL, 1); \
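The hunk above is the heart of the change: the block-style emit helper loses its "_blk" suffix and becomes plain anv_batch_emit. The rest of the macro body is not shown in this hunk, but the visible part already shows the idiom it is built on: a for loop whose init clause declares the command struct (seeded with the command's header) and reserves space in the batch, arranged so that the body runs exactly once. Below is a minimal, self-contained sketch of that run-once for-loop pattern; the struct, the pack helper, and emit_dwords are stand-ins invented for the example, not the driver's real genxml API:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for a generated command: two dwords, a header plus one field. */
#define MY_CMD_OPCODE 0x2a
#define MY_CMD_LENGTH 2

struct my_cmd {
   uint32_t Opcode;
   uint32_t Value;
};

static uint32_t batch[64];
static uint32_t batch_next;   /* next free dword */

/* Reserve 'count' dwords in the toy batch and return where they start. */
static void *
emit_dwords(uint32_t count)
{
   void *p = &batch[batch_next];
   batch_next += count;
   return p;
}

/* Pack the C struct into the reserved dwords. */
static void
my_cmd_pack(void *dst, const struct my_cmd *src)
{
   uint32_t *dw = dst;
   dw[0] = src->Opcode;
   dw[1] = src->Value;
}

/* Run-once for loop: the init clause sets up the struct and reserves space,
 * the body (supplied by the caller) assigns fields by name, and the
 * increment clause packs the struct and NULLs _dst so the loop terminates.
 * If the reservation failed (_dst == NULL) the body would simply be skipped,
 * which is what the __builtin_expect(_dst != NULL, 1) condition above tests. */
#define emit(name)                                             \
   for (struct my_cmd name = { .Opcode = MY_CMD_OPCODE },      \
        *_dst = emit_dwords(MY_CMD_LENGTH);                    \
        _dst != NULL;                                          \
        my_cmd_pack(_dst, &name), _dst = NULL)

int
main(void)
{
   emit(c) {                   /* block form, as in most hunks below */
      c.Value = 7;
   }

   emit(c);                    /* bare-semicolon form: header defaults only */

   printf("emitted %u dwords\n", batch_next);
   return 0;
}

Because the macro expands to a single for statement, call sites can either attach a braced block that fills in fields (as most hunks in this diff do) or end with a bare semicolon to emit the command with only its header defaults, which is what calls like anv_batch_emit(&batch, GENX(3DSTATE_HS), hs); rely on.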
@@ -57,8 +57,8 @@ gen7_cmd_buffer_emit_descriptor_pointers(struct anv_cmd_buffer *cmd_buffer,
 anv_foreach_stage(s, stages) {
 if (cmd_buffer->state.samplers[s].alloc_size > 0) {
-anv_batch_emit_blk(&cmd_buffer->batch,
-GENX(3DSTATE_SAMPLER_STATE_POINTERS_VS), ssp) {
+anv_batch_emit(&cmd_buffer->batch,
+GENX(3DSTATE_SAMPLER_STATE_POINTERS_VS), ssp) {
 ssp._3DCommandSubOpcode = sampler_state_opcodes[s];
 ssp.PointertoVSSamplerState = cmd_buffer->state.samplers[s].offset;
 }
@@ -66,8 +66,8 @@ gen7_cmd_buffer_emit_descriptor_pointers(struct anv_cmd_buffer *cmd_buffer,
 /* Always emit binding table pointers if we're asked to, since on SKL
 * this is what flushes push constants. */
-anv_batch_emit_blk(&cmd_buffer->batch,
-GENX(3DSTATE_BINDING_TABLE_POINTERS_VS), btp) {
+anv_batch_emit(&cmd_buffer->batch,
+GENX(3DSTATE_BINDING_TABLE_POINTERS_VS), btp) {
 btp._3DCommandSubOpcode = binding_table_opcodes[s];
 btp.PointertoVSBindingTable = cmd_buffer->state.binding_tables[s].offset;
 }
@@ -175,8 +175,8 @@ gen7_cmd_buffer_emit_scissor(struct anv_cmd_buffer *cmd_buffer)
 }
 }
-anv_batch_emit_blk(&cmd_buffer->batch,
-GEN7_3DSTATE_SCISSOR_STATE_POINTERS, ssp) {
+anv_batch_emit(&cmd_buffer->batch,
+GEN7_3DSTATE_SCISSOR_STATE_POINTERS, ssp) {
 ssp.ScissorRectPointer = scissor_state.offset;
 }
@@ -241,7 +241,7 @@ flush_compute_descriptor_set(struct anv_cmd_buffer *cmd_buffer)
 unsigned push_constant_regs = reg_aligned_constant_size / 32;
 if (push_state.alloc_size) {
-anv_batch_emit_blk(&cmd_buffer->batch, GENX(MEDIA_CURBE_LOAD), curbe) {
+anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_CURBE_LOAD), curbe) {
 curbe.CURBETotalDataLength = push_state.alloc_size;
 curbe.CURBEDataStartAddress = push_state.offset;
 }
@@ -274,8 +274,8 @@ flush_compute_descriptor_set(struct anv_cmd_buffer *cmd_buffer)
 pipeline->cs_thread_width_max);
 const uint32_t size = GENX(INTERFACE_DESCRIPTOR_DATA_length) * sizeof(uint32_t);
-anv_batch_emit_blk(&cmd_buffer->batch,
-GENX(MEDIA_INTERFACE_DESCRIPTOR_LOAD), idl) {
+anv_batch_emit(&cmd_buffer->batch,
+GENX(MEDIA_INTERFACE_DESCRIPTOR_LOAD), idl) {
 idl.InterfaceDescriptorTotalLength = size;
 idl.InterfaceDescriptorDataStartAddress = state.offset;
 }
@@ -312,7 +312,7 @@ genX(cmd_buffer_config_l3)(struct anv_cmd_buffer *cmd_buffer, bool enable_slm)
 * flushed, which involves a first PIPE_CONTROL flush which stalls the
 * pipeline...
 */
-anv_batch_emit_blk(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
+anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
 pc.DCFlushEnable = true;
 pc.CommandStreamerStallEnable = true;
 pc.PostSyncOperation = NoWrite;
@@ -332,7 +332,7 @@ genX(cmd_buffer_config_l3)(struct anv_cmd_buffer *cmd_buffer, bool enable_slm)
 * previous and subsequent PIPE_CONTROLs already guarantee that there is
 * no concurrent GPGPU kernel execution (see SKL HSD 2132585).
 */
-anv_batch_emit_blk(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
+anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
 pc.TextureCacheInvalidationEnable = true;
 pc.ConstantCacheInvalidationEnable = true;
 pc.InstructionCacheInvalidateEnable = true;
@@ -343,14 +343,14 @@ genX(cmd_buffer_config_l3)(struct anv_cmd_buffer *cmd_buffer, bool enable_slm)
 /* Now send a third stalling flush to make sure that invalidation is
 * complete when the L3 configuration registers are modified.
 */
-anv_batch_emit_blk(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
+anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
 pc.DCFlushEnable = true;
 pc.CommandStreamerStallEnable = true;
 pc.PostSyncOperation = NoWrite;
 }
 anv_finishme("write GEN7_L3SQCREG1");
-anv_batch_emit_blk(&cmd_buffer->batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
+anv_batch_emit(&cmd_buffer->batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
 lri.RegisterOffset = GENX(L3CNTLREG2_num);
 lri.DataDWord = l3cr2_val;
 }
@@ -365,7 +365,7 @@ genX(cmd_buffer_config_l3)(struct anv_cmd_buffer *cmd_buffer, bool enable_slm)
 .CAllocation = 8,
 .TAllocation = 8);
 const uint32_t l3cr3_val = enable_slm ? l3cr3_slm : l3cr3_noslm;
-anv_batch_emit_blk(&cmd_buffer->batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
+anv_batch_emit(&cmd_buffer->batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
 lri.RegisterOffset = GENX(L3CNTLREG3_num);
 lri.DataDWord = l3cr3_val;
 }
@@ -455,8 +455,7 @@ genX(cmd_buffer_flush_dynamic_state)(struct anv_cmd_buffer *cmd_buffer)
 if (!cmd_buffer->device->info.has_llc)
 anv_state_clflush(cc_state);
-anv_batch_emit_blk(&cmd_buffer->batch,
-GENX(3DSTATE_CC_STATE_POINTERS), ccp) {
+anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CC_STATE_POINTERS), ccp) {
 ccp.ColorCalcStatePointer = cc_state.offset;
 }
 }
@@ -482,8 +481,8 @@ genX(cmd_buffer_flush_dynamic_state)(struct anv_cmd_buffer *cmd_buffer)
 pipeline->gen7.depth_stencil_state,
 GENX(DEPTH_STENCIL_STATE_length), 64);
-anv_batch_emit_blk(&cmd_buffer->batch,
-GENX(3DSTATE_DEPTH_STENCIL_STATE_POINTERS), dsp) {
+anv_batch_emit(&cmd_buffer->batch,
+GENX(3DSTATE_DEPTH_STENCIL_STATE_POINTERS), dsp) {
 dsp.PointertoDEPTH_STENCIL_STATE = ds_state.offset;
 }
 }
@@ -495,13 +494,13 @@ genX(cmd_buffer_flush_dynamic_state)(struct anv_cmd_buffer *cmd_buffer)
 uint32_t offset = cmd_buffer->state.gen7.index_offset;
 #if GEN_IS_HASWELL
-anv_batch_emit_blk(&cmd_buffer->batch, GEN75_3DSTATE_VF, vf) {
+anv_batch_emit(&cmd_buffer->batch, GEN75_3DSTATE_VF, vf) {
 vf.IndexedDrawCutIndexEnable = pipeline->primitive_restart;
 vf.CutIndex = cmd_buffer->state.restart_index;
 }
 #endif
-anv_batch_emit_blk(&cmd_buffer->batch, GENX(3DSTATE_INDEX_BUFFER), ib) {
+anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_INDEX_BUFFER), ib) {
 #if !GEN_IS_HASWELL
 ib.CutIndexEnable = pipeline->primitive_restart;
 #endif
@@ -175,8 +175,7 @@ gen7_emit_cb_state(struct anv_pipeline *pipeline,
 anv_state_clflush(pipeline->blend_state);
 }
-anv_batch_emit_blk(&pipeline->batch,
-GENX(3DSTATE_BLEND_STATE_POINTERS), bsp) {
+anv_batch_emit(&pipeline->batch, GENX(3DSTATE_BLEND_STATE_POINTERS), bsp) {
 bsp.BlendStatePointer = pipeline->blend_state.offset;
 }
 }
@@ -224,7 +223,7 @@ genX(graphics_pipeline_create)(
 const VkPipelineRasterizationStateCreateInfo *rs_info =
 pCreateInfo->pRasterizationState;
-anv_batch_emit_blk(&pipeline->batch, GENX(3DSTATE_CLIP), clip) {
+anv_batch_emit(&pipeline->batch, GENX(3DSTATE_CLIP), clip) {
 clip.FrontWinding = vk_to_gen_front_face[rs_info->frontFace],
 clip.CullMode = vk_to_gen_cullmode[rs_info->cullMode],
 clip.ClipEnable = !(extra && extra->use_rectlist),
@@ -248,12 +247,12 @@ genX(graphics_pipeline_create)(
 uint32_t samples = 1;
 uint32_t log2_samples = __builtin_ffs(samples) - 1;
-anv_batch_emit_blk(&pipeline->batch, GENX(3DSTATE_MULTISAMPLE), ms) {
+anv_batch_emit(&pipeline->batch, GENX(3DSTATE_MULTISAMPLE), ms) {
 ms.PixelLocation = PIXLOC_CENTER;
 ms.NumberofMultisamples = log2_samples;
 }
-anv_batch_emit_blk(&pipeline->batch, GENX(3DSTATE_SAMPLE_MASK), sm) {
+anv_batch_emit(&pipeline->batch, GENX(3DSTATE_SAMPLE_MASK), sm) {
 sm.SampleMask = 0xff;
 }
@@ -279,9 +278,9 @@ genX(graphics_pipeline_create)(
 #endif
 if (pipeline->vs_vec4 == NO_KERNEL || (extra && extra->disable_vs))
-anv_batch_emit_blk(&pipeline->batch, GENX(3DSTATE_VS), vs);
+anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VS), vs);
 else
-anv_batch_emit_blk(&pipeline->batch, GENX(3DSTATE_VS), vs) {
+anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VS), vs) {
 vs.KernelStartPointer = pipeline->vs_vec4;
 vs.ScratchSpaceBaseOffset = pipeline->scratch_start[MESA_SHADER_VERTEX];
 vs.PerThreadScratchSpace = scratch_space(&vs_prog_data->base.base);
@@ -299,9 +298,9 @@ genX(graphics_pipeline_create)(
 const struct brw_gs_prog_data *gs_prog_data = get_gs_prog_data(pipeline);
 if (pipeline->gs_kernel == NO_KERNEL || (extra && extra->disable_vs)) {
-anv_batch_emit_blk(&pipeline->batch, GENX(3DSTATE_GS), gs);
+anv_batch_emit(&pipeline->batch, GENX(3DSTATE_GS), gs);
 } else {
-anv_batch_emit_blk(&pipeline->batch, GENX(3DSTATE_GS), gs) {
+anv_batch_emit(&pipeline->batch, GENX(3DSTATE_GS), gs) {
 gs.KernelStartPointer = pipeline->gs_kernel;
 gs.ScratchSpaceBasePointer = pipeline->scratch_start[MESA_SHADER_GEOMETRY];
 gs.PerThreadScratchSpace = scratch_space(&gs_prog_data->base.base);
@@ -332,9 +331,9 @@ genX(graphics_pipeline_create)(
 }
 if (pipeline->ps_ksp0 == NO_KERNEL) {
-anv_batch_emit_blk(&pipeline->batch, GENX(3DSTATE_SBE), sbe);
+anv_batch_emit(&pipeline->batch, GENX(3DSTATE_SBE), sbe);
-anv_batch_emit_blk(&pipeline->batch, GENX(3DSTATE_WM), wm) {
+anv_batch_emit(&pipeline->batch, GENX(3DSTATE_WM), wm) {
 wm.StatisticsEnable = true;
 wm.ThreadDispatchEnable = false;
 wm.LineEndCapAntialiasingRegionWidth = 0; /* 0.5 pixels */
@@ -346,7 +345,7 @@ genX(graphics_pipeline_create)(
 /* Even if no fragments are ever dispatched, the hardware hangs if we
 * don't at least set the maximum number of threads.
 */
-anv_batch_emit_blk(&pipeline->batch, GENX(3DSTATE_PS), ps) {
+anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PS), ps) {
 ps.MaximumNumberofThreads = device->info.max_wm_threads - 1;
 }
 } else {
@@ -359,7 +358,7 @@ genX(graphics_pipeline_create)(
 emit_3dstate_sbe(pipeline);
-anv_batch_emit_blk(&pipeline->batch, GENX(3DSTATE_PS), ps) {
+anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PS), ps) {
 ps.KernelStartPointer0 = pipeline->ps_ksp0;
 ps.ScratchSpaceBasePointer = pipeline->scratch_start[MESA_SHADER_FRAGMENT];
 ps.PerThreadScratchSpace = scratch_space(&wm_prog_data->base);
@@ -392,7 +391,7 @@ genX(graphics_pipeline_create)(
 }
 /* FIXME-GEN7: This needs a lot more work, cf gen7 upload_wm_state(). */
-anv_batch_emit_blk(&pipeline->batch, GENX(3DSTATE_WM), wm) {
+anv_batch_emit(&pipeline->batch, GENX(3DSTATE_WM), wm) {
 wm.StatisticsEnable = true;
 wm.ThreadDispatchEnable = true;
 wm.LineEndCapAntialiasingRegionWidth = 0; /* 0.5 pixels */
@@ -80,12 +80,12 @@ gen8_cmd_buffer_emit_viewport(struct anv_cmd_buffer *cmd_buffer)
 anv_state_clflush(cc_state);
 }
-anv_batch_emit_blk(&cmd_buffer->batch,
-GENX(3DSTATE_VIEWPORT_STATE_POINTERS_CC), cc) {
+anv_batch_emit(&cmd_buffer->batch,
+GENX(3DSTATE_VIEWPORT_STATE_POINTERS_CC), cc) {
 cc.CCViewportPointer = cc_state.offset;
 }
-anv_batch_emit_blk(&cmd_buffer->batch,
-GENX(3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP), clip) {
+anv_batch_emit(&cmd_buffer->batch,
+GENX(3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP), clip) {
 clip.SFClipViewportPointer = sf_clip_state.offset;
 }
 }
@@ -117,7 +117,7 @@ genX(cmd_buffer_config_l3)(struct anv_cmd_buffer *cmd_buffer, bool enable_slm)
 * flushed, which involves a first PIPE_CONTROL flush which stalls the
 * pipeline...
 */
-anv_batch_emit_blk(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
+anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
 pc.DCFlushEnable = true;
 pc.PostSyncOperation = NoWrite;
 pc.CommandStreamerStallEnable = true;
@@ -137,7 +137,7 @@ genX(cmd_buffer_config_l3)(struct anv_cmd_buffer *cmd_buffer, bool enable_slm)
 * previous and subsequent PIPE_CONTROLs already guarantee that there is
 * no concurrent GPGPU kernel execution (see SKL HSD 2132585).
 */
-anv_batch_emit_blk(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
+anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
 pc.TextureCacheInvalidationEnable = true,
 pc.ConstantCacheInvalidationEnable = true,
 pc.InstructionCacheInvalidateEnable = true,
@@ -148,13 +148,13 @@ genX(cmd_buffer_config_l3)(struct anv_cmd_buffer *cmd_buffer, bool enable_slm)
 /* Now send a third stalling flush to make sure that invalidation is
 * complete when the L3 configuration registers are modified.
 */
-anv_batch_emit_blk(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
+anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
 pc.DCFlushEnable = true;
 pc.PostSyncOperation = NoWrite;
 pc.CommandStreamerStallEnable = true;
 }
-anv_batch_emit_blk(&cmd_buffer->batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
+anv_batch_emit(&cmd_buffer->batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
 lri.RegisterOffset = GENX(L3CNTLREG_num);
 lri.DataDWord = l3cr_val;
 }
@@ -250,8 +250,7 @@ genX(cmd_buffer_flush_dynamic_state)(struct anv_cmd_buffer *cmd_buffer)
 if (!cmd_buffer->device->info.has_llc)
 anv_state_clflush(cc_state);
-anv_batch_emit_blk(&cmd_buffer->batch,
-GENX(3DSTATE_CC_STATE_POINTERS), ccp) {
+anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CC_STATE_POINTERS), ccp) {
 ccp.ColorCalcStatePointer = cc_state.offset;
 ccp.ColorCalcStatePointerValid = true;
 }
@@ -295,8 +294,7 @@ genX(cmd_buffer_flush_dynamic_state)(struct anv_cmd_buffer *cmd_buffer)
 if (!cmd_buffer->device->info.has_llc)
 anv_state_clflush(cc_state);
-anv_batch_emit_blk(&cmd_buffer->batch,
-GEN9_3DSTATE_CC_STATE_POINTERS, ccp) {
+anv_batch_emit(&cmd_buffer->batch, GEN9_3DSTATE_CC_STATE_POINTERS, ccp) {
 ccp.ColorCalcStatePointer = cc_state.offset;
 ccp.ColorCalcStatePointerValid = true;
 }
@@ -329,7 +327,7 @@ genX(cmd_buffer_flush_dynamic_state)(struct anv_cmd_buffer *cmd_buffer)
 if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
 ANV_CMD_DIRTY_INDEX_BUFFER)) {
-anv_batch_emit_blk(&cmd_buffer->batch, GENX(3DSTATE_VF), vf) {
+anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_VF), vf) {
 vf.IndexedDrawCutIndexEnable = pipeline->primitive_restart;
 vf.CutIndex = cmd_buffer->state.restart_index;
 }
@@ -359,7 +357,7 @@ void genX(CmdBindIndexBuffer)(
 cmd_buffer->state.restart_index = restart_index_for_type[indexType];
-anv_batch_emit_blk(&cmd_buffer->batch, GENX(3DSTATE_INDEX_BUFFER), ib) {
+anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_INDEX_BUFFER), ib) {
 ib.IndexFormat = vk_to_gen_index_type[indexType];
 ib.MemoryObjectControlState = GENX(MOCS);
 ib.BufferStartingAddress =
@@ -399,7 +397,7 @@ flush_compute_descriptor_set(struct anv_cmd_buffer *cmd_buffer)
 unsigned push_constant_regs = reg_aligned_constant_size / 32;
 if (push_state.alloc_size) {
-anv_batch_emit_blk(&cmd_buffer->batch, GENX(MEDIA_CURBE_LOAD), curbe) {
+anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_CURBE_LOAD), curbe) {
 curbe.CURBETotalDataLength = push_state.alloc_size;
 curbe.CURBEDataStartAddress = push_state.offset;
 }
@@ -432,8 +430,8 @@ flush_compute_descriptor_set(struct anv_cmd_buffer *cmd_buffer)
 pipeline->cs_thread_width_max);
 uint32_t size = GENX(INTERFACE_DESCRIPTOR_DATA_length) * sizeof(uint32_t);
-anv_batch_emit_blk(&cmd_buffer->batch,
-GENX(MEDIA_INTERFACE_DESCRIPTOR_LOAD), mid) {
+anv_batch_emit(&cmd_buffer->batch,
+GENX(MEDIA_INTERFACE_DESCRIPTOR_LOAD), mid) {
 mid.InterfaceDescriptorTotalLength = size;
 mid.InterfaceDescriptorDataStartAddress = state.offset;
 }
@@ -476,7 +474,7 @@ void genX(CmdSetEvent)(
 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
 ANV_FROM_HANDLE(anv_event, event, _event);
-anv_batch_emit_blk(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
+anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
 pc.DestinationAddressType = DAT_PPGTT,
 pc.PostSyncOperation = WriteImmediateData,
 pc.Address = (struct anv_address) {
@@ -495,7 +493,7 @@ void genX(CmdResetEvent)(
 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
 ANV_FROM_HANDLE(anv_event, event, _event);
-anv_batch_emit_blk(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
+anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
 pc.DestinationAddressType = DAT_PPGTT;
 pc.PostSyncOperation = WriteImmediateData;
 pc.Address = (struct anv_address) {
@@ -523,7 +521,7 @@ void genX(CmdWaitEvents)(
 for (uint32_t i = 0; i < eventCount; i++) {
 ANV_FROM_HANDLE(anv_event, event, pEvents[i]);
-anv_batch_emit_blk(&cmd_buffer->batch, GENX(MI_SEMAPHORE_WAIT), sem) {
+anv_batch_emit(&cmd_buffer->batch, GENX(MI_SEMAPHORE_WAIT), sem) {
 sem.WaitMode = PollingMode,
 sem.CompareOperation = COMPARE_SAD_EQUAL_SDD,
 sem.SemaphoreDataDword = VK_EVENT_SET,
@@ -39,7 +39,7 @@ emit_ia_state(struct anv_pipeline *pipeline,
 const VkPipelineInputAssemblyStateCreateInfo *info,
 const struct anv_graphics_pipeline_create_info *extra)
 {
-anv_batch_emit_blk(&pipeline->batch, GENX(3DSTATE_VF_TOPOLOGY), vft) {
+anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VF_TOPOLOGY), vft) {
 vft.PrimitiveTopologyType = pipeline->topology;
 }
 }
@@ -192,7 +192,7 @@ emit_cb_state(struct anv_pipeline *pipeline,
 struct GENX(BLEND_STATE_ENTRY) *bs0 = &blend_state.Entry[0];
-anv_batch_emit_blk(&pipeline->batch, GENX(3DSTATE_PS_BLEND), blend) {
+anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PS_BLEND), blend) {
 blend.AlphaToCoverageEnable = blend_state.AlphaToCoverageEnable;
 blend.HasWriteableRT = has_writeable_rt;
 blend.ColorBufferBlendEnable = bs0->ColorBufferBlendEnable;
@@ -209,8 +208,7 @@ emit_cb_state(struct anv_pipeline *pipeline,
 if (!device->info.has_llc)
 anv_state_clflush(pipeline->blend_state);
-anv_batch_emit_blk(&pipeline->batch,
-GENX(3DSTATE_BLEND_STATE_POINTERS), bsp) {
+anv_batch_emit(&pipeline->batch, GENX(3DSTATE_BLEND_STATE_POINTERS), bsp) {
 bsp.BlendStatePointer = pipeline->blend_state.offset;
 bsp.BlendStatePointerValid = true;
 }
@@ -291,7 +290,7 @@ emit_ms_state(struct anv_pipeline *pipeline,
 if (info && info->sampleShadingEnable)
 anv_finishme("VkPipelineMultisampleStateCreateInfo::sampleShadingEnable");
-anv_batch_emit_blk(&pipeline->batch, GENX(3DSTATE_MULTISAMPLE), ms) {
+anv_batch_emit(&pipeline->batch, GENX(3DSTATE_MULTISAMPLE), ms) {
 /* The PRM says that this bit is valid only for DX9:
 *
 * SW can choose to set this bit only for DX9 API. DX10/OGL API's
@@ -303,7 +302,7 @@ emit_ms_state(struct anv_pipeline *pipeline,
 ms.NumberofMultisamples = log2_samples;
 }
-anv_batch_emit_blk(&pipeline->batch, GENX(3DSTATE_SAMPLE_MASK), sm) {
+anv_batch_emit(&pipeline->batch, GENX(3DSTATE_SAMPLE_MASK), sm) {
 sm.SampleMask = sample_mask;
 }
 }
@@ -351,7 +350,7 @@ genX(graphics_pipeline_create)(
 emit_urb_setup(pipeline);
 const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline);
-anv_batch_emit_blk(&pipeline->batch, GENX(3DSTATE_CLIP), clip) {
+anv_batch_emit(&pipeline->batch, GENX(3DSTATE_CLIP), clip) {
 clip.ClipEnable = !(extra && extra->use_rectlist);
 clip.EarlyCullEnable = true;
 clip.APIMode = 1; /* D3D */
@@ -373,7 +372,7 @@ genX(graphics_pipeline_create)(
 clip.MaximumVPIndex = pCreateInfo->pViewportState->viewportCount - 1;
 }
-anv_batch_emit_blk(&pipeline->batch, GENX(3DSTATE_WM), wm) {
+anv_batch_emit(&pipeline->batch, GENX(3DSTATE_WM), wm) {
 wm.StatisticsEnable = true;
 wm.LineEndCapAntialiasingRegionWidth = _05pixels;
 wm.LineAntialiasingRegionWidth = _10pixels;
@@ -386,13 +385,13 @@ genX(graphics_pipeline_create)(
 }
 if (pipeline->gs_kernel == NO_KERNEL) {
-anv_batch_emit_blk(&pipeline->batch, GENX(3DSTATE_GS), gs);
+anv_batch_emit(&pipeline->batch, GENX(3DSTATE_GS), gs);
 } else {
 const struct brw_gs_prog_data *gs_prog_data = get_gs_prog_data(pipeline);
 offset = 1;
 length = (gs_prog_data->base.vue_map.num_slots + 1) / 2 - offset;
-anv_batch_emit_blk(&pipeline->batch, GENX(3DSTATE_GS), gs) {
+anv_batch_emit(&pipeline->batch, GENX(3DSTATE_GS), gs) {
 gs.SingleProgramFlow = false;
 gs.KernelStartPointer = pipeline->gs_kernel;
 gs.VectorMaskEnable = false;
@@ -444,7 +443,7 @@ genX(graphics_pipeline_create)(
 pipeline->vs_vec4;
 if (vs_start == NO_KERNEL || (extra && extra->disable_vs)) {
-anv_batch_emit_blk(&pipeline->batch, GENX(3DSTATE_VS), vs) {
+anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VS), vs) {
 vs.FunctionEnable = false;
 /* Even if VS is disabled, SBE still gets the amount of
 * vertex data to read from this field. */
@@ -452,7 +451,7 @@ genX(graphics_pipeline_create)(
 vs.VertexURBEntryOutputLength = length;
 }
 } else {
-anv_batch_emit_blk(&pipeline->batch, GENX(3DSTATE_VS), vs) {
+anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VS), vs) {
 vs.KernelStartPointer = vs_start;
 vs.SingleVertexDispatch = false;
 vs.VectorMaskEnable = false;
@@ -493,14 +492,14 @@ genX(graphics_pipeline_create)(
 const int num_thread_bias = GEN_GEN == 8 ? 2 : 1;
 if (pipeline->ps_ksp0 == NO_KERNEL) {
-anv_batch_emit_blk(&pipeline->batch, GENX(3DSTATE_PS), ps);
-anv_batch_emit_blk(&pipeline->batch, GENX(3DSTATE_PS_EXTRA), extra) {
+anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PS), ps);
+anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PS_EXTRA), extra) {
 extra.PixelShaderValid = false;
 }
 } else {
 emit_3dstate_sbe(pipeline);
-anv_batch_emit_blk(&pipeline->batch, GENX(3DSTATE_PS), ps) {
+anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PS), ps) {
 ps.KernelStartPointer0 = pipeline->ps_ksp0;
 ps.KernelStartPointer1 = 0;
 ps.KernelStartPointer2 = pipeline->ps_ksp2;
@@ -527,7 +526,7 @@ genX(graphics_pipeline_create)(
 bool per_sample_ps = pCreateInfo->pMultisampleState &&
 pCreateInfo->pMultisampleState->sampleShadingEnable;
-anv_batch_emit_blk(&pipeline->batch, GENX(3DSTATE_PS_EXTRA), ps) {
+anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PS_EXTRA), ps) {
 ps.PixelShaderValid = true;
 ps.PixelShaderKillsPixel = wm_prog_data->uses_kill;
 ps.PixelShaderComputedDepthMode = wm_prog_data->computed_depth_mode;
@@ -49,12 +49,12 @@ genX(cmd_buffer_emit_state_base_address)(struct anv_cmd_buffer *cmd_buffer)
 * this, we get GPU hangs when using multi-level command buffers which
 * clear depth, reset state base address, and then go render stuff.
 */
-anv_batch_emit_blk(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
+anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
 pc.RenderTargetCacheFlushEnable = true;
 }
 #endif
-anv_batch_emit_blk(&cmd_buffer->batch, GENX(STATE_BASE_ADDRESS), sba) {
+anv_batch_emit(&cmd_buffer->batch, GENX(STATE_BASE_ADDRESS), sba) {
 sba.GeneralStateBaseAddress = (struct anv_address) { scratch_bo, 0 };
 sba.GeneralStateMemoryObjectControlState = GENX(MOCS);
 sba.GeneralStateBaseAddressModifyEnable = true;
@@ -131,7 +131,7 @@ genX(cmd_buffer_emit_state_base_address)(struct anv_cmd_buffer *cmd_buffer)
 * units cache the binding table in the texture cache. However, we have
 * yet to be able to actually confirm this.
 */
-anv_batch_emit_blk(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
+anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
 pc.TextureCacheInvalidationEnable = true;
 }
 }
@@ -295,10 +295,10 @@ cmd_buffer_flush_push_constants(struct anv_cmd_buffer *cmd_buffer)
 struct anv_state state = anv_cmd_buffer_push_constants(cmd_buffer, stage);
 if (state.offset == 0) {
-anv_batch_emit_blk(&cmd_buffer->batch, GENX(3DSTATE_CONSTANT_VS), c)
+anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CONSTANT_VS), c)
 c._3DCommandSubOpcode = push_constant_opcodes[stage];
 } else {
-anv_batch_emit_blk(&cmd_buffer->batch, GENX(3DSTATE_CONSTANT_VS), c) {
+anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CONSTANT_VS), c) {
 c._3DCommandSubOpcode = push_constant_opcodes[stage],
 c.ConstantBody = (struct GENX(3DSTATE_CONSTANT_BODY)) {
 #if GEN_GEN >= 9
@@ -420,7 +420,7 @@ genX(cmd_buffer_flush_state)(struct anv_cmd_buffer *cmd_buffer)
 * PIPE_CONTROL needs to be sent before any combination of VS
 * associated 3DSTATE."
 */
-anv_batch_emit_blk(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
+anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
 pc.DepthStallEnable = true;
 pc.PostSyncOperation = WriteImmediateData;
 pc.Address =
@@ -521,7 +521,7 @@ void genX(CmdDraw)(
 if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
 emit_base_vertex_instance(cmd_buffer, firstVertex, firstInstance);
-anv_batch_emit_blk(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
+anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
 prim.VertexAccessType = SEQUENTIAL;
 prim.PrimitiveTopologyType = pipeline->topology;
 prim.VertexCountPerInstance = vertexCount;
@@ -549,7 +549,7 @@ void genX(CmdDrawIndexed)(
 if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
 emit_base_vertex_instance(cmd_buffer, vertexOffset, firstInstance);
-anv_batch_emit_blk(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
+anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
 prim.VertexAccessType = RANDOM;
 prim.PrimitiveTopologyType = pipeline->topology;
 prim.VertexCountPerInstance = indexCount;
@@ -572,7 +572,7 @@ static void
 emit_lrm(struct anv_batch *batch,
 uint32_t reg, struct anv_bo *bo, uint32_t offset)
 {
-anv_batch_emit_blk(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
+anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
 lrm.RegisterAddress = reg;
 lrm.MemoryAddress = (struct anv_address) { bo, offset };
 }
@@ -581,7 +581,7 @@ emit_lrm(struct anv_batch *batch,
 static void
 emit_lri(struct anv_batch *batch, uint32_t reg, uint32_t imm)
 {
-anv_batch_emit_blk(batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
+anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
 lri.RegisterOffset = reg;
 lri.DataDWord = imm;
 }
@@ -612,7 +612,7 @@ void genX(CmdDrawIndirect)(
 emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 12);
 emit_lri(&cmd_buffer->batch, GEN7_3DPRIM_BASE_VERTEX, 0);
-anv_batch_emit_blk(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
+anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
 prim.IndirectParameterEnable = true;
 prim.VertexAccessType = SEQUENTIAL;
 prim.PrimitiveTopologyType = pipeline->topology;
@@ -645,7 +645,7 @@ void genX(CmdDrawIndexedIndirect)(
 emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_BASE_VERTEX, bo, bo_offset + 12);
 emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 16);
-anv_batch_emit_blk(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
+anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
 prim.IndirectParameterEnable = true;
 prim.VertexAccessType = RANDOM;
 prim.PrimitiveTopologyType = pipeline->topology;
@@ -697,7 +697,7 @@ void genX(CmdDispatch)(
 genX(cmd_buffer_flush_compute_state)(cmd_buffer);
-anv_batch_emit_blk(&cmd_buffer->batch, GENX(GPGPU_WALKER), ggw) {
+anv_batch_emit(&cmd_buffer->batch, GENX(GPGPU_WALKER), ggw) {
 ggw.SIMDSize = prog_data->simd_size / 16;
 ggw.ThreadDepthCounterMaximum = 0;
 ggw.ThreadHeightCounterMaximum = 0;
@@ -709,7 +709,7 @@ void genX(CmdDispatch)(
 ggw.BottomExecutionMask = 0xffffffff;
 }
-anv_batch_emit_blk(&cmd_buffer->batch, GENX(MEDIA_STATE_FLUSH), msf);
+anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_STATE_FLUSH), msf);
 }
 #define GPGPU_DISPATCHDIMX 0x2500
@@ -761,7 +761,7 @@ void genX(CmdDispatchIndirect)(
 emit_lrm(batch, MI_PREDICATE_SRC0, bo, bo_offset + 0);
 /* predicate = (compute_dispatch_indirect_x_size == 0); */
-anv_batch_emit_blk(batch, GENX(MI_PREDICATE), mip) {
+anv_batch_emit(batch, GENX(MI_PREDICATE), mip) {
 mip.LoadOperation = LOAD_LOAD;
 mip.CombineOperation = COMBINE_SET;
 mip.CompareOperation = COMPARE_SRCS_EQUAL;
@@ -771,7 +771,7 @@ void genX(CmdDispatchIndirect)(
 emit_lrm(batch, MI_PREDICATE_SRC0, bo, bo_offset + 4);
 /* predicate |= (compute_dispatch_indirect_y_size == 0); */
-anv_batch_emit_blk(batch, GENX(MI_PREDICATE), mip) {
+anv_batch_emit(batch, GENX(MI_PREDICATE), mip) {
 mip.LoadOperation = LOAD_LOAD;
 mip.CombineOperation = COMBINE_OR;
 mip.CompareOperation = COMPARE_SRCS_EQUAL;
@@ -781,7 +781,7 @@ void genX(CmdDispatchIndirect)(
 emit_lrm(batch, MI_PREDICATE_SRC0, bo, bo_offset + 8);
 /* predicate |= (compute_dispatch_indirect_z_size == 0); */
-anv_batch_emit_blk(batch, GENX(MI_PREDICATE), mip) {
+anv_batch_emit(batch, GENX(MI_PREDICATE), mip) {
 mip.LoadOperation = LOAD_LOAD;
 mip.CombineOperation = COMBINE_OR;
 mip.CompareOperation = COMPARE_SRCS_EQUAL;
@@ -789,14 +789,14 @@ void genX(CmdDispatchIndirect)(
 /* predicate = !predicate; */
 #define COMPARE_FALSE 1
-anv_batch_emit_blk(batch, GENX(MI_PREDICATE), mip) {
+anv_batch_emit(batch, GENX(MI_PREDICATE), mip) {
 mip.LoadOperation = LOAD_LOADINV;
 mip.CombineOperation = COMBINE_OR;
 mip.CompareOperation = COMPARE_FALSE;
 }
 #endif
-anv_batch_emit_blk(batch, GENX(GPGPU_WALKER), ggw) {
+anv_batch_emit(batch, GENX(GPGPU_WALKER), ggw) {
 ggw.IndirectParameterEnable = true;
 ggw.PredicateEnable = GEN_GEN <= 7;
 ggw.SIMDSize = prog_data->simd_size / 16;
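To make the MI_PREDICATE sequence above easier to follow: per the comments in those hunks, the four packets set predicate = (x_size == 0), then OR in (y_size == 0) and (z_size == 0), and finally invert the result, so the predicate ends up true only when all three indirect dispatch dimensions are non-zero; the GPGPU_WALKER above is then predicated on it for GEN_GEN <= 7. A CPU-side restatement of that condition, for illustration only (the helper is not part of the driver):

#include <stdbool.h>
#include <stdint.h>

/* The value the MI_PREDICATE packets build up, restated on the CPU. */
static inline bool
indirect_dispatch_runs(uint32_t x_size, uint32_t y_size, uint32_t z_size)
{
   return !(x_size == 0 || y_size == 0 || z_size == 0);   /* x && y && z */
}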
@@ -807,7 +807,7 @@ void genX(CmdDispatchIndirect)(
 ggw.BottomExecutionMask = 0xffffffff;
 }
-anv_batch_emit_blk(batch, GENX(MEDIA_STATE_FLUSH), msf);
+anv_batch_emit(batch, GENX(MEDIA_STATE_FLUSH), msf);
 }
 static void
@@ -825,7 +825,7 @@ flush_pipeline_before_pipeline_select(struct anv_cmd_buffer *cmd_buffer,
 * hardware too.
 */
 if (pipeline == GPGPU)
-anv_batch_emit_blk(&cmd_buffer->batch, GENX(3DSTATE_CC_STATE_POINTERS), t);
+anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CC_STATE_POINTERS), t);
 #elif GEN_GEN <= 7
 /* From "BXML » GT » MI » vol1a GPU Overview » [Instruction]
 * PIPELINE_SELECT [DevBWR+]":
@@ -837,7 +837,7 @@ flush_pipeline_before_pipeline_select(struct anv_cmd_buffer *cmd_buffer,
 * command to invalidate read only caches prior to programming
 * MI_PIPELINE_SELECT command to change the Pipeline Select Mode.
 */
-anv_batch_emit_blk(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
+anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
 pc.RenderTargetCacheFlushEnable = true;
 pc.DepthCacheFlushEnable = true;
 pc.DCFlushEnable = true;
@@ -845,7 +845,7 @@ flush_pipeline_before_pipeline_select(struct anv_cmd_buffer *cmd_buffer,
 pc.CommandStreamerStallEnable = true;
 }
-anv_batch_emit_blk(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
+anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
 pc.TextureCacheInvalidationEnable = true;
 pc.ConstantCacheInvalidationEnable = true;
 pc.StateCacheInvalidationEnable = true;
@@ -861,7 +861,7 @@ genX(flush_pipeline_select_3d)(struct anv_cmd_buffer *cmd_buffer)
 if (cmd_buffer->state.current_pipeline != _3D) {
 flush_pipeline_before_pipeline_select(cmd_buffer, _3D);
-anv_batch_emit_blk(&cmd_buffer->batch, GENX(PIPELINE_SELECT), ps) {
+anv_batch_emit(&cmd_buffer->batch, GENX(PIPELINE_SELECT), ps) {
 #if GEN_GEN >= 9
 ps.MaskBits = 3;
 #endif
@@ -878,7 +878,7 @@ genX(flush_pipeline_select_gpgpu)(struct anv_cmd_buffer *cmd_buffer)
 if (cmd_buffer->state.current_pipeline != GPGPU) {
 flush_pipeline_before_pipeline_select(cmd_buffer, GPGPU);
-anv_batch_emit_blk(&cmd_buffer->batch, GENX(PIPELINE_SELECT), ps) {
+anv_batch_emit(&cmd_buffer->batch, GENX(PIPELINE_SELECT), ps) {
 #if GEN_GEN >= 9
 ps.MaskBits = 3;
 #endif
@@ -937,7 +937,7 @@ cmd_buffer_emit_depth_stencil(struct anv_cmd_buffer *cmd_buffer)
 /* Emit 3DSTATE_DEPTH_BUFFER */
 if (has_depth) {
-anv_batch_emit_blk(&cmd_buffer->batch, GENX(3DSTATE_DEPTH_BUFFER), db) {
+anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_DEPTH_BUFFER), db) {
 db.SurfaceType = SURFTYPE_2D;
 db.DepthWriteEnable = true;
 db.StencilWriteEnable = has_stencil;
@@ -984,7 +984,7 @@ cmd_buffer_emit_depth_stencil(struct anv_cmd_buffer *cmd_buffer)
 * nor stencil buffer is present. Also, D16_UNORM is not allowed to
 * be combined with a stencil buffer so we use D32_FLOAT instead.
 */
-anv_batch_emit_blk(&cmd_buffer->batch, GENX(3DSTATE_DEPTH_BUFFER), db) {
+anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_DEPTH_BUFFER), db) {
 db.SurfaceType = SURFTYPE_2D;
 db.SurfaceFormat = D32_FLOAT;
 db.Width = fb->width - 1;
@@ -995,7 +995,7 @@ cmd_buffer_emit_depth_stencil(struct anv_cmd_buffer *cmd_buffer)
 /* Emit 3DSTATE_STENCIL_BUFFER */
 if (has_stencil) {
-anv_batch_emit_blk(&cmd_buffer->batch, GENX(3DSTATE_STENCIL_BUFFER), sb) {
+anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_STENCIL_BUFFER), sb) {
 #if GEN_GEN >= 8 || GEN_IS_HASWELL
 sb.StencilBufferEnable = true,
 #endif
@@ -1017,14 +1017,14 @@ cmd_buffer_emit_depth_stencil(struct anv_cmd_buffer *cmd_buffer)
 };
 }
 } else {
-anv_batch_emit_blk(&cmd_buffer->batch, GENX(3DSTATE_STENCIL_BUFFER), sb);
+anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_STENCIL_BUFFER), sb);
 }
 /* Disable hierarchial depth buffers. */
-anv_batch_emit_blk(&cmd_buffer->batch, GENX(3DSTATE_HIER_DEPTH_BUFFER), hz);
+anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_HIER_DEPTH_BUFFER), hz);
 /* Clear the clear params. */
-anv_batch_emit_blk(&cmd_buffer->batch, GENX(3DSTATE_CLEAR_PARAMS), cp);
+anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CLEAR_PARAMS), cp);
 }
 /**
@@ -1058,7 +1058,7 @@ void genX(CmdBeginRenderPass)(
 const VkRect2D *render_area = &pRenderPassBegin->renderArea;
-anv_batch_emit_blk(&cmd_buffer->batch, GENX(3DSTATE_DRAWING_RECTANGLE), r) {
+anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_DRAWING_RECTANGLE), r) {
 r.ClippedDrawingRectangleYMin = MAX2(render_area->offset.y, 0);
 r.ClippedDrawingRectangleXMin = MAX2(render_area->offset.x, 0);
 r.ClippedDrawingRectangleYMax =
@@ -1098,7 +1098,7 @@ static void
 emit_ps_depth_count(struct anv_batch *batch,
 struct anv_bo *bo, uint32_t offset)
 {
-anv_batch_emit_blk(batch, GENX(PIPE_CONTROL), pc) {
+anv_batch_emit(batch, GENX(PIPE_CONTROL), pc) {
 pc.DestinationAddressType = DAT_PPGTT;
 pc.PostSyncOperation = WritePSDepthCount;
 pc.DepthStallEnable = true;
@@ -1110,7 +1110,7 @@ static void
 emit_query_availability(struct anv_batch *batch,
 struct anv_bo *bo, uint32_t offset)
 {
-anv_batch_emit_blk(batch, GENX(PIPE_CONTROL), pc) {
+anv_batch_emit(batch, GENX(PIPE_CONTROL), pc) {
 pc.DestinationAddressType = DAT_PPGTT;
 pc.PostSyncOperation = WriteImmediateData;
 pc.Address = (struct anv_address) { bo, offset };
@@ -1135,7 +1135,7 @@ void genX(CmdBeginQuery)(
 */
 if (cmd_buffer->state.need_query_wa) {
 cmd_buffer->state.need_query_wa = false;
-anv_batch_emit_blk(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
+anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
 pc.DepthCacheFlushEnable = true;
 pc.DepthStallEnable = true;
 }
@@ -1192,11 +1192,11 @@ void genX(CmdWriteTimestamp)(
 switch (pipelineStage) {
 case VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT:
-anv_batch_emit_blk(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM), srm) {
+anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM), srm) {
 srm.RegisterAddress = TIMESTAMP;
 srm.MemoryAddress = (struct anv_address) { &pool->bo, offset };
 }
-anv_batch_emit_blk(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM), srm) {
+anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM), srm) {
 srm.RegisterAddress = TIMESTAMP + 4;
 srm.MemoryAddress = (struct anv_address) { &pool->bo, offset + 4 };
 }
@@ -1204,7 +1204,7 @@ void genX(CmdWriteTimestamp)(
 default:
 /* Everything else is bottom-of-pipe */
-anv_batch_emit_blk(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
+anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
 pc.DestinationAddressType = DAT_PPGTT,
 pc.PostSyncOperation = WriteTimestamp,
 pc.Address = (struct anv_address) { &pool->bo, offset };
@@ -1253,11 +1253,11 @@ static void
 emit_load_alu_reg_u64(struct anv_batch *batch, uint32_t reg,
 struct anv_bo *bo, uint32_t offset)
 {
-anv_batch_emit_blk(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
+anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
 lrm.RegisterAddress = reg,
 lrm.MemoryAddress = (struct anv_address) { bo, offset };
 }
-anv_batch_emit_blk(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
+anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
 lrm.RegisterAddress = reg + 4;
 lrm.MemoryAddress = (struct anv_address) { bo, offset + 4 };
 }
@@ -1267,13 +1267,13 @@ static void
 store_query_result(struct anv_batch *batch, uint32_t reg,
 struct anv_bo *bo, uint32_t offset, VkQueryResultFlags flags)
 {
-anv_batch_emit_blk(batch, GENX(MI_STORE_REGISTER_MEM), srm) {
+anv_batch_emit(batch, GENX(MI_STORE_REGISTER_MEM), srm) {
 srm.RegisterAddress = reg;
 srm.MemoryAddress = (struct anv_address) { bo, offset };
 }
 if (flags & VK_QUERY_RESULT_64_BIT) {
-anv_batch_emit_blk(batch, GENX(MI_STORE_REGISTER_MEM), srm) {
+anv_batch_emit(batch, GENX(MI_STORE_REGISTER_MEM), srm) {
 srm.RegisterAddress = reg + 4;
 srm.MemoryAddress = (struct anv_address) { bo, offset + 4 };
 }
@@ -1296,7 +1296,7 @@ void genX(CmdCopyQueryPoolResults)(
 uint32_t slot_offset, dst_offset;
 if (flags & VK_QUERY_RESULT_WAIT_BIT) {
-anv_batch_emit_blk(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
+anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
 pc.CommandStreamerStallEnable = true;
 pc.StallAtPixelScoreboard = true;
 }
@@ -105,7 +105,7 @@ genX(compute_pipeline_create)(
 const uint32_t vfe_curbe_allocation =
 push_constant_regs * pipeline->cs_thread_width_max;
-anv_batch_emit_blk(&pipeline->batch, GENX(MEDIA_VFE_STATE), vfe) {
+anv_batch_emit(&pipeline->batch, GENX(MEDIA_VFE_STATE), vfe) {
 vfe.ScratchSpaceBasePointer = pipeline->scratch_start[MESA_SHADER_COMPUTE];
 vfe.PerThreadScratchSpace = ffs(cs_prog_data->base.total_scratch / 2048);
 #if GEN_GEN > 7
@@ -130,7 +130,7 @@ emit_vertex_input(struct anv_pipeline *pipeline,
 * that controls instancing. On Haswell and prior, that's part of
 * VERTEX_BUFFER_STATE which we emit later.
 */
-anv_batch_emit_blk(&pipeline->batch, GENX(3DSTATE_VF_INSTANCING), vfi) {
+anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VF_INSTANCING), vfi) {
 vfi.InstancingEnable = pipeline->instancing_enable[desc->binding],
 vfi.VertexElementIndex = slot,
 /* Vulkan so far doesn't have an instance divisor, so
@@ -173,7 +173,7 @@ emit_vertex_input(struct anv_pipeline *pipeline,
 }
 #if GEN_GEN >= 8
-anv_batch_emit_blk(&pipeline->batch, GENX(3DSTATE_VF_SGVS), sgvs) {
+anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VF_SGVS), sgvs) {
 sgvs.VertexIDEnable = vs_prog_data->uses_vertexid;
 sgvs.VertexIDComponentNumber = 2;
 sgvs.VertexIDElementOffset = id_slot;
@@ -198,7 +198,7 @@ emit_urb_setup(struct anv_pipeline *pipeline)
 * 3DSTATE_SAMPLER_STATE_POINTER_VS command. Only one PIPE_CONTROL
 * needs to be sent before any combination of VS associated 3DSTATE."
 */
-anv_batch_emit_blk(&pipeline->batch, GEN7_PIPE_CONTROL, pc) {
+anv_batch_emit(&pipeline->batch, GEN7_PIPE_CONTROL, pc) {
 pc.DepthStallEnable = true;
 pc.PostSyncOperation = WriteImmediateData;
 pc.Address = (struct anv_address) { &device->workaround_bo, 0 };
@@ -208,8 +208,8 @@ emit_urb_setup(struct anv_pipeline *pipeline)
 unsigned push_start = 0;
 for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_FRAGMENT; i++) {
 unsigned push_size = pipeline->urb.push_size[i];
-anv_batch_emit_blk(&pipeline->batch,
-GENX(3DSTATE_PUSH_CONSTANT_ALLOC_VS), alloc) {
+anv_batch_emit(&pipeline->batch,
+GENX(3DSTATE_PUSH_CONSTANT_ALLOC_VS), alloc) {
 alloc._3DCommandSubOpcode = 18 + i;
 alloc.ConstantBufferOffset = (push_size > 0) ? push_start : 0;
 alloc.ConstantBufferSize = push_size;
@@ -218,7 +218,7 @@ emit_urb_setup(struct anv_pipeline *pipeline)
 for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
-anv_batch_emit_blk(&pipeline->batch, GENX(3DSTATE_URB_VS), urb) {
+anv_batch_emit(&pipeline->batch, GENX(3DSTATE_URB_VS), urb) {
 urb._3DCommandSubOpcode = 48 + i;
 urb.VSURBStartingAddress = pipeline->urb.start[i];
 urb.VSURBEntryAllocationSize = pipeline->urb.size[i] - 1;
@@ -44,30 +44,30 @@ genX(init_device_state)(struct anv_device *device)
 batch.start = batch.next = cmds;
 batch.end = (void *) cmds + sizeof(cmds);
-anv_batch_emit_blk(&batch, GENX(PIPELINE_SELECT), ps) {
+anv_batch_emit(&batch, GENX(PIPELINE_SELECT), ps) {
 #if GEN_GEN >= 9
 ps.MaskBits = 3;
 #endif
 ps.PipelineSelection = _3D;
 }
-anv_batch_emit_blk(&batch, GENX(3DSTATE_VF_STATISTICS), vfs)
+anv_batch_emit(&batch, GENX(3DSTATE_VF_STATISTICS), vfs)
 vfs.StatisticsEnable = true;
-anv_batch_emit_blk(&batch, GENX(3DSTATE_HS), hs);
-anv_batch_emit_blk(&batch, GENX(3DSTATE_TE), ts);
-anv_batch_emit_blk(&batch, GENX(3DSTATE_DS), ds);
+anv_batch_emit(&batch, GENX(3DSTATE_HS), hs);
+anv_batch_emit(&batch, GENX(3DSTATE_TE), ts);
+anv_batch_emit(&batch, GENX(3DSTATE_DS), ds);
-anv_batch_emit_blk(&batch, GENX(3DSTATE_STREAMOUT), so);
-anv_batch_emit_blk(&batch, GENX(3DSTATE_AA_LINE_PARAMETERS), aa);
+anv_batch_emit(&batch, GENX(3DSTATE_STREAMOUT), so);
+anv_batch_emit(&batch, GENX(3DSTATE_AA_LINE_PARAMETERS), aa);
 #if GEN_GEN >= 8
-anv_batch_emit_blk(&batch, GENX(3DSTATE_WM_CHROMAKEY), ck);
+anv_batch_emit(&batch, GENX(3DSTATE_WM_CHROMAKEY), ck);
 /* See the Vulkan 1.0 spec Table 24.1 "Standard sample locations" and
 * VkPhysicalDeviceFeatures::standardSampleLocations.
 */
-anv_batch_emit_blk(&batch, GENX(3DSTATE_SAMPLE_PATTERN), sp) {
+anv_batch_emit(&batch, GENX(3DSTATE_SAMPLE_PATTERN), sp) {
 sp._1xSample0XOffset = 0.5;
 sp._1xSample0YOffset = 0.5;
 sp._2xSample0XOffset = 0.25;
@@ -135,7 +135,7 @@ genX(init_device_state)(struct anv_device *device)
 }
 #endif
-anv_batch_emit_blk(&batch, GENX(MI_BATCH_BUFFER_END), bbe);
+anv_batch_emit(&batch, GENX(MI_BATCH_BUFFER_END), bbe);
 assert(batch.next <= batch.end);