Fixes dEQP-GLES3.functional.shaders.texture_functions.textureoffset.sampler2d_fixed_fragment.

Signed-off-by: Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
Reviewed-by: Tomeu Vizoso <tomeu.vizoso@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/merge_requests/3169>
@@ -78,7 +78,7 @@ typedef struct midgard_branch {
  * emitted before the register allocation pass.
  */
-#define MIR_SRC_COUNT 3
+#define MIR_SRC_COUNT 4
 #define MIR_VEC_COMPONENTS 16
 typedef struct midgard_instruction {
@@ -89,7 +89,7 @@ typedef struct midgard_instruction {
         /* Instruction arguments represented as block-local SSA
          * indices, rather than registers. ~0 means unused. */
-        unsigned src[3];
+        unsigned src[MIR_SRC_COUNT];
         unsigned dest;
         /* vec16 swizzle, unpacked, per source */
@@ -558,7 +558,7 @@ v_mov(unsigned src, unsigned dest)
         midgard_instruction ins = {
                 .type = TAG_ALU_4,
                 .mask = 0xF,
-                .src = { SSA_UNUSED, src, SSA_UNUSED },
+                .src = { ~0, src, ~0, ~0 },
                 .swizzle = SWIZZLE_IDENTITY,
                 .dest = dest,
                 .alu = {
@@ -596,7 +596,7 @@ v_load_store_scratch(
                 .type = TAG_LOAD_STORE_4,
                 .mask = mask,
                 .dest = ~0,
-                .src = { ~0, ~0, ~0 },
+                .src = { ~0, ~0, ~0, ~0 },
                 .swizzle = SWIZZLE_IDENTITY_4,
                 .load_store = {
                         .op = is_store ? midgard_op_st_int4 : midgard_op_ld_int4,
@@ -161,7 +161,6 @@ quadword_size(int tag)
 /* SSA helper aliases to mimic the registers. */
-#define SSA_UNUSED ~0
 #define SSA_FIXED_SHIFT 24
 #define SSA_FIXED_REGISTER(reg) (((1 + (reg)) << SSA_FIXED_SHIFT) | 1)
 #define SSA_REG_FROM_FIXED(reg) ((((reg) & ~1) >> SSA_FIXED_SHIFT) - 1)
@@ -173,6 +172,7 @@ quadword_size(int tag)
 #define COMPONENT_W 0x3
 #define SWIZZLE_IDENTITY { \
         { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }, \
         { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }, \
         { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }, \
+        { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 } \
@@ -182,6 +182,7 @@ quadword_size(int tag)
         { 0, 1, 2, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, \
         { 0, 1, 2, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, \
         { 0, 1, 2, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, \
+        { 0, 1, 2, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, \
 }
 static inline unsigned
@@ -133,7 +133,7 @@ schedule_barrier(compiler_context *ctx)
                 .type = TAG_LOAD_STORE_4, \
                 .mask = 0xF, \
                 .dest = ~0, \
-                .src = { ~0, ~0, ~0 }, \
+                .src = { ~0, ~0, ~0, ~0 }, \
                 .swizzle = SWIZZLE_IDENTITY_4, \
                 .load_store = { \
                         .op = midgard_op_##name, \
@@ -238,7 +238,7 @@ v_alu_br_compact_cond(midgard_jmp_writeout_op op, unsigned tag, signed offset, u
                 .compact_branch = true,
                 .br_compact = compact,
                 .dest = ~0,
-                .src = { ~0, ~0, ~0 },
+                .src = { ~0, ~0, ~0, ~0 },
         };
         if (op == midgard_jmp_writeout_op_writeout)
@@ -259,7 +259,7 @@ v_branch(bool conditional, bool invert)
                         .invert_conditional = invert
                 },
                 .dest = ~0,
-                .src = { ~0, ~0, ~0 },
+                .src = { ~0, ~0, ~0, ~0 },
         };
         return ins;
@@ -992,6 +992,7 @@ emit_alu(compiler_context *ctx, nir_alu_instr *instr)
                         quirk_flipped_r24 ? ~0 : src0,
                         quirk_flipped_r24 ? src0 : src1,
                         src2,
+                        ~0
                 },
                 .dest = dest,
         };
@@ -1712,7 +1713,7 @@ emit_texop_native(compiler_context *ctx, nir_tex_instr *instr,
                 .type = TAG_TEXTURE_4,
                 .mask = 0xF,
                 .dest = nir_dest_index(ctx, &instr->dest),
-                .src = { ~0, ~0, ~0 },
+                .src = { ~0, ~0, ~0, ~0 },
                 .swizzle = SWIZZLE_IDENTITY_4,
                 .texture = {
                         .op = midgard_texop,
@@ -1823,6 +1824,16 @@ emit_texop_native(compiler_context *ctx, nir_tex_instr *instr,
                         break;
                 };
+                case nir_tex_src_offset: {
+                        ins.texture.offset_register = true;
+                        ins.src[3] = index;
+
+                        for (unsigned c = 0; c < MIR_VEC_COMPONENTS; ++c)
+                                ins.swizzle[3][c] = (c > COMPONENT_Z) ? 0 : c;
+
+                        emit_explicit_constant(ctx, index, index);
+                        break;
+                };
                 case nir_tex_src_comparator: {
                         /* TODO: generalize */
                         unsigned comp = COMPONENT_Z;
@@ -102,7 +102,7 @@ midgard_emit_derivatives(compiler_context *ctx, nir_alu_instr *instr)
                 .type = TAG_TEXTURE_4,
                 .mask = mask_of(nr_components),
                 .dest = nir_dest_index(ctx, &instr->dest.dest),
-                .src = { nir_alu_src_index(ctx, &instr->src[0]), ~0, ~0 },
+                .src = { nir_alu_src_index(ctx, &instr->src[0]), ~0, ~0, ~0 },
                 .texture = {
                         .op = mir_derivative_op(instr->op),
                         .format = MALI_TEX_2D,
@@ -40,7 +40,7 @@ midgard_lower_invert(compiler_context *ctx, midgard_block *block)
                 midgard_instruction not = {
                         .type = TAG_ALU_4,
                         .mask = ins->mask,
-                        .src = { temp, ~0, ~0 },
+                        .src = { temp, ~0, ~0, ~0 },
                         .swizzle = SWIZZLE_IDENTITY,
                         .dest = ins->dest,
                         .has_inline_constant = true,
@@ -116,7 +116,7 @@ midgard_opt_combine_projection(compiler_context *ctx, midgard_block *block)
                         .type = TAG_LOAD_STORE_4,
                         .mask = ins->mask,
                         .dest = to,
-                        .src = { frcp_from, ~0, ~0 },
+                        .src = { frcp_from, ~0, ~0, ~0 },
                         .swizzle = SWIZZLE_IDENTITY_4,
                         .load_store = {
                                 .op = frcp_component == COMPONENT_W ?
@@ -166,6 +166,10 @@ mir_print_instruction(midgard_instruction *ins)
         mir_print_index(ins->src[2]);
         mir_print_swizzle(ins->swizzle[2]);
+        printf(", ");
+        mir_print_index(ins->src[3]);
+        mir_print_swizzle(ins->swizzle[3]);
+
         if (ins->has_constants) {
                 uint32_t *uc = ins->constants;
                 float *fc = (float *) uc;
@@ -513,6 +513,7 @@ allocate_registers(compiler_context *ctx, bool *spilled)
                         set_class(l->class, ins->src[0], REG_CLASS_TEXR);
                         set_class(l->class, ins->src[1], REG_CLASS_TEXR);
                         set_class(l->class, ins->src[2], REG_CLASS_TEXR);
+                        set_class(l->class, ins->src[3], REG_CLASS_TEXR);
                 }
         }
@@ -544,6 +545,14 @@ allocate_registers(compiler_context *ctx, bool *spilled)
         return l;
 }
+
+/* Reverses 2 bits, used to pack swizzles of offsets for some reason */
+static unsigned
+mir_reverse2(unsigned in)
+{
+        return (in >> 1) | ((in & 1) << 1);
+}
+
 /* Once registers have been decided via register allocation
  * (allocate_registers), we need to rewrite the MIR to use registers instead of
  * indices */
@@ -650,6 +659,7 @@ install_registers_instr(
                 struct phys_reg dest = index_to_reg(ctx, l, ins->dest, mir_typesize(ins));
                 struct phys_reg coord = index_to_reg(ctx, l, ins->src[1], mir_srcsize(ins, 1));
                 struct phys_reg lod = index_to_reg(ctx, l, ins->src[2], mir_srcsize(ins, 2));
+                struct phys_reg offset = index_to_reg(ctx, l, ins->src[3], mir_srcsize(ins, 2));
                 /* First, install the texture coordinate */
                 ins->texture.in_reg_full = 1;
@@ -668,7 +678,7 @@ install_registers_instr(
                 if (ins->src[2] != ~0) {
                         assert(!(lod.offset & 3));
                         midgard_tex_register_select sel = {
-                                .select = lod.reg,
+                                .select = lod.reg & 1,
                                 .full = 1,
                                 .component = lod.offset / 4
                         };
@@ -678,6 +688,24 @@ install_registers_instr(
                         ins->texture.bias = packed;
                 }
+
+                /* If there is an offset register, install it */
+                if (ins->src[3] != ~0) {
+                        ins->texture.offset_x =
+                                (1) |                   /* full */
+                                (offset.reg & 1) << 1 | /* select */
+                                0 << 2;                 /* upper */
+
+                        unsigned x = offset.offset / 4;
+                        unsigned y = x + 1;
+                        unsigned z = x + 2;
+
+                        ins->texture.offset_y =
+                                mir_reverse2(y) | (mir_reverse2(x) << 2);
+
+                        ins->texture.offset_z =
+                                mir_reverse2(z);
+                }
                 break;
         }
@@ -265,7 +265,6 @@ mir_srcsize(midgard_instruction *ins, unsigned i)
         if (i >= 2) {
                 /* TODO: 16-bit conditions, ffma */
-                assert(i == 2);
                 return midgard_reg_mode_32;
         }
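
Note (not part of the patch): the offset packing in install_registers_instr is easier to follow with concrete numbers. The standalone sketch below mirrors the arithmetic from that hunk; the register number and byte offset are hypothetical register-allocation results chosen for illustration, and the full/select/upper field layout is assumed from the code above.

#include <stdio.h>

/* Mirror of mir_reverse2(): swap the two bits of a 2-bit swizzle index. */
static unsigned
reverse2(unsigned in)
{
        return (in >> 1) | ((in & 1) << 1);
}

int
main(void)
{
        /* Hypothetical RA result: offset vector in the odd register of the
         * texture pair, starting at the second 32-bit component (.yzw). */
        unsigned reg = 1;
        unsigned byte_offset = 4;

        unsigned x = byte_offset / 4;   /* 1 */
        unsigned y = x + 1;             /* 2 */
        unsigned z = x + 2;             /* 3 */

        /* offset_x packs the full/select/upper flags; offset_y and offset_z
         * pack the bit-reversed component swizzles, as in the patch. */
        unsigned offset_x = 1 | ((reg & 1) << 1) | (0 << 2);
        unsigned offset_y = reverse2(y) | (reverse2(x) << 2);
        unsigned offset_z = reverse2(z);

        printf("offset_x=%u offset_y=%u offset_z=%u\n",
               offset_x, offset_y, offset_z);
        return 0;
}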