It's what all the call-sites want, so this gets rid of a bunch of inlined glsl_get_base_type() at the call-sites. Signed-off-by: Rob Clark <robclark@freedesktop.org> Reviewed-by: Jason Ekstrand <jason@jlekstrand.net> tags/12.0-branchpoint
@@ -856,7 +856,7 @@ nir_visitor::visit(ir_call *ir) | |||
instr->num_components = type->vector_elements; | |||
/* Setup destination register */ | |||
unsigned bit_size = glsl_get_bit_size(type->base_type); | |||
unsigned bit_size = glsl_get_bit_size(type); | |||
nir_ssa_dest_init(&instr->instr, &instr->dest, | |||
type->vector_elements, bit_size, NULL); | |||
@@ -942,7 +942,7 @@ nir_visitor::visit(ir_call *ir) | |||
instr->num_components = type->vector_elements; | |||
/* Setup destination register */ | |||
unsigned bit_size = glsl_get_bit_size(type->base_type); | |||
unsigned bit_size = glsl_get_bit_size(type); | |||
nir_ssa_dest_init(&instr->instr, &instr->dest, | |||
type->vector_elements, bit_size, NULL); | |||
@@ -1005,7 +1005,7 @@ nir_visitor::visit(ir_call *ir) | |||
/* Atomic result */ | |||
assert(ir->return_deref); | |||
unsigned bit_size = glsl_get_bit_size(ir->return_deref->type->base_type); | |||
unsigned bit_size = glsl_get_bit_size(ir->return_deref->type); | |||
nir_ssa_dest_init(&instr->instr, &instr->dest, | |||
ir->return_deref->type->vector_elements, | |||
bit_size, NULL); | |||
@@ -1186,7 +1186,7 @@ nir_visitor::evaluate_rvalue(ir_rvalue* ir) | |||
load_instr->num_components = ir->type->vector_elements; | |||
load_instr->variables[0] = this->deref_head; | |||
ralloc_steal(load_instr, load_instr->variables[0]); | |||
unsigned bit_size = glsl_get_bit_size(ir->type->base_type); | |||
unsigned bit_size = glsl_get_bit_size(ir->type); | |||
add_instr(&load_instr->instr, ir->type->vector_elements, bit_size); | |||
} | |||
@@ -1207,7 +1207,7 @@ nir_visitor::visit(ir_expression *ir) | |||
case ir_binop_ubo_load: { | |||
nir_intrinsic_instr *load = | |||
nir_intrinsic_instr_create(this->shader, nir_intrinsic_load_ubo); | |||
unsigned bit_size = glsl_get_bit_size(ir->type->base_type); | |||
unsigned bit_size = glsl_get_bit_size(ir->type); | |||
load->num_components = ir->type->vector_elements; | |||
load->src[0] = nir_src_for_ssa(evaluate_rvalue(ir->operands[0])); | |||
load->src[1] = nir_src_for_ssa(evaluate_rvalue(ir->operands[1])); | |||
@@ -1276,7 +1276,7 @@ nir_visitor::visit(ir_expression *ir) | |||
intrin->intrinsic == nir_intrinsic_interp_var_at_sample) | |||
intrin->src[0] = nir_src_for_ssa(evaluate_rvalue(ir->operands[1])); | |||
unsigned bit_size = glsl_get_bit_size(deref->type->base_type); | |||
unsigned bit_size = glsl_get_bit_size(deref->type); | |||
add_instr(&intrin->instr, deref->type->vector_elements, bit_size); | |||
if (swizzle) { | |||
@@ -1496,7 +1496,7 @@ nir_visitor::visit(ir_expression *ir) | |||
nir_intrinsic_get_buffer_size); | |||
load->num_components = ir->type->vector_elements; | |||
load->src[0] = nir_src_for_ssa(evaluate_rvalue(ir->operands[0])); | |||
unsigned bit_size = glsl_get_bit_size(ir->type->base_type); | |||
unsigned bit_size = glsl_get_bit_size(ir->type); | |||
add_instr(&load->instr, ir->type->vector_elements, bit_size); | |||
return; | |||
} | |||
@@ -1934,7 +1934,7 @@ nir_visitor::visit(ir_texture *ir) | |||
assert(src_number == num_srcs); | |||
unsigned bit_size = glsl_get_bit_size(ir->type->base_type); | |||
unsigned bit_size = glsl_get_bit_size(ir->type); | |||
add_instr(&instr->instr, nir_tex_instr_dest_size(instr), bit_size); | |||
} | |||
@@ -694,7 +694,7 @@ nir_deref_get_const_initializer_load(nir_shader *shader, nir_deref_var *deref) | |||
tail = tail->child; | |||
} | |||
unsigned bit_size = glsl_get_bit_size(glsl_get_base_type(tail->type)); | |||
unsigned bit_size = glsl_get_bit_size(tail->type); | |||
nir_load_const_instr *load = | |||
nir_load_const_instr_create(shader, glsl_get_vector_elements(tail->type), | |||
bit_size); |
@@ -403,7 +403,7 @@ nir_load_var(nir_builder *build, nir_variable *var) | |||
load->num_components = num_components; | |||
load->variables[0] = nir_deref_var_create(load, var); | |||
nir_ssa_dest_init(&load->instr, &load->dest, num_components, | |||
glsl_get_bit_size(glsl_get_base_type(var->type)), NULL); | |||
glsl_get_bit_size(var->type), NULL); | |||
nir_builder_instr_insert(build, &load->instr); | |||
return &load->dest.ssa; | |||
} |
@@ -119,7 +119,7 @@ get_reg_for_deref(nir_deref_var *deref, struct locals_to_regs_state *state) | |||
nir_register *reg = nir_local_reg_create(state->impl); | |||
reg->num_components = glsl_get_vector_elements(tail->type); | |||
reg->num_array_elems = array_size > 1 ? array_size : 0; | |||
reg->bit_size = glsl_get_bit_size(glsl_get_base_type(tail->type)); | |||
reg->bit_size = glsl_get_bit_size(tail->type); | |||
_mesa_hash_table_insert_pre_hashed(state->regs_table, hash, deref, reg); | |||
nir_array_add(&state->derefs_array, nir_deref_var *, deref); |
@@ -116,8 +116,7 @@ emit_copy_load_store(nir_intrinsic_instr *copy_instr, | |||
assert(src_tail->type == dest_tail->type); | |||
unsigned num_components = glsl_get_vector_elements(src_tail->type); | |||
unsigned bit_size = | |||
glsl_get_bit_size(glsl_get_base_type(src_tail->type)); | |||
unsigned bit_size = glsl_get_bit_size(src_tail->type); | |||
nir_intrinsic_instr *load = | |||
nir_intrinsic_instr_create(mem_ctx, nir_intrinsic_load_var); |
@@ -725,7 +725,7 @@ nir_lower_vars_to_ssa_impl(nir_function_impl *impl) | |||
node->pb_value = | |||
nir_phi_builder_add_value(state.phi_builder, | |||
glsl_get_vector_elements(node->type), | |||
glsl_get_bit_size(glsl_get_base_type(node->type)), | |||
glsl_get_bit_size(node->type), | |||
store_blocks); | |||
if (node->deref->var->constant_initializer) { |
@@ -82,9 +82,9 @@ unsigned glsl_get_record_location_offset(const struct glsl_type *type, | |||
unsigned length); | |||
static inline unsigned | |||
glsl_get_bit_size(enum glsl_base_type type) | |||
glsl_get_bit_size(const struct glsl_type *type) | |||
{ | |||
switch (type) { | |||
switch (glsl_get_base_type(type)) { | |||
case GLSL_TYPE_INT: | |||
case GLSL_TYPE_UINT: | |||
case GLSL_TYPE_BOOL: |
@@ -38,7 +38,7 @@ vtn_undef_ssa_value(struct vtn_builder *b, const struct glsl_type *type) | |||
if (glsl_type_is_vector_or_scalar(type)) { | |||
unsigned num_components = glsl_get_vector_elements(val->type); | |||
unsigned bit_size = glsl_get_bit_size(glsl_get_base_type(val->type)); | |||
unsigned bit_size = glsl_get_bit_size(val->type); | |||
val->def = nir_ssa_undef(&b->nb, num_components, bit_size); | |||
} else { | |||
unsigned elems = glsl_get_length(val->type); | |||
@@ -1034,7 +1034,7 @@ vtn_handle_constant(struct vtn_builder *b, SpvOp opcode, | |||
unsigned num_components = glsl_get_vector_elements(val->const_type); | |||
unsigned bit_size = | |||
glsl_get_bit_size(glsl_get_base_type(val->const_type)); | |||
glsl_get_bit_size(val->const_type); | |||
nir_const_value src[3]; | |||
assert(count <= 7); | |||
@@ -1783,7 +1783,7 @@ vtn_ssa_transpose(struct vtn_builder *b, struct vtn_ssa_value *src) | |||
for (unsigned i = 0; i < glsl_get_matrix_columns(dest->type); i++) { | |||
nir_alu_instr *vec = create_vec(b->shader, | |||
glsl_get_matrix_columns(src->type), | |||
glsl_get_bit_size(glsl_get_base_type(src->type))); | |||
glsl_get_bit_size(src->type)); | |||
if (glsl_type_is_vector_or_scalar(src->type)) { | |||
vec->src[0].src = nir_src_for_ssa(src->def); | |||
vec->src[0].swizzle[0] = i; |
@@ -191,7 +191,7 @@ _vtn_local_load_store(struct vtn_builder *b, bool load, nir_deref_var *deref, | |||
if (load) { | |||
nir_ssa_dest_init(&intrin->instr, &intrin->dest, | |||
intrin->num_components, | |||
glsl_get_bit_size(glsl_get_base_type(tail->type)), | |||
glsl_get_bit_size(tail->type), | |||
NULL); | |||
inout->def = &intrin->dest.ssa; | |||
} else { | |||
@@ -414,7 +414,7 @@ _vtn_load_store_tail(struct vtn_builder *b, nir_intrinsic_op op, bool load, | |||
if (load) { | |||
nir_ssa_dest_init(&instr->instr, &instr->dest, | |||
instr->num_components, | |||
glsl_get_bit_size(glsl_get_base_type(type)), NULL); | |||
glsl_get_bit_size(type), NULL); | |||
(*inout)->def = &instr->dest.ssa; | |||
} | |||