We use the *64*_PASSTHRU formats to upload 64-bit vertex attributes, so
that the data reaches the hardware without any conversion.

From the BDW PRM, Volume 2d, page 586 (VERTEX_ELEMENT_STATE):

   "When SourceElementFormat is set to one of the *64*_PASSTHRU formats,
    64-bit components are stored in the URB without any conversion. In
    this case, vertex elements must be written as 128 or 256 bits, with
    VFCOMP_STORE_0 being used to pad the output as required. E.g., if
    R64_PASSTHRU is used to copy a 64-bit Red component into the URB,
    Component 1 must be specified as VFCOMP_STORE_0 (with Components 2,3
    set to VFCOMP_NOSTORE) in order to output a 128-bit vertex element,
    or Components 1-3 must be specified as VFCOMP_STORE_0 in order to
    output a 256-bit vertex element. Likewise, use of R64G64B64_PASSTHRU
    requires Component 3 to be specified as VFCOMP_STORE_0 in order to
    output a 256-bit vertex element."

v2, v3 (Jason):
- Don't delete unused formats.

Signed-off-by: Samuel Iglesias Gonsálvez <siglesias@igalia.com>
Reviewed-by: Jason Ekstrand <jason@jlekstrand.net>
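As an illustration of the padding rule quoted above, the sketch below programs
a single vertex element that fetches one 64-bit Red component with
R64_PASSTHRU and pads it to 128 bits. The genxml-style field and enum names
(GENX(VERTEX_ELEMENT_STATE), VFCOMP_STORE_SRC, ...) and the binding/offset
variables are assumed for illustration only; this is not the actual anv code.

   /* Minimal sketch, assuming genxml-style names: one VERTEX_ELEMENT_STATE
    * entry that copies a 64-bit Red component into the URB via
    * R64_PASSTHRU.  Following the PRM, Component 1 stores zero to pad the
    * element to 128 bits and Components 2-3 are not stored. */
   struct GENX(VERTEX_ELEMENT_STATE) element = {
      .VertexBufferIndex   = binding,                 /* assumed variable */
      .Valid               = true,
      .SourceElementFormat = ISL_FORMAT_R64_PASSTHRU,
      .SourceElementOffset = offset,                  /* assumed variable */
      .Component0Control   = VFCOMP_STORE_SRC,        /* the 64-bit Red value */
      .Component1Control   = VFCOMP_STORE_0,          /* pad to 128 bits */
      .Component2Control   = VFCOMP_NOSTORE,
      .Component3Control   = VFCOMP_NOSTORE,
   };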
@@ -97,7 +97,7 @@ static const struct surface_format_info format_info[] = {
 SF( x, x, x, x, x, x, Y, x, x, x, R32G32B32A32_SSCALED)
 SF( x, x, x, x, x, x, Y, x, x, x, R32G32B32A32_USCALED)
 SF( x, x, x, x, x, x, 75, x, x, x, R32G32B32A32_SFIXED)
-SF( x, x, x, x, x, x, x, x, x, x, R64G64_PASSTHRU)
+SF( x, x, x, x, x, x, 80, x, x, x, R64G64_PASSTHRU)
 SF( Y, 50, x, x, x, x, Y, Y, x, x, R32G32B32_FLOAT)
 SF( Y, x, x, x, x, x, Y, Y, x, x, R32G32B32_SINT)
 SF( Y, x, x, x, x, x, Y, Y, x, x, R32G32B32_UINT)
@@ -131,7 +131,7 @@ static const struct surface_format_info format_info[] = {
 SF( x, x, x, x, x, x, Y, x, x, x, R32G32_SSCALED)
 SF( x, x, x, x, x, x, Y, x, x, x, R32G32_USCALED)
 SF( x, x, x, x, x, x, 75, x, x, x, R32G32_SFIXED)
-SF( x, x, x, x, x, x, x, x, x, x, R64_PASSTHRU)
+SF( x, x, x, x, x, x, 80, x, x, x, R64_PASSTHRU)
 SF( Y, Y, x, Y, Y, Y, Y, x, 60, 90, B8G8R8A8_UNORM)
 SF( Y, Y, x, x, Y, Y, x, x, x, x, B8G8R8A8_UNORM_SRGB)
 /* smpl filt shad CK RT AB VB SO color ccs_e */
@@ -156,16 +156,16 @@ static const struct anv_format anv_formats[] = {
 fmt(VK_FORMAT_R32G32B32A32_SFLOAT, ISL_FORMAT_R32G32B32A32_FLOAT),
 fmt(VK_FORMAT_R64_UINT, ISL_FORMAT_R64_PASSTHRU),
 fmt(VK_FORMAT_R64_SINT, ISL_FORMAT_R64_PASSTHRU),
-fmt(VK_FORMAT_R64_SFLOAT, ISL_FORMAT_R64_FLOAT),
+fmt(VK_FORMAT_R64_SFLOAT, ISL_FORMAT_R64_PASSTHRU),
 fmt(VK_FORMAT_R64G64_UINT, ISL_FORMAT_R64G64_PASSTHRU),
 fmt(VK_FORMAT_R64G64_SINT, ISL_FORMAT_R64G64_PASSTHRU),
-fmt(VK_FORMAT_R64G64_SFLOAT, ISL_FORMAT_R64G64_FLOAT),
+fmt(VK_FORMAT_R64G64_SFLOAT, ISL_FORMAT_R64G64_PASSTHRU),
 fmt(VK_FORMAT_R64G64B64_UINT, ISL_FORMAT_R64G64B64_PASSTHRU),
 fmt(VK_FORMAT_R64G64B64_SINT, ISL_FORMAT_R64G64B64_PASSTHRU),
-fmt(VK_FORMAT_R64G64B64_SFLOAT, ISL_FORMAT_R64G64B64_FLOAT),
+fmt(VK_FORMAT_R64G64B64_SFLOAT, ISL_FORMAT_R64G64B64_PASSTHRU),
 fmt(VK_FORMAT_R64G64B64A64_UINT, ISL_FORMAT_R64G64B64A64_PASSTHRU),
 fmt(VK_FORMAT_R64G64B64A64_SINT, ISL_FORMAT_R64G64B64A64_PASSTHRU),
-fmt(VK_FORMAT_R64G64B64A64_SFLOAT, ISL_FORMAT_R64G64B64A64_FLOAT),
+fmt(VK_FORMAT_R64G64B64A64_SFLOAT, ISL_FORMAT_R64G64B64A64_PASSTHRU),
 fmt(VK_FORMAT_B10G11R11_UFLOAT_PACK32, ISL_FORMAT_R11G11B10_FLOAT),
 fmt(VK_FORMAT_E5B9G9R9_UFLOAT_PACK32, ISL_FORMAT_R9G9B9E5_SHAREDEXP),
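A note on the VB column in the format_info hunks above: the numeric entries
hold ten times the first hardware generation with support, so the new 80
values expose R64_PASSTHRU and R64G64_PASSTHRU for vertex fetch on gen 8.0
(Broadwell) and later. Below is a minimal, self-contained sketch of how such
an entry could be checked; the helper name and the 0/1 encodings chosen for
the "x" and "Y" markers are assumptions for illustration, not the real Mesa
code.

   /* Sketch only: interpret one VB-column entry from the table above.
    * Assumed encoding: 0 = "x" (never supported), 1 = "Y" (all
    * generations), otherwise gen * 10 of the first supporting hardware. */
   static bool
   supports_vertex_fetch_sketch(int gen_x10, int vb_entry)
   {
      if (vb_entry == 0)
         return false;             /* "x": not usable for vertex buffers    */
      if (vb_entry == 1)
         return true;              /* "Y": usable on every listed gen       */
      return gen_x10 >= vb_entry;  /* e.g. 80 -> gen 8.0 (Broadwell) and up */
   }

For example, supports_vertex_fetch_sketch(80, 80) returns true, matching the
Broadwell enablement this patch targets.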