diff --git a/src/back/hlsl/mod.rs b/src/back/hlsl/mod.rs index 6f6339d430..8f3134bb1b 100644 --- a/src/back/hlsl/mod.rs +++ b/src/back/hlsl/mod.rs @@ -16,7 +16,8 @@ becomes `vec * mat`, etc. This acts as the inverse transpose making the results The only time we don't get this implicit transposition is when reading matrices from Uniforms/Push Constants. To deal with this, we add `row_major` to all declarations of matrices in Uniforms/Push Constants. -Finally because all of our matrices are transposed, if you use `mat3x4`, it'll become `float4x3` in HLSL. +Finally because all of our matrices are transposed, if you use `mat3x4`, it'll become `float3x4` in HLSL +(HLSL has inverted col/row notation). [hlsl]: https://docs.microsoft.com/en-us/windows/win32/direct3dhlsl/dx-graphics-hlsl */ diff --git a/src/back/hlsl/storage.rs b/src/back/hlsl/storage.rs index 4abe663509..cab351aa59 100644 --- a/src/back/hlsl/storage.rs +++ b/src/back/hlsl/storage.rs @@ -122,19 +122,19 @@ impl super::Writer<'_, W> { self.out, "{}{}x{}(", crate::ScalarKind::Float.to_hlsl_str(width)?, - rows as u8, columns as u8, + rows as u8, )?; // Note: Matrices containing vec3s, due to padding, act like they contain vec4s. - let padded_columns = match columns { + let padded_rows = match rows { crate::VectorSize::Tri => 4, - columns => columns as u32, + rows => rows as u32, }; - let row_stride = width as u32 * padded_columns; - let iter = (0..rows as u32).map(|i| { + let row_stride = width as u32 * padded_rows; + let iter = (0..columns as u32).map(|i| { let ty_inner = crate::TypeInner::Vector { - size: columns, + size: rows, kind: crate::ScalarKind::Float, width, }; @@ -261,8 +261,8 @@ impl super::Writer<'_, W> { "{}{}{}x{} {}{} = ", level.next(), crate::ScalarKind::Float.to_hlsl_str(width)?, - rows as u8, columns as u8, + rows as u8, STORE_TEMP_NAME, depth, )?; @@ -270,18 +270,18 @@ impl super::Writer<'_, W> { writeln!(self.out, ";")?; // Note: Matrices containing vec3s, due to padding, act like they contain vec4s. - let padded_columns = match columns { + let padded_rows = match rows { crate::VectorSize::Tri => 4, - columns => columns as u32, + rows => rows as u32, }; - let row_stride = width as u32 * padded_columns; + let row_stride = width as u32 * padded_rows; // then iterate the stores - for i in 0..rows as u32 { + for i in 0..columns as u32 { self.temp_access_chain .push(SubAccess::Offset(i * row_stride)); let ty_inner = crate::TypeInner::Vector { - size: columns, + size: rows, kind: crate::ScalarKind::Float, width, }; @@ -401,8 +401,13 @@ impl super::Writer<'_, W> { crate::TypeInner::Vector { width, .. } => Parent::Array { stride: width as u32, }, - crate::TypeInner::Matrix { rows, width, .. } => Parent::Array { - stride: width as u32 * if rows > crate::VectorSize::Bi { 4 } else { 2 }, + crate::TypeInner::Matrix { columns, width, .. 
} => Parent::Array { + stride: width as u32 + * if columns > crate::VectorSize::Bi { + 4 + } else { + 2 + }, }, _ => unreachable!(), }, diff --git a/src/back/hlsl/writer.rs b/src/back/hlsl/writer.rs index fce74ee91f..b6aea44603 100644 --- a/src/back/hlsl/writer.rs +++ b/src/back/hlsl/writer.rs @@ -825,8 +825,8 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> { self.out, "{}{}x{}", crate::ScalarKind::Float.to_hlsl_str(width)?, - back::vector_size_str(rows), back::vector_size_str(columns), + back::vector_size_str(rows), )?; } TypeInner::Image { diff --git a/tests/in/access.wgsl b/tests/in/access.wgsl index 390a018b11..bfce0bba8c 100644 --- a/tests/in/access.wgsl +++ b/tests/in/access.wgsl @@ -5,7 +5,7 @@ struct AlignedWrapper { }; struct Bar { - matrix: mat4x4, + matrix: mat4x3, matrix_array: array, 2>, atom: atomic, arr: array, 2>, @@ -42,14 +42,14 @@ fn foo_vert(@builtin(vertex_index) vi: u32) -> @builtin(position) vec4 { c[vi + 1u] = 42; let value = c[vi]; - return matrix * vec4(vec4(value)); + return vec4(matrix * vec4(vec4(value)), 2.0); } @stage(fragment) fn foo_frag() -> @location(0) vec4 { // test storage stores bar.matrix[1].z = 1.0; - bar.matrix = mat4x4(vec4(0.0), vec4(1.0), vec4(2.0), vec4(3.0)); + bar.matrix = mat4x3(vec3(0.0), vec3(1.0), vec3(2.0), vec3(3.0)); bar.arr = array, 2>(vec2(0u), vec2(1u)); bar.data[1].value = 1; diff --git a/tests/out/glsl/access.atomics.Compute.glsl b/tests/out/glsl/access.atomics.Compute.glsl index d1fddc7e11..5edd19fea5 100644 --- a/tests/out/glsl/access.atomics.Compute.glsl +++ b/tests/out/glsl/access.atomics.Compute.glsl @@ -9,7 +9,7 @@ struct AlignedWrapper { int value; }; layout(std430) buffer Bar_block_0Compute { - mat4x4 matrix; + mat4x3 matrix; mat2x2 matrix_array[2]; int atom; uvec2 arr[2]; diff --git a/tests/out/glsl/access.foo_frag.Fragment.glsl b/tests/out/glsl/access.foo_frag.Fragment.glsl index ab14704b0b..63b40b7ab1 100644 --- a/tests/out/glsl/access.foo_frag.Fragment.glsl +++ b/tests/out/glsl/access.foo_frag.Fragment.glsl @@ -7,7 +7,7 @@ struct AlignedWrapper { int value; }; layout(std430) buffer Bar_block_0Fragment { - mat4x4 matrix; + mat4x3 matrix; mat2x2 matrix_array[2]; int atom; uvec2 arr[2]; @@ -23,7 +23,7 @@ float read_from_private(inout float foo_1) { void main() { _group_0_binding_0_fs.matrix[1][2] = 1.0; - _group_0_binding_0_fs.matrix = mat4x4(vec4(0.0), vec4(1.0), vec4(2.0), vec4(3.0)); + _group_0_binding_0_fs.matrix = mat4x3(vec3(0.0), vec3(1.0), vec3(2.0), vec3(3.0)); _group_0_binding_0_fs.arr = uvec2[2](uvec2(0u), uvec2(1u)); _group_0_binding_0_fs.data[1].value = 1; _fs2p_location0 = vec4(0.0); diff --git a/tests/out/glsl/access.foo_vert.Vertex.glsl b/tests/out/glsl/access.foo_vert.Vertex.glsl index 05fd493214..f1fb3e09a1 100644 --- a/tests/out/glsl/access.foo_vert.Vertex.glsl +++ b/tests/out/glsl/access.foo_vert.Vertex.glsl @@ -7,7 +7,7 @@ struct AlignedWrapper { int value; }; layout(std430) buffer Bar_block_0Vertex { - mat4x4 matrix; + mat4x3 matrix; mat2x2 matrix_array[2]; int atom; uvec2 arr[2]; @@ -26,7 +26,7 @@ void main() { int c[5] = int[5](0, 0, 0, 0, 0); float baz = foo; foo = 1.0; - mat4x4 matrix = _group_0_binding_0_vs.matrix; + mat4x3 matrix = _group_0_binding_0_vs.matrix; uvec2 arr[2] = _group_0_binding_0_vs.arr; float b = _group_0_binding_0_vs.matrix[3][0]; int a = _group_0_binding_0_vs.data[(uint(_group_0_binding_0_vs.data.length()) - 2u)].value; @@ -34,7 +34,7 @@ void main() { c = int[5](a, int(b), 3, 4, 5); c[(vi + 1u)] = 42; int value = c[vi]; - gl_Position = (matrix * vec4(ivec4(value))); + 
gl_Position = vec4((matrix * vec4(ivec4(value))), 2.0); gl_Position.yz = vec2(-gl_Position.y, gl_Position.z * 2.0 - gl_Position.w); return; } diff --git a/tests/out/hlsl/access.hlsl b/tests/out/hlsl/access.hlsl index 91ffd5293a..8af92afdd8 100644 --- a/tests/out/hlsl/access.hlsl +++ b/tests/out/hlsl/access.hlsl @@ -25,7 +25,7 @@ float4 foo_vert(uint vi : SV_VertexID) : SV_Position float baz = foo; foo = 1.0; - float4x4 matrix_ = float4x4(asfloat(bar.Load4(0+0)), asfloat(bar.Load4(0+16)), asfloat(bar.Load4(0+32)), asfloat(bar.Load4(0+48))); + float4x3 matrix_ = float4x3(asfloat(bar.Load3(0+0)), asfloat(bar.Load3(0+16)), asfloat(bar.Load3(0+32)), asfloat(bar.Load3(0+48))); uint2 arr[2] = {asuint(bar.Load2(104+0)), asuint(bar.Load2(104+8))}; float b = asfloat(bar.Load(0+48+0)); int a = asint(bar.Load(0+(((NagaBufferLengthRW(bar) - 120) / 8) - 2u)*8+120)); @@ -36,18 +36,18 @@ float4 foo_vert(uint vi : SV_VertexID) : SV_Position } c[(vi + 1u)] = 42; int value = c[vi]; - return mul(float4(int4(value.xxxx)), matrix_); + return float4(mul(float4(int4(value.xxxx)), matrix_), 2.0); } float4 foo_frag() : SV_Target0 { bar.Store(8+16+0, asuint(1.0)); { - float4x4 _value2 = float4x4(float4(0.0.xxxx), float4(1.0.xxxx), float4(2.0.xxxx), float4(3.0.xxxx)); - bar.Store4(0+0, asuint(_value2[0])); - bar.Store4(0+16, asuint(_value2[1])); - bar.Store4(0+32, asuint(_value2[2])); - bar.Store4(0+48, asuint(_value2[3])); + float4x3 _value2 = float4x3(float3(0.0.xxx), float3(1.0.xxx), float3(2.0.xxx), float3(3.0.xxx)); + bar.Store3(0+0, asuint(_value2[0])); + bar.Store3(0+16, asuint(_value2[1])); + bar.Store3(0+32, asuint(_value2[2])); + bar.Store3(0+48, asuint(_value2[3])); } { uint2 _value2[2] = { uint2(0u.xx), uint2(1u.xx) }; diff --git a/tests/out/msl/access.msl b/tests/out/msl/access.msl index 0a1c0cfb2a..dbdad72e18 100644 --- a/tests/out/msl/access.msl +++ b/tests/out/msl/access.msl @@ -19,7 +19,7 @@ struct type_6 { }; typedef AlignedWrapper type_7[1]; struct Bar { - metal::float4x4 matrix; + metal::float4x3 matrix; type_3 matrix_array; metal::atomic_int atom; char _pad3[4]; @@ -51,7 +51,7 @@ vertex foo_vertOutput foo_vert( type_13 c; float baz = foo; foo = 1.0; - metal::float4x4 matrix = bar.matrix; + metal::float4x3 matrix = bar.matrix; type_6 arr = bar.arr; float b = bar.matrix[3].x; int a = bar.data[(1 + (_buffer_sizes.size0 - 120 - 8) / 8) - 2u].value; @@ -59,7 +59,7 @@ vertex foo_vertOutput foo_vert( for(int _i=0; _i<5; ++_i) c.inner[_i] = type_13 {a, static_cast(b), 3, 4, 5}.inner[_i]; c.inner[vi + 1u] = 42; int value = c.inner[vi]; - return foo_vertOutput { matrix * static_cast(metal::int4(value)) }; + return foo_vertOutput { metal::float4(matrix * static_cast(metal::int4(value)), 2.0) }; } @@ -71,7 +71,7 @@ fragment foo_fragOutput foo_frag( , constant _mslBufferSizes& _buffer_sizes [[buffer(24)]] ) { bar.matrix[1].z = 1.0; - bar.matrix = metal::float4x4(metal::float4(0.0), metal::float4(1.0), metal::float4(2.0), metal::float4(3.0)); + bar.matrix = metal::float4x3(metal::float3(0.0), metal::float3(1.0), metal::float3(2.0), metal::float3(3.0)); for(int _i=0; _i<2; ++_i) bar.arr.inner[_i] = type_6 {metal::uint2(0u), metal::uint2(1u)}.inner[_i]; bar.data[1].value = 1; return foo_fragOutput { metal::float4(0.0) }; diff --git a/tests/out/spv/access.spvasm b/tests/out/spv/access.spvasm index 21e4f513e6..a78c502374 100644 --- a/tests/out/spv/access.spvasm +++ b/tests/out/spv/access.spvasm @@ -1,16 +1,16 @@ ; SPIR-V ; Version: 1.1 ; Generator: rspirv -; Bound: 126 +; Bound: 128 OpCapability Shader OpExtension 
"SPV_KHR_storage_buffer_storage_class" %1 = OpExtInstImport "GLSL.std.450" OpMemoryModel Logical GLSL450 -OpEntryPoint Vertex %51 "foo_vert" %46 %49 -OpEntryPoint Fragment %86 "foo_frag" %85 -OpEntryPoint GLCompute %103 "atomics" -OpExecutionMode %86 OriginUpperLeft -OpExecutionMode %103 LocalSize 1 1 1 +OpEntryPoint Vertex %52 "foo_vert" %47 %50 +OpEntryPoint Fragment %88 "foo_frag" %87 +OpEntryPoint GLCompute %105 "atomics" +OpExecutionMode %88 OriginUpperLeft +OpExecutionMode %105 LocalSize 1 1 1 OpSource GLSL 450 OpMemberName %21 0 "value" OpName %21 "AlignedWrapper" @@ -20,16 +20,16 @@ OpMemberName %30 2 "atom" OpMemberName %30 3 "arr" OpMemberName %30 4 "data" OpName %30 "Bar" -OpName %34 "bar" -OpName %37 "foo" -OpName %38 "read_from_private" -OpName %42 "foo" -OpName %43 "c" -OpName %46 "vi" -OpName %51 "foo_vert" -OpName %86 "foo_frag" -OpName %101 "tmp" -OpName %103 "atomics" +OpName %35 "bar" +OpName %38 "foo" +OpName %39 "read_from_private" +OpName %43 "foo" +OpName %44 "c" +OpName %47 "vi" +OpName %52 "foo_vert" +OpName %88 "foo_frag" +OpName %103 "tmp" +OpName %105 "atomics" OpMemberDecorate %21 0 Offset 0 OpDecorate %26 ArrayStride 16 OpDecorate %28 ArrayStride 8 @@ -43,13 +43,13 @@ OpMemberDecorate %30 1 MatrixStride 8 OpMemberDecorate %30 2 Offset 96 OpMemberDecorate %30 3 Offset 104 OpMemberDecorate %30 4 Offset 120 -OpDecorate %33 ArrayStride 4 -OpDecorate %34 DescriptorSet 0 -OpDecorate %34 Binding 0 +OpDecorate %34 ArrayStride 4 +OpDecorate %35 DescriptorSet 0 +OpDecorate %35 Binding 0 OpDecorate %30 Block -OpDecorate %46 BuiltIn VertexIndex -OpDecorate %49 BuiltIn Position -OpDecorate %85 Location 0 +OpDecorate %47 BuiltIn VertexIndex +OpDecorate %50 BuiltIn Position +OpDecorate %87 Location 0 %2 = OpTypeVoid %4 = OpTypeInt 32 1 %3 = OpConstant %4 2 @@ -65,12 +65,12 @@ OpDecorate %85 Location 0 %14 = OpConstant %4 4 %15 = OpConstant %9 1 %16 = OpConstant %4 42 -%17 = OpConstant %4 1 -%18 = OpConstant %6 2.0 +%17 = OpConstant %6 2.0 +%18 = OpConstant %4 1 %19 = OpConstant %6 3.0 %20 = OpConstant %9 0 %21 = OpTypeStruct %4 -%23 = OpTypeVector %6 4 +%23 = OpTypeVector %6 3 %22 = OpTypeMatrix %23 4 %25 = OpTypeVector %6 2 %24 = OpTypeMatrix %25 2 @@ -80,127 +80,129 @@ OpDecorate %85 Location 0 %29 = OpTypeRuntimeArray %21 %30 = OpTypeStruct %22 %26 %4 %28 %29 %31 = OpTypePointer Function %6 -%32 = OpTypePointer StorageBuffer %4 -%33 = OpTypeArray %4 %12 -%35 = OpTypePointer StorageBuffer %30 -%34 = OpVariable %35 StorageBuffer -%39 = OpTypeFunction %6 %31 -%44 = OpTypePointer Function %33 -%47 = OpTypePointer Input %9 -%46 = OpVariable %47 Input -%50 = OpTypePointer Output %23 -%49 = OpVariable %50 Output -%52 = OpTypeFunction %2 -%55 = OpTypePointer StorageBuffer %22 -%58 = OpTypePointer StorageBuffer %28 -%61 = OpTypePointer StorageBuffer %23 -%62 = OpTypePointer StorageBuffer %6 -%65 = OpTypePointer StorageBuffer %29 -%68 = OpTypePointer StorageBuffer %21 -%69 = OpConstant %9 4 -%76 = OpTypePointer Function %4 -%80 = OpTypeVector %4 4 -%85 = OpVariable %50 Output -%105 = OpTypePointer StorageBuffer %4 -%108 = OpConstant %9 64 -%38 = OpFunction %6 None %39 -%37 = OpFunctionParameter %31 -%36 = OpLabel -OpBranch %40 -%40 = OpLabel -%41 = OpLoad %6 %37 -OpReturnValue %41 +%32 = OpTypeVector %6 4 +%33 = OpTypePointer StorageBuffer %4 +%34 = OpTypeArray %4 %12 +%36 = OpTypePointer StorageBuffer %30 +%35 = OpVariable %36 StorageBuffer +%40 = OpTypeFunction %6 %31 +%45 = OpTypePointer Function %34 +%48 = OpTypePointer Input %9 +%47 = OpVariable %48 Input +%51 = OpTypePointer 
Output %32 +%50 = OpVariable %51 Output +%53 = OpTypeFunction %2 +%56 = OpTypePointer StorageBuffer %22 +%59 = OpTypePointer StorageBuffer %28 +%62 = OpTypePointer StorageBuffer %23 +%63 = OpTypePointer StorageBuffer %6 +%66 = OpTypePointer StorageBuffer %29 +%69 = OpTypePointer StorageBuffer %21 +%70 = OpConstant %9 4 +%77 = OpTypePointer Function %4 +%81 = OpTypeVector %4 4 +%87 = OpVariable %51 Output +%107 = OpTypePointer StorageBuffer %4 +%110 = OpConstant %9 64 +%39 = OpFunction %6 None %40 +%38 = OpFunctionParameter %31 +%37 = OpLabel +OpBranch %41 +%41 = OpLabel +%42 = OpLoad %6 %38 +OpReturnValue %42 OpFunctionEnd -%51 = OpFunction %2 None %52 -%45 = OpLabel -%42 = OpVariable %31 Function %5 -%43 = OpVariable %44 Function -%48 = OpLoad %9 %46 -OpBranch %53 -%53 = OpLabel -%54 = OpLoad %6 %42 -OpStore %42 %7 -%56 = OpAccessChain %55 %34 %20 -%57 = OpLoad %22 %56 -%59 = OpAccessChain %58 %34 %8 -%60 = OpLoad %28 %59 -%63 = OpAccessChain %62 %34 %20 %8 %20 -%64 = OpLoad %6 %63 -%66 = OpArrayLength %9 %34 4 -%67 = OpISub %9 %66 %10 -%70 = OpAccessChain %32 %34 %69 %67 %20 -%71 = OpLoad %4 %70 -%72 = OpFunctionCall %6 %38 %42 -%73 = OpConvertFToS %4 %64 -%74 = OpCompositeConstruct %33 %71 %73 %13 %14 %12 -OpStore %43 %74 -%75 = OpIAdd %9 %48 %15 -%77 = OpAccessChain %76 %43 %75 -OpStore %77 %16 -%78 = OpAccessChain %76 %43 %48 -%79 = OpLoad %4 %78 -%81 = OpCompositeConstruct %80 %79 %79 %79 %79 -%82 = OpConvertSToF %23 %81 -%83 = OpMatrixTimesVector %23 %57 %82 -OpStore %49 %83 +%52 = OpFunction %2 None %53 +%46 = OpLabel +%43 = OpVariable %31 Function %5 +%44 = OpVariable %45 Function +%49 = OpLoad %9 %47 +OpBranch %54 +%54 = OpLabel +%55 = OpLoad %6 %43 +OpStore %43 %7 +%57 = OpAccessChain %56 %35 %20 +%58 = OpLoad %22 %57 +%60 = OpAccessChain %59 %35 %8 +%61 = OpLoad %28 %60 +%64 = OpAccessChain %63 %35 %20 %8 %20 +%65 = OpLoad %6 %64 +%67 = OpArrayLength %9 %35 4 +%68 = OpISub %9 %67 %10 +%71 = OpAccessChain %33 %35 %70 %68 %20 +%72 = OpLoad %4 %71 +%73 = OpFunctionCall %6 %39 %43 +%74 = OpConvertFToS %4 %65 +%75 = OpCompositeConstruct %34 %72 %74 %13 %14 %12 +OpStore %44 %75 +%76 = OpIAdd %9 %49 %15 +%78 = OpAccessChain %77 %44 %76 +OpStore %78 %16 +%79 = OpAccessChain %77 %44 %49 +%80 = OpLoad %4 %79 +%82 = OpCompositeConstruct %81 %80 %80 %80 %80 +%83 = OpConvertSToF %32 %82 +%84 = OpMatrixTimesVector %23 %58 %83 +%85 = OpCompositeConstruct %32 %84 %17 +OpStore %50 %85 OpReturn OpFunctionEnd -%86 = OpFunction %2 None %52 -%84 = OpLabel -OpBranch %87 -%87 = OpLabel -%88 = OpAccessChain %62 %34 %20 %15 %10 -OpStore %88 %7 -%89 = OpCompositeConstruct %23 %5 %5 %5 %5 -%90 = OpCompositeConstruct %23 %7 %7 %7 %7 -%91 = OpCompositeConstruct %23 %18 %18 %18 %18 -%92 = OpCompositeConstruct %23 %19 %19 %19 %19 -%93 = OpCompositeConstruct %22 %89 %90 %91 %92 -%94 = OpAccessChain %55 %34 %20 -OpStore %94 %93 -%95 = OpCompositeConstruct %27 %20 %20 -%96 = OpCompositeConstruct %27 %15 %15 -%97 = OpCompositeConstruct %28 %95 %96 -%98 = OpAccessChain %58 %34 %8 -OpStore %98 %97 -%99 = OpAccessChain %32 %34 %69 %15 %20 -OpStore %99 %17 -%100 = OpCompositeConstruct %23 %5 %5 %5 %5 -OpStore %85 %100 +%88 = OpFunction %2 None %53 +%86 = OpLabel +OpBranch %89 +%89 = OpLabel +%90 = OpAccessChain %63 %35 %20 %15 %10 +OpStore %90 %7 +%91 = OpCompositeConstruct %23 %5 %5 %5 +%92 = OpCompositeConstruct %23 %7 %7 %7 +%93 = OpCompositeConstruct %23 %17 %17 %17 +%94 = OpCompositeConstruct %23 %19 %19 %19 +%95 = OpCompositeConstruct %22 %91 %92 %93 %94 +%96 = OpAccessChain %56 %35 %20 +OpStore %96 %95 +%97 = 
OpCompositeConstruct %27 %20 %20 +%98 = OpCompositeConstruct %27 %15 %15 +%99 = OpCompositeConstruct %28 %97 %98 +%100 = OpAccessChain %59 %35 %8 +OpStore %100 %99 +%101 = OpAccessChain %33 %35 %70 %15 %20 +OpStore %101 %18 +%102 = OpCompositeConstruct %32 %5 %5 %5 %5 +OpStore %87 %102 OpReturn OpFunctionEnd -%103 = OpFunction %2 None %52 -%102 = OpLabel -%101 = OpVariable %76 Function -OpBranch %104 +%105 = OpFunction %2 None %53 %104 = OpLabel -%106 = OpAccessChain %105 %34 %10 -%107 = OpAtomicLoad %4 %106 %17 %108 -%110 = OpAccessChain %105 %34 %10 -%109 = OpAtomicIAdd %4 %110 %17 %108 %12 -OpStore %101 %109 -%112 = OpAccessChain %105 %34 %10 -%111 = OpAtomicISub %4 %112 %17 %108 %12 -OpStore %101 %111 -%114 = OpAccessChain %105 %34 %10 -%113 = OpAtomicAnd %4 %114 %17 %108 %12 -OpStore %101 %113 -%116 = OpAccessChain %105 %34 %10 -%115 = OpAtomicOr %4 %116 %17 %108 %12 -OpStore %101 %115 -%118 = OpAccessChain %105 %34 %10 -%117 = OpAtomicXor %4 %118 %17 %108 %12 -OpStore %101 %117 -%120 = OpAccessChain %105 %34 %10 -%119 = OpAtomicSMin %4 %120 %17 %108 %12 -OpStore %101 %119 -%122 = OpAccessChain %105 %34 %10 -%121 = OpAtomicSMax %4 %122 %17 %108 %12 -OpStore %101 %121 -%124 = OpAccessChain %105 %34 %10 -%123 = OpAtomicExchange %4 %124 %17 %108 %12 -OpStore %101 %123 -%125 = OpAccessChain %105 %34 %10 -OpAtomicStore %125 %17 %108 %107 +%103 = OpVariable %77 Function +OpBranch %106 +%106 = OpLabel +%108 = OpAccessChain %107 %35 %10 +%109 = OpAtomicLoad %4 %108 %18 %110 +%112 = OpAccessChain %107 %35 %10 +%111 = OpAtomicIAdd %4 %112 %18 %110 %12 +OpStore %103 %111 +%114 = OpAccessChain %107 %35 %10 +%113 = OpAtomicISub %4 %114 %18 %110 %12 +OpStore %103 %113 +%116 = OpAccessChain %107 %35 %10 +%115 = OpAtomicAnd %4 %116 %18 %110 %12 +OpStore %103 %115 +%118 = OpAccessChain %107 %35 %10 +%117 = OpAtomicOr %4 %118 %18 %110 %12 +OpStore %103 %117 +%120 = OpAccessChain %107 %35 %10 +%119 = OpAtomicXor %4 %120 %18 %110 %12 +OpStore %103 %119 +%122 = OpAccessChain %107 %35 %10 +%121 = OpAtomicSMin %4 %122 %18 %110 %12 +OpStore %103 %121 +%124 = OpAccessChain %107 %35 %10 +%123 = OpAtomicSMax %4 %124 %18 %110 %12 +OpStore %103 %123 +%126 = OpAccessChain %107 %35 %10 +%125 = OpAtomicExchange %4 %126 %18 %110 %12 +OpStore %103 %125 +%127 = OpAccessChain %107 %35 %10 +OpAtomicStore %127 %18 %110 %109 OpReturn OpFunctionEnd \ No newline at end of file diff --git a/tests/out/wgsl/access.wgsl b/tests/out/wgsl/access.wgsl index cab0428e9d..944a803d29 100644 --- a/tests/out/wgsl/access.wgsl +++ b/tests/out/wgsl/access.wgsl @@ -3,7 +3,7 @@ struct AlignedWrapper { }; struct Bar { - matrix: mat4x4, + matrix: mat4x3, matrix_array: array,2>, atom: atomic, arr: array,2>, @@ -34,13 +34,13 @@ fn foo_vert(@builtin(vertex_index) vi: u32) -> @builtin(position) vec4 { c = array(a, i32(b), 3, 4, 5); c[(vi + 1u)] = 42; let value = c[vi]; - return (matrix * vec4(vec4(value))); + return vec4((matrix * vec4(vec4(value))), 2.0); } @stage(fragment) fn foo_frag() -> @location(0) vec4 { bar.matrix[1][2] = 1.0; - bar.matrix = mat4x4(vec4(0.0), vec4(1.0), vec4(2.0), vec4(3.0)); + bar.matrix = mat4x3(vec3(0.0), vec3(1.0), vec3(2.0), vec3(3.0)); bar.arr = array,2>(vec2(0u), vec2(1u)); bar.data[1].value = 1; return vec4(0.0);
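
Note on the stride change in `src/back/hlsl/storage.rs`: after the swap from `padded_columns` to `padded_rows`, the byte stride between stored matrix columns is `width * padded_rows`, with vec3 columns padded out to vec4. The sketch below restates that rule outside the patch itself; `VectorSize` and `column_stride` are illustrative stand-in names for this note, not items from the naga crate.

```rust
// Illustrative sketch only: mirrors the padded-stride rule used by the
// storage.rs hunks above. `VectorSize` and `column_stride` are stand-in
// names, not the crate's own items.

#[derive(Clone, Copy)]
enum VectorSize {
    Bi = 2,
    Tri = 3,
    Quad = 4,
}

/// Byte offset between successive matrix columns in a storage buffer,
/// where `width` is the scalar size in bytes (4 for f32).
fn column_stride(rows: VectorSize, width: u32) -> u32 {
    // Columns that are vec3s are padded out to vec4 alignment,
    // matching the `padded_rows` match in the diff.
    let padded_rows = match rows {
        VectorSize::Tri => 4,
        other => other as u32,
    };
    width * padded_rows
}

fn main() {
    // mat4x3<f32>: 4 columns of vec3<f32>, stored 16 bytes apart -- hence the
    // `bar.Load3(0+0)`, `Load3(0+16)`, `Load3(0+32)`, `Load3(0+48)` calls in
    // tests/out/hlsl/access.hlsl above.
    assert_eq!(column_stride(VectorSize::Tri, 4), 16);
    // mat2x2<f32>: 8-byte column stride, matching `MatrixStride 8` on the
    // matrix_array member in the SPIR-V output.
    assert_eq!(column_stride(VectorSize::Bi, 4), 8);
    // mat4x4<f32> columns need no padding: 16-byte stride.
    assert_eq!(column_stride(VectorSize::Quad, 4), 16);
}
```

The same constant shows up in all the regenerated snapshots: the HLSL loads/stores step by 16 bytes per column, and the GLSL/MSL/WGSL outputs simply carry the `mat4x3` type through unchanged.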