From 5ec1cc8aa8e8f8fd50a6cdd12d7f0568aa35e450 Mon Sep 17 00:00:00 2001
From: Eduard-Mihai Burtescu
Date: Sun, 29 Sep 2024 11:52:34 +0200
Subject: [PATCH] update rspirv: undo mistaken constant_u{32, 64} rename in
 constant.rs

---
 crates/rustc_codegen_spirv/src/abi.rs      |  2 +-
 .../src/builder/builder_methods.rs         | 24 ++++++-------
 .../src/builder/byte_addressable_buffer.rs |  8 ++---
 .../src/builder/intrinsics.rs              | 36 +++++++++----------
 .../src/codegen_cx/constant.rs             |  8 ++---
 .../src/codegen_cx/entry.rs                |  2 +-
 .../src/codegen_cx/type_.rs                |  2 +-
 7 files changed, 41 insertions(+), 41 deletions(-)

diff --git a/crates/rustc_codegen_spirv/src/abi.rs b/crates/rustc_codegen_spirv/src/abi.rs
index 8b9cab619b..44188bfc5c 100644
--- a/crates/rustc_codegen_spirv/src/abi.rs
+++ b/crates/rustc_codegen_spirv/src/abi.rs
@@ -653,7 +653,7 @@ fn trans_aggregate<'tcx>(cx: &CodegenCx<'tcx>, span: Span, ty: TyAndLayout<'tcx>
                 // spir-v doesn't support zero-sized arrays
                 create_zst(cx, span, ty)
             } else {
-                let count_const = cx.constant_bit32(span, count as u32);
+                let count_const = cx.constant_u32(span, count as u32);
                 let element_spv = cx.lookup_type(element_type);
                 let stride_spv = element_spv
                     .sizeof(cx)
diff --git a/crates/rustc_codegen_spirv/src/builder/builder_methods.rs b/crates/rustc_codegen_spirv/src/builder/builder_methods.rs
index 63e41f5c52..c97a8a2ecb 100644
--- a/crates/rustc_codegen_spirv/src/builder/builder_methods.rs
+++ b/crates/rustc_codegen_spirv/src/builder/builder_methods.rs
@@ -175,7 +175,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
                     | MemorySemantics::SEQUENTIALLY_CONSISTENT
             }
         };
-        let semantics = self.constant_bit32(self.span(), semantics.bits());
+        let semantics = self.constant_u32(self.span(), semantics.bits());
         if invalid_seq_cst {
             self.zombie(
                 semantics.def(self),
@@ -196,10 +196,10 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
                 .constant_u16(self.span(), memset_fill_u16(fill_byte))
                 .def(self),
             32 => self
-                .constant_bit32(self.span(), memset_fill_u32(fill_byte))
+                .constant_u32(self.span(), memset_fill_u32(fill_byte))
                 .def(self),
             64 => self
-                .constant_bit64(self.span(), memset_fill_u64(fill_byte))
+                .constant_u64(self.span(), memset_fill_u64(fill_byte))
                 .def(self),
             _ => self.fatal(format!(
                 "memset on integer width {width} not implemented yet"
             ))
@@ -314,7 +314,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
             self.store(pat, ptr, Align::from_bytes(0).unwrap());
         } else {
             for index in 0..count {
-                let const_index = self.constant_bit32(self.span(), index as u32);
+                let const_index = self.constant_u32(self.span(), index as u32);
                 let gep_ptr = self.gep(pat.ty, ptr, &[const_index]);
                 self.store(pat, gep_ptr, Align::from_bytes(0).unwrap());
             }
         }
@@ -431,7 +431,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
         } else {
             let indices = indices
                 .into_iter()
-                .map(|idx| self.constant_bit32(self.span(), idx).def(self))
+                .map(|idx| self.constant_u32(self.span(), idx).def(self))
                 .collect::<Vec<_>>();
             self.emit()
                 .in_bounds_access_chain(leaf_ptr_ty, None, ptr.def(self), indices)
@@ -614,7 +614,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
         };
         let indices = base_indices
             .into_iter()
-            .map(|idx| self.constant_bit32(self.span(), idx).def(self))
+            .map(|idx| self.constant_u32(self.span(), idx).def(self))
             .chain(indices)
             .collect();
         return self.emit_access_chain(
@@ -1478,7 +1478,7 @@ impl<'a, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'tcx> {
         let (ptr, access_ty) = self.adjust_pointer_for_typed_access(ptr, ty);
 
         // TODO: Default to device scope
-        let memory = self.constant_bit32(self.span(), Scope::Device as u32);
+        let memory = self.constant_u32(self.span(), Scope::Device as u32);
         let semantics = self.ordering_to_semantics_def(order);
         let result = self
             .emit()
@@ -1611,7 +1611,7 @@ impl<'a, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'tcx> {
         let val = self.bitcast(val, access_ty);
 
         // TODO: Default to device scope
-        let memory = self.constant_bit32(self.span(), Scope::Device as u32);
+        let memory = self.constant_u32(self.span(), Scope::Device as u32);
         let semantics = self.ordering_to_semantics_def(order);
         self.validate_atomic(val.ty, ptr.def(self));
         self.emit()
@@ -1944,7 +1944,7 @@ impl<'a, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'tcx> {
     ) {
         let indices = indices
             .into_iter()
-            .map(|idx| self.constant_bit32(self.span(), idx).def(self))
+            .map(|idx| self.constant_u32(self.span(), idx).def(self))
            .collect::<Vec<_>>();
         self.emit()
             .in_bounds_access_chain(dest_ty, None, ptr.def(self), indices)
@@ -2495,7 +2495,7 @@ impl<'a, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'tcx> {
         self.validate_atomic(access_ty, dst.def(self));
 
         // TODO: Default to device scope
-        let memory = self.constant_bit32(self.span(), Scope::Device as u32);
+        let memory = self.constant_u32(self.span(), Scope::Device as u32);
         let semantics_equal = self.ordering_to_semantics_def(order);
         let semantics_unequal = self.ordering_to_semantics_def(failure_order);
         // Note: OpAtomicCompareExchangeWeak is deprecated, and has the same semantics
@@ -2535,7 +2535,7 @@ impl<'a, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'tcx> {
         self.validate_atomic(access_ty, dst.def(self));
         // TODO: Default to device scope
         let memory = self
-            .constant_bit32(self.span(), Scope::Device as u32)
+            .constant_u32(self.span(), Scope::Device as u32)
             .def(self);
         let semantics = self.ordering_to_semantics_def(order).def(self);
         use AtomicRmwBinOp::*;
@@ -2631,7 +2631,7 @@ impl<'a, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'tcx> {
         // Ignore sync scope (it only has "single thread" and "cross thread")
         // TODO: Default to device scope
         let memory = self
-            .constant_bit32(self.span(), Scope::Device as u32)
+            .constant_u32(self.span(), Scope::Device as u32)
             .def(self);
         let semantics = self.ordering_to_semantics_def(order).def(self);
         self.emit().memory_barrier(memory, semantics).unwrap();
diff --git a/crates/rustc_codegen_spirv/src/builder/byte_addressable_buffer.rs b/crates/rustc_codegen_spirv/src/builder/byte_addressable_buffer.rs
index 128e3fba0b..1248da7f80 100644
--- a/crates/rustc_codegen_spirv/src/builder/byte_addressable_buffer.rs
+++ b/crates/rustc_codegen_spirv/src/builder/byte_addressable_buffer.rs
@@ -31,7 +31,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
         constant_offset: u32,
     ) -> SpirvValue {
         let actual_index = if constant_offset != 0 {
-            let const_offset_val = self.constant_bit32(DUMMY_SP, constant_offset);
+            let const_offset_val = self.constant_u32(DUMMY_SP, constant_offset);
             self.add(dynamic_index, const_offset_val)
         } else {
             dynamic_index
@@ -199,7 +199,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
         // Note that the &[u32] gets split into two arguments - pointer, length
         let array = args[0];
         let byte_index = args[2];
-        let two = self.constant_bit32(DUMMY_SP, 2);
+        let two = self.constant_u32(DUMMY_SP, 2);
         let word_index = self.lshr(byte_index, two);
         self.recurse_load_type(result_type, result_type, array, word_index, 0)
     }
@@ -223,7 +223,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
         value: SpirvValue,
     ) -> Result<(), ErrorGuaranteed> {
         let actual_index = if constant_offset != 0 {
-            let const_offset_val = self.constant_bit32(DUMMY_SP, constant_offset);
+            let const_offset_val = self.constant_u32(DUMMY_SP, constant_offset);
             self.add(dynamic_index, const_offset_val)
         } else {
             dynamic_index
@@ -367,7 +367,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
         // Note that the &[u32] gets split into two arguments - pointer, length
         let array = args[0];
         let byte_index = args[2];
-        let two = self.constant_bit32(DUMMY_SP, 2);
+        let two = self.constant_u32(DUMMY_SP, 2);
         let word_index = self.lshr(byte_index, two);
         if is_pair {
             let value_one = args[3];
diff --git a/crates/rustc_codegen_spirv/src/builder/intrinsics.rs b/crates/rustc_codegen_spirv/src/builder/intrinsics.rs
index 018aad1ac6..ad48100fa0 100644
--- a/crates/rustc_codegen_spirv/src/builder/intrinsics.rs
+++ b/crates/rustc_codegen_spirv/src/builder/intrinsics.rs
@@ -45,12 +45,12 @@ impl Builder<'_, '_> {
         let int_ty = SpirvType::Integer(width, false).def(self.span(), self);
         let (mask_sign, mask_value) = match width {
             32 => (
-                self.constant_bit32(self.span(), 1 << 31),
-                self.constant_bit32(self.span(), u32::MAX >> 1),
+                self.constant_u32(self.span(), 1 << 31),
+                self.constant_u32(self.span(), u32::MAX >> 1),
             ),
             64 => (
-                self.constant_bit64(self.span(), 1 << 63),
-                self.constant_bit64(self.span(), u64::MAX >> 1),
+                self.constant_u64(self.span(), 1 << 63),
+                self.constant_u64(self.span(), u64::MAX >> 1),
             ),
             _ => bug!("copysign must have width 32 or 64, not {}", width),
         };
@@ -272,10 +272,10 @@ impl<'a, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'tcx> {
                 self.or(tmp1, tmp2)
             }
             32 => {
-                let offset8 = self.constant_bit32(self.span(), 8);
-                let offset24 = self.constant_bit32(self.span(), 24);
-                let mask16 = self.constant_bit32(self.span(), 0xFF00);
-                let mask24 = self.constant_bit32(self.span(), 0xFF0000);
+                let offset8 = self.constant_u32(self.span(), 8);
+                let offset24 = self.constant_u32(self.span(), 24);
+                let mask16 = self.constant_u32(self.span(), 0xFF00);
+                let mask24 = self.constant_u32(self.span(), 0xFF0000);
                 let tmp4 = self.shl(arg, offset24);
                 let tmp3 = self.shl(arg, offset8);
                 let tmp2 = self.lshr(arg, offset8);
                 self.or(res1, res2)
             }
             64 => {
-                let offset8 = self.constant_bit64(self.span(), 8);
-                let offset24 = self.constant_bit64(self.span(), 24);
-                let offset40 = self.constant_bit64(self.span(), 40);
-                let offset56 = self.constant_bit64(self.span(), 56);
-                let mask16 = self.constant_bit64(self.span(), 0xff00);
-                let mask24 = self.constant_bit64(self.span(), 0xff0000);
-                let mask32 = self.constant_bit64(self.span(), 0xff000000);
-                let mask40 = self.constant_bit64(self.span(), 0xff00000000);
-                let mask48 = self.constant_bit64(self.span(), 0xff0000000000);
-                let mask56 = self.constant_bit64(self.span(), 0xff000000000000);
+                let offset8 = self.constant_u64(self.span(), 8);
+                let offset24 = self.constant_u64(self.span(), 24);
+                let offset40 = self.constant_u64(self.span(), 40);
+                let offset56 = self.constant_u64(self.span(), 56);
+                let mask16 = self.constant_u64(self.span(), 0xff00);
+                let mask24 = self.constant_u64(self.span(), 0xff0000);
+                let mask32 = self.constant_u64(self.span(), 0xff000000);
+                let mask40 = self.constant_u64(self.span(), 0xff00000000);
+                let mask48 = self.constant_u64(self.span(), 0xff0000000000);
+                let mask56 = self.constant_u64(self.span(), 0xff000000000000);
                 let tmp8 = self.shl(arg, offset56);
                 let tmp7 = self.shl(arg, offset40);
                 let tmp6 = self.shl(arg, offset24);
diff --git a/crates/rustc_codegen_spirv/src/codegen_cx/constant.rs b/crates/rustc_codegen_spirv/src/codegen_cx/constant.rs
index 38e7c24e51..04b8d72f91 100644
--- a/crates/rustc_codegen_spirv/src/codegen_cx/constant.rs
+++ b/crates/rustc_codegen_spirv/src/codegen_cx/constant.rs
@@ -40,12 +40,12 @@ impl<'tcx> CodegenCx<'tcx> {
         self.def_constant(ty, SpirvConst::U32(val as u32))
     }
 
-    pub fn constant_bit32(&self, span: Span, val: u32) -> SpirvValue {
+    pub fn constant_u32(&self, span: Span, val: u32) -> SpirvValue {
         let ty = SpirvType::Integer(32, false).def(span, self);
         self.def_constant(ty, SpirvConst::U32(val))
     }
 
-    pub fn constant_bit64(&self, span: Span, val: u64) -> SpirvValue {
+    pub fn constant_u64(&self, span: Span, val: u64) -> SpirvValue {
         let ty = SpirvType::Integer(64, false).def(span, self);
         self.def_constant(ty, SpirvConst::U64(val))
     }
@@ -171,10 +171,10 @@ impl<'tcx> ConstMethods<'tcx> for CodegenCx<'tcx> {
         self.constant_i32(DUMMY_SP, i)
     }
     fn const_u32(&self, i: u32) -> Self::Value {
-        self.constant_bit32(DUMMY_SP, i)
+        self.constant_u32(DUMMY_SP, i)
     }
     fn const_u64(&self, i: u64) -> Self::Value {
-        self.constant_bit64(DUMMY_SP, i)
+        self.constant_u64(DUMMY_SP, i)
     }
     fn const_u128(&self, i: u128) -> Self::Value {
         let ty = SpirvType::Integer(128, false).def(DUMMY_SP, self);
diff --git a/crates/rustc_codegen_spirv/src/codegen_cx/entry.rs b/crates/rustc_codegen_spirv/src/codegen_cx/entry.rs
index 0a71bd652c..865bcc7a77 100644
--- a/crates/rustc_codegen_spirv/src/codegen_cx/entry.rs
+++ b/crates/rustc_codegen_spirv/src/codegen_cx/entry.rs
@@ -529,7 +529,7 @@ impl<'tcx> CodegenCx<'tcx> {
                     .def(hir_param.span, self);
                 var_ptr_spirv_type = self.type_ptr_to(var_spirv_type);
 
-                let zero_u32 = self.constant_bit32(hir_param.span, 0).def_cx(self);
+                let zero_u32 = self.constant_u32(hir_param.span, 0).def_cx(self);
                 let value_ptr_spirv_type = self.type_ptr_to(value_spirv_type);
                 let value_ptr = bx
                     .emit()
diff --git a/crates/rustc_codegen_spirv/src/codegen_cx/type_.rs b/crates/rustc_codegen_spirv/src/codegen_cx/type_.rs
index 2296d08f88..599c5ff9b7 100644
--- a/crates/rustc_codegen_spirv/src/codegen_cx/type_.rs
+++ b/crates/rustc_codegen_spirv/src/codegen_cx/type_.rs
@@ -164,7 +164,7 @@ impl<'tcx> BaseTypeMethods<'tcx> for CodegenCx<'tcx> {
     fn type_array(&self, ty: Self::Type, len: u64) -> Self::Type {
         SpirvType::Array {
             element: ty,
-            count: self.constant_bit64(DUMMY_SP, len),
+            count: self.constant_u64(DUMMY_SP, len),
         }
         .def(DUMMY_SP, self)
     }