update rspirv: undo mistaken constant_u{32, 64} rename in constant.rs
eddyb committed Nov 7, 2024
1 parent b1cbff4 commit 5ec1cc8
Showing 7 changed files with 41 additions and 41 deletions.
2 changes: 1 addition & 1 deletion crates/rustc_codegen_spirv/src/abi.rs
@@ -653,7 +653,7 @@ fn trans_aggregate<'tcx>(cx: &CodegenCx<'tcx>, span: Span, ty: TyAndLayout<'tcx>
// spir-v doesn't support zero-sized arrays
create_zst(cx, span, ty)
} else {
- let count_const = cx.constant_bit32(span, count as u32);
+ let count_const = cx.constant_u32(span, count as u32);
let element_spv = cx.lookup_type(element_type);
let stride_spv = element_spv
.sizeof(cx)
24 changes: 12 additions & 12 deletions crates/rustc_codegen_spirv/src/builder/builder_methods.rs
@@ -175,7 +175,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
| MemorySemantics::SEQUENTIALLY_CONSISTENT
}
};
- let semantics = self.constant_bit32(self.span(), semantics.bits());
+ let semantics = self.constant_u32(self.span(), semantics.bits());
if invalid_seq_cst {
self.zombie(
semantics.def(self),
@@ -196,10 +196,10 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
.constant_u16(self.span(), memset_fill_u16(fill_byte))
.def(self),
32 => self
- .constant_bit32(self.span(), memset_fill_u32(fill_byte))
+ .constant_u32(self.span(), memset_fill_u32(fill_byte))
.def(self),
64 => self
- .constant_bit64(self.span(), memset_fill_u64(fill_byte))
+ .constant_u64(self.span(), memset_fill_u64(fill_byte))
.def(self),
_ => self.fatal(format!(
"memset on integer width {width} not implemented yet"
@@ -314,7 +314,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
self.store(pat, ptr, Align::from_bytes(0).unwrap());
} else {
for index in 0..count {
- let const_index = self.constant_bit32(self.span(), index as u32);
+ let const_index = self.constant_u32(self.span(), index as u32);
let gep_ptr = self.gep(pat.ty, ptr, &[const_index]);
self.store(pat, gep_ptr, Align::from_bytes(0).unwrap());
}
@@ -431,7 +431,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
} else {
let indices = indices
.into_iter()
- .map(|idx| self.constant_bit32(self.span(), idx).def(self))
+ .map(|idx| self.constant_u32(self.span(), idx).def(self))
.collect::<Vec<_>>();
self.emit()
.in_bounds_access_chain(leaf_ptr_ty, None, ptr.def(self), indices)
@@ -614,7 +614,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
};
let indices = base_indices
.into_iter()
- .map(|idx| self.constant_bit32(self.span(), idx).def(self))
+ .map(|idx| self.constant_u32(self.span(), idx).def(self))
.chain(indices)
.collect();
return self.emit_access_chain(
@@ -1478,7 +1478,7 @@ impl<'a, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'tcx> {
let (ptr, access_ty) = self.adjust_pointer_for_typed_access(ptr, ty);

// TODO: Default to device scope
- let memory = self.constant_bit32(self.span(), Scope::Device as u32);
+ let memory = self.constant_u32(self.span(), Scope::Device as u32);
let semantics = self.ordering_to_semantics_def(order);
let result = self
.emit()
@@ -1611,7 +1611,7 @@ impl<'a, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'tcx> {
let val = self.bitcast(val, access_ty);

// TODO: Default to device scope
- let memory = self.constant_bit32(self.span(), Scope::Device as u32);
+ let memory = self.constant_u32(self.span(), Scope::Device as u32);
let semantics = self.ordering_to_semantics_def(order);
self.validate_atomic(val.ty, ptr.def(self));
self.emit()
@@ -1944,7 +1944,7 @@ impl<'a, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'tcx> {
) {
let indices = indices
.into_iter()
- .map(|idx| self.constant_bit32(self.span(), idx).def(self))
+ .map(|idx| self.constant_u32(self.span(), idx).def(self))
.collect::<Vec<_>>();
self.emit()
.in_bounds_access_chain(dest_ty, None, ptr.def(self), indices)
@@ -2495,7 +2495,7 @@ impl<'a, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'tcx> {

self.validate_atomic(access_ty, dst.def(self));
// TODO: Default to device scope
- let memory = self.constant_bit32(self.span(), Scope::Device as u32);
+ let memory = self.constant_u32(self.span(), Scope::Device as u32);
let semantics_equal = self.ordering_to_semantics_def(order);
let semantics_unequal = self.ordering_to_semantics_def(failure_order);
// Note: OpAtomicCompareExchangeWeak is deprecated, and has the same semantics
@@ -2535,7 +2535,7 @@ impl<'a, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'tcx> {
self.validate_atomic(access_ty, dst.def(self));
// TODO: Default to device scope
let memory = self
- .constant_bit32(self.span(), Scope::Device as u32)
+ .constant_u32(self.span(), Scope::Device as u32)
.def(self);
let semantics = self.ordering_to_semantics_def(order).def(self);
use AtomicRmwBinOp::*;
@@ -2631,7 +2631,7 @@ impl<'a, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'tcx> {
// Ignore sync scope (it only has "single thread" and "cross thread")
// TODO: Default to device scope
let memory = self
- .constant_bit32(self.span(), Scope::Device as u32)
+ .constant_u32(self.span(), Scope::Device as u32)
.def(self);
let semantics = self.ordering_to_semantics_def(order).def(self);
self.emit().memory_barrier(memory, semantics).unwrap();
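Several hunks in this file pass `Scope::Device as u32` (and `MemorySemantics` bit flags) to the renamed helper because SPIR-V atomics and barriers take their scope and memory-semantics operands as 32-bit unsigned constants. A minimal standalone sketch of that enum-to-u32 step, using a stand-in enum rather than the actual rspirv type (discriminant values as in the SPIR-V spec):

    // Stand-in for rspirv's `spirv::Scope`; discriminants follow the SPIR-V spec,
    // which is why `Scope::Device as u32` yields the operand value directly.
    #[allow(dead_code)]
    #[repr(u32)]
    enum Scope {
        CrossDevice = 0,
        Device = 1,
        Workgroup = 2,
    }

    fn main() {
        // This is the value that `constant_u32(span, Scope::Device as u32)` interns.
        let memory_operand = Scope::Device as u32;
        assert_eq!(memory_operand, 1);
    }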
8 changes: 4 additions & 4 deletions
@@ -31,7 +31,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
constant_offset: u32,
) -> SpirvValue {
let actual_index = if constant_offset != 0 {
- let const_offset_val = self.constant_bit32(DUMMY_SP, constant_offset);
+ let const_offset_val = self.constant_u32(DUMMY_SP, constant_offset);
self.add(dynamic_index, const_offset_val)
} else {
dynamic_index
@@ -199,7 +199,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
// Note that the &[u32] gets split into two arguments - pointer, length
let array = args[0];
let byte_index = args[2];
- let two = self.constant_bit32(DUMMY_SP, 2);
+ let two = self.constant_u32(DUMMY_SP, 2);
let word_index = self.lshr(byte_index, two);
self.recurse_load_type(result_type, result_type, array, word_index, 0)
}
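As the comment above notes, the `&[u32]` buffer arrives split into a pointer and a length, so byte offsets must be converted to word indices before indexing; the `constant_u32(DUMMY_SP, 2)` exists only as the shift amount for that division by four. A tiny plain-Rust illustration (not the builder code itself):

    // A logical right shift by 2 divides the byte offset by 4,
    // turning it into an index into a &[u32] word array.
    fn main() {
        let byte_index: u32 = 12;
        let word_index = byte_index >> 2;
        assert_eq!(word_index, 3); // byte 12 starts the fourth u32 word
    }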
@@ -223,7 +223,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
value: SpirvValue,
) -> Result<(), ErrorGuaranteed> {
let actual_index = if constant_offset != 0 {
- let const_offset_val = self.constant_bit32(DUMMY_SP, constant_offset);
+ let const_offset_val = self.constant_u32(DUMMY_SP, constant_offset);
self.add(dynamic_index, const_offset_val)
} else {
dynamic_index
@@ -367,7 +367,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
// Note that the &[u32] gets split into two arguments - pointer, length
let array = args[0];
let byte_index = args[2];
- let two = self.constant_bit32(DUMMY_SP, 2);
+ let two = self.constant_u32(DUMMY_SP, 2);
let word_index = self.lshr(byte_index, two);
if is_pair {
let value_one = args[3];
36 changes: 18 additions & 18 deletions crates/rustc_codegen_spirv/src/builder/intrinsics.rs
@@ -45,12 +45,12 @@ impl Builder<'_, '_> {
let int_ty = SpirvType::Integer(width, false).def(self.span(), self);
let (mask_sign, mask_value) = match width {
32 => (
- self.constant_bit32(self.span(), 1 << 31),
- self.constant_bit32(self.span(), u32::MAX >> 1),
+ self.constant_u32(self.span(), 1 << 31),
+ self.constant_u32(self.span(), u32::MAX >> 1),
),
64 => (
- self.constant_bit64(self.span(), 1 << 63),
- self.constant_bit64(self.span(), u64::MAX >> 1),
+ self.constant_u64(self.span(), 1 << 63),
+ self.constant_u64(self.span(), u64::MAX >> 1),
),
_ => bug!("copysign must have width 32 or 64, not {}", width),
};
@@ -272,10 +272,10 @@ impl<'a, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'tcx> {
self.or(tmp1, tmp2)
}
32 => {
- let offset8 = self.constant_bit32(self.span(), 8);
- let offset24 = self.constant_bit32(self.span(), 24);
- let mask16 = self.constant_bit32(self.span(), 0xFF00);
- let mask24 = self.constant_bit32(self.span(), 0xFF0000);
+ let offset8 = self.constant_u32(self.span(), 8);
+ let offset24 = self.constant_u32(self.span(), 24);
+ let mask16 = self.constant_u32(self.span(), 0xFF00);
+ let mask24 = self.constant_u32(self.span(), 0xFF0000);
let tmp4 = self.shl(arg, offset24);
let tmp3 = self.shl(arg, offset8);
let tmp2 = self.lshr(arg, offset8);
@@ -287,16 +287,16 @@ impl<'a, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'tcx> {
self.or(res1, res2)
}
64 => {
- let offset8 = self.constant_bit64(self.span(), 8);
- let offset24 = self.constant_bit64(self.span(), 24);
- let offset40 = self.constant_bit64(self.span(), 40);
- let offset56 = self.constant_bit64(self.span(), 56);
- let mask16 = self.constant_bit64(self.span(), 0xff00);
- let mask24 = self.constant_bit64(self.span(), 0xff0000);
- let mask32 = self.constant_bit64(self.span(), 0xff000000);
- let mask40 = self.constant_bit64(self.span(), 0xff00000000);
- let mask48 = self.constant_bit64(self.span(), 0xff0000000000);
- let mask56 = self.constant_bit64(self.span(), 0xff000000000000);
+ let offset8 = self.constant_u64(self.span(), 8);
+ let offset24 = self.constant_u64(self.span(), 24);
+ let offset40 = self.constant_u64(self.span(), 40);
+ let offset56 = self.constant_u64(self.span(), 56);
+ let mask16 = self.constant_u64(self.span(), 0xff00);
+ let mask24 = self.constant_u64(self.span(), 0xff0000);
+ let mask32 = self.constant_u64(self.span(), 0xff000000);
+ let mask40 = self.constant_u64(self.span(), 0xff00000000);
+ let mask48 = self.constant_u64(self.span(), 0xff0000000000);
+ let mask56 = self.constant_u64(self.span(), 0xff000000000000);
let tmp8 = self.shl(arg, offset56);
let tmp7 = self.shl(arg, offset40);
let tmp6 = self.shl(arg, offset24);
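The `@@ -272` and `@@ -287` hunks above rebuild `bswap` from shift and mask constants, which is why so many integer constants are created in one place. The intermediate lines are collapsed in this view, but the visible offsets (8, 24) and masks (0xFF00, 0xFF0000) match the usual shift-and-mask byte swap; the following plain-Rust reconstruction of the 32-bit case is an illustrative sketch under that assumption, not the builder code:

    // Standard 32-bit byte swap using the same offsets and masks as the 32-bit branch.
    fn bswap32(x: u32) -> u32 {
        let hi = x << 24;                    // lowest byte to the top
        let mid_hi = (x << 8) & 0x00FF_0000; // second byte up one slot
        let mid_lo = (x >> 8) & 0x0000_FF00; // third byte down one slot
        let lo = x >> 24;                    // highest byte to the bottom
        hi | mid_hi | mid_lo | lo
    }

    fn main() {
        assert_eq!(bswap32(0x1234_5678), 0x7856_3412);
        assert_eq!(bswap32(0x1234_5678), 0x1234_5678u32.swap_bytes());
    }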
8 changes: 4 additions & 4 deletions crates/rustc_codegen_spirv/src/codegen_cx/constant.rs
@@ -40,12 +40,12 @@ impl<'tcx> CodegenCx<'tcx> {
self.def_constant(ty, SpirvConst::U32(val as u32))
}

- pub fn constant_bit32(&self, span: Span, val: u32) -> SpirvValue {
+ pub fn constant_u32(&self, span: Span, val: u32) -> SpirvValue {
let ty = SpirvType::Integer(32, false).def(span, self);
self.def_constant(ty, SpirvConst::U32(val))
}

- pub fn constant_bit64(&self, span: Span, val: u64) -> SpirvValue {
+ pub fn constant_u64(&self, span: Span, val: u64) -> SpirvValue {
let ty = SpirvType::Integer(64, false).def(span, self);
self.def_constant(ty, SpirvConst::U64(val))
}
@@ -171,10 +171,10 @@ impl<'tcx> ConstMethods<'tcx> for CodegenCx<'tcx> {
self.constant_i32(DUMMY_SP, i)
}
fn const_u32(&self, i: u32) -> Self::Value {
- self.constant_bit32(DUMMY_SP, i)
+ self.constant_u32(DUMMY_SP, i)
}
fn const_u64(&self, i: u64) -> Self::Value {
- self.constant_bit64(DUMMY_SP, i)
+ self.constant_u64(DUMMY_SP, i)
}
fn const_u128(&self, i: u128) -> Self::Value {
let ty = SpirvType::Integer(128, false).def(DUMMY_SP, self);
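Worth noting from the top of this hunk: even `constant_i32` stores its value as `SpirvConst::U32(val as u32)`, i.e. as the raw two's-complement bit pattern. A one-line reminder that the cast is lossless (plain Rust behaviour, not project-specific code):

    fn main() {
        let val: i32 = -1;
        // `as u32` reinterprets the two's-complement bits, which is exactly
        // what `SpirvConst::U32(val as u32)` relies on in the hunk above.
        assert_eq!(val as u32, u32::MAX);
        assert_eq!((val as u32) as i32, val); // round-trips losslessly
    }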
2 changes: 1 addition & 1 deletion crates/rustc_codegen_spirv/src/codegen_cx/entry.rs
@@ -529,7 +529,7 @@ impl<'tcx> CodegenCx<'tcx> {
.def(hir_param.span, self);
var_ptr_spirv_type = self.type_ptr_to(var_spirv_type);

- let zero_u32 = self.constant_bit32(hir_param.span, 0).def_cx(self);
+ let zero_u32 = self.constant_u32(hir_param.span, 0).def_cx(self);
let value_ptr_spirv_type = self.type_ptr_to(value_spirv_type);
let value_ptr = bx
.emit()
2 changes: 1 addition & 1 deletion crates/rustc_codegen_spirv/src/codegen_cx/type_.rs
@@ -164,7 +164,7 @@ impl<'tcx> BaseTypeMethods<'tcx> for CodegenCx<'tcx> {
fn type_array(&self, ty: Self::Type, len: u64) -> Self::Type {
SpirvType::Array {
element: ty,
- count: self.constant_bit64(DUMMY_SP, len),
+ count: self.constant_u64(DUMMY_SP, len),
}
.def(DUMMY_SP, self)
}
