diff --git a/src/libcompiler_builtins/lib.rs b/src/libcompiler_builtins/lib.rs
index fb42b915c7694..58aba11e4394f 100644
--- a/src/libcompiler_builtins/lib.rs
+++ b/src/libcompiler_builtins/lib.rs
@@ -34,8 +34,8 @@ pub mod reimpls {
     macro_rules! ashl {
         ($a:expr, $b:expr, $ty:ty) => {{
             let (a, b) = ($a, $b);
-            let bits = (::core::mem::size_of::<$ty>() * 8) as $ty;
-            let half_bits = bits >> 1;
+            let bits = ::core::mem::size_of::<$ty>().wrapping_mul(8) as $ty;
+            let half_bits = bits.wrapping_shr(1);
             if b & half_bits != 0 {
                 <$ty>::from_parts(0, a.low().wrapping_shl(
                                         b.wrapping_sub(half_bits) as u32))
@@ -58,8 +58,8 @@ pub mod reimpls {
     macro_rules! ashr {
         ($a: expr, $b: expr, $ty:ty) => {{
             let (a, b) = ($a, $b);
-            let bits = (::core::mem::size_of::<$ty>() * 8) as $ty;
-            let half_bits = bits >> 1;
+            let bits = ::core::mem::size_of::<$ty>().wrapping_mul(8) as $ty;
+            let half_bits = bits.wrapping_shr(1);
             if b & half_bits != 0 {
                 <$ty>::from_parts(a.high().wrapping_shr(b.wrapping_sub(half_bits) as u32)
                                     as <$ty as LargeInt>::LowHalf,
@@ -83,8 +83,8 @@ pub mod reimpls {
     macro_rules! lshr {
         ($a: expr, $b: expr, $ty:ty) => {{
             let (a, b) = ($a, $b);
-            let bits = (::core::mem::size_of::<$ty>() * 8) as $ty;
-            let half_bits = bits >> 1;
+            let bits = ::core::mem::size_of::<$ty>().wrapping_mul(8) as $ty;
+            let half_bits = bits.wrapping_shr(1);
             if b & half_bits != 0 {
                 <$ty>::from_parts(a.high().wrapping_shr(b.wrapping_sub(half_bits) as u32), 0)
             } else if b == 0 {
@@ -370,7 +370,7 @@ pub mod reimpls {
     macro_rules! mul {
         ($a:expr, $b:expr, $ty: ty, $tyh: ty) => {{
             let (a, b) = ($a, $b);
-            let half_bits = ((::core::mem::size_of::<$tyh>() * 8) / 2) as u32;
+            let half_bits = ::core::mem::size_of::<$tyh>().wrapping_mul(4) as u32;
             let lower_mask = (!0u64).wrapping_shr(half_bits);
             let mut low = (a.low() & lower_mask).wrapping_mul(b.low() & lower_mask);
             let mut t = low.wrapping_shr(half_bits);
@@ -478,7 +478,7 @@ pub mod reimpls {
             let mantissa_fraction = repr & <$fromty as FloatStuff>::MANTISSA_MASK;
             let mantissa = mantissa_fraction | <$fromty as FloatStuff>::MANTISSA_LEAD_BIT;
             if sign == -1.0 || exponent < 0 { return 0 as u128; }
-            if exponent > ::core::mem::size_of::<$outty>() as i32 * 8 {
+            if exponent > ::core::mem::size_of::<$outty>().wrapping_mul(8) as i32 {
                 return !(0 as u128);
             }
             (if exponent < (<$fromty as FloatStuff>::MANTISSA_BITS) as i32 {
@@ -503,7 +503,7 @@ pub mod reimpls {
             let mantissa = mantissa_fraction | <$fromty as FloatStuff>::MANTISSA_LEAD_BIT;
             if exponent < 0 { return 0 as i128; }
-            if exponent > ::core::mem::size_of::<$outty>() as i32 * 8 {
+            if exponent > ::core::mem::size_of::<$outty>().wrapping_mul(8) as i32 {
                 let ret = if sign > 0.0 { <$outty>::max_value() }
                           else { <$outty>::min_value() };
                 return ret
             }
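The `libcompiler_builtins` hunks above rewrite `*`, `/`, and `>>` on the 128-bit shim types as `wrapping_*` calls, presumably so these helpers never lower to the overflow-checked or 128-bit operator calls they themselves exist to implement. A rough standalone model of the `ashl!` case, with hypothetical free functions (`from_parts`, `low`, `high`) standing in for the `LargeInt` methods used in the real code:

```rust
// Hedged sketch: a simplified, stable-Rust model of the ashl! logic above,
// building a 128-bit left shift out of two 64-bit halves.
fn from_parts(low: u64, high: u64) -> u128 {
    (low as u128) | ((high as u128) << 64)
}
fn low(a: u128) -> u64 { a as u64 }
fn high(a: u128) -> u64 { (a >> 64) as u64 }

/// Shift `a` left by `b` bits, where `0 <= b < 128`, without ever shifting a
/// u64 by 64 or more.
fn shl128(a: u128, b: u32) -> u128 {
    let half_bits = 64;
    if b & half_bits != 0 {
        // Shift of 64..=127: the low half becomes the high half.
        from_parts(0, low(a).wrapping_shl(b - half_bits))
    } else if b == 0 {
        a
    } else {
        // Shift of 1..=63: each half contributes to the result.
        from_parts(low(a).wrapping_shl(b),
                   high(a).wrapping_shl(b) | low(a).wrapping_shr(64 - b))
    }
}

fn main() {
    assert_eq!(shl128(1, 100), 1u128 << 100);
    assert_eq!(shl128(0x1234_5678_9abc_def0, 4), 0x1234_5678_9abc_def0u128 << 4);
    assert_eq!(shl128(u128::max_value(), 0), u128::max_value());
}
```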
diff --git a/src/libcore/intrinsics.rs b/src/libcore/intrinsics.rs
index 12410c08f399b..f8d067e9696fd 100644
--- a/src/libcore/intrinsics.rs
+++ b/src/libcore/intrinsics.rs
@@ -1238,6 +1238,15 @@ extern "rust-intrinsic" {
     /// undefined behavior where y = 0 or x = `T::min_value()` and y = -1
     pub fn unchecked_rem<T>(x: T, y: T) -> T;
 
+    /// Performs an unchecked left shift, resulting in undefined behavior when
+    /// y < 0 or y >= N, where N is the width of T in bits.
+    #[cfg(not(stage0))]
+    pub fn unchecked_shl<T>(x: T, y: T) -> T;
+    /// Performs an unchecked right shift, resulting in undefined behavior when
+    /// y < 0 or y >= N, where N is the width of T in bits.
+    #[cfg(not(stage0))]
+    pub fn unchecked_shr<T>(x: T, y: T) -> T;
+
     /// Returns (a + b) mod 2^N, where N is the width of T in bits.
     /// The stabilized versions of this intrinsic are available on the integer
     /// primitives via the `wrapping_add` method. For example,
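The new intrinsics are only defined for shift amounts in `0..N`, so every safe caller has to establish that precondition itself; the `libcore` wrappers below do so by masking the amount with `$BITS - 1`. A small sketch of that contract, using a plain shift in place of the unstable `intrinsics::unchecked_shl`:

```rust
// Hedged sketch of the caller-side contract for unchecked_shl: mask the shift
// amount so it is always in 0..BITS, then a plain (or unchecked) shift is fine.
fn wrapping_shl_u8(x: u8, rhs: u32) -> u8 {
    const BITS: u32 = 8;
    let amount = rhs & (BITS - 1); // always 0..=7, so the shift below cannot overflow
    x << amount
}

fn main() {
    assert_eq!(wrapping_shl_u8(1, 7), 128);
    // An amount of 8 is masked down to 0, matching u8::wrapping_shl.
    assert_eq!(wrapping_shl_u8(1, 8), 1);
    assert_eq!(1u8.wrapping_shl(8), 1);
}
```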
diff --git a/src/libcore/num/mod.rs b/src/libcore/num/mod.rs
index 13fa099c8eb01..c0aa88cd3920f 100644
--- a/src/libcore/num/mod.rs
+++ b/src/libcore/num/mod.rs
@@ -177,7 +177,7 @@ macro_rules! checked_op {
 
 // `Int` + `SignedInt` implemented for signed integers
 macro_rules! int_impl {
-    ($ActualT:ident, $UnsignedT:ty, $BITS:expr,
+    ($SelfT:ty, $ActualT:ident, $UnsignedT:ty, $BITS:expr,
      $add_with_overflow:path,
      $sub_with_overflow:path,
      $mul_with_overflow:path) => {
@@ -850,6 +850,16 @@ macro_rules! int_impl {
         /// ```
         #[stable(feature = "num_wrapping", since = "1.2.0")]
         #[inline(always)]
+        #[cfg(not(stage0))]
+        pub fn wrapping_shl(self, rhs: u32) -> Self {
+            unsafe {
+                intrinsics::unchecked_shl(self, (rhs & ($BITS - 1)) as $SelfT)
+            }
+        }
+
+        /// Stage 0
+        #[stable(feature = "num_wrapping", since = "1.2.0")]
+        #[cfg(stage0)]
         pub fn wrapping_shl(self, rhs: u32) -> Self {
             self.overflowing_shl(rhs).0
         }
@@ -875,6 +885,16 @@ macro_rules! int_impl {
         /// ```
         #[stable(feature = "num_wrapping", since = "1.2.0")]
         #[inline(always)]
+        #[cfg(not(stage0))]
+        pub fn wrapping_shr(self, rhs: u32) -> Self {
+            unsafe {
+                intrinsics::unchecked_shr(self, (rhs & ($BITS - 1)) as $SelfT)
+            }
+        }
+
+        /// Stage 0
+        #[stable(feature = "num_wrapping", since = "1.2.0")]
+        #[cfg(stage0)]
         pub fn wrapping_shr(self, rhs: u32) -> Self {
             self.overflowing_shr(rhs).0
         }
@@ -1089,6 +1109,15 @@ macro_rules! int_impl {
         /// ```
         #[inline]
         #[stable(feature = "wrapping", since = "1.7.0")]
+        #[cfg(not(stage0))]
+        pub fn overflowing_shl(self, rhs: u32) -> (Self, bool) {
+            (self.wrapping_shl(rhs), (rhs > ($BITS - 1)))
+        }
+
+        /// Stage 0
+        #[inline]
+        #[stable(feature = "wrapping", since = "1.7.0")]
+        #[cfg(stage0)]
         pub fn overflowing_shl(self, rhs: u32) -> (Self, bool) {
             (self << (rhs & ($BITS - 1)), (rhs > ($BITS - 1)))
         }
@@ -1111,6 +1140,15 @@ macro_rules! int_impl {
         /// ```
         #[inline]
         #[stable(feature = "wrapping", since = "1.7.0")]
+        #[cfg(not(stage0))]
+        pub fn overflowing_shr(self, rhs: u32) -> (Self, bool) {
+            (self.wrapping_shr(rhs), (rhs > ($BITS - 1)))
+        }
+
+        /// Stage 0
+        #[inline]
+        #[stable(feature = "wrapping", since = "1.7.0")]
+        #[cfg(stage0)]
         pub fn overflowing_shr(self, rhs: u32) -> (Self, bool) {
             (self >> (rhs & ($BITS - 1)), (rhs > ($BITS - 1)))
         }
@@ -1268,7 +1306,7 @@ macro_rules! int_impl {
 
 #[lang = "i8"]
 impl i8 {
-    int_impl! { i8, u8, 8,
+    int_impl! { i8, i8, u8, 8,
         intrinsics::add_with_overflow,
         intrinsics::sub_with_overflow,
         intrinsics::mul_with_overflow }
@@ -1276,7 +1314,7 @@ impl i8 {
 
 #[lang = "i16"]
 impl i16 {
-    int_impl! { i16, u16, 16,
+    int_impl! { i16, i16, u16, 16,
         intrinsics::add_with_overflow,
         intrinsics::sub_with_overflow,
         intrinsics::mul_with_overflow }
@@ -1284,7 +1322,7 @@ impl i16 {
 
 #[lang = "i32"]
 impl i32 {
-    int_impl! { i32, u32, 32,
+    int_impl! { i32, i32, u32, 32,
         intrinsics::add_with_overflow,
         intrinsics::sub_with_overflow,
         intrinsics::mul_with_overflow }
@@ -1292,7 +1330,7 @@ impl i32 {
 
 #[lang = "i64"]
 impl i64 {
-    int_impl! { i64, u64, 64,
+    int_impl! { i64, i64, u64, 64,
         intrinsics::add_with_overflow,
         intrinsics::sub_with_overflow,
         intrinsics::mul_with_overflow }
@@ -1300,7 +1338,7 @@ impl i64 {
 
 #[lang = "i128"]
 impl i128 {
-    int_impl! { i128, u128, 128,
+    int_impl! { i128, i128, u128, 128,
         intrinsics::add_with_overflow,
         intrinsics::sub_with_overflow,
         intrinsics::mul_with_overflow }
@@ -1309,7 +1347,7 @@ impl i128 {
 #[cfg(target_pointer_width = "16")]
 #[lang = "isize"]
 impl isize {
-    int_impl! { i16, u16, 16,
+    int_impl! { isize, i16, u16, 16,
         intrinsics::add_with_overflow,
         intrinsics::sub_with_overflow,
         intrinsics::mul_with_overflow }
@@ -1318,7 +1356,7 @@ impl isize {
 #[cfg(target_pointer_width = "32")]
 #[lang = "isize"]
 impl isize {
-    int_impl! { i32, u32, 32,
+    int_impl! { isize, i32, u32, 32,
         intrinsics::add_with_overflow,
         intrinsics::sub_with_overflow,
         intrinsics::mul_with_overflow }
@@ -1327,7 +1365,7 @@ impl isize {
 #[cfg(target_pointer_width = "64")]
 #[lang = "isize"]
 impl isize {
-    int_impl! { i64, u64, 64,
+    int_impl! { isize, i64, u64, 64,
         intrinsics::add_with_overflow,
         intrinsics::sub_with_overflow,
         intrinsics::mul_with_overflow }
@@ -1335,7 +1373,7 @@ impl isize {
 
 // `Int` + `UnsignedInt` implemented for unsigned integers
 macro_rules! uint_impl {
-    ($ActualT:ty, $BITS:expr,
+    ($SelfT:ty, $ActualT:ty, $BITS:expr,
      $ctpop:path,
      $ctlz:path,
      $cttz:path,
@@ -1978,6 +2016,16 @@ macro_rules! uint_impl {
         /// ```
         #[stable(feature = "num_wrapping", since = "1.2.0")]
         #[inline(always)]
+        #[cfg(not(stage0))]
+        pub fn wrapping_shl(self, rhs: u32) -> Self {
+            unsafe {
+                intrinsics::unchecked_shl(self, (rhs & ($BITS - 1)) as $SelfT)
+            }
+        }
+
+        /// Stage 0
+        #[stable(feature = "num_wrapping", since = "1.2.0")]
+        #[cfg(stage0)]
         pub fn wrapping_shl(self, rhs: u32) -> Self {
             self.overflowing_shl(rhs).0
         }
@@ -2003,6 +2051,16 @@ macro_rules! uint_impl {
         /// ```
         #[stable(feature = "num_wrapping", since = "1.2.0")]
         #[inline(always)]
+        #[cfg(not(stage0))]
+        pub fn wrapping_shr(self, rhs: u32) -> Self {
+            unsafe {
+                intrinsics::unchecked_shr(self, (rhs & ($BITS - 1)) as $SelfT)
+            }
+        }
+
+        /// Stage 0
+        #[stable(feature = "num_wrapping", since = "1.2.0")]
+        #[cfg(stage0)]
         pub fn wrapping_shr(self, rhs: u32) -> Self {
             self.overflowing_shr(rhs).0
         }
@@ -2170,6 +2228,15 @@ macro_rules! uint_impl {
         /// ```
         #[inline]
         #[stable(feature = "wrapping", since = "1.7.0")]
+        #[cfg(not(stage0))]
+        pub fn overflowing_shl(self, rhs: u32) -> (Self, bool) {
+            (self.wrapping_shl(rhs), (rhs > ($BITS - 1)))
+        }
+
+        /// Stage 0
+        #[inline]
+        #[stable(feature = "wrapping", since = "1.7.0")]
+        #[cfg(stage0)]
         pub fn overflowing_shl(self, rhs: u32) -> (Self, bool) {
             (self << (rhs & ($BITS - 1)), (rhs > ($BITS - 1)))
         }
@@ -2192,6 +2259,16 @@ macro_rules! uint_impl {
         /// ```
         #[inline]
         #[stable(feature = "wrapping", since = "1.7.0")]
+        #[cfg(not(stage0))]
+        pub fn overflowing_shr(self, rhs: u32) -> (Self, bool) {
+            (self.wrapping_shr(rhs), (rhs > ($BITS - 1)))
+
+        }
+
+        /// Stage 0
+        #[inline]
+        #[stable(feature = "wrapping", since = "1.7.0")]
+        #[cfg(stage0)]
         pub fn overflowing_shr(self, rhs: u32) -> (Self, bool) {
             (self >> (rhs & ($BITS - 1)), (rhs > ($BITS - 1)))
         }
@@ -2292,7 +2369,7 @@ macro_rules! uint_impl {
 
 #[lang = "u8"]
 impl u8 {
-    uint_impl! { u8, 8,
+    uint_impl! { u8, u8, 8,
         intrinsics::ctpop,
         intrinsics::ctlz,
         intrinsics::cttz,
@@ -2304,7 +2381,7 @@ impl u8 {
 
 #[lang = "u16"]
 impl u16 {
-    uint_impl! { u16, 16,
+    uint_impl! { u16, u16, 16,
         intrinsics::ctpop,
         intrinsics::ctlz,
         intrinsics::cttz,
@@ -2316,7 +2393,7 @@ impl u16 {
 
 #[lang = "u32"]
 impl u32 {
-    uint_impl! { u32, 32,
+    uint_impl! { u32, u32, 32,
         intrinsics::ctpop,
         intrinsics::ctlz,
         intrinsics::cttz,
@@ -2328,7 +2405,7 @@ impl u32 {
 
 #[lang = "u64"]
 impl u64 {
-    uint_impl! { u64, 64,
+    uint_impl! { u64, u64, 64,
         intrinsics::ctpop,
         intrinsics::ctlz,
         intrinsics::cttz,
@@ -2340,7 +2417,7 @@ impl u64 {
 
 #[lang = "u128"]
 impl u128 {
-    uint_impl! { u128, 128,
+    uint_impl! { u128, u128, 128,
         intrinsics::ctpop,
         intrinsics::ctlz,
         intrinsics::cttz,
@@ -2353,7 +2430,7 @@ impl u128 {
 #[cfg(target_pointer_width = "16")]
 #[lang = "usize"]
 impl usize {
-    uint_impl! { u16, 16,
+    uint_impl! { usize, u16, 16,
         intrinsics::ctpop,
         intrinsics::ctlz,
         intrinsics::cttz,
@@ -2365,7 +2442,7 @@ impl usize {
 #[cfg(target_pointer_width = "32")]
 #[lang = "usize"]
 impl usize {
-    uint_impl! { u32, 32,
+    uint_impl! { usize, u32, 32,
         intrinsics::ctpop,
         intrinsics::ctlz,
         intrinsics::cttz,
@@ -2378,7 +2455,7 @@ impl usize {
 #[cfg(target_pointer_width = "64")]
 #[lang = "usize"]
 impl usize {
-    uint_impl! { u64, 64,
+    uint_impl! { usize, u64, 64,
         intrinsics::ctpop,
         intrinsics::ctlz,
         intrinsics::cttz,
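On the stable surface nothing changes: `wrapping_sh*` still masks the amount and `overflowing_sh*` still reports whether masking happened; only the implementation direction is inverted, with `overflowing_*` now defined in terms of `wrapping_*` and the masked amount cast to `$SelfT` because the intrinsic takes both operands at the same type. A quick behavioral check against the stable methods:

```rust
// Behavioral check of the (unchanged) stable semantics that the new lowering
// must preserve.
fn main() {
    // wrapping_* masks the shift amount by (BITS - 1).
    assert_eq!(1u8.wrapping_shl(8), 1);
    assert_eq!((-1i32).wrapping_shr(35), -1i32 >> 3);

    // overflowing_* additionally reports whether the amount was masked.
    assert_eq!(1u8.overflowing_shl(8), (1, true));
    assert_eq!(1u8.overflowing_shl(3), (8, false));
}
```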
diff --git a/src/librustc_trans/intrinsic.rs b/src/librustc_trans/intrinsic.rs
index b7aedb742db02..762bf8592ffcc 100644
--- a/src/librustc_trans/intrinsic.rs
+++ b/src/librustc_trans/intrinsic.rs
@@ -261,7 +261,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
         "ctlz" | "cttz" | "ctpop" | "bswap" |
         "add_with_overflow" | "sub_with_overflow" | "mul_with_overflow" |
         "overflowing_add" | "overflowing_sub" | "overflowing_mul" |
-        "unchecked_div" | "unchecked_rem" => {
+        "unchecked_div" | "unchecked_rem" | "unchecked_shl" | "unchecked_shr" => {
            let sty = &arg_tys[0].sty;
            match int_type_width_signed(sty, ccx) {
                Some((width, signed)) =>
@@ -311,6 +311,13 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
                            } else {
                                bcx.urem(llargs[0], llargs[1])
                            },
+                        "unchecked_shl" => bcx.shl(llargs[0], llargs[1]),
+                        "unchecked_shr" =>
+                            if signed {
+                                bcx.ashr(llargs[0], llargs[1])
+                            } else {
+                                bcx.lshr(llargs[0], llargs[1])
+                            },
                        _ => bug!(),
                    },
                    None => {
diff --git a/src/librustc_typeck/check/intrinsic.rs b/src/librustc_typeck/check/intrinsic.rs
index 28996b40cfdfe..2861fd288326b 100644
--- a/src/librustc_typeck/check/intrinsic.rs
+++ b/src/librustc_typeck/check/intrinsic.rs
@@ -273,6 +273,8 @@ pub fn check_intrinsic_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
 
         "unchecked_div" | "unchecked_rem" =>
             (1, vec![param(0), param(0)], param(0)),
+        "unchecked_shl" | "unchecked_shr" =>
+            (1, vec![param(0), param(0)], param(0)),
 
         "overflowing_add" | "overflowing_sub" | "overflowing_mul" =>
             (1, vec![param(0), param(0)], param(0)),
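The typeck arm gives both intrinsics the `fn(T, T) -> T` signature, and trans lowers them straight to the corresponding LLVM instructions: `shl` for the left shift, and `ashr` or `lshr` for the right shift depending on signedness. The signed/unsigned split is the usual arithmetic-versus-logical distinction, which can be seen on stable Rust by shifting the same bit pattern at both types:

```rust
// Arithmetic vs. logical right shift: ashr keeps the sign bit, lshr fills with
// zeros. Rust picks between them based on the operand's signedness, which is
// the same dispatch the trans change above performs for unchecked_shr.
fn main() {
    let x: i8 = -16;                         // bit pattern 0b1111_0000
    assert_eq!(x >> 2, -4);                  // arithmetic shift: 0b1111_1100
    assert_eq!(((x as u8) >> 2) as i8, 60);  // logical shift:    0b0011_1100
}
```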