Saturating casts between integers and floats #45205

Merged 7 commits on Nov 8, 2017
1 change: 1 addition & 0 deletions src/Cargo.lock


3 changes: 3 additions & 0 deletions src/librustc/session/config.rs
@@ -1107,6 +1107,9 @@ options! {DebuggingOptions, DebuggingSetter, basic_debugging_options,
"control whether #[inline] functions are in all cgus"),
tls_model: Option<String> = (None, parse_opt_string, [TRACKED],
"choose the TLS model to use (rustc --print tls-models for details)"),
saturating_float_casts: bool = (false, parse_bool, [TRACKED],
"make casts between integers and floats safe: clip out-of-range inputs to the min/max \
integer or to infinity respectively, and turn `NAN` into 0 when casting to integers"),
}

pub fn default_lib_output() -> CrateType {
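
A rough sketch of the semantics this flag opts into (illustrative assertions, not a test from this PR; without `-Z saturating-float-casts`, the out-of-range and NaN cases are still undefined at this point):

```rust
// Expected behaviour under `-Z saturating-float-casts` (hypothetical example):
fn main() {
    assert_eq!(300f32 as u8, 255);                 // clipped to u8::MAX
    assert_eq!(-1f32 as u8, 0);                    // clipped to u8::MIN
    assert_eq!(f32::NAN as u8, 0);                 // NaN -> 0 when casting to an integer
    assert_eq!(u128::MAX as f32, f32::INFINITY);   // too large for f32 -> infinity
}
```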
4 changes: 2 additions & 2 deletions src/librustc_apfloat/lib.rs
@@ -96,7 +96,7 @@ impl Status {
}

impl<T> StatusAnd<T> {
fn map<F: FnOnce(T) -> U, U>(self, f: F) -> StatusAnd<U> {
pub fn map<F: FnOnce(T) -> U, U>(self, f: F) -> StatusAnd<U> {
StatusAnd {
status: self.status,
value: f(self.value),
@@ -378,7 +378,7 @@ pub trait Float
fn from_bits(input: u128) -> Self;
fn from_i128_r(input: i128, round: Round) -> StatusAnd<Self> {
if input < 0 {
Self::from_u128_r(-input as u128, -round).map(|r| -r)
Self::from_u128_r(input.wrapping_neg() as u128, -round).map(|r| -r)
} else {
Self::from_u128_r(input as u128, round)
}
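
The switch to `wrapping_neg` only matters for `i128::MIN`, where plain negation overflows (and panics in debug builds); its magnitude, 2^127, is still representable in `u128`. A small standalone illustration (not part of the PR):

```rust
fn main() {
    // i128::MIN.wrapping_neg() wraps back to i128::MIN, whose bit pattern
    // reinterpreted as u128 is exactly 2^127, the magnitude we want.
    assert_eq!(i128::MIN.wrapping_neg() as u128, 1u128 << 127);
}
```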
8 changes: 8 additions & 0 deletions src/librustc_const_math/float.rs
@@ -203,3 +203,11 @@ impl ::std::ops::Neg for ConstFloat {
ConstFloat { bits, ty: self.ty }
}
}

/// This is `f32::MAX + (0.5 ULP)` as an integer. Numbers greater than or equal to this
/// are rounded to infinity when converted to `f32`.
///
/// NB: Computed as maximum significand with an extra 1 bit added (for the half ULP)
/// shifted by the maximum exponent (accounting for normalization).
pub const MAX_F32_PLUS_HALF_ULP: u128 = ((1 << (Single::PRECISION + 1)) - 1)
<< (Single::MAX_EXP - Single::PRECISION as i16);
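
As a sanity check, the same constant can be recomputed from std's `f32` parameters. This is a hypothetical standalone snippet (not part of the PR), assuming apfloat's `Single::MAX_EXP` counts the exponent of a significand in [1, 2) (i.e. 127), whereas std's `f32::MAX_EXP` uses the [0.5, 1) convention (i.e. 128):

```rust
fn main() {
    // f32::MANTISSA_DIGITS == 24, f32::MAX_EXP == 128.
    let max_f32_plus_half_ulp: u128 = ((1u128 << (f32::MANTISSA_DIGITS + 1)) - 1)
        << (f32::MAX_EXP as u32 - f32::MANTISSA_DIGITS - 1);
    // Equivalently: f32::MAX plus half an ULP of its top binade (2^103).
    assert_eq!(max_f32_plus_half_ulp, f32::MAX as u128 + (1u128 << 103));
}
```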
1 change: 1 addition & 0 deletions src/librustc_trans/Cargo.toml
@@ -19,6 +19,7 @@ owning_ref = "0.3.3"
rustc-demangle = "0.1.4"
rustc = { path = "../librustc" }
rustc_allocator = { path = "../librustc_allocator" }
rustc_apfloat = { path = "../librustc_apfloat" }
rustc_back = { path = "../librustc_back" }
rustc_const_math = { path = "../librustc_const_math" }
rustc_data_structures = { path = "../librustc_data_structures" }
2 changes: 2 additions & 0 deletions src/librustc_trans/lib.rs
@@ -24,6 +24,7 @@
#![feature(custom_attribute)]
#![allow(unused_attributes)]
#![feature(i128_type)]
#![feature(i128)]
#![feature(libc)]
#![feature(quote)]
#![feature(rustc_diagnostic_macros)]
@@ -43,6 +44,7 @@ extern crate libc;
extern crate owning_ref;
#[macro_use] extern crate rustc;
extern crate rustc_allocator;
extern crate rustc_apfloat;
extern crate rustc_back;
extern crate rustc_data_structures;
extern crate rustc_incremental;
73 changes: 65 additions & 8 deletions src/librustc_trans/mir/constant.rs
@@ -11,7 +11,7 @@
use llvm::{self, ValueRef};
use rustc::middle::const_val::{ConstEvalErr, ConstVal, ErrKind};
use rustc_const_math::ConstInt::*;
use rustc_const_math::{ConstInt, ConstMathErr};
use rustc_const_math::{ConstInt, ConstMathErr, MAX_F32_PLUS_HALF_ULP};
use rustc::hir::def_id::DefId;
use rustc::infer::TransNormalize;
use rustc::traits;
@@ -21,6 +21,7 @@ use rustc::ty::{self, Ty, TyCtxt, TypeFoldable};
use rustc::ty::layout::{self, LayoutTyper};
use rustc::ty::cast::{CastTy, IntTy};
use rustc::ty::subst::{Kind, Substs, Subst};
use rustc_apfloat::{ieee, Float, Status};
use rustc_data_structures::indexed_vec::{Idx, IndexVec};
use {adt, base, machine};
use abi::{self, Abi};
@@ -689,20 +690,18 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> {
llvm::LLVMConstIntCast(llval, ll_t_out.to_ref(), s)
}
(CastTy::Int(_), CastTy::Float) => {
if signed {
llvm::LLVMConstSIToFP(llval, ll_t_out.to_ref())
} else {
llvm::LLVMConstUIToFP(llval, ll_t_out.to_ref())
}
cast_const_int_to_float(self.ccx, llval, signed, ll_t_out)
}
(CastTy::Float, CastTy::Float) => {
llvm::LLVMConstFPCast(llval, ll_t_out.to_ref())
}
(CastTy::Float, CastTy::Int(IntTy::I)) => {
llvm::LLVMConstFPToSI(llval, ll_t_out.to_ref())
cast_const_float_to_int(self.ccx, &operand,
true, ll_t_out, span)
}
(CastTy::Float, CastTy::Int(_)) => {
llvm::LLVMConstFPToUI(llval, ll_t_out.to_ref())
cast_const_float_to_int(self.ccx, &operand,
false, ll_t_out, span)
}
(CastTy::Ptr(_), CastTy::Ptr(_)) |
(CastTy::FnPtr, CastTy::Ptr(_)) |
@@ -955,6 +954,64 @@ pub fn const_scalar_checked_binop<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
}
}

unsafe fn cast_const_float_to_int(ccx: &CrateContext,
operand: &Const,
signed: bool,
int_ty: Type,
span: Span) -> ValueRef {
let llval = operand.llval;
let float_bits = match operand.ty.sty {
ty::TyFloat(fty) => fty.bit_width(),
_ => bug!("cast_const_float_to_int: operand not a float"),
};
// Note: this breaks if llval is a complex constant expression rather than a simple constant.
// One way that might happen would be if addresses could be turned into integers in constant
// expressions, but that doesn't appear to be possible?
// In any case, an ICE is better than producing undef.
let llval_bits = consts::bitcast(llval, Type::ix(ccx, float_bits as u64));
let bits = const_to_opt_u128(llval_bits, false).unwrap_or_else(|| {
panic!("could not get bits of constant float {:?}",
Value(llval));
});
let int_width = int_ty.int_width() as usize;
// Try to convert, but report an error for overflow and NaN. This matches HIR const eval.
let cast_result = match float_bits {
32 if signed => ieee::Single::from_bits(bits).to_i128(int_width).map(|v| v as u128),
64 if signed => ieee::Double::from_bits(bits).to_i128(int_width).map(|v| v as u128),
32 => ieee::Single::from_bits(bits).to_u128(int_width),
64 => ieee::Double::from_bits(bits).to_u128(int_width),
n => bug!("unsupported float width {}", n),
};
if cast_result.status.contains(Status::INVALID_OP) {
let err = ConstEvalErr { span: span, kind: ErrKind::CannotCast };
err.report(ccx.tcx(), span, "expression");
}
C_big_integral(int_ty, cast_result.value)
}
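
For reference, a minimal sketch of the same overflow/NaN detection using only the `rustc_apfloat` API that appears in this diff (`from_bits`, `to_u128`, `Status`); the helper name and error handling are made up for illustration:

```rust
use rustc_apfloat::{ieee, Float, Status};

// Hypothetical helper: convert raw f32 bits to an unsigned integer of `width`
// bits. Overflow and NaN set Status::INVALID_OP, the same condition that
// cast_const_float_to_int above reports as ErrKind::CannotCast.
fn f32_bits_to_uint(bits: u128, width: usize) -> Result<u128, &'static str> {
    let result = ieee::Single::from_bits(bits).to_u128(width);
    if result.status.contains(Status::INVALID_OP) {
        Err("value cannot be represented in the destination integer type")
    } else {
        Ok(result.value)
    }
}
```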

unsafe fn cast_const_int_to_float(ccx: &CrateContext,
llval: ValueRef,
signed: bool,
float_ty: Type) -> ValueRef {
// Note: this breaks if llval is a complex constant expression rather than a simple constant.
// One way that might happen would be if addresses could be turned into integers in constant
// expressions, but that doesn't appear to be possible?
// In any case, an ICE is better than producing undef.
let value = const_to_opt_u128(llval, signed).unwrap_or_else(|| {
panic!("could not get z128 value of constant integer {:?}",
Value(llval));
});
if signed {
llvm::LLVMConstSIToFP(llval, float_ty.to_ref())
} else if float_ty.float_width() == 32 && value >= MAX_F32_PLUS_HALF_ULP {
// We're casting to f32 and the value is >= f32::MAX + 0.5 ULP -> round up to infinity.
let infinity_bits = C_u32(ccx, ieee::Single::INFINITY.to_bits() as u32);
consts::bitcast(infinity_bits, float_ty)
} else {
llvm::LLVMConstUIToFP(llval, float_ty.to_ref())
}
}

impl<'a, 'tcx> MirContext<'a, 'tcx> {
pub fn trans_constant(&mut self,
bcx: &Builder<'a, 'tcx>,
169 changes: 163 additions & 6 deletions src/librustc_trans/mir/rvalue.rs
@@ -15,11 +15,15 @@ use rustc::ty::layout::{Layout, LayoutTyper};
use rustc::mir::tcx::LvalueTy;
use rustc::mir;
use rustc::middle::lang_items::ExchangeMallocFnLangItem;
use rustc_apfloat::{ieee, Float, Status, Round};
use rustc_const_math::MAX_F32_PLUS_HALF_ULP;
use std::{u128, i128};

use base;
use builder::Builder;
use callee;
use common::{self, val_ty, C_bool, C_i32, C_null, C_usize, C_uint};
use common::{self, val_ty, C_bool, C_i32, C_u32, C_u64, C_null, C_usize, C_uint, C_big_integral};
use consts;
use adt;
use machine;
use monomorphize;
@@ -333,14 +337,12 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
bcx.ptrtoint(llval, ll_t_out),
(CastTy::Int(_), CastTy::Ptr(_)) =>
bcx.inttoptr(llval, ll_t_out),
(CastTy::Int(_), CastTy::Float) if signed =>
bcx.sitofp(llval, ll_t_out),
(CastTy::Int(_), CastTy::Float) =>
bcx.uitofp(llval, ll_t_out),
cast_int_to_float(&bcx, signed, llval, ll_t_in, ll_t_out),
(CastTy::Float, CastTy::Int(IntTy::I)) =>
bcx.fptosi(llval, ll_t_out),
cast_float_to_int(&bcx, true, llval, ll_t_in, ll_t_out),
(CastTy::Float, CastTy::Int(_)) =>
bcx.fptoui(llval, ll_t_out),
cast_float_to_int(&bcx, false, llval, ll_t_in, ll_t_out),
_ => bug!("unsupported cast: {:?} to {:?}", operand.ty, cast_ty)
};
OperandValue::Immediate(newval)
@@ -815,3 +817,158 @@ fn get_overflow_intrinsic(oop: OverflowOp, bcx: &Builder, ty: Ty) -> ValueRef {

bcx.ccx.get_intrinsic(&name)
}

fn cast_int_to_float(bcx: &Builder,
signed: bool,
x: ValueRef,
int_ty: Type,
float_ty: Type) -> ValueRef {
// Most integer types, even i128, fit into [-f32::MAX, f32::MAX] after rounding.
// It's only u128 -> f32 that can cause overflows (i.e., should yield infinity).
// LLVM's uitofp produces undef in those cases, so we manually check for that case.
let is_u128_to_f32 = !signed && int_ty.int_width() == 128 && float_ty.float_width() == 32;
if is_u128_to_f32 && bcx.sess().opts.debugging_opts.saturating_float_casts {
// All inputs greater than or equal to (f32::MAX + 0.5 ULP) are rounded to infinity,
// and for everything else LLVM's uitofp works just fine.
let max = C_big_integral(int_ty, MAX_F32_PLUS_HALF_ULP);
let overflow = bcx.icmp(llvm::IntUGE, x, max);
let infinity_bits = C_u32(bcx.ccx, ieee::Single::INFINITY.to_bits() as u32);
let infinity = consts::bitcast(infinity_bits, float_ty);
bcx.select(overflow, infinity, bcx.uitofp(x, float_ty))
} else {
if signed {
bcx.sitofp(x, float_ty)
} else {
bcx.uitofp(x, float_ty)
}
}
}
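
A plain-Rust model of what the special-cased `u128 -> f32` path computes (hypothetical helper with the threshold hard-coded; the real code emits an `icmp`/`select` pair instead):

```rust
// MAX_F32_PLUS_HALF_ULP = (2^25 - 1) * 2^103, i.e. f32::MAX + 0.5 ULP.
fn u128_to_f32_saturating(x: u128) -> f32 {
    const MAX_F32_PLUS_HALF_ULP: u128 = ((1u128 << 25) - 1) << 103;
    if x >= MAX_F32_PLUS_HALF_ULP {
        f32::INFINITY // rounds up past f32::MAX -> +inf
    } else {
        x as f32 // in range: uitofp rounds to the nearest representable f32
    }
}
```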

fn cast_float_to_int(bcx: &Builder,
signed: bool,
x: ValueRef,
float_ty: Type,
int_ty: Type) -> ValueRef {
let fptosui_result = if signed {
bcx.fptosi(x, int_ty)
} else {
bcx.fptoui(x, int_ty)
};

if !bcx.sess().opts.debugging_opts.saturating_float_casts {
return fptosui_result;
}
// LLVM's fpto[su]i returns undef when the input x is infinite, NaN, or does not fit into the
// destination integer type after rounding towards zero. This `undef` value can cause UB in
// safe code (see issue #10184), so we implement a saturating conversion on top of it:
// Semantically, the mathematical value of the input is rounded towards zero to the next
// mathematical integer, and then the result is clamped into the range of the destination
// integer type. Positive and negative infinity are mapped to the maximum and minimum value of
// the destination integer type. NaN is mapped to 0.
//
// Define f_min and f_max as the smallest and largest (finite) floats that are exactly equal to
// a value representable in int_ty.
// They are exactly equal to int_ty::{MIN,MAX} if float_ty has enough significand bits.
// Otherwise, int_ty::MAX must be rounded towards zero, as it is one less than a power of two.
// int_ty::MIN, however, is either zero or a negative power of two and is thus exactly
// representable. Note that this only works if float_ty's exponent range is sufficiently large.
// f16 or 256 bit integers would break this property. Right now the smallest float type is f32
// with exponents ranging up to 127, which is barely enough for i128::MIN = -2^127.
// On the other hand, f_max works even if int_ty::MAX is greater than float_ty::MAX. Because
// we're rounding towards zero, we just get float_ty::MAX (which is always an integer).
// This already happens today with u128::MAX = 2^128 - 1 > f32::MAX.
fn compute_clamp_bounds<F: Float>(signed: bool, int_ty: Type) -> (u128, u128) {
let rounded_min = F::from_i128_r(int_min(signed, int_ty), Round::TowardZero);
assert_eq!(rounded_min.status, Status::OK);
let rounded_max = F::from_u128_r(int_max(signed, int_ty), Round::TowardZero);
assert!(rounded_max.value.is_finite());
(rounded_min.value.to_bits(), rounded_max.value.to_bits())
}
fn int_max(signed: bool, int_ty: Type) -> u128 {
let shift_amount = 128 - int_ty.int_width();
if signed {
i128::MAX as u128 >> shift_amount
} else {
u128::MAX >> shift_amount
}
}
fn int_min(signed: bool, int_ty: Type) -> i128 {
if signed {
i128::MIN >> (128 - int_ty.int_width())
} else {
0
}
}
let float_bits_to_llval = |bits| {
let bits_llval = match float_ty.float_width() {
32 => C_u32(bcx.ccx, bits as u32),
64 => C_u64(bcx.ccx, bits as u64),
n => bug!("unsupported float width {}", n),
};
consts::bitcast(bits_llval, float_ty)
};
let (f_min, f_max) = match float_ty.float_width() {
32 => compute_clamp_bounds::<ieee::Single>(signed, int_ty),
64 => compute_clamp_bounds::<ieee::Double>(signed, int_ty),
n => bug!("unsupported float width {}", n),
};
let f_min = float_bits_to_llval(f_min);
let f_max = float_bits_to_llval(f_max);
// To implement saturation, we perform the following steps:
//
// 1. Cast x to an integer with fpto[su]i. This may result in undef.
// 2. Compare x to f_min and f_max, and use the comparison results to select:
// a) int_ty::MIN if x < f_min or x is NaN
// b) int_ty::MAX if x > f_max
// c) the result of fpto[su]i otherwise
// 3. If x is NaN, return 0, otherwise return the result of step 2.
//
// This avoids returning undef: values in range [f_min, f_max] by definition fit into the
// destination type, and for out-of-range or NaN inputs the selects below never pick the
// fpto[su]i result. The cast does create an undef temporary, but *producing* undef is not UB,
// and our use of undef does not introduce any non-determinism either.
// More importantly, the above procedure correctly implements saturating conversion.
// Proof (sketch):
// If x is NaN, 0 is returned by definition.
// Otherwise, x is finite or infinite and thus can be compared with f_min and f_max.
// This yields three cases to consider:
// (1) if x in [f_min, f_max], the result of fpto[su]i is returned, which agrees with
// saturating conversion for inputs in that range.
// (2) if x > f_max, then x is larger than int_ty::MAX. This holds even if f_max is rounded
// (i.e., if f_max < int_ty::MAX) because in those cases, nextUp(f_max) is already larger
// than int_ty::MAX. Because x is larger than int_ty::MAX, the return value of int_ty::MAX
// is correct.
// (3) if x < f_min, then x is smaller than int_ty::MIN. As shown earlier, f_min exactly equals
// int_ty::MIN and therefore the return value of int_ty::MIN is correct.
// QED.

// Step 1 was already performed above.

// Step 2: We use two comparisons and two selects, with %s1 being the result:
// %less_or_nan = fcmp ult %x, %f_min
// %greater = fcmp olt %x, %f_max
// %s0 = select %less_or_nan, int_ty::MIN, %fptosi_result
// %s1 = select %greater, int_ty::MAX, %s0
// Note that %less_or_nan uses an *unordered* comparison. This comparison is true if the
// operands are not comparable (i.e., if x is NaN). The unordered comparison ensures that s1
// becomes int_ty::MIN if x is NaN.
// Performance note: Unordered comparison can be lowered to a "flipped" comparison and a
// negation, and the negation can be merged into the select. Therefore, it is not necessarily any
// more expensive than an ordered ("normal") comparison. Whether these optimizations will be
// performed is ultimately up to the backend, but at least x86 does perform them.
let less_or_nan = bcx.fcmp(llvm::RealULT, x, f_min);
let greater = bcx.fcmp(llvm::RealOGT, x, f_max);
let int_max = C_big_integral(int_ty, int_max(signed, int_ty));
let int_min = C_big_integral(int_ty, int_min(signed, int_ty) as u128);
let s0 = bcx.select(less_or_nan, int_min, fptosui_result);
let s1 = bcx.select(greater, int_max, s0);

// Step 3: NaN replacement.
// For unsigned types, the above step already yielded int_ty::MIN == 0 if x is NaN.
// Therefore we only need to execute this step for signed integer types.
if signed {
// LLVM has no isNaN predicate, so we use (x == x) instead
bcx.select(bcx.fcmp(llvm::RealOEQ, x, x), s1, C_uint(int_ty, 0))
} else {
s1
}
}
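
To make the three steps concrete, here is a plain-Rust model of the resulting semantics for the `f32 -> i32` case, with the clamp bounds written out by hand (the real code derives them via `compute_clamp_bounds` and emits the `fcmp`/`select` sequence above); an illustrative sketch, not code from the PR:

```rust
fn saturating_f32_to_i32(x: f32) -> i32 {
    // f_min: i32::MIN = -2^31 is a power of two, hence exactly representable in f32.
    const F_MIN: f32 = -2147483648.0;
    // f_max: i32::MAX = 2^31 - 1 rounded towards zero is 2^31 - 2^7 = 2147483520.
    const F_MAX: f32 = 2147483520.0;
    if x != x {
        0 // step 3: NaN -> 0 (the `x == x` trick, since there is no isNaN predicate)
    } else if x < F_MIN {
        i32::MIN // step 2a: clamp to the minimum
    } else if x > F_MAX {
        i32::MAX // step 2b: clamp to the maximum
    } else {
        x as i32 // step 2c: in range, fpto[su]i is well-defined
    }
}
```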