Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Miri: convert to/from apfloat instead of host floats #61673

Merged
merged 7 commits into from
Jun 11, 2019
Merged
Show file tree
Hide file tree
Changes from 4 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
59 changes: 49 additions & 10 deletions src/librustc/mir/interpret/value.rs
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
use std::fmt;
use rustc_macros::HashStable;
use rustc_apfloat::{Float, ieee::{Double, Single}};

use crate::ty::{Ty, InferConst, ParamConst, layout::{HasDataLayout, Size}, subst::SubstsRef};
use crate::ty::PlaceholderConst;
Expand Down Expand Up @@ -131,6 +132,20 @@ impl<Tag> fmt::Display for Scalar<Tag> {
}
}

impl<Tag> From<Single> for Scalar<Tag> {
#[inline(always)]
fn from(f: Single) -> Self {
Scalar::from_f32(f)
}
}

impl<Tag> From<Double> for Scalar<Tag> {
#[inline(always)]
fn from(f: Double) -> Self {
Scalar::from_f64(f)
}
}

impl<'tcx> Scalar<()> {
#[inline(always)]
fn check_data(data: u128, size: u8) {
Expand Down Expand Up @@ -279,6 +294,26 @@ impl<'tcx, Tag> Scalar<Tag> {
Scalar::Raw { data: i, size: size.bytes() as u8 }
}

#[inline]
/// Creates a `Scalar` holding the given `u8`, stored in 1 byte.
pub fn from_u8(i: u8) -> Self {
    // `u128::from` instead of `as`: the widening is lossless and
    // this makes the non-truncating intent explicit (clippy::cast_lossless).
    Scalar::Raw { data: u128::from(i), size: 1 }
}

#[inline]
/// Creates a `Scalar` holding the given `u16`, stored in 2 bytes.
pub fn from_u16(i: u16) -> Self {
    // `u128::from` instead of `as`: the widening is lossless and
    // this makes the non-truncating intent explicit (clippy::cast_lossless).
    Scalar::Raw { data: u128::from(i), size: 2 }
}

#[inline]
/// Creates a `Scalar` holding the given `u32`, stored in 4 bytes.
pub fn from_u32(i: u32) -> Self {
    // `u128::from` instead of `as`: the widening is lossless and
    // this makes the non-truncating intent explicit (clippy::cast_lossless).
    Scalar::Raw { data: u128::from(i), size: 4 }
}

#[inline]
/// Creates a `Scalar` holding the given `u64`, stored in 8 bytes.
pub fn from_u64(i: u64) -> Self {
    // `u128::from` instead of `as`: the widening is lossless and
    // this makes the non-truncating intent explicit (clippy::cast_lossless).
    Scalar::Raw { data: u128::from(i), size: 8 }
}

#[inline]
pub fn from_int(i: impl Into<i128>, size: Size) -> Self {
let i = i.into();
Expand All @@ -292,13 +327,15 @@ impl<'tcx, Tag> Scalar<Tag> {
}

#[inline]
pub fn from_f32(f: f32) -> Self {
Scalar::Raw { data: f.to_bits() as u128, size: 4 }
pub fn from_f32(f: Single) -> Self {
// We trust apfloat to give us properly truncated data
Scalar::Raw { data: f.to_bits(), size: 4 }
}

#[inline]
pub fn from_f64(f: f64) -> Self {
Scalar::Raw { data: f.to_bits() as u128, size: 8 }
pub fn from_f64(f: Double) -> Self {
// We trust apfloat to give us properly truncated data
Scalar::Raw { data: f.to_bits(), size: 8 }
}

#[inline]
Expand Down Expand Up @@ -427,13 +464,15 @@ impl<'tcx, Tag> Scalar<Tag> {
}

#[inline]
pub fn to_f32(self) -> InterpResult<'static, f32> {
Ok(f32::from_bits(self.to_u32()?))
pub fn to_f32(self) -> InterpResult<'static, Single> {
// Going through `u32` to check size and truncation.
Ok(Single::from_bits(self.to_u32()? as u128))
}

#[inline]
pub fn to_f64(self) -> InterpResult<'static, f64> {
Ok(f64::from_bits(self.to_u64()?))
pub fn to_f64(self) -> InterpResult<'static, Double> {
// Going through `u64` to check size and truncation.
Ok(Double::from_bits(self.to_u64()? as u128))
}
}

Expand Down Expand Up @@ -517,12 +556,12 @@ impl<'tcx, Tag> ScalarMaybeUndef<Tag> {
}

#[inline(always)]
pub fn to_f32(self) -> InterpResult<'tcx, f32> {
pub fn to_f32(self) -> InterpResult<'tcx, Single> {
self.not_undef()?.to_f32()
}

#[inline(always)]
pub fn to_f64(self) -> InterpResult<'tcx, f64> {
pub fn to_f64(self) -> InterpResult<'tcx, Double> {
self.not_undef()?.to_f64()
}

Expand Down
11 changes: 5 additions & 6 deletions src/librustc_mir/hair/constant.rs
Original file line number Diff line number Diff line change
Expand Up @@ -69,8 +69,7 @@ fn parse_float<'tcx>(
) -> Result<ConstValue<'tcx>, ()> {
let num = num.as_str();
use rustc_apfloat::ieee::{Single, Double};
use rustc_apfloat::Float;
let (data, size) = match fty {
let scalar = match fty {
ast::FloatTy::F32 => {
num.parse::<f32>().map_err(|_| ())?;
let mut f = num.parse::<Single>().unwrap_or_else(|e| {
Expand All @@ -79,19 +78,19 @@ fn parse_float<'tcx>(
if neg {
f = -f;
}
(f.to_bits(), 4)
Scalar::from_f32(f)
}
ast::FloatTy::F64 => {
num.parse::<f64>().map_err(|_| ())?;
let mut f = num.parse::<Double>().unwrap_or_else(|e| {
panic!("apfloat::ieee::Single failed to parse `{}`: {:?}", num, e)
panic!("apfloat::ieee::Double failed to parse `{}`: {:?}", num, e)
});
if neg {
f = -f;
}
(f.to_bits(), 8)
Scalar::from_f64(f)
}
};

Ok(ConstValue::Scalar(Scalar::from_uint(data, Size::from_bytes(size))))
Ok(ConstValue::Scalar(scalar))
}
42 changes: 16 additions & 26 deletions src/librustc_mir/interpret/cast.rs
Original file line number Diff line number Diff line change
Expand Up @@ -166,21 +166,17 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> InterpretCx<'a, 'mir, 'tcx, M>
Ok(Scalar::from_uint(v, dest_layout.size))
}

Float(FloatTy::F32) if signed => Ok(Scalar::from_uint(
Single::from_i128(v as i128).value.to_bits(),
Size::from_bits(32)
Float(FloatTy::F32) if signed => Ok(Scalar::from_f32(
Single::from_i128(v as i128).value
)),
Float(FloatTy::F64) if signed => Ok(Scalar::from_uint(
Double::from_i128(v as i128).value.to_bits(),
Size::from_bits(64)
Float(FloatTy::F64) if signed => Ok(Scalar::from_f64(
Double::from_i128(v as i128).value
)),
Float(FloatTy::F32) => Ok(Scalar::from_uint(
Single::from_u128(v).value.to_bits(),
Size::from_bits(32)
Float(FloatTy::F32) => Ok(Scalar::from_f32(
Single::from_u128(v).value
)),
Float(FloatTy::F64) => Ok(Scalar::from_uint(
Double::from_u128(v).value.to_bits(),
Size::from_bits(64)
Float(FloatTy::F64) => Ok(Scalar::from_f64(
Double::from_u128(v).value
)),

Char => {
Expand Down Expand Up @@ -223,22 +219,16 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> InterpretCx<'a, 'mir, 'tcx, M>
Ok(Scalar::from_int(v, Size::from_bits(width as u64)))
},
// f64 -> f32
Float(FloatTy::F32) if fty == FloatTy::F64 => {
Ok(Scalar::from_uint(
Single::to_bits(Double::from_bits(bits).convert(&mut false).value),
Size::from_bits(32),
))
},
Float(FloatTy::F32) if fty == FloatTy::F64 =>
Ok(Scalar::from_f32(Double::from_bits(bits).convert(&mut false).value)),
// f32 -> f64
Float(FloatTy::F64) if fty == FloatTy::F32 => {
Ok(Scalar::from_uint(
Double::to_bits(Single::from_bits(bits).convert(&mut false).value),
Size::from_bits(64),
))
},
Float(FloatTy::F64) if fty == FloatTy::F32 =>
Ok(Scalar::from_f64(Single::from_bits(bits).convert(&mut false).value)),
// identity cast
Float(FloatTy:: F64) => Ok(Scalar::from_uint(bits, Size::from_bits(64))),
Float(FloatTy:: F32) => Ok(Scalar::from_uint(bits, Size::from_bits(32))),
Float(FloatTy::F64) if fty == FloatTy::F64 =>
Ok(Scalar::from_uint(bits, Size::from_bits(64))),
Float(FloatTy::F32) if fty == FloatTy::F32 =>
Ok(Scalar::from_uint(bits, Size::from_bits(32))),
_ => err!(Unimplemented(format!("float to {:?} cast", dest_ty))),
}
}
Expand Down
91 changes: 39 additions & 52 deletions src/librustc_mir/interpret/operator.rs
Original file line number Diff line number Diff line change
@@ -1,7 +1,6 @@
use rustc::mir;
use rustc::ty::{self, layout::{Size, TyLayout}};
use rustc::ty::{self, layout::TyLayout};
use syntax::ast::FloatTy;
use rustc_apfloat::ieee::{Double, Single};
use rustc_apfloat::Float;
use rustc::mir::interpret::{InterpResult, Scalar};

Expand Down Expand Up @@ -43,7 +42,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> InterpretCx<'a, 'mir, 'tcx, M>
bin_op: mir::BinOp,
l: char,
r: char,
) -> InterpResult<'tcx, (Scalar<M::PointerTag>, bool)> {
) -> (Scalar<M::PointerTag>, bool) {
use rustc::mir::BinOp::*;

let res = match bin_op {
Expand All @@ -55,15 +54,15 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> InterpretCx<'a, 'mir, 'tcx, M>
Ge => l >= r,
_ => bug!("Invalid operation on char: {:?}", bin_op),
};
return Ok((Scalar::from_bool(res), false));
return (Scalar::from_bool(res), false);
}

fn binary_bool_op(
&self,
bin_op: mir::BinOp,
l: bool,
r: bool,
) -> InterpResult<'tcx, (Scalar<M::PointerTag>, bool)> {
) -> (Scalar<M::PointerTag>, bool) {
use rustc::mir::BinOp::*;

let res = match bin_op {
Expand All @@ -78,46 +77,32 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> InterpretCx<'a, 'mir, 'tcx, M>
BitXor => l ^ r,
_ => bug!("Invalid operation on bool: {:?}", bin_op),
};
return Ok((Scalar::from_bool(res), false));
return (Scalar::from_bool(res), false);
}

fn binary_float_op(
fn binary_float_op<F: Float + Into<Scalar<M::PointerTag>>>(
&self,
bin_op: mir::BinOp,
fty: FloatTy,
// passing in raw bits
l: u128,
r: u128,
) -> InterpResult<'tcx, (Scalar<M::PointerTag>, bool)> {
l: F,
r: F,
) -> (Scalar<M::PointerTag>, bool) {
use rustc::mir::BinOp::*;

macro_rules! float_math {
($ty:path, $size:expr) => {{
let l = <$ty>::from_bits(l);
let r = <$ty>::from_bits(r);
let bitify = |res: ::rustc_apfloat::StatusAnd<$ty>|
Scalar::from_uint(res.value.to_bits(), Size::from_bytes($size));
let val = match bin_op {
Eq => Scalar::from_bool(l == r),
Ne => Scalar::from_bool(l != r),
Lt => Scalar::from_bool(l < r),
Le => Scalar::from_bool(l <= r),
Gt => Scalar::from_bool(l > r),
Ge => Scalar::from_bool(l >= r),
Add => bitify(l + r),
Sub => bitify(l - r),
Mul => bitify(l * r),
Div => bitify(l / r),
Rem => bitify(l % r),
_ => bug!("invalid float op: `{:?}`", bin_op),
};
return Ok((val, false));
}};
}
match fty {
FloatTy::F32 => float_math!(Single, 4),
FloatTy::F64 => float_math!(Double, 8),
}
let val = match bin_op {
Eq => Scalar::from_bool(l == r),
Ne => Scalar::from_bool(l != r),
Lt => Scalar::from_bool(l < r),
Le => Scalar::from_bool(l <= r),
Gt => Scalar::from_bool(l > r),
Ge => Scalar::from_bool(l >= r),
Add => (l + r).value.into(),
Sub => (l - r).value.into(),
Mul => (l * r).value.into(),
Div => (l / r).value.into(),
Rem => (l % r).value.into(),
_ => bug!("invalid float op: `{:?}`", bin_op),
};
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Much nicer!

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yeah, I love this. :) If only we had a similar trait for integers.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

All integer operations can be implemented with a runtime bitwidth n and an u128 to hold the value, though (maybe i128 for signed).

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Like, LLVM also has an APInt, not just APFloat, and APFloat uses APInt for the significand, but I didn't port APInt as its own thing, just added a bunch of functions, because of how relatively simple it is:

/// Implementation details of IeeeFloat significands, such as big integer arithmetic.
/// As a rule of thumb, no functions in this module should dynamically allocate.
mod sig {
use std::cmp::Ordering;
use std::mem;
use super::{ExpInt, Limb, LIMB_BITS, limbs_for_bits, Loss};
pub(super) fn is_all_zeros(limbs: &[Limb]) -> bool {
limbs.iter().all(|&l| l == 0)
}
/// One, not zero, based LSB. That is, returns 0 for a zeroed significand.
pub(super) fn olsb(limbs: &[Limb]) -> usize {
limbs.iter().enumerate().find(|(_, &limb)| limb != 0).map_or(0,
|(i, limb)| i * LIMB_BITS + limb.trailing_zeros() as usize + 1)
}
/// One, not zero, based MSB. That is, returns 0 for a zeroed significand.
pub(super) fn omsb(limbs: &[Limb]) -> usize {
limbs.iter().enumerate().rfind(|(_, &limb)| limb != 0).map_or(0,
|(i, limb)| (i + 1) * LIMB_BITS - limb.leading_zeros() as usize)
}
/// Comparison (unsigned) of two significands.
pub(super) fn cmp(a: &[Limb], b: &[Limb]) -> Ordering {
assert_eq!(a.len(), b.len());
for (a, b) in a.iter().zip(b).rev() {
match a.cmp(b) {
Ordering::Equal => {}
o => return o,
}
}
Ordering::Equal
}
/// Extracts the given bit.
pub(super) fn get_bit(limbs: &[Limb], bit: usize) -> bool {
limbs[bit / LIMB_BITS] & (1 << (bit % LIMB_BITS)) != 0
}
/// Sets the given bit.
pub(super) fn set_bit(limbs: &mut [Limb], bit: usize) {
limbs[bit / LIMB_BITS] |= 1 << (bit % LIMB_BITS);
}
/// Clear the given bit.
pub(super) fn clear_bit(limbs: &mut [Limb], bit: usize) {
limbs[bit / LIMB_BITS] &= !(1 << (bit % LIMB_BITS));
}
/// Shifts `dst` left `bits` bits, subtract `bits` from its exponent.
pub(super) fn shift_left(dst: &mut [Limb], exp: &mut ExpInt, bits: usize) {
if bits > 0 {
// Our exponent should not underflow.
*exp = exp.checked_sub(bits as ExpInt).unwrap();
// Jump is the inter-limb jump; shift is the intra-limb shift.
let jump = bits / LIMB_BITS;
let shift = bits % LIMB_BITS;
for i in (0..dst.len()).rev() {
let mut limb;
if i < jump {
limb = 0;
} else {
// dst[i] comes from the two limbs src[i - jump] and, if we have
// an intra-limb shift, src[i - jump - 1].
limb = dst[i - jump];
if shift > 0 {
limb <<= shift;
if i > jump {
limb |= dst[i - jump - 1] >> (LIMB_BITS - shift);
}
}
}
dst[i] = limb;
}
}
}
/// Shifts `dst` right `bits` bits noting lost fraction.
pub(super) fn shift_right(dst: &mut [Limb], exp: &mut ExpInt, bits: usize) -> Loss {
let loss = Loss::through_truncation(dst, bits);
if bits > 0 {
// Our exponent should not overflow.
*exp = exp.checked_add(bits as ExpInt).unwrap();
// Jump is the inter-limb jump; shift is the intra-limb shift.
let jump = bits / LIMB_BITS;
let shift = bits % LIMB_BITS;
// Perform the shift. This leaves the most significant `bits` bits
// of the result at zero.
for i in 0..dst.len() {
let mut limb;
if i + jump >= dst.len() {
limb = 0;
} else {
limb = dst[i + jump];
if shift > 0 {
limb >>= shift;
if i + jump + 1 < dst.len() {
limb |= dst[i + jump + 1] << (LIMB_BITS - shift);
}
}
}
dst[i] = limb;
}
}
loss
}
/// Copies the bit vector of width `src_bits` from `src`, starting at bit SRC_LSB,
/// to `dst`, such that the bit SRC_LSB becomes the least significant bit of `dst`.
/// All high bits above `src_bits` in `dst` are zero-filled.
pub(super) fn extract(dst: &mut [Limb], src: &[Limb], src_bits: usize, src_lsb: usize) {
if src_bits == 0 {
return;
}
let dst_limbs = limbs_for_bits(src_bits);
assert!(dst_limbs <= dst.len());
let src = &src[src_lsb / LIMB_BITS..];
dst[..dst_limbs].copy_from_slice(&src[..dst_limbs]);
let shift = src_lsb % LIMB_BITS;
let _: Loss = shift_right(&mut dst[..dst_limbs], &mut 0, shift);
// We now have (dst_limbs * LIMB_BITS - shift) bits from `src`
// in `dst`. If this is less that src_bits, append the rest, else
// clear the high bits.
let n = dst_limbs * LIMB_BITS - shift;
if n < src_bits {
let mask = (1 << (src_bits - n)) - 1;
dst[dst_limbs - 1] |= (src[dst_limbs] & mask) << (n % LIMB_BITS);
} else if n > src_bits && src_bits % LIMB_BITS > 0 {
dst[dst_limbs - 1] &= (1 << (src_bits % LIMB_BITS)) - 1;
}
// Clear high limbs.
for x in &mut dst[dst_limbs..] {
*x = 0;
}
}
/// We want the most significant PRECISION bits of `src`. There may not
/// be that many; extract what we can.
pub(super) fn from_limbs(dst: &mut [Limb], src: &[Limb], precision: usize) -> (Loss, ExpInt) {
let omsb = omsb(src);
if precision <= omsb {
extract(dst, src, precision, omsb - precision);
(
Loss::through_truncation(src, omsb - precision),
omsb as ExpInt - 1,
)
} else {
extract(dst, src, omsb, 0);
(Loss::ExactlyZero, precision as ExpInt - 1)
}
}
/// For every consecutive chunk of `bits` bits from `limbs`,
/// going from most significant to the least significant bits,
/// call `f` to transform those bits and store the result back.
pub(super) fn each_chunk<F: FnMut(Limb) -> Limb>(limbs: &mut [Limb], bits: usize, mut f: F) {
assert_eq!(LIMB_BITS % bits, 0);
for limb in limbs.iter_mut().rev() {
let mut r = 0;
for i in (0..LIMB_BITS / bits).rev() {
r |= f((*limb >> (i * bits)) & ((1 << bits) - 1)) << (i * bits);
}
*limb = r;
}
}
/// Increment in-place, return the carry flag.
pub(super) fn increment(dst: &mut [Limb]) -> Limb {
for x in dst {
*x = x.wrapping_add(1);
if *x != 0 {
return 0;
}
}
1
}
/// Decrement in-place, return the borrow flag.
pub(super) fn decrement(dst: &mut [Limb]) -> Limb {
for x in dst {
*x = x.wrapping_sub(1);
if *x != !0 {
return 0;
}
}
1
}
/// `a += b + c` where `c` is zero or one. Returns the carry flag.
pub(super) fn add(a: &mut [Limb], b: &[Limb], mut c: Limb) -> Limb {
assert!(c <= 1);
for (a, &b) in a.iter_mut().zip(b) {
let (r, overflow) = a.overflowing_add(b);
let (r, overflow2) = r.overflowing_add(c);
*a = r;
c = (overflow | overflow2) as Limb;
}
c
}
/// `a -= b + c` where `c` is zero or one. Returns the borrow flag.
pub(super) fn sub(a: &mut [Limb], b: &[Limb], mut c: Limb) -> Limb {
assert!(c <= 1);
for (a, &b) in a.iter_mut().zip(b) {
let (r, overflow) = a.overflowing_sub(b);
let (r, overflow2) = r.overflowing_sub(c);
*a = r;
c = (overflow | overflow2) as Limb;
}
c
}
/// `a += b` or `a -= b`. Does not preserve `b`.
pub(super) fn add_or_sub(
a_sig: &mut [Limb],
a_exp: &mut ExpInt,
a_sign: &mut bool,
b_sig: &mut [Limb],
b_exp: ExpInt,
b_sign: bool,
) -> Loss {
// Are we bigger exponent-wise than the RHS?
let bits = *a_exp - b_exp;
// Determine if the operation on the absolute values is effectively
// an addition or subtraction.
// Subtraction is more subtle than one might naively expect.
if *a_sign ^ b_sign {
let (reverse, loss);
if bits == 0 {
reverse = cmp(a_sig, b_sig) == Ordering::Less;
loss = Loss::ExactlyZero;
} else if bits > 0 {
loss = shift_right(b_sig, &mut 0, (bits - 1) as usize);
shift_left(a_sig, a_exp, 1);
reverse = false;
} else {
loss = shift_right(a_sig, a_exp, (-bits - 1) as usize);
shift_left(b_sig, &mut 0, 1);
reverse = true;
}
let borrow = (loss != Loss::ExactlyZero) as Limb;
if reverse {
// The code above is intended to ensure that no borrow is necessary.
assert_eq!(sub(b_sig, a_sig, borrow), 0);
a_sig.copy_from_slice(b_sig);
*a_sign = !*a_sign;
} else {
// The code above is intended to ensure that no borrow is necessary.
assert_eq!(sub(a_sig, b_sig, borrow), 0);
}
// Invert the lost fraction - it was on the RHS and subtracted.
match loss {
Loss::LessThanHalf => Loss::MoreThanHalf,
Loss::MoreThanHalf => Loss::LessThanHalf,
_ => loss,
}
} else {
let loss = if bits > 0 {
shift_right(b_sig, &mut 0, bits as usize)
} else {
shift_right(a_sig, a_exp, -bits as usize)
};
// We have a guard bit; generating a carry cannot happen.
assert_eq!(add(a_sig, b_sig, 0), 0);
loss
}
}
/// `[low, high] = a * b`.
///
/// This cannot overflow, because
///
/// `(n - 1) * (n - 1) + 2 * (n - 1) == (n - 1) * (n + 1)`
///
/// which is less than n<sup>2</sup>.
pub(super) fn widening_mul(a: Limb, b: Limb) -> [Limb; 2] {
let mut wide = [0, 0];
if a == 0 || b == 0 {
return wide;
}
const HALF_BITS: usize = LIMB_BITS / 2;
let select = |limb, i| (limb >> (i * HALF_BITS)) & ((1 << HALF_BITS) - 1);
for i in 0..2 {
for j in 0..2 {
let mut x = [select(a, i) * select(b, j), 0];
shift_left(&mut x, &mut 0, (i + j) * HALF_BITS);
assert_eq!(add(&mut wide, &x, 0), 0);
}
}
wide
}
/// `dst = a * b` (for normal `a` and `b`). Returns the lost fraction.
pub(super) fn mul<'a>(
dst: &mut [Limb],
exp: &mut ExpInt,
mut a: &'a [Limb],
mut b: &'a [Limb],
precision: usize,
) -> Loss {
// Put the narrower number in `a` so the loops below run fewer times.
if a.len() > b.len() {
mem::swap(&mut a, &mut b);
}
for x in &mut dst[..b.len()] {
*x = 0;
}
for i in 0..a.len() {
let mut carry = 0;
for j in 0..b.len() {
let [low, mut high] = widening_mul(a[i], b[j]);
// Now add carry.
let (low, overflow) = low.overflowing_add(carry);
high += overflow as Limb;
// And now `dst[i + j]`, and store the new low part there.
let (low, overflow) = low.overflowing_add(dst[i + j]);
high += overflow as Limb;
dst[i + j] = low;
carry = high;
}
dst[i + b.len()] = carry;
}
// Assume the operands involved in the multiplication are single-precision
// FP, and the two multiplicands are:
// a = a23 . a22 ... a0 * 2^e1
// b = b23 . b22 ... b0 * 2^e2
// the result of multiplication is:
// dst = c48 c47 c46 . c45 ... c0 * 2^(e1+e2)
// Note that there are three significant bits at the left-hand side of the
// radix point: two for the multiplication, and an overflow bit for the
// addition (that will always be zero at this point). Move the radix point
// toward left by two bits, and adjust exponent accordingly.
*exp += 2;
// Convert the result having "2 * precision" significant-bits back to the one
// having "precision" significant-bits. First, move the radix point from
// position "2*precision - 1" to "precision - 1". The exponent needs to be
// adjusted by "2*precision - 1" - "precision - 1" = "precision".
*exp -= precision as ExpInt + 1;
// In case MSB resides at the left-hand side of radix point, shift the
// mantissa right by some amount to make sure the MSB reside right before
// the radix point (i.e., "MSB . rest-significant-bits").
//
// Note that the result is not normalized when "omsb < precision". So, the
// caller needs to call IeeeFloat::normalize() if normalized value is
// expected.
let omsb = omsb(dst);
if omsb <= precision {
Loss::ExactlyZero
} else {
shift_right(dst, exp, omsb - precision)
}
}
/// `quotient = dividend / divisor`. Returns the lost fraction.
/// Does not preserve `dividend` or `divisor`.
pub(super) fn div(
quotient: &mut [Limb],
exp: &mut ExpInt,
dividend: &mut [Limb],
divisor: &mut [Limb],
precision: usize,
) -> Loss {
// Normalize the divisor.
let bits = precision - omsb(divisor);
shift_left(divisor, &mut 0, bits);
*exp += bits as ExpInt;
// Normalize the dividend.
let bits = precision - omsb(dividend);
shift_left(dividend, exp, bits);
// Division by 1.
let olsb_divisor = olsb(divisor);
if olsb_divisor == precision {
quotient.copy_from_slice(dividend);
return Loss::ExactlyZero;
}
// Ensure the dividend >= divisor initially for the loop below.
// Incidentally, this means that the division loop below is
// guaranteed to set the integer bit to one.
if cmp(dividend, divisor) == Ordering::Less {
shift_left(dividend, exp, 1);
assert_ne!(cmp(dividend, divisor), Ordering::Less)
}
// Helper for figuring out the lost fraction.
let lost_fraction = |dividend: &[Limb], divisor: &[Limb]| {
match cmp(dividend, divisor) {
Ordering::Greater => Loss::MoreThanHalf,
Ordering::Equal => Loss::ExactlyHalf,
Ordering::Less => {
if is_all_zeros(dividend) {
Loss::ExactlyZero
} else {
Loss::LessThanHalf
}
}
}
};
// Try to perform a (much faster) short division for small divisors.
let divisor_bits = precision - (olsb_divisor - 1);
macro_rules! try_short_div {
($W:ty, $H:ty, $half:expr) => {
if divisor_bits * 2 <= $half {
// Extract the small divisor.
let _: Loss = shift_right(divisor, &mut 0, olsb_divisor - 1);
let divisor = divisor[0] as $H as $W;
// Shift the dividend to produce a quotient with the unit bit set.
let top_limb = *dividend.last().unwrap();
let mut rem = (top_limb >> (LIMB_BITS - (divisor_bits - 1))) as $H;
shift_left(dividend, &mut 0, divisor_bits - 1);
// Apply short division in place on $H (of $half bits) chunks.
each_chunk(dividend, $half, |chunk| {
let chunk = chunk as $H;
let combined = ((rem as $W) << $half) | (chunk as $W);
rem = (combined % divisor) as $H;
(combined / divisor) as $H as Limb
});
quotient.copy_from_slice(dividend);
return lost_fraction(&[(rem as Limb) << 1], &[divisor as Limb]);
}
}
}
try_short_div!(u32, u16, 16);
try_short_div!(u64, u32, 32);
try_short_div!(u128, u64, 64);
// Zero the quotient before setting bits in it.
for x in &mut quotient[..limbs_for_bits(precision)] {
*x = 0;
}
// Long division.
for bit in (0..precision).rev() {
if cmp(dividend, divisor) != Ordering::Less {
sub(dividend, divisor, 0);
set_bit(quotient, bit);
}
shift_left(dividend, &mut 0, 1);
}
lost_fraction(dividend, divisor)
}
}

Copy link
Member Author

@RalfJung RalfJung Jun 10, 2019

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Hm, I feel like at least for the signed/unsigned distinction this will become ugly when done "untyped".

That "simple" thing you pointed to is still way more complicated than what we currently do for integers ops in CTFE.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@RalfJung Yes, because it handles arbitrary-size integers, while you have only one "limb".
What you do is more or less what I mean.

return (val, false);
}

fn binary_int_op(
Expand Down Expand Up @@ -286,21 +271,24 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> InterpretCx<'a, 'mir, 'tcx, M>
match left.layout.ty.sty {
ty::Char => {
assert_eq!(left.layout.ty, right.layout.ty);
let left = left.to_scalar()?.to_char()?;
let right = right.to_scalar()?.to_char()?;
self.binary_char_op(bin_op, left, right)
let left = left.to_scalar()?;
let right = right.to_scalar()?;
Ok(self.binary_char_op(bin_op, left.to_char()?, right.to_char()?))
}
ty::Bool => {
assert_eq!(left.layout.ty, right.layout.ty);
let left = left.to_scalar()?.to_bool()?;
let right = right.to_scalar()?.to_bool()?;
self.binary_bool_op(bin_op, left, right)
let left = left.to_scalar()?;
let right = right.to_scalar()?;
Ok(self.binary_bool_op(bin_op, left.to_bool()?, right.to_bool()?))
}
ty::Float(fty) => {
assert_eq!(left.layout.ty, right.layout.ty);
let left = left.to_bits()?;
let right = right.to_bits()?;
self.binary_float_op(bin_op, fty, left, right)
let left = left.to_scalar()?;
let right = right.to_scalar()?;
Ok(match fty {
FloatTy::F32 => self.binary_float_op(bin_op, left.to_f32()?, right.to_f32()?),
FloatTy::F64 => self.binary_float_op(bin_op, left.to_f64()?, right.to_f64()?),
})
}
_ => {
// Must be integer(-like) types. Don't forget about == on fn pointers.
Expand Down Expand Up @@ -346,13 +334,12 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> InterpretCx<'a, 'mir, 'tcx, M>
Ok(Scalar::from_bool(res))
}
ty::Float(fty) => {
let val = val.to_bits(layout.size)?;
let res = match (un_op, fty) {
(Neg, FloatTy::F32) => Single::to_bits(-Single::from_bits(val)),
(Neg, FloatTy::F64) => Double::to_bits(-Double::from_bits(val)),
(Neg, FloatTy::F32) => Scalar::from_f32(-val.to_f32()?),
(Neg, FloatTy::F64) => Scalar::from_f64(-val.to_f64()?),
_ => bug!("Invalid float op {:?}", un_op)
};
Ok(Scalar::from_uint(res, layout.size))
Ok(res)
}
_ => {
assert!(layout.ty.is_integral());
Expand Down