Register new snapshots
Lots of cruft to remove!
@@ -512,164 +512,32 @@ extern "rust-intrinsic" {
    /// Returns the nearest integer to an `f64`. Rounds half-way cases away from zero.
    pub fn roundf64(x: f64) -> f64;

    /// Returns the number of bits set in a `u8`.
    #[cfg(stage0)]
    pub fn ctpop8(x: u8) -> u8;
    /// Returns the number of bits set in a `u16`.
    #[cfg(stage0)]
    pub fn ctpop16(x: u16) -> u16;
    /// Returns the number of bits set in a `u32`.
    #[cfg(stage0)]
    pub fn ctpop32(x: u32) -> u32;
    /// Returns the number of bits set in a `u64`.
    #[cfg(stage0)]
    pub fn ctpop64(x: u64) -> u64;
    /// Returns the number of bits set in an integer type `T`
    #[cfg(not(stage0))]
    pub fn ctpop<T>(x: T) -> T;

    /// Returns the number of leading bits unset in a `u8`.
    #[cfg(stage0)]
    pub fn ctlz8(x: u8) -> u8;
    /// Returns the number of leading bits unset in a `u16`.
    #[cfg(stage0)]
    pub fn ctlz16(x: u16) -> u16;
    /// Returns the number of leading bits unset in a `u32`.
    #[cfg(stage0)]
    pub fn ctlz32(x: u32) -> u32;
    /// Returns the number of leading bits unset in a `u64`.
    #[cfg(stage0)]
    pub fn ctlz64(x: u64) -> u64;
    /// Returns the number of leading bits unset in an integer type `T`
    #[cfg(not(stage0))]
    pub fn ctlz<T>(x: T) -> T;

    /// Returns the number of trailing bits unset in a `u8`.
    #[cfg(stage0)]
    pub fn cttz8(x: u8) -> u8;
    /// Returns the number of trailing bits unset in a `u16`.
    #[cfg(stage0)]
    pub fn cttz16(x: u16) -> u16;
    /// Returns the number of trailing bits unset in a `u32`.
    #[cfg(stage0)]
    pub fn cttz32(x: u32) -> u32;
    /// Returns the number of trailing bits unset in a `u64`.
    #[cfg(stage0)]
    pub fn cttz64(x: u64) -> u64;
    /// Returns the number of trailing bits unset in an integer type `T`
    #[cfg(not(stage0))]
    pub fn cttz<T>(x: T) -> T;

    /// Reverses the bytes in a `u16`.
    #[cfg(stage0)]
    pub fn bswap16(x: u16) -> u16;
    /// Reverses the bytes in a `u32`.
    #[cfg(stage0)]
    pub fn bswap32(x: u32) -> u32;
    /// Reverses the bytes in a `u64`.
    #[cfg(stage0)]
    pub fn bswap64(x: u64) -> u64;
    /// Reverses the bytes in an integer type `T`.
    #[cfg(not(stage0))]
    pub fn bswap<T>(x: T) -> T;

    /// Performs checked `i8` addition.
    #[cfg(stage0)]
    pub fn i8_add_with_overflow(x: i8, y: i8) -> (i8, bool);
    /// Performs checked `i16` addition.
    #[cfg(stage0)]
    pub fn i16_add_with_overflow(x: i16, y: i16) -> (i16, bool);
    /// Performs checked `i32` addition.
    #[cfg(stage0)]
    pub fn i32_add_with_overflow(x: i32, y: i32) -> (i32, bool);
    /// Performs checked `i64` addition.
    #[cfg(stage0)]
    pub fn i64_add_with_overflow(x: i64, y: i64) -> (i64, bool);

    /// Performs checked `u8` addition.
    #[cfg(stage0)]
    pub fn u8_add_with_overflow(x: u8, y: u8) -> (u8, bool);
    /// Performs checked `u16` addition.
    #[cfg(stage0)]
    pub fn u16_add_with_overflow(x: u16, y: u16) -> (u16, bool);
    /// Performs checked `u32` addition.
    #[cfg(stage0)]
    pub fn u32_add_with_overflow(x: u32, y: u32) -> (u32, bool);
    /// Performs checked `u64` addition.
    #[cfg(stage0)]
    pub fn u64_add_with_overflow(x: u64, y: u64) -> (u64, bool);

    /// Performs checked integer addition.
    #[cfg(not(stage0))]
    pub fn add_with_overflow<T>(x: T, y: T) -> (T, bool);

    /// Performs checked `i8` subtraction.
    #[cfg(stage0)]
    pub fn i8_sub_with_overflow(x: i8, y: i8) -> (i8, bool);
    /// Performs checked `i16` subtraction.
    #[cfg(stage0)]
    pub fn i16_sub_with_overflow(x: i16, y: i16) -> (i16, bool);
    /// Performs checked `i32` subtraction.
    #[cfg(stage0)]
    pub fn i32_sub_with_overflow(x: i32, y: i32) -> (i32, bool);
    /// Performs checked `i64` subtraction.
    #[cfg(stage0)]
    pub fn i64_sub_with_overflow(x: i64, y: i64) -> (i64, bool);

    /// Performs checked `u8` subtraction.
    #[cfg(stage0)]
    pub fn u8_sub_with_overflow(x: u8, y: u8) -> (u8, bool);
    /// Performs checked `u16` subtraction.
    #[cfg(stage0)]
    pub fn u16_sub_with_overflow(x: u16, y: u16) -> (u16, bool);
    /// Performs checked `u32` subtraction.
    #[cfg(stage0)]
    pub fn u32_sub_with_overflow(x: u32, y: u32) -> (u32, bool);
    /// Performs checked `u64` subtraction.
    #[cfg(stage0)]
    pub fn u64_sub_with_overflow(x: u64, y: u64) -> (u64, bool);

    /// Performs checked integer subtraction
    #[cfg(not(stage0))]
    pub fn sub_with_overflow<T>(x: T, y: T) -> (T, bool);

    /// Performs checked `i8` multiplication.
    #[cfg(stage0)]
    pub fn i8_mul_with_overflow(x: i8, y: i8) -> (i8, bool);
    /// Performs checked `i16` multiplication.
    #[cfg(stage0)]
    pub fn i16_mul_with_overflow(x: i16, y: i16) -> (i16, bool);
    /// Performs checked `i32` multiplication.
    #[cfg(stage0)]
    pub fn i32_mul_with_overflow(x: i32, y: i32) -> (i32, bool);
    /// Performs checked `i64` multiplication.
    #[cfg(stage0)]
    pub fn i64_mul_with_overflow(x: i64, y: i64) -> (i64, bool);

    /// Performs checked `u8` multiplication.
    #[cfg(stage0)]
    pub fn u8_mul_with_overflow(x: u8, y: u8) -> (u8, bool);
    /// Performs checked `u16` multiplication.
    #[cfg(stage0)]
    pub fn u16_mul_with_overflow(x: u16, y: u16) -> (u16, bool);
    /// Performs checked `u32` multiplication.
    #[cfg(stage0)]
    pub fn u32_mul_with_overflow(x: u32, y: u32) -> (u32, bool);
    /// Performs checked `u64` multiplication.
    #[cfg(stage0)]
    pub fn u64_mul_with_overflow(x: u64, y: u64) -> (u64, bool);

    /// Performs checked integer multiplication
    #[cfg(not(stage0))]
    pub fn mul_with_overflow<T>(x: T, y: T) -> (T, bool);

    /// Performs an unchecked division, resulting in undefined behavior
    /// where y = 0 or x = `T::min_value()` and y = -1
    #[cfg(not(stage0))]
    pub fn unchecked_div<T>(x: T, y: T) -> T;
    /// Returns the remainder of an unchecked division, resulting in
    /// undefined behavior where y = 0 or x = `T::min_value()` and y = -1
    #[cfg(not(stage0))]
    pub fn unchecked_rem<T>(x: T, y: T) -> T;

    /// Returns (a + b) mod 2^N, where N is the width of T in bits.
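The width-specific `ctpop*`, `ctlz*`, `cttz*`, `bswap*` and `*_with_overflow` intrinsics above are superseded by the width-generic `ctpop<T>`, `ctlz<T>`, `cttz<T>`, `bswap<T>` and `add/sub/mul_with_overflow<T>` forms. An illustrative sketch, not part of this commit, showing the stable integer methods these intrinsics back, plus the safe counterpart of `unchecked_div`:

fn main() {
    let x: u32 = 0b0000_0000_0000_0000_0000_0001_0110_1000;

    // Backed by ctpop/ctlz/cttz: population count, leading and trailing zeros.
    assert_eq!(x.count_ones(), 4);
    assert_eq!(x.leading_zeros(), 23);
    assert_eq!(x.trailing_zeros(), 3);

    // Backed by bswap: byte reversal.
    assert_eq!(0x1234_5678u32.swap_bytes(), 0x7856_3412);

    // Backed by add_with_overflow and friends: (wrapped result, overflow flag).
    assert_eq!(250u8.overflowing_add(10), (4, true));

    // unchecked_div is UB for y == 0 or MIN / -1; checked_div is the safe form.
    assert_eq!(i32::min_value().checked_div(-1), None);
    assert_eq!(10i32.checked_div(0), None);
}
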
@@ -43,11 +43,8 @@
// Since libcore defines many fundamental lang items, all tests live in a
// separate crate, libcoretest, to avoid bizarre issues.

// Do not remove on snapshot creation. Needed for bootstrap. (Issue #22364)
#![cfg_attr(stage0, feature(custom_attribute))]
#![crate_name = "core"]
#![stable(feature = "core", since = "1.6.0")]
#![cfg_attr(stage0, staged_api)]
#![crate_type = "rlib"]
#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
       html_favicon_url = "https://doc.rust-lang.org/favicon.ico",

@@ -60,8 +57,6 @@
#![no_core]
#![deny(missing_docs)]

#![cfg_attr(stage0, feature(rustc_attrs))]
#![cfg_attr(stage0, allow(unused_attributes))]
#![feature(allow_internal_unstable)]
#![feature(associated_type_defaults)]
#![feature(concat_idents)]

@@ -75,8 +70,7 @@
#![feature(optin_builtin_traits)]
#![feature(reflect)]
#![feature(unwind_attributes)]
#![cfg_attr(stage0, feature(simd))]
#![cfg_attr(not(stage0), feature(repr_simd, platform_intrinsics))]
#![feature(repr_simd, platform_intrinsics)]
#![feature(staged_api)]
#![feature(unboxed_closures)]
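All of the `cfg_attr(stage0, ...)` crate attributes above exist only so the previous-stage compiler can still build the crate; once a new snapshot is registered they collapse to the plain attribute or disappear. A small sketch of how `cfg_attr` behaves, using a hypothetical `old_compiler` cfg flag in place of `stage0` (illustrative only):

// `cfg_attr(predicate, attribute)` attaches the attribute only when the
// predicate holds, so one source tree can carry attributes that only an
// older (or newer) compiler needs. `old_compiler` is a made-up flag here,
// enabled with `--cfg old_compiler`; in rustc the bootstrap flag is `stage0`.
#[cfg_attr(old_compiler, allow(unused_variables))]
fn demo() {
    let only_needed_on_the_old_compiler = 1;
}

fn main() {
    demo();
}
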
@@ -38,31 +38,13 @@ unsafe impl Zeroable for u64 {}
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Debug, Hash)]
pub struct NonZero<T: Zeroable>(T);

#[cfg(stage0)]
macro_rules! nonzero_new {
    () => (
        /// Creates an instance of NonZero with the provided value.
        /// You must indeed ensure that the value is actually "non-zero".
        #[inline(always)]
        pub unsafe fn new(inner: T) -> NonZero<T> {
            NonZero(inner)
        }
    )
}
#[cfg(not(stage0))]
macro_rules! nonzero_new {
    () => (
        /// Creates an instance of NonZero with the provided value.
        /// You must indeed ensure that the value is actually "non-zero".
        #[inline(always)]
        pub const unsafe fn new(inner: T) -> NonZero<T> {
            NonZero(inner)
        }
    )
}

impl<T: Zeroable> NonZero<T> {
    nonzero_new!{}
    /// Creates an instance of NonZero with the provided value.
    /// You must indeed ensure that the value is actually "non-zero".
    #[inline(always)]
    pub const unsafe fn new(inner: T) -> NonZero<T> {
        NonZero(inner)
    }
}

impl<T: Zeroable> Deref for NonZero<T> {
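The two `nonzero_new!` definitions exist only so that `new` is a plain `unsafe fn` when built by the stage0 compiler and a `const unsafe fn` otherwise; after the snapshot the `const` version is written directly in the `impl`. A sketch of that same stage0 trick on a made-up wrapper type (illustrative, not the libcore source):

pub struct Wrapper(u32);

// Old-compiler build: `new` cannot be `const` yet.
#[cfg(stage0)]
macro_rules! wrapper_new {
    () => (
        pub fn new(v: u32) -> Wrapper { Wrapper(v) }
    )
}

// New-compiler build: the same function, but `const`.
#[cfg(not(stage0))]
macro_rules! wrapper_new {
    () => (
        pub const fn new(v: u32) -> Wrapper { Wrapper(v) }
    )
}

impl Wrapper {
    // The macro expands to exactly one of the two definitions above,
    // which is what lets the signature differ between bootstrap stages.
    wrapper_new!{}
}

fn main() {
    let w = Wrapper::new(7);
    assert_eq!(w.0, 7);
}
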
@@ -55,15 +55,6 @@ macro_rules! impl_full_ops {
    ($($ty:ty: add($addfn:path), mul/div($bigty:ident);)*) => (
        $(
            impl FullOps for $ty {
                #[cfg(stage0)]
                fn full_add(self, other: $ty, carry: bool) -> (bool, $ty) {
                    // this cannot overflow, the output is between 0 and 2*2^nbits - 1
                    // FIXME will LLVM optimize this into ADC or similar???
                    let (v, carry1) = unsafe { $addfn(self, other) };
                    let (v, carry2) = unsafe { $addfn(v, if carry {1} else {0}) };
                    (carry1 || carry2, v)
                }
                #[cfg(not(stage0))]
                fn full_add(self, other: $ty, carry: bool) -> (bool, $ty) {
                    // this cannot overflow, the output is between 0 and 2*2^nbits - 1
                    // FIXME will LLVM optimize this into ADC or similar???
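The stage0 body of `full_add` goes through the per-width `$addfn` intrinsic, while the retained body goes through the generic `add_with_overflow`. A sketch of the same carry-propagating addition written against the stable `overflowing_add` (illustrative; the real macro is generic over the digit type):

/// Adds `b` and an incoming carry to the digit `a`, returning (carry_out, sum).
/// The true sum is at most 2 * 2^64 - 1, so at most one unit of carry can be
/// produced and two overflow checks are enough.
fn full_add(a: u64, b: u64, carry: bool) -> (bool, u64) {
    let (v, carry1) = a.overflowing_add(b);
    let (v, carry2) = v.overflowing_add(carry as u64);
    (carry1 || carry2, v)
}

fn main() {
    assert_eq!(full_add(u64::max_value(), 1, false), (true, 0));
    assert_eq!(full_add(u64::max_value(), 0, true), (true, 0));
    assert_eq!(full_add(2, 3, true), (false, 6));
}
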
@@ -115,11 +115,6 @@ macro_rules! zero_one_impl_float {
}
zero_one_impl_float! { f32 f64 }

// Just for stage0; a byte swap on a byte is a no-op
// Delete this once it becomes unused
#[cfg(stage0)]
unsafe fn bswap8(x: u8) -> u8 { x }

macro_rules! checked_op {
    ($U:ty, $op:path, $x:expr, $y:expr) => {{
        let (result, overflowed) = unsafe { $op($x as $U, $y as $U) };

@@ -785,15 +780,6 @@ macro_rules! int_impl {
}

#[lang = "i8"]
#[cfg(stage0)]
impl i8 {
    int_impl! { i8, u8, 8,
        intrinsics::i8_add_with_overflow,
        intrinsics::i8_sub_with_overflow,
        intrinsics::i8_mul_with_overflow }
}
#[lang = "i8"]
#[cfg(not(stage0))]
impl i8 {
    int_impl! { i8, u8, 8,
        intrinsics::add_with_overflow,

@@ -802,15 +788,6 @@ impl i8 {
}

#[lang = "i16"]
#[cfg(stage0)]
impl i16 {
    int_impl! { i16, u16, 16,
        intrinsics::i16_add_with_overflow,
        intrinsics::i16_sub_with_overflow,
        intrinsics::i16_mul_with_overflow }
}
#[lang = "i16"]
#[cfg(not(stage0))]
impl i16 {
    int_impl! { i16, u16, 16,
        intrinsics::add_with_overflow,

@@ -819,15 +796,6 @@ impl i16 {
}

#[lang = "i32"]
#[cfg(stage0)]
impl i32 {
    int_impl! { i32, u32, 32,
        intrinsics::i32_add_with_overflow,
        intrinsics::i32_sub_with_overflow,
        intrinsics::i32_mul_with_overflow }
}
#[lang = "i32"]
#[cfg(not(stage0))]
impl i32 {
    int_impl! { i32, u32, 32,
        intrinsics::add_with_overflow,

@@ -836,15 +804,6 @@ impl i32 {
}

#[lang = "i64"]
#[cfg(stage0)]
impl i64 {
    int_impl! { i64, u64, 64,
        intrinsics::i64_add_with_overflow,
        intrinsics::i64_sub_with_overflow,
        intrinsics::i64_mul_with_overflow }
}
#[lang = "i64"]
#[cfg(not(stage0))]
impl i64 {
    int_impl! { i64, u64, 64,
        intrinsics::add_with_overflow,

@@ -854,16 +813,6 @@ impl i64 {

#[cfg(target_pointer_width = "32")]
#[lang = "isize"]
#[cfg(stage0)]
impl isize {
    int_impl! { i32, u32, 32,
        intrinsics::i32_add_with_overflow,
        intrinsics::i32_sub_with_overflow,
        intrinsics::i32_mul_with_overflow }
}
#[cfg(target_pointer_width = "32")]
#[lang = "isize"]
#[cfg(not(stage0))]
impl isize {
    int_impl! { i32, u32, 32,
        intrinsics::add_with_overflow,

@@ -873,16 +822,6 @@ impl isize {

#[cfg(target_pointer_width = "64")]
#[lang = "isize"]
#[cfg(stage0)]
impl isize {
    int_impl! { i64, u64, 64,
        intrinsics::i64_add_with_overflow,
        intrinsics::i64_sub_with_overflow,
        intrinsics::i64_mul_with_overflow }
}
#[cfg(target_pointer_width = "64")]
#[lang = "isize"]
#[cfg(not(stage0))]
impl isize {
    int_impl! { i64, u64, 64,
        intrinsics::add_with_overflow,

@@ -980,25 +919,6 @@ macro_rules! uint_impl {
        unsafe { $ctlz(self as $ActualT) as u32 }
    }

    #[stable(feature = "rust1", since = "1.0.0")]
    #[cfg(stage0)]
    #[inline]
    pub fn trailing_zeros(self) -> u32 {
        // As of LLVM 3.6 the codegen for the zero-safe cttz8 intrinsic
        // emits two conditional moves on x86_64. By promoting the value to
        // u16 and setting bit 8, we get better code without any conditional
        // operations.
        // FIXME: There's a LLVM patch (http://reviews.llvm.org/D9284)
        // pending, remove this workaround once LLVM generates better code
        // for cttz8.
        unsafe {
            if $BITS == 8 {
                intrinsics::cttz16(self as u16 | 0x100) as u32
            } else {
                $cttz(self as $ActualT) as u32
            }
        }
    }
    /// Returns the number of trailing zeros in the binary representation
    /// of `self`.
    ///

@@ -1012,7 +932,6 @@ macro_rules! uint_impl {
    /// assert_eq!(n.trailing_zeros(), 3);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[cfg(not(stage0))]
    #[inline]
    pub fn trailing_zeros(self) -> u32 {
        // As of LLVM 3.6 the codegen for the zero-safe cttz8 intrinsic
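The stage0 `trailing_zeros` above works around weak LLVM codegen for `cttz8` by widening the byte to `u16` and setting bit 8, so the argument is never zero and a zero byte still reports 8 trailing zeros. A standalone sketch of that trick using the stable `trailing_zeros` method instead of the raw intrinsic (illustrative only):

/// Trailing zeros of a byte via the widen-and-set-bit-8 trick described in
/// the comment above: `x as u16 | 0x100` is never zero, its low eight bits
/// match `x`, and bit 8 caps the answer at 8 when `x == 0`.
fn trailing_zeros_u8(x: u8) -> u32 {
    ((x as u16) | 0x100).trailing_zeros()
}

fn main() {
    assert_eq!(trailing_zeros_u8(0), 8);            // zero byte: capped by bit 8
    assert_eq!(trailing_zeros_u8(0b0001_0000), 4);
    assert_eq!(trailing_zeros_u8(0xFF), 0);
    // Matches the ordinary method on u8 for every input.
    for x in 0..=255u8 {
        assert_eq!(trailing_zeros_u8(x), x.trailing_zeros());
    }
}
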
@@ -1563,19 +1482,6 @@ macro_rules! uint_impl {
}

#[lang = "u8"]
#[cfg(stage0)]
impl u8 {
    uint_impl! { u8, 8,
        intrinsics::ctpop8,
        intrinsics::ctlz8,
        intrinsics::cttz8,
        bswap8,
        intrinsics::u8_add_with_overflow,
        intrinsics::u8_sub_with_overflow,
        intrinsics::u8_mul_with_overflow }
}
#[lang = "u8"]
#[cfg(not(stage0))]
impl u8 {
    uint_impl! { u8, 8,
        intrinsics::ctpop,

@@ -1588,19 +1494,6 @@ impl u8 {
}

#[lang = "u16"]
#[cfg(stage0)]
impl u16 {
    uint_impl! { u16, 16,
        intrinsics::ctpop16,
        intrinsics::ctlz16,
        intrinsics::cttz16,
        intrinsics::bswap16,
        intrinsics::u16_add_with_overflow,
        intrinsics::u16_sub_with_overflow,
        intrinsics::u16_mul_with_overflow }
}
#[lang = "u16"]
#[cfg(not(stage0))]
impl u16 {
    uint_impl! { u16, 16,
        intrinsics::ctpop,

@@ -1613,19 +1506,6 @@ impl u16 {
}

#[lang = "u32"]
#[cfg(stage0)]
impl u32 {
    uint_impl! { u32, 32,
        intrinsics::ctpop32,
        intrinsics::ctlz32,
        intrinsics::cttz32,
        intrinsics::bswap32,
        intrinsics::u32_add_with_overflow,
        intrinsics::u32_sub_with_overflow,
        intrinsics::u32_mul_with_overflow }
}
#[lang = "u32"]
#[cfg(not(stage0))]
impl u32 {
    uint_impl! { u32, 32,
        intrinsics::ctpop,

@@ -1638,19 +1518,6 @@ impl u32 {
}

#[lang = "u64"]
#[cfg(stage0)]
impl u64 {
    uint_impl! { u64, 64,
        intrinsics::ctpop64,
        intrinsics::ctlz64,
        intrinsics::cttz64,
        intrinsics::bswap64,
        intrinsics::u64_add_with_overflow,
        intrinsics::u64_sub_with_overflow,
        intrinsics::u64_mul_with_overflow }
}
#[lang = "u64"]
#[cfg(not(stage0))]
impl u64 {
    uint_impl! { u64, 64,
        intrinsics::ctpop,

@@ -1664,20 +1531,6 @@ impl u64 {

#[cfg(target_pointer_width = "32")]
#[lang = "usize"]
#[cfg(stage0)]
impl usize {
    uint_impl! { u32, 32,
        intrinsics::ctpop32,
        intrinsics::ctlz32,
        intrinsics::cttz32,
        intrinsics::bswap32,
        intrinsics::u32_add_with_overflow,
        intrinsics::u32_sub_with_overflow,
        intrinsics::u32_mul_with_overflow }
}
#[cfg(target_pointer_width = "32")]
#[lang = "usize"]
#[cfg(not(stage0))]
impl usize {
    uint_impl! { u32, 32,
        intrinsics::ctpop,

@@ -1691,20 +1544,6 @@ impl usize {

#[cfg(target_pointer_width = "64")]
#[lang = "usize"]
#[cfg(stage0)]
impl usize {
    uint_impl! { u64, 64,
        intrinsics::ctpop64,
        intrinsics::ctlz64,
        intrinsics::cttz64,
        intrinsics::bswap64,
        intrinsics::u64_add_with_overflow,
        intrinsics::u64_sub_with_overflow,
        intrinsics::u64_mul_with_overflow }
}
#[cfg(target_pointer_width = "64")]
#[lang = "usize"]
#[cfg(not(stage0))]
impl usize {
    uint_impl! { u64, 64,
        intrinsics::ctpop,

@@ -12,30 +12,7 @@
#![unstable(feature = "wrapping", reason = "may be removed or relocated",
            issue = "27755")]

#[cfg(stage0)]
pub use intrinsics::{
    u8_add_with_overflow, i8_add_with_overflow,
    u16_add_with_overflow, i16_add_with_overflow,
    u32_add_with_overflow, i32_add_with_overflow,
    u64_add_with_overflow, i64_add_with_overflow,

    u8_sub_with_overflow, i8_sub_with_overflow,
    u16_sub_with_overflow, i16_sub_with_overflow,
    u32_sub_with_overflow, i32_sub_with_overflow,
    u64_sub_with_overflow, i64_sub_with_overflow,

    u8_mul_with_overflow, i8_mul_with_overflow,
    u16_mul_with_overflow, i16_mul_with_overflow,
    u32_mul_with_overflow, i32_mul_with_overflow,
    u64_mul_with_overflow, i64_mul_with_overflow,
};

#[cfg(not(stage0))]
pub use intrinsics::{
    add_with_overflow,
    sub_with_overflow,
    mul_with_overflow,
};
pub use intrinsics::{add_with_overflow, sub_with_overflow, mul_with_overflow};

use super::Wrapping;
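After the snapshot, this module re-exports only the width-generic `add_with_overflow`, `sub_with_overflow` and `mul_with_overflow`. An illustrative sketch, not part of the diff, of the `(value, overflowed)` behaviour these back, via the stable `overflowing_*` methods:

fn main() {
    // (wrapped result, did-it-overflow flag), the same (T, bool) shape
    // returned by the *_with_overflow intrinsics.
    assert_eq!(200u8.overflowing_add(100), (44, true));    // 300 mod 256 = 44
    assert_eq!(10u8.overflowing_sub(20), (246, true));     // wraps below zero
    assert_eq!(1000u16.overflowing_mul(70), (4464, true)); // 70000 mod 65536
    assert_eq!(3i32.overflowing_add(4), (7, false));
}
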
@@ -203,42 +180,18 @@ macro_rules! signed_overflowing_impl {
    ($($t:ident)*) => ($(
        impl OverflowingOps for $t {
            #[inline(always)]
            #[cfg(stage0)]
            fn overflowing_add(self, rhs: $t) -> ($t, bool) {
                unsafe {
                    concat_idents!($t, _add_with_overflow)(self, rhs)
                }
            }
            #[inline(always)]
            #[cfg(not(stage0))]
            fn overflowing_add(self, rhs: $t) -> ($t, bool) {
                unsafe {
                    add_with_overflow(self, rhs)
                }
            }
            #[inline(always)]
            #[cfg(stage0)]
            fn overflowing_sub(self, rhs: $t) -> ($t, bool) {
                unsafe {
                    concat_idents!($t, _sub_with_overflow)(self, rhs)
                }
            }
            #[inline(always)]
            #[cfg(not(stage0))]
            fn overflowing_sub(self, rhs: $t) -> ($t, bool) {
                unsafe {
                    sub_with_overflow(self, rhs)
                }
            }
            #[inline(always)]
            #[cfg(stage0)]
            fn overflowing_mul(self, rhs: $t) -> ($t, bool) {
                unsafe {
                    concat_idents!($t, _mul_with_overflow)(self, rhs)
                }
            }
            #[inline(always)]
            #[cfg(not(stage0))]
            fn overflowing_mul(self, rhs: $t) -> ($t, bool) {
                unsafe {
                    mul_with_overflow(self, rhs)

@@ -289,42 +242,18 @@ macro_rules! unsigned_overflowing_impl {
    ($($t:ident)*) => ($(
        impl OverflowingOps for $t {
            #[inline(always)]
            #[cfg(stage0)]
            fn overflowing_add(self, rhs: $t) -> ($t, bool) {
                unsafe {
                    concat_idents!($t, _add_with_overflow)(self, rhs)
                }
            }
            #[inline(always)]
            #[cfg(not(stage0))]
            fn overflowing_add(self, rhs: $t) -> ($t, bool) {
                unsafe {
                    add_with_overflow(self, rhs)
                }
            }
            #[inline(always)]
            #[cfg(stage0)]
            fn overflowing_sub(self, rhs: $t) -> ($t, bool) {
                unsafe {
                    concat_idents!($t, _sub_with_overflow)(self, rhs)
                }
            }
            #[inline(always)]
            #[cfg(not(stage0))]
            fn overflowing_sub(self, rhs: $t) -> ($t, bool) {
                unsafe {
                    sub_with_overflow(self, rhs)
                }
            }
            #[inline(always)]
            #[cfg(stage0)]
            fn overflowing_mul(self, rhs: $t) -> ($t, bool) {
                unsafe {
                    concat_idents!($t, _mul_with_overflow)(self, rhs)
                }
            }
            #[inline(always)]
            #[cfg(not(stage0))]
            fn overflowing_mul(self, rhs: $t) -> ($t, bool) {
                unsafe {
                    mul_with_overflow(self, rhs)

@@ -365,45 +294,18 @@ unsigned_overflowing_impl! { u8 u16 u32 u64 }
#[cfg(target_pointer_width = "64")]
impl OverflowingOps for usize {
    #[inline(always)]
    #[cfg(stage0)]
    fn overflowing_add(self, rhs: usize) -> (usize, bool) {
        unsafe {
            let res = u64_add_with_overflow(self as u64, rhs as u64);
            (res.0 as usize, res.1)
        }
    }
    #[inline(always)]
    #[cfg(not(stage0))]
    fn overflowing_add(self, rhs: usize) -> (usize, bool) {
        unsafe {
            add_with_overflow(self, rhs)
        }
    }
    #[inline(always)]
    #[cfg(stage0)]
    fn overflowing_sub(self, rhs: usize) -> (usize, bool) {
        unsafe {
            let res = u64_sub_with_overflow(self as u64, rhs as u64);
            (res.0 as usize, res.1)
        }
    }
    #[inline(always)]
    #[cfg(not(stage0))]
    fn overflowing_sub(self, rhs: usize) -> (usize, bool) {
        unsafe {
            sub_with_overflow(self, rhs)
        }
    }
    #[inline(always)]
    #[cfg(stage0)]
    fn overflowing_mul(self, rhs: usize) -> (usize, bool) {
        unsafe {
            let res = u64_mul_with_overflow(self as u64, rhs as u64);
            (res.0 as usize, res.1)
        }
    }
    #[inline(always)]
    #[cfg(not(stage0))]
    fn overflowing_mul(self, rhs: usize) -> (usize, bool) {
        unsafe {
            mul_with_overflow(self, rhs)

@@ -439,45 +341,18 @@ impl OverflowingOps for usize {
#[cfg(target_pointer_width = "32")]
impl OverflowingOps for usize {
    #[inline(always)]
    #[cfg(stage0)]
    fn overflowing_add(self, rhs: usize) -> (usize, bool) {
        unsafe {
            let res = u32_add_with_overflow(self as u32, rhs as u32);
            (res.0 as usize, res.1)
        }
    }
    #[inline(always)]
    #[cfg(not(stage0))]
    fn overflowing_add(self, rhs: usize) -> (usize, bool) {
        unsafe {
            add_with_overflow(self, rhs)
        }
    }
    #[inline(always)]
    #[cfg(stage0)]
    fn overflowing_sub(self, rhs: usize) -> (usize, bool) {
        unsafe {
            let res = u32_sub_with_overflow(self as u32, rhs as u32);
            (res.0 as usize, res.1)
        }
    }
    #[inline(always)]
    #[cfg(not(stage0))]
    fn overflowing_sub(self, rhs: usize) -> (usize, bool) {
        unsafe {
            sub_with_overflow(self, rhs)
        }
    }
    #[inline(always)]
    #[cfg(stage0)]
    fn overflowing_mul(self, rhs: usize) -> (usize, bool) {
        unsafe {
            let res = u32_mul_with_overflow(self as u32, rhs as u32);
            (res.0 as usize, res.1)
        }
    }
    #[inline(always)]
    #[cfg(not(stage0))]
    fn overflowing_mul(self, rhs: usize) -> (usize, bool) {
        unsafe {
            mul_with_overflow(self, rhs)

@@ -513,45 +388,18 @@ impl OverflowingOps for usize {
#[cfg(target_pointer_width = "64")]
impl OverflowingOps for isize {
    #[inline(always)]
    #[cfg(stage0)]
    fn overflowing_add(self, rhs: isize) -> (isize, bool) {
        unsafe {
            let res = i64_add_with_overflow(self as i64, rhs as i64);
            (res.0 as isize, res.1)
        }
    }
    #[inline(always)]
    #[cfg(not(stage0))]
    fn overflowing_add(self, rhs: isize) -> (isize, bool) {
        unsafe {
            add_with_overflow(self, rhs)
        }
    }
    #[inline(always)]
    #[cfg(stage0)]
    fn overflowing_sub(self, rhs: isize) -> (isize, bool) {
        unsafe {
            let res = i64_sub_with_overflow(self as i64, rhs as i64);
            (res.0 as isize, res.1)
        }
    }
    #[inline(always)]
    #[cfg(not(stage0))]
    fn overflowing_sub(self, rhs: isize) -> (isize, bool) {
        unsafe {
            sub_with_overflow(self, rhs)
        }
    }
    #[inline(always)]
    #[cfg(stage0)]
    fn overflowing_mul(self, rhs: isize) -> (isize, bool) {
        unsafe {
            let res = i64_mul_with_overflow(self as i64, rhs as i64);
            (res.0 as isize, res.1)
        }
    }
    #[inline(always)]
    #[cfg(not(stage0))]
    fn overflowing_mul(self, rhs: isize) -> (isize, bool) {
        unsafe {
            mul_with_overflow(self, rhs)

@@ -587,45 +435,18 @@ impl OverflowingOps for isize {
#[cfg(target_pointer_width = "32")]
impl OverflowingOps for isize {
    #[inline(always)]
    #[cfg(stage0)]
    fn overflowing_add(self, rhs: isize) -> (isize, bool) {
        unsafe {
            let res = i32_add_with_overflow(self as i32, rhs as i32);
            (res.0 as isize, res.1)
        }
    }
    #[inline(always)]
    #[cfg(not(stage0))]
    fn overflowing_add(self, rhs: isize) -> (isize, bool) {
        unsafe {
            add_with_overflow(self, rhs)
        }
    }
    #[inline(always)]
    #[cfg(stage0)]
    fn overflowing_sub(self, rhs: isize) -> (isize, bool) {
        unsafe {
            let res = i32_sub_with_overflow(self as i32, rhs as i32);
            (res.0 as isize, res.1)
        }
    }
    #[inline(always)]
    #[cfg(not(stage0))]
    fn overflowing_sub(self, rhs: isize) -> (isize, bool) {
        unsafe {
            sub_with_overflow(self, rhs)
        }
    }
    #[inline(always)]
    #[cfg(stage0)]
    fn overflowing_mul(self, rhs: isize) -> (isize, bool) {
        unsafe {
            let res = i32_mul_with_overflow(self as i32, rhs as i32);
            (res.0 as isize, res.1)
        }
    }
    #[inline(always)]
    #[cfg(not(stage0))]
    fn overflowing_mul(self, rhs: isize) -> (isize, bool) {
        unsafe {
            mul_with_overflow(self, rhs)

@@ -441,7 +441,6 @@ macro_rules! rem_impl_integer {

rem_impl_integer! { usize u8 u16 u32 u64 isize i8 i16 i32 i64 }

#[cfg(not(stage0))]
macro_rules! rem_impl_float {
    ($($t:ty)*) => ($(
        #[stable(feature = "rust1", since = "1.0.0")]

@@ -456,48 +455,8 @@ macro_rules! rem_impl_float {
    )*)
}

#[cfg(not(stage0))]
rem_impl_float! { f32 f64 }

#[stable(feature = "rust1", since = "1.0.0")]
#[cfg(stage0)]
impl Rem for f32 {
    type Output = f32;

    // The builtin f32 rem operator is broken when targeting
    // MSVC; see comment in std::f32::floor.
    // FIXME: See also #27859.
    #[inline]
    #[cfg(target_env = "msvc")]
    fn rem(self, other: f32) -> f32 {
        (self as f64).rem(other as f64) as f32
    }

    #[inline]
    #[cfg(not(target_env = "msvc"))]
    fn rem(self, other: f32) -> f32 {
        extern { fn fmodf(a: f32, b: f32) -> f32; }
        unsafe { fmodf(self, other) }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
#[cfg(stage0)]
impl Rem for f64 {
    type Output = f64;

    #[inline]
    fn rem(self, other: f64) -> f64 {
        extern { fn fmod(a: f64, b: f64) -> f64; }
        unsafe { fmod(self, other) }
    }
}

#[cfg(stage0)]
forward_ref_binop! { impl Rem, rem for f64, f64 }
#[cfg(stage0)]
forward_ref_binop! { impl Rem, rem for f32, f32 }

/// The `Neg` trait is used to specify the functionality of unary `-`.
///
/// # Examples
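With the stage0 `Rem` impls gone, float `%` comes solely from `rem_impl_float!`, and its observable behaviour is the C `fmod` one: truncated division, with the remainder taking the dividend's sign. A small sketch (not part of the diff):

fn main() {
    // `%` on floats behaves like fmod: 5.5 = 2 * 2.0 + 1.5, and the
    // remainder keeps the sign of the left operand.
    assert_eq!(5.5f32 % 2.0, 1.5);
    assert_eq!(-5.5f32 % 2.0, -1.5);
    assert_eq!(5.5f64 % -2.0, 1.5);
}
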
@@ -954,7 +913,6 @@ shr_impl_all! { u8 u16 u32 u64 usize i8 i16 i32 i64 isize }
/// foo += Foo;
/// }
/// ```
#[cfg(not(stage0))]
#[lang = "add_assign"]
#[unstable(feature = "op_assign_traits", reason = "recently added", issue = "28235")]
pub trait AddAssign<Rhs=Self> {

@@ -962,7 +920,6 @@ pub trait AddAssign<Rhs=Self> {
    fn add_assign(&mut self, Rhs);
}

#[cfg(not(stage0))]
macro_rules! add_assign_impl {
    ($($t:ty)+) => ($(
        #[unstable(feature = "op_assign_traits", reason = "recently added", issue = "28235")]

@@ -973,7 +930,6 @@ macro_rules! add_assign_impl {
    )+)
}

#[cfg(not(stage0))]
add_assign_impl! { usize u8 u16 u32 u64 isize i8 i16 i32 i64 f32 f64 }

/// The `SubAssign` trait is used to specify the functionality of `-=`.
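`AddAssign` and the other `*Assign` traits below are what make the compound assignment operators overloadable; this commit only drops their `#[cfg(not(stage0))]` guards. A sketch of implementing the trait for a user-defined type, mirroring the `foo += Foo;` doc example (the `Point` type here is made up):

use std::ops::AddAssign;

#[derive(Debug, PartialEq)]
struct Point { x: i32, y: i32 }

impl AddAssign for Point {
    // Called for `lhs += rhs`; mutates the left-hand side in place.
    fn add_assign(&mut self, rhs: Point) {
        self.x += rhs.x;
        self.y += rhs.y;
    }
}

fn main() {
    let mut p = Point { x: 1, y: 2 };
    p += Point { x: 3, y: 4 };
    assert_eq!(p, Point { x: 4, y: 6 });
}
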
@@ -1004,7 +960,6 @@ add_assign_impl! { usize u8 u16 u32 u64 isize i8 i16 i32 i64 f32 f64 }
/// foo -= Foo;
/// }
/// ```
#[cfg(not(stage0))]
#[lang = "sub_assign"]
#[unstable(feature = "op_assign_traits", reason = "recently added", issue = "28235")]
pub trait SubAssign<Rhs=Self> {

@@ -1012,7 +967,6 @@ pub trait SubAssign<Rhs=Self> {
    fn sub_assign(&mut self, Rhs);
}

#[cfg(not(stage0))]
macro_rules! sub_assign_impl {
    ($($t:ty)+) => ($(
        #[unstable(feature = "op_assign_traits", reason = "recently added", issue = "28235")]

@@ -1023,7 +977,6 @@ macro_rules! sub_assign_impl {
    )+)
}

#[cfg(not(stage0))]
sub_assign_impl! { usize u8 u16 u32 u64 isize i8 i16 i32 i64 f32 f64 }

/// The `MulAssign` trait is used to specify the functionality of `*=`.

@@ -1054,7 +1007,6 @@ sub_assign_impl! { usize u8 u16 u32 u64 isize i8 i16 i32 i64 f32 f64 }
/// foo *= Foo;
/// }
/// ```
#[cfg(not(stage0))]
#[lang = "mul_assign"]
#[unstable(feature = "op_assign_traits", reason = "recently added", issue = "28235")]
pub trait MulAssign<Rhs=Self> {

@@ -1062,7 +1014,6 @@ pub trait MulAssign<Rhs=Self> {
    fn mul_assign(&mut self, Rhs);
}

#[cfg(not(stage0))]
macro_rules! mul_assign_impl {
    ($($t:ty)+) => ($(
        #[unstable(feature = "op_assign_traits", reason = "recently added", issue = "28235")]

@@ -1073,7 +1024,6 @@ macro_rules! mul_assign_impl {
    )+)
}

#[cfg(not(stage0))]
mul_assign_impl! { usize u8 u16 u32 u64 isize i8 i16 i32 i64 f32 f64 }

/// The `DivAssign` trait is used to specify the functionality of `/=`.

@@ -1104,7 +1054,6 @@ mul_assign_impl! { usize u8 u16 u32 u64 isize i8 i16 i32 i64 f32 f64 }
/// foo /= Foo;
/// }
/// ```
#[cfg(not(stage0))]
#[lang = "div_assign"]
#[unstable(feature = "op_assign_traits", reason = "recently added", issue = "28235")]
pub trait DivAssign<Rhs=Self> {

@@ -1112,7 +1061,6 @@ pub trait DivAssign<Rhs=Self> {
    fn div_assign(&mut self, Rhs);
}

#[cfg(not(stage0))]
macro_rules! div_assign_impl {
    ($($t:ty)+) => ($(
        #[unstable(feature = "op_assign_traits", reason = "recently added", issue = "28235")]

@@ -1123,7 +1071,6 @@ macro_rules! div_assign_impl {
    )+)
}

#[cfg(not(stage0))]
div_assign_impl! { usize u8 u16 u32 u64 isize i8 i16 i32 i64 f32 f64 }

/// The `RemAssign` trait is used to specify the functionality of `%=`.

@@ -1154,7 +1101,6 @@ div_assign_impl! { usize u8 u16 u32 u64 isize i8 i16 i32 i64 f32 f64 }
/// foo %= Foo;
/// }
/// ```
#[cfg(not(stage0))]
#[lang = "rem_assign"]
#[unstable(feature = "op_assign_traits", reason = "recently added", issue = "28235")]
pub trait RemAssign<Rhs=Self> {

@@ -1162,7 +1108,6 @@ pub trait RemAssign<Rhs=Self> {
    fn rem_assign(&mut self, Rhs);
}

#[cfg(not(stage0))]
macro_rules! rem_assign_impl {
    ($($t:ty)+) => ($(
        #[unstable(feature = "op_assign_traits", reason = "recently added", issue = "28235")]

@@ -1173,7 +1118,6 @@ macro_rules! rem_assign_impl {
    )+)
}

#[cfg(not(stage0))]
rem_assign_impl! { usize u8 u16 u32 u64 isize i8 i16 i32 i64 f32 f64 }

/// The `BitAndAssign` trait is used to specify the functionality of `&=`.

@@ -1204,7 +1148,6 @@ rem_assign_impl! { usize u8 u16 u32 u64 isize i8 i16 i32 i64 f32 f64 }
/// foo &= Foo;
/// }
/// ```
#[cfg(not(stage0))]
#[lang = "bitand_assign"]
#[unstable(feature = "op_assign_traits", reason = "recently added", issue = "28235")]
pub trait BitAndAssign<Rhs=Self> {

@@ -1212,7 +1155,6 @@ pub trait BitAndAssign<Rhs=Self> {
    fn bitand_assign(&mut self, Rhs);
}

#[cfg(not(stage0))]
macro_rules! bitand_assign_impl {
    ($($t:ty)+) => ($(
        #[unstable(feature = "op_assign_traits", reason = "recently added", issue = "28235")]

@@ -1223,7 +1165,6 @@ macro_rules! bitand_assign_impl {
    )+)
}

#[cfg(not(stage0))]
bitand_assign_impl! { bool usize u8 u16 u32 u64 isize i8 i16 i32 i64 }

/// The `BitOrAssign` trait is used to specify the functionality of `|=`.

@@ -1254,7 +1195,6 @@ bitand_assign_impl! { bool usize u8 u16 u32 u64 isize i8 i16 i32 i64 }
/// foo |= Foo;
/// }
/// ```
#[cfg(not(stage0))]
#[lang = "bitor_assign"]
#[unstable(feature = "op_assign_traits", reason = "recently added", issue = "28235")]
pub trait BitOrAssign<Rhs=Self> {

@@ -1262,7 +1202,6 @@ pub trait BitOrAssign<Rhs=Self> {
    fn bitor_assign(&mut self, Rhs);
}

#[cfg(not(stage0))]
macro_rules! bitor_assign_impl {
    ($($t:ty)+) => ($(
        #[unstable(feature = "op_assign_traits", reason = "recently added", issue = "28235")]

@@ -1273,7 +1212,6 @@ macro_rules! bitor_assign_impl {
    )+)
}

#[cfg(not(stage0))]
bitor_assign_impl! { bool usize u8 u16 u32 u64 isize i8 i16 i32 i64 }

/// The `BitXorAssign` trait is used to specify the functionality of `^=`.

@@ -1304,7 +1242,6 @@ bitor_assign_impl! { bool usize u8 u16 u32 u64 isize i8 i16 i32 i64 }
/// foo ^= Foo;
/// }
/// ```
#[cfg(not(stage0))]
#[lang = "bitxor_assign"]
#[unstable(feature = "op_assign_traits", reason = "recently added", issue = "28235")]
pub trait BitXorAssign<Rhs=Self> {

@@ -1312,7 +1249,6 @@ pub trait BitXorAssign<Rhs=Self> {
    fn bitxor_assign(&mut self, Rhs);
}

#[cfg(not(stage0))]
macro_rules! bitxor_assign_impl {
    ($($t:ty)+) => ($(
        #[unstable(feature = "op_assign_traits", reason = "recently added", issue = "28235")]

@@ -1323,7 +1259,6 @@ macro_rules! bitxor_assign_impl {
    )+)
}

#[cfg(not(stage0))]
bitxor_assign_impl! { bool usize u8 u16 u32 u64 isize i8 i16 i32 i64 }

/// The `ShlAssign` trait is used to specify the functionality of `<<=`.

@@ -1354,7 +1289,6 @@ bitxor_assign_impl! { bool usize u8 u16 u32 u64 isize i8 i16 i32 i64 }
/// foo <<= Foo;
/// }
/// ```
#[cfg(not(stage0))]
#[lang = "shl_assign"]
#[unstable(feature = "op_assign_traits", reason = "recently added", issue = "28235")]
pub trait ShlAssign<Rhs> {

@@ -1362,7 +1296,6 @@ pub trait ShlAssign<Rhs> {
    fn shl_assign(&mut self, Rhs);
}

#[cfg(not(stage0))]
macro_rules! shl_assign_impl {
    ($t:ty, $f:ty) => (
        #[unstable(feature = "op_assign_traits", reason = "recently added", issue = "28235")]

@@ -1375,7 +1308,6 @@ macro_rules! shl_assign_impl {
    )
}

#[cfg(not(stage0))]
macro_rules! shl_assign_impl_all {
    ($($t:ty)*) => ($(
        shl_assign_impl! { $t, u8 }

@@ -1392,7 +1324,6 @@ macro_rules! shl_assign_impl_all {
    )*)
}

#[cfg(not(stage0))]
shl_assign_impl_all! { u8 u16 u32 u64 usize i8 i16 i32 i64 isize }

/// The `ShrAssign` trait is used to specify the functionality of `>>=`.

@@ -1423,7 +1354,6 @@ shl_assign_impl_all! { u8 u16 u32 u64 usize i8 i16 i32 i64 isize }
/// foo >>= Foo;
/// }
/// ```
#[cfg(not(stage0))]
#[lang = "shr_assign"]
#[unstable(feature = "op_assign_traits", reason = "recently added", issue = "28235")]
pub trait ShrAssign<Rhs=Self> {

@@ -1431,7 +1361,6 @@ pub trait ShrAssign<Rhs=Self> {
    fn shr_assign(&mut self, Rhs);
}

#[cfg(not(stage0))]
macro_rules! shr_assign_impl {
    ($t:ty, $f:ty) => (
        #[unstable(feature = "op_assign_traits", reason = "recently added", issue = "28235")]

@@ -1444,7 +1373,6 @@ macro_rules! shr_assign_impl {
    )
}

#[cfg(not(stage0))]
macro_rules! shr_assign_impl_all {
    ($($t:ty)*) => ($(
        shr_assign_impl! { $t, u8 }

@@ -1461,7 +1389,6 @@ macro_rules! shr_assign_impl_all {
    )*)
}

#[cfg(not(stage0))]
shr_assign_impl_all! { u8 u16 u32 u64 usize i8 i16 i32 i64 isize }

/// The `Index` trait is used to specify the functionality of indexing operations

@@ -499,28 +499,12 @@ unsafe impl<T: Send + ?Sized> Send for Unique<T> { }
#[unstable(feature = "unique", issue = "27730")]
unsafe impl<T: Sync + ?Sized> Sync for Unique<T> { }

#[cfg(stage0)]
macro_rules! unique_new {
    () => (
        /// Creates a new `Unique`.
        pub unsafe fn new(ptr: *mut T) -> Unique<T> {
            Unique { pointer: NonZero::new(ptr), _marker: PhantomData }
        }
    )
}
#[cfg(not(stage0))]
macro_rules! unique_new {
    () => (
        /// Creates a new `Unique`.
        pub const unsafe fn new(ptr: *mut T) -> Unique<T> {
            Unique { pointer: NonZero::new(ptr), _marker: PhantomData }
        }
    )
}

#[unstable(feature = "unique", issue = "27730")]
impl<T: ?Sized> Unique<T> {
    unique_new!{}
    /// Creates a new `Unique`.
    pub const unsafe fn new(ptr: *mut T) -> Unique<T> {
        Unique { pointer: NonZero::new(ptr), _marker: PhantomData }
    }

    /// Dereferences the content.
    pub unsafe fn get(&self) -> &T {

@@ -533,7 +517,6 @@ impl<T: ?Sized> Unique<T> {
    }
}

#[cfg(not(stage0))] // remove cfg after new snapshot
#[unstable(feature = "unique", issue = "27730")]
impl<T: ?Sized, U: ?Sized> CoerceUnsized<Unique<U>> for Unique<T> where T: Unsize<U> { }

@@ -598,7 +581,6 @@ impl<T: ?Sized> Clone for Shared<T> {
#[unstable(feature = "shared", issue = "27730")]
impl<T: ?Sized> Copy for Shared<T> { }

#[cfg(not(stage0))] // remove cfg after new snapshot
#[unstable(feature = "shared", issue = "27730")]
impl<T: ?Sized, U: ?Sized> CoerceUnsized<Shared<U>> for Shared<T> where T: Unsize<U> { }
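The `CoerceUnsized` impls for `Unique` and `Shared` let these internal pointer wrappers take part in unsizing coercions, for example `Unique<[T; N]>` to `Unique<[T]>`, which the standard containers build on. A user-level sketch of the coercions this enables, shown through `Box` (illustrative, not part of the diff):

fn main() {
    // A sized pointee coerces to an unsized one behind a pointer type
    // that implements CoerceUnsized.
    let boxed_array: Box<[i32; 3]> = Box::new([1, 2, 3]);
    let boxed_slice: Box<[i32]> = boxed_array;                  // [i32; 3] -> [i32]
    assert_eq!(boxed_slice.len(), 3);

    let as_display: Box<dyn std::fmt::Display> = Box::new(42);  // i32 -> dyn Display
    assert_eq!(as_display.to_string(), "42");
}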