2019-04-15 11:23:21 +09:00
|
|
|
use crate::convert::TryFrom;
|
|
|
|
|
use crate::mem;
|
2023-03-13 20:07:53 +01:00
|
|
|
use crate::num::NonZeroUsize;
|
2021-02-04 00:20:08 +01:00
|
|
|
use crate::ops::{self, Try};
|
2016-04-18 20:08:27 +02:00
|
|
|
|
2021-07-01 17:49:47 +02:00
|
|
|
use super::{
|
|
|
|
|
FusedIterator, TrustedLen, TrustedRandomAccess, TrustedRandomAccessNoCoerce, TrustedStep,
|
|
|
|
|
};
|
2021-04-02 00:58:45 -04:00
|
|
|
|
|
|
|
|
// Implements the unsafe marker trait `TrustedStep` for every primitive type
// whose `Step` implementation is exact (no skipped or repeated values).
// Safety: All invariants are upheld.
macro_rules! unsafe_impl_trusted_step {
    ($($type:ty)*) => {$(
        #[unstable(feature = "trusted_step", issue = "85731")]
        // SAFETY: per the macro's contract above, the `Step` impl for `$type`
        // upholds all `TrustedStep` invariants.
        unsafe impl TrustedStep for $type {}
    )*};
}
unsafe_impl_trusted_step![char i8 i16 i32 i64 i128 isize u8 u16 u32 u64 u128 usize];
|
2016-04-18 20:08:27 +02:00
|
|
|
|
2020-02-18 13:18:33 -05:00
|
|
|
/// Objects that have a notion of *successor* and *predecessor* operations.
///
/// The *successor* operation moves towards values that compare greater.
/// The *predecessor* operation moves towards values that compare lesser.
#[unstable(feature = "step_trait", reason = "recently redesigned", issue = "42168")]
pub trait Step: Clone + PartialOrd + Sized {
    /// Returns the number of *successor* steps required to get from `start` to `end`.
    ///
    /// Returns `None` if the number of steps would overflow `usize`
    /// (or is infinite, or if `end` would never be reached).
    ///
    /// # Invariants
    ///
    /// For any `a`, `b`, and `n`:
    ///
    /// * `steps_between(&a, &b) == Some(n)` if and only if `Step::forward_checked(&a, n) == Some(b)`
    /// * `steps_between(&a, &b) == Some(n)` if and only if `Step::backward_checked(&b, n) == Some(a)`
    /// * `steps_between(&a, &b) == Some(n)` only if `a <= b`
    /// * Corollary: `steps_between(&a, &b) == Some(0)` if and only if `a == b`
    /// * Note that `a <= b` does _not_ imply `steps_between(&a, &b) != None`;
    /// this is the case when it would require more than `usize::MAX` steps to get to `b`
    /// * `steps_between(&a, &b) == None` if `a > b`
    fn steps_between(start: &Self, end: &Self) -> Option<usize>;

    /// Returns the value that would be obtained by taking the *successor*
    /// of `self` `count` times.
    ///
    /// If this would overflow the range of values supported by `Self`, returns `None`.
    ///
    /// # Invariants
    ///
    /// For any `a`, `n`, and `m`:
    ///
    /// * `Step::forward_checked(a, n).and_then(|x| Step::forward_checked(x, m)) == Step::forward_checked(a, m).and_then(|x| Step::forward_checked(x, n))`
    ///
    /// For any `a`, `n`, and `m` where `n + m` does not overflow:
    ///
    /// * `Step::forward_checked(a, n).and_then(|x| Step::forward_checked(x, m)) == Step::forward_checked(a, n + m)`
    ///
    /// For any `a` and `n`:
    ///
    /// * `Step::forward_checked(a, n) == (0..n).try_fold(a, |x, _| Step::forward_checked(&x, 1))`
    /// * Corollary: `Step::forward_checked(&a, 0) == Some(a)`
    fn forward_checked(start: Self, count: usize) -> Option<Self>;

    /// Returns the value that would be obtained by taking the *successor*
    /// of `self` `count` times.
    ///
    /// If this would overflow the range of values supported by `Self`,
    /// this function is allowed to panic, wrap, or saturate.
    /// The suggested behavior is to panic when debug assertions are enabled,
    /// and to wrap or saturate otherwise.
    ///
    /// Unsafe code should not rely on the correctness of behavior after overflow.
    ///
    /// # Invariants
    ///
    /// For any `a`, `n`, and `m`, where no overflow occurs:
    ///
    /// * `Step::forward(Step::forward(a, n), m) == Step::forward(a, n + m)`
    ///
    /// For any `a` and `n`, where no overflow occurs:
    ///
    /// * `Step::forward_checked(a, n) == Some(Step::forward(a, n))`
    /// * `Step::forward(a, n) == (0..n).fold(a, |x, _| Step::forward(x, 1))`
    /// * Corollary: `Step::forward(a, 0) == a`
    /// * `Step::forward(a, n) >= a`
    /// * `Step::backward(Step::forward(a, n), n) == a`
    fn forward(start: Self, count: usize) -> Self {
        // Default: delegate to the checked variant and panic on overflow,
        // which satisfies the suggested behavior documented above.
        Step::forward_checked(start, count).expect("overflow in `Step::forward`")
    }

    /// Returns the value that would be obtained by taking the *successor*
    /// of `self` `count` times.
    ///
    /// # Safety
    ///
    /// It is undefined behavior for this operation to overflow the
    /// range of values supported by `Self`. If you cannot guarantee that this
    /// will not overflow, use `forward` or `forward_checked` instead.
    ///
    /// # Invariants
    ///
    /// For any `a`:
    ///
    /// * if there exists `b` such that `b > a`, it is safe to call `Step::forward_unchecked(a, 1)`
    /// * if there exists `b`, `n` such that `steps_between(&a, &b) == Some(n)`,
    /// it is safe to call `Step::forward_unchecked(a, m)` for any `m <= n`.
    ///
    /// For any `a` and `n`, where no overflow occurs:
    ///
    /// * `Step::forward_unchecked(a, n)` is equivalent to `Step::forward(a, n)`
    unsafe fn forward_unchecked(start: Self, count: usize) -> Self {
        // Default: fall back to the safe variant; implementors may override
        // this with a faster operation that relies on the caller's guarantee.
        Step::forward(start, count)
    }

    /// Returns the value that would be obtained by taking the *predecessor*
    /// of `self` `count` times.
    ///
    /// If this would overflow the range of values supported by `Self`, returns `None`.
    ///
    /// # Invariants
    ///
    /// For any `a`, `n`, and `m`:
    ///
    /// * `Step::backward_checked(a, n).and_then(|x| Step::backward_checked(x, m)) == n.checked_add(m).and_then(|x| Step::backward_checked(a, x))`
    /// * `Step::backward_checked(a, n).and_then(|x| Step::backward_checked(x, m)) == try { Step::backward_checked(a, n.checked_add(m)?) }`
    ///
    /// For any `a` and `n`:
    ///
    /// * `Step::backward_checked(a, n) == (0..n).try_fold(a, |x, _| Step::backward_checked(&x, 1))`
    /// * Corollary: `Step::backward_checked(&a, 0) == Some(a)`
    fn backward_checked(start: Self, count: usize) -> Option<Self>;

    /// Returns the value that would be obtained by taking the *predecessor*
    /// of `self` `count` times.
    ///
    /// If this would overflow the range of values supported by `Self`,
    /// this function is allowed to panic, wrap, or saturate.
    /// The suggested behavior is to panic when debug assertions are enabled,
    /// and to wrap or saturate otherwise.
    ///
    /// Unsafe code should not rely on the correctness of behavior after overflow.
    ///
    /// # Invariants
    ///
    /// For any `a`, `n`, and `m`, where no overflow occurs:
    ///
    /// * `Step::backward(Step::backward(a, n), m) == Step::backward(a, n + m)`
    ///
    /// For any `a` and `n`, where no overflow occurs:
    ///
    /// * `Step::backward_checked(a, n) == Some(Step::backward(a, n))`
    /// * `Step::backward(a, n) == (0..n).fold(a, |x, _| Step::backward(x, 1))`
    /// * Corollary: `Step::backward(a, 0) == a`
    /// * `Step::backward(a, n) <= a`
    /// * `Step::forward(Step::backward(a, n), n) == a`
    fn backward(start: Self, count: usize) -> Self {
        // Default: delegate to the checked variant and panic on overflow,
        // mirroring the default for `Step::forward`.
        Step::backward_checked(start, count).expect("overflow in `Step::backward`")
    }

    /// Returns the value that would be obtained by taking the *predecessor*
    /// of `self` `count` times.
    ///
    /// # Safety
    ///
    /// It is undefined behavior for this operation to overflow the
    /// range of values supported by `Self`. If you cannot guarantee that this
    /// will not overflow, use `backward` or `backward_checked` instead.
    ///
    /// # Invariants
    ///
    /// For any `a`:
    ///
    /// * if there exists `b` such that `b < a`, it is safe to call `Step::backward_unchecked(a, 1)`
    /// * if there exists `b`, `n` such that `steps_between(&b, &a) == Some(n)`,
    /// it is safe to call `Step::backward_unchecked(a, m)` for any `m <= n`.
    ///
    /// For any `a` and `n`, where no overflow occurs:
    ///
    /// * `Step::backward_unchecked(a, n)` is equivalent to `Step::backward(a, n)`
    unsafe fn backward_unchecked(start: Self, count: usize) -> Self {
        // Default: fall back to the safe variant; implementors may override
        // this with a faster operation that relies on the caller's guarantee.
        Step::backward(start, count)
    }
}
|
|
|
|
|
|
2017-07-06 00:32:54 +02:00
|
|
|
// Expands to the `Step` methods that are textually identical for every
// integer type: the unchecked fast paths and the overflow-tolerant
// `forward`/`backward` defaults.
// These are still macro-generated because the integer literals resolve to different types.
macro_rules! step_identical_methods {
    () => {
        #[inline]
        unsafe fn forward_unchecked(start: Self, n: usize) -> Self {
            // SAFETY: the caller has to guarantee that `start + n` doesn't overflow.
            unsafe { start.unchecked_add(n as Self) }
        }

        #[inline]
        unsafe fn backward_unchecked(start: Self, n: usize) -> Self {
            // SAFETY: the caller has to guarantee that `start - n` doesn't overflow.
            unsafe { start.unchecked_sub(n as Self) }
        }

        #[inline]
        #[allow(arithmetic_overflow)]
        // `#[rustc_inherit_overflow_checks]` makes the overflow behavior here follow
        // the *caller's* crate debug-assertion setting rather than core's.
        #[rustc_inherit_overflow_checks]
        fn forward(start: Self, n: usize) -> Self {
            // In debug builds, trigger a panic on overflow.
            // This should optimize completely out in release builds.
            if Self::forward_checked(start, n).is_none() {
                // Deliberate overflowing expression: panics under debug
                // assertions, is a no-op otherwise (hence `allow(arithmetic_overflow)`).
                let _ = Self::MAX + 1;
            }
            // Do wrapping math to allow e.g. `Step::forward(-128i8, 255)`.
            start.wrapping_add(n as Self)
        }

        #[inline]
        #[allow(arithmetic_overflow)]
        #[rustc_inherit_overflow_checks]
        fn backward(start: Self, n: usize) -> Self {
            // In debug builds, trigger a panic on overflow.
            // This should optimize completely out in release builds.
            if Self::backward_checked(start, n).is_none() {
                // Deliberate overflowing expression: panics under debug
                // assertions, is a no-op otherwise.
                let _ = Self::MIN - 1;
            }
            // Do wrapping math to allow e.g. `Step::backward(127i8, 255)`.
            start.wrapping_sub(n as Self)
        }
    };
}
|
|
|
|
|
|
2020-02-18 13:18:33 -05:00
|
|
|
// Generates `Step` impls for all primitive integer types. The two input
// groups distinguish types that fit in `usize` (difference computed by a
// plain cast) from types wider than `usize` (difference may not fit).
macro_rules! step_integer_impls {
    {
        narrower than or same width as usize:
            $( [ $u_narrower:ident $i_narrower:ident ] ),+;
        wider than usize:
            $( [ $u_wider:ident $i_wider:ident ] ),+;
    } => {
        $(
            #[allow(unreachable_patterns)]
            #[unstable(feature = "step_trait", reason = "recently redesigned", issue = "42168")]
            impl Step for $u_narrower {
                step_identical_methods!();

                #[inline]
                fn steps_between(start: &Self, end: &Self) -> Option<usize> {
                    if *start <= *end {
                        // This relies on $u_narrower <= usize
                        Some((*end - *start) as usize)
                    } else {
                        None
                    }
                }

                #[inline]
                fn forward_checked(start: Self, n: usize) -> Option<Self> {
                    match Self::try_from(n) {
                        Ok(n) => start.checked_add(n),
                        Err(_) => None, // if n is out of range, `unsigned_start + n` is too
                    }
                }

                #[inline]
                fn backward_checked(start: Self, n: usize) -> Option<Self> {
                    match Self::try_from(n) {
                        Ok(n) => start.checked_sub(n),
                        Err(_) => None, // if n is out of range, `unsigned_start - n` is too
                    }
                }
            }

            #[allow(unreachable_patterns)]
            #[unstable(feature = "step_trait", reason = "recently redesigned", issue = "42168")]
            impl Step for $i_narrower {
                step_identical_methods!();

                #[inline]
                fn steps_between(start: &Self, end: &Self) -> Option<usize> {
                    if *start <= *end {
                        // This relies on $i_narrower <= usize
                        //
                        // Casting to isize extends the width but preserves the sign.
                        // Use wrapping_sub in isize space and cast to usize to compute
                        // the difference that might not fit inside the range of isize.
                        Some((*end as isize).wrapping_sub(*start as isize) as usize)
                    } else {
                        None
                    }
                }

                #[inline]
                fn forward_checked(start: Self, n: usize) -> Option<Self> {
                    // Check against the *unsigned* counterpart: `n` may exceed
                    // the signed type's max but still be a valid step count.
                    match $u_narrower::try_from(n) {
                        Ok(n) => {
                            // Wrapping handles cases like
                            // `Step::forward(-120_i8, 200) == Some(80_i8)`,
                            // even though 200 is out of range for i8.
                            let wrapped = start.wrapping_add(n as Self);
                            if wrapped >= start {
                                Some(wrapped)
                            } else {
                                None // Addition overflowed
                            }
                        }
                        // If n is out of range of e.g. u8,
                        // then it is bigger than the entire range for i8 is wide
                        // so `any_i8 + n` necessarily overflows i8.
                        Err(_) => None,
                    }
                }

                #[inline]
                fn backward_checked(start: Self, n: usize) -> Option<Self> {
                    match $u_narrower::try_from(n) {
                        Ok(n) => {
                            // Wrapping handles cases like
                            // `Step::forward(-120_i8, 200) == Some(80_i8)`,
                            // even though 200 is out of range for i8.
                            let wrapped = start.wrapping_sub(n as Self);
                            if wrapped <= start {
                                Some(wrapped)
                            } else {
                                None // Subtraction overflowed
                            }
                        }
                        // If n is out of range of e.g. u8,
                        // then it is bigger than the entire range for i8 is wide
                        // so `any_i8 - n` necessarily overflows i8.
                        Err(_) => None,
                    }
                }
            }
        )+

        $(
            #[allow(unreachable_patterns)]
            #[unstable(feature = "step_trait", reason = "recently redesigned", issue = "42168")]
            impl Step for $u_wider {
                step_identical_methods!();

                #[inline]
                fn steps_between(start: &Self, end: &Self) -> Option<usize> {
                    if *start <= *end {
                        // The difference may exceed usize::MAX; try_from reports that as None.
                        usize::try_from(*end - *start).ok()
                    } else {
                        None
                    }
                }

                #[inline]
                fn forward_checked(start: Self, n: usize) -> Option<Self> {
                    // `n as Self` is lossless because Self is wider than usize.
                    start.checked_add(n as Self)
                }

                #[inline]
                fn backward_checked(start: Self, n: usize) -> Option<Self> {
                    start.checked_sub(n as Self)
                }
            }

            #[allow(unreachable_patterns)]
            #[unstable(feature = "step_trait", reason = "recently redesigned", issue = "42168")]
            impl Step for $i_wider {
                step_identical_methods!();

                #[inline]
                fn steps_between(start: &Self, end: &Self) -> Option<usize> {
                    if *start <= *end {
                        match end.checked_sub(*start) {
                            Some(result) => usize::try_from(result).ok(),
                            // If the difference is too big for e.g. i128,
                            // it's also gonna be too big for usize with fewer bits.
                            None => None,
                        }
                    } else {
                        None
                    }
                }

                #[inline]
                fn forward_checked(start: Self, n: usize) -> Option<Self> {
                    // `n as Self` is lossless because Self is wider than usize.
                    start.checked_add(n as Self)
                }

                #[inline]
                fn backward_checked(start: Self, n: usize) -> Option<Self> {
                    start.checked_sub(n as Self)
                }
            }
        )+
    };
}
|
2019-05-04 12:04:52 +02:00
|
|
|
|
2020-02-18 13:18:33 -05:00
|
|
|
// Instantiate the `Step` impls per target pointer width: which integer types
// count as "narrower than or same width as usize" depends on the platform.
#[cfg(target_pointer_width = "64")]
step_integer_impls! {
    narrower than or same width as usize: [u8 i8], [u16 i16], [u32 i32], [u64 i64], [usize isize];
    wider than usize: [u128 i128];
}

#[cfg(target_pointer_width = "32")]
step_integer_impls! {
    narrower than or same width as usize: [u8 i8], [u16 i16], [u32 i32], [usize isize];
    wider than usize: [u64 i64], [u128 i128];
}

#[cfg(target_pointer_width = "16")]
step_integer_impls! {
    narrower than or same width as usize: [u8 i8], [u16 i16], [usize isize];
    wider than usize: [u32 i32], [u64 i64], [u128 i128];
}
|
2016-04-18 20:08:27 +02:00
|
|
|
|
2020-05-20 23:46:57 -04:00
|
|
|
// `Step` for `char` works on the underlying `u32` code points, skipping over
// the surrogate gap `0xD800..0xE000` (0x800 values that are not valid chars).
#[unstable(feature = "step_trait", reason = "recently redesigned", issue = "42168")]
impl Step for char {
    #[inline]
    fn steps_between(&start: &char, &end: &char) -> Option<usize> {
        let start = start as u32;
        let end = end as u32;
        if start <= end {
            let count = end - start;
            if start < 0xD800 && 0xE000 <= end {
                // The range crosses the surrogate gap, so the raw code-point
                // difference overcounts by exactly 0x800.
                usize::try_from(count - 0x800).ok()
            } else {
                usize::try_from(count).ok()
            }
        } else {
            None
        }
    }

    #[inline]
    fn forward_checked(start: char, count: usize) -> Option<char> {
        let start = start as u32;
        let mut res = Step::forward_checked(start, count)?;
        if start < 0xD800 && 0xD800 <= res {
            // Stepping landed in (or past) the surrogate gap; skip over it.
            res = Step::forward_checked(res, 0x800)?;
        }
        if res <= char::MAX as u32 {
            // SAFETY: res is a valid unicode scalar
            // (below 0x110000 and not in 0xD800..0xE000)
            Some(unsafe { char::from_u32_unchecked(res) })
        } else {
            None
        }
    }

    #[inline]
    fn backward_checked(start: char, count: usize) -> Option<char> {
        let start = start as u32;
        let mut res = Step::backward_checked(start, count)?;
        if start >= 0xE000 && 0xE000 > res {
            // Stepping crossed down into the surrogate gap; skip over it.
            res = Step::backward_checked(res, 0x800)?;
        }
        // No upper-bound check needed: stepping backwards from a valid char
        // cannot exceed char::MAX.
        // SAFETY: res is a valid unicode scalar
        // (below 0x110000 and not in 0xD800..0xE000)
        Some(unsafe { char::from_u32_unchecked(res) })
    }

    #[inline]
    unsafe fn forward_unchecked(start: char, count: usize) -> char {
        let start = start as u32;
        // SAFETY: the caller must guarantee that this doesn't overflow
        // the range of values for a char.
        let mut res = unsafe { Step::forward_unchecked(start, count) };
        if start < 0xD800 && 0xD800 <= res {
            // SAFETY: the caller must guarantee that this doesn't overflow
            // the range of values for a char.
            res = unsafe { Step::forward_unchecked(res, 0x800) };
        }
        // SAFETY: because of the previous contract, this is guaranteed
        // by the caller to be a valid char.
        unsafe { char::from_u32_unchecked(res) }
    }

    #[inline]
    unsafe fn backward_unchecked(start: char, count: usize) -> char {
        let start = start as u32;
        // SAFETY: the caller must guarantee that this doesn't overflow
        // the range of values for a char.
        let mut res = unsafe { Step::backward_unchecked(start, count) };
        if start >= 0xE000 && 0xE000 > res {
            // SAFETY: the caller must guarantee that this doesn't overflow
            // the range of values for a char.
            res = unsafe { Step::backward_unchecked(res, 0x800) };
        }
        // SAFETY: because of the previous contract, this is guaranteed
        // by the caller to be a valid char.
        unsafe { char::from_u32_unchecked(res) }
    }
}
|
|
|
|
|
|
2016-04-18 20:08:27 +02:00
|
|
|
// Marks `ops::Range<$t>` as `ExactSizeIterator` for the listed types.
macro_rules! range_exact_iter_impl {
    ($($t:ty)*) => ($(
        #[stable(feature = "rust1", since = "1.0.0")]
        impl ExactSizeIterator for ops::Range<$t> { }
    )*)
}
|
2016-04-18 20:08:27 +02:00
|
|
|
|
2021-01-31 21:15:18 +01:00
|
|
|
/// Safety: This macro must only be used on types that are `Copy` and result in ranges
/// which have an exact `size_hint()` where the upper bound must not be `None`.
macro_rules! unsafe_range_trusted_random_access_impl {
    ($($t:ty)*) => ($(
        #[doc(hidden)]
        #[unstable(feature = "trusted_random_access", issue = "none")]
        // SAFETY: upheld by the macro's contract documented above.
        unsafe impl TrustedRandomAccess for ops::Range<$t> {}

        #[doc(hidden)]
        #[unstable(feature = "trusted_random_access", issue = "none")]
        // SAFETY: upheld by the macro's contract documented above; producing a
        // range element has no side effects.
        unsafe impl TrustedRandomAccessNoCoerce for ops::Range<$t> {
            const MAY_HAVE_SIDE_EFFECT: bool = false;
        }
    )*)
}
|
|
|
|
|
|
2016-09-11 01:55:15 +00:00
|
|
|
// Marks `ops::RangeInclusive<$t>` as `ExactSizeIterator` for the listed types.
macro_rules! range_incl_exact_iter_impl {
    ($($t:ty)*) => ($(
        #[stable(feature = "inclusive_range", since = "1.26.0")]
        impl ExactSizeIterator for ops::RangeInclusive<$t> { }
    )*)
}
|
|
|
|
|
|
2021-04-02 00:58:45 -04:00
|
|
|
/// Specialization implementations for `Range`.
///
/// The methods mirror the `Iterator`/`DoubleEndedIterator` entry points so
/// that `Range<A>` can use a checked default implementation for any
/// `A: Step`, with a specialized unchecked path elsewhere in this file.
trait RangeIteratorImpl {
    type Item;

    // Iterator
    fn spec_next(&mut self) -> Option<Self::Item>;
    fn spec_nth(&mut self, n: usize) -> Option<Self::Item>;
    fn spec_advance_by(&mut self, n: usize) -> Result<(), NonZeroUsize>;

    // DoubleEndedIterator
    fn spec_next_back(&mut self) -> Option<Self::Item>;
    fn spec_nth_back(&mut self, n: usize) -> Option<Self::Item>;
    fn spec_advance_back_by(&mut self, n: usize) -> Result<(), NonZeroUsize>;
}
|
|
|
|
|
|
2023-03-28 00:00:58 +00:00
|
|
|
// Default (non-specialized) implementation: uses the *checked* `Step`
// operations, panicking only if a `Step` impl violates its invariants.
impl<A: Step> RangeIteratorImpl for ops::Range<A> {
    type Item = A;

    #[inline]
    default fn spec_next(&mut self) -> Option<A> {
        if self.start < self.end {
            // `start < end` guarantees a successor exists, so a failure here
            // means the `Step` impl is broken.
            let n =
                Step::forward_checked(self.start.clone(), 1).expect("`Step` invariants not upheld");
            Some(mem::replace(&mut self.start, n))
        } else {
            None
        }
    }

    #[inline]
    default fn spec_nth(&mut self, n: usize) -> Option<A> {
        if let Some(plus_n) = Step::forward_checked(self.start.clone(), n) {
            if plus_n < self.end {
                self.start =
                    Step::forward_checked(plus_n.clone(), 1).expect("`Step` invariants not upheld");
                return Some(plus_n);
            }
        }

        // Skipped past the end (or overflowed): exhaust the range.
        self.start = self.end.clone();
        None
    }

    #[inline]
    default fn spec_advance_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
        // Remaining length; `None` from steps_between means "more than
        // usize::MAX steps", which clamps to usize::MAX.
        let available = if self.start <= self.end {
            Step::steps_between(&self.start, &self.end).unwrap_or(usize::MAX)
        } else {
            0
        };

        let taken = available.min(n);

        self.start =
            Step::forward_checked(self.start.clone(), taken).expect("`Step` invariants not upheld");

        // Ok(()) if everything was taken; otherwise Err with the shortfall.
        NonZeroUsize::new(n - taken).map_or(Ok(()), Err)
    }

    #[inline]
    default fn spec_next_back(&mut self) -> Option<A> {
        if self.start < self.end {
            self.end =
                Step::backward_checked(self.end.clone(), 1).expect("`Step` invariants not upheld");
            Some(self.end.clone())
        } else {
            None
        }
    }

    #[inline]
    default fn spec_nth_back(&mut self, n: usize) -> Option<A> {
        if let Some(minus_n) = Step::backward_checked(self.end.clone(), n) {
            if minus_n > self.start {
                self.end =
                    Step::backward_checked(minus_n, 1).expect("`Step` invariants not upheld");
                return Some(self.end.clone());
            }
        }

        // Skipped past the start (or underflowed): exhaust the range.
        self.end = self.start.clone();
        None
    }

    #[inline]
    default fn spec_advance_back_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
        let available = if self.start <= self.end {
            Step::steps_between(&self.start, &self.end).unwrap_or(usize::MAX)
        } else {
            0
        };

        let taken = available.min(n);

        self.end =
            Step::backward_checked(self.end.clone(), taken).expect("`Step` invariants not upheld");

        // Ok(()) if everything was taken; otherwise Err with the shortfall.
        NonZeroUsize::new(n - taken).map_or(Ok(()), Err)
    }
}
|
|
|
|
|
|
2023-03-28 00:00:58 +00:00
|
|
|
// Specialized implementation for `TrustedStep` types: the trusted `Step`
// invariants let these methods use the *unchecked* operations after a
// bounds comparison, avoiding the checked variants' overflow handling.
impl<T: TrustedStep> RangeIteratorImpl for ops::Range<T> {
    #[inline]
    fn spec_next(&mut self) -> Option<T> {
        if self.start < self.end {
            // SAFETY: just checked precondition
            let n = unsafe { Step::forward_unchecked(self.start.clone(), 1) };
            Some(mem::replace(&mut self.start, n))
        } else {
            None
        }
    }

    #[inline]
    fn spec_nth(&mut self, n: usize) -> Option<T> {
        if let Some(plus_n) = Step::forward_checked(self.start.clone(), n) {
            if plus_n < self.end {
                // SAFETY: just checked precondition
                self.start = unsafe { Step::forward_unchecked(plus_n.clone(), 1) };
                return Some(plus_n);
            }
        }

        // Skipped past the end (or overflowed): exhaust the range.
        self.start = self.end.clone();
        None
    }

    #[inline]
    fn spec_advance_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
        let available = if self.start <= self.end {
            Step::steps_between(&self.start, &self.end).unwrap_or(usize::MAX)
        } else {
            0
        };

        let taken = available.min(n);

        // SAFETY: the conditions above ensure that the count is in bounds. If start <= end
        // then steps_between either returns a bound to which we clamp or returns None which
        // together with the initial inequality implies more than usize::MAX steps.
        // Otherwise 0 is returned which always safe to use.
        self.start = unsafe { Step::forward_unchecked(self.start.clone(), taken) };

        // Ok(()) if everything was taken; otherwise Err with the shortfall.
        NonZeroUsize::new(n - taken).map_or(Ok(()), Err)
    }

    #[inline]
    fn spec_next_back(&mut self) -> Option<T> {
        if self.start < self.end {
            // SAFETY: just checked precondition
            self.end = unsafe { Step::backward_unchecked(self.end.clone(), 1) };
            Some(self.end.clone())
        } else {
            None
        }
    }

    #[inline]
    fn spec_nth_back(&mut self, n: usize) -> Option<T> {
        if let Some(minus_n) = Step::backward_checked(self.end.clone(), n) {
            if minus_n > self.start {
                // SAFETY: just checked precondition
                self.end = unsafe { Step::backward_unchecked(minus_n, 1) };
                return Some(self.end.clone());
            }
        }

        // Skipped past the start (or underflowed): exhaust the range.
        self.end = self.start.clone();
        None
    }

    #[inline]
    fn spec_advance_back_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
        let available = if self.start <= self.end {
            Step::steps_between(&self.start, &self.end).unwrap_or(usize::MAX)
        } else {
            0
        };

        let taken = available.min(n);

        // SAFETY: same as the spec_advance_by() implementation
        self.end = unsafe { Step::backward_unchecked(self.end.clone(), taken) };

        // Ok(()) if everything was taken; otherwise Err with the shortfall.
        NonZeroUsize::new(n - taken).map_or(Ok(()), Err)
    }
}
|
|
|
|
|
|
|
|
|
|
#[stable(feature = "rust1", since = "1.0.0")]
impl<A: Step> Iterator for ops::Range<A> {
    type Item = A;

    #[inline]
    fn next(&mut self) -> Option<A> {
        // Delegates to the specialization trait so that `A: TrustedStep`
        // types take the unchecked fast path.
        self.spec_next()
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        if self.start < self.end {
            // `steps_between` is `None` when the distance overflows `usize`;
            // then the lower bound saturates and the upper bound is unknown.
            let hint = Step::steps_between(&self.start, &self.end);
            (hint.unwrap_or(usize::MAX), hint)
        } else {
            // Empty range: exactly zero elements remain.
            (0, Some(0))
        }
    }

    #[inline]
    fn nth(&mut self, n: usize) -> Option<A> {
        self.spec_nth(n)
    }

    #[inline]
    fn last(mut self) -> Option<A> {
        // The last element is simply the one yielded from the back.
        self.next_back()
    }

    #[inline]
    fn min(mut self) -> Option<A> {
        // Elements are produced in ascending order, so the minimum is the
        // first element.
        self.next()
    }

    #[inline]
    fn max(mut self) -> Option<A> {
        // Likewise, the maximum is the last element.
        self.next_back()
    }

    #[inline]
    fn is_sorted(self) -> bool {
        // Each element is a successor of the previous one, and successors
        // compare greater per the `Step` contract.
        true
    }

    #[inline]
    fn advance_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
        self.spec_advance_by(n)
    }

    #[inline]
    unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item
    where
        Self: TrustedRandomAccessNoCoerce,
    {
        // SAFETY: The TrustedRandomAccess contract requires that callers only pass an index
        // that is in bounds.
        // Additionally Self: TrustedRandomAccess is only implemented for Copy types
        // which means even repeated reads of the same index would be safe.
        unsafe { Step::forward_unchecked(self.start.clone(), idx) }
    }
}
|
|
|
|
|
|
2016-09-11 01:55:15 +00:00
|
|
|
// These macros generate `ExactSizeIterator` impls for various range types.
//
// * `ExactSizeIterator::len` is required to always return an exact `usize`,
//   so no range can be longer than `usize::MAX`.
// * For integer types in `Range<_>` this is the case for types narrower than or as wide as `usize`.
//   For integer types in `RangeInclusive<_>`
//   this is the case for types *strictly narrower* than `usize`
//   since e.g. `(0..=u64::MAX).len()` would be `u64::MAX + 1`.
range_exact_iter_impl! {
    usize u8 u16
    isize i8 i16

    // These are incorrect per the reasoning above,
    // but removing them would be a breaking change as they were stabilized in Rust 1.0.0.
    // So e.g. `(0..66_000_u32).len()` for example will compile without error or warnings
    // on 16-bit platforms, but continue to give a wrong result.
    u32
    i32
}
|
2021-01-31 21:15:18 +01:00
|
|
|
|
|
|
|
|
// `TrustedRandomAccess` impls for ranges whose element type fits in `usize`.
// Types as wide as (or wider than) `usize` are gated on pointer width so the
// length is always representable on the target platform.
unsafe_range_trusted_random_access_impl! {
    usize u8 u16
    isize i8 i16
}

#[cfg(target_pointer_width = "32")]
unsafe_range_trusted_random_access_impl! {
    u32 i32
}

#[cfg(target_pointer_width = "64")]
unsafe_range_trusted_random_access_impl! {
    u32 i32
    u64 i64
}
|
|
|
|
|
|
2020-02-18 13:18:33 -05:00
|
|
|
// `ExactSizeIterator` for inclusive ranges: only types *strictly narrower*
// than `usize` are correct here (see the reasoning above `range_exact_iter_impl!`).
range_incl_exact_iter_impl! {
    u8
    i8

    // These are incorrect per the reasoning above,
    // but removing them would be a breaking change as they were stabilized in Rust 1.26.0.
    // So e.g. `(0..=u16::MAX).len()` for example will compile without error or warnings
    // on 16-bit platforms, but continue to give a wrong result.
    u16
    i16
}
|
2016-10-20 14:34:34 +02:00
|
|
|
|
2016-04-18 20:08:27 +02:00
|
|
|
// All methods delegate to the specialization trait so that `A: TrustedStep`
// types take the unchecked fast paths.
#[stable(feature = "rust1", since = "1.0.0")]
impl<A: Step> DoubleEndedIterator for ops::Range<A> {
    #[inline]
    fn next_back(&mut self) -> Option<A> {
        self.spec_next_back()
    }

    #[inline]
    fn nth_back(&mut self, n: usize) -> Option<A> {
        self.spec_nth_back(n)
    }

    #[inline]
    fn advance_back_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
        self.spec_advance_back_by(n)
    }
}
|
|
|
|
|
|
2021-04-02 00:58:45 -04:00
|
|
|
// Safety:
// The following invariants for `Step::steps_between` exist:
//
// > * `steps_between(&a, &b) == Some(n)` only if `a <= b`
// >   * Note that `a <= b` does _not_ imply `steps_between(&a, &b) != None`;
// >     this is the case when it would require more than `usize::MAX` steps to
// >     get to `b`
// > * `steps_between(&a, &b) == None` if `a > b`
//
// The first invariant is what is generally required for `TrustedLen` to be
// sound. The note addendum satisfies an additional `TrustedLen` invariant.
//
// > The upper bound must only be `None` if the actual iterator length is larger
// > than `usize::MAX`
//
// The second invariant logically follows the first so long as the `PartialOrd`
// implementation is correct; regardless it is explicitly stated. If `a > b`
// then `(0, Some(0))` is returned by `ops::Range<A: Step>::size_hint`. As such
// the second invariant is upheld.
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<A: TrustedStep> TrustedLen for ops::Range<A> {}
|
2020-02-18 13:18:33 -05:00
|
|
|
|
2018-03-03 14:15:28 +01:00
|
|
|
// Once `spec_next` returns `None` the range has `start >= end` and stays that
// way, so `Range` keeps returning `None` and is fused.
#[stable(feature = "fused", since = "1.26.0")]
impl<A: Step> FusedIterator for ops::Range<A> {}
|
2016-08-13 14:42:36 -04:00
|
|
|
|
2016-04-18 20:08:27 +02:00
|
|
|
#[stable(feature = "rust1", since = "1.0.0")]
|
2017-07-05 23:49:33 +02:00
|
|
|
impl<A: Step> Iterator for ops::RangeFrom<A> {
|
2016-04-18 20:08:27 +02:00
|
|
|
type Item = A;
|
|
|
|
|
|
|
|
|
|
#[inline]
|
|
|
|
|
fn next(&mut self) -> Option<A> {
|
2020-02-18 13:18:33 -05:00
|
|
|
let n = Step::forward(self.start.clone(), 1);
|
|
|
|
|
Some(mem::replace(&mut self.start, n))
|
2016-04-18 20:08:27 +02:00
|
|
|
}
|
2017-05-30 09:15:25 -07:00
|
|
|
|
|
|
|
|
#[inline]
|
|
|
|
|
fn size_hint(&self) -> (usize, Option<usize>) {
|
|
|
|
|
(usize::MAX, None)
|
|
|
|
|
}
|
2017-07-06 01:14:20 +02:00
|
|
|
|
|
|
|
|
#[inline]
|
|
|
|
|
fn nth(&mut self, n: usize) -> Option<A> {
|
2020-05-19 20:00:29 -04:00
|
|
|
let plus_n = Step::forward(self.start.clone(), n);
|
2020-02-18 13:18:33 -05:00
|
|
|
self.start = Step::forward(plus_n.clone(), 1);
|
2017-07-06 01:14:20 +02:00
|
|
|
Some(plus_n)
|
|
|
|
|
}
|
2016-04-18 20:08:27 +02:00
|
|
|
}
|
|
|
|
|
|
2021-04-02 00:58:45 -04:00
|
|
|
// Safety: See above implementation for `ops::Range<A>`
// (`RangeFrom::size_hint` reports `(usize::MAX, None)`, which is correct for
// an iterator longer than `usize::MAX`.)
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<A: TrustedStep> TrustedLen for ops::RangeFrom<A> {}
|
|
|
|
|
|
2018-03-03 14:15:28 +01:00
|
|
|
// `RangeFrom::next` never returns `None`, so fusedness holds trivially.
#[stable(feature = "fused", since = "1.26.0")]
impl<A: Step> FusedIterator for ops::RangeFrom<A> {}
|
2016-08-13 14:42:36 -04:00
|
|
|
|
2021-04-02 00:58:45 -04:00
|
|
|
// Private specialization trait for `RangeInclusive`'s iterator methods:
// the blanket `A: Step` impl uses checked stepping, while the
// `T: TrustedStep` impl below overrides it with unchecked stepping.
trait RangeInclusiveIteratorImpl {
    type Item;

    // Iterator
    fn spec_next(&mut self) -> Option<Self::Item>;
    fn spec_try_fold<B, F, R>(&mut self, init: B, f: F) -> R
    where
        Self: Sized,
        F: FnMut(B, Self::Item) -> R,
        R: Try<Output = B>;

    // DoubleEndedIterator
    fn spec_next_back(&mut self) -> Option<Self::Item>;
    fn spec_try_rfold<B, F, R>(&mut self, init: B, f: F) -> R
    where
        Self: Sized,
        F: FnMut(B, Self::Item) -> R,
        R: Try<Output = B>;
}
|
2016-08-13 14:42:36 -04:00
|
|
|
|
2021-04-02 00:58:45 -04:00
|
|
|
// Fallback implementation for any `A: Step`: stepping uses the checked
// variants, which cannot overflow but cost an extra branch per element.
impl<A: Step> RangeInclusiveIteratorImpl for ops::RangeInclusive<A> {
    type Item = A;

    #[inline]
    default fn spec_next(&mut self) -> Option<A> {
        if self.is_empty() {
            return None;
        }
        // When start == end one final element remains; yield it and mark the
        // range exhausted instead of stepping past `end`.
        let is_iterating = self.start < self.end;
        Some(if is_iterating {
            let n =
                Step::forward_checked(self.start.clone(), 1).expect("`Step` invariants not upheld");
            mem::replace(&mut self.start, n)
        } else {
            self.exhausted = true;
            self.start.clone()
        })
    }

    #[inline]
    default fn spec_try_fold<B, F, R>(&mut self, init: B, mut f: F) -> R
    where
        Self: Sized,
        F: FnMut(B, A) -> R,
        R: Try<Output = B>,
    {
        if self.is_empty() {
            return try { init };
        }

        let mut accum = init;

        // Consume all elements strictly before `end`...
        while self.start < self.end {
            let n =
                Step::forward_checked(self.start.clone(), 1).expect("`Step` invariants not upheld");
            let n = mem::replace(&mut self.start, n);
            accum = f(accum, n)?;
        }

        self.exhausted = true;

        // ...then the final element (start == end), without stepping past it.
        if self.start == self.end {
            accum = f(accum, self.start.clone())?;
        }

        try { accum }
    }

    #[inline]
    default fn spec_next_back(&mut self) -> Option<A> {
        if self.is_empty() {
            return None;
        }
        // Mirror of `spec_next`: the last element is yielded without stepping
        // `end` below `start`.
        let is_iterating = self.start < self.end;
        Some(if is_iterating {
            let n =
                Step::backward_checked(self.end.clone(), 1).expect("`Step` invariants not upheld");
            mem::replace(&mut self.end, n)
        } else {
            self.exhausted = true;
            self.end.clone()
        })
    }

    #[inline]
    default fn spec_try_rfold<B, F, R>(&mut self, init: B, mut f: F) -> R
    where
        Self: Sized,
        F: FnMut(B, A) -> R,
        R: Try<Output = B>,
    {
        if self.is_empty() {
            return try { init };
        }

        let mut accum = init;

        // Consume from the back down to (but not including) `start`...
        while self.start < self.end {
            let n =
                Step::backward_checked(self.end.clone(), 1).expect("`Step` invariants not upheld");
            let n = mem::replace(&mut self.end, n);
            accum = f(accum, n)?;
        }

        self.exhausted = true;

        // ...then the final element, without stepping past it.
        if self.start == self.end {
            accum = f(accum, self.start.clone())?;
        }

        try { accum }
    }
}
|
|
|
|
|
|
|
|
|
|
// Specialized implementation: `TrustedStep` guarantees the `Step` invariants,
// so stepping within the range can use the unchecked variants.
impl<T: TrustedStep> RangeInclusiveIteratorImpl for ops::RangeInclusive<T> {
    #[inline]
    fn spec_next(&mut self) -> Option<T> {
        if self.is_empty() {
            return None;
        }
        // When start == end the final element is yielded and the range is
        // marked exhausted instead of stepping past `end`.
        let is_iterating = self.start < self.end;
        Some(if is_iterating {
            // SAFETY: just checked precondition
            let n = unsafe { Step::forward_unchecked(self.start.clone(), 1) };
            mem::replace(&mut self.start, n)
        } else {
            self.exhausted = true;
            self.start.clone()
        })
    }

    #[inline]
    fn spec_try_fold<B, F, R>(&mut self, init: B, mut f: F) -> R
    where
        Self: Sized,
        F: FnMut(B, T) -> R,
        R: Try<Output = B>,
    {
        if self.is_empty() {
            return try { init };
        }

        let mut accum = init;

        // All elements strictly before `end`...
        while self.start < self.end {
            // SAFETY: just checked precondition
            let n = unsafe { Step::forward_unchecked(self.start.clone(), 1) };
            let n = mem::replace(&mut self.start, n);
            accum = f(accum, n)?;
        }

        self.exhausted = true;

        // ...then the final element (start == end), without stepping past it.
        if self.start == self.end {
            accum = f(accum, self.start.clone())?;
        }

        try { accum }
    }

    #[inline]
    fn spec_next_back(&mut self) -> Option<T> {
        if self.is_empty() {
            return None;
        }
        // Mirror of `spec_next` from the back.
        let is_iterating = self.start < self.end;
        Some(if is_iterating {
            // SAFETY: just checked precondition
            let n = unsafe { Step::backward_unchecked(self.end.clone(), 1) };
            mem::replace(&mut self.end, n)
        } else {
            self.exhausted = true;
            self.end.clone()
        })
    }

    #[inline]
    fn spec_try_rfold<B, F, R>(&mut self, init: B, mut f: F) -> R
    where
        Self: Sized,
        F: FnMut(B, T) -> R,
        R: Try<Output = B>,
    {
        if self.is_empty() {
            return try { init };
        }

        let mut accum = init;

        // All elements from the back down to (but not including) `start`...
        while self.start < self.end {
            // SAFETY: just checked precondition
            let n = unsafe { Step::backward_unchecked(self.end.clone(), 1) };
            let n = mem::replace(&mut self.end, n);
            accum = f(accum, n)?;
        }

        self.exhausted = true;

        // ...then the final element, without stepping past it.
        if self.start == self.end {
            accum = f(accum, self.start.clone())?;
        }

        try { accum }
    }
}
|
|
|
|
|
|
|
|
|
|
#[stable(feature = "inclusive_range", since = "1.26.0")]
impl<A: Step> Iterator for ops::RangeInclusive<A> {
    type Item = A;

    #[inline]
    fn next(&mut self) -> Option<A> {
        // Delegates to the specialization trait so `A: TrustedStep` gets the
        // unchecked fast path.
        self.spec_next()
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        if self.is_empty() {
            return (0, Some(0));
        }

        // Inclusive length is distance + 1; the `+ 1` itself may overflow
        // `usize`, hence the saturating/checked pair.
        match Step::steps_between(&self.start, &self.end) {
            Some(hint) => (hint.saturating_add(1), hint.checked_add(1)),
            None => (usize::MAX, None),
        }
    }

    #[inline]
    fn nth(&mut self, n: usize) -> Option<A> {
        if self.is_empty() {
            return None;
        }

        if let Some(plus_n) = Step::forward_checked(self.start.clone(), n) {
            use crate::cmp::Ordering::*;

            match plus_n.partial_cmp(&self.end) {
                // Target lies strictly inside the range: advance past it.
                Some(Less) => {
                    self.start = Step::forward(plus_n.clone(), 1);
                    return Some(plus_n);
                }
                // Target is exactly `end`: yield it and mark the range
                // exhausted rather than stepping past `end`.
                Some(Equal) => {
                    self.start = plus_n.clone();
                    self.exhausted = true;
                    return Some(plus_n);
                }
                _ => {}
            }
        }

        // Overflow or target beyond `end`: exhaust the range.
        self.start = self.end.clone();
        self.exhausted = true;
        None
    }

    #[inline]
    fn try_fold<B, F, R>(&mut self, init: B, f: F) -> R
    where
        Self: Sized,
        F: FnMut(B, Self::Item) -> R,
        R: Try<Output = B>,
    {
        self.spec_try_fold(init, f)
    }

    impl_fold_via_try_fold! { fold -> try_fold }

    #[inline]
    fn last(mut self) -> Option<A> {
        self.next_back()
    }

    #[inline]
    fn min(mut self) -> Option<A> {
        // Elements are produced in ascending order, so the minimum is first.
        self.next()
    }

    #[inline]
    fn max(mut self) -> Option<A> {
        // Likewise, the maximum is the last element.
        self.next_back()
    }

    #[inline]
    fn is_sorted(self) -> bool {
        // Each element is a successor of the previous one, and successors
        // compare greater per the `Step` contract.
        true
    }
}
|
|
|
|
|
|
2018-01-28 03:09:36 +08:00
|
|
|
#[stable(feature = "inclusive_range", since = "1.26.0")]
|
2017-07-05 23:49:33 +02:00
|
|
|
impl<A: Step> DoubleEndedIterator for ops::RangeInclusive<A> {
|
2016-04-18 20:08:27 +02:00
|
|
|
    #[inline]
    fn next_back(&mut self) -> Option<A> {
        // Delegates to the specialization trait so `A: TrustedStep` gets the
        // unchecked fast path.
        self.spec_next_back()
    }
|
RangeInclusive internal iteration performance improvement.
Specialize Iterator::try_fold and DoubleEndedIterator::try_rfold to
improve code generation in all internal iteration scenarios.
This changes brings the performance of internal iteration with
RangeInclusive on par with the performance of iteration with Range:
- Single conditional jump in hot loop,
- Unrolling and vectorization,
- And even Closed Form substitution.
Unfortunately, it only applies to internal iteration. Despite various
attempts at stream-lining the implementation of next and next_back,
LLVM has stubbornly refused to optimize external iteration
appropriately, leaving me with a choice between:
- The current implementation, for which Closed Form substitution is
performed, but which uses 2 conditional jumps in the hot loop when
optimization fail.
- An implementation using a "is_done" boolean, which uses 1
conditional jump in the hot loop when optimization fail, allowing
unrolling and vectorization, but for which Closed Form substitution
fails.
In the absence of any conclusive evidence as to which usecase matters
most, and with no assurance that the lack of Closed Form substitution
is not indicative of other optimizations being foiled, there is no way
to pick one implementation over the other, and thus I defer to the
statu quo as far as next and next_back are concerned.
2019-02-03 16:58:29 +01:00
|
|
|
|
2019-06-09 22:45:11 +02:00
|
|
|
    // Back-to-front counterpart of `nth`: returns the `n`-th element from the
    // end, exhausting the range if fewer than `n + 1` elements remain.
    #[inline]
    fn nth_back(&mut self, n: usize) -> Option<A> {
        if self.is_empty() {
            return None;
        }

        if let Some(minus_n) = Step::backward_checked(self.end.clone(), n) {
            use crate::cmp::Ordering::*;

            match minus_n.partial_cmp(&self.start) {
                // Target lies strictly inside the range: step `end` below it.
                Some(Greater) => {
                    self.end = Step::backward(minus_n.clone(), 1);
                    return Some(minus_n);
                }
                // Target is exactly `start`: yield it and mark the range
                // exhausted rather than stepping `end` below `start`.
                Some(Equal) => {
                    self.end = minus_n.clone();
                    self.exhausted = true;
                    return Some(minus_n);
                }
                _ => {}
            }
        }

        // Overflow or target before `start`: exhaust the range.
        self.end = self.start.clone();
        self.exhausted = true;
        None
    }
|
|
|
|
|
|
RangeInclusive internal iteration performance improvement.
Specialize Iterator::try_fold and DoubleEndedIterator::try_rfold to
improve code generation in all internal iteration scenarios.
This changes brings the performance of internal iteration with
RangeInclusive on par with the performance of iteration with Range:
- Single conditional jump in hot loop,
- Unrolling and vectorization,
- And even Closed Form substitution.
Unfortunately, it only applies to internal iteration. Despite various
attempts at stream-lining the implementation of next and next_back,
LLVM has stubbornly refused to optimize external iteration
appropriately, leaving me with a choice between:
- The current implementation, for which Closed Form substitution is
performed, but which uses 2 conditional jumps in the hot loop when
optimization fail.
- An implementation using a "is_done" boolean, which uses 1
conditional jump in the hot loop when optimization fail, allowing
unrolling and vectorization, but for which Closed Form substitution
fails.
In the absence of any conclusive evidence as to which usecase matters
most, and with no assurance that the lack of Closed Form substitution
is not indicative of other optimizations being foiled, there is no way
to pick one implementation over the other, and thus I defer to the
statu quo as far as next and next_back are concerned.
2019-02-03 16:58:29 +01:00
|
|
|
#[inline]
|
2021-04-02 00:58:45 -04:00
|
|
|
fn try_rfold<B, F, R>(&mut self, init: B, f: F) -> R
|
RangeInclusive internal iteration performance improvement.
Specialize Iterator::try_fold and DoubleEndedIterator::try_rfold to
improve code generation in all internal iteration scenarios.
This changes brings the performance of internal iteration with
RangeInclusive on par with the performance of iteration with Range:
- Single conditional jump in hot loop,
- Unrolling and vectorization,
- And even Closed Form substitution.
Unfortunately, it only applies to internal iteration. Despite various
attempts at stream-lining the implementation of next and next_back,
LLVM has stubbornly refused to optimize external iteration
appropriately, leaving me with a choice between:
- The current implementation, for which Closed Form substitution is
performed, but which uses 2 conditional jumps in the hot loop when
optimization fail.
- An implementation using a "is_done" boolean, which uses 1
conditional jump in the hot loop when optimization fail, allowing
unrolling and vectorization, but for which Closed Form substitution
fails.
In the absence of any conclusive evidence as to which usecase matters
most, and with no assurance that the lack of Closed Form substitution
is not indicative of other optimizations being foiled, there is no way
to pick one implementation over the other, and thus I defer to the
statu quo as far as next and next_back are concerned.
2019-02-03 16:58:29 +01:00
|
|
|
where
|
|
|
|
|
Self: Sized,
|
|
|
|
|
F: FnMut(B, Self::Item) -> R,
|
2021-05-05 22:47:02 -07:00
|
|
|
R: Try<Output = B>,
|
RangeInclusive internal iteration performance improvement.
Specialize Iterator::try_fold and DoubleEndedIterator::try_rfold to
improve code generation in all internal iteration scenarios.
This changes brings the performance of internal iteration with
RangeInclusive on par with the performance of iteration with Range:
- Single conditional jump in hot loop,
- Unrolling and vectorization,
- And even Closed Form substitution.
Unfortunately, it only applies to internal iteration. Despite various
attempts at stream-lining the implementation of next and next_back,
LLVM has stubbornly refused to optimize external iteration
appropriately, leaving me with a choice between:
- The current implementation, for which Closed Form substitution is
performed, but which uses 2 conditional jumps in the hot loop when
optimization fail.
- An implementation using a "is_done" boolean, which uses 1
conditional jump in the hot loop when optimization fail, allowing
unrolling and vectorization, but for which Closed Form substitution
fails.
In the absence of any conclusive evidence as to which usecase matters
most, and with no assurance that the lack of Closed Form substitution
is not indicative of other optimizations being foiled, there is no way
to pick one implementation over the other, and thus I defer to the
statu quo as far as next and next_back are concerned.
2019-02-03 16:58:29 +01:00
|
|
|
{
|
2021-04-02 00:58:45 -04:00
|
|
|
self.spec_try_rfold(init, f)
|
RangeInclusive internal iteration performance improvement.
Specialize Iterator::try_fold and DoubleEndedIterator::try_rfold to
improve code generation in all internal iteration scenarios.
This changes brings the performance of internal iteration with
RangeInclusive on par with the performance of iteration with Range:
- Single conditional jump in hot loop,
- Unrolling and vectorization,
- And even Closed Form substitution.
Unfortunately, it only applies to internal iteration. Despite various
attempts at stream-lining the implementation of next and next_back,
LLVM has stubbornly refused to optimize external iteration
appropriately, leaving me with a choice between:
- The current implementation, for which Closed Form substitution is
performed, but which uses 2 conditional jumps in the hot loop when
optimization fail.
- An implementation using a "is_done" boolean, which uses 1
conditional jump in the hot loop when optimization fail, allowing
unrolling and vectorization, but for which Closed Form substitution
fails.
In the absence of any conclusive evidence as to which usecase matters
most, and with no assurance that the lack of Closed Form substitution
is not indicative of other optimizations being foiled, there is no way
to pick one implementation over the other, and thus I defer to the
statu quo as far as next and next_back are concerned.
2019-02-03 16:58:29 +01:00
|
|
|
}
|
2020-05-18 09:34:34 +10:00
|
|
|
|
2022-09-26 00:51:42 -07:00
|
|
|
impl_fold_via_try_fold! { rfold -> try_rfold }
|
2016-04-18 20:08:27 +02:00
|
|
|
}
|
2016-08-13 14:42:36 -04:00
|
|
|
|
2021-04-02 00:58:45 -04:00
|
|
|
// Safety: See above implementation for `ops::Range<A>`
|
|
|
|
|
#[unstable(feature = "trusted_len", issue = "37572")]
|
|
|
|
|
unsafe impl<A: TrustedStep> TrustedLen for ops::RangeInclusive<A> {}
|
2020-02-18 13:18:33 -05:00
|
|
|
|
2018-03-03 14:15:28 +01:00
|
|
|
#[stable(feature = "fused", since = "1.26.0")]
|
2017-07-05 23:49:33 +02:00
|
|
|
impl<A: Step> FusedIterator for ops::RangeInclusive<A> {}
|