use crate::char;
use crate::convert::TryFrom;
use crate::mem;
use crate::ops::{self, Add, Sub, Try};

use super::{FusedIterator, TrustedLen};
/// Objects that have a notion of *successor* and *predecessor* operations.
///
/// The *successor* operation moves towards values that compare greater.
/// The *predecessor* operation moves towards values that compare lesser.
///
/// # Safety
///
/// This trait is `unsafe` because its implementation must be correct for
/// the safety of `unsafe trait TrustedLen` implementations, and the results
/// of using this trait can otherwise be trusted by `unsafe` code to be correct
/// and fulfill the listed obligations.
#[unstable(feature = "step_trait", reason = "recently redesigned", issue = "42168")]
pub unsafe trait Step: Clone + PartialOrd + Sized {
    /// Returns the number of *successor* steps required to get from `start` to `end`.
    ///
    /// Returns `None` if the number of steps would overflow `usize`
    /// (or is infinite, or if `end` would never be reached).
    ///
    /// # Invariants
    ///
    /// For any `a`, `b`, and `n`:
    ///
    /// * `steps_between(&a, &b) == Some(n)` if and only if `Step::forward_checked(&a, n) == Some(b)`
    /// * `steps_between(&a, &b) == Some(n)` if and only if `Step::backward_checked(&b, n) == Some(a)`
    /// * `steps_between(&a, &b) == Some(n)` only if `a <= b`
    ///   * Corollary: `steps_between(&a, &b) == Some(0)` if and only if `a == b`
    ///   * Note that `a <= b` does _not_ imply `steps_between(&a, &b) != None`;
    ///     this is the case when it would require more than `usize::MAX` steps to get to `b`
    /// * `steps_between(&a, &b) == None` if `a > b`
    fn steps_between(start: &Self, end: &Self) -> Option<usize>;
    /// Returns the value that would be obtained by taking the *successor*
    /// of `start` `count` times.
    ///
    /// If this would overflow the range of values supported by `Self`, returns `None`.
    ///
    /// # Invariants
    ///
    /// For any `a`, `n`, and `m`:
    ///
    /// * `Step::forward_checked(a, n).and_then(|x| Step::forward_checked(x, m)) == Step::forward_checked(a, m).and_then(|x| Step::forward_checked(x, n))`
    ///
    /// For any `a`, `n`, and `m` where `n + m` does not overflow:
    ///
    /// * `Step::forward_checked(a, n).and_then(|x| Step::forward_checked(x, m)) == Step::forward_checked(a, n + m)`
    ///
    /// For any `a` and `n`:
    ///
    /// * `Step::forward_checked(a, n) == (0..n).try_fold(a, |x, _| Step::forward_checked(&x, 1))`
    ///   * Corollary: `Step::forward_checked(&a, 0) == Some(a)`
    #[unstable(feature = "step_trait_ext", reason = "recently added", issue = "42168")]
    fn forward_checked(start: Self, count: usize) -> Option<Self>;
    /// Returns the value that would be obtained by taking the *successor*
    /// of `start` `count` times.
    ///
    /// If this would overflow the range of values supported by `Self`,
    /// this function is allowed to panic, wrap, or saturate.
    /// The suggested behavior is to panic when debug assertions are enabled,
    /// and to wrap or saturate otherwise.
    ///
    /// Unsafe code should not rely on the correctness of behavior after overflow.
    ///
    /// # Invariants
    ///
    /// For any `a`, `n`, and `m`, where no overflow occurs:
    ///
    /// * `Step::forward(Step::forward(a, n), m) == Step::forward(a, n + m)`
    ///
    /// For any `a` and `n`, where no overflow occurs:
    ///
    /// * `Step::forward_checked(a, n) == Some(Step::forward(a, n))`
    /// * `Step::forward(a, n) == (0..n).fold(a, |x, _| Step::forward(x, 1))`
    ///   * Corollary: `Step::forward(a, 0) == a`
    /// * `Step::forward(a, n) >= a`
    /// * `Step::backward(Step::forward(a, n), n) == a`
    #[unstable(feature = "step_trait_ext", reason = "recently added", issue = "42168")]
    fn forward(start: Self, count: usize) -> Self {
        Step::forward_checked(start, count).expect("overflow in `Step::forward`")
    }
    /// Returns the value that would be obtained by taking the *successor*
    /// of `start` `count` times.
    ///
    /// # Safety
    ///
    /// It is undefined behavior for this operation to overflow the
    /// range of values supported by `Self`. If you cannot guarantee that this
    /// will not overflow, use `forward` or `forward_checked` instead.
    ///
    /// # Invariants
    ///
    /// For any `a`:
    ///
    /// * if there exists `b` such that `b > a`, it is safe to call `Step::forward_unchecked(a, 1)`
    /// * if there exists `b`, `n` such that `steps_between(&a, &b) == Some(n)`,
    ///   it is safe to call `Step::forward_unchecked(a, m)` for any `m <= n`.
    ///
    /// For any `a` and `n`, where no overflow occurs:
    ///
    /// * `Step::forward_unchecked(a, n)` is equivalent to `Step::forward(a, n)`
    #[unstable(feature = "unchecked_math", reason = "niche optimization path", issue = "none")]
    unsafe fn forward_unchecked(start: Self, count: usize) -> Self {
        Step::forward(start, count)
    }
    /// Returns the value that would be obtained by taking the *predecessor*
    /// of `start` `count` times.
    ///
    /// If this would overflow the range of values supported by `Self`, returns `None`.
    ///
    /// # Invariants
    ///
    /// For any `a`, `n`, and `m`:
    ///
    /// * `Step::backward_checked(a, n).and_then(|x| Step::backward_checked(x, m)) == n.checked_add(m).and_then(|x| Step::backward_checked(a, x))`
    /// * `Step::backward_checked(a, n).and_then(|x| Step::backward_checked(x, m)) == try { Step::backward_checked(a, n.checked_add(m)?) }`
    ///
    /// For any `a` and `n`:
    ///
    /// * `Step::backward_checked(a, n) == (0..n).try_fold(a, |x, _| Step::backward_checked(&x, 1))`
    ///   * Corollary: `Step::backward_checked(&a, 0) == Some(a)`
    #[unstable(feature = "step_trait_ext", reason = "recently added", issue = "42168")]
    fn backward_checked(start: Self, count: usize) -> Option<Self>;
    /// Returns the value that would be obtained by taking the *predecessor*
    /// of `start` `count` times.
    ///
    /// If this would overflow the range of values supported by `Self`,
    /// this function is allowed to panic, wrap, or saturate.
    /// The suggested behavior is to panic when debug assertions are enabled,
    /// and to wrap or saturate otherwise.
    ///
    /// Unsafe code should not rely on the correctness of behavior after overflow.
    ///
    /// # Invariants
    ///
    /// For any `a`, `n`, and `m`, where no overflow occurs:
    ///
    /// * `Step::backward(Step::backward(a, n), m) == Step::backward(a, n + m)`
    ///
    /// For any `a` and `n`, where no overflow occurs:
    ///
    /// * `Step::backward_checked(a, n) == Some(Step::backward(a, n))`
    /// * `Step::backward(a, n) == (0..n).fold(a, |x, _| Step::backward(x, 1))`
    ///   * Corollary: `Step::backward(a, 0) == a`
    /// * `Step::backward(a, n) <= a`
    /// * `Step::forward(Step::backward(a, n), n) == a`
    #[unstable(feature = "step_trait_ext", reason = "recently added", issue = "42168")]
    fn backward(start: Self, count: usize) -> Self {
        Step::backward_checked(start, count).expect("overflow in `Step::backward`")
    }
    /// Returns the value that would be obtained by taking the *predecessor*
    /// of `start` `count` times.
    ///
    /// # Safety
    ///
    /// It is undefined behavior for this operation to overflow the
    /// range of values supported by `Self`. If you cannot guarantee that this
    /// will not overflow, use `backward` or `backward_checked` instead.
    ///
    /// # Invariants
    ///
    /// For any `a`:
    ///
    /// * if there exists `b` such that `b < a`, it is safe to call `Step::backward_unchecked(a, 1)`
    /// * if there exists `b`, `n` such that `steps_between(&b, &a) == Some(n)`,
    ///   it is safe to call `Step::backward_unchecked(a, m)` for any `m <= n`.
    ///
    /// For any `a` and `n`, where no overflow occurs:
    ///
    /// * `Step::backward_unchecked(a, n)` is equivalent to `Step::backward(a, n)`
    #[unstable(feature = "unchecked_math", reason = "niche optimization path", issue = "none")]
    unsafe fn backward_unchecked(start: Self, count: usize) -> Self {
        Step::backward(start, count)
    }
}
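
// Illustrative sketch (not part of the original source): how the invariants
// documented above tie `steps_between`, `forward_checked`, and
// `backward_checked` together for a concrete type, assuming a caller on
// nightly with the unstable `step_trait` and `step_trait_ext` features.
//
//     use std::iter::Step;
//
//     assert_eq!(Step::steps_between(&3u8, &7u8), Some(4));
//     assert_eq!(Step::forward_checked(3u8, 4), Some(7));
//     assert_eq!(Step::backward_checked(7u8, 4), Some(3));
//     // 250 + 10 exceeds u8::MAX, so the checked step reports overflow.
//     assert_eq!(Step::forward_checked(250u8, 10), None);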
// These are still macro-generated because the integer literals resolve to different types.
macro_rules! step_identical_methods {
    () => {
        #[inline]
        unsafe fn forward_unchecked(start: Self, n: usize) -> Self {
            // SAFETY: the caller has to guarantee that `start + n` doesn't overflow.
            unsafe { start.unchecked_add(n as Self) }
        }

        #[inline]
        unsafe fn backward_unchecked(start: Self, n: usize) -> Self {
            // SAFETY: the caller has to guarantee that `start - n` doesn't overflow.
            unsafe { start.unchecked_sub(n as Self) }
        }

        #[inline]
        #[allow(arithmetic_overflow)]
        fn forward(start: Self, n: usize) -> Self {
            // In debug builds, trigger a panic on overflow.
            // This should optimize completely out in release builds.
            if Self::forward_checked(start, n).is_none() {
                let _ = Add::add(Self::MAX, 1);
            }
            // Do wrapping math to allow e.g. `Step::forward(-128i8, 255)`.
            start.wrapping_add(n as Self)
        }

        #[inline]
        #[allow(arithmetic_overflow)]
        fn backward(start: Self, n: usize) -> Self {
            // In debug builds, trigger a panic on overflow.
            // This should optimize completely out in release builds.
            if Self::backward_checked(start, n).is_none() {
                let _ = Sub::sub(Self::MIN, 1);
            }
            // Do wrapping math to allow e.g. `Step::backward(127i8, 255)`.
            start.wrapping_sub(n as Self)
        }
    };
}
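
// Illustrative sketch (not part of the original source): the `count` argument
// is a `usize`, so it can exceed the range of `Self` even when the result is
// representable, which is exactly the case the wrapping-math comments above
// allow for. Assumes the unstable `step_trait` / `step_trait_ext` features in
// the calling crate.
//
//     use std::iter::Step;
//
//     // 255 does not fit in `i8`, but -128 + 255 == 127 does.
//     assert_eq!(Step::forward_checked(-128i8, 255), Some(127i8));
//     // 127 + 255 cannot be represented as an `i8`.
//     assert_eq!(Step::forward_checked(127i8, 255), None);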
macro_rules! step_integer_impls {
    {
        narrower than or same width as usize:
            $( [ $u_narrower:ident $i_narrower:ident ] ),+;
        wider than usize:
            $( [ $u_wider:ident $i_wider:ident ] ),+;
    } => {
        $(
            #[allow(unreachable_patterns)]
            #[unstable(feature = "step_trait", reason = "recently redesigned", issue = "42168")]
            unsafe impl Step for $u_narrower {
                step_identical_methods!();

                #[inline]
                fn steps_between(start: &Self, end: &Self) -> Option<usize> {
                    if *start <= *end {
                        // This relies on $u_narrower <= usize
                        Some((*end - *start) as usize)
                    } else {
                        None
                    }
                }

                #[inline]
                fn forward_checked(start: Self, n: usize) -> Option<Self> {
                    match Self::try_from(n) {
                        Ok(n) => start.checked_add(n),
                        Err(_) => None, // if n is out of range, `unsigned_start + n` is too
                    }
                }

                #[inline]
                fn backward_checked(start: Self, n: usize) -> Option<Self> {
                    match Self::try_from(n) {
                        Ok(n) => start.checked_sub(n),
                        Err(_) => None, // if n is out of range, `unsigned_start - n` is too
                    }
                }
            }

            #[allow(unreachable_patterns)]
            #[unstable(feature = "step_trait", reason = "recently redesigned", issue = "42168")]
            unsafe impl Step for $i_narrower {
                step_identical_methods!();

                #[inline]
                fn steps_between(start: &Self, end: &Self) -> Option<usize> {
                    if *start <= *end {
                        // This relies on $i_narrower <= usize
                        //
                        // Casting to isize extends the width but preserves the sign.
                        // Use wrapping_sub in isize space and cast to usize to compute
                        // the difference that may not fit inside the range of isize.
                        Some((*end as isize).wrapping_sub(*start as isize) as usize)
                    } else {
                        None
                    }
                }

                #[inline]
                fn forward_checked(start: Self, n: usize) -> Option<Self> {
                    match $u_narrower::try_from(n) {
                        Ok(n) => {
                            // Wrapping handles cases like
                            // `Step::forward(-120_i8, 200) == Some(80_i8)`,
                            // even though 200 is out of range for i8.
                            let wrapped = start.wrapping_add(n as Self);
                            if wrapped >= start {
                                Some(wrapped)
                            } else {
                                None // Addition overflowed
                            }
                        }
                        // If n is out of range of e.g. u8,
                        // then it is larger than the full width of the i8 range,
                        // so `any_i8 + n` necessarily overflows i8.
                        Err(_) => None,
                    }
                }

                #[inline]
                fn backward_checked(start: Self, n: usize) -> Option<Self> {
                    match $u_narrower::try_from(n) {
                        Ok(n) => {
                            // Wrapping handles cases like
                            // `Step::forward(-120_i8, 200) == Some(80_i8)`,
                            // even though 200 is out of range for i8.
                            let wrapped = start.wrapping_sub(n as Self);
                            if wrapped <= start {
                                Some(wrapped)
                            } else {
                                None // Subtraction overflowed
                            }
                        }
                        // If n is out of range of e.g. u8,
                        // then it is larger than the full width of the i8 range,
                        // so `any_i8 - n` necessarily overflows i8.
                        Err(_) => None,
                    }
                }
            }
        )+

        $(
            #[allow(unreachable_patterns)]
            #[unstable(feature = "step_trait", reason = "recently redesigned", issue = "42168")]
            unsafe impl Step for $u_wider {
                step_identical_methods!();

                #[inline]
                fn steps_between(start: &Self, end: &Self) -> Option<usize> {
                    if *start <= *end {
                        usize::try_from(*end - *start).ok()
                    } else {
                        None
                    }
                }

                #[inline]
                fn forward_checked(start: Self, n: usize) -> Option<Self> {
                    start.checked_add(n as Self)
                }

                #[inline]
                fn backward_checked(start: Self, n: usize) -> Option<Self> {
                    start.checked_sub(n as Self)
                }
            }

            #[allow(unreachable_patterns)]
            #[unstable(feature = "step_trait", reason = "recently redesigned", issue = "42168")]
            unsafe impl Step for $i_wider {
                step_identical_methods!();

                #[inline]
                fn steps_between(start: &Self, end: &Self) -> Option<usize> {
                    if *start <= *end {
                        match end.checked_sub(*start) {
                            Some(result) => usize::try_from(result).ok(),
                            // If the difference is too big for e.g. i128,
                            // it is also going to be too big for a usize with fewer bits.
                            None => None,
                        }
                    } else {
                        None
                    }
                }

                #[inline]
                fn forward_checked(start: Self, n: usize) -> Option<Self> {
                    start.checked_add(n as Self)
                }

                #[inline]
                fn backward_checked(start: Self, n: usize) -> Option<Self> {
                    start.checked_sub(n as Self)
                }
            }
        )+
    };
}
#[cfg(target_pointer_width = "64")]
step_integer_impls! {
    narrower than or same width as usize: [u8 i8], [u16 i16], [u32 i32], [u64 i64], [usize isize];
    wider than usize: [u128 i128];
}

#[cfg(target_pointer_width = "32")]
step_integer_impls! {
    narrower than or same width as usize: [u8 i8], [u16 i16], [u32 i32], [usize isize];
    wider than usize: [u64 i64], [u128 i128];
}

#[cfg(target_pointer_width = "16")]
step_integer_impls! {
    narrower than or same width as usize: [u8 i8], [u16 i16], [usize isize];
    wider than usize: [u32 i32], [u64 i64], [u128 i128];
}
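
// Illustrative sketch (not part of the original source): for signed types the
// difference is computed in `isize`/`usize` space, so it stays correct across
// zero and for the full width of the type. Assumes the unstable `step_trait`
// feature in the calling crate.
//
//     use std::iter::Step;
//
//     assert_eq!(Step::steps_between(&-5i8, &5i8), Some(10));
//     assert_eq!(Step::steps_between(&isize::MIN, &isize::MAX), Some(usize::MAX));
//     assert_eq!(Step::steps_between(&5i8, &-5i8), None); // start > end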
#[unstable(feature = "step_trait", reason = "recently redesigned", issue = "42168")]
unsafe impl Step for char {
    #[inline]
    fn steps_between(&start: &char, &end: &char) -> Option<usize> {
        let start = start as u32;
        let end = end as u32;
        if start <= end {
            let count = end - start;
            if start < 0xD800 && 0xE000 <= end {
                usize::try_from(count - 0x800).ok()
            } else {
                usize::try_from(count).ok()
            }
        } else {
            None
        }
    }

    #[inline]
    fn forward_checked(start: char, count: usize) -> Option<char> {
        let start = start as u32;
        let mut res = Step::forward_checked(start, count)?;
        if start < 0xD800 && 0xD800 <= res {
            res = Step::forward_checked(res, 0x800)?;
        }
        if res <= char::MAX as u32 {
            // SAFETY: res is a valid unicode scalar
            // (below 0x110000 and not in 0xD800..0xE000)
            Some(unsafe { char::from_u32_unchecked(res) })
        } else {
            None
        }
    }

    #[inline]
    fn backward_checked(start: char, count: usize) -> Option<char> {
        let start = start as u32;
        let mut res = Step::backward_checked(start, count)?;
        if start >= 0xE000 && 0xE000 > res {
            res = Step::backward_checked(res, 0x800)?;
        }
        // SAFETY: res is a valid unicode scalar
        // (below 0x110000 and not in 0xD800..0xE000)
        Some(unsafe { char::from_u32_unchecked(res) })
    }

    #[inline]
    unsafe fn forward_unchecked(start: char, count: usize) -> char {
        let start = start as u32;
        // SAFETY: the caller must guarantee that this doesn't overflow
        // the range of values for a char.
        let mut res = unsafe { Step::forward_unchecked(start, count) };
        if start < 0xD800 && 0xD800 <= res {
            // SAFETY: the caller must guarantee that this doesn't overflow
            // the range of values for a char.
            res = unsafe { Step::forward_unchecked(res, 0x800) };
        }
        // SAFETY: because of the previous contract, this is guaranteed
        // by the caller to be a valid char.
        unsafe { char::from_u32_unchecked(res) }
    }

    #[inline]
    unsafe fn backward_unchecked(start: char, count: usize) -> char {
        let start = start as u32;
        // SAFETY: the caller must guarantee that this doesn't overflow
        // the range of values for a char.
        let mut res = unsafe { Step::backward_unchecked(start, count) };
        if start >= 0xE000 && 0xE000 > res {
            // SAFETY: the caller must guarantee that this doesn't overflow
            // the range of values for a char.
            res = unsafe { Step::backward_unchecked(res, 0x800) };
        }
        // SAFETY: because of the previous contract, this is guaranteed
        // by the caller to be a valid char.
        unsafe { char::from_u32_unchecked(res) }
    }
}
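
// Illustrative sketch (not part of the original source): `char` steps skip the
// surrogate range `0xD800..0xE000`, which is why the impl above adds or
// subtracts `0x800` when crossing it. Assumes the unstable `step_trait` /
// `step_trait_ext` features in the calling crate.
//
//     use std::iter::Step;
//
//     // '\u{D7FF}' is the last scalar value before the surrogate range.
//     assert_eq!(Step::forward_checked('\u{D7FF}', 1), Some('\u{E000}'));
//     assert_eq!(Step::backward_checked('\u{E000}', 1), Some('\u{D7FF}'));
//     // The 2048 surrogate code points do not count as steps.
//     assert_eq!(Step::steps_between(&'\u{D7FF}', &'\u{E000}'), Some(1));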
macro_rules! range_exact_iter_impl {
    ($($t:ty)*) => ($(
        #[stable(feature = "rust1", since = "1.0.0")]
        impl ExactSizeIterator for ops::Range<$t> { }
    )*)
}

macro_rules! range_incl_exact_iter_impl {
    ($($t:ty)*) => ($(
        #[stable(feature = "inclusive_range", since = "1.26.0")]
        impl ExactSizeIterator for ops::RangeInclusive<$t> { }
    )*)
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<A: Step> Iterator for ops::Range<A> {
    type Item = A;

    #[inline]
    fn next(&mut self) -> Option<A> {
        if self.start < self.end {
            // SAFETY: just checked precondition
            let n = unsafe { Step::forward_unchecked(self.start.clone(), 1) };
            Some(mem::replace(&mut self.start, n))
        } else {
            None
        }
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        if self.start < self.end {
            let hint = Step::steps_between(&self.start, &self.end);
            (hint.unwrap_or(usize::MAX), hint)
        } else {
            (0, Some(0))
        }
    }

    #[inline]
    fn nth(&mut self, n: usize) -> Option<A> {
        if let Some(plus_n) = Step::forward_checked(self.start.clone(), n) {
            if plus_n < self.end {
                // SAFETY: just checked precondition
                self.start = unsafe { Step::forward_unchecked(plus_n.clone(), 1) };
                return Some(plus_n);
            }
        }

        self.start = self.end.clone();
        None
    }

    #[inline]
    fn last(mut self) -> Option<A> {
        self.next_back()
    }

    #[inline]
    fn min(mut self) -> Option<A> {
        self.next()
    }

    #[inline]
    fn max(mut self) -> Option<A> {
        self.next_back()
    }
}
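
// Illustrative sketch (not part of the original source): observable behavior
// of the `Iterator` impl above through the stable `Range` API.
//
//     let mut r = 3u8..10;
//     assert_eq!(r.nth(2), Some(5)); // skips 3 and 4
//     assert_eq!(r.size_hint(), (4, Some(4))); // 6, 7, 8, 9 remain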
// These macros generate `ExactSizeIterator` impls for various range types.
//
// * `ExactSizeIterator::len` is required to always return an exact `usize`,
//   so no range can be longer than `usize::MAX`.
// * For integer types in `Range<_>` this is the case for types narrower than or as wide as `usize`.
//   For integer types in `RangeInclusive<_>`
//   this is the case for types *strictly narrower* than `usize`
//   since e.g. `(0..=u64::MAX).len()` would be `u64::MAX + 1`.
range_exact_iter_impl! {
    usize u8 u16
    isize i8 i16

    // These are incorrect per the reasoning above,
    // but removing them would be a breaking change as they were stabilized in Rust 1.0.0.
    // So e.g. `(0..66_000_u32).len()` will compile without error or warnings
    // on 16-bit platforms, but continue to give a wrong result.
    u32
    i32
}
range_incl_exact_iter_impl! {
    u8
    i8

    // These are incorrect per the reasoning above,
    // but removing them would be a breaking change as they were stabilized in Rust 1.26.0.
    // So e.g. `(0..=u16::MAX).len()` will compile without error or warnings
    // on 16-bit platforms, but continue to give a wrong result.
    u16
    i16
}
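
// Illustrative sketch (not part of the original source): `len` is exact for
// the types listed above because those iterators can never be longer than
// `usize::MAX`.
//
//     assert_eq!((0u16..500).len(), 500);
//     assert_eq!((0u8..=255).len(), 256); // u8 is strictly narrower than usize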
#[stable(feature = "rust1", since = "1.0.0")]
impl<A: Step> DoubleEndedIterator for ops::Range<A> {
    #[inline]
    fn next_back(&mut self) -> Option<A> {
        if self.start < self.end {
            // SAFETY: just checked precondition
            self.end = unsafe { Step::backward_unchecked(self.end.clone(), 1) };
            Some(self.end.clone())
        } else {
            None
        }
    }

    #[inline]
    fn nth_back(&mut self, n: usize) -> Option<A> {
        if let Some(minus_n) = Step::backward_checked(self.end.clone(), n) {
            if minus_n > self.start {
                // SAFETY: just checked precondition
                self.end = unsafe { Step::backward_unchecked(minus_n, 1) };
                return Some(self.end.clone());
            }
        }

        self.end = self.start.clone();
        None
    }
}
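
// Illustrative sketch (not part of the original source): iterating a `Range`
// from the back with the impl above.
//
//     let mut r = 0u8..5;
//     assert_eq!(r.next_back(), Some(4));
//     assert_eq!(r.nth_back(1), Some(2)); // skips 3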
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<A: Step> TrustedLen for ops::Range<A> {}

#[stable(feature = "fused", since = "1.26.0")]
impl<A: Step> FusedIterator for ops::Range<A> {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<A: Step> Iterator for ops::RangeFrom<A> {
    type Item = A;

    #[inline]
    fn next(&mut self) -> Option<A> {
        let n = Step::forward(self.start.clone(), 1);
        Some(mem::replace(&mut self.start, n))
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        (usize::MAX, None)
    }

    #[inline]
    fn nth(&mut self, n: usize) -> Option<A> {
        let plus_n = Step::forward(self.start.clone(), n);
        self.start = Step::forward(plus_n.clone(), 1);
        Some(plus_n)
    }
}
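
// Illustrative sketch (not part of the original source): `RangeFrom` always
// advances with `Step::forward`, so `nth` also steps past the element it
// returns.
//
//     let mut it = 10u8..;
//     assert_eq!(it.nth(2), Some(12));
//     assert_eq!(it.next(), Some(13));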
#[stable(feature = "fused", since = "1.26.0")]
impl<A: Step> FusedIterator for ops::RangeFrom<A> {}

#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<A: Step> TrustedLen for ops::RangeFrom<A> {}
#[stable(feature = "inclusive_range", since = "1.26.0")]
impl<A: Step> Iterator for ops::RangeInclusive<A> {
    type Item = A;

    #[inline]
    fn next(&mut self) -> Option<A> {
        if self.is_empty() {
            return None;
        }
        let is_iterating = self.start < self.end;
        Some(if is_iterating {
            // SAFETY: just checked precondition
            let n = unsafe { Step::forward_unchecked(self.start.clone(), 1) };
            mem::replace(&mut self.start, n)
        } else {
            self.exhausted = true;
            self.start.clone()
        })
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        if self.is_empty() {
            return (0, Some(0));
        }

        match Step::steps_between(&self.start, &self.end) {
            Some(hint) => (hint.saturating_add(1), hint.checked_add(1)),
            None => (usize::MAX, None),
        }
    }

    #[inline]
    fn nth(&mut self, n: usize) -> Option<A> {
        if self.is_empty() {
            return None;
        }

        if let Some(plus_n) = Step::forward_checked(self.start.clone(), n) {
            use crate::cmp::Ordering::*;

            match plus_n.partial_cmp(&self.end) {
                Some(Less) => {
                    self.start = Step::forward(plus_n.clone(), 1);
                    return Some(plus_n);
                }
                Some(Equal) => {
                    self.start = plus_n.clone();
                    self.exhausted = true;
                    return Some(plus_n);
                }
                _ => {}
            }
        }

        self.start = self.end.clone();
        self.exhausted = true;
        None
    }

    #[inline]
    fn try_fold<B, F, R>(&mut self, init: B, mut f: F) -> R
    where
        Self: Sized,
        F: FnMut(B, Self::Item) -> R,
        R: Try<Ok = B>,
    {
        if self.is_empty() {
            return try { init };
        }

        let mut accum = init;

        while self.start < self.end {
            // SAFETY: just checked precondition
            let n = unsafe { Step::forward_unchecked(self.start.clone(), 1) };
            let n = mem::replace(&mut self.start, n);
            accum = f(accum, n)?;
        }

        self.exhausted = true;

        if self.start == self.end {
            accum = f(accum, self.start.clone())?;
        }

        try { accum }
    }

    #[inline]
    fn fold<B, F>(mut self, init: B, f: F) -> B
    where
        Self: Sized,
        F: FnMut(B, Self::Item) -> B,
    {
        #[inline]
        fn ok<B, T>(mut f: impl FnMut(B, T) -> B) -> impl FnMut(B, T) -> Result<B, !> {
            move |acc, x| Ok(f(acc, x))
        }

        self.try_fold(init, ok(f)).unwrap()
    }

    #[inline]
    fn last(mut self) -> Option<A> {
        self.next_back()
    }

    #[inline]
    fn min(mut self) -> Option<A> {
        self.next()
    }

    #[inline]
    fn max(mut self) -> Option<A> {
        self.next_back()
    }
}
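
// Illustrative sketch (not part of the original source): yielding the final
// value of a `RangeInclusive` marks it exhausted instead of stepping `start`
// past `end`.
//
//     let mut r = 0u8..=2;
//     assert_eq!(r.size_hint(), (3, Some(3)));
//     assert_eq!(r.nth(2), Some(2)); // yields the last value and exhausts the range
//     assert_eq!(r.next(), None);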
#[stable(feature = "inclusive_range", since = "1.26.0")]
impl<A: Step> DoubleEndedIterator for ops::RangeInclusive<A> {
    #[inline]
    fn next_back(&mut self) -> Option<A> {
        if self.is_empty() {
            return None;
        }
        let is_iterating = self.start < self.end;
        Some(if is_iterating {
            // SAFETY: just checked precondition
            let n = unsafe { Step::backward_unchecked(self.end.clone(), 1) };
            mem::replace(&mut self.end, n)
        } else {
            self.exhausted = true;
            self.end.clone()
        })
    }

    #[inline]
    fn nth_back(&mut self, n: usize) -> Option<A> {
        if self.is_empty() {
            return None;
        }

        if let Some(minus_n) = Step::backward_checked(self.end.clone(), n) {
            use crate::cmp::Ordering::*;

            match minus_n.partial_cmp(&self.start) {
                Some(Greater) => {
                    self.end = Step::backward(minus_n.clone(), 1);
                    return Some(minus_n);
                }
                Some(Equal) => {
                    self.end = minus_n.clone();
                    self.exhausted = true;
                    return Some(minus_n);
                }
                _ => {}
            }
        }

        self.end = self.start.clone();
        self.exhausted = true;
        None
    }

    #[inline]
    fn try_rfold<B, F, R>(&mut self, init: B, mut f: F) -> R
    where
        Self: Sized,
        F: FnMut(B, Self::Item) -> R,
        R: Try<Ok = B>,
    {
        if self.is_empty() {
            return try { init };
        }

        let mut accum = init;

        while self.start < self.end {
            // SAFETY: just checked precondition
            let n = unsafe { Step::backward_unchecked(self.end.clone(), 1) };
            let n = mem::replace(&mut self.end, n);
            accum = f(accum, n)?;
        }

        self.exhausted = true;

        if self.start == self.end {
            accum = f(accum, self.start.clone())?;
        }

        try { accum }
    }

    #[inline]
    fn rfold<B, F>(mut self, init: B, f: F) -> B
    where
        Self: Sized,
        F: FnMut(B, Self::Item) -> B,
    {
        #[inline]
        fn ok<B, T>(mut f: impl FnMut(B, T) -> B) -> impl FnMut(B, T) -> Result<B, !> {
            move |acc, x| Ok(f(acc, x))
        }

        self.try_rfold(init, ok(f)).unwrap()
    }
}
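
// Illustrative sketch (not part of the original source): the same exhaustion
// bookkeeping applies when iterating a `RangeInclusive` from the back.
//
//     let mut r = 0u8..=3;
//     assert_eq!(r.nth_back(1), Some(2)); // skips 3
//     assert_eq!(r.next_back(), Some(1));
//     assert_eq!(r.next_back(), Some(0)); // final value; the range is now exhausted
//     assert_eq!(r.next_back(), None);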
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<A: Step> TrustedLen for ops::RangeInclusive<A> {}

#[stable(feature = "fused", since = "1.26.0")]
impl<A: Step> FusedIterator for ops::RangeInclusive<A> {}