use crate::convert::TryFrom;
use crate::mem;
use crate::ops::{self, Add, Sub, Try};

use super::{FusedIterator, TrustedLen};

/// Objects that can be stepped over in both directions.
///
/// The `steps_between` function provides a way to efficiently compare
/// two `Step` objects.
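///
/// A minimal doc-example sketch (illustrative, not normative; assumes the
/// unstable `step_trait` feature):
///
/// ```
/// #![feature(step_trait)]
/// use std::iter::Step;
///
/// // Four steps from 3 up to, but excluding, 7.
/// assert_eq!(Step::steps_between(&3u8, &7u8), Some(4));
/// // With the integer impls below, a non-increasing pair counts as zero steps.
/// assert_eq!(Step::steps_between(&7u8, &3u8), Some(0));
/// ```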
#[unstable(
    feature = "step_trait",
    reason = "likely to be replaced by finer-grained traits",
    issue = "42168"
)]
pub trait Step: Clone + PartialOrd + Sized {
    /// Returns the number of steps between two step objects. The count is
    /// inclusive of `start` and exclusive of `end`.
    ///
    /// Returns `None` if it is not possible to calculate `steps_between`
    /// without overflow.
    fn steps_between(start: &Self, end: &Self) -> Option<usize>;

    /// Replaces this step with `1`, returning the old value.
    ///
    /// The value written by this method should always compare greater than
    /// the value written by `replace_zero`.
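    ///
    /// A sketch of the provided integer behavior (illustrative; assumes the
    /// unstable `step_trait` feature):
    ///
    /// ```
    /// #![feature(step_trait)]
    /// use std::iter::Step;
    ///
    /// let mut x = 5i32;
    /// assert_eq!(x.replace_one(), 5); // the old value is returned
    /// assert_eq!(x, 1);               // `x` now holds one
    /// ```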
    fn replace_one(&mut self) -> Self;

    /// Replaces this step with `0`, returning the old value.
    ///
    /// The value written by this method should always compare less than
    /// the value written by `replace_one`.
    fn replace_zero(&mut self) -> Self;

    /// Adds one to this step, returning the result.
    fn add_one(&self) -> Self;

    /// Subtracts one from this step, returning the result.
    fn sub_one(&self) -> Self;

    /// Adds a `usize`, returning `None` on overflow.
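    ///
    /// For example (illustrative): `200u8.add_usize(55)` is `Some(255)`,
    /// while `200u8.add_usize(100)` is `None`.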
    fn add_usize(&self, n: usize) -> Option<Self>;

    /// Subtracts a `usize`, returning `None` on underflow.
    fn sub_usize(&self, n: usize) -> Option<Self> {
        // this default implementation makes the addition of `sub_usize` a non-breaking change
        let _ = n;
        unimplemented!()
    }
}

// These are still macro-generated because the integer literals resolve to different types.
macro_rules! step_identical_methods {
    () => {
        #[inline]
        fn replace_one(&mut self) -> Self {
            mem::replace(self, 1)
        }

        #[inline]
        fn replace_zero(&mut self) -> Self {
            mem::replace(self, 0)
        }

        #[inline]
        fn add_one(&self) -> Self {
            Add::add(*self, 1)
        }

        #[inline]
        fn sub_one(&self) -> Self {
            Sub::sub(*self, 1)
        }
    }
}

macro_rules! step_impl_unsigned {
    ($($t:ty)*) => ($(
        #[unstable(feature = "step_trait",
                   reason = "likely to be replaced by finer-grained traits",
                   issue = "42168")]
        impl Step for $t {
            #[inline]
            fn steps_between(start: &$t, end: &$t) -> Option<usize> {
                if *start < *end {
                    usize::try_from(*end - *start).ok()
                } else {
                    Some(0)
                }
            }

            #[inline]
            #[allow(unreachable_patterns)]
            fn add_usize(&self, n: usize) -> Option<Self> {
                match <$t>::try_from(n) {
                    Ok(n_as_t) => self.checked_add(n_as_t),
                    Err(_) => None,
                }
            }

            #[inline]
            #[allow(unreachable_patterns)]
            fn sub_usize(&self, n: usize) -> Option<Self> {
                match <$t>::try_from(n) {
                    Ok(n_as_t) => self.checked_sub(n_as_t),
                    Err(_) => None,
                }
            }

            step_identical_methods!();
        }
    )*)
}

macro_rules! step_impl_signed {
    ($( [$t:ty : $unsigned:ty] )*) => ($(
        #[unstable(feature = "step_trait",
                   reason = "likely to be replaced by finer-grained traits",
                   issue = "42168")]
        impl Step for $t {
            #[inline]
            fn steps_between(start: &$t, end: &$t) -> Option<usize> {
                if *start < *end {
                    // Use .wrapping_sub and cast to unsigned to compute the
                    // difference that may not fit inside the range of $t.
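                    //
                    // Worked example (illustrative), with $t = i8: the true
                    // difference `120 - (-120) = 240` does not fit in i8,
                    // but `120i8.wrapping_sub(-120) as u8` is exactly 240.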
                    usize::try_from(end.wrapping_sub(*start) as $unsigned).ok()
                } else {
                    Some(0)
                }
            }

            #[inline]
            #[allow(unreachable_patterns)]
            fn add_usize(&self, n: usize) -> Option<Self> {
                match <$unsigned>::try_from(n) {
                    Ok(n_as_unsigned) => {
                        // Wrapping in unsigned space handles cases like
                        // `-120_i8.add_usize(200) == Some(80_i8)`,
                        // even though 200_usize is out of range for i8.
                        let wrapped = (*self as $unsigned).wrapping_add(n_as_unsigned) as $t;
                        if wrapped >= *self {
                            Some(wrapped)
                        } else {
                            None // Addition overflowed
                        }
                    }
                    Err(_) => None,
                }
            }

            #[inline]
            #[allow(unreachable_patterns)]
            fn sub_usize(&self, n: usize) -> Option<Self> {
                match <$unsigned>::try_from(n) {
                    Ok(n_as_unsigned) => {
                        // Wrapping in unsigned space handles cases like
                        // `80_i8.sub_usize(200) == Some(-120_i8)`,
                        // even though 200_usize is out of range for i8.
                        let wrapped = (*self as $unsigned).wrapping_sub(n_as_unsigned) as $t;
                        if wrapped <= *self {
                            Some(wrapped)
                        } else {
                            None // Subtraction underflowed
                        }
                    }
                    Err(_) => None,
                }
            }

            step_identical_methods!();
        }
    )*)
}

step_impl_unsigned!(usize u8 u16 u32 u64 u128);
step_impl_signed!([isize: usize][i8: u8][i16: u16]);
step_impl_signed!([i32: u32][i64: u64][i128: u128]);

macro_rules! range_exact_iter_impl {
    ($($t:ty)*) => ($(
        #[stable(feature = "rust1", since = "1.0.0")]
        impl ExactSizeIterator for ops::Range<$t> { }
    )*)
}

macro_rules! range_incl_exact_iter_impl {
    ($($t:ty)*) => ($(
        #[stable(feature = "inclusive_range", since = "1.26.0")]
        impl ExactSizeIterator for ops::RangeInclusive<$t> { }
    )*)
}

macro_rules! range_trusted_len_impl {
    ($($t:ty)*) => ($(
        #[unstable(feature = "trusted_len", issue = "37572")]
        unsafe impl TrustedLen for ops::Range<$t> { }
    )*)
}

macro_rules! range_incl_trusted_len_impl {
    ($($t:ty)*) => ($(
        #[unstable(feature = "trusted_len", issue = "37572")]
        unsafe impl TrustedLen for ops::RangeInclusive<$t> { }
    )*)
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<A: Step> Iterator for ops::Range<A> {
    type Item = A;

    #[inline]
    fn next(&mut self) -> Option<A> {
        if self.start < self.end {
            // We check for overflow here, even though it can't actually
            // happen. Adding this check does however help LLVM vectorize loops
            // for some ranges that don't get vectorized otherwise,
            // and this won't actually result in an extra check in an optimized build.
            if let Some(mut n) = self.start.add_usize(1) {
                mem::swap(&mut n, &mut self.start);
                Some(n)
            } else {
                None
            }
        } else {
            None
        }
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        match Step::steps_between(&self.start, &self.end) {
            Some(hint) => (hint, Some(hint)),
            None => (usize::MAX, None),
        }
    }

    #[inline]
    fn nth(&mut self, n: usize) -> Option<A> {
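        // E.g. `(2..10).nth(3)` yields `Some(5)` and leaves the range as
        // `6..10` (illustrative).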
        if let Some(plus_n) = self.start.add_usize(n) {
            if plus_n < self.end {
                self.start = plus_n.add_one();
                return Some(plus_n);
            }
        }

        self.start = self.end.clone();
        None
    }

    #[inline]
    fn last(mut self) -> Option<A> {
        self.next_back()
    }

    #[inline]
    fn min(mut self) -> Option<A> {
        self.next()
    }

    #[inline]
    fn max(mut self) -> Option<A> {
        self.next_back()
    }
}

// These macros generate `ExactSizeIterator` impls for various range types.
// Range<{u,i}64> and RangeInclusive<{u,i}{32,64,size}> are excluded
// because they cannot guarantee having a length <= usize::MAX, which is
// required by ExactSizeIterator.
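// (For example, `0..=u32::MAX` contains `u32::MAX as u64 + 1` elements,
// which exceeds `usize::MAX` on 32-bit targets; illustrative.)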
range_exact_iter_impl!(usize u8 u16 u32 isize i8 i16 i32);
range_incl_exact_iter_impl!(u8 u16 i8 i16);

// These macros generate `TrustedLen` impls.
//
// They need to guarantee that .size_hint() is either exact, or that
// the upper bound is None when it does not fit the type limits.
range_trusted_len_impl!(usize isize u8 i8 u16 i16 u32 i32 u64 i64 u128 i128);
range_incl_trusted_len_impl!(usize isize u8 i8 u16 i16 u32 i32 u64 i64 u128 i128);

#[stable(feature = "rust1", since = "1.0.0")]
impl<A: Step> DoubleEndedIterator for ops::Range<A> {
    #[inline]
    fn next_back(&mut self) -> Option<A> {
        if self.start < self.end {
            self.end = self.end.sub_one();
            Some(self.end.clone())
        } else {
            None
        }
    }

    #[inline]
    fn nth_back(&mut self, n: usize) -> Option<A> {
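        // E.g. `(1..10).nth_back(2)` yields `Some(7)` and leaves the range
        // as `1..7` (illustrative).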
        if let Some(minus_n) = self.end.sub_usize(n) {
            if minus_n > self.start {
                self.end = minus_n.sub_one();
                return Some(self.end.clone());
            }
        }

        self.end = self.start.clone();
        None
    }
}

#[stable(feature = "fused", since = "1.26.0")]
impl<A: Step> FusedIterator for ops::Range<A> {}

#[stable(feature = "rust1", since = "1.0.0")]
impl<A: Step> Iterator for ops::RangeFrom<A> {
    type Item = A;

    #[inline]
    fn next(&mut self) -> Option<A> {
        let mut n = self.start.add_one();
        mem::swap(&mut n, &mut self.start);
        Some(n)
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        (usize::MAX, None)
    }

    #[inline]
    fn nth(&mut self, n: usize) -> Option<A> {
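        // Unlike `Range::nth`, there is no end to clamp to, so overflowing
        // the element type panics, e.g. `(1u8..).nth(300)` (illustrative).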
        let plus_n = self.start.add_usize(n).expect("overflow in RangeFrom::nth");
        self.start = plus_n.add_one();
        Some(plus_n)
    }
}

#[stable(feature = "fused", since = "1.26.0")]
impl<A: Step> FusedIterator for ops::RangeFrom<A> {}

#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<A: Step> TrustedLen for ops::RangeFrom<A> {}

#[stable(feature = "inclusive_range", since = "1.26.0")]
impl<A: Step> Iterator for ops::RangeInclusive<A> {
    type Item = A;

    #[inline]
    fn next(&mut self) -> Option<A> {
        if self.is_empty() {
            return None;
        }
        let is_iterating = self.start < self.end;
        Some(if is_iterating {
            let n = self.start.add_one();
            mem::replace(&mut self.start, n)
        } else {
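            // `start == end` here: yield the final value once and mark the
            // range exhausted so that `is_empty` reports `true` afterwards.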
            self.exhausted = true;
            self.start.clone()
        })
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        if self.is_empty() {
            return (0, Some(0));
        }
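
        // Both endpoints are included, so the length is `steps_between + 1`;
        // that `+ 1` can itself overflow `usize` (e.g. for `0..=usize::MAX`),
        // hence the saturating lower bound and the checked upper bound below.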
        match Step::steps_between(&self.start, &self.end) {
            Some(hint) => (hint.saturating_add(1), hint.checked_add(1)),
            None => (usize::MAX, None),
        }
    }

    #[inline]
    fn nth(&mut self, n: usize) -> Option<A> {
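        // E.g. `(1..=5).nth(4)` yields `Some(5)`, hits the `Equal` arm
        // below, and marks the range exhausted (illustrative).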
        if self.is_empty() {
            return None;
        }

        if let Some(plus_n) = self.start.add_usize(n) {
            use crate::cmp::Ordering::*;

            match plus_n.partial_cmp(&self.end) {
                Some(Less) => {
                    self.start = plus_n.add_one();
                    return Some(plus_n);
                }
                Some(Equal) => {
                    self.start = plus_n.clone();
                    self.exhausted = true;
                    return Some(plus_n);
                }
                _ => {}
            }
        }

        self.start = self.end.clone();
        self.exhausted = true;
        None
    }

    // `try_fold`/`try_rfold` are specialized so that internal iteration over
    // `RangeInclusive` optimizes as well as iteration over `Range`: a single
    // conditional jump in the hot loop, allowing unrolling and vectorization.
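    // E.g. `(1..=100).sum::<u32>()` bottoms out in this `try_fold` (via the
    // default `fold`) and so benefits from these optimizations (illustrative).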
    #[inline]
    fn try_fold<B, F, R>(&mut self, init: B, mut f: F) -> R
    where
        Self: Sized,
        F: FnMut(B, Self::Item) -> R,
        R: Try<Ok = B>,
    {
        if self.is_empty() {
            return Try::from_ok(init);
        }

        let mut accum = init;

        while self.start < self.end {
            let n = self.start.add_one();
            let n = mem::replace(&mut self.start, n);
            accum = f(accum, n)?;
        }

        self.exhausted = true;

        if self.start == self.end {
            accum = f(accum, self.start.clone())?;
        }

        Try::from_ok(accum)
    }

    #[inline]
    fn last(mut self) -> Option<A> {
        self.next_back()
    }

    #[inline]
    fn min(mut self) -> Option<A> {
        self.next()
    }

    #[inline]
    fn max(mut self) -> Option<A> {
        self.next_back()
    }
}

#[stable(feature = "inclusive_range", since = "1.26.0")]
impl<A: Step> DoubleEndedIterator for ops::RangeInclusive<A> {
    #[inline]
    fn next_back(&mut self) -> Option<A> {
        if self.is_empty() {
            return None;
        }
        let is_iterating = self.start < self.end;
        Some(if is_iterating {
            let n = self.end.sub_one();
            mem::replace(&mut self.end, n)
        } else {
            self.exhausted = true;
            self.end.clone()
        })
    }

    #[inline]
    fn nth_back(&mut self, n: usize) -> Option<A> {
        if self.is_empty() {
            return None;
        }

        if let Some(minus_n) = self.end.sub_usize(n) {
            use crate::cmp::Ordering::*;

            match minus_n.partial_cmp(&self.start) {
                Some(Greater) => {
                    self.end = minus_n.sub_one();
                    return Some(minus_n);
                }
                Some(Equal) => {
                    self.end = minus_n.clone();
                    self.exhausted = true;
                    return Some(minus_n);
                }
                _ => {}
            }
        }

        self.end = self.start.clone();
        self.exhausted = true;
        None
    }
    #[inline]
    fn try_rfold<B, F, R>(&mut self, init: B, mut f: F) -> R
    where
        Self: Sized,
        F: FnMut(B, Self::Item) -> R,
        R: Try<Ok = B>,
    {
        if self.is_empty() {
            return Try::from_ok(init);
        }

        let mut accum = init;

        while self.start < self.end {
            let n = self.end.sub_one();
            let n = mem::replace(&mut self.end, n);
            accum = f(accum, n)?;
        }

        self.exhausted = true;

        if self.start == self.end {
            accum = f(accum, self.start.clone())?;
        }

        Try::from_ok(accum)
    }
}

#[stable(feature = "fused", since = "1.26.0")]
impl<A: Step> FusedIterator for ops::RangeInclusive<A> {}