diff --git a/src/libcollections/benches/lib.rs b/src/libcollections/benches/lib.rs index 1a21db5e344e3..42064e9ca5750 100644 --- a/src/libcollections/benches/lib.rs +++ b/src/libcollections/benches/lib.rs @@ -11,6 +11,7 @@ #![deny(warnings)] #![feature(rand)] +#![feature(sort_unstable)] #![feature(test)] extern crate test; diff --git a/src/libcollections/benches/slice.rs b/src/libcollections/benches/slice.rs index eb4b76509f913..7195a9f9bf2c6 100644 --- a/src/libcollections/benches/slice.rs +++ b/src/libcollections/benches/slice.rs @@ -169,6 +169,7 @@ fn random_inserts(b: &mut Bencher) { } }) } + #[bench] fn random_removes(b: &mut Bencher) { let mut rng = thread_rng(); @@ -216,65 +217,76 @@ fn gen_mostly_descending(len: usize) -> Vec { v } -fn gen_big_random(len: usize) -> Vec<[u64; 16]> { +fn gen_strings(len: usize) -> Vec { let mut rng = thread_rng(); - rng.gen_iter().map(|x| [x; 16]).take(len).collect() -} - -fn gen_big_ascending(len: usize) -> Vec<[u64; 16]> { - (0..len as u64).map(|x| [x; 16]).take(len).collect() + let mut v = vec![]; + for _ in 0..len { + let n = rng.gen::() % 20 + 1; + v.push(rng.gen_ascii_chars().take(n).collect()); + } + v } -fn gen_big_descending(len: usize) -> Vec<[u64; 16]> { - (0..len as u64).rev().map(|x| [x; 16]).take(len).collect() +fn gen_big_random(len: usize) -> Vec<[u64; 16]> { + let mut rng = thread_rng(); + rng.gen_iter().map(|x| [x; 16]).take(len).collect() } -macro_rules! sort_bench { - ($name:ident, $gen:expr, $len:expr) => { +macro_rules! sort { + ($f:ident, $name:ident, $gen:expr, $len:expr) => { #[bench] fn $name(b: &mut Bencher) { - b.iter(|| $gen($len).sort()); + b.iter(|| $gen($len).$f()); b.bytes = $len * mem::size_of_val(&$gen(1)[0]) as u64; } } } -sort_bench!(sort_small_random, gen_random, 10); -sort_bench!(sort_small_ascending, gen_ascending, 10); -sort_bench!(sort_small_descending, gen_descending, 10); - -sort_bench!(sort_small_big_random, gen_big_random, 10); -sort_bench!(sort_small_big_ascending, gen_big_ascending, 10); -sort_bench!(sort_small_big_descending, gen_big_descending, 10); - -sort_bench!(sort_medium_random, gen_random, 100); -sort_bench!(sort_medium_ascending, gen_ascending, 100); -sort_bench!(sort_medium_descending, gen_descending, 100); - -sort_bench!(sort_large_random, gen_random, 10000); -sort_bench!(sort_large_ascending, gen_ascending, 10000); -sort_bench!(sort_large_descending, gen_descending, 10000); -sort_bench!(sort_large_mostly_ascending, gen_mostly_ascending, 10000); -sort_bench!(sort_large_mostly_descending, gen_mostly_descending, 10000); - -sort_bench!(sort_large_big_random, gen_big_random, 10000); -sort_bench!(sort_large_big_ascending, gen_big_ascending, 10000); -sort_bench!(sort_large_big_descending, gen_big_descending, 10000); +macro_rules! 
sort_expensive { + ($f:ident, $name:ident, $gen:expr, $len:expr) => { + #[bench] + fn $name(b: &mut Bencher) { + b.iter(|| { + let mut v = $gen($len); + let mut count = 0; + v.$f(|a: &u64, b: &u64| { + count += 1; + if count % 1_000_000_000 == 0 { + panic!("should not happen"); + } + (*a as f64).cos().partial_cmp(&(*b as f64).cos()).unwrap() + }); + black_box(count); + }); + b.bytes = $len as u64 * mem::size_of::() as u64; + } + } +} -#[bench] -fn sort_large_random_expensive(b: &mut Bencher) { - let len = 10000; - b.iter(|| { - let mut v = gen_random(len); - let mut count = 0; - v.sort_by(|a: &u64, b: &u64| { - count += 1; - if count % 1_000_000_000 == 0 { - panic!("should not happen"); - } - (*a as f64).cos().partial_cmp(&(*b as f64).cos()).unwrap() - }); - black_box(count); - }); - b.bytes = len as u64 * mem::size_of::() as u64; -} \ No newline at end of file +sort!(sort, sort_small_ascending, gen_ascending, 10); +sort!(sort, sort_small_descending, gen_descending, 10); +sort!(sort, sort_small_random, gen_random, 10); +sort!(sort, sort_small_big_random, gen_big_random, 10); +sort!(sort, sort_medium_random, gen_random, 100); +sort!(sort, sort_large_ascending, gen_ascending, 10000); +sort!(sort, sort_large_descending, gen_descending, 10000); +sort!(sort, sort_large_mostly_ascending, gen_mostly_ascending, 10000); +sort!(sort, sort_large_mostly_descending, gen_mostly_descending, 10000); +sort!(sort, sort_large_random, gen_random, 10000); +sort!(sort, sort_large_big_random, gen_big_random, 10000); +sort!(sort, sort_large_strings, gen_strings, 10000); +sort_expensive!(sort_by, sort_large_random_expensive, gen_random, 10000); + +sort!(sort_unstable, sort_unstable_small_ascending, gen_ascending, 10); +sort!(sort_unstable, sort_unstable_small_descending, gen_descending, 10); +sort!(sort_unstable, sort_unstable_small_random, gen_random, 10); +sort!(sort_unstable, sort_unstable_small_big_random, gen_big_random, 10); +sort!(sort_unstable, sort_unstable_medium_random, gen_random, 100); +sort!(sort_unstable, sort_unstable_large_ascending, gen_ascending, 10000); +sort!(sort_unstable, sort_unstable_large_descending, gen_descending, 10000); +sort!(sort_unstable, sort_unstable_large_mostly_ascending, gen_mostly_ascending, 10000); +sort!(sort_unstable, sort_unstable_large_mostly_descending, gen_mostly_descending, 10000); +sort!(sort_unstable, sort_unstable_large_random, gen_random, 10000); +sort!(sort_unstable, sort_unstable_large_big_random, gen_big_random, 10000); +sort!(sort_unstable, sort_unstable_large_strings, gen_strings, 10000); +sort_expensive!(sort_unstable_by, sort_unstable_large_random_expensive, gen_random, 10000); diff --git a/src/libcollections/lib.rs b/src/libcollections/lib.rs index 10650dab583c3..72e950bc91fa9 100644 --- a/src/libcollections/lib.rs +++ b/src/libcollections/lib.rs @@ -52,6 +52,7 @@ #![feature(shared)] #![feature(slice_get_slice)] #![feature(slice_patterns)] +#![cfg_attr(not(test), feature(sort_unstable))] #![feature(specialization)] #![feature(staged_api)] #![feature(str_internals)] diff --git a/src/libcollections/slice.rs b/src/libcollections/slice.rs index 653310b8cb591..5233887620a91 100644 --- a/src/libcollections/slice.rs +++ b/src/libcollections/slice.rs @@ -1092,6 +1092,39 @@ impl [T] { merge_sort(self, |a, b| a.lt(b)); } + /// Sorts the slice using `compare` to compare elements. + /// + /// This sort is stable (i.e. does not reorder equal elements) and `O(n log n)` worst-case. 
+ /// + /// # Current implementation + /// + /// The current algorithm is an adaptive, iterative merge sort inspired by + /// [timsort](https://en.wikipedia.org/wiki/Timsort). + /// It is designed to be very fast in cases where the slice is nearly sorted, or consists of + /// two or more sorted sequences concatenated one after another. + /// + /// Also, it allocates temporary storage half the size of `self`, but for short slices a + /// non-allocating insertion sort is used instead. + /// + /// # Examples + /// + /// ``` + /// let mut v = [5, 4, 1, 3, 2]; + /// v.sort_by(|a, b| a.cmp(b)); + /// assert!(v == [1, 2, 3, 4, 5]); + /// + /// // reverse sorting + /// v.sort_by(|a, b| b.cmp(a)); + /// assert!(v == [5, 4, 3, 2, 1]); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + pub fn sort_by(&mut self, mut compare: F) + where F: FnMut(&T, &T) -> Ordering + { + merge_sort(self, |a, b| compare(a, b) == Less); + } + /// Sorts the slice using `f` to extract a key to compare elements by. /// /// This sort is stable (i.e. does not reorder equal elements) and `O(n log n)` worst-case. @@ -1122,37 +1155,118 @@ impl [T] { merge_sort(self, |a, b| f(a).lt(&f(b))); } - /// Sorts the slice using `compare` to compare elements. + /// Sorts the slice, but may not preserve the order of equal elements. /// - /// This sort is stable (i.e. does not reorder equal elements) and `O(n log n)` worst-case. + /// This sort is unstable (i.e. may reorder equal elements), in-place (i.e. does not allocate), + /// and `O(n log n)` worst-case. /// /// # Current implementation /// - /// The current algorithm is an adaptive, iterative merge sort inspired by - /// [timsort](https://en.wikipedia.org/wiki/Timsort). - /// It is designed to be very fast in cases where the slice is nearly sorted, or consists of - /// two or more sorted sequences concatenated one after another. + /// The current algorithm is based on Orson Peters' [pdqsort][pattern-defeating quicksort], + /// which is a quicksort variant designed to be very fast on certain kinds of patterns, + /// sometimes achieving linear time. It is randomized but deterministic, and falls back to + /// heapsort on degenerate inputs. /// - /// Also, it allocates temporary storage half the size of `self`, but for short slices a - /// non-allocating insertion sort is used instead. + /// It is generally faster than stable sorting, except in a few special cases, e.g. when the + /// slice consists of several concatenated sorted sequences. /// /// # Examples /// /// ``` + /// #![feature(sort_unstable)] + /// + /// let mut v = [-5, 4, 1, -3, 2]; + /// + /// v.sort_unstable(); + /// assert!(v == [-5, -3, 1, 2, 4]); + /// ``` + /// + /// [pdqsort]: https://github.com/orlp/pdqsort + // FIXME #40585: Mention `sort_unstable` in the documentation for `sort`. + #[unstable(feature = "sort_unstable", issue = "40585")] + #[inline] + pub fn sort_unstable(&mut self) + where T: Ord + { + core_slice::SliceExt::sort_unstable(self); + } + + /// Sorts the slice using `compare` to compare elements, but may not preserve the order of + /// equal elements. + /// + /// This sort is unstable (i.e. may reorder equal elements), in-place (i.e. does not allocate), + /// and `O(n log n)` worst-case. + /// + /// # Current implementation + /// + /// The current algorithm is based on Orson Peters' [pdqsort][pattern-defeating quicksort], + /// which is a quicksort variant designed to be very fast on certain kinds of patterns, + /// sometimes achieving linear time. 
It is randomized but deterministic, and falls back to + /// heapsort on degenerate inputs. + /// + /// It is generally faster than stable sorting, except in a few special cases, e.g. when the + /// slice consists of several concatenated sorted sequences. + /// + /// # Examples + /// + /// ``` + /// #![feature(sort_unstable)] + /// /// let mut v = [5, 4, 1, 3, 2]; - /// v.sort_by(|a, b| a.cmp(b)); + /// v.sort_unstable_by(|a, b| a.cmp(b)); /// assert!(v == [1, 2, 3, 4, 5]); /// /// // reverse sorting - /// v.sort_by(|a, b| b.cmp(a)); + /// v.sort_unstable_by(|a, b| b.cmp(a)); /// assert!(v == [5, 4, 3, 2, 1]); /// ``` - #[stable(feature = "rust1", since = "1.0.0")] + /// + /// [pdqsort]: https://github.com/orlp/pdqsort + // FIXME #40585: Mention `sort_unstable_by` in the documentation for `sort_by`. + #[unstable(feature = "sort_unstable", issue = "40585")] #[inline] - pub fn sort_by(&mut self, mut compare: F) + pub fn sort_unstable_by(&mut self, compare: F) where F: FnMut(&T, &T) -> Ordering { - merge_sort(self, |a, b| compare(a, b) == Less); + core_slice::SliceExt::sort_unstable_by(self, compare); + } + + /// Sorts the slice using `f` to extract a key to compare elements by, but may not preserve the + /// order of equal elements. + /// + /// This sort is unstable (i.e. may reorder equal elements), in-place (i.e. does not allocate), + /// and `O(n log n)` worst-case. + /// + /// # Current implementation + /// + /// The current algorithm is based on Orson Peters' [pdqsort][pattern-defeating quicksort], + /// which is a quicksort variant designed to be very fast on certain kinds of patterns, + /// sometimes achieving linear time. It is randomized but deterministic, and falls back to + /// heapsort on degenerate inputs. + /// + /// It is generally faster than stable sorting, except in a few special cases, e.g. when the + /// slice consists of several concatenated sorted sequences. + /// + /// # Examples + /// + /// ``` + /// #![feature(sort_unstable)] + /// + /// let mut v = [-5i32, 4, 1, -3, 2]; + /// + /// v.sort_unstable_by_key(|k| k.abs()); + /// assert!(v == [1, 2, -3, 4, -5]); + /// ``` + /// + /// [pdqsort]: https://github.com/orlp/pdqsort + // FIXME #40585: Mention `sort_unstable_by_key` in the documentation for `sort_by_key`. + #[unstable(feature = "sort_unstable", issue = "40585")] + #[inline] + pub fn sort_unstable_by_key(&mut self, f: F) + where F: FnMut(&T) -> B, + B: Ord + { + core_slice::SliceExt::sort_unstable_by_key(self, f); } /// Copies the elements from `src` into `self`. @@ -1553,28 +1667,20 @@ unsafe fn merge(v: &mut [T], mid: usize, buf: *mut T, is_less: &mut F) fn merge_sort(v: &mut [T], mut is_less: F) where F: FnMut(&T, &T) -> bool { + // Slices of up to this length get sorted using insertion sort. + const MAX_INSERTION: usize = 20; + // Very short runs are extended using insertion sort to span at least this many elements. + const MIN_RUN: usize = 10; + // Sorting has no meaningful behavior on zero-sized types. if size_of::() == 0 { return; } - // FIXME #12092: These numbers are platform-specific and need more extensive testing/tuning. - // - // If `v` has length up to `max_insertion`, simply switch to insertion sort because it is going - // to perform better than merge sort. For bigger types `T`, the threshold is smaller. - // - // Short runs are extended using insertion sort to span at least `min_run` elements, in order - // to improve performance. 
- let (max_insertion, min_run) = if size_of::() <= 2 * mem::size_of::() { - (64, 32) - } else { - (32, 16) - }; - let len = v.len(); // Short arrays get sorted in-place via insertion sort to avoid allocations. - if len <= max_insertion { + if len <= MAX_INSERTION { if len >= 2 { for i in (0..len-1).rev() { insert_head(&mut v[i..], &mut is_less); @@ -1618,7 +1724,7 @@ fn merge_sort(v: &mut [T], mut is_less: F) // Insert some more elements into the run if it's too short. Insertion sort is faster than // merge sort on short sequences, so this significantly improves performance. - while start > 0 && end - start < min_run { + while start > 0 && end - start < MIN_RUN { start -= 1; insert_head(&mut v[start..end], &mut is_less); } diff --git a/src/libcollectionstest/slice.rs b/src/libcollectionstest/slice.rs index a7f7baf38518c..00d4dbe9c0458 100644 --- a/src/libcollectionstest/slice.rs +++ b/src/libcollectionstest/slice.rs @@ -399,9 +399,10 @@ fn test_sort() { } } - // shouldn't panic - let mut v: [i32; 0] = []; - v.sort(); + // Should not panic. + [0i32; 0].sort(); + [(); 10].sort(); + [(); 100].sort(); let mut v = [0xDEADBEEFu64]; v.sort(); @@ -441,13 +442,6 @@ fn test_sort_stability() { } } -#[test] -fn test_sort_zero_sized_type() { - // Should not panic. - [(); 10].sort(); - [(); 100].sort(); -} - #[test] fn test_concat() { let v: [Vec; 0] = []; diff --git a/src/libcore/lib.rs b/src/libcore/lib.rs index 3d124a8aa8b75..af61342749319 100644 --- a/src/libcore/lib.rs +++ b/src/libcore/lib.rs @@ -71,26 +71,27 @@ #![feature(asm)] #![feature(associated_type_defaults)] #![feature(cfg_target_feature)] +#![feature(cfg_target_has_atomic)] #![feature(concat_idents)] #![feature(const_fn)] -#![feature(cfg_target_has_atomic)] #![feature(custom_attribute)] #![feature(fundamental)] +#![feature(i128_type)] #![feature(inclusive_range_syntax)] #![feature(intrinsics)] #![feature(lang_items)] +#![feature(never_type)] #![feature(no_core)] #![feature(on_unimplemented)] #![feature(optin_builtin_traits)] -#![feature(unwind_attributes)] +#![feature(prelude_import)] #![feature(repr_simd, platform_intrinsics)] #![feature(rustc_attrs)] #![feature(specialization)] #![feature(staged_api)] #![feature(unboxed_closures)] -#![feature(never_type)] -#![feature(i128_type)] -#![feature(prelude_import)] +#![feature(untagged_unions)] +#![feature(unwind_attributes)] #[prelude_import] #[allow(unused)] diff --git a/src/libcore/slice.rs b/src/libcore/slice/mod.rs similarity index 96% rename from src/libcore/slice.rs rename to src/libcore/slice/mod.rs index 22658f9a81b0d..6f8b199f886b7 100644 --- a/src/libcore/slice.rs +++ b/src/libcore/slice/mod.rs @@ -1,4 +1,4 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT +// Copyright 2012-2017 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // @@ -51,6 +51,8 @@ use mem; use marker::{Copy, Send, Sync, Sized, self}; use iter_private::TrustedRandomAccess; +mod sort; + #[repr(C)] struct Repr { pub data: *const T, @@ -71,86 +73,119 @@ pub trait SliceExt { #[stable(feature = "core", since = "1.6.0")] fn split_at(&self, mid: usize) -> (&[Self::Item], &[Self::Item]); + #[stable(feature = "core", since = "1.6.0")] fn iter(&self) -> Iter; + #[stable(feature = "core", since = "1.6.0")] fn split
<P>
(&self, pred: P) -> Split - where P: FnMut(&Self::Item) -> bool; + where P: FnMut(&Self::Item) -> bool; + #[stable(feature = "core", since = "1.6.0")] fn splitn
<P>
(&self, n: usize, pred: P) -> SplitN - where P: FnMut(&Self::Item) -> bool; + where P: FnMut(&Self::Item) -> bool; + #[stable(feature = "core", since = "1.6.0")] fn rsplitn
<P>
(&self, n: usize, pred: P) -> RSplitN - where P: FnMut(&Self::Item) -> bool; + where P: FnMut(&Self::Item) -> bool; + #[stable(feature = "core", since = "1.6.0")] fn windows(&self, size: usize) -> Windows; + #[stable(feature = "core", since = "1.6.0")] fn chunks(&self, size: usize) -> Chunks; + #[stable(feature = "core", since = "1.6.0")] fn get(&self, index: I) -> Option<&I::Output> where I: SliceIndex; + #[stable(feature = "core", since = "1.6.0")] fn first(&self) -> Option<&Self::Item>; + #[stable(feature = "core", since = "1.6.0")] fn split_first(&self) -> Option<(&Self::Item, &[Self::Item])>; + #[stable(feature = "core", since = "1.6.0")] fn split_last(&self) -> Option<(&Self::Item, &[Self::Item])>; + #[stable(feature = "core", since = "1.6.0")] fn last(&self) -> Option<&Self::Item>; + #[stable(feature = "core", since = "1.6.0")] unsafe fn get_unchecked(&self, index: I) -> &I::Output where I: SliceIndex; + #[stable(feature = "core", since = "1.6.0")] fn as_ptr(&self) -> *const Self::Item; + #[stable(feature = "core", since = "1.6.0")] fn binary_search(&self, x: &Q) -> Result where Self::Item: Borrow, Q: Ord; + #[stable(feature = "core", since = "1.6.0")] fn binary_search_by<'a, F>(&'a self, f: F) -> Result where F: FnMut(&'a Self::Item) -> Ordering; + #[stable(feature = "slice_binary_search_by_key", since = "1.10.0")] fn binary_search_by_key<'a, B, F, Q: ?Sized>(&'a self, b: &Q, f: F) -> Result where F: FnMut(&'a Self::Item) -> B, B: Borrow, Q: Ord; + #[stable(feature = "core", since = "1.6.0")] fn len(&self) -> usize; + #[stable(feature = "core", since = "1.6.0")] fn is_empty(&self) -> bool { self.len() == 0 } + #[stable(feature = "core", since = "1.6.0")] fn get_mut(&mut self, index: I) -> Option<&mut I::Output> where I: SliceIndex; + #[stable(feature = "core", since = "1.6.0")] fn iter_mut(&mut self) -> IterMut; + #[stable(feature = "core", since = "1.6.0")] fn first_mut(&mut self) -> Option<&mut Self::Item>; + #[stable(feature = "core", since = "1.6.0")] fn split_first_mut(&mut self) -> Option<(&mut Self::Item, &mut [Self::Item])>; + #[stable(feature = "core", since = "1.6.0")] fn split_last_mut(&mut self) -> Option<(&mut Self::Item, &mut [Self::Item])>; + #[stable(feature = "core", since = "1.6.0")] fn last_mut(&mut self) -> Option<&mut Self::Item>; + #[stable(feature = "core", since = "1.6.0")] fn split_mut
<P>
(&mut self, pred: P) -> SplitMut - where P: FnMut(&Self::Item) -> bool; + where P: FnMut(&Self::Item) -> bool; + #[stable(feature = "core", since = "1.6.0")] fn splitn_mut
<P>
(&mut self, n: usize, pred: P) -> SplitNMut - where P: FnMut(&Self::Item) -> bool; + where P: FnMut(&Self::Item) -> bool; + #[stable(feature = "core", since = "1.6.0")] fn rsplitn_mut
<P>
(&mut self, n: usize, pred: P) -> RSplitNMut - where P: FnMut(&Self::Item) -> bool; + where P: FnMut(&Self::Item) -> bool; + #[stable(feature = "core", since = "1.6.0")] fn chunks_mut(&mut self, chunk_size: usize) -> ChunksMut; + #[stable(feature = "core", since = "1.6.0")] fn swap(&mut self, a: usize, b: usize); + #[stable(feature = "core", since = "1.6.0")] fn split_at_mut(&mut self, mid: usize) -> (&mut [Self::Item], &mut [Self::Item]); + #[stable(feature = "core", since = "1.6.0")] fn reverse(&mut self); + #[stable(feature = "core", since = "1.6.0")] unsafe fn get_unchecked_mut(&mut self, index: I) -> &mut I::Output where I: SliceIndex; + #[stable(feature = "core", since = "1.6.0")] fn as_mut_ptr(&mut self) -> *mut Self::Item; @@ -165,8 +200,22 @@ pub trait SliceExt { #[stable(feature = "clone_from_slice", since = "1.7.0")] fn clone_from_slice(&mut self, src: &[Self::Item]) where Self::Item: Clone; + #[stable(feature = "copy_from_slice", since = "1.9.0")] fn copy_from_slice(&mut self, src: &[Self::Item]) where Self::Item: Copy; + + #[unstable(feature = "sort_unstable", issue = "40585")] + fn sort_unstable(&mut self) + where Self::Item: Ord; + + #[unstable(feature = "sort_unstable", issue = "40585")] + fn sort_unstable_by(&mut self, compare: F) + where F: FnMut(&Self::Item, &Self::Item) -> Ordering; + + #[unstable(feature = "sort_unstable", issue = "40585")] + fn sort_unstable_by_key(&mut self, f: F) + where F: FnMut(&Self::Item) -> B, + B: Ord; } // Use macros to be generic over const/mut @@ -238,7 +287,9 @@ impl SliceExt for [T] { } #[inline] - fn split
<P>
(&self, pred: P) -> Split where P: FnMut(&T) -> bool { + fn split
<P>
(&self, pred: P) -> Split + where P: FnMut(&T) -> bool + { Split { v: self, pred: pred, @@ -247,8 +298,8 @@ impl SliceExt for [T] { } #[inline] - fn splitn
<P>
(&self, n: usize, pred: P) -> SplitN where - P: FnMut(&T) -> bool, + fn splitn
<P>
(&self, n: usize, pred: P) -> SplitN + where P: FnMut(&T) -> bool { SplitN { inner: GenericSplitN { @@ -260,8 +311,8 @@ impl SliceExt for [T] { } #[inline] - fn rsplitn
<P>
(&self, n: usize, pred: P) -> RSplitN where - P: FnMut(&T) -> bool, + fn rsplitn
<P>
(&self, n: usize, pred: P) -> RSplitN + where P: FnMut(&T) -> bool { RSplitN { inner: GenericSplitN { @@ -422,13 +473,15 @@ impl SliceExt for [T] { } #[inline] - fn split_mut
<P>
(&mut self, pred: P) -> SplitMut where P: FnMut(&T) -> bool { + fn split_mut
<P>
(&mut self, pred: P) -> SplitMut + where P: FnMut(&T) -> bool + { SplitMut { v: self, pred: pred, finished: false } } #[inline] - fn splitn_mut
<P>
(&mut self, n: usize, pred: P) -> SplitNMut where - P: FnMut(&T) -> bool + fn splitn_mut
<P>
(&mut self, n: usize, pred: P) -> SplitNMut<T, P>
+        where P: FnMut(&T) -> bool
     {
         SplitNMut {
             inner: GenericSplitN {
@@ -450,7 +503,7 @@ impl<T> SliceExt for [T] {
                 invert: true
             }
         }
-    }    
+    }
 
     #[inline]
     fn chunks_mut(&mut self, chunk_size: usize) -> ChunksMut<T> {
@@ -512,7 +565,10 @@ impl<T> SliceExt for [T] {
         m >= n && needle == &self[m-n..]
     }
 
-    fn binary_search<Q: ?Sized>(&self, x: &Q) -> Result<usize, usize> where T: Borrow<Q>, Q: Ord {
+    fn binary_search<Q: ?Sized>(&self, x: &Q) -> Result<usize, usize>
+        where T: Borrow<Q>,
+              Q: Ord
+    {
         self.binary_search_by(|p| p.borrow().cmp(x))
     }
 
@@ -548,6 +604,28 @@ impl<T> SliceExt for [T] {
     {
         self.binary_search_by(|k| f(k).borrow().cmp(b))
     }
+
+    #[inline]
+    fn sort_unstable(&mut self)
+        where Self::Item: Ord
+    {
+        sort::quicksort(self, |a, b| a.lt(b));
+    }
+
+    #[inline]
+    fn sort_unstable_by<F>(&mut self, mut compare: F)
+        where F: FnMut(&Self::Item, &Self::Item) -> Ordering
+    {
+        sort::quicksort(self, |a, b| compare(a, b) == Ordering::Less);
+    }
+
+    #[inline]
+    fn sort_unstable_by_key<B, F>(&mut self, mut f: F)
+        where F: FnMut(&Self::Item) -> B,
+              B: Ord
+    {
+        sort::quicksort(self, |a, b| f(a).lt(&f(b)));
+    }
 }
 
 #[stable(feature = "rust1", since = "1.0.0")]
@@ -2175,6 +2253,15 @@ pub unsafe fn from_raw_parts_mut<'a, T>(p: *mut T, len: usize) -> &'a mut [T] {
     mem::transmute(Repr { data: p, len: len })
 }
 
+// This function is public only because there is no other way to unit test heapsort.
+#[unstable(feature = "sort_internals", reason = "internal to sort module", issue = "0")]
+#[doc(hidden)]
+pub fn heapsort<T, F>(v: &mut [T], mut is_less: F)
+    where F: FnMut(&T, &T) -> bool
+{
+    sort::heapsort(v, &mut is_less);
+}
+
 //
 // Comparison traits
 //
diff --git a/src/libcore/slice/sort.rs b/src/libcore/slice/sort.rs
new file mode 100644
index 0000000000000..fdfba33f8a9d9
--- /dev/null
+++ b/src/libcore/slice/sort.rs
@@ -0,0 +1,699 @@
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Slice sorting
+//!
+//! This module contains a sort algorithm based on Orson Peters' pattern-defeating quicksort,
+//! published at: https://github.com/orlp/pdqsort
+//!
+//! Unstable sorting is compatible with libcore because it doesn't allocate memory, unlike our
+//! stable sorting implementation.
+
+use cmp;
+use mem;
+use ptr;
+
+/// Holds a value, but never drops it.
+#[allow(unions_with_drop_fields)]
+union NoDrop<T> {
+    value: T
+}
+
+/// When dropped, copies from `src` into `dest`.
+struct CopyOnDrop<T> {
+    src: *mut T,
+    dest: *mut T,
+}
+
+impl<T> Drop for CopyOnDrop<T> {
+    fn drop(&mut self) {
+        unsafe { ptr::copy_nonoverlapping(self.src, self.dest, 1); }
+    }
+}
+
+/// Shifts the first element to the right until it encounters a greater or equal element.
+fn shift_head<T, F>(v: &mut [T], is_less: &mut F)
+    where F: FnMut(&T, &T) -> bool
+{
+    let len = v.len();
+    unsafe {
+        // If the first two elements are out-of-order...
+        if len >= 2 && is_less(v.get_unchecked(1), v.get_unchecked(0)) {
+            // Read the first element into a stack-allocated variable. If a following comparison
+            // operation panics, `hole` will get dropped and automatically write the element back
+            // into the slice.
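+            // As an illustration (example input, not from the original comment): for
+            // `v = [3, 1, 2, 4]`, `tmp` holds 3, the elements 1 and 2 are each copied one slot
+            // to the left, and the comparison against 4 breaks the loop; dropping `hole` then
+            // writes 3 into index 2.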
+            let mut tmp = NoDrop { value: ptr::read(v.get_unchecked(0)) };
+            let mut hole = CopyOnDrop {
+                src: &mut tmp.value,
+                dest: v.get_unchecked_mut(1),
+            };
+            ptr::copy_nonoverlapping(v.get_unchecked(1), v.get_unchecked_mut(0), 1);
+
+            for i in 2..len {
+                if !is_less(v.get_unchecked(i), &tmp.value) {
+                    break;
+                }
+
+                // Move `i`-th element one place to the left, thus shifting the hole to the right.
+                ptr::copy_nonoverlapping(v.get_unchecked(i), v.get_unchecked_mut(i - 1), 1);
+                hole.dest = v.get_unchecked_mut(i);
+            }
+            // `hole` gets dropped and thus copies `tmp` into the remaining hole in `v`.
+        }
+    }
+}
+
+/// Shifts the last element to the left until it encounters a smaller or equal element.
+fn shift_tail<T, F>(v: &mut [T], is_less: &mut F)
+    where F: FnMut(&T, &T) -> bool
+{
+    let len = v.len();
+    unsafe {
+        // If the last two elements are out-of-order...
+        if len >= 2 && is_less(v.get_unchecked(len - 1), v.get_unchecked(len - 2)) {
+            // Read the last element into a stack-allocated variable. If a following comparison
+            // operation panics, `hole` will get dropped and automatically write the element back
+            // into the slice.
+            let mut tmp = NoDrop { value: ptr::read(v.get_unchecked(len - 1)) };
+            let mut hole = CopyOnDrop {
+                src: &mut tmp.value,
+                dest: v.get_unchecked_mut(len - 2),
+            };
+            ptr::copy_nonoverlapping(v.get_unchecked(len - 2), v.get_unchecked_mut(len - 1), 1);
+
+            for i in (0..len-2).rev() {
+                if !is_less(&tmp.value, v.get_unchecked(i)) {
+                    break;
+                }
+
+                // Move `i`-th element one place to the right, thus shifting the hole to the left.
+                ptr::copy_nonoverlapping(v.get_unchecked(i), v.get_unchecked_mut(i + 1), 1);
+                hole.dest = v.get_unchecked_mut(i);
+            }
+            // `hole` gets dropped and thus copies `tmp` into the remaining hole in `v`.
+        }
+    }
+}
+
+/// Partially sorts a slice by shifting several out-of-order elements around.
+///
+/// Returns true if the slice is sorted at the end. This function is `O(n)` worst-case.
+#[cold]
+fn partial_insertion_sort<T, F>(v: &mut [T], is_less: &mut F) -> bool
+    where F: FnMut(&T, &T) -> bool
+{
+    // Maximum number of adjacent out-of-order pairs that will get shifted.
+    const MAX_STEPS: usize = 5;
+    // If the slice is shorter than this, don't shift any elements.
+    const SHORTEST_SHIFTING: usize = 50;
+
+    let len = v.len();
+    let mut i = 1;
+
+    for _ in 0..MAX_STEPS {
+        unsafe {
+            // Find the next pair of adjacent out-of-order elements.
+            while i < len && !is_less(v.get_unchecked(i), v.get_unchecked(i - 1)) {
+                i += 1;
+            }
+        }
+
+        // Are we done?
+        if i == len {
+            return true;
+        }
+
+        // Don't shift elements on short arrays; that has a performance cost.
+        if len < SHORTEST_SHIFTING {
+            return false;
+        }
+
+        // Swap the found pair of elements. This puts them in correct order.
+        v.swap(i - 1, i);
+
+        // Shift the smaller element to the left.
+        shift_tail(&mut v[..i], is_less);
+        // Shift the greater element to the right.
+        shift_head(&mut v[i..], is_less);
+    }
+
+    // Didn't manage to sort the slice in the limited number of steps.
+    false
+}
+
+/// Sorts a slice using insertion sort, which is `O(n^2)` worst-case.
+fn insertion_sort<T, F>(v: &mut [T], is_less: &mut F)
+    where F: FnMut(&T, &T) -> bool
+{
+    for i in 2..v.len()+1 {
+        shift_tail(&mut v[..i], is_less);
+    }
+}
+
+/// Sorts `v` using heapsort, which guarantees `O(n log n)` worst-case.
+#[cold]
+pub fn heapsort<T, F>(v: &mut [T], is_less: &mut F)
+    where F: FnMut(&T, &T) -> bool
+{
+    // This binary heap respects the invariant `parent >= child`.
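+    // For the zero-based indices used here, the children of the node at index `i` live at
+    // indices `2 * i + 1` and `2 * i + 2`, which is exactly what `sift_down` computes below.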
+    let mut sift_down = |v: &mut [T], mut node| {
+        loop {
+            // Children of `node`:
+            let left = 2 * node + 1;
+            let right = 2 * node + 2;
+
+            // Choose the greater child.
+            let greater = if right < v.len() && is_less(&v[left], &v[right]) {
+                right
+            } else {
+                left
+            };
+
+            // Stop if the invariant holds at `node`.
+            if greater >= v.len() || !is_less(&v[node], &v[greater]) {
+                break;
+            }
+
+            // Swap `node` with the greater child, move one step down, and continue sifting.
+            v.swap(node, greater);
+            node = greater;
+        }
+    };
+
+    // Build the heap in linear time.
+    for i in (0 .. v.len() / 2).rev() {
+        sift_down(v, i);
+    }
+
+    // Pop maximal elements from the heap.
+    for i in (1 .. v.len()).rev() {
+        v.swap(0, i);
+        sift_down(&mut v[..i], 0);
+    }
+}
+
+/// Partitions `v` into elements smaller than `pivot`, followed by elements greater than or equal
+/// to `pivot`.
+///
+/// Returns the number of elements smaller than `pivot`.
+///
+/// Partitioning is performed block-by-block in order to minimize the cost of branching operations.
+/// This idea is presented in the [BlockQuicksort][pdf] paper.
+///
+/// [pdf]: http://drops.dagstuhl.de/opus/volltexte/2016/6389/pdf/LIPIcs-ESA-2016-38.pdf
+fn partition_in_blocks<T, F>(v: &mut [T], pivot: &T, is_less: &mut F) -> usize
+    where F: FnMut(&T, &T) -> bool
+{
+    // Number of elements in a typical block.
+    const BLOCK: usize = 128;
+
+    // The partitioning algorithm repeats the following steps until completion:
+    //
+    // 1. Trace a block from the left side to identify elements greater than or equal to the pivot.
+    // 2. Trace a block from the right side to identify elements smaller than the pivot.
+    // 3. Exchange the identified elements between the left and right side.
+    //
+    // We keep the following variables for a block of elements:
+    //
+    // 1. `block` - Number of elements in the block.
+    // 2. `start` - Start pointer into the `offsets` array.
+    // 3. `end` - End pointer into the `offsets` array.
+    // 4. `offsets` - Indices of out-of-order elements within the block.
+
+    // The current block on the left side (from `l` to `l.offset(block_l)`).
+    let mut l = v.as_mut_ptr();
+    let mut block_l = BLOCK;
+    let mut start_l = ptr::null_mut();
+    let mut end_l = ptr::null_mut();
+    let mut offsets_l: [u8; BLOCK] = unsafe { mem::uninitialized() };
+
+    // The current block on the right side (from `r.offset(-block_r)` to `r`).
+    let mut r = unsafe { l.offset(v.len() as isize) };
+    let mut block_r = BLOCK;
+    let mut start_r = ptr::null_mut();
+    let mut end_r = ptr::null_mut();
+    let mut offsets_r: [u8; BLOCK] = unsafe { mem::uninitialized() };
+
+    // FIXME: When we get VLAs, try creating one array of length `min(v.len(), 2 * BLOCK)` rather
+    // than two fixed-size arrays of length `BLOCK`. VLAs might be more cache-efficient.
+
+    // Returns the number of elements between pointers `l` (inclusive) and `r` (exclusive).
+    fn width<T>(l: *mut T, r: *mut T) -> usize {
+        assert!(mem::size_of::<T>() > 0);
+        (r as usize - l as usize) / mem::size_of::<T>()
+    }
+
+    loop {
+        // We are done with partitioning block-by-block when `l` and `r` get very close. Then we do
+        // some patch-up work in order to partition the remaining elements in between.
+        let is_done = width(l, r) <= 2 * BLOCK;
+
+        if is_done {
+            // Number of remaining elements (still not compared to the pivot).
+            let mut rem = width(l, r);
+            if start_l < end_l || start_r < end_r {
+                rem -= BLOCK;
+            }
+
+            // Adjust block sizes so that the left and right block don't overlap, but get perfectly
+            // aligned to cover the whole remaining gap.
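+            // Three cases: only the left block still has offsets to process, only the right
+            // block does, or neither does and the remaining gap is split roughly in half.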
+            if start_l < end_l {
+                block_r = rem;
+            } else if start_r < end_r {
+                block_l = rem;
+            } else {
+                block_l = rem / 2;
+                block_r = rem - block_l;
+            }
+            debug_assert!(block_l <= BLOCK && block_r <= BLOCK);
+            debug_assert!(width(l, r) == block_l + block_r);
+        }
+
+        if start_l == end_l {
+            // Trace `block_l` elements from the left side.
+            start_l = offsets_l.as_mut_ptr();
+            end_l = offsets_l.as_mut_ptr();
+            let mut elem = l;
+
+            for i in 0..block_l {
+                unsafe {
+                    // Branchless comparison.
+                    *end_l = i as u8;
+                    end_l = end_l.offset(!is_less(&*elem, pivot) as isize);
+                    elem = elem.offset(1);
+                }
+            }
+        }
+
+        if start_r == end_r {
+            // Trace `block_r` elements from the right side.
+            start_r = offsets_r.as_mut_ptr();
+            end_r = offsets_r.as_mut_ptr();
+            let mut elem = r;
+
+            for i in 0..block_r {
+                unsafe {
+                    // Branchless comparison.
+                    elem = elem.offset(-1);
+                    *end_r = i as u8;
+                    end_r = end_r.offset(is_less(&*elem, pivot) as isize);
+                }
+            }
+        }
+
+        // Number of out-of-order elements to swap between the left and right side.
+        let count = cmp::min(width(start_l, end_l), width(start_r, end_r));
+
+        if count > 0 {
+            macro_rules! left { () => { l.offset(*start_l as isize) } }
+            macro_rules! right { () => { r.offset(-(*start_r as isize) - 1) } }
+
+            // Instead of swapping one pair at the time, it is more efficient to perform a cyclic
+            // permutation. This is not strictly equivalent to swapping, but produces a similar
+            // result using fewer memory operations.
+            unsafe {
+                let tmp = ptr::read(left!());
+                ptr::copy_nonoverlapping(right!(), left!(), 1);
+
+                for _ in 1..count {
+                    start_l = start_l.offset(1);
+                    ptr::copy_nonoverlapping(left!(), right!(), 1);
+                    start_r = start_r.offset(1);
+                    ptr::copy_nonoverlapping(right!(), left!(), 1);
+                }
+
+                ptr::copy_nonoverlapping(&tmp, right!(), 1);
+                mem::forget(tmp);
+                start_l = start_l.offset(1);
+                start_r = start_r.offset(1);
+            }
+        }
+
+        if start_l == end_l {
+            // All out-of-order elements in the left block were moved. Move to the next block.
+            l = unsafe { l.offset(block_l as isize) };
+        }
+
+        if start_r == end_r {
+            // All out-of-order elements in the right block were moved. Move to the previous block.
+            r = unsafe { r.offset(-(block_r as isize)) };
+        }
+
+        if is_done {
+            break;
+        }
+    }
+
+    // All that remains now is at most one block (either the left or the right) with out-of-order
+    // elements that need to be moved. Such remaining elements can be simply shifted to the end
+    // within their block.
+
+    if start_l < end_l {
+        // The left block remains.
+        // Move its remaining out-of-order elements to the far right.
+        debug_assert_eq!(width(l, r), block_l);
+        while start_l < end_l {
+            unsafe {
+                end_l = end_l.offset(-1);
+                ptr::swap(l.offset(*end_l as isize), r.offset(-1));
+                r = r.offset(-1);
+            }
+        }
+        width(v.as_mut_ptr(), r)
+    } else if start_r < end_r {
+        // The right block remains.
+        // Move its remaining out-of-order elements to the far left.
+        debug_assert_eq!(width(l, r), block_r);
+        while start_r < end_r {
+            unsafe {
+                end_r = end_r.offset(-1);
+                ptr::swap(l, r.offset(-(*end_r as isize) - 1));
+                l = l.offset(1);
+            }
+        }
+        width(v.as_mut_ptr(), l)
+    } else {
+        // Nothing else to do, we're done.
+        width(v.as_mut_ptr(), l)
+    }
+}
+
+/// Partitions `v` into elements smaller than `v[pivot]`, followed by elements greater than or
+/// equal to `v[pivot]`.
+///
+/// Returns a tuple of:
+///
+/// 1. Number of elements smaller than `v[pivot]`.
+/// 2. True if `v` was already partitioned.
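+///
+/// Even if a comparison panics, the pivot value is written back into the slice before
+/// unwinding (this is what the `CopyOnDrop` guard below is for).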
+fn partition<T, F>(v: &mut [T], pivot: usize, is_less: &mut F) -> (usize, bool)
+    where F: FnMut(&T, &T) -> bool
+{
+    let (mid, was_partitioned) = {
+        // Place the pivot at the beginning of slice.
+        v.swap(0, pivot);
+        let (pivot, v) = v.split_at_mut(1);
+        let pivot = &mut pivot[0];
+
+        // Read the pivot into a stack-allocated variable for efficiency. If a following comparison
+        // operation panics, the pivot will be automatically written back into the slice.
+        let mut tmp = NoDrop { value: unsafe { ptr::read(pivot) } };
+        let _pivot_guard = CopyOnDrop {
+            src: unsafe { &mut tmp.value },
+            dest: pivot,
+        };
+        let pivot = unsafe { &tmp.value };
+
+        // Find the first pair of out-of-order elements.
+        let mut l = 0;
+        let mut r = v.len();
+        unsafe {
+            // Find the first element greater than or equal to the pivot.
+            while l < r && is_less(v.get_unchecked(l), pivot) {
+                l += 1;
+            }
+
+            // Find the last element smaller than the pivot.
+            while l < r && !is_less(v.get_unchecked(r - 1), pivot) {
+                r -= 1;
+            }
+        }
+
+        (l + partition_in_blocks(&mut v[l..r], pivot, is_less), l >= r)
+
+        // `_pivot_guard` goes out of scope and writes the pivot (which is a stack-allocated
+        // variable) back into the slice where it originally was. This step is critical in ensuring
+        // safety!
+    };
+
+    // Place the pivot between the two partitions.
+    v.swap(0, mid);
+
+    (mid, was_partitioned)
+}
+
+/// Partitions `v` into elements equal to `v[pivot]` followed by elements greater than `v[pivot]`.
+///
+/// Returns the number of elements equal to the pivot. It is assumed that `v` does not contain
+/// elements smaller than the pivot.
+fn partition_equal<T, F>(v: &mut [T], pivot: usize, is_less: &mut F) -> usize
+    where F: FnMut(&T, &T) -> bool
+{
+    // Place the pivot at the beginning of slice.
+    v.swap(0, pivot);
+    let (pivot, v) = v.split_at_mut(1);
+    let pivot = &mut pivot[0];
+
+    // Read the pivot into a stack-allocated variable for efficiency. If a following comparison
+    // operation panics, the pivot will be automatically written back into the slice.
+    let mut tmp = NoDrop { value: unsafe { ptr::read(pivot) } };
+    let _pivot_guard = CopyOnDrop {
+        src: unsafe { &mut tmp.value },
+        dest: pivot,
+    };
+    let pivot = unsafe { &tmp.value };
+
+    // Now partition the slice.
+    let mut l = 0;
+    let mut r = v.len();
+    loop {
+        unsafe {
+            // Find the first element greater than the pivot.
+            while l < r && !is_less(pivot, v.get_unchecked(l)) {
+                l += 1;
+            }
+
+            // Find the last element equal to the pivot.
+            while l < r && is_less(pivot, v.get_unchecked(r - 1)) {
+                r -= 1;
+            }
+
+            // Are we done?
+            if l >= r {
+                break;
+            }
+
+            // Swap the found pair of out-of-order elements.
+            r -= 1;
+            ptr::swap(v.get_unchecked_mut(l), v.get_unchecked_mut(r));
+            l += 1;
+        }
+    }
+
+    // We found `l` elements equal to the pivot. Add 1 to account for the pivot itself.
+    l + 1
+
+    // `_pivot_guard` goes out of scope and writes the pivot (which is a stack-allocated variable)
+    // back into the slice where it originally was. This step is critical in ensuring safety!
+}
+
+/// Scatters some elements around in an attempt to break patterns that might cause imbalanced
+/// partitions in quicksort.
+#[cold]
+fn break_patterns<T>(v: &mut [T]) {
+    let len = v.len();
+
+    if len >= 8 {
+        // A random number will be taken modulo this one. The modulus is a power of two so that we
+        // can simply take bitwise "and", thus avoiding costly CPU operations.
+        let modulus = (len / 4).next_power_of_two();
+        debug_assert!(modulus >= 1 && modulus <= len / 2);
+
+        // Pseudorandom number generation from the "Xorshift RNGs" paper by George Marsaglia.
+        let mut random = len;
+        random ^= random << 13;
+        random ^= random >> 17;
+        random ^= random << 5;
+        random &= modulus - 1;
+        debug_assert!(random < len / 2);
+
+        // The first index.
+        let a = len / 4 * 2;
+        debug_assert!(a >= 1 && a < len - 2);
+
+        // The second index.
+        let b = len / 4 + random;
+        debug_assert!(b >= 1 && b < len - 2);
+
+        // Swap neighbourhoods of `a` and `b`.
+        for i in 0..3 {
+            v.swap(a - 1 + i, b - 1 + i);
+        }
+    }
+}
+
+/// Chooses a pivot in `v` and returns its index, together with `true` if the slice is likely
+/// already sorted.
+///
+/// Elements in `v` might be reordered in the process.
+fn choose_pivot<T, F>(v: &mut [T], is_less: &mut F) -> (usize, bool)
+    where F: FnMut(&T, &T) -> bool
+{
+    // Minimum length to choose the median-of-medians method.
+    // Shorter slices use the simple median-of-three method.
+    const SHORTEST_MEDIAN_OF_MEDIANS: usize = 50;
+    // Maximum number of swaps that can be performed in this function.
+    const MAX_SWAPS: usize = 4 * 3;
+
+    let len = v.len();
+
+    // Three indices near which we are going to choose a pivot.
+    let mut a = len / 4 * 1;
+    let mut b = len / 4 * 2;
+    let mut c = len / 4 * 3;
+
+    // Counts the total number of swaps we are about to perform while sorting indices.
+    let mut swaps = 0;
+
+    if len >= 8 {
+        // Swaps indices so that `v[a] <= v[b]`.
+        let mut sort2 = |a: &mut usize, b: &mut usize| unsafe {
+            if is_less(v.get_unchecked(*b), v.get_unchecked(*a)) {
+                ptr::swap(a, b);
+                swaps += 1;
+            }
+        };
+
+        // Swaps indices so that `v[a] <= v[b] <= v[c]`.
+        let mut sort3 = |a: &mut usize, b: &mut usize, c: &mut usize| {
+            sort2(a, b);
+            sort2(b, c);
+            sort2(a, b);
+        };
+
+        if len >= SHORTEST_MEDIAN_OF_MEDIANS {
+            // Finds the median of `v[a - 1], v[a], v[a + 1]` and stores the index into `a`.
+            let mut sort_adjacent = |a: &mut usize| {
+                let tmp = *a;
+                sort3(&mut (tmp - 1), a, &mut (tmp + 1));
+            };
+
+            // Find medians in the neighborhoods of `a`, `b`, and `c`.
+            sort_adjacent(&mut a);
+            sort_adjacent(&mut b);
+            sort_adjacent(&mut c);
+        }
+
+        // Find the median among `a`, `b`, and `c`.
+        sort3(&mut a, &mut b, &mut c);
+    }
+
+    if swaps < MAX_SWAPS {
+        (b, swaps == 0)
+    } else {
+        // The maximum number of swaps was performed. Chances are the slice is descending or mostly
+        // descending, so reversing will probably help sort it faster.
+        v.reverse();
+        (len - 1 - b, true)
+    }
+}
+
+/// Sorts `v` recursively.
+///
+/// If the slice had a predecessor in the original array, it is specified as `pred`.
+///
+/// `limit` is the number of allowed imbalanced partitions before switching to `heapsort`. If zero,
+/// this function will immediately switch to heapsort.
+fn recurse<'a, T, F>(mut v: &'a mut [T], is_less: &mut F, mut pred: Option<&'a T>, mut limit: usize)
+    where F: FnMut(&T, &T) -> bool
+{
+    // Slices of up to this length get sorted using insertion sort.
+    const MAX_INSERTION: usize = 20;
+
+    // True if the last partitioning was reasonably balanced.
+    let mut was_balanced = true;
+    // True if the last partitioning didn't shuffle elements (the slice was already partitioned).
+    let mut was_partitioned = true;
+
+    loop {
+        let len = v.len();
+
+        // Very short slices get sorted using insertion sort.
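+        // (`MAX_INSERTION` is 20, the same threshold the stable merge sort uses.)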
+        if len <= MAX_INSERTION {
+            insertion_sort(v, is_less);
+            return;
+        }
+
+        // If too many bad pivot choices were made, simply fall back to heapsort in order to
+        // guarantee `O(n log n)` worst-case.
+        if limit == 0 {
+            heapsort(v, is_less);
+            return;
+        }
+
+        // If the last partitioning was imbalanced, try breaking patterns in the slice by shuffling
+        // some elements around. Hopefully we'll choose a better pivot this time.
+        if !was_balanced {
+            break_patterns(v);
+            limit -= 1;
+        }
+
+        // Choose a pivot and try guessing whether the slice is already sorted.
+        let (pivot, likely_sorted) = choose_pivot(v, is_less);
+
+        // If the last partitioning was decently balanced and didn't shuffle elements, and if pivot
+        // selection predicts the slice is likely already sorted...
+        if was_balanced && was_partitioned && likely_sorted {
+            // Try identifying several out-of-order elements and shifting them to correct
+            // positions. If the slice ends up being completely sorted, we're done.
+            if partial_insertion_sort(v, is_less) {
+                return;
+            }
+        }
+
+        // If the chosen pivot is equal to the predecessor, then it's the smallest element in the
+        // slice. Partition the slice into elements equal to and elements greater than the pivot.
+        // This case is usually hit when the slice contains many duplicate elements.
+        if let Some(p) = pred {
+            if !is_less(p, &v[pivot]) {
+                let mid = partition_equal(v, pivot, is_less);
+
+                // Continue sorting elements greater than the pivot.
+                v = &mut {v}[mid..];
+                continue;
+            }
+        }
+
+        // Partition the slice.
+        let (mid, was_p) = partition(v, pivot, is_less);
+        was_balanced = cmp::min(mid, len - mid) >= len / 8;
+        was_partitioned = was_p;
+
+        // Split the slice into `left`, `pivot`, and `right`.
+        let (left, right) = {v}.split_at_mut(mid);
+        let (pivot, right) = right.split_at_mut(1);
+        let pivot = &pivot[0];
+
+        // Recurse into the shorter side only in order to minimize the total number of recursive
+        // calls and consume less stack space. Then just continue with the longer side (this is
+        // akin to tail recursion).
+        if left.len() < right.len() {
+            recurse(left, is_less, pred, limit);
+            v = right;
+            pred = Some(pivot);
+        } else {
+            recurse(right, is_less, Some(pivot), limit);
+            v = left;
+        }
+    }
+}
+
+/// Sorts `v` using pattern-defeating quicksort, which is `O(n log n)` worst-case.
+pub fn quicksort<T, F>(v: &mut [T], mut is_less: F)
+    where F: FnMut(&T, &T) -> bool
+{
+    // Sorting has no meaningful behavior on zero-sized types.
+    if mem::size_of::<T>() == 0 {
+        return;
+    }
+
+    // Limit the number of imbalanced partitions to `floor(log2(len)) + 1`.
+    let limit = mem::size_of::<usize>() * 8 - v.len().leading_zeros() as usize;
+
+    recurse(v, &mut is_less, None, limit);
+}
diff --git a/src/libcoretest/lib.rs b/src/libcoretest/lib.rs
index d84a1e227560e..d92c378160d2e 100644
--- a/src/libcoretest/lib.rs
+++ b/src/libcoretest/lib.rs
@@ -19,18 +19,20 @@
 #![feature(decode_utf8)]
 #![feature(fixed_size_array)]
 #![feature(flt2dec)]
+#![feature(fmt_internals)]
 #![feature(libc)]
 #![feature(nonzero)]
 #![feature(rand)]
 #![feature(raw)]
 #![feature(sip_hash_13)]
 #![feature(slice_patterns)]
+#![feature(sort_internals)]
+#![feature(sort_unstable)]
 #![feature(step_by)]
 #![feature(test)]
 #![feature(try_from)]
 #![feature(unicode)]
 #![feature(unique)]
-#![feature(fmt_internals)]
 
 extern crate core;
 extern crate test;
diff --git a/src/libcoretest/slice.rs b/src/libcoretest/slice.rs
index ad39e6b081b42..89bd3be08519c 100644
--- a/src/libcoretest/slice.rs
+++ b/src/libcoretest/slice.rs
@@ -8,7 +8,9 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
+use core::slice::heapsort;
 use core::result::Result::{Ok, Err};
+use rand::{Rng, XorShiftRng};
 
 #[test]
 fn test_binary_search() {
@@ -139,9 +141,6 @@ fn test_chunks_mut_last() {
     assert_eq!(c2.last().unwrap()[0], 4);
 }
 
-
-
 #[test]
 fn test_windows_count() {
     let v: &[i32] = &[0, 1, 2, 3, 4, 5];
@@ -224,3 +223,57 @@ fn get_unchecked_mut_range() {
         assert_eq!(v.get_unchecked_mut(1..4), &mut [1, 2, 3][..]);
     }
 }
+
+#[test]
+fn sort_unstable() {
+    let mut v = [0; 600];
+    let mut tmp = [0; 600];
+    let mut rng = XorShiftRng::new_unseeded();
+
+    for len in (2..25).chain(500..510) {
+        let v = &mut v[0..len];
+        let tmp = &mut tmp[0..len];
+
+        for &modulus in &[5, 10, 100, 1000] {
+            for _ in 0..100 {
+                for i in 0..len {
+                    v[i] = rng.gen::<i32>() % modulus;
+                }
+
+                // Sort in default order.
+                tmp.copy_from_slice(v);
+                tmp.sort_unstable();
+                assert!(tmp.windows(2).all(|w| w[0] <= w[1]));
+
+                // Sort in ascending order.
+                tmp.copy_from_slice(v);
+                tmp.sort_unstable_by(|a, b| a.cmp(b));
+                assert!(tmp.windows(2).all(|w| w[0] <= w[1]));
+
+                // Sort in descending order.
+                tmp.copy_from_slice(v);
+                tmp.sort_unstable_by(|a, b| b.cmp(a));
+                assert!(tmp.windows(2).all(|w| w[0] >= w[1]));
+
+                // Test heapsort using `<` operator.
+                tmp.copy_from_slice(v);
+                heapsort(tmp, |a, b| a < b);
+                assert!(tmp.windows(2).all(|w| w[0] <= w[1]));
+
+                // Test heapsort using `>` operator.
+                tmp.copy_from_slice(v);
+                heapsort(tmp, |a, b| a > b);
+                assert!(tmp.windows(2).all(|w| w[0] >= w[1]));
+            }
+        }
+    }
+
+    // Should not panic.
+    [0i32; 0].sort_unstable();
+    [(); 10].sort_unstable();
+    [(); 100].sort_unstable();
+
+    let mut v = [0xDEADBEEFu64];
+    v.sort_unstable();
+    assert!(v == [0xDEADBEEF]);
+}
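
A quick usage sketch of the API this patch adds (a hypothetical standalone program, not part of
the patch; on nightly toolchains of this era it needs the feature gate shown):

    #![feature(sort_unstable)]

    fn main() {
        let mut v = [-5i32, 4, 1, -3, 2];

        // In-place, non-allocating, O(n log n) worst-case; equal elements may be reordered.
        v.sort_unstable();
        assert_eq!(v, [-5, -3, 1, 2, 4]);

        // Comparator and key-extraction variants mirror `sort_by` and `sort_by_key`.
        v.sort_unstable_by(|a, b| b.cmp(a));
        assert_eq!(v, [4, 2, 1, -3, -5]);

        v.sort_unstable_by_key(|x| x.abs());
        assert_eq!(v, [1, 2, -3, 4, -5]);
    }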