Bug 1438892 - Update Cargo lockfiles and re-vendor rust dependencies. r?jrmuizel
author      Kartikaya Gupta <kgupta@mozilla.com>
date        Tue, 20 Feb 2018 09:05:08 -0500
changeset   757249 eb423d5dd95c4ef75ad78139b3019cd93d832bc1
parent      757248 3b2e7b65376d28dc5f3173a21c24fbb20d07b25b
child       757250 de8f2d2f2a3503bba9ed610fc626c091f54d2d73
push id     99726
push user   kgupta@mozilla.com
push date   Tue, 20 Feb 2018 14:08:06 +0000
reviewers   jrmuizel
bugs        1438892
milestone   60.0a1
Bug 1438892 - Update Cargo lockfiles and re-vendor rust dependencies. r?jrmuizel

MozReview-Commit-ID: wsncokjtul
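For reference, a change like this is typically produced with the standard cargo + mach workflow sketched below (a sketch only; <updated-crate> is an illustrative placeholder, and the crates actually affected are the ones listed in the file list and lockfile diffs):

# Refresh the two Gecko workspace lockfiles so they pick up the new
# dependency graph (run once per lockfile; <updated-crate> is a placeholder).
cargo update -p <updated-crate> --manifest-path toolkit/library/rust/Cargo.toml
cargo update -p <updated-crate> --manifest-path toolkit/library/gtest/rust/Cargo.toml

# Re-vendor the crates.io sources into third_party/rust/ so they match the
# updated lockfiles.
./mach vendor rust
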
third_party/rust/coco/.cargo-checksum.json
third_party/rust/coco/.travis.yml
third_party/rust/coco/CHANGELOG.md
third_party/rust/coco/Cargo.toml
third_party/rust/coco/LICENSE-APACHE
third_party/rust/coco/LICENSE-MIT
third_party/rust/coco/README.md
third_party/rust/coco/benches/bench.rs
third_party/rust/coco/ci/script.sh
third_party/rust/coco/src/deque.rs
third_party/rust/coco/src/epoch/atomic.rs
third_party/rust/coco/src/epoch/garbage.rs
third_party/rust/coco/src/epoch/mod.rs
third_party/rust/coco/src/epoch/thread.rs
third_party/rust/coco/src/lib.rs
third_party/rust/coco/src/stack.rs
third_party/rust/crossbeam-deque/.cargo-checksum.json
third_party/rust/crossbeam-deque/.travis.yml
third_party/rust/crossbeam-deque/CHANGELOG.md
third_party/rust/crossbeam-deque/Cargo.toml
third_party/rust/crossbeam-deque/LICENSE-APACHE
third_party/rust/crossbeam-deque/LICENSE-MIT
third_party/rust/crossbeam-deque/README.md
third_party/rust/crossbeam-deque/src/lib.rs
third_party/rust/crossbeam-epoch/.cargo-checksum.json
third_party/rust/crossbeam-epoch/.travis.yml
third_party/rust/crossbeam-epoch/CHANGELOG.md
third_party/rust/crossbeam-epoch/Cargo.toml
third_party/rust/crossbeam-epoch/LICENSE-APACHE
third_party/rust/crossbeam-epoch/LICENSE-MIT
third_party/rust/crossbeam-epoch/README.md
third_party/rust/crossbeam-epoch/benches/defer.rs
third_party/rust/crossbeam-epoch/benches/flush.rs
third_party/rust/crossbeam-epoch/benches/pin.rs
third_party/rust/crossbeam-epoch/examples/sanitize.rs
third_party/rust/crossbeam-epoch/src/atomic.rs
third_party/rust/crossbeam-epoch/src/collector.rs
third_party/rust/crossbeam-epoch/src/default.rs
third_party/rust/crossbeam-epoch/src/deferred.rs
third_party/rust/crossbeam-epoch/src/epoch.rs
third_party/rust/crossbeam-epoch/src/garbage.rs
third_party/rust/crossbeam-epoch/src/guard.rs
third_party/rust/crossbeam-epoch/src/internal.rs
third_party/rust/crossbeam-epoch/src/lib.rs
third_party/rust/crossbeam-epoch/src/sync/list.rs
third_party/rust/crossbeam-epoch/src/sync/mod.rs
third_party/rust/crossbeam-epoch/src/sync/queue.rs
third_party/rust/crossbeam-utils/.cargo-checksum.json
third_party/rust/crossbeam-utils/.travis.yml
third_party/rust/crossbeam-utils/CHANGELOG.md
third_party/rust/crossbeam-utils/Cargo.toml
third_party/rust/crossbeam-utils/LICENSE-APACHE
third_party/rust/crossbeam-utils/LICENSE-MIT
third_party/rust/crossbeam-utils/README.md
third_party/rust/crossbeam-utils/src/atomic_option.rs
third_party/rust/crossbeam-utils/src/cache_padded.rs
third_party/rust/crossbeam-utils/src/lib.rs
third_party/rust/crossbeam-utils/src/scoped.rs
third_party/rust/memoffset/.cargo-checksum.json
third_party/rust/memoffset/Cargo.toml
third_party/rust/memoffset/LICENSE
third_party/rust/memoffset/README.md
third_party/rust/memoffset/src/lib.rs
third_party/rust/memoffset/src/offset_of.rs
third_party/rust/memoffset/src/span_of.rs
third_party/rust/rayon-0.8.2/.cargo-checksum.json
third_party/rust/rayon-0.8.2/.travis.yml
third_party/rust/rayon-0.8.2/Cargo.toml
third_party/rust/rayon-0.8.2/LICENSE-APACHE
third_party/rust/rayon-0.8.2/LICENSE-MIT
third_party/rust/rayon-0.8.2/README.md
third_party/rust/rayon-0.8.2/RELEASES.md
third_party/rust/rayon-0.8.2/appveyor.yml
third_party/rust/rayon-0.8.2/ci/highlander.sh
third_party/rust/rayon-0.8.2/examples/README.md
third_party/rust/rayon-0.8.2/examples/cpu_monitor.rs
third_party/rust/rayon-0.8.2/scripts/analyze.sh
third_party/rust/rayon-0.8.2/src/collections/binary_heap.rs
third_party/rust/rayon-0.8.2/src/collections/btree_map.rs
third_party/rust/rayon-0.8.2/src/collections/btree_set.rs
third_party/rust/rayon-0.8.2/src/collections/hash_map.rs
third_party/rust/rayon-0.8.2/src/collections/hash_set.rs
third_party/rust/rayon-0.8.2/src/collections/linked_list.rs
third_party/rust/rayon-0.8.2/src/collections/mod.rs
third_party/rust/rayon-0.8.2/src/collections/vec_deque.rs
third_party/rust/rayon-0.8.2/src/delegate.rs
third_party/rust/rayon-0.8.2/src/iter/README.md
third_party/rust/rayon-0.8.2/src/iter/chain.rs
third_party/rust/rayon-0.8.2/src/iter/cloned.rs
third_party/rust/rayon-0.8.2/src/iter/collect/consumer.rs
third_party/rust/rayon-0.8.2/src/iter/collect/mod.rs
third_party/rust/rayon-0.8.2/src/iter/collect/test.rs
third_party/rust/rayon-0.8.2/src/iter/enumerate.rs
third_party/rust/rayon-0.8.2/src/iter/extend.rs
third_party/rust/rayon-0.8.2/src/iter/filter.rs
third_party/rust/rayon-0.8.2/src/iter/filter_map.rs
third_party/rust/rayon-0.8.2/src/iter/find.rs
third_party/rust/rayon-0.8.2/src/iter/find_first_last/mod.rs
third_party/rust/rayon-0.8.2/src/iter/find_first_last/test.rs
third_party/rust/rayon-0.8.2/src/iter/flat_map.rs
third_party/rust/rayon-0.8.2/src/iter/fold.rs
third_party/rust/rayon-0.8.2/src/iter/for_each.rs
third_party/rust/rayon-0.8.2/src/iter/from_par_iter.rs
third_party/rust/rayon-0.8.2/src/iter/inspect.rs
third_party/rust/rayon-0.8.2/src/iter/internal.rs
third_party/rust/rayon-0.8.2/src/iter/len.rs
third_party/rust/rayon-0.8.2/src/iter/map.rs
third_party/rust/rayon-0.8.2/src/iter/map_with.rs
third_party/rust/rayon-0.8.2/src/iter/mod.rs
third_party/rust/rayon-0.8.2/src/iter/noop.rs
third_party/rust/rayon-0.8.2/src/iter/product.rs
third_party/rust/rayon-0.8.2/src/iter/reduce.rs
third_party/rust/rayon-0.8.2/src/iter/rev.rs
third_party/rust/rayon-0.8.2/src/iter/skip.rs
third_party/rust/rayon-0.8.2/src/iter/splitter.rs
third_party/rust/rayon-0.8.2/src/iter/sum.rs
third_party/rust/rayon-0.8.2/src/iter/take.rs
third_party/rust/rayon-0.8.2/src/iter/test.rs
third_party/rust/rayon-0.8.2/src/iter/unzip.rs
third_party/rust/rayon-0.8.2/src/iter/while_some.rs
third_party/rust/rayon-0.8.2/src/iter/zip.rs
third_party/rust/rayon-0.8.2/src/lib.rs
third_party/rust/rayon-0.8.2/src/option.rs
third_party/rust/rayon-0.8.2/src/prelude.rs
third_party/rust/rayon-0.8.2/src/private.rs
third_party/rust/rayon-0.8.2/src/range.rs
third_party/rust/rayon-0.8.2/src/result.rs
third_party/rust/rayon-0.8.2/src/slice/mergesort.rs
third_party/rust/rayon-0.8.2/src/slice/mod.rs
third_party/rust/rayon-0.8.2/src/slice/quicksort.rs
third_party/rust/rayon-0.8.2/src/slice/test.rs
third_party/rust/rayon-0.8.2/src/split_producer.rs
third_party/rust/rayon-0.8.2/src/str.rs
third_party/rust/rayon-0.8.2/src/test.rs
third_party/rust/rayon-0.8.2/src/vec.rs
third_party/rust/rayon-0.8.2/tests/compile-fail-unstable/README.md
third_party/rust/rayon-0.8.2/tests/compile-fail-unstable/future_escape.rs
third_party/rust/rayon-0.8.2/tests/compile-fail/README.md
third_party/rust/rayon-0.8.2/tests/compile-fail/cannot_collect_filtermap_data.rs
third_party/rust/rayon-0.8.2/tests/compile-fail/cannot_zip_filtered_data.rs
third_party/rust/rayon-0.8.2/tests/compile-fail/cell_par_iter.rs
third_party/rust/rayon-0.8.2/tests/compile-fail/must_use.rs
third_party/rust/rayon-0.8.2/tests/compile-fail/no_send_par_iter.rs
third_party/rust/rayon-0.8.2/tests/compile-fail/quicksort_race1.rs
third_party/rust/rayon-0.8.2/tests/compile-fail/quicksort_race2.rs
third_party/rust/rayon-0.8.2/tests/compile-fail/quicksort_race3.rs
third_party/rust/rayon-0.8.2/tests/compile-fail/rc_par_iter.rs
third_party/rust/rayon-0.8.2/tests/compile-fail/rc_return.rs
third_party/rust/rayon-0.8.2/tests/compile-fail/rc_upvar.rs
third_party/rust/rayon-0.8.2/tests/compile-fail/scope_join_bad.rs
third_party/rust/rayon-0.8.2/tests/run-fail-unstable/README.md
third_party/rust/rayon-0.8.2/tests/run-fail/README.md
third_party/rust/rayon-0.8.2/tests/run-fail/iter_panic.rs
third_party/rust/rayon-0.8.2/tests/run-fail/simple_panic.rs
third_party/rust/rayon-0.8.2/tests/run-pass-unstable/README.md
third_party/rust/rayon-0.8.2/tests/run-pass/README.md
third_party/rust/rayon-0.8.2/tests/run-pass/double_init_fail.rs
third_party/rust/rayon-0.8.2/tests/run-pass/init_zero_threads.rs
third_party/rust/rayon-0.8.2/tests/run-pass/named-threads.rs
third_party/rust/rayon-0.8.2/tests/run-pass/scope_join.rs
third_party/rust/rayon-0.8.2/tests/run-pass/sort-panic-safe.rs
third_party/rust/rayon-0.8.2/tests/run-pass/stack_overflow_crash.rs
third_party/rust/rayon-core/.cargo-checksum.json
third_party/rust/rayon-core/Cargo.toml
third_party/rust/rayon-core/LICENSE-APACHE
third_party/rust/rayon-core/LICENSE-MIT
third_party/rust/rayon-core/README.md
third_party/rust/rayon-core/src/future/README.md
third_party/rust/rayon-core/src/future/mod.rs
third_party/rust/rayon-core/src/future/test.rs
third_party/rust/rayon-core/src/internal/mod.rs
third_party/rust/rayon-core/src/internal/task.rs
third_party/rust/rayon-core/src/internal/worker.rs
third_party/rust/rayon-core/src/job.rs
third_party/rust/rayon-core/src/join/mod.rs
third_party/rust/rayon-core/src/join/test.rs
third_party/rust/rayon-core/src/latch.rs
third_party/rust/rayon-core/src/lib.rs
third_party/rust/rayon-core/src/log.rs
third_party/rust/rayon-core/src/registry.rs
third_party/rust/rayon-core/src/scope/internal.rs
third_party/rust/rayon-core/src/scope/mod.rs
third_party/rust/rayon-core/src/scope/test.rs
third_party/rust/rayon-core/src/spawn/mod.rs
third_party/rust/rayon-core/src/spawn/test.rs
third_party/rust/rayon-core/src/test.rs
third_party/rust/rayon-core/src/thread_pool/internal.rs
third_party/rust/rayon-core/src/thread_pool/mod.rs
third_party/rust/rayon-core/src/thread_pool/test.rs
third_party/rust/rayon-core/src/unwind.rs
third_party/rust/rayon/.cargo-checksum.json
third_party/rust/rayon/.travis.yml
third_party/rust/rayon/Cargo.toml
third_party/rust/rayon/FAQ.md
third_party/rust/rayon/README.md
third_party/rust/rayon/RELEASES.md
third_party/rust/rayon/appveyor.yml
third_party/rust/rayon/bors.toml
third_party/rust/rayon/examples/cpu_monitor.rs
third_party/rust/rayon/src/collections/binary_heap.rs
third_party/rust/rayon/src/collections/btree_map.rs
third_party/rust/rayon/src/collections/btree_set.rs
third_party/rust/rayon/src/collections/hash_map.rs
third_party/rust/rayon/src/collections/hash_set.rs
third_party/rust/rayon/src/collections/linked_list.rs
third_party/rust/rayon/src/collections/mod.rs
third_party/rust/rayon/src/collections/vec_deque.rs
third_party/rust/rayon/src/delegate.rs
third_party/rust/rayon/src/iter/README.md
third_party/rust/rayon/src/iter/chain.rs
third_party/rust/rayon/src/iter/chunks.rs
third_party/rust/rayon/src/iter/cloned.rs
third_party/rust/rayon/src/iter/collect/consumer.rs
third_party/rust/rayon/src/iter/collect/mod.rs
third_party/rust/rayon/src/iter/collect/test.rs
third_party/rust/rayon/src/iter/empty.rs
third_party/rust/rayon/src/iter/enumerate.rs
third_party/rust/rayon/src/iter/extend.rs
third_party/rust/rayon/src/iter/filter.rs
third_party/rust/rayon/src/iter/filter_map.rs
third_party/rust/rayon/src/iter/find.rs
third_party/rust/rayon/src/iter/find_first_last/mod.rs
third_party/rust/rayon/src/iter/find_first_last/test.rs
third_party/rust/rayon/src/iter/flat_map.rs
third_party/rust/rayon/src/iter/flatten.rs
third_party/rust/rayon/src/iter/fold.rs
third_party/rust/rayon/src/iter/for_each.rs
third_party/rust/rayon/src/iter/from_par_iter.rs
third_party/rust/rayon/src/iter/inspect.rs
third_party/rust/rayon/src/iter/interleave.rs
third_party/rust/rayon/src/iter/interleave_shortest.rs
third_party/rust/rayon/src/iter/internal.rs
third_party/rust/rayon/src/iter/intersperse.rs
third_party/rust/rayon/src/iter/len.rs
third_party/rust/rayon/src/iter/map.rs
third_party/rust/rayon/src/iter/map_with.rs
third_party/rust/rayon/src/iter/mod.rs
third_party/rust/rayon/src/iter/noop.rs
third_party/rust/rayon/src/iter/once.rs
third_party/rust/rayon/src/iter/plumbing/README.md
third_party/rust/rayon/src/iter/plumbing/mod.rs
third_party/rust/rayon/src/iter/product.rs
third_party/rust/rayon/src/iter/reduce.rs
third_party/rust/rayon/src/iter/repeat.rs
third_party/rust/rayon/src/iter/rev.rs
third_party/rust/rayon/src/iter/skip.rs
third_party/rust/rayon/src/iter/splitter.rs
third_party/rust/rayon/src/iter/sum.rs
third_party/rust/rayon/src/iter/take.rs
third_party/rust/rayon/src/iter/test.rs
third_party/rust/rayon/src/iter/unzip.rs
third_party/rust/rayon/src/iter/update.rs
third_party/rust/rayon/src/iter/while_some.rs
third_party/rust/rayon/src/iter/zip.rs
third_party/rust/rayon/src/iter/zip_eq.rs
third_party/rust/rayon/src/lib.rs
third_party/rust/rayon/src/math.rs
third_party/rust/rayon/src/option.rs
third_party/rust/rayon/src/par_either.rs
third_party/rust/rayon/src/private.rs
third_party/rust/rayon/src/range.rs
third_party/rust/rayon/src/result.rs
third_party/rust/rayon/src/slice/mod.rs
third_party/rust/rayon/src/slice/test.rs
third_party/rust/rayon/src/split_producer.rs
third_party/rust/rayon/src/str.rs
third_party/rust/rayon/src/test.rs
third_party/rust/rayon/src/vec.rs
third_party/rust/rayon/tests/clones.rs
third_party/rust/rayon/tests/compile-fail-unstable/future_escape.rs
third_party/rust/rayon/tests/compile-fail/cannot_collect_filtermap_data.rs
third_party/rust/rayon/tests/compile-fail/must_use.rs
third_party/rust/rayon/tests/compile-fail/no_send_par_iter.rs
third_party/rust/rayon/tests/compile-fail/quicksort_race1.rs
third_party/rust/rayon/tests/compile-fail/quicksort_race2.rs
third_party/rust/rayon/tests/compile-fail/quicksort_race3.rs
third_party/rust/rayon/tests/compile-fail/rc_par_iter.rs
third_party/rust/rayon/tests/debug.rs
third_party/rust/rayon/tests/intersperse.rs
third_party/rust/rayon/tests/producer_split_at.rs
third_party/rust/rayon/tests/run-pass/double_init_fail.rs
third_party/rust/rayon/tests/run-pass/init_zero_threads.rs
third_party/rust/rayon/tests/run-pass/named-threads.rs
third_party/rust/rayon/tests/run-pass/sort-panic-safe.rs
third_party/rust/rayon/tests/run-pass/stack_overflow_crash.rs
third_party/rust/rayon/tests/sort-panic-safe.rs
toolkit/library/gtest/rust/Cargo.lock
toolkit/library/rust/Cargo.lock
deleted file mode 100644
--- a/third_party/rust/coco/.cargo-checksum.json
+++ /dev/null
@@ -1,1 +0,0 @@
-{"files":{".travis.yml":"b4ea42f2ade2f287c4b0b6eee0e34437ec7cad7462832c18c397372b2a18aef1","CHANGELOG.md":"255242d56d5ce66921e03665a7b4b87be94c4b2ca7c4333f6569abe45321f992","Cargo.toml":"3aeb19f8e670699b19d0627c2466e8a859a02d3b8697c2054ac1ce8f82876c3e","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"0621878e61f0d0fda054bcbe02df75192c28bde1ecc8289cbd86aeba2dd72720","README.md":"7c3ce82aaba8e7bb81a62e1c99eb4c62c0116cd0832e343be5a52ec5e20942cb","benches/bench.rs":"ab1b7a1db73425735405fc214606c9ec783b350001f1be376ebf43cd4a540b67","ci/script.sh":"878f8b0a1d77d51834c152b299e6ef7b9c7d24a7ca2fbefe5070e9d2a72532c9","src/deque.rs":"5eaa6bec7c61435abebb35d52e9e02a6bb164c92d6c078f634e2b941f03e033d","src/epoch/atomic.rs":"1b7ed6f5abc0860a71a2d07f9099a4c0c7f274f7fe2a09733b64bf9f1a72fcd1","src/epoch/garbage.rs":"b1b35659796008001a8cb4a9edad7c101091f5ba45515cc5d64ef1ec862d36af","src/epoch/mod.rs":"0c83566f179b125ce37d40d5ba1c8731b3baa29fc0c46f966eeb44d1cb41502c","src/epoch/thread.rs":"cb8d17c75763004f4d3b227a7b710b1c8cbf3c5adc87d8346db57b2f8af59b27","src/lib.rs":"4b01d1e4bea889496b8c22713caaf34c65339aa8582e8b903fd3e0395c830a4a","src/stack.rs":"c1186eadfce0b83c3df2211cf15e0d2426b3a8fc3cd7726eca4e73851a502b60"},"package":"c06169f5beb7e31c7c67ebf5540b8b472d23e3eade3b2ec7d1f5b504a85f91bd"}
\ No newline at end of file
deleted file mode 100644
--- a/third_party/rust/coco/.travis.yml
+++ /dev/null
@@ -1,27 +0,0 @@
-language: rust
-
-rust:
-  - stable
-  - beta
-  - nightly
-
-env:
-    global:
-      - RUST_MIN_STACK=33554432
-
-matrix:
-
-addons:
-  apt:
-    sources:
-      - ubuntu-toolchain-r-test
-      - llvm-toolchain-precise
-      - llvm-toolchain-precise-3.8
-    packages:
-      - llvm-3.8
-      - llvm-3.8-dev
-      - clang-3.8
-      - clang-3.8-dev
-
-script:
-  - ./ci/script.sh
deleted file mode 100644
--- a/third_party/rust/coco/CHANGELOG.md
+++ /dev/null
@@ -1,4 +0,0 @@
-# Release 0.1.0
-
-* Implemented a lock-free stack.
-* Implemented a lock-free work-stealing deque.
deleted file mode 100644
--- a/third_party/rust/coco/Cargo.toml
+++ /dev/null
@@ -1,32 +0,0 @@
-# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
-#
-# When uploading crates to the registry Cargo will automatically
-# "normalize" Cargo.toml files for maximal compatibility
-# with all versions of Cargo and also rewrite `path` dependencies
-# to registry (e.g. crates.io) dependencies
-#
-# If you believe there's an error in this file please file an
-# issue against the rust-lang/cargo repository. If you're
-# editing this file be aware that the upstream Cargo.toml
-# will likely look very different (and much more reasonable)
-
-[package]
-name = "coco"
-version = "0.1.1"
-authors = ["Stjepan Glavina <stjepang@gmail.com>"]
-description = "Concurrent collections"
-documentation = "https://docs.rs/coco"
-readme = "README.md"
-license = "Apache-2.0/MIT"
-repository = "https://github.com/stjepang/coco"
-[dependencies.scopeguard]
-version = "0.3"
-
-[dependencies.either]
-version = "1.0"
-[dev-dependencies.rand]
-version = "0.3"
-
-[features]
-internals = []
-strict_gc = []
deleted file mode 100644
--- a/third_party/rust/coco/README.md
+++ /dev/null
@@ -1,18 +0,0 @@
-# Concurrent collections
-
-[![Build Status](https://travis-ci.org/stjepang/coco.svg?branch=master)](https://travis-ci.org/stjepang/coco)
-[![License](https://img.shields.io/badge/license-Apache--2.0%2FMIT-blue.svg)](https://github.com/stjepang/coco)
-[![Cargo](https://img.shields.io/crates/v/coco.svg)](https://crates.io/crates/coco)
-[![Documentation](https://docs.rs/coco/badge.svg)](https://docs.rs/coco)
-
-This crate offers several collections that are designed for performance in multithreaded
-contexts. They can be freely shared among multiple threads running in parallel, and concurrently
-modified without the overhead of locking.
-
-<!-- Some of these data structures are lock-free. Others are not strictly speaking lock-free, but -->
-<!-- still scale well with respect to the number of threads accessing them. -->
-
-The following collections are available:
-
-* `Stack`: A lock-free stack.
-* `deque`: A lock-free work-stealing deque.
deleted file mode 100755
--- a/third_party/rust/coco/benches/bench.rs
+++ /dev/null
@@ -1,12 +0,0 @@
-#![feature(test)]
-
-extern crate coco;
-extern crate test;
-
-use coco::epoch;
-use test::Bencher;
-
-#[bench]
-fn pin_empty(b: &mut Bencher) {
-    b.iter(|| epoch::pin(|_| ()))
-}
deleted file mode 100755
--- a/third_party/rust/coco/ci/script.sh
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-
-set -ex
-
-cargo test
-cargo test --features strict_gc
-
-if [[ "$TRAVIS_RUST_VERSION" == "nightly" ]]; then
-    cd sanitize
-
-    cargo test
-    cargo test --features coco/strict_gc
-
-    for _ in {1..10}; do
-        cargo test --release
-    done
-    for _ in {1..10}; do
-        cargo test --release --features coco/strict_gc
-    done
-fi
deleted file mode 100644
--- a/third_party/rust/coco/src/deque.rs
+++ /dev/null
@@ -1,813 +0,0 @@
-//! A lock-free work-stealing deque.
-//!
-//! There is one worker and possibly multiple stealers per deque. The worker has exclusive access
-//! to one side of the deque and may push and pop elements. Stealers can only steal (i.e. pop)
-//! elements from the other side.
-//!
-//! The implementation is based on the following papers:
-//!
-//! 1. Dynamic Circular Work-Stealing Deque
-//!    <sup>[pdf][chase-lev]</sup>
-//! 2. Correct and Efficient Work-Stealing for Weak Memory Models
-//!    <sup>[pdf][weak-mem]</sup>
-//! 3. CDSChecker: Checking Concurrent Data Structures Written with C/C++ Atomics
-//!    <sup>[pdf][checker] [code][code]</sup>
-//!
-//! [chase-lev]: https://pdfs.semanticscholar.org/3771/77bb82105c35e6e26ebad1698a20688473bd.pdf
-//! [weak-mem]: http://www.di.ens.fr/~zappa/readings/ppopp13.pdf
-//! [checker]: http://plrg.eecs.uci.edu/publications/c11modelcheck.pdf
-//! [code]: https://github.com/computersforpeace/model-checker-benchmarks/tree/master/chase-lev-deque-bugfix
-//!
-//! # Examples
-//!
-//! ```
-//! use coco::deque;
-//!
-//! let (w, s) = deque::new();
-//!
-//! // Create some work.
-//! for i in 0..1000 {
-//!     w.push(i);
-//! }
-//!
-//! let threads = (0..4).map(|_| {
-//!     let s = s.clone();
-//!     std::thread::spawn(move || {
-//!         while let Some(x) = s.steal() {
-//!             // Do something with `x`...
-//!         }
-//!     })
-//! }).collect::<Vec<_>>();
-//!
-//! while let Some(x) = w.pop() {
-//!     // Do something with `x`...
-//!     // Or create even more work...
-//!     if x > 1 {
-//!         w.push(x / 2);
-//!         w.push(x / 2);
-//!     }
-//! }
-//!
-//! for t in threads {
-//!     t.join().unwrap();
-//! }
-//! ```
-
-use std::cmp;
-use std::fmt;
-use std::marker::PhantomData;
-use std::mem;
-use std::ptr;
-use std::sync::Arc;
-use std::sync::atomic::{AtomicIsize, fence};
-use std::sync::atomic::Ordering::{Acquire, Release, Relaxed, SeqCst};
-
-use epoch::{self, Atomic};
-
-/// Minimum buffer capacity for a deque.
-const MIN_CAP: usize = 16;
-
-/// A buffer where deque elements are stored.
-struct Buffer<T> {
-    /// Pointer to the allocated memory.
-    ptr: *mut T,
-    /// Capacity of the buffer. Always a power of two.
-    cap: usize,
-}
-
-impl<T> Buffer<T> {
-    /// Returns a new buffer with the specified capacity.
-    fn new(cap: usize) -> Self {
-        let mut v = Vec::with_capacity(cap);
-        let ptr = v.as_mut_ptr();
-        mem::forget(v);
-        Buffer {
-            ptr: ptr,
-            cap: cap,
-        }
-    }
-
-    /// Returns a pointer to the element at the specified `index`.
-    unsafe fn at(&self, index: isize) -> *mut T {
-        // `self.cap` is always a power of two.
-        self.ptr.offset(index & (self.cap - 1) as isize)
-    }
-
-    /// Writes `value` into the specified `index`.
-    unsafe fn write(&self, index: isize, value: T) {
-        ptr::write(self.at(index), value)
-    }
-
-    /// Reads the value from the specified `index`.
-    unsafe fn read(&self, index: isize) -> T {
-        ptr::read(self.at(index))
-    }
-}
-
-struct Deque<T> {
-    bottom: AtomicIsize,
-    top: AtomicIsize,
-    buffer: Atomic<Buffer<T>>,
-}
-
-/// A work-stealing deque.
-impl<T> Deque<T> {
-    /// Returns a new, empty deque.
-    fn new() -> Self {
-        Deque {
-            bottom: AtomicIsize::new(0),
-            top: AtomicIsize::new(0),
-            buffer: Atomic::new(Buffer::new(MIN_CAP), 0),
-        }
-    }
-
-    /// Returns the number of elements in the deque.
-    ///
-    /// If used concurrently with other operations, the returned number is just an estimate.
-    fn len(&self) -> usize {
-        let b = self.bottom.load(Relaxed);
-        let t = self.top.load(Relaxed);
-        // The length can be negative because `b` and `t` were loaded without synchronization.
-        cmp::max(b.wrapping_sub(t), 0) as usize
-    }
-
-    /// Resizes the buffer with new capacity of `new_cap`.
-    #[cold]
-    unsafe fn resize(&self, new_cap: usize) {
-        // Load the bottom, top, and buffer.
-        let b = self.bottom.load(Relaxed);
-        let t = self.top.load(Relaxed);
-        let buffer = self.buffer.load_raw(Relaxed).0;
-
-        // Allocate a new buffer.
-        let new = Buffer::new(new_cap);
-
-        // Copy data from the old buffer to the new one.
-        let mut i = t;
-        while i != b {
-            ptr::copy_nonoverlapping((*buffer).at(i), new.at(i), 1);
-            i = i.wrapping_add(1);
-        }
-
-        epoch::pin(|pin| {
-            // Replace the old buffer with the new one.
-            self.buffer.store_box(Box::new(new), 0, pin).as_raw();
-
-            let ptr = (*buffer).ptr;
-            let cap = (*buffer).cap;
-
-            // Destroy the old buffer later.
-            epoch::defer_free(ptr, cap, pin);
-            epoch::defer_free(buffer, 1, pin);
-
-            // If the size of the buffer is at least 1KB, then flush the thread-local garbage in
-            // order to destroy it sooner.
-            if mem::size_of::<T>() * cap >= 1 << 10 {
-                epoch::flush(pin);
-            }
-        })
-    }
-
-    /// Pushes an element onto the bottom of the deque.
-    fn push(&self, value: T) {
-        unsafe {
-            // Load the bottom, top, and buffer. The buffer doesn't have to be epoch-protected
-            // because the current thread (the worker) is the only one that grows and shrinks it.
-            let b = self.bottom.load(Relaxed);
-            let t = self.top.load(Acquire);
-            let mut buffer = self.buffer.load_raw(Relaxed).0;
-
-            // Calculate the length of the deque.
-            let len = b.wrapping_sub(t);
-
-            // Is the deque full?
-            let cap = (*buffer).cap;
-            if len >= cap as isize {
-                // Yes. Grow the underlying buffer.
-                self.resize(2 * cap);
-                buffer = self.buffer.load_raw(Relaxed).0;
-            }
-
-            // Write `value` into the right slot and increment `b`.
-            (*buffer).write(b, value);
-            fence(Release);
-            self.bottom.store(b.wrapping_add(1), Relaxed);
-        }
-    }
-
-    /// Pops an element from the bottom of the deque.
-    fn pop(&self) -> Option<T> {
-        // Load the bottom.
-        let b = self.bottom.load(Relaxed);
-
-        // If the deque is empty, return early without incurring the cost of a SeqCst fence.
-        let t = self.top.load(Relaxed);
-        if b.wrapping_sub(t) <= 0 {
-            return None;
-        }
-
-        // Decrement the bottom.
-        let b = b.wrapping_sub(1);
-        self.bottom.store(b, Relaxed);
-
-        // Load the buffer. The buffer doesn't have to be epoch-protected because the current
-        // thread (the worker) is the only one that grows and shrinks it.
-        let buffer = self.buffer.load_raw(Relaxed).0;
-
-        fence(SeqCst);
-
-        // Load the top.
-        let t = self.top.load(Relaxed);
-
-        // Compute the length after the bottom was decremented.
-        let len = b.wrapping_sub(t);
-
-        if len < 0 {
-            // The deque is empty. Restore the bottom back to the original value.
-            self.bottom.store(b.wrapping_add(1), Relaxed);
-            None
-        } else {
-            // Read the value to be popped.
-            let mut value = unsafe { Some((*buffer).read(b)) };
-
-            // Are we popping the last element from the deque?
-            if len == 0 {
-                // Try incrementing the top.
-                if self.top.compare_exchange(t, t.wrapping_add(1), SeqCst, Relaxed).is_err() {
-                    // Failed. We didn't pop anything.
-                    mem::forget(value.take());
-                }
-
-                // Restore the bottom back to the original value.
-                self.bottom.store(b.wrapping_add(1), Relaxed);
-            } else {
-                // Shrink the buffer if `len` is less than one fourth of `cap`.
-                unsafe {
-                    let cap = (*buffer).cap;
-                    if cap > MIN_CAP && len < cap as isize / 4 {
-                        self.resize(cap / 2);
-                    }
-                }
-            }
-
-            value
-        }
-    }
-
-    /// Steals an element from the top of the deque.
-    fn steal(&self) -> Option<T> {
-        // Load the top.
-        let mut t = self.top.load(Acquire);
-
-        // A SeqCst fence is needed here.
-        // If the current thread is already pinned (reentrantly), we must manually issue the fence.
-        // Otherwise, the following pinning will issue the fence anyway, so we don't have to.
-        if epoch::is_pinned() {
-            fence(SeqCst);
-        }
-
-        epoch::pin(|pin| {
-            // Loop until we successfully steal an element or find the deque empty.
-            loop {
-                // Load the bottom.
-                let b = self.bottom.load(Acquire);
-
-                // Is the deque empty?
-                if b.wrapping_sub(t) <= 0 {
-                    return None;
-                }
-
-                // Load the buffer and read the value at the top.
-                let a = self.buffer.load(pin).unwrap();
-                let value = unsafe { a.read(t) };
-
-                // Try incrementing the top to steal the value.
-                if self.top.compare_exchange(t, t.wrapping_add(1), SeqCst, Relaxed).is_ok() {
-                    return Some(value);
-                }
-
-                // We didn't steal this value, forget it.
-                mem::forget(value);
-
-                // Before every iteration of the loop we must load the top, issue a SeqCst fence,
-                // and then load the bottom. Now reload the top and issue the fence.
-                t = self.top.load(Acquire);
-                fence(SeqCst);
-            }
-        })
-    }
-
-    /// Steals an element from the top of the deque, but only the worker may call this method.
-    fn steal_as_worker(&self) -> Option<T> {
-        let b = self.bottom.load(Relaxed);
-        let a = self.buffer.load_raw(Relaxed).0;
-
-        // Loop until we successfully steal an element or find the deque empty.
-        loop {
-            let t = self.top.load(Relaxed);
-
-            // Is the deque empty?
-            if b.wrapping_sub(t) <= 0 {
-                return None;
-            }
-
-            // Try incrementing the top to steal the value.
-            if self.top.compare_exchange(t, t.wrapping_add(1), SeqCst, Relaxed).is_ok() {
-                return unsafe { Some((*a).read(t)) };
-            }
-        }
-    }
-}
-
-impl<T> Drop for Deque<T> {
-    fn drop(&mut self) {
-        // Load the bottom, top, and buffer.
-        let b = self.bottom.load(Relaxed);
-        let t = self.top.load(Relaxed);
-        let buffer = self.buffer.load_raw(Relaxed).0;
-
-        unsafe {
-            // Go through the buffer from top to bottom and drop all elements in the deque.
-            let mut i = t;
-            while i != b {
-                ptr::drop_in_place((*buffer).at(i));
-                i = i.wrapping_add(1);
-            }
-
-            // Free the memory allocated by the buffer.
-            drop(Vec::from_raw_parts((*buffer).ptr, 0, (*buffer).cap));
-            drop(Vec::from_raw_parts(buffer, 0, 1));
-        }
-    }
-}
-
-/// Worker side of a work-stealing deque.
-///
-/// There is only one worker per deque.
-pub struct Worker<T> {
-    deque: Arc<Deque<T>>,
-    _marker: PhantomData<*mut ()>, // !Send + !Sync
-}
-
-unsafe impl<T: Send> Send for Worker<T> {}
-
-impl<T> Worker<T> {
-    /// Returns the number of elements in the deque.
-    ///
-    /// If used concurrently with other operations, the returned number is just an estimate.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use coco::deque;
-    ///
-    /// let (w, _) = deque::new();
-    /// for i in 0..30 {
-    ///     w.push(i);
-    /// }
-    /// assert_eq!(w.len(), 30);
-    /// ```
-    pub fn len(&self) -> usize {
-        self.deque.len()
-    }
-
-    /// Pushes an element onto the bottom of the deque.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use coco::deque;
-    ///
-    /// let (w, _) = deque::new();
-    /// w.push(1);
-    /// w.push(2);
-    /// ```
-    pub fn push(&self, value: T) {
-        self.deque.push(value);
-    }
-
-    /// Pops an element from the bottom of the deque.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use coco::deque;
-    ///
-    /// let (w, _) = deque::new();
-    /// w.push(1);
-    /// w.push(2);
-    ///
-    /// assert_eq!(w.pop(), Some(2));
-    /// assert_eq!(w.pop(), Some(1));
-    /// assert_eq!(w.pop(), None);
-    /// ```
-    pub fn pop(&self) -> Option<T> {
-        self.deque.pop()
-    }
-
-    /// Steals an element from the top of the deque.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use coco::deque;
-    ///
-    /// let (w, _) = deque::new();
-    /// w.push(1);
-    /// w.push(2);
-    ///
-    /// assert_eq!(w.steal(), Some(1));
-    /// assert_eq!(w.steal(), Some(2));
-    /// assert_eq!(w.steal(), None);
-    /// ```
-    pub fn steal(&self) -> Option<T> {
-        self.deque.steal_as_worker()
-    }
-}
-
-impl<T> fmt::Debug for Worker<T> {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(f, "Worker {{ ... }}")
-    }
-}
-
-/// Stealer side of a work-stealing deque.
-///
-/// Stealers may be cloned in order to create more stealers for the same deque.
-pub struct Stealer<T> {
-    deque: Arc<Deque<T>>,
-    _marker: PhantomData<*mut ()>, // !Send + !Sync
-}
-
-unsafe impl<T: Send> Send for Stealer<T> {}
-unsafe impl<T: Send> Sync for Stealer<T> {}
-
-impl<T> Stealer<T> {
-    /// Returns the number of elements in the deque.
-    ///
-    /// If used concurrently with other operations, the returned number is just an estimate.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use coco::deque;
-    ///
-    /// let (w, _) = deque::new();
-    /// for i in 0..30 {
-    ///     w.push(i);
-    /// }
-    /// assert_eq!(w.len(), 30);
-    /// ```
-    pub fn len(&self) -> usize {
-        self.deque.len()
-    }
-
-    /// Steals an element from the top of the deque.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use coco::deque;
-    ///
-    /// let (w, s) = deque::new();
-    /// w.push(1);
-    /// w.push(2);
-    ///
-    /// assert_eq!(s.steal(), Some(1));
-    /// assert_eq!(s.steal(), Some(2));
-    /// assert_eq!(s.steal(), None);
-    /// ```
-    pub fn steal(&self) -> Option<T> {
-        self.deque.steal()
-    }
-}
-
-impl<T> Clone for Stealer<T> {
-    fn clone(&self) -> Self {
-        Stealer {
-            deque: self.deque.clone(),
-            _marker: PhantomData,
-        }
-    }
-}
-
-impl<T> fmt::Debug for Stealer<T> {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(f, "Stealer {{ ... }}")
-    }
-}
-
-/// Returns a new work-stealing deque.
-///
-/// The worker is unique, while stealers can be cloned and distributed among multiple threads.
-///
-/// The deque will be destroyed as soon as its worker and all its stealers get dropped.
-///
-/// # Examples
-///
-/// ```
-/// use coco::deque;
-///
-/// let (w, s1) = deque::new();
-/// let s2 = s1.clone();
-///
-/// w.push('a');
-/// w.push('b');
-/// w.push('c');
-///
-/// assert_eq!(w.pop(), Some('c'));
-/// assert_eq!(s1.steal(), Some('a'));
-/// assert_eq!(s2.steal(), Some('b'));
-/// ```
-pub fn new<T>() -> (Worker<T>, Stealer<T>) {
-    let d = Arc::new(Deque::new());
-    let worker = Worker {
-        deque: d.clone(),
-        _marker: PhantomData,
-    };
-    let stealer = Stealer {
-        deque: d,
-        _marker: PhantomData,
-    };
-    (worker, stealer)
-}
-
-#[cfg(test)]
-mod tests {
-    extern crate rand;
-
-    use std::sync::{Arc, Mutex};
-    use std::sync::atomic::{AtomicBool, AtomicUsize};
-    use std::sync::atomic::Ordering::SeqCst;
-    use std::thread;
-
-    use epoch;
-    use self::rand::Rng;
-
-    #[test]
-    fn smoke() {
-        let (w, s) = super::new();
-        assert_eq!(w.pop(), None);
-        assert_eq!(s.steal(), None);
-        assert_eq!(w.len(), 0);
-        assert_eq!(s.len(), 0);
-
-        w.push(1);
-        assert_eq!(w.len(), 1);
-        assert_eq!(s.len(), 1);
-        assert_eq!(w.pop(), Some(1));
-        assert_eq!(w.pop(), None);
-        assert_eq!(s.steal(), None);
-        assert_eq!(w.len(), 0);
-        assert_eq!(s.len(), 0);
-
-        w.push(2);
-        assert_eq!(s.steal(), Some(2));
-        assert_eq!(s.steal(), None);
-        assert_eq!(w.pop(), None);
-
-        w.push(3);
-        w.push(4);
-        w.push(5);
-        assert_eq!(w.steal(), Some(3));
-        assert_eq!(s.steal(), Some(4));
-        assert_eq!(w.steal(), Some(5));
-        assert_eq!(w.steal(), None);
-    }
-
-    #[test]
-    fn steal_push() {
-        const STEPS: usize = 50_000;
-
-        let (w, s) = super::new();
-        let t = thread::spawn(move || {
-            for i in 0..STEPS {
-                loop {
-                    if let Some(v) = s.steal() {
-                        assert_eq!(i, v);
-                        break;
-                    }
-                }
-            }
-        });
-
-        for i in 0..STEPS {
-            w.push(i);
-        }
-        t.join().unwrap();
-    }
-
-    #[test]
-    fn stampede() {
-        const COUNT: usize = 50_000;
-
-        let (w, s) = super::new();
-
-        for i in 0..COUNT {
-            w.push(Box::new(i + 1));
-        }
-        let remaining = Arc::new(AtomicUsize::new(COUNT));
-
-        let threads = (0..8).map(|_| {
-            let s = s.clone();
-            let remaining = remaining.clone();
-
-            thread::spawn(move || {
-                let mut last = 0;
-                while remaining.load(SeqCst) > 0 {
-                    if let Some(x) = s.steal() {
-                        assert!(last < *x);
-                        last = *x;
-                        remaining.fetch_sub(1, SeqCst);
-                    }
-                }
-            })
-        }).collect::<Vec<_>>();
-
-        let mut last = COUNT + 1;
-        while remaining.load(SeqCst) > 0 {
-            if let Some(x) = w.pop() {
-                assert!(last > *x);
-                last = *x;
-                remaining.fetch_sub(1, SeqCst);
-            }
-        }
-
-        for t in threads {
-            t.join().unwrap();
-        }
-    }
-
-    fn run_stress() {
-        const COUNT: usize = 50_000;
-
-        let (w, s) = super::new();
-        let done = Arc::new(AtomicBool::new(false));
-        let hits = Arc::new(AtomicUsize::new(0));
-
-        let threads = (0..8).map(|_| {
-            let s = s.clone();
-            let done = done.clone();
-            let hits = hits.clone();
-
-            thread::spawn(move || {
-                while !done.load(SeqCst) {
-                    if let Some(_) = s.steal() {
-                        hits.fetch_add(1, SeqCst);
-                    }
-                }
-            })
-        }).collect::<Vec<_>>();
-
-        let mut rng = rand::thread_rng();
-        let mut expected = 0;
-        while expected < COUNT {
-            if rng.gen_range(0, 3) == 0 {
-                if w.pop().is_some() {
-                    hits.fetch_add(1, SeqCst);
-                }
-            } else {
-                w.push(expected);
-                expected += 1;
-            }
-        }
-
-        while hits.load(SeqCst) < COUNT {
-            if w.pop().is_some() {
-                hits.fetch_add(1, SeqCst);
-            }
-        }
-        done.store(true, SeqCst);
-
-        for t in threads {
-            t.join().unwrap();
-        }
-    }
-
-    #[test]
-    fn stress() {
-        run_stress();
-    }
-
-    #[test]
-    fn stress_pinned() {
-        epoch::pin(|_| run_stress());
-    }
-
-    #[test]
-    fn no_starvation() {
-        const COUNT: usize = 50_000;
-
-        let (w, s) = super::new();
-        let done = Arc::new(AtomicBool::new(false));
-
-        let (threads, hits): (Vec<_>, Vec<_>) = (0..8).map(|_| {
-            let s = s.clone();
-            let done = done.clone();
-            let hits = Arc::new(AtomicUsize::new(0));
-
-            let t = {
-                let hits = hits.clone();
-                thread::spawn(move || {
-                    while !done.load(SeqCst) {
-                        if let Some(_) = s.steal() {
-                            hits.fetch_add(1, SeqCst);
-                        }
-                    }
-                })
-            };
-
-            (t, hits)
-        }).unzip();
-
-        let mut rng = rand::thread_rng();
-        let mut my_hits = 0;
-        loop {
-            for i in 0..rng.gen_range(0, COUNT) {
-                if rng.gen_range(0, 3) == 0 && my_hits == 0 {
-                    if w.pop().is_some() {
-                        my_hits += 1;
-                    }
-                } else {
-                    w.push(i);
-                }
-            }
-
-            if my_hits > 0 && hits.iter().all(|h| h.load(SeqCst) > 0) {
-                break;
-            }
-        }
-        done.store(true, SeqCst);
-
-        for t in threads {
-            t.join().unwrap();
-        }
-    }
-
-    #[test]
-    fn destructors() {
-        const COUNT: usize = 50_000;
-
-        struct Elem(usize, Arc<Mutex<Vec<usize>>>);
-
-        impl Drop for Elem {
-            fn drop(&mut self) {
-                self.1.lock().unwrap().push(self.0);
-            }
-        }
-
-        let (w, s) = super::new();
-
-        let dropped = Arc::new(Mutex::new(Vec::new()));
-        let remaining = Arc::new(AtomicUsize::new(COUNT));
-        for i in 0..COUNT {
-            w.push(Elem(i, dropped.clone()));
-        }
-
-        let threads = (0..8).map(|_| {
-            let s = s.clone();
-            let remaining = remaining.clone();
-
-            thread::spawn(move || {
-                for _ in 0..1000 {
-                    if s.steal().is_some() {
-                        remaining.fetch_sub(1, SeqCst);
-                    }
-                }
-            })
-        }).collect::<Vec<_>>();
-
-        for _ in 0..1000 {
-            if w.pop().is_some() {
-                remaining.fetch_sub(1, SeqCst);
-            }
-        }
-
-        for t in threads {
-            t.join().unwrap();
-        }
-
-        let rem = remaining.load(SeqCst);
-        assert!(rem > 0);
-        assert_eq!(w.len(), rem);
-        assert_eq!(s.len(), rem);
-
-        {
-            let mut v = dropped.lock().unwrap();
-            assert_eq!(v.len(), COUNT - rem);
-            v.clear();
-        }
-
-        drop(w);
-        drop(s);
-
-        {
-            let mut v = dropped.lock().unwrap();
-            assert_eq!(v.len(), rem);
-            v.sort();
-            for w in v.windows(2) {
-                assert_eq!(w[0] + 1, w[1]);
-            }
-        }
-    }
-}
deleted file mode 100644
--- a/third_party/rust/coco/src/epoch/atomic.rs
+++ /dev/null
@@ -1,532 +0,0 @@
-use std::mem;
-use std::ptr;
-use std::marker::PhantomData;
-use std::sync::atomic::AtomicUsize;
-use std::sync::atomic::Ordering::{self, AcqRel, Acquire, Release, SeqCst};
-
-use epoch::Pin;
-
-/// Returns a mask containing unused least significant bits of an aligned pointer to `T`.
-fn low_bits<T>() -> usize {
-    (1 << mem::align_of::<T>().trailing_zeros()) - 1
-}
-
-/// Tags the unused least significant bits of `raw` with `tag`.
-///
-/// # Panics
-///
-/// Panics if the tag doesn't fit into the unused bits of the pointer, or if the pointer is
-/// unaligned.
-fn raw_and_tag<T>(raw: *mut T, tag: usize) -> usize {
-    let mask = low_bits::<T>();
-    assert!(raw as usize & mask == 0, "unaligned pointer");
-    assert!(tag <= mask, "tag too large to fit into the unused bits: {} > {}", tag, mask);
-    raw as usize | tag
-}
-
-/// A tagged atomic nullable pointer.
-///
-/// The tag is stored into the unused least significant bits of the pointer. The pointer must be
-/// properly aligned.
-#[derive(Debug)]
-pub struct Atomic<T> {
-    data: AtomicUsize,
-    _marker: PhantomData<*mut T>, // !Send + !Sync
-}
-
-unsafe impl<T: Send + Sync> Send for Atomic<T> {}
-unsafe impl<T: Send + Sync> Sync for Atomic<T> {}
-
-impl<T> Atomic<T> {
-    /// Constructs a tagged atomic pointer from raw data.
-    unsafe fn from_data(data: usize) -> Self {
-        Atomic {
-            data: AtomicUsize::new(data),
-            _marker: PhantomData,
-        }
-    }
-
-    /// Returns a new, null atomic pointer tagged with `tag`.
-    ///
-    /// # Panics
-    ///
-    /// Panics if the tag doesn't fit into the unused bits of an aligned pointer.
-    pub fn null(tag: usize) -> Self {
-        unsafe { Self::from_raw(ptr::null_mut(), tag) }
-    }
-
-    /// Allocates `data` on the heap and returns a new atomic pointer that points to it and is
-    /// tagged with `tag`.
-    ///
-    /// # Panics
-    ///
-    /// Panics if the tag doesn't fit into the unused bits of the pointer, or if the allocated
-    /// pointer is unaligned.
-    pub fn new(data: T, tag: usize) -> Self {
-        unsafe { Self::from_raw(Box::into_raw(Box::new(data)), tag) }
-    }
-
-    /// Returns a new atomic pointer initialized with `ptr`.
-    pub fn from_ptr(ptr: Ptr<T>) -> Self {
-        unsafe { Self::from_data(ptr.data) }
-    }
-
-    /// Returns a new atomic pointer initialized with `b` and `tag`.
-    ///
-    /// # Panics
-    ///
-    /// Panics if the tag doesn't fit into the unused bits of the pointer, or if the pointer is
-    /// unaligned.
-    pub fn from_box(b: Box<T>, tag: usize) -> Self {
-        unsafe { Self::from_raw(Box::into_raw(b), tag) }
-    }
-
-    /// Returns a new atomic pointer initialized with `raw` and `tag`.
-    ///
-    /// # Panics
-    ///
-    /// Panics if the tag doesn't fit into the unused bits of the pointer, or if the pointer is
-    /// unaligned.
-    pub unsafe fn from_raw(raw: *mut T, tag: usize) -> Self {
-        Self::from_data(raw_and_tag(raw, tag))
-    }
-
-    /// Loads the tagged atomic pointer.
-    ///
-    /// This operation uses the `Acquire` ordering.
-    pub fn load<'p>(&self, _: &'p Pin) -> Ptr<'p, T> {
-        unsafe { Ptr::from_data(self.data.load(Acquire)) }
-    }
-
-    /// Loads the tagged atomic pointer as a raw pointer and a tag.
-    ///
-    /// Argument `order` describes the memory ordering of this operation.
-    pub fn load_raw(&self, order: Ordering) -> (*mut T, usize) {
-        let p = unsafe { Ptr::<T>::from_data(self.data.load(order)) };
-        (p.as_raw(), p.tag())
-    }
-
-    /// Stores `new` into the atomic.
-    ///
-    /// This operation uses the `Release` ordering.
-    pub fn store<'p>(&self, new: Ptr<'p, T>) {
-        self.data.store(new.data, Release);
-    }
-
-    /// Stores `new` tagged with `tag` into the atomic and returns it.
-    ///
-    /// This operation uses the `Release` ordering.
-    ///
-    /// # Panics
-    ///
-    /// Panics if the tag doesn't fit into the unused bits of the pointer, or if the pointer is
-    /// unaligned.
-    pub fn store_box<'p>(&self, new: Box<T>, tag: usize, _: &'p Pin) -> Ptr<'p, T> {
-        let ptr = unsafe { Ptr::from_raw(Box::into_raw(new), tag) };
-        self.data.store(ptr.data, Release);
-        ptr
-    }
-
-    /// Stores `new` tagged with `tag` into the atomic.
-    ///
-    /// Argument `order` describes the memory ordering of this operation.
-    ///
-    /// # Panics
-    ///
-    /// Panics if the tag doesn't fit into the unused bits of the pointer, or if the pointer is
-    /// unaligned.
-    pub unsafe fn store_raw<'p>(
-        &self,
-        new: *mut T,
-        tag: usize,
-        order: Ordering,
-        _: &'p Pin,
-    ) -> Ptr<'p, T> {
-        let ptr = Ptr::from_raw(new, tag);
-        self.data.store(ptr.data, order);
-        ptr
-    }
-
-    /// Stores `new` into the atomic, returning the old tagged pointer.
-    ///
-    /// This operation uses the `AcqRel` ordering.
-    pub fn swap<'p>(&self, new: Ptr<'p, T>) -> Ptr<'p, T> {
-        unsafe { Ptr::from_data(self.data.swap(new.data, AcqRel)) }
-    }
-
-    /// Stores `new` tagged with `tag` into the atomic, returning the old tagged pointer.
-    ///
-    /// This operation uses the `AcqRel` ordering.
-    ///
-    /// # Panics
-    ///
-    /// Panics if the tag doesn't fit into the unused bits of the pointer, or if the pointer is
-    /// unaligned.
-    pub fn swap_box<'p>(&self, new: Box<T>, tag: usize, _: &'p Pin) -> Ptr<'p, T> {
-        let data = unsafe { Ptr::from_raw(Box::into_raw(new), tag).data };
-        unsafe { Ptr::from_data(self.data.swap(data, AcqRel)) }
-    }
-
-    /// Stores `new` tagged with `tag` into the atomic, returning the old tagged pointer.
-    ///
-    /// Argument `order` describes the memory ordering of this operation.
-    ///
-    /// # Panics
-    ///
-    /// Panics if the tag doesn't fit into the unused bits of the pointer, or if the pointer is
-    /// unaligned.
-    pub unsafe fn swap_raw<'p>(&self, new: *mut T, tag: usize, order: Ordering) -> Ptr<'p, T> {
-        let data = Ptr::from_raw(new, tag).data;
-        Ptr::from_data(self.data.swap(data, order))
-    }
-
-    /// If the tagged atomic pointer is equal to `current`, stores `new`.
-    ///
-    /// The return value is a result indicating whether the new pointer was stored. On failure the
-    /// current value of the tagged atomic pointer is returned.
-    ///
-    /// This operation uses the `AcqRel` ordering.
-    pub fn cas<'p>(
-        &self,
-        current: Ptr<'p, T>,
-        new: Ptr<'p, T>,
-    ) -> Result<(), Ptr<'p, T>> {
-        let previous = self.data.compare_and_swap(current.data, new.data, AcqRel);
-        if previous == current.data {
-            Ok(())
-        } else {
-            unsafe { Err(Ptr::from_data(previous)) }
-        }
-    }
-
-    /// If the tagged atomic pointer is equal to `current`, stores `new`.
-    ///
-    /// The return value is a result indicating whether the new pointer was stored. On failure the
-    /// current value of the tagged atomic pointer is returned.
-    ///
-    /// This operation uses the `SeqCst` ordering.
-    pub fn cas_sc<'p>(&self, current: Ptr<'p, T>, new: Ptr<'p, T>) -> Result<(), Ptr<'p, T>> {
-        let previous = self.data.compare_and_swap(current.data, new.data, SeqCst);
-        if previous == current.data {
-            Ok(())
-        } else {
-            unsafe { Err(Ptr::from_data(previous)) }
-        }
-    }
-
-    /// If the tagged atomic pointer is equal to `current`, stores `new`.
-    ///
-    /// The return value is a result indicating whether the new pointer was stored. On failure the
-    /// current value of the tagged atomic pointer is returned.
-    ///
-    /// This method can sometimes spuriously fail even when comparison succeeds, which can result
-    /// in more efficient code on some platforms.
-    ///
-    /// This operation uses the `AcqRel` ordering.
-    pub fn cas_weak<'p>(&self, current: Ptr<'p, T>, new: Ptr<'p, T>) -> Result<(), Ptr<'p, T>> {
-        match self.data.compare_exchange_weak(current.data, new.data, AcqRel, Acquire) {
-            Ok(_) => Ok(()),
-            Err(previous) => unsafe { Err(Ptr::from_data(previous)) },
-        }
-    }
-
-    /// If the tagged atomic pointer is equal to `current`, stores `new`.
-    ///
-    /// The return value is a result indicating whether the new pointer was stored. On failure the
-    /// current value of the tagged atomic pointer is returned.
-    ///
-    /// This method can sometimes spuriously fail even when comparison succeeds, which can result
-    /// in more efficient code on some platforms.
-    ///
-    /// This operation uses the `SeqCst` ordering.
-    pub fn cas_weak_sc<'p>(
-        &self,
-        current: Ptr<'p, T>,
-        new: Ptr<'p, T>,
-    ) -> Result<(), Ptr<'p, T>> {
-        match self.data.compare_exchange_weak(current.data, new.data, SeqCst, SeqCst) {
-            Ok(_) => Ok(()),
-            Err(previous) => unsafe { Err(Ptr::from_data(previous)) },
-        }
-    }
-
-    /// If the tagged atomic pointer is equal to `current`, stores `new` tagged with `tag`.
-    ///
-    /// The return value is a result indicating whether the new pointer was stored. On success the
-    /// new pointer is returned. On failure the current value of the tagged atomic pointer and
-    /// `new` are returned.
-    ///
-    /// This operation uses the `AcqRel` ordering.
-    ///
-    /// # Panics
-    ///
-    /// Panics if the tag doesn't fit into the unused bits of the pointer, or if the pointer is
-    /// unaligned.
-    pub fn cas_box<'p>(
-        &self,
-        current: Ptr<'p, T>,
-        mut new: Box<T>,
-        tag: usize,
-    ) -> Result<Ptr<'p, T>, (Ptr<'p, T>, Box<T>)> {
-        let new_data = raw_and_tag(new.as_mut(), tag);
-        let previous = self.data.compare_and_swap(current.data, new_data, AcqRel);
-        if previous == current.data {
-            mem::forget(new);
-            unsafe { Ok(Ptr::from_data(new_data)) }
-        } else {
-            unsafe { Err((Ptr::from_data(previous), new)) }
-        }
-    }
-
-    /// If the tagged atomic pointer is equal to `current`, stores `new` tagged with `tag`.
-    ///
-    /// The return value is a result indicating whether the new pointer was stored. On success the
-    /// new pointer is returned. On failure the current value of the tagged atomic pointer and
-    /// `new` are returned.
-    ///
-    /// This operation uses the `SeqCst` ordering.
-    ///
-    /// # Panics
-    ///
-    /// Panics if the tag doesn't fit into the unused bits of the pointer, or if the pointer is
-    /// unaligned.
-    pub fn cas_box_sc<'p>(
-        &self,
-        current: Ptr<'p, T>,
-        mut new: Box<T>,
-        tag: usize,
-    ) -> Result<Ptr<'p, T>, (Ptr<'p, T>, Box<T>)> {
-        let new_data = raw_and_tag(new.as_mut(), tag);
-        let previous = self.data.compare_and_swap(current.data, new_data, SeqCst);
-        if previous == current.data {
-            mem::forget(new);
-            unsafe { Ok(Ptr::from_data(new_data)) }
-        } else {
-            unsafe { Err((Ptr::from_data(previous), new)) }
-        }
-    }
-
-    /// If the tagged atomic pointer is equal to `current`, stores `new` tagged with `tag`.
-    ///
-    /// The return value is a result indicating whether the new pointer was stored. On success the
-    /// new pointer is returned. On failure the current value of the tagged atomic pointer and
-    /// `new` are returned.
-    ///
-    /// This method can sometimes spuriously fail even when comparison succeeds, which can result
-    /// in more efficient code on some platforms.
-    ///
-    /// This operation uses the `AcqRel` ordering.
-    ///
-    /// # Panics
-    ///
-    /// Panics if the tag doesn't fit into the unused bits of the pointer, or if the pointer is
-    /// unaligned.
-    pub fn cas_box_weak<'p>(
-        &self,
-        current: Ptr<'p, T>,
-        mut new: Box<T>,
-        tag: usize
-    ) -> Result<Ptr<'p, T>, (Ptr<'p, T>, Box<T>)> {
-        let new_data = raw_and_tag(new.as_mut(), tag);
-        match self.data.compare_exchange_weak(current.data, new_data, AcqRel, Acquire) {
-            Ok(_) => {
-                mem::forget(new);
-                unsafe { Ok(Ptr::from_data(new_data)) }
-            }
-            Err(previous) => unsafe { Err((Ptr::from_data(previous), new)) },
-        }
-    }
-
-    /// If the tagged atomic pointer is equal to `current`, stores `new` tagged with `tag`.
-    ///
-    /// The return value is a result indicating whether the new pointer was stored. On success the
-    /// new pointer is returned. On failure the current value of the tagged atomic pointer and
-    /// `new` are returned.
-    ///
-    /// This method can sometimes spuriously fail even when comparison succeeds, which can result
-    /// in more efficient code on some platforms.
-    ///
-    /// This operation uses the `SeqCst` ordering.
-    ///
-    /// # Panics
-    ///
-    /// Panics if the tag doesn't fit into the unused bits of the pointer, or if the pointer is
-    /// unaligned.
-    pub fn cas_box_weak_sc<'p>(
-        &self,
-        current: Ptr<'p, T>,
-        mut new: Box<T>,
-        tag: usize,
-    ) -> Result<Ptr<'p, T>, (Ptr<'p, T>, Box<T>)> {
-        let new_data = raw_and_tag(new.as_mut(), tag);
-        match self.data.compare_exchange_weak(current.data, new_data, SeqCst, SeqCst) {
-            Ok(_) => {
-                mem::forget(new);
-                unsafe { Ok(Ptr::from_data(new_data)) }
-            }
-            Err(previous) => unsafe { Err((Ptr::from_data(previous), new)) },
-        }
-    }
-
-    /// If the tagged atomic pointer is equal to `current`, stores `new`.
-    ///
-    /// The return value is a result indicating whether the new pointer was stored. On failure the
-    /// current value of the tagged atomic pointer is returned.
-    ///
-    /// Argument `order` describes the memory ordering of this operation.
-    ///
-    /// # Panics
-    ///
-    /// Panics if the tag doesn't fit into the unused bits of the pointer, or if the pointer is
-    /// unaligned.
-    pub unsafe fn cas_raw(
-        &self,
-        current: (*mut T, usize),
-        new: (*mut T, usize),
-        order: Ordering,
-    ) -> Result<(), (*mut T, usize)> {
-        let current_data = raw_and_tag(current.0, current.1);
-        let new_data = raw_and_tag(new.0, new.1);
-        let previous = self.data.compare_and_swap(current_data, new_data, order);
-        if previous == current_data {
-            Ok(())
-        } else {
-            let ptr = Ptr::from_data(previous);
-            Err((ptr.as_raw(), ptr.tag()))
-        }
-    }
-
-    /// If the tagged atomic pointer is equal to `current`, stores `new`.
-    ///
-    /// The return value is a result indicating whether the new pointer was stored. On failure the
-    /// current value of the tagged atomic pointer is returned.
-    ///
-    /// This method can sometimes spuriously fail even when comparison succeeds, which can result
-    /// in more efficient code on some platforms.
-    ///
-    /// Argument `order` describes the memory ordering of this operation.
-    ///
-    /// # Panics
-    ///
-    /// Panics if the tag doesn't fit into the unused bits of the pointer, or if the pointer is
-    /// unaligned.
-    pub unsafe fn cas_raw_weak(
-        &self,
-        current: (*mut T, usize),
-        new: (*mut T, usize),
-        order: Ordering,
-    ) -> Result<(), (*mut T, usize)> {
-        let current_data = raw_and_tag(current.0, current.1);
-        let new_data = raw_and_tag(new.0, new.1);
-        let previous = self.data.compare_and_swap(current_data, new_data, order);
-        if previous == current_data {
-            Ok(())
-        } else {
-            let ptr = Ptr::from_data(previous);
-            Err((ptr.as_raw(), ptr.tag()))
-        }
-    }
-}
-
-impl<T> Default for Atomic<T> {
-    fn default() -> Self {
-        Atomic {
-            data: AtomicUsize::new(0),
-            _marker: PhantomData,
-        }
-    }
-}
-
-/// A tagged nullable pointer.
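-///
-/// A small illustrative sketch of tagging (the tag must fit in the pointer's unused alignment
-/// bits):
-///
-/// ```
-/// use coco::epoch::Ptr;
-///
-/// // A null pointer to `i32` has two unused low bits, so tags 0 through 3 fit.
-/// let p = Ptr::<i32>::null(1);
-/// assert!(p.is_null());
-/// assert_eq!(p.tag(), 1);
-/// assert_eq!(p.with_tag(2).tag(), 2);
-/// ```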
-#[derive(Debug)]
-pub struct Ptr<'p, T: 'p> {
-    data: usize,
-    _marker: PhantomData<(*mut T, &'p T)>, // !Send + !Sync
-}
-
-impl<'a, T> Clone for Ptr<'a, T> {
-    fn clone(&self) -> Self {
-        Ptr {
-            data: self.data,
-            _marker: PhantomData,
-        }
-    }
-}
-
-impl<'a, T> Copy for Ptr<'a, T> {}
-
-impl<'p, T: 'p> Ptr<'p, T> {
-    /// Constructs a nullable pointer from raw data.
-    unsafe fn from_data(data: usize) -> Self {
-        Ptr {
-            data: data,
-            _marker: PhantomData,
-        }
-    }
-
-    /// Returns a null pointer with a tag.
-    ///
-    /// # Panics
-    ///
-    /// Panics if the tag doesn't fit into the unused bits of an aligned pointer.
-    pub fn null(tag: usize) -> Self {
-        unsafe { Self::from_data(raw_and_tag::<T>(ptr::null_mut(), tag)) }
-    }
-
-    /// Constructs a tagged pointer from a raw pointer and tag.
-    ///
-    /// # Panics
-    ///
-    /// Panics if the tag doesn't fit into the unused bits of the pointer, or if the pointer is
-    /// unaligned.
-    pub unsafe fn from_raw(raw: *mut T, tag: usize) -> Self {
-        Self::from_data(raw_and_tag(raw, tag))
-    }
-
-    /// Returns `true` if the pointer is null.
-    pub fn is_null(&self) -> bool {
-        self.as_raw().is_null()
-    }
-
-    /// Converts the pointer to a reference.
-    pub fn as_ref(&self) -> Option<&'p T> {
-        unsafe { self.as_raw().as_ref() }
-    }
-
-    /// Converts the pointer to a raw pointer.
-    pub fn as_raw(&self) -> *mut T {
-        (self.data & !low_bits::<T>()) as *mut T
-    }
-
-    /// Returns a reference to the pointed-to object.
-    ///
-    /// # Panics
-    ///
-    /// Panics if the pointer is null.
-    pub fn unwrap(&self) -> &'p T {
-        self.as_ref().unwrap()
-    }
-
-    /// Returns the tag.
-    pub fn tag(&self) -> usize {
-        self.data & low_bits::<T>()
-    }
-
-    /// Constructs a new tagged pointer with a different tag.
-    ///
-    /// # Panics
-    ///
-    /// Panics if the tag doesn't fit into the unused bits of the pointer.
-    pub fn with_tag(&self, tag: usize) -> Self {
-        unsafe { Self::from_raw(self.as_raw(), tag) }
-    }
-}
-
-impl<'p, T> Default for Ptr<'p, T> {
-    fn default() -> Self {
-        Ptr {
-            data: 0,
-            _marker: PhantomData,
-        }
-    }
-}
deleted file mode 100644
--- a/third_party/rust/coco/src/epoch/garbage.rs
+++ /dev/null
@@ -1,736 +0,0 @@
-//! Garbage collection.
-//!
-//! # Bags and garbage queues
-//!
-//! Objects that get removed from concurrent data structures must be stashed away until the global
-//! epoch sufficiently advances so that they become safe for destruction. Pointers to such garbage
-//! objects are kept in bags.
-//!
-//! When a bag becomes full, it is marked with the current global epoch and pushed into a
-//! `Garbage` queue. Usually each instance of a concurrent data structure has its own `Garbage`
-//! queue, which gets fully destroyed as soon as the data structure is dropped.
-//!
-//! Whenever a bag is pushed into the queue, some garbage is collected and destroyed along the way.
-//! Garbage collection can also be manually triggered by calling method `collect`.
-//!
-//! # The global garbage queue
-//!
-//! Some data structures don't own objects but merely transfer them between threads, e.g. queues.
-//! As such, queues don't execute destructors - they only allocate and free some memory. It would
-//! be costly for each queue to handle its own `Garbage`, so there is a special global queue that
-//! all data structures can share.
-//!
-//! The global garbage queue is very efficient. Each thread has a thread-local bag that is
-//! populated with garbage, and when it becomes full, it is pushed into the queue. This design
-//! reduces contention on data structures. The global queue cannot be explicitly accessed - the
-//! only way to interact with it is by calling the function `defer_free`.
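-//!
-//! As a minimal sketch (mirroring this crate's own tests), an allocation can be handed to the
-//! global queue like this:
-//!
-//! ```
-//! use coco::epoch;
-//!
-//! epoch::pin(|pin| unsafe {
-//!     // Defer freeing the allocation until no pinned thread can still observe it.
-//!     let p = Box::into_raw(Box::new(7));
-//!     epoch::defer_free(p, 1, pin);
-//! });
-//! ```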
-
-use std::cell::UnsafeCell;
-use std::cmp;
-use std::fmt;
-use std::mem;
-use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT};
-use std::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed, SeqCst};
-
-use epoch::{self, Atomic, Pin, Ptr};
-
-/// Maximum number of objects a bag can contain.
-#[cfg(not(feature = "strict_gc"))]
-const MAX_OBJECTS: usize = 64;
-#[cfg(feature = "strict_gc")]
-const MAX_OBJECTS: usize = 4;
-
-/// The global epoch.
-///
-/// The last bit in this number is unused and is always zero. Every so often the global epoch is
-/// incremented, i.e. we say it "advances". A pinned thread may advance the global epoch only if
-/// all currently pinned threads have been pinned in the current epoch.
-///
-/// If an object became garbage in some epoch, then we can be sure that after two advancements no
-/// thread will hold a reference to it. That is the crux of safe memory reclamation.
-pub static EPOCH: AtomicUsize = ATOMIC_USIZE_INIT;
-
-/// Holds removed objects that will be eventually destroyed.
-pub struct Bag {
-    /// Number of objects in the bag.
-    len: AtomicUsize,
-    /// Removed objects.
-    objects: [UnsafeCell<(unsafe fn(*mut u8, usize), *mut u8, usize)>; MAX_OBJECTS],
-    /// The global epoch at the moment when this bag got pushed into the queue.
-    epoch: usize,
-    /// The next bag in the queue.
-    next: Atomic<Bag>,
-}
-
-impl Bag {
-    /// Returns a new, empty bag.
-    pub fn new() -> Self {
-        Bag {
-            len: AtomicUsize::new(0),
-            objects: unsafe { mem::zeroed() },
-            epoch: unsafe { mem::uninitialized() },
-            next: Atomic::null(0),
-        }
-    }
-
-    /// Returns `true` if the bag is empty.
-    pub fn is_empty(&self) -> bool {
-        self.len.load(Relaxed) == 0
-    }
-
-    /// Attempts to insert a garbage object into the bag and returns `true` if succeeded.
-    pub fn try_insert<T>(&self, destroy: unsafe fn(*mut T, usize), object: *mut T, count: usize)
-                         -> bool {
-        // Erase type `*mut T` and use `*mut u8` instead.
-        let destroy: unsafe fn(*mut u8, usize) = unsafe { mem::transmute(destroy) };
-        let object = object as *mut u8;
-
-        let mut len = self.len.load(Acquire);
-        loop {
-            // Is the bag full?
-            if len == self.objects.len() {
-                return false;
-            }
-
-            // Try incrementing `len`.
-            match self.len.compare_exchange(len, len + 1, AcqRel, Acquire) {
-                Ok(_) => {
-                    // Success! Now store the garbage object into the array. The current thread
-                    // will synchronize with the thread that destroys it through epoch advancement.
-                    unsafe { *self.objects[len].get() = (destroy, object, count) }
-                    return true;
-                }
-                Err(l) => len = l,
-            }
-        }
-    }
-
-    /// Destroys all objects in the bag.
-    ///
-    /// Note: can be called only once!
-    unsafe fn destroy_all_objects(&self) {
-        for cell in self.objects.iter().take(self.len.load(Relaxed)) {
-            let (destroy, object, count) = *cell.get();
-            destroy(object, count);
-        }
-    }
-}
-
-/// A garbage queue.
-///
-/// This is where a concurrent data structure can store removed objects for deferred destruction.
-///
-/// Stored garbage objects are first kept in the garbage buffer. When the buffer becomes full, its
-/// objects are flushed into the garbage queue. Flushing can be manually triggered by calling
-/// [`flush`].
-///
-/// Some garbage in the queue can be manually collected by calling [`collect`].
-///
-/// [`flush`]: method.flush.html
-/// [`collect`]: method.collect.html
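-///
-/// A minimal usage sketch (mirroring the tests in this module):
-///
-/// ```
-/// use coco::epoch::{self, Garbage};
-///
-/// let g = Garbage::new();
-/// epoch::pin(|pin| unsafe {
-///     // Defer dropping a boxed value, then flush the buffered garbage into the queue.
-///     let p = Box::into_raw(Box::new(7));
-///     g.defer_drop(p, 1, pin);
-///     g.flush(pin);
-/// });
-/// ```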
-pub struct Garbage {
-    /// Head of the queue.
-    head: Atomic<Bag>,
-    /// Tail of the queue.
-    tail: Atomic<Bag>,
-    /// The next bag that will be pushed into the queue as soon as it gets full.
-    pending: Atomic<Bag>,
-}
-
-unsafe impl Send for Garbage {}
-unsafe impl Sync for Garbage {}
-
-impl Garbage {
-    /// Returns a new, empty garbage queue.
-    pub fn new() -> Self {
-        let garbage = Garbage {
-            head: Atomic::null(0),
-            tail: Atomic::null(0),
-            pending: Atomic::null(0),
-        };
-
-        // This code may be executing while a thread harness is initializing, so normal pinning
-        // would try to access it while it is being initialized. Such accesses fail with a panic.
-        // We must therefore cheat by creating a fake pin.
-        let pin = unsafe { &mem::zeroed::<Pin>() };
-
-        // The head of the queue is always a sentinel entry.
-        let sentinel = garbage.head.store_box(Box::new(Bag::new()), 0, pin);
-        garbage.tail.store(sentinel);
-
-        garbage
-    }
-
-    /// Attempts to compare-and-swap the pending bag `old` with a new, empty one.
-    ///
-    /// The return value is a result indicating whether the compare-and-swap successfully installed
-    /// a new bag. On success the new bag is returned. On failure the current, more up-to-date
-    /// pending bag is returned.
-    fn replace_pending<'p>(&self, old: Ptr<'p, Bag>, pin: &'p Pin)
-                           -> Result<Ptr<'p, Bag>, Ptr<'p, Bag>> {
-        match self.pending.cas_box(old, Box::new(Bag::new()), 0) {
-            Ok(new) => {
-                if !old.is_null() {
-                    // Push the old bag into the queue.
-                    let bag = unsafe { Box::from_raw(old.as_raw()) };
-                    self.push(bag, pin);
-                }
-
-                // Spare some cycles on garbage collection.
-                // Note: This may itself produce garbage and allocate new bags.
-                epoch::thread::try_advance(pin);
-                self.collect(pin);
-
-                Ok(new)
-            }
-            Err((pending, _)) => Err(pending),
-        }
-    }
-
-    /// Adds an object that will later be freed.
-    ///
-    /// The specified object is an array allocated at address `object` and consists of `count`
-    /// elements of type `T`.
-    ///
-    /// This method inserts the object into the garbage buffer. When the buffer becomes full, its
-    /// objects are flushed into the garbage queue.
-    pub unsafe fn defer_free<T>(&self, object: *mut T, count: usize, pin: &Pin) {
-        unsafe fn free<T>(ptr: *mut T, count: usize) {
-            // Free the memory, but don't run the destructors.
-            drop(Vec::from_raw_parts(ptr, 0, count));
-        }
-        self.defer_destroy(free, object, count, pin);
-    }
-
-    /// Adds an object that will later be dropped and freed.
-    ///
-    /// The specified object is an array allocated at address `object` and consists of `count`
-    /// elements of type `T`.
-    ///
-    /// This method inserts the object into the garbage buffer. When the buffer becomes full, its
-    /// objects are flushed into the garbage queue.
-    ///
-    /// Note: The object must be `Send + 'self`.
-    pub unsafe fn defer_drop<T>(&self, object: *mut T, count: usize, pin: &Pin) {
-        unsafe fn destruct<T>(ptr: *mut T, count: usize) {
-            // Run the destructors and free the memory.
-            drop(Vec::from_raw_parts(ptr, count, count));
-        }
-        self.defer_destroy(destruct, object, count, pin);
-    }
-
-    /// Adds an object that will later be destroyed using `destroy`.
-    ///
-    /// The specified object is an array allocated at address `object` and consists of `count`
-    /// elements of type `T`.
-    ///
-    /// This method inserts the object into the garbage buffer. When the buffer becomes full, its
-    /// objects are flushed into the garbage queue.
-    ///
-    /// Note: The object must be `Send + 'self`.
-    pub unsafe fn defer_destroy<T>(
-        &self,
-        destroy: unsafe fn(*mut T, usize),
-        object: *mut T,
-        count: usize,
-        pin: &Pin
-    ) {
-        let mut pending = self.pending.load(pin);
-        loop {
-            match pending.as_ref() {
-                Some(p) if p.try_insert(destroy, object, count) => break,
-                _ => {
-                    match self.replace_pending(pending, pin) {
-                        Ok(p) => pending = p,
-                        Err(p) => pending = p,
-                    }
-                }
-            }
-        }
-    }
-
-    /// Flushes the buffered garbage.
-    ///
-    /// It is wise to flush the garbage just after passing a very large object to one of the
-    /// `defer_*` methods, so that it isn't sitting in the buffer for a long time.
-    pub fn flush(&self, pin: &Pin) {
-        let mut pending = self.pending.load(pin);
-        loop {
-            match pending.as_ref() {
-                None => break,
-                Some(p) => {
-                    if p.is_empty() {
-                        // The bag is already empty.
-                        break;
-                    } else {
-                        match self.replace_pending(pending, pin) {
-                            Ok(_) => break,
-                            Err(p) => pending = p,
-                        }
-                    }
-                }
-            }
-        }
-    }
-
-    /// Collects some garbage from the queue and destroys it.
-    ///
-    /// Generally speaking, it's not necessary to call this method because garbage production
-    /// already triggers garbage destruction. However, if there are long periods without garbage
-    /// production, it might be a good idea to call this method from time to time.
-    ///
-    /// This method collects several buffers worth of garbage objects.
-    pub fn collect(&self, pin: &Pin) {
-        /// Number of bags to destroy.
-        const COLLECT_STEPS: usize = 8;
-
-        let epoch = EPOCH.load(SeqCst);
-        let condition = |bag: &Bag| {
-            // A pinned thread can witness at most one epoch advancement. Therefore, any bag that
-            // is within one epoch of the current one cannot be destroyed yet.
-            let diff = epoch.wrapping_sub(bag.epoch);
-            cmp::min(diff, 0usize.wrapping_sub(diff)) > 2
-        };
-
-        for _ in 0..COLLECT_STEPS {
-            match self.try_pop_if(&condition, pin) {
-                None => break,
-                Some(bag) => unsafe { bag.destroy_all_objects() },
-            }
-        }
-    }
-
-    /// Pushes a bag into the queue.
-    fn push(&self, mut bag: Box<Bag>, pin: &Pin) {
-        // Mark the bag with the current epoch.
-        bag.epoch = EPOCH.load(SeqCst);
-
-        let mut tail = self.tail.load(pin);
-        loop {
-            let next = tail.unwrap().next.load(pin);
-            if next.is_null() {
-                // Try installing the new bag.
-                match tail.unwrap().next.cas_box(next, bag, 0) {
-                    Ok(bag) => {
-                        // Tail pointer shouldn't fall behind. Let's move it forward.
-                        let _ = self.tail.cas(tail, bag);
-                        break;
-                    }
-                    Err((t, b)) => {
-                        tail = t;
-                        bag = b;
-                    }
-                }
-            } else {
-                // This is not the actual tail. Move the tail pointer forward.
-                match self.tail.cas(tail, next) {
-                    Ok(()) => tail = next,
-                    Err(t) => tail = t,
-                }
-            }
-        }
-    }
-
-    /// Attempts to pop a bag from the front of the queue and returns it if `condition` is met.
-    ///
-    /// If the bag in the front doesn't meet it or if the queue is empty, `None` is returned.
-    fn try_pop_if<'p, F>(&self, condition: F, pin: &'p Pin) -> Option<&'p Bag>
-        where F: Fn(&Bag) -> bool
-    {
-        let mut head = self.head.load(pin);
-        loop {
-            let next = head.unwrap().next.load(pin);
-            match next.as_ref() {
-                Some(n) if condition(n) => {
-                    // Try moving the head forward.
-                    match self.head.cas(head, next) {
-                        Ok(()) => {
-                            // The old head may be later freed.
-                            unsafe { epoch::defer_free(head.as_raw(), 1, pin) }
-                            // The new head holds the popped value (heads are sentinels!).
-                            return Some(n);
-                        }
-                        Err(h) => head = h,
-                    }
-                }
-                None | Some(_) => return None,
-            }
-        }
-    }
-}
-
-impl Drop for Garbage {
-    fn drop(&mut self) {
-        unsafe {
-            // Load the pending bag, then destroy it and all its objects.
-            let pending = self.pending.load_raw(Relaxed).0;
-            if !pending.is_null() {
-                (*pending).destroy_all_objects();
-                drop(Vec::from_raw_parts(pending, 0, 1));
-            }
-
-            // Destroy all bags and objects in the queue.
-            let mut head = self.head.load_raw(Relaxed).0;
-            loop {
-                // Load the next bag and destroy the current head.
-                let next = (*head).next.load_raw(Relaxed).0;
-                drop(Vec::from_raw_parts(head, 0, 1));
-
-                // If the next node is null, we've reached the end of the queue.
-                if next.is_null() {
-                    break;
-                }
-
-                // Move one step forward.
-                head = next;
-
-                // Destroy all objects in this bag.
-                // The bag itself will be destroyed in the next iteration of the loop.
-                (*head).destroy_all_objects();
-            }
-        }
-    }
-}
-
-impl fmt::Debug for Garbage {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(f, "Garbage {{ ... }}")
-    }
-}
-
-/// Returns a reference to a global garbage, which is lazily initialized.
-fn global() -> &'static Garbage {
-    static GLOBAL: AtomicUsize = ATOMIC_USIZE_INIT;
-
-    let current = GLOBAL.load(Acquire);
-
-    let garbage = if current == 0 {
-        // Initialize the singleton.
-        let raw = Box::into_raw(Box::new(Garbage::new()));
-        let new = raw as usize;
-        let previous = GLOBAL.compare_and_swap(0, new, AcqRel);
-
-        if previous == 0 {
-            // Ok, we initialized it.
-            new
-        } else {
-            // Another thread has already initialized it.
-            unsafe { drop(Box::from_raw(raw)); }
-            previous
-        }
-    } else {
-        current
-    };
-
-    unsafe { &*(garbage as *const Garbage) }
-}
-
-/// Pushes a bag into the global garbage.
-pub fn push(bag: Box<Bag>, pin: &Pin) {
-    global().push(bag, pin);
-}
-
-/// Collects several bags from the global queue and destroys their objects.
-pub fn collect(pin: &Pin) {
-    global().collect(pin);
-}
-
-/// Destroys the global garbage.
-///
-/// # Safety
-///
-/// This function may only be called at the very end of the main thread, and only if the main
-/// thread has never been pinned.
-#[cfg(feature = "internals")]
-pub unsafe fn destroy_global() {
-    let global = global() as *const Garbage as *mut Garbage;
-    drop(Box::from_raw(global));
-}
-
-#[cfg(test)]
-mod tests {
-    extern crate rand;
-
-    use std::mem;
-    use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT};
-    use std::sync::atomic::Ordering::SeqCst;
-    use std::sync::Arc;
-    use std::thread;
-
-    use self::rand::{Rng, thread_rng};
-
-    use super::Garbage;
-    use ::epoch;
-
-    #[test]
-    fn smoke() {
-        let g = Garbage::new();
-        epoch::pin(|pin| {
-            let a = Box::into_raw(Box::new(7));
-            unsafe { g.defer_free(a, 1, pin) }
-            assert!(!g.pending.load(pin).unwrap().is_empty());
-        });
-    }
-
-    #[test]
-    fn flush_pending() {
-        let g = Garbage::new();
-        let mut rng = thread_rng();
-
-        for _ in 0..100_000 {
-            epoch::pin(|pin| unsafe {
-                let a = Box::into_raw(Box::new(7));
-                g.defer_drop(a, 1, pin);
-
-                if rng.gen_range(0, 100) == 0 {
-                    g.flush(pin);
-                    assert!(g.pending.load(pin).unwrap().is_empty());
-                }
-            });
-        }
-    }
-
-    #[test]
-    fn incremental() {
-        const COUNT: usize = 100_000;
-        static DESTROYS: AtomicUsize = ATOMIC_USIZE_INIT;
-
-        let g = Garbage::new();
-
-        epoch::pin(|pin| unsafe {
-            for _ in 0..COUNT {
-                let a = Box::into_raw(Box::new(7i32));
-                unsafe fn destroy(ptr: *mut i32, count: usize) {
-                    drop(Box::from_raw(ptr));
-                    DESTROYS.fetch_add(count, SeqCst);
-                }
-                g.defer_destroy(destroy, a, 1, pin);
-            }
-            g.flush(pin);
-        });
-
-        let mut last = 0;
-
-        while last < COUNT {
-            let curr = DESTROYS.load(SeqCst);
-            assert!(curr - last < 1000);
-            last = curr;
-
-            epoch::pin(|pin| g.collect(pin));
-        }
-        assert!(DESTROYS.load(SeqCst) == COUNT);
-    }
-
-    #[test]
-    fn buffering() {
-        const COUNT: usize = 10;
-        static DESTROYS: AtomicUsize = ATOMIC_USIZE_INIT;
-
-        let g = Garbage::new();
-
-        epoch::pin(|pin| unsafe {
-            for _ in 0..COUNT {
-                let a = Box::into_raw(Box::new(7i32));
-                unsafe fn destroy(ptr: *mut i32, count: usize) {
-                    drop(Box::from_raw(ptr));
-                    DESTROYS.fetch_add(count, SeqCst);
-                }
-                g.defer_destroy(destroy, a, 1, pin);
-            }
-        });
-
-        for _ in 0..100_000 {
-            epoch::pin(|pin| {
-                g.collect(pin);
-                assert!(DESTROYS.load(SeqCst) < COUNT);
-            });
-        }
-        epoch::pin(|pin| g.flush(pin));
-
-        while DESTROYS.load(SeqCst) < COUNT {
-            epoch::pin(|pin| g.collect(pin));
-        }
-        assert_eq!(DESTROYS.load(SeqCst), COUNT);
-    }
-
-    #[test]
-    fn count_drops() {
-        const COUNT: usize = 100_000;
-        static DROPS: AtomicUsize = ATOMIC_USIZE_INIT;
-
-        struct Elem(i32);
-
-        impl Drop for Elem {
-            fn drop(&mut self) {
-                DROPS.fetch_add(1, SeqCst);
-            }
-        }
-
-        let g = Garbage::new();
-
-        epoch::pin(|pin| unsafe {
-            for _ in 0..COUNT {
-                let a = Box::into_raw(Box::new(Elem(7i32)));
-                g.defer_drop(a, 1, pin);
-            }
-            g.flush(pin);
-        });
-
-        while DROPS.load(SeqCst) < COUNT {
-            epoch::pin(|pin| g.collect(pin));
-        }
-        assert_eq!(DROPS.load(SeqCst), COUNT);
-    }
-
-    #[test]
-    fn count_destroy() {
-        const COUNT: usize = 100_000;
-        static DESTROYS: AtomicUsize = ATOMIC_USIZE_INIT;
-
-        let g = Garbage::new();
-
-        epoch::pin(|pin| unsafe {
-            for _ in 0..COUNT {
-                let a = Box::into_raw(Box::new(7i32));
-                unsafe fn destroy(ptr: *mut i32, count: usize) {
-                    drop(Box::from_raw(ptr));
-                    DESTROYS.fetch_add(count, SeqCst);
-                }
-                g.defer_destroy(destroy, a, 1, pin);
-            }
-            g.flush(pin);
-        });
-
-        while DESTROYS.load(SeqCst) < COUNT {
-            epoch::pin(|pin| g.collect(pin));
-        }
-        assert_eq!(DESTROYS.load(SeqCst), COUNT);
-    }
-
-    #[test]
-    fn drop_array() {
-        const COUNT: usize = 700;
-        static DROPS: AtomicUsize = ATOMIC_USIZE_INIT;
-
-        struct Elem(i32);
-
-        impl Drop for Elem {
-            fn drop(&mut self) {
-                DROPS.fetch_add(1, SeqCst);
-            }
-        }
-
-        let g = Garbage::new();
-
-        epoch::pin(|pin| unsafe {
-            let mut v = Vec::with_capacity(COUNT);
-            for i in 0..COUNT {
-                v.push(Elem(i as i32));
-            }
-
-            g.defer_drop(v.as_mut_ptr(), v.len(), pin);
-            g.flush(pin);
-
-            mem::forget(v);
-        });
-
-        while DROPS.load(SeqCst) < COUNT {
-            epoch::pin(|pin| g.collect(pin));
-        }
-        assert_eq!(DROPS.load(SeqCst), COUNT);
-    }
-
-    #[test]
-    fn destroy_array() {
-        const COUNT: usize = 100_000;
-        static DESTROYS: AtomicUsize = ATOMIC_USIZE_INIT;
-
-        let g = Garbage::new();
-
-        epoch::pin(|pin| unsafe {
-            let mut v = Vec::with_capacity(COUNT);
-            for i in 0..COUNT {
-                v.push(i as i32);
-            }
-
-            unsafe fn destroy(ptr: *mut i32, count: usize) {
-                assert!(count == COUNT);
-                drop(Vec::from_raw_parts(ptr, count, count));
-                DESTROYS.fetch_add(count, SeqCst);
-            }
-            g.defer_destroy(destroy, v.as_mut_ptr(), v.len(), pin);
-            g.flush(pin);
-
-            mem::forget(v);
-        });
-
-        while DESTROYS.load(SeqCst) < COUNT {
-            epoch::pin(|pin| g.collect(pin));
-        }
-        assert_eq!(DESTROYS.load(SeqCst), COUNT);
-    }
-
-    #[test]
-    fn drop_garbage() {
-        const COUNT: usize = 100_000;
-        static DROPS: AtomicUsize = ATOMIC_USIZE_INIT;
-
-        struct Elem(i32);
-
-        impl Drop for Elem {
-            fn drop(&mut self) {
-                DROPS.fetch_add(1, SeqCst);
-            }
-        }
-
-        let g = Garbage::new();
-
-        epoch::pin(|pin| unsafe {
-            for _ in 0..COUNT {
-                let a = Box::into_raw(Box::new(Elem(7i32)));
-                g.defer_drop(a, 1, pin);
-            }
-            g.flush(pin);
-        });
-
-        drop(g);
-        assert_eq!(DROPS.load(SeqCst), COUNT);
-    }
-
-    #[test]
-    fn stress() {
-        const THREADS: usize = 8;
-        const COUNT: usize = 100_000;
-        static DROPS: AtomicUsize = ATOMIC_USIZE_INIT;
-
-        struct Elem(i32);
-
-        impl Drop for Elem {
-            fn drop(&mut self) {
-                DROPS.fetch_add(1, SeqCst);
-            }
-        }
-
-        let g = Arc::new(Garbage::new());
-
-        let threads = (0..THREADS).map(|_| {
-            let g = g.clone();
-
-            thread::spawn(move || {
-                for _ in 0..COUNT {
-                    epoch::pin(|pin| unsafe {
-                        let a = Box::into_raw(Box::new(Elem(7i32)));
-                        g.defer_drop(a, 1, pin);
-                    });
-                }
-            })
-        }).collect::<Vec<_>>();
-
-        for t in threads {
-            t.join().unwrap();
-        }
-
-        drop(g);
-        assert_eq!(DROPS.load(SeqCst), COUNT * THREADS);
-    }
-}
deleted file mode 100644
--- a/third_party/rust/coco/src/epoch/mod.rs
+++ /dev/null
@@ -1,40 +0,0 @@
-//! Epoch-based garbage collection.
-//!
-//! # Pointers
-//!
-//! Concurrent collections are built using atomic pointers. This module provides [`Atomic`], which
-//! is just a shared atomic pointer to a heap-allocated object. Loading an [`Atomic`] yields a
-//! [`Ptr`], which is an epoch-protected pointer through which the loaded object can be safely
-//! read.
-//!
-//! # Pinning
-//!
-//! Before an [`Atomic`] can be loaded, the current thread must be pinned. By pinning a thread we
-//! declare that any object that gets removed from now on must not be destructed just yet. Garbage
-//! collection of newly removed objects is suspended until the thread gets unpinned.
-//!
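-//! As a minimal sketch (assuming the `Atomic` constructors and methods used throughout this
-//! crate - `null`, `store_box`, and `load` - are part of the public API), a value can be
-//! published and read back while pinned:
-//!
-//! ```
-//! use coco::epoch::{self, Atomic};
-//!
-//! let a = Atomic::null(0);
-//! epoch::pin(|pin| {
-//!     // Publish a heap-allocated value, then read it back through an epoch-protected `Ptr`.
-//!     a.store_box(Box::new(7), 0, pin);
-//!     assert_eq!(a.load(pin).as_ref(), Some(&7));
-//! });
-//! ```
-//!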
-//! # Garbage
-//!
-//! Objects that get removed from concurrent collections must be stashed away until all currently
-//! pinned threads get unpinned. Such objects can be stored into a [`Garbage`], where they are kept
-//! until the right time for their destruction comes.
-//!
-//! There is a global shared instance of [`Garbage`], which can only deallocate memory. It cannot
-//! drop objects or run arbitrary destruction procedures. Removed objects can be stored into it by
-//! calling [`defer_free`].
-//!
-//! [`Atomic`]: struct.Atomic.html
-//! [`Garbage`]: struct.Garbage.html
-//! [`Ptr`]: struct.Ptr.html
-//! [`defer_free`]: fn.defer_free.html
-
-mod atomic;
-mod garbage;
-mod thread;
-
-pub use self::atomic::{Atomic, Ptr};
-pub use self::garbage::Garbage;
-pub use self::thread::{Pin, defer_free, flush, is_pinned, pin};
-
-#[cfg(feature = "internals")]
-pub use self::garbage::destroy_global;
deleted file mode 100644
--- a/third_party/rust/coco/src/epoch/thread.rs
+++ /dev/null
@@ -1,462 +0,0 @@
-//! Thread synchronization and pinning.
-//!
-//! # Registration
-//!
-//! In order to track all threads in one place, we need some form of thread registration. Every
-//! thread has a thread-local so-called "harness" that registers it the first time it is pinned,
-//! and unregisters when it exits.
-//!
-//! Registered threads are tracked in a global lock-free singly-linked list of thread entries. The
-//! head of this list is accessed by calling the `participants` function.
-//!
-//! # Thread entries
-//!
-//! Thread entries are implemented as the `Thread` data type. Every entry contains an integer that
-//! tells whether the thread is pinned and, if so, what the global epoch was at the time it was
-//! pinned. Entries also hold a pin counter that aids in periodic global epoch advancement.
-
-use std::cell::Cell;
-use std::mem;
-use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT};
-use std::sync::atomic::Ordering::{Relaxed, Release, SeqCst};
-
-use epoch::Atomic;
-use epoch::garbage::{self, Bag, EPOCH};
-
-thread_local! {
-    /// The thread registration harness.
-    ///
-    /// The harness is lazily initialized on its first use. Initialization performs registration.
-    /// If initialized, the harness will get destructed on thread exit, which in turn unregisters
-    /// the thread.
-    static HARNESS: Harness = Harness {
-        thread: Thread::register(),
-        is_pinned: Cell::new(false),
-        pin_count: Cell::new(0),
-        bag: Cell::new(Box::into_raw(Box::new(Bag::new()))),
-    };
-}
-
-/// Holds thread-local data and unregisters the thread when dropped.
-struct Harness {
-    /// This thread's entry in the participants list.
-    thread: *const Thread,
-    /// Whether the thread is currently pinned.
-    is_pinned: Cell<bool>,
-    /// Total number of pinnings performed.
-    pin_count: Cell<usize>,
-    /// The local bag of objects that will be later freed.
-    bag: Cell<*mut Bag>,
-}
-
-impl Drop for Harness {
-    fn drop(&mut self) {
-        // Now that the thread is exiting, we must move the local bag into the global garbage
-        // queue. Also, let's try advancing the epoch and help free some garbage.
-        let thread = unsafe { &*self.thread };
-
-        // If we called `pin()` here, it would try to access `HARNESS` and then panic.
-        // To work around the problem, we manually pin the thread.
-        let pin = &Pin { bag: &self.bag };
-        thread.set_pinned(pin);
-
-        // Spare some cycles on garbage collection.
-        // Note: This may itself produce garbage and in turn allocate new bags.
-        try_advance(pin);
-        garbage::collect(pin);
-
-        // Push the local bag into the global garbage queue.
-        let bag = unsafe { Box::from_raw(self.bag.get()) };
-        garbage::push(bag, pin);
-
-        // Manually unpin the thread.
-        thread.set_unpinned();
-
-        // Mark the thread entry as deleted.
-        thread.unregister();
-    }
-}
-
-/// An entry in the linked list of participating threads.
-struct Thread {
-    /// The least significant bit is set if the thread is currently pinned. The rest of the bits
-    /// encode the current epoch.
-    state: AtomicUsize,
-    /// The next thread in the linked list of participants. If the tag is 1, this entry is deleted
-    /// and can be unlinked from the list.
-    next: Atomic<Thread>,
-}
-
-impl Thread {
-    /// Marks the thread as pinned.
-    ///
-    /// Must not be called if the thread is already pinned!
-    #[inline]
-    fn set_pinned(&self, _pin: &Pin) {
-        let epoch = EPOCH.load(Relaxed);
-        let state = epoch | 1;
-
-        // Now we must store `state` into `self.state`. It's important that any succeeding loads
-        // don't get reordered with this store. In other words, this thread's epoch must be fully
-        // announced to other threads. Only then does it become safe to load from the shared memory.
-        if cfg!(any(target_arch = "x86", target_arch = "x86_64")) {
-            // On x86 architectures we have a choice:
-            // 1. `atomic::fence(SeqCst)`, which compiles to a `mfence` instruction.
-            // 2. `compare_and_swap(_, _, SeqCst)`, which compiles to a `lock cmpxchg` instruction.
-            //
-            // Both instructions have the effect of a full barrier, but the second one seems to be
-            // faster in this particular case.
-            let previous = self.state.load(Relaxed);
-            self.state.compare_and_swap(previous, state, SeqCst);
-        } else {
-            self.state.store(state, Relaxed);
-            ::std::sync::atomic::fence(SeqCst);
-        }
-    }
-
-    /// Marks the thread as unpinned.
-    #[inline]
-    fn set_unpinned(&self) {
-        // Clear the last bit.
-        // We don't need to preserve the epoch, so just store the number zero.
-        self.state.store(0, Release);
-    }
-
-    /// Registers a thread by adding a new entry to the list of participating threads.
-    ///
-    /// Returns a pointer to the newly allocated entry.
-    fn register() -> *mut Thread {
-        let list = participants();
-
-        let mut new = Box::new(Thread {
-            state: AtomicUsize::new(0),
-            next: Atomic::null(0),
-        });
-
-        // This code is executing while the thread harness is initializing, so normal pinning would
-        // try to access it while it is being initialized. Such accesses fail with a panic. We must
-        // therefore cheat by creating a fake pin.
-        let pin = unsafe { &mem::zeroed::<Pin>() };
-
-        let mut head = list.load(pin);
-        loop {
-            new.next.store(head);
-
-            // Try installing this thread's entry as the new head.
-            match list.cas_box(head, new, 0) {
-                Ok(n) => return n.as_raw(),
-                Err((h, n)) => {
-                    head = h;
-                    new = n;
-                }
-            }
-        }
-    }
-
-    /// Unregisters the thread by marking its entry as deleted.
-    ///
-    /// This function doesn't physically remove the entry from the linked list, though. That will
-    /// be done by a future call to `try_advance`.
-    fn unregister(&self) {
-        // This code is executing while the thread harness is initializing, so normal pinning would
-        // try to access it while it is being initialized. Such accesses fail with a panic. We must
-        // therefore cheat by creating a fake pin.
-        let pin = unsafe { &mem::zeroed::<Pin>() };
-
-        // Simply mark the next-pointer in this thread's entry.
-        let mut next = self.next.load(pin);
-        while next.tag() == 0 {
-            match self.next.cas(next, next.with_tag(1)) {
-                Ok(()) => break,
-                Err(n) => next = n,
-            }
-        }
-    }
-}
-
-/// Returns a reference to the head pointer of the list of participating threads.
-fn participants() -> &'static Atomic<Thread> {
-    static PARTICIPANTS: AtomicUsize = ATOMIC_USIZE_INIT;
-    unsafe { &*(&PARTICIPANTS as *const _ as *const _) }
-}
-
-/// Attempts to advance the global epoch.
-///
-/// The global epoch can advance only if all currently pinned threads have been pinned in the
-/// current epoch.
-#[cold]
-pub fn try_advance(pin: &Pin) {
-    let epoch = EPOCH.load(SeqCst);
-
-    // Traverse the linked list of participating threads.
-    let mut pred = participants();
-    let mut curr = pred.load(pin);
-
-    while let Some(c) = curr.as_ref() {
-        let succ = c.next.load(pin);
-
-        if succ.tag() == 1 {
-            // This thread has exited. Try unlinking it from the list.
-            let succ = succ.with_tag(0);
-
-            if pred.cas(curr, succ).is_err() {
-                // We lost the race to unlink the thread. Usually that means we should traverse the
-                // list again from the beginning, but since another thread trying to advance the
-                // epoch has won the race, we leave the job to that one.
-                return;
-            }
-
-            // The unlinked entry can later be freed.
-            unsafe { defer_free(c as *const _ as *mut Thread, 1, pin) }
-
-            // Move forward, but don't change the predecessor.
-            curr = succ;
-        } else {
-            let thread_state = c.state.load(SeqCst);
-            let thread_is_pinned = thread_state & 1 == 1;
-            let thread_epoch = thread_state & !1;
-
-            // If the thread was pinned in a different epoch, we cannot advance the global epoch
-            // just yet.
-            if thread_is_pinned && thread_epoch != epoch {
-                return;
-            }
-
-            // Move one step forward.
-            pred = &c.next;
-            curr = succ;
-        }
-    }
-
-    // All pinned threads were pinned in the current global epoch.
-    // Finally, try advancing the epoch. We increment by 2 and simply wrap around on overflow.
-    EPOCH.compare_and_swap(epoch, epoch.wrapping_add(2), SeqCst);
-}
-
-/// A witness that the current thread is pinned.
-///
-/// A reference to `Pin` is proof that the current thread is pinned. Lots of methods that interact
-/// with [`Atomic`]s can safely be called only while the thread is pinned so they often require a
-/// reference to `Pin`.
-///
-/// This data type is inherently bound to the thread that created it, therefore it does not
-/// implement `Send` nor `Sync`.
-///
-/// [`Atomic`]: struct.Atomic.html
-#[derive(Debug)]
-pub struct Pin {
-    /// A pointer to the cell within the harness, which holds a pointer to the local bag.
-    ///
-    /// This pointer is kept within `Pin` as a matter of convenience. It could also be reached
-    /// through the harness itself, but that doesn't work if we're in the process of it's
-    /// destruction.
-    bag: *const Cell<*mut Bag>, // !Send + !Sync
-}
-
-/// Pins the current thread.
-///
-/// The provided function takes a reference to a `Pin`, which can be used to interact with
-/// [`Atomic`]s. The pin serves as a proof that whatever data you load from an [`Atomic`] will not
-/// be concurrently deleted by another thread while the pin is alive.
-///
-/// Note that keeping a thread pinned for a long time prevents memory reclamation of any newly
-/// deleted objects protected by [`Atomic`]s. The provided function should be very quick -
-/// generally speaking, it shouldn't take more than 100 ms.
-///
-/// Pinning is reentrant. There is no harm in pinning a thread while it's already pinned (repinning
-/// is essentially a noop).
-///
-/// Pinning itself comes with a price: it begins with a `SeqCst` fence and performs a few other
-/// atomic operations. However, this mechanism is designed to be as performant as possible, so it
-/// can be used pretty liberally. On a modern machine pinning takes 10 to 15 nanoseconds.
-///
-/// [`Atomic`]: struct.Atomic.html
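-///
-/// A minimal sketch, mirroring the `pin_reentrant` test in this module:
-///
-/// ```
-/// use coco::epoch;
-///
-/// assert!(!epoch::is_pinned());
-/// epoch::pin(|_pin| {
-///     // The thread stays pinned for the duration of the closure.
-///     assert!(epoch::is_pinned());
-/// });
-/// assert!(!epoch::is_pinned());
-/// ```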
-pub fn pin<F, T>(f: F) -> T
-    where F: FnOnce(&Pin) -> T
-{
-    /// Number of pinnings after which a thread will collect some global garbage.
-    const PINS_BETWEEN_COLLECT: usize = 128;
-
-    HARNESS.with(|harness| {
-        let thread = unsafe { &*harness.thread };
-        let pin = &Pin { bag: &harness.bag };
-
-        let was_pinned = harness.is_pinned.get();
-        if !was_pinned {
-            // Pin the thread.
-            harness.is_pinned.set(true);
-            thread.set_pinned(pin);
-
-            // Increment the pin counter.
-            let count = harness.pin_count.get();
-            harness.pin_count.set(count.wrapping_add(1));
-
-            // If the counter progressed enough, try advancing the epoch and collecting garbage.
-            if count % PINS_BETWEEN_COLLECT == 0 {
-                try_advance(pin);
-                garbage::collect(pin);
-            }
-        }
-
-        // This will unpin the thread even if `f` panics.
-        defer! {
-            if !was_pinned {
-                // Unpin the thread.
-                thread.set_unpinned();
-                harness.is_pinned.set(false);
-            }
-        }
-
-        f(pin)
-    })
-}
-
-/// Returns `true` if the current thread is pinned.
-#[inline]
-pub fn is_pinned() -> bool {
-    HARNESS.with(|harness| harness.is_pinned.get())
-}
-
-/// Stashes away an object that will later be freed.
-///
-/// The specified object is an array allocated at address `object` and consists of `count` elements
-/// of type `T`.
-///
-/// This function inserts the object into a thread-local buffer. When the buffer becomes full,
-/// its objects are flushed into the globally shared [`Garbage`] instance.
-///
-/// If the object is unusually large, it is wise to follow up with a call to [`flush`] so that it
-/// doesn't get stuck waiting in the buffer for a long time.
-///
-/// [`Garbage`]: struct.Garbage.html
-/// [`flush`]: fn.flush.html
-pub unsafe fn defer_free<T>(object: *mut T, count: usize, pin: &Pin) {
-    unsafe fn free<T>(ptr: *mut T, count: usize) {
-        // Free the memory, but don't run the destructors.
-        drop(Vec::from_raw_parts(ptr, 0, count));
-    }
-
-    loop {
-        // Get the thread-local bag.
-        let cell = &*pin.bag;
-        let bag = cell.get();
-
-        // Try inserting the object into the bag.
-        if (*bag).try_insert(free::<T>, object, count) {
-            // Success! We're done.
-            break;
-        }
-
-        // Flush the garbage and create a new bag.
-        flush(pin);
-    }
-}
-
-/// Flushes the buffered thread-local garbage.
-///
-/// It is wise to flush the garbage just after passing a very large object to [`defer_free`], so
-/// that it isn't sitting in the buffer for a long time.
-///
-/// [`defer_free`]: fn.defer_free.html
-pub fn flush(pin: &Pin) {
-    unsafe {
-        // Get the thread-local bag.
-        let cell = &*pin.bag;
-        let bag = cell.get();
-
-        if !(*bag).is_empty() {
-            // The bag is not empty. Replace it with a fresh one.
-            cell.set(Box::into_raw(Box::new(Bag::new())));
-
-            // Push the old bag into the garbage queue.
-            let bag = Box::from_raw(bag);
-            garbage::push(bag, pin);
-
-            // Spare some cycles on garbage collection.
-            // Note: This may itself produce garbage and allocate new bags.
-            try_advance(pin);
-            garbage::collect(pin);
-        }
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use std::thread;
-    use std::sync::atomic::Ordering::SeqCst;
-
-    use epoch;
-    use epoch::garbage::EPOCH;
-    use epoch::thread::{HARNESS, try_advance};
-
-    #[test]
-    fn pin_reentrant() {
-        assert!(!epoch::is_pinned());
-        epoch::pin(|_| {
-            assert!(epoch::is_pinned());
-            epoch::pin(|_| {
-                assert!(epoch::is_pinned());
-            });
-            assert!(epoch::is_pinned());
-        });
-        assert!(!epoch::is_pinned());
-    }
-
-    #[test]
-    fn flush_local_garbage() {
-        for _ in 0..100 {
-            epoch::pin(|pin| {
-                unsafe {
-                    let a = Box::into_raw(Box::new(7));
-                    epoch::defer_free(a, 1, pin);
-
-                    HARNESS.with(|h| {
-                        assert!(!(*h.bag.get()).is_empty());
-
-                        while !(*h.bag.get()).is_empty() {
-                            epoch::flush(pin);
-                        }
-                    });
-                }
-            });
-        }
-    }
-
-    #[test]
-    fn garbage_buffering() {
-        HARNESS.with(|h| unsafe {
-            while !(*h.bag.get()).is_empty() {
-                epoch::pin(|pin| epoch::flush(pin));
-            }
-
-            epoch::pin(|pin| {
-                for _ in 0..10 {
-                    let a = Box::into_raw(Box::new(7));
-                    epoch::defer_free(a, 1, pin);
-                }
-                assert!(!(*h.bag.get()).is_empty());
-            });
-        });
-    }
-
-    #[test]
-    fn pin_holds_advance() {
-        let threads = (0..8).map(|_| {
-            thread::spawn(|| {
-                for _ in 0..500_000 {
-                    epoch::pin(|pin| {
-                        let before = EPOCH.load(SeqCst);
-                        try_advance(pin);
-                        let after = EPOCH.load(SeqCst);
-
-                        assert!(after.wrapping_sub(before) <= 2);
-                    });
-                }
-            })
-        }).collect::<Vec<_>>();
-
-        for t in threads {
-            t.join().unwrap();
-        }
-    }
-}
deleted file mode 100644
--- a/third_party/rust/coco/src/lib.rs
+++ /dev/null
@@ -1,65 +0,0 @@
-//! Concurrent collections.
-//!
-//! This crate offers several collections that are designed for performance in multithreaded
-//! contexts. They can be freely shared among multiple threads running in parallel, and
-//! concurrently modified without the overhead of locking.
-//!
-//! <!--
-//! Some of these data structures are lock-free. Others are not strictly speaking lock-free, but
-//! still scale well with respect to the number of threads accessing them.
-//! -->
-//!
-//! # Collections
-//!
-//! The following collections are available:
-//!
-//! * [`Stack`]: A lock-free stack.
-//! * [`deque`]: A lock-free work-stealing deque.
-//!
-//! # Which collection should you use?
-//!
-//! ### Use a [`Stack`] when:
-//!
-//! * You want a simple shared collection where objects can be inserted and removed.
-//! * You want to avoid performance degradation due to locking.
-//! * You want the last-in first-out order of elements.
-//!
-//! ### Use a [`deque`] when:
-//!
-//! * You want one thread inserting and removing objects, and multiple threads just removing them.
-//! * You don't care about the order of elements.
-//!
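-//! For a quick taste, here is the [`Stack`] in action (mirroring the examples in the `stack`
-//! module):
-//!
-//! ```
-//! use coco::Stack;
-//!
-//! let s = Stack::new();
-//! s.push(1);
-//! s.push(2);
-//! // Elements come back out in last-in first-out order.
-//! assert_eq!(s.pop(), Some(2));
-//! assert_eq!(s.pop(), Some(1));
-//! assert_eq!(s.pop(), None);
-//! ```
-//!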
-//! # Garbage collection
-//!
-//! An interesting problem that concurrent collections must deal with comes from the remove
-//! operation. Suppose that a thread removes an element from a lock-free map while another thread
-//! is reading that same element at the same time. The first thread must wait until the second
-//! thread stops reading the element. Only then is it safe to destruct it.
-//!
-//! Programming languages that come with garbage collectors solve this problem trivially. The
-//! garbage collector will destruct the removed element when no thread can hold a reference to it
-//! anymore.
-//!
-//! This crate implements a basic garbage collection mechanism, which is based on epochs (see the
-//! `epoch` module). When an element gets removed from a concurrent collection, it is inserted into
-//! a pile of garbage and marked with the current epoch. Every time a thread accesses a collection,
-//! it checks the current epoch, attempts to increment it, and destructs some garbage that became
-//! so old that no thread can be referencing it anymore.
-//!
-//! That is the general mechanism behind garbage collection, but the details are a bit more
-//! complicated. Anyhow, garbage collection is designed to be fully automatic and something users
-//! of concurrent collections don't have to worry about.
-//!
-//! [`Stack`]: stack/struct.Stack.html
-//! [`deque`]: deque/fn.new.html
-
-extern crate either;
-
-#[macro_use(defer)]
-extern crate scopeguard;
-
-pub mod deque;
-pub mod epoch;
-pub mod stack;
-
-pub use stack::Stack;
deleted file mode 100644
--- a/third_party/rust/coco/src/stack.rs
+++ /dev/null
@@ -1,292 +0,0 @@
-//! A lock-free stack.
-//!
-//! This is an implementation of the Treiber stack, one of the simplest lock-free data structures.
-
-use std::ptr;
-use std::sync::atomic::Ordering::Relaxed;
-
-use epoch::{self, Atomic};
-
-/// A single node in a stack.
-struct Node<T> {
-    /// The payload.
-    value: T,
-    /// The next node in the stack.
-    next: Atomic<Node<T>>,
-}
-
-/// A lock-free stack.
-///
-/// It can be used with multiple producers and multiple consumers at the same time.
-pub struct Stack<T> {
-    head: Atomic<Node<T>>,
-}
-
-unsafe impl<T: Send> Send for Stack<T> {}
-unsafe impl<T: Send> Sync for Stack<T> {}
-
-impl<T> Stack<T> {
-    /// Returns a new, empty stack.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use coco::Stack;
-    ///
-    /// let s = Stack::<i32>::new();
-    /// ```
-    pub fn new() -> Self {
-        Stack { head: Atomic::null(0) }
-    }
-
-    /// Returns `true` if the stack is empty.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use coco::Stack;
-    ///
-    /// let s = Stack::new();
-    /// assert!(s.is_empty());
-    /// s.push("hello");
-    /// assert!(!s.is_empty());
-    /// ```
-    pub fn is_empty(&self) -> bool {
-        epoch::pin(|pin| self.head.load(pin).is_null())
-    }
-
-    /// Pushes a new value onto the stack.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use coco::Stack;
-    ///
-    /// let s = Stack::new();
-    /// s.push(1);
-    /// s.push(2);
-    /// ```
-    pub fn push(&self, value: T) {
-        let mut node = Box::new(Node {
-            value: value,
-            next: Atomic::null(0),
-        });
-
-        epoch::pin(|pin| {
-            let mut head = self.head.load(pin);
-            loop {
-                node.next.store(head);
-                match self.head.cas_box(head, node, 0) {
-                    Ok(_) => break,
-                    Err((h, n)) => {
-                        head = h;
-                        node = n;
-                    }
-                }
-            }
-        })
-    }
-
-    /// Attempts to pop a value from the stack.
-    ///
-    /// Returns `None` if the stack is empty.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use coco::Stack;
-    ///
-    /// let s = Stack::new();
-    /// s.push(1);
-    /// s.push(2);
-    /// assert_eq!(s.pop(), Some(2));
-    /// assert_eq!(s.pop(), Some(1));
-    /// assert_eq!(s.pop(), None);
-    /// ```
-    pub fn pop(&self) -> Option<T> {
-        epoch::pin(|pin| {
-            let mut head = self.head.load(pin);
-            loop {
-                match head.as_ref() {
-                    Some(h) => {
-                        let next = h.next.load(pin);
-                        match self.head.cas(head, next) {
-                            Ok(_) => unsafe {
-                                epoch::defer_free(head.as_raw(), 1, pin);
-                                return Some(ptr::read(&h.value));
-                            },
-                            Err(h) => head = h,
-                        }
-                    }
-                    None => return None,
-                }
-            }
-        })
-    }
-}
-
-impl<T> Drop for Stack<T> {
-    fn drop(&mut self) {
-        // Destruct all nodes in the stack.
-        let mut curr = self.head.load_raw(Relaxed).0;
-        while !curr.is_null() {
-            unsafe {
-                let next = (*curr).next.load_raw(Relaxed).0;
-                drop(Box::from_raw(curr));
-                curr = next;
-            }
-        }
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    extern crate rand;
-
-    use std::sync::Arc;
-    use std::sync::atomic::AtomicUsize;
-    use std::sync::atomic::Ordering::SeqCst;
-    use std::thread;
-
-    use super::Stack;
-    use self::rand::Rng;
-
-    #[test]
-    fn smoke() {
-        let s = Stack::new();
-        s.push(1);
-        assert_eq!(s.pop(), Some(1));
-        assert_eq!(s.pop(), None);
-    }
-
-    #[test]
-    fn push_pop() {
-        let s = Stack::new();
-        s.push(1);
-        s.push(2);
-        s.push(3);
-        assert_eq!(s.pop(), Some(3));
-        s.push(4);
-        assert_eq!(s.pop(), Some(4));
-        assert_eq!(s.pop(), Some(2));
-        assert_eq!(s.pop(), Some(1));
-        assert_eq!(s.pop(), None);
-        s.push(5);
-        assert_eq!(s.pop(), Some(5));
-        assert_eq!(s.pop(), None);
-    }
-
-    #[test]
-    fn is_empty() {
-        let s = Stack::new();
-        assert!(s.is_empty());
-
-        for i in 0..3 {
-            s.push(i);
-            assert!(!s.is_empty());
-        }
-
-        for _ in 0..3 {
-            assert!(!s.is_empty());
-            s.pop();
-        }
-
-        assert!(s.is_empty());
-        s.push(3);
-        assert!(!s.is_empty());
-        s.pop();
-        assert!(s.is_empty());
-    }
-
-    #[test]
-    fn stress() {
-        const THREADS: usize = 8;
-
-        let s = Arc::new(Stack::new());
-        let len = Arc::new(AtomicUsize::new(0));
-
-        let threads = (0..THREADS).map(|t| {
-            let s = s.clone();
-            let len = len.clone();
-
-            thread::spawn(move || {
-                let mut rng = rand::thread_rng();
-                for i in 0..100_000 {
-                    if rng.gen_range(0, t + 1) == 0 {
-                        if s.pop().is_some() {
-                            len.fetch_sub(1, SeqCst);
-                        }
-                    } else {
-                        s.push(t + THREADS * i);
-                        len.fetch_add(1, SeqCst);
-                    }
-                }
-            })
-        }).collect::<Vec<_>>();
-
-        for t in threads {
-            t.join().unwrap();
-        }
-
-        let mut last = [::std::usize::MAX; THREADS];
-
-        while !s.is_empty() {
-            let x = s.pop().unwrap();
-            let t = x % THREADS;
-
-            assert!(last[t] > x);
-            last[t] = x;
-
-            len.fetch_sub(1, SeqCst);
-        }
-        assert_eq!(len.load(SeqCst), 0);
-    }
-
-    #[test]
-    fn destructors() {
-        struct Elem((), Arc<AtomicUsize>);
-
-        impl Drop for Elem {
-            fn drop(&mut self) {
-                self.1.fetch_add(1, SeqCst);
-            }
-        }
-
-        const THREADS: usize = 8;
-
-        let s = Arc::new(Stack::new());
-        let len = Arc::new(AtomicUsize::new(0));
-        let popped = Arc::new(AtomicUsize::new(0));
-        let dropped = Arc::new(AtomicUsize::new(0));
-
-        let threads = (0..THREADS).map(|t| {
-            let s = s.clone();
-            let len = len.clone();
-            let popped = popped.clone();
-            let dropped = dropped.clone();
-
-            thread::spawn(move || {
-                let mut rng = rand::thread_rng();
-                for _ in 0..100_000 {
-                    if rng.gen_range(0, t + 1) == 0 {
-                        if s.pop().is_some() {
-                            len.fetch_sub(1, SeqCst);
-                            popped.fetch_add(1, SeqCst);
-                        }
-                    } else {
-                        s.push(Elem((), dropped.clone()));
-                        len.fetch_add(1, SeqCst);
-                    }
-                }
-            })
-        }).collect::<Vec<_>>();
-
-        for t in threads {
-            t.join().unwrap();
-        }
-
-        assert_eq!(dropped.load(SeqCst), popped.load(SeqCst));
-        drop(s);
-        assert_eq!(dropped.load(SeqCst), popped.load(SeqCst) + len.load(SeqCst));
-    }
-}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/crossbeam-deque/.cargo-checksum.json
@@ -0,0 +1,1 @@
+{"files":{".travis.yml":"7a28ab46755ee3ed2ad3078ecec5f26cf1b95fa122d947edfc1a15bff4849ae8","CHANGELOG.md":"c134cbbcfdf39e86a51337715daca6498d000e019f2d0d5050d04e14e7ef5219","Cargo.toml":"a247839eb4e5a43632eee8727e969a23b4474a6d1b390ea4a19e3e714d8ba060","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"0621878e61f0d0fda054bcbe02df75192c28bde1ecc8289cbd86aeba2dd72720","README.md":"acc366bfcc7262f4719306196e40d59b4e832179adc9cfe2cd27cc710a6787ac","src/lib.rs":"6f50bc16841c93b80d588bbeae9d56b55a2f3a32fe5232fd6e748362b680b4ef"},"package":"f739f8c5363aca78cfb059edf753d8f0d36908c348f3d8d1503f03d8b75d9cf3"}
\ No newline at end of file
new file mode 100644
--- /dev/null
+++ b/third_party/rust/crossbeam-deque/.travis.yml
@@ -0,0 +1,13 @@
+language: rust
+
+rust:
+  - stable
+  - beta
+  - nightly
+  - 1.13.0
+
+script:
+  - cargo build
+  - cargo build --release
+  - cargo test
+  - cargo test --release
new file mode 100644
--- /dev/null
+++ b/third_party/rust/crossbeam-deque/CHANGELOG.md
@@ -0,0 +1,18 @@
+# Changelog
+All notable changes to this project will be documented in this file.
+
+The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/)
+and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
+
+## [Unreleased]
+
+## [0.1.1] - 2017-11-29
+### Changed
+- Update `crossbeam-epoch` to `0.2.0`.
+
+## 0.1.0 - 2017-11-26
+### Added
+- First implementation of the Chase-Lev deque.
+
+[Unreleased]: https://github.com/crossbeam-rs/crossbeam-deque/compare/v0.1.1...HEAD
+[0.1.1]: https://github.com/crossbeam-rs/crossbeam-deque/compare/v0.1.0...v0.1.1
new file mode 100644
--- /dev/null
+++ b/third_party/rust/crossbeam-deque/Cargo.toml
@@ -0,0 +1,33 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g. crates.io) dependencies
+#
+# If you believe there's an error in this file please file an
+# issue against the rust-lang/cargo repository. If you're
+# editing this file be aware that the upstream Cargo.toml
+# will likely look very different (and much more reasonable)
+
+[package]
+name = "crossbeam-deque"
+version = "0.2.0"
+authors = ["The Crossbeam Project Developers"]
+description = "Concurrent work-stealing deque"
+homepage = "https://github.com/crossbeam-rs/crossbeam-deque"
+documentation = "https://docs.rs/crossbeam-deque"
+readme = "README.md"
+keywords = ["chase-lev", "lock-free", "scheduler", "scheduling"]
+categories = ["algorithms", "concurrency", "data-structures"]
+license = "MIT/Apache-2.0"
+repository = "https://github.com/crossbeam-rs/crossbeam-deque"
+[dependencies.crossbeam-epoch]
+version = "0.3.0"
+
+[dependencies.crossbeam-utils]
+version = "0.2.1"
+[dev-dependencies.rand]
+version = "0.4"
+[badges.travis-ci]
+repository = "crossbeam-rs/crossbeam-deque"
rename from third_party/rust/coco/LICENSE-APACHE
rename to third_party/rust/crossbeam-deque/LICENSE-APACHE
rename from third_party/rust/coco/LICENSE-MIT
rename to third_party/rust/crossbeam-deque/LICENSE-MIT
new file mode 100644
--- /dev/null
+++ b/third_party/rust/crossbeam-deque/README.md
@@ -0,0 +1,27 @@
+# Concurrent work-stealing deque
+
+[![Build Status](https://travis-ci.org/crossbeam-rs/crossbeam-deque.svg?branch=master)](https://travis-ci.org/crossbeam-rs/crossbeam-deque)
+[![License](https://img.shields.io/badge/license-MIT%2FApache--2.0-blue.svg)](https://github.com/crossbeam-rs/crossbeam-deque)
+[![Cargo](https://img.shields.io/crates/v/crossbeam-deque.svg)](https://crates.io/crates/crossbeam-deque)
+[![Documentation](https://docs.rs/crossbeam-deque/badge.svg)](https://docs.rs/crossbeam-deque)
+
+## Usage
+
+Add this to your `Cargo.toml`:
+
+```toml
+[dependencies]
+crossbeam-deque = "0.1"
+```
+
+Next, add this to your crate:
+
+```rust
+extern crate crossbeam_deque;
+```
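+
+A minimal sketch of the crate in use (illustrative only; see the crate documentation for the
+full `Deque` and `Stealer` API):
+
+```rust
+extern crate crossbeam_deque;
+
+use crossbeam_deque::{Deque, Steal};
+
+fn main() {
+    let d = Deque::new();
+    let s = d.stealer();
+
+    // The owner pushes into the bottom; stealers take from the top.
+    d.push(1);
+    assert_eq!(s.steal(), Steal::Data(1));
+}
+```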
+
+## License
+
+Licensed under the terms of the MIT license and the Apache License (Version 2.0).
+
+See [LICENSE-MIT](LICENSE-MIT) and [LICENSE-APACHE](LICENSE-APACHE) for details.
new file mode 100644
--- /dev/null
+++ b/third_party/rust/crossbeam-deque/src/lib.rs
@@ -0,0 +1,1045 @@
+//! A concurrent work-stealing deque.
+//!
+//! The data structure can be thought of as a dynamically growable and shrinkable buffer that has
+//! two ends: bottom and top. A [`Deque`] can [`push`] elements into the bottom and [`pop`]
+//! elements from the bottom, but it can only [`steal`][Deque::steal] elements from the top.
+//!
+//! A [`Deque`] doesn't implement `Sync` so it cannot be shared among multiple threads. However, it
+//! can create [`Stealer`]s, and those can be easily cloned, shared, and sent to other threads.
+//! [`Stealer`]s can only [`steal`][Stealer::steal] elements from the top.
+//!
+//! Here's a visualization of the data structure:
+//!
+//! ```text
+//!                    top
+//!                     _
+//!    Deque::steal -> | | <- Stealer::steal
+//!                    | |
+//!                    | |
+//!                    | |
+//! Deque::push/pop -> |_|
+//!
+//!                  bottom
+//! ```
+//!
+//! # Work-stealing schedulers
+//!
+//! Usually, the data structure is used in work-stealing schedulers as follows.
+//!
+//! There are a number of threads. Each thread owns a [`Deque`] and creates a [`Stealer`] that is
+//! shared among all other threads. Alternatively, it creates multiple [`Stealer`]s, one for each
+//! of the other threads.
+//!
+//! Then, all threads are executing in a loop. In the loop, each one attempts to [`pop`] some work
+//! from its own [`Deque`]. But if it is empty, it attempts to [`steal`][Stealer::steal] work from
+//! some other thread instead. When executing work (or being idle), a thread may produce more work,
+//! which gets [`push`]ed into its [`Deque`].
+//!
+//! Of course, there are many variations of this strategy. For example, sometimes it may be
+//! beneficial for a thread to always [`steal`][Deque::steal] work from the top of its deque
+//! instead of calling [`pop`] and taking it from the bottom.
+//!
+//! # Examples
+//!
+//! ```
+//! use crossbeam_deque::{Deque, Steal};
+//! use std::thread;
+//!
+//! let d = Deque::new();
+//! let s = d.stealer();
+//!
+//! d.push('a');
+//! d.push('b');
+//! d.push('c');
+//!
+//! assert_eq!(d.pop(), Some('c'));
+//! drop(d);
+//!
+//! thread::spawn(move || {
+//!     assert_eq!(s.steal(), Steal::Data('a'));
+//!     assert_eq!(s.steal(), Steal::Data('b'));
+//! }).join().unwrap();
+//! ```
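+//!
+//! As a rough sketch of the scheduling loop described above (the `find_task` helper below is
+//! purely illustrative and not part of this crate's API):
+//!
+//! ```
+//! use crossbeam_deque::{Deque, Steal, Stealer};
+//!
+//! fn find_task<T>(local: &Deque<T>, stealers: &[Stealer<T>]) -> Option<T> {
+//!     // Prefer work from the bottom of our own deque.
+//!     if let Some(task) = local.pop() {
+//!         return Some(task);
+//!     }
+//!     // Otherwise, try stealing from the top of the other deques.
+//!     for s in stealers {
+//!         loop {
+//!             match s.steal() {
+//!                 Steal::Data(task) => return Some(task),
+//!                 Steal::Empty => break,
+//!                 Steal::Retry => {}
+//!             }
+//!         }
+//!     }
+//!     None
+//! }
+//!
+//! let local = Deque::new();
+//! let stealers: Vec<Stealer<i32>> = Vec::new();
+//! local.push(1);
+//! assert_eq!(find_task(&local, &stealers), Some(1));
+//! ```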
+//!
+//! # References
+//!
+//! The implementation is based on the following work:
+//!
+//! 1. [Chase and Lev. Dynamic circular work-stealing deque. SPAA 2005.][chase-lev]
+//! 2. [Le, Pop, Cohen, and Nardelli. Correct and efficient work-stealing for weak memory models.
+//!    PPoPP 2013.][weak-mem]
+//! 3. [Norris and Demsky. CDSchecker: checking concurrent data structures written with C/C++
+//!    atomics. OOPSLA 2013.][checker]
+//!
+//! [chase-lev]: https://dl.acm.org/citation.cfm?id=1073974
+//! [weak-mem]: https://dl.acm.org/citation.cfm?id=2442524
+//! [checker]: https://dl.acm.org/citation.cfm?id=2509514
+//!
+//! [`Deque`]: struct.Deque.html
+//! [`Stealer`]: struct.Stealer.html
+//! [`push`]: struct.Deque.html#method.push
+//! [`pop`]: struct.Deque.html#method.pop
+//! [Deque::steal]: struct.Deque.html#method.steal
+//! [Stealer::steal]: struct.Stealer.html#method.steal
+
+extern crate crossbeam_epoch as epoch;
+extern crate crossbeam_utils as utils;
+
+use std::fmt;
+use std::marker::PhantomData;
+use std::mem;
+use std::ptr;
+use std::sync::Arc;
+use std::sync::atomic::{self, AtomicIsize};
+use std::sync::atomic::Ordering::{Acquire, Relaxed, Release, SeqCst};
+
+use epoch::{Atomic, Owned};
+use utils::cache_padded::CachePadded;
+
+/// Minimum buffer capacity for a deque.
+const DEFAULT_MIN_CAP: usize = 16;
+
+/// If a buffer of at least this size is retired, thread-local garbage is flushed so that it gets
+/// deallocated as soon as possible.
+const FLUSH_THRESHOLD_BYTES: usize = 1 << 10;
+
+/// Possible outcomes of a steal operation.
+#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Copy, Clone)]
+pub enum Steal<T> {
+    /// The deque was empty at the time of stealing.
+    Empty,
+
+    /// Some data has been successfully stolen.
+    Data(T),
+
+    /// Lost the race to another concurrent operation while stealing data. Try again.
+    Retry,
+}
+
+/// A buffer that holds elements in a deque.
+struct Buffer<T> {
+    /// Pointer to the allocated memory.
+    ptr: *mut T,
+
+    /// Capacity of the buffer. Always a power of two.
+    cap: usize,
+}
+
+unsafe impl<T> Send for Buffer<T> {}
+
+impl<T> Buffer<T> {
+    /// Returns a new buffer with the specified capacity.
+    fn new(cap: usize) -> Self {
+        debug_assert_eq!(cap, cap.next_power_of_two());
+
+        let mut v = Vec::with_capacity(cap);
+        let ptr = v.as_mut_ptr();
+        mem::forget(v);
+
+        Buffer {
+            ptr: ptr,
+            cap: cap,
+        }
+    }
+
+    /// Returns a pointer to the element at the specified `index`.
+    unsafe fn at(&self, index: isize) -> *mut T {
+        // `self.cap` is always a power of two.
+        self.ptr.offset(index & (self.cap - 1) as isize)
+    }
+
+    /// Writes `value` into the specified `index`.
+    unsafe fn write(&self, index: isize, value: T) {
+        ptr::write(self.at(index), value)
+    }
+
+    /// Reads a value from the specified `index`.
+    unsafe fn read(&self, index: isize) -> T {
+        ptr::read(self.at(index))
+    }
+}
+
+impl<T> Drop for Buffer<T> {
+    fn drop(&mut self) {
+        unsafe {
+            drop(Vec::from_raw_parts(self.ptr, 0, self.cap));
+        }
+    }
+}
+
+/// Internal data that is shared between the deque and its stealers.
+struct Inner<T> {
+    /// The bottom index.
+    bottom: AtomicIsize,
+
+    /// The top index.
+    top: AtomicIsize,
+
+    /// The underlying buffer.
+    buffer: Atomic<Buffer<T>>,
+
+    /// Minimum capacity of the buffer. Always a power of two.
+    min_cap: usize,
+}
+
+impl<T> Inner<T> {
+    /// Returns a new `Inner` with default minimum capacity.
+    fn new() -> Self {
+        Self::with_min_capacity(DEFAULT_MIN_CAP)
+    }
+
+    /// Returns a new `Inner` with minimum capacity of `min_cap` rounded to the next power of two.
+    fn with_min_capacity(min_cap: usize) -> Self {
+        let power = min_cap.next_power_of_two();
+        assert!(power >= min_cap, "capacity too large: {}", min_cap);
+        Inner {
+            bottom: AtomicIsize::new(0),
+            top: AtomicIsize::new(0),
+            buffer: Atomic::new(Buffer::new(power)),
+            min_cap: power,
+        }
+    }
+
+    /// Resizes the internal buffer to the new capacity of `new_cap`.
+    #[cold]
+    unsafe fn resize(&self, new_cap: usize) {
+        // Load the bottom, top, and buffer.
+        let b = self.bottom.load(Relaxed);
+        let t = self.top.load(Relaxed);
+
+        let buffer = self.buffer.load(Relaxed, epoch::unprotected());
+
+        // Allocate a new buffer.
+        let new = Buffer::new(new_cap);
+
+        // Copy data from the old buffer to the new one.
+        let mut i = t;
+        while i != b {
+            ptr::copy_nonoverlapping(buffer.deref().at(i), new.at(i), 1);
+            i = i.wrapping_add(1);
+        }
+
+        let guard = &epoch::pin();
+
+        // Replace the old buffer with the new one.
+        let old = self.buffer
+            .swap(Owned::new(new).into_shared(guard), Release, guard);
+
+        // Destroy the old buffer later.
+        guard.defer(move || old.into_owned());
+
+        // If the buffer is very large, then flush the thread-local garbage in order to
+        // deallocate it as soon as possible.
+        if mem::size_of::<T>() * new_cap >= FLUSH_THRESHOLD_BYTES {
+            guard.flush();
+        }
+    }
+}
+
+impl<T> Drop for Inner<T> {
+    fn drop(&mut self) {
+        // Load the bottom, top, and buffer.
+        let b = self.bottom.load(Relaxed);
+        let t = self.top.load(Relaxed);
+
+        unsafe {
+            let buffer = self.buffer.load(Relaxed, epoch::unprotected());
+
+            // Go through the buffer from top to bottom and drop all elements in the deque.
+            let mut i = t;
+            while i != b {
+                ptr::drop_in_place(buffer.deref().at(i));
+                i = i.wrapping_add(1);
+            }
+
+            // Free the memory allocated by the buffer.
+            drop(buffer.into_owned());
+        }
+    }
+}
+
+/// A concurrent work-stealing deque.
+///
+/// A deque has two ends: bottom and top. Elements can be [`push`]ed into the bottom and [`pop`]ped
+/// from the bottom. The top end is special in that elements can only be stolen from it using the
+/// [`steal`][Deque::steal] method.
+///
+/// # Stealers
+///
+/// While [`Deque`] doesn't implement `Sync`, it can create [`Stealer`]s using the method
+/// [`stealer`][stealer], and those can be easily shared among multiple threads. [`Stealer`]s can
+/// only [`steal`][Stealer::steal] elements from the top end of the deque.
+///
+/// # Capacity
+///
+/// The data structure dynamically grows and shrinks as elements are inserted and removed from it.
+/// If the internal buffer gets full, a new one twice the size of the original is allocated.
+/// Similarly, if it is less than a quarter full, a new buffer half the size of the original is
+/// allocated.
+///
+/// In order to prevent frequent resizing (reallocations may be costly), it is possible to specify
+/// a large minimum capacity for the deque by calling [`Deque::with_min_capacity`]. This
+/// constructor will make sure that the internal buffer never shrinks below that size.
+///
+/// # Examples
+///
+/// ```
+/// use crossbeam_deque::{Deque, Steal};
+///
+/// let d = Deque::with_min_capacity(1000);
+/// let s = d.stealer();
+///
+/// d.push('a');
+/// d.push('b');
+/// d.push('c');
+///
+/// assert_eq!(d.pop(), Some('c'));
+/// assert_eq!(d.steal(), Steal::Data('a'));
+/// assert_eq!(s.steal(), Steal::Data('b'));
+/// ```
+///
+/// [`Deque`]: struct.Deque.html
+/// [`Stealer`]: struct.Stealer.html
+/// [`push`]: struct.Deque.html#method.push
+/// [`pop`]: struct.Deque.html#method.pop
+/// [stealer]: struct.Deque.html#method.stealer
+/// [`Deque::with_min_capacity`]: struct.Deque.html#method.with_min_capacity
+/// [Deque::steal]: struct.Deque.html#method.steal
+/// [Stealer::steal]: struct.Stealer.html#method.steal
+pub struct Deque<T> {
+    inner: Arc<CachePadded<Inner<T>>>,
+    _marker: PhantomData<*mut ()>, // !Send + !Sync
+}
+
+unsafe impl<T: Send> Send for Deque<T> {}
+
+impl<T> Deque<T> {
+    /// Returns a new deque.
+    ///
+    /// The internal buffer is destructed as soon as the deque and all its stealers get dropped.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_deque::Deque;
+    ///
+    /// let d = Deque::<i32>::new();
+    /// ```
+    pub fn new() -> Deque<T> {
+        Deque {
+            inner: Arc::new(CachePadded::new(Inner::new())),
+            _marker: PhantomData,
+        }
+    }
+
+    /// Returns a new deque with the specified minimum capacity.
+    ///
+    /// If the capacity is not a power of two, it will be rounded up to the next one.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_deque::Deque;
+    ///
+    /// // The minimum capacity will be rounded up to 1024.
+    /// let d = Deque::<i32>::with_min_capacity(1000);
+    /// ```
+    pub fn with_min_capacity(min_cap: usize) -> Deque<T> {
+        Deque {
+            inner: Arc::new(CachePadded::new(Inner::with_min_capacity(min_cap))),
+            _marker: PhantomData,
+        }
+    }
+
+    /// Returns `true` if the deque is empty.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_deque::Deque;
+    ///
+    /// let d = Deque::new();
+    /// assert!(d.is_empty());
+    /// d.push("foo");
+    /// assert!(!d.is_empty());
+    /// ```
+    pub fn is_empty(&self) -> bool {
+        self.len() == 0
+    }
+
+    /// Returns the number of elements in the deque.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_deque::Deque;
+    ///
+    /// let d = Deque::new();
+    /// d.push('a');
+    /// d.push('b');
+    /// d.push('c');
+    /// assert_eq!(d.len(), 3);
+    /// ```
+    pub fn len(&self) -> usize {
+        let b = self.inner.bottom.load(Relaxed);
+        let t = self.inner.top.load(Relaxed);
+        b.wrapping_sub(t) as usize
+    }
+
+    /// Pushes an element into the bottom of the deque.
+    ///
+    /// If the internal buffer is full, a new one twice the capacity of the current one will be
+    /// allocated.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_deque::Deque;
+    ///
+    /// let d = Deque::new();
+    /// d.push(1);
+    /// d.push(2);
+    /// ```
+    pub fn push(&self, value: T) {
+        unsafe {
+            // Load the bottom, top, and buffer. The buffer doesn't have to be epoch-protected
+            // because the current thread (the worker) is the only one that grows and shrinks it.
+            let b = self.inner.bottom.load(Relaxed);
+            let t = self.inner.top.load(Acquire);
+
+            let mut buffer = self.inner.buffer.load(Relaxed, epoch::unprotected());
+
+            // Calculate the length of the deque.
+            let len = b.wrapping_sub(t);
+
+            // Is the deque full?
+            let cap = buffer.deref().cap;
+            if len >= cap as isize {
+                // Yes. Grow the underlying buffer.
+                self.inner.resize(2 * cap);
+                buffer = self.inner.buffer.load(Relaxed, epoch::unprotected());
+            }
+
+            // Write `value` into the right slot and increment `b`.
+            buffer.deref().write(b, value);
+            atomic::fence(Release);
+            self.inner.bottom.store(b.wrapping_add(1), Relaxed);
+        }
+    }
+
+    /// Pops an element from the bottom of the deque.
+    ///
+    /// If the internal buffer is less than a quarter full, a new buffer half the capacity of the
+    /// current one will be allocated.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_deque::Deque;
+    ///
+    /// let d = Deque::new();
+    /// d.push(1);
+    /// d.push(2);
+    ///
+    /// assert_eq!(d.pop(), Some(2));
+    /// assert_eq!(d.pop(), Some(1));
+    /// assert_eq!(d.pop(), None);
+    /// ```
+    pub fn pop(&self) -> Option<T> {
+        // Load the bottom.
+        let b = self.inner.bottom.load(Relaxed);
+
+        // If the deque is empty, return early without incurring the cost of a SeqCst fence.
+        let t = self.inner.top.load(Relaxed);
+        if b.wrapping_sub(t) <= 0 {
+            return None;
+        }
+
+        // Decrement the bottom.
+        let b = b.wrapping_sub(1);
+        self.inner.bottom.store(b, Relaxed);
+
+        // Load the buffer. The buffer doesn't have to be epoch-protected because the current
+        // thread (the worker) is the only one that grows and shrinks it.
+        let buf = unsafe { self.inner.buffer.load(Relaxed, epoch::unprotected()) };
+
+        atomic::fence(SeqCst);
+
+        // Load the top.
+        let t = self.inner.top.load(Relaxed);
+
+        // Compute the length after the bottom was decremented.
+        let len = b.wrapping_sub(t);
+
+        if len < 0 {
+            // The deque is empty. Restore the bottom back to the original value.
+            self.inner.bottom.store(b.wrapping_add(1), Relaxed);
+            None
+        } else {
+            // Read the value to be popped.
+            let mut value = unsafe { Some(buf.deref().read(b)) };
+
+            // Are we popping the last element from the deque?
+            if len == 0 {
+                // Try incrementing the top.
+                if self.inner
+                    .top
+                    .compare_exchange(t, t.wrapping_add(1), SeqCst, Relaxed)
+                    .is_err()
+                {
+                    // Failed. We didn't pop anything.
+                    mem::forget(value.take());
+                }
+
+                // Restore the bottom back to the original value.
+                self.inner.bottom.store(b.wrapping_add(1), Relaxed);
+            } else {
+                // Shrink the buffer if `len` is less than one fourth of the capacity, never
+                // shrinking below `self.inner.min_cap`.
+                unsafe {
+                    let cap = buf.deref().cap;
+                    if cap > self.inner.min_cap && len < cap as isize / 4 {
+                        self.inner.resize(cap / 2);
+                    }
+                }
+            }
+
+            value
+        }
+    }
+
+    /// Steals an element from the top of the deque.
+    ///
+    /// Unlike most methods in concurrent data structures, if another operation gets in the way
+    /// while attempting to steal data, this method will return immediately with [`Steal::Retry`]
+    /// instead of retrying.
+    ///
+    /// If the internal buffer is less than a quarter full, a new buffer half the capacity of the
+    /// current one will be allocated.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_deque::{Deque, Steal};
+    ///
+    /// let d = Deque::new();
+    /// d.push(1);
+    /// d.push(2);
+    ///
+    /// // Attempt to steal an element.
+    /// //
+    /// // No other threads are working with the deque, so this time we know for sure that we
+    /// // won't get `Steal::Retry` as the result.
+    /// assert_eq!(d.steal(), Steal::Data(1));
+    ///
+    /// // Attempt to steal an element, but keep retrying if we get `Retry`.
+    /// loop {
+    ///     match d.steal() {
+    ///         Steal::Empty => panic!("should steal something"),
+    ///         Steal::Data(data) => {
+    ///             assert_eq!(data, 2);
+    ///             break;
+    ///         }
+    ///         Steal::Retry => {}
+    ///     }
+    /// }
+    /// ```
+    ///
+    /// [`Steal::Retry`]: enum.Steal.html#variant.Retry
+    pub fn steal(&self) -> Steal<T> {
+        let b = self.inner.bottom.load(Relaxed);
+        let buf = unsafe { self.inner.buffer.load(Relaxed, epoch::unprotected()) };
+        let t = self.inner.top.load(Relaxed);
+        let len = b.wrapping_sub(t);
+
+        // Is the deque empty?
+        if len <= 0 {
+            return Steal::Empty;
+        }
+
+        // Try incrementing the top to steal the value.
+        if self.inner
+            .top
+            .compare_exchange(t, t.wrapping_add(1), SeqCst, Relaxed)
+            .is_ok()
+        {
+            let data = unsafe { buf.deref().read(t) };
+
+            // Shrink the buffer if `len - 1` is less than one fourth of the capacity, never
+            // shrinking below `self.inner.min_cap`.
+            unsafe {
+                let cap = buf.deref().cap;
+                if cap > self.inner.min_cap && len <= cap as isize / 4 {
+                    self.inner.resize(cap / 2);
+                }
+            }
+
+            return Steal::Data(data);
+        }
+
+        Steal::Retry
+    }
+
+    /// Creates a stealer that can be shared with other threads.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_deque::{Deque, Steal};
+    /// use std::thread;
+    ///
+    /// let d = Deque::new();
+    /// d.push(1);
+    /// d.push(2);
+    ///
+    /// let s = d.stealer();
+    ///
+    /// thread::spawn(move || {
+    ///     assert_eq!(s.steal(), Steal::Data(1));
+    /// }).join().unwrap();
+    /// ```
+    pub fn stealer(&self) -> Stealer<T> {
+        Stealer {
+            inner: self.inner.clone(),
+            _marker: PhantomData,
+        }
+    }
+}
+
+impl<T> fmt::Debug for Deque<T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "Deque {{ ... }}")
+    }
+}
+
+impl<T> Default for Deque<T> {
+    fn default() -> Deque<T> {
+        Deque::new()
+    }
+}
+
+/// A stealer that steals elements from the top of a deque.
+///
+/// The only operation a stealer can do that manipulates the deque is [`steal`].
+///
+/// Stealers can be cloned in order to create more of them. They also implement `Send` and `Sync`
+/// so they can be easily shared among multiple threads.
+///
+/// [`steal`]: struct.Stealer.html#method.steal
+pub struct Stealer<T> {
+    inner: Arc<CachePadded<Inner<T>>>,
+    _marker: PhantomData<*mut ()>, // !Send + !Sync
+}
+
+unsafe impl<T: Send> Send for Stealer<T> {}
+unsafe impl<T: Send> Sync for Stealer<T> {}
+
+impl<T> Stealer<T> {
+    /// Returns `true` if the deque is empty.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_deque::Deque;
+    ///
+    /// let d = Deque::new();
+    /// d.push("foo");
+    ///
+    /// let s = d.stealer();
+    /// assert!(!s.is_empty());
+    /// s.steal();
+    /// assert!(s.is_empty());
+    /// ```
+    pub fn is_empty(&self) -> bool {
+        self.len() == 0
+    }
+
+    /// Returns the number of elements in the deque.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_deque::Deque;
+    ///
+    /// let d = Deque::new();
+    /// let s = d.stealer();
+    /// d.push('a');
+    /// d.push('b');
+    /// d.push('c');
+    /// assert_eq!(s.len(), 3);
+    /// ```
+    pub fn len(&self) -> usize {
+        let t = self.inner.top.load(Relaxed);
+        atomic::fence(SeqCst);
+        let b = self.inner.bottom.load(Relaxed);
+        std::cmp::max(b.wrapping_sub(t), 0) as usize
+    }
+
+    /// Steals an element from the top of the deque.
+    ///
+    /// Unlike most methods in concurrent data structures, if another operation gets in the way
+    /// while attempting to steal data, this method will return immediately with [`Steal::Retry`]
+    /// instead of retrying.
+    ///
+    /// This method will not attempt to resize the internal buffer.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_deque::{Deque, Steal};
+    ///
+    /// let d = Deque::new();
+    /// let s = d.stealer();
+    /// d.push(1);
+    /// d.push(2);
+    ///
+    /// // Attempt to steal an element, but keep retrying if we get `Retry`.
+    /// loop {
+    ///     match s.steal() {
+    ///         Steal::Empty => panic!("should steal something"),
+    ///         Steal::Data(data) => {
+    ///             assert_eq!(data, 1);
+    ///             break;
+    ///         }
+    ///         Steal::Retry => {}
+    ///     }
+    /// }
+    /// ```
+    ///
+    /// [`Steal::Retry`]: enum.Steal.html#variant.Retry
+    pub fn steal(&self) -> Steal<T> {
+        // Load the top.
+        let t = self.inner.top.load(Acquire);
+
+        // A SeqCst fence is needed here.
+        // If the current thread is already pinned (reentrantly), we must manually issue the fence.
+        // Otherwise, the following pinning will issue the fence anyway, so we don't have to.
+        if epoch::is_pinned() {
+            atomic::fence(SeqCst);
+        }
+
+        let guard = &epoch::pin();
+
+        // Load the bottom.
+        let b = self.inner.bottom.load(Acquire);
+
+        // Is the deque empty?
+        if b.wrapping_sub(t) <= 0 {
+            return Steal::Empty;
+        }
+
+        // Load the buffer and read the value at the top.
+        let buf = self.inner.buffer.load(Acquire, guard);
+        let value = unsafe { buf.deref().read(t) };
+
+        // Try incrementing the top to steal the value.
+        if self.inner
+            .top
+            .compare_exchange(t, t.wrapping_add(1), SeqCst, Relaxed)
+            .is_ok()
+        {
+            return Steal::Data(value);
+        }
+
+        // We didn't steal this value, forget it.
+        mem::forget(value);
+
+        Steal::Retry
+    }
+}
+
+impl<T> Clone for Stealer<T> {
+    /// Creates another stealer.
+    fn clone(&self) -> Stealer<T> {
+        Stealer {
+            inner: self.inner.clone(),
+            _marker: PhantomData,
+        }
+    }
+}
+
+impl<T> fmt::Debug for Stealer<T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "Stealer {{ ... }}")
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    extern crate rand;
+
+    use std::sync::{Arc, Mutex};
+    use std::sync::atomic::{AtomicBool, AtomicUsize};
+    use std::sync::atomic::Ordering::SeqCst;
+    use std::thread;
+
+    use epoch;
+    use self::rand::Rng;
+
+    use super::{Deque, Steal};
+
+    #[test]
+    fn smoke() {
+        let d = Deque::new();
+        let s = d.stealer();
+        assert_eq!(d.pop(), None);
+        assert_eq!(s.steal(), Steal::Empty);
+        assert_eq!(d.len(), 0);
+        assert_eq!(s.len(), 0);
+
+        d.push(1);
+        assert_eq!(d.len(), 1);
+        assert_eq!(s.len(), 1);
+        assert_eq!(d.pop(), Some(1));
+        assert_eq!(d.pop(), None);
+        assert_eq!(s.steal(), Steal::Empty);
+        assert_eq!(d.len(), 0);
+        assert_eq!(s.len(), 0);
+
+        d.push(2);
+        assert_eq!(s.steal(), Steal::Data(2));
+        assert_eq!(s.steal(), Steal::Empty);
+        assert_eq!(d.pop(), None);
+
+        d.push(3);
+        d.push(4);
+        d.push(5);
+        assert_eq!(d.steal(), Steal::Data(3));
+        assert_eq!(s.steal(), Steal::Data(4));
+        assert_eq!(d.steal(), Steal::Data(5));
+        assert_eq!(d.steal(), Steal::Empty);
+    }
+
+    #[test]
+    fn steal_push() {
+        const STEPS: usize = 50_000;
+
+        let d = Deque::new();
+        let s = d.stealer();
+        let t = thread::spawn(move || for i in 0..STEPS {
+            loop {
+                if let Steal::Data(v) = s.steal() {
+                    assert_eq!(i, v);
+                    break;
+                }
+            }
+        });
+
+        for i in 0..STEPS {
+            d.push(i);
+        }
+        t.join().unwrap();
+    }
+
+    #[test]
+    fn stampede() {
+        const COUNT: usize = 50_000;
+
+        let d = Deque::new();
+
+        for i in 0..COUNT {
+            d.push(Box::new(i + 1));
+        }
+        let remaining = Arc::new(AtomicUsize::new(COUNT));
+
+        let threads = (0..8)
+            .map(|_| {
+                let s = d.stealer();
+                let remaining = remaining.clone();
+
+                thread::spawn(move || {
+                    let mut last = 0;
+                    while remaining.load(SeqCst) > 0 {
+                        if let Steal::Data(x) = s.steal() {
+                            assert!(last < *x);
+                            last = *x;
+                            remaining.fetch_sub(1, SeqCst);
+                        }
+                    }
+                })
+            })
+            .collect::<Vec<_>>();
+
+        let mut last = COUNT + 1;
+        while remaining.load(SeqCst) > 0 {
+            if let Some(x) = d.pop() {
+                assert!(last > *x);
+                last = *x;
+                remaining.fetch_sub(1, SeqCst);
+            }
+        }
+
+        for t in threads {
+            t.join().unwrap();
+        }
+    }
+
+    fn run_stress() {
+        const COUNT: usize = 50_000;
+
+        let d = Deque::new();
+        let done = Arc::new(AtomicBool::new(false));
+        let hits = Arc::new(AtomicUsize::new(0));
+
+        let threads = (0..8)
+            .map(|_| {
+                let s = d.stealer();
+                let done = done.clone();
+                let hits = hits.clone();
+
+                thread::spawn(move || while !done.load(SeqCst) {
+                    if let Steal::Data(_) = s.steal() {
+                        hits.fetch_add(1, SeqCst);
+                    }
+                })
+            })
+            .collect::<Vec<_>>();
+
+        let mut rng = rand::thread_rng();
+        let mut expected = 0;
+        while expected < COUNT {
+            if rng.gen_range(0, 3) == 0 {
+                if d.pop().is_some() {
+                    hits.fetch_add(1, SeqCst);
+                }
+            } else {
+                d.push(expected);
+                expected += 1;
+            }
+        }
+
+        while hits.load(SeqCst) < COUNT {
+            if d.pop().is_some() {
+                hits.fetch_add(1, SeqCst);
+            }
+        }
+        done.store(true, SeqCst);
+
+        for t in threads {
+            t.join().unwrap();
+        }
+    }
+
+    #[test]
+    fn stress() {
+        run_stress();
+    }
+
+    #[test]
+    fn stress_pinned() {
+        let _guard = epoch::pin();
+        run_stress();
+    }
+
+    #[test]
+    fn no_starvation() {
+        const COUNT: usize = 50_000;
+
+        let d = Deque::new();
+        let done = Arc::new(AtomicBool::new(false));
+
+        let (threads, hits): (Vec<_>, Vec<_>) = (0..8)
+            .map(|_| {
+                let s = d.stealer();
+                let done = done.clone();
+                let hits = Arc::new(AtomicUsize::new(0));
+
+                let t = {
+                    let hits = hits.clone();
+                    thread::spawn(move || while !done.load(SeqCst) {
+                        if let Steal::Data(_) = s.steal() {
+                            hits.fetch_add(1, SeqCst);
+                        }
+                    })
+                };
+
+                (t, hits)
+            })
+            .unzip();
+
+        let mut rng = rand::thread_rng();
+        let mut my_hits = 0;
+        loop {
+            for i in 0..rng.gen_range(0, COUNT) {
+                if rng.gen_range(0, 3) == 0 && my_hits == 0 {
+                    if d.pop().is_some() {
+                        my_hits += 1;
+                    }
+                } else {
+                    d.push(i);
+                }
+            }
+
+            if my_hits > 0 && hits.iter().all(|h| h.load(SeqCst) > 0) {
+                break;
+            }
+        }
+        done.store(true, SeqCst);
+
+        for t in threads {
+            t.join().unwrap();
+        }
+    }
+
+    #[test]
+    fn destructors() {
+        const COUNT: usize = 50_000;
+
+        struct Elem(usize, Arc<Mutex<Vec<usize>>>);
+
+        impl Drop for Elem {
+            fn drop(&mut self) {
+                self.1.lock().unwrap().push(self.0);
+            }
+        }
+
+        let d = Deque::new();
+
+        let dropped = Arc::new(Mutex::new(Vec::new()));
+        let remaining = Arc::new(AtomicUsize::new(COUNT));
+        for i in 0..COUNT {
+            d.push(Elem(i, dropped.clone()));
+        }
+
+        let threads = (0..8)
+            .map(|_| {
+                let s = d.stealer();
+                let remaining = remaining.clone();
+
+                thread::spawn(move || for _ in 0..1000 {
+                    if let Steal::Data(_) = s.steal() {
+                        remaining.fetch_sub(1, SeqCst);
+                    }
+                })
+            })
+            .collect::<Vec<_>>();
+
+        for _ in 0..1000 {
+            if d.pop().is_some() {
+                remaining.fetch_sub(1, SeqCst);
+            }
+        }
+
+        for t in threads {
+            t.join().unwrap();
+        }
+
+        let rem = remaining.load(SeqCst);
+        assert!(rem > 0);
+        assert_eq!(d.len(), rem);
+
+        {
+            let mut v = dropped.lock().unwrap();
+            assert_eq!(v.len(), COUNT - rem);
+            v.clear();
+        }
+
+        drop(d);
+
+        {
+            let mut v = dropped.lock().unwrap();
+            assert_eq!(v.len(), rem);
+            v.sort();
+            for pair in v.windows(2) {
+                assert_eq!(pair[0] + 1, pair[1]);
+            }
+        }
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/crossbeam-epoch/.cargo-checksum.json
@@ -0,0 +1,1 @@
+{"files":{".travis.yml":"d84605e26d95fabc8172af7a621d3e48117b5180d389c6a166d15acb09c9ed9f","CHANGELOG.md":"5e62172f395348eb92a3fd2532ba5d65a7f13286449a3698b41f3aac7a9a4e57","Cargo.toml":"4406898417f3336a7f285eb59ca78efa43e228e64215403cd82974ce61d86d60","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"0621878e61f0d0fda054bcbe02df75192c28bde1ecc8289cbd86aeba2dd72720","README.md":"8728114db9ab19bca8e07b36f1cccd1e6a57db6ea03be08679aef2a982736532","benches/defer.rs":"b2b64a8bb684721f12432aa63ae1e2227511879567ed212c0430961805b9f543","benches/flush.rs":"3b8c6be173ea546ad7e93adff324244a1c289608403bb13cc3bd89082fe90e35","benches/pin.rs":"4165baf238bbe2267e1598695d41ea8d3a312aa613d567e4dd7f5581a0f1323c","examples/sanitize.rs":"41b2d03e2cfd46912a3722295843b841e74e10eae6eb23586d3bc3b6d0a41e32","src/atomic.rs":"469ae38d3e8b37eec79c1c21a29a63cd357e49f34f4b6cdde6817f8e1267bd8d","src/collector.rs":"ebebbf1229a0d5339b938825d0dca9dc8642f9fa5bbceafb4e371477186ed4b4","src/default.rs":"804c217df80e0b6df3c6e90c5d6f5153c153567ac28cc75cc62042ba75d24bf2","src/deferred.rs":"1bd6c66c58f92714088b6f9f811368a123143a5f03cf4afc4b19ab24f3181387","src/epoch.rs":"25b85734a4ec5bedb0384a1fe976ec97056a88910a046a270a3e38558f7dbd4b","src/garbage.rs":"b77a8f87701dca8b63d858bb234137335455b6fc1f223e73c7609542d13daa43","src/guard.rs":"08975d989ba558aba90d64865594b155b2135e628414f77bb8afb9de427a2e0d","src/internal.rs":"a5a6a52999ce99294d544ac7cb82cb820e78f0c41315fc8d7494d21ca6da1135","src/lib.rs":"f3093bc3411f2bd94d662c3cf8719411b62793449b3db1699865f4c08c207af1","src/sync/list.rs":"57c3674c40e30eaf92689ab0e09973d7d161e52a5bdb5b5481b62fd0d10fb4eb","src/sync/mod.rs":"2da979ca3a2293f7626a2e6a9ab2fad758d92e3d2bed6cc712ef59eeeea87eab","src/sync/queue.rs":"868b5bd651e54216fa1827d668ab564c120779113ae7a2a056fee4371db1066c"},"package":"59796cc6cbbdc6bb319161349db0c3250ec73ec7fcb763a51065ec4e2e158552"}
\ No newline at end of file
new file mode 100644
--- /dev/null
+++ b/third_party/rust/crossbeam-epoch/.travis.yml
@@ -0,0 +1,64 @@
+language: rust
+
+rust:
+  - stable
+  - beta
+  - nightly
+  - 1.13.0
+
+addons:
+  apt:
+    sources:
+      - ubuntu-toolchain-r-test
+      - llvm-toolchain-precise
+      - llvm-toolchain-precise-3.8
+    packages:
+      - llvm-3.8
+      - llvm-3.8-dev
+      - clang-3.8
+      - clang-3.8-dev
+
+script:
+  - cargo build
+  - cargo build --release
+  - cargo build --no-default-features
+  - cargo build --release --no-default-features
+  - cargo test
+  - cargo test --release
+
+  - |
+    if [ $TRAVIS_RUST_VERSION == nightly ]; then
+      cargo build --features nightly --no-default-features
+      cargo build --features nightly --release --no-default-features
+    fi
+
+  - |
+    if [ $TRAVIS_RUST_VERSION == nightly ]; then
+      cargo test --features nightly
+    fi
+
+  - |
+    if [[ $TRAVIS_RUST_VERSION == nightly ]]; then
+      cargo test --features nightly --release
+    fi
+
+  - |
+    if [[ $TRAVIS_RUST_VERSION == nightly ]]; then
+      ASAN_OPTIONS="detect_odr_violation=0 detect_leaks=0" \
+      RUSTFLAGS="-Z sanitizer=address" \
+      cargo run \
+        --target x86_64-unknown-linux-gnu \
+        --features sanitize,nightly \
+        --example sanitize
+    fi
+
+  - |
+    if [[ $TRAVIS_RUST_VERSION == nightly ]]; then
+      ASAN_OPTIONS="detect_odr_violation=0 detect_leaks=0" \
+      RUSTFLAGS="-Z sanitizer=address" \
+      cargo run \
+        --release \
+        --target x86_64-unknown-linux-gnu \
+        --features sanitize,nightly \
+        --example sanitize
+    fi
new file mode 100644
--- /dev/null
+++ b/third_party/rust/crossbeam-epoch/CHANGELOG.md
@@ -0,0 +1,26 @@
+# Changelog
+All notable changes to this project will be documented in this file.
+
+The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/)
+and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
+
+## [Unreleased]
+
+## [0.2.0] - 2017-11-29
+### Added
+- Add method `Owned::into_box`.
+
+### Changed
+- Fix a use-after-free bug in `Local::finalize`.
+- Fix an ordering bug in `Global::push_bag`.
+- Fix a bug in calculating distance between epochs.
+
+### Removed
+- Remove `impl<T> Into<Box<T>> for Owned<T>`.
+
+## 0.1.0 - 2017-11-26
+### Added
+- First version of the new epoch-based GC.
+
+[Unreleased]: https://github.com/crossbeam-rs/crossbeam-epoch/compare/v0.2.0...HEAD
+[0.2.0]: https://github.com/crossbeam-rs/crossbeam-epoch/compare/v0.1.0...v0.2.0
new file mode 100644
--- /dev/null
+++ b/third_party/rust/crossbeam-epoch/Cargo.toml
@@ -0,0 +1,57 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g. crates.io) dependencies
+#
+# If you believe there's an error in this file please file an
+# issue against the rust-lang/cargo repository. If you're
+# editing this file be aware that the upstream Cargo.toml
+# will likely look very different (and much more reasonable)
+
+[package]
+name = "crossbeam-epoch"
+version = "0.3.0"
+authors = ["The Crossbeam Project Developers"]
+description = "Epoch-based garbage collection"
+homepage = "https://github.com/crossbeam-rs/crossbeam-epoch"
+documentation = "https://docs.rs/crossbeam-epoch"
+readme = "README.md"
+keywords = ["lock-free", "rcu", "atomic", "garbage"]
+categories = ["concurrency", "memory-management"]
+license = "MIT/Apache-2.0"
+repository = "https://github.com/crossbeam-rs/crossbeam-epoch"
+[dependencies.arrayvec]
+version = "0.4"
+default-features = false
+
+[dependencies.cfg-if]
+version = "0.1"
+
+[dependencies.crossbeam-utils]
+version = "0.2"
+default-features = false
+
+[dependencies.lazy_static]
+version = "0.2"
+optional = true
+
+[dependencies.memoffset]
+version = "0.2"
+
+[dependencies.nodrop]
+version = "0.1.12"
+default-features = false
+
+[dependencies.scopeguard]
+version = "0.3"
+default-features = false
+[dev-dependencies.rand]
+version = "0.3"
+
+[features]
+default = ["use_std"]
+nightly = ["arrayvec/use_union"]
+sanitize = []
+use_std = ["lazy_static", "crossbeam-utils/use_std"]
copy from third_party/rust/coco/LICENSE-APACHE
copy to third_party/rust/crossbeam-epoch/LICENSE-APACHE
copy from third_party/rust/coco/LICENSE-MIT
copy to third_party/rust/crossbeam-epoch/LICENSE-MIT
new file mode 100644
--- /dev/null
+++ b/third_party/rust/crossbeam-epoch/README.md
@@ -0,0 +1,33 @@
+# Epoch-based garbage collection
+
+[![Build Status](https://travis-ci.org/crossbeam-rs/crossbeam-epoch.svg?branch=master)](https://travis-ci.org/crossbeam-rs/crossbeam-epoch)
+[![License](https://img.shields.io/badge/license-MIT%2FApache--2.0-blue.svg)](https://github.com/crossbeam-rs/crossbeam-epoch)
+[![Cargo](https://img.shields.io/crates/v/crossbeam-epoch.svg)](https://crates.io/crates/crossbeam-epoch)
+[![Documentation](https://docs.rs/crossbeam-epoch/badge.svg)](https://docs.rs/crossbeam-epoch)
+
+This crate provides epoch-based garbage collection for use in concurrent data structures.
+
+If a thread removes a node from a concurrent data structure, other threads
+may still have pointers to that node, so it cannot be immediately destructed.
+Epoch GC allows deferring destruction until it becomes safe to do so.
+
+## Usage
+
+Add this to your `Cargo.toml`:
+
+```toml
+[dependencies]
+crossbeam-epoch = "0.2"
+```
+
+Next, add this to your crate:
+
+```rust
+extern crate crossbeam_epoch as epoch;
+```
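+
+A minimal sketch of deferring destruction behind a pinned guard (illustrative only; see the
+crate documentation for the full API):
+
+```rust
+extern crate crossbeam_epoch as epoch;
+
+use epoch::{Atomic, Owned};
+use std::sync::atomic::Ordering::SeqCst;
+
+fn main() {
+    let a = Atomic::new(5);
+
+    // Pin the current thread and replace the value.
+    let guard = &epoch::pin();
+    let old = a.swap(Owned::new(6), SeqCst, guard);
+
+    // The old value may still be visible to other threads, so defer its destruction
+    // until it is safe.
+    unsafe {
+        guard.defer(move || old.into_owned());
+    }
+}
+```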
+
+## License
+
+Licensed under the terms of the MIT license and the Apache License (Version 2.0).
+
+See [LICENSE-MIT](LICENSE-MIT) and [LICENSE-APACHE](LICENSE-APACHE) for details.
new file mode 100644
--- /dev/null
+++ b/third_party/rust/crossbeam-epoch/benches/defer.rs
@@ -0,0 +1,73 @@
+#![feature(test)]
+
+extern crate crossbeam_epoch as epoch;
+extern crate crossbeam_utils as utils;
+extern crate test;
+
+use epoch::Owned;
+use test::Bencher;
+use utils::scoped::scope;
+
+#[bench]
+fn single_alloc_defer_free(b: &mut Bencher) {
+    b.iter(|| {
+        let guard = &epoch::pin();
+        let p = Owned::new(1).into_shared(guard);
+        unsafe {
+            guard.defer(move || p.into_owned());
+        }
+    });
+}
+
+#[bench]
+fn single_defer(b: &mut Bencher) {
+    b.iter(|| {
+        let guard = &epoch::pin();
+        unsafe {
+            guard.defer(move || ());
+        }
+    });
+}
+
+#[bench]
+fn multi_alloc_defer_free(b: &mut Bencher) {
+    const THREADS: usize = 16;
+    const STEPS: usize = 10_000;
+
+    b.iter(|| {
+        scope(|s| {
+            for _ in 0..THREADS {
+                s.spawn(|| {
+                    for _ in 0..STEPS {
+                        let guard = &epoch::pin();
+                        let p = Owned::new(1).into_shared(guard);
+                        unsafe {
+                            guard.defer(move || p.into_owned());
+                        }
+                    }
+                });
+            }
+        });
+    });
+}
+
+#[bench]
+fn multi_defer(b: &mut Bencher) {
+    const THREADS: usize = 16;
+    const STEPS: usize = 10_000;
+
+    b.iter(|| {
+        scope(|s| {
+            for _ in 0..THREADS {
+                s.spawn(|| {
+                    for _ in 0..STEPS {
+                        let guard = &epoch::pin();
+                        unsafe {
+                            guard.defer(move || ());
+                        }
+                    }
+                });
+            }
+        });
+    });
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/crossbeam-epoch/benches/flush.rs
@@ -0,0 +1,51 @@
+#![feature(test)]
+
+extern crate crossbeam_epoch as epoch;
+extern crate crossbeam_utils as utils;
+extern crate test;
+
+use std::sync::Barrier;
+
+use test::Bencher;
+use utils::scoped::scope;
+
+#[bench]
+fn single_flush(b: &mut Bencher) {
+    const THREADS: usize = 16;
+
+    let start = Barrier::new(THREADS + 1);
+    let end = Barrier::new(THREADS + 1);
+
+    scope(|s| {
+        for _ in 0..THREADS {
+            s.spawn(|| {
+                epoch::pin();
+                start.wait();
+                end.wait();
+            });
+        }
+
+        start.wait();
+        b.iter(|| epoch::pin().flush());
+        end.wait();
+    });
+}
+
+#[bench]
+fn multi_flush(b: &mut Bencher) {
+    const THREADS: usize = 16;
+    const STEPS: usize = 10_000;
+
+    b.iter(|| {
+        scope(|s| {
+            for _ in 0..THREADS {
+                s.spawn(|| {
+                    for _ in 0..STEPS {
+                        let guard = &epoch::pin();
+                        guard.flush();
+                    }
+                });
+            }
+        });
+    });
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/crossbeam-epoch/benches/pin.rs
@@ -0,0 +1,36 @@
+#![feature(test)]
+
+extern crate crossbeam_epoch as epoch;
+extern crate crossbeam_utils as utils;
+extern crate test;
+
+use test::Bencher;
+use utils::scoped::scope;
+
+#[bench]
+fn single_pin(b: &mut Bencher) {
+    b.iter(|| epoch::pin());
+}
+
+#[bench]
+fn single_default_handle_pin(b: &mut Bencher) {
+    b.iter(|| epoch::default_handle().pin());
+}
+
+#[bench]
+fn multi_pin(b: &mut Bencher) {
+    const THREADS: usize = 16;
+    const STEPS: usize = 100_000;
+
+    b.iter(|| {
+        scope(|s| {
+            for _ in 0..THREADS {
+                s.spawn(|| {
+                    for _ in 0..STEPS {
+                        epoch::pin();
+                    }
+                });
+            }
+        });
+    });
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/crossbeam-epoch/examples/sanitize.rs
@@ -0,0 +1,70 @@
+extern crate crossbeam_epoch as epoch;
+extern crate rand;
+
+use std::sync::Arc;
+use std::sync::atomic::AtomicUsize;
+use std::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed};
+use std::time::{Duration, Instant};
+use std::thread;
+
+use epoch::{Atomic, Collector, Handle, Owned, Shared};
+use rand::Rng;
+
+fn worker(a: Arc<Atomic<AtomicUsize>>, handle: Handle) -> usize {
+    let mut rng = rand::thread_rng();
+    let mut sum = 0;
+
+    if rng.gen() {
+        thread::sleep(Duration::from_millis(1));
+    }
+    let timeout = Duration::from_millis(rng.gen_range(0, 10));
+    let now = Instant::now();
+
+    while now.elapsed() < timeout {
+        for _ in 0..100 {
+            let guard = &handle.pin();
+            guard.flush();
+
+            let val = if rng.gen() {
+                let p = a.swap(Owned::new(AtomicUsize::new(sum)), AcqRel, guard);
+                unsafe {
+                    guard.defer(move || p.into_owned());
+                    guard.flush();
+                    p.deref().load(Relaxed)
+                }
+            } else {
+                let p = a.load(Acquire, guard);
+                unsafe {
+                    p.deref().fetch_add(sum, Relaxed)
+                }
+            };
+
+            sum = sum.wrapping_add(val);
+        }
+    }
+
+    sum
+}
+
+fn main() {
+    for _ in 0..100 {
+        let collector = Collector::new();
+        let a = Arc::new(Atomic::new(AtomicUsize::new(777)));
+
+        let threads = (0..16)
+            .map(|_| {
+                let a = a.clone();
+                let h = collector.handle();
+                thread::spawn(move || worker(a, h))
+            })
+            .collect::<Vec<_>>();
+
+        for t in threads {
+            t.join().unwrap();
+        }
+
+        unsafe {
+            a.swap(Shared::null(), AcqRel, epoch::unprotected()).into_owned();
+        }
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/crossbeam-epoch/src/atomic.rs
@@ -0,0 +1,1097 @@
+use core::borrow::{Borrow, BorrowMut};
+use core::cmp;
+use core::fmt;
+use core::marker::PhantomData;
+use core::mem;
+use core::ptr;
+use core::ops::{Deref, DerefMut};
+use core::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT};
+use core::sync::atomic::Ordering;
+use alloc::boxed::Box;
+
+use guard::Guard;
+
+/// Given ordering for the success case in a compare-exchange operation, returns the strongest
+/// appropriate ordering for the failure case.
+#[inline]
+fn strongest_failure_ordering(ord: Ordering) -> Ordering {
+    match ord {
+        Ordering::Relaxed | Ordering::Release => Ordering::Relaxed,
+        Ordering::Acquire | Ordering::AcqRel => Ordering::Acquire,
+        _ => Ordering::SeqCst,
+    }
+}
+
+/// The error returned on a failed compare-and-set operation.
+pub struct CompareAndSetError<'g, T: 'g, P: Pointer<T>> {
+    /// The value in the atomic pointer at the time of the failed operation.
+    pub current: Shared<'g, T>,
+
+    /// The new value, which the operation failed to store.
+    pub new: P,
+}
+
+impl<'g, T: 'g, P: Pointer<T> + fmt::Debug> fmt::Debug for CompareAndSetError<'g, T, P> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_struct("CompareAndSetError")
+            .field("current", &self.current)
+            .field("new", &self.new)
+            .finish()
+    }
+}
+
+/// Memory orderings for compare-and-set operations.
+///
+/// A compare-and-set operation can have different memory orderings depending on whether it
+/// succeeds or fails. This trait generalizes different ways of specifying memory orderings.
+///
+/// The two ways of specifying orderings for compare-and-set are:
+///
+/// 1. Just one `Ordering` for the success case. In case of failure, the strongest appropriate
+///    ordering is chosen.
+/// 2. A pair of `Ordering`s. The first one is for the success case, while the second one is
+///    for the failure case.
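+///
+/// # Examples
+///
+/// A brief sketch of both forms passed to `Atomic::compare_and_set` (illustrative only):
+///
+/// ```
+/// use crossbeam_epoch::{self as epoch, Atomic, Owned};
+/// use std::sync::atomic::Ordering::{AcqRel, Acquire, SeqCst};
+///
+/// let a = Atomic::new(1);
+/// let guard = &epoch::pin();
+///
+/// // A single ordering: the failure ordering is derived automatically.
+/// let curr = a.load(SeqCst, guard);
+/// let _ = a.compare_and_set(curr, Owned::new(2), AcqRel, guard);
+///
+/// // An explicit pair: `AcqRel` on success, `Acquire` on failure.
+/// let curr = a.load(SeqCst, guard);
+/// let _ = a.compare_and_set(curr, Owned::new(3), (AcqRel, Acquire), guard);
+/// ```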
+pub trait CompareAndSetOrdering {
+    /// The ordering of the operation when it succeeds.
+    fn success(&self) -> Ordering;
+
+    /// The ordering of the operation when it fails.
+    ///
+    /// The failure ordering can't be `Release` or `AcqRel`, and it must be equivalent to or
+    /// weaker than the success ordering.
+    fn failure(&self) -> Ordering;
+}
+
+impl CompareAndSetOrdering for Ordering {
+    #[inline]
+    fn success(&self) -> Ordering {
+        *self
+    }
+
+    #[inline]
+    fn failure(&self) -> Ordering {
+        strongest_failure_ordering(*self)
+    }
+}
+
+impl CompareAndSetOrdering for (Ordering, Ordering) {
+    #[inline]
+    fn success(&self) -> Ordering {
+        self.0
+    }
+
+    #[inline]
+    fn failure(&self) -> Ordering {
+        self.1
+    }
+}
+
+/// Panics if the pointer is not properly aligned.
+#[inline]
+fn ensure_aligned<T>(raw: *const T) {
+    assert_eq!(raw as usize & low_bits::<T>(), 0, "unaligned pointer");
+}
+
+/// Returns a bitmask containing the unused least significant bits of an aligned pointer to `T`.
+#[inline]
+fn low_bits<T>() -> usize {
+    (1 << mem::align_of::<T>().trailing_zeros()) - 1
+}
+
+/// Given a tagged pointer `data`, returns the same pointer, but tagged with `tag`.
+///
+/// `tag` is truncated to fit into the unused bits of the pointer to `T`.
+#[inline]
+fn data_with_tag<T>(data: usize, tag: usize) -> usize {
+    (data & !low_bits::<T>()) | (tag & low_bits::<T>())
+}
+
+/// Decomposes a tagged pointer `data` into the pointer and the tag.
+#[inline]
+fn decompose_data<T>(data: usize) -> (*mut T, usize) {
+    let raw = (data & !low_bits::<T>()) as *mut T;
+    let tag = data & low_bits::<T>();
+    (raw, tag)
+}
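+
+// A quick worked illustration of the helpers above (assuming a target where `u64` has 8-byte
+// alignment, so `low_bits::<u64>()` is `0b111` and three tag bits are available):
+//
+//     let data = data_with_tag::<u64>(0x1000, 0b101); // yields 0x1005
+//     let (raw, tag) = decompose_data::<u64>(data);
+//     assert_eq!((raw as usize, tag), (0x1000, 0b101));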
+
+/// An atomic pointer that can be safely shared between threads.
+///
+/// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused
+/// least significant bits of the address.  More precisely, a tag should be less than `(1 <<
+/// mem::align_of::<T>().trailing_zeros())`.
+///
+/// Any method that loads the pointer must be passed a reference to a [`Guard`].
+///
+/// [`Guard`]: struct.Guard.html
+pub struct Atomic<T> {
+    data: AtomicUsize,
+    _marker: PhantomData<*mut T>,
+}
+
+unsafe impl<T: Send + Sync> Send for Atomic<T> {}
+unsafe impl<T: Send + Sync> Sync for Atomic<T> {}
+
+impl<T> Atomic<T> {
+    /// Returns a new atomic pointer pointing to the tagged pointer `data`.
+    fn from_data(data: usize) -> Atomic<T> {
+        Atomic {
+            data: AtomicUsize::new(data),
+            _marker: PhantomData,
+        }
+    }
+
+    /// Returns a new null atomic pointer.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_epoch::Atomic;
+    ///
+    /// let a = Atomic::<i32>::null();
+    /// ```
+    #[cfg(not(feature = "nightly"))]
+    pub fn null() -> Atomic<T> {
+        Atomic {
+            data: ATOMIC_USIZE_INIT,
+            _marker: PhantomData,
+        }
+    }
+
+    /// Returns a new null atomic pointer.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_epoch::Atomic;
+    ///
+    /// let a = Atomic::<i32>::null();
+    /// ```
+    #[cfg(feature = "nightly")]
+    pub const fn null() -> Atomic<T> {
+        Self {
+            data: ATOMIC_USIZE_INIT,
+            _marker: PhantomData,
+        }
+    }
+
+    /// Allocates `value` on the heap and returns a new atomic pointer pointing to it.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_epoch::Atomic;
+    ///
+    /// let a = Atomic::new(1234);
+    /// ```
+    pub fn new(value: T) -> Atomic<T> {
+        Self::from(Owned::new(value))
+    }
+
+    /// Loads a `Shared` from the atomic pointer.
+    ///
+    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
+    /// operation.
+    ///
+    /// [`Ordering`]: https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_epoch::{self as epoch, Atomic};
+    /// use std::sync::atomic::Ordering::SeqCst;
+    ///
+    /// let a = Atomic::new(1234);
+    /// let guard = &epoch::pin();
+    /// let p = a.load(SeqCst, guard);
+    /// ```
+    pub fn load<'g>(&self, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
+        unsafe { Shared::from_data(self.data.load(ord)) }
+    }
+
+    /// Stores a `Shared` or `Owned` pointer into the atomic pointer.
+    ///
+    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
+    /// operation.
+    ///
+    /// [`Ordering`]: https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared};
+    /// use std::sync::atomic::Ordering::SeqCst;
+    ///
+    /// let a = Atomic::new(1234);
+    /// a.store(Shared::null(), SeqCst);
+    /// a.store(Owned::new(1234), SeqCst);
+    /// ```
+    pub fn store<'g, P: Pointer<T>>(&self, new: P, ord: Ordering) {
+        self.data.store(new.into_data(), ord);
+    }
+
+    /// Stores a `Shared` or `Owned` pointer into the atomic pointer, returning the previous
+    /// `Shared`.
+    ///
+    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
+    /// operation.
+    ///
+    /// [`Ordering`]: https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared};
+    /// use std::sync::atomic::Ordering::SeqCst;
+    ///
+    /// let a = Atomic::new(1234);
+    /// let guard = &epoch::pin();
+    /// let p = a.swap(Shared::null(), SeqCst, guard);
+    /// ```
+    pub fn swap<'g, P: Pointer<T>>(&self, new: P, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
+        unsafe { Shared::from_data(self.data.swap(new.into_data(), ord)) }
+    }
+
+    /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current
+    /// value is the same as `current`. The tag is also taken into account, so two pointers to the
+    /// same object, but with different tags, will not be considered equal.
+    ///
+    /// The return value is a result indicating whether the new pointer was written. On success the
+    /// pointer that was written is returned. On failure the actual current value and `new` are
+    /// returned.
+    ///
+    /// This method takes a [`CompareAndSetOrdering`] argument which describes the memory
+    /// ordering of this operation.
+    ///
+    /// [`CompareAndSetOrdering`]: trait.CompareAndSetOrdering.html
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared};
+    /// use std::sync::atomic::Ordering::SeqCst;
+    ///
+    /// let a = Atomic::new(1234);
+    ///
+    /// let guard = &epoch::pin();
+    /// let mut curr = a.load(SeqCst, guard);
+    /// let res1 = a.compare_and_set(curr, Shared::null(), SeqCst, guard);
+    /// let res2 = a.compare_and_set(curr, Owned::new(5678), SeqCst, guard);
+    /// ```
+    pub fn compare_and_set<'g, O, P>(
+        &self,
+        current: Shared<T>,
+        new: P,
+        ord: O,
+        _: &'g Guard,
+    ) -> Result<Shared<'g, T>, CompareAndSetError<'g, T, P>>
+    where
+        O: CompareAndSetOrdering,
+        P: Pointer<T>,
+    {
+        let new = new.into_data();
+        self.data
+            .compare_exchange(current.into_data(), new, ord.success(), ord.failure())
+            .map(|_| unsafe { Shared::from_data(new) })
+            .map_err(|current| unsafe {
+                CompareAndSetError {
+                    current: Shared::from_data(current),
+                    new: P::from_data(new),
+                }
+            })
+    }
+
+    /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current
+    /// value is the same as `current`. The tag is also taken into account, so two pointers to the
+    /// same object, but with different tags, will not be considered equal.
+    ///
+    /// Unlike [`compare_and_set`], this method is allowed to spuriously fail even when comparison
+    /// succeeds, which can result in more efficient code on some platforms.  The return value is a
+    /// result indicating whether the new pointer was written. On success the pointer that was
+    /// written is returned. On failure the actual current value and `new` are returned.
+    ///
+    /// This method takes a [`CompareAndSetOrdering`] argument which describes the memory
+    /// ordering of this operation.
+    ///
+    /// [`compare_and_set`]: struct.Atomic.html#method.compare_and_set
+    /// [`CompareAndSetOrdering`]: trait.CompareAndSetOrdering.html
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared};
+    /// use std::sync::atomic::Ordering::SeqCst;
+    ///
+    /// let a = Atomic::new(1234);
+    /// let guard = &epoch::pin();
+    ///
+    /// let mut new = Owned::new(5678);
+    /// let mut ptr = a.load(SeqCst, guard);
+    /// loop {
+    ///     match a.compare_and_set_weak(ptr, new, SeqCst, guard) {
+    ///         Ok(p) => {
+    ///             ptr = p;
+    ///             break;
+    ///         }
+    ///         Err(err) => {
+    ///             ptr = err.current;
+    ///             new = err.new;
+    ///         }
+    ///     }
+    /// }
+    ///
+    /// let mut curr = a.load(SeqCst, guard);
+    /// loop {
+    ///     match a.compare_and_set_weak(curr, Shared::null(), SeqCst, guard) {
+    ///         Ok(_) => break,
+    ///         Err(err) => curr = err.current,
+    ///     }
+    /// }
+    /// ```
+    pub fn compare_and_set_weak<'g, O, P>(
+        &self,
+        current: Shared<T>,
+        new: P,
+        ord: O,
+        _: &'g Guard,
+    ) -> Result<Shared<'g, T>, CompareAndSetError<'g, T, P>>
+    where
+        O: CompareAndSetOrdering,
+        P: Pointer<T>,
+    {
+        let new = new.into_data();
+        self.data
+            .compare_exchange_weak(current.into_data(), new, ord.success(), ord.failure())
+            .map(|_| unsafe { Shared::from_data(new) })
+            .map_err(|current| unsafe {
+                CompareAndSetError {
+                    current: Shared::from_data(current),
+                    new: P::from_data(new),
+                }
+            })
+    }
+
+    /// Bitwise "and" with the current tag.
+    ///
+    /// Performs a bitwise "and" operation on the current tag and the argument `val`, and sets the
+    /// new tag to the result. Returns the previous pointer.
+    ///
+    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
+    /// operation.
+    ///
+    /// [`Ordering`]: https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_epoch::{self as epoch, Atomic, Shared};
+    /// use std::sync::atomic::Ordering::SeqCst;
+    ///
+    /// let a = Atomic::<i32>::from(Shared::null().with_tag(3));
+    /// let guard = &epoch::pin();
+    /// assert_eq!(a.fetch_and(2, SeqCst, guard).tag(), 3);
+    /// assert_eq!(a.load(SeqCst, guard).tag(), 2);
+    /// ```
+    pub fn fetch_and<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
+        unsafe { Shared::from_data(self.data.fetch_and(val | !low_bits::<T>(), ord)) }
+    }
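+
+    // Note on the masking above: `fetch_and` forces the non-tag bits of `val` to 1 so the pointer
+    // bits are preserved, while `fetch_or` and `fetch_xor` below force them to 0 for the same
+    // reason.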
+
+    /// Bitwise "or" with the current tag.
+    ///
+    /// Performs a bitwise "or" operation on the current tag and the argument `val`, and sets the
+    /// new tag to the result. Returns the previous pointer.
+    ///
+    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
+    /// operation.
+    ///
+    /// [`Ordering`]: https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_epoch::{self as epoch, Atomic, Shared};
+    /// use std::sync::atomic::Ordering::SeqCst;
+    ///
+    /// let a = Atomic::<i32>::from(Shared::null().with_tag(1));
+    /// let guard = &epoch::pin();
+    /// assert_eq!(a.fetch_or(2, SeqCst, guard).tag(), 1);
+    /// assert_eq!(a.load(SeqCst, guard).tag(), 3);
+    /// ```
+    pub fn fetch_or<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
+        unsafe { Shared::from_data(self.data.fetch_or(val & low_bits::<T>(), ord)) }
+    }
+
+    /// Bitwise "xor" with the current tag.
+    ///
+    /// Performs a bitwise "xor" operation on the current tag and the argument `val`, and sets the
+    /// new tag to the result. Returns the previous pointer.
+    ///
+    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
+    /// operation.
+    ///
+    /// [`Ordering`]: https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_epoch::{self as epoch, Atomic, Shared};
+    /// use std::sync::atomic::Ordering::SeqCst;
+    ///
+    /// let a = Atomic::<i32>::from(Shared::null().with_tag(1));
+    /// let guard = &epoch::pin();
+    /// assert_eq!(a.fetch_xor(3, SeqCst, guard).tag(), 1);
+    /// assert_eq!(a.load(SeqCst, guard).tag(), 2);
+    /// ```
+    pub fn fetch_xor<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
+        unsafe { Shared::from_data(self.data.fetch_xor(val & low_bits::<T>(), ord)) }
+    }
+}
+
+impl<T> fmt::Debug for Atomic<T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        let data = self.data.load(Ordering::SeqCst);
+        let (raw, tag) = decompose_data::<T>(data);
+
+        f.debug_struct("Atomic")
+            .field("raw", &raw)
+            .field("tag", &tag)
+            .finish()
+    }
+}
+
+impl<T> fmt::Pointer for Atomic<T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        let data = self.data.load(Ordering::SeqCst);
+        let (raw, _) = decompose_data::<T>(data);
+        fmt::Pointer::fmt(&raw, f)
+    }
+}
+
+impl<T> Clone for Atomic<T> {
+    /// Returns a copy of the atomic value.
+    ///
+    /// Note that a `Relaxed` load is used here. If you need synchronization, use it with other
+    /// atomics or fences.
+    fn clone(&self) -> Self {
+        let data = self.data.load(Ordering::Relaxed);
+        Atomic::from_data(data)
+    }
+}
+
+impl<T> Default for Atomic<T> {
+    fn default() -> Self {
+        Atomic::null()
+    }
+}
+
+impl<T> From<Owned<T>> for Atomic<T> {
+    /// Returns a new atomic pointer pointing to `owned`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_epoch::{Atomic, Owned};
+    ///
+    /// let a = Atomic::<i32>::from(Owned::new(1234));
+    /// ```
+    fn from(owned: Owned<T>) -> Self {
+        let data = owned.data;
+        mem::forget(owned);
+        Self::from_data(data)
+    }
+}
+
+impl<T> From<Box<T>> for Atomic<T> {
+    fn from(b: Box<T>) -> Self {
+        Self::from(Owned::from(b))
+    }
+}
+
+impl<T> From<T> for Atomic<T> {
+    fn from(t: T) -> Self {
+        Self::new(t)
+    }
+}
+
+impl<'g, T> From<Shared<'g, T>> for Atomic<T> {
+    /// Returns a new atomic pointer pointing to `ptr`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_epoch::{Atomic, Shared};
+    ///
+    /// let a = Atomic::<i32>::from(Shared::<i32>::null());
+    /// ```
+    fn from(ptr: Shared<'g, T>) -> Self {
+        Self::from_data(ptr.data)
+    }
+}
+
+impl<T> From<*const T> for Atomic<T> {
+    /// Returns a new atomic pointer pointing to `raw`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::ptr;
+    /// use crossbeam_epoch::Atomic;
+    ///
+    /// let a = Atomic::<i32>::from(ptr::null::<i32>());
+    /// ```
+    fn from(raw: *const T) -> Self {
+        Self::from_data(raw as usize)
+    }
+}
+
+/// A trait for either `Owned` or `Shared` pointers.
+pub trait Pointer<T> {
+    /// Returns the machine representation of the pointer.
+    fn into_data(self) -> usize;
+
+    /// Returns a new pointer pointing to the tagged pointer `data`.
+    unsafe fn from_data(data: usize) -> Self;
+}
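+
+// Note: both `Owned<T>` and `Shared<'g, T>` below implement `Pointer<T>`, which is what lets
+// `Atomic::store`, `swap`, and the `compare_and_set*` methods accept either kind of pointer.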
+
+/// An owned heap-allocated object.
+///
+/// This type is very similar to `Box<T>`.
+///
+/// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused
+/// least significant bits of the address.
+pub struct Owned<T> {
+    data: usize,
+    _marker: PhantomData<Box<T>>,
+}
+
+impl<T> Pointer<T> for Owned<T> {
+    #[inline]
+    fn into_data(self) -> usize {
+        let data = self.data;
+        mem::forget(self);
+        data
+    }
+
+    /// Returns a new pointer pointing to the tagged pointer `data`.
+    ///
+    /// # Panics
+    ///
+    /// Panics in debug mode if the data is zero.
+    #[inline]
+    unsafe fn from_data(data: usize) -> Self {
+        debug_assert!(data != 0, "converting zero into `Owned`");
+        Owned {
+            data: data,
+            _marker: PhantomData,
+        }
+    }
+}
+
+impl<T> Owned<T> {
+    /// Allocates `value` on the heap and returns a new owned pointer pointing to it.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_epoch::Owned;
+    ///
+    /// let o = Owned::new(1234);
+    /// ```
+    pub fn new(value: T) -> Owned<T> {
+        Self::from(Box::new(value))
+    }
+
+    /// Returns a new owned pointer pointing to `raw`.
+    ///
+    /// This function is unsafe because improper use may lead to memory problems. Argument `raw`
+    /// must be a valid pointer. Also, a double-free may occur if the function is called twice on
+    /// the same raw pointer.
+    ///
+    /// # Panics
+    ///
+    /// Panics if `raw` is not properly aligned.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_epoch::Owned;
+    ///
+    /// let o = unsafe { Owned::from_raw(Box::into_raw(Box::new(1234))) };
+    /// ```
+    pub unsafe fn from_raw(raw: *mut T) -> Owned<T> {
+        ensure_aligned(raw);
+        Self::from_data(raw as usize)
+    }
+
+    /// Converts the owned pointer into a [`Shared`].
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_epoch::{self as epoch, Owned};
+    ///
+    /// let o = Owned::new(1234);
+    /// let guard = &epoch::pin();
+    /// let p = o.into_shared(guard);
+    /// ```
+    ///
+    /// [`Shared`]: struct.Shared.html
+    pub fn into_shared<'g>(self, _: &'g Guard) -> Shared<'g, T> {
+        unsafe { Shared::from_data(self.into_data()) }
+    }
+
+    /// Converts the owned pointer into a `Box`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_epoch::{self as epoch, Owned};
+    ///
+    /// let o = Owned::new(1234);
+    /// let b: Box<i32> = o.into_box();
+    /// assert_eq!(*b, 1234);
+    /// ```
+    pub fn into_box(self) -> Box<T> {
+        let (raw, _) = decompose_data::<T>(self.data);
+        mem::forget(self);
+        unsafe { Box::from_raw(raw) }
+    }
+
+    /// Returns the tag stored within the pointer.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_epoch::Owned;
+    ///
+    /// assert_eq!(Owned::new(1234).tag(), 0);
+    /// ```
+    pub fn tag(&self) -> usize {
+        let (_, tag) = decompose_data::<T>(self.data);
+        tag
+    }
+
+    /// Returns the same pointer, but tagged with `tag`. `tag` is truncated to fit into the
+    /// unused bits of the pointer to `T`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_epoch::Owned;
+    ///
+    /// let o = Owned::new(0u64);
+    /// assert_eq!(o.tag(), 0);
+    /// let o = o.with_tag(5);
+    /// assert_eq!(o.tag(), 5);
+    /// ```
+    pub fn with_tag(self, tag: usize) -> Owned<T> {
+        let data = self.into_data();
+        unsafe { Self::from_data(data_with_tag::<T>(data, tag)) }
+    }
+}
+
+impl<T> Drop for Owned<T> {
+    fn drop(&mut self) {
+        let (raw, _) = decompose_data::<T>(self.data);
+        unsafe {
+            drop(Box::from_raw(raw));
+        }
+    }
+}
+
+impl<T> fmt::Debug for Owned<T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        let (raw, tag) = decompose_data::<T>(self.data);
+
+        f.debug_struct("Owned")
+            .field("raw", &raw)
+            .field("tag", &tag)
+            .finish()
+    }
+}
+
+impl<T: Clone> Clone for Owned<T> {
+    fn clone(&self) -> Self {
+        Owned::new((**self).clone()).with_tag(self.tag())
+    }
+}
+
+impl<T> Deref for Owned<T> {
+    type Target = T;
+
+    fn deref(&self) -> &T {
+        let (raw, _) = decompose_data::<T>(self.data);
+        unsafe { &*raw }
+    }
+}
+
+impl<T> DerefMut for Owned<T> {
+    fn deref_mut(&mut self) -> &mut T {
+        let (raw, _) = decompose_data::<T>(self.data);
+        unsafe { &mut *raw }
+    }
+}
+
+impl<T> From<T> for Owned<T> {
+    fn from(t: T) -> Self {
+        Owned::new(t)
+    }
+}
+
+impl<T> From<Box<T>> for Owned<T> {
+    /// Returns a new owned pointer pointing to `b`.
+    ///
+    /// # Panics
+    ///
+    /// Panics if the pointer (the `Box`) is not properly aligned.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_epoch::Owned;
+    ///
+    /// let o = unsafe { Owned::from_raw(Box::into_raw(Box::new(1234))) };
+    /// ```
+    fn from(b: Box<T>) -> Self {
+        unsafe { Self::from_raw(Box::into_raw(b)) }
+    }
+}
+
+impl<T> Borrow<T> for Owned<T> {
+    fn borrow(&self) -> &T {
+        &**self
+    }
+}
+
+impl<T> BorrowMut<T> for Owned<T> {
+    fn borrow_mut(&mut self) -> &mut T {
+        &mut **self
+    }
+}
+
+impl<T> AsRef<T> for Owned<T> {
+    fn as_ref(&self) -> &T {
+        &**self
+    }
+}
+
+impl<T> AsMut<T> for Owned<T> {
+    fn as_mut(&mut self) -> &mut T {
+        &mut **self
+    }
+}
+
+/// A pointer to an object protected by the epoch GC.
+///
+/// The pointer is valid for use only during the lifetime `'g`.
+///
+/// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused
+/// least significant bits of the address.
+pub struct Shared<'g, T: 'g> {
+    data: usize,
+    _marker: PhantomData<(&'g (), *const T)>,
+}
+
+impl<'g, T> Clone for Shared<'g, T> {
+    fn clone(&self) -> Self {
+        Shared {
+            data: self.data,
+            _marker: PhantomData,
+        }
+    }
+}
+
+impl<'g, T> Copy for Shared<'g, T> {}
+
+impl<'g, T> Pointer<T> for Shared<'g, T> {
+    #[inline]
+    fn into_data(self) -> usize {
+        self.data
+    }
+
+    #[inline]
+    unsafe fn from_data(data: usize) -> Self {
+        Shared {
+            data: data,
+            _marker: PhantomData,
+        }
+    }
+}
+
+impl<'g, T> Shared<'g, T> {
+    /// Returns a new null pointer.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_epoch::Shared;
+    ///
+    /// let p = Shared::<i32>::null();
+    /// assert!(p.is_null());
+    /// ```
+    pub fn null() -> Shared<'g, T> {
+        Shared {
+            data: 0,
+            _marker: PhantomData,
+        }
+    }
+
+    /// Returns `true` if the pointer is null.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_epoch::{self as epoch, Atomic, Owned};
+    /// use std::sync::atomic::Ordering::SeqCst;
+    ///
+    /// let a = Atomic::null();
+    /// let guard = &epoch::pin();
+    /// assert!(a.load(SeqCst, guard).is_null());
+    /// a.store(Owned::new(1234), SeqCst);
+    /// assert!(!a.load(SeqCst, guard).is_null());
+    /// ```
+    pub fn is_null(&self) -> bool {
+        self.as_raw().is_null()
+    }
+
+    /// Converts the pointer to a raw pointer (without the tag).
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_epoch::{self as epoch, Atomic, Owned};
+    /// use std::sync::atomic::Ordering::SeqCst;
+    ///
+    /// let o = Owned::new(1234);
+    /// let raw = &*o as *const _;
+    /// let a = Atomic::from(o);
+    ///
+    /// let guard = &epoch::pin();
+    /// let p = a.load(SeqCst, guard);
+    /// assert_eq!(p.as_raw(), raw);
+    /// ```
+    pub fn as_raw(&self) -> *const T {
+        let (raw, _) = decompose_data::<T>(self.data);
+        raw
+    }
+
+    /// Dereferences the pointer.
+    ///
+    /// Returns a reference to the pointee that is valid during the lifetime `'g`.
+    ///
+    /// # Safety
+    ///
+    /// Dereferencing a pointer is unsafe because it could be pointing to invalid memory.
+    ///
+    /// Another concern is the possibility of data races due to lack of proper synchronization.
+    /// For example, consider the following scenario:
+    ///
+    /// 1. A thread creates a new object: `a.store(Owned::new(10), Relaxed)`
+    /// 2. Another thread reads it: `*a.load(Relaxed, guard).as_ref().unwrap()`
+    ///
+    /// The problem is that relaxed orderings don't synchronize initialization of the object with
+    /// the read from the second thread. This is a data race. A possible solution would be to use
+    /// `Release` and `Acquire` orderings.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_epoch::{self as epoch, Atomic};
+    /// use std::sync::atomic::Ordering::SeqCst;
+    ///
+    /// let a = Atomic::new(1234);
+    /// let guard = &epoch::pin();
+    /// let p = a.load(SeqCst, guard);
+    /// unsafe {
+    ///     assert_eq!(p.deref(), &1234);
+    /// }
+    /// ```
+    pub unsafe fn deref(&self) -> &'g T {
+        &*self.as_raw()
+    }
+
+    /// Converts the pointer to a reference.
+    ///
+    /// Returns `None` if the pointer is null, or else a reference to the object wrapped in `Some`.
+    ///
+    /// # Safety
+    ///
+    /// Dereferencing a pointer is unsafe because it could be pointing to invalid memory.
+    ///
+    /// Another concern is the possibility of data races due to lack of proper synchronization.
+    /// For example, consider the following scenario:
+    ///
+    /// 1. A thread creates a new object: `a.store(Owned::new(10), Relaxed)`
+    /// 2. Another thread reads it: `*a.load(Relaxed, guard).as_ref().unwrap()`
+    ///
+    /// The problem is that relaxed orderings don't synchronize initialization of the object with
+    /// the read from the second thread. This is a data race. A possible solution would be to use
+    /// `Release` and `Acquire` orderings.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_epoch::{self as epoch, Atomic};
+    /// use std::sync::atomic::Ordering::SeqCst;
+    ///
+    /// let a = Atomic::new(1234);
+    /// let guard = &epoch::pin();
+    /// let p = a.load(SeqCst, guard);
+    /// unsafe {
+    ///     assert_eq!(p.as_ref(), Some(&1234));
+    /// }
+    /// ```
+    pub unsafe fn as_ref(&self) -> Option<&'g T> {
+        self.as_raw().as_ref()
+    }
+
+    /// Takes ownership of the pointee.
+    ///
+    /// # Panics
+    ///
+    /// Panics if this pointer is null, but only in debug mode.
+    ///
+    /// # Safety
+    ///
+    /// This method may be called only if the pointer is valid and nobody else is holding a
+    /// reference to the same object.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_epoch::{self as epoch, Atomic};
+    /// use std::sync::atomic::Ordering::SeqCst;
+    ///
+    /// let a = Atomic::new(1234);
+    /// unsafe {
+    ///     let guard = &epoch::unprotected();
+    ///     let p = a.load(SeqCst, guard);
+    ///     drop(p.into_owned());
+    /// }
+    /// ```
+    pub unsafe fn into_owned(self) -> Owned<T> {
+        debug_assert!(
+            self.as_raw() != ptr::null(),
+            "converting a null `Shared` into `Owned`"
+        );
+        Owned::from_data(self.data)
+    }
+
+    /// Returns the tag stored within the pointer.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_epoch::{self as epoch, Atomic, Owned};
+    /// use std::sync::atomic::Ordering::SeqCst;
+    ///
+    /// let a = Atomic::<u64>::from(Owned::new(0u64).with_tag(5));
+    /// let guard = &epoch::pin();
+    /// let p = a.load(SeqCst, guard);
+    /// assert_eq!(p.tag(), 5);
+    /// ```
+    pub fn tag(&self) -> usize {
+        let (_, tag) = decompose_data::<T>(self.data);
+        tag
+    }
+
+    /// Returns the same pointer, but tagged with `tag`. `tag` is truncated to fit into the
+    /// unused bits of the pointer to `T`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_epoch::{self as epoch, Atomic};
+    /// use std::sync::atomic::Ordering::SeqCst;
+    ///
+    /// let a = Atomic::new(0u64);
+    /// let guard = &epoch::pin();
+    /// let p1 = a.load(SeqCst, guard);
+    /// let p2 = p1.with_tag(5);
+    ///
+    /// assert_eq!(p1.tag(), 0);
+    /// assert_eq!(p2.tag(), 5);
+    /// assert_eq!(p1.as_raw(), p2.as_raw());
+    /// ```
+    pub fn with_tag(&self, tag: usize) -> Shared<'g, T> {
+        unsafe { Self::from_data(data_with_tag::<T>(self.data, tag)) }
+    }
+}
+
+impl<'g, T> From<*const T> for Shared<'g, T> {
+    /// Returns a new pointer pointing to `raw`.
+    ///
+    /// # Panics
+    ///
+    /// Panics if `raw` is not properly aligned.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_epoch::Shared;
+    ///
+    /// let p = unsafe { Shared::from(Box::into_raw(Box::new(1234)) as *const _) };
+    /// assert!(!p.is_null());
+    /// ```
+    fn from(raw: *const T) -> Self {
+        ensure_aligned(raw);
+        unsafe { Self::from_data(raw as usize) }
+    }
+}
+
+impl<'g, T> PartialEq<Shared<'g, T>> for Shared<'g, T> {
+    fn eq(&self, other: &Self) -> bool {
+        self.data == other.data
+    }
+}
+
+impl<'g, T> Eq for Shared<'g, T> {}
+
+impl<'g, T> PartialOrd<Shared<'g, T>> for Shared<'g, T> {
+    fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
+        self.data.partial_cmp(&other.data)
+    }
+}
+
+impl<'g, T> Ord for Shared<'g, T> {
+    fn cmp(&self, other: &Self) -> cmp::Ordering {
+        self.data.cmp(&other.data)
+    }
+}
+
+impl<'g, T> fmt::Debug for Shared<'g, T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        let (raw, tag) = decompose_data::<T>(self.data);
+
+        f.debug_struct("Shared")
+            .field("raw", &raw)
+            .field("tag", &tag)
+            .finish()
+    }
+}
+
+impl<'g, T> fmt::Pointer for Shared<'g, T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        fmt::Pointer::fmt(&self.as_raw(), f)
+    }
+}
+
+impl<'g, T> Default for Shared<'g, T> {
+    fn default() -> Self {
+        Shared::null()
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::Shared;
+
+    #[test]
+    fn valid_tag_i8() {
+        Shared::<i8>::null().with_tag(0);
+    }
+
+    #[test]
+    fn valid_tag_i64() {
+        Shared::<i64>::null().with_tag(7);
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/crossbeam-epoch/src/collector.rs
@@ -0,0 +1,426 @@
+/// Epoch-based garbage collector.
+///
+/// # Examples
+///
+/// ```
+/// use crossbeam_epoch::Collector;
+///
+/// let collector = Collector::new();
+///
+/// let handle = collector.handle();
+/// drop(collector); // `handle` still works after dropping `collector`
+///
+/// handle.pin().flush();
+/// ```
+
+use alloc::arc::Arc;
+
+use internal::{Global, Local};
+use guard::Guard;
+
+/// An epoch-based garbage collector.
+pub struct Collector {
+    global: Arc<Global>,
+}
+
+unsafe impl Send for Collector {}
+unsafe impl Sync for Collector {}
+
+impl Collector {
+    /// Creates a new collector.
+    pub fn new() -> Self {
+        Collector { global: Arc::new(Global::new()) }
+    }
+
+    /// Creates a new handle for the collector.
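+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch mirroring the module-level example:
+    ///
+    /// ```
+    /// use crossbeam_epoch::Collector;
+    ///
+    /// let collector = Collector::new();
+    /// let handle = collector.handle();
+    /// handle.pin().flush();
+    /// ```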
+    pub fn handle(&self) -> Handle {
+        Handle { local: Local::register(&self.global) }
+    }
+}
+
+impl Clone for Collector {
+    /// Creates another reference to the same garbage collector.
+    fn clone(&self) -> Self {
+        Collector { global: self.global.clone() }
+    }
+}
+
+/// A handle to a garbage collector.
+pub struct Handle {
+    local: *const Local,
+}
+
+impl Handle {
+    /// Pins the handle.
+    #[inline]
+    pub fn pin(&self) -> Guard {
+        unsafe { (*self.local).pin() }
+    }
+
+    /// Returns `true` if the handle is pinned.
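+    ///
+    /// # Examples
+    ///
+    /// A small sketch of the reentrant pinning flag (see also the `pin_reentrant` test below):
+    ///
+    /// ```
+    /// use crossbeam_epoch::Collector;
+    ///
+    /// let handle = Collector::new().handle();
+    /// assert!(!handle.is_pinned());
+    /// let _guard = handle.pin();
+    /// assert!(handle.is_pinned());
+    /// ```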
+    #[inline]
+    pub fn is_pinned(&self) -> bool {
+        unsafe { (*self.local).is_pinned() }
+    }
+}
+
+unsafe impl Send for Handle {}
+
+impl Drop for Handle {
+    #[inline]
+    fn drop(&mut self) {
+        unsafe {
+            Local::release_handle(&*self.local);
+        }
+    }
+}
+
+impl Clone for Handle {
+    #[inline]
+    fn clone(&self) -> Self {
+        unsafe {
+            Local::acquire_handle(&*self.local);
+        }
+        Handle { local: self.local }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use std::mem;
+    use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT};
+    use std::sync::atomic::Ordering;
+
+    use crossbeam_utils::scoped;
+
+    use {Collector, Owned};
+
+    const NUM_THREADS: usize = 8;
+
+    #[test]
+    fn pin_reentrant() {
+        let collector = Collector::new();
+        let handle = collector.handle();
+        drop(collector);
+
+        assert!(!handle.is_pinned());
+        {
+            let _guard = &handle.pin();
+            assert!(handle.is_pinned());
+            {
+                let _guard = &handle.pin();
+                assert!(handle.is_pinned());
+            }
+            assert!(handle.is_pinned());
+        }
+        assert!(!handle.is_pinned());
+    }
+
+    #[test]
+    fn flush_local_bag() {
+        let collector = Collector::new();
+        let handle = collector.handle();
+        drop(collector);
+
+        for _ in 0..100 {
+            let guard = &handle.pin();
+            unsafe {
+                let a = Owned::new(7).into_shared(guard);
+                guard.defer(move || a.into_owned());
+
+                assert!(!(*guard.get_local()).is_bag_empty());
+
+                while !(*guard.get_local()).is_bag_empty() {
+                    guard.flush();
+                }
+            }
+        }
+    }
+
+    #[test]
+    fn garbage_buffering() {
+        let collector = Collector::new();
+        let handle = collector.handle();
+        drop(collector);
+
+        let guard = &handle.pin();
+        unsafe {
+            for _ in 0..10 {
+                let a = Owned::new(7).into_shared(guard);
+                guard.defer(move || a.into_owned());
+            }
+            assert!(!(*guard.get_local()).is_bag_empty());
+        }
+    }
+
+    #[test]
+    fn pin_holds_advance() {
+        let collector = Collector::new();
+
+        let threads = (0..NUM_THREADS)
+            .map(|_| {
+                scoped::scope(|scope| {
+                    scope.spawn(|| {
+                        let handle = collector.handle();
+                        for _ in 0..500_000 {
+                            let guard = &handle.pin();
+
+                            let before = collector.global.load_epoch(Ordering::Relaxed);
+                            collector.global.collect(guard);
+                            let after = collector.global.load_epoch(Ordering::Relaxed);
+
+                            assert!(after.wrapping_sub(before) <= 2);
+                        }
+                    })
+                })
+            })
+            .collect::<Vec<_>>();
+        drop(collector);
+
+        for t in threads {
+            t.join();
+        }
+    }
+
+    #[test]
+    fn incremental() {
+        const COUNT: usize = 100_000;
+        static DESTROYS: AtomicUsize = ATOMIC_USIZE_INIT;
+
+        let collector = Collector::new();
+        let handle = collector.handle();
+
+        unsafe {
+            let guard = &handle.pin();
+            for _ in 0..COUNT {
+                let a = Owned::new(7i32).into_shared(guard);
+                guard.defer(move || {
+                    drop(a.into_owned());
+                    DESTROYS.fetch_add(1, Ordering::Relaxed);
+                });
+            }
+            guard.flush();
+        }
+
+        let mut last = 0;
+
+        while last < COUNT {
+            let curr = DESTROYS.load(Ordering::Relaxed);
+            assert!(curr - last <= 1024);
+            last = curr;
+
+            let guard = &handle.pin();
+            collector.global.collect(guard);
+        }
+        assert!(DESTROYS.load(Ordering::Relaxed) == 100_000);
+    }
+
+    #[test]
+    fn buffering() {
+        const COUNT: usize = 10;
+        static DESTROYS: AtomicUsize = ATOMIC_USIZE_INIT;
+
+        let collector = Collector::new();
+        let handle = collector.handle();
+
+        unsafe {
+            let guard = &handle.pin();
+            for _ in 0..COUNT {
+                let a = Owned::new(7i32).into_shared(guard);
+                guard.defer(move || {
+                    drop(a.into_owned());
+                    DESTROYS.fetch_add(1, Ordering::Relaxed);
+                });
+            }
+        }
+
+        for _ in 0..100_000 {
+            collector.global.collect(&handle.pin());
+        }
+        assert!(DESTROYS.load(Ordering::Relaxed) < COUNT);
+
+        handle.pin().flush();
+
+        while DESTROYS.load(Ordering::Relaxed) < COUNT {
+            let guard = &handle.pin();
+            collector.global.collect(guard);
+        }
+        assert_eq!(DESTROYS.load(Ordering::Relaxed), COUNT);
+    }
+
+    #[test]
+    fn count_drops() {
+        const COUNT: usize = 100_000;
+        static DROPS: AtomicUsize = ATOMIC_USIZE_INIT;
+
+        struct Elem(i32);
+
+        impl Drop for Elem {
+            fn drop(&mut self) {
+                DROPS.fetch_add(1, Ordering::Relaxed);
+            }
+        }
+
+        let collector = Collector::new();
+        let handle = collector.handle();
+
+        unsafe {
+            let guard = &handle.pin();
+
+            for _ in 0..COUNT {
+                let a = Owned::new(Elem(7i32)).into_shared(guard);
+                guard.defer(move || a.into_owned());
+            }
+            guard.flush();
+        }
+
+        while DROPS.load(Ordering::Relaxed) < COUNT {
+            let guard = &handle.pin();
+            collector.global.collect(guard);
+        }
+        assert_eq!(DROPS.load(Ordering::Relaxed), COUNT);
+    }
+
+    #[test]
+    fn count_destroy() {
+        const COUNT: usize = 100_000;
+        static DESTROYS: AtomicUsize = ATOMIC_USIZE_INIT;
+
+        let collector = Collector::new();
+        let handle = collector.handle();
+
+        unsafe {
+            let guard = &handle.pin();
+
+            for _ in 0..COUNT {
+                let a = Owned::new(7i32).into_shared(guard);
+                guard.defer(move || {
+                    drop(a.into_owned());
+                    DESTROYS.fetch_add(1, Ordering::Relaxed);
+                });
+            }
+            guard.flush();
+        }
+
+        while DESTROYS.load(Ordering::Relaxed) < COUNT {
+            let guard = &handle.pin();
+            collector.global.collect(guard);
+        }
+        assert_eq!(DESTROYS.load(Ordering::Relaxed), COUNT);
+    }
+
+    #[test]
+    fn drop_array() {
+        const COUNT: usize = 700;
+        static DROPS: AtomicUsize = ATOMIC_USIZE_INIT;
+
+        struct Elem(i32);
+
+        impl Drop for Elem {
+            fn drop(&mut self) {
+                DROPS.fetch_add(1, Ordering::Relaxed);
+            }
+        }
+
+        let collector = Collector::new();
+        let handle = collector.handle();
+
+        let mut guard = handle.pin();
+
+        let mut v = Vec::with_capacity(COUNT);
+        for i in 0..COUNT {
+            v.push(Elem(i as i32));
+        }
+
+        {
+            let a = Owned::new(v).into_shared(&guard);
+            unsafe { guard.defer(move || a.into_owned()); }
+            guard.flush();
+        }
+
+        while DROPS.load(Ordering::Relaxed) < COUNT {
+            guard.repin();
+            collector.global.collect(&guard);
+        }
+        assert_eq!(DROPS.load(Ordering::Relaxed), COUNT);
+    }
+
+    #[test]
+    fn destroy_array() {
+        const COUNT: usize = 100_000;
+        static DESTROYS: AtomicUsize = ATOMIC_USIZE_INIT;
+
+        let collector = Collector::new();
+        let handle = collector.handle();
+
+        unsafe {
+            let guard = &handle.pin();
+
+            let mut v = Vec::with_capacity(COUNT);
+            for i in 0..COUNT {
+                v.push(i as i32);
+            }
+
+            let ptr = v.as_mut_ptr() as usize;
+            let len = v.len();
+            guard.defer(move || {
+                drop(Vec::from_raw_parts(ptr as *const u8 as *mut u8, len, len));
+                DESTROYS.fetch_add(len, Ordering::Relaxed);
+            });
+            guard.flush();
+
+            mem::forget(v);
+        }
+
+        while DESTROYS.load(Ordering::Relaxed) < COUNT {
+            let guard = &handle.pin();
+            collector.global.collect(guard);
+        }
+        assert_eq!(DESTROYS.load(Ordering::Relaxed), COUNT);
+    }
+
+    #[test]
+    fn stress() {
+        const THREADS: usize = 8;
+        const COUNT: usize = 100_000;
+        static DROPS: AtomicUsize = ATOMIC_USIZE_INIT;
+
+        struct Elem(i32);
+
+        impl Drop for Elem {
+            fn drop(&mut self) {
+                DROPS.fetch_add(1, Ordering::Relaxed);
+            }
+        }
+
+        let collector = Collector::new();
+
+        let threads = (0..THREADS)
+            .map(|_| {
+                scoped::scope(|scope| {
+                    scope.spawn(|| {
+                        let handle = collector.handle();
+                        for _ in 0..COUNT {
+                            let guard = &handle.pin();
+                            unsafe {
+                                let a = Owned::new(Elem(7i32)).into_shared(guard);
+                                guard.defer(move || a.into_owned());
+                            }
+                        }
+                    })
+                })
+            })
+            .collect::<Vec<_>>();
+
+        for t in threads {
+            t.join();
+        }
+
+        let handle = collector.handle();
+        while DROPS.load(Ordering::Relaxed) < COUNT * THREADS {
+            let guard = &handle.pin();
+            collector.global.collect(guard);
+        }
+        assert_eq!(DROPS.load(Ordering::Relaxed), COUNT * THREADS);
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/crossbeam-epoch/src/default.rs
@@ -0,0 +1,40 @@
+//! The default garbage collector.
+//!
+//! For each thread, a participant is lazily initialized on its first use, when the current thread
+//! is registered in the default collector.  If initialized, the thread's participant will get
+//! destructed on thread exit, which in turn unregisters the thread.
+
+use collector::{Collector, Handle};
+use guard::Guard;
+
+lazy_static! {
+    /// The global data for the default garbage collector.
+    static ref COLLECTOR: Collector = Collector::new();
+}
+
+thread_local! {
+    /// The per-thread participant for the default garbage collector.
+    static HANDLE: Handle = COLLECTOR.handle();
+}
+
+/// Pins the current thread.
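+///
+/// # Examples
+///
+/// A minimal sketch using the crate-level re-export:
+///
+/// ```
+/// use crossbeam_epoch as epoch;
+///
+/// let guard = epoch::pin();
+/// // ... load `Atomic` pointers under `guard` ...
+/// drop(guard); // unpins the current thread, unless other guards are still alive
+/// ```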
+#[inline]
+pub fn pin() -> Guard {
+    // FIXME(jeehoonkang): thread-local storage may be destructed at the time `pin()` is called. For
+    // that case, we should use `HANDLE.try_with()` instead.
+    HANDLE.with(|handle| handle.pin())
+}
+
+/// Returns `true` if the current thread is pinned.
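+///
+/// # Examples
+///
+/// A small sketch, assuming no other guard is alive on the current thread:
+///
+/// ```
+/// use crossbeam_epoch as epoch;
+///
+/// assert!(!epoch::is_pinned());
+/// let _guard = epoch::pin();
+/// assert!(epoch::is_pinned());
+/// ```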
+#[inline]
+pub fn is_pinned() -> bool {
+    // FIXME(jeehoonkang): thread-local storage may be destructed at the time `is_pinned()` is
+    // called. For that case, we should use `HANDLE.try_with()` instead.
+    HANDLE.with(|handle| handle.is_pinned())
+}
+
+/// Returns the default handle associated with the current thread.
+#[inline]
+pub fn default_handle() -> Handle {
+    HANDLE.with(|handle| handle.clone())
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/crossbeam-epoch/src/deferred.rs
@@ -0,0 +1,147 @@
+use core::mem;
+use core::ptr;
+use alloc::boxed::Box;
+
+/// Number of words a piece of `Data` can hold.
+///
+/// Three words should be enough for the majority of cases. For example, you can fit inside it the
+/// function pointer together with a fat pointer representing an object that needs to be destroyed.
+const DATA_WORDS: usize = 3;
+
+/// Some space to keep a `FnOnce()` object on the stack.
+type Data = [usize; DATA_WORDS];
+
+/// A `FnOnce()` that is stored inline if small, or otherwise boxed on the heap.
+///
+/// This is a handy way of keeping an unsized `FnOnce()` within a sized structure.
+pub struct Deferred {
+    call: unsafe fn(*mut u8),
+    data: Data,
+}
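+
+// A rough sketch of what fits under the `DATA_WORDS` budget above: a closure capturing
+// `[usize; 1]` is stored inline (see `tests::on_stack`), while one capturing `[usize; 10]`
+// exceeds three words and is boxed on the heap (see `tests::on_heap`).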
+
+impl Deferred {
+    /// Constructs a new `Deferred` from a `FnOnce()`.
+    pub fn new<F: FnOnce()>(f: F) -> Self {
+        let size = mem::size_of::<F>();
+        let align = mem::align_of::<F>();
+
+        unsafe {
+            if size <= mem::size_of::<Data>() && align <= mem::align_of::<Data>() {
+                let mut data: Data = mem::uninitialized();
+                ptr::write(&mut data as *mut Data as *mut F, f);
+
+                unsafe fn call<F: FnOnce()>(raw: *mut u8) {
+                    let f: F = ptr::read(raw as *mut F);
+                    f();
+                }
+
+                Deferred {
+                    call: call::<F>,
+                    data: data,
+                }
+            } else {
+                let b: Box<F> = Box::new(f);
+                let mut data: Data = mem::uninitialized();
+                ptr::write(&mut data as *mut Data as *mut Box<F>, b);
+
+                unsafe fn call<F: FnOnce()>(raw: *mut u8) {
+                    let b: Box<F> = ptr::read(raw as *mut Box<F>);
+                    (*b)();
+                }
+
+                Deferred {
+                    call: call::<F>,
+                    data: data,
+                }
+            }
+        }
+    }
+
+    /// Calls the function or panics if it was already called.
+    #[inline]
+    pub fn call(&mut self) {
+        unsafe fn fail(_: *mut u8) {
+            panic!("cannot call `FnOnce` more than once");
+        }
+
+        let call = mem::replace(&mut self.call, fail);
+        unsafe {
+            call(&mut self.data as *mut Data as *mut u8);
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use std::cell::Cell;
+    use super::Deferred;
+
+    #[test]
+    fn on_stack() {
+        let fired = &Cell::new(false);
+        let a = [0usize; 1];
+
+        let mut d = Deferred::new(move || {
+            drop(a);
+            fired.set(true);
+        });
+
+        assert!(!fired.get());
+        d.call();
+        assert!(fired.get());
+    }
+
+    #[test]
+    fn on_heap() {
+        let fired = &Cell::new(false);
+        let a = [0usize; 10];
+
+        let mut d = Deferred::new(move || {
+            drop(a);
+            fired.set(true);
+        });
+
+        assert!(!fired.get());
+        d.call();
+        assert!(fired.get());
+    }
+
+    #[test]
+    #[should_panic(expected = "cannot call `FnOnce` more than once")]
+    fn twice_on_stack() {
+        let a = [0usize; 1];
+        let mut d = Deferred::new(move || drop(a));
+        d.call();
+        d.call();
+    }
+
+    #[test]
+    #[should_panic(expected = "cannot call `FnOnce` more than once")]
+    fn twice_on_heap() {
+        let a = [0usize; 10];
+        let mut d = Deferred::new(move || drop(a));
+        d.call();
+        d.call();
+    }
+
+    #[test]
+    fn string() {
+        let a = "hello".to_string();
+        let mut d = Deferred::new(move || assert_eq!(a, "hello"));
+        d.call();
+    }
+
+    #[test]
+    fn boxed_slice_i32() {
+        let a: Box<[i32]> = vec![2, 3, 5, 7].into_boxed_slice();
+        let mut d = Deferred::new(move || assert_eq!(*a, [2, 3, 5, 7]));
+        d.call();
+    }
+
+    #[test]
+    fn long_slice_usize() {
+        let a: [usize; 5] = [2, 3, 5, 7, 11];
+        let mut d = Deferred::new(move || assert_eq!(a, [2, 3, 5, 7, 11]));
+        d.call();
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/crossbeam-epoch/src/epoch.rs
@@ -0,0 +1,106 @@
+//! The global epoch
+//!
+//! The last bit in this number is unused and is always zero. Every so often the global epoch is
+//! incremented, i.e. we say it "advances". A pinned participant may advance the global epoch only
+//! if all currently pinned participants have been pinned in the current epoch.
+//!
+//! If an object became garbage in some epoch, then we can be sure that after two advancements no
+//! participant will hold a reference to it. That is the crux of safe memory reclamation.
+
+use core::sync::atomic::{AtomicUsize, Ordering};
+
+/// An epoch that can be marked as pinned or unpinned.
+///
+/// Internally, the epoch is represented as an integer that wraps around at some unspecified point
+/// and a flag that represents whether it is pinned or unpinned.
+#[derive(Copy, Clone, Default, Debug, Eq, PartialEq)]
+pub struct Epoch {
+    /// The least significant bit is set if pinned. The rest of the bits hold the epoch.
+    data: usize,
+}
+
+impl Epoch {
+    /// Returns the starting epoch in unpinned state.
+    #[inline]
+    pub fn starting() -> Self {
+        Self::default()
+    }
+
+    /// Returns the number of epochs `self` is ahead of `rhs`.
+    ///
+    /// Internally, epochs are represented as numbers in the range `(isize::MIN / 2) .. (isize::MAX
+    /// / 2)`, so the returned distance will be in the same interval.
+    pub fn wrapping_sub(self, rhs: Self) -> isize {
+        // The result is the same with `(self.data & !1).wrapping_sub(rhs.data & !1) as isize >> 1`,
+        // because the possible difference of LSB in `(self.data & !1).wrapping_sub(rhs.data & !1)`
+        // will be ignored in the shift operation.
+        self.data.wrapping_sub(rhs.data & !1) as isize >> 1
+    }
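+
+    // A worked example of the arithmetic above: `self.data == 6` encodes epoch 3 (unpinned) and
+    // `rhs.data == 3` encodes epoch 1 (pinned).  Then `rhs.data & !1 == 2`,
+    // `6usize.wrapping_sub(2) == 4`, and `4 as isize >> 1 == 2`, i.e. `self` is two epochs ahead.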
+
+    /// Returns `true` if the epoch is marked as pinned.
+    #[inline]
+    pub fn is_pinned(self) -> bool {
+        (self.data & 1) == 1
+    }
+
+    /// Returns the same epoch, but marked as pinned.
+    #[inline]
+    pub fn pinned(self) -> Epoch {
+        Epoch { data: self.data | 1 }
+    }
+
+    /// Returns the same epoch, but marked as unpinned.
+    #[inline]
+    pub fn unpinned(self) -> Epoch {
+        Epoch { data: self.data & !1 }
+    }
+
+    /// Returns the successor epoch.
+    ///
+    /// The returned epoch will be marked as pinned only if the previous one was as well.
+    #[inline]
+    pub fn successor(self) -> Epoch {
+        Epoch { data: self.data.wrapping_add(2) }
+    }
+}
+
+/// An atomic value that holds an `Epoch`.
+#[derive(Default, Debug)]
+pub struct AtomicEpoch {
+    /// Since `Epoch` is just a wrapper around `usize`, an `AtomicEpoch` is similarly represented
+    /// using an `AtomicUsize`.
+    data: AtomicUsize,
+}
+
+impl AtomicEpoch {
+    /// Creates a new atomic epoch.
+    #[inline]
+    pub fn new(epoch: Epoch) -> Self {
+        let data = AtomicUsize::new(epoch.data);
+        AtomicEpoch { data: data }
+    }
+
+    /// Loads a value from the atomic epoch.
+    #[inline]
+    pub fn load(&self, ord: Ordering) -> Epoch {
+        Epoch { data: self.data.load(ord) }
+    }
+
+    /// Stores a value into the atomic epoch.
+    #[inline]
+    pub fn store(&self, epoch: Epoch, ord: Ordering) {
+        self.data.store(epoch.data, ord);
+    }
+
+    /// Stores a value into the atomic epoch if the current value is the same as `current`.
+    ///
+    /// The return value is always the previous value. If it is equal to `current`, then the value
+    /// is updated.
+    ///
+    /// The `Ordering` argument describes the memory ordering of this operation.
+    #[inline]
+    pub fn compare_and_swap(&self, current: Epoch, new: Epoch, ord: Ordering) -> Epoch {
+        let data = self.data.compare_and_swap(current.data, new.data, ord);
+        Epoch { data: data }
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/crossbeam-epoch/src/garbage.rs
@@ -0,0 +1,134 @@
+//! Garbage collection.
+//!
+//! # Garbages
+//!
+//! Objects that get unlinked from concurrent data structures must be stashed away until the global
+//! epoch sufficiently advances so that they become safe for destruction.  We call these objects
+//! garbages.  When the global epoch advances sufficiently, `Destroy` garbages are dropped (i.e. the
+//! destructors are called), and `Free` garbages are freed.  In addition, you can register an
+//! arbitrary function to be called later using the `Fn` garbages.
+//!
+//! # Bags
+//!
+//! Pointers to such garbages are pushed into thread-local bags, and when a bag becomes full, it
+//! is marked with the current global epoch and pushed into a global queue of garbage bags.  We
+//! store garbages in thread-local storage to amortize the synchronization cost of pushing them
+//! to the global queue.
+//!
+//! # Garbage queues
+//!
+//! Whenever a bag is pushed into a queue, some garbage in the queue is collected and destroyed
+//! along the way.  This design reduces contention on data structures.  The global queue cannot be
+//! explicitly accessed: the only way to interact with it is by calling the `defer*()` functions,
+//! or by calling `collect()`, which manually triggers garbage collection.  Ideally, each instance
+//! of a concurrent data structure would have its own queue that gets fully destroyed as soon as
+//! the data structure is dropped.
+
+use core::fmt;
+use arrayvec::ArrayVec;
+use deferred::Deferred;
+
+/// Maximum number of objects a bag can contain.
+#[cfg(not(feature = "strict_gc"))]
+const MAX_OBJECTS: usize = 64;
+#[cfg(feature = "strict_gc")]
+const MAX_OBJECTS: usize = 4;
+
+
+pub struct Garbage {
+    func: Deferred,
+}
+
+
+unsafe impl Sync for Garbage {}
+unsafe impl Send for Garbage {}
+
+impl fmt::Debug for Garbage {
+    fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
+        write!(f, "garbage {{ ... }}")
+    }
+}
+
+impl Garbage {
+    /// Makes a garbage object that will call the given closure when it is dropped.
+    pub fn new<F: FnOnce()>(f: F) -> Self {
+        Garbage { func: Deferred::new(move || f()) }
+    }
+}
+
+impl Drop for Garbage {
+    fn drop(&mut self) {
+        self.func.call();
+    }
+}
+
+
+/// Bag of garbages.
+#[derive(Default, Debug)]
+pub struct Bag {
+    /// Stashed objects.
+    objects: ArrayVec<[Garbage; MAX_OBJECTS]>,
+}
+
+impl Bag {
+    /// Returns a new, empty bag.
+    pub fn new() -> Self {
+        Self::default()
+    }
+
+    /// Returns `true` if the bag is empty.
+    pub fn is_empty(&self) -> bool {
+        self.objects.is_empty()
+    }
+
+    /// Attempts to insert a garbage object into the bag, returning the object back in an `Err` if
+    /// the bag is full.
+    pub fn try_push(&mut self, garbage: Garbage) -> Result<(), Garbage> {
+        self.objects.try_push(garbage).map_err(|e| e.element())
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT};
+    use std::sync::atomic::Ordering;
+
+    use super::{Garbage, Bag};
+
+    #[test]
+    fn check_defer() {
+        static FLAG: AtomicUsize = ATOMIC_USIZE_INIT;
+        fn set() {
+            FLAG.store(42, Ordering::Relaxed);
+        }
+
+        let g = Garbage::new(set);
+        assert_eq!(FLAG.load(Ordering::Relaxed), 0);
+        drop(g);
+        assert_eq!(FLAG.load(Ordering::Relaxed), 42);
+    }
+
+    #[test]
+    fn check_bag() {
+        static FLAG: AtomicUsize = ATOMIC_USIZE_INIT;
+        fn incr() {
+            FLAG.fetch_add(1, Ordering::Relaxed);
+        }
+
+        let mut bag = Bag::new();
+        assert!(bag.is_empty());
+
+        for _ in 0..super::MAX_OBJECTS {
+            assert!(bag.try_push(Garbage::new(incr)).is_ok());
+            assert!(!bag.is_empty());
+            assert_eq!(FLAG.load(Ordering::Relaxed), 0);
+        }
+
+        let result = bag.try_push(Garbage::new(incr));
+        assert!(result.is_err());
+        assert!(!bag.is_empty());
+        assert_eq!(FLAG.load(Ordering::Relaxed), 0);
+
+        drop(bag);
+        assert_eq!(FLAG.load(Ordering::Relaxed), super::MAX_OBJECTS);
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/crossbeam-epoch/src/guard.rs
@@ -0,0 +1,417 @@
+use core::ptr;
+use core::mem;
+
+use garbage::Garbage;
+use internal::Local;
+
+/// A guard that keeps the current thread pinned.
+///
+/// # Pinning
+///
+/// The current thread is pinned by calling [`pin`], which returns a new guard:
+///
+/// ```
+/// use crossbeam_epoch as epoch;
+///
+/// // It is often convenient to prefix a call to `pin` with a `&` in order to create a reference.
+/// // This is not really necessary, but makes passing references to the guard a bit easier.
+/// let guard = &epoch::pin();
+/// ```
+///
+/// When a guard gets dropped, the current thread is automatically unpinned.
+///
+/// # Pointers on the stack
+///
+/// Having a guard allows us to create pointers on the stack to heap-allocated objects.
+/// For example:
+///
+/// ```
+/// use crossbeam_epoch::{self as epoch, Atomic, Owned};
+/// use std::sync::atomic::Ordering::SeqCst;
+///
+/// // Create a heap-allocated number.
+/// let a = Atomic::new(777);
+///
+/// // Pin the current thread.
+/// let guard = &epoch::pin();
+///
+/// // Load the heap-allocated object and create pointer `p` on the stack.
+/// let p = a.load(SeqCst, guard);
+///
+/// // Dereference the pointer and print the value:
+/// if let Some(num) = unsafe { p.as_ref() } {
+///     println!("The number is {}.", num);
+/// }
+/// ```
+///
+/// # Multiple guards
+///
+/// Pinning is reentrant and it is perfectly legal to create multiple guards. In that case, the
+/// thread will actually be pinned only when the first guard is created and unpinned when the last
+/// one is dropped:
+///
+/// ```
+/// use crossbeam_epoch as epoch;
+///
+/// let guard1 = epoch::pin();
+/// let guard2 = epoch::pin();
+/// assert!(epoch::is_pinned());
+/// drop(guard1);
+/// assert!(epoch::is_pinned());
+/// drop(guard2);
+/// assert!(!epoch::is_pinned());
+/// ```
+///
+/// The same can be achieved by cloning guards:
+///
+/// ```
+/// use crossbeam_epoch as epoch;
+///
+/// let guard1 = epoch::pin();
+/// let guard2 = guard1.clone();
+/// ```
+///
+/// [`pin`]: fn.pin.html
+pub struct Guard {
+    local: *const Local,
+}
+
+impl Guard {
+    /// Creates a new guard from a pointer to `Local`.
+    ///
+    /// # Safety
+    ///
+    /// The `local` should be a valid pointer created by `Local::register()`.
+    #[doc(hidden)]
+    pub unsafe fn new(local: *const Local) -> Guard {
+        Guard { local: local }
+    }
+
+    /// Accesses the internal pointer to `Local`.
+    #[doc(hidden)]
+    pub unsafe fn get_local(&self) -> *const Local {
+        self.local
+    }
+
+    /// Stores a function so that it can be executed at some point after all currently pinned
+    /// threads get unpinned.
+    ///
+    /// This method first stores `f` into the thread-local (or handle-local) cache. If this cache
+    /// becomes full, some functions are moved into the global cache. At the same time, some
+    /// functions from both local and global caches may get executed in order to incrementally
+    /// clean up the caches as they fill up.
+    ///
+    /// There is no guarantee when exactly `f` will be executed. The only guarantee is that it
+    /// won't be executed until all currently pinned threads get unpinned. In theory, `f` might
+    /// never be executed, but the epoch-based garbage collection will make an effort to execute
+    /// it reasonably soon.
+    ///
+    /// If this method is called from an [`unprotected`] guard, the function will simply be
+    /// executed immediately.
+    ///
+    /// # Safety
+    ///
+    /// The given function must not hold references to data on the stack. It is highly recommended
+    /// that the passed function is **always** marked with `move` in order to prevent accidental
+    /// borrows.
+    ///
+    /// ```
+    /// use crossbeam_epoch as epoch;
+    ///
+    /// let guard = &epoch::pin();
+    /// let message = "Hello!";
+    /// unsafe {
+    ///     // ALWAYS use `move` when sending a closure into `defer`.
+    ///     guard.defer(move || {
+    ///         println!("{}", message);
+    ///     });
+    /// }
+    /// ```
+    ///
+    /// Apart from that, keep in mind that another thread may execute `f`, so anything accessed
+    /// by the closure must be `Send`.
+    ///
+    /// # Examples
+    ///
+    /// When a heap-allocated object in a data structure becomes unreachable, it has to be
+    /// deallocated. However, the current thread and other threads may be still holding references
+    /// on the stack to that same object. Therefore it cannot be deallocated before those
+    /// references get dropped. This method can defer deallocation until all those threads get
+    /// unpinned and consequently drop all their references on the stack.
+    ///
+    /// ```rust
+    /// use crossbeam_epoch::{self as epoch, Atomic, Owned};
+    /// use std::sync::atomic::Ordering::SeqCst;
+    ///
+    /// let a = Atomic::new("foo");
+    ///
+    /// // Now suppose that `a` is shared among multiple threads and concurrently
+    /// // accessed and modified...
+    ///
+    /// // Pin the current thread.
+    /// let guard = &epoch::pin();
+    ///
+    /// // Steal the object currently stored in `a` and swap it with another one.
+    /// let p = a.swap(Owned::new("bar").into_shared(guard), SeqCst, guard);
+    ///
+    /// if !p.is_null() {
+    ///     // The object `p` is pointing to is now unreachable.
+    ///     // Defer its deallocation until all currently pinned threads get unpinned.
+    ///     unsafe {
+    ///         // ALWAYS use `move` when sending a closure into `defer`.
+    ///         guard.defer(move || {
+    ///             println!("{} is now being deallocated.", p.deref());
+    ///             // Now we have unique access to the object pointed to by `p` and can turn it
+    ///             // into an `Owned`. Dropping the `Owned` will deallocate the object.
+    ///             drop(p.into_owned());
+    ///         });
+    ///     }
+    /// }
+    /// ```
+    ///
+    /// [`unprotected`]: fn.unprotected.html
+    pub unsafe fn defer<F, R>(&self, f: F)
+    where
+        F: FnOnce() -> R,
+    {
+        let garbage = Garbage::new(|| drop(f()));
+
+        if let Some(local) = self.local.as_ref() {
+            local.defer(garbage, self);
+        }
+    }
+
+    /// Clears up the thread-local cache of deferred functions by executing them or moving into the
+    /// global cache.
+    ///
+    /// Call this method after deferring execution of a function if you want to get it executed as
+    /// soon as possible. Flushing will make sure it is residing in the global cache, so that
+    /// any thread has a chance of taking the function and executing it.
+    ///
+    /// If this method is called from an [`unprotected`] guard, it is a no-op (nothing happens).
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_epoch as epoch;
+    ///
+    /// let guard = &epoch::pin();
+    /// unsafe {
+    ///     guard.defer(move || {
+    ///         println!("This better be printed as soon as possible!");
+    ///     });
+    /// }
+    /// guard.flush();
+    /// ```
+    ///
+    /// [`unprotected`]: fn.unprotected.html
+    pub fn flush(&self) {
+        if let Some(local) = unsafe { self.local.as_ref() } {
+            local.flush(self);
+        }
+    }
+
+    /// Unpins and then immediately re-pins the thread.
+    ///
+    /// This method is useful when you don't want to delay the advancement of the global epoch by
+    /// holding an old epoch. For safety, you should not maintain any guard-based reference across
+    /// the call (the latter is enforced by `&mut self`). The thread will only be repinned if this
+    /// is the only active guard for the current thread.
+    ///
+    /// If this method is called from an [`unprotected`] guard, then the call is simply a no-op.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_epoch::{self as epoch, Atomic};
+    /// use std::sync::atomic::Ordering::SeqCst;
+    /// use std::thread;
+    /// use std::time::Duration;
+    ///
+    /// let a = Atomic::new(777);
+    /// let mut guard = epoch::pin();
+    /// {
+    ///     let p = a.load(SeqCst, &guard);
+    ///     assert_eq!(unsafe { p.as_ref() }, Some(&777));
+    /// }
+    /// guard.repin();
+    /// {
+    ///     let p = a.load(SeqCst, &guard);
+    ///     assert_eq!(unsafe { p.as_ref() }, Some(&777));
+    /// }
+    /// ```
+    ///
+    /// [`unprotected`]: fn.unprotected.html
+    pub fn repin(&mut self) {
+        if let Some(local) = unsafe { self.local.as_ref() } {
+            local.repin();
+        }
+    }
+
+    /// Temporarily unpins the thread, executes the given function and then re-pins the thread.
+    ///
+    /// This method is useful when you need to perform a long-running operation (e.g. sleeping)
+    /// and don't need to maintain any guard-based reference across the call (the latter is enforced
+    /// by `&mut self`). The thread will only be unpinned if this is the only active guard for the
+    /// current thread.
+    ///
+    /// If this method is called from an [`unprotected`] guard, then the passed function is called
+    /// directly without unpinning the thread.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_epoch::{self as epoch, Atomic};
+    /// use std::sync::atomic::Ordering::SeqCst;
+    /// use std::thread;
+    /// use std::time::Duration;
+    ///
+    /// let a = Atomic::new(777);
+    /// let mut guard = epoch::pin();
+    /// {
+    ///     let p = a.load(SeqCst, &guard);
+    ///     assert_eq!(unsafe { p.as_ref() }, Some(&777));
+    /// }
+    /// guard.repin_after(|| thread::sleep(Duration::from_millis(50)));
+    /// {
+    ///     let p = a.load(SeqCst, &guard);
+    ///     assert_eq!(unsafe { p.as_ref() }, Some(&777));
+    /// }
+    /// ```
+    ///
+    /// [`unprotected`]: fn.unprotected.html
+    pub fn repin_after<F, R>(&mut self, f: F) -> R
+    where
+        F: FnOnce() -> R,
+    {
+        if let Some(local) = unsafe { self.local.as_ref() } {
+            // We need to acquire a handle here to ensure the Local doesn't
+            // disappear from under us.
+            local.acquire_handle();
+            local.unpin();
+        }
+
+        // Ensure the Guard is re-pinned even if the function panics
+        defer! {
+            if let Some(local) = unsafe { self.local.as_ref() } {
+                mem::forget(local.pin());
+                local.release_handle();
+            }
+        }
+
+        f()
+    }
+}
+
+impl Drop for Guard {
+    #[inline]
+    fn drop(&mut self) {
+        if let Some(local) = unsafe { self.local.as_ref() } {
+            local.unpin();
+        }
+    }
+}
+
+impl Clone for Guard {
+    #[inline]
+    fn clone(&self) -> Guard {
+        match unsafe { self.local.as_ref() } {
+            None => Guard { local: ptr::null() },
+            Some(local) => local.pin(),
+        }
+    }
+}
+
+/// Returns a reference to a dummy guard that allows unprotected access to [`Atomic`]s.
+///
+/// This guard should be used in special cases only. Note that it doesn't actually keep any
+/// thread pinned - it's just a fake guard that allows loading from [`Atomic`]s unsafely.
+///
+/// Note that calling [`defer`] with a dummy guard will not defer the function - it will just
+/// execute the function immediately.
+///
+/// If necessary, it's possible to create more dummy guards by cloning: `unprotected().clone()`.
+///
+/// # Safety
+///
+/// Loading and dereferencing data from an [`Atomic`] using this guard is safe only if the
+/// [`Atomic`] is not being concurrently modified by other threads.
+///
+/// # Examples
+///
+/// ```
+/// use crossbeam_epoch::{self as epoch, Atomic};
+/// use std::sync::atomic::Ordering::Relaxed;
+///
+/// let a = Atomic::new(7);
+///
+/// unsafe {
+///     // Load `a` without pinning the current thread.
+///     a.load(Relaxed, epoch::unprotected());
+///
+///     // It's possible to create more dummy guards by calling `clone()`.
+///     let dummy = &epoch::unprotected().clone();
+///
+///     dummy.defer(move || {
+///         println!("This gets executed immediately.");
+///     });
+///
+///     // Dropping `dummy` doesn't affect the current thread - it's just a noop.
+/// }
+/// ```
+///
+/// The most common use of this function is when constructing or destructing a data structure.
+///
+/// For example, we can use a dummy guard in the destructor of a Treiber stack because at that
+/// point no other thread could concurrently modify the [`Atomic`]s we are accessing.
+///
+/// If we were to actually pin the current thread during destruction, that would just unnecessarily
+/// delay garbage collection and incur some performance cost, so in cases like these `unprotected`
+/// is very helpful.
+///
+/// ```
+/// use crossbeam_epoch::{self as epoch, Atomic};
+/// use std::ptr;
+/// use std::sync::atomic::Ordering::Relaxed;
+///
+/// struct Stack {
+///     head: epoch::Atomic<Node>,
+/// }
+///
+/// struct Node {
+///     data: u32,
+///     next: epoch::Atomic<Node>,
+/// }
+///
+/// impl Drop for Stack {
+///     fn drop(&mut self) {
+///         unsafe {
+///             // Unprotected load.
+///             let mut node = self.head.load(Relaxed, epoch::unprotected());
+///
+///             while let Some(n) = node.as_ref() {
+///                 // Unprotected load.
+///                 let next = n.next.load(Relaxed, epoch::unprotected());
+///
+///                 // Take ownership of the node, then drop it.
+///                 drop(node.into_owned());
+///
+///                 node = next;
+///             }
+///         }
+///     }
+/// }
+/// ```
+///
+/// [`Atomic`]: struct.Atomic.html
+/// [`defer`]: struct.Guard.html#method.defer
+#[inline]
+pub unsafe fn unprotected() -> &'static Guard {
+    // HACK(stjepang): An unprotected guard is just a `Guard` with its field `local` set to null.
+    // Since this function returns a `'static` reference to a `Guard`, we must return a reference
+    // to a global guard. However, it's not possible to create a `static` `Guard` because it does
+    // not implement `Sync`. To get around the problem, we create a static `usize` initialized to
+    // zero and then transmute it into a `Guard`. This is safe because `usize` and `Guard`
+    // (consisting of a single pointer) have the same representation in memory.
+    static UNPROTECTED: usize = 0;
+    &*(&UNPROTECTED as *const _ as *const Guard)
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/crossbeam-epoch/src/internal.rs
@@ -0,0 +1,409 @@
+//! The global data and participant for garbage collection.
+//!
+//! # Registration
+//!
+//! In order to track all participants in one place, we need some form of participant
+//! registration. When a participant is created, it is registered in a global lock-free
+//! singly-linked list of registries; and when a participant leaves, it is unregistered from the
+//! list.
+//!
+//! # Pinning
+//!
+//! Every participant contains an integer that tells whether the participant is pinned and, if so,
+//! what the global epoch was at the time it was pinned. Participants also hold a pin counter that
+//! aids in periodic global epoch advancement.
+//!
+//! When a participant is pinned, a `Guard` is returned as a witness that the participant is pinned.
+//! Guards are necessary for performing atomic operations, and for freeing/dropping locations.
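+//!
+//! A rough sketch of how registration and pinning are driven through the `Collector`/`Handle`
+//! API exposed at the crate root (the same pattern appears in this crate's tests):
+//!
+//! ```ignore
+//! let collector = Collector::new();  // set up the global data
+//! let handle = collector.handle();   // register a participant
+//! let guard = handle.pin();          // pin the participant; the `Guard` witnesses this
+//! drop(guard);                       // unpin the participant
+//! ```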
+
+use core::cell::{Cell, UnsafeCell};
+use core::mem;
+use core::num::Wrapping;
+use core::ptr;
+use core::sync::atomic;
+use core::sync::atomic::Ordering;
+use alloc::boxed::Box;
+use alloc::arc::Arc;
+
+use crossbeam_utils::cache_padded::CachePadded;
+use nodrop::NoDrop;
+
+use atomic::Owned;
+use epoch::{AtomicEpoch, Epoch};
+use guard::{unprotected, Guard};
+use garbage::{Bag, Garbage};
+use sync::list::{List, Entry, IterError, IsElement};
+use sync::queue::Queue;
+
+/// Maximum number of garbage bags to collect in one call to `collect()`.
+const COLLECT_STEPS: usize = 8;
+
+/// Number of pinnings after which a participant will execute some deferred functions from the
+/// global queue.
+const PINNINGS_BETWEEN_COLLECT: usize = 128;
+
+/// The global data for a garbage collector.
+pub struct Global {
+    /// The intrusive linked list of `Local`s.
+    locals: List<Local>,
+
+    /// The global queue of bags of deferred functions.
+    queue: Queue<(Epoch, Bag)>,
+
+    /// The global epoch.
+    epoch: CachePadded<AtomicEpoch>,
+}
+
+impl Global {
+    /// Creates a new global data for garbage collection.
+    #[inline]
+    pub fn new() -> Global {
+        Global {
+            locals: List::new(),
+            queue: Queue::new(),
+            epoch: CachePadded::new(AtomicEpoch::new(Epoch::starting())),
+        }
+    }
+
+    /// Returns the current global epoch.
+    pub fn load_epoch(&self, ordering: Ordering) -> Epoch {
+        self.epoch.load(ordering)
+    }
+
+    /// Pushes the bag into the global queue and replaces the bag with a new empty bag.
+    pub fn push_bag(&self, bag: &mut Bag, guard: &Guard) {
+        let bag = mem::replace(bag, Bag::new());
+
+        atomic::fence(Ordering::SeqCst);
+
+        let epoch = self.epoch.load(Ordering::Relaxed);
+        self.queue.push((epoch, bag), guard);
+    }
+
+    /// Collects several bags from the global queue and executes deferred functions in them.
+    ///
+    /// Note: This may itself produce garbage and in turn allocate new bags.
+    ///
+    /// `pin()` rarely calls `collect()`, so we want the compiler to place that call on a cold
+    /// path. In other words, we want the compiler to optimize branching for the case when
+    /// `collect()` is not called.
+    #[cold]
+    pub fn collect(&self, guard: &Guard) {
+        let global_epoch = self.try_advance(guard);
+
+        let condition = |item: &(Epoch, Bag)| {
+            // A pinned participant can witness at most one epoch advancement. Therefore, any bag
+            // that is within one epoch of the current one cannot be destroyed yet.
+            global_epoch.wrapping_sub(item.0) >= 2
+        };
+
+        let steps = if cfg!(feature = "sanitize") {
+            usize::max_value()
+        } else {
+            COLLECT_STEPS
+        };
+
+        for _ in 0..steps {
+            match self.queue.try_pop_if(&condition, guard) {
+                None => break,
+                Some(bag) => drop(bag),
+            }
+        }
+    }
+
+    /// Attempts to advance the global epoch.
+    ///
+    /// The global epoch can advance only if all currently pinned participants have been pinned in
+    /// the current epoch.
+    ///
+    /// Returns the current global epoch.
+    ///
+    /// `try_advance()` is annotated `#[cold]` because it is rarely called.
+    #[cold]
+    pub fn try_advance(&self, guard: &Guard) -> Epoch {
+        let global_epoch = self.epoch.load(Ordering::Relaxed);
+        atomic::fence(Ordering::SeqCst);
+
+        // TODO(stjepang): `Local`s are stored in a linked list because linked lists are fairly
+        // easy to implement in a lock-free manner. However, traversal can be slow due to cache
+        // misses and data dependencies. We should experiment with other data structures as well.
+        for local in self.locals.iter(&guard) {
+            match local {
+                Err(IterError::Stalled) => {
+                    // A concurrent thread stalled this iteration. That thread might also try to
+                    // advance the epoch, in which case we leave the job to it. Otherwise, the
+                    // epoch will not be advanced.
+                    return global_epoch;
+                }
+                Ok(local) => {
+                    let local_epoch = local.epoch.load(Ordering::Relaxed);
+
+                    // If the participant was pinned in a different epoch, we cannot advance the
+                    // global epoch just yet.
+                    if local_epoch.is_pinned() && local_epoch.unpinned() != global_epoch {
+                        return global_epoch;
+                    }
+                }
+            }
+        }
+        atomic::fence(Ordering::Acquire);
+
+        // All pinned participants were pinned in the current global epoch.
+        // Now let's advance the global epoch...
+        //
+        // Note that if another thread already advanced it before us, this store will simply
+        // overwrite the global epoch with the same value. This is true because `try_advance` was
+        // called from a thread that was pinned in `global_epoch`, and the global epoch cannot be
+        // advanced two steps ahead of it.
+        let new_epoch = global_epoch.successor();
+        self.epoch.store(new_epoch, Ordering::Release);
+        new_epoch
+    }
+}
+
+/// Participant for garbage collection.
+pub struct Local {
+    /// A node in the intrusive linked list of `Local`s.
+    entry: Entry,
+
+    /// The local epoch.
+    epoch: AtomicEpoch,
+
+    /// A reference to the global data.
+    ///
+    /// When all guards and handles get dropped, this reference is destroyed.
+    global: UnsafeCell<NoDrop<Arc<Global>>>,
+
+    /// The local bag of deferred functions.
+    bag: UnsafeCell<Bag>,
+
+    /// The number of guards keeping this participant pinned.
+    guard_count: Cell<usize>,
+
+    /// The number of active handles.
+    handle_count: Cell<usize>,
+
+    /// Total number of pinnings performed.
+    ///
+    /// This is just an auxiliary counter that sometimes kicks off collection.
+    pin_count: Cell<Wrapping<usize>>,
+}
+
+unsafe impl Sync for Local {}
+
+impl Local {
+    /// Registers a new `Local` in the provided `Global`.
+    pub fn register(global: &Arc<Global>) -> *const Local {
+        unsafe {
+            // Since we dereference no pointers in this block, it is safe to use `unprotected`.
+
+            let local = Owned::new(Local {
+                entry: Entry::default(),
+                epoch: AtomicEpoch::new(Epoch::starting()),
+                global: UnsafeCell::new(NoDrop::new(global.clone())),
+                bag: UnsafeCell::new(Bag::new()),
+                guard_count: Cell::new(0),
+                handle_count: Cell::new(1),
+                pin_count: Cell::new(Wrapping(0)),
+            }).into_shared(&unprotected());
+            global.locals.insert(local, &unprotected());
+            local.as_raw()
+        }
+    }
+
+    /// Returns whether the local garbage bag is empty.
+    #[inline]
+    pub fn is_bag_empty(&self) -> bool {
+        unsafe { (*self.bag.get()).is_empty() }
+    }
+
+    /// Returns a reference to the `Global` in which this `Local` resides.
+    #[inline]
+    pub fn global(&self) -> &Global {
+        unsafe { &*self.global.get() }
+    }
+
+    /// Returns `true` if the current participant is pinned.
+    #[inline]
+    pub fn is_pinned(&self) -> bool {
+        self.guard_count.get() > 0
+    }
+
+    pub fn defer(&self, mut garbage: Garbage, guard: &Guard) {
+        let bag = unsafe { &mut *self.bag.get() };
+
+        while let Err(g) = bag.try_push(garbage) {
+            self.global().push_bag(bag, guard);
+            garbage = g;
+        }
+    }
+
+    pub fn flush(&self, guard: &Guard) {
+        let bag = unsafe { &mut *self.bag.get() };
+
+        if !bag.is_empty() {
+            self.global().push_bag(bag, guard);
+        }
+
+        self.global().collect(guard);
+    }
+
+    /// Pins the `Local`.
+    #[inline]
+    pub fn pin(&self) -> Guard {
+        let guard = unsafe { Guard::new(self) };
+
+        let guard_count = self.guard_count.get();
+        self.guard_count.set(guard_count.checked_add(1).unwrap());
+
+        if guard_count == 0 {
+            let global_epoch = self.global().epoch.load(Ordering::Relaxed);
+            let new_epoch = global_epoch.pinned();
+
+            // Now we must store `new_epoch` into `self.epoch` and execute a `SeqCst` fence.
+            // The fence makes sure that any future loads from `Atomic`s will not happen before
+            // this store.
+            if cfg!(any(target_arch = "x86", target_arch = "x86_64")) {
+                // HACK(stjepang): On x86 architectures there are two different ways of executing
+                // a `SeqCst` fence.
+                //
+                // 1. `atomic::fence(SeqCst)`, which compiles into a `mfence` instruction.
+                // 2. `_.compare_and_swap(_, _, SeqCst)`, which compiles into a `lock cmpxchg`
+                //    instruction.
+                //
+                // Both instructions have the effect of a full barrier, but benchmarks have shown
+                // that the second one makes pinning faster in this particular case.
+                let current = Epoch::starting();
+                let previous = self.epoch.compare_and_swap(current, new_epoch, Ordering::SeqCst);
+                debug_assert_eq!(current, previous, "participant was expected to be unpinned");
+            } else {
+                self.epoch.store(new_epoch, Ordering::Relaxed);
+                atomic::fence(Ordering::SeqCst);
+            }
+
+            // Increment the pin counter.
+            let count = self.pin_count.get();
+            self.pin_count.set(count + Wrapping(1));
+
+            // After every `PINNINGS_BETWEEN_COLLECT` pinnings, try advancing the epoch and collecting
+            // some garbage.
+            if count.0 % PINNINGS_BETWEEN_COLLECT == 0 {
+                self.global().collect(&guard);
+            }
+        }
+
+        guard
+    }
+
+    /// Unpins the `Local`.
+    #[inline]
+    pub fn unpin(&self) {
+        let guard_count = self.guard_count.get();
+        self.guard_count.set(guard_count - 1);
+
+        if guard_count == 1 {
+            self.epoch.store(Epoch::starting(), Ordering::Release);
+
+            if self.handle_count.get() == 0 {
+                self.finalize();
+            }
+        }
+    }
+
+    /// Unpins and then pins the `Local`.
+    #[inline]
+    pub fn repin(&self) {
+        let guard_count = self.guard_count.get();
+
+        // Update the local epoch only if there's only one guard.
+        if guard_count == 1 {
+            let epoch = self.epoch.load(Ordering::Relaxed);
+            let global_epoch = self.global().epoch.load(Ordering::Relaxed).pinned();
+
+            // Update the local epoch only if the global epoch is greater than the local epoch.
+            if epoch != global_epoch {
+                // We store the new epoch with `Release` because we need to ensure any memory
+                // accesses from the previous epoch do not leak into the new one.
+                self.epoch.store(global_epoch, Ordering::Release);
+
+                // However, we don't need a following `SeqCst` fence, because it is safe for memory
+                // accesses from the new epoch to be executed before updating the local epoch. At
+                // worst, other threads will see the new epoch late and delay GC slightly.
+            }
+        }
+    }
+
+    /// Increments the handle count.
+    #[inline]
+    pub fn acquire_handle(&self) {
+        let handle_count = self.handle_count.get();
+        debug_assert!(handle_count >= 1);
+        self.handle_count.set(handle_count + 1);
+    }
+
+    /// Decrements the handle count.
+    #[inline]
+    pub fn release_handle(&self) {
+        let guard_count = self.guard_count.get();
+        let handle_count = self.handle_count.get();
+        debug_assert!(handle_count >= 1);
+        self.handle_count.set(handle_count - 1);
+
+        if guard_count == 0 && handle_count == 1 {
+            self.finalize();
+        }
+    }
+
+    /// Removes the `Local` from the global linked list.
+    #[cold]
+    fn finalize(&self) {
+        debug_assert_eq!(self.guard_count.get(), 0);
+        debug_assert_eq!(self.handle_count.get(), 0);
+
+        // Temporarily increment the handle count. This is required so that dropping the guard
+        // created by the following call to `pin` doesn't call `finalize` again.
+        self.handle_count.set(1);
+        unsafe {
+            // Pin and move the local bag into the global queue. It's important that `push_bag`
+            // doesn't defer destruction on any new garbage.
+            let guard = &self.pin();
+            self.global().push_bag(&mut *self.bag.get(), guard);
+        }
+        // Revert the handle count back to zero.
+        self.handle_count.set(0);
+
+        unsafe {
+            // Take the reference to the `Global` out of this `Local`. Since we're not protected
+            // by a guard at this time, it's crucial that the reference is read before marking the
+            // `Local` as deleted.
+            let global: Arc<Global> = ptr::read(&**self.global.get());
+
+            // Mark this node in the linked list as deleted.
+            self.entry.delete(&unprotected());
+
+            // Finally, drop the reference to the global.  Note that this might be the last
+            // reference to the `Global`. If so, the global data will be destroyed and all deferred
+            // functions in its queue will be executed.
+            drop(global);
+        }
+    }
+}
+
+impl IsElement<Local> for Local {
+    fn entry_of(local: &Local) -> &Entry {
+        let entry_ptr = (local as *const Local as usize + offset_of!(Local, entry)) as *const Entry;
+        unsafe { &*entry_ptr }
+    }
+
+    unsafe fn element_of(entry: &Entry) -> &Local {
+        // offset_of! macro uses unsafe, but it's unnecessary in this context.
+        #[allow(unused_unsafe)]
+        let local_ptr = (entry as *const Entry as usize - offset_of!(Local, entry)) as *const Local;
+        &*local_ptr
+    }
+
+    unsafe fn finalize(entry: &Entry) {
+        let local = Self::element_of(entry);
+        drop(Box::from_raw(local as *const Local as *mut Local));
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/crossbeam-epoch/src/lib.rs
@@ -0,0 +1,110 @@
+//! Epoch-based memory reclamation.
+//!
+//! An interesting problem concurrent collections deal with comes from the remove operation.
+//! Suppose that a thread removes an element from a lock-free map, while another thread is reading
+//! that same element at the same time. The first thread must wait until the second thread stops
+//! reading the element. Only then is it safe to destruct it.
+//!
+//! Programming languages that come with garbage collectors solve this problem trivially. The
+//! garbage collector will destruct the removed element when no thread can hold a reference to it
+//! anymore.
+//!
+//! This crate implements a basic memory reclamation mechanism, which is based on epochs. When an
+//! element gets removed from a concurrent collection, it is inserted into a pile of garbage and
+//! marked with the current epoch. Every time a thread accesses a collection, it checks the current
+//! epoch, attempts to increment it, and destructs some garbage that became so old that no thread
+//! can be referencing it anymore.
+//!
+//! That is the general mechanism behind epoch-based memory reclamation, but the details are a bit
+//! more complicated. Anyhow, memory reclamation is designed to be fully automatic and something
+//! users of concurrent collections don't have to worry much about.
+//!
+//! # Pointers
+//!
+//! Concurrent collections are built using atomic pointers. This module provides [`Atomic`], which
+//! is just a shared atomic pointer to a heap-allocated object. Loading an [`Atomic`] yields a
+//! [`Shared`], which is an epoch-protected pointer through which the loaded object can be safely
+//! read.
+//!
+//! # Pinning
+//!
+//! Before an [`Atomic`] can be loaded, a participant must be [`pin`]ned. By pinning a participant
+//! we declare that any object that gets removed from now on must not be destructed just
+//! yet. Garbage collection of newly removed objects is suspended until the participant gets
+//! unpinned.
+//!
+//! # Garbage
+//!
+//! Objects that get removed from concurrent collections must be stashed away until all currently
+//! pinned participants get unpinned. Such objects can be stored into a [`Garbage`], where they are
+//! kept until the right time for their destruction comes.
+//!
+//! There is a global shared instance of the garbage queue. You can [`defer`] the execution of an
+//! arbitrary function until the global epoch is advanced enough. Most notably, concurrent data
+//! structures may defer the deallocation of an object.
+//!
+//! # APIs
+//!
+//! For the majority of use cases, just use the default garbage collector by invoking [`pin`]. If
+//! you want to create your own garbage collector, use the [`Collector`] API.
+//!
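+//! # Example
+//!
+//! A minimal sketch of the typical flow with the default collector: pin the thread, swap a new
+//! value into an [`Atomic`], and defer destruction of the old one (mirroring the `Guard::defer`
+//! documentation):
+//!
+//! ```
+//! use crossbeam_epoch::{self as epoch, Atomic, Owned};
+//! use std::sync::atomic::Ordering::SeqCst;
+//!
+//! let a = Atomic::new(1234);
+//!
+//! // Pin the current thread.
+//! let guard = &epoch::pin();
+//!
+//! // Replace the stored value; `p` points to the old, now unreachable object.
+//! let p = a.swap(Owned::new(5678).into_shared(guard), SeqCst, guard);
+//!
+//! if !p.is_null() {
+//!     unsafe {
+//!         // Defer deallocation until all currently pinned threads get unpinned.
+//!         guard.defer(move || drop(p.into_owned()));
+//!     }
+//! }
+//! ```
+//!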
+//! [`Atomic`]: struct.Atomic.html
+//! [`Collector`]: struct.Collector.html
+//! [`Shared`]: struct.Shared.html
+//! [`pin`]: fn.pin.html
+//! [`defer`]: fn.defer.html
+
+#![cfg_attr(feature = "nightly", feature(const_fn))]
+#![cfg_attr(feature = "nightly", feature(alloc))]
+#![cfg_attr(not(test), no_std)]
+
+#[cfg(all(not(test), feature = "use_std"))]
+#[macro_use]
+extern crate std;
+#[cfg(test)]
+extern crate core;
+
+// Use liballoc on nightly to avoid a dependency on libstd
+#[cfg(feature = "nightly")]
+extern crate alloc;
+#[cfg(not(feature = "nightly"))]
+mod alloc {
+    // Tweak the module layout to match the one in liballoc
+    extern crate std;
+    pub use self::std::boxed;
+    pub use self::std::sync as arc;
+}
+
+#[cfg(feature = "manually_drop")]
+mod nodrop {
+    pub use std::mem::ManuallyDrop as NoDrop;
+}
+#[cfg(not(feature = "manually_drop"))]
+extern crate nodrop;
+
+extern crate arrayvec;
+extern crate crossbeam_utils;
+#[cfg(feature = "use_std")]
+#[macro_use]
+extern crate lazy_static;
+#[macro_use]
+extern crate memoffset;
+#[macro_use]
+extern crate scopeguard;
+
+mod atomic;
+mod collector;
+#[cfg(feature = "use_std")]
+mod default;
+mod deferred;
+mod epoch;
+mod garbage;
+mod guard;
+mod internal;
+mod sync;
+
+pub use self::atomic::{Atomic, CompareAndSetError, CompareAndSetOrdering, Owned, Shared};
+pub use self::guard::{unprotected, Guard};
+#[cfg(feature = "use_std")]
+pub use self::default::{default_handle, is_pinned, pin};
+pub use self::collector::{Collector, Handle};
new file mode 100644
--- /dev/null
+++ b/third_party/rust/crossbeam-epoch/src/sync/list.rs
@@ -0,0 +1,473 @@
+//! Lock-free intrusive linked list.
+//!
+//! Ideas from Michael.  High Performance Dynamic Lock-Free Hash Tables and List-Based Sets.  SPAA
+//! 2002.  http://dl.acm.org/citation.cfm?id=564870.564881
+
+use core::marker::PhantomData;
+use core::sync::atomic::Ordering::{Acquire, Relaxed, Release};
+
+use {Atomic, Shared, Guard, unprotected};
+
+/// An entry in a linked list.
+///
+/// An `Entry` is accessed from multiple threads, so for better performance it is beneficial to
+/// place it in a different cache line than thread-local data.
+#[derive(Debug)]
+pub struct Entry {
+    /// The next entry in the linked list.
+    /// If the tag is 1, this entry is marked as deleted.
+    next: Atomic<Entry>,
+}
+
+/// Implementing this trait asserts that the type `T` can be used as an element in the intrusive
+/// linked list defined in this module. `T` has to contain (or otherwise be linked to) an instance
+/// of `Entry`.
+///
+/// # Example
+///
+/// ```ignore
+/// struct A {
+///     entry: Entry,
+///     data: usize,
+/// }
+///
+/// impl IsElement<A> for A {
+///     fn entry_of(a: &A) -> &Entry {
+///         let entry_ptr = ((a as usize) + offset_of!(A, entry)) as *const Entry;
+///         unsafe { &*entry_ptr }
+///     }
+///
+///     unsafe fn element_of(entry: &Entry) -> &A {
+///         let elem_ptr = ((entry as usize) - offset_of!(A, entry)) as *const A;
+///         &*elem_ptr
+///     }
+///
+///     unsafe fn finalize(entry: &Entry) {
+///         let elem = Self::element_of(entry);
+///         drop(Box::from_raw(elem as *const A as *mut A));
+///     }
+/// }
+/// ```
+///
+/// This trait is implemented on a type separate from `T` (although it can be just `T`), because
+/// one type might be placeable into multiple lists, in which case it would require multiple
+/// implementations of `IsElement`. In such cases, each struct implementing `IsElement<T>`
+/// represents a distinct `Entry` in `T`.
+///
+/// For example, we can insert the following struct into two lists using `entry1` for one
+/// and `entry2` for the other:
+///
+/// ```ignore
+/// struct B {
+///     entry1: Entry,
+///     entry2: Entry,
+///     data: usize,
+/// }
+/// ```
+///
+pub trait IsElement<T> {
+    /// Returns a reference to this element's `Entry`.
+    fn entry_of(&T) -> &Entry;
+
+    /// Given a reference to an element's entry, returns that element.
+    ///
+    /// ```ignore
+    /// let elem = ListElement::new();
+    /// assert_eq!(&elem as *const ListElement,
+    ///            unsafe { ListElement::element_of(elem.entry_of()) } as *const ListElement);
+    /// ```
+    ///
+    /// # Safety
+    /// The caller has to guarantee that the `Entry` it
+    /// is called with was retrieved from an instance of the element type (`T`).
+    unsafe fn element_of(&Entry) -> &T;
+
+    /// Deallocates the whole element given its `Entry`. This is called when the list
+    /// is ready to actually free the element.
+    ///
+    /// # Safety
+    /// The caller has to guarantee that the `Entry` it
+    /// is called with was retrieved from an instance of the element type (`T`).
+    unsafe fn finalize(&Entry);
+}
+
+/// A lock-free, intrusive linked list of type `T`.
+#[derive(Debug)]
+pub struct List<T, C: IsElement<T> = T> {
+    /// The head of the linked list.
+    head: Atomic<Entry>,
+
+    /// The phantom data for using `T` and `C`.
+    _marker: PhantomData<(T, C)>,
+}
+
+/// An iterator used for retrieving values from the list.
+pub struct Iter<'g, T: 'g, C: IsElement<T>> {
+    /// The guard that protects the iteration.
+    guard: &'g Guard,
+
+    /// Pointer from the predecessor to the current entry.
+    pred: &'g Atomic<Entry>,
+
+    /// The current entry.
+    curr: Shared<'g, Entry>,
+
+    /// The list head, needed for restarting iteration.
+    head: &'g Atomic<Entry>,
+
+    /// Logically, we store a borrow of an instance of `T` and
+    /// use the type information from `C`.
+    _marker: PhantomData<(&'g T, C)>,
+}
+
+/// An error that occurs during iteration over the list.
+#[derive(PartialEq, Debug)]
+pub enum IterError {
+    /// A concurrent thread modified the state of the list at the same place that this iterator
+    /// was inspecting. Subsequent iteration will restart from the beginning of the list.
+    Stalled,
+}
+
+impl Default for Entry {
+    /// Returns the empty entry.
+    fn default() -> Entry {
+        Entry { next: Atomic::null() }
+    }
+}
+
+impl Entry {
+    /// Marks this entry as deleted, deferring the actual deallocation to a later iteration.
+    ///
+    /// # Safety
+    ///
+    /// The entry should be a member of a linked list, and it should not have been deleted.
+    /// It should be safe to call `C::finalize` on the entry after the `guard` is dropped, where `C`
+    /// is the associated helper for the linked list.
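+    ///
+    /// A hedged sketch of the usual call site (assuming `e: Shared<Entry>` was previously
+    /// inserted into a list and `guard` pins the current participant):
+    ///
+    /// ```ignore
+    /// unsafe { e.as_ref().unwrap().delete(&guard) };
+    /// ```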
+    pub unsafe fn delete(&self, guard: &Guard) {
+        self.next.fetch_or(1, Release, guard);
+    }
+}
+
+impl<T, C: IsElement<T>> List<T, C> {
+    /// Returns a new, empty linked list.
+    pub fn new() -> List<T, C> {
+        List {
+            head: Atomic::null(),
+            _marker: PhantomData,
+        }
+    }
+
+    /// Inserts `entry` into the head of the list.
+    ///
+    /// # Safety
+    ///
+    /// You should guarantee that:
+    ///
+    /// - `container` is not null
+    /// - `container` is immovable, e.g. inside a `Box`
+    /// - the same `Entry` is not inserted more than once
+    /// - the inserted object will be removed before the list is dropped
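+    ///
+    /// A hedged sketch based on this module's tests (`Owned::new` heap-allocates the element,
+    /// which keeps it immovable; `list` is assumed to be a `List<Entry>`):
+    ///
+    /// ```ignore
+    /// let e = Owned::new(Entry::default()).into_shared(&guard);
+    /// unsafe { list.insert(e, &guard) };
+    /// ```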
+    pub unsafe fn insert<'g>(&'g self, container: Shared<'g, T>, guard: &'g Guard) {
+        // Insert right after head, i.e. at the beginning of the list.
+        let to = &self.head;
+        // Get the intrusively stored Entry of the new element to insert.
+        let entry: &Entry = C::entry_of(container.deref());
+        // Make a Shared ptr to that Entry.
+        let entry_ptr = Shared::from(entry as *const _);
+        // Read the current successor of where we want to insert.
+        let mut next = to.load(Relaxed, guard);
+
+        loop {
+            // Set the Entry of the to-be-inserted element to point to the previous successor of
+            // `to`.
+            entry.next.store(next, Relaxed);
+            match to.compare_and_set_weak(next, entry_ptr, Release, guard) {
+                Ok(_) => break,
+                // We lost the race or weak CAS failed spuriously. Update the successor and try
+                // again.
+                Err(err) => next = err.current,
+            }
+        }
+    }
+
+    /// Returns an iterator over all objects.
+    ///
+    /// # Caveat
+    ///
+    /// Every object that is inserted at the moment this function is called and persists at least
+    /// until the end of iteration will be returned. Since this iterator traverses a lock-free
+    /// linked list that may be concurrently modified, some additional caveats apply:
+    ///
+    /// 1. If a new object is inserted during iteration, it may or may not be returned.
+    /// 2. If an object is deleted during iteration, it may or may not be returned.
+    /// 3. The iteration may be aborted when it loses a race with a concurrent thread. In this
+    ///    case, the winning thread will continue to iterate over the same list (a sketch of
+    ///    handling this follows below).
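+    ///
+    /// A hedged sketch of driving the iterator and reacting to a stalled iteration (assuming
+    /// `list: &List<Entry>` and a pinned `guard`):
+    ///
+    /// ```ignore
+    /// for item in list.iter(&guard) {
+    ///     match item {
+    ///         Ok(entry) => { /* inspect the element */ }
+    ///         Err(IterError::Stalled) => { /* iteration has restarted from the head */ }
+    ///     }
+    /// }
+    /// ```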
+    pub fn iter<'g>(&'g self, guard: &'g Guard) -> Iter<'g, T, C> {
+        Iter {
+            guard: guard,
+            pred: &self.head,
+            curr: self.head.load(Acquire, guard),
+            head: &self.head,
+            _marker: PhantomData,
+        }
+    }
+}
+
+impl<T, C: IsElement<T>> Drop for List<T, C> {
+    fn drop(&mut self) {
+        unsafe {
+            let guard = &unprotected();
+            let mut curr = self.head.load(Relaxed, guard);
+            while let Some(c) = curr.as_ref() {
+                let succ = c.next.load(Relaxed, guard);
+                // Verify that all elements have been removed from the list.
+                assert_eq!(succ.tag(), 1);
+
+                C::finalize(curr.deref());
+                curr = succ;
+            }
+        }
+    }
+}
+
+impl<'g, T: 'g, C: IsElement<T>> Iterator for Iter<'g, T, C> {
+    type Item = Result<&'g T, IterError>;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        while let Some(c) = unsafe { self.curr.as_ref() } {
+            let succ = c.next.load(Acquire, self.guard);
+
+            if succ.tag() == 1 {
+                // This entry was removed. Try unlinking it from the list.
+                let succ = succ.with_tag(0);
+
+                // The tag should always be zero, because removing a node after a logically
+                // deleted node leaves the list in an invalid state.
+                debug_assert!(self.curr.tag() == 0);
+
+                match self.pred.compare_and_set(
+                    self.curr,
+                    succ,
+                    Acquire,
+                    self.guard,
+                ) {
+                    Ok(_) => {
+                        // We succeeded in unlinking this element from the list, so we have to
+                        // schedule deallocation. Deferred drop is okay, because `list.delete()`
+                        // can only be called if `T: 'static`.
+                        unsafe {
+                            let p = self.curr;
+                            self.guard.defer(move || C::finalize(p.deref()));
+                        }
+
+                        // Move over the removed entry by only advancing `curr`, not `pred`.
+                        self.curr = succ;
+                        continue;
+                    }
+                    Err(_) => {
+                        // A concurrent thread modified the predecessor node. Since it might've
+                        // been deleted, we need to restart from `head`.
+                        self.pred = self.head;
+                        self.curr = self.head.load(Acquire, self.guard);
+
+                        return Some(Err(IterError::Stalled));
+                    }
+                }
+            }
+
+            // Move one step forward.
+            self.pred = &c.next;
+            self.curr = succ;
+
+            return Some(Ok(unsafe { C::element_of(c) }));
+        }
+
+        // We reached the end of the list.
+        None
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use {Collector, Owned, Guard};
+    use crossbeam_utils::scoped;
+    use std::sync::Barrier;
+    use super::*;
+
+    impl IsElement<Entry> for Entry {
+        fn entry_of(entry: &Entry) -> &Entry {
+            entry
+        }
+
+        unsafe fn element_of(entry: &Entry) -> &Entry {
+            entry
+        }
+
+        unsafe fn finalize(entry: &Entry) {
+            drop(Box::from_raw(entry as *const Entry as *mut Entry));
+        }
+    }
+
+    /// Checks whether the list retains inserted elements
+    /// and returns them in the correct order.
+    #[test]
+    fn insert() {
+        let collector = Collector::new();
+        let handle = collector.handle();
+        let guard = handle.pin();
+
+        let l: List<Entry> = List::new();
+
+        let e1 = Owned::new(Entry::default()).into_shared(&guard);
+        let e2 = Owned::new(Entry::default()).into_shared(&guard);
+        let e3 = Owned::new(Entry::default()).into_shared(&guard);
+
+        unsafe {
+            l.insert(e1, &guard);
+            l.insert(e2, &guard);
+            l.insert(e3, &guard);
+        }
+
+        let mut iter = l.iter(&guard);
+        let maybe_e3 = iter.next();
+        assert!(maybe_e3.is_some());
+        assert!(maybe_e3.unwrap().unwrap() as *const Entry == e3.as_raw());
+        let maybe_e2 = iter.next();
+        assert!(maybe_e2.is_some());
+        assert!(maybe_e2.unwrap().unwrap() as *const Entry == e2.as_raw());
+        let maybe_e1 = iter.next();
+        assert!(maybe_e1.is_some());
+        assert!(maybe_e1.unwrap().unwrap() as *const Entry == e1.as_raw());
+        assert!(iter.next().is_none());
+
+        unsafe {
+            e1.as_ref().unwrap().delete(&guard);
+            e2.as_ref().unwrap().delete(&guard);
+            e3.as_ref().unwrap().delete(&guard);
+        }
+    }
+
+    /// Checks whether elements can be removed from the list and whether
+    /// the correct elements are removed.
+    #[test]
+    fn delete() {
+        let collector = Collector::new();
+        let handle = collector.handle();
+        let guard = handle.pin();
+
+        let l: List<Entry> = List::new();
+
+        let e1 = Owned::new(Entry::default()).into_shared(&guard);
+        let e2 = Owned::new(Entry::default()).into_shared(&guard);
+        let e3 = Owned::new(Entry::default()).into_shared(&guard);
+        unsafe {
+            l.insert(e1, &guard);
+            l.insert(e2, &guard);
+            l.insert(e3, &guard);
+            e2.as_ref().unwrap().delete(&guard);
+        }
+
+        let mut iter = l.iter(&guard);
+        let maybe_e3 = iter.next();
+        assert!(maybe_e3.is_some());
+        assert!(maybe_e3.unwrap().unwrap() as *const Entry == e3.as_raw());
+        let maybe_e1 = iter.next();
+        assert!(maybe_e1.is_some());
+        assert!(maybe_e1.unwrap().unwrap() as *const Entry == e1.as_raw());
+        assert!(iter.next().is_none());
+
+        unsafe {
+            e1.as_ref().unwrap().delete(&guard);
+            e3.as_ref().unwrap().delete(&guard);
+        }
+
+        let mut iter = l.iter(&guard);
+        assert!(iter.next().is_none());
+    }
+
+    const THREADS: usize = 8;
+    const ITERS: usize = 512;
+
+    /// Contends the list on insert and delete operations to make sure they can run concurrently.
+    #[test]
+    fn insert_delete_multi() {
+        let collector = Collector::new();
+
+        let l: List<Entry> = List::new();
+        let b = Barrier::new(THREADS);
+
+        scoped::scope(|s| for _ in 0..THREADS {
+            s.spawn(|| {
+                b.wait();
+
+                let handle = collector.handle();
+                let guard: Guard = handle.pin();
+                let mut v = Vec::with_capacity(ITERS);
+
+                for _ in 0..ITERS {
+                    let e = Owned::new(Entry::default()).into_shared(&guard);
+                    v.push(e);
+                    unsafe {
+                        l.insert(e, &guard);
+                    }
+                }
+
+                for e in v {
+                    unsafe {
+                        e.as_ref().unwrap().delete(&guard);
+                    }
+                }
+            });
+        });
+
+        let handle = collector.handle();
+        let guard = handle.pin();
+
+        let mut iter = l.iter(&guard);
+        assert!(iter.next().is_none());
+    }
+
+    /// Contends the list on iteration to make sure that it can be iterated over concurrently.
+    #[test]
+    fn iter_multi() {
+        let collector = Collector::new();
+
+        let l: List<Entry> = List::new();
+        let b = Barrier::new(THREADS);
+
+        scoped::scope(|s| for _ in 0..THREADS {
+            s.spawn(|| {
+                b.wait();
+
+                let handle = collector.handle();
+                let guard: Guard = handle.pin();
+                let mut v = Vec::with_capacity(ITERS);
+
+                for _ in 0..ITERS {
+                    let e = Owned::new(Entry::default()).into_shared(&guard);
+                    v.push(e);
+                    unsafe {
+                        l.insert(e, &guard);
+                    }
+                }
+
+                let mut iter = l.iter(&guard);
+                for _ in 0..ITERS {
+                    assert!(iter.next().is_some());
+                }
+
+                for e in v {
+                    unsafe {
+                        e.as_ref().unwrap().delete(&guard);
+                    }
+                }
+            });
+        });
+
+        let handle = collector.handle();
+        let guard = handle.pin();
+
+        let mut iter = l.iter(&guard);
+        assert!(iter.next().is_none());
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/crossbeam-epoch/src/sync/mod.rs
@@ -0,0 +1,4 @@
+//! Synchronization primitives.
+
+pub mod list;
+pub mod queue;
new file mode 100644
--- /dev/null
+++ b/third_party/rust/crossbeam-epoch/src/sync/queue.rs
@@ -0,0 +1,435 @@
+//! Michael-Scott lock-free queue.
+//!
+//! Usable with any number of producers and consumers.
+//!
+//! Michael and Scott.  Simple, Fast, and Practical Non-Blocking and Blocking Concurrent Queue
+//! Algorithms.  PODC 1996.  http://dl.acm.org/citation.cfm?id=248106
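+//!
+//! A hedged usage sketch (every operation takes an epoch `Guard`; the wrapper in this module's
+//! tests obtains one via `pin()`):
+//!
+//! ```ignore
+//! let q: Queue<i64> = Queue::new();
+//! let guard = &pin();
+//! q.push(1, guard);
+//! assert_eq!(q.try_pop(guard), Some(1));
+//! assert_eq!(q.try_pop(guard), None);
+//! ```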
+
+use core::fmt;
+use core::mem;
+use core::ptr;
+use core::sync::atomic::Ordering::{Acquire, Relaxed, Release};
+
+use crossbeam_utils::cache_padded::CachePadded;
+use nodrop::NoDrop;
+
+use {unprotected, Atomic, Guard, Owned, Shared};
+
+// The representation here is a singly-linked list, with a sentinel node at the front. In general
+// the `tail` pointer may lag behind the actual tail.
+#[derive(Debug)]
+pub struct Queue<T> {
+    head: CachePadded<Atomic<Node<T>>>,
+    tail: CachePadded<Atomic<Node<T>>>,
+}
+
+struct Node<T> {
+    /// The slot in which a value of type `T` can be stored.
+    ///
+    /// The type of `data` is `NoDrop<T>` because a `Node<T>` doesn't always contain a `T`. For
+    /// example, the sentinel node in a queue never contains a value: its slot is always empty.
+    /// Other nodes start their life with a push operation and contain a value until it gets popped
+    /// out. After that such empty nodes get added to the collector for destruction.
+    data: NoDrop<T>,
+
+    next: Atomic<Node<T>>,
+}
+
+impl<T> fmt::Debug for Node<T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
+        write!(f, "node {{ ... }}")
+    }
+}
+
+// Any particular `T` should never be accessed concurrently, so no need for `Sync`.
+unsafe impl<T: Send> Sync for Queue<T> {}
+unsafe impl<T: Send> Send for Queue<T> {}
+
+impl<T> Queue<T> {
+    /// Create a new, empty queue.
+    pub fn new() -> Queue<T> {
+        let q = Queue {
+            head: CachePadded::new(Atomic::null()),
+            tail: CachePadded::new(Atomic::null()),
+        };
+        let sentinel = Owned::new(Node {
+            data: unsafe { mem::uninitialized() },
+            next: Atomic::null(),
+        });
+        unsafe {
+            let guard = &unprotected();
+            let sentinel = sentinel.into_shared(guard);
+            q.head.store(sentinel, Relaxed);
+            q.tail.store(sentinel, Relaxed);
+            q
+        }
+    }
+
+    /// Attempts to atomically place `new` into the `next` pointer of `onto`, and returns `true`
+    /// on success. The queue's `tail` pointer may be updated.
+    #[inline(always)]
+    fn push_internal(&self, onto: Shared<Node<T>>, new: Shared<Node<T>>, guard: &Guard) -> bool {
+        // is `onto` the actual tail?
+        let o = unsafe { onto.deref() };
+        let next = o.next.load(Acquire, guard);
+        if unsafe { next.as_ref().is_some() } {
+            // if not, try to "help" by moving the tail pointer forward
+            let _ = self.tail.compare_and_set(onto, next, Release, guard);
+            false
+        } else {
+            // looks like the actual tail; attempt to link in `n`
+            let result = o.next
+                .compare_and_set(Shared::null(), new, Release, guard)
+                .is_ok();
+            if result {
+                // try to move the tail pointer forward
+                let _ = self.tail.compare_and_set(onto, new, Release, guard);
+            }
+            result
+        }
+    }
+
+    /// Adds `t` to the back of the queue.
+    pub fn push(&self, t: T, guard: &Guard) {
+        let new = Owned::new(Node {
+            data: NoDrop::new(t),
+            next: Atomic::null(),
+        });
+        let new = Owned::into_shared(new, guard);
+
+        loop {
+            // We push onto the tail, so we'll start optimistically by looking there first.
+            let tail = self.tail.load(Acquire, guard);
+
+            // Attempt to push onto the `tail` snapshot; fails if `tail.next` has changed.
+            if self.push_internal(tail, new, guard) {
+                break;
+            }
+        }
+    }
+
+    /// Attempts to pop a data node. `Ok(None)` if queue is empty; `Err(())` if lost race to pop.
+    #[inline(always)]
+    fn pop_internal(&self, guard: &Guard) -> Result<Option<T>, ()> {
+        let head = self.head.load(Acquire, guard);
+        let h = unsafe { head.deref() };
+        let next = h.next.load(Acquire, guard);
+        match unsafe { next.as_ref() } {
+            Some(n) => unsafe {
+                self.head
+                    .compare_and_set(head, next, Release, guard)
+                    .map(|_| {
+                        guard.defer(move || drop(head.into_owned()));
+                        Some(NoDrop::into_inner(ptr::read(&n.data)))
+                    })
+                    .map_err(|_| ())
+            },
+            None => Ok(None),
+        }
+    }
+
+    /// Attempts to pop a data node, if the data satisfies the given condition. `Ok(None)` if queue
+    /// is empty or the data does not satisfy the condition; `Err(())` if lost race to pop.
+    #[inline(always)]
+    fn pop_if_internal<F>(&self, condition: F, guard: &Guard) -> Result<Option<T>, ()>
+    where
+        T: Sync,
+        F: Fn(&T) -> bool,
+    {
+        let head = self.head.load(Acquire, guard);
+        let h = unsafe { head.deref() };
+        let next = h.next.load(Acquire, guard);
+        match unsafe { next.as_ref() } {
+            Some(n) if condition(&n.data) => unsafe {
+                self.head
+                    .compare_and_set(head, next, Release, guard)
+                    .map(|_| {
+                        guard.defer(move || drop(head.into_owned()));
+                        Some(NoDrop::into_inner(ptr::read(&n.data)))
+                    })
+                    .map_err(|_| ())
+            },
+            None | Some(_) => Ok(None),
+        }
+    }
+
+    /// Attempts to dequeue from the front.
+    ///
+    /// Returns `None` if the queue is observed to be empty.
+    pub fn try_pop(&self, guard: &Guard) -> Option<T> {
+        loop {
+            if let Ok(head) = self.pop_internal(guard) {
+                return head;
+            }
+        }
+    }
+
+    /// Attempts to dequeue from the front, if the item satisfies the given condition.
+    ///
+    /// Returns `None` if the queue is observed to be empty, or the head does not satisfy the given
+    /// condition.
+    pub fn try_pop_if<F>(&self, condition: F, guard: &Guard) -> Option<T>
+    where
+        T: Sync,
+        F: Fn(&T) -> bool,
+    {
+        loop {
+            if let Ok(head) = self.pop_if_internal(&condition, guard) {
+                return head;
+            }
+        }
+    }
+}
+
+impl<T> Drop for Queue<T> {
+    fn drop(&mut self) {
+        unsafe {
+            let guard = &unprotected();
+
+            while let Some(_) = self.try_pop(guard) {}
+
+            // Destroy the remaining sentinel node.
+            let sentinel = self.head.load(Relaxed, guard);
+            drop(sentinel.into_owned());
+        }
+    }
+}
+
+
+#[cfg(test)]
+mod test {
+    use {pin};
+
+    use core::sync::atomic::Ordering;
+    use crossbeam_utils::scoped;
+
+    struct Queue<T> {
+        queue: super::Queue<T>,
+    }
+
+    impl<T> Queue<T> {
+        pub fn new() -> Queue<T> {
+            Queue { queue: super::Queue::new() }
+        }
+
+        pub fn push(&self, t: T) {
+            let guard = &pin();
+            self.queue.push(t, guard);
+        }
+
+        pub fn is_empty(&self) -> bool {
+            let guard = &pin();
+            let head = self.queue.head.load(Ordering::Acquire, guard);
+            let h = unsafe { head.deref() };
+            h.next.load(Ordering::Acquire, guard).is_null()
+        }
+
+        pub fn try_pop(&self) -> Option<T> {
+            let guard = &pin();
+            self.queue.try_pop(guard)
+        }
+
+        pub fn pop(&self) -> T {
+            loop {
+                match self.try_pop() {
+                    None => continue,
+                    Some(t) => return t,
+                }
+            }
+        }
+    }
+
+    const CONC_COUNT: i64 = 1000000;
+
+    #[test]
+    fn push_try_pop_1() {
+        let q: Queue<i64> = Queue::new();
+        assert!(q.is_empty());
+        q.push(37);
+        assert!(!q.is_empty());
+        assert_eq!(q.try_pop(), Some(37));
+        assert!(q.is_empty());
+    }
+
+    #[test]
+    fn push_try_pop_2() {
+        let q: Queue<i64> = Queue::new();
+        assert!(q.is_empty());
+        q.push(37);
+        q.push(48);
+        assert_eq!(q.try_pop(), Some(37));
+        assert!(!q.is_empty());
+        assert_eq!(q.try_pop(), Some(48));
+        assert!(q.is_empty());
+    }
+
+    #[test]
+    fn push_try_pop_many_seq() {
+        let q: Queue<i64> = Queue::new();
+        assert!(q.is_empty());
+        for i in 0..200 {
+            q.push(i)
+        }
+        assert!(!q.is_empty());
+        for i in 0..200 {
+            assert_eq!(q.try_pop(), Some(i));
+        }
+        assert!(q.is_empty());
+    }
+
+    #[test]
+    fn push_pop_1() {
+        let q: Queue<i64> = Queue::new();
+        assert!(q.is_empty());
+        q.push(37);
+        assert!(!q.is_empty());
+        assert_eq!(q.pop(), 37);
+        assert!(q.is_empty());
+    }
+
+    #[test]
+    fn push_pop_2() {
+        let q: Queue<i64> = Queue::new();
+        q.push(37);
+        q.push(48);
+        assert_eq!(q.pop(), 37);
+        assert_eq!(q.pop(), 48);
+    }
+
+    #[test]
+    fn push_pop_many_seq() {
+        let q: Queue<i64> = Queue::new();
+        assert!(q.is_empty());
+        for i in 0..200 {
+            q.push(i)
+        }
+        assert!(!q.is_empty());
+        for i in 0..200 {
+            assert_eq!(q.pop(), i);
+        }
+        assert!(q.is_empty());
+    }
+
+    #[test]
+    fn push_try_pop_many_spsc() {
+        let q: Queue<i64> = Queue::new();
+        assert!(q.is_empty());
+
+        scoped::scope(|scope| {
+            scope.spawn(|| {
+                let mut next = 0;
+
+                while next < CONC_COUNT {
+                    if let Some(elem) = q.try_pop() {
+                        assert_eq!(elem, next);
+                        next += 1;
+                    }
+                }
+            });
+
+            for i in 0..CONC_COUNT {
+                q.push(i)
+            }
+        });
+    }
+
+    #[test]
+    fn push_try_pop_many_spmc() {
+        fn recv(_t: i32, q: &Queue<i64>) {
+            let mut cur = -1;
+            for _i in 0..CONC_COUNT {
+                if let Some(elem) = q.try_pop() {
+                    assert!(elem > cur);
+                    cur = elem;
+
+                    if cur == CONC_COUNT - 1 {
+                        break;
+                    }
+                }
+            }
+        }
+
+        let q: Queue<i64> = Queue::new();
+        assert!(q.is_empty());
+        let qr = &q;
+        scoped::scope(|scope| {
+            for i in 0..3 {
+                scope.spawn(move || recv(i, qr));
+            }
+
+            scope.spawn(|| for i in 0..CONC_COUNT {
+                q.push(i);
+            })
+        });
+    }
+
+    #[test]
+    fn push_try_pop_many_mpmc() {
+        enum LR {
+            Left(i64),
+            Right(i64),
+        }
+
+        let q: Queue<LR> = Queue::new();
+        assert!(q.is_empty());
+
+        scoped::scope(|scope| for _t in 0..2 {
+            scope.spawn(|| for i in CONC_COUNT - 1..CONC_COUNT {
+                q.push(LR::Left(i))
+            });
+            scope.spawn(|| for i in CONC_COUNT - 1..CONC_COUNT {
+                q.push(LR::Right(i))
+            });
+            scope.spawn(|| {
+                let mut vl = vec![];
+                let mut vr = vec![];
+                for _i in 0..CONC_COUNT {
+                    match q.try_pop() {
+                        Some(LR::Left(x)) => vl.push(x),
+                        Some(LR::Right(x)) => vr.push(x),
+                        _ => {}
+                    }
+                }
+
+                let mut vl2 = vl.clone();
+                let mut vr2 = vr.clone();
+                vl2.sort();
+                vr2.sort();
+
+                assert_eq!(vl, vl2);
+                assert_eq!(vr, vr2);
+            });
+        });
+    }
+
+    #[test]
+    fn push_pop_many_spsc() {
+        let q: Queue<i64> = Queue::new();
+
+        scoped::scope(|scope| {
+            scope.spawn(|| {
+                let mut next = 0;
+                while next < CONC_COUNT {
+                    assert_eq!(q.pop(), next);
+                    next += 1;
+                }
+            });
+
+            for i in 0..CONC_COUNT {
+                q.push(i)
+            }
+        });
+        assert!(q.is_empty());
+    }
+
+    #[test]
+    fn is_empty_dont_pop() {
+        let q: Queue<i64> = Queue::new();
+        q.push(20);
+        q.push(20);
+        assert!(!q.is_empty());
+        assert!(!q.is_empty());
+        assert!(q.try_pop().is_some());
+    }
+}
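
The test wrapper above illustrates the crate's pin/guard discipline: each operation pins the current thread and passes the resulting guard to the lock-free structure. As a minimal sketch of the same pattern using only the crate's public API (`pin`, `Atomic`, `Owned`; the stored `i32` is deliberately leaked to keep the sketch short):

```rust
extern crate crossbeam_epoch as epoch;

use std::sync::atomic::Ordering;
use epoch::{Atomic, Owned};

fn main() {
    let slot: Atomic<i32> = Atomic::null();

    // Pin the current thread; the guard keeps loaded pointers valid.
    let guard = &epoch::pin();

    // Publish a heap-allocated value and read it back under the same guard.
    slot.store(Owned::new(42), Ordering::Release);
    let shared = slot.load(Ordering::Acquire, guard);
    assert_eq!(unsafe { shared.as_ref() }, Some(&42));
    // (The allocation is never reclaimed here; real code would defer destruction.)
}
```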
new file mode 100644
--- /dev/null
+++ b/third_party/rust/crossbeam-utils/.cargo-checksum.json
@@ -0,0 +1,1 @@
+{"files":{".travis.yml":"da898db16b841a2f633a896d69df908fb263d63d04f6248e448ba49a6122f5e9","CHANGELOG.md":"945485d3f79a1912bfa6944ed7b07a9c60915fae992f7abcbb1de44ec147953e","Cargo.toml":"2c8f106920b27ebe60616933c4bf04cf2a6515d65f87fafa216febc4d6e1164b","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"0621878e61f0d0fda054bcbe02df75192c28bde1ecc8289cbd86aeba2dd72720","README.md":"ef6edf8bcb3491d8453ca36008f9e3fa0895bb6c17db47b38867784ed7717983","src/atomic_option.rs":"0ed05d26d8980c761c4972a0f37f5b507462ed6dff5d688ef92444560e7b9c69","src/cache_padded.rs":"47a99e571bf5c213395585ff001c7abd10388609f349a2e776d481e2ed0b32cb","src/lib.rs":"ea79e01d2c2f55d27d365e8cd45e377b313f53f27c705d4e4f6a4f19d7e11a98","src/scoped.rs":"5af1b54ca167c634e4c206aeab53e6ca78682633ad0009af220b17de385b3080"},"package":"2760899e32a1d58d5abb31129f8fae5de75220bc2176e77ff7c627ae45c918d9"}
\ No newline at end of file
new file mode 100644
--- /dev/null
+++ b/third_party/rust/crossbeam-utils/.travis.yml
@@ -0,0 +1,20 @@
+language: rust
+
+rust:
+  - stable
+  - beta
+  - nightly
+  - 1.12.1
+
+script:
+  - cargo build
+  - cargo build --release
+  - cargo build --no-default-features
+  - cargo build --release --no-default-features
+  - cargo test
+  - cargo test --release
+  - |
+    if [ $TRAVIS_RUST_VERSION == nightly ]; then
+      cargo test --features nightly
+      cargo test --features nightly --release
+    fi
new file mode 100644
--- /dev/null
+++ b/third_party/rust/crossbeam-utils/CHANGELOG.md
@@ -0,0 +1,41 @@
+# Changelog
+All notable changes to this project will be documented in this file.
+
+The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/)
+and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
+
+## [Unreleased]
+### Added
+- Support for Rust 1.12.1.
+
+### Fixed
+- Call `T::clone` when cloning a `CachePadded<T>`.
+
+## [0.2.1] - 2017-11-26
+### Added
+- Add `use_std` feature.
+
+## [0.2.0] - 2017-11-17
+### Added
+- Add `nightly` feature.
+- Use `repr(align(64))` on `CachePadded` with the `nightly` feature.
+- Implement `Drop` for `CachePadded<T>`.
+- Implement `Clone` for `CachePadded<T>`.
+- Implement `From<T>` for `CachePadded<T>`.
+- Implement better `Debug` for `CachePadded<T>`.
+- Write more tests.
+- Add this changelog.
+
+### Changed
+- Change cache line length to 64 bytes.
+
+### Removed
+- Remove `ZerosValid`.
+
+## 0.1.0 - 2017-08-27
+### Added
+- Old implementation of `CachePadded` from `crossbeam` version 0.3.0
+
+[Unreleased]: https://github.com/crossbeam-rs/crossbeam-utils/compare/v0.2.1...HEAD
+[0.2.1]: https://github.com/crossbeam-rs/crossbeam-utils/compare/v0.2.0...v0.2.1
+[0.2.0]: https://github.com/crossbeam-rs/crossbeam-utils/compare/v0.1.0...v0.2.0
new file mode 100644
--- /dev/null
+++ b/third_party/rust/crossbeam-utils/Cargo.toml
@@ -0,0 +1,31 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g. crates.io) dependencies
+#
+# If you believe there's an error in this file please file an
+# issue against the rust-lang/cargo repository. If you're
+# editing this file be aware that the upstream Cargo.toml
+# will likely look very different (and much more reasonable)
+
+[package]
+name = "crossbeam-utils"
+version = "0.2.2"
+authors = ["The Crossbeam Project Developers"]
+description = "Utilities for concurrent programming"
+homepage = "https://github.com/crossbeam-rs/crossbeam-utils"
+documentation = "https://docs.rs/crossbeam-utils"
+readme = "README.md"
+keywords = ["scoped", "thread", "atomic", "cache"]
+categories = ["algorithms", "concurrency", "data-structures"]
+license = "MIT/Apache-2.0"
+repository = "https://github.com/crossbeam-rs/crossbeam-utils"
+[dependencies.cfg-if]
+version = "0.1"
+
+[features]
+default = ["use_std"]
+nightly = []
+use_std = []
copy from third_party/rust/coco/LICENSE-APACHE
copy to third_party/rust/crossbeam-utils/LICENSE-APACHE
copy from third_party/rust/coco/LICENSE-MIT
copy to third_party/rust/crossbeam-utils/LICENSE-MIT
new file mode 100644
--- /dev/null
+++ b/third_party/rust/crossbeam-utils/README.md
@@ -0,0 +1,29 @@
+# Utilities for concurrent programming
+
+[![Build Status](https://travis-ci.org/crossbeam-rs/crossbeam-utils.svg?branch=master)](https://travis-ci.org/crossbeam-rs/crossbeam-utils)
+[![License](https://img.shields.io/badge/license-MIT%2FApache--2.0-blue.svg)](https://github.com/crossbeam-rs/crossbeam-utils)
+[![Cargo](https://img.shields.io/crates/v/crossbeam-utils.svg)](https://crates.io/crates/crossbeam-utils)
+[![Documentation](https://docs.rs/crossbeam-utils/badge.svg)](https://docs.rs/crossbeam-utils)
+
+This crate provides utilities for concurrent programming.
+
+## Usage
+
+Add this to your `Cargo.toml`:
+
+```toml
+[dependencies]
+crossbeam-utils = "0.2"
+```
+
+Next, add this to your crate:
+
+```rust
+extern crate crossbeam_utils;
+```
+
+## License
+
+Licensed under the terms of the MIT license and the Apache License (Version 2.0).
+
+See [LICENSE-MIT](LICENSE-MIT) and [LICENSE-APACHE](LICENSE-APACHE) for details.
new file mode 100644
--- /dev/null
+++ b/third_party/rust/crossbeam-utils/src/atomic_option.rs
@@ -0,0 +1,55 @@
+use std::sync::atomic::{AtomicPtr, Ordering};
+use std::ptr;
+
+unsafe impl<T: Send> Send for AtomicOption<T> {}
+unsafe impl<T: Send> Sync for AtomicOption<T> {}
+
+#[derive(Debug)]
+pub struct AtomicOption<T> {
+    inner: AtomicPtr<T>,
+}
+
+impl<T> Drop for AtomicOption<T> {
+    fn drop(&mut self) {
+        let inner = self.inner.load(Ordering::Relaxed);
+        if !inner.is_null() {
+            unsafe {
+                drop(Box::from_raw(inner));
+            }
+        }
+    }
+}
+
+impl<T> AtomicOption<T> {
+    pub fn new() -> Self {
+        AtomicOption { inner: AtomicPtr::new(ptr::null_mut()) }
+    }
+
+    fn swap_inner(&self, ptr: *mut T, order: Ordering) -> Option<Box<T>> {
+        let old = self.inner.swap(ptr, order);
+        if old.is_null() {
+            None
+        } else {
+            Some(unsafe { Box::from_raw(old) })
+        }
+    }
+
+    // allows re-use of allocation
+    pub fn swap_box(&self, t: Box<T>, order: Ordering) -> Option<Box<T>> {
+        self.swap_inner(Box::into_raw(t), order)
+    }
+
+    pub fn swap(&self, t: T, order: Ordering) -> Option<T> {
+        self.swap_box(Box::new(t), order).map(|old| *old)
+    }
+
+    pub fn take(&self, order: Ordering) -> Option<T> {
+        self.swap_inner(ptr::null_mut(), order).map(|old| *old)
+    }
+}
+
+impl<T> Default for AtomicOption<T> {
+    fn default() -> Self {
+        Self::new()
+    }
+}
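
As a usage sketch of the `AtomicOption` defined above (assuming the module layout shown in this patch, i.e. `crossbeam_utils::atomic_option::AtomicOption`):

```rust
extern crate crossbeam_utils;

use std::sync::atomic::Ordering;
use crossbeam_utils::atomic_option::AtomicOption;

fn main() {
    let slot: AtomicOption<String> = AtomicOption::new();

    // Publish a value; `swap` returns whatever was stored before (nothing yet).
    assert!(slot.swap("hello".to_string(), Ordering::AcqRel).is_none());

    // Take the value back out, leaving the slot empty again.
    assert_eq!(slot.take(Ordering::AcqRel), Some("hello".to_string()));
    assert!(slot.take(Ordering::AcqRel).is_none());
}
```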
new file mode 100644
--- /dev/null
+++ b/third_party/rust/crossbeam-utils/src/cache_padded.rs
@@ -0,0 +1,290 @@
+use core::fmt;
+use core::mem;
+use core::ops::{Deref, DerefMut};
+use core::ptr;
+
+
+cfg_if! {
+    if #[cfg(feature = "nightly")] {
+        // This trick allows us to support rustc 1.12.1, which does not support the
+        // #[repr(align(n))] syntax. Writing the attribute directly makes the parser fall over.
+        // It is, however, okay to use it within a macro, since the attribute is then only parsed
+        // at expansion time, which the cfg_if ensures happens only when `nightly` is enabled.
+        // TODO(Vtec234): remove this crap when we drop support for 1.12.
+        macro_rules! nightly_inner {
+            () => (
+                #[derive(Clone)]
+                #[repr(align(64))]
+                pub(crate) struct Inner<T> {
+                    value: T,
+                }
+            )
+        }
+        nightly_inner!();
+
+        impl<T> Inner<T> {
+            pub(crate) fn new(t: T) -> Inner<T> {
+                Self {
+                    value: t
+                }
+            }
+        }
+
+        impl<T> Deref for Inner<T> {
+            type Target = T;
+
+            fn deref(&self) -> &T {
+                &self.value
+            }
+        }
+
+        impl<T> DerefMut for Inner<T> {
+            fn deref_mut(&mut self) -> &mut T {
+                &mut self.value
+            }
+        }
+    } else {
+        use core::marker::PhantomData;
+
+        struct Inner<T> {
+            bytes: [u8; 64],
+
+            /// `[T; 0]` ensures alignment is at least that of `T`.
+            /// `PhantomData<T>` signals that `CachePadded<T>` contains a `T`.
+            _marker: ([T; 0], PhantomData<T>),
+        }
+
+        impl<T> Inner<T> {
+            fn new(t: T) -> Inner<T> {
+                assert!(mem::size_of::<T>() <= mem::size_of::<Self>());
+                assert!(mem::align_of::<T>() <= mem::align_of::<Self>());
+
+                unsafe {
+                    let mut inner: Self = mem::uninitialized();
+                    let p: *mut T = &mut *inner;
+                    ptr::write(p, t);
+                    inner
+                }
+            }
+        }
+
+        impl<T> Deref for Inner<T> {
+            type Target = T;
+
+            fn deref(&self) -> &T {
+                unsafe { &*(self.bytes.as_ptr() as *const T) }
+            }
+        }
+
+        impl<T> DerefMut for Inner<T> {
+            fn deref_mut(&mut self) -> &mut T {
+                unsafe { &mut *(self.bytes.as_ptr() as *mut T) }
+            }
+        }
+
+        impl<T> Drop for CachePadded<T> {
+            fn drop(&mut self) {
+                let p: *mut T = self.deref_mut();
+                unsafe {
+                    ptr::drop_in_place(p);
+                }
+            }
+        }
+
+        impl<T: Clone> Clone for Inner<T> {
+            fn clone(&self) -> Inner<T> {
+                let val = self.deref().clone();
+                Self::new(val)
+            }
+        }
+    }
+}
+
+/// Pads `T` to the length of a cache line.
+///
+/// Sometimes concurrent programming requires a piece of data to be padded out to the size of a
+/// cache line to avoid "false sharing": cache lines being invalidated due to unrelated concurrent
+/// activity. Use this type when you deliberately want to trade cache locality for isolation, so
+/// that unrelated values do not end up on the same cache line.
+///
+/// At the moment, cache lines are assumed to be 64 bytes on all architectures.
+///
+/// # Size and alignment
+///
+/// By default, the size of `CachePadded<T>` is 64 bytes. If `T` is larger than that, then
+/// `CachePadded::<T>::new` will panic. Alignment of `CachePadded<T>` is the same as that of `T`.
+///
+/// However, if the `nightly` feature is enabled, arbitrarily large types `T` can be stored inside
+/// a `CachePadded<T>`. The size will then be a multiple of 64 at least the size of `T`, and the
+/// alignment will be the maximum of 64 and the alignment of `T`.
+pub struct CachePadded<T> {
+    inner: Inner<T>,
+}
+
+unsafe impl<T: Send> Send for CachePadded<T> {}
+unsafe impl<T: Sync> Sync for CachePadded<T> {}
+
+impl<T> CachePadded<T> {
+    /// Pads a value to the length of a cache line.
+    ///
+    /// # Panics
+    ///
+    /// If `nightly` is not enabled and `T` is larger than 64 bytes, this function will panic.
+    pub fn new(t: T) -> CachePadded<T> {
+        CachePadded::<T> { inner: Inner::new(t) }
+    }
+}
+
+impl<T> Deref for CachePadded<T> {
+    type Target = T;
+
+    fn deref(&self) -> &T {
+        self.inner.deref()
+    }
+}
+
+impl<T> DerefMut for CachePadded<T> {
+    fn deref_mut(&mut self) -> &mut T {
+        self.inner.deref_mut()
+    }
+}
+
+impl<T: Default> Default for CachePadded<T> {
+    fn default() -> Self {
+        Self::new(Default::default())
+    }
+}
+
+impl<T: Clone> Clone for CachePadded<T> {
+    fn clone(&self) -> Self {
+        CachePadded { inner: self.inner.clone() }
+    }
+}
+
+impl<T: fmt::Debug> fmt::Debug for CachePadded<T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        let inner: &T = &*self;
+        write!(f, "CachePadded {{ {:?} }}", inner)
+    }
+}
+
+impl<T> From<T> for CachePadded<T> {
+    fn from(t: T) -> Self {
+        CachePadded::new(t)
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use super::*;
+    use std::cell::Cell;
+
+    #[test]
+    fn store_u64() {
+        let x: CachePadded<u64> = CachePadded::new(17);
+        assert_eq!(*x, 17);
+    }
+
+    #[test]
+    fn store_pair() {
+        let x: CachePadded<(u64, u64)> = CachePadded::new((17, 37));
+        assert_eq!(x.0, 17);
+        assert_eq!(x.1, 37);
+    }
+
+    #[test]
+    fn distance() {
+        let arr = [CachePadded::new(17u8), CachePadded::new(37u8)];
+        let a = &*arr[0] as *const u8;
+        let b = &*arr[1] as *const u8;
+        assert!(unsafe { a.offset(64) } <= b);
+    }
+
+    #[test]
+    fn different_sizes() {
+        CachePadded::new(17u8);
+        CachePadded::new(17u16);
+        CachePadded::new(17u32);
+        CachePadded::new([17u64; 0]);
+        CachePadded::new([17u64; 1]);
+        CachePadded::new([17u64; 2]);
+        CachePadded::new([17u64; 3]);
+        CachePadded::new([17u64; 4]);
+        CachePadded::new([17u64; 5]);
+        CachePadded::new([17u64; 6]);
+        CachePadded::new([17u64; 7]);
+        CachePadded::new([17u64; 8]);
+    }
+
+    cfg_if! {
+        if #[cfg(feature = "nightly")] {
+            #[test]
+            fn large() {
+                let a = [17u64; 9];
+                let b = CachePadded::new(a);
+                assert!(mem::size_of_val(&a) <= mem::size_of_val(&b));
+            }
+        } else {
+            #[test]
+            #[should_panic]
+            fn large() {
+                CachePadded::new([17u64; 9]);
+            }
+        }
+    }
+
+    #[test]
+    fn debug() {
+        assert_eq!(
+            format!("{:?}", CachePadded::new(17u64)),
+            "CachePadded { 17 }"
+        );
+    }
+
+    #[test]
+    fn drops() {
+        let count = Cell::new(0);
+
+        struct Foo<'a>(&'a Cell<usize>);
+
+        impl<'a> Drop for Foo<'a> {
+            fn drop(&mut self) {
+                self.0.set(self.0.get() + 1);
+            }
+        }
+
+        let a = CachePadded::new(Foo(&count));
+        let b = CachePadded::new(Foo(&count));
+
+        assert_eq!(count.get(), 0);
+        drop(a);
+        assert_eq!(count.get(), 1);
+        drop(b);
+        assert_eq!(count.get(), 2);
+    }
+
+    #[test]
+    fn clone() {
+        let a = CachePadded::new(17);
+        let b = a.clone();
+        assert_eq!(*a, *b);
+    }
+
+    #[test]
+    fn runs_custom_clone() {
+        let count = Cell::new(0);
+
+        struct Foo<'a>(&'a Cell<usize>);
+
+        impl<'a> Clone for Foo<'a> {
+            fn clone(&self) -> Foo<'a> {
+                self.0.set(self.0.get() + 1);
+                Foo::<'a>(self.0)
+            }
+        }
+
+        let a = CachePadded::new(Foo(&count));
+        a.clone();
+
+        assert_eq!(count.get(), 1);
+    }
+}
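
A short sketch of the guarantees described in the `CachePadded` docs above, as seen from user code built without the `nightly` feature (module path as vendored here):

```rust
extern crate crossbeam_utils;

use std::mem;
use crossbeam_utils::cache_padded::CachePadded;

fn main() {
    // Small values are padded out to a full 64-byte cache line...
    let counter = CachePadded::new(0u64);
    assert_eq!(mem::size_of_val(&counter), 64);

    // ...while `Deref`/`DerefMut` still expose the wrapped value directly.
    let mut padded = CachePadded::new(1u64);
    *padded += 1;
    assert_eq!(*padded, 2);
}
```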
new file mode 100644
--- /dev/null
+++ b/third_party/rust/crossbeam-utils/src/lib.rs
@@ -0,0 +1,14 @@
+#![cfg_attr(feature = "nightly", feature(attr_literals, repr_align))]
+#![cfg_attr(not(feature = "use_std"), no_std)]
+
+#[cfg(feature = "use_std")]
+extern crate core;
+
+#[macro_use]
+extern crate cfg_if;
+
+pub mod cache_padded;
+#[cfg(feature = "use_std")]
+pub mod atomic_option;
+#[cfg(feature = "use_std")]
+pub mod scoped;
new file mode 100644
--- /dev/null
+++ b/third_party/rust/crossbeam-utils/src/scoped.rs
@@ -0,0 +1,364 @@
+/// Scoped thread.
+///
+/// # Examples
+///
+/// A basic scoped thread:
+///
+/// ```
+/// crossbeam_utils::scoped::scope(|scope| {
+///     scope.spawn(|| {
+///         println!("Hello from a scoped thread!");
+///     });
+/// });
+/// ```
+///
+/// When writing concurrent Rust programs, you'll sometimes see a pattern like this, using
+/// [`std::thread::spawn`][spawn]:
+///
+/// ```ignore
+/// let array = [1, 2, 3];
+/// let mut guards = vec![];
+///
+/// for i in &array {
+///     let guard = std::thread::spawn(move || {
+///         println!("element: {}", i);
+///     });
+///
+///     guards.push(guard);
+/// }
+///
+/// for guard in guards {
+///     guard.join().unwrap();
+/// }
+/// ```
+///
+/// The basic pattern is:
+///
+/// 1. Iterate over some collection.
+/// 2. Spin up a thread to operate on each part of the collection.
+/// 3. Join all the threads.
+///
+/// However, this code actually gives an error:
+///
+/// ```text
+/// error: `array` does not live long enough
+/// for i in &array {
+///           ^~~~~
+/// in expansion of for loop expansion
+/// note: expansion site
+/// note: reference must be valid for the static lifetime...
+/// note: ...but borrowed value is only valid for the block suffix following statement 0 at ...
+///     let array = [1, 2, 3];
+///     let mut guards = vec![];
+///
+///     for i in &array {
+///         let guard = std::thread::spawn(move || {
+///             println!("element: {}", i);
+/// ...
+/// error: aborting due to previous error
+/// ```
+///
+/// Because [`std::thread::spawn`][spawn] doesn't know about this scope, it requires a
+/// `'static` lifetime. One way of giving it a proper lifetime is to use an [`Arc`][arc]:
+///
+/// [arc]: http://doc.rust-lang.org/stable/std/sync/struct.Arc.html
+///
+/// ```
+/// use std::sync::Arc;
+///
+/// let array = Arc::new([1, 2, 3]);
+/// let mut guards = vec![];
+///
+/// for i in 0..array.len() {
+///     let a = array.clone();
+///
+///     let guard = std::thread::spawn(move || {
+///         println!("element: {}", a[i]);
+///     });
+///
+///     guards.push(guard);
+/// }
+///
+/// for guard in guards {
+///     guard.join().unwrap();
+/// }
+/// ```
+///
+/// But this introduces unnecessary allocation, as `Arc<T>` puts its data on the heap, and we
+/// also end up dealing with reference counts. We know that we're joining the threads before
+/// our function returns, so just taking a reference _should_ be safe. Rust can't know that,
+/// though.
+///
+/// Enter scoped threads. Here's our original example, using `spawn` from crossbeam rather
+/// than from `std::thread`:
+///
+/// ```
+/// let array = [1, 2, 3];
+///
+/// crossbeam_utils::scoped::scope(|scope| {
+///     for i in &array {
+///         scope.spawn(move || {
+///             println!("element: {}", i);
+///         });
+///     }
+/// });
+/// ```
+///
+/// Much more straightforward.
+// FIXME(jeehoonkang): maybe we should create a new crate for scoped threads.
+
+use std::cell::RefCell;
+use std::fmt;
+use std::mem;
+use std::rc::Rc;
+use std::sync::atomic::Ordering;
+use std::sync::Arc;
+use std::thread;
+use std::io;
+
+use atomic_option::AtomicOption;
+
+#[doc(hidden)]
+trait FnBox {
+    fn call_box(self: Box<Self>);
+}
+
+impl<F: FnOnce()> FnBox for F {
+    fn call_box(self: Box<Self>) {
+        (*self)()
+    }
+}
+
+/// Like `std::thread::spawn`, but without the closure bounds.
+pub unsafe fn spawn_unsafe<'a, F>(f: F) -> thread::JoinHandle<()>
+where
+    F: FnOnce() + Send + 'a,
+{
+    let builder = thread::Builder::new();
+    builder_spawn_unsafe(builder, f).unwrap()
+}
+
+/// Like `std::thread::Builder::spawn`, but without the closure bounds.
+pub unsafe fn builder_spawn_unsafe<'a, F>(
+    builder: thread::Builder,
+    f: F,
+) -> io::Result<thread::JoinHandle<()>>
+where
+    F: FnOnce() + Send + 'a,
+{
+    use std::mem;
+
+    let closure: Box<FnBox + 'a> = Box::new(f);
+    let closure: Box<FnBox + Send> = mem::transmute(closure);
+    builder.spawn(move || closure.call_box())
+}
+
+
+pub struct Scope<'a> {
+    dtors: RefCell<Option<DtorChain<'a>>>,
+}
+
+struct DtorChain<'a> {
+    dtor: Box<FnBox + 'a>,
+    next: Option<Box<DtorChain<'a>>>,
+}
+
+enum JoinState {
+    Running(thread::JoinHandle<()>),
+    Joined,
+}
+
+impl JoinState {
+    fn join(&mut self) {
+        let mut state = JoinState::Joined;
+        mem::swap(self, &mut state);
+        if let JoinState::Running(handle) = state {
+            let res = handle.join();
+
+            if !thread::panicking() {
+                res.unwrap();
+            }
+        }
+    }
+}
+
+/// A handle to a scoped thread
+pub struct ScopedJoinHandle<T> {
+    inner: Rc<RefCell<JoinState>>,
+    packet: Arc<AtomicOption<T>>,
+    thread: thread::Thread,
+}
+
+/// Create a new `scope`, for deferred destructors.
+///
+/// Scopes, in particular, support [*scoped thread spawning*](struct.Scope.html#method.spawn).
+///
+/// # Examples
+///
+/// Creating and using a scope:
+///
+/// ```
+/// crossbeam_utils::scoped::scope(|scope| {
+///     scope.defer(|| println!("Exiting scope"));
+///     scope.spawn(|| println!("Running child thread in scope"))
+/// });
+/// // Prints messages in the reverse order written
+/// ```
+pub fn scope<'a, F, R>(f: F) -> R
+where
+    F: FnOnce(&Scope<'a>) -> R,
+{
+    let mut scope = Scope { dtors: RefCell::new(None) };
+    let ret = f(&scope);
+    scope.drop_all();
+    ret
+}
+
+impl<'a> fmt::Debug for Scope<'a> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "Scope {{ ... }}")
+    }
+}
+
+impl<T> fmt::Debug for ScopedJoinHandle<T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "ScopedJoinHandle {{ ... }}")
+    }
+}
+
+impl<'a> Scope<'a> {
+    // This method is carefully written in a transactional style, so
+    // that it can be called directly and, if any dtor panics, can be
+    // resumed in the unwinding this causes. By initially running the
+    // method outside of any destructor, we avoid any leakage problems
+    // due to @rust-lang/rust#14875.
+    fn drop_all(&mut self) {
+        loop {
+            // use a separate scope to ensure that the RefCell borrow
+            // is relinquished before running `dtor`
+            let dtor = {
+                let mut dtors = self.dtors.borrow_mut();
+                if let Some(mut node) = dtors.take() {
+                    *dtors = node.next.take().map(|b| *b);
+                    node.dtor
+                } else {
+                    return;
+                }
+            };
+            dtor.call_box()
+        }
+    }
+
+    /// Schedule code to be executed when exiting the scope.
+    ///
+    /// This is akin to having a destructor on the stack, except that it is
+    /// *guaranteed* to be run.
+    pub fn defer<F>(&self, f: F)
+    where
+        F: FnOnce() + 'a,
+    {
+        let mut dtors = self.dtors.borrow_mut();
+        *dtors = Some(DtorChain {
+            dtor: Box::new(f),
+            next: dtors.take().map(Box::new),
+        });
+    }
+
+    /// Create a scoped thread.
+    ///
+    /// `spawn` is similar to the [`spawn`][spawn] function in Rust's standard library. The
+    /// difference is that this thread is scoped, meaning that it's guaranteed to terminate
+    /// before the current stack frame goes away, allowing you to reference the parent stack frame
+    /// directly. This is ensured by having the parent thread join on the child thread before the
+    /// scope exits.
+    ///
+    /// [spawn]: http://doc.rust-lang.org/std/thread/fn.spawn.html
+    pub fn spawn<F, T>(&self, f: F) -> ScopedJoinHandle<T>
+    where
+        F: FnOnce() -> T + Send + 'a,
+        T: Send + 'a,
+    {
+        self.builder().spawn(f).unwrap()
+    }
+
+    /// Generates the base configuration for spawning a scoped thread, from which configuration
+    /// methods can be chained.
+    pub fn builder<'s>(&'s self) -> ScopedThreadBuilder<'s, 'a> {
+        ScopedThreadBuilder {
+            scope: self,
+            builder: thread::Builder::new(),
+        }
+    }
+}
+
+/// Scoped thread configuration. Provides detailed control over the properties and behavior of new
+/// scoped threads.
+pub struct ScopedThreadBuilder<'s, 'a: 's> {
+    scope: &'s Scope<'a>,
+    builder: thread::Builder,
+}
+
+impl<'s, 'a: 's> ScopedThreadBuilder<'s, 'a> {
+    /// Names the thread-to-be. Currently the name is used for identification only in panic
+    /// messages.
+    pub fn name(mut self, name: String) -> ScopedThreadBuilder<'s, 'a> {
+        self.builder = self.builder.name(name);
+        self
+    }
+
+    /// Sets the size of the stack for the new thread.
+    pub fn stack_size(mut self, size: usize) -> ScopedThreadBuilder<'s, 'a> {
+        self.builder = self.builder.stack_size(size);
+        self
+    }
+
+    /// Spawns a new thread, and returns a join handle for it.
+    pub fn spawn<F, T>(self, f: F) -> io::Result<ScopedJoinHandle<T>>
+    where
+        F: FnOnce() -> T + Send + 'a,
+        T: Send + 'a,
+    {
+        let their_packet = Arc::new(AtomicOption::new());
+        let my_packet = their_packet.clone();
+
+        let join_handle = try!(unsafe {
+            builder_spawn_unsafe(self.builder, move || {
+                their_packet.swap(f(), Ordering::Relaxed);
+            })
+        });
+
+        let thread = join_handle.thread().clone();
+        let deferred_handle = Rc::new(RefCell::new(JoinState::Running(join_handle)));
+        let my_handle = deferred_handle.clone();
+
+        self.scope.defer(move || {
+            let mut state = deferred_handle.borrow_mut();
+            state.join();
+        });
+
+        Ok(ScopedJoinHandle {
+            inner: my_handle,
+            packet: my_packet,
+            thread: thread,
+        })
+    }
+}
+
+impl<T> ScopedJoinHandle<T> {
+    /// Join the scoped thread, returning the result it produced.
+    pub fn join(self) -> T {
+        self.inner.borrow_mut().join();
+        self.packet.take(Ordering::Relaxed).unwrap()
+    }
+
+    /// Get the underlying thread handle.
+    pub fn thread(&self) -> &thread::Thread {
+        &self.thread
+    }
+}
+
+impl<'a> Drop for Scope<'a> {
+    fn drop(&mut self) {
+        self.drop_all()
+    }
+}
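
To round out the module docs above, a minimal sketch of a scoped thread that borrows local data and hands a result back through `ScopedJoinHandle::join`:

```rust
extern crate crossbeam_utils;

use crossbeam_utils::scoped;

fn main() {
    let data = vec![1, 2, 3, 4];

    let sum = scoped::scope(|scope| {
        // The child may borrow `data` because it is joined before the scope ends.
        let handle = scope.spawn(|| data.iter().sum::<i32>());
        handle.join()
    });

    assert_eq!(sum, 10);
}
```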
new file mode 100644
--- /dev/null
+++ b/third_party/rust/memoffset/.cargo-checksum.json
@@ -0,0 +1,1 @@
+{"files":{"Cargo.toml":"c48ab44d744586aab2bb02ff1288429679cced7cca191c0584982f9eb6edb6c7","LICENSE":"3234ac55816264ee7b6c7ee27efd61cf0a1fe775806870e3d9b4c41ea73c5cb1","README.md":"5b2de7b6eaa7f01720c0a58040264f1ba336fbd005d43284b55f82a826011d11","src/lib.rs":"f8cfb7f2f7e3b179de53d1bde8e814c51b5f66f638f1e744bc14cf9ae4148381","src/offset_of.rs":"1cf7c89bb7b05dee7241913596f107672289e14e8d3d404eecc8c97b302f2f12","src/span_of.rs":"513452dcb6e4c7d5354631973492561de83bed5aaa8306f82cc5828664962538"},"package":"0f9dc261e2b62d7a622bf416ea3c5245cdd5d9a7fcc428c0d06804dfce1775b3"}
\ No newline at end of file
new file mode 100644
--- /dev/null
+++ b/third_party/rust/memoffset/Cargo.toml
@@ -0,0 +1,24 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g. crates.io) dependencies
+#
+# If you believe there's an error in this file please file an
+# issue against the rust-lang/cargo repository. If you're
+# editing this file be aware that the upstream Cargo.toml
+# will likely look very different (and much more reasonable)
+
+[package]
+name = "memoffset"
+version = "0.2.1"
+authors = ["Gilad Naaman <gilad.naaman@gmail.com>"]
+description = "offset_of functionality for Rust structs."
+readme = "README.md"
+keywords = ["mem", "offset", "offset_of", "offsetof"]
+categories = ["no-std"]
+license = "MIT"
+repository = "https://github.com/Gilnaa/memoffset"
+
+[dependencies]
new file mode 100644
--- /dev/null
+++ b/third_party/rust/memoffset/LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2017 Gilad Naaman
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
new file mode 100644
--- /dev/null
+++ b/third_party/rust/memoffset/README.md
@@ -0,0 +1,47 @@
+# memoffset #
+
+[![](http://meritbadge.herokuapp.com/memoffset)](https://crates.io/crates/memoffset)
+
+C-Like `offset_of` functionality for Rust structs.
+
+Introduces the following macros:
+ * `offset_of!` for obtaining the offset of a member of a struct.
+ * `span_of!` for obtaining the range that a field, or fields, span.
+
+`memoffset` works under `no_std` environments.
+
+## Usage ##
+Add the following dependency to your `Cargo.toml`:
+
+```toml
+[dependencies]
+memoffset = "0.2"
+```
+
+Add the following lines at the top of your `main.rs` or `lib.rs`:
+
+```rust
+#[macro_use]
+extern crate memoffset;
+```
+
+## Examples ##
+```rust
+#[repr(C, packed)]
+struct Foo {
+	a: u32,
+	b: u32,
+	c: [u8; 5],
+	d: u32,
+}
+
+assert_eq!(offset_of!(Foo, b), 4);
+assert_eq!(offset_of!(Foo, c[3]), 11);
+
+assert_eq!(span_of!(Foo, a),          0..4);
+assert_eq!(span_of!(Foo, a ..  c),    0..8);
+assert_eq!(span_of!(Foo, a ..  c[1]), 0..9);
+assert_eq!(span_of!(Foo, a ..= c[1]), 0..10);
+assert_eq!(span_of!(Foo, ..= d),      0..14);
+assert_eq!(span_of!(Foo, b ..),       4..17);
+```
new file mode 100644
--- /dev/null
+++ b/third_party/rust/memoffset/src/lib.rs
@@ -0,0 +1,70 @@
+// Copyright (c) 2017 Gilad Naaman
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+//! A crate used for calculating offsets of struct members and their spans.
+//!
+//! Some of the functionality of the crate makes little sense when used on structs that
+//! are not `#[repr(C, packed)]`, but it is up to the user to ensure that they are.
+//!
+//! ## Examples
+//! ```
+//! #[macro_use]
+//! extern crate memoffset;
+//!
+//! #[repr(C, packed)]
+//! struct HelpMeIAmTrappedInAStructFactory {
+//!     help_me_before_they_: [u8; 15],
+//!     a: u32
+//! }
+//!
+//! fn main() {
+//!     assert_eq!(offset_of!(HelpMeIAmTrappedInAStructFactory, a), 15);
+//!     assert_eq!(span_of!(HelpMeIAmTrappedInAStructFactory, a), 15..19);
+//!     assert_eq!(span_of!(HelpMeIAmTrappedInAStructFactory, help_me_before_they_[2] .. a), 2..15);
+//! }
+//! ```
+//!
+//! This functionality can be useful, for example, for checksum calculations:
+//!
+//! ```ignore
+//! #[repr(C, packed)]
+//! struct Message {
+//!     header: MessageHeader,
+//!     fragment_index: u32,
+//!     fragment_count: u32,
+//!     payload: [u8; 1024],
+//!     checksum: u16
+//! }
+//!
+//! let checksum_range = &raw[span_of!(Message, header..checksum)];
+//! let checksum = crc16(checksum_range);
+//! ```
+
+#![no_std]
+
+// This `use` statement enables the macros to use `$crate::mem`.
+// Doing this lets the macros work in both std and no-std crates.
+#[doc(hidden)]
+pub use core::mem;
+
+#[macro_use]
+mod offset_of;
+#[macro_use]
+mod span_of;
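
To make the checksum idea in the module docs concrete, a runnable sketch; the `Message` layout and the byte-sum standing in for a real CRC are illustrative assumptions, not part of the crate:

```rust
#[macro_use]
extern crate memoffset;

#[repr(C, packed)]
struct Message {
    fragment_index: u32,
    fragment_count: u32,
    payload: [u8; 8],
    checksum: u16,
}

fn main() {
    // Everything before the `checksum` field is covered by the checksum.
    let covered = span_of!(Message, ..checksum);
    assert_eq!(covered, 0..16);

    // Stand-in for a real CRC over the covered bytes of a raw buffer.
    let raw = [1u8; 18];
    let checksum: u32 = raw[covered].iter().map(|&b| u32::from(b)).sum();
    assert_eq!(checksum, 16);
}
```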
new file mode 100644
--- /dev/null
+++ b/third_party/rust/memoffset/src/offset_of.rs
@@ -0,0 +1,119 @@
+// Copyright (c) 2017 Gilad Naaman
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+/// Calculates the offset of the specified field from the start of the struct.
+/// This macro supports an arbitrary number of subscripts and nested member accesses.
+///
+/// *Note*: This macro may not make much sense when used on structs that are not `#[repr(C, packed)]`
+///
+/// ## Examples - Simple
+/// ```
+/// #[macro_use]
+/// extern crate memoffset;
+///
+/// #[repr(C, packed)]
+/// struct Foo {
+///     a: u32,
+///     b: u64,
+///     c: [u8; 5]
+/// }
+///
+/// fn main() {
+///     assert_eq!(offset_of!(Foo, a), 0);
+///     assert_eq!(offset_of!(Foo, b), 4);
+///     assert_eq!(offset_of!(Foo, c[2]), 14);
+/// }
+/// ```
+///
+/// ## Examples - Advanced
+/// ```
+/// #[macro_use]
+/// extern crate memoffset;
+///
+/// #[repr(C, packed)]
+/// struct UnnecessarilyComplicatedStruct {
+///     member: [UnnecessarilyComplexStruct; 12]
+/// }
+///
+/// #[repr(C, packed)]
+/// struct UnnecessarilyComplexStruct {
+///     a: u32,
+///     b: u64,
+///     c: [u8; 5]
+/// }
+///
+///
+/// fn main() {
+///     assert_eq!(offset_of!(UnnecessarilyComplicatedStruct, member[3].c[3]), 66);
+/// }
+/// ```
+#[macro_export]
+macro_rules! offset_of {
+    ($father:ty, $($field:tt)+) => ({
+        #[allow(unused_unsafe)]
+        let root: $father = unsafe { $crate::mem::uninitialized() };
+
+        let base = &root as *const _ as usize;
+
+        // Future error: borrow of packed field requires unsafe function or block (error E0133)
+        #[allow(unused_unsafe)]
+        let member =  unsafe { &root.$($field)* as *const _ as usize };
+
+        $crate::mem::forget(root);
+
+        member - base
+    });
+}
+
+#[cfg(test)]
+mod tests {
+    #[repr(C, packed)]
+    struct Foo {
+        a: u32,
+        b: [u8; 4],
+        c: i64,
+    }
+
+    #[test]
+    fn offset_simple() {
+        assert_eq!(offset_of!(Foo, a), 0);
+        assert_eq!(offset_of!(Foo, b), 4);
+        assert_eq!(offset_of!(Foo, c), 8);
+    }
+
+    #[test]
+    fn offset_index() {
+        assert_eq!(offset_of!(Foo, b[2]), 6);
+    }
+
+    #[test]
+    #[should_panic]
+    fn offset_index_out_of_bounds() {
+        offset_of!(Foo, b[4]);
+    }
+
+    #[test]
+    fn tuple_struct() {
+        #[repr(C, packed)]
+        struct Tup(i32, i32);
+
+        assert_eq!(offset_of!(Tup, 0), 0);
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/memoffset/src/span_of.rs
@@ -0,0 +1,274 @@
+// Copyright (c) 2017 Gilad Naaman
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+/// Produces a range instance representing the sub-slice containing the specified member.
+///
+/// This macro provides two forms with differing functionality.
+///
+/// The first form is identical in appearance to the `offset_of!` macro, and just like
+/// `offset_of!`, it places no limit on the depth of fields / subscripts used.
+///
+/// ```ignore
+/// span_of!(Struct, member[index].field)
+/// ```
+///
+/// The second form of `span_of!` returns the range that starts at one field and ends at another.
+/// The general pattern of this form is:
+///
+/// ```ignore
+/// // Exclusive
+/// span_of!(Struct, member_a .. member_b)
+/// // Inclusive
+/// span_of!(Struct, member_a ..= member_b)
+///
+/// // Open-ended ranges
+/// span_of!(Struct, .. end)
+/// span_of!(Struct, start ..)
+/// ```
+///
+/// *Note*: 
+/// This macro uses recursion to resolve the range expressions, so there is a limit to how complex an expression it can handle.
+/// If you hit that limit, raise the compiler's recursion limit (for example with `#![recursion_limit = "..."]`).
+///
+/// *Note*: 
+/// This macro may not make much sense when used on structs that are not `#[repr(C, packed)]`
+///
+/// ## Examples
+/// ```
+/// #[macro_use]
+/// extern crate memoffset;
+///
+/// #[repr(C, packed)]
+/// struct Florp {
+///     a: u32
+/// }
+///
+/// #[repr(C, packed)]
+/// struct Blarg {
+///     x: u64,
+///     y: [u8; 56],
+///     z: Florp,
+///     egg: [[u8; 4]; 4]
+/// }
+///
+/// fn main() {
+///     assert_eq!(0..8,   span_of!(Blarg, x));
+///     assert_eq!(64..68, span_of!(Blarg, z.a));
+///     assert_eq!(79..80, span_of!(Blarg, egg[2][3]));
+///
+///     assert_eq!(8..64,  span_of!(Blarg, y[0]  ..  z));
+///     assert_eq!(0..42,  span_of!(Blarg, x     ..  y[34]));
+///     assert_eq!(0..64,  span_of!(Blarg, x     ..= y));
+///     assert_eq!(58..68, span_of!(Blarg, y[50] ..= z));
+/// }
+/// ```
+#[macro_export]
+macro_rules! span_of {
+    (@helper $root:ident, [] ..=) => {
+        compile_error!("Expected a range, found '..='")
+    };
+    (@helper $root:ident, [] ..) => {
+        compile_error!("Expected a range, found '..'")
+    };
+    (@helper $root:ident, [] ..= $($field:tt)+) => {
+        (&$root as *const _ as usize,
+         &$root.$($field)* as *const _ as usize + $crate::mem::size_of_val(&$root.$($field)*))
+    };
+    (@helper $root:ident, [] .. $($field:tt)+) => {
+        (&$root as *const _ as usize, &$root.$($field)* as *const _ as usize)
+    };
+    (@helper $root:ident, $(# $begin:tt)+ [] ..= $($end:tt)+) => {
+        (&$root.$($begin)* as *const _ as usize,
+         &$root.$($end)* as *const _ as usize + $crate::mem::size_of_val(&$root.$($end)*))
+    };
+    (@helper $root:ident, $(# $begin:tt)+ [] .. $($end:tt)+) => {
+        (&$root.$($begin)* as *const _ as usize, &$root.$($end)* as *const _ as usize)
+    };
+    (@helper $root:ident, $(# $begin:tt)+ [] ..) => {
+        (&$root.$($begin)* as *const _ as usize,
+         &$root as *const _ as usize + $crate::mem::size_of_val(&$root))
+    };
+    (@helper $root:ident, $(# $begin:tt)+ [] ..=) => {
+        compile_error!(
+            "Found inclusive range to the end of a struct. Did you mean '..' instead of '..='?")
+    };
+    (@helper $root:ident, $(# $begin:tt)+ []) => {
+        (&$root.$($begin)* as *const _ as usize,
+         &$root.$($begin)* as *const _ as usize + $crate::mem::size_of_val(&$root.$($begin)*))
+    };
+    (@helper $root:ident, $(# $begin:tt)+ [] $tt:tt $($rest:tt)*) => {
+        span_of!(@helper $root, $(#$begin)* #$tt [] $($rest)*)
+    };
+    (@helper $root:ident, [] $tt:tt $($rest:tt)*) => {
+        span_of!(@helper $root, #$tt [] $($rest)*)
+    };
+
+    ($sty:ty, $($exp:tt)+) => ({
+        unsafe { 
+            let root: $sty = $crate::mem::uninitialized();
+            let base = &root as *const _ as usize;
+            let (begin, end) = span_of!(@helper root, [] $($exp)*);
+            begin-base..end-base
+        }
+    });
+}
+
+#[cfg(test)]
+mod tests {
+    use ::core::mem;
+
+    #[repr(C, packed)]
+    struct Foo {
+        a: u32,
+        b: [u8; 4],
+        c: i64,
+    }
+
+    #[test]
+    fn span_simple() {
+        assert_eq!(span_of!(Foo, a), 0..4);
+        assert_eq!(span_of!(Foo, b), 4..8);
+        assert_eq!(span_of!(Foo, c), 8..16);
+    }
+
+    #[test]
+    fn span_index() {
+        assert_eq!(span_of!(Foo, b[1]), 5..6);
+    }
+
+    #[test]
+    fn span_forms() {
+        #[repr(C, packed)]
+        struct Florp {
+            a: u32,
+        }
+
+        #[repr(C, packed)]
+        struct Blarg {
+            x: u64,
+            y: [u8; 56],
+            z: Florp,
+            egg: [[u8; 4]; 4],
+        }
+
+        // Love me some brute force
+        assert_eq!(0..8, span_of!(Blarg, x));
+        assert_eq!(64..68, span_of!(Blarg, z.a));
+        assert_eq!(79..80, span_of!(Blarg, egg[2][3]));
+
+        assert_eq!(8..64, span_of!(Blarg, y[0]..z));
+        assert_eq!(0..42, span_of!(Blarg, x..y[34]));
+        assert_eq!(0..64, span_of!(Blarg, x     ..= y));
+        assert_eq!(58..68, span_of!(Blarg, y[50] ..= z));
+    }
+
+    #[test]
+    fn ig_test() {
+        #[repr(C)]
+        struct Member {
+            foo: u32,
+        }
+
+        #[repr(C)]
+        struct Test {
+            x: u64,
+            y: [u8; 56],
+            z: Member,
+            egg: [[u8; 4]; 4],
+        }
+
+        assert_eq!(span_of!(Test, ..x), 0..0);
+        assert_eq!(span_of!(Test, ..=x), 0..8);
+        assert_eq!(span_of!(Test, ..y), 0..8);
+        assert_eq!(span_of!(Test, ..=y), 0..64);
+        assert_eq!(span_of!(Test, ..y[0]), 0..8);
+        assert_eq!(span_of!(Test, ..=y[0]), 0..9);
+        assert_eq!(span_of!(Test, ..z), 0..64);
+        assert_eq!(span_of!(Test, ..=z), 0..68);
+        assert_eq!(span_of!(Test, ..z.foo), 0..64);
+        assert_eq!(span_of!(Test, ..=z.foo), 0..68);
+        assert_eq!(span_of!(Test, ..egg), 0..68);
+        assert_eq!(span_of!(Test, ..=egg), 0..84);
+        assert_eq!(span_of!(Test, ..egg[0]), 0..68);
+        assert_eq!(span_of!(Test, ..=egg[0]), 0..72);
+        assert_eq!(span_of!(Test, ..egg[0][0]), 0..68);
+        assert_eq!(span_of!(Test, ..=egg[0][0]), 0..69);
+        assert_eq!(
+            span_of!(Test, x..),
+            offset_of!(Test, x)..mem::size_of::<Test>()
+        );
+        assert_eq!(
+            span_of!(Test, y..),
+            offset_of!(Test, y)..mem::size_of::<Test>()
+        );
+        assert_eq!(
+            span_of!(Test, y[0]..),
+            offset_of!(Test, y[0])..mem::size_of::<Test>()
+        );
+        assert_eq!(
+            span_of!(Test, z..),
+            offset_of!(Test, z)..mem::size_of::<Test>()
+        );
+        assert_eq!(
+            span_of!(Test, z.foo..),
+            offset_of!(Test, z.foo)..mem::size_of::<Test>()
+        );
+        assert_eq!(
+            span_of!(Test, egg..),
+            offset_of!(Test, egg)..mem::size_of::<Test>()
+        );
+        assert_eq!(
+            span_of!(Test, egg[0]..),
+            offset_of!(Test, egg[0])..mem::size_of::<Test>()
+        );
+        assert_eq!(
+            span_of!(Test, egg[0][0]..),
+            offset_of!(Test, egg[0][0])..mem::size_of::<Test>()
+        );
+        assert_eq!(
+            span_of!(Test, x..y),
+            offset_of!(Test, x)..offset_of!(Test, y)
+        );
+        assert_eq!(
+            span_of!(Test, x..=y),
+            offset_of!(Test, x)..offset_of!(Test, y) + mem::size_of::<[u8; 56]>()
+        );
+        assert_eq!(
+            span_of!(Test, x..y[4]),
+            offset_of!(Test, x)..offset_of!(Test, y[4])
+        );
+        assert_eq!(
+            span_of!(Test, x..=y[4]),
+            offset_of!(Test, x)..offset_of!(Test, y) + mem::size_of::<[u8; 5]>()
+        );
+        assert_eq!(
+            span_of!(Test, x..z.foo),
+            offset_of!(Test, x)..offset_of!(Test, z.foo)
+        );
+        assert_eq!(
+            span_of!(Test, x..=z.foo),
+            offset_of!(Test, x)..offset_of!(Test, z.foo) + mem::size_of::<u32>()
+        );
+        assert_eq!(
+            span_of!(Test, egg[0][0]..egg[1][0]),
+            offset_of!(Test, egg[0][0])..offset_of!(Test, egg[1][0])
+        );
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/.cargo-checksum.json
@@ -0,0 +1,1 @@
+{"files":{".travis.yml":"f50507960a9ceb5659f17d3ca114880f6ea5a91981a1a7585c1ca0b2c32eafef","Cargo.toml":"a80d28f1e820a246d8c6ecd78fe19b5438716f048ed8ef1d4212b265c0e6f04d","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"0621878e61f0d0fda054bcbe02df75192c28bde1ecc8289cbd86aeba2dd72720","README.md":"40bcc4505c71815463bb4fef4ca2158091cdc97ac51dee49ab5559b62401b493","RELEASES.md":"41fcefb8206d971bb953b552b15c232bb290145e56ab2ecee8ce9f4e0c685c98","appveyor.yml":"7e9559e0e28af2d5da74b1d8598dffc0f42817b0e7f0fefda2d67dce1e6d5bc6","ci/highlander.sh":"68b1e5c3be188a4c503d0e6b12e8409459947f560dcf92ec0658b27e136d44f2","examples/README.md":"537e6fe9cf696fd4ada9c08bf31055ed6e366ed65668a523e7c5fde77f97c8d3","examples/cpu_monitor.rs":"8e65cbc35762eaf3c108071d00b260293527c61f5e3089248366d71440f362ca","scripts/analyze.sh":"35214a036d8d0ead5400562cd72b131376849f65d63d04122c21886a57049934","src/collections/binary_heap.rs":"c9b77ba76e03f73ce192bdd9ff10e5bf1becbac51b229d2edcf215b360020c7a","src/collections/btree_map.rs":"e04e373c37266a65d880f8f7449aec4ae380de77e6e7705344e90ab45b81e336","src/collections/btree_set.rs":"1f2e75b0c1bd8fbf4405a57fb735fe071acb68b988fd58f1f3cd06e678ae4da4","src/collections/hash_map.rs":"c53e4157e07a21d49e011ab1fb3d80d54790059a81827ec8b6ead063438c4ecc","src/collections/hash_set.rs":"2341e7193a156030cc59f6b88984056ed31cef8bdaf36e76f2bebe7f29f1e954","src/collections/linked_list.rs":"675ee631db311a360424125e8b0d2fd6cf156b836d9874a7a06149fac82b51d5","src/collections/mod.rs":"24bbbe85c6e9c65ce7a3906595a68048bfe4c94c790563e1526920104dfb1906","src/collections/vec_deque.rs":"b152b6119ac543adfef92f852e3795322887cb5b252412991c685891f3152b20","src/delegate.rs":"df71e8a5d4a41717f713baa20b0a70a31325f82af217734aaaa36d8682dbd26d","src/iter/README.md":"e843627769d4f284dc927b86ae3d874894689607fa3d096b0a4f5f084f933d40","src/iter/chain.rs":"27c86e8764d93e5e27722be1f04149ffb2f2eeb77fd9e82d082547314e416813","src/iter/cloned.rs":"8ed86733ecab3452d301123e559c2daa3623c9883e8aef31753d06ad21b4e7b2","src/iter/collect/consumer.rs":"c8f03c471eb3eea789997d5e722bf35f7228a4bd2d2b3c16ca01ce647ebbaba1","src/iter/collect/mod.rs":"e96064e3c70473871611a285c2fab59eaede7bc5efa6c073dbf6f398f1527c2a","src/iter/collect/test.rs":"3305b01a4f7526b89bda7c15578678849b512706baa9ef78fdd3ada7cefc2212","src/iter/enumerate.rs":"8dc63307bb005726efc95557879d67df3623352c1c92e59e0b7c83add9b2bcd3","src/iter/extend.rs":"a7d9501fc44e99e0ee903c1efe5454222eb0a102836f9158e709e15d290cd7d6","src/iter/filter.rs":"85dc8579a63d80e63a12ad3ab9259bc8222e801642b62d37a20da2b29f5b9c03","src/iter/filter_map.rs":"79a13c4483aac78367d0a818f7c8176bab03722c1f9b4db93ee59810f4172890","src/iter/find.rs":"f52f482947bde2717e9eb8163a8993fbaf19ddd561c3b320f878953e3ff18bbe","src/iter/find_first_last/mod.rs":"0524686a96b8aeb02ac37cdbf32f9ab57a943f628ee0399c3bd7fb6b8e1a0294","src/iter/find_first_last/test.rs":"f71d35df36c553b239a07c75b0c961f001bcafb5df774c165e18ba06b2db0350","src/iter/flat_map.rs":"962f969dc6266009d70ad9ee47b210addb5a988f1f20ec33de342ea2427d40d7","src/iter/fold.rs":"c4c7f1e1bb7684cde32146d7c11e6443aabf63d692d160fc4212b8d7e421e560","src/iter/for_each.rs":"fce9dbd6abc34915616e1663d2fb67f75d3071cdef88e57e40fac91ad686310e","src/iter/from_par_iter.rs":"9124e211101336f5ecdf079b8e63757d113506c7cac41c4a2963f0d5062c4dcd","src/iter/inspect.rs":"9cc5e779470d8ffb76643e377920f1c07dabcb1152e4639eb866671ebba817b3","src/iter/internal.rs":"acd673495e30ad13d50413693bb9d53857f9176fc362e5962efbdaa6e883d618","src/iter/len.rs":"d
804ecd7bb6f7d7a2e103896c705f2c0129db9128ff197af7a010a1c5e87bd7d","src/iter/map.rs":"181a8ce6a74421c363583c2c4e057379728cef99980a777a77eff519827aae2a","src/iter/map_with.rs":"589ffb47024d5272c97bbfdfa373e02586bac024cdea75cb9d9bf4acf6177057","src/iter/mod.rs":"c24bcae57b74e044576ce251c797130351931b30bda09da14bb6908fd8bd89bf","src/iter/noop.rs":"8dd7608547083820eae7b3863d06e97112db9d8f0f7c62c6ee854402f90c3f5d","src/iter/product.rs":"5c19bc2df086451352aa3aa2d0a005b7bca47899b8131d4e8551237a0bdb9f84","src/iter/reduce.rs":"185fabd1cc987778bda7c4ecf7217ba5e5e47d762313e0064529f9a9246ff930","src/iter/rev.rs":"ce80f0fdb185c72b6f4dff7fc13113246f12be8c4b0bdcf89c4eefe4578527e0","src/iter/skip.rs":"bd2ae4a57b59609c786b8a6457c8003d56b5ecd952593b3ef1e6568903484ccb","src/iter/splitter.rs":"5a728b13662c46b4a279c001130629728c00c77127df064a7e775a7d684b1c2a","src/iter/sum.rs":"5448a773625aaafd7c11e8d503460b79c2c4e9ff3b7542ad723702f01f9d3800","src/iter/take.rs":"0f9dcf1bac14ca582546ce230077b37c9ed71992c5b8250c96f01100dc5c42cd","src/iter/test.rs":"5640f015e5d43de506f4b04af91597731a699457637ee806d1d2b534fa7cbabf","src/iter/unzip.rs":"1ac7382f52d1201a1aed0827d057560462dd99406075b52ae13b50ba3099c696","src/iter/while_some.rs":"0b2f96838604b616aaffa00da9cfdb83d00324204c644b87b2beb2e1e1733132","src/iter/zip.rs":"ae3546beece5c3c67110a8c0bd620522cb346c7b07cc28eb3f55200d3144ea35","src/lib.rs":"eb32d5fdde656bfcb8c5d526c9083f3d318c45dd569858b89966bad240116740","src/option.rs":"40b696ae2be883f046cb24ecb52e48a27efbf9225d4bead9d74fbeecc280db26","src/prelude.rs":"270985c11ce2fb1699c17bb52e116d0d32f8924c2aa4188450d9462d736a4d29","src/private.rs":"951f15fc1e9446cc47a7a932cdd9f858aa5df7da7fa070d196cd05de190e6952","src/range.rs":"fa3cacae156a3fa274e2b09c04a6965cda574b9838a2cc7916b74d705b94bd44","src/result.rs":"5223be4a32c720a0d21bce341ce9207e11e7f838ec1146b2a37e9fabc287db45","src/slice/mergesort.rs":"4d0e12c08a5ac01a60cb2234f979034c92ca8f9192f5f67aa33a5e1877e608db","src/slice/mod.rs":"5870189dc9ca8f51c93194539c066041b041a254b8273227794f97ca1051ba0f","src/slice/quicksort.rs":"b930d388f79cceff521c7c04c3e8b9714cb57b5d4e68e470fe63259a550e2007","src/slice/test.rs":"512424e5884533f425d8ce62b282c57062e8373d9a6ee16699cd45217b7efab6","src/split_producer.rs":"424982cf31708c7121e950fd6ed69768bd132ef67597692d550e2e361e53b5a6","src/str.rs":"c26576143452fce791915c7e8faaab102ab816e9e42327fb1496ca6e07cb1f4c","src/test.rs":"ab51bf6a72f1eae6e214643be5de58959060ee54727efb36d7485aaf7c9a4093","src/vec.rs":"00ff40cf41ac4603bf545353a2d0302330952731351247869dfc2318cbb941e3","tests/compile-fail-unstable/README.md":"53e7fb9aa143094c4ad8a4f3f954b125559d3f09e40d3cb4ab43dd06fc22e35b","tests/compile-fail-unstable/future_escape.rs":"f876eceb4c7ff26fd538c1e5d4cae6c424516563face45842cb2d0fa3bff5131","tests/compile-fail/README.md":"3a7477331161672cf83c67a2f38aeb989cb35a9bcb00c3ddf0cc123315f5cd00","tests/compile-fail/cannot_collect_filtermap_data.rs":"730a597fc86b79edf0921999f343d376c80f369e65f9109760be8b81037d4033","tests/compile-fail/cannot_zip_filtered_data.rs":"9271f21d2d1146e9e588936a15b7c54507b050039c04d902f09516ed1bcf2728","tests/compile-fail/cell_par_iter.rs":"3a20e18d488b0769f7b7679387cfe05f383b657dd07d155f3d4391676e36e857","tests/compile-fail/must_use.rs":"a139d6e6e3fbba78993d723a83904a864565bbf86aea8492043865d2a7ab4dc6","tests/compile-fail/no_send_par_iter.rs":"ce3346fb657f0684e64ff5f4870ab3ef4a468dd47bfdc7a117633e720299f300","tests/compile-fail/quicksort_race1.rs":"983cb334de39ef9fc6d3bdf40497d6cba9db50d6ea7932b0fbd628e8ba6f82df","tests/compile-fail/quic
ksort_race2.rs":"7e9d4477e6b34d075405e86fbb617c5e60ccf1729f81ef04907282106257fc64","tests/compile-fail/quicksort_race3.rs":"8ae1b5285acb75097358d8d424bf2c08a6044937edb57aa647ca521f30240d16","tests/compile-fail/rc_par_iter.rs":"2518f55a035db28c446faedfc07e2cede9d18f6bb5d53a69e0a533538b1b22c3","tests/compile-fail/rc_return.rs":"c9b1cf6d94f3eff00674ee9820b34b2ae3fa5f29bdf1f389edfe04bd82930c76","tests/compile-fail/rc_upvar.rs":"200c4583e17eb09547cabcf1b2f3ab02d2176f58e11f6ae9fff864ff9a6c9655","tests/compile-fail/scope_join_bad.rs":"2ad7d09f2273860a0e7c6d9b65356141339b96b189b7c8403aeccdcb1c0c9060","tests/run-fail-unstable/README.md":"448cd23346a2a71d581c5afbb61daa0349892ec5ad78d18730190127d2d11487","tests/run-fail/README.md":"2b9a7abb977671af7123478f9c4d2f596cd446869a7adaaf306656a2767bb80a","tests/run-fail/iter_panic.rs":"a897798038b89125d13883a7040341c0666bbde9f71a2bebed96ead8839bfaa3","tests/run-fail/simple_panic.rs":"b9d1cd0bedb6b22e4fd3df5ae948e804c1119d5e7a98e9eb4fdcf6281fd504ed","tests/run-pass-unstable/README.md":"2f996550ba5946cf6721b0ee6420f77555fc9a796ce0543fab7931b7a5e4ef5b","tests/run-pass/README.md":"324816facdb78da40a1539cdae8c4bc9d4d027451c167b5f087abc3239b199bf","tests/run-pass/double_init_fail.rs":"841735a15b819bf3ea5e50e758e0825575b00652897cb0fecf25d723a537f1b9","tests/run-pass/init_zero_threads.rs":"42ff55d2396a9feb0be67aaad18252a0a92f778b9f5e9564f35634488228e37c","tests/run-pass/named-threads.rs":"511a75feed739de04033efe4bb2986fd1549bd03a086db8e12ec3b9751d27771","tests/run-pass/scope_join.rs":"65894d7bfde3d2ad7f18a54dbc786bcd756e1d00ce4d7cef503b3c74938e06d7","tests/run-pass/sort-panic-safe.rs":"14319461ae4c21800bca5befb2d7147f315a86aa2235feeada9029cc25f46ffd","tests/run-pass/stack_overflow_crash.rs":"a76489f74f2b9a308f3e56317e87d6566708b0b8c635ffec9a46305da4d48f77"},"package":"b614fe08b6665cb9a231d07ac1364b0ef3cb3698f1239ee0c4c3a88a524f54c8"}
\ No newline at end of file
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/.travis.yml
@@ -0,0 +1,38 @@
+language: rust
+rust:
+  - 1.12.0
+  - stable
+  - nightly
+os:
+  - linux
+  - osx
+
+# Using 16MB stacks for deep test/debug recursion
+env:
+  global:
+    - RUST_MIN_STACK=16777216
+
+matrix:
+  include:
+  - rust: stable
+    env: RUSTFLAGS='--cfg rayon_unstable'
+    os: linux
+  - rust: stable
+    env: RUSTFLAGS='--cfg rayon_unstable'
+    os: osx
+  - rust: nightly
+    env: RUSTFLAGS='--cfg rayon_unstable'
+    os: linux
+  - rust: nightly
+    env: RUSTFLAGS='--cfg rayon_unstable'
+    os: osx
+
+script:
+  - cargo build
+  - |
+    if [ $TRAVIS_RUST_VERSION == nightly ]; then
+      cargo test &&
+      cargo test -p rayon-core &&
+      cargo test -p rayon-demo &&
+      ./ci/highlander.sh
+    fi
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/Cargo.toml
@@ -0,0 +1,36 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g. crates.io) dependencies
+#
+# If you believe there's an error in this file please file an
+# issue against the rust-lang/cargo repository. If you're
+# editing this file be aware that the upstream Cargo.toml
+# will likely look very different (and much more reasonable)
+
+[package]
+name = "rayon"
+version = "0.8.2"
+authors = ["Niko Matsakis <niko@alum.mit.edu>", "Josh Stone <cuviper@gmail.com>"]
+description = "Simple work-stealing parallelism for Rust"
+documentation = "https://docs.rs/rayon/"
+license = "Apache-2.0/MIT"
+repository = "https://github.com/nikomatsakis/rayon"
+[dependencies.rayon-core]
+version = "1.2"
+[dev-dependencies.rand]
+version = "0.3"
+
+[dev-dependencies.compiletest_rs]
+version = "0.2.1"
+
+[dev-dependencies.docopt]
+version = "0.7"
+
+[dev-dependencies.rustc-serialize]
+version = "0.3"
+
+[dev-dependencies.futures]
+version = "0.1.7"
copy from third_party/rust/coco/LICENSE-APACHE
copy to third_party/rust/rayon-0.8.2/LICENSE-APACHE
copy from third_party/rust/coco/LICENSE-MIT
copy to third_party/rust/rayon-0.8.2/LICENSE-MIT
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/README.md
@@ -0,0 +1,458 @@
+# Rayon
+
+[![Join the chat at https://gitter.im/rayon-rs/Lobby](https://badges.gitter.im/rayon-rs/Lobby.svg)](https://gitter.im/rayon-rs/Lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
+
+[![Travis Status](https://travis-ci.org/nikomatsakis/rayon.svg?branch=master)](https://travis-ci.org/nikomatsakis/rayon)
+
+[![Appveyor status](https://ci.appveyor.com/api/projects/status/6oft3iwgr6f2o4d4?svg=true)](https://ci.appveyor.com/project/nikomatsakis/rayon)
+
+Rayon is a data-parallelism library for Rust. It is extremely
+lightweight and makes it easy to convert a sequential computation into
+a parallel one. It also guarantees data-race freedom. (You may also
+enjoy [this blog post][blog] about Rayon, which gives more background
+and details about how it works, or [this video][video], from the Rust Belt Rust conference.) Rayon is
+[available on crates.io](https://crates.io/crates/rayon), and
+[API Documentation is available on docs.rs](https://docs.rs/rayon/).
+
+[blog]: http://smallcultfollowing.com/babysteps/blog/2015/12/18/rayon-data-parallelism-in-rust/
+[video]: https://www.youtube.com/watch?v=gof_OEv71Aw
+
+You can use Rayon in two ways. Which way you choose will depend on
+what you are doing:
+
+- Parallel iterators: convert iterator chains to execute in parallel.
+- The `join` method: convert recursive, divide-and-conquer style
+  problems to execute in parallel.
+
+No matter which way you choose, you don't have to worry about data
+races: Rayon statically guarantees data-race freedom. For the most
+part, adding calls to Rayon should not change how your program works
+at all, in fact. However, if you operate on mutexes or atomic
+integers, please see the [notes on atomicity](#atomicity).
+
+Rayon currently requires `rustc 1.12.0` or greater.
+
+### Using Rayon
+
+[Rayon is available on crates.io](https://crates.io/crates/rayon). The
+recommended way to use it is to add a line into your Cargo.toml such
+as:
+
+```toml
+[dependencies]
+rayon = "0.8.2"
+```
+
+and then add the following to your `lib.rs`:
+
+```rust
+extern crate rayon;
+```
+
+To use the Parallel Iterator APIs, a number of traits have to be in
+scope. The easiest way to bring those things into scope is to use the
+[Rayon prelude](https://docs.rs/rayon/*/rayon/prelude/index.html).
+In each module where you would like to use the parallel iterator APIs,
+just add:
+
+```rust
+use rayon::prelude::*;
+```
+
+### Contribution
+
+Rayon is an open source project! If you'd like to contribute to Rayon, check out [the list of "help wanted" issues](https://github.com/nikomatsakis/rayon/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22). These are all (or should be) issues that are suitable for getting started, and they generally include a detailed set of instructions for what to do. Please ask questions if anything is unclear! Also, check out the [Guide to Development](https://github.com/nikomatsakis/rayon/wiki/Guide-to-Development) page on the wiki. Note that all code submitted in PRs to Rayon is assumed to [be licensed under Rayon's dual MIT/Apache2 licensing](https://github.com/nikomatsakis/rayon/blob/master/README.md#license).
+
+### Quick demo
+
+To see Rayon in action, check out the `rayon-demo` directory, which
+includes a number of demos of code using Rayon. For example, run this
+command to get a visualization of an nbody simulation. To see the
+effect of using Rayon, press `s` to run sequentially and `p` to run in
+parallel.
+
+```
+> cd rayon-demo
+> cargo +nightly run --release -- nbody visualize
+```
+
+For more information on demos, try:
+
+```
+> cd rayon-demo
+> cargo +nightly run --release -- --help
+```
+
+**Note:** While Rayon is usable as a library with the stable compiler, running demos or executing tests requires nightly Rust.
+
+### Parallel Iterators
+
+Rayon supports an experimental API called "parallel iterators". These
+let you write iterator-like chains that execute in parallel. For
+example, to compute the sum of the squares of a sequence of integers,
+one might write:
+
+```rust
+use rayon::prelude::*;
+fn sum_of_squares(input: &[i32]) -> i32 {
+    input.par_iter()
+         .map(|&i| i * i)
+         .sum()
+}
+```
+
+Or, to increment all the integers in a slice, you could write:
+
+```rust
+use rayon::prelude::*;
+fn increment_all(input: &mut [i32]) {
+    input.par_iter_mut()
+         .for_each(|p| *p += 1);
+}
+```
+
+To use parallel iterators, first import the traits by adding something
+like `use rayon::prelude::*` to your module. You can then call
+`par_iter` and `par_iter_mut` to get a parallel iterator.  Like a
+[regular iterator][], parallel iterators work by first constructing a
+computation and then executing it. See the
+[`ParallelIterator` trait][pt] for the list of available methods and
+more details. (Sorry, proper documentation is still somewhat lacking.)
+
+[regular iterator]: http://doc.rust-lang.org/std/iter/trait.Iterator.html
+[pt]: https://github.com/nikomatsakis/rayon/blob/master/src/iter/mod.rs
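+
+To make that concrete, here is a small sketch of mine (the input values
+are arbitrary and this is not taken from the demos): longer chains
+compose just as they do on sequential iterators, and nothing executes
+until a consuming method such as `sum()` or `collect()` runs the
+computation.
+
+```rust
+extern crate rayon;
+use rayon::prelude::*;
+
+fn main() {
+    let input: Vec<i32> = (1..1001).collect();
+    // Build the computation lazily, then execute it with `sum()`.
+    let total: i32 = input.par_iter()
+                          .filter(|&&x| x % 2 == 0)
+                          .map(|&x| x * x)
+                          .sum();
+    println!("sum of squares of even numbers: {}", total);
+}
+```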
+
+### Using join for recursive, divide-and-conquer problems
+
+Parallel iterators are actually implemented in terms of a more
+primitive method called `join`. `join` simply takes two closures and
+potentially runs them in parallel. For example, we could rewrite the
+`increment_all` function we saw for parallel iterators as follows
+(this function increments all the integers in a slice):
+
+```rust
+/// Increment all values in slice.
+fn increment_all(slice: &mut [i32]) {
+    if slice.len() < 1000 {
+        for p in slice { *p += 1; }
+    } else {
+        let mid_point = slice.len() / 2;
+        let (left, right) = slice.split_at_mut(mid_point);
+        rayon::join(|| increment_all(left), || increment_all(right));
+    }
+}
+```
+
+Perhaps a more interesting example is this parallel quicksort:
+
+```rust
+fn quick_sort<T:PartialOrd+Send>(v: &mut [T]) {
+    if v.len() <= 1 {
+        return;
+    }
+
+    let mid = partition(v);
+    let (lo, hi) = v.split_at_mut(mid);
+    rayon::join(|| quick_sort(lo), || quick_sort(hi));
+}
+```
+
+**Note though that calling `join` is very different from just spawning
+two threads in terms of performance.** This is because `join` does not
+*guarantee* that the two closures will run in parallel. If all of your
+CPUs are already busy with other work, Rayon will instead opt to run
+them sequentially. The call to `join` is designed to have very low
+overhead in that case, so that you can safely call it even with very
+small workloads (as in the example above).
+
+However, in practice, the overhead is still noticeable. Therefore, for
+maximal performance, you want to have some kind of sequential fallback
+once your problem gets small enough. The parallel iterator APIs try to
+handle this for you. When using `join`, you have to code it yourself.
+For an example, see the [quicksort demo][], which includes sequential
+fallback after a certain size.
+
+[quicksort demo]: https://github.com/nikomatsakis/rayon/blob/master/rayon-demo/src/quicksort/mod.rs
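+
+Just to illustrate the shape of that fallback (a rough sketch only --
+this is not the demo's actual code, and the `1000` cutoff here is an
+arbitrary number), one might write:
+
+```rust
+extern crate rayon;
+
+/// Arbitrary cutoff below which we sort sequentially; the real demo
+/// tunes this kind of threshold by benchmarking.
+const SEQUENTIAL_CUTOFF: usize = 1000;
+
+fn quick_sort<T: PartialOrd + Send>(v: &mut [T]) {
+    if v.len() <= SEQUENTIAL_CUTOFF {
+        // Small slices: a plain sequential sort avoids paying the
+        // (small but nonzero) overhead of `join` over and over.
+        insertion_sort(v);
+        return;
+    }
+    let mid = partition(v);
+    let (lo, hi) = v.split_at_mut(mid);
+    rayon::join(|| quick_sort(lo), || quick_sort(hi));
+}
+
+fn insertion_sort<T: PartialOrd>(v: &mut [T]) {
+    for i in 1..v.len() {
+        let mut j = i;
+        while j > 0 && v[j] < v[j - 1] {
+            v.swap(j, j - 1);
+            j -= 1;
+        }
+    }
+}
+
+fn partition<T: PartialOrd>(v: &mut [T]) -> usize {
+    // Lomuto partition around the last element.
+    let pivot = v.len() - 1;
+    let mut store = 0;
+    for i in 0..pivot {
+        if v[i] <= v[pivot] {
+            v.swap(i, store);
+            store += 1;
+        }
+    }
+    v.swap(store, pivot);
+    store
+}
+```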
+
+### Safety
+
+You've probably heard that parallel programming can be the source of
+bugs that are really hard to diagnose. That is certainly true!
+However, thanks to Rust's type system, you basically don't have to
+worry about that when using Rayon. The Rayon APIs are guaranteed to be
+data-race free. The Rayon APIs themselves also cannot cause deadlocks
+(though if your closures or callbacks use locks or ports, those locks
+might trigger deadlocks).
+
+For example, if you write code that tries to process the same mutable
+state from both closures, you will find that fails to compile:
+
+```rust
+/// Increment all values in slice.
+fn increment_all(slice: &mut [i32]) {
+    rayon::join(|| process(slice), || process(slice));
+}
+```
+
+However, this safety does have some implications. You will not be able
+to use types which are not thread-safe (i.e., do not implement `Send`)
+from inside a `join` closure. Note that almost all types *are* in fact
+thread-safe in Rust; the only exception is those types that employ
+"inherent mutability" without some form of synchronization, such as
+`RefCell` or `Rc`. Here is a list of the most common types in the
+standard library that are not `Send`, along with an alternative that
+you can use instead which *is* `Send` (but which also has higher
+overhead, because it must work across threads):
+
+- `Cell` -- replacement: `AtomicUsize`, `AtomicBool`, etc (but see warning below)
+- `RefCell` -- replacement: `RwLock`, or perhaps `Mutex` (but see warning below)
+- `Rc` -- replacement: `Arc`
+
+However, if you are converting uses of `Cell` or `RefCell`, you must
+be prepared for other threads to interject changes. For more
+information, read the section on atomicity below.
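+
+For instance, here is a minimal sketch (mine, not taken from the Rayon
+docs; the vector contents are arbitrary) of sharing a vector between
+the two halves of a `join` by moving it behind an `Arc<Mutex<...>>`:
+
+```rust
+extern crate rayon;
+
+use std::sync::{Arc, Mutex};
+
+fn main() {
+    // Sequentially you might have used `Rc<RefCell<Vec<i32>>>`;
+    // neither `Rc` nor `RefCell` is `Send`, so we switch to the
+    // thread-safe counterparts.
+    let data = Arc::new(Mutex::new(vec![1, 2, 3, 4]));
+    rayon::join(|| data.lock().unwrap().push(5),
+                || data.lock().unwrap().push(6));
+    // Both pushes happen, but their relative order depends on scheduling.
+    println!("{:?}", *data.lock().unwrap());
+}
+```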
+
+### How it works: Work stealing
+
+Behind the scenes, Rayon uses a technique called work stealing to try
+and dynamically ascertain how much parallelism is available and
+exploit it. The idea is very simple: we always have a pool of worker
+threads available, waiting for some work to do. When you call `join`
+the first time, we shift over into that pool of threads. But if you
+call `join(a, b)` from a worker thread W, then W will place `b` into
+its work queue, advertising that this is work that other worker
+threads might help out with. W will then start executing `a`.
+
+While W is busy with `a`, other threads might come along and take `b`
+from its queue. That is called *stealing* `b`. Once `a` is done, W
+checks whether `b` was stolen by another thread and, if not, executes
+`b` itself. If W runs out of jobs in its own queue, it will look
+through the other threads' queues and try to steal work from them.
+
+This technique is not new. It was first introduced by the
+[Cilk project][cilk], done at MIT in the late nineties. The name Rayon
+is an homage to that work.
+
+[cilk]: http://supertech.csail.mit.edu/cilk/
+
+<a name="atomicity"></a>
+
+#### Warning: Be wary of atomicity
+
+Converting a `Cell` (or, to a lesser extent, a `RefCell`) to work in
+parallel merits special mention for a number of reasons. `Cell` and
+`RefCell` are handy types that permit you to modify data even when
+that data is shared (aliased). They work somewhat differently, but
+serve a common purpose:
+
+1. A `Cell` offers a mutable slot with just two methods, `get` and
+   `set`.  Cells can only be used for `Copy` types that are safe to
+   memcpy around, such as `i32`, `f32`, or even something bigger like a tuple of
+   `(usize, usize, f32)`.
+2. A `RefCell` is kind of like a "single-threaded read-write lock"; it
+   can be used with any sort of type `T`. To gain access to the data
+   inside, you call `borrow` or `borrow_mut`. Dynamic checks are done
+   to ensure that you have either readers or one writer but not both.
+
+While there are threadsafe types that offer similar APIs, caution is
+warranted because, in a threadsafe setting, other threads may
+"interject" modifications in ways that are not possible in sequential
+code. While this will never lead to a *data race* --- that is, you
+need not fear *undefined behavior* --- you can certainly still have
+*bugs*.
+
+Let me give you a concrete example using `Cell`. A common use of `Cell`
+is to implement a shared counter. In that case, you would have something
+like `counter: Rc<Cell<usize>>`. Now I can increment the counter by
+calling `get` and `set` as follows:
+
+```rust
+let value = counter.get();
+counter.set(value + 1);
+```
+
+If I convert this to be a thread-safe counter, I would use the
+corresponding type `tscounter: Arc<AtomicUsize>`. If I then were to
+convert the `Cell` API calls directly, I would do something like this:
+
+```rust
+let value = tscounter.load(Ordering::SeqCst);
+tscounter.store(value + 1, Ordering::SeqCst);
+```
+
+You can already see that the `AtomicUsize` API is a bit more complex,
+as it requires you to specify an
+[ordering](http://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html). (I
+won't go into the details on ordering here, but suffice to say that if
+you don't know what an ordering is, and probably even if you do, you
+should use `Ordering::SeqCst`.) The danger in this parallel version of
+the counter is that other threads might be running at the same time
+and they could cause our counter to get out of sync. For example, if
+we have two threads, then they might both execute the "load" before
+either has a chance to execute the "store":
+
+```
+Thread 1                                          Thread 2
+let value = tscounter.load(Ordering::SeqCst);
+// value = X                                      let value = tscounter.load(Ordering::SeqCst);
+                                                  // value = X
+tscounter.store(value+1);                         tscounter.store(value+1);
+// tscounter = X+1                                // tscounter = X+1
+```
+
+Now even though we've had two increments, we'll only increase the
+counter by one!  Even though we've got no data race, this is still
+probably not the result we wanted. The problem here is that the `Cell`
+API doesn't make clear the scope of a "transaction" -- that is, the
+set of reads/writes that should occur atomically. In this case, we
+probably wanted the get/set to occur together.
+
+In fact, when using the `Atomic` types, you very rarely want a plain
+`load` or plain `store`. You probably want the more complex
+operations. A counter, for example, would use `fetch_add` to
+atomically load and increment the value in one step. Compare-and-swap
+is another popular building block.
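+
+For example (a minimal sketch, not taken from any Rayon demo), the racy
+load/store pair above collapses into a single atomic step:
+
+```rust
+use std::sync::atomic::{AtomicUsize, Ordering};
+use std::sync::Arc;
+
+fn main() {
+    let tscounter = Arc::new(AtomicUsize::new(0));
+    // One atomic read-modify-write: no other thread can slip in
+    // between the load and the store.
+    let old = tscounter.fetch_add(1, Ordering::SeqCst);
+    println!("counter was {}, is now {}", old, old + 1);
+}
+```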
+
+A similar problem can arise when converting `RefCell` to `RwLock`, but
+it is somewhat less likely, because the `RefCell` API does in fact
+have a notion of a transaction: the scope of the handle returned by
+`borrow` or `borrow_mut`. So if you convert each call to `borrow` to
+`read` (and `borrow_mut` to `write`), things will mostly work fine in
+a parallel setting, but there can still be changes in behavior.
+Consider using a `handle: RefCell<Vec<i32>>` like this:
+
+```rust
+let len = handle.borrow().len();
+for i in 0 .. len {
+    let data = handle.borrow()[i];
+    println!("{}", data);
+}
+```
+
+In sequential code, we know that this loop is safe. But if we convert
+this to parallel code with an `RwLock`, we do not: this is because
+another thread could come along and do
+`handle.write().unwrap().pop()`, and thus change the length of the
+vector. In fact, even in *sequential* code, using very small borrow
+sections like this is an anti-pattern: you ought to be enclosing the
+entire transaction together, like so:
+
+```rust
+let vec = handle.borrow();
+let len = vec.len();
+for i in 0 .. len {
+    let data = vec[i];
+    println!("{}", data);
+}
+```
+
+Or, even better, using an iterator instead of indexing:
+
+```rust
+let vec = handle.borrow();
+for data in vec {
+    println!("{}", data);
+}
+```
+
+There are several reasons to prefer one borrow over many. The most
+obvious is that it is more efficient, since each borrow has to perform
+some safety checks. But it's also more reliable: suppose we modified
+the loop above to not just print things out, but also call into a
+helper function:
+
+```rust
+let vec = handle.borrow();
+for data in vec {
+    helper(...);
+}
+```
+
+And now suppose, independently, this helper fn evolved and had to pop
+something off of the vector:
+
+```rust
+fn helper(...) {
+    handle.borrow_mut().pop();
+}
+```
+
+Under the old model, where we did lots of small borrows, this would
+yield precisely the same error that we saw in parallel land using an
+`RwLock`: the length would be out of sync and our indexing would fail
+(note that in neither case would there be an actual *data race* and
+hence there would never be undefined behavior). But now that we use a
+single borrow, we'll see a borrow error instead, which is much easier
+to diagnose, since it occurs at the point of the `borrow_mut`, rather
+than downstream. Similarly, if we move to an `RwLock`, we'll find that
+the code either deadlocks (if the write is on the same thread as the
+read) or, if the write is on another thread, works just fine. Both of
+these are preferable to random failures in my experience.
+
+#### But wait, isn't Rust supposed to free me from this kind of thinking?
+
+You might think that Rust is supposed to mean that you don't have to
+think about atomicity at all. In fact, if you avoid inherent
+mutability (`Cell` and `RefCell` in a sequential setting, or
+`AtomicUsize`, `RwLock`, `Mutex`, et al. in parallel code), then this
+is true: the type system will basically guarantee that you don't have
+to think about atomicity at all. But often there are times when you
+WANT threads to interleave in the ways I showed above.
+
+Consider for example when you are conducting a search in parallel, say
+to find the shortest route. To avoid fruitless search, you might want
+to keep a cell with the shortest route you've found thus far.  This
+way, when you are searching down some path that's already longer than
+this shortest route, you can just stop and avoid wasted effort. In
+sequential land, you might model this "best result" as a shared value
+like `Rc<Cell<usize>>` (here the `usize` represents the length of the best
+path found so far); in parallel land, you'd use an `Arc<AtomicUsize>`.
+Now we can make our search function look like:
+
+```rust
+fn search(path: &Path, cost_so_far: usize, best_cost: &Arc<AtomicUsize>) {
+    if cost_so_far >= best_cost.load(Ordering::SeqCst) {
+        return;
+    }
+    ...
+    best_cost.store(...);
+}
+```
+
+Now in this case, we really WANT to see results from other threads
+interjected into our execution!
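+
+One caveat, as a hedged aside that the snippet above glosses over: a
+plain `store` can overwrite a *better* (smaller) result that another
+thread published in the meantime. A sketch of a more careful update --
+this helper is mine, not part of Rayon -- uses a compare-and-swap loop
+that only ever shrinks the stored value:
+
+```rust
+use std::sync::atomic::{AtomicUsize, Ordering};
+
+/// Atomically lower `best_cost` to `new_cost`, but never raise it.
+fn update_best(best_cost: &AtomicUsize, new_cost: usize) {
+    let mut current = best_cost.load(Ordering::SeqCst);
+    while new_cost < current {
+        match best_cost.compare_exchange(current, new_cost,
+                                         Ordering::SeqCst, Ordering::SeqCst) {
+            // We published the improvement.
+            Ok(_) => return,
+            // Someone else changed the value first; retry against it.
+            Err(actual) => current = actual,
+        }
+    }
+}
+```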
+
+## Semver policy, the rayon-core crate, and unstable features
+
+Rayon follows semver versioning. However, we also have APIs that are
+still in the process of development and which may break from release
+to release -- those APIs are not subject to semver. To use them,
+you have to set the cfg flag `rayon_unstable`. The easiest way to do this
+is to use the `RUSTFLAGS` environment variable:
+
+```
+RUSTFLAGS='--cfg rayon_unstable' cargo build
+```
+
+Note that this must not only be done for your crate, but for any crate
+that depends on your crate. This infectious nature is intentional, as
+it serves as a reminder that you are outside of the normal semver
+guarantees. **If you see unstable APIs that you would like to use,
+please request stabilization on the corresponding tracking issue!**
+
+Rayon itself is internally split into two crates. The `rayon` crate is
+intended to be the main, user-facing crate, and hence all the
+documentation refers to `rayon`. This crate is still evolving and
+regularly goes through (minor) breaking changes. The `rayon-core`
+crate contains the global thread-pool and defines the core APIs: we no
+longer permit breaking changes in this crate (except to unstable
+features). The intention is that multiple semver-incompatible versions
+of the rayon crate can peacefully coexist; they will all share one
+global thread-pool through the `rayon-core` crate.
+
+## License
+
+Rayon is distributed under the terms of both the MIT license and the
+Apache License (Version 2.0). See [LICENSE-APACHE](LICENSE-APACHE) and
+[LICENSE-MIT](LICENSE-MIT) for details. Opening a pull request is
+assumed to signal agreement with these licensing terms.
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/RELEASES.md
@@ -0,0 +1,323 @@
+# Release rayon 0.8.2
+
+- `ParallelSliceMut` now has six parallel sorting methods with the same
+  variations as the standard library.
+  - `par_sort`, `par_sort_by`, and `par_sort_by_key` perform stable sorts in
+    parallel, using the default order, a custom comparator, or a key extraction
+    function, respectively.
+  - `par_sort_unstable`, `par_sort_unstable_by`, and `par_sort_unstable_by_key`
+    perform unstable sorts with the same comparison options.
+  - Thanks to @stjepang!
+
+# Release rayon 0.8.1 / rayon-core 1.2.0
+
+- The following core APIs are being stabilized:
+  - `rayon::spawn()` -- spawns a task into the Rayon threadpool; as it
+    is contained in the global scope (rather than a user-created
+    scope), the task cannot capture anything from the current stack
+    frame.
+  - `ThreadPool::join()`, `ThreadPool::spawn()`, `ThreadPool::scope()`
+    -- convenience APIs for launching new work within a thread-pool. 
+- The various iterator adapters are now tagged with `#[must_use]`
+- Parallel iterators now offer a `for_each_with` adapter, similar to
+  `map_with`.
+- We are adopting a new approach to handling the remaining unstable
+  APIs (which primarily pertain to futures integration). As always,
+  unstable APIs are intended for experimentation, but do not come with
+  any promise of compatibility (in other words, we might change them
+  in arbitrary ways in any release). Previously, we designated such
+  APIs using a Cargo feature "unstable". Now, we are using a regular
+  `#[cfg]` flag. This means that to see the unstable APIs, you must do
+  `RUSTFLAGS='--cfg rayon_unstable' cargo build`. This is
+  intentionally inconvenient; in particular, if you are a library,
+  then your clients must also modify their environment, signaling
+  their agreement to instability.
+
+# Release rayon 0.8.0 / rayon-core 1.1.0
+
+## Rayon 0.8.0
+
+- Added the `map_with` and `fold_with` combinators, which help for
+  passing along state (like channels) that cannot be shared between
+  threads but which can be cloned on each thread split.
+- Added the `while_some` combinator, which helps for writing short-circuiting iterators.
+- Added support for "short-circuiting" collection: e.g., collecting
+  from an iterator producing `Option<T>` or `Result<T, E>` into an
+  `Option<Collection<T>>` or `Result<Collection<T>, E>`.
+- Support `FromParallelIterator` for `Cow`.
+- Removed the deprecated weight APIs.
+- Simplified the parallel iterator trait hierarchy by removing the
+  `BoundedParallelIterator` and `ExactParallelIterator` traits,
+  which were not serving much purpose.
+- Improved documentation.
+- Added some missing `Send` impls.
+- Fixed some small bugs.
+
+## Rayon-core 1.1.0
+
+- We now have more documentation.
+- Renamed the (unstable) methods `spawn_async` and
+  `spawn_future_async` -- which spawn tasks that cannot hold
+  references -- to simply `spawn` and `spawn_future`, respectively.
+- We are now using the coco library for our deque.
+- Individual threadpools can now be configured in "breadth-first"
+  mode, which causes them to execute spawned tasks in the reverse of
+  the order they otherwise would.  In some specific scenarios, this can be a
+  win (though it is not generally the right choice).
+- Added top-level functions:
+  - `current_thread_index`, for querying the index of the current worker thread within
+    its thread-pool (previously available as `thread_pool.current_thread_index()`);
+  - `current_thread_has_pending_tasks`, for querying whether the
+    current worker thread has an empty task deque or not. This can be
+    useful when deciding whether to spawn a task.
+- The environment variables for controlling Rayon are now
+  `RAYON_NUM_THREADS` and `RAYON_LOG`. The older variables (e.g.,
+  `RAYON_RS_NUM_CPUS`) are still supported but deprecated.
+
+## Rayon-demo
+
+- Added a new game-of-life benchmark.
+
+## Contributors
+
+Thanks to the following contributors:
+
+- @ChristopherDavenport
+- @SuperFluffy
+- @antoinewdg
+- @crazymykl
+- @cuviper
+- @glandium
+- @julian-seward1
+- @leodasvacas
+- @leshow
+- @lilianmoraru
+- @mschmo
+- @nikomatsakis
+- @stjepang
+
+# Release rayon 0.7.1 / rayon-core 1.0.2
+
+This release is a targeted performance fix for #343, an issue where
+rayon threads could sometimes enter into a spin loop where they would
+be unable to make progress until they are pre-empted.
+
+# Release rayon 0.7 / rayon-core 1.0
+
+This release marks the first step towards Rayon 1.0. **For best
+performance, it is important that all Rayon users update to at least
+Rayon 0.7.** This is because, as of Rayon 0.7, we have taken steps to
+ensure that, no matter how many versions of rayon are actively in use,
+there will only be a single global scheduler. This is achieved via the
+`rayon-core` crate, which is being released at version 1.0, and which
+encapsulates the core scheduling APIs like `join()`. (Note: the
+`rayon-core` crate is, to some degree, an implementation detail, and
+not intended to be imported directly; its entire API surface is
+mirrored through the rayon crate.)
+
+We have also done a lot of work reorganizing the API for Rayon 0.7 in
+preparation for 1.0. The names of iterator types have been changed and
+reorganized (but few users are expected to be naming those types
+explicitly anyhow). In addition, a number of parallel iterator methods
+have been adjusted to match those in the standard iterator traits more
+closely. See the "Breaking Changes" section below for
+details.
+
+Finally, Rayon 0.7 includes a number of new features and new parallel
+iterator methods. **As of this release, Rayon's parallel iterators
+have officially reached parity with sequential iterators** -- that is,
+every sequential iterator method that makes any sense in parallel is
+supported in some capacity.
+
+### New features and methods
+
+- The internal `Producer` trait now features `fold_with`, which enables
+  better performance for some parallel iterators.
+- Strings now support `par_split()` and `par_split_whitespace()`.
+- The `Configuration` API is expanded and simplified:
+    - `num_threads(0)` no longer triggers an error 
+    - you can now supply a closure to name the Rayon threads that get created 
+      by using `Configuration::thread_name`.
+    - you can now inject code when Rayon threads start up and finish
+    - you can now set a custom panic handler to handle panics in various odd situations
+- Threadpools are now able to more gracefully put threads to sleep when not needed.
+- Parallel iterators now support `find_first()`, `find_last()`, `position_first()`,
+  and `position_last()`.
+- Parallel iterators now support `rev()`, which primarily affects subsequent calls
+  to `enumerate()`.
+- The `scope()` API is now considered stable (and part of `rayon-core`).
+- There is now a useful `rayon::split` function for creating custom
+  Rayon parallel iterators.
+- Parallel iterators now allow you to customize the min/max number of
+  items to be processed in a given thread. This mechanism replaces the
+  older `weight` mechanism, which is deprecated.
+- `sum()` and friends now use the standard `Sum` traits
+
+### Breaking changes
+
+In the move towards 1.0, there have been a number of minor breaking changes:
+
+- Configuration setters like `Configuration::set_num_threads()` lost the `set_` prefix,
+  and hence become something like `Configuration::num_threads()`.
+- `Configuration` getters are removed
+- Iterator types have been shuffled around and exposed more consistently:
+    - combinator types live in `rayon::iter`, e.g. `rayon::iter::Filter`
+    - iterators over various types live in a module named after their type,
+      e.g. `rayon::slice::Windows`
+- When doing a `sum()` or `product()`, type annotations are needed for the result
+  since it is now possible to have the resulting sum be of a type other than the value
+  you are iterating over (this mirrors sequential iterators).
+
+### Experimental features
+
+Experimental features require the use of the `unstable` feature. Their
+APIs may change or disappear entirely in future releases (even minor
+releases) and hence they should be avoided for production code.
+
+- We now have (unstable) support for futures integration. You can use
+  `Scope::spawn_future` or `rayon::spawn_future_async()`.
+- There is now a `rayon::spawn_async()` function for using the Rayon
+  threadpool to run tasks that do not have references to the stack.
+
+### Contributors
+
+Thanks to the following people for their contributions to this release:
+
+- @Aaronepower
+- @ChristopherDavenport
+- @bluss
+- @cuviper
+- @froydnj
+- @gaurikholkar
+- @hniksic
+- @leodasvacas
+- @leshow
+- @martinhath
+- @mbrubeck
+- @nikomatsakis
+- @pegomes
+- @schuster
+- @torkleyy
+
+# Release 0.6
+
+This release includes a lot of progress towards the goal of parity
+with the sequential iterator API, though there are still a few methods
+that are not yet complete. If you'd like to help with that effort,
+[check out the milestone](https://github.com/nikomatsakis/rayon/issues?q=is%3Aopen+is%3Aissue+milestone%3A%22Parity+with+the+%60Iterator%60+trait%22)
+to see the remaining issues.
+
+**Announcement:** @cuviper has been added as a collaborator to the
+Rayon repository for all of his outstanding work on Rayon, which
+includes both internal refactoring and helping to shape the public
+API. Thanks @cuviper! Keep it up.
+
+- We now support `collect()` and not just `collect_with()`.
+  You can use `collect()` to build a number of collections,
+  including vectors, maps, and sets. Moreover, when building a vector
+  with `collect()`, you are no longer limited to exact parallel iterators.
+  Thanks @nikomatsakis, @cuviper!
+- We now support `skip()` and `take()` on parallel iterators.
+  Thanks @martinhath!
+- **Breaking change:** We now match the sequential APIs for `min()` and `max()`.
+  We also support `min_by_key()` and `max_by_key()`. Thanks @tapeinosyne!
+- **Breaking change:** The `mul()` method is now renamed to `product()`,
+  to match sequential iterators. Thanks @jonathandturner!
+- We now support parallel iterators over ranges of `u64` values. Thanks @cuviper!
+- We now offer a `par_chars()` method on strings for iterating over characters
+  in parallel. Thanks @cuviper!
+- We now have new demos: a traveling salesman problem solver as well as matrix
+  multiplication. Thanks @nikomatsakis, @edre!
+- We are now documenting our minimum rustc requirement (currently
+  v1.12.0).  We will attempt to maintain compatibility with rustc
+  stable v1.12.0 as long as it remains convenient, but if new features
+  are stabilized or added that would be helpful to Rayon, or there are
+  bug fixes that we need, we will bump to the most recent rustc. Thanks @cuviper!
+- The `reduce()` functionality now has better inlining.
+  Thanks @bluss!
+- The `join()` function now has some documentation. Thanks @gsquire!
+- The project source has now been fully run through rustfmt.
+  Thanks @ChristopherDavenport!
+- Exposed helper methods for accessing the current thread index.
+  Thanks @bholley!
+
+# Release 0.5
+
+- **Breaking change:** The `reduce` method has been vastly
+  simplified, and `reduce_with_identity` has been deprecated.
+- **Breaking change:** The `fold` method has been changed. It used to
+  always reduce the values, but now instead it is a combinator that
+  returns a parallel iterator which can itself be reduced. See the
+  docs for more information.
+- The following parallel iterator combinators are now available (thanks @cuviper!):
+  - `find_any()`: similar to `find` on a sequential iterator,
+    but doesn't necessarily return the *first* matching item
+  - `position_any()`: similar to `position` on a sequential iterator,
+    but doesn't necessarily return the index of *first* matching item
+  - `any()`, `all()`: just like their sequential counterparts
+- The `count()` combinator is now available for parallel iterators.
+- We now build with older versions of rustc again (thanks @durango!),
+  as we removed a stray semicolon from `thread_local!`.
+- Various improvements to the (unstable) `scope()` API implementation.
+    
+# Release 0.4.3
+
+- Parallel iterators now offer an adaptive weight scheme,
+  which means that explicit weights should no longer
+  be necessary in most cases! Thanks @cuviper!
+  - We are considering removing weights or changing the weight mechanism
+    before 1.0. Examples of scenarios where you still need weights even
+    with this adaptive mechanism would be great. Join the discussion
+    at <https://github.com/nikomatsakis/rayon/issues/111>.
+- New (unstable) scoped threads API, see `rayon::scope` for details.
+  - You will need to supply the [cargo feature] `unstable`.
+- The various demos and benchmarks have been consolidated into one
+  program, `rayon-demo`.
+- Optimizations in Rayon's inner workings. Thanks @emilio!  
+- Update `num_cpus` to 1.0. Thanks @jamwt!
+- Various internal cleanup in the implementation and typo fixes.
+  Thanks @cuviper, @Eh2406, and @spacejam!
+
+[cargo feature]: http://doc.crates.io/manifest.html#the-features-section
+
+# Release 0.4.2
+
+- Updated crates.io metadata.
+
+# Release 0.4.1
+
+- New `chain` combinator for parallel iterators.
+- `Option`, `Result`, as well as many more collection types now have
+  parallel iterators.
+- New mergesort demo.
+- Misc fixes.
+
+Thanks to @cuviper, @edre, @jdanford, @frewsxcv for their contributions!
+
+# Release 0.4
+
+- Make use of latest versions of catch-panic and various fixes to panic propagation.
+- Add new prime sieve demo.
+- Add `cloned()` and `inspect()` combinators.
+- Misc fixes for Rust RFC 1214.
+
+Thanks to @areilb1, @Amanieu, @SharplEr, and @cuviper for their contributions!
+
+# Release 0.3
+
+- Expanded `par_iter` APIs now available:
+  - `into_par_iter` is now supported on vectors (taking ownership of the elements)
+- Panic handling is much improved:
+  - if you use the Nightly feature, experimental panic recovery is available
+  - otherwise, panics propagate out and poison the workpool
+- New `Configuration` object to control number of threads and other details
+- New demos and benchmarks
+  - try `cargo run --release -- visualize` in `demo/nbody` :)
+    - Note: a nightly compiler is required for this demo due to the
+      use of the `+=` syntax
+
+Thanks to @bjz, @cuviper, @Amanieu, and @willi-kappler for their contributions!
+
+# Release 0.2 and earlier
+
+No release notes were being kept at this time.
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/appveyor.yml
@@ -0,0 +1,61 @@
+environment:
+  RUST_MIN_STACK: 16777216
+  matrix:
+    - TARGET: x86_64-pc-windows-gnu
+      CHANNEL: 1.12.0
+
+    - TARGET: x86_64-pc-windows-gnu
+      CHANNEL: stable
+    - TARGET: x86_64-pc-windows-gnu
+      CHANNEL: stable
+      RUSTFLAGS: --cfg rayon_unstable
+
+    - TARGET: x86_64-pc-windows-gnu
+      CHANNEL: beta
+    - TARGET: x86_64-pc-windows-gnu
+      CHANNEL: beta
+      RUSTFLAGS: --cfg rayon_unstable
+
+    - TARGET: x86_64-pc-windows-gnu
+      CHANNEL: nightly
+    - TARGET: x86_64-pc-windows-gnu
+      CHANNEL: nightly
+      RUSTFLAGS: --cfg rayon_unstable
+
+
+    - TARGET: x86_64-pc-windows-msvc
+      CHANNEL: 1.12.0
+
+    - TARGET: x86_64-pc-windows-msvc
+      CHANNEL: stable
+    - TARGET: x86_64-pc-windows-msvc
+      CHANNEL: stable
+      RUSTFLAGS: --cfg rayon_unstable
+
+    - TARGET: x86_64-pc-windows-msvc
+      CHANNEL: beta
+    - TARGET: x86_64-pc-windows-msvc
+      CHANNEL: beta
+      RUSTFLAGS: --cfg rayon_unstable
+
+    - TARGET: x86_64-pc-windows-msvc
+      CHANNEL: nightly
+    - TARGET: x86_64-pc-windows-msvc
+      CHANNEL: nightly
+      RUSTFLAGS: --cfg rayon_unstable
+
+install:
+  - curl -sSf -o rustup-init.exe https://win.rustup.rs
+  - rustup-init.exe --default-host %TARGET% --default-toolchain %CHANNEL% -y
+  - set PATH=%PATH%;C:\Users\appveyor\.cargo\bin
+  - rustc -Vv
+  - cargo -V
+
+build: false
+
+test_script:
+  - cargo build
+  - if [%CHANNEL%]==[nightly] (
+      cargo test -p rayon-core &&
+      cargo test -p rayon-demo
+    )
new file mode 100755
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/ci/highlander.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+
+echo "INFO: There Can Be Only One!" >&2
+
+if cargo build --manifest-path "$DIR/highlander/Cargo.toml"; then
+    echo "ERROR: we built with multiple rayon-core!" >&2
+    exit 1
+fi
+
+echo "PASS: using multiple rayon-core failed." >&2
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/examples/README.md
@@ -0,0 +1,3 @@
+We use this directory for interactive tests that can't be run in an
+automatic fashion. For examples of how to use Rayon, or benchmarks,
+see `rayon-demo`.
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/examples/cpu_monitor.rs
@@ -0,0 +1,85 @@
+extern crate docopt;
+extern crate rayon;
+extern crate rustc_serialize;
+
+use docopt::Docopt;
+use std::env;
+use std::io;
+use std::process;
+
+const USAGE: &'static str = "
+Usage: cpu_monitor [options] <scenario>
+       cpu_monitor --help
+
+A test for monitoring how much CPU usage Rayon consumes under various
+scenarios. This test is intended to be executed interactively, like so:
+
+    cargo run --example cpu_monitor -- tasks_ended
+
+The list of scenarios you can try are as follows:
+
+- tasks_ended: after all tasks have finished, go to sleep
+- task_stall_root: a root task stalls for a very long time
+- task_stall_scope: a task in a scope stalls for a very long time
+
+Options:
+    -h, --help                   Show this message.
+    -d N, --depth N              Control how hard the dummy task works [default: 27]
+";
+
+#[derive(RustcDecodable)]
+pub struct Args {
+    arg_scenario: String,
+    flag_depth: usize,
+}
+
+fn main() {
+    let args: &Args =
+        &Docopt::new(USAGE).and_then(|d| d.argv(env::args()).decode()).unwrap_or_else(|e| e.exit());
+
+    match &args.arg_scenario[..] {
+        "tasks_ended" => tasks_ended(args),
+        "task_stall_root" => task_stall_root(args),
+        "task_stall_scope" => task_stall_scope(args),
+        _ => {
+            println!("unknown scenario: `{}`", args.arg_scenario);
+            println!("try --help");
+            process::exit(1);
+        }
+    }
+}
+
+fn wait_for_user() {
+    let mut input = String::new();
+    io::stdin().read_line(&mut input).unwrap();
+}
+
+fn task(args: &Args) {
+    fn join_recursively(n: usize) {
+        if n == 0 {
+            return;
+        }
+        rayon::join(|| join_recursively(n - 1), || join_recursively(n - 1));
+    }
+
+    println!("Starting heavy work at depth {}...wait.", args.flag_depth);
+    join_recursively(args.flag_depth);
+    println!("Heavy work done; check top. You should see CPU usage drop to zero soon.");
+    println!("Press <enter> to quit...");
+}
+
+fn tasks_ended(args: &Args) {
+    task(args);
+    wait_for_user();
+}
+
+fn task_stall_root(args: &Args) {
+    rayon::join(|| task(args), || wait_for_user());
+}
+
+fn task_stall_scope(args: &Args) {
+    rayon::scope(|scope| {
+                     scope.spawn(move |_| task(args));
+                     scope.spawn(move |_| wait_for_user());
+                 });
+}
new file mode 100755
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/scripts/analyze.sh
@@ -0,0 +1,30 @@
+# Rough and dirty shell script to scrape the `log.rs` output and
+# analyze what kinds of tasks have been started and stopped. Very
+# useful in tracking down deadlocks.
+
+TICKLES=$(grep Tickle $1 | wc -l)
+
+INJECT_JOBS=$(grep InjectJobs $1 | wc -l)
+echo "Injected jobs:" $(((INJECT_JOBS * 2)))
+
+JOINS=$(grep Join $1 | wc -l)
+echo "Joins:        " $JOINS
+
+POPPED_RHS=$(grep PoppedRhs $1 | wc -l)
+POPPED_JOB=$(grep PoppedJob $1 | wc -l)
+POPPED_TOTAL=$((($POPPED_RHS + $POPPED_JOB)))
+echo "Popped jobs:  " $POPPED_TOTAL = rhs $POPPED_RHS + other $POPPED_JOB
+
+FOUND_WORK=$(grep FoundWork $1 | wc -l)
+echo "Found work:   " $FOUND_WORK
+
+STOLE_WORK=$(grep StoleWork $1 | wc -l)
+echo "Stole work:   " $STOLE_WORK
+
+UNINJECTED_WORK=$(grep UninjectedWork $1 | wc -l)
+echo "Uninjected:   " $UNINJECTED_WORK
+
+echo "Join balance: " $((( $JOINS - $POPPED_TOTAL - $STOLE_WORK  )))
+echo "Inj. balance: " $((( $INJECT_JOBS * 2 - $UNINJECTED_WORK )))
+echo "Total balance:" $((( $FOUND_WORK + $POPPED_TOTAL - $JOINS - $INJECT_JOBS * 2 )))
+
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/src/collections/binary_heap.rs
@@ -0,0 +1,40 @@
+//! This module contains the parallel iterator types for heaps
+//! (`BinaryHeap<T>`). You will rarely need to interact with it directly
+//! unless you have need to name one of the iterator types.
+
+use std::collections::BinaryHeap;
+
+use iter::*;
+use iter::internal::*;
+
+use vec;
+
+impl<T: Ord + Send> IntoParallelIterator for BinaryHeap<T> {
+    type Item = T;
+    type Iter = IntoIter<T>;
+
+    fn into_par_iter(self) -> Self::Iter {
+        IntoIter { inner: Vec::from(self).into_par_iter() }
+    }
+}
+
+into_par_vec!{
+    &'a BinaryHeap<T> => Iter<'a, T>,
+    impl<'a, T: Ord + Sync>
+}
+
+// `BinaryHeap` doesn't have a mutable `Iterator`
+
+
+delegate_indexed_iterator!{
+    #[doc = "Parallel iterator over a binary heap"]
+    IntoIter<T> => vec::IntoIter<T>,
+    impl<T: Ord + Send>
+}
+
+
+delegate_indexed_iterator!{
+    #[doc = "Parallel iterator over an immutable reference to a binary heap"]
+    Iter<'a, T> => vec::IntoIter<&'a T>,
+    impl<'a, T: Ord + Sync + 'a>
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/src/collections/btree_map.rs
@@ -0,0 +1,46 @@
+//! This module contains the parallel iterator types for B-Tree maps
+//! (`BTreeMap<K, V>`). You will rarely need to interact with it directly
+//! unless you have need to name one of the iterator types.
+
+use std::collections::BTreeMap;
+
+use iter::*;
+use iter::internal::*;
+
+use vec;
+
+into_par_vec!{
+    BTreeMap<K, V> => IntoIter<K, V>,
+    impl<K: Ord + Send, V: Send>
+}
+
+into_par_vec!{
+    &'a BTreeMap<K, V> => Iter<'a, K, V>,
+    impl<'a, K: Ord + Sync, V: Sync>
+}
+
+into_par_vec!{
+    &'a mut BTreeMap<K, V> => IterMut<'a, K, V>,
+    impl<'a, K: Ord + Sync, V: Send>
+}
+
+
+delegate_iterator!{
+    #[doc = "Parallel iterator over a B-Tree map"]
+    IntoIter<K, V> => vec::IntoIter<(K, V)>,
+    impl<K: Ord + Send, V: Send>
+}
+
+
+delegate_iterator!{
+    #[doc = "Parallel iterator over an immutable reference to a B-Tree map"]
+    Iter<'a, K, V> => vec::IntoIter<(&'a K, &'a V)>,
+    impl<'a, K: Ord + Sync + 'a, V: Sync + 'a>
+}
+
+
+delegate_iterator!{
+    #[doc = "Parallel iterator over a mutable reference to a B-Tree map"]
+    IterMut<'a, K, V> => vec::IntoIter<(&'a K, &'a mut V)>,
+    impl<'a, K: Ord + Sync + 'a, V: Send + 'a>
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/src/collections/btree_set.rs
@@ -0,0 +1,36 @@
+//! This module contains the parallel iterator types for B-Tree sets
+//! (`BTreeSet<T>`). You will rarely need to interact with it directly
+//! unless you have need to name one of the iterator types.
+
+use std::collections::BTreeSet;
+
+use iter::*;
+use iter::internal::*;
+
+use vec;
+
+into_par_vec!{
+    BTreeSet<T> => IntoIter<T>,
+    impl<T: Ord + Send>
+}
+
+into_par_vec!{
+    &'a BTreeSet<T> => Iter<'a, T>,
+    impl<'a, T: Ord + Sync>
+}
+
+// `BTreeSet` doesn't have a mutable `Iterator`
+
+
+delegate_iterator!{
+    #[doc = "Parallel iterator over a B-Tree set"]
+    IntoIter<T> => vec::IntoIter<T>,
+    impl<T: Ord + Send>
+}
+
+
+delegate_iterator!{
+    #[doc = "Parallel iterator over an immutable reference to a B-Tree set"]
+    Iter<'a, T> => vec::IntoIter<&'a T>,
+    impl<'a, T: Ord + Sync + 'a>
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/src/collections/hash_map.rs
@@ -0,0 +1,47 @@
+//! This module contains the parallel iterator types for hash maps
+//! (`HashMap<K, V>`). You will rarely need to interact with it directly
+//! unless you have need to name one of the iterator types.
+
+use std::collections::HashMap;
+use std::hash::{Hash, BuildHasher};
+
+use iter::*;
+use iter::internal::*;
+
+use vec;
+
+into_par_vec!{
+    HashMap<K, V, S> => IntoIter<K, V>,
+    impl<K: Hash + Eq + Send, V: Send, S: BuildHasher>
+}
+
+into_par_vec!{
+    &'a HashMap<K, V, S> => Iter<'a, K, V>,
+    impl<'a, K: Hash + Eq + Sync, V: Sync, S: BuildHasher>
+}
+
+into_par_vec!{
+    &'a mut HashMap<K, V, S> => IterMut<'a, K, V>,
+    impl<'a, K: Hash + Eq + Sync, V: Send, S: BuildHasher>
+}
+
+
+delegate_iterator!{
+    #[doc = "Parallel iterator over a hash map"]
+    IntoIter<K, V> => vec::IntoIter<(K, V)>,
+    impl<K: Hash + Eq + Send, V: Send>
+}
+
+
+delegate_iterator!{
+    #[doc = "Parallel iterator over an immutable reference to a hash map"]
+    Iter<'a, K, V> => vec::IntoIter<(&'a K, &'a V)>,
+    impl<'a, K: Hash + Eq + Sync + 'a, V: Sync + 'a>
+}
+
+
+delegate_iterator!{
+    #[doc = "Parallel iterator over a mutable reference to a hash map"]
+    IterMut<'a, K, V> => vec::IntoIter<(&'a K, &'a mut V)>,
+    impl<'a, K: Hash + Eq + Sync + 'a, V: Send + 'a>
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/src/collections/hash_set.rs
@@ -0,0 +1,37 @@
+//! This module contains the parallel iterator types for hash sets
+//! (`HashSet<T>`). You will rarely need to interact with it directly
+//! unless you have need to name one of the iterator types.
+
+use std::collections::HashSet;
+use std::hash::{Hash, BuildHasher};
+
+use iter::*;
+use iter::internal::*;
+
+use vec;
+
+into_par_vec!{
+    HashSet<T, S> => IntoIter<T>,
+    impl<T: Hash + Eq + Send, S: BuildHasher>
+}
+
+into_par_vec!{
+    &'a HashSet<T, S> => Iter<'a, T>,
+    impl<'a, T: Hash + Eq + Sync, S: BuildHasher>
+}
+
+// `HashSet` doesn't have a mutable `Iterator`
+
+
+delegate_iterator!{
+    #[doc = "Parallel iterator over a hash set"]
+    IntoIter<T> => vec::IntoIter<T>,
+    impl<T: Hash + Eq + Send>
+}
+
+
+delegate_iterator!{
+    #[doc = "Parallel iterator over an immutable reference to a hash set"]
+    Iter<'a, T> => vec::IntoIter<&'a T>,
+    impl<'a, T: Hash + Eq + Sync + 'a>
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/src/collections/linked_list.rs
@@ -0,0 +1,47 @@
+//! This module contains the parallel iterator types for linked lists
+//! (`LinkedList<T>`). You will rarely need to interact with it directly
+//! unless you have need to name one of the iterator types.
+
+use std::collections::LinkedList;
+
+use iter::*;
+use iter::internal::*;
+
+use vec;
+
+into_par_vec!{
+    LinkedList<T> => IntoIter<T>,
+    impl<T: Send>
+}
+
+into_par_vec!{
+    &'a LinkedList<T> => Iter<'a, T>,
+    impl<'a, T: Sync>
+}
+
+into_par_vec!{
+    &'a mut LinkedList<T> => IterMut<'a, T>,
+    impl<'a, T: Send>
+}
+
+
+
+delegate_iterator!{
+    #[doc = "Parallel iterator over a linked list"]
+    IntoIter<T> => vec::IntoIter<T>,
+    impl<T: Send>
+}
+
+
+delegate_iterator!{
+    #[doc = "Parallel iterator over an immutable reference to a linked list"]
+    Iter<'a, T> => vec::IntoIter<&'a T>,
+    impl<'a, T: Sync + 'a>
+}
+
+
+delegate_iterator!{
+    #[doc = "Parallel iterator over a mutable reference to a linked list"]
+    IterMut<'a, T> => vec::IntoIter<&'a mut T>,
+    impl<'a, T: Send + 'a>
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/src/collections/mod.rs
@@ -0,0 +1,27 @@
+//! This module contains the parallel iterator types for standard
+//! collections. You will rarely need to interact with it directly
+//! unless you have need to name one of the iterator types.
+
+/// Convert an iterable collection into a parallel iterator by first
+/// collecting into a temporary `Vec`, then iterating that.
+macro_rules! into_par_vec {
+    ($t:ty => $iter:ident<$($i:tt),*>, impl $($args:tt)*) => {
+        impl $($args)* IntoParallelIterator for $t {
+            type Item = <$t as IntoIterator>::Item;
+            type Iter = $iter<$($i),*>;
+
+            fn into_par_iter(self) -> Self::Iter {
+                use std::iter::FromIterator;
+                $iter { inner: Vec::from_iter(self).into_par_iter() }
+            }
+        }
+    };
+}
+
+pub mod binary_heap;
+pub mod btree_map;
+pub mod btree_set;
+pub mod hash_map;
+pub mod hash_set;
+pub mod linked_list;
+pub mod vec_deque;
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/src/collections/vec_deque.rs
@@ -0,0 +1,57 @@
+//! This module contains the parallel iterator types for double-ended queues
+//! (`VecDeque<T>`). You will rarely need to interact with it directly
+//! unless you have need to name one of the iterator types.
+
+use std::collections::VecDeque;
+
+use iter::*;
+use iter::internal::*;
+
+use slice;
+use vec;
+
+into_par_vec!{
+    VecDeque<T> => IntoIter<T>,
+    impl<T: Send>
+}
+
+impl<'a, T: Sync> IntoParallelIterator for &'a VecDeque<T> {
+    type Item = &'a T;
+    type Iter = Iter<'a, T>;
+
+    fn into_par_iter(self) -> Self::Iter {
+        let (a, b) = self.as_slices();
+        Iter { inner: a.into_par_iter().chain(b) }
+    }
+}
+
+impl<'a, T: Send> IntoParallelIterator for &'a mut VecDeque<T> {
+    type Item = &'a mut T;
+    type Iter = IterMut<'a, T>;
+
+    fn into_par_iter(self) -> Self::Iter {
+        let (a, b) = self.as_mut_slices();
+        IterMut { inner: a.into_par_iter().chain(b) }
+    }
+}
+
+
+delegate_indexed_iterator!{
+    #[doc = "Parallel iterator over a double-ended queue"]
+    IntoIter<T> => vec::IntoIter<T>,
+    impl<T: Send>
+}
+
+
+delegate_indexed_iterator_item!{
+    #[doc = "Parallel iterator over an immutable reference to a double-ended queue"]
+    Iter<'a, T> => Chain<slice::Iter<'a, T>, slice::Iter<'a, T>> : &'a T,
+    impl<'a, T: Sync + 'a>
+}
+
+
+delegate_indexed_iterator_item!{
+    #[doc = "Parallel iterator over a mutable reference to a double-ended queue"]
+    IterMut<'a, T> => Chain<slice::IterMut<'a, T>, slice::IterMut<'a, T>> : &'a mut T,
+    impl<'a, T: Send + 'a>
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/src/delegate.rs
@@ -0,0 +1,119 @@
+//! Macros for delegating newtype iterators to inner types.
+
+// Note: these place `impl` bounds at the end, as token gobbling is the only way
+// I know how to consume an arbitrary list of constraints, with `$($args:tt)*`.
+
+/// Create a parallel iterator which simply wraps an inner type and delegates
+/// all methods inward.  The item type is parsed from the inner type.
+///
+/// The implementation of `IntoParallelIterator` should be added separately.
+///
+/// # Example
+///
+/// ```
+/// delegate_iterator!{
+///     #[doc = "Move items from `MyCollection` in parallel"]
+///     MyIntoIter<T, U> => vec::IntoIter<(T, U)>,
+///     impl<T: Ord + Send, U: Send>
+/// }
+/// ```
+macro_rules! delegate_iterator {
+    ($( #[ $attr:meta ] )+
+     $iter:ident < $( $i:tt ),* > => $( $inner:ident )::+ < $item:ty > ,
+     impl $( $args:tt )*
+     ) => {
+        delegate_iterator_item!{
+            $( #[ $attr ] )+
+            $iter < $( $i ),* > => $( $inner )::+ < $item > : $item ,
+            impl $( $args )*
+        }
+    }
+}
+
+/// Create an indexed parallel iterator which simply wraps an inner type and
+/// delegates all methods inward.  The item type is parsed from the inner type.
+macro_rules! delegate_indexed_iterator {
+    ($( #[ $attr:meta ] )+
+     $iter:ident < $( $i:tt ),* > => $( $inner:ident )::+ < $item:ty > ,
+     impl $( $args:tt )*
+     ) => {
+        delegate_indexed_iterator_item!{
+            $( #[ $attr ] )+
+            $iter < $( $i ),* > => $( $inner )::+ < $item > : $item ,
+            impl $( $args )*
+        }
+    }
+}
+
+/// Create a parallel iterator which simply wraps an inner type and delegates
+/// all methods inward.  The item type is explicitly specified.
+///
+/// The implementation of `IntoParallelIterator` should be added separately.
+///
+/// # Example
+///
+/// ```
+/// delegate_iterator_item!{
+///     #[doc = "Iterate items from `MyCollection` in parallel"]
+///     MyIter<'a, T, U> => slice::Iter<'a, (T, U)>: &'a (T, U),
+///     impl<'a, T: Ord + Sync, U: Sync>
+/// }
+/// ```
+macro_rules! delegate_iterator_item {
+    ($( #[ $attr:meta ] )+
+     $iter:ident < $( $i:tt ),* > => $inner:ty : $item:ty,
+     impl $( $args:tt )*
+     ) => {
+        $( #[ $attr ] )+
+        pub struct $iter $( $args )* {
+            inner: $inner,
+        }
+
+        impl $( $args )* ParallelIterator for $iter < $( $i ),* > {
+            type Item = $item;
+
+            fn drive_unindexed<C>(self, consumer: C) -> C::Result
+                where C: UnindexedConsumer<Self::Item>
+            {
+                self.inner.drive_unindexed(consumer)
+            }
+
+            fn opt_len(&mut self) -> Option<usize> {
+                self.inner.opt_len()
+            }
+        }
+    }
+}
+
+/// Create an indexed parallel iterator which simply wraps an inner type and
+/// delegates all methods inward.  The item type is explicitly specified.
+macro_rules! delegate_indexed_iterator_item {
+    ($( #[ $attr:meta ] )+
+     $iter:ident < $( $i:tt ),* > => $inner:ty : $item:ty,
+     impl $( $args:tt )*
+     ) => {
+        delegate_iterator_item!{
+            $( #[ $attr ] )+
+            $iter < $( $i ),* > => $inner : $item ,
+            impl $( $args )*
+        }
+
+        impl $( $args )* IndexedParallelIterator for $iter < $( $i ),* > {
+            fn drive<C>(self, consumer: C) -> C::Result
+                where C: Consumer<Self::Item>
+            {
+                self.inner.drive(consumer)
+            }
+
+            fn len(&mut self) -> usize {
+                self.inner.len()
+            }
+
+            fn with_producer<CB>(self, callback: CB) -> CB::Output
+                where CB: ProducerCallback<Self::Item>
+            {
+                self.inner.with_producer(callback)
+            }
+        }
+    }
+}
rename from third_party/rust/rayon/src/iter/README.md
rename to third_party/rust/rayon-0.8.2/src/iter/README.md
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/src/iter/chain.rs
@@ -0,0 +1,254 @@
+use super::internal::*;
+use super::*;
+use std::cmp;
+use std::iter;
+use rayon_core::join;
+
+/// `Chain` is an iterator that joins `b` after `a` in one continuous iterator.
+/// This struct is created by the [`chain()`] method on [`ParallelIterator`]
+///
+/// [`chain()`]: trait.ParallelIterator.html#method.chain
+/// [`ParallelIterator`]: trait.ParallelIterator.html
+#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
+pub struct Chain<A, B>
+    where A: ParallelIterator,
+          B: ParallelIterator<Item = A::Item>
+{
+    a: A,
+    b: B,
+}
+
+/// Create a new `Chain` iterator.
+///
+/// NB: a free fn because it is NOT part of the end-user API.
+pub fn new<A, B>(a: A, b: B) -> Chain<A, B>
+    where A: ParallelIterator,
+          B: ParallelIterator<Item = A::Item>
+{
+    Chain { a: a, b: b }
+}
+
+impl<A, B> ParallelIterator for Chain<A, B>
+    where A: ParallelIterator,
+          B: ParallelIterator<Item = A::Item>
+{
+    type Item = A::Item;
+
+    fn drive_unindexed<C>(self, consumer: C) -> C::Result
+        where C: UnindexedConsumer<Self::Item>
+    {
+        let Chain { mut a, b } = self;
+
+        // If we returned a value from our own `opt_len`, then the collect consumer in particular
+        // will balk at being treated like an actual `UnindexedConsumer`.  But when we do know the
+        // length, we can use `Consumer::split_at` instead, and this is still harmless for other
+        // truly-unindexed consumers too.
+        let (left, right, reducer) = if let Some(len) = a.opt_len() {
+            consumer.split_at(len)
+        } else {
+            let reducer = consumer.to_reducer();
+            (consumer.split_off_left(), consumer, reducer)
+        };
+
+        let (a, b) = join(|| a.drive_unindexed(left), || b.drive_unindexed(right));
+        reducer.reduce(a, b)
+    }
+
+    fn opt_len(&mut self) -> Option<usize> {
+        match (self.a.opt_len(), self.b.opt_len()) {
+            (Some(a_len), Some(b_len)) => a_len.checked_add(b_len),
+            _ => None,
+        }
+    }
+}
+
+impl<A, B> IndexedParallelIterator for Chain<A, B>
+    where A: IndexedParallelIterator,
+          B: IndexedParallelIterator<Item = A::Item>
+{
+    fn drive<C>(self, consumer: C) -> C::Result
+        where C: Consumer<Self::Item>
+    {
+        let Chain { mut a, b } = self;
+        let (left, right, reducer) = consumer.split_at(a.len());
+        let (a, b) = join(|| a.drive(left), || b.drive(right));
+        reducer.reduce(a, b)
+    }
+
+    fn len(&mut self) -> usize {
+        self.a
+            .len()
+            .checked_add(self.b.len())
+            .expect("overflow")
+    }
+
+    fn with_producer<CB>(mut self, callback: CB) -> CB::Output
+        where CB: ProducerCallback<Self::Item>
+    {
+        let a_len = self.a.len();
+        return self.a.with_producer(CallbackA {
+                                        callback: callback,
+                                        a_len: a_len,
+                                        b: self.b,
+                                    });
+
+        struct CallbackA<CB, B> {
+            callback: CB,
+            a_len: usize,
+            b: B,
+        }
+
+        impl<CB, B> ProducerCallback<B::Item> for CallbackA<CB, B>
+            where B: IndexedParallelIterator,
+                  CB: ProducerCallback<B::Item>
+        {
+            type Output = CB::Output;
+
+            fn callback<A>(self, a_producer: A) -> Self::Output
+                where A: Producer<Item = B::Item>
+            {
+                return self.b.with_producer(CallbackB {
+                                                callback: self.callback,
+                                                a_len: self.a_len,
+                                                a_producer: a_producer,
+                                            });
+            }
+        }
+
+        struct CallbackB<CB, A> {
+            callback: CB,
+            a_len: usize,
+            a_producer: A,
+        }
+
+        impl<CB, A> ProducerCallback<A::Item> for CallbackB<CB, A>
+            where A: Producer,
+                  CB: ProducerCallback<A::Item>
+        {
+            type Output = CB::Output;
+
+            fn callback<B>(self, b_producer: B) -> Self::Output
+                where B: Producer<Item = A::Item>
+            {
+                let producer = ChainProducer::new(self.a_len, self.a_producer, b_producer);
+                self.callback.callback(producer)
+            }
+        }
+
+    }
+}
+
+/// ////////////////////////////////////////////////////////////////////////
+
+struct ChainProducer<A, B>
+    where A: Producer,
+          B: Producer<Item = A::Item>
+{
+    a_len: usize,
+    a: A,
+    b: B,
+}
+
+impl<A, B> ChainProducer<A, B>
+    where A: Producer,
+          B: Producer<Item = A::Item>
+{
+    fn new(a_len: usize, a: A, b: B) -> Self {
+        ChainProducer {
+            a_len: a_len,
+            a: a,
+            b: b,
+        }
+    }
+}
+
+impl<A, B> Producer for ChainProducer<A, B>
+    where A: Producer,
+          B: Producer<Item = A::Item>
+{
+    type Item = A::Item;
+    type IntoIter = ChainSeq<A::IntoIter, B::IntoIter>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        ChainSeq::new(self.a.into_iter(), self.b.into_iter())
+    }
+
+    fn min_len(&self) -> usize {
+        cmp::max(self.a.min_len(), self.b.min_len())
+    }
+
+    fn max_len(&self) -> usize {
+        cmp::min(self.a.max_len(), self.b.max_len())
+    }
+
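+    // Split on whichever side of the `a`/`b` boundary `index` falls, keeping
+    // `a_len` consistent in both halves. Illustrative example: with
+    // `a_len == 3`, `split_at(2)` splits `a` at 2 and `b` at 0 (halves with
+    // `a_len` 2 and 1), while `split_at(5)` keeps all of `a` on the left and
+    // splits `b` at 2 (halves with `a_len` 3 and 0).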
+    fn split_at(self, index: usize) -> (Self, Self) {
+        if index <= self.a_len {
+            let a_rem = self.a_len - index;
+            let (a_left, a_right) = self.a.split_at(index);
+            let (b_left, b_right) = self.b.split_at(0);
+            (ChainProducer::new(index, a_left, b_left), ChainProducer::new(a_rem, a_right, b_right))
+        } else {
+            let (a_left, a_right) = self.a.split_at(self.a_len);
+            let (b_left, b_right) = self.b.split_at(index - self.a_len);
+            (ChainProducer::new(self.a_len, a_left, b_left),
+             ChainProducer::new(0, a_right, b_right))
+        }
+    }
+
+    fn fold_with<F>(self, mut folder: F) -> F
+        where F: Folder<A::Item>
+    {
+        folder = self.a.fold_with(folder);
+        if folder.full() {
+            folder
+        } else {
+            self.b.fold_with(folder)
+        }
+    }
+}
+
+/// ////////////////////////////////////////////////////////////////////////
+/// Wrapper for Chain to implement ExactSizeIterator
+
+struct ChainSeq<A, B> {
+    chain: iter::Chain<A, B>,
+}
+
+impl<A, B> ChainSeq<A, B> {
+    fn new(a: A, b: B) -> ChainSeq<A, B>
+        where A: ExactSizeIterator,
+              B: ExactSizeIterator<Item = A::Item>
+    {
+        ChainSeq { chain: a.chain(b) }
+    }
+}
+
+impl<A, B> Iterator for ChainSeq<A, B>
+    where A: Iterator,
+          B: Iterator<Item = A::Item>
+{
+    type Item = A::Item;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        self.chain.next()
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        self.chain.size_hint()
+    }
+}
+
+impl<A, B> ExactSizeIterator for ChainSeq<A, B>
+    where A: ExactSizeIterator,
+          B: ExactSizeIterator<Item = A::Item>
+{
+}
+
+impl<A, B> DoubleEndedIterator for ChainSeq<A, B>
+    where A: DoubleEndedIterator,
+          B: DoubleEndedIterator<Item = A::Item>
+{
+    fn next_back(&mut self) -> Option<Self::Item> {
+        self.chain.next_back()
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/src/iter/cloned.rs
@@ -0,0 +1,186 @@
+use super::internal::*;
+use super::*;
+
+use std::iter;
+
+/// `Cloned` is an iterator that clones the elements of an underlying iterator.
+///
+/// This struct is created by the [`cloned()`] method on [`ParallelIterator`]
+///
+/// [`cloned()`]: trait.ParallelIterator.html#method.cloned
+/// [`ParallelIterator`]: trait.ParallelIterator.html
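+///
+/// # Example
+///
+/// A minimal usage sketch, assuming the `rayon` prelude is in scope:
+///
+/// ```
+/// use rayon::prelude::*;
+///
+/// let a = vec![1, 2, 3];
+/// // `par_iter()` yields `&i32` items; `cloned()` turns them into owned `i32`s.
+/// let v: Vec<i32> = a.par_iter().cloned().collect();
+/// assert_eq!(v, vec![1, 2, 3]);
+/// ```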
+#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
+pub struct Cloned<I: ParallelIterator> {
+    base: I,
+}
+
+/// Create a new `Cloned` iterator.
+///
+/// NB: a free fn because it is NOT part of the end-user API.
+pub fn new<I>(base: I) -> Cloned<I>
+    where I: ParallelIterator
+{
+    Cloned { base: base }
+}
+
+impl<'a, T, I> ParallelIterator for Cloned<I>
+    where I: ParallelIterator<Item = &'a T>,
+          T: 'a + Clone + Send + Sync
+{
+    type Item = T;
+
+    fn drive_unindexed<C>(self, consumer: C) -> C::Result
+        where C: UnindexedConsumer<Self::Item>
+    {
+        let consumer1 = ClonedConsumer::new(consumer);
+        self.base.drive_unindexed(consumer1)
+    }
+
+    fn opt_len(&mut self) -> Option<usize> {
+        self.base.opt_len()
+    }
+}
+
+impl<'a, T, I> IndexedParallelIterator for Cloned<I>
+    where I: IndexedParallelIterator<Item = &'a T>,
+          T: 'a + Clone + Send + Sync
+{
+    fn drive<C>(self, consumer: C) -> C::Result
+        where C: Consumer<Self::Item>
+    {
+        let consumer1 = ClonedConsumer::new(consumer);
+        self.base.drive(consumer1)
+    }
+
+    fn len(&mut self) -> usize {
+        self.base.len()
+    }
+
+    fn with_producer<CB>(self, callback: CB) -> CB::Output
+        where CB: ProducerCallback<Self::Item>
+    {
+        return self.base.with_producer(Callback { callback: callback });
+
+        struct Callback<CB> {
+            callback: CB,
+        }
+
+        impl<'a, T, CB> ProducerCallback<&'a T> for Callback<CB>
+            where CB: ProducerCallback<T>,
+                  T: 'a + Clone + Send
+        {
+            type Output = CB::Output;
+
+            fn callback<P>(self, base: P) -> CB::Output
+                where P: Producer<Item = &'a T>
+            {
+                let producer = ClonedProducer { base: base };
+                self.callback.callback(producer)
+            }
+        }
+    }
+}
+
+/// ////////////////////////////////////////////////////////////////////////
+
+struct ClonedProducer<P> {
+    base: P,
+}
+
+impl<'a, T, P> Producer for ClonedProducer<P>
+    where P: Producer<Item = &'a T>,
+          T: 'a + Clone
+{
+    type Item = T;
+    type IntoIter = iter::Cloned<P::IntoIter>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        self.base.into_iter().cloned()
+    }
+
+    fn min_len(&self) -> usize {
+        self.base.min_len()
+    }
+
+    fn max_len(&self) -> usize {
+        self.base.max_len()
+    }
+
+    fn split_at(self, index: usize) -> (Self, Self) {
+        let (left, right) = self.base.split_at(index);
+        (ClonedProducer { base: left }, ClonedProducer { base: right })
+    }
+}
+
+
+/// ////////////////////////////////////////////////////////////////////////
+/// Consumer implementation
+
+struct ClonedConsumer<C> {
+    base: C,
+}
+
+impl<C> ClonedConsumer<C> {
+    fn new(base: C) -> Self {
+        ClonedConsumer { base: base }
+    }
+}
+
+impl<'a, T, C> Consumer<&'a T> for ClonedConsumer<C>
+    where C: Consumer<T>,
+          T: 'a + Clone
+{
+    type Folder = ClonedFolder<C::Folder>;
+    type Reducer = C::Reducer;
+    type Result = C::Result;
+
+    fn split_at(self, index: usize) -> (Self, Self, Self::Reducer) {
+        let (left, right, reducer) = self.base.split_at(index);
+        (ClonedConsumer::new(left), ClonedConsumer::new(right), reducer)
+    }
+
+    fn into_folder(self) -> Self::Folder {
+        ClonedFolder { base: self.base.into_folder() }
+    }
+
+    fn full(&self) -> bool {
+        self.base.full()
+    }
+}
+
+impl<'a, T, C> UnindexedConsumer<&'a T> for ClonedConsumer<C>
+    where C: UnindexedConsumer<T>,
+          T: 'a + Clone
+{
+    fn split_off_left(&self) -> Self {
+        ClonedConsumer::new(self.base.split_off_left())
+    }
+
+    fn to_reducer(&self) -> Self::Reducer {
+        self.base.to_reducer()
+    }
+}
+
+
+struct ClonedFolder<F> {
+    base: F,
+}
+
+impl<'a, T, F> Folder<&'a T> for ClonedFolder<F>
+    where F: Folder<T>,
+          T: 'a + Clone
+{
+    type Result = F::Result;
+
+    fn consume(self, item: &'a T) -> Self {
+        ClonedFolder { base: self.base.consume(item.clone()) }
+    }
+
+    fn complete(self) -> F::Result {
+        self.base.complete()
+    }
+
+    fn full(&self) -> bool {
+        self.base.full()
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/src/iter/collect/consumer.rs
@@ -0,0 +1,102 @@
+use super::super::internal::*;
+use super::super::noop::*;
+use std::ptr;
+use std::slice;
+use std::sync::atomic::{AtomicUsize, Ordering};
+
+pub struct CollectConsumer<'c, T: Send + 'c> {
+    /// Tracks how many items we successfully wrote. Used to guarantee
+    /// safety in the face of panics or buggy parallel iterators.
+    writes: &'c AtomicUsize,
+
+    /// A slice covering the target memory, not yet initialized!
+    target: &'c mut [T],
+}
+
+pub struct CollectFolder<'c, T: Send + 'c> {
+    global_writes: &'c AtomicUsize,
+    local_writes: usize,
+
+    /// An iterator over the *uninitialized* target memory.
+    target: slice::IterMut<'c, T>,
+}
+
+
+impl<'c, T: Send + 'c> CollectConsumer<'c, T> {
+    /// The target memory is considered uninitialized, and will be
+    /// overwritten without dropping anything.
+    pub fn new(writes: &'c AtomicUsize, target: &'c mut [T]) -> CollectConsumer<'c, T> {
+        CollectConsumer {
+            writes: writes,
+            target: target,
+        }
+    }
+}
+
+impl<'c, T: Send + 'c> Consumer<T> for CollectConsumer<'c, T> {
+    type Folder = CollectFolder<'c, T>;
+    type Reducer = NoopReducer;
+    type Result = ();
+
+    fn split_at(self, index: usize) -> (Self, Self, NoopReducer) {
+        // Read the fields out of `self` and then forget `self`, since it has
+        // been legitimately consumed (and not dropped during unwinding).
+        let CollectConsumer { writes, target } = self;
+
+        // Produce new consumers. Normal slicing ensures that the
+        // memory range given to each consumer is disjoint.
+        let (left, right) = target.split_at_mut(index);
+        (CollectConsumer::new(writes, left), CollectConsumer::new(writes, right), NoopReducer)
+    }
+
+    fn into_folder(self) -> CollectFolder<'c, T> {
+        CollectFolder {
+            global_writes: self.writes,
+            local_writes: 0,
+            target: self.target.into_iter(),
+        }
+    }
+
+    fn full(&self) -> bool {
+        false
+    }
+}
+
+impl<'c, T: Send + 'c> Folder<T> for CollectFolder<'c, T> {
+    type Result = ();
+
+    fn consume(mut self, item: T) -> CollectFolder<'c, T> {
+        // Compute target pointer and write to it. Safe because the iterator
+        // does all the bounds checking; we're only avoiding the target drop.
+        let head = self.target.next().expect("too many values pushed to consumer");
+        unsafe {
+            ptr::write(head, item);
+        }
+
+        self.local_writes += 1;
+        self
+    }
+
+    fn complete(self) {
+        assert!(self.target.len() == 0, "too few values pushed to consumer");
+
+        // track total values written
+        self.global_writes.fetch_add(self.local_writes, Ordering::Relaxed);
+    }
+
+    fn full(&self) -> bool {
+        false
+    }
+}
+
+/// Pretend to be unindexed for `special_extend`,
+/// but we should never actually get used that way...
+impl<'c, T: Send + 'c> UnindexedConsumer<T> for CollectConsumer<'c, T> {
+    fn split_off_left(&self) -> Self {
+        unreachable!("CollectConsumer must be indexed!")
+    }
+    fn to_reducer(&self) -> Self::Reducer {
+        NoopReducer
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/src/iter/collect/mod.rs
@@ -0,0 +1,150 @@
+use super::{ParallelIterator, IndexedParallelIterator, IntoParallelIterator, ParallelExtend};
+use std::collections::LinkedList;
+use std::slice;
+use std::sync::atomic::{AtomicUsize, Ordering};
+
+mod consumer;
+use self::consumer::CollectConsumer;
+use super::unzip::unzip_indexed;
+
+mod test;
+
+/// Collects the results of the exact iterator into the specified vector.
+///
+/// This is not directly public, but called by `IndexedParallelIterator::collect_into`.
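+///
+/// A minimal sketch of that public entry point, assuming the `rayon` prelude
+/// is in scope:
+///
+/// ```
+/// use rayon::prelude::*;
+///
+/// let mut v = vec![];
+/// (0..5).into_par_iter().collect_into(&mut v);
+/// assert_eq!(v, vec![0, 1, 2, 3, 4]);
+/// ```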
+pub fn collect_into<I, T>(mut pi: I, v: &mut Vec<T>)
+    where I: IndexedParallelIterator<Item = T>,
+          T: Send
+{
+    v.truncate(0); // clear any old data
+    let mut collect = Collect::new(v, pi.len());
+    pi.drive(collect.as_consumer());
+    collect.complete();
+}
+
+/// Collects the results of the iterator into the specified vector.
+///
+/// Technically, this only works for `IndexedParallelIterator`, but we're faking a
+/// bit of specialization here until Rust can do that natively.  Callers are
+/// using `opt_len` to find the length before calling this, and only exact
+/// iterators will return anything but `None` there.
+///
+/// Since the type system doesn't understand that contract, we have to allow
+/// *any* `ParallelIterator` here, and `CollectConsumer` has to also implement
+/// `UnindexedConsumer`.  That implementation panics `unreachable!` in case
+/// there's a bug where we actually do try to use this unindexed.
+fn special_extend<I, T>(pi: I, len: usize, v: &mut Vec<T>)
+    where I: ParallelIterator<Item = T>,
+          T: Send
+{
+    let mut collect = Collect::new(v, len);
+    pi.drive_unindexed(collect.as_consumer());
+    collect.complete();
+}
+
+/// Unzips the results of the exact iterator into the specified vectors.
+///
+/// This is not directly public, but called by `IndexedParallelIterator::unzip_into`.
+pub fn unzip_into<I, A, B>(mut pi: I, left: &mut Vec<A>, right: &mut Vec<B>)
+    where I: IndexedParallelIterator<Item = (A, B)>,
+          A: Send,
+          B: Send
+{
+    // clear any old data
+    left.truncate(0);
+    right.truncate(0);
+
+    let len = pi.len();
+    let mut left = Collect::new(left, len);
+    let mut right = Collect::new(right, len);
+
+    unzip_indexed(pi, left.as_consumer(), right.as_consumer());
+
+    left.complete();
+    right.complete();
+}
+
+
+/// Manage the collection vector.
+struct Collect<'c, T: Send + 'c> {
+    writes: AtomicUsize,
+    vec: &'c mut Vec<T>,
+    len: usize,
+}
+
+impl<'c, T: Send + 'c> Collect<'c, T> {
+    fn new(vec: &'c mut Vec<T>, len: usize) -> Self {
+        Collect {
+            writes: AtomicUsize::new(0),
+            vec: vec,
+            len: len,
+        }
+    }
+
+    /// Create a consumer on a slice of our memory.
+    fn as_consumer(&mut self) -> CollectConsumer<T> {
+        // Reserve the new space.
+        self.vec.reserve(self.len);
+
+        // Get a correct borrow, then extend it for the newly added length.
+        let start = self.vec.len();
+        let mut slice = &mut self.vec[start..];
+        slice = unsafe { slice::from_raw_parts_mut(slice.as_mut_ptr(), self.len) };
+        CollectConsumer::new(&self.writes, slice)
+    }
+
+    /// Update the final vector length.
+    fn complete(mut self) {
+        unsafe {
+            // Here, we assert that `v` is fully initialized. This is
+            // checked by the following assert, which counts how many
+            // total writes occurred. Since we know that the consumer
+            // cannot have escaped from `drive` (by parametricity,
+            // essentially), we know that any stores that will happen,
+            // have happened. Unless some code is buggy, that means we
+            // should have seen `len` total writes.
+            let actual_writes = self.writes.load(Ordering::Relaxed);
+            assert!(actual_writes == self.len,
+                    "expected {} total writes, but got {}",
+                    self.len,
+                    actual_writes);
+            let new_len = self.vec.len() + self.len;
+            self.vec.set_len(new_len);
+        }
+    }
+}
+
+
+/// Extend a vector with items from a parallel iterator.
+impl<T> ParallelExtend<T> for Vec<T>
+    where T: Send
+{
+    fn par_extend<I>(&mut self, par_iter: I)
+        where I: IntoParallelIterator<Item = T>
+    {
+        // See the vec_collect benchmarks in rayon-demo for different strategies.
+        let mut par_iter = par_iter.into_par_iter();
+        match par_iter.opt_len() {
+            Some(len) => {
+                // When Rust gets specialization, we can get here for indexed iterators
+                // without relying on `opt_len`.  Until then, `special_extend()` fakes
+                // an unindexed mode on the promise that `opt_len()` is accurate.
+                special_extend(par_iter, len, self);
+            }
+            None => {
+                // This works like `extend`, but `Vec::append` is more efficient.
+                let list: LinkedList<_> = par_iter
+                    .fold(Vec::new, |mut vec, elem| {
+                        vec.push(elem);
+                        vec
+                    })
+                    .collect();
+
+                self.reserve(list.iter().map(Vec::len).sum());
+                for mut vec in list {
+                    self.append(&mut vec);
+                }
+            }
+        }
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/src/iter/collect/test.rs
@@ -0,0 +1,168 @@
+#![cfg(test)]
+#![allow(unused_assignments)]
+
+// These tests are primarily targeting "abusive" producers that will
+// try to drive the "collect consumer" incorrectly. These should
+// result in panics.
+
+use iter::internal::*;
+use super::Collect;
+
+/// Promises to produce 2 items, but then produces 3.  Does not do any
+/// splits at all.
+#[test]
+#[should_panic(expected = "too many values")]
+fn produce_too_many_items() {
+    let mut v = vec![];
+    let mut collect = Collect::new(&mut v, 2);
+    let consumer = collect.as_consumer();
+    let mut folder = consumer.into_folder();
+    folder = folder.consume(22);
+    folder = folder.consume(23);
+    folder.consume(24);
+}
+
+/// Produces fewer items than promised. Does not do any
+/// splits at all.
+#[test]
+#[should_panic(expected = "too few values")]
+fn produce_fewer_items() {
+    let mut v = vec![];
+    let mut collect = Collect::new(&mut v, 5);
+    let consumer = collect.as_consumer();
+    let mut folder = consumer.into_folder();
+    folder = folder.consume(22);
+    folder = folder.consume(23);
+    folder.complete();
+}
+
+// Complete is not called by the left consumer. Hence, the collection vector is not fully initialized.
+#[test]
+#[should_panic(expected = "expected 4 total writes, but got 2")]
+fn left_produces_items_with_no_complete() {
+    let mut v = vec![];
+    let mut collect = Collect::new(&mut v, 4);
+    {
+        let consumer = collect.as_consumer();
+        let (left_consumer, right_consumer, _) = consumer.split_at(2);
+        let mut left_folder = left_consumer.into_folder();
+        let mut right_folder = right_consumer.into_folder();
+        left_folder = left_folder.consume(0).consume(1);
+        right_folder = right_folder.consume(2).consume(3);
+        right_folder.complete();
+    }
+    collect.complete();
+}
+
+// Complete is not called by the right consumer. Hence, the
+// collection vector is not fully initialized.
+#[test]
+#[should_panic(expected = "expected 4 total writes, but got 2")]
+fn right_produces_items_with_no_complete() {
+    let mut v = vec![];
+    let mut collect = Collect::new(&mut v, 4);
+    {
+        let consumer = collect.as_consumer();
+        let (left_consumer, right_consumer, _) = consumer.split_at(2);
+        let mut left_folder = left_consumer.into_folder();
+        let mut right_folder = right_consumer.into_folder();
+        left_folder = left_folder.consume(0).consume(1);
+        right_folder = right_folder.consume(2).consume(3);
+        left_folder.complete();
+    }
+    collect.complete();
+}
+
+// Complete is not called by the consumer. Hence, the collection vector is not fully initialized.
+#[test]
+#[should_panic(expected = "expected 2 total writes, but got 0")]
+fn produces_items_with_no_complete() {
+    let mut v = vec![];
+    let mut collect = Collect::new(&mut v, 2);
+    {
+        let consumer = collect.as_consumer();
+        let mut folder = consumer.into_folder();
+        folder = folder.consume(22);
+        folder = folder.consume(23);
+    }
+    collect.complete();
+}
+
+// The left consumer produces too many items while the right
+// consumer produces the correct number.
+#[test]
+#[should_panic(expected = "too many values")]
+fn left_produces_too_many_items() {
+    let mut v = vec![];
+    let mut collect = Collect::new(&mut v, 4);
+    {
+        let consumer = collect.as_consumer();
+        let (left_consumer, right_consumer, _) = consumer.split_at(2);
+        let mut left_folder = left_consumer.into_folder();
+        let mut right_folder = right_consumer.into_folder();
+        left_folder = left_folder.consume(0).consume(1).consume(2);
+        right_folder = right_folder.consume(2).consume(3);
+        right_folder.complete();
+    }
+    collect.complete();
+}
+
+// The right consumer produces too many items while the left
+// consumer produces the correct number.
+#[test]
+#[should_panic(expected = "too many values")]
+fn right_produces_too_many_items() {
+    let mut v = vec![];
+    let mut collect = Collect::new(&mut v, 4);
+    {
+        let consumer = collect.as_consumer();
+        let (left_consumer, right_consumer, _) = consumer.split_at(2);
+        let mut left_folder = left_consumer.into_folder();
+        let mut right_folder = right_consumer.into_folder();
+        left_folder = left_folder.consume(0).consume(1);
+        right_folder = right_folder.consume(2).consume(3).consume(4);
+        left_folder.complete();
+    }
+    collect.complete();
+}
+
+
+// The left consumer produces fewer items while the right
+// consumer produces the correct number.
+#[test]
+#[should_panic(expected = "too few values")]
+fn left_produces_fewer_items() {
+    let mut v = vec![];
+    let mut collect = Collect::new(&mut v, 4);
+    {
+        let consumer = collect.as_consumer();
+        let (left_consumer, right_consumer, _) = consumer.split_at(2);
+        let mut left_folder = left_consumer.into_folder();
+        let mut right_folder = right_consumer.into_folder();
+        left_folder = left_folder.consume(0);
+        right_folder = right_folder.consume(2).consume(3);
+        left_folder.complete();
+        right_folder.complete();
+    }
+    collect.complete();
+}
+
+// The right consumer produces fewer items while the left
+// consumer produces the correct number.
+#[test]
+#[should_panic(expected = "too few values")]
+fn right_produces_fewer_items() {
+    let mut v = vec![];
+    let mut collect = Collect::new(&mut v, 4);
+    {
+        let consumer = collect.as_consumer();
+        let (left_consumer, right_consumer, _) = consumer.split_at(2);
+        let mut left_folder = left_consumer.into_folder();
+        let mut right_folder = right_consumer.into_folder();
+        left_folder = left_folder.consume(0).consume(1);
+        right_folder = right_folder.consume(2);
+        left_folder.complete();
+        right_folder.complete();
+    }
+    collect.complete();
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/src/iter/enumerate.rs
@@ -0,0 +1,119 @@
+use super::internal::*;
+use super::*;
+use std::iter;
+use std::ops::Range;
+use std::usize;
+
+/// `Enumerate` is an iterator that returns the current count along with the element.
+/// This struct is created by the [`enumerate()`] method on [`ParallelIterator`]
+///
+/// [`enumerate()`]: trait.ParallelIterator.html#method.enumerate
+/// [`ParallelIterator`]: trait.ParallelIterator.html
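+///
+/// # Example
+///
+/// A minimal usage sketch, assuming the `rayon` prelude is in scope (an
+/// indexed source such as a range is required):
+///
+/// ```
+/// use rayon::prelude::*;
+///
+/// let v: Vec<(usize, i32)> = (10..13).into_par_iter().enumerate().collect();
+/// assert_eq!(v, vec![(0, 10), (1, 11), (2, 12)]);
+/// ```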
+#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
+pub struct Enumerate<I: IndexedParallelIterator> {
+    base: I,
+}
+
+/// Create a new `Enumerate` iterator.
+///
+/// NB: a free fn because it is NOT part of the end-user API.
+pub fn new<I>(base: I) -> Enumerate<I>
+    where I: IndexedParallelIterator
+{
+    Enumerate { base: base }
+}
+
+impl<I> ParallelIterator for Enumerate<I>
+    where I: IndexedParallelIterator
+{
+    type Item = (usize, I::Item);
+
+    fn drive_unindexed<C>(self, consumer: C) -> C::Result
+        where C: UnindexedConsumer<Self::Item>
+    {
+        bridge(self, consumer)
+    }
+
+    fn opt_len(&mut self) -> Option<usize> {
+        Some(self.len())
+    }
+}
+
+impl<I> IndexedParallelIterator for Enumerate<I>
+    where I: IndexedParallelIterator
+{
+    fn drive<C: Consumer<Self::Item>>(self, consumer: C) -> C::Result {
+        bridge(self, consumer)
+    }
+
+    fn len(&mut self) -> usize {
+        self.base.len()
+    }
+
+    fn with_producer<CB>(self, callback: CB) -> CB::Output
+        where CB: ProducerCallback<Self::Item>
+    {
+        return self.base.with_producer(Callback { callback: callback });
+
+        struct Callback<CB> {
+            callback: CB,
+        }
+
+        impl<I, CB> ProducerCallback<I> for Callback<CB>
+            where CB: ProducerCallback<(usize, I)>
+        {
+            type Output = CB::Output;
+            fn callback<P>(self, base: P) -> CB::Output
+                where P: Producer<Item = I>
+            {
+                let producer = EnumerateProducer {
+                    base: base,
+                    offset: 0,
+                };
+                self.callback.callback(producer)
+            }
+        }
+    }
+}
+
+/// ////////////////////////////////////////////////////////////////////////
+/// Producer implementation
+
+struct EnumerateProducer<P> {
+    base: P,
+    offset: usize,
+}
+
+impl<P> Producer for EnumerateProducer<P>
+    where P: Producer
+{
+    type Item = (usize, P::Item);
+    type IntoIter = iter::Zip<Range<usize>, P::IntoIter>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        // Enumerate only works for IndexedParallelIterators. Since those
+        // have a max length of usize::MAX, their max index is
+        // usize::MAX - 1, so the range 0..usize::MAX includes all
+        // possible indices
+        (self.offset..usize::MAX).zip(self.base.into_iter())
+    }
+
+    fn min_len(&self) -> usize {
+        self.base.min_len()
+    }
+    fn max_len(&self) -> usize {
+        self.base.max_len()
+    }
+
+    fn split_at(self, index: usize) -> (Self, Self) {
+        let (left, right) = self.base.split_at(index);
+        (EnumerateProducer {
+             base: left,
+             offset: self.offset,
+         },
+         EnumerateProducer {
+             base: right,
+             offset: self.offset + index,
+         })
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/src/iter/extend.rs
@@ -0,0 +1,285 @@
+use super::{ParallelExtend, IntoParallelIterator, ParallelIterator};
+
+use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet};
+use std::hash::{BuildHasher, Hash};
+use std::collections::LinkedList;
+use std::collections::{BinaryHeap, VecDeque};
+
+/// Perform a generic `par_extend` by collecting to a `LinkedList<Vec<_>>` in
+/// parallel, then extending the collection sequentially.
+fn extend<C, I, F>(collection: &mut C, par_iter: I, reserve: F)
+    where I: IntoParallelIterator,
+          F: FnOnce(&mut C, &LinkedList<Vec<I::Item>>),
+          C: Extend<I::Item>
+{
+    let list = par_iter
+        .into_par_iter()
+        .fold(Vec::new, |mut vec, elem| {
+            vec.push(elem);
+            vec
+        })
+        .collect();
+
+    reserve(collection, &list);
+    for vec in list {
+        collection.extend(vec);
+    }
+}
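+
+// Illustrative usage sketch (not part of this module's API surface), assuming
+// the `rayon` prelude is in scope; each `ParallelExtend` impl below is driven
+// through `ParallelExtend::par_extend` roughly like this:
+//
+//     use rayon::prelude::*;
+//     use std::collections::HashSet;
+//
+//     let mut squares = HashSet::new();
+//     squares.par_extend((0..100).into_par_iter().map(|i| i * i));
+//     assert_eq!(squares.len(), 100);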
+
+/// Compute the total length of a `LinkedList<Vec<_>>`.
+fn len<T>(list: &LinkedList<Vec<T>>) -> usize {
+    list.iter().map(Vec::len).sum()
+}
+
+/// Compute the total string length of a `LinkedList<Vec<AsRef<str>>>`.
+fn str_len<T>(list: &LinkedList<Vec<T>>) -> usize
+    where T: AsRef<str>
+{
+    list.iter()
+        .flat_map(|vec| vec.iter().map(|s| s.as_ref().len()))
+        .sum()
+}
+
+
+/// Extend a binary heap with items from a parallel iterator.
+impl<T> ParallelExtend<T> for BinaryHeap<T>
+    where T: Ord + Send
+{
+    fn par_extend<I>(&mut self, par_iter: I)
+        where I: IntoParallelIterator<Item = T>
+    {
+        extend(self, par_iter, |heap, list| heap.reserve(len(list)));
+    }
+}
+
+/// Extend a binary heap with copied items from a parallel iterator.
+impl<'a, T> ParallelExtend<&'a T> for BinaryHeap<T>
+    where T: 'a + Copy + Ord + Send + Sync
+{
+    fn par_extend<I>(&mut self, par_iter: I)
+        where I: IntoParallelIterator<Item = &'a T>
+    {
+        extend(self, par_iter, |heap, list| heap.reserve(len(list)));
+    }
+}
+
+
+/// Extend a B-tree map with items from a parallel iterator.
+impl<K, V> ParallelExtend<(K, V)> for BTreeMap<K, V>
+    where K: Ord + Send,
+          V: Send
+{
+    fn par_extend<I>(&mut self, par_iter: I)
+        where I: IntoParallelIterator<Item = (K, V)>
+    {
+        extend(self, par_iter, |_, _| {});
+    }
+}
+
+/// Extend a B-tree map with copied items from a parallel iterator.
+impl<'a, K, V> ParallelExtend<(&'a K, &'a V)> for BTreeMap<K, V>
+    where K: Copy + Ord + Send + Sync,
+          V: Copy + Send + Sync
+{
+    fn par_extend<I>(&mut self, par_iter: I)
+        where I: IntoParallelIterator<Item = (&'a K, &'a V)>
+    {
+        extend(self, par_iter, |_, _| {});
+    }
+}
+
+
+/// Extend a B-tree set with items from a parallel iterator.
+impl<T> ParallelExtend<T> for BTreeSet<T>
+    where T: Ord + Send
+{
+    fn par_extend<I>(&mut self, par_iter: I)
+        where I: IntoParallelIterator<Item = T>
+    {
+        extend(self, par_iter, |_, _| {});
+    }
+}
+
+/// Extend a B-tree set with copied items from a parallel iterator.
+impl<'a, T> ParallelExtend<&'a T> for BTreeSet<T>
+    where T: 'a + Copy + Ord + Send + Sync
+{
+    fn par_extend<I>(&mut self, par_iter: I)
+        where I: IntoParallelIterator<Item = &'a T>
+    {
+        extend(self, par_iter, |_, _| {});
+    }
+}
+
+
+/// Extend a hash map with items from a parallel iterator.
+impl<K, V, S> ParallelExtend<(K, V)> for HashMap<K, V, S>
+    where K: Eq + Hash + Send,
+          V: Send,
+          S: BuildHasher + Send
+{
+    fn par_extend<I>(&mut self, par_iter: I)
+        where I: IntoParallelIterator<Item = (K, V)>
+    {
+        // See the map_collect benchmarks in rayon-demo for different strategies.
+        extend(self, par_iter, |map, list| map.reserve(len(list)));
+    }
+}
+
+/// Extend a hash map with copied items from a parallel iterator.
+impl<'a, K, V, S> ParallelExtend<(&'a K, &'a V)> for HashMap<K, V, S>
+    where K: Copy + Eq + Hash + Send + Sync,
+          V: Copy + Send + Sync,
+          S: BuildHasher + Send
+{
+    fn par_extend<I>(&mut self, par_iter: I)
+        where I: IntoParallelIterator<Item = (&'a K, &'a V)>
+    {
+        extend(self, par_iter, |map, list| map.reserve(len(list)));
+    }
+}
+
+
+/// Extend a hash set with items from a parallel iterator.
+impl<T, S> ParallelExtend<T> for HashSet<T, S>
+    where T: Eq + Hash + Send,
+          S: BuildHasher + Send
+{
+    fn par_extend<I>(&mut self, par_iter: I)
+        where I: IntoParallelIterator<Item = T>
+    {
+        extend(self, par_iter, |set, list| set.reserve(len(list)));
+    }
+}
+
+/// Extend a hash set with copied items from a parallel iterator.
+impl<'a, T, S> ParallelExtend<&'a T> for HashSet<T, S>
+    where T: 'a + Copy + Eq + Hash + Send + Sync,
+          S: BuildHasher + Send
+{
+    fn par_extend<I>(&mut self, par_iter: I)
+        where I: IntoParallelIterator<Item = &'a T>
+    {
+        extend(self, par_iter, |set, list| set.reserve(len(list)));
+    }
+}
+
+
+/// Extend a linked list with items from a parallel iterator.
+impl<T> ParallelExtend<T> for LinkedList<T>
+    where T: Send
+{
+    fn par_extend<I>(&mut self, par_iter: I)
+        where I: IntoParallelIterator<Item = T>
+    {
+        let mut list = par_iter
+            .into_par_iter()
+            .fold(LinkedList::new, |mut list, elem| {
+                list.push_back(elem);
+                list
+            })
+            .reduce(LinkedList::new, |mut list1, mut list2| {
+                list1.append(&mut list2);
+                list1
+            });
+        self.append(&mut list);
+    }
+}
+
+
+/// Extend a linked list with copied items from a parallel iterator.
+impl<'a, T> ParallelExtend<&'a T> for LinkedList<T>
+    where T: 'a + Copy + Send + Sync
+{
+    fn par_extend<I>(&mut self, par_iter: I)
+        where I: IntoParallelIterator<Item = &'a T>
+    {
+        self.par_extend(par_iter.into_par_iter().cloned())
+    }
+}
+
+
+/// Extend a string with characters from a parallel iterator.
+impl ParallelExtend<char> for String {
+    fn par_extend<I>(&mut self, par_iter: I)
+        where I: IntoParallelIterator<Item = char>
+    {
+        // This is like `extend`, but `Vec<char>` is less efficient to deal
+        // with than `String`, so instead collect to `LinkedList<String>`.
+        let list: LinkedList<_> = par_iter
+            .into_par_iter()
+            .fold(String::new, |mut string, ch| {
+                string.push(ch);
+                string
+            })
+            .collect();
+
+        self.reserve(list.iter().map(String::len).sum());
+        self.extend(list)
+    }
+}
+
+/// Extend a string with copied characters from a parallel iterator.
+impl<'a> ParallelExtend<&'a char> for String {
+    fn par_extend<I>(&mut self, par_iter: I)
+        where I: IntoParallelIterator<Item = &'a char>
+    {
+        self.par_extend(par_iter.into_par_iter().cloned())
+    }
+}
+
+/// Extend a string with string slices from a parallel iterator.
+impl<'a> ParallelExtend<&'a str> for String {
+    fn par_extend<I>(&mut self, par_iter: I)
+        where I: IntoParallelIterator<Item = &'a str>
+    {
+        extend(self, par_iter, |string, list| string.reserve(str_len(list)));
+    }
+}
+
+/// Extend a string with strings from a parallel iterator.
+impl ParallelExtend<String> for String {
+    fn par_extend<I>(&mut self, par_iter: I)
+        where I: IntoParallelIterator<Item = String>
+    {
+        extend(self, par_iter, |string, list| string.reserve(str_len(list)));
+    }
+}
+
+
+/// Extend a deque with items from a parallel iterator.
+impl<T> ParallelExtend<T> for VecDeque<T>
+    where T: Send
+{
+    fn par_extend<I>(&mut self, par_iter: I)
+        where I: IntoParallelIterator<Item = T>
+    {
+        extend(self, par_iter, |deque, list| deque.reserve(len(list)));
+    }
+}
+
+/// Extend a deque with copied items from a parallel iterator.
+impl<'a, T> ParallelExtend<&'a T> for VecDeque<T>
+    where T: 'a + Copy + Send + Sync
+{
+    fn par_extend<I>(&mut self, par_iter: I)
+        where I: IntoParallelIterator<Item = &'a T>
+    {
+        extend(self, par_iter, |deque, list| deque.reserve(len(list)));
+    }
+}
+
+
+// See the `collect` module for the `Vec<T>` implementation.
+// impl<T> ParallelExtend<T> for Vec<T>
+
+/// Extend a vector with copied items from a parallel iterator.
+impl<'a, T> ParallelExtend<&'a T> for Vec<T>
+    where T: 'a + Copy + Send + Sync
+{
+    fn par_extend<I>(&mut self, par_iter: I)
+        where I: IntoParallelIterator<Item = &'a T>
+    {
+        self.par_extend(par_iter.into_par_iter().cloned())
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/src/iter/filter.rs
@@ -0,0 +1,130 @@
+use super::internal::*;
+use super::*;
+
+/// `Filter` takes a predicate `filter_op` and keeps only the elements for which the predicate returns `true`.
+/// This struct is created by the [`filter()`] method on [`ParallelIterator`]
+///
+/// [`filter()`]: trait.ParallelIterator.html#method.filter
+/// [`ParallelIterator`]: trait.ParallelIterator.html
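+///
+/// # Example
+///
+/// A minimal usage sketch, assuming the `rayon` prelude is in scope:
+///
+/// ```
+/// use rayon::prelude::*;
+///
+/// // Keep only the even numbers.
+/// let evens: Vec<i32> = (0..10).into_par_iter()
+///                              .filter(|x| x % 2 == 0)
+///                              .collect();
+/// assert_eq!(evens, vec![0, 2, 4, 6, 8]);
+/// ```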
+#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
+pub struct Filter<I: ParallelIterator, P> {
+    base: I,
+    filter_op: P,
+}
+
+/// Create a new `Filter` iterator.
+///
+/// NB: a free fn because it is NOT part of the end-user API.
+pub fn new<I, P>(base: I, filter_op: P) -> Filter<I, P>
+    where I: ParallelIterator
+{
+    Filter {
+        base: base,
+        filter_op: filter_op,
+    }
+}
+
+impl<I, P> ParallelIterator for Filter<I, P>
+    where I: ParallelIterator,
+          P: Fn(&I::Item) -> bool + Sync + Send
+{
+    type Item = I::Item;
+
+    fn drive_unindexed<C>(self, consumer: C) -> C::Result
+        where C: UnindexedConsumer<Self::Item>
+    {
+        let consumer1 = FilterConsumer::new(consumer, &self.filter_op);
+        self.base.drive_unindexed(consumer1)
+    }
+}
+
+/// ////////////////////////////////////////////////////////////////////////
+/// Consumer implementation
+
+struct FilterConsumer<'p, C, P: 'p> {
+    base: C,
+    filter_op: &'p P,
+}
+
+impl<'p, C, P> FilterConsumer<'p, C, P> {
+    fn new(base: C, filter_op: &'p P) -> Self {
+        FilterConsumer {
+            base: base,
+            filter_op: filter_op,
+        }
+    }
+}
+
+impl<'p, T, C, P: 'p> Consumer<T> for FilterConsumer<'p, C, P>
+    where C: Consumer<T>,
+          P: Fn(&T) -> bool + Sync
+{
+    type Folder = FilterFolder<'p, C::Folder, P>;
+    type Reducer = C::Reducer;
+    type Result = C::Result;
+
+    fn split_at(self, index: usize) -> (Self, Self, C::Reducer) {
+        let (left, right, reducer) = self.base.split_at(index);
+        (FilterConsumer::new(left, self.filter_op),
+         FilterConsumer::new(right, self.filter_op),
+         reducer)
+    }
+
+    fn into_folder(self) -> Self::Folder {
+        FilterFolder {
+            base: self.base.into_folder(),
+            filter_op: self.filter_op,
+        }
+    }
+
+    fn full(&self) -> bool {
+        self.base.full()
+    }
+}
+
+
+impl<'p, T, C, P: 'p> UnindexedConsumer<T> for FilterConsumer<'p, C, P>
+    where C: UnindexedConsumer<T>,
+          P: Fn(&T) -> bool + Sync
+{
+    fn split_off_left(&self) -> Self {
+        FilterConsumer::new(self.base.split_off_left(), &self.filter_op)
+    }
+
+    fn to_reducer(&self) -> Self::Reducer {
+        self.base.to_reducer()
+    }
+}
+
+struct FilterFolder<'p, C, P: 'p> {
+    base: C,
+    filter_op: &'p P,
+}
+
+impl<'p, C, P, T> Folder<T> for FilterFolder<'p, C, P>
+    where C: Folder<T>,
+          P: Fn(&T) -> bool + 'p
+{
+    type Result = C::Result;
+
+    fn consume(self, item: T) -> Self {
+        let filter_op = self.filter_op;
+        if filter_op(&item) {
+            let base = self.base.consume(item);
+            FilterFolder {
+                base: base,
+                filter_op: filter_op,
+            }
+        } else {
+            self
+        }
+    }
+
+    fn complete(self) -> Self::Result {
+        self.base.complete()
+    }
+
+    fn full(&self) -> bool {
+        self.base.full()
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/src/iter/filter_map.rs
@@ -0,0 +1,131 @@
+use super::internal::*;
+use super::*;
+
+/// `FilterMap` creates an iterator that uses `filter_op` to both filter and map elements.
+/// This struct is created by the [`filter_map()`] method on [`ParallelIterator`].
+///
+/// [`filter_map()`]: trait.ParallelIterator.html#method.filter_map
+/// [`ParallelIterator`]: trait.ParallelIterator.html
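+///
+/// # Example
+///
+/// A minimal usage sketch, assuming the `rayon` prelude is in scope:
+///
+/// ```
+/// use rayon::prelude::*;
+///
+/// let words = vec!["1", "two", "3"];
+/// // Parse what we can and drop the rest in a single pass.
+/// let numbers: Vec<i32> = words.par_iter()
+///                              .filter_map(|s| s.parse::<i32>().ok())
+///                              .collect();
+/// assert_eq!(numbers, vec![1, 3]);
+/// ```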
+#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
+pub struct FilterMap<I: ParallelIterator, P> {
+    base: I,
+    filter_op: P,
+}
+
+/// Create a new `FilterMap` iterator.
+///
+/// NB: a free fn because it is NOT part of the end-user API.
+pub fn new<I, P>(base: I, filter_op: P) -> FilterMap<I, P>
+    where I: ParallelIterator
+{
+    FilterMap {
+        base: base,
+        filter_op: filter_op,
+    }
+}
+
+impl<I, P, R> ParallelIterator for FilterMap<I, P>
+    where I: ParallelIterator,
+          P: Fn(I::Item) -> Option<R> + Sync + Send,
+          R: Send
+{
+    type Item = R;
+
+    fn drive_unindexed<C>(self, consumer: C) -> C::Result
+        where C: UnindexedConsumer<Self::Item>
+    {
+        let consumer = FilterMapConsumer::new(consumer, &self.filter_op);
+        self.base.drive_unindexed(consumer)
+    }
+}
+
+/// ////////////////////////////////////////////////////////////////////////
+/// Consumer implementation
+
+struct FilterMapConsumer<'p, C, P: 'p> {
+    base: C,
+    filter_op: &'p P,
+}
+
+impl<'p, C, P: 'p> FilterMapConsumer<'p, C, P> {
+    fn new(base: C, filter_op: &'p P) -> Self {
+        FilterMapConsumer {
+            base: base,
+            filter_op: filter_op,
+        }
+    }
+}
+
+impl<'p, T, U, C, P> Consumer<T> for FilterMapConsumer<'p, C, P>
+    where C: Consumer<U>,
+          P: Fn(T) -> Option<U> + Sync + 'p
+{
+    type Folder = FilterMapFolder<'p, C::Folder, P>;
+    type Reducer = C::Reducer;
+    type Result = C::Result;
+
+    fn split_at(self, index: usize) -> (Self, Self, Self::Reducer) {
+        let (left, right, reducer) = self.base.split_at(index);
+        (FilterMapConsumer::new(left, self.filter_op),
+         FilterMapConsumer::new(right, self.filter_op),
+         reducer)
+    }
+
+    fn into_folder(self) -> Self::Folder {
+        let base = self.base.into_folder();
+        FilterMapFolder {
+            base: base,
+            filter_op: self.filter_op,
+        }
+    }
+
+    fn full(&self) -> bool {
+        self.base.full()
+    }
+}
+
+impl<'p, T, U, C, P> UnindexedConsumer<T> for FilterMapConsumer<'p, C, P>
+    where C: UnindexedConsumer<U>,
+          P: Fn(T) -> Option<U> + Sync + 'p
+{
+    fn split_off_left(&self) -> Self {
+        FilterMapConsumer::new(self.base.split_off_left(), &self.filter_op)
+    }
+
+    fn to_reducer(&self) -> Self::Reducer {
+        self.base.to_reducer()
+    }
+}
+
+struct FilterMapFolder<'p, C, P: 'p> {
+    base: C,
+    filter_op: &'p P,
+}
+
+impl<'p, T, U, C, P> Folder<T> for FilterMapFolder<'p, C, P>
+    where C: Folder<U>,
+          P: Fn(T) -> Option<U> + Sync + 'p
+{
+    type Result = C::Result;
+
+    fn consume(self, item: T) -> Self {
+        let filter_op = self.filter_op;
+        if let Some(mapped_item) = filter_op(item) {
+            let base = self.base.consume(mapped_item);
+            FilterMapFolder {
+                base: base,
+                filter_op: filter_op,
+            }
+        } else {
+            self
+        }
+    }
+
+    fn complete(self) -> C::Result {
+        self.base.complete()
+    }
+
+    fn full(&self) -> bool {
+        self.base.full()
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/src/iter/find.rs
@@ -0,0 +1,103 @@
+use std::sync::atomic::{AtomicBool, Ordering};
+use super::internal::*;
+use super::*;
+
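+/// Searches for *some* item matching `find_op` and returns it. The shared
+/// `found` flag lets every consumer stop as soon as any match has been seen,
+/// so which matching item is returned is nondeterministic; the
+/// `find_first_last` module provides the positional variants.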
+pub fn find<I, P>(pi: I, find_op: P) -> Option<I::Item>
+    where I: ParallelIterator,
+          P: Fn(&I::Item) -> bool + Sync
+{
+    let found = AtomicBool::new(false);
+    let consumer = FindConsumer::new(&find_op, &found);
+    pi.drive_unindexed(consumer)
+}
+
+struct FindConsumer<'p, P: 'p> {
+    find_op: &'p P,
+    found: &'p AtomicBool,
+}
+
+impl<'p, P> FindConsumer<'p, P> {
+    fn new(find_op: &'p P, found: &'p AtomicBool) -> Self {
+        FindConsumer {
+            find_op: find_op,
+            found: found,
+        }
+    }
+}
+
+impl<'p, T, P: 'p> Consumer<T> for FindConsumer<'p, P>
+    where T: Send,
+          P: Fn(&T) -> bool + Sync
+{
+    type Folder = FindFolder<'p, T, P>;
+    type Reducer = FindReducer;
+    type Result = Option<T>;
+
+    fn split_at(self, _index: usize) -> (Self, Self, Self::Reducer) {
+        (self.split_off_left(), self, FindReducer)
+    }
+
+    fn into_folder(self) -> Self::Folder {
+        FindFolder {
+            find_op: self.find_op,
+            found: self.found,
+            item: None,
+        }
+    }
+
+    fn full(&self) -> bool {
+        self.found.load(Ordering::Relaxed)
+    }
+}
+
+
+impl<'p, T, P: 'p> UnindexedConsumer<T> for FindConsumer<'p, P>
+    where T: Send,
+          P: Fn(&T) -> bool + Sync
+{
+    fn split_off_left(&self) -> Self {
+        FindConsumer::new(self.find_op, self.found)
+    }
+
+    fn to_reducer(&self) -> Self::Reducer {
+        FindReducer
+    }
+}
+
+
+struct FindFolder<'p, T, P: 'p> {
+    find_op: &'p P,
+    found: &'p AtomicBool,
+    item: Option<T>,
+}
+
+impl<'p, T, P> Folder<T> for FindFolder<'p, T, P>
+    where P: Fn(&T) -> bool + 'p
+{
+    type Result = Option<T>;
+
+    fn consume(mut self, item: T) -> Self {
+        if (self.find_op)(&item) {
+            self.found.store(true, Ordering::Relaxed);
+            self.item = Some(item);
+        }
+        self
+    }
+
+    fn complete(self) -> Self::Result {
+        self.item
+    }
+
+    fn full(&self) -> bool {
+        self.found.load(Ordering::Relaxed)
+    }
+}
+
+
+struct FindReducer;
+
+impl<T> Reducer<Option<T>> for FindReducer {
+    fn reduce(self, left: Option<T>, right: Option<T>) -> Option<T> {
+        left.or(right)
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/src/iter/find_first_last/mod.rs
@@ -0,0 +1,220 @@
+use std::cell::Cell;
+use std::sync::atomic::{AtomicUsize, Ordering};
+use super::internal::*;
+use super::*;
+
+#[cfg(test)]
+mod test;
+
+// The key optimization for find_first is that a consumer can stop its search if
+// some consumer to its left already found a match (and similarly for consumers
+// to the right for find_last). To make this work, all consumers need some
+// notion of their position in the data relative to other consumers, including
+// unindexed consumers that have no built-in notion of position.
+//
+// To solve this, we assign each consumer a lower and upper bound for an
+// imaginary "range" of data that it consumes. The initial consumer starts with
+// the range 0..usize::max_value(). The split divides this range in half so that
+// one resulting consumer has the range 0..(usize::max_value() / 2), and the
+// other has (usize::max_value() / 2)..usize::max_value(). Every subsequent
+// split divides the range in half again until it cannot be split anymore
+// (i.e. its length is 1), in which case the split returns two consumers with
+// the same range. In that case both consumers will continue to consume all
+// their data regardless of whether a better match is found, but the reducer
+// will still return the correct answer.
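+//
+// A small worked example with illustrative numbers: suppose the initial range
+// were 0..16. The first split yields consumers covering 0..8 and 8..16;
+// splitting the left one again yields 0..4 and 4..8, and so on. A find_first
+// folder whose boundary is 4 may stop (report `full`) as soon as best_found
+// drops below 4, since any such match is positioned strictly to its left.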
+
+#[derive(Copy, Clone)]
+enum MatchPosition {
+    Leftmost,
+    Rightmost,
+}
+
+/// Returns true if pos1 is a better match than pos2 according to MatchPosition
+#[inline]
+fn better_position(pos1: usize, pos2: usize, mp: MatchPosition) -> bool {
+    match mp {
+        MatchPosition::Leftmost => pos1 < pos2,
+        MatchPosition::Rightmost => pos1 > pos2,
+    }
+}
+
+pub fn find_first<I, P>(pi: I, find_op: P) -> Option<I::Item>
+    where I: ParallelIterator,
+          P: Fn(&I::Item) -> bool + Sync
+{
+    let best_found = AtomicUsize::new(usize::max_value());
+    let consumer = FindConsumer::new(&find_op, MatchPosition::Leftmost, &best_found);
+    pi.drive_unindexed(consumer)
+}
+
+pub fn find_last<I, P>(pi: I, find_op: P) -> Option<I::Item>
+    where I: ParallelIterator,
+          P: Fn(&I::Item) -> bool + Sync
+{
+    let best_found = AtomicUsize::new(0);
+    let consumer = FindConsumer::new(&find_op, MatchPosition::Rightmost, &best_found);
+    pi.drive_unindexed(consumer)
+}
+
+struct FindConsumer<'p, P: 'p> {
+    find_op: &'p P,
+    lower_bound: Cell<usize>,
+    upper_bound: usize,
+    match_position: MatchPosition,
+    best_found: &'p AtomicUsize,
+}
+
+impl<'p, P> FindConsumer<'p, P> {
+    fn new(find_op: &'p P, match_position: MatchPosition, best_found: &'p AtomicUsize) -> Self {
+        FindConsumer {
+            find_op: find_op,
+            lower_bound: Cell::new(0),
+            upper_bound: usize::max_value(),
+            match_position: match_position,
+            best_found: best_found,
+        }
+    }
+
+    fn current_index(&self) -> usize {
+        match self.match_position {
+            MatchPosition::Leftmost => self.lower_bound.get(),
+            MatchPosition::Rightmost => self.upper_bound,
+        }
+    }
+}
+
+impl<'p, T, P> Consumer<T> for FindConsumer<'p, P>
+    where T: Send,
+          P: Fn(&T) -> bool + Sync
+{
+    type Folder = FindFolder<'p, T, P>;
+    type Reducer = FindReducer;
+    type Result = Option<T>;
+
+    fn split_at(self, _index: usize) -> (Self, Self, Self::Reducer) {
+        let dir = self.match_position;
+        (self.split_off_left(), self, FindReducer { match_position: dir })
+    }
+
+    fn into_folder(self) -> Self::Folder {
+        FindFolder {
+            find_op: self.find_op,
+            boundary: self.current_index(),
+            match_position: self.match_position,
+            best_found: self.best_found,
+            item: None,
+        }
+    }
+
+    fn full(&self) -> bool {
+        // can stop consuming if the best found index so far is *strictly*
+        // better than anything this consumer will find
+        better_position(self.best_found.load(Ordering::Relaxed),
+                        self.current_index(),
+                        self.match_position)
+    }
+}
+
+impl<'p, T, P> UnindexedConsumer<T> for FindConsumer<'p, P>
+    where T: Send,
+          P: Fn(&T) -> bool + Sync
+{
+    fn split_off_left(&self) -> Self {
+        // Upper bound for one consumer will be lower bound for the other. This
+        // overlap is okay, because only one of the bounds will be used for
+        // comparing against best_found; the other is kept only to be able to
+        // divide the range in half.
+        //
+        // When the resolution of usize has been exhausted (i.e. when
+        // upper_bound = lower_bound), both results of this split will have the
+        // same range. When that happens, we lose the ability to tell one
+        // consumer to stop working when the other finds a better match, but the
+        // reducer ensures that the best answer is still returned (see the test
+        // above).
+        let old_lower_bound = self.lower_bound.get();
+        let median = old_lower_bound + ((self.upper_bound - old_lower_bound) / 2);
+        self.lower_bound.set(median);
+
+        FindConsumer {
+            find_op: self.find_op,
+            lower_bound: Cell::new(old_lower_bound),
+            upper_bound: median,
+            match_position: self.match_position,
+            best_found: self.best_found,
+        }
+    }
+
+    fn to_reducer(&self) -> Self::Reducer {
+        FindReducer { match_position: self.match_position }
+    }
+}
+
+struct FindFolder<'p, T, P: 'p> {
+    find_op: &'p P,
+    boundary: usize,
+    match_position: MatchPosition,
+    best_found: &'p AtomicUsize,
+    item: Option<T>,
+}
+
+impl<'p, P: 'p + Fn(&T) -> bool, T> Folder<T> for FindFolder<'p, T, P> {
+    type Result = Option<T>;
+
+    fn consume(mut self, item: T) -> Self {
+        let found_best_in_range = match self.match_position {
+            MatchPosition::Leftmost => self.item.is_some(),
+            MatchPosition::Rightmost => false,
+        };
+
+        if !found_best_in_range && (self.find_op)(&item) {
+            // Continuously try to set best_found until we succeed or we
+            // discover a better match was already found.
+            let mut current = self.best_found.load(Ordering::Relaxed);
+            loop {
+                if better_position(current, self.boundary, self.match_position) {
+                    break;
+                }
+                match self.best_found.compare_exchange_weak(current,
+                                                            self.boundary,
+                                                            Ordering::Relaxed,
+                                                            Ordering::Relaxed) {
+                    Ok(_) => {
+                        self.item = Some(item);
+                        break;
+                    }
+                    Err(v) => current = v,
+                }
+            }
+        }
+        self
+    }
+
+    fn complete(self) -> Self::Result {
+        self.item
+    }
+
+    fn full(&self) -> bool {
+        let found_best_in_range = match self.match_position {
+            MatchPosition::Leftmost => self.item.is_some(),
+            MatchPosition::Rightmost => false,
+        };
+
+        found_best_in_range ||
+        better_position(self.best_found.load(Ordering::Relaxed),
+                        self.boundary,
+                        self.match_position)
+    }
+}
+
+struct FindReducer {
+    match_position: MatchPosition,
+}
+
+impl<T> Reducer<Option<T>> for FindReducer {
+    fn reduce(self, left: Option<T>, right: Option<T>) -> Option<T> {
+        match self.match_position {
+            MatchPosition::Leftmost => left.or(right),
+            MatchPosition::Rightmost => right.or(left),
+        }
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/src/iter/find_first_last/test.rs
@@ -0,0 +1,149 @@
+use std::sync::atomic::AtomicUsize;
+use super::*;
+
+#[test]
+fn same_range_first_consumers_return_correct_answer() {
+    let find_op = |x: &i32| x % 2 == 0;
+    let first_found = AtomicUsize::new(usize::max_value());
+    let far_right_consumer = FindConsumer::new(&find_op, MatchPosition::Leftmost, &first_found);
+
+    // We save a consumer that will be far to the right of the main consumer (and therefore not
+    // sharing an index range with that consumer) for fullness testing
+    let consumer = far_right_consumer.split_off_left();
+
+    // split until we have an indivisible range
+    let bits_in_usize = usize::min_value().count_zeros();
+
+    for _ in 0..bits_in_usize {
+        consumer.split_off_left();
+    }
+
+    let reducer = consumer.to_reducer();
+    // the left and right folders should now have the same range, having
+    // exhausted the resolution of usize
+    let left_folder = consumer.split_off_left().into_folder();
+    let right_folder = consumer.into_folder();
+
+    let left_folder = left_folder.consume(0).consume(1);
+    assert_eq!(left_folder.boundary, right_folder.boundary);
+    // expect not full even though a better match has been found because the
+    // ranges are the same
+    assert!(!right_folder.full());
+    assert!(far_right_consumer.full());
+    let right_folder = right_folder.consume(2).consume(3);
+    assert_eq!(reducer.reduce(left_folder.complete(), right_folder.complete()),
+               Some(0));
+}
+
+#[test]
+fn same_range_last_consumers_return_correct_answer() {
+    let find_op = |x: &i32| x % 2 == 0;
+    let last_found = AtomicUsize::new(0);
+    let consumer = FindConsumer::new(&find_op, MatchPosition::Rightmost, &last_found);
+
+    // We save a consumer that will be far to the left of the main consumer (and therefore not
+    // sharing an index range with that consumer) for fullness testing
+    let far_left_consumer = consumer.split_off_left();
+
+    // split until we have an indivisible range
+    let bits_in_usize = usize::min_value().count_zeros();
+    for _ in 0..bits_in_usize {
+        consumer.split_off_left();
+    }
+
+    let reducer = consumer.to_reducer();
+    // due to the exact calculation in split_off_left, the very last consumer has a
+    // range of width 2, so we use the second-to-last consumer instead to get
+    // the same boundary on both folders
+    let consumer = consumer.split_off_left();
+    let left_folder = consumer.split_off_left().into_folder();
+    let right_folder = consumer.into_folder();
+    let right_folder = right_folder.consume(2).consume(3);
+    assert_eq!(left_folder.boundary, right_folder.boundary);
+    // expect not full even though a better match has been found because the
+    // ranges are the same
+    assert!(!left_folder.full());
+    assert!(far_left_consumer.full());
+    let left_folder = left_folder.consume(0).consume(1);
+    assert_eq!(reducer.reduce(left_folder.complete(), right_folder.complete()),
+               Some(2));
+}
+
+// These tests require that a folder be assigned to an iterator with more than
+// one element. We can't necessarily determine when that will happen for a given
+// input to find_first/find_last, so we test the folder directly here instead.
+#[test]
+fn find_first_folder_does_not_clobber_first_found() {
+    let best_found = AtomicUsize::new(usize::max_value());
+    let f = FindFolder {
+        find_op: &(|&_: &i32| -> bool { true }),
+        boundary: 0,
+        match_position: MatchPosition::Leftmost,
+        best_found: &best_found,
+        item: None,
+    };
+    let f = f.consume(0_i32).consume(1_i32).consume(2_i32);
+    assert!(f.full());
+    assert_eq!(f.complete(), Some(0_i32));
+}
+
+#[test]
+fn find_last_folder_yields_last_match() {
+    let best_found = AtomicUsize::new(0);
+    let f = FindFolder {
+        find_op: &(|&_: &i32| -> bool { true }),
+        boundary: 0,
+        match_position: MatchPosition::Rightmost,
+        best_found: &best_found,
+        item: None,
+    };
+    let f = f.consume(0_i32).consume(1_i32).consume(2_i32);
+    assert_eq!(f.complete(), Some(2_i32));
+}
+
+
+/// Produce a parallel iterator for 0u128..10²⁷
+fn octillion() -> impl ParallelIterator<Item = u128> {
+    (0u32..1_000_000_000)
+        .into_par_iter()
+        .with_max_len(1_000)
+        .map(|i| i as u64 * 1_000_000_000)
+        .flat_map(
+            |i| {
+                (0u32..1_000_000_000)
+                    .into_par_iter()
+                    .with_max_len(1_000)
+                    .map(move |j| i + j as u64)
+            }
+        )
+        .map(|i| i as u128 * 1_000_000_000)
+        .flat_map(
+            |i| {
+                (0u32..1_000_000_000)
+                    .into_par_iter()
+                    .with_max_len(1_000)
+                    .map(move |j| i + j as u128)
+            }
+        )
+}
+
+#[test]
+fn find_first_octillion() {
+    let x = octillion().find_first(|_| true);
+    assert_eq!(x, Some(0));
+}
+
+#[test]
+fn find_last_octillion() {
+    // FIXME: If we don't use at least two threads, then we end up walking
+    // through the entire iterator sequentially, without the benefit of any
+    // short-circuiting.  We probably don't want testing to wait that long. ;)
+    // It would be nice if `find_last` could prioritize the later splits,
+    // basically flipping the `join` args, without needing indexed `rev`.
+    // (or could we have an unindexed `rev`?)
+    let config = ::Configuration::new().num_threads(2);
+    let pool = ::ThreadPool::new(config).unwrap();
+
+    let x = pool.install(|| octillion().find_last(|_| true));
+    assert_eq!(x, Some(999999999999999999999999999));
+}
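These tests drive the consumer machinery directly; through the public API the same behaviour is reached via `find_first` and `find_last`. A minimal usage sketch, assuming the crate is consumed through `rayon::prelude::*`:

```rust
extern crate rayon;
use rayon::prelude::*;

fn main() {
    let v: Vec<i32> = (0..1000).collect();
    // The leftmost match wins for find_first, the rightmost for find_last.
    assert_eq!(v.par_iter().find_first(|&&x| x % 10 == 3), Some(&3));
    assert_eq!(v.par_iter().find_last(|&&x| x % 10 == 3), Some(&993));
}
```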
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/src/iter/flat_map.rs
@@ -0,0 +1,151 @@
+use super::internal::*;
+use super::*;
+
+/// `FlatMap` maps each element to an iterator, then flattens these iterators together.
+/// This struct is created by the [`flat_map()`] method on [`ParallelIterator`]
+///
+/// [`flat_map()`]: trait.ParallelIterator.html#method.flat_map
+/// [`ParallelIterator`]: trait.ParallelIterator.html
+#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
+pub struct FlatMap<I: ParallelIterator, F> {
+    base: I,
+    map_op: F,
+}
+
+/// Create a new `FlatMap` iterator.
+///
+/// NB: a free fn because it is NOT part of the end-user API.
+pub fn new<I, F>(base: I, map_op: F) -> FlatMap<I, F>
+    where I: ParallelIterator
+{
+    FlatMap {
+        base: base,
+        map_op: map_op,
+    }
+}
+
+impl<I, F, PI> ParallelIterator for FlatMap<I, F>
+    where I: ParallelIterator,
+          F: Fn(I::Item) -> PI + Sync + Send,
+          PI: IntoParallelIterator
+{
+    type Item = PI::Item;
+
+    fn drive_unindexed<C>(self, consumer: C) -> C::Result
+        where C: UnindexedConsumer<Self::Item>
+    {
+        let consumer = FlatMapConsumer {
+            base: consumer,
+            map_op: &self.map_op,
+        };
+        self.base.drive_unindexed(consumer)
+    }
+}
+
+/// ////////////////////////////////////////////////////////////////////////
+/// Consumer implementation
+
+struct FlatMapConsumer<'f, C, F: 'f> {
+    base: C,
+    map_op: &'f F,
+}
+
+impl<'f, C, F> FlatMapConsumer<'f, C, F> {
+    fn new(base: C, map_op: &'f F) -> Self {
+        FlatMapConsumer {
+            base: base,
+            map_op: map_op,
+        }
+    }
+}
+
+impl<'f, T, U, C, F> Consumer<T> for FlatMapConsumer<'f, C, F>
+    where C: UnindexedConsumer<U::Item>,
+          F: Fn(T) -> U + Sync,
+          U: IntoParallelIterator
+{
+    type Folder = FlatMapFolder<'f, C, F, C::Result>;
+    type Reducer = C::Reducer;
+    type Result = C::Result;
+
+    fn split_at(self, index: usize) -> (Self, Self, C::Reducer) {
+        let (left, right, reducer) = self.base.split_at(index);
+        (FlatMapConsumer::new(left, self.map_op),
+         FlatMapConsumer::new(right, self.map_op),
+         reducer)
+    }
+
+    fn into_folder(self) -> Self::Folder {
+        FlatMapFolder {
+            base: self.base,
+            map_op: self.map_op,
+            previous: None,
+        }
+    }
+
+    fn full(&self) -> bool {
+        self.base.full()
+    }
+}
+
+impl<'f, T, U, C, F> UnindexedConsumer<T> for FlatMapConsumer<'f, C, F>
+    where C: UnindexedConsumer<U::Item>,
+          F: Fn(T) -> U + Sync,
+          U: IntoParallelIterator
+{
+    fn split_off_left(&self) -> Self {
+        FlatMapConsumer::new(self.base.split_off_left(), self.map_op)
+    }
+
+    fn to_reducer(&self) -> Self::Reducer {
+        self.base.to_reducer()
+    }
+}
+
+
+struct FlatMapFolder<'f, C, F: 'f, R> {
+    base: C,
+    map_op: &'f F,
+    previous: Option<R>,
+}
+
+impl<'f, T, U, C, F> Folder<T> for FlatMapFolder<'f, C, F, C::Result>
+    where C: UnindexedConsumer<U::Item>,
+          F: Fn(T) -> U + Sync,
+          U: IntoParallelIterator
+{
+    type Result = C::Result;
+
+    fn consume(self, item: T) -> Self {
+        let map_op = self.map_op;
+        let par_iter = map_op(item).into_par_iter();
+        let result = par_iter.drive_unindexed(self.base.split_off_left());
+
+        // We expect that `previous` is `None`, because we drive
+        // the cost up so high, but we handle `Some` anyway, just in case.
+        let previous = match self.previous {
+            None => Some(result),
+            Some(previous) => {
+                let reducer = self.base.to_reducer();
+                Some(reducer.reduce(previous, result))
+            }
+        };
+
+        FlatMapFolder {
+            base: self.base,
+            map_op: map_op,
+            previous: previous,
+        }
+    }
+
+    fn complete(self) -> Self::Result {
+        match self.previous {
+            Some(previous) => previous,
+            None => self.base.into_folder().complete(),
+        }
+    }
+
+    fn full(&self) -> bool {
+        self.base.full()
+    }
+}
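For reference, a small usage sketch of the adaptor defined above via the public `flat_map` method; the values and the final sort are purely illustrative:

```rust
extern crate rayon;
use rayon::prelude::*;

fn main() {
    // Each element expands to a small collection; the results are flattened
    // into a single parallel iterator before collection.
    let mut v: Vec<i32> = (0..3).into_par_iter()
        .flat_map(|i| vec![i, i + 100])
        .collect();
    // Sort so the assertion does not depend on how the flattened chunks were ordered.
    v.sort();
    assert_eq!(v, vec![0, 1, 2, 100, 101, 102]);
}
```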
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/src/iter/fold.rs
@@ -0,0 +1,219 @@
+use super::internal::*;
+use super::*;
+
+pub fn fold<U, I, ID, F>(base: I, identity: ID, fold_op: F) -> Fold<I, ID, F>
+    where I: ParallelIterator,
+          F: Fn(U, I::Item) -> U + Sync + Send,
+          ID: Fn() -> U + Sync + Send,
+          U: Send
+{
+    Fold {
+        base: base,
+        identity: identity,
+        fold_op: fold_op,
+    }
+}
+
+/// `Fold` is an iterator that applies a function over an iterator producing a single value.
+/// This struct is created by the [`fold()`] method on [`ParallelIterator`]
+///
+/// [`fold()`]: trait.ParallelIterator.html#method.fold
+/// [`ParallelIterator`]: trait.ParallelIterator.html
+#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
+pub struct Fold<I, ID, F> {
+    base: I,
+    identity: ID,
+    fold_op: F,
+}
+
+impl<U, I, ID, F> ParallelIterator for Fold<I, ID, F>
+    where I: ParallelIterator,
+          F: Fn(U, I::Item) -> U + Sync + Send,
+          ID: Fn() -> U + Sync + Send,
+          U: Send
+{
+    type Item = U;
+
+    fn drive_unindexed<C>(self, consumer: C) -> C::Result
+        where C: UnindexedConsumer<Self::Item>
+    {
+        let consumer1 = FoldConsumer {
+            base: consumer,
+            fold_op: &self.fold_op,
+            identity: &self.identity,
+        };
+        self.base.drive_unindexed(consumer1)
+    }
+}
+
+struct FoldConsumer<'c, C, ID: 'c, F: 'c> {
+    base: C,
+    fold_op: &'c F,
+    identity: &'c ID,
+}
+
+impl<'r, U, T, C, ID, F> Consumer<T> for FoldConsumer<'r, C, ID, F>
+    where C: Consumer<U>,
+          F: Fn(U, T) -> U + Sync,
+          ID: Fn() -> U + Sync,
+          U: Send
+{
+    type Folder = FoldFolder<'r, C::Folder, U, F>;
+    type Reducer = C::Reducer;
+    type Result = C::Result;
+
+    fn split_at(self, index: usize) -> (Self, Self, Self::Reducer) {
+        let (left, right, reducer) = self.base.split_at(index);
+        (FoldConsumer { base: left, ..self }, FoldConsumer { base: right, ..self }, reducer)
+    }
+
+    fn into_folder(self) -> Self::Folder {
+        FoldFolder {
+            base: self.base.into_folder(),
+            item: (self.identity)(),
+            fold_op: self.fold_op,
+        }
+    }
+
+    fn full(&self) -> bool {
+        self.base.full()
+    }
+}
+
+impl<'r, U, T, C, ID, F> UnindexedConsumer<T> for FoldConsumer<'r, C, ID, F>
+    where C: UnindexedConsumer<U>,
+          F: Fn(U, T) -> U + Sync,
+          ID: Fn() -> U + Sync,
+          U: Send
+{
+    fn split_off_left(&self) -> Self {
+        FoldConsumer { base: self.base.split_off_left(), ..*self }
+    }
+
+    fn to_reducer(&self) -> Self::Reducer {
+        self.base.to_reducer()
+    }
+}
+
+struct FoldFolder<'r, C, ID, F: 'r> {
+    base: C,
+    fold_op: &'r F,
+    item: ID,
+}
+
+impl<'r, C, ID, F, T> Folder<T> for FoldFolder<'r, C, ID, F>
+    where C: Folder<ID>,
+          F: Fn(ID, T) -> ID + Sync
+{
+    type Result = C::Result;
+
+    fn consume(self, item: T) -> Self {
+        let item = (self.fold_op)(self.item, item);
+        FoldFolder {
+            base: self.base,
+            fold_op: self.fold_op,
+            item: item,
+        }
+    }
+
+    fn complete(self) -> C::Result {
+        self.base.consume(self.item).complete()
+    }
+
+    fn full(&self) -> bool {
+        self.base.full()
+    }
+}
+
+// ///////////////////////////////////////////////////////////////////////////
+
+pub fn fold_with<U, I, F>(base: I, item: U, fold_op: F) -> FoldWith<I, U, F>
+    where I: ParallelIterator,
+          F: Fn(U, I::Item) -> U + Sync,
+          U: Send + Clone
+{
+    FoldWith {
+        base: base,
+        item: item,
+        fold_op: fold_op,
+    }
+}
+
+/// `FoldWith` is an iterator that applies a function over an iterator producing a single value.
+/// This struct is created by the [`fold_with()`] method on [`ParallelIterator`]
+///
+/// [`fold_with()`]: trait.ParallelIterator.html#method.fold_with
+/// [`ParallelIterator`]: trait.ParallelIterator.html
+#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
+pub struct FoldWith<I, U, F> {
+    base: I,
+    item: U,
+    fold_op: F,
+}
+
+impl<U, I, F> ParallelIterator for FoldWith<I, U, F>
+    where I: ParallelIterator,
+          F: Fn(U, I::Item) -> U + Sync + Send,
+          U: Send + Clone
+{
+    type Item = U;
+
+    fn drive_unindexed<C>(self, consumer: C) -> C::Result
+        where C: UnindexedConsumer<Self::Item>
+    {
+        let consumer1 = FoldWithConsumer {
+            base: consumer,
+            item: self.item,
+            fold_op: &self.fold_op,
+        };
+        self.base.drive_unindexed(consumer1)
+    }
+}
+
+struct FoldWithConsumer<'c, C, U, F: 'c> {
+    base: C,
+    item: U,
+    fold_op: &'c F,
+}
+
+impl<'r, U, T, C, F> Consumer<T> for FoldWithConsumer<'r, C, U, F>
+    where C: Consumer<U>,
+          F: Fn(U, T) -> U + Sync,
+          U: Send + Clone
+{
+    type Folder = FoldFolder<'r, C::Folder, U, F>;
+    type Reducer = C::Reducer;
+    type Result = C::Result;
+
+    fn split_at(self, index: usize) -> (Self, Self, Self::Reducer) {
+        let (left, right, reducer) = self.base.split_at(index);
+        (FoldWithConsumer { base: left, item: self.item.clone(), ..self },
+         FoldWithConsumer { base: right, ..self }, reducer)
+    }
+
+    fn into_folder(self) -> Self::Folder {
+        FoldFolder {
+            base: self.base.into_folder(),
+            item: self.item,
+            fold_op: self.fold_op,
+        }
+    }
+
+    fn full(&self) -> bool {
+        self.base.full()
+    }
+}
+
+impl<'r, U, T, C, F> UnindexedConsumer<T> for FoldWithConsumer<'r, C, U, F>
+    where C: UnindexedConsumer<U>,
+          F: Fn(U, T) -> U + Sync,
+          U: Send + Clone
+{
+    fn split_off_left(&self) -> Self {
+        FoldWithConsumer { base: self.base.split_off_left(), item: self.item.clone(), ..*self }
+    }
+
+    fn to_reducer(&self) -> Self::Reducer {
+        self.base.to_reducer()
+    }
+}
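A brief usage sketch of `fold` as exposed on `ParallelIterator`: each split accumulates a partial value, and the partials are combined afterwards (here with `sum`); the numbers are illustrative:

```rust
extern crate rayon;
use rayon::prelude::*;

fn main() {
    // fold() yields one partial sum per underlying split; sum() combines them.
    let total: u32 = (0..1000u32).into_par_iter()
        .fold(|| 0u32, |acc, x| acc + x)
        .sum();
    assert_eq!(total, 499_500);
}
```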
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/src/iter/for_each.rs
@@ -0,0 +1,65 @@
+use super::ParallelIterator;
+use super::internal::*;
+use super::noop::*;
+
+pub fn for_each<I, F, T>(pi: I, op: &F)
+    where I: ParallelIterator<Item = T>,
+          F: Fn(T) + Sync,
+          T: Send
+{
+    let consumer = ForEachConsumer { op: op };
+    pi.drive_unindexed(consumer)
+}
+
+struct ForEachConsumer<'f, F: 'f> {
+    op: &'f F,
+}
+
+impl<'f, F, T> Consumer<T> for ForEachConsumer<'f, F>
+    where F: Fn(T) + Sync
+{
+    type Folder = ForEachConsumer<'f, F>;
+    type Reducer = NoopReducer;
+    type Result = ();
+
+    fn split_at(self, _index: usize) -> (Self, Self, NoopReducer) {
+        (self.split_off_left(), self, NoopReducer)
+    }
+
+    fn into_folder(self) -> Self {
+        self
+    }
+
+    fn full(&self) -> bool {
+        false
+    }
+}
+
+impl<'f, F, T> Folder<T> for ForEachConsumer<'f, F>
+    where F: Fn(T) + Sync
+{
+    type Result = ();
+
+    fn consume(self, item: T) -> Self {
+        (self.op)(item);
+        self
+    }
+
+    fn complete(self) {}
+
+    fn full(&self) -> bool {
+        false
+    }
+}
+
+impl<'f, F, T> UnindexedConsumer<T> for ForEachConsumer<'f, F>
+    where F: Fn(T) + Sync
+{
+    fn split_off_left(&self) -> Self {
+        ForEachConsumer { op: self.op }
+    }
+
+    fn to_reducer(&self) -> NoopReducer {
+        NoopReducer
+    }
+}
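`for_each` runs the closure purely for its side effects. A sketch using an atomic counter, since plain `&mut` state cannot be shared across the parallel jobs; the counter and predicate are illustrative:

```rust
extern crate rayon;
use rayon::prelude::*;
use std::sync::atomic::{AtomicUsize, Ordering};

fn main() {
    let evens = AtomicUsize::new(0);
    (0..100).into_par_iter().for_each(|i| {
        if i % 2 == 0 {
            evens.fetch_add(1, Ordering::Relaxed);
        }
    });
    assert_eq!(evens.load(Ordering::Relaxed), 50);
}
```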
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/src/iter/from_par_iter.rs
@@ -0,0 +1,172 @@
+use super::{FromParallelIterator, IntoParallelIterator, ParallelExtend};
+
+use std::borrow::Cow;
+use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet};
+use std::hash::{BuildHasher, Hash};
+use std::collections::LinkedList;
+use std::collections::{BinaryHeap, VecDeque};
+
+
+/// Create an empty default collection and extend it.
+fn collect_extended<C, I>(par_iter: I) -> C
+    where I: IntoParallelIterator,
+          C: ParallelExtend<I::Item> + Default
+{
+    let mut collection = C::default();
+    collection.par_extend(par_iter);
+    collection
+}
+
+
+/// Collect items from a parallel iterator into a vector.
+impl<T> FromParallelIterator<T> for Vec<T>
+    where T: Send
+{
+    fn from_par_iter<I>(par_iter: I) -> Self
+        where I: IntoParallelIterator<Item = T>
+    {
+        collect_extended(par_iter)
+    }
+}
+
+/// Collect items from a parallel iterator into a vecdeque.
+impl<T> FromParallelIterator<T> for VecDeque<T>
+    where T: Send
+{
+    fn from_par_iter<I>(par_iter: I) -> Self
+        where I: IntoParallelIterator<Item = T>
+    {
+        Vec::from_par_iter(par_iter).into()
+    }
+}
+
+/// Collect items from a parallel iterator into a binaryheap.
+/// The heap-ordering is calculated serially after all items are collected.
+impl<T> FromParallelIterator<T> for BinaryHeap<T>
+    where T: Ord + Send
+{
+    fn from_par_iter<I>(par_iter: I) -> Self
+        where I: IntoParallelIterator<Item = T>
+    {
+        Vec::from_par_iter(par_iter).into()
+    }
+}
+
+/// Collect items from a parallel iterator into a freshly allocated
+/// linked list.
+impl<T> FromParallelIterator<T> for LinkedList<T>
+    where T: Send
+{
+    fn from_par_iter<I>(par_iter: I) -> Self
+        where I: IntoParallelIterator<Item = T>
+    {
+        collect_extended(par_iter)
+    }
+}
+
+/// Collect (key, value) pairs from a parallel iterator into a
+/// hashmap. If multiple pairs correspond to the same key, then the
+/// ones produced earlier in the parallel iterator will be
+/// overwritten, just as with a sequential iterator.
+impl<K, V, S> FromParallelIterator<(K, V)> for HashMap<K, V, S>
+    where K: Eq + Hash + Send,
+          V: Send,
+          S: BuildHasher + Default + Send
+{
+    fn from_par_iter<I>(par_iter: I) -> Self
+        where I: IntoParallelIterator<Item = (K, V)>
+    {
+        collect_extended(par_iter)
+    }
+}
+
+/// Collect (key, value) pairs from a parallel iterator into a
+/// btreemap. If multiple pairs correspond to the same key, then the
+/// ones produced earlier in the parallel iterator will be
+/// overwritten, just as with a sequential iterator.
+impl<K, V> FromParallelIterator<(K, V)> for BTreeMap<K, V>
+    where K: Ord + Send,
+          V: Send
+{
+    fn from_par_iter<I>(par_iter: I) -> Self
+        where I: IntoParallelIterator<Item = (K, V)>
+    {
+        collect_extended(par_iter)
+    }
+}
+
+/// Collect values from a parallel iterator into a hashset.
+impl<V, S> FromParallelIterator<V> for HashSet<V, S>
+    where V: Eq + Hash + Send,
+          S: BuildHasher + Default + Send
+{
+    fn from_par_iter<I>(par_iter: I) -> Self
+        where I: IntoParallelIterator<Item = V>
+    {
+        collect_extended(par_iter)
+    }
+}
+
+/// Collect values from a parallel iterator into a btreeset.
+impl<V> FromParallelIterator<V> for BTreeSet<V>
+    where V: Send + Ord
+{
+    fn from_par_iter<I>(par_iter: I) -> Self
+        where I: IntoParallelIterator<Item = V>
+    {
+        collect_extended(par_iter)
+    }
+}
+
+/// Collect characters from a parallel iterator into a string.
+impl FromParallelIterator<char> for String {
+    fn from_par_iter<I>(par_iter: I) -> Self
+        where I: IntoParallelIterator<Item = char>
+    {
+        collect_extended(par_iter)
+    }
+}
+
+/// Collect characters from a parallel iterator into a string.
+impl<'a> FromParallelIterator<&'a char> for String {
+    fn from_par_iter<I>(par_iter: I) -> Self
+        where I: IntoParallelIterator<Item = &'a char>
+    {
+        collect_extended(par_iter)
+    }
+}
+
+/// Collect string slices from a parallel iterator into a string.
+impl<'a> FromParallelIterator<&'a str> for String {
+    fn from_par_iter<I>(par_iter: I) -> Self
+        where I: IntoParallelIterator<Item = &'a str>
+    {
+        collect_extended(par_iter)
+    }
+}
+
+/// Collect strings from a parallel iterator into one large string.
+impl FromParallelIterator<String> for String {
+    fn from_par_iter<I>(par_iter: I) -> Self
+        where I: IntoParallelIterator<Item = String>
+    {
+        collect_extended(par_iter)
+    }
+}
+
+/// Collect an arbitrary `Cow` collection.
+///
+/// Note, the standard library only has `FromIterator` for `Cow<'a, str>` and
+/// `Cow<'a, [T]>`, because no one thought to add a blanket implementation
+/// before it was stabilized.
+impl<'a, C: ?Sized, T> FromParallelIterator<T> for Cow<'a, C>
+    where C: ToOwned,
+          C::Owned: FromParallelIterator<T>,
+          T: Send
+{
+    fn from_par_iter<I>(par_iter: I) -> Self
+        where I: IntoParallelIterator<Item = T>
+    {
+        Cow::Owned(C::Owned::from_par_iter(par_iter))
+    }
+}
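A usage sketch of the `HashMap` impl above, relying on the documented rule that pairs produced later overwrite earlier ones when keys collide; the sample data is illustrative:

```rust
extern crate rayon;
use rayon::prelude::*;
use std::collections::HashMap;

fn main() {
    // Per the doc comment above, when keys collide the pair produced later
    // in the iterator wins, matching sequential collect() behaviour.
    let pairs = vec![(1, "first"), (2, "only"), (1, "second")];
    let map: HashMap<i32, &str> = pairs.into_par_iter().collect();
    assert_eq!(map[&1], "second");
    assert_eq!(map[&2], "only");
}
```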
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/src/iter/inspect.rs
@@ -0,0 +1,221 @@
+use super::internal::*;
+use super::*;
+
+use std::iter;
+
+
+/// `Inspect` is an iterator that calls a function with a reference to each
+/// element before yielding it.
+///
+/// This struct is created by the [`inspect()`] method on [`ParallelIterator`]
+///
+/// [`inspect()`]: trait.ParallelIterator.html#method.inspect
+/// [`ParallelIterator`]: trait.ParallelIterator.html
+#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
+pub struct Inspect<I: ParallelIterator, F> {
+    base: I,
+    inspect_op: F,
+}
+
+/// Create a new `Inspect` iterator.
+///
+/// NB: a free fn because it is NOT part of the end-user API.
+pub fn new<I, F>(base: I, inspect_op: F) -> Inspect<I, F>
+    where I: ParallelIterator
+{
+    Inspect {
+        base: base,
+        inspect_op: inspect_op,
+    }
+}
+
+impl<I, F> ParallelIterator for Inspect<I, F>
+    where I: ParallelIterator,
+          F: Fn(&I::Item) + Sync + Send
+{
+    type Item = I::Item;
+
+    fn drive_unindexed<C>(self, consumer: C) -> C::Result
+        where C: UnindexedConsumer<Self::Item>
+    {
+        let consumer1 = InspectConsumer::new(consumer, &self.inspect_op);
+        self.base.drive_unindexed(consumer1)
+    }
+
+    fn opt_len(&mut self) -> Option<usize> {
+        self.base.opt_len()
+    }
+}
+
+impl<I, F> IndexedParallelIterator for Inspect<I, F>
+    where I: IndexedParallelIterator,
+          F: Fn(&I::Item) + Sync + Send
+{
+    fn drive<C>(self, consumer: C) -> C::Result
+        where C: Consumer<Self::Item>
+    {
+        let consumer1 = InspectConsumer::new(consumer, &self.inspect_op);
+        self.base.drive(consumer1)
+    }
+
+    fn len(&mut self) -> usize {
+        self.base.len()
+    }
+
+    fn with_producer<CB>(self, callback: CB) -> CB::Output
+        where CB: ProducerCallback<Self::Item>
+    {
+        return self.base
+                   .with_producer(Callback {
+                                      callback: callback,
+                                      inspect_op: self.inspect_op,
+                                  });
+
+        struct Callback<CB, F> {
+            callback: CB,
+            inspect_op: F,
+        }
+
+        impl<T, F, CB> ProducerCallback<T> for Callback<CB, F>
+            where CB: ProducerCallback<T>,
+                  F: Fn(&T) + Sync
+        {
+            type Output = CB::Output;
+
+            fn callback<P>(self, base: P) -> CB::Output
+                where P: Producer<Item = T>
+            {
+                let producer = InspectProducer {
+                    base: base,
+                    inspect_op: &self.inspect_op,
+                };
+                self.callback.callback(producer)
+            }
+        }
+    }
+}
+
+/// ////////////////////////////////////////////////////////////////////////
+
+struct InspectProducer<'f, P, F: 'f> {
+    base: P,
+    inspect_op: &'f F,
+}
+
+impl<'f, P, F> Producer for InspectProducer<'f, P, F>
+    where P: Producer,
+          F: Fn(&P::Item) + Sync
+{
+    type Item = P::Item;
+    type IntoIter = iter::Inspect<P::IntoIter, &'f F>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        self.base.into_iter().inspect(self.inspect_op)
+    }
+
+    fn min_len(&self) -> usize {
+        self.base.min_len()
+    }
+
+    fn max_len(&self) -> usize {
+        self.base.max_len()
+    }
+
+    fn split_at(self, index: usize) -> (Self, Self) {
+        let (left, right) = self.base.split_at(index);
+        (InspectProducer {
+             base: left,
+             inspect_op: self.inspect_op,
+         },
+         InspectProducer {
+             base: right,
+             inspect_op: self.inspect_op,
+         })
+    }
+}
+
+
+/// ////////////////////////////////////////////////////////////////////////
+/// Consumer implementation
+
+struct InspectConsumer<'f, C, F: 'f> {
+    base: C,
+    inspect_op: &'f F,
+}
+
+impl<'f, C, F> InspectConsumer<'f, C, F> {
+    fn new(base: C, inspect_op: &'f F) -> Self {
+        InspectConsumer {
+            base: base,
+            inspect_op: inspect_op,
+        }
+    }
+}
+
+impl<'f, T, C, F> Consumer<T> for InspectConsumer<'f, C, F>
+    where C: Consumer<T>,
+          F: Fn(&T) + Sync
+{
+    type Folder = InspectFolder<'f, C::Folder, F>;
+    type Reducer = C::Reducer;
+    type Result = C::Result;
+
+    fn split_at(self, index: usize) -> (Self, Self, Self::Reducer) {
+        let (left, right, reducer) = self.base.split_at(index);
+        (InspectConsumer::new(left, self.inspect_op),
+         InspectConsumer::new(right, self.inspect_op),
+         reducer)
+    }
+
+    fn into_folder(self) -> Self::Folder {
+        InspectFolder {
+            base: self.base.into_folder(),
+            inspect_op: self.inspect_op,
+        }
+    }
+
+    fn full(&self) -> bool {
+        self.base.full()
+    }
+}
+
+impl<'f, T, C, F> UnindexedConsumer<T> for InspectConsumer<'f, C, F>
+    where C: UnindexedConsumer<T>,
+          F: Fn(&T) + Sync
+{
+    fn split_off_left(&self) -> Self {
+        InspectConsumer::new(self.base.split_off_left(), &self.inspect_op)
+    }
+
+    fn to_reducer(&self) -> Self::Reducer {
+        self.base.to_reducer()
+    }
+}
+
+struct InspectFolder<'f, C, F: 'f> {
+    base: C,
+    inspect_op: &'f F,
+}
+
+impl<'f, T, C, F> Folder<T> for InspectFolder<'f, C, F>
+    where C: Folder<T>,
+          F: Fn(&T)
+{
+    type Result = C::Result;
+
+    fn consume(self, item: T) -> Self {
+        (self.inspect_op)(&item);
+        InspectFolder {
+            base: self.base.consume(item),
+            inspect_op: self.inspect_op,
+        }
+    }
+
+    fn complete(self) -> C::Result {
+        self.base.complete()
+    }
+
+    fn full(&self) -> bool {
+        self.base.full()
+    }
+}
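`inspect` observes each item by reference and passes it through unchanged, which makes it handy for debugging pipelines. A sketch that counts how many items flow past; the counter is illustrative:

```rust
extern crate rayon;
use rayon::prelude::*;
use std::sync::atomic::{AtomicUsize, Ordering};

fn main() {
    let seen = AtomicUsize::new(0);
    // inspect() sees each item by reference and forwards it untouched.
    let sum: i32 = (1..5).into_par_iter()
        .inspect(|_x| { seen.fetch_add(1, Ordering::Relaxed); })
        .sum();
    assert_eq!(sum, 10);
    assert_eq!(seen.load(Ordering::Relaxed), 4);
}
```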
rename from third_party/rust/rayon/src/iter/internal.rs
rename to third_party/rust/rayon-0.8.2/src/iter/internal.rs
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/src/iter/len.rs
@@ -0,0 +1,242 @@
+use super::internal::*;
+use super::*;
+use std::cmp;
+
+/// `MinLen` is an iterator that imposes a minimum length on iterator splits.
+/// This struct is created by the [`min_len()`] method on [`IndexedParallelIterator`]
+///
+/// [`min_len()`]: trait.IndexedParallelIterator.html#method.min_len
+/// [`IndexedParallelIterator`]: trait.IndexedParallelIterator.html
+#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
+pub struct MinLen<I: IndexedParallelIterator> {
+    base: I,
+    min: usize,
+}
+
+/// Create a new `MinLen` iterator.
+///
+/// NB: a free fn because it is NOT part of the end-user API.
+pub fn new_min_len<I>(base: I, min: usize) -> MinLen<I>
+    where I: IndexedParallelIterator
+{
+    MinLen {
+        base: base,
+        min: min,
+    }
+}
+
+impl<I> ParallelIterator for MinLen<I>
+    where I: IndexedParallelIterator
+{
+    type Item = I::Item;
+
+    fn drive_unindexed<C>(self, consumer: C) -> C::Result
+        where C: UnindexedConsumer<Self::Item>
+    {
+        bridge(self, consumer)
+    }
+
+    fn opt_len(&mut self) -> Option<usize> {
+        Some(self.len())
+    }
+}
+
+impl<I> IndexedParallelIterator for MinLen<I>
+    where I: IndexedParallelIterator
+{
+    fn drive<C: Consumer<Self::Item>>(self, consumer: C) -> C::Result {
+        bridge(self, consumer)
+    }
+
+    fn len(&mut self) -> usize {
+        self.base.len()
+    }
+
+    fn with_producer<CB>(self, callback: CB) -> CB::Output
+        where CB: ProducerCallback<Self::Item>
+    {
+        return self.base.with_producer(Callback {
+                                           callback: callback,
+                                           min: self.min,
+                                       });
+
+        struct Callback<CB> {
+            callback: CB,
+            min: usize,
+        }
+
+        impl<T, CB> ProducerCallback<T> for Callback<CB>
+            where CB: ProducerCallback<T>
+        {
+            type Output = CB::Output;
+            fn callback<P>(self, base: P) -> CB::Output
+                where P: Producer<Item = T>
+            {
+                let producer = MinLenProducer {
+                    base: base,
+                    min: self.min,
+                };
+                self.callback.callback(producer)
+            }
+        }
+    }
+}
+
+/// ////////////////////////////////////////////////////////////////////////
+/// `MinLenProducer` implementation
+
+struct MinLenProducer<P> {
+    base: P,
+    min: usize,
+}
+
+impl<P> Producer for MinLenProducer<P>
+    where P: Producer
+{
+    type Item = P::Item;
+    type IntoIter = P::IntoIter;
+
+    fn into_iter(self) -> Self::IntoIter {
+        self.base.into_iter()
+    }
+
+    fn min_len(&self) -> usize {
+        cmp::max(self.min, self.base.min_len())
+    }
+
+    fn max_len(&self) -> usize {
+        self.base.max_len()
+    }
+
+    fn split_at(self, index: usize) -> (Self, Self) {
+        let (left, right) = self.base.split_at(index);
+        (MinLenProducer {
+             base: left,
+             min: self.min,
+         },
+         MinLenProducer {
+             base: right,
+             min: self.min,
+         })
+    }
+}
+
+
+/// `MaxLen` is an iterator that imposes a maximum length on iterator splits.
+/// This struct is created by the [`max_len()`] method on [`IndexedParallelIterator`]
+///
+/// [`max_len()`]: trait.IndexedParallelIterator.html#method.max_len
+/// [`IndexedParallelIterator`]: trait.IndexedParallelIterator.html
+#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
+pub struct MaxLen<I: IndexedParallelIterator> {
+    base: I,
+    max: usize,
+}
+
+/// Create a new `MaxLen` iterator.
+///
+/// NB: a free fn because it is NOT part of the end-user API.
+pub fn new_max_len<I>(base: I, max: usize) -> MaxLen<I>
+    where I: IndexedParallelIterator
+{
+    MaxLen {
+        base: base,
+        max: max,
+    }
+}
+
+impl<I> ParallelIterator for MaxLen<I>
+    where I: IndexedParallelIterator
+{
+    type Item = I::Item;
+
+    fn drive_unindexed<C>(self, consumer: C) -> C::Result
+        where C: UnindexedConsumer<Self::Item>
+    {
+        bridge(self, consumer)
+    }
+
+    fn opt_len(&mut self) -> Option<usize> {
+        Some(self.len())
+    }
+}
+
+impl<I> IndexedParallelIterator for MaxLen<I>
+    where I: IndexedParallelIterator
+{
+    fn drive<C: Consumer<Self::Item>>(self, consumer: C) -> C::Result {
+        bridge(self, consumer)
+    }
+
+    fn len(&mut self) -> usize {
+        self.base.len()
+    }
+
+    fn with_producer<CB>(self, callback: CB) -> CB::Output
+        where CB: ProducerCallback<Self::Item>
+    {
+        return self.base.with_producer(Callback {
+                                           callback: callback,
+                                           max: self.max,
+                                       });
+
+        struct Callback<CB> {
+            callback: CB,
+            max: usize,
+        }
+
+        impl<T, CB> ProducerCallback<T> for Callback<CB>
+            where CB: ProducerCallback<T>
+        {
+            type Output = CB::Output;
+            fn callback<P>(self, base: P) -> CB::Output
+                where P: Producer<Item = T>
+            {
+                let producer = MaxLenProducer {
+                    base: base,
+                    max: self.max,
+                };
+                self.callback.callback(producer)
+            }
+        }
+    }
+}
+
+/// ////////////////////////////////////////////////////////////////////////
+/// `MaxLenProducer` implementation
+
+struct MaxLenProducer<P> {
+    base: P,
+    max: usize,
+}
+
+impl<P> Producer for MaxLenProducer<P>
+    where P: Producer
+{
+    type Item = P::Item;
+    type IntoIter = P::IntoIter;
+
+    fn into_iter(self) -> Self::IntoIter {
+        self.base.into_iter()
+    }
+
+    fn min_len(&self) -> usize {
+        self.base.min_len()
+    }
+
+    fn max_len(&self) -> usize {
+        cmp::min(self.max, self.base.max_len())
+    }
+
+    fn split_at(self, index: usize) -> (Self, Self) {
+        let (left, right) = self.base.split_at(index);
+        (MaxLenProducer {
+             base: left,
+             max: self.max,
+         },
+         MaxLenProducer {
+             base: right,
+             max: self.max,
+         })
+    }
+}
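These adaptors only clamp the split lengths that the producer reports, so the computed results are unchanged. A sketch of capping split size; the test file earlier in this patch calls the method `with_max_len`, which is the name assumed here:

```rust
extern crate rayon;
use rayon::prelude::*;

fn main() {
    // Capping splits at 100 items limits how large each work chunk can get;
    // the result is identical with or without the cap.
    let sum: u64 = (0..10_000u64).into_par_iter()
        .with_max_len(100)
        .sum();
    assert_eq!(sum, 9_999 * 10_000 / 2);
}
```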
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/src/iter/map.rs
@@ -0,0 +1,222 @@
+use super::internal::*;
+use super::*;
+
+use std::iter;
+
+
+/// `Map` is an iterator that transforms the elements of an underlying iterator.
+///
+/// This struct is created by the [`map()`] method on [`ParallelIterator`]
+///
+/// [`map()`]: trait.ParallelIterator.html#method.map
+/// [`ParallelIterator`]: trait.ParallelIterator.html
+#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
+pub struct Map<I: ParallelIterator, F> {
+    base: I,
+    map_op: F,
+}
+
+/// Create a new `Map` iterator.
+///
+/// NB: a free fn because it is NOT part of the end-user API.
+pub fn new<I, F>(base: I, map_op: F) -> Map<I, F>
+    where I: ParallelIterator
+{
+    Map {
+        base: base,
+        map_op: map_op,
+    }
+}
+
+impl<I, F, R> ParallelIterator for Map<I, F>
+    where I: ParallelIterator,
+          F: Fn(I::Item) -> R + Sync + Send,
+          R: Send
+{
+    type Item = F::Output;
+
+    fn drive_unindexed<C>(self, consumer: C) -> C::Result
+        where C: UnindexedConsumer<Self::Item>
+    {
+        let consumer1 = MapConsumer::new(consumer, &self.map_op);
+        self.base.drive_unindexed(consumer1)
+    }
+
+    fn opt_len(&mut self) -> Option<usize> {
+        self.base.opt_len()
+    }
+}
+
+impl<I, F, R> IndexedParallelIterator for Map<I, F>
+    where I: IndexedParallelIterator,
+          F: Fn(I::Item) -> R + Sync + Send,
+          R: Send
+{
+    fn drive<C>(self, consumer: C) -> C::Result
+        where C: Consumer<Self::Item>
+    {
+        let consumer1 = MapConsumer::new(consumer, &self.map_op);
+        self.base.drive(consumer1)
+    }
+
+    fn len(&mut self) -> usize {
+        self.base.len()
+    }
+
+    fn with_producer<CB>(self, callback: CB) -> CB::Output
+        where CB: ProducerCallback<Self::Item>
+    {
+        return self.base.with_producer(Callback {
+                                           callback: callback,
+                                           map_op: self.map_op,
+                                       });
+
+        struct Callback<CB, F> {
+            callback: CB,
+            map_op: F,
+        }
+
+        impl<T, F, R, CB> ProducerCallback<T> for Callback<CB, F>
+            where CB: ProducerCallback<R>,
+                  F: Fn(T) -> R + Sync,
+                  R: Send
+        {
+            type Output = CB::Output;
+
+            fn callback<P>(self, base: P) -> CB::Output
+                where P: Producer<Item = T>
+            {
+                let producer = MapProducer {
+                    base: base,
+                    map_op: &self.map_op,
+                };
+                self.callback.callback(producer)
+            }
+        }
+    }
+}
+
+/// ////////////////////////////////////////////////////////////////////////
+
+struct MapProducer<'f, P, F: 'f> {
+    base: P,
+    map_op: &'f F,
+}
+
+impl<'f, P, F, R> Producer for MapProducer<'f, P, F>
+    where P: Producer,
+          F: Fn(P::Item) -> R + Sync,
+          R: Send
+{
+    type Item = F::Output;
+    type IntoIter = iter::Map<P::IntoIter, &'f F>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        self.base.into_iter().map(self.map_op)
+    }
+
+    fn min_len(&self) -> usize {
+        self.base.min_len()
+    }
+    fn max_len(&self) -> usize {
+        self.base.max_len()
+    }
+
+    fn split_at(self, index: usize) -> (Self, Self) {
+        let (left, right) = self.base.split_at(index);
+        (MapProducer {
+             base: left,
+             map_op: self.map_op,
+         },
+         MapProducer {
+             base: right,
+             map_op: self.map_op,
+         })
+    }
+}
+
+
+/// ////////////////////////////////////////////////////////////////////////
+/// Consumer implementation
+
+struct MapConsumer<'f, C, F: 'f> {
+    base: C,
+    map_op: &'f F,
+}
+
+impl<'f, C, F> MapConsumer<'f, C, F> {
+    fn new(base: C, map_op: &'f F) -> Self {
+        MapConsumer {
+            base: base,
+            map_op: map_op,
+        }
+    }
+}
+
+impl<'f, T, R, C, F> Consumer<T> for MapConsumer<'f, C, F>
+    where C: Consumer<F::Output>,
+          F: Fn(T) -> R + Sync,
+          R: Send
+{
+    type Folder = MapFolder<'f, C::Folder, F>;
+    type Reducer = C::Reducer;
+    type Result = C::Result;
+
+    fn split_at(self, index: usize) -> (Self, Self, Self::Reducer) {
+        let (left, right, reducer) = self.base.split_at(index);
+        (MapConsumer::new(left, self.map_op), MapConsumer::new(right, self.map_op), reducer)
+    }
+
+    fn into_folder(self) -> Self::Folder {
+        MapFolder {
+            base: self.base.into_folder(),
+            map_op: self.map_op,
+        }
+    }
+
+    fn full(&self) -> bool {
+        self.base.full()
+    }
+}
+
+impl<'f, T, R, C, F> UnindexedConsumer<T> for MapConsumer<'f, C, F>
+    where C: UnindexedConsumer<F::Output>,
+          F: Fn(T) -> R + Sync,
+          R: Send
+{
+    fn split_off_left(&self) -> Self {
+        MapConsumer::new(self.base.split_off_left(), &self.map_op)
+    }
+
+    fn to_reducer(&self) -> Self::Reducer {
+        self.base.to_reducer()
+    }
+}
+
+struct MapFolder<'f, C, F: 'f> {
+    base: C,
+    map_op: &'f F,
+}
+
+impl<'f, T, R, C, F> Folder<T> for MapFolder<'f, C, F>
+    where C: Folder<F::Output>,
+          F: Fn(T) -> R
+{
+    type Result = C::Result;
+
+    fn consume(self, item: T) -> Self {
+        let mapped_item = (self.map_op)(item);
+        MapFolder {
+            base: self.base.consume(mapped_item),
+            map_op: self.map_op,
+        }
+    }
+
+    fn complete(self) -> C::Result {
+        self.base.complete()
+    }
+
+    fn full(&self) -> bool {
+        self.base.full()
+    }
+}
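A usage sketch of the `map` adaptor above; because `Map` stays indexed when its base is indexed, `collect` preserves element order (the values are illustrative):

```rust
extern crate rayon;
use rayon::prelude::*;

fn main() {
    // map() is indexed when its base is, so collect() preserves order.
    let squares: Vec<i32> = (0..5).into_par_iter()
        .map(|x| x * x)
        .collect();
    assert_eq!(squares, vec![0, 1, 4, 9, 16]);
}
```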
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/src/iter/map_with.rs
@@ -0,0 +1,297 @@
+use super::internal::*;
+use super::*;
+
+
+/// `MapWith` is an iterator that transforms the elements of an underlying iterator.
+///
+/// This struct is created by the [`map_with()`] method on [`ParallelIterator`]
+///
+/// [`map_with()`]: trait.ParallelIterator.html#method.map_with
+/// [`ParallelIterator`]: trait.ParallelIterator.html
+#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
+pub struct MapWith<I: ParallelIterator, T, F> {
+    base: I,
+    item: T,
+    map_op: F,
+}
+
+/// Create a new `MapWith` iterator.
+///
+/// NB: a free fn because it is NOT part of the end-user API.
+pub fn new<I, T, F>(base: I, item: T, map_op: F) -> MapWith<I, T, F>
+    where I: ParallelIterator
+{
+    MapWith {
+        base: base,
+        item: item,
+        map_op: map_op,
+    }
+}
+
+impl<I, T, F, R> ParallelIterator for MapWith<I, T, F>
+    where I: ParallelIterator,
+          T: Send + Clone,
+          F: Fn(&mut T, I::Item) -> R + Sync + Send,
+          R: Send
+{
+    type Item = R;
+
+    fn drive_unindexed<C>(self, consumer: C) -> C::Result
+        where C: UnindexedConsumer<Self::Item>
+    {
+        let consumer1 = MapWithConsumer::new(consumer, self.item, &self.map_op);
+        self.base.drive_unindexed(consumer1)
+    }
+
+    fn opt_len(&mut self) -> Option<usize> {
+        self.base.opt_len()
+    }
+}
+
+impl<I, T, F, R> IndexedParallelIterator for MapWith<I, T, F>
+    where I: IndexedParallelIterator,
+          T: Send + Clone,
+          F: Fn(&mut T, I::Item) -> R + Sync + Send,
+          R: Send
+{
+    fn drive<C>(self, consumer: C) -> C::Result
+        where C: Consumer<Self::Item>
+    {
+        let consumer1 = MapWithConsumer::new(consumer, self.item, &self.map_op);
+        self.base.drive(consumer1)
+    }
+
+    fn len(&mut self) -> usize {
+        self.base.len()
+    }
+
+    fn with_producer<CB>(self, callback: CB) -> CB::Output
+        where CB: ProducerCallback<Self::Item>
+    {
+        return self.base.with_producer(Callback {
+                                           callback: callback,
+                                           item: self.item,
+                                           map_op: self.map_op,
+                                       });
+
+        struct Callback<CB, U, F> {
+            callback: CB,
+            item: U,
+            map_op: F,
+        }
+
+        impl<T, U, F, R, CB> ProducerCallback<T> for Callback<CB, U, F>
+            where CB: ProducerCallback<R>,
+                  U: Send + Clone,
+                  F: Fn(&mut U, T) -> R + Sync,
+                  R: Send
+        {
+            type Output = CB::Output;
+
+            fn callback<P>(self, base: P) -> CB::Output
+                where P: Producer<Item = T>
+            {
+                let producer = MapWithProducer {
+                    base: base,
+                    item: self.item,
+                    map_op: &self.map_op,
+                };
+                self.callback.callback(producer)
+            }
+        }
+    }
+}
+
+/// ////////////////////////////////////////////////////////////////////////
+
+struct MapWithProducer<'f, P, U, F: 'f> {
+    base: P,
+    item: U,
+    map_op: &'f F,
+}
+
+impl<'f, P, U, F, R> Producer for MapWithProducer<'f, P, U, F>
+    where P: Producer,
+          U: Send + Clone,
+          F: Fn(&mut U, P::Item) -> R + Sync,
+          R: Send
+{
+    type Item = R;
+    type IntoIter = MapWithIter<'f, P::IntoIter, U, F>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        MapWithIter {
+            base: self.base.into_iter(),
+            item: self.item,
+            map_op: self.map_op,
+        }
+    }
+
+    fn min_len(&self) -> usize {
+        self.base.min_len()
+    }
+    fn max_len(&self) -> usize {
+        self.base.max_len()
+    }
+
+    fn split_at(self, index: usize) -> (Self, Self) {
+        let (left, right) = self.base.split_at(index);
+        (MapWithProducer {
+             base: left,
+             item: self.item.clone(),
+             map_op: self.map_op,
+         },
+         MapWithProducer {
+             base: right,
+             item: self.item,
+             map_op: self.map_op,
+         })
+    }
+}
+
+struct MapWithIter<'f, I, U, F: 'f> {
+    base: I,
+    item: U,
+    map_op: &'f F,
+}
+
+impl<'f, I, U, F, R> Iterator for MapWithIter<'f, I, U, F>
+    where I: Iterator,
+          U: Send + Clone,
+          F: Fn(&mut U, I::Item) -> R + Sync,
+          R: Send
+{
+    type Item = R;
+
+    fn next(&mut self) -> Option<R> {
+        self.base.next().map(|item| (self.map_op)(&mut self.item, item))
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        self.base.size_hint()
+    }
+}
+
+impl<'f, I, U, F, R> DoubleEndedIterator for MapWithIter<'f, I, U, F>
+    where I: DoubleEndedIterator,
+          U: Send + Clone,
+          F: Fn(&mut U, I::Item) -> R + Sync,
+          R: Send
+{
+    fn next_back(&mut self) -> Option<R> {
+        self.base.next_back().map(|item| (self.map_op)(&mut self.item, item))
+    }
+}
+
+impl<'f, I, U, F, R> ExactSizeIterator for MapWithIter<'f, I, U, F>
+    where I: ExactSizeIterator,
+          U: Send + Clone,
+          F: Fn(&mut U, I::Item) -> R + Sync,
+          R: Send
+{
+}
+
+
+/// ////////////////////////////////////////////////////////////////////////
+/// Consumer implementation
+
+struct MapWithConsumer<'f, C, U, F: 'f> {
+    base: C,
+    item: U,
+    map_op: &'f F,
+}
+
+impl<'f, C, U, F> MapWithConsumer<'f, C, U, F> {
+    fn new(base: C, item: U, map_op: &'f F) -> Self {
+        MapWithConsumer {
+            base: base,
+            item: item,
+            map_op: map_op,
+        }
+    }
+}
+
+impl<'f, T, U, R, C, F> Consumer<T> for MapWithConsumer<'f, C, U, F>
+    where C: Consumer<R>,
+          U: Send + Clone,
+          F: Fn(&mut U, T) -> R + Sync,
+          R: Send
+{
+    type Folder = MapWithFolder<'f, C::Folder, U, F>;
+    type Reducer = C::Reducer;
+    type Result = C::Result;
+
+    fn split_at(self, index: usize) -> (Self, Self, Self::Reducer) {
+        let (left, right, reducer) = self.base.split_at(index);
+        (MapWithConsumer::new(left, self.item.clone(), self.map_op),
+         MapWithConsumer::new(right, self.item, self.map_op),
+         reducer)
+    }
+
+    fn into_folder(self) -> Self::Folder {
+        MapWithFolder {
+            base: self.base.into_folder(),
+            item: self.item,
+            map_op: self.map_op,
+        }
+    }
+
+    fn full(&self) -> bool {
+        self.base.full()
+    }
+}
+
+impl<'f, T, U, R, C, F> UnindexedConsumer<T> for MapWithConsumer<'f, C, U, F>
+    where C: UnindexedConsumer<R>,
+          U: Send + Clone,
+          F: Fn(&mut U, T) -> R + Sync,
+          R: Send
+{
+    fn split_off_left(&self) -> Self {
+        MapWithConsumer::new(self.base.split_off_left(), self.item.clone(), self.map_op)
+    }
+
+    fn to_reducer(&self) -> Self::Reducer {
+        self.base.to_reducer()
+    }
+}
+
+struct MapWithFolder<'f, C, U, F: 'f> {
+    base: C,
+    item: U,
+    map_op: &'f F,
+}
+
+impl<'f, T, U, R, C, F> Folder<T> for MapWithFolder<'f, C, U, F>
+    where C: Folder<R>,
+          U: Clone,
+          F: Fn(&mut U, T) -> R
+{
+    type Result = C::Result;
+
+    fn consume(mut self, item: T) -> Self {
+        let mapped_item = (self.map_op)(&mut self.item, item);
+        self.base = self.base.consume(mapped_item);
+        self
+    }
+
+    fn consume_iter<I>(mut self, iter: I) -> Self
+        where I: IntoIterator<Item = T>
+    {
+        {
+            let map_op = self.map_op;
+            let item = &mut self.item;
+            let mapped_iter = iter.into_iter().map(|x| map_op(item, x));
+            self.base = self.base.consume_iter(mapped_iter);
+        }
+        self
+    }
+
+    fn complete(self) -> C::Result {
+        self.base.complete()
+    }
+
+    fn full(&self) -> bool {
+        self.base.full()
+    }
+}
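`map_with` threads a mutable, clonable value into each job so the closure can reuse it without the value being `Sync`. A sketch using a scratch `String` prefix; the names and values are illustrative:

```rust
extern crate rayon;
use rayon::prelude::*;

fn main() {
    // The String is cloned per split and reused as scratch state within each split.
    let labels: Vec<String> = (0..3).into_par_iter()
        .map_with(String::from("item-"), |prefix, i| format!("{}{}", prefix.as_str(), i))
        .collect();
    assert_eq!(labels, vec!["item-0", "item-1", "item-2"]);
}
```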
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/src/iter/mod.rs
@@ -0,0 +1,1053 @@
+//! The `ParallelIterator` module makes it easy to write parallel
+//! programs using an iterator-style interface. To get access to all
+//! the methods you want, the easiest way is to write `use
+//! rayon::prelude::*;` at the top of your module, which will import
+//! the various traits and methods you need.
+//!
+//! The submodules of this module mostly just contain implementation
+//! details of little interest to an end-user. If you'd like to read
+//! the code itself, the `internal` module and `README.md` file are a
+//! good place to start.
+
+use std::cmp::{self, Ordering};
+use std::iter::{Sum, Product};
+use std::ops::Fn;
+use self::internal::*;
+
+// There is a method to the madness here:
+//
+// - Most of these modules are private but expose certain types to the end-user
+//   (e.g., `enumerate::Enumerate`) -- specifically, the types that appear in the
+//   public API surface of the `ParallelIterator` traits.
+// - In **this** module, those public types are always used unprefixed, which forces
+//   us to add a `pub use` and helps identify if we missed anything.
+// - In contrast, items that appear **only** in the body of a method,
+//   e.g. `find::find()`, are always used **prefixed**, so that they
+//   can be readily distinguished.
+
+mod find;
+mod find_first_last;
+mod chain;
+pub use self::chain::Chain;
+mod collect;
+mod enumerate;
+pub use self::enumerate::Enumerate;
+mod filter;
+pub use self::filter::Filter;
+mod filter_map;
+pub use self::filter_map::FilterMap;
+mod flat_map;
+pub use self::flat_map::FlatMap;
+mod from_par_iter;
+pub mod internal;
+mod for_each;
+mod fold;
+pub use self::fold::{Fold, FoldWith};
+mod reduce;
+mod skip;
+pub use self::skip::Skip;
+mod splitter;
+pub use self::splitter::{split, Split};
+mod take;
+pub use self::take::Take;
+mod map;
+pub use self::map::Map;
+mod map_with;
+pub use self::map_with::MapWith;
+mod zip;
+pub use self::zip::Zip;
+mod noop;
+mod rev;
+pub use self::rev::Rev;
+mod len;
+pub use self::len::{MinLen, MaxLen};
+mod sum;
+mod product;
+mod cloned;
+pub use self::cloned::Cloned;
+mod inspect;
+pub use self::inspect::Inspect;
+mod while_some;
+pub use self::while_some::WhileSome;
+mod extend;
+mod unzip;
+
+#[cfg(test)]
+mod test;
+
+/// Represents a value of one of two possible types.
+pub enum Either<L, R> {
+    Left(L),
+    Right(R)
+}
+
+pub trait IntoParallelIterator {
+    type Iter: ParallelIterator<Item = Self::Item>;
+    type Item: Send;
+
+    fn into_par_iter(self) -> Self::Iter;
+}
+
+pub trait IntoParallelRefIterator<'data> {
+    type Iter: ParallelIterator<Item = Self::Item>;
+    type Item: Send + 'data;
+
+    fn par_iter(&'data self) -> Self::Iter;
+}
+
+impl<'data, I: 'data + ?Sized> IntoParallelRefIterator<'data> for I
+    where &'data I: IntoParallelIterator
+{
+    type Iter = <&'data I as IntoParallelIterator>::Iter;
+    type Item = <&'data I as IntoParallelIterator>::Item;
+
+    fn par_iter(&'data self) -> Self::Iter {
+        self.into_par_iter()
+    }
+}
+
+pub trait IntoParallelRefMutIterator<'data> {
+    type Iter: ParallelIterator<Item = Self::Item>;
+    type Item: Send + 'data;
+
+    fn par_iter_mut(&'data mut self) -> Self::Iter;
+}
+
+impl<'data, I: 'data + ?Sized> IntoParallelRefMutIterator<'data> for I
+    where &'data mut I: IntoParallelIterator
+{
+    type Iter = <&'data mut I as IntoParallelIterator>::Iter;
+    type Item = <&'data mut I as IntoParallelIterator>::Item;
+
+    fn par_iter_mut(&'data mut self) -> Self::Iter {
+        self.into_par_iter()
+    }
+}
+
+/// The `ParallelIterator` interface.
+pub trait ParallelIterator: Sized + Send {
+    type Item: Send;
+
+    /// Executes `OP` on each item produced by the iterator, in parallel.
+    fn for_each<OP>(self, op: OP)
+        where OP: Fn(Self::Item) + Sync + Send
+    {
+        for_each::for_each(self, &op)
+    }
+
+    /// Executes `OP` on the given `init` value with each item produced by
+    /// the iterator, in parallel.
+    ///
+    /// The `init` value will be cloned only as needed to be paired with
+    /// the group of items in each rayon job.  It does not require the type
+    /// to be `Sync`.
+    fn for_each_with<OP, T>(self, init: T, op: OP)
+        where OP: Fn(&mut T, Self::Item) + Sync + Send,
+              T: Send + Clone
+    {
+        self.map_with(init, op).for_each(|()| ())
+    }
+
+    /// Counts the number of items in this parallel iterator.
+    fn count(self) -> usize {
+        self.map(|_| 1).sum()
+    }
+
+    /// Applies `map_op` to each item of this iterator, producing a new
+    /// iterator with the results.
+    fn map<F, R>(self, map_op: F) -> Map<Self, F>
+        where F: Fn(Self::Item) -> R + Sync + Send,
+              R: Send
+    {
+        map::new(self, map_op)
+    }
+
+    /// Applies `map_op` to the given `init` value with each item of this
+    /// iterator, producing a new iterator with the results.
+    ///
+    /// The `init` value will be cloned only as needed to be paired with
+    /// the group of items in each rayon job.  It does not require the type
+    /// to be `Sync`.
+    fn map_with<F, T, R>(self, init: T, map_op: F) -> MapWith<Self, T, F>
+        where F: Fn(&mut T, Self::Item) -> R + Sync + Send,
+              T: Send + Clone,
+              R: Send
+    {
+        map_with::new(self, init, map_op)
+    }
+
+    /// Creates an iterator which clones all of its elements.  This may be
+    /// useful when you have an iterator over `&T`, but you need `T`.
+    fn cloned<'a, T>(self) -> Cloned<Self>
+        where T: 'a + Clone + Send,
+              Self: ParallelIterator<Item = &'a T>
+    {
+        cloned::new(self)
+    }
+
+    /// Applies `inspect_op` to a reference to each item of this iterator,
+    /// producing a new iterator passing through the original items.  This is
+    /// often useful for debugging to see what's happening in iterator stages.
+    fn inspect<OP>(self, inspect_op: OP) -> Inspect<Self, OP>
+        where OP: Fn(&Self::Item) + Sync + Send
+    {
+        inspect::new(self, inspect_op)
+    }
+
+    /// Applies `filter_op` to each item of this iterator, producing a new
+    /// iterator with only the items that gave `true` results.
+    fn filter<P>(self, filter_op: P) -> Filter<Self, P>
+        where P: Fn(&Self::Item) -> bool + Sync + Send
+    {
+        filter::new(self, filter_op)
+    }
+
+    /// Applies `filter_op` to each item of this iterator to get an `Option`,
+    /// producing a new iterator with only the items from `Some` results.
+    fn filter_map<P, R>(self, filter_op: P) -> FilterMap<Self, P>
+        where P: Fn(Self::Item) -> Option<R> + Sync + Send,
+              R: Send
+    {
+        filter_map::new(self, filter_op)
+    }
+
+    /// Applies `map_op` to each item of this iterator to get nested iterators,
+    /// producing a new iterator that flattens these back into one.
+    fn flat_map<F, PI>(self, map_op: F) -> FlatMap<Self, F>
+        where F: Fn(Self::Item) -> PI + Sync + Send,
+              PI: IntoParallelIterator
+    {
+        flat_map::new(self, map_op)
+    }
+
+    /// Reduces the items in the iterator into one item using `op`.
+    /// The argument `identity` should be a closure that can produce
+    /// "identity" value which may be inserted into the sequence as
+    /// needed to create opportunities for parallel execution. So, for
+    /// example, if you are doing a summation, then `identity()` ought
+    /// to produce something that represents the zero for your type
+    /// (but consider just calling `sum()` in that case).
+    ///
+    /// Example:
+    ///
+    /// ```
+    /// // Iterate over a sequence of pairs `(x0, y0), ..., (xN, yN)`
+    /// // and use reduce to compute one pair `(x0 + ... + xN, y0 + ... + yN)`
+    /// // where the first/second elements are summed separately.
+    /// use rayon::prelude::*;
+    /// let sums = [(0, 1), (5, 6), (16, 2), (8, 9)]
+    ///            .par_iter()        // iterating over &(i32, i32)
+    ///            .cloned()          // iterating over (i32, i32)
+    ///            .reduce(|| (0, 0), // the "identity" is 0 in both columns
+    ///                    |a, b| (a.0 + b.0, a.1 + b.1));
+    /// assert_eq!(sums, (0 + 5 + 16 + 8, 1 + 6 + 2 + 9));
+    /// ```
+    ///
+    /// **Note:** unlike a sequential `fold` operation, the order in
+    /// which `op` will be applied to reduce the result is not fully
+    /// specified. So `op` should be [associative] or else the results
+    /// will be non-deterministic. And of course `identity()` should
+    /// produce a true identity.
+    ///
+    /// [associative]: https://en.wikipedia.org/wiki/Associative_property
+    fn reduce<OP, ID>(self, identity: ID, op: OP) -> Self::Item
+        where OP: Fn(Self::Item, Self::Item) -> Self::Item + Sync + Send,
+              ID: Fn() -> Self::Item + Sync + Send
+    {
+        reduce::reduce(self, identity, op)
+    }
+
+    /// Reduces the items in the iterator into one item using `op`.
+    /// If the iterator is empty, `None` is returned; otherwise,
+    /// `Some` is returned.
+    ///
+    /// This version of `reduce` is simple but somewhat less
+    /// efficient. If possible, it is better to call `reduce()`, which
+    /// requires an identity element.
+    ///
+    /// **Note:** unlike a sequential `fold` operation, the order in
+    /// which `op` will be applied to reduce the result is not fully
+    /// specified. So `op` should be [associative] or else the results
+    /// will be non-deterministic.
+    ///
+    /// [associative]: https://en.wikipedia.org/wiki/Associative_property
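+    ///
+    /// A minimal illustrative sketch:
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    /// let sum = vec![2, 7, 3].into_par_iter().reduce_with(|a, b| a + b);
+    /// assert_eq!(sum, Some(12));
+    ///
+    /// let empty: Vec<i32> = vec![];
+    /// assert_eq!(empty.into_par_iter().reduce_with(|a, b| a + b), None);
+    /// ```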
+    fn reduce_with<OP>(self, op: OP) -> Option<Self::Item>
+        where OP: Fn(Self::Item, Self::Item) -> Self::Item + Sync + Send
+    {
+        self.fold(|| None, |opt_a, b| match opt_a {
+                Some(a) => Some(op(a, b)),
+                None => Some(b),
+            })
+            .reduce(|| None, |opt_a, opt_b| match (opt_a, opt_b) {
+                (Some(a), Some(b)) => Some(op(a, b)),
+                (Some(v), None) | (None, Some(v)) => Some(v),
+                (None, None) => None,
+            })
+    }
+
+    /// Parallel fold is similar to sequential fold except that the
+    /// sequence of items may be subdivided before it is
+    /// folded. Consider a list of numbers like `22 3 77 89 46`. If
+    /// you used sequential fold to add them (`fold(0, |a,b| a+b)`),
+    /// you would wind up first adding 0 + 22, then 22 + 3, then 25 +
+    /// 77, and so forth. The **parallel fold** works similarly except
+    /// that it first breaks up your list into sublists, and hence
+    /// instead of yielding up a single sum at the end, it yields up
+    /// multiple sums. The number of results is nondeterministic, as
+    /// is the point where the breaks occur.
+    ///
+    /// So if we did the same parallel fold (`fold(0, |a,b| a+b)`) on
+    /// our example list, we might wind up with a sequence of two numbers,
+    /// like so:
+    ///
+    /// ```notrust
+    /// 22 3 77 89 46
+    ///       |     |
+    ///     102   135
+    /// ```
+    ///
+    /// Or perhaps these three numbers:
+    ///
+    /// ```notrust
+    /// 22 3 77 89 46
+    ///       |  |  |
+    ///     102 89 46
+    /// ```
+    ///
+    /// In general, Rayon will attempt to find good breaking points
+    /// that keep all of your cores busy.
+    ///
+    /// ### Fold versus reduce
+    ///
+    /// The `fold()` and `reduce()` methods each take an identity element
+    /// and a combining function, but they operate rather differently.
+    ///
+    /// `reduce()` requires that the identity function has the same
+    /// type as the things you are iterating over, and it fully
+    /// reduces the list of items into a single item. So, for example,
+    /// imagine we are iterating over a list of bytes `bytes: [128_u8,
+    /// 64_u8, 64_u8]`. If we used `bytes.reduce(|| 0_u8, |a: u8, b:
+    /// u8| a + b)`, we would get an overflow. This is because `0`,
+    /// `a`, and `b` here are all bytes, just like the numbers in the
+    /// list (I wrote the types explicitly above, but those are the
+    /// only types you can use). To avoid the overflow, we would need
+    /// to do something like `bytes.map(|b| b as u32).reduce(|| 0, |a,
+    /// b| a + b)`, in which case our result would be `256`.
+    ///
+    /// In contrast, with `fold()`, the identity function does not
+    /// have to have the same type as the things you are iterating
+    /// over, and you potentially get back many results. So, if we
+    /// continue with the `bytes` example from the previous paragraph,
+    /// we could do `bytes.fold(|| 0_u32, |a, b| a + (b as u32))` to
+    /// convert our bytes into `u32`. And of course we might not get
+    /// back a single sum.
+    ///
+    /// There is a more subtle distinction as well, though it's
+    /// actually implied by the above points. When you use `reduce()`,
+    /// your reduction function is sometimes called with values that
+    /// were never part of your original parallel iterator (for
+    /// example, both the left and right might be a partial sum). With
+    /// `fold()`, in contrast, the left value in the fold function is
+    /// always the accumulator, and the right value is always from
+    /// your original sequence.
+    ///
+    /// ### Fold vs Map/Reduce
+    ///
+    /// Fold makes sense if you have some operation where it is
+    /// cheaper to process groups of elements at a time. For example, imagine
+    /// collecting characters into a string. If you were going to use
+    /// map/reduce, you might try this:
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    /// let s =
+    ///     ['a', 'b', 'c', 'd', 'e']
+    ///     .par_iter()
+    ///     .map(|c: &char| format!("{}", c))
+    ///     .reduce(|| String::new(),
+    ///             |mut a: String, b: String| { a.push_str(&b); a });
+    /// assert_eq!(s, "abcde");
+    /// ```
+    ///
+    /// Because reduce produces the same type of element as its input,
+    /// you have to first map each character into a string, and then
+    /// you can reduce them. This means we create one string per
+    /// element in our iterator -- not so great. Using `fold`, we can
+    /// do this instead:
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    /// let s =
+    ///     ['a', 'b', 'c', 'd', 'e']
+    ///     .par_iter()
+    ///     .fold(|| String::new(),
+    ///             |mut s: String, c: &char| { s.push(*c); s })
+    ///     .reduce(|| String::new(),
+    ///             |mut a: String, b: String| { a.push_str(&b); a });
+    /// assert_eq!(s, "abcde");
+    /// ```
+    ///
+    /// Now `fold` will process groups of our characters at a time,
+    /// and we only make one string per group. We should wind up with
+    /// some small-ish number of strings roughly proportional to the
+    /// number of CPUs you have (it will ultimately depend on how busy
+    /// your processors are). Note that we still need to do a reduce
+    /// afterwards to combine those groups of strings into a single
+    /// string.
+    ///
+    /// You could use a similar trick to save partial results (e.g., a
+    /// cache) or something similar.
+    ///
+    /// ### Combining fold with other operations
+    ///
+    /// You can combine `fold` with `reduce` if you want to produce a
+    /// single value. This is then roughly equivalent to a map/reduce
+    /// combination in effect:
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    /// let bytes = 0..22_u8; // series of u8 bytes
+    /// let sum = bytes.into_par_iter()
+    ///                .fold(|| 0_u32, |a: u32, b: u8| a + (b as u32))
+    ///                .sum::<u32>();
+    /// assert_eq!(sum, (0..22).sum()); // compare to sequential
+    /// ```
+    fn fold<T, ID, F>(self, identity: ID, fold_op: F) -> Fold<Self, ID, F>
+        where F: Fn(T, Self::Item) -> T + Sync + Send,
+              ID: Fn() -> T + Sync + Send,
+              T: Send
+    {
+        fold::fold(self, identity, fold_op)
+    }
+
+    /// Applies `fold_op` to the given `init` value with each item of this
+    /// iterator, finally producing the value for further use.
+    ///
+    /// This works essentially like `fold(|| init.clone(), fold_op)`, except
+    /// it doesn't require the `init` type to be `Sync`, nor any other form
+    /// of added synchronization.
+    fn fold_with<F, T>(self, init: T, fold_op: F) -> FoldWith<Self, T, F>
+        where F: Fn(T, Self::Item) -> T + Sync + Send,
+              T: Send + Clone
+    {
+        fold::fold_with(self, init, fold_op)
+    }
+
+    /// Sums up the items in the iterator.
+    ///
+    /// Note that the order in which the items will be reduced is not specified,
+    /// so if the `+` operator is not truly [associative] (as is the
+    /// case for floating point numbers), then the results are not
+    /// fully deterministic.
+    ///
+    /// [associative]: https://en.wikipedia.org/wiki/Associative_property
+    ///
+    /// Basically equivalent to `self.reduce(|| 0, |a, b| a + b)`,
+    /// except that the type of `0` and the `+` operation may vary
+    /// depending on the type of value being produced.
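+    ///
+    /// A minimal illustrative sketch:
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    /// let total: i32 = (0..100).into_par_iter().sum();
+    /// assert_eq!(total, 4950);
+    /// ```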
+    fn sum<S>(self) -> S
+        where S: Send + Sum<Self::Item> + Sum
+    {
+        sum::sum(self)
+    }
+
+    /// Multiplies all the items in the iterator.
+    ///
+    /// Note that the order in which the items will be reduced is not specified,
+    /// so if the `*` operator is not truly [associative] (as is the
+    /// case for floating point numbers), then the results are not
+    /// fully deterministic.
+    ///
+    /// [associative]: https://en.wikipedia.org/wiki/Associative_property
+    ///
+    /// Basically equivalent to `self.reduce(|| 1, |a, b| a * b)`,
+    /// except that the type of `1` and the `*` operation may vary
+    /// depending on the type of value being produced.
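+    ///
+    /// A minimal illustrative sketch:
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    /// let factorial: u32 = (1u32..6).into_par_iter().product();
+    /// assert_eq!(factorial, 120);
+    /// ```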
+    fn product<P>(self) -> P
+        where P: Send + Product<Self::Item> + Product
+    {
+        product::product(self)
+    }
+
+    /// Computes the minimum of all the items in the iterator. If the
+    /// iterator is empty, `None` is returned; otherwise, `Some(min)`
+    /// is returned.
+    ///
+    /// Note that the order in which the items will be reduced is not
+    /// specified, so if the `Ord` impl is not truly associative, then
+    /// the results are not deterministic.
+    ///
+    /// Basically equivalent to `self.reduce_with(|a, b| cmp::min(a, b))`.
+    fn min(self) -> Option<Self::Item>
+        where Self::Item: Ord
+    {
+        self.reduce_with(cmp::min)
+    }
+
+    /// Computes the minimum of all the items in the iterator with respect to
+    /// the given comparison function. If the iterator is empty, `None` is
+    /// returned; otherwise, `Some(min)` is returned.
+    ///
+    /// Note that the order in which the items will be reduced is not
+    /// specified, so if the comparison function is not associative, then
+    /// the results are not deterministic.
+    fn min_by<F>(self, f: F) -> Option<Self::Item>
+        where F: Sync + Send + Fn(&Self::Item, &Self::Item) -> Ordering
+    {
+        self.reduce_with(|a, b| match f(&a, &b) {
+                             Ordering::Greater => b,
+                             _ => a,
+                         })
+    }
+
+    /// Computes the item that yields the minimum value for the given
+    /// function. If the iterator is empty, `None` is returned;
+    /// otherwise, `Some(item)` is returned.
+    ///
+    /// Note that the order in which the items will be reduced is not
+    /// specified, so if the `Ord` impl is not truly associative, then
+    /// the results are not deterministic.
+    fn min_by_key<K, F>(self, f: F) -> Option<Self::Item>
+        where K: Ord + Send,
+              F: Sync + Send + Fn(&Self::Item) -> K
+    {
+        self.map(|x| (f(&x), x))
+            .min_by(|a, b| (a.0).cmp(&b.0))
+            .map(|(_, x)| x)
+    }
+
+    /// Computes the maximum of all the items in the iterator. If the
+    /// iterator is empty, `None` is returned; otherwise, `Some(max)`
+    /// is returned.
+    ///
+    /// Note that the order in which the items will be reduced is not
+    /// specified, so if the `Ord` impl is not truly associative, then
+    /// the results are not deterministic.
+    ///
+    /// Basically equivalent to `self.reduce_with(|a, b| cmp::max(a, b))`.
+    fn max(self) -> Option<Self::Item>
+        where Self::Item: Ord
+    {
+        self.reduce_with(cmp::max)
+    }
+
+    /// Computes the maximum of all the items in the iterator with respect to
+    /// the given comparison function. If the iterator is empty, `None` is
+    /// returned; otherwise, `Some(max)` is returned.
+    ///
+    /// Note that the order in which the items will be reduced is not
+    /// specified, so if the comparison function is not associative, then
+    /// the results are not deterministic.
+    fn max_by<F>(self, f: F) -> Option<Self::Item>
+        where F: Sync + Send + Fn(&Self::Item, &Self::Item) -> Ordering
+    {
+        self.reduce_with(|a, b| match f(&a, &b) {
+                             Ordering::Greater => a,
+                             _ => b,
+                         })
+    }
+
+    /// Computes the item that yields the maximum value for the given
+    /// function. If the iterator is empty, `None` is returned;
+    /// otherwise, `Some(item)` is returned.
+    ///
+    /// Note that the order in which the items will be reduced is not
+    /// specified, so if the `Ord` impl is not truly associative, then
+    /// the results are not deterministic.
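+    ///
+    /// A minimal illustrative sketch:
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    /// let words = ["tiny", "intermediate", "medium"];
+    /// let longest = words.par_iter().max_by_key(|w| w.len());
+    /// assert_eq!(longest, Some(&"intermediate"));
+    /// ```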
+    fn max_by_key<K, F>(self, f: F) -> Option<Self::Item>
+        where K: Ord + Send,
+              F: Sync + Send + Fn(&Self::Item) -> K
+    {
+        self.map(|x| (f(&x), x))
+            .max_by(|a, b| (a.0).cmp(&b.0))
+            .map(|(_, x)| x)
+    }
+
+    /// Takes two iterators and creates a new iterator over both.
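+    ///
+    /// A minimal illustrative sketch:
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    /// let first = vec![1, 2];
+    /// let second = vec![3, 4];
+    /// let all: Vec<i32> = first.into_par_iter().chain(second).collect();
+    /// assert_eq!(all, vec![1, 2, 3, 4]);
+    /// ```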
+    fn chain<C>(self, chain: C) -> Chain<Self, C::Iter>
+        where C: IntoParallelIterator<Item = Self::Item>
+    {
+        chain::new(self, chain.into_par_iter())
+    }
+
+    /// Searches for **some** item in the parallel iterator that
+    /// matches the given predicate and returns it. This operation
+    /// is similar to [`find` on sequential iterators][find] but
+    /// the item returned may not be the **first** one in the parallel
+    /// sequence which matches, since we search the entire sequence in parallel.
+    ///
+    /// Once a match is found, we will attempt to stop processing
+    /// the rest of the items in the iterator as soon as possible
+    /// (just as `find` stops iterating once a match is found).
+    ///
+    /// [find]: https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.find
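+    ///
+    /// A minimal illustrative sketch:
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    /// let a = [1, 2, 3, 3];
+    /// assert_eq!(a.par_iter().find_any(|&&x| x == 3), Some(&3));
+    /// assert_eq!(a.par_iter().find_any(|&&x| x == 100), None);
+    /// ```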
+    fn find_any<P>(self, predicate: P) -> Option<Self::Item>
+        where P: Fn(&Self::Item) -> bool + Sync + Send
+    {
+        find::find(self, predicate)
+    }
+
+    /// Searches for the sequentially **first** item in the parallel iterator
+    /// that matches the given predicate and returns it.
+    ///
+    /// Once a match is found, all attempts to the right of the match
+    /// will be stopped, while attempts to the left must continue in case
+    /// an earlier match is found.
+    ///
+    /// Note that not all parallel iterators have a useful order, much like
+    /// sequential `HashMap` iteration, so "first" may be nebulous.  If you
+    /// just want the first match discovered anywhere in the iterator,
+    /// `find_any` is a better choice.
+    fn find_first<P>(self, predicate: P) -> Option<Self::Item>
+        where P: Fn(&Self::Item) -> bool + Sync + Send
+    {
+        find_first_last::find_first(self, predicate)
+    }
+
+    /// Searches for the sequentially **last** item in the parallel iterator
+    /// that matches the given predicate and returns it.
+    ///
+    /// Once a match is found, all attempts to the left of the match
+    /// will be stopped, while attempts to the right must continue in case
+    /// a later match is found.
+    ///
+    /// Note that not all parallel iterators have a useful order, much like
+    /// sequential `HashMap` iteration, so "last" may be nebulous.  When the
+    /// order doesn't actually matter to you, `find_any` is a better choice.
+    fn find_last<P>(self, predicate: P) -> Option<Self::Item>
+        where P: Fn(&Self::Item) -> bool + Sync + Send
+    {
+        find_first_last::find_last(self, predicate)
+    }
+
+    #[doc(hidden)]
+    #[deprecated(note = "parallel `find` does not search in order -- use `find_any`, \\
+    `find_first`, or `find_last`")]
+    fn find<P>(self, predicate: P) -> Option<Self::Item>
+        where P: Fn(&Self::Item) -> bool + Sync + Send
+    {
+        self.find_any(predicate)
+    }
+
+    /// Searches for **some** item in the parallel iterator that
+    /// matches the given predicate, and if so returns true.  Once
+    /// a match is found, we'll attempt to stop processing the rest
+    /// of the items.  Proving that there's no match, returning false,
+    /// does require visiting every item.
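+    ///
+    /// A minimal illustrative sketch:
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    /// let a = [0, 12, 3, 4, 0, 23, 0];
+    /// assert!(a.par_iter().any(|&x| x == 0));
+    /// assert!(!a.par_iter().any(|&x| x > 100));
+    /// ```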
+    fn any<P>(self, predicate: P) -> bool
+        where P: Fn(Self::Item) -> bool + Sync + Send
+    {
+        self.map(predicate).find_any(|&p| p).is_some()
+    }
+
+    /// Tests that every item in the parallel iterator matches the given
+    /// predicate, and if so returns true.  If a counter-example is found,
+    /// we'll attempt to stop processing more items, then return false.
+    fn all<P>(self, predicate: P) -> bool
+        where P: Fn(Self::Item) -> bool + Sync + Send
+    {
+        self.map(predicate).find_any(|&p| !p).is_none()
+    }
+
+    /// Creates an iterator over the `Some` items of this iterator, halting
+    /// as soon as any `None` is found.
+    fn while_some<T>(self) -> WhileSome<Self>
+        where Self: ParallelIterator<Item = Option<T>>,
+              T: Send
+    {
+        while_some::new(self)
+    }
+
+    /// Creates a fresh collection containing all the elements produced
+    /// by this parallel iterator.
+    ///
+    /// You may prefer to use `collect_into()`, which allocates more
+    /// efficiently with precise knowledge of how many elements the
+    /// iterator contains, and even allows you to reuse an existing
+    /// vector's backing store rather than allocating a fresh vector.
+    fn collect<C>(self) -> C
+        where C: FromParallelIterator<Self::Item>
+    {
+        C::from_par_iter(self)
+    }
+
+    /// Unzips the items of a parallel iterator into a pair of arbitrary
+    /// `ParallelExtend` containers.
+    ///
+    /// You may prefer to use `unzip_into()`, which allocates more
+    /// efficiently with precise knowledge of how many elements the
+    /// iterator contains, and even allows you to reuse existing
+    /// vectors' backing stores rather than allocating fresh vectors.
+    fn unzip<A, B, FromA, FromB>(self) -> (FromA, FromB)
+        where Self: ParallelIterator<Item = (A, B)>,
+              FromA: Default + Send + ParallelExtend<A>,
+              FromB: Default + Send + ParallelExtend<B>,
+              A: Send,
+              B: Send
+    {
+        unzip::unzip(self)
+    }
+
+    /// Partitions the items of a parallel iterator into a pair of arbitrary
+    /// `ParallelExtend` containers.  Items for which the `predicate` returns
+    /// true go into the first container, and the rest go into the second.
+    ///
+    /// Note: unlike the standard `Iterator::partition`, this allows distinct
+    /// collection types for the left and right items.  This is more flexible,
+    /// but may require new type annotations when converting sequential code
+    /// that used type inference assuming the two were the same.
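+    ///
+    /// A minimal illustrative sketch:
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    /// let (evens, odds): (Vec<i32>, Vec<i32>) =
+    ///     (0..8).into_par_iter().partition(|x| x % 2 == 0);
+    /// assert_eq!(evens, vec![0, 2, 4, 6]);
+    /// assert_eq!(odds, vec![1, 3, 5, 7]);
+    /// ```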
+    fn partition<A, B, P>(self, predicate: P) -> (A, B)
+        where A: Default + Send + ParallelExtend<Self::Item>,
+              B: Default + Send + ParallelExtend<Self::Item>,
+              P: Fn(&Self::Item) -> bool + Sync + Send
+    {
+        unzip::partition(self, predicate)
+    }
+
+    /// Partitions and maps the items of a parallel iterator into a pair of
+    /// arbitrary `ParallelExtend` containers.  `Either::Left` items go into
+    /// the first container, and `Either::Right` items go into the second.
+    fn partition_map<A, B, P, L, R>(self, predicate: P) -> (A, B)
+        where A: Default + Send + ParallelExtend<L>,
+              B: Default + Send + ParallelExtend<R>,
+              P: Fn(Self::Item) -> Either<L, R> + Sync + Send,
+              L: Send,
+              R: Send
+    {
+        unzip::partition_map(self, predicate)
+    }
+
+    /// Internal method used to define the behavior of this parallel
+    /// iterator. You should not need to call this directly.
+    ///
+    /// This method causes the iterator `self` to start producing
+    /// items and to feed them to the consumer `consumer` one by one.
+    /// It may split the consumer before doing so to create the
+    /// opportunity to produce in parallel.
+    ///
+    /// See the [README] for more details on the internals of parallel
+    /// iterators.
+    ///
+    /// [README]: README.md
+    fn drive_unindexed<C>(self, consumer: C) -> C::Result where C: UnindexedConsumer<Self::Item>;
+
+
+    /// Internal method used to define the behavior of this parallel
+    /// iterator. You should not need to call this directly.
+    ///
+    /// Returns the number of items produced by this iterator, if known
+    /// statically. This can be used by consumers to trigger special fast
+    /// paths. Therefore, if `Some(_)` is returned, this iterator must only
+    /// use the (indexed) `Consumer` methods when driving a consumer, such
+    /// as `split_at()`. Calling `UnindexedConsumer::split_off_left()` or
+    /// other `UnindexedConsumer` methods -- or returning an inaccurate
+    /// value -- may result in panics.
+    ///
+    /// This method is currently used to optimize `collect` for want
+    /// of true Rust specialization; it may be removed when
+    /// specialization is stable.
+    fn opt_len(&mut self) -> Option<usize> {
+        None
+    }
+}
+
+impl<T: ParallelIterator> IntoParallelIterator for T {
+    type Iter = T;
+    type Item = T::Item;
+
+    fn into_par_iter(self) -> T {
+        self
+    }
+}
+
+/// An iterator that supports "random access" to its data, meaning
+/// that you can split it at arbitrary indices and draw data from
+/// those points.
+pub trait IndexedParallelIterator: ParallelIterator {
+    /// Collects the results of the iterator into the specified
+    /// vector. The vector is always truncated before execution
+    /// begins. If possible, reusing the vector across calls can lead
+    /// to better performance since it reuses the same backing buffer.
+    fn collect_into(self, target: &mut Vec<Self::Item>) {
+        collect::collect_into(self, target);
+    }
+
+    /// Unzips the results of the iterator into the specified
+    /// vectors. The vectors are always truncated before execution
+    /// begins. If possible, reusing the vectors across calls can lead
+    /// to better performance since they reuse the same backing buffer.
+    fn unzip_into<A, B>(self, left: &mut Vec<A>, right: &mut Vec<B>)
+        where Self: IndexedParallelIterator<Item = (A, B)>,
+              A: Send,
+              B: Send
+    {
+        collect::unzip_into(self, left, right);
+    }
+
+    /// Iterate over tuples `(A, B)`, where the items `A` are from
+    /// this iterator and `B` are from the iterator given as argument.
+    /// Like the `zip` method on ordinary iterators, if the two
+    /// iterators have unequal lengths, you only get as many pairs as
+    /// the shorter of the two provides.
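+    ///
+    /// A minimal illustrative sketch:
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    /// let numbers = vec![1, 2, 3];
+    /// let letters = vec!['a', 'b', 'c'];
+    /// let pairs: Vec<(i32, char)> = numbers.into_par_iter().zip(letters).collect();
+    /// assert_eq!(pairs, vec![(1, 'a'), (2, 'b'), (3, 'c')]);
+    /// ```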
+    fn zip<Z>(self, zip_op: Z) -> Zip<Self, Z::Iter>
+        where Z: IntoParallelIterator,
+              Z::Iter: IndexedParallelIterator
+    {
+        zip::new(self, zip_op.into_par_iter())
+    }
+
+    /// Lexicographically compares the elements of this `ParallelIterator` with those of
+    /// another.
+    fn cmp<I>(mut self, other: I) -> Ordering
+        where I: IntoParallelIterator<Item = Self::Item>,
+              I::Iter: IndexedParallelIterator,
+              Self::Item: Ord
+    {
+        let mut other = other.into_par_iter();
+        let ord_len = self.len().cmp(&other.len());
+        self.zip(other)
+            .map(|(x, y)| Ord::cmp(&x, &y))
+            .find_first(|&ord| ord != Ordering::Equal)
+            .unwrap_or(ord_len)
+    }
+
+    /// Lexicographically compares the elements of this `ParallelIterator` with those of
+    /// another.
+    fn partial_cmp<I>(mut self, other: I) -> Option<Ordering>
+        where I: IntoParallelIterator,
+              I::Iter: IndexedParallelIterator,
+              Self::Item: PartialOrd<I::Item>
+    {
+        let mut other = other.into_par_iter();
+        let ord_len = self.len().cmp(&other.len());
+        self.zip(other)
+            .map(|(x, y)| PartialOrd::partial_cmp(&x, &y))
+            .find_first(|&ord| ord != Some(Ordering::Equal))
+            .unwrap_or(Some(ord_len))
+    }
+
+    /// Determines if the elements of this `ParallelIterator`
+    /// are equal to those of another.
+    fn eq<I>(mut self, other: I) -> bool
+        where I: IntoParallelIterator,
+              I::Iter: IndexedParallelIterator,
+              Self::Item: PartialEq<I::Item>
+    {
+        let mut other = other.into_par_iter();
+        self.len() == other.len() && self.zip(other).all(|(x, y)| x.eq(&y))
+    }
+
+    /// Determines if the elements of this `ParallelIterator`
+    /// are unequal to those of another.
+    fn ne<I>(self, other: I) -> bool
+        where I: IntoParallelIterator,
+              I::Iter: IndexedParallelIterator,
+              Self::Item: PartialEq<I::Item>
+    {
+        !self.eq(other)
+    }
+
+    /// Determines if the elements of this `ParallelIterator`
+    /// are lexicographically less than those of another.
+    fn lt<I>(self, other: I) -> bool
+        where I: IntoParallelIterator,
+              I::Iter: IndexedParallelIterator,
+              Self::Item: PartialOrd<I::Item>
+    {
+        self.partial_cmp(other) == Some(Ordering::Less)
+    }
+
+    /// Determines if the elements of this `ParallelIterator`
+    /// are less than or equal to those of another.
+    fn le<I>(self, other: I) -> bool
+        where I: IntoParallelIterator,
+              I::Iter: IndexedParallelIterator,
+              Self::Item: PartialOrd<I::Item>
+    {
+        let ord = self.partial_cmp(other);
+        ord == Some(Ordering::Equal) || ord == Some(Ordering::Less)
+    }
+
+    /// Determines if the elements of this `ParallelIterator`
+    /// are lexicographically greater than those of another.
+    fn gt<I>(self, other: I) -> bool
+        where I: IntoParallelIterator,
+              I::Iter: IndexedParallelIterator,
+              Self::Item: PartialOrd<I::Item>
+    {
+        self.partial_cmp(other) == Some(Ordering::Greater)
+    }
+
+    /// Determines if the elements of this `ParallelIterator`
+    /// are greater than or equal to those of another.
+    fn ge<I>(self, other: I) -> bool
+        where I: IntoParallelIterator,
+              I::Iter: IndexedParallelIterator,
+              Self::Item: PartialOrd<I::Item>
+    {
+        let ord = self.partial_cmp(other);
+        ord == Some(Ordering::Equal) || ord == Some(Ordering::Greater)
+    }
+
+    /// Yields an index along with each item.
+    fn enumerate(self) -> Enumerate<Self> {
+        enumerate::new(self)
+    }
+
+    /// Creates an iterator that skips the first `n` elements.
+    fn skip(self, n: usize) -> Skip<Self> {
+        skip::new(self, n)
+    }
+
+    /// Creates an iterator that yields the first `n` elements.
+    fn take(self, n: usize) -> Take<Self> {
+        take::new(self, n)
+    }
+
+    /// Searches for **some** item in the parallel iterator that
+    /// matches the given predicate, and returns its index.  Like
+    /// `ParallelIterator::find_any`, the parallel search will not
+    /// necessarily find the **first** match, and once a match is
+    /// found we'll attempt to stop processing any more.
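+    ///
+    /// A minimal illustrative sketch:
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    /// let a = [1, 2, 3, 3];
+    /// assert_eq!(a.par_iter().position_any(|&x| x == 2), Some(1));
+    /// assert_eq!(a.par_iter().position_any(|&x| x == 100), None);
+    /// ```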
+    fn position_any<P>(self, predicate: P) -> Option<usize>
+        where P: Fn(Self::Item) -> bool + Sync + Send
+    {
+        self.map(predicate)
+            .enumerate()
+            .find_any(|&(_, p)| p)
+            .map(|(i, _)| i)
+    }
+
+    /// Searches for the sequentially **first** item in the parallel iterator
+    /// that matches the given predicate, and returns its index.
+    ///
+    /// Like `ParallelIterator::find_first`, once a match is found,
+    /// all attempts to the right of the match will be stopped, while
+    /// attempts to the left must continue in case an earlier match
+    /// is found.
+    ///
+    /// Note that not all parallel iterators have a useful order, much like
+    /// sequential `HashMap` iteration, so "first" may be nebulous.  If you
+    /// just want the first match discovered anywhere in the iterator,
+    /// `position_any` is a better choice.
+    fn position_first<P>(self, predicate: P) -> Option<usize>
+        where P: Fn(Self::Item) -> bool + Sync + Send
+    {
+        self.map(predicate)
+            .enumerate()
+            .find_first(|&(_, p)| p)
+            .map(|(i, _)| i)
+    }
+
+    /// Searches for the sequentially **last** item in the parallel iterator
+    /// that matches the given predicate, and returns its index.
+    ///
+    /// Like `ParallelIterator::find_last`, once a match is found,
+    /// all attempts to the left of the match will be stopped, while
+    /// attempts to the right must continue in case a later match
+    /// is found.
+    ///
+    /// Note that not all parallel iterators have a useful order, much like
+    /// sequential `HashMap` iteration, so "last" may be nebulous.  When the
+    /// order doesn't actually matter to you, `position_any` is a better
+    /// choice.
+    fn position_last<P>(self, predicate: P) -> Option<usize>
+        where P: Fn(Self::Item) -> bool + Sync + Send
+    {
+        self.map(predicate)
+            .enumerate()
+            .find_last(|&(_, p)| p)
+            .map(|(i, _)| i)
+    }
+
+    #[doc(hidden)]
+    #[deprecated(note = "parallel `position` does not search in order -- use `position_any`, \\
+    `position_first`, or `position_last`")]
+    fn position<P>(self, predicate: P) -> Option<usize>
+        where P: Fn(Self::Item) -> bool + Sync + Send
+    {
+        self.position_any(predicate)
+    }
+
+    /// Produces a new iterator with the elements of this iterator in
+    /// reverse order.
+    fn rev(self) -> Rev<Self> {
+        rev::new(self)
+    }
+
+    /// Sets the minimum length of iterators desired to process in each
+    /// thread.  Rayon will not split any smaller than this length, but
+    /// of course an iterator could already be smaller to begin with.
+    fn with_min_len(self, min: usize) -> MinLen<Self> {
+        len::new_min_len(self, min)
+    }
+
+    /// Sets the maximum length of iterators desired to process in each
+    /// thread.  Rayon will try to split at least below this length,
+    /// unless that would put it below the length from `with_min_len()`.
+    /// For example, given min=10 and max=15, a length of 16 will not be
+    /// split any further, since halving it would fall below the minimum.
+    fn with_max_len(self, max: usize) -> MaxLen<Self> {
+        len::new_max_len(self, max)
+    }
+
+    /// Produces an exact count of how many items this iterator will
+    /// produce, presuming no panic occurs.
+    fn len(&mut self) -> usize;
+
+    /// Internal method used to define the behavior of this parallel
+    /// iterator. You should not need to call this directly.
+    ///
+    /// This method causes the iterator `self` to start producing
+    /// items and to feed them to the consumer `consumer` one by one.
+    /// It may split the consumer before doing so to create the
+    /// opportunity to produce in parallel. If a split does happen, it
+    /// will inform the consumer of the index where the split should
+    /// occur (unlike `ParallelIterator::drive_unindexed()`).
+    ///
+    /// See the [README] for more details on the internals of parallel
+    /// iterators.
+    ///
+    /// [README]: README.md
+    fn drive<'c, C: Consumer<Self::Item>>(self, consumer: C) -> C::Result;
+
+    /// Internal method used to define the behavior of this parallel
+    /// iterator. You should not need to call this directly.
+    ///
+    /// This method converts the iterator into a producer P and then
+    /// invokes `callback.callback()` with P. Note that the type of
+    /// this producer is not defined as part of the API, since
+    /// `callback` must be defined generically for all producers. This
+    /// allows the producer type to contain references; it also means
+    /// that parallel iterators can adjust that type without causing a
+    /// breaking change.
+    ///
+    /// See the [README] for more details on the internals of parallel
+    /// iterators.
+    ///
+    /// [README]: README.md
+    fn with_producer<CB: ProducerCallback<Self::Item>>(self, callback: CB) -> CB::Output;
+}
+
+/// `FromParallelIterator` implements the conversion from a [`ParallelIterator`].
+/// By implementing `FromParallelIterator` for a type, you define how it will be
+/// created from an iterator.
+///
+/// `FromParallelIterator` is used through [`ParallelIterator`]'s [`collect()`] method.
+///
+/// [`ParallelIterator`]: trait.ParallelIterator.html
+/// [`collect()`]: trait.ParallelIterator.html#method.collect
+pub trait FromParallelIterator<T>
+    where T: Send
+{
+    fn from_par_iter<I>(par_iter: I) -> Self where I: IntoParallelIterator<Item = T>;
+}
+
+/// `ParallelExtend` extends an existing collection with items from a [`ParallelIterator`].
+///
+/// [`ParallelIterator`]: trait.ParallelIterator.html
+pub trait ParallelExtend<T>
+    where T: Send
+{
+    fn par_extend<I>(&mut self, par_iter: I) where I: IntoParallelIterator<Item = T>;
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/src/iter/noop.rs
@@ -0,0 +1,57 @@
+use super::internal::*;
+
+pub struct NoopConsumer;
+
+impl NoopConsumer {
+    pub fn new() -> Self {
+        NoopConsumer
+    }
+}
+
+impl<T> Consumer<T> for NoopConsumer {
+    type Folder = NoopConsumer;
+    type Reducer = NoopReducer;
+    type Result = ();
+
+    fn split_at(self, _index: usize) -> (Self, Self, NoopReducer) {
+        (NoopConsumer, NoopConsumer, NoopReducer)
+    }
+
+    fn into_folder(self) -> Self {
+        self
+    }
+
+    fn full(&self) -> bool {
+        false
+    }
+}
+
+impl<T> Folder<T> for NoopConsumer {
+    type Result = ();
+
+    fn consume(self, _item: T) -> Self {
+        self
+    }
+
+    fn complete(self) {}
+
+    fn full(&self) -> bool {
+        false
+    }
+}
+
+impl<T> UnindexedConsumer<T> for NoopConsumer {
+    fn split_off_left(&self) -> Self {
+        NoopConsumer
+    }
+
+    fn to_reducer(&self) -> NoopReducer {
+        NoopReducer
+    }
+}
+
+pub struct NoopReducer;
+
+impl Reducer<()> for NoopReducer {
+    fn reduce(self, _left: (), _right: ()) {}
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/src/iter/product.rs
@@ -0,0 +1,99 @@
+use super::ParallelIterator;
+use super::internal::*;
+
+use std::iter::{self, Product};
+use std::marker::PhantomData;
+
+
+pub fn product<PI, P>(pi: PI) -> P
+    where PI: ParallelIterator,
+          P: Send + Product<PI::Item> + Product
+{
+    pi.drive_unindexed(ProductConsumer::new())
+}
+
+fn mul<T: Product>(left: T, right: T) -> T {
+    iter::once(left).chain(iter::once(right)).product()
+}
+
+
+struct ProductConsumer<P: Send> {
+    _marker: PhantomData<*const P>,
+}
+
+unsafe impl<P: Send> Send for ProductConsumer<P> {}
+
+impl<P: Send> ProductConsumer<P> {
+    fn new() -> ProductConsumer<P> {
+        ProductConsumer { _marker: PhantomData }
+    }
+}
+
+impl<P, T> Consumer<T> for ProductConsumer<P>
+    where P: Send + Product<T> + Product
+{
+    type Folder = ProductFolder<P>;
+    type Reducer = Self;
+    type Result = P;
+
+    fn split_at(self, _index: usize) -> (Self, Self, Self) {
+        (ProductConsumer::new(), ProductConsumer::new(), ProductConsumer::new())
+    }
+
+    fn into_folder(self) -> Self::Folder {
+        ProductFolder { product: iter::empty::<T>().product() }
+    }
+
+    fn full(&self) -> bool {
+        false
+    }
+}
+
+impl<P, T> UnindexedConsumer<T> for ProductConsumer<P>
+    where P: Send + Product<T> + Product
+{
+    fn split_off_left(&self) -> Self {
+        ProductConsumer::new()
+    }
+
+    fn to_reducer(&self) -> Self::Reducer {
+        ProductConsumer::new()
+    }
+}
+
+impl<P> Reducer<P> for ProductConsumer<P>
+    where P: Send + Product
+{
+    fn reduce(self, left: P, right: P) -> P {
+        mul(left, right)
+    }
+}
+
+
+struct ProductFolder<P> {
+    product: P,
+}
+
+impl<P, T> Folder<T> for ProductFolder<P>
+    where P: Product<T> + Product
+{
+    type Result = P;
+
+    fn consume(self, item: T) -> Self {
+        ProductFolder { product: mul(self.product, iter::once(item).product()) }
+    }
+
+    fn consume_iter<I>(self, iter: I) -> Self
+        where I: IntoIterator<Item = T>
+    {
+        ProductFolder { product: mul(self.product, iter.into_iter().product()) }
+    }
+
+    fn complete(self) -> P {
+        self.product
+    }
+
+    fn full(&self) -> bool {
+        false
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/src/iter/reduce.rs
@@ -0,0 +1,110 @@
+use super::ParallelIterator;
+use super::internal::*;
+
+pub fn reduce<PI, R, ID, T>(pi: PI, identity: ID, reduce_op: R) -> T
+    where PI: ParallelIterator<Item = T>,
+          R: Fn(T, T) -> T + Sync,
+          ID: Fn() -> T + Sync,
+          T: Send
+{
+    let consumer = ReduceConsumer {
+        identity: &identity,
+        reduce_op: &reduce_op,
+    };
+    pi.drive_unindexed(consumer)
+}
+
+struct ReduceConsumer<'r, R: 'r, ID: 'r> {
+    identity: &'r ID,
+    reduce_op: &'r R,
+}
+
+impl<'r, R, ID> Copy for ReduceConsumer<'r, R, ID> {}
+
+impl<'r, R, ID> Clone for ReduceConsumer<'r, R, ID> {
+    fn clone(&self) -> Self {
+        *self
+    }
+}
+
+impl<'r, R, ID, T> Consumer<T> for ReduceConsumer<'r, R, ID>
+    where R: Fn(T, T) -> T + Sync,
+          ID: Fn() -> T + Sync,
+          T: Send
+{
+    type Folder = ReduceFolder<'r, R, T>;
+    type Reducer = Self;
+    type Result = T;
+
+    fn split_at(self, _index: usize) -> (Self, Self, Self) {
+        (self, self, self)
+    }
+
+    fn into_folder(self) -> Self::Folder {
+        ReduceFolder {
+            reduce_op: self.reduce_op,
+            item: (self.identity)(),
+        }
+    }
+
+    fn full(&self) -> bool {
+        false
+    }
+}
+
+impl<'r, R, ID, T> UnindexedConsumer<T> for ReduceConsumer<'r, R, ID>
+    where R: Fn(T, T) -> T + Sync,
+          ID: Fn() -> T + Sync,
+          T: Send
+{
+    fn split_off_left(&self) -> Self {
+        *self
+    }
+
+    fn to_reducer(&self) -> Self::Reducer {
+        *self
+    }
+}
+
+impl<'r, R, ID, T> Reducer<T> for ReduceConsumer<'r, R, ID>
+    where R: Fn(T, T) -> T + Sync
+{
+    fn reduce(self, left: T, right: T) -> T {
+        (self.reduce_op)(left, right)
+    }
+}
+
+struct ReduceFolder<'r, R: 'r, T> {
+    reduce_op: &'r R,
+    item: T,
+}
+
+impl<'r, R, T> Folder<T> for ReduceFolder<'r, R, T>
+    where R: Fn(T, T) -> T
+{
+    type Result = T;
+
+    fn consume(self, item: T) -> Self {
+        ReduceFolder {
+            reduce_op: self.reduce_op,
+            item: (self.reduce_op)(self.item, item),
+        }
+    }
+
+    fn consume_iter<I>(self, iter: I) -> Self
+        where I: IntoIterator<Item = T>
+    {
+        ReduceFolder {
+            reduce_op: self.reduce_op,
+            item: iter.into_iter().fold(self.item, self.reduce_op),
+        }
+    }
+
+    fn complete(self) -> T {
+        self.item
+    }
+
+    fn full(&self) -> bool {
+        false
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/src/iter/rev.rs
@@ -0,0 +1,110 @@
+use super::internal::*;
+use super::*;
+use std::iter;
+
+#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
+pub struct Rev<I: IndexedParallelIterator> {
+    base: I,
+}
+
+/// Create a new `Rev` iterator.
+///
+/// NB: a free fn because it is NOT part of the end-user API.
+pub fn new<I>(base: I) -> Rev<I>
+    where I: IndexedParallelIterator
+{
+    Rev { base: base }
+}
+
+impl<I> ParallelIterator for Rev<I>
+    where I: IndexedParallelIterator
+{
+    type Item = I::Item;
+
+    fn drive_unindexed<C>(self, consumer: C) -> C::Result
+        where C: UnindexedConsumer<Self::Item>
+    {
+        bridge(self, consumer)
+    }
+
+    fn opt_len(&mut self) -> Option<usize> {
+        Some(self.len())
+    }
+}
+
+impl<I> IndexedParallelIterator for Rev<I>
+    where I: IndexedParallelIterator
+{
+    fn drive<C: Consumer<Self::Item>>(self, consumer: C) -> C::Result {
+        bridge(self, consumer)
+    }
+
+    fn len(&mut self) -> usize {
+        self.base.len()
+    }
+
+    fn with_producer<CB>(mut self, callback: CB) -> CB::Output
+        where CB: ProducerCallback<Self::Item>
+    {
+        let len = self.base.len();
+        return self.base.with_producer(Callback {
+                                           callback: callback,
+                                           len: len,
+                                       });
+
+        struct Callback<CB> {
+            callback: CB,
+            len: usize,
+        }
+
+        impl<T, CB> ProducerCallback<T> for Callback<CB>
+            where CB: ProducerCallback<T>
+        {
+            type Output = CB::Output;
+            fn callback<P>(self, base: P) -> CB::Output
+                where P: Producer<Item = T>
+            {
+                let producer = RevProducer {
+                    base: base,
+                    len: self.len,
+                };
+                self.callback.callback(producer)
+            }
+        }
+    }
+}
+
+struct RevProducer<P> {
+    base: P,
+    len: usize,
+}
+
+impl<P> Producer for RevProducer<P>
+    where P: Producer
+{
+    type Item = P::Item;
+    type IntoIter = iter::Rev<P::IntoIter>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        self.base.into_iter().rev()
+    }
+
+    fn min_len(&self) -> usize {
+        self.base.min_len()
+    }
+    fn max_len(&self) -> usize {
+        self.base.max_len()
+    }
+
+    fn split_at(self, index: usize) -> (Self, Self) {
+        let (left, right) = self.base.split_at(self.len - index);
+        (RevProducer {
+             base: right,
+             len: index,
+         },
+         RevProducer {
+             base: left,
+             len: self.len - index,
+         })
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/src/iter/skip.rs
@@ -0,0 +1,80 @@
+use super::internal::*;
+use super::*;
+use super::noop::NoopConsumer;
+use std::cmp::min;
+
+/// `Skip` is an iterator that skips over the first `n` elements.
+/// This struct is created by the [`skip()`] method on [`ParallelIterator`].
+///
+/// [`skip()`]: trait.ParallelIterator.html#method.skip
+/// [`ParallelIterator`]: trait.ParallelIterator.html
+#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
+pub struct Skip<I> {
+    base: I,
+    n: usize,
+}
+
+/// Create a new `Skip` iterator.
+///
+/// NB: a free fn because it is NOT part of the end-user API.
+pub fn new<I>(mut base: I, n: usize) -> Skip<I>
+    where I: IndexedParallelIterator
+{
+    let n = min(base.len(), n);
+    Skip { base: base, n: n }
+}
+
+impl<I> ParallelIterator for Skip<I>
+    where I: IndexedParallelIterator
+{
+    type Item = I::Item;
+
+    fn drive_unindexed<C>(self, consumer: C) -> C::Result
+        where C: UnindexedConsumer<Self::Item>
+    {
+        bridge(self, consumer)
+    }
+
+    fn opt_len(&mut self) -> Option<usize> {
+        Some(self.len())
+    }
+}
+
+impl<I> IndexedParallelIterator for Skip<I>
+    where I: IndexedParallelIterator
+{
+    fn len(&mut self) -> usize {
+        self.base.len() - self.n
+    }
+
+    fn drive<C: Consumer<Self::Item>>(self, consumer: C) -> C::Result {
+        bridge(self, consumer)
+    }
+
+    fn with_producer<CB>(self, callback: CB) -> CB::Output
+        where CB: ProducerCallback<Self::Item>
+    {
+        return self.base.with_producer(Callback {
+                                           callback: callback,
+                                           n: self.n,
+                                       });
+
+        struct Callback<CB> {
+            callback: CB,
+            n: usize,
+        }
+
+        impl<T, CB> ProducerCallback<T> for Callback<CB>
+            where CB: ProducerCallback<T>
+        {
+            type Output = CB::Output;
+            fn callback<P>(self, base: P) -> CB::Output
+                where P: Producer<Item = T>
+            {
+                let (before_skip, after_skip) = base.split_at(self.n);
+                bridge_producer_consumer(self.n, before_skip, NoopConsumer::new());
+                self.callback.callback(after_skip)
+            }
+        }
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/src/iter/splitter.rs
@@ -0,0 +1,71 @@
+use super::internal::*;
+use super::*;
+
+/// The `split` function takes arbitrary data and a closure that knows how to
+/// split it, and turns this into a `ParallelIterator`.
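+///
+/// A minimal illustrative sketch (assuming `split` is reachable as
+/// `rayon::iter::split`): a numeric range is split in half until the
+/// pieces are small, and the lengths of the pieces are then summed.
+///
+/// ```
+/// use rayon::prelude::*;
+/// use rayon::iter::split;
+///
+/// let total: usize = split(0..100usize, |r| {
+///     if r.len() >= 2 {
+///         let mid = r.start + r.len() / 2;
+///         (r.start..mid, Some(mid..r.end))
+///     } else {
+///         (r, None)
+///     }
+/// }).map(|r| r.len()).sum();
+/// assert_eq!(total, 100);
+/// ```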
+pub fn split<D, S>(data: D, splitter: S) -> Split<D, S>
+    where D: Send,
+          S: Fn(D) -> (D, Option<D>) + Sync
+{
+    Split {
+        data: data,
+        splitter: splitter,
+    }
+}
+
+/// `Split` is a parallel iterator using arbitrary data and a splitting function.
+/// This struct is created by the [`split()`] function.
+///
+/// [`split()`]: fn.split.html
+pub struct Split<D, S> {
+    data: D,
+    splitter: S,
+}
+
+impl<D, S> ParallelIterator for Split<D, S>
+    where D: Send,
+          S: Fn(D) -> (D, Option<D>) + Sync + Send
+{
+    type Item = D;
+
+    fn drive_unindexed<C>(self, consumer: C) -> C::Result
+        where C: UnindexedConsumer<Self::Item>
+    {
+        let producer = SplitProducer {
+            data: self.data,
+            splitter: &self.splitter,
+        };
+        bridge_unindexed(producer, consumer)
+    }
+}
+
+struct SplitProducer<'a, D, S: 'a> {
+    data: D,
+    splitter: &'a S,
+}
+
+impl<'a, D, S> UnindexedProducer for SplitProducer<'a, D, S>
+    where D: Send,
+          S: Fn(D) -> (D, Option<D>) + Sync
+{
+    type Item = D;
+
+    fn split(mut self) -> (Self, Option<Self>) {
+        let splitter = self.splitter;
+        let (left, right) = splitter(self.data);
+        self.data = left;
+        (self,
+         right.map(|data| {
+                       SplitProducer {
+                           data: data,
+                           splitter: splitter,
+                       }
+                   }))
+    }
+
+    fn fold_with<F>(self, folder: F) -> F
+        where F: Folder<Self::Item>
+    {
+        folder.consume(self.data)
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/src/iter/sum.rs
@@ -0,0 +1,99 @@
+use super::ParallelIterator;
+use super::internal::*;
+
+use std::iter::{self, Sum};
+use std::marker::PhantomData;
+
+
+pub fn sum<PI, S>(pi: PI) -> S
+    where PI: ParallelIterator,
+          S: Send + Sum<PI::Item> + Sum
+{
+    pi.drive_unindexed(SumConsumer::new())
+}
+
+fn add<T: Sum>(left: T, right: T) -> T {
+    iter::once(left).chain(iter::once(right)).sum()
+}
+
+
+struct SumConsumer<S: Send> {
+    _marker: PhantomData<*const S>,
+}
+
+unsafe impl<S: Send> Send for SumConsumer<S> {}
+
+impl<S: Send> SumConsumer<S> {
+    fn new() -> SumConsumer<S> {
+        SumConsumer { _marker: PhantomData }
+    }
+}
+
+impl<S, T> Consumer<T> for SumConsumer<S>
+    where S: Send + Sum<T> + Sum
+{
+    type Folder = SumFolder<S>;
+    type Reducer = Self;
+    type Result = S;
+
+    fn split_at(self, _index: usize) -> (Self, Self, Self) {
+        (SumConsumer::new(), SumConsumer::new(), SumConsumer::new())
+    }
+
+    fn into_folder(self) -> Self::Folder {
+        SumFolder { sum: iter::empty::<T>().sum() }
+    }
+
+    fn full(&self) -> bool {
+        false
+    }
+}
+
+impl<S, T> UnindexedConsumer<T> for SumConsumer<S>
+    where S: Send + Sum<T> + Sum
+{
+    fn split_off_left(&self) -> Self {
+        SumConsumer::new()
+    }
+
+    fn to_reducer(&self) -> Self::Reducer {
+        SumConsumer::new()
+    }
+}
+
+impl<S> Reducer<S> for SumConsumer<S>
+    where S: Send + Sum
+{
+    fn reduce(self, left: S, right: S) -> S {
+        add(left, right)
+    }
+}
+
+
+struct SumFolder<S> {
+    sum: S,
+}
+
+impl<S, T> Folder<T> for SumFolder<S>
+    where S: Sum<T> + Sum
+{
+    type Result = S;
+
+    fn consume(self, item: T) -> Self {
+        SumFolder { sum: add(self.sum, iter::once(item).sum()) }
+    }
+
+    fn consume_iter<I>(self, iter: I) -> Self
+        where I: IntoIterator<Item = T>
+    {
+        SumFolder { sum: add(self.sum, iter.into_iter().sum()) }
+    }
+
+    fn complete(self) -> S {
+        self.sum
+    }
+
+    fn full(&self) -> bool {
+        false
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/src/iter/take.rs
@@ -0,0 +1,78 @@
+use super::internal::*;
+use super::*;
+use std::cmp::min;
+
+/// `Take` is an iterator that iterates over the first `n` elements.
+/// This struct is created by the [`take()`] method on [`ParallelIterator`].
+///
+/// [`take()`]: trait.ParallelIterator.html#method.take
+/// [`ParallelIterator`]: trait.ParallelIterator.html
+#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
+pub struct Take<I> {
+    base: I,
+    n: usize,
+}
+
+/// Create a new `Take` iterator.
+///
+/// NB: a free fn because it is NOT part of the end-user API.
+pub fn new<I>(mut base: I, n: usize) -> Take<I>
+    where I: IndexedParallelIterator
+{
+    let n = min(base.len(), n);
+    Take { base: base, n: n }
+}
+
+impl<I> ParallelIterator for Take<I>
+    where I: IndexedParallelIterator
+{
+    type Item = I::Item;
+
+    fn drive_unindexed<C>(self, consumer: C) -> C::Result
+        where C: UnindexedConsumer<Self::Item>
+    {
+        bridge(self, consumer)
+    }
+
+    fn opt_len(&mut self) -> Option<usize> {
+        Some(self.len())
+    }
+}
+
+impl<I> IndexedParallelIterator for Take<I>
+    where I: IndexedParallelIterator
+{
+    fn len(&mut self) -> usize {
+        self.n
+    }
+
+    fn drive<C: Consumer<Self::Item>>(self, consumer: C) -> C::Result {
+        bridge(self, consumer)
+    }
+
+    fn with_producer<CB>(self, callback: CB) -> CB::Output
+        where CB: ProducerCallback<Self::Item>
+    {
+        return self.base.with_producer(Callback {
+                                           callback: callback,
+                                           n: self.n,
+                                       });
+
+        struct Callback<CB> {
+            callback: CB,
+            n: usize,
+        }
+
+        impl<T, CB> ProducerCallback<T> for Callback<CB>
+            where CB: ProducerCallback<T>
+        {
+            type Output = CB::Output;
+            fn callback<P>(self, base: P) -> CB::Output
+                where P: Producer<Item = T>
+            {
+                let (producer, _) = base.split_at(self.n);
+                self.callback.callback(producer)
+            }
+        }
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/src/iter/test.rs
@@ -0,0 +1,1664 @@
+use std::sync::atomic::{AtomicUsize, Ordering};
+
+use rayon_core::*;
+use prelude::*;
+use super::*;
+
+use rand::{Rng, SeedableRng, XorShiftRng};
+use std::collections::LinkedList;
+use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet};
+use std::collections::{BinaryHeap, VecDeque};
+use std::f64;
+use std::fmt::Debug;
+use std::usize;
+use std::sync::mpsc;
+
+fn is_indexed<T: IndexedParallelIterator>(_: T) {}
+
+#[test]
+pub fn execute() {
+    let a: Vec<i32> = (0..1024).collect();
+    let mut b = vec![];
+    a.par_iter().map(|&i| i + 1).collect_into(&mut b);
+    let c: Vec<i32> = (0..1024).map(|i| i + 1).collect();
+    assert_eq!(b, c);
+}
+
+#[test]
+pub fn execute_cloned() {
+    let a: Vec<i32> = (0..1024).collect();
+    let mut b: Vec<i32> = vec![];
+    a.par_iter().cloned().collect_into(&mut b);
+    let c: Vec<i32> = (0..1024).collect();
+    assert_eq!(b, c);
+}
+
+#[test]
+pub fn execute_range() {
+    let a = 0i32..1024;
+    let mut b = vec![];
+    a.into_par_iter().map(|i| i + 1).collect_into(&mut b);
+    let c: Vec<i32> = (0..1024).map(|i| i + 1).collect();
+    assert_eq!(b, c);
+}
+
+#[test]
+pub fn execute_unindexed_range() {
+    let a = 0i64..1024;
+    let b: LinkedList<i64> = a.into_par_iter().map(|i| i + 1).collect();
+    let c: LinkedList<i64> = (0..1024).map(|i| i + 1).collect();
+    assert_eq!(b, c);
+}
+
+#[test]
+pub fn execute_strings() {
+    let mut rng = XorShiftRng::from_seed([14159, 26535, 89793, 23846]);
+    let s: String = rng.gen_iter::<char>().take(1024).collect();
+
+    let par_chars: String = s.par_chars().collect();
+    assert_eq!(s, par_chars);
+
+    let par_even: String = s.par_chars().filter(|&c| (c as u32) & 1 == 0).collect();
+    let ser_even: String = s.chars().filter(|&c| (c as u32) & 1 == 0).collect();
+    assert_eq!(par_even, ser_even);
+
+    // test `FromParallelIterator<&char> for String`
+    let vchars: Vec<char> = s.par_chars().collect();
+    let par_chars: String = vchars.par_iter().collect();
+    assert_eq!(s, par_chars);
+}
+
+#[test]
+pub fn execute_strings_split() {
+    // char testcases from examples in `str::split` etc.,
+    // plus a large self-test for good measure.
+    let tests = vec![("Mary had a little lamb", ' '),
+                     ("", 'X'),
+                     ("lionXXtigerXleopard", 'X'),
+                     ("||||a||b|c", '|'),
+                     ("(///)", '/'),
+                     ("010", '0'),
+                     ("    a  b c", ' '),
+                     ("A.B.", '.'),
+                     ("A..B..", '.'),
+                     ("foo\r\nbar\n\nbaz\n", '\n'),
+                     ("foo\nbar\n\r\nbaz", '\n'),
+                     ("A few words", ' '),
+                     (" Mary   had\ta\u{2009}little  \n\t lamb", ' '),
+                     (include_str!("test.rs"), ' ')];
+
+    for &(string, separator) in &tests {
+        let serial: Vec<_> = string.split(separator).collect();
+        let parallel: Vec<_> = string.par_split(separator).collect();
+        assert_eq!(serial, parallel);
+
+        let serial_fn: Vec<_> = string.split(|c| c == separator).collect();
+        let parallel_fn: Vec<_> = string.par_split(|c| c == separator).collect();
+        assert_eq!(serial_fn, parallel_fn);
+    }
+
+    for &(string, separator) in &tests {
+        let serial: Vec<_> = string.split_terminator(separator).collect();
+        let parallel: Vec<_> = string.par_split_terminator(separator).collect();
+        assert_eq!(serial, parallel);
+    }
+
+    for &(string, separator) in &tests {
+        let serial: Vec<_> = string.split_terminator(|c| c == separator).collect();
+        let parallel: Vec<_> = string.par_split_terminator(|c| c == separator).collect();
+        assert_eq!(serial, parallel);
+    }
+
+    for &(string, _) in &tests {
+        let serial: Vec<_> = string.lines().collect();
+        let parallel: Vec<_> = string.par_lines().collect();
+        assert_eq!(serial, parallel);
+    }
+
+    for &(string, _) in &tests {
+        let serial: Vec<_> = string.split_whitespace().collect();
+        let parallel: Vec<_> = string.par_split_whitespace().collect();
+        assert_eq!(serial, parallel);
+    }
+}
+
+#[test]
+pub fn check_map_indexed() {
+    let a = [1, 2, 3];
+    is_indexed(a.par_iter().map(|x| x));
+}
+
+#[test]
+pub fn map_sum() {
+    let a: Vec<i32> = (0..1024).collect();
+    let r1: i32 = a.par_iter().map(|&i| i + 1).sum();
+    let r2 = a.iter().map(|&i| i + 1).fold(0, |a, b| a + b);
+    assert_eq!(r1, r2);
+}
+
+#[test]
+pub fn map_reduce() {
+    let a: Vec<i32> = (0..1024).collect();
+    let r1 = a.par_iter().map(|&i| i + 1).reduce(|| 0, |i, j| i + j);
+    let r2 = a.iter().map(|&i| i + 1).fold(0, |a, b| a + b);
+    assert_eq!(r1, r2);
+}
+
+#[test]
+pub fn map_reduce_with() {
+    let a: Vec<i32> = (0..1024).collect();
+    let r1 = a.par_iter().map(|&i| i + 1).reduce_with(|i, j| i + j);
+    let r2 = a.iter().map(|&i| i + 1).fold(0, |a, b| a + b);
+    assert_eq!(r1, Some(r2));
+}
+
+#[test]
+pub fn fold_map_reduce() {
+    // Kind of a weird test, but it demonstrates various
+    // transformations that are taking place. Relies on
+    // `with_max_len(1).fold()` being equivalent to `map()`.
+    //
+    // Take each number in `0..32` and fold them by appending to a
+    // vector.  Because of `with_max_len(1)`, this will produce 32 vectors,
+    // each with one item.  We then collect all of these by mapping each
+    // into its own vector (so we have Vec<Vec<i32>>) and then reducing
+    // those into a single vector.
+    let r1 = (0_i32..32)
+        .into_par_iter()
+        .with_max_len(1)
+        .fold(|| vec![], |mut v, e| {
+            v.push(e);
+            v
+        })
+        .map(|v| vec![v])
+        .reduce_with(|mut v_a, v_b| {
+                         v_a.extend(v_b);
+                         v_a
+                     });
+    assert_eq!(r1,
+               Some(vec![vec![0], vec![1], vec![2], vec![3], vec![4], vec![5], vec![6], vec![7],
+                         vec![8], vec![9], vec![10], vec![11], vec![12], vec![13], vec![14],
+                         vec![15], vec![16], vec![17], vec![18], vec![19], vec![20], vec![21],
+                         vec![22], vec![23], vec![24], vec![25], vec![26], vec![27], vec![28],
+                         vec![29], vec![30], vec![31]]));
+}
+
+#[test]
+pub fn fold_is_full() {
+    let counter = AtomicUsize::new(0);
+    let a = (0_i32..2048)
+        .into_par_iter()
+        .inspect(|_| { counter.fetch_add(1, Ordering::SeqCst); })
+        .fold(|| 0, |a, b| a + b)
+        .find_any(|_| true);
+    assert!(a.is_some());
+    assert!(counter.load(Ordering::SeqCst) < 2048); // should not have visited every single one
+}
+
+#[test]
+pub fn check_enumerate() {
+    let a: Vec<usize> = (0..1024).rev().collect();
+
+    let mut b = vec![];
+    a.par_iter()
+        .enumerate()
+        .map(|(i, &x)| i + x)
+        .collect_into(&mut b);
+    assert!(b.iter().all(|&x| x == a.len() - 1));
+}
+
+#[test]
+pub fn check_indices_after_enumerate_split() {
+    let a: Vec<i32> = (0..1024).collect();
+    a.par_iter().enumerate().with_producer(WithProducer);
+
+    struct WithProducer;
+    impl<'a> ProducerCallback<(usize, &'a i32)> for WithProducer {
+        type Output = ();
+        fn callback<P>(self, producer: P)
+            where P: Producer<Item = (usize, &'a i32)>
+        {
+            let (a, b) = producer.split_at(512);
+            for ((index, value), trusted_index) in a.into_iter().zip(0..) {
+                assert_eq!(index, trusted_index);
+                assert_eq!(index, *value as usize);
+            }
+            for ((index, value), trusted_index) in b.into_iter().zip(512..) {
+                assert_eq!(index, trusted_index);
+                assert_eq!(index, *value as usize);
+            }
+        }
+    }
+}
+
+#[test]
+pub fn check_increment() {
+    let mut a: Vec<usize> = (0..1024).rev().collect();
+
+    a.par_iter_mut().enumerate().for_each(|(i, v)| *v += i);
+
+    assert!(a.iter().all(|&x| x == a.len() - 1));
+}
+
+#[test]
+pub fn check_skip() {
+    let a: Vec<usize> = (0..1024).collect();
+
+    let mut v1 = Vec::new();
+    a.par_iter().skip(16).collect_into(&mut v1);
+    let v2 = a.iter().skip(16).collect::<Vec<_>>();
+    assert_eq!(v1, v2);
+
+    let mut v1 = Vec::new();
+    a.par_iter().skip(2048).collect_into(&mut v1);
+    let v2 = a.iter().skip(2048).collect::<Vec<_>>();
+    assert_eq!(v1, v2);
+
+    let mut v1 = Vec::new();
+    a.par_iter().skip(0).collect_into(&mut v1);
+    let v2 = a.iter().skip(0).collect::<Vec<_>>();
+    assert_eq!(v1, v2);
+
+    // Check that the skipped elements' side effects are still executed
+    use std::sync::atomic::{AtomicUsize, Ordering};
+    let num = AtomicUsize::new(0);
+    a.par_iter()
+        .map(|&n| num.fetch_add(n, Ordering::Relaxed))
+        .skip(512)
+        .count();
+    assert_eq!(num.load(Ordering::Relaxed), a.iter().sum());
+}
+
+#[test]
+pub fn check_take() {
+    let a: Vec<usize> = (0..1024).collect();
+
+    let mut v1 = Vec::new();
+    a.par_iter().take(16).collect_into(&mut v1);
+    let v2 = a.iter().take(16).collect::<Vec<_>>();
+    assert_eq!(v1, v2);
+
+    let mut v1 = Vec::new();
+    a.par_iter().take(2048).collect_into(&mut v1);
+    let v2 = a.iter().take(2048).collect::<Vec<_>>();
+    assert_eq!(v1, v2);
+
+    let mut v1 = Vec::new();
+    a.par_iter().take(0).collect_into(&mut v1);
+    let v2 = a.iter().take(0).collect::<Vec<_>>();
+    assert_eq!(v1, v2);
+}
+
+#[test]
+pub fn check_inspect() {
+    use std::sync::atomic::{AtomicUsize, Ordering};
+
+    let a = AtomicUsize::new(0);
+    let b =
+        (0_usize..1024).into_par_iter().inspect(|&i| { a.fetch_add(i, Ordering::Relaxed); }).sum();
+
+    assert_eq!(a.load(Ordering::Relaxed), b);
+}
+
+#[test]
+pub fn check_move() {
+    let a = vec![vec![1, 2, 3]];
+    let ptr = a[0].as_ptr();
+
+    let mut b = vec![];
+    a.into_par_iter().collect_into(&mut b);
+
+    // a simple move means the inner vec will be completely unchanged
+    assert_eq!(ptr, b[0].as_ptr());
+}
+
+#[test]
+pub fn check_drops() {
+    use std::sync::atomic::{AtomicUsize, Ordering};
+
+    let c = AtomicUsize::new(0);
+    let a = vec![DropCounter(&c); 10];
+
+    let mut b = vec![];
+    a.clone().into_par_iter().collect_into(&mut b);
+    assert_eq!(c.load(Ordering::Relaxed), 0);
+
+    b.into_par_iter();
+    assert_eq!(c.load(Ordering::Relaxed), 10);
+
+    a.into_par_iter().with_producer(Partial);
+    assert_eq!(c.load(Ordering::Relaxed), 20);
+
+
+    #[derive(Clone)]
+    struct DropCounter<'a>(&'a AtomicUsize);
+    impl<'a> Drop for DropCounter<'a> {
+        fn drop(&mut self) {
+            self.0.fetch_add(1, Ordering::Relaxed);
+        }
+    }
+
+    struct Partial;
+    impl<'a> ProducerCallback<DropCounter<'a>> for Partial {
+        type Output = ();
+        fn callback<P>(self, producer: P)
+            where P: Producer<Item = DropCounter<'a>>
+        {
+            let (a, _) = producer.split_at(5);
+            a.into_iter().next();
+        }
+    }
+}
+
+#[test]
+pub fn check_slice_indexed() {
+    let a = vec![1, 2, 3];
+    is_indexed(a.par_iter());
+}
+
+#[test]
+pub fn check_slice_mut_indexed() {
+    let mut a = vec![1, 2, 3];
+    is_indexed(a.par_iter_mut());
+}
+
+#[test]
+pub fn check_vec_indexed() {
+    let a = vec![1, 2, 3];
+    is_indexed(a.clone().into_par_iter());
+}
+
+#[test]
+pub fn check_range_indexed() {
+    is_indexed((1..5).into_par_iter());
+}
+
+#[test]
+pub fn check_cmp_direct() {
+    let a = (0..1024).into_par_iter();
+    let b = (0..1024).into_par_iter();
+
+    let result = a.cmp(b);
+
+    assert!(result == ::std::cmp::Ordering::Equal);
+}
+
+#[test]
+pub fn check_cmp_to_seq() {
+    assert_eq!((0..1024).into_par_iter().cmp(0..1024),
+               (0..1024).cmp(0..1024));
+}
+
+#[test]
+pub fn check_cmp_rng_to_seq() {
+    use rand::{Rng, SeedableRng, XorShiftRng};
+
+    let mut rng = XorShiftRng::from_seed([11507, 46649, 55961, 20171]);
+    let a: Vec<i32> = rng.gen_iter().take(1024).collect();
+    let b: Vec<i32> = rng.gen_iter().take(1024).collect();
+    for i in 0..a.len() {
+        let par_result = a[i..].par_iter().cmp(b[i..].par_iter());
+        let seq_result = a[i..].iter().cmp(b[i..].iter());
+
+        assert_eq!(par_result, seq_result);
+    }
+}
+
+#[test]
+pub fn check_cmp_lt_direct() {
+    let a = (0..1024).into_par_iter();
+    let b = (1..1024).into_par_iter();
+
+    let result = a.cmp(b);
+
+    assert!(result == ::std::cmp::Ordering::Less);
+}
+
+#[test]
+pub fn check_cmp_lt_to_seq() {
+    assert_eq!((0..1024).into_par_iter().cmp(1..1024),
+               (0..1024).cmp(1..1024))
+}
+
+#[test]
+pub fn check_cmp_gt_direct() {
+    let a = (1..1024).into_par_iter();
+    let b = (0..1024).into_par_iter();
+
+    let result = a.cmp(b);
+
+    assert!(result == ::std::cmp::Ordering::Greater);
+}
+
+#[test]
+pub fn check_cmp_gt_to_seq() {
+    assert_eq!((1..1024).into_par_iter().cmp(0..1024),
+               (1..1024).cmp(0..1024))
+}
+
+#[test]
+pub fn check_cmp_short_circuit() {
+    let a = vec![0; 1024];
+    let mut b = a.clone();
+    b[42] = 1;
+
+    let counter = AtomicUsize::new(0);
+    let result = a.par_iter().inspect(|_| { counter.fetch_add(1, Ordering::SeqCst); }).cmp(&b);
+    assert!(result == ::std::cmp::Ordering::Less);
+    assert!(counter.load(Ordering::SeqCst) < a.len()); // should not have visited every single one
+}
+
+#[test]
+pub fn check_partial_cmp_short_circuit() {
+    let a = vec![0; 1024];
+    let mut b = a.clone();
+    b[42] = 1;
+
+    let counter = AtomicUsize::new(0);
+    let result =
+        a.par_iter().inspect(|_| { counter.fetch_add(1, Ordering::SeqCst); }).partial_cmp(&b);
+    assert!(result == Some(::std::cmp::Ordering::Less));
+    assert!(counter.load(Ordering::SeqCst) < a.len()); // should not have visited every single one
+}
+
+#[test]
+pub fn check_partial_cmp_nan_short_circuit() {
+    let a = vec![0.0; 1024];
+    let mut b = a.clone();
+    b[42] = f64::NAN;
+
+    let counter = AtomicUsize::new(0);
+    let result =
+        a.par_iter().inspect(|_| { counter.fetch_add(1, Ordering::SeqCst); }).partial_cmp(&b);
+    assert!(result == None);
+    assert!(counter.load(Ordering::SeqCst) < a.len()); // should not have visited every single one
+}
+
+#[test]
+pub fn check_partial_cmp_direct() {
+    let a = (0..1024).into_par_iter();
+    let b = (0..1024).into_par_iter();
+
+    let result = a.partial_cmp(b);
+
+    assert!(result == Some(::std::cmp::Ordering::Equal));
+}
+
+#[test]
+pub fn check_partial_cmp_to_seq() {
+    let par_result = (0..1024).into_par_iter().partial_cmp(0..1024);
+    let seq_result = (0..1024).partial_cmp(0..1024);
+    assert_eq!(par_result, seq_result);
+}
+
+#[test]
+pub fn check_partial_cmp_rng_to_seq() {
+    use rand::{Rng, SeedableRng, XorShiftRng};
+
+    let mut rng = XorShiftRng::from_seed([9346, 26355, 87943, 28346]);
+    let a: Vec<i32> = rng.gen_iter().take(1024).collect();
+    let b: Vec<i32> = rng.gen_iter().take(1024).collect();
+    for i in 0..a.len() {
+        let par_result = a[i..].par_iter().partial_cmp(b[i..].par_iter());
+        let seq_result = a[i..].iter().partial_cmp(b[i..].iter());
+
+        assert_eq!(par_result, seq_result);
+    }
+}
+
+#[test]
+pub fn check_partial_cmp_lt_direct() {
+    let a = (0..1024).into_par_iter();
+    let b = (1..1024).into_par_iter();
+
+    let result = a.partial_cmp(b);
+
+    assert!(result == Some(::std::cmp::Ordering::Less));
+}
+
+#[test]
+pub fn check_partial_cmp_lt_to_seq() {
+    let par_result = (0..1024).into_par_iter().partial_cmp(1..1024);
+    let seq_result = (0..1024).partial_cmp(1..1024);
+    assert_eq!(par_result, seq_result);
+}
+
+#[test]
+pub fn check_partial_cmp_gt_direct() {
+    let a = (1..1024).into_par_iter();
+    let b = (0..1024).into_par_iter();
+
+    let result = a.partial_cmp(b);
+
+    assert!(result == Some(::std::cmp::Ordering::Greater));
+}
+
+#[test]
+pub fn check_partial_cmp_gt_to_seq() {
+    let par_result = (1..1024).into_par_iter().partial_cmp(0..1024);
+    let seq_result = (1..1024).partial_cmp(0..1024);
+    assert_eq!(par_result, seq_result);
+}
+
+#[test]
+pub fn check_partial_cmp_none_direct() {
+    let a = vec![f64::NAN, 0.0];
+    let b = vec![0.0, 1.0];
+
+    let result = a.par_iter().partial_cmp(b.par_iter());
+
+    assert!(result == None);
+}
+
+#[test]
+pub fn check_partial_cmp_none_to_seq() {
+    let a = vec![f64::NAN, 0.0];
+    let b = vec![0.0, 1.0];
+
+    let par_result = a.par_iter().partial_cmp(b.par_iter());
+    let seq_result = a.iter().partial_cmp(b.iter());
+
+    assert_eq!(par_result, seq_result);
+}
+
+#[test]
+pub fn check_partial_cmp_late_nan_direct() {
+    let a = vec![0.0, f64::NAN];
+    let b = vec![1.0, 1.0];
+
+    let result = a.par_iter().partial_cmp(b.par_iter());
+
+    assert!(result == Some(::std::cmp::Ordering::Less));
+}
+
+#[test]
+pub fn check_partial_cmp_late_nan_to_seq() {
+    let a = vec![0.0, f64::NAN];
+    let b = vec![1.0, 1.0];
+
+    let par_result = a.par_iter().partial_cmp(b.par_iter());
+    let seq_result = a.iter().partial_cmp(b.iter());
+
+    assert_eq!(par_result, seq_result);
+}
+
+#[test]
+pub fn check_cmp_lengths() {
+    // comparisons should consider length if they are otherwise equal
+    let a = vec![0; 1024];
+    let b = vec![0; 1025];
+
+    assert_eq!(a.par_iter().cmp(&b), a.iter().cmp(&b));
+    assert_eq!(a.par_iter().partial_cmp(&b), a.iter().partial_cmp(&b));
+}
+
+
+#[test]
+pub fn check_eq_direct() {
+    let a = (0..1024).into_par_iter();
+    let b = (0..1024).into_par_iter();
+
+    let result = a.eq(b);
+
+    assert!(result);
+}
+
+#[test]
+pub fn check_eq_to_seq() {
+    let par_result = (0..1024).into_par_iter().eq((0..1024).into_par_iter());
+    let seq_result = (0..1024).eq(0..1024);
+
+    assert_eq!(par_result, seq_result);
+}
+
+#[test]
+pub fn check_ne_direct() {
+    let a = (0..1024).into_par_iter();
+    let b = (1..1024).into_par_iter();
+
+    let result = a.ne(b);
+
+    assert!(result);
+}
+
+#[test]
+pub fn check_ne_to_seq() {
+    let par_result = (0..1024).into_par_iter().ne((1..1025).into_par_iter());
+    let seq_result = (0..1024).ne(1..1025);
+
+    assert_eq!(par_result, seq_result);
+}
+
+#[test]
+pub fn check_ne_lengths() {
+    // equality should consider length too
+    let a = vec![0; 1024];
+    let b = vec![0; 1025];
+
+    assert_eq!(a.par_iter().eq(&b), a.iter().eq(&b));
+    assert_eq!(a.par_iter().ne(&b), a.iter().ne(&b));
+}
+
+#[test]
+pub fn check_lt_direct() {
+    assert!((0..1024).into_par_iter().lt(1..1024));
+    assert!(!(1..1024).into_par_iter().lt(0..1024));
+}
+
+#[test]
+pub fn check_lt_to_seq() {
+    let par_result = (0..1024).into_par_iter().lt((1..1024).into_par_iter());
+    let seq_result = (0..1024).lt(1..1024);
+
+    assert_eq!(par_result, seq_result);
+}
+
+#[test]
+pub fn check_le_equal_direct() {
+    assert!((0..1024).into_par_iter().le((0..1024).into_par_iter()));
+}
+
+#[test]
+pub fn check_le_equal_to_seq() {
+    let par_result = (0..1024).into_par_iter().le((0..1024).into_par_iter());
+    let seq_result = (0..1024).le(0..1024);
+
+    assert_eq!(par_result, seq_result);
+}
+
+#[test]
+pub fn check_le_less_direct() {
+    assert!((0..1024).into_par_iter().le((1..1024).into_par_iter()));
+}
+
+#[test]
+pub fn check_le_less_to_seq() {
+    let par_result = (0..1024).into_par_iter().le((1..1024).into_par_iter());
+    let seq_result = (0..1024).le(1..1024);
+
+    assert_eq!(par_result, seq_result);
+}
+
+#[test]
+pub fn check_gt_direct() {
+    assert!((1..1024).into_par_iter().gt((0..1024).into_par_iter()));
+}
+
+#[test]
+pub fn check_gt_to_seq() {
+    let par_result = (1..1024).into_par_iter().gt((0..1024).into_par_iter());
+    let seq_result = (1..1024).gt(0..1024);
+
+    assert_eq!(par_result, seq_result);
+}
+
+#[test]
+pub fn check_ge_equal_direct() {
+    assert!((0..1024).into_par_iter().ge((0..1024).into_par_iter()));
+}
+
+#[test]
+pub fn check_ge_equal_to_seq() {
+    let par_result = (0..1024).into_par_iter().ge((0..1024).into_par_iter());
+    let seq_result = (0..1024).ge(0..1024);
+
+    assert_eq!(par_result, seq_result);
+}
+
+#[test]
+pub fn check_ge_greater_direct() {
+    assert!((1..1024).into_par_iter().ge((0..1024).into_par_iter()));
+}
+
+#[test]
+pub fn check_ge_greater_to_seq() {
+    let par_result = (1..1024).into_par_iter().ge((0..1024).into_par_iter());
+    let seq_result = (1..1024).ge(0..1024);
+
+    assert_eq!(par_result, seq_result);
+}
+
+#[test]
+pub fn check_zip() {
+    let mut a: Vec<usize> = (0..1024).rev().collect();
+    let b: Vec<usize> = (0..1024).collect();
+
+    a.par_iter_mut().zip(&b[..]).for_each(|(a, &b)| *a += b);
+
+    assert!(a.iter().all(|&x| x == a.len() - 1));
+}
+
+#[test]
+pub fn check_zip_into_par_iter() {
+    let mut a: Vec<usize> = (0..1024).rev().collect();
+    let b: Vec<usize> = (0..1024).collect();
+
+    a.par_iter_mut()
+     .zip(&b) // here we rely on &b iterating over &usize
+     .for_each(|(a, &b)| *a += b);
+
+    assert!(a.iter().all(|&x| x == a.len() - 1));
+}
+
+#[test]
+pub fn check_zip_into_mut_par_iter() {
+    let a: Vec<usize> = (0..1024).rev().collect();
+    let mut b: Vec<usize> = (0..1024).collect();
+
+    a.par_iter().zip(&mut b).for_each(|(&a, b)| *b += a);
+
+    assert!(b.iter().all(|&x| x == b.len() - 1));
+}
+
+#[test]
+pub fn check_zip_range() {
+    let mut a: Vec<usize> = (0..1024).rev().collect();
+
+    a.par_iter_mut().zip(0usize..1024).for_each(|(a, b)| *a += b);
+
+    assert!(a.iter().all(|&x| x == a.len() - 1));
+}
+
+#[test]
+pub fn check_sum_filtered_ints() {
+    let a: Vec<i32> = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
+    let par_sum_evens: i32 = a.par_iter().filter(|&x| (x & 1) == 0).sum();
+    let seq_sum_evens = a.iter()
+        .filter(|&x| (x & 1) == 0)
+        .map(|&x| x)
+        .fold(0, |a, b| a + b);
+    assert_eq!(par_sum_evens, seq_sum_evens);
+}
+
+#[test]
+pub fn check_sum_filtermap_ints() {
+    let a: Vec<i32> = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
+    let par_sum_evens: f32 =
+        a.par_iter().filter_map(|&x| if (x & 1) == 0 { Some(x as f32) } else { None }).sum();
+    let seq_sum_evens = a.iter()
+        .filter_map(|&x| if (x & 1) == 0 { Some(x as f32) } else { None })
+        .fold(0.0, |a, b| a + b);
+    assert_eq!(par_sum_evens, seq_sum_evens);
+}
+
+#[test]
+pub fn check_flat_map_nested_ranges() {
+    // FIXME -- why are precise type hints required on the integers here?
+
+    let v: i32 = (0_i32..10)
+        .into_par_iter()
+        .flat_map(|i| (0_i32..10).into_par_iter().map(move |j| (i, j)))
+        .map(|(i, j)| i * j)
+        .sum();
+
+    let w = (0_i32..10)
+        .flat_map(|i| (0_i32..10).map(move |j| (i, j)))
+        .map(|(i, j)| i * j)
+        .fold(0, |i, j| i + j);
+
+    assert_eq!(v, w);
+}
+
+#[test]
+pub fn check_empty_flat_map_sum() {
+    let a: Vec<i32> = (0..1024).collect();
+    let empty = &a[..0];
+
+    // empty on the inside
+    let b: i32 = a.par_iter().flat_map(|_| empty).sum();
+    assert_eq!(b, 0);
+
+    // empty on the outside
+    let c: i32 = empty.par_iter().flat_map(|_| a.par_iter()).sum();
+    assert_eq!(c, 0);
+}
+
+#[test]
+pub fn check_slice_split() {
+    let v: Vec<_> = (0..1000).collect();
+    for m in 1..100 {
+        let a: Vec<_> = v.split(|x| x % m == 0).collect();
+        let b: Vec<_> = v.par_split(|x| x % m == 0).collect();
+        assert_eq!(a, b);
+    }
+
+    // same as std::slice::split() examples
+    let slice = [10, 40, 33, 20];
+    let v: Vec<_> = slice.par_split(|num| num % 3 == 0).collect();
+    assert_eq!(v, &[&slice[..2], &slice[3..]]);
+
+    let slice = [10, 40, 33];
+    let v: Vec<_> = slice.par_split(|num| num % 3 == 0).collect();
+    assert_eq!(v, &[&slice[..2], &slice[..0]]);
+
+    let slice = [10, 6, 33, 20];
+    let v: Vec<_> = slice.par_split(|num| num % 3 == 0).collect();
+    assert_eq!(v, &[&slice[..1], &slice[..0], &slice[3..]]);
+}
+
+#[test]
+pub fn check_slice_split_mut() {
+    let mut v1: Vec<_> = (0..1000).collect();
+    let mut v2 = v1.clone();
+    for m in 1..100 {
+        let a: Vec<_> = v1.split_mut(|x| x % m == 0).collect();
+        let b: Vec<_> = v2.par_split_mut(|x| x % m == 0).collect();
+        assert_eq!(a, b);
+    }
+
+    // same as std::slice::split_mut() example
+    let mut v = [10, 40, 30, 20, 60, 50];
+    v.par_split_mut(|num| num % 3 == 0).for_each(|group| {
+        group[0] = 1;
+    });
+    assert_eq!(v, [1, 40, 30, 1, 60, 1]);
+}
+
+#[test]
+pub fn check_chunks() {
+    let a: Vec<i32> = vec![1, 5, 10, 4, 100, 3, 1000, 2, 10000, 1];
+    let par_sum_product_pairs: i32 =
+        a.par_chunks(2).map(|c| c.iter().map(|&x| x).fold(1, |i, j| i * j)).sum();
+    let seq_sum_product_pairs =
+        a.chunks(2).map(|c| c.iter().map(|&x| x).fold(1, |i, j| i * j)).fold(0, |i, j| i + j);
+    assert_eq!(par_sum_product_pairs, 12345);
+    assert_eq!(par_sum_product_pairs, seq_sum_product_pairs);
+
+    let par_sum_product_triples: i32 =
+        a.par_chunks(3).map(|c| c.iter().map(|&x| x).fold(1, |i, j| i * j)).sum();
+    let seq_sum_product_triples =
+        a.chunks(3).map(|c| c.iter().map(|&x| x).fold(1, |i, j| i * j)).fold(0, |i, j| i + j);
+    assert_eq!(par_sum_product_triples, 5_0 + 12_00 + 2_000_0000 + 1);
+    assert_eq!(par_sum_product_triples, seq_sum_product_triples);
+}
+
+#[test]
+pub fn check_chunks_mut() {
+    let mut a: Vec<i32> = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
+    let mut b: Vec<i32> = a.clone();
+    a.par_chunks_mut(2).for_each(|c| c[0] = c.iter().map(|&x| x).fold(0, |i, j| i + j));
+    b.chunks_mut(2).map(|c| c[0] = c.iter().map(|&x| x).fold(0, |i, j| i + j)).count();
+    assert_eq!(a, &[3, 2, 7, 4, 11, 6, 15, 8, 19, 10]);
+    assert_eq!(a, b);
+
+    let mut a: Vec<i32> = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
+    let mut b: Vec<i32> = a.clone();
+    a.par_chunks_mut(3).for_each(|c| c[0] = c.iter().map(|&x| x).fold(0, |i, j| i + j));
+    b.chunks_mut(3).map(|c| c[0] = c.iter().map(|&x| x).fold(0, |i, j| i + j)).count();
+    assert_eq!(a, &[6, 2, 3, 15, 5, 6, 24, 8, 9, 10]);
+    assert_eq!(a, b);
+}
+
+#[test]
+pub fn check_windows() {
+    let a: Vec<i32> = (0..1024).collect();
+    let par: Vec<_> = a.par_windows(2).collect();
+    let seq: Vec<_> = a.windows(2).collect();
+    assert_eq!(par, seq);
+
+    let par: Vec<_> = a.par_windows(100).collect();
+    let seq: Vec<_> = a.windows(100).collect();
+    assert_eq!(par, seq);
+
+    let par: Vec<_> = a.par_windows(1_000_000).collect();
+    let seq: Vec<_> = a.windows(1_000_000).collect();
+    assert_eq!(par, seq);
+
+    let par: Vec<_> = a.par_windows(2)
+        .chain(a.par_windows(1_000_000))
+        .zip(a.par_windows(2))
+        .collect();
+    let seq: Vec<_> = a.windows(2)
+        .chain(a.windows(1_000_000))
+        .zip(a.windows(2))
+        .collect();
+    assert_eq!(par, seq);
+}
+
+#[test]
+pub fn check_options() {
+    let mut a = vec![None, Some(1), None, None, Some(2), Some(4)];
+
+    assert_eq!(7, a.par_iter().flat_map(|opt| opt).sum());
+    assert_eq!(7, a.par_iter().flat_map(|opt| opt).sum());
+
+    a.par_iter_mut().flat_map(|opt| opt).for_each(|x| *x = *x * *x);
+
+    assert_eq!(21, a.into_par_iter().flat_map(|opt| opt).sum());
+}
+
+#[test]
+pub fn check_results() {
+    let mut a = vec![Err(()), Ok(1i32), Err(()), Err(()), Ok(2), Ok(4)];
+
+    assert_eq!(7, a.par_iter().flat_map(|res| res).sum());
+
+    assert_eq!(Err::<i32, ()>(()), a.par_iter().cloned().sum());
+    assert_eq!(Ok(7),
+               a.par_iter()
+                   .cloned()
+                   .filter(Result::is_ok)
+                   .sum());
+
+    assert_eq!(Err::<i32, ()>(()), a.par_iter().cloned().product());
+    assert_eq!(Ok(8),
+               a.par_iter()
+                   .cloned()
+                   .filter(Result::is_ok)
+                   .product());
+
+    a.par_iter_mut().flat_map(|res| res).for_each(|x| *x = *x * *x);
+
+    assert_eq!(21, a.into_par_iter().flat_map(|res| res).sum());
+}
+
+#[test]
+pub fn check_binary_heap() {
+    use std::collections::BinaryHeap;
+
+    let a: BinaryHeap<i32> = (0..10).collect();
+
+    assert_eq!(45, a.par_iter().sum());
+    assert_eq!(45, a.into_par_iter().sum());
+}
+
+#[test]
+pub fn check_btree_map() {
+    use std::collections::BTreeMap;
+
+    let mut a: BTreeMap<i32, i32> = (0..10).map(|i| (i, -i)).collect();
+
+    assert_eq!(45, a.par_iter().map(|(&k, _)| k).sum());
+    assert_eq!(-45, a.par_iter().map(|(_, &v)| v).sum());
+
+    a.par_iter_mut().for_each(|(k, v)| *v += *k);
+
+    assert_eq!(0, a.into_par_iter().map(|(_, v)| v).sum());
+}
+
+#[test]
+pub fn check_btree_set() {
+    use std::collections::BTreeSet;
+
+    let a: BTreeSet<i32> = (0..10).collect();
+
+    assert_eq!(45, a.par_iter().sum());
+    assert_eq!(45, a.into_par_iter().sum());
+}
+
+#[test]
+pub fn check_hash_map() {
+    use std::collections::HashMap;
+
+    let mut a: HashMap<i32, i32> = (0..10).map(|i| (i, -i)).collect();
+
+    assert_eq!(45, a.par_iter().map(|(&k, _)| k).sum());
+    assert_eq!(-45, a.par_iter().map(|(_, &v)| v).sum());
+
+    a.par_iter_mut().for_each(|(k, v)| *v += *k);
+
+    assert_eq!(0, a.into_par_iter().map(|(_, v)| v).sum());
+}
+
+#[test]
+pub fn check_hash_set() {
+    use std::collections::HashSet;
+
+    let a: HashSet<i32> = (0..10).collect();
+
+    assert_eq!(45, a.par_iter().sum());
+    assert_eq!(45, a.into_par_iter().sum());
+}
+
+#[test]
+pub fn check_linked_list() {
+    use std::collections::LinkedList;
+
+    let mut a: LinkedList<i32> = (0..10).collect();
+
+    assert_eq!(45, a.par_iter().sum());
+
+    a.par_iter_mut().for_each(|x| *x = -*x);
+
+    assert_eq!(-45, a.into_par_iter().sum());
+}
+
+#[test]
+pub fn check_vec_deque() {
+    use std::collections::VecDeque;
+
+    let mut a: VecDeque<i32> = (0..10).collect();
+
+    // try to get it to wrap around
+    a.drain(..5);
+    a.extend(0..5);
+
+    assert_eq!(45, a.par_iter().sum());
+
+    a.par_iter_mut().for_each(|x| *x = -*x);
+
+    assert_eq!(-45, a.into_par_iter().sum());
+}
+
+#[test]
+pub fn check_chain() {
+    let mut res = vec![];
+
+    // stays indexed in the face of madness
+    Some(0)
+        .into_par_iter()
+        .chain(Ok::<_, ()>(1))
+        .chain(1..4)
+        .chain(Err("huh?"))
+        .chain(None)
+        .chain(vec![5, 8, 13])
+        .map(|x| (x as u8 + b'a') as char)
+        .chain(vec!['x', 'y', 'z'])
+        .zip((0i32..1000).into_par_iter().map(|x| -x))
+        .enumerate()
+        .map(|(a, (b, c))| (a, b, c))
+        .chain(None)
+        .collect_into(&mut res);
+
+    assert_eq!(res,
+               vec![(0, 'a', 0),
+                    (1, 'b', -1),
+                    (2, 'b', -2),
+                    (3, 'c', -3),
+                    (4, 'd', -4),
+                    (5, 'f', -5),
+                    (6, 'i', -6),
+                    (7, 'n', -7),
+                    (8, 'x', -8),
+                    (9, 'y', -9),
+                    (10, 'z', -10)]);
+
+    // unindexed is ok too
+    let res: Vec<i32> = Some(1i32)
+        .into_par_iter()
+        .chain((2i32..4)
+                   .into_par_iter()
+                   .chain(vec![5, 6, 7, 8, 9])
+                   .chain(Some((10, 100)).into_par_iter().flat_map(|(a, b)| a..b))
+                   .filter(|x| x & 1 == 1))
+        .collect();
+    let other: Vec<i32> = (0..100).filter(|x| x & 1 == 1).collect();
+    assert_eq!(res, other);
+
+    // chain collect is ok with the "fake" specialization
+    let res: Vec<i32> = Some(1i32).into_par_iter().chain(None).collect();
+    assert_eq!(res, &[1]);
+}
+
+
+#[test]
+pub fn check_count() {
+    let c0 = (0_u32..24 * 1024).filter(|i| i % 2 == 0).count();
+    let c1 = (0_u32..24 * 1024).into_par_iter().filter(|i| i % 2 == 0).count();
+    assert_eq!(c0, c1);
+}
+
+
+#[test]
+pub fn find_any() {
+    let a: Vec<i32> = (0..1024).collect();
+
+    assert!(a.par_iter().find_any(|&&x| x % 42 == 41).is_some());
+    assert_eq!(a.par_iter().find_any(|&&x| x % 19 == 1 && x % 53 == 0),
+               Some(&742_i32));
+    assert_eq!(a.par_iter().find_any(|&&x| x < 0), None);
+
+    assert!(a.par_iter().position_any(|&x| x % 42 == 41).is_some());
+    assert_eq!(a.par_iter().position_any(|&x| x % 19 == 1 && x % 53 == 0),
+               Some(742_usize));
+    assert_eq!(a.par_iter().position_any(|&x| x < 0), None);
+
+    assert!(a.par_iter().any(|&x| x > 1000));
+    assert!(!a.par_iter().any(|&x| x < 0));
+
+    assert!(!a.par_iter().all(|&x| x > 1000));
+    assert!(a.par_iter().all(|&x| x >= 0));
+}
+
+#[test]
+pub fn find_first_or_last() {
+    let a: Vec<i32> = (0..1024).collect();
+
+    assert_eq!(a.par_iter().find_first(|&&x| x % 42 == 41), Some(&41_i32));
+    assert_eq!(a.par_iter().find_first(|&&x| x % 19 == 1 && x % 53 == 0),
+               Some(&742_i32));
+    assert_eq!(a.par_iter().find_first(|&&x| x < 0), None);
+
+    assert_eq!(a.par_iter().position_first(|&x| x % 42 == 41),
+               Some(41_usize));
+    assert_eq!(a.par_iter().position_first(|&x| x % 19 == 1 && x % 53 == 0),
+               Some(742_usize));
+    assert_eq!(a.par_iter().position_first(|&x| x < 0), None);
+
+    assert_eq!(a.par_iter().find_last(|&&x| x % 42 == 41), Some(&1007_i32));
+    assert_eq!(a.par_iter().find_last(|&&x| x % 19 == 1 && x % 53 == 0),
+               Some(&742_i32));
+    assert_eq!(a.par_iter().find_last(|&&x| x < 0), None);
+
+    assert_eq!(a.par_iter().position_last(|&x| x % 42 == 41),
+               Some(1007_usize));
+    assert_eq!(a.par_iter().position_last(|&x| x % 19 == 1 && x % 53 == 0),
+               Some(742_usize));
+    assert_eq!(a.par_iter().position_last(|&x| x < 0), None);
+}
+
+#[test]
+pub fn check_find_not_present() {
+    let counter = AtomicUsize::new(0);
+    let value: Option<i32> =
+        (0_i32..2048).into_par_iter().find_any(|&p| {
+                                                   counter.fetch_add(1, Ordering::SeqCst);
+                                                   p >= 2048
+                                               });
+    assert!(value.is_none());
+    assert!(counter.load(Ordering::SeqCst) == 2048); // should have visited every single one
+}
+
+#[test]
+pub fn check_find_is_present() {
+    let counter = AtomicUsize::new(0);
+    let value: Option<i32> =
+        (0_i32..2048).into_par_iter().find_any(|&p| {
+                                                   counter.fetch_add(1, Ordering::SeqCst);
+                                                   p >= 1024 && p < 1096
+                                               });
+    let q = value.unwrap();
+    assert!(q >= 1024 && q < 1096);
+    assert!(counter.load(Ordering::SeqCst) < 2048); // should not have visited every single one
+}
+
+#[test]
+pub fn check_while_some() {
+    let value = (0_i32..2048)
+        .into_par_iter()
+        .map(Some)
+        .while_some()
+        .max();
+    assert_eq!(value, Some(2047));
+
+    let counter = AtomicUsize::new(0);
+    let value = (0_i32..2048)
+        .into_par_iter()
+        .map(|x| {
+                 counter.fetch_add(1, Ordering::SeqCst);
+                 if x < 1024 { Some(x) } else { None }
+             })
+        .while_some()
+        .max();
+    assert!(value < Some(1024));
+    assert!(counter.load(Ordering::SeqCst) < 2048); // should not have visited every single one
+}
+
+#[test]
+pub fn par_iter_collect_option() {
+    let a: Option<Vec<_>> = (0_i32..2048).map(Some).collect();
+    let b: Option<Vec<_>> = (0_i32..2048).into_par_iter().map(Some).collect();
+    assert_eq!(a, b);
+
+    let c: Option<Vec<_>> = (0_i32..2048)
+        .into_par_iter()
+        .map(|x| if x == 1234 { None } else { Some(x) })
+        .collect();
+    assert_eq!(c, None);
+}
+
+#[test]
+pub fn par_iter_collect_result() {
+    let a: Result<Vec<_>, ()> = (0_i32..2048).map(Ok).collect();
+    let b: Result<Vec<_>, ()> = (0_i32..2048).into_par_iter().map(Ok).collect();
+    assert_eq!(a, b);
+
+    let c: Result<Vec<_>, _> = (0_i32..2048)
+        .into_par_iter()
+        .map(|x| if x == 1234 { Err(x) } else { Ok(x) })
+        .collect();
+    assert_eq!(c, Err(1234));
+
+    let d: Result<Vec<_>, _> = (0_i32..2048)
+        .into_par_iter()
+        .map(|x| if x % 100 == 99 { Err(x) } else { Ok(x) })
+        .collect();
+    assert_eq!(d.map_err(|x| x % 100), Err(99));
+}
+
+#[test]
+pub fn par_iter_collect() {
+    let a: Vec<i32> = (0..1024).collect();
+    let b: Vec<i32> = a.par_iter().map(|&i| i + 1).collect();
+    let c: Vec<i32> = (0..1024).map(|i| i + 1).collect();
+    assert_eq!(b, c);
+}
+
+#[test]
+pub fn par_iter_collect_vecdeque() {
+    let a: Vec<i32> = (0..1024).collect();
+    let b: VecDeque<i32> = a.par_iter().cloned().collect();
+    let c: VecDeque<i32> = a.iter().cloned().collect();
+    assert_eq!(b, c);
+}
+
+#[test]
+pub fn par_iter_collect_binaryheap() {
+    let a: Vec<i32> = (0..1024).collect();
+    let mut b: BinaryHeap<i32> = a.par_iter().cloned().collect();
+    assert_eq!(b.peek(), Some(&1023));
+    assert_eq!(b.len(), 1024);
+    for n in (0..1024).rev() {
+        assert_eq!(b.pop(), Some(n));
+        assert_eq!(b.len() as i32, n);
+    }
+}
+
+#[test]
+pub fn par_iter_collect_hashmap() {
+    let a: Vec<i32> = (0..1024).collect();
+    let b: HashMap<i32, String> = a.par_iter().map(|&i| (i, format!("{}", i))).collect();
+    assert_eq!(&b[&3], "3");
+    assert_eq!(b.len(), 1024);
+}
+
+#[test]
+pub fn par_iter_collect_hashset() {
+    let a: Vec<i32> = (0..1024).collect();
+    let b: HashSet<i32> = a.par_iter().cloned().collect();
+    assert_eq!(b.len(), 1024);
+}
+
+#[test]
+pub fn par_iter_collect_btreemap() {
+    let a: Vec<i32> = (0..1024).collect();
+    let b: BTreeMap<i32, String> = a.par_iter().map(|&i| (i, format!("{}", i))).collect();
+    assert_eq!(&b[&3], "3");
+    assert_eq!(b.len(), 1024);
+}
+
+#[test]
+pub fn par_iter_collect_btreeset() {
+    let a: Vec<i32> = (0..1024).collect();
+    let b: BTreeSet<i32> = a.par_iter().cloned().collect();
+    assert_eq!(b.len(), 1024);
+}
+
+#[test]
+pub fn par_iter_collect_linked_list() {
+    let a: Vec<i32> = (0..1024).collect();
+    let b: LinkedList<_> = a.par_iter().map(|&i| (i, format!("{}", i))).collect();
+    let c: LinkedList<_> = a.iter().map(|&i| (i, format!("{}", i))).collect();
+    assert_eq!(b, c);
+}
+
+#[test]
+pub fn par_iter_collect_linked_list_flat_map_filter() {
+    let b: LinkedList<i32> = (0_i32..1024)
+        .into_par_iter()
+        .flat_map(|i| (0..i))
+        .filter(|&i| i % 2 == 0)
+        .collect();
+    let c: LinkedList<i32> = (0_i32..1024).flat_map(|i| (0..i)).filter(|&i| i % 2 == 0).collect();
+    assert_eq!(b, c);
+}
+
+#[test]
+pub fn par_iter_collect_cows() {
+    use std::borrow::Cow;
+
+    let s = "Fearless Concurrency with Rust";
+
+    // Collects `i32` into a `Vec`
+    let a: Cow<[i32]> = (0..1024).collect();
+    let b: Cow<[i32]> = a.par_iter().cloned().collect();
+    assert_eq!(a, b);
+
+    // Collects `char` into a `String`
+    let a: Cow<str> = s.chars().collect();
+    let b: Cow<str> = s.par_chars().collect();
+    assert_eq!(a, b);
+
+    // Collects `str` into a `String`
+    let a: Cow<str> = s.split_whitespace().collect();
+    let b: Cow<str> = s.par_split_whitespace().collect();
+    assert_eq!(a, b);
+
+    // Collects `String` into a `String`
+    let a: Cow<str> = s.split_whitespace().map(|s| s.to_owned()).collect();
+    let b: Cow<str> = s.par_split_whitespace().map(|s| s.to_owned()).collect();
+    assert_eq!(a, b);
+}
+
+#[test]
+pub fn par_iter_unindexed_flat_map() {
+    let b: Vec<i64> = (0_i64..1024).into_par_iter().flat_map(|i| Some(i)).collect();
+    let c: Vec<i64> = (0_i64..1024).flat_map(|i| Some(i)).collect();
+    assert_eq!(b, c);
+}
+
+#[test]
+fn min_max() {
+    let mut rng = XorShiftRng::from_seed([14159, 26535, 89793, 23846]);
+    let a: Vec<i32> = rng.gen_iter().take(1024).collect();
+    for i in 0..a.len() + 1 {
+        let slice = &a[..i];
+        assert_eq!(slice.par_iter().min(), slice.iter().min());
+        assert_eq!(slice.par_iter().max(), slice.iter().max());
+    }
+}
+
+#[test]
+fn min_max_by() {
+    let mut rng = XorShiftRng::from_seed([14159, 26535, 89793, 23846]);
+    // Make sure there are duplicate keys, for testing sort stability
+    let r: Vec<i32> = rng.gen_iter().take(512).collect();
+    let a: Vec<(i32, u16)> = r.iter()
+        .chain(&r)
+        .cloned()
+        .zip(0..)
+        .collect();
+    for i in 0..a.len() + 1 {
+        let slice = &a[..i];
+        assert_eq!(slice.par_iter().min_by(|x, y| x.0.cmp(&y.0)),
+                   slice.iter().min_by(|x, y| x.0.cmp(&y.0)));
+        assert_eq!(slice.par_iter().max_by(|x, y| x.0.cmp(&y.0)),
+                   slice.iter().max_by(|x, y| x.0.cmp(&y.0)));
+    }
+}
+
+#[test]
+fn min_max_by_key() {
+    let mut rng = XorShiftRng::from_seed([14159, 26535, 89793, 23846]);
+    // Make sure there are duplicate keys, for testing sort stability
+    let r: Vec<i32> = rng.gen_iter().take(512).collect();
+    let a: Vec<(i32, u16)> = r.iter()
+        .chain(&r)
+        .cloned()
+        .zip(0..)
+        .collect();
+    for i in 0..a.len() + 1 {
+        let slice = &a[..i];
+        assert_eq!(slice.par_iter().min_by_key(|x| x.0),
+                   slice.iter().min_by_key(|x| x.0));
+        assert_eq!(slice.par_iter().max_by_key(|x| x.0),
+                   slice.iter().max_by_key(|x| x.0));
+    }
+}
+
+#[test]
+fn check_rev() {
+    let a: Vec<usize> = (0..1024).rev().collect();
+    let b: Vec<usize> = (0..1024).collect();
+
+    assert!(a.par_iter()
+                .rev()
+                .zip(b)
+                .all(|(&a, b)| a == b));
+}
+
+#[test]
+fn scope_mix() {
+    let counter_p = &AtomicUsize::new(0);
+    scope(|s| {
+        s.spawn(move |s| { divide_and_conquer(s, counter_p, 1024); });
+        s.spawn(move |_| {
+                    let a: Vec<i32> = (0..1024).collect();
+                    let r1 = a.par_iter().map(|&i| i + 1).reduce_with(|i, j| i + j);
+                    let r2 = a.iter().map(|&i| i + 1).fold(0, |a, b| a + b);
+                    assert_eq!(r1.unwrap(), r2);
+                });
+    });
+}
+
+fn divide_and_conquer<'scope>(scope: &Scope<'scope>, counter: &'scope AtomicUsize, size: usize) {
+    if size > 1 {
+        scope.spawn(move |scope| divide_and_conquer(scope, counter, size / 2));
+        scope.spawn(move |scope| divide_and_conquer(scope, counter, size / 2));
+    } else {
+        // count the leaves
+        counter.fetch_add(1, Ordering::SeqCst);
+    }
+}
+
+#[test]
+fn check_split() {
+    use std::ops::Range;
+
+    let a = (0..1024).into_par_iter();
+
+    let b = split(0..1024, |Range { start, end }| {
+        let mid = (end - start) / 2;
+        if mid > start {
+            (start..mid, Some(mid..end))
+        } else {
+            (start..end, None)
+        }
+    })
+            .flat_map(|range| range);
+
+    assert_eq!(a.collect::<Vec<_>>(), b.collect::<Vec<_>>());
+}
+
+#[test]
+fn check_lengths() {
+    fn check(min: usize, max: usize) {
+        let range = 0..1024 * 1024;
+
+        // Check against normalized values.
+        let min_check = cmp::min(cmp::max(min, 1), range.len());
+        let max_check = cmp::max(max, min_check.saturating_add(min_check - 1));
+
+        assert!(range.into_par_iter()
+                    .with_min_len(min)
+                    .with_max_len(max)
+                    .fold(|| 0, |count, _| count + 1)
+                    .all(|c| c >= min_check && c <= max_check),
+                "check_lengths failed {:?} -> {:?} ",
+                (min, max),
+                (min_check, max_check));
+    }
+
+    let lengths = [0, 1, 10, 100, 1000, 10000, 100000, 1000000, usize::MAX];
+    for &min in &lengths {
+        for &max in &lengths {
+            check(min, max);
+        }
+    }
+}
+
+#[test]
+fn check_map_with() {
+    let (sender, receiver) = mpsc::channel();
+    let a: HashSet<_> = (0..1024).collect();
+
+    a.par_iter()
+        .cloned()
+        .map_with(sender, |s, i| s.send(i).unwrap())
+        .count();
+
+    let b: HashSet<_> = receiver.iter().collect();
+    assert_eq!(a, b);
+}
+
+#[test]
+fn check_fold_with() {
+    let (sender, receiver) = mpsc::channel();
+    let a: HashSet<_> = (0..1024).collect();
+
+    a.par_iter()
+        .cloned()
+        .fold_with(sender, |s, i| {
+            s.send(i).unwrap();
+            s
+        })
+        .count();
+
+    let b: HashSet<_> = receiver.iter().collect();
+    assert_eq!(a, b);
+}
+
+#[test]
+fn check_for_each_with() {
+    let (sender, receiver) = mpsc::channel();
+    let a: HashSet<_> = (0..1024).collect();
+
+    a.par_iter()
+        .cloned()
+        .for_each_with(sender, |s, i| s.send(i).unwrap());
+
+    let b: HashSet<_> = receiver.iter().collect();
+    assert_eq!(a, b);
+}
+
+#[test]
+fn check_extend_items() {
+    fn check<C>()
+        where C: Default + Eq + Debug
+               + Extend<i32> + for<'a> Extend<&'a i32>
+               + ParallelExtend<i32> + for<'a> ParallelExtend<&'a i32>
+    {
+        let mut serial = C::default();
+        let mut parallel = C::default();
+
+        // extend with references
+        let v: Vec<_> = (0..128).collect();
+        serial.extend(&v);
+        parallel.par_extend(&v);
+        assert_eq!(serial, parallel);
+
+        // extend with values
+        serial.extend(-128..0);
+        parallel.par_extend(-128..0);
+        assert_eq!(serial, parallel);
+    }
+
+    check::<BTreeSet<_>>();
+    check::<HashSet<_>>();
+    check::<LinkedList<_>>();
+    check::<Vec<_>>();
+    check::<VecDeque<_>>();
+}
+
+#[test]
+fn check_extend_heap() {
+    let mut serial: BinaryHeap<_> = Default::default();
+    let mut parallel: BinaryHeap<_> = Default::default();
+
+    // extend with references
+    let v: Vec<_> = (0..128).collect();
+    serial.extend(&v);
+    parallel.par_extend(&v);
+    assert_eq!(serial.clone().into_sorted_vec(), parallel.clone().into_sorted_vec());
+
+    // extend with values
+    serial.extend(-128..0);
+    parallel.par_extend(-128..0);
+    assert_eq!(serial.into_sorted_vec(), parallel.into_sorted_vec());
+}
+
+#[test]
+fn check_extend_pairs() {
+    fn check<C>()
+        where C: Default + Eq + Debug
+               + Extend<(usize, i32)> + for<'a> Extend<(&'a usize, &'a i32)>
+               + ParallelExtend<(usize, i32)> + for<'a> ParallelExtend<(&'a usize, &'a i32)>
+    {
+        let mut serial = C::default();
+        let mut parallel = C::default();
+
+        // extend with references
+        let m: HashMap<_, _> = (0..128).enumerate().collect();
+        serial.extend(&m);
+        parallel.par_extend(&m);
+        assert_eq!(serial, parallel);
+
+        // extend with values
+        let v: Vec<(_, _)> = (-128..0).enumerate().collect();
+        serial.extend(v.clone());
+        parallel.par_extend(v);
+        assert_eq!(serial, parallel);
+    }
+
+    check::<BTreeMap<usize, i32>>();
+    check::<HashMap<usize, i32>>();
+}
+
+#[test]
+fn check_unzip_into() {
+    let mut a = vec![];
+    let mut b = vec![];
+    (0..1024)
+        .into_par_iter()
+        .map(|i| i * i)
+        .enumerate()
+        .unzip_into(&mut a, &mut b);
+
+    let (c, d): (Vec<_>, Vec<_>) = (0..1024).map(|i| i * i).enumerate().unzip();
+    assert_eq!(a, c);
+    assert_eq!(b, d);
+}
+
+#[test]
+fn check_unzip() {
+    // indexed, unindexed
+    let (a, b): (Vec<_>, HashSet<_>) = (0..1024)
+        .into_par_iter()
+        .map(|i| i * i)
+        .enumerate()
+        .unzip();
+    let (c, d): (Vec<_>, HashSet<_>) = (0..1024).map(|i| i * i).enumerate().unzip();
+    assert_eq!(a, c);
+    assert_eq!(b, d);
+
+    // unindexed, indexed
+    let (a, b): (HashSet<_>, Vec<_>) = (0..1024)
+        .into_par_iter()
+        .map(|i| i * i)
+        .enumerate()
+        .unzip();
+    let (c, d): (HashSet<_>, Vec<_>) = (0..1024).map(|i| i * i).enumerate().unzip();
+    assert_eq!(a, c);
+    assert_eq!(b, d);
+
+    // indexed, indexed
+    let (a, b): (Vec<_>, Vec<_>) = (0..1024)
+        .into_par_iter()
+        .map(|i| i * i)
+        .enumerate()
+        .unzip();
+    let (c, d): (Vec<_>, Vec<_>) = (0..1024).map(|i| i * i).enumerate().unzip();
+    assert_eq!(a, c);
+    assert_eq!(b, d);
+
+    // unindexed producer
+    let (a, b): (Vec<_>, Vec<_>) = (0..1024)
+        .into_par_iter()
+        .filter_map(|i| Some((i, i * i)))
+        .unzip();
+    let (c, d): (Vec<_>, Vec<_>) = (0..1024).filter_map(|i| Some((i, i * i))).unzip();
+    assert_eq!(a, c);
+    assert_eq!(b, d);
+}
+
+#[test]
+fn check_partition() {
+    let (a, b): (Vec<_>, Vec<_>) = (0..1024).into_par_iter().partition(|&i| i % 3 == 0);
+    let (c, d): (Vec<_>, Vec<_>) = (0..1024).partition(|&i| i % 3 == 0);
+    assert_eq!(a, c);
+    assert_eq!(b, d);
+}
+
+#[test]
+fn check_partition_map() {
+    let input = "a b c 1 2 3 x y z";
+    let (a, b): (Vec<_>, String) = input
+        .par_split_whitespace()
+        .partition_map(|s| match s.parse::<i32>() {
+                           Ok(n) => Either::Left(n),
+                           Err(_) => Either::Right(s),
+                       });
+    assert_eq!(a, vec![1, 2, 3]);
+    assert_eq!(b, "abcxyz");
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/src/iter/unzip.rs
@@ -0,0 +1,388 @@
+use super::internal::*;
+use super::*;
+
+/// This trait abstracts the different ways we can "unzip" one parallel
+/// iterator into two distinct consumers, which we can handle almost
+/// identically apart from how to process the individual items.
+trait UnzipOp<T>: Sync + Send {
+    /// The type of item expected by the left consumer.
+    type Left: Send;
+
+    /// The type of item expected by the right consumer.
+    type Right: Send;
+
+    /// Consume one item and feed it to one or both of the underlying folders.
+    fn consume<FA, FB>(&self, item: T, left: FA, right: FB) -> (FA, FB)
+        where FA: Folder<Self::Left>,
+              FB: Folder<Self::Right>;
+
+    /// Reports whether this op may support indexed consumers.
+    /// - e.g. true for `unzip`, where the item count is passed through directly.
+    /// - e.g. false for `partition`, where how items will be split is not known in advance.
+    fn indexable() -> bool {
+        false
+    }
+}
+
+/// Run an unzip-like operation into `ParallelExtend` collections.
+fn execute<I, OP, FromA, FromB>(pi: I, op: OP) -> (FromA, FromB)
+    where I: ParallelIterator,
+          OP: UnzipOp<I::Item>,
+          FromA: Default + Send + ParallelExtend<OP::Left>,
+          FromB: Default + Send + ParallelExtend<OP::Right>
+{
+    let mut a = FromA::default();
+    let mut b = FromB::default();
+    {
+        // We have no idea what the consumers will look like for these
+        // collections' `par_extend`, but we can intercept them in our own
+        // `drive_unindexed`.  Start with the left side, type `A`:
+        let iter = UnzipA {
+            base: pi,
+            op: op,
+            b: &mut b,
+        };
+        a.par_extend(iter);
+    }
+    (a, b)
+}
+
+
+/// Unzips the items of a parallel iterator into a pair of arbitrary
+/// `ParallelExtend` containers.
+///
+/// This is not directly public, but called by `ParallelIterator::unzip`.
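+///
+/// A minimal usage sketch via the public `ParallelIterator::unzip` API,
+/// mirroring this crate's own tests (illustrative only):
+///
+/// ```
+/// use rayon::prelude::*;
+///
+/// // Split a parallel iterator of pairs into two collections.
+/// let (squares, cubes): (Vec<i32>, Vec<i32>) =
+///     (0..4).into_par_iter().map(|i| (i * i, i * i * i)).unzip();
+/// assert_eq!(squares, vec![0, 1, 4, 9]);
+/// assert_eq!(cubes, vec![0, 1, 8, 27]);
+/// ```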
+pub fn unzip<I, A, B, FromA, FromB>(pi: I) -> (FromA, FromB)
+    where I: ParallelIterator<Item = (A, B)>,
+          FromA: Default + Send + ParallelExtend<A>,
+          FromB: Default + Send + ParallelExtend<B>,
+          A: Send,
+          B: Send
+{
+    execute(pi, Unzip)
+}
+
+/// Unzip an `IndexedParallelIterator` into two arbitrary `Consumer`s.
+///
+/// This is not directly public, but called by `super::collect::unzip_into`.
+pub fn unzip_indexed<I, A, B, CA, CB>(pi: I, left: CA, right: CB) -> (CA::Result, CB::Result)
+    where I: IndexedParallelIterator<Item = (A, B)>,
+          CA: Consumer<A>,
+          CB: Consumer<B>,
+          A: Send,
+          B: Send
+{
+    let consumer = UnzipConsumer {
+        op: &Unzip,
+        left: left,
+        right: right,
+    };
+    pi.drive(consumer)
+}
+
+/// An `UnzipOp` that splits a tuple directly into the two consumers.
+struct Unzip;
+
+impl<A: Send, B: Send> UnzipOp<(A, B)> for Unzip {
+    type Left = A;
+    type Right = B;
+
+    fn consume<FA, FB>(&self, item: (A, B), left: FA, right: FB) -> (FA, FB)
+        where FA: Folder<A>,
+              FB: Folder<B>
+    {
+        (left.consume(item.0), right.consume(item.1))
+    }
+
+    fn indexable() -> bool {
+        true
+    }
+}
+
+
+/// Partitions the items of a parallel iterator into a pair of arbitrary
+/// `ParallelExtend` containers.
+///
+/// This is not directly public, but called by `ParallelIterator::partition`.
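+///
+/// A minimal usage sketch via the public `ParallelIterator::partition` API,
+/// mirroring this crate's own tests (illustrative only):
+///
+/// ```
+/// use rayon::prelude::*;
+///
+/// // Route items into two collections based on a predicate.
+/// let (evens, odds): (Vec<i32>, Vec<i32>) =
+///     (0..8).into_par_iter().partition(|&i| i % 2 == 0);
+/// assert_eq!(evens, vec![0, 2, 4, 6]);
+/// assert_eq!(odds, vec![1, 3, 5, 7]);
+/// ```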
+pub fn partition<I, A, B, P>(pi: I, predicate: P) -> (A, B)
+    where I: ParallelIterator,
+          A: Default + Send + ParallelExtend<I::Item>,
+          B: Default + Send + ParallelExtend<I::Item>,
+          P: Fn(&I::Item) -> bool + Sync + Send
+{
+    execute(pi, Partition { predicate: predicate })
+}
+
+/// An `UnzipOp` that routes items depending on a predicate function.
+struct Partition<P> {
+    predicate: P,
+}
+
+impl<P, T> UnzipOp<T> for Partition<P>
+    where P: Fn(&T) -> bool + Sync + Send,
+          T: Send
+{
+    type Left = T;
+    type Right = T;
+
+    fn consume<FA, FB>(&self, item: T, left: FA, right: FB) -> (FA, FB)
+        where FA: Folder<T>,
+              FB: Folder<T>
+    {
+        if (self.predicate)(&item) {
+            (left.consume(item), right)
+        } else {
+            (left, right.consume(item))
+        }
+    }
+}
+
+
+/// Partitions and maps the items of a parallel iterator into a pair of
+/// arbitrary `ParallelExtend` containers.
+///
+/// This is not directly public, but called by `ParallelIterator::partition_map`.
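+///
+/// A minimal usage sketch via the public `ParallelIterator::partition_map`
+/// API, mirroring this crate's own tests. Illustrative only; the `Either`
+/// import path below is assumed to be this crate's re-export of the
+/// `either` crate:
+///
+/// ```ignore
+/// use rayon::iter::Either;
+/// use rayon::prelude::*;
+///
+/// // Map each item to a `Left` or `Right` value and collect both sides.
+/// let (evens, odds): (Vec<i32>, Vec<String>) = (0..4)
+///     .into_par_iter()
+///     .partition_map(|i| if i % 2 == 0 {
+///                            Either::Left(i)
+///                        } else {
+///                            Either::Right(i.to_string())
+///                        });
+/// assert_eq!(evens, vec![0, 2]);
+/// assert_eq!(odds, vec!["1".to_string(), "3".to_string()]);
+/// ```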
+pub fn partition_map<I, A, B, P, L, R>(pi: I, predicate: P) -> (A, B)
+    where I: ParallelIterator,
+          A: Default + Send + ParallelExtend<L>,
+          B: Default + Send + ParallelExtend<R>,
+          P: Fn(I::Item) -> Either<L, R> + Sync + Send,
+          L: Send,
+          R: Send
+{
+    execute(pi, PartitionMap { predicate: predicate })
+}
+
+/// An `UnzipOp` that routes items depending on how they are mapped into an `Either`.
+struct PartitionMap<P> {
+    predicate: P,
+}
+
+impl<P, L, R, T> UnzipOp<T> for PartitionMap<P>
+    where P: Fn(T) -> Either<L, R> + Sync + Send,
+          L: Send,
+          R: Send
+{
+    type Left = L;
+    type Right = R;
+
+    fn consume<FA, FB>(&self, item: T, left: FA, right: FB) -> (FA, FB)
+        where FA: Folder<L>,
+              FB: Folder<R>
+    {
+        match (self.predicate)(item) {
+            Either::Left(item) => (left.consume(item), right),
+            Either::Right(item) => (left, right.consume(item)),
+        }
+    }
+}
+
+
+/// A fake iterator to intercept the `Consumer` for type `A`.
+struct UnzipA<'b, I, OP, FromB: 'b> {
+    base: I,
+    op: OP,
+    b: &'b mut FromB,
+}
+
+impl<'b, I, OP, FromB> ParallelIterator for UnzipA<'b, I, OP, FromB>
+    where I: ParallelIterator,
+          OP: UnzipOp<I::Item>,
+          FromB: Default + Send + ParallelExtend<OP::Right>
+{
+    type Item = OP::Left;
+
+    fn drive_unindexed<C>(self, consumer: C) -> C::Result
+        where C: UnindexedConsumer<Self::Item>
+    {
+        let mut result = None;
+        {
+            // Now it's time to find the consumer for type `B`
+            let iter = UnzipB {
+                base: self.base,
+                op: self.op,
+                left_consumer: consumer,
+                left_result: &mut result,
+            };
+            self.b.par_extend(iter);
+        }
+        // NB: If for some reason `b.par_extend` doesn't actually drive the
+        // iterator, then we won't have a result for the left side to return
+        // at all.  We can't fake an arbitrary consumer's result, so panic.
+        result.expect("unzip consumers didn't execute!")
+    }
+
+    fn opt_len(&mut self) -> Option<usize> {
+        if OP::indexable() {
+            self.base.opt_len()
+        } else {
+            None
+        }
+    }
+}
+
+/// A fake iterator to intercept the `Consumer` for type `B`.
+struct UnzipB<'r, I, OP, CA>
+    where I: ParallelIterator,
+          OP: UnzipOp<I::Item>,
+          CA: UnindexedConsumer<OP::Left>,
+          CA::Result: 'r
+{
+    base: I,
+    op: OP,
+    left_consumer: CA,
+    left_result: &'r mut Option<CA::Result>,
+}
+
+impl<'r, I, OP, CA> ParallelIterator for UnzipB<'r, I, OP, CA>
+    where I: ParallelIterator,
+          OP: UnzipOp<I::Item>,
+          CA: UnindexedConsumer<OP::Left>
+{
+    type Item = OP::Right;
+
+    fn drive_unindexed<C>(self, consumer: C) -> C::Result
+        where C: UnindexedConsumer<Self::Item>
+    {
+        // Now that we have two consumers, we can unzip the real iterator.
+        let consumer = UnzipConsumer {
+            op: &self.op,
+            left: self.left_consumer,
+            right: consumer,
+        };
+
+        let result = self.base.drive_unindexed(consumer);
+        *self.left_result = Some(result.0);
+        result.1
+    }
+
+    fn opt_len(&mut self) -> Option<usize> {
+        if OP::indexable() {
+            self.base.opt_len()
+        } else {
+            None
+        }
+    }
+}
+
+
+/// `Consumer` that unzips into two other `Consumer`s
+struct UnzipConsumer<'a, OP: 'a, CA, CB> {
+    op: &'a OP,
+    left: CA,
+    right: CB,
+}
+
+impl<'a, T, OP, CA, CB> Consumer<T> for UnzipConsumer<'a, OP, CA, CB>
+    where OP: UnzipOp<T>,
+          CA: Consumer<OP::Left>,
+          CB: Consumer<OP::Right>
+{
+    type Folder = UnzipFolder<'a, OP, CA::Folder, CB::Folder>;
+    type Reducer = UnzipReducer<CA::Reducer, CB::Reducer>;
+    type Result = (CA::Result, CB::Result);
+
+    fn split_at(self, index: usize) -> (Self, Self, Self::Reducer) {
+        let (left1, left2, left_reducer) = self.left.split_at(index);
+        let (right1, right2, right_reducer) = self.right.split_at(index);
+
+        (UnzipConsumer {
+             op: self.op,
+             left: left1,
+             right: right1,
+         },
+         UnzipConsumer {
+             op: self.op,
+             left: left2,
+             right: right2,
+         },
+         UnzipReducer {
+             left: left_reducer,
+             right: right_reducer,
+         })
+    }
+
+    fn into_folder(self) -> Self::Folder {
+        UnzipFolder {
+            op: self.op,
+            left: self.left.into_folder(),
+            right: self.right.into_folder(),
+        }
+    }
+
+    fn full(&self) -> bool {
+        // don't stop until everyone is full
+        self.left.full() && self.right.full()
+    }
+}
+
+impl<'a, T, OP, CA, CB> UnindexedConsumer<T> for UnzipConsumer<'a, OP, CA, CB>
+    where OP: UnzipOp<T>,
+          CA: UnindexedConsumer<OP::Left>,
+          CB: UnindexedConsumer<OP::Right>
+{
+    fn split_off_left(&self) -> Self {
+        UnzipConsumer {
+            op: self.op,
+            left: self.left.split_off_left(),
+            right: self.right.split_off_left(),
+        }
+    }
+
+    fn to_reducer(&self) -> Self::Reducer {
+        UnzipReducer {
+            left: self.left.to_reducer(),
+            right: self.right.to_reducer(),
+        }
+    }
+}
+
+
+/// `Folder` that unzips into two other `Folder`s
+struct UnzipFolder<'a, OP: 'a, FA, FB> {
+    op: &'a OP,
+    left: FA,
+    right: FB,
+}
+
+impl<'a, T, OP, FA, FB> Folder<T> for UnzipFolder<'a, OP, FA, FB>
+    where OP: UnzipOp<T>,
+          FA: Folder<OP::Left>,
+          FB: Folder<OP::Right>
+{
+    type Result = (FA::Result, FB::Result);
+
+    fn consume(self, item: T) -> Self {
+        let (left, right) = self.op.consume(item, self.left, self.right);
+        UnzipFolder {
+            op: self.op,
+            left: left,
+            right: right,
+        }
+    }
+
+    fn complete(self) -> Self::Result {
+        (self.left.complete(), self.right.complete())
+    }
+
+    fn full(&self) -> bool {
+        // don't stop until everyone is full
+        self.left.full() && self.right.full()
+    }
+}
+
+
+/// `Reducer` that unzips into two other `Reducer`s
+struct UnzipReducer<RA, RB> {
+    left: RA,
+    right: RB,
+}
+
+impl<A, B, RA, RB> Reducer<(A, B)> for UnzipReducer<RA, RB>
+    where RA: Reducer<A>,
+          RB: Reducer<B>
+{
+    fn reduce(self, left: (A, B), right: (A, B)) -> (A, B) {
+        (self.left.reduce(left.0, right.0), self.right.reduce(left.1, right.1))
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/src/iter/while_some.rs
@@ -0,0 +1,118 @@
+use std::sync::atomic::{AtomicBool, Ordering};
+use super::internal::*;
+use super::*;
+
+/// `WhileSome` is an iterator that yields the `Some` elements of its
+/// underlying iterator, halting as soon as any `None` is produced.
+///
+/// This struct is created by the [`while_some()`] method on [`ParallelIterator`].
+///
+/// [`while_some()`]: trait.ParallelIterator.html#method.while_some
+/// [`ParallelIterator`]: trait.ParallelIterator.html
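+///
+/// A minimal usage sketch via the public API, mirroring this crate's own
+/// tests (illustrative only):
+///
+/// ```
+/// use rayon::prelude::*;
+///
+/// // Every item is `Some`, so all values pass through to `max()`.
+/// let max = (0_i32..8).into_par_iter().map(Some).while_some().max();
+/// assert_eq!(max, Some(7));
+/// ```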
+#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
+pub struct WhileSome<I: ParallelIterator> {
+    base: I,
+}
+
+/// Create a new `WhileSome` iterator.
+///
+/// NB: a free fn because it is NOT part of the end-user API.
+pub fn new<I>(base: I) -> WhileSome<I>
+    where I: ParallelIterator
+{
+    WhileSome { base: base }
+}
+
+impl<I, T> ParallelIterator for WhileSome<I>
+    where I: ParallelIterator<Item = Option<T>>,
+          T: Send
+{
+    type Item = T;
+
+    fn drive_unindexed<C>(self, consumer: C) -> C::Result
+        where C: UnindexedConsumer<Self::Item>
+    {
+        let full = AtomicBool::new(false);
+        let consumer1 = WhileSomeConsumer {
+            base: consumer,
+            full: &full,
+        };
+        self.base.drive_unindexed(consumer1)
+    }
+}
+
+
+/// ////////////////////////////////////////////////////////////////////////
+/// Consumer implementation
+
+struct WhileSomeConsumer<'f, C> {
+    base: C,
+    full: &'f AtomicBool,
+}
+
+impl<'f, T, C> Consumer<Option<T>> for WhileSomeConsumer<'f, C>
+    where C: Consumer<T>,
+          T: Send
+{
+    type Folder = WhileSomeFolder<'f, C::Folder>;
+    type Reducer = C::Reducer;
+    type Result = C::Result;
+
+    fn split_at(self, index: usize) -> (Self, Self, Self::Reducer) {
+        let (left, right, reducer) = self.base.split_at(index);
+        (WhileSomeConsumer { base: left, ..self },
+         WhileSomeConsumer { base: right, ..self },
+         reducer)
+    }
+
+    fn into_folder(self) -> Self::Folder {
+        WhileSomeFolder {
+            base: self.base.into_folder(),
+            full: self.full,
+        }
+    }
+
+    fn full(&self) -> bool {
+        self.full.load(Ordering::Relaxed) || self.base.full()
+    }
+}
+
+impl<'f, T, C> UnindexedConsumer<Option<T>> for WhileSomeConsumer<'f, C>
+    where C: UnindexedConsumer<T>,
+          T: Send
+{
+    fn split_off_left(&self) -> Self {
+        WhileSomeConsumer { base: self.base.split_off_left(), ..*self }
+    }
+
+    fn to_reducer(&self) -> Self::Reducer {
+        self.base.to_reducer()
+    }
+}
+
+struct WhileSomeFolder<'f, C> {
+    base: C,
+    full: &'f AtomicBool,
+}
+
+impl<'f, T, C> Folder<Option<T>> for WhileSomeFolder<'f, C>
+    where C: Folder<T>
+{
+    type Result = C::Result;
+
+    fn consume(mut self, item: Option<T>) -> Self {
+        match item {
+            Some(item) => self.base = self.base.consume(item),
+            None => self.full.store(true, Ordering::Relaxed),
+        }
+        self
+    }
+
+    fn complete(self) -> C::Result {
+        self.base.complete()
+    }
+
+    fn full(&self) -> bool {
+        self.full.load(Ordering::Relaxed) || self.base.full()
+    }
+}
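A minimal sketch of `while_some()` as seen from the public API, assuming `rayon::prelude` is in scope (illustrative only):

    use rayon::prelude::*;

    // Doubles every element, stopping early once any multiplication overflows.
    // Because halting is signalled through a shared flag, items processed after
    // the first `None` may or may not be included.
    fn checked_doubles(data: &[u32]) -> Vec<u32> {
        data.par_iter()
            .map(|&x| x.checked_mul(2)) // Option<u32> per item
            .while_some()               // stop once any None is seen
            .collect()
    }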
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/src/iter/zip.rs
@@ -0,0 +1,141 @@
+use super::internal::*;
+use super::*;
+use std::cmp;
+use std::iter;
+
+#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
+pub struct Zip<A: IndexedParallelIterator, B: IndexedParallelIterator> {
+    a: A,
+    b: B,
+}
+
+/// Create a new `Zip` iterator.
+///
+/// NB: a free fn because it is NOT part of the end-user API.
+pub fn new<A, B>(a: A, b: B) -> Zip<A, B>
+    where A: IndexedParallelIterator,
+          B: IndexedParallelIterator
+{
+    Zip { a: a, b: b }
+}
+
+impl<A, B> ParallelIterator for Zip<A, B>
+    where A: IndexedParallelIterator,
+          B: IndexedParallelIterator
+{
+    type Item = (A::Item, B::Item);
+
+    fn drive_unindexed<C>(self, consumer: C) -> C::Result
+        where C: UnindexedConsumer<Self::Item>
+    {
+        bridge(self, consumer)
+    }
+
+    fn opt_len(&mut self) -> Option<usize> {
+        Some(self.len())
+    }
+}
+
+impl<A, B> IndexedParallelIterator for Zip<A, B>
+    where A: IndexedParallelIterator,
+          B: IndexedParallelIterator
+{
+    fn drive<C>(self, consumer: C) -> C::Result
+        where C: Consumer<Self::Item>
+    {
+        bridge(self, consumer)
+    }
+
+    fn len(&mut self) -> usize {
+        cmp::min(self.a.len(), self.b.len())
+    }
+
+    fn with_producer<CB>(self, callback: CB) -> CB::Output
+        where CB: ProducerCallback<Self::Item>
+    {
+        return self.a.with_producer(CallbackA {
+                                        callback: callback,
+                                        b: self.b,
+                                    });
+
+        struct CallbackA<CB, B> {
+            callback: CB,
+            b: B,
+        }
+
+        impl<CB, A_ITEM, B> ProducerCallback<A_ITEM> for CallbackA<CB, B>
+            where B: IndexedParallelIterator,
+                  CB: ProducerCallback<(A_ITEM, B::Item)>
+        {
+            type Output = CB::Output;
+
+            fn callback<A>(self, a_producer: A) -> Self::Output
+                where A: Producer<Item = A_ITEM>
+            {
+                return self.b.with_producer(CallbackB {
+                                                a_producer: a_producer,
+                                                callback: self.callback,
+                                            });
+            }
+        }
+
+        struct CallbackB<CB, A> {
+            a_producer: A,
+            callback: CB,
+        }
+
+        impl<CB, A, B_ITEM> ProducerCallback<B_ITEM> for CallbackB<CB, A>
+            where A: Producer,
+                  CB: ProducerCallback<(A::Item, B_ITEM)>
+        {
+            type Output = CB::Output;
+
+            fn callback<B>(self, b_producer: B) -> Self::Output
+                where B: Producer<Item = B_ITEM>
+            {
+                self.callback.callback(ZipProducer {
+                                           a: self.a_producer,
+                                           b: b_producer,
+                                       })
+            }
+        }
+
+    }
+}
+
+/// ////////////////////////////////////////////////////////////////////////
+
+struct ZipProducer<A: Producer, B: Producer> {
+    a: A,
+    b: B,
+}
+
+impl<A: Producer, B: Producer> Producer for ZipProducer<A, B> {
+    type Item = (A::Item, B::Item);
+    type IntoIter = iter::Zip<A::IntoIter, B::IntoIter>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        self.a.into_iter().zip(self.b.into_iter())
+    }
+
+    fn min_len(&self) -> usize {
+        cmp::max(self.a.min_len(), self.b.min_len())
+    }
+
+    fn max_len(&self) -> usize {
+        cmp::min(self.a.max_len(), self.b.max_len())
+    }
+
+    fn split_at(self, index: usize) -> (Self, Self) {
+        let (a_left, a_right) = self.a.split_at(index);
+        let (b_left, b_right) = self.b.split_at(index);
+        (ZipProducer {
+             a: a_left,
+             b: b_left,
+         },
+         ZipProducer {
+             a: a_right,
+             b: b_right,
+         })
+    }
+}
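A minimal sketch of the `zip` adaptor built on this producer, assuming two slices that need not have equal length (illustrative only):

    use rayon::prelude::*;

    // Pairwise product and sum; the zipped length is the shorter of the two,
    // mirroring `Zip::len` above.
    fn dot(a: &[f64], b: &[f64]) -> f64 {
        a.par_iter()
            .zip(b.par_iter())
            .map(|(x, y)| x * y)
            .sum()
    }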
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/src/lib.rs
@@ -0,0 +1,47 @@
+#![allow(non_camel_case_types)] // I prefer to use ALL_CAPS for type parameters
+#![cfg_attr(test, feature(conservative_impl_trait))]
+#![cfg_attr(test, feature(i128_type))]
+
+// If you're not compiling the unstable code, it often happens that
+// there is stuff that is considered "dead code" and so forth. So
+// disable warnings in that scenario.
+#![cfg_attr(not(feature = "unstable"), allow(warnings))]
+
+extern crate rayon_core;
+
+#[cfg(test)]
+extern crate rand;
+
+#[macro_use]
+mod delegate;
+
+#[macro_use]
+mod private;
+
+mod split_producer;
+
+pub mod collections;
+pub mod iter;
+pub mod option;
+pub mod prelude;
+pub mod range;
+pub mod result;
+pub mod slice;
+pub mod str;
+pub mod vec;
+
+mod test;
+
+pub use iter::split;
+
+pub use rayon_core::current_num_threads;
+pub use rayon_core::Configuration;
+pub use rayon_core::initialize;
+pub use rayon_core::ThreadPool;
+pub use rayon_core::join;
+pub use rayon_core::{scope, Scope};
+pub use rayon_core::spawn;
+#[cfg(rayon_unstable)]
+pub use rayon_core::spawn_future;
+#[cfg(rayon_unstable)]
+pub use rayon_core::RayonFuture;
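A minimal sketch using the re-exported `rayon::join`, assuming the `rayon` crate is linked; the recursive Fibonacci is just a toy workload (illustrative only):

    // Runs the two closures, potentially on different worker threads.
    fn fib(n: u64) -> u64 {
        if n < 2 {
            return n;
        }
        let (a, b) = rayon::join(|| fib(n - 1), || fib(n - 2));
        a + b
    }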
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/src/option.rs
@@ -0,0 +1,143 @@
+//! This module contains the parallel iterator types for options
+//! (`Option<T>`). You will rarely need to interact with it directly
+//! unless you have need to name one of the iterator types.
+
+use iter::*;
+use iter::internal::*;
+use std;
+use std::sync::atomic::{AtomicBool, Ordering};
+
+impl<T: Send> IntoParallelIterator for Option<T> {
+    type Item = T;
+    type Iter = IntoIter<T>;
+
+    fn into_par_iter(self) -> Self::Iter {
+        IntoIter { opt: self }
+    }
+}
+
+impl<'a, T: Sync> IntoParallelIterator for &'a Option<T> {
+    type Item = &'a T;
+    type Iter = Iter<'a, T>;
+
+    fn into_par_iter(self) -> Self::Iter {
+        Iter { inner: self.as_ref().into_par_iter() }
+    }
+}
+
+impl<'a, T: Send> IntoParallelIterator for &'a mut Option<T> {
+    type Item = &'a mut T;
+    type Iter = IterMut<'a, T>;
+
+    fn into_par_iter(self) -> Self::Iter {
+        IterMut { inner: self.as_mut().into_par_iter() }
+    }
+}
+
+
+/// Parallel iterator over an option
+pub struct IntoIter<T: Send> {
+    opt: Option<T>,
+}
+
+impl<T: Send> ParallelIterator for IntoIter<T> {
+    type Item = T;
+
+    fn drive_unindexed<C>(self, consumer: C) -> C::Result
+        where C: UnindexedConsumer<Self::Item>
+    {
+        bridge(self, consumer)
+    }
+
+    fn opt_len(&mut self) -> Option<usize> {
+        Some(self.len())
+    }
+}
+
+impl<T: Send> IndexedParallelIterator for IntoIter<T> {
+    fn drive<C>(self, consumer: C) -> C::Result
+        where C: Consumer<Self::Item>
+    {
+        bridge(self, consumer)
+    }
+
+    fn len(&mut self) -> usize {
+        match self.opt {
+            Some(_) => 1,
+            None => 0,
+        }
+    }
+
+    fn with_producer<CB>(self, callback: CB) -> CB::Output
+        where CB: ProducerCallback<Self::Item>
+    {
+        callback.callback(OptionProducer { opt: self.opt })
+    }
+}
+
+
+delegate_indexed_iterator!{
+    #[doc = "Parallel iterator over an immutable reference to an option"]
+    Iter<'a, T> => IntoIter<&'a T>,
+    impl<'a, T: Sync + 'a>
+}
+
+
+delegate_indexed_iterator!{
+    #[doc = "Parallel iterator over a mutable reference to an option"]
+    IterMut<'a, T> => IntoIter<&'a mut T>,
+    impl<'a, T: Send + 'a>
+}
+
+
+/// Private producer for an option
+struct OptionProducer<T: Send> {
+    opt: Option<T>,
+}
+
+impl<T: Send> Producer for OptionProducer<T> {
+    type Item = T;
+    type IntoIter = std::option::IntoIter<T>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        self.opt.into_iter()
+    }
+
+    fn split_at(self, index: usize) -> (Self, Self) {
+        let none = OptionProducer { opt: None };
+        if index == 0 {
+            (none, self)
+        } else {
+            (self, none)
+        }
+    }
+}
+
+
+/// Collect an arbitrary `Option`-wrapped collection.
+///
+/// If any item is `None`, then all previous items collected are discarded,
+/// and it returns only `None`.
+impl<'a, C, T> FromParallelIterator<Option<T>> for Option<C>
+    where C: FromParallelIterator<T>,
+          T: Send
+{
+    fn from_par_iter<I>(par_iter: I) -> Self
+        where I: IntoParallelIterator<Item = Option<T>>
+    {
+        let found_none = AtomicBool::new(false);
+        let collection = par_iter
+            .into_par_iter()
+            .inspect(|item| if item.is_none() {
+                         found_none.store(true, Ordering::Relaxed);
+                     })
+            .while_some()
+            .collect();
+
+        if found_none.load(Ordering::Relaxed) {
+            None
+        } else {
+            Some(collection)
+        }
+    }
+}
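A minimal sketch of the `FromParallelIterator<Option<T>>` impl above: collecting a fallible parallel map into `Option<Vec<_>>` (illustrative only):

    use rayon::prelude::*;

    // Returns None if any element fails to parse; otherwise Some of all values.
    fn parse_all(inputs: &[&str]) -> Option<Vec<i32>> {
        inputs.par_iter()
            .map(|s| s.parse::<i32>().ok()) // Option<i32> per item
            .collect()
    }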
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/src/prelude.rs
@@ -0,0 +1,14 @@
+//! The rayon prelude imports the various `ParallelIterator` traits.
+//! The intention is that one can include `use rayon::prelude::*` and
+//! have easy access to the various traits and methods you will need.
+
+pub use iter::FromParallelIterator;
+pub use iter::IntoParallelIterator;
+pub use iter::IntoParallelRefIterator;
+pub use iter::IntoParallelRefMutIterator;
+pub use iter::IndexedParallelIterator;
+pub use iter::ParallelExtend;
+pub use iter::ParallelIterator;
+pub use slice::ParallelSlice;
+pub use slice::ParallelSliceMut;
+pub use str::ParallelString;
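A minimal sketch of the intended import pattern (illustrative only):

    use rayon::prelude::*;

    // With the prelude in scope, slices gain `par_iter()` and the parallel
    // iterator adaptors become available.
    fn sum_of_squares(v: &[i64]) -> i64 {
        v.par_iter().map(|&x| x * x).sum()
    }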
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/src/private.rs
@@ -0,0 +1,26 @@
+//! The public parts of this private module are used to create traits
+//! that cannot be implemented outside of our own crate.  This way we
+//! can feel free to extend those traits without worrying about it
+//! being a breaking change for other implementations.
+
+
+/// If this type is pub but not publicly reachable, third parties
+/// can't name it and can't implement traits using it.
+pub struct PrivateMarker;
+
+macro_rules! private_decl {
+    () => {
+        /// This trait is private; this method exists to make it
+        /// impossible to implement outside the crate.
+        #[doc(hidden)]
+        fn __rayon_private__(&self) -> ::private::PrivateMarker;
+    }
+}
+
+macro_rules! private_impl {
+    () => {
+        fn __rayon_private__(&self) -> ::private::PrivateMarker {
+            ::private::PrivateMarker
+        }
+    }
+}
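A standalone sketch of the sealed-trait pattern these macros expand to; the names `Sealed` and `__private` are hypothetical and not part of the crate (illustrative only):

    mod private {
        pub struct PrivateMarker; // pub, but not publicly reachable
    }

    pub trait Sealed {
        // Downstream crates cannot name `private::PrivateMarker`, so they
        // cannot write this method and therefore cannot implement `Sealed`.
        #[doc(hidden)]
        fn __private(&self) -> private::PrivateMarker;
    }

    impl Sealed for u32 {
        fn __private(&self) -> private::PrivateMarker {
            private::PrivateMarker
        }
    }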
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/src/range.rs
@@ -0,0 +1,166 @@
+//! This module contains the parallel iterator types for ranges
+//! (`Range<T>`); this is the type for values created by an `a..b`
+//! expression. You will rarely need to interact with it directly
+//! unless you have need to name one of the iterator types.
+
+use iter::*;
+use iter::internal::*;
+use std::ops::Range;
+
+/// Parallel iterator over a range
+pub struct Iter<T> {
+    range: Range<T>,
+}
+
+impl<T> IntoParallelIterator for Range<T>
+    where Iter<T>: ParallelIterator
+{
+    type Item = <Iter<T> as ParallelIterator>::Item;
+    type Iter = Iter<T>;
+
+    fn into_par_iter(self) -> Self::Iter {
+        Iter { range: self }
+    }
+}
+
+struct IterProducer<T> {
+    range: Range<T>,
+}
+
+impl<T> IntoIterator for IterProducer<T>
+    where Range<T>: Iterator
+{
+    type Item = <Range<T> as Iterator>::Item;
+    type IntoIter = Range<T>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        self.range
+    }
+}
+
+macro_rules! indexed_range_impl {
+    ( $t:ty ) => {
+        impl ParallelIterator for Iter<$t> {
+            type Item = $t;
+
+            fn drive_unindexed<C>(self, consumer: C) -> C::Result
+                where C: UnindexedConsumer<Self::Item>
+            {
+                bridge(self, consumer)
+            }
+
+            fn opt_len(&mut self) -> Option<usize> {
+                Some(self.len())
+            }
+        }
+
+        impl IndexedParallelIterator for Iter<$t> {
+            fn drive<C>(self, consumer: C) -> C::Result
+                where C: Consumer<Self::Item>
+            {
+                bridge(self, consumer)
+            }
+
+            fn len(&mut self) -> usize {
+                self.range.len()
+            }
+
+            fn with_producer<CB>(self, callback: CB) -> CB::Output
+                where CB: ProducerCallback<Self::Item>
+            {
+                callback.callback(IterProducer { range: self.range })
+            }
+        }
+
+        impl Producer for IterProducer<$t> {
+
+            type Item = <Range<$t> as Iterator>::Item;
+            type IntoIter = Range<$t>;
+            fn into_iter(self) -> Self::IntoIter {
+                self.range
+            }
+
+            fn split_at(self, index: usize) -> (Self, Self) {
+                assert!(index <= self.range.len());
+                // For signed $t, the length and requested index could be greater than $t::MAX, and
+                // then `index as $t` could wrap to negative, so wrapping_add is necessary.
+                let mid = self.range.start.wrapping_add(index as $t);
+                let left = self.range.start .. mid;
+                let right = mid .. self.range.end;
+                (IterProducer { range: left }, IterProducer { range: right })
+            }
+        }
+    }
+}
+
+macro_rules! unindexed_range_impl {
+    ( $t:ty ) => {
+        impl IterProducer<$t> {
+            fn len(&self) -> u64 {
+                let Range { start, end } = self.range;
+                if end > start {
+                    end.wrapping_sub(start) as u64
+                } else {
+                    0
+                }
+            }
+        }
+
+        impl ParallelIterator for Iter<$t> {
+            type Item = $t;
+
+            fn drive_unindexed<C>(self, consumer: C) -> C::Result
+                where C: UnindexedConsumer<Self::Item>
+            {
+                bridge_unindexed(IterProducer { range: self.range }, consumer)
+            }
+        }
+
+        impl UnindexedProducer for IterProducer<$t> {
+            type Item = $t;
+
+            fn split(mut self) -> (Self, Option<Self>) {
+                let index = self.len() / 2;
+                if index > 0 {
+                    let mid = self.range.start.wrapping_add(index as $t);
+                    let right = mid .. self.range.end;
+                    self.range.end = mid;
+                    (self, Some(IterProducer { range: right }))
+                } else {
+                    (self, None)
+                }
+            }
+
+            fn fold_with<F>(self, folder: F) -> F
+                where F: Folder<Self::Item>
+            {
+                folder.consume_iter(self)
+            }
+        }
+    }
+}
+
+// all Range<T> with ExactSizeIterator
+indexed_range_impl!{u8}
+indexed_range_impl!{u16}
+indexed_range_impl!{u32}
+indexed_range_impl!{usize}
+indexed_range_impl!{i8}
+indexed_range_impl!{i16}
+indexed_range_impl!{i32}
+indexed_range_impl!{isize}
+
+// other Range<T> with just Iterator
+unindexed_range_impl!{u64}
+unindexed_range_impl!{i64}
+
+
+#[test]
+pub fn check_range_split_at_overflow() {
+    // Note, this split index overflows i8!
+    let producer = IterProducer { range: -100i8..100 };
+    let (left, right) = producer.split_at(150);
+    let r1: i32 = left.range.map(|i| i as i32).sum();
+    let r2: i32 = right.range.map(|i| i as i32).sum();
+    assert_eq!(r1 + r2, -100);
+}
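A minimal sketch of iterating a range in parallel via the impls above (illustrative only):

    use rayon::prelude::*;

    // `Range<u32>` is an indexed parallel iterator; widen before summing to
    // avoid overflow for large n.
    fn sum_below(n: u32) -> u64 {
        (0..n).into_par_iter().map(|i| i as u64).sum()
    }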
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/src/result.rs
@@ -0,0 +1,93 @@
+//! This module contains the parallel iterator types for results
+//! (`Result<T, E>`). You will rarely need to interact with it directly
+//! unless you have need to name one of the iterator types.
+
+use iter::*;
+use iter::internal::*;
+use std::sync::Mutex;
+
+use option;
+
+impl<T: Send, E> IntoParallelIterator for Result<T, E> {
+    type Item = T;
+    type Iter = IntoIter<T>;
+
+    fn into_par_iter(self) -> Self::Iter {
+        IntoIter { inner: self.ok().into_par_iter() }
+    }
+}
+
+impl<'a, T: Sync, E> IntoParallelIterator for &'a Result<T, E> {
+    type Item = &'a T;
+    type Iter = Iter<'a, T>;
+
+    fn into_par_iter(self) -> Self::Iter {
+        Iter { inner: self.as_ref().ok().into_par_iter() }
+    }
+}
+
+impl<'a, T: Send, E> IntoParallelIterator for &'a mut Result<T, E> {
+    type Item = &'a mut T;
+    type Iter = IterMut<'a, T>;
+
+    fn into_par_iter(self) -> Self::Iter {
+        IterMut { inner: self.as_mut().ok().into_par_iter() }
+    }
+}
+
+
+delegate_indexed_iterator!{
+    #[doc = "Parallel iterator over a result"]
+    IntoIter<T> => option::IntoIter<T>,
+    impl<T: Send>
+}
+
+
+delegate_indexed_iterator!{
+    #[doc = "Parallel iterator over an immutable reference to a result"]
+    Iter<'a, T> => option::IntoIter<&'a T>,
+    impl<'a, T: Sync + 'a>
+}
+
+
+delegate_indexed_iterator!{
+    #[doc = "Parallel iterator over a mutable reference to a result"]
+    IterMut<'a, T> => option::IntoIter<&'a mut T>,
+    impl<'a, T: Send + 'a>
+}
+
+
+/// Collect an arbitrary `Result`-wrapped collection.
+///
+/// If any item is `Err`, then all previous `Ok` items collected are
+/// discarded, and it returns that error.  If there are multiple errors, the
+/// one returned is not deterministic.
+impl<'a, C, T, E> FromParallelIterator<Result<T, E>> for Result<C, E>
+    where C: FromParallelIterator<T>,
+          T: Send,
+          E: Send
+{
+    fn from_par_iter<I>(par_iter: I) -> Self
+        where I: IntoParallelIterator<Item = Result<T, E>>
+    {
+        let saved_error = Mutex::new(None);
+        let collection = par_iter
+            .into_par_iter()
+            .map(|item| match item {
+                     Ok(item) => Some(item),
+                     Err(error) => {
+                         if let Ok(mut guard) = saved_error.lock() {
+                             *guard = Some(error);
+                         }
+                         None
+                     }
+                 })
+            .while_some()
+            .collect();
+
+        match saved_error.into_inner().unwrap() {
+            Some(error) => Err(error),
+            None => Ok(collection),
+        }
+    }
+}
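A minimal sketch of the `FromParallelIterator<Result<T, E>>` impl above: collecting into `Result<Vec<_>, _>` keeps whichever error it happens to observe (illustrative only):

    use rayon::prelude::*;
    use std::num::ParseIntError;

    // Err(..) if any element fails to parse; which error wins is not deterministic.
    fn parse_all(inputs: &[&str]) -> Result<Vec<i32>, ParseIntError> {
        inputs.par_iter()
            .map(|s| s.parse::<i32>()) // Result<i32, ParseIntError> per item
            .collect()
    }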
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/src/slice/mergesort.rs
@@ -0,0 +1,754 @@
+//! Parallel merge sort.
+//!
+//! This implementation is copied verbatim from `std::slice::sort` and then parallelized.
+//! The only difference from the original is that the sequential `mergesort` returns
+//! `MergesortResult` and leaves descending arrays intact.
+
+use iter::*;
+use rayon_core;
+use slice::ParallelSliceMut;
+use std::mem::size_of;
+use std::mem;
+use std::ptr;
+use std::slice;
+
+unsafe fn get_and_increment<T>(ptr: &mut *mut T) -> *mut T {
+    let old = *ptr;
+    *ptr = ptr.offset(1);
+    old
+}
+
+unsafe fn decrement_and_get<T>(ptr: &mut *mut T) -> *mut T {
+    *ptr = ptr.offset(-1);
+    *ptr
+}
+
+/// When dropped, copies from `src` into `dest` a sequence of length `len`.
+struct CopyOnDrop<T> {
+    src: *mut T,
+    dest: *mut T,
+    len: usize,
+}
+
+impl<T> Drop for CopyOnDrop<T> {
+    fn drop(&mut self) {
+        unsafe {
+            ptr::copy_nonoverlapping(self.src, self.dest, self.len);
+        }
+    }
+}
+
+/// Inserts `v[0]` into pre-sorted sequence `v[1..]` so that whole `v[..]` becomes sorted.
+///
+/// This is the integral subroutine of insertion sort.
+fn insert_head<T, F>(v: &mut [T], is_less: &F)
+where
+    F: Fn(&T, &T) -> bool,
+{
+    if v.len() >= 2 && is_less(&v[1], &v[0]) {
+        unsafe {
+            // There are three ways to implement insertion here:
+            //
+            // 1. Swap adjacent elements until the first one gets to its final destination.
+            //    However, this way we copy data around more than is necessary. If elements are big
+            //    structures (costly to copy), this method will be slow.
+            //
+            // 2. Iterate until the right place for the first element is found. Then shift the
+            //    elements succeeding it to make room for it and finally place it into the
+            //    remaining hole. This is a good method.
+            //
+            // 3. Copy the first element into a temporary variable. Iterate until the right place
+            //    for it is found. As we go along, copy every traversed element into the slot
+            //    preceding it. Finally, copy data from the temporary variable into the remaining
+            //    hole. This method is very good. Benchmarks demonstrated slightly better
+            //    performance than with the 2nd method.
+            //
+            // All methods were benchmarked, and the 3rd showed best results. So we chose that one.
+            let mut tmp = NoDrop { value: Some(ptr::read(&v[0])) };
+
+            // Intermediate state of the insertion process is always tracked by `hole`, which
+            // serves two purposes:
+            // 1. Protects integrity of `v` from panics in `is_less`.
+            // 2. Fills the remaining hole in `v` in the end.
+            //
+            // Panic safety:
+            //
+            // If `is_less` panics at any point during the process, `hole` will get dropped and
+            // fill the hole in `v` with `tmp`, thus ensuring that `v` still holds every object it
+            // initially held exactly once.
+            let mut hole = InsertionHole {
+                src: tmp.value.as_mut().unwrap(),
+                dest: &mut v[1],
+            };
+            ptr::copy_nonoverlapping(&v[1], &mut v[0], 1);
+
+            for i in 2..v.len() {
+                if !is_less(&v[i], tmp.value.as_ref().unwrap()) {
+                    break;
+                }
+                ptr::copy_nonoverlapping(&v[i], &mut v[i - 1], 1);
+                hole.dest = &mut v[i];
+            }
+            // `hole` gets dropped and thus copies `tmp` into the remaining hole in `v`.
+        }
+    }
+
+    // Holds a value, but never drops it.
+    struct NoDrop<T> {
+        value: Option<T>,
+    }
+
+    impl<T> Drop for NoDrop<T> {
+        fn drop(&mut self) {
+            mem::forget(self.value.take());
+        }
+    }
+
+    // When dropped, copies from `src` into `dest`.
+    struct InsertionHole<T> {
+        src: *mut T,
+        dest: *mut T,
+    }
+
+    impl<T> Drop for InsertionHole<T> {
+        fn drop(&mut self) {
+            unsafe {
+                ptr::copy_nonoverlapping(self.src, self.dest, 1);
+            }
+        }
+    }
+}
+
+/// Merges non-decreasing runs `v[..mid]` and `v[mid..]` using `buf` as temporary storage, and
+/// stores the result into `v[..]`.
+///
+/// # Safety
+///
+/// The two slices must be non-empty and `mid` must be in bounds. Buffer `buf` must be long enough
+/// to hold a copy of the shorter slice. Also, `T` must not be a zero-sized type.
+unsafe fn merge<T, F>(v: &mut [T], mid: usize, buf: *mut T, is_less: &F)
+where
+    F: Fn(&T, &T) -> bool,
+{
+    let len = v.len();
+    let v = v.as_mut_ptr();
+    let v_mid = v.offset(mid as isize);
+    let v_end = v.offset(len as isize);
+
+    // The merge process first copies the shorter run into `buf`. Then it traces the newly copied
+    // run and the longer run forwards (or backwards), comparing their next unconsumed elements and
+    // copying the lesser (or greater) one into `v`.
+    //
+    // As soon as the shorter run is fully consumed, the process is done. If the longer run gets
+    // consumed first, then we must copy whatever is left of the shorter run into the remaining
+    // hole in `v`.
+    //
+    // Intermediate state of the process is always tracked by `hole`, which serves two purposes:
+    // 1. Protects integrity of `v` from panics in `is_less`.
+    // 2. Fills the remaining hole in `v` if the longer run gets consumed first.
+    //
+    // Panic safety:
+    //
+    // If `is_less` panics at any point during the process, `hole` will get dropped and fill the
+    // hole in `v` with the unconsumed range in `buf`, thus ensuring that `v` still holds every
+    // object it initially held exactly once.
+    let mut hole;
+
+    if mid <= len - mid {
+        // The left run is shorter.
+        ptr::copy_nonoverlapping(v, buf, mid);
+        hole = MergeHole {
+            start: buf,
+            end: buf.offset(mid as isize),
+            dest: v,
+        };
+
+        // Initially, these pointers point to the beginnings of their arrays.
+        let left = &mut hole.start;
+        let mut right = v_mid;
+        let out = &mut hole.dest;
+
+        while *left < hole.end && right < v_end {
+            // Consume the lesser side.
+            // If equal, prefer the left run to maintain stability.
+            let to_copy = if is_less(&*right, &**left) {
+                get_and_increment(&mut right)
+            } else {
+                get_and_increment(left)
+            };
+            ptr::copy_nonoverlapping(to_copy, get_and_increment(out), 1);
+        }
+    } else {
+        // The right run is shorter.
+        ptr::copy_nonoverlapping(v_mid, buf, len - mid);
+        hole = MergeHole {
+            start: buf,
+            end: buf.offset((len - mid) as isize),
+            dest: v_mid,
+        };
+
+        // Initially, these pointers point past the ends of their arrays.
+        let left = &mut hole.dest;
+        let right = &mut hole.end;
+        let mut out = v_end;
+
+        while v < *left && buf < *right {
+            // Consume the greater side.
+            // If equal, prefer the right run to maintain stability.
+            let to_copy = if is_less(&*right.offset(-1), &*left.offset(-1)) {
+                decrement_and_get(left)
+            } else {
+                decrement_and_get(right)
+            };
+            ptr::copy_nonoverlapping(to_copy, decrement_and_get(&mut out), 1);
+        }
+    }
+    // Finally, `hole` gets dropped. If the shorter run was not fully consumed, whatever remains of
+    // it will now be copied into the hole in `v`.
+
+    // When dropped, copies the range `start..end` into `dest..`.
+    struct MergeHole<T> {
+        start: *mut T,
+        end: *mut T,
+        dest: *mut T,
+    }
+
+    impl<T> Drop for MergeHole<T> {
+        fn drop(&mut self) {
+            // `T` is not a zero-sized type, so it's okay to divide by its size.
+            let len = (self.end as usize - self.start as usize) / mem::size_of::<T>();
+            unsafe {
+                ptr::copy_nonoverlapping(self.start, self.dest, len);
+            }
+        }
+    }
+}
+
+/// The result of merge sort.
+#[must_use]
+#[derive(Clone, Copy, PartialEq, Eq)]
+enum MergesortResult {
+    /// The slice has already been sorted.
+    NonDescending,
+    /// The slice was descending and was therefore left intact.
+    Descending,
+    /// The slice was neither non-descending nor descending, so it was sorted.
+    Sorted,
+}
+
+/// A sorted run that starts at index `start` and is of length `len`.
+#[derive(Clone, Copy)]
+struct Run {
+    start: usize,
+    len: usize,
+}
+
+/// Examines the stack of runs and identifies the next pair of runs to merge. More specifically,
+/// if `Some(r)` is returned, that means `runs[r]` and `runs[r + 1]` must be merged next. If the
+/// algorithm should continue building a new run instead, `None` is returned.
+///
+/// TimSort is infamous for its buggy implementations, as described here:
+/// http://envisage-project.eu/timsort-specification-and-verification/
+///
+/// The gist of the story is: we must enforce the invariants on the top four runs on the stack.
+/// Enforcing them on just top three is not sufficient to ensure that the invariants will still
+/// hold for *all* runs in the stack.
+///
+/// This function correctly checks invariants for the top four runs. Additionally, if the top
+/// run starts at index 0, it will always demand a merge operation until the stack is fully
+/// collapsed, in order to complete the sort.
+#[inline]
+fn collapse(runs: &[Run]) -> Option<usize> {
+    let n = runs.len();
+
+    if n >= 2 && (runs[n - 1].start == 0 ||
+                  runs[n - 2].len <= runs[n - 1].len ||
+                  (n >= 3 && runs[n - 3].len <= runs[n - 2].len + runs[n - 1].len) ||
+                  (n >= 4 && runs[n - 4].len <= runs[n - 3].len + runs[n - 2].len))
+    {
+        if n >= 3 && runs[n - 3].len < runs[n - 1].len {
+            Some(n - 3)
+        } else {
+            Some(n - 2)
+        }
+    } else {
+        None
+    }
+}
+
+/// Sorts a slice using merge sort, unless it is already in descending order.
+///
+/// This function doesn't modify the slice if it is already non-descending or descending.
+/// Otherwise, it sorts the slice into non-descending order.
+///
+/// This merge sort borrows some (but not all) ideas from TimSort, which is described in detail
+/// [here](http://svn.python.org/projects/python/trunk/Objects/listsort.txt).
+///
+/// The algorithm identifies strictly descending and non-descending subsequences, which are called
+/// natural runs. There is a stack of pending runs yet to be merged. Each newly found run is pushed
+/// onto the stack, and then some pairs of adjacent runs are merged until these two invariants are
+/// satisfied:
+///
+/// 1. for every `i` in `1..runs.len()`: `runs[i - 1].len > runs[i].len`
+/// 2. for every `i` in `2..runs.len()`: `runs[i - 2].len > runs[i - 1].len + runs[i].len`
+///
+/// The invariants ensure that the total running time is `O(n log n)` worst-case.
+///
+/// # Safety
+///
+/// The argument `buf` is used as a temporary buffer and must be at least as long as `v`.
+unsafe fn mergesort<T, F>(v: &mut [T], buf: *mut T, is_less: &F) -> MergesortResult
+where
+    T: Send,
+    F: Fn(&T, &T) -> bool + Sync,
+{
+    // Very short runs are extended using insertion sort to span at least this many elements.
+    const MIN_RUN: usize = 10;
+
+    let len = v.len();
+
+    // In order to identify natural runs in `v`, we traverse it backwards. That might seem like a
+    // strange decision, but consider the fact that merges more often go in the opposite direction
+    // (forwards). According to benchmarks, merging forwards is slightly faster than merging
+    // backwards. To conclude, identifying runs by traversing backwards improves performance.
+    let mut runs = vec![];
+    let mut end = len;
+    while end > 0 {
+        // Find the next natural run, and reverse it if it's strictly descending.
+        let mut start = end - 1;
+
+        if start > 0 {
+            start -= 1;
+
+            if is_less(v.get_unchecked(start + 1), v.get_unchecked(start)) {
+                while start > 0 && is_less(v.get_unchecked(start), v.get_unchecked(start - 1)) {
+                    start -= 1;
+                }
+
+                // If this descending run covers the whole slice, return immediately.
+                if start == 0 && end == len {
+                    return MergesortResult::Descending;
+                } else {
+                    v[start..end].reverse();
+                }
+            } else {
+                while start > 0 && !is_less(v.get_unchecked(start), v.get_unchecked(start - 1)) {
+                    start -= 1;
+                }
+
+                // If this non-descending run covers the whole slice, return immediately.
+                if end - start == len {
+                    return MergesortResult::NonDescending;
+                }
+            }
+        }
+
+        // Insert some more elements into the run if it's too short. Insertion sort is faster than
+        // merge sort on short sequences, so this significantly improves performance.
+        while start > 0 && end - start < MIN_RUN {
+            start -= 1;
+            insert_head(&mut v[start..end], &is_less);
+        }
+
+        // Push this run onto the stack.
+        runs.push(Run {
+            start: start,
+            len: end - start,
+        });
+        end = start;
+
+        // Merge some pairs of adjacent runs to satisfy the invariants.
+        while let Some(r) = collapse(&runs) {
+            let left = runs[r + 1];
+            let right = runs[r];
+            merge(&mut v[left.start..right.start + right.len], left.len, buf, &is_less);
+
+            runs[r] = Run {
+                start: left.start,
+                len: left.len + right.len,
+            };
+            runs.remove(r + 1);
+        }
+    }
+
+    // Finally, exactly one run must remain in the stack.
+    debug_assert!(runs.len() == 1 && runs[0].start == 0 && runs[0].len == len);
+
+    // The original order of the slice was neither non-descending nor descending.
+    MergesortResult::Sorted
+}
+
+////////////////////////////////////////////////////////////////////////////
+// Everything above this line is copied from `std::slice::sort` (with very minor tweaks).
+// Everything below this line is parallelization.
+////////////////////////////////////////////////////////////////////////////
+
+/// Splits two sorted slices so that they can be merged in parallel.
+///
+/// Returns two indices `(a, b)` so that slices `left[..a]` and `right[..b]` come before
+/// `left[a..]` and `right[b..]`.
+fn split_for_merge<T, F>(left: &[T], right: &[T], is_less: &F) -> (usize, usize)
+where
+    F: Fn(&T, &T) -> bool,
+{
+    let left_len = left.len();
+    let right_len = right.len();
+
+    if left_len >= right_len {
+        let left_mid = left_len / 2;
+
+        // Find the first element in `right` that is greater than or equal to `left[left_mid]`.
+        let mut a = 0;
+        let mut b = right_len;
+        while a < b {
+            let m = a + (b - a) / 2;
+            if is_less(&right[m], &left[left_mid]) {
+                a = m + 1;
+            } else {
+                b = m;
+            }
+        }
+
+        (left_mid, a)
+    } else {
+        let right_mid = right_len / 2;
+
+        // Find the first element in `left` that is greater than `right[right_mid]`.
+        let mut a = 0;
+        let mut b = left_len;
+        while a < b {
+            let m = a + (b - a) / 2;
+            if is_less(&right[right_mid], &left[m]) {
+                b = m;
+            } else {
+                a = m + 1;
+            }
+        }
+
+        (a, right_mid)
+    }
+}
+
+/// Merges slices `left` and `right` in parallel and stores the result into `dest`.
+///
+/// # Safety
+///
+/// The `dest` pointer must have enough space to store the result.
+///
+/// Even if `is_less` panics at any point during the merge process, this function will fully copy
+/// all elements from `left` and `right` into `dest` (not necessarily in sorted order).
+unsafe fn par_merge<T, F>(left: &mut [T], right: &mut [T], dest: *mut T, is_less: &F)
+where
+    T: Send,
+    F: Fn(&T, &T) -> bool + Sync,
+{
+    // Slices whose lengths sum up to this value are merged sequentially. This number is slightly
+    // larger than `CHUNK_LENGTH` because merging is faster than merge sorting, so merging needs
+    // somewhat coarser granularity in order to hide the overhead of Rayon's task scheduling.
+    const MAX_SEQUENTIAL: usize = 5000;
+
+    let left_len = left.len();
+    let right_len = right.len();
+
+    // Intermediate state of the merge process, which serves two purposes:
+    // 1. Protects integrity of `dest` from panics in `is_less`.
+    // 2. Copies the remaining elements as soon as one of the two sides is exhausted.
+    //
+    // Panic safety:
+    //
+    // If `is_less` panics at any point during the merge process, `s` will get dropped and copy the
+    // remaining parts of `left` and `right` into `dest`.
+    let mut s = State {
+        left_start: left.as_mut_ptr(),
+        left_end: left.as_mut_ptr().offset(left_len as isize),
+        right_start: right.as_mut_ptr(),
+        right_end: right.as_mut_ptr().offset(right_len as isize),
+        dest: dest,
+    };
+
+    if left_len == 0 || right_len == 0 || left_len + right_len < MAX_SEQUENTIAL {
+        while s.left_start < s.left_end && s.right_start < s.right_end {
+            // Consume the lesser side.
+            // If equal, prefer the left run to maintain stability.
+            let to_copy = if is_less(&*s.right_start, &*s.left_start) {
+                get_and_increment(&mut s.right_start)
+            } else {
+                get_and_increment(&mut s.left_start)
+            };
+            ptr::copy_nonoverlapping(to_copy, get_and_increment(&mut s.dest), 1);
+        }
+    } else {
+        // Function `split_for_merge` might panic. If that happens, `s` will get destructed and copy
+        // the whole `left` and `right` into `dest`.
+        let (left_mid, right_mid) = split_for_merge(left, right, is_less);
+        let (left_l, left_r) = left.split_at_mut(left_mid);
+        let (right_l, right_r) = right.split_at_mut(right_mid);
+
+        // Prevent the destructor of `s` from running. Rayon will ensure that both calls to
+        // `par_merge` happen. If one of those calls panics, the `State` guards inside the
+        // recursive calls will still copy the elements into `dest_l` and `dest_r`.
+        mem::forget(s);
+
+        // Convert the pointers to `usize` because `*mut T` is not `Send`.
+        let dest_l = dest as usize;
+        let dest_r = dest.offset((left_l.len() + right_l.len()) as isize) as usize;
+        rayon_core::join(
+            || par_merge(left_l, right_l, dest_l as *mut T, is_less),
+            || par_merge(left_r, right_r, dest_r as *mut T, is_less),
+        );
+    }
+    // Finally, `s` gets dropped if we used sequential merge, thus copying the remaining elements
+    // all at once.
+
+    // When dropped, copies arrays `left_start..left_end` and `right_start..right_end` into `dest`,
+    // in that order.
+    struct State<T> {
+        left_start: *mut T,
+        left_end: *mut T,
+        right_start: *mut T,
+        right_end: *mut T,
+        dest: *mut T,
+    }
+
+    impl<T> Drop for State<T> {
+        fn drop(&mut self) {
+            let size = mem::size_of::<T>();
+            let left_len = (self.left_end as usize - self.left_start as usize) / size;
+            let right_len = (self.right_end as usize - self.right_start as usize) / size;
+
+            // Copy array `left`, followed by `right`.
+            unsafe {
+                ptr::copy_nonoverlapping(self.left_start, self.dest, left_len);
+                self.dest = self.dest.offset(left_len as isize);
+                ptr::copy_nonoverlapping(self.right_start, self.dest, right_len);
+            }
+        }
+    }
+}
+
+/// Recursively merges pre-sorted chunks inside `v`.
+///
+/// Chunks of `v` are stored in `chunks` as intervals (inclusive left and exclusive right bound).
+/// Argument `buf` is an auxiliary buffer that will be used during the procedure.
+/// If `into_buf` is true, the result will be stored into `buf`, otherwise it will be in `v`.
+///
+/// # Safety
+///
+/// The number of chunks must be positive and they must be adjacent: the right bound of each chunk
+/// must equal the left bound of the following chunk.
+///
+/// The buffer must be at least as long as `v`.
+unsafe fn recurse<T, F>(
+    v: *mut T,
+    buf: *mut T,
+    chunks: &[(usize, usize)],
+    into_buf: bool,
+    is_less: &F,
+)
+where
+    T: Send,
+    F: Fn(&T, &T) -> bool + Sync,
+{
+    let len = chunks.len();
+    debug_assert!(len > 0);
+
+    // Base case of the algorithm.
+    // If only one chunk is remaining, there's no more work to split and merge.
+    if len == 1 {
+        if into_buf {
+            // Copy the chunk from `v` into `buf`.
+            let (start, end) = chunks[0];
+            let src = v.offset(start as isize);
+            let dest = buf.offset(start as isize);
+            ptr::copy_nonoverlapping(src, dest, end - start);
+        }
+        return;
+    }
+
+    // Split the chunks into two halves.
+    let (start, _) = chunks[0];
+    let (mid, _) = chunks[len / 2];
+    let (_, end) = chunks[len - 1];
+    let (left, right) = chunks.split_at(len / 2);
+
+    // After recursive calls finish we'll have to merge chunks `(start, mid)` and `(mid, end)` from
+    // `src` into `dest`. If the current invocation has to store the result into `buf`, we'll
+    // merge chunks from `v` into `buf`, and vice versa.
+    //
+    // Recursive calls flip `into_buf` at each level of recursion. More concretely, `par_merge`
+    // merges chunks from `buf` into `v` at the first level, from `v` into `buf` at the second
+    // level etc.
+    let (src, dest) = if into_buf { (v, buf) } else { (buf, v) };
+
+    // Panic safety:
+    //
+    // If `is_less` panics at any point during the recursive calls, the destructor of `guard` will
+    // be executed, thus copying everything from `src` into `dest`. This way we ensure that all
+    // chunks are in fact copied into `dest`, even if the merge process doesn't finish.
+    let guard = CopyOnDrop {
+        src: src.offset(start as isize),
+        dest: dest.offset(start as isize),
+        len: end - start,
+    };
+
+    // Convert the pointers to `usize` because `*mut T` is not `Send`.
+    let v = v as usize;
+    let buf = buf as usize;
+    rayon_core::join(
+        || recurse(v as *mut T, buf as *mut T, left, !into_buf, is_less),
+        || recurse(v as *mut T, buf as *mut T, right, !into_buf, is_less),
+    );
+
+    // Everything went all right - recursive calls didn't panic.
+    // Forget the guard in order to prevent its destructor from running.
+    mem::forget(guard);
+
+    // Merge chunks `(start, mid)` and `(mid, end)` from `src` into `dest`.
+    let src_left = slice::from_raw_parts_mut(src.offset(start as isize), mid - start);
+    let src_right = slice::from_raw_parts_mut(src.offset(mid as isize), end - mid);
+    par_merge(src_left, src_right, dest.offset(start as isize), is_less);
+}
+
+/// Sorts `v` using merge sort in parallel.
+///
+/// The algorithm is stable, allocates memory, and `O(n log n)` worst-case.
+/// The allocated temporary buffer is of the same length as is `v`.
+pub fn par_mergesort<T, F>(v: &mut [T], is_less: F)
+where
+    T: Send,
+    F: Fn(&T, &T) -> bool + Sync,
+{
+    // Slices of up to this length get sorted using insertion sort in order to avoid the cost of
+    // buffer allocation.
+    const MAX_INSERTION: usize = 20;
+    // The length of initial chunks. This number is as small as possible while keeping the
+    // overhead of Rayon's task scheduling negligible.
+    const CHUNK_LENGTH: usize = 2000;
+
+    // Sorting has no meaningful behavior on zero-sized types.
+    if size_of::<T>() == 0 {
+        return;
+    }
+
+    let len = v.len();
+
+    // Short slices get sorted in-place via insertion sort to avoid allocations.
+    if len <= MAX_INSERTION {
+        if len >= 2 {
+            for i in (0..len - 1).rev() {
+                insert_head(&mut v[i..], &is_less);
+            }
+        }
+        return;
+    }
+
+    // Allocate a buffer to use as scratch memory. We keep its length at 0 so it only ever holds
+    // shallow copies of the contents of `v`, without risking destructors running on those copies
+    // if `is_less` panics.
+    let mut buf = Vec::<T>::with_capacity(len);
+    let buf = buf.as_mut_ptr();
+
+    // If the slice is not longer than one chunk would be, do sequential merge sort and return.
+    if len <= CHUNK_LENGTH {
+        let res = unsafe { mergesort(v, buf, &is_less) };
+        if res == MergesortResult::Descending {
+            v.reverse();
+        }
+        return;
+    }
+
+    // Split the slice into chunks and merge sort them in parallel.
+    // However, descending chunks will not be sorted - they will be simply left intact.
+    let mut iter = {
+        // Convert the pointer to `usize` because `*mut T` is not `Send`.
+        let buf = buf as usize;
+
+        v.par_chunks_mut(CHUNK_LENGTH)
+            .with_max_len(1)
+            .enumerate()
+            .map(|(i, chunk)| {
+                let l = CHUNK_LENGTH * i;
+                let r = l + chunk.len();
+                unsafe {
+                    let buf = (buf as *mut T).offset(l as isize);
+                    (l, r, mergesort(chunk, buf, &is_less))
+                }
+            })
+            .collect::<Vec<_>>()
+            .into_iter()
+            .peekable()
+    };
+
+    // Now attempt to concatenate adjacent chunks that were left intact.
+    let mut chunks = Vec::with_capacity(iter.len());
+
+    while let Some((a, mut b, res)) = iter.next() {
+        // If this chunk was not modified by the sort procedure...
+        if res != MergesortResult::Sorted {
+            while let Some(&(x, y, r)) = iter.peek() {
+                // If the following chunk is of the same type and can be concatenated...
+                if r == res && (r == MergesortResult::Descending) == is_less(&v[x], &v[x - 1]) {
+                    // Concatenate them.
+                    b = y;
+                    iter.next();
+                } else {
+                    break;
+                }
+            }
+        }
+
+        // Descending chunks must be reversed.
+        if res == MergesortResult::Descending {
+            v[a..b].reverse();
+        }
+
+        chunks.push((a, b));
+    }
+
+    // All chunks are properly sorted.
+    // Now we just have to merge them together.
+    unsafe {
+        recurse(v.as_mut_ptr(), buf as *mut T, &chunks, false, &is_less);
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use rand::{thread_rng, Rng};
+    use super::split_for_merge;
+
+    #[test]
+    fn test_split_for_merge() {
+        fn check(left: &[u32], right: &[u32]) {
+            let (l, r) = split_for_merge(left, right, &|&a, &b| a < b);
+            assert!(left[..l].iter().all(|&x| right[r..].iter().all(|&y| x <= y)));
+            assert!(right[..r].iter().all(|&x| left[l..].iter().all(|&y| x < y)));
+        }
+
+        check(&[1, 2, 2, 2, 2, 3], &[1, 2, 2, 2, 2, 3]);
+        check(&[1, 2, 2, 2, 2, 3], &[]);
+        check(&[], &[1, 2, 2, 2, 2, 3]);
+
+        for _ in 0..100 {
+            let mut rng = thread_rng();
+
+            let limit = rng.gen::<u32>() % 20 + 1;
+            let left_len = rng.gen::<usize>() % 20;
+            let right_len = rng.gen::<usize>() % 20;
+
+            let mut left = rng.gen_iter::<u32>()
+                .map(|x| x % limit)
+                .take(left_len)
+                .collect::<Vec<_>>();
+            let mut right = rng.gen_iter::<u32>()
+                .map(|x| x % limit)
+                .take(right_len)
+                .collect::<Vec<_>>();
+
+            left.sort();
+            right.sort();
+            check(&left, &right);
+        }
+    }
+}
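For orientation, a safe sequential sketch of the stable merge that `merge` and `par_merge` implement with raw pointers above; preferring the left run on ties is what keeps the sort stable (illustrative only, not the crate's code):

    fn merge_sorted<T, F>(left: &[T], right: &[T], is_less: &F) -> Vec<T>
    where
        T: Clone,
        F: Fn(&T, &T) -> bool,
    {
        let mut out = Vec::with_capacity(left.len() + right.len());
        let (mut i, mut j) = (0, 0);
        while i < left.len() && j < right.len() {
            // Take from the right only when strictly less; ties go to the left run.
            if is_less(&right[j], &left[i]) {
                out.push(right[j].clone());
                j += 1;
            } else {
                out.push(left[i].clone());
                i += 1;
            }
        }
        out.extend_from_slice(&left[i..]);
        out.extend_from_slice(&right[j..]);
        out
    }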
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/src/slice/mod.rs
@@ -0,0 +1,740 @@
+//! This module contains the parallel iterator types for slices
+//! (`[T]`). You will rarely need to interact with it directly unless
+//! you have need to name one of those types.
+
+mod mergesort;
+mod quicksort;
+
+mod test;
+
+use iter::*;
+use iter::internal::*;
+use self::mergesort::par_mergesort;
+use self::quicksort::par_quicksort;
+use split_producer::*;
+use std::cmp;
+use std::cmp::Ordering;
+
+/// Parallel extensions for slices.
+pub trait ParallelSlice<T: Sync> {
+    /// Returns a plain slice, which is used to implement the rest of the
+    /// parallel methods.
+    fn as_parallel_slice(&self) -> &[T];
+
+    /// Returns a parallel iterator over subslices separated by elements that
+    /// match the separator.
+    fn par_split<P>(&self, separator: P) -> Split<T, P>
+        where P: Fn(&T) -> bool + Sync + Send
+    {
+        Split {
+            slice: self.as_parallel_slice(),
+            separator: separator,
+        }
+    }
+
+    /// Returns a parallel iterator over all contiguous windows of
+    /// length `size`. The windows overlap.
+    fn par_windows(&self, window_size: usize) -> Windows<T> {
+        Windows {
+            window_size: window_size,
+            slice: self.as_parallel_slice(),
+        }
+    }
+
+    /// Returns a parallel iterator over at most `chunk_size` elements of
+    /// `self` at a time. The chunks do not overlap.
+    fn par_chunks(&self, chunk_size: usize) -> Chunks<T> {
+        Chunks {
+            chunk_size: chunk_size,
+            slice: self.as_parallel_slice(),
+        }
+    }
+}
+
+impl<T: Sync> ParallelSlice<T> for [T] {
+    #[inline]
+    fn as_parallel_slice(&self) -> &[T] {
+        self
+    }
+}
+
+
+/// Parallel extensions for mutable slices.
+pub trait ParallelSliceMut<T: Send> {
+    /// Returns a plain mutable slice, which is used to implement the rest of
+    /// the parallel methods.
+    fn as_parallel_slice_mut(&mut self) -> &mut [T];
+
+    /// Returns a parallel iterator over mutable subslices separated by
+    /// elements that match the separator.
+    fn par_split_mut<P>(&mut self, separator: P) -> SplitMut<T, P>
+        where P: Fn(&T) -> bool + Sync + Send
+    {
+        SplitMut {
+            slice: self.as_parallel_slice_mut(),
+            separator: separator,
+        }
+    }
+
+    /// Returns a parallel iterator over at most `chunk_size` elements of
+    /// `self` at a time. The chunks are mutable and do not overlap.
+    fn par_chunks_mut(&mut self, chunk_size: usize) -> ChunksMut<T> {
+        ChunksMut {
+            chunk_size: chunk_size,
+            slice: self.as_parallel_slice_mut(),
+        }
+    }
+
+    /// Sorts the slice in parallel.
+    ///
+    /// This sort is stable (i.e. does not reorder equal elements) and `O(n log n)` worst-case.
+    ///
+    /// When applicable, unstable sorting is preferred because it is generally faster than stable
+    /// sorting and it doesn't allocate auxiliary memory.
+    /// See [`par_sort_unstable`](#method.par_sort_unstable).
+    ///
+    /// # Current implementation
+    ///
+    /// The current algorithm is an adaptive merge sort inspired by
+    /// [timsort](https://en.wikipedia.org/wiki/Timsort).
+    /// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
+    /// two or more sorted sequences concatenated one after another.
+    ///
+    /// Also, it allocates temporary storage the same size as `self`, but for very short slices a
+    /// non-allocating insertion sort is used instead.
+    ///
+    /// In order to sort the slice in parallel, the slice is first divided into smaller chunks and
+    /// all chunks are sorted in parallel. Then, adjacent chunks that together form non-descending
+    /// or descending runs are concatenated. Finally, the remaining chunks are merged together using
+    /// parallel subdivision of chunks and a parallel merge operation.
+    fn par_sort(&mut self)
+    where
+        T: Ord,
+    {
+        par_mergesort(self.as_parallel_slice_mut(), |a, b| a.lt(b));
+    }
+
+    /// Sorts the slice in parallel with a comparator function.
+    ///
+    /// This sort is stable (i.e. does not reorder equal elements) and `O(n log n)` worst-case.
+    ///
+    /// When applicable, unstable sorting is preferred because it is generally faster than stable
+    /// sorting and it doesn't allocate auxiliary memory.
+    /// See [`par_sort_unstable_by`](#method.par_sort_unstable_by).
+    ///
+    /// # Current implementation
+    ///
+    /// The current algorithm is an adaptive merge sort inspired by
+    /// [timsort](https://en.wikipedia.org/wiki/Timsort).
+    /// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
+    /// two or more sorted sequences concatenated one after another.
+    ///
+    /// Also, it allocates temporary storage the same size as `self`, but for very short slices a
+    /// non-allocating insertion sort is used instead.
+    ///
+    /// In order to sort the slice in parallel, the slice is first divided into smaller chunks and
+    /// all chunks are sorted in parallel. Then, adjacent chunks that together form non-descending
+    /// or descending runs are concatenated. Finally, the remaining chunks are merged together using
+    /// parallel subdivision of chunks and a parallel merge operation.
+    fn par_sort_by<F>(&mut self, compare: F)
+    where
+        F: Fn(&T, &T) -> Ordering + Sync,
+    {
+        par_mergesort(self.as_parallel_slice_mut(), |a, b| compare(a, b) == Ordering::Less);
+    }
+
+    /// Sorts the slice in parallel with a key extraction function.
+    ///
+    /// This sort is stable (i.e. does not reorder equal elements) and `O(n log n)` worst-case.
+    ///
+    /// When applicable, unstable sorting is preferred because it is generally faster than stable
+    /// sorting and it doesn't allocate auxiliary memory.
+    /// See [`par_sort_unstable_by_key`](#method.par_sort_unstable_by_key).
+    ///
+    /// # Current implementation
+    ///
+    /// The current algorithm is an adaptive merge sort inspired by
+    /// [timsort](https://en.wikipedia.org/wiki/Timsort).
+    /// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
+    /// two or more sorted sequences concatenated one after another.
+    ///
+    /// Also, it allocates temporary storage the same size as `self`, but for very short slices a
+    /// non-allocating insertion sort is used instead.
+    ///
+    /// In order to sort the slice in parallel, the slice is first divided into smaller chunks and
+    /// all chunks are sorted in parallel. Then, adjacent chunks that together form non-descending
+    /// or descending runs are concatenated. Finally, the remaining chunks are merged together using
+    /// parallel subdivision of chunks and a parallel merge operation.
+    fn par_sort_by_key<B, F>(&mut self, f: F)
+    where
+        B: Ord,
+        F: Fn(&T) -> B + Sync,
+    {
+        par_mergesort(self.as_parallel_slice_mut(), |a, b| f(a).lt(&f(b)));
+    }
+
+    /// Sorts the slice in parallel, but may not preserve the order of equal elements.
+    ///
+    /// This sort is unstable (i.e. may reorder equal elements), in-place (i.e. does not allocate),
+    /// and `O(n log n)` worst-case.
+    ///
+    /// # Current implementation
+    ///
+    /// The current algorithm is based on Orson Peters' [pattern-defeating quicksort][pdqsort],
+    /// which is a quicksort variant designed to be very fast on certain kinds of patterns,
+    /// sometimes achieving linear time. It is randomized but deterministic, and falls back to
+    /// heapsort on degenerate inputs.
+    ///
+    /// It is generally faster than stable sorting, except in a few special cases, e.g. when the
+    /// slice consists of several concatenated sorted sequences.
+    ///
+    /// All quicksorts work in two stages: partitioning into two halves followed by recursive
+    /// calls. The partitioning phase is sequential, but the two recursive calls are performed in
+    /// parallel.
+    ///
+    /// [pdqsort]: https://github.com/orlp/pdqsort
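+    ///
+    /// # Examples
+    ///
+    /// A minimal usage sketch (assuming `rayon::prelude::*` is in scope):
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    ///
+    /// let mut v = [-5, 4, 1, -3, 2];
+    /// v.par_sort_unstable();
+    /// assert_eq!(v, [-5, -3, 1, 2, 4]);
+    /// ```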
+    fn par_sort_unstable(&mut self)
+    where
+        T: Ord,
+    {
+        par_quicksort(self.as_parallel_slice_mut(), |a, b| a.lt(b));
+    }
+
+    /// Sorts the slice in parallel with a comparator function, but may not preserve the order of
+    /// equal elements.
+    ///
+    /// This sort is unstable (i.e. may reorder equal elements), in-place (i.e. does not allocate),
+    /// and `O(n log n)` worst-case.
+    ///
+    /// # Current implementation
+    ///
+    /// The current algorithm is based on Orson Peters' [pattern-defeating quicksort][pdqsort],
+    /// which is a quicksort variant designed to be very fast on certain kinds of patterns,
+    /// sometimes achieving linear time. It is randomized but deterministic, and falls back to
+    /// heapsort on degenerate inputs.
+    ///
+    /// It is generally faster than stable sorting, except in a few special cases, e.g. when the
+    /// slice consists of several concatenated sorted sequences.
+    ///
+    /// All quicksorts work in two stages: partitioning into two halves followed by recursive
+    /// calls. The partitioning phase is sequential, but the two recursive calls are performed in
+    /// parallel.
+    ///
+    /// [pdqsort]: https://github.com/orlp/pdqsort
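+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch of sorting in descending order (assuming `rayon::prelude::*` is in scope):
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    ///
+    /// let mut v = [5, 4, 1, 3, 2];
+    /// v.par_sort_unstable_by(|a, b| b.cmp(a));
+    /// assert_eq!(v, [5, 4, 3, 2, 1]);
+    /// ```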
+    fn par_sort_unstable_by<F>(&mut self, compare: F)
+    where
+        F: Fn(&T, &T) -> Ordering + Sync,
+    {
+        par_quicksort(self.as_parallel_slice_mut(), |a, b| compare(a, b) == Ordering::Less);
+    }
+
+    /// Sorts the slice in parallel with a key extraction function, but may not preserve the order
+    /// of equal elements.
+    ///
+    /// This sort is unstable (i.e. may reorder equal elements), in-place (i.e. does not allocate),
+    /// and `O(n log n)` worst-case.
+    ///
+    /// # Current implementation
+    ///
+    /// The current algorithm is based on Orson Peters' [pattern-defeating quicksort][pdqsort],
+    /// which is a quicksort variant designed to be very fast on certain kinds of patterns,
+    /// sometimes achieving linear time. It is randomized but deterministic, and falls back to
+    /// heapsort on degenerate inputs.
+    ///
+    /// It is generally faster than stable sorting, except in a few special cases, e.g. when the
+    /// slice consists of several concatenated sorted sequences.
+    ///
+    /// All quicksorts work in two stages: partitioning into two halves followed by recursive
+    /// calls. The partitioning phase is sequential, but the two recursive calls are performed in
+    /// parallel.
+    ///
+    /// [pdqsort]: https://github.com/orlp/pdqsort
+    fn par_sort_unstable_by_key<B, F>(&mut self, f: F)
+    where
+        B: Ord,
+        F: Fn(&T) -> B + Sync,
+    {
+        par_quicksort(self.as_parallel_slice_mut(), |a, b| f(a).lt(&f(b)));
+    }
+}
+
+impl<T: Send> ParallelSliceMut<T> for [T] {
+    #[inline]
+    fn as_parallel_slice_mut(&mut self) -> &mut [T] {
+        self
+    }
+}
+
+
+impl<'data, T: Sync + 'data> IntoParallelIterator for &'data [T] {
+    type Item = &'data T;
+    type Iter = Iter<'data, T>;
+
+    fn into_par_iter(self) -> Self::Iter {
+        Iter { slice: self }
+    }
+}
+
+impl<'data, T: Sync + 'data> IntoParallelIterator for &'data Vec<T> {
+    type Item = &'data T;
+    type Iter = Iter<'data, T>;
+
+    fn into_par_iter(self) -> Self::Iter {
+        Iter { slice: self }
+    }
+}
+
+impl<'data, T: Send + 'data> IntoParallelIterator for &'data mut [T] {
+    type Item = &'data mut T;
+    type Iter = IterMut<'data, T>;
+
+    fn into_par_iter(self) -> Self::Iter {
+        IterMut { slice: self }
+    }
+}
+
+impl<'data, T: Send + 'data> IntoParallelIterator for &'data mut Vec<T> {
+    type Item = &'data mut T;
+    type Iter = IterMut<'data, T>;
+
+    fn into_par_iter(self) -> Self::Iter {
+        IterMut { slice: self }
+    }
+}
+
+
+/// Parallel iterator over immutable items in a slice
+pub struct Iter<'data, T: 'data + Sync> {
+    slice: &'data [T],
+}
+
+impl<'data, T: Sync + 'data> ParallelIterator for Iter<'data, T> {
+    type Item = &'data T;
+
+    fn drive_unindexed<C>(self, consumer: C) -> C::Result
+        where C: UnindexedConsumer<Self::Item>
+    {
+        bridge(self, consumer)
+    }
+
+    fn opt_len(&mut self) -> Option<usize> {
+        Some(self.len())
+    }
+}
+
+impl<'data, T: Sync + 'data> IndexedParallelIterator for Iter<'data, T> {
+    fn drive<C>(self, consumer: C) -> C::Result
+        where C: Consumer<Self::Item>
+    {
+        bridge(self, consumer)
+    }
+
+    fn len(&mut self) -> usize {
+        self.slice.len()
+    }
+
+    fn with_producer<CB>(self, callback: CB) -> CB::Output
+        where CB: ProducerCallback<Self::Item>
+    {
+        callback.callback(IterProducer { slice: self.slice })
+    }
+}
+
+struct IterProducer<'data, T: 'data + Sync> {
+    slice: &'data [T],
+}
+
+impl<'data, T: 'data + Sync> Producer for IterProducer<'data, T> {
+    type Item = &'data T;
+    type IntoIter = ::std::slice::Iter<'data, T>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        self.slice.into_iter()
+    }
+
+    fn split_at(self, index: usize) -> (Self, Self) {
+        let (left, right) = self.slice.split_at(index);
+        (IterProducer { slice: left }, IterProducer { slice: right })
+    }
+}
+
+
+/// Parallel iterator over immutable non-overlapping chunks of a slice
+pub struct Chunks<'data, T: 'data + Sync> {
+    chunk_size: usize,
+    slice: &'data [T],
+}
+
+impl<'data, T: Sync + 'data> ParallelIterator for Chunks<'data, T> {
+    type Item = &'data [T];
+
+    fn drive_unindexed<C>(self, consumer: C) -> C::Result
+        where C: UnindexedConsumer<Self::Item>
+    {
+        bridge(self, consumer)
+    }
+
+    fn opt_len(&mut self) -> Option<usize> {
+        Some(self.len())
+    }
+}
+
+impl<'data, T: Sync + 'data> IndexedParallelIterator for Chunks<'data, T> {
+    fn drive<C>(self, consumer: C) -> C::Result
+        where C: Consumer<Self::Item>
+    {
+        bridge(self, consumer)
+    }
+
+    fn len(&mut self) -> usize {
+        (self.slice.len() + (self.chunk_size - 1)) / self.chunk_size
+    }
+
+    fn with_producer<CB>(self, callback: CB) -> CB::Output
+        where CB: ProducerCallback<Self::Item>
+    {
+        callback.callback(ChunksProducer {
+                              chunk_size: self.chunk_size,
+                              slice: self.slice,
+                          })
+    }
+}
+
+struct ChunksProducer<'data, T: 'data + Sync> {
+    chunk_size: usize,
+    slice: &'data [T],
+}
+
+impl<'data, T: 'data + Sync> Producer for ChunksProducer<'data, T> {
+    type Item = &'data [T];
+    type IntoIter = ::std::slice::Chunks<'data, T>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        self.slice.chunks(self.chunk_size)
+    }
+
+    fn split_at(self, index: usize) -> (Self, Self) {
+        let elem_index = index * self.chunk_size;
+        let (left, right) = self.slice.split_at(elem_index);
+        (ChunksProducer {
+             chunk_size: self.chunk_size,
+             slice: left,
+         },
+         ChunksProducer {
+             chunk_size: self.chunk_size,
+             slice: right,
+         })
+    }
+}
+
+
+/// Parallel iterator over immutable overlapping windows of a slice
+pub struct Windows<'data, T: 'data + Sync> {
+    window_size: usize,
+    slice: &'data [T],
+}
+
+impl<'data, T: Sync + 'data> ParallelIterator for Windows<'data, T> {
+    type Item = &'data [T];
+
+    fn drive_unindexed<C>(self, consumer: C) -> C::Result
+        where C: UnindexedConsumer<Self::Item>
+    {
+        bridge(self, consumer)
+    }
+
+    fn opt_len(&mut self) -> Option<usize> {
+        Some(self.len())
+    }
+}
+
+impl<'data, T: Sync + 'data> IndexedParallelIterator for Windows<'data, T> {
+    fn drive<C>(self, consumer: C) -> C::Result
+        where C: Consumer<Self::Item>
+    {
+        bridge(self, consumer)
+    }
+
+    fn len(&mut self) -> usize {
+        assert!(self.window_size >= 1);
+        self.slice.len().saturating_sub(self.window_size - 1)
+    }
+
+    fn with_producer<CB>(self, callback: CB) -> CB::Output
+        where CB: ProducerCallback<Self::Item>
+    {
+        callback.callback(WindowsProducer {
+                              window_size: self.window_size,
+                              slice: self.slice,
+                          })
+    }
+}
+
+struct WindowsProducer<'data, T: 'data + Sync> {
+    window_size: usize,
+    slice: &'data [T],
+}
+
+impl<'data, T: 'data + Sync> Producer for WindowsProducer<'data, T> {
+    type Item = &'data [T];
+    type IntoIter = ::std::slice::Windows<'data, T>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        self.slice.windows(self.window_size)
+    }
+
+    fn split_at(self, index: usize) -> (Self, Self) {
+        let left_index = cmp::min(self.slice.len(), index + (self.window_size - 1));
+        let left = &self.slice[..left_index];
+        let right = &self.slice[index..];
+        (WindowsProducer {
+             window_size: self.window_size,
+             slice: left,
+         },
+         WindowsProducer {
+             window_size: self.window_size,
+             slice: right,
+         })
+    }
+}
+
+
+/// Parallel iterator over mutable items in a slice
+pub struct IterMut<'data, T: 'data + Send> {
+    slice: &'data mut [T],
+}
+
+impl<'data, T: Send + 'data> ParallelIterator for IterMut<'data, T> {
+    type Item = &'data mut T;
+
+    fn drive_unindexed<C>(self, consumer: C) -> C::Result
+        where C: UnindexedConsumer<Self::Item>
+    {
+        bridge(self, consumer)
+    }
+
+    fn opt_len(&mut self) -> Option<usize> {
+        Some(self.len())
+    }
+}
+
+impl<'data, T: Send + 'data> IndexedParallelIterator for IterMut<'data, T> {
+    fn drive<C>(self, consumer: C) -> C::Result
+        where C: Consumer<Self::Item>
+    {
+        bridge(self, consumer)
+    }
+
+    fn len(&mut self) -> usize {
+        self.slice.len()
+    }
+
+    fn with_producer<CB>(self, callback: CB) -> CB::Output
+        where CB: ProducerCallback<Self::Item>
+    {
+        callback.callback(IterMutProducer { slice: self.slice })
+    }
+}
+
+struct IterMutProducer<'data, T: 'data + Send> {
+    slice: &'data mut [T],
+}
+
+impl<'data, T: 'data + Send> Producer for IterMutProducer<'data, T> {
+    type Item = &'data mut T;
+    type IntoIter = ::std::slice::IterMut<'data, T>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        self.slice.into_iter()
+    }
+
+    fn split_at(self, index: usize) -> (Self, Self) {
+        let (left, right) = self.slice.split_at_mut(index);
+        (IterMutProducer { slice: left }, IterMutProducer { slice: right })
+    }
+}
+
+
+/// Parallel iterator over mutable non-overlapping chunks of a slice
+pub struct ChunksMut<'data, T: 'data + Send> {
+    chunk_size: usize,
+    slice: &'data mut [T],
+}
+
+impl<'data, T: Send + 'data> ParallelIterator for ChunksMut<'data, T> {
+    type Item = &'data mut [T];
+
+    fn drive_unindexed<C>(self, consumer: C) -> C::Result
+        where C: UnindexedConsumer<Self::Item>
+    {
+        bridge(self, consumer)
+    }
+
+    fn opt_len(&mut self) -> Option<usize> {
+        Some(self.len())
+    }
+}
+
+impl<'data, T: Send + 'data> IndexedParallelIterator for ChunksMut<'data, T> {
+    fn drive<C>(self, consumer: C) -> C::Result
+        where C: Consumer<Self::Item>
+    {
+        bridge(self, consumer)
+    }
+
+    fn len(&mut self) -> usize {
+        (self.slice.len() + (self.chunk_size - 1)) / self.chunk_size
+    }
+
+    fn with_producer<CB>(self, callback: CB) -> CB::Output
+        where CB: ProducerCallback<Self::Item>
+    {
+        callback.callback(ChunksMutProducer {
+                              chunk_size: self.chunk_size,
+                              slice: self.slice,
+                          })
+    }
+}
+
+struct ChunksMutProducer<'data, T: 'data + Send> {
+    chunk_size: usize,
+    slice: &'data mut [T],
+}
+
+impl<'data, T: 'data + Send> Producer for ChunksMutProducer<'data, T> {
+    type Item = &'data mut [T];
+    type IntoIter = ::std::slice::ChunksMut<'data, T>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        self.slice.chunks_mut(self.chunk_size)
+    }
+
+    fn split_at(self, index: usize) -> (Self, Self) {
+        let elem_index = index * self.chunk_size;
+        let (left, right) = self.slice.split_at_mut(elem_index);
+        (ChunksMutProducer {
+             chunk_size: self.chunk_size,
+             slice: left,
+         },
+         ChunksMutProducer {
+             chunk_size: self.chunk_size,
+             slice: right,
+         })
+    }
+}
+
+
+/// Parallel iterator over slices separated by a predicate
+pub struct Split<'data, T: 'data, P> {
+    slice: &'data [T],
+    separator: P,
+}
+
+impl<'data, T, P> ParallelIterator for Split<'data, T, P>
+    where P: Fn(&T) -> bool + Sync + Send,
+          T: Sync
+{
+    type Item = &'data [T];
+
+    fn drive_unindexed<C>(self, consumer: C) -> C::Result
+        where C: UnindexedConsumer<Self::Item>
+    {
+        let producer = SplitProducer::new(self.slice, &self.separator);
+        bridge_unindexed(producer, consumer)
+    }
+}
+
+/// Implement support for `SplitProducer`.
+impl<'data, T, P> Fissile<P> for &'data [T]
+    where P: Fn(&T) -> bool
+{
+    fn length(&self) -> usize {
+        self.len()
+    }
+
+    fn midpoint(&self, end: usize) -> usize {
+        end / 2
+    }
+
+    fn find(&self, separator: &P, start: usize, end: usize) -> Option<usize> {
+        self[start..end].iter().position(separator)
+    }
+
+    fn rfind(&self, separator: &P, end: usize) -> Option<usize> {
+        self[..end].iter().rposition(separator)
+    }
+
+    fn split_once(self, index: usize) -> (Self, Self) {
+        let (left, right) = self.split_at(index);
+        (left, &right[1..]) // skip the separator
+    }
+
+    fn fold_splits<F>(self, separator: &P, folder: F, skip_last: bool) -> F
+        where F: Folder<Self>,
+              Self: Send
+    {
+        let mut split = self.split(separator);
+        if skip_last {
+            split.next_back();
+        }
+        folder.consume_iter(split)
+    }
+}
+
+
+/// Parallel iterator over mutable slices separated by a predicate
+pub struct SplitMut<'data, T: 'data, P> {
+    slice: &'data mut [T],
+    separator: P,
+}
+
+impl<'data, T, P> ParallelIterator for SplitMut<'data, T, P>
+    where P: Fn(&T) -> bool + Sync + Send,
+          T: Send
+{
+    type Item = &'data mut [T];
+
+    fn drive_unindexed<C>(self, consumer: C) -> C::Result
+        where C: UnindexedConsumer<Self::Item>
+    {
+        let producer = SplitProducer::new(self.slice, &self.separator);
+        bridge_unindexed(producer, consumer)
+    }
+}
+
+/// Implement support for `SplitProducer`.
+impl<'data, T, P> Fissile<P> for &'data mut [T]
+    where P: Fn(&T) -> bool
+{
+    fn length(&self) -> usize {
+        self.len()
+    }
+
+    fn midpoint(&self, end: usize) -> usize {
+        end / 2
+    }
+
+    fn find(&self, separator: &P, start: usize, end: usize) -> Option<usize> {
+        self[start..end].iter().position(separator)
+    }
+
+    fn rfind(&self, separator: &P, end: usize) -> Option<usize> {
+        self[..end].iter().rposition(separator)
+    }
+
+    fn split_once(self, index: usize) -> (Self, Self) {
+        let (left, right) = self.split_at_mut(index);
+        (left, &mut right[1..]) // skip the separator
+    }
+
+    fn fold_splits<F>(self, separator: &P, folder: F, skip_last: bool) -> F
+        where F: Folder<Self>,
+              Self: Send
+    {
+        let mut split = self.split_mut(separator);
+        if skip_last {
+            split.next_back();
+        }
+        folder.consume_iter(split)
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/src/slice/quicksort.rs
@@ -0,0 +1,788 @@
+//! Parallel quicksort.
+//!
+//! This implementation is copied verbatim from `std::slice::sort_unstable` and then parallelized.
+//! The only difference from the original is that calls to `recurse` are executed in parallel using
+//! `rayon_core::join`.
+
+use rayon_core;
+use std::cmp;
+use std::mem;
+use std::ptr;
+
+/// When dropped, takes the value out of `Option` and writes it into `dest`.
+///
+/// This allows us to safely read the pivot into a stack-allocated variable for efficiency, and
+/// write it back into the slice after partitioning. This way we ensure that the write happens
+/// even if `is_less` panics in the meantime.
+struct WriteOnDrop<T> {
+    value: Option<T>,
+    dest: *mut T,
+}
+
+impl<T> Drop for WriteOnDrop<T> {
+    fn drop(&mut self) {
+        unsafe {
+            ptr::write(self.dest, self.value.take().unwrap());
+        }
+    }
+}
+
+/// Holds a value, but never drops it.
+struct NoDrop<T> {
+    value: Option<T>,
+}
+
+impl<T> Drop for NoDrop<T> {
+    fn drop(&mut self) {
+        mem::forget(self.value.take());
+    }
+}
+
+/// When dropped, copies from `src` into `dest`.
+struct CopyOnDrop<T> {
+    src: *mut T,
+    dest: *mut T,
+}
+
+impl<T> Drop for CopyOnDrop<T> {
+    fn drop(&mut self) {
+        unsafe {
+            ptr::copy_nonoverlapping(self.src, self.dest, 1);
+        }
+    }
+}
+
+/// Shifts the first element to the right until it encounters a greater or equal element.
+fn shift_head<T, F>(v: &mut [T], is_less: &F)
+where
+    F: Fn(&T, &T) -> bool,
+{
+    let len = v.len();
+    unsafe {
+        // If the first two elements are out-of-order...
+        if len >= 2 && is_less(v.get_unchecked(1), v.get_unchecked(0)) {
+            // Read the first element into a stack-allocated variable. If a following comparison
+            // operation panics, `hole` will get dropped and automatically write the element back
+            // into the slice.
+            let mut tmp = NoDrop { value: Some(ptr::read(v.get_unchecked(0))) };
+            let mut hole = CopyOnDrop {
+                src: tmp.value.as_mut().unwrap(),
+                dest: v.get_unchecked_mut(1),
+            };
+            ptr::copy_nonoverlapping(v.get_unchecked(1), v.get_unchecked_mut(0), 1);
+
+            for i in 2..len {
+                if !is_less(v.get_unchecked(i), tmp.value.as_ref().unwrap()) {
+                    break;
+                }
+
+                // Move `i`-th element one place to the left, thus shifting the hole to the right.
+                ptr::copy_nonoverlapping(v.get_unchecked(i), v.get_unchecked_mut(i - 1), 1);
+                hole.dest = v.get_unchecked_mut(i);
+            }
+            // `hole` gets dropped and thus copies `tmp` into the remaining hole in `v`.
+        }
+    }
+}
+
+/// Shifts the last element to the left until it encounters a smaller or equal element.
+fn shift_tail<T, F>(v: &mut [T], is_less: &F)
+where
+    F: Fn(&T, &T) -> bool,
+{
+    let len = v.len();
+    unsafe {
+        // If the last two elements are out-of-order...
+        if len >= 2 && is_less(v.get_unchecked(len - 1), v.get_unchecked(len - 2)) {
+            // Read the last element into a stack-allocated variable. If a following comparison
+            // operation panics, `hole` will get dropped and automatically write the element back
+            // into the slice.
+            let mut tmp = NoDrop { value: Some(ptr::read(v.get_unchecked(len - 1))) };
+            let mut hole = CopyOnDrop {
+                src: tmp.value.as_mut().unwrap(),
+                dest: v.get_unchecked_mut(len - 2),
+            };
+            ptr::copy_nonoverlapping(v.get_unchecked(len - 2), v.get_unchecked_mut(len - 1), 1);
+
+            for i in (0..len - 2).rev() {
+                if !is_less(&tmp.value.as_ref().unwrap(), v.get_unchecked(i)) {
+                    break;
+                }
+
+                // Move `i`-th element one place to the right, thus shifting the hole to the left.
+                ptr::copy_nonoverlapping(v.get_unchecked(i), v.get_unchecked_mut(i + 1), 1);
+                hole.dest = v.get_unchecked_mut(i);
+            }
+            // `hole` gets dropped and thus copies `tmp` into the remaining hole in `v`.
+        }
+    }
+}
+
+/// Partially sorts a slice by shifting several out-of-order elements around.
+///
+/// Returns `true` if the slice is sorted at the end. This function is `O(n)` worst-case.
+#[cold]
+fn partial_insertion_sort<T, F>(v: &mut [T], is_less: &F) -> bool
+where
+    F: Fn(&T, &T) -> bool,
+{
+    // Maximum number of adjacent out-of-order pairs that will get shifted.
+    const MAX_STEPS: usize = 5;
+    // If the slice is shorter than this, don't shift any elements.
+    const SHORTEST_SHIFTING: usize = 50;
+
+    let len = v.len();
+    let mut i = 1;
+
+    for _ in 0..MAX_STEPS {
+        unsafe {
+            // Find the next pair of adjacent out-of-order elements.
+            while i < len && !is_less(v.get_unchecked(i), v.get_unchecked(i - 1)) {
+                i += 1;
+            }
+        }
+
+        // Are we done?
+        if i == len {
+            return true;
+        }
+
+        // Don't shift elements on short arrays; that has a performance cost.
+        if len < SHORTEST_SHIFTING {
+            return false;
+        }
+
+        // Swap the found pair of elements. This puts them in correct order.
+        v.swap(i - 1, i);
+
+        // Shift the smaller element to the left.
+        shift_tail(&mut v[..i], is_less);
+        // Shift the greater element to the right.
+        shift_head(&mut v[i..], is_less);
+    }
+
+    // Didn't manage to sort the slice in the limited number of steps.
+    false
+}
+
+/// Sorts a slice using insertion sort, which is `O(n^2)` worst-case.
+fn insertion_sort<T, F>(v: &mut [T], is_less: &F)
+where
+    F: Fn(&T, &T) -> bool,
+{
+    for i in 1..v.len() {
+        shift_tail(&mut v[..i + 1], is_less);
+    }
+}
+
+/// Sorts `v` using heapsort, which guarantees `O(n log n)` worst-case.
+#[cold]
+fn heapsort<T, F>(v: &mut [T], is_less: &F)
+where
+    F: Fn(&T, &T) -> bool,
+{
+    // This binary heap respects the invariant `parent >= child`.
+    let sift_down = |v: &mut [T], mut node| {
+        loop {
+            // Children of `node`:
+            let left = 2 * node + 1;
+            let right = 2 * node + 2;
+
+            // Choose the greater child.
+            let greater = if right < v.len() && is_less(&v[left], &v[right]) {
+                right
+            } else {
+                left
+            };
+
+            // Stop if the invariant holds at `node`.
+            if greater >= v.len() || !is_less(&v[node], &v[greater]) {
+                break;
+            }
+
+            // Swap `node` with the greater child, move one step down, and continue sifting.
+            v.swap(node, greater);
+            node = greater;
+        }
+    };
+
+    // Build the heap in linear time.
+    for i in (0..v.len() / 2).rev() {
+        sift_down(v, i);
+    }
+
+    // Pop maximal elements from the heap.
+    for i in (1..v.len()).rev() {
+        v.swap(0, i);
+        sift_down(&mut v[..i], 0);
+    }
+}
+
+/// Partitions `v` into elements smaller than `pivot`, followed by elements greater than or equal
+/// to `pivot`.
+///
+/// Returns the number of elements smaller than `pivot`.
+///
+/// Partitioning is performed block-by-block in order to minimize the cost of branching operations.
+/// This idea is presented in the [BlockQuicksort][pdf] paper.
+///
+/// [pdf]: http://drops.dagstuhl.de/opus/volltexte/2016/6389/pdf/LIPIcs-ESA-2016-38.pdf
+fn partition_in_blocks<T, F>(v: &mut [T], pivot: &T, is_less: &F) -> usize
+where
+    F: Fn(&T, &T) -> bool,
+{
+    // Number of elements in a typical block.
+    const BLOCK: usize = 128;
+
+    // The partitioning algorithm repeats the following steps until completion:
+    //
+    // 1. Trace a block from the left side to identify elements greater than or equal to the pivot.
+    // 2. Trace a block from the right side to identify elements smaller than the pivot.
+    // 3. Exchange the identified elements between the left and right side.
+    //
+    // We keep the following variables for a block of elements:
+    //
+    // 1. `block` - Number of elements in the block.
+    // 2. `start` - Start pointer into the `offsets` array.
+    // 3. `end` - End pointer into the `offsets` array.
+    // 4. `offsets` - Indices of out-of-order elements within the block.
+
+    // The current block on the left side (from `l` to `l.offset(block_l)`).
+    let mut l = v.as_mut_ptr();
+    let mut block_l = BLOCK;
+    let mut start_l = ptr::null_mut();
+    let mut end_l = ptr::null_mut();
+    let mut offsets_l: [u8; BLOCK] = unsafe { mem::uninitialized() };
+
+    // The current block on the right side (from `r.offset(-block_r)` to `r`).
+    let mut r = unsafe { l.offset(v.len() as isize) };
+    let mut block_r = BLOCK;
+    let mut start_r = ptr::null_mut();
+    let mut end_r = ptr::null_mut();
+    let mut offsets_r: [u8; BLOCK] = unsafe { mem::uninitialized() };
+
+    // Returns the number of elements between pointers `l` (inclusive) and `r` (exclusive).
+    fn width<T>(l: *mut T, r: *mut T) -> usize {
+        assert!(mem::size_of::<T>() > 0);
+        (r as usize - l as usize) / mem::size_of::<T>()
+    }
+
+    loop {
+        // We are done with partitioning block-by-block when `l` and `r` get very close. Then we do
+        // some patch-up work in order to partition the remaining elements in between.
+        let is_done = width(l, r) <= 2 * BLOCK;
+
+        if is_done {
+            // Number of remaining elements (still not compared to the pivot).
+            let mut rem = width(l, r);
+            if start_l < end_l || start_r < end_r {
+                rem -= BLOCK;
+            }
+
+            // Adjust block sizes so that the left and right blocks don't overlap, but get perfectly
+            // aligned to cover the whole remaining gap.
+            if start_l < end_l {
+                block_r = rem;
+            } else if start_r < end_r {
+                block_l = rem;
+            } else {
+                block_l = rem / 2;
+                block_r = rem - block_l;
+            }
+            debug_assert!(block_l <= BLOCK && block_r <= BLOCK);
+            debug_assert!(width(l, r) == block_l + block_r);
+        }
+
+        if start_l == end_l {
+            // Trace `block_l` elements from the left side.
+            start_l = offsets_l.as_mut_ptr();
+            end_l = offsets_l.as_mut_ptr();
+            let mut elem = l;
+
+            for i in 0..block_l {
+                unsafe {
+                    // Branchless comparison.
+                    *end_l = i as u8;
+                    end_l = end_l.offset(!is_less(&*elem, pivot) as isize);
+                    elem = elem.offset(1);
+                }
+            }
+        }
+
+        if start_r == end_r {
+            // Trace `block_r` elements from the right side.
+            start_r = offsets_r.as_mut_ptr();
+            end_r = offsets_r.as_mut_ptr();
+            let mut elem = r;
+
+            for i in 0..block_r {
+                unsafe {
+                    // Branchless comparison.
+                    elem = elem.offset(-1);
+                    *end_r = i as u8;
+                    end_r = end_r.offset(is_less(&*elem, pivot) as isize);
+                }
+            }
+        }
+
+        // Number of out-of-order elements to swap between the left and right side.
+        let count = cmp::min(width(start_l, end_l), width(start_r, end_r));
+
+        if count > 0 {
+            macro_rules! left { () => { l.offset(*start_l as isize) } }
+            macro_rules! right { () => { r.offset(-(*start_r as isize) - 1) } }
+
+            // Instead of swapping one pair at a time, it is more efficient to perform a cyclic
+            // permutation. This is not strictly equivalent to swapping, but produces a similar
+            // result using fewer memory operations.
+            unsafe {
+                let tmp = ptr::read(left!());
+                ptr::copy_nonoverlapping(right!(), left!(), 1);
+
+                for _ in 1..count {
+                    start_l = start_l.offset(1);
+                    ptr::copy_nonoverlapping(left!(), right!(), 1);
+                    start_r = start_r.offset(1);
+                    ptr::copy_nonoverlapping(right!(), left!(), 1);
+                }
+
+                ptr::copy_nonoverlapping(&tmp, right!(), 1);
+                mem::forget(tmp);
+                start_l = start_l.offset(1);
+                start_r = start_r.offset(1);
+            }
+        }
+
+        if start_l == end_l {
+            // All out-of-order elements in the left block were moved. Move to the next block.
+            l = unsafe { l.offset(block_l as isize) };
+        }
+
+        if start_r == end_r {
+            // All out-of-order elements in the right block were moved. Move to the previous block.
+            r = unsafe { r.offset(-(block_r as isize)) };
+        }
+
+        if is_done {
+            break;
+        }
+    }
+
+    // All that remains now is at most one block (either the left or the right) with out-of-order
+    // elements that need to be moved. Such remaining elements can be simply shifted to the end
+    // within their block.
+
+    if start_l < end_l {
+        // The left block remains.
+        // Move its remaining out-of-order elements to the far right.
+        debug_assert_eq!(width(l, r), block_l);
+        while start_l < end_l {
+            unsafe {
+                end_l = end_l.offset(-1);
+                ptr::swap(l.offset(*end_l as isize), r.offset(-1));
+                r = r.offset(-1);
+            }
+        }
+        width(v.as_mut_ptr(), r)
+    } else if start_r < end_r {
+        // The right block remains.
+        // Move its remaining out-of-order elements to the far left.
+        debug_assert_eq!(width(l, r), block_r);
+        while start_r < end_r {
+            unsafe {
+                end_r = end_r.offset(-1);
+                ptr::swap(l, r.offset(-(*end_r as isize) - 1));
+                l = l.offset(1);
+            }
+        }
+        width(v.as_mut_ptr(), l)
+    } else {
+        // Nothing else to do, we're done.
+        width(v.as_mut_ptr(), l)
+    }
+}
+
+/// Partitions `v` into elements smaller than `v[pivot]`, followed by elements greater than or
+/// equal to `v[pivot]`.
+///
+/// Returns a tuple of:
+///
+/// 1. Number of elements smaller than `v[pivot]`.
+/// 2. True if `v` was already partitioned.
+fn partition<T, F>(v: &mut [T], pivot: usize, is_less: &F) -> (usize, bool)
+where
+    F: Fn(&T, &T) -> bool,
+{
+    let (mid, was_partitioned) = {
+        // Place the pivot at the beginning of slice.
+        v.swap(0, pivot);
+        let (pivot, v) = v.split_at_mut(1);
+        let pivot = &mut pivot[0];
+
+        // Read the pivot into a stack-allocated variable for efficiency. If a following comparison
+        // operation panics, the pivot will be automatically written back into the slice.
+        let write_on_drop = WriteOnDrop {
+            value: unsafe { Some(ptr::read(pivot)) },
+            dest: pivot,
+        };
+        let pivot = write_on_drop.value.as_ref().unwrap();
+
+        // Find the first pair of out-of-order elements.
+        let mut l = 0;
+        let mut r = v.len();
+        unsafe {
+            // Find the first element greater than or equal to the pivot.
+            while l < r && is_less(v.get_unchecked(l), pivot) {
+                l += 1;
+            }
+
+            // Find the last element smaller than the pivot.
+            while l < r && !is_less(v.get_unchecked(r - 1), pivot) {
+                r -= 1;
+            }
+        }
+
+        (
+            l + partition_in_blocks(&mut v[l..r], pivot, is_less),
+            l >= r,
+        )
+
+        // `write_on_drop` goes out of scope and writes the pivot (which is a stack-allocated
+        // variable) back into the slice where it originally was. This step is critical in ensuring
+        // safety!
+    };
+
+    // Place the pivot between the two partitions.
+    v.swap(0, mid);
+
+    (mid, was_partitioned)
+}
+
+/// Partitions `v` into elements equal to `v[pivot]` followed by elements greater than `v[pivot]`.
+///
+/// Returns the number of elements equal to the pivot. It is assumed that `v` does not contain
+/// elements smaller than the pivot.
+fn partition_equal<T, F>(v: &mut [T], pivot: usize, is_less: &F) -> usize
+where
+    F: Fn(&T, &T) -> bool,
+{
+    // Place the pivot at the beginning of slice.
+    v.swap(0, pivot);
+    let (pivot, v) = v.split_at_mut(1);
+    let pivot = &mut pivot[0];
+
+    // Read the pivot into a stack-allocated variable for efficiency. If a following comparison
+    // operation panics, the pivot will be automatically written back into the slice.
+    let write_on_drop = WriteOnDrop {
+        value: unsafe { Some(ptr::read(pivot)) },
+        dest: pivot,
+    };
+    let pivot = write_on_drop.value.as_ref().unwrap();
+
+    // Now partition the slice.
+    let mut l = 0;
+    let mut r = v.len();
+    loop {
+        unsafe {
+            // Find the first element greater than the pivot.
+            while l < r && !is_less(pivot, v.get_unchecked(l)) {
+                l += 1;
+            }
+
+            // Find the last element equal to the pivot.
+            while l < r && is_less(pivot, v.get_unchecked(r - 1)) {
+                r -= 1;
+            }
+
+            // Are we done?
+            if l >= r {
+                break;
+            }
+
+            // Swap the found pair of out-of-order elements.
+            r -= 1;
+            ptr::swap(v.get_unchecked_mut(l), v.get_unchecked_mut(r));
+            l += 1;
+        }
+    }
+
+    // We found `l` elements equal to the pivot. Add 1 to account for the pivot itself.
+    l + 1
+
+    // `write_on_drop` goes out of scope and writes the pivot (which is a stack-allocated variable)
+    // back into the slice where it originally was. This step is critical in ensuring safety!
+}
+
+/// Scatters some elements around in an attempt to break patterns that might cause imbalanced
+/// partitions in quicksort.
+#[cold]
+fn break_patterns<T>(v: &mut [T]) {
+    let len = v.len();
+    if len >= 8 {
+        // Pseudorandom number generator from the "Xorshift RNGs" paper by George Marsaglia.
+        let mut random = len as u32;
+        let mut gen_u32 = || {
+            random ^= random << 13;
+            random ^= random >> 17;
+            random ^= random << 5;
+            random
+        };
+        let mut gen_usize = || if mem::size_of::<usize>() <= 4 {
+            gen_u32() as usize
+        } else {
+            (((gen_u32() as u64) << 32) | (gen_u32() as u64)) as usize
+        };
+
+        // Take random numbers modulo this number.
+        // The number fits into `usize` because `len` is not greater than `isize::MAX`.
+        let modulus = len.next_power_of_two();
+
+        // Some pivot candidates will be near this index. Let's randomize them.
+        let pos = len / 4 * 2;
+
+        for i in 0..3 {
+            // Generate a random number modulo `len`. However, in order to avoid costly operations
+            // we first take it modulo a power of two, and then decrease by `len` until it fits
+            // into the range `[0, len - 1]`.
+            let mut other = gen_usize() & (modulus - 1);
+
+            // `other` is guaranteed to be less than `2 * len`.
+            if other >= len {
+                other -= len;
+            }
+
+            v.swap(pos - 1 + i, other);
+        }
+    }
+}
+
+/// Chooses a pivot in `v` and returns the index and `true` if the slice is likely already sorted.
+///
+/// Elements in `v` might be reordered in the process.
+fn choose_pivot<T, F>(v: &mut [T], is_less: &F) -> (usize, bool)
+where
+    F: Fn(&T, &T) -> bool,
+{
+    // Minimum length to choose the median-of-medians method.
+    // Shorter slices use the simple median-of-three method.
+    const SHORTEST_MEDIAN_OF_MEDIANS: usize = 50;
+    // Maximum number of swaps that can be performed in this function.
+    const MAX_SWAPS: usize = 4 * 3;
+
+    let len = v.len();
+
+    // Three indices near which we are going to choose a pivot.
+    let mut a = len / 4 * 1;
+    let mut b = len / 4 * 2;
+    let mut c = len / 4 * 3;
+
+    // Counts the total number of swaps we are about to perform while sorting indices.
+    let mut swaps = 0;
+
+    if len >= 8 {
+        // Swaps indices so that `v[a] <= v[b]`.
+        let mut sort2 = |a: &mut usize, b: &mut usize| unsafe {
+            if is_less(v.get_unchecked(*b), v.get_unchecked(*a)) {
+                ptr::swap(a, b);
+                swaps += 1;
+            }
+        };
+
+        // Swaps indices so that `v[a] <= v[b] <= v[c]`.
+        let mut sort3 = |a: &mut usize, b: &mut usize, c: &mut usize| {
+            sort2(a, b);
+            sort2(b, c);
+            sort2(a, b);
+        };
+
+        if len >= SHORTEST_MEDIAN_OF_MEDIANS {
+            // Finds the median of `v[a - 1], v[a], v[a + 1]` and stores the index into `a`.
+            let mut sort_adjacent = |a: &mut usize| {
+                let tmp = *a;
+                sort3(&mut (tmp - 1), a, &mut (tmp + 1));
+            };
+
+            // Find medians in the neighborhoods of `a`, `b`, and `c`.
+            sort_adjacent(&mut a);
+            sort_adjacent(&mut b);
+            sort_adjacent(&mut c);
+        }
+
+        // Find the median among `a`, `b`, and `c`.
+        sort3(&mut a, &mut b, &mut c);
+    }
+
+    if swaps < MAX_SWAPS {
+        (b, swaps == 0)
+    } else {
+        // The maximum number of swaps was performed. Chances are the slice is descending or mostly
+        // descending, so reversing will probably help sort it faster.
+        v.reverse();
+        (len - 1 - b, true)
+    }
+}
+
+/// Sorts `v` recursively.
+///
+/// If the slice had a predecessor in the original array, it is specified as `pred`.
+///
+/// `limit` is the number of allowed imbalanced partitions before switching to `heapsort`. If zero,
+/// this function will immediately switch to heapsort.
+fn recurse<'a, T, F>(mut v: &'a mut [T], is_less: &F, mut pred: Option<&'a mut T>, mut limit: usize)
+where
+    T: Send,
+    F: Fn(&T, &T) -> bool + Sync,
+{
+    // Slices of up to this length get sorted using insertion sort.
+    const MAX_INSERTION: usize = 20;
+    // If both partitions are up to this length, we continue sequentially. This number is chosen
+    // to be as small as possible while keeping the overhead of Rayon's task scheduling negligible.
+    const MAX_SEQUENTIAL: usize = 2000;
+
+    // True if the last partitioning was reasonably balanced.
+    let mut was_balanced = true;
+    // True if the last partitioning didn't shuffle elements (the slice was already partitioned).
+    let mut was_partitioned = true;
+
+    loop {
+        let len = v.len();
+
+        // Very short slices get sorted using insertion sort.
+        if len <= MAX_INSERTION {
+            insertion_sort(v, is_less);
+            return;
+        }
+
+        // If too many bad pivot choices were made, simply fall back to heapsort in order to
+        // guarantee `O(n log n)` worst-case.
+        if limit == 0 {
+            heapsort(v, is_less);
+            return;
+        }
+
+        // If the last partitioning was imbalanced, try breaking patterns in the slice by shuffling
+        // some elements around. Hopefully we'll choose a better pivot this time.
+        if !was_balanced {
+            break_patterns(v);
+            limit -= 1;
+        }
+
+        // Choose a pivot and try guessing whether the slice is already sorted.
+        let (pivot, likely_sorted) = choose_pivot(v, is_less);
+
+        // If the last partitioning was decently balanced and didn't shuffle elements, and if pivot
+        // selection predicts the slice is likely already sorted...
+        if was_balanced && was_partitioned && likely_sorted {
+            // Try identifying several out-of-order elements and shifting them to correct
+            // positions. If the slice ends up being completely sorted, we're done.
+            if partial_insertion_sort(v, is_less) {
+                return;
+            }
+        }
+
+        // If the chosen pivot is equal to the predecessor, then it's the smallest element in the
+        // slice. Partition the slice into elements equal to and elements greater than the pivot.
+        // This case is usually hit when the slice contains many duplicate elements.
+        if let Some(ref p) = pred {
+            if !is_less(p, &v[pivot]) {
+                let mid = partition_equal(v, pivot, is_less);
+
+                // Continue sorting elements greater than the pivot.
+                v = &mut {v}[mid..];
+                continue;
+            }
+        }
+
+        // Partition the slice.
+        let (mid, was_p) = partition(v, pivot, is_less);
+        was_balanced = cmp::min(mid, len - mid) >= len / 8;
+        was_partitioned = was_p;
+
+        // Split the slice into `left`, `pivot`, and `right`.
+        let (left, right) = {v}.split_at_mut(mid);
+        let (pivot, right) = right.split_at_mut(1);
+        let pivot = &mut pivot[0];
+
+        if cmp::max(left.len(), right.len()) <= MAX_SEQUENTIAL {
+            // Recurse into the shorter side only in order to minimize the total number of recursive
+            // calls and consume less stack space. Then just continue with the longer side (this is
+            // akin to tail recursion).
+            if left.len() < right.len() {
+                recurse(left, is_less, pred, limit);
+                v = right;
+                pred = Some(pivot);
+            } else {
+                recurse(right, is_less, Some(pivot), limit);
+                v = left;
+            }
+        } else {
+            // Sort the left and right half in parallel.
+            rayon_core::join(
+                || recurse(left, is_less, pred, limit),
+                || recurse(right, is_less, Some(pivot), limit),
+            );
+            break;
+        }
+    }
+}
+
+/// Sorts `v` using pattern-defeating quicksort in parallel.
+///
+/// The algorithm is unstable, in-place, and `O(n log n)` worst-case.
+pub fn par_quicksort<T, F>(v: &mut [T], is_less: F)
+where
+    T: Send,
+    F: Fn(&T, &T) -> bool + Sync,
+{
+    // Sorting has no meaningful behavior on zero-sized types.
+    if mem::size_of::<T>() == 0 {
+        return;
+    }
+
+    // Limit the number of imbalanced partitions to `floor(log2(len)) + 1`.
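+    // For example, on a 64-bit target a slice of length 1024 has 53 leading zeros in its length,
+    // giving `limit == 11`, i.e. `floor(log2(1024)) + 1`.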
+    let limit = mem::size_of::<usize>() * 8 - v.len().leading_zeros() as usize;
+
+    recurse(v, &is_less, None, limit);
+}
+
+#[cfg(test)]
+mod tests {
+    use rand::{thread_rng, Rng};
+    use super::heapsort;
+
+    #[test]
+    fn test_heapsort() {
+        let mut rng = thread_rng();
+
+        for len in (0..25).chain(500..501) {
+            for &modulus in &[5, 10, 100] {
+                for _ in 0..100 {
+                    let v: Vec<_> = rng.gen_iter::<i32>()
+                        .map(|x| x % modulus)
+                        .take(len)
+                        .collect();
+
+                    // Test heapsort using `<` operator.
+                    let mut tmp = v.clone();
+                    heapsort(&mut tmp, &|a, b| a < b);
+                    assert!(tmp.windows(2).all(|w| w[0] <= w[1]));
+
+                    // Test heapsort using `>` operator.
+                    let mut tmp = v.clone();
+                    heapsort(&mut tmp, &|a, b| a > b);
+                    assert!(tmp.windows(2).all(|w| w[0] >= w[1]));
+                }
+            }
+        }
+
+        // Sort using a completely random comparison function.
+        // This will reorder the elements *somehow*, but won't panic.
+        let mut v: Vec<_> = (0..100).collect();
+        heapsort(&mut v, &|_, _| thread_rng().gen());
+        heapsort(&mut v, &|a, b| a < b);
+
+        for i in 0..v.len() {
+            assert_eq!(v[i], i);
+        }
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/src/slice/test.rs
@@ -0,0 +1,129 @@
+#![cfg(test)]
+
+use rand::{thread_rng, Rng};
+use std::cmp::Ordering::{Equal, Greater, Less};
+use std::mem;
+use super::ParallelSliceMut;
+
+macro_rules! sort {
+    ($f:ident, $name:ident) => {
+        #[test]
+        fn $name() {
+            let mut rng = thread_rng();
+
+            for len in (0..25).chain(500..501) {
+                for &modulus in &[5, 10, 100] {
+                    for _ in 0..100 {
+                        let v: Vec<_> = rng.gen_iter::<i32>()
+                            .map(|x| x % modulus)
+                            .take(len)
+                            .collect();
+
+                        // Test sort using `<` operator.
+                        let mut tmp = v.clone();
+                        tmp.$f(|a, b| a.cmp(b));
+                        assert!(tmp.windows(2).all(|w| w[0] <= w[1]));
+
+                        // Test sort using `>` operator.
+                        let mut tmp = v.clone();
+                        tmp.$f(|a, b| b.cmp(a));
+                        assert!(tmp.windows(2).all(|w| w[0] >= w[1]));
+                    }
+                }
+            }
+
+            // Test sort with many duplicates.
+            for &len in &[1_000, 10_000, 100_000] {
+                for &modulus in &[5, 10, 100, 10_000] {
+                    let mut v: Vec<_> = rng.gen_iter::<i32>()
+                        .map(|x| x % modulus)
+                        .take(len)
+                        .collect();
+
+                    v.$f(|a, b| a.cmp(b));
+                    assert!(v.windows(2).all(|w| w[0] <= w[1]));
+                }
+            }
+
+            // Test sort with many pre-sorted runs.
+            for &len in &[1_000, 10_000, 100_000] {
+                for &modulus in &[5, 10, 1000, 50_000] {
+                    let mut v: Vec<_> = rng.gen_iter::<i32>()
+                        .map(|x| x % modulus)
+                        .take(len)
+                        .collect();
+
+                    v.sort();
+                    v.reverse();
+
+                    for _ in 0..5 {
+                        let a = rng.gen::<usize>() % len;
+                        let b = rng.gen::<usize>() % len;
+                        if a < b {
+                            v[a..b].reverse();
+                        } else {
+                            v.swap(a, b);
+                        }
+                    }
+
+                    v.$f(|a, b| a.cmp(b));
+                    assert!(v.windows(2).all(|w| w[0] <= w[1]));
+                }
+            }
+
+            // Sort using a completely random comparison function.
+            // This will reorder the elements *somehow*, but won't panic.
+            let mut v: Vec<_> = (0..100).collect();
+            v.$f(|_, _| *thread_rng().choose(&[Less, Equal, Greater]).unwrap());
+            v.$f(|a, b| a.cmp(b));
+            for i in 0..v.len() {
+                assert_eq!(v[i], i);
+            }
+
+            // Should not panic.
+            [0i32; 0].$f(|a, b| a.cmp(b));
+            [(); 10].$f(|a, b| a.cmp(b));
+            [(); 100].$f(|a, b| a.cmp(b));
+
+            let mut v = [0xDEADBEEFu64];
+            v.$f(|a, b| a.cmp(b));
+            assert!(v == [0xDEADBEEF]);
+        }
+    }
+}
+
+sort!(par_sort_by, test_par_sort);
+sort!(par_sort_unstable_by, test_par_sort_unstable);
+
+#[test]
+fn test_par_sort_stability() {
+    for len in (2..25).chain(500..510).chain(50_000..50_010) {
+        for _ in 0..10 {
+            let mut counts = [0; 10];
+
+            // Create a vector like [(6, 1), (5, 1), (6, 2), ...],
+            // where the first item of each tuple is random, but
+            // the second item represents which occurrence of that
+            // number this element is, i.e. the second elements
+            // will occur in sorted order.
+            let mut v: Vec<_> = (0..len)
+                .map(|_| {
+                    let n = thread_rng().gen::<usize>() % 10;
+                    counts[n] += 1;
+                    (n, counts[n])
+                })
+                .collect();
+
+            // Only sort on the first element, so an unstable sort
+            // may mix up the counts.
+            v.par_sort_by(|&(a, _), &(b, _)| a.cmp(&b));
+
+            // This comparison includes the count (the second item
+            // of the tuple), so elements with equal first items
+            // will need to be ordered with increasing
+            // counts... i.e. exactly asserting that this sort is
+            // stable.
+            assert!(v.windows(2).all(|w| w[0] <= w[1]));
+        }
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/src/split_producer.rs
@@ -0,0 +1,124 @@
+//! Common splitter for strings and slices
+//!
+//! This module is private, so these items are effectively `pub(super)`
+
+use iter::internal::{UnindexedProducer, Folder};
+
+/// Common producer for splitting on a predicate.
+pub struct SplitProducer<'p, P: 'p, V> {
+    data: V,
+    separator: &'p P,
+
+    /// Marks the endpoint beyond which we've already found no separators.
+    tail: usize,
+}
+
+/// Helper trait so `&str`, `&[T]`, and `&mut [T]` can share `SplitProducer`.
+pub trait Fissile<P>: Sized {
+    fn length(&self) -> usize;
+    fn midpoint(&self, end: usize) -> usize;
+    fn find(&self, separator: &P, start: usize, end: usize) -> Option<usize>;
+    fn rfind(&self, separator: &P, end: usize) -> Option<usize>;
+    fn split_once(self, index: usize) -> (Self, Self);
+    fn fold_splits<F>(self, separator: &P, folder: F, skip_last: bool) -> F
+        where F: Folder<Self>, Self: Send;
+}
+
+impl<'p, P, V> SplitProducer<'p, P, V>
+    where V: Fissile<P> + Send
+{
+    pub fn new(data: V, separator: &'p P) -> Self {
+        SplitProducer {
+            tail: data.length(),
+            data: data,
+            separator: separator,
+        }
+    }
+
+    /// Common `fold_with` implementation, integrating `SplitTerminator`'s
+    /// need to sometimes skip its final empty item.
+    pub fn fold_with<F>(self, folder: F, skip_last: bool) -> F
+        where F: Folder<V>
+    {
+        let SplitProducer { data, separator, tail } = self;
+
+        if tail == data.length() {
+            // No tail section, so just let `fold_splits` handle it.
+            data.fold_splits(separator, folder, skip_last)
+
+        } else if let Some(index) = data.rfind(separator, tail) {
+            // We found the last separator to complete the tail, so
+            // end with that slice after `fold_splits` finds the rest.
+            let (left, right) = data.split_once(index);
+            let folder = left.fold_splits(separator, folder, false);
+            if skip_last || folder.full() {
+                folder
+            } else {
+                folder.consume(right)
+            }
+
+        } else {
+            // We know there are no separators at all.  Return our whole data.
+            if skip_last {
+                folder
+            } else {
+                folder.consume(data)
+            }
+        }
+    }
+}
+
+impl<'p, P, V> UnindexedProducer for SplitProducer<'p, P, V>
+    where V: Fissile<P> + Send,
+          P: Sync,
+{
+    type Item = V;
+
+    fn split(self) -> (Self, Option<Self>) {
+        // Look forward for the separator, and failing that look backward.
+        let mid = self.data.midpoint(self.tail);
+        let index = self.data.find(self.separator, mid, self.tail)
+            .map(|i| mid + i)
+            .or_else(|| self.data.rfind(self.separator, mid));
+
+        if let Some(index) = index {
+            let len = self.data.length();
+            let (left, right) = self.data.split_once(index);
+
+            let (left_tail, right_tail) = if index < mid {
+                // If we scanned backwards to find the separator, everything in
+                // the right side is exhausted, with no separators left to find.
+                (index, 0)
+            } else {
+                let right_index = len - right.length();
+                (mid, self.tail - right_index)
+            };
+
+            // Create the left split before the separator.
+            let left = SplitProducer {
+                data: left,
+                tail: left_tail,
+                ..self
+            };
+
+            // Create the right split following the separator.
+            let right = SplitProducer {
+                data: right,
+                tail: right_tail,
+                ..self
+            };
+
+            (left, Some(right))
+
+        } else {
+            // The search is exhausted, no more separators...
+            (SplitProducer { tail: 0, ..self }, None)
+        }
+    }
+
+    fn fold_with<F>(self, folder: F) -> F
+        where F: Folder<Self::Item>
+    {
+        self.fold_with(folder, false)
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/src/str.rs
@@ -0,0 +1,394 @@
+//! This module contains extension methods for string slices (`&str`) that expose
+//! parallel iterators, such as `par_split_whitespace()`. You will
+//! rarely need to interact with it directly, since if you add `use
+//! rayon::prelude::*` to your file, that will include the helper
+//! traits defined in this module.
+//!
+//! Note: [`ParallelString::par_split()`] and [`par_split_terminator()`]
+//! reference a `Pattern` trait which is not visible outside this crate.
+//! This trait is intentionally kept private, for use only by Rayon itself.
+//! It is implemented for `char` and any `F: Fn(char) -> bool + Sync + Send`.
+//!
+//! [`ParallelString::par_split()`]: trait.ParallelString.html#method.par_split
+//! [`par_split_terminator()`]: trait.ParallelString.html#method.par_split_terminator
+
+use iter::*;
+use iter::internal::*;
+use split_producer::*;
+
+
+/// Test if a byte is the start of a UTF-8 character.
+/// (extracted from `str::is_char_boundary`)
+#[inline]
+fn is_char_boundary(b: u8) -> bool {
+    // This is bit magic equivalent to: b < 128 || b >= 192
+    (b as i8) >= -0x40
+}
+
+/// Find the index of a character boundary near the midpoint.
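+///
+/// For example, for `"ééé"` (three 2-byte characters) the byte midpoint 3 falls inside a
+/// character, so this returns the next boundary, 4.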
+#[inline]
+fn find_char_midpoint(chars: &str) -> usize {
+    let mid = chars.len() / 2;
+
+    // We want to split near the midpoint, but we need to find an actual
+    // character boundary.  So we look at the raw bytes, first scanning
+    // forward from the midpoint for a boundary, then trying backward.
+    let (left, right) = chars.as_bytes().split_at(mid);
+    right.iter()
+        .cloned()
+        .position(is_char_boundary)
+        .map(|i| mid + i)
+        .or_else(|| left.iter().cloned().rposition(is_char_boundary))
+        .unwrap_or(0)
+}
+
+
+/// Parallel extensions for strings.
+pub trait ParallelString {
+    /// Returns a plain string slice, which is used to implement the rest of
+    /// the parallel methods.
+    fn as_parallel_string(&self) -> &str;
+
+    /// Returns a parallel iterator over the characters of a string.
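+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch (assuming `rayon::prelude::*` brings `ParallelString` into scope):
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    ///
+    /// let chars: Vec<char> = "héllo".par_chars().collect();
+    /// assert_eq!(chars, ['h', 'é', 'l', 'l', 'o']);
+    /// ```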
+    fn par_chars(&self) -> Chars {
+        Chars { chars: self.as_parallel_string() }
+    }
+
+    /// Returns a parallel iterator over substrings separated by a
+    /// given character or predicate, similar to `str::split`.
+    ///
+    /// Note: the `Pattern` trait is private, for use only by Rayon itself.
+    /// It is implemented for `char` and any `F: Fn(char) -> bool + Sync + Send`.
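+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch (assuming `rayon::prelude::*` is in scope):
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    ///
+    /// let parts: Vec<_> = "a,b,,c".par_split(',').collect();
+    /// assert_eq!(parts, ["a", "b", "", "c"]);
+    /// ```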
+    fn par_split<P: Pattern>(&self, separator: P) -> Split<P> {
+        Split::new(self.as_parallel_string(), separator)
+    }
+
+    /// Returns a parallel iterator over substrings terminated by a
+    /// given character or predicate, similar to `str::split_terminator`.
+    /// It's equivalent to `par_split`, except it doesn't produce an empty
+    /// substring after a trailing terminator.
+    ///
+    /// Note: the `Pattern` trait is private, for use only by Rayon itself.
+    /// It is implemented for `char` and any `F: Fn(char) -> bool + Sync + Send`.
+    fn par_split_terminator<P: Pattern>(&self, terminator: P) -> SplitTerminator<P> {
+        SplitTerminator::new(self.as_parallel_string(), terminator)
+    }
+
+    /// Returns a parallel iterator over the lines of a string, where each line is
+    /// terminated by a newline (`\n`), optionally preceded by a carriage return (`\r\n`).
+    /// The final line ending is optional, and line endings are not included in
+    /// the output strings.
+    fn par_lines(&self) -> Lines {
+        Lines(self.as_parallel_string())
+    }
+
+    /// Returns a parallel iterator over the sub-slices of a string that are
+    /// separated by any amount of whitespace.
+    ///
+    /// As with `str::split_whitespace`, 'whitespace' is defined according to
+    /// the terms of the Unicode Derived Core Property `White_Space`.
+    fn par_split_whitespace(&self) -> SplitWhitespace {
+        SplitWhitespace(self.as_parallel_string())
+    }
+}
+
+impl ParallelString for str {
+    #[inline]
+    fn as_parallel_string(&self) -> &str {
+        self
+    }
+}
+
+
+// /////////////////////////////////////////////////////////////////////////
+
+/// We hide the `Pattern` trait in a private module, as its API is not meant
+/// for general consumption.  If we could have privacy on trait items, then it
+/// would be nicer to have its basic existence and implementors public while
+/// keeping all of the methods private.
+mod private {
+    use iter::internal::Folder;
+
+    /// Pattern-matching trait for `ParallelString`, somewhat like a mix of
+    /// `std::str::pattern::{Pattern, Searcher}`.
+    ///
+    /// Implementing this trait is not permitted outside of `rayon`.
+    pub trait Pattern: Sized + Sync + Send {
+        private_decl!{}
+        fn find_in(&self, &str) -> Option<usize>;
+        fn rfind_in(&self, &str) -> Option<usize>;
+        fn is_suffix_of(&self, &str) -> bool;
+        fn fold_with<'ch, F>(&self, &'ch str, folder: F, skip_last: bool) -> F
+            where F: Folder<&'ch str>;
+    }
+}
+use self::private::Pattern;
+
+impl Pattern for char {
+    private_impl!{}
+
+    #[inline]
+    fn find_in(&self, chars: &str) -> Option<usize> {
+        chars.find(*self)
+    }
+
+    #[inline]
+    fn rfind_in(&self, chars: &str) -> Option<usize> {
+        chars.rfind(*self)
+    }
+
+    #[inline]
+    fn is_suffix_of(&self, chars: &str) -> bool {
+        chars.ends_with(*self)
+    }
+
+    fn fold_with<'ch, F>(&self, chars: &'ch str, folder: F, skip_last: bool) -> F
+        where F: Folder<&'ch str>
+    {
+        let mut split = chars.split(*self);
+        if skip_last {
+            split.next_back();
+        }
+        folder.consume_iter(split)
+    }
+}
+
+impl<FN: Sync + Send + Fn(char) -> bool> Pattern for FN {
+    private_impl!{}
+
+    fn find_in(&self, chars: &str) -> Option<usize> {
+        chars.find(self)
+    }
+
+    fn rfind_in(&self, chars: &str) -> Option<usize> {
+        chars.rfind(self)
+    }
+
+    fn is_suffix_of(&self, chars: &str) -> bool {
+        chars.ends_with(self)
+    }
+
+    fn fold_with<'ch, F>(&self, chars: &'ch str, folder: F, skip_last: bool) -> F
+        where F: Folder<&'ch str>
+    {
+        let mut split = chars.split(self);
+        if skip_last {
+            split.next_back();
+        }
+        folder.consume_iter(split)
+    }
+}
+
+
+// /////////////////////////////////////////////////////////////////////////
+
+/// Parallel iterator over the characters of a string
+pub struct Chars<'ch> {
+    chars: &'ch str,
+}
+
+struct CharsProducer<'ch> {
+    chars: &'ch str,
+}
+
+impl<'ch> ParallelIterator for Chars<'ch> {
+    type Item = char;
+
+    fn drive_unindexed<C>(self, consumer: C) -> C::Result
+        where C: UnindexedConsumer<Self::Item>
+    {
+        bridge_unindexed(CharsProducer { chars: self.chars }, consumer)
+    }
+}
+
+impl<'ch> UnindexedProducer for CharsProducer<'ch> {
+    type Item = char;
+
+    fn split(mut self) -> (Self, Option<Self>) {
+        let index = find_char_midpoint(self.chars);
+        if index > 0 {
+            let (left, right) = self.chars.split_at(index);
+            self.chars = left;
+            (self, Some(CharsProducer { chars: right }))
+        } else {
+            (self, None)
+        }
+    }
+
+    fn fold_with<F>(self, folder: F) -> F
+        where F: Folder<Self::Item>
+    {
+        folder.consume_iter(self.chars.chars())
+    }
+}
+
+
+// /////////////////////////////////////////////////////////////////////////
+
+/// Parallel iterator over substrings separated by a pattern
+pub struct Split<'ch, P: Pattern> {
+    chars: &'ch str,
+    separator: P,
+}
+
+impl<'ch, P: Pattern> Split<'ch, P> {
+    fn new(chars: &'ch str, separator: P) -> Self {
+        Split {
+            chars: chars,
+            separator: separator,
+        }
+    }
+}
+
+impl<'ch, P: Pattern> ParallelIterator for Split<'ch, P> {
+    type Item = &'ch str;
+
+    fn drive_unindexed<C>(self, consumer: C) -> C::Result
+        where C: UnindexedConsumer<Self::Item>
+    {
+        let producer = SplitProducer::new(self.chars, &self.separator);
+        bridge_unindexed(producer, consumer)
+    }
+}
+
+/// Implement support for `SplitProducer`.
+impl<'ch, P: Pattern> Fissile<P> for &'ch str {
+    fn length(&self) -> usize {
+        self.len()
+    }
+
+    fn midpoint(&self, end: usize) -> usize {
+        // First find a suitable UTF-8 boundary.
+        find_char_midpoint(&self[..end])
+    }
+
+    fn find(&self, separator: &P, start: usize, end: usize) -> Option<usize> {
+        separator.find_in(&self[start..end])
+    }
+
+    fn rfind(&self, separator: &P, end: usize) -> Option<usize> {
+        separator.rfind_in(&self[..end])
+    }
+
+    fn split_once(self, index: usize) -> (Self, Self) {
+        let (left, right) = self.split_at(index);
+        let mut right_iter = right.chars();
+        right_iter.next(); // skip the separator
+        (left, right_iter.as_str())
+    }
+
+    fn fold_splits<F>(self, separator: &P, folder: F, skip_last: bool) -> F
+        where F: Folder<Self>
+    {
+        separator.fold_with(self, folder, skip_last)
+    }
+}
+
+
+// /////////////////////////////////////////////////////////////////////////
+
+/// Parallel iterator over substrings separated by a terminator pattern
+pub struct SplitTerminator<'ch, P: Pattern> {
+    chars: &'ch str,
+    terminator: P,
+}
+
+struct SplitTerminatorProducer<'ch, 'sep, P: Pattern + 'sep> {
+    splitter: SplitProducer<'sep, P, &'ch str>,
+    skip_last: bool,
+}
+
+impl<'ch, P: Pattern> SplitTerminator<'ch, P> {
+    fn new(chars: &'ch str, terminator: P) -> Self {
+        SplitTerminator {
+            chars: chars,
+            terminator: terminator,
+        }
+    }
+}
+
+impl<'ch, 'sep, P: Pattern + 'sep> SplitTerminatorProducer<'ch, 'sep, P> {
+    fn new(chars: &'ch str, terminator: &'sep P) -> Self {
+        SplitTerminatorProducer {
+            splitter: SplitProducer::new(chars, terminator),
+            skip_last: chars.is_empty() || terminator.is_suffix_of(chars),
+        }
+    }
+}
+
+impl<'ch, P: Pattern> ParallelIterator for SplitTerminator<'ch, P> {
+    type Item = &'ch str;
+
+    fn drive_unindexed<C>(self, consumer: C) -> C::Result
+        where C: UnindexedConsumer<Self::Item>
+    {
+        let producer = SplitTerminatorProducer::new(self.chars, &self.terminator);
+        bridge_unindexed(producer, consumer)
+    }
+}
+
+impl<'ch, 'sep, P: Pattern + 'sep> UnindexedProducer for SplitTerminatorProducer<'ch, 'sep, P> {
+    type Item = &'ch str;
+
+    fn split(mut self) -> (Self, Option<Self>) {
+        let (left, right) = self.splitter.split();
+        self.splitter = left;
+        let right = right.map(|right| {
+            let skip_last = self.skip_last;
+            self.skip_last = false;
+            SplitTerminatorProducer {
+                splitter: right,
+                skip_last: skip_last,
+            }
+        });
+        (self, right)
+    }
+
+    fn fold_with<F>(self, folder: F) -> F
+        where F: Folder<Self::Item>
+    {
+        self.splitter.fold_with(folder, self.skip_last)
+    }
+}
+
+
+// /////////////////////////////////////////////////////////////////////////
+
+/// Parallel iterator over lines in a string
+pub struct Lines<'ch>(&'ch str);
+
+impl<'ch> ParallelIterator for Lines<'ch> {
+    type Item = &'ch str;
+
+    fn drive_unindexed<C>(self, consumer: C) -> C::Result
+        where C: UnindexedConsumer<Self::Item>
+    {
+        self.0
+            .par_split_terminator('\n')
+            .map(|line| if line.ends_with('\r') {
+                     &line[..line.len() - 1]
+                 } else {
+                     line
+                 })
+            .drive_unindexed(consumer)
+    }
+}
+
+
+// /////////////////////////////////////////////////////////////////////////
+
+/// Parallel iterator over substrings separated by whitespace
+pub struct SplitWhitespace<'ch>(&'ch str);
+
+impl<'ch> ParallelIterator for SplitWhitespace<'ch> {
+    type Item = &'ch str;
+
+    fn drive_unindexed<C>(self, consumer: C) -> C::Result
+        where C: UnindexedConsumer<Self::Item>
+    {
+        self.0
+            .par_split(char::is_whitespace)
+            .filter(|string| !string.is_empty())
+            .drive_unindexed(consumer)
+    }
+}
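For reference, a minimal consumer-side sketch of the `ParallelString` helpers defined in src/str.rs above. This is not part of the vendored crate; it assumes rayon 0.8 with `rayon::prelude::*` re-exporting these traits, and the input text is made up for illustration:

```rust
extern crate rayon;

use rayon::prelude::*;

fn main() {
    let text = "the quick\nbrown fox\r\njumps over the lazy dog\n";

    // Count characters in parallel via `par_chars()`.
    let n_chars = text.par_chars().count();

    // Split on whitespace in parallel and sum the word lengths.
    let total_len: usize = text.par_split_whitespace()
        .map(|word| word.len())
        .sum();

    // `par_lines()` strips the trailing `\n` / `\r\n`, like `str::lines()`.
    let lines: Vec<&str> = text.par_lines().collect();

    println!("{} chars, {} word bytes, {} lines", n_chars, total_len, lines.len());
}
```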
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/src/test.rs
@@ -0,0 +1,47 @@
+#![cfg(test)]
+
+extern crate compiletest_rs as compiletest;
+
+use std::path::PathBuf;
+
+fn run_compiletest(mode: &str, path: &str) {
+    let mut config = compiletest::default_config();
+    config.mode = mode.parse().ok().expect("Invalid mode");
+    config.src_base = PathBuf::from(path);
+    config.target_rustcflags = Some("-L target/debug/ -L target/debug/deps/".to_owned());
+
+    compiletest::run_tests(&config);
+}
+
+#[test]
+fn negative_tests_compile_fail() {
+    run_compiletest("compile-fail", "tests/compile-fail");
+}
+
+#[test]
+#[cfg(rayon_unstable)]
+fn negative_tests_compile_fail_unstable() {
+    run_compiletest("compile-fail", "tests/compile-fail-unstable");
+}
+
+#[test]
+fn negative_tests_run_fail() {
+    run_compiletest("run-fail", "tests/run-fail");
+}
+
+#[test]
+#[cfg(rayon_unstable)]
+fn negative_tests_run_fail_unstable() {
+    run_compiletest("run-fail", "tests/run-fail-unstable");
+}
+
+#[test]
+fn positive_test_run_pass() {
+    run_compiletest("run-pass", "tests/run-pass");
+}
+
+#[test]
+#[cfg(rayon_unstable)]
+fn positive_test_run_pass_unstable() {
+    run_compiletest("run-pass", "tests/run-pass-unstable");
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/src/vec.rs
@@ -0,0 +1,132 @@
+//! This module contains the parallel iterator types for vectors
+//! (`Vec<T>`). You will rarely need to interact with it directly
+//! unless you have need to name one of those types.
+
+use iter::*;
+use iter::internal::*;
+use std;
+
+/// Parallel iterator that moves out of a vector.
+pub struct IntoIter<T: Send> {
+    vec: Vec<T>,
+}
+
+impl<T: Send> IntoParallelIterator for Vec<T> {
+    type Item = T;
+    type Iter = IntoIter<T>;
+
+    fn into_par_iter(self) -> Self::Iter {
+        IntoIter { vec: self }
+    }
+}
+
+impl<T: Send> ParallelIterator for IntoIter<T> {
+    type Item = T;
+
+    fn drive_unindexed<C>(self, consumer: C) -> C::Result
+        where C: UnindexedConsumer<Self::Item>
+    {
+        bridge(self, consumer)
+    }
+
+    fn opt_len(&mut self) -> Option<usize> {
+        Some(self.len())
+    }
+}
+
+impl<T: Send> IndexedParallelIterator for IntoIter<T> {
+    fn drive<C>(self, consumer: C) -> C::Result
+        where C: Consumer<Self::Item>
+    {
+        bridge(self, consumer)
+    }
+
+    fn len(&mut self) -> usize {
+        self.vec.len()
+    }
+
+    fn with_producer<CB>(mut self, callback: CB) -> CB::Output
+        where CB: ProducerCallback<Self::Item>
+    {
+        // The producer will move or drop each item from its slice, effectively taking ownership of
+        // them.  When we're done, the vector only needs to free its buffer.
+        unsafe {
+            // Make the vector forget about the actual items.
+            let len = self.vec.len();
+            self.vec.set_len(0);
+
+            // Get a correct borrow, then extend it to the original length.
+            let mut slice = self.vec.as_mut_slice();
+            slice = std::slice::from_raw_parts_mut(slice.as_mut_ptr(), len);
+
+            callback.callback(VecProducer { slice: slice })
+        }
+    }
+}
+
+/// ////////////////////////////////////////////////////////////////////////
+
+struct VecProducer<'data, T: 'data + Send> {
+    slice: &'data mut [T],
+}
+
+impl<'data, T: 'data + Send> Producer for VecProducer<'data, T> {
+    type Item = T;
+    type IntoIter = SliceDrain<'data, T>;
+
+    fn into_iter(mut self) -> Self::IntoIter {
+        // replace the slice so we don't drop it twice
+        let slice = std::mem::replace(&mut self.slice, &mut []);
+        SliceDrain { iter: slice.iter_mut() }
+    }
+
+    fn split_at(mut self, index: usize) -> (Self, Self) {
+        // replace the slice so we don't drop it twice
+        let slice = std::mem::replace(&mut self.slice, &mut []);
+        let (left, right) = slice.split_at_mut(index);
+        (VecProducer { slice: left }, VecProducer { slice: right })
+    }
+}
+
+impl<'data, T: 'data + Send> Drop for VecProducer<'data, T> {
+    fn drop(&mut self) {
+        SliceDrain { iter: self.slice.iter_mut() };
+    }
+}
+
+/// ////////////////////////////////////////////////////////////////////////
+
+// like std::vec::Drain, without updating a source Vec
+struct SliceDrain<'data, T: 'data> {
+    iter: std::slice::IterMut<'data, T>,
+}
+
+impl<'data, T: 'data> Iterator for SliceDrain<'data, T> {
+    type Item = T;
+
+    fn next(&mut self) -> Option<T> {
+        self.iter.next().map(|ptr| unsafe { std::ptr::read(ptr) })
+    }
+}
+
+impl<'data, T: 'data> DoubleEndedIterator for SliceDrain<'data, T> {
+    fn next_back(&mut self) -> Option<Self::Item> {
+        self.iter.next_back().map(|ptr| unsafe { std::ptr::read(ptr) })
+    }
+}
+
+impl<'data, T: 'data> ExactSizeIterator for SliceDrain<'data, T> {
+    fn len(&self) -> usize {
+        self.iter.len()
+    }
+}
+
+impl<'data, T: 'data> Drop for SliceDrain<'data, T> {
+    fn drop(&mut self) {
+        for ptr in &mut self.iter {
+            unsafe {
+                std::ptr::drop_in_place(ptr);
+            }
+        }
+    }
+}
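A small usage sketch of the by-value `IntoIter` defined in src/vec.rs above (again assuming rayon 0.8 and its prelude): `Vec<T>` moves its elements into the parallel iterator, and the `VecProducer`/`SliceDrain` machinery ensures any unconsumed items are still dropped.

```rust
extern crate rayon;

use rayon::prelude::*;

fn main() {
    let words: Vec<String> = vec!["alpha".to_string(),
                                  "beta".to_string(),
                                  "gamma".to_string()];

    // `into_par_iter()` takes ownership of the `String`s, so the closures
    // receive them by value and no cloning is required.
    let shouted: Vec<String> = words.into_par_iter()
        .map(|w| w.to_uppercase())
        .collect();

    assert_eq!(shouted, vec!["ALPHA", "BETA", "GAMMA"]);
}
```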
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/tests/compile-fail-unstable/README.md
@@ -0,0 +1,3 @@
+This directory contains test files that are **expected not to
+compile** and which **use unstable features**. See `compile-fail` for
+more details.
rename from third_party/rust/rayon/tests/compile-fail-unstable/future_escape.rs
rename to third_party/rust/rayon-0.8.2/tests/compile-fail-unstable/future_escape.rs
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/tests/compile-fail/README.md
@@ -0,0 +1,11 @@
+This directory contains test files that are **not expected to
+compile**.  It is useful for checking that code which ought not to
+type-check does not, in fact, type-check.
+
+To write a test, just write a `.rs` file using Rayon. Then, for each
+compilation error, write a `//~ ERROR E123` annotation on the line
+where the error occurs. `E123` should be the error code that is issued
+by rustc. This should be reasonably robust against future compiler
+changes, though in some cases the errors may start showing up on
+different lines etc as compiler heuristics change.
+
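For illustration, a hypothetical test in the style described in the README above; it intentionally fails to compile, and the vendored tests that follow use the same `//~ ERROR` annotation convention:

```rust
extern crate rayon;

use std::rc::Rc;

fn main() {
    // `Rc` is not `Send`, so returning it from `join` should be rejected.
    rayon::join(|| Rc::new(22), || ()); //~ ERROR E0277
}
```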
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/tests/compile-fail/cannot_collect_filtermap_data.rs
@@ -0,0 +1,14 @@
+extern crate rayon;
+
+use rayon::prelude::*;
+
+// collect_into requires an indexed iterator of exact size, but filter_map
+// yields only a bounded size, so check that we cannot apply it.
+
+fn main() {
+    let a: Vec<usize> = (0..1024).collect();
+    let mut v = vec![];
+    a.par_iter()
+     .filter_map(|&x| Some(x as f32))
+     .collect_into(&mut v); //~ ERROR no method
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/tests/compile-fail/cannot_zip_filtered_data.rs
@@ -0,0 +1,14 @@
+extern crate rayon;
+
+use rayon::prelude::*;
+
+// zip requires data of exact size, but filter yields only bounded
+// size, so check that we cannot apply it.
+
+fn main() {
+    let mut a: Vec<usize> = (0..1024).rev().collect();
+    let b: Vec<usize> = (0..1024).collect();
+
+    a.par_iter()
+     .zip(b.par_iter().filter(|&&x| x > 3)); //~ ERROR E0277
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/tests/compile-fail/cell_par_iter.rs
@@ -0,0 +1,13 @@
+extern crate rayon;
+
+// Check that we can't use the par-iter API to access contents of a `Cell`.
+
+use rayon::prelude::*;
+use std::cell::Cell;
+
+fn main() {
+    let c = Cell::new(42_i32);
+    (0_i32..1024).into_par_iter()
+             .map(|_| c.get()) //~ ERROR Sync` is not satisfied
+             .min();
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/tests/compile-fail/must_use.rs
@@ -0,0 +1,30 @@
+#![deny(unused_must_use)]
+
+extern crate rayon;
+
+// Check that we are flagged for ignoring `must_use` parallel adaptors.
+
+use rayon::prelude::*;
+
+fn main() {
+    let v: Vec<_> = (0..100).map(Some).collect();
+
+    v.par_iter().chain(&v);                 //~ ERROR must be used
+    v.par_iter().cloned();                  //~ ERROR must be used
+    v.par_iter().enumerate();               //~ ERROR must be used
+    v.par_iter().filter(|_| true);          //~ ERROR must be used
+    v.par_iter().filter_map(|x| *x);        //~ ERROR must be used
+    v.par_iter().flat_map(|x| *x);          //~ ERROR must be used
+    v.par_iter().fold(|| 0, |x, _| x);      //~ ERROR must be used
+    v.par_iter().fold_with(0, |x, _| x);    //~ ERROR must be used
+    v.par_iter().inspect(|_| {});           //~ ERROR must be used
+    v.par_iter().map(|x| x);                //~ ERROR must be used
+    v.par_iter().map_with(0, |_, x| x);     //~ ERROR must be used
+    v.par_iter().rev();                     //~ ERROR must be used
+    v.par_iter().skip(1);                   //~ ERROR must be used
+    v.par_iter().take(1);                   //~ ERROR must be used
+    v.par_iter().cloned().while_some();     //~ ERROR must be used
+    v.par_iter().with_max_len(1);           //~ ERROR must be used
+    v.par_iter().with_min_len(1);           //~ ERROR must be used
+    v.par_iter().zip(&v);                   //~ ERROR must be used
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/tests/compile-fail/no_send_par_iter.rs
@@ -0,0 +1,27 @@
+extern crate rayon;
+
+// Check that `!Send` types fail early.
+
+use rayon::prelude::*;
+use std::ptr::null;
+
+#[derive(Copy, Clone)]
+struct NoSend(*const ());
+
+unsafe impl Sync for NoSend {}
+
+fn main() {
+    let x = Some(NoSend(null()));
+
+    x.par_iter()
+        .map(|&x| x) //~ ERROR Send` is not satisfied
+        .count(); //~ ERROR Send` is not satisfied
+
+    x.par_iter()
+        .filter_map(|&x| Some(x)) //~ ERROR Send` is not satisfied
+        .count(); //~ ERROR Send` is not satisfied
+
+    x.par_iter()
+        .cloned() //~ ERROR Send` is not satisfied
+        .count(); //~ ERROR Send` is not satisfied
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/tests/compile-fail/quicksort_race1.rs
@@ -0,0 +1,26 @@
+extern crate rayon;
+
+fn quick_sort<T:PartialOrd+Send>(v: &mut [T]) {
+    if v.len() <= 1 {
+        return;
+    }
+
+    let mid = partition(v);
+    let (lo, hi) = v.split_at_mut(mid);
+    rayon::join(|| quick_sort(lo), || quick_sort(lo)); //~ ERROR E0524
+}
+
+fn partition<T:PartialOrd+Send>(v: &mut [T]) -> usize {
+    let pivot = v.len() - 1;
+    let mut i = 0;
+    for j in 0..pivot {
+        if v[j] <= v[pivot] {
+            v.swap(i, j);
+            i += 1;
+        }
+    }
+    v.swap(i, pivot);
+    i
+}
+
+fn main() { }
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/tests/compile-fail/quicksort_race2.rs
@@ -0,0 +1,26 @@
+extern crate rayon;
+
+fn quick_sort<T:PartialOrd+Send>(v: &mut [T]) {
+    if v.len() <= 1 {
+        return;
+    }
+
+    let mid = partition(v);
+    let (lo, hi) = v.split_at_mut(mid);
+    rayon::join(|| quick_sort(lo), || quick_sort(v)); //~ ERROR E0500
+}
+
+fn partition<T:PartialOrd+Send>(v: &mut [T]) -> usize {
+    let pivot = v.len() - 1;
+    let mut i = 0;
+    for j in 0..pivot {
+        if v[j] <= v[pivot] {
+            v.swap(i, j);
+            i += 1;
+        }
+    }
+    v.swap(i, pivot);
+    i
+}
+
+fn main() { }
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/tests/compile-fail/quicksort_race3.rs
@@ -0,0 +1,26 @@
+extern crate rayon;
+
+fn quick_sort<T:PartialOrd+Send>(v: &mut [T]) {
+    if v.len() <= 1 {
+        return;
+    }
+
+    let mid = partition(v);
+    let (lo, hi) = v.split_at_mut(mid);
+    rayon::join(|| quick_sort(hi), || quick_sort(hi)); //~ ERROR E0524
+}
+
+fn partition<T:PartialOrd+Send>(v: &mut [T]) -> usize {
+    let pivot = v.len() - 1;
+    let mut i = 0;
+    for j in 0..pivot {
+        if v[j] <= v[pivot] {
+            v.swap(i, j);
+            i += 1;
+        }
+    }
+    v.swap(i, pivot);
+    i
+}
+
+fn main() { }
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/tests/compile-fail/rc_par_iter.rs
@@ -0,0 +1,15 @@
+extern crate rayon;
+
+// Check that we can't use the par-iter API to access contents of an
+// `Rc`.
+
+use rayon::iter::IntoParallelIterator;
+use std::rc::Rc;
+
+fn main() {
+    let x = vec![Rc::new(22), Rc::new(23)];
+    let mut y = vec![];
+    x.into_par_iter() //~ ERROR no method named `into_par_iter`
+     .map(|rc| *rc)
+     .collect_into(&mut y);
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/tests/compile-fail/rc_return.rs
@@ -0,0 +1,8 @@
+extern crate rayon;
+
+use std::rc::Rc;
+
+fn main() {
+    rayon::join(|| Rc::new(22), || ()); //~ ERROR E0277
+    rayon::join(|| (), || Rc::new(23)); //~ ERROR E0277
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/tests/compile-fail/rc_upvar.rs
@@ -0,0 +1,9 @@
+extern crate rayon;
+
+use std::rc::Rc;
+
+fn main() {
+    let r = Rc::new(22);
+    rayon::join(|| r.clone(), || r.clone());
+    //~^ ERROR E0277
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/tests/compile-fail/scope_join_bad.rs
@@ -0,0 +1,22 @@
+extern crate rayon;
+
+fn bad_scope<F>(f: F)
+    where F: FnOnce(&i32) + Send,
+{
+    rayon::scope(|s| {
+        let x = 22;
+        s.spawn(|_| f(&x)); //~ ERROR `x` does not live long enough
+    });
+}
+
+fn good_scope<F>(f: F)
+    where F: FnOnce(&i32) + Send,
+{
+    let x = 22;
+    rayon::scope(|s| {
+        s.spawn(|_| f(&x));
+    });
+}
+
+fn main() {
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/tests/run-fail-unstable/README.md
@@ -0,0 +1,3 @@
+This directory contains test files that are **expected to run and
+panic** and which **use unstable features**. See `run-fail` for more
+details.
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/tests/run-fail/README.md
@@ -0,0 +1,14 @@
+This directory contains test files that are **expected to run and
+panic**. Usually, though, such tests are better written as ordinary
+`#[test]` tests. To add one here, create a `.rs` file and include a
+comment like
+
+```
+// error-pattern:boom
+```
+
+indicating the panic message we should expect to see.
+
+Note: if the test uses unstable features, prefer the
+`run-fail-unstable` directory.
+
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/tests/run-fail/iter_panic.rs
@@ -0,0 +1,10 @@
+extern crate rayon;
+
+use rayon::*;
+use rayon::prelude::*;
+
+// error-pattern:boom
+
+fn main() {
+    (0i32..2_000_000).into_par_iter().for_each(|i| if i == 1_350_000 { panic!("boom") });
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/tests/run-fail/simple_panic.rs
@@ -0,0 +1,9 @@
+extern crate rayon;
+
+use rayon::*;
+
+// error-pattern:should panic
+
+fn main() {
+    join(|| {}, || panic!("should panic"));
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/tests/run-pass-unstable/README.md
@@ -0,0 +1,3 @@
+This directory contains test files that are **expected to run and
+execute successfully** and which **contain unstable features**. See
+the `run-pass` directory for more details.
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/tests/run-pass/README.md
@@ -0,0 +1,7 @@
+This directory contains test files that are **expected to run and
+execute successfully**. Often, though, such tests are better written
+just using the standard `#[test]` mechanism. In any case, to create
+such a test, just make a `.rs` file.
+
+Note: if the test uses unstable features, prefer the
+`run-pass-unstable` directory.
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/tests/run-pass/double_init_fail.rs
@@ -0,0 +1,10 @@
+extern crate rayon;
+
+use rayon::*;
+
+fn main() {
+    let result1 = initialize(Configuration::new());
+    assert_eq!(result1.unwrap(), ());
+    let err = initialize(Configuration::new()).unwrap_err();
+    assert!(err.description() == "The global thread pool has already been initialized.");
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/tests/run-pass/init_zero_threads.rs
@@ -0,0 +1,7 @@
+extern crate rayon;
+
+use rayon::*;
+
+fn main() {
+    initialize(Configuration::new().num_threads(0)).unwrap();
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/tests/run-pass/named-threads.rs
@@ -0,0 +1,19 @@
+extern crate rayon;
+
+use std::collections::HashSet;
+
+use rayon::*;
+use rayon::prelude::*;
+
+fn main() {
+    let result = initialize(Configuration::new().thread_name(|i| format!("hello-name-test-{}", i)));
+
+    const N: usize = 10000;
+
+    let thread_names = (0..N).into_par_iter()
+        .flat_map(|_| ::std::thread::current().name().map(|s| s.to_owned()))
+        .collect::<HashSet<String>>();
+
+    let all_contains_name = thread_names.iter().all(|name| name.starts_with("hello-name-test-"));
+    assert!(all_contains_name);
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/tests/run-pass/scope_join.rs
@@ -0,0 +1,45 @@
+extern crate rayon;
+
+/// Test that one can emulate join with `scope`:
+fn pseudo_join<F, G>(f: F, g: G)
+    where F: FnOnce() + Send,
+          G: FnOnce() + Send,
+{
+    rayon::scope(|s| {
+        s.spawn(|_| g());
+        f();
+    });
+}
+
+fn quick_sort<T:PartialOrd+Send>(v: &mut [T]) {
+    if v.len() <= 1 {
+        return;
+    }
+
+    let mid = partition(v);
+    let (lo, hi) = v.split_at_mut(mid);
+    pseudo_join(|| quick_sort(lo), || quick_sort(hi));
+}
+
+fn partition<T:PartialOrd+Send>(v: &mut [T]) -> usize {
+    let pivot = v.len() - 1;
+    let mut i = 0;
+    for j in 0..pivot {
+        if v[j] <= v[pivot] {
+            v.swap(i, j);
+            i += 1;
+        }
+    }
+    v.swap(i, pivot);
+    i
+}
+
+pub fn is_sorted<T: Send + Ord>(v: &[T]) -> bool {
+    (1..v.len()).all(|i| v[i-1] <= v[i])
+}
+
+fn main() {
+    let mut v: Vec<i32> = (0 .. 256).rev().collect();
+    quick_sort(&mut v);
+    assert!(is_sorted(&v));
+}
rename from third_party/rust/rayon/tests/run-pass/sort-panic-safe.rs
rename to third_party/rust/rayon-0.8.2/tests/run-pass/sort-panic-safe.rs
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-0.8.2/tests/run-pass/stack_overflow_crash.rs
@@ -0,0 +1,51 @@
+extern crate rayon;
+
+use rayon::*;
+
+use std::process::{self, Command};
+use std::env;
+
+#[cfg(target_os = "linux")]
+use std::os::unix::process::ExitStatusExt;
+
+
+
+fn force_stack_overflow(depth: u32) {
+    let buffer = [0u8; 1024*1024];
+    if depth > 0 {
+        force_stack_overflow(depth - 1);
+    }
+}
+
+fn main() {
+    if env::args().len() == 1 {
+        // first check that the recursive call actually causes a stack overflow, and does not get optimized away
+        {
+            let status = Command::new(env::current_exe().unwrap())
+                .arg("8")
+                .status()
+                .unwrap();
+            assert_eq!(status.code(), None);
+            #[cfg(target_os = "linux")]
+            assert!(status.signal() == Some(11 /*SIGSEGV*/) || status.signal() == Some(6 /*SIGABRT*/));
+        }
+
+
+        // now run with a larger stack and verify correct operation
+        {
+            let status = Command::new(env::current_exe().unwrap())
+                .arg("48")
+                .status()
+                .unwrap();
+            assert_eq!(status.code(), Some(0));
+            #[cfg(target_os = "linux")]
+            assert_eq!(status.signal(), None);
+        }
+    } else {
+        let stack_size_in_mb: usize = env::args().nth(1).unwrap().parse().unwrap();
+        let pool = ThreadPool::new(Configuration::new().stack_size(stack_size_in_mb * 1024 * 1024)).unwrap();
+        let index = pool.install(|| {
+            force_stack_overflow(32);
+        });
+    }
+}
--- a/third_party/rust/rayon-core/.cargo-checksum.json
+++ b/third_party/rust/rayon-core/.cargo-checksum.json
@@ -1,1 +1,1 @@
-{"files":{"Cargo.toml":"b55c2f103247cd210b079fbb8c0812089b962faa47ed670d93f936889141f8d1","build.rs":"fa31cb198b772600d100a7c403ddedccef637d2e6b2da431fa7f02ca41307fc6","src/future/README.md":"c2371a6a7a6cba3ef2c12b4789d6cfc02c5925bca9bf0e15967c8c8e63b18a1b","src/future/mod.rs":"0d65eae294d517c69a9775e27e24a0c013cef08d455ea400624243682ed00f2f","src/future/test.rs":"fd193f77134a235a42338b94e31b4dea76ac6c8f2a22e6a52735a8f6689de0fb","src/job.rs":"bc456ec252d0cd8878e96a5bf602997440957c6bc5ea59d049e4fe0a6bbfb2cd","src/join/mod.rs":"0af433a10b8edc294f95aed39fe66c62553e0c14fad48db9e4988afd2b35da83","src/join/test.rs":"d9abcdb857915ee9c863d2f7f877be7bc969f08661566df405257e9f403432bf","src/latch.rs":"37117e2bdfd657107c8c91805d43bbed0916fd3eba2336cd081000eafbb398f4","src/lib.rs":"a0b7da992ba2f7364ff044ae5cac21e6205e1841248a9670a8193aa65e4d3de5","src/log.rs":"a941e76e1f81f12871ffd3e6602dde71a0a2f139202837261d88726c357976d3","src/registry.rs":"793abc34e7e64c249a2bb2ff282c8dfebd36c7f001f6f138792a45932db40b73","src/scope/mod.rs":"a3eaddd7c543a1ac7a0e11124122311dd7ebf77eb59d269f3e1e3a32f8b741b9","src/scope/test.rs":"c6bbb63a07b9883b252cf1abe0eb5f7289bfe3bab35d7f2751b829521bcd71c0","src/sleep/README.md":"34a83cd8a0f84e58472af5c40744794139fbfe22a392f0dc6eb90cfdf9389119","src/sleep/mod.rs":"f5e5caf619a23da6534d69fff7beb7f4c361bd5b8d86e81e6326eab300e1507c","src/spawn/mod.rs":"819c70e9d23b87f40e2b0add72134ed5ccb01a98c5e71cde28a31a613dc7787d","src/spawn/test.rs":"777930c3f19c6579f0d9ea617f3dde9976e58122bd0b7a3e6db38c27910d5de3","src/test.rs":"a6b7e8aaddba6e864b6654616f14e0a62c10bdf5766c2dada8a2fd3fb85278ea","src/thread_pool/mod.rs":"9c4c34157c6f8ada430afe9c1a2e5b4c819aadc6f09fd4ad62a137b1cbb0c4b9","src/thread_pool/test.rs":"f67c01f8dc1fa09652b925442d02f2bed092f2263c26afcd481630797d450c05","src/unwind.rs":"52a4b26d51bbcbac9e22776cdff8c6cadfe79f7fa20ed507bb987b168f9afe77","src/util.rs":"63ec7e1c75ea3871593d7fa5fc46689de7a2a86aa080b587880181dafd5e9838"},"package":"e2c21a92a5dca958fb030787c1158446c6deb7f976399b72fa8074603f169e2a"}
\ No newline at end of file
+{"files":{"Cargo.toml":"34b1cbb8413751b716444269a455b294228fd6cf4fb3f95c849182e12fead815","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"0621878e61f0d0fda054bcbe02df75192c28bde1ecc8289cbd86aeba2dd72720","README.md":"32b5268c63c55d843735b93d6b755279b1e9f7dd7f45d34bbe24effb27d9f674","build.rs":"fa31cb198b772600d100a7c403ddedccef637d2e6b2da431fa7f02ca41307fc6","src/internal/mod.rs":"26fb298bbb0d785bae5da8ca181b02f267376e766cea0757a191136fc432103e","src/internal/task.rs":"0aafd209e52fdf34c544487cea3e3b46a51a5b8b2651534f2902458c44e621d4","src/internal/worker.rs":"94fd86908128dd500a476bc635ff67f50363153aab38ad675136967fe5cd45f4","src/job.rs":"6ada245db21c326657278cc55caf927e8fb4f8c1c5b74154c33e395a3d9ce99d","src/join/mod.rs":"4b0395f66c0ea23e6b54ac62b646697fa07bc3bde548efb9b57128ede900a546","src/join/test.rs":"774f6330024cbb9d896345b07da57bc545c877cf5a03de26903c8f718d388765","src/latch.rs":"cec40108c5bd33b1db6b23af2974edda05d9a38518c0e5bd0481bfb2fd0ac82d","src/lib.rs":"424673b805538cac3c0576dca92c12e246bcf04faa7cfd938ef3634a4ee9e1f9","src/log.rs":"95fcb9c0613bb5fc865b5a74bf0300df54ca08e406b05a5fa7e09cb8eb526a9a","src/registry.rs":"f2aeb1f866cdc4a65356cff63862cfd681f7ba87ea96989ab846ae0fd74a1684","src/scope/internal.rs":"63103af40d09fc544d9b757ef48cdf8f2fe8b5e05962cbf98783e18f2314ca9c","src/scope/mod.rs":"55a5b991bf5ff7a0a7e9ac44f89ba7a59f9bac9e2d7085b3de30a09b8ca57707","src/scope/test.rs":"a15d3d31299d05333e83975d67eba37d7a6dd4dd85452bad0ed159382455335e","src/sleep/README.md":"34a83cd8a0f84e58472af5c40744794139fbfe22a392f0dc6eb90cfdf9389119","src/sleep/mod.rs":"f5e5caf619a23da6534d69fff7beb7f4c361bd5b8d86e81e6326eab300e1507c","src/spawn/mod.rs":"7665de0682ae4d232d0f2db70f64dfd330bf400c8b17a70ecb165c8dcd6a7070","src/spawn/test.rs":"39073aa2c0f933997fa32a723ef0acbd7b1236e4d839471eec0e6103a894d8bb","src/test.rs":"8b7bc544fb6f853e99d42bbb8b1a77b6b5bc31de16a127c54f9e005f2b7b1e97","src/thread_pool/internal.rs":"eb64a10ef3d52f5eebb7ac7a0023c557bbdb52cb0ab412aa3eba265b60628c8e","src/thread_pool/mod.rs":"867f57a2bbe8c18e879eb509c285f70cbf0ef3310dc5119394f2edaaaab2f144","src/thread_pool/test.rs":"14e7214584b49c1cf2cdaa174b49b4eae4dc8a3d96df45b4ee5e9a3671ea0e6f","src/unwind.rs":"23c7dcc38524f61193b36f371b4ed918aa2e67ed0e155788d09f7eb5fcd4b6bd","src/util.rs":"63ec7e1c75ea3871593d7fa5fc46689de7a2a86aa080b587880181dafd5e9838"},"package":"9d24ad214285a7729b174ed6d3bcfcb80177807f959d95fafd5bfc5c4f201ac8"}
\ No newline at end of file
--- a/third_party/rust/rayon-core/Cargo.toml
+++ b/third_party/rust/rayon-core/Cargo.toml
@@ -7,35 +7,35 @@
 #
 # If you believe there's an error in this file please file an
 # issue against the rust-lang/cargo repository. If you're
 # editing this file be aware that the upstream Cargo.toml
 # will likely look very different (and much more reasonable)
 
 [package]
 name = "rayon-core"
-version = "1.2.0"
+version = "1.4.0"
 authors = ["Niko Matsakis <niko@alum.mit.edu>", "Josh Stone <cuviper@gmail.com>"]
 build = "build.rs"
 links = "rayon-core"
 description = "Core APIs for Rayon"
 documentation = "https://docs.rs/rayon/"
+readme = "README.md"
+keywords = ["parallel", "thread", "concurrency", "join", "performance"]
+categories = ["concurrency"]
 license = "Apache-2.0/MIT"
-repository = "https://github.com/nikomatsakis/rayon"
-[dependencies.num_cpus]
-version = "1.2"
-
-[dependencies.coco]
-version = "0.1.1"
-
-[dependencies.futures]
-version = "0.1.7"
+repository = "https://github.com/rayon-rs/rayon"
+[dependencies.crossbeam-deque]
+version = "0.2.0"
 
 [dependencies.lazy_static]
-version = "0.2.2"
-
-[dependencies.rand]
-version = "0.3"
+version = "1"
 
 [dependencies.libc]
 version = "0.2.16"
 
+[dependencies.num_cpus]
+version = "1.2"
+
+[dependencies.rand]
+version = ">= 0.3, < 0.5"
+
 [dev-dependencies]
copy from third_party/rust/coco/LICENSE-APACHE
copy to third_party/rust/rayon-core/LICENSE-APACHE
copy from third_party/rust/coco/LICENSE-MIT
copy to third_party/rust/rayon-core/LICENSE-MIT
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-core/README.md
@@ -0,0 +1,11 @@
+Rayon-core represents the "core, stable" APIs of Rayon: join, scope, and so forth, as well as the ability to create custom thread-pools with ThreadPool.
+
+Maybe worth mentioning: users are not necessarily intended to directly access rayon-core; all of its APIs are mirrored in the rayon crate. To that end, the examples in the docs use rayon::join and so forth rather than rayon_core::join.
+
+rayon-core aims to never, or almost never, have a breaking change to its API, because each revision of rayon-core also houses the global thread-pool (and hence if you have two simultaneous versions of rayon-core, you have two thread-pools).
+
+Please see [Rayon Docs] for details about using Rayon.
+
+[Rayon Docs]: https://docs.rs/rayon/
+
+Rayon-core currently requires `rustc 1.13.0` or greater.
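A minimal sketch of the kind of stable API this crate backs, going through the rayon facade as the README recommends (assuming rayon 0.8; the recursive `sum` helper is made up for illustration):

```rust
extern crate rayon;

fn sum(slice: &[i32]) -> i32 {
    if slice.len() <= 1024 {
        return slice.iter().sum();
    }
    let mid = slice.len() / 2;
    let (left, right) = slice.split_at(mid);
    // Both closures may run in parallel; the call is ultimately serviced
    // by the global thread pool that each revision of rayon-core owns.
    let (a, b) = rayon::join(|| sum(left), || sum(right));
    a + b
}

fn main() {
    let v: Vec<i32> = (1..10_001).collect();
    assert_eq!(sum(&v), 50_005_000);
}
```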
deleted file mode 100644
--- a/third_party/rust/rayon-core/src/future/README.md
+++ /dev/null
@@ -1,273 +0,0 @@
-# Future integration into Rayon
-
-## How futures work
-
-Let's start with a brief coverage of how futures work. Our example will
-be a simple chain of futures:
-
-    F_map -> F_socket
-
-Here `F_socket` is a future that maps to a TCP socket. It returns a
-`Vec<u8>` of data read from that socket. `F_map` is a future will take
-that data and do some transformation. (Note that the real futures for
-reading from sockets etc do not work in this way, this is just an
-example.)
-
-The idea of futures is that each future offers a `poll()` method. When
-`poll()` is invoked, the future will attempt to execute. Typically,
-this often involves recursively calling `poll()` on other futures. So,
-in our example, `F_map` when it starts would call `F_socket.poll()` to
-see if the data is ready. The idea is that `poll()` returns one of
-three values:
-
-- `Ok(Async::Ready(R))` -- the future has completed, here is the result `R`.
-- `Err(E)` -- the future has completed and resulted in an error `E`.
-- `Ok(Async::NotReady)` -- the future is not yet complete.
-
-The last one is the most interesting. It means that the future is
-blocked on *some event X*, typically an I/O event (i.e., we are
-waiting for more data to arrive on a TCP socket).
-
-When a future returns `NotReady`, it also has one additional job. It
-must register the "current task" (think for now of the current thread)
-to be re-awoken when the event X has occurred. For most futures, this
-job is delegated to another future: e.g., in our example, `F_map`
-invokes `F_socket.poll()`. So if `F_socket.poll()` returns not-ready,
-then it will have registered the current thread already, and hence
-`F_map` can merely propagates the `NotReady` result further up.
-
-### The current task and executor
-
-A key concept of the futures.rs library is that of an *executor*.  The
-executor is the runtime that first invokes the top-level future
-(`T_map`, in our example). This is precisely the role that Rayon
-plays. Note that in any futures system there may be many
-interoperating execturs though.
-
-Part of an executors job is to maintain some thread-local storage
-(TLS) when a future is executing. In particular, it must setup the
-"current task" (basically a unique integer, although it's an opaque
-type) as well as an "unpark object" of type
-`Arc<Unpark>`. [The `Unpark` trait][unpark] offers a single method
-(`unpark()`) which can be invoked when the task should be
-re-awoken. So `F_socket` might, for example, get the current
-`Arc<Unpark>` object and store it for use by an I/O thread. The I/O
-thread might invoke `epoll()` or `select()` or whatever and, when it
-detects the socket has more data, invoke the `unpark()` method.
-
-[unpark]: https://docs.rs/futures/0.1/futures/executor/trait.Unpark.html
-
-## Rayon's futures integration
-
-When you spawn a future of type `F` into rayon, the idea is that it is
-going to start independently executing in the thread-pool. Meanwhile,
-the `spawn_future()` method returns to you your own future (let's call
-it `F'`) that you can use to poll and monitor its progress. Internally
-within Rayon, however, we only allocate a single `Arc` to represent
-both of these things -- an `Arc<ScopeFuture<F>>`, to be precise -- and
-this `Arc` hence serves two distinct roles.
-
-The operations on `F'` (the handle returned to the user) are specified
-by the trait `ScopeFutureTrait` and are very simple. The user can
-either `poll()` the future, which is checking to see if rayon is done
-executing it yet, or `cancel()` the future. `cancel()` occurs when
-`F'` is dropped, which indicates that the user no longer has interest
-in the result.
-
-### Future reference counting
-
-Each spawned future is represents by an `Arc`. This `Arc` actually has
-some interesting structure. Each of the edges in the diagram below
-represents something that is "kept alive" by holding a ref count (in
-some way, usually via an `Arc`):
-
-      F' ---+  [ deque ] --+
-            |              |
-            v              v
-      +---> /---------------------\
-      |     | registry:           | ------> [rayon registry]
-      |     | contents: --------\ |
-      |     | | scope         | | ------> [spawning scope]
-      |     | | unpark          | | --+
-      |     | | this            | | --+ (self references)
-      |     | | ...             | |   |
-      |     | \-----------------/ |   |
-      |     \---------------------/   |
-      +-------------------------------+
-
-Let's walk through them:
-
-- The incoming edge from `F'` represents the edge from the future that was returned
-  to the caller of `spawn_future`. This ensures that the future arc will
-  not be freed so long as the caller is still interesting in looking at
-  its result.
-- The incoming edge from `[ deque ]` represents the fact that when the
-  future is enqueued into a thread-local deque (which it only
-  sometimes is), that deque holds a ref. This is done by transmuting
-  the `Arc` into a `*const Job` object (and hence the `*const`
-  logically holds the ref that was owned by the `Arc`).  When the job
-  is executed, it is transmuted back and the resulting `Arc` is
-  eventually dropped, releasing the ref.
-- The `registry` field holds onto an `Arc<Registry>` and hence keeps
-  some central registry alive. This doesn't really do much but prevent
-  the `Registry` from being dropped. In particular, this doesn't
-  prevent the threads in a registry from terminating while the future
-  is unscheduled etc (though other fields in the future do).
-- The `scope` field (of type `S`) is the "enclosing scope". This scope
-  is an abstract value that implements the `FutureScope<'scope>` trait
-  -- this means that it is responsible for ensuring that `'scope` does
-  not end until one of the `FutureScope` methods are invoked (which
-  occurs when the future has finished executing). For example, if the
-  future is spawned inside a `scope()` call, then the `S` will be a
-  wrapper (`ScopeFutureScope`) around a `*const Scope<'scope>`.  When
-  the future is created one job is allocated for this future in the
-  scope, and the scope counter is decremented once the future is
-  marked as completing.
-  - In general, the job of the `scope` field is to ensure that the
-    future type (`F`) remains valid. After all, since `F: 'scope`, `F`
-    is known to be valid until the lifetime `'scope` ends, and that
-    lifetime cannot end until the `scope` methods are invoked, so we
-    know that `F` must stay valid until one of those methods are
-    invoked.
-  - All of our data of type `F` is stored in the field `spawn` (not
-    shown here). This field is always set to `None` before the scope
-    counter is decremented. See the section on lifetime safety for more
-    details.
-- The `unpark` and `self` fields both store an `Arc` which is actually
-  this same future. Thus the future has a ref count cycle (two of
-  them...) and cannot be freed until this cycle is broken. Both of
-  these fields are actually `Option<Arc<..>>` fields and will be set
-  to `None` once the future is complete, breakin the cycle and
-  allowing it to be freed when other references are dropped.
-
-### The future state machine
-
-Internally, futures go through various states, depicted here:
-
-    PARKED <----+
-    |           |
-    v           |
-    UNPARKED    |
-    |           |
-    v           |
-    EXECUTING --+
-    |   |   ^
-    |   v   |
-    |   EXECUTING_UNPARKED
-    |
-    v
-    COMPLETE
-
-When they are first created, futures begin as *PARKED*. A *PARKED*
-future is one that is waiting for something to happen. It is not
-scheduled in the deque of any thread. Even before we return from
-`spawn_future()`, however, we will transition into *UNPARKED*. An
-*UNPARKED* future is one that is waiting to be executed. It is
-enqueued in the deque of some Rayon thread and hence will execute when
-the thread gets around to it.
-
-Once the future begins to execute (it itself is a Rayon job), it
-transitions into the *EXECUTING* state. This means that it is busy
-calling `F.poll()`, basically. While it calls `poll()`, it also sets
-up its `contents.unpark` field as the current "unpark" instance. Hence
-if `F` returns `NotReady`, it will clone this `unpark` field and hold
-onto it to signal us the future is ready to execute again.
-
-For now let's assume that `F` is complete and hence readys either
-`Ok(Ready(_))` or `Err(_)`. In that case, the future can transition to
-`COMPLETE`. At this point, many bits of state that are no longer
-needed (e.g., the future itself, but also the `this` and `unpark`
-fields) are set to `None` and dropped, and the result is stored in the
-`result` field. (Moreover, we may have to signal other tasks, but that
-is discussed in a future section.)
-
-If `F` returns `Ok(Async::NotReady)`, then we would typically
-transition to the `PARKED` state and await the call to
-`unpark()`. When `unpark()` is called, it would move the future into
-the `UNPARK` state and inject it into the registry.
-
-However, due to the vagaries of thread-scheduling, it *can* happen
-that `unpark()` is called before we exit the `EXECUTING` state. For
-example, we might invoke `F.poll()`, which send the `Unpark` instance
-to the I/O thread, which detects I/O, and invokes `unpark()`, all
-before `F.poll()` has returned. In that case, the `unpark()` method
-will transition the state (atomically, of course) to
-`EXECUTING_UNPARKED`. In that case, instead of transitioning to
-`PARKED` when `F.poll()` returns, the future will simply transition
-right back to `EXECUTING` and try calling `poll()` again. This can
-repeat a few times.
-
-### Lifetime safety
-
-Of course, Rayon's signature feature is that it allows you to use a
-future `F` that includes references, so long as those references
-outlive the lifetime of the scope `'scope`. So why is this safe?
-
-The basic idea of why this is safe is as follows. The `ScopeFuture`
-struct holds a ref on the scope itself (via the field `scope`).
-Until this ref is decremented, the scope will not end (and hence
-`'scope` is still active). This ref is only decremented while the
-future transitions into the *COMPLETE* state -- so anytime before
-then, we know we don't have to worry, the references are still valid.
-
-As we transition into the *COMPLETE* state is where things get more
-interesting. You'll notice that signaling the `self.scope` job as done
-the *last* thing that happens during that transition. Importantly,
-before that is done, we drop all access that we have to the type `F`:
-that is, we store `None` into the fields that might reference values
-of type `F`. This implies that we know that, whatever happens after we
-transition into *COMPLETE*, we can't access any of the references
-found in `F` anymore.
-
-This is good, because there *are* still active refs to the
-`ScopeFuture` after we enter the *COMPLETE* state. There are two
-sources of these: unpark values and the future result.
-
-**Unpark values.** We may have given away `Arc<Unpark>` values --
-these are trait objects, but they are actually refs to our
-`ScopeFuture`. Note that `Arc<Unpark>: 'static`, so these could be
-floating about for any length of time (we had to transmute away the
-lifetimes to give them out). This is ok because (a) the `Arc` keeps
-the `ScopeFuture` alive and (b) the only thing you can do is to call
-`unpark()`, which will promptly return since the state is *COMPLETE*
-(and, anyhow, as we saw above, it doesn't have access to any
-references anyhow).
-
-**Future result.** The other, more interesting reference to the
-`ScopeFuture` is the value that we gave back to the user when we
-spawned the future in the first place. This value is more interesting
-because it can be used to do non-trivial things, unlike the
-`Arc<Unpark>`. If you look carefully at this handle, you will see that
-its type has been designed to hide the type `F`. In fact, it only
-reveals the types `T` and `E` which are the ok/err result types of the
-future `F`.  This is intentonal: suppose that the type `F` includes
-some references, but those references don't appear in the result. We
-want the "result" future to be able to escape the scope, then, to any
-place where the types `T` and `E` are still in scope. If we exposed
-`F` here that would not be possible. (Hiding `F` also requires a
-transmute to an object type, in this case an internal trait called
-`ScopeFutureTrait`.) Note though that it is possible for `T` and `E`
-to have references in them. They could even be references tied to the
-scope.
-
-So what can a user do with this result future? They have two
-operations available: poll and cancel. Let's look at cancel first,
-since it's simpler. If the state is *COMPLETE*, then `cancel()` is an
-immediate no-op, so we know that it can't be used to access any
-references that may be invalid. In any case, the only thing it does is
-to set a field to true and invoke `unpark()`, and we already examined
-the possible effects of `unpark()` in the previous section.q
-
-So what about `poll()`? This is how the user gets the final result out
-of the future. The important thing that it does is to access (and
-effectively nullify) the field `result`, which stores the result of
-the future and hence may have access to `T` and `E` values. These
-values may contain references...so how we know that they are still in
-scope?  The answer is that those types are exposed in the user's type
-of the future, and hence the basic Rust type system should guarantee
-that any references are still valid, or else the user shouldn't be
-able to call `poll()`. (The same is true at the time of cancellation,
-but that's not important, since `cancel()` doesn't do anything of
-interest.)
-
-
deleted file mode 100644
--- a/third_party/rust/rayon-core/src/future/mod.rs
+++ /dev/null
@@ -1,502 +0,0 @@
-//! Future support in Rayon. This module *primary* consists of
-//! internal APIs that are exposed through `Scope::spawn_future` and
-//! `::spawn_future`.  However, the type `RayonFuture` is a public
-//! type exposed to all users.
-//!
-//! See `README.md` for details.
-
-use latch::{LatchProbe};
-#[allow(warnings)]
-use log::Event::*;
-use futures::{Async, Poll};
-use futures::executor;
-use futures::future::CatchUnwind;
-use futures::task::{self, Spawn, Task, Unpark};
-use job::{Job, JobRef};
-use registry::{Registry, WorkerThread};
-use std::any::Any;
-use std::panic::AssertUnwindSafe;
-use std::mem;
-use std::sync::Arc;
-use std::sync::atomic::AtomicUsize;
-use std::sync::atomic::Ordering::*;
-use std::sync::Mutex;
-use unwind;
-
-pub use futures::Future;
-
-const STATE_PARKED: usize = 0;
-const STATE_UNPARKED: usize = 1;
-const STATE_EXECUTING: usize = 2;
-const STATE_EXECUTING_UNPARKED: usize = 3;
-const STATE_COMPLETE: usize = 4;
-
-/// Represents the result of a future that has been spawned in the
-/// Rayon threadpool.
-///
-/// # Panic behavior
-///
-/// Any panics that occur while computing the spawned future will be
-/// propagated when this future is polled.
-pub struct RayonFuture<T, E> {
-    // Warning: Public end-user API!
-    inner: Arc<ScopeFutureTrait<Result<T, E>, Box<Any + Send + 'static>>>,
-}
-
-/// Unsafe because implementor must guarantee:
-///
-/// 1. That the type `Self` remains dynamically valid until one of the
-///    completion methods is called.
-/// 2. That the lifetime `'scope` cannot end until one of those
-///    methods is called.
-///
-/// NB. Although this is public, it is not exposed to outside users.
-pub unsafe trait FutureScope<'scope> {
-    fn registry(&self) -> Arc<Registry>;
-    fn future_panicked(self, err: Box<Any + Send>);
-    fn future_completed(self);
-}
-
-/// Create a `RayonFuture` that will execute `F` and yield its result,
-/// propagating any panics.
-///
-/// NB. Although this is public, it is not exposed to outside users.
-pub fn new_rayon_future<'scope, F, S>(future: F, scope: S) -> RayonFuture<F::Item, F::Error>
-    where F: Future + Send + 'scope, S: FutureScope<'scope>,
-{
-    let inner = ScopeFuture::spawn(future, scope);
-
-    // We assert that it is safe to hide the type `F` (and, in
-    // particular, the lifetimes in it). This is true because the API
-    // offered by a `RayonFuture` only permits access to the result of
-    // the future (of type `F::Item` or `F::Error`) and those types
-    // *are* exposed in the `RayonFuture<F::Item, F::Error>` type. See
-    // README.md for details.
-    unsafe {
-        return RayonFuture { inner: hide_lifetime(inner) };
-    }
-
-    unsafe fn hide_lifetime<'l, T, E>(x: Arc<ScopeFutureTrait<T, E> + 'l>)
-                                      -> Arc<ScopeFutureTrait<T, E>> {
-        mem::transmute(x)
-    }
-}
-
-impl<T, E> RayonFuture<T, E> {
-    pub fn rayon_wait(mut self) -> Result<T, E> {
-        // NB: End-user API!
-        let worker_thread = WorkerThread::current();
-        if worker_thread.is_null() {
-            self.wait()
-        } else {
-            // Assert that uses of `worker_thread` pointer below are
-            // valid (because we are on the worker-thread).
-            unsafe {
-                (*worker_thread).wait_until(&*self.inner);
-                debug_assert!(self.inner.probe());
-                self.poll().map(|a_v| match a_v {
-                    Async::Ready(v) => v,
-                    Async::NotReady => panic!("probe() returned true but poll not ready")
-                })
-            }
-        }
-    }
-}
-
-impl<T, E> Future for RayonFuture<T, E> {
-    type Item = T;
-    type Error = E;
-
-    fn wait(self) -> Result<T, E> {
-        if WorkerThread::current().is_null() {
-            executor::spawn(self).wait_future()
-        } else {
-            panic!("using  `wait()` in a Rayon thread is unwise; try `rayon_wait()`")
-        }
-    }
-
-    fn poll(&mut self) -> Poll<T, E> {
-        match self.inner.poll() {
-            Ok(Async::Ready(Ok(v))) => Ok(Async::Ready(v)),
-            Ok(Async::Ready(Err(e))) => Err(e),
-            Ok(Async::NotReady) => Ok(Async::NotReady),
-            Err(e) => unwind::resume_unwinding(e),
-        }
-    }
-}
-
-impl<T, E> Drop for RayonFuture<T, E> {
-    fn drop(&mut self) {
-        self.inner.cancel();
-    }
-}
-
-/// ////////////////////////////////////////////////////////////////////////
-
-struct ScopeFuture<'scope, F, S>
-    where F: Future + Send + 'scope, S: FutureScope<'scope>,
-{
-    state: AtomicUsize,
-    registry: Arc<Registry>,
-    contents: Mutex<ScopeFutureContents<'scope, F, S>>,
-}
-
-type CU<F> = CatchUnwind<AssertUnwindSafe<F>>;
-type CUItem<F> = <CU<F> as Future>::Item;
-type CUError<F> = <CU<F> as Future>::Error;
-
-struct ScopeFutureContents<'scope, F, S>
-    where F: Future + Send + 'scope, S: FutureScope<'scope>,
-{
-    spawn: Option<Spawn<CU<F>>>,
-    unpark: Option<Arc<Unpark>>,
-
-    // Pointer to ourselves. We `None` this out when we are finished
-    // executing, but it's convenient to keep around normally.
-    this: Option<Arc<ScopeFuture<'scope, F, S>>>,
-
-    // the counter in the scope; since the scope doesn't terminate until
-    // counter reaches zero, and we hold a ref in this counter, we are
-    // assured that this pointer remains valid
-    scope: Option<S>,
-
-    waiting_task: Option<Task>,
-    result: Poll<CUItem<F>, CUError<F>>,
-
-    canceled: bool,
-}
-
-// Assert that the `*const` is safe to transmit between threads:
-unsafe impl<'scope, F, S> Send for ScopeFuture<'scope, F, S>
-    where F: Future + Send + 'scope, S: FutureScope<'scope>,
-{}
-unsafe impl<'scope, F, S> Sync for ScopeFuture<'scope, F, S>
-    where F: Future + Send + 'scope, S: FutureScope<'scope>,
-{}
-
-impl<'scope, F, S> ScopeFuture<'scope, F, S>
-    where F: Future + Send + 'scope, S: FutureScope<'scope>,
-{
-    fn spawn(future: F, scope: S) -> Arc<Self> {
-        // Using `AssertUnwindSafe` is valid here because (a) the data
-        // is `Send + Sync`, which is our usual boundary and (b)
-        // panics will be propagated when the `RayonFuture` is polled.
-        let spawn = task::spawn(AssertUnwindSafe(future).catch_unwind());
-
-        let future: Arc<Self> = Arc::new(ScopeFuture::<F, S> {
-            state: AtomicUsize::new(STATE_PARKED),
-            registry: scope.registry(),
-            contents: Mutex::new(ScopeFutureContents {
-                spawn: None,
-                unpark: None,
-                this: None,
-                scope: Some(scope),
-                waiting_task: None,
-                result: Ok(Async::NotReady),
-                canceled: false,
-            }),
-        });
-
-        // Make the two self-cycles. Note that these imply the future
-        // cannot be freed until these fields are set to `None` (which
-        // occurs when it is finished executing).
-        {
-            let mut contents = future.contents.try_lock().unwrap();
-            contents.spawn = Some(spawn);
-            contents.unpark = Some(Self::make_unpark(&future));
-            contents.this = Some(future.clone());
-        }
-
-        future.unpark();
-
-        future
-    }
-
-    /// Creates a `JobRef` from this job -- note that this hides all
-    /// lifetimes, so it is up to you to ensure that this JobRef
-    /// doesn't outlive any data that it closes over.
-    unsafe fn into_job_ref(this: Arc<Self>) -> JobRef {
-        let this: *const Self = mem::transmute(this);
-        JobRef::new(this)
-    }
-
-    fn make_unpark(this: &Arc<Self>) -> Arc<Unpark> {
-        // Hide any lifetimes in `self`. This is safe because, until
-        // `self` is dropped, the counter is not decremented, and so
-        // the `'scope` lifetimes cannot end.
-        //
-        // Unfortunately, as `Unpark` currently requires `'static`, we
-        // have to do an indirection and this ultimately requires a
-        // fresh allocation.
-        //
-        // Here we assert that hiding the lifetimes in this fashion is
-        // safe: we claim this is true because the lifetimes we are
-        // hiding are part of `F`, and we know that any lifetimes in
-        // `F` outlive `counter`. And we can see from `complete()`
-        // that we drop all values of type `F` before decrementing
-        // `counter`.
-        unsafe {
-            return hide_lifetime(this.clone());
-        }
-
-        unsafe fn hide_lifetime<'l>(x: Arc<Unpark + 'l>) -> Arc<Unpark> {
-            mem::transmute(x)
-        }
-    }
-
-    fn unpark_inherent(&self) {
-        loop {
-            match self.state.load(Relaxed) {
-                STATE_PARKED => {
-                    if {
-                        self.state
-                            .compare_exchange_weak(STATE_PARKED, STATE_UNPARKED, Release, Relaxed)
-                            .is_ok()
-                    } {
-                        // Contention here is unlikely but possible: a
-                        // previous execution might have moved us to the
-                        // PARKED state but not yet released the lock.
-                        let contents = self.contents.lock().unwrap();
-
-                        // Assert that `job_ref` remains valid until
-                        // it is executed.  That's true because
-                        // `job_ref` holds a ref on the `Arc` and
-                        // because, until `job_ref` completes, the
-                        // references in the future are valid.
-                        unsafe {
-                            let job_ref = Self::into_job_ref(contents.this.clone().unwrap());
-                            self.registry.inject_or_push(job_ref);
-                        }
-                        return;
-                    }
-                }
-
-                STATE_EXECUTING => {
-                    if {
-                        self.state
-                            .compare_exchange_weak(STATE_EXECUTING,
-                                                   STATE_EXECUTING_UNPARKED,
-                                                   Release,
-                                                   Relaxed)
-                            .is_ok()
-                    } {
-                        return;
-                    }
-                }
-
-                state => {
-                    debug_assert!(state == STATE_UNPARKED || state == STATE_EXECUTING_UNPARKED ||
-                                  state == STATE_COMPLETE);
-                    return;
-                }
-            }
-        }
-    }
-
-    fn begin_execute_state(&self) {
-        // When we are put into the unparked state, we are enqueued in
-        // a worker thread. We should then be executed exactly once,
-        // at which point we transition to STATE_EXECUTING. Nobody
-        // should be contending with us to change the state here.
-        let state = self.state.load(Acquire);
-        debug_assert_eq!(state, STATE_UNPARKED);
-        let result = self.state.compare_exchange(state, STATE_EXECUTING, Release, Relaxed);
-        debug_assert_eq!(result, Ok(STATE_UNPARKED));
-    }
-
-    fn end_execute_state(&self) -> bool {
-        loop {
-            match self.state.load(Relaxed) {
-                STATE_EXECUTING => {
-                    if {
-                        self.state
-                            .compare_exchange_weak(STATE_EXECUTING, STATE_PARKED, Release, Relaxed)
-                            .is_ok()
-                    } {
-                        // We put ourselves into parked state, no need to
-                        // re-execute.  We'll just wait for the Unpark.
-                        return true;
-                    }
-                }
-
-                state => {
-                    debug_assert_eq!(state, STATE_EXECUTING_UNPARKED);
-                    if {
-                        self.state
-                            .compare_exchange_weak(state, STATE_EXECUTING, Release, Relaxed)
-                            .is_ok()
-                    } {
-                        // We finished executing, but an unpark request
-                        // came in the meantime.  We need to execute
-                        // again. Return false as we failed to end the
-                        // execution phase.
-                        return false;
-                    }
-                }
-            }
-        }
-    }
-}
-
-impl<'scope, F, S> Unpark for ScopeFuture<'scope, F, S>
-    where F: Future + Send + 'scope, S: FutureScope<'scope>,
-{
-    fn unpark(&self) {
-        self.unpark_inherent();
-    }
-}
-
-impl<'scope, F, S> Job for ScopeFuture<'scope, F, S>
-    where F: Future + Send + 'scope, S: FutureScope<'scope>,
-{
-    unsafe fn execute(this: *const Self) {
-        let this: Arc<Self> = mem::transmute(this);
-
-        // *generally speaking* there should be no contention for the
-        // lock, but it is possible -- we can end execution, get re-enqueued,
-        // and re-executed, before we have time to return from this fn
-        let mut contents = this.contents.lock().unwrap();
-
-        log!(FutureExecute { state: this.state.load(Relaxed) });
-
-        this.begin_execute_state();
-        loop {
-            if contents.canceled {
-                return contents.complete(Ok(Async::NotReady));
-            } else {
-                match contents.poll() {
-                    Ok(Async::Ready(v)) => {
-                        log!(FutureExecuteReady);
-                        return contents.complete(Ok(Async::Ready(v)));
-                    }
-                    Ok(Async::NotReady) => {
-                        log!(FutureExecuteNotReady);
-                        if this.end_execute_state() {
-                            return;
-                        }
-                    }
-                    Err(err) => {
-                        log!(FutureExecuteErr);
-                        return contents.complete(Err(err));
-                    }
-                }
-            }
-        }
-    }
-}
-
-impl<'scope, F, S> ScopeFutureContents<'scope, F, S>
-    where F: Future + Send + 'scope, S: FutureScope<'scope>,
-{
-    fn poll(&mut self) -> Poll<CUItem<F>, CUError<F>> {
-        let unpark = self.unpark.clone().unwrap();
-        self.spawn.as_mut().unwrap().poll_future(unpark)
-    }
-
-    fn complete(&mut self, value: Poll<CUItem<F>, CUError<F>>) {
-        log!(FutureComplete);
-
-        // So, this is subtle. We know that the type `F` may have some
-        // data which is only valid until the end of the scope, and we
-        // also know that the scope doesn't end until `self.counter`
-        // is decremented below. So we want to be sure to drop
-        // `self.future` first, lest its dtor try to access some of
-        // that state or something!
-        mem::drop(self.spawn.take().unwrap());
-
-        self.unpark = None;
-        self.result = value;
-        let this = self.this.take().unwrap();
-        if cfg!(debug_assertions) {
-            let state = this.state.load(Relaxed);
-            debug_assert!(state == STATE_EXECUTING || state == STATE_EXECUTING_UNPARKED,
-                          "cannot complete when not executing (state = {})",
-                          state);
-        }
-        this.state.store(STATE_COMPLETE, Release);
-
-        // `unpark()` here is arbitrary user-code, so it may well
-        // panic. We try to capture that panic and forward it
-        // somewhere useful if we can.
-        let mut err = None;
-        if let Some(waiting_task) = self.waiting_task.take() {
-            log!(FutureUnparkWaitingTask);
-            match unwind::halt_unwinding(|| waiting_task.unpark()) {
-                Ok(()) => { }
-                Err(e) => { err = Some(e); }
-            }
-        }
-
-        // Allow the enclosing scope to end. Asserts that
-        // `self.counter` is still valid, which we know because the caller
-        // of `new_rayon_future()` ensures it for us.
-        let scope = self.scope.take().unwrap();
-        if let Some(err) = err {
-            scope.future_panicked(err);
-        } else {
-            scope.future_completed();
-        }
-    }
-}
-
-impl<'scope, F, S> LatchProbe for ScopeFuture<'scope, F, S>
-    where F: Future + Send, S: FutureScope<'scope>,
-{
-    fn probe(&self) -> bool {
-        self.state.load(Acquire) == STATE_COMPLETE
-    }
-}
-
-/// NB. Although this is public, it is not exposed to outside users.
-pub trait ScopeFutureTrait<T, E>: Send + Sync + LatchProbe {
-    fn poll(&self) -> Poll<T, E>;
-    fn cancel(&self);
-}
-
-impl<'scope, F, S> ScopeFutureTrait<CUItem<F>, CUError<F>> for ScopeFuture<'scope, F, S>
-    where F: Future + Send, S: FutureScope<'scope>,
-{
-    fn poll(&self) -> Poll<CUItem<F>, CUError<F>> {
-        // Important: due to transmute hackery, not all the fields are
-        // truly known to be valid at this point. In particular, the
-        // type F is erased. But the `state` and `result` fields
-        // should be valid.
-        let mut contents = self.contents.lock().unwrap();
-        let state = self.state.load(Relaxed);
-        if state == STATE_COMPLETE {
-            let r = mem::replace(&mut contents.result, Ok(Async::NotReady));
-            return r;
-        } else {
-            log!(FutureInstallWaitingTask { state: state });
-            contents.waiting_task = Some(task::park());
-            Ok(Async::NotReady)
-        }
-    }
-
-    fn cancel(&self) {
-        // Fast-path: check if this is already complete and return if
-        // so. A relaxed load suffices since we are not going to
-        // access any data as a result of this action.
-        if self.state.load(Relaxed) == STATE_COMPLETE {
-            return;
-        }
-
-        // Slow-path. Get the lock and set the canceled flag to
-        // true. Also grab the `unpark` instance (which may be `None`,
-        // if the future completes before we get the lock).
-        let unpark = {
-            let mut contents = self.contents.lock().unwrap();
-            contents.canceled = true;
-            contents.unpark.clone()
-        };
-
-        // If the `unpark` we grabbed was not `None`, then signal it.
-        // This will schedule the future.
-        if let Some(u) = unpark {
-            u.unpark();
-        }
-    }
-}
-
-#[cfg(test)]
-mod test;
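The removed `ScopeFuture` above coordinates `unpark()` and `execute()` through an atomic state word and retried weak compare-exchanges. The following standalone sketch restates just that transition pattern; the constant names echo the removed module, but their numeric values and the `try_unpark` helper are illustrative assumptions, not the crate's code.

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

// Illustrative values only; the real constants live in the removed module.
const STATE_PARKED: usize = 0;
const STATE_UNPARKED: usize = 1;

/// Hypothetical helper mirroring the PARKED -> UNPARKED step of `unpark_inherent`.
fn try_unpark(state: &AtomicUsize) -> bool {
    loop {
        match state.load(Ordering::Relaxed) {
            STATE_PARKED => {
                // A weak compare-exchange may fail spuriously, so retry the loop.
                if state
                    .compare_exchange_weak(STATE_PARKED, STATE_UNPARKED,
                                           Ordering::Release, Ordering::Relaxed)
                    .is_ok()
                {
                    return true; // we claimed the wake-up; the caller enqueues the job
                }
            }
            // Already unparked, executing, or complete: someone else owns the wake-up.
            _ => return false,
        }
    }
}

fn main() {
    let state = AtomicUsize::new(STATE_PARKED);
    assert!(try_unpark(&state));
    assert!(!try_unpark(&state));
}
```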
deleted file mode 100644
--- a/third_party/rust/rayon-core/src/future/test.rs
+++ /dev/null
@@ -1,220 +0,0 @@
-use futures::{self, Async, Future};
-use futures::future::lazy;
-use futures::sync::oneshot;
-use futures::task::{self, Unpark};
-use std::sync::Arc;
-use std::sync::atomic::{AtomicUsize, Ordering};
-use ::{scope, ThreadPool, Configuration};
-
-/// Basic test of using futures to access data on the stack frame.
-#[test]
-fn future_test() {
-    let data = &[0, 1];
-
-    // Here we call `wait` on a select future, which will block at
-    // least one thread. So we need a second thread to ensure no
-    // deadlock.
-    ThreadPool::new(Configuration::new().num_threads(2)).unwrap().install(|| {
-        scope(|s| {
-            let a = s.spawn_future(futures::future::ok::<_, ()>(&data[0]));
-            let b = s.spawn_future(futures::future::ok::<_, ()>(&data[1]));
-            let (item1, next) = a.select(b).wait().ok().unwrap();
-            let item2 = next.wait().unwrap();
-            assert!(*item1 == 0 || *item1 == 1);
-            assert!(*item2 == 1 - *item1);
-        });
-    });
-}
-
-/// Test using `map` on a Rayon future. The `map` closure is executed
-/// for side-effects, and modifies the `data` variable that is owned
-/// by the enclosing stack frame.
-#[test]
-fn future_map() {
-    let data = &mut [format!("Hello, ")];
-
-    let mut future = None;
-    scope(|s| {
-        let a = s.spawn_future(lazy(|| Ok::<_, ()>(&mut data[0])));
-        future = Some(s.spawn_future(a.map(|v| {
-            v.push_str("world!");
-        })));
-    });
-
-    // future must have executed for the scope to have ended, even
-    // though we never invoked `wait` to observe its result
-    assert_eq!(data[0], "Hello, world!");
-    assert!(future.is_some());
-}
-
-/// Test that we can create a future that returns an `&mut` to data,
-/// so long as it outlives the scope.
-#[test]
-fn future_escape_ref() {
-    let data = &mut [format!("Hello, ")];
-
-    {
-        let mut future = None;
-        scope(|s| {
-            let data = &mut *data;
-            future = Some(s.spawn_future(lazy(move || Ok::<_, ()>(&mut data[0]))));
-        });
-        let s = future.unwrap().wait().unwrap();
-        s.push_str("world!");
-    }
-
-    assert_eq!(data[0], "Hello, world!");
-}
-
-#[test]
-#[should_panic(expected = "Hello, world!")]
-fn future_panic_prop() {
-    scope(|s| {
-        let future = s.spawn_future(lazy(move || Ok::<(), ()>(argh())));
-        let _ = future.rayon_wait(); // should panic, not return a value
-    });
-
-    fn argh() -> () {
-        if true {
-            panic!("Hello, world!");
-        }
-    }
-}
-
-/// Test that, even if we have only one thread, invoking `rayon_wait`
-/// will not panic.
-#[test]
-fn future_rayon_wait_1_thread() {
-    // run with only 1 worker thread; this would deadlock if we couldn't make progress
-    let mut result = None;
-    ThreadPool::new(Configuration::new().num_threads(1)).unwrap().install(|| {
-        scope(|s| {
-            use std::sync::mpsc::channel;
-            let (tx, rx) = channel();
-            let a = s.spawn_future(lazy(move || Ok::<usize, ()>(rx.recv().unwrap())));
-            //                          ^^^^ FIXME: why is this needed?
-            let b = s.spawn_future(a.map(|v| v + 1));
-            let c = s.spawn_future(b.map(|v| v + 1));
-            s.spawn(move |_| tx.send(20).unwrap());
-            result = Some(c.rayon_wait().unwrap());
-        });
-    });
-    assert_eq!(result, Some(22));
-}
-
-/// Test that invoking `wait` on a `RayonFuture` will panic, if it is inside
-/// a Rayon worker thread.
-#[test]
-#[should_panic]
-fn future_wait_panics_inside_rayon_thread() {
-    scope(|s| {
-        let future = s.spawn_future(lazy(move || Ok::<(), ()>(())));
-        let _ = future.wait(); // should panic, not return a value
-    });
-}
-
-/// Test that invoking `wait` on a `RayonFuture` will not panic if we
-/// are outside a Rayon worker thread.
-#[test]
-fn future_wait_works_outside_rayon_threads() {
-    let mut future = None;
-    scope(|s| {
-        future = Some(s.spawn_future(lazy(move || Ok::<(), ()>(()))));
-    });
-    assert_eq!(Ok(()), future.unwrap().wait());
-}
-
-/// Test that a panic raised by the `Unpark` installed on a `RayonFuture`
-/// propagates out of the enclosing scope when the future completes.
-#[test]
-#[should_panic(expected = "Hello, world!")]
-fn panicy_unpark() {
-    scope(|s| {
-        let (a_tx, a_rx) = oneshot::channel::<u32>();
-        let rf = s.spawn_future(a_rx);
-
-        // invoke `poll_future` with a `PanicUnpark` instance;
-        // this should get installed as a 'waiting task' on the
-        // Rayon future `rf`
-        let mut spawn = task::spawn(rf);
-        let unpark = Arc::new(PanicUnpark);
-        match spawn.poll_future(unpark.clone()) {
-            Ok(Async::NotReady) => {
-                // good, we expect not to be ready yet
-            }
-            r => panic!("spawn poll returned: {:?}", r),
-        }
-
-        // this should trigger the future `a_rx` to be awoken
-        // and executing in a Rayon background thread
-        a_tx.send(22).unwrap();
-
-        // now we wait for `rf` to complete; when it does, it will
-        // also signal the `PanicUnpark` to wake up (that is
-        // *supposed* to be what triggers us to `poll` again, but
-        // we are sidestepping that)
-        let v = spawn.into_inner().rayon_wait().unwrap();
-        assert_eq!(v, 22);
-    });
-    panic!("scope failed to panic!");
-
-    struct PanicUnpark;
-
-    impl Unpark for PanicUnpark {
-        fn unpark(&self) {
-            panic!("Hello, world!");
-        }
-    }
-}
-
-#[test]
-fn double_unpark() {
-    let unpark0 = Arc::new(TrackUnpark { value: AtomicUsize::new(0) });
-    let unpark1 = Arc::new(TrackUnpark { value: AtomicUsize::new(0) });
-    let mut _tag = None;
-    scope(|s| {
-        let (a_tx, a_rx) = oneshot::channel::<u32>();
-        let rf = s.spawn_future(a_rx);
-
-        let mut spawn = task::spawn(rf);
-
-        // test that we don't panic if people try to install a task many times;
-        // even if they are different tasks
-        for i in 0..22 {
-            let u = if i % 2 == 0 {
-                unpark0.clone()
-            } else {
-                unpark1.clone()
-            };
-            match spawn.poll_future(u) {
-                Ok(Async::NotReady) => {
-                    // good, we expect not to be ready yet
-                }
-                r => panic!("spawn poll returned: {:?}", r),
-            }
-        }
-
-        a_tx.send(22).unwrap();
-
-        // just hold onto `rf` to ensure that nothing is cancelled
-        _tag = Some(spawn.into_inner());
-    });
-
-    // Since scope is done, our spawned future must have completed. It
-    // should have signalled the unpark value we gave it -- but
-    // exactly once, even though we called `poll` many times.
-    assert_eq!(unpark1.value.load(Ordering::SeqCst), 1);
-
-    // unpark0 was not the last unpark supplied, so it will never be signalled
-    assert_eq!(unpark0.value.load(Ordering::SeqCst), 0);
-
-    struct TrackUnpark {
-        value: AtomicUsize,
-    }
-
-    impl Unpark for TrackUnpark {
-        fn unpark(&self) {
-            self.value.fetch_add(1, Ordering::SeqCst);
-        }
-    }
-}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-core/src/internal/mod.rs
@@ -0,0 +1,8 @@
+//! The internal directory contains internal APIs not meant to be
+//! exposed to "end-users" of Rayon, but rather which are useful for
+//! constructing abstractions.
+//!
+//! These APIs are still unstable.
+
+pub mod task;
+pub mod worker;
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-core/src/internal/task.rs
@@ -0,0 +1,84 @@
+//! Internal, unsafe APIs for creating scoped tasks. Intended for
+//! building abstractions atop the rayon-core thread pool, rather than
+//! direct use by end users. These APIs are mostly analogous to the
+//! (safe) `scope`/`spawn` APIs, but with some unsafe requirements
+//! that permit greater efficiency.
+
+use std::any::Any;
+use std::sync::Arc;
+
+/// Represents a task that can be scheduled onto the Rayon
+/// thread-pool. Once a task is scheduled, it will execute exactly
+/// once (eventually).
+pub trait Task: Send + Sync {
+    /// Invoked by the thread-pool when the task is ready to execute.
+    fn execute(this: Arc<Self>);
+}
+
+/// Represents a handle onto some Rayon scope. This could be either a
+/// local scope created by the `scope()` function or the global scope
+/// for a thread-pool. To get a scope-handle, you can invoke
+/// `ToScopeHandle::to_scope_handle()` on either a `scope` value or a
+/// `ThreadPool`.
+///
+/// The existence of `ScopeHandle` offers a guarantee:
+///
+/// - The Rust lifetime `'scope` will not end until the scope-handle
+///   is dropped, or until you invoke `panicked()` or `ok()`.
+///
+/// This trait is intended to be used as follows:
+///
+/// - You have a parallel task of type `T` to perform where `T: 's`,
+///   meaning that any references that `T` contains outlive the lifetime
+///   `'s`.
+/// - You obtain a scope handle `h` of type `H` where `H:
+///   ScopeHandle<'s>`; typically this would be by invoking
+///   `to_scope_handle()` on a Rayon scope (of type `Scope<'s>`) or a
+///   thread-pool (in which case `'s == 'static`).
+/// - You invoke `h.spawn()` to start your job(s). This may be done
+///   many times.
+///   - Note that `h.spawn()` is an unsafe method. You must ensure
+///     that your parallel jobs have completed before moving to
+///     the next step.
+/// - Eventually, when all invocations are complete, you invoke
+///   either `panicked()` or `ok()`.
+pub unsafe trait ScopeHandle<'scope>: 'scope {
+    /// Enqueues a task for execution within the thread-pool. The task
+    /// will eventually be invoked, and once it is, the `Arc` will be
+    /// dropped.
+    ///
+    /// **Unsafe:** The caller must guarantee that the scope handle
+    /// (`self`) will not be dropped (nor will `ok()` or `panicked()`
+    /// be called) until the task executes. Otherwise, the lifetime
+    /// `'scope` may end while the task is still pending.
+    unsafe fn spawn_task<T: Task + 'scope>(&self, task: Arc<T>);
+
+    /// Indicates that some sub-task of this scope panicked with the
+    /// given `err`. This panic will be propagated back to the user as
+    /// appropriate, depending on how this scope handle was derived.
+    ///
+    /// This takes ownership of the scope handle, meaning that once
+    /// you invoke `panicked`, the scope is permitted to terminate
+    /// (and, in particular, the Rust lifetime `'scope` may end).
+    fn panicked(self, err: Box<Any + Send>);
+
+    /// Indicates that the sub-tasks of this scope that you have
+    /// spawned concluded successfully.
+    ///
+    /// This takes ownership of the scope handle, meaning that once
+    /// you invoke `ok`, the scope is permitted to terminate
+    /// (and, in particular, the Rust lifetime `'scope` may end).
+    fn ok(self);
+}
+
+/// Converts a Rayon structure (typically a `Scope` or `ThreadPool`)
+/// into a "scope handle". See the `ScopeHandle` trait for more
+/// details.
+pub trait ToScopeHandle<'scope> {
+    /// Scope handle type that gets produced.
+    type ScopeHandle: ScopeHandle<'scope>;
+
+    /// Convert the receiver into a scope handle.
+    fn to_scope_handle(&self) -> Self::ScopeHandle;
+}
+
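As a rough, compile-only sketch of how these pieces fit together (not part of the patch, and only meaningful when the `rayon_unstable` cfg below is enabled), a caller might implement `Task` and hand it to a `ScopeHandle`; `PrintTask` and `schedule_on` are hypothetical names, and the comments restate the safety contract documented above.

```rust
extern crate rayon_core;

use std::sync::Arc;
use rayon_core::internal::task::{ScopeHandle, Task};

/// Hypothetical unit of work: prints a message when the pool executes it.
struct PrintTask {
    msg: &'static str,
}

impl Task for PrintTask {
    fn execute(this: Arc<Self>) {
        // Called exactly once by the thread pool; the Arc is dropped afterwards.
        println!("{}", this.msg);
    }
}

/// Hypothetical scheduling helper. Unsafe because, per `spawn_task`, the handle
/// must not be dropped (nor `ok()`/`panicked()` called) before the task runs;
/// arranging that is the caller's responsibility.
unsafe fn schedule_on<'scope, H: ScopeHandle<'scope>>(handle: &H, msg: &'static str) {
    handle.spawn_task(Arc::new(PrintTask { msg }));
}
```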
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-core/src/internal/worker.rs
@@ -0,0 +1,67 @@
+//! Internal, unsafe APIs for manipulating or querying the current
+//! worker thread. Intended for building abstractions atop the
+//! rayon-core thread pool, rather than direct use by end users.
+
+use std::fmt;
+use latch::LatchProbe;
+use registry;
+
+/// Represents the active worker thread.
+pub struct WorkerThread<'w> {
+    thread: &'w registry::WorkerThread
+}
+
+impl<'w> WorkerThread<'w> {
+    /// Causes the worker thread to wait until `f()` returns true.
+    /// While the thread is waiting, it will attempt to steal work
+    /// from other threads, and may go to sleep if there is no work to
+    /// steal.
+    ///
+    /// **Dead-lock warning: This is a low-level interface and cannot
+    /// be used to wait on arbitrary conditions.** In particular, if
+    /// the Rayon thread goes to sleep, it will only be awoken when
+    /// new rayon events occur (e.g., `spawn()` or `join()` is
+    /// invoked, or one of the methods on a `ScopeHandle`). Therefore,
+    /// you must ensure that, once the condition `f()` becomes true,
+    /// some "rayon event" will also occur to ensure that waiting
+    /// threads are awoken.
+    pub unsafe fn wait_until_true<F>(&self, f: F) where F: Fn() -> bool {
+        struct DummyLatch<'a, F: 'a> { f: &'a F }
+
+        impl<'a, F: Fn() -> bool> LatchProbe for DummyLatch<'a, F> {
+            fn probe(&self) -> bool {
+                (self.f)()
+            }
+        }
+
+        self.thread.wait_until(&DummyLatch { f: &f });
+    }
+}
+
+impl<'w> fmt::Debug for WorkerThread<'w> {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        fmt.debug_struct("WorkerThread")
+            .field("pool", &self.thread.registry().id())
+            .field("index", &self.thread.index())
+            .finish()
+    }
+}
+
+/// If the current thread is a Rayon worker thread, then the callback
+/// is invoked with a reference to the worker-thread, and the result of
+/// that callback is returned with `Some`. Otherwise, if we are not on
+/// a Rayon worker thread, `None` is immediately returned.
+pub fn if_in_worker_thread<F,R>(if_true: F) -> Option<R>
+    where F: FnOnce(&WorkerThread) -> R,
+{
+    let worker_thread = registry::WorkerThread::current();
+    if worker_thread.is_null() {
+        None
+    } else {
+        unsafe {
+            let wt = WorkerThread { thread: &*worker_thread };
+            Some(if_true(&wt))
+        }
+    }
+}
+
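A minimal usage sketch for this module (again gated on `rayon_unstable`): probe whether the caller is already inside the pool. `where_am_i` is an illustrative name, not part of the crate.

```rust
extern crate rayon_core;

use rayon_core::internal::worker::if_in_worker_thread;

/// Hypothetical helper: report whether we are currently on a Rayon worker.
fn where_am_i() -> String {
    if_in_worker_thread(|wt| format!("on a worker: {:?}", wt))
        .unwrap_or_else(|| String::from("not on a Rayon worker thread"))
}

fn main() {
    // Outside any pool, `if_in_worker_thread` returns None and we take the fallback.
    println!("{}", where_am_i());
}
```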
--- a/third_party/rust/rayon-core/src/job.rs
+++ b/third_party/rust/rayon-core/src/job.rs
@@ -11,16 +11,19 @@ pub enum JobResult<T> {
 }
 
 /// A `Job` is used to advertise work for other threads that they may
 /// want to steal. In accordance with time honored tradition, jobs are
 /// arranged in a deque, so that thieves can take from the top of the
 /// deque while the main worker manages the bottom of the deque. This
 /// deque is managed by the `thread_pool` module.
 pub trait Job {
+    /// Unsafe: this may be called from a different thread than the one
+    /// which scheduled the job, so the implementer must ensure the
+    /// appropriate traits are met, whether `Send`, `Sync`, or both.
     unsafe fn execute(this: *const Self);
 }
 
 /// Effectively a Job trait object. Each JobRef **must** be executed
 /// exactly once, or else data may leak.
 ///
 /// Internally, we store the job's data in a `*const ()` pointer.  The
 /// true type is something like `*const StackJob<...>`, but we hide
@@ -30,16 +33,18 @@ pub struct JobRef {
     pointer: *const (),
     execute_fn: unsafe fn(*const ()),
 }
 
 unsafe impl Send for JobRef {}
 unsafe impl Sync for JobRef {}
 
 impl JobRef {
+    /// Unsafe: caller asserts that `data` will remain valid until the
+    /// job is executed.
     pub unsafe fn new<T>(data: *const T) -> JobRef
         where T: Job
     {
         let fn_ptr: unsafe fn(*const T) = <T as Job>::execute;
 
         // erase types:
         let fn_ptr: unsafe fn(*const ()) = mem::transmute(fn_ptr);
         let pointer = data as *const ();
@@ -53,93 +58,102 @@ impl JobRef {
     #[inline]
     pub unsafe fn execute(&self) {
         (self.execute_fn)(self.pointer)
     }
 }
 
 /// A job that will be owned by a stack slot. This means that when it
 /// executes it need not free any heap data, the cleanup occurs when
-/// the stack frame is later popped.
-pub struct StackJob<L: Latch, F, R> {
+/// the stack frame is later popped.  The function parameter indicates
+/// `true` if the job was stolen -- executed on a different thread.
+pub struct StackJob<L, F, R>
+    where L: Latch + Sync,
+          F: FnOnce(bool) -> R + Send,
+          R: Send
+{
     pub latch: L,
     func: UnsafeCell<Option<F>>,
     result: UnsafeCell<JobResult<R>>,
 }
 
-impl<L: Latch, F, R> StackJob<L, F, R>
-    where F: FnOnce() -> R + Send
+impl<L, F, R> StackJob<L, F, R>
+    where L: Latch + Sync,
+          F: FnOnce(bool) -> R + Send,
+          R: Send
 {
     pub fn new(func: F, latch: L) -> StackJob<L, F, R> {
         StackJob {
             latch: latch,
             func: UnsafeCell::new(Some(func)),
             result: UnsafeCell::new(JobResult::None),
         }
     }
 
     pub unsafe fn as_job_ref(&self) -> JobRef {
         JobRef::new(self)
     }
 
-    pub unsafe fn run_inline(self) -> R {
-        self.func.into_inner().unwrap()()
+    pub unsafe fn run_inline(self, stolen: bool) -> R {
+        self.func.into_inner().unwrap()(stolen)
     }
 
     pub unsafe fn into_result(self) -> R {
         self.result.into_inner().into_return_value()
     }
 }
 
-impl<L: Latch, F, R> Job for StackJob<L, F, R>
-    where F: FnOnce() -> R
+impl<L, F, R> Job for StackJob<L, F, R>
+    where L: Latch + Sync,
+          F: FnOnce(bool) -> R + Send,
+          R: Send
 {
     unsafe fn execute(this: *const Self) {
         let this = &*this;
         let abort = unwind::AbortIfPanic;
         let func = (*this.func.get()).take().unwrap();
-        (*this.result.get()) = match unwind::halt_unwinding(|| func()) {
+        (*this.result.get()) = match unwind::halt_unwinding(|| func(true)) {
             Ok(x) => JobResult::Ok(x),
             Err(x) => JobResult::Panic(x),
         };
         this.latch.set();
         mem::forget(abort);
     }
 }
 
 /// Represents a job stored in the heap. Used to implement
 /// `scope`. Unlike `StackJob`, when executed, `HeapJob` simply
 /// invokes a closure, which then triggers the appropriate logic to
 /// signal that the job executed.
 ///
 /// (Probably `StackJob` should be refactored in a similar fashion.)
 pub struct HeapJob<BODY>
-    where BODY: FnOnce()
+    where BODY: FnOnce() + Send
 {
     job: UnsafeCell<Option<BODY>>,
 }
 
 impl<BODY> HeapJob<BODY>
-    where BODY: FnOnce()
+    where BODY: FnOnce() + Send
 {
     pub fn new(func: BODY) -> Self {
         HeapJob { job: UnsafeCell::new(Some(func)) }
     }
 
     /// Creates a `JobRef` from this job -- note that this hides all
     /// lifetimes, so it is up to you to ensure that this JobRef
     /// doesn't outlive any data that it closes over.
     pub unsafe fn as_job_ref(self: Box<Self>) -> JobRef {
         let this: *const Self = mem::transmute(self);
         JobRef::new(this)
     }
 }
 
 impl<BODY> Job for HeapJob<BODY>
-    where BODY: FnOnce()
+    where BODY: FnOnce() + Send
 {
     unsafe fn execute(this: *const Self) {
         let this: Box<Self> = mem::transmute(this);
         let job = (*this.job.get()).take().unwrap();
         job();
     }
 }
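`JobRef` above erases the concrete job type behind a raw data pointer plus a transmuted fn pointer. The standalone sketch below restates that pattern with hypothetical names (`Work`, `ErasedRef`, `Hello`) so the mechanics are visible outside the diff; it mirrors the code rather than replacing it.

```rust
use std::mem;

/// Hypothetical stand-in for the `Job` trait: a type-specific entry point.
trait Work {
    unsafe fn run(this: *const Self);
}

/// Hypothetical stand-in for `JobRef`: data pointer plus erased fn pointer.
struct ErasedRef {
    pointer: *const (),
    run_fn: unsafe fn(*const ()),
}

impl ErasedRef {
    /// Caller asserts that `data` stays valid until `run` is called,
    /// just like `JobRef::new`.
    unsafe fn new<T: Work>(data: *const T) -> ErasedRef {
        let run_fn: unsafe fn(*const T) = <T as Work>::run;
        ErasedRef {
            pointer: data as *const (),
            // Erase the argument type of the fn pointer, exactly as `JobRef` does.
            run_fn: mem::transmute(run_fn),
        }
    }

    unsafe fn run(&self) {
        (self.run_fn)(self.pointer)
    }
}

struct Hello;

impl Work for Hello {
    unsafe fn run(_this: *const Self) {
        println!("hello from an erased job");
    }
}

fn main() {
    let job = Hello;
    unsafe {
        let r = ErasedRef::new(&job as *const Hello);
        r.run(); // `job` is still alive on this stack frame, as required
    }
}
```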
 
--- a/third_party/rust/rayon-core/src/join/mod.rs
+++ b/third_party/rust/rayon-core/src/join/mod.rs
@@ -1,82 +1,154 @@
 use latch::{LatchProbe, SpinLatch};
-#[allow(unused_imports)]
 use log::Event::*;
 use job::StackJob;
 use registry::{self, WorkerThread};
 use std::any::Any;
 use unwind;
 
+use FnContext;
+
 #[cfg(test)]
 mod test;
 
-/// The `join` function takes two closures and *potentially* runs them
-/// in parallel. It returns a pair of the results from those closures.
+/// Takes two closures and *potentially* runs them in parallel. It
+/// returns a pair of the results from those closures.
 ///
 /// Conceptually, calling `join()` is similar to spawning two threads,
 /// one executing each of the two closures. However, the
 /// implementation is quite different and incurs very low
 /// overhead. The underlying technique is called "work stealing": the
 /// Rayon runtime uses a fixed pool of worker threads and attempts to
 /// only execute code in parallel when there are idle CPUs to handle
 /// it.
 ///
-/// ### Warning about blocking I/O
+/// When `join` is called from outside the thread pool, the calling
+/// thread will block while the closures execute in the pool.  When
+/// `join` is called within the pool, the calling thread still actively
+/// participates in the thread pool. It will begin by executing closure
+/// A (on the current thread). While it is doing that, it will advertise
+/// closure B as being available for other threads to execute. Once closure A
+/// has completed, the current thread will try to execute closure B;
+/// if however closure B has been stolen, then it will look for other work
+/// while waiting for the thief to fully execute closure B. (This is the
+/// typical work-stealing strategy).
+///
+/// # Examples
+///
+/// This example uses join to perform a quick-sort (note this is not a
+/// particularly optimized implementation: if you **actually** want to
+/// sort for real, you should prefer [the `par_sort` method] offered
+/// by Rayon).
+///
+/// [the `par_sort` method]: ../slice/trait.ParallelSliceMut.html#method.par_sort
+///
+/// ```rust
+/// # use rayon_core as rayon;
+/// let mut v = vec![5, 1, 8, 22, 0, 44];
+/// quick_sort(&mut v);
+/// assert_eq!(v, vec![0, 1, 5, 8, 22, 44]);
+///
+/// fn quick_sort<T:PartialOrd+Send>(v: &mut [T]) {
+///    if v.len() > 1 {
+///        let mid = partition(v);
+///        let (lo, hi) = v.split_at_mut(mid);
+///        rayon::join(|| quick_sort(lo),
+///                    || quick_sort(hi));
+///    }
+/// }
+///
+/// // Partition rearranges all items `<=` to the pivot
+/// // item (arbitrary selected to be the last item in the slice)
+/// // to the first half of the slice. It then returns the
+/// // "dividing point" where the pivot is placed.
+/// fn partition<T:PartialOrd+Send>(v: &mut [T]) -> usize {
+///     let pivot = v.len() - 1;
+///     let mut i = 0;
+///     for j in 0..pivot {
+///         if v[j] <= v[pivot] {
+///             v.swap(i, j);
+///             i += 1;
+///         }
+///     }
+///     v.swap(i, pivot);
+///     i
+/// }
+/// ```
+///
+/// # Warning about blocking I/O
 ///
 /// The assumption is that the closures given to `join()` are
 /// CPU-bound tasks that do not perform I/O or other blocking
 /// operations. If you do perform I/O, and that I/O should block
 /// (e.g., waiting for a network request), the overall performance may
 /// be poor.  Moreover, if you cause one closure to be blocked waiting
 /// on another (for example, using a channel), that could lead to a
 /// deadlock.
 ///
-/// ### Panics
+/// # Panics
 ///
 /// No matter what happens, both closures will always be executed.  If
 /// a single closure panics, whether it be the first or second
 /// closure, that panic will be propagated and hence `join()` will
 /// panic with the same panic value. If both closures panic, `join()`
 /// will panic with the panic value from the first closure.
 pub fn join<A, B, RA, RB>(oper_a: A, oper_b: B) -> (RA, RB)
     where A: FnOnce() -> RA + Send,
           B: FnOnce() -> RB + Send,
           RA: Send,
           RB: Send
 {
-    registry::in_worker(|worker_thread| unsafe {
+    join_context(|_| oper_a(), |_| oper_b())
+}
+
+/// Identical to `join`, except that the closures have a parameter
+/// that provides context for the way the closure has been called,
+/// especially indicating whether they're executing on a different
+/// thread than where `join_context` was called.  This will occur if
+/// the second job is stolen by a different thread, or if
+/// `join_context` was called from outside the thread pool to begin
+/// with.
+pub fn join_context<A, B, RA, RB>(oper_a: A, oper_b: B) -> (RA, RB)
+    where A: FnOnce(FnContext) -> RA + Send,
+          B: FnOnce(FnContext) -> RB + Send,
+          RA: Send,
+          RB: Send
+{
+    registry::in_worker(|worker_thread, injected| unsafe {
         log!(Join { worker: worker_thread.index() });
 
         // Create virtual wrapper for task b; this all has to be
         // done here so that the stack frame can keep it all live
         // long enough.
-        let job_b = StackJob::new(oper_b, SpinLatch::new());
+        let job_b = StackJob::new(|migrated| oper_b(FnContext::new(migrated)),
+                                  SpinLatch::new());
         let job_b_ref = job_b.as_job_ref();
         worker_thread.push(job_b_ref);
 
         // Execute task a; hopefully b gets stolen in the meantime.
-        let result_a = match unwind::halt_unwinding(oper_a) {
+        let status_a = unwind::halt_unwinding(move || oper_a(FnContext::new(injected)));
+        let result_a = match status_a {
             Ok(v) => v,
             Err(err) => join_recover_from_panic(worker_thread, &job_b.latch, err),
         };
 
         // Now that task A has finished, try to pop job B from the
         // local stack.  It may already have been popped by job A; it
         // may also have been stolen. There may also be some tasks
         // pushed on top of it in the stack, and we will have to pop
         // those off to get to it.
         while !job_b.latch.probe() {
             if let Some(job) = worker_thread.take_local_job() {
                 if job == job_b_ref {
                     // Found it! Let's run it.
                     //
                     // Note that this could panic, but it's ok if we unwind here.
                     log!(PoppedRhs { worker: worker_thread.index() });
-                    let result_b = job_b.run_inline();
+                    let result_b = job_b.run_inline(injected);
                     return (result_a, result_b);
                 } else {
                     log!(PoppedJob { worker: worker_thread.index() });
                     worker_thread.execute(job);
                 }
             } else {
                 // Local deque is empty. Time to steal from other
                 // threads.
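A small usage sketch for the new `join_context` entry point (not from the patch; it assumes the `rayon_core` crate name used in the doc examples and the `FnContext::migrated()` accessor exercised by the tests that follow):

```rust
extern crate rayon_core as rayon;

fn main() {
    let pool = rayon::ThreadPoolBuilder::new().num_threads(2).build().unwrap();
    let (a, b) = pool.install(|| {
        rayon::join_context(
            // Each closure learns whether it was migrated to another thread
            // (stolen by a worker, or injected from outside the pool).
            |ctx| if ctx.migrated() { "A ran elsewhere" } else { "A ran locally" },
            |ctx| if ctx.migrated() { "B ran elsewhere" } else { "B ran locally" },
        )
    });
    println!("{}, {}", a, b);
}
```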
--- a/third_party/rust/rayon-core/src/join/test.rs
+++ b/third_party/rust/rayon-core/src/join/test.rs
@@ -1,14 +1,13 @@
 //! Tests for the join code.
 
-use Configuration;
+use ThreadPoolBuilder;
 use join::*;
 use rand::{Rng, SeedableRng, XorShiftRng};
-use thread_pool::*;
 use unwind;
 
 fn quick_sort<T: PartialOrd + Send>(v: &mut [T]) {
     if v.len() <= 1 {
         return;
     }
 
     let mid = partition(v);
@@ -39,17 +38,17 @@ fn sort() {
     assert_eq!(data, sorted_data);
 }
 
 #[test]
 fn sort_in_pool() {
     let mut rng = XorShiftRng::from_seed([0, 1, 2, 3]);
     let mut data: Vec<_> = (0..12 * 1024).map(|_| rng.next_u32()).collect();
 
-    let pool = ThreadPool::new(Configuration::new()).unwrap();
+    let pool = ThreadPoolBuilder::new().build().unwrap();
     let mut sorted_data = data.clone();
     sorted_data.sort();
     pool.install(|| quick_sort(&mut data));
     assert_eq!(data, sorted_data);
 }
 
 #[test]
 #[should_panic(expected = "Hello, world!")]
@@ -72,8 +71,42 @@ fn panic_propagate_both() {
 #[test]
 fn panic_b_still_executes() {
     let mut x = false;
     match unwind::halt_unwinding(|| join(|| panic!("Hello, world!"), || x = true)) {
         Ok(_) => panic!("failed to propagate panic from closure A,"),
         Err(_) => assert!(x, "closure b failed to execute"),
     }
 }
+
+#[test]
+fn join_context_both() {
+    // If we're not in a pool, both should be marked stolen as they're injected.
+    let (a_migrated, b_migrated) = join_context(|a| a.migrated(), |b| b.migrated());
+    assert!(a_migrated);
+    assert!(b_migrated);
+}
+
+#[test]
+fn join_context_neither() {
+    // If we're already in a 1-thread pool, neither job should be stolen.
+    let pool = ThreadPoolBuilder::new().num_threads(1).build().unwrap();
+    let (a_migrated, b_migrated) = pool.install(|| {
+        join_context(|a| a.migrated(), |b| b.migrated())
+    });
+    assert!(!a_migrated);
+    assert!(!b_migrated);
+}
+
+#[test]
+fn join_context_second() {
+    use std::sync::Barrier;
+
+    // If we're already in a 2-thread pool, the second job should be stolen.
+    let barrier = Barrier::new(2);
+    let pool = ThreadPoolBuilder::new().num_threads(2).build().unwrap();
+    let (a_migrated, b_migrated) = pool.install(|| {
+        join_context(|a| { barrier.wait(); a.migrated() },
+                     |b| { barrier.wait(); b.migrated() })
+    });
+    assert!(!a_migrated);
+    assert!(b_migrated);
+}
--- a/third_party/rust/rayon-core/src/latch.rs
+++ b/third_party/rust/rayon-core/src/latch.rs
@@ -1,10 +1,13 @@
 use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
 use std::sync::{Mutex, Condvar};
+use std::usize;
+
+use sleep::Sleep;
 
 /// We define various kinds of latches, which are all a primitive signaling
 /// mechanism. A latch starts as false. Eventually someone calls `set()` and
 /// it becomes true. You can test if it has been set by calling `probe()`.
 ///
 /// Some kinds of latches, but not all, support a `wait()` operation
 /// that will wait until the latch is set, blocking efficiently. That
 /// is not part of the trait since it is not possible to do with all
@@ -108,16 +111,17 @@ impl Latch for LockLatch {
     }
 }
 
 /// Counting latches are used to implement scopes. They track a
 /// counter. Unlike other latches, calling `set()` does not
 /// necessarily make the latch be considered `set()`; instead, it just
 /// decrements the counter. The latch is only "set" (in the sense that
 /// `probe()` returns true) once the counter reaches zero.
+#[derive(Debug)]
 pub struct CountLatch {
     counter: AtomicUsize,
 }
 
 impl CountLatch {
     #[inline]
     pub fn new() -> CountLatch {
         CountLatch { counter: AtomicUsize::new(1) }
@@ -140,8 +144,42 @@ impl LatchProbe for CountLatch {
 
 impl Latch for CountLatch {
     /// Set the latch to true, releasing all threads who are waiting.
     #[inline]
     fn set(&self) {
         self.counter.fetch_sub(1, Ordering::SeqCst);
     }
 }
+
+
+/// A tickling latch wraps another latch type, and will also awaken a thread
+/// pool when it is set.  This is useful for jobs injected between thread pools,
+/// so the source pool can continue processing its own work while waiting.
+pub struct TickleLatch<'a, L: Latch> {
+    inner: L,
+    sleep: &'a Sleep,
+}
+
+impl<'a, L: Latch> TickleLatch<'a, L> {
+    #[inline]
+    pub fn new(latch: L, sleep: &'a Sleep) -> Self {
+        TickleLatch {
+            inner: latch,
+            sleep: sleep,
+        }
+    }
+}
+
+impl<'a, L: Latch> LatchProbe for TickleLatch<'a, L> {
+    #[inline]
+    fn probe(&self) -> bool {
+        self.inner.probe()
+    }
+}
+
+impl<'a, L: Latch> Latch for TickleLatch<'a, L> {
+    #[inline]
+    fn set(&self) {
+        self.inner.set();
+        self.sleep.tickle(usize::MAX);
+    }
+}
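To make the latch contract described at the top of this file concrete, here is a minimal sketch (not part of the crate) of the simplest possible latch: it starts false and `set()` flips it. `FlagLatch` is a hypothetical name; the real `SpinLatch`, `LockLatch`, `CountLatch`, and `TickleLatch` layer waiting, counting, and pool wake-ups on top of this idea.

```rust
use std::sync::atomic::{AtomicBool, Ordering};

/// Hypothetical latch: starts unset; `set()` flips it; `probe()` observes it.
struct FlagLatch {
    flag: AtomicBool,
}

impl FlagLatch {
    fn new() -> FlagLatch {
        FlagLatch { flag: AtomicBool::new(false) }
    }

    fn probe(&self) -> bool {
        self.flag.load(Ordering::SeqCst)
    }

    fn set(&self) {
        self.flag.store(true, Ordering::SeqCst);
    }
}

fn main() {
    let latch = FlagLatch::new();
    assert!(!latch.probe());
    latch.set();
    assert!(latch.probe());
}
```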
--- a/third_party/rust/rayon-core/src/lib.rs
+++ b/third_party/rust/rayon-core/src/lib.rs
@@ -1,10 +1,10 @@
 //!
-//! [Under construction](https://github.com/nikomatsakis/rayon/issues/231)
+//! [Under construction](https://github.com/rayon-rs/rayon/issues/231)
 //!
 //! ## Restricting multiple versions
 //!
 //! In order to ensure proper coordination between threadpools, and especially
 //! to make sure there's only one global threadpool, `rayon-core` is actively
 //! restricted from building multiple versions of itself into a single target.
 //! You may see a build error like this in violation:
 //!
@@ -14,94 +14,114 @@
 //! ```
 //!
 //! While we strive to keep `rayon-core` semver-compatible, it's still
 //! possible to arrive at this situation if different crates have overly
 //! restrictive tilde or inequality requirements for `rayon-core`.  The
 //! conflicting requirements will need to be resolved before the build will
 //! succeed.
 
-#![allow(non_camel_case_types)] // I prefer to use ALL_CAPS for type parameters
+#![doc(html_root_url = "https://docs.rs/rayon-core/1.4")]
+#![deny(missing_debug_implementations)]
+#![deny(missing_docs)]
 #![cfg_attr(test, feature(conservative_impl_trait))]
 
-// If you're not compiling the unstable code, it often happens that
-// there is stuff that is considered "dead code" and so forth. So
-// disable warnings in that scenario.
-#![cfg_attr(not(feature = "unstable"), allow(warnings))]
-
-#[allow(unused_imports)]
-use log::Event::*;
 use std::any::Any;
 use std::env;
+use std::io;
 use std::error::Error;
+use std::marker::PhantomData;
 use std::str::FromStr;
 use std::fmt;
 
-extern crate coco;
+extern crate crossbeam_deque;
 #[macro_use]
 extern crate lazy_static;
-#[cfg(rayon_unstable)]
-extern crate futures;
 extern crate libc;
 extern crate num_cpus;
 extern crate rand;
 
 #[macro_use]
 mod log;
 
 mod latch;
 mod join;
 mod job;
 mod registry;
-#[cfg(rayon_unstable)]
-mod future;
 mod scope;
 mod sleep;
 mod spawn;
 mod test;
 mod thread_pool;
 mod unwind;
 mod util;
 
+#[cfg(rayon_unstable)]
+pub mod internal;
 pub use thread_pool::ThreadPool;
 pub use thread_pool::current_thread_index;
 pub use thread_pool::current_thread_has_pending_tasks;
-pub use join::join;
+pub use join::{join, join_context};
 pub use scope::{scope, Scope};
 pub use spawn::spawn;
-#[cfg(rayon_unstable)]
-pub use spawn::spawn_future;
-#[cfg(rayon_unstable)]
-pub use future::RayonFuture;
 
 /// Returns the number of threads in the current registry. If this
 /// code is executing within a Rayon thread-pool, then this will be
 /// the number of threads for the thread-pool of the current
 /// thread. Otherwise, it will be the number of threads for the global
 /// thread-pool.
 ///
 /// This can be useful when trying to judge how many times to split
 /// parallel work (the parallel iterator traits use this value
 /// internally for this purpose).
 ///
-/// ### Future compatibility note
+/// # Future compatibility note
 ///
 /// Note that unless this thread-pool was created with a
-/// configuration that specifies the number of threads, then this
+/// builder that specifies the number of threads, then this
 /// number may vary over time in future versions (see [the
 /// `num_threads()` method for details][snt]).
 ///
-/// [snt]: struct.Configuration.html#method.num_threads
+/// [snt]: struct.ThreadPoolBuilder.html#method.num_threads
 pub fn current_num_threads() -> usize {
     ::registry::Registry::current_num_threads()
 }
 
-/// Contains the rayon thread pool configuration.
+/// Error when initializing a thread pool.
+#[derive(Debug)]
+pub struct ThreadPoolBuildError {
+    kind: ErrorKind,
+}
+
+#[derive(Debug)]
+enum ErrorKind {
+    GlobalPoolAlreadyInitialized,
+    IOError(io::Error),
+}
+
+/// Used to create a new [`ThreadPool`] or to configure the global rayon thread pool.
+/// ## Creating a ThreadPool
+/// The following creates a thread pool with 22 threads.
+///
+/// ```rust
+/// # use rayon_core as rayon;
+/// let pool = rayon::ThreadPoolBuilder::new().num_threads(22).build().unwrap();
+/// ```
+///
+/// To instead configure the global thread pool, use [`build_global()`]:
+///
+/// ```rust
+/// # use rayon_core as rayon;
+/// rayon::ThreadPoolBuilder::new().num_threads(22).build_global().unwrap();
+/// ```
+///
+/// [`ThreadPool`]: struct.ThreadPool.html
+/// [`build_global()`]: struct.ThreadPoolBuilder.html#method.build_global
 #[derive(Default)]
-pub struct Configuration {
+pub struct ThreadPoolBuilder {
     /// The number of threads in the rayon thread pool.
     /// If zero will use the RAYON_NUM_THREADS environment variable.
     /// If RAYON_NUM_THREADS is invalid or zero will use the default.
     num_threads: usize,
 
     /// Custom closure, if any, to handle a panic that we cannot propagate
     /// anywhere else.
     panic_handler: Option<Box<PanicHandler>>,
@@ -119,34 +139,71 @@ pub struct Configuration {
     exit_handler: Option<Box<ExitHandler>>,
 
     /// If false, worker threads will execute spawned jobs in a
     /// "depth-first" fashion. If true, they will do a "breadth-first"
     /// fashion. Depth-first is the default.
     breadth_first: bool,
 }
 
+/// Contains the rayon thread pool configuration. Use [`ThreadPoolBuilder`] instead.
+///
+/// [`ThreadPoolBuilder`]: struct.ThreadPoolBuilder.html
+#[deprecated(note = "Use `ThreadPoolBuilder`")]
+#[derive(Default)]
+pub struct Configuration {
+    builder: ThreadPoolBuilder,
+}
+
 /// The type for a panic handling closure. Note that this same closure
 /// may be invoked multiple times in parallel.
 type PanicHandler = Fn(Box<Any + Send>) + Send + Sync;
 
 /// The type for a closure that gets invoked when a thread starts. The
 /// closure is passed the index of the thread on which it is invoked.
 /// Note that this same closure may be invoked multiple times in parallel.
 type StartHandler = Fn(usize) + Send + Sync;
 
 /// The type for a closure that gets invoked when a thread exits. The
 /// closure is passed the index of the thread on which it is invoked.
 /// Note that this same closure may be invoked multiple times in parallel.
 type ExitHandler = Fn(usize) + Send + Sync;
 
-impl Configuration {
-    /// Creates and return a valid rayon thread pool configuration, but does not initialize it.
-    pub fn new() -> Configuration {
-        Configuration::default()
+impl ThreadPoolBuilder {
+    /// Creates and returns a valid rayon thread pool builder, but does not initialize it.
+    pub fn new() -> ThreadPoolBuilder {
+        ThreadPoolBuilder::default()
+    }
+
+    /// Create a new `ThreadPool` initialized using this configuration.
+    pub fn build(self) -> Result<ThreadPool, ThreadPoolBuildError> {
+        thread_pool::build(self)
+    }
+
+    /// Initializes the global thread pool. This initialization is
+    /// **optional**.  If you do not call this function, the thread pool
+    /// will be automatically initialized with the default
+    /// configuration. Calling `build_global` is not recommended, except
+    /// in two scenarios:
+    ///
+    /// - You wish to change the default configuration.
+    /// - You are running a benchmark, in which case initializing may
+    ///   yield slightly more consistent results, since the worker threads
+    ///   will already be ready to go even in the first iteration.  But
+    ///   this cost is minimal.
+    ///
+    /// Initialization of the global thread pool happens exactly
+    /// once. Once started, the configuration cannot be
+    /// changed. Therefore, if you call `build_global` a second time, it
+    /// will return an error. An `Ok` result indicates that this
+    /// is the first initialization of the thread pool.
+    pub fn build_global(self) -> Result<(), ThreadPoolBuildError> {
+        let registry = try!(registry::init_global_registry(self));
+        registry.wait_until_primed();
+        Ok(())
     }
 
     /// Get the number of threads that will be used for the thread
     /// pool. See `num_threads()` for more information.
     fn get_num_threads(&self) -> usize {
         if self.num_threads > 0 {
             self.num_threads
         } else {
@@ -181,34 +238,34 @@ impl Configuration {
     ///
     /// If you specify a non-zero number of threads using this
     /// function, then the resulting thread-pools are guaranteed to
     /// start at most this number of threads.
     ///
     /// If `num_threads` is 0, or you do not call this function, then
     /// the Rayon runtime will select the number of threads
     /// automatically. At present, this is based on the
-    /// `RAYON_NUM_THREADS` environment variable (if set), 
-    /// or the number of logical CPUs (otherwise). 
+    /// `RAYON_NUM_THREADS` environment variable (if set),
+    /// or the number of logical CPUs (otherwise).
     /// In the future, however, the default behavior may
     /// change to dynamically add or remove threads as needed.
     ///
     /// **Future compatibility warning:** Given the default behavior
     /// may change in the future, if you wish to rely on a fixed
     /// number of threads, you should use this function to specify
     /// that number. To reproduce the current default behavior, you
     /// may wish to use the [`num_cpus`
     /// crate](https://crates.io/crates/num_cpus) to query the number
     /// of CPUs dynamically.
     ///
     /// **Old environment variable:** `RAYON_NUM_THREADS` is a one-to-one
     /// replacement of the now deprecated `RAYON_RS_NUM_CPUS` environment
     /// variable. If both variables are specified, `RAYON_NUM_THREADS` will
     /// be preferred.
-    pub fn num_threads(mut self, num_threads: usize) -> Configuration {
+    pub fn num_threads(mut self, num_threads: usize) -> ThreadPoolBuilder {
         self.num_threads = num_threads;
         self
     }
 
     /// Returns a copy of the current panic handler.
     fn take_panic_handler(&mut self) -> Option<Box<PanicHandler>> {
         self.panic_handler.take()
     }
@@ -222,17 +279,17 @@ impl Configuration {
     ///
     /// If no panic handler is set, the default is to abort the
     /// process, under the principle that panics should not go
     /// unobserved.
     ///
     /// If the panic handler itself panics, this will abort the
     /// process. To prevent this, wrap the body of your panic handler
     /// in a call to `std::panic::catch_unwind()`.
-    pub fn panic_handler<H>(mut self, panic_handler: H) -> Configuration
+    pub fn panic_handler<H>(mut self, panic_handler: H) -> ThreadPoolBuilder
         where H: Fn(Box<Any + Send>) + Send + Sync + 'static
     {
         self.panic_handler = Some(Box::new(panic_handler));
         self
     }
 
     /// Get the stack size of the worker threads
     fn get_stack_size(&self) -> Option<usize>{
@@ -280,17 +337,17 @@ impl Configuration {
     }
 
     /// Set a callback to be invoked on thread start.
     ///
     /// The closure is passed the index of the thread on which it is invoked.
     /// Note that this same closure may be invoked multiple times in parallel.
     /// If this closure panics, the panic will be passed to the panic handler.
     /// If that handler returns, then startup will continue normally.
-    pub fn start_handler<H>(mut self, start_handler: H) -> Configuration
+    pub fn start_handler<H>(mut self, start_handler: H) -> ThreadPoolBuilder
         where H: Fn(usize) + Send + Sync + 'static
     {
         self.start_handler = Some(Box::new(start_handler));
         self
     }
 
     /// Returns a current thread exit callback, leaving `None`.
     fn take_exit_handler(&mut self) -> Option<Box<ExitHandler>> {
@@ -298,67 +355,180 @@ impl Configuration {
     }
 
     /// Set a callback to be invoked on thread exit.
     ///
     /// The closure is passed the index of the thread on which it is invoked.
     /// Note that this same closure may be invoked multiple times in parallel.
     /// If this closure panics, the panic will be passed to the panic handler.
     /// If that handler returns, then the thread will exit normally.
-    pub fn exit_handler<H>(mut self, exit_handler: H) -> Configuration
+    pub fn exit_handler<H>(mut self, exit_handler: H) -> ThreadPoolBuilder
         where H: Fn(usize) + Send + Sync + 'static
     {
         self.exit_handler = Some(Box::new(exit_handler));
         self
     }
 }
 
-/// Initializes the global thread pool. This initialization is
-/// **optional**.  If you do not call this function, the thread pool
-/// will be automatically initialized with the default
-/// configuration. In fact, calling `initialize` is not recommended,
-/// except for in two scenarios:
-///
-/// - You wish to change the default configuration.
-/// - You are running a benchmark, in which case initializing may
-///   yield slightly more consistent results, since the worker threads
-///   will already be ready to go even in the first iteration.  But
-///   this cost is minimal.
-///
-/// Initialization of the global thread pool happens exactly
-/// once. Once started, the configuration cannot be
-/// changed. Therefore, if you call `initialize` a second time, it
-/// will return an error. An `Ok` result indicates that this
-/// is the first initialization of the thread pool.
-pub fn initialize(config: Configuration) -> Result<(), Box<Error>> {
-    let registry = try!(registry::init_global_registry(config));
-    registry.wait_until_primed();
-    Ok(())
+#[allow(deprecated)]
+impl Configuration {
+    /// Creates and returns a valid rayon thread pool configuration, but does not initialize it.
+    pub fn new() -> Configuration {
+        Configuration { builder: ThreadPoolBuilder::new() }
+    }
+
+    /// Deprecated in favor of `ThreadPoolBuilder::build`.
+    pub fn build(self) -> Result<ThreadPool, Box<Error + 'static>> {
+        self.builder.build().map_err(|e| e.into())
+    }
+
+    /// Deprecated in favor of `ThreadPoolBuilder::thread_name`.
+    pub fn thread_name<F>(mut self, closure: F) -> Self
+    where F: FnMut(usize) -> String + 'static {
+        self.builder = self.builder.thread_name(closure);
+        self
+    }
+
+    /// Deprecated in favor of `ThreadPoolBuilder::num_threads`.
+    pub fn num_threads(mut self, num_threads: usize) -> Configuration {
+        self.builder = self.builder.num_threads(num_threads);
+        self
+    }
+
+    /// Deprecated in favor of `ThreadPoolBuilder::panic_handler`.
+    pub fn panic_handler<H>(mut self, panic_handler: H) -> Configuration
+        where H: Fn(Box<Any + Send>) + Send + Sync + 'static
+    {
+        self.builder = self.builder.panic_handler(panic_handler);
+        self
+    }
+
+    /// Deprecated in favor of `ThreadPoolBuilder::stack_size`.
+    pub fn stack_size(mut self, stack_size: usize) -> Self {
+        self.builder = self.builder.stack_size(stack_size);
+        self
+    }
+
+    /// Deprecated in favor of `ThreadPoolBuilder::breadth_first`.
+    pub fn breadth_first(mut self) -> Self {
+        self.builder = self.builder.breadth_first();
+        self
+    }
+
+    /// Deprecated in favor of `ThreadPoolBuilder::start_handler`.
+    pub fn start_handler<H>(mut self, start_handler: H) -> Configuration
+        where H: Fn(usize) + Send + Sync + 'static
+    {
+        self.builder = self.builder.start_handler(start_handler);
+        self
+    }
+
+    /// Deprecated in favor of `ThreadPoolBuilder::exit_handler`.
+    pub fn exit_handler<H>(mut self, exit_handler: H) -> Configuration
+        where H: Fn(usize) + Send + Sync + 'static
+    {
+        self.builder = self.builder.exit_handler(exit_handler);
+        self
+    }
+
+    /// Returns a ThreadPoolBuilder with identical parameters.
+    fn into_builder(self) -> ThreadPoolBuilder {
+        self.builder
+    }
 }
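
Because every method on the deprecated `Configuration` delegates to the builder, existing call sites keep compiling (with deprecation warnings). A hedged sketch of that compatibility path, assuming `Configuration` remains publicly exported, as the test module later in this patch implies:

```rust
// Sketch only: old call sites keep compiling because each method delegates
// to the new builder.
#![allow(deprecated)]
extern crate rayon_core;

fn main() {
    let pool = rayon_core::Configuration::new()
        .num_threads(2)
        .build()
        .unwrap();
    pool.install(|| println!("still works through the deprecation shim"));
}
```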
 
-impl fmt::Debug for Configuration {
+impl ThreadPoolBuildError {
+    fn new(kind: ErrorKind) -> ThreadPoolBuildError {
+        ThreadPoolBuildError { kind: kind }
+    }
+}
+
+impl Error for ThreadPoolBuildError {
+    fn description(&self) -> &str {
+        match self.kind {
+            ErrorKind::GlobalPoolAlreadyInitialized => "The global thread pool has already been initialized.",
+            ErrorKind::IOError(ref e) => e.description(),
+        }
+    }
+}
+
+impl fmt::Display for ThreadPoolBuildError {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        let Configuration { ref num_threads, ref get_thread_name,
-                            ref panic_handler, ref stack_size,
-                            ref start_handler, ref exit_handler,
-                            ref breadth_first } = *self;
+        match self.kind {
+            ErrorKind::IOError(ref e) => e.fmt(f),
+            _ => self.description().fmt(f),
+        }
+    }
+}
 
-        // Just print `Some("<closure>")` or `None` to the debug
+/// Deprecated in favor of `ThreadPoolBuilder::build_global`.
+#[deprecated(note = "use `ThreadPoolBuilder::build_global`")]
+#[allow(deprecated)]
+pub fn initialize(config: Configuration) -> Result<(), Box<Error>> {
+    config.into_builder().build_global().map_err(|e| e.into())
+}
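
Since `initialize` now just forwards to the builder, downstream callers can migrate directly. A hedged sketch of the replacement call, plus the double-initialization error path described by `ThreadPoolBuildError` above; `ThreadPoolBuilder` and `build_global` are assumed public, as the diff indicates:

```rust
// Sketch only: migration from the deprecated global initializer.
extern crate rayon_core;

fn main() {
    // Replacement for `initialize(Configuration::new().num_threads(4))`.
    rayon_core::ThreadPoolBuilder::new()
        .num_threads(4)
        .build_global()
        .expect("first global initialization should succeed");

    // A second initialization reports the `GlobalPoolAlreadyInitialized`
    // case through `Display`.
    if let Err(e) = rayon_core::ThreadPoolBuilder::new().build_global() {
        println!("second init failed: {}", e);
    }
}
```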
+
+impl fmt::Debug for ThreadPoolBuilder {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        let ThreadPoolBuilder { ref num_threads, ref get_thread_name,
+                                ref panic_handler, ref stack_size,
+                                ref start_handler, ref exit_handler,
+                                ref breadth_first } = *self;
+
+        // Just print `Some(<closure>)` or `None` to the debug
         // output.
-        let get_thread_name = get_thread_name.as_ref().map(|_| "<closure>");
+        struct ClosurePlaceholder;
+        impl fmt::Debug for ClosurePlaceholder {
+            fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+                f.write_str("<closure>")
+            }
+        }
+        let get_thread_name = get_thread_name.as_ref().map(|_| ClosurePlaceholder);
+        let panic_handler = panic_handler.as_ref().map(|_| ClosurePlaceholder);
+        let start_handler = start_handler.as_ref().map(|_| ClosurePlaceholder);
+        let exit_handler = exit_handler.as_ref().map(|_| ClosurePlaceholder);
 
-        // Just print `Some("<closure>")` or `None` to the debug
-        // output.
-        let panic_handler = panic_handler.as_ref().map(|_| "<closure>");
-        let start_handler = start_handler.as_ref().map(|_| "<closure>");
-        let exit_handler = exit_handler.as_ref().map(|_| "<closure>");
-
-        f.debug_struct("Configuration")
+        f.debug_struct("ThreadPoolBuilder")
          .field("num_threads", num_threads)
          .field("get_thread_name", &get_thread_name)
          .field("panic_handler", &panic_handler)
          .field("stack_size", &stack_size)
          .field("start_handler", &start_handler)
          .field("exit_handler", &exit_handler)
          .field("breadth_first", &breadth_first)
          .finish()
     }
 }
+
+#[allow(deprecated)]
+impl fmt::Debug for Configuration {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        self.builder.fmt(f)
+    }
+}
+
+/// Provides the calling context to a closure called by `join_context`.
+#[derive(Debug)]
+pub struct FnContext {
+    migrated: bool,
+
+    /// disable `Send` and `Sync`, just for a little future-proofing.
+    _marker: PhantomData<*mut ()>,
+}
+
+impl FnContext {
+    #[inline]
+    fn new(migrated: bool) -> Self {
+        FnContext {
+            migrated: migrated,
+            _marker: PhantomData,
+        }
+    }
+}
+
+impl FnContext {
+    /// Returns `true` if the closure was called from a different thread
+    /// than it was provided from.
+    #[inline]
+    pub fn migrated(&self) -> bool {
+        self.migrated
+    }
+}
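
`FnContext` only exposes `migrated()`, so its value surfaces through `join_context`. A hedged sketch of that call; the `join_context` signature (closures receiving an `FnContext` by value) is an assumption based on the doc comment above, not something shown in this diff:

```rust
// Sketch only: `join_context` and its closure signature are assumed from
// the `FnContext` docs in this hunk.
extern crate rayon_core;

fn main() {
    let (a, b) = rayon_core::join_context(
        |ctx| ctx.migrated(), // whether this closure ended up on another thread
        |ctx| ctx.migrated(), // e.g. true when the second closure was stolen
    );
    println!("first migrated: {}, second migrated: {}", a, b);
}
```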
--- a/third_party/rust/rayon-core/src/log.rs
+++ b/third_party/rust/rayon-core/src/log.rs
@@ -29,24 +29,16 @@ pub enum Event {
     PoppedJob { worker: usize },
     PoppedRhs { worker: usize },
     LostJob { worker: usize },
     JobCompletedOk { owner_thread: usize },
     JobPanickedErrorStored { owner_thread: usize },
     JobPanickedErrorNotStored { owner_thread: usize },
     ScopeCompletePanicked { owner_thread: usize },
     ScopeCompleteNoPanic { owner_thread: usize },
-
-    FutureExecute { state: usize },
-    FutureExecuteReady,
-    FutureExecuteNotReady,
-    FutureExecuteErr,
-    FutureInstallWaitingTask { state: usize },
-    FutureUnparkWaitingTask,
-    FutureComplete,
 }
 
 pub const DUMP_LOGS: bool = cfg!(debug_assertions);
 
 lazy_static! {
     pub static ref LOG_ENV: bool = env::var("RAYON_LOG").is_ok() || env::var("RAYON_RS_LOG").is_ok();
 }
 
--- a/third_party/rust/rayon-core/src/registry.rs
+++ b/third_party/rust/rayon-core/src/registry.rs
@@ -1,44 +1,29 @@
-use ::{Configuration, ExitHandler, PanicHandler, StartHandler};
-use coco::deque::{self, Worker, Stealer};
+use ::{ExitHandler, PanicHandler, StartHandler, ThreadPoolBuilder, ThreadPoolBuildError, ErrorKind};
+use crossbeam_deque::{Deque, Steal, Stealer};
 use job::{JobRef, StackJob};
-use latch::{LatchProbe, Latch, CountLatch, LockLatch};
-#[allow(unused_imports)]
+#[cfg(rayon_unstable)]
+use job::Job;
+#[cfg(rayon_unstable)]
+use internal::task::Task;
+use latch::{LatchProbe, Latch, CountLatch, LockLatch, SpinLatch, TickleLatch};
 use log::Event::*;
 use rand::{self, Rng};
 use sleep::Sleep;
 use std::any::Any;
-use std::error::Error;
 use std::cell::{Cell, UnsafeCell};
 use std::sync::{Arc, Mutex, Once, ONCE_INIT};
 use std::thread;
 use std::mem;
-use std::fmt;
 use std::u32;
 use std::usize;
 use unwind;
 use util::leak;
 
-/// Error if the gloal thread pool is initialized multiple times.
-#[derive(Debug,PartialEq)]
-struct GlobalPoolAlreadyInitialized;
-
-impl fmt::Display for GlobalPoolAlreadyInitialized {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        f.write_str(self.description())
-    }
-}
-
-impl Error for GlobalPoolAlreadyInitialized {
-    fn description(&self) -> &str {
-        "The global thread pool has already been initialized."
-    }
-}
-
 pub struct Registry {
     thread_infos: Vec<ThreadInfo>,
     state: Mutex<RegistryState>,
     sleep: Sleep,
     job_uninjector: Stealer<JobRef>,
     panic_handler: Option<Box<PanicHandler>>,
     start_handler: Option<Box<StartHandler>>,
     exit_handler: Option<Box<ExitHandler>>,
@@ -55,107 +40,114 @@ pub struct Registry {
     //   return until the blocking job is complete, that ref will continue to be held.
     // - when `join()` or `scope()` is invoked, similarly, no adjustments are needed.
     //   These are always owned by some other job (e.g., one injected by `ThreadPool::install()`)
     //   and that job will keep the pool alive.
     terminate_latch: CountLatch,
 }
 
 struct RegistryState {
-    job_injector: Worker<JobRef>,
+    job_injector: Deque<JobRef>,
 }
 
 /// ////////////////////////////////////////////////////////////////////////
 /// Initialization
 
 static mut THE_REGISTRY: Option<&'static Arc<Registry>> = None;
 static THE_REGISTRY_SET: Once = ONCE_INIT;
 
 /// Starts the worker threads (if that has not already happened). If
 /// initialization has not already occurred, use the default
 /// configuration.
 fn global_registry() -> &'static Arc<Registry> {
-    THE_REGISTRY_SET.call_once(|| unsafe { init_registry(Configuration::new()).unwrap() });
+    THE_REGISTRY_SET.call_once(|| unsafe { init_registry(ThreadPoolBuilder::new()).unwrap() });
     unsafe { THE_REGISTRY.expect("The global thread pool has not been initialized.") }
 }
 
 /// Starts the worker threads (if that has not already happened) with
-/// the given configuration.
-pub fn init_global_registry(config: Configuration) -> Result<&'static Registry, Box<Error>> {
+/// the given builder.
+pub fn init_global_registry(builder: ThreadPoolBuilder) -> Result<&'static Registry, ThreadPoolBuildError> {
     let mut called = false;
     let mut init_result = Ok(());
     THE_REGISTRY_SET.call_once(|| unsafe {
-        init_result = init_registry(config);
+        init_result = init_registry(builder);
         called = true;
     });
     if called {
         init_result.map(|()| &**global_registry())
     } else {
-        Err(Box::new(GlobalPoolAlreadyInitialized))
+        Err(ThreadPoolBuildError::new(ErrorKind::GlobalPoolAlreadyInitialized))
     }
 }
 
-/// Initializes the global registry with the given configuration.
+/// Initializes the global registry with the given builder.
 /// Meant to be called from within the `THE_REGISTRY_SET` once
 /// function. Declared `unsafe` because it writes to `THE_REGISTRY` in
 /// an unsynchronized fashion.
-unsafe fn init_registry(config: Configuration) -> Result<(), Box<Error>> {
-    Registry::new(config).map(|registry| THE_REGISTRY = Some(leak(registry)))
+unsafe fn init_registry(builder: ThreadPoolBuilder) -> Result<(), ThreadPoolBuildError> {
+    Registry::new(builder).map(|registry| THE_REGISTRY = Some(leak(registry)))
 }
 
 struct Terminator<'a>(&'a Arc<Registry>);
 
 impl<'a> Drop for Terminator<'a> {
     fn drop(&mut self) {
         self.0.terminate()
     }
 }
 
 impl Registry {
-    pub fn new(mut configuration: Configuration) -> Result<Arc<Registry>, Box<Error>> {
-        let n_threads = configuration.get_num_threads();
-        let breadth_first = configuration.get_breadth_first();
+    pub fn new(mut builder: ThreadPoolBuilder) -> Result<Arc<Registry>, ThreadPoolBuildError> {
+        let n_threads = builder.get_num_threads();
+        let breadth_first = builder.get_breadth_first();
 
-        let (inj_worker, inj_stealer) = deque::new();
-        let (workers, stealers): (Vec<_>, Vec<_>) = (0..n_threads).map(|_| deque::new()).unzip();
+        let inj_worker = Deque::new();
+        let inj_stealer = inj_worker.stealer();
+        let workers: Vec<_> = (0..n_threads)
+            .map(|_| Deque::new())
+            .collect();
+        let stealers: Vec<_> = workers.iter().map(|d| d.stealer()).collect();
 
         let registry = Arc::new(Registry {
             thread_infos: stealers.into_iter()
                 .map(|s| ThreadInfo::new(s))
                 .collect(),
             state: Mutex::new(RegistryState::new(inj_worker)),
             sleep: Sleep::new(),
             job_uninjector: inj_stealer,
             terminate_latch: CountLatch::new(),
-            panic_handler: configuration.take_panic_handler(),
-            start_handler: configuration.take_start_handler(),
-            exit_handler: configuration.take_exit_handler(),
+            panic_handler: builder.take_panic_handler(),
+            start_handler: builder.take_start_handler(),
+            exit_handler: builder.take_exit_handler(),
         });
 
         // If we return early or panic, make sure to terminate existing threads.
         let t1000 = Terminator(&registry);
 
         for (index, worker) in workers.into_iter().enumerate() {
             let registry = registry.clone();
             let mut b = thread::Builder::new();
-            if let Some(name) = configuration.get_thread_name(index) {
+            if let Some(name) = builder.get_thread_name(index) {
                 b = b.name(name);
             }
-            if let Some(stack_size) = configuration.get_stack_size() {
+            if let Some(stack_size) = builder.get_stack_size() {
                 b = b.stack_size(stack_size);
             }
-            try!(b.spawn(move || unsafe { main_loop(worker, registry, index, breadth_first) }));
+            if let Err(e) = b.spawn(move || unsafe { main_loop(worker, registry, index, breadth_first) }) {
+                return Err(ThreadPoolBuildError::new(ErrorKind::IOError(e)))
+            }
         }
 
         // Returning normally now, without termination.
         mem::forget(t1000);
 
         Ok(registry.clone())
     }
 
+    #[cfg(rayon_unstable)]
     pub fn global() -> Arc<Registry> {
         global_registry().clone()
     }
 
     pub fn current() -> Arc<Registry> {
         unsafe {
             let worker_thread = WorkerThread::current();
             if worker_thread.is_null() {
@@ -232,29 +224,76 @@ impl Registry {
     ///
     /// So long as all of the worker threads are hanging out in their
     /// top-level loop, there is no work to be done.
 
     /// Push a job into the given `registry`. If we are running on a
     /// worker thread for the registry, this will push onto the
     /// deque. Else, it will inject from the outside (which is slower).
     pub fn inject_or_push(&self, job_ref: JobRef) {
+        let worker_thread = WorkerThread::current();
         unsafe {
-            let worker_thread = WorkerThread::current();
             if !worker_thread.is_null() && (*worker_thread).registry().id() == self.id() {
                 (*worker_thread).push(job_ref);
             } else {
                 self.inject(&[job_ref]);
             }
         }
     }
 
-    /// Unsafe: caller asserts that injected jobs will remain valid
-    /// until they are executed.
-    pub unsafe fn inject(&self, injected_jobs: &[JobRef]) {
+    /// Unsafe: the caller must guarantee that `task` will stay valid
+    /// until it executes.
+    #[cfg(rayon_unstable)]
+    pub unsafe fn submit_task<T>(&self, task: Arc<T>)
+        where T: Task
+    {
+        let task_job = TaskJob::new(task);
+        let task_job_ref = TaskJob::into_job_ref(task_job);
+        return self.inject_or_push(task_job_ref);
+
+        /// A little newtype wrapper for `T`, just because I did not
+        /// want to implement `Job` for all `T: Task`.
+        struct TaskJob<T: Task> {
+            _data: T
+        }
+
+        impl<T: Task> TaskJob<T> {
+            fn new(arc: Arc<T>) -> Arc<Self> {
+                // `TaskJob<T>` has the same layout as `T`, so we can safely
+                // transmute this `T` into a `TaskJob<T>`. This lets us write our
+                // impls of `Job` for `TaskJob<T>`, making them more restricted.
+                // Since `Job` is a private trait, this is not strictly necessary,
+                // I don't think, but makes me feel better.
+                unsafe { mem::transmute(arc) }
+            }
+
+            pub fn into_task(this: Arc<TaskJob<T>>) -> Arc<T> {
+                // Same logic as `new()`
+                unsafe { mem::transmute(this) }
+            }
+
+            unsafe fn into_job_ref(this: Arc<Self>) -> JobRef {
+                let this: *const Self = mem::transmute(this);
+                JobRef::new(this)
+            }
+        }
+
+        impl<T: Task> Job for TaskJob<T> {
+            unsafe fn execute(this: *const Self) {
+                let this: Arc<Self> = mem::transmute(this);
+                let task: Arc<T> = TaskJob::into_task(this);
+                Task::execute(task);
+            }
+        }
+    }
+
+    /// Push a job into the "external jobs" queue; it will be taken by
+    /// whatever worker has nothing to do. Use this if you know that
+    /// you are not on a worker of this registry.
+    pub fn inject(&self, injected_jobs: &[JobRef]) {
         log!(InjectJobs { count: injected_jobs.len() });
         {
             let state = self.state.lock().unwrap();
 
             // It should not be possible for `state.terminate` to be true
             // here. It is only set to true when the user creates (and
             // drops) a `ThreadPool`; and, in that case, they cannot be
             // calling `inject()` later, since they dropped their
@@ -264,21 +303,83 @@ impl Registry {
             for &job_ref in injected_jobs {
                 state.job_injector.push(job_ref);
             }
         }
         self.sleep.tickle(usize::MAX);
     }
 
     fn pop_injected_job(&self, worker_index: usize) -> Option<JobRef> {
-        let stolen = self.job_uninjector.steal();
-        if stolen.is_some() {
-            log!(UninjectedWork { worker: worker_index });
+        loop {
+            match self.job_uninjector.steal() {
+                Steal::Empty => return None,
+                Steal::Data(d) => {
+                    log!(UninjectedWork { worker: worker_index });
+                    return Some(d);
+                },
+                Steal::Retry => {},
+            }
+        }
+    }
+
+    /// If already in a worker-thread of this registry, just execute `op`.
+    /// Otherwise, inject `op` in this thread-pool. Either way, block until `op`
+    /// completes and return its return value. If `op` panics, that panic will
+    /// be propagated as well.  The closure's second argument is `true` if `op`
+    /// was injected as a job, `false` if it ran directly on the current worker.
+    pub fn in_worker<OP, R>(&self, op: OP) -> R
+        where OP: FnOnce(&WorkerThread, bool) -> R + Send, R: Send
+    {
+        unsafe {
+            let worker_thread = WorkerThread::current();
+            if worker_thread.is_null() {
+                self.in_worker_cold(op)
+            } else if (*worker_thread).registry().id() != self.id() {
+                self.in_worker_cross(&*worker_thread, op)
+            } else {
+                // Perfectly valid to give them a `&T`: this is the
+                // current thread, so we know the data structure won't be
+                // invalidated until we return.
+                op(&*worker_thread, false)
+            }
         }
-        stolen
+    }
+
+    #[cold]
+    unsafe fn in_worker_cold<OP, R>(&self, op: OP) -> R
+        where OP: FnOnce(&WorkerThread, bool) -> R + Send, R: Send
+    {
+        // This thread isn't a member of *any* thread pool, so just block.
+        debug_assert!(WorkerThread::current().is_null());
+        let job = StackJob::new(|injected| {
+            let worker_thread = WorkerThread::current();
+            assert!(injected && !worker_thread.is_null());
+            op(&*worker_thread, true)
+        }, LockLatch::new());
+        self.inject(&[job.as_job_ref()]);
+        job.latch.wait();
+        job.into_result()
+    }
+
+    #[cold]
+    unsafe fn in_worker_cross<OP, R>(&self, current_thread: &WorkerThread, op: OP) -> R
+        where OP: FnOnce(&WorkerThread, bool) -> R + Send, R: Send
+    {
+        // This thread is a member of a different pool, so let it process
+        // other work while waiting for this `op` to complete.
+        debug_assert!(current_thread.registry().id() != self.id());
+        let latch = TickleLatch::new(SpinLatch::new(), &current_thread.registry().sleep);
+        let job = StackJob::new(|injected| {
+            let worker_thread = WorkerThread::current();
+            assert!(injected && !worker_thread.is_null());
+            op(&*worker_thread, true)
+        }, latch);
+        self.inject(&[job.as_job_ref()]);
+        current_thread.wait_until(&job.latch);
+        job.into_result()
     }
 
     /// Increment the terminate counter. This increment should be
     /// balanced by a call to `terminate`, which will decrement. This
     /// is used when spawning asynchronous work, which needs to
     /// prevent the registry from terminating so long as it is active.
     ///
     /// Note that blocking functions such as `join` and `scope` do not
@@ -304,23 +405,23 @@ impl Registry {
     /// dropped. The worker threads will gradually terminate, once any
     /// extant work is completed.
     pub fn terminate(&self) {
         self.terminate_latch.set();
         self.sleep.tickle(usize::MAX);
     }
 }
 
-#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
+#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
 pub struct RegistryId {
     addr: usize
 }
 
 impl RegistryState {
-    pub fn new(job_injector: Worker<JobRef>) -> RegistryState {
+    pub fn new(job_injector: Deque<JobRef>) -> RegistryState {
         RegistryState {
             job_injector: job_injector,
         }
     }
 }
 
 struct ThreadInfo {
     /// Latch set once thread has started and we are entering into the
@@ -346,17 +447,17 @@ impl ThreadInfo {
     }
 }
 
 /// ////////////////////////////////////////////////////////////////////////
 /// WorkerThread identifiers
 
 pub struct WorkerThread {
     /// the "worker" half of our local deque
-    worker: Worker<JobRef>,
+    worker: Deque<JobRef>,
 
     index: usize,
 
     /// are these workers configured to steal breadth-first or not?
     breadth_first: bool,
 
     /// A weak random number generator.
     rng: UnsafeCell<rand::XorShiftRng>,
@@ -418,17 +519,23 @@ impl WorkerThread {
     /// popping from the top of the stack, though if we are configured
     /// for breadth-first execution, it would mean dequeuing from the
     /// bottom.
     #[inline]
     pub unsafe fn take_local_job(&self) -> Option<JobRef> {
         if !self.breadth_first {
             self.worker.pop()
         } else {
-            self.worker.steal()
+            loop {
+                match self.worker.steal() {
+                    Steal::Empty => return None,
+                    Steal::Data(d) => return Some(d),
+                    Steal::Retry => {},
+                }
+            }
         }
     }
 
     /// Wait until the latch is set. Try to keep busy by popping and
     /// stealing tasks as necessary.
     #[inline]
     pub unsafe fn wait_until<L: LatchProbe + ?Sized>(&self, latch: &L) {
         log!(WaitUntil { worker: self.index });
@@ -506,29 +613,37 @@ impl WorkerThread {
             let rng = &mut *self.rng.get();
             rng.next_u32() % num_threads as u32
         } as usize;
         (start .. num_threads)
             .chain(0 .. start)
             .filter(|&i| i != self.index)
             .filter_map(|victim_index| {
                 let victim = &self.registry.thread_infos[victim_index];
-                let stolen = victim.stealer.steal();
-                if stolen.is_some() {
-                    log!(StoleWork { worker: self.index, victim: victim_index });
+                loop {
+                    match victim.stealer.steal() {
+                        Steal::Empty => return None,
+                        Steal::Data(d) => {
+                            log!(StoleWork {
+                                worker: self.index,
+                                victim: victim_index
+                            });
+                            return Some(d);
+                        },
+                        Steal::Retry => {},
+                    }
                 }
-                stolen
             })
             .next()
     }
 }
 
 /// ////////////////////////////////////////////////////////////////////////
 
-unsafe fn main_loop(worker: Worker<JobRef>,
+unsafe fn main_loop(worker: Deque<JobRef>,
                     registry: Arc<Registry>,
                     index: usize,
                     breadth_first: bool) {
     let worker_thread = WorkerThread {
         worker: worker,
         breadth_first: breadth_first,
         index: index,
         rng: UnsafeCell::new(rand::weak_rng()),
@@ -579,36 +694,25 @@ unsafe fn main_loop(worker: Worker<JobRe
         }
         // We're already exiting the thread, there's nothing else to do.
     }
 }
 
 /// If already in a worker-thread, just execute `op`.  Otherwise,
 /// execute `op` in the default thread-pool. Either way, block until
 /// `op` completes and return its return value. If `op` panics, that
-/// panic will be propagated as well.
+/// panic will be propagated as well.  The closure's second argument is
+/// `true` if `op` was injected as a job, `false` if it ran directly.
 pub fn in_worker<OP, R>(op: OP) -> R
-    where OP: FnOnce(&WorkerThread) -> R + Send, R: Send
+    where OP: FnOnce(&WorkerThread, bool) -> R + Send, R: Send
 {
     unsafe {
         let owner_thread = WorkerThread::current();
         if !owner_thread.is_null() {
             // Perfectly valid to give them a `&T`: this is the
             // current thread, so we know the data structure won't be
             // invalidated until we return.
-            return op(&*owner_thread);
+            op(&*owner_thread, false)
         } else {
-            return in_worker_cold(op);
+            global_registry().in_worker_cold(op)
         }
     }
 }
-
-#[cold]
-unsafe fn in_worker_cold<OP, R>(op: OP) -> R
-    where OP: FnOnce(&WorkerThread) -> R + Send, R: Send
-{
-    // never run from a worker thread; just shifts over into worker threads
-    debug_assert!(WorkerThread::current().is_null());
-    let job = StackJob::new(|| in_worker(op), LockLatch::new());
-    global_registry().inject(&[job.as_job_ref()]);
-    job.latch.wait();
-    job.into_result()
-}
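
The registry hunks above replace coco's deque with crossbeam-deque, whose `steal()` can report transient contention, hence the retry loops added to `pop_injected_job`, `take_local_job`, and the victim-stealing path. A minimal standalone sketch of that pattern, using only the API the diff itself relies on (`Deque`, `stealer()`, `Steal::{Empty, Data, Retry}`):

```rust
// Sketch only: mirrors the Steal::Retry loops added to the registry above.
extern crate crossbeam_deque;

use crossbeam_deque::{Deque, Steal};

fn main() {
    let deque: Deque<i32> = Deque::new();
    let stealer = deque.stealer();
    deque.push(1);

    let stolen = loop {
        match stealer.steal() {
            Steal::Empty => break None,
            Steal::Data(d) => break Some(d),
            Steal::Retry => continue, // another stealer raced us; try again
        }
    };
    assert_eq!(stolen, Some(1));
}
```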
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-core/src/scope/internal.rs
@@ -0,0 +1,61 @@
+#![cfg(rayon_unstable)]
+
+use internal::task::{ScopeHandle, ToScopeHandle, Task};
+use std::any::Any;
+use std::mem;
+use std::sync::Arc;
+use super::Scope;
+
+impl<'scope> ToScopeHandle<'scope> for Scope<'scope> {
+    type ScopeHandle = LocalScopeHandle<'scope>;
+
+    fn to_scope_handle(&self) -> Self::ScopeHandle {
+        unsafe { LocalScopeHandle::new(self) }
+    }
+}
+
+#[derive(Debug)]
+pub struct LocalScopeHandle<'scope> {
+    scope: *const Scope<'scope>
+}
+
+impl<'scope> LocalScopeHandle<'scope> {
+    /// Caller guarantees that `*scope` will remain valid
+    /// until the scope completes. Since we acquire a ref,
+    /// that means it will remain valid until we release it.
+    unsafe fn new(scope: &Scope<'scope>) -> Self {
+        scope.job_completed_latch.increment();
+        LocalScopeHandle { scope: scope }
+    }
+}
+
+impl<'scope> Drop for LocalScopeHandle<'scope> {
+    fn drop(&mut self) {
+        unsafe {
+            if !self.scope.is_null() {
+                (*self.scope).job_completed_ok();
+            }
+        }
+    }
+}
+
+/// We assert that the `Self` type remains valid until a
+/// method is called, and that `'scope` will not end until
+/// that point.
+unsafe impl<'scope> ScopeHandle<'scope> for LocalScopeHandle<'scope> {
+    unsafe fn spawn_task<T: Task + 'scope>(&self, task: Arc<T>) {
+        let scope = &*self.scope;
+        scope.registry.submit_task(task);
+    }
+
+    fn ok(self) {
+        mem::drop(self);
+    }
+
+    fn panicked(self, err: Box<Any + Send>) {
+        unsafe {
+            (*self.scope).job_panicked(err);
+            mem::forget(self); // no need to run dtor now
+        }
+    }
+}
--- a/third_party/rust/rayon-core/src/scope/mod.rs
+++ b/third_party/rust/rayon-core/src/scope/mod.rs
@@ -1,58 +1,72 @@
-#[cfg(rayon_unstable)]
-use future::{self, Future, RayonFuture};
+//! Methods for custom fork-join scopes, created by the [`scope()`]
+//! function. These are a more flexible alternative to [`join()`].
+//!
+//! [`scope()`]: fn.scope.html
+//! [`join()`]: ../fn.join.html
+
 use latch::{Latch, CountLatch};
 use log::Event::*;
 use job::HeapJob;
 use std::any::Any;
+use std::fmt;
 use std::marker::PhantomData;
 use std::mem;
 use std::ptr;
 use std::sync::Arc;
 use std::sync::atomic::{AtomicPtr, Ordering};
-use registry::{in_worker, Registry, WorkerThread};
+use registry::{in_worker, WorkerThread, Registry};
 use unwind;
 
 #[cfg(test)]
 mod test;
+mod internal;
 
+/// Represents a fork-join scope which can be used to spawn any number of tasks. See [`scope()`] for more information.
+///
+///[`scope()`]: fn.scope.html
 pub struct Scope<'scope> {
     /// thread where `scope()` was executed (note that individual jobs
     /// may be executing on different worker threads, though they
     /// should always be within the same pool of threads)
-    owner_thread: *const WorkerThread,
+    owner_thread_index: usize,
+
+    /// thread registry where `scope()` was executed.
+    registry: Arc<Registry>,
 
     /// if some job panicked, the error is stored here; it will be
     /// propagated to the one who created the scope
     panic: AtomicPtr<Box<Any + Send + 'static>>,
 
     /// latch to set when the counter drops to zero (and hence this scope is complete)
     job_completed_latch: CountLatch,
 
-    /// you can think of a scope as containing a list of closures to
-    /// execute, all of which outlive `'scope`
-    marker: PhantomData<Box<FnOnce(&Scope<'scope>) + 'scope>>,
+    /// You can think of a scope as containing a list of closures to execute,
+    /// all of which outlive `'scope`.  They're not actually required to be
+    /// `Sync`, but it's still safe to let the `Scope` implement `Sync` because
+    /// the closures are only *moved* across threads to be executed.
+    marker: PhantomData<Box<FnOnce(&Scope<'scope>) + Send + Sync + 'scope>>,
 }
 
 /// Create a "fork-join" scope `s` and invokes the closure with a
 /// reference to `s`. This closure can then spawn asynchronous tasks
 /// into `s`. Those tasks may run asynchronously with respect to the
 /// closure; they may themselves spawn additional tasks into `s`. When
 /// the closure returns, it will block until all tasks that have been
 /// spawned into `s` complete.
 ///
 /// `scope()` is a more flexible building block compared to `join()`,
 /// since a loop can be used to spawn any number of tasks without
 /// recursing. However, that flexibility comes at a performance price:
 /// tasks spawned using `scope()` must be allocated onto the heap,
 /// whereas `join()` can make exclusive use of the stack. **Prefer
 /// `join()` (or, even better, parallel iterators) where possible.**
 ///
-/// ### Example
+/// # Example
 ///
 /// The Rayon `join()` function launches two closures and waits for them
 /// to stop. One could implement `join()` using a scope like so, although
 /// it would be less efficient than the real implementation:
 ///
 /// ```rust
 /// # use rayon_core as rayon;
 /// pub fn join<A,B,RA,RB>(oper_a: A, oper_b: B) -> (RA, RB)
@@ -66,24 +80,24 @@ pub struct Scope<'scope> {
 ///     rayon::scope(|s| {
 ///         s.spawn(|_| result_a = Some(oper_a()));
 ///         s.spawn(|_| result_b = Some(oper_b()));
 ///     });
 ///     (result_a.unwrap(), result_b.unwrap())
 /// }
 /// ```
 ///
-/// ### A note on threading
+/// # A note on threading
 ///
 /// The closure given to `scope()` executes in the Rayon thread-pool,
 /// as do those given to `spawn()`. This means that you can't access
 /// thread-local variables (well, you can, but they may have
 /// unexpected values).
 ///
-/// ### Task execution
+/// # Task execution
 ///
 /// Task execution potentially starts as soon as `spawn()` is called.
 /// The task will end sometime before `scope()` returns. Note that the
 /// *closure* given to scope may return much earlier. In general
 /// the lifetime of a scope created like `scope(body)` goes something like this:
 ///
 /// - Scope begins when `scope(body)` is called
 /// - Scope body `body()` is invoked
@@ -137,17 +151,17 @@ pub struct Scope<'scope> {
 /// The point here is that everything spawned into scope `s` will
 /// terminate (at latest) at the same point -- right before the
 /// original call to `rayon::scope` returns. This includes new
 /// subtasks created by other subtasks (e.g., task `s.1.1`). If a new
 /// scope is created (such as `t`), the things spawned into that scope
 /// will be joined before that scope returns, which in turn occurs
 /// before the creating task (task `s.1.1` in this case) finishes.
 ///
-/// ### Accessing stack data
+/// # Accessing stack data
 ///
 /// In general, spawned tasks may access stack data in place that
 /// outlives the scope itself. Other data must be fully owned by the
 /// spawned task.
 ///
 /// ```rust
 /// # use rayon_core as rayon;
 /// let ok: Vec<i32> = vec![1, 2, 3];
@@ -227,114 +241,110 @@ pub struct Scope<'scope> {
 ///         println!("ok: {:?}", ok); // `ok` is only borrowed.
 ///         println!("bad: {:?}", bad); // refers to our local variable, above.
 ///     });
 ///
 ///     s.spawn(|_| println!("ok: {:?}", ok)); // we too can borrow `ok`
 /// });
 /// ```
 ///
-/// ### Panics
+/// # Panics
 ///
 /// If a panic occurs, either in the closure given to `scope()` or in
 /// any of the spawned jobs, that panic will be propagated and the
 /// call to `scope()` will panic. If multiple panics occurs, it is
 /// non-deterministic which of their panic values will propagate.
 /// Regardless, once a task is spawned using `scope.spawn()`, it will
 /// execute, even if the spawning task should later panic. `scope()`
 /// returns once all spawned jobs have completed, and any panics are
 /// propagated at that point.
 pub fn scope<'scope, OP, R>(op: OP) -> R
     where OP: for<'s> FnOnce(&'s Scope<'scope>) -> R + 'scope + Send, R: Send,
 {
-    in_worker(|owner_thread| {
+    in_worker(|owner_thread, _| {
         unsafe {
             let scope: Scope<'scope> = Scope {
-                owner_thread: owner_thread as *const WorkerThread as *mut WorkerThread,
+                owner_thread_index: owner_thread.index(),
+                registry: owner_thread.registry().clone(),
                 panic: AtomicPtr::new(ptr::null_mut()),
                 job_completed_latch: CountLatch::new(),
                 marker: PhantomData,
             };
             let result = scope.execute_job_closure(op);
-            scope.steal_till_jobs_complete();
+            scope.steal_till_jobs_complete(owner_thread);
             result.unwrap() // only None if `op` panicked, and that would have been propagated
         }
     })
 }
 
 impl<'scope> Scope<'scope> {
     /// Spawns a job into the fork-join scope `self`. This job will
     /// execute sometime before the fork-join scope completes.  The
     /// job is specified as a closure, and this closure receives its
-    /// own reference to `self` as argument. This can be used to
-    /// inject new jobs into `self`.
+    /// own reference to the scope `self` as argument. This can be
+    /// used to inject new jobs into `self`.
+    ///
+    /// # Returns
+    ///
+    /// Nothing. The spawned closures cannot pass back values to the
+    /// caller directly, though they can write to local variables on
+    /// the stack (if those variables outlive the scope) or
+    /// communicate through shared channels.
+    ///
+    /// (The intention is to eventually integrate with Rust futures to
+    /// support spawns of functions that compute a value.)
+    ///
+    /// # Examples
+    ///
+    /// ```rust
+    /// # use rayon_core as rayon;
+    /// let mut value_a = None;
+    /// let mut value_b = None;
+    /// let mut value_c = None;
+    /// rayon::scope(|s| {
+    ///     s.spawn(|s1| {
+    ///           // ^ this is the same scope as `s`; this handle `s1`
+    ///           //   is intended for use by the spawned task,
+    ///           //   since scope handles cannot cross thread boundaries.
+    ///
+    ///         value_a = Some(22);
+    ///
+    ///         // the scope `s` will not end until all these tasks are done
+    ///         s1.spawn(|_| {
+    ///             value_b = Some(44);
+    ///         });
+    ///     });
+    ///
+    ///     s.spawn(|_| {
+    ///         value_c = Some(66);
+    ///     });
+    /// });
+    /// assert_eq!(value_a, Some(22));
+    /// assert_eq!(value_b, Some(44));
+    /// assert_eq!(value_c, Some(66));
+    /// ```
+    ///
+    /// # See also
+    ///
+    /// The [`scope` function] has more extensive documentation about
+    /// task spawning.
+    ///
+    /// [`scope` function]: fn.scope.html
     pub fn spawn<BODY>(&self, body: BODY)
-        where BODY: FnOnce(&Scope<'scope>) + 'scope
+        where BODY: FnOnce(&Scope<'scope>) + Send + 'scope
     {
         unsafe {
             self.job_completed_latch.increment();
             let job_ref = Box::new(HeapJob::new(move || self.execute_job(body)))
                 .as_job_ref();
-            let worker_thread = WorkerThread::current();
 
-            // the `Scope` is not send or sync, and we only give out
-            // pointers to it from within a worker thread
-            debug_assert!(!WorkerThread::current().is_null());
-
-            let worker_thread = &*worker_thread;
-            worker_thread.push(job_ref);
-        }
-    }
-
-    #[cfg(rayon_unstable)]
-    pub fn spawn_future<F>(&self, future: F) -> RayonFuture<F::Item, F::Error>
-        where F: Future + Send + 'scope
-    {
-        // We assert that the scope is allocated in a stable location
-        // (an enclosing stack frame, to be exact) which will remain
-        // valid until the scope ends.
-        let future_scope = unsafe { ScopeFutureScope::new(self) };
-
-        return future::new_rayon_future(future, future_scope);
-
-        struct ScopeFutureScope<'scope> {
-            scope: *const Scope<'scope>
-        }
-
-        impl<'scope> ScopeFutureScope<'scope> {
-            /// Caller guarantees that `*scope` will remain valid
-            /// until the scope completes. Since we acquire a ref,
-            /// that means it will remain valid until we release it.
-            unsafe fn new(scope: &Scope<'scope>) -> Self {
-                scope.job_completed_latch.increment();
-                ScopeFutureScope { scope: scope }
-            }
-        }
-
-        /// We assert that the `Self` type remains valid until a
-        /// method is called, and that `'scope` will not end until
-        /// that point.
-        unsafe impl<'scope> future::FutureScope<'scope> for ScopeFutureScope<'scope> {
-            fn registry(&self) -> Arc<Registry> {
-                unsafe {
-                    (*(*self.scope).owner_thread).registry().clone()
-                }
-            }
-
-            fn future_completed(self) {
-                unsafe {
-                    (*self.scope).job_completed_ok();
-                }
-            }
-
-            fn future_panicked(self, err: Box<Any + Send>) {
-                unsafe {
-                    (*self.scope).job_panicked(err);
-                }
-            }
+            // Since `Scope` implements `Sync`, we can't be sure
+            // that we're still in a thread of this pool, so we
+            // can't just push to the local worker thread.
+            self.registry.inject_or_push(job_ref);
         }
     }
 
     /// Executes `func` as a job, either aborting or executing as
     /// appropriate.
     ///
     /// Unsafe because it must be executed on a worker thread.
     unsafe fn execute_job<FUNC>(&self, func: FUNC)
@@ -357,40 +367,51 @@ impl<'scope> Scope<'scope> {
         }
     }
 
     unsafe fn job_panicked(&self, err: Box<Any + Send + 'static>) {
         // capture the first error we see, free the rest
         let nil = ptr::null_mut();
         let mut err = Box::new(err); // box up the fat ptr
         if self.panic.compare_exchange(nil, &mut *err, Ordering::Release, Ordering::Relaxed).is_ok() {
-            log!(JobPanickedErrorStored { owner_thread: (*self.owner_thread).index() });
+            log!(JobPanickedErrorStored { owner_thread: self.owner_thread_index });
             mem::forget(err); // ownership now transferred into self.panic
         } else {
-            log!(JobPanickedErrorNotStored { owner_thread: (*self.owner_thread).index() });
+            log!(JobPanickedErrorNotStored { owner_thread: self.owner_thread_index });
         }
 
 
         self.job_completed_latch.set();
     }
 
     unsafe fn job_completed_ok(&self) {
-        log!(JobCompletedOk { owner_thread: (*self.owner_thread).index() });
+        log!(JobCompletedOk { owner_thread: self.owner_thread_index });
         self.job_completed_latch.set();
     }
 
-    unsafe fn steal_till_jobs_complete(&self) {
+    unsafe fn steal_till_jobs_complete(&self, owner_thread: &WorkerThread) {
         // wait for job counter to reach 0:
-        (*self.owner_thread).wait_until(&self.job_completed_latch);
+        owner_thread.wait_until(&self.job_completed_latch);
 
         // propagate panic, if any occurred; at this point, all
         // outstanding jobs have completed, so we can use a relaxed
         // ordering:
         let panic = self.panic.swap(ptr::null_mut(), Ordering::Relaxed);
         if !panic.is_null() {
-            log!(ScopeCompletePanicked { owner_thread: (*self.owner_thread).index() });
+            log!(ScopeCompletePanicked { owner_thread: owner_thread.index() });
             let value: Box<Box<Any + Send + 'static>> = mem::transmute(panic);
             unwind::resume_unwinding(*value);
         } else {
-            log!(ScopeCompleteNoPanic { owner_thread: (*self.owner_thread).index() });
+            log!(ScopeCompleteNoPanic { owner_thread: owner_thread.index() });
         }
     }
 }
+
+impl<'scope> fmt::Debug for Scope<'scope> {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        fmt.debug_struct("Scope")
+            .field("pool_id", &self.registry.id())
+            .field("owner_thread_index", &self.owner_thread_index)
+            .field("panic", &self.panic)
+            .field("job_completed_latch", &self.job_completed_latch)
+            .finish()
+    }
+}
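
Because `spawn` now routes through `Registry::inject_or_push` and the marker makes `Scope` `Send + Sync`, a scope reference can be handed to code running on another pool and still spawn back into its own registry. A hedged sketch under those assumptions (the cross-pool use is an inference from the comment inside `spawn`, not an upstream-documented guarantee):

```rust
// Sketch only: relies on `Scope` being Sync and on `spawn` injecting into
// the scope's own registry, as the diff above describes.
extern crate rayon_core;

fn main() {
    let other_pool = rayon_core::ThreadPoolBuilder::new()
        .num_threads(1)
        .build()
        .unwrap();

    rayon_core::scope(|s| {
        other_pool.install(|| {
            // `s` is usable here even though this closure runs in `other_pool`;
            // the spawned job is injected back into the scope's pool.
            s.spawn(|_| println!("spawned back into the scope's own pool"));
        });
    });
}
```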
--- a/third_party/rust/rayon-core/src/scope/test.rs
+++ b/third_party/rust/rayon-core/src/scope/test.rs
@@ -1,11 +1,10 @@
-use Configuration;
+use ThreadPoolBuilder;
 use {scope, Scope};
-use ThreadPool;
 use rand::{Rng, SeedableRng, XorShiftRng};
 use std::cmp;
 use std::iter::once;
 use std::sync::atomic::{AtomicUsize, Ordering};
 use std::sync::Mutex;
 use unwind;
 
 #[test]
@@ -64,22 +63,22 @@ fn divide_and_conquer_seq(counter: &Atom
         divide_and_conquer_seq(counter, size / 2);
         divide_and_conquer_seq(counter, size / 2);
     } else {
         // count the leaves
         counter.fetch_add(1, Ordering::SeqCst);
     }
 }
 
-struct Tree<T> {
+struct Tree<T: Send> {
     value: T,
     children: Vec<Tree<T>>,
 }
 
-impl<T> Tree<T> {
+impl<T: Send> Tree<T> {
     pub fn iter<'s>(&'s self) -> impl Iterator<Item = &'s T> + 's {
         once(&self.value)
             .chain(self.children.iter().flat_map(|c| c.iter()))
             .collect::<Vec<_>>() // seems like it shouldn't be needed... but prevents overflow
             .into_iter()
     }
 
     pub fn update<OP>(&mut self, op: OP)
@@ -137,18 +136,18 @@ fn update_tree() {
 }
 
 /// Check that if you have a chain of scoped tasks where T0 spawns T1
 /// spawns T2 and so forth down to Tn, the stack space should not grow
 /// linearly with N. We test this by some unsafe hackery and
 /// permitting an approx 10% change with a 10x input change.
 #[test]
 fn linear_stack_growth() {
-    let config = Configuration::new().num_threads(1);
-    let pool = ThreadPool::new(config).unwrap();
+    let builder = ThreadPoolBuilder::new().num_threads(1);
+    let pool = builder.build().unwrap();
     pool.install(|| {
         let mut max_diff = Mutex::new(0);
         let bottom_of_stack = 0;
         scope(|s| the_final_countdown(s, &bottom_of_stack, &max_diff, 5));
         let diff_when_5 = *max_diff.get_mut().unwrap() as f64;
 
         scope(|s| the_final_countdown(s, &bottom_of_stack, &max_diff, 500));
         let diff_when_500 = *max_diff.get_mut().unwrap() as f64;
--- a/third_party/rust/rayon-core/src/spawn/mod.rs
+++ b/third_party/rust/rayon-core/src/spawn/mod.rs
@@ -1,15 +1,10 @@
-#[cfg(rayon_unstable)]
-use future::{self, Future, RayonFuture};
-#[allow(unused_imports)]
-use latch::{Latch, SpinLatch};
 use job::*;
 use registry::Registry;
-use std::any::Any;
 use std::mem;
 use std::sync::Arc;
 use unwind;
 
 /// Fires off a task into the Rayon threadpool in the "static" or
 /// "global" scope.  Just like a standard thread, this task is not
 /// tied to the current stack frame, and hence it cannot hold any
 /// references other than those with `'static` lifetime. If you want
@@ -27,21 +22,21 @@ use unwind;
 /// This API assumes that the closure is executed purely for its
 /// side-effects (i.e., it might send messages, modify data protected
 /// by a mutex, or some such thing). If you want to compute a result,
 /// consider `spawn_future()`.
 ///
 /// # Panic handling
 ///
 /// If this closure should panic, the resulting panic will be
-/// propagated to the panic handler registered in the `Configuration`,
-/// if any.  See [`Configuration::panic_handler()`][ph] for more
+/// propagated to the panic handler registered in the `ThreadPoolBuilder`,
+/// if any.  See [`ThreadPoolBuilder::panic_handler()`][ph] for more
 /// details.
 ///
-/// [ph]: struct.Configuration.html#method.panic_handler
+/// [ph]: struct.ThreadPoolBuilder.html#method.panic_handler
 ///
 /// # Examples
 ///
 /// This code creates a Rayon task that increments a global counter.
 ///
 /// ```rust
 /// # use rayon_core as rayon;
 /// use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
@@ -91,84 +86,10 @@ pub unsafe fn spawn_in<F>(func: F, regis
     // be able to panic, and hence the data won't leak but will be
     // enqueued into some deque for later execution.
     let abort_guard = unwind::AbortIfPanic; // just in case we are wrong, and code CAN panic
     let job_ref = HeapJob::as_job_ref(async_job);
     registry.inject_or_push(job_ref);
     mem::forget(abort_guard);
 }
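
The panic-handling note above says a panic inside a spawned task is delivered to the pool's registered panic handler rather than unwinding into the caller. A hedged sketch of observing that behaviour; the `Mutex` around the sender is needed because the handler must be `Sync`, just as the tests later in this patch do:

```rust
// Sketch only: demonstrates the panic routing described in the doc comment above.
extern crate rayon_core;

use std::sync::Mutex;
use std::sync::mpsc::channel;

fn main() {
    let (tx, rx) = channel();
    let tx = Mutex::new(tx); // the panic handler must be Send + Sync
    let pool = rayon_core::ThreadPoolBuilder::new()
        .panic_handler(move |_err| tx.lock().unwrap().send(()).unwrap())
        .build()
        .unwrap();

    pool.spawn(|| panic!("Hello, world!"));
    rx.recv().unwrap(); // the handler observed the panic
}
```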
 
-/// Spawns a future in the static scope, scheduling it to execute on
-/// Rayon's threadpool. Returns a new future that can be used to poll
-/// for the result. Since this future is executing in the static scope,
-/// it cannot hold references to things in the enclosing stack frame;
-/// if you would like to hold such references, use [the `scope()`
-/// function][scope] to create a scope.
-///
-/// [scope]: fn.scope.html
-///
-/// # Panic handling
-///
-/// If this future should panic, that panic will be propagated when
-/// `poll()` is invoked on the return value.
-#[cfg(rayon_unstable)]
-pub fn spawn_future<F>(future: F) -> RayonFuture<F::Item, F::Error>
-    where F: Future + Send + 'static
-{
-    /// We assert that the current registry cannot yet have terminated.
-    unsafe { spawn_future_in(future, Registry::current()) }
-}
-
-/// Internal helper function.
-///
-/// Unsafe because caller must guarantee that `registry` has not yet terminated.
-#[cfg(rayon_unstable)]
-pub unsafe fn spawn_future_in<F>(future: F, registry: Arc<Registry>) -> RayonFuture<F::Item, F::Error>
-    where F: Future + Send + 'static
-{
-    let scope = StaticFutureScope::new(registry.clone());
-
-    future::new_rayon_future(future, scope)
-}
-
-#[cfg(rayon_unstable)]
-struct StaticFutureScope {
-    registry: Arc<Registry>
-}
-
-#[cfg(rayon_unstable)]
-impl StaticFutureScope {
-    /// Caller asserts that the registry has not yet terminated.
-    unsafe fn new(registry: Arc<Registry>) -> Self {
-        registry.increment_terminate_count();
-        StaticFutureScope { registry: registry }
-    }
-}
-
-/// We assert that:
-///
-/// (a) the scope valid remains valid until a completion method
-///     is called. In this case, "remains valid" means that the
-///     registry is not terminated. This is true because we
-///     acquire a "termination count" in `StaticFutureScope::new()`
-///     which is not released until `future_panicked()` or
-///     `future_completed()` is invoked.
-/// (b) the lifetime `'static` will not end until a completion
-///     method is called. This is true because `'static` doesn't
-///     end until the end of the program.
-#[cfg(rayon_unstable)]
-unsafe impl future::FutureScope<'static> for StaticFutureScope {
-    fn registry(&self) -> Arc<Registry> {
-        self.registry.clone()
-    }
-
-    fn future_panicked(self, err: Box<Any + Send>) {
-        self.registry.handle_panic(err);
-        self.registry.terminate();
-    }
-
-    fn future_completed(self) {
-        self.registry.terminate();
-    }
-}
-
 #[cfg(test)]
 mod test;
--- a/third_party/rust/rayon-core/src/spawn/test.rs
+++ b/third_party/rust/rayon-core/src/spawn/test.rs
@@ -1,20 +1,15 @@
-#[cfg(rayon_unstable)]
-use futures::{lazy, Future};
-
 use scope;
 use std::any::Any;
-use std::sync::{Arc, Mutex};
+use std::sync::Mutex;
 use std::sync::mpsc::channel;
 
-use {Configuration, ThreadPool};
+use ThreadPoolBuilder;
 use super::spawn;
-#[cfg(rayon_unstable)]
-use super::spawn_future;
 
 #[test]
 fn spawn_then_join_in_worker() {
     let (tx, rx) = channel();
     scope(move |_| {
         spawn(move || tx.send(22).unwrap());
     });
     assert_eq!(22, rx.recv().unwrap());
@@ -40,85 +35,35 @@ fn panic_fwd() {
             } else {
                 tx.send(2).unwrap();
             }
         } else {
             tx.send(3).unwrap();
         }
     };
 
-    let configuration = Configuration::new().panic_handler(panic_handler);
+    let builder = ThreadPoolBuilder::new().panic_handler(panic_handler);
 
-    ThreadPool::new(configuration).unwrap().spawn(move || panic!("Hello, world!"));
+    builder.build().unwrap().spawn(move || panic!("Hello, world!"));
 
     assert_eq!(1, rx.recv().unwrap());
 }
 
-#[test]
-#[cfg(rayon_unstable)]
-fn async_future_map() {
-    let data = Arc::new(Mutex::new(format!("Hello, ")));
-
-    let a = spawn_future(lazy({
-        let data = data.clone();
-        move || Ok::<_, ()>(data)
-    }));
-    let future = spawn_future(a.map(|data| {
-        let mut v = data.lock().unwrap();
-        v.push_str("world!");
-    }));
-    let () = future.wait().unwrap();
-
-    // future must have executed for the scope to have ended, even
-    // though we never invoked `wait` to observe its result
-    assert_eq!(&data.lock().unwrap()[..], "Hello, world!");
-}
-
-#[test]
-#[should_panic(expected = "Hello, world!")]
-#[cfg(rayon_unstable)]
-fn async_future_panic_prop() {
-    let future = spawn_future(lazy(move || Ok::<(), ()>(argh())));
-    let _ = future.rayon_wait(); // should panic, not return a value
-
-    fn argh() -> () {
-        if true {
-            panic!("Hello, world!");
-        }
-    }
-}
-
-#[test]
-#[cfg(rayon_unstable)]
-fn async_future_scope_interact() {
-    let future = spawn_future(lazy(move || Ok::<usize, ()>(22)));
-
-    let mut vec = vec![];
-    scope(|s| {
-        let future = s.spawn_future(future.map(|x| x * 2));
-        s.spawn(|_| {
-            vec.push(future.rayon_wait().unwrap());
-        }); // just because
-    });
-
-    assert_eq!(vec![44], vec);
-}
-
 /// Test what happens when the thread-pool is dropped but there are
 /// still active asynchronous tasks. We expect the thread-pool to stay
 /// alive and executing until those threads are complete.
 #[test]
 fn termination_while_things_are_executing() {
     let (tx0, rx0) = channel();
     let (tx1, rx1) = channel();
 
     // Create a thread-pool and spawn some code in it, but then drop
     // our reference to it.
     {
-        let thread_pool = ThreadPool::new(Configuration::new()).unwrap();
+        let thread_pool = ThreadPoolBuilder::new().build().unwrap();
         thread_pool.spawn(move || {
             let data = rx0.recv().unwrap();
 
             // At this point, we know the "main" reference to the
             // `ThreadPool` has been dropped, but there are still
             // active threads. Launch one more.
             spawn(move || {
                 tx1.send(data).unwrap();
@@ -139,18 +84,18 @@ fn custom_panic_handler_and_spawn() {
     // channel; since the closure is potentially executed in parallel
     // with itself, we have to wrap `tx` in a mutex.
     let tx = Mutex::new(tx);
     let panic_handler = move |e: Box<Any + Send>| {
         tx.lock().unwrap().send(e).unwrap();
     };
 
     // Execute an async that will panic.
-    let config = Configuration::new().panic_handler(panic_handler);
-    ThreadPool::new(config).unwrap().spawn(move || {
+    let builder = ThreadPoolBuilder::new().panic_handler(panic_handler);
+    builder.build().unwrap().spawn(move || {
         panic!("Hello, world!");
     });
 
     // Check that we got back the panic we expected.
     let error = rx.recv().unwrap();
     if let Some(&msg) = error.downcast_ref::<&str>() {
         assert_eq!(msg, "Hello, world!");
     } else {
@@ -167,18 +112,18 @@ fn custom_panic_handler_and_nested_spawn
     // with itself, we have to wrap `tx` in a mutex.
     let tx = Mutex::new(tx);
     let panic_handler = move |e| {
         tx.lock().unwrap().send(e).unwrap();
     };
 
     // Execute an async that will (eventually) panic.
     const PANICS: usize = 3;
-    let config = Configuration::new().panic_handler(panic_handler);
-    ThreadPool::new(config).unwrap().spawn(move || {
+    let builder = ThreadPoolBuilder::new().panic_handler(panic_handler);
+    builder.build().unwrap().spawn(move || {
         // launch 3 nested spawn-asyncs; these should be in the same
         // thread-pool and hence inherit the same panic handler
         for _ in 0 .. PANICS {
             spawn(move || {
                 panic!("Hello, world!");
             });
         }
     });
--- a/third_party/rust/rayon-core/src/test.rs
+++ b/third_party/rust/rayon-core/src/test.rs
@@ -1,18 +1,19 @@
 #![cfg(test)]
 
+#[allow(deprecated)]
 use Configuration;
+use {ThreadPoolBuilder, ThreadPoolBuildError};
 use std::sync::{Arc, Barrier};
 use std::sync::atomic::{AtomicUsize, Ordering};
-use thread_pool::*;
 
 #[test]
 fn worker_thread_index() {
-    let pool = ThreadPool::new(Configuration::new().num_threads(22)).unwrap();
+    let pool = ThreadPoolBuilder::new().num_threads(22).build().unwrap();
     assert_eq!(pool.current_num_threads(), 22);
     assert_eq!(pool.current_thread_index(), None);
     let index = pool.install(|| pool.current_thread_index().unwrap());
     assert!(index < 22);
 }
 
 #[test]
 fn start_callback_called() {
@@ -23,20 +24,20 @@ fn start_callback_called() {
 
     let b = barrier.clone();
     let nc = n_called.clone();
     let start_handler = move |_| {
         nc.fetch_add(1, Ordering::SeqCst);
         b.wait();
     };
 
-    let conf = Configuration::new()
+    let conf = ThreadPoolBuilder::new()
         .num_threads(n_threads)
         .start_handler(start_handler);
-    let _ = ThreadPool::new(conf).unwrap();
+    let _ = conf.build().unwrap();
 
     // Wait for all the threads to have been scheduled to run.
     barrier.wait();
 
     // The handler must have been called on every started thread.
     assert_eq!(n_called.load(Ordering::SeqCst), n_threads);
 }
 
@@ -49,21 +50,21 @@ fn exit_callback_called() {
 
     let b = barrier.clone();
     let nc = n_called.clone();
     let exit_handler = move |_| {
         nc.fetch_add(1, Ordering::SeqCst);
         b.wait();
     };
 
-    let conf = Configuration::new()
+    let conf = ThreadPoolBuilder::new()
         .num_threads(n_threads)
         .exit_handler(exit_handler);
     {
-        let _ = ThreadPool::new(conf).unwrap();
+        let _ = conf.build().unwrap();
         // Drop the pool so it stops the running threads.
     }
 
     // Wait for all the threads to have been scheduled to run.
     barrier.wait();
 
     // The handler must have been called on every exiting thread.
     assert_eq!(n_called.load(Ordering::SeqCst), n_threads);
@@ -91,30 +92,66 @@ fn handler_panics_handled_correctly() {
         let val = nc.fetch_add(1, Ordering::SeqCst);
         if val < n_threads {
             sb.wait();
         } else {
             eb.wait();
         }
     };
 
-    let conf = Configuration::new()
+    let conf = ThreadPoolBuilder::new()
         .num_threads(n_threads)
         .start_handler(start_handler)
         .exit_handler(exit_handler)
         .panic_handler(panic_handler);
     {
-        let _ = ThreadPool::new(conf).unwrap();
+        let _ = conf.build().unwrap();
 
         // Wait for all the threads to start, panic in the start handler,
         // and be taken care of by the panic handler.
         start_barrier.wait();
 
         // Drop the pool so it stops the running threads.
     }
 
     // Wait for all the threads to exit, panic in the exit handler,
     // and be taken care of by the panic handler.
     exit_barrier.wait();
 
     // The panic handler must have been called twice on every thread.
     assert_eq!(n_called.load(Ordering::SeqCst), 2 * n_threads);
 }
+
+#[test]
+#[allow(deprecated)]
+fn check_config_build() {
+    let pool = ThreadPoolBuilder::new().num_threads(22).build().unwrap();
+    assert_eq!(pool.current_num_threads(), 22);
+}
+
+
+/// Helper used by check_error_send_sync to ensure ThreadPoolBuildError is Send + Sync
+fn _send_sync<T: Send + Sync>() { }
+
+#[test]
+fn check_error_send_sync() {
+    _send_sync::<ThreadPoolBuildError>();
+}
+
+#[allow(deprecated)]
+#[test]
+fn configuration() {
+    let start_handler = move |_| { };
+    let exit_handler = move |_| {  };
+    let panic_handler = move |_| { };
+    let thread_name = move |i| { format!("thread_name_{}", i) };
+
+    // Ensure we can call all public methods on Configuration
+    Configuration::new()
+        .thread_name(thread_name)
+        .num_threads(5)
+        .panic_handler(panic_handler)
+        .stack_size(4e6 as usize)
+        .breadth_first()
+        .start_handler(start_handler)
+        .exit_handler(exit_handler)
+        .build().unwrap();
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon-core/src/thread_pool/internal.rs
@@ -0,0 +1,66 @@
+#![cfg(rayon_unstable)]
+
+use internal::task::{ScopeHandle, ToScopeHandle, Task};
+use registry::Registry;
+use std::any::Any;
+use std::fmt;
+use std::sync::Arc;
+use super::ThreadPool;
+
+impl ToScopeHandle<'static> for ThreadPool {
+    type ScopeHandle = ThreadPoolScopeHandle;
+
+    fn to_scope_handle(&self) -> Self::ScopeHandle {
+        unsafe { ThreadPoolScopeHandle::new(self.registry.clone()) }
+    }
+}
+
+pub struct ThreadPoolScopeHandle {
+    registry: Arc<Registry>
+}
+
+impl fmt::Debug for ThreadPoolScopeHandle {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        fmt.debug_struct("ThreadPoolScopeHandle")
+            .field("pool", &self.registry.id())
+            .finish()
+    }
+}
+
+impl ThreadPoolScopeHandle {
+    /// Caller asserts that the registry has not yet terminated.
+    unsafe fn new(registry: Arc<Registry>) -> Self {
+        registry.increment_terminate_count();
+        ThreadPoolScopeHandle { registry: registry }
+    }
+}
+
+impl Drop for ThreadPoolScopeHandle {
+    fn drop(&mut self) {
+        self.registry.terminate();
+    }
+}
+
+/// We assert that:
+///
+/// (a) the scope remains valid until a completion method
+///     is called. In this case, "remains valid" means that the
+///     registry is not terminated. This is true because we
+///     acquire a "termination count" in `ThreadPoolScopeHandle::new()`
+///     which is not released until the handle is dropped, after
+///     `ok()` or `panicked()` has been invoked.
+/// (b) the lifetime `'static` will not end until a completion
+///     method is called. This is true because `'static` doesn't
+///     end until the end of the program.
+unsafe impl ScopeHandle<'static> for ThreadPoolScopeHandle {
+    unsafe fn spawn_task<T: Task + 'static>(&self, task: Arc<T>) {
+        self.registry.submit_task(task);
+    }
+
+    fn ok(self) {
+    }
+
+    fn panicked(self, err: Box<Any + Send>) {
+        self.registry.handle_panic(err);
+    }
+}
--- a/third_party/rust/rayon-core/src/thread_pool/mod.rs
+++ b/third_party/rust/rayon-core/src/thread_pool/mod.rs
@@ -1,65 +1,73 @@
+//! Contains support for user-managed thread pools, represented by
+//! the [`ThreadPool`] type (see that struct for details).
+//!
+//! [`ThreadPool`]: struct.ThreadPool.html
+
+#[allow(deprecated)]
 use Configuration;
-#[cfg(rayon_unstable)]
-use future::{Future, RayonFuture};
-use latch::LockLatch;
-#[allow(unused_imports)]
-use log::Event::*;
-use job::StackJob;
+use {ThreadPoolBuilder, ThreadPoolBuildError};
 use join;
 use {scope, Scope};
 use spawn;
 use std::sync::Arc;
 use std::error::Error;
+use std::fmt;
 use registry::{Registry, WorkerThread};
 
+mod internal;
 mod test;
-/// # ThreadPool
+
+/// Represents a user created [thread-pool].
 ///
-/// The [`ThreadPool`] struct represents a user created [thread-pool]. [`ThreadPool::new()`]
-/// takes a [`Configuration`] struct that you can use to specify the number and/or
-/// names of threads in the pool. You can then execute functions explicitly within
-/// this [`ThreadPool`] using [`ThreadPool::install()`]. By contrast, top level
-/// rayon functions (like `join()`)  will execute implicitly within the current thread-pool.
-/// 
+/// Use a [`ThreadPoolBuilder`] to specify the number and/or names of threads
+/// in the pool. After calling [`ThreadPoolBuilder::build()`], you can then
+/// execute functions explicitly within this [`ThreadPool`] using
+/// [`ThreadPool::install()`]. By contrast, top level rayon functions
+/// (like `join()`) will execute implicitly within the current thread-pool.
+///
 ///
 /// ## Creating a ThreadPool
 ///
 /// ```rust
-///    # use rayon_core as rayon;
-///
-///    let pool = rayon::ThreadPool::new(rayon::Configuration::new().num_threads(8)).unwrap();
+/// # use rayon_core as rayon;
+/// let pool = rayon::ThreadPoolBuilder::new().num_threads(8).build().unwrap();
 /// ```
 ///
-/// [`install()`] executes a closure in one of the `ThreadPool`'s threads. In addition, 
-/// any other rayon operations called inside of `install()` will also execute in the
-/// context of the `ThreadPool`.
+/// [`install()`][`ThreadPool::install()`] executes a closure in one of the `ThreadPool`'s
+/// threads. In addition, any other rayon operations called inside of `install()` will also
+/// execute in the context of the `ThreadPool`.
 ///
 /// When the `ThreadPool` is dropped, that's a signal for the threads it manages to terminate:
 /// they will complete executing any remaining work that you have spawned and then shut
 /// down automatically.
 ///
 ///
 /// [thread-pool]: https://en.wikipedia.org/wiki/Thread_pool
 /// [`ThreadPool`]: struct.ThreadPool.html
 /// [`ThreadPool::new()`]: struct.ThreadPool.html#method.new
-/// [`Configuration`]: struct.Configuration.html
+/// [`ThreadPoolBuilder`]: struct.ThreadPoolBuilder.html
+/// [`ThreadPoolBuilder::build()`]: struct.ThreadPoolBuilder.html#method.build
 /// [`ThreadPool::install()`]: struct.ThreadPool.html#method.install
 pub struct ThreadPool {
     registry: Arc<Registry>,
 }
 
+pub fn build(builder: ThreadPoolBuilder) -> Result<ThreadPool, ThreadPoolBuildError> {
+    let registry = try!(Registry::new(builder));
+    Ok(ThreadPool { registry: registry })
+}
+
 impl ThreadPool {
-    /// Constructs a new thread pool with the given configuration. If
-    /// the configuration is not valid, returns a suitable `Err`
-    /// result.  See `InitError` for more details.
+    #[deprecated(note = "Use `ThreadPoolBuilder::build`")]
+    #[allow(deprecated)]
+    /// Deprecated in favor of `ThreadPoolBuilder::build`.
     pub fn new(configuration: Configuration) -> Result<ThreadPool, Box<Error>> {
-        let registry = try!(Registry::new(configuration));
-        Ok(ThreadPool { registry: registry })
+        build(configuration.into_builder()).map_err(|e| e.into())
     }
 
     /// Returns a handle to the global thread pool. This is the pool
     /// that Rayon will use by default when you perform a `join()` or
     /// `scope()` operation, if no other thread-pool is installed. If
     /// no global thread-pool has yet been started when this function
     /// is called, then the global thread-pool will be created (with
     /// the default configuration). If you wish to configure the
@@ -87,79 +95,76 @@ impl ThreadPool {
     /// thread-local data from the current thread will not be
     /// accessible.
     ///
     /// # Panics
     ///
     /// If `op` should panic, that panic will be propagated.
     ///
     /// ## Using `install()`
-    ///  
+    ///
     /// ```rust
     ///    # use rayon_core as rayon;
     ///    fn main() {
-    ///         let pool = rayon::ThreadPool::new(rayon::Configuration::new().num_threads(8)).unwrap();
-    ///         let n = pool.install(|| fib(20)); 
+    ///         let pool = rayon::ThreadPoolBuilder::new().num_threads(8).build().unwrap();
+    ///         let n = pool.install(|| fib(20));
     ///         println!("{}", n);
     ///    }
     ///
     ///    fn fib(n: usize) -> usize {
     ///         if n == 0 || n == 1 {
     ///             return n;
     ///         }
     ///         let (a, b) = rayon::join(|| fib(n - 1), || fib(n - 2)); // runs inside of `pool`
     ///         return a + b;
     ///     }
     /// ```
     pub fn install<OP, R>(&self, op: OP) -> R
-        where OP: FnOnce() -> R + Send
+        where OP: FnOnce() -> R + Send,
+              R: Send
     {
-        unsafe {
-            let job_a = StackJob::new(op, LockLatch::new());
-            self.registry.inject(&[job_a.as_job_ref()]);
-            job_a.latch.wait();
-            job_a.into_result()
-        }
+        self.registry.in_worker(|_, _| op())
     }
 
     /// Returns the (current) number of threads in the thread pool.
     ///
-    /// ### Future compatibility note
+    /// # Future compatibility note
     ///
     /// Note that unless this thread-pool was created with a
-    /// configuration that specifies the number of threads, then this
-    /// number may vary over time in future versions (see [the
+    /// [`ThreadPoolBuilder`] that specifies the number of threads,
+    /// then this number may vary over time in future versions (see [the
     /// `num_threads()` method for details][snt]).
     ///
-    /// [snt]: struct.Configuration.html#method.num_threads
+    /// [snt]: struct.ThreadPoolBuilder.html#method.num_threads
+    /// [`ThreadPoolBuilder`]: struct.ThreadPoolBuilder.html
     #[inline]
     pub fn current_num_threads(&self) -> usize {
         self.registry.num_threads()
     }
 
     /// If called from a Rayon worker thread in this thread-pool,
     /// returns the index of that thread; if not called from a Rayon
     /// thread, or called from a Rayon thread that belongs to a
     /// different thread-pool, returns `None`.
     ///
     /// The index for a given thread will not change over the thread's
     /// lifetime. However, multiple threads may share the same index if
     /// they are in distinct thread-pools.
     ///
-    /// ### Future compatibility note
+    /// # Future compatibility note
     ///
     /// Currently, every thread-pool (including the global
     /// thread-pool) has a fixed number of threads, but this may
     /// change in future Rayon versions (see [the `num_threads()` method
     /// for details][snt]). In that case, the index for a
     /// thread would not change during its lifetime, but thread
     /// indices may wind up being reused if threads are terminated and
     /// restarted.
     ///
-    /// [snt]: struct.Configuration.html#method.num_threads
+    /// [snt]: struct.ThreadPoolBuilder.html#method.num_threads
     #[inline]
     pub fn current_thread_index(&self) -> Option<usize> {
         unsafe {
             let curr = WorkerThread::current();
             if curr.is_null() {
                 None
             } else if (*curr).registry().id() != self.registry.id() {
                 None
@@ -237,86 +242,56 @@ impl ThreadPool {
     ///
     /// [spawn]: struct.Scope.html#method.spawn
     pub fn spawn<OP>(&self, op: OP)
         where OP: FnOnce() + Send + 'static
     {
         // We assert that `self.registry` has not terminated.
         unsafe { spawn::spawn_in(op, &self.registry) }
     }
-
-    /// Spawns an asynchronous future in the thread pool. `spawn_future()` will inject 
-    /// jobs into the threadpool that are not tied to your current stack frame. This means 
-    /// `ThreadPool`'s `spawn` methods are not scoped. As a result, it cannot access data
-    /// owned by the stack.
-    ///
-    /// `spawn_future()` returns a `RayonFuture<F::Item, F::Error>`, allowing you to chain
-    /// multiple jobs togther.
-    ///
-    /// ## Using `spawn_future()`
-    ///
-    /// ```rust
-    ///    # extern crate rayon_core as rayon;
-    ///    extern crate futures;
-    ///    use futures::{future, Future};
-    ///    # fn main() {
-    ///
-    ///    let pool = rayon::ThreadPool::new(rayon::Configuration::new().num_threads(8)).unwrap();
-    ///
-    ///    let a = pool.spawn_future(future::lazy(move || Ok::<_, ()>(format!("Hello, "))));
-    ///    let b = pool.spawn_future(a.map(|mut data| {
-    ///                                        data.push_str("world");
-    ///                                        data
-    ///                                    }));
-    ///    let result = b.wait().unwrap(); // `Err` is impossible, so use `unwrap()` here
-    ///    println!("{:?}", result); // prints: "Hello, world!"
-    ///    # }
-    /// ```
-    ///
-    /// See also: [the `spawn_future()` function defined on scopes][spawn_future].
-    ///
-    /// [spawn_future]: struct.Scope.html#method.spawn_future
-    #[cfg(rayon_unstable)]
-    pub fn spawn_future<F>(&self, future: F) -> RayonFuture<F::Item, F::Error>
-        where F: Future + Send + 'static
-    {
-        // We assert that `self.registry` has not yet terminated.
-        unsafe { spawn::spawn_future_in(future, self.registry.clone()) }
-    }
 }
 
 impl Drop for ThreadPool {
     fn drop(&mut self) {
         self.registry.terminate();
     }
 }
 
+impl fmt::Debug for ThreadPool {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        fmt.debug_struct("ThreadPool")
+            .field("num_threads", &self.current_num_threads())
+            .field("id", &self.registry.id())
+            .finish()
+    }
+}
+
 /// If called from a Rayon worker thread, returns the index of that
 /// thread within its current pool; if not called from a Rayon thread,
 /// returns `None`.
 ///
 /// The index for a given thread will not change over the thread's
 /// lifetime. However, multiple threads may share the same index if
 /// they are in distinct thread-pools.
 ///
 /// See also: [the `ThreadPool::current_thread_index()` method][m].
 ///
 /// [m]: struct.ThreadPool.html#method.current_thread_index
 ///
-/// ### Future compatibility note
+/// # Future compatibility note
 ///
 /// Currently, every thread-pool (including the global
 /// thread-pool) has a fixed number of threads, but this may
 /// change in future Rayon versions (see [the `num_threads()` method
 /// for details][snt]). In that case, the index for a
 /// thread would not change during its lifetime, but thread
 /// indices may wind up being reused if threads are terminated and
 /// restarted.
 ///
-/// [snt]: struct.Configuration.html#method.num_threads
+/// [snt]: struct.ThreadPoolBuilder.html#method.num_threads
 #[inline]
 pub fn current_thread_index() -> Option<usize> {
     unsafe {
         let curr = WorkerThread::current();
         if curr.is_null() {
             None
         } else {
             Some((*curr).index())
--- a/third_party/rust/rayon-core/src/thread_pool/test.rs
+++ b/third_party/rust/rayon-core/src/thread_pool/test.rs
@@ -1,34 +1,36 @@
 #![cfg(test)]
 
 use std::sync::Arc;
 use std::sync::atomic::{AtomicUsize, Ordering};
 
+#[allow(deprecated)]
 use Configuration;
+use ThreadPoolBuilder;
 use join;
-use super::ThreadPool;
+use thread_pool::ThreadPool;
 use unwind;
 
 #[test]
 #[should_panic(expected = "Hello, world!")]
 fn panic_propagate() {
-    let thread_pool = ThreadPool::new(Configuration::new()).unwrap();
+    let thread_pool = ThreadPoolBuilder::new().build().unwrap();
     thread_pool.install(|| {
                             panic!("Hello, world!");
                         });
 }
 
 #[test]
 fn workers_stop() {
     let registry;
 
     {
         // once we exit this block, thread-pool will be dropped
-        let thread_pool = ThreadPool::new(Configuration::new().num_threads(22)).unwrap();
+        let thread_pool = ThreadPoolBuilder::new().num_threads(22).build().unwrap();
         registry = thread_pool.install(|| {
                                            // do some work on these threads
                                            join_a_lot(22);
 
                                            thread_pool.registry.clone()
                                        });
         assert_eq!(registry.num_threads(), 22);
     }
@@ -47,17 +49,17 @@ fn join_a_lot(n: usize) {
 #[test]
 fn sleeper_stop() {
     use std::{thread, time};
 
     let registry;
 
     {
         // once we exit this block, thread-pool will be dropped
-        let thread_pool = ThreadPool::new(Configuration::new().num_threads(22)).unwrap();
+        let thread_pool = ThreadPoolBuilder::new().num_threads(22).build().unwrap();
         registry = thread_pool.registry.clone();
 
         // Give time for at least some of the thread pool to fall asleep.
         thread::sleep(time::Duration::from_secs(1));
     }
 
     // once thread-pool is dropped, registry should terminate, which
     // should lead to worker threads stopping
@@ -93,47 +95,108 @@ fn failed_thread_stack() {
     // Note: we first tried to force failure with a `usize::MAX` stack, but
     // macOS and Windows weren't fazed, or at least didn't fail the way we want.
     // They work with `isize::MAX`, but 32-bit platforms may feasibly allocate a
     // 2GB stack, so it might not fail until the second thread.
     let stack_size = ::std::isize::MAX as usize;
 
     let (start_count, start_handler) = count_handler();
     let (exit_count, exit_handler) = count_handler();
-    let config = Configuration::new()
+    let builder = ThreadPoolBuilder::new()
         .num_threads(10)
         .stack_size(stack_size)
         .start_handler(move |i| start_handler(i))
         .exit_handler(move |i| exit_handler(i));
 
-    let pool = ThreadPool::new(config);
+    let pool = builder.build();
     assert!(pool.is_err(), "thread stack should have failed!");
 
     // With such a huge stack, 64-bit will probably fail on the first thread;
     // 32-bit might manage the first 2GB, but certainly fail the second.
     let start_count = wait_for_counter(start_count);
     assert!(start_count <= 1);
     assert_eq!(start_count, wait_for_counter(exit_count));
 }
 
 #[test]
 fn panic_thread_name() {
     let (start_count, start_handler) = count_handler();
     let (exit_count, exit_handler) = count_handler();
-    let config = Configuration::new()
+    let builder = ThreadPoolBuilder::new()
         .num_threads(10)
         .start_handler(move |i| start_handler(i))
         .exit_handler(move |i| exit_handler(i))
         .thread_name(|i| {
                          if i >= 5 {
                              panic!();
                          }
                          format!("panic_thread_name#{}", i)
                      });
 
-    let pool = unwind::halt_unwinding(|| ThreadPool::new(config));
+    let pool = unwind::halt_unwinding(|| builder.build());
     assert!(pool.is_err(), "thread-name panic should propagate!");
 
     // Assuming they're created in order, threads 0 through 4 should have
     // been started already, and then terminated by the panic.
     assert_eq!(5, wait_for_counter(start_count));
     assert_eq!(5, wait_for_counter(exit_count));
 }
+
+#[test]
+fn self_install() {
+    let pool = ThreadPoolBuilder::new().num_threads(1).build().unwrap();
+
+    // If the inner `install` blocks, then nothing will actually run it!
+    assert!(pool.install(|| pool.install(|| true)));
+}
+
+#[test]
+fn mutual_install() {
+    let pool1 = ThreadPoolBuilder::new().num_threads(1).build().unwrap();
+    let pool2 = ThreadPoolBuilder::new().num_threads(1).build().unwrap();
+
+    let ok = pool1.install(|| {
+        // This creates a dependency from `pool1` -> `pool2`
+        pool2.install(|| {
+            // This creates a dependency from `pool2` -> `pool1`
+            pool1.install(|| {
+               // If they blocked on inter-pool installs, there would be no
+               // threads left to run this!
+               true
+            })
+        })
+    });
+    assert!(ok);
+}
+
+#[test]
+fn mutual_install_sleepy() {
+    use std::{thread, time};
+
+    let pool1 = ThreadPoolBuilder::new().num_threads(1).build().unwrap();
+    let pool2 = ThreadPoolBuilder::new().num_threads(1).build().unwrap();
+
+    let ok = pool1.install(|| {
+        // This creates a dependency from `pool1` -> `pool2`
+        pool2.install(|| {
+            // Give `pool1` time to fall asleep.
+            thread::sleep(time::Duration::from_secs(1));
+
+            // This creates a dependency from `pool2` -> `pool1`
+            pool1.install(|| {
+               // Give `pool2` time to fall asleep.
+               thread::sleep(time::Duration::from_secs(1));
+
+               // If they blocked on inter-pool installs, there would be no
+               // threads left to run this!
+               true
+            })
+        })
+    });
+    assert!(ok);
+}
+
+#[test]
+#[allow(deprecated)]
+fn check_thread_pool_new() {
+    let pool = ThreadPool::new(Configuration::new().num_threads(22)).unwrap();
+    assert_eq!(pool.current_num_threads(), 22);
+}
--- a/third_party/rust/rayon-core/src/unwind.rs
+++ b/third_party/rust/rayon-core/src/unwind.rs
@@ -1,13 +1,12 @@
 //! Package up unwind recovery. Note that if you are in some sensitive
 //! place, you can use the `AbortIfPanic` helper to protect against
 //! accidental panics in the rayon code itself.
 
-use libc;
 use std::any::Any;
 use std::panic::{self, AssertUnwindSafe};
 use std::io::stderr;
 use std::io::prelude::*;
 use std::thread;
 
 /// Executes `f` and captures any panic, translating that panic into a
 /// `Err` result. The assumption is that any panic will be propagated
@@ -20,16 +19,27 @@ pub fn halt_unwinding<F, R>(func: F) -> 
 }
 
 pub fn resume_unwinding(payload: Box<Any + Send>) -> ! {
     panic::resume_unwind(payload)
 }
 
 pub struct AbortIfPanic;
 
+fn aborting() {
+    let _ = writeln!(&mut stderr(), "Rayon: detected unexpected panic; aborting");
+}
+
 impl Drop for AbortIfPanic {
+    #[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))]
     fn drop(&mut self) {
+        aborting();
+        ::std::process::abort(); // stable in rust 1.17
+    }
+
+    #[cfg(not(all(target_arch = "wasm32", not(target_os = "emscripten"))))]
+    fn drop(&mut self) {
+        aborting();
         unsafe {
-            let _ = writeln!(&mut stderr(), "Rayon: detected unexpected panic; aborting");
-            libc::abort();
+            ::libc::abort(); // used for compat before 1.17
         }
     }
 }
--- a/third_party/rust/rayon/.cargo-checksum.json
+++ b/third_party/rust/rayon/.cargo-checksum.json
@@ -1,1 +1,1 @@
-{"files":{".travis.yml":"f50507960a9ceb5659f17d3ca114880f6ea5a91981a1a7585c1ca0b2c32eafef","Cargo.toml":"a80d28f1e820a246d8c6ecd78fe19b5438716f048ed8ef1d4212b265c0e6f04d","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"0621878e61f0d0fda054bcbe02df75192c28bde1ecc8289cbd86aeba2dd72720","README.md":"40bcc4505c71815463bb4fef4ca2158091cdc97ac51dee49ab5559b62401b493","RELEASES.md":"41fcefb8206d971bb953b552b15c232bb290145e56ab2ecee8ce9f4e0c685c98","appveyor.yml":"7e9559e0e28af2d5da74b1d8598dffc0f42817b0e7f0fefda2d67dce1e6d5bc6","ci/highlander.sh":"68b1e5c3be188a4c503d0e6b12e8409459947f560dcf92ec0658b27e136d44f2","examples/README.md":"537e6fe9cf696fd4ada9c08bf31055ed6e366ed65668a523e7c5fde77f97c8d3","examples/cpu_monitor.rs":"8e65cbc35762eaf3c108071d00b260293527c61f5e3089248366d71440f362ca","scripts/analyze.sh":"35214a036d8d0ead5400562cd72b131376849f65d63d04122c21886a57049934","src/collections/binary_heap.rs":"c9b77ba76e03f73ce192bdd9ff10e5bf1becbac51b229d2edcf215b360020c7a","src/collections/btree_map.rs":"e04e373c37266a65d880f8f7449aec4ae380de77e6e7705344e90ab45b81e336","src/collections/btree_set.rs":"1f2e75b0c1bd8fbf4405a57fb735fe071acb68b988fd58f1f3cd06e678ae4da4","src/collections/hash_map.rs":"c53e4157e07a21d49e011ab1fb3d80d54790059a81827ec8b6ead063438c4ecc","src/collections/hash_set.rs":"2341e7193a156030cc59f6b88984056ed31cef8bdaf36e76f2bebe7f29f1e954","src/collections/linked_list.rs":"675ee631db311a360424125e8b0d2fd6cf156b836d9874a7a06149fac82b51d5","src/collections/mod.rs":"24bbbe85c6e9c65ce7a3906595a68048bfe4c94c790563e1526920104dfb1906","src/collections/vec_deque.rs":"b152b6119ac543adfef92f852e3795322887cb5b252412991c685891f3152b20","src/delegate.rs":"df71e8a5d4a41717f713baa20b0a70a31325f82af217734aaaa36d8682dbd26d","src/iter/README.md":"e843627769d4f284dc927b86ae3d874894689607fa3d096b0a4f5f084f933d40","src/iter/chain.rs":"27c86e8764d93e5e27722be1f04149ffb2f2eeb77fd9e82d082547314e416813","src/iter/cloned.rs":"8ed86733ecab3452d301123e559c2daa3623c9883e8aef31753d06ad21b4e7b2","src/iter/collect/consumer.rs":"c8f03c471eb3eea789997d5e722bf35f7228a4bd2d2b3c16ca01ce647ebbaba1","src/iter/collect/mod.rs":"e96064e3c70473871611a285c2fab59eaede7bc5efa6c073dbf6f398f1527c2a","src/iter/collect/test.rs":"3305b01a4f7526b89bda7c15578678849b512706baa9ef78fdd3ada7cefc2212","src/iter/enumerate.rs":"8dc63307bb005726efc95557879d67df3623352c1c92e59e0b7c83add9b2bcd3","src/iter/extend.rs":"a7d9501fc44e99e0ee903c1efe5454222eb0a102836f9158e709e15d290cd7d6","src/iter/filter.rs":"85dc8579a63d80e63a12ad3ab9259bc8222e801642b62d37a20da2b29f5b9c03","src/iter/filter_map.rs":"79a13c4483aac78367d0a818f7c8176bab03722c1f9b4db93ee59810f4172890","src/iter/find.rs":"f52f482947bde2717e9eb8163a8993fbaf19ddd561c3b320f878953e3ff18bbe","src/iter/find_first_last/mod.rs":"0524686a96b8aeb02ac37cdbf32f9ab57a943f628ee0399c3bd7fb6b8e1a0294","src/iter/find_first_last/test.rs":"f71d35df36c553b239a07c75b0c961f001bcafb5df774c165e18ba06b2db0350","src/iter/flat_map.rs":"962f969dc6266009d70ad9ee47b210addb5a988f1f20ec33de342ea2427d40d7","src/iter/fold.rs":"c4c7f1e1bb7684cde32146d7c11e6443aabf63d692d160fc4212b8d7e421e560","src/iter/for_each.rs":"fce9dbd6abc34915616e1663d2fb67f75d3071cdef88e57e40fac91ad686310e","src/iter/from_par_iter.rs":"9124e211101336f5ecdf079b8e63757d113506c7cac41c4a2963f0d5062c4dcd","src/iter/inspect.rs":"9cc5e779470d8ffb76643e377920f1c07dabcb1152e4639eb866671ebba817b3","src/iter/internal.rs":"acd673495e30ad13d50413693bb9d53857f9176fc362e5962efbdaa6e883d618","src/iter/len.rs":"d
804ecd7bb6f7d7a2e103896c705f2c0129db9128ff197af7a010a1c5e87bd7d","src/iter/map.rs":"181a8ce6a74421c363583c2c4e057379728cef99980a777a77eff519827aae2a","src/iter/map_with.rs":"589ffb47024d5272c97bbfdfa373e02586bac024cdea75cb9d9bf4acf6177057","src/iter/mod.rs":"c24bcae57b74e044576ce251c797130351931b30bda09da14bb6908fd8bd89bf","src/iter/noop.rs":"8dd7608547083820eae7b3863d06e97112db9d8f0f7c62c6ee854402f90c3f5d","src/iter/product.rs":"5c19bc2df086451352aa3aa2d0a005b7bca47899b8131d4e8551237a0bdb9f84","src/iter/reduce.rs":"185fabd1cc987778bda7c4ecf7217ba5e5e47d762313e0064529f9a9246ff930","src/iter/rev.rs":"ce80f0fdb185c72b6f4dff7fc13113246f12be8c4b0bdcf89c4eefe4578527e0","src/iter/skip.rs":"bd2ae4a57b59609c786b8a6457c8003d56b5ecd952593b3ef1e6568903484ccb","src/iter/splitter.rs":"5a728b13662c46b4a279c001130629728c00c77127df064a7e775a7d684b1c2a","src/iter/sum.rs":"5448a773625aaafd7c11e8d503460b79c2c4e9ff3b7542ad723702f01f9d3800","src/iter/take.rs":"0f9dcf1bac14ca582546ce230077b37c9ed71992c5b8250c96f01100dc5c42cd","src/iter/test.rs":"5640f015e5d43de506f4b04af91597731a699457637ee806d1d2b534fa7cbabf","src/iter/unzip.rs":"1ac7382f52d1201a1aed0827d057560462dd99406075b52ae13b50ba3099c696","src/iter/while_some.rs":"0b2f96838604b616aaffa00da9cfdb83d00324204c644b87b2beb2e1e1733132","src/iter/zip.rs":"ae3546beece5c3c67110a8c0bd620522cb346c7b07cc28eb3f55200d3144ea35","src/lib.rs":"eb32d5fdde656bfcb8c5d526c9083f3d318c45dd569858b89966bad240116740","src/option.rs":"40b696ae2be883f046cb24ecb52e48a27efbf9225d4bead9d74fbeecc280db26","src/prelude.rs":"270985c11ce2fb1699c17bb52e116d0d32f8924c2aa4188450d9462d736a4d29","src/private.rs":"951f15fc1e9446cc47a7a932cdd9f858aa5df7da7fa070d196cd05de190e6952","src/range.rs":"fa3cacae156a3fa274e2b09c04a6965cda574b9838a2cc7916b74d705b94bd44","src/result.rs":"5223be4a32c720a0d21bce341ce9207e11e7f838ec1146b2a37e9fabc287db45","src/slice/mergesort.rs":"4d0e12c08a5ac01a60cb2234f979034c92ca8f9192f5f67aa33a5e1877e608db","src/slice/mod.rs":"5870189dc9ca8f51c93194539c066041b041a254b8273227794f97ca1051ba0f","src/slice/quicksort.rs":"b930d388f79cceff521c7c04c3e8b9714cb57b5d4e68e470fe63259a550e2007","src/slice/test.rs":"512424e5884533f425d8ce62b282c57062e8373d9a6ee16699cd45217b7efab6","src/split_producer.rs":"424982cf31708c7121e950fd6ed69768bd132ef67597692d550e2e361e53b5a6","src/str.rs":"c26576143452fce791915c7e8faaab102ab816e9e42327fb1496ca6e07cb1f4c","src/test.rs":"ab51bf6a72f1eae6e214643be5de58959060ee54727efb36d7485aaf7c9a4093","src/vec.rs":"00ff40cf41ac4603bf545353a2d0302330952731351247869dfc2318cbb941e3","tests/compile-fail-unstable/README.md":"53e7fb9aa143094c4ad8a4f3f954b125559d3f09e40d3cb4ab43dd06fc22e35b","tests/compile-fail-unstable/future_escape.rs":"f876eceb4c7ff26fd538c1e5d4cae6c424516563face45842cb2d0fa3bff5131","tests/compile-fail/README.md":"3a7477331161672cf83c67a2f38aeb989cb35a9bcb00c3ddf0cc123315f5cd00","tests/compile-fail/cannot_collect_filtermap_data.rs":"730a597fc86b79edf0921999f343d376c80f369e65f9109760be8b81037d4033","tests/compile-fail/cannot_zip_filtered_data.rs":"9271f21d2d1146e9e588936a15b7c54507b050039c04d902f09516ed1bcf2728","tests/compile-fail/cell_par_iter.rs":"3a20e18d488b0769f7b7679387cfe05f383b657dd07d155f3d4391676e36e857","tests/compile-fail/must_use.rs":"a139d6e6e3fbba78993d723a83904a864565bbf86aea8492043865d2a7ab4dc6","tests/compile-fail/no_send_par_iter.rs":"ce3346fb657f0684e64ff5f4870ab3ef4a468dd47bfdc7a117633e720299f300","tests/compile-fail/quicksort_race1.rs":"983cb334de39ef9fc6d3bdf40497d6cba9db50d6ea7932b0fbd628e8ba6f82df","tests/compile-fail/quic
ksort_race2.rs":"7e9d4477e6b34d075405e86fbb617c5e60ccf1729f81ef04907282106257fc64","tests/compile-fail/quicksort_race3.rs":"8ae1b5285acb75097358d8d424bf2c08a6044937edb57aa647ca521f30240d16","tests/compile-fail/rc_par_iter.rs":"2518f55a035db28c446faedfc07e2cede9d18f6bb5d53a69e0a533538b1b22c3","tests/compile-fail/rc_return.rs":"c9b1cf6d94f3eff00674ee9820b34b2ae3fa5f29bdf1f389edfe04bd82930c76","tests/compile-fail/rc_upvar.rs":"200c4583e17eb09547cabcf1b2f3ab02d2176f58e11f6ae9fff864ff9a6c9655","tests/compile-fail/scope_join_bad.rs":"2ad7d09f2273860a0e7c6d9b65356141339b96b189b7c8403aeccdcb1c0c9060","tests/run-fail-unstable/README.md":"448cd23346a2a71d581c5afbb61daa0349892ec5ad78d18730190127d2d11487","tests/run-fail/README.md":"2b9a7abb977671af7123478f9c4d2f596cd446869a7adaaf306656a2767bb80a","tests/run-fail/iter_panic.rs":"a897798038b89125d13883a7040341c0666bbde9f71a2bebed96ead8839bfaa3","tests/run-fail/simple_panic.rs":"b9d1cd0bedb6b22e4fd3df5ae948e804c1119d5e7a98e9eb4fdcf6281fd504ed","tests/run-pass-unstable/README.md":"2f996550ba5946cf6721b0ee6420f77555fc9a796ce0543fab7931b7a5e4ef5b","tests/run-pass/README.md":"324816facdb78da40a1539cdae8c4bc9d4d027451c167b5f087abc3239b199bf","tests/run-pass/double_init_fail.rs":"841735a15b819bf3ea5e50e758e0825575b00652897cb0fecf25d723a537f1b9","tests/run-pass/init_zero_threads.rs":"42ff55d2396a9feb0be67aaad18252a0a92f778b9f5e9564f35634488228e37c","tests/run-pass/named-threads.rs":"511a75feed739de04033efe4bb2986fd1549bd03a086db8e12ec3b9751d27771","tests/run-pass/scope_join.rs":"65894d7bfde3d2ad7f18a54dbc786bcd756e1d00ce4d7cef503b3c74938e06d7","tests/run-pass/sort-panic-safe.rs":"14319461ae4c21800bca5befb2d7147f315a86aa2235feeada9029cc25f46ffd","tests/run-pass/stack_overflow_crash.rs":"a76489f74f2b9a308f3e56317e87d6566708b0b8c635ffec9a46305da4d48f77"},"package":"b614fe08b6665cb9a231d07ac1364b0ef3cb3698f1239ee0c4c3a88a524f54c8"}
\ No newline at end of file
+{"files":{".travis.yml":"a30a6d288c6f4784c5051648fd2ef1d1210de8a05f6528c9536e5eaad6d43e8f","Cargo.toml":"010683202219220dc69e9e9554e50541087f76c6f93be78422b72b50efce0a51","FAQ.md":"db561f22a7919dfe3782948338b549d0158e28642aa901c1e41eca89cd563d15","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"0621878e61f0d0fda054bcbe02df75192c28bde1ecc8289cbd86aeba2dd72720","README.md":"7dd9281e94b77dd5e9955f4c3e50d4a944fb4d9e059c41daf61add197aee1218","RELEASES.md":"c2f0e8dad0478e84297abf885cbf6769d4e10ba1efcbc6f80b36c7529c51dcbd","appveyor.yml":"e89cce4a8cb578879335d18df9d99c95176bde4a3630aeebf92166de32aa5b52","bors.toml":"4fc007a5e68851d22f266560b3c5fdd80e4227a38f159e46928029cc7509a4ec","ci/highlander.sh":"68b1e5c3be188a4c503d0e6b12e8409459947f560dcf92ec0658b27e136d44f2","examples/README.md":"537e6fe9cf696fd4ada9c08bf31055ed6e366ed65668a523e7c5fde77f97c8d3","examples/cpu_monitor.rs":"43b27ffabf7ca0071a1ff09ba92ad9e70a038416cace05a26ad04415849269a4","scripts/analyze.sh":"35214a036d8d0ead5400562cd72b131376849f65d63d04122c21886a57049934","src/collections/binary_heap.rs":"2464198f67cd479d099d2819d68d4a1afb9a57189126a9cca664a5f58c876453","src/collections/btree_map.rs":"3879515e2eae95b61314eb5a782a27a2156927911d686126c3d9b58a676fbcaf","src/collections/btree_set.rs":"cea05b6d23e55e6fc48e6aa07d4bf85aad37da80c829573160a669ce2e4dd35f","src/collections/hash_map.rs":"968caef3d4de5a325172f3e1d9af9916785f6f3ef2e6132351dde0c80ef0353a","src/collections/hash_set.rs":"4e4c10a503d4d99b26c860a9f029d3b9e8e8d7e9a340390fa040c6c06b63d546","src/collections/linked_list.rs":"2cd5cf0ed16d855e8f5c5fba8019e1657bea3c1b45493b1591d5284e7ed0acde","src/collections/mod.rs":"037660cbdd5907808f7df59fa69993582bf978ecaf125a83a1205452bb505a53","src/collections/vec_deque.rs":"73749f3f5fd11149325bcf50a4b6b1b80c1d0af7780c9e31ad0a1c4fc27b2f46","src/delegate.rs":"fa05c22227346baf147da03195e96dc0f705aaf7b2295f02f9f996c22295701e","src/iter/chain.rs":"ae3c0bcaf8da188484191c195f223de52da0948ed5d658f3fd584b54e5d379b8","src/iter/chunks.rs":"4d22053b4039618bb4d26fbbcb375e8ef623eb2ce85069ee119b13a654975671","src/iter/cloned.rs":"d74fbc5862e7d32f5ceebbaf56c0cc4a5b96d06e4b8f25d0a957e4e520f69dfd","src/iter/collect/consumer.rs":"0c274d39c84c1df6b95f886cd1b39f9fe0b0d643fece38e06c43b35b73668dd3","src/iter/collect/mod.rs":"fbb53517dd1c837b496bf6544cb646fc64b3f5f1b8cfdaa36c0f38416eeb56b9","src/iter/collect/test.rs":"8f7ea785c1d247c99a2e50d478bf8345496541538ac211f9a34267c9917eb3f0","src/iter/empty.rs":"7d999a21fb6b1b72490bd4fcd89229b6acef58892ecc4bdc3ce99a19a8c448ec","src/iter/enumerate.rs":"df96057813d3ca3ec1c06d07d18643aefedaf6351aece38395c9331ec2a6212d","src/iter/extend.rs":"91193164f55a626217f7783b2d7b997bd635e9ba3a0ef70197167764ddd9635e","src/iter/filter.rs":"78c78d94958afd9252fe998056f999d1c4656d24c50ca71821b50912cc6e0dc9","src/iter/filter_map.rs":"15377b61ab201d8ebcf64704850177d548e8a36fefff04306883e65e66bd1a12","src/iter/find.rs":"020e4152477da6ff5f8e9ed330dce6c6ac78a1fd9829a47e4cd3805d77ea464d","src/iter/find_first_last/mod.rs":"a57fff2fa97e51ab6eaa03bb3eabc357c9560f695cf8ca5198d2d0629957e7e8","src/iter/find_first_last/test.rs":"6ec625b7122f156b324499525abe832b9273a08b6e906965ffb1f43e57f93199","src/iter/flat_map.rs":"4501e4463eb1ce41ae9cf6e3f54a8efe9e7ea150f0a8e9221087f58903868afe","src/iter/flatten.rs":"0691e6c44becda646fe1d4896f7c77444e253e8ff2c0496251d23eee146efd6a","src/iter/fold.rs":"db7acdae1375ef2ae666737681dbdbf6563719f0fcffa54d62ec8d3a8db91f98","src/iter/for_each.rs":"e1dc5bd14b08288377000fb731ee0
88b4d1616585400a3fdb9c565b1758f9fbc","src/iter/from_par_iter.rs":"038f5c7f7280636bd23fafd275594ae3ba912e534841a3d125619084f9c48775","src/iter/inspect.rs":"bb81f58f3cce56ad1d25928217c290b01d81204cb9c04681c93175b9782a3c2c","src/iter/interleave.rs":"470fcbeb543ba0f4002c1ddbf8b2afe7ef12d01d9d9888ed9a8c17d056a0b1a4","src/iter/interleave_shortest.rs":"f4ecde4ae3957b870eb34c4fc299656532650e6397c2e0814c283232dc428acf","src/iter/intersperse.rs":"11c00e7b99e43d9256ed1371ef22345c5ff4b4373dedbfa3d749450e45a4b3ee","src/iter/len.rs":"a8eec9c999221d0562256655ad24e1b194b0728c04cc45e327919c7930451972","src/iter/map.rs":"f5bf66318dec1c596b276f705111bdd4cfeac316e6f4f0a38ef1ec967a0455d7","src/iter/map_with.rs":"94408ad228fa4cba3096cb12cce9dcfeb4b952b4dedab2cc85b2fee495394077","src/iter/mod.rs":"6246e1a6d556fd1e25c5547a32b60309dfff04787255a8e615ca734190170339","src/iter/noop.rs":"4c0f09ce842e136701ef76931eab383aa45b4501d7c9ee10444e7ba6ae50a289","src/iter/once.rs":"028b1b8fb983b7600aa0fd3883c4525653e9717c3d2e8630d4294e490afeabf7","src/iter/plumbing/README.md":"4da8272f6eafe7d760f85388e980d22ec26834d518fb069a45183fd44b70a6fb","src/iter/plumbing/mod.rs":"393597b72fc71156f769c0b0dcfb52fe9a58414e6f34f580e420f1ff4a33bbbe","src/iter/product.rs":"bdeb06e6bef8c517f12e75e829727274ebd120c40a0e63ac465f1f527522a7ad","src/iter/reduce.rs":"b7aaffef92e5f46f6a5fcd7d49cc818a6c21e273a2824e095511c024385de96d","src/iter/repeat.rs":"5f83f9c3efd1accbfdb26c24b9fbda6dbcccc350942f95468b029094d32f8fc5","src/iter/rev.rs":"5e897824b6ecc0d1c555fa13c51353bbe73f6b7fd3b57006a2aa5207613bce5d","src/iter/skip.rs":"7ec38e6252e7ccc3de637aa938e9fd006fb070c17079cc15c87825662ebbd8a1","src/iter/splitter.rs":"a1bdd5c45847d54aeed6cb938f99e091ef0005a15c7693455ee06c3ddbb4f10d","src/iter/sum.rs":"e0a0046d6b9a8b257f9c8419d0f48a208da27bfb954080c9f07b9633eb264e9b","src/iter/take.rs":"df114745f5904e1861678f0513f4c8f6b1c4177e61dad779f5911deb272e27d6","src/iter/test.rs":"db281c430651fc7a68f7baa36ae5944206f40eefe5809281773390d00bea78ef","src/iter/unzip.rs":"afe306cfe505a28b27605a500280807ff8650cdf69a73f8a1fb18fdf5f4da410","src/iter/update.rs":"ae2a254fbce9e464faf862b5038db40953f59ed37a0e1dd5e03843f7425882bc","src/iter/while_some.rs":"4e0bdd0d6ac55ec0ef38ff9f485ca6d5a9fbcff50d74b894b93bdcaab2577354","src/iter/zip.rs":"e574427dc637e17613dc0ff1300155d81395146acf6aa09aa01c5690b1571733","src/iter/zip_eq.rs":"a1e9c471ee0b9801ee5d17149ff0304f837dd5ba6008d43752542607413ba9be","src/lib.rs":"934e2c23789ddbe9e8d4ee3f883eb5a0ac4a7a0df9ad909415bb031a615466e7","src/math.rs":"7a2c88133052d770ba61228e3096752c9fbd5f651a56c5130c583da4d5cd8bc1","src/option.rs":"a1dae559adfb27e0261d73892b683866fa90f156cb67392cc74fd0fdbd6c5558","src/par_either.rs":"cc636fcce8a467f351116281694005a7a66d5e8231fca744daa37f40d545a191","src/prelude.rs":"270985c11ce2fb1699c17bb52e116d0d32f8924c2aa4188450d9462d736a4d29","src/private.rs":"b201603c357d51756107fad58bfe97a22a057c85e018fcedbe3831ee38e850c4","src/range.rs":"1726cc1e0d192781ce13b33c959a980df2bc6474523ae1a80010bc7fc07b288a","src/result.rs":"6807c034a859d8c3d760effafd486ae2c006840e966e7439aea84e78aecb812b","src/slice/mergesort.rs":"4d0e12c08a5ac01a60cb2234f979034c92ca8f9192f5f67aa33a5e1877e608db","src/slice/mod.rs":"5af0e945446cf1101998e4674d02f6f515eb55cb54f4db098de9b9fe8a3789b8","src/slice/quicksort.rs":"b930d388f79cceff521c7c04c3e8b9714cb57b5d4e68e470fe63259a550e2007","src/slice/test.rs":"f744a92243e249fbf1750a227a671524d47054ed5468d4c3e00a7c17f1621e5b","src/split_producer.rs":"121a438bcf986fb36bf15bb31881d849a9905901b62a5c588f14d6fd8fc9e5f2","src/
str.rs":"d55ab34ba4880a74fc0ac8e89a43646fd24cbc9ac829d528e1ed0c4262ca462a","src/test.rs":"397e328e5c5572c40e28c747ec94776b36ae9c79683ea242eeb8ed4e20f287f1","src/vec.rs":"bbe976bcb3cb4119ef746769d531e8c687bc8ef1ce56b22e4a17a8b48e8ce6c0","tests/clones.rs":"b56263b6c571720476c556a3c49e0b5ad762b88832ebbfa41c47319c14c9731a","tests/compile-fail-unstable/README.md":"53e7fb9aa143094c4ad8a4f3f954b125559d3f09e40d3cb4ab43dd06fc22e35b","tests/compile-fail/README.md":"3a7477331161672cf83c67a2f38aeb989cb35a9bcb00c3ddf0cc123315f5cd00","tests/compile-fail/cannot_collect_filtermap_data.rs":"a7e539ed49e7d70ee38bfe93a78e04d64e800937b0f71893c2017e900774a266","tests/compile-fail/cannot_zip_filtered_data.rs":"9271f21d2d1146e9e588936a15b7c54507b050039c04d902f09516ed1bcf2728","tests/compile-fail/cell_par_iter.rs":"3a20e18d488b0769f7b7679387cfe05f383b657dd07d155f3d4391676e36e857","tests/compile-fail/must_use.rs":"ceeae6b914444232c03268ce6b74e0e293f40755d91aa9eeaf2e4eac6223c74a","tests/compile-fail/no_send_par_iter.rs":"6002782203a2814eb3b06560a699a819c18b61c429b66047f2fc0b0bc3ac4ac9","tests/compile-fail/quicksort_race1.rs":"22e049b102a705607d10e869598be381883bbd258be4c37637b72a24b403d956","tests/compile-fail/quicksort_race2.rs":"44b8039c8f8189e80c574c46474242f7b073b1fc553b6c4733e6ccf7c277c430","tests/compile-fail/quicksort_race3.rs":"fd8cc9069528952167a4d944dc57f40ccac460c669bebcab6d83b732ef9d6318","tests/compile-fail/rc_par_iter.rs":"0ecd808365ddafb5f2b6e7e1b642ede9b98310172d05b7b7777be57c923fadbf","tests/compile-fail/rc_return.rs":"c9b1cf6d94f3eff00674ee9820b34b2ae3fa5f29bdf1f389edfe04bd82930c76","tests/compile-fail/rc_upvar.rs":"200c4583e17eb09547cabcf1b2f3ab02d2176f58e11f6ae9fff864ff9a6c9655","tests/compile-fail/scope_join_bad.rs":"2ad7d09f2273860a0e7c6d9b65356141339b96b189b7c8403aeccdcb1c0c9060","tests/debug.rs":"62bd4db0bba708ef7bcaa331852c44171c60ed3f5222d914df4fdfa228d2adfe","tests/intersperse.rs":"288a9c781de6d6e1f3b061059ca274d3b17c84fe0460265d8a6cbca9ef994274","tests/producer_split_at.rs":"6a6c446e5c8f49dd760df8a52f44f63ba8e13f4be1e2a3170396143353c314d5","tests/run-fail-unstable/README.md":"448cd23346a2a71d581c5afbb61daa0349892ec5ad78d18730190127d2d11487","tests/run-fail/README.md":"2b9a7abb977671af7123478f9c4d2f596cd446869a7adaaf306656a2767bb80a","tests/run-fail/iter_panic.rs":"a897798038b89125d13883a7040341c0666bbde9f71a2bebed96ead8839bfaa3","tests/run-fail/simple_panic.rs":"b9d1cd0bedb6b22e4fd3df5ae948e804c1119d5e7a98e9eb4fdcf6281fd504ed","tests/run-pass-unstable/README.md":"2f996550ba5946cf6721b0ee6420f77555fc9a796ce0543fab7931b7a5e4ef5b","tests/run-pass/README.md":"324816facdb78da40a1539cdae8c4bc9d4d027451c167b5f087abc3239b199bf","tests/run-pass/double_init_fail.rs":"01a5363bbdc2796cc093e2485ecf01d1556c1e7f4745552afff6dba923a88003","tests/run-pass/init_zero_threads.rs":"8cf15c0fcd9d3fd97f387645578647e6a14acd23cc74048faf46fe9c4c863350","tests/run-pass/named-threads.rs":"c62eb4c2d96dcf0ae5b0b821b0f550f2628654588cc987c2bc1713113a5ee794","tests/run-pass/scope_join.rs":"65894d7bfde3d2ad7f18a54dbc786bcd756e1d00ce4d7cef503b3c74938e06d7","tests/run-pass/stack_overflow_crash.rs":"7914e1270265e0ac0d167e7afc6408be3be70896c9de82c1ce8f8b244c459f24","tests/sort-panic-safe.rs":"9139e0238814c21b8c5b4002c7ecfe6999d0a4bb47b775406126a3edc650e846"},"package":"485541959c8ecc49865526fe6c4de9653dd6e60d829d6edf0be228167b60372d"}
\ No newline at end of file
--- a/third_party/rust/rayon/.travis.yml
+++ b/third_party/rust/rayon/.travis.yml
@@ -1,38 +1,97 @@
 language: rust
-rust:
-  - 1.12.0
-  - stable
-  - nightly
-os:
-  - linux
-  - osx
 
 # Using 16MB stacks for deep test/debug recursion
 env:
   global:
     - RUST_MIN_STACK=16777216
 
 matrix:
+  fast_finish: true
   include:
-  - rust: stable
-    env: RUSTFLAGS='--cfg rayon_unstable'
-    os: linux
-  - rust: stable
-    env: RUSTFLAGS='--cfg rayon_unstable'
-    os: osx
-  - rust: nightly
-    env: RUSTFLAGS='--cfg rayon_unstable'
-    os: linux
-  - rust: nightly
-    env: RUSTFLAGS='--cfg rayon_unstable'
-    os: osx
+    # NB: To help with CI delays, each `pull_request` is only tested on Linux,
+    # with 1.13 for compatibility and nightly+rayon_unstable for broad test
+    # coverage.  The bors bot counts as a `push` type, which will run it all.
+
+    - rust: 1.13.0
+      os: linux
+      #if: everything!
+      before_script:
+        # rand 0.4.2 requires rust 1.15, and rand-0.3.22 requires rand-0.4  :/
+        # manually hacking the lockfile due to the limitations of cargo#2773
+        - cargo generate-lockfile
+        - sed -i -e 's/"rand 0.[34].[0-9]\+/"rand 0.3.20/' Cargo.lock
+        - sed -i -e '/^name = "rand"/,/^$/s/version = "0.3.[0-9]\+"/version = "0.3.20"/' Cargo.lock
+
+    - rust: stable
+      os: linux
+      if: NOT type = pull_request
+    - rust: stable
+      os: linux
+      env: RUSTFLAGS='--cfg rayon_unstable'
+      if: NOT type = pull_request
+
+    - rust: beta
+      os: linux
+      if: NOT type = pull_request
+    - rust: beta
+      os: linux
+      env: RUSTFLAGS='--cfg rayon_unstable'
+      if: NOT type = pull_request
+
+    - rust: nightly
+      os: linux
+      if: NOT type = pull_request
+    - rust: nightly
+      os: linux
+      env: RUSTFLAGS='--cfg rayon_unstable'
+      #if: everything!
+
+    - rust: stable
+      os: osx
+      if: NOT type = pull_request
+    - rust: stable
+      os: osx
+      env: RUSTFLAGS='--cfg rayon_unstable'
+      if: NOT type = pull_request
+
+    - rust: nightly
+      os: osx
+      if: NOT type = pull_request
+    - rust: nightly
+      os: osx
+      env: RUSTFLAGS='--cfg rayon_unstable'
+      if: NOT type = pull_request
+
+    # wasm won't actually work without threading, but it builds
+    - rust: nightly
+      os: linux
+      env: TARGET=wasm32-unknown-unknown
+      script:
+        - rustup target add $TARGET
+        - cargo build --target $TARGET
+      if: NOT type = pull_request
+
 
 script:
   - cargo build
   - |
     if [ $TRAVIS_RUST_VERSION == nightly ]; then
-      cargo test &&
+      cargo test -p rayon &&
       cargo test -p rayon-core &&
       cargo test -p rayon-demo &&
       ./ci/highlander.sh
     fi
+  - |
+    if [ -n "$RUSTFLAGS" ]; then
+      cargo clean &&
+      cargo build -p rayon-futures &&
+      if [ $TRAVIS_RUST_VERSION == nightly ]; then
+        cargo test -p rayon-futures
+      fi
+    fi
+
+branches:
+  only:
+    - master
+    - staging
+    - trying
--- a/third_party/rust/rayon/Cargo.toml
+++ b/third_party/rust/rayon/Cargo.toml
@@ -7,30 +7,39 @@
 #
 # If you believe there's an error in this file please file an
 # issue against the rust-lang/cargo repository. If you're
 # editing this file be aware that the upstream Cargo.toml
 # will likely look very different (and much more reasonable)
 
 [package]
 name = "rayon"
-version = "0.8.2"
+version = "1.0.0"
 authors = ["Niko Matsakis <niko@alum.mit.edu>", "Josh Stone <cuviper@gmail.com>"]
 description = "Simple work-stealing parallelism for Rust"
 documentation = "https://docs.rs/rayon/"
+readme = "README.md"
+keywords = ["parallel", "thread", "concurrency", "join", "performance"]
+categories = ["concurrency"]
 license = "Apache-2.0/MIT"
-repository = "https://github.com/nikomatsakis/rayon"
+repository = "https://github.com/rayon-rs/rayon"
+[dependencies.either]
+version = "1.0"
+default-features = false
+
 [dependencies.rayon-core]
-version = "1.2"
-[dev-dependencies.rand]
-version = "0.3"
+version = "1.4"
+[dev-dependencies.docopt]
+version = "0.8"
 
-[dev-dependencies.compiletest_rs]
-version = "0.2.1"
+[dev-dependencies.lazy_static]
+version = "1"
 
-[dev-dependencies.docopt]
-version = "0.7"
+[dev-dependencies.rand]
+version = ">= 0.3, < 0.5"
 
-[dev-dependencies.rustc-serialize]
-version = "0.3"
+[dev-dependencies.serde]
+version = "1"
 
-[dev-dependencies.futures]
-version = "0.1.7"
+[dev-dependencies.serde_derive]
+version = "1"
+[target."cfg(not(all(windows, target_env = \"gnu\")))".dev-dependencies.compiletest_rs]
+version = "0.3"
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon/FAQ.md
@@ -0,0 +1,227 @@
+# Rayon FAQ
+
+This file is for general questions that don't fit into the README or
+crate docs.
+
+## How many threads will Rayon spawn?
+
+By default, Rayon uses the same number of threads as the number of
+CPUs available. Note that on systems with hyperthreading enabled this
+equals the number of logical cores and not the physical ones.
+
+If you want to alter the number of threads spawned, you can set the
+environment variable `RAYON_NUM_THREADS` to the desired number of
+threads or use the
+[`ThreadPoolBuilder::build_global`](https://docs.rs/rayon/*/rayon/struct.ThreadPoolBuilder.html#method.build_global)
+method.
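+
+As a rough sketch of the builder route (the thread count of 8 below is an
+arbitrary example, not a recommendation):
+
+```rust
+use rayon::ThreadPoolBuilder;
+
+fn main() {
+    // Configure the global pool once, early in `main`, before any parallel
+    // work runs; calling `build_global` a second time returns an error.
+    ThreadPoolBuilder::new()
+        .num_threads(8)
+        .build_global()
+        .expect("the global thread pool was already initialized");
+
+    // ... parallel work goes here ...
+}
+```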
+
+## How does Rayon balance work between threads?
+
+Behind the scenes, Rayon uses a technique called **work stealing** to
+try and dynamically ascertain how much parallelism is available and
+exploit it. The idea is very simple: we always have a pool of worker
+threads available, waiting for some work to do. When you call `join`
+the first time, we shift over into that pool of threads. But if you
+call `join(a, b)` from a worker thread W, then W will place `b` into
+its work queue, advertising that this is work that other worker
+threads might help out with. W will then start executing `a`.
+
+While W is busy with `a`, other threads might come along and take `b`
+from its queue. That is called *stealing* `b`. Once `a` is done, W
+checks whether `b` was stolen by another thread and, if not, executes
+`b` itself. If W runs out of jobs in its own queue, it will look
+through the other threads' queues and try to steal work from them.
+
+This technique is not new. It was first introduced by the
+[Cilk project][cilk], done at MIT in the late nineties. The name Rayon
+is an homage to that work.
+
+[cilk]: http://supertech.csail.mit.edu/cilk/
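+
+As a small illustration of the `join(a, b)` pattern described above (the
+recursive slice sum and the 1024-element cutoff are invented for this sketch):
+
+```rust
+fn parallel_sum(slice: &[i32]) -> i32 {
+    // Below some cutoff, just run sequentially; the exact value is arbitrary.
+    if slice.len() <= 1024 {
+        return slice.iter().sum();
+    }
+    let (left, right) = slice.split_at(slice.len() / 2);
+    // `join` lets another worker steal the second closure while this thread
+    // runs the first one.
+    let (a, b) = rayon::join(|| parallel_sum(left), || parallel_sum(right));
+    a + b
+}
+```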
+
+## What should I do if I use `Rc`, `Cell`, `RefCell` or other non-Send-and-Sync types?
+
+There are a number of non-threadsafe types in the Rust standard library,
+and if your code is using them, you will not be able to combine it
+with Rayon. Similarly, even if you don't have such types, if you try
+to have multiple closures mutating the same state, you will get
+compilation errors; for example, this function won't work, because
+both closures access `slice`:
+
+```rust
+/// Increment all values in slice.
+fn increment_all(slice: &mut [i32]) {
+    rayon::join(|| process(slice), || process(slice));
+}
+```
+
+The correct way to resolve such errors will depend on the case.  Some
+cases are easy: for example, uses of [`Rc`] can typically be replaced
+with [`Arc`], which is basically equivalent, but thread-safe.
+
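+For instance, a value shared by both halves of a `join` can be wrapped in an
+`Arc` instead of an `Rc` (an illustrative snippet, not a complete program):
+
+```rust
+use std::sync::Arc;
+
+let shared = Arc::new(vec![1, 2, 3]);
+let (a, b) = (shared.clone(), shared.clone());
+// Each closure owns its own `Arc` handle, so the data is safely shared.
+rayon::join(move || a.iter().sum::<i32>(), move || b.iter().sum::<i32>());
+```
+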
+Code that uses `Cell` or `RefCell`, however, can be somewhat more complicated.
+If you can refactor your code to avoid those types, that is often the best way
+forward, but otherwise, you can try to replace those types with their threadsafe
+equivalents:
+
+- `Cell` -- replacement: `AtomicUsize`, `AtomicBool`, etc
+- `RefCell` -- replacement: `RwLock`, or perhaps `Mutex`
+
+However, you have to be wary! The parallel versions of these types
+have different atomicity guarantees. For example, with a `Cell`, you
+can increment a counter like so:
+
+```rust
+let value = counter.get();
+counter.set(value + 1);
+```
+
+But when you use the equivalent `AtomicUsize` methods, you are
+actually introducing a potential race condition (not a data race,
+technically, but it can be an awfully fine distinction):
+
+```rust
+let value = tscounter.load(Ordering::SeqCst);
+tscounter.store(value + 1, Ordering::SeqCst);
+```
+
+You can already see that the `AtomicUsize` API is a bit more complex,
+as it requires you to specify an
+[ordering](http://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html). (I
+won't go into the details on ordering here, but suffice to say that if
+you don't know what an ordering is, and probably even if you do, you
+should use `Ordering::SeqCst`.) The danger in this parallel version of
+the counter is that other threads might be running at the same time
+and they could cause our counter to get out of sync. For example, if
+we have two threads, then they might both execute the "load" before
+either has a chance to execute the "store":
+
+```
+Thread 1                                          Thread 2
+let value = tscounter.load(Ordering::SeqCst);
+// value = X                                      let value = tscounter.load(Ordering::SeqCst);
+                                                  // value = X
+tscounter.store(value+1);                         tscounter.store(value+1);
+// tscounter = X+1                                // tscounter = X+1
+```
+
+Now even though we've had two increments, we'll only increase the
+counter by one!  Even though we've got no data race, this is still
+probably not the result we wanted. The problem here is that the `Cell`
+API doesn't make clear the scope of a "transaction" -- that is, the
+set of reads/writes that should occur atomically. In this case, we
+probably wanted the get/set to occur together.
+
+In fact, when using the `Atomic` types, you very rarely want a plain
+`load` or plain `store`. You probably want the more complex
+operations. A counter, for example, would use `fetch_add` to
+atomically load and increment the value in one step. Compare-and-swap
+is another popular building block.
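+
+For example, the racy load/store counter above collapses into a single atomic
+step (sketch only):
+
+```rust
+use std::sync::atomic::{AtomicUsize, Ordering};
+
+let tscounter = AtomicUsize::new(0);
+// One atomic read-modify-write; no other thread can slip in between.
+tscounter.fetch_add(1, Ordering::SeqCst);
+assert_eq!(tscounter.load(Ordering::SeqCst), 1);
+```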
+
+A similar problem can arise when converting `RefCell` to `RwLock`, but
+it is somewhat less likely, because the `RefCell` API does in fact
+have a notion of a transaction: the scope of the handle returned by
+`borrow` or `borrow_mut`. So if you convert each call to `borrow` to
+`read` (and `borrow_mut` to `write`), things will mostly work fine in
+a parallel setting, but there can still be changes in behavior.
+Consider using a `handle: RefCell<Vec<i32>>` like:
+
+```rust
+let len = handle.borrow().len();
+for i in 0 .. len {
+    let data = handle.borrow()[i];
+    println!("{}", data);
+}
+```
+
+In sequential code, we know that this loop is safe. But if we convert
+this to parallel code with an `RwLock`, we do not: this is because
+another thread could come along and do
+`handle.write().unwrap().pop()`, and thus change the length of the
+vector. In fact, even in *sequential* code, using very small borrow
+sections like this is an anti-pattern: you ought to be enclosing the
+entire transaction together, like so:
+
+```rust
+let vec = handle.borrow();
+let len = vec.len();
+for i in 0 .. len {
+    let data = vec[i];
+    println!("{}", data);
+}
+```
+
+Or, even better, using an iterator instead of indexing:
+
+```rust
+let vec = handle.borrow();
+for data in vec {
+    println!("{}", data);
+}
+```
+
+There are several reasons to prefer one borrow over many. The most
+obvious is that it is more efficient, since each borrow has to perform
+some safety checks. But it's also more reliable: suppose we modified
+the loop above to not just print things out, but also call into a
+helper function:
+
+```rust
+let vec = handle.borrow();
+for data in vec {
+    helper(...);
+}
+```
+
+And now suppose, independently, this helper fn evolved and had to pop
+something off of the vector:
+
+```rust
+fn helper(...) {
+    handle.borrow_mut().pop();
+}
+```
+
+Under the old model, where we did lots of small borrows, this would
+yield precisely the same error that we saw in parallel land using an
+`RwLock`: the length would be out of sync and our indexing would fail
+(note that in neither case would there be an actual *data race* and
+hence there would never be undefined behavior). But now that we use a
+single borrow, we'll see a borrow error instead, which is much easier
+to diagnose, since it occurs at the point of the `borrow_mut`, rather
+than downstream. Similarly, if we move to an `RwLock`, we'll find that
+the code either deadlocks (if the write is on the same thread as the
+read) or, if the write is on another thread, works just fine. Both of
+these are preferable to random failures in my experience.
+
+## But wait, isn't Rust supposed to free me from this kind of thinking?
+
+You might think that Rust is supposed to mean that you don't have to
+think about atomicity at all. In fact, if you avoid inherent
+mutability (`Cell` and `RefCell` in a sequential setting, or
+`AtomicUsize`, `RwLock`, `Mutex`, et al. in parallel code), then this
+is true: the type system will basically guarantee that you don't have
+to think about atomicity at all. But often there are times when you
+WANT threads to interleave in the ways I showed above.
+
+Consider for example when you are conducting a search in parallel, say
+to find the shortest route. To avoid fruitless search, you might want
+to keep a cell with the shortest route you've found thus far.  This
+way, when you are searching down some path that's already longer than
+this shortest route, you can just stop and avoid wasted effort. In
+sequential land, you might model this "best result" as a shared value
+like `Rc<Cell<usize>>` (here the `usize` represents the length of best
+path found so far); in parallel land, you'd use a `Arc<AtomicUsize>`.
+Now we can make our search function look like:
+
+```rust
+fn search(path: &Path, cost_so_far: usize, best_cost: &Arc<AtomicUsize>) {
+    if cost_so_far >= best_cost.load(Ordering::SeqCst) {
+        return;
+    }
+    ...
+    best_cost.store(...);
+}
+```
+
+Now in this case, we really WANT to see results from other threads
+interjected into our execution!
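+
+One way to make that elided `store` robust (a sketch added here for
+illustration, not code from this README) is a compare-exchange loop, so
+that a larger cost never overwrites a smaller one published by another
+thread:
+
+```rust
+use std::sync::atomic::{AtomicUsize, Ordering};
+
+/// Record `cost` as the new best only if it is still an improvement.
+fn update_best(best_cost: &AtomicUsize, cost: usize) {
+    let mut current = best_cost.load(Ordering::SeqCst);
+    while cost < current {
+        match best_cost.compare_exchange(current, cost,
+                                         Ordering::SeqCst, Ordering::SeqCst) {
+            Ok(_) => break,                       // we installed the new best cost
+            Err(observed) => current = observed,  // lost the race; retry
+        }
+    }
+}
+```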
--- a/third_party/rust/rayon/README.md
+++ b/third_party/rust/rayon/README.md
@@ -1,74 +1,107 @@
 # Rayon
 
+[![Rayon crate](https://img.shields.io/crates/v/rayon.svg)](https://crates.io/crates/rayon)
+[![Rayon documentation](https://docs.rs/rayon/badge.svg)](https://docs.rs/rayon)
+[![Travis Status](https://travis-ci.org/rayon-rs/rayon.svg?branch=master)](https://travis-ci.org/rayon-rs/rayon)
+[![Appveyor status](https://ci.appveyor.com/api/projects/status/wre5dkx08gayy8hc/branch/master?svg=true)](https://ci.appveyor.com/project/cuviper/rayon/branch/master)
 [![Join the chat at https://gitter.im/rayon-rs/Lobby](https://badges.gitter.im/rayon-rs/Lobby.svg)](https://gitter.im/rayon-rs/Lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
 
-[![Travis Status](https://travis-ci.org/nikomatsakis/rayon.svg?branch=master)](https://travis-ci.org/nikomatsakis/rayon)
-
-[![Appveyor status](https://ci.appveyor.com/api/projects/status/6oft3iwgr6f2o4d4?svg=true)](https://ci.appveyor.com/project/nikomatsakis/rayon)
-
 Rayon is a data-parallelism library for Rust. It is extremely
 lightweight and makes it easy to convert a sequential computation into
 a parallel one. It also guarantees data-race freedom. (You may also
 enjoy [this blog post][blog] about Rayon, which gives more background
-and details about how it works, or [this video][video], from the Rust Belt Rust conference.) Rayon is
+and details about how it works, or [this video][video], from the Rust
+Belt Rust conference.) Rayon is
 [available on crates.io](https://crates.io/crates/rayon), and
 [API Documentation is available on docs.rs](https://docs.rs/rayon/).
 
 [blog]: http://smallcultfollowing.com/babysteps/blog/2015/12/18/rayon-data-parallelism-in-rust/
 [video]: https://www.youtube.com/watch?v=gof_OEv71Aw
 
-You can use Rayon in two ways. Which way you will want will depend on
-what you are doing:
+## Parallel iterators and more
+
+Rayon makes it drop-dead simple to convert sequential iterators into
+parallel ones: usually, you just change your `foo.iter()` call into
+`foo.par_iter()`, and Rayon does the rest:
 
-- Parallel iterators: convert iterator chains to execute in parallel.
-- The `join` method: convert recursive, divide-and-conquer style
-  problems to execute in parallel.
+```rust
+use rayon::prelude::*;
+fn sum_of_squares(input: &[i32]) -> i32 {
+    input.par_iter() // <-- just change that!
+         .map(|&i| i * i)
+         .sum()
+}
+```
+
+[Parallel iterators] take care of deciding how to divide your data
+into tasks; they will dynamically adapt for maximum performance. If you
+need more flexibility than that, Rayon also offers the [join] and
+[scope] functions, which let you create parallel tasks on your own.
+For even more control, you can create [custom threadpools] rather than
+using Rayon's default, global threadpool.
 
-No matter which way you choose, you don't have to worry about data
-races: Rayon statically guarantees data-race freedom. For the most
-part, adding calls to Rayon should not change how your programs works
-at all, in fact. However, if you operate on mutexes or atomic
-integers, please see the [notes on atomicity](#atomicity).
+[Parallel iterators]: https://docs.rs/rayon/*/rayon/iter/index.html
+[join]: https://docs.rs/rayon/*/rayon/fn.join.html
+[scope]: https://docs.rs/rayon/*/rayon/fn.scope.html
+[custom threadpools]: https://docs.rs/rayon/*/rayon/struct.ThreadPool.html
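+
+For example (a minimal sketch for a divide-and-conquer workload, added
+here for illustration rather than taken from this README), `join` can be
+used to split a task recursively:
+
+```rust
+extern crate rayon;
+
+/// Double every value, splitting the work in half until the pieces are small.
+fn double_all(slice: &mut [i32]) {
+    if slice.len() < 1_000 {
+        for x in slice {
+            *x *= 2;
+        }
+    } else {
+        let mid = slice.len() / 2;
+        let (left, right) = slice.split_at_mut(mid);
+        // The two closures may run in parallel if a worker thread is free.
+        rayon::join(|| double_all(left), || double_all(right));
+    }
+}
+```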
+
+## No data races
 
-Rayon currently requires `rustc 1.12.0` or greater.
+You may have heard that parallel execution can produce all kinds of
+crazy bugs. Well, rest easy. Rayon's APIs all guarantee **data-race
+freedom**, which generally rules out most parallel bugs (though not
+all). In other words, **if your code compiles**, it typically does the
+same thing it did before.
 
-### Using Rayon
+For the most part, parallel iterators in particular are guaranteed to
+produce the same results as their sequential counterparts. One caveat:
+if your iterator has side effects (for example, sending messages to
+other threads through a [Rust channel] or writing to disk), those side
+effects may occur in a different order. Note also that, in some cases,
+parallel iterators offer alternative versions of the sequential
+iterator methods that can have higher performance.
+
+[Rust channel]: https://doc.rust-lang.org/std/sync/mpsc/fn.channel.html
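+
+For instance (a toy sketch added here for illustration, not part of the
+upstream README), the sum below is always the same, but the `println!`
+side effects may interleave in any order:
+
+```rust
+extern crate rayon;
+use rayon::prelude::*;
+
+fn main() {
+    // The reduction is deterministic, but the printouts run on worker
+    // threads and may appear in any order.
+    let sum: i32 = (0..10).into_par_iter()
+                          .map(|i| { println!("visiting {}", i); i * i })
+                          .sum();
+    println!("sum = {}", sum);
+}
+```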
+
+## Using Rayon
 
 [Rayon is available on crates.io](https://crates.io/crates/rayon). The
 recommended way to use it is to add a line into your Cargo.toml such
 as:
 
-```rust
+```toml
 [dependencies]
-rayon = "0.8.2"
+rayon = "1.0"
 ```
 
 and then add the following to your `lib.rs`:
 
 ```rust
 extern crate rayon;
 ```
 
 To use the Parallel Iterator APIs, a number of traits have to be in
 scope. The easiest way to bring those things into scope is to use the
-[Rayon prelude](https://docs.rs/rayon/*/rayon/prelude/index.html).
-In each module where you would like to use the parallel iterator APIs,
+[Rayon prelude](https://docs.rs/rayon/*/rayon/prelude/index.html).  In
+each module where you would like to use the parallel iterator APIs,
 just add:
 
 ```rust
 use rayon::prelude::*;
 ```
 
-### Contribution
+Rayon currently requires `rustc 1.13.0` or greater.
+
+## Contribution
 
-Rayon is an open source project! If you'd like to contribute to Rayon, check out [the list of "help wanted" issues](https://github.com/nikomatsakis/rayon/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22). These are all (or should be) issues that are suitable for getting started, and they generally include a detailed set of instructions for what to do. Please ask questions if anything is unclear! Also, check out the [Guide to Development](https://github.com/nikomatsakis/rayon/wiki/Guide-to-Development) page on the wiki. Note that all code submitted in PRs to Rayon is assumed to [be licensed under Rayon's dual MIT/Apache2 licensing](https://github.com/nikomatsakis/rayon/blob/master/README.md#license).
+Rayon is an open source project! If you'd like to contribute to Rayon, check out [the list of "help wanted" issues](https://github.com/rayon-rs/rayon/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22). These are all (or should be) issues that are suitable for getting started, and they generally include a detailed set of instructions for what to do. Please ask questions if anything is unclear! Also, check out the [Guide to Development](https://github.com/rayon-rs/rayon/wiki/Guide-to-Development) page on the wiki. Note that all code submitted in PRs to Rayon is assumed to [be licensed under Rayon's dual MIT/Apache2 licensing](https://github.com/rayon-rs/rayon/blob/master/README.md#license).
 
-### Quick demo
+## Quick demo
 
 To see Rayon in action, check out the `rayon-demo` directory, which
 includes a number of demos of code using Rayon. For example, run this
 command to get a visualization of an nbody simulation. To see the
 effect of using Rayon, press `s` to run sequentially and `p` to run in
 parallel.
 
 ```
@@ -80,379 +113,20 @@ For more information on demos, try:
 
 ```
 > cd rayon-demo
 > cargo +nightly run --release -- --help
 ```
 
 **Note:** While Rayon is usable as a library with the stable compiler, running demos or executing tests requires nightly Rust.
 
-### Parallel Iterators
-
-Rayon supports an experimental API called "parallel iterators". These
-let you write iterator-like chains that execute in parallel. For
-example, to compute the sum of the squares of a sequence of integers,
-one might write:
-
-```rust
-use rayon::prelude::*;
-fn sum_of_squares(input: &[i32]) -> i32 {
-    input.par_iter()
-         .map(|&i| i * i)
-         .sum()
-}
-```
-
-Or, to increment all the integers in a slice, you could write:
-
-```rust
-use rayon::prelude::*;
-fn increment_all(input: &mut [i32]) {
-    input.par_iter_mut()
-         .for_each(|p| *p += 1);
-}
-```
-
-To use parallel iterators, first import the traits by adding something
-like `use rayon::prelude::*` to your module. You can then call
-`par_iter` and `par_iter_mut` to get a parallel iterator.  Like a
-[regular iterator][], parallel iterators work by first constructing a
-computation and then executing it. See the
-[`ParallelIterator` trait][pt] for the list of available methods and
-more details. (Sorry, proper documentation is still somewhat lacking.)
-
-[regular iterator]: http://doc.rust-lang.org/std/iter/trait.Iterator.html
-[pt]: https://github.com/nikomatsakis/rayon/blob/master/src/iter/mod.rs
-
-### Using join for recursive, divide-and-conquer problems
-
-Parallel iterators are actually implemented in terms of a more
-primitive method called `join`. `join` simply takes two closures and
-potentially runs them in parallel. For example, we could rewrite the
-`increment_all` function we saw for parallel iterators as follows
-(this function increments all the integers in a slice):
-
-```rust
-/// Increment all values in slice.
-fn increment_all(slice: &mut [i32]) {
-    if slice.len() < 1000 {
-        for p in slice { *p += 1; }
-    } else {
-        let mid_point = slice.len() / 2;
-        let (left, right) = slice.split_at_mut(mid_point);
-        rayon::join(|| increment_all(left), || increment_all(right));
-    }
-}
-```
-
-Perhaps a more interesting example is this parallel quicksort:
-
-```rust
-fn quick_sort<T:PartialOrd+Send>(v: &mut [T]) {
-    if v.len() <= 1 {
-        return;
-    }
-
-    let mid = partition(v);
-    let (lo, hi) = v.split_at_mut(mid);
-    rayon::join(|| quick_sort(lo), || quick_sort(hi));
-}
-```
-
-**Note though that calling `join` is very different from just spawning
-two threads in terms of performance.** This is because `join` does not
-*guarantee* that the two closures will run in parallel. If all of your
-CPUs are already busy with other work, Rayon will instead opt to run
-them sequentially. The call to `join` is designed to have very low
-overhead in that case, so that you can safely call it even with very
-small workloads (as in the example above).
-
-However, in practice, the overhead is still noticeable. Therefore, for
-maximal performance, you want to have some kind of sequential fallback
-once your problem gets small enough. The parallel iterator APIs try to
-handle this for you. When using join, you have to code it yourself.
-For an example, see the [quicksort demo][], which includes sequential
-fallback after a certain size.
-
-[quicksort demo]: https://github.com/nikomatsakis/rayon/blob/master/rayon-demo/src/quicksort/mod.rs
-
-### Safety
-
-You've probably heard that parallel programming can be the source of
-bugs that are really hard to diagnose. That is certainly true!
-However, thanks to Rust's type system, you basically don't have to
-worry about that when using Rayon. The Rayon APIs are guaranteed to be
-data-race free. The Rayon APIs themselves also cannot cause deadlocks
-(though if your closures or callbacks use locks or ports, those locks
-might trigger deadlocks).
-
-For example, if you write code that tries to process the same mutable
-state from both closures, you will find that fails to compile:
-
-```rust
-/// Increment all values in slice.
-fn increment_all(slice: &mut [i32]) {
-    rayon::join(|| process(slice), || process(slice));
-}
-```
-
-However, this safety does have some implications. You will not be able
-to use types which are not thread-safe (i.e., do not implement `Send`)
-from inside a `join` closure. Note that almost all types *are* in fact
-thread-safe in Rust; the only exception is those types that employ
-"inherent mutability" without some form of synchronization, such as
-`RefCell` or `Rc`. Here is a list of the most common types in the
-standard library that are not `Send`, along with an alternative that
-you can use instead which *is* `Send` (but which also has higher
-overhead, because it must work across threads):
-
-- `Cell` -- replacement: `AtomicUsize`, `AtomicBool`, etc (but see warning below)
-- `RefCell` -- replacement: `RwLock`, or perhaps `Mutex` (but see warning below)
-- `Rc` -- replacement: `Arc`
-
-However, if you are converting uses of `Cell` or `RefCell`, you must
-be prepared for other threads to interject changes. For more
-information, read the section on atomicity below.
-
-### How it works: Work stealing
-
-Behind the scenes, Rayon uses a technique called work stealing to try
-and dynamically ascertain how much parallelism is available and
-exploit it. The idea is very simple: we always have a pool of worker
-threads available, waiting for some work to do. When you call `join`
-the first time, we shift over into that pool of threads. But if you
-call `join(a, b)` from a worker thread W, then W will place `b` into
-its work queue, advertising that this is work that other worker
-threads might help out with. W will then start executing `a`.
-
-While W is busy with `a`, other threads might come along and take `b`
-from its queue. That is called *stealing* `b`. Once `a` is done, W
-checks whether `b` was stolen by another thread and, if not, executes
-`b` itself. If W runs out of jobs in its own queue, it will look
-through the other threads' queues and try to steal work from them.
-
-This technique is not new. It was first introduced by the
-[Cilk project][cilk], done at MIT in the late nineties. The name Rayon
-is an homage to that work.
-
-[cilk]: http://supertech.csail.mit.edu/cilk/
-
-<a name="atomicity"></a>
-
-#### Warning: Be wary of atomicity
-
-Converting a `Cell` (or, to a lesser extent, a `RefCell`) to work in
-parallel merits special mention for a number of reasons. `Cell` and
-`RefCell` are handy types that permit you to modify data even when
-that data is shared (aliased). They work somewhat differently, but
-serve a common purpose:
-
-1. A `Cell` offers a mutable slot with just two methods, `get` and
-   `set`.  Cells can only be used for `Copy` types that are safe to
-   memcpy around, such as `i32`, `f32`, or even something bigger like a tuple of
-   `(usize, usize, f32)`.
-2. A `RefCell` is kind of like a "single-threaded read-write lock"; it
-   can be used with any sort of type `T`. To gain access to the data
-   inside, you call `borrow` or `borrow_mut`. Dynamic checks are done
-   to ensure that you have either readers or one writer but not both.
-
-While there are threadsafe types that offer similar APIs, caution is
-warranted because, in a threadsafe setting, other threads may
-"interject" modifications in ways that are not possible in sequential
-code. While this will never lead to a *data race* --- that is, you
-need not fear *undefined behavior* --- you can certainly still have
-*bugs*.
-
-Let me give you a concrete example using `Cell`. A common use of `Cell`
-is to implement a shared counter. In that case, you would have something
-like `counter: Rc<Cell<usize>>`. Now I can increment the counter by
-calling `get` and `set` as follows:
+## Other questions?
 
-```rust
-let value = counter.get();
-counter.set(value + 1);
-```
-
-If I convert this to be a thread-safe counter, I would use the
-corresponding types `tscounter: Arc<AtomicUsize>`. If I then were to
-convert the `Cell` API calls directly, I would do something like this:
-
-```rust
-let value = tscounter.load(Ordering::SeqCst);
-tscounter.store(value + 1, Ordering::SeqCst);
-```
-
-You can already see that the `AtomicUsize` API is a bit more complex,
-as it requires you to specify an
-[ordering](http://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html). (I
-won't go into the details on ordering here, but suffice to say that if
-you don't know what an ordering is, and probably even if you do, you
-should use `Ordering::SeqCst`.) The danger in this parallel version of
-the counter is that other threads might be running at the same time
-and they could cause our counter to get out of sync. For example, if
-we have two threads, then they might both execute the "load" before
-either has a chance to execute the "store":
-
-```
-Thread 1                                          Thread 2
-let value = tscounter.load(Ordering::SeqCst);
-// value = X                                      let value = tscounter.load(Ordering::SeqCst);
-                                                  // value = X
-tscounter.store(value+1);                         tscounter.store(value+1);
-// tscounter = X+1                                // tscounter = X+1
-```
-
-Now even though we've had two increments, we'll only increase the
-counter by one!  Even though we've got no data race, this is still
-probably not the result we wanted. The problem here is that the `Cell`
-API doesn't make clear the scope of a "transaction" -- that is, the
-set of reads/writes that should occur atomically. In this case, we
-probably wanted the get/set to occur together.
-
-In fact, when using the `Atomic` types, you very rarely want a plain
-`load` or plain `store`. You probably want the more complex
-operations. A counter, for example, would use `fetch_add` to
-atomically load and increment the value in one step. Compare-and-swap
-is another popular building block.
-
-A similar problem can arise when converting `RefCell` to `RwLock`, but
-it is somewhat less likely, because the `RefCell` API does in fact
-have a notion of a transaction: the scope of the handle returned by
-`borrow` or `borrow_mut`. So if you convert each call to `borrow` to
-`read` (and `borrow_mut` to `write`), things will mostly work fine in
-a parallel setting, but there can still be changes in behavior.
-Consider using a `handle: RefCell<Vec<i32>>` like :
-
-```rust
-let len = handle.borrow().len();
-for i in 0 .. len {
-    let data = handle.borrow()[i];
-    println!("{}", data);
-}
-```
-
-In sequential code, we know that this loop is safe. But if we convert
-this to parallel code with an `RwLock`, we do not: this is because
-another thread could come along and do
-`handle.write().unwrap().pop()`, and thus change the length of the
-vector. In fact, even in *sequential* code, using very small borrow
-sections like this is an anti-pattern: you ought to be enclosing the
-entire transaction together, like so:
-
-```rust
-let vec = handle.borrow();
-let len = vec.len();
-for i in 0 .. len {
-    let data = vec[i];
-    println!("{}", data);
-}
-```
-
-Or, even better, using an iterator instead of indexing:
-
-```rust
-let vec = handle.borrow();
-for data in vec {
-    println!("{}", data);
-}
-```
+See [the Rayon FAQ][faq].
 
-There are several reasons to prefer one borrow over many. The most
-obvious is that it is more efficient, since each borrow has to perform
-some safety checks. But it's also more reliable: suppose we modified
-the loop above to not just print things out, but also call into a
-helper function:
-
-```rust
-let vec = handle.borrow();
-for data in vec {
-    helper(...);
-}
-```
-
-And now suppose, independently, this helper fn evolved and had to pop
-something off of the vector:
-
-```rust
-fn helper(...) {
-    handle.borrow_mut().pop();
-}
-```
-
-Under the old model, where we did lots of small borrows, this would
-yield precisely the same error that we saw in parallel land using an
-`RwLock`: the length would be out of sync and our indexing would fail
-(note that in neither case would there be an actual *data race* and
-hence there would never be undefined behavior). But now that we use a
-single borrow, we'll see a borrow error instead, which is much easier
-to diagnose, since it occurs at the point of the `borrow_mut`, rather
-than downstream. Similarly, if we move to an `RwLock`, we'll find that
-the code either deadlocks (if the write is on the same thread as the
-read) or, if the write is on another thread, works just fine. Both of
-these are preferable to random failures in my experience.
-
-#### But wait, isn't Rust supposed to free me from this kind of thinking?
-
-You might think that Rust is supposed to mean that you don't have to
-think about atomicity at all. In fact, if you avoid inherent
-mutability (`Cell` and `RefCell` in a sequential setting, or
-`AtomicUsize`, `RwLock`, `Mutex`, et al. in parallel code), then this
-is true: the type system will basically guarantee that you don't have
-to think about atomicity at all. But often there are times when you
-WANT threads to interleave in the ways I showed above.
-
-Consider for example when you are conducting a search in parallel, say
-to find the shortest route. To avoid fruitless search, you might want
-to keep a cell with the shortest route you've found thus far.  This
-way, when you are searching down some path that's already longer than
-this shortest route, you can just stop and avoid wasted effort. In
-sequential land, you might model this "best result" as a shared value
-like `Rc<Cell<usize>>` (here the `usize` represents the length of best
-path found so far); in parallel land, you'd use a `Arc<AtomicUsize>`.
-Now we can make our search function look like:
-
-```rust
-fn search(path: &Path, cost_so_far: usize, best_cost: &Arc<AtomicUsize>) {
-    if cost_so_far >= best_cost.load(Ordering::SeqCst) {
-        return;
-    }
-    ...
-    best_cost.store(...);
-}
-```
-
-Now in this case, we really WANT to see results from other threads
-interjected into our execution!
-
-## Semver policy, the rayon-core crate, and unstable features
-
-Rayon follows semver versioning. However, we also have APIs that are
-still in the process of development and which may break from release
-to release -- those APIs are not subject to semver. To use them,
-you have to set the cfg flag `rayon_unstable`. The easiest way to do this
-is to use the `RUSTFLAGS` environment variable:
-
-```
-RUSTFLAGS='--cfg rayon_unstable' cargo build
-```
-
-Note that this must not only be done for your crate, but for any crate
-that depends on your crate. This infectious nature is intentional, as
-it serves as a reminder that you are outside of the normal semver
-guarantees. **If you see unstable APIs that you would like to use,
-please request stabilization on the correspond tracking issue!**
-
-Rayon itself is internally split into two crates. The `rayon` crate is
-intended to be the main, user-facing crate, and hence all the
-documentation refers to `rayon`. This crate is still evolving and
-regularly goes through (minor) breaking changes. The `rayon-core`
-crate contains the global thread-pool and defines the core APIs: we no
-longer permit breaking changes in this crate (except to unstable
-features). The intention is that multiple semver-incompatible versions
-of the rayon crate can peacefully coexist; they will all share one
-global thread-pool through the `rayon-core` crate.
+[faq]: https://github.com/rayon-rs/rayon/blob/master/FAQ.md
 
 ## License
 
 Rayon is distributed under the terms of both the MIT license and the
 Apache License (Version 2.0). See [LICENSE-APACHE](LICENSE-APACHE) and
 [LICENSE-MIT](LICENSE-MIT) for details. Opening a pull request is
 assumed to signal agreement with these licensing terms.
--- a/third_party/rust/rayon/RELEASES.md
+++ b/third_party/rust/rayon/RELEASES.md
@@ -1,18 +1,142 @@
+# Release rayon 1.0.0 / rayon-core 1.4.0
+
+- `ParallelIterator` added the `update` method which applies a function to
+  mutable references, inspired by `itertools`.
+- `IndexedParallelIterator` added the `chunks` method which yields vectors of
+  consecutive items from the base iterator, inspired by `itertools`.
+- `String` now implements `FromParallelIterator<Cow<str>>` and
+  `ParallelExtend<Cow<str>>`, inspired by `std`.
+- `()` now implements `FromParallelIterator<()>`, inspired by `std`.
+- The new `ThreadPoolBuilder` replaces and deprecates `Configuration`; see the
+  sketch after this list.
+  - Errors from initialization now have the concrete `ThreadPoolBuildError`
+    type, rather than `Box<Error>`, and this type implements `Send` and `Sync`.
+  - `ThreadPool::new` is deprecated in favor of `ThreadPoolBuilder::build`.
+  - `initialize` is deprecated in favor of `ThreadPoolBuilder::build_global`.
+- Examples have been added to most of the parallel iterator methods.
+- A lot of the documentation has been reorganized and extended.
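+
+As a quick illustration of the builder change noted above (a sketch, not
+an excerpt from the release notes), a custom pool is now configured like
+this:
+
+```rust
+extern crate rayon;
+use rayon::prelude::*;
+
+fn main() {
+    // Build a dedicated pool instead of using the deprecated `Configuration`.
+    let pool = rayon::ThreadPoolBuilder::new()
+        .num_threads(4)
+        .build()
+        .expect("failed to build thread pool");
+    let sum: i32 = pool.install(|| (1..11).into_par_iter().sum());
+    println!("sum = {}", sum);
+}
+```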
+
+## Breaking changes
+
+- Rayon now requires rustc 1.13 or greater.
+- `IndexedParallelIterator::len` and `ParallelIterator::opt_len` now operate on
+  `&self` instead of `&mut self`.
+- `IndexedParallelIterator::collect_into` is now `collect_into_vec`.
+- `IndexedParallelIterator::unzip_into` is now `unzip_into_vecs`.
+- Rayon no longer exports the deprecated `Configuration` and `initialize` from
+  rayon-core.
+
+## Contributors
+
+Thanks to all of the contributors for this release!
+
+- @Bilkow
+- @cuviper
+- @Enet4
+- @ignatenkobrain
+- @iwillspeak
+- @jeehoonkang
+- @jwass
+- @Kerollmops
+- @KodrAus
+- @kornelski
+- @MaloJaffre
+- @nikomatsakis
+- @obv-mikhail
+- @oddg
+- @phimuemue
+- @stjepang
+- @tmccombs
+- bors[bot]
+
+
+# Release rayon 0.9.0 / rayon-core 1.3.0 / rayon-futures 0.1.0
+
+- `Configuration` now has a `build` method.
+- `ParallelIterator` added `flatten` and `intersperse`, both inspired by
+  itertools.
+- `IndexedParallelIterator` added `interleave`, `interleave_shortest`, and
+  `zip_eq`, all inspired by itertools.
+- The new functions `iter::empty` and `once` create parallel iterators of
+  exactly zero or one item, like their `std` counterparts.
+- The new functions `iter::repeat` and `repeatn` create parallel iterators
+  repeating an item indefinitely or `n` times, respectively.
+- The new function `join_context` works like `join`, with an added `FnContext`
+  parameter that indicates whether the job was stolen (a brief sketch follows
+  this list).
+- `Either` (used by `ParallelIterator::partition_map`) is now re-exported from
+  the `either` crate, instead of defining our own type.
+  - `Either` also now implements `ParallelIterator`, `IndexedParallelIterator`,
+    and `ParallelExtend` when both of its `Left` and `Right` types do.
+- All public types now implement `Debug`.
+- Many of the parallel iterators now implement `Clone` where possible.
+- Much of the documentation has been extended (but it could still use more help!).
+- All rayon crates have improved metadata.
+- Rayon was evaluated in the Libz Blitz, leading to many of these improvements.
+- Rayon pull requests are now guarded by bors-ng.
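+
+For the `join_context` addition mentioned above, a minimal sketch (not an
+excerpt from the release notes) looks like:
+
+```rust
+extern crate rayon;
+
+fn main() {
+    // Each closure receives an `FnContext`; `migrated()` reports whether
+    // the job was stolen onto another worker thread.
+    let (a, b) = rayon::join_context(
+        |ctx| format!("left stolen: {}", ctx.migrated()),
+        |ctx| format!("right stolen: {}", ctx.migrated()),
+    );
+    println!("{} / {}", a, b);
+}
+```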
+
+## Futures
+
+The `spawn_future()` method has been refactored into its own `rayon-futures`
+crate, now through a `ScopeFutureExt` trait for `ThreadPool` and `Scope`.  The
+supporting `rayon-core` APIs are still gated by `--cfg rayon_unstable`.
+
+## Breaking changes
+
+- Two breaking changes have been made to `rayon-core`, but since they're fixing
+  soundness bugs, we are considering these _minor_ changes for semver.
+  - `Scope::spawn` now requires `Send` for the closure.
+  - `ThreadPool::install` now requires `Send` for the return value.
+- The `iter::internal` module has been renamed to `iter::plumbing`, to hopefully
+  indicate that while these are low-level details, they're not really internal
+  or private to rayon.  The contents of that module are needed for third-parties
+  to implement new parallel iterators, and we'll treat them with normal semver
+  stability guarantees.
+- The function `rayon::iter::split` is no longer re-exported as `rayon::split`.
+
+## Contributors
+
+Thanks to all of the contributors for this release!
+
+- @AndyGauge
+- @ChristopherDavenport
+- @chrisvittal
+- @cuviper
+- @dns2utf8
+- @dtolnay
+- @frewsxcv
+- @gsquire
+- @Hittherhod
+- @jdr023
+- @laumann
+- @leodasvacas
+- @lvillani
+- @MajorBreakfast
+- @mamuleanu
+- @marmistrz
+- @mbrubeck
+- @mgattozzi
+- @nikomatsakis
+- @smt923
+- @stjepang
+- @tmccombs
+- @vishalsodani
+- bors[bot]
+
+
 # Release rayon 0.8.2
 
 - `ParallelSliceMut` now has six parallel sorting methods with the same
   variations as the standard library.
   - `par_sort`, `par_sort_by`, and `par_sort_by_key` perform stable sorts in
     parallel, using the default order, a custom comparator, or a key extraction
     function, respectively.
   - `par_sort_unstable`, `par_sort_unstable_by`, and `par_sort_unstable_by_key`
     perform unstable sorts with the same comparison options.
-  - Thanks to @stejpang!
+  - Thanks to @stjepang!
 
 # Release rayon 0.8.1 / rayon-core 1.2.0
 
 - The following core APIs are being stabilized:
   - `rayon::spawn()` -- spawns a task into the Rayon threadpool; as it
     is contained in the global scope (rather than a user-created
     scope), the task cannot capture anything from the current stack
     frame.
--- a/third_party/rust/rayon/appveyor.yml
+++ b/third_party/rust/rayon/appveyor.yml
@@ -1,15 +1,12 @@
 environment:
   RUST_MIN_STACK: 16777216
   matrix:
     - TARGET: x86_64-pc-windows-gnu
-      CHANNEL: 1.12.0
-
-    - TARGET: x86_64-pc-windows-gnu
       CHANNEL: stable
     - TARGET: x86_64-pc-windows-gnu
       CHANNEL: stable
       RUSTFLAGS: --cfg rayon_unstable
 
     - TARGET: x86_64-pc-windows-gnu
       CHANNEL: beta
     - TARGET: x86_64-pc-windows-gnu
@@ -19,19 +16,16 @@ environment:
     - TARGET: x86_64-pc-windows-gnu
       CHANNEL: nightly
     - TARGET: x86_64-pc-windows-gnu
       CHANNEL: nightly
       RUSTFLAGS: --cfg rayon_unstable
 
 
     - TARGET: x86_64-pc-windows-msvc
-      CHANNEL: 1.12.0
-
-    - TARGET: x86_64-pc-windows-msvc
       CHANNEL: stable
     - TARGET: x86_64-pc-windows-msvc
       CHANNEL: stable
       RUSTFLAGS: --cfg rayon_unstable
 
     - TARGET: x86_64-pc-windows-msvc
       CHANNEL: beta
     - TARGET: x86_64-pc-windows-msvc
@@ -46,16 +40,33 @@ environment:
 
 install:
   - curl -sSf -o rustup-init.exe https://win.rustup.rs
   - rustup-init.exe --default-host %TARGET% --default-toolchain %CHANNEL% -y
   - set PATH=%PATH%;C:\Users\appveyor\.cargo\bin
   - rustc -Vv
   - cargo -V
 
+matrix:
+  fast_finish: true
+
 build: false
 
 test_script:
   - cargo build
   - if [%CHANNEL%]==[nightly] (
+      cargo test -p rayon &&
       cargo test -p rayon-core &&
       cargo test -p rayon-demo
     )
+  - if not "%RUSTFLAGS%"=="%^RUSTFLAGS%" (
+      cargo clean &&
+      cargo build -p rayon-futures &&
+      if [%CHANNEL%]==[nightly] (
+        cargo test -p rayon-futures
+      )
+    )
+
+branches:
+  only:
+    - master
+    - staging
+    - trying
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon/bors.toml
@@ -0,0 +1,7 @@
+status = [
+  "continuous-integration/travis-ci/push",
+  "continuous-integration/appveyor/branch",
+]
+
+# Sometimes the queue for Travis CI on OSX gets really backed up...
+timeout_sec = 21600
--- a/third_party/rust/rayon/examples/cpu_monitor.rs
+++ b/third_party/rust/rayon/examples/cpu_monitor.rs
@@ -1,14 +1,15 @@
 extern crate docopt;
 extern crate rayon;
-extern crate rustc_serialize;
+#[macro_use]
+extern crate serde_derive;
+extern crate serde;
 
 use docopt::Docopt;
-use std::env;
 use std::io;
 use std::process;
 
 const USAGE: &'static str = "
 Usage: cpu_monitor [options] <scenario>
        cpu_monitor --help
 
 A test for monitoring how much CPU usage Rayon consumes under various
@@ -22,25 +23,25 @@ The list of scenarios you can try are as
 - task_stall_root: a root task stalls for a very long time
 - task_stall_scope: a task in a scope stalls for a very long time
 
 Options:
     -h, --help                   Show this message.
     -d N, --depth N              Control how hard the dummy task works [default: 27]
 ";
 
-#[derive(RustcDecodable)]
+#[derive(Deserialize)]
 pub struct Args {
     arg_scenario: String,
     flag_depth: usize,
 }
 
 fn main() {
     let args: &Args =
-        &Docopt::new(USAGE).and_then(|d| d.argv(env::args()).decode()).unwrap_or_else(|e| e.exit());
+        &Docopt::new(USAGE).and_then(|d| d.deserialize()).unwrap_or_else(|e| e.exit());
 
     match &args.arg_scenario[..] {
         "tasks_ended" => tasks_ended(args),
         "task_stall_root" => task_stall_root(args),
         "task_stall_scope" => task_stall_scope(args),
         _ => {
             println!("unknown scenario: `{}`", args.arg_scenario);
             println!("try --help");
--- a/third_party/rust/rayon/src/collections/binary_heap.rs
+++ b/third_party/rust/rayon/src/collections/binary_heap.rs
@@ -1,40 +1,56 @@
 //! This module contains the parallel iterator types for heaps
 //! (`BinaryHeap<T>`). You will rarely need to interact with it directly
 //! unless you have need to name one of the iterator types.
 
 use std::collections::BinaryHeap;
 
 use iter::*;
-use iter::internal::*;
+use iter::plumbing::*;
 
 use vec;
 
+/// Parallel iterator over a binary heap
+#[derive(Debug, Clone)]
+pub struct IntoIter<T: Ord + Send> {
+    inner: vec::IntoIter<T>,
+}
+
 impl<T: Ord + Send> IntoParallelIterator for BinaryHeap<T> {
     type Item = T;
     type Iter = IntoIter<T>;
 
     fn into_par_iter(self) -> Self::Iter {
         IntoIter { inner: Vec::from(self).into_par_iter() }
     }
 }
 
+delegate_indexed_iterator!{
+    IntoIter<T> => T,
+    impl<T: Ord + Send>
+}
+
+
+/// Parallel iterator over an immutable reference to a binary heap
+#[derive(Debug)]
+pub struct Iter<'a, T: Ord + Sync + 'a> {
+    inner: vec::IntoIter<&'a T>,
+}
+
+impl<'a, T: Ord + Sync> Clone for Iter<'a, T> {
+    fn clone(&self) -> Self {
+        Iter { inner: self.inner.clone() }
+    }
+}
+
 into_par_vec!{
     &'a BinaryHeap<T> => Iter<'a, T>,
     impl<'a, T: Ord + Sync>
 }
 
-// `BinaryHeap` doesn't have a mutable `Iterator`
-
-
 delegate_indexed_iterator!{
-    #[doc = "Parallel iterator over a binary heap"]
-    IntoIter<T> => vec::IntoIter<T>,
-    impl<T: Ord + Send>
+    Iter<'a, T> => &'a T,
+    impl<'a, T: Ord + Sync + 'a>
 }
 
 
-delegate_indexed_iterator!{
-    #[doc = "Parallel iterator over an immutable reference to a binary heap"]
-    Iter<'a, T> => vec::IntoIter<&'a T>,
-    impl<'a, T: Ord + Sync + 'a>
-}
+// `BinaryHeap` doesn't have a mutable `Iterator`
--- a/third_party/rust/rayon/src/collections/btree_map.rs
+++ b/third_party/rust/rayon/src/collections/btree_map.rs
@@ -1,46 +1,66 @@
 //! This module contains the parallel iterator types for B-Tree maps
 //! (`BTreeMap<K, V>`). You will rarely need to interact with it directly
 //! unless you have need to name one of the iterator types.
 
 use std::collections::BTreeMap;
 
 use iter::*;
-use iter::internal::*;
+use iter::plumbing::*;
 
 use vec;
 
+/// Parallel iterator over a B-Tree map
+#[derive(Debug)] // std doesn't Clone
+pub struct IntoIter<K: Ord + Send, V: Send> {
+    inner: vec::IntoIter<(K, V)>,
+}
+
 into_par_vec!{
     BTreeMap<K, V> => IntoIter<K, V>,
     impl<K: Ord + Send, V: Send>
 }
 
+delegate_iterator!{
+    IntoIter<K, V> => (K, V),
+    impl<K: Ord + Send, V: Send>
+}
+
+
+/// Parallel iterator over an immutable reference to a B-Tree map
+#[derive(Debug)]
+pub struct Iter<'a, K: Ord + Sync + 'a, V: Sync + 'a> {
+    inner: vec::IntoIter<(&'a K, &'a V)>,
+}
+
+impl<'a, K: Ord + Sync, V: Sync> Clone for Iter<'a, K, V> {
+    fn clone(&self) -> Self {
+        Iter { inner: self.inner.clone() }
+    }
+}
+
 into_par_vec!{
     &'a BTreeMap<K, V> => Iter<'a, K, V>,
     impl<'a, K: Ord + Sync, V: Sync>
 }
 
+delegate_iterator!{
+    Iter<'a, K, V> => (&'a K, &'a V),
+    impl<'a, K: Ord + Sync + 'a, V: Sync + 'a>
+}
+
+
+/// Parallel iterator over a mutable reference to a B-Tree map
+#[derive(Debug)]
+pub struct IterMut<'a, K: Ord + Sync + 'a, V: Send + 'a> {
+    inner: vec::IntoIter<(&'a K, &'a mut V)>,
+}
+
 into_par_vec!{
     &'a mut BTreeMap<K, V> => IterMut<'a, K, V>,
     impl<'a, K: Ord + Sync, V: Send>
 }
 
-
 delegate_iterator!{
-    #[doc = "Parallel iterator over a B-Tree map"]
-    IntoIter<K, V> => vec::IntoIter<(K, V)>,
-    impl<K: Ord + Send, V: Send>
-}
-
-
-delegate_iterator!{
-    #[doc = "Parallel iterator over an immutable reference to a B-Tree map"]
-    Iter<'a, K, V> => vec::IntoIter<(&'a K, &'a V)>,
-    impl<'a, K: Ord + Sync + 'a, V: Sync + 'a>
-}
-
-
-delegate_iterator!{
-    #[doc = "Parallel iterator over a mutable reference to a B-Tree map"]
-    IterMut<'a, K, V> => vec::IntoIter<(&'a K, &'a mut V)>,
+    IterMut<'a, K, V> => (&'a K, &'a mut V),
     impl<'a, K: Ord + Sync + 'a, V: Send + 'a>
 }
--- a/third_party/rust/rayon/src/collections/btree_set.rs
+++ b/third_party/rust/rayon/src/collections/btree_set.rs
@@ -1,36 +1,52 @@
 //! This module contains the parallel iterator types for B-Tree sets
 //! (`BTreeSet<T>`). You will rarely need to interact with it directly
 //! unless you have need to name one of the iterator types.
 
 use std::collections::BTreeSet;
 
 use iter::*;
-use iter::internal::*;
+use iter::plumbing::*;
 
 use vec;
 
+/// Parallel iterator over a B-Tree set
+#[derive(Debug)] // std doesn't Clone
+pub struct IntoIter<T: Ord + Send> {
+    inner: vec::IntoIter<T>,
+}
+
 into_par_vec!{
     BTreeSet<T> => IntoIter<T>,
     impl<T: Ord + Send>
 }
 
+delegate_iterator!{
+    IntoIter<T> => T,
+    impl<T: Ord + Send>
+}
+
+
+/// Parallel iterator over an immutable reference to a B-Tree set
+#[derive(Debug)]
+pub struct Iter<'a, T: Ord + Sync + 'a> {
+    inner: vec::IntoIter<&'a T>,
+}
+
+impl<'a, T: Ord + Sync + 'a> Clone for Iter<'a, T> {
+    fn clone(&self) -> Self {
+        Iter { inner: self.inner.clone() }
+    }
+}
+
 into_par_vec!{
     &'a BTreeSet<T> => Iter<'a, T>,
     impl<'a, T: Ord + Sync>
 }
 
-// `BTreeSet` doesn't have a mutable `Iterator`
-
-
 delegate_iterator!{
-    #[doc = "Parallel iterator over a B-Tree set"]
-    IntoIter<T> => vec::IntoIter<T>,
-    impl<T: Ord + Send>
+    Iter<'a, T> => &'a T,
+    impl<'a, T: Ord + Sync + 'a>
 }
 
 
-delegate_iterator!{
-    #[doc = "Parallel iterator over an immutable reference to a B-Tree set"]
-    Iter<'a, T> => vec::IntoIter<&'a T>,
-    impl<'a, T: Ord + Sync + 'a>
-}
+// `BTreeSet` doesn't have a mutable `Iterator`
--- a/third_party/rust/rayon/src/collections/hash_map.rs
+++ b/third_party/rust/rayon/src/collections/hash_map.rs
@@ -1,47 +1,67 @@
 //! This module contains the parallel iterator types for hash maps
 //! (`HashMap<K, V>`). You will rarely need to interact with it directly
 //! unless you have need to name one of the iterator types.
 
 use std::collections::HashMap;
 use std::hash::{Hash, BuildHasher};
 
 use iter::*;
-use iter::internal::*;
+use iter::plumbing::*;
 
 use vec;
 
+/// Parallel iterator over a hash map
+#[derive(Debug)] // std doesn't Clone
+pub struct IntoIter<K: Hash + Eq + Send, V: Send> {
+    inner: vec::IntoIter<(K, V)>,
+}
+
 into_par_vec!{
     HashMap<K, V, S> => IntoIter<K, V>,
     impl<K: Hash + Eq + Send, V: Send, S: BuildHasher>
 }
 
+delegate_iterator!{
+    IntoIter<K, V> => (K, V),
+    impl<K: Hash + Eq + Send, V: Send>
+}
+
+
+/// Parallel iterator over an immutable reference to a hash map
+#[derive(Debug)]
+pub struct Iter<'a, K: Hash + Eq + Sync + 'a, V: Sync + 'a> {
+    inner: vec::IntoIter<(&'a K, &'a V)>,
+}
+
+impl<'a, K: Hash + Eq + Sync, V: Sync> Clone for Iter<'a, K, V> {
+    fn clone(&self) -> Self {
+        Iter { inner: self.inner.clone() }
+    }
+}
+
 into_par_vec!{
     &'a HashMap<K, V, S> => Iter<'a, K, V>,
     impl<'a, K: Hash + Eq + Sync, V: Sync, S: BuildHasher>
 }
 
+delegate_iterator!{
+    Iter<'a, K, V> => (&'a K, &'a V),
+    impl<'a, K: Hash + Eq + Sync + 'a, V: Sync + 'a>
+}
+
+
+/// Parallel iterator over a mutable reference to a hash map
+#[derive(Debug)]
+pub struct IterMut<'a, K: Hash + Eq + Sync + 'a, V: Send + 'a> {
+    inner: vec::IntoIter<(&'a K, &'a mut V)>,
+}
+
 into_par_vec!{
     &'a mut HashMap<K, V, S> => IterMut<'a, K, V>,
     impl<'a, K: Hash + Eq + Sync, V: Send, S: BuildHasher>
 }
 
-
 delegate_iterator!{
-    #[doc = "Parallel iterator over a hash map"]
-    IntoIter<K, V> => vec::IntoIter<(K, V)>,
-    impl<K: Hash + Eq + Send, V: Send>
-}
-
-
-delegate_iterator!{
-    #[doc = "Parallel iterator over an immutable reference to a hash map"]
-    Iter<'a, K, V> => vec::IntoIter<(&'a K, &'a V)>,
-    impl<'a, K: Hash + Eq + Sync + 'a, V: Sync + 'a>
-}
-
-
-delegate_iterator!{
-    #[doc = "Parallel iterator over a mutable reference to a hash map"]
-    IterMut<'a, K, V> => vec::IntoIter<(&'a K, &'a mut V)>,
+    IterMut<'a, K, V> => (&'a K, &'a mut V),
     impl<'a, K: Hash + Eq + Sync + 'a, V: Send + 'a>
 }
--- a/third_party/rust/rayon/src/collections/hash_set.rs
+++ b/third_party/rust/rayon/src/collections/hash_set.rs
@@ -1,37 +1,53 @@
 //! This module contains the parallel iterator types for hash sets
 //! (`HashSet<T>`). You will rarely need to interact with it directly
 //! unless you have need to name one of the iterator types.
 
 use std::collections::HashSet;
 use std::hash::{Hash, BuildHasher};
 
 use iter::*;
-use iter::internal::*;
+use iter::plumbing::*;
 
 use vec;
 
+/// Parallel iterator over a hash set
+#[derive(Debug)] // std doesn't Clone
+pub struct IntoIter<T: Hash + Eq + Send> {
+    inner: vec::IntoIter<T>,
+}
+
 into_par_vec!{
     HashSet<T, S> => IntoIter<T>,
     impl<T: Hash + Eq + Send, S: BuildHasher>
 }
 
+delegate_iterator!{
+    IntoIter<T> => T,
+    impl<T: Hash + Eq + Send>
+}
+
+
+/// Parallel iterator over an immutable reference to a hash set
+#[derive(Debug)]
+pub struct Iter<'a, T: Hash + Eq + Sync + 'a> {
+    inner: vec::IntoIter<&'a T>,
+}
+
+impl<'a, T: Hash + Eq + Sync> Clone for Iter<'a, T> {
+    fn clone(&self) -> Self {
+        Iter { inner: self.inner.clone() }
+    }
+}
+
 into_par_vec!{
     &'a HashSet<T, S> => Iter<'a, T>,
     impl<'a, T: Hash + Eq + Sync, S: BuildHasher>
 }
 
-// `HashSet` doesn't have a mutable `Iterator`
-
-
 delegate_iterator!{
-    #[doc = "Parallel iterator over a hash set"]
-    IntoIter<T> => vec::IntoIter<T>,
-    impl<T: Hash + Eq + Send>
+    Iter<'a, T> => &'a T,
+    impl<'a, T: Hash + Eq + Sync + 'a>
 }
 
 
-delegate_iterator!{
-    #[doc = "Parallel iterator over an immutable reference to a hash set"]
-    Iter<'a, T> => vec::IntoIter<&'a T>,
-    impl<'a, T: Hash + Eq + Sync + 'a>
-}
+// `HashSet` doesn't have a mutable `Iterator`
--- a/third_party/rust/rayon/src/collections/linked_list.rs
+++ b/third_party/rust/rayon/src/collections/linked_list.rs
@@ -1,47 +1,66 @@
 //! This module contains the parallel iterator types for linked lists
 //! (`LinkedList<T>`). You will rarely need to interact with it directly
 //! unless you have need to name one of the iterator types.
 
 use std::collections::LinkedList;
 
 use iter::*;
-use iter::internal::*;
+use iter::plumbing::*;
 
 use vec;
 
+/// Parallel iterator over a linked list
+#[derive(Debug, Clone)]
+pub struct IntoIter<T: Send> {
+    inner: vec::IntoIter<T>,
+}
+
 into_par_vec!{
     LinkedList<T> => IntoIter<T>,
     impl<T: Send>
 }
 
+delegate_iterator!{
+    IntoIter<T> => T,
+    impl<T: Send>
+}
+
+
+/// Parallel iterator over an immutable reference to a linked list
+#[derive(Debug)]
+pub struct Iter<'a, T: Sync + 'a> {
+    inner: vec::IntoIter<&'a T>,
+}
+
+impl<'a, T: Sync> Clone for Iter<'a, T> {
+    fn clone(&self) -> Self {
+        Iter { inner: self.inner.clone() }
+    }
+}
+
 into_par_vec!{
     &'a LinkedList<T> => Iter<'a, T>,
     impl<'a, T: Sync>
 }
 
+delegate_iterator!{
+    Iter<'a, T> => &'a T,
+    impl<'a, T: Sync + 'a>
+}
+
+
+/// Parallel iterator over a mutable reference to a linked list
+#[derive(Debug)]
+pub struct IterMut<'a, T: Send + 'a> {
+    inner: vec::IntoIter<&'a mut T>,
+}
+
 into_par_vec!{
     &'a mut LinkedList<T> => IterMut<'a, T>,
     impl<'a, T: Send>
 }
 
-
-
 delegate_iterator!{
-    #[doc = "Parallel iterator over a linked list"]
-    IntoIter<T> => vec::IntoIter<T>,
-    impl<T: Send>
-}
-
-
-delegate_iterator!{
-    #[doc = "Parallel iterator over an immutable reference to a linked list"]
-    Iter<'a, T> => vec::IntoIter<&'a T>,
-    impl<'a, T: Sync + 'a>
-}
-
-
-delegate_iterator!{
-    #[doc = "Parallel iterator over a mutable reference to a linked list"]
-    IterMut<'a, T> => vec::IntoIter<&'a mut T>,
+    IterMut<'a, T> => &'a mut T,
     impl<'a, T: Send + 'a>
 }
--- a/third_party/rust/rayon/src/collections/mod.rs
+++ b/third_party/rust/rayon/src/collections/mod.rs
@@ -1,11 +1,14 @@
-//! This module contains the parallel iterator types for standard
-//! collections. You will rarely need to interact with it directly
-//! unless you have need to name one of the iterator types.
+//! Parallel iterator types for [standard collections][std::collections]
+//!
+//! You will rarely need to interact with this module directly unless you need
+//! to name one of the iterator types.
+//!
+//! [std::collections]: https://doc.rust-lang.org/stable/std/collections/
 
 /// Convert an iterable collection into a parallel iterator by first
 /// collecting into a temporary `Vec`, then iterating that.
 macro_rules! into_par_vec {
     ($t:ty => $iter:ident<$($i:tt),*>, impl $($args:tt)*) => {
         impl $($args)* IntoParallelIterator for $t {
             type Item = <$t as IntoIterator>::Item;
             type Iter = $iter<$($i),*>;
--- a/third_party/rust/rayon/src/collections/vec_deque.rs
+++ b/third_party/rust/rayon/src/collections/vec_deque.rs
@@ -1,57 +1,77 @@
 //! This module contains the parallel iterator types for double-ended queues
 //! (`VecDeque<T>`). You will rarely need to interact with it directly
 //! unless you have need to name one of the iterator types.
 
 use std::collections::VecDeque;
 
 use iter::*;
-use iter::internal::*;
+use iter::plumbing::*;
 
 use slice;
 use vec;
 
+/// Parallel iterator over a double-ended queue
+#[derive(Debug, Clone)]
+pub struct IntoIter<T: Send> {
+    inner: vec::IntoIter<T>,
+}
+
 into_par_vec!{
     VecDeque<T> => IntoIter<T>,
     impl<T: Send>
 }
 
+delegate_indexed_iterator!{
+    IntoIter<T> => T,
+    impl<T: Send>
+}
+
+
+/// Parallel iterator over an immutable reference to a double-ended queue
+#[derive(Debug)]
+pub struct Iter<'a, T: Sync + 'a> {
+    inner: Chain<slice::Iter<'a, T>, slice::Iter<'a, T>>,
+}
+
+impl<'a, T: Sync> Clone for Iter<'a, T> {
+    fn clone(&self) -> Self {
+        Iter { inner: self.inner.clone() }
+    }
+}
+
 impl<'a, T: Sync> IntoParallelIterator for &'a VecDeque<T> {
     type Item = &'a T;
     type Iter = Iter<'a, T>;
 
     fn into_par_iter(self) -> Self::Iter {
         let (a, b) = self.as_slices();
         Iter { inner: a.into_par_iter().chain(b) }
     }
 }
 
+delegate_indexed_iterator!{
+    Iter<'a, T> => &'a T,
+    impl<'a, T: Sync + 'a>
+}
+
+
+/// Parallel iterator over a mutable reference to a double-ended queue
+#[derive(Debug)]
+pub struct IterMut<'a, T: Send + 'a> {
+    inner: Chain<slice::IterMut<'a, T>, slice::IterMut<'a, T>>,
+}
+
 impl<'a, T: Send> IntoParallelIterator for &'a mut VecDeque<T> {
     type Item = &'a mut T;
     type Iter = IterMut<'a, T>;
 
     fn into_par_iter(self) -> Self::Iter {
         let (a, b) = self.as_mut_slices();
         IterMut { inner: a.into_par_iter().chain(b) }
     }
 }
 
-
 delegate_indexed_iterator!{
-    #[doc = "Parallel iterator over a double-ended queue"]
-    IntoIter<T> => vec::IntoIter<T>,
-    impl<T: Send>
-}
-
-
-delegate_indexed_iterator_item!{
-    #[doc = "Parallel iterator over an immutable reference to a double-ended queue"]
-    Iter<'a, T> => Chain<slice::Iter<'a, T>, slice::Iter<'a, T>> : &'a T,
-    impl<'a, T: Sync + 'a>
-}
-
-
-delegate_indexed_iterator_item!{
-    #[doc = "Parallel iterator over a mutable reference to a double-ended queue"]
-    IterMut<'a, T> => Chain<slice::IterMut<'a, T>, slice::IterMut<'a, T>> : &'a mut T,
+    IterMut<'a, T> => &'a mut T,
     impl<'a, T: Send + 'a>
 }
--- a/third_party/rust/rayon/src/delegate.rs
+++ b/third_party/rust/rayon/src/delegate.rs
@@ -1,116 +1,67 @@
 //! Macros for delegating newtype iterators to inner types.
 
 // Note: these place `impl` bounds at the end, as token gobbling is the only way
 // I know how to consume an arbitrary list of constraints, with `$($args:tt)*`.
 
-/// Create a parallel iterator which simply wraps an inner type and delegates
-/// all methods inward.  The item type is parsed from the inner type.
+/// Create a parallel iterator implementation which simply wraps an inner type
+/// and delegates all methods inward.  The actual struct must already be
+/// declared with an `inner` field.
 ///
 /// The implementation of `IntoParallelIterator` should be added separately.
 ///
 /// # Example
 ///
 /// ```
 /// delegate_iterator!{
-///     #[doc = "Move items from `MyCollection` in parallel"]
-///     MyIntoIter<T, U> => vec::IntoIter<(T, U)>,
+///     MyIntoIter<T, U> => (T, U),
 ///     impl<T: Ord + Send, U: Send>
 /// }
 /// ```
 macro_rules! delegate_iterator {
-    ($( #[ $attr:meta ] )+
-     $iter:ident < $( $i:tt ),* > => $( $inner:ident )::+ < $item:ty > ,
-     impl $( $args:tt )*
-     ) => {
-        delegate_iterator_item!{
-            $( #[ $attr ] )+
-            $iter < $( $i ),* > => $( $inner )::+ < $item > : $item ,
-            impl $( $args )*
-        }
-    }
-}
-
-/// Create an indexed parallel iterator which simply wraps an inner type and
-/// delegates all methods inward.  The item type is parsed from the inner type.
-macro_rules! delegate_indexed_iterator {
-    ($( #[ $attr:meta ] )+
-     $iter:ident < $( $i:tt ),* > => $( $inner:ident )::+ < $item:ty > ,
+    ($iter:ty => $item:ty ,
      impl $( $args:tt )*
      ) => {
-        delegate_indexed_iterator_item!{
-            $( #[ $attr ] )+
-            $iter < $( $i ),* > => $( $inner )::+ < $item > : $item ,
-            impl $( $args )*
-        }
-    }
-}
-
-/// Create a parallel iterator which simply wraps an inner type and delegates
-/// all methods inward.  The item type is explicitly specified.
-///
-/// The implementation of `IntoParallelIterator` should be added separately.
-///
-/// # Example
-///
-/// ```
-/// delegate_iterator_item!{
-///     #[doc = "Iterate items from `MyCollection` in parallel"]
-///     MyIter<'a, T, U> => slice::Iter<'a, (T, U)>: &'a (T, U),
-///     impl<'a, T: Ord + Sync, U: Sync>
-/// }
-/// ```
-macro_rules! delegate_iterator_item {
-    ($( #[ $attr:meta ] )+
-     $iter:ident < $( $i:tt ),* > => $inner:ty : $item:ty,
-     impl $( $args:tt )*
-     ) => {
-        $( #[ $attr ] )+
-        pub struct $iter $( $args )* {
-            inner: $inner,
-        }
-
-        impl $( $args )* ParallelIterator for $iter < $( $i ),* > {
+        impl $( $args )* ParallelIterator for $iter {
             type Item = $item;
 
             fn drive_unindexed<C>(self, consumer: C) -> C::Result
                 where C: UnindexedConsumer<Self::Item>
             {
                 self.inner.drive_unindexed(consumer)
             }
 
-            fn opt_len(&mut self) -> Option<usize> {
+            fn opt_len(&self) -> Option<usize> {
                 self.inner.opt_len()
             }
         }
     }
 }
 
-/// Create an indexed parallel iterator which simply wraps an inner type and
-/// delegates all methods inward.  The item type is explicitly specified.
-macro_rules! delegate_indexed_iterator_item {
-    ($( #[ $attr:meta ] )+
-     $iter:ident < $( $i:tt ),* > => $inner:ty : $item:ty,
+/// Create an indexed parallel iterator implementation which simply wraps an
+/// inner type and delegates all methods inward.  The actual struct must already
+/// be declared with an `inner` field.
+macro_rules! delegate_indexed_iterator {
+    ($iter:ty => $item:ty ,
      impl $( $args:tt )*
      ) => {
-        delegate_iterator_item!{
-            $( #[ $attr ] )+
-            $iter < $( $i ),* > => $inner : $item ,
+        delegate_iterator!{
+            $iter => $item ,
             impl $( $args )*
         }
 
-        impl $( $args )* IndexedParallelIterator for $iter < $( $i ),* > {
+        impl $( $args )* IndexedParallelIterator for $iter {
             fn drive<C>(self, consumer: C) -> C::Result
                 where C: Consumer<Self::Item>
             {
                 self.inner.drive(consumer)
             }
 
-            fn len(&mut self) -> usize {
+            fn len(&self) -> usize {
                 self.inner.len()
             }
 
             fn with_producer<CB>(self, callback: CB) -> CB::Output
                 where CB: ProducerCallback<Self::Item>
             {
                 self.inner.with_producer(callback)
             }
--- a/third_party/rust/rayon/src/iter/chain.rs
+++ b/third_party/rust/rayon/src/iter/chain.rs
@@ -1,20 +1,21 @@
-use super::internal::*;
+use super::plumbing::*;
 use super::*;
 use std::cmp;
 use std::iter;
 use rayon_core::join;
 
 /// `Chain` is an iterator that joins `b` after `a` in one continuous iterator.
 /// This struct is created by the [`chain()`] method on [`ParallelIterator`]
 ///
 /// [`chain()`]: trait.ParallelIterator.html#method.chain
 /// [`ParallelIterator`]: trait.ParallelIterator.html
 #[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
+#[derive(Debug, Clone)]
 pub struct Chain<A, B>
     where A: ParallelIterator,
           B: ParallelIterator<Item = A::Item>
 {
     a: A,
     b: B,
 }
 
@@ -32,62 +33,62 @@ impl<A, B> ParallelIterator for Chain<A,
     where A: ParallelIterator,
           B: ParallelIterator<Item = A::Item>
 {
     type Item = A::Item;
 
     fn drive_unindexed<C>(self, consumer: C) -> C::Result
         where C: UnindexedConsumer<Self::Item>
     {
-        let Chain { mut a, b } = self;
+        let Chain { a, b } = self;
 
         // If we returned a value from our own `opt_len`, then the collect consumer in particular
         // will balk at being treated like an actual `UnindexedConsumer`.  But when we do know the
         // length, we can use `Consumer::split_at` instead, and this is still harmless for other
         // truly-unindexed consumers too.
         let (left, right, reducer) = if let Some(len) = a.opt_len() {
             consumer.split_at(len)
         } else {
             let reducer = consumer.to_reducer();
             (consumer.split_off_left(), consumer, reducer)
         };
 
         let (a, b) = join(|| a.drive_unindexed(left), || b.drive_unindexed(right));
         reducer.reduce(a, b)
     }
 
-    fn opt_len(&mut self) -> Option<usize> {
+    fn opt_len(&self) -> Option<usize> {
         match (self.a.opt_len(), self.b.opt_len()) {
             (Some(a_len), Some(b_len)) => a_len.checked_add(b_len),
             _ => None,
         }
     }
 }
 
 impl<A, B> IndexedParallelIterator for Chain<A, B>
     where A: IndexedParallelIterator,
           B: IndexedParallelIterator<Item = A::Item>
 {
     fn drive<C>(self, consumer: C) -> C::Result
         where C: Consumer<Self::Item>
     {
-        let Chain { mut a, b } = self;
+        let Chain { a, b } = self;
         let (left, right, reducer) = consumer.split_at(a.len());
         let (a, b) = join(|| a.drive(left), || b.drive(right));
         reducer.reduce(a, b)
     }
 
-    fn len(&mut self) -> usize {
+    fn len(&self) -> usize {
         self.a
             .len()
             .checked_add(self.b.len())
             .expect("overflow")
     }
 
-    fn with_producer<CB>(mut self, callback: CB) -> CB::Output
+    fn with_producer<CB>(self, callback: CB) -> CB::Output
         where CB: ProducerCallback<Self::Item>
     {
         let a_len = self.a.len();
         return self.a.with_producer(CallbackA {
                                         callback: callback,
                                         a_len: a_len,
                                         b: self.b,
                                     });
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon/src/iter/chunks.rs
@@ -0,0 +1,208 @@
+use std::cmp::min;
+
+use ::math::div_round_up;
+use super::plumbing::*;
+use super::*;
+
+/// `Chunks` is an iterator that groups elements of an underlying iterator.
+///
+/// This struct is created by the [`chunks()`] method on [`IndexedParallelIterator`]
+///
+/// [`chunks()`]: trait.IndexedParallelIterator.html#method.chunks
+/// [`IndexedParallelIterator`]: trait.IndexedParallelIterator.html
+#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
+#[derive(Debug, Clone)]
+pub struct Chunks<I>
+    where I: IndexedParallelIterator
+{
+    size: usize,
+    i: I,
+}
+
+/// Create a new `Chunks` iterator
+///
+/// NB: a free fn because it is NOT part of the end-user API.
+pub fn new<I>(i: I, size: usize) -> Chunks<I>
+    where I: IndexedParallelIterator
+{
+    Chunks { i: i, size: size }
+}
+
+impl<I> ParallelIterator for Chunks<I>
+    where I: IndexedParallelIterator
+{
+    type Item = Vec<I::Item>;
+
+    fn drive_unindexed<C>(self, consumer: C) -> C::Result
+        where C: Consumer<Vec<I::Item>>
+    {
+        bridge(self, consumer)
+    }
+
+    fn opt_len(&self) -> Option<usize> {
+        Some(self.len())
+    }
+}
+
+impl<I> IndexedParallelIterator for Chunks<I>
+    where I: IndexedParallelIterator
+{
+    fn drive<C>(self, consumer: C) -> C::Result
+        where C: Consumer<Self::Item>
+    {
+        bridge(self, consumer)
+    }
+
+    fn len(&self) -> usize {
+        div_round_up(self.i.len(), self.size)
+    }
+
+    fn with_producer<CB>(self, callback: CB) -> CB::Output
+        where CB: ProducerCallback<Self::Item>
+    {
+        let len = self.i.len();
+        return self.i.with_producer(Callback {
+            size: self.size,
+            len: len,
+            callback: callback,
+        });
+
+        struct Callback<CB> {
+            size: usize,
+            len: usize,
+            callback: CB,
+        }
+
+        impl<T, CB> ProducerCallback<T> for Callback<CB>
+            where CB: ProducerCallback<Vec<T>>
+        {
+            type Output = CB::Output;
+
+            fn callback<P>(self, base: P) -> CB::Output
+                where P: Producer<Item = T>
+            {
+                self.callback.callback(ChunkProducer {
+                    chunk_size: self.size,
+                    len: self.len,
+                    base: base,
+                })
+            }
+        }
+    }
+}
+
+struct ChunkProducer<P>
+    where P: Producer
+{
+    chunk_size: usize,
+    len: usize,
+    base: P,
+}
+
+impl<P> Producer for ChunkProducer<P>
+    where P: Producer
+{
+    type Item = Vec<P::Item>;
+    type IntoIter = ChunkSeq<P>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        ChunkSeq {
+            chunk_size: self.chunk_size,
+            len: self.len,
+            inner: if self.len > 0 {
+                Some(self.base)
+            } else {
+                None
+            }
+        }
+    }
+
+    fn split_at(self, index: usize) -> (Self, Self) {
+        let elem_index = min(index * self.chunk_size, self.len);
+        let (left, right) = self.base.split_at(elem_index);
+        (ChunkProducer {
+            chunk_size: self.chunk_size,
+            len: elem_index,
+            base: left,
+        },
+        ChunkProducer {
+            chunk_size: self.chunk_size,
+            len: self.len - elem_index,
+            base: right,
+        })
+    }
+
+    fn min_len(&self) -> usize {
+        div_round_up(self.base.min_len(), self.chunk_size)
+    }
+
+    fn max_len(&self) -> usize {
+        self.base.max_len() / self.chunk_size
+    }
+}
+
+struct ChunkSeq<P> {
+    chunk_size: usize,
+    len: usize,
+    inner: Option<P>,
+}
+
+impl<P> Iterator for ChunkSeq<P>
+    where P: Producer
+{
+    type Item = Vec<P::Item>;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        match self.inner.take() {
+            Some(producer) => if self.len > self.chunk_size {
+                let (left, right) = producer.split_at(self.chunk_size);
+                self.inner = Some(right);
+                self.len -= self.chunk_size;
+                Some(left.into_iter().collect())
+            } else {
+                debug_assert!(self.len > 0);
+                self.len = 0;
+                Some(producer.into_iter().collect())
+            },
+            _ => None
+        }
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        let len = self.len();
+        (len, Some(len))
+    }
+}
+
+impl<P> ExactSizeIterator for ChunkSeq<P>
+    where P: Producer
+{
+    #[inline]
+    fn len(&self) -> usize {
+        div_round_up(self.len, self.chunk_size)
+    }
+}
+
+impl<P> DoubleEndedIterator for ChunkSeq<P>
+    where P: Producer
+{
+    fn next_back(&mut self) -> Option<Self::Item> {
+        match self.inner.take() {
+            Some(producer) => if self.len > self.chunk_size {
+                let mut size = self.len % self.chunk_size;
+                if size == 0 {
+                    size = self.chunk_size;
+                }
+                let (left, right) = producer.split_at(self.len - size);
+                self.inner = Some(left);
+                self.len -= size;
+                Some(right.into_iter().collect())
+            } else {
+                debug_assert!(self.len > 0);
+                self.len = 0;
+                Some(producer.into_iter().collect())
+            },
+            _ => None
+        }
+    }
+}
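A minimal usage sketch of the new `chunks()` adaptor added above (assuming the `rayon::prelude` of this vendored crate; the chunk size and values are illustrative):

```rust
use rayon::prelude::*;

fn main() {
    // `chunks(3)` yields Vec groups of at most 3 items; only the final chunk
    // may be shorter, which is why `len()` uses div_round_up.
    let chunks: Vec<Vec<i32>> = (0..10).into_par_iter().chunks(3).collect();
    assert_eq!(chunks, vec![vec![0, 1, 2], vec![3, 4, 5], vec![6, 7, 8], vec![9]]);
}
```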
--- a/third_party/rust/rayon/src/iter/cloned.rs
+++ b/third_party/rust/rayon/src/iter/cloned.rs
@@ -1,20 +1,21 @@
-use super::internal::*;
+use super::plumbing::*;
 use super::*;
 
 use std::iter;
 
 /// `Cloned` is an iterator that clones the elements of an underlying iterator.
 ///
 /// This struct is created by the [`cloned()`] method on [`ParallelIterator`]
 ///
 /// [`cloned()`]: trait.ParallelIterator.html#method.cloned
 /// [`ParallelIterator`]: trait.ParallelIterator.html
 #[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
+#[derive(Debug, Clone)]
 pub struct Cloned<I: ParallelIterator> {
     base: I,
 }
 
 /// Create a new `Cloned` iterator.
 ///
 /// NB: a free fn because it is NOT part of the end-user API.
 pub fn new<I>(base: I) -> Cloned<I>
@@ -31,33 +32,33 @@ impl<'a, T, I> ParallelIterator for Clon
 
     fn drive_unindexed<C>(self, consumer: C) -> C::Result
         where C: UnindexedConsumer<Self::Item>
     {
         let consumer1 = ClonedConsumer::new(consumer);
         self.base.drive_unindexed(consumer1)
     }
 
-    fn opt_len(&mut self) -> Option<usize> {
+    fn opt_len(&self) -> Option<usize> {
         self.base.opt_len()
     }
 }
 
 impl<'a, T, I> IndexedParallelIterator for Cloned<I>
     where I: IndexedParallelIterator<Item = &'a T>,
           T: 'a + Clone + Send + Sync
 {
     fn drive<C>(self, consumer: C) -> C::Result
         where C: Consumer<Self::Item>
     {
         let consumer1 = ClonedConsumer::new(consumer);
         self.base.drive(consumer1)
     }
 
-    fn len(&mut self) -> usize {
+    fn len(&self) -> usize {
         self.base.len()
     }
 
     fn with_producer<CB>(self, callback: CB) -> CB::Output
         where CB: ProducerCallback<Self::Item>
     {
         return self.base.with_producer(Callback { callback: callback });
 
@@ -105,16 +106,22 @@ impl<'a, T, P> Producer for ClonedProduc
     fn max_len(&self) -> usize {
         self.base.max_len()
     }
 
     fn split_at(self, index: usize) -> (Self, Self) {
         let (left, right) = self.base.split_at(index);
         (ClonedProducer { base: left }, ClonedProducer { base: right })
     }
+
+    fn fold_with<F>(self, folder: F) -> F
+        where F: Folder<Self::Item>
+    {
+        self.base.fold_with(ClonedFolder { base: folder }).base
+    }
 }
 
 
 /// ////////////////////////////////////////////////////////////////////////
 /// Consumer implementation
 
 struct ClonedConsumer<C> {
     base: C,
--- a/third_party/rust/rayon/src/iter/collect/consumer.rs
+++ b/third_party/rust/rayon/src/iter/collect/consumer.rs
@@ -1,9 +1,9 @@
-use super::super::internal::*;
+use super::super::plumbing::*;
 use super::super::noop::*;
 use std::ptr;
 use std::slice;
 use std::sync::atomic::{AtomicUsize, Ordering};
 
 pub struct CollectConsumer<'c, T: Send + 'c> {
     /// Tracks how many items we successfully wrote. Used to guarantee
     /// safety in the face of panics or buggy parallel iterators.
@@ -85,17 +85,17 @@ impl<'c, T: Send + 'c> Folder<T> for Col
         self.global_writes.fetch_add(self.local_writes, Ordering::Relaxed);
     }
 
     fn full(&self) -> bool {
         false
     }
 }
 
-/// Pretend to be unindexed for `special_collect_into`,
+/// Pretend to be unindexed for `special_collect_into_vec`,
 /// but we should never actually get used that way...
 impl<'c, T: Send + 'c> UnindexedConsumer<T> for CollectConsumer<'c, T> {
     fn split_off_left(&self) -> Self {
         unreachable!("CollectConsumer must be indexed!")
     }
     fn to_reducer(&self) -> Self::Reducer {
         NoopReducer
     }
--- a/third_party/rust/rayon/src/iter/collect/mod.rs
+++ b/third_party/rust/rayon/src/iter/collect/mod.rs
@@ -6,18 +6,18 @@ use std::sync::atomic::{AtomicUsize, Ord
 mod consumer;
 use self::consumer::CollectConsumer;
 use super::unzip::unzip_indexed;
 
 mod test;
 
 /// Collects the results of the exact iterator into the specified vector.
 ///
-/// This is not directly public, but called by `IndexedParallelIterator::collect_into`.
-pub fn collect_into<I, T>(mut pi: I, v: &mut Vec<T>)
+/// This is not directly public, but called by `IndexedParallelIterator::collect_into_vec`.
+pub fn collect_into_vec<I, T>(pi: I, v: &mut Vec<T>)
     where I: IndexedParallelIterator<Item = T>,
           T: Send
 {
     v.truncate(0); // clear any old data
     let mut collect = Collect::new(v, pi.len());
     pi.drive(collect.as_consumer());
     collect.complete();
 }
@@ -39,18 +39,18 @@ fn special_extend<I, T>(pi: I, len: usiz
 {
     let mut collect = Collect::new(v, len);
     pi.drive_unindexed(collect.as_consumer());
     collect.complete();
 }
 
 /// Unzips the results of the exact iterator into the specified vectors.
 ///
-/// This is not directly public, but called by `IndexedParallelIterator::unzip_into`.
-pub fn unzip_into<I, A, B>(mut pi: I, left: &mut Vec<A>, right: &mut Vec<B>)
+/// This is not directly public, but called by `IndexedParallelIterator::unzip_into_vecs`.
+pub fn unzip_into_vecs<I, A, B>(pi: I, left: &mut Vec<A>, right: &mut Vec<B>)
     where I: IndexedParallelIterator<Item = (A, B)>,
           A: Send,
           B: Send
 {
     // clear any old data
     left.truncate(0);
     right.truncate(0);
 
@@ -89,17 +89,17 @@ impl<'c, T: Send + 'c> Collect<'c, T> {
         // Get a correct borrow, then extend it for the newly added length.
         let start = self.vec.len();
         let mut slice = &mut self.vec[start..];
         slice = unsafe { slice::from_raw_parts_mut(slice.as_mut_ptr(), self.len) };
         CollectConsumer::new(&self.writes, slice)
     }
 
     /// Update the final vector length.
-    fn complete(mut self) {
+    fn complete(self) {
         unsafe {
             // Here, we assert that `v` is fully initialized. This is
             // checked by the following assert, which counts how many
             // total writes occurred. Since we know that the consumer
             // cannot have escaped from `drive` (by parametricity,
             // essentially), we know that any stores that will happen,
             // have happened. Unless some code is buggy, that means we
             // should have seen `len` total writes.
@@ -118,32 +118,40 @@ impl<'c, T: Send + 'c> Collect<'c, T> {
 /// Extend a vector with items from a parallel iterator.
 impl<T> ParallelExtend<T> for Vec<T>
     where T: Send
 {
     fn par_extend<I>(&mut self, par_iter: I)
         where I: IntoParallelIterator<Item = T>
     {
         // See the vec_collect benchmarks in rayon-demo for different strategies.
-        let mut par_iter = par_iter.into_par_iter();
+        let par_iter = par_iter.into_par_iter();
         match par_iter.opt_len() {
             Some(len) => {
                 // When Rust gets specialization, we can get here for indexed iterators
                 // without relying on `opt_len`.  Until then, `special_extend()` fakes
                 // an unindexed mode on the promise that `opt_len()` is accurate.
                 special_extend(par_iter, len, self);
             }
             None => {
                 // This works like `extend`, but `Vec::append` is more efficient.
                 let list: LinkedList<_> = par_iter
                     .fold(Vec::new, |mut vec, elem| {
                         vec.push(elem);
                         vec
                     })
-                    .collect();
+                    .map(|vec| {
+                        let mut list = LinkedList::new();
+                        list.push_back(vec);
+                        list
+                    })
+                    .reduce(LinkedList::new, |mut list1, mut list2| {
+                        list1.append(&mut list2);
+                        list1
+                    });
 
                 self.reserve(list.iter().map(Vec::len).sum());
                 for mut vec in list {
                     self.append(&mut vec);
                 }
             }
         }
     }
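A minimal sketch of the two collection paths touched above: the renamed `collect_into_vec` for indexed iterators, and the fold/map/reduce shape that replaces `collect()` into a `LinkedList` for the unindexed case (assuming the `rayon::prelude` of this vendored crate; values are illustrative):

```rust
use rayon::prelude::*;
use std::collections::LinkedList;

fn main() {
    // Indexed path: the length is known up front, so results are written
    // directly into a pre-sized vector.
    let mut squares = Vec::new();
    (0..5).into_par_iter().map(|i| i * i).collect_into_vec(&mut squares);
    assert_eq!(squares, vec![0, 1, 4, 9, 16]);

    // Unindexed path: fold into per-thread Vecs, wrap each in a LinkedList,
    // and append the lists; this mirrors the code above.
    let list = (0..100)
        .into_par_iter()
        .filter(|i| i % 2 == 0)
        .fold(Vec::new, |mut v, i| { v.push(i); v })
        .map(|v| {
            let mut l = LinkedList::new();
            l.push_back(v);
            l
        })
        .reduce(LinkedList::new, |mut a, mut b| { a.append(&mut b); a });
    assert_eq!(list.iter().map(Vec::len).sum::<usize>(), 50);
}
```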
--- a/third_party/rust/rayon/src/iter/collect/test.rs
+++ b/third_party/rust/rayon/src/iter/collect/test.rs
@@ -1,16 +1,16 @@
 #![cfg(test)]
 #![allow(unused_assignments)]
 
 // These tests are primarily targeting "abusive" producers that will
 // try to drive the "collect consumer" incorrectly. These should
 // result in panics.
 
-use iter::internal::*;
+use iter::plumbing::*;
 use super::Collect;
 
 /// Promises to produce 2 items, but then produces 3.  Does not do any
 /// splits at all.
 #[test]
 #[should_panic(expected = "too many values")]
 fn produce_too_many_items() {
     let mut v = vec![];
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon/src/iter/empty.rs
@@ -0,0 +1,99 @@
+use iter::plumbing::*;
+use iter::*;
+
+use std;
+use std::fmt;
+use std::marker::PhantomData;
+
+/// Creates a parallel iterator that produces nothing.
+///
+/// This admits no parallelism on its own, but it could be used for code that
+/// deals with generic parallel iterators.
+///
+/// # Examples
+///
+/// ```
+/// use rayon::prelude::*;
+/// use rayon::iter::empty;
+///
+/// let pi = (0..1234).into_par_iter()
+///     .chain(empty())
+///     .chain(1234..10_000);
+///
+/// assert_eq!(pi.count(), 10_000);
+/// ```
+pub fn empty<T: Send>() -> Empty<T> {
+    Empty { marker: PhantomData }
+}
+
+/// Iterator adaptor for [the `empty()` function](fn.empty.html).
+pub struct Empty<T: Send> {
+    marker: PhantomData<T>,
+}
+
+impl<T: Send> Clone for Empty<T> {
+    fn clone(&self) -> Self {
+        empty()
+    }
+}
+
+impl<T: Send> fmt::Debug for Empty<T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.pad("Empty")
+    }
+}
+
+impl<T: Send> ParallelIterator for Empty<T> {
+    type Item = T;
+
+    fn drive_unindexed<C>(self, consumer: C) -> C::Result
+        where C: UnindexedConsumer<Self::Item>
+    {
+        self.drive(consumer)
+    }
+
+    fn opt_len(&self) -> Option<usize> {
+        Some(0)
+    }
+}
+
+impl<T: Send> IndexedParallelIterator for Empty<T> {
+    fn drive<C>(self, consumer: C) -> C::Result
+        where C: Consumer<Self::Item>
+    {
+        consumer.into_folder().complete()
+    }
+
+    fn len(&self) -> usize {
+        0
+    }
+
+    fn with_producer<CB>(self, callback: CB) -> CB::Output
+        where CB: ProducerCallback<Self::Item>
+    {
+        callback.callback(EmptyProducer(PhantomData))
+    }
+}
+
+/// Private empty producer
+struct EmptyProducer<T: Send>(PhantomData<T>);
+
+impl<T: Send> Producer for EmptyProducer<T> {
+    type Item = T;
+    type IntoIter = std::iter::Empty<T>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        std::iter::empty()
+    }
+
+    fn split_at(self, index: usize) -> (Self, Self) {
+        debug_assert_eq!(index, 0);
+        (self, EmptyProducer(PhantomData))
+    }
+
+    fn fold_with<F>(self, folder: F) -> F
+        where F: Folder<Self::Item>
+    {
+        folder
+    }
+}
--- a/third_party/rust/rayon/src/iter/enumerate.rs
+++ b/third_party/rust/rayon/src/iter/enumerate.rs
@@ -1,20 +1,21 @@
-use super::internal::*;
+use super::plumbing::*;
 use super::*;
 use std::iter;
 use std::ops::Range;
 use std::usize;
 
 /// `Enumerate` is an iterator that returns the current count along with the element.
-/// This struct is created by the [`enumerate()`] method on [`ParallelIterator`]
+/// This struct is created by the [`enumerate()`] method on [`IndexedParallelIterator`]
 ///
-/// [`enumerate()`]: trait.ParallelIterator.html#method.enumerate
-/// [`ParallelIterator`]: trait.ParallelIterator.html
+/// [`enumerate()`]: trait.IndexedParallelIterator.html#method.enumerate
+/// [`IndexedParallelIterator`]: trait.IndexedParallelIterator.html
 #[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
+#[derive(Debug, Clone)]
 pub struct Enumerate<I: IndexedParallelIterator> {
     base: I,
 }
 
 /// Create a new `Enumerate` iterator.
 ///
 /// NB: a free fn because it is NOT part of the end-user API.
 pub fn new<I>(base: I) -> Enumerate<I>
@@ -29,29 +30,29 @@ impl<I> ParallelIterator for Enumerate<I
     type Item = (usize, I::Item);
 
     fn drive_unindexed<C>(self, consumer: C) -> C::Result
         where C: UnindexedConsumer<Self::Item>
     {
         bridge(self, consumer)
     }
 
-    fn opt_len(&mut self) -> Option<usize> {
+    fn opt_len(&self) -> Option<usize> {
         Some(self.len())
     }
 }
 
 impl<I> IndexedParallelIterator for Enumerate<I>
     where I: IndexedParallelIterator
 {
     fn drive<C: Consumer<Self::Item>>(self, consumer: C) -> C::Result {
         bridge(self, consumer)
     }
 
-    fn len(&mut self) -> usize {
+    fn len(&self) -> usize {
         self.base.len()
     }
 
     fn with_producer<CB>(self, callback: CB) -> CB::Output
         where CB: ProducerCallback<Self::Item>
     {
         return self.base.with_producer(Callback { callback: callback });
 
@@ -89,18 +90,24 @@ impl<P> Producer for EnumerateProducer<P
 {
     type Item = (usize, P::Item);
     type IntoIter = iter::Zip<Range<usize>, P::IntoIter>;
 
     fn into_iter(self) -> Self::IntoIter {
         // Enumerate only works for IndexedParallelIterators. Since those
         // have a max length of usize::MAX, their max index is
         // usize::MAX - 1, so the range 0..usize::MAX includes all
-        // possible indices
-        (self.offset..usize::MAX).zip(self.base.into_iter())
+        // possible indices.
+        //
+        // However, we should use a precise end to the range; otherwise,
+        // reversing the iterator may have to walk back a long way before
+        // `Zip::next_back` can produce anything.
+        let base = self.base.into_iter();
+        let end = self.offset + base.len();
+        (self.offset..end).zip(base)
     }
 
     fn min_len(&self) -> usize {
         self.base.min_len()
     }
     fn max_len(&self) -> usize {
         self.base.max_len()
     }
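A small sketch of `enumerate()` (assuming the `rayon::prelude` of this vendored crate; values are illustrative). The precise range end computed above matters only internally, when a producer's sequential iterator is consumed from the back:

```rust
use rayon::prelude::*;

fn main() {
    // Each item is paired with its index, in order.
    let pairs: Vec<(usize, char)> = vec!['a', 'b', 'c']
        .into_par_iter()
        .enumerate()
        .collect();
    assert_eq!(pairs, vec![(0, 'a'), (1, 'b'), (2, 'c')]);
}
```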
--- a/third_party/rust/rayon/src/iter/extend.rs
+++ b/third_party/rust/rayon/src/iter/extend.rs
@@ -1,10 +1,11 @@
 use super::{ParallelExtend, IntoParallelIterator, ParallelIterator};
 
+use std::borrow::Cow;
 use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet};
 use std::hash::{BuildHasher, Hash};
 use std::collections::LinkedList;
 use std::collections::{BinaryHeap, VecDeque};
 
 /// Perform a generic `par_extend` by collecting to a `LinkedList<Vec<_>>` in
 /// parallel, then extending the collection sequentially.
 fn extend<C, I, F>(collection: &mut C, par_iter: I, reserve: F)
@@ -13,17 +14,25 @@ fn extend<C, I, F>(collection: &mut C, p
           C: Extend<I::Item>
 {
     let list = par_iter
         .into_par_iter()
         .fold(Vec::new, |mut vec, elem| {
             vec.push(elem);
             vec
         })
-        .collect();
+        .map(|vec| {
+            let mut list = LinkedList::new();
+            list.push_back(vec);
+            list
+        })
+        .reduce(LinkedList::new, |mut list1, mut list2| {
+            list1.append(&mut list2);
+            list1
+        });
 
     reserve(collection, &list);
     for vec in list {
         collection.extend(vec);
     }
 }
 
 /// Compute the total length of a `LinkedList<Vec<_>>`.
@@ -207,17 +216,25 @@ impl ParallelExtend<char> for String {
         // This is like `extend`, but `Vec<char>` is less efficient to deal
         // with than `String`, so instead collect to `LinkedList<String>`.
         let list: LinkedList<_> = par_iter
             .into_par_iter()
             .fold(String::new, |mut string, ch| {
                 string.push(ch);
                 string
             })
-            .collect();
+            .map(|vec| {
+                let mut list = LinkedList::new();
+                list.push_back(vec);
+                list
+            })
+            .reduce(LinkedList::new, |mut list1, mut list2| {
+                list1.append(&mut list2);
+                list1
+            });
 
         self.reserve(list.iter().map(String::len).sum());
         self.extend(list)
     }
 }
 
 /// Extend a string with copied characters from a parallel iterator.
 impl<'a> ParallelExtend<&'a char> for String {
@@ -241,16 +258,46 @@ impl<'a> ParallelExtend<&'a str> for Str
 impl ParallelExtend<String> for String {
     fn par_extend<I>(&mut self, par_iter: I)
         where I: IntoParallelIterator<Item = String>
     {
         extend(self, par_iter, |string, list| string.reserve(str_len(list)));
     }
 }
 
+/// Extend a string with string slices from a parallel iterator.
+impl<'a> ParallelExtend<Cow<'a, str>> for String {
+    fn par_extend<I>(&mut self, par_iter: I)
+        where I: IntoParallelIterator<Item = Cow<'a, str>>
+    {
+        // This is like `extend`, but `Extend<Cow<'a, str>> for String`
+        // wasn't added until Rust 1.19, so we can't use it directly yet.
+        let list = par_iter
+            .into_par_iter()
+            .fold(Vec::new, |mut vec, elem| {
+                vec.push(elem);
+                vec
+            })
+        .map(|vec| {
+            let mut list = LinkedList::new();
+            list.push_back(vec);
+            list
+        })
+        .reduce(LinkedList::new, |mut list1, mut list2| {
+            list1.append(&mut list2);
+            list1
+        });
+
+        self.reserve(str_len(&list));
+        for vec in list {
+            self.extend(vec.iter().map(|cow| &**cow));
+        }
+    }
+}
+
 
 /// Extend a deque with items from a parallel iterator.
 impl<T> ParallelExtend<T> for VecDeque<T>
     where T: Send
 {
     fn par_extend<I>(&mut self, par_iter: I)
         where I: IntoParallelIterator<Item = T>
     {
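A minimal sketch of the new `ParallelExtend<Cow<str>>` impl added above (assuming the `rayon::prelude` of this vendored crate; the strings are illustrative):

```rust
use rayon::prelude::*;
use std::borrow::Cow;

fn main() {
    // Borrowed and owned pieces can be mixed; the impl above collects them
    // without relying on `Extend<Cow<str>>`, which needs Rust 1.19.
    let pieces: Vec<Cow<'static, str>> =
        vec![Cow::Borrowed("par"), Cow::Owned("allel".to_string())];
    let mut s = String::from("rayon is ");
    s.par_extend(pieces.into_par_iter());
    assert_eq!(s, "rayon is parallel");
}
```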
--- a/third_party/rust/rayon/src/iter/filter.rs
+++ b/third_party/rust/rayon/src/iter/filter.rs
@@ -1,22 +1,33 @@
-use super::internal::*;
+use super::plumbing::*;
 use super::*;
 
+use std::fmt::{self, Debug};
+
 /// `Filter` takes a predicate `filter_op` and keeps only the elements that match.
 /// This struct is created by the [`filter()`] method on [`ParallelIterator`]
 ///
 /// [`filter()`]: trait.ParallelIterator.html#method.filter
 /// [`ParallelIterator`]: trait.ParallelIterator.html
 #[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
+#[derive(Clone)]
 pub struct Filter<I: ParallelIterator, P> {
     base: I,
     filter_op: P,
 }
 
+impl<I: ParallelIterator + Debug, P> Debug for Filter<I, P> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_struct("Filter")
+            .field("base", &self.base)
+            .finish()
+    }
+}
+
 /// Create a new `Filter` iterator.
 ///
 /// NB: a free fn because it is NOT part of the end-user API.
 pub fn new<I, P>(base: I, filter_op: P) -> Filter<I, P>
     where I: ParallelIterator
 {
     Filter {
         base: base,
--- a/third_party/rust/rayon/src/iter/filter_map.rs
+++ b/third_party/rust/rayon/src/iter/filter_map.rs
@@ -1,22 +1,33 @@
-use super::internal::*;
+use super::plumbing::*;
 use super::*;
 
+use std::fmt::{self, Debug};
+
 /// `FilterMap` creates an iterator that uses `filter_op` to both filter and map elements.
 /// This struct is created by the [`filter_map()`] method on [`ParallelIterator`].
 ///
 /// [`filter_map()`]: trait.ParallelIterator.html#method.filter_map
 /// [`ParallelIterator`]: trait.ParallelIterator.html
 #[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
+#[derive(Clone)]
 pub struct FilterMap<I: ParallelIterator, P> {
     base: I,
     filter_op: P,
 }
 
+impl<I: ParallelIterator + Debug, P> Debug for FilterMap<I, P> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_struct("FilterMap")
+            .field("base", &self.base)
+            .finish()
+    }
+}
+
 /// Create a new `FilterMap` iterator.
 ///
 /// NB: a free fn because it is NOT part of the end-user API.
 pub fn new<I, P>(base: I, filter_op: P) -> FilterMap<I, P>
     where I: ParallelIterator
 {
     FilterMap {
         base: base,
--- a/third_party/rust/rayon/src/iter/find.rs
+++ b/third_party/rust/rayon/src/iter/find.rs
@@ -1,10 +1,10 @@
 use std::sync::atomic::{AtomicBool, Ordering};
-use super::internal::*;
+use super::plumbing::*;
 use super::*;
 
 pub fn find<I, P>(pi: I, find_op: P) -> Option<I::Item>
     where I: ParallelIterator,
           P: Fn(&I::Item) -> bool + Sync
 {
     let found = AtomicBool::new(false);
     let consumer = FindConsumer::new(&find_op, &found);
--- a/third_party/rust/rayon/src/iter/find_first_last/mod.rs
+++ b/third_party/rust/rayon/src/iter/find_first_last/mod.rs
@@ -1,11 +1,11 @@
 use std::cell::Cell;
 use std::sync::atomic::{AtomicUsize, Ordering};
-use super::internal::*;
+use super::plumbing::*;
 use super::*;
 
 #[cfg(test)]
 mod test;
 
 // The key optimization for find_first is that a consumer can stop its search if
 // some consumer to its left already found a match (and similarly for consumers
 // to the right for find_last). To make this work, all consumers need some
--- a/third_party/rust/rayon/src/iter/find_first_last/test.rs
+++ b/third_party/rust/rayon/src/iter/find_first_last/test.rs
@@ -136,14 +136,14 @@ fn find_first_octillion() {
 #[test]
 fn find_last_octillion() {
     // FIXME: If we don't use at least two threads, then we end up walking
     // through the entire iterator sequentially, without the benefit of any
     // short-circuiting.  We probably don't want testing to wait that long. ;)
     // It would be nice if `find_last` could prioritize the later splits,
     // basically flipping the `join` args, without needing indexed `rev`.
     // (or could we have an unindexed `rev`?)
-    let config = ::Configuration::new().num_threads(2);
-    let pool = ::ThreadPool::new(config).unwrap();
+    let builder = ::ThreadPoolBuilder::new().num_threads(2);
+    let pool = builder.build().unwrap();
 
     let x = pool.install(|| octillion().find_last(|_| true));
     assert_eq!(x, Some(999999999999999999999999999));
 }
--- a/third_party/rust/rayon/src/iter/flat_map.rs
+++ b/third_party/rust/rayon/src/iter/flat_map.rs
@@ -1,22 +1,33 @@
-use super::internal::*;
+use super::plumbing::*;
 use super::*;
 
+use std::fmt::{self, Debug};
+
 /// `FlatMap` maps each element to an iterator, then flattens these iterators together.
 /// This struct is created by the [`flat_map()`] method on [`ParallelIterator`]
 ///
-/// [`flap_map()`]: trait.ParallelIterator.html#method.flat_map
+/// [`flat_map()`]: trait.ParallelIterator.html#method.flat_map
 /// [`ParallelIterator`]: trait.ParallelIterator.html
 #[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
+#[derive(Clone)]
 pub struct FlatMap<I: ParallelIterator, F> {
     base: I,
     map_op: F,
 }
 
+impl<I: ParallelIterator + Debug, F> Debug for FlatMap<I, F> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_struct("FlatMap")
+            .field("base", &self.base)
+            .finish()
+    }
+}
+
 /// Create a new `FlatMap` iterator.
 ///
 /// NB: a free fn because it is NOT part of the end-user API.
 pub fn new<I, F>(base: I, map_op: F) -> FlatMap<I, F>
     where I: ParallelIterator
 {
     FlatMap {
         base: base,
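A small sketch of `flat_map()` (assuming the `rayon::prelude` of this vendored crate; the word list is illustrative):

```rust
use rayon::prelude::*;

fn main() {
    // Each item is mapped to something iterable in parallel, and the results
    // are flattened into one stream.
    let words = vec!["hello", "world"];
    let letters: Vec<char> = words
        .par_iter()
        .flat_map(|s| s.chars().collect::<Vec<_>>())
        .collect();
    assert_eq!(letters.len(), 10);
}
```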
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon/src/iter/flatten.rs
@@ -0,0 +1,39 @@
+use super::plumbing::*;
+use super::*;
+
+/// `Flatten` turns each element into an iterator, then flattens these iterators
+/// together. This struct is created by the [`flatten()`] method on
+/// [`ParallelIterator`].
+///
+/// [`flatten()`]: trait.ParallelIterator.html#method.flatten
+/// [`ParallelIterator`]: trait.ParallelIterator.html
+#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
+#[derive(Debug, Clone)]
+pub struct Flatten<I: ParallelIterator> {
+    base: I,
+}
+
+/// Create a new `Flatten` iterator.
+///
+/// NB: Not part of the public API.
+pub fn new<I, PI>(base: I) -> Flatten<I>
+    where I: ParallelIterator<Item = PI>,
+          PI: IntoParallelIterator + Send
+{
+    Flatten {
+        base: base,
+    }
+}
+
+impl<I, PI> ParallelIterator for Flatten<I>
+    where I: ParallelIterator<Item = PI>,
+          PI: IntoParallelIterator + Send
+{
+    type Item = PI::Item;
+
+    fn drive_unindexed<C>(self, consumer: C) -> C::Result
+        where C: UnindexedConsumer<Self::Item>
+    {
+        self.base.flat_map(|x| x).drive_unindexed(consumer)
+    }
+}
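Since the new `Flatten` adaptor above simply forwards to `flat_map(|x| x)`, a usage sketch is correspondingly short (assuming the `rayon::prelude` of this vendored crate; values are illustrative):

```rust
use rayon::prelude::*;

fn main() {
    let nested = vec![vec![1, 2], vec![3], vec![], vec![4, 5]];
    // Equivalent to `nested.into_par_iter().flat_map(|v| v)`.
    let flat: Vec<i32> = nested.into_par_iter().flatten().collect();
    assert_eq!(flat, vec![1, 2, 3, 4, 5]);
}
```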
--- a/third_party/rust/rayon/src/iter/fold.rs
+++ b/third_party/rust/rayon/src/iter/fold.rs
@@ -1,11 +1,13 @@
-use super::internal::*;
+use super::plumbing::*;
 use super::*;
 
+use std::fmt::{self, Debug};
+
 pub fn fold<U, I, ID, F>(base: I, identity: ID, fold_op: F) -> Fold<I, ID, F>
     where I: ParallelIterator,
           F: Fn(U, I::Item) -> U + Sync + Send,
           ID: Fn() -> U + Sync + Send,
           U: Send
 {
     Fold {
         base: base,
@@ -15,22 +17,31 @@ pub fn fold<U, I, ID, F>(base: I, identi
 }
 
 /// `Fold` is an iterator that applies a function over an iterator producing a single value.
 /// This struct is created by the [`fold()`] method on [`ParallelIterator`]
 ///
 /// [`fold()`]: trait.ParallelIterator.html#method.fold
 /// [`ParallelIterator`]: trait.ParallelIterator.html
 #[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
+#[derive(Clone)]
 pub struct Fold<I, ID, F> {
     base: I,
     identity: ID,
     fold_op: F,
 }
 
+impl<I: ParallelIterator + Debug, ID, F> Debug for Fold<I, ID, F> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_struct("Fold")
+            .field("base", &self.base)
+            .finish()
+    }
+}
+
 impl<U, I, ID, F> ParallelIterator for Fold<I, ID, F>
     where I: ParallelIterator,
           F: Fn(U, I::Item) -> U + Sync + Send,
           ID: Fn() -> U + Sync + Send,
           U: Send
 {
     type Item = U;
 
@@ -140,22 +151,32 @@ pub fn fold_with<U, I, F>(base: I, item:
 }
 
 /// `FoldWith` is an iterator that applies a function over an iterator producing a single value.
 /// This struct is created by the [`fold_with()`] method on [`ParallelIterator`]
 ///
 /// [`fold_with()`]: trait.ParallelIterator.html#method.fold_with
 /// [`ParallelIterator`]: trait.ParallelIterator.html
 #[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
+#[derive(Clone)]
 pub struct FoldWith<I, U, F> {
     base: I,
     item: U,
     fold_op: F,
 }
 
+impl<I: ParallelIterator + Debug, U: Debug, F> Debug for FoldWith<I, U, F> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_struct("FoldWith")
+            .field("base", &self.base)
+            .field("item", &self.item)
+            .finish()
+    }
+}
+
 impl<U, I, F> ParallelIterator for FoldWith<I, U, F>
     where I: ParallelIterator,
           F: Fn(U, I::Item) -> U + Sync + Send,
           U: Send + Clone
 {
     type Item = U;
 
     fn drive_unindexed<C>(self, consumer: C) -> C::Result
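A minimal sketch of `fold()` and `fold_with()` (assuming the `rayon::prelude` of this vendored crate; values are illustrative). `fold` yields one partial accumulator per sequential chunk, so it is normally followed by a reduction:

```rust
use rayon::prelude::*;

fn main() {
    // One partial sum per chunk, then combined with `sum()`.
    let total: u32 = (1u32..101)
        .into_par_iter()
        .fold(|| 0u32, |acc, x| acc + x)
        .sum();
    assert_eq!(total, 5050);

    // `fold_with` clones a starting value instead of calling an identity closure.
    let total2: u32 = (1u32..101)
        .into_par_iter()
        .fold_with(0u32, |acc, x| acc + x)
        .sum();
    assert_eq!(total2, 5050);
}
```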
--- a/third_party/rust/rayon/src/iter/for_each.rs
+++ b/third_party/rust/rayon/src/iter/for_each.rs
@@ -1,10 +1,10 @@
 use super::ParallelIterator;
-use super::internal::*;
+use super::plumbing::*;
 use super::noop::*;
 
 pub fn for_each<I, F, T>(pi: I, op: &F)
     where I: ParallelIterator<Item = T>,
           F: Fn(T) + Sync,
           T: Send
 {
     let consumer = ForEachConsumer { op: op };
@@ -40,16 +40,21 @@ impl<'f, F, T> Folder<T> for ForEachCons
 {
     type Result = ();
 
     fn consume(self, item: T) -> Self {
         (self.op)(item);
         self
     }
 
+    fn consume_iter<I>(self, iter: I) -> Self where I: IntoIterator<Item=T> {
+        iter.into_iter().fold((), |_, item| (self.op)(item));
+        self
+    }
+
     fn complete(self) {}
 
     fn full(&self) -> bool {
         false
     }
 }
 
 impl<'f, F, T> UnindexedConsumer<T> for ForEachConsumer<'f, F>
--- a/third_party/rust/rayon/src/iter/from_par_iter.rs
+++ b/third_party/rust/rayon/src/iter/from_par_iter.rs
@@ -1,9 +1,9 @@
-use super::{FromParallelIterator, IntoParallelIterator, ParallelExtend};
+use super::{FromParallelIterator, IntoParallelIterator, ParallelIterator, ParallelExtend};
 
 use std::borrow::Cow;
 use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet};
 use std::hash::{BuildHasher, Hash};
 use std::collections::LinkedList;
 use std::collections::{BinaryHeap, VecDeque};
 
 
@@ -149,24 +149,56 @@ impl<'a> FromParallelIterator<&'a str> f
 impl FromParallelIterator<String> for String {
     fn from_par_iter<I>(par_iter: I) -> Self
         where I: IntoParallelIterator<Item = String>
     {
         collect_extended(par_iter)
     }
 }
 
+/// Collect string slices from a parallel iterator into a string.
+impl<'a> FromParallelIterator<Cow<'a, str>> for String {
+    fn from_par_iter<I>(par_iter: I) -> Self
+        where I: IntoParallelIterator<Item = Cow<'a, str>>
+    {
+        collect_extended(par_iter)
+    }
+}
+
 /// Collect an arbitrary `Cow` collection.
 ///
 /// Note, the standard library only has `FromIterator` for `Cow<'a, str>` and
 /// `Cow<'a, [T]>`, because no one thought to add a blanket implementation
 /// before it was stabilized.
 impl<'a, C: ?Sized, T> FromParallelIterator<T> for Cow<'a, C>
     where C: ToOwned,
           C::Owned: FromParallelIterator<T>,
           T: Send
 {
     fn from_par_iter<I>(par_iter: I) -> Self
         where I: IntoParallelIterator<Item = T>
     {
         Cow::Owned(C::Owned::from_par_iter(par_iter))
     }
 }
+
+/// Collapses all unit items from a parallel iterator into one.
+///
+/// This is more useful when combined with higher-level abstractions, like
+/// collecting to a `Result<(), E>` where you only care about errors:
+///
+/// ```
+/// use std::io::*;
+/// use rayon::prelude::*;
+///
+/// let data = vec![1, 2, 3, 4, 5];
+/// let res: Result<()> = data.par_iter()
+///     .map(|x| writeln!(stdout(), "{}", x))
+///     .collect();
+/// assert!(res.is_ok());
+/// ```
+impl FromParallelIterator<()> for () {
+    fn from_par_iter<I>(par_iter: I) -> Self
+        where I: IntoParallelIterator<Item = ()>
+    {
+        par_iter.into_par_iter().for_each(|()| {})
+    }
+}
--- a/third_party/rust/rayon/src/iter/inspect.rs
+++ b/third_party/rust/rayon/src/iter/inspect.rs
@@ -1,27 +1,37 @@
-use super::internal::*;
+use super::plumbing::*;
 use super::*;
 
+use std::fmt::{self, Debug};
 use std::iter;
 
 
 /// `Inspect` is an iterator that calls a function with a reference to each
 /// element before yielding it.
 ///
 /// This struct is created by the [`inspect()`] method on [`ParallelIterator`]
 ///
 /// [`inspect()`]: trait.ParallelIterator.html#method.inspect
 /// [`ParallelIterator`]: trait.ParallelIterator.html
 #[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
+#[derive(Clone)]
 pub struct Inspect<I: ParallelIterator, F> {
     base: I,
     inspect_op: F,
 }
 
+impl<I: ParallelIterator + Debug, F> Debug for Inspect<I, F> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_struct("Inspect")
+            .field("base", &self.base)
+            .finish()
+    }
+}
+
 /// Create a new `Inspect` iterator.
 ///
 /// NB: a free fn because it is NOT part of the end-user API.
 pub fn new<I, F>(base: I, inspect_op: F) -> Inspect<I, F>
     where I: ParallelIterator
 {
     Inspect {
         base: base,
@@ -37,33 +47,33 @@ impl<I, F> ParallelIterator for Inspect<
 
     fn drive_unindexed<C>(self, consumer: C) -> C::Result
         where C: UnindexedConsumer<Self::Item>
     {
         let consumer1 = InspectConsumer::new(consumer, &self.inspect_op);
         self.base.drive_unindexed(consumer1)
     }
 
-    fn opt_len(&mut self) -> Option<usize> {
+    fn opt_len(&self) -> Option<usize> {
         self.base.opt_len()
     }
 }
 
 impl<I, F> IndexedParallelIterator for Inspect<I, F>
     where I: IndexedParallelIterator,
           F: Fn(&I::Item) + Sync + Send
 {
     fn drive<C>(self, consumer: C) -> C::Result
         where C: Consumer<Self::Item>
     {
         let consumer1 = InspectConsumer::new(consumer, &self.inspect_op);
         self.base.drive(consumer1)
     }
 
-    fn len(&mut self) -> usize {
+    fn len(&self) -> usize {
         self.base.len()
     }
 
     fn with_producer<CB>(self, callback: CB) -> CB::Output
         where CB: ProducerCallback<Self::Item>
     {
         return self.base
                    .with_producer(Callback {
@@ -127,16 +137,23 @@ impl<'f, P, F> Producer for InspectProdu
              base: left,
              inspect_op: self.inspect_op,
          },
          InspectProducer {
              base: right,
              inspect_op: self.inspect_op,
          })
     }
+
+    fn fold_with<G>(self, folder: G) -> G
+        where G: Folder<Self::Item>
+    {
+        let folder1 = InspectFolder { base: folder, inspect_op: self.inspect_op };
+        self.base.fold_with(folder1).base
+    }
 }
 
 
 /// ////////////////////////////////////////////////////////////////////////
 /// Consumer implementation
 
 struct InspectConsumer<'f, C, F: 'f> {
     base: C,
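A small sketch of `inspect()`, whose new `fold_with` shortcut above keeps the observation on the producer path (assuming the `rayon::prelude` of this vendored crate; the counter is illustrative):

```rust
use rayon::prelude::*;
use std::sync::atomic::{AtomicUsize, Ordering};

fn main() {
    // Each item is observed by reference without changing the stream.
    let seen = AtomicUsize::new(0);
    let sum: i32 = (1..5)
        .into_par_iter()
        .inspect(|_| { seen.fetch_add(1, Ordering::Relaxed); })
        .sum();
    assert_eq!(sum, 10);
    assert_eq!(seen.load(Ordering::Relaxed), 4);
}
```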
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon/src/iter/interleave.rs
@@ -0,0 +1,309 @@
+use super::plumbing::*;
+use super::*;
+use std::cmp;
+use std::iter::Fuse;
+
+/// `Interleave` is an iterator that interleaves elements of iterators
+/// `i` and `j` in one continuous iterator. This struct is created by
+/// the [`interleave()`] method on [`IndexedParallelIterator`]
+///
+/// [`interleave()`]: trait.IndexedParallelIterator.html#method.interleave
+/// [`IndexedParallelIterator`]: trait.IndexedParallelIterator.html
+#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
+#[derive(Debug, Clone)]
+pub struct Interleave<I, J>
+    where I: IndexedParallelIterator,
+          J: IndexedParallelIterator<Item = I::Item>
+{
+    i: I,
+    j: J,
+}
+
+/// Create a new `Interleave` iterator
+///
+/// NB: a free fn because it is NOT part of the end-user API.
+pub fn new<I, J>(i: I, j: J) -> Interleave<I, J>
+    where I: IndexedParallelIterator,
+          J: IndexedParallelIterator<Item = I::Item>
+{
+    Interleave { i: i, j: j }
+}
+
+impl<I, J> ParallelIterator for Interleave<I, J>
+    where I: IndexedParallelIterator,
+          J: IndexedParallelIterator<Item = I::Item>
+{
+    type Item = I::Item;
+
+    fn drive_unindexed<C>(self, consumer: C) -> C::Result
+        where C: Consumer<I::Item>
+    {
+        bridge(self, consumer)
+    }
+
+    fn opt_len(&self) -> Option<usize> {
+        Some(self.len())
+    }
+}
+
+impl<I, J> IndexedParallelIterator for Interleave<I, J>
+    where I: IndexedParallelIterator,
+          J: IndexedParallelIterator<Item = I::Item>,
+{
+    fn drive<C>(self, consumer: C) -> C::Result
+        where C: Consumer<Self::Item>
+    {
+        bridge(self, consumer)
+    }
+
+    fn len(&self) -> usize {
+        self.i
+            .len()
+            .checked_add(self.j.len())
+            .expect("overflow")
+    }
+
+    fn with_producer<CB>(self, callback: CB) -> CB::Output
+        where CB: ProducerCallback<Self::Item>
+    {
+        let (i_len, j_len) = (self.i.len(), self.j.len());
+        return self.i.with_producer(CallbackI {
+            callback: callback,
+            i_len: i_len,
+            j_len: j_len,
+            i_next: false,
+            j: self.j
+        });
+
+        struct CallbackI<CB, J> {
+            callback: CB,
+            i_len: usize,
+            j_len: usize,
+            i_next: bool,
+            j: J
+        }
+
+        impl<CB, J> ProducerCallback<J::Item> for CallbackI<CB, J>
+            where J: IndexedParallelIterator,
+                  CB: ProducerCallback<J::Item>
+        {
+            type Output = CB::Output;
+
+            fn callback<I>(self, i_producer: I) -> Self::Output
+                where I: Producer<Item = J::Item>
+            {
+                self.j.with_producer(CallbackJ {
+                    i_producer: i_producer,
+                    i_len: self.i_len,
+                    j_len: self.j_len,
+                    i_next: self.i_next,
+                    callback: self.callback
+                })
+            }
+        }
+
+        struct CallbackJ<CB, I> {
+            callback: CB,
+            i_len: usize,
+            j_len: usize,
+            i_next: bool,
+            i_producer: I
+        }
+
+        impl<CB, I> ProducerCallback<I::Item> for CallbackJ<CB, I>
+            where I: Producer,
+                  CB: ProducerCallback<I::Item>
+        {
+            type Output = CB::Output;
+
+            fn callback<J>(self, j_producer: J) -> Self::Output
+                where J: Producer<Item = I::Item>
+            {
+                let producer = InterleaveProducer::new(self.i_producer, j_producer, self.i_len, self.j_len, self.i_next);
+                self.callback.callback(producer)
+            }
+        }
+    }
+}
+
+pub struct InterleaveProducer<I, J>
+    where I: Producer,
+          J: Producer<Item = I::Item>
+{
+    i: I,
+    j: J,
+    i_len: usize,
+    j_len: usize,
+    i_next: bool,
+}
+
+impl<I, J> InterleaveProducer<I, J>
+    where I: Producer,
+          J: Producer<Item = I::Item>
+{
+    fn new(i: I, j: J, i_len: usize, j_len: usize, i_next: bool) -> InterleaveProducer<I, J> {
+        InterleaveProducer { i: i, j: j, i_len: i_len, j_len: j_len, i_next: i_next }
+    }
+}
+
+impl<I, J> Producer for InterleaveProducer<I, J>
+    where I: Producer,
+          J: Producer<Item = I::Item>
+{
+    type Item = I::Item;
+    type IntoIter = InterleaveSeq<I::IntoIter, J::IntoIter>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        InterleaveSeq {
+            i: self.i.into_iter().fuse(),
+            j: self.j.into_iter().fuse(),
+            i_next: self.i_next,
+        }
+    }
+
+    fn min_len(&self) -> usize {
+        cmp::max(self.i.min_len(), self.j.min_len())
+    }
+
+    fn max_len(&self) -> usize {
+        cmp::min(self.i.max_len(), self.j.max_len())
+    }
+
+    /// We know 0 < index <= self.i_len + self.j_len
+    ///
+    /// Find a, b satisfying:
+    ///
+    ///  (1) 0 < a <= self.i_len
+    ///  (2) 0 < b <= self.j_len
+    ///  (3) a + b == index
+    ///
+    /// For even splits, set a = b = index/2.
+    /// For odd splits: if `i` should yield the next element, set
+    /// a = (index/2)+1 and b = index/2; otherwise, if `j` should yield
+    /// the next element, set a = index/2 and b = (index/2)+1.
+    fn split_at(self, index: usize) -> (Self, Self) {
+        let even = index%2 == 0;
+        let idx = index >> 1;
+
+        let odd_offset = |flag| if flag { 0 } else { 1 };
+
+        // desired split
+        let (i_idx, j_idx) = (idx + odd_offset(even || self.i_next),
+                              idx + odd_offset(even || !self.i_next));
+
+        let (i_split, j_split) = if self.i_len >= i_idx && self.j_len >= j_idx {
+            (i_idx, j_idx)
+        } else if self.i_len >= i_idx {
+            // j too short
+            (index - self.j_len, self.j_len)
+        } else {
+            // i too short
+            (self.i_len, index - self.i_len)
+        };
+
+        let trailing_i_next = even == self.i_next;
+        let (i_left, i_right) = self.i.split_at(i_split);
+        let (j_left, j_right) = self.j.split_at(j_split);
+
+        (InterleaveProducer::new(
+            i_left,
+            j_left,
+            i_split,
+            j_split,
+            self.i_next
+        ), InterleaveProducer::new(
+            i_right,
+            j_right,
+            self.i_len - i_split,
+            self.j_len - j_split,
+            trailing_i_next
+        ))
+    }
+}
+
+
+/// Wrapper for Interleave to implement DoubleEndedIterator and
+/// ExactSizeIterator.
+///
+/// This iterator is fused.
+pub struct InterleaveSeq<I, J> {
+    i: Fuse<I>,
+    j: Fuse<J>,
+
+    /// Flag controlling which iterator provides the next element. When this
+    /// is `false`, `i` produces the next element; otherwise `j` produces the
+    /// next element.
+    i_next: bool
+}
+
+/// Iterator implementation for InterleaveSeq. This implementation is
+/// taken more or less verbatim from itertools. It is replicated here
+/// (instead of calling itertools directly), because we also need to
+/// implement `DoubleEndedIterator` and `ExactSizeIterator`.
+impl<I, J> Iterator for InterleaveSeq<I, J>
+    where I: Iterator,
+          J: Iterator<Item = I::Item>
+{
+    type Item = I::Item;
+
+    #[inline]
+    fn next(&mut self) -> Option<Self::Item> {
+        self.i_next = !self.i_next;
+        if self.i_next {
+            match self.i.next() {
+                None => self.j.next(),
+                r => r
+            }
+        } else {
+            match self.j.next() {
+                None => self.i.next(),
+                r => r
+            }
+        }
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        let (ih, jh) = (self.i.size_hint(), self.j.size_hint());
+        let min = ih.0.saturating_add(jh.0);
+        let max = match (ih.1, jh.1) {
+            (Some(x), Some(y)) => x.checked_add(y),
+            _=> None
+        };
+        (min, max)
+    }
+}
+
+// The implementation for DoubleEndedIterator requires
+// ExactSizeIterator to provide `next_back()`. The last element will
+// come from the iterator that runs out last (i.e. has the most elements
+// in it). If the iterators have the same number of elements, then the
+// last iterator will provide the last element.
+impl<I, J> DoubleEndedIterator for InterleaveSeq<I, J>
+    where I: DoubleEndedIterator + ExactSizeIterator,
+          J: DoubleEndedIterator<Item = I::Item> + ExactSizeIterator<Item = I::Item>
+{
+    #[inline]
+    fn next_back(&mut self) -> Option<I::Item> {
+        if self.i.len() == self.j.len() {
+            if self.i_next {
+                self.i.next_back()
+            } else {
+                self.j.next_back()
+            }
+        } else if self.i.len() < self.j.len() {
+            self.j.next_back()
+        } else {
+            self.i.next_back()
+        }
+    }
+}
+
+impl<I, J> ExactSizeIterator for InterleaveSeq<I, J>
+    where I: ExactSizeIterator,
+          J: ExactSizeIterator<Item = I::Item>
+{
+    #[inline]
+    fn len(&self) -> usize {
+        self.i.len() + self.j.len()
+    }
+}
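A minimal sketch of the new `interleave()` adaptor (assuming the `rayon::prelude` of this vendored crate; values are illustrative). The `split_at` parity bookkeeping above exists so that any split of this sequence keeps the i/j alternation consistent on both halves:

```rust
use rayon::prelude::*;

fn main() {
    // Items alternate i, j, i, j, ...; if one side ran out, the remainder of
    // the other side would be appended.
    let i = vec![1, 3, 5];
    let j = vec![2, 4, 6];
    let merged: Vec<i32> = i.into_par_iter().interleave(j.into_par_iter()).collect();
    assert_eq!(merged, vec![1, 2, 3, 4, 5, 6]);
}
```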
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon/src/iter/interleave_shortest.rs
@@ -0,0 +1,79 @@
+use super::plumbing::*;
+use super::*;
+
+/// `InterleaveShortest` is an iterator that works similarly to
+/// `Interleave`, but this version stops returning elements once one
+/// of the iterators runs out.
+///
+/// This struct is created by the [`interleave_shortest()`] method on
+/// [`IndexedParallelIterator`].
+///
+/// [`interleave_shortest()`]: trait.IndexedParallelIterator.html#method.interleave_shortest
+/// [`IndexedParallelIterator`]: trait.IndexedParallelIterator.html
+#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
+#[derive(Debug, Clone)]
+pub struct InterleaveShortest<I, J>
+    where I: IndexedParallelIterator,
+          J: IndexedParallelIterator<Item = I::Item>
+{
+    interleave: Interleave<Take<I>, Take<J>>
+}
+
+/// Create a new `InterleaveShortest` iterator
+///
+/// NB: a free fn because it is NOT part of the end-user API.
+pub fn new<I, J>(i: I, j: J) -> InterleaveShortest<I, J>
+    where I: IndexedParallelIterator,
+          J: IndexedParallelIterator<Item = I::Item>
+{
+    InterleaveShortest {
+        interleave: if i.len() <= j.len() {
+            // take equal lengths from both iterators
+            let n = i.len();
+            i.take(n).interleave(j.take(n))
+        } else {
+            // take one extra item from the first iterator
+            let n = j.len();
+            i.take(n + 1).interleave(j.take(n))
+        }
+    }
+}
+
+
+impl<I, J> ParallelIterator for InterleaveShortest<I, J>
+    where I: IndexedParallelIterator,
+          J: IndexedParallelIterator<Item = I::Item>
+{
+    type Item = I::Item;
+
+    fn drive_unindexed<C>(self, consumer: C) -> C::Result
+        where C: Consumer<I::Item>
+    {
+        bridge(self, consumer)
+    }
+
+    fn opt_len(&self) -> Option<usize> {
+        Some(self.len())
+    }
+}
+
+impl<I, J> IndexedParallelIterator for InterleaveShortest<I, J>
+    where I: IndexedParallelIterator,
+          J: IndexedParallelIterator<Item = I::Item>,
+{
+    fn drive<C>(self, consumer: C) -> C::Result
+        where C: Consumer<Self::Item>
+    {
+        bridge(self, consumer)
+    }
+
+    fn len(&self) -> usize {
+        self.interleave.len()
+    }
+
+    fn with_producer<CB>(self, callback: CB) -> CB::Output
+        where CB: ProducerCallback<Self::Item>
+    {
+        self.interleave.with_producer(callback)
+    }
+}
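A minimal sketch of `interleave_shortest()` (assuming the `rayon::prelude` of this vendored crate; values are illustrative). As constructed above, the first iterator may contribute one extra item via `take(n + 1)`:

```rust
use rayon::prelude::*;

fn main() {
    // i.len() = 4 > j.len() = 2, so i is truncated to 3 items and j to 2.
    let i = vec![1, 3, 5, 7];
    let j = vec![2, 4];
    let v: Vec<i32> = i.into_par_iter().interleave_shortest(j.into_par_iter()).collect();
    assert_eq!(v, vec![1, 2, 3, 4, 5]);
}
```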
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon/src/iter/intersperse.rs
@@ -0,0 +1,373 @@
+use super::plumbing::*;
+use super::*;
+use std::cell::Cell;
+use std::iter::Fuse;
+
+/// `Intersperse` is an iterator that inserts a particular item between each
+/// item of the adapted iterator.  This struct is created by the
+/// [`intersperse()`] method on [`ParallelIterator`]
+///
+/// [`intersperse()`]: trait.ParallelIterator.html#method.intersperse
+/// [`ParallelIterator`]: trait.ParallelIterator.html
+#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
+#[derive(Clone, Debug)]
+pub struct Intersperse<I>
+    where I: ParallelIterator,
+          I::Item: Clone
+{
+    base: I,
+    item: I::Item,
+}
+
+/// Create a new `Intersperse` iterator
+///
+/// NB: a free fn because it is NOT part of the end-user API.
+pub fn new<I>(base: I, item: I::Item) -> Intersperse<I>
+    where I: ParallelIterator,
+          I::Item: Clone
+{
+    Intersperse { base: base, item: item }
+}
+
+impl<I> ParallelIterator for Intersperse<I>
+    where I: ParallelIterator,
+          I::Item: Clone + Send
+{
+    type Item = I::Item;
+
+    fn drive_unindexed<C>(self, consumer: C) -> C::Result
+        where C: UnindexedConsumer<I::Item>
+    {
+        let consumer1 = IntersperseConsumer::new(consumer, self.item);
+        self.base.drive_unindexed(consumer1)
+    }
+
+    fn opt_len(&self) -> Option<usize> {
+        match self.base.opt_len() {
+            None => None,
+            Some(0) => Some(0),
+            Some(len) => len.checked_add(len - 1),
+        }
+    }
+}
+
+impl<I> IndexedParallelIterator for Intersperse<I>
+    where I: IndexedParallelIterator,
+          I::Item: Clone + Send
+{
+    fn drive<C>(self, consumer: C) -> C::Result
+        where C: Consumer<Self::Item>
+    {
+        let consumer1 = IntersperseConsumer::new(consumer, self.item);
+        self.base.drive(consumer1)
+    }
+
+    fn len(&self) -> usize {
+        let len = self.base.len();
+        if len > 0 {
+            len.checked_add(len - 1).expect("overflow")
+        } else {
+            0
+        }
+    }
+
+    fn with_producer<CB>(self, callback: CB) -> CB::Output
+        where CB: ProducerCallback<Self::Item>
+    {
+        let len = self.len();
+        return self.base.with_producer(Callback {
+                                           callback: callback,
+                                           item: self.item,
+                                           len: len,
+                                       });
+
+        struct Callback<CB, T> {
+            callback: CB,
+            item: T,
+            len: usize,
+        }
+
+        impl<T, CB> ProducerCallback<T> for Callback<CB, T>
+            where CB: ProducerCallback<T>,
+                  T: Clone + Send
+        {
+            type Output = CB::Output;
+
+            fn callback<P>(self, base: P) -> CB::Output
+                where P: Producer<Item = T>
+            {
+                let producer = IntersperseProducer::new(base, self.item, self.len);
+                self.callback.callback(producer)
+            }
+        }
+    }
+}
+
+
+struct IntersperseProducer<P>
+    where P: Producer
+{
+    base: P,
+    item: P::Item,
+    len: usize,
+    clone_first: bool,
+}
+
+impl<P> IntersperseProducer<P>
+    where P: Producer
+{
+    fn new(base: P, item: P::Item, len: usize) -> Self {
+        IntersperseProducer {
+            base: base,
+            item: item,
+            len: len,
+            clone_first: false,
+        }
+    }
+}
+
+impl<P> Producer for IntersperseProducer<P>
+    where P: Producer,
+          P::Item: Clone + Send
+{
+    type Item = P::Item;
+    type IntoIter = IntersperseIter<P::IntoIter>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        IntersperseIter {
+            base: self.base.into_iter().fuse(),
+            item: self.item,
+            clone_first: self.len > 0 && self.clone_first,
+
+            // If there's more than one item, then even lengths end the opposite
+            // of how they started with respect to interspersed clones.
+            clone_last: self.len > 1 && ((self.len & 1 == 0) ^ self.clone_first),
+        }
+    }
+
+    fn min_len(&self) -> usize {
+        self.base.min_len()
+    }
+    fn max_len(&self) -> usize {
+        self.base.max_len()
+    }
+
+    fn split_at(self, index: usize) -> (Self, Self) {
+        debug_assert!(index <= self.len);
+
+        // The left needs half of the items from the base producer, and the
+        // other half will be our interspersed item.  If we're not leading with
+        // a cloned item, then we need to round up the base number of items,
+        // otherwise round down.
+        let base_index = (index + !self.clone_first as usize) / 2;
+        let (left_base, right_base) = self.base.split_at(base_index);
+
+        let left = IntersperseProducer {
+            base: left_base,
+            item: self.item.clone(),
+            len: index,
+            clone_first: self.clone_first,
+        };
+
+        let right = IntersperseProducer {
+            base: right_base,
+            item: self.item,
+            len: self.len - index,
+
+            // If the index is odd, the right side toggles `clone_first`.
+            clone_first: (index & 1 == 1) ^ self.clone_first,
+        };
+
+        (left, right)
+    }
+
+    fn fold_with<F>(self, folder: F) -> F
+        where F: Folder<Self::Item>
+    {
+        let folder1 = IntersperseFolder {
+            base: folder,
+            item: self.item,
+            clone_first: self.clone_first,
+        };
+        self.base.fold_with(folder1).base
+    }
+}
+
+
+struct IntersperseIter<I>
+    where I: Iterator
+{
+    base: Fuse<I>,
+    item: I::Item,
+    clone_first: bool,
+    clone_last: bool,
+}
+
+impl<I> Iterator for IntersperseIter<I>
+    where I: DoubleEndedIterator + ExactSizeIterator,
+          I::Item: Clone
+{
+    type Item = I::Item;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        if self.clone_first {
+            self.clone_first = false;
+            Some(self.item.clone())
+        } else if let next @ Some(_) = self.base.next() {
+            // If there are any items left, we'll need another clone in front.
+            self.clone_first = self.base.len() != 0;
+            next
+        } else if self.clone_last {
+            self.clone_last = false;
+            Some(self.item.clone())
+        } else {
+            None
+        }
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        let len = self.len();
+        (len, Some(len))
+    }
+}
+
+impl<I> DoubleEndedIterator for IntersperseIter<I>
+    where I: DoubleEndedIterator + ExactSizeIterator,
+          I::Item: Clone
+{
+    fn next_back(&mut self) -> Option<Self::Item> {
+        if self.clone_last {
+            self.clone_last = false;
+            Some(self.item.clone())
+        } else if let next_back @ Some(_) = self.base.next_back() {
+            // If there are any items left, we'll need another clone in back.
+            self.clone_last = self.base.len() != 0;
+            next_back
+        } else if self.clone_first {
+            self.clone_first = false;
+            Some(self.item.clone())
+        } else {
+            None
+        }
+    }
+}
+
+impl<I> ExactSizeIterator for IntersperseIter<I>
+    where I: DoubleEndedIterator + ExactSizeIterator,
+          I::Item: Clone
+{
+    fn len(&self) -> usize {
+        let len = self.base.len();
+        len + len.saturating_sub(1)
+            + self.clone_first as usize
+            + self.clone_last as usize
+    }
+}
+
+
+struct IntersperseConsumer<C, T> {
+    base: C,
+    item: T,
+    clone_first: Cell<bool>,
+}
+
+impl<C, T> IntersperseConsumer<C, T>
+    where C: Consumer<T>
+{
+    fn new(base: C, item: T) -> Self {
+        IntersperseConsumer {
+            base: base,
+            item: item,
+            clone_first: false.into(),
+        }
+    }
+}
+
+impl<C, T> Consumer<T> for IntersperseConsumer<C, T>
+    where C: Consumer<T>,
+          T: Clone + Send
+{
+    type Folder = IntersperseFolder<C::Folder, T>;
+    type Reducer = C::Reducer;
+    type Result = C::Result;
+
+    fn split_at(mut self, index: usize) -> (Self, Self, Self::Reducer) {
+        // We'll feed twice as many items to the base consumer, except if we're
+        // not currently leading with a cloned item, then it's one less.
+        let base_index = index + index.saturating_sub(!self.clone_first.get() as usize);
+        let (left, right, reducer) = self.base.split_at(base_index);
+
+        let right = IntersperseConsumer {
+            base: right,
+            item: self.item.clone(),
+            clone_first: true.into(),
+        };
+        self.base = left;
+        (self, right, reducer)
+    }
+
+    fn into_folder(self) -> Self::Folder {
+        IntersperseFolder {
+            base: self.base.into_folder(),
+            item: self.item,
+            clone_first: self.clone_first.get(),
+        }
+    }
+
+    fn full(&self) -> bool {
+        self.base.full()
+    }
+}
+
+impl<C, T> UnindexedConsumer<T> for IntersperseConsumer<C, T>
+    where C: UnindexedConsumer<T>,
+          T: Clone + Send
+{
+    fn split_off_left(&self) -> Self {
+        let left = IntersperseConsumer {
+            base: self.base.split_off_left(),
+            item: self.item.clone(),
+            clone_first: self.clone_first.clone(),
+        };
+        self.clone_first.set(true);
+        left
+    }
+
+    fn to_reducer(&self) -> Self::Reducer {
+        self.base.to_reducer()
+    }
+}
+
+struct IntersperseFolder<C, T> {
+    base: C,
+    item: T,
+    clone_first: bool,
+}
+
+impl<C, T> Folder<T> for IntersperseFolder<C, T>
+    where C: Folder<T>,
+          T: Clone
+{
+    type Result = C::Result;
+
+    fn consume(mut self, item: T) -> Self {
+        if self.clone_first {
+            self.base = self.base.consume(self.item.clone());
+            if self.base.full() {
+                return self;
+            }
+        } else {
+            self.clone_first = true;
+        }
+        self.base = self.base.consume(item);
+        self
+    }
+
+    fn complete(self) -> C::Result {
+        self.base.complete()
+    }
+
+    fn full(&self) -> bool {
+        self.base.full()
+    }
+}
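(Aside: a minimal usage sketch of the new adapter, assuming the `intersperse` method this patch adds to `ParallelIterator`; it mirrors the doc example that appears later in `mod.rs`.)

```rust
use rayon::prelude::*;

fn main() {
    let x = vec![1, 2, 3];
    let r: Vec<_> = x.into_par_iter().intersperse(-1).collect();
    // A non-empty input of n items yields 2n - 1 items, matching the
    // `len + len.saturating_sub(1)` bookkeeping in `IntersperseIter::len`.
    assert_eq!(r, vec![1, -1, 2, -1, 3]);
    assert_eq!(r.len(), 2 * 3 - 1);
}
```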
--- a/third_party/rust/rayon/src/iter/len.rs
+++ b/third_party/rust/rayon/src/iter/len.rs
@@ -1,18 +1,19 @@
-use super::internal::*;
+use super::plumbing::*;
 use super::*;
 use std::cmp;
 
 /// `MinLen` is an iterator that imposes a minimum length on iterator splits.
 /// This struct is created by the [`min_len()`] method on [`IndexedParallelIterator`]
 ///
 /// [`min_len()`]: trait.IndexedParallelIterator.html#method.min_len
 /// [`IndexedParallelIterator`]: trait.IndexedParallelIterator.html
 #[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
+#[derive(Debug, Clone)]
 pub struct MinLen<I: IndexedParallelIterator> {
     base: I,
     min: usize,
 }
 
 /// Create a new `MinLen` iterator.
 ///
 /// NB: a free fn because it is NOT part of the end-user API.
@@ -31,29 +32,29 @@ impl<I> ParallelIterator for MinLen<I>
     type Item = I::Item;
 
     fn drive_unindexed<C>(self, consumer: C) -> C::Result
         where C: UnindexedConsumer<Self::Item>
     {
         bridge(self, consumer)
     }
 
-    fn opt_len(&mut self) -> Option<usize> {
+    fn opt_len(&self) -> Option<usize> {
         Some(self.len())
     }
 }
 
 impl<I> IndexedParallelIterator for MinLen<I>
     where I: IndexedParallelIterator
 {
     fn drive<C: Consumer<Self::Item>>(self, consumer: C) -> C::Result {
         bridge(self, consumer)
     }
 
-    fn len(&mut self) -> usize {
+    fn len(&self) -> usize {
         self.base.len()
     }
 
     fn with_producer<CB>(self, callback: CB) -> CB::Output
         where CB: ProducerCallback<Self::Item>
     {
         return self.base.with_producer(Callback {
                                            callback: callback,
@@ -114,25 +115,32 @@ impl<P> Producer for MinLenProducer<P>
              base: left,
              min: self.min,
          },
          MinLenProducer {
              base: right,
              min: self.min,
          })
     }
+
+    fn fold_with<F>(self, folder: F) -> F
+        where F: Folder<Self::Item>
+    {
+        self.base.fold_with(folder)
+    }
 }
 
 
 /// `MaxLen` is an iterator that imposes a maximum length on iterator splits.
 /// This struct is created by the [`max_len()`] method on [`IndexedParallelIterator`]
 ///
 /// [`max_len()`]: trait.IndexedParallelIterator.html#method.max_len
 /// [`IndexedParallelIterator`]: trait.IndexedParallelIterator.html
 #[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
+#[derive(Debug, Clone)]
 pub struct MaxLen<I: IndexedParallelIterator> {
     base: I,
     max: usize,
 }
 
 /// Create a new `MaxLen` iterator.
 ///
 /// NB: a free fn because it is NOT part of the end-user API.
@@ -151,29 +159,29 @@ impl<I> ParallelIterator for MaxLen<I>
     type Item = I::Item;
 
     fn drive_unindexed<C>(self, consumer: C) -> C::Result
         where C: UnindexedConsumer<Self::Item>
     {
         bridge(self, consumer)
     }
 
-    fn opt_len(&mut self) -> Option<usize> {
+    fn opt_len(&self) -> Option<usize> {
         Some(self.len())
     }
 }
 
 impl<I> IndexedParallelIterator for MaxLen<I>
     where I: IndexedParallelIterator
 {
     fn drive<C: Consumer<Self::Item>>(self, consumer: C) -> C::Result {
         bridge(self, consumer)
     }
 
-    fn len(&mut self) -> usize {
+    fn len(&self) -> usize {
         self.base.len()
     }
 
     fn with_producer<CB>(self, callback: CB) -> CB::Output
         where CB: ProducerCallback<Self::Item>
     {
         return self.base.with_producer(Callback {
                                            callback: callback,
@@ -234,9 +242,15 @@ impl<P> Producer for MaxLenProducer<P>
              base: left,
              max: self.max,
          },
          MaxLenProducer {
              base: right,
              max: self.max,
          })
     }
+
+    fn fold_with<F>(self, folder: F) -> F
+        where F: Folder<Self::Item>
+    {
+        self.base.fold_with(folder)
+    }
 }
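(Aside: a usage sketch for the two adapters in this file, assuming the `min_len()`/`max_len()` methods named in the docs above; the method names follow this vendored version and may differ in other rayon releases.)

```rust
use rayon::prelude::*;

fn main() {
    let v: Vec<i32> = (0..1_000).collect();
    // Constrain rayon's splitting so each piece handles at least 100 items
    // and at most 500; the result is unchanged, only the split granularity is.
    let sum: i32 = v.par_iter().cloned().min_len(100).max_len(500).sum();
    assert_eq!(sum, (0..1_000).sum::<i32>());
}
```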
--- a/third_party/rust/rayon/src/iter/map.rs
+++ b/third_party/rust/rayon/src/iter/map.rs
@@ -1,26 +1,36 @@
-use super::internal::*;
+use super::plumbing::*;
 use super::*;
 
+use std::fmt::{self, Debug};
 use std::iter;
 
 
 /// `Map` is an iterator that transforms the elements of an underlying iterator.
 ///
 /// This struct is created by the [`map()`] method on [`ParallelIterator`]
 ///
 /// [`map()`]: trait.ParallelIterator.html#method.map
 /// [`ParallelIterator`]: trait.ParallelIterator.html
 #[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
+#[derive(Clone)]
 pub struct Map<I: ParallelIterator, F> {
     base: I,
     map_op: F,
 }
 
+impl<I: ParallelIterator + Debug, F> Debug for Map<I, F> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_struct("Map")
+            .field("base", &self.base)
+            .finish()
+    }
+}
+
 /// Create a new `Map` iterator.
 ///
 /// NB: a free fn because it is NOT part of the end-user API.
 pub fn new<I, F>(base: I, map_op: F) -> Map<I, F>
     where I: ParallelIterator
 {
     Map {
         base: base,
@@ -37,34 +47,34 @@ impl<I, F, R> ParallelIterator for Map<I
 
     fn drive_unindexed<C>(self, consumer: C) -> C::Result
         where C: UnindexedConsumer<Self::Item>
     {
         let consumer1 = MapConsumer::new(consumer, &self.map_op);
         self.base.drive_unindexed(consumer1)
     }
 
-    fn opt_len(&mut self) -> Option<usize> {
+    fn opt_len(&self) -> Option<usize> {
         self.base.opt_len()
     }
 }
 
 impl<I, F, R> IndexedParallelIterator for Map<I, F>
     where I: IndexedParallelIterator,
           F: Fn(I::Item) -> R + Sync + Send,
           R: Send
 {
     fn drive<C>(self, consumer: C) -> C::Result
         where C: Consumer<Self::Item>
     {
         let consumer1 = MapConsumer::new(consumer, &self.map_op);
         self.base.drive(consumer1)
     }
 
-    fn len(&mut self) -> usize {
+    fn len(&self) -> usize {
         self.base.len()
     }
 
     fn with_producer<CB>(self, callback: CB) -> CB::Output
         where CB: ProducerCallback<Self::Item>
     {
         return self.base.with_producer(Callback {
                                            callback: callback,
@@ -128,16 +138,23 @@ impl<'f, P, F, R> Producer for MapProduc
              base: left,
              map_op: self.map_op,
          },
          MapProducer {
              base: right,
              map_op: self.map_op,
          })
     }
+
+    fn fold_with<G>(self, folder: G) -> G
+        where G: Folder<Self::Item>
+    {
+        let folder1 = MapFolder { base: folder, map_op: self.map_op };
+        self.base.fold_with(folder1).base
+    }
 }
 
 
 /// ////////////////////////////////////////////////////////////////////////
 /// Consumer implementation
 
 struct MapConsumer<'f, C, F: 'f> {
     base: C,
--- a/third_party/rust/rayon/src/iter/map_with.rs
+++ b/third_party/rust/rayon/src/iter/map_with.rs
@@ -1,25 +1,37 @@
-use super::internal::*;
+use super::plumbing::*;
 use super::*;
 
+use std::fmt::{self, Debug};
+
 
 /// `MapWith` is an iterator that transforms the elements of an underlying iterator.
 ///
 /// This struct is created by the [`map_with()`] method on [`ParallelIterator`]
 ///
 /// [`map_with()`]: trait.ParallelIterator.html#method.map_with
 /// [`ParallelIterator`]: trait.ParallelIterator.html
 #[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
+#[derive(Clone)]
 pub struct MapWith<I: ParallelIterator, T, F> {
     base: I,
     item: T,
     map_op: F,
 }
 
+impl<I: ParallelIterator + Debug, T: Debug, F> Debug for MapWith<I, T, F> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_struct("MapWith")
+            .field("base", &self.base)
+            .field("item", &self.item)
+            .finish()
+    }
+}
+
 /// Create a new `MapWith` iterator.
 ///
 /// NB: a free fn because it is NOT part of the end-user API.
 pub fn new<I, T, F>(base: I, item: T, map_op: F) -> MapWith<I, T, F>
     where I: ParallelIterator
 {
     MapWith {
         base: base,
@@ -38,17 +50,17 @@ impl<I, T, F, R> ParallelIterator for Ma
 
     fn drive_unindexed<C>(self, consumer: C) -> C::Result
         where C: UnindexedConsumer<Self::Item>
     {
         let consumer1 = MapWithConsumer::new(consumer, self.item, &self.map_op);
         self.base.drive_unindexed(consumer1)
     }
 
-    fn opt_len(&mut self) -> Option<usize> {
+    fn opt_len(&self) -> Option<usize> {
         self.base.opt_len()
     }
 }
 
 impl<I, T, F, R> IndexedParallelIterator for MapWith<I, T, F>
     where I: IndexedParallelIterator,
           T: Send + Clone,
           F: Fn(&mut T, I::Item) -> R + Sync + Send,
@@ -56,17 +68,17 @@ impl<I, T, F, R> IndexedParallelIterator
 {
     fn drive<C>(self, consumer: C) -> C::Result
         where C: Consumer<Self::Item>
     {
         let consumer1 = MapWithConsumer::new(consumer, self.item, &self.map_op);
         self.base.drive(consumer1)
     }
 
-    fn len(&mut self) -> usize {
+    fn len(&self) -> usize {
         self.base.len()
     }
 
     fn with_producer<CB>(self, callback: CB) -> CB::Output
         where CB: ProducerCallback<Self::Item>
     {
         return self.base.with_producer(Callback {
                                            callback: callback,
@@ -142,16 +154,27 @@ impl<'f, P, U, F, R> Producer for MapWit
              map_op: self.map_op,
          },
          MapWithProducer {
              base: right,
              item: self.item,
              map_op: self.map_op,
          })
     }
+
+    fn fold_with<G>(self, folder: G) -> G
+        where G: Folder<Self::Item>
+    {
+        let folder1 = MapWithFolder {
+            base: folder,
+            item: self.item,
+            map_op: self.map_op,
+        };
+        self.base.fold_with(folder1).base
+    }
 }
 
 struct MapWithIter<'f, I, U, F: 'f> {
     base: I,
     item: U,
     map_op: &'f F,
 }
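(Aside: `Map` and `MapWith` above now derive `Clone` but hand-write `Debug`, because the stored closure has no `Debug` impl to derive from. A standalone sketch of that pattern, with invented names and independent of rayon:)

```rust
use std::fmt::{self, Debug};

struct Adapter<I, F> {
    base: I,
    op: F, // a closure: cannot be printed, so Debug must skip it
}

impl<I: Debug, F> Debug for Adapter<I, F> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Mirror the impls above: show `base`, silently omit the closure.
        f.debug_struct("Adapter").field("base", &self.base).finish()
    }
}

fn main() {
    let a = Adapter { base: vec![1, 2, 3], op: |x: i32| x * 2 };
    println!("{:?}", a); // Adapter { base: [1, 2, 3] }
    assert_eq!((a.op)(21), 42);
}
```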
 
--- a/third_party/rust/rayon/src/iter/mod.rs
+++ b/third_party/rust/rayon/src/iter/mod.rs
@@ -1,239 +1,618 @@
-//! The `ParallelIterator` module makes it easy to write parallel
-//! programs using an iterator-style interface. To get access to all
-//! the methods you want, the easiest is to write `use
-//! rayon::prelude::*;` at the top of your module, which will import
-//! the various traits and methods you need.
+//! Traits for writing parallel programs using an iterator-style interface
+//!
+//! You will rarely need to interact with this module directly unless you have
+//! need to name one of the iterator types.
+//!
+//! Parallel iterators make it easy to write iterator-like chains that
+//! execute in parallel: typically all you have to do is convert the
+//! first `.iter()` (or `iter_mut()`, `into_iter()`, etc) method into
+//! `par_iter()` (or `par_iter_mut()`, `into_par_iter()`, etc). For
+//! example, to compute the sum of the squares of a sequence of
+//! integers, one might write:
+//!
+//! ```rust
+//! use rayon::prelude::*;
+//! fn sum_of_squares(input: &[i32]) -> i32 {
+//!     input.par_iter()
+//!          .map(|i| i * i)
+//!          .sum()
+//! }
+//! ```
+//!
+//! Or, to increment all the integers in a slice, you could write:
+//!
+//! ```rust
+//! use rayon::prelude::*;
+//! fn increment_all(input: &mut [i32]) {
+//!     input.par_iter_mut()
+//!          .for_each(|p| *p += 1);
+//! }
+//! ```
 //!
-//! The submodules of this module mostly just contain implementaton
-//! details of little interest to an end-user. If you'd like to read
-//! the code itself, the `internal` module and `README.md` file are a
-//! good place to start.
+//! To use parallel iterators, first import the traits by adding
+//! something like `use rayon::prelude::*` to your module. You can
+//! then call `par_iter`, `par_iter_mut`, or `into_par_iter` to get a
+//! parallel iterator. Like a [regular iterator][], parallel
+//! iterators work by first constructing a computation and then
+//! executing it.
+//!
+//! In addition to `par_iter()` and friends, some types offer other
+//! ways to create (or consume) parallel iterators:
+//!
+//! - Slices (`&[T]`, `&mut [T]`) offer methods like `par_split` and
+//!   `par_windows`, as well as various parallel sorting
+//!   operations. See [the `ParallelSlice` trait] for the full list.
+//! - Strings (`&str`) offer methods like `par_split` and `par_lines`.
+//!   See [the `ParallelString` trait] for the full list.
+//! - Various collections offer [`par_extend`], which grows a
+//!   collection given a parallel iterator. (If you don't have a
+//!   collection to extend, you can use [`collect()`] to create a new
+//!   one from scratch.)
+//!
+//! [the `ParallelSlice` trait]: ../slice/trait.ParallelSlice.html
+//! [the `ParallelString` trait]: ../str/trait.ParallelString.html
+//! [`par_extend`]: trait.ParallelExtend.html
+//! [`collect()`]: trait.ParallelIterator.html#method.collect
+//!
+//! To see the full range of methods available on parallel iterators,
+//! check out the [`ParallelIterator`] and [`IndexedParallelIterator`]
+//! traits.
+//!
+//! If you'd like to offer parallel iterators for your own collection,
+//! or write your own combinator, then check out the [plumbing]
+//! module.
+//!
+//! [regular iterator]: http://doc.rust-lang.org/std/iter/trait.Iterator.html
+//! [`ParallelIterator`]: trait.ParallelIterator.html
+//! [`IndexedParallelIterator`]: trait.IndexedParallelIterator.html
+//! [plumbing]: plumbing
 
+pub use either::Either;
 use std::cmp::{self, Ordering};
 use std::iter::{Sum, Product};
 use std::ops::Fn;
-use self::internal::*;
+use self::plumbing::*;
 
 // There is a method to the madness here:
 //
 // - Most of these modules are private but expose certain types to the end-user
 //   (e.g., `enumerate::Enumerate`) -- specifically, the types that appear in the
 //   public API surface of the `ParallelIterator` traits.
 // - In **this** module, those public types are always used unprefixed, which forces
 //   us to add a `pub use` and helps identify if we missed anything.
 // - In contrast, items that appear **only** in the body of a method,
 //   e.g. `find::find()`, are always used **prefixed**, so that they
 //   can be readily distinguished.
 
 mod find;
 mod find_first_last;
 mod chain;
 pub use self::chain::Chain;
+mod chunks;
+pub use self::chunks::Chunks;
 mod collect;
 mod enumerate;
 pub use self::enumerate::Enumerate;
 mod filter;
 pub use self::filter::Filter;
 mod filter_map;
 pub use self::filter_map::FilterMap;
 mod flat_map;
 pub use self::flat_map::FlatMap;
+mod flatten;
+pub use self::flatten::Flatten;
 mod from_par_iter;
-pub mod internal;
+pub mod plumbing;
 mod for_each;
 mod fold;
 pub use self::fold::{Fold, FoldWith};
 mod reduce;
 mod skip;
 pub use self::skip::Skip;
 mod splitter;
 pub use self::splitter::{split, Split};
 mod take;
 pub use self::take::Take;
 mod map;
 pub use self::map::Map;
 mod map_with;
 pub use self::map_with::MapWith;
 mod zip;
 pub use self::zip::Zip;
+mod zip_eq;
+pub use self::zip_eq::ZipEq;
+mod interleave;
+pub use self::interleave::Interleave;
+mod interleave_shortest;
+pub use self::interleave_shortest::InterleaveShortest;
+mod intersperse;
+pub use self::intersperse::Intersperse;
+mod update;
+pub use self::update::Update;
+
 mod noop;
 mod rev;
 pub use self::rev::Rev;
 mod len;
 pub use self::len::{MinLen, MaxLen};
 mod sum;
 mod product;
 mod cloned;
 pub use self::cloned::Cloned;
 mod inspect;
 pub use self::inspect::Inspect;
 mod while_some;
 pub use self::while_some::WhileSome;
 mod extend;
 mod unzip;
+mod repeat;
+pub use self::repeat::{Repeat, repeat};
+pub use self::repeat::{RepeatN, repeatn};
+
+mod empty;
+pub use self::empty::{Empty, empty};
+mod once;
+pub use self::once::{Once, once};
 
 #[cfg(test)]
 mod test;
 
-/// Represents a value of one of two possible types.
-pub enum Either<L, R> {
-    Left(L),
-    Right(R)
-}
+/// `IntoParallelIterator` implements the conversion to a [`ParallelIterator`].
+///
+/// By implementing `IntoParallelIterator` for a type, you define how it will
+/// transformed into an iterator. This is a parallel version of the standard
+/// library's [`std::iter::IntoIterator`] trait.
+///
+/// [`ParallelIterator`]: trait.ParallelIterator.html
+/// [`std::iter::IntoIterator`]: https://doc.rust-lang.org/std/iter/trait.IntoIterator.html
+pub trait IntoParallelIterator {
+    /// The parallel iterator type that will be created.
+    type Iter: ParallelIterator<Item = Self::Item>;
 
-pub trait IntoParallelIterator {
-    type Iter: ParallelIterator<Item = Self::Item>;
+    /// The type of item that the parallel iterator will produce.
     type Item: Send;
 
+    /// Converts `self` into a parallel iterator.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    ///
+    /// println!("counting in parallel:");
+    /// (0..100).into_par_iter()
+    ///     .for_each(|i| println!("{}", i));
+    /// ```
+    ///
+    /// This conversion is often implicit for arguments to methods like [`zip`].
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    ///
+    /// let v: Vec<_> = (0..5).into_par_iter().zip(5..10).collect();
+    /// assert_eq!(v, [(0, 5), (1, 6), (2, 7), (3, 8), (4, 9)]);
+    /// ```
+    ///
+    /// [`zip`]: trait.IndexedParallelIterator.html#method.zip
     fn into_par_iter(self) -> Self::Iter;
 }
 
+/// `IntoParallelRefIterator` implements the conversion to a
+/// [`ParallelIterator`], providing shared references to the data.
+///
+/// This is a parallel version of the `iter()` method
+/// defined by various collections.
+///
+/// This trait is automatically implemented
+/// `for I where &I: IntoParallelIterator`. In most cases, users
+/// will want to implement [`IntoParallelIterator`] rather than implement
+/// this trait directly.
+///
+/// [`ParallelIterator`]: trait.ParallelIterator.html
+/// [`IntoParallelIterator`]: trait.IntoParallelIterator.html
 pub trait IntoParallelRefIterator<'data> {
+    /// The type of the parallel iterator that will be returned.
     type Iter: ParallelIterator<Item = Self::Item>;
+
+    /// The type of item that the parallel iterator will produce.
+    /// This will typically be an `&'data T` reference type.
     type Item: Send + 'data;
 
+    /// Converts `self` into a parallel iterator.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    ///
+    /// let v: Vec<_> = (0..100).collect();
+    /// assert_eq!(v.par_iter().sum::<i32>(), 100 * 99 / 2);
+    ///
+    /// // `v.par_iter()` is shorthand for `(&v).into_par_iter()`,
+    /// // producing the exact same references.
+    /// assert!(v.par_iter().zip(&v)
+    ///          .all(|(a, b)| std::ptr::eq(a, b)));
+    /// ```
     fn par_iter(&'data self) -> Self::Iter;
 }
 
 impl<'data, I: 'data + ?Sized> IntoParallelRefIterator<'data> for I
     where &'data I: IntoParallelIterator
 {
     type Iter = <&'data I as IntoParallelIterator>::Iter;
     type Item = <&'data I as IntoParallelIterator>::Item;
 
     fn par_iter(&'data self) -> Self::Iter {
         self.into_par_iter()
     }
 }
 
+
+/// `IntoParallelRefMutIterator` implements the conversion to a
+/// [`ParallelIterator`], providing mutable references to the data.
+///
+/// This is a parallel version of the `iter_mut()` method
+/// defined by various collections.
+///
+/// This trait is automatically implemented
+/// `for I where &mut I: IntoParallelIterator`. In most cases, users
+/// will want to implement [`IntoParallelIterator`] rather than implement
+/// this trait directly.
+///
+/// [`ParallelIterator`]: trait.ParallelIterator.html
+/// [`IntoParallelIterator`]: trait.IntoParallelIterator.html
 pub trait IntoParallelRefMutIterator<'data> {
+    /// The type of iterator that will be created.
     type Iter: ParallelIterator<Item = Self::Item>;
+
+    /// The type of item that will be produced; this is typically an
+    /// `&'data mut T` reference.
     type Item: Send + 'data;
 
+    /// Creates the parallel iterator from `self`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    ///
+    /// let mut v = vec![0usize; 5];
+    /// v.par_iter_mut().enumerate().for_each(|(i, x)| *x = i);
+    /// assert_eq!(v, [0, 1, 2, 3, 4]);
+    /// ```
     fn par_iter_mut(&'data mut self) -> Self::Iter;
 }
 
 impl<'data, I: 'data + ?Sized> IntoParallelRefMutIterator<'data> for I
     where &'data mut I: IntoParallelIterator
 {
     type Iter = <&'data mut I as IntoParallelIterator>::Iter;
     type Item = <&'data mut I as IntoParallelIterator>::Item;
 
     fn par_iter_mut(&'data mut self) -> Self::Iter {
         self.into_par_iter()
     }
 }
 
-/// The `ParallelIterator` interface.
+/// Parallel version of the standard iterator trait.
+///
+/// The combinators on this trait are available on **all** parallel
+/// iterators.  Additional methods can be found on the
+/// [`IndexedParallelIterator`] trait: those methods are only
+/// available for parallel iterators where the number of items is
+/// known in advance (so, e.g., after invoking `filter`, those methods
+/// become unavailable).
+///
+/// For examples of using parallel iterators, see [the docs on the
+/// `iter` module][iter].
+///
+/// [iter]: index.html
+/// [`IndexedParallelIterator`]: trait.IndexedParallelIterator.html
 pub trait ParallelIterator: Sized + Send {
+    /// The type of item that this parallel iterator produces.
+    /// For example, if you use the [`for_each`] method, this is the type of
+    /// item that your closure will be invoked with.
+    ///
+    /// [`for_each`]: #method.for_each
     type Item: Send;
 
     /// Executes `OP` on each item produced by the iterator, in parallel.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    ///
+    /// (0..100).into_par_iter().for_each(|x| println!("{:?}", x));
+    /// ```
     fn for_each<OP>(self, op: OP)
         where OP: Fn(Self::Item) + Sync + Send
     {
         for_each::for_each(self, &op)
     }
 
     /// Executes `OP` on the given `init` value with each item produced by
     /// the iterator, in parallel.
     ///
     /// The `init` value will be cloned only as needed to be paired with
     /// the group of items in each rayon job.  It does not require the type
     /// to be `Sync`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::sync::mpsc::channel;
+    /// use rayon::prelude::*;
+    ///
+    /// let (sender, receiver) = channel();
+    ///
+    /// (0..5).into_par_iter().for_each_with(sender, |s, x| s.send(x).unwrap());
+    ///
+    /// let mut res: Vec<_> = receiver.iter().collect();
+    ///
+    /// res.sort();
+    ///
+    /// assert_eq!(&res[..], &[0, 1, 2, 3, 4])
+    /// ```
     fn for_each_with<OP, T>(self, init: T, op: OP)
         where OP: Fn(&mut T, Self::Item) + Sync + Send,
               T: Send + Clone
     {
         self.map_with(init, op).for_each(|()| ())
     }
 
     /// Counts the number of items in this parallel iterator.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    ///
+    /// let count = (0..100).into_par_iter().count();
+    ///
+    /// assert_eq!(count, 100);
+    /// ```
     fn count(self) -> usize {
         self.map(|_| 1).sum()
     }
 
     /// Applies `map_op` to each item of this iterator, producing a new
     /// iterator with the results.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    ///
+    /// let mut par_iter = (0..5).into_par_iter().map(|x| x * 2);
+    ///
+    /// let doubles: Vec<_> = par_iter.collect();
+    ///
+    /// assert_eq!(&doubles[..], &[0, 2, 4, 6, 8]);
+    /// ```
     fn map<F, R>(self, map_op: F) -> Map<Self, F>
         where F: Fn(Self::Item) -> R + Sync + Send,
               R: Send
     {
         map::new(self, map_op)
     }
 
     /// Applies `map_op` to the given `init` value with each item of this
     /// iterator, producing a new iterator with the results.
     ///
     /// The `init` value will be cloned only as needed to be paired with
     /// the group of items in each rayon job.  It does not require the type
     /// to be `Sync`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::sync::mpsc::channel;
+    /// use rayon::prelude::*;
+    ///
+    /// let (sender, receiver) = channel();
+    ///
+    /// let a: Vec<_> = (0..5)
+    ///                 .into_par_iter()            // iterating over i32
+    ///                 .map_with(sender, |s, x| {
+    ///                     s.send(x).unwrap();     // sending i32 values through the channel
+    ///                     x                       // returning i32
+    ///                 })
+    ///                 .collect();                 // collecting the returned values into a vector
+    ///
+    /// let mut b: Vec<_> = receiver.iter()         // iterating over the values in the channel
+    ///                             .collect();     // and collecting them
+    /// b.sort();
+    ///
+    /// assert_eq!(a, b);
+    /// ```
     fn map_with<F, T, R>(self, init: T, map_op: F) -> MapWith<Self, T, F>
         where F: Fn(&mut T, Self::Item) -> R + Sync + Send,
               T: Send + Clone,
               R: Send
     {
         map_with::new(self, init, map_op)
     }
 
     /// Creates an iterator which clones all of its elements.  This may be
     /// useful when you have an iterator over `&T`, but you need `T`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    ///
+    /// let a = [1, 2, 3];
+    ///
+    /// let v_cloned: Vec<_> = a.par_iter().cloned().collect();
+    ///
+    /// // cloned is the same as .map(|&x| x), for integers
+    /// let v_map: Vec<_> = a.par_iter().map(|&x| x).collect();
+    ///
+    /// assert_eq!(v_cloned, vec![1, 2, 3]);
+    /// assert_eq!(v_map, vec![1, 2, 3]);
+    /// ```
     fn cloned<'a, T>(self) -> Cloned<Self>
         where T: 'a + Clone + Send,
               Self: ParallelIterator<Item = &'a T>
     {
         cloned::new(self)
     }
 
     /// Applies `inspect_op` to a reference to each item of this iterator,
     /// producing a new iterator passing through the original items.  This is
     /// often useful for debugging to see what's happening in iterator stages.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    ///
+    /// let a = [1, 4, 2, 3];
+    ///
+    /// // this iterator sequence is complex.
+    /// let sum = a.par_iter()
+    ///             .cloned()
+    ///             .filter(|&x| x % 2 == 0)
+    ///             .reduce(|| 0, |sum, i| sum + i);
+    ///
+    /// println!("{}", sum);
+    ///
+    /// // let's add some inspect() calls to investigate what's happening
+    /// let sum = a.par_iter()
+    ///             .cloned()
+    ///             .inspect(|x| println!("about to filter: {}", x))
+    ///             .filter(|&x| x % 2 == 0)
+    ///             .inspect(|x| println!("made it through filter: {}", x))
+    ///             .reduce(|| 0, |sum, i| sum + i);
+    ///
+    /// println!("{}", sum);
+    /// ```
     fn inspect<OP>(self, inspect_op: OP) -> Inspect<Self, OP>
         where OP: Fn(&Self::Item) + Sync + Send
     {
         inspect::new(self, inspect_op)
     }
 
+    /// Mutates each item of this iterator before yielding it.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    ///
+    /// let par_iter = (0..5).into_par_iter().update(|x| {*x *= 2;});
+    ///
+    /// let doubles: Vec<_> = par_iter.collect();
+    ///
+    /// assert_eq!(&doubles[..], &[0, 2, 4, 6, 8]);
+    /// ```
+    fn update<F>(self, update_op: F) -> Update<Self, F>
+        where F: Fn(&mut Self::Item) + Sync + Send
+    {
+        update::new(self, update_op)
+    }
+
     /// Applies `filter_op` to each item of this iterator, producing a new
     /// iterator with only the items that gave `true` results.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    ///
+    /// let mut par_iter = (0..10).into_par_iter().filter(|x| x % 2 == 0);
+    ///
+    /// let even_numbers: Vec<_> = par_iter.collect();
+    ///
+    /// assert_eq!(&even_numbers[..], &[0, 2, 4, 6, 8]);
+    /// ```
     fn filter<P>(self, filter_op: P) -> Filter<Self, P>
         where P: Fn(&Self::Item) -> bool + Sync + Send
     {
         filter::new(self, filter_op)
     }
 
     /// Applies `filter_op` to each item of this iterator to get an `Option`,
     /// producing a new iterator with only the items from `Some` results.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    ///
+    /// let mut par_iter = (0..10).into_par_iter()
+    ///                         .filter_map(|x| {
+    ///                             if x % 2 == 0 { Some(x * 3) }
+    ///                             else { None }
+    ///                         });
+    ///
+    /// let even_numbers: Vec<_> = par_iter.collect();
+    ///
+    /// assert_eq!(&even_numbers[..], &[0, 6, 12, 18, 24]);
+    /// ```
     fn filter_map<P, R>(self, filter_op: P) -> FilterMap<Self, P>
         where P: Fn(Self::Item) -> Option<R> + Sync + Send,
               R: Send
     {
         filter_map::new(self, filter_op)
     }
 
     /// Applies `map_op` to each item of this iterator to get nested iterators,
     /// producing a new iterator that flattens these back into one.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    ///
+    /// let a = [[1, 2], [3, 4], [5, 6], [7, 8]];
+    ///
+    /// let par_iter = a.par_iter().cloned().flat_map(|a| a.to_vec());
+    ///
+    /// let vec: Vec<_> = par_iter.collect();
+    ///
+    /// assert_eq!(&vec[..], &[1, 2, 3, 4, 5, 6, 7, 8]);
+    /// ```
     fn flat_map<F, PI>(self, map_op: F) -> FlatMap<Self, F>
         where F: Fn(Self::Item) -> PI + Sync + Send,
               PI: IntoParallelIterator
     {
         flat_map::new(self, map_op)
     }
 
+    /// An adaptor that flattens iterable `Item`s into one large iterator
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    ///
+    /// let x: Vec<Vec<_>> = vec![vec![1, 2], vec![3, 4]];
+    /// let y: Vec<_> = x.into_par_iter().flatten().collect();
+    ///
+    /// assert_eq!(y, vec![1, 2, 3, 4]);
+    /// ```
+    fn flatten(self) -> Flatten<Self>
+        where Self::Item: IntoParallelIterator
+    {
+        flatten::new(self)
+    }
+
     /// Reduces the items in the iterator into one item using `op`.
     /// The argument `identity` should be a closure that can produce
     /// "identity" value which may be inserted into the sequence as
     /// needed to create opportunities for parallel execution. So, for
     /// example, if you are doing a summation, then `identity()` ought
     /// to produce something that represents the zero for your type
     /// (but consider just calling `sum()` in that case).
     ///
-    /// Example:
+    /// # Examples
     ///
     /// ```
     /// // Iterate over a sequence of pairs `(x0, y0), ..., (xN, yN)`
     /// // and use reduce to compute one pair `(x0 + ... + xN, y0 + ... + yN)`
     /// // where the first/second elements are summed separately.
     /// use rayon::prelude::*;
     /// let sums = [(0, 1), (5, 6), (16, 2), (8, 9)]
     ///            .par_iter()        // iterating over &(i32, i32)
@@ -260,16 +639,28 @@ pub trait ParallelIterator: Sized + Send
     /// Reduces the items in the iterator into one item using `op`.
     /// If the iterator is empty, `None` is returned; otherwise,
     /// `Some` is returned.
     ///
     /// This version of `reduce` is simple but somewhat less
     /// efficient. If possible, it is better to call `reduce()`, which
     /// requires an identity element.
     ///
+    /// # Examples
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    /// let sums = [(0, 1), (5, 6), (16, 2), (8, 9)]
+    ///            .par_iter()        // iterating over &(i32, i32)
+    ///            .cloned()          // iterating over (i32, i32)
+    ///            .reduce_with(|a, b| (a.0 + b.0, a.1 + b.1))
+    ///            .unwrap();
+    /// assert_eq!(sums, (0 + 5 + 16 + 8, 1 + 6 + 2 + 9));
+    /// ```
+    ///
     /// **Note:** unlike a sequential `fold` operation, the order in
     /// which `op` will be applied to reduce the result is not fully
     /// specified. So `op` should be [associative] or else the results
     /// will be non-deterministic.
     ///
     /// [associative]: https://en.wikipedia.org/wiki/Associative_property
     fn reduce_with<OP>(self, op: OP) -> Option<Self::Item>
         where OP: Fn(Self::Item, Self::Item) -> Self::Item + Sync + Send
@@ -355,40 +746,44 @@ pub trait ParallelIterator: Sized + Send
     ///
     /// Fold makes sense if you have some operation where it is
     /// cheaper to process groups of elements at a time. For example, imagine
     /// collecting characters into a string. If you were going to use
     /// map/reduce, you might try this:
     ///
     /// ```
     /// use rayon::prelude::*;
+    ///
     /// let s =
     ///     ['a', 'b', 'c', 'd', 'e']
     ///     .par_iter()
     ///     .map(|c: &char| format!("{}", c))
     ///     .reduce(|| String::new(),
     ///             |mut a: String, b: String| { a.push_str(&b); a });
+    ///
     /// assert_eq!(s, "abcde");
     /// ```
     ///
     /// Because reduce produces the same type of element as its input,
     /// you have to first map each character into a string, and then
     /// you can reduce them. This means we create one string per
     /// element in our iterator -- not so great. Using `fold`, we can
     /// do this instead:
     ///
     /// ```
     /// use rayon::prelude::*;
+    ///
     /// let s =
     ///     ['a', 'b', 'c', 'd', 'e']
     ///     .par_iter()
     ///     .fold(|| String::new(),
     ///             |mut s: String, c: &char| { s.push(*c); s })
     ///     .reduce(|| String::new(),
     ///             |mut a: String, b: String| { a.push_str(&b); a });
+    ///
     /// assert_eq!(s, "abcde");
     /// ```
     ///
     /// Now `fold` will process groups of our characters at a time,
     /// and we only make one string per group. We should wind up with
     /// some small-ish number of strings roughly proportional to the
     /// number of CPUs you have (it will ultimately depend on how busy
     /// your processors are). Note that we still need to do a reduce
@@ -401,117 +796,192 @@ pub trait ParallelIterator: Sized + Send
     /// ### Combining fold with other operations
     ///
     /// You can combine `fold` with `reduce` if you want to produce a
     /// single value. This is then roughly equivalent to a map/reduce
     /// combination in effect:
     ///
     /// ```
     /// use rayon::prelude::*;
-    /// let bytes = 0..22_u8; // series of u8 bytes
+    ///
+    /// let bytes = 0..22_u8;
     /// let sum = bytes.into_par_iter()
     ///                .fold(|| 0_u32, |a: u32, b: u8| a + (b as u32))
     ///                .sum::<u32>();
+    ///
     /// assert_eq!(sum, (0..22).sum()); // compare to sequential
     /// ```
     fn fold<T, ID, F>(self, identity: ID, fold_op: F) -> Fold<Self, ID, F>
         where F: Fn(T, Self::Item) -> T + Sync + Send,
               ID: Fn() -> T + Sync + Send,
               T: Send
     {
         fold::fold(self, identity, fold_op)
     }
 
     /// Applies `fold_op` to the given `init` value with each item of this
     /// iterator, finally producing the value for further use.
     ///
     /// This works essentially like `fold(|| init.clone(), fold_op)`, except
     /// it doesn't require the `init` type to be `Sync`, nor any other form
     /// of added synchronization.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    ///
+    /// let bytes = 0..22_u8;
+    /// let sum = bytes.into_par_iter()
+    ///                .fold_with(0_u32, |a: u32, b: u8| a + (b as u32))
+    ///                .sum::<u32>();
+    ///
+    /// assert_eq!(sum, (0..22).sum()); // compare to sequential
+    /// ```
     fn fold_with<F, T>(self, init: T, fold_op: F) -> FoldWith<Self, T, F>
         where F: Fn(T, Self::Item) -> T + Sync + Send,
               T: Send + Clone
     {
         fold::fold_with(self, init, fold_op)
     }
 
     /// Sums up the items in the iterator.
     ///
     /// Note that the order in which items will be reduced is not specified,
-    /// so if the `+` operator is not truly [associative] (as is the
+    /// so if the `+` operator is not truly [associative] \(as is the
     /// case for floating point numbers), then the results are not
     /// fully deterministic.
     ///
     /// [associative]: https://en.wikipedia.org/wiki/Associative_property
     ///
     /// Basically equivalent to `self.reduce(|| 0, |a, b| a + b)`,
     /// except that the type of `0` and the `+` operation may vary
     /// depending on the type of value being produced.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    ///
+    /// let a = [1, 5, 7];
+    ///
+    /// let sum: i32 = a.par_iter().sum();
+    ///
+    /// assert_eq!(sum, 13);
+    /// ```
     fn sum<S>(self) -> S
-        where S: Send + Sum<Self::Item> + Sum
+        where S: Send + Sum<Self::Item> + Sum<S>
     {
         sum::sum(self)
     }
 
     /// Multiplies all the items in the iterator.
     ///
     /// Note that the order in which items will be reduced is not specified,
-    /// so if the `*` operator is not truly [associative] (as is the
+    /// so if the `*` operator is not truly [associative] \(as is the
     /// case for floating point numbers), then the results are not
     /// fully deterministic.
     ///
     /// [associative]: https://en.wikipedia.org/wiki/Associative_property
     ///
     /// Basically equivalent to `self.reduce(|| 1, |a, b| a * b)`,
     /// except that the type of `1` and the `*` operation may vary
     /// depending on the type of value being produced.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    ///
+    /// fn factorial(n: u32) -> u32 {
+    ///    (1..n+1).into_par_iter().product()
+    /// }
+    ///
+    /// assert_eq!(factorial(0), 1);
+    /// assert_eq!(factorial(1), 1);
+    /// assert_eq!(factorial(5), 120);
+    /// ```
     fn product<P>(self) -> P
-        where P: Send + Product<Self::Item> + Product
+        where P: Send + Product<Self::Item> + Product<P>
     {
         product::product(self)
     }
 
     /// Computes the minimum of all the items in the iterator. If the
     /// iterator is empty, `None` is returned; otherwise, `Some(min)`
     /// is returned.
     ///
     /// Note that the order in which the items will be reduced is not
     /// specified, so if the `Ord` impl is not truly associative, then
     /// the results are not deterministic.
     ///
     /// Basically equivalent to `self.reduce_with(|a, b| cmp::min(a, b))`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    ///
+    /// let a = [45, 74, 32];
+    ///
+    /// assert_eq!(a.par_iter().min(), Some(&32));
+    ///
+    /// let b: [i32; 0] = [];
+    ///
+    /// assert_eq!(b.par_iter().min(), None);
+    /// ```
     fn min(self) -> Option<Self::Item>
         where Self::Item: Ord
     {
         self.reduce_with(cmp::min)
     }
 
     /// Computes the minimum of all the items in the iterator with respect to
     /// the given comparison function. If the iterator is empty, `None` is
     /// returned; otherwise, `Some(min)` is returned.
     ///
     /// Note that the order in which the items will be reduced is not
     /// specified, so if the comparison function is not associative, then
     /// the results are not deterministic.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    ///
+    /// let a = [-3_i32, 77, 53, 240, -1];
+    ///
+    /// assert_eq!(a.par_iter().min_by(|x, y| x.cmp(y)), Some(&-3));
+    /// ```
     fn min_by<F>(self, f: F) -> Option<Self::Item>
         where F: Sync + Send + Fn(&Self::Item, &Self::Item) -> Ordering
     {
         self.reduce_with(|a, b| match f(&a, &b) {
                              Ordering::Greater => b,
                              _ => a,
                          })
     }
 
     /// Computes the item that yields the minimum value for the given
     /// function. If the iterator is empty, `None` is returned;
     /// otherwise, `Some(item)` is returned.
     ///
     /// Note that the order in which the items will be reduced is not
     /// specified, so if the `Ord` impl is not truly associative, then
     /// the results are not deterministic.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    ///
+    /// let a = [-3_i32, 34, 2, 5, -10, -3, -23];
+    ///
+    /// assert_eq!(a.par_iter().min_by_key(|x| x.abs()), Some(&2));
+    /// ```
     fn min_by_key<K, F>(self, f: F) -> Option<Self::Item>
         where K: Ord + Send,
               F: Sync + Send + Fn(&Self::Item) -> K
     {
         self.map(|x| (f(&x), x))
             .min_by(|a, b| (a.0).cmp(&b.0))
             .map(|(_, x)| x)
     }
@@ -520,55 +990,104 @@ pub trait ParallelIterator: Sized + Send
     /// iterator is empty, `None` is returned; otherwise, `Some(max)`
     /// is returned.
     ///
     /// Note that the order in which the items will be reduced is not
     /// specified, so if the `Ord` impl is not truly associative, then
     /// the results are not deterministic.
     ///
     /// Basically equivalent to `self.reduce_with(|a, b| cmp::max(a, b))`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    ///
+    /// let a = [45, 74, 32];
+    ///
+    /// assert_eq!(a.par_iter().max(), Some(&74));
+    ///
+    /// let b: [i32; 0] = [];
+    ///
+    /// assert_eq!(b.par_iter().max(), None);
+    /// ```
     fn max(self) -> Option<Self::Item>
         where Self::Item: Ord
     {
         self.reduce_with(cmp::max)
     }
 
     /// Computes the maximum of all the items in the iterator with respect to
     /// the given comparison function. If the iterator is empty, `None` is
     /// returned; otherwise, `Some(min)` is returned.
     ///
     /// Note that the order in which the items will be reduced is not
     /// specified, so if the comparison function is not associative, then
     /// the results are not deterministic.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    ///
+    /// let a = [-3_i32, 77, 53, 240, -1];
+    ///
+    /// assert_eq!(a.par_iter().max_by(|x, y| x.abs().cmp(&y.abs())), Some(&240));
+    /// ```
     fn max_by<F>(self, f: F) -> Option<Self::Item>
         where F: Sync + Send + Fn(&Self::Item, &Self::Item) -> Ordering
     {
         self.reduce_with(|a, b| match f(&a, &b) {
                              Ordering::Greater => a,
                              _ => b,
                          })
     }
 
     /// Computes the item that yields the maximum value for the given
     /// function. If the iterator is empty, `None` is returned;
     /// otherwise, `Some(item)` is returned.
     ///
     /// Note that the order in which the items will be reduced is not
     /// specified, so if the `Ord` impl is not truly associative, then
     /// the results are not deterministic.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    ///
+    /// let a = [-3_i32, 34, 2, 5, -10, -3, -23];
+    ///
+    /// assert_eq!(a.par_iter().max_by_key(|x| x.abs()), Some(&34));
+    /// ```
     fn max_by_key<K, F>(self, f: F) -> Option<Self::Item>
         where K: Ord + Send,
               F: Sync + Send + Fn(&Self::Item) -> K
     {
         self.map(|x| (f(&x), x))
             .max_by(|a, b| (a.0).cmp(&b.0))
             .map(|(_, x)| x)
     }
 
     /// Takes two iterators and creates a new iterator over both.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    ///
+    /// let a = [0, 1, 2];
+    /// let b = [9, 8, 7];
+    ///
+    /// let par_iter = a.par_iter().chain(b.par_iter());
+    ///
+    /// let chained: Vec<_> = par_iter.cloned().collect();
+    ///
+    /// assert_eq!(&chained[..], &[0, 1, 2, 9, 8, 7]);
+    /// ```
     fn chain<C>(self, chain: C) -> Chain<Self, C::Iter>
         where C: IntoParallelIterator<Item = Self::Item>
     {
         chain::new(self, chain.into_par_iter())
     }
 
     /// Searches for **some** item in the parallel iterator that
     /// matches the given predicate and returns it. This operation
@@ -576,16 +1095,28 @@ pub trait ParallelIterator: Sized + Send
     /// the item returned may not be the **first** one in the parallel
     /// sequence which matches, since we search the entire sequence in parallel.
     ///
     /// Once a match is found, we will attempt to stop processing
     /// the rest of the items in the iterator as soon as possible
     /// (just as `find` stops iterating once a match is found).
     ///
     /// [find]: https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.find
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    ///
+    /// let a = [1, 2, 3, 3];
+    ///
+    /// assert_eq!(a.par_iter().find_any(|&&x| x == 3), Some(&3));
+    ///
+    /// assert_eq!(a.par_iter().find_any(|&&x| x == 100), None);
+    /// ```
     fn find_any<P>(self, predicate: P) -> Option<Self::Item>
         where P: Fn(&Self::Item) -> bool + Sync + Send
     {
         find::find(self, predicate)
     }
 
     /// Searches for the sequentially **first** item in the parallel iterator
     /// that matches the given predicate and returns it.
@@ -593,32 +1124,56 @@ pub trait ParallelIterator: Sized + Send
     /// Once a match is found, all attempts to the right of the match
     /// will be stopped, while attempts to the left must continue in case
     /// an earlier match is found.
     ///
     /// Note that not all parallel iterators have a useful order, much like
     /// sequential `HashMap` iteration, so "first" may be nebulous.  If you
     /// just want the first match discovered anywhere in the iterator,
     /// `find_any` is a better choice.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    ///
+    /// let a = [1, 2, 3, 3];
+    ///
+    /// assert_eq!(a.par_iter().find_first(|&&x| x == 3), Some(&3));
+    ///
+    /// assert_eq!(a.par_iter().find_first(|&&x| x == 100), None);
+    /// ```
     fn find_first<P>(self, predicate: P) -> Option<Self::Item>
         where P: Fn(&Self::Item) -> bool + Sync + Send
     {
         find_first_last::find_first(self, predicate)
     }
 
     /// Searches for the sequentially **last** item in the parallel iterator
     /// that matches the given predicate and returns it.
     ///
     /// Once a match is found, all attempts to the left of the match
     /// will be stopped, while attempts to the right must continue in case
     /// a later match is found.
     ///
     /// Note that not all parallel iterators have a useful order, much like
     /// sequential `HashMap` iteration, so "last" may be nebulous.  When the
     /// order doesn't actually matter to you, `find_any` is a better choice.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    ///
+    /// let a = [1, 2, 3, 3];
+    ///
+    /// assert_eq!(a.par_iter().find_last(|&&x| x == 3), Some(&3));
+    ///
+    /// assert_eq!(a.par_iter().find_last(|&&x| x == 100), None);
+    /// ```
     fn find_last<P>(self, predicate: P) -> Option<Self::Item>
         where P: Fn(&Self::Item) -> bool + Sync + Send
     {
         find_first_last::find_last(self, predicate)
     }
 
     #[doc(hidden)]
     #[deprecated(note = "parallel `find` does not search in order -- use `find_any`, \\
@@ -629,60 +1184,129 @@ pub trait ParallelIterator: Sized + Send
         self.find_any(predicate)
     }
 
     /// Searches for **some** item in the parallel iterator that
     /// matches the given predicate, and if so returns true.  Once
     /// a match is found, we'll attempt to stop processing the rest
     /// of the items.  Proving that there's no match, returning false,
     /// does require visiting every item.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    ///
+    /// let a = [0, 12, 3, 4, 0, 23, 0];
+    ///
+    /// let is_valid = a.par_iter().any(|&x| x > 10);
+    ///
+    /// assert!(is_valid);
+    /// ```
     fn any<P>(self, predicate: P) -> bool
         where P: Fn(Self::Item) -> bool + Sync + Send
     {
         self.map(predicate).find_any(|&p| p).is_some()
     }
 
     /// Tests that every item in the parallel iterator matches the given
     /// predicate, and if so returns true.  If a counter-example is found,
     /// we'll attempt to stop processing more items, then return false.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    ///
+    /// let a = [0, 12, 3, 4, 0, 23, 0];
+    ///
+    /// let is_valid = a.par_iter().all(|&x| x > 10);
+    ///
+    /// assert!(!is_valid);
+    /// ```
     fn all<P>(self, predicate: P) -> bool
         where P: Fn(Self::Item) -> bool + Sync + Send
     {
         self.map(predicate).find_any(|&p| !p).is_none()
     }
 
     /// Creates an iterator over the `Some` items of this iterator, halting
     /// as soon as any `None` is found.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    /// use std::sync::atomic::{AtomicUsize, Ordering};
+    ///
+    /// let counter = AtomicUsize::new(0);
+    /// let value = (0_i32..2048)
+    ///     .into_par_iter()
+    ///     .map(|x| {
+    ///              counter.fetch_add(1, Ordering::SeqCst);
+    ///              if x < 1024 { Some(x) } else { None }
+    ///          })
+    ///     .while_some()
+    ///     .max();
+    ///
+    /// assert!(value < Some(1024));
+    /// assert!(counter.load(Ordering::SeqCst) < 2048); // should not have visited every single one
+    /// ```
     fn while_some<T>(self) -> WhileSome<Self>
         where Self: ParallelIterator<Item = Option<T>>,
               T: Send
     {
         while_some::new(self)
     }
 
     /// Create a fresh collection containing all the elements produced
     /// by this parallel iterator.
     ///
-    /// You may prefer to use `collect_into()`, which allocates more
+    /// You may prefer to use `collect_into_vec()`, which allocates more
     /// efficiently with precise knowledge of how many elements the
     /// iterator contains, and even allows you to reuse an existing
     /// vector's backing store rather than allocating a fresh vector.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    ///
+    /// let sync_vec: Vec<_> = (0..100).into_iter().collect();
+    ///
+    /// let async_vec: Vec<_> = (0..100).into_par_iter().collect();
+    ///
+    /// assert_eq!(sync_vec, async_vec);
+    /// ```
     fn collect<C>(self) -> C
         where C: FromParallelIterator<Self::Item>
     {
         C::from_par_iter(self)
     }
 
     /// Unzips the items of a parallel iterator into a pair of arbitrary
     /// `ParallelExtend` containers.
     ///
-    /// You may prefer to use `unzip_into()`, which allocates more
+    /// You may prefer to use `unzip_into_vecs()`, which allocates more
     /// efficiently with precise knowledge of how many elements the
     /// iterator contains, and even allows you to reuse existing
     /// vectors' backing stores rather than allocating fresh vectors.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    ///
+    /// let a = [(0, 1), (1, 2), (2, 3), (3, 4)];
+    ///
+    /// let (left, right): (Vec<_>, Vec<_>) = a.par_iter().cloned().unzip();
+    ///
+    /// assert_eq!(left, [0, 1, 2, 3]);
+    /// assert_eq!(right, [1, 2, 3, 4]);
+    /// ```
     fn unzip<A, B, FromA, FromB>(self) -> (FromA, FromB)
         where Self: ParallelIterator<Item = (A, B)>,
               FromA: Default + Send + ParallelExtend<A>,
               FromB: Default + Send + ParallelExtend<B>,
               A: Send,
               B: Send
     {
         unzip::unzip(self)
@@ -691,37 +1315,85 @@ pub trait ParallelIterator: Sized + Send
     /// Partitions the items of a parallel iterator into a pair of arbitrary
     /// `ParallelExtend` containers.  Items for which the `predicate` returns
     /// true go into the first container, and the rest go into the second.
     ///
     /// Note: unlike the standard `Iterator::partition`, this allows distinct
     /// collection types for the left and right items.  This is more flexible,
     /// but may require new type annotations when converting sequential code
     /// that used type inference assuming the two were the same.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    ///
+    /// let (left, right): (Vec<_>, Vec<_>) = (0..8).into_par_iter().partition(|x| x % 2 == 0);
+    ///
+    /// assert_eq!(left, [0, 2, 4, 6]);
+    /// assert_eq!(right, [1, 3, 5, 7]);
+    /// ```
     fn partition<A, B, P>(self, predicate: P) -> (A, B)
         where A: Default + Send + ParallelExtend<Self::Item>,
               B: Default + Send + ParallelExtend<Self::Item>,
               P: Fn(&Self::Item) -> bool + Sync + Send
     {
         unzip::partition(self, predicate)
     }
 
     /// Partitions and maps the items of a parallel iterator into a pair of
     /// arbitrary `ParallelExtend` containers.  `Either::Left` items go into
     /// the first container, and `Either::Right` items go into the second.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    /// use rayon::iter::Either;
+    ///
+    /// let (left, right): (Vec<_>, Vec<_>) = (0..8).into_par_iter()
+    ///                                             .partition_map(|x| {
+    ///                                                 if x % 2 == 0 {
+    ///                                                     Either::Left(x * 4)
+    ///                                                 } else {
+    ///                                                     Either::Right(x * 3)
+    ///                                                 }
+    ///                                             });
+    ///
+    /// assert_eq!(left, [0, 8, 16, 24]);
+    /// assert_eq!(right, [3, 9, 15, 21]);
+    /// ```
     fn partition_map<A, B, P, L, R>(self, predicate: P) -> (A, B)
         where A: Default + Send + ParallelExtend<L>,
               B: Default + Send + ParallelExtend<R>,
               P: Fn(Self::Item) -> Either<L, R> + Sync + Send,
               L: Send,
               R: Send
     {
         unzip::partition_map(self, predicate)
     }
 
+    /// Intersperses clones of an element between items of this iterator.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    ///
+    /// let x = vec![1, 2, 3];
+    /// let r: Vec<_> = x.into_par_iter().intersperse(-1).collect();
+    ///
+    /// assert_eq!(r, vec![1, -1, 2, -1, 3]);
+    /// ```
+    fn intersperse(self, element: Self::Item) -> Intersperse<Self>
+        where Self::Item: Clone
+    {
+        intersperse::new(self, element)
+    }
+
     /// Internal method used to define the behavior of this parallel
     /// iterator. You should not need to call this directly.
     ///
     /// This method causes the iterator `self` to start producing
     /// items and to feed them to the consumer `consumer` one by one.
     /// It may split the consumer before doing so to create the
     /// opportunity to produce in parallel.
     ///
@@ -741,104 +1413,268 @@ pub trait ParallelIterator: Sized + Send
     /// use the (indexed) `Consumer` methods when driving a consumer, such
     /// as `split_at()`. Calling `UnindexedConsumer::split_off_left()` or
     /// other `UnindexedConsumer` methods -- or returning an inaccurate
     /// value -- may result in panics.
     ///
     /// This method is currently used to optimize `collect` for want
     /// of true Rust specialization; it may be removed when
     /// specialization is stable.
-    fn opt_len(&mut self) -> Option<usize> {
+    fn opt_len(&self) -> Option<usize> {
         None
     }
 }
 
 impl<T: ParallelIterator> IntoParallelIterator for T {
     type Iter = T;
     type Item = T::Item;
 
     fn into_par_iter(self) -> T {
         self
     }
 }
 
 /// An iterator that supports "random access" to its data, meaning
 /// that you can split it at arbitrary indices and draw data from
 /// those points.
+///
+/// **Note:** Not implemented for `u64` and `i64` ranges.
 pub trait IndexedParallelIterator: ParallelIterator {
     /// Collects the results of the iterator into the specified
     /// vector. The vector is always truncated before execution
     /// begins. If possible, reusing the vector across calls can lead
     /// to better performance since it reuses the same backing buffer.
-    fn collect_into(self, target: &mut Vec<Self::Item>) {
-        collect::collect_into(self, target);
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    ///
+    /// // any prior data will be truncated
+    /// let mut vec = vec![-1, -2, -3];
+    ///
+    /// (0..5).into_par_iter()
+    ///     .collect_into_vec(&mut vec);
+    ///
+    /// assert_eq!(vec, [0, 1, 2, 3, 4]);
+    /// ```
+    fn collect_into_vec(self, target: &mut Vec<Self::Item>) {
+        collect::collect_into_vec(self, target);
     }
 
     /// Unzips the results of the iterator into the specified
     /// vectors. The vectors are always truncated before execution
     /// begins. If possible, reusing the vectors across calls can lead
     /// to better performance since they reuse the same backing buffer.
-    fn unzip_into<A, B>(self, left: &mut Vec<A>, right: &mut Vec<B>)
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    ///
+    /// // any prior data will be truncated
+    /// let mut left = vec![42; 10];
+    /// let mut right = vec![-1; 10];
+    ///
+    /// (10..15).into_par_iter()
+    ///     .enumerate()
+    ///     .unzip_into_vecs(&mut left, &mut right);
+    ///
+    /// assert_eq!(left, [0, 1, 2, 3, 4]);
+    /// assert_eq!(right, [10, 11, 12, 13, 14]);
+    /// ```
+    fn unzip_into_vecs<A, B>(self, left: &mut Vec<A>, right: &mut Vec<B>)
         where Self: IndexedParallelIterator<Item = (A, B)>,
               A: Send,
               B: Send
     {
-        collect::unzip_into(self, left, right);
+        collect::unzip_into_vecs(self, left, right);
     }
 
     /// Iterate over tuples `(A, B)`, where the items `A` are from
     /// this iterator and `B` are from the iterator given as argument.
     /// Like the `zip` method on ordinary iterators, if the two
     /// iterators are of unequal length, you only get the items they
     /// have in common.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    ///
+    /// let result: Vec<_> = (1..4)
+    ///     .into_par_iter()
+    ///     .zip(vec!['a', 'b', 'c'])
+    ///     .collect();
+    ///
+    /// assert_eq!(result, [(1, 'a'), (2, 'b'), (3, 'c')]);
+    /// ```
     fn zip<Z>(self, zip_op: Z) -> Zip<Self, Z::Iter>
         where Z: IntoParallelIterator,
               Z::Iter: IndexedParallelIterator
     {
         zip::new(self, zip_op.into_par_iter())
     }
 
+    /// The same as `Zip`, but requires that both iterators have the same length.
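+    ///
+    /// # Examples
+    ///
+    /// A sketch of the matching-length case (the mismatched case is shown
+    /// under `# Panics` below):
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    ///
+    /// let result: Vec<_> = (1..4)
+    ///     .into_par_iter()
+    ///     .zip_eq(vec!['a', 'b', 'c'])
+    ///     .collect();
+    ///
+    /// assert_eq!(result, [(1, 'a'), (2, 'b'), (3, 'c')]);
+    /// ```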
+    ///
+    /// # Panics
+    /// Will panic if `self` and `zip_op` are not the same length.
+    ///
+    /// ```should_panic
+    /// use rayon::prelude::*;
+    ///
+    /// let one = [1u8];
+    /// let two = [2u8, 2];
+    /// let one_iter = one.par_iter();
+    /// let two_iter = two.par_iter();
+    ///
+    /// // this will panic
+    /// let zipped: Vec<(&u8, &u8)> = one_iter.zip_eq(two_iter).collect();
+    ///
+    /// // we should never get here
+    /// assert_eq!(1, zipped.len());
+    /// ```
+    fn zip_eq<Z>(self, zip_op: Z) -> ZipEq<Self, Z::Iter>
+        where Z: IntoParallelIterator,
+              Z::Iter: IndexedParallelIterator
+    {
+        let zip_op_iter = zip_op.into_par_iter();
+        assert_eq!(self.len(), zip_op_iter.len());
+        zip_eq::new(self, zip_op_iter)
+    }
+
+    /// Interleave elements of this iterator and the other given
+    /// iterator. Alternately yields elements from this iterator and
+    /// the given iterator, until both are exhausted. If one iterator
+    /// is exhausted before the other, the remaining elements are provided
+    /// from the other.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    /// let (x, y) = (vec![1, 2], vec![3, 4, 5, 6]);
+    /// let r: Vec<i32> = x.into_par_iter().interleave(y).collect();
+    /// assert_eq!(r, vec![1, 3, 2, 4, 5, 6]);
+    /// ```
+    fn interleave<I>(self, other: I) -> Interleave<Self, I::Iter>
+        where I: IntoParallelIterator<Item = Self::Item>,
+              I::Iter: IndexedParallelIterator<Item = Self::Item>
+    {
+        interleave::new(self, other.into_par_iter())
+    }
+
+    /// Interleave elements of this iterator and the other given
+    /// iterator, until one is exhausted.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    /// let (x, y) = (vec![1, 2, 3, 4], vec![5, 6]);
+    /// let r: Vec<i32> = x.into_par_iter().interleave_shortest(y).collect();
+    /// assert_eq!(r, vec![1, 5, 2, 6, 3]);
+    /// ```
+    fn interleave_shortest<I>(self, other: I) -> InterleaveShortest<Self, I::Iter>
+        where I: IntoParallelIterator<Item = Self::Item>,
+              I::Iter: IndexedParallelIterator<Item = Self::Item>
+    {
+        interleave_shortest::new(self, other.into_par_iter())
+    }
+
+    /// Split an iterator up into fixed-size chunks.
+    ///
+    /// Returns an iterator that returns `Vec`s of the given number of elements.
+    /// If the number of elements in the iterator is not divisible by `chunk_size`,
+    /// the last chunk may be shorter than `chunk_size`.
+    ///
+    /// See also [`par_chunks()`] and [`par_chunks_mut()`] for similar behavior on
+    /// slices, without having to allocate intermediate `Vec`s for the chunks.
+    ///
+    /// [`par_chunks()`]: ../slice/trait.ParallelSlice.html#method.par_chunks
+    /// [`par_chunks_mut()`]: ../slice/trait.ParallelSliceMut.html#method.par_chunks_mut
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    /// let a = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
+    /// let r: Vec<Vec<i32>> = a.into_par_iter().chunks(3).collect();
+    /// assert_eq!(r, vec![vec![1,2,3], vec![4,5,6], vec![7,8,9], vec![10]]);
+    /// ```
+    fn chunks(self, chunk_size: usize) -> Chunks<Self> {
+        assert!(chunk_size != 0, "chunk_size must not be zero");
+        chunks::new(self, chunk_size)
+    }
+
     /// Lexicographically compares the elements of this `ParallelIterator` with those of
     /// another.
-    fn cmp<I>(mut self, other: I) -> Ordering
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    /// use std::cmp::Ordering::*;
+    ///
+    /// let x = vec![1, 2, 3];
+    /// assert_eq!(x.par_iter().cmp(&vec![1, 3, 0]), Less);
+    /// assert_eq!(x.par_iter().cmp(&vec![1, 2, 3]), Equal);
+    /// assert_eq!(x.par_iter().cmp(&vec![1, 2]), Greater);
+    /// ```
+    fn cmp<I>(self, other: I) -> Ordering
         where I: IntoParallelIterator<Item = Self::Item>,
               I::Iter: IndexedParallelIterator,
               Self::Item: Ord
     {
-        let mut other = other.into_par_iter();
+        let other = other.into_par_iter();
         let ord_len = self.len().cmp(&other.len());
         self.zip(other)
             .map(|(x, y)| Ord::cmp(&x, &y))
             .find_first(|&ord| ord != Ordering::Equal)
             .unwrap_or(ord_len)
     }
 
     /// Lexicographically compares the elements of this `ParallelIterator` with those of
     /// another.
-    fn partial_cmp<I>(mut self, other: I) -> Option<Ordering>
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    /// use std::cmp::Ordering::*;
+    /// use std::f64::NAN;
+    ///
+    /// let x = vec![1.0, 2.0, 3.0];
+    /// assert_eq!(x.par_iter().partial_cmp(&vec![1.0, 3.0, 0.0]), Some(Less));
+    /// assert_eq!(x.par_iter().partial_cmp(&vec![1.0, 2.0, 3.0]), Some(Equal));
+    /// assert_eq!(x.par_iter().partial_cmp(&vec![1.0, 2.0]), Some(Greater));
+    /// assert_eq!(x.par_iter().partial_cmp(&vec![1.0, NAN]), None);
+    /// ```
+    fn partial_cmp<I>(self, other: I) -> Option<Ordering>
         where I: IntoParallelIterator,
               I::Iter: IndexedParallelIterator,
               Self::Item: PartialOrd<I::Item>
     {
-        let mut other = other.into_par_iter();
+        let other = other.into_par_iter();
         let ord_len = self.len().cmp(&other.len());
         self.zip(other)
             .map(|(x, y)| PartialOrd::partial_cmp(&x, &y))
             .find_first(|&ord| ord != Some(Ordering::Equal))
             .unwrap_or(Some(ord_len))
     }
 
     /// Determines if the elements of this `ParallelIterator`
     /// are equal to those of another
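+    ///
+    /// # Examples
+    ///
+    /// A quick illustrative check, using only the API shown above:
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    ///
+    /// let x = vec![1, 2, 3];
+    ///
+    /// assert!(x.par_iter().eq(&vec![1, 2, 3]));
+    /// assert!(!x.par_iter().eq(&vec![1, 2]));
+    /// ```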
-    fn eq<I>(mut self, other: I) -> bool
+    fn eq<I>(self, other: I) -> bool
         where I: IntoParallelIterator,
               I::Iter: IndexedParallelIterator,
               Self::Item: PartialEq<I::Item>
     {
-        let mut other = other.into_par_iter();
+        let other = other.into_par_iter();
         self.len() == other.len() && self.zip(other).all(|(x, y)| x.eq(&y))
     }
 
     /// Determines if the elements of this `ParallelIterator`
     /// are unequal to those of another
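+    ///
+    /// # Examples
+    ///
+    /// A quick illustrative check, mirroring the `eq` sketch above:
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    ///
+    /// let x = vec![1, 2, 3];
+    ///
+    /// assert!(x.par_iter().ne(&vec![1, 2, 4]));
+    /// assert!(!x.par_iter().ne(&vec![1, 2, 3]));
+    /// ```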
     fn ne<I>(self, other: I) -> bool
         where I: IntoParallelIterator,
               I::Iter: IndexedParallelIterator,
@@ -885,35 +1721,88 @@ pub trait IndexedParallelIterator: Paral
               I::Iter: IndexedParallelIterator,
               Self::Item: PartialOrd<I::Item>
     {
         let ord = self.partial_cmp(other);
         ord == Some(Ordering::Equal) || ord == Some(Ordering::Greater)
     }
 
     /// Yields an index along with each item.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    ///
+    /// let chars = vec!['a', 'b', 'c'];
+    /// let result: Vec<_> = chars
+    ///     .into_par_iter()
+    ///     .enumerate()
+    ///     .collect();
+    ///
+    /// assert_eq!(result, [(0, 'a'), (1, 'b'), (2, 'c')]);
+    /// ```
     fn enumerate(self) -> Enumerate<Self> {
         enumerate::new(self)
     }
 
     /// Creates an iterator that skips the first `n` elements.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    ///
+    /// let result: Vec<_> = (0..100)
+    ///     .into_par_iter()
+    ///     .skip(95)
+    ///     .collect();
+    ///
+    /// assert_eq!(result, [95, 96, 97, 98, 99]);
+    /// ```
     fn skip(self, n: usize) -> Skip<Self> {
         skip::new(self, n)
     }
 
     /// Creates an iterator that yields the first `n` elements.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    ///
+    /// let result: Vec<_> = (0..100)
+    ///     .into_par_iter()
+    ///     .take(5)
+    ///     .collect();
+    ///
+    /// assert_eq!(result, [0, 1, 2, 3, 4]);
+    /// ```
     fn take(self, n: usize) -> Take<Self> {
         take::new(self, n)
     }
 
     /// Searches for **some** item in the parallel iterator that
     /// matches the given predicate, and returns its index.  Like
     /// `ParallelIterator::find_any`, the parallel search will not
     /// necessarily find the **first** match, and once a match is
     /// found we'll attempt to stop processing any more.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    ///
+    /// let a = [1, 2, 3, 3];
+    ///
+    /// let i = a.par_iter().position_any(|&x| x == 3).expect("found");
+    /// assert!(i == 2 || i == 3);
+    ///
+    /// assert_eq!(a.par_iter().position_any(|&x| x == 100), None);
+    /// ```
     fn position_any<P>(self, predicate: P) -> Option<usize>
         where P: Fn(Self::Item) -> bool + Sync + Send
     {
         self.map(predicate)
             .enumerate()
             .find_any(|&(_, p)| p)
             .map(|(i, _)| i)
     }
@@ -925,16 +1814,28 @@ pub trait IndexedParallelIterator: Paral
     /// all attempts to the right of the match will be stopped, while
     /// attempts to the left must continue in case an earlier match
     /// is found.
     ///
     /// Note that not all parallel iterators have a useful order, much like
     /// sequential `HashMap` iteration, so "first" may be nebulous.  If you
     /// just want the first match discovered anywhere in the iterator,
     /// `position_any` is a better choice.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    ///
+    /// let a = [1, 2, 3, 3];
+    ///
+    /// assert_eq!(a.par_iter().position_first(|&x| x == 3), Some(2));
+    ///
+    /// assert_eq!(a.par_iter().position_first(|&x| x == 100), None);
+    /// ```
     fn position_first<P>(self, predicate: P) -> Option<usize>
         where P: Fn(Self::Item) -> bool + Sync + Send
     {
         self.map(predicate)
             .enumerate()
             .find_first(|&(_, p)| p)
             .map(|(i, _)| i)
     }
@@ -946,16 +1847,28 @@ pub trait IndexedParallelIterator: Paral
     /// all attempts to the left of the match will be stopped, while
     /// attempts to the right must continue in case a later match
     /// is found.
     ///
     /// Note that not all parallel iterators have a useful order, much like
     /// sequential `HashMap` iteration, so "last" may be nebulous.  When the
     /// order doesn't actually matter to you, `position_any` is a better
     /// choice.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    ///
+    /// let a = [1, 2, 3, 3];
+    ///
+    /// assert_eq!(a.par_iter().position_last(|&x| x == 3), Some(3));
+    ///
+    /// assert_eq!(a.par_iter().position_last(|&x| x == 100), None);
+    /// ```
     fn position_last<P>(self, predicate: P) -> Option<usize>
         where P: Fn(Self::Item) -> bool + Sync + Send
     {
         self.map(predicate)
             .enumerate()
             .find_last(|&(_, p)| p)
             .map(|(i, _)| i)
     }
@@ -966,55 +1879,118 @@ pub trait IndexedParallelIterator: Paral
     fn position<P>(self, predicate: P) -> Option<usize>
         where P: Fn(Self::Item) -> bool + Sync + Send
     {
         self.position_any(predicate)
     }
 
     /// Produces a new iterator with the elements of this iterator in
     /// reverse order.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    ///
+    /// let result: Vec<_> = (0..5)
+    ///     .into_par_iter()
+    ///     .rev()
+    ///     .collect();
+    ///
+    /// assert_eq!(result, [4, 3, 2, 1, 0]);
+    /// ```
     fn rev(self) -> Rev<Self> {
         rev::new(self)
     }
 
     /// Sets the minimum length of iterators desired to process in each
     /// thread.  Rayon will not split any smaller than this length, but
     /// of course an iterator could already be smaller to begin with.
+    ///
+    /// Producers like `zip` and `interleave` will use the greater of the two
+    /// minimums.
+    /// Chained iterators and iterators inside `flat_map` may each use
+    /// their own minimum length.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    ///
+    /// let min = (0..1_000_000)
+    ///     .into_par_iter()
+    ///     .with_min_len(1234)
+    ///     .fold(|| 0, |acc, _| acc + 1) // count how many are in this segment
+    ///     .min().unwrap();
+    ///
+    /// assert!(min >= 1234);
+    /// ```
     fn with_min_len(self, min: usize) -> MinLen<Self> {
         len::new_min_len(self, min)
     }
 
     /// Sets the maximum length of iterators desired to process in each
     /// thread.  Rayon will try to split at least below this length,
     /// unless that would put it below the length from `with_min_len()`.
     /// For example, given min=10 and max=15, a length of 16 will not be
     /// split any further.
+    ///
+    /// Producers like `zip` and `interleave` will use the lesser of the two
+    /// maximums.
+    /// Chained iterators and iterators inside `flat_map` may each use
+    /// their own maximum length.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    ///
+    /// let max = (0..1_000_000)
+    ///     .into_par_iter()
+    ///     .with_max_len(1234)
+    ///     .fold(|| 0, |acc, _| acc + 1) // count how many are in this segment
+    ///     .max().unwrap();
+    ///
+    /// assert!(max <= 1234);
+    /// ```
     fn with_max_len(self, max: usize) -> MaxLen<Self> {
         len::new_max_len(self, max)
     }
 
     /// Produces an exact count of how many items this iterator will
     /// produce, presuming no panic occurs.
-    fn len(&mut self) -> usize;
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    ///
+    /// let par_iter = (0..100).into_par_iter().zip(vec![0; 10]);
+    /// assert_eq!(par_iter.len(), 10);
+    ///
+    /// let vec: Vec<_> = par_iter.collect();
+    /// assert_eq!(vec.len(), 10);
+    /// ```
+    fn len(&self) -> usize;
 
     /// Internal method used to define the behavior of this parallel
     /// iterator. You should not need to call this directly.
     ///
     /// This method causes the iterator `self` to start producing
     /// items and to feed them to the consumer `consumer` one by one.
     /// It may split the consumer before doing so to create the
     /// opportunity to produce in parallel. If a split does happen, it
     /// will inform the consumer of the index where the split should
     /// occur (unlike `ParallelIterator::drive_unindexed()`).
     ///
     /// See the [README] for more details on the internals of parallel
     /// iterators.
     ///
     /// [README]: README.md
-    fn drive<'c, C: Consumer<Self::Item>>(self, consumer: C) -> C::Result;
+    fn drive<C: Consumer<Self::Item>>(self, consumer: C) -> C::Result;
 
     /// Internal method used to define the behavior of this parallel
     /// iterator. You should not need to call this directly.
     ///
     /// This method converts the iterator into a producer P and then
     /// invokes `callback.callback()` with P. Note that the type of
     /// this producer is not defined as part of the API, since
     /// `callback` must be defined generically for all producers. This
@@ -1024,30 +2000,114 @@ pub trait IndexedParallelIterator: Paral
     ///
     /// See the [README] for more details on the internals of parallel
     /// iterators.
     ///
     /// [README]: README.md
     fn with_producer<CB: ProducerCallback<Self::Item>>(self, callback: CB) -> CB::Output;
 }
 
-/// `FromParallelIterator` implements the conversion from a [`ParallelIterator`].
-/// By implementing `FromParallelIterator` for a type, you define how it will be
+/// `FromParallelIterator` implements the creation of a collection
+/// from a [`ParallelIterator`]. By implementing
+/// `FromParallelIterator` for a given type, you define how it will be
 /// created from an iterator.
 ///
 /// `FromParallelIterator` is used through [`ParallelIterator`]'s [`collect()`] method.
 ///
 /// [`ParallelIterator`]: trait.ParallelIterator.html
 /// [`collect()`]: trait.ParallelIterator.html#method.collect
+///
+/// # Examples
+///
+/// Implementing `FromParallelIterator` for your type:
+///
+/// ```
+/// use rayon::prelude::*;
+/// use std::mem;
+///
+/// struct BlackHole {
+///     mass: usize,
+/// }
+///
+/// impl<T: Send> FromParallelIterator<T> for BlackHole {
+///     fn from_par_iter<I>(par_iter: I) -> Self
+///         where I: IntoParallelIterator<Item = T>
+///     {
+///         let par_iter = par_iter.into_par_iter();
+///         BlackHole {
+///             mass: par_iter.count() * mem::size_of::<T>(),
+///         }
+///     }
+/// }
+///
+/// let bh: BlackHole = (0i32..1000).into_par_iter().collect();
+/// assert_eq!(bh.mass, 4000);
+/// ```
 pub trait FromParallelIterator<T>
     where T: Send
 {
+    /// Creates an instance of the collection from the parallel iterator `par_iter`.
+    ///
+    /// If your collection is not naturally parallel, the easiest (and
+    /// fastest) way to do this is often to collect `par_iter` into a
+    /// [`LinkedList`] or other intermediate data structure and then
+    /// sequentially extend your collection. However, a more 'native'
+    /// technique is to use the [`par_iter.fold`] or
+    /// [`par_iter.fold_with`] methods to create the collection.
+    /// Alternatively, if your collection is 'natively' parallel, you
+    /// can use `par_iter.for_each` to process each element in turn.
+    ///
+    /// [`LinkedList`]: https://doc.rust-lang.org/std/collections/struct.LinkedList.html
+    /// [`par_iter.fold`]: trait.ParallelIterator.html#method.fold
+    /// [`par_iter.fold_with`]: trait.ParallelIterator.html#method.fold_with
+    /// [`par_iter.for_each`]: trait.ParallelIterator.html#method.for_each
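+    ///
+    /// # Examples
+    ///
+    /// A sketch of the `LinkedList` technique described above (the
+    /// `collect_via_lists` helper is invented for this example):
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    /// use std::collections::LinkedList;
+    ///
+    /// fn collect_via_lists<T, I>(par_iter: I) -> Vec<T>
+    ///     where T: Send,
+    ///           I: IntoParallelIterator<Item = T>
+    /// {
+    ///     // Fold items into per-chunk `Vec`s, gather those into a
+    ///     // `LinkedList` (cheap to combine across threads), then extend
+    ///     // the final collection sequentially.
+    ///     let lists: LinkedList<Vec<T>> = par_iter
+    ///         .into_par_iter()
+    ///         .fold(Vec::new, |mut vec, item| {
+    ///             vec.push(item);
+    ///             vec
+    ///         })
+    ///         .collect();
+    ///
+    ///     let mut result = Vec::new();
+    ///     for list in lists {
+    ///         result.extend(list);
+    ///     }
+    ///     result
+    /// }
+    ///
+    /// assert_eq!(collect_via_lists(0..5), vec![0, 1, 2, 3, 4]);
+    /// ```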
     fn from_par_iter<I>(par_iter: I) -> Self where I: IntoParallelIterator<Item = T>;
 }
 
 /// `ParallelExtend` extends an existing collection with items from a [`ParallelIterator`].
 ///
 /// [`ParallelIterator`]: trait.ParallelIterator.html
+///
+/// # Examples
+///
+/// Implementing `ParallelExtend` for your type:
+///
+/// ```
+/// use rayon::prelude::*;
+/// use std::mem;
+///
+/// struct BlackHole {
+///     mass: usize,
+/// }
+///
+/// impl<T: Send> ParallelExtend<T> for BlackHole {
+///     fn par_extend<I>(&mut self, par_iter: I)
+///         where I: IntoParallelIterator<Item = T>
+///     {
+///         let par_iter = par_iter.into_par_iter();
+///         self.mass += par_iter.count() * mem::size_of::<T>();
+///     }
+/// }
+///
+/// let mut bh = BlackHole { mass: 0 };
+/// bh.par_extend(0i32..1000);
+/// assert_eq!(bh.mass, 4000);
+/// bh.par_extend(0i64..10);
+/// assert_eq!(bh.mass, 4080);
+/// ```
 pub trait ParallelExtend<T>
     where T: Send
 {
+    /// Extends an instance of the collection with the elements drawn
+    /// from the parallel iterator `par_iter`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    ///
+    /// let mut vec = vec![];
+    /// vec.par_extend(0..5);
+    /// vec.par_extend((0..5).into_par_iter().map(|i| i * i));
+    /// assert_eq!(vec, [0, 1, 2, 3, 4, 0, 1, 4, 9, 16]);
+    /// ```
     fn par_extend<I>(&mut self, par_iter: I) where I: IntoParallelIterator<Item = T>;
 }
--- a/third_party/rust/rayon/src/iter/noop.rs
+++ b/third_party/rust/rayon/src/iter/noop.rs
@@ -1,9 +1,9 @@
-use super::internal::*;
+use super::plumbing::*;
 
 pub struct NoopConsumer;
 
 impl NoopConsumer {
     pub fn new() -> Self {
         NoopConsumer
     }
 }
@@ -28,16 +28,21 @@ impl<T> Consumer<T> for NoopConsumer {
 
 impl<T> Folder<T> for NoopConsumer {
     type Result = ();
 
     fn consume(self, _item: T) -> Self {
         self
     }
 
+    fn consume_iter<I>(self, iter: I) -> Self where I: IntoIterator<Item = T> {
+        iter.into_iter().fold((), |_, _| ());
+        self
+    }
+
     fn complete(self) {}
 
     fn full(&self) -> bool {
         false
     }
 }
 
 impl<T> UnindexedConsumer<T> for NoopConsumer {
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon/src/iter/once.rs
@@ -0,0 +1,65 @@
+use iter::plumbing::*;
+use iter::*;
+
+/// Creates a parallel iterator that produces an element exactly once.
+///
+/// This admits no parallelism on its own, but it could be chained to existing
+/// parallel iterators to extend their contents, or otherwise used for any code
+/// that deals with generic parallel iterators.
+///
+/// # Examples
+///
+/// ```
+/// use rayon::prelude::*;
+/// use rayon::iter::once;
+///
+/// let pi = (0..1234).into_par_iter()
+///     .chain(once(-1))
+///     .chain(1234..10_000);
+///
+/// assert_eq!(pi.clone().count(), 10_001);
+/// assert_eq!(pi.clone().filter(|&x| x < 0).count(), 1);
+/// assert_eq!(pi.position_any(|x| x < 0), Some(1234));
+/// ```
+pub fn once<T: Send>(item: T) -> Once<T> {
+    Once { item: item }
+}
+
+/// Iterator adaptor for [the `once()` function](fn.once.html).
+#[derive(Clone, Debug)]
+pub struct Once<T: Send> {
+    item: T,
+}
+
+impl<T: Send> ParallelIterator for Once<T> {
+    type Item = T;
+
+    fn drive_unindexed<C>(self, consumer: C) -> C::Result
+        where C: UnindexedConsumer<Self::Item>
+    {
+        self.drive(consumer)
+    }
+
+    fn opt_len(&self) -> Option<usize> {
+        Some(1)
+    }
+}
+
+impl<T: Send> IndexedParallelIterator for Once<T> {
+    fn drive<C>(self, consumer: C) -> C::Result
+        where C: Consumer<Self::Item>
+    {
+        consumer.into_folder().consume(self.item).complete()
+    }
+
+    fn len(&self) -> usize {
+        1
+    }
+
+    fn with_producer<CB>(self, callback: CB) -> CB::Output
+        where CB: ProducerCallback<Self::Item>
+    {
+        // Let `OptionProducer` handle it.
+        Some(self.item).into_par_iter().with_producer(callback)
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon/src/iter/plumbing/README.md
@@ -0,0 +1,315 @@
+# Parallel Iterators
+
+These are some notes on the design of the parallel iterator traits.
+This file does not describe how to **use** parallel iterators.
+
+## The challenge
+
+Parallel iterators are more complicated than sequential iterators.
+The reason is that they have to be able to split themselves up and
+operate in parallel across the two halves.
+
+The current design for parallel iterators has two distinct modes in
+which they can be used; as we will see, not all iterators support both
+modes (which is why there are two):
+
+- **Pull mode** (the `Producer` and `UnindexedProducer` traits): in this mode,
+  the iterator is asked to produce the next item using a call to `next`. This
+  is basically like a normal iterator, but with a twist: you can split the
+  iterator in half to produce disjoint items in separate threads.
+  - in the `Producer` trait, splitting is done with `split_at`, which accepts
+    an index where the split should be performed. Only indexed iterators can
+    work in this mode, as they know exactly how much data they will produce,
+    and how to locate the requested index.
+  - in the `UnindexedProducer` trait, splitting is done with `split`, which
+    simply requests that the producer divide itself *approximately* in half.
+    This is useful when the exact length and/or layout is unknown, as with
+    `String` characters, or when the length might exceed `usize`, as with
+    `Range<u64>` on 32-bit platforms.
+    - In theory, any `Producer` could act unindexed, but we don't currently
+      use that possibility.  When you know the exact length, a `split` can
+      simply be implemented as `split_at(length/2)`.
+- **Push mode** (the `Consumer` and `UnindexedConsumer` traits): in
+  this mode, the iterator instead is *given* each item in turn, which
+  is then processed. This is the opposite of a normal iterator. It's
+  more like a `for_each` call: each time a new item is produced, the
+  `consume` method is called with that item. (The traits themselves are
+  a bit more complex, as they support state that can be threaded
+  through and ultimately reduced.) Unlike producers, there are two
+  variants of consumers. The difference is how the split is performed:
+  - in the `Consumer` trait, splitting is done with `split_at`, which
+    accepts an index where the split should be performed. All
+    iterators can work in this mode. The resulting halves thus have an
+    idea about how much data they expect to consume.
+  - in the `UnindexedConsumer` trait, splitting is done with
+    `split_off_left`.  There is no index: the resulting halves must be
+    prepared to process any amount of data, and they don't know where that
+    data falls in the overall stream.
+    - Not all consumers can operate in this mode. It works for
+      `for_each` and `reduce`, for example, but it does not work for
+      `collect_into_vec`, since in that case the position of each item is
+      important for knowing where it ends up in the target collection.
+
+## How iterator execution proceeds
+
+We'll walk through this example iterator chain to start. This chain
+demonstrates more-or-less the full complexity of what can happen.
+
+```rust
+vec1.par_iter()
+    .zip(vec2.par_iter())
+    .flat_map(some_function)
+    .for_each(some_other_function)
+```
+
+To handle an iterator chain, we start by creating consumers. This
+works from the end. So in this case, the call to `for_each` is the
+final step, so it will create a `ForEachConsumer` that, given an item,
+just calls `some_other_function` with that item. (`ForEachConsumer` is
+a very simple consumer because it doesn't need to thread any state
+between items at all.)
+
+Now, the `for_each` call will pass this consumer to the base iterator,
+which is the `flat_map`. It will do by calling the `drive_unindexed`
+method on the `ParallelIterator` trait. `drive_unindexed` basically
+says "produce items for this iterator and feed them to this consumer";
+it only works for unindexed consumers.
+
+(As an aside, it is interesting that only some consumers can work in
+unindexed mode, but all producers can *drive* an unindexed consumer.
+In contrast, only some producers can drive an *indexed* consumer, but
+all consumers can be supplied indexes. Isn't variance neat.)
+
+As it happens, `FlatMap` only works with unindexed consumers anyway.
+This is because flat-map basically has no idea how many items it will
+produce. If you ask flat-map to produce the 22nd item, it can't do it,
+at least not without some intermediate state. It doesn't know whether
+processing the first item will create 1 item, 3 items, or 100;
+therefore, to produce an arbitrary item, it would basically just have
+to start at the beginning and execute sequentially, which is not what
+we want. But for unindexed consumers, this doesn't matter, since they
+don't need to know how much data they will get.
+
+Therefore, `FlatMap` can wrap the `ForEachConsumer` with a
+`FlatMapConsumer` that feeds to it. This `FlatMapConsumer` will be
+given one item. It will then invoke `some_function` to get a parallel
+iterator out. It will then ask this new parallel iterator to drive the
+`ForEachConsumer`. The `drive_unindexed` method on `flat_map` can then
+pass the `FlatMapConsumer` up the chain to the previous item, which is
+`zip`. At this point, something interesting happens.
+
+## Switching from push to pull mode
+
+If you think about `zip`, it can't really be implemented as a
+consumer, at least not without an intermediate thread and some
+channels or something (or maybe coroutines). The problem is that it
+has to walk two iterators *in lockstep*. Basically, it can't call two
+`drive` methods simultaneously, it can only call one at a time. So at
+this point, the `zip` iterator needs to switch from *push mode* into
+*pull mode*.
+
+You'll note that `Zip` is only usable if its inputs implement
+`IndexedParallelIterator`, meaning that they can produce data starting
+at random points in the stream. This need to switch to push mode is
+exactly why. If we want to split a zip iterator at position 22, we
+need to be able to start zipping items from index 22 right away,
+without having to start from index 0.
+
+Anyway, so at this point, the `drive_unindexed` method for `Zip` stops
+creating consumers. Instead, it creates a *producer*, a `ZipProducer`,
+to be exact, and calls the `bridge` function in the `internals`
+module. Creating a `ZipProducer` will in turn create producers for
+the two iterators being zipped. This is possible because they both
+implement `IndexedParallelIterator`.
+
+The `bridge` function will then connect the consumer, which is
+handling the `flat_map` and `for_each`, with the producer, which is
+handling the `zip` and its predecessors. It will split down until the
+chunks seem reasonably small, then pull items from the producer and
+feed them to the consumer.
+
+## The base case
+
+The other time that `bridge` gets used is when we bottom out in an
+indexed producer, such as a slice or range.  There is also a
+`bridge_unindexed` equivalent for - you guessed it - unindexed producers,
+such as string characters.
+
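+For a more concrete picture of the unindexed side, here is a small sketch
+(the `SliceSplit` and `ParSlice` names are invented for this example and are
+not part of Rayon itself) of an unindexed producer that walks a slice by
+repeatedly splitting it roughly in half, wired up with `bridge_unindexed`:
+
+```rust
+use rayon::prelude::*;
+use rayon::iter::plumbing::{bridge_unindexed, Folder, UnindexedConsumer, UnindexedProducer};
+
+struct SliceSplit<'a, T: 'a>(&'a [T]);
+
+impl<'a, T: Sync + 'a> UnindexedProducer for SliceSplit<'a, T> {
+    type Item = &'a T;
+
+    // Split roughly in half; report `None` once only a single item (or
+    // nothing) remains, so splitting stops there.
+    fn split(self) -> (Self, Option<Self>) {
+        if self.0.len() < 2 {
+            return (self, None);
+        }
+        let (left, right) = self.0.split_at(self.0.len() / 2);
+        (SliceSplit(left), Some(SliceSplit(right)))
+    }
+
+    // Sequentially feed whatever is left of this piece to the folder.
+    fn fold_with<F>(self, folder: F) -> F
+        where F: Folder<Self::Item>
+    {
+        folder.consume_iter(self.0.iter())
+    }
+}
+
+struct ParSlice<'a, T: 'a>(&'a [T]);
+
+impl<'a, T: Sync + 'a> ParallelIterator for ParSlice<'a, T> {
+    type Item = &'a T;
+
+    fn drive_unindexed<C>(self, consumer: C) -> C::Result
+        where C: UnindexedConsumer<Self::Item>
+    {
+        bridge_unindexed(SliceSplit(self.0), consumer)
+    }
+}
+
+fn main() {
+    let data = [1, 2, 3, 4, 5];
+    let sum: i32 = ParSlice(&data).sum();
+    assert_eq!(sum, 15);
+}
+```
+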
+<a name="producer-callback">
+
+## What on earth is `ProducerCallback`?
+
+We saw that when you call a parallel action method like
+`par_iter.reduce()`, that will create a "reducing" consumer and then
+invoke `par_iter.drive_unindexed()` (or `par_iter.drive()`) as
+appropriate. This may create yet more consumers as we proceed up the
+parallel iterator chain. But at some point we're going to get to the
+start of the chain, or to a parallel iterator (like `zip()`) that has
+to coordinate multiple inputs. At that point, we need to start
+converting parallel iterators into producers.
+
+The way we do this is by invoking the method `with_producer()`, defined on
+`IndexedParallelIterator`. This is a callback scheme. In an ideal world,
+it would work like this:
+
+```rust
+base_iter.with_producer(|base_producer| {
+    // here, `base_producer` is the producer for `base_iter`
+});
+```
+
+In that case, we could implement a combinator like `map()` by getting
+the producer for the base iterator, wrapping it to make our own
+`MapProducer`, and then passing that to the callback. Something like
+this:
+
+```rust
+struct MapProducer<'f, P, F: 'f> {
+    base: P,
+    map_op: &'f F,
+}
+
+impl<I, F> IndexedParallelIterator for Map<I, F>
+    where I: IndexedParallelIterator,
+          F: MapOp<I::Item>,
+{
+    fn with_producer<CB>(self, callback: CB) -> CB::Output {
+        let map_op = &self.map_op;
+        self.base_iter.with_producer(|base_producer| {
+            // Here `base_producer` is the producer for `self.base_iter`.
+            // Wrap that to make a `MapProducer`
+            let map_producer = MapProducer {
+                base: base_producer,
+                map_op: map_op
+            };
+
+            // invoke the callback with the wrapped version
+            callback(map_producer)
+        })
+    }
+}
+```
+
+This example demonstrates some of the power of the callback scheme.
+It winds up being a very flexible setup. For one thing, it means we
+can take ownership of `par_iter`; we can then in turn give ownership
+away of its bits and pieces into the producer (this is very useful if
+the iterator owns an `&mut` slice, for example), or create shared
+references and put *those* in the producer. In the case of map, for
+example, the parallel iterator owns the `map_op`, and we borrow
+references to it which we then put into the `MapProducer` (this means
+the `MapProducer` can easily split itself and share those references).
+The `with_producer` method can also create resources that are needed
+during the parallel execution, since the producer does not have to be
+returned.
+
+Unfortunately there is a catch. We can't actually use closures the way
+I showed you. To see why, think about the type that `map_producer`
+would have to have. If we were going to write the `with_producer`
+method using a closure, it would have to look something like this:
+
+```rust
+pub trait IndexedParallelIterator: ParallelIterator {
+    type Producer;
+    fn with_producer<CB, R>(self, callback: CB) -> R
+        where CB: FnOnce(Self::Producer) -> R;
+    ...
+}
+```
+
+Note that we had to add this associated type `Producer` so that
+we could specify the argument of the callback to be `Self::Producer`.
+Now, imagine trying to write that `MapProducer` impl using this style:
+
+```rust
+impl<I, F> IndexedParallelIterator for Map<I, F>
+    where I: IndexedParallelIterator,
+          F: MapOp<I::Item>,
+{
+    type Producer = MapProducer<'f, I::Producer, F>;
+    //                          ^^ wait, what is this `'f`?
+
+    fn with_producer<CB, R>(self, callback: CB) -> R
+        where CB: FnOnce(Self::Producer) -> R
+    {
+        let map_op = &self.map_op;
+        //  ^^^^^^ `'f` is (conceptually) the lifetime of this reference,
+        //         so it will be different for each call to `with_producer`!
+    }
+}
+```
+
+This may look familiar to you: it's the same problem that we have
+trying to define an `Iterable` trait. Basically, the producer type
+needs to include a lifetime (here, `'f`) that refers to the body of
+`with_producer` and hence is not in scope at the impl level.
+
+If we had [associated type constructors][1598], we could solve this
+problem that way. But there is another solution. We can use a
+dedicated callback trait like `ProducerCallback`, instead of `FnOnce`:
+
+[1598]: https://github.com/rust-lang/rfcs/pull/1598
+
+```rust
+pub trait ProducerCallback<T> {
+    type Output;
+    fn callback<P>(self, producer: P) -> Self::Output
+        where P: Producer<Item=T>;
+}
+```
+
+Using this trait, the signature of `with_producer()` looks like this:
+
+```rust
+fn with_producer<CB: ProducerCallback<Self::Item>>(self, callback: CB) -> CB::Output;
+```
+
+Notice that this signature **never has to name the producer type** --
+there is no associated type `Producer` anymore. This is because the
+`callback()` method is generic over **all** producers `P`.
+
+The problem is that now the `||` sugar doesn't work anymore. So we
+have to manually create the callback struct, which is a mite tedious.
+So our `MapProducer` code looks like this:
+
+```rust
+impl<I, F> IndexedParallelIterator for Map<I, F>
+    where I: IndexedParallelIterator,
+          F: MapOp<I::Item>,
+{
+    fn with_producer<CB>(self, callback: CB) -> CB::Output
+        where CB: ProducerCallback<Self::Item>
+    {
+        return self.base.with_producer(Callback { callback: callback, map_op: self.map_op });
+        //                             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+        //                             Manual version of the closure sugar: create an instance
+        //                             of a struct that implements `ProducerCallback`.
+
+        // The struct declaration. Each field is something that we need to capture from the
+        // creating scope.
+        struct Callback<CB, F> {
+            callback: CB,
+            map_op: F,
+        }
+
+        // Implement the `ProducerCallback` trait. This is pure boilerplate.
+        impl<T, F, CB> ProducerCallback<T> for Callback<CB, F>
+            where F: MapOp<T>,
+                  CB: ProducerCallback<F::Output>
+        {
+            type Output = CB::Output;
+
+            fn callback<P>(self, base: P) -> CB::Output
+                where P: Producer<Item=T>
+            {
+                // The body of the closure is here:
+                let producer = MapProducer { base: base,
+                                             map_op: &self.map_op };
+                self.callback.callback(producer)
+            }
+        }
+    }
+}
+```
+
+OK, a bit tedious, but it works!
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon/src/iter/plumbing/mod.rs
@@ -0,0 +1,463 @@
+//! Traits and functions used to implement parallel iteration.  These are
+//! low-level details -- users of parallel iterators should not need to
+//! interact with them directly.  See [the `plumbing` README][r] for a high-level overview.
+//!
+//! [r]: https://github.com/rayon-rs/rayon/blob/master/src/iter/plumbing/README.md
+
+
+use join_context;
+
+use super::IndexedParallelIterator;
+
+use std::cmp;
+use std::usize;
+
+/// The `ProducerCallback` trait is a kind of generic closure,
+/// [analogous to `FnOnce`][FnOnce]. See [the corresponding section in
+/// the plumbing README][r] for more details.
+///
+/// [r]: https://github.com/rayon-rs/rayon/blob/master/src/iter/plumbing/README.md#producer-callback
+/// [FnOnce]: https://doc.rust-lang.org/std/ops/trait.FnOnce.html
+pub trait ProducerCallback<T> {
+    /// The type of value returned by this callback. Analogous to
+    /// [`Output` from the `FnOnce` trait][Output].
+    ///
+    /// [Output]: https://doc.rust-lang.org/std/ops/trait.FnOnce.html#associatedtype.Output
+    type Output;
+
+    /// Invokes the callback with the given producer as argument. The
+    /// key point of this trait is that this method is generic over
+    /// `P`, and hence implementors must be defined for any producer.
+    fn callback<P>(self, producer: P) -> Self::Output where P: Producer<Item = T>;
+}
+
+/// A `Producer` is effectively a "splittable `IntoIterator`". That
+/// is, a producer is a value which can be converted into an iterator
+/// at any time: at that point, it simply produces items on demand,
+/// like any iterator. But what makes a `Producer` special is that,
+/// *before* we convert to an iterator, we can also **split** it at a
+/// particular point using the `split_at` method. This will yield up
+/// two producers, one producing the items before that point, and one
+/// producing the items after that point (these two producers can then
+/// independently be split further, or be converted into iterators).
+/// In Rayon, this splitting is used to divide between threads.
+/// See [the `plumbing` README][r] for further details.
+///
+/// Note that each producer will always produce a fixed number of
+/// items N. However, this number N is not queryable through the API;
+/// the consumer is expected to track it.
+///
+/// NB. You might expect `Producer` to extend the `IntoIterator`
+/// trait.  However, [rust-lang/rust#20671][20671] prevents us from
+/// declaring the DoubleEndedIterator and ExactSizeIterator
+/// constraints on a required IntoIterator trait, so we inline
+/// IntoIterator here until that issue is fixed.
+///
+/// [r]: https://github.com/rayon-rs/rayon/blob/master/src/iter/plumbing/README.md
+/// [20671]: https://github.com/rust-lang/rust/issues/20671
+pub trait Producer: Send + Sized {
+    /// The type of item that will be produced by this producer once
+    /// it is converted into an iterator.
+    type Item;
+
+    /// The type of iterator we will become.
+    type IntoIter: Iterator<Item = Self::Item> + DoubleEndedIterator + ExactSizeIterator;
+
+    /// Convert `self` into an iterator; at this point, no more parallel splits
+    /// are possible.
+    fn into_iter(self) -> Self::IntoIter;
+
+    /// The minimum number of items that we will process
+    /// sequentially. Defaults to 1, which means that we will split
+    /// all the way down to a single item. This can be raised higher
+    /// using the [`with_min_len`] method, which will force us to
+    /// create sequential tasks at a larger granularity. Note that
+    /// Rayon normally attempts to adjust the size of parallel
+    /// splits automatically to reduce overhead, so this should not be
+    /// needed.
+    ///
+    /// [`with_min_len`]: ../trait.IndexedParallelIterator.html#method.with_min_len
+    fn min_len(&self) -> usize {
+        1
+    }
+
+    /// The maximum number of items that we will process
+    /// sequentially. Defaults to MAX, which means that we can choose
+    /// not to split at all. This can be lowered using the
+    /// [`with_max_len`] method, which will force us to create more
+    /// parallel tasks. Note that Rayon normally attempts to adjust
+    /// the size of parallel splits automatically to reduce
+    /// overhead, so this should not be needed.
+    ///
+    /// [`with_max_len`]: ../trait.IndexedParallelIterator.html#method.with_max_len
+    fn max_len(&self) -> usize {
+        usize::MAX
+    }
+
+    /// Split into two producers; one produces items `0..index`, the
+    /// other `index..N`. Index must be less than or equal to `N`.
+    fn split_at(self, index: usize) -> (Self, Self);
+
+    /// Iterate the producer, feeding each element to `folder`, and
+    /// stop when the folder is full (or all elements have been consumed).
+    ///
+    /// The provided implementation is sufficient for most iterables.
+    fn fold_with<F>(self, folder: F) -> F
+        where F: Folder<Self::Item>
+    {
+        folder.consume_iter(self.into_iter())
+    }
+}
+
+/// A consumer is effectively a [generalized "fold" operation][fold],
+/// and in fact each consumer will eventually be converted into a
+/// [`Folder`]. What makes a consumer special is that, like a
+/// [`Producer`], it can be **split** into multiple consumers using
+/// the `split_at` method. When a consumer is split, it produces two
+/// consumers, as well as a **reducer**. The two consumers can be fed
+/// items independently, and when they are done the reducer is used to
+/// combine their two results into one. See [the `plumbing`
+/// README][r] for further details.
+///
+/// [r]: https://github.com/rayon-rs/rayon/blob/master/src/iter/plumbing/README.md
+/// [fold]: https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.fold
+/// [`Folder`]: trait.Folder.html
+/// [`Producer`]: trait.Producer.html
+pub trait Consumer<Item>: Send + Sized {
+    /// The type of folder that this consumer can be converted into.
+    type Folder: Folder<Item, Result = Self::Result>;
+
+    /// The type of reducer that is produced if this consumer is split.
+    type Reducer: Reducer<Self::Result>;
+
+    /// The type of result that this consumer will ultimately produce.
+    type Result: Send;
+
+    /// Divide the consumer into two consumers, one processing items
+    /// `0..index` and one processing items from `index..`. Also
+    /// produces a reducer that can be used to reduce the results at
+    /// the end.
+    fn split_at(self, index: usize) -> (Self, Self, Self::Reducer);
+
+    /// Convert the consumer into a folder that can consume items
+    /// sequentially, eventually producing a final result.
+    fn into_folder(self) -> Self::Folder;
+
+    /// Hint whether this `Consumer` would like to stop processing
+    /// further items, e.g. if a search has been completed.
+    fn full(&self) -> bool;
+}
+
+/// The `Folder` trait encapsulates [the standard fold
+/// operation][fold].  It can be fed many items using the `consume`
+/// method. At the end, once all items have been consumed, it can then
+/// be converted (using `complete`) into a final value.
+///
+/// [fold]: https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.fold
+pub trait Folder<Item>: Sized {
+    /// The type of result that will ultimately be produced by the folder.
+    type Result;
+
+    /// Consume next item and return new sequential state.
+    fn consume(self, item: Item) -> Self;
+
+    /// Consume items from the iterator until full, and return new sequential state.
+    ///
+    /// This method is **optional**. The default simply iterates over
+    /// `iter`, invoking `consume` and checking after each item
+    /// whether `full` returns true (stopping early if it does).
+    ///
+    /// The main reason to override it is if you can provide a more
+    /// specialized, efficient implementation.
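+    ///
+    /// # Examples
+    ///
+    /// For illustration, a sketch of such an override (the `VecFolder` type
+    /// is invented for this example): a folder that gathers items into a
+    /// `Vec` and pushes whole iterators at once via `Extend`.
+    ///
+    /// ```
+    /// use rayon::iter::plumbing::Folder;
+    ///
+    /// struct VecFolder<T>(Vec<T>);
+    ///
+    /// impl<T> Folder<T> for VecFolder<T> {
+    ///     type Result = Vec<T>;
+    ///
+    ///     fn consume(mut self, item: T) -> Self {
+    ///         self.0.push(item);
+    ///         self
+    ///     }
+    ///
+    ///     // The override: extend with a whole iterator at once. This folder
+    ///     // is never full, so it is fine to skip the `full()` checks.
+    ///     fn consume_iter<I>(mut self, iter: I) -> Self
+    ///         where I: IntoIterator<Item = T>
+    ///     {
+    ///         self.0.extend(iter);
+    ///         self
+    ///     }
+    ///
+    ///     fn complete(self) -> Vec<T> {
+    ///         self.0
+    ///     }
+    ///
+    ///     fn full(&self) -> bool {
+    ///         false
+    ///     }
+    /// }
+    ///
+    /// let folder = VecFolder(Vec::new()).consume(1).consume_iter(2..5);
+    /// assert_eq!(folder.complete(), vec![1, 2, 3, 4]);
+    /// ```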
+    fn consume_iter<I>(mut self, iter: I) -> Self
+        where I: IntoIterator<Item = Item>
+    {
+        for item in iter {
+            self = self.consume(item);
+            if self.full() {
+                break;
+            }
+        }
+        self
+    }
+
+    /// Finish consuming items, produce final result.
+    fn complete(self) -> Self::Result;
+
+    /// Hint whether this `Folder` would like to stop processing
+    /// further items, e.g. if a search has been completed.
+    fn full(&self) -> bool;
+}
+
+/// The reducer is the final step of a `Consumer` -- after a consumer
+/// has been split into two parts, and each of those parts has been
+/// fully processed, we are left with two results. The reducer is then
+/// used to combine those two results into one. See [the `plumbing`
+/// README][r] for further details.
+///
+/// [r]: https://github.com/rayon-rs/rayon/blob/master/src/iter/plumbing/README.md
+pub trait Reducer<Result> {
+    /// Reduce two final results into one; this is executed after a
+    /// split.
+    fn reduce(self, left: Result, right: Result) -> Result;
+}
+
+/// A stateless consumer can be freely copied. These consumers can be
+/// used like regular consumers, but they also support a
+/// `split_off_left` method that does not take an index to split, but
+/// simply splits at some arbitrary point (`for_each`, for example,
+/// produces an unindexed consumer).
+pub trait UnindexedConsumer<I>: Consumer<I> {
+    /// Splits off a "left" consumer and returns it. The `self`
+    /// consumer should then be used to consume the "right" portion of
+    /// the data. (The ordering matters for methods like find_first --
+    /// values produced by the returned value are given precedence
+    /// over values produced by `self`.) Once the left and right
+    /// halves have been fully consumed, you should reduce the results
+    /// with the result of `to_reducer`.
+    fn split_off_left(&self) -> Self;
+
+    /// Creates a reducer that can be used to combine the results from
+    /// a split consumer.
+    fn to_reducer(&self) -> Self::Reducer;
+}
+
+/// A variant on `Producer` which does not know its exact length or
+/// cannot represent it in a `usize`. These producers act like
+/// ordinary producers except that they cannot be told to split at a
+/// particular point. Instead, you just ask them to split 'somewhere'.
+///
+/// (In principle, `Producer` could extend this trait; however, it
+/// does not because to do so would require producers to carry their
+/// own length with them.)
+pub trait UnindexedProducer: Send + Sized {
+    /// The type of item returned by this producer.
+    type Item;
+
+    /// Split midway into a new producer if possible, otherwise return `None`.
+    fn split(self) -> (Self, Option<Self>);
+
+    /// Iterate the producer, feeding each element to `folder`, and
+    /// stop when the folder is full (or all elements have been consumed).
+    fn fold_with<F>(self, folder: F) -> F where F: Folder<Self::Item>;
+}
+
+/// A splitter controls the policy for splitting into smaller work items.
+///
+/// Thief-splitting is an adaptive policy that starts by splitting into
+/// enough jobs for every worker thread, and then resets itself whenever a
+/// job is actually stolen into a different thread.
+#[derive(Clone, Copy)]
+struct Splitter {
+    /// The `splits` field tells us approximately how many more times we'd
+    /// like to split this job.  We always just divide it by two though, so
+    /// the effective number of pieces will be `next_power_of_two()`.
+    splits: usize,
+}
+
+impl Splitter {
+    #[inline]
+    fn new() -> Splitter {
+        Splitter {
+            splits: ::current_num_threads(),
+        }
+    }
+
+    #[inline]
+    fn try(&mut self, stolen: bool) -> bool {
+        let Splitter { splits } = *self;
+
+        if stolen {
+            // This job was stolen!  Reset the number of desired splits to the
+            // thread count, if that's more than we had remaining anyway.
+            self.splits = cmp::max(::current_num_threads(), self.splits / 2);
+            true
+        } else if splits > 0 {
+            // We have splits remaining, make it so.
+            self.splits /= 2;
+            true
+        } else {
+            // Not stolen, and no more splits -- we're done!
+            false
+        }
+    }
+}
+
+/// The length splitter is built on thief-splitting, but additionally takes
+/// into account the remaining length of the iterator.
+#[derive(Clone, Copy)]
+struct LengthSplitter {
+    inner: Splitter,
+
+    /// The smallest we're willing to divide into.  Usually this is just 1,
+    /// but you can choose a larger working size with `with_min_len()`.
+    min: usize,
+}
+
+impl LengthSplitter {
+    /// Create a new splitter based on lengths.
+    ///
+    /// The `min` is a hard lower bound.  We'll never split below that, but
+    /// of course an iterator might start out smaller already.
+    ///
+    /// The `max` is an upper bound on the working size, used to determine
+    /// the minimum number of times we need to split to get under that limit.
+    /// The adaptive algorithm may very well split even further, but never
+    /// smaller than the `min`.
+    #[inline]
+    fn new(min: usize, max: usize, len: usize) -> LengthSplitter {
+        let mut splitter = LengthSplitter {
+            inner: Splitter::new(),
+            min: cmp::max(min, 1),
+        };
+
+        // Divide the given length by the max working length to get the minimum
+        // number of splits we need to get under that max.  This rounds down,
+        // but the splitter actually gives `next_power_of_two()` pieces anyway.
+        // e.g. len 12345 / max 100 = 123 min_splits -> 128 pieces.
+        let min_splits = len / cmp::max(max, 1);
+
+        // Only update the value if it's not splitting enough already.
+        if min_splits > splitter.inner.splits {
+            splitter.inner.splits = min_splits;
+        }
+
+        splitter
+    }
+
+    #[inline]
+    fn try(&mut self, len: usize, stolen: bool) -> bool {
+        // If splitting wouldn't make us too small, try the inner splitter.
+        len / 2 >= self.min && self.inner.try(stolen)
+    }
+}
+
+/// This helper function is used to "connect" a parallel iterator to a
+/// consumer. It will convert the `par_iter` into a producer P and
+/// then pull items from P and feed them to `consumer`, splitting and
+/// creating parallel threads as needed.
+///
+/// This is useful when you are implementing your own parallel
+/// iterators: it is often used as the definition of the
+/// [`drive_unindexed`] or [`drive`] methods.
+///
+/// [`drive_unindexed`]: ../trait.ParallelIterator.html#tymethod.drive_unindexed
+/// [`drive`]: ../trait.IndexedParallelIterator.html#tymethod.drive
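+///
+/// # Examples
+///
+/// For illustration, a minimal sketch (the `ParVec` and `VecProducer` names
+/// are invented for this example) of an indexed parallel iterator over a
+/// `Vec<i32>` whose `drive` methods simply call `bridge`:
+///
+/// ```
+/// use rayon::prelude::*;
+/// use rayon::iter::plumbing::*;
+///
+/// struct ParVec(Vec<i32>);
+/// struct VecProducer(Vec<i32>);
+///
+/// impl Producer for VecProducer {
+///     type Item = i32;
+///     type IntoIter = ::std::vec::IntoIter<i32>;
+///
+///     fn into_iter(self) -> Self::IntoIter {
+///         self.0.into_iter()
+///     }
+///
+///     // Keep items `0..index`, hand `index..` to the other producer.
+///     fn split_at(mut self, index: usize) -> (Self, Self) {
+///         let right = self.0.split_off(index);
+///         (VecProducer(self.0), VecProducer(right))
+///     }
+/// }
+///
+/// impl ParallelIterator for ParVec {
+///     type Item = i32;
+///
+///     fn drive_unindexed<C>(self, consumer: C) -> C::Result
+///         where C: UnindexedConsumer<i32>
+///     {
+///         bridge(self, consumer)
+///     }
+///
+///     fn opt_len(&self) -> Option<usize> {
+///         Some(self.0.len())
+///     }
+/// }
+///
+/// impl IndexedParallelIterator for ParVec {
+///     fn len(&self) -> usize {
+///         self.0.len()
+///     }
+///
+///     fn drive<C: Consumer<i32>>(self, consumer: C) -> C::Result {
+///         bridge(self, consumer)
+///     }
+///
+///     fn with_producer<CB: ProducerCallback<i32>>(self, callback: CB) -> CB::Output {
+///         callback.callback(VecProducer(self.0))
+///     }
+/// }
+///
+/// let sum: i32 = ParVec(vec![1, 2, 3, 4]).sum();
+/// assert_eq!(sum, 10);
+/// ```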
+pub fn bridge<I, C>(par_iter: I, consumer: C) -> C::Result
+    where I: IndexedParallelIterator,
+          C: Consumer<I::Item>
+{
+    let len = par_iter.len();
+    return par_iter.with_producer(Callback {
+                                      len: len,
+                                      consumer: consumer,
+                                  });
+
+    struct Callback<C> {
+        len: usize,
+        consumer: C,
+    }
+
+    impl<C, I> ProducerCallback<I> for Callback<C>
+        where C: Consumer<I>
+    {
+        type Output = C::Result;
+        fn callback<P>(self, producer: P) -> C::Result
+            where P: Producer<Item = I>
+        {
+            bridge_producer_consumer(self.len, producer, self.consumer)
+        }
+    }
+}
+
+/// This helper function is used to "connect" a producer and a
+/// consumer. You may prefer to call [`bridge`], which wraps this
+/// function. This function will draw items from `producer` and feed
+/// them to `consumer`, splitting and creating parallel tasks when
+/// needed.
+///
+/// This is useful when you are implementing your own parallel
+/// iterators: it is often used as the definition of the
+/// [`drive_unindexed`] or [`drive`] methods.
+///
+/// [`bridge`]: fn.bridge.html
+/// [`drive_unindexed`]: ../trait.ParallelIterator.html#tymethod.drive_unindexed
+/// [`drive`]: ../trait.IndexedParallelIterator.html#tymethod.drive
+pub fn bridge_producer_consumer<P, C>(len: usize, producer: P, consumer: C) -> C::Result
+    where P: Producer,
+          C: Consumer<P::Item>
+{
+    let splitter = LengthSplitter::new(producer.min_len(), producer.max_len(), len);
+    return helper(len, false, splitter, producer, consumer);
+
+    fn helper<P, C>(len: usize,
+                    migrated: bool,
+                    mut splitter: LengthSplitter,
+                    producer: P,
+                    consumer: C)
+                    -> C::Result
+        where P: Producer,
+              C: Consumer<P::Item>
+    {
+        if consumer.full() {
+            consumer.into_folder().complete()
+        } else if splitter.try(len, migrated) {
+            let mid = len / 2;
+            let (left_producer, right_producer) = producer.split_at(mid);
+            let (left_consumer, right_consumer, reducer) = consumer.split_at(mid);
+            let (left_result, right_result) =
+                join_context(|context| {
+                    helper(mid, context.migrated(), splitter,
+                           left_producer, left_consumer)
+                }, |context| {
+                    helper(len - mid, context.migrated(), splitter,
+                           right_producer, right_consumer)
+                });
+            reducer.reduce(left_result, right_result)
+        } else {
+            producer.fold_with(consumer.into_folder()).complete()
+        }
+    }
+}
+
+/// A variant of [`bridge_producer_consumer`] where the producer is an unindexed producer.
+///
+/// [`bridge_producer_consumer`]: fn.bridge_producer_consumer.html
+pub fn bridge_unindexed<P, C>(producer: P, consumer: C) -> C::Result
+    where P: UnindexedProducer,
+          C: UnindexedConsumer<P::Item>
+{
+    let splitter = Splitter::new();
+    bridge_unindexed_producer_consumer(false, splitter, producer, consumer)
+}
+
+fn bridge_unindexed_producer_consumer<P, C>(migrated: bool,
+                                            mut splitter: Splitter,
+                                            producer: P,
+                                            consumer: C)
+                                            -> C::Result
+    where P: UnindexedProducer,
+          C: UnindexedConsumer<P::Item>
+{
+    if consumer.full() {
+        consumer.into_folder().complete()
+    } else if splitter.try(migrated) {
+        match producer.split() {
+            (left_producer, Some(right_producer)) => {
+                let (reducer, left_consumer, right_consumer) =
+                    (consumer.to_reducer(), consumer.split_off_left(), consumer);
+                let bridge = bridge_unindexed_producer_consumer;
+                let (left_result, right_result) =
+                    join_context(|context| {
+                        bridge(context.migrated(), splitter, left_producer, left_consumer)
+                    }, |context| {
+                        bridge(context.migrated(), splitter, right_producer, right_consumer)
+                    });
+                reducer.reduce(left_result, right_result)
+            }
+            (producer, None) => producer.fold_with(consumer.into_folder()).complete(),
+        }
+    } else {
+        producer.fold_with(consumer.into_folder()).complete()
+    }
+}
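The unindexed path is analogous: an adaptor constructs an `UnindexedProducer` and hands it to `bridge_unindexed`, exactly as `Repeat` does in the new `repeat.rs` below. A schematic fragment only — `MyProducer` is hypothetical:

```
// Schematic fragment, not compilable on its own.
fn drive_unindexed<C>(self, consumer: C) -> C::Result
    where C: UnindexedConsumer<Self::Item>
{
    let producer = MyProducer { /* ... */ };
    bridge_unindexed(producer, consumer)
}
```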
--- a/third_party/rust/rayon/src/iter/product.rs
+++ b/third_party/rust/rayon/src/iter/product.rs
@@ -1,10 +1,10 @@
 use super::ParallelIterator;
-use super::internal::*;
+use super::plumbing::*;
 
 use std::iter::{self, Product};
 use std::marker::PhantomData;
 
 
 pub fn product<PI, P>(pi: PI) -> P
     where PI: ParallelIterator,
           P: Send + Product<PI::Item> + Product
--- a/third_party/rust/rayon/src/iter/reduce.rs
+++ b/third_party/rust/rayon/src/iter/reduce.rs
@@ -1,10 +1,10 @@
 use super::ParallelIterator;
-use super::internal::*;
+use super::plumbing::*;
 
 pub fn reduce<PI, R, ID, T>(pi: PI, identity: ID, reduce_op: R) -> T
     where PI: ParallelIterator<Item = T>,
           R: Fn(T, T) -> T + Sync,
           ID: Fn() -> T + Sync,
           T: Send
 {
     let consumer = ReduceConsumer {
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon/src/iter/repeat.rs
@@ -0,0 +1,208 @@
+use super::plumbing::*;
+use super::*;
+use std::iter;
+use std::usize;
+
+/// Iterator adaptor for [the `repeat()` function](fn.repeat.html).
+#[derive(Debug, Clone)]
+pub struct Repeat<T: Clone + Send> {
+    element: T,
+}
+
+/// Creates a parallel iterator that endlessly repeats `elt` (by
+/// cloning it). Note that this iterator has "infinite" length, so
+/// typically you would want to use `zip` or `take` or some other
+/// means to shorten it, or consider using
+/// [the `repeatn()` function](fn.repeatn.html) instead.
+///
+/// # Examples
+///
+/// ```
+/// use rayon::prelude::*;
+/// use rayon::iter::repeat;
+/// let x: Vec<(i32, i32)> = repeat(22).zip(0..3).collect();
+/// assert_eq!(x, vec![(22, 0), (22, 1), (22, 2)]);
+/// ```
+pub fn repeat<T: Clone + Send>(elt: T) -> Repeat<T> {
+    Repeat { element: elt }
+}
+
+impl<T> Repeat<T>
+    where T: Clone + Send
+{
+    /// Take only `n` repeats of the element, similar to the general
+    /// [`take()`](trait.IndexedParallelIterator.html#method.take).
+    ///
+    /// The resulting `RepeatN` is an `IndexedParallelIterator`, allowing
+    /// more functionality than `Repeat` alone.
+    pub fn take(self, n: usize) -> RepeatN<T> {
+        repeatn(self.element, n)
+    }
+
+    /// Iterate tuples repeating the element with items from another
+    /// iterator, similar to the general
+    /// [`zip()`](trait.IndexedParallelIterator.html#method.zip).
+    pub fn zip<Z>(self, zip_op: Z) -> Zip<RepeatN<T>, Z::Iter>
+        where Z: IntoParallelIterator,
+              Z::Iter: IndexedParallelIterator
+    {
+        let z = zip_op.into_par_iter();
+        let n = z.len();
+        self.take(n).zip(z)
+    }
+}
+
+impl<T> ParallelIterator for Repeat<T>
+    where T: Clone + Send
+{
+    type Item = T;
+
+    fn drive_unindexed<C>(self, consumer: C) -> C::Result
+        where C: UnindexedConsumer<Self::Item>
+    {
+        let producer = RepeatProducer { element: self.element };
+        bridge_unindexed(producer, consumer)
+    }
+}
+
+/// Unindexed producer for `Repeat`.
+struct RepeatProducer<T: Clone + Send> {
+    element: T,
+}
+
+impl<T: Clone + Send> UnindexedProducer for RepeatProducer<T> {
+    type Item = T;
+
+    fn split(self) -> (Self, Option<Self>) {
+        (RepeatProducer { element: self.element.clone() },
+         Some(RepeatProducer { element: self.element }))
+    }
+
+    fn fold_with<F>(self, folder: F) -> F
+        where F: Folder<T>
+    {
+        folder.consume_iter(iter::repeat(self.element))
+    }
+}
+
+
+/// Iterator adaptor for [the `repeatn()` function](fn.repeatn.html).
+#[derive(Debug, Clone)]
+pub struct RepeatN<T: Clone + Send> {
+    element: T,
+    count: usize,
+}
+
+/// Creates a parallel iterator that produces `n` repeats of `elt`
+/// (by cloning it).
+///
+/// # Examples
+///
+/// ```
+/// use rayon::prelude::*;
+/// use rayon::iter::repeatn;
+/// let x: Vec<(i32, i32)> = repeatn(22, 3).zip(0..3).collect();
+/// assert_eq!(x, vec![(22, 0), (22, 1), (22, 2)]);
+/// ```
+pub fn repeatn<T: Clone + Send>(elt: T, n: usize) -> RepeatN<T> {
+    RepeatN { element: elt, count: n }
+}
+
+impl<T> ParallelIterator for RepeatN<T>
+    where T: Clone + Send
+{
+    type Item = T;
+
+    fn drive_unindexed<C>(self, consumer: C) -> C::Result
+        where C: UnindexedConsumer<Self::Item>
+    {
+        bridge(self, consumer)
+    }
+
+    fn opt_len(&self) -> Option<usize> {
+        Some(self.count)
+    }
+}
+
+impl<T> IndexedParallelIterator for RepeatN<T>
+    where T: Clone + Send
+{
+    fn drive<C>(self, consumer: C) -> C::Result
+        where C: Consumer<Self::Item>
+    {
+        bridge(self, consumer)
+    }
+
+    fn with_producer<CB>(self, callback: CB) -> CB::Output
+        where CB: ProducerCallback<Self::Item>
+    {
+        callback.callback(RepeatNProducer { element: self.element, count: self.count })
+    }
+
+    fn len(&self) -> usize {
+        self.count
+    }
+}
+
+
+/// Producer for `RepeatN`.
+struct RepeatNProducer<T: Clone + Send> {
+    element: T,
+    count: usize,
+}
+
+impl<T: Clone + Send> Producer for RepeatNProducer<T> {
+    type Item = T;
+    type IntoIter = Iter<T>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        Iter { element: self.element, count: self.count }
+    }
+
+    fn split_at(self, index: usize) -> (Self, Self) {
+        (RepeatNProducer { element: self.element.clone(), count: index },
+         RepeatNProducer { element: self.element, count: self.count - index })
+    }
+}
+
+/// Iterator for `RepeatN`.
+///
+/// This is conceptually like `std::iter::Take<std::iter::Repeat<T>>`, but
+/// we need `DoubleEndedIterator` and unconditional `ExactSizeIterator`.
+struct Iter<T: Clone> {
+    element: T,
+    count: usize,
+}
+
+impl<T: Clone> Iterator for Iter<T> {
+    type Item = T;
+
+    #[inline]
+    fn next(&mut self) -> Option<T> {
+        if self.count > 0 {
+            self.count -= 1;
+            Some(self.element.clone())
+        } else {
+            None
+        }
+    }
+
+    #[inline]
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        (self.count, Some(self.count))
+    }
+}
+
+impl<T: Clone> DoubleEndedIterator for Iter<T> {
+    #[inline]
+    fn next_back(&mut self) -> Option<T> {
+        self.next()
+    }
+}
+
+impl<T: Clone> ExactSizeIterator for Iter<T> {
+    #[inline]
+    fn len(&self) -> usize {
+        self.count
+    }
+}
--- a/third_party/rust/rayon/src/iter/rev.rs
+++ b/third_party/rust/rayon/src/iter/rev.rs
@@ -1,13 +1,19 @@
-use super::internal::*;
+use super::plumbing::*;
 use super::*;
 use std::iter;
 
+/// `Rev` is an iterator that produces elements in reverse order. This struct
+/// is created by the [`rev()`] method on [`IndexedParallelIterator`]
+///
+/// [`rev()`]: trait.IndexedParallelIterator.html#method.rev
+/// [`IndexedParallelIterator`]: trait.IndexedParallelIterator.html
 #[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
+#[derive(Debug, Clone)]
 pub struct Rev<I: IndexedParallelIterator> {
     base: I,
 }
 
 /// Create a new `Rev` iterator.
 ///
 /// NB: a free fn because it is NOT part of the end-user API.
 pub fn new<I>(base: I) -> Rev<I>
@@ -22,33 +28,33 @@ impl<I> ParallelIterator for Rev<I>
     type Item = I::Item;
 
     fn drive_unindexed<C>(self, consumer: C) -> C::Result
         where C: UnindexedConsumer<Self::Item>
     {
         bridge(self, consumer)
     }
 
-    fn opt_len(&mut self) -> Option<usize> {
+    fn opt_len(&self) -> Option<usize> {
         Some(self.len())
     }
 }
 
 impl<I> IndexedParallelIterator for Rev<I>
     where I: IndexedParallelIterator
 {
     fn drive<C: Consumer<Self::Item>>(self, consumer: C) -> C::Result {
         bridge(self, consumer)
     }
 
-    fn len(&mut self) -> usize {
+    fn len(&self) -> usize {
         self.base.len()
     }
 
-    fn with_producer<CB>(mut self, callback: CB) -> CB::Output
+    fn with_producer<CB>(self, callback: CB) -> CB::Output
         where CB: ProducerCallback<Self::Item>
     {
         let len = self.base.len();
         return self.base.with_producer(Callback {
                                            callback: callback,
                                            len: len,
                                        });
 
--- a/third_party/rust/rayon/src/iter/skip.rs
+++ b/third_party/rust/rayon/src/iter/skip.rs
@@ -1,28 +1,29 @@
-use super::internal::*;
+use super::plumbing::*;
 use super::*;
 use super::noop::NoopConsumer;
 use std::cmp::min;
 
 /// `Skip` is an iterator that skips over the first `n` elements.
-/// This struct is created by the [`skip()`] method on [`ParallelIterator`]
+/// This struct is created by the [`skip()`] method on [`IndexedParallelIterator`]
 ///
-/// [`skip()`]: trait.ParallelIterator.html#method.skip
-/// [`ParallelIterator`]: trait.ParallelIterator.html
+/// [`skip()`]: trait.IndexedParallelIterator.html#method.skip
+/// [`IndexedParallelIterator`]: trait.IndexedParallelIterator.html
 #[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
+#[derive(Debug, Clone)]
 pub struct Skip<I> {
     base: I,
     n: usize,
 }
 
 /// Create a new `Skip` iterator.
 ///
 /// NB: a free fn because it is NOT part of the end-user API.
-pub fn new<I>(mut base: I, n: usize) -> Skip<I>
+pub fn new<I>(base: I, n: usize) -> Skip<I>
     where I: IndexedParallelIterator
 {
     let n = min(base.len(), n);
     Skip { base: base, n: n }
 }
 
 impl<I> ParallelIterator for Skip<I>
     where I: IndexedParallelIterator
@@ -30,25 +31,25 @@ impl<I> ParallelIterator for Skip<I>
     type Item = I::Item;
 
     fn drive_unindexed<C>(self, consumer: C) -> C::Result
         where C: UnindexedConsumer<Self::Item>
     {
         bridge(self, consumer)
     }
 
-    fn opt_len(&mut self) -> Option<usize> {
+    fn opt_len(&self) -> Option<usize> {
         Some(self.len())
     }
 }
 
 impl<I> IndexedParallelIterator for Skip<I>
     where I: IndexedParallelIterator
 {
-    fn len(&mut self) -> usize {
+    fn len(&self) -> usize {
         self.base.len() - self.n
     }
 
     fn drive<C: Consumer<Self::Item>>(self, consumer: C) -> C::Result {
         bridge(self, consumer)
     }
 
     fn with_producer<CB>(self, callback: CB) -> CB::Output
--- a/third_party/rust/rayon/src/iter/splitter.rs
+++ b/third_party/rust/rayon/src/iter/splitter.rs
@@ -1,32 +1,43 @@
-use super::internal::*;
+use super::plumbing::*;
 use super::*;
 
+use std::fmt::{self, Debug};
+
 /// The `split` function takes arbitrary data and a closure that knows how to
 /// split it, and turns this into a `ParallelIterator`.
 pub fn split<D, S>(data: D, splitter: S) -> Split<D, S>
     where D: Send,
           S: Fn(D) -> (D, Option<D>) + Sync
 {
     Split {
         data: data,
         splitter: splitter,
     }
 }
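For readers unfamiliar with this entry point, a small usage sketch based on the signature above (the closure halves a `Range` until the pieces are small, and the resulting leaves are consumed as a `ParallelIterator`):

```
extern crate rayon;
use rayon::iter::split;
use rayon::prelude::*;

fn main() {
    // Recursively halve the range until pieces hold at most 64 items,
    // then count the elements in every leaf piece.
    let total: usize = split(0u32..1024, |range| {
        if range.end - range.start <= 64 {
            (range, None)
        } else {
            let mid = range.start + (range.end - range.start) / 2;
            (range.start..mid, Some(mid..range.end))
        }
    })
    .map(|piece| (piece.end - piece.start) as usize)
    .sum();
    assert_eq!(total, 1024);
}
```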
 
 /// `Split` is a parallel iterator using arbitrary data and a splitting function.
 /// This struct is created by the [`split()`] function.
 ///
 /// [`split()`]: fn.split.html
+#[derive(Clone)]
 pub struct Split<D, S> {
     data: D,
     splitter: S,
 }
 
+impl<D: Debug, S> Debug for Split<D, S> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_struct("Split")
+            .field("data", &self.data)
+            .finish()
+    }
+}
+
 impl<D, S> ParallelIterator for Split<D, S>
     where D: Send,
           S: Fn(D) -> (D, Option<D>) + Sync + Send
 {
     type Item = D;
 
     fn drive_unindexed<C>(self, consumer: C) -> C::Result
         where C: UnindexedConsumer<Self::Item>
--- a/third_party/rust/rayon/src/iter/sum.rs
+++ b/third_party/rust/rayon/src/iter/sum.rs
@@ -1,10 +1,10 @@
 use super::ParallelIterator;
-use super::internal::*;
+use super::plumbing::*;
 
 use std::iter::{self, Sum};
 use std::marker::PhantomData;
 
 
 pub fn sum<PI, S>(pi: PI) -> S
     where PI: ParallelIterator,
           S: Send + Sum<PI::Item> + Sum
--- a/third_party/rust/rayon/src/iter/take.rs
+++ b/third_party/rust/rayon/src/iter/take.rs
@@ -1,27 +1,28 @@
-use super::internal::*;
+use super::plumbing::*;
 use super::*;
 use std::cmp::min;
 
 /// `Take` is an iterator that iterates over the first `n` elements.
-/// This struct is created by the [`take()`] method on [`ParallelIterator`]
+/// This struct is created by the [`take()`] method on [`IndexedParallelIterator`]
 ///
-/// [`take()`]: trait.ParallelIterator.html#method.take
-/// [`ParallelIterator`]: trait.ParallelIterator.html
+/// [`take()`]: trait.IndexedParallelIterator.html#method.take
+/// [`IndexedParallelIterator`]: trait.IndexedParallelIterator.html
 #[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
+#[derive(Debug, Clone)]
 pub struct Take<I> {
     base: I,
     n: usize,
 }
 
 /// Create a new `Take` iterator.
 ///
 /// NB: a free fn because it is NOT part of the end-user API.
-pub fn new<I>(mut base: I, n: usize) -> Take<I>
+pub fn new<I>(base: I, n: usize) -> Take<I>
     where I: IndexedParallelIterator
 {
     let n = min(base.len(), n);
     Take { base: base, n: n }
 }
 
 impl<I> ParallelIterator for Take<I>
     where I: IndexedParallelIterator
@@ -29,25 +30,25 @@ impl<I> ParallelIterator for Take<I>
     type Item = I::Item;
 
     fn drive_unindexed<C>(self, consumer: C) -> C::Result
         where C: UnindexedConsumer<Self::Item>
     {
         bridge(self, consumer)
     }
 
-    fn opt_len(&mut self) -> Option<usize> {
+    fn opt_len(&self) -> Option<usize> {
         Some(self.len())
     }
 }
 
 impl<I> IndexedParallelIterator for Take<I>
     where I: IndexedParallelIterator
 {
-    fn len(&mut self) -> usize {
+    fn len(&self) -> usize {
         self.n
     }
 
     fn drive<C: Consumer<Self::Item>>(self, consumer: C) -> C::Result {
         bridge(self, consumer)
     }
 
     fn with_producer<CB>(self, callback: CB) -> CB::Output
--- a/third_party/rust/rayon/src/iter/test.rs
+++ b/third_party/rust/rayon/src/iter/test.rs
@@ -14,35 +14,35 @@ use std::usize;
 use std::sync::mpsc;
 
 fn is_indexed<T: IndexedParallelIterator>(_: T) {}
 
 #[test]
 pub fn execute() {
     let a: Vec<i32> = (0..1024).collect();
     let mut b = vec![];
-    a.par_iter().map(|&i| i + 1).collect_into(&mut b);
+    a.par_iter().map(|&i| i + 1).collect_into_vec(&mut b);
     let c: Vec<i32> = (0..1024).map(|i| i + 1).collect();
     assert_eq!(b, c);
 }
 
 #[test]
 pub fn execute_cloned() {
     let a: Vec<i32> = (0..1024).collect();
     let mut b: Vec<i32> = vec![];
-    a.par_iter().cloned().collect_into(&mut b);
+    a.par_iter().cloned().collect_into_vec(&mut b);
     let c: Vec<i32> = (0..1024).collect();
     assert_eq!(b, c);
 }
 
 #[test]
 pub fn execute_range() {
     let a = 0i32..1024;
     let mut b = vec![];
-    a.into_par_iter().map(|i| i + 1).collect_into(&mut b);
+    a.into_par_iter().map(|i| i + 1).collect_into_vec(&mut b);
     let c: Vec<i32> = (0..1024).map(|i| i + 1).collect();
     assert_eq!(b, c);
 }
 
 #[test]
 pub fn execute_unindexed_range() {
     let a = 0i64..1024;
     let b: LinkedList<i64> = a.into_par_iter().map(|i| i + 1).collect();
@@ -199,17 +199,30 @@ pub fn fold_is_full() {
 #[test]
 pub fn check_enumerate() {
     let a: Vec<usize> = (0..1024).rev().collect();
 
     let mut b = vec![];
     a.par_iter()
         .enumerate()
         .map(|(i, &x)| i + x)
-        .collect_into(&mut b);
+        .collect_into_vec(&mut b);
+    assert!(b.iter().all(|&x| x == a.len() - 1));
+}
+
+#[test]
+pub fn check_enumerate_rev() {
+    let a: Vec<usize> = (0..1024).rev().collect();
+
+    let mut b = vec![];
+    a.par_iter()
+        .enumerate()
+        .rev()
+        .map(|(i, &x)| i + x)
+        .collect_into_vec(&mut b);
     assert!(b.iter().all(|&x| x == a.len() - 1));
 }
 
 #[test]
 pub fn check_indices_after_enumerate_split() {
     let a: Vec<i32> = (0..1024).collect();
     a.par_iter().enumerate().with_producer(WithProducer);
 
@@ -241,27 +254,27 @@ pub fn check_increment() {
     assert!(a.iter().all(|&x| x == a.len() - 1));
 }
 
 #[test]
 pub fn check_skip() {
     let a: Vec<usize> = (0..1024).collect();
 
     let mut v1 = Vec::new();
-    a.par_iter().skip(16).collect_into(&mut v1);
+    a.par_iter().skip(16).collect_into_vec(&mut v1);
     let v2 = a.iter().skip(16).collect::<Vec<_>>();
     assert_eq!(v1, v2);
 
     let mut v1 = Vec::new();
-    a.par_iter().skip(2048).collect_into(&mut v1);
+    a.par_iter().skip(2048).collect_into_vec(&mut v1);
     let v2 = a.iter().skip(2048).collect::<Vec<_>>();
     assert_eq!(v1, v2);
 
     let mut v1 = Vec::new();
-    a.par_iter().skip(0).collect_into(&mut v1);
+    a.par_iter().skip(0).collect_into_vec(&mut v1);
     let v2 = a.iter().skip(0).collect::<Vec<_>>();
     assert_eq!(v1, v2);
 
     // Check that the skipped elements side effects are executed
     use std::sync::atomic::{AtomicUsize, Ordering};
     let num = AtomicUsize::new(0);
     a.par_iter()
         .map(|&n| num.fetch_add(n, Ordering::Relaxed))
@@ -270,27 +283,27 @@ pub fn check_skip() {
     assert_eq!(num.load(Ordering::Relaxed), a.iter().sum());
 }
 
 #[test]
 pub fn check_take() {
     let a: Vec<usize> = (0..1024).collect();
 
     let mut v1 = Vec::new();
-    a.par_iter().take(16).collect_into(&mut v1);
+    a.par_iter().take(16).collect_into_vec(&mut v1);
     let v2 = a.iter().take(16).collect::<Vec<_>>();
     assert_eq!(v1, v2);
 
     let mut v1 = Vec::new();
-    a.par_iter().take(2048).collect_into(&mut v1);
+    a.par_iter().take(2048).collect_into_vec(&mut v1);
     let v2 = a.iter().take(2048).collect::<Vec<_>>();
     assert_eq!(v1, v2);
 
     let mut v1 = Vec::new();
-    a.par_iter().take(0).collect_into(&mut v1);
+    a.par_iter().take(0).collect_into_vec(&mut v1);
     let v2 = a.iter().take(0).collect::<Vec<_>>();
     assert_eq!(v1, v2);
 }
 
 #[test]
 pub fn check_inspect() {
     use std::sync::atomic::{AtomicUsize, Ordering};
 
@@ -302,31 +315,31 @@ pub fn check_inspect() {
 }
 
 #[test]
 pub fn check_move() {
     let a = vec![vec![1, 2, 3]];
     let ptr = a[0].as_ptr();
 
     let mut b = vec![];
-    a.into_par_iter().collect_into(&mut b);
+    a.into_par_iter().collect_into_vec(&mut b);
 
     // a simple move means the inner vec will be completely unchanged
     assert_eq!(ptr, b[0].as_ptr());
 }
 
 #[test]
 pub fn check_drops() {
     use std::sync::atomic::{AtomicUsize, Ordering};
 
     let c = AtomicUsize::new(0);
     let a = vec![DropCounter(&c); 10];
 
     let mut b = vec![];
-    a.clone().into_par_iter().collect_into(&mut b);
+    a.clone().into_par_iter().collect_into_vec(&mut b);
     assert_eq!(c.load(Ordering::Relaxed), 0);
 
     b.into_par_iter();
     assert_eq!(c.load(Ordering::Relaxed), 10);
 
     a.into_par_iter().with_producer(Partial);
     assert_eq!(c.load(Ordering::Relaxed), 20);
 
@@ -756,16 +769,57 @@ pub fn check_zip_range() {
     let mut a: Vec<usize> = (0..1024).rev().collect();
 
     a.par_iter_mut().zip(0usize..1024).for_each(|(a, b)| *a += b);
 
     assert!(a.iter().all(|&x| x == a.len() - 1));
 }
 
 #[test]
+pub fn check_zip_eq() {
+    let mut a: Vec<usize> = (0..1024).rev().collect();
+    let b: Vec<usize> = (0..1024).collect();
+
+    a.par_iter_mut().zip_eq(&b[..]).for_each(|(a, &b)| *a += b);
+
+    assert!(a.iter().all(|&x| x == a.len() - 1));
+}
+
+#[test]
+pub fn check_zip_eq_into_par_iter() {
+    let mut a: Vec<usize> = (0..1024).rev().collect();
+    let b: Vec<usize> = (0..1024).collect();
+
+    a.par_iter_mut()
+     .zip_eq(&b) // here we rely on &b iterating over &usize
+     .for_each(|(a, &b)| *a += b);
+
+    assert!(a.iter().all(|&x| x == a.len() - 1));
+}
+
+#[test]
+pub fn check_zip_eq_into_mut_par_iter() {
+    let a: Vec<usize> = (0..1024).rev().collect();
+    let mut b: Vec<usize> = (0..1024).collect();
+
+    a.par_iter().zip_eq(&mut b).for_each(|(&a, b)| *b += a);
+
+    assert!(b.iter().all(|&x| x == b.len() - 1));
+}
+
+#[test]
+pub fn check_zip_eq_range() {
+    let mut a: Vec<usize> = (0..1024).rev().collect();
+
+    a.par_iter_mut().zip_eq(0usize..1024).for_each(|(a, b)| *a += b);
+
+    assert!(a.iter().all(|&x| x == a.len() - 1));
+}
+
+#[test]
 pub fn check_sum_filtered_ints() {
     let a: Vec<i32> = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
     let par_sum_evens: i32 = a.par_iter().filter(|&x| (x & 1) == 0).sum();
     let seq_sum_evens = a.iter()
         .filter(|&x| (x & 1) == 0)
         .map(|&x| x)
         .fold(0, |a, b| a + b);
     assert_eq!(par_sum_evens, seq_sum_evens);
@@ -810,16 +864,39 @@ pub fn check_empty_flat_map_sum() {
     assert_eq!(b, 0);
 
     // empty on the outside
     let c: i32 = empty.par_iter().flat_map(|_| a.par_iter()).sum();
     assert_eq!(c, 0);
 }
 
 #[test]
+pub fn check_flatten_vec() {
+
+    let a: Vec<i32> = (0..1024).collect();
+    let b: Vec<Vec<i32>> = vec![a.clone(), a.clone(), a.clone(), a.clone()];
+    let c: Vec<i32> = b.par_iter().flatten().cloned().collect();
+    let mut d = a.clone();
+    d.extend(&a);
+    d.extend(&a);
+    d.extend(&a);
+
+    assert_eq!(d, c);
+}
+
+#[test]
+pub fn check_flatten_vec_empty() {
+
+    let a: Vec<Vec<i32>> = vec![vec![]];
+    let b: Vec<i32> = a.par_iter().flatten().cloned().collect();
+
+    assert_eq!(vec![] as Vec<i32>, b);
+}
+
+#[test]
 pub fn check_slice_split() {
     let v: Vec<_> = (0..1000).collect();
     for m in 1..100 {
         let a: Vec<_> = v.split(|x| x % m == 0).collect();
         let b: Vec<_> = v.par_split(|x| x % m == 0).collect();
         assert_eq!(a, b);
     }
 
@@ -1054,17 +1131,17 @@ pub fn check_chain() {
         .chain(None)
         .chain(vec![5, 8, 13])
         .map(|x| (x as u8 + b'a') as char)
         .chain(vec!['x', 'y', 'z'])
         .zip((0i32..1000).into_par_iter().map(|x| -x))
         .enumerate()
         .map(|(a, (b, c))| (a, b, c))
         .chain(None)
-        .collect_into(&mut res);
+        .collect_into_vec(&mut res);
 
     assert_eq!(res,
                vec![(0, 'a', 0),
                     (1, 'b', -1),
                     (2, 'b', -2),
                     (3, 'c', -3),
                     (4, 'd', -4),
                     (5, 'f', -5),
@@ -1581,24 +1658,24 @@ fn check_extend_pairs() {
         assert_eq!(serial, parallel);
     }
 
     check::<BTreeMap<usize, i32>>();
     check::<HashMap<usize, i32>>();
 }
 
 #[test]
-fn check_unzip_into() {
+fn check_unzip_into_vecs() {
     let mut a = vec![];
     let mut b = vec![];
     (0..1024)
         .into_par_iter()
         .map(|i| i * i)
         .enumerate()
-        .unzip_into(&mut a, &mut b);
+        .unzip_into_vecs(&mut a, &mut b);
 
     let (c, d): (Vec<_>, Vec<_>) = (0..1024).map(|i| i * i).enumerate().unzip();
     assert_eq!(a, c);
     assert_eq!(b, d);
 }
 
 #[test]
 fn check_unzip() {
@@ -1657,8 +1734,243 @@ fn check_partition_map() {
         .par_split_whitespace()
         .partition_map(|s| match s.parse::<i32>() {
                            Ok(n) => Either::Left(n),
                            Err(_) => Either::Right(s),
                        });
     assert_eq!(a, vec![1, 2, 3]);
     assert_eq!(b, "abcxyz");
 }
+
+#[test]
+fn check_either() {
+    type I = ::vec::IntoIter<i32>;
+    type E = Either<I, I>;
+
+    let v: Vec<i32> = (0..1024).collect();
+
+    // try iterating the left side
+    let left: E = Either::Left(v.clone().into_par_iter());
+    assert!(left.eq(v.clone()));
+
+    // try iterating the right side
+    let right: E = Either::Right(v.clone().into_par_iter());
+    assert!(right.eq(v.clone()));
+
+    // try an indexed iterator
+    let left: E = Either::Left(v.clone().into_par_iter());
+    assert!(left.enumerate().eq(v.clone().into_par_iter().enumerate()));
+}
+
+#[test]
+fn check_either_extend() {
+    type E = Either<Vec<i32>, HashSet<i32>>;
+
+    let v: Vec<i32> = (0..1024).collect();
+
+    // try extending the left side
+    let mut left: E = Either::Left(vec![]);
+    left.par_extend(v.clone());
+    assert_eq!(left.as_ref(), Either::Left(&v));
+
+    // try extending the right side
+    let mut right: E = Either::Right(HashSet::default());
+    right.par_extend(v.clone());
+    assert_eq!(right, Either::Right(v.iter().cloned().collect()));
+}
+
+#[test]
+fn check_interleave_eq() {
+    let xs: Vec<usize> = (0..10).collect();
+    let ys: Vec<usize> = (10..20).collect();
+
+    let mut actual = vec![];
+    xs.par_iter().interleave(&ys).map(|&i| i).collect_into_vec(&mut actual);
+
+    let expected: Vec<usize> = (0..10).zip(10..20).flat_map(|(i, j)| vec![i, j].into_iter()).collect();
+    assert_eq!(expected, actual);
+}
+
+#[test]
+fn check_interleave_uneven() {
+    let cases: Vec<(Vec<usize>, Vec<usize>, Vec<usize>)> = vec![
+        ((0..9).collect(), vec![10], vec![0, 10, 1, 2, 3, 4, 5, 6, 7, 8]),
+        (vec![10], (0..9).collect(), vec![10, 0, 1, 2, 3, 4, 5, 6, 7, 8]),
+        ((0..5).collect(), (5..10).collect(), (0..5).zip(5..10).flat_map(|(i, j)| vec![i, j].into_iter()).collect()),
+        (vec![], (0..9).collect(), (0..9).collect()),
+        ((0..9).collect(), vec![], (0..9).collect()),
+        ((0..50).collect(), (50..100).collect(), (0..50).zip(50..100).flat_map(|(i, j)| vec![i, j].into_iter()).collect()),
+    ];
+
+    for (i, (xs, ys, expected)) in cases.into_iter().enumerate() {
+        let mut res = vec![];
+        xs.par_iter().interleave(&ys).map(|&i| i).collect_into_vec(&mut res);
+        assert_eq!(expected, res, "Case {} failed", i);
+
+        res.truncate(0);
+        xs.par_iter().interleave(&ys).rev().map(|&i| i).collect_into_vec(&mut res);
+        assert_eq!(expected.into_iter().rev().collect::<Vec<usize>>(), res, "Case {} reversed failed", i);
+    }
+}
+
+
+#[test]
+fn check_interleave_shortest() {
+    let cases: Vec<(Vec<usize>, Vec<usize>, Vec<usize>)> = vec![
+        ((0..9).collect(), vec![10], vec![0, 10, 1]),
+        (vec![10], (0..9).collect(), vec![10, 0]),
+        ((0..5).collect(), (5..10).collect(), (0..5).zip(5..10).flat_map(|(i, j)| vec![i, j].into_iter()).collect()),
+        (vec![], (0..9).collect(), vec![]),
+        ((0..9).collect(), vec![], vec![0]),
+        ((0..50).collect(), (50..100).collect(), (0..50).zip(50..100).flat_map(|(i, j)| vec![i, j].into_iter()).collect()),
+    ];
+
+    for (i, (xs, ys, expected)) in cases.into_iter().enumerate() {
+        let mut res = vec![];
+        xs.par_iter().interleave_shortest(&ys).map(|&i| i).collect_into_vec(&mut res);
+        assert_eq!(expected, res, "Case {} failed", i);
+
+        res.truncate(0);
+        xs.par_iter().interleave_shortest(&ys).rev().map(|&i| i).collect_into_vec(&mut res);
+        assert_eq!(expected.into_iter().rev().collect::<Vec<usize>>(), res, "Case {} reversed failed", i);
+    }
+}
+
+#[test]
+#[should_panic(expected = "chunk_size must not be zero")]
+fn check_chunks_zero_size() {
+    let _: Vec<Vec<i32>> = vec![1,2,3].into_par_iter().chunks(0).collect();
+}
+
+#[test]
+fn check_chunks_even_size() {
+    assert_eq!(vec![vec![1,2,3], vec![4,5,6], vec![7,8,9]], (1..10).into_par_iter().chunks(3).collect::<Vec<Vec<i32>>>());
+}
+
+#[test]
+fn check_chunks_empty() {
+    let v: Vec<i32> = vec![];
+    let expected: Vec<Vec<i32>> = vec![];
+    assert_eq!(expected, v.into_par_iter().chunks(2).collect::<Vec<Vec<i32>>>());
+}
+
+#[test]
+fn check_chunks_len() {
+    assert_eq!(4, (0..8).into_par_iter().chunks(2).len());
+    assert_eq!(3, (0..9).into_par_iter().chunks(3).len());
+    assert_eq!(3, (0..8).into_par_iter().chunks(3).len());
+    assert_eq!(1, (&[1]).par_iter().chunks(3).len());
+    assert_eq!(0, (0..0).into_par_iter().chunks(3).len());
+}
+
+#[test]
+fn check_chunks_uneven() {
+    let cases: Vec<(Vec<u32>, usize, Vec<Vec<u32>>)> = vec![
+        ((0..5).collect(), 3, vec![vec![0,1, 2], vec![3, 4]]),
+        (vec![1], 5, vec![vec![1]]),
+        ((0..4).collect(), 3, vec![vec![0,1, 2], vec![3]]),
+    ];
+
+    for (i, (v, n, expected)) in cases.into_iter().enumerate() {
+        let mut res: Vec<Vec<u32>> = vec![];
+        v.par_iter().chunks(n).map(|v| v.into_iter().cloned().collect()).collect_into_vec(&mut res);
+        assert_eq!(expected, res, "Case {} failed", i);
+
+        res.truncate(0);
+        v.into_par_iter().chunks(n).rev().collect_into_vec(&mut res);
+        assert_eq!(expected.into_iter().rev().collect::<Vec<Vec<u32>>>(), res, "Case {} reversed failed", i);
+    }
+}
+
+#[test]
+#[ignore] // it's quick enough on optimized 32-bit platforms, but otherwise... ... ...
+#[should_panic(expected = "overflow")]
+#[cfg(debug_assertions)]
+fn check_repeat_unbounded() {
+    // use just one thread, so we don't get infinite adaptive splitting
+    // (forever stealing and re-splitting jobs that will panic on overflow)
+    let pool = ThreadPoolBuilder::new().num_threads(1).build().unwrap();
+    pool.install(|| {
+        println!("counted {} repeats", repeat(()).count());
+    });
+}
+
+#[test]
+fn check_repeat_find_any() {
+    let even = repeat(4).find_any(|&x| x % 2 == 0);
+    assert_eq!(even, Some(4));
+}
+
+#[test]
+fn check_repeat_take() {
+    let v: Vec<_> = repeat(4).take(4).collect();
+    assert_eq!(v, [4, 4, 4, 4]);
+}
+
+#[test]
+fn check_repeat_zip() {
+    let v = vec![4, 4, 4, 4];
+    let mut fours: Vec<_> = repeat(4).zip(v).collect();
+    assert_eq!(fours.len(), 4);
+    while let Some(item) = fours.pop() {
+        assert_eq!(item, (4, 4));
+    }
+}
+
+#[test]
+fn check_repeatn_zip_left() {
+    let v = vec![4, 4, 4, 4];
+    let mut fours: Vec<_> = repeatn(4, usize::MAX).zip(v).collect();
+    assert_eq!(fours.len(), 4);
+    while let Some(item) = fours.pop() {
+        assert_eq!(item, (4, 4));
+    }
+}
+
+#[test]
+fn check_repeatn_zip_right() {
+    let v = vec![4, 4, 4, 4];
+    let mut fours: Vec<_> = v.into_par_iter().zip(repeatn(4, usize::MAX)).collect();
+    assert_eq!(fours.len(), 4);
+    while let Some(item) = fours.pop() {
+        assert_eq!(item, (4, 4));
+    }
+}
+
+#[test]
+fn check_empty() {
+    // drive_unindexed
+    let mut v: Vec<i32> = empty().filter(|_| unreachable!()).collect();
+    assert!(v.is_empty());
+
+    // drive (indexed)
+    empty().collect_into_vec(&mut v);
+    assert!(v.is_empty());
+
+    // with_producer
+    let v: Vec<(i32, i32)> = empty().zip(1..10).collect();
+    assert!(v.is_empty());
+}
+
+#[test]
+fn check_once() {
+    // drive_unindexed
+    let mut v: Vec<i32> = once(42).filter(|_| true).collect();
+    assert_eq!(v, &[42]);
+
+    // drive (indexed)
+    once(42).collect_into_vec(&mut v);
+    assert_eq!(v, &[42]);
+
+    // with_producer
+    let v: Vec<(i32, i32)> = once(42).zip(1..10).collect();
+    assert_eq!(v, &[(42, 1)]);
+}
+
+#[test]
+fn check_update() {
+    let mut v: Vec<Vec<_>> = vec![vec![1], vec![3, 2, 1]];
+    v.par_iter_mut()
+        .update(|v| v.push(0))
+        .for_each(|_|());
+
+    assert_eq!(v, vec![vec![1, 0], vec![3, 2, 1, 0]]);
+}
--- a/third_party/rust/rayon/src/iter/unzip.rs
+++ b/third_party/rust/rayon/src/iter/unzip.rs
@@ -1,9 +1,9 @@
-use super::internal::*;
+use super::plumbing::*;
 use super::*;
 
 /// This trait abstracts the different ways we can "unzip" one parallel
 /// iterator into two distinct consumers, which we can handle almost
 /// identically apart from how to process the individual items.
 trait UnzipOp<T>: Sync + Send {
     /// The type of item expected by the left consumer.
     type Left: Send;
@@ -59,17 +59,17 @@ pub fn unzip<I, A, B, FromA, FromB>(pi: 
           A: Send,
           B: Send
 {
     execute(pi, Unzip)
 }
 
 /// Unzip an `IndexedParallelIterator` into two arbitrary `Consumer`s.
 ///
-/// This is not directly public, but called by `super::collect::unzip_into`.
+/// This is not directly public, but called by `super::collect::unzip_into_vecs`.
 pub fn unzip_indexed<I, A, B, CA, CB>(pi: I, left: CA, right: CB) -> (CA::Result, CB::Result)
     where I: IndexedParallelIterator<Item = (A, B)>,
           CA: Consumer<A>,
           CB: Consumer<B>,
           A: Send,
           B: Send
 {
     let consumer = UnzipConsumer {
@@ -207,17 +207,17 @@ impl<'b, I, OP, FromB> ParallelIterator 
             self.b.par_extend(iter);
         }
         // NB: If for some reason `b.par_extend` doesn't actually drive the
         // iterator, then we won't have a result for the left side to return
         // at all.  We can't fake an arbitrary consumer's result, so panic.
         result.expect("unzip consumers didn't execute!")
     }
 
-    fn opt_len(&mut self) -> Option<usize> {
+    fn opt_len(&self) -> Option<usize> {
         if OP::indexable() {
             self.base.opt_len()
         } else {
             None
         }
     }
 }
 
@@ -251,17 +251,17 @@ impl<'r, I, OP, CA> ParallelIterator for
             right: consumer,
         };
 
         let result = self.base.drive_unindexed(consumer);
         *self.left_result = Some(result.0);
         result.1
     }
 
-    fn opt_len(&mut self) -> Option<usize> {
+    fn opt_len(&self) -> Option<usize> {
         if OP::indexable() {
             self.base.opt_len()
         } else {
             None
         }
     }
 }
 
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon/src/iter/update.rs
@@ -0,0 +1,302 @@
+use super::plumbing::*;
+use super::*;
+
+use std::fmt::{self, Debug};
+
+
+/// `Update` is an iterator that mutates the elements of an
+/// underlying iterator before they are yielded.
+///
+/// This struct is created by the [`update()`] method on [`ParallelIterator`]
+///
+/// [`update()`]: trait.ParallelIterator.html#method.update
+/// [`ParallelIterator`]: trait.ParallelIterator.html
+#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
+#[derive(Clone)]
+pub struct Update<I: ParallelIterator, F> {
+    base: I,
+    update_op: F,
+}
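A minimal usage sketch for the adaptor this file adds (assuming the `update()` method on `ParallelIterator` that the doc link above refers to):

```
extern crate rayon;
use rayon::prelude::*;

fn main() {
    // update() runs the closure on each item before yielding it.
    let v: Vec<i32> = (1..4).into_par_iter().update(|x| *x *= 10).collect();
    assert_eq!(v, vec![10, 20, 30]);
}
```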
+
+impl<I: ParallelIterator + Debug, F> Debug for Update<I, F> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_struct("Update")
+            .field("base", &self.base)
+            .finish()
+    }
+}
+
+/// Create a new `Update` iterator.
+///
+/// NB: a free fn because it is NOT part of the end-user API.
+pub fn new<I, F>(base: I, update_op: F) -> Update<I, F>
+    where I: ParallelIterator
+{
+    Update {
+        base: base,
+        update_op: update_op,
+    }
+}
+
+impl<I, F> ParallelIterator for Update<I, F>
+    where I: ParallelIterator,
+          F: Fn(&mut I::Item) + Send + Sync,
+{
+    type Item = I::Item;
+
+    fn drive_unindexed<C>(self, consumer: C) -> C::Result
+        where C: UnindexedConsumer<Self::Item>
+    {
+        let consumer1 = UpdateConsumer::new(consumer, &self.update_op);
+        self.base.drive_unindexed(consumer1)
+    }
+
+    fn opt_len(&self) -> Option<usize> {
+        self.base.opt_len()
+    }
+}
+
+impl<I, F> IndexedParallelIterator for Update<I, F>
+    where I: IndexedParallelIterator,
+          F: Fn(&mut I::Item) + Send + Sync,
+{
+    fn drive<C>(self, consumer: C) -> C::Result
+        where C: Consumer<Self::Item>
+    {
+        let consumer1 = UpdateConsumer::new(consumer, &self.update_op);
+        self.base.drive(consumer1)
+    }
+
+    fn len(&self) -> usize {
+        self.base.len()
+    }
+
+    fn with_producer<CB>(self, callback: CB) -> CB::Output
+        where CB: ProducerCallback<Self::Item>
+    {
+        return self.base.with_producer(Callback {
+                                           callback: callback,
+                                           update_op: self.update_op,
+                                       });
+
+        struct Callback<CB, F> {
+            callback: CB,
+            update_op: F,
+        }
+
+        impl<T, F, CB> ProducerCallback<T> for Callback<CB, F>
+            where CB: ProducerCallback<T>,
+                  F: Fn(&mut T) + Send + Sync,
+        {
+            type Output = CB::Output;
+
+            fn callback<P>(self, base: P) -> CB::Output
+                where P: Producer<Item = T>
+            {
+                let producer = UpdateProducer {
+                    base: base,
+                    update_op: &self.update_op,
+                };
+                self.callback.callback(producer)
+            }
+        }
+    }
+}
+
+/// ////////////////////////////////////////////////////////////////////////
+
+struct UpdateProducer<'f, P, F: 'f> {
+    base: P,
+    update_op: &'f F,
+}
+
+impl<'f, P, F> Producer for UpdateProducer<'f, P, F>
+    where P: Producer,
+          F: Fn(&mut P::Item) + Send + Sync,
+{
+    type Item = P::Item;
+    type IntoIter = UpdateSeq<P::IntoIter, &'f F>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        UpdateSeq {
+            base: self.base.into_iter(),
+            update_op: self.update_op,
+        }
+    }
+
+    fn min_len(&self) -> usize {
+        self.base.min_len()
+    }
+    fn max_len(&self) -> usize {
+        self.base.max_len()
+    }
+
+    fn split_at(self, index: usize) -> (Self, Self) {
+        let (left, right) = self.base.split_at(index);
+        (UpdateProducer {
+             base: left,
+             update_op: self.update_op,
+         },
+         UpdateProducer {
+             base: right,
+             update_op: self.update_op,
+         })
+    }
+
+    fn fold_with<G>(self, folder: G) -> G
+        where G: Folder<Self::Item>
+    {
+        let folder1 = UpdateFolder { base: folder, update_op: self.update_op, };
+        self.base.fold_with(folder1).base
+    }
+}
+
+
+/// ////////////////////////////////////////////////////////////////////////
+/// Consumer implementation
+
+struct UpdateConsumer<'f, C, F: 'f> {
+    base: C,
+    update_op: &'f F,
+}
+
+impl<'f, C, F> UpdateConsumer<'f, C, F> {
+    fn new(base: C, update_op: &'f F) -> Self {
+        UpdateConsumer {
+            base: base,
+            update_op: update_op,
+        }
+    }
+}
+
+impl<'f, T, C, F> Consumer<T> for UpdateConsumer<'f, C, F>
+    where C: Consumer<T>,
+          F: Fn(&mut T) + Send + Sync,
+{
+    type Folder = UpdateFolder<'f, C::Folder, F>;
+    type Reducer = C::Reducer;
+    type Result = C::Result;
+
+    fn split_at(self, index: usize) -> (Self, Self, Self::Reducer) {
+        let (left, right, reducer) = self.base.split_at(index);
+        (UpdateConsumer::new(left, self.update_op), UpdateConsumer::new(right, self.update_op), reducer)
+    }
+
+    fn into_folder(self) -> Self::Folder {
+        UpdateFolder {
+            base: self.base.into_folder(),
+            update_op: self.update_op,
+        }
+    }
+
+    fn full(&self) -> bool {
+        self.base.full()
+    }
+}
+
+impl<'f, T, C, F> UnindexedConsumer<T> for UpdateConsumer<'f, C, F>
+    where C: UnindexedConsumer<T>,
+          F: Fn(&mut T) + Send + Sync,
+{
+    fn split_off_left(&self) -> Self {
+        UpdateConsumer::new(self.base.split_off_left(), &self.update_op)
+    }
+
+    fn to_reducer(&self) -> Self::Reducer {
+        self.base.to_reducer()
+    }
+}
+
+struct UpdateFolder<'f, C, F: 'f> {
+    base: C,
+    update_op: &'f F,
+}
+
+impl<'f, T, C, F> Folder<T> for UpdateFolder<'f, C, F>
+    where C: Folder<T>,
+          F: Fn(&mut T)
+{
+    type Result = C::Result;
+
+    fn consume(self, mut item: T) -> Self {
+        (self.update_op)(&mut item);
+
+        UpdateFolder {
+            base: self.base.consume(item),
+            update_op: self.update_op,
+        }
+    }
+
+    fn complete(self) -> C::Result {
+        self.base.complete()
+    }
+
+    fn full(&self) -> bool {
+        self.base.full()
+    }
+}
+
+/// Standard Update adaptor, based on `itertools::adaptors::Update`
+#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
+#[derive(Debug, Clone)]
+struct UpdateSeq<I, F> {
+    base: I,
+    update_op: F,
+}
+
+impl<I, F> Iterator for UpdateSeq<I, F>
+where
+    I: Iterator,
+    F: FnMut(&mut I::Item),
+{
+    type Item = I::Item;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        if let Some(mut v) = self.base.next() {
+            (self.update_op)(&mut v);
+            Some(v)
+        } else {
+            None
+        }
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        self.base.size_hint()
+    }
+
+    fn fold<Acc, G>(self, init: Acc, mut g: G) -> Acc
+        where G: FnMut(Acc, Self::Item) -> Acc,
+    {
+        let mut f = self.update_op;
+        self.base.fold(init, move |acc, mut v| { f(&mut v); g(acc, v) })
+    }
+
+    // if possible, re-use inner iterator specializations in collect
+    fn collect<C>(self) -> C
+        where C: ::std::iter::FromIterator<Self::Item>
+    {
+        let mut f = self.update_op;
+        self.base.map(move |mut v| { f(&mut v); v }).collect()
+    }
+}
+
+impl<I, F> ExactSizeIterator for UpdateSeq<I, F>
+where
+    I: ExactSizeIterator,
+    F: FnMut(&mut I::Item),
+{}
+
+impl<I, F> DoubleEndedIterator for UpdateSeq<I, F>
+where
+    I: DoubleEndedIterator,
+    F: FnMut(&mut I::Item),
+{
+    fn next_back(&mut self) -> Option<Self::Item> {
+        if let Some(mut v) = self.base.next_back() {
+            (self.update_op)(&mut v);
+            Some(v)
+        } else {
+            None
+        }
+    }
+}
--- a/third_party/rust/rayon/src/iter/while_some.rs
+++ b/third_party/rust/rayon/src/iter/while_some.rs
@@ -1,20 +1,21 @@
 use std::sync::atomic::{AtomicBool, Ordering};
-use super::internal::*;
+use super::plumbing::*;
 use super::*;
 
 /// `WhileSome` is an iterator that yields the `Some` elements of an iterator,
 /// halting as soon as any `None` is produced.
 ///
 /// This struct is created by the [`while_some()`] method on [`ParallelIterator`]
 ///
 /// [`while_some()`]: trait.ParallelIterator.html#method.while_some
 /// [`ParallelIterator`]: trait.ParallelIterator.html
 #[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
+#[derive(Debug, Clone)]
 pub struct WhileSome<I: ParallelIterator> {
     base: I,
 }
 
 /// Create a new `WhileSome` iterator.
 ///
 /// NB: a free fn because it is NOT part of the end-user API.
 pub fn new<I>(base: I) -> WhileSome<I>
--- a/third_party/rust/rayon/src/iter/zip.rs
+++ b/third_party/rust/rayon/src/iter/zip.rs
@@ -1,14 +1,21 @@
-use super::internal::*;
+use super::plumbing::*;
 use super::*;
 use std::cmp;
 use std::iter;
 
+/// `Zip` is an iterator that zips up `a` and `b` into a single iterator
+/// of pairs. This struct is created by the [`zip()`] method on
+/// [`IndexedParallelIterator`]
+///
+/// [`zip()`]: trait.IndexedParallelIterator.html#method.zip
+/// [`IndexedParallelIterator`]: trait.IndexedParallelIterator.html
 #[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
+#[derive(Debug, Clone)]
 pub struct Zip<A: IndexedParallelIterator, B: IndexedParallelIterator> {
     a: A,
     b: B,
 }
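A small usage sketch, consistent with the `len = min(a.len(), b.len())` logic below — `zip` stops at the shorter input, like its sequential counterpart:

```
extern crate rayon;
use rayon::prelude::*;

fn main() {
    let a = vec![1, 2, 3, 4];
    let b = vec![10, 20, 30];
    let pairs: Vec<(i32, i32)> = a.par_iter().cloned().zip(b).collect();
    assert_eq!(pairs, vec![(1, 10), (2, 20), (3, 30)]);
}
```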
 
 /// Create a new `Zip` iterator.
 ///
 /// NB: a free fn because it is NOT part of the end-user API.
@@ -26,77 +33,77 @@ impl<A, B> ParallelIterator for Zip<A, B
     type Item = (A::Item, B::Item);
 
     fn drive_unindexed<C>(self, consumer: C) -> C::Result
         where C: UnindexedConsumer<Self::Item>
     {
         bridge(self, consumer)
     }
 
-    fn opt_len(&mut self) -> Option<usize> {
+    fn opt_len(&self) -> Option<usize> {
         Some(self.len())
     }
 }
 
 impl<A, B> IndexedParallelIterator for Zip<A, B>
     where A: IndexedParallelIterator,
           B: IndexedParallelIterator
 {
     fn drive<C>(self, consumer: C) -> C::Result
         where C: Consumer<Self::Item>
     {
         bridge(self, consumer)
     }
 
-    fn len(&mut self) -> usize {
+    fn len(&self) -> usize {
         cmp::min(self.a.len(), self.b.len())
     }
 
     fn with_producer<CB>(self, callback: CB) -> CB::Output
         where CB: ProducerCallback<Self::Item>
     {
         return self.a.with_producer(CallbackA {
                                         callback: callback,
                                         b: self.b,
                                     });
 
         struct CallbackA<CB, B> {
             callback: CB,
             b: B,
         }
 
-        impl<CB, A_ITEM, B> ProducerCallback<A_ITEM> for CallbackA<CB, B>
+        impl<CB, ITEM, B> ProducerCallback<ITEM> for CallbackA<CB, B>
             where B: IndexedParallelIterator,
-                  CB: ProducerCallback<(A_ITEM, B::Item)>
+                  CB: ProducerCallback<(ITEM, B::Item)>
         {
             type Output = CB::Output;
 
             fn callback<A>(self, a_producer: A) -> Self::Output
-                where A: Producer<Item = A_ITEM>
+                where A: Producer<Item = ITEM>
             {
                 return self.b.with_producer(CallbackB {
                                                 a_producer: a_producer,
                                                 callback: self.callback,
                                             });
             }
         }
 
         struct CallbackB<CB, A> {
             a_producer: A,
             callback: CB,
         }
 
-        impl<CB, A, B_ITEM> ProducerCallback<B_ITEM> for CallbackB<CB, A>
+        impl<CB, A, ITEM> ProducerCallback<ITEM> for CallbackB<CB, A>
             where A: Producer,
-                  CB: ProducerCallback<(A::Item, B_ITEM)>
+                  CB: ProducerCallback<(A::Item, ITEM)>
         {
             type Output = CB::Output;
 
             fn callback<B>(self, b_producer: B) -> Self::Output
-                where B: Producer<Item = B_ITEM>
+                where B: Producer<Item = ITEM>
             {
                 self.callback.callback(ZipProducer {
                                            a: self.a_producer,
                                            b: b_producer,
                                        })
             }
         }
 
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon/src/iter/zip_eq.rs
@@ -0,0 +1,66 @@
+
+use super::plumbing::*;
+use super::*;
+
+/// An [`IndexedParallelIterator`] that iterates over two parallel iterators of equal
+/// length simultaneously.
+///
+/// This struct is created by the [`zip_eq`] method on [`IndexedParallelIterator`],
+/// see its documentation for more information.
+///
+/// [`zip_eq`]: trait.IndexedParallelIterator.html#method.zip_eq
+/// [`IndexedParallelIterator`]: trait.IndexedParallelIterator.html
+#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
+#[derive(Debug, Clone)]
+pub struct ZipEq<A: IndexedParallelIterator, B: IndexedParallelIterator> {
+    zip: Zip<A, B>
+}
+
+/// Create a new `ZipEq` iterator.
+///
+/// NB: a free fn because it is NOT part of the end-user API.
+#[inline]
+pub fn new<A, B>(a: A, b: B) -> ZipEq<A, B>
+    where A: IndexedParallelIterator,
+          B: IndexedParallelIterator
+{
+    ZipEq { zip: super::zip::new(a, b) }
+}
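A usage sketch (assuming the `zip_eq` method on `IndexedParallelIterator` added by this patch); both inputs here have the same length, as the adaptor requires:

```
extern crate rayon;
use rayon::prelude::*;

fn main() {
    let a = vec![1, 2, 3];
    let b = vec![10, 20, 30];
    let sums: Vec<i32> = a.par_iter().zip_eq(&b).map(|(x, y)| x + y).collect();
    assert_eq!(sums, vec![11, 22, 33]);
}
```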
+
+impl<A, B> ParallelIterator for ZipEq<A, B>
+    where A: IndexedParallelIterator,
+          B: IndexedParallelIterator
+{
+    type Item = (A::Item, B::Item);
+
+    fn drive_unindexed<C>(self, consumer: C) -> C::Result
+        where C: UnindexedConsumer<Self::Item>
+    {
+        bridge(self.zip, consumer)
+    }
+
+    fn opt_len(&self) -> Option<usize> {
+        Some(self.zip.len())
+    }
+}
+
+impl<A, B> IndexedParallelIterator for ZipEq<A, B>
+    where A: IndexedParallelIterator,
+          B: IndexedParallelIterator
+{
+    fn drive<C>(self, consumer: C) -> C::Result
+        where C: Consumer<Self::Item>
+    {
+        bridge(self.zip, consumer)
+    }
+
+    fn len(&self) -> usize {
+        self.zip.len()
+    }
+
+    fn with_producer<CB>(self, callback: CB) -> CB::Output
+        where CB: ProducerCallback<Self::Item>
+    {
+        self.zip.with_producer(callback)
+    }
+}
--- a/third_party/rust/rayon/src/lib.rs
+++ b/third_party/rust/rayon/src/lib.rs
@@ -1,18 +1,91 @@
-#![allow(non_camel_case_types)] // I prefer to use ALL_CAPS for type parameters
+#![doc(html_root_url = "https://docs.rs/rayon/1.0")]
+#![deny(missing_debug_implementations)]
 #![cfg_attr(test, feature(conservative_impl_trait))]
 #![cfg_attr(test, feature(i128_type))]
+#![deny(missing_docs)]
 
-// If you're not compiling the unstable code, it often happens that
-// there is stuff that is considered "dead code" and so forth. So
-// disable warnings in that scenario.
-#![cfg_attr(not(feature = "unstable"), allow(warnings))]
+//! Data-parallelism library that makes it easy to convert sequential
+//! computations into parallel ones.
+//!
+//! Rayon is lightweight and convenient for introducing parallelism into existing
+//! code. It guarantees data-race free executions and takes advantage of
+//! parallelism when sensible, based on work-load at runtime.
+//!
+//! # How to use Rayon
+//!
+//! There are two ways to use Rayon:
+//!
+//! - **High-level parallel constructs** are the simplest way to use Rayon and also
+//!   typically the most efficient.
+//!   - [Parallel iterators][iter module] make it easy to convert a sequential iterator to
+//!     execute in parallel.
+//!   - The [`par_sort`] method sorts `&mut [T]` slices (or vectors) in parallel.
+//!   - [`par_extend`] can be used to efficiently grow collections with items produced
+//!     by a parallel iterator.
+//! - **Custom tasks** let you divide your work into parallel tasks yourself.
+//!   - [`join`] is used to subdivide a task into two pieces.
+//!   - [`scope`] creates a scope within which you can create any number of parallel tasks.
+//!   - [`ThreadPoolBuilder`] can be used to create your own thread pools or customize
+//!     the global one.
+//!
+//! [iter module]: iter
+//! [`join`]: fn.join.html
+//! [`scope`]: fn.scope.html
+//! [`par_sort`]: slice/trait.ParallelSliceMut.html#method.par_sort
+//! [`par_extend`]: iter/trait.ParallelExtend.html#tymethod.par_extend
+//! [`ThreadPoolBuilder`]: struct.ThreadPoolBuilder.html
+//!
+//! # Basic usage and the Rayon prelude
+//!
+//! First, you will need to add `rayon` to your `Cargo.toml` and put
+//! `extern crate rayon` in your main file (`lib.rs`, `main.rs`).
+//!
+//! Next, to use parallel iterators or the other high-level methods,
+//! you need to import several traits. Those traits are bundled into
+//! the module [`rayon::prelude`]. It is recommended that you import
+//! all of these traits at once by adding `use rayon::prelude::*` at
+//! the top of each module that uses Rayon methods.
+//!
+//! These traits give you access to the `par_iter` method which provides
+//! parallel implementations of many iterative functions such as [`map`],
+//! [`for_each`], [`filter`], [`fold`], and [more].
+//!
+//! [`rayon::prelude`]: prelude/index.html
+//! [`map`]: iter/trait.ParallelIterator.html#method.map
+//! [`for_each`]: iter/trait.ParallelIterator.html#method.for_each
+//! [`filter`]: iter/trait.ParallelIterator.html#method.filter
+//! [`fold`]: iter/trait.ParallelIterator.html#method.fold
+//! [more]: iter/trait.ParallelIterator.html#provided-methods
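To make the paragraph above concrete, a minimal example of the prelude-plus-`par_iter` pattern, using only methods referenced above:

```
extern crate rayon;
use rayon::prelude::*;

fn main() {
    let a = vec![1, 2, 3, 4];
    // par_iter() comes from the prelude traits; map/collect run in parallel.
    let doubled: Vec<i32> = a.par_iter().map(|&x| x * 2).collect();
    assert_eq!(doubled, vec![2, 4, 6, 8]);
}
```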
+//!
+//! # Crate Layout
+//!
+//! Rayon extends many of the types found in the standard library with
+//! parallel iterator implementations. The modules in the `rayon`
+//! crate mirror [`std`] itself: so, e.g., the `option` module in
+//! Rayon contains parallel iterators for the `Option` type, which is
+//! found in [the `option` module of `std`]. Similarly, the
+//! `collections` module in Rayon offers parallel iterator types for
+//! [the `collections` from `std`]. You will rarely need to access
+//! these submodules unless you need to name iterator types
+//! explicitly.
+//!
+//! [the `option` module of `std`]: https://doc.rust-lang.org/std/option/index.html
+//! [the `collections` from `std`]: https://doc.rust-lang.org/std/collections/index.html
+//! [`std`]: https://doc.rust-lang.org/std/
+//!
+//! # Other questions?
+//!
+//! See [the Rayon FAQ][faq].
+//!
+//! [faq]: https://github.com/rayon-rs/rayon/blob/master/FAQ.md
 
 extern crate rayon_core;
+extern crate either;
 
 #[cfg(test)]
 extern crate rand;
 
 #[macro_use]
 mod delegate;
 
 #[macro_use]
@@ -25,23 +98,20 @@ pub mod iter;
 pub mod option;
 pub mod prelude;
 pub mod range;
 pub mod result;
 pub mod slice;
 pub mod str;
 pub mod vec;
 
+mod par_either;
+mod math;
 mod test;
 
-pub use iter::split;
-
 pub use rayon_core::current_num_threads;
-pub use rayon_core::Configuration;
-pub use rayon_core::initialize;
 pub use rayon_core::ThreadPool;
-pub use rayon_core::join;
+pub use rayon_core::ThreadPoolBuilder;
+pub use rayon_core::ThreadPoolBuildError;
+pub use rayon_core::{join, join_context};
+pub use rayon_core::FnContext;
 pub use rayon_core::{scope, Scope};
 pub use rayon_core::spawn;
-#[cfg(rayon_unstable)]
-pub use rayon_core::spawn_future;
-#[cfg(rayon_unstable)]
-pub use rayon_core::RayonFuture;
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon/src/math.rs
@@ -0,0 +1,25 @@
+/// Divide `n` by `divisor`, and round up to the nearest integer
+/// if not evenly divisible.
+#[inline]
+pub fn div_round_up(n: usize, divisor: usize) -> usize {
+    debug_assert!(divisor != 0, "Division by zero!");
+    if n == 0 {
+        0
+    } else {
+        (n - 1) / divisor + 1
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn check_div_round_up() {
+        assert_eq!(0, div_round_up(0, 5));
+        assert_eq!(1, div_round_up(5, 5));
+        assert_eq!(1, div_round_up(1, 5));
+        assert_eq!(2, div_round_up(3, 2));
+        assert_eq!(usize::max_value() / 2 + 1, div_round_up(usize::max_value(), 2));
+    }
+}
--- a/third_party/rust/rayon/src/option.rs
+++ b/third_party/rust/rayon/src/option.rs
@@ -1,96 +1,142 @@
-//! This module contains the parallel iterator types for options
-//! (`Option<T>`). You will rarely need to interact with it directly
-//! unless you have need to name one of the iterator types.
+//! Parallel iterator types for [options][std::option]
+//!
+//! You will rarely need to interact with this module directly unless you need
+//! to name one of the iterator types.
+//!
+//! [std::option]: https://doc.rust-lang.org/stable/std/option/
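+//!
+//! A short illustrative sketch:
+//!
+//! ```
+//! use rayon::prelude::*;
+//!
+//! // `Some` yields its single value; `None` yields nothing.
+//! let some_sum: i32 = Some(10).into_par_iter().sum();
+//! let none_sum: i32 = None::<i32>.into_par_iter().sum();
+//! assert_eq!(some_sum, 10);
+//! assert_eq!(none_sum, 0);
+//! ```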
 
 use iter::*;
-use iter::internal::*;
+use iter::plumbing::*;
 use std;
 use std::sync::atomic::{AtomicBool, Ordering};
 
+/// A parallel iterator over the value in [`Some`] variant of an [`Option`].
+///
+/// The iterator yields one value if the [`Option`] is a [`Some`], otherwise none.
+///
+/// This `struct` is created by the [`into_par_iter`] function.
+///
+/// [`Option`]: https://doc.rust-lang.org/std/option/enum.Option.html
+/// [`Some`]: https://doc.rust-lang.org/std/option/enum.Option.html#variant.Some
+/// [`into_par_iter`]: ../iter/trait.IntoParallelIterator.html#tymethod.into_par_iter
+#[derive(Debug, Clone)]
+pub struct IntoIter<T: Send> {
+    opt: Option<T>,
+}
+
 impl<T: Send> IntoParallelIterator for Option<T> {
     type Item = T;
     type Iter = IntoIter<T>;
 
     fn into_par_iter(self) -> Self::Iter {
         IntoIter { opt: self }
     }
 }
 
-impl<'a, T: Sync> IntoParallelIterator for &'a Option<T> {
-    type Item = &'a T;
-    type Iter = Iter<'a, T>;
-
-    fn into_par_iter(self) -> Self::Iter {
-        Iter { inner: self.as_ref().into_par_iter() }
-    }
-}
-
-impl<'a, T: Send> IntoParallelIterator for &'a mut Option<T> {
-    type Item = &'a mut T;
-    type Iter = IterMut<'a, T>;
-
-    fn into_par_iter(self) -> Self::Iter {
-        IterMut { inner: self.as_mut().into_par_iter() }
-    }
-}
-
-
-/// Parallel iterator over an option
-pub struct IntoIter<T: Send> {
-    opt: Option<T>,
-}
-
 impl<T: Send> ParallelIterator for IntoIter<T> {
     type Item = T;
 
     fn drive_unindexed<C>(self, consumer: C) -> C::Result
         where C: UnindexedConsumer<Self::Item>
     {
-        bridge(self, consumer)
+        self.drive(consumer)
     }
 
-    fn opt_len(&mut self) -> Option<usize> {
+    fn opt_len(&self) -> Option<usize> {
         Some(self.len())
     }
 }
 
 impl<T: Send> IndexedParallelIterator for IntoIter<T> {
     fn drive<C>(self, consumer: C) -> C::Result
         where C: Consumer<Self::Item>
     {
-        bridge(self, consumer)
+        let mut folder = consumer.into_folder();
+        if let Some(item) = self.opt {
+            folder = folder.consume(item);
+        }
+        folder.complete()
     }
 
-    fn len(&mut self) -> usize {
+    fn len(&self) -> usize {
         match self.opt {
             Some(_) => 1,
             None => 0,
         }
     }
 
     fn with_producer<CB>(self, callback: CB) -> CB::Output
         where CB: ProducerCallback<Self::Item>
     {
         callback.callback(OptionProducer { opt: self.opt })
     }
 }
 
+/// A parallel iterator over a reference to the [`Some`] variant of an [`Option`].
+///
+/// The iterator yields one value if the [`Option`] is a [`Some`], otherwise none.
+///
+/// This `struct` is created by the [`par_iter`] function.
+///
+/// [`Option`]: https://doc.rust-lang.org/std/option/enum.Option.html
+/// [`Some`]: https://doc.rust-lang.org/std/option/enum.Option.html#variant.Some
+/// [`par_iter`]: ../iter/trait.IntoParallelRefIterator.html#tymethod.par_iter
+#[derive(Debug)]
+pub struct Iter<'a, T: Sync + 'a> {
+    inner: IntoIter<&'a T>,
+}
+
+impl<'a, T: Sync> Clone for Iter<'a, T> {
+    fn clone(&self) -> Self {
+        Iter { inner: self.inner.clone() }
+    }
+}
+
+impl<'a, T: Sync> IntoParallelIterator for &'a Option<T> {
+    type Item = &'a T;
+    type Iter = Iter<'a, T>;
+
+    fn into_par_iter(self) -> Self::Iter {
+        Iter { inner: self.as_ref().into_par_iter() }
+    }
+}
 
 delegate_indexed_iterator!{
-    #[doc = "Parallel iterator over an immutable reference to an option"]
-    Iter<'a, T> => IntoIter<&'a T>,
+    Iter<'a, T> => &'a T,
     impl<'a, T: Sync + 'a>
 }
 
 
+/// A parallel iterator over a mutable reference to the [`Some`] variant of an [`Option`].
+///
+/// The iterator yields one value if the [`Option`] is a [`Some`], otherwise none.
+///
+/// This `struct` is created by the [`par_iter_mut`] function.
+///
+/// [`Option`]: https://doc.rust-lang.org/std/option/enum.Option.html
+/// [`Some`]: https://doc.rust-lang.org/std/option/enum.Option.html#variant.Some
+/// [`par_iter_mut`]: ../iter/trait.IntoParallelRefMutIterator.html#tymethod.par_iter_mut
+#[derive(Debug)]
+pub struct IterMut<'a, T: Send + 'a> {
+    inner: IntoIter<&'a mut T>,
+}
+
+impl<'a, T: Send> IntoParallelIterator for &'a mut Option<T> {
+    type Item = &'a mut T;
+    type Iter = IterMut<'a, T>;
+
+    fn into_par_iter(self) -> Self::Iter {
+        IterMut { inner: self.as_mut().into_par_iter() }
+    }
+}
+
 delegate_indexed_iterator!{
-    #[doc = "Parallel iterator over a mutable reference to an option"]
-    IterMut<'a, T> => IntoIter<&'a mut T>,
+    IterMut<'a, T> => &'a mut T,
     impl<'a, T: Send + 'a>
 }
 
 
 /// Private producer for an option
 struct OptionProducer<T: Send> {
     opt: Option<T>,
 }
@@ -99,16 +145,17 @@ impl<T: Send> Producer for OptionProduce
     type Item = T;
     type IntoIter = std::option::IntoIter<T>;
 
     fn into_iter(self) -> Self::IntoIter {
         self.opt.into_iter()
     }
 
     fn split_at(self, index: usize) -> (Self, Self) {
+        debug_assert!(index <= 1);
         let none = OptionProducer { opt: None };
         if index == 0 {
             (none, self)
         } else {
             (self, none)
         }
     }
 }
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon/src/par_either.rs
@@ -0,0 +1,68 @@
+use iter::*;
+use iter::plumbing::*;
+use iter::Either::{Left, Right};
+
+/// `Either<L, R>` is a parallel iterator if both `L` and `R` are parallel iterators.
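+///
+/// For example, an illustrative sketch (assuming the `Either` re-export in
+/// `rayon::iter`), choosing a different parallel iterator per branch:
+///
+/// ```
+/// use rayon::prelude::*;
+/// use rayon::iter::Either;
+///
+/// let v: Vec<i32> = (0..10).collect();
+/// // Both branches yield `i32`, so the `Either` is itself a parallel iterator.
+/// let iter = if v.len() > 5 {
+///     Either::Left(v.into_par_iter())
+/// } else {
+///     Either::Right((0..10).into_par_iter())
+/// };
+/// assert_eq!(iter.sum::<i32>(), 45);
+/// ```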
+impl<L, R> ParallelIterator for Either<L, R>
+    where L: ParallelIterator,
+          R: ParallelIterator<Item = L::Item>
+{
+    type Item = L::Item;
+
+    fn drive_unindexed<C>(self, consumer: C) -> C::Result
+        where C: UnindexedConsumer<Self::Item>
+    {
+        match self {
+            Left(iter) => iter.drive_unindexed(consumer),
+            Right(iter) => iter.drive_unindexed(consumer),
+        }
+    }
+
+    fn opt_len(&self) -> Option<usize> {
+        self.as_ref().either(L::opt_len, R::opt_len)
+    }
+}
+
+impl<L, R> IndexedParallelIterator for Either<L, R>
+    where L: IndexedParallelIterator,
+          R: IndexedParallelIterator<Item = L::Item>
+{
+    fn drive<C>(self, consumer: C) -> C::Result
+        where C: Consumer<Self::Item>
+    {
+        match self {
+            Left(iter) => iter.drive(consumer),
+            Right(iter) => iter.drive(consumer),
+        }
+    }
+
+    fn len(&self) -> usize {
+        self.as_ref().either(L::len, R::len)
+    }
+
+    fn with_producer<CB>(self, callback: CB) -> CB::Output
+        where CB: ProducerCallback<Self::Item>
+    {
+        match self {
+            Left(iter) => iter.with_producer(callback),
+            Right(iter) => iter.with_producer(callback),
+        }
+    }
+}
+
+
+/// `Either<L, R>` can be extended if both `L` and `R` are parallel extendable.
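+///
+/// A short sketch (again assuming the `Either` re-export in `rayon::iter`):
+///
+/// ```
+/// use rayon::prelude::*;
+/// use rayon::iter::Either;
+///
+/// // Extend whichever collection the `Either` currently holds.
+/// let mut coll: Either<Vec<i32>, Vec<i32>> = Either::Left(Vec::new());
+/// coll.par_extend(0..5);
+/// assert_eq!(coll, Either::Left(vec![0, 1, 2, 3, 4]));
+/// ```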
+impl<L, R, T> ParallelExtend<T> for Either<L, R>
+    where L: ParallelExtend<T>,
+          R: ParallelExtend<T>,
+          T: Send
+{
+    fn par_extend<I>(&mut self, par_iter: I)
+        where I: IntoParallelIterator<Item = T>
+    {
+        match self.as_mut() {
+            Left(collection) => collection.par_extend(par_iter),
+            Right(collection) => collection.par_extend(par_iter),
+        }
+    }
+}
--- a/third_party/rust/rayon/src/private.rs
+++ b/third_party/rust/rayon/src/private.rs
@@ -1,16 +1,17 @@
 //! The public parts of this private module are used to create traits
 //! that cannot be implemented outside of our own crate.  This way we
 //! can feel free to extend those traits without worrying about it
 //! being a breaking change for other implementations.
 
 
 /// If this type is pub but not publicly reachable, third parties
 /// can't name it and can't implement traits using it.
+#[allow(missing_debug_implementations)]
 pub struct PrivateMarker;
 
 macro_rules! private_decl {
     () => {
         /// This trait is private; this method exists to make it
         /// impossible to implement outside the crate.
         #[doc(hidden)]
         fn __rayon_private__(&self) -> ::private::PrivateMarker;
--- a/third_party/rust/rayon/src/range.rs
+++ b/third_party/rust/rayon/src/range.rs
@@ -1,18 +1,52 @@
-//! This module contains the parallel iterator types for ranges
-//! (`Range<T>`); this is the type for values created by a `a..b`
-//! expression. You will rarely need to interact with it directly
-//! unless you have need to name one of the iterator types.
+//! Parallel iterator types for [ranges][std::range],
+//! the type for values created by `a..b` expressions
+//!
+//! You will rarely need to interact with this module directly unless you have
+//! need to name one of the iterator types.
+//! 
+//! ```
+//! use rayon::prelude::*;
+//! 
+//! let r = (0..100u64).into_par_iter()
+//!                    .sum();
+//! 
+//! // compare result with sequential calculation
+//! assert_eq!((0..100).sum::<u64>(), r);
+//! ```
+//!
+//! [std::range]: https://doc.rust-lang.org/core/ops/struct.Range.html
 
 use iter::*;
-use iter::internal::*;
+use iter::plumbing::*;
 use std::ops::Range;
 
-/// Parallel iterator over a range
+/// Parallel iterator over a range, implemented for all integer types.
+///
+/// **Note:** The `zip` operation requires `IndexedParallelIterator`
+/// which is not implemented for `u64` or `i64`.
+///
+/// ```
+/// use rayon::prelude::*;
+///
+/// let p = (0..25usize).into_par_iter()
+///                   .zip(0..25usize)
+///                   .filter(|&(x, y)| x % 5 == 0 || y % 5 == 0)
+///                   .map(|(x, y)| x * y)
+///                   .sum::<usize>();
+///
+/// let s = (0..25usize).zip(0..25)
+///                   .filter(|&(x, y)| x % 5 == 0 || y % 5 == 0)
+///                   .map(|(x, y)| x * y)
+///                   .sum();
+///
+/// assert_eq!(p, s);
+/// ```
+#[derive(Debug, Clone)]
 pub struct Iter<T> {
     range: Range<T>,
 }
 
 impl<T> IntoParallelIterator for Range<T>
     where Iter<T>: ParallelIterator
 {
     type Item = <Iter<T> as ParallelIterator>::Item;
@@ -44,29 +78,29 @@ macro_rules! indexed_range_impl {
             type Item = $t;
 
             fn drive_unindexed<C>(self, consumer: C) -> C::Result
                 where C: UnindexedConsumer<Self::Item>
             {
                 bridge(self, consumer)
             }
 
-            fn opt_len(&mut self) -> Option<usize> {
+            fn opt_len(&self) -> Option<usize> {
                 Some(self.len())
             }
         }
 
         impl IndexedParallelIterator for Iter<$t> {
             fn drive<C>(self, consumer: C) -> C::Result
                 where C: Consumer<Self::Item>
             {
                 bridge(self, consumer)
             }
 
-            fn len(&mut self) -> usize {
+            fn len(&self) -> usize {
                 self.range.len()
             }
 
             fn with_producer<CB>(self, callback: CB) -> CB::Output
                 where CB: ProducerCallback<Self::Item>
             {
                 callback.callback(IterProducer { range: self.range })
             }
--- a/third_party/rust/rayon/src/result.rs
+++ b/third_party/rust/rayon/src/result.rs
@@ -1,63 +1,86 @@
-//! This module contains the parallel iterator types for results
-//! (`Result<T, E>`). You will rarely need to interact with it directly
-//! unless you have need to name one of the iterator types.
+//! Parallel iterator types for [results][std::result]
+//!
+//! You will rarely need to interact with this module directly unless you need
+//! to name one of the iterator types.
+//!
+//! [std::result]: https://doc.rust-lang.org/stable/std/result/
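+//!
+//! A short illustrative sketch:
+//!
+//! ```
+//! use rayon::prelude::*;
+//!
+//! // `Ok` yields its single value; `Err` yields nothing.
+//! let ok: Result<i32, ()> = Ok(10);
+//! let err: Result<i32, ()> = Err(());
+//! assert_eq!(ok.into_par_iter().sum::<i32>(), 10);
+//! assert_eq!(err.into_par_iter().sum::<i32>(), 0);
+//! ```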
 
 use iter::*;
-use iter::internal::*;
+use iter::plumbing::*;
 use std::sync::Mutex;
 
 use option;
 
+/// Parallel iterator over a result
+#[derive(Debug, Clone)]
+pub struct IntoIter<T: Send> {
+    inner: option::IntoIter<T>,
+}
+
 impl<T: Send, E> IntoParallelIterator for Result<T, E> {
     type Item = T;
     type Iter = IntoIter<T>;
 
     fn into_par_iter(self) -> Self::Iter {
         IntoIter { inner: self.ok().into_par_iter() }
     }
 }
 
+delegate_indexed_iterator!{
+    IntoIter<T> => T,
+    impl<T: Send>
+}
+
+
+/// Parallel iterator over an immutable reference to a result
+#[derive(Debug)]
+pub struct Iter<'a, T: Sync + 'a> {
+    inner: option::IntoIter<&'a T>,
+}
+
+impl<'a, T: Sync> Clone for Iter<'a, T> {
+    fn clone(&self) -> Self {
+        Iter { inner: self.inner.clone() }
+    }
+}
+
 impl<'a, T: Sync, E> IntoParallelIterator for &'a Result<T, E> {
     type Item = &'a T;
     type Iter = Iter<'a, T>;
 
     fn into_par_iter(self) -> Self::Iter {
         Iter { inner: self.as_ref().ok().into_par_iter() }
     }
 }
 
+delegate_indexed_iterator!{
+    Iter<'a, T> => &'a T,
+    impl<'a, T: Sync + 'a>
+}
+
+
+/// Parallel iterator over a mutable reference to a result
+#[derive(Debug)]
+pub struct IterMut<'a, T: Send + 'a> {
+    inner: option::IntoIter<&'a mut T>,
+}
+
 impl<'a, T: Send, E> IntoParallelIterator for &'a mut Result<T, E> {
     type Item = &'a mut T;
     type Iter = IterMut<'a, T>;
 
     fn into_par_iter(self) -> Self::Iter {
         IterMut { inner: self.as_mut().ok().into_par_iter() }
     }
 }
 
-
 delegate_indexed_iterator!{
-    #[doc = "Parallel iterator over a result"]
-    IntoIter<T> => option::IntoIter<T>,
-    impl<T: Send>
-}
-
-
-delegate_indexed_iterator!{
-    #[doc = "Parallel iterator over an immutable reference to a result"]
-    Iter<'a, T> => option::IntoIter<&'a T>,
-    impl<'a, T: Sync + 'a>
-}
-
-
-delegate_indexed_iterator!{
-    #[doc = "Parallel iterator over a mutable reference to a result"]
-    IterMut<'a, T> => option::IntoIter<&'a mut T>,
+    IterMut<'a, T> => &'a mut T,
     impl<'a, T: Send + 'a>
 }
 
 
 /// Collect an arbitrary `Result`-wrapped collection.
 ///
 /// If any item is `Err`, then all previous `Ok` items collected are
 /// discarded, and it returns that error.  If there are multiple errors, the
@@ -71,18 +94,23 @@ impl<'a, C, T, E> FromParallelIterator<R
         where I: IntoParallelIterator<Item = Result<T, E>>
     {
         let saved_error = Mutex::new(None);
         let collection = par_iter
             .into_par_iter()
             .map(|item| match item {
                      Ok(item) => Some(item),
                      Err(error) => {
-                         if let Ok(mut guard) = saved_error.lock() {
-                             *guard = Some(error);
+                         // We don't need a blocking `lock()`, as anybody
+                         // else holding the lock will also be writing
+                         // `Some(error)`, and then ours is irrelevant.
+                         if let Ok(mut guard) = saved_error.try_lock() {
+                             if guard.is_none() {
+                                 *guard = Some(error);
+                             }
                          }
                          None
                      }
                  })
             .while_some()
             .collect();
 
         match saved_error.into_inner().unwrap() {
--- a/third_party/rust/rayon/src/slice/mod.rs
+++ b/third_party/rust/rayon/src/slice/mod.rs
@@ -1,54 +1,88 @@
-//! This module contains the parallel iterator types for slices
-//! (`[T]`). You will rarely need to interact with it directly unless
-//! you have need to name one of those types.
+//! Parallel iterator types for [slices][std::slice]
+//!
+//! You will rarely need to interact with this module directly unless you need
+//! to name one of the iterator types.
+//!
+//! [std::slice]: https://doc.rust-lang.org/stable/std/slice/
 
 mod mergesort;
 mod quicksort;
 
 mod test;
 
 use iter::*;
-use iter::internal::*;
+use iter::plumbing::*;
 use self::mergesort::par_mergesort;
 use self::quicksort::par_quicksort;
 use split_producer::*;
 use std::cmp;
 use std::cmp::Ordering;
+use std::fmt::{self, Debug};
+
+use super::math::div_round_up;
 
 /// Parallel extensions for slices.
 pub trait ParallelSlice<T: Sync> {
     /// Returns a plain slice, which is used to implement the rest of the
     /// parallel methods.
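+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch (assuming `ParallelSlice` is exported via the prelude):
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    /// let v = vec![1, 2, 3];
+    /// assert_eq!(v.as_parallel_slice(), &[1, 2, 3][..]);
+    /// ```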
     fn as_parallel_slice(&self) -> &[T];
 
     /// Returns a parallel iterator over subslices separated by elements that
     /// match the separator.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    /// let smallest = [1, 2, 3, 0, 2, 4, 8, 0, 3, 6, 9]
+    ///     .par_split(|i| *i == 0)
+    ///     .map(|numbers| numbers.iter().min().unwrap())
+    ///     .min();
+    /// assert_eq!(Some(&1), smallest);
+    /// ```
     fn par_split<P>(&self, separator: P) -> Split<T, P>
         where P: Fn(&T) -> bool + Sync + Send
     {
         Split {
             slice: self.as_parallel_slice(),
             separator: separator,
         }
     }
 
     /// Returns a parallel iterator over all contiguous windows of
     /// length `window_size`. The windows overlap.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    /// let windows: Vec<_> = [1, 2, 3].par_windows(2).collect();
+    /// assert_eq!(vec![[1, 2], [2, 3]], windows);
+    /// ```
     fn par_windows(&self, window_size: usize) -> Windows<T> {
         Windows {
             window_size: window_size,
             slice: self.as_parallel_slice(),
         }
     }
 
     /// Returns a parallel iterator over at most `chunk_size` elements of
     /// `self` at a time. The chunks do not overlap.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    /// let chunks: Vec<_> = [1, 2, 3, 4, 5].par_chunks(2).collect();
+    /// assert_eq!(chunks, vec![&[1, 2][..], &[3, 4], &[5]]);
+    /// ```
     fn par_chunks(&self, chunk_size: usize) -> Chunks<T> {
+        assert!(chunk_size != 0, "chunk_size must not be zero");
         Chunks {
             chunk_size: chunk_size,
             slice: self.as_parallel_slice(),
         }
     }
 }
 
 impl<T: Sync> ParallelSlice<T> for [T] {
@@ -62,28 +96,49 @@ impl<T: Sync> ParallelSlice<T> for [T] {
 /// Parallel extensions for mutable slices.
 pub trait ParallelSliceMut<T: Send> {
     /// Returns a plain mutable slice, which is used to implement the rest of
     /// the parallel methods.
     fn as_parallel_slice_mut(&mut self) -> &mut [T];
 
     /// Returns a parallel iterator over mutable subslices separated by
     /// elements that match the separator.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    /// let mut array = [1, 2, 3, 0, 2, 4, 8, 0, 3, 6, 9];
+    /// array.par_split_mut(|i| *i == 0)
+    ///      .for_each(|slice| slice.reverse());
+    /// assert_eq!(array, [3, 2, 1, 0, 8, 4, 2, 0, 9, 6, 3]);
+    /// ```
     fn par_split_mut<P>(&mut self, separator: P) -> SplitMut<T, P>
         where P: Fn(&T) -> bool + Sync + Send
     {
         SplitMut {
             slice: self.as_parallel_slice_mut(),
             separator: separator,
         }
     }
 
     /// Returns a parallel iterator over at most `chunk_size` elements of
     /// `self` at a time. The chunks are mutable and do not overlap.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    /// let mut array = [1, 2, 3, 4, 5];
+    /// array.par_chunks_mut(2)
+    ///      .for_each(|slice| slice.reverse());
+    /// assert_eq!(array, [2, 1, 4, 3, 5]);
+    /// ```
     fn par_chunks_mut(&mut self, chunk_size: usize) -> ChunksMut<T> {
+        assert!(chunk_size != 0, "chunk_size must not be zero");
         ChunksMut {
             chunk_size: chunk_size,
             slice: self.as_parallel_slice_mut(),
         }
     }
 
     /// Sorts the slice in parallel.
     ///
@@ -102,16 +157,27 @@ pub trait ParallelSliceMut<T: Send> {
     ///
     /// Also, it allocates temporary storage the same size as `self`, but for very short slices a
     /// non-allocating insertion sort is used instead.
     ///
     /// In order to sort the slice in parallel, the slice is first divided into smaller chunks and
     /// all chunks are sorted in parallel. Then, adjacent chunks that together form non-descending
     /// or descending runs are concatenated. Finally, the remaining chunks are merged together using
     /// parallel subdivision of chunks and parallel merge operation.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    ///
+    /// let mut v = [-5, 4, 1, -3, 2];
+    ///
+    /// v.par_sort();
+    /// assert_eq!(v, [-5, -3, 1, 2, 4]);
+    /// ```
     fn par_sort(&mut self)
     where
         T: Ord,
     {
         par_mergesort(self.as_parallel_slice_mut(), |a, b| a.lt(b));
     }
 
     /// Sorts the slice in parallel with a comparator function.
@@ -131,16 +197,30 @@ pub trait ParallelSliceMut<T: Send> {
     ///
     /// Also, it allocates temporary storage the same size as `self`, but for very short slices a
     /// non-allocating insertion sort is used instead.
     ///
     /// In order to sort the slice in parallel, the slice is first divided into smaller chunks and
     /// all chunks are sorted in parallel. Then, adjacent chunks that together form non-descending
     /// or descending runs are concatenated. Finally, the remaining chunks are merged together using
     /// parallel subdivision of chunks and parallel merge operation.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    ///
+    /// let mut v = [5, 4, 1, 3, 2];
+    /// v.par_sort_by(|a, b| a.cmp(b));
+    /// assert_eq!(v, [1, 2, 3, 4, 5]);
+    ///
+    /// // reverse sorting
+    /// v.par_sort_by(|a, b| b.cmp(a));
+    /// assert_eq!(v, [5, 4, 3, 2, 1]);
+    /// ```
     fn par_sort_by<F>(&mut self, compare: F)
     where
         F: Fn(&T, &T) -> Ordering + Sync,
     {
         par_mergesort(self.as_parallel_slice_mut(), |a, b| compare(a, b) == Ordering::Less);
     }
 
     /// Sorts the slice in parallel with a key extraction function.
@@ -160,16 +240,27 @@ pub trait ParallelSliceMut<T: Send> {
     ///
     /// Also, it allocates temporary storage the same size as `self`, but for very short slices a
     /// non-allocating insertion sort is used instead.
     ///
     /// In order to sort the slice in parallel, the slice is first divided into smaller chunks and
     /// all chunks are sorted in parallel. Then, adjacent chunks that together form non-descending
     /// or descending runs are concatenated. Finally, the remaining chunks are merged together using
     /// parallel subdivision of chunks and parallel merge operation.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    ///
+    /// let mut v = [-5i32, 4, 1, -3, 2];
+    ///
+    /// v.par_sort_by_key(|k| k.abs());
+    /// assert_eq!(v, [1, 2, -3, 4, -5]);
+    /// ```
     fn par_sort_by_key<B, F>(&mut self, f: F)
     where
         B: Ord,
         F: Fn(&T) -> B + Sync,
     {
         par_mergesort(self.as_parallel_slice_mut(), |a, b| f(a).lt(&f(b)));
     }
 
@@ -188,16 +279,27 @@ pub trait ParallelSliceMut<T: Send> {
     /// It is generally faster than stable sorting, except in a few special cases, e.g. when the
     /// slice consists of several concatenated sorted sequences.
     ///
     /// All quicksorts work in two stages: partitioning into two halves followed by recursive
     /// calls. The partitioning phase is sequential, but the two recursive calls are performed in
     /// parallel.
     ///
     /// [pdqsort]: https://github.com/orlp/pdqsort
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    ///
+    /// let mut v = [-5, 4, 1, -3, 2];
+    ///
+    /// v.par_sort_unstable();
+    /// assert_eq!(v, [-5, -3, 1, 2, 4]);
+    /// ```
     fn par_sort_unstable(&mut self)
     where
         T: Ord,
     {
         par_quicksort(self.as_parallel_slice_mut(), |a, b| a.lt(b));
     }
 
     /// Sorts the slice in parallel with a comparator function, but may not preserve the order of
@@ -216,16 +318,30 @@ pub trait ParallelSliceMut<T: Send> {
     /// It is generally faster than stable sorting, except in a few special cases, e.g. when the
     /// slice consists of several concatenated sorted sequences.
     ///
     /// All quicksorts work in two stages: partitioning into two halves followed by recursive
     /// calls. The partitioning phase is sequential, but the two recursive calls are performed in
     /// parallel.
     ///
     /// [pdqsort]: https://github.com/orlp/pdqsort
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    ///
+    /// let mut v = [5, 4, 1, 3, 2];
+    /// v.par_sort_unstable_by(|a, b| a.cmp(b));
+    /// assert_eq!(v, [1, 2, 3, 4, 5]);
+    ///
+    /// // reverse sorting
+    /// v.par_sort_unstable_by(|a, b| b.cmp(a));
+    /// assert_eq!(v, [5, 4, 3, 2, 1]);
+    /// ```
     fn par_sort_unstable_by<F>(&mut self, compare: F)
     where
         F: Fn(&T, &T) -> Ordering + Sync,
     {
         par_quicksort(self.as_parallel_slice_mut(), |a, b| compare(a, b) == Ordering::Less);
     }
 
     /// Sorts the slice in parallel with a key extraction function, but may not preserve the order
@@ -244,16 +360,27 @@ pub trait ParallelSliceMut<T: Send> {
     /// It is generally faster than stable sorting, except in a few special cases, e.g. when the
     /// slice consists of several concatenated sorted sequences.
     ///
     /// All quicksorts work in two stages: partitioning into two halves followed by recursive
     /// calls. The partitioning phase is sequential, but the two recursive calls are performed in
     /// parallel.
     ///
     /// [pdqsort]: https://github.com/orlp/pdqsort
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    ///
+    /// let mut v = [-5i32, 4, 1, -3, 2];
+    ///
+    /// v.par_sort_unstable_by_key(|k| k.abs());
+    /// assert_eq!(v, [1, 2, -3, 4, -5]);
+    /// ```
     fn par_sort_unstable_by_key<B, F>(&mut self, f: F)
     where
         B: Ord,
         F: Fn(&T) -> B + Sync,
     {
         par_quicksort(self.as_parallel_slice_mut(), |a, b| f(a).lt(&f(b)));
     }
 }
@@ -299,42 +426,49 @@ impl<'data, T: Send + 'data> IntoParalle
 
     fn into_par_iter(self) -> Self::Iter {
         IterMut { slice: self }
     }
 }
 
 
 /// Parallel iterator over immutable items in a slice
+#[derive(Debug)]
 pub struct Iter<'data, T: 'data + Sync> {
     slice: &'data [T],
 }
 
+impl<'data, T: Sync> Clone for Iter<'data, T> {
+    fn clone(&self) -> Self {
+        Iter { ..*self }
+    }
+}
+
 impl<'data, T: Sync + 'data> ParallelIterator for Iter<'data, T> {
     type Item = &'data T;
 
     fn drive_unindexed<C>(self, consumer: C) -> C::Result
         where C: UnindexedConsumer<Self::Item>
     {
         bridge(self, consumer)
     }
 
-    fn opt_len(&mut self) -> Option<usize> {
+    fn opt_len(&self) -> Option<usize> {
         Some(self.len())
     }
 }
 
 impl<'data, T: Sync + 'data> IndexedParallelIterator for Iter<'data, T> {
     fn drive<C>(self, consumer: C) -> C::Result
         where C: Consumer<Self::Item>
     {
         bridge(self, consumer)
     }
 
-    fn len(&mut self) -> usize {
+    fn len(&self) -> usize {
         self.slice.len()
     }
 
     fn with_producer<CB>(self, callback: CB) -> CB::Output
         where CB: ProducerCallback<Self::Item>
     {
         callback.callback(IterProducer { slice: self.slice })
     }
@@ -355,44 +489,51 @@ impl<'data, T: 'data + Sync> Producer fo
     fn split_at(self, index: usize) -> (Self, Self) {
         let (left, right) = self.slice.split_at(index);
         (IterProducer { slice: left }, IterProducer { slice: right })
     }
 }
 
 
 /// Parallel iterator over immutable non-overlapping chunks of a slice
+#[derive(Debug)]
 pub struct Chunks<'data, T: 'data + Sync> {
     chunk_size: usize,
     slice: &'data [T],
 }
 
+impl<'data, T: Sync> Clone for Chunks<'data, T> {
+    fn clone(&self) -> Self {
+        Chunks { ..*self }
+    }
+}
+
 impl<'data, T: Sync + 'data> ParallelIterator for Chunks<'data, T> {
     type Item = &'data [T];
 
     fn drive_unindexed<C>(self, consumer: C) -> C::Result
         where C: UnindexedConsumer<Self::Item>
     {
         bridge(self, consumer)
     }
 
-    fn opt_len(&mut self) -> Option<usize> {
+    fn opt_len(&self) -> Option<usize> {
         Some(self.len())
     }
 }
 
 impl<'data, T: Sync + 'data> IndexedParallelIterator for Chunks<'data, T> {
     fn drive<C>(self, consumer: C) -> C::Result
         where C: Consumer<Self::Item>
     {
         bridge(self, consumer)
     }
 
-    fn len(&mut self) -> usize {
-        (self.slice.len() + (self.chunk_size - 1)) / self.chunk_size
+    fn len(&self) -> usize {
+        div_round_up(self.slice.len(), self.chunk_size)
     }
 
     fn with_producer<CB>(self, callback: CB) -> CB::Output
         where CB: ProducerCallback<Self::Item>
     {
         callback.callback(ChunksProducer {
                               chunk_size: self.chunk_size,
                               slice: self.slice,
@@ -424,43 +565,50 @@ impl<'data, T: 'data + Sync> Producer fo
              chunk_size: self.chunk_size,
              slice: right,
          })
     }
 }
 
 
 /// Parallel iterator over immutable overlapping windows of a slice
+#[derive(Debug)]
 pub struct Windows<'data, T: 'data + Sync> {
     window_size: usize,
     slice: &'data [T],
 }
 
+impl<'data, T: Sync> Clone for Windows<'data, T> {
+    fn clone(&self) -> Self {
+        Windows { ..*self }
+    }
+}
+
 impl<'data, T: Sync + 'data> ParallelIterator for Windows<'data, T> {
     type Item = &'data [T];
 
     fn drive_unindexed<C>(self, consumer: C) -> C::Result
         where C: UnindexedConsumer<Self::Item>
     {
         bridge(self, consumer)
     }
 
-    fn opt_len(&mut self) -> Option<usize> {
+    fn opt_len(&self) -> Option<usize> {
         Some(self.len())
     }
 }
 
 impl<'data, T: Sync + 'data> IndexedParallelIterator for Windows<'data, T> {
     fn drive<C>(self, consumer: C) -> C::Result
         where C: Consumer<Self::Item>
     {
         bridge(self, consumer)
     }
 
-    fn len(&mut self) -> usize {
+    fn len(&self) -> usize {
         assert!(self.window_size >= 1);
         self.slice.len().saturating_sub(self.window_size - 1)
     }
 
     fn with_producer<CB>(self, callback: CB) -> CB::Output
         where CB: ProducerCallback<Self::Item>
     {
         callback.callback(WindowsProducer {
@@ -495,42 +643,43 @@ impl<'data, T: 'data + Sync> Producer fo
              window_size: self.window_size,
              slice: right,
          })
     }
 }
 
 
 /// Parallel iterator over mutable items in a slice
+#[derive(Debug)]
 pub struct IterMut<'data, T: 'data + Send> {
     slice: &'data mut [T],
 }
 
 impl<'data, T: Send + 'data> ParallelIterator for IterMut<'data, T> {
     type Item = &'data mut T;
 
     fn drive_unindexed<C>(self, consumer: C) -> C::Result
         where C: UnindexedConsumer<Self::Item>
     {
         bridge(self, consumer)
     }
 
-    fn opt_len(&mut self) -> Option<usize> {
+    fn opt_len(&self) -> Option<usize> {
         Some(self.len())
     }
 }
 
 impl<'data, T: Send + 'data> IndexedParallelIterator for IterMut<'data, T> {
     fn drive<C>(self, consumer: C) -> C::Result
         where C: Consumer<Self::Item>
     {
         bridge(self, consumer)
     }
 
-    fn len(&mut self) -> usize {
+    fn len(&self) -> usize {
         self.slice.len()
     }
 
     fn with_producer<CB>(self, callback: CB) -> CB::Output
         where CB: ProducerCallback<Self::Item>
     {
         callback.callback(IterMutProducer { slice: self.slice })
     }
@@ -551,44 +700,45 @@ impl<'data, T: 'data + Send> Producer fo
     fn split_at(self, index: usize) -> (Self, Self) {
         let (left, right) = self.slice.split_at_mut(index);
         (IterMutProducer { slice: left }, IterMutProducer { slice: right })
     }
 }
 
 
 /// Parallel iterator over mutable non-overlapping chunks of a slice
+#[derive(Debug)]
 pub struct ChunksMut<'data, T: 'data + Send> {
     chunk_size: usize,
     slice: &'data mut [T],
 }
 
 impl<'data, T: Send + 'data> ParallelIterator for ChunksMut<'data, T> {
     type Item = &'data mut [T];
 
     fn drive_unindexed<C>(self, consumer: C) -> C::Result
         where C: UnindexedConsumer<Self::Item>
     {
         bridge(self, consumer)
     }
 
-    fn opt_len(&mut self) -> Option<usize> {
+    fn opt_len(&self) -> Option<usize> {
         Some(self.len())
     }
 }
 
 impl<'data, T: Send + 'data> IndexedParallelIterator for ChunksMut<'data, T> {
     fn drive<C>(self, consumer: C) -> C::Result
         where C: Consumer<Self::Item>
     {
         bridge(self, consumer)
     }
 
-    fn len(&mut self) -> usize {
-        (self.slice.len() + (self.chunk_size - 1)) / self.chunk_size
+    fn len(&self) -> usize {
+        div_round_up(self.slice.len(), self.chunk_size)
     }
 
     fn with_producer<CB>(self, callback: CB) -> CB::Output
         where CB: ProducerCallback<Self::Item>
     {
         callback.callback(ChunksMutProducer {
                               chunk_size: self.chunk_size,
                               slice: self.slice,
@@ -625,16 +775,30 @@ impl<'data, T: 'data + Send> Producer fo
 
 
 /// Parallel iterator over slices separated by a predicate
 pub struct Split<'data, T: 'data, P> {
     slice: &'data [T],
     separator: P,
 }
 
+impl<'data, T, P: Clone> Clone for Split<'data, T, P> {
+    fn clone(&self) -> Self {
+        Split { separator: self.separator.clone(), ..*self }
+    }
+}
+
+impl<'data, T: Debug, P> Debug for Split<'data, T, P> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_struct("Split")
+            .field("slice", &self.slice)
+            .finish()
+    }
+}
+
 impl<'data, T, P> ParallelIterator for Split<'data, T, P>
     where P: Fn(&T) -> bool + Sync + Send,
           T: Sync
 {
     type Item = &'data [T];
 
     fn drive_unindexed<C>(self, consumer: C) -> C::Result
         where C: UnindexedConsumer<Self::Item>
@@ -683,16 +847,24 @@ impl<'data, T, P> Fissile<P> for &'data 
 
 
 /// Parallel iterator over mutable slices separated by a predicate
 pub struct SplitMut<'data, T: 'data, P> {
     slice: &'data mut [T],
     separator: P,
 }
 
+impl<'data, T: Debug, P> Debug for SplitMut<'data, T, P> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_struct("SplitMut")
+            .field("slice", &self.slice)
+            .finish()
+    }
+}
+
 impl<'data, T, P> ParallelIterator for SplitMut<'data, T, P>
     where P: Fn(&T) -> bool + Sync + Send,
           T: Send
 {
     type Item = &'data mut [T];
 
     fn drive_unindexed<C>(self, consumer: C) -> C::Result
         where C: UnindexedConsumer<Self::Item>
--- a/third_party/rust/rayon/src/slice/test.rs
+++ b/third_party/rust/rayon/src/slice/test.rs
@@ -1,13 +1,12 @@
 #![cfg(test)]
 
 use rand::{thread_rng, Rng};
 use std::cmp::Ordering::{Equal, Greater, Less};
-use std::mem;
 use super::ParallelSliceMut;
 
 macro_rules! sort {
     ($f:ident, $name:ident) => {
         #[test]
         fn $name() {
             let mut rng = thread_rng();
 
--- a/third_party/rust/rayon/src/split_producer.rs
+++ b/third_party/rust/rayon/src/split_producer.rs
@@ -1,13 +1,13 @@
 //! Common splitter for strings and slices
 //!
 //! This module is private, so these items are effectively `pub(super)`
 
-use iter::internal::{UnindexedProducer, Folder};
+use iter::plumbing::{UnindexedProducer, Folder};
 
 /// Common producer for splitting on a predicate.
 pub struct SplitProducer<'p, P: 'p, V> {
     data: V,
     separator: &'p P,
 
     /// Marks the endpoint beyond which we've already found no separators.
     tail: usize,
--- a/third_party/rust/rayon/src/str.rs
+++ b/third_party/rust/rayon/src/str.rs
@@ -1,24 +1,25 @@
-//! This module contains extension methods for `String` that expose
-//! parallel iterators, such as `par_split_whitespace()`. You will
-//! rarely need to interact with it directly, since if you add `use
-//! rayon::prelude::*` to your file, that will include the helper
-//! traits defined in this module.
+//! Parallel iterator types for [strings][std::str]
+//!
+//! You will rarely need to interact with this module directly unless you need
+//! to name one of the iterator types.
 //!
 //! Note: [`ParallelString::par_split()`] and [`par_split_terminator()`]
 //! reference a `Pattern` trait which is not visible outside this crate.
 //! This trait is intentionally kept private, for use only by Rayon itself.
 //! It is implemented for `char` and any `F: Fn(char) -> bool + Sync + Send`.
 //!
 //! [`ParallelString::par_split()`]: trait.ParallelString.html#method.par_split
 //! [`par_split_terminator()`]: trait.ParallelString.html#method.par_split_terminator
+//!
+//! [std::str]: https://doc.rust-lang.org/stable/std/str/
 
 use iter::*;
-use iter::internal::*;
+use iter::plumbing::*;
 use split_producer::*;
 
 
 /// Test if a byte is the start of a UTF-8 character.
 /// (extracted from `str::is_char_boundary`)
 #[inline]
 fn is_char_boundary(b: u8) -> bool {
     // This is bit magic equivalent to: b < 128 || b >= 192
@@ -45,53 +46,103 @@ fn find_char_midpoint(chars: &str) -> us
 
 /// Parallel extensions for strings.
 pub trait ParallelString {
     /// Returns a plain string slice, which is used to implement the rest of
     /// the parallel methods.
     fn as_parallel_string(&self) -> &str;
 
     /// Returns a parallel iterator over the characters of a string.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    /// let max = "hello".par_chars().max_by_key(|c| *c as i32);
+    /// assert_eq!(Some('o'), max);
+    /// ```
     fn par_chars(&self) -> Chars {
         Chars { chars: self.as_parallel_string() }
     }
 
     /// Returns a parallel iterator over substrings separated by a
     /// given character or predicate, similar to `str::split`.
     ///
     /// Note: the `Pattern` trait is private, for use only by Rayon itself.
     /// It is implemented for `char` and any `F: Fn(char) -> bool + Sync + Send`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    /// let total = "1, 2, buckle, 3, 4, door"
+    ///    .par_split(',')
+    ///    .filter_map(|s| s.trim().parse::<i32>().ok())
+    ///    .sum();
+    /// assert_eq!(10, total);
+    /// ```
     fn par_split<P: Pattern>(&self, separator: P) -> Split<P> {
         Split::new(self.as_parallel_string(), separator)
     }
 
     /// Returns a parallel iterator over substrings terminated by a
     /// given character or predicate, similar to `str::split_terminator`.
     /// It's equivalent to `par_split`, except it doesn't produce an empty
     /// substring after a trailing terminator.
     ///
     /// Note: the `Pattern` trait is private, for use only by Rayon itself.
     /// It is implemented for `char` and any `F: Fn(char) -> bool + Sync + Send`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    /// let parts: Vec<_> = "((1 + 3) * 2)"
+    ///     .par_split_terminator(|c| c == '(' || c == ')')
+    ///     .collect();
+    /// assert_eq!(vec!["", "", "1 + 3", " * 2"], parts);
+    /// ```
     fn par_split_terminator<P: Pattern>(&self, terminator: P) -> SplitTerminator<P> {
         SplitTerminator::new(self.as_parallel_string(), terminator)
     }
 
     /// Returns a parallel iterator over the lines of a string, ending with an
     /// optional carriage return and with a newline (`\r\n` or just `\n`).
     /// The final line ending is optional, and line endings are not included in
     /// the output strings.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    /// let lengths: Vec<_> = "hello world\nfizbuzz"
+    ///     .par_lines()
+    ///     .map(|l| l.len())
+    ///     .collect();
+    /// assert_eq!(vec![11, 7], lengths);
+    /// ```
     fn par_lines(&self) -> Lines {
         Lines(self.as_parallel_string())
     }
 
     /// Returns a parallel iterator over the sub-slices of a string that are
     /// separated by any amount of whitespace.
     ///
     /// As with `str::split_whitespace`, 'whitespace' is defined according to
     /// the terms of the Unicode Derived Core Property `White_Space`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use rayon::prelude::*;
+    /// let longest = "which is the longest word?"
+    ///     .par_split_whitespace()
+    ///     .max_by_key(|word| word.len());
+    /// assert_eq!(Some("longest"), longest);
+    /// ```
     fn par_split_whitespace(&self) -> SplitWhitespace {
         SplitWhitespace(self.as_parallel_string())
     }
 }
 
 impl ParallelString for str {
     #[inline]
     fn as_parallel_string(&self) -> &str {
@@ -102,17 +153,17 @@ impl ParallelString for str {
 
 // /////////////////////////////////////////////////////////////////////////
 
 /// We hide the `Pattern` trait in a private module, as its API is not meant
 /// for general consumption.  If we could have privacy on trait items, then it
 /// would be nicer to have its basic existence and implementors public while
 /// keeping all of the methods private.
 mod private {
-    use iter::internal::Folder;
+    use iter::plumbing::Folder;
 
     /// Pattern-matching trait for `ParallelString`, somewhat like a mix of
     /// `std::str::pattern::{Pattern, Searcher}`.
     ///
     /// Implementing this trait is not permitted outside of `rayon`.
     pub trait Pattern: Sized + Sync + Send {
         private_decl!{}
         fn find_in(&self, &str) -> Option<usize>;
@@ -178,16 +229,17 @@ impl<FN: Sync + Send + Fn(char) -> bool>
         folder.consume_iter(split)
     }
 }
 
 
 // /////////////////////////////////////////////////////////////////////////
 
 /// Parallel iterator over the characters of a string
+#[derive(Debug, Clone)]
 pub struct Chars<'ch> {
     chars: &'ch str,
 }
 
 struct CharsProducer<'ch> {
     chars: &'ch str,
 }
 
@@ -221,16 +273,17 @@ impl<'ch> UnindexedProducer for CharsPro
         folder.consume_iter(self.chars.chars())
     }
 }
 
 
 // /////////////////////////////////////////////////////////////////////////
 
 /// Parallel iterator over substrings separated by a pattern
+#[derive(Debug, Clone)]
 pub struct Split<'ch, P: Pattern> {
     chars: &'ch str,
     separator: P,
 }
 
 impl<'ch, P: Pattern> Split<'ch, P> {
     fn new(chars: &'ch str, separator: P) -> Self {
         Split {
@@ -283,16 +336,17 @@ impl<'ch, P: Pattern> Fissile<P> for &'c
         separator.fold_with(self, folder, skip_last)
     }
 }
 
 
 // /////////////////////////////////////////////////////////////////////////
 
 /// Parallel iterator over substrings separated by a terminator pattern
+#[derive(Debug, Clone)]
 pub struct SplitTerminator<'ch, P: Pattern> {
     chars: &'ch str,
     terminator: P,
 }
 
 struct SplitTerminatorProducer<'ch, 'sep, P: Pattern + 'sep> {
     splitter: SplitProducer<'sep, P, &'ch str>,
     skip_last: bool,
@@ -350,16 +404,17 @@ impl<'ch, 'sep, P: Pattern + 'sep> Unind
         self.splitter.fold_with(folder, self.skip_last)
     }
 }
 
 
 // /////////////////////////////////////////////////////////////////////////
 
 /// Parallel iterator over lines in a string
+#[derive(Debug, Clone)]
 pub struct Lines<'ch>(&'ch str);
 
 impl<'ch> ParallelIterator for Lines<'ch> {
     type Item = &'ch str;
 
     fn drive_unindexed<C>(self, consumer: C) -> C::Result
         where C: UnindexedConsumer<Self::Item>
     {
@@ -373,16 +428,17 @@ impl<'ch> ParallelIterator for Lines<'ch
             .drive_unindexed(consumer)
     }
 }
 
 
 // /////////////////////////////////////////////////////////////////////////
 
 /// Parallel iterator over substrings separated by whitespace
+#[derive(Debug, Clone)]
 pub struct SplitWhitespace<'ch>(&'ch str);
 
 impl<'ch> ParallelIterator for SplitWhitespace<'ch> {
     type Item = &'ch str;
 
     fn drive_unindexed<C>(self, consumer: C) -> C::Result
         where C: UnindexedConsumer<Self::Item>
     {
--- a/third_party/rust/rayon/src/test.rs
+++ b/third_party/rust/rayon/src/test.rs
@@ -1,16 +1,17 @@
 #![cfg(test)]
+#![cfg(not(all(windows, target_env = "gnu")))]
 
 extern crate compiletest_rs as compiletest;
 
 use std::path::PathBuf;
 
 fn run_compiletest(mode: &str, path: &str) {
-    let mut config = compiletest::default_config();
+    let mut config = compiletest::Config::default();
     config.mode = mode.parse().ok().expect("Invalid mode");
     config.src_base = PathBuf::from(path);
     config.target_rustcflags = Some("-L target/debug/ -L target/debug/deps/".to_owned());
 
     compiletest::run_tests(&config);
 }
 
 #[test]
--- a/third_party/rust/rayon/src/vec.rs
+++ b/third_party/rust/rayon/src/vec.rs
@@ -1,17 +1,21 @@
-//! This module contains the parallel iterator types for vectors
-//! (`Vec<T>`). You will rarely need to interact with it directly
-//! unless you have need to name one of those types.
+//! Parallel iterator types for [vectors][std::vec] (`Vec<T>`)
+//!
+//! You will rarely need to interact with this module directly unless you need
+//! to name one of the iterator types.
+//!
+//! [std::vec]: https://doc.rust-lang.org/stable/std/vec/
 
 use iter::*;
-use iter::internal::*;
+use iter::plumbing::*;
 use std;
 
 /// Parallel iterator that moves out of a vector.
+#[derive(Debug, Clone)]
 pub struct IntoIter<T: Send> {
     vec: Vec<T>,
 }
 
 impl<T: Send> IntoParallelIterator for Vec<T> {
     type Item = T;
     type Iter = IntoIter<T>;
 
@@ -24,29 +28,29 @@ impl<T: Send> ParallelIterator for IntoI
     type Item = T;
 
     fn drive_unindexed<C>(self, consumer: C) -> C::Result
         where C: UnindexedConsumer<Self::Item>
     {
         bridge(self, consumer)
     }
 
-    fn opt_len(&mut self) -> Option<usize> {
+    fn opt_len(&self) -> Option<usize> {
         Some(self.len())
     }
 }
 
 impl<T: Send> IndexedParallelIterator for IntoIter<T> {
     fn drive<C>(self, consumer: C) -> C::Result
         where C: Consumer<Self::Item>
     {
         bridge(self, consumer)
     }
 
-    fn len(&mut self) -> usize {
+    fn len(&self) -> usize {
         self.vec.len()
     }
 
     fn with_producer<CB>(mut self, callback: CB) -> CB::Output
         where CB: ProducerCallback<Self::Item>
     {
         // The producer will move or drop each item from its slice, effectively taking ownership of
         // them.  When we're done, the vector only needs to free its buffer.
@@ -102,16 +106,21 @@ struct SliceDrain<'data, T: 'data> {
 }
 
 impl<'data, T: 'data> Iterator for SliceDrain<'data, T> {
     type Item = T;
 
     fn next(&mut self) -> Option<T> {
         self.iter.next().map(|ptr| unsafe { std::ptr::read(ptr) })
     }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        let len = self.len();
+        (len, Some(len))
+    }
 }
 
 impl<'data, T: 'data> DoubleEndedIterator for SliceDrain<'data, T> {
     fn next_back(&mut self) -> Option<Self::Item> {
         self.iter.next_back().map(|ptr| unsafe { std::ptr::read(ptr) })
     }
 }
 
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon/tests/clones.rs
@@ -0,0 +1,158 @@
+#![feature(clone_closures)]
+
+extern crate rayon;
+
+use rayon::prelude::*;
+
+fn check<I>(iter: I)
+    where I: ParallelIterator + Clone,
+          I::Item: std::fmt::Debug + PartialEq
+{
+    let a: Vec<_> = iter.clone().collect();
+    let b: Vec<_> = iter.collect();
+    assert_eq!(a, b);
+}
+
+#[test]
+fn clone_binary_heap() {
+    use std::collections::BinaryHeap;
+    let heap: BinaryHeap<_> = (0..1000).collect();
+    check(heap.par_iter());
+    check(heap.into_par_iter());
+}
+
+#[test]
+fn clone_btree_map() {
+    use std::collections::BTreeMap;
+    let map: BTreeMap<_,_> = (0..1000).enumerate().collect();
+    check(map.par_iter());
+}
+
+#[test]
+fn clone_btree_set() {
+    use std::collections::BTreeSet;
+    let set: BTreeSet<_> = (0..1000).collect();
+    check(set.par_iter());
+}
+
+#[test]
+fn clone_hash_map() {
+    use std::collections::HashMap;
+    let map: HashMap<_,_> = (0..1000).enumerate().collect();
+    check(map.par_iter());
+}
+
+#[test]
+fn clone_hash_set() {
+    use std::collections::HashSet;
+    let set: HashSet<_> = (0..1000).collect();
+    check(set.par_iter());
+}
+
+#[test]
+fn clone_linked_list() {
+    use std::collections::LinkedList;
+    let list: LinkedList<_> = (0..1000).collect();
+    check(list.par_iter());
+    check(list.into_par_iter());
+}
+
+#[test]
+fn clone_vec_deque() {
+    use std::collections::VecDeque;
+    let deque: VecDeque<_> = (0..1000).collect();
+    check(deque.par_iter());
+    check(deque.into_par_iter());
+}
+
+#[test]
+fn clone_option() {
+    let option = Some(0);
+    check(option.par_iter());
+    check(option.into_par_iter());
+}
+
+#[test]
+fn clone_result() {
+    let result = Ok::<_, ()>(0);
+    check(result.par_iter());
+    check(result.into_par_iter());
+}
+
+#[test]
+fn clone_range() {
+    check((0..1000).into_par_iter());
+}
+
+#[test]
+fn clone_str() {
+    let s = include_str!("clones.rs");
+    check(s.par_chars());
+    check(s.par_lines());
+    check(s.par_split('\n'));
+    check(s.par_split_terminator('\n'));
+    check(s.par_split_whitespace());
+}
+
+#[test]
+fn clone_vec() {
+    let v: Vec<_> = (0..1000).collect();
+    check(v.par_iter());
+    check(v.par_chunks(42));
+    check(v.par_windows(42));
+    check(v.par_split(|x| x % 3 == 0));
+    check(v.into_par_iter());
+}
+
+#[test]
+fn clone_adaptors() {
+    let v: Vec<_> = (0..1000).map(Some).collect();
+    check(v.par_iter().chain(&v));
+    check(v.par_iter().cloned());
+    check(v.par_iter().enumerate());
+    check(v.par_iter().filter(|_| true));
+    check(v.par_iter().filter_map(|x| *x));
+    check(v.par_iter().flat_map(|x| *x));
+    check(v.par_iter().flatten());
+    check(v.par_iter().with_max_len(1).fold(|| 0, |x, _| x));
+    check(v.par_iter().with_max_len(1).fold_with(0, |x, _| x));
+    check(v.par_iter().inspect(|_| ()));
+    check(v.par_iter().update(|_| ()));
+    check(v.par_iter().interleave(&v));
+    check(v.par_iter().interleave_shortest(&v));
+    check(v.par_iter().intersperse(&None));
+    check(v.par_iter().chunks(3));
+    check(v.par_iter().map(|x| x));
+    check(v.par_iter().map_with(0, |_, x| x));
+    check(v.par_iter().rev());
+    check(v.par_iter().skip(1));
+    check(v.par_iter().take(1));
+    check(v.par_iter().cloned().while_some());
+    check(v.par_iter().with_max_len(1));
+    check(v.par_iter().with_min_len(1));
+    check(v.par_iter().zip(&v));
+    check(v.par_iter().zip_eq(&v));
+}
+
+#[test]
+fn clone_empty() {
+    check(rayon::iter::empty::<i32>());
+}
+
+#[test]
+fn clone_once() {
+    check(rayon::iter::once(10));
+}
+
+#[test]
+fn clone_repeat() {
+    let x: Option<i32> = None;
+    check(rayon::iter::repeat(x).while_some());
+    check(rayon::iter::repeatn(x, 1000));
+}
+
+#[test]
+fn clone_splitter() {
+    check(rayon::iter::split(0..1000, |x| (x, None)));
+}
+
--- a/third_party/rust/rayon/tests/compile-fail/cannot_collect_filtermap_data.rs
+++ b/third_party/rust/rayon/tests/compile-fail/cannot_collect_filtermap_data.rs
@@ -5,10 +5,10 @@ use rayon::prelude::*;
 // zip requires data of exact size, but filter yields only bounded
 // size, so check that we cannot apply it.
 
 fn main() {
     let a: Vec<usize> = (0..1024).collect();
     let mut v = vec![];
     a.par_iter()
      .filter_map(|&x| Some(x as f32))
-     .collect_into(&mut v); //~ ERROR no method
+     .collect_into_vec(&mut v); //~ ERROR no method
 }
--- a/third_party/rust/rayon/tests/compile-fail/must_use.rs
+++ b/third_party/rust/rayon/tests/compile-fail/must_use.rs
@@ -10,21 +10,28 @@ fn main() {
     let v: Vec<_> = (0..100).map(Some).collect();
 
     v.par_iter().chain(&v);                 //~ ERROR must be used
     v.par_iter().cloned();                  //~ ERROR must be used
     v.par_iter().enumerate();               //~ ERROR must be used
     v.par_iter().filter(|_| true);          //~ ERROR must be used
     v.par_iter().filter_map(|x| *x);        //~ ERROR must be used
     v.par_iter().flat_map(|x| *x);          //~ ERROR must be used
+    v.par_iter().flatten();                 //~ ERROR must be used
     v.par_iter().fold(|| 0, |x, _| x);      //~ ERROR must be used
     v.par_iter().fold_with(0, |x, _| x);    //~ ERROR must be used
     v.par_iter().inspect(|_| {});           //~ ERROR must be used
+    v.par_iter().update(|_| {});            //~ ERROR must be used
+    v.par_iter().interleave(&v);            //~ ERROR must be used
+    v.par_iter().interleave_shortest(&v);   //~ ERROR must be used
+    v.par_iter().intersperse(&None);        //~ ERROR must be used
+    v.par_iter().chunks(2);                 //~ ERROR must be used
     v.par_iter().map(|x| x);                //~ ERROR must be used
     v.par_iter().map_with(0, |_, x| x);     //~ ERROR must be used
     v.par_iter().rev();                     //~ ERROR must be used
     v.par_iter().skip(1);                   //~ ERROR must be used
     v.par_iter().take(1);                   //~ ERROR must be used
     v.par_iter().cloned().while_some();     //~ ERROR must be used
     v.par_iter().with_max_len(1);           //~ ERROR must be used
     v.par_iter().with_min_len(1);           //~ ERROR must be used
     v.par_iter().zip(&v);                   //~ ERROR must be used
+    v.par_iter().zip_eq(&v);                //~ ERROR must be used
 }
--- a/third_party/rust/rayon/tests/compile-fail/no_send_par_iter.rs
+++ b/third_party/rust/rayon/tests/compile-fail/no_send_par_iter.rs
@@ -9,19 +9,19 @@ use std::ptr::null;
 struct NoSend(*const ());
 
 unsafe impl Sync for NoSend {}
 
 fn main() {
     let x = Some(NoSend(null()));
 
     x.par_iter()
-        .map(|&x| x) //~ ERROR Send` is not satisfied
-        .count(); //~ ERROR Send` is not satisfied
+        .map(|&x| x) //~ ERROR E0277
+        .count(); //~ ERROR E0599
 
     x.par_iter()
-        .filter_map(|&x| Some(x)) //~ ERROR Send` is not satisfied
-        .count(); //~ ERROR Send` is not satisfied
+        .filter_map(|&x| Some(x)) //~ ERROR E0277
+        .count(); //~ ERROR E0599
 
     x.par_iter()
-        .cloned() //~ ERROR Send` is not satisfied
-        .count(); //~ ERROR Send` is not satisfied
+        .cloned() //~ ERROR E0277
+        .count(); //~ ERROR E0599
 }
--- a/third_party/rust/rayon/tests/compile-fail/quicksort_race1.rs
+++ b/third_party/rust/rayon/tests/compile-fail/quicksort_race1.rs
@@ -1,17 +1,17 @@
 extern crate rayon;
 
 fn quick_sort<T:PartialOrd+Send>(v: &mut [T]) {
     if v.len() <= 1 {
         return;
     }
 
     let mid = partition(v);
-    let (lo, hi) = v.split_at_mut(mid);
+    let (lo, _hi) = v.split_at_mut(mid);
     rayon::join(|| quick_sort(lo), || quick_sort(lo)); //~ ERROR E0524
 }
 
 fn partition<T:PartialOrd+Send>(v: &mut [T]) -> usize {
     let pivot = v.len() - 1;
     let mut i = 0;
     for j in 0..pivot {
         if v[j] <= v[pivot] {
--- a/third_party/rust/rayon/tests/compile-fail/quicksort_race2.rs
+++ b/third_party/rust/rayon/tests/compile-fail/quicksort_race2.rs
@@ -1,17 +1,17 @@
 extern crate rayon;
 
 fn quick_sort<T:PartialOrd+Send>(v: &mut [T]) {
     if v.len() <= 1 {
         return;
     }
 
     let mid = partition(v);
-    let (lo, hi) = v.split_at_mut(mid);
+    let (lo, _hi) = v.split_at_mut(mid);
     rayon::join(|| quick_sort(lo), || quick_sort(v)); //~ ERROR E0500
 }
 
 fn partition<T:PartialOrd+Send>(v: &mut [T]) -> usize {
     let pivot = v.len() - 1;
     let mut i = 0;
     for j in 0..pivot {
         if v[j] <= v[pivot] {
--- a/third_party/rust/rayon/tests/compile-fail/quicksort_race3.rs
+++ b/third_party/rust/rayon/tests/compile-fail/quicksort_race3.rs
@@ -1,17 +1,17 @@
 extern crate rayon;
 
 fn quick_sort<T:PartialOrd+Send>(v: &mut [T]) {
     if v.len() <= 1 {
         return;
     }
 
     let mid = partition(v);
-    let (lo, hi) = v.split_at_mut(mid);
+    let (_lo, hi) = v.split_at_mut(mid);
     rayon::join(|| quick_sort(hi), || quick_sort(hi)); //~ ERROR E0524
 }
 
 fn partition<T:PartialOrd+Send>(v: &mut [T]) -> usize {
     let pivot = v.len() - 1;
     let mut i = 0;
     for j in 0..pivot {
         if v[j] <= v[pivot] {
--- a/third_party/rust/rayon/tests/compile-fail/rc_par_iter.rs
+++ b/third_party/rust/rayon/tests/compile-fail/rc_par_iter.rs
@@ -6,10 +6,10 @@ extern crate rayon;
 use rayon::iter::IntoParallelIterator;
 use std::rc::Rc;
 
 fn main() {
     let x = vec![Rc::new(22), Rc::new(23)];
     let mut y = vec![];
     x.into_par_iter() //~ ERROR no method named `into_par_iter`
      .map(|rc| *rc)
-     .collect_into(&mut y);
+     .collect_into_vec(&mut y);
 }
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon/tests/debug.rs
@@ -0,0 +1,167 @@
+extern crate rayon;
+
+use rayon::prelude::*;
+use std::fmt::Debug;
+
+fn check<I>(iter: I)
+    where I: ParallelIterator + Debug
+{
+    println!("{:?}", iter);
+}
+
+#[test]
+fn debug_binary_heap() {
+    use std::collections::BinaryHeap;
+    let heap: BinaryHeap<_> = (0..10).collect();
+    check(heap.par_iter());
+    check(heap.into_par_iter());
+}
+
+#[test]
+fn debug_btree_map() {
+    use std::collections::BTreeMap;
+    let mut map: BTreeMap<_,_> = (0..10).enumerate().collect();
+    check(map.par_iter());
+    check(map.par_iter_mut());
+    check(map.into_par_iter());
+}
+
+#[test]
+fn debug_btree_set() {
+    use std::collections::BTreeSet;
+    let set: BTreeSet<_> = (0..10).collect();
+    check(set.par_iter());
+    check(set.into_par_iter());
+}
+
+#[test]
+fn debug_hash_map() {
+    use std::collections::HashMap;
+    let mut map: HashMap<_,_> = (0..10).enumerate().collect();
+    check(map.par_iter());
+    check(map.par_iter_mut());
+    check(map.into_par_iter());
+}
+
+#[test]
+fn debug_hash_set() {
+    use std::collections::HashSet;
+    let set: HashSet<_> = (0..10).collect();
+    check(set.par_iter());
+    check(set.into_par_iter());
+}
+
+#[test]
+fn debug_linked_list() {
+    use std::collections::LinkedList;
+    let mut list: LinkedList<_> = (0..10).collect();
+    check(list.par_iter());
+    check(list.par_iter_mut());
+    check(list.into_par_iter());
+}
+
+#[test]
+fn debug_vec_deque() {
+    use std::collections::VecDeque;
+    let mut deque: VecDeque<_> = (0..10).collect();
+    check(deque.par_iter());
+    check(deque.par_iter_mut());
+    check(deque.into_par_iter());
+}
+
+#[test]
+fn debug_option() {
+    let mut option = Some(0);
+    check(option.par_iter());
+    check(option.par_iter_mut());
+    check(option.into_par_iter());
+}
+
+#[test]
+fn debug_result() {
+    let mut result = Ok::<_, ()>(0);
+    check(result.par_iter());
+    check(result.par_iter_mut());
+    check(result.into_par_iter());
+}
+
+#[test]
+fn debug_range() {
+    check((0..10).into_par_iter());
+}
+
+#[test]
+fn debug_str() {
+    let s = "a b c d\ne f g";
+    check(s.par_chars());
+    check(s.par_lines());
+    check(s.par_split('\n'));
+    check(s.par_split_terminator('\n'));
+    check(s.par_split_whitespace());
+}
+
+#[test]
+fn debug_vec() {
+    let mut v: Vec<_> = (0..10).collect();
+    check(v.par_iter());
+    check(v.par_iter_mut());
+    check(v.par_chunks(42));
+    check(v.par_chunks_mut(42));
+    check(v.par_windows(42));
+    check(v.par_split(|x| x % 3 == 0));
+    check(v.par_split_mut(|x| x % 3 == 0));
+    check(v.into_par_iter());
+}
+
+#[test]
+fn debug_adaptors() {
+    let v: Vec<_> = (0..10).collect();
+    check(v.par_iter().chain(&v));
+    check(v.par_iter().cloned());
+    check(v.par_iter().enumerate());
+    check(v.par_iter().filter(|_| true));
+    check(v.par_iter().filter_map(|x| Some(x)));
+    check(v.par_iter().flat_map(|x| Some(x)));
+    check(v.par_iter().map(Some).flatten());
+    check(v.par_iter().fold(|| 0, |x, _| x));
+    check(v.par_iter().fold_with(0, |x, _| x));
+    check(v.par_iter().inspect(|_| ()));
+    check(v.par_iter().update(|_| ()));
+    check(v.par_iter().interleave(&v));
+    check(v.par_iter().interleave_shortest(&v));
+    check(v.par_iter().intersperse(&-1));
+    check(v.par_iter().chunks(3));
+    check(v.par_iter().map(|x| x));
+    check(v.par_iter().map_with(0, |_, x| x));
+    check(v.par_iter().rev());
+    check(v.par_iter().skip(1));
+    check(v.par_iter().take(1));
+    check(v.par_iter().map(Some).while_some());
+    check(v.par_iter().with_max_len(1));
+    check(v.par_iter().with_min_len(1));
+    check(v.par_iter().zip(&v));
+    check(v.par_iter().zip_eq(&v));
+}
+
+#[test]
+fn debug_empty() {
+    check(rayon::iter::empty::<i32>());
+}
+
+#[test]
+fn debug_once() {
+    check(rayon::iter::once(10));
+}
+
+#[test]
+fn debug_repeat() {
+    let x: Option<i32> = None;
+    check(rayon::iter::repeat(x));
+    check(rayon::iter::repeatn(x, 10));
+}
+
+#[test]
+fn debug_splitter() {
+    check(rayon::iter::split(0..10, |x| (x, None)));
+}
+
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon/tests/intersperse.rs
@@ -0,0 +1,53 @@
+extern crate rayon;
+
+use rayon::prelude::*;
+
+#[test]
+fn check_intersperse() {
+    let v: Vec<_> = (0..1000).into_par_iter().intersperse(-1).collect();
+    assert_eq!(v.len(), 1999);
+    for (i, x) in v.into_iter().enumerate() {
+        assert_eq!(x, if i % 2 == 0 { i as i32 / 2 } else { -1 });
+    }
+}
+
+#[test]
+fn check_intersperse_again() {
+    let v: Vec<_> = (0..1000).into_par_iter().intersperse(-1).intersperse(-2).collect();
+    assert_eq!(v.len(), 3997);
+    for (i, x) in v.into_iter().enumerate() {
+        let y = match i % 4 {
+            0 => i as i32 / 4,
+            2 => -1,
+            _ => -2,
+        };
+        assert_eq!(x, y);
+    }
+}
+
+#[test]
+fn check_intersperse_unindexed() {
+    let v: Vec<_> = (0..1000).map(|i| i.to_string()).collect();
+    let s = v.join(",");
+    let s2 = v.join(";");
+    let par: String = s.par_split(',').intersperse(";").collect();
+    assert_eq!(par, s2);
+}
+
+#[test]
+fn check_intersperse_producer() {
+    (0..1000).into_par_iter().intersperse(-1)
+        .zip_eq(0..1999)
+        .for_each(|(x, i)| {
+            assert_eq!(x, if i % 2 == 0 { i / 2 } else { -1 });
+        });
+}
+
+#[test]
+fn check_intersperse_rev() {
+    (0..1000).into_par_iter().intersperse(-1)
+        .zip_eq(0..1999).rev()
+        .for_each(|(x, i)| {
+            assert_eq!(x, if i % 2 == 0 { i / 2 } else { -1 });
+        });
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon/tests/producer_split_at.rs
@@ -0,0 +1,272 @@
+#![feature(conservative_impl_trait)]
+
+extern crate rayon;
+
+use rayon::prelude::*;
+use rayon::iter::plumbing::*;
+
+/// Stress-test indexes for `Producer::split_at`.
+fn check<F, I>(expected: &[I::Item], mut f: F)
+    where F: FnMut() -> I,
+          I: IntoParallelIterator,
+          I::Iter: IndexedParallelIterator,
+          I::Item: PartialEq + std::fmt::Debug
+{
+    for (i, j, k) in triples(expected.len() + 1) {
+        Split::forward(f(), i, j, k, expected);
+        Split::reverse(f(), i, j, k, expected);
+    }
+}
+
+fn triples(end: usize) -> impl Iterator<Item=(usize, usize, usize)> {
+    (0..end).flat_map(move |i| {
+        (i..end).flat_map(move |j| {
+            (j..end).map(move |k| (i, j, k))
+        })
+    })
+}
+
+#[derive(Debug)]
+struct Split {
+    i: usize,
+    j: usize,
+    k: usize,
+    reverse: bool
+}
+
+impl Split {
+    fn forward<I>(iter: I, i: usize, j: usize, k: usize, expected: &[I::Item])
+        where I: IntoParallelIterator,
+              I::Iter: IndexedParallelIterator,
+              I::Item: PartialEq + std::fmt::Debug
+    {
+        let result = iter.into_par_iter()
+            .with_producer(Split { i, j, k, reverse: false });
+        assert_eq!(result, expected);
+    }
+
+    fn reverse<I>(iter: I, i: usize, j: usize, k: usize, expected: &[I::Item])
+        where I: IntoParallelIterator,
+              I::Iter: IndexedParallelIterator,
+              I::Item: PartialEq + std::fmt::Debug
+    {
+        let result = iter.into_par_iter()
+            .with_producer(Split { i, j, k, reverse: true });
+        assert!(result.iter().eq(expected.iter().rev()));
+    }
+}
+
+impl<T> ProducerCallback<T> for Split {
+    type Output = Vec<T>;
+
+    fn callback<P>(self, producer: P) -> Self::Output
+        where P: Producer<Item = T>
+    {
+        println!("{:?}", self);
+
+        // Splitting the outer indexes first gets us an arbitrary mid section,
+        // which we then split further to get full test coverage.
+        let (left, d) = producer.split_at(self.k);
+        let (a, mid) = left.split_at(self.i);
+        let (b, c) = mid.split_at(self.j - self.i);
+
+        let a = a.into_iter();
+        let b = b.into_iter();
+        let c = c.into_iter();
+        let d = d.into_iter();
+
+        check_len(&a, self.i);
+        check_len(&b, self.j - self.i);
+        check_len(&c, self.k - self.j);
+
+        let chain = a.chain(b).chain(c).chain(d);
+        if self.reverse {
+            chain.rev().collect()
+        } else {
+            chain.collect()
+        }
+    }
+}
+
+fn check_len<I: ExactSizeIterator>(iter: &I, len: usize) {
+    assert_eq!(iter.size_hint(), (len, Some(len)));
+    assert_eq!(iter.len(), len);
+}
+
+
+// **** Base Producers ****
+
+#[test]
+fn empty() {
+    let v = vec![42];
+    check(&v[..0], || rayon::iter::empty());
+}
+
+#[test]
+fn once() {
+    let v = vec![42];
+    check(&v, || rayon::iter::once(42));
+}
+
+#[test]
+fn option() {
+    let v = vec![42];
+    check(&v, || Some(42));
+}
+
+#[test]
+fn range() {
+    let v: Vec<_> = (0..10).collect();
+    check(&v, || 0..10);
+}
+
+#[test]
+fn repeatn() {
+    let v: Vec<_> = std::iter::repeat(1).take(5).collect();
+    check(&v, || rayon::iter::repeatn(1, 5));
+}
+
+#[test]
+fn slice_iter() {
+    let s: Vec<_> = (0..10).collect();
+    let v: Vec<_> = s.iter().collect();
+    check(&v, || &s);
+}
+
+#[test]
+fn slice_iter_mut() {
+    let mut s: Vec<_> = (0..10).collect();
+    let mut v: Vec<_> = s.clone();
+    let expected: Vec<_> = v.iter_mut().collect();
+
+    for (i, j, k) in triples(expected.len() + 1) {
+        Split::forward(s.par_iter_mut(), i, j, k, &expected);
+        Split::reverse(s.par_iter_mut(), i, j, k, &expected);
+    }
+}
+
+#[test]
+fn slice_chunks() {
+    let s: Vec<_> = (0..10).collect();
+    let v: Vec<_> = s.chunks(2).collect();
+    check(&v, || s.par_chunks(2));
+}
+
+#[test]
+fn slice_chunks_mut() {
+    let mut s: Vec<_> = (0..10).collect();
+    let mut v: Vec<_> = s.clone();
+    let expected: Vec<_> = v.chunks_mut(2).collect();
+
+    for (i, j, k) in triples(expected.len() + 1) {
+        Split::forward(s.par_chunks_mut(2), i, j, k, &expected);
+        Split::reverse(s.par_chunks_mut(2), i, j, k, &expected);
+    }
+}
+
+#[test]
+fn slice_windows() {
+    let s: Vec<_> = (0..10).collect();
+    let v: Vec<_> = s.windows(2).collect();
+    check(&v, || s.par_windows(2));
+}
+
+#[test]
+fn vec() {
+    let v: Vec<_> = (0..10).collect();
+    check(&v, || v.clone());
+}
+
+
+// **** Adaptors ****
+
+#[test]
+fn chain() {
+    let v: Vec<_> = (0..10).collect();
+    check(&v, || (0..5).into_par_iter().chain(5..10));
+}
+
+#[test]
+fn cloned() {
+    let v: Vec<_> = (0..10).collect();
+    check(&v, || v.par_iter().cloned());
+}
+
+#[test]
+fn enumerate() {
+    let v: Vec<_> = (0..10).enumerate().collect();
+    check(&v, || (0..10).into_par_iter().enumerate());
+}
+
+#[test]
+fn inspect() {
+    let v: Vec<_> = (0..10).collect();
+    check(&v, || (0..10).into_par_iter().inspect(|_| ()));
+}
+
+#[test]
+fn update() {
+    let v: Vec<_> = (0..10).collect();
+    check(&v, || (0..10).into_par_iter().update(|_| ()));
+}
+
+#[test]
+fn interleave() {
+    let v = [0, 10, 1, 11, 2, 12, 3, 4];
+    check(&v, || (0..5).into_par_iter().interleave(10..13));
+    check(&v[..6], || (0..3).into_par_iter().interleave(10..13));
+
+    let v = [0, 10, 1, 11, 2, 12, 13, 14];
+    check(&v, || (0..3).into_par_iter().interleave(10..15));
+}
+
+#[test]
+fn intersperse() {
+    let v = [0, -1, 1, -1, 2, -1, 3, -1, 4];
+    check(&v, || (0..5).into_par_iter().intersperse(-1));
+}
+
+#[test]
+fn chunks() {
+    let s: Vec<_> = (0..10).collect();
+    let v: Vec<_> = s.chunks(2).map(|c| c.to_vec()).collect();
+    check(&v, || s.par_iter().cloned().chunks(2));
+}
+
+#[test]
+fn map() {
+    let v: Vec<_> = (0..10).collect();
+    check(&v, || v.par_iter().map(Clone::clone));
+}
+
+#[test]
+fn map_with() {
+    let v: Vec<_> = (0..10).collect();
+    check(&v, || v.par_iter().map_with(vec![0], |_, &x| x));
+}
+
+#[test]
+fn rev() {
+    let v: Vec<_> = (0..10).rev().collect();
+    check(&v, || (0..10).into_par_iter().rev());
+}
+
+#[test]
+fn with_max_len() {
+    let v: Vec<_> = (0..10).collect();
+    check(&v, || (0..10).into_par_iter().with_max_len(1));
+}
+
+#[test]
+fn with_min_len() {
+    let v: Vec<_> = (0..10).collect();
+    check(&v, || (0..10).into_par_iter().with_min_len(1));
+}
+
+#[test]
+fn zip() {
+    let v: Vec<_> = (0..10).zip(10..20).collect();
+    check(&v, || (0..10).into_par_iter().zip(10..20));
+    check(&v[..5], || (0..5).into_par_iter().zip(10..20));
+    check(&v[..5], || (0..10).into_par_iter().zip(10..15));
+}
--- a/third_party/rust/rayon/tests/run-pass/double_init_fail.rs
+++ b/third_party/rust/rayon/tests/run-pass/double_init_fail.rs
@@ -1,10 +1,11 @@
 extern crate rayon;
 
 use rayon::*;
+use std::error::Error;
 
 fn main() {
-    let result1 = initialize(Configuration::new());
+    let result1 = ThreadPoolBuilder::new().build_global();
     assert_eq!(result1.unwrap(), ());
-    let err = initialize(Configuration::new()).unwrap_err();
+    let err = ThreadPoolBuilder::new().build_global().unwrap_err();
     assert!(err.description() == "The global thread pool has already been initialized.");
 }
--- a/third_party/rust/rayon/tests/run-pass/init_zero_threads.rs
+++ b/third_party/rust/rayon/tests/run-pass/init_zero_threads.rs
@@ -1,7 +1,7 @@
 extern crate rayon;
 
 use rayon::*;
 
 fn main() {
-    initialize(Configuration::new().num_threads(0)).unwrap();
+    ThreadPoolBuilder::new().num_threads(0).build_global().unwrap();
 }
--- a/third_party/rust/rayon/tests/run-pass/named-threads.rs
+++ b/third_party/rust/rayon/tests/run-pass/named-threads.rs
@@ -1,17 +1,17 @@
 extern crate rayon;
 
 use std::collections::HashSet;
 
 use rayon::*;
 use rayon::prelude::*;
 
 fn main() {
-    let result = initialize(Configuration::new().thread_name(|i| format!("hello-name-test-{}", i)));
+    let result = ThreadPoolBuilder::new().thread_name(|i| format!("hello-name-test-{}", i)).build_global();
 
     const N: usize = 10000;
 
     let thread_names = (0..N).into_par_iter()
         .flat_map(|_| ::std::thread::current().name().map(|s| s.to_owned()))
         .collect::<HashSet<String>>();
 
     let all_contains_name = thread_names.iter().all(|name| name.starts_with("hello-name-test-"));
--- a/third_party/rust/rayon/tests/run-pass/stack_overflow_crash.rs
+++ b/third_party/rust/rayon/tests/run-pass/stack_overflow_crash.rs
@@ -20,32 +20,39 @@ fn force_stack_overflow(depth: u32) {
 fn main() {
     if env::args().len() == 1 {
         // first check that the recursive call actually causes a stack overflow, and does not get optimized away
         {
             let status = Command::new(env::current_exe().unwrap())
                 .arg("8")
                 .status()
                 .unwrap();
+
+            #[cfg(windows)]
+            assert_eq!(status.code(), Some(0xc00000fd /*STATUS_STACK_OVERFLOW*/));
+
+            #[cfg(unix)]
             assert_eq!(status.code(), None);
+
             #[cfg(target_os = "linux")]
-            assert!(status.signal() == Some(11 /*SIGABRT*/) || status.signal() == Some(6 /*SIGSEGV*/));
+            assert!(status.signal() == Some(11 /*SIGSEGV*/) ||
+                    status.signal() == Some(6 /*SIGABRT*/));
         }
 
 
         // now run with a larger stack and verify correct operation
         {
             let status = Command::new(env::current_exe().unwrap())
                 .arg("48")
                 .status()
                 .unwrap();
             assert_eq!(status.code(), Some(0));
             #[cfg(target_os = "linux")]
             assert_eq!(status.signal(), None);
         }
     } else {
         let stack_size_in_mb: usize = env::args().nth(1).unwrap().parse().unwrap();
-        let pool = ThreadPool::new(Configuration::new().stack_size(stack_size_in_mb * 1024 * 1024)).unwrap();
+        let pool = ThreadPoolBuilder::new().stack_size(stack_size_in_mb * 1024 * 1024).build().unwrap();
         let index = pool.install(|| {
             force_stack_overflow(32);
         });
     }
 }
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rayon/tests/sort-panic-safe.rs
@@ -0,0 +1,164 @@
+#[macro_use]
+extern crate lazy_static;
+extern crate rayon;
+extern crate rand;
+
+use rand::{thread_rng, Rng};
+use rayon::prelude::*;
+use std::cell::Cell;
+use std::cmp::{self, Ordering};
+use std::panic;
+use std::sync::atomic::Ordering::Relaxed;
+use std::sync::atomic::{ATOMIC_USIZE_INIT, AtomicUsize};
+use std::thread;
+
+static VERSIONS: AtomicUsize = ATOMIC_USIZE_INIT;
+
+lazy_static! {
+    static ref DROP_COUNTS: Vec<AtomicUsize> = (0..20_000)
+        .map(|_| AtomicUsize::new(0))
+        .collect();
+}
+
+#[derive(Clone, Eq)]
+struct DropCounter {
+    x: u32,
+    id: usize,
+    version: Cell<usize>,
+}
+
+impl PartialEq for DropCounter {
+    fn eq(&self, other: &Self) -> bool {
+        self.partial_cmp(other) == Some(Ordering::Equal)
+    }
+}
+
+impl PartialOrd for DropCounter {
+    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+        self.version.set(self.version.get() + 1);
+        other.version.set(other.version.get() + 1);
+        VERSIONS.fetch_add(2, Relaxed);
+        self.x.partial_cmp(&other.x)
+    }
+}
+
+impl Ord for DropCounter {
+    fn cmp(&self, other: &Self) -> Ordering {
+        self.partial_cmp(other).unwrap()
+    }
+}
+
+impl Drop for DropCounter {
+    fn drop(&mut self) {
+        DROP_COUNTS[self.id].fetch_add(1, Relaxed);
+        VERSIONS.fetch_sub(self.version.get(), Relaxed);
+    }
+}
+
+macro_rules! test {
+    ($input:ident, $func:ident) => {
+        let len = $input.len();
+
+        // Work out the total number of comparisons required to sort
+        // this array...
+        let count = AtomicUsize::new(0);
+        $input.to_owned().$func(|a, b| {
+            count.fetch_add(1, Relaxed);
+            a.cmp(b)
+        });
+
+        let mut panic_countdown = count.load(Relaxed);
+        let step = if len <= 100 {
+            1
+        } else {
+            cmp::max(1, panic_countdown / 10)
+        };
+
+        // ... and then panic after each `step` comparisons.
+        loop {
+            // Refresh the counters.
+            VERSIONS.store(0, Relaxed);
+            for i in 0..len {
+                DROP_COUNTS[i].store(0, Relaxed);
+            }
+
+            let v = $input.to_owned();
+            let _ = thread::spawn(move || {
+                let mut v = v;
+                let panic_countdown = AtomicUsize::new(panic_countdown);
+                v.$func(|a, b| {
+                    if panic_countdown.fetch_sub(1, Relaxed) == 1 {
+                        SILENCE_PANIC.with(|s| s.set(true));
+                        panic!();
+                    }
+                    a.cmp(b)
+                })
+            }).join();
+
+            // Check that the number of things dropped is exactly
+            // what we expect (i.e. the contents of `v`).
+            for (i, c) in DROP_COUNTS.iter().enumerate().take(len) {
+                let count = c.load(Relaxed);
+                assert!(count == 1,
+                        "found drop count == {} for i == {}, len == {}",
+                        count, i, len);
+            }
+
+            // Check that the most recent versions of values were dropped.
+            assert_eq!(VERSIONS.load(Relaxed), 0);
+
+            if panic_countdown < step {
+                break;
+            }
+            panic_countdown -= step;
+        }
+    }
+}
+
+thread_local!(static SILENCE_PANIC: Cell<bool> = Cell::new(false));
+
+#[test]
+fn sort_panic_safe() {
+    let prev = panic::take_hook();
+    panic::set_hook(Box::new(move |info| {
+        if !SILENCE_PANIC.with(|s| s.get()) {
+            prev(info);
+        }
+    }));
+
+    for &len in &[1, 2, 3, 4, 5, 10, 20, 100, 500, 5_000, 20_000] {
+        for &modulus in &[5, 30, 1_000, 20_000] {
+            for &has_runs in &[false, true] {
+                let mut rng = thread_rng();
+                let mut input = (0..len)
+                    .map(|id| {
+                        DropCounter {
+                            x: rng.next_u32() % modulus,
+                            id: id,
+                            version: Cell::new(0),
+                        }
+                    })
+                    .collect::<Vec<_>>();
+
+                if has_runs {
+                    for c in &mut input {
+                        c.x = c.id as u32;
+                    }
+
+                    for _ in 0..5 {
+                        let a = rng.gen::<usize>() % len;
+                        let b = rng.gen::<usize>() % len;
+                        if a < b {
+                            input[a..b].reverse();
+                        } else {
+                            input.swap(a, b);
+                        }
+                    }
+                }
+
+                test!(input, par_sort_by);
+                test!(input, par_sort_unstable_by);
+            }
+        }
+    }
+}
--- a/toolkit/library/gtest/rust/Cargo.lock
+++ b/toolkit/library/gtest/rust/Cargo.lock
@@ -211,25 +211,16 @@ dependencies = [
  "bitflags 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "strsim 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "textwrap 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "unicode-width 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
  "vec_map 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
-name = "coco"
-version = "0.1.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "either 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "scopeguard 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
 name = "core-foundation"
 version = "0.5.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "core-foundation-sys 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "libc 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
@@ -283,16 +274,47 @@ dependencies = [
 name = "cose-c"
 version = "0.1.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "cose 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
+name = "crossbeam-deque"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "crossbeam-epoch 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crossbeam-utils 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "crossbeam-epoch"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "arrayvec 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cfg-if 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crossbeam-utils 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "lazy_static 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
+ "memoffset 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "nodrop 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)",
+ "scopeguard 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "crossbeam-utils"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "cfg-if 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
 name = "cssparser"
 version = "0.23.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "cssparser-macros 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "dtoa-short 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "itoa 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "matches 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -823,16 +845,21 @@ source = "registry+https://github.com/ru
 dependencies = [
  "fs2 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "libc 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)",
  "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
+name = "memoffset"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
 name = "mio"
 version = "0.6.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "iovec 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "lazycell 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "libc 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1133,27 +1160,35 @@ dependencies = [
  "libc 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "rayon"
 version = "0.8.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "rayon-core 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rayon-core 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "rayon"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "either 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rayon-core 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "rayon-core"
-version = "1.2.0"
+version = "1.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "coco 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "futures 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)",
- "lazy_static 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crossbeam-deque 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "lazy_static 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "libc 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)",
  "num_cpus 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "redox_syscall"
 version = "0.1.32"
@@ -1633,17 +1668,17 @@ dependencies = [
  "euclid 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "freetype 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "fxhash 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "gleam 0.4.20 (registry+https://github.com/rust-lang/crates.io-index)",
  "lazy_static 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "num-traits 0.1.41 (registry+https://github.com/rust-lang/crates.io-index)",
  "plane-split 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "rayon 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rayon 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "ron 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde 1.0.27 (registry+https://github.com/rust-lang/crates.io-index)",
  "smallvec 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "thread_profiler 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "time 0.1.38 (registry+https://github.com/rust-lang/crates.io-index)",
  "webrender_api 0.57.0",
 ]
 
@@ -1671,17 +1706,17 @@ dependencies = [
  "app_units 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "core-foundation 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "core-graphics 0.13.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "dwrote 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "euclid 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "foreign-types 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "gleam 0.4.20 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "rayon 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rayon 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "thread_profiler 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "webrender 0.57.0",
 ]
 
 [[package]]
 name = "which"
 version = "1.0.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -1761,24 +1796,26 @@ dependencies = [
 "checksum bitreader 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "80b13e2ab064ff3aa0bdbf1eff533f9822dc37899821f5f98c67f263eab51707"
 "checksum boxfnonce 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "8380105befe91099e6f69206164072c05bc92427ff6aa8a5171388317346dd75"
 "checksum byteorder 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "652805b7e73fada9d85e9a6682a4abd490cb52d96aeecc12e33a0de34dfd0d23"
 "checksum bytes 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "d828f97b58cc5de3e40c421d0cf2132d6b2da4ee0e11b8632fa838f0f9333ad6"
 "checksum cexpr 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "393a5f0088efbe41f9d1fcd062f24e83c278608420e62109feb2c8abee07de7d"
 "checksum cfg-if 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "d4c819a1287eb618df47cc647173c5c4c66ba19d888a6e50d605672aed3140de"
 "checksum clang-sys 0.21.1 (registry+https://github.com/rust-lang/crates.io-index)" = "00048189ee171715296dfe3b2fcfd439563c7bfec0d98d3976ce3402d62c8f07"
 "checksum clap 2.29.0 (registry+https://github.com/rust-lang/crates.io-index)" = "110d43e343eb29f4f51c1db31beb879d546db27998577e5715270a54bcf41d3f"
-"checksum coco 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c06169f5beb7e31c7c67ebf5540b8b472d23e3eade3b2ec7d1f5b504a85f91bd"
 "checksum core-foundation 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "286e0b41c3a20da26536c6000a280585d519fd07b3956b43aed8a79e9edce980"
 "checksum core-foundation-sys 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "41115a6aa5d3e1e5ef98148373f25971d1fad53818553f216495f9e67e90a624"
 "checksum core-foundation-sys 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "716c271e8613ace48344f723b60b900a93150271e5be206212d052bbc0883efa"
 "checksum core-graphics 0.13.0 (registry+https://github.com/rust-lang/crates.io-index)" = "fb0ed45fdc32f9ab426238fba9407dfead7bacd7900c9b4dd3f396f46eafdae3"
 "checksum core-text 9.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "2bd581c37283d0c23311d179aefbb891f2324ee0405da58a26e8594ab76e5748"
 "checksum cose 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "72fa26cb151d3ae4b70f63d67d0fed57ce04220feafafbae7f503bef7aae590d"
 "checksum cose-c 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "49726015ab0ca765144fcca61e4a7a543a16b795a777fa53f554da2fffff9a94"
+"checksum crossbeam-deque 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f739f8c5363aca78cfb059edf753d8f0d36908c348f3d8d1503f03d8b75d9cf3"
+"checksum crossbeam-epoch 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "59796cc6cbbdc6bb319161349db0c3250ec73ec7fcb763a51065ec4e2e158552"
+"checksum crossbeam-utils 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "2760899e32a1d58d5abb31129f8fae5de75220bc2176e77ff7c627ae45c918d9"
 "checksum cssparser 0.23.2 (registry+https://github.com/rust-lang/crates.io-index)" = "8a807ac3ab7a217829c2a3b65732b926b2befe6a35f33b4bf8b503692430f223"
 "checksum cssparser-macros 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "079adec4af52bb5275eadd004292028c79eb3c5f5b4ee8086a36d4197032f6df"
 "checksum cstr 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "b6557bdb1dc9647eae1cf7f5601b14cd45fc3c7ccf2df618387416fe542da6ea"
 "checksum cstr-macros 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f9f316203d1ea36f4f18316822806f6999aa3dc5ed1adf51e35b77e3b3933d78"
 "checksum darling 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d3effd06d4057f275cb7858889f4952920bab78dd8ff0f6e7dfe0c8d2e67ed89"
 "checksum darling_core 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "167dd3e235c2f1da16a635c282630452cdf49191eb05711de1bcd1d3d5068c00"
 "checksum darling_macro 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c53edaba455f6073a10c27c72440860eb3f60444f8c8660a391032eeae744d82"
 "checksum debug_unreachable 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "9a032eac705ca39214d169f83e3d3da290af06d8d1d344d1baad2fd002dca4b3"
@@ -1817,16 +1854,17 @@ dependencies = [
 "checksum libc 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)" = "5ba3df4dcb460b9dfbd070d41c94c19209620c191b0340b929ce748a2bcd42d2"
 "checksum libloading 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "be99f814beb3e9503a786a592c909692bb6d4fc5a695f6ed7987223acfbd5194"
 "checksum libudev 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ea626d3bdf40a1c5aee3bcd4f40826970cae8d80a8fec934c82a63840094dcfe"
 "checksum log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "880f77541efa6e5cc74e76910c9884d9859683118839d6a1dc3b11e63512565b"
 "checksum log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "89f010e843f2b1a31dbd316b3b8d443758bc634bed37aabade59c686d644e0a2"
 "checksum matches 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "100aabe6b8ff4e4a7e32c1c13523379802df0772b82466207ac25b013f193376"
 "checksum memchr 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "148fab2e51b4f1cfc66da2a7c32981d1d3c083a803978268bb11fe4b86925e7a"
 "checksum memmap 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "46f3c7359028b31999287dae4e5047ddfe90a23b7dca2282ce759b491080c99b"
+"checksum memoffset 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0f9dc261e2b62d7a622bf416ea3c5245cdd5d9a7fcc428c0d06804dfce1775b3"
 "checksum mio 0.6.9 (registry+https://github.com/rust-lang/crates.io-index)" = "9e965267d4d58496fc4f740e9861118367f13570cadf66316ed2c3f2f14d87c7"
 "checksum mio-uds 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)" = "1731a873077147b626d89cc6c2a0db6288d607496c5d10c0cfcf3adc697ec673"
 "checksum miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "8c1f2f3b1cf331de6896aabf6e9d55dca90356cc9960cca7eaaf408a355ae919"
 "checksum moz_cbor 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "20c82a57087fd5990d7122dbff1607c3b20c3d2958e9d9ad9765aab415e2c91c"
 "checksum mp4parse_fallible 0.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "6626c2aef76eb8f984eef02e475883d3fe9112e114720446c5810fc5f045cd30"
 "checksum net2 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)" = "3a80f842784ef6c9a958b68b7516bc7e35883c614004dd94959a4dca1b716c09"
 "checksum nodrop 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)" = "9a2228dca57108069a5262f2ed8bd2e82496d2e074a06d1ccc7ce1687b6ae0a2"
 "checksum nom 1.2.4 (registry+https://github.com/rust-lang/crates.io-index)" = "a5b8c256fd9471521bcb84c3cdba98921497f1a331cbc15b8030fc63b82050ce"
@@ -1848,17 +1886,18 @@ dependencies = [
 "checksum plane-split 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d2adb8d1523b2ddcd98275613e9bc04eef75b47a39e252e63733a3218ae3c1b7"
 "checksum precomputed-hash 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "925383efa346730478fb4838dbe9137d2a47675ad789c546d150a6e1dd4ab31c"
 "checksum proc-macro2 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "d1cb7aaaa4bf022ec2b14ff2f2ba1643a22f3cee88df014a85e14b392282c61d"
 "checksum procedural-masquerade 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "9f566249236c6ca4340f7ca78968271f0ed2b0f234007a61b66f9ecd0af09260"
 "checksum quote 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6e920b65c65f10b2ae65c831a81a073a89edd28c7cce89475bff467ab4167a"
 "checksum quote 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "1eca14c727ad12702eb4b6bfb5a232287dcf8385cb8ca83a3eeaf6519c44c408"
 "checksum rand 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)" = "6475140dfd8655aeb72e1fd4b7a1cc1c202be65d71669476e392fe62532b9edd"
 "checksum rayon 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)" = "b614fe08b6665cb9a231d07ac1364b0ef3cb3698f1239ee0c4c3a88a524f54c8"
-"checksum rayon-core 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e2c21a92a5dca958fb030787c1158446c6deb7f976399b72fa8074603f169e2a"
+"checksum rayon 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "485541959c8ecc49865526fe6c4de9653dd6e60d829d6edf0be228167b60372d"
+"checksum rayon-core 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "9d24ad214285a7729b174ed6d3bcfcb80177807f959d95fafd5bfc5c4f201ac8"
 "checksum redox_syscall 0.1.32 (registry+https://github.com/rust-lang/crates.io-index)" = "ab105df655884ede59d45b7070c8a65002d921461ee813a024558ca16030eea0"
 "checksum regex 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "1731164734096285ec2a5ec7fea5248ae2f5485b3feeb0115af4fda2183b2d1b"
 "checksum regex-syntax 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ad890a5eef7953f55427c50575c680c42841653abd2b028b68cd223d157f62db"
 "checksum ron 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "da06feaa07f69125ab9ddc769b11de29090122170b402547f64b86fe16ebc399"
 "checksum runloop 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5d79b4b604167921892e84afbbaad9d5ad74e091bf6c511d9dbfb0593f09fabd"
 "checksum same-file 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "d931a44fdaa43b8637009e7632a02adc4f2b2e0733c08caa4cf00e8da4a117a7"
 "checksum scoped-tls 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f417c22df063e9450888a7561788e9bd46d3bb3c1466435b4eccb903807f147d"
 "checksum scopeguard 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "c79eb2c3ac4bc2507cda80e7f3ac5b88bd8eae4c0914d5663e6a8933994be918"
--- a/toolkit/library/rust/Cargo.lock
+++ b/toolkit/library/rust/Cargo.lock
@@ -211,25 +211,16 @@ dependencies = [
  "bitflags 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "strsim 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "textwrap 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "unicode-width 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
  "vec_map 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
-name = "coco"
-version = "0.1.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "either 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "scopeguard 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
 name = "core-foundation"
 version = "0.5.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "core-foundation-sys 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "libc 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
@@ -283,16 +274,47 @@ dependencies = [
 name = "cose-c"
 version = "0.1.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "cose 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
+name = "crossbeam-deque"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "crossbeam-epoch 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crossbeam-utils 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "crossbeam-epoch"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "arrayvec 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cfg-if 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crossbeam-utils 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "lazy_static 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
+ "memoffset 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "nodrop 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)",
+ "scopeguard 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "crossbeam-utils"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "cfg-if 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
 name = "cssparser"
 version = "0.23.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "cssparser-macros 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "dtoa-short 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "itoa 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "matches 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -821,16 +843,21 @@ source = "registry+https://github.com/ru
 dependencies = [
  "fs2 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "libc 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)",
  "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
+name = "memoffset"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
 name = "mio"
 version = "0.6.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "iovec 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "lazycell 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "libc 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1120,27 +1147,35 @@ dependencies = [
  "libc 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "rayon"
 version = "0.8.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "rayon-core 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rayon-core 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "rayon"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "either 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rayon-core 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "rayon-core"
-version = "1.2.0"
+version = "1.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "coco 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "futures 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)",
- "lazy_static 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crossbeam-deque 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "lazy_static 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "libc 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)",
  "num_cpus 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "redox_syscall"
 version = "0.1.32"
@@ -1645,17 +1680,17 @@ dependencies = [
  "euclid 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "freetype 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "fxhash 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "gleam 0.4.20 (registry+https://github.com/rust-lang/crates.io-index)",
  "lazy_static 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "num-traits 0.1.41 (registry+https://github.com/rust-lang/crates.io-index)",
  "plane-split 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "rayon 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rayon 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "ron 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde 1.0.27 (registry+https://github.com/rust-lang/crates.io-index)",
  "smallvec 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "thread_profiler 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "time 0.1.38 (registry+https://github.com/rust-lang/crates.io-index)",
  "webrender_api 0.57.0",
 ]
 
@@ -1683,17 +1718,17 @@ dependencies = [
  "app_units 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "core-foundation 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "core-graphics 0.13.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "dwrote 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "euclid 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "foreign-types 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "gleam 0.4.20 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "rayon 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rayon 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "thread_profiler 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "webrender 0.57.0",
 ]
 
 [[package]]
 name = "which"
 version = "1.0.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -1764,24 +1799,26 @@ dependencies = [
 "checksum bitreader 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "80b13e2ab064ff3aa0bdbf1eff533f9822dc37899821f5f98c67f263eab51707"
 "checksum boxfnonce 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "8380105befe91099e6f69206164072c05bc92427ff6aa8a5171388317346dd75"
 "checksum byteorder 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "652805b7e73fada9d85e9a6682a4abd490cb52d96aeecc12e33a0de34dfd0d23"
 "checksum bytes 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "d828f97b58cc5de3e40c421d0cf2132d6b2da4ee0e11b8632fa838f0f9333ad6"
 "checksum cexpr 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "393a5f0088efbe41f9d1fcd062f24e83c278608420e62109feb2c8abee07de7d"
 "checksum cfg-if 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "d4c819a1287eb618df47cc647173c5c4c66ba19d888a6e50d605672aed3140de"
 "checksum clang-sys 0.21.1 (registry+https://github.com/rust-lang/crates.io-index)" = "00048189ee171715296dfe3b2fcfd439563c7bfec0d98d3976ce3402d62c8f07"
 "checksum clap 2.29.0 (registry+https://github.com/rust-lang/crates.io-index)" = "110d43e343eb29f4f51c1db31beb879d546db27998577e5715270a54bcf41d3f"
-"checksum coco 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c06169f5beb7e31c7c67ebf5540b8b472d23e3eade3b2ec7d1f5b504a85f91bd"
 "checksum core-foundation 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "286e0b41c3a20da26536c6000a280585d519fd07b3956b43aed8a79e9edce980"
 "checksum core-foundation-sys 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "41115a6aa5d3e1e5ef98148373f25971d1fad53818553f216495f9e67e90a624"
 "checksum core-foundation-sys 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "716c271e8613ace48344f723b60b900a93150271e5be206212d052bbc0883efa"
 "checksum core-graphics 0.13.0 (registry+https://github.com/rust-lang/crates.io-index)" = "fb0ed45fdc32f9ab426238fba9407dfead7bacd7900c9b4dd3f396f46eafdae3"
 "checksum core-text 9.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "2bd581c37283d0c23311d179aefbb891f2324ee0405da58a26e8594ab76e5748"
 "checksum cose 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "72fa26cb151d3ae4b70f63d67d0fed57ce04220feafafbae7f503bef7aae590d"
 "checksum cose-c 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "49726015ab0ca765144fcca61e4a7a543a16b795a777fa53f554da2fffff9a94"
+"checksum crossbeam-deque 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f739f8c5363aca78cfb059edf753d8f0d36908c348f3d8d1503f03d8b75d9cf3"
+"checksum crossbeam-epoch 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "59796cc6cbbdc6bb319161349db0c3250ec73ec7fcb763a51065ec4e2e158552"
+"checksum crossbeam-utils 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "2760899e32a1d58d5abb31129f8fae5de75220bc2176e77ff7c627ae45c918d9"
 "checksum cssparser 0.23.2 (registry+https://github.com/rust-lang/crates.io-index)" = "8a807ac3ab7a217829c2a3b65732b926b2befe6a35f33b4bf8b503692430f223"
 "checksum cssparser-macros 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "079adec4af52bb5275eadd004292028c79eb3c5f5b4ee8086a36d4197032f6df"
 "checksum cstr 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "b6557bdb1dc9647eae1cf7f5601b14cd45fc3c7ccf2df618387416fe542da6ea"
 "checksum cstr-macros 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f9f316203d1ea36f4f18316822806f6999aa3dc5ed1adf51e35b77e3b3933d78"
 "checksum darling 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d3effd06d4057f275cb7858889f4952920bab78dd8ff0f6e7dfe0c8d2e67ed89"
 "checksum darling_core 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "167dd3e235c2f1da16a635c282630452cdf49191eb05711de1bcd1d3d5068c00"
 "checksum darling_macro 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c53edaba455f6073a10c27c72440860eb3f60444f8c8660a391032eeae744d82"
 "checksum debug_unreachable 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "9a032eac705ca39214d169f83e3d3da290af06d8d1d344d1baad2fd002dca4b3"
@@ -1820,16 +1857,17 @@ dependencies = [
 "checksum libc 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)" = "5ba3df4dcb460b9dfbd070d41c94c19209620c191b0340b929ce748a2bcd42d2"
 "checksum libloading 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "be99f814beb3e9503a786a592c909692bb6d4fc5a695f6ed7987223acfbd5194"
 "checksum libudev 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ea626d3bdf40a1c5aee3bcd4f40826970cae8d80a8fec934c82a63840094dcfe"
 "checksum log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "880f77541efa6e5cc74e76910c9884d9859683118839d6a1dc3b11e63512565b"
 "checksum log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "89f010e843f2b1a31dbd316b3b8d443758bc634bed37aabade59c686d644e0a2"
 "checksum matches 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "100aabe6b8ff4e4a7e32c1c13523379802df0772b82466207ac25b013f193376"
 "checksum memchr 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "148fab2e51b4f1cfc66da2a7c32981d1d3c083a803978268bb11fe4b86925e7a"
 "checksum memmap 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "46f3c7359028b31999287dae4e5047ddfe90a23b7dca2282ce759b491080c99b"
+"checksum memoffset 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0f9dc261e2b62d7a622bf416ea3c5245cdd5d9a7fcc428c0d06804dfce1775b3"
 "checksum mio 0.6.9 (registry+https://github.com/rust-lang/crates.io-index)" = "9e965267d4d58496fc4f740e9861118367f13570cadf66316ed2c3f2f14d87c7"
 "checksum mio-uds 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)" = "1731a873077147b626d89cc6c2a0db6288d607496c5d10c0cfcf3adc697ec673"
 "checksum miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "8c1f2f3b1cf331de6896aabf6e9d55dca90356cc9960cca7eaaf408a355ae919"
 "checksum moz_cbor 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "20c82a57087fd5990d7122dbff1607c3b20c3d2958e9d9ad9765aab415e2c91c"
 "checksum mp4parse_fallible 0.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "6626c2aef76eb8f984eef02e475883d3fe9112e114720446c5810fc5f045cd30"
 "checksum net2 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)" = "3a80f842784ef6c9a958b68b7516bc7e35883c614004dd94959a4dca1b716c09"
 "checksum nodrop 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)" = "9a2228dca57108069a5262f2ed8bd2e82496d2e074a06d1ccc7ce1687b6ae0a2"
 "checksum nom 1.2.4 (registry+https://github.com/rust-lang/crates.io-index)" = "a5b8c256fd9471521bcb84c3cdba98921497f1a331cbc15b8030fc63b82050ce"
@@ -1851,17 +1889,18 @@ dependencies = [
 "checksum plane-split 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d2adb8d1523b2ddcd98275613e9bc04eef75b47a39e252e63733a3218ae3c1b7"
 "checksum precomputed-hash 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "925383efa346730478fb4838dbe9137d2a47675ad789c546d150a6e1dd4ab31c"
 "checksum proc-macro2 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "d1cb7aaaa4bf022ec2b14ff2f2ba1643a22f3cee88df014a85e14b392282c61d"
 "checksum procedural-masquerade 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "9f566249236c6ca4340f7ca78968271f0ed2b0f234007a61b66f9ecd0af09260"
 "checksum quote 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6e920b65c65f10b2ae65c831a81a073a89edd28c7cce89475bff467ab4167a"
 "checksum quote 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "1eca14c727ad12702eb4b6bfb5a232287dcf8385cb8ca83a3eeaf6519c44c408"
 "checksum rand 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)" = "6475140dfd8655aeb72e1fd4b7a1cc1c202be65d71669476e392fe62532b9edd"
 "checksum rayon 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)" = "b614fe08b6665cb9a231d07ac1364b0ef3cb3698f1239ee0c4c3a88a524f54c8"
-"checksum rayon-core 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e2c21a92a5dca958fb030787c1158446c6deb7f976399b72fa8074603f169e2a"
+"checksum rayon 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "485541959c8ecc49865526fe6c4de9653dd6e60d829d6edf0be228167b60372d"
+"checksum rayon-core 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "9d24ad214285a7729b174ed6d3bcfcb80177807f959d95fafd5bfc5c4f201ac8"
 "checksum redox_syscall 0.1.32 (registry+https://github.com/rust-lang/crates.io-index)" = "ab105df655884ede59d45b7070c8a65002d921461ee813a024558ca16030eea0"
 "checksum regex 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "1731164734096285ec2a5ec7fea5248ae2f5485b3feeb0115af4fda2183b2d1b"
 "checksum regex-syntax 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ad890a5eef7953f55427c50575c680c42841653abd2b028b68cd223d157f62db"
 "checksum ron 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "da06feaa07f69125ab9ddc769b11de29090122170b402547f64b86fe16ebc399"
 "checksum runloop 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5d79b4b604167921892e84afbbaad9d5ad74e091bf6c511d9dbfb0593f09fabd"
 "checksum same-file 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "d931a44fdaa43b8637009e7632a02adc4f2b2e0733c08caa4cf00e8da4a117a7"
 "checksum scoped-tls 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f417c22df063e9450888a7561788e9bd46d3bb3c1466435b4eccb903807f147d"
 "checksum scopeguard 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "c79eb2c3ac4bc2507cda80e7f3ac5b88bd8eae4c0914d5663e6a8933994be918"