Bug 1443988 - P2: Update futures and futures-cpupool crates. r?mbrubeck draft
authorDan Glastonbury <dan.glastonbury@gmail.com>
Thu, 08 Mar 2018 12:23:10 +1000
changeset 764721 ce14bc75ee81ecb3f722d00aae477dab203fc003
parent 764720 2413ab9548779e69c895f5835f06408d145a7762
push id101830
push userbmo:dglastonbury@mozilla.com
push dateThu, 08 Mar 2018 03:02:01 +0000
reviewersmbrubeck
bugs1443988
milestone60.0a1
Bug 1443988 - P2: Update futures and futures-cpupool crates. r?mbrubeck * futures: 0.1.13 -> 0.1.18 * futures-cpupool: 0.1.5 -> 0.1.8 MozReview-Commit-ID: LDYFHxBfQMU
Cargo.lock
third_party/rust/futures-cpupool/.cargo-checksum.json
third_party/rust/futures-cpupool/Cargo.toml
third_party/rust/futures-cpupool/LICENSE-APACHE
third_party/rust/futures-cpupool/LICENSE-MIT
third_party/rust/futures-cpupool/src/lib.rs
third_party/rust/futures-cpupool/tests/smoke.rs
third_party/rust/futures/.cargo-checksum.json
third_party/rust/futures/.travis.yml
third_party/rust/futures/CHANGELOG.md
third_party/rust/futures/Cargo.toml
third_party/rust/futures/FAQ.md
third_party/rust/futures/README.md
third_party/rust/futures/appveyor.yml
third_party/rust/futures/benches/bilock.rs
third_party/rust/futures/benches/futures_unordered.rs
third_party/rust/futures/benches/poll.rs
third_party/rust/futures/benches/sync_mpsc.rs
third_party/rust/futures/benches/thread_notify.rs
third_party/rust/futures/src/executor.rs
third_party/rust/futures/src/future/catch_unwind.rs
third_party/rust/futures/src/future/chain.rs
third_party/rust/futures/src/future/either.rs
third_party/rust/futures/src/future/flatten.rs
third_party/rust/futures/src/future/inspect.rs
third_party/rust/futures/src/future/join.rs
third_party/rust/futures/src/future/join_all.rs
third_party/rust/futures/src/future/mod.rs
third_party/rust/futures/src/future/result.rs
third_party/rust/futures/src/future/select2.rs
third_party/rust/futures/src/future/select_all.rs
third_party/rust/futures/src/future/select_ok.rs
third_party/rust/futures/src/future/shared.rs
third_party/rust/futures/src/lib.rs
third_party/rust/futures/src/poll.rs
third_party/rust/futures/src/resultstream.rs
third_party/rust/futures/src/sink/buffer.rs
third_party/rust/futures/src/sink/fanout.rs
third_party/rust/futures/src/sink/flush.rs
third_party/rust/futures/src/sink/from_err.rs
third_party/rust/futures/src/sink/map_err.rs
third_party/rust/futures/src/sink/mod.rs
third_party/rust/futures/src/sink/send.rs
third_party/rust/futures/src/sink/send_all.rs
third_party/rust/futures/src/sink/wait.rs
third_party/rust/futures/src/sink/with.rs
third_party/rust/futures/src/sink/with_flat_map.rs
third_party/rust/futures/src/stack.rs
third_party/rust/futures/src/stream/and_then.rs
third_party/rust/futures/src/stream/buffer_unordered.rs
third_party/rust/futures/src/stream/buffered.rs
third_party/rust/futures/src/stream/chunks.rs
third_party/rust/futures/src/stream/concat.rs
third_party/rust/futures/src/stream/filter.rs
third_party/rust/futures/src/stream/filter_map.rs
third_party/rust/futures/src/stream/flatten.rs
third_party/rust/futures/src/stream/fold.rs
third_party/rust/futures/src/stream/for_each.rs
third_party/rust/futures/src/stream/forward.rs
third_party/rust/futures/src/stream/from_err.rs
third_party/rust/futures/src/stream/fuse.rs
third_party/rust/futures/src/stream/futures_ordered.rs
third_party/rust/futures/src/stream/futures_unordered.rs
third_party/rust/futures/src/stream/inspect.rs
third_party/rust/futures/src/stream/inspect_err.rs
third_party/rust/futures/src/stream/iter.rs
third_party/rust/futures/src/stream/iter_ok.rs
third_party/rust/futures/src/stream/iter_result.rs
third_party/rust/futures/src/stream/map.rs
third_party/rust/futures/src/stream/map_err.rs
third_party/rust/futures/src/stream/merge.rs
third_party/rust/futures/src/stream/mod.rs
third_party/rust/futures/src/stream/once.rs
third_party/rust/futures/src/stream/poll_fn.rs
third_party/rust/futures/src/stream/repeat.rs
third_party/rust/futures/src/stream/select.rs
third_party/rust/futures/src/stream/skip.rs
third_party/rust/futures/src/stream/skip_while.rs
third_party/rust/futures/src/stream/split.rs
third_party/rust/futures/src/stream/take.rs
third_party/rust/futures/src/stream/take_while.rs
third_party/rust/futures/src/stream/unfold.rs
third_party/rust/futures/src/stream/wait.rs
third_party/rust/futures/src/stream/zip.rs
third_party/rust/futures/src/sync/bilock.rs
third_party/rust/futures/src/sync/mod.rs
third_party/rust/futures/src/sync/mpsc/mod.rs
third_party/rust/futures/src/sync/mpsc/queue.rs
third_party/rust/futures/src/sync/oneshot.rs
third_party/rust/futures/src/task.rs
third_party/rust/futures/src/task_impl/atomic_task.rs
third_party/rust/futures/src/task_impl/core.rs
third_party/rust/futures/src/task_impl/data.rs
third_party/rust/futures/src/task_impl/mod.rs
third_party/rust/futures/src/task_impl/std/data.rs
third_party/rust/futures/src/task_impl/std/mod.rs
third_party/rust/futures/src/task_impl/std/task_rc.rs
third_party/rust/futures/src/task_impl/std/unpark_mutex.rs
third_party/rust/futures/src/task_impl/task_rc.rs
third_party/rust/futures/src/task_impl/unpark_mutex.rs
third_party/rust/futures/src/unsync/mpsc.rs
third_party/rust/futures/src/unsync/oneshot.rs
third_party/rust/futures/tests/all.rs
third_party/rust/futures/tests/bilock.rs
third_party/rust/futures/tests/buffer_unordered.rs
third_party/rust/futures/tests/channel.rs
third_party/rust/futures/tests/eager_drop.rs
third_party/rust/futures/tests/eventual.rs
third_party/rust/futures/tests/fuse.rs
third_party/rust/futures/tests/future_flatten_stream.rs
third_party/rust/futures/tests/futures_ordered.rs
third_party/rust/futures/tests/futures_unordered.rs
third_party/rust/futures/tests/inspect.rs
third_party/rust/futures/tests/mpsc-close.rs
third_party/rust/futures/tests/mpsc.rs
third_party/rust/futures/tests/oneshot.rs
third_party/rust/futures/tests/ready_queue.rs
third_party/rust/futures/tests/recurse.rs
third_party/rust/futures/tests/select_all.rs
third_party/rust/futures/tests/select_ok.rs
third_party/rust/futures/tests/shared.rs
third_party/rust/futures/tests/sink.rs
third_party/rust/futures/tests/split.rs
third_party/rust/futures/tests/stream.rs
third_party/rust/futures/tests/stream_catch_unwind.rs
third_party/rust/futures/tests/support/local_executor.rs
third_party/rust/futures/tests/support/mod.rs
third_party/rust/futures/tests/unfold.rs
third_party/rust/futures/tests/unsync-oneshot.rs
third_party/rust/futures/tests/unsync.rs
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -51,17 +51,17 @@ dependencies = [
 [[package]]
 name = "audioipc"
 version = "0.2.1"
 dependencies = [
  "bincode 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "bytes 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
  "cubeb 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "error-chain 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "futures 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)",
  "iovec 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "libc 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
  "memmap 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "scoped-tls 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde 1.0.27 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde_derive 1.0.27 (git+https://github.com/gankro/serde?branch=deserialize_from_enums4)",
  "tokio-core 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -71,33 +71,33 @@ dependencies = [
 
 [[package]]
 name = "audioipc-client"
 version = "0.3.0"
 dependencies = [
  "audioipc 0.2.1",
  "cubeb-backend 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "foreign-types 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "futures 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)",
- "futures-cpupool 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures-cpupool 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)",
  "libc 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
  "tokio-core 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
  "tokio-uds 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "audioipc-server"
 version = "0.2.2"
 dependencies = [
  "audioipc 0.2.1",
  "bytes 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
  "cubeb 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "error-chain 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "futures 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)",
  "lazycell 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "libc 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
  "slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "tokio-core 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
  "tokio-uds 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
@@ -639,25 +639,25 @@ name = "fuchsia-zircon-sys"
 version = "0.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "bitflags 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "futures"
-version = "0.1.13"
+version = "0.1.18"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
 name = "futures-cpupool"
-version = "0.1.5"
+version = "0.1.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "futures 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)",
  "num_cpus 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "fxhash"
 version = "0.2.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
@@ -1834,42 +1834,42 @@ dependencies = [
 ]
 
 [[package]]
 name = "tokio-core"
 version = "0.1.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "bytes 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
- "futures 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)",
  "iovec 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
  "mio 0.6.9 (registry+https://github.com/rust-lang/crates.io-index)",
  "scoped-tls 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "tokio-io 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "tokio-io"
 version = "0.1.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "bytes 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
- "futures 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "tokio-uds"
 version = "0.1.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "bytes 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
- "futures 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)",
  "iovec 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "libc 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
  "mio 0.6.9 (registry+https://github.com/rust-lang/crates.io-index)",
  "mio-uds 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)",
  "tokio-core 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
  "tokio-io 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
@@ -2274,18 +2274,18 @@ dependencies = [
 "checksum euclid 0.17.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b2744c002882c67d0f6d6e8cfdf16eae729dc27744d312745132e62218b7de5c"
 "checksum flate2 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "9fac2277e84e5e858483756647a9d0aa8d9a2b7cba517fd84325a0aaa69a0909"
 "checksum fnv 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = "6cc484842f1e2884faf56f529f960cc12ad8c71ce96cc7abba0a067c98fee344"
 "checksum foreign-types 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5ebc04f19019fff1f2d627b5581574ead502f80c48c88900575a46e0840fe5d0"
 "checksum freetype 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "398b8a11884898184d55aca9806f002b3cf68f0e860e0cbb4586f834ee39b0e7"
 "checksum fs2 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "9ab76cfd2aaa59b7bf6688ad9ba15bbae64bff97f04ea02144cfd3443e5c2866"
 "checksum fuchsia-zircon 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "f6c0581a4e363262e52b87f59ee2afe3415361c6ec35e665924eb08afe8ff159"
 "checksum fuchsia-zircon-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "43f3795b4bae048dc6123a6b972cadde2e676f9ded08aef6bb77f5f157684a82"
-"checksum futures 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)" = "55f0008e13fc853f79ea8fc86e931486860d4c4c156cdffb59fa5f7fa833660a"
-"checksum futures-cpupool 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "a283c84501e92cade5ea673a2a7ca44f71f209ccdd302a3e0896f50083d2c5ff"
+"checksum futures 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)" = "0bab5b5e94f5c31fc764ba5dd9ad16568aae5d4825538c01d6bca680c9bf94a7"
+"checksum futures-cpupool 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "ab90cde24b3319636588d0c35fe03b1333857621051837ed769faefb4c2162e4"
 "checksum fxhash 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c31b6d751ae2c7f11320402d34e41349dd1016f8d5d45e48c4312bc8625af50c"
 "checksum gcc 0.3.54 (registry+https://github.com/rust-lang/crates.io-index)" = "5e33ec290da0d127825013597dbdfc28bee4964690c7ce1166cbc2a7bd08b1bb"
 "checksum gdi32-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "0912515a8ff24ba900422ecda800b52f4016a56251922d397c576bf92c690518"
 "checksum gl_generator 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4f5c19cde55637681450c92f7a05ea16c78e2b6d0587e601ec1ebdab6960854b"
 "checksum gleam 0.4.20 (registry+https://github.com/rust-lang/crates.io-index)" = "959c818d9bbe9f7b7db55dce0bc44673c4da4f4ee122536c40550f984c3b8017"
 "checksum glob 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "8be18de09a56b60ed0edf84bc9df007e30040691af7acd1c41874faac5895bfb"
 "checksum httparse 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "af2f2dd97457e8fb1ae7c5a420db346af389926e36f43768b96f101546b04a07"
 "checksum humantime 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0484fda3e7007f2a4a0d9c3a703ca38c71c54c55602ce4660c419fd32e188c9e"
--- a/third_party/rust/futures-cpupool/.cargo-checksum.json
+++ b/third_party/rust/futures-cpupool/.cargo-checksum.json
@@ -1,1 +1,1 @@
-{"files":{"Cargo.toml":"07c97c2816b3cc41857a0cbbb5109f2a7ef2bd81131a3f4f3621f438a1eb7561","README.md":"09c5f4bacff34b3f7e1969f5b9590c062a8aabac7c2442944eab1d2fc1301373","src/lib.rs":"a368e87ed6f93552ba12391cd765d0b0b34b9fe42617a2c1f6a5ce81a0c5de11","tests/smoke.rs":"3e237fc14d19775026f6cff45d73de6bb6b4db6699ce8ab4972ed85165200ec2"},"package":"a283c84501e92cade5ea673a2a7ca44f71f209ccdd302a3e0896f50083d2c5ff"}
\ No newline at end of file
+{"files":{"Cargo.toml":"d65d12c309bb5af442353ceb79339c2d426b1ed643f5eddee14ad22637225ca2","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"69036b033e4bb951821964dbc3d9b1efe6913a6e36d9c1f206de4035a1a85cc4","README.md":"09c5f4bacff34b3f7e1969f5b9590c062a8aabac7c2442944eab1d2fc1301373","src/lib.rs":"2bffe7435a2c13028978955882338fbb9df3644f725a7e9d27b5f1495e3e9f90","tests/smoke.rs":"4c07aad02b0dd17f4723f3be1abbe320629b9e0756c885b44cbc1268141668f1"},"package":"ab90cde24b3319636588d0c35fe03b1333857621051837ed769faefb4c2162e4"}
\ No newline at end of file
--- a/third_party/rust/futures-cpupool/Cargo.toml
+++ b/third_party/rust/futures-cpupool/Cargo.toml
@@ -1,25 +1,32 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g. crates.io) dependencies
+#
+# If you believe there's an error in this file please file an
+# issue against the rust-lang/cargo repository. If you're
+# editing this file be aware that the upstream Cargo.toml
+# will likely look very different (and much more reasonable)
+
 [package]
 name = "futures-cpupool"
-version = "0.1.5"
+version = "0.1.8"
 authors = ["Alex Crichton <alex@alexcrichton.com>"]
-license = "MIT/Apache-2.0"
-repository = "https://github.com/alexcrichton/futures-rs"
+description = "An implementation of thread pools which hand out futures to the results of the\ncomputation on the threads themselves.\n"
 homepage = "https://github.com/alexcrichton/futures-rs"
 documentation = "https://docs.rs/futures-cpupool"
-description = """
-An implementation of thread pools which hand out futures to the results of the
-computation on the threads themselves.
-"""
+license = "MIT/Apache-2.0"
+repository = "https://github.com/alexcrichton/futures-rs"
+[dependencies.futures]
+version = "0.1"
+features = ["use_std"]
+default-features = false
 
-[dependencies]
-num_cpus = "1.0"
-
-[dependencies.futures]
-path = ".."
-version = "0.1"
-default-features = false
-features = ["use_std"]
+[dependencies.num_cpus]
+version = "1.0"
 
 [features]
 default = ["with-deprecated"]
 with-deprecated = ["futures/with-deprecated"]
new file mode 100644
--- /dev/null
+++ b/third_party/rust/futures-cpupool/LICENSE-APACHE
@@ -0,0 +1,201 @@
+                              Apache License
+                        Version 2.0, January 2004
+                     http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+   "License" shall mean the terms and conditions for use, reproduction,
+   and distribution as defined by Sections 1 through 9 of this document.
+
+   "Licensor" shall mean the copyright owner or entity authorized by
+   the copyright owner that is granting the License.
+
+   "Legal Entity" shall mean the union of the acting entity and all
+   other entities that control, are controlled by, or are under common
+   control with that entity. For the purposes of this definition,
+   "control" means (i) the power, direct or indirect, to cause the
+   direction or management of such entity, whether by contract or
+   otherwise, or (ii) ownership of fifty percent (50%) or more of the
+   outstanding shares, or (iii) beneficial ownership of such entity.
+
+   "You" (or "Your") shall mean an individual or Legal Entity
+   exercising permissions granted by this License.
+
+   "Source" form shall mean the preferred form for making modifications,
+   including but not limited to software source code, documentation
+   source, and configuration files.
+
+   "Object" form shall mean any form resulting from mechanical
+   transformation or translation of a Source form, including but
+   not limited to compiled object code, generated documentation,
+   and conversions to other media types.
+
+   "Work" shall mean the work of authorship, whether in Source or
+   Object form, made available under the License, as indicated by a
+   copyright notice that is included in or attached to the work
+   (an example is provided in the Appendix below).
+
+   "Derivative Works" shall mean any work, whether in Source or Object
+   form, that is based on (or derived from) the Work and for which the
+   editorial revisions, annotations, elaborations, or other modifications
+   represent, as a whole, an original work of authorship. For the purposes
+   of this License, Derivative Works shall not include works that remain
+   separable from, or merely link (or bind by name) to the interfaces of,
+   the Work and Derivative Works thereof.
+
+   "Contribution" shall mean any work of authorship, including
+   the original version of the Work and any modifications or additions
+   to that Work or Derivative Works thereof, that is intentionally
+   submitted to Licensor for inclusion in the Work by the copyright owner
+   or by an individual or Legal Entity authorized to submit on behalf of
+   the copyright owner. For the purposes of this definition, "submitted"
+   means any form of electronic, verbal, or written communication sent
+   to the Licensor or its representatives, including but not limited to
+   communication on electronic mailing lists, source code control systems,
+   and issue tracking systems that are managed by, or on behalf of, the
+   Licensor for the purpose of discussing and improving the Work, but
+   excluding communication that is conspicuously marked or otherwise
+   designated in writing by the copyright owner as "Not a Contribution."
+
+   "Contributor" shall mean Licensor and any individual or Legal Entity
+   on behalf of whom a Contribution has been received by Licensor and
+   subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+   this License, each Contributor hereby grants to You a perpetual,
+   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+   copyright license to reproduce, prepare Derivative Works of,
+   publicly display, publicly perform, sublicense, and distribute the
+   Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+   this License, each Contributor hereby grants to You a perpetual,
+   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+   (except as stated in this section) patent license to make, have made,
+   use, offer to sell, sell, import, and otherwise transfer the Work,
+   where such license applies only to those patent claims licensable
+   by such Contributor that are necessarily infringed by their
+   Contribution(s) alone or by combination of their Contribution(s)
+   with the Work to which such Contribution(s) was submitted. If You
+   institute patent litigation against any entity (including a
+   cross-claim or counterclaim in a lawsuit) alleging that the Work
+   or a Contribution incorporated within the Work constitutes direct
+   or contributory patent infringement, then any patent licenses
+   granted to You under this License for that Work shall terminate
+   as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+   Work or Derivative Works thereof in any medium, with or without
+   modifications, and in Source or Object form, provided that You
+   meet the following conditions:
+
+   (a) You must give any other recipients of the Work or
+       Derivative Works a copy of this License; and
+
+   (b) You must cause any modified files to carry prominent notices
+       stating that You changed the files; and
+
+   (c) You must retain, in the Source form of any Derivative Works
+       that You distribute, all copyright, patent, trademark, and
+       attribution notices from the Source form of the Work,
+       excluding those notices that do not pertain to any part of
+       the Derivative Works; and
+
+   (d) If the Work includes a "NOTICE" text file as part of its
+       distribution, then any Derivative Works that You distribute must
+       include a readable copy of the attribution notices contained
+       within such NOTICE file, excluding those notices that do not
+       pertain to any part of the Derivative Works, in at least one
+       of the following places: within a NOTICE text file distributed
+       as part of the Derivative Works; within the Source form or
+       documentation, if provided along with the Derivative Works; or,
+       within a display generated by the Derivative Works, if and
+       wherever such third-party notices normally appear. The contents
+       of the NOTICE file are for informational purposes only and
+       do not modify the License. You may add Your own attribution
+       notices within Derivative Works that You distribute, alongside
+       or as an addendum to the NOTICE text from the Work, provided
+       that such additional attribution notices cannot be construed
+       as modifying the License.
+
+   You may add Your own copyright statement to Your modifications and
+   may provide additional or different license terms and conditions
+   for use, reproduction, or distribution of Your modifications, or
+   for any such Derivative Works as a whole, provided Your use,
+   reproduction, and distribution of the Work otherwise complies with
+   the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+   any Contribution intentionally submitted for inclusion in the Work
+   by You to the Licensor shall be under the terms and conditions of
+   this License, without any additional terms or conditions.
+   Notwithstanding the above, nothing herein shall supersede or modify
+   the terms of any separate license agreement you may have executed
+   with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+   names, trademarks, service marks, or product names of the Licensor,
+   except as required for reasonable and customary use in describing the
+   origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+   agreed to in writing, Licensor provides the Work (and each
+   Contributor provides its Contributions) on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+   implied, including, without limitation, any warranties or conditions
+   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+   PARTICULAR PURPOSE. You are solely responsible for determining the
+   appropriateness of using or redistributing the Work and assume any
+   risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+   whether in tort (including negligence), contract, or otherwise,
+   unless required by applicable law (such as deliberate and grossly
+   negligent acts) or agreed to in writing, shall any Contributor be
+   liable to You for damages, including any direct, indirect, special,
+   incidental, or consequential damages of any character arising as a
+   result of this License or out of the use or inability to use the
+   Work (including but not limited to damages for loss of goodwill,
+   work stoppage, computer failure or malfunction, or any and all
+   other commercial damages or losses), even if such Contributor
+   has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+   the Work or Derivative Works thereof, You may choose to offer,
+   and charge a fee for, acceptance of support, warranty, indemnity,
+   or other liability obligations and/or rights consistent with this
+   License. However, in accepting such obligations, You may act only
+   on Your own behalf and on Your sole responsibility, not on behalf
+   of any other Contributor, and only if You agree to indemnify,
+   defend, and hold each Contributor harmless for any liability
+   incurred by, or claims asserted against, such Contributor by reason
+   of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+   To apply the Apache License to your work, attach the following
+   boilerplate notice, with the fields enclosed by brackets "[]"
+   replaced with your own identifying information. (Don't include
+   the brackets!)  The text should be enclosed in the appropriate
+   comment syntax for the file format. We also recommend that a
+   file or class name and description of purpose be included on the
+   same "printed page" as the copyright notice for easier
+   identification within third-party archives.
+
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
new file mode 100644
--- /dev/null
+++ b/third_party/rust/futures-cpupool/LICENSE-MIT
@@ -0,0 +1,25 @@
+Copyright (c) 2016 Alex Crichton
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
--- a/third_party/rust/futures-cpupool/src/lib.rs
+++ b/third_party/rust/futures-cpupool/src/lib.rs
@@ -8,18 +8,18 @@
 //!
 //! ```rust
 //! extern crate futures;
 //! extern crate futures_cpupool;
 //!
 //! use futures::Future;
 //! use futures_cpupool::CpuPool;
 //!
-//! # fn long_running_future(a: u32) -> futures::future::BoxFuture<u32, ()> {
-//! #     futures::future::result(Ok(a)).boxed()
+//! # fn long_running_future(a: u32) -> Box<futures::future::Future<Item = u32, Error = ()> + Send> {
+//! #     Box::new(futures::future::result(Ok(a)))
 //! # }
 //! # fn main() {
 //!
 //! // Create a worker thread pool with four threads
 //! let pool = CpuPool::new(4);
 //!
 //! // Execute some work on the thread pool, optionally closing over data.
 //! let a = pool.spawn(long_running_future(2));
@@ -30,30 +30,32 @@
 //! let c = a.join(b).map(|(a, b)| a + b).wait().unwrap();
 //!
 //! // Print out the result
 //! println!("{:?}", c);
 //! # }
 //! ```
 
 #![deny(missing_docs)]
+#![deny(missing_debug_implementations)]
 
 extern crate futures;
 extern crate num_cpus;
 
 use std::panic::{self, AssertUnwindSafe};
 use std::sync::{Arc, Mutex};
 use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
 use std::sync::mpsc;
 use std::thread;
+use std::fmt;
 
 use futures::{IntoFuture, Future, Poll, Async};
-use futures::future::lazy;
+use futures::future::{lazy, Executor, ExecuteError};
 use futures::sync::oneshot::{channel, Sender, Receiver};
-use futures::executor::{self, Run, Executor};
+use futures::executor::{self, Run, Executor as OldExecutor};
 
 /// A thread pool intended to run CPU intensive work.
 ///
 /// This thread pool will hand out futures representing the completed work
 /// that happens on the thread pool itself, and the futures can then be later
 /// composed with other work as part of an overall computation.
 ///
 /// The worker threads associated with a thread pool are kept alive so long as
@@ -73,49 +75,62 @@ pub struct CpuPool {
 }
 
 /// Thread pool configuration object
 ///
 /// Builder starts with a number of workers equal to the number
 /// of CPUs on the host. But you can change it until you call `create()`.
 pub struct Builder {
     pool_size: usize,
+    stack_size: usize,
     name_prefix: Option<String>,
     after_start: Option<Arc<Fn() + Send + Sync>>,
     before_stop: Option<Arc<Fn() + Send + Sync>>,
 }
 
 struct MySender<F, T> {
     fut: F,
     tx: Option<Sender<T>>,
     keep_running_flag: Arc<AtomicBool>,
 }
 
-fn _assert() {
-    fn _assert_send<T: Send>() {}
-    fn _assert_sync<T: Sync>() {}
-    _assert_send::<CpuPool>();
-    _assert_sync::<CpuPool>();
-}
+trait AssertSendSync: Send + Sync {}
+impl AssertSendSync for CpuPool {}
 
 struct Inner {
     tx: Mutex<mpsc::Sender<Message>>,
     rx: Mutex<mpsc::Receiver<Message>>,
     cnt: AtomicUsize,
     size: usize,
-    after_start: Option<Arc<Fn() + Send + Sync>>,
-    before_stop: Option<Arc<Fn() + Send + Sync>>,
+}
+
+impl fmt::Debug for CpuPool {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_struct("CpuPool")
+            .field("size", &self.inner.size)
+            .finish()
+    }
+}
+
+impl fmt::Debug for Builder {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_struct("Builder")
+            .field("pool_size", &self.pool_size)
+            .field("name_prefix", &self.name_prefix)
+            .finish()
+    }
 }
 
 /// The type of future returned from the `CpuPool::spawn` function, which
 /// proxies the futures running on the thread pool.
 ///
 /// This future will resolve in the same way as the underlying future, and it
 /// will propagate panics.
 #[must_use]
+#[derive(Debug)]
 pub struct CpuFuture<T, E> {
     inner: Receiver<thread::Result<Result<T, E>>>,
     keep_running_flag: Arc<AtomicBool>,
 }
 
 enum Message {
     Run(Run),
     Close,
@@ -124,33 +139,43 @@ enum Message {
 impl CpuPool {
     /// Creates a new thread pool with `size` worker threads associated with it.
     ///
     /// The returned handle can use `execute` to run work on this thread pool,
     /// and clones can be made of it to get multiple references to the same
     /// thread pool.
     ///
     /// This is a shortcut for:
+    ///
     /// ```rust
+    /// # use futures_cpupool::{Builder, CpuPool};
+    /// #
+    /// # fn new(size: usize) -> CpuPool {
     /// Builder::new().pool_size(size).create()
+    /// # }
     /// ```
     ///
     /// # Panics
     ///
     /// Panics if `size == 0`.
     pub fn new(size: usize) -> CpuPool {
         Builder::new().pool_size(size).create()
     }
 
     /// Creates a new thread pool with a number of workers equal to the number
     /// of CPUs on the host.
     ///
     /// This is a shortcut for:
+    ///
     /// ```rust
+    /// # use futures_cpupool::{Builder, CpuPool};
+    /// #
+    /// # fn new_num_cpus() -> CpuPool {
     /// Builder::new().create()
+    /// # }
     /// ```
     pub fn new_num_cpus() -> CpuPool {
         Builder::new().create()
     }
 
     /// Spawns a future to run on this thread pool, returning a future
     /// representing the produced value.
     ///
@@ -173,17 +198,17 @@ impl CpuPool {
     /// the middle of working, it will be interrupted when possible.
     pub fn spawn<F>(&self, f: F) -> CpuFuture<F::Item, F::Error>
         where F: Future + Send + 'static,
               F::Item: Send + 'static,
               F::Error: Send + 'static,
     {
         let (tx, rx) = channel();
         let keep_running_flag = Arc::new(AtomicBool::new(false));
-        // AssertUnwindSafe is used here becuase `Send + 'static` is basically
+        // AssertUnwindSafe is used here because `Send + 'static` is basically
         // an alias for an implementation of the `UnwindSafe` trait but we can't
         // express that in the standard library right now.
         let sender = MySender {
             fut: AssertUnwindSafe(f).catch_unwind(),
             tx: Some(tx),
             keep_running_flag: keep_running_flag.clone(),
         };
         executor::spawn(sender).execute(self.inner.clone());
@@ -205,31 +230,40 @@ impl CpuPool {
               R::Future: Send + 'static,
               R::Item: Send + 'static,
               R::Error: Send + 'static,
     {
         self.spawn(lazy(f))
     }
 }
 
+impl<F> Executor<F> for CpuPool
+    where F: Future<Item = (), Error = ()> + Send + 'static,
+{
+    fn execute(&self, future: F) -> Result<(), ExecuteError<F>> {
+        executor::spawn(future).execute(self.inner.clone());
+        Ok(())
+    }
+}
+
 impl Inner {
     fn send(&self, msg: Message) {
         self.tx.lock().unwrap().send(msg).unwrap();
     }
 
-    fn work(&self) {
-        self.after_start.as_ref().map(|fun| fun());
+    fn work(&self, after_start: Option<Arc<Fn() + Send + Sync>>, before_stop: Option<Arc<Fn() + Send + Sync>>) {
+        after_start.map(|fun| fun());
         loop {
             let msg = self.rx.lock().unwrap().recv().unwrap();
             match msg {
                 Message::Run(r) => r.run(),
                 Message::Close => break,
             }
         }
-        self.before_stop.as_ref().map(|fun| fun());
+        before_stop.map(|fun| fun());
     }
 }
 
 impl Clone for CpuPool {
     fn clone(&self) -> CpuPool {
         self.inner.cnt.fetch_add(1, Ordering::Relaxed);
         CpuPool { inner: self.inner.clone() }
     }
@@ -240,17 +274,17 @@ impl Drop for CpuPool {
         if self.inner.cnt.fetch_sub(1, Ordering::Relaxed) == 1 {
             for _ in 0..self.inner.size {
                 self.inner.send(Message::Close);
             }
         }
     }
 }
 
-impl Executor for Inner {
+impl OldExecutor for Inner {
     fn execute(&self, run: Run) {
         self.send(Message::Run(run))
     }
 }
 
 impl<T, E> CpuFuture<T, E> {
     /// Drop this future without canceling the underlying future.
     ///
@@ -262,17 +296,17 @@ impl<T, E> CpuFuture<T, E> {
     }
 }
 
 impl<T: Send + 'static, E: Send + 'static> Future for CpuFuture<T, E> {
     type Item = T;
     type Error = E;
 
     fn poll(&mut self) -> Poll<T, E> {
-        match self.inner.poll().expect("shouldn't be canceled") {
+        match self.inner.poll().expect("cannot poll CpuFuture twice") {
             Async::Ready(Ok(Ok(e))) => Ok(e.into()),
             Async::Ready(Ok(Err(e))) => Err(e),
             Async::Ready(Err(e)) => panic::resume_unwind(e),
             Async::NotReady => Ok(Async::NotReady),
         }
     }
 }
 
@@ -302,53 +336,64 @@ impl<F: Future> Future for MySender<F, R
 }
 
 impl Builder {
     /// Create a builder a number of workers equal to the number
     /// of CPUs on the host.
     pub fn new() -> Builder {
         Builder {
             pool_size: num_cpus::get(),
+            stack_size: 0,
             name_prefix: None,
             after_start: None,
             before_stop: None,
         }
     }
 
     /// Set size of a future CpuPool
     ///
     /// The size of a thread pool is the number of worker threads spawned
     pub fn pool_size(&mut self, size: usize) -> &mut Self {
         self.pool_size = size;
         self
     }
 
+    /// Set stack size of threads in the pool.
+    pub fn stack_size(&mut self, stack_size: usize) -> &mut Self {
+        self.stack_size = stack_size;
+        self
+    }
+
     /// Set thread name prefix of a future CpuPool
     ///
     /// Thread name prefix is used for generating thread names. For example, if prefix is
     /// `my-pool-`, then threads in the pool will get names like `my-pool-1` etc.
     pub fn name_prefix<S: Into<String>>(&mut self, name_prefix: S) -> &mut Self {
         self.name_prefix = Some(name_prefix.into());
         self
     }
 
     /// Execute function `f` right after each thread is started but before
-    /// running any jobs on it
+    /// running any jobs on it.
     ///
-    /// This is initially intended for bookkeeping and monitoring uses
+    /// This is initially intended for bookkeeping and monitoring uses.
+    /// The `f` will be deconstructed after the `builder` is deconstructed
+    /// and all threads in the pool has executed it.
     pub fn after_start<F>(&mut self, f: F) -> &mut Self
         where F: Fn() + Send + Sync + 'static
     {
         self.after_start = Some(Arc::new(f));
         self
     }
 
-    /// Execute function `f` before each worker thread stops
+    /// Execute function `f` before each worker thread stops.
     ///
-    /// This is initially intended for bookkeeping and monitoring uses
+    /// This is initially intended for bookkeeping and monitoring uses.
+    /// The `f` will be deconstructed after the `builder` is deconstructed
+    /// and all threads in the pool has executed it.
     pub fn before_stop<F>(&mut self, f: F) -> &mut Self
         where F: Fn() + Send + Sync + 'static
     {
         self.before_stop = Some(Arc::new(f));
         self
     }
 
     /// Create CpuPool with configured parameters
@@ -359,26 +404,47 @@ impl Builder {
     pub fn create(&mut self) -> CpuPool {
         let (tx, rx) = mpsc::channel();
         let pool = CpuPool {
             inner: Arc::new(Inner {
                 tx: Mutex::new(tx),
                 rx: Mutex::new(rx),
                 cnt: AtomicUsize::new(1),
                 size: self.pool_size,
-                after_start: self.after_start.clone(),
-                before_stop: self.before_stop.clone(),
             }),
         };
         assert!(self.pool_size > 0);
 
         for counter in 0..self.pool_size {
             let inner = pool.inner.clone();
+            let after_start = self.after_start.clone();
+            let before_stop = self.before_stop.clone();
             let mut thread_builder = thread::Builder::new();
             if let Some(ref name_prefix) = self.name_prefix {
                 thread_builder = thread_builder.name(format!("{}{}", name_prefix, counter));
             }
-            thread_builder.spawn(move || inner.work()).unwrap();
+            if self.stack_size > 0 {
+                thread_builder = thread_builder.stack_size(self.stack_size);
+            }
+            thread_builder.spawn(move || inner.work(after_start, before_stop)).unwrap();
         }
-
         return pool
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use std::sync::mpsc;
+
+    #[test]
+    fn test_drop_after_start() {
+        let (tx, rx) = mpsc::sync_channel(2);
+        let _cpu_pool = Builder::new()
+            .pool_size(2)
+            .after_start(move || tx.send(1).unwrap()).create();
+
+        // After Builder is deconstructed, the tx should be droped
+        // so that we can use rx as an iterator.
+        let count = rx.into_iter().count();
+        assert_eq!(count, 2);
+    }
+}
--- a/third_party/rust/futures-cpupool/tests/smoke.rs
+++ b/third_party/rust/futures-cpupool/tests/smoke.rs
@@ -1,20 +1,20 @@
 extern crate futures;
 extern crate futures_cpupool;
 
 use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
 use std::thread;
 use std::time::Duration;
 
-use futures::future::{Future, BoxFuture};
+use futures::future::Future;
 use futures_cpupool::{CpuPool, Builder};
 
-fn done<T: Send + 'static>(t: T) -> BoxFuture<T, ()> {
-    futures::future::ok(t).boxed()
+fn done<T: Send + 'static>(t: T) -> Box<Future<Item = T, Error = ()> + Send> {
+    Box::new(futures::future::ok(t))
 }
 
 #[test]
 fn join() {
     let pool = CpuPool::new(2);
     let a = pool.spawn(done(1));
     let b = pool.spawn(done(2));
     let res = a.join(b).map(|(a, b)| a + b).wait();
--- a/third_party/rust/futures/.cargo-checksum.json
+++ b/third_party/rust/futures/.cargo-checksum.json
@@ -1,1 +1,1 @@
-{"files":{".travis.yml":"5686f4b7cbca0c317c323326387c6336c96d85ed4ce286d2f0805c04727b509c","Cargo.toml":"631f50135a7e844abc26e9c16b5a216438fe4e58fe582b8e8078507096bba5f4","FAQ.md":"bbc623c1561f55766155ba71a48ef9c63056dfd6c55a71ccc4315f5e37499053","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"69036b033e4bb951821964dbc3d9b1efe6913a6e36d9c1f206de4035a1a85cc4","README.md":"c1c4d9ac68761886c4161f76efc164b8034a3230ac764df18ec191fd6d8de901","appveyor.yml":"a330fd0b75d14f1e800053470462918c16c7590f5e8df2dcb8a178ad09451fd7","src/executor.rs":"d83fbd82ef0d85f8e53bdc4abe4e61e9f68f61bc92fec3bcf19ab88d37e91a21","src/future/and_then.rs":"15653d392d331a1fc4619129f737acc28525c88d1675b7fcea6ed27c5b1bf302","src/future/catch_unwind.rs":"f9c38a9b94283f3f615e8c74417a3865ba4b1beb3fae4541bd4d8db63450f352","src/future/chain.rs":"d37330af6d5a094bca999864800fe897311da33da36fc47e66ec3944b01a4841","src/future/either.rs":"9ce99b5cc19410cb67eeb8d40d55130e34fcb522446ed2a369c486ed51de72de","src/future/empty.rs":"b549a1ca0f21bc6d1a26d9063a9a60deb9235ff7eff5db915050115fed91a9c7","src/future/flatten.rs":"f03a3689d2d3e65a3edb9fbe0d440459d97a767bcc377afb4c490e6d8c5e73b5","src/future/flatten_stream.rs":"cf914425c3606b61c046df5c43d64266d6f2328693e4122441f9bbcf7cb0a4e1","src/future/from_err.rs":"a1f42d95f7b52e80c2e5a03b44cbce0efbe5fc486dfe33d799b74ab9ba9057ab","src/future/fuse.rs":"41098c6693e1416679e1628776d7925cbd55446cd6b957080cd48e9bbf34ff65","src/future/into_stream.rs":"0fa6bc4d70e8b4d75cf45fba53b39f033b87574103fffea4090b78f049bf43d0","src/future/join.rs":"01a0e611ea7d51ac58381364ef2602ce3ef18ca32efafa7830b4e32bf646385b","src/future/join_all.rs":"6f36cfad1bbbf72356fc87e7d4eeccef0964dc3f8aa8687f5c87554f1b292a2e","src/future/lazy.rs":"1a2025bae3675fb682cefbf8a88bbb7a7519cfdee42dd6b3049a4d2b7ab8b5b1","src/future/loop_fn.rs":"5bd952247ae4e9d31dff77386bbd3700f596da136ea53e9e9944266af3f08688","src/future/map.rs":"91e148d9adaea929b85ede63c71fb07ef9b5611db
906a13eedad2cf551745b47","src/future/map_err.rs":"2c8e87fa8ff56061722db6c69aaba588e6df6835a4e2fe84826f0bd4fed2e007","src/future/mod.rs":"d0fb5b3acfd96d275519042e5e1d2c3c9eb0bb9f23c8a501401ab999dbafb8c9","src/future/option.rs":"93270226cadcfa349250023e2070e687cf595831f427904ca744f7bc50342ded","src/future/or_else.rs":"444567101c4c437b184aa2e2eec0cf4363af442c0afc58d6508d3d2ac86489a9","src/future/poll_fn.rs":"817bfb75e7c43ca96a53e8cc9f48606c92c3c6742b07a732ce79a8f9b7bf8808","src/future/result.rs":"3e1f6cbd813bd2098ad85afc895f1f51396bfff111025cca58d7533acf7e5bc7","src/future/select.rs":"73efd98004d5d8c46607bf770ff07a810bcdbe05cce0e8e4f41f5e659fd44203","src/future/select2.rs":"ac80e0e2db9eb9f5a331a4c25db6c9e0e42294c4e977da2f2181a2a5822a5a34","src/future/select_all.rs":"c47a84f7dad551c2a95c2d2724577f962567aafd7959584a41d68934f6b5ba59","src/future/select_ok.rs":"04f99f4ca85bcece456c8f94edfdee197f6e2e497d51f0f1484553c03d37c11f","src/future/shared.rs":"ddf1897cafb0331c28e8d7b15e7cb7a5067e1a58b1f15ab023e0319c78f32c06","src/future/then.rs":"c49b388ab3c78979ad9ae40f6e859ee98e9351bdb11e3c3f1ad4ceca77651a56","src/lib.rs":"e3c6e1b2989764a97b8b1677e5e2ba80a4c0304ecb73cdae166e6ba2c869c9a7","src/lock.rs":"fe4c8185f9774a134d4ce27af4a9c8b25f30f7dcc6990473210d66b6b8936ce4","src/poll.rs":"05ff3ccb136b3e0e4da497d7b9b48f1dff61a0105546f6d504a3f144c5007124","src/sink/buffer.rs":"aeae8c4440d6fddf4635c9d9d298ba40b02278893703cc87235c8221fee8ec19","src/sink/flush.rs":"a2ee8c2b030dd42830ade7f76ff5505da5fbd59922b1946727a449b37ddb0dce","src/sink/from_err.rs":"0e682d8438180a0c592851e62a122d003251e1709393812a26ca45d38a59157e","src/sink/map_err.rs":"164e136d92dc7993e33cd671f5c459ee5a327eda4a7011e4b2c899ac7396e1b6","src/sink/mod.rs":"4dd651dd60dfed65105532098bb80c9d41b76cc499a737937281f7e7a81f2169","src/sink/send.rs":"8de1091909fea8d59256fa4575f3a6ade8b316eeef56e60e07144db73ca837f4","src/sink/send_all.rs":"5f8521f46e58748f8e2a8040af75566f52242cb4eeffcbade6b900c58c9ccf0d","src/sink/wait.rs":"e3f6827ded4d
689242a0d315033415145a9c3385e675d5cbfac7d1cc801aac64","src/sink/with.rs":"a3a183cebbadb9ff57e8a2d7ccf84f560a6f32c2586beb3960b32033be183de6","src/stack.rs":"76d9922de0286bc7cb4da2ae31f672af38ad658ff1763e17c55e1b94d31b7f85","src/stream/and_then.rs":"fe33b6ddac5048452ba97fe78a50bcf6d6e10d92c641ca9fb14e4373ab7b925e","src/stream/buffer_unordered.rs":"32f3c1b6221da11952649425354c1efbf67e1ae1793d69f0a89c52183651873a","src/stream/buffered.rs":"a28639ec87a0b922cef842a950d803fbc01c403ae14193d792bb9296bda1eed6","src/stream/catch_unwind.rs":"957b935645f1744a4741962772c15e94370153f33e0db356309bf98ebb599c37","src/stream/chain.rs":"0b6b06cf5aaf0c2f665c61c65766d6113e24f690ebd9ad3a89abfa521e2ce9b2","src/stream/channel.rs":"f728402228fea0be01ec5cf1d02e49e52666c0c9ea986708d18e24f30376f6de","src/stream/chunks.rs":"f716e2cee2660fac9fe309c943b3eb00c3a5134fc7453ba309301f8067902daa","src/stream/collect.rs":"e770850c7ed2d458b521c12af4ee76adf2303919849d2f95fa93fdf574c86d37","src/stream/concat.rs":"140265d64a3ebe2945165920483c912fda6d395c2e5d7f882bd08f57ebcce474","src/stream/empty.rs":"e8e2820fd3b2329a6987a11c3b3f28849f49427d1a745f2bdc7a4982476514e7","src/stream/filter.rs":"0f4c2f436225b88172dd5035ac7f1dbf786c09993475c92cd6acd69805f85719","src/stream/filter_map.rs":"57970fabf3017cb0e4b36326234d813e43b19abc768547a7b067a1ef10e8e760","src/stream/flatten.rs":"8ce863e6c5fd92e847416a8d1259a32ef262ac34e19b46b610688b08fa36b3f3","src/stream/fold.rs":"46e575e4b882ae904e79cb046472a942839fe4197d1b8fd0b09987024d074034","src/stream/for_each.rs":"9d260db96b8583d1c983b2b29b791647aa39046590ff256669a796e989ceb71a","src/stream/forward.rs":"ec34bd69c000c72662850b4165c227c97b5ac34b825ef38085cd945174466392","src/stream/from_err.rs":"5912cfb747c286eb30e484ad67cb12ddc780be14fcc6fcd6b25ac5b10ca06b4c","src/stream/fuse.rs":"f0343df89167cc7e3c2354c81396b32f2359cd27ce5eae48c6f2a6f4182e188d","src/stream/future.rs":"e9e3100be0090306efa14ea81cb3680f79770648b72c8616e2fcf61a3bef8f94","src/stream/futures_unordered.rs":"c963c13
a6431a032fc5d7744b5572988ae1c60ec8b6934b6aeb49426d2472476","src/stream/iter.rs":"4492d00d2463e0e04e448b11c9947170a875685d5243a96306495cc14b4d2c14","src/stream/map.rs":"9e2d5c0d68cc6cee83fc1e640450ac0c22f458796bc1e5602d3377ad7b024361","src/stream/map_err.rs":"78cc76fcf3098242e42dee9fa72dc8a55a58b449d5440e11782168923d5ea90c","src/stream/merge.rs":"9b8f31aa4e7623c39e2361db98b3e552bc39ae8933d968ba5150cefe2654bb76","src/stream/mod.rs":"dfd83151e1226a663dc81319dfb1f5bcd8ceb76ce5c4cd62de1f2e32cb799e2d","src/stream/once.rs":"65cd915f645bfcfc560d4e38dcbf47e330b050662456c75f71405b84928deada","src/stream/or_else.rs":"c11ea499d85d6204ad083058eeca9dbf29873c49ee21bf01f9fe53e9ec3bba52","src/stream/peek.rs":"25d78baa0b3e30d2d1c72d1f3b1aa2a28811522d345dceefec587beb18b70fe2","src/stream/repeat.rs":"c047f76b2d6bfb6a217ad81939cc57a6f63b105df1cccb2776db39f97a64961f","src/stream/select.rs":"cb057a133e03833f3387de7045d1d4dc8427d1070696fe9d527591e2dba51e24","src/stream/skip.rs":"3aff9f823cd8211002d36812d6709f22142afffb8bf4b24d33b3cd815b6cb797","src/stream/skip_while.rs":"ff68f87ea4b09f55f8bc3bc03b204849dac2776098c6fcc7bee60612dfc7b2b0","src/stream/split.rs":"5c08f444803ecec385070d92bceab0afff0af957047bcc78e1faac2acf2e59c5","src/stream/take.rs":"2d0a1485a85935db1dc3b57a9d5eb6a1b57d677aeba5eeb31b783ceb3f0223c2","src/stream/take_while.rs":"c542541ccabc362592515f3b463fa8a0c4fec57bf0b98663892a8357ed77c4f1","src/stream/then.rs":"c7c66e27180cf2d98694de27504283a32444a0d0d6919ab25b3621fa6169408d","src/stream/unfold.rs":"7786706eb8f7a79f72e3e560a108adcbd17a0f5bee9d36ef4ca1340b203b18c5","src/stream/wait.rs":"1ad58c82e1861b2a5285911486e6358fb665f8c2f6023037be5dac6019745f6b","src/stream/zip.rs":"05d98559a82ffd77c388e6b664ce54ce4dbccfae680bc47466f1b05a91ad1b21","src/sync/bilock.rs":"c8bf12cd6747daf63a19891d2c018b7a84b5af8c065362580c8a68b641698c07","src/sync/mod.rs":"56df9c0b8a4bfc0887299e1cd1467629fdb6b9889099978b143b54e4f6db1447","src/sync/mpsc/mod.rs":"59a264b783b189cd9acae432ee7614bb3803446f942e9bfdc
af911451d662762","src/sync/mpsc/queue.rs":"ca3c3da09c1b126658f0b5fea1014b2d605be56d19b6d127813c0230cb18f4a8","src/sync/oneshot.rs":"86d800b56cd4e4427651dad7b11ad4680003c3fcc3a928c996fbe223f1db5227","src/task.rs":"05c85355317b8558f821a708b211bdff020fbfdf320bda0c6e2ea80c4b5b6c08","src/task_impl/data.rs":"1345ab12d94a87c41ca2c98d12a4deca671a506854d4b79bc4fd52fe67e31f5d","src/task_impl/mod.rs":"01c735d065e209105cf37098854ee8a2a2287cad69e43ae5edcfdd1a27021687","src/task_impl/task_rc.rs":"685630c9d5b199496a182e6edbb9ae66863c653ca9775af690980148fb6b1378","src/task_impl/unpark_mutex.rs":"e8b27d129191dd1e79b7869b850f77a1f334006d36dfb0bbfa9bb3e13e009934","src/unsync/mod.rs":"e5da32f78212646f0161fec2e7193cda830f541bc9ae37361fbcf82e99cc1d86","src/unsync/mpsc.rs":"a7afe694c58010d5fc7f39f82c3dfe47e167484ac80b12b34fcfd7bae974fd64","src/unsync/oneshot.rs":"58263830fb7238c52ac2f57fbe260664bb5f87229129bb09c98ea4d13056b14f","tests/all.rs":"891051771df3d8daee66380968e41a5a44063d4a3e7c9f2eeda9e81b02144435","tests/bilock.rs":"734bf0fc2f9b6f5047249f570f550dfc8abf958cec4ef757c79327ab2c9817d6","tests/buffer_unordered.rs":"37df5c5e8f7a1198532d2d71268c874c880ed6ea3ea2d34ff6a41e46207e37b2","tests/channel.rs":"3fdff94031fc9d3cedc22bcc0b867242862f2b8d7857496fa3d3f744c2a47e82","tests/eager_drop.rs":"7a232c3d43906843ad179a06cb5e807a9907695ec51a6a3418d1390f0b988b15","tests/eventual.rs":"9102353f670d7d7c579b1bceb35a401aa382ab6f18da91d26fd1f2553f1c516e","tests/fuse.rs":"efe6e4808b2725832642c8a6ebabb09d4319725e4826b4753b5c8f99a5f09522","tests/future_flatten_stream.rs":"2daa00b8916d4c018d8274e5215f5434a072710494707f20b4a0b6f055d530f2","tests/futures_unordered.rs":"849fa8dcd106fcb4a4e5ee79c57c6580bd2b67e5fa5e6732ab4398b309a9d31b","tests/mpsc-close.rs":"3387e2afb4cf6b98e64bc376944a77447653f790a65d98f30e6000e9197c856b","tests/mpsc.rs":"514ee7bb3349647779f945ab3399b32c36680d7b5c06ac6287f76f0192c0e50a","tests/oneshot.rs":"82f20c57d42ddce3dee180da396dd4a8c84023f711124e3facb3dc4c7c481e90","tests/recurse.rs":"
5702590b294493df85b20c239bf82f54a5384789d1439a2f929810525b585f79","tests/select_all.rs":"c7a998215df56ba7336b457798979c9cc38bfb5c30e40a1b4a43d5e58e85d4a1","tests/select_ok.rs":"2ea75dd4dd29d49b659bdc9f2df55e8928b41b55d7b39d80d987ac3273a04245","tests/shared.rs":"898f9dd106eadca3b8ec21675cb0026463c941feb04e5f247e57edf5e5e0d2b5","tests/sink.rs":"df7c44529ba2a04309a4817d2e5aea6788088c375daa382398524117d044a707","tests/split.rs":"635372fa052c4f43b196fabbd1587e0b85c15385a9ab63fe660e18d87e535da6","tests/stream.rs":"f7c5a8481093aeaaf22ba066f8c0311a1bcbe329e6575a8a009b3033442c3d4f","tests/stream_catch_unwind.rs":"6b3b5ab2315682d17df0ebd47b9cfd0f407b02e89970bdf777be9f6c981c1451","tests/support/local_executor.rs":"6c9bba7f628805e36cf47060ad057415d9fa3e6f1cd4a8cec8789022fd022e95","tests/support/mod.rs":"6be1623f036df50846952d0e073e7a7c9c133712643c550a364f9db3bf13e364","tests/unfold.rs":"75b784c8e4bada8e04a615b274d384eb7c8e8b2bd52b4e84b9e1e5bc61f21df7","tests/unsync-oneshot.rs":"b77013799ecd72a9769760c11c6f021756dd3909a085e485b784579a356e5f62","tests/unsync.rs":"100a5a69b5c6af23918e07c6e19a06cc91ed1c51fef2d634144e4b724492057a"},"package":"55f0008e13fc853f79ea8fc86e931486860d4c4c156cdffb59fa5f7fa833660a"}
\ No newline at end of file
+{"files":{".travis.yml":"09f003273cb5a49fd05defc653b5dfc2ce8587ba84c42a6db3909f51eb5f68ab","CHANGELOG.md":"a343d7c2350fa2a9f95e8b49ca8f9e69838437e1819145ceee421477b696a81c","Cargo.toml":"6a3dd737c32ae8ec4a25cc04db19d2a7f95a3988e69cd29c9ad6f04762a2a6fe","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"69036b033e4bb951821964dbc3d9b1efe6913a6e36d9c1f206de4035a1a85cc4","README.md":"9217a715c9730fb2a3814641cb0b0c25a4636c0a9517352f41aa972f3666c22f","appveyor.yml":"7b8de5d694cb575649354d7fc3eff0781e2c5c412df4bc8a90b36b6fdb55bfab","benches/bilock.rs":"60b9e0814b8396e0320d299273c6f91c2ccc09a2bb59eec92df74a1f0919e54f","benches/futures_unordered.rs":"fa2d3b5e6cdfe1e941d78c119a696fb583341fa0a0895ec2692e6d374ceb9a0e","benches/poll.rs":"ca369079c4db366a180be22f406eaf8e94e2e771c02568eb35d89e63093006cf","benches/sync_mpsc.rs":"f7f15346ef258d1defbacc8201cf912e5fe928cb39977d4e15a801a5f95e73c7","benches/thread_notify.rs":"1992b1e2b352fbc15a611d1318ac1bf6f19318d769086d55c80e6863f1b0e106","src/executor.rs":"14cbfd6fd5bd0cc55d78a51c754effa478d21cac1792c8d4daf228f2087b7246","src/future/and_then.rs":"15653d392d331a1fc4619129f737acc28525c88d1675b7fcea6ed27c5b1bf302","src/future/catch_unwind.rs":"dfef6b6a66c09574338046cf23b0c6aacd8200872d512b831d6dc12038f05298","src/future/chain.rs":"4d712e989e079f4164d5d9fe3bb522d521094b0d8083ee639350570444e5bb93","src/future/either.rs":"d8d3a4686dfe0068cc35ee452268ff2406e1e6adfddd3f0841988bfa6489ca5d","src/future/empty.rs":"b549a1ca0f21bc6d1a26d9063a9a60deb9235ff7eff5db915050115fed91a9c7","src/future/flatten.rs":"7eb15429fcc749326371fe571e1f7d294d7b83f7557e6e1971e2206180253d65","src/future/flatten_stream.rs":"cf914425c3606b61c046df5c43d64266d6f2328693e4122441f9bbcf7cb0a4e1","src/future/from_err.rs":"a1f42d95f7b52e80c2e5a03b44cbce0efbe5fc486dfe33d799b74ab9ba9057ab","src/future/fuse.rs":"41098c6693e1416679e1628776d7925cbd55446cd6b957080cd48e9bbf34ff65","src/future/inspect.rs":"89c362d8402dddd784bcc54e62ca276
57ca8108e1ae8de5a7237e08650e10636","src/future/into_stream.rs":"0fa6bc4d70e8b4d75cf45fba53b39f033b87574103fffea4090b78f049bf43d0","src/future/join.rs":"b1dcefb03b1cb4e609ad2e79ba9a6cfab24235d7a4fff7fb9daf2c8fbf0f3d70","src/future/join_all.rs":"30fc27cbc1248046937b441a165a911e9ed1cd887ad6f3aeeb573b59c43e9cbf","src/future/lazy.rs":"1a2025bae3675fb682cefbf8a88bbb7a7519cfdee42dd6b3049a4d2b7ab8b5b1","src/future/loop_fn.rs":"5bd952247ae4e9d31dff77386bbd3700f596da136ea53e9e9944266af3f08688","src/future/map.rs":"91e148d9adaea929b85ede63c71fb07ef9b5611db906a13eedad2cf551745b47","src/future/map_err.rs":"2c8e87fa8ff56061722db6c69aaba588e6df6835a4e2fe84826f0bd4fed2e007","src/future/mod.rs":"c0745575c1b1cf1d63ff9af810206731f4a5f6cfcfc47338272c4f69f8f64694","src/future/option.rs":"93270226cadcfa349250023e2070e687cf595831f427904ca744f7bc50342ded","src/future/or_else.rs":"444567101c4c437b184aa2e2eec0cf4363af442c0afc58d6508d3d2ac86489a9","src/future/poll_fn.rs":"817bfb75e7c43ca96a53e8cc9f48606c92c3c6742b07a732ce79a8f9b7bf8808","src/future/result.rs":"cc62c2377defb7b53aa859bf05c41c52a9cf8583378b7072bb2b45232d5fc9c5","src/future/select.rs":"73efd98004d5d8c46607bf770ff07a810bcdbe05cce0e8e4f41f5e659fd44203","src/future/select2.rs":"cfbbf3a9794109c56a3703456fae6111826bc25f98f2f36b234d483eeeeab482","src/future/select_all.rs":"b009e57ac241a3aba78db0bb751432cb99c1e91b8bae1b3baf225921f0daa441","src/future/select_ok.rs":"4884896914d8903edbfa12b5e255d35d5b2c91a9182ce6f774978db636617905","src/future/shared.rs":"95d22f444e04378f32dbaf139a207451e01bcd12f2e8cf1d4428aa1383b57f0f","src/future/then.rs":"c49b388ab3c78979ad9ae40f6e859ee98e9351bdb11e3c3f1ad4ceca77651a56","src/lib.rs":"6084082d252dab422505eac3da3925d1a001af803cd7b1bc0c57fc8c0d79797e","src/lock.rs":"fe4c8185f9774a134d4ce27af4a9c8b25f30f7dcc6990473210d66b6b8936ce4","src/poll.rs":"df74c3a8169d7895f3c46dd6de99edd77bd024b85e26b1d0644d2b8e5ef515b9","src/resultstream.rs":"365bc127c0410badb58ea2beb2abae546968ba3ac91abe2140e93e0c3620228f","src/si
nk/buffer.rs":"0e0f7d60781b4b2970b1b4508bbf245a20aa22080ce2808161dd48121b03a33d","src/sink/fanout.rs":"1fbcabdb1d22a43919417790082dc27ac65e2a100263504b6664a0b5e0657ae1","src/sink/flush.rs":"6c9a3bb9705c740e601ca6101cf6e6a87f2568661cff39a3576ef55986e3cb60","src/sink/from_err.rs":"cef45aff1c0c7638a507a770b1e6fc8a5b4bf4417ae4b35faa839f579e3ae81d","src/sink/map_err.rs":"7dfd27d87f5877ddae1c30821635dfc3f88f1c243fed234007c9e50fa693ebed","src/sink/mod.rs":"4b4d80d008bfa8d0abc83cd640dc9c107423c7920795678c079c544c037ab632","src/sink/send.rs":"019f3f8ab450edc0adb864e4b819f5b0d4cfe9dc33a53093c2aa18e1eb6270dc","src/sink/send_all.rs":"b05047459faceecf0dfd5e6280014c31f5a2a1058974785db8ede497c10a1e79","src/sink/wait.rs":"9c70fdd54c642e4ecf7d9b0ff1fbb2df9c89349dfd60b5482748cd93c6dc301e","src/sink/with.rs":"1d4ec61bd702196ad8a044dc40bb6967b3dd0c1ee14c32b1481967e71aa61405","src/sink/with_flat_map.rs":"7b0f367d98a99d297c3ce097e9858ad7b0dfdafbb66516cba0767b62beb01af3","src/stream/and_then.rs":"9f0f6ee06343ab03eebcb71257963e76d8e7208e4015b402cc8a58f793e37d79","src/stream/buffer_unordered.rs":"057c3dec32baf451ef02f44ef849086637e4d2cbb2d65907cc15ed9398fe131b","src/stream/buffered.rs":"4ced19e37e47182d5f9c7f852a7906c35b71ac4a5b2774a9101859defbecb190","src/stream/catch_unwind.rs":"957b935645f1744a4741962772c15e94370153f33e0db356309bf98ebb599c37","src/stream/chain.rs":"0b6b06cf5aaf0c2f665c61c65766d6113e24f690ebd9ad3a89abfa521e2ce9b2","src/stream/channel.rs":"f728402228fea0be01ec5cf1d02e49e52666c0c9ea986708d18e24f30376f6de","src/stream/chunks.rs":"6c68b006670f2ea227231ba9a7986c46b4f798a871a3de62dd00acfb84c3435b","src/stream/collect.rs":"e770850c7ed2d458b521c12af4ee76adf2303919849d2f95fa93fdf574c86d37","src/stream/concat.rs":"39549687b589562ce713a999e2887b6f20ec8f87291d82ee8b1a48dd7dfe9c8e","src/stream/empty.rs":"e8e2820fd3b2329a6987a11c3b3f28849f49427d1a745f2bdc7a4982476514e7","src/stream/filter.rs":"4abaf6c7bd3ecbccf7deac7920cc6bdc1b17875bedf7c6acd7e702254b3b83ba","src/stream/filter_map.rs":
"573079f98efc38bbc68746084702b952ccb035bd8238c3c30fa103979865ed0e","src/stream/flatten.rs":"f2edce326745373c9c524bb574ce18584be95c7fd1a0ef875256b39891219b18","src/stream/fold.rs":"7f397373ed66560ff1eb0cffc5dafaf1569d3c8155fe418cc2bf6fc33faec230","src/stream/for_each.rs":"bd7f96bf551a829e37a54fd529e0b68a8868480797df039c75e1f226639cf096","src/stream/forward.rs":"5dd07a3d85130554f6c0c950fd635e4594f43a0284440f6f1af2a240511c5621","src/stream/from_err.rs":"bde1791790030c480aa88c6f7b235703d5b400249c841c8b045ea2203728b96c","src/stream/fuse.rs":"5d544151de7e5a3ce8a47bdeabe5cc9beaf0937b1eeed67e8d76842f54dea65d","src/stream/future.rs":"e9e3100be0090306efa14ea81cb3680f79770648b72c8616e2fcf61a3bef8f94","src/stream/futures_ordered.rs":"3e41623352600e116c327fe37005da04b0dcf1d5db379cab147738a1383732d8","src/stream/futures_unordered.rs":"3a445ebf5815ecbafaef6dab011cc3edf012564082717a615b70425e78142e1e","src/stream/inspect.rs":"4a1e7d7bbb0842a7021c5145bb1b64dbc213cfdccff51fe8399e3120c123eab5","src/stream/inspect_err.rs":"b4f2bc6a139df8f8eb403aafbca91c05b3093d3a6e13cef034a639fbe3ebe01e","src/stream/iter.rs":"cfff6b28759ccf390e8367f9f63209133c16e7fa53c7ae71167f318ba3ec624b","src/stream/iter_ok.rs":"5165cb02972776515734e0f343e626fbb448b65b38cdeacffbd86116f3c3cd37","src/stream/iter_result.rs":"9db38b1066d9adc1ece496432127049d36fb4b9895660c2af2b7ac28510c9084","src/stream/map.rs":"ba16b1469e519377939cf3bd073b258ac41e6349aab1c59393e3b30178a56496","src/stream/map_err.rs":"5ce9a279fde1f4f0887435856e1efa4fdeda749d43f4bab658b0abd216bc0a6f","src/stream/merge.rs":"63bb60ca386e280985cee8e16ae8b07f02d57aa8a0fa877ae01fb8b4678366d0","src/stream/mod.rs":"474de35e551c67950b7713de203a834e2284092a8271b51da4d3b96beeb9197c","src/stream/once.rs":"277c960dc4bfa09fcc6112efa4e38a9fe937dc31fff440405e60bfd843f3c1ab","src/stream/or_else.rs":"c11ea499d85d6204ad083058eeca9dbf29873c49ee21bf01f9fe53e9ec3bba52","src/stream/peek.rs":"25d78baa0b3e30d2d1c72d1f3b1aa2a28811522d345dceefec587beb18b70fe2","src/stream/poll_fn.
rs":"1dffbe60bd50c19efb71de2f768eecf70fa280b0d9c9cb889d16bb43b1619c8b","src/stream/repeat.rs":"807f2be5c9c1e7d54954f73ee38a373e71177aca43be8866712798f29ab541c2","src/stream/select.rs":"027873d9142e896272f7471cccaaccb133bf9f696a3f7510f3fb1aa4253a7c09","src/stream/skip.rs":"d7c839ca15f830709ebedd9526bb9ebd64ee22cb944e44213ce850a1383b71fa","src/stream/skip_while.rs":"aeb9bd64530bfaa631f4ca9500861c62fbf32849b09383eb26904bedd8b8b269","src/stream/split.rs":"c9b391fcbf3d1762bde442fd3549bd4739d2f9f486e88063650d42fea33c6af3","src/stream/take.rs":"9872429dd89cb34755b514abde9b6a876da076aea0449fcadfcc48e982507f21","src/stream/take_while.rs":"36bc2a33850ba2b58fb0da3866c96c8f4dfbd81133e615fda031518e71d425b5","src/stream/then.rs":"c7c66e27180cf2d98694de27504283a32444a0d0d6919ab25b3621fa6169408d","src/stream/unfold.rs":"5e69718714cc38c5ca6d0a6f5243ab28e392bdc97d96e8ab9059d9f0e772120c","src/stream/wait.rs":"936a15df4499d188f210cb0133bc8ad25e33e5b674a96105b4da549f32e92b40","src/stream/zip.rs":"33f1401683a29ce194927533c40bdbbc0783c552cf0b666f268fa7109e593853","src/sync/bilock.rs":"def09b26f9d66f2be0a8885ad6cf7106c3a073493bad591fc4a068212f0d739f","src/sync/mod.rs":"27ad26777f600f7054215fccdff07f4303182af2a6e0998d4229d62b090b7aac","src/sync/mpsc/mod.rs":"edb206061ead2428a418e4f7227df09a7f5339796af094100b176eaa6a7f5a64","src/sync/mpsc/queue.rs":"b39889f1b2000a3de995a50f46243f97a98d3cce7c6de4b95c4d8ffeb42af918","src/sync/oneshot.rs":"ff409b2518d2c41998fcbf88e67d8b0869cd5828784f088c5e3b025b034db37f","src/task.rs":"914955224ba1613835027e6d6436b83ce41caf217428c2c576e8783cacc7ba96","src/task_impl/atomic_task.rs":"79298f2f90aaf2efb63c574346ff1d7a955865a94a5de4321e22a6565c58b15e","src/task_impl/core.rs":"3ababa3970da5668f2b678724a4b5e1aa5f2b65a2355276b7d14ba3dfdd52686","src/task_impl/mod.rs":"89bf59d2cf41a91bcc972ca5c66e72f6ac02b10c70cca501a86238173abee2b2","src/task_impl/std/data.rs":"9b6210811c095c4d0ec0f59a566bb8f5bc4b6ba544c72a4565dc47f3b7fbfab9","src/task_impl/std/mod.rs":"7232659b0ff0e8c4
0abb4fd1c04c216f667837151fc31b2269d703b540aeb25a","src/task_impl/std/task_rc.rs":"a6e46e79fecb1497d603c016f4f1b14523346f74af800c9c27c069229d62dc25","src/task_impl/std/unpark_mutex.rs":"7a53b7209ff00880bce9d912c249b077870625ca87fe9ab7b0f441d3af430302","src/unsync/mod.rs":"e5da32f78212646f0161fec2e7193cda830f541bc9ae37361fbcf82e99cc1d86","src/unsync/mpsc.rs":"ee5fc8723258b25e802c186c2827554a6dd7cfdbfaa50fd9ea50d5b15edf826d","src/unsync/oneshot.rs":"89661388a87d4ac83befc31df9ad11e6a8c6104e2dde7be9e3585d7549cfe8c4","tests/all.rs":"99c6ad1d1e16ad2e0bc3027e1f5cb1a8f89404f71d77d3fc85badb67278f8179","tests/bilock.rs":"68462100c0c1e4e72f220d96ce1e6b25648f4c10a390be8a3bbfa99bbd795f31","tests/buffer_unordered.rs":"50ceb305da08fa095ee40a8f145fa9d95db59372cca949d77f011bbabc072152","tests/channel.rs":"63d6ab1b7fd51680562f9d626a5fab9d4b81226272b5e0f9ca7faa88eae5073a","tests/eager_drop.rs":"e0a615c39f1fb9baae543212e72a165f68e7576f6b8c6db1809149d819bd546b","tests/eventual.rs":"73cbd3836a598175439b5dc5597f7e464dfbc6d77379aaae1172c6c7f85220e5","tests/fuse.rs":"0b7ee173564cf236591d0cbf78fa076af82aad9816eb176ae58d549ecd2fadf9","tests/future_flatten_stream.rs":"133b91a9e2170849ed7dbcb4024675873a781bf2dd190cfcaa9c41418c3ccb97","tests/futures_ordered.rs":"7835bf9bedb9322a93070b5d87886b7a333dc469aee74f7eb86a1a7914b4602c","tests/futures_unordered.rs":"048153d9c4ec3433efbb97edfe01a458762e76160624362c658432f6f2357524","tests/inspect.rs":"d7706a175be9ed6ecc09d7a45e1559160e00da85fa8a9a7caec4c53918999842","tests/mpsc-close.rs":"824cdb5c574459c8a374e6b890140a22edd91f6edef901184b4507f69accf2d5","tests/mpsc.rs":"d129624af8c156566faba996bca36f5c86f902496b78fa99c8e9c0040693b53c","tests/oneshot.rs":"a8773b3a65e79944045118f36bfd81fceb826d4e2846b46f86db37a02d7ae1f4","tests/ready_queue.rs":"3d50c4e71e3954c5b8e2672255b6af33abaebc16172c038e64c3323d633693c0","tests/recurse.rs":"4922e1ad975dca9d6b63d155515cc24181ad6a915adcbb743f7c8a58c0148a77","tests/select_all.rs":"3666e95ea94da17abb1899101e51b294af576bc4461
19fbc8aea5bb2991f439a","tests/select_ok.rs":"7a740e5b2d70c7776202ed1495b016f6e63ae1de06ca0f12ab21fcb3117450a9","tests/shared.rs":"4abb7c9a7f6207e40bc7408ee405df4e5a3e778054ceb113b4a177a886a64d11","tests/sink.rs":"a2c5d8f89cb6d757f548f799ba84f2ba758fbdfe9cc951f1dcdbcc2bec50e648","tests/split.rs":"24dd293f049a37bfaabb02ae558c81e9fef9298a2ce43ecb544450b045c15f5c","tests/stream.rs":"3ca52f06a4503a853acce77997e4e744903c2084a83e0abf1e704e4f73833805","tests/stream_catch_unwind.rs":"6cee77f455a671d038aac24cf2f79636f1c0a5d8900957a2fed0ee3ed99832b8","tests/support/local_executor.rs":"10ca7f0bc1d9fd45350a807cfd76015fe24bf68d9a711e16ea0ec6be22af9ddd","tests/support/mod.rs":"1961189f57851a468e518327da0b7893eee990e477b82a278e0015f25b5e5a1c","tests/unfold.rs":"27ff8c3c83b333094bbffe6aebadf3730f0e35d1367b7b602a3df4e233d934d8","tests/unsync-oneshot.rs":"e676b37a64e1d6c0816d55cf443d86249ec2ff8180f1fc0d009de51e6842dac8","tests/unsync.rs":"facc6a6ef2403e26777dcc075e679a84f55ff1fd09bd259f3ff4b026adca1cf0"},"package":"0bab5b5e94f5c31fc764ba5dd9ad16568aae5d4825538c01d6bca680c9bf94a7"}
\ No newline at end of file
--- a/third_party/rust/futures/.travis.yml
+++ b/third_party/rust/futures/.travis.yml
@@ -1,37 +1,35 @@
 language: rust
 
 matrix:
   include:
+    - os: osx
+    - rust: stable
+    - rust: beta
+    - rust: nightly
+      env: BENCH=1
+      before_script:
+        - pip install 'travis-cargo<0.2' --user && export PATH=$HOME/.local/bin:$PATH
+      after_success:
+        - travis-cargo doc-upload
     - os: linux
-      rust: 1.10.0
+      rust: 1.15.0
       script: cargo test
-rust:
-  - stable
-  - beta
-  - nightly
 sudo: false
-before_script:
-  - pip install 'travis-cargo<0.2' --user && export PATH=$HOME/.local/bin:$PATH
 script:
-  - export CARGO_TARGET_DIR=`pwd`/target
   - cargo build
   - cargo build --no-default-features
   - cargo test
   - cargo test --no-default-features --features use_std
   - cargo test --manifest-path futures-cpupool/Cargo.toml
   - cargo test --manifest-path futures-cpupool/Cargo.toml --no-default-features
 
   - cargo doc --no-deps
   - cargo doc --no-deps --manifest-path futures-cpupool/Cargo.toml
-after_success:
-  - travis-cargo --only nightly doc-upload
+  - if [ "$BENCH" = "1" ]; then cargo bench; fi
 env:
   global:
     - secure: "iwVcMVIF7ZSY82fK5UyyUvVvJxMSYrbZawh1+4Oi8pvOdYq1gptcDoOC8jxWwCwrNF1b+/85n+jlEUngEqqSmV5PjAbWPjoc+u4Zn7CRi1AlxoUlvHPiQm4vM4Mkkd6GsqoIZttCeedU9m/w0nQ18uUtK8uD6vr2FVdcMnUnkYQAxuGOowGLrwidukzfBXMCu/JrwKMIbt61knAFiI/KJknu0h1mRrhpeF/sQ3tJFzRRcQeFJkbfwDzltMpPo1hq5D3HI4ONjYi/qO2pwUhDk4umfp9cLW9MS8rQvptxJTQmWemHi+f2/U4ld6a0URL6kEuMkt/EbH0A74eFtlicfRs44dX9MlWoqbLypnC3ymqmHcpwcwNA3HmZyg800MTuU+BPK41HIPdO9tPpxjHEiqvNDknH7qs+YBnis0eH7DHJgEjXq651PjW7pm+rnHPwsj+OzKE1YBNxBQZZDkS3VnZJz+O4tVsOzc3IOz0e+lf7VVuI17C9haj117nKp3umC4MVBA0S8RfreFgqpyDeY2zwcqOr0YOlEGGRl0vyWP8Qcxx12kQ7+doLolt6Kxda4uO0hKRmIF6+qki1T+L7v8BOGOtCncz4f7IX48eQ7+Wu0OtglRn45qAa3CxjUuW6xX3KSNH66PCXV0Jtp8Ga2SSevX2wtbbFu9f+9R+PQY4="
 
 notifications:
   email:
     on_success: never
-os:
-  - linux
-  - osx
new file mode 100644
--- /dev/null
+++ b/third_party/rust/futures/CHANGELOG.md
@@ -0,0 +1,289 @@
+# 0.1.17 - 2017-10-31
+
+* Add a `close` method on `sink::Wait`
+* Undeprecate `stream::iter` as `stream::iter_result`
+* Improve performance of wait-related methods
+* Tweak buffered sinks with a 0 capacity to forward directly to the underlying
+  sink.
+* Add `FromIterator` implementation for `FuturesOrdered` and `FuturesUnordered`.
+
+# 0.1.16 - 2017-09-15
+
+* A `prelude` module has been added to glob import from and pick up a whole
+  bunch of useful types
+* `sync::mpsc::Sender::poll_ready` has been added as an API
+* `sync::mpsc::Sender::try_send` has been added as an API
+
+# 0.1.15 - 2017-08-24
+
+* Improve performance of `BiLock` methods
+* Implement `Clone` for `FutureResult`
+* Forward `Stream` trait through `SinkMapErr`
+* Add `stream::futures_ordered` next to `futures_unordered`
+* Reimplement `Stream::buffered` on top of `stream::futures_ordered` (much more
+  efficient at scale).
+* Add a `with_notify` function for abstractions which previously required
+  `UnparkEvent`.
+* Add `get_ref`/`get_mut`/`into_inner` functions for stream take/skip methods
+* Add a `Clone` implementation for `SharedItem` and `SharedError`
+* Add a `mpsc::spawn` function to spawn a `Stream` into an `Executor`
+* Add a `reunite` function for `BiLock` and the split stream/sink types to
+  rejoin two halves and reclaim the original item.
+* Add `stream::poll_fn` to behave similarly to `future::poll_fn`
+* Add `Sink::with_flat_map` like `Iterator::flat_map`
+* Bump the minimum Rust version to 1.13.0
+* Expose `AtomicTask` in the public API for managing synchronization around task
+  notifications.
+* Unify the `Canceled` type of the `sync` and `unsync` modules.
+* Deprecate the `boxed` methods. These methods have caused more confusion than
+  they've solved historically, so it's recommended to use a local extension
+  trait or a local helper instead of the trait-based methods.
+* Deprecate the `Stream::merge` method as it's less ergonomic than `select`.
+* Add `oneshot::Sender::is_canceled` to test if a oneshot is canceled off a
+  task.
+* Deprecates `UnboundedSender::send` in favor of a method named `unbounded_send`
+  to avoid a conflict with `Sink::send`.
+* Deprecate the `stream::iter` function in favor of an `stream::iter_ok` adaptor
+  to avoid the need to deal with `Result` manually.
+* Add an `inspect` function to the `Future` and `Stream` traits along the lines
+  of `Iterator::inspect`
+
+# 0.1.14 - 2017-05-30
+
+This is a relatively large release of the `futures` crate, although much of it
+is from reworking internals rather than new APIs. The banner feature of this
+release is that the `futures::{task, executor}` modules are now available in
+`no_std` contexts! A large refactoring of the task system was performed in
+PR #436 to accommodate custom memory allocation schemes and otherwise remove
+all dependencies on `std` for the task module. More details about this change
+can be found on the PR itself.
+
+Other API additions in this release are:
+
+* A `FuturesUnordered::push` method was added and the `FuturesUnordered` type
+  itself was completely rewritten to efficiently track a large number of
+  futures.
+* A `Task::will_notify_current` method was added with a slightly different
+  implementation than `Task::is_current` but with stronger guarantees and
+  documentation wording about its purpose.
+* Many combinators now have `get_ref`, `get_mut`, and `into_inner` methods for
+  accessing internal futures and state.
+* A `Stream::concat2` method was added which should be considered the "fixed"
+  version of `concat`, this one doesn't panic on empty streams.
+* An `Executor` trait has been added to represent abstracting over the concept
+  of spawning a new task. Crates which only need the ability to spawn a future
+  can now be generic over `Executor` rather than requiring a
+  `tokio_core::reactor::Handle`.
+
+As with all 0.1.x releases this PR is intended to be 100% backwards compatible.
+All code that previously compiled should continue to do so with these changes.
+As with other changes, though, there are also some updates to be aware of:
+
+* The `task::park` function has been renamed to `task::current`.
+* The `Task::unpark` function has been renamed to `Task::notify`, and in general
+  terminology around "unpark" has shifted to terminology around "notify"
+* The `Unpark` trait has been deprecated in favor of the `Notify` trait
+  mentioned above.
+* The `UnparkEvent` structure has been deprecated. It currently should perform
+  the same as it used to, but it's planned that in a future 0.1.x release the
+  performance will regress for crates that have not transitioned away. The
+  primary primitive to replace this is the addition of a `push` function on the
+  `FuturesUnordered` type. If this does not help implement your use case though,
+  please let us know!
+* The `Task::is_current` method is now deprecated, and you likely want to use
+  `Task::will_notify_current` instead, but let us know if this doesn't suffice!
+
+# 0.1.13 - 2017-04-05
+
+* Add forwarding sink/stream impls for `stream::FromErr` and `sink::SinkFromErr`
+* Add `PartialEq` and `Eq` to `mpsc::SendError`
+* Reimplement `Shared` with `spawn` instead of `UnparkEvent`
+
+# 0.1.12 - 2017-04-03
+
+* Add `Stream::from_err` and `Sink::from_err`
+* Allow `SendError` to be `Clone` when possible
+
+# 0.1.11 - 2017-03-13
+
+The major highlight of this release is the addition of a new "default" method on
+the `Sink` trait, `Sink::close`. This method is used to indicate to a sink that
+no new values will ever need to get pushed into it. This can be used to
+implement graceful shutdown of protocols and otherwise simply indicates to a
+sink that it can start freeing up resources.
+
+Currently this method is **not** a default method to preserve backwards
+compatibility, but it's intended to become a default method in the 0.2 series of
+the `futures` crate. It's highly recommended to audit implementations of `Sink`
+to implement the `close` method as is fit.
+
+Other changes in this release are:
+
+* A new select combinator, `Future::select2` was added for a heterogeneous
+  select.
+* A `Shared::peek` method was added to check to see if it's done.
+* `Sink::map_err` was implemented
+* The `log` dependency was removed
+* Implementations of the `Debug` trait are now generally available.
+* The `stream::IterStream` type was renamed to `stream::Iter` (with a reexport
+  for the old name).
+* Add a `Sink::wait` method which returns an adapter to use an arbitrary `Sink`
+  synchronously.
+* A `Stream::concat` method was added to concatenate a sequence of lists.
+* The `oneshot::Sender::complete` method was renamed to `send` and now returns a
+  `Result` indicating successful transmission of a message or not. Note that the
+  `complete` method still exists, it's just deprecated.
+
+# 0.1.10 - 2017-01-30
+
+* Add a new `unsync` module which mirrors `sync` to the extent that it can but
+  is intended to not perform cross-thread synchronization (only usable within
+  one thread).
+* Tweak `Shared` to work when handles may not get poll'd again.
+
+# 0.1.9 - 2017-01-18
+
+* Fix `Send/Sync` of a few types
+* Add `future::tail_fn` for more easily writing loops
+* Export SharedItem/SharedError
+* Remove an unused type parameter in `from_err`
+
+# 0.1.8 - 2017-01-11
+
+* Fix some race conditions in the `Shared` implementation
+* Add `Stream::take_while`
+* Fix an unwrap in `stream::futures_unordered`
+* Generalize `Stream::for_each`
+* Add `Stream::chain`
+* Add `stream::repeat`
+* Relax `&mut self` to `&self` in `UnboundedSender::send`
+
+# 0.1.7 - 2016-12-18
+
+* Add a `Future::shared` method for creating a future that can be shared
+  amongst threads by cloning the future itself. All derivative futures
+  will resolve to the same value once the original future has been
+  resolved.
+* Add a `FutureFrom` trait for future-based conversion
+* Fix a wakeup bug in `Receiver::close`
+* Add `future::poll_fn` for quickly adapting a `Poll`-based function to
+  a future.
+* Add an `Either` enum with two branches to easily create one future
+  type based on two different futures created on two branches of control
+  flow.
+* Remove the `'static` bound on `Unpark`
+* Optimize `send_all` and `forward` to send as many items as possible
+  before calling `poll_complete`.
+* Unify the return types of the `ok`, `err`, and `result` future to
+  assist returning different varieties in different branches of a function.
+* Add `CpuFuture::forget` to allow the computation to continue running
+  after a drop.
+* Add a `stream::futures_unordered` combinator to turn a list of futures
+  into a stream representing their order of completion.
+
+# 0.1.6 - 2016-11-22
+
+* Fix `Clone` bound on the type parameter on `UnboundedSender`
+
+# 0.1.5 - 2016-11-22
+
+* Fix `#![no_std]` support
+
+# 0.1.4 - 2016-11-22
+
+This is quite a large release relative to the previous point releases! As
+with all 0.1 releases, this release should be fully compatible with the 0.1.3
+release. If any incompatibilities are discovered please file an issue!
+
+The largest changes in 0.1.4 are the addition of a `Sink` trait coupled with a
+reorganization of this crate. Note that all old locations for types/traits
+still exist, they're just deprecated and tagged with `#[doc(hidden)]`.
+
+The new `Sink` trait is used to represent types which can periodically over
+time accept items, but may take some time to fully process the item before
+another can be accepted. Essentially, a sink is the opposite of a stream. This
+trait will then be used in the tokio-core crate to implement simple framing by
+modeling I/O streams as both a stream and a sink of frames.
+
+The organization of this crate is to now have three primary submodules,
+`future`, `stream`, and `sink`. The traits as well as all combinator types are
+defined in these submodules. The traits and types like `Async` and `Poll` are
+then reexported at the top of the crate for convenient usage. It should be a
+relatively rare occasion that the modules themselves are reached into.
+
+Finally, the 0.1.4 release comes with a new module, `sync`, in the futures
+crate.  This is intended to be the home of a suite of futures-aware
+synchronization primitives. Currently this is inhabited with a `oneshot` module
+(the old `oneshot` function), a `mpsc` module for a new multi-producer
+single-consumer channel, and a `BiLock` type which represents sharing ownership
+of one value between two consumers. This module may expand over time with more
+types like a mutex, rwlock, spsc channel, etc.
+
+Notable deprecations in the 0.1.4 release that will be deleted in an eventual
+0.2 release:
+
+* The `TaskRc` type is now deprecated in favor of `BiLock` or otherwise `Arc`
+  sharing.
+* All future combinators should be accessed through the `future` module, not
+  the top-level of the crate.
+* The `Oneshot` and `Complete` types are now replaced with the `sync::oneshot`
+  module.
+* Some old names like `collect` are deprecated in favor of more appropriately
+  named versions like `join_all`
+* The `finished` constructor is now `ok`.
+* The `failed` constructor is now `err`.
+* The `done` constructor is now `result`.
+
+As always, please report bugs to https://github.com/alexcrichton/futures-rs and
+we always love feedback! If you've got situations we don't cover, combinators
+you'd like to see, or slow code, please let us know!
+
+Full changelog:
+
+* Improve scalability of `buffer_unordered` combinator
+* Fix a memory ordering bug in oneshot
+* Add a new trait, `Sink`
+* Reorganize the crate into three primary modules
+* Add a new `sync` module for synchronization primitives
+* Add a `BiLock` sync primitive for two-way sharing
+* Deprecate `TaskRc`
+* Rename `collect` to `join_all`
+* Use a small vec in `Events` for improved clone performance
+* Add `Stream::select` for selecting items from two streams like `merge` but
+  requiring the same types.
+* Add `stream::unfold` constructor
+* Add a `sync::mpsc` module with a futures-aware multi-producer single-consumer
+  queue. Both bounded (with backpressure) and unbounded (no backpressure)
+  variants are provided.
+* Renamed `failed`, `finished`, and `done` combinators to `err`, `ok`, and
+  `result`.
+* Add `Stream::forward` to send all items to a sink, like `Sink::send_all`
+* Add `Stream::split` for streams which are both sinks and streams to have
+  separate ownership of the stream/sink halves
+* Improve `join_all` with concurrency
+
+# 0.1.3 - 2016-10-24
+
+* Rewrite `oneshot` for efficiency and removing allocations on send/recv
+* Errors are passed through in `Stream::take` and `Stream::skip`
+* Add a `select_ok` combinator to pick the first of a list that succeeds
+* Remove the unnecessary `SelectAllNext` typedef
+* Add `Stream::chunks` for receiving chunks of data
+* Rewrite `stream::channel` for efficiency, correctness, and removing
+  allocations
+* Remove `Send + 'static` bounds on the `stream::Empty` type
+
+# 0.1.2 - 2016-10-04
+
+* Fixed a bug in drop of `FutureSender`
+* Expose the channel `SendError` type
+* Add `Future::into_stream` to convert to a single-element stream
+* Add `Future::flatten_to_stream` to convert a future of a stream to a stream
+* impl Debug for SendError
+* Add stream::once for a one element stream
+* Accept IntoIterator in stream::iter
+* Add `Stream::catch_unwind`
+
+# 0.1.1 - 2016-09-09
+
+Initial release!
--- a/third_party/rust/futures/Cargo.toml
+++ b/third_party/rust/futures/Cargo.toml
@@ -1,29 +1,36 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g. crates.io) dependencies
+#
+# If you believe there's an error in this file please file an
+# issue against the rust-lang/cargo repository. If you're
+# editing this file be aware that the upstream Cargo.toml
+# will likely look very different (and much more reasonable)
+
 [package]
 name = "futures"
-version = "0.1.13"
+version = "0.1.18"
 authors = ["Alex Crichton <alex@alexcrichton.com>"]
-license = "MIT/Apache-2.0"
-readme = "README.md"
-keywords = ["futures", "async", "future"]
-repository = "https://github.com/alexcrichton/futures-rs"
+description = "An implementation of futures and streams featuring zero allocations,\ncomposability, and iterator-like interfaces.\n"
 homepage = "https://github.com/alexcrichton/futures-rs"
 documentation = "https://docs.rs/futures"
-description = """
-An implementation of futures and streams featuring zero allocations,
-composability, and iterator-like interfaces.
-"""
+readme = "README.md"
+keywords = ["futures", "async", "future"]
 categories = ["asynchronous"]
-
-[badges]
-travis-ci = { repository = "alexcrichton/futures-rs" }
-appveyor = { repository = "alexcrichton/futures-rs" }
+license = "MIT/Apache-2.0"
+repository = "https://github.com/alexcrichton/futures-rs"
 
 [dependencies]
 
 [features]
+default = ["use_std", "with-deprecated"]
 use_std = []
 with-deprecated = []
-default = ["use_std", "with-deprecated"]
+[badges.appveyor]
+repository = "alexcrichton/futures-rs"
 
-[workspace]
-members = ["futures-cpupool"]
+[badges.travis-ci]
+repository = "alexcrichton/futures-rs"
deleted file mode 100644
--- a/third_party/rust/futures/FAQ.md
+++ /dev/null
@@ -1,99 +0,0 @@
-# FAQ
-
-A collection of some commonly asked questions, with responses! If you find any
-of these unsatisfactory feel free to ping me (@alexcrichton) on github,
-acrichto on IRC, or just by email!
-
-### Why both `Item` and `Error` associated types?
-
-An alternative design of the `Future` trait would be to only have one associated
-type, `Item`, and then most futures would resolve to `Result<T, E>`. The
-intention of futures, the fundamental support for async I/O, typically means
-that errors will be encoded in almost all futures anyway though. By encoding an
-error type in the future as well we're able to provide convenient combinators
-like `and_then` which automatically propagate errors, as well as combinators
-like `join` which can act differently depending on whether a future resolves to
-an error or not.
-
-### Do futures work with multiple event loops?
-
-Yes! Futures are designed to source events from any location, including multiple
-event loops. All of the basic combinators will work on any number of event loops
-across any number of threads.
-
-### What if I have CPU intensive work?
-
-The documentation of the `Future::poll` function says that's it's supposed to
-"return quickly", what if I have work that doesn't return quickly! In this case
-it's intended that this work will run on a dedicated pool of threads intended
-for this sort of work, and a future to the returned value is used to represent
-its completion.
-
-A proof-of-concept method of doing this is the `futures-cpupool` crate in this
-repository, where you can execute work on a thread pool and receive a future to
-the value generated. This future is then composable with `and_then`, for
-example, to mesh in with the rest of a future's computation.
-
-### How do I call `poll`?
-
-In general it's not recommended to call `poll` unless you're implementing
-another `poll` function. If you need to poll a future, however, you can use
-`task::spawn` followed by the `poll_future` method on `Spawn<T>`.
-
-### How do I return a future?
-
-Returning a future is like returning an iterator in Rust today. It's not the
-easiest thing to do and you frequently need to resort to `Box` with a trait
-object. Thankfully though [`impl Trait`] is just around the corner and will
-allow returning these types unboxed in the future.
-
-[`impl Trait`]: https://github.com/rust-lang/rust/issues/34511
-
-For now though the cost of boxing shouldn't actually be that high. A future
-computation can be constructed *without boxing* and only the final step actually
-places a `Box` around the entire future. In that sense you're only paying the
-allocation at the very end, not for any of the intermediate futures.
-
-More information can be found [in the tutorial][return-future].
-
-[return-future]: https://github.com/alexcrichton/futures-rs/blob/master/TUTORIAL.md#returning-futures
-
-### Does it work on Windows?
-
-Yes! This library builds on top of mio, which works on Windows.
-
-### What version of Rust should I use?
-
-Rust 1.10 or later.
-
-### Is it on crates.io?
-
-Not yet! A few names are reserved, but crates cannot have dependencies from a
-git repository. Right now we depend on the master branch of `mio`, and crates
-will be published once that's on crates.io as well!
-
-### Does this implement tail call optimization?
-
-One aspect of many existing futures libraries is whether or not a tail call
-optimization is implemented. The exact meaning of this varies from framework to
-framework, but it typically boils down to whether common patterns can be
-implemented in such a way that prevents blowing the stack if the system is
-overloaded for a moment or leaking memory for the entire lifetime of a
-future/server.
-
-For the prior case, blowing the stack, this typically arises as loops are often
-implemented through recursion with futures. This recursion can end up proceeding
-too quickly if the "loop" makes lots of turns very quickly. At this time neither
-the `Future` nor `Stream` traits handle tail call optimizations in this case,
-but rather combinators are patterns are provided to avoid recursion. For example
-a `Stream` implements `fold`, `for_each`, etc. These combinators can often be
-used to implement an asynchronous loop to avoid recursion, and they all execute
-in constant stack space. Note that we're very interested in exploring more
-generalized loop combinators, so PRs are always welcome!
-
-For the latter case, leaking memory, this can happen where a future accidentally
-"remembers" all of its previous states when it'll never use them again. This
-also can arise through recursion or otherwise manufacturing of futures of
-infinite length. Like above, however, these also tend to show up in situations
-that would otherwise be expressed with a loop, so the same solutions should
-apply there regardless.
--- a/third_party/rust/futures/README.md
+++ b/third_party/rust/futures/README.md
@@ -11,17 +11,17 @@ This library is an implementation of **z
 [Tutorial](https://tokio.rs/docs/getting-started/futures/)
 
 ## Usage
 
 First, add this to your `Cargo.toml`:
 
 ```toml
 [dependencies]
-futures = "0.1.9"
+futures = "0.1.17"
 ```
 
 Next, add this to your crate:
 
 ```rust
 extern crate futures;
 
 use futures::Future;
@@ -34,18 +34,27 @@ the Tokio stack and also futures.
 ### Feature `use_std`
 
 `futures-rs` works without the standard library, such as in bare metal environments.
 However, it has a significantly reduced API surface. To use `futures-rs` in
 a `#[no_std]` environment, use:
 
 ```toml
 [dependencies]
-futures = { version = "0.1", default-features = false }
+futures = { version = "0.1.17", default-features = false }
 ```
 
 # License
 
-`futures-rs` is primarily distributed under the terms of both the MIT license and
-the Apache License (Version 2.0), with portions covered by various BSD-like
-licenses.
+This project is licensed under either of
+
+ * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
+   http://www.apache.org/licenses/LICENSE-2.0)
+ * MIT license ([LICENSE-MIT](LICENSE-MIT) or
+   http://opensource.org/licenses/MIT)
 
-See LICENSE-APACHE, and LICENSE-MIT for details.
+at your option.
+
+### Contribution
+
+Unless you explicitly state otherwise, any contribution intentionally submitted
+for inclusion in Futures by you, as defined in the Apache-2.0 license, shall be
+dual licensed as above, without any additional terms or conditions.
--- a/third_party/rust/futures/appveyor.yml
+++ b/third_party/rust/futures/appveyor.yml
@@ -1,9 +1,29 @@
 environment:
+
+  # At the time this was added AppVeyor was having troubles with checking
+  # revocation of SSL certificates of sites like static.rust-lang.org and what
+  # we think is crates.io. The libcurl HTTP client by default checks for
+  # revocation on Windows and according to a mailing list [1] this can be
+  # disabled.
+  #
+  # The `CARGO_HTTP_CHECK_REVOKE` env var here tells cargo to disable SSL
+  # revocation checking on Windows in libcurl. Note, though, that rustup, which
+  # we're using to download Rust here, also uses libcurl as the default backend.
+  # Unlike Cargo, however, rustup doesn't have a mechanism to disable revocation
+  # checking. To get rustup working we set `RUSTUP_USE_HYPER` which forces it to
+  # use the Hyper instead of libcurl backend. Both Hyper and libcurl use
+  # schannel on Windows but it appears that Hyper configures it slightly
+  # differently such that revocation checking isn't turned on by default.
+  #
+  # [1]: https://curl.haxx.se/mail/lib-2016-03/0202.html
+  RUSTUP_USE_HYPER: 1
+  CARGO_HTTP_CHECK_REVOKE: false
+
   matrix:
   - TARGET: x86_64-pc-windows-msvc
 install:
   - set PATH=C:\Program Files\Git\mingw64\bin;%PATH%
   - curl -sSf -o rustup-init.exe https://win.rustup.rs/
   - rustup-init.exe -y --default-host %TARGET%
   - set PATH=%PATH%;C:\Users\appveyor\.cargo\bin
   - rustc -V
new file mode 100644
--- /dev/null
+++ b/third_party/rust/futures/benches/bilock.rs
@@ -0,0 +1,121 @@
+#![feature(test)]
+
+extern crate futures;
+extern crate test;
+
+use futures::{Async, Poll};
+use futures::executor;
+use futures::executor::{Notify, NotifyHandle};
+use futures::sync::BiLock;
+use futures::sync::BiLockAcquire;
+use futures::sync::BiLockAcquired;
+use futures::future::Future;
+use futures::stream::Stream;
+
+
+use test::Bencher;
+
+fn notify_noop() -> NotifyHandle {
+    struct Noop;
+
+    impl Notify for Noop {
+        fn notify(&self, _id: usize) {}
+    }
+
+    const NOOP : &'static Noop = &Noop;
+
+    NotifyHandle::from(NOOP)
+}
+
+
+/// Pseudo-stream which simply calls `lock.poll()` on `poll`
+struct LockStream {
+    lock: BiLockAcquire<u32>,
+}
+
+impl LockStream {
+    fn new(lock: BiLock<u32>) -> LockStream {
+        LockStream {
+            lock: lock.lock()
+        }
+    }
+
+    /// Release a lock after it was acquired in `poll`,
+    /// so `poll` could be called again.
+    fn release_lock(&mut self, guard: BiLockAcquired<u32>) {
+        self.lock = guard.unlock().lock()
+    }
+}
+
+impl Stream for LockStream {
+    type Item = BiLockAcquired<u32>;
+    type Error = ();
+
+    fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
+        self.lock.poll().map(|a| match a {
+            Async::Ready(a) => Async::Ready(Some(a)),
+            Async::NotReady => Async::NotReady,
+        })
+    }
+}
+
+
+#[bench]
+fn contended(b: &mut Bencher) {
+    b.iter(|| {
+        let (x, y) = BiLock::new(1);
+
+        let mut x = executor::spawn(LockStream::new(x));
+        let mut y = executor::spawn(LockStream::new(y));
+
+        for _ in 0..1000 {
+            let x_guard = match x.poll_stream_notify(&notify_noop(), 11) {
+                Ok(Async::Ready(Some(guard))) => guard,
+                _ => panic!(),
+            };
+
+            // Try poll second lock while first lock still holds the lock
+            match y.poll_stream_notify(&notify_noop(), 11) {
+                Ok(Async::NotReady) => (),
+                _ => panic!(),
+            };
+
+            x.get_mut().release_lock(x_guard);
+
+            let y_guard = match y.poll_stream_notify(&notify_noop(), 11) {
+                Ok(Async::Ready(Some(guard))) => guard,
+                _ => panic!(),
+            };
+
+            y.get_mut().release_lock(y_guard);
+        }
+        (x, y)
+    });
+}
+
+#[bench]
+fn lock_unlock(b: &mut Bencher) {
+    b.iter(|| {
+        let (x, y) = BiLock::new(1);
+
+        let mut x = executor::spawn(LockStream::new(x));
+        let mut y = executor::spawn(LockStream::new(y));
+
+        for _ in 0..1000 {
+            let x_guard = match x.poll_stream_notify(&notify_noop(), 11) {
+                Ok(Async::Ready(Some(guard))) => guard,
+                _ => panic!(),
+            };
+
+            x.get_mut().release_lock(x_guard);
+
+            let y_guard = match y.poll_stream_notify(&notify_noop(), 11) {
+                Ok(Async::Ready(Some(guard))) => guard,
+                _ => panic!(),
+            };
+
+            y.get_mut().release_lock(y_guard);
+        }
+        (x, y)
+    })
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/futures/benches/futures_unordered.rs
@@ -0,0 +1,43 @@
+#![feature(test)]
+
+extern crate futures;
+extern crate test;
+
+use futures::*;
+use futures::stream::FuturesUnordered;
+use futures::sync::oneshot;
+
+use test::Bencher;
+
+use std::collections::VecDeque;
+use std::thread;
+
+#[bench]
+fn oneshots(b: &mut Bencher) {
+    const NUM: usize = 10_000;
+
+    b.iter(|| {
+        let mut txs = VecDeque::with_capacity(NUM);
+        let mut rxs = FuturesUnordered::new();
+
+        for _ in 0..NUM {
+            let (tx, rx) = oneshot::channel();
+            txs.push_back(tx);
+            rxs.push(rx);
+        }
+
+        thread::spawn(move || {
+            while let Some(tx) = txs.pop_front() {
+                let _ = tx.send("hello");
+            }
+        });
+
+        future::lazy(move || {
+            loop {
+                if let Ok(Async::Ready(None)) = rxs.poll() {
+                    return Ok::<(), ()>(());
+                }
+            }
+        }).wait().unwrap();
+    });
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/futures/benches/poll.rs
@@ -0,0 +1,72 @@
+#![feature(test)]
+
+extern crate futures;
+extern crate test;
+
+use futures::*;
+use futures::executor::{Notify, NotifyHandle};
+use futures::task::Task;
+
+use test::Bencher;
+
+fn notify_noop() -> NotifyHandle {
+    struct Noop;
+
+    impl Notify for Noop {
+        fn notify(&self, _id: usize) {}
+    }
+
+    const NOOP : &'static Noop = &Noop;
+
+    NotifyHandle::from(NOOP)
+}
+
+#[bench]
+fn task_init(b: &mut Bencher) {
+    const NUM: u32 = 100_000;
+
+    struct MyFuture {
+        num: u32,
+        task: Option<Task>,
+    };
+
+    impl Future for MyFuture {
+        type Item = ();
+        type Error = ();
+
+        fn poll(&mut self) -> Poll<(), ()> {
+            if self.num == NUM {
+                Ok(Async::Ready(()))
+            } else {
+                self.num += 1;
+
+                if let Some(ref t) = self.task {
+                    if t.will_notify_current() {
+                        t.notify();
+                        return Ok(Async::NotReady);
+                    }
+                }
+
+                let t = task::current();
+                t.notify();
+                self.task = Some(t);
+
+                Ok(Async::NotReady)
+            }
+        }
+    }
+
+    let notify = notify_noop();
+
+    let mut fut = executor::spawn(MyFuture {
+        num: 0,
+        task: None,
+    });
+
+    b.iter(|| {
+        fut.get_mut().num = 0;
+
+        while let Ok(Async::NotReady) = fut.poll_future_notify(&notify, 0) {
+        }
+    });
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/futures/benches/sync_mpsc.rs
@@ -0,0 +1,168 @@
+#![feature(test)]
+
+extern crate futures;
+extern crate test;
+
+use futures::{Async, Poll, AsyncSink};
+use futures::executor;
+use futures::executor::{Notify, NotifyHandle};
+
+use futures::sink::Sink;
+use futures::stream::Stream;
+
+use futures::sync::mpsc::unbounded;
+use futures::sync::mpsc::channel;
+use futures::sync::mpsc::Sender;
+use futures::sync::mpsc::UnboundedSender;
+
+
+use test::Bencher;
+
+fn notify_noop() -> NotifyHandle {
+    struct Noop;
+
+    impl Notify for Noop {
+        fn notify(&self, _id: usize) {}
+    }
+
+    const NOOP : &'static Noop = &Noop;
+
+    NotifyHandle::from(NOOP)
+}
+
+/// Single producer, single consumer
+#[bench]
+fn unbounded_1_tx(b: &mut Bencher) {
+    b.iter(|| {
+        let (tx, rx) = unbounded();
+
+        let mut rx = executor::spawn(rx);
+
+        // 1000 iterations to avoid measuring overhead of initialization
+        // Result should be divided by 1000
+        for i in 0..1000 {
+
+            // Poll, not ready, park
+            assert_eq!(Ok(Async::NotReady), rx.poll_stream_notify(&notify_noop(), 1));
+
+            UnboundedSender::unbounded_send(&tx, i).unwrap();
+
+            // Now poll ready
+            assert_eq!(Ok(Async::Ready(Some(i))), rx.poll_stream_notify(&notify_noop(), 1));
+        }
+    })
+}
+
+/// 100 producers, single consumer
+#[bench]
+fn unbounded_100_tx(b: &mut Bencher) {
+    b.iter(|| {
+        let (tx, rx) = unbounded();
+
+        let mut rx = executor::spawn(rx);
+
+        let tx: Vec<_> = (0..100).map(|_| tx.clone()).collect();
+
+        // 1000 send/recv operations total, result should be divided by 1000
+        for _ in 0..10 {
+            for i in 0..tx.len() {
+                assert_eq!(Ok(Async::NotReady), rx.poll_stream_notify(&notify_noop(), 1));
+
+                UnboundedSender::unbounded_send(&tx[i], i).unwrap();
+
+                assert_eq!(Ok(Async::Ready(Some(i))), rx.poll_stream_notify(&notify_noop(), 1));
+            }
+        }
+    })
+}
+
+#[bench]
+fn unbounded_uncontended(b: &mut Bencher) {
+    b.iter(|| {
+        let (tx, mut rx) = unbounded();
+
+        for i in 0..1000 {
+            UnboundedSender::unbounded_send(&tx, i).expect("send");
+            // No need to create a task, because poll is not going to park.
+            assert_eq!(Ok(Async::Ready(Some(i))), rx.poll());
+        }
+    })
+}
+
+
+/// A Stream that continuously sends incrementing numbers to the queue
+struct TestSender {
+    tx: Sender<u32>,
+    last: u32, // Last number sent
+}
+
+// Could be a Future, it doesn't matter
+impl Stream for TestSender {
+    type Item = u32;
+    type Error = ();
+
+    fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
+        match self.tx.start_send(self.last + 1) {
+            Err(_) => panic!(),
+            Ok(AsyncSink::Ready) => {
+                self.last += 1;
+                assert_eq!(Ok(Async::Ready(())), self.tx.poll_complete());
+                Ok(Async::Ready(Some(self.last)))
+            }
+            Ok(AsyncSink::NotReady(_)) => {
+                Ok(Async::NotReady)
+            }
+        }
+    }
+}
+
+
+/// Single producer, single consumer
+#[bench]
+fn bounded_1_tx(b: &mut Bencher) {
+    b.iter(|| {
+        let (tx, rx) = channel(0);
+
+        let mut tx = executor::spawn(TestSender {
+            tx: tx,
+            last: 0,
+        });
+
+        let mut rx = executor::spawn(rx);
+
+        for i in 0..1000 {
+            assert_eq!(Ok(Async::Ready(Some(i + 1))), tx.poll_stream_notify(&notify_noop(), 1));
+            assert_eq!(Ok(Async::NotReady), tx.poll_stream_notify(&notify_noop(), 1));
+            assert_eq!(Ok(Async::Ready(Some(i + 1))), rx.poll_stream_notify(&notify_noop(), 1));
+        }
+    })
+}
+
+/// 100 producers, single consumer
+#[bench]
+fn bounded_100_tx(b: &mut Bencher) {
+    b.iter(|| {
+        // Each sender can send one item in addition to the specified capacity
+        let (tx, rx) = channel(0);
+
+        let mut tx: Vec<_> = (0..100).map(|_| {
+            executor::spawn(TestSender {
+                tx: tx.clone(),
+                last: 0
+            })
+        }).collect();
+
+        let mut rx = executor::spawn(rx);
+
+        for i in 0..10 {
+            for j in 0..tx.len() {
+                // Send an item
+                assert_eq!(Ok(Async::Ready(Some(i + 1))), tx[j].poll_stream_notify(&notify_noop(), 1));
+                // Then block
+                assert_eq!(Ok(Async::NotReady), tx[j].poll_stream_notify(&notify_noop(), 1));
+                // Recv the item
+                assert_eq!(Ok(Async::Ready(Some(i + 1))), rx.poll_stream_notify(&notify_noop(), 1));
+            }
+        }
+    })
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/futures/benches/thread_notify.rs
@@ -0,0 +1,114 @@
+#![feature(test)]
+
+extern crate futures;
+extern crate test;
+
+use futures::{Future, Poll, Async};
+use futures::task::{self, Task};
+
+use test::Bencher;
+
+#[bench]
+fn thread_yield_single_thread_one_wait(b: &mut Bencher) {
+    const NUM: usize = 10_000;
+
+    struct Yield {
+        rem: usize,
+    }
+
+    impl Future for Yield {
+        type Item = ();
+        type Error = ();
+
+        fn poll(&mut self) -> Poll<(), ()> {
+            if self.rem == 0 {
+                Ok(Async::Ready(()))
+            } else {
+                self.rem -= 1;
+                task::current().notify();
+                Ok(Async::NotReady)
+            }
+        }
+    }
+
+    b.iter(|| {
+        let y = Yield { rem: NUM };
+        y.wait().unwrap();
+    });
+}
+
+#[bench]
+fn thread_yield_single_thread_many_wait(b: &mut Bencher) {
+    const NUM: usize = 10_000;
+
+    struct Yield {
+        rem: usize,
+    }
+
+    impl Future for Yield {
+        type Item = ();
+        type Error = ();
+
+        fn poll(&mut self) -> Poll<(), ()> {
+            if self.rem == 0 {
+                Ok(Async::Ready(()))
+            } else {
+                self.rem -= 1;
+                task::current().notify();
+                Ok(Async::NotReady)
+            }
+        }
+    }
+
+    b.iter(|| {
+        for _ in 0..NUM {
+            let y = Yield { rem: 1 };
+            y.wait().unwrap();
+        }
+    });
+}
+
+#[bench]
+fn thread_yield_multi_thread(b: &mut Bencher) {
+    use std::sync::mpsc;
+    use std::thread;
+
+    const NUM: usize = 1_000;
+
+    let (tx, rx) = mpsc::sync_channel::<Task>(10_000);
+
+    struct Yield {
+        rem: usize,
+        tx: mpsc::SyncSender<Task>,
+    }
+
+    impl Future for Yield {
+        type Item = ();
+        type Error = ();
+
+        fn poll(&mut self) -> Poll<(), ()> {
+            if self.rem == 0 {
+                Ok(Async::Ready(()))
+            } else {
+                self.rem -= 1;
+                self.tx.send(task::current()).unwrap();
+                Ok(Async::NotReady)
+            }
+        }
+    }
+
+    thread::spawn(move || {
+        while let Ok(task) = rx.recv() {
+            task.notify();
+        }
+    });
+
+    b.iter(move || {
+        let y = Yield {
+            rem: NUM,
+            tx: tx.clone(),
+        };
+
+        y.wait().unwrap();
+    });
+}
--- a/third_party/rust/futures/src/executor.rs
+++ b/third_party/rust/futures/src/executor.rs
@@ -1,10 +1,16 @@
 //! Executors
 //!
 //! This module contains tools for managing the raw execution of futures,
 //! which is needed when building *executors* (places where futures can run).
 //!
 //! More information about executors can be [found online at tokio.rs][online].
 //!
-//! [online]: https://tokio.rs/docs/going-deeper/tasks/
+//! [online]: https://tokio.rs/docs/going-deeper-futures/tasks/
 
-pub use task_impl::{Spawn, spawn, Unpark, Executor, Run};
+#[allow(deprecated)]
+#[cfg(feature = "use_std")]
+pub use task_impl::{Unpark, Executor, Run};
+
+pub use task_impl::{Spawn, spawn, Notify, with_notify};
+
+pub use task_impl::{UnsafeNotify, NotifyHandle};
--- a/third_party/rust/futures/src/future/catch_unwind.rs
+++ b/third_party/rust/futures/src/future/catch_unwind.rs
@@ -24,17 +24,17 @@ pub fn new<F>(future: F) -> CatchUnwind<
 impl<F> Future for CatchUnwind<F>
     where F: Future + UnwindSafe,
 {
     type Item = Result<F::Item, F::Error>;
     type Error = Box<Any + Send>;
 
     fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
         let mut future = self.future.take().expect("cannot poll twice");
-        let (res, future) = try!(catch_unwind(|| (future.poll(), future)));
+        let (res, future) = catch_unwind(|| (future.poll(), future))?;
         match res {
             Ok(Async::NotReady) => {
                 self.future = Some(future);
                 Ok(Async::NotReady)
             }
             Ok(Async::Ready(t)) => Ok(Async::Ready(Ok(t))),
             Err(e) => Ok(Async::Ready(Err(e))),
         }
--- a/third_party/rust/futures/src/future/chain.rs
+++ b/third_party/rust/futures/src/future/chain.rs
@@ -31,17 +31,17 @@ impl<A, B, C> Chain<A, B, C>
             }
             Chain::Second(ref mut b) => return b.poll(),
             Chain::Done => panic!("cannot poll a chained future twice"),
         };
         let data = match mem::replace(self, Chain::Done) {
             Chain::First(_, c) => c,
             _ => panic!(),
         };
-        match try!(f(a_result, data)) {
+        match f(a_result, data)? {
             Ok(e) => Ok(Async::Ready(e)),
             Err(mut b) => {
                 let ret = b.poll();
                 *self = Chain::Second(b);
                 ret
             }
         }
     }
--- a/third_party/rust/futures/src/future/either.rs
+++ b/third_party/rust/futures/src/future/either.rs
@@ -6,26 +6,26 @@ use {Future, Poll};
 pub enum Either<A, B> {
     /// First branch of the type
     A(A),
     /// Second branch of the type
     B(B),
 }
 
 impl<T, A, B> Either<(T, A), (T, B)> {
-    /// Splits out the homogenous type from an either of tuples.
+    /// Splits out the homogeneous type from an either of tuples.
     ///
     /// This method is typically useful when combined with the `Future::select2`
     /// combinator.
     pub fn split(self) -> (T, Either<A, B>) {
         match self {
             Either::A((a, b)) => (a, Either::A(b)),
             Either::B((a, b)) => (a, Either::B(b)),
         }
-	}
+    }
 }
 
 impl<A, B> Future for Either<A, B>
     where A: Future,
           B: Future<Item = A::Item, Error = A::Error>
 {
     type Item = A::Item;
     type Error = A::Error;
--- a/third_party/rust/futures/src/future/flatten.rs
+++ b/third_party/rust/futures/src/future/flatten.rs
@@ -37,13 +37,13 @@ impl<A> Future for Flatten<A>
           A::Item: IntoFuture,
           <<A as Future>::Item as IntoFuture>::Error: From<<A as Future>::Error>
 {
     type Item = <<A as Future>::Item as IntoFuture>::Item;
     type Error = <<A as Future>::Item as IntoFuture>::Error;
 
     fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
         self.state.poll(|a, ()| {
-            let future = try!(a).into_future();
+            let future = a?.into_future();
             Ok(Err(future))
         })
     }
 }
new file mode 100644
--- /dev/null
+++ b/third_party/rust/futures/src/future/inspect.rs
@@ -0,0 +1,40 @@
+use {Future, Poll, Async};
+
+/// Do something with the item of a future, passing it on.
+///
+/// This is created by the `Future::inspect` method.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless polled"]
+pub struct Inspect<A, F> where A: Future {
+    future: A,
+    f: Option<F>,
+}
+
+pub fn new<A, F>(future: A, f: F) -> Inspect<A, F>
+    where A: Future,
+          F: FnOnce(&A::Item),
+{
+    Inspect {
+        future: future,
+        f: Some(f),
+    }
+}
+
+impl<A, F> Future for Inspect<A, F>
+    where A: Future,
+          F: FnOnce(&A::Item),
+{
+    type Item = A::Item;
+    type Error = A::Error;
+
+    fn poll(&mut self) -> Poll<A::Item, A::Error> {
+        match self.future.poll() {
+            Ok(Async::NotReady) => Ok(Async::NotReady),
+            Ok(Async::Ready(e)) => {
+                (self.f.take().expect("cannot poll Inspect twice"))(&e);
+                Ok(Async::Ready(e))
+            },
+            Err(e) => Err(e),
+        }
+    }
+}
--- a/third_party/rust/futures/src/future/join.rs
+++ b/third_party/rust/futures/src/future/join.rs
@@ -145,17 +145,17 @@ enum MaybeDone<A: Future> {
     NotYet(A),
     Done(A::Item),
     Gone,
 }
 
 impl<A: Future> MaybeDone<A> {
     fn poll(&mut self) -> Result<bool, A::Error> {
         let res = match *self {
-            MaybeDone::NotYet(ref mut a) => try!(a.poll()),
+            MaybeDone::NotYet(ref mut a) => a.poll()?,
             MaybeDone::Done(_) => return Ok(true),
             MaybeDone::Gone => panic!("cannot poll Join twice"),
         };
         match res {
             Async::Ready(res) => {
                 *self = MaybeDone::Done(res);
                 Ok(true)
             }
--- a/third_party/rust/futures/src/future/join_all.rs
+++ b/third_party/rust/futures/src/future/join_all.rs
@@ -1,9 +1,9 @@
-//! Definition of the JoinAll combinator, waiting for all of a list of futures
+//! Definition of the `JoinAll` combinator, waiting for all of a list of futures
 //! to finish.
 
 use std::prelude::v1::*;
 
 use std::fmt;
 use std::mem;
 
 use {Future, IntoFuture, Poll, Async};
@@ -38,39 +38,40 @@ impl<I> fmt::Debug for JoinAll<I>
             .finish()
     }
 }
 
 /// Creates a future which represents a collection of the results of the futures
 /// given.
 ///
 /// The returned future will drive execution for all of its underlying futures,
-/// collecting the results into a destination `Vec<T>`. If any future returns
-/// an error then all other futures will be canceled and an error will be
-/// returned immediately. If all futures complete successfully, however, then
-/// the returned future will succeed with a `Vec` of all the successful results.
+/// collecting the results into a destination `Vec<T>` in the same order as they
+/// were provided. If any future returns an error then all other futures will be
+/// canceled and an error will be returned immediately. If all futures complete
+/// successfully, however, then the returned future will succeed with a `Vec` of
+/// all the successful results.
 ///
 /// # Examples
 ///
 /// ```
 /// use futures::future::*;
 ///
 /// let f = join_all(vec![
 ///     ok::<u32, u32>(1),
 ///     ok::<u32, u32>(2),
 ///     ok::<u32, u32>(3),
 /// ]);
 /// let f = f.map(|x| {
 ///     assert_eq!(x, [1, 2, 3]);
 /// });
 ///
 /// let f = join_all(vec![
-///     ok::<u32, u32>(1).boxed(),
-///     err::<u32, u32>(2).boxed(),
-///     ok::<u32, u32>(3).boxed(),
+///     Box::new(ok::<u32, u32>(1)),
+///     Box::new(err::<u32, u32>(2)),
+///     Box::new(ok::<u32, u32>(3)),
 /// ]);
 /// let f = f.then(|x| {
 ///     assert_eq!(x, Err(2));
 ///     x
 /// });
 /// ```
 pub fn join_all<I>(i: I) -> JoinAll<I>
     where I: IntoIterator,
@@ -89,28 +90,28 @@ impl<I> Future for JoinAll<I>
     type Item = Vec<<I::Item as IntoFuture>::Item>;
     type Error = <I::Item as IntoFuture>::Error;
 
 
     fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
         let mut all_done = true;
 
         for idx in 0 .. self.elems.len() {
-            let done_val = match &mut self.elems[idx] {
-                &mut ElemState::Pending(ref mut t) => {
+            let done_val = match self.elems[idx] {
+                ElemState::Pending(ref mut t) => {
                     match t.poll() {
                         Ok(Async::Ready(v)) => Ok(v),
                         Ok(Async::NotReady) => {
                             all_done = false;
                             continue
                         }
                         Err(e) => Err(e),
                     }
                 }
-                &mut ElemState::Done(ref mut _v) => continue,
+                ElemState::Done(ref mut _v) => continue,
             };
 
             match done_val {
                 Ok(v) => self.elems[idx] = ElemState::Done(v),
                 Err(e) => {
                     // On completion drop all our associated resources
                     // ASAP.
                     self.elems = Vec::new();
--- a/third_party/rust/futures/src/future/mod.rs
+++ b/third_party/rust/futures/src/future/mod.rs
@@ -1,13 +1,14 @@
 //! Futures
 //!
 //! This module contains the `Future` trait and a number of adaptors for this
 //! trait. See the crate docs, and the docs for `Future`, for full detail.
 
+use core::fmt;
 use core::result;
 
 // Primitive futures
 mod empty;
 mod lazy;
 mod poll_fn;
 #[path = "result.rs"]
 mod result_;
@@ -50,16 +51,17 @@ mod join;
 mod map;
 mod map_err;
 mod from_err;
 mod or_else;
 mod select;
 mod select2;
 mod then;
 mod either;
+mod inspect;
 
 // impl details
 mod chain;
 
 pub use self::and_then::AndThen;
 pub use self::flatten::Flatten;
 pub use self::flatten_stream::FlattenStream;
 pub use self::fuse::Fuse;
@@ -68,16 +70,17 @@ pub use self::join::{Join, Join3, Join4,
 pub use self::map::Map;
 pub use self::map_err::MapErr;
 pub use self::from_err::FromErr;
 pub use self::or_else::OrElse;
 pub use self::select::{Select, SelectNext};
 pub use self::select2::Select2;
 pub use self::then::Then;
 pub use self::either::Either;
+pub use self::inspect::Inspect;
 
 if_std! {
     mod catch_unwind;
     mod join_all;
     mod select_all;
     mod select_ok;
     mod shared;
     pub use self::catch_unwind::CatchUnwind;
@@ -91,16 +94,20 @@ if_std! {
     #[cfg(feature = "with-deprecated")]
     pub use self::join_all::join_all as collect;
     #[doc(hidden)]
     #[deprecated(since = "0.1.4", note = "use JoinAll instead")]
     #[cfg(feature = "with-deprecated")]
     pub use self::join_all::JoinAll as Collect;
 
     /// A type alias for `Box<Future + Send>`
+    #[doc(hidden)]
+    #[deprecated(note = "removed without replacement, recommended to use a \
+                         local extension trait or function if needed, more \
+                         details in https://github.com/alexcrichton/futures-rs/issues/228")]
     pub type BoxFuture<T, E> = ::std::boxed::Box<Future<Item = T, Error = E> + Send>;
 
     impl<F: ?Sized + Future> Future for ::std::boxed::Box<F> {
         type Item = F::Item;
         type Error = F::Error;
 
         fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
             (**self).poll()
@@ -143,17 +150,17 @@ use {Poll, stream};
 ///
 /// The `poll` method is not intended to be called in general, but rather is
 /// typically called in the context of a "task" which drives a future to
 /// completion. For more information on this see the `task` module.
 ///
 /// More information about the details of `poll` and the nitty-gritty of tasks
 /// can be [found online at tokio.rs][poll-dox].
 ///
-/// [poll-dox]: https://tokio.rs/docs/going-deeper/futures-model/
+/// [poll-dox]: https://tokio.rs/docs/going-deeper-futures/futures-model/
 ///
 /// # Combinators
 ///
 /// Like iterators, futures provide a large number of combinators to work with
 /// futures to express computations in a much more natural method than
 /// scheduling a number of callbacks. For example the `map` method can change
 /// a `Future<Item=T>` to a `Future<Item=U>` or an `and_then` combinator could
 /// create a future after the first one is done and only be resolved when the
@@ -161,45 +168,45 @@ use {Poll, stream};
 ///
 /// Combinators act very similarly to the methods on the `Iterator` trait itself
 /// or those on `Option` and `Result`. Like with iterators, the combinators are
 /// zero-cost and don't impose any extra layers of indirection you wouldn't
 /// otherwise have to write down.
 ///
 /// More information about combinators can be found [on tokio.rs].
 ///
-/// [on tokio.rs]: https://tokio.rs/docs/going-deeper/futures-mechanics/
+/// [on tokio.rs]: https://tokio.rs/docs/going-deeper-futures/futures-mechanics/
 pub trait Future {
     /// The type of value that this future will resolved with if it is
     /// successful.
     type Item;
 
     /// The type of error that this future will resolve with if it fails in a
     /// normal fashion.
     type Error;
 
     /// Query this future to see if its value has become available, registering
     /// interest if it is not.
     ///
     /// This function will check the internal state of the future and assess
-    /// whether the value is ready to be produced. Implementors of this function
+    /// whether the value is ready to be produced. Implementers of this function
     /// should ensure that a call to this **never blocks** as event loops may
     /// not work properly otherwise.
     ///
     /// When a future is not ready yet, the `Async::NotReady` value will be
     /// returned. In this situation the future will *also* register interest of
     /// the current task in the value being produced. This is done by calling
     /// `task::park` to retrieve a handle to the current `Task`. When the future
     /// is then ready to make progress (e.g. it should be `poll`ed again) the
     /// `unpark` method is called on the `Task`.
     ///
     /// More information about the details of `poll` and the nitty-gritty of
     /// tasks can be [found online at tokio.rs][poll-dox].
     ///
-    /// [poll-dox]: https://tokio.rs/docs/going-deeper/futures-model/
+    /// [poll-dox]: https://tokio.rs/docs/going-deeper-futures/futures-model/
     ///
     /// # Runtime characteristics
     ///
     /// This function, `poll`, is the primary method for 'making progress'
     /// within a tree of futures. For example this method will be called
     /// repeatedly as the internal state machine makes its various transitions.
     /// Executors are responsible for ensuring that this function is called in
     /// the right location (e.g. always on an I/O thread or not). Unless it is
@@ -229,16 +236,24 @@ pub trait Future {
     /// error to continue polling the future.
     ///
     /// If `NotReady` is returned, then the future will internally register
     /// interest in the value being produced for the current task (through
     /// `task::park`). In other words, the current task will receive a
     /// notification (through the `unpark` method) once the value is ready to be
     /// produced or the future can make progress.
     ///
+    /// Note that if `NotReady` is returned it only means that *this* task will
+    /// receive a notification. Historical calls to `poll` with different tasks
+    /// will not receive notifications. In other words, implementers of the
+    /// `Future` trait need not store a queue of tasks to notify, but only the
+    /// last task that called this method. Alternatively callers of this method
+    /// can only rely on the most recent task which called `poll` being notified
+    /// when a future is ready.
+    ///
     /// # Panics
     ///
     /// Once a future has completed (returned `Ready` or `Err` from `poll`),
     /// then any future calls to `poll` may panic, block forever, or otherwise
     /// cause wrong behavior. The `Future` trait itself provides no guarantees
     /// about the behavior of `poll` after a future has completed.
     ///
     /// Callers who may call `poll` too many times may want to consider using
@@ -294,21 +309,27 @@ pub trait Future {
     /// `Send` bound, then the `Box::new` function can be used instead.
     ///
     /// This method is only available when the `use_std` feature of this
     /// library is activated, and it is activated by default.
     ///
     /// # Examples
     ///
     /// ```
-    /// use futures::future::*;
+    /// use futures::prelude::*;
+    /// use futures::future::{BoxFuture, result};
     ///
     /// let a: BoxFuture<i32, i32> = result(Ok(1)).boxed();
     /// ```
     #[cfg(feature = "use_std")]
+    #[doc(hidden)]
+    #[deprecated(note = "removed without replacement, recommended to use a \
+                         local extension trait or function if needed, more \
+                         details in https://github.com/alexcrichton/futures-rs/issues/228")]
+    #[allow(deprecated)]
     fn boxed(self) -> BoxFuture<Self::Item, Self::Error>
         where Self: Sized + Send + 'static
     {
         ::std::boxed::Box::new(self)
     }
 
     /// Map this future's result to a different type, returning a new future of
     /// the resulting type.
@@ -323,20 +344,33 @@ pub trait Future {
     ///
     /// Note that this function consumes the receiving future and returns a
     /// wrapped version of it, similar to the existing `map` methods in the
     /// standard library.
     ///
     /// # Examples
     ///
     /// ```
-    /// use futures::future::*;
+    /// use futures::prelude::*;
+    /// use futures::future;
+    ///
+    /// let future = future::ok::<u32, u32>(1);
+    /// let new_future = future.map(|x| x + 3);
+    /// assert_eq!(new_future.wait(), Ok(4));
+    /// ```
     ///
-    /// let future_of_1 = ok::<u32, u32>(1);
-    /// let future_of_4 = future_of_1.map(|x| x + 3);
+    /// Calling `map` on an errored `Future` has no effect:
+    ///
+    /// ```
+    /// use futures::prelude::*;
+    /// use futures::future;
+    ///
+    /// let future = future::err::<u32, u32>(1);
+    /// let new_future = future.map(|x| x + 3);
+    /// assert_eq!(new_future.wait(), Err(1));
     /// ```
     fn map<F, U>(self, f: F) -> Map<Self, F>
         where F: FnOnce(Self::Item) -> U,
               Self: Sized,
     {
         assert_future::<U, Self::Error, _>(map::new(self, f))
     }
 
@@ -354,18 +388,29 @@ pub trait Future {
     /// Note that this function consumes the receiving future and returns a
     /// wrapped version of it.
     ///
     /// # Examples
     ///
     /// ```
     /// use futures::future::*;
     ///
-    /// let future_of_err_1 = err::<u32, u32>(1);
-    /// let future_of_err_4 = future_of_err_1.map_err(|x| x + 3);
+    /// let future = err::<u32, u32>(1);
+    /// let new_future = future.map_err(|x| x + 3);
+    /// assert_eq!(new_future.wait(), Err(4));
+    /// ```
+    ///
+    /// Calling `map_err` on a successful `Future` has no effect:
+    ///
+    /// ```
+    /// use futures::future::*;
+    ///
+    /// let future = ok::<u32, u32>(1);
+    /// let new_future = future.map_err(|x| x + 3);
+    /// assert_eq!(new_future.wait(), Ok(1));
     /// ```
     fn map_err<F, E>(self, f: F) -> MapErr<Self, F>
         where F: FnOnce(Self::Error) -> E,
               Self: Sized,
     {
         assert_future::<Self::Item, E, _>(map_err::new(self, f))
     }
 
@@ -381,20 +426,21 @@ pub trait Future {
     /// combinators like `select` and `join`.
     ///
     /// Note that this function consumes the receiving future and returns a
     /// wrapped version of it.
     ///
     /// # Examples
     ///
     /// ```
-    /// use futures::future::*;
+    /// use futures::prelude::*;
+    /// use futures::future;
     ///
-    /// let future_of_err_1 = err::<u32, u32>(1);
-    /// let future_of_err_4 = future_of_err_1.from_err::<u32>();
+    /// let future_with_err_u8 = future::err::<(), u8>(1);
+    /// let future_with_err_u32 = future_with_err_u8.from_err::<u32>();
     /// ```
     fn from_err<E:From<Self::Error>>(self) -> FromErr<Self, E>
         where Self: Sized,
     {
         assert_future::<Self::Item, E, _>(from_err::new(self))
     }
 
     /// Chain on a computation for when a future finished, passing the result of
@@ -414,28 +460,29 @@ pub trait Future {
     /// run.
     ///
     /// Note that this function consumes the receiving future and returns a
     /// wrapped version of it.
     ///
     /// # Examples
     ///
     /// ```
-    /// use futures::future::*;
+    /// use futures::prelude::*;
+    /// use futures::future;
     ///
-    /// let future_of_1 = ok::<u32, u32>(1);
+    /// let future_of_1 = future::ok::<u32, u32>(1);
     /// let future_of_4 = future_of_1.then(|x| {
     ///     x.map(|y| y + 3)
     /// });
     ///
-    /// let future_of_err_1 = err::<u32, u32>(1);
+    /// let future_of_err_1 = future::err::<u32, u32>(1);
     /// let future_of_4 = future_of_err_1.then(|x| {
     ///     match x {
     ///         Ok(_) => panic!("expected an error"),
-    ///         Err(y) => ok::<u32, u32>(y + 3),
+    ///         Err(y) => future::ok::<u32, u32>(y + 3),
     ///     }
     /// });
     /// ```
     fn then<F, B>(self, f: F) -> Then<Self, B, F>
         where F: FnOnce(result::Result<Self::Item, Self::Error>) -> B,
               B: IntoFuture,
               Self: Sized,
     {
@@ -457,24 +504,25 @@ pub trait Future {
     /// provided closure `f` is never called.
     ///
     /// Note that this function consumes the receiving future and returns a
     /// wrapped version of it.
     ///
     /// # Examples
     ///
     /// ```
-    /// use futures::future::*;
+    /// use futures::prelude::*;
+    /// use futures::future::{self, FutureResult};
     ///
-    /// let future_of_1 = ok::<u32, u32>(1);
+    /// let future_of_1 = future::ok::<u32, u32>(1);
     /// let future_of_4 = future_of_1.and_then(|x| {
     ///     Ok(x + 3)
     /// });
     ///
-    /// let future_of_err_1 = err::<u32, u32>(1);
+    /// let future_of_err_1 = future::err::<u32, u32>(1);
     /// future_of_err_1.and_then(|_| -> FutureResult<u32, u32> {
     ///     panic!("should not be called in case of an error");
     /// });
     /// ```
     fn and_then<F, B>(self, f: F) -> AndThen<Self, B, F>
         where F: FnOnce(Self::Item) -> B,
               B: IntoFuture<Error = Self::Error>,
               Self: Sized,
@@ -497,24 +545,25 @@ pub trait Future {
     /// provided closure `f` is never called.
     ///
     /// Note that this function consumes the receiving future and returns a
     /// wrapped version of it.
     ///
     /// # Examples
     ///
     /// ```
-    /// use futures::future::*;
+    /// use futures::prelude::*;
+    /// use futures::future::{self, FutureResult};
     ///
-    /// let future_of_err_1 = err::<u32, u32>(1);
+    /// let future_of_err_1 = future::err::<u32, u32>(1);
     /// let future_of_4 = future_of_err_1.or_else(|x| -> Result<u32, u32> {
     ///     Ok(x + 3)
     /// });
     ///
-    /// let future_of_1 = ok::<u32, u32>(1);
+    /// let future_of_1 = future::ok::<u32, u32>(1);
     /// future_of_1.or_else(|_| -> FutureResult<u32, u32> {
     ///     panic!("should not be called in case of success");
     /// });
     /// ```
     fn or_else<F, B>(self, f: F) -> OrElse<Self, B, F>
         where F: FnOnce(Self::Error) -> B,
               B: IntoFuture<Item = Self::Item>,
               Self: Sized,
@@ -529,30 +578,52 @@ pub trait Future {
     /// both the value resolved and a future representing the completion of the
     /// other work. Both futures must have the same item and error type.
     ///
     /// Note that this function consumes the receiving futures and returns a
     /// wrapped version of them.
     ///
     /// # Examples
     ///
-    /// ```
-    /// use futures::future::*;
+    /// ```no_run
+    /// use futures::prelude::*;
+    /// use futures::future;
+    /// use std::thread;
+    /// use std::time;
     ///
-    /// // A poor-man's join implemented on top of select
+    /// let future1 = future::lazy(|| {
+    ///     thread::sleep(time::Duration::from_secs(5));
+    ///     future::ok::<char, ()>('a')
+    /// });
+    ///
+    /// let future2 = future::lazy(|| {
+    ///     thread::sleep(time::Duration::from_secs(3));
+    ///     future::ok::<char, ()>('b')
+    /// });
     ///
-    /// fn join<A>(a: A, b: A) -> BoxFuture<(u32, u32), u32>
-    ///     where A: Future<Item = u32, Error = u32> + Send + 'static,
+    /// let (value, last_future) = future1.select(future2).wait().ok().unwrap();
+    /// assert_eq!(value, 'a');
+    /// assert_eq!(last_future.wait().unwrap(), 'b');
+    /// ```
+    ///
+    /// A poor-man's `join` implemented on top of `select`:
+    ///
+    /// ```
+    /// use futures::prelude::*;
+    /// use futures::future;
+    ///
+    /// fn join<A>(a: A, b: A) -> Box<Future<Item=(u32, u32), Error=u32>>
+    ///     where A: Future<Item = u32, Error = u32> + 'static,
     /// {
-    ///     a.select(b).then(|res| {
+    ///     Box::new(a.select(b).then(|res| -> Box<Future<Item=_, Error=_>> {
     ///         match res {
-    ///             Ok((a, b)) => b.map(move |b| (a, b)).boxed(),
-    ///             Err((a, _)) => err(a).boxed(),
+    ///             Ok((a, b)) => Box::new(b.map(move |b| (a, b))),
+    ///             Err((a, _)) => Box::new(future::err(a)),
     ///         }
-    ///     }).boxed()
+    ///     }))
     /// }
     /// ```
     fn select<B>(self, other: B) -> Select<Self, B::Future>
         where B: IntoFuture<Item=Self::Item, Error=Self::Error>,
               Self: Sized,
     {
         let f = select::new(self, other.into_future());
         assert_future::<(Self::Item, SelectNext<Self, B::Future>),
@@ -571,33 +642,34 @@ pub trait Future {
     ///
     /// Also note that if both this and the second future have the same
     /// success/error type you can use the `Either::split` method to
     /// conveniently extract out the value at the end.
     ///
     /// # Examples
     ///
     /// ```
-    /// use futures::future::*;
+    /// use futures::prelude::*;
+    /// use futures::future::{self, Either};
     ///
     /// // A poor-man's join implemented on top of select2
     ///
-    /// fn join<A, B, E>(a: A, b: B) -> BoxFuture<(A::Item, B::Item), E>
-    ///     where A: Future<Error = E> + Send + 'static,
-    ///           B: Future<Error = E> + Send + 'static,
-    ///           A::Item: Send, B::Item: Send, E: Send + 'static,
+    /// fn join<A, B, E>(a: A, b: B) -> Box<Future<Item=(A::Item, B::Item), Error=E>>
+    ///     where A: Future<Error = E> + 'static,
+    ///           B: Future<Error = E> + 'static,
+    ///           E: 'static,
     /// {
-    ///     a.select2(b).then(|res| {
+    ///     Box::new(a.select2(b).then(|res| -> Box<Future<Item=_, Error=_>> {
     ///         match res {
-    ///             Ok(Either::A((x, b))) => b.map(move |y| (x, y)).boxed(),
-    ///             Ok(Either::B((y, a))) => a.map(move |x| (x, y)).boxed(),
-    ///             Err(Either::A((e, _))) => err(e).boxed(),
-    ///             Err(Either::B((e, _))) => err(e).boxed(),
+    ///             Ok(Either::A((x, b))) => Box::new(b.map(move |y| (x, y))),
+    ///             Ok(Either::B((y, a))) => Box::new(a.map(move |x| (x, y))),
+    ///             Err(Either::A((e, _))) => Box::new(future::err(e)),
+    ///             Err(Either::B((e, _))) => Box::new(future::err(e)),
     ///         }
-    ///     }).boxed()
+    ///     }))
     /// }
     /// ```
     fn select2<B>(self, other: B) -> Select2<Self, B::Future>
         where B: IntoFuture, Self: Sized
     {
         select2::new(self, other.into_future())
     }
 
@@ -612,26 +684,38 @@ pub trait Future {
     /// returned.
     ///
     /// Note that this function consumes the receiving future and returns a
     /// wrapped version of it.
     ///
     /// # Examples
     ///
     /// ```
-    /// use futures::future::*;
+    /// use futures::prelude::*;
+    /// use futures::future;
     ///
-    /// let a = ok::<u32, u32>(1);
-    /// let b = ok::<u32, u32>(2);
+    /// let a = future::ok::<u32, u32>(1);
+    /// let b = future::ok::<u32, u32>(2);
     /// let pair = a.join(b);
     ///
-    /// pair.map(|(a, b)| {
-    ///     assert_eq!(a, 1);
-    ///     assert_eq!(b, 2);
-    /// });
+    /// assert_eq!(pair.wait(), Ok((1, 2)));
+    /// ```
+    ///
+    /// If one or both of the joined `Future`s is errored, the resulting
+    /// `Future` will be errored:
+    ///
+    /// ```
+    /// use futures::prelude::*;
+    /// use futures::future;
+    ///
+    /// let a = future::ok::<u32, u32>(1);
+    /// let b = future::err::<u32, u32>(2);
+    /// let pair = a.join(b);
+    ///
+    /// assert_eq!(pair.wait(), Err(2));
     /// ```
     fn join<B>(self, other: B) -> Join<Self, B::Future>
         where B: IntoFuture<Error=Self::Error>,
               Self: Sized,
     {
         let f = join::new(self, other.into_future());
         assert_future::<(Self::Item, B::Item), Self::Error, _>(f)
     }
@@ -672,56 +756,70 @@ pub trait Future {
     /// Convert this future into a single element stream.
     ///
     /// The returned stream contains single success if this future resolves to
     /// success or single error if this future resolves into error.
     ///
     /// # Examples
     ///
     /// ```
-    /// use futures::{Stream, Async};
-    /// use futures::future::*;
+    /// use futures::prelude::*;
+    /// use futures::future;
     ///
-    /// let future = ok::<_, bool>(17);
+    /// let future = future::ok::<_, bool>(17);
     /// let mut stream = future.into_stream();
     /// assert_eq!(Ok(Async::Ready(Some(17))), stream.poll());
     /// assert_eq!(Ok(Async::Ready(None)), stream.poll());
     ///
-    /// let future = err::<bool, _>(19);
+    /// let future = future::err::<bool, _>(19);
     /// let mut stream = future.into_stream();
     /// assert_eq!(Err(19), stream.poll());
     /// assert_eq!(Ok(Async::Ready(None)), stream.poll());
     /// ```
     fn into_stream(self) -> IntoStream<Self>
         where Self: Sized
     {
         into_stream::new(self)
     }
 
     /// Flatten the execution of this future when the successful result of this
     /// future is itself another future.
     ///
     /// This can be useful when combining futures together to flatten the
-    /// computation out the the final result. This method can only be called
+    /// computation out the final result. This method can only be called
     /// when the successful result of this future itself implements the
     /// `IntoFuture` trait and the error can be created from this future's error
     /// type.
     ///
     /// This method is roughly equivalent to `self.and_then(|x| x)`.
     ///
     /// Note that this function consumes the receiving future and returns a
     /// wrapped version of it.
     ///
     /// # Examples
     ///
     /// ```
-    /// use futures::future::*;
+    /// use futures::prelude::*;
+    /// use futures::future;
+    ///
+    /// let nested_future = future::ok::<_, u32>(future::ok::<u32, u32>(1));
+    /// let future = nested_future.flatten();
+    /// assert_eq!(future.wait(), Ok(1));
+    /// ```
     ///
-    /// let future_of_a_future = ok::<_, u32>(ok::<u32, u32>(1));
-    /// let future_of_1 = future_of_a_future.flatten();
+    /// Calling `flatten` on an errored `Future`, or if the inner `Future` is
+    /// errored, will result in an errored `Future`:
+    ///
+    /// ```
+    /// use futures::prelude::*;
+    /// use futures::future;
+    ///
+    /// let nested_future = future::ok::<_, u32>(future::err::<u32, u32>(1));
+    /// let future = nested_future.flatten();
+    /// assert_eq!(future.wait(), Err(1));
     /// ```
     fn flatten(self) -> Flatten<Self>
         where Self::Item: IntoFuture,
         <<Self as Future>::Item as IntoFuture>::Error:
             From<<Self as Future>::Error>,
         Self: Sized
     {
         let f = flatten::new(self);
@@ -738,27 +836,28 @@ pub trait Future {
     /// call site.
     ///
     /// Note that this function consumes this future and returns a wrapped
     /// version of it.
     ///
     /// # Examples
     ///
     /// ```
-    /// use futures::stream::{self, Stream};
-    /// use futures::future::*;
+    /// use futures::prelude::*;
+    /// use futures::future;
+    /// use futures::stream;
     ///
-    /// let stream_items = vec![Ok(17), Err(true), Ok(19)];
-    /// let future_of_a_stream = ok::<_, bool>(stream::iter(stream_items));
+    /// let stream_items = vec![17, 18, 19];
+    /// let future_of_a_stream = future::ok::<_, bool>(stream::iter_ok(stream_items));
     ///
     /// let stream = future_of_a_stream.flatten_stream();
     ///
     /// let mut iter = stream.wait();
     /// assert_eq!(Ok(17), iter.next().unwrap());
-    /// assert_eq!(Err(true), iter.next().unwrap());
+    /// assert_eq!(Ok(18), iter.next().unwrap());
     /// assert_eq!(Ok(19), iter.next().unwrap());
     /// assert_eq!(None, iter.next());
     /// ```
     fn flatten_stream(self) -> FlattenStream<Self>
         where <Self as Future>::Item: stream::Stream<Error=Self::Error>,
               Self: Sized
     {
         flatten_stream::new(self)
@@ -778,37 +877,60 @@ pub trait Future {
     /// resolve).  This, unlike the trait's `poll` method, is guaranteed.
     ///
     /// This combinator will drop this future as soon as it's been completed to
     /// ensure resources are reclaimed as soon as possible.
     ///
     /// # Examples
     ///
     /// ```rust
-    /// use futures::Async;
-    /// use futures::future::*;
+    /// use futures::prelude::*;
+    /// use futures::future;
     ///
-    /// let mut future = ok::<i32, u32>(2);
+    /// let mut future = future::ok::<i32, u32>(2);
     /// assert_eq!(future.poll(), Ok(Async::Ready(2)));
     ///
     /// // Normally, a call such as this would panic:
     /// //future.poll();
     ///
     /// // This, however, is guaranteed to not panic
-    /// let mut future = ok::<i32, u32>(2).fuse();
+    /// let mut future = future::ok::<i32, u32>(2).fuse();
     /// assert_eq!(future.poll(), Ok(Async::Ready(2)));
     /// assert_eq!(future.poll(), Ok(Async::NotReady));
     /// ```
     fn fuse(self) -> Fuse<Self>
         where Self: Sized
     {
         let f = fuse::new(self);
         assert_future::<Self::Item, Self::Error, _>(f)
     }
 
+    /// Do something with the item of a future, passing it on.
+    ///
+    /// When using futures, you'll often chain several of them together.
+    /// While working on such code, you might want to check out what's happening at
+    /// various parts in the pipeline. To do that, insert a call to inspect().
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use futures::prelude::*;
+    /// use futures::future;
+    ///
+    /// let future = future::ok::<u32, u32>(1);
+    /// let new_future = future.inspect(|&x| println!("about to resolve: {}", x));
+    /// assert_eq!(new_future.wait(), Ok(1));
+    /// ```
+    fn inspect<F>(self, f: F) -> Inspect<Self, F>
+        where F: FnOnce(&Self::Item) -> (),
+              Self: Sized,
+    {
+        assert_future::<Self::Item, Self::Error, _>(inspect::new(self, f))
+    }
+
     /// Catches unwinding panics while polling the future.
     ///
     /// In general, panics within a future can propagate all the way out to the
     /// task level. This combinator makes it possible to halt unwinding within
     /// the future itself. It's most commonly used within task executors. It's
     /// not recommended to use this for error handling.
     ///
     /// Note that this method requires the `UnwindSafe` bound from the standard
@@ -818,66 +940,69 @@ pub trait Future {
     /// implemented for `AssertUnwindSafe<F>` where `F` implements `Future`.
     ///
     /// This method is only available when the `use_std` feature of this
     /// library is activated, and it is activated by default.
     ///
     /// # Examples
     ///
     /// ```rust
-    /// use futures::future::*;
+    /// use futures::prelude::*;
+    /// use futures::future::{self, FutureResult};
     ///
-    /// let mut future = ok::<i32, u32>(2);
+    /// let mut future = future::ok::<i32, u32>(2);
     /// assert!(future.catch_unwind().wait().is_ok());
     ///
-    /// let mut future = lazy(|| -> FutureResult<i32, u32> {
+    /// let mut future = future::lazy(|| -> FutureResult<i32, u32> {
     ///     panic!();
-    ///     ok::<i32, u32>(2)
+    ///     future::ok::<i32, u32>(2)
     /// });
     /// assert!(future.catch_unwind().wait().is_err());
     /// ```
     #[cfg(feature = "use_std")]
     fn catch_unwind(self) -> CatchUnwind<Self>
         where Self: Sized + ::std::panic::UnwindSafe
     {
         catch_unwind::new(self)
     }
 
     /// Create a cloneable handle to this future where all handles will resolve
     /// to the same result.
     ///
-    /// The shared() method provides a mean to convert any future into a
+    /// The shared() method provides a method to convert any future into a
     /// cloneable future. It enables a future to be polled by multiple threads.
     ///
     /// The returned `Shared` future resolves successfully with
     /// `SharedItem<Self::Item>` or erroneously with `SharedError<Self::Error>`.
     /// Both `SharedItem` and `SharedError` implements `Deref` to allow shared
     /// access to the underlying result. Ownership of `Self::Item` and
     /// `Self::Error` cannot currently be reclaimed.
     ///
     /// This method is only available when the `use_std` feature of this
     /// library is activated, and it is activated by default.
     ///
     /// # Examples
     ///
     /// ```
-    /// use futures::future::*;
+    /// use futures::prelude::*;
+    /// use futures::future;
     ///
-    /// let future = ok::<_, bool>(6);
+    /// let future = future::ok::<_, bool>(6);
     /// let shared1 = future.shared();
     /// let shared2 = shared1.clone();
     /// assert_eq!(6, *shared1.wait().unwrap());
     /// assert_eq!(6, *shared2.wait().unwrap());
     /// ```
     ///
     /// ```
     /// use std::thread;
-    /// use futures::future::*;
+    /// use futures::prelude::*;
+    /// use futures::future;
     ///
-    /// let future = ok::<_, bool>(6);
+    /// let future = future::ok::<_, bool>(6);
     /// let shared1 = future.shared();
     /// let shared2 = shared1.clone();
     /// let join_handle = thread::spawn(move || {
     ///     assert_eq!(6, *shared2.wait().unwrap());
     /// });
     /// assert_eq!(6, *shared1.wait().unwrap());
     /// join_handle.join().unwrap();
     /// ```
@@ -952,8 +1077,94 @@ pub trait FutureFrom<T>: Sized {
     type Future: Future<Item=Self, Error=Self::Error>;
 
     /// Possible errors during conversion.
     type Error;
 
     /// Consume the given value, beginning the conversion.
     fn future_from(T) -> Self::Future;
 }
+
+/// A trait for types which can spawn fresh futures.
+///
+/// This trait is typically implemented for "executors", or those types which
+/// can execute futures to completion. Futures passed to `Spawn::spawn`
+/// typically get turned into a *task* and are then driven to completion.
+///
+/// On spawn, the executor takes ownership of the future and becomes responsible
+/// to call `Future::poll()` whenever a readiness notification is raised.
+pub trait Executor<F: Future<Item = (), Error = ()>> {
+    /// Spawns a future to run on this `Executor`, typically in the
+    /// "background".
+    ///
+    /// This function will return immediately, and schedule the future `future`
+    /// to run on `self`. The details of scheduling and execution are left to
+    /// the implementations of `Executor`, but this is typically a primary point
+    /// for injecting concurrency in a futures-based system. Futures spawned
+    /// through this `execute` function tend to run concurrently while they're
+    /// waiting on events.
+    ///
+    /// # Errors
+    ///
+    /// Implementers of this trait are allowed to reject accepting this future
+    /// as well. This can happen for various reason such as:
+    ///
+    /// * The executor is shut down
+    /// * The executor has run out of capacity to execute futures
+    ///
+    /// The decision is left to the caller how to work with this form of error.
+    /// The error returned transfers ownership of the future back to the caller.
+    fn execute(&self, future: F) -> Result<(), ExecuteError<F>>;
+}
+
+/// Errors returned from the `Spawn::spawn` function.
+pub struct ExecuteError<F> {
+    future: F,
+    kind: ExecuteErrorKind,
+}
+
+/// Kinds of errors that can be returned from the `Execute::spawn` function.
+///
+/// Executors which may not always be able to accept a future may return one of
+/// these errors, indicating why it was unable to spawn a future.
+#[derive(Debug, Copy, Clone, PartialEq)]
+pub enum ExecuteErrorKind {
+    /// This executor has shut down and will no longer accept new futures to
+    /// spawn.
+    Shutdown,
+
+    /// This executor has no more capacity to run more futures. Other futures
+    /// need to finish before this executor can accept another.
+    NoCapacity,
+
+    #[doc(hidden)]
+    __Nonexhaustive,
+}
+
+impl<F> ExecuteError<F> {
+    /// Create a new `ExecuteError`
+    pub fn new(kind: ExecuteErrorKind, future: F) -> ExecuteError<F> {
+        ExecuteError {
+            future: future,
+            kind: kind,
+        }
+    }
+
+    /// Returns the associated reason for the error
+    pub fn kind(&self) -> ExecuteErrorKind {
+        self.kind
+    }
+
+    /// Consumes self and returns the original future that was spawned.
+    pub fn into_future(self) -> F {
+        self.future
+    }
+}
+
+impl<F> fmt::Debug for ExecuteError<F> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match self.kind {
+            ExecuteErrorKind::Shutdown => "executor has shut down".fmt(f),
+            ExecuteErrorKind::NoCapacity => "executor has no more capacity".fmt(f),
+            ExecuteErrorKind::__Nonexhaustive => panic!(),
+        }
+    }
+}
--- a/third_party/rust/futures/src/future/result.rs
+++ b/third_party/rust/futures/src/future/result.rs
@@ -2,26 +2,26 @@
 
 use core::result;
 
 use {Future, Poll, Async};
 
 /// A future representing a value that is immediately ready.
 ///
 /// Created by the `result` function.
-#[derive(Debug)]
+#[derive(Debug, Clone)]
 #[must_use = "futures do nothing unless polled"]
 // TODO: rename this to `Result` on the next major version
 pub struct FutureResult<T, E> {
     inner: Option<result::Result<T, E>>,
 }
 
 /// Creates a new "leaf future" which will resolve with the given result.
 ///
-/// The returned future represents a computation which is finshed immediately.
+/// The returned future represents a computation which is finished immediately.
 /// This can be useful with the `finished` and `failed` base future types to
 /// convert an immediate value to a future to interoperate elsewhere.
 ///
 /// # Examples
 ///
 /// ```
 /// use futures::future::*;
 ///
@@ -68,8 +68,14 @@ pub fn err<T, E>(e: E) -> FutureResult<T
 impl<T, E> Future for FutureResult<T, E> {
     type Item = T;
     type Error = E;
 
     fn poll(&mut self) -> Poll<T, E> {
         self.inner.take().expect("cannot poll Result twice").map(Async::Ready)
     }
 }
+
+impl<T, E> From<Result<T, E>> for FutureResult<T, E> {
+    fn from(r: Result<T, E>) -> Self {
+        result(r)
+    }
+}
--- a/third_party/rust/futures/src/future/select2.rs
+++ b/third_party/rust/futures/src/future/select2.rs
@@ -1,15 +1,17 @@
 use {Future, Poll, Async};
 use future::Either;
 
-/// Future for the `merge` combinator, waiting for one of two differently-typed
+/// Future for the `select2` combinator, waiting for one of two differently-typed
 /// futures to complete.
 ///
-/// This is created by the `Future::merge` method.
+/// This is created by the [`Future::select2`] method.
+///
+/// [`Future::select2`]: trait.Future.html#method.select2
 #[must_use = "futures do nothing unless polled"]
 #[derive(Debug)]
 pub struct Select2<A, B> {
     inner: Option<(A, B)>,
 }
 
 pub fn new<A, B>(a: A, b: B) -> Select2<A, B> {
     Select2 { inner: Some((a, b)) }
@@ -18,20 +20,20 @@ pub fn new<A, B>(a: A, b: B) -> Select2<
 impl<A, B> Future for Select2<A, B> where A: Future, B: Future {
     type Item = Either<(A::Item, B), (B::Item, A)>;
     type Error = Either<(A::Error, B), (B::Error, A)>;
 
     fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
         let (mut a, mut b) = self.inner.take().expect("cannot poll Select2 twice");
         match a.poll() {
             Err(e) => Err(Either::A((e, b))),
-            Ok(Async::Ready(x)) => Ok(Async::Ready((Either::A((x, b))))),
+            Ok(Async::Ready(x)) => Ok(Async::Ready(Either::A((x, b)))),
             Ok(Async::NotReady) => match b.poll() {
                 Err(e) => Err(Either::B((e, a))),
-                Ok(Async::Ready(x)) => Ok(Async::Ready((Either::B((x, a))))),
+                Ok(Async::Ready(x)) => Ok(Async::Ready(Either::B((x, a)))),
                 Ok(Async::NotReady) => {
                     self.inner = Some((a, b));
                     Ok(Async::NotReady)
                 }
             }
         }
     }
 }
--- a/third_party/rust/futures/src/future/select_all.rs
+++ b/third_party/rust/futures/src/future/select_all.rs
@@ -1,9 +1,9 @@
-//! Definition of the SelectAll, finding the first future in a list that
+//! Definition of the `SelectAll`, finding the first future in a list that
 //! finishes.
 
 use std::mem;
 use std::prelude::v1::*;
 
 use {Future, IntoFuture, Poll, Async};
 
 /// Future for the `select_all` combinator, waiting for one of any of a list of
--- a/third_party/rust/futures/src/future/select_ok.rs
+++ b/third_party/rust/futures/src/future/select_ok.rs
@@ -2,17 +2,17 @@
 //! in a list.
 
 use std::mem;
 use std::prelude::v1::*;
 
 use {Future, IntoFuture, Poll, Async};
 
 /// Future for the `select_ok` combinator, waiting for one of any of a list of
-/// futures to succesfully complete. unlike `select_all`, this future ignores all
+/// futures to successfully complete. Unlike `select_all`, this future ignores all
 /// but the last error, if there are any.
 ///
 /// This is created by the `select_ok` function.
 #[derive(Debug)]
 #[must_use = "futures do nothing unless polled"]
 pub struct SelectOk<A> where A: Future {
     inner: Vec<A>,
 }
--- a/third_party/rust/futures/src/future/shared.rs
+++ b/third_party/rust/futures/src/future/shared.rs
@@ -9,28 +9,28 @@
 //! let future = ok::<_, bool>(6);
 //! let shared1 = future.shared();
 //! let shared2 = shared1.clone();
 //! assert_eq!(6, *shared1.wait().unwrap());
 //! assert_eq!(6, *shared2.wait().unwrap());
 //! ```
 
 use {Future, Poll, Async};
-use executor::{self, Spawn, Unpark};
 use task::{self, Task};
+use executor::{self, Notify, Spawn};
 
-use std::{fmt, mem, ops};
+use std::{error, fmt, mem, ops};
 use std::cell::UnsafeCell;
 use std::sync::{Arc, Mutex};
 use std::sync::atomic::AtomicUsize;
 use std::sync::atomic::Ordering::SeqCst;
 use std::collections::HashMap;
 
 /// A future that is cloneable and can be polled in multiple threads.
-/// Use Future::shared() method to convert any future into a `Shared` future.
+/// Use `Future::shared()` method to convert any future into a `Shared` future.
 #[must_use = "futures do nothing unless polled"]
 pub struct Shared<F: Future> {
     inner: Arc<Inner<F>>,
     waiter: usize,
 }
 
 impl<F> fmt::Debug for Shared<F>
     where F: Future + fmt::Debug,
@@ -44,35 +44,35 @@ impl<F> fmt::Debug for Shared<F>
             .finish()
     }
 }
 
 struct Inner<F: Future> {
     next_clone_id: AtomicUsize,
     future: UnsafeCell<Option<Spawn<F>>>,
     result: UnsafeCell<Option<Result<SharedItem<F::Item>, SharedError<F::Error>>>>,
-    unparker: Arc<Unparker>,
+    notifier: Arc<Notifier>,
 }
 
-struct Unparker {
+struct Notifier {
     state: AtomicUsize,
     waiters: Mutex<HashMap<usize, Task>>,
 }
 
 const IDLE: usize = 0;
 const POLLING: usize = 1;
 const REPOLL: usize = 2;
 const COMPLETE: usize = 3;
 const POISONED: usize = 4;
 
 pub fn new<F: Future>(future: F) -> Shared<F> {
     Shared {
         inner: Arc::new(Inner {
             next_clone_id: AtomicUsize::new(1),
-            unparker: Arc::new(Unparker {
+            notifier: Arc::new(Notifier {
                 state: AtomicUsize::new(IDLE),
                 waiters: Mutex::new(HashMap::new()),
             }),
             future: UnsafeCell::new(Some(executor::spawn(future))),
             result: UnsafeCell::new(None),
         }),
         waiter: 0,
     }
@@ -86,55 +86,55 @@ impl<F> Shared<F> where F: Future {
     pub fn new(future: F) -> Self {
         new(future)
     }
 
     /// If any clone of this `Shared` has completed execution, returns its result immediately
     /// without blocking. Otherwise, returns None without triggering the work represented by
     /// this `Shared`.
     pub fn peek(&self) -> Option<Result<SharedItem<F::Item>, SharedError<F::Error>>> {
-        match self.inner.unparker.state.load(SeqCst) {
+        match self.inner.notifier.state.load(SeqCst) {
             COMPLETE => {
                 Some(unsafe { self.clone_result() })
             }
             POISONED => panic!("inner future panicked during poll"),
             _ => None,
         }
     }
 
     fn set_waiter(&mut self) {
-        let mut waiters = self.inner.unparker.waiters.lock().unwrap();
-        waiters.insert(self.waiter, task::park());
+        let mut waiters = self.inner.notifier.waiters.lock().unwrap();
+        waiters.insert(self.waiter, task::current());
     }
 
     unsafe fn clone_result(&self) -> Result<SharedItem<F::Item>, SharedError<F::Error>> {
         match *self.inner.result.get() {
             Some(Ok(ref item)) => Ok(SharedItem { item: item.item.clone() }),
             Some(Err(ref e)) => Err(SharedError { error: e.error.clone() }),
             _ => unreachable!(),
         }
     }
 
     fn complete(&self) {
         unsafe { *self.inner.future.get() = None };
-        self.inner.unparker.state.store(COMPLETE, SeqCst);
-        self.inner.unparker.unpark();
+        self.inner.notifier.state.store(COMPLETE, SeqCst);
+        self.inner.notifier.notify(0);
     }
 }
 
 impl<F> Future for Shared<F>
     where F: Future
 {
     type Item = SharedItem<F::Item>;
     type Error = SharedError<F::Error>;
 
     fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
         self.set_waiter();
 
-        match self.inner.unparker.state.compare_and_swap(IDLE, POLLING, SeqCst) {
+        match self.inner.notifier.state.compare_and_swap(IDLE, POLLING, SeqCst) {
             IDLE => {
                 // Lock acquired, fall through
             }
             POLLING | REPOLL => {
                 // Another task is currently polling, at this point we just want
                 // to ensure that our task handle is currently registered
 
                 return Ok(Async::NotReady);
@@ -154,33 +154,34 @@ impl<F> Future for Shared<F>
                     use std::thread;
 
                     if thread::panicking() {
                         self.0.store(POISONED, SeqCst);
                     }
                 }
             }
 
-            let _reset = Reset(&self.inner.unparker.state);
-
-            // Get a handle to the unparker
-            let unpark: Arc<Unpark> = self.inner.unparker.clone();
+            let _reset = Reset(&self.inner.notifier.state);
 
             // Poll the future
-            match unsafe { (*self.inner.future.get()).as_mut().unwrap().poll_future(unpark) } {
+            let res = unsafe {
+                (*self.inner.future.get()).as_mut().unwrap()
+                    .poll_future_notify(&self.inner.notifier, 0)
+            };
+            match res {
                 Ok(Async::NotReady) => {
                     // Not ready, try to release the handle
-                    match self.inner.unparker.state.compare_and_swap(POLLING, IDLE, SeqCst) {
+                    match self.inner.notifier.state.compare_and_swap(POLLING, IDLE, SeqCst) {
                         POLLING => {
                             // Success
                             return Ok(Async::NotReady);
                         }
                         REPOLL => {
                             // Gotta poll again!
-                            let prev = self.inner.unparker.state.swap(POLLING, SeqCst);
+                            let prev = self.inner.notifier.state.swap(POLLING, SeqCst);
                             assert_eq!(prev, REPOLL);
                         }
                         _ => unreachable!(),
                     }
 
                 }
                 Ok(Async::Ready(i)) => {
                     unsafe {
@@ -212,29 +213,29 @@ impl<F> Clone for Shared<F> where F: Fut
             inner: self.inner.clone(),
             waiter: next_clone_id,
         }
     }
 }
 
 impl<F> Drop for Shared<F> where F: Future {
     fn drop(&mut self) {
-        let mut waiters = self.inner.unparker.waiters.lock().unwrap();
+        let mut waiters = self.inner.notifier.waiters.lock().unwrap();
         waiters.remove(&self.waiter);
     }
 }
 
-impl Unpark for Unparker {
-    fn unpark(&self) {
+impl Notify for Notifier {
+    fn notify(&self, _id: usize) {
         self.state.compare_and_swap(POLLING, REPOLL, SeqCst);
 
         let waiters = mem::replace(&mut *self.waiters.lock().unwrap(), HashMap::new());
 
         for (_, waiter) in waiters {
-            waiter.unpark();
+            waiter.notify();
         }
     }
 }
 
 unsafe impl<F: Future> Sync for Inner<F> {}
 unsafe impl<F: Future> Send for Inner<F> {}
 
 impl<F> fmt::Debug for Inner<F>
@@ -243,37 +244,57 @@ impl<F> fmt::Debug for Inner<F>
           F::Error: fmt::Debug,
 {
     fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
         fmt.debug_struct("Inner")
             .finish()
     }
 }
 
-/// A wrapped item of the original future that is clonable and implements Deref
+/// A wrapped item of the original future that is cloneable and implements Deref
 /// for ease of use.
-#[derive(Debug)]
+#[derive(Clone, Debug)]
 pub struct SharedItem<T> {
     item: Arc<T>,
 }
 
 impl<T> ops::Deref for SharedItem<T> {
     type Target = T;
 
     fn deref(&self) -> &T {
         &self.item.as_ref()
     }
 }
 
-/// A wrapped error of the original future that is clonable and implements Deref
+/// A wrapped error of the original future that is cloneable and implements Deref
 /// for ease of use.
-#[derive(Debug)]
+#[derive(Clone, Debug)]
 pub struct SharedError<E> {
     error: Arc<E>,
 }
 
 impl<E> ops::Deref for SharedError<E> {
     type Target = E;
 
     fn deref(&self) -> &E {
         &self.error.as_ref()
     }
 }
+
+impl<E> fmt::Display for SharedError<E>
+    where E: fmt::Display,
+{
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        self.error.fmt(f)
+    }
+}
+
+impl<E> error::Error for SharedError<E>
+    where E: error::Error,
+{
+    fn description(&self) -> &str {
+        self.error.description()
+    }
+
+    fn cause(&self) -> Option<&error::Error> {
+        self.error.cause()
+    }
+}
--- a/third_party/rust/futures/src/lib.rs
+++ b/third_party/rust/futures/src/lib.rs
@@ -32,17 +32,18 @@
 //!
 //! Let's take a look at a few examples of how futures might be used:
 //!
 //! ```
 //! extern crate futures;
 //!
 //! use std::io;
 //! use std::time::Duration;
-//! use futures::future::{Future, Map};
+//! use futures::prelude::*;
+//! use futures::future::Map;
 //!
 //! // A future is actually a trait implementation, so we can generically take a
 //! // future of any integer and return back a future that will resolve to that
 //! // value plus 10 more.
 //! //
 //! // Note here that like iterators, we're returning the `Map` combinator in
 //! // the futures crate, not a boxed abstraction. This is a zero-cost
 //! // construction of a future.
@@ -191,26 +192,31 @@ pub use future::{done, empty, failed, fi
 #[cfg(feature = "with-deprecated")]
 #[deprecated(since = "0.1.4", note = "import through the future module instead")]
 pub use future::{
     Done, Empty, Failed, Finished, Lazy, AndThen, Flatten, FlattenStream, Fuse, IntoStream,
     Join, Join3, Join4, Join5, Map, MapErr, OrElse, Select,
     SelectNext, Then
 };
 
-if_std! {
-    mod lock;
-    mod task_impl;
-    mod stack;
+#[cfg(feature = "use_std")]
+mod lock;
+mod task_impl;
+
+mod resultstream;
 
-    pub mod task;
-    pub mod executor;
-    pub mod sync;
-    pub mod unsync;
+pub mod task;
+pub mod executor;
+#[cfg(feature = "use_std")]
+pub mod sync;
+#[cfg(feature = "use_std")]
+pub mod unsync;
 
+
+if_std! {
     #[doc(hidden)]
     #[deprecated(since = "0.1.4", note = "use sync::oneshot::channel instead")]
     #[cfg(feature = "with-deprecated")]
     pub use sync::oneshot::channel as oneshot;
 
     #[doc(hidden)]
     #[deprecated(since = "0.1.4", note = "use sync::oneshot::Receiver instead")]
     #[cfg(feature = "with-deprecated")]
@@ -224,15 +230,36 @@ if_std! {
     #[doc(hidden)]
     #[deprecated(since = "0.1.4", note = "use sync::oneshot::Canceled instead")]
     #[cfg(feature = "with-deprecated")]
     pub use sync::oneshot::Canceled;
 
     #[doc(hidden)]
     #[deprecated(since = "0.1.4", note = "import through the future module instead")]
     #[cfg(feature = "with-deprecated")]
+    #[allow(deprecated)]
     pub use future::{BoxFuture, collect, select_all, select_ok};
 
     #[doc(hidden)]
     #[deprecated(since = "0.1.4", note = "import through the future module instead")]
     #[cfg(feature = "with-deprecated")]
     pub use future::{SelectAll, SelectAllNext, Collect, SelectOk};
 }
+
+/// A "prelude" for crates using the `futures` crate.
+///
+/// This prelude is similar to the standard library's prelude in that you'll
+/// almost always want to import its entire contents, but unlike the standard
+/// library's prelude you'll have to do so manually. An example of using this is:
+///
+/// ```
+/// use futures::prelude::*;
+/// ```
+///
+/// We may add items to this over time as they become ubiquitous as well, but
+/// otherwise this should help cut down on futures-related imports when you're
+/// working with the `futures` crate!
+pub mod prelude {
+    #[doc(no_inline)]
+    pub use {Future, Stream, Sink, Async, AsyncSink, Poll, StartSend};
+    #[doc(no_inline)]
+    pub use IntoFuture;
+}
--- a/third_party/rust/futures/src/poll.rs
+++ b/third_party/rust/futures/src/poll.rs
@@ -25,17 +25,17 @@ pub enum Async<T> {
     /// Represents that a value is immediately ready.
     Ready(T),
 
     /// Represents that a value is not ready yet, but may be so later.
     NotReady,
 }
 
 impl<T> Async<T> {
-    /// Change the success type of this `Async` value with the closure provided
+    /// Change the success value of this `Async` with the closure provided
     pub fn map<F, U>(self, f: F) -> Async<U>
         where F: FnOnce(T) -> U
     {
         match self {
             Async::Ready(t) => Async::Ready(f(t)),
             Async::NotReady => Async::NotReady,
         }
     }
@@ -70,16 +70,26 @@ pub enum AsyncSink<T> {
 
     /// The `start_send` attempt failed due to the sink being full. The value
     /// being sent is returned, and the current `Task` will be automatically
     /// notified again once the sink has room.
     NotReady(T),
 }
 
 impl<T> AsyncSink<T> {
+    /// Change the NotReady value of this `AsyncSink` with the closure provided
+    pub fn map<F, U>(self, f: F) -> AsyncSink<U>
+        where F: FnOnce(T) -> U,
+    {
+        match self {
+            AsyncSink::Ready => AsyncSink::Ready,
+            AsyncSink::NotReady(t) => AsyncSink::NotReady(f(t)),
+        }
+    }
+
     /// Returns whether this is `AsyncSink::Ready`
     pub fn is_ready(&self) -> bool {
         match *self {
             AsyncSink::Ready => true,
             AsyncSink::NotReady(_) => false,
         }
     }
 
new file mode 100644
--- /dev/null
+++ b/third_party/rust/futures/src/resultstream.rs
@@ -0,0 +1,46 @@
+// This should really be in the stream module,
+// but `pub(crate)` isn't available until Rust 1.18,
+// and pre-1.18 there isn't a good way to make a sub-module
+// visible to the rest of the crate without also making it public.
+use core::marker::PhantomData;
+
+use {Poll, Async};
+use stream::Stream;
+
+
+/// A stream combinator used to convert a `Stream<Item=T,Error=E>`
+/// to a `Stream<Item=Result<T,E>>`.
+///
+/// A poll on this stream will never return an `Err`. As such the
+/// actual error type is parameterized, so it can match whatever error
+/// type is needed.
+///
+/// This structure is produced by the `Stream::results` method.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct Results<S: Stream, E> {
+    inner: S,
+    phantom: PhantomData<E>
+}
+
+pub fn new<S, E>(s: S) -> Results<S, E> where S: Stream {
+    Results {
+        inner: s,
+        phantom: PhantomData
+    }
+}
+
+impl<S: Stream, E> Stream for Results<S, E> {
+    type Item = Result<S::Item, S::Error>;
+    type Error = E;
+
+    fn poll(&mut self) -> Poll<Option<Result<S::Item, S::Error>>, E> {
+        match self.inner.poll() {
+            Ok(Async::Ready(Some(item))) => Ok(Async::Ready(Some(Ok(item)))),
+            Err(e) => Ok(Async::Ready(Some(Err(e)))),
+            Ok(Async::Ready(None)) => Ok(Async::Ready(None)),
+            Ok(Async::NotReady) => Ok(Async::NotReady)
+        }
+    }
+}
+
--- a/third_party/rust/futures/src/sink/buffer.rs
+++ b/third_party/rust/futures/src/sink/buffer.rs
@@ -31,23 +31,31 @@ impl<S: Sink> Buffer<S> {
         &self.sink
     }
 
     /// Get a mutable reference to the inner sink.
     pub fn get_mut(&mut self) -> &mut S {
         &mut self.sink
     }
 
+    /// Consumes this combinator, returning the underlying sink.
+    ///
+    /// Note that this may discard intermediate state of this combinator, so
+    /// care should be taken to avoid losing resources when this is called.
+    pub fn into_inner(self) -> S {
+        self.sink
+    }
+
     fn try_empty_buffer(&mut self) -> Poll<(), S::SinkError> {
         while let Some(item) = self.buf.pop_front() {
-            if let AsyncSink::NotReady(item) = try!(self.sink.start_send(item)) {
+            if let AsyncSink::NotReady(item) = self.sink.start_send(item)? {
                 self.buf.push_front(item);
 
                 // ensure that we attempt to complete any pushes we've started
-                try!(self.sink.poll_complete());
+                self.sink.poll_complete()?;
 
                 return Ok(Async::NotReady);
             }
         }
 
         Ok(Async::Ready(()))
     }
 }
@@ -62,30 +70,42 @@ impl<S> Stream for Buffer<S> where S: Si
     }
 }
 
 impl<S: Sink> Sink for Buffer<S> {
     type SinkItem = S::SinkItem;
     type SinkError = S::SinkError;
 
     fn start_send(&mut self, item: Self::SinkItem) -> StartSend<Self::SinkItem, Self::SinkError> {
-        try!(self.try_empty_buffer());
-        if self.buf.len() > self.cap {
+        if self.cap == 0 {
+            return self.sink.start_send(item);
+        }
+
+        self.try_empty_buffer()?;
+        if self.buf.len() == self.cap {
             return Ok(AsyncSink::NotReady(item));
         }
         self.buf.push_back(item);
         Ok(AsyncSink::Ready)
     }
 
     fn poll_complete(&mut self) -> Poll<(), Self::SinkError> {
+        if self.cap == 0 {
+            return self.sink.poll_complete();
+        }
+
         try_ready!(self.try_empty_buffer());
         debug_assert!(self.buf.is_empty());
         self.sink.poll_complete()
     }
 
     fn close(&mut self) -> Poll<(), Self::SinkError> {
+        if self.cap == 0 {
+            return self.sink.close();
+        }
+
         if self.buf.len() > 0 {
             try_ready!(self.try_empty_buffer());
         }
         assert_eq!(self.buf.len(), 0);
         self.sink.close()
     }
 }
new file mode 100644
--- /dev/null
+++ b/third_party/rust/futures/src/sink/fanout.rs
@@ -0,0 +1,135 @@
+use core::fmt::{Debug, Formatter, Result as FmtResult};
+use core::mem::replace;
+
+use {Async, AsyncSink, Poll, Sink, StartSend};
+
+/// Sink that clones incoming items and forwards them to two sinks at the same time.
+///
+/// Backpressure from any downstream sink propagates up, which means that this sink
+/// can only process items as fast as its _slowest_ downstream sink.
+pub struct Fanout<A: Sink, B: Sink> {
+    left: Downstream<A>,
+    right: Downstream<B>
+}
+
+impl<A: Sink, B: Sink> Fanout<A, B> {
+    /// Consumes this combinator, returning the underlying sinks.
+    ///
+    /// Note that this may discard intermediate state of this combinator,
+    /// so care should be taken to avoid losing resources when this is called.
+    pub fn into_inner(self) -> (A, B) {
+        (self.left.sink, self.right.sink)
+    }
+}
+
+impl<A: Sink + Debug, B: Sink + Debug> Debug for Fanout<A, B>
+    where A::SinkItem: Debug,
+          B::SinkItem: Debug
+{
+    fn fmt(&self, f: &mut Formatter) -> FmtResult {
+        f.debug_struct("Fanout")
+            .field("left", &self.left)
+            .field("right", &self.right)
+            .finish()
+    }
+}
+
+pub fn new<A: Sink, B: Sink>(left: A, right: B) -> Fanout<A, B> {
+    Fanout {
+        left: Downstream::new(left),
+        right: Downstream::new(right)
+    }
+}
+
+impl<A, B> Sink for Fanout<A, B>
+    where A: Sink,
+          A::SinkItem: Clone,
+          B: Sink<SinkItem=A::SinkItem, SinkError=A::SinkError>
+{
+    type SinkItem = A::SinkItem;
+    type SinkError = A::SinkError;
+
+    fn start_send(
+        &mut self, 
+        item: Self::SinkItem
+    ) -> StartSend<Self::SinkItem, Self::SinkError> {
+        // Attempt to complete processing any outstanding requests.
+        self.left.keep_flushing()?;
+        self.right.keep_flushing()?;
+        // Only if both downstream sinks are ready, start sending the next item.
+        if self.left.is_ready() && self.right.is_ready() {
+            self.left.state = self.left.sink.start_send(item.clone())?;
+            self.right.state = self.right.sink.start_send(item)?;
+            Ok(AsyncSink::Ready)
+        } else {
+            Ok(AsyncSink::NotReady(item))
+        }
+    }
+
+    fn poll_complete(&mut self) -> Poll<(), Self::SinkError> {
+        let left_async = self.left.poll_complete()?;
+        let right_async = self.right.poll_complete()?;
+        // Only if both downstream sinks are ready, signal readiness.
+        if left_async.is_ready() && right_async.is_ready() {
+            Ok(Async::Ready(()))
+        } else {
+            Ok(Async::NotReady)
+        }
+    }
+
+    fn close(&mut self) -> Poll<(), Self::SinkError> {
+        let left_async = self.left.close()?;
+        let right_async = self.right.close()?;
+        // Only if both downstream sinks are ready, signal readiness.
+        if left_async.is_ready() && right_async.is_ready() {
+            Ok(Async::Ready(()))
+        } else {
+            Ok(Async::NotReady)
+        } 
+    }
+}
+
+#[derive(Debug)]
+struct Downstream<S: Sink> {
+    sink: S,
+    state: AsyncSink<S::SinkItem>
+}
+
+impl<S: Sink> Downstream<S> {
+    fn new(sink: S) -> Self {
+        Downstream { sink: sink, state: AsyncSink::Ready }
+    }
+
+    fn is_ready(&self) -> bool {
+        self.state.is_ready()
+    }
+
+    fn keep_flushing(&mut self) -> Result<(), S::SinkError> {
+        if let AsyncSink::NotReady(item) = replace(&mut self.state, AsyncSink::Ready) {
+            self.state = self.sink.start_send(item)?;
+        }
+        Ok(())
+    }
+
+    fn poll_complete(&mut self) -> Poll<(), S::SinkError> {
+        self.keep_flushing()?;
+        let async = self.sink.poll_complete()?;
+        // Only if all values have been sent _and_ the underlying
+        // sink is completely flushed, signal readiness.
+        if self.state.is_ready() && async.is_ready() {
+            Ok(Async::Ready(()))
+        } else {
+            Ok(Async::NotReady)
+        }
+    }
+
+    fn close(&mut self) -> Poll<(), S::SinkError> {
+        self.keep_flushing()?;
+        // If all items have been flushed, initiate close.
+        if self.state.is_ready() {
+            self.sink.close()
+        } else {
+            Ok(Async::NotReady)
+        }
+    }
+}
--- a/third_party/rust/futures/src/sink/flush.rs
+++ b/third_party/rust/futures/src/sink/flush.rs
@@ -18,24 +18,29 @@ impl<S: Sink> Flush<S> {
     pub fn get_ref(&self) -> &S {
         self.sink.as_ref().expect("Attempted `Flush::get_ref` after the flush completed")
     }
 
     /// Get a mutable reference to the inner sink.
     pub fn get_mut(&mut self) -> &mut S {
         self.sink.as_mut().expect("Attempted `Flush::get_mut` after the flush completed")
     }
+
+    /// Consume the `Flush` and return the inner sink.
+    pub fn into_inner(self) -> S {
+        self.sink.expect("Attempted `Flush::into_inner` after the flush completed")
+    }
 }
 
 impl<S: Sink> Future for Flush<S> {
     type Item = S;
     type Error = S::SinkError;
 
     fn poll(&mut self) -> Poll<S, S::SinkError> {
         let mut sink = self.sink.take().expect("Attempted to poll Flush after it completed");
-        if try!(sink.poll_complete()).is_ready() {
+        if sink.poll_complete()?.is_ready() {
             Ok(Async::Ready(sink))
         } else {
             self.sink = Some(sink);
             Ok(Async::NotReady)
         }
     }
 }
--- a/third_party/rust/futures/src/sink/from_err.rs
+++ b/third_party/rust/futures/src/sink/from_err.rs
@@ -2,30 +2,50 @@ use core::marker::PhantomData;
 
 use {Sink, Poll, StartSend};
 
 /// A sink combinator to change the error type of a sink.
 ///
 /// This is created by the `Sink::from_err` method.
 #[derive(Debug)]
 #[must_use = "futures do nothing unless polled"]
-pub struct SinkFromErr<S, E> where S: Sink {
+pub struct SinkFromErr<S, E> {
     sink: S,
     f: PhantomData<E>
 }
 
 pub fn new<S, E>(sink: S) -> SinkFromErr<S, E>
     where S: Sink
 {
     SinkFromErr {
         sink: sink,
         f: PhantomData
     }
 }
 
+impl<S, E> SinkFromErr<S, E> {
+    /// Get a shared reference to the inner sink.
+    pub fn get_ref(&self) -> &S {
+        &self.sink
+    }
+
+    /// Get a mutable reference to the inner sink.
+    pub fn get_mut(&mut self) -> &mut S {
+        &mut self.sink
+    }
+
+    /// Consumes this combinator, returning the underlying sink.
+    ///
+    /// Note that this may discard intermediate state of this combinator, so
+    /// care should be taken to avoid losing resources when this is called.
+    pub fn into_inner(self) -> S {
+        self.sink
+    }
+}
+
 impl<S, E> Sink for SinkFromErr<S, E>
     where S: Sink,
           E: From<S::SinkError>
 {
     type SinkItem = S::SinkItem;
     type SinkError = E;
 
     fn start_send(&mut self, item: Self::SinkItem) -> StartSend<Self::SinkItem, Self::SinkError> {
@@ -36,16 +56,16 @@ impl<S, E> Sink for SinkFromErr<S, E>
         self.sink.poll_complete().map_err(|e| e.into())
     }
 
     fn close(&mut self) -> Poll<(), Self::SinkError> {
         self.sink.close().map_err(|e| e.into())
     }
 }
 
-impl<S: ::stream::Stream, E> ::stream::Stream for SinkFromErr<S, E> where S: Sink {
+impl<S: ::stream::Stream, E> ::stream::Stream for SinkFromErr<S, E> {
     type Item = S::Item;
     type Error = S::Error;
 
     fn poll(&mut self) -> Poll<Option<S::Item>, S::Error> {
         self.sink.poll()
     }
 }
--- a/third_party/rust/futures/src/sink/map_err.rs
+++ b/third_party/rust/futures/src/sink/map_err.rs
@@ -1,24 +1,44 @@
 use sink::Sink;
 
-use {Poll, StartSend};
+use {Poll, StartSend, Stream};
 
 /// Sink for the `Sink::sink_map_err` combinator.
 #[derive(Debug)]
 #[must_use = "sinks do nothing unless polled"]
 pub struct SinkMapErr<S, F> {
     sink: S,
     f: Option<F>,
 }
 
 pub fn new<S, F>(s: S, f: F) -> SinkMapErr<S, F> {
     SinkMapErr { sink: s, f: Some(f) }
 }
 
+impl<S, E> SinkMapErr<S, E> {
+    /// Get a shared reference to the inner sink.
+    pub fn get_ref(&self) -> &S {
+        &self.sink
+    }
+
+    /// Get a mutable reference to the inner sink.
+    pub fn get_mut(&mut self) -> &mut S {
+        &mut self.sink
+    }
+
+    /// Consumes this combinator, returning the underlying sink.
+    ///
+    /// Note that this may discard intermediate state of this combinator, so
+    /// care should be taken to avoid losing resources when this is called.
+    pub fn into_inner(self) -> S {
+        self.sink
+    }
+}
+
 impl<S, F, E> Sink for SinkMapErr<S, F>
     where S: Sink,
           F: FnOnce(S::SinkError) -> E,
 {
     type SinkItem = S::SinkItem;
     type SinkError = E;
 
     fn start_send(&mut self, item: Self::SinkItem) -> StartSend<Self::SinkItem, Self::SinkError> {
@@ -28,8 +48,17 @@ impl<S, F, E> Sink for SinkMapErr<S, F>
     fn poll_complete(&mut self) -> Poll<(), Self::SinkError> {
         self.sink.poll_complete().map_err(|e| self.f.take().expect("cannot use MapErr after an error")(e))
     }
 
     fn close(&mut self) -> Poll<(), Self::SinkError> {
         self.sink.close().map_err(|e| self.f.take().expect("cannot use MapErr after an error")(e))
     }
 }
+
+impl<S: Stream, F> Stream for SinkMapErr<S, F> {
+    type Item = S::Item;
+    type Error = S::Error;
+
+    fn poll(&mut self) -> Poll<Option<S::Item>, S::Error> {
+        self.sink.poll()
+    }
+}
--- a/third_party/rust/futures/src/sink/mod.rs
+++ b/third_party/rust/futures/src/sink/mod.rs
@@ -1,30 +1,32 @@
 //! Asynchronous sinks
 //!
 //! This module contains the `Sink` trait, along with a number of adapter types
-//! for it. An overview is available in the documentaiton for the trait itself.
+//! for it. An overview is available in the documentation for the trait itself.
 //!
 //! You can find more information/tutorials about streams [online at
 //! https://tokio.rs][online]
 //!
 //! [online]: https://tokio.rs/docs/getting-started/streams-and-sinks/
 
 use {IntoFuture, Poll, StartSend};
 use stream::Stream;
 
 mod with;
+mod with_flat_map;
 // mod with_map;
 // mod with_filter;
 // mod with_filter_map;
 mod flush;
 mod from_err;
 mod send;
 mod send_all;
 mod map_err;
+mod fanout;
 
 if_std! {
     mod buffer;
     mod wait;
 
     pub use self::buffer::Buffer;
     pub use self::wait::Wait;
 
@@ -44,17 +46,17 @@ if_std! {
             Ok(::Async::Ready(()))
         }
 
         fn close(&mut self) -> Poll<(), Self::SinkError> {
             Ok(::Async::Ready(()))
         }
     }
 
-    /// A type alias for `Box<Stream + Send>`
+    /// A type alias for `Box<Sink + Send>`
     pub type BoxSink<T, E> = ::std::boxed::Box<Sink<SinkItem = T, SinkError = E> +
                                                ::core::marker::Send>;
 
     impl<S: ?Sized + Sink> Sink for ::std::boxed::Box<S> {
         type SinkItem = S::SinkItem;
         type SinkError = S::SinkError;
 
         fn start_send(&mut self, item: Self::SinkItem)
@@ -68,21 +70,23 @@ if_std! {
 
         fn close(&mut self) -> Poll<(), Self::SinkError> {
             (**self).close()
         }
     }
 }
 
 pub use self::with::With;
+pub use self::with_flat_map::WithFlatMap;
 pub use self::flush::Flush;
 pub use self::send::Send;
 pub use self::send_all::SendAll;
 pub use self::map_err::SinkMapErr;
 pub use self::from_err::SinkFromErr;
+pub use self::fanout::Fanout;
 
 /// A `Sink` is a value into which other values can be sent, asynchronously.
 ///
 /// Basic examples of sinks include the sending side of:
 ///
 /// - Channels
 /// - Sockets
 /// - Pipes
@@ -232,17 +236,17 @@ pub trait Sink {
     /// # Return value
     ///
     /// This function, like `poll_complete`, returns a `Poll`. The value is
     /// `Ready` once the close operation has completed. At that point it should
     /// be safe to drop the sink and deallocate associated resources.
     ///
     /// If the value returned is `NotReady` then the sink is not yet closed and
     /// work needs to be done to close it. The work has been scheduled and the
-    /// current task will recieve a notification when it's next ready to call
+    /// current task will receive a notification when it's next ready to call
     /// this method again.
     ///
     /// Finally, this function may also return an error.
     ///
     /// # Errors
     ///
     /// This function will return an `Err` if any operation along the way during
     /// the close operation fails. An error typically is fatal for a sink and is
@@ -310,16 +314,54 @@ pub trait Sink {
         where F: FnMut(U) -> Fut,
               Fut: IntoFuture<Item = Self::SinkItem>,
               Fut::Error: From<Self::SinkError>,
               Self: Sized
     {
         with::new(self, f)
     }
 
+    /// Composes a function *in front of* the sink.
+    ///
+    /// This adapter produces a new sink that passes each value through the
+    /// given function `f` before sending it to `self`.
+    ///
+    /// To process each value, `f` produces a *stream*, of which each value
+    /// is passed to the underlying sink. A new value will not be accepted until
+    /// the stream has been drained.
+    ///
+    /// Note that this function consumes the given sink, returning a wrapped
+    /// version, much like `Iterator::flat_map`.
+    ///
+    /// # Examples
+    /// ---
+    /// Using this function with an iterator through use of the `stream::iter_ok()`
+    /// function
+    ///
+    /// ```
+    /// use futures::prelude::*;
+    /// use futures::stream;
+    /// use futures::sync::mpsc;
+    ///
+    /// let (tx, rx) = mpsc::channel::<i32>(5);
+    ///
+    /// let tx = tx.with_flat_map(|x| {
+    ///     stream::iter_ok(vec![42; x].into_iter().map(|y| y))
+    /// });
+    /// tx.send(5).wait().unwrap();
+    /// assert_eq!(rx.collect().wait(), Ok(vec![42, 42, 42, 42, 42]))
+    /// ```
+    fn with_flat_map<U, F, St>(self, f: F) -> WithFlatMap<Self, U, F, St>
+        where F: FnMut(U) -> St,
+              St: Stream<Item = Self::SinkItem, Error=Self::SinkError>,
+              Self: Sized
+        {
+            with_flat_map::new(self, f)
+        }
+
     /*
     fn with_map<U, F>(self, f: F) -> WithMap<Self, U, F>
         where F: FnMut(U) -> Self::SinkItem,
               Self: Sized;
 
     fn with_filter<F>(self, f: F) -> WithFilter<Self, F>
         where F: FnMut(Self::SinkItem) -> bool,
               Self: Sized;
@@ -362,16 +404,28 @@ pub trait Sink {
     /// library is activated, and it is activated by default.
     #[cfg(feature = "use_std")]
     fn buffer(self, amt: usize) -> Buffer<Self>
         where Self: Sized
     {
         buffer::new(self, amt)
     }
 
+    /// Fanout items to multiple sinks.
+    ///
+    /// This adapter clones each incoming item and forwards it to both this as well as
+    /// the other sink at the same time.
+    fn fanout<S>(self, other: S) -> Fanout<Self, S>
+        where Self: Sized,
+              Self::SinkItem: Clone,
+              S: Sink<SinkItem=Self::SinkItem, SinkError=Self::SinkError>
+    {
+        fanout::new(self, other)
+    }
+
     /// A future that completes when the sink has finished processing all
     /// pending requests.
     ///
     /// The sink itself is returned after flushing is complete; this adapter is
     /// intended to be used when you want to stop sending to the sink until
     /// all current requests are processed.
     fn flush(self) -> Flush<Self>
         where Self: Sized
@@ -393,21 +447,23 @@ pub trait Sink {
         send::new(self, item)
     }
 
     /// A future that completes after the given stream has been fully processed
     /// into the sink, including flushing.
     ///
     /// This future will drive the stream to keep producing items until it is
     /// exhausted, sending each item to the sink. It will complete once both the
-    /// stream is exhausted, and the sink has fully processed and flushed all of
-    /// the items sent to it.
+    /// stream is exhausted, the sink has received all items, the sink has been
+    /// flushed, and the sink has been closed.
     ///
     /// Doing `sink.send_all(stream)` is roughly equivalent to
-    /// `stream.forward(sink)`.
+    /// `stream.forward(sink)`. The returned future will exhaust all items from
+    /// `stream` and send them to `self`, closing `self` when all items have been
+    /// received.
     ///
     /// On completion, the pair `(sink, source)` is returned.
     fn send_all<S>(self, stream: S) -> SendAll<Self, S>
         where S: Stream<Item = Self::SinkItem>,
               Self::SinkError: From<S::Error>,
               Self: Sized
     {
         send_all::new(self, stream)
--- a/third_party/rust/futures/src/sink/send.rs
+++ b/third_party/rust/futures/src/sink/send.rs
@@ -38,22 +38,22 @@ impl<S: Sink> Send<S> {
 }
 
 impl<S: Sink> Future for Send<S> {
     type Item = S;
     type Error = S::SinkError;
 
     fn poll(&mut self) -> Poll<S, S::SinkError> {
         if let Some(item) = self.item.take() {
-            if let AsyncSink::NotReady(item) = try!(self.sink_mut().start_send(item)) {
+            if let AsyncSink::NotReady(item) = self.sink_mut().start_send(item)? {
                 self.item = Some(item);
-                return Ok(Async::NotReady)
+                return Ok(Async::NotReady);
             }
         }
 
         // we're done sending the item, but want to block on flushing the
         // sink
         try_ready!(self.sink_mut().poll_complete());
 
         // now everything's emptied, so return the sink for further use
-        return Ok(Async::Ready(self.take_sink()))
+        Ok(Async::Ready(self.take_sink()))
     }
 }
--- a/third_party/rust/futures/src/sink/send_all.rs
+++ b/third_party/rust/futures/src/sink/send_all.rs
@@ -38,22 +38,22 @@ impl<T, U> SendAll<T, U>
             .expect("Attempted to poll SendAll after completion")
     }
 
     fn take_result(&mut self) -> (T, U) {
         let sink = self.sink.take()
             .expect("Attempted to poll Forward after completion");
         let fuse = self.stream.take()
             .expect("Attempted to poll Forward after completion");
-        return (sink, fuse.into_inner());
+        (sink, fuse.into_inner())
     }
 
     fn try_start_send(&mut self, item: U::Item) -> Poll<(), T::SinkError> {
         debug_assert!(self.buffered.is_none());
-        if let AsyncSink::NotReady(item) = try!(self.sink_mut().start_send(item)) {
+        if let AsyncSink::NotReady(item) = self.sink_mut().start_send(item)? {
             self.buffered = Some(item);
             return Ok(Async::NotReady)
         }
         Ok(Async::Ready(()))
     }
 }
 
 impl<T, U> Future for SendAll<T, U>
@@ -67,17 +67,17 @@ impl<T, U> Future for SendAll<T, U>
     fn poll(&mut self) -> Poll<(T, U), T::SinkError> {
         // If we've got an item buffered already, we need to write it to the
         // sink before we can do anything else
         if let Some(item) = self.buffered.take() {
             try_ready!(self.try_start_send(item))
         }
 
         loop {
-            match try!(self.stream_mut().poll()) {
+            match self.stream_mut().poll()? {
                 Async::Ready(Some(item)) => try_ready!(self.try_start_send(item)),
                 Async::Ready(None) => {
                     try_ready!(self.sink_mut().close());
                     return Ok(Async::Ready(self.take_result()))
                 }
                 Async::NotReady => {
                     try_ready!(self.sink_mut().poll_complete());
                     return Ok(Async::NotReady)
--- a/third_party/rust/futures/src/sink/wait.rs
+++ b/third_party/rust/futures/src/sink/wait.rs
@@ -42,9 +42,18 @@ impl<S: Sink> Wait<S> {
     ///
     /// This function will call the underlying sink's `poll_complete` method
     /// until it returns that it's ready to proceed. If the method returns
     /// `NotReady` the current thread will be blocked until it's otherwise
     /// ready to proceed.
     pub fn flush(&mut self) -> Result<(), S::SinkError> {
         self.sink.wait_flush()
     }
+
+    /// Close this sink, blocking the current thread until it's entirely closed.
+    ///
+    /// This function will call the underlying sink's `close` method
+    /// until it returns that it's closed. If the method returns
+    /// `NotReady` the current thread will be blocked until it's otherwise closed.
+    pub fn close(&mut self) -> Result<(), S::SinkError> {
+        self.sink.wait_close()
+    }
 }
--- a/third_party/rust/futures/src/sink/with.rs
+++ b/third_party/rust/futures/src/sink/with.rs
@@ -76,33 +76,41 @@ impl<S, U, F, Fut> With<S, U, F, Fut>
         &self.sink
     }
 
     /// Get a mutable reference to the inner sink.
     pub fn get_mut(&mut self) -> &mut S {
         &mut self.sink
     }
 
+    /// Consumes this combinator, returning the underlying sink.
+    ///
+    /// Note that this may discard intermediate state of this combinator, so
+    /// care should be taken to avoid losing resources when this is called.
+    pub fn into_inner(self) -> S {
+        self.sink
+    }
+
     fn poll(&mut self) -> Poll<(), Fut::Error> {
         loop {
             match mem::replace(&mut self.state, State::Empty) {
                 State::Empty => break,
                 State::Process(mut fut) => {
-                    match try!(fut.poll()) {
+                    match fut.poll()? {
                         Async::Ready(item) => {
                             self.state = State::Buffered(item);
                         }
                         Async::NotReady => {
                             self.state = State::Process(fut);
                             break
                         }
                     }
                 }
                 State::Buffered(item) => {
-                    if let AsyncSink::NotReady(item) = try!(self.sink.start_send(item)) {
+                    if let AsyncSink::NotReady(item) = self.sink.start_send(item)? {
                         self.state = State::Buffered(item);
                         break
                     }
                 }
             }
         }
 
         if self.state.is_empty() {
@@ -118,28 +126,28 @@ impl<S, U, F, Fut> Sink for With<S, U, F
           F: FnMut(U) -> Fut,
           Fut: IntoFuture<Item = S::SinkItem>,
           Fut::Error: From<S::SinkError>,
 {
     type SinkItem = U;
     type SinkError = Fut::Error;
 
     fn start_send(&mut self, item: Self::SinkItem) -> StartSend<Self::SinkItem, Fut::Error> {
-        if try!(self.poll()).is_not_ready() {
+        if self.poll()?.is_not_ready() {
             return Ok(AsyncSink::NotReady(item))
         }
         self.state = State::Process((self.f)(item).into_future());
         Ok(AsyncSink::Ready)
     }
 
     fn poll_complete(&mut self) -> Poll<(), Fut::Error> {
         // poll ourselves first, to push data downward
-        let me_ready = try!(self.poll());
+        let me_ready = self.poll()?;
         // always propagate `poll_complete` downward to attempt to make progress
         try_ready!(self.sink.poll_complete());
         Ok(me_ready)
     }
 
     fn close(&mut self) -> Poll<(), Fut::Error> {
         try_ready!(self.poll());
-        Ok(try!(self.sink.close()))
+        Ok(self.sink.close()?)
     }
 }
new file mode 100644
--- /dev/null
+++ b/third_party/rust/futures/src/sink/with_flat_map.rs
@@ -0,0 +1,126 @@
+use core::marker::PhantomData;
+
+use {Poll, Async, StartSend, AsyncSink};
+use sink::Sink;
+use stream::Stream;
+
+/// Sink for the `Sink::with_flat_map` combinator, chaining a computation that returns an iterator
+/// to run prior to pushing a value into the underlying sink
+#[derive(Debug)]
+#[must_use = "sinks do nothing unless polled"]
+pub struct WithFlatMap<S, U, F, St>
+where
+    S: Sink,
+    F: FnMut(U) -> St,
+    St: Stream<Item = S::SinkItem, Error=S::SinkError>,
+{
+    sink: S,
+    f: F,
+    stream: Option<St>,
+    buffer: Option<S::SinkItem>,
+    _phantom: PhantomData<fn(U)>,
+}
+
+pub fn new<S, U, F, St>(sink: S, f: F) -> WithFlatMap<S, U, F, St>
+where
+    S: Sink,
+    F: FnMut(U) -> St,
+    St: Stream<Item = S::SinkItem, Error=S::SinkError>,
+{
+    WithFlatMap {
+        sink: sink,
+        f: f,
+        stream: None,
+        buffer: None,
+        _phantom: PhantomData,
+    }
+}
+
+impl<S, U, F, St> WithFlatMap<S, U, F, St>
+where
+    S: Sink,
+    F: FnMut(U) -> St,
+    St: Stream<Item = S::SinkItem, Error=S::SinkError>,
+{
+    /// Get a shared reference to the inner sink.
+    pub fn get_ref(&self) -> &S {
+        &self.sink
+    }
+
+    /// Get a mutable reference to the inner sink.
+    pub fn get_mut(&mut self) -> &mut S {
+        &mut self.sink
+    }
+
+    /// Consumes this combinator, returning the underlying sink.
+    ///
+    /// Note that this may discard intermediate state of this combinator, so
+    /// care should be taken to avoid losing resources when this is called.
+    pub fn into_inner(self) -> S {
+        self.sink
+    }
+
+    fn try_empty_stream(&mut self) -> Poll<(), S::SinkError> {
+        if let Some(x) = self.buffer.take() {
+            if let AsyncSink::NotReady(x) = self.sink.start_send(x)? {
+                self.buffer = Some(x);
+                return Ok(Async::NotReady);
+            }
+        }
+        if let Some(mut stream) = self.stream.take() {
+            while let Some(x) = try_ready!(stream.poll()) {
+                if let AsyncSink::NotReady(x) = self.sink.start_send(x)? {
+                    self.stream = Some(stream);
+                    self.buffer = Some(x);
+                    return Ok(Async::NotReady);
+                }
+            }
+        }
+        Ok(Async::Ready(()))
+    }
+}
+
+impl<S, U, F, St> Stream for WithFlatMap<S, U, F, St>
+where
+    S: Stream + Sink,
+    F: FnMut(U) -> St,
+    St: Stream<Item = S::SinkItem, Error=S::SinkError>,
+{
+    type Item = S::Item;
+    type Error = S::Error;
+    fn poll(&mut self) -> Poll<Option<S::Item>, S::Error> {
+        self.sink.poll()
+    }
+}
+
+impl<S, U, F, St> Sink for WithFlatMap<S, U, F, St>
+where
+    S: Sink,
+    F: FnMut(U) -> St,
+    St: Stream<Item = S::SinkItem, Error=S::SinkError>,
+{
+    type SinkItem = U;
+    type SinkError = S::SinkError;
+    fn start_send(&mut self, i: Self::SinkItem) -> StartSend<Self::SinkItem, Self::SinkError> {
+        if self.try_empty_stream()?.is_not_ready() {
+            return Ok(AsyncSink::NotReady(i));
+        }
+        assert!(self.stream.is_none());
+        self.stream = Some((self.f)(i));
+        self.try_empty_stream()?;
+        Ok(AsyncSink::Ready)
+    }
+    fn poll_complete(&mut self) -> Poll<(), Self::SinkError> {
+        if self.try_empty_stream()?.is_not_ready() {
+            return Ok(Async::NotReady);
+        }
+        self.sink.poll_complete()
+    }
+    fn close(&mut self) -> Poll<(), Self::SinkError> {
+        if self.try_empty_stream()?.is_not_ready() {
+            return Ok(Async::NotReady);
+        }
+        assert!(self.stream.is_none());
+        self.sink.close()
+    }
+}
deleted file mode 100644
--- a/third_party/rust/futures/src/stack.rs
+++ /dev/null
@@ -1,140 +0,0 @@
-//! A lock-free stack which supports concurrent pushes and a concurrent call to
-//! drain the entire stack all at once.
-
-use std::prelude::v1::*;
-
-use std::mem;
-use std::ptr;
-use std::sync::atomic::AtomicPtr;
-use std::sync::atomic::Ordering::SeqCst;
-
-use task::EventSet;
-
-#[derive(Debug)]
-pub struct Stack<T> {
-    head: AtomicPtr<Node<T>>,
-}
-
-struct Node<T> {
-    data: T,
-    next: *mut Node<T>,
-}
-
-#[derive(Debug)]
-pub struct Drain<T> {
-    head: *mut Node<T>,
-}
-
-unsafe impl<T: Send> Send for Drain<T> {}
-unsafe impl<T: Sync> Sync for Drain<T> {}
-
-impl<T> Stack<T> {
-    pub fn new() -> Stack<T> {
-        Stack {
-            head: AtomicPtr::default(),
-        }
-    }
-
-    pub fn push(&self, data: T) {
-        let mut node = Box::new(Node { data: data, next: ptr::null_mut() });
-        let mut head = self.head.load(SeqCst);
-        loop {
-            node.next = head;
-            match self.head.compare_exchange(head, &mut *node, SeqCst, SeqCst) {
-                Ok(_) => {
-                    mem::forget(node);
-                    return
-                }
-                Err(cur) => head = cur,
-            }
-        }
-    }
-
-    pub fn drain(&self) -> Drain<T> {
-        Drain {
-            head: self.head.swap(ptr::null_mut(), SeqCst),
-        }
-    }
-}
-
-impl<T> Drop for Stack<T> {
-    fn drop(&mut self) {
-        self.drain();
-    }
-}
-
-impl<T> Iterator for Drain<T> {
-    type Item = T;
-
-    fn next(&mut self) -> Option<T> {
-        if self.head.is_null() {
-            return None
-        }
-        unsafe {
-            let node = Box::from_raw(self.head);
-            self.head = node.next;
-            return Some(node.data)
-        }
-    }
-}
-
-impl<T> Drop for Drain<T> {
-    fn drop(&mut self) {
-        for item in self.by_ref() {
-            drop(item);
-        }
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use std::prelude::v1::*;
-    use std::rc::Rc;
-    use std::cell::Cell;
-
-    use super::Stack;
-
-    struct Set(Rc<Cell<usize>>, usize);
-
-    impl Drop for Set {
-        fn drop(&mut self) {
-            self.0.set(self.1);
-        }
-    }
-
-    #[test]
-    fn simple() {
-        let s = Stack::new();
-        s.push(1);
-        s.push(2);
-        s.push(4);
-        assert_eq!(s.drain().collect::<Vec<_>>(), vec![4, 2, 1]);
-        s.push(5);
-        assert_eq!(s.drain().collect::<Vec<_>>(), vec![5]);
-        assert_eq!(s.drain().collect::<Vec<_>>(), vec![]);
-    }
-
-    #[test]
-    fn drain_drops() {
-        let data = Rc::new(Cell::new(0));
-        let s = Stack::new();
-        s.push(Set(data.clone(), 1));
-        drop(s.drain());
-        assert_eq!(data.get(), 1);
-    }
-
-    #[test]
-    fn drop_drops() {
-        let data = Rc::new(Cell::new(0));
-        let s = Stack::new();
-        s.push(Set(data.clone(), 1));
-        drop(s);
-        assert_eq!(data.get(), 1);
-    }
-}
-
-impl EventSet for Stack<usize> {
-    fn insert(&self, id: usize) {
-        self.push(id);
-    }
-}
--- a/third_party/rust/futures/src/stream/and_then.rs
+++ b/third_party/rust/futures/src/stream/and_then.rs
@@ -22,16 +22,43 @@ pub fn new<S, F, U>(s: S, f: F) -> AndTh
 {
     AndThen {
         stream: s,
         future: None,
         f: f,
     }
 }
 
+impl<S, F, U> AndThen<S, F, U>
+    where U: IntoFuture,
+{
+    /// Acquires a reference to the underlying stream that this combinator is
+    /// pulling from.
+    pub fn get_ref(&self) -> &S {
+        &self.stream
+    }
+
+    /// Acquires a mutable reference to the underlying stream that this
+    /// combinator is pulling from.
+    ///
+    /// Note that care must be taken to avoid tampering with the state of the
+    /// stream which may otherwise confuse this combinator.
+    pub fn get_mut(&mut self) -> &mut S {
+        &mut self.stream
+    }
+
+    /// Consumes this combinator, returning the underlying stream.
+    ///
+    /// Note that this may discard intermediate state of this combinator, so
+    /// care should be taken to avoid losing resources when this is called.
+    pub fn into_inner(self) -> S {
+        self.stream
+    }
+}
+
 // Forwarding impl of Sink from the underlying stream
 impl<S, F, U: IntoFuture> ::sink::Sink for AndThen<S, F, U>
     where S: ::sink::Sink
 {
     type SinkItem = S::SinkItem;
     type SinkError = S::SinkError;
 
     fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> {
--- a/third_party/rust/futures/src/stream/buffer_unordered.rs
+++ b/third_party/rust/futures/src/stream/buffer_unordered.rs
@@ -1,165 +1,115 @@
-use std::prelude::v1::*;
 use std::fmt;
-use std::mem;
-use std::sync::Arc;
 
-use task::{self, UnparkEvent};
-
-use {Async, IntoFuture, Poll, Future};
-use stream::{Stream, Fuse};
-use stack::{Stack, Drain};
+use {Async, IntoFuture, Poll};
+use stream::{Stream, Fuse, FuturesUnordered};
 
 /// An adaptor for a stream of futures to execute the futures concurrently, if
 /// possible, delivering results as they become available.
 ///
 /// This adaptor will buffer up a list of pending futures, and then return their
 /// results in the order that they complete. This is created by the
 /// `Stream::buffer_unordered` method.
 #[must_use = "streams do nothing unless polled"]
 pub struct BufferUnordered<S>
     where S: Stream,
           S::Item: IntoFuture,
 {
     stream: Fuse<S>,
-
-    // A slab of futures that are being executed. Each slot in this vector is
-    // either an active future or a pointer to the next empty slot. This is used
-    // to get O(1) deallocation in the slab and O(1) allocation.
-    //
-    // The `next_future` field is the next slot in the `futures` array that's a
-    // `Slot::Next` variant. If it points to the end of the array then the array
-    // is full.
-    futures: Vec<Slot<<S::Item as IntoFuture>::Future>>,
-    next_future: usize,
-
-    // A list of events that will get pushed onto concurrently by our many
-    // futures. This is filled in and used with the `with_unpark_event`
-    // function. The `pending` list here is the last time we drained events from
-    // our stack.
-    stack: Arc<Stack<usize>>,
-    pending: Drain<usize>,
-
-    // Number of active futures running in the `futures` slab
-    active: usize,
+    queue: FuturesUnordered<<S::Item as IntoFuture>::Future>,
+    max: usize,
 }
 
 impl<S> fmt::Debug for BufferUnordered<S>
     where S: Stream + fmt::Debug,
           S::Item: IntoFuture,
           <<S as Stream>::Item as IntoFuture>::Future: fmt::Debug,
 {
     fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
         fmt.debug_struct("BufferUnordered")
             .field("stream", &self.stream)
-            .field("futures", &self.futures)
-            .field("next_future", &self.next_future)
-            .field("stack", &self.stack)
-            .field("pending", &self.pending)
-            .field("active", &self.active)
+            .field("queue", &self.queue)
+            .field("max", &self.max)
             .finish()
     }
 }
 
-#[derive(Debug)]
-enum Slot<T> {
-    Next(usize),
-    Data(T),
-}
-
 pub fn new<S>(s: S, amt: usize) -> BufferUnordered<S>
     where S: Stream,
           S::Item: IntoFuture<Error=<S as Stream>::Error>,
 {
     BufferUnordered {
         stream: super::fuse::new(s),
-        futures: (0..amt).map(|i| Slot::Next(i + 1)).collect(),
-        next_future: 0,
-        pending: Stack::new().drain(),
-        stack: Arc::new(Stack::new()),
-        active: 0,
+        queue: FuturesUnordered::new(),
+        max: amt,
     }
 }
 
 impl<S> BufferUnordered<S>
     where S: Stream,
           S::Item: IntoFuture<Error=<S as Stream>::Error>,
 {
-    fn poll_pending(&mut self)
-                    -> Option<Poll<Option<<S::Item as IntoFuture>::Item>,
-                                   S::Error>> {
-        while let Some(idx) = self.pending.next() {
-            let result = match self.futures[idx] {
-                Slot::Data(ref mut f) => {
-                    let event = UnparkEvent::new(self.stack.clone(), idx);
-                    match task::with_unpark_event(event, || f.poll()) {
-                        Ok(Async::NotReady) => continue,
-                        Ok(Async::Ready(e)) => Ok(Async::Ready(Some(e))),
-                        Err(e) => Err(e),
-                    }
-                },
-                Slot::Next(_) => continue,
-            };
-            self.active -= 1;
-            self.futures[idx] = Slot::Next(self.next_future);
-            self.next_future = idx;
-            return Some(result)
-        }
-        None
+    /// Acquires a reference to the underlying stream that this combinator is
+    /// pulling from.
+    pub fn get_ref(&self) -> &S {
+        self.stream.get_ref()
+    }
+
+    /// Acquires a mutable reference to the underlying stream that this
+    /// combinator is pulling from.
+    ///
+    /// Note that care must be taken to avoid tampering with the state of the
+    /// stream which may otherwise confuse this combinator.
+    pub fn get_mut(&mut self) -> &mut S {
+        self.stream.get_mut()
+    }
+
+    /// Consumes this combinator, returning the underlying stream.
+    ///
+    /// Note that this may discard intermediate state of this combinator, so
+    /// care should be taken to avoid losing resources when this is called.
+    pub fn into_inner(self) -> S {
+        self.stream.into_inner()
     }
 }
 
 impl<S> Stream for BufferUnordered<S>
     where S: Stream,
           S::Item: IntoFuture<Error=<S as Stream>::Error>,
 {
     type Item = <S::Item as IntoFuture>::Item;
     type Error = <S as Stream>::Error;
 
     fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
         // First up, try to spawn off as many futures as possible by filling up
         // our slab of futures.
-        while self.next_future < self.futures.len() {
-            let future = match try!(self.stream.poll()) {
+        while self.queue.len() < self.max {
+            let future = match self.stream.poll()? {
                 Async::Ready(Some(s)) => s.into_future(),
                 Async::Ready(None) |
                 Async::NotReady => break,
             };
-            self.active += 1;
-            self.stack.push(self.next_future);
-            match mem::replace(&mut self.futures[self.next_future],
-                               Slot::Data(future)) {
-                Slot::Next(next) => self.next_future = next,
-                Slot::Data(_) => panic!(),
-            }
+
+            self.queue.push(future);
         }
 
-        // Next, see if our list of `pending` events from last time has any
-        // items, and if so process them here.
-        if let Some(ret) = self.poll_pending() {
-            return ret
+        // Try polling a new future
+        if let Some(val) = try_ready!(self.queue.poll()) {
+            return Ok(Async::Ready(Some(val)));
         }
 
-        // And finally, take a look at our stack of events, attempting to
-        // process all of those.
-        assert!(self.pending.next().is_none());
-        self.pending = self.stack.drain();
-        if let Some(ret) = self.poll_pending() {
-            return ret
+        // If we've gotten this far, then there are no events for us to process
+        // and nothing was ready, so figure out if we're not done yet or if
+        // we've reached the end.
+        if self.stream.is_done() {
+            Ok(Async::Ready(None))
+        } else {
+            Ok(Async::NotReady)
         }
-
-        // If we've gotten this far then there's no events for us to process and
-        // nothing was ready, so figure out if we're not done yet or if we've
-        // reached the end.
-        Ok(if self.active > 0 || !self.stream.is_done() {
-            Async::NotReady
-        } else {
-            Async::Ready(None)
-        })
     }
 }
 
 // Forwarding impl of Sink from the underlying stream
 impl<S> ::sink::Sink for BufferUnordered<S>
     where S: ::sink::Sink + Stream,
           S::Item: IntoFuture,
 {
--- a/third_party/rust/futures/src/stream/buffered.rs
+++ b/third_party/rust/futures/src/stream/buffered.rs
@@ -1,63 +1,81 @@
-use std::prelude::v1::*;
+use std::fmt;
 
-use std::fmt;
-use std::mem;
-
-use {Async, IntoFuture, Poll, Future};
-use stream::{Stream, Fuse};
+use {Async, IntoFuture, Poll};
+use stream::{Stream, Fuse, FuturesOrdered};
 
 /// An adaptor for a stream of futures to execute the futures concurrently, if
 /// possible.
 ///
 /// This adaptor will buffer up a list of pending futures, and then return their
 /// results in the order that they were pulled out of the original stream. This
 /// is created by the `Stream::buffered` method.
 #[must_use = "streams do nothing unless polled"]
 pub struct Buffered<S>
     where S: Stream,
           S::Item: IntoFuture,
 {
     stream: Fuse<S>,
-    futures: Vec<State<<S::Item as IntoFuture>::Future>>,
-    cur: usize,
+    queue: FuturesOrdered<<S::Item as IntoFuture>::Future>,
+    max: usize,
 }
 
 impl<S> fmt::Debug for Buffered<S>
     where S: Stream + fmt::Debug,
           S::Item: IntoFuture,
           <<S as Stream>::Item as IntoFuture>::Future: fmt::Debug,
           <<S as Stream>::Item as IntoFuture>::Item: fmt::Debug,
           <<S as Stream>::Item as IntoFuture>::Error: fmt::Debug,
 {
     fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
-        fmt.debug_struct("Stream")
+        fmt.debug_struct("Buffered")
             .field("stream", &self.stream)
-            .field("futures", &self.futures)
-            .field("cur", &self.cur)
+            .field("queue", &self.queue)
+            .field("max", &self.max)
             .finish()
     }
 }
 
-#[derive(Debug)]
-enum State<S: Future> {
-    Empty,
-    Running(S),
-    Finished(Result<S::Item, S::Error>),
-}
-
 pub fn new<S>(s: S, amt: usize) -> Buffered<S>
     where S: Stream,
           S::Item: IntoFuture<Error=<S as Stream>::Error>,
 {
     Buffered {
         stream: super::fuse::new(s),
-        futures: (0..amt).map(|_| State::Empty).collect(),
-        cur: 0,
+        queue: FuturesOrdered::new(),
+        max: amt,
+    }
+}
+
+impl<S> Buffered<S>
+    where S: Stream,
+          S::Item: IntoFuture<Error=<S as Stream>::Error>,
+{
+    /// Acquires a reference to the underlying stream that this combinator is
+    /// pulling from.
+    pub fn get_ref(&self) -> &S {
+        self.stream.get_ref()
+    }
+
+    /// Acquires a mutable reference to the underlying stream that this
+    /// combinator is pulling from.
+    ///
+    /// Note that care must be taken to avoid tampering with the state of the
+    /// stream which may otherwise confuse this combinator.
+    pub fn get_mut(&mut self) -> &mut S {
+        self.stream.get_mut()
+    }
+
+    /// Consumes this combinator, returning the underlying stream.
+    ///
+    /// Note that this may discard intermediate state of this combinator, so
+    /// care should be taken to avoid losing resources when this is called.
+    pub fn into_inner(self) -> S {
+        self.stream.into_inner()
     }
 }
 
 // Forwarding impl of Sink from the underlying stream
 impl<S> ::sink::Sink for Buffered<S>
     where S: ::sink::Sink + Stream,
           S::Item: IntoFuture,
 {
@@ -80,63 +98,35 @@ impl<S> ::sink::Sink for Buffered<S>
 impl<S> Stream for Buffered<S>
     where S: Stream,
           S::Item: IntoFuture<Error=<S as Stream>::Error>,
 {
     type Item = <S::Item as IntoFuture>::Item;
     type Error = <S as Stream>::Error;
 
     fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
-        // First, try to fill in all the futures
-        for i in 0..self.futures.len() {
-            let mut idx = self.cur + i;
-            if idx >= self.futures.len() {
-                idx -= self.futures.len();
-            }
+        // First up, try to spawn off as many futures as possible by filling up
+        // our slab of futures.
+        while self.queue.len() < self.max {
+            let future = match self.stream.poll()? {
+                Async::Ready(Some(s)) => s.into_future(),
+                Async::Ready(None) |
+                Async::NotReady => break,
+            };
 
-            if let State::Empty = self.futures[idx] {
-                match try!(self.stream.poll()) {
-                    Async::Ready(Some(future)) => {
-                        let future = future.into_future();
-                        self.futures[idx] = State::Running(future);
-                    }
-                    Async::Ready(None) => break,
-                    Async::NotReady => break,
-                }
-            }
+            self.queue.push(future);
         }
 
-        // Next, try and step all the futures forward
-        for future in self.futures.iter_mut() {
-            let result = match *future {
-                State::Running(ref mut s) => {
-                    match s.poll() {
-                        Ok(Async::NotReady) => continue,
-                        Ok(Async::Ready(e)) => Ok(e),
-                        Err(e) => Err(e),
-                    }
-                }
-                _ => continue,
-            };
-            *future = State::Finished(result);
+        // Try polling a new future
+        if let Some(val) = try_ready!(self.queue.poll()) {
+            return Ok(Async::Ready(Some(val)));
         }
 
-        // Check to see if our current future is done.
-        if let State::Finished(_) = self.futures[self.cur] {
-            let r = match mem::replace(&mut self.futures[self.cur], State::Empty) {
-                State::Finished(r) => r,
-                _ => panic!(),
-            };
-            self.cur += 1;
-            if self.cur >= self.futures.len() {
-                self.cur = 0;
-            }
-            return Ok(Async::Ready(Some(try!(r))))
+        // If we've gotten this far, then there are no events for us to process
+        // and nothing was ready, so figure out if we're not done yet or if
+        // we've reached the end.
+        if self.stream.is_done() {
+            Ok(Async::Ready(None))
+        } else {
+            Ok(Async::NotReady)
         }
-
-        if self.stream.is_done() {
-            if let State::Empty = self.futures[self.cur] {
-                return Ok(Async::Ready(None))
-            }
-        }
-        Ok(Async::NotReady)
     }
 }
--- a/third_party/rust/futures/src/stream/chunks.rs
+++ b/third_party/rust/futures/src/stream/chunks.rs
@@ -52,16 +52,39 @@ impl<S> ::sink::Sink for Chunks<S>
 }
 
 
 impl<S> Chunks<S> where S: Stream {
     fn take(&mut self) -> Vec<S::Item> {
         let cap = self.items.capacity();
         mem::replace(&mut self.items, Vec::with_capacity(cap))
     }
+
+    /// Acquires a reference to the underlying stream that this combinator is
+    /// pulling from.
+    pub fn get_ref(&self) -> &S {
+        self.stream.get_ref()
+    }
+
+    /// Acquires a mutable reference to the underlying stream that this
+    /// combinator is pulling from.
+    ///
+    /// Note that care must be taken to avoid tampering with the state of the
+    /// stream which may otherwise confuse this combinator.
+    pub fn get_mut(&mut self) -> &mut S {
+        self.stream.get_mut()
+    }
+
+    /// Consumes this combinator, returning the underlying stream.
+    ///
+    /// Note that this may discard intermediate state of this combinator, so
+    /// care should be taken to avoid losing resources when this is called.
+    pub fn into_inner(self) -> S {
+        self.stream.into_inner()
+    }
 }
 
 impl<S> Stream for Chunks<S>
     where S: Stream
 {
     type Item = Vec<<S as Stream>::Item>;
     type Error = <S as Stream>::Error;
 
--- a/third_party/rust/futures/src/stream/concat.rs
+++ b/third_party/rust/futures/src/stream/concat.rs
@@ -1,81 +1,172 @@
 use core::mem;
+use core::fmt::{Debug, Formatter, Result as FmtResult};
+use core::default::Default;
 
 use {Poll, Async};
 use future::Future;
 use stream::Stream;
 
 /// A stream combinator to concatenate the results of a stream into the first
 /// yielded item.
 ///
 /// This structure is produced by the `Stream::concat` method.
-#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct Concat2<S>
+    where S: Stream,
+{
+    inner: ConcatSafe<S>
+}
+
+impl<S: Debug> Debug for Concat2<S> where S: Stream, S::Item: Debug {
+    fn fmt(&self, fmt: &mut Formatter) -> FmtResult {
+        fmt.debug_struct("Concat2")
+            .field("inner", &self.inner)
+            .finish()
+    }
+}
+
+pub fn new2<S>(s: S) -> Concat2<S>
+    where S: Stream,
+          S::Item: Extend<<<S as Stream>::Item as IntoIterator>::Item> + IntoIterator + Default,
+{
+    Concat2 {
+        inner: new_safe(s)
+    }
+}
+
+impl<S> Future for Concat2<S>
+    where S: Stream,
+          S::Item: Extend<<<S as Stream>::Item as IntoIterator>::Item> + IntoIterator + Default,
+
+{
+    type Item = S::Item;
+    type Error = S::Error;
+
+    fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
+        self.inner.poll().map(|a| {
+            match a {
+                Async::NotReady => Async::NotReady,
+                Async::Ready(None) => Async::Ready(Default::default()),
+                Async::Ready(Some(e)) => Async::Ready(e)
+            }
+        })
+    }
+}
+
+
+/// A stream combinator to concatenate the results of a stream into the first
+/// yielded item.
+///
+/// This structure is produced by the `Stream::concat` method.
+#[deprecated(since="0.1.18", note="please use `Stream::Concat2` instead")]
 #[must_use = "streams do nothing unless polled"]
 pub struct Concat<S>
     where S: Stream,
 {
+    inner: ConcatSafe<S>
+}
+
+#[allow(deprecated)]
+impl<S: Debug> Debug for Concat<S> where S: Stream, S::Item: Debug {
+    fn fmt(&self, fmt: &mut Formatter) -> FmtResult {
+        fmt.debug_struct("Concat")
+            .field("inner", &self.inner)
+            .finish()
+    }
+}
+
+#[allow(deprecated)]
+pub fn new<S>(s: S) -> Concat<S>
+    where S: Stream,
+          S::Item: Extend<<<S as Stream>::Item as IntoIterator>::Item> + IntoIterator,
+{
+    Concat {
+        inner: new_safe(s)
+    }
+}
+
+#[allow(deprecated)]
+impl<S> Future for Concat<S>
+    where S: Stream,
+          S::Item: Extend<<<S as Stream>::Item as IntoIterator>::Item> + IntoIterator,
+
+{
+    type Item = S::Item;
+    type Error = S::Error;
+
+    fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
+        self.inner.poll().map(|a| {
+            match a {
+                Async::NotReady => Async::NotReady,
+                Async::Ready(None) => panic!("attempted concatenation of empty stream"),
+                Async::Ready(Some(e)) => Async::Ready(e)
+            }
+        })
+    }
+}
+
+
+#[derive(Debug)]
+struct ConcatSafe<S>
+    where S: Stream,
+{
     stream: S,
     extend: Inner<S::Item>,
 }
 
-pub fn new<S>(s: S) -> Concat<S>
+fn new_safe<S>(s: S) -> ConcatSafe<S>
     where S: Stream,
           S::Item: Extend<<<S as Stream>::Item as IntoIterator>::Item> + IntoIterator,
 {
-    Concat {
+    ConcatSafe {
         stream: s,
         extend: Inner::First,
     }
 }
 
-impl<S> Future for Concat<S>
+impl<S> Future for ConcatSafe<S>
     where S: Stream,
           S::Item: Extend<<<S as Stream>::Item as IntoIterator>::Item> + IntoIterator,
 
 {
-    type Item = S::Item;
+    type Item = Option<S::Item>;
     type Error = S::Error;
 
     fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
         loop {
             match self.stream.poll() {
                 Ok(Async::Ready(Some(i))) => {
                     match self.extend {
                         Inner::First => {
                             self.extend = Inner::Extending(i);
                         },
                         Inner::Extending(ref mut e) => {
                             e.extend(i);
                         },
                         Inner::Done => unreachable!(),
                     }
                 },
-                Ok(Async::Ready(None)) => return Ok(Async::Ready(expect(self.extend.take()))),
+                Ok(Async::Ready(None)) => {
+                    match mem::replace(&mut self.extend, Inner::Done) {
+                        Inner::First => return Ok(Async::Ready(None)),
+                        Inner::Extending(e) => return Ok(Async::Ready(Some(e))),
+                        Inner::Done => panic!("cannot poll Concat again")
+                    }
+                },
                 Ok(Async::NotReady) => return Ok(Async::NotReady),
                 Err(e) => {
-                    self.extend.take();
+                    self.extend = Inner::Done;
                     return Err(e)
                 }
             }
         }
     }
 }
 
+
 #[derive(Debug)]
 enum Inner<E> {
     First,
     Extending(E),
     Done,
-}
-
-impl<E> Inner<E> {
-    fn take(&mut self) -> Option<E> {
-        match mem::replace(self, Inner::Done) {
-            Inner::Extending(e) => Some(e),
-            _ => None,
-        }
-    }
-}
-
-fn expect<T>(opt: Option<T>) -> T {
-    opt.expect("cannot poll Concat again")
-}
+}
\ No newline at end of file
--- a/third_party/rust/futures/src/stream/filter.rs
+++ b/third_party/rust/futures/src/stream/filter.rs
@@ -17,16 +17,41 @@ pub fn new<S, F>(s: S, f: F) -> Filter<S
           F: FnMut(&S::Item) -> bool,
 {
     Filter {
         stream: s,
         f: f,
     }
 }
 
+impl<S, F> Filter<S, F> {
+    /// Acquires a reference to the underlying stream that this combinator is
+    /// pulling from.
+    pub fn get_ref(&self) -> &S {
+        &self.stream
+    }
+
+    /// Acquires a mutable reference to the underlying stream that this
+    /// combinator is pulling from.
+    ///
+    /// Note that care must be taken to avoid tampering with the state of the
+    /// stream which may otherwise confuse this combinator.
+    pub fn get_mut(&mut self) -> &mut S {
+        &mut self.stream
+    }
+
+    /// Consumes this combinator, returning the underlying stream.
+    ///
+    /// Note that this may discard intermediate state of this combinator, so
+    /// care should be taken to avoid losing resources when this is called.
+    pub fn into_inner(self) -> S {
+        self.stream
+    }
+}
+
 // Forwarding impl of Sink from the underlying stream
 impl<S, F> ::sink::Sink for Filter<S, F>
     where S: ::sink::Sink
 {
     type SinkItem = S::SinkItem;
     type SinkError = S::SinkError;
 
     fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> {
--- a/third_party/rust/futures/src/stream/filter_map.rs
+++ b/third_party/rust/futures/src/stream/filter_map.rs
@@ -17,16 +17,41 @@ pub fn new<S, F, B>(s: S, f: F) -> Filte
           F: FnMut(S::Item) -> Option<B>,
 {
     FilterMap {
         stream: s,
         f: f,
     }
 }
 
+impl<S, F> FilterMap<S, F> {
+    /// Acquires a reference to the underlying stream that this combinator is
+    /// pulling from.
+    pub fn get_ref(&self) -> &S {
+        &self.stream
+    }
+
+    /// Acquires a mutable reference to the underlying stream that this
+    /// combinator is pulling from.
+    ///
+    /// Note that care must be taken to avoid tampering with the state of the
+    /// stream which may otherwise confuse this combinator.
+    pub fn get_mut(&mut self) -> &mut S {
+        &mut self.stream
+    }
+
+    /// Consumes this combinator, returning the underlying stream.
+    ///
+    /// Note that this may discard intermediate state of this combinator, so
+    /// care should be taken to avoid losing resources when this is called.
+    pub fn into_inner(self) -> S {
+        self.stream
+    }
+}
+
 // Forwarding impl of Sink from the underlying stream
 impl<S, F> ::sink::Sink for FilterMap<S, F>
     where S: ::sink::Sink
 {
     type SinkItem = S::SinkItem;
     type SinkError = S::SinkError;
 
     fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> {
--- a/third_party/rust/futures/src/stream/flatten.rs
+++ b/third_party/rust/futures/src/stream/flatten.rs
@@ -20,16 +20,41 @@ pub fn new<S>(s: S) -> Flatten<S>
           <S::Item as Stream>::Error: From<S::Error>,
 {
     Flatten {
         stream: s,
         next: None,
     }
 }
 
+impl<S: Stream> Flatten<S> {
+    /// Acquires a reference to the underlying stream that this combinator is
+    /// pulling from.
+    pub fn get_ref(&self) -> &S {
+        &self.stream
+    }
+
+    /// Acquires a mutable reference to the underlying stream that this
+    /// combinator is pulling from.
+    ///
+    /// Note that care must be taken to avoid tampering with the state of the
+    /// stream which may otherwise confuse this combinator.
+    pub fn get_mut(&mut self) -> &mut S {
+        &mut self.stream
+    }
+
+    /// Consumes this combinator, returning the underlying stream.
+    ///
+    /// Note that this may discard intermediate state of this combinator, so
+    /// care should be taken to avoid losing resources when this is called.
+    pub fn into_inner(self) -> S {
+        self.stream
+    }
+}
+
 // Forwarding impl of Sink from the underlying stream
 impl<S> ::sink::Sink for Flatten<S>
     where S: ::sink::Sink + Stream
 {
     type SinkItem = S::SinkItem;
     type SinkError = S::SinkError;
 
     fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> {
--- a/third_party/rust/futures/src/stream/fold.rs
+++ b/third_party/rust/futures/src/stream/fold.rs
@@ -48,31 +48,31 @@ impl<S, F, Fut, T> Future for Fold<S, F,
     type Item = T;
     type Error = S::Error;
 
     fn poll(&mut self) -> Poll<T, S::Error> {
         loop {
             match mem::replace(&mut self.state, State::Empty) {
                 State::Empty => panic!("cannot poll Fold twice"),
                 State::Ready(state) => {
-                    match try!(self.stream.poll()) {
+                    match self.stream.poll()? {
                         Async::Ready(Some(e)) => {
                             let future = (self.f)(state, e);
                             let future = future.into_future();
                             self.state = State::Processing(future);
                         }
                         Async::Ready(None) => return Ok(Async::Ready(state)),
                         Async::NotReady => {
                             self.state = State::Ready(state);
                             return Ok(Async::NotReady)
                         }
                     }
                 }
                 State::Processing(mut fut) => {
-                    match try!(fut.poll()) {
+                    match fut.poll()? {
                         Async::Ready(state) => self.state = State::Ready(state),
                         Async::NotReady => {
                             self.state = State::Processing(fut);
                             return Ok(Async::NotReady)
                         }
                     }
                 }
             }
--- a/third_party/rust/futures/src/stream/for_each.rs
+++ b/third_party/rust/futures/src/stream/for_each.rs
@@ -31,17 +31,17 @@ impl<S, F, U> Future for ForEach<S, F, U
           U: IntoFuture<Item= (), Error = S::Error>,
 {
     type Item = ();
     type Error = S::Error;
 
     fn poll(&mut self) -> Poll<(), S::Error> {
         loop {
             if let Some(mut fut) = self.fut.take() {
-                if try!(fut.poll()).is_not_ready() {
+                if fut.poll()?.is_not_ready() {
                     self.fut = Some(fut);
                     return Ok(Async::NotReady);
                 }
             }
 
             match try_ready!(self.stream.poll()) {
                 Some(e) => self.fut = Some((self.f)(e).into_future()),
                 None => return Ok(Async::Ready(())),
--- a/third_party/rust/futures/src/stream/forward.rs
+++ b/third_party/rust/futures/src/stream/forward.rs
@@ -25,37 +25,54 @@ pub fn new<T, U>(stream: T, sink: U) -> 
     }
 }
 
 impl<T, U> Forward<T, U>
     where U: Sink<SinkItem=T::Item>,
           T: Stream,
           T::Error: From<U::SinkError>,
 {
-    fn sink_mut(&mut self) -> &mut U {
-        self.sink.as_mut().take()
-            .expect("Attempted to poll Forward after completion")
+    /// Get a shared reference to the inner sink.
+    /// If this combinator has already been polled to completion, None will be returned.
+    pub fn sink_ref(&self) -> Option<&U> {
+        self.sink.as_ref()
     }
 
-    fn stream_mut(&mut self) -> &mut Fuse<T> {
-        self.stream.as_mut().take()
-            .expect("Attempted to poll Forward after completion")
+    /// Get a mutable reference to the inner sink.
+    /// If this combinator has already been polled to completion, None will be returned.
+    pub fn sink_mut(&mut self) -> Option<&mut U> {
+        self.sink.as_mut()
+    }
+
+    /// Get a shared reference to the inner stream.
+    /// If this combinator has already been polled to completion, None will be returned.
+    pub fn stream_ref(&self) -> Option<&T> {
+        self.stream.as_ref().map(|x| x.get_ref())
+    }
+
+    /// Get a mutable reference to the inner stream.
+    /// If this combinator has already been polled to completion, None will be returned.
+    pub fn stream_mut(&mut self) -> Option<&mut T> {
+        self.stream.as_mut().map(|x| x.get_mut())
     }
 
     fn take_result(&mut self) -> (T, U) {
         let sink = self.sink.take()
             .expect("Attempted to poll Forward after completion");
         let fuse = self.stream.take()
             .expect("Attempted to poll Forward after completion");
-        return (fuse.into_inner(), sink)
+        (fuse.into_inner(), sink)
     }
 
     fn try_start_send(&mut self, item: T::Item) -> Poll<(), U::SinkError> {
         debug_assert!(self.buffered.is_none());
-        if let AsyncSink::NotReady(item) = try!(self.sink_mut().start_send(item)) {
+        if let AsyncSink::NotReady(item) = self.sink_mut()
+            .take().expect("Attempted to poll Forward after completion")
+            .start_send(item)?
+        {
             self.buffered = Some(item);
             return Ok(Async::NotReady)
         }
         Ok(Async::Ready(()))
     }
 }
 
 impl<T, U> Future for Forward<T, U>
@@ -69,22 +86,25 @@ impl<T, U> Future for Forward<T, U>
     fn poll(&mut self) -> Poll<(T, U), T::Error> {
         // If we've got an item buffered already, we need to write it to the
         // sink before we can do anything else
         if let Some(item) = self.buffered.take() {
             try_ready!(self.try_start_send(item))
         }
 
         loop {
-            match try!(self.stream_mut().poll()) {
+            match self.stream_mut()
+                .take().expect("Attempted to poll Forward after completion")
+                .poll()?
+            {
                 Async::Ready(Some(item)) => try_ready!(self.try_start_send(item)),
                 Async::Ready(None) => {
-                    try_ready!(self.sink_mut().close());
+                    try_ready!(self.sink_mut().take().expect("Attempted to poll Forward after completion").close());
                     return Ok(Async::Ready(self.take_result()))
                 }
                 Async::NotReady => {
-                    try_ready!(self.sink_mut().poll_complete());
+                    try_ready!(self.sink_mut().take().expect("Attempted to poll Forward after completion").poll_complete());
                     return Ok(Async::NotReady)
                 }
             }
         }
     }
 }
--- a/third_party/rust/futures/src/stream/from_err.rs
+++ b/third_party/rust/futures/src/stream/from_err.rs
@@ -3,30 +3,56 @@ use poll::Poll;
 use Async;
 use stream::Stream;
 
 /// A stream combinator to change the error type of a stream.
 ///
 /// This is created by the `Stream::from_err` method.
 #[derive(Debug)]
 #[must_use = "futures do nothing unless polled"]
-pub struct FromErr<S, E> where S: Stream {
+pub struct FromErr<S, E> {
     stream: S,
     f: PhantomData<E>
 }
 
 pub fn new<S, E>(stream: S) -> FromErr<S, E>
     where S: Stream
 {
     FromErr {
         stream: stream,
         f: PhantomData
     }
 }
 
+impl<S, E> FromErr<S, E> {
+    /// Acquires a reference to the underlying stream that this combinator is
+    /// pulling from.
+    pub fn get_ref(&self) -> &S {
+        &self.stream
+    }
+
+    /// Acquires a mutable reference to the underlying stream that this
+    /// combinator is pulling from.
+    ///
+    /// Note that care must be taken to avoid tampering with the state of the
+    /// stream which may otherwise confuse this combinator.
+    pub fn get_mut(&mut self) -> &mut S {
+        &mut self.stream
+    }
+
+    /// Consumes this combinator, returning the underlying stream.
+    ///
+    /// Note that this may discard intermediate state of this combinator, so
+    /// care should be taken to avoid losing resources when this is called.
+    pub fn into_inner(self) -> S {
+        self.stream
+    }
+}
+
+
 impl<S: Stream, E: From<S::Error>> Stream for FromErr<S, E> {
     type Item = S::Item;
     type Error = E;
 
     fn poll(&mut self) -> Poll<Option<S::Item>, E> {
         let e = match self.stream.poll() {
             Ok(Async::NotReady) => return Ok(Async::NotReady),
             other => other,
--- a/third_party/rust/futures/src/stream/fuse.rs
+++ b/third_party/rust/futures/src/stream/fuse.rs
@@ -59,13 +59,31 @@ impl<S> Fuse<S> {
     ///
     /// If this method returns `true`, then all future calls to poll are
     /// guaranteed to return `None`. If this returns `false`, then the
     /// underlying stream is still in use.
     pub fn is_done(&self) -> bool {
         self.done
     }
 
-    /// Recover original stream
+    /// Acquires a reference to the underlying stream that this combinator is
+    /// pulling from.
+    pub fn get_ref(&self) -> &S {
+        &self.stream
+    }
+
+    /// Acquires a mutable reference to the underlying stream that this
+    /// combinator is pulling from.
+    ///
+    /// Note that care must be taken to avoid tampering with the state of the
+    /// stream which may otherwise confuse this combinator.
+    pub fn get_mut(&mut self) -> &mut S {
+        &mut self.stream
+    }
+
+    /// Consumes this combinator, returning the underlying stream.
+    ///
+    /// Note that this may discard intermediate state of this combinator, so
+    /// care should be taken to avoid losing resources when this is called.
     pub fn into_inner(self) -> S {
         self.stream
     }
 }
new file mode 100644
--- /dev/null
+++ b/third_party/rust/futures/src/stream/futures_ordered.rs
@@ -0,0 +1,213 @@
+use std::cmp::{Eq, PartialEq, PartialOrd, Ord, Ordering};
+use std::collections::BinaryHeap;
+use std::fmt::{self, Debug};
+use std::iter::FromIterator;
+
+use {Async, Future, IntoFuture, Poll, Stream};
+use stream::FuturesUnordered;
+
+#[derive(Debug)]
+struct OrderWrapper<T> {
+    item: T,
+    index: usize,
+}
+
+impl<T> PartialEq for OrderWrapper<T> {
+    fn eq(&self, other: &Self) -> bool {
+        self.index == other.index
+    }
+}
+
+impl<T> Eq for OrderWrapper<T> {}
+
+impl<T> PartialOrd for OrderWrapper<T> {
+    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
+impl<T> Ord for OrderWrapper<T> {
+    fn cmp(&self, other: &Self) -> Ordering {
+        // BinaryHeap is a max heap, so compare backwards here.
+        other.index.cmp(&self.index)
+    }
+}
+
+impl<T> Future for OrderWrapper<T>
+    where T: Future
+{
+    type Item = OrderWrapper<T::Item>;
+    type Error = T::Error;
+
+    fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
+        let result = try_ready!(self.item.poll());
+        Ok(Async::Ready(OrderWrapper {
+            item: result,
+            index: self.index
+        }))
+    }
+}
+
+/// An unbounded queue of futures.
+///
+/// This "combinator" is similar to `FuturesUnordered`, but it imposes an order
+/// on top of the set of futures. While futures in the set will race to
+/// completion in parallel, results will only be returned in the order their
+/// originating futures were added to the queue.
+///
+/// Futures are pushed into this queue and their realized values are yielded in
+/// order. This structure is optimized to manage a large number of futures.
+/// Futures managed by `FuturesOrdered` will only be polled when they generate
+/// notifications. This reduces the required amount of work needed to coordinate
+/// large numbers of futures.
+///
+/// When a `FuturesOrdered` is first created, it does not contain any futures.
+/// Calling `poll` in this state will result in `Ok(Async::Ready(None))` to be
+/// returned. Futures are submitted to the queue using `push`; however, the
+/// future will **not** be polled at this point. `FuturesOrdered` will only
+/// poll managed futures when `FuturesOrdered::poll` is called. As such, it
+/// is important to call `poll` after pushing new futures.
+///
+/// If `FuturesOrdered::poll` returns `Ok(Async::Ready(None))` this means that
+/// the queue is currently not managing any futures. A future may be submitted
+/// to the queue at a later time. At that point, a call to
+/// `FuturesOrdered::poll` will either return the future's resolved value
+/// **or** `Ok(Async::NotReady)` if the future has not yet completed. When
+/// multiple futures are submitted to the queue, `FuturesOrdered::poll` will
+/// return `Ok(Async::NotReady)` until the first future completes, even if
+/// some of the later futures have already completed.
+///
+/// Note that you can create a ready-made `FuturesOrdered` via the
+/// `futures_ordered` function in the `stream` module, or you can start with an
+/// empty queue with the `FuturesOrdered::new` constructor.
+#[must_use = "streams do nothing unless polled"]
+pub struct FuturesOrdered<T>
+    where T: Future
+{
+    in_progress: FuturesUnordered<OrderWrapper<T>>,
+    queued_results: BinaryHeap<OrderWrapper<T::Item>>,
+    next_incoming_index: usize,
+    next_outgoing_index: usize,
+}
+
+/// Converts a list of futures into a `Stream` of results from the futures.
+///
+/// This function will take a list of futures (e.g. a vector, an iterator,
+/// etc), and return a stream. The stream will yield items as they become
+/// available on the futures internally, in the order that their originating
+/// futures were submitted to the queue. If the futures complete out of order,
+/// items will be stored internally within `FuturesOrdered` until all preceding
+/// items have been yielded.
+///
+/// Note that the returned queue can also be used to dynamically push more
+/// futures into the queue as they become available.
+pub fn futures_ordered<I>(futures: I) -> FuturesOrdered<<I::Item as IntoFuture>::Future>
+    where I: IntoIterator,
+          I::Item: IntoFuture
+{
+    let mut queue = FuturesOrdered::new();
+
+    for future in futures {
+        queue.push(future.into_future());
+    }
+
+    return queue
+}
+
+impl<T> FuturesOrdered<T>
+    where T: Future
+{
+    /// Constructs a new, empty `FuturesOrdered`
+    ///
+    /// The returned `FuturesOrdered` does not contain any futures and, in this
+    /// state, `FuturesOrdered::poll` will return `Ok(Async::Ready(None))`.
+    pub fn new() -> FuturesOrdered<T> {
+        FuturesOrdered {
+            in_progress: FuturesUnordered::new(),
+            queued_results: BinaryHeap::new(),
+            next_incoming_index: 0,
+            next_outgoing_index: 0,
+        }
+    }
+
+    /// Returns the number of futures contained in the queue.
+    ///
+    /// This represents the total number of in-flight futures, both
+    /// those currently processing and those that have completed but
+    /// which are waiting for earlier futures to complete.
+    pub fn len(&self) -> usize {
+        self.in_progress.len() + self.queued_results.len()
+    }
+
+    /// Returns `true` if the queue contains no futures
+    pub fn is_empty(&self) -> bool {
+        self.in_progress.is_empty() && self.queued_results.is_empty()
+    }
+
+    /// Push a future into the queue.
+    ///
+    /// This function submits the given future to the internal set for managing.
+    /// This function will not call `poll` on the submitted future. The caller
+    /// must ensure that `FuturesOrdered::poll` is called in order to receive
+    /// task notifications.
+    pub fn push(&mut self, future: T) {
+        let wrapped = OrderWrapper {
+            item: future,
+            index: self.next_incoming_index,
+        };
+        self.next_incoming_index += 1;
+        self.in_progress.push(wrapped);
+    }
+}
+
+impl<T> Stream for FuturesOrdered<T>
+    where T: Future
+{
+    type Item = T::Item;
+    type Error = T::Error;
+
+    fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
+        // Get any completed futures from the unordered set.
+        loop {
+            match self.in_progress.poll()? {
+                Async::Ready(Some(result)) => self.queued_results.push(result),
+                Async::Ready(None) | Async::NotReady => break,
+            }
+        }
+
+        if let Some(next_result) = self.queued_results.peek() {
+            // PeekMut::pop is not stable yet QQ
+            if next_result.index != self.next_outgoing_index {
+                return Ok(Async::NotReady);
+            }
+        } else if !self.in_progress.is_empty() {
+            return Ok(Async::NotReady);
+        } else {
+            return Ok(Async::Ready(None));
+        }
+
+        let next_result = self.queued_results.pop().unwrap();
+        self.next_outgoing_index += 1;
+        Ok(Async::Ready(Some(next_result.item)))
+    }
+}
+
+impl<T: Debug> Debug for FuturesOrdered<T>
+    where T: Future
+{
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        write!(fmt, "FuturesOrdered {{ ... }}")
+    }
+}
+
+impl<F: Future> FromIterator<F> for FuturesOrdered<F> {
+    fn from_iter<T>(iter: T) -> Self 
+        where T: IntoIterator<Item = F>
+    {
+        let mut new = FuturesOrdered::new();
+        for future in iter.into_iter() {
+            new.push(future);
+        }
+        new
+    }
+}
--- a/third_party/rust/futures/src/stream/futures_unordered.rs
+++ b/third_party/rust/futures/src/stream/futures_unordered.rs
@@ -1,107 +1,672 @@
-use future::{Future, IntoFuture};
-use stream::Stream;
-use poll::Poll;
-use Async;
-use stack::{Stack, Drain};
-use std::sync::Arc;
-use task::{self, UnparkEvent};
+//! An unbounded set of futures.
+
+use std::cell::UnsafeCell;
+use std::fmt::{self, Debug};
+use std::iter::FromIterator;
+use std::marker::PhantomData;
+use std::mem;
+use std::ptr;
+use std::sync::atomic::Ordering::{Relaxed, SeqCst, Acquire, Release, AcqRel};
+use std::sync::atomic::{AtomicPtr, AtomicBool};
+use std::sync::{Arc, Weak};
+use std::usize;
+
+use {task, Stream, Future, Poll, Async};
+use executor::{Notify, UnsafeNotify, NotifyHandle};
+use task_impl::{self, AtomicTask};
 
-use std::prelude::v1::*;
-
-/// An adaptor for a stream of futures to execute the futures concurrently, if
-/// possible, delivering results as they become available.
+/// An unbounded set of futures.
+///
+/// This "combinator" also serves a special function in this library, providing
+/// the ability to maintain a set of futures and manage driving them all
+/// to completion.
+///
+/// Futures are pushed into this set and their realized values are yielded as
+/// they are ready. This structure is optimized to manage a large number of
+/// futures. Futures managed by `FuturesUnordered` will only be polled when they
+/// generate notifications. This reduces the required amount of work needed to
+/// coordinate large numbers of futures.
+///
+/// When a `FuturesUnordered` is first created, it does not contain any futures.
+/// Calling `poll` in this state will result in `Ok(Async::Ready(None))` to be
+/// returned. Futures are submitted to the set using `push`; however, the
+/// future will **not** be polled at this point. `FuturesUnordered` will only
+/// poll managed futures when `FuturesUnordered::poll` is called. As such, it
+/// is important to call `poll` after pushing new futures.
+///
+/// If `FuturesUnordered::poll` returns `Ok(Async::Ready(None))` this means that
+/// the set is currently not managing any futures. A future may be submitted
+/// to the set at a later time. At that point, a call to
+/// `FuturesUnordered::poll` will either return the future's resolved value
+/// **or** `Ok(Async::NotReady)` if the future has not yet completed.
 ///
-/// This adaptor will return their results in the order that they complete.
-/// This is created by the `futures` method.
-///
-#[derive(Debug)]
+/// Note that you can create a ready-made `FuturesUnordered` via the
+/// `futures_unordered` function in the `stream` module, or you can start with an
+/// empty set with the `FuturesUnordered::new` constructor.
 #[must_use = "streams do nothing unless polled"]
-pub struct FuturesUnordered<F>
-    where F: Future
-{
-    futures: Vec<Option<F>>,
-    stack: Arc<Stack<usize>>,
-    pending: Option<Drain<usize>>,
-    active: usize,
+pub struct FuturesUnordered<F> {
+    inner: Arc<Inner<F>>,
+    len: usize,
+    head_all: *const Node<F>,
+}
+
+unsafe impl<T: Send> Send for FuturesUnordered<T> {}
+unsafe impl<T: Sync> Sync for FuturesUnordered<T> {}
+
+// FuturesUnordered is implemented using two linked lists. One which links all
+// futures managed by a `FuturesUnordered` and one that tracks futures that have
+// been scheduled for polling. The first linked list is not thread safe and is
+// only accessed by the thread that owns the `FuturesUnordered` value. The
+// second linked list is an implementation of the intrusive MPSC queue algorithm
+// described by 1024cores.net.
+//
+// When a future is submitted to the set a node is allocated and inserted in
+// both linked lists. The next call to `poll` will (eventually) see this node
+// and call `poll` on the future.
+//
+// Before a managed future is polled, the current task's `Notify` is replaced
+// with one that is aware of the specific future being run. This ensures that
+// task notifications generated by that specific future are visible to
+// `FuturesUnordered`. When a notification is received, the node is scheduled
+// for polling by being inserted into the concurrent linked list.
+//
+// Each node uses an `AtomicUsize` to track its state. The node state is the
+// reference count (the number of outstanding handles to the node) as well as a
+// flag tracking if the node is currently inserted in the atomic queue. When the
+// future is notified, it will only insert itself into the linked list if it
+// isn't currently inserted.
+
+#[allow(missing_debug_implementations)]
+struct Inner<T> {
+    // The task using `FuturesUnordered`.
+    parent: AtomicTask,
+
+    // Head/tail of the readiness queue
+    head_readiness: AtomicPtr<Node<T>>,
+    tail_readiness: UnsafeCell<*const Node<T>>,
+    stub: Arc<Node<T>>,
+}
+
+struct Node<T> {
+    // The future
+    future: UnsafeCell<Option<T>>,
+
+    // Next pointer for linked list tracking all active nodes
+    next_all: UnsafeCell<*const Node<T>>,
+
+    // Previous node in linked list tracking all active nodes
+    prev_all: UnsafeCell<*const Node<T>>,
+
+    // Next pointer in readiness queue
+    next_readiness: AtomicPtr<Node<T>>,
+
+    // Queue that we'll be enqueued to when notified
+    queue: Weak<Inner<T>>,
+
+    // Whether or not this node is currently in the mpsc queue.
+    queued: AtomicBool,
+}
+
+enum Dequeue<T> {
+    Data(*const Node<T>),
+    Empty,
+    Inconsistent,
 }
 
-/// Converts a list of futures into a `Stream` of results from the futures.
-///
-/// This function will take an list of futures (e.g. a vector, an iterator,
-/// etc), and return a stream. The stream will yield items as they become
-/// available on the futures internally, in the order that they become
-/// available. This function is similar to `buffer_unordered` in that it may
-/// return items in a different order than in the list specified.
-pub fn futures_unordered<I>(futures: I) -> FuturesUnordered<<I::Item as IntoFuture>::Future>
-    where I: IntoIterator,
-          I::Item: IntoFuture
+impl<T> FuturesUnordered<T>
+    where T: Future,
 {
-    let futures = futures.into_iter()
-                         .map(IntoFuture::into_future)
-                         .map(Some)
-                         .collect::<Vec<_>>();
-    let stack = Arc::new(Stack::new());
-    for i in 0..futures.len() {
-        stack.push(i);
+    /// Constructs a new, empty `FuturesUnordered`
+    ///
+    /// The returned `FuturesUnordered` does not contain any futures and, in this
+    /// state, `FuturesUnordered::poll` will return `Ok(Async::Ready(None))`.
+    pub fn new() -> FuturesUnordered<T> {
+        let stub = Arc::new(Node {
+            future: UnsafeCell::new(None),
+            next_all: UnsafeCell::new(ptr::null()),
+            prev_all: UnsafeCell::new(ptr::null()),
+            next_readiness: AtomicPtr::new(ptr::null_mut()),
+            queued: AtomicBool::new(true),
+            queue: Weak::new(),
+        });
+        let stub_ptr = &*stub as *const Node<T>;
+        let inner = Arc::new(Inner {
+            parent: AtomicTask::new(),
+            head_readiness: AtomicPtr::new(stub_ptr as *mut _),
+            tail_readiness: UnsafeCell::new(stub_ptr),
+            stub: stub,
+        });
+
+        FuturesUnordered {
+            len: 0,
+            head_all: ptr::null_mut(),
+            inner: inner,
+        }
+    }
+}
+
+impl<T> FuturesUnordered<T> {
+    /// Returns the number of futures contained in the set.
+    ///
+    /// This represents the total number of in-flight futures.
+    pub fn len(&self) -> usize {
+        self.len
+    }
+
+    /// Returns `true` if the set contains no futures
+    pub fn is_empty(&self) -> bool {
+        self.len == 0
+    }
+
+    /// Push a future into the set.
+    ///
+    /// This function submits the given future to the set for managing. This
+    /// function will not call `poll` on the submitted future. The caller must
+    /// ensure that `FuturesUnordered::poll` is called in order to receive task
+    /// notifications.
+    pub fn push(&mut self, future: T) {
+        let node = Arc::new(Node {
+            future: UnsafeCell::new(Some(future)),
+            next_all: UnsafeCell::new(ptr::null_mut()),
+            prev_all: UnsafeCell::new(ptr::null_mut()),
+            next_readiness: AtomicPtr::new(ptr::null_mut()),
+            queued: AtomicBool::new(true),
+            queue: Arc::downgrade(&self.inner),
+        });
+
+        // Right now our node has a strong reference count of 1. We transfer
+        // ownership of this reference count to our internal linked list
+        // and we'll reclaim ownership through the `unlink` function below.
+        let ptr = self.link(node);
+
+        // We'll need to get the future "into the system" to start tracking it,
+        // e.g. getting its unpark notifications going to us tracking which
+        // futures are ready. To do that we unconditionally enqueue it for
+        // polling here.
+        self.inner.enqueue(ptr);
     }
-    FuturesUnordered {
-        active: futures.len(),
-        futures: futures,
-        pending: None,
-        stack: stack,
+
+    /// Returns an iterator that allows modifying each future in the set.
+    pub fn iter_mut(&mut self) -> IterMut<T> {
+        IterMut {
+            node: self.head_all,
+            len: self.len,
+            _marker: PhantomData
+        }
+    }
+
+    fn release_node(&mut self, node: Arc<Node<T>>) {
+        // The future is done, try to reset the queued flag. This will prevent
+        // `notify` from doing any work in the future
+        let prev = node.queued.swap(true, SeqCst);
+
+        // Drop the future, even if it hasn't finished yet. This is safe
+        // because we're dropping the future on the thread that owns
+        // `FuturesUnordered`, which correctly tracks T's lifetimes and such.
+        unsafe {
+            drop((*node.future.get()).take());
+        }
+
+        // If the queued flag was previously set then it means that this node
+        // is still in our internal mpsc queue. We then transfer ownership
+        // of our reference count to the mpsc queue, and it'll come along and
+        // free it later, noticing that the future is `None`.
+        //
+        // If, however, the queued flag was *not* set then we're safe to
+        // release our reference count on the internal node. The queued flag
+        // was set above so all future `enqueue` operations will not actually
+        // enqueue the node, so our node will never see the mpsc queue again.
+        // The node itself will be deallocated once all reference counts have
+        // been dropped by the various owning tasks elsewhere.
+        if prev {
+            mem::forget(node);
+        }
+    }
+
+    /// Insert a new node into the internal linked list.
+    fn link(&mut self, node: Arc<Node<T>>) -> *const Node<T> {
+        let ptr = arc2ptr(node);
+        unsafe {
+            *(*ptr).next_all.get() = self.head_all;
+            if !self.head_all.is_null() {
+                *(*self.head_all).prev_all.get() = ptr;
+            }
+        }
+
+        self.head_all = ptr;
+        self.len += 1;
+        return ptr
+    }
+
+    /// Remove the node from the linked list tracking all nodes currently
+    /// managed by `FuturesUnordered`.
+    unsafe fn unlink(&mut self, node: *const Node<T>) -> Arc<Node<T>> {
+        let node = ptr2arc(node);
+        let next = *node.next_all.get();
+        let prev = *node.prev_all.get();
+        *node.next_all.get() = ptr::null_mut();
+        *node.prev_all.get() = ptr::null_mut();
+
+        if !next.is_null() {
+            *(*next).prev_all.get() = prev;
+        }
+
+        if !prev.is_null() {
+            *(*prev).next_all.get() = next;
+        } else {
+            self.head_all = next;
+        }
+        self.len -= 1;
+        return node
     }
 }
 
-impl<F> FuturesUnordered<F>
-    where F: Future
+impl<T> Stream for FuturesUnordered<T>
+    where T: Future
 {
-    fn poll_pending(&mut self, mut drain: Drain<usize>)
-                    -> Option<Poll<Option<F::Item>, F::Error>> {
-        while let Some(id) = drain.next() {
-            // If this future was already done just skip the notification
-            if self.futures[id].is_none() {
-                continue
+    type Item = T::Item;
+    type Error = T::Error;
+
+    fn poll(&mut self) -> Poll<Option<T::Item>, T::Error> {
+        // Ensure `parent` is correctly set.
+        self.inner.parent.register();
+
+        loop {
+            let node = match unsafe { self.inner.dequeue() } {
+                Dequeue::Empty => {
+                    if self.is_empty() {
+                        return Ok(Async::Ready(None));
+                    } else {
+                        return Ok(Async::NotReady)
+                    }
+                }
+                Dequeue::Inconsistent => {
+                    // At this point, it may be worth yielding the thread &
+                    // spinning a few times... but for now, just yield using the
+                    // task system.
+                    task::current().notify();
+                    return Ok(Async::NotReady);
+                }
+                Dequeue::Data(node) => node,
+            };
+
+            debug_assert!(node != self.inner.stub());
+
+            unsafe {
+                let mut future = match (*(*node).future.get()).take() {
+                    Some(future) => future,
+
+                    // If the future has already gone away then we're just
+                    // cleaning out this node. See the comment in
+                    // `release_node` for more information, but we're basically
+                    // just taking ownership of our reference count here.
+                    None => {
+                        let node = ptr2arc(node);
+                        assert!((*node.next_all.get()).is_null());
+                        assert!((*node.prev_all.get()).is_null());
+                        continue
+                    }
+                };
+
+                // Unset queued flag... this must be done before
+                // polling. This ensures that the future gets
+                // rescheduled if it is notified **during** a call
+                // to `poll`.
+                let prev = (*node).queued.swap(false, SeqCst);
+                assert!(prev);
+
+                // We're going to need to be very careful if the `poll`
+                // function below panics. We need to (a) not leak memory and
+                // (b) ensure that we still don't have any use-after-frees. To
+                // manage this we do a few things:
+                //
+                // * This "bomb" here will call `release_node` if dropped
+                //   abnormally. That way we'll be sure the memory management
+                //   of the `node` is managed correctly.
+                // * The future was extracted above (taken ownership). That way
+                //   if it panics we're guaranteed that the future is
+                //   dropped on this thread and doesn't accidentally get
+                //   dropped on a different thread (bad).
+                // * We unlink the node from our internal queue to preemptively
+                //   assume it'll panic, in which case we'll want to discard it
+                //   regardless.
+                struct Bomb<'a, T: 'a> {
+                    queue: &'a mut FuturesUnordered<T>,
+                    node: Option<Arc<Node<T>>>,
+                }
+                impl<'a, T> Drop for Bomb<'a, T> {
+                    fn drop(&mut self) {
+                        if let Some(node) = self.node.take() {
+                            self.queue.release_node(node);
+                        }
+                    }
+                }
+                let mut bomb = Bomb {
+                    node: Some(self.unlink(node)),
+                    queue: self,
+                };
+
+                // Poll the underlying future with the appropriate `notify`
+                // implementation. This is where a large bit of the unsafety
+                // starts to stem from internally. The `notify` instance itself
+                // is basically just our `Arc<Node<T>>` and tracks the mpsc
+                // queue of ready futures.
+                //
+                // Critically though `Node<T>` won't actually access `T`, the
+                // future, while it's floating around inside of `Task`
+                // instances. These structs will basically just use `T` to size
+                // the internal allocation, appropriately accessing fields and
+                // deallocating the node if need be.
+                let res = {
+                    let notify = NodeToHandle(bomb.node.as_ref().unwrap());
+                    task_impl::with_notify(&notify, 0, || {
+                        future.poll()
+                    })
+                };
+
+                let ret = match res {
+                    Ok(Async::NotReady) => {
+                        let node = bomb.node.take().unwrap();
+                        *node.future.get() = Some(future);
+                        bomb.queue.link(node);
+                        continue
+                    }
+                    Ok(Async::Ready(e)) => Ok(Async::Ready(Some(e))),
+                    Err(e) => Err(e),
+                };
+                return ret
             }
-            let event = UnparkEvent::new(self.stack.clone(), id);
-            let ret = match task::with_unpark_event(event, || {
-                self.futures[id]
-                    .as_mut()
-                    .unwrap()
-                    .poll()
-            }) {
-                Ok(Async::NotReady) => continue,
-                Ok(Async::Ready(val)) => Ok(Async::Ready(Some(val))),
-                Err(e) => Err(e),
-            };
-            self.pending = Some(drain);
-            self.active -= 1;
-            self.futures[id] = None;
-            return Some(ret)
+        }
+    }
+}
+
+impl<T: Debug> Debug for FuturesUnordered<T> {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        write!(fmt, "FuturesUnordered {{ ... }}")
+    }
+}
+
+impl<T> Drop for FuturesUnordered<T> {
+    fn drop(&mut self) {
+        // When a `FuturesUnordered` is dropped we want to drop all futures associated
+        // with it. At the same time though there may be tons of `Task` handles
+        // flying around which contain `Node<T>` references inside them. We'll
+        // let those naturally get deallocated when the `Task` itself goes out
+        // of scope or gets notified.
+        unsafe {
+            while !self.head_all.is_null() {
+                let head = self.head_all;
+                let node = self.unlink(head);
+                self.release_node(node);
+            }
         }
-        None
+
+        // Note that at this point we could still have a bunch of nodes in the
+        // mpsc queue. None of those nodes, however, have futures associated
+        // with them so they're safe to destroy on any thread. At this point
+        // the `FuturesUnordered` struct, the owner of the one strong reference
+        // to `Inner<T>` will drop the strong reference. At that point
+        // whichever thread releases the strong refcount last (be it this
+        // thread or some other thread as part of an `upgrade`) will clear out
+        // the mpsc queue and free all remaining nodes.
+        //
+        // While that freeing operation isn't guaranteed to happen here, it's
+        // guaranteed to happen "promptly" as no more "blocking work" will
+        // happen while there's a strong refcount held.
+    }
+}
+
+impl<F: Future> FromIterator<F> for FuturesUnordered<F> {
+    fn from_iter<T>(iter: T) -> Self 
+        where T: IntoIterator<Item = F>
+    {
+        let mut new = FuturesUnordered::new();
+        for future in iter.into_iter() {
+            new.push(future);
+        }
+        new
+    }
+}
+
+#[derive(Debug)]
+/// Mutable iterator over all futures in the unordered set.
+pub struct IterMut<'a, F: 'a> {
+    node: *const Node<F>,
+    len: usize,
+    _marker: PhantomData<&'a mut FuturesUnordered<F>>
+}
+
+impl<'a, F> Iterator for IterMut<'a, F> {
+    type Item = &'a mut F;
+
+    fn next(&mut self) -> Option<&'a mut F> {
+        if self.node.is_null() {
+            return None;
+        }
+        unsafe {
+            let future = (*(*self.node).future.get()).as_mut().unwrap();
+            let next = *(*self.node).next_all.get();
+            self.node = next;
+            self.len -= 1;
+            return Some(future);
+        }
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        (self.len, Some(self.len))
     }
 }
 
-impl<F> Stream for FuturesUnordered<F>
-    where F: Future
-{
-    type Item = F::Item;
-    type Error = F::Error;
+impl<'a, F> ExactSizeIterator for IterMut<'a, F> {}
+
+impl<T> Inner<T> {
+    /// The enqueue function from the 1024cores intrusive MPSC queue algorithm.
+    fn enqueue(&self, node: *const Node<T>) {
+        unsafe {
+            debug_assert!((*node).queued.load(Relaxed));
+
+            // This action does not require any coordination
+            (*node).next_readiness.store(ptr::null_mut(), Relaxed);
+
+            // Note that these atomic orderings come from 1024cores
+            let node = node as *mut _;
+            let prev = self.head_readiness.swap(node, AcqRel);
+            (*prev).next_readiness.store(node, Release);
+        }
+    }
+
+    /// The dequeue function from the 1024cores intrusive MPSC queue algorithm
+    ///
+    /// Note that this unsafe as it required mutual exclusion (only one thread
+    /// can call this) to be guaranteed elsewhere.
+    unsafe fn dequeue(&self) -> Dequeue<T> {
+        let mut tail = *self.tail_readiness.get();
+        let mut next = (*tail).next_readiness.load(Acquire);
+
+        if tail == self.stub() {
+            if next.is_null() {
+                return Dequeue::Empty;
+            }
+
+            *self.tail_readiness.get() = next;
+            tail = next;
+            next = (*next).next_readiness.load(Acquire);
+        }
 
-    fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
-        if self.active == 0 {
-            return Ok(Async::Ready(None))
+        if !next.is_null() {
+            *self.tail_readiness.get() = next;
+            debug_assert!(tail != self.stub());
+            return Dequeue::Data(tail);
+        }
+
+        if self.head_readiness.load(Acquire) as *const _ != tail {
+            return Dequeue::Inconsistent;
+        }
+
+        self.enqueue(self.stub());
+
+        next = (*tail).next_readiness.load(Acquire);
+
+        if !next.is_null() {
+            *self.tail_readiness.get() = next;
+            return Dequeue::Data(tail);
         }
-        if let Some(drain) = self.pending.take() {
-            if let Some(ret) = self.poll_pending(drain) {
-                return ret
+
+        Dequeue::Inconsistent
+    }
+
+    fn stub(&self) -> *const Node<T> {
+        &*self.stub
+    }
+}
+
+impl<T> Drop for Inner<T> {
+    fn drop(&mut self) {
+        // Once we're in the destructor for `Inner<T>` we need to clear out the
+        // mpsc queue of nodes if there's anything left in there.
+        //
+        // Note that each node has a strong reference count associated with it
+        // which is owned by the mpsc queue. All nodes should have had their
+        // futures dropped already by the `FuturesUnordered` destructor above,
+        // so we're just pulling out nodes and dropping their refcounts.
+        unsafe {
+            loop {
+                match self.dequeue() {
+                    Dequeue::Empty => break,
+                    Dequeue::Inconsistent => abort("inconsistent in drop"),
+                    Dequeue::Data(ptr) => drop(ptr2arc(ptr)),
+                }
             }
         }
-        let drain = self.stack.drain();
-        if let Some(ret) = self.poll_pending(drain) {
-            return ret
+    }
+}
+
+#[allow(missing_debug_implementations)]
+struct NodeToHandle<'a, T: 'a>(&'a Arc<Node<T>>);
+
+impl<'a, T> Clone for NodeToHandle<'a, T> {
+    fn clone(&self) -> Self {
+        NodeToHandle(self.0)
+    }
+}
+
+impl<'a, T> From<NodeToHandle<'a, T>> for NotifyHandle {
+    fn from(handle: NodeToHandle<'a, T>) -> NotifyHandle {
+        unsafe {
+            let ptr = handle.0.clone();
+            let ptr = mem::transmute::<Arc<Node<T>>, *mut ArcNode<T>>(ptr);
+            NotifyHandle::new(hide_lt(ptr))
         }
-        assert!(self.active > 0);
-        Ok(Async::NotReady)
+    }
+}
+
+struct ArcNode<T>(PhantomData<T>);
+
+// We should never touch `T` on any thread other than the one owning
+// `FuturesUnordered`, so this should be a safe operation.
+unsafe impl<T> Send for ArcNode<T> {}
+unsafe impl<T> Sync for ArcNode<T> {}
+
+impl<T> Notify for ArcNode<T> {
+    fn notify(&self, _id: usize) {
+        unsafe {
+            let me: *const ArcNode<T> = self;
+            let me: *const *const ArcNode<T> = &me;
+            let me = me as *const Arc<Node<T>>;
+            Node::notify(&*me)
+        }
+    }
+}
+
+unsafe impl<T> UnsafeNotify for ArcNode<T> {
+    unsafe fn clone_raw(&self) -> NotifyHandle {
+        let me: *const ArcNode<T> = self;
+        let me: *const *const ArcNode<T> = &me;
+        let me = &*(me as *const Arc<Node<T>>);
+        NodeToHandle(me).into()
+    }
+
+    unsafe fn drop_raw(&self) {
+        let mut me: *const ArcNode<T> = self;
+        let me = &mut me as *mut *const ArcNode<T> as *mut Arc<Node<T>>;
+        ptr::drop_in_place(me);
     }
 }
+
+unsafe fn hide_lt<T>(p: *mut ArcNode<T>) -> *mut UnsafeNotify {
+    mem::transmute(p as *mut UnsafeNotify)
+}
+
+impl<T> Node<T> {
+    fn notify(me: &Arc<Node<T>>) {
+        let inner = match me.queue.upgrade() {
+            Some(inner) => inner,
+            None => return,
+        };
+
+        // It's our job to notify the node that it's ready to get polled,
+        // meaning that we need to enqueue it into the readiness queue. To
+        // do this we flag that we're ready to be queued, and if successful
+        // we then do the literal queueing operation, ensuring that we're
+        // only queued once.
+        //
+        // Once the node is inserted we be sure to notify the parent task,
+        // as it'll want to come along and pick up our node now.
+        //
+        // Note that we don't change the reference count of the node here,
+        // we're just enqueueing the raw pointer. The `FuturesUnordered`
+        // implementation guarantees that if we set the `queued` flag true that
+        // there's a reference count held by the main `FuturesUnordered` queue
+        // still.
+        let prev = me.queued.swap(true, SeqCst);
+        if !prev {
+            inner.enqueue(&**me);
+            inner.parent.notify();
+        }
+    }
+}
+
+impl<T> Drop for Node<T> {
+    fn drop(&mut self) {
+        // Currently a `Node<T>` is sent across all threads for any lifetime,
+        // regardless of `T`. This means that for memory safety we can't
+        // actually touch `T` at any time except when we have a reference to the
+        // `FuturesUnordered` itself.
+        //
+        // Consequently it *should* be the case that we always drop futures from
+        // the `FuturesUnordered` instance, but this is a bomb in place to catch
+        // any bugs in that logic.
+        unsafe {
+            if (*self.future.get()).is_some() {
+                abort("future still here when dropping");
+            }
+        }
+    }
+}
+
+fn arc2ptr<T>(ptr: Arc<T>) -> *const T {
+    let addr = &*ptr as *const T;
+    mem::forget(ptr);
+    return addr
+}
+
+unsafe fn ptr2arc<T>(ptr: *const T) -> Arc<T> {
+    let anchor = mem::transmute::<usize, Arc<T>>(0x10);
+    let addr = &*anchor as *const T;
+    mem::forget(anchor);
+    let offset = addr as isize - 0x10;
+    mem::transmute::<isize, Arc<T>>(ptr as isize - offset)
+}
+
+fn abort(s: &str) -> ! {
+    struct DoublePanic;
+
+    impl Drop for DoublePanic {
+        fn drop(&mut self) {
+            panic!("panicking twice to abort the program");
+        }
+    }
+
+    let _bomb = DoublePanic;
+    panic!("{}", s);
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/futures/src/stream/inspect.rs
@@ -0,0 +1,84 @@
+use {Stream, Poll, Async};
+
+/// Do something with the items of a stream, passing it on.
+///
+/// This is created by the `Stream::inspect` method.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct Inspect<S, F> where S: Stream {
+    stream: S,
+    inspect: F,
+}
+
+pub fn new<S, F>(stream: S, f: F) -> Inspect<S, F>
+    where S: Stream,
+          F: FnMut(&S::Item) -> (),
+{
+    Inspect {
+        stream: stream,
+        inspect: f,
+    }
+}
+
+impl<S: Stream, F> Inspect<S, F> {
+    /// Acquires a reference to the underlying stream that this combinator is
+    /// pulling from.
+    pub fn get_ref(&self) -> &S {
+        &self.stream
+    }
+
+    /// Acquires a mutable reference to the underlying stream that this
+    /// combinator is pulling from.
+    ///
+    /// Note that care must be taken to avoid tampering with the state of the
+    /// stream which may otherwise confuse this combinator.
+    pub fn get_mut(&mut self) -> &mut S {
+        &mut self.stream
+    }
+
+    /// Consumes this combinator, returning the underlying stream.
+    ///
+    /// Note that this may discard intermediate state of this combinator, so
+    /// care should be taken to avoid losing resources when this is called.
+    pub fn into_inner(self) -> S {
+        self.stream
+    }
+}
+
+// Forwarding impl of Sink from the underlying stream
+impl<S, F> ::sink::Sink for Inspect<S, F>
+    where S: ::sink::Sink + Stream
+{
+    type SinkItem = S::SinkItem;
+    type SinkError = S::SinkError;
+
+    fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> {
+        self.stream.start_send(item)
+    }
+
+    fn poll_complete(&mut self) -> Poll<(), S::SinkError> {
+        self.stream.poll_complete()
+    }
+
+    fn close(&mut self) -> Poll<(), S::SinkError> {
+        self.stream.close()
+    }
+}
+
+impl<S, F> Stream for Inspect<S, F>
+    where S: Stream,
+          F: FnMut(&S::Item),
+{
+    type Item = S::Item;
+    type Error = S::Error;
+
+    fn poll(&mut self) -> Poll<Option<S::Item>, S::Error> {
+        match try_ready!(self.stream.poll()) {
+            Some(e) => {
+                (self.inspect)(&e);
+                Ok(Async::Ready(Some(e)))
+            }
+            None => Ok(Async::Ready(None)),
+        }
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/futures/src/stream/inspect_err.rs
@@ -0,0 +1,81 @@
+use {Stream, Poll};
+
+/// Do something with the error of a stream, passing it on.
+///
+/// This is created by the `Stream::inspect_err` method.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct InspectErr<S, F> where S: Stream {
+    stream: S,
+    inspect: F,
+}
+
+pub fn new<S, F>(stream: S, f: F) -> InspectErr<S, F>
+    where S: Stream,
+          F: FnMut(&S::Error) -> (),
+{
+    InspectErr {
+        stream: stream,
+        inspect: f,
+    }
+}
+
+impl<S: Stream, F> InspectErr<S, F> {
+    /// Acquires a reference to the underlying stream that this combinator is
+    /// pulling from.
+    pub fn get_ref(&self) -> &S {
+        &self.stream
+    }
+
+    /// Acquires a mutable reference to the underlying stream that this
+    /// combinator is pulling from.
+    ///
+    /// Note that care must be taken to avoid tampering with the state of the
+    /// stream which may otherwise confuse this combinator.
+    pub fn get_mut(&mut self) -> &mut S {
+        &mut self.stream
+    }
+
+    /// Consumes this combinator, returning the underlying stream.
+    ///
+    /// Note that this may discard intermediate state of this combinator, so
+    /// care should be taken to avoid losing resources when this is called.
+    pub fn into_inner(self) -> S {
+        self.stream
+    }
+}
+
+// Forwarding impl of Sink from the underlying stream
+impl<S, F> ::sink::Sink for InspectErr<S, F>
+    where S: ::sink::Sink + Stream
+{
+    type SinkItem = S::SinkItem;
+    type SinkError = S::SinkError;
+
+    fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> {
+        self.stream.start_send(item)
+    }
+
+    fn poll_complete(&mut self) -> Poll<(), S::SinkError> {
+        self.stream.poll_complete()
+    }
+
+    fn close(&mut self) -> Poll<(), S::SinkError> {
+        self.stream.close()
+    }
+}
+
+impl<S, F> Stream for InspectErr<S, F>
+    where S: Stream,
+          F: FnMut(&S::Error),
+{
+    type Item = S::Item;
+    type Error = S::Error;
+
+    fn poll(&mut self) -> Poll<Option<S::Item>, S::Error> {
+        self.stream.poll().map_err(|e| {
+            (self.inspect)(&e);
+            e
+        })
+    }
+}
--- a/third_party/rust/futures/src/stream/iter.rs
+++ b/third_party/rust/futures/src/stream/iter.rs
@@ -1,49 +1,46 @@
-use {Async, Poll};
-use stream::Stream;
+#![deprecated(note = "implementation moved to `iter_ok` and `iter_result`")]
+#![allow(deprecated)]
+
+use Poll;
+use stream::{iter_result, IterResult, Stream};
 
 /// A stream which is just a shim over an underlying instance of `Iterator`.
 ///
 /// This stream will never block and is always ready.
 #[derive(Debug)]
 #[must_use = "streams do nothing unless polled"]
-pub struct Iter<I> {
-    iter: I,
-}
+pub struct Iter<I>(IterResult<I>);
 
 /// Converts an `Iterator` over `Result`s into a `Stream` which is always ready
 /// to yield the next value.
 ///
 /// Iterators in Rust don't express the ability to block, so this adapter simply
 /// always calls `iter.next()` and returns that.
 ///
 /// ```rust
 /// use futures::*;
 ///
 /// let mut stream = stream::iter(vec![Ok(17), Err(false), Ok(19)]);
 /// assert_eq!(Ok(Async::Ready(Some(17))), stream.poll());
 /// assert_eq!(Err(false), stream.poll());
 /// assert_eq!(Ok(Async::Ready(Some(19))), stream.poll());
 /// assert_eq!(Ok(Async::Ready(None)), stream.poll());
 /// ```
+#[inline]
 pub fn iter<J, T, E>(i: J) -> Iter<J::IntoIter>
     where J: IntoIterator<Item=Result<T, E>>,
 {
-    Iter {
-        iter: i.into_iter(),
-    }
+    Iter(iter_result(i))
 }
 
 impl<I, T, E> Stream for Iter<I>
     where I: Iterator<Item=Result<T, E>>,
 {
     type Item = T;
     type Error = E;
 
+    #[inline]
     fn poll(&mut self) -> Poll<Option<T>, E> {
-        match self.iter.next() {
-            Some(Ok(e)) => Ok(Async::Ready(Some(e))),
-            Some(Err(e)) => Err(e),
-            None => Ok(Async::Ready(None)),
-        }
+        self.0.poll()
     }
 }
new file mode 100644
--- /dev/null
+++ b/third_party/rust/futures/src/stream/iter_ok.rs
@@ -0,0 +1,48 @@
+use core::marker;
+
+use {Async, Poll};
+use stream::Stream;
+
+/// A stream which is just a shim over an underlying instance of `Iterator`.
+///
+/// This stream will never block and is always ready.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct IterOk<I, E> {
+    iter: I,
+    _marker: marker::PhantomData<fn() -> E>,
+}
+
+/// Converts an `Iterator` into a `Stream` which is always ready
+/// to yield the next value.
+///
+/// Iterators in Rust don't express the ability to block, so this adapter
+/// simply always calls `iter.next()` and returns that.
+///
+/// ```rust
+/// use futures::*;
+///
+/// let mut stream = stream::iter_ok::<_, ()>(vec![17, 19]);
+/// assert_eq!(Ok(Async::Ready(Some(17))), stream.poll());
+/// assert_eq!(Ok(Async::Ready(Some(19))), stream.poll());
+/// assert_eq!(Ok(Async::Ready(None)), stream.poll());
+/// ```
+pub fn iter_ok<I, E>(i: I) -> IterOk<I::IntoIter, E>
+    where I: IntoIterator,
+{
+    IterOk {
+        iter: i.into_iter(),
+        _marker: marker::PhantomData,
+    }
+}
+
+impl<I, E> Stream for IterOk<I, E>
+    where I: Iterator,
+{
+    type Item = I::Item;
+    type Error = E;
+
+    fn poll(&mut self) -> Poll<Option<I::Item>, E> {
+        Ok(Async::Ready(self.iter.next()))
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/futures/src/stream/iter_result.rs
@@ -0,0 +1,51 @@
+use {Async, Poll};
+use stream::Stream;
+
+/// A stream which is just a shim over an underlying instance of `Iterator`.
+///
+/// This stream will never block and is always ready.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct IterResult<I> {
+    iter: I,
+}
+
+/// Converts an `Iterator` over `Result`s into a `Stream` which is always ready
+/// to yield the next value.
+///
+/// Iterators in Rust don't express the ability to block, so this adapter simply
+/// always calls `iter.next()` and returns that.
+///
+/// ```rust
+/// use futures::*;
+///
+/// let mut stream = stream::iter_result(vec![Ok(17), Err(false), Ok(19)]);
+/// assert_eq!(Ok(Async::Ready(Some(17))), stream.poll());
+/// assert_eq!(Err(false), stream.poll());
+/// assert_eq!(Ok(Async::Ready(Some(19))), stream.poll());
+/// assert_eq!(Ok(Async::Ready(None)), stream.poll());
+/// ```
+pub fn iter_result<J, T, E>(i: J) -> IterResult<J::IntoIter>
+where
+    J: IntoIterator<Item = Result<T, E>>,
+{
+    IterResult {
+        iter: i.into_iter(),
+    }
+}
+
+impl<I, T, E> Stream for IterResult<I>
+where
+    I: Iterator<Item = Result<T, E>>,
+{
+    type Item = T;
+    type Error = E;
+
+    fn poll(&mut self) -> Poll<Option<T>, E> {
+        match self.iter.next() {
+            Some(Ok(e)) => Ok(Async::Ready(Some(e))),
+            Some(Err(e)) => Err(e),
+            None => Ok(Async::Ready(None)),
+        }
+    }
+}
--- a/third_party/rust/futures/src/stream/map.rs
+++ b/third_party/rust/futures/src/stream/map.rs
@@ -17,16 +17,41 @@ pub fn new<S, F, U>(s: S, f: F) -> Map<S
           F: FnMut(S::Item) -> U,
 {
     Map {
         stream: s,
         f: f,
     }
 }
 
+impl<S, F> Map<S, F> {
+    /// Acquires a reference to the underlying stream that this combinator is
+    /// pulling from.
+    pub fn get_ref(&self) -> &S {
+        &self.stream
+    }
+
+    /// Acquires a mutable reference to the underlying stream that this
+    /// combinator is pulling from.
+    ///
+    /// Note that care must be taken to avoid tampering with the state of the
+    /// stream which may otherwise confuse this combinator.
+    pub fn get_mut(&mut self) -> &mut S {
+        &mut self.stream
+    }
+
+    /// Consumes this combinator, returning the underlying stream.
+    ///
+    /// Note that this may discard intermediate state of this combinator, so
+    /// care should be taken to avoid losing resources when this is called.
+    pub fn into_inner(self) -> S {
+        self.stream
+    }
+}
+
 // Forwarding impl of Sink from the underlying stream
 impl<S, F> ::sink::Sink for Map<S, F>
     where S: ::sink::Sink
 {
     type SinkItem = S::SinkItem;
     type SinkError = S::SinkError;
 
     fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> {
--- a/third_party/rust/futures/src/stream/map_err.rs
+++ b/third_party/rust/futures/src/stream/map_err.rs
@@ -17,16 +17,41 @@ pub fn new<S, F, U>(s: S, f: F) -> MapEr
           F: FnMut(S::Error) -> U,
 {
     MapErr {
         stream: s,
         f: f,
     }
 }
 
+impl<S, F> MapErr<S, F> {
+    /// Acquires a reference to the underlying stream that this combinator is
+    /// pulling from.
+    pub fn get_ref(&self) -> &S {
+        &self.stream
+    }
+
+    /// Acquires a mutable reference to the underlying stream that this
+    /// combinator is pulling from.
+    ///
+    /// Note that care must be taken to avoid tampering with the state of the
+    /// stream which may otherwise confuse this combinator.
+    pub fn get_mut(&mut self) -> &mut S {
+        &mut self.stream
+    }
+
+    /// Consumes this combinator, returning the underlying stream.
+    ///
+    /// Note that this may discard intermediate state of this combinator, so
+    /// care should be taken to avoid losing resources when this is called.
+    pub fn into_inner(self) -> S {
+        self.stream
+    }
+}
+
 // Forwarding impl of Sink from the underlying stream
 impl<S, F> ::sink::Sink for MapErr<S, F>
     where S: ::sink::Sink
 {
     type SinkItem = S::SinkItem;
     type SinkError = S::SinkError;
 
     fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> {
--- a/third_party/rust/futures/src/stream/merge.rs
+++ b/third_party/rust/futures/src/stream/merge.rs
@@ -1,8 +1,11 @@
+#![deprecated(note = "functionality provided by `select` now")]
+#![allow(deprecated)]
+
 use {Poll, Async};
 use stream::{Stream, Fuse};
 
 /// An adapter for merging the output of two streams.
 ///
 /// The merged stream produces items from one or both of the underlying
 /// streams as they become available. Errors, however, are not merged: you
 /// get at most one error at a time.
@@ -42,17 +45,17 @@ impl<S1, S2> Stream for Merge<S1, S2>
     type Item = MergedItem<S1::Item, S2::Item>;
     type Error = S1::Error;
 
     fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
         if let Some(e) = self.queued_error.take() {
             return Err(e)
         }
 
-        match try!(self.stream1.poll()) {
+        match self.stream1.poll()? {
             Async::NotReady => {
                 match try_ready!(self.stream2.poll()) {
                     Some(item2) => Ok(Async::Ready(Some(MergedItem::Second(item2)))),
                     None => Ok(Async::NotReady),
                 }
             }
             Async::Ready(None) => {
                 match try_ready!(self.stream2.poll()) {
old mode 100755
new mode 100644
--- a/third_party/rust/futures/src/stream/mod.rs
+++ b/third_party/rust/futures/src/stream/mod.rs
@@ -13,68 +13,83 @@
 //! You can find more information/tutorials about streams [online at
 //! https://tokio.rs][online]
 //!
 //! [online]: https://tokio.rs/docs/getting-started/streams-and-sinks/
 
 use {IntoFuture, Poll};
 
 mod iter;
+#[allow(deprecated)]
 pub use self::iter::{iter, Iter};
 #[cfg(feature = "with-deprecated")]
+#[allow(deprecated)]
 pub use self::Iter as IterStream;
+mod iter_ok;
+pub use self::iter_ok::{iter_ok, IterOk};
+mod iter_result;
+pub use self::iter_result::{iter_result, IterResult};
 
 mod repeat;
 pub use self::repeat::{repeat, Repeat};
 
 mod and_then;
 mod chain;
 mod concat;
 mod empty;
 mod filter;
 mod filter_map;
 mod flatten;
 mod fold;
 mod for_each;
 mod from_err;
 mod fuse;
 mod future;
+mod inspect;
+mod inspect_err;
 mod map;
 mod map_err;
 mod merge;
 mod once;
 mod or_else;
 mod peek;
+mod poll_fn;
 mod select;
 mod skip;
 mod skip_while;
 mod take;
 mod take_while;
 mod then;
 mod unfold;
 mod zip;
 mod forward;
 pub use self::and_then::AndThen;
 pub use self::chain::Chain;
+#[allow(deprecated)]
 pub use self::concat::Concat;
+pub use self::concat::Concat2;
 pub use self::empty::{Empty, empty};
 pub use self::filter::Filter;
 pub use self::filter_map::FilterMap;
 pub use self::flatten::Flatten;
 pub use self::fold::Fold;
 pub use self::for_each::ForEach;
 pub use self::from_err::FromErr;
 pub use self::fuse::Fuse;
 pub use self::future::StreamFuture;
+pub use self::inspect::Inspect;
+pub use self::inspect_err::InspectErr;
 pub use self::map::Map;
 pub use self::map_err::MapErr;
+#[allow(deprecated)]
 pub use self::merge::{Merge, MergedItem};
 pub use self::once::{Once, once};
 pub use self::or_else::OrElse;
 pub use self::peek::Peekable;
+pub use self::poll_fn::{poll_fn, PollFn};
 pub use self::select::Select;
 pub use self::skip::Skip;
 pub use self::skip_while::SkipWhile;
 pub use self::take::Take;
 pub use self::take_while::TakeWhile;
 pub use self::then::Then;
 pub use self::unfold::{Unfold, unfold};
 pub use self::zip::Zip;
@@ -87,32 +102,38 @@ if_std! {
     mod buffered;
     mod buffer_unordered;
     mod catch_unwind;
     mod chunks;
     mod collect;
     mod wait;
     mod channel;
     mod split;
-    mod futures_unordered;
+    pub mod futures_unordered;
+    mod futures_ordered;
     pub use self::buffered::Buffered;
     pub use self::buffer_unordered::BufferUnordered;
     pub use self::catch_unwind::CatchUnwind;
     pub use self::chunks::Chunks;
     pub use self::collect::Collect;
     pub use self::wait::Wait;
     pub use self::split::{SplitStream, SplitSink};
-    pub use self::futures_unordered::{futures_unordered, FuturesUnordered};
+    pub use self::futures_unordered::FuturesUnordered;
+    pub use self::futures_ordered::{futures_ordered, FuturesOrdered};
 
     #[doc(hidden)]
     #[cfg(feature = "with-deprecated")]
     #[allow(deprecated)]
     pub use self::channel::{channel, Sender, Receiver, FutureSender, SendError};
 
     /// A type alias for `Box<Stream + Send>`
+    #[doc(hidden)]
+    #[deprecated(note = "removed without replacement, recommended to use a \
+                         local extension trait or function if needed, more \
+                         details in https://github.com/alexcrichton/futures-rs/issues/228")]
     pub type BoxStream<T, E> = ::std::boxed::Box<Stream<Item = T, Error = E> + Send>;
 
     impl<S: ?Sized + Stream> Stream for ::std::boxed::Box<S> {
         type Item = S::Item;
         type Error = S::Error;
 
         fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
             (**self).poll()
@@ -242,16 +263,21 @@ pub trait Stream {
     /// ```
     /// use futures::stream::*;
     /// use futures::sync::mpsc;
     ///
     /// let (_tx, rx) = mpsc::channel(1);
     /// let a: BoxStream<i32, ()> = rx.boxed();
     /// ```
     #[cfg(feature = "use_std")]
+    #[doc(hidden)]
+    #[deprecated(note = "removed without replacement, recommended to use a \
+                         local extension trait or function if needed, more \
+                         details in https://github.com/alexcrichton/futures-rs/issues/228")]
+    #[allow(deprecated)]
     fn boxed(self) -> BoxStream<Self::Item, Self::Error>
         where Self: Sized + Send + 'static,
     {
         ::std::boxed::Box::new(self)
     }
 
     /// Converts this stream into a `Future`.
     ///
@@ -276,17 +302,17 @@ pub trait Stream {
     ///
     /// Note that this function consumes the receiving stream and returns a
     /// wrapped version of it, similar to the existing `map` methods in the
     /// standard library.
     ///
     /// # Examples
     ///
     /// ```
-    /// use futures::Stream;
+    /// use futures::prelude::*;
     /// use futures::sync::mpsc;
     ///
     /// let (_tx, rx) = mpsc::channel::<i32>(1);
     /// let rx = rx.map(|x| x + 3);
     /// ```
     fn map<U, F>(self, f: F) -> Map<Self, F>
         where F: FnMut(Self::Item) -> U,
               Self: Sized
@@ -302,17 +328,17 @@ pub trait Stream {
     ///
     /// Note that this function consumes the receiving stream and returns a
     /// wrapped version of it, similar to the existing `map_err` methods in the
     /// standard library.
     ///
     /// # Examples
     ///
     /// ```
-    /// use futures::Stream;
+    /// use futures::prelude::*;
     /// use futures::sync::mpsc;
     ///
     /// let (_tx, rx) = mpsc::channel::<i32>(1);
     /// let rx = rx.map_err(|()| 3);
     /// ```
     fn map_err<U, F>(self, f: F) -> MapErr<Self, F>
         where F: FnMut(Self::Error) -> U,
               Self: Sized
@@ -332,21 +358,21 @@ pub trait Stream {
     ///
     /// Note that this function consumes the receiving stream and returns a
     /// wrapped version of it, similar to the existing `filter` methods in the
     /// standard library.
     ///
     /// # Examples
     ///
     /// ```
-    /// use futures::Stream;
+    /// use futures::prelude::*;
     /// use futures::sync::mpsc;
     ///
     /// let (_tx, rx) = mpsc::channel::<i32>(1);
-    /// let evens = rx.filter(|x| x % 0 == 2);
+    /// let evens = rx.filter(|x| x % 2 == 0);
     /// ```
     fn filter<F>(self, f: F) -> Filter<Self, F>
         where F: FnMut(&Self::Item) -> bool,
               Self: Sized
     {
         filter::new(self, f)
     }
 
@@ -362,17 +388,17 @@ pub trait Stream {
     ///
     /// Note that this function consumes the receiving stream and returns a
     /// wrapped version of it, similar to the existing `filter_map` methods in the
     /// standard library.
     ///
     /// # Examples
     ///
     /// ```
-    /// use futures::Stream;
+    /// use futures::prelude::*;
     /// use futures::sync::mpsc;
     ///
     /// let (_tx, rx) = mpsc::channel::<i32>(1);
     /// let evens_plus_one = rx.filter_map(|x| {
     ///     if x % 0 == 2 {
     ///         Some(x + 1)
     ///     } else {
     ///         None
@@ -401,17 +427,17 @@ pub trait Stream {
     /// closure and return it.
     ///
     /// Note that this function consumes the receiving stream and returns a
     /// wrapped version of it.
     ///
     /// # Examples
     ///
     /// ```
-    /// use futures::Stream;
+    /// use futures::prelude::*;
     /// use futures::sync::mpsc;
     ///
     /// let (_tx, rx) = mpsc::channel::<i32>(1);
     ///
     /// let rx = rx.then(|result| {
     ///     match result {
     ///         Ok(e) => Ok(e + 3),
     ///         Err(()) => Err(4),
@@ -441,20 +467,23 @@ pub trait Stream {
     /// and can represent some more work to be done before the composed stream
     /// is finished. Note that the `Result` type implements the `IntoFuture`
     /// trait so it is possible to simply alter the `Result` yielded to the
     /// closure and return it.
     ///
     /// Note that this function consumes the receiving stream and returns a
     /// wrapped version of it.
     ///
+    /// To process the entire stream and return a single future representing
+    /// success or error, use `for_each` instead.
+    ///
     /// # Examples
     ///
     /// ```
-    /// use futures::stream::*;
+    /// use futures::prelude::*;
     /// use futures::sync::mpsc;
     ///
     /// let (_tx, rx) = mpsc::channel::<i32>(1);
     ///
     /// let rx = rx.and_then(|result| {
     ///     if result % 2 == 0 {
     ///         Ok(result)
     ///     } else {
@@ -510,17 +539,17 @@ pub trait Stream {
     /// This method is only available when the `use_std` feature of this
     /// library is activated, and it is activated by default.
     ///
     /// # Examples
     ///
     /// ```
     /// use std::thread;
     ///
-    /// use futures::{Stream, Future, Sink};
+    /// use futures::prelude::*;
     /// use futures::sync::mpsc;
     ///
     /// let (mut tx, rx) = mpsc::channel(1);
     ///
     /// thread::spawn(|| {
     ///     for i in (0..5).rev() {
     ///         tx = tx.send(i + 1).wait().unwrap();
     ///     }
@@ -535,39 +564,82 @@ pub trait Stream {
     {
         collect::new(self)
     }
 
     /// Concatenate all results of a stream into a single extendable
     /// destination, returning a future representing the end result.
     ///
     /// This combinator will extend the first item with the contents
-    /// of all the successful results of the stream. If an error
-    /// occurs, all the results will be dropped and the error will be
-    /// returned.
+    /// of all the successful results of the stream. If the stream is
+    /// empty, the default value will be returned. If an error occurs,
+    /// all the results will be dropped and the error will be returned.
+    ///
+    /// The name `concat2` is an intermediate measure until the release of
+    /// futures 0.2, at which point it will be renamed back to `concat`.
     ///
     /// # Examples
     ///
     /// ```
     /// use std::thread;
     ///
-    /// use futures::{Future, Sink, Stream};
+    /// use futures::prelude::*;
+    /// use futures::sync::mpsc;
+    ///
+    /// let (mut tx, rx) = mpsc::channel(1);
+    ///
+    /// thread::spawn(move || {
+    ///     for i in (0..3).rev() {
+    ///         let n = i * 3;
+    ///         tx = tx.send(vec![n + 1, n + 2, n + 3]).wait().unwrap();
+    ///     }
+    /// });
+    /// let result = rx.concat2();
+    /// assert_eq!(result.wait(), Ok(vec![7, 8, 9, 4, 5, 6, 1, 2, 3]));
+    /// ```
+    fn concat2(self) -> Concat2<Self>
+        where Self: Sized,
+              Self::Item: Extend<<<Self as Stream>::Item as IntoIterator>::Item> + IntoIterator + Default,
+    {
+        concat::new2(self)
+    }
+
+    /// Concatenate all results of a stream into a single extendable
+    /// destination, returning a future representing the end result.
+    ///
+    /// This combinator will extend the first item with the contents
+    /// of all the successful results of the stream. If an error occurs,
+    /// all the results will be dropped and the error will be returned.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::thread;
+    ///
+    /// use futures::prelude::*;
     /// use futures::sync::mpsc;
     ///
     /// let (mut tx, rx) = mpsc::channel(1);
     ///
     /// thread::spawn(move || {
     ///     for i in (0..3).rev() {
     ///         let n = i * 3;
     ///         tx = tx.send(vec![n + 1, n + 2, n + 3]).wait().unwrap();
     ///     }
     /// });
     /// let result = rx.concat();
     /// assert_eq!(result.wait(), Ok(vec![7, 8, 9, 4, 5, 6, 1, 2, 3]));
     /// ```
+    ///
+    /// # Panics
+    ///
+    /// It's important to note that this function will panic if the stream
+    /// is empty, which is the reason for its deprecation.
+    #[deprecated(since="0.1.14", note="please use `Stream::concat2` instead")]
+    #[allow(deprecated)]
     fn concat(self) -> Concat<Self>
         where Self: Sized,
               Self::Item: Extend<<<Self as Stream>::Item as IntoIterator>::Item> + IntoIterator,
     {
         concat::new(self)
     }
 
     /// Execute an accumulating computation over a stream, collecting all the
@@ -580,21 +652,22 @@ pub trait Stream {
     /// resolve to this value.
     ///
     /// If an error happens then collected state will be dropped and the error
     /// will be returned.
     ///
     /// # Examples
     ///
     /// ```
-    /// use futures::stream::{self, Stream};
-    /// use futures::future::{ok, Future};
+    /// use futures::prelude::*;
+    /// use futures::stream;
+    /// use futures::future;
     ///
-    /// let number_stream = stream::iter::<_, _, ()>((0..6).map(Ok));
-    /// let sum = number_stream.fold(0, |a, b| ok(a + b));
+    /// let number_stream = stream::iter_ok::<_, ()>(0..6);
+    /// let sum = number_stream.fold(0, |acc, x| future::ok(acc + x));
     /// assert_eq!(sum.wait(), Ok(15));
     /// ```
     fn fold<F, T, Fut>(self, init: T, f: F) -> Fold<Self, F, Fut, T>
         where F: FnMut(T, Self::Item) -> Fut,
               Fut: IntoFuture<Item = T>,
               Self::Error: From<Fut::Error>,
               Self: Sized
     {
@@ -606,17 +679,17 @@ pub trait Stream {
     /// If this stream's elements are themselves streams then this combinator
     /// will flatten out the entire stream to one long chain of elements. Any
     /// errors are passed through without looking at them, but otherwise each
     /// individual stream will get exhausted before moving on to the next.
     ///
     /// ```
     /// use std::thread;
     ///
-    /// use futures::{Future, Stream, Poll, Sink};
+    /// use futures::prelude::*;
     /// use futures::sync::mpsc;
     ///
     /// let (tx1, rx1) = mpsc::channel::<i32>(1);
     /// let (tx2, rx2) = mpsc::channel::<i32>(1);
     /// let (tx3, rx3) = mpsc::channel(1);
     ///
     /// thread::spawn(|| {
     ///     tx1.send(1).wait().unwrap()
@@ -677,16 +750,19 @@ pub trait Stream {
     /// The closure provided will be called for each item this stream resolves
     /// to successfully, producing a future. That future will then be executed
     /// to completion before moving on to the next item.
     ///
     /// The returned value is a `Future` where the `Item` type is `()` and
     /// errors are otherwise threaded through. Any error on the stream or in the
     /// closure will cause iteration to be halted immediately and the future
     /// will resolve to that error.
+    ///
+    /// To process each item in the stream and produce another stream instead
+    /// of a single future, use `and_then` instead.
     fn for_each<F, U>(self, f: F) -> ForEach<Self, F, U>
         where F: FnMut(Self::Item) -> U,
               U: IntoFuture<Item=(), Error = Self::Error>,
               Self: Sized
     {
         for_each::new(self, f)
     }
 
@@ -754,16 +830,41 @@ pub trait Stream {
     /// Also note that as soon as this stream returns `None` it will be dropped
     /// to reclaim resources associated with it.
     fn fuse(self) -> Fuse<Self>
         where Self: Sized
     {
         fuse::new(self)
     }
 
+    /// Borrows a stream, rather than consuming it.
+    ///
+    /// This is useful to allow applying stream adaptors while still retaining
+    /// ownership of the original stream.
+    ///
+    /// ```
+    /// use futures::prelude::*;
+    /// use futures::stream;
+    /// use futures::future;
+    ///
+    /// let mut stream = stream::iter_ok::<_, ()>(1..5);
+    ///
+    /// let sum = stream.by_ref().take(2).fold(0, |a, b| future::ok(a + b)).wait();
+    /// assert_eq!(sum, Ok(3));
+    ///
+    /// // You can use the stream again
+    /// let sum = stream.take(2).fold(0, |a, b| future::ok(a + b)).wait();
+    /// assert_eq!(sum, Ok(7));
+    /// ```
+    fn by_ref(&mut self) -> &mut Self
+        where Self: Sized
+    {
+        self
+    }
+
     /// Catches unwinding panics while polling the stream.
     ///
     /// Caught panic (if any) will be the last element of the resulting stream.
     ///
     /// In general, panics within a stream can propagate all the way out to the
     /// task level. This combinator makes it possible to halt unwinding within
     /// the stream itself. It's most commonly used within task executors. This
     /// method should not be used for error handling.
@@ -775,21 +876,20 @@ pub trait Stream {
     /// implemented for `AssertUnwindSafe<S>` where `S` implements `Stream`.
     ///
     /// This method is only available when the `use_std` feature of this
     /// library is activated, and it is activated by default.
     ///
     /// # Examples
     ///
     /// ```rust
+    /// use futures::prelude::*;
     /// use futures::stream;
-    /// use futures::stream::Stream;
     ///
-    /// let stream = stream::iter::<_, Option<i32>, bool>(vec![
-    ///     Some(10), None, Some(11)].into_iter().map(Ok));
+    /// let stream = stream::iter_ok::<_, bool>(vec![Some(10), None, Some(11)]);
     /// // panic on second element
     /// let stream_panicking = stream.map(|o| o.unwrap());
     /// let mut iter = stream_panicking.catch_unwind().wait();
     ///
     /// assert_eq!(Ok(10), iter.next().unwrap().ok().unwrap());
     /// assert!(iter.next().unwrap().is_err());
     /// assert!(iter.next().is_none());
     /// ```
@@ -842,16 +942,18 @@ pub trait Stream {
         buffer_unordered::new(self, amt)
     }
 
     /// An adapter for merging the output of two streams.
     ///
     /// The merged stream produces items from one or both of the underlying
     /// streams as they become available. Errors, however, are not merged: you
     /// get at most one error at a time.
+    #[deprecated(note = "functionality provided by `select` now")]
+    #[allow(deprecated)]
     fn merge<S>(self, other: S) -> Merge<Self, S>
         where S: Stream<Error = Self::Error>,
               Self: Sized,
     {
         merge::new(self, other)
     }
 
     /// An adapter for zipping two streams together.
@@ -867,21 +969,21 @@ pub trait Stream {
     }
 
     /// Adapter for chaining two stream.
     ///
     /// The resulting stream emits elements from the first stream, and when
     /// first stream reaches the end, emits the elements from the second stream.
     ///
     /// ```rust
+    /// use futures::prelude::*;
     /// use futures::stream;
-    /// use futures::stream::Stream;
     ///
-    /// let stream1 = stream::iter(vec![Ok(10), Err(false)]);
-    /// let stream2 = stream::iter(vec![Err(true), Ok(20)]);
+    /// let stream1 = stream::iter_result(vec![Ok(10), Err(false)]);
+    /// let stream2 = stream::iter_result(vec![Err(true), Ok(20)]);
     /// let mut chain = stream1.chain(stream2).wait();
     ///
     /// assert_eq!(Some(Ok(10)), chain.next());
     /// assert_eq!(Some(Err(false)), chain.next());
     /// assert_eq!(Some(Err(true)), chain.next());
     /// assert_eq!(Some(Ok(20)), chain.next());
     /// assert_eq!(None, chain.next());
     /// ```
@@ -946,21 +1048,23 @@ pub trait Stream {
         select::new(self, other)
     }
 
     /// A future that completes after the given stream has been fully processed
     /// into the sink, including flushing.
     ///
     /// This future will drive the stream to keep producing items until it is
     /// exhausted, sending each item to the sink. It will complete once both the
-    /// stream is exhausted, and the sink has fully processed and flushed all of
-    /// the items sent to it.
+    /// stream is exhausted, and the sink has fully processed each received
+    /// item, flushed successfully, and closed successfully.
     ///
     /// Doing `stream.forward(sink)` is roughly equivalent to
-    /// `sink.send_all(stream)`.
+    /// `sink.send_all(stream)`. The returned future will exhaust all items from
+    /// `self`, sending them all to `sink`. Furthermore the `sink` will be
+    /// closed and flushed.
     ///
     /// On completion, the pair `(stream, sink)` is returned.
     fn forward<S>(self, sink: S) -> Forward<Self, S>
         where S: Sink<SinkItem = Self::Item>,
               Self::Error: From<S::SinkError>,
               Self: Sized
     {
         forward::new(self, sink)
@@ -976,18 +1080,66 @@ pub trait Stream {
     /// This method is only available when the `use_std` feature of this
     /// library is activated, and it is activated by default.
     #[cfg(feature = "use_std")]
     fn split(self) -> (SplitSink<Self>, SplitStream<Self>)
         where Self: super::sink::Sink + Sized
     {
         split::split(self)
     }
+
+    /// Do something with each item of this stream, afterwards passing it on.
+    ///
+    /// This is similar to the `Iterator::inspect` method in the standard
+    /// library where it allows easily inspecting each value as it passes
+    /// through the stream, for example to debug what's going on.
+    fn inspect<F>(self, f: F) -> Inspect<Self, F>
+        where F: FnMut(&Self::Item),
+              Self: Sized,
+    {
+        inspect::new(self, f)
+    }
+
+    /// Do something with the error of this stream, afterwards passing it on.
+    ///
+    /// This is similar to the `Stream::inspect` method where it allows
+    /// easily inspecting the error as it passes through the stream, for
+    /// example to debug what's going on.
+    fn inspect_err<F>(self, f: F) -> InspectErr<Self, F>
+        where F: FnMut(&Self::Error),
+              Self: Sized,
+    {
+        inspect_err::new(self, f)
+    }
 }
 
 impl<'a, S: ?Sized + Stream> Stream for &'a mut S {
     type Item = S::Item;
     type Error = S::Error;
 
     fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
         (**self).poll()
     }
 }
+
+/// Converts a list of futures into a `Stream` of results from the futures.
+///
+/// This function will take an list of futures (e.g. a vector, an iterator,
+/// etc), and return a stream. The stream will yield items as they become
+/// available on the futures internally, in the order that they become
+/// available. This function is similar to `buffer_unordered` in that it may
+/// return items in a different order than in the list specified.
+///
+/// Note that the returned set can also be used to dynamically push more
+/// futures into the set as they become available.
+#[cfg(feature = "use_std")]
+pub fn futures_unordered<I>(futures: I) -> FuturesUnordered<<I::Item as IntoFuture>::Future>
+    where I: IntoIterator,
+        I::Item: IntoFuture
+{
+    let mut set = FuturesUnordered::new();
+
+    for future in futures {
+        set.push(future.into_future());
+    }
+
+    return set
+}
--- a/third_party/rust/futures/src/stream/once.rs
+++ b/third_party/rust/futures/src/stream/once.rs
@@ -1,34 +1,35 @@
-use core;
-
-use Poll;
-use stream;
+use {Poll, Async};
 use stream::Stream;
 
 /// A stream which emits single element and then EOF.
 ///
 /// This stream will never block and is always ready.
 #[derive(Debug)]
 #[must_use = "streams do nothing unless polled"]
-pub struct Once<T, E>(stream::Iter<core::iter::Once<Result<T, E>>>);
+pub struct Once<T, E>(Option<Result<T, E>>);
 
 /// Creates a stream of single element
 ///
 /// ```rust
 /// use futures::*;
 ///
 /// let mut stream = stream::once::<(), _>(Err(17));
 /// assert_eq!(Err(17), stream.poll());
 /// assert_eq!(Ok(Async::Ready(None)), stream.poll());
 /// ```
 pub fn once<T, E>(item: Result<T, E>) -> Once<T, E> {
-    Once(stream::iter(core::iter::once(item)))
+    Once(Some(item))
 }
 
 impl<T, E> Stream for Once<T, E> {
     type Item = T;
     type Error = E;
 
     fn poll(&mut self) -> Poll<Option<T>, E> {
-        self.0.poll()
+        match self.0.take() {
+            Some(Ok(e)) => Ok(Async::Ready(Some(e))),
+            Some(Err(e)) => Err(e),
+            None => Ok(Async::Ready(None)),
+        }
     }
 }
new file mode 100644
--- /dev/null
+++ b/third_party/rust/futures/src/stream/poll_fn.rs
@@ -0,0 +1,49 @@
+//! Definition of the `PollFn` combinator
+
+use {Stream, Poll};
+
+/// A stream which adapts a function returning `Poll`.
+///
+/// Created by the `poll_fn` function.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct PollFn<F> {
+    inner: F,
+}
+
+/// Creates a new stream wrapping around a function returning `Poll`.
+///
+/// Polling the returned stream delegates to the wrapped function.
+///
+/// # Examples
+///
+/// ```
+/// use futures::stream::poll_fn;
+/// use futures::{Async, Poll};
+///
+/// let mut counter = 1usize;
+///
+/// let read_stream = poll_fn(move || -> Poll<Option<String>, std::io::Error> {
+///     if counter == 0 { return Ok(Async::Ready(None)); }
+///     counter -= 1;
+///     Ok(Async::Ready(Some("Hello, World!".to_owned())))
+/// });
+/// ```
+pub fn poll_fn<T, E, F>(f: F) -> PollFn<F>
+where
+    F: FnMut() -> Poll<Option<T>, E>,
+{
+    PollFn { inner: f }
+}
+
+impl<T, E, F> Stream for PollFn<F>
+where
+    F: FnMut() -> Poll<Option<T>, E>,
+{
+    type Item = T;
+    type Error = E;
+
+    fn poll(&mut self) -> Poll<Option<T>, E> {
+        (self.inner)()
+    }
+}
--- a/third_party/rust/futures/src/stream/repeat.rs
+++ b/third_party/rust/futures/src/stream/repeat.rs
@@ -2,28 +2,32 @@ use core::marker;
 
 
 use stream::Stream;
 
 use {Async, Poll};
 
 
 /// Stream that produces the same element repeatedly.
+///
+/// This structure is created by the `stream::repeat` function.
 #[derive(Debug)]
 #[must_use = "streams do nothing unless polled"]
 pub struct Repeat<T, E>
     where T: Clone
 {
     item: T,
     error: marker::PhantomData<E>,
 }
 
 /// Create a stream which produces the same item repeatedly.
 ///
-/// Stream never produces an error or EOF.
+/// Stream never produces an error or EOF. Note that you likely want to avoid
+/// usage of `collect` or such on the returned stream as it will exhaust
+/// available memory as it tries to just fill up all RAM.
 ///
 /// ```rust
 /// use futures::*;
 ///
 /// let mut stream = stream::repeat::<_, bool>(10);
 /// assert_eq!(Ok(Async::Ready(Some(10))), stream.poll());
 /// assert_eq!(Ok(Async::Ready(Some(10))), stream.poll());
 /// assert_eq!(Ok(Async::Ready(Some(10))), stream.poll());
--- a/third_party/rust/futures/src/stream/select.rs
+++ b/third_party/rust/futures/src/stream/select.rs
@@ -37,29 +37,28 @@ impl<S1, S2> Stream for Select<S1, S2>
             (&mut self.stream2 as &mut Stream<Item=_, Error=_>,
              &mut self.stream1 as &mut Stream<Item=_, Error=_>)
         } else {
             (&mut self.stream1 as &mut Stream<Item=_, Error=_>,
              &mut self.stream2 as &mut Stream<Item=_, Error=_>)
         };
         self.flag = !self.flag;
 
-        let a_done = match try!(a.poll()) {
+        let a_done = match a.poll()? {
             Async::Ready(Some(item)) => return Ok(Some(item).into()),
             Async::Ready(None) => true,
             Async::NotReady => false,
         };
 
-        match try!(b.poll()) {
+        match b.poll()? {
             Async::Ready(Some(item)) => {
                 // If the other stream isn't finished yet, give them a chance to
                 // go first next time as we pulled something off `b`.
                 if !a_done {
                     self.flag = !self.flag;
                 }
-                return Ok(Some(item).into())
+                Ok(Some(item).into())
             }
             Async::Ready(None) if a_done => Ok(None.into()),
-            Async::Ready(None) => Ok(Async::NotReady),
-            Async::NotReady => Ok(Async::NotReady),
+            Async::Ready(None) | Async::NotReady => Ok(Async::NotReady),
         }
     }
 }
--- a/third_party/rust/futures/src/stream/skip.rs
+++ b/third_party/rust/futures/src/stream/skip.rs
@@ -15,16 +15,41 @@ pub fn new<S>(s: S, amt: u64) -> Skip<S>
     where S: Stream,
 {
     Skip {
         stream: s,
         remaining: amt,
     }
 }
 
+impl<S> Skip<S> {
+    /// Acquires a reference to the underlying stream that this combinator is
+    /// pulling from.
+    pub fn get_ref(&self) -> &S {
+        &self.stream
+    }
+
+    /// Acquires a mutable reference to the underlying stream that this
+    /// combinator is pulling from.
+    ///
+    /// Note that care must be taken to avoid tampering with the state of the
+    /// stream which may otherwise confuse this combinator.
+    pub fn get_mut(&mut self) -> &mut S {
+        &mut self.stream
+    }
+
+    /// Consumes this combinator, returning the underlying stream.
+    ///
+    /// Note that this may discard intermediate state of this combinator, so
+    /// care should be taken to avoid losing resources when this is called.
+    pub fn into_inner(self) -> S {
+        self.stream
+    }
+}
+
 // Forwarding impl of Sink from the underlying stream
 impl<S> ::sink::Sink for Skip<S>
     where S: ::sink::Sink
 {
     type SinkItem = S::SinkItem;
     type SinkError = S::SinkError;
 
     fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> {
--- a/third_party/rust/futures/src/stream/skip_while.rs
+++ b/third_party/rust/futures/src/stream/skip_while.rs
@@ -22,16 +22,41 @@ pub fn new<S, P, R>(s: S, p: P) -> SkipW
     SkipWhile {
         stream: s,
         pred: p,
         pending: None,
         done_skipping: false,
     }
 }
 
+impl<S, P, R> SkipWhile<S, P, R> where S: Stream, R: IntoFuture {
+    /// Acquires a reference to the underlying stream that this combinator is
+    /// pulling from.
+    pub fn get_ref(&self) -> &S {
+        &self.stream
+    }
+
+    /// Acquires a mutable reference to the underlying stream that this
+    /// combinator is pulling from.
+    ///
+    /// Note that care must be taken to avoid tampering with the state of the
+    /// stream which may otherwise confuse this combinator.
+    pub fn get_mut(&mut self) -> &mut S {
+        &mut self.stream
+    }
+
+    /// Consumes this combinator, returning the underlying stream.
+    ///
+    /// Note that this may discard intermediate state of this combinator, so
+    /// care should be taken to avoid losing resources when this is called.
+    pub fn into_inner(self) -> S {
+        self.stream
+    }
+}
+
 // Forwarding impl of Sink from the underlying stream
 impl<S, P, R> ::sink::Sink for SkipWhile<S, P, R>
     where S: ::sink::Sink + Stream, R: IntoFuture
 {
     type SinkItem = S::SinkItem;
     type SinkError = S::SinkError;
 
     fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> {
--- a/third_party/rust/futures/src/stream/split.rs
+++ b/third_party/rust/futures/src/stream/split.rs
@@ -1,31 +1,55 @@
+use std::any::Any;
+use std::error::Error;
+use std::fmt;
+
 use {StartSend, Sink, Stream, Poll, Async, AsyncSink};
 use sync::BiLock;
 
 /// A `Stream` part of the split pair
 #[derive(Debug)]
 pub struct SplitStream<S>(BiLock<S>);
 
+impl<S> SplitStream<S> {
+    /// Attempts to put the two "halves" of a split `Stream + Sink` back
+    /// together. Succeeds only if the `SplitStream<S>` and `SplitSink<S>` are
+    /// a matching pair originating from the same call to `Stream::split`.
+    pub fn reunite(self, other: SplitSink<S>) -> Result<S, ReuniteError<S>> {
+        other.reunite(self)
+    }
+}
+
 impl<S: Stream> Stream for SplitStream<S> {
     type Item = S::Item;
     type Error = S::Error;
 
     fn poll(&mut self) -> Poll<Option<S::Item>, S::Error> {
         match self.0.poll_lock() {
             Async::Ready(mut inner) => inner.poll(),
             Async::NotReady => Ok(Async::NotReady),
         }
     }
 }
 
 /// A `Sink` part of the split pair
 #[derive(Debug)]
 pub struct SplitSink<S>(BiLock<S>);
 
+impl<S> SplitSink<S> {
+    /// Attempts to put the two "halves" of a split `Stream + Sink` back
+    /// together. Succeeds only if the `SplitStream<S>` and `SplitSink<S>` are
+    /// a matching pair originating from the same call to `Stream::split`.
+    pub fn reunite(self, other: SplitStream<S>) -> Result<S, ReuniteError<S>> {
+        self.0.reunite(other.0).map_err(|err| {
+            ReuniteError(SplitSink(err.0), SplitStream(err.1))
+        })
+    }
+}
+
 impl<S: Sink> Sink for SplitSink<S> {
     type SinkItem = S::SinkItem;
     type SinkError = S::SinkError;
 
     fn start_send(&mut self, item: S::SinkItem)
         -> StartSend<S::SinkItem, S::SinkError>
     {
         match self.0.poll_lock() {
@@ -50,8 +74,32 @@ impl<S: Sink> Sink for SplitSink<S> {
 }
 
 pub fn split<S: Stream + Sink>(s: S) -> (SplitSink<S>, SplitStream<S>) {
     let (a, b) = BiLock::new(s);
     let read = SplitStream(a);
     let write = SplitSink(b);
     (write, read)
 }
+
+/// Error indicating a `SplitSink<S>` and `SplitStream<S>` were not two halves
+/// of a `Stream + Sink`, and thus could not be `reunite`d.
+pub struct ReuniteError<T>(pub SplitSink<T>, pub SplitStream<T>);
+
+impl<T> fmt::Debug for ReuniteError<T> {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        fmt.debug_tuple("ReuniteError")
+            .field(&"...")
+            .finish()
+    }
+}
+
+impl<T> fmt::Display for ReuniteError<T> {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        write!(fmt, "tried to reunite a SplitStream and SplitSink that don't form a pair")
+    }
+}
+
+impl<T: Any> Error for ReuniteError<T> {
+    fn description(&self) -> &str {
+        "tried to reunite a SplitStream and SplitSink that don't form a pair"
+    }
+}
--- a/third_party/rust/futures/src/stream/take.rs
+++ b/third_party/rust/futures/src/stream/take.rs
@@ -15,16 +15,41 @@ pub fn new<S>(s: S, amt: u64) -> Take<S>
     where S: Stream,
 {
     Take {
         stream: s,
         remaining: amt,
     }
 }
 
+impl<S> Take<S> {
+    /// Acquires a reference to the underlying stream that this combinator is
+    /// pulling from.
+    pub fn get_ref(&self) -> &S {
+        &self.stream
+    }
+
+    /// Acquires a mutable reference to the underlying stream that this
+    /// combinator is pulling from.
+    ///
+    /// Note that care must be taken to avoid tampering with the state of the
+    /// stream which may otherwise confuse this combinator.
+    pub fn get_mut(&mut self) -> &mut S {
+        &mut self.stream
+    }
+
+    /// Consumes this combinator, returning the underlying stream.
+    ///
+    /// Note that this may discard intermediate state of this combinator, so
+    /// care should be taken to avoid losing resources when this is called.
+    pub fn into_inner(self) -> S {
+        self.stream
+    }
+}
+
 // Forwarding impl of Sink from the underlying stream
 impl<S> ::sink::Sink for Take<S>
     where S: ::sink::Sink + Stream
 {
     type SinkItem = S::SinkItem;
     type SinkError = S::SinkError;
 
     fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> {
--- a/third_party/rust/futures/src/stream/take_while.rs
+++ b/third_party/rust/futures/src/stream/take_while.rs
@@ -22,16 +22,41 @@ pub fn new<S, P, R>(s: S, p: P) -> TakeW
     TakeWhile {
         stream: s,
         pred: p,
         pending: None,
         done_taking: false,
     }
 }
 
+impl<S, P, R> TakeWhile<S, P, R> where S: Stream, R: IntoFuture {
+    /// Acquires a reference to the underlying stream that this combinator is
+    /// pulling from.
+    pub fn get_ref(&self) -> &S {
+        &self.stream
+    }
+
+    /// Acquires a mutable reference to the underlying stream that this
+    /// combinator is pulling from.
+    ///
+    /// Note that care must be taken to avoid tampering with the state of the
+    /// stream which may otherwise confuse this combinator.
+    pub fn get_mut(&mut self) -> &mut S {
+        &mut self.stream
+    }
+
+    /// Consumes this combinator, returning the underlying stream.
+    ///
+    /// Note that this may discard intermediate state of this combinator, so
+    /// care should be taken to avoid losing resources when this is called.
+    pub fn into_inner(self) -> S {
+        self.stream
+    }
+}
+
 // Forwarding impl of Sink from the underlying stream
 impl<S, P, R> ::sink::Sink for TakeWhile<S, P, R>
     where S: ::sink::Sink + Stream, R: IntoFuture
 {
     type SinkItem = S::SinkItem;
     type SinkError = S::SinkError;
 
     fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> {
--- a/third_party/rust/futures/src/stream/unfold.rs
+++ b/third_party/rust/futures/src/stream/unfold.rs
@@ -1,17 +1,17 @@
 use core::mem;
 
 use {Future, IntoFuture, Async, Poll};
 use stream::Stream;
 
 /// Creates a `Stream` from a seed and a closure returning a `Future`.
 ///
 /// This function is the dual for the `Stream::fold()` adapter: while
-/// `Stream:fold()` reduces a `Stream` to one single value, `unfold()` creates a
+/// `Stream::fold()` reduces a `Stream` to one single value, `unfold()` creates a
 /// `Stream` from a seed value.
 ///
 /// `unfold()` will call the provided closure with the provided seed, then wait
 /// for the returned `Future` to complete with `(a, b)`. It will then yield the
 /// value `a`, and use `b` as the next internal state.
 ///
 /// If the closure returns `None` instead of `Some(Future)`, then the `unfold()`
 /// will stop producing items and return `Ok(Async::Ready(None))` in future
@@ -80,17 +80,17 @@ impl <T, F, Fut, It> Stream for Unfold<T
                 State::Empty => { return Ok(Async::Ready(None)); }
                 State::Ready(state) => {
                     match (self.f)(state) {
                         Some(fut) => { self.state = State::Processing(fut.into_future()); }
                         None => { return Ok(Async::Ready(None)); }
                     }
                 }
                 State::Processing(mut fut) => {
-                    match try!(fut.poll()) {
+                    match fut.poll()? {
                         Async:: Ready((item, next_state)) => {
                             self.state = State::Ready(next_state);
                             return Ok(Async::Ready(Some(item)));
                         }
                         Async::NotReady => {
                             self.state = State::Processing(fut);
                             return Ok(Async::NotReady);
                         }
--- a/third_party/rust/futures/src/stream/wait.rs
+++ b/third_party/rust/futures/src/stream/wait.rs
@@ -8,16 +8,41 @@ use executor;
 /// into a standard iterator. This is implemented by blocking the current thread
 /// while items on the underlying stream aren't ready yet.
 #[must_use = "iterators do nothing unless advanced"]
 #[derive(Debug)]
 pub struct Wait<S> {
     stream: executor::Spawn<S>,
 }
 
+impl<S> Wait<S> {
+    /// Acquires a reference to the underlying stream that this combinator is
+    /// pulling from.
+    pub fn get_ref(&self) -> &S {
+        self.stream.get_ref()
+    }
+
+    /// Acquires a mutable reference to the underlying stream that this
+    /// combinator is pulling from.
+    ///
+    /// Note that care must be taken to avoid tampering with the state of the
+    /// stream which may otherwise confuse this combinator.
+    pub fn get_mut(&mut self) -> &mut S {
+        self.stream.get_mut()
+    }
+
+    /// Consumes this combinator, returning the underlying stream.
+    ///
+    /// Note that this may discard intermediate state of this combinator, so
+    /// care should be taken to avoid losing resources when this is called.
+    pub fn into_inner(self) -> S {
+        self.stream.into_inner()
+    }
+}
+
 pub fn new<S: Stream>(s: S) -> Wait<S> {
     Wait {
         stream: executor::spawn(s),
     }
 }
 
 impl<S: Stream> Iterator for Wait<S> {
     type Item = Result<S::Item, S::Error>;
--- a/third_party/rust/futures/src/stream/zip.rs
+++ b/third_party/rust/futures/src/stream/zip.rs
@@ -29,27 +29,25 @@ pub fn new<S1, S2>(stream1: S1, stream2:
 impl<S1, S2> Stream for Zip<S1, S2>
     where S1: Stream, S2: Stream<Error = S1::Error>
 {
     type Item = (S1::Item, S2::Item);
     type Error = S1::Error;
 
     fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
         if self.queued1.is_none() {
-            match try!(self.stream1.poll()) {
-                Async::NotReady => {}
+            match self.stream1.poll()? {
                 Async::Ready(Some(item1)) => self.queued1 = Some(item1),
-                Async::Ready(None) => {}
+                Async::Ready(None) | Async::NotReady => {}
             }
         }
         if self.queued2.is_none() {
-            match try!(self.stream2.poll()) {
-                Async::NotReady => {}
+            match self.stream2.poll()? {
                 Async::Ready(Some(item2)) => self.queued2 = Some(item2),
-                Async::Ready(None) => {}
+                Async::Ready(None) | Async::NotReady => {}
             }
         }
 
         if self.queued1.is_some() && self.queued2.is_some() {
             let pair = (self.queued1.take().unwrap(),
                         self.queued2.take().unwrap());
             Ok(Async::Ready(Some(pair)))
         } else if self.stream1.is_done() || self.stream2.is_done() {
--- a/third_party/rust/futures/src/sync/bilock.rs
+++ b/third_party/rust/futures/src/sync/bilock.rs
@@ -1,10 +1,13 @@
+use std::any::Any;
 use std::boxed::Box;
 use std::cell::UnsafeCell;
+use std::error::Error;
+use std::fmt;
 use std::mem;
 use std::ops::{Deref, DerefMut};
 use std::sync::Arc;
 use std::sync::atomic::AtomicUsize;
 use std::sync::atomic::Ordering::SeqCst;
 
 use {Async, Future, Poll};
 use task::{self, Task};
@@ -30,32 +33,32 @@ use task::{self, Task};
 #[derive(Debug)]
 pub struct BiLock<T> {
     inner: Arc<Inner<T>>,
 }
 
 #[derive(Debug)]
 struct Inner<T> {
     state: AtomicUsize,
-    inner: UnsafeCell<T>,
+    inner: Option<UnsafeCell<T>>,
 }
 
 unsafe impl<T: Send> Send for Inner<T> {}
 unsafe impl<T: Send> Sync for Inner<T> {}
 
 impl<T> BiLock<T> {
     /// Creates a new `BiLock` protecting the provided data.
     ///
     /// Two handles to the lock are returned, and these are the only two handles
     /// that will ever be available to the lock. These can then be sent to separate
     /// tasks to be managed there.
     pub fn new(t: T) -> (BiLock<T>, BiLock<T>) {
         let inner = Arc::new(Inner {
             state: AtomicUsize::new(0),
-            inner: UnsafeCell::new(t),
+            inner: Some(UnsafeCell::new(t)),
         });
 
         (BiLock { inner: inner.clone() }, BiLock { inner: inner })
     }
 
     /// Attempt to acquire this lock, returning `NotReady` if it can't be
     /// acquired.
     ///
@@ -85,17 +88,17 @@ impl<T> BiLock<T> {
 
                 // A task was previously blocked on this lock, likely our task,
                 // so we need to update that task.
                 n => unsafe {
                     drop(Box::from_raw(n as *mut Task));
                 }
             }
 
-            let me = Box::new(task::park());
+            let me = Box::new(task::current());
             let me = Box::into_raw(me) as usize;
 
             match self.inner.state.compare_exchange(1, me, SeqCst, SeqCst) {
                 // The lock is still locked, but we've now parked ourselves, so
                 // just report that we're scheduled to receive a notification.
                 Ok(_) => return Async::NotReady,
 
                 // Oops, looks like the lock was unlocked after our swap above
@@ -122,127 +125,174 @@ impl<T> BiLock<T> {
     /// This function consumes the `BiLock<T>` and returns a sentinel future,
     /// `BiLockAcquire<T>`. The returned future will resolve to
     /// `BiLockAcquired<T>` which represents a locked lock similarly to
     /// `BiLockGuard<T>`.
     ///
     /// Note that the returned future will never resolve to an error.
     pub fn lock(self) -> BiLockAcquire<T> {
         BiLockAcquire {
-            inner: self,
+            inner: Some(self),
+        }
+    }
+
+    /// Attempts to put the two "halves" of a `BiLock<T>` back together and
+    /// recover the original value. Succeeds only if the two `BiLock<T>`s
+    /// originated from the same call to `BiLock::new`.
+    pub fn reunite(self, other: Self) -> Result<T, ReuniteError<T>> {
+        if &*self.inner as *const _ == &*other.inner as *const _ {
+            drop(other);
+            let inner = Arc::try_unwrap(self.inner)
+                .ok()
+                .expect("futures: try_unwrap failed in BiLock<T>::reunite");
+            Ok(unsafe { inner.into_inner() })
+        } else {
+            Err(ReuniteError(self, other))
         }
     }
 
     fn unlock(&self) {
         match self.inner.state.swap(0, SeqCst) {
             // we've locked the lock, shouldn't be possible for us to see an
             // unlocked lock.
             0 => panic!("invalid unlocked state"),
 
             // Ok, no one else tried to get the lock, we're done.
             1 => {}
 
             // Another task has parked themselves on this lock, let's wake them
             // up as its now their turn.
             n => unsafe {
-                Box::from_raw(n as *mut Task).unpark();
+                Box::from_raw(n as *mut Task).notify();
             }
         }
     }
 }
 
+impl<T> Inner<T> {
+    unsafe fn into_inner(mut self) -> T {
+        mem::replace(&mut self.inner, None).unwrap().into_inner()
+    }
+}
+
 impl<T> Drop for Inner<T> {
     fn drop(&mut self) {
         assert_eq!(self.state.load(SeqCst), 0);
     }
 }
 
+/// Error indicating two `BiLock<T>`s were not two halves of a whole, and
+/// thus could not be `reunite`d.
+pub struct ReuniteError<T>(pub BiLock<T>, pub BiLock<T>);
+
+impl<T> fmt::Debug for ReuniteError<T> {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        fmt.debug_tuple("ReuniteError")
+            .field(&"...")
+            .finish()
+    }
+}
+
+impl<T> fmt::Display for ReuniteError<T> {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        write!(fmt, "tried to reunite two BiLocks that don't form a pair")
+    }
+}
+
+impl<T: Any> Error for ReuniteError<T> {
+    fn description(&self) -> &str {
+        "tried to reunite two BiLocks that don't form a pair"
+    }
+}
+
 /// Returned RAII guard from the `poll_lock` method.
 ///
 /// This structure acts as a sentinel to the data in the `BiLock<T>` itself,
 /// implementing `Deref` and `DerefMut` to `T`. When dropped, the lock will be
 /// unlocked.
 #[derive(Debug)]
 pub struct BiLockGuard<'a, T: 'a> {
     inner: &'a BiLock<T>,
 }
 
 impl<'a, T> Deref for BiLockGuard<'a, T> {
     type Target = T;
     fn deref(&self) -> &T {
-        unsafe { &*self.inner.inner.inner.get() }
+        unsafe { &*self.inner.inner.inner.as_ref().unwrap().get() }
     }
 }
 
 impl<'a, T> DerefMut for BiLockGuard<'a, T> {
     fn deref_mut(&mut self) -> &mut T {
-        unsafe { &mut *self.inner.inner.inner.get() }
+        unsafe { &mut *self.inner.inner.inner.as_ref().unwrap().get() }
     }
 }
 
 impl<'a, T> Drop for BiLockGuard<'a, T> {
     fn drop(&mut self) {
         self.inner.unlock();
     }
 }
 
 /// Future returned by `BiLock::lock` which will resolve when the lock is
 /// acquired.
 #[derive(Debug)]
 pub struct BiLockAcquire<T> {
-    inner: BiLock<T>,
+    inner: Option<BiLock<T>>,
 }
 
 impl<T> Future for BiLockAcquire<T> {
     type Item = BiLockAcquired<T>;
     type Error = ();
 
     fn poll(&mut self) -> Poll<BiLockAcquired<T>, ()> {
-        match self.inner.poll_lock() {
+        match self.inner.as_ref().expect("cannot poll after Ready").poll_lock() {
             Async::Ready(r) => {
                 mem::forget(r);
-                Ok(BiLockAcquired {
-                    inner: BiLock { inner: self.inner.inner.clone() },
-                }.into())
             }
-            Async::NotReady => Ok(Async::NotReady),
+            Async::NotReady => return Ok(Async::NotReady),
         }
+        Ok(Async::Ready(BiLockAcquired { inner: self.inner.take() }))
     }
 }
 
 /// Resolved value of the `BiLockAcquire<T>` future.
 ///
 /// This value, like `BiLockGuard<T>`, is a sentinel to the value `T` through
 /// implementations of `Deref` and `DerefMut`. When dropped will unlock the
 /// lock, and the original unlocked `BiLock<T>` can be recovered through the
 /// `unlock` method.
 #[derive(Debug)]
 pub struct BiLockAcquired<T> {
-    inner: BiLock<T>,
+    inner: Option<BiLock<T>>,
 }
 
 impl<T> BiLockAcquired<T> {
     /// Recovers the original `BiLock<T>`, unlocking this lock.
-    pub fn unlock(self) -> BiLock<T> {
-        // note that unlocked is implemented in `Drop`, so we don't do anything
-        // here other than creating a new handle to return.
-        BiLock { inner: self.inner.inner.clone() }
+    pub fn unlock(mut self) -> BiLock<T> {
+        let bi_lock = self.inner.take().unwrap();
+
+        bi_lock.unlock();
+
+        bi_lock
     }
 }
 
 impl<T> Deref for BiLockAcquired<T> {
     type Target = T;
     fn deref(&self) -> &T {
-        unsafe { &*self.inner.inner.inner.get() }
+        unsafe { &*self.inner.as_ref().unwrap().inner.inner.as_ref().unwrap().get() }
     }
 }
 
 impl<T> DerefMut for BiLockAcquired<T> {
     fn deref_mut(&mut self) -> &mut T {
-        unsafe { &mut *self.inner.inner.inner.get() }
+        unsafe { &mut *self.inner.as_mut().unwrap().inner.inner.as_ref().unwrap().get() }
     }
 }
 
 impl<T> Drop for BiLockAcquired<T> {
     fn drop(&mut self) {
-        self.inner.unlock();
+        if let Some(ref bi_lock) = self.inner {
+            bi_lock.unlock();
+        }
     }
 }
--- a/third_party/rust/futures/src/sync/mod.rs
+++ b/third_party/rust/futures/src/sync/mod.rs
@@ -3,15 +3,15 @@
 //! This module, which is modeled after `std::sync`, contains user-space
 //! synchronization tools that work with futures, streams and sinks. In
 //! particular, these synchronizers do *not* block physical OS threads, but
 //! instead work at the task level.
 //!
 //! More information and examples of how to use these synchronization primitives
 //! can be found [online at tokio.rs].
 //!
-//! [online at tokio.rs]: https://tokio.rs/docs/going-deeper/synchronization/
+//! [online at tokio.rs]: https://tokio.rs/docs/going-deeper-futures/synchronization/
 
 pub mod oneshot;
 pub mod mpsc;
 mod bilock;
 
 pub use self::bilock::{BiLock, BiLockGuard, BiLockAcquire, BiLockAcquired};
--- a/third_party/rust/futures/src/sync/mpsc/mod.rs
+++ b/third_party/rust/futures/src/sync/mpsc/mod.rs
@@ -34,17 +34,17 @@
 //
 // The general idea is that the channel is created with a `buffer` size of `n`.
 // The channel capacity is `n + num-senders`. Each sender gets one "guaranteed"
 // slot to hold a message. This allows `Sender` to know for a fact that a send
 // will succeed *before* starting to do the actual work of sending the value.
 // Since most of this work is lock-free, once the work starts, it is impossible
 // to safely revert.
 //
-// If the sender is unable to process a send operation, then the the curren
+// If the sender is unable to process a send operation, then the current
 // task is parked and the handle is sent on the parked task queue.
 //
 // Note that the implementation guarantees that the channel capacity will never
 // exceed the configured limit, however there is no *strict* guarantee that the
 // receiver will wake up a parked task *immediately* when a slot becomes
 // available. However, it will almost always unpark a task when a slot becomes
 // available and it is *guaranteed* that a sender will be unparked when the
 // message that caused the sender to become parked is read out of the channel.
@@ -72,53 +72,51 @@ use std::error::Error;
 use std::any::Any;
 use std::sync::atomic::AtomicUsize;
 use std::sync::atomic::Ordering::SeqCst;
 use std::sync::{Arc, Mutex};
 use std::thread;
 use std::usize;
 
 use sync::mpsc::queue::{Queue, PopResult};
+use sync::oneshot;
 use task::{self, Task};
-use {Async, AsyncSink, Poll, StartSend, Sink, Stream};
+use future::Executor;
+use sink::SendAll;
+use resultstream::{self, Results};
+use {Async, AsyncSink, Future, Poll, StartSend, Sink, Stream};
 
 mod queue;
 
 /// The transmission end of a channel which is used to send values.
 ///
 /// This is created by the `channel` method.
 #[derive(Debug)]
 pub struct Sender<T> {
     // Channel state shared between the sender and receiver.
     inner: Arc<Inner<T>>,
 
     // Handle to the task that is blocked on this sender. This handle is sent
     // to the receiver half in order to be notified when the sender becomes
     // unblocked.
-    sender_task: SenderTask,
+    sender_task: Arc<Mutex<SenderTask>>,
 
     // True if the sender might be blocked. This is an optimization to avoid
     // having to lock the mutex most of the time.
     maybe_parked: bool,
 }
 
 /// The transmission end of a channel which is used to send values.
 ///
 /// This is created by the `unbounded` method.
 #[derive(Debug)]
 pub struct UnboundedSender<T>(Sender<T>);
 
-fn _assert_kinds() {
-    fn _assert_send<T: Send>() {}
-    fn _assert_sync<T: Sync>() {}
-    fn _assert_clone<T: Clone>() {}
-    _assert_send::<UnboundedSender<u32>>();
-    _assert_sync::<UnboundedSender<u32>>();
-    _assert_clone::<UnboundedSender<u32>>();
-}
+trait AssertKinds: Send + Sync + Clone {}
+impl AssertKinds for UnboundedSender<u32> {}
 
 
 /// The receiving end of a channel which implements the `Stream` trait.
 ///
 /// This is a concrete implementation of a stream which can be used to represent
 /// a stream of values being computed elsewhere. This is created by the
 /// `channel` method.
 #[derive(Debug)]
@@ -134,16 +132,28 @@ pub struct Receiver<T> {
 #[derive(Debug)]
 pub struct UnboundedReceiver<T>(Receiver<T>);
 
 /// Error type for sending, used when the receiving end of a channel is
 /// dropped
 #[derive(Clone, PartialEq, Eq)]
 pub struct SendError<T>(T);
 
+/// Error type returned from `try_send`
+#[derive(Clone, PartialEq, Eq)]
+pub struct TrySendError<T> {
+    kind: TrySendErrorKind<T>,
+}
+
+#[derive(Clone, PartialEq, Eq)]
+enum TrySendErrorKind<T> {
+    Full(T),
+    Disconnected(T),
+}
+
 impl<T> fmt::Debug for SendError<T> {
     fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
         fmt.debug_tuple("SendError")
             .field(&"...")
             .finish()
     }
 }
 
@@ -162,30 +172,89 @@ impl<T: Any> Error for SendError<T>
 
 impl<T> SendError<T> {
     /// Returns the message that was attempted to be sent but failed.
     pub fn into_inner(self) -> T {
         self.0
     }
 }
 
+impl<T> fmt::Debug for TrySendError<T> {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        fmt.debug_tuple("TrySendError")
+            .field(&"...")
+            .finish()
+    }
+}
+
+impl<T> fmt::Display for TrySendError<T> {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        if self.is_full() {
+            write!(fmt, "send failed because channel is full")
+        } else {
+            write!(fmt, "send failed because receiver is gone")
+        }
+    }
+}
+
+impl<T: Any> Error for TrySendError<T> {
+    fn description(&self) -> &str {
+        if self.is_full() {
+            "send failed because channel is full"
+        } else {
+            "send failed because receiver is gone"
+        }
+    }
+}
+
+impl<T> TrySendError<T> {
+    /// Returns true if this error is a result of the channel being full
+    pub fn is_full(&self) -> bool {
+        use self::TrySendErrorKind::*;
+
+        match self.kind {
+            Full(_) => true,
+            _ => false,
+        }
+    }
+
+    /// Returns true if this error is a result of the receiver being dropped
+    pub fn is_disconnected(&self) -> bool {
+        use self::TrySendErrorKind::*;
+
+        match self.kind {
+            Disconnected(_) => true,
+            _ => false,
+        }
+    }
+
+    /// Returns the message that was attempted to be sent but failed.
+    pub fn into_inner(self) -> T {
+        use self::TrySendErrorKind::*;
+
+        match self.kind {
+            Full(v) | Disconnected(v) => v,
+        }
+    }
+}
+
 #[derive(Debug)]
 struct Inner<T> {
     // Max buffer size of the channel. If `None` then the channel is unbounded.
     buffer: Option<usize>,
 
     // Internal channel state. Consists of the number of messages stored in the
     // channel as well as a flag signalling that the channel is closed.
     state: AtomicUsize,
 
     // Atomic, FIFO queue used to send messages to the receiver
     message_queue: Queue<Option<T>>,
 
     // Atomic, FIFO queue used to send parked task handles to the receiver.
-    parked_queue: Queue<SenderTask>,
+    parked_queue: Queue<Arc<Mutex<SenderTask>>>,
 
     // Number of senders in existence
     num_senders: AtomicUsize,
 
     // Handle to the receiver's task.
     recv_task: Mutex<ReceiverTask>,
 }
 
@@ -208,31 +277,52 @@ struct ReceiverTask {
 // Returned from Receiver::try_park()
 enum TryPark {
     Parked,
     Closed,
     NotEmpty,
 }
 
 // The `is_open` flag is stored in the left-most bit of `Inner::state`
-const OPEN_MASK: usize = 1 << 31;
+const OPEN_MASK: usize = usize::MAX - (usize::MAX >> 1);
 
 // When a new channel is created, it is created in the open state with no
 // pending messages.
 const INIT_STATE: usize = OPEN_MASK;
 
-// The maximum number of messages that a channel can track is `usize::MAX > 1`
+// The maximum number of messages that a channel can track is `usize::MAX >> 1`
 const MAX_CAPACITY: usize = !(OPEN_MASK);
 
 // The maximum requested buffer size must be less than the maximum capacity of
 // a channel. This is because each sender gets a guaranteed slot.
 const MAX_BUFFER: usize = MAX_CAPACITY >> 1;
 
 // Sent to the consumer to wake up blocked producers
-type SenderTask = Arc<Mutex<Option<Task>>>;
+#[derive(Debug)]
+struct SenderTask {
+    task: Option<Task>,
+    is_parked: bool,
+}
+
+impl SenderTask {
+    fn new() -> Self {
+        SenderTask {
+            task: None,
+            is_parked: false,
+        }
+    }
+
+    fn notify(&mut self) {
+        self.is_parked = false;
+
+        if let Some(task) = self.task.take() {
+            task.notify();
+        }
+    }
+}
 
 /// Creates an in-memory channel implementation of the `Stream` trait with
 /// bounded capacity.
 ///
 /// This method creates a concrete implementation of the `Stream` trait which
 /// can be used to send values across threads in a streaming fashion. This
 /// channel is unique in that it implements back pressure to ensure that the
 /// sender never outpaces the receiver. The channel capacity is equal to
@@ -276,17 +366,17 @@ fn channel2<T>(buffer: Option<usize>) ->
         recv_task: Mutex::new(ReceiverTask {
             unparked: false,
             task: None,
         }),
     });
 
     let tx = Sender {
         inner: inner.clone(),
-        sender_task: Arc::new(Mutex::new(None)),
+        sender_task: Arc::new(Mutex::new(SenderTask::new())),
         maybe_parked: false,
     };
 
     let rx = Receiver {
         inner: inner,
     };
 
     (tx, rx)
@@ -294,18 +384,45 @@ fn channel2<T>(buffer: Option<usize>) ->
 
 /*
  *
  * ===== impl Sender =====
  *
  */
 
 impl<T> Sender<T> {
+    /// Attempts to send a message on this `Sender<T>` without blocking.
+    ///
+    /// This function, unlike `start_send`, is safe to call whether it's being
+    /// called on a task or not. Note that this function, however, will *not*
+    /// attempt to block the current task if the message cannot be sent.
+    ///
+    /// It is not recommended to call this function from inside of a future,
+    /// only from an external thread where you've otherwise arranged to be
+    /// notified when the channel is no longer full.
+    pub fn try_send(&mut self, msg: T) -> Result<(), TrySendError<T>> {
+        // If the sender is currently blocked, reject the message
+        if !self.poll_unparked(false).is_ready() {
+            return Err(TrySendError {
+                kind: TrySendErrorKind::Full(msg),
+            });
+        }
+
+        // The channel has capacity to accept the message, so send it
+        self.do_send(Some(msg), false)
+            .map_err(|SendError(v)| {
+                TrySendError {
+                    kind: TrySendErrorKind::Disconnected(v),
+                }
+            })
+    }
+
     // Do the send without failing
-    fn do_send(&mut self, msg: Option<T>, can_park: bool) -> Result<(), SendError<T>> {
+    // None means close
+    fn do_send(&mut self, msg: Option<T>, do_park: bool) -> Result<(), SendError<T>> {
         // First, increment the number of messages contained by the channel.
         // This operation will also atomically determine if the sender task
         // should be parked.
         //
         // None is returned in the case that the channel has been closed by the
         // receiver. This happens when `Receiver::close` is called or the
         // receiver is dropped.
         let park_self = match self.inc_num_messages(msg.is_none()) {
@@ -326,21 +443,21 @@ impl<T> Sender<T> {
                 }
             }
         };
 
         // If the channel has reached capacity, then the sender task needs to
         // be parked. This will send the task handle on the parked task queue.
         //
         // However, when `do_send` is called while dropping the `Sender`,
-        // `task::park()` can't be called safely. In this case, in order to
+        // `task::current()` can't be called safely. In this case, in order to
         // maintain internal consistency, a blank message is pushed onto the
         // parked task queue.
         if park_self {
-            self.park(can_park);
+            self.park(do_park);
         }
 
         self.queue_push_and_signal(msg);
 
         Ok(())
     }
 
     // Do the send without parking current task.
@@ -423,88 +540,115 @@ impl<T> Sender<T> {
 
             // If the receiver has already been unparked, then there is nothing
             // more to do
             if recv_task.unparked {
                 return;
             }
 
             // Setting this flag enables the receiving end to detect that
-            // an unpark event happened in order to avoid unecessarily
+            // an unpark event happened in order to avoid unnecessarily
             // parking.
             recv_task.unparked = true;
             recv_task.task.take()
         };
 
         if let Some(task) = task {
-            task.unpark();
+            task.notify();
         }
     }
 
     fn park(&mut self, can_park: bool) {
-        // TODO: clean up internal state if the task::park will fail
+        // TODO: clean up internal state if the task::current will fail
 
         let task = if can_park {
-            Some(task::park())
+            Some(task::current())
         } else {
             None
         };
 
-        *self.sender_task.lock().unwrap() = task;
+        {
+            let mut sender = self.sender_task.lock().unwrap();
+            sender.task = task;
+            sender.is_parked = true;
+        }
 
         // Send handle over queue
         let t = self.sender_task.clone();
         self.inner.parked_queue.push(t);
 
         // Check to make sure we weren't closed after we sent our task on the
         // queue
         let state = decode_state(self.inner.state.load(SeqCst));
         self.maybe_parked = state.is_open;
     }
 
-    fn poll_unparked(&mut self) -> Async<()> {
+    /// Polls the channel to determine if there is guaranteed to be capacity to send at least one
+    /// item without waiting.
+    ///
+    /// Returns `Ok(Async::Ready(_))` if there is sufficient capacity, or returns
+    /// `Ok(Async::NotReady)` if the channel is not guaranteed to have capacity. Returns
+    /// `Err(SendError(_))` if the receiver has been dropped.
+    ///
+    /// # Panics
+    ///
+    /// This method will panic if called from outside the context of a task or future.
+    pub fn poll_ready(&mut self) -> Poll<(), SendError<()>> {
+        let state = decode_state(self.inner.state.load(SeqCst));
+        if !state.is_open {
+            return Err(SendError(()));
+        }
+
+        Ok(self.poll_unparked(true))
+    }
+
+    fn poll_unparked(&mut self, do_park: bool) -> Async<()> {
         // First check the `maybe_parked` variable. This avoids acquiring the
         // lock in most cases
         if self.maybe_parked {
             // Get a lock on the task handle
             let mut task = self.sender_task.lock().unwrap();
 
-            if task.is_none() {
+            if !task.is_parked {
                 self.maybe_parked = false;
                 return Async::Ready(())
             }
 
             // At this point, an unpark request is pending, so there will be an
             // unpark sometime in the future. We just need to make sure that
             // the correct task will be notified.
             //
             // Update the task in case the `Sender` has been moved to another
             // task
-            *task = Some(task::park());
+            task.task = if do_park {
+                Some(task::current())
+            } else {
+                None
+            };
 
             Async::NotReady
         } else {
             Async::Ready(())
         }
     }
 }
 
 impl<T> Sink for Sender<T> {
     type SinkItem = T;
     type SinkError = SendError<T>;
 
     fn start_send(&mut self, msg: T) -> StartSend<T, SendError<T>> {
         // If the sender is currently blocked, reject the message before doing
         // any work.
-        if !self.poll_unparked().is_ready() {
+        if !self.poll_unparked(true).is_ready() {
             return Ok(AsyncSink::NotReady(msg));
         }
 
         // The channel has capacity to accept the message, so send it.
-        try!(self.do_send(Some(msg), true));
+        self.do_send(Some(msg), true)?;
 
         Ok(AsyncSink::Ready)
     }
 
     fn poll_complete(&mut self) -> Poll<(), SendError<T>> {
         Ok(Async::Ready(()))
     }
 
@@ -514,17 +658,28 @@ impl<T> Sink for Sender<T> {
 }
 
 impl<T> UnboundedSender<T> {
     /// Sends the provided message along this channel.
     ///
     /// This is an unbounded sender, so this function differs from `Sink::send`
     /// by ensuring the return type reflects that the channel is always ready to
     /// receive messages.
+    #[deprecated(note = "renamed to `unbounded_send`")]
+    #[doc(hidden)]
     pub fn send(&self, msg: T) -> Result<(), SendError<T>> {
+        self.unbounded_send(msg)
+    }
+
+    /// Sends the provided message along this channel.
+    ///
+    /// This is an unbounded sender, so this function differs from `Sink::send`
+    /// by ensuring the return type reflects that the channel is always ready to
+    /// receive messages.
+    pub fn unbounded_send(&self, msg: T) -> Result<(), SendError<T>> {
         self.0.do_send_nb(msg)
     }
 }
 
 impl<T> Sink for UnboundedSender<T> {
     type SinkItem = T;
     type SinkError = SendError<T>;
 
@@ -541,17 +696,17 @@ impl<T> Sink for UnboundedSender<T> {
     }
 }
 
 impl<'a, T> Sink for &'a UnboundedSender<T> {
     type SinkItem = T;
     type SinkError = SendError<T>;
 
     fn start_send(&mut self, msg: T) -> StartSend<T, SendError<T>> {
-        try!(self.0.do_send_nb(msg));
+        self.0.do_send_nb(msg)?;
         Ok(AsyncSink::Ready)
     }
 
     fn poll_complete(&mut self) -> Poll<(), SendError<T>> {
         Ok(Async::Ready(()))
     }
 
     fn close(&mut self) -> Poll<(), SendError<T>> {
@@ -584,17 +739,17 @@ impl<T> Clone for Sender<T> {
             let next = curr + 1;
             let actual = self.inner.num_senders.compare_and_swap(curr, next, SeqCst);
 
             // The ABA problem doesn't matter here. We only care that the
             // number of senders never exceeds the maximum.
             if actual == curr {
                 return Sender {
                     inner: self.inner.clone(),
-                    sender_task: Arc::new(Mutex::new(None)),
+                    sender_task: Arc::new(Mutex::new(SenderTask::new())),
                     maybe_parked: false,
                 };
             }
 
             curr = actual;
         }
     }
 }
@@ -640,20 +795,17 @@ impl<T> Receiver<T> {
             }
         }
 
         // Wake up any threads waiting as they'll see that we've closed the
         // channel and will continue on their merry way.
         loop {
             match unsafe { self.inner.parked_queue.pop() } {
                 PopResult::Data(task) => {
-                    let task = task.lock().unwrap().take();
-                    if let Some(task) = task {
-                        task.unpark();
-                    }
+                    task.lock().unwrap().notify();
                 }
                 PopResult::Empty => break,
                 PopResult::Inconsistent => thread::yield_now(),
             }
         }
     }
 
     fn next_message(&mut self) -> Async<Option<T>> {
@@ -670,39 +822,32 @@ impl<T> Receiver<T> {
                 PopResult::Inconsistent => {
                     // Inconsistent means that there will be a message to pop
                     // in a short time. This branch can only be reached if
                     // values are being produced from another thread, so there
                     // are a few ways that we can deal with this:
                     //
                     // 1) Spin
                     // 2) thread::yield_now()
-                    // 3) task::park().unwrap() & return NotReady
+                    // 3) task::current().unwrap() & return NotReady
                     //
                     // For now, thread::yield_now() is used, but it would
                     // probably be better to spin a few times then yield.
                     thread::yield_now();
                 }
             }
         }
     }
 
     // Unpark a single task handle if there is one pending in the parked queue
     fn unpark_one(&mut self) {
         loop {
             match unsafe { self.inner.parked_queue.pop() } {
                 PopResult::Data(task) => {
-                    // Do this step first so that the lock is dropped when
-                    // `unpark` is called
-                    let task = task.lock().unwrap().take();
-
-                    if let Some(task) = task {
-                        task.unpark();
-                    }
-
+                    task.lock().unwrap().notify();
                     return;
                 }
                 PopResult::Empty => {
                     // Queue empty, no task to wake up.
                     return;
                 }
                 PopResult::Inconsistent => {
                     // Same as above
@@ -726,17 +871,17 @@ impl<T> Receiver<T> {
         let mut recv_task = self.inner.recv_task.lock().unwrap();
 
         if recv_task.unparked {
             // Consume the `unpark` signal without actually parking
             recv_task.unparked = false;
             return TryPark::NotEmpty;
         }
 
-        recv_task.task = Some(task::park());
+        recv_task.task = Some(task::current());
         TryPark::Parked
     }
 
     fn dec_num_messages(&self) {
         let mut curr = self.inner.state.load(SeqCst);
 
         loop {
             let mut state = decode_state(curr);
@@ -823,16 +968,148 @@ impl<T> Stream for UnboundedReceiver<T> 
     type Item = T;
     type Error = ();
 
     fn poll(&mut self) -> Poll<Option<T>, ()> {
         self.0.poll()
     }
 }
 
+/// Handle returned from the `spawn` function.
+///
+/// This handle is a stream that proxies a stream on a separate `Executor`.
+/// Created through the `mpsc::spawn` function, this handle will produce
+/// the same values as the proxied stream, as they are produced in the executor,
+/// and uses a limited buffer to exert back-pressure on the remote stream.
+///
+/// If this handle is dropped, then the stream will no longer be polled and is
+/// scheduled to be dropped.
+pub struct SpawnHandle<Item, Error> {
+    rx: Receiver<Result<Item, Error>>,
+    _cancel_tx: oneshot::Sender<()>,
+}
+
+/// Type of future which `Executor` instances must be able to execute for `spawn`.
+pub struct Execute<S: Stream> {
+    inner: SendAll<Sender<Result<S::Item, S::Error>>, Results<S, SendError<Result<S::Item, S::Error>>>>,
+    cancel_rx: oneshot::Receiver<()>,
+}
+
+/// Spawns a `stream` onto the instance of `Executor` provided, `executor`,
+/// returning a handle representing the remote stream.
+///
+/// The `stream` will be canceled if the `SpawnHandle` is dropped.
+///
+/// The `SpawnHandle` returned is a stream that is a proxy for `stream` itself.
+/// When `stream` has additional items available, then the `SpawnHandle`
+/// will have those same items available.
+///
+/// At most `buffer + 1` elements will be buffered at a time. If the buffer
+/// is full, then `stream` will stop progressing until more space is available.
+/// This allows the `SpawnHandle` to exert backpressure on the `stream`.
+///
+/// # Panics
+///
+/// This function will panic if `executor` is unable spawn a `Future` containing
+/// the entirety of the `stream`.
+pub fn spawn<S, E>(stream: S, executor: &E, buffer: usize) -> SpawnHandle<S::Item, S::Error>
+    where S: Stream,
+          E: Executor<Execute<S>>
+{
+    let (cancel_tx, cancel_rx) = oneshot::channel();
+    let (tx, rx) = channel(buffer);
+    executor.execute(Execute {
+        inner: tx.send_all(resultstream::new(stream)),
+        cancel_rx: cancel_rx,
+    }).expect("failed to spawn stream");
+    SpawnHandle {
+        rx: rx,
+        _cancel_tx: cancel_tx,
+    }
+}
+
+/// Spawns a `stream` onto the instance of `Executor` provided, `executor`,
+/// returning a handle representing the remote stream, with unbounded buffering.
+///
+/// The `stream` will be canceled if the `SpawnHandle` is dropped.
+///
+/// The `SpawnHandle` returned is a stream that is a proxy for `stream` itself.
+/// When `stream` has additional items available, then the `SpawnHandle`
+/// will have those same items available.
+///
+/// An unbounded buffer is used, which means that values will be buffered as
+/// fast as `stream` can produce them, without any backpressure. Therefore, if
+/// `stream` is an infinite stream, it can use an unbounded amount of memory, and
+/// potentially hog CPU resources.
+///
+/// # Panics
+///
+/// This function will panic if `executor` is unable spawn a `Future` containing
+/// the entirety of the `stream`.
+pub fn spawn_unbounded<S, E>(stream: S, executor: &E) -> SpawnHandle<S::Item, S::Error>
+    where S: Stream,
+          E: Executor<Execute<S>>
+{
+    let (cancel_tx, cancel_rx) = oneshot::channel();
+    let (tx, rx) = channel2(None);
+    executor.execute(Execute {
+        inner: tx.send_all(resultstream::new(stream)),
+        cancel_rx: cancel_rx,
+    }).expect("failed to spawn stream");
+    SpawnHandle {
+        rx: rx,
+        _cancel_tx: cancel_tx,
+    }
+}
+
+impl<I, E> Stream for SpawnHandle<I, E> {
+    type Item = I;
+    type Error = E;
+
+    fn poll(&mut self) -> Poll<Option<I>, E> {
+        match self.rx.poll() {
+            Ok(Async::Ready(Some(Ok(t)))) => Ok(Async::Ready(Some(t.into()))),
+            Ok(Async::Ready(Some(Err(e)))) => Err(e),
+            Ok(Async::Ready(None)) => Ok(Async::Ready(None)),
+            Ok(Async::NotReady) => Ok(Async::NotReady),
+            Err(_) => unreachable!("mpsc::Receiver should never return Err"),
+        }
+    }
+}
+
+impl<I, E> fmt::Debug for SpawnHandle<I, E> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_struct("SpawnHandle")
+         .finish()
+    }
+}
+
+impl<S: Stream> Future for Execute<S> {
+    type Item = ();
+    type Error = ();
+
+    fn poll(&mut self) -> Poll<(), ()> {
+        match self.cancel_rx.poll() {
+            Ok(Async::NotReady) => (),
+            _ => return Ok(Async::Ready(())),
+        }
+        match self.inner.poll() {
+            Ok(Async::NotReady) => Ok(Async::NotReady),
+            _ => Ok(Async::Ready(()))
+        }
+    }
+}
+
+impl<S: Stream> fmt::Debug for Execute<S> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_struct("Execute")
+         .finish()
+    }
+}
+
 /*
  *
  * ===== impl Inner =====
  *
  */
 
 impl<T> Inner<T> {
     // The return value is such that the total number of messages that can be
--- a/third_party/rust/futures/src/sync/mpsc/queue.rs
+++ b/third_party/rust/futures/src/sync/mpsc/queue.rs
@@ -109,17 +109,17 @@ impl<T> Queue<T> {
     }
 
     /// Pops some data from this queue.
     ///
     /// Note that the current implementation means that this function cannot
     /// return `Option<T>`. It is possible for this queue to be in an
     /// inconsistent state where many pushes have succeeded and completely
     /// finished, but pops cannot return `Some(t)`. This inconsistent state
-    /// happens when a pusher is pre-empted at an inopportune moment.
+    /// happens when a pusher is preempted at an inopportune moment.
     ///
     /// This inconsistent state means that this queue does indeed have data, but
     /// it does not currently have access to it at this time.
     ///
     /// This function is unsafe because only one thread can call it at a time.
     pub unsafe fn pop(&self) -> PopResult<T> {
         let tail = *self.tail.get();
         let next = (*tail).next.load(Ordering::Acquire);
--- a/third_party/rust/futures/src/sync/oneshot.rs
+++ b/third_party/rust/futures/src/sync/oneshot.rs
@@ -2,16 +2,17 @@
 
 use std::sync::Arc;
 use std::sync::atomic::AtomicBool;
 use std::sync::atomic::Ordering::SeqCst;
 use std::error::Error;
 use std::fmt;
 
 use {Future, Poll, Async};
+use future::{lazy, Lazy, Executor, IntoFuture};
 use lock::Lock;
 use task::{self, Task};
 
 /// A future representing the completion of a computation happening elsewhere in
 /// memory.
 ///
 /// This is created by the `oneshot::channel` function.
 #[must_use = "futures do nothing unless polled"]
@@ -29,17 +30,17 @@ pub struct Sender<T> {
     inner: Arc<Inner<T>>,
 }
 
 /// Internal state of the `Receiver`/`Sender` pair above. This is all used as
 /// the internal synchronization between the two for send/recv operations.
 #[derive(Debug)]
 struct Inner<T> {
     /// Indicates whether this oneshot is complete yet. This is filled in both
-    /// by `Sender::drop` and by `Receiver::drop`, and both sides iterpret it
+    /// by `Sender::drop` and by `Receiver::drop`, and both sides interpret it
     /// appropriately.
     ///
     /// For `Receiver`, if this is `true`, then it's guaranteed that `data` is
     /// unlocked and ready to be inspected.
     ///
     /// For `Sender` if this is `true` then the oneshot has gone away and it
     /// can return ready from `poll_cancel`.
     complete: AtomicBool,
@@ -78,131 +79,119 @@ struct Inner<T> {
 ///
 /// # Examples
 ///
 /// ```
 /// use std::thread;
 /// use futures::sync::oneshot;
 /// use futures::*;
 ///
-/// let (c, p) = oneshot::channel::<i32>();
+/// let (p, c) = oneshot::channel::<i32>();
 ///
 /// thread::spawn(|| {
-///     p.map(|i| {
+///     c.map(|i| {
 ///         println!("got: {}", i);
 ///     }).wait();
 /// });
 ///
-/// c.send(3).unwrap();
+/// p.send(3).unwrap();
 /// ```
 pub fn channel<T>() -> (Sender<T>, Receiver<T>) {
-    let inner = Arc::new(Inner {
-        complete: AtomicBool::new(false),
-        data: Lock::new(None),
-        rx_task: Lock::new(None),
-        tx_task: Lock::new(None),
-    });
+    let inner = Arc::new(Inner::new());
     let receiver = Receiver {
         inner: inner.clone(),
     };
     let sender = Sender {
         inner: inner,
     };
     (sender, receiver)
 }
 
-impl<T> Sender<T> {
-    #[deprecated(note = "renamed to `send`", since = "0.1.11")]
-    #[doc(hidden)]
-    #[cfg(feature = "with-deprecated")]
-    pub fn complete(self, t: T) {
-        drop(self.send(t));
+impl<T> Inner<T> {
+    fn new() -> Inner<T> {
+        Inner {
+            complete: AtomicBool::new(false),
+            data: Lock::new(None),
+            rx_task: Lock::new(None),
+            tx_task: Lock::new(None),
+        }
     }
 
-    /// Completes this oneshot with a successful result.
-    ///
-    /// This function will consume `self` and indicate to the other end, the
-    /// `Receiver`, that the error provided is the result of the computation this
-    /// represents.
-    ///
-    /// If the value is successfully enqueued for the remote end to receive,
-    /// then `Ok(())` is returned. If the receiving end was deallocated before
-    /// this function was called, however, then `Err` is returned with the value
-    /// provided.
-    pub fn send(self, t: T) -> Result<(), T> {
-        if self.inner.complete.load(SeqCst) {
+    fn send(&self, t: T) -> Result<(), T> {
+        if self.complete.load(SeqCst) {
             return Err(t)
         }
 
-        // Note that this lock acquisition should always succeed as it can only
-        // interfere with `poll` in `Receiver` which is only called when the
-        // `complete` flag is true, which we're setting here.
-        let mut slot = self.inner.data.try_lock().unwrap();
-        assert!(slot.is_none());
-        *slot = Some(t);
-        drop(slot);
-        Ok(())
+        // Note that this lock acquisition may fail if the receiver
+        // is closed and sets the `complete` flag to true, whereupon
+        // the receiver may call `poll()`.
+        if let Some(mut slot) = self.data.try_lock() {
+            assert!(slot.is_none());
+            *slot = Some(t);
+            drop(slot);
+
+            // If the receiver called `close()` between the check at the
+            // start of the function, and the lock being released, then
+            // the receiver may not be around to receive it, so try to
+            // pull it back out.
+            if self.complete.load(SeqCst) {
+                // If lock acquisition fails, then receiver is actually
+                // receiving it, so we're good.
+                if let Some(mut slot) = self.data.try_lock() {
+                    if let Some(t) = slot.take() {
+                        return Err(t);
+                    }
+                }
+            }
+            Ok(())
+        } else {
+            // Must have been closed
+            Err(t)
+        }
     }
 
-    /// Polls this `Sender` half to detect whether the `Receiver` this has
-    /// paired with has gone away.
-    ///
-    /// This function can be used to learn about when the `Receiver` (consumer)
-    /// half has gone away and nothing will be able to receive a message sent
-    /// from `complete`.
-    ///
-    /// Like `Future::poll`, this function will panic if it's not called from
-    /// within the context of a task. In otherwords, this should only ever be
-    /// called from inside another future.
-    ///
-    /// If `Ready` is returned then it means that the `Receiver` has disappeared
-    /// and the result this `Sender` would otherwise produce should no longer
-    /// be produced.
-    ///
-    /// If `NotReady` is returned then the `Receiver` is still alive and may be
-    /// able to receive a message if sent. The current task, however, is
-    /// scheduled to receive a notification if the corresponding `Receiver` goes
-    /// away.
-    pub fn poll_cancel(&mut self) -> Poll<(), ()> {
+    fn poll_cancel(&self) -> Poll<(), ()> {
         // Fast path up first, just read the flag and see if our other half is
         // gone. This flag is set both in our destructor and the oneshot
         // destructor, but our destructor hasn't run yet so if it's set then the
         // oneshot is gone.
-        if self.inner.complete.load(SeqCst) {
+        if self.complete.load(SeqCst) {
             return Ok(Async::Ready(()))
         }
 
         // If our other half is not gone then we need to park our current task
         // and move it into the `notify_cancel` slot to get notified when it's
         // actually gone.
         //
         // If `try_lock` fails, then the `Receiver` is in the process of using
         // it, so we can deduce that it's now in the process of going away and
         // hence we're canceled. If it succeeds then we just store our handle.
         //
         // Crucially we then check `oneshot_gone` *again* before we return.
         // While we were storing our handle inside `notify_cancel` the `Receiver`
         // may have been dropped. The first thing it does is set the flag, and
         // if it fails to acquire the lock it assumes that we'll see the flag
         // later on. So... we then try to see the flag later on!
-        let handle = task::park();
-        match self.inner.tx_task.try_lock() {
+        let handle = task::current();
+        match self.tx_task.try_lock() {
             Some(mut p) => *p = Some(handle),
             None => return Ok(Async::Ready(())),
         }
-        if self.inner.complete.load(SeqCst) {
+        if self.complete.load(SeqCst) {
             Ok(Async::Ready(()))
         } else {
             Ok(Async::NotReady)
         }
     }
-}
 
-impl<T> Drop for Sender<T> {
-    fn drop(&mut self) {
+    fn is_canceled(&self) -> bool {
+        self.complete.load(SeqCst)
+    }
+
+    fn drop_tx(&self) {
         // Flag that we're a completed `Sender` and try to wake up a receiver.
         // Whether or not we actually stored any data will get picked up and
         // translated to either an item or cancellation.
         //
         // Note that if we fail to acquire the `rx_task` lock then that means
         // we're in one of two situations:
         //
         // 1. The receiver is trying to block in `poll`
@@ -213,27 +202,186 @@ impl<T> Drop for Sender<T> {
         // wake up anyone anyway. So in both cases it's ok to ignore the `None`
         // case of `try_lock` and bail out.
         //
         // The first case crucially depends on `Lock` using `SeqCst` ordering
         // under the hood. If it instead used `Release` / `Acquire` ordering,
         // then it would not necessarily synchronize with `inner.complete`
         // and deadlock might be possible, as was observed in
         // https://github.com/alexcrichton/futures-rs/pull/219.
-        self.inner.complete.store(true, SeqCst);
-        if let Some(mut slot) = self.inner.rx_task.try_lock() {
+        self.complete.store(true, SeqCst);
+        if let Some(mut slot) = self.rx_task.try_lock() {
             if let Some(task) = slot.take() {
                 drop(slot);
-                task.unpark();
+                task.notify();
+            }
+        }
+    }
+
+    fn close_rx(&self) {
+        // Flag our completion and then attempt to wake up the sender if it's
+        // blocked. See comments in `drop` below for more info
+        self.complete.store(true, SeqCst);
+        if let Some(mut handle) = self.tx_task.try_lock() {
+            if let Some(task) = handle.take() {
+                drop(handle);
+                task.notify()
+            }
+        }
+    }
+
+    fn recv(&self) -> Poll<T, Canceled> {
+        let mut done = false;
+
+        // Check to see if some data has arrived. If it hasn't then we need to
+        // block our task.
+        //
+        // Note that the acquisition of the `rx_task` lock might fail below, but
+        // the only situation where this can happen is during `Sender::drop`
+        // when we are indeed completed already. If that's happening then we
+        // know we're completed so keep going.
+        if self.complete.load(SeqCst) {
+            done = true;
+        } else {
+            let task = task::current();
+            match self.rx_task.try_lock() {
+                Some(mut slot) => *slot = Some(task),
+                None => done = true,
+            }
+        }
+
+        // If we're `done` via one of the paths above, then look at the data and
+        // figure out what the answer is. If, however, we stored `rx_task`
+        // successfully above we need to check again if we're completed in case
+        // a message was sent while `rx_task` was locked and couldn't notify us
+        // otherwise.
+        //
+        // If we're not done, and we're not complete, though, then we've
+        // successfully blocked our task and we return `NotReady`.
+        if done || self.complete.load(SeqCst) {
+            // If taking the lock fails, the sender will realise that the we're
+            // `done` when it checks the `complete` flag on the way out, and will
+            // treat the send as a failure.
+            if let Some(mut slot) = self.data.try_lock() {
+                if let Some(data) = slot.take() {
+                    return Ok(data.into());
+                }
+            }
+            Err(Canceled)
+        } else {
+            Ok(Async::NotReady)
+        }
+    }
+
+    fn drop_rx(&self) {
+        // Indicate to the `Sender` that we're done, so any future calls to
+        // `poll_cancel` are weeded out.
+        self.complete.store(true, SeqCst);
+
+        // If we've blocked a task then there's no need for it to stick around,
+        // so we need to drop it. If this lock acquisition fails, though, then
+        // it's just because our `Sender` is trying to take the task, so we
+        // let them take care of that.
+        if let Some(mut slot) = self.rx_task.try_lock() {
+            let task = slot.take();
+            drop(slot);
+            drop(task);
+        }
+
+        // Finally, if our `Sender` wants to get notified of us going away, it
+        // would have stored something in `tx_task`. Here we try to peel that
+        // out and unpark it.
+        //
+        // Note that the `try_lock` here may fail, but only if the `Sender` is
+        // in the process of filling in the task. If that happens then we
+        // already flagged `complete` and they'll pick that up above.
+        if let Some(mut handle) = self.tx_task.try_lock() {
+            if let Some(task) = handle.take() {
+                drop(handle);
+                task.notify()
             }
         }
     }
 }
 
-/// Error returned from a `Receiver<T>` whenever the correponding `Sender<T>`
+impl<T> Sender<T> {
+    #[deprecated(note = "renamed to `send`", since = "0.1.11")]
+    #[doc(hidden)]
+    #[cfg(feature = "with-deprecated")]
+    pub fn complete(self, t: T) {
+        drop(self.send(t));
+    }
+
+    /// Completes this oneshot with a successful result.
+    ///
+    /// This function will consume `self` and indicate to the other end, the
+    /// `Receiver`, that the value provided is the result of the computation this
+    /// represents.
+    ///
+    /// If the value is successfully enqueued for the remote end to receive,
+    /// then `Ok(())` is returned. If the receiving end was deallocated before
+    /// this function was called, however, then `Err` is returned with the value
+    /// provided.
+    pub fn send(self, t: T) -> Result<(), T> {
+        self.inner.send(t)
+    }
+
+    /// Polls this `Sender` half to detect whether the `Receiver` this has
+    /// paired with has gone away.
+    ///
+    /// This function can be used to learn about when the `Receiver` (consumer)
+    /// half has gone away and nothing will be able to receive a message sent
+    /// from `send`.
+    ///
+    /// If `Ready` is returned then it means that the `Receiver` has disappeared
+    /// and the result this `Sender` would otherwise produce should no longer
+    /// be produced.
+    ///
+    /// If `NotReady` is returned then the `Receiver` is still alive and may be
+    /// able to receive a message if sent. The current task, however, is
+    /// scheduled to receive a notification if the corresponding `Receiver` goes
+    /// away.
+    ///
+    /// # Panics
+    ///
+    /// Like `Future::poll`, this function will panic if it's not called from
+    /// within the context of a task. In other words, this should only ever be
+    /// called from inside another future.
+    ///
+    /// If you're calling this function from a context that does not have a
+    /// task, then you can use the `is_canceled` API instead.
+    pub fn poll_cancel(&mut self) -> Poll<(), ()> {
+        self.inner.poll_cancel()
+    }
+
+    /// Tests to see whether this `Sender`'s corresponding `Receiver`
+    /// has gone away.
+    ///
+    /// This function can be used to learn about when the `Receiver` (consumer)
+    /// half has gone away and nothing will be able to receive a message sent
+    /// from `send`.
+    ///
+    /// Note that this function is intended to *not* be used in the context of a
+    /// future. If you're implementing a future you probably want to call the
+    /// `poll_cancel` function which will block the current task if the
+    /// cancellation hasn't happened yet. This can be useful when working on a
+    /// non-futures related thread, though, which would otherwise panic if
+    /// `poll_cancel` were called.
+    pub fn is_canceled(&self) -> bool {
+        self.inner.is_canceled()
+    }
+}
+
+impl<T> Drop for Sender<T> {
+    fn drop(&mut self) {
+        self.inner.drop_tx()
+    }
+}
+
+/// Error returned from a `Receiver<T>` whenever the corresponding `Sender<T>`
 /// is dropped.
 #[derive(Clone, Copy, PartialEq, Eq, Debug)]
 pub struct Canceled;
 
 impl fmt::Display for Canceled {
     fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
         write!(fmt, "oneshot canceled")
     }
@@ -248,94 +396,181 @@ impl Error for Canceled {
 impl<T> Receiver<T> {
     /// Gracefully close this receiver, preventing sending any future messages.
     ///
     /// Any `send` operation which happens after this method returns is
     /// guaranteed to fail. Once this method is called the normal `poll` method
     /// can be used to determine whether a message was actually sent or not. If
     /// `Canceled` is returned from `poll` then no message was sent.
     pub fn close(&mut self) {
-        // Flag our completion and then attempt to wake up the sender if it's
-        // blocked. See comments in `drop` below for more info
-        self.inner.complete.store(true, SeqCst);
-        if let Some(mut handle) = self.inner.tx_task.try_lock() {
-            if let Some(task) = handle.take() {
-                drop(handle);
-                task.unpark()
-            }
-        }
+        self.inner.close_rx()
     }
 }
 
 impl<T> Future for Receiver<T> {
     type Item = T;
     type Error = Canceled;
 
     fn poll(&mut self) -> Poll<T, Canceled> {
-        let mut done = false;
-
-        // Check to see if some data has arrived. If it hasn't then we need to
-        // block our task.
-        //
-        // Note that the acquisition of the `rx_task` lock might fail below, but
-        // the only situation where this can happen is during `Sender::drop`
-        // when we are indeed completed already. If that's happening then we
-        // know we're completed so keep going.
-        if self.inner.complete.load(SeqCst) {
-            done = true;
-        } else {
-            let task = task::park();
-            match self.inner.rx_task.try_lock() {
-                Some(mut slot) => *slot = Some(task),
-                None => done = true,
-            }
-        }
-
-        // If we're `done` via one of the paths above, then look at the data and
-        // figure out what the answer is. If, however, we stored `rx_task`
-        // successfully above we need to check again if we're completed in case
-        // a message was sent while `rx_task` was locked and couldn't notify us
-        // otherwise.
-        //
-        // If we're not done, and we're not complete, though, then we've
-        // successfully blocked our task and we return `NotReady`.
-        if done || self.inner.complete.load(SeqCst) {
-            match self.inner.data.try_lock().unwrap().take() {
-                Some(data) => Ok(data.into()),
-                None => Err(Canceled),
-            }
-        } else {
-            Ok(Async::NotReady)
-        }
+        self.inner.recv()
     }
 }
 
 impl<T> Drop for Receiver<T> {
     fn drop(&mut self) {
-        // Indicate to the `Sender` that we're done, so any future calls to
-        // `poll_cancel` are weeded out.
-        self.inner.complete.store(true, SeqCst);
+        self.inner.drop_rx()
+    }
+}
+
+/// Handle returned from the `spawn` function.
+///
+/// This handle is a future representing the completion of a different future on
+/// a separate executor. Created through the `oneshot::spawn` function this
+/// handle will resolve when the future provided to `spawn` resolves on the
+/// `Executor` instance provided to that function.
+///
+/// If this handle is dropped then the future will automatically no longer be
+/// polled and is scheduled to be dropped. This can be canceled with the
+/// `forget` function, however.
+pub struct SpawnHandle<T, E> {
+    rx: Arc<ExecuteInner<Result<T, E>>>,
+}
+
+struct ExecuteInner<T> {
+    inner: Inner<T>,
+    keep_running: AtomicBool,
+}
+
+/// Type of future which `Execute` instances below must be able to spawn.
+pub struct Execute<F: Future> {
+    future: F,
+    tx: Arc<ExecuteInner<Result<F::Item, F::Error>>>,
+}
 
-        // If we've blocked a task then there's no need for it to stick around,
-        // so we need to drop it. If this lock acquisition fails, though, then
-        // it's just because our `Sender` is trying to take the task, so we
-        // let them take care of that.
-        if let Some(mut slot) = self.inner.rx_task.try_lock() {
-            let task = slot.take();
-            drop(slot);
-            drop(task);
-        }
+/// Spawns a `future` onto the instance of `Executor` provided, `executor`,
+/// returning a handle representing the completion of the future.
+///
+/// The `SpawnHandle` returned is a future that is a proxy for `future` itself.
+/// When `future` completes on `executor` then the `SpawnHandle` will itself be
+/// resolved.  Internally `SpawnHandle` contains a `oneshot` channel and is
+/// thus safe to send across threads.
+///
+/// The `future` will be canceled if the `SpawnHandle` is dropped. If this is
+/// not desired then the `SpawnHandle::forget` function can be used to continue
+/// running the future to completion.
+///
+/// # Panics
+///
+/// This function will panic if the instance of `Spawn` provided is unable to
+/// spawn the `future` provided.
+///
+/// If the provided instance of `Spawn` does not actually run `future` to
+/// completion, then the returned handle may panic when polled. Typically this
+/// is not a problem, though, as most instances of `Spawn` will run futures to
+/// completion.
+///
+/// Note that the returned future will likely panic if the `futures` provided
+/// panics. If a future running on an executor panics that typically means that
+/// the executor drops the future, which falls into the above case of not
+/// running the future to completion essentially.
+pub fn spawn<F, E>(future: F, executor: &E) -> SpawnHandle<F::Item, F::Error>
+    where F: Future,
+          E: Executor<Execute<F>>,
+{
+    let data = Arc::new(ExecuteInner {
+        inner: Inner::new(),
+        keep_running: AtomicBool::new(false),
+    });
+    executor.execute(Execute {
+        future: future,
+        tx: data.clone(),
+    }).expect("failed to spawn future");
+    SpawnHandle { rx: data }
+}
 
-        // Finally, if our `Sender` wants to get notified of us going away, it
-        // would have stored something in `tx_task`. Here we try to peel that
-        // out and unpark it.
-        //
-        // Note that the `try_lock` here may fail, but only if the `Sender` is
-        // in the process of filling in the task. If that happens then we
-        // already flagged `complete` and they'll pick that up above.
-        if let Some(mut handle) = self.inner.tx_task.try_lock() {
-            if let Some(task) = handle.take() {
-                drop(handle);
-                task.unpark()
-            }
+/// Spawns a function `f` onto the `Spawn` instance provided `s`.
+///
+/// For more information see the `spawn` function in this module. This function
+/// is just a thin wrapper around `spawn` which will execute the closure on the
+/// executor provided and then complete the future that the closure returns.
+pub fn spawn_fn<F, R, E>(f: F, executor: &E) -> SpawnHandle<R::Item, R::Error>
+    where F: FnOnce() -> R,
+          R: IntoFuture,
+          E: Executor<Execute<Lazy<F, R>>>,
+{
+    spawn(lazy(f), executor)
+}
+
+impl<T, E> SpawnHandle<T, E> {
+    /// Drop this future without canceling the underlying future.
+    ///
+    /// When `SpawnHandle` is dropped, the spawned future will be canceled as
+    /// well if the future hasn't already resolved. This function can be used
+    /// when to drop this future but keep executing the underlying future.
+    pub fn forget(self) {
+        self.rx.keep_running.store(true, SeqCst);
+    }
+}
+
+impl<T, E> Future for SpawnHandle<T, E> {
+    type Item = T;
+    type Error = E;
+
+    fn poll(&mut self) -> Poll<T, E> {
+        match self.rx.inner.recv() {
+            Ok(Async::Ready(Ok(t))) => Ok(t.into()),
+            Ok(Async::Ready(Err(e))) => Err(e),
+            Ok(Async::NotReady) => Ok(Async::NotReady),
+            Err(_) => panic!("future was canceled before completion"),
         }
     }
 }
+
+impl<T: fmt::Debug, E: fmt::Debug> fmt::Debug for SpawnHandle<T, E> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_struct("SpawnHandle")
+         .finish()
+    }
+}
+
+impl<T, E> Drop for SpawnHandle<T, E> {
+    fn drop(&mut self) {
+        self.rx.inner.drop_rx();
+    }
+}
+
+impl<F: Future> Future for Execute<F> {
+    type Item = ();
+    type Error = ();
+
+    fn poll(&mut self) -> Poll<(), ()> {
+        // If we're canceled then we may want to bail out early.
+        //
+        // If the `forget` function was called, though, then we keep going.
+        if self.tx.inner.poll_cancel().unwrap().is_ready() {
+            if !self.tx.keep_running.load(SeqCst) {
+                return Ok(().into())
+            }
+        }
+
+        let result = match self.future.poll() {
+            Ok(Async::NotReady) => return Ok(Async::NotReady),
+            Ok(Async::Ready(t)) => Ok(t),
+            Err(e) => Err(e),
+        };
+        drop(self.tx.inner.send(result));
+        Ok(().into())
+    }
+}
+
+impl<F: Future + fmt::Debug> fmt::Debug for Execute<F> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_struct("Execute")
+         .field("future", &self.future)
+         .finish()
+    }
+}
+
+impl<F: Future> Drop for Execute<F> {
+    fn drop(&mut self) {
+        self.tx.inner.drop_tx();
+    }
+}
--- a/third_party/rust/futures/src/task.rs
+++ b/third_party/rust/futures/src/task.rs
@@ -13,28 +13,34 @@
 //!
 //! Note that libraries typically should not manage tasks themselves, but rather
 //! leave that to event loops and other "executors" (see the `executor` module),
 //! or by using the `wait` method to create and execute a task directly on the
 //! current thread.
 //!
 //! More information about the task model can be found [online at tokio.rs].
 //!
-//! [online at tokio.rs]: https://tokio.rs/docs/going-deeper/futures-model/
+//! [online at tokio.rs]: https://tokio.rs/docs/going-deeper-futures/futures-model/
 //!
 //! ## Functions
 //!
-//! There is an important bare function in this module: `park`. The `park`
-//! function is similar to the standard library's `thread::park` method where it
-//! returns a handle to wake up a task at a later date (via an `unpark` method).
+//! There is an important bare function in this module: `current`. The
+//! `current` function returns a handle to the currently running task, panicking
+//! if one isn't present. This handle is then used to later notify the task that
+//! it's ready to make progress through the `Task::notify` method.
 
 #[doc(hidden)]
 #[deprecated(since = "0.1.4", note = "import through the executor module instead")]
-#[cfg(feature = "with-deprecated")]
-pub use task_impl::{Spawn, spawn, Unpark, Executor, Run};
+#[cfg(all(feature = "with-deprecated", feature = "use_std"))]
+#[allow(deprecated)]
+pub use task_impl::{Spawn, spawn, Unpark, Executor, Run, park};
 
-pub use task_impl::{Task, LocalKey, park, with_unpark_event, UnparkEvent, EventSet};
+pub use task_impl::{Task, AtomicTask, current, init};
+
+#[allow(deprecated)]
+#[cfg(feature = "use_std")]
+pub use task_impl::{LocalKey, with_unpark_event, UnparkEvent, EventSet};
 
 #[doc(hidden)]
 #[deprecated(since = "0.1.4", note = "import through the executor module instead")]
-#[cfg(feature = "with-deprecated")]
+#[cfg(all(feature = "with-deprecated", feature = "use_std"))]
 #[allow(deprecated)]
 pub use task_impl::TaskRc;
new file mode 100644
--- /dev/null
+++ b/third_party/rust/futures/src/task_impl/atomic_task.rs
@@ -0,0 +1,191 @@
+#![allow(dead_code)]
+
+use super::Task;
+
+use core::fmt;
+use core::cell::UnsafeCell;
+use core::sync::atomic::AtomicUsize;
+use core::sync::atomic::Ordering::{Acquire, Release};
+
+/// A synchronization primitive for task notification.
+///
+/// `AtomicTask` will coordinate concurrent notifications with the consumer
+/// potentially "updating" the underlying task to notify. This is useful in
+/// scenarios where a computation completes in another thread and wants to
+/// notify the consumer, but the consumer is in the process of being migrated to
+/// a new logical task.
+///
+/// Consumers should call `register` before checking the result of a computation
+/// and producers should call `notify` after producing the computation (this
+/// differs from the usual `thread::park` pattern). It is also permitted for
+/// `notify` to be called **before** `register`. This results in a no-op.
+///
+/// A single `AtomicTask` may be reused for any number of calls to `register` or
+/// `notify`.
+///
+/// `AtomicTask` does not provide any memory ordering guarantees, as such the
+/// user should use caution and use other synchronization primitives to guard
+/// the result of the underlying computation.
+pub struct AtomicTask {
+    state: AtomicUsize,
+    task: UnsafeCell<Option<Task>>,
+}
+
+/// Initial state, the `AtomicTask` is currently not being used.
+///
+/// The value `2` is picked specifically because it between the write lock &
+/// read lock values. Since the read lock is represented by an incrementing
+/// counter, this enables an atomic fetch_sub operation to be used for releasing
+/// a lock.
+const WAITING: usize = 2;
+
+/// The `register` function has determined that the task is no longer current.
+/// This implies that `AtomicTask::register` is being called from a different
+/// task than is represented by the currently stored task. The write lock is
+/// obtained to update the task cell.
+const LOCKED_WRITE: usize = 0;
+
+/// At least one call to `notify` happened concurrently to `register` updating
+/// the task cell. This state is detected when `register` exits the mutation
+/// code and signals to `register` that it is responsible for notifying its own
+/// task.
+const LOCKED_WRITE_NOTIFIED: usize = 1;
+
+
+/// The `notify` function has locked access to the task cell for notification.
+///
+/// The constant is left here mostly for documentation reasons.
+#[allow(dead_code)]
+const LOCKED_READ: usize = 3;
+
+impl AtomicTask {
+    /// Create an `AtomicTask` initialized with the given `Task`
+    pub fn new() -> AtomicTask {
+        // Make sure that task is Sync
+        trait AssertSync: Sync {}
+        impl AssertSync for Task {}
+
+        AtomicTask {
+            state: AtomicUsize::new(WAITING),
+            task: UnsafeCell::new(None),
+        }
+    }
+
+    /// Registers the current task to be notified on calls to `notify`.
+    ///
+    /// The new task will take place of any previous tasks that were registered
+    /// by previous calls to `register`. Any calls to `notify` that happen after
+    /// a call to `register` (as defined by the memory ordering rules), will
+    /// notify the `register` caller's task.
+    ///
+    /// It is safe to call `register` with multiple other threads concurrently
+    /// calling `notify`. This will result in the `register` caller's current
+    /// task being notified once.
+    ///
+    /// This function is safe to call concurrently, but this is generally a bad
+    /// idea. Concurrent calls to `register` will attempt to register different
+    /// tasks to be notified. One of the callers will win and have its task set,
+    /// but there is no guarantee as to which caller will succeed.
+    pub fn register(&self) {
+        // Get a new task handle
+        let task = super::current();
+
+        match self.state.compare_and_swap(WAITING, LOCKED_WRITE, Acquire) {
+            WAITING => {
+                unsafe {
+                    // Locked acquired, update the task cell
+                    *self.task.get() = Some(task);
+
+                    // Release the lock. If the state transitioned to
+                    // `LOCKED_NOTIFIED`, this means that an notify has been
+                    // signaled, so notify the task.
+                    if LOCKED_WRITE_NOTIFIED == self.state.swap(WAITING, Release) {
+                        (*self.task.get()).as_ref().unwrap().notify();
+                    }
+                }
+            }
+            LOCKED_WRITE | LOCKED_WRITE_NOTIFIED => {
+                // A thread is concurrently calling `register`. This shouldn't
+                // happen as it doesn't really make much sense, but it isn't
+                // unsafe per se. Since two threads are concurrently trying to
+                // update the task, it's undefined which one "wins" (no ordering
+                // guarantees), so we can just do nothing.
+            }
+            state => {
+                debug_assert!(state != LOCKED_WRITE, "unexpected state LOCKED_WRITE");
+                debug_assert!(state != LOCKED_WRITE_NOTIFIED, "unexpected state LOCKED_WRITE_NOTIFIED");
+
+                // Currently in a read locked state, this implies that `notify`
+                // is currently being called on the old task handle. So, we call
+                // notify on the new task handle
+                task.notify();
+            }
+        }
+    }
+
+    /// Notifies the task that last called `register`.
+    ///
+    /// If `register` has not been called yet, then this does nothing.
+    pub fn notify(&self) {
+        let mut curr = WAITING;
+
+        loop {
+            if curr == LOCKED_WRITE {
+                // Transition the state to LOCKED_NOTIFIED
+                let actual = self.state.compare_and_swap(LOCKED_WRITE, LOCKED_WRITE_NOTIFIED, Release);
+
+                if curr == actual {
+                    // Success, return
+                    return;
+                }
+
+                // update current state variable and try again
+                curr = actual;
+
+            } else if curr == LOCKED_WRITE_NOTIFIED {
+                // Currently in `LOCKED_WRITE_NOTIFIED` state, nothing else to do.
+                return;
+
+            } else {
+                // Currently in a LOCKED_READ state, so attempt to increment the
+                // lock count.
+                let actual = self.state.compare_and_swap(curr, curr + 1, Acquire);
+
+                // Locked acquired
+                if actual == curr {
+                    // Notify the task
+                    unsafe {
+                        if let Some(ref task) = *self.task.get() {
+                            task.notify();
+                        }
+                    }
+
+                    // Release the lock
+                    self.state.fetch_sub(1, Release);
+
+                    // Done
+                    return;
+                }
+
+                // update current state variable and try again
+                curr = actual;
+
+            }
+        }
+    }
+}
+
+impl Default for AtomicTask {
+    fn default() -> Self {
+        AtomicTask::new()
+    }
+}
+
+impl fmt::Debug for AtomicTask {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        write!(fmt, "AtomicTask")
+    }
+}
+
+unsafe impl Send for AtomicTask {}
+unsafe impl Sync for AtomicTask {}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/futures/src/task_impl/core.rs
@@ -0,0 +1,173 @@
+#![cfg_attr(feature = "use_std", allow(dead_code))]
+
+use core::marker;
+use core::mem;
+use core::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT};
+use core::sync::atomic::Ordering::{SeqCst, Relaxed};
+
+use super::{BorrowedTask, NotifyHandle};
+
+pub struct LocalKey;
+pub struct LocalMap;
+pub fn local_map() -> LocalMap { LocalMap }
+
+#[derive(Copy, Clone)]
+pub struct BorrowedEvents<'a>(marker::PhantomData<&'a ()>);
+
+#[derive(Copy, Clone)]
+pub struct BorrowedUnpark<'a> {
+    f: &'a Fn() -> NotifyHandle,
+    id: usize,
+}
+
+pub struct TaskUnpark {
+    handle: NotifyHandle,
+    id: usize,
+}
+
+#[derive(Clone)]
+pub struct UnparkEvents;
+
+impl<'a> BorrowedEvents<'a> {
+    pub fn new() -> BorrowedEvents<'a> {
+        BorrowedEvents(marker::PhantomData)
+    }
+
+    pub fn to_owned(&self) -> UnparkEvents {
+        UnparkEvents
+    }
+}
+
+impl<'a> BorrowedUnpark<'a> {
+    #[inline]
+    pub fn new(f: &'a Fn() -> NotifyHandle, id: usize) -> BorrowedUnpark<'a> {
+        BorrowedUnpark { f: f, id: id }
+    }
+
+    #[inline]
+    pub fn to_owned(&self) -> TaskUnpark {
+        let handle = (self.f)();
+        let id = handle.clone_id(self.id);
+        TaskUnpark { handle: handle, id: id }
+    }
+}
+
+impl UnparkEvents {
+    pub fn notify(&self) {}
+
+    pub fn will_notify(&self, _other: &BorrowedEvents) -> bool {
+        true
+    }
+}
+
+impl TaskUnpark {
+    pub fn notify(&self) {
+        self.handle.notify(self.id);
+    }
+
+    pub fn will_notify(&self, other: &BorrowedUnpark) -> bool {
+        self.id == other.id && self.handle.inner == (other.f)().inner
+    }
+}
+
+impl Clone for TaskUnpark {
+    fn clone(&self) -> TaskUnpark {
+        let handle = self.handle.clone();
+        let id = handle.clone_id(self.id);
+        TaskUnpark { handle: handle, id: id }
+    }
+}
+
+impl Drop for TaskUnpark {
+    fn drop(&mut self) {
+        self.handle.drop_id(self.id);
+    }
+}
+
+static GET: AtomicUsize = ATOMIC_USIZE_INIT;
+static SET: AtomicUsize = ATOMIC_USIZE_INIT;
+
+/// Initialize the `futures` task system.
+///
+/// This function is an unsafe low-level implementation detail typically only
+/// used by crates using `futures` in `no_std` context. Users of this crate
+/// who also use the standard library never need to invoke this function.
+///
+/// The task system in the `futures` crate relies on some notion of "local
+/// storage" for the running thread and/or context. The `task::current` function
+/// can get invoked in any context, for example, and needs to be able to return
+/// a `Task`. Typically with the standard library this is supported with
+/// thread-local-storage, but this is not available in `no_std` contexts!
+///
+/// This function is provided to allow `no_std` contexts to continue to be able
+/// to use the standard task system in this crate. The functions provided here
+/// will be used as-if they were thread-local-storage getters/setters. The `get`
+/// function provided is used to retrieve the current thread-local value of the
+/// task system's pointer, returning null if not initialized. The `set` function
+/// updates the value of the pointer.
+///
+/// # Return value
+///
+/// This function will return whether initialization succeeded or not. This
+/// function can be called concurrently and only the first invocation will
+/// succeed. If `false` is returned then the `get` and `set` pointers provided
+/// were *not* registered for use with the task system, but if `true` was
+/// provided then they will be called when the task system is used.
+///
+/// Note that while safe to call concurrently it's recommended to still perform
+/// external synchronization when calling this function. This task system is
+/// not guaranteed to be ready to go until a call to this function returns
+/// `true`. In other words, if you call this function and see `false`, the
+/// task system may not be ready to go as another thread may still be calling
+/// `init`.
+///
+/// # Unsafety
+///
+/// This function is unsafe due to the requirements on the behavior of the
+/// `get` and `set` functions. The pointers returned from these functions must
+/// reflect the semantics specified above and must also be thread-local,
+/// depending on the definition of a "thread" in the calling context.
+pub unsafe fn init(get: fn() -> *mut u8, set: fn(*mut u8)) -> bool {
+    if GET.compare_exchange(0, get as usize, SeqCst, SeqCst).is_ok() {
+        SET.store(set as usize, SeqCst);
+        true
+    } else {
+        false
+    }
+}
+
+#[inline]
+pub fn get_ptr() -> Option<*mut u8> {
+    match GET.load(Relaxed) {
+        0 => None,
+        n => Some(unsafe { mem::transmute::<usize, fn() -> *mut u8>(n)() }),
+    }
+}
+
+#[cfg(feature = "use_std")]
+#[inline]
+pub fn is_get_ptr(f: usize) -> bool {
+    GET.load(Relaxed) == f
+}
+
+pub fn set<'a, F, R>(task: &BorrowedTask<'a>, f: F) -> R
+    where F: FnOnce() -> R
+{
+    let set = match SET.load(Relaxed) {
+        0 => panic!("not initialized"),
+        n => unsafe { mem::transmute::<usize, fn(*mut u8)>(n) },
+    };
+
+    struct Reset(fn(*mut u8), *mut u8);
+
+    impl Drop for Reset {
+        #[inline]
+        fn drop(&mut self) {
+            (self.0)(self.1);
+        }
+    }
+
+    let _reset = Reset(set, get_ptr().unwrap());
+    set(task as *const _ as *mut u8);
+    f()
+}
deleted file mode 100644
--- a/third_party/rust/futures/src/task_impl/data.rs
+++ /dev/null
@@ -1,129 +0,0 @@
-use std::prelude::v1::*;
-
-use std::any::TypeId;
-use std::cell::RefCell;
-use std::hash::{BuildHasherDefault, Hasher};
-use std::collections::HashMap;
-
-/// A macro to create a `static` of type `LocalKey`
-///
-/// This macro is intentionally similar to the `thread_local!`, and creates a
-/// `static` which has a `with` method to access the data on a task.
-///
-/// The data associated with each task local is per-task, so different tasks
-/// will contain different values.
-#[macro_export]
-macro_rules! task_local {
-    (static $NAME:ident: $t:ty = $e:expr) => (
-        static $NAME: $crate::task::LocalKey<$t> = {
-            fn __init() -> $t { $e }
-            fn __key() -> ::std::any::TypeId {
-                struct __A;
-                ::std::any::TypeId::of::<__A>()
-            }
-            $crate::task::LocalKey {
-                __init: __init,
-                __key: __key,
-            }
-        };
-    )
-}
-
-pub type LocalMap = RefCell<HashMap<TypeId,
-                                    Box<Opaque>,
-                                    BuildHasherDefault<IdHasher>>>;
-
-pub fn local_map() -> LocalMap {
-    RefCell::new(HashMap::default())
-}
-
-pub trait Opaque: Send {}
-impl<T: Send> Opaque for T {}
-
-/// A key for task-local data stored in a future's task.
-///
-/// This type is generated by the `task_local!` macro and performs very
-/// similarly to the `thread_local!` macro and `std::thread::LocalKey` types.
-/// Data associated with a `LocalKey<T>` is stored inside of a future's task,
-/// and the data is destroyed when the future is completed and the task is
-/// destroyed.
-///
-/// Task-local data can migrate between threads and hence requires a `Send`
-/// bound. Additionally, task-local data also requires the `'static` bound to
-/// ensure it lives long enough. When a key is accessed for the first time the
-/// task's data is initialized with the provided initialization expression to
-/// the macro.
-#[derive(Debug)]
-pub struct LocalKey<T> {
-    // "private" fields which have to be public to get around macro hygiene, not
-    // included in the stability story for this type. Can change at any time.
-    #[doc(hidden)]
-    pub __key: fn() -> TypeId,
-    #[doc(hidden)]
-    pub __init: fn() -> T,
-}
-
-pub struct IdHasher {
-    id: u64,
-}
-
-impl Default for IdHasher {
-    fn default() -> IdHasher {
-        IdHasher { id: 0 }
-    }
-}
-
-impl Hasher for IdHasher {
-    fn write(&mut self, _bytes: &[u8]) {
-        // TODO: need to do something sensible
-        panic!("can only hash u64");
-    }
-
-    fn write_u64(&mut self, u: u64) {
-        self.id = u;
-    }
-
-    fn finish(&self) -> u64 {
-        self.id
-    }
-}
-
-impl<T: Send + 'static> LocalKey<T> {
-    /// Access this task-local key, running the provided closure with a
-    /// reference to the value.
-    ///
-    /// This function will access this task-local key to retrieve the data
-    /// associated with the current task and this key. If this is the first time
-    /// this key has been accessed on this task, then the key will be
-    /// initialized with the initialization expression provided at the time the
-    /// `task_local!` macro was called.
-    ///
-    /// The provided closure will be provided a shared reference to the
-    /// underlying data associated with this task-local-key. The data itself is
-    /// stored inside of the current task.
-    ///
-    /// # Panics
-    ///
-    /// This function can possibly panic for a number of reasons:
-    ///
-    /// * If there is not a current task.
-    /// * If the initialization expression is run and it panics
-    /// * If the closure provided panics
-    pub fn with<F, R>(&'static self, f: F) -> R
-        where F: FnOnce(&T) -> R
-    {
-        let key = (self.__key)();
-        super::with(|task| {
-            let raw_pointer = {
-                let mut data = task.map.borrow_mut();
-                let entry = data.entry(key).or_insert_with(|| {
-                    Box::new((self.__init)())
-                });
-                &**entry as *const Opaque as *const T
-            };
-            unsafe {
-                f(&*raw_pointer)
-            }
-        })
-    }
-}
--- a/third_party/rust/futures/src/task_impl/mod.rs
+++ b/third_party/rust/futures/src/task_impl/mod.rs
@@ -1,604 +1,689 @@
-use std::prelude::v1::*;
+use core::fmt;
+use core::marker::PhantomData;
+
+use {Poll, Future, Stream, Sink, StartSend};
 
-use std::cell::Cell;
-use std::fmt;
-use std::mem;
-use std::sync::Arc;
-use std::sync::atomic::{Ordering, AtomicBool, AtomicUsize, ATOMIC_USIZE_INIT};
-use std::thread;
+mod atomic_task;
+pub use self::atomic_task::AtomicTask;
 
-use {Poll, Future, Async, Stream, Sink, StartSend, AsyncSink};
-use future::BoxFuture;
+mod core;
 
-mod unpark_mutex;
-use self::unpark_mutex::UnparkMutex;
+#[cfg(feature = "use_std")]
+mod std;
+#[cfg(feature = "use_std")]
+pub use self::std::*;
+#[cfg(not(feature = "use_std"))]
+pub use self::core::*;
 
-mod task_rc;
-mod data;
-#[allow(deprecated)]
-#[cfg(feature = "with-deprecated")]
-pub use self::task_rc::TaskRc;
-pub use self::data::LocalKey;
-
-struct BorrowedTask<'a> {
+pub struct BorrowedTask<'a> {
     id: usize,
-    unpark: &'a Arc<Unpark>,
-    map: &'a data::LocalMap,
-    events: Events,
+    unpark: BorrowedUnpark<'a>,
+    events: BorrowedEvents<'a>,
+    // Task-local storage
+    map: &'a LocalMap,
 }
 
-thread_local!(static CURRENT_TASK: Cell<*const BorrowedTask<'static>> = {
-    Cell::new(0 as *const _)
-});
+fn fresh_task_id() -> usize {
+    use core::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
 
-fn fresh_task_id() -> usize {
     // TODO: this assert is a real bummer, need to figure out how to reuse
     //       old IDs that are no longer in use.
+    //
+    // Note, though, that it is intended that these ids go away entirely
+    // eventually, see the comment on `is_current` below.
     static NEXT_ID: AtomicUsize = ATOMIC_USIZE_INIT;
     let id = NEXT_ID.fetch_add(1, Ordering::Relaxed);
     assert!(id < usize::max_value() / 2,
             "too many previous tasks have been allocated");
     id
 }
 
-fn set<'a, F, R>(task: &BorrowedTask<'a>, f: F) -> R
-    where F: FnOnce() -> R
-{
-    struct Reset(*const BorrowedTask<'static>);
-    impl Drop for Reset {
-        fn drop(&mut self) {
-            CURRENT_TASK.with(|c| c.set(self.0));
-        }
-    }
-
-    CURRENT_TASK.with(move |c| {
-        let _reset = Reset(c.get());
-        let task = unsafe {
-            mem::transmute::<&BorrowedTask<'a>,
-                             *const BorrowedTask<'static>>(task)
-        };
-        c.set(task);
-        f()
-    })
-}
-
 fn with<F: FnOnce(&BorrowedTask) -> R, R>(f: F) -> R {
-    let task = CURRENT_TASK.with(|c| c.get());
-    assert!(!task.is_null(), "no Task is currently running");
     unsafe {
-        f(&*task)
+        let task = get_ptr().expect("no Task is currently running");
+        assert!(!task.is_null(), "no Task is currently running");
+        f(&*(task as *const BorrowedTask))
     }
 }
 
 /// A handle to a "task", which represents a single lightweight "thread" of
 /// execution driving a future to completion.
 ///
 /// In general, futures are composed into large units of work, which are then
 /// spawned as tasks onto an *executor*. The executor is responsible for polling
 /// the future as notifications arrive, until the future terminates.
 ///
-/// This is obtained by the `task::park` function.
+/// This is obtained by the `task::current` function.
 #[derive(Clone)]
 pub struct Task {
     id: usize,
-    unpark: Arc<Unpark>,
-    events: Events,
+    unpark: TaskUnpark,
+    events: UnparkEvents,
 }
 
-fn _assert_kinds() {
-    fn _assert_send<T: Send>() {}
-    _assert_send::<Task>();
-}
+trait AssertSend: Send {}
+impl AssertSend for Task {}
 
-/// Returns a handle to the current task to call `unpark` at a later date.
-///
-/// This function is similar to the standard library's `thread::park` function
-/// except that it won't block the current thread but rather the current future
-/// that is being executed.
+/// Returns a handle to the current task to call `notify` at a later date.
 ///
 /// The returned handle implements the `Send` and `'static` bounds and may also
 /// be cheaply cloned. This is useful for squirreling away the handle into a
 /// location which is then later signaled that a future can make progress.
 ///
 /// Implementations of the `Future` trait typically use this function if they
 /// would otherwise perform a blocking operation. When something isn't ready
-/// yet, this `park` function is called to acquire a handle to the current
-/// task, and then the future arranges it such that when the block operation
-/// otherwise finishes (perhaps in the background) it will `unpark` the returned
-/// handle.
+/// yet, this `current` function is called to acquire a handle to the current
+/// task, and then the future arranges it such that when the blocking operation
+/// otherwise finishes (perhaps in the background) it will `notify` the
+/// returned handle.
 ///
 /// It's sometimes necessary to pass extra information to the task when
-/// unparking it, so that the task knows something about *why* it was woken. See
-/// the `with_unpark_event` for details on how to do this.
-///
-/// # Panics
-///
-/// This function will panic if a task is not currently being executed. That
-/// is, this method can be dangerous to call outside of an implementation of
-/// `poll`.
-pub fn park() -> Task {
-    with(|task| {
-        Task {
-            id: task.id,
-            events: task.events.clone(),
-            unpark: task.unpark.clone(),
-        }
-    })
-}
-
-impl Task {
-    /// Indicate that the task should attempt to poll its future in a timely
-    /// fashion.
-    ///
-    /// It's typically guaranteed that, for each call to `unpark`, `poll` will
-    /// be called at least once subsequently (unless the task has terminated).
-    /// If the task is currently polling its future when `unpark` is called, it
-    /// must poll the future *again* afterwards, ensuring that all relevant
-    /// events are eventually observed by the future.
-    pub fn unpark(&self) {
-        self.events.trigger();
-        self.unpark.unpark();
-    }
-
-    /// Returns `true` when called from within the context of the task. In
-    /// other words, the task is currently running on the thread calling the
-    /// function.
-    pub fn is_current(&self) -> bool {
-        with(|current| current.id == self.id)
-    }
-}
-
-impl fmt::Debug for Task {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        f.debug_struct("Task")
-         .field("id", &self.id)
-         .finish()
-    }
-}
-
-/// For the duration of the given callback, add an "unpark event" to be
-/// triggered when the task handle is used to unpark the task.
-///
-/// Unpark events are used to pass information about what event caused a task to
-/// be unparked. In some cases, tasks are waiting on a large number of possible
-/// events, and need precise information about the wakeup to avoid extraneous
-/// polling.
-///
-/// Every `Task` handle comes with a set of unpark events which will fire when
-/// `unpark` is called. When fired, these events insert an identifer into a
-/// concurrent set, which the task can read from to determine what events
-/// occurred.
-///
-/// This function immediately invokes the closure, `f`, but arranges things so
-/// that `task::park` will produce a `Task` handle that includes the given
-/// unpark event.
+/// unparking it, so that the task knows something about *why* it was woken.
+/// See the `FutureQueue` documentation for details on how to do this.
 ///
 /// # Panics
 ///
 /// This function will panic if a task is not currently being executed. That
 /// is, this method can be dangerous to call outside of an implementation of
 /// `poll`.
-pub fn with_unpark_event<F, R>(event: UnparkEvent, f: F) -> R
-    where F: FnOnce() -> R
-{
-    with(|task| {
-        let new_task = BorrowedTask {
-            id: task.id,
-            unpark: task.unpark,
-            events: task.events.with_event(event),
-            map: task.map,
-        };
-        set(&new_task, f)
+pub fn current() -> Task {
+    with(|borrowed| {
+        let unpark = borrowed.unpark.to_owned();
+        let events = borrowed.events.to_owned();
+
+        Task {
+            id: borrowed.id,
+            unpark: unpark,
+            events: events,
+        }
     })
 }
 
-#[derive(Clone)]
-/// A set insertion to trigger upon `unpark`.
-///
-/// Unpark events are used to communicate information about *why* an unpark
-/// occured, in particular populating sets with event identifiers so that the
-/// unparked task can avoid extraneous polling. See `with_unpark_event` for
-/// more.
-pub struct UnparkEvent {
-    set: Arc<EventSet>,
-    item: usize,
+#[doc(hidden)]
+#[deprecated(note = "renamed to `current`")]
+pub fn park() -> Task {
+    current()
 }
 
-impl UnparkEvent {
-    /// Construct an unpark event that will insert `id` into `set` when
-    /// triggered.
-    pub fn new(set: Arc<EventSet>, id: usize) -> UnparkEvent {
-        UnparkEvent {
-            set: set,
-            item: id,
-        }
+impl Task {
+    /// Indicate that the task should attempt to poll its future in a timely
+    /// fashion.
+    ///
+    /// It's typically guaranteed that, for each call to `notify`, `poll` will
+    /// be called at least once subsequently (unless the future has terminated).
+    /// If the task is currently polling its future when `notify` is called, it
+    /// must poll the future *again* afterwards, ensuring that all relevant
+    /// events are eventually observed by the future.
+    pub fn notify(&self) {
+        self.events.notify();
+        self.unpark.notify();
+    }
+
+    #[doc(hidden)]
+    #[deprecated(note = "renamed to `notify`")]
+    pub fn unpark(&self) {
+        self.notify()
+    }
+
+    /// Returns `true` when called from within the context of the task.
+    ///
+    /// In other words, the task is currently running on the thread calling the
+    /// function. Note that this is currently, and has historically, been
+    /// implemented by tracking an `id` on every instance of `Spawn` created.
+    /// When a `Spawn` is being polled it stores in thread-local-storage the id
+    /// of the instance, and then `task::current` will return a `Task` that also
+    /// stores this id.
+    ///
+    /// The intention of this function was to answer questions like "if I
+    /// `notify` this task, is it equivalent to `task::current().notify()`?"
+    /// The answer "yes" may be able to avoid some extra work to block the
+    /// current task, such as sending a task along a channel or updating a
+    /// stored `Task` somewhere. An answer of "no" typically results in doing
+    /// the work anyway.
+    ///
+    /// Unfortunately this function has been somewhat buggy in the past and is
+    /// not intended to be supported in the future. Simply matching `id` does
+    /// not accurately take into account, for example, unpark events (now
+    /// deprecated, but still a feature). Thus many old users of this API
+    /// weren't fully accounting for the question they intended to ask.
+    ///
+    /// This API continues to be implemented but will in the future, e.g. in the
+    /// 0.1.x series of this crate, eventually return `false` unconditionally.
+    /// It is intended that this function will be removed in the next breaking
+    /// change of this crate. If you'd like to continue to be able to answer the
+    /// example question above, it's recommended you use the
+    /// `will_notify_current` method.
+    ///
+    /// If you've got questions about this though please let us know! We'd like
+    /// to learn about other use cases here that we did not consider.
+    ///
+    /// # Panics
+    ///
+    /// This function will panic if no current future is being polled.
+    #[deprecated(note = "intended to be removed, see docs for details")]
+    pub fn is_current(&self) -> bool {
+        with(|current| current.id == self.id)
+    }
+
+    /// This function is intended as a performance optimization for structures
+    /// which store a `Task` internally.
+    ///
+    /// The purpose of this function is to answer the question "if I `notify`
+    /// this task is it equivalent to `task::current().notify()`". An answer
+    /// "yes" may mean that you don't actually need to call `task::current()`
+    /// and store it, but rather you can simply leave a stored task in place. An
+    /// answer of "no" typically means that you need to call `task::current()`
+    /// and store it somewhere.
+    ///
+    /// As this is purely a performance optimization a valid implementation for
+    /// this function is to always return `false`. A best effort is done to
+    /// return `true` where possible, but false negatives may happen. Note that
+    /// this function will not return a false positive, however.
+    ///
+    /// # Panics
+    ///
+    /// This function will panic if no current future is being polled.
+    #[allow(deprecated)]
+    pub fn will_notify_current(&self) -> bool {
+        with(|current| {
+            self.unpark.will_notify(&current.unpark) &&
+                self.events.will_notify(&current.events)
+        })
     }
 }
 
-impl fmt::Debug for UnparkEvent {
+impl fmt::Debug for Task {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        f.debug_struct("UnparkEvent")
-         .field("set", &"...")
-         .field("item", &self.item)
+        f.debug_struct("Task")
          .finish()
     }
 }
 
-/// A concurrent set which allows for the insertion of `usize` values.
-///
-/// `EventSet`s are used to communicate precise information about the event(s)
-/// that trigged a task notification. See `task::with_unpark_event` for details.
-pub trait EventSet: Send + Sync + 'static {
-    /// Insert the given ID into the set
-    fn insert(&self, id: usize);
-}
-
-// A collection of UnparkEvents to trigger on `unpark`
-#[derive(Clone)]
-enum Events {
-    Zero,
-    One(UnparkEvent),
-    Lots(Vec<UnparkEvent>),
-}
-
-impl Events {
-    fn new() -> Events {
-        Events::Zero
-    }
-
-    fn trigger(&self) {
-        match *self {
-            Events::Zero => {}
-            Events::One(ref event) => event.set.insert(event.item),
-            Events::Lots(ref list) => {
-                for event in list {
-                    event.set.insert(event.item);
-                }
-            }
-        }
-    }
-
-    fn with_event(&self, event: UnparkEvent) -> Events {
-        let mut list = match *self {
-            Events::Zero => return Events::One(event),
-            Events::One(ref event) => vec![event.clone()],
-            Events::Lots(ref list) => list.clone(),
-        };
-        list.push(event);
-        Events::Lots(list)
-    }
-}
-
 /// Representation of a spawned future/stream.
 ///
 /// This object is returned by the `spawn` function in this module. This
 /// represents a "fused task and future", storing all necessary pieces of a task
 /// and owning the top-level future that's being driven as well.
 ///
 /// A `Spawn` can be poll'd for completion or execution of the current thread
 /// can be blocked indefinitely until a notification arrives. This can be used
 /// with either futures or streams, with different methods being available on
 /// `Spawn` depending which is used.
-pub struct Spawn<T> {
+pub struct Spawn<T: ?Sized> {
+    id: usize,
+    data: LocalMap,
     obj: T,
-    id: usize,
-    data: data::LocalMap,
 }
 
-/// Spawns a new future, returning the fused future and task.
+/// Spawns a future or stream, returning it and the new task responsible for
+/// running it to completion.
 ///
 /// This function is the termination endpoint for running futures. This method
 /// will conceptually allocate a new task to run the given object, which is
 /// normally either a `Future` or `Stream`.
 ///
 /// This function is similar to the `thread::spawn` function but does not
 /// attempt to run code in the background. The future will not make progress
 /// until the methods on `Spawn` are called in turn.
 pub fn spawn<T>(obj: T) -> Spawn<T> {
     Spawn {
+        id: fresh_task_id(),
         obj: obj,
-        id: fresh_task_id(),
-        data: data::local_map(),
+        data: local_map(),
     }
 }
 
-impl<T> Spawn<T> {
+impl<T: ?Sized> Spawn<T> {
     /// Get a shared reference to the object the Spawn is wrapping.
     pub fn get_ref(&self) -> &T {
         &self.obj
     }
 
     /// Get a mutable reference to the object the Spawn is wrapping.
     pub fn get_mut(&mut self) -> &mut T {
         &mut self.obj
     }
 
     /// Consume the Spawn, returning its inner object
-    pub fn into_inner(self) -> T {
+    pub fn into_inner(self) -> T where T: Sized {
         self.obj
     }
-}
 
-impl<F: Future> Spawn<F> {
     /// Polls the internal future, scheduling notifications to be sent to the
-    /// `unpark` argument.
+    /// `notify` argument.
     ///
     /// This method will poll the internal future, testing if it's completed
-    /// yet. The `unpark` argument is used as a sink for notifications sent to
+    /// yet. The `notify` argument is used as a sink for notifications sent to
     /// this future. That is, while the future is being polled, any call to
-    /// `task::park()` will return a handle that contains the `unpark`
+    /// `task::current()` will return a handle that contains the `notify`
     /// specified.
     ///
-    /// If this function returns `NotReady`, then the `unpark` should have been
+    /// If this function returns `NotReady`, then the `notify` should have been
     /// scheduled to receive a notification when poll can be called again.
     /// Otherwise if `Ready` or `Err` is returned, the `Spawn` task can be
     /// safely destroyed.
-    pub fn poll_future(&mut self, unpark: Arc<Unpark>) -> Poll<F::Item, F::Error> {
-        self.enter(&unpark, |f| f.poll())
-    }
-
-    /// Waits for the internal future to complete, blocking this thread's
-    /// execution until it does.
+    ///
+    /// Note that `notify` itself is passed as a shared reference, and is itself
+    /// not required to be a `NotifyHandle`. The `Clone` and `Into` trait bounds
+    /// will be used to convert this `notify` to a `NotifyHandle` if necessary.
+    /// This construction can avoid an unnecessary atomic reference count bump
+    /// in some situations.
+    ///
+    /// ## Unsafety and `id`
     ///
-    /// This function will call `poll_future` in a loop, waiting for the future
-    /// to complete. When a future cannot make progress it will use
-    /// `thread::park` to block the current thread.
-    pub fn wait_future(&mut self) -> Result<F::Item, F::Error> {
-        let unpark = Arc::new(ThreadUnpark::new(thread::current()));
-        loop {
-            match try!(self.poll_future(unpark.clone())) {
-                Async::NotReady => unpark.park(),
-                Async::Ready(e) => return Ok(e),
-            }
-        }
+    /// This function and all other `*_notify` functions on this type will treat
+    /// the `id` specified very carefully, explicitly calling functions like the
+    /// `notify` argument's `clone_id` and `drop_id` functions. It should be
+    /// safe to encode a pointer itself into the `id` specified, such as an
+    /// `Arc<N>` or a `Box<N>`. The `clone_id` and `drop_id` functions are then
+    /// intended to be sufficient for the memory management related to that
+    /// pointer.
+    pub fn poll_future_notify<N>(&mut self,
+                                 notify: &N,
+                                 id: usize) -> Poll<T::Item, T::Error>
+        where N: Clone + Into<NotifyHandle>,
+              T: Future,
+    {
+        let mk = || notify.clone().into();
+        self.enter(BorrowedUnpark::new(&mk, id), |f| f.poll())
     }
 
-    /// A specialized function to request running a future to completion on the
-    /// specified executor.
-    ///
-    /// This function only works for futures whose item and error types are `()`
-    /// and also implement the `Send` and `'static` bounds. This will submit
-    /// units of work (instances of `Run`) to the `exec` argument provided
-    /// necessary to drive the future to completion.
-    ///
-    /// When the future would block, it's arranged that when the future is again
-    /// ready it will submit another unit of work to the `exec` provided. This
-    /// will happen in a loop until the future has completed.
-    ///
-    /// This method is not appropriate for all futures, and other kinds of
-    /// executors typically provide a similar function with perhaps relaxed
-    /// bounds as well.
-    pub fn execute(self, exec: Arc<Executor>)
-        where F: Future<Item=(), Error=()> + Send + 'static,
+    /// Like `poll_future_notify`, except polls the underlying stream.
+    pub fn poll_stream_notify<N>(&mut self,
+                                 notify: &N,
+                                 id: usize)
+                                 -> Poll<Option<T::Item>, T::Error>
+        where N: Clone + Into<NotifyHandle>,
+              T: Stream,
     {
-        exec.clone().execute(Run {
-            // Ideally this method would be defined directly on
-            // `Spawn<BoxFuture<(), ()>>` so we wouldn't have to box here and
-            // it'd be more explicit, but unfortunately that currently has a
-            // link error on nightly: rust-lang/rust#36155
-            spawn: Spawn {
-                id: self.id,
-                data: self.data,
-                obj: self.obj.boxed(),
-            },
-            inner: Arc::new(Inner {
-                exec: exec,
-                mutex: UnparkMutex::new()
-            }),
-        })
-    }
-}
-
-impl<S: Stream> Spawn<S> {
-    /// Like `poll_future`, except polls the underlying stream.
-    pub fn poll_stream(&mut self, unpark: Arc<Unpark>)
-                       -> Poll<Option<S::Item>, S::Error> {
-        self.enter(&unpark, |stream| stream.poll())
+        let mk = || notify.clone().into();
+        self.enter(BorrowedUnpark::new(&mk, id), |s| s.poll())
     }
 
-    /// Like `wait_future`, except only waits for the next element to arrive on
-    /// the underlying stream.
-    pub fn wait_stream(&mut self) -> Option<Result<S::Item, S::Error>> {
-        let unpark = Arc::new(ThreadUnpark::new(thread::current()));
-        loop {
-            match self.poll_stream(unpark.clone()) {
-                Ok(Async::NotReady) => unpark.park(),
-                Ok(Async::Ready(Some(e))) => return Some(Ok(e)),
-                Ok(Async::Ready(None)) => return None,
-                Err(e) => return Some(Err(e)),
-            }
-        }
-    }
-}
-
-impl<S: Sink> Spawn<S> {
     /// Invokes the underlying `start_send` method with this task in place.
     ///
-    /// If the underlying operation returns `NotReady` then the `unpark` value
+    /// If the underlying operation returns `NotReady` then the `notify` value
     /// passed in will receive a notification when the operation is ready to be
     /// attempted again.
-    pub fn start_send(&mut self, value: S::SinkItem, unpark: &Arc<Unpark>)
-                       -> StartSend<S::SinkItem, S::SinkError> {
-        self.enter(unpark, |sink| sink.start_send(value))
+    pub fn start_send_notify<N>(&mut self,
+                                value: T::SinkItem,
+                                notify: &N,
+                                id: usize)
+                               -> StartSend<T::SinkItem, T::SinkError>
+        where N: Clone + Into<NotifyHandle>,
+              T: Sink,
+    {
+        let mk = || notify.clone().into();
+        self.enter(BorrowedUnpark::new(&mk, id), |s| s.start_send(value))
     }
 
     /// Invokes the underlying `poll_complete` method with this task in place.
     ///
-    /// If the underlying operation returns `NotReady` then the `unpark` value
+    /// If the underlying operation returns `NotReady` then the `notify` value
     /// passed in will receive a notification when the operation is ready to be
     /// attempted again.
-    pub fn poll_flush(&mut self, unpark: &Arc<Unpark>)
-                       -> Poll<(), S::SinkError> {
-        self.enter(unpark, |sink| sink.poll_complete())
+    pub fn poll_flush_notify<N>(&mut self,
+                                notify: &N,
+                                id: usize)
+                                -> Poll<(), T::SinkError>
+        where N: Clone + Into<NotifyHandle>,
+              T: Sink,
+    {
+        let mk = || notify.clone().into();
+        self.enter(BorrowedUnpark::new(&mk, id), |s| s.poll_complete())
     }
 
-    /// Blocks the current thread until it's able to send `value` on this sink.
+    /// Invokes the underlying `close` method with this task in place.
     ///
-    /// This function will send the `value` on the sink that this task wraps. If
-    /// the sink is not ready to send the value yet then the current thread will
-    /// be blocked until it's able to send the value.
-    pub fn wait_send(&mut self, mut value: S::SinkItem)
-                     -> Result<(), S::SinkError> {
-        let unpark = Arc::new(ThreadUnpark::new(thread::current()));
-        let unpark2 = unpark.clone() as Arc<Unpark>;
-        loop {
-            value = match try!(self.start_send(value, &unpark2)) {
-                AsyncSink::NotReady(v) => v,
-                AsyncSink::Ready => return Ok(()),
-            };
-            unpark.park();
-        }
+    /// If the underlying operation returns `NotReady` then the `notify` value
+    /// passed in will receive a notification when the operation is ready to be
+    /// attempted again.
+    pub fn close_notify<N>(&mut self,
+                           notify: &N,
+                           id: usize)
+                           -> Poll<(), T::SinkError>
+        where N: Clone + Into<NotifyHandle>,
+              T: Sink,
+    {
+        let mk = || notify.clone().into();
+        self.enter(BorrowedUnpark::new(&mk, id), |s| s.close())
     }
 
-    /// Blocks the current thread until it's able to flush this sink.
-    ///
-    /// This function will call the underlying sink's `poll_complete` method
-    /// until it returns that it's ready, proxying out errors upwards to the
-    /// caller if one occurs.
-    ///
-    /// The thread will be blocked until `poll_complete` returns that it's
-    /// ready.
-    pub fn wait_flush(&mut self) -> Result<(), S::SinkError> {
-        let unpark = Arc::new(ThreadUnpark::new(thread::current()));
-        let unpark2 = unpark.clone() as Arc<Unpark>;
-        loop {
-            if try!(self.poll_flush(&unpark2)).is_ready() {
-                return Ok(())
-            }
-            unpark.park();
-        }
+    fn enter<F, R>(&mut self, unpark: BorrowedUnpark, f: F) -> R
+        where F: FnOnce(&mut T) -> R
+    {
+        let borrowed = BorrowedTask {
+            id: self.id,
+            unpark: unpark,
+            events: BorrowedEvents::new(),
+            map: &self.data,
+        };
+        let obj = &mut self.obj;
+        set(&borrowed, || f(obj))
     }
 }
 
-impl<T> Spawn<T> {
-    fn enter<F, R>(&mut self, unpark: &Arc<Unpark>, f: F) -> R
-        where F: FnOnce(&mut T) -> R
-    {
-        let task = BorrowedTask {
-            id: self.id,
-            unpark: unpark,
-            events: Events::new(),
-            map: &self.data,
-        };
-        let obj = &mut self.obj;
-        set(&task, || f(obj))
-    }
-}
-
-impl<T: fmt::Debug> fmt::Debug for Spawn<T> {
+impl<T: fmt::Debug + ?Sized> fmt::Debug for Spawn<T> {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
         f.debug_struct("Spawn")
-         .field("obj", &self.obj)
-         .field("id", &self.id)
+         .field("obj", &&self.obj)
          .finish()
     }
 }
 
 /// A trait which represents a sink of notifications that a future is ready to
 /// make progress.
 ///
-/// This trait is provided as an argument to the `Spawn::poll_future` and
-/// `Spawn::poll_stream` functions. It's transitively used as part of the
-/// `Task::unpark` method to internally deliver notifications of readiness of a
-/// future to move forward.
-pub trait Unpark: Send + Sync {
+/// This trait is provided as an argument to the `Spawn::*_notify` family of
+/// functions. It's transitively used as part of the `Task::notify` method to
+/// internally deliver notifications of readiness of a future to move forward.
+///
+/// An instance of `Notify` has one primary method, `notify`, which is given a
+/// contextual argument as to what's being notified. This contextual argument is
+/// *also* provided to the `Spawn::*_notify` family of functions and can be used
+/// to reuse an instance of `Notify` across many futures.
+///
+/// Instances of `Notify` must be safe to share across threads, and the methods
+/// be invoked concurrently. They must also live for the `'static` lifetime,
+/// not containing any stack references.
+pub trait Notify: Send + Sync {
     /// Indicates that an associated future and/or task are ready to make
     /// progress.
     ///
     /// Typically this means that the receiver of the notification should
     /// arrange for the future to get poll'd in a prompt fashion.
-    fn unpark(&self);
+    ///
+    /// This method takes an `id` as an argument which was transitively passed
+    /// in from the original call to `Spawn::*_notify`. This id can be used to
+    /// disambiguate which precise future became ready for polling.
+    ///
+    /// # Panics
+    ///
+    /// Since `notify` may be invoked from arbitrary contexts, it should
+    /// endeavor not to panic and to do as little work as possible. However, it
+    /// is not guaranteed not to panic, and callers should be wary. If a panic
+    /// occurs, that panic may or may not be propagated to the end-user of the
+    /// future that you'd otherwise wake up.
+    fn notify(&self, id: usize);
+
+    /// This function is called whenever a new copy of `id` is needed.
+    ///
+    /// This is called in one of two situations:
+    ///
+    /// * A `Task` is being created through `task::current` while a future is
+    ///   being polled. In that case the instance of `Notify` passed in to one
+    ///   of the `poll_*` functions is called with the `id` passed into the same
+    ///   `poll_*` function.
+    /// * A `Task` is itself being cloned. Each `Task` contains its own id and a
+    ///   handle to the `Notify` behind it, and the task's `Notify` is used to
+    ///   clone the internal `id` to assign to the new task.
+    ///
+    /// The `id` returned here will be stored in the `Task`-to-be and used later
+    /// to pass to `notify` when the `Task::notify` function is called on that
+    /// `Task`.
+    ///
+    /// Note that typically this is just the identity function, passing through
+    /// the identifier. For more unsafe situations, however, if `id` is itself a
+    /// pointer of some kind this can be used as a hook to "clone" the pointer,
+    /// depending on what that means for the specified pointer.
+    fn clone_id(&self, id: usize) -> usize {
+        id
+    }
+
+    /// All instances of `Task` store an `id` that they're going to internally
+    /// notify with, and this function is called when the `Task` is dropped.
+    ///
+    /// This function provides a hook for schemes which encode pointers in this
+    /// `id` argument to deallocate resources associated with the pointer. It's
+    /// guaranteed that after this function is called the `Task` containing this
+    /// `id` will no longer use the `id`.
+    fn drop_id(&self, id: usize) {
+        drop(id);
+    }
 }
 
-/// A trait representing requests to poll futures.
+/// Sets the `NotifyHandle` of the current task for the duration of the provided
+/// closure.
+///
+/// This function takes a type that can be converted into a notify handle,
+/// `notify` and `id`, and a closure `f`. The closure `f` will be executed such
+/// that calls to `task::current()` will store a reference to the notify handle
+/// provided, not the one previously in the environment.
+///
+/// Note that calls to `task::current()` in the closure provided *will not* be
+/// equivalent to `task::current()` before this method is called. The two tasks
+/// returned will notify different handles, and the task handles pulled out
+/// during the duration of this closure will not notify the previous task. It's
+/// recommended that you call `task::current()` in some capacity before calling
+/// this function to ensure that calls to `task::current()` inside of this
+/// closure can transitively wake up the outer task.
+///
+/// # Panics
 ///
-/// This trait is an argument to the `Spawn::execute` which is used to run a
-/// future to completion. An executor will receive requests to run a future and
-/// an executor is responsible for ensuring that happens in a timely fashion.
-pub trait Executor: Send + Sync + 'static {
-    /// Requests that `Run` is executed soon on the given executor.
-    fn execute(&self, r: Run);
+/// This function will panic if it is called outside the context of a future's
+/// task. This is only valid to call once you've already entered a future via
+/// `Spawn::poll_*` functions.
+pub fn with_notify<F, T, R>(notify: &T, id: usize, f: F) -> R
+    where F: FnOnce() -> R,
+          T: Clone + Into<NotifyHandle>,
+{
+    with(|task| {
+        let mk = || notify.clone().into();
+        let new_task = BorrowedTask {
+            id: task.id,
+            unpark: BorrowedUnpark::new(&mk, id),
+            events: task.events,
+            map: task.map,
+        };
+
+        set(&new_task, f)
+    })
 }
 
-struct ThreadUnpark {
-    thread: thread::Thread,
-    ready: AtomicBool,
+/// An unsafe trait for implementing custom forms of memory management behind a
+/// `Task`.
+///
+/// The `futures` crate critically relies on "notification handles" for
+/// futures to contain and then later use to inform that they're ready to make
+/// progress. These handles, however, must be cheap to create and cheap
+/// to clone to ensure that this operation is efficient throughout the
+/// execution of a program.
+///
+/// Typically this sort of memory management is done in the standard library
+/// with the `Arc` type. An `Arc` is relatively cheap to allocate and is
+/// quite cheap to clone and pass around. Plus, it's 100% safe!
+///
+/// When working outside the standard library, however, you don't always have
+/// an `Arc` type available to you. This trait, `UnsafeNotify`, is intended
+/// to be the "unsafe version" of the `Notify` trait. This trait encodes the
+/// memory management operations of a `Task`'s notification handle, allowing
+/// custom implementations for the memory management of a notification handle.
+///
+/// Put another way, the core notification type in this library,
+/// `NotifyHandle`, simply internally contains an instance of
+/// `*mut UnsafeNotify`. This "unsafe trait object" is then used exclusively
+/// to operate with, dynamically dispatching calls to clone, drop, and notify.
+/// Critically though as a raw pointer it doesn't require a particular form
+/// of memory management, allowing external implementations.
+///
+/// A default implementation of the `UnsafeNotify` trait is provided for the
+/// `Arc` type in the standard library. If the `use_std` feature of this crate
+/// is not available however, you'll be required to implement your own
+/// instance of this trait to pass it into `NotifyHandle::new`.
+///
+/// # Unsafety
+///
+/// This trait is manually encoding the memory management of the underlying
+/// handle, and as a result is quite unsafe to implement! Implementors of
+/// this trait must guarantee:
+///
+/// * Calls to `clone_raw` produce uniquely owned handles. It should be safe
+///   to drop the current handle and have the returned handle still be valid.
+/// * Calls to `drop_raw` work with `self` as a raw pointer, deallocating
+///   resources associated with it. This is a pretty unsafe operation as it's
+///   invalidating the `self` pointer, so extreme care needs to be taken.
+///
+/// In general it's recommended to review the trait documentation as well as
+/// the implementation for `Arc` in this crate. When in doubt ping the
+/// `futures` authors to clarify an unsafety question here.
+pub unsafe trait UnsafeNotify: Notify {
+    /// Creates a new `NotifyHandle` from this instance of `UnsafeNotify`.
+    ///
+    /// This function will create a new uniquely owned handle that under the
+    /// hood references the same notification instance. In other words calls
+    /// to `notify` on the returned handle should be equivalent to calls to
+    /// `notify` on this handle.
+    ///
+    /// # Unsafety
+    ///
+    /// This trait is unsafe to implement, as are all these methods. This
+    /// method is also unsafe to call as it's asserting the `UnsafeNotify`
+    /// value is in a consistent state. In general it's recommended to
+    /// review the trait documentation as well as the implementation for `Arc`
+    /// in this crate. When in doubt ping the `futures` authors to clarify
+    /// an unsafety question here.
+    unsafe fn clone_raw(&self) -> NotifyHandle;
+
+    /// Drops this instance of `UnsafeNotify`, deallocating resources
+    /// associated with it.
+    ///
+    /// This method is intended to have a signature such as:
+    ///
+    /// ```ignore
+    /// fn drop_raw(self: *mut Self);
+    /// ```
+    ///
+    /// Unfortunately in Rust today that signature is not object safe.
+    /// Nevertheless it's recommended to implement this function *as if* that
+    /// were its signature. As such it is not safe to call on an invalid
+    /// pointer, nor is the validity of the pointer guaranteed after this
+    /// function returns.
+    ///
+    /// # Unsafety
+    ///
+    /// This trait is unsafe to implement, as are all these methods. This
+    /// method is also unsafe to call as it's asserting the `UnsafeNotify`
+    /// value is in a consistent state. In general it's recommended to
+    /// review the trait documentation as well as the implementation for `Arc`
+    /// in this crate. When in doubt ping the `futures` authors to clarify
+    /// an unsafety question here.
+    unsafe fn drop_raw(&self);
 }
 
-impl ThreadUnpark {
-    fn new(thread: thread::Thread) -> ThreadUnpark {
-        ThreadUnpark {
-            thread: thread,
-            ready: AtomicBool::new(false),
-        }
+/// A `NotifyHandle` is the core value through which notifications are routed
+/// in the `futures` crate.
+///
+/// All instances of `Task` will contain a `NotifyHandle` handle internally.
+/// This handle itself contains a trait object pointing to an instance of the
+/// `Notify` trait, allowing notifications to get routed through it.
+///
+/// The `NotifyHandle` type internally does not codify any particular memory
+/// management strategy. Internally it contains an instance of `*mut
+/// UnsafeNotify`, and more details about that trait can be found on its own
+/// documentation. Consequently, though, the one constructor of this type,
+/// `NotifyHandle::new`, is `unsafe` to call. It is not recommended to call
+/// this constructor directly.
+///
+/// If you're working with the standard library then it's recommended to
+/// work with the `Arc` type. If you have a struct, `T`, which implements the
+/// `Notify` trait, then you can construct this with
+/// `NotifyHandle::from(t: Arc<T>)`. The coercion to `UnsafeNotify` will
+/// happen automatically and safely for you.
+///
+/// When working externally from the standard library it's recommended to
+/// provide a similar safe constructor for your custom type as opposed to
+/// recommending an invocation of `NotifyHandle::new` directly.
+pub struct NotifyHandle {
+    inner: *mut UnsafeNotify,
+}
+
+unsafe impl Send for NotifyHandle {}
+unsafe impl Sync for NotifyHandle {}
+
+impl NotifyHandle {
+    /// Constructs a new `NotifyHandle` directly.
+    ///
+    /// Note that most code will not need to call this. Implementers of the
+    /// `UnsafeNotify` trait will typically provide a wrapper that calls this
+    /// but you otherwise shouldn't call it directly.
+    ///
+    /// If you're working with the standard library then it's recommended to
+    /// use the `NotifyHandle::from` function instead which works with the safe
+    /// `Arc` type and the safe `Notify` trait.
+    #[inline]
+    pub unsafe fn new(inner: *mut UnsafeNotify) -> NotifyHandle {
+        NotifyHandle { inner: inner }
     }
 
-    fn park(&self) {
-        if !self.ready.swap(false, Ordering::SeqCst) {
-            thread::park();
+    /// Invokes the underlying instance of `Notify` with the provided `id`.
+    pub fn notify(&self, id: usize) {
+        unsafe { (*self.inner).notify(id) }
+    }
+
+    fn clone_id(&self, id: usize) -> usize {
+        unsafe { (*self.inner).clone_id(id) }
+    }
+
+    fn drop_id(&self, id: usize) {
+        unsafe { (*self.inner).drop_id(id) }
+    }
+}
+
+impl Clone for NotifyHandle {
+    #[inline]
+    fn clone(&self) -> Self {
+        unsafe {
+            (*self.inner).clone_raw()
         }
     }
 }
 
-impl Unpark for ThreadUnpark {
-    fn unpark(&self) {
-        self.ready.store(true, Ordering::SeqCst);
-        self.thread.unpark()
+impl fmt::Debug for NotifyHandle {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_struct("NotifyHandle")
+         .finish()
     }
 }
 
-/// Units of work submitted to an `Executor`, currently only created
-/// internally.
-pub struct Run {
-    spawn: Spawn<BoxFuture<(), ()>>,
-    inner: Arc<Inner>,
-}
-
-struct Inner {
-    mutex: UnparkMutex<Run>,
-    exec: Arc<Executor>,
-}
-
-impl Run {
-    /// Actually run the task (invoking `poll` on its future) on the current
-    /// thread.
-    pub fn run(self) {
-        let Run { mut spawn, inner } = self;
-
-        // SAFETY: the ownership of this `Run` object is evidence that
-        // we are in the `POLLING`/`REPOLL` state for the mutex.
+impl Drop for NotifyHandle {
+    fn drop(&mut self) {
         unsafe {
-            inner.mutex.start_poll();
-
-            loop {
-                match spawn.poll_future(inner.clone()) {
-                    Ok(Async::NotReady) => {}
-                    Ok(Async::Ready(())) |
-                    Err(()) => return inner.mutex.complete(),
-                }
-                let run = Run { spawn: spawn, inner: inner.clone() };
-                match inner.mutex.wait(run) {
-                    Ok(()) => return,            // we've waited
-                    Err(r) => spawn = r.spawn,   // someone's notified us
-                }
-            }
+            (*self.inner).drop_raw()
         }
     }
 }
 
-impl fmt::Debug for Run {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        f.debug_struct("Run")
-         .field("contents", &"...")
-         .finish()
+/// Marker for a `T` that is behind &'static.
+struct StaticRef<T>(PhantomData<T>);
+
+impl<T: Notify> Notify for StaticRef<T> {
+    fn notify(&self, id: usize) {
+        let me = unsafe { &*(self as *const _ as *const T) };
+        me.notify(id);
+    }
+
+    fn clone_id(&self, id: usize) -> usize {
+        let me = unsafe { &*(self as *const _ as *const T) };
+        me.clone_id(id)
+    }
+
+    fn drop_id(&self, id: usize) {
+        let me = unsafe { &*(self as *const _ as *const T) };
+        me.drop_id(id);
     }
 }
 
-impl Unpark for Inner {
-    fn unpark(&self) {
-        match self.mutex.notify() {
-            Ok(run) => self.exec.execute(run),
-            Err(()) => {}
-        }
+unsafe impl<T: Notify + 'static> UnsafeNotify for StaticRef<T> {
+    unsafe fn clone_raw(&self) -> NotifyHandle {
+        NotifyHandle::new(self as *const _ as *mut StaticRef<T>)
+    }
+
+    unsafe fn drop_raw(&self) {}
+}
+
+impl<T: Notify> From<&'static T> for NotifyHandle {
+    fn from(src : &'static T) -> NotifyHandle {
+        unsafe { NotifyHandle::new(src as *const _ as *mut StaticRef<T>) }
     }
 }
new file mode 100644
--- /dev/null
+++ b/third_party/rust/futures/src/task_impl/std/data.rs
@@ -0,0 +1,131 @@
+use std::prelude::v1::*;
+
+use std::any::TypeId;
+use std::cell::RefCell;
+use std::hash::{BuildHasherDefault, Hasher};
+use std::collections::HashMap;
+
+use task_impl::with;
+
/// A macro to create a `static` of type `LocalKey`
///
/// This macro is intentionally similar to the `thread_local!`, and creates a
/// `static` which has a `with` method to access the data on a task.
///
/// The data associated with each task local is per-task, so different tasks
/// will contain different values.
#[macro_export]
macro_rules! task_local {
    (static $NAME:ident: $t:ty = $e:expr) => (
        static $NAME: $crate::task::LocalKey<$t> = {
            // Deferred initializer: only runs the user's expression the first
            // time the key is accessed on a given task.
            fn __init() -> $t { $e }
            // Each macro expansion declares a fresh local type `__A`, so its
            // `TypeId` uniquely identifies this particular `task_local!`
            // declaration within the task's storage map.
            fn __key() -> ::std::any::TypeId {
                struct __A;
                ::std::any::TypeId::of::<__A>()
            }
            $crate::task::LocalKey {
                __init: __init,
                __key: __key,
            }
        };
    )
}
+
+pub type LocalMap = RefCell<HashMap<TypeId,
+                                    Box<Opaque>,
+                                    BuildHasherDefault<IdHasher>>>;
+
+pub fn local_map() -> LocalMap {
+    RefCell::new(HashMap::default())
+}
+
+pub trait Opaque: Send {}
+impl<T: Send> Opaque for T {}
+
+/// A key for task-local data stored in a future's task.
+///
+/// This type is generated by the `task_local!` macro and performs very
+/// similarly to the `thread_local!` macro and `std::thread::LocalKey` types.
+/// Data associated with a `LocalKey<T>` is stored inside of a future's task,
+/// and the data is destroyed when the future is completed and the task is
+/// destroyed.
+///
+/// Task-local data can migrate between threads and hence requires a `Send`
+/// bound. Additionally, task-local data also requires the `'static` bound to
+/// ensure it lives long enough. When a key is accessed for the first time the
+/// task's data is initialized with the provided initialization expression to
+/// the macro.
+#[derive(Debug)]
+pub struct LocalKey<T> {
+    // "private" fields which have to be public to get around macro hygiene, not
+    // included in the stability story for this type. Can change at any time.
+    #[doc(hidden)]
+    pub __key: fn() -> TypeId,
+    #[doc(hidden)]
+    pub __init: fn() -> T,
+}
+
+pub struct IdHasher {
+    id: u64,
+}
+
+impl Default for IdHasher {
+    fn default() -> IdHasher {
+        IdHasher { id: 0 }
+    }
+}
+
+impl Hasher for IdHasher {
+    fn write(&mut self, _bytes: &[u8]) {
+        // TODO: need to do something sensible
+        panic!("can only hash u64");
+    }
+
+    fn write_u64(&mut self, u: u64) {
+        self.id = u;
+    }
+
+    fn finish(&self) -> u64 {
+        self.id
+    }
+}
+
impl<T: Send + 'static> LocalKey<T> {
    /// Access this task-local key, running the provided closure with a
    /// reference to the value.
    ///
    /// This function will access this task-local key to retrieve the data
    /// associated with the current task and this key. If this is the first time
    /// this key has been accessed on this task, then the key will be
    /// initialized with the initialization expression provided at the time the
    /// `task_local!` macro was called.
    ///
    /// The provided closure will be provided a shared reference to the
    /// underlying data associated with this task-local-key. The data itself is
    /// stored inside of the current task.
    ///
    /// # Panics
    ///
    /// This function can possibly panic for a number of reasons:
    ///
    /// * If there is not a current task.
    /// * If the initialization expression is run and it panics
    /// * If the closure provided panics
    pub fn with<F, R>(&'static self, f: F) -> R
        where F: FnOnce(&T) -> R
    {
        // Unique TypeId generated by this key's `task_local!` expansion.
        let key = (self.__key)();
        with(|task| {
            // Take the raw pointer in an inner scope so the RefCell borrow of
            // the map is released before `f` runs; this allows `f` to
            // re-enter `with` for another key without panicking on a
            // double-borrow.
            let raw_pointer = {
                let mut data = task.map.borrow_mut();
                let entry = data.entry(key).or_insert_with(|| {
                    Box::new((self.__init)())
                });
                // Pointer targets the boxed value, not the map slot, so it
                // stays valid even if the HashMap reallocates. NOTE(review):
                // this also assumes no entry is removed from the map while
                // `f` runs — appears to hold since nothing here deletes
                // entries, but confirm against the rest of task_impl.
                &**entry as *const Opaque as *const T
            };
            unsafe {
                // SAFETY: the entry for `key` was inserted by this same key,
                // so the erased value really is a `T` (see the cast above).
                f(&*raw_pointer)
            }
        })
    }
}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/futures/src/task_impl/std/mod.rs
@@ -0,0 +1,730 @@
+use std::prelude::v1::*;
+
+use std::cell::Cell;
+use std::fmt;
+use std::marker::PhantomData;
+use std::mem;
+use std::ptr;
+use std::sync::{Arc, Mutex, Condvar, Once, ONCE_INIT};
+use std::sync::atomic::{AtomicUsize, Ordering};
+
+use {Future, Stream, Sink, Poll, Async, StartSend, AsyncSink};
+use super::core;
+use super::{BorrowedTask, NotifyHandle, Spawn, spawn, Notify, UnsafeNotify};
+
+mod unpark_mutex;
+pub use self::unpark_mutex::UnparkMutex;
+
+mod data;
+pub use self::data::*;
+
+mod task_rc;
+#[allow(deprecated)]
+#[cfg(feature = "with-deprecated")]
+pub use self::task_rc::TaskRc;
+
+pub use task_impl::core::init;
+
// Pointer to the `BorrowedTask` currently being polled on this thread, or
// null when no task is active. Stored type-erased as `*mut u8`.
thread_local!(static CURRENT_TASK: Cell<*mut u8> = Cell::new(ptr::null_mut()));

// Guards the one-time registration of the sentinel get/set pointers below.
static INIT: Once = ONCE_INIT;

pub fn get_ptr() -> Option<*mut u8> {
    // Since this condition will always return true when TLS task storage is
    // used (the default), the branch predictor will be able to optimize the
    // branching and a dynamic dispatch will be avoided, which makes the
    // compiler happier.
    if core::is_get_ptr(0x1) {
        Some(CURRENT_TASK.with(|c| c.get()))
    } else {
        core::get_ptr()
    }
}

// Returns a raw pointer to this thread's TLS slot; valid for as long as the
// thread (and its thread-locals) are alive.
fn tls_slot() -> *const Cell<*mut u8> {
    CURRENT_TASK.with(|c| c as *const _)
}

pub fn set<'a, F, R>(task: &BorrowedTask<'a>, f: F) -> R
    where F: FnOnce() -> R
{
    // Lazily initialize the get / set ptrs
    //
    // Note that we won't actually use these functions ever, we'll instead be
    // testing the pointer's value elsewhere and calling our own functions.
    INIT.call_once(|| unsafe {
        // SAFETY(by design): 0x1/0x2 are sentinel function-pointer values
        // registered with `core::init` purely so `core::is_get_ptr` can
        // recognize "TLS mode"; they are never invoked.
        let get = mem::transmute::<usize, _>(0x1);
        let set = mem::transmute::<usize, _>(0x2);
        init(get, set);
    });

    // Same as above.
    if core::is_get_ptr(0x1) {
        // Drop guard restoring the previous task pointer even if `f` panics.
        struct Reset(*const Cell<*mut u8>, *mut u8);

        impl Drop for Reset {
            #[inline]
            fn drop(&mut self) {
                unsafe {
                    (*self.0).set(self.1);
                }
            }
        }

        unsafe {
            let slot = tls_slot();
            // Save the old value first so nested `set` calls unwind correctly.
            let _reset = Reset(slot, (*slot).get());
            (*slot).set(task as *const _ as *mut u8);
            f()
        }
    } else {
        core::set(task, f)
    }
}
+
/// Borrowed form of a task's wakeup mechanism, either the deprecated
/// `Unpark`-based API or the newer `NotifyHandle`-based one from `core`.
#[derive(Copy, Clone)]
#[allow(deprecated)]
pub enum BorrowedUnpark<'a> {
    Old(&'a Arc<Unpark>),
    New(core::BorrowedUnpark<'a>),
}

/// Borrowed, stack-allocated linked list of `UnparkEvent`s in effect while a
/// task is being polled (built up by nested `with_unpark_event` calls).
#[derive(Copy, Clone)]
#[allow(deprecated)]
pub enum BorrowedEvents<'a> {
    None,
    One(&'a UnparkEvent, &'a BorrowedEvents<'a>),
}

/// Owned counterpart of `BorrowedUnpark`, stored inside a `Task` handle.
#[derive(Clone)]
pub enum TaskUnpark {
    #[allow(deprecated)]
    Old(Arc<Unpark>),
    New(core::TaskUnpark),
}

/// Owned counterpart of `BorrowedEvents`; the zero- and one-event cases are
/// unboxed to avoid an allocation in the common case.
#[derive(Clone)]
#[allow(deprecated)]
pub enum UnparkEvents {
    None,
    One(UnparkEvent),
    Many(Box<[UnparkEvent]>),
}
+
+impl<'a> BorrowedUnpark<'a> {
+    #[inline]
+    pub fn new(f: &'a Fn() -> NotifyHandle, id: usize) -> BorrowedUnpark<'a> {
+        BorrowedUnpark::New(core::BorrowedUnpark::new(f, id))
+    }
+
+    #[inline]
+    pub fn to_owned(&self) -> TaskUnpark {
+        match *self {
+            BorrowedUnpark::Old(old) => TaskUnpark::Old(old.clone()),
+            BorrowedUnpark::New(new) => TaskUnpark::New(new.to_owned()),
+        }
+    }
+}
+
+impl<'a> BorrowedEvents<'a> {
+    #[inline]
+    pub fn new() -> BorrowedEvents<'a> {
+        BorrowedEvents::None
+    }
+
+    #[inline]
+    pub fn to_owned(&self) -> UnparkEvents {
+        let mut one_event = None;
+        let mut list = Vec::new();
+        let mut cur = self;
+        while let BorrowedEvents::One(event, next) = *cur {
+            let event = event.clone();
+            match one_event.take() {
+                None if list.len() == 0 => one_event = Some(event),
+                None => list.push(event),
+                Some(event2) =>  {
+                    list.push(event2);
+                    list.push(event);
+                }
+            }
+            cur = next;
+        }
+
+        match one_event {
+            None if list.len() == 0 => UnparkEvents::None,
+            None => UnparkEvents::Many(list.into_boxed_slice()),
+            Some(e) => UnparkEvents::One(e),
+        }
+    }
+}
+
+impl UnparkEvents {
+    pub fn notify(&self) {
+        match *self {
+            UnparkEvents::None => {}
+            UnparkEvents::One(ref e) => e.unpark(),
+            UnparkEvents::Many(ref list) => {
+                for event in list.iter() {
+                    event.unpark();
+                }
+            }
+        }
+    }
+
+    pub fn will_notify(&self, events: &BorrowedEvents) -> bool {
+        // Pessimistically assume that any unpark events mean that we're not
+        // equivalent to the current task.
+        match *self {
+            UnparkEvents::None => {}
+            _ => return false,
+        }
+
+        match *events {
+            BorrowedEvents::None => return true,
+            _ => {},
+        }
+
+        return false
+    }
+}
+
#[allow(deprecated)]
impl TaskUnpark {
    // Wakes the task through whichever mechanism it was created with.
    pub fn notify(&self) {
        match *self {
            TaskUnpark::Old(ref old) => old.unpark(),
            TaskUnpark::New(ref new) => new.notify(),
        }
    }

    // Returns whether `self` would deliver its notification to the same place
    // as `unpark`; used to skip redundant wakeup registration.
    pub fn will_notify(&self, unpark: &BorrowedUnpark) -> bool {
        match (unpark, self) {
            (&BorrowedUnpark::Old(old1), &TaskUnpark::Old(ref old2)) => {
                // Identity comparison of the `Unpark` trait objects. Note
                // this compares fat pointers (data pointer + vtable), so two
                // `Arc`s to the same object compare equal only when their
                // vtables also match.
                &**old1 as *const Unpark == &**old2 as *const Unpark
            }
            (&BorrowedUnpark::New(ref new1), &TaskUnpark::New(ref new2)) => {
                new2.will_notify(new1)
            }
            // Mixed old/new representations never compare equal.
            _ => false,
        }
    }
}
+
impl<F: Future> Spawn<F> {
    /// Polls the internal future, scheduling notifications to be sent to the
    /// `unpark` argument.
    ///
    /// This method will poll the internal future, testing if it's completed
    /// yet. The `unpark` argument is used as a sink for notifications sent to
    /// this future. That is, while the future is being polled, any call to
    /// `task::park()` will return a handle that contains the `unpark`
    /// specified.
    ///
    /// If this function returns `NotReady`, then the `unpark` should have been
    /// scheduled to receive a notification when poll can be called again.
    /// Otherwise if `Ready` or `Err` is returned, the `Spawn` task can be
    /// safely destroyed.
    #[deprecated(note = "recommended to use `poll_future_notify` instead")]
    #[allow(deprecated)]
    pub fn poll_future(&mut self, unpark: Arc<Unpark>) -> Poll<F::Item, F::Error> {
        self.enter(BorrowedUnpark::Old(&unpark), |f| f.poll())
    }

    /// Waits for the internal future to complete, blocking this thread's
    /// execution until it does.
    ///
    /// This function will call `poll_future` in a loop, waiting for the future
    /// to complete. When a future cannot make progress it will use
    /// `thread::park` to block the current thread.
    pub fn wait_future(&mut self) -> Result<F::Item, F::Error> {
        ThreadNotify::with_current(|notify| {

            loop {
                // `?` propagates an error from the future immediately;
                // otherwise park this thread until `notify` is fired.
                match self.poll_future_notify(notify, 0)? {
                    Async::NotReady => notify.park(),
                    Async::Ready(e) => return Ok(e),
                }
            }
        })
    }

    /// A specialized function to request running a future to completion on the
    /// specified executor.
    ///
    /// This function only works for futures whose item and error types are `()`
    /// and also implement the `Send` and `'static` bounds. This will submit
    /// units of work (instances of `Run`) to the `exec` argument provided
    /// necessary to drive the future to completion.
    ///
    /// When the future would block, it's arranged that when the future is again
    /// ready it will submit another unit of work to the `exec` provided. This
    /// will happen in a loop until the future has completed.
    ///
    /// This method is not appropriate for all futures, and other kinds of
    /// executors typically provide a similar function with perhaps relaxed
    /// bounds as well.
    ///
    /// Note that this method is likely to be deprecated in favor of the
    /// `futures::Executor` trait and `execute` method, but if this'd cause
    /// difficulty for you please let us know!
    pub fn execute(self, exec: Arc<Executor>)
        where F: Future<Item=(), Error=()> + Send + 'static,
    {
        // `exec` is cloned so `RunInner` keeps its own handle for
        // re-submitting the task whenever it is woken up later.
        exec.clone().execute(Run {
            // Ideally this method would be defined directly on
            // `Spawn<BoxFuture<(), ()>>` so we wouldn't have to box here and
            // it'd be more explicit, but unfortunately that currently has a
            // link error on nightly: rust-lang/rust#36155
            spawn: spawn(Box::new(self.into_inner())),
            inner: Arc::new(RunInner {
                exec: exec,
                mutex: UnparkMutex::new()
            }),
        })
    }
}
+
impl<S: Stream> Spawn<S> {
    /// Like `poll_future`, except polls the underlying stream.
    #[deprecated(note = "recommended to use `poll_stream_notify` instead")]
    #[allow(deprecated)]
    pub fn poll_stream(&mut self, unpark: Arc<Unpark>)
                       -> Poll<Option<S::Item>, S::Error> {
        self.enter(BorrowedUnpark::Old(&unpark), |s| s.poll())
    }

    /// Like `wait_future`, except only waits for the next element to arrive on
    /// the underlying stream.
    pub fn wait_stream(&mut self) -> Option<Result<S::Item, S::Error>> {
        ThreadNotify::with_current(|notify| {

            loop {
                match self.poll_stream_notify(notify, 0) {
                    // Park until woken, then poll again.
                    Ok(Async::NotReady) => notify.park(),
                    Ok(Async::Ready(Some(e))) => return Some(Ok(e)),
                    // Stream terminated.
                    Ok(Async::Ready(None)) => return None,
                    Err(e) => return Some(Err(e)),
                }
            }
        })
    }
}
+
impl<S: Sink> Spawn<S> {
    /// Invokes the underlying `start_send` method with this task in place.
    ///
    /// If the underlying operation returns `NotReady` then the `unpark` value
    /// passed in will receive a notification when the operation is ready to be
    /// attempted again.
    #[deprecated(note = "recommended to use `start_send_notify` instead")]
    #[allow(deprecated)]
    pub fn start_send(&mut self, value: S::SinkItem, unpark: &Arc<Unpark>)
                       -> StartSend<S::SinkItem, S::SinkError> {
        self.enter(BorrowedUnpark::Old(unpark), |s| s.start_send(value))
    }

    /// Invokes the underlying `poll_complete` method with this task in place.
    ///
    /// If the underlying operation returns `NotReady` then the `unpark` value
    /// passed in will receive a notification when the operation is ready to be
    /// attempted again.
    #[deprecated(note = "recommended to use `poll_flush_notify` instead")]
    #[allow(deprecated)]
    pub fn poll_flush(&mut self, unpark: &Arc<Unpark>)
                       -> Poll<(), S::SinkError> {
        self.enter(BorrowedUnpark::Old(unpark), |s| s.poll_complete())
    }

    /// Blocks the current thread until it's able to send `value` on this sink.
    ///
    /// This function will send the `value` on the sink that this task wraps. If
    /// the sink is not ready to send the value yet then the current thread will
    /// be blocked until it's able to send the value.
    pub fn wait_send(&mut self, mut value: S::SinkItem)
                     -> Result<(), S::SinkError> {
        ThreadNotify::with_current(|notify| {

            loop {
                // The sink hands the value back on `NotReady`; keep ownership
                // of it, park until woken, then retry.
                value = match self.start_send_notify(value, notify, 0)? {
                    AsyncSink::NotReady(v) => v,
                    AsyncSink::Ready => return Ok(()),
                };
                notify.park();
            }
        })
    }

    /// Blocks the current thread until it's able to flush this sink.
    ///
    /// This function will call the underlying sink's `poll_complete` method
    /// until it returns that it's ready, proxying out errors upwards to the
    /// caller if one occurs.
    ///
    /// The thread will be blocked until `poll_complete` returns that it's
    /// ready.
    pub fn wait_flush(&mut self) -> Result<(), S::SinkError> {
        ThreadNotify::with_current(|notify| {

            loop {
                if self.poll_flush_notify(notify, 0)?.is_ready() {
                    return Ok(())
                }
                notify.park();
            }
        })
    }

    /// Blocks the current thread until it's able to close this sink.
    ///
    /// This function will close the sink that this task wraps. If the sink
    /// is not ready to be closed yet, then the current thread will be blocked
    /// until it's closed.
    pub fn wait_close(&mut self) -> Result<(), S::SinkError> {
        ThreadNotify::with_current(|notify| {

            loop {
                if self.close_notify(notify, 0)?.is_ready() {
                    return Ok(())
                }
                notify.park();
            }
        })
    }
}
+
/// A trait which represents a sink of notifications that a future is ready to
/// make progress.
///
/// This trait is provided as an argument to the `Spawn::poll_future` and
/// `Spawn::poll_stream` functions. It's transitively used as part of the
/// `Task::unpark` method to internally deliver notifications of readiness of a
/// future to move forward.
#[deprecated(note = "recommended to use `Notify` instead")]
pub trait Unpark: Send + Sync {
    /// Indicates that an associated future and/or task are ready to make
    /// progress.
    ///
    /// Typically this means that the receiver of the notification should
    /// arrange for the future to get poll'd in a prompt fashion.
    fn unpark(&self);
}

/// A trait representing requests to poll futures.
///
/// This trait is an argument to the `Spawn::execute` which is used to run a
/// future to completion. An executor will receive requests to run a future and
/// an executor is responsible for ensuring that happens in a timely fashion.
///
/// Note that this trait is likely to be deprecated and/or renamed to avoid
/// clashing with the `future::Executor` trait. If you've got a use case for
/// this or would like to comment on the name please let us know!
pub trait Executor: Send + Sync + 'static {
    /// Requests that `Run` is executed soon on the given executor.
    fn execute(&self, r: Run);
}

/// Units of work submitted to an `Executor`, currently only created
/// internally.
pub struct Run {
    // The spawned (boxed) future being driven to completion.
    spawn: Spawn<Box<Future<Item = (), Error = ()> + Send>>,
    // Shared state that resubmits the task to the executor on wakeup.
    inner: Arc<RunInner>,
}

// State shared between a `Run` and its wakeup handle.
struct RunInner {
    // Coordinates polling vs. notification; see `UnparkMutex`.
    mutex: UnparkMutex<Run>,
    // Executor to resubmit the task to when it's woken.
    exec: Arc<Executor>,
}
+
impl Run {
    /// Actually run the task (invoking `poll` on its future) on the current
    /// thread.
    pub fn run(self) {
        let Run { mut spawn, inner } = self;

        // SAFETY: the ownership of this `Run` object is evidence that
        // we are in the `POLLING`/`REPOLL` state for the mutex.
        unsafe {
            inner.mutex.start_poll();

            loop {
                match spawn.poll_future_notify(&inner, 0) {
                    // Not ready: fall through and try to go to sleep below.
                    Ok(Async::NotReady) => {}
                    // Finished (successfully or with an error): mark the
                    // mutex complete so further notifications are no-ops.
                    Ok(Async::Ready(())) |
                    Err(()) => return inner.mutex.complete(),
                }
                // Re-package ourselves and hand the task to the mutex; if a
                // notification raced in while we were polling, `wait` hands
                // the task straight back and we poll again.
                let run = Run { spawn: spawn, inner: inner.clone() };
                match inner.mutex.wait(run) {
                    Ok(()) => return,            // we've waited
                    Err(r) => spawn = r.spawn,   // someone's notified us
                }
            }
        }
    }
}
+
+impl fmt::Debug for Run {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_struct("Run")
+         .field("contents", &"...")
+         .finish()
+    }
+}
+
+impl Notify for RunInner {
+    fn notify(&self, _id: usize) {
+        match self.mutex.notify() {
+            Ok(run) => self.exec.execute(run),
+            Err(()) => {}
+        }
+    }
+}
+
// ===== ThreadNotify =====

// Per-thread notifier used by the blocking `wait_*` methods: `park` blocks
// the current thread until some other thread calls `notify`.
struct ThreadNotify {
    // One of IDLE / NOTIFY / SLEEP below.
    state: AtomicUsize,
    // Lock pairing with `condvar`; holds no data of its own.
    mutex: Mutex<()>,
    condvar: Condvar,
}

// State machine: IDLE --park--> SLEEP --notify--> NOTIFY --park--> IDLE.
// A notify while IDLE goes straight to NOTIFY, making the next park a no-op.
const IDLE: usize = 0;
const NOTIFY: usize = 1;
const SLEEP: usize = 2;

thread_local! {
    static CURRENT_THREAD_NOTIFY: Arc<ThreadNotify> = Arc::new(ThreadNotify {
        state: AtomicUsize::new(IDLE),
        mutex: Mutex::new(()),
        condvar: Condvar::new(),
    });
}

impl ThreadNotify {
    // Runs `f` with this thread's notifier.
    fn with_current<F, R>(f: F) -> R
        where F: FnOnce(&Arc<ThreadNotify>) -> R,
    {
        CURRENT_THREAD_NOTIFY.with(|notify| f(notify))
    }

    fn park(&self) {
        // If currently notified, then we skip sleeping. This is checked outside
        // of the lock to avoid acquiring a mutex if not necessary.
        match self.state.compare_and_swap(NOTIFY, IDLE, Ordering::SeqCst) {
            NOTIFY => return,
            IDLE => {},
            _ => unreachable!(),
        }

        // The state is currently idle, so obtain the lock and then try to
        // transition to a sleeping state.
        let mut m = self.mutex.lock().unwrap();

        // Transition to sleeping
        match self.state.compare_and_swap(IDLE, SLEEP, Ordering::SeqCst) {
            NOTIFY => {
                // Notified before we could sleep, consume the notification and
                // exit
                self.state.store(IDLE, Ordering::SeqCst);
                return;
            }
            IDLE => {},
            _ => unreachable!(),
        }

        // Loop until we've been notified
        loop {
            m = self.condvar.wait(m).unwrap();

            // Transition back to idle, loop otherwise
            // (guards against spurious condvar wakeups).
            if NOTIFY == self.state.compare_and_swap(NOTIFY, IDLE, Ordering::SeqCst) {
                return;
            }
        }
    }
}

impl Notify for ThreadNotify {
    fn notify(&self, _unpark_id: usize) {
        // First, try transitioning from IDLE -> NOTIFY, this does not require a
        // lock.
        match self.state.compare_and_swap(IDLE, NOTIFY, Ordering::SeqCst) {
            IDLE | NOTIFY => return,
            SLEEP => {}
            _ => unreachable!(),
        }

        // The other half is sleeping, this requires a lock
        // (taken so the wakeup can't race with the sleeper between its
        // SLEEP transition and its condvar wait).
        let _m = self.mutex.lock().unwrap();

        // Transition from SLEEP -> NOTIFY
        match self.state.compare_and_swap(SLEEP, NOTIFY, Ordering::SeqCst) {
            SLEEP => {}
            _ => return,
        }

        // Wakeup the sleeper
        self.condvar.notify_one();
    }
}
+
// ===== UnparkEvent =====

/// For the duration of the given callback, add an "unpark event" to be
/// triggered when the task handle is used to unpark the task.
///
/// Unpark events are used to pass information about what event caused a task to
/// be unparked. In some cases, tasks are waiting on a large number of possible
/// events, and need precise information about the wakeup to avoid extraneous
/// polling.
///
/// Every `Task` handle comes with a set of unpark events which will fire when
/// `unpark` is called. When fired, these events insert an identifier into a
/// concurrent set, which the task can read from to determine what events
/// occurred.
///
/// This function immediately invokes the closure, `f`, but arranges things so
/// that `task::park` will produce a `Task` handle that includes the given
/// unpark event.
///
/// # Panics
///
/// This function will panic if a task is not currently being executed. That
/// is, this method can be dangerous to call outside of an implementation of
/// `poll`.
#[deprecated(note = "recommended to use `FuturesUnordered` instead")]
#[allow(deprecated)]
pub fn with_unpark_event<F, R>(event: UnparkEvent, f: F) -> R
    where F: FnOnce() -> R
{
    super::with(|task| {
        // Push `event` onto the borrowed, stack-allocated event list; the
        // rest of the task's state is carried over unchanged.
        let new_task = BorrowedTask {
            id: task.id,
            unpark: task.unpark,
            events: BorrowedEvents::One(&event, &task.events),
            map: task.map,
        };

        super::set(&new_task, f)
    })
}

/// A set insertion to trigger upon `unpark`.
///
/// Unpark events are used to communicate information about *why* an unpark
/// occurred, in particular populating sets with event identifiers so that the
/// unparked task can avoid extraneous polling. See `with_unpark_event` for
/// more.
#[derive(Clone)]
#[deprecated(note = "recommended to use `FuturesUnordered` instead")]
#[allow(deprecated)]
pub struct UnparkEvent {
    // Shared set receiving the id on unpark.
    set: Arc<EventSet>,
    // Identifier inserted into `set`.
    item: usize,
}

#[allow(deprecated)]
impl UnparkEvent {
    /// Construct an unpark event that will insert `id` into `set` when
    /// triggered.
    #[deprecated(note = "recommended to use `FuturesUnordered` instead")]
    pub fn new(set: Arc<EventSet>, id: usize) -> UnparkEvent {
        UnparkEvent {
            set: set,
            item: id,
        }
    }

    // Fire the event: record this event's id in its set.
    fn unpark(&self) {
        self.set.insert(self.item);
    }
}

#[allow(deprecated)]
impl fmt::Debug for UnparkEvent {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("UnparkEvent")
         .field("set", &"...")
         .field("item", &self.item)
         .finish()
    }
}

/// A concurrent set which allows for the insertion of `usize` values.
///
/// `EventSet`s are used to communicate precise information about the event(s)
/// that triggered a task notification. See `task::with_unpark_event` for details.
#[deprecated(since="0.1.18", note = "recommended to use `FuturesUnordered` instead")]
pub trait EventSet: Send + Sync + 'static {
    /// Insert the given ID into the set
    fn insert(&self, id: usize);
}
+
// Safe implementation of `UnsafeNotify` for `Arc` in the standard library.
//
// Note that this is a very unsafe implementation! The crucial piece is that
// these two values are considered equivalent:
//
// * Arc<T>
// * *const ArcWrapped<T>
//
// We don't actually know the layout of `ArcWrapped<T>` as it's an
// implementation detail in the standard library. We can make it work, though,
// by casting it through and back via an `Arc<T>`.
//
// This also means that you won't actually find `UnsafeNotify for Arc<T>`
// because it's the wrong level of indirection. These methods are sort of
// receiving Arc<T>, but not an owned version. It's... complicated. We may be
// one of the first users of unsafe trait objects!

struct ArcWrapped<T>(PhantomData<T>);

impl<T: Notify + 'static> Notify for ArcWrapped<T> {
    fn notify(&self, id: usize) {
        unsafe {
            // SAFETY: a `*const ArcWrapped<T>` is by construction (see
            // `From<Arc<T>>` below) the raw form of an `Arc<T>`, so a pointer
            // to it can be reinterpreted as a borrowed `&Arc<T>`.
            let me: *const ArcWrapped<T> = self;
            T::notify(&*(&me as *const *const ArcWrapped<T> as *const Arc<T>),
                      id)
        }
    }

    fn clone_id(&self, id: usize) -> usize {
        unsafe {
            // SAFETY: same pointer-punning argument as `notify` above.
            let me: *const ArcWrapped<T> = self;
            T::clone_id(&*(&me as *const *const ArcWrapped<T> as *const Arc<T>),
                        id)
        }
    }

    fn drop_id(&self, id: usize) {
        unsafe {
            // SAFETY: same pointer-punning argument as `notify` above.
            let me: *const ArcWrapped<T> = self;
            T::drop_id(&*(&me as *const *const ArcWrapped<T> as *const Arc<T>),
                       id)
        }
    }
}

unsafe impl<T: Notify + 'static> UnsafeNotify for ArcWrapped<T> {
    unsafe fn clone_raw(&self) -> NotifyHandle {
        // Borrow the underlying `Arc` (as in `notify` above) and bump its
        // refcount; the clone is then re-wrapped into a fresh handle.
        let me: *const ArcWrapped<T> = self;
        let arc = (*(&me as *const *const ArcWrapped<T> as *const Arc<T>)).clone();
        NotifyHandle::from(arc)
    }

    unsafe fn drop_raw(&self) {
        // Reinterpret the raw pointer as the owned `Arc` it originally was
        // and drop it in place, releasing the reference held by the handle.
        let mut me: *const ArcWrapped<T> = self;
        let me = &mut me as *mut *const ArcWrapped<T> as *mut Arc<T>;
        ptr::drop_in_place(me);
    }
}

impl<T> From<Arc<T>> for NotifyHandle
    where T: Notify + 'static,
{
    fn from(rc: Arc<T>) -> NotifyHandle {
        unsafe {
            // Transfer ownership of the `Arc` into the raw pointer stored in
            // the handle; `drop_raw` above reconstitutes and releases it.
            let ptr = mem::transmute::<Arc<T>, *mut ArcWrapped<T>>(rc);
            NotifyHandle::new(ptr)
        }
    }
}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/futures/src/task_impl/std/task_rc.rs
@@ -0,0 +1,129 @@
+#![cfg(feature = "with-deprecated")]
+#![allow(deprecated)]
+#![deprecated(since = "0.1.4",
+              note = "replaced with `BiLock` in many cases, otherwise slated \
+                      for removal due to confusion")]
+
+use std::prelude::v1::*;
+use std::sync::Arc;
+use std::cell::UnsafeCell;
+use task_impl;
+
+// One critical piece of this module's contents are the `TaskRc<A>` handles.
+// The purpose of this is to conceptually be able to store data in a task,
+// allowing it to be accessed within multiple futures at once. For example if
+// you have some concurrent futures working, they may all want mutable access to
+// some data. We already know that when the futures are being poll'd that we're
+// entirely synchronized (aka `&mut Task`), so you shouldn't require an
+// `Arc<Mutex<T>>` to share as the synchronization isn't necessary!
+//
+// So the idea here is that you insert data into a task via `Task::insert`, and
+// a handle to that data is then returned to you. That handle can later get
+// presented to the task itself to actually retrieve the underlying data. The
+// invariant is that the data can only ever be accessed with the task present,
+// and the lifetime of the actual data returned is connected to the lifetime of
+// the task itself.
+//
+// Conceptually I at least like to think of this as "dynamically adding more
+// struct fields to a `Task`". Each call to insert creates a new "name" for the
+// struct field, a `TaskRc<A>`, and then you can access the fields of a struct
+// with the struct itself (`Task`) as well as the name of the field
+// (`TaskRc<A>`). If that analogy doesn't make sense then oh well, it at least
+// helped me!
+//
+// So anyway, we do some interesting trickery here to actually get it to work.
+// Each `TaskRc<A>` handle stores `Arc<UnsafeCell<A>>`. So it turns out, we're
+// not even adding data to the `Task`! Each `TaskRc<A>` contains a reference
+// to this `Arc`, and `TaskRc` handles can be cloned which just bumps the
+// reference count on the `Arc` itself.
+//
+// As before, though, you can present the `Arc` to a `Task` and if they
+// originated from the same place you're allowed safe access to the internals.
+// We allow both shared and mutable access without the `Sync` bound on the data,
+// crucially noting that a `Task` itself is not `Sync`.
+//
+// So hopefully I've convinced you of this point that the `get` and `get_mut`
+// methods below are indeed safe. The data is always valid as it's stored in an
+// `Arc`, and access is only allowed with the proof of the associated `Task`.
+// One thing you might be asking yourself though is what exactly is this "proof
+// of a task"? Right now it's a `usize` corresponding to the `Task`'s
+// `TaskHandle` arc allocation.
+//
+// Wait a minute, isn't that the ABA problem! That is, we create a task A, add
+// some data to it, destroy task A, do some work, create a task B, and then ask
+// to get the data from task B. In this case though the point of the
+// `task_inner` "proof" field is simply that there's some non-`Sync` token
+// proving that you can get access to the data. So while weird, this case should
+// still be safe, as the data's not stored in the task itself.
+
+/// A reference to a piece of data that's accessible only within a specific
+/// `Task`.
+///
+/// This data is `Send` even when `A` is not `Sync`, because the data stored
+/// within is accessed in a single-threaded way. The thread accessing it may
+/// change over time, if the task migrates, so `A` must be `Send`.
+#[derive(Debug)]
+pub struct TaskRc<A> {
+    task: task_impl::Task,
+    ptr: Arc<UnsafeCell<A>>,
+}
+
+// for safety here, see docs at the top of this module
+unsafe impl<A: Send> Send for TaskRc<A> {}
+unsafe impl<A: Sync> Sync for TaskRc<A> {}
+
+impl<A> TaskRc<A> {
+    /// Inserts a new piece of task-local data into this task, returning a
+    /// reference to it.
+    ///
+    /// Ownership of the data will be transferred to the task, and the data will
+    /// be destroyed when the task itself is destroyed. The returned value can
+    /// be passed to the `with` method to get a reference back to the original
+    /// data.
+    ///
+    /// Note that the returned handle is cloneable and copyable and can be sent
+    /// to other futures which will be associated with the same task. All
+    /// futures will then have access to this data when passed the reference
+    /// back.
+    ///
+    /// # Panics
+    ///
+    /// This function will panic if a task is not currently running.
+    pub fn new(a: A) -> TaskRc<A> {
+        TaskRc {
+            task: task_impl::park(),
+            ptr: Arc::new(UnsafeCell::new(a)),
+        }
+    }
+
+    /// Operate with a reference to the underlying data.
+    ///
+    /// This method should be passed a handle previously returned by
+    /// `Task::insert`. That handle, when passed back into this method, will
+    /// retrieve a reference to the original data.
+    ///
+    /// # Panics
+    ///
+    /// This method will panic if a task is not currently running or if `self`
+    /// does not belong to the task that is currently running. That is, if
+    /// another task generated the `data` handle passed in, this method will
+    /// panic.
+    pub fn with<F, R>(&self, f: F) -> R
+        where F: FnOnce(&A) -> R
+    {
+        if !self.task.is_current() {
+            panic!("TaskRc being accessed on task it does not belong to");
+        }
+
+        f(unsafe { &*self.ptr.get() })
+    }
+}
+
+impl<A> Clone for TaskRc<A> {
+    fn clone(&self) -> TaskRc<A> {
+        TaskRc {
+            task: self.task.clone(),
+            ptr: self.ptr.clone(),
+        }
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/futures/src/task_impl/std/unpark_mutex.rs
@@ -0,0 +1,144 @@
+use std::cell::UnsafeCell;
+use std::sync::atomic::AtomicUsize;
+use std::sync::atomic::Ordering::SeqCst;
+
+/// A "lock" around data `D`, which employs a *helping* strategy.
+///
+/// Used to ensure that concurrent `unpark` invocations lead to (1) `poll` being
+/// invoked on only a single thread at a time (2) `poll` being invoked at least
+/// once after each `unpark` (unless the future has completed).
+pub struct UnparkMutex<D> {
+    // The state of task execution (state machine described below)
+    status: AtomicUsize,
+
+    // The actual task data, accessible only in the POLLING state
+    inner: UnsafeCell<Option<D>>,
+}
+
+// `UnparkMutex<D>` functions in many ways like a `Mutex<D>`, except that on
+// acquisition failure, the current lock holder performs the desired work --
+// re-polling.
+//
+// As such, these impls mirror those for `Mutex<D>`. In particular, a reference
+// to `UnparkMutex` can be used to gain `&mut` access to the inner data, which
+// must therefore be `Send`.
+unsafe impl<D: Send> Send for UnparkMutex<D> {}
+unsafe impl<D: Send> Sync for UnparkMutex<D> {}
+
+// There are four possible task states, listed below with their possible
+// transitions:
+
+// The task is blocked, waiting on an event
+const WAITING: usize = 0;       // --> POLLING
+
+// The task is actively being polled by a thread; arrival of additional events
+// of interest should move it to the REPOLL state
+const POLLING: usize = 1;       // --> WAITING, REPOLL, or COMPLETE
+
+// The task is actively being polled, but will need to be re-polled upon
+// completion to ensure that all events were observed.
+const REPOLL: usize = 2;        // --> POLLING
+
+// The task has finished executing (either successfully or with an error/panic)
+const COMPLETE: usize = 3;      // No transitions out
+
+impl<D> UnparkMutex<D> {
+    pub fn new() -> UnparkMutex<D> {
+        UnparkMutex {
+            status: AtomicUsize::new(WAITING),
+            inner: UnsafeCell::new(None),
+        }
+    }
+
+    /// Attempt to "notify" the mutex that a poll should occur.
+    ///
+    /// An `Ok` result indicates that the `POLLING` state has been entered, and
+    /// the caller can proceed to poll the future. An `Err` result indicates
+    /// that polling is not necessary (because the task is finished or the
+    /// polling has been delegated).
+    pub fn notify(&self) -> Result<D, ()> {
+        let mut status = self.status.load(SeqCst);
+        loop {
+            match status {
+                // The task is idle, so try to run it immediately.
+                WAITING => {
+                    match self.status.compare_exchange(WAITING, POLLING,
+                                                       SeqCst, SeqCst) {
+                        Ok(_) => {
+                            let data = unsafe {
+                                // SAFETY: we've ensured mutual exclusion via
+                                // the status protocol; we are the only thread
+                                // that has transitioned to the POLLING state,
+                                // and we won't transition back to QUEUED until
+                                // the lock is "released" by this thread. See
+                                // the protocol diagram above.
+                                (*self.inner.get()).take().unwrap()
+                            };
+                            return Ok(data);
+                        }
+                        Err(cur) => status = cur,
+                    }
+                }
+
+                // The task is being polled, so we need to record that it should
+                // be *repolled* when complete.
+                POLLING => {
+                    match self.status.compare_exchange(POLLING, REPOLL,
+                                                       SeqCst, SeqCst) {
+                        Ok(_) => return Err(()),
+                        Err(cur) => status = cur,
+                    }
+                }
+
+                // The task is already scheduled for polling, or is complete, so
+                // we've got nothing to do.
+                _ => return Err(()),
+            }
+        }
+    }
+
+    /// Alert the mutex that polling is about to begin, clearing any accumulated
+    /// re-poll requests.
+    ///
+    /// # Safety
+    ///
+    /// Callable only from the `POLLING`/`REPOLL` states, i.e. between
+    /// successful calls to `notify` and `wait`/`complete`.
+    pub unsafe fn start_poll(&self) {
+        self.status.store(POLLING, SeqCst);
+    }
+
+    /// Alert the mutex that polling completed with NotReady.
+    ///
+    /// # Safety
+    ///
+    /// Callable only from the `POLLING`/`REPOLL` states, i.e. between
+    /// successful calls to `notify` and `wait`/`complete`.
+    pub unsafe fn wait(&self, data: D) -> Result<(), D> {
+        *self.inner.get() = Some(data);
+
+        match self.status.compare_exchange(POLLING, WAITING, SeqCst, SeqCst) {
+            // no unparks came in while we were running
+            Ok(_) => Ok(()),
+
+            // guaranteed to be in REPOLL state; just clobber the
+            // state and run again.
+            Err(status) => {
+                assert_eq!(status, REPOLL);
+                self.status.store(POLLING, SeqCst);
+                Err((*self.inner.get()).take().unwrap())
+            }
+        }
+    }
+
+    /// Alert the mutex that the task has completed execution and should not be
+    /// notified again.
+    ///
+    /// # Safety
+    ///
+    /// Callable only from the `POLLING`/`REPOLL` states, i.e. between
+    /// successful calls to `notify` and `wait`/`complete`.
+    pub unsafe fn complete(&self) {
+        self.status.store(COMPLETE, SeqCst);
+    }
+}
deleted file mode 100644
--- a/third_party/rust/futures/src/task_impl/task_rc.rs
+++ /dev/null
@@ -1,131 +0,0 @@
-#![cfg(feature = "with-deprecated")]
-#![allow(deprecated)]
-#![deprecated(since = "0.1.4",
-              note = "replaced with `BiLock` in many cases, otherwise slated \
-                      for removal due to confusion")]
-
-use std::prelude::v1::*;
-use std::sync::Arc;
-use std::cell::UnsafeCell;
-
-// One critical piece of this module's contents are the `TaskRc<A>` handles.
-// The purpose of this is to conceptually be able to store data in a task,
-// allowing it to be accessed within multiple futures at once. For example if
-// you have some concurrent futures working, they may all want mutable access to
-// some data. We already know that when the futures are being poll'ed that we're
-// entirely synchronized (aka `&mut Task`), so you shouldn't require an
-// `Arc<Mutex<T>>` to share as the synchronization isn't necessary!
-//
-// So the idea here is that you insert data into a task via `Task::insert`, and
-// a handle to that data is then returned to you. That handle can later get
-// presented to the task itself to actually retrieve the underlying data. The
-// invariant is that the data can only ever be accessed with the task present,
-// and the lifetime of the actual data returned is connected to the lifetime of
-// the task itself.
-//
-// Conceptually I at least like to think of this as "dynamically adding more
-// struct fields to a `Task`". Each call to insert creates a new "name" for the
-// struct field, a `TaskRc<A>`, and then you can access the fields of a struct
-// with the struct itself (`Task`) as well as the name of the field
-// (`TaskRc<A>`). If that analogy doesn't make sense then oh well, it at least
-// helped me!
-//
-// So anyway, we do some interesting trickery here to actually get it to work.
-// Each `TaskRc<A>` handle stores `Arc<UnsafeCell<A>>`. So it turns out, we're
-// not even adding data to the `Task`! Each `TaskRc<A>` contains a reference
-// to this `Arc`, and `TaskRc` handles can be cloned which just bumps the
-// reference count on the `Arc` itself.
-//
-// As before, though, you can present the `Arc` to a `Task` and if they
-// originated from the same place you're allowed safe access to the internals.
-// We allow but shared and mutable access without the `Sync` bound on the data,
-// crucially noting that a `Task` itself is not `Sync`.
-//
-// So hopefully I've convinced you of this point that the `get` and `get_mut`
-// methods below are indeed safe. The data is always valid as it's stored in an
-// `Arc`, and access is only allowed with the proof of the associated `Task`.
-// One thing you might be asking yourself though is what exactly is this "proof
-// of a task"? Right now it's a `usize` corresponding to the `Task`'s
-// `TaskHandle` arc allocation.
-//
-// Wait a minute, isn't that the ABA problem! That is, we create a task A, add
-// some data to it, destroy task A, do some work, create a task B, and then ask
-// to get the data from task B. In this case though the point of the
-// `task_inner` "proof" field is simply that there's some non-`Sync` token
-// proving that you can get access to the data. So while weird, this case should
-// still be safe, as the data's not stored in the task itself.
-
-/// A reference to a piece of data that's accessible only within a specific
-/// `Task`.
-///
-/// This data is `Send` even when `A` is not `Sync`, because the data stored
-/// within is accessed in a single-threaded way. The thread accessing it may
-/// change over time, if the task migrates, so `A` must be `Send`.
-#[derive(Debug)]
-pub struct TaskRc<A> {
-    task_id: usize,
-    ptr: Arc<UnsafeCell<A>>,
-}
-
-// for safety here, see docs at the top of this module
-unsafe impl<A: Send> Send for TaskRc<A> {}
-unsafe impl<A: Sync> Sync for TaskRc<A> {}
-
-impl<A> TaskRc<A> {
-    /// Inserts a new piece of task-local data into this task, returning a
-    /// reference to it.
-    ///
-    /// Ownership of the data will be transferred to the task, and the data will
-    /// be destroyed when the task itself is destroyed. The returned value can
-    /// be passed to the `with` method to get a reference back to the original
-    /// data.
-    ///
-    /// Note that the returned handle is cloneable and copyable and can be sent
-    /// to other futures which will be associated with the same task. All
-    /// futures will then have access to this data when passed the reference
-    /// back.
-    ///
-    /// # Panics
-    ///
-    /// This function will panic if a task is not currently running.
-    pub fn new(a: A) -> TaskRc<A> {
-        super::with(|task| {
-            TaskRc {
-                task_id: task.id,
-                ptr: Arc::new(UnsafeCell::new(a)),
-            }
-        })
-    }
-
-    /// Operate with a reference to the underlying data.
-    ///
-    /// This method should be passed a handle previously returned by
-    /// `Task::insert`. That handle, when passed back into this method, will
-    /// retrieve a reference to the original data.
-    ///
-    /// # Panics
-    ///
-    /// This method will panic if a task is not currently running or if `self`
-    /// does not belong to the task that is currently running. That is, if
-    /// another task generated the `data` handle passed in, this method will
-    /// panic.
-    pub fn with<F, R>(&self, f: F) -> R
-        where F: FnOnce(&A) -> R
-    {
-        // for safety here, see docs at the top of this module
-        super::with(|task| {
-            assert!(self.task_id == task.id,
-                    "TaskRc being accessed on task it does not belong to");
-            f(unsafe { &*self.ptr.get() })
-        })
-    }
-}
-
-impl<A> Clone for TaskRc<A> {
-    fn clone(&self) -> TaskRc<A> {
-        TaskRc {
-            task_id: self.task_id,
-            ptr: self.ptr.clone(),
-        }
-    }
-}
deleted file mode 100644
--- a/third_party/rust/futures/src/task_impl/unpark_mutex.rs
+++ /dev/null
@@ -1,144 +0,0 @@
-use std::cell::UnsafeCell;
-use std::sync::atomic::AtomicUsize;
-use std::sync::atomic::Ordering::SeqCst;
-
-/// A "lock" around data `D`, which employs a *helping* strategy.
-///
-/// Used to ensure that concurrent `unpark` invocations lead to (1) `poll` being
-/// invoked on only a single thread at a time (2) `poll` being invoked at least
-/// once after each `unpark` (unless the future has completed).
-pub struct UnparkMutex<D> {
-    // The state of task execution (state machine described below)
-    status: AtomicUsize,
-
-    // The actual task data, accessible only in the POLLING state
-    inner: UnsafeCell<Option<D>>,
-}
-
-// `UnparkMutex<D>` functions in many ways like a `Mutex<D>`, except that on
-// acquisition failure, the current lockholder performs the desired work --
-// re-polling.
-//
-// As such, these impls mirror those for `Mutex<D>`. In particular, a reference
-// to `UnparkMutex` can be used to gain `&mut` access to the inner data, which
-// must therefore be `Send`.
-unsafe impl<D: Send> Send for UnparkMutex<D> {}
-unsafe impl<D: Send> Sync for UnparkMutex<D> {}
-
-// There are four possible task states, listed below with their possible
-// transitions:
-
-// The task is blocked, waiting on an event
-const WAITING: usize = 0;       // --> POLLING
-
-// The task is actively being polled by a thread; arrival of additional events
-// of interest should move it to the REPOLL state
-const POLLING: usize = 1;       // --> WAITING, REPOLL, or COMPLETE
-
-// The task is actively being polled, but will need to be re-polled upon
-// completion to ensure that all events were observed.
-const REPOLL: usize = 2;        // --> POLLING
-
-// The task has finished executing (either successfully or with an error/panic)
-const COMPLETE: usize = 3;      // No transitions out
-
-impl<D> UnparkMutex<D> {
-    pub fn new() -> UnparkMutex<D> {
-        UnparkMutex {
-            status: AtomicUsize::new(WAITING),
-            inner: UnsafeCell::new(None),
-        }
-    }
-
-    /// Attempt to "notify" the mutex that a poll should occur.
-    ///
-    /// An `Ok` result indicates that the `POLLING` state has been entered, and
-    /// the caller can proceed to poll the future. An `Err` result indicates
-    /// that polling is not necessary (because the task is finished or the
-    /// polling has been delegated).
-    pub fn notify(&self) -> Result<D, ()> {
-        let mut status = self.status.load(SeqCst);
-        loop {
-            match status {
-                // The task is idle, so try to run it immediately.
-                WAITING => {
-                    match self.status.compare_exchange(WAITING, POLLING,
-                                                       SeqCst, SeqCst) {
-                        Ok(_) => {
-                            let data = unsafe {
-                                // SAFETY: we've ensured mutual exclusion via
-                                // the status protocol; we are the only thread
-                                // that has transitioned to the POLLING state,
-                                // and we won't transition back to QUEUED until
-                                // the lock is "released" by this thread. See
-                                // the protocol diagram above.
-                                (*self.inner.get()).take().unwrap()
-                            };
-                            return Ok(data);
-                        }
-                        Err(cur) => status = cur,
-                    }
-                }
-
-                // The task is being polled, so we need to record that it should
-                // be *repolled* when complete.
-                POLLING => {
-                    match self.status.compare_exchange(POLLING, REPOLL,
-                                                       SeqCst, SeqCst) {
-                        Ok(_) => return Err(()),
-                        Err(cur) => status = cur,
-                    }
-                }
-
-                // The task is already scheduled for polling, or is complete, so
-                // we've got nothing to do.
-                _ => return Err(()),
-            }
-        }
-    }
-
-    /// Alert the mutex that polling is about to begin, clearing any accumulated
-    /// re-poll requests.
-    ///
-    /// # Safety
-    ///
-    /// Callable only from the `POLLING`/`REPOLL` states, i.e. between
-    /// successful calls to `notify` and `wait`/`complete`.
-    pub unsafe fn start_poll(&self) {
-        self.status.store(POLLING, SeqCst);
-    }
-
-    /// Alert the mutex that polling completed with NotReady.
-    ///
-    /// # Safety
-    ///
-    /// Callable only from the `POLLING`/`REPOLL` states, i.e. between
-    /// successful calls to `notify` and `wait`/`complete`.
-    pub unsafe fn wait(&self, data: D) -> Result<(), D> {
-        *self.inner.get() = Some(data);
-
-        match self.status.compare_exchange(POLLING, WAITING, SeqCst, SeqCst) {
-            // no unparks came in while we were running
-            Ok(_) => Ok(()),
-
-            // guaranteed to be in REPOLL state; just clobber the
-            // state and run again.
-            Err(status) => {
-                assert_eq!(status, REPOLL);
-                self.status.store(POLLING, SeqCst);
-                Err((*self.inner.get()).take().unwrap())
-            }
-        }
-    }
-
-    /// Alert the mutex that the task has completed execution and should not be
-    /// notified again.
-    ///
-    /// # Safety
-    ///
-    /// Callable only from the `POLLING`/`REPOLL` states, i.e. between
-    /// successful calls to `notify` and `wait`/`complete`.
-    pub unsafe fn complete(&self) {
-        self.status.store(COMPLETE, SeqCst);
-    }
-}
--- a/third_party/rust/futures/src/unsync/mpsc.rs
+++ b/third_party/rust/futures/src/unsync/mpsc.rs
@@ -8,17 +8,21 @@ use std::any::Any;
 use std::cell::RefCell;
 use std::collections::VecDeque;
 use std::error::Error;
 use std::fmt;
 use std::mem;
 use std::rc::{Rc, Weak};
 
 use task::{self, Task};
-use {Async, AsyncSink, Poll, StartSend, Sink, Stream};
+use future::Executor;
+use sink::SendAll;
+use resultstream::{self, Results};
+use unsync::oneshot;
+use {Async, AsyncSink, Future, Poll, StartSend, Sink, Stream};
 
 /// Creates a bounded in-memory channel with buffered storage.
 ///
 /// This method creates concrete implementations of the `Stream` and `Sink`
 /// traits which can be used to communicate a stream of values between tasks
 /// with backpressure. The channel capacity is exactly `buffer`. On average,
 /// sending a message through this channel performs no dynamic allocation.
 pub fn channel<T>(buffer: usize) -> (Sender<T>, Receiver<T>) {
@@ -26,73 +30,65 @@ pub fn channel<T>(buffer: usize) -> (Sen
 }
 
 fn channel_<T>(buffer: Option<usize>) -> (Sender<T>, Receiver<T>) {
     let shared = Rc::new(RefCell::new(Shared {
         buffer: VecDeque::new(),
         capacity: buffer,
         blocked_senders: VecDeque::new(),
         blocked_recv: None,
-        sender_count: 1,
     }));
     let sender = Sender { shared: Rc::downgrade(&shared) };
     let receiver = Receiver { state: State::Open(shared) };
     (sender, receiver)
 }
 
 #[derive(Debug)]
 struct Shared<T> {
     buffer: VecDeque<T>,
     capacity: Option<usize>,
     blocked_senders: VecDeque<Task>,
     blocked_recv: Option<Task>,
-    // TODO: Redundant to Rc::weak_count; use that if/when stabilized
-    sender_count: usize,
 }
 
 /// The transmission end of a channel.
 ///
 /// This is created by the `channel` function.
 #[derive(Debug)]
 pub struct Sender<T> {
     shared: Weak<RefCell<Shared<T>>>,
 }
 
 impl<T> Sender<T> {
     fn do_send(&self, msg: T) -> StartSend<T, SendError<T>> {
         let shared = match self.shared.upgrade() {
             Some(shared) => shared,
-            None => return Err(SendError(msg)),
+            None => return Err(SendError(msg)), // receiver was dropped
         };
         let mut shared = shared.borrow_mut();
 
         match shared.capacity {
             Some(capacity) if shared.buffer.len() == capacity => {
-                shared.blocked_senders.push_back(task::park());
+                shared.blocked_senders.push_back(task::current());
                 Ok(AsyncSink::NotReady(msg))
             }
             _ => {
                 shared.buffer.push_back(msg);
                 if let Some(task) = shared.blocked_recv.take() {
-                    drop(shared);
-                    task.unpark();
+                    task.notify();
                 }
                 Ok(AsyncSink::Ready)
             }
         }
     }
 }
 
 impl<T> Clone for Sender<T> {
     fn clone(&self) -> Self {
-        let result = Sender { shared: self.shared.clone() };
-        if let Some(shared) = self.shared.upgrade() {
-            shared.borrow_mut().sender_count += 1;
-        }
-        result
+        Sender { shared: self.shared.clone() }
     }
 }
 
 impl<T> Sink for Sender<T> {
     type SinkItem = T;
     type SinkError = SendError<T>;
 
     fn start_send(&mut self, msg: T) -> StartSend<T, SendError<T>> {
@@ -109,23 +105,20 @@ impl<T> Sink for Sender<T> {
 }
 
 impl<T> Drop for Sender<T> {
     fn drop(&mut self) {
         let shared = match self.shared.upgrade() {
             Some(shared) => shared,
             None => return,
         };
-        let mut shared = shared.borrow_mut();
-        shared.sender_count -= 1;
-        if shared.sender_count == 0 {
-            if let Some(task) = shared.blocked_recv.take() {
+        if Rc::weak_count(&shared) == 0 {
+            if let Some(task) = shared.borrow_mut().blocked_recv.take() {
                 // Wake up receiver as its stream has ended
-                drop(shared);
-                task.unpark();
+                task.notify();
             }
         }
     }
 }
 
 /// The receiving end of a channel which implements the `Stream` trait.
 ///
 /// This is created by the `channel` function.
@@ -154,17 +147,17 @@ impl<T> Receiver<T> {
                 let items = mem::replace(&mut state.buffer, VecDeque::new());
                 let blockers = mem::replace(&mut state.blocked_senders, VecDeque::new());
                 (blockers, items)
             }
             State::Closed(_) => return,
         };
         self.state = State::Closed(items);
         for task in blockers {
-            task.unpark();
+            task.notify();
         }
     }
 }
 
 impl<T> Stream for Receiver<T> {
     type Item = T;
     type Error = ();
 
@@ -181,21 +174,21 @@ impl<T> Stream for Receiver<T> {
             // stream.
             return Ok(Async::Ready(shared.borrow_mut().buffer.pop_front()));
         }
 
         let mut shared = me.borrow_mut();
         if let Some(msg) = shared.buffer.pop_front() {
             if let Some(task) = shared.blocked_senders.pop_front() {
                 drop(shared);
-                task.unpark();
+                task.notify();
             }
             Ok(Async::Ready(Some(msg)))
         } else {
-            shared.blocked_recv = Some(task::park());
+            shared.blocked_recv = Some(task::current());
             Ok(Async::NotReady)
         }
     }
 }
 
 impl<T> Drop for Receiver<T> {
     fn drop(&mut self) {
         self.close();
@@ -247,26 +240,37 @@ impl<'a, T> Sink for &'a UnboundedSender
 }
 
 impl<T> UnboundedSender<T> {
     /// Sends the provided message along this channel.
     ///
     /// This is an unbounded sender, so this function differs from `Sink::send`
     /// by ensuring the return type reflects that the channel is always ready to
     /// receive messages.
+    #[deprecated(note = "renamed to `unbounded_send`")]
+    #[doc(hidden)]
     pub fn send(&self, msg: T) -> Result<(), SendError<T>> {
+        self.unbounded_send(msg)
+    }
+
+    /// Sends the provided message along this channel.
+    ///
+    /// This is an unbounded sender, so this function differs from `Sink::send`
+    /// by ensuring the return type reflects that the channel is always ready to
+    /// receive messages.
+    pub fn unbounded_send(&self, msg: T) -> Result<(), SendError<T>> {
         let shared = match self.0.shared.upgrade() {
             Some(shared) => shared,
             None => return Err(SendError(msg)),
         };
         let mut shared = shared.borrow_mut();
         shared.buffer.push_back(msg);
         if let Some(task) = shared.blocked_recv.take() {
             drop(shared);
-            task.unpark();
+            task.notify();
         }
         Ok(())
     }
 }
 
 /// The receiving end of an unbounded channel.
 ///
 /// This is created by the `unbounded` function.
@@ -325,8 +329,142 @@ impl<T: Any> Error for SendError<T> {
 }
 
 impl<T> SendError<T> {
     /// Returns the message that was attempted to be sent but failed.
     pub fn into_inner(self) -> T {
         self.0
     }
 }
+
+/// Handle returned from the `spawn` function.
+///
+/// This handle is a stream that proxies a stream on a separate `Executor`.
+/// Created through the `mpsc::spawn` function, this handle will produce
+/// the same values as the proxied stream, as they are produced in the executor,
+/// and uses a limited buffer to exert back-pressure on the remote stream.
+///
+/// If this handle is dropped, then the stream will no longer be polled and is
+/// scheduled to be dropped.
+pub struct SpawnHandle<Item, Error> {
+    inner: Receiver<Result<Item, Error>>,
+    _cancel_tx: oneshot::Sender<()>,
+}
+
+/// Type of future which `Executor` instances must be able to execute for `spawn`.
+pub struct Execute<S: Stream> {
+    inner: SendAll<Sender<Result<S::Item, S::Error>>, Results<S, SendError<Result<S::Item, S::Error>>>>,
+    cancel_rx: oneshot::Receiver<()>,
+}
+
+/// Spawns a `stream` onto the instance of `Executor` provided, `executor`,
+/// returning a handle representing the remote stream.
+///
+/// The `stream` will be canceled if the `SpawnHandle` is dropped.
+///
+/// The `SpawnHandle` returned is a stream that is a proxy for `stream` itself.
+/// When `stream` has additional items available, then the `SpawnHandle`
+/// will have those same items available.
+///
+/// At most `buffer + 1` elements will be buffered at a time. If the buffer
+/// is full, then `stream` will stop progressing until more space is available.
+/// This allows the `SpawnHandle` to exert backpressure on the `stream`.
+///
+/// # Panics
+///
+/// This function will panic if `executor` is unable to spawn a `Future` containing
+/// the entirety of the `stream`.
+pub fn spawn<S, E>(stream: S, executor: &E, buffer: usize) -> SpawnHandle<S::Item, S::Error>
+    where S: Stream,
+          E: Executor<Execute<S>>
+{
+    let (cancel_tx, cancel_rx) = oneshot::channel();
+    let (tx, rx) = channel(buffer);
+    executor.execute(Execute {
+        inner: tx.send_all(resultstream::new(stream)),
+        cancel_rx: cancel_rx,
+    }).expect("failed to spawn stream");
+    SpawnHandle {
+        inner: rx,
+        _cancel_tx: cancel_tx,
+    }
+}
+
+/// Spawns a `stream` onto the instance of `Executor` provided, `executor`,
+/// returning a handle representing the remote stream, with unbounded buffering.
+///
+/// The `stream` will be canceled if the `SpawnHandle` is dropped.
+///
+/// The `SpawnHandle` returned is a stream that is a proxy for `stream` itself.
+/// When `stream` has additional items available, then the `SpawnHandle`
+/// will have those same items available.
+///
+/// An unbounded buffer is used, which means that values will be buffered as
+/// fast as `stream` can produce them, without any backpressure. Therefore, if
+/// `stream` is an infinite stream, it can use an unbounded amount of memory, and
+/// potentially hog CPU resources. In particular, if `stream` is infinite
+/// and doesn't ever yield (by returning `Async::NotReady` from `poll`), it
+/// will result in an infinite loop.
+///
+/// # Panics
+///
+/// This function will panic if `executor` is unable to spawn a `Future` containing
+/// the entirety of the `stream`.
+pub fn spawn_unbounded<S,E>(stream: S, executor: &E) -> SpawnHandle<S::Item, S::Error>
+    where S: Stream,
+          E: Executor<Execute<S>>
+{
+    let (cancel_tx, cancel_rx) = oneshot::channel();
+    let (tx, rx) = channel_(None);
+    executor.execute(Execute {
+        inner: tx.send_all(resultstream::new(stream)),
+        cancel_rx: cancel_rx,
+    }).expect("failed to spawn stream");
+    SpawnHandle {
+        inner: rx,
+        _cancel_tx: cancel_tx,
+    }
+}
+
+impl<I, E> Stream for SpawnHandle<I, E> {
+    type Item = I;
+    type Error = E;
+
+    fn poll(&mut self) -> Poll<Option<I>, E> {
+        match self.inner.poll() {
+            Ok(Async::Ready(Some(Ok(t)))) => Ok(Async::Ready(Some(t.into()))),
+            Ok(Async::Ready(Some(Err(e)))) => Err(e),
+            Ok(Async::Ready(None)) => Ok(Async::Ready(None)),
+            Ok(Async::NotReady) => Ok(Async::NotReady),
+            Err(_) => unreachable!("mpsc::Receiver should never return Err"),
+        }
+    }
+}
+
+impl<I, E> fmt::Debug for SpawnHandle<I, E> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_struct("SpawnHandle")
+            .finish()
+    }
+}
+
+impl<S: Stream> Future for Execute<S> {
+    type Item = ();
+    type Error = ();
+
+    fn poll(&mut self) -> Poll<(), ()> {
+        match self.cancel_rx.poll() {
+            Ok(Async::NotReady) => (),
+            _ => return Ok(Async::Ready(())),
+        }
+        match self.inner.poll() {
+            Ok(Async::NotReady) => Ok(Async::NotReady),
+            _ => Ok(Async::Ready(()))
+        }
+    }
+}
+
+impl<S: Stream> fmt::Debug for Execute<S> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_struct("Execute")
+         .finish()
+    }
+}
--- a/third_party/rust/futures/src/unsync/oneshot.rs
+++ b/third_party/rust/futures/src/unsync/oneshot.rs
@@ -1,17 +1,19 @@
 //! A one-shot, futures-aware channel
 //!
 //! This channel is similar to that in `sync::oneshot` but cannot be sent across
 //! threads.
 
-use std::cell::RefCell;
+use std::cell::{Cell, RefCell};
+use std::fmt;
 use std::rc::{Rc, Weak};
 
 use {Future, Poll, Async};
+use future::{Executor, IntoFuture, Lazy, lazy};
 use task::{self, Task};
 
 /// Creates a new futures-aware, one-shot channel.
 ///
 /// This function is the same as `sync::oneshot::channel` except that the
 /// returned values cannot be sent across threads.
 pub fn channel<T>() -> (Sender<T>, Receiver<T>) {
     let inner = Rc::new(RefCell::new(Inner {
@@ -52,19 +54,17 @@ pub struct Receiver<T> {
 }
 
 #[derive(Debug)]
 enum State<T> {
     Open(Rc<RefCell<Inner<T>>>),
     Closed(Option<T>),
 }
 
-/// Represents that the `Sender` dropped before sending a message.
-#[derive(Clone, Copy, PartialEq, Eq, Debug)]
-pub struct Canceled;
+pub use sync::oneshot::Canceled;
 
 #[derive(Debug)]
 struct Inner<T> {
     value: Option<T>,
     tx_task: Option<Task>,
     rx_task: Option<Task>,
 }
 
@@ -91,51 +91,68 @@ impl<T> Sender<T> {
     /// Polls this `Sender` half to detect whether the `Receiver` this has
     /// paired with has gone away.
     ///
     /// This function can be used to learn about when the `Receiver` (consumer)
     /// half has gone away and nothing will be able to receive a message sent
     /// from `complete`.
     ///
     /// Like `Future::poll`, this function will panic if it's not called from
-    /// within the context of a task. In otherwords, this should only ever be
+    /// within the context of a task. In other words, this should only ever be
     /// called from inside another future.
     ///
     /// If `Ready` is returned then it means that the `Receiver` has disappeared
     /// and the result this `Sender` would otherwise produce should no longer
     /// be produced.
     ///
     /// If `NotReady` is returned then the `Receiver` is still alive and may be
     /// able to receive a message if sent. The current task, however, is
     /// scheduled to receive a notification if the corresponding `Receiver` goes
     /// away.
     pub fn poll_cancel(&mut self) -> Poll<(), ()> {
         match self.inner.upgrade() {
             Some(inner) => {
-                inner.borrow_mut().tx_task = Some(task::park());
+                inner.borrow_mut().tx_task = Some(task::current());
                 Ok(Async::NotReady)
             }
             None => Ok(().into()),
         }
     }
+
+    /// Tests to see whether this `Sender`'s corresponding `Receiver`
+    /// has gone away.
+    ///
+    /// This function can be used to learn about when the `Receiver` (consumer)
+    /// half has gone away and nothing will be able to receive a message sent
+    /// from `send`.
+    ///
+    /// Note that this function is intended to *not* be used in the context of a
+    /// future. If you're implementing a future you probably want to call the
+    /// `poll_cancel` function which will block the current task if the
+    /// cancellation hasn't happened yet. This can be useful when working on a
+    /// non-futures related thread, though, which would otherwise panic if
+    /// `poll_cancel` were called.
+    pub fn is_canceled(&self) -> bool {
+        !self.inner.upgrade().is_some()
+    }
 }
 
 impl<T> Drop for Sender<T> {
     fn drop(&mut self) {
         let inner = match self.inner.upgrade() {
             Some(inner) => inner,
             None => return,
         };
         let rx_task = {
             let mut borrow = inner.borrow_mut();
             borrow.tx_task.take();
             borrow.rx_task.take()
         };
         if let Some(task) = rx_task {
-            task.unpark();
+            task.notify();
         }
     }
 }
 
 impl<T> Receiver<T> {
     /// Gracefully close this receiver, preventing sending any future messages.
     ///
     /// Any `send` operation which happens after this method returns is
@@ -148,17 +165,17 @@ impl<T> Receiver<T> {
                 let mut inner = inner.borrow_mut();
                 drop(inner.rx_task.take());
                 (inner.value.take(), inner.tx_task.take())
             }
             State::Closed(_) => return,
         };
         self.state = State::Closed(item);
         if let Some(task) = task {
-            task.unpark();
+            task.notify();
         }
     }
 }
 
 impl<T> Future for Receiver<T> {
     type Item = T;
     type Error = Canceled;
 
@@ -179,19 +196,156 @@ impl<T> Future for Receiver<T> {
         }
 
         // If we can get mutable access, then the sender has gone away. We
         // didn't see a value above, so we're canceled. Otherwise we park
         // our task and wait for a value to come in.
         if Rc::get_mut(inner).is_some() {
             Err(Canceled)
         } else {
-            inner.borrow_mut().rx_task = Some(task::park());
+            inner.borrow_mut().rx_task = Some(task::current());
             Ok(Async::NotReady)
         }
     }
 }
 
 impl<T> Drop for Receiver<T> {
     fn drop(&mut self) {
         self.close();
     }
 }
+
+/// Handle returned from the `spawn` function.
+///
+/// This handle is a future representing the completion of a different future on
+/// a separate executor. Created through the `oneshot::spawn` function this
+/// handle will resolve when the future provided to `spawn` resolves on the
+/// `Executor` instance provided to that function.
+///
+/// If this handle is dropped then the future will automatically no longer be
+/// polled and is scheduled to be dropped. This can be canceled with the
+/// `forget` function, however.
+pub struct SpawnHandle<T, E> {
+    rx: Receiver<Result<T, E>>,
+    keep_running: Rc<Cell<bool>>,
+}
+
+/// Type of future which `Spawn` instances below must be able to spawn.
+pub struct Execute<F: Future> {
+    future: F,
+    tx: Option<Sender<Result<F::Item, F::Error>>>,
+    keep_running: Rc<Cell<bool>>,
+}
+
+/// Spawns a `future` onto the instance of `Executor` provided, `executor`,
+/// returning a handle representing the completion of the future.
+///
+/// The `SpawnHandle` returned is a future that is a proxy for `future` itself.
+/// When `future` completes on `executor` then the `SpawnHandle` will itself be
+/// resolved.  Internally `SpawnHandle` contains a `oneshot` channel and is
+/// thus not safe to send across threads.
+///
+/// The `future` will be canceled if the `SpawnHandle` is dropped. If this is
+/// not desired then the `SpawnHandle::forget` function can be used to continue
+/// running the future to completion.
+///
+/// # Panics
+///
+/// This function will panic if the instance of `Spawn` provided is unable to
+/// spawn the `future` provided.
+///
+/// If the provided instance of `Spawn` does not actually run `future` to
+/// completion, then the returned handle may panic when polled. Typically this
+/// is not a problem, though, as most instances of `Spawn` will run futures to
+/// completion.
+pub fn spawn<F, E>(future: F, executor: &E) -> SpawnHandle<F::Item, F::Error>
+    where F: Future,
+          E: Executor<Execute<F>>,
+{
+    let flag = Rc::new(Cell::new(false));
+    let (tx, rx) = channel();
+    executor.execute(Execute {
+        future: future,
+        tx: Some(tx),
+        keep_running: flag.clone(),
+    }).expect("failed to spawn future");
+    SpawnHandle {
+        rx: rx,
+        keep_running: flag,
+    }
+}
+
+/// Spawns a function `f` onto the `Spawn` instance provided `s`.
+///
+/// For more information see the `spawn` function in this module. This function
+/// is just a thin wrapper around `spawn` which will execute the closure on the
+/// executor provided and then complete the future that the closure returns.
+pub fn spawn_fn<F, R, E>(f: F, executor: &E) -> SpawnHandle<R::Item, R::Error>
+    where F: FnOnce() -> R,
+          R: IntoFuture,
+          E: Executor<Execute<Lazy<F, R>>>,
+{
+    spawn(lazy(f), executor)
+}
+
+impl<T, E> SpawnHandle<T, E> {
+    /// Drop this future without canceling the underlying future.
+    ///
+    /// When `SpawnHandle` is dropped, the spawned future will be canceled as
+    /// well if the future hasn't already resolved. This function can be used
+    /// when you want to drop this future but keep executing the underlying future.
+    pub fn forget(self) {
+        self.keep_running.set(true);
+    }
+}
+
+impl<T, E> Future for SpawnHandle<T, E> {
+    type Item = T;
+    type Error = E;
+
+    fn poll(&mut self) -> Poll<T, E> {
+        match self.rx.poll() {
+            Ok(Async::Ready(Ok(t))) => Ok(t.into()),
+            Ok(Async::Ready(Err(e))) => Err(e),
+            Ok(Async::NotReady) => Ok(Async::NotReady),
+            Err(_) => panic!("future was canceled before completion"),
+        }
+    }
+}
+
+impl<T: fmt::Debug, E: fmt::Debug> fmt::Debug for SpawnHandle<T, E> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_struct("SpawnHandle")
+         .finish()
+    }
+}
+
+impl<F: Future> Future for Execute<F> {
+    type Item = ();
+    type Error = ();
+
+    fn poll(&mut self) -> Poll<(), ()> {
+        // If we're canceled then we may want to bail out early.
+        //
+        // If the `forget` function was called, though, then we keep going.
+        if self.tx.as_mut().unwrap().poll_cancel().unwrap().is_ready() {
+            if !self.keep_running.get() {
+                return Ok(().into())
+            }
+        }
+
+        let result = match self.future.poll() {
+            Ok(Async::NotReady) => return Ok(Async::NotReady),
+            Ok(Async::Ready(t)) => Ok(t),
+            Err(e) => Err(e),
+        };
+        drop(self.tx.take().unwrap().send(result));
+        Ok(().into())
+    }
+}
+
+impl<F: Future + fmt::Debug> fmt::Debug for Execute<F> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_struct("Execute")
+         .field("future", &self.future)
+         .finish()
+    }
+}
--- a/third_party/rust/futures/tests/all.rs
+++ b/third_party/rust/futures/tests/all.rs
@@ -22,20 +22,20 @@ fn result_smoke() {
     fn is_future_v<A, B, C>(_: C)
         where A: Send + 'static,
               B: Send + 'static,
               C: Future<Item=A, Error=B>
     {}
 
     is_future_v::<i32, u32, _>(f_ok(1).map(|a| a + 1));
     is_future_v::<i32, u32, _>(f_ok(1).map_err(|a| a + 1));
-    is_future_v::<i32, u32, _>(f_ok(1).and_then(|a| Ok(a)));
-    is_future_v::<i32, u32, _>(f_ok(1).or_else(|a| Err(a)));
+    is_future_v::<i32, u32, _>(f_ok(1).and_then(Ok));
+    is_future_v::<i32, u32, _>(f_ok(1).or_else(Err));
     is_future_v::<(i32, i32), u32, _>(f_ok(1).join(Err(3)));
-    is_future_v::<i32, u32, _>(f_ok(1).map(move |a| f_ok(a)).flatten());
+    is_future_v::<i32, u32, _>(f_ok(1).map(f_ok).flatten());
 
     assert_done(|| f_ok(1), r_ok(1));
     assert_done(|| f_err(1), r_err(1));
     assert_done(|| result(Ok(1)), r_ok(1));
     assert_done(|| result(Err(1)), r_err(1));
     assert_done(|| ok(1), r_ok(1));
     assert_done(|| err(1), r_err(1));
     assert_done(|| f_ok(1).map(|a| a + 2), r_ok(3));
@@ -122,17 +122,17 @@ fn smoke_oneshot() {
     assert_empty(|| {
         let (a, b) = oneshot::channel::<i32>();
         completes.push(a);
         b
     });
 
     let (c, p) = oneshot::channel::<i32>();
     drop(c);
-    let res = executor::spawn(p).poll_future(unpark_panic());
+    let res = executor::spawn(p).poll_future_notify(&notify_panic(), 0);
     assert!(res.is_err());
     let (c, p) = oneshot::channel::<i32>();
     drop(c);
     let (tx, rx) = channel();
     p.then(move |_| {
         tx.send(())
     }).forget();
     rx.recv().unwrap();
@@ -145,46 +145,46 @@ fn select_cancels() {
     let b = b.map(move |b| { btx.send(b).unwrap(); b });
     let d = d.map(move |d| { dtx.send(d).unwrap(); d });
 
     let f = b.select(d).then(unselect);
     // assert!(f.poll(&mut Task::new()).is_not_ready());
     assert!(brx.try_recv().is_err());
     assert!(drx.try_recv().is_err());
     a.send(1).unwrap();
-    let res = executor::spawn(f).poll_future(unpark_panic());
+    let res = executor::spawn(f).poll_future_notify(&notify_panic(), 0);
     assert!(res.ok().unwrap().is_ready());
     assert_eq!(brx.recv().unwrap(), 1);
     drop(c);
     assert!(drx.recv().is_err());
 
     let ((a, b), (c, d)) = (oneshot::channel::<i32>(), oneshot::channel::<i32>());
     let ((btx, _brx), (dtx, drx)) = (channel(), channel());
     let b = b.map(move |b| { btx.send(b).unwrap(); b });
     let d = d.map(move |d| { dtx.send(d).unwrap(); d });
 
     let mut f = executor::spawn(b.select(d).then(unselect));
-    assert!(f.poll_future(unpark_noop()).ok().unwrap().is_not_ready());
-    assert!(f.poll_future(unpark_noop()).ok().unwrap().is_not_ready());
+    assert!(f.poll_future_notify(&notify_noop(), 0).ok().unwrap().is_not_ready());
+    assert!(f.poll_future_notify(&notify_noop(), 0).ok().unwrap().is_not_ready());
     a.send(1).unwrap();
-    assert!(f.poll_future(unpark_panic()).ok().unwrap().is_ready());
+    assert!(f.poll_future_notify(&notify_panic(), 0).ok().unwrap().is_ready());
     drop((c, f));
     assert!(drx.recv().is_err());
 }
 
 #[test]
 fn join_cancels() {
     let ((a, b), (c, d)) = (oneshot::channel::<i32>(), oneshot::channel::<i32>());
     let ((btx, _brx), (dtx, drx)) = (channel(), channel());
     let b = b.map(move |b| { btx.send(b).unwrap(); b });
     let d = d.map(move |d| { dtx.send(d).unwrap(); d });
 
     let f = b.join(d);
     drop(a);
-    let res = executor::spawn(f).poll_future(unpark_panic());
+    let res = executor::spawn(f).poll_future_notify(&notify_panic(), 0);
     assert!(res.is_err());
     drop(c);
     assert!(drx.recv().is_err());
 
     let ((a, b), (c, d)) = (oneshot::channel::<i32>(), oneshot::channel::<i32>());
     let ((btx, _brx), (dtx, drx)) = (channel(), channel());
     let b = b.map(move |b| { btx.send(b).unwrap(); b });
     let d = d.map(move |d| { dtx.send(d).unwrap(); d });
@@ -203,47 +203,47 @@ fn join_cancels() {
     assert!(drx.recv().is_err());
 }
 
 #[test]
 fn join_incomplete() {
     let (a, b) = oneshot::channel::<i32>();
     let (tx, rx) = channel();
     let mut f = executor::spawn(ok(1).join(b).map(move |r| tx.send(r).unwrap()));
-    assert!(f.poll_future(unpark_noop()).ok().unwrap().is_not_ready());
+    assert!(f.poll_future_notify(&notify_noop(), 0).ok().unwrap().is_not_ready());
     assert!(rx.try_recv().is_err());
     a.send(2).unwrap();
-    assert!(f.poll_future(unpark_noop()).ok().unwrap().is_ready());
+    assert!(f.poll_future_notify(&notify_noop(), 0).ok().unwrap().is_ready());
     assert_eq!(rx.recv().unwrap(), (1, 2));
 
     let (a, b) = oneshot::channel::<i32>();
     let (tx, rx) = channel();
     let mut f = executor::spawn(b.join(Ok(2)).map(move |r| tx.send(r).unwrap()));
-    assert!(f.poll_future(unpark_noop()).ok().unwrap().is_not_ready());
+    assert!(f.poll_future_notify(&notify_noop(), 0).ok().unwrap().is_not_ready());
     assert!(rx.try_recv().is_err());
     a.send(1).unwrap();
-    assert!(f.poll_future(unpark_noop()).ok().unwrap().is_ready());
+    assert!(f.poll_future_notify(&notify_noop(), 0).ok().unwrap().is_ready());
     assert_eq!(rx.recv().unwrap(), (1, 2));
 
     let (a, b) = oneshot::channel::<i32>();
     let (tx, rx) = channel();
     let mut f = executor::spawn(ok(1).join(b).map_err(move |_r| tx.send(2).unwrap()));
-    assert!(f.poll_future(unpark_noop()).ok().unwrap().is_not_ready());
+    assert!(f.poll_future_notify(&notify_noop(), 0).ok().unwrap().is_not_ready());
     assert!(rx.try_recv().is_err());
     drop(a);
-    assert!(f.poll_future(unpark_noop()).is_err());
+    assert!(f.poll_future_notify(&notify_noop(), 0).is_err());
     assert_eq!(rx.recv().unwrap(), 2);
 
     let (a, b) = oneshot::channel::<i32>();
     let (tx, rx) = channel();
     let mut f = executor::spawn(b.join(Ok(2)).map_err(move |_r| tx.send(1).unwrap()));
-    assert!(f.poll_future(unpark_noop()).ok().unwrap().is_not_ready());
+    assert!(f.poll_future_notify(&notify_noop(), 0).ok().unwrap().is_not_ready());
     assert!(rx.try_recv().is_err());
     drop(a);
-    assert!(f.poll_future(unpark_noop()).is_err());
+    assert!(f.poll_future_notify(&notify_noop(), 0).is_err());
     assert_eq!(rx.recv().unwrap(), 1);
 }
 
 #[test]
 fn collect_collects() {
     assert_done(|| join_all(vec![f_ok(1), f_ok(2)]), Ok(vec![1, 2]));
     assert_done(|| join_all(vec![f_ok(1)]), Ok(vec![1]));
     assert_done(|| join_all(Vec::<Result<i32, u32>>::new()), Ok(vec![]));
@@ -318,17 +318,17 @@ fn select2() {
 
     // Cancel after a schedule
     {
         let ((_a, b), (_c, d)) = (oneshot::channel::<i32>(), oneshot::channel::<i32>());
         let ((btx, brx), (dtx, drx)) = (channel(), channel());
         let b = b.map(move |v| { btx.send(v).unwrap(); v });
         let d = d.map(move |v| { dtx.send(v).unwrap(); v });
         let f = b.select(d);
-        drop(executor::spawn(f).poll_future(support::unpark_noop()));
+        drop(executor::spawn(f).poll_future_notify(&support::notify_noop(), 0));
         assert!(drx.recv().is_err());
         assert!(brx.recv().is_err());
     }
 
     // Cancel propagates
     {
         let ((a, b), (_c, d)) = (oneshot::channel::<i32>(), oneshot::channel::<i32>());
         let ((btx, brx), (dtx, drx)) = (channel(), channel());
@@ -354,8 +354,22 @@ fn select2() {
     }
 }
 
 #[test]
 fn option() {
     assert_eq!(Ok(Some(())), Some(ok::<(), ()>(())).wait());
     assert_eq!(Ok(None), <Option<FutureResult<(), ()>> as Future>::wait(None));
 }
+
+#[test]
+fn spawn_does_unsize() {
+    #[derive(Clone, Copy)]
+    struct EmptyNotify;
+    impl executor::Notify for EmptyNotify {
+        fn notify(&self, _: usize) { panic!("Cannot notify"); }
+    }
+    static EMPTY: &'static EmptyNotify = &EmptyNotify;
+
+    let spawn: executor::Spawn<FutureResult<(), ()>> = executor::spawn(future::ok(()));
+    let mut spawn_box: Box<executor::Spawn<Future<Item = (), Error = ()>>> = Box::new(spawn);
+    spawn_box.poll_future_notify(&EMPTY, 0).unwrap();
+}
--- a/third_party/rust/futures/tests/bilock.rs
+++ b/third_party/rust/futures/tests/bilock.rs
@@ -1,64 +1,70 @@
 extern crate futures;
 
 use std::thread;
 
-use futures::{Async, Poll};
+use futures::prelude::*;
 use futures::executor;
-use futures::stream::{self, Stream};
-use futures::future::{self, Future};
+use futures::stream;
+use futures::future;
 use futures::sync::BiLock;
 
 mod support;
 use support::*;
 
 #[test]
 fn smoke() {
     let future = future::lazy(|| {
         let (a, b) = BiLock::new(1);
-        let mut lock = match a.poll_lock() {
-            Async::Ready(l) => l,
-            Async::NotReady => panic!("poll not ready"),
-        };
-        assert_eq!(*lock, 1);
-        *lock = 2;
 
-        assert!(b.poll_lock().is_not_ready());
-        assert!(a.poll_lock().is_not_ready());
-        drop(lock);
+        {
+            let mut lock = match a.poll_lock() {
+                Async::Ready(l) => l,
+                Async::NotReady => panic!("poll not ready"),
+            };
+            assert_eq!(*lock, 1);
+            *lock = 2;
+
+            assert!(b.poll_lock().is_not_ready());
+            assert!(a.poll_lock().is_not_ready());
+        }
 
         assert!(b.poll_lock().is_ready());
         assert!(a.poll_lock().is_ready());
 
-        let lock = match b.poll_lock() {
-            Async::Ready(l) => l,
-            Async::NotReady => panic!("poll not ready"),
-        };
-        assert_eq!(*lock, 2);
+        {
+            let lock = match b.poll_lock() {
+                Async::Ready(l) => l,
+                Async::NotReady => panic!("poll not ready"),
+            };
+            assert_eq!(*lock, 2);
+        }
+
+        assert_eq!(a.reunite(b).expect("bilock/smoke: reunite error"), 2);
 
         Ok::<(), ()>(())
     });
 
     assert!(executor::spawn(future)
-                .poll_future(unpark_noop())
+                .poll_future_notify(&notify_noop(), 0)
                 .expect("failure in poll")
                 .is_ready());
 }
 
 #[test]
 fn concurrent() {
     const N: usize = 10000;
     let (a, b) = BiLock::new(0);
 
     let a = Increment {
         a: Some(a),
         remaining: N,
     };
-    let b = stream::iter((0..N).map(Ok::<_, ()>)).fold(b, |b, _n| {
+    let b = stream::iter_ok::<_, ()>((0..N)).fold(b, |b, _n| {
         b.lock().map(|mut b| {
             *b += 1;
             b.unlock()
         })
     });
 
     let t1 = thread::spawn(move || a.wait());
     let b = b.wait().expect("b error");
@@ -68,16 +74,18 @@ fn concurrent() {
         Async::Ready(l) => assert_eq!(*l, 2 * N),
         Async::NotReady => panic!("poll not ready"),
     }
     match b.poll_lock() {
         Async::Ready(l) => assert_eq!(*l, 2 * N),
         Async::NotReady => panic!("poll not ready"),
     }
 
+    assert_eq!(a.reunite(b).expect("bilock/concurrent: reunite error"), 2 * N);
+
     struct Increment {
         remaining: usize,
         a: Option<BiLock<usize>>,
     }
 
     impl Future for Increment {
         type Item = BiLock<usize>;
         type Error = ();
--- a/third_party/rust/futures/tests/buffer_unordered.rs
+++ b/third_party/rust/futures/tests/buffer_unordered.rs
@@ -1,14 +1,14 @@
 extern crate futures;
 
 use std::sync::mpsc as std_mpsc;
 use std::thread;
 
-use futures::{Future, Stream, Sink};
+use futures::prelude::*;
 use futures::sync::oneshot;
 use futures::sync::mpsc;
 
 #[test]
 fn works() {
     const N: usize = 4;
 
     let (mut tx, rx) = mpsc::channel(1);
--- a/third_party/rust/futures/tests/channel.rs
+++ b/third_party/rust/futures/tests/channel.rs
@@ -1,13 +1,13 @@
 extern crate futures;
 
 use std::sync::atomic::*;
 
-use futures::{Future, Stream, Sink};
+use futures::prelude::*;
 use futures::future::result;
 use futures::sync::mpsc;
 
 mod support;
 use support::*;
 
 #[test]
 fn sequence() {
@@ -22,21 +22,21 @@ fn sequence() {
     for i in (1..amt + 1).rev() {
         assert_eq!(rx.next(), Some(Ok(i)));
     }
     assert_eq!(rx.next(), None);
 
     fn send(n: u32, sender: mpsc::Sender<u32>)
             -> Box<Future<Item=(), Error=()> + Send> {
         if n == 0 {
-            return result(Ok(())).boxed()
+            return Box::new(result(Ok(())))
         }
-        sender.send(n).map_err(|_| ()).and_then(move |sender| {
+        Box::new(sender.send(n).map_err(|_| ()).and_then(move |sender| {
             send(n - 1, sender)
-        }).boxed()
+        }))
     }
 }
 
 #[test]
 fn drop_sender() {
     let (tx, mut rx) = mpsc::channel::<u32>(1);
     drop(tx);
     sassert_done(&mut rx);
--- a/third_party/rust/futures/tests/eager_drop.rs
+++ b/third_party/rust/futures/tests/eager_drop.rs
@@ -1,15 +1,15 @@
 extern crate futures;
 
 use std::sync::mpsc::channel;
 
-use futures::Poll;
-use futures::future::*;
+use futures::prelude::*;
 use futures::sync::oneshot;
+use futures::future::{err, ok};
 
 mod support;
 use support::*;
 
 #[test]
 fn map() {
     // Whatever runs after a `map` should have dropped the closure by that
     // point.
--- a/third_party/rust/futures/tests/eventual.rs
+++ b/third_party/rust/futures/tests/eventual.rs
@@ -1,17 +1,17 @@
 extern crate futures;
 
 mod support;
 use support::*;
 
 use std::sync::mpsc;
 use std::thread;
 
-use futures::Future;
+use futures::prelude::*;
 use futures::future::{ok, err};
 use futures::sync::oneshot;
 
 #[test]
 fn and_then1() {
     let (tx, rx) = mpsc::channel();
 
     let tx2 = tx.clone();
--- a/third_party/rust/futures/tests/fuse.rs
+++ b/third_party/rust/futures/tests/fuse.rs
@@ -1,14 +1,15 @@
 extern crate futures;
 
-use futures::future::{ok, Future};
+use futures::prelude::*;
+use futures::future::ok;
 use futures::executor;
 
 mod support;
 use support::*;
 
 #[test]
 fn fuse() {
     let mut future = executor::spawn(ok::<i32, u32>(2).fuse());
-    assert!(future.poll_future(unpark_panic()).unwrap().is_ready());
-    assert!(future.poll_future(unpark_panic()).unwrap().is_not_ready());
+    assert!(future.poll_future_notify(&notify_panic(), 0).unwrap().is_ready());
+    assert!(future.poll_future_notify(&notify_panic(), 0).unwrap().is_not_ready());
 }
--- a/third_party/rust/futures/tests/future_flatten_stream.rs
+++ b/third_party/rust/futures/tests/future_flatten_stream.rs
@@ -1,27 +1,26 @@
 extern crate core;
 extern crate futures;
 
 use core::marker;
 
-use futures::{Stream, Future, Poll};
+use futures::prelude::*;
 use futures::future::{ok, err};
 use futures::stream;
 
 #[test]
 fn successful_future() {
-    let stream_items = vec![Ok(17), Err(true), Ok(19)];
-    let future_of_a_stream = ok::<_, bool>(stream::iter(stream_items));
+    let stream_items = vec![17, 19];
+    let future_of_a_stream = ok::<_, bool>(stream::iter_ok(stream_items));
 
     let stream = future_of_a_stream.flatten_stream();
 
     let mut iter = stream.wait();
     assert_eq!(Ok(17), iter.next().unwrap());
-    assert_eq!(Err(true), iter.next().unwrap());
     assert_eq!(Ok(19), iter.next().unwrap());
     assert_eq!(None, iter.next());
 }
 
 struct PanickingStream<T, E> {
     _marker: marker::PhantomData<(T, E)>
 }
 
new file mode 100644
--- /dev/null
+++ b/third_party/rust/futures/tests/futures_ordered.rs
@@ -0,0 +1,86 @@
+extern crate futures;
+
+use std::any::Any;
+
+use futures::sync::oneshot;
+use futures::stream::futures_ordered;
+use futures::prelude::*;
+
+mod support;
+
+#[test]
+fn works_1() {
+    let (a_tx, a_rx) = oneshot::channel::<u32>();
+    let (b_tx, b_rx) = oneshot::channel::<u32>();
+    let (c_tx, c_rx) = oneshot::channel::<u32>();
+
+    let stream = futures_ordered(vec![a_rx, b_rx, c_rx]);
+
+    let mut spawn = futures::executor::spawn(stream);
+    b_tx.send(99).unwrap();
+    assert!(spawn.poll_stream_notify(&support::notify_noop(), 0).unwrap().is_not_ready());
+
+    a_tx.send(33).unwrap();
+    c_tx.send(33).unwrap();
+    assert_eq!(Some(Ok(33)), spawn.wait_stream());
+    assert_eq!(Some(Ok(99)), spawn.wait_stream());
+    assert_eq!(Some(Ok(33)), spawn.wait_stream());
+    assert_eq!(None, spawn.wait_stream());
+}
+
+#[test]
+fn works_2() {
+    let (a_tx, a_rx) = oneshot::channel::<u32>();
+    let (b_tx, b_rx) = oneshot::channel::<u32>();
+    let (c_tx, c_rx) = oneshot::channel::<u32>();
+
+    let stream = futures_ordered(vec![
+        Box::new(a_rx) as Box<Future<Item = _, Error = _>>,
+        Box::new(b_rx.join(c_rx).map(|(a, b)| a + b)),
+    ]);
+
+    let mut spawn = futures::executor::spawn(stream);
+    a_tx.send(33).unwrap();
+    b_tx.send(33).unwrap();
+    assert!(spawn.poll_stream_notify(&support::notify_noop(), 0).unwrap().is_ready());
+    assert!(spawn.poll_stream_notify(&support::notify_noop(), 0).unwrap().is_not_ready());
+    c_tx.send(33).unwrap();
+    assert!(spawn.poll_stream_notify(&support::notify_noop(), 0).unwrap().is_ready());
+}
+
+#[test]
+fn from_iterator() {
+    use futures::future::ok;
+    use futures::stream::FuturesOrdered;
+
+    let stream = vec![
+        ok::<u32, ()>(1),
+        ok::<u32, ()>(2),
+        ok::<u32, ()>(3)
+    ].into_iter().collect::<FuturesOrdered<_>>();
+    assert_eq!(stream.len(), 3);
+    assert_eq!(stream.collect().wait(), Ok(vec![1,2,3]));
+}
+
+#[test]
+fn queue_never_unblocked() {
+    let (_a_tx, a_rx) = oneshot::channel::<Box<Any+Send>>();
+    let (b_tx, b_rx) = oneshot::channel::<Box<Any+Send>>();
+    let (c_tx, c_rx) = oneshot::channel::<Box<Any+Send>>();
+
+    let stream = futures_ordered(vec![
+        Box::new(a_rx) as Box<Future<Item = _, Error = _>>,
+        Box::new(b_rx.select(c_rx).then(|res| Ok(Box::new(res) as Box<Any+Send>))),
+    ]);
+
+    let mut spawn = futures::executor::spawn(stream);
+    for _ in 0..10 {
+        assert!(spawn.poll_stream_notify(&support::notify_noop(), 0).unwrap().is_not_ready());
+    }
+
+    b_tx.send(Box::new(())).unwrap();
+    assert!(spawn.poll_stream_notify(&support::notify_noop(), 0).unwrap().is_not_ready());
+    c_tx.send(Box::new(())).unwrap();
+    assert!(spawn.poll_stream_notify(&support::notify_noop(), 0).unwrap().is_not_ready());
+    assert!(spawn.poll_stream_notify(&support::notify_noop(), 0).unwrap().is_not_ready());
+}
--- a/third_party/rust/futures/tests/futures_unordered.rs
+++ b/third_party/rust/futures/tests/futures_unordered.rs
@@ -1,15 +1,15 @@
 extern crate futures;
 
 use std::any::Any;
 
 use futures::sync::oneshot;
 use futures::stream::futures_unordered;
-use futures::Future;
+use futures::prelude::*;
 
 mod support;
 
 #[test]
 fn works_1() {
     let (a_tx, a_rx) = oneshot::channel::<u32>();
     let (b_tx, b_rx) = oneshot::channel::<u32>();
     let (c_tx, c_rx) = oneshot::channel::<u32>();
@@ -28,41 +28,100 @@ fn works_1() {
 }
 
 #[test]
 fn works_2() {
     let (a_tx, a_rx) = oneshot::channel::<u32>();
     let (b_tx, b_rx) = oneshot::channel::<u32>();
     let (c_tx, c_rx) = oneshot::channel::<u32>();
 
-    let stream = futures_unordered(vec![a_rx.boxed(), b_rx.join(c_rx).map(|(a, b)| a + b).boxed()]);
+    let stream = futures_unordered(vec![
+        Box::new(a_rx) as Box<Future<Item = _, Error = _>>,
+        Box::new(b_rx.join(c_rx).map(|(a, b)| a + b)),
+    ]);
 
     let mut spawn = futures::executor::spawn(stream);
     a_tx.send(33).unwrap();
     b_tx.send(33).unwrap();
-    assert!(spawn.poll_stream(support::unpark_noop()).unwrap().is_ready());
+    assert!(spawn.poll_stream_notify(&support::notify_noop(), 0).unwrap().is_ready());
     c_tx.send(33).unwrap();
-    assert!(spawn.poll_stream(support::unpark_noop()).unwrap().is_ready());
+    assert!(spawn.poll_stream_notify(&support::notify_noop(), 0).unwrap().is_ready());
+}
+
+#[test]
+fn from_iterator() {
+    use futures::future::ok;
+    use futures::stream::FuturesUnordered;
+
+    let stream = vec![
+        ok::<u32, ()>(1),
+        ok::<u32, ()>(2),
+        ok::<u32, ()>(3)
+    ].into_iter().collect::<FuturesUnordered<_>>();
+    assert_eq!(stream.len(), 3);
+    assert_eq!(stream.collect().wait(), Ok(vec![1,2,3]));
 }
 
 #[test]
 fn finished_future_ok() {
     let (_a_tx, a_rx) = oneshot::channel::<Box<Any+Send>>();
     let (b_tx, b_rx) = oneshot::channel::<Box<Any+Send>>();
     let (c_tx, c_rx) = oneshot::channel::<Box<Any+Send>>();
 
     let stream = futures_unordered(vec![
-        a_rx.boxed(),
-        b_rx.select(c_rx).then(|res| Ok(Box::new(res) as Box<Any+Send>)).boxed(),
+        Box::new(a_rx) as Box<Future<Item = _, Error = _>>,
+        Box::new(b_rx.select(c_rx).then(|res| Ok(Box::new(res) as Box<Any+Send>))),
     ]);
 
     let mut spawn = futures::executor::spawn(stream);
     for _ in 0..10 {
-        assert!(spawn.poll_stream(support::unpark_noop()).unwrap().is_not_ready());
+        assert!(spawn.poll_stream_notify(&support::notify_noop(), 0).unwrap().is_not_ready());
     }
 
     b_tx.send(Box::new(())).unwrap();
-    let next = spawn.poll_stream(support::unpark_noop()).unwrap();
+    let next = spawn.poll_stream_notify(&support::notify_noop(), 0).unwrap();
     assert!(next.is_ready());
     c_tx.send(Box::new(())).unwrap();
-    assert!(spawn.poll_stream(support::unpark_noop()).unwrap().is_not_ready());
-    assert!(spawn.poll_stream(support::unpark_noop()).unwrap().is_not_ready());
+    assert!(spawn.poll_stream_notify(&support::notify_noop(), 0).unwrap().is_not_ready());
+    assert!(spawn.poll_stream_notify(&support::notify_noop(), 0).unwrap().is_not_ready());
 }
+
+#[test]
+fn iter_mut_cancel() {
+    let (a_tx, a_rx) = oneshot::channel::<u32>();
+    let (b_tx, b_rx) = oneshot::channel::<u32>();
+    let (c_tx, c_rx) = oneshot::channel::<u32>();
+
+    let mut stream = futures_unordered(vec![a_rx, b_rx, c_rx]);
+
+    for rx in stream.iter_mut() {
+        rx.close();
+    }
+
+    assert!(a_tx.is_canceled());
+    assert!(b_tx.is_canceled());
+    assert!(c_tx.is_canceled());
+
+    let mut spawn = futures::executor::spawn(stream);
+    assert_eq!(Some(Err(futures::sync::oneshot::Canceled)), spawn.wait_stream());
+    assert_eq!(Some(Err(futures::sync::oneshot::Canceled)), spawn.wait_stream());
+    assert_eq!(Some(Err(futures::sync::oneshot::Canceled)), spawn.wait_stream());
+    assert_eq!(None, spawn.wait_stream());
+}
+
+#[test]
+fn iter_mut_len() {
+    let mut stream = futures_unordered(vec![
+        futures::future::empty::<(),()>(),
+        futures::future::empty::<(),()>(),
+        futures::future::empty::<(),()>()
+    ]);
+
+    let mut iter_mut = stream.iter_mut();
+    assert_eq!(iter_mut.len(), 3);
+    assert!(iter_mut.next().is_some());
+    assert_eq!(iter_mut.len(), 2);
+    assert!(iter_mut.next().is_some());
+    assert_eq!(iter_mut.len(), 1);
+    assert!(iter_mut.next().is_some());
+    assert_eq!(iter_mut.len(), 0);
+    assert!(iter_mut.next().is_none());
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/futures/tests/inspect.rs
@@ -0,0 +1,23 @@
+extern crate futures;
+
+use futures::prelude::*;
+use futures::future::{ok, err};
+
+#[test]
+fn smoke() {
+    let mut counter = 0;
+
+    {
+        let work = ok::<u32, u32>(40).inspect(|val| { counter += *val; });
+        assert_eq!(work.wait(), Ok(40));
+    }
+
+    assert_eq!(counter, 40);
+
+    {
+        let work = err::<u32, u32>(4).inspect(|val| { counter += *val; });
+        assert_eq!(work.wait(), Err(4));
+    }
+
+    assert_eq!(counter, 40);
+}
--- a/third_party/rust/futures/tests/mpsc-close.rs
+++ b/third_party/rust/futures/tests/mpsc-close.rs
@@ -1,24 +1,21 @@
 extern crate futures;
 
 use std::thread;
 
-use futures::{Sink, Stream, Future};
+use futures::prelude::*;
 use futures::sync::mpsc::*;
 
 #[test]
 fn smoke() {
     let (mut sender, receiver) = channel(1);
 
     let t = thread::spawn(move ||{
-        loop {
-            match sender.send(42).wait() {
-                Ok(s) => sender = s,
-                Err(_) => break,
-            }
+        while let Ok(s) = sender.send(42).wait() {
+            sender = s;
         }
     });
 
     receiver.take(3).for_each(|_| Ok(())).wait().unwrap();
 
     t.join().unwrap()
 }
--- a/third_party/rust/futures/tests/mpsc.rs
+++ b/third_party/rust/futures/tests/mpsc.rs
@@ -1,28 +1,30 @@
 #![cfg(feature = "use_std")]
 
+#[macro_use]
 extern crate futures;
 
-use futures::{Future, Stream, Sink, Async, AsyncSink};
-use futures::future::lazy;
+use futures::prelude::*;
+use futures::future::{lazy, ok};
+use futures::stream::unfold;
 use futures::sync::mpsc;
+use futures::sync::oneshot;
 
-use std::time::Duration;
 use std::thread;
 use std::sync::{Arc, Mutex};
 use std::sync::atomic::{AtomicUsize, Ordering};
 
-fn is_send<T: Send>() {}
+mod support;
+use support::*;
+
 
-#[test]
-fn bounds() {
-    is_send::<mpsc::Sender<i32>>();
-    is_send::<mpsc::Receiver<i32>>();
-}
+trait AssertSend: Send {}
+impl AssertSend for mpsc::Sender<i32> {}
+impl AssertSend for mpsc::Receiver<i32> {}
 
 #[test]
 fn send_recv() {
     let (tx, rx) = mpsc::channel::<i32>(16);
     let mut rx = rx.wait();
 
     tx.send(1).wait().unwrap();
 
@@ -31,34 +33,38 @@ fn send_recv() {
 
 #[test]
 fn send_recv_no_buffer() {
     let (mut tx, mut rx) = mpsc::channel::<i32>(0);
 
     // Run on a task context
     lazy(move || {
         assert!(tx.poll_complete().unwrap().is_ready());
+        assert!(tx.poll_ready().unwrap().is_ready());
 
         // Send first message
-
         let res = tx.start_send(1).unwrap();
         assert!(is_ready(&res));
+        assert!(tx.poll_ready().unwrap().is_not_ready());
 
         // Send second message
         let res = tx.start_send(2).unwrap();
         assert!(!is_ready(&res));
 
         // Take the value
         assert_eq!(rx.poll().unwrap(), Async::Ready(Some(1)));
+        assert!(tx.poll_ready().unwrap().is_ready());
 
         let res = tx.start_send(2).unwrap();
         assert!(is_ready(&res));
+        assert!(tx.poll_ready().unwrap().is_not_ready());
 
         // Take the value
         assert_eq!(rx.poll().unwrap(), Async::Ready(Some(2)));
+        assert!(tx.poll_ready().unwrap().is_ready());
 
         Ok::<(), ()>(())
     }).wait().unwrap();
 }
 
 #[test]
 fn send_shared_recv() {
     let (tx1, rx) = mpsc::channel::<i32>(16);
@@ -81,42 +87,45 @@ fn send_recv_threads() {
         tx.send(1).wait().unwrap();
     });
 
     assert_eq!(rx.next().unwrap(), Ok(1));
 }
 
 #[test]
 fn send_recv_threads_no_capacity() {
-    let (mut tx, rx) = mpsc::channel::<i32>(0);
+    let (tx, rx) = mpsc::channel::<i32>(0);
     let mut rx = rx.wait();
 
+    let (readytx, readyrx) = mpsc::channel::<()>(2);
+    let mut readyrx = readyrx.wait();
     let t = thread::spawn(move|| {
-        tx = tx.send(1).wait().unwrap();
-        tx = tx.send(2).wait().unwrap();
+        let readytx = readytx.sink_map_err(|_| panic!());
+        let (a, b) = tx.send(1).join(readytx.send(())).wait().unwrap();
+        a.send(2).join(b.send(())).wait().unwrap();
     });
 
-    thread::sleep(Duration::from_millis(100));
+    drop(readyrx.next().unwrap());
     assert_eq!(rx.next().unwrap(), Ok(1));
-
-    thread::sleep(Duration::from_millis(100));
+    drop(readyrx.next().unwrap());
     assert_eq!(rx.next().unwrap(), Ok(2));
 
     t.join().unwrap();
 }
 
 #[test]
 fn recv_close_gets_none() {
-    let (tx, mut rx) = mpsc::channel::<i32>(10);
+    let (mut tx, mut rx) = mpsc::channel::<i32>(10);
 
     // Run on a task context
     lazy(move || {
         rx.close();
 
         assert_eq!(rx.poll(), Ok(Async::Ready(None)));
+        assert!(tx.poll_ready().is_err());
 
         drop(tx);
 
         Ok::<(), ()>(())
     }).wait().unwrap();
 }
 
 
@@ -129,16 +138,83 @@ fn tx_close_gets_none() {
         assert_eq!(rx.poll(), Ok(Async::Ready(None)));
         assert_eq!(rx.poll(), Ok(Async::Ready(None)));
 
         Ok::<(), ()>(())
     }).wait().unwrap();
 }
 
 #[test]
+fn spawn_sends_items() {
+    let core = local_executor::Core::new();
+    let stream = unfold(0, |i| Some(ok::<_,u8>((i, i + 1))));
+    let rx = mpsc::spawn(stream, &core, 1);
+    assert_eq!(core.run(rx.take(4).collect()).unwrap(),
+               [0, 1, 2, 3]);
+}
+
+#[test]
+fn spawn_kill_dead_stream() {
+    use std::thread;
+    use std::time::Duration;
+    use futures::future::Either;
+    use futures::sync::oneshot;
+
+    // a stream which never returns anything (maybe a remote end isn't
+    // responding), but dropping it leads to observable side effects
+    // (like closing connections, releasing limited resources, ...)
+    #[derive(Debug)]
+    struct Dead {
+        // when dropped you should get Err(oneshot::Canceled) on the
+        // receiving end
+        done: oneshot::Sender<()>,
+    }
+    impl Stream for Dead {
+        type Item = ();
+        type Error = ();
+
+        fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
+            Ok(Async::NotReady)
+        }
+    }
+
+    // need to implement a timeout for the test, as it would hang
+    // forever right now
+    let (timeout_tx, timeout_rx) = oneshot::channel();
+    thread::spawn(move || {
+        thread::sleep(Duration::from_millis(1000));
+        let _ = timeout_tx.send(());
+    });
+
+    let core = local_executor::Core::new();
+    let (done_tx, done_rx) = oneshot::channel();
+    let stream = Dead{done: done_tx};
+    let rx = mpsc::spawn(stream, &core, 1);
+    let res = core.run(
+        Ok::<_, ()>(())
+        .into_future()
+        .then(move |_| {
+            // now drop the spawned stream: maybe some timeout exceeded,
+            // or some connection on this end was closed by the remote
+            // end.
+            drop(rx);
+            // and wait for the spawned stream to release its resources
+            done_rx
+        })
+        .select2(timeout_rx)
+    );
+    match res {
+        Err(Either::A((oneshot::Canceled, _))) => (),
+        _ => {
+            panic!("dead stream wasn't canceled");
+        },
+    }
+}
+
+#[test]
 fn stress_shared_unbounded() {
     const AMT: u32 = 10000;
     const NTHREADS: u32 = 8;
     let (tx, rx) = mpsc::unbounded::<i32>();
     let mut rx = rx.wait();
 
     let t = thread::spawn(move|| {
         for _ in 0..AMT * NTHREADS {
@@ -146,21 +222,21 @@ fn stress_shared_unbounded() {
         }
 
         if rx.next().is_some() {
             panic!();
         }
     });
 
     for _ in 0..NTHREADS {
-        let mut tx = tx.clone();
+        let tx = tx.clone();
 
         thread::spawn(move|| {
             for _ in 0..AMT {
-                mpsc::UnboundedSender::send(&mut tx, 1).unwrap();
+                tx.unbounded_send(1).unwrap();
             }
         });
     }
 
     drop(tx);
 
     t.join().ok().unwrap();
 }
@@ -273,14 +349,202 @@ fn stress_receiver_multi_task_bounded_ha
 
     for t in th {
         t.join().unwrap();
     }
 
     assert_eq!(AMT, n.load(Ordering::Relaxed));
 }
 
+/// Stress test that receiver properly receives all the messages
+/// after sender dropped.
+#[test]
+fn stress_drop_sender() {
+    fn list() -> Box<Stream<Item=i32, Error=u32>> {
+        let (tx, rx) = mpsc::channel(1);
+        tx.send(Ok(1))
+          .and_then(|tx| tx.send(Ok(2)))
+          .and_then(|tx| tx.send(Ok(3)))
+          .forget();
+        Box::new(rx.then(|r| r.unwrap()))
+    }
+
+    for _ in 0..10000 {
+        assert_eq!(list().wait().collect::<Result<Vec<_>, _>>(),
+        Ok(vec![1, 2, 3]));
+    }
+}
+
+/// Stress test that after receiver dropped,
+/// no messages are lost.
+fn stress_close_receiver_iter() {
+    let (tx, rx) = mpsc::unbounded();
+    let (unwritten_tx, unwritten_rx) = std::sync::mpsc::channel();
+    let th = thread::spawn(move || {
+        for i in 1.. {
+            if let Err(_) = tx.unbounded_send(i) {
+                unwritten_tx.send(i).expect("unwritten_tx");
+                return;
+            }
+        }
+    });
+
+    let mut rx = rx.wait();
+
+    // Read one message to make sure thread effectively started
+    assert_eq!(Some(Ok(1)), rx.next());
+
+    rx.get_mut().close();
+
+    for i in 2.. {
+        match rx.next() {
+            Some(Ok(r)) => assert!(i == r),
+            Some(Err(_)) => unreachable!(),
+            None => {
+                let unwritten = unwritten_rx.recv().expect("unwritten_rx");
+                assert_eq!(unwritten, i);
+                th.join().unwrap();
+                return;
+            }
+        }
+    }
+}
+
+#[test]
+fn stress_close_receiver() {
+    for _ in 0..10000 {
+        stress_close_receiver_iter();
+    }
+}
+
+/// Tests that after `poll_ready` indicates capacity a channel can always send without waiting.
+#[test]
+fn stress_poll_ready() {
+    // A task which checks channel capacity using poll_ready, and pushes items onto the channel when
+    // ready.
+    struct SenderTask {
+        sender: mpsc::Sender<u32>,
+        count: u32,
+    }
+    impl Future for SenderTask {
+        type Item = ();
+        type Error = ();
+        fn poll(&mut self) -> Poll<(), ()> {
+            // In a loop, check if the channel is ready. If so, push an item onto the channel
+            // (asserting that it doesn't attempt to block).
+            while self.count > 0 {
+                try_ready!(self.sender.poll_ready().map_err(|_| ()));
+                assert!(self.sender.start_send(self.count).unwrap().is_ready());
+                self.count -= 1;
+            }
+            Ok(Async::Ready(()))
+        }
+    }
+
+    const AMT: u32 = 1000;
+    const NTHREADS: u32 = 8;
+
+    /// Run a stress test using the specified channel capacity.
+    fn stress(capacity: usize) {
+        let (tx, rx) = mpsc::channel(capacity);
+        let mut threads = Vec::new();
+        for _ in 0..NTHREADS {
+            let sender = tx.clone();
+            threads.push(thread::spawn(move || {
+                SenderTask {
+                    sender: sender,
+                    count: AMT,
+                }.wait()
+            }));
+        }
+        drop(tx);
+
+        let mut rx = rx.wait();
+        for _ in 0..AMT * NTHREADS {
+            assert!(rx.next().is_some());
+        }
+
+        assert!(rx.next().is_none());
+
+        for thread in threads {
+            thread.join().unwrap().unwrap();
+        }
+    }
+
+    stress(0);
+    stress(1);
+    stress(8);
+    stress(16);
+}
+
 fn is_ready<T>(res: &AsyncSink<T>) -> bool {
     match *res {
         AsyncSink::Ready => true,
         _ => false,
     }
 }
+
+#[test]
+fn try_send_1() {
+    const N: usize = 3000;
+    let (mut tx, rx) = mpsc::channel(0);
+
+    let t = thread::spawn(move || {
+        for i in 0..N {
+            loop {
+                if tx.try_send(i).is_ok() {
+                    break
+                }
+            }
+        }
+    });
+    for (i, j) in rx.wait().enumerate() {
+        assert_eq!(i, j.unwrap());
+    }
+    t.join().unwrap();
+}
+
+#[test]
+fn try_send_2() {
+    let (mut tx, rx) = mpsc::channel(0);
+
+    tx.try_send("hello").unwrap();
+
+    let (readytx, readyrx) = oneshot::channel::<()>();
+
+    let th = thread::spawn(|| {
+        lazy(|| {
+            assert!(tx.start_send("fail").unwrap().is_not_ready());
+            Ok::<_, ()>(())
+        }).wait().unwrap();
+
+        drop(readytx);
+        tx.send("goodbye").wait().unwrap();
+    });
+
+    let mut rx = rx.wait();
+
+    drop(readyrx.wait());
+    assert_eq!(rx.next(), Some(Ok("hello")));
+    assert_eq!(rx.next(), Some(Ok("goodbye")));
+    assert!(rx.next().is_none());
+
+    th.join().unwrap();
+}
+
+#[test]
+fn try_send_fail() {
+    let (mut tx, rx) = mpsc::channel(0);
+    let mut rx = rx.wait();
+
+    tx.try_send("hello").unwrap();
+
+    // This should fail
+    assert!(tx.try_send("fail").is_err());
+
+    assert_eq!(rx.next(), Some(Ok("hello")));
+
+    tx.try_send("goodbye").unwrap();
+    drop(tx);
+
+    assert_eq!(rx.next(), Some(Ok("goodbye")));
+    assert!(rx.next().is_none());
+}
--- a/third_party/rust/futures/tests/oneshot.rs
+++ b/third_party/rust/futures/tests/oneshot.rs
@@ -1,14 +1,14 @@
 extern crate futures;
 
 use std::sync::mpsc;
 use std::thread;
 
-use futures::{Future, Poll};
+use futures::prelude::*;
 use futures::future::{lazy, ok};
 use futures::sync::oneshot::*;
 
 mod support;
 use support::*;
 
 #[test]
 fn smoke_poll() {
@@ -16,17 +16,17 @@ fn smoke_poll() {
     let mut task = futures::executor::spawn(lazy(|| {
         assert!(tx.poll_cancel().unwrap().is_not_ready());
         assert!(tx.poll_cancel().unwrap().is_not_ready());
         drop(rx);
         assert!(tx.poll_cancel().unwrap().is_ready());
         assert!(tx.poll_cancel().unwrap().is_ready());
         ok::<(), ()>(())
     }));
-    assert!(task.poll_future(unpark_noop()).unwrap().is_ready());
+    assert!(task.poll_future_notify(&notify_noop(), 0).unwrap().is_ready());
 }
 
 #[test]
 fn cancel_notifies() {
     let (tx, rx) = channel::<u32>();
     let (tx2, rx2) = mpsc::channel();
 
     WaitForCancel { tx: tx }.then(move |v| tx2.send(v)).forget();
@@ -84,8 +84,170 @@ fn close_wakes() {
     let t = thread::spawn(move || {
         rx.close();
         rx2.recv().unwrap();
     });
     WaitForCancel { tx: tx }.wait().unwrap();
     tx2.send(()).unwrap();
     t.join().unwrap();
 }
+
+#[test]
+fn is_canceled() {
+    let (tx, rx) = channel::<u32>();
+    assert!(!tx.is_canceled());
+    drop(rx);
+    assert!(tx.is_canceled());
+}
+
+#[test]
+fn cancel_sends() {
+    let (tx, rx) = mpsc::channel::<Sender<_>>();
+    let t = thread::spawn(move || {
+        for otx in rx {
+            let _ = otx.send(42);
+        }
+    });
+
+    for _ in 0..20000 {
+        let (otx, mut orx) = channel::<u32>();
+        tx.send(otx).unwrap();
+
+        orx.close();
+        // Not necessary to wrap in a task because the implementation of oneshot
+        // never calls `task::current()` if the channel has been closed already.
+        let _ = orx.poll();
+    }
+
+    drop(tx);
+    t.join().unwrap();
+}
+
+#[test]
+fn spawn_sends_items() {
+    let core = local_executor::Core::new();
+    let future = ok::<_, ()>(1);
+    let rx = spawn(future, &core);
+    assert_eq!(core.run(rx).unwrap(), 1);
+}
+
+#[test]
+fn spawn_kill_dead_stream() {
+    use std::thread;
+    use std::time::Duration;
+    use futures::future::Either;
+    use futures::sync::oneshot;
+
+    // a future which never returns anything (forever accepting incoming
+    // connections), but dropping it leads to observable side effects
+    // (like closing listening sockets, releasing limited resources,
+    // ...)
+    #[derive(Debug)]
+    struct Dead {
+        // when dropped you should get Err(oneshot::Canceled) on the
+        // receiving end
+        done: oneshot::Sender<()>,
+    }
+    impl Future for Dead {
+        type Item = ();
+        type Error = ();
+
+        fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
+            Ok(Async::NotReady)
+        }
+    }
+
+    // need to implement a timeout for the test, as it would hang
+    // forever right now
+    let (timeout_tx, timeout_rx) = oneshot::channel();
+    thread::spawn(move || {
+        thread::sleep(Duration::from_millis(1000));
+        let _ = timeout_tx.send(());
+    });
+
+    let core = local_executor::Core::new();
+    let (done_tx, done_rx) = oneshot::channel();
+    let future = Dead{done: done_tx};
+    let rx = spawn(future, &core);
+    let res = core.run(
+        Ok::<_, ()>(())
+        .into_future()
+        .then(move |_| {
+            // now drop the spawned future: maybe some timeout exceeded,
+            // or some connection on this end was closed by the remote
+            // end.
+            drop(rx);
+            // and wait for the spawned future to release its resources
+            done_rx
+        })
+        .select2(timeout_rx)
+    );
+    match res {
+        Err(Either::A((oneshot::Canceled, _))) => (),
+        Ok(Either::B(((), _))) => {
+            panic!("dead future wasn't canceled (timeout)");
+        },
+        _ => {
+            panic!("dead future wasn't canceled (unexpected result)");
+        },
+    }
+}
+
+#[test]
+fn spawn_dont_kill_forgot_dead_stream() {
+    use std::thread;
+    use std::time::Duration;
+    use futures::future::Either;
+    use futures::sync::oneshot;
+
+    // a future which never returns anything (forever accepting incoming
+    // connections), but dropping it leads to observable side effects
+    // (like closing listening sockets, releasing limited resources,
+    // ...)
+    #[derive(Debug)]
+    struct Dead {
+        // when dropped you should get Err(oneshot::Canceled) on the
+        // receiving end
+        done: oneshot::Sender<()>,
+    }
+    impl Future for Dead {
+        type Item = ();
+        type Error = ();
+
+        fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
+            Ok(Async::NotReady)
+        }
+    }
+
+    // need to implement a timeout for the test, as it would hang
+    // forever right now
+    let (timeout_tx, timeout_rx) = oneshot::channel();
+    thread::spawn(move || {
+        thread::sleep(Duration::from_millis(1000));
+        let _ = timeout_tx.send(());
+    });
+
+    let core = local_executor::Core::new();
+    let (done_tx, done_rx) = oneshot::channel();
+    let future = Dead{done: done_tx};
+    let rx = spawn(future, &core);
+    let res = core.run(
+        Ok::<_, ()>(())
+        .into_future()
+        .then(move |_| {
+            // forget the spawned future: should keep running, i.e. hit
+            // the timeout below.
+            rx.forget();
+            // and wait for the spawned future to release its resources
+            done_rx
+        })
+        .select2(timeout_rx)
+    );
+    match res {
+        Err(Either::A((oneshot::Canceled, _))) => {
+            panic!("forgotten dead future was canceled");
+        },
+        Ok(Either::B(((), _))) => (), // reached timeout
+        _ => {
+            panic!("forgotten dead future was canceled (unexpected result)");
+        },
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/futures/tests/ready_queue.rs
@@ -0,0 +1,164 @@
+extern crate futures;
+
+use std::panic::{self, AssertUnwindSafe};
+
+use futures::prelude::*;
+use futures::Async::*;
+use futures::future;
+use futures::stream::FuturesUnordered;
+use futures::sync::oneshot;
+
+trait AssertSendSync: Send + Sync {}
+impl AssertSendSync for FuturesUnordered<()> {}
+
+#[test]
+fn basic_usage() {
+    future::lazy(move || {
+        let mut queue = FuturesUnordered::new();
+        let (tx1, rx1) = oneshot::channel();
+        let (tx2, rx2) = oneshot::channel();
+        let (tx3, rx3) = oneshot::channel();
+
+        queue.push(rx1);
+        queue.push(rx2);
+        queue.push(rx3);
+
+        assert!(!queue.poll().unwrap().is_ready());
+
+        tx2.send("hello").unwrap();
+
+        assert_eq!(Ready(Some("hello")), queue.poll().unwrap());
+        assert!(!queue.poll().unwrap().is_ready());
+
+        tx1.send("world").unwrap();
+        tx3.send("world2").unwrap();
+
+        assert_eq!(Ready(Some("world")), queue.poll().unwrap());
+        assert_eq!(Ready(Some("world2")), queue.poll().unwrap());
+        assert_eq!(Ready(None), queue.poll().unwrap());
+
+        Ok::<_, ()>(())
+    }).wait().unwrap();
+}
+
+#[test]
+fn resolving_errors() {
+    future::lazy(move || {
+        let mut queue = FuturesUnordered::new();
+        let (tx1, rx1) = oneshot::channel();
+        let (tx2, rx2) = oneshot::channel();
+        let (tx3, rx3) = oneshot::channel();
+
+        queue.push(rx1);
+        queue.push(rx2);
+        queue.push(rx3);
+
+        assert!(!queue.poll().unwrap().is_ready());
+
+        drop(tx2);
+
+        assert!(queue.poll().is_err());
+        assert!(!queue.poll().unwrap().is_ready());
+
+        drop(tx1);
+        tx3.send("world2").unwrap();
+
+        assert!(queue.poll().is_err());
+        assert_eq!(Ready(Some("world2")), queue.poll().unwrap());
+        assert_eq!(Ready(None), queue.poll().unwrap());
+
+        Ok::<_, ()>(())
+    }).wait().unwrap();
+}
+
+#[test]
+fn dropping_ready_queue() {
+    future::lazy(move || {
+        let mut queue = FuturesUnordered::new();
+        let (mut tx1, rx1) = oneshot::channel::<()>();
+        let (mut tx2, rx2) = oneshot::channel::<()>();
+        let (mut tx3, rx3) = oneshot::channel::<()>();
+
+        queue.push(rx1);
+        queue.push(rx2);
+        queue.push(rx3);
+
+        assert!(!tx1.poll_cancel().unwrap().is_ready());
+        assert!(!tx2.poll_cancel().unwrap().is_ready());
+        assert!(!tx3.poll_cancel().unwrap().is_ready());
+
+        drop(queue);
+
+        assert!(tx1.poll_cancel().unwrap().is_ready());
+        assert!(tx2.poll_cancel().unwrap().is_ready());
+        assert!(tx3.poll_cancel().unwrap().is_ready());
+
+        Ok::<_, ()>(())
+    }).wait().unwrap();
+}
+
+#[test]
+fn stress() {
+    const ITER: usize = 300;
+
+    use std::sync::{Arc, Barrier};
+    use std::thread;
+
+    for i in 0..ITER {
+        let n = (i % 10) + 1;
+
+        let mut queue = FuturesUnordered::new();
+
+        for _ in 0..5 {
+            let barrier = Arc::new(Barrier::new(n + 1));
+
+            for num in 0..n {
+                let barrier = barrier.clone();
+                let (tx, rx) = oneshot::channel();
+
+                queue.push(rx);
+
+                thread::spawn(move || {
+                    barrier.wait();
+                    tx.send(num).unwrap();
+                });
+            }
+
+            barrier.wait();
+
+            let mut sync = queue.wait();
+
+            let mut rx: Vec<_> = (&mut sync)
+                .take(n)
+                .map(|res| res.unwrap())
+                .collect();
+
+            assert_eq!(rx.len(), n);
+
+            rx.sort();
+
+            for num in 0..n {
+                assert_eq!(rx[num], num);
+            }
+
+            queue = sync.into_inner();
+        }
+    }
+}
+
+#[test]
+fn panicking_future_dropped() {
+    future::lazy(move || {
+        let mut queue = FuturesUnordered::new();
+        queue.push(future::poll_fn(|| -> Poll<i32, i32> {
+            panic!()
+        }));
+
+        let r = panic::catch_unwind(AssertUnwindSafe(|| queue.poll()));
+        assert!(r.is_err());
+        assert!(queue.is_empty());
+        assert_eq!(Ready(None), queue.poll().unwrap());
+
+        Ok::<_, ()>(())
+    }).wait().unwrap();
+}
--- a/third_party/rust/futures/tests/recurse.rs
+++ b/third_party/rust/futures/tests/recurse.rs
@@ -1,21 +1,22 @@
 extern crate futures;
 
 use std::sync::mpsc::channel;
 
-use futures::future::{ok, Future};
+use futures::future::ok;
+use futures::prelude::*;
 
 #[test]
 fn lots() {
     fn doit(n: usize) -> Box<Future<Item=(), Error=()> + Send> {
         if n == 0 {
-            ok(()).boxed()
+            Box::new(ok(()))
         } else {
-            ok(n - 1).and_then(doit).boxed()
+            Box::new(ok(n - 1).and_then(doit))
         }
     }
 
     let (tx, rx) = channel();
     ::std::thread::spawn(|| {
         doit(1_000).map(move |_| tx.send(()).unwrap()).wait()
     });
     rx.recv().unwrap();
--- a/third_party/rust/futures/tests/select_all.rs
+++ b/third_party/rust/futures/tests/select_all.rs
@@ -1,26 +1,27 @@
 extern crate futures;
 
-use futures::future::*;
+use futures::prelude::*;
+use futures::future::{ok, select_all, err};
 
 #[test]
 fn smoke() {
     let v = vec![
-        ok(1).boxed(),
-        err(2).boxed(),
-        ok(3).boxed(),
+        ok(1),
+        err(2),
+        ok(3),
     ];
 
     let (i, idx, v) = select_all(v).wait().ok().unwrap();
     assert_eq!(i, 1);
     assert_eq!(idx, 0);
 
     let (i, idx, v) = select_all(v).wait().err().unwrap();
     assert_eq!(i, 2);
     assert_eq!(idx, 0);
 
     let (i, idx, v) = select_all(v).wait().ok().unwrap();
     assert_eq!(i, 3);
     assert_eq!(idx, 0);
 
-    assert!(v.len() == 0);
+    assert!(v.is_empty());
 }
--- a/third_party/rust/futures/tests/select_ok.rs
+++ b/third_party/rust/futures/tests/select_ok.rs
@@ -1,40 +1,40 @@
 extern crate futures;
 
 use futures::future::*;
 
 #[test]
 fn ignore_err() {
     let v = vec![
-        err(1).boxed(),
-        err(2).boxed(),
-        ok(3).boxed(),
-        ok(4).boxed(),
+        err(1),
+        err(2),
+        ok(3),
+        ok(4),
     ];
 
     let (i, v) = select_ok(v).wait().ok().unwrap();
     assert_eq!(i, 3);
 
-    assert!(v.len() == 1);
+    assert_eq!(v.len(), 1);
 
     let (i, v) = select_ok(v).wait().ok().unwrap();
     assert_eq!(i, 4);
 
-    assert!(v.len() == 0);
+    assert!(v.is_empty());
 }
 
 #[test]
 fn last_err() {
     let v = vec![
-        ok(1).boxed(),
-        err(2).boxed(),
-        err(3).boxed(),
+        ok(1),
+        err(2),
+        err(3),
     ];
 
     let (i, v) = select_ok(v).wait().ok().unwrap();
     assert_eq!(i, 1);
 
-    assert!(v.len() == 2);
+    assert_eq!(v.len(), 2);
 
     let i = select_ok(v).wait().err().unwrap();
     assert_eq!(i, 3);
 }
--- a/third_party/rust/futures/tests/shared.rs
+++ b/third_party/rust/futures/tests/shared.rs
@@ -2,30 +2,30 @@ extern crate futures;
 
 mod support;
 
 use std::cell::RefCell;
 use std::rc::Rc;
 use std::thread;
 
 use futures::sync::oneshot;
-use futures::Future;
+use futures::prelude::*;
 use futures::future;
 
 fn send_shared_oneshot_and_wait_on_multiple_threads(threads_number: u32) {
     let (tx, rx) = oneshot::channel::<u32>();
     let f = rx.shared();
     let threads = (0..threads_number).map(|_| {
         let cloned_future = f.clone();
         thread::spawn(move || {
-            assert!(*cloned_future.wait().unwrap() == 6);
+            assert_eq!(*cloned_future.wait().unwrap(), 6);
         })
     }).collect::<Vec<_>>();
     tx.send(6).unwrap();
-    assert!(*f.wait().unwrap() == 6);
+    assert_eq!(*f.wait().unwrap(), 6);
     for f in threads {
         f.join().unwrap();
     }
 }
 
 #[test]
 fn one_thread() {
     send_shared_oneshot_and_wait_on_multiple_threads(1);
@@ -52,17 +52,17 @@ fn drop_on_one_task_ok() {
     let t1 = thread::spawn(|| {
         let f = f1.map_err(|_| ()).map(|x| *x).select(rx2.map_err(|_| ()));
         drop(f.wait());
     });
 
     let (tx3, rx3) = oneshot::channel::<u32>();
 
     let t2 = thread::spawn(|| {
-        drop(f2.map(|x| tx3.send(*x).unwrap()).map_err(|_| ()).wait());
+        let _ = f2.map(|x| tx3.send(*x).unwrap()).map_err(|_| ()).wait();
     });
 
     tx2.send(11).unwrap(); // cancel `f1`
     t1.join().unwrap();
 
     tx.send(42).unwrap(); // Should cause `f2` and then `rx3` to get resolved.
     let result = rx3.wait().unwrap();
     assert_eq!(result, 42);
@@ -79,17 +79,17 @@ fn drop_in_poll() {
     }).shared();
     let future2 = Box::new(future.clone()) as Box<Future<Item=_, Error=_>>;
     *slot.borrow_mut() = Some(future2);
     assert_eq!(*future.wait().unwrap(), 1);
 }
 
 #[test]
 fn peek() {
-    let mut core = ::support::local_executor::Core::new();
+    let core = ::support::local_executor::Core::new();
 
     let (tx0, rx0) = oneshot::channel::<u32>();
     let f1 = rx0.shared();
     let f2 = f1.clone();
 
     // Repeated calls on the original or clone do not change the outcome.
     for _ in 0..2 {
         assert!(f1.peek().is_none());
@@ -108,17 +108,17 @@ fn peek() {
     core.run(future::ok::<(),()>(())).unwrap();
     for _ in 0..2 {
         assert_eq!(42, *f2.peek().unwrap().unwrap());
     }
 }
 
 #[test]
 fn polled_then_ignored() {
-    let mut core = ::support::local_executor::Core::new();
+    let core = ::support::local_executor::Core::new();
 
     let (tx0, rx0) = oneshot::channel::<u32>();
     let f1 = rx0.shared();
     let f2 = f1.clone();
 
     let (tx1, rx1) = oneshot::channel::<u32>();
     let (tx2, rx2) = oneshot::channel::<u32>();
     let (tx3, rx3) = oneshot::channel::<u32>();
@@ -141,26 +141,26 @@ fn polled_then_ignored() {
     assert_eq!(core.run(rx1).unwrap(), 42);
 }
 
 #[test]
 fn recursive_poll() {
     use futures::sync::mpsc;
     use futures::Stream;
 
-    let mut core = ::support::local_executor::Core::new();
+    let core = ::support::local_executor::Core::new();
     let (tx0, rx0) = mpsc::unbounded::<Box<Future<Item=(),Error=()>>>();
     let run_stream = rx0.for_each(|f| f);
 
     let (tx1, rx1) = oneshot::channel::<()>();
 
     let f1 = run_stream.shared();
     let f2 = f1.clone();
     let f3 = f1.clone();
-    tx0.send(Box::new(
+    tx0.unbounded_send(Box::new(
         f1.map(|_|()).map_err(|_|())
             .select(rx1.map_err(|_|()))
             .map(|_| ()).map_err(|_|()))).unwrap();
 
     core.spawn(f2.map(|_|()).map_err(|_|()));
 
     // Call poll() on the spawned future. We want to be sure that this does not trigger a
     // deadlock or panic due to a recursive lock() on a mutex.
@@ -171,27 +171,27 @@ fn recursive_poll() {
     core.run(f3).unwrap();
 }
 
 #[test]
 fn recursive_poll_with_unpark() {
     use futures::sync::mpsc;
     use futures::{Stream, task};
 
-    let mut core = ::support::local_executor::Core::new();
+    let core = ::support::local_executor::Core::new();
     let (tx0, rx0) = mpsc::unbounded::<Box<Future<Item=(),Error=()>>>();
     let run_stream = rx0.for_each(|f| f);
 
     let (tx1, rx1) = oneshot::channel::<()>();
 
     let f1 = run_stream.shared();
     let f2 = f1.clone();
     let f3 = f1.clone();
-    tx0.send(Box::new(future::lazy(move || {
-        task::park().unpark();
+    tx0.unbounded_send(Box::new(future::lazy(move || {
+        task::current().notify();
         f1.map(|_|()).map_err(|_|())
             .select(rx1.map_err(|_|()))
             .map(|_| ()).map_err(|_|())
     }))).unwrap();
 
     core.spawn(f2.map(|_|()).map_err(|_|()));
 
     // Call poll() on the spawned future. We want to be sure that this does not trigger a
--- a/third_party/rust/futures/tests/sink.rs
+++ b/third_party/rust/futures/tests/sink.rs
@@ -1,23 +1,23 @@
 extern crate futures;
 
 use std::mem;
 use std::sync::Arc;
 use std::rc::Rc;
 use std::cell::{Cell, RefCell};
 use std::sync::atomic::{Ordering, AtomicBool};
 
-use futures::{Poll, Async, Future, AsyncSink, StartSend};
+use futures::prelude::*;
 use futures::future::ok;
 use futures::stream;
 use futures::sync::{oneshot, mpsc};
 use futures::task::{self, Task};
-use futures::executor::{self, Unpark};
-use futures::sink::*;
+use futures::executor::{self, Notify};
+use futures::sink::SinkFromErr;
 
 mod support;
 use support::*;
 
 #[test]
 fn vec_sink() {
     let mut v = Vec::new();
     assert_eq!(v.start_send(0), Ok(AsyncSink::Ready));
@@ -39,24 +39,24 @@ fn send() {
     assert_done(move || v.send(2),
                 Ok(vec![0, 1, 2]));
 }
 
 #[test]
 fn send_all() {
     let v = Vec::new();
 
-    let (v, _) = v.send_all(stream::iter(vec![Ok(0), Ok(1)])).wait().unwrap();
+    let (v, _) = v.send_all(stream::iter_ok(vec![0, 1])).wait().unwrap();
     assert_eq!(v, vec![0, 1]);
 
-    let (v, _) = v.send_all(stream::iter(vec![Ok(2), Ok(3)])).wait().unwrap();
+    let (v, _) = v.send_all(stream::iter_ok(vec![2, 3])).wait().unwrap();
     assert_eq!(v, vec![0, 1, 2, 3]);
 
     assert_done(
-        move || v.send_all(stream::iter(vec![Ok(4), Ok(5)])).map(|(v, _)| v),
+        move || v.send_all(stream::iter_ok(vec![4, 5])).map(|(v, _)| v),
         Ok(vec![0, 1, 2, 3, 4, 5]));
 }
 
 // An Unpark struct that records unpark events for inspection
 struct Flag(pub AtomicBool);
 
 impl Flag {
     fn new() -> Arc<Flag> {
@@ -67,18 +67,18 @@ impl Flag {
         self.0.load(Ordering::SeqCst)
     }
 
     fn set(&self, v: bool) {
         self.0.store(v, Ordering::SeqCst)
     }
 }
 
-impl Unpark for Flag {
-    fn unpark(&self) {
+impl Notify for Flag {
+    fn notify(&self, _id: usize) {
         self.set(true)
     }
 }
 
 // Sends a value on an i32 channel sink
 struct StartSendFut<S: Sink>(Option<S>, Option<S::SinkItem>);
 
 impl<S: Sink> StartSendFut<S> {
@@ -87,17 +87,17 @@ impl<S: Sink> StartSendFut<S> {
     }
 }
 
 impl<S: Sink> Future for StartSendFut<S> {
     type Item = S;
     type Error = S::SinkError;
 
     fn poll(&mut self) -> Poll<S, S::SinkError> {
-        match try!(self.0.as_mut().unwrap().start_send(self.1.take().unwrap())) {
+        match self.0.as_mut().unwrap().start_send(self.1.take().unwrap())? {
             AsyncSink::Ready => Ok(Async::Ready(self.0.take().unwrap())),
             AsyncSink::NotReady(item) => {
                 self.1 = Some(item);
                 Ok(Async::NotReady)
             }
         }
 
     }
@@ -110,49 +110,49 @@ fn mpsc_blocking_start_send() {
     let (mut tx, mut rx) = mpsc::channel::<i32>(0);
 
     futures::future::lazy(|| {
         assert_eq!(tx.start_send(0).unwrap(), AsyncSink::Ready);
 
         let flag = Flag::new();
         let mut task = executor::spawn(StartSendFut::new(tx, 1));
 
-        assert!(task.poll_future(flag.clone()).unwrap().is_not_ready());
+        assert!(task.poll_future_notify(&flag, 0).unwrap().is_not_ready());
         assert!(!flag.get());
         sassert_next(&mut rx, 0);
         assert!(flag.get());
         flag.set(false);
-        assert!(task.poll_future(flag.clone()).unwrap().is_ready());
+        assert!(task.poll_future_notify(&flag, 0).unwrap().is_ready());
         assert!(!flag.get());
         sassert_next(&mut rx, 1);
 
         Ok::<(), ()>(())
     }).wait().unwrap();
 }
 
 #[test]
 // test `flush` by using `with` to make the first insertion into a sink block
 // until a oneshot is completed
 fn with_flush() {
     let (tx, rx) = oneshot::channel();
-    let mut block = rx.boxed();
+    let mut block = Box::new(rx) as Box<Future<Item = _, Error = _>>;
     let mut sink = Vec::new().with(|elem| {
-        mem::replace(&mut block, ok(()).boxed())
+        mem::replace(&mut block, Box::new(ok(())))
             .map(move |_| elem + 1).map_err(|_| -> () { panic!() })
     });
 
     assert_eq!(sink.start_send(0), Ok(AsyncSink::Ready));
 
     let flag = Flag::new();
     let mut task = executor::spawn(sink.flush());
-    assert!(task.poll_future(flag.clone()).unwrap().is_not_ready());
+    assert!(task.poll_future_notify(&flag, 0).unwrap().is_not_ready());
     tx.send(()).unwrap();
     assert!(flag.get());
 
-    let sink = match task.poll_future(flag.clone()).unwrap() {
+    let sink = match task.poll_future_notify(&flag, 0).unwrap() {
         Async::Ready(sink) => sink,
         _ => panic!()
     };
 
     assert_eq!(sink.send(1).wait().unwrap().get_ref(), &[1, 2]);
 }
 
 #[test]
@@ -162,16 +162,29 @@ fn with_as_map() {
         Ok(item * 2)
     });
     let sink = sink.send(0).wait().unwrap();
     let sink = sink.send(1).wait().unwrap();
     let sink = sink.send(2).wait().unwrap();
     assert_eq!(sink.get_ref(), &[0, 2, 4]);
 }
 
+#[test]
+// test simple use of with_flat_map
+fn with_flat_map() {
+    let sink = Vec::new().with_flat_map(|item| {
+        stream::iter_ok(vec![item; item])
+    });
+    let sink = sink.send(0).wait().unwrap();
+    let sink = sink.send(1).wait().unwrap();
+    let sink = sink.send(2).wait().unwrap();
+    let sink = sink.send(3).wait().unwrap();
+    assert_eq!(sink.get_ref(), &[1,2,2,3,3,3]);
+}
+
 // Immediately accepts all requests to start pushing, but completion is managed
 // by manually flushing
 struct ManualFlush<T> {
     data: Vec<T>,
     waiting_tasks: Vec<Task>,
 }
 
 impl<T> Sink for ManualFlush<T> {
@@ -186,17 +199,17 @@ impl<T> Sink for ManualFlush<T> {
         }
         Ok(AsyncSink::Ready)
     }
 
     fn poll_complete(&mut self) -> Poll<(), ()> {
         if self.data.is_empty() {
             Ok(Async::Ready(()))
         } else {
-            self.waiting_tasks.push(task::park());
+            self.waiting_tasks.push(task::current());
             Ok(Async::NotReady)
         }
     }
 
     fn close(&mut self) -> Poll<(), ()> {
         Ok(().into())
     }
 }
@@ -206,37 +219,37 @@ impl<T> ManualFlush<T> {
         ManualFlush {
             data: Vec::new(),
             waiting_tasks: Vec::new()
         }
     }
 
     fn force_flush(&mut self) -> Vec<T> {
         for task in self.waiting_tasks.drain(..) {
-            task.unpark()
+            task.notify()
         }
         mem::replace(&mut self.data, Vec::new())
     }
 }
 
 #[test]
 // test that the `with` sink doesn't require the underlying sink to flush,
-// but doesn't claim to be flushed until the underlyig sink is
+// but doesn't claim to be flushed until the underlying sink is
 fn with_flush_propagate() {
     let mut sink = ManualFlush::new().with(|x| -> Result<Option<i32>, ()> { Ok(x) });
     assert_eq!(sink.start_send(Some(0)).unwrap(), AsyncSink::Ready);
     assert_eq!(sink.start_send(Some(1)).unwrap(), AsyncSink::Ready);
 
     let flag = Flag::new();
     let mut task = executor::spawn(sink.flush());
-    assert!(task.poll_future(flag.clone()).unwrap().is_not_ready());
+    assert!(task.poll_future_notify(&flag, 0).unwrap().is_not_ready());
     assert!(!flag.get());
     assert_eq!(task.get_mut().get_mut().get_mut().force_flush(), vec![0, 1]);
     assert!(flag.get());
-    assert!(task.poll_future(flag.clone()).unwrap().is_ready());
+    assert!(task.poll_future_notify(&flag, 0).unwrap().is_ready());
 }
 
 #[test]
 // test that a buffer is a no-nop around a sink that always accepts sends
 fn buffer_noop() {
     let sink = Vec::new().buffer(0);
     let sink = sink.send(0).wait().unwrap();
     let sink = sink.send(1).wait().unwrap();
@@ -265,26 +278,26 @@ impl Allow {
             tasks: RefCell::new(Vec::new()),
         }
     }
 
     fn check(&self) -> bool {
         if self.flag.get() {
             true
         } else {
-            self.tasks.borrow_mut().push(task::park());
+            self.tasks.borrow_mut().push(task::current());
             false
         }
     }
 
     fn start(&self) {
         self.flag.set(true);
         let mut tasks = self.tasks.borrow_mut();
         for task in tasks.drain(..) {
-            task.unpark();
+            task.notify();
         }
     }
 }
 
 impl<T> Sink for ManualAllow<T> {
     type SinkItem = T;
     type SinkError = ();
 
@@ -322,29 +335,79 @@ fn buffer() {
     let (sink, allow) = manual_allow::<i32>();
     let sink = sink.buffer(2);
 
     let sink = StartSendFut::new(sink, 0).wait().unwrap();
     let sink = StartSendFut::new(sink, 1).wait().unwrap();
 
     let flag = Flag::new();
     let mut task = executor::spawn(sink.send(2));
-    assert!(task.poll_future(flag.clone()).unwrap().is_not_ready());
+    assert!(task.poll_future_notify(&flag, 0).unwrap().is_not_ready());
     assert!(!flag.get());
     allow.start();
     assert!(flag.get());
-    match task.poll_future(flag.clone()).unwrap() {
+    match task.poll_future_notify(&flag, 0).unwrap() {
         Async::Ready(sink) => {
             assert_eq!(sink.get_ref().data, vec![0, 1, 2]);
         }
         _ => panic!()
     }
 }
 
 #[test]
+fn fanout_smoke() {
+    let sink1 = Vec::new();
+    let sink2 = Vec::new();
+    let sink = sink1.fanout(sink2);
+    let stream = futures::stream::iter_ok(vec![1,2,3]);
+    let (sink, _) = sink.send_all(stream).wait().unwrap();
+    let (sink1, sink2) = sink.into_inner();
+    assert_eq!(sink1, vec![1,2,3]);
+    assert_eq!(sink2, vec![1,2,3]);
+}
+
+#[test]
+fn fanout_backpressure() {
+    let (left_send, left_recv) = mpsc::channel(0);
+    let (right_send, right_recv) = mpsc::channel(0);
+    let sink = left_send.fanout(right_send);
+
+    let sink = StartSendFut::new(sink, 0).wait().unwrap();
+    let sink = StartSendFut::new(sink, 1).wait().unwrap();
+
+    let flag = Flag::new();
+    let mut task = executor::spawn(sink.send(2));
+    assert!(!flag.get());
+    assert!(task.poll_future_notify(&flag, 0).unwrap().is_not_ready());
+    let (item, left_recv) = left_recv.into_future().wait().unwrap();
+    assert_eq!(item, Some(0));
+    assert!(flag.get());
+    assert!(task.poll_future_notify(&flag, 0).unwrap().is_not_ready());
+    let (item, right_recv) = right_recv.into_future().wait().unwrap();
+    assert_eq!(item, Some(0));
+    assert!(flag.get());
+    assert!(task.poll_future_notify(&flag, 0).unwrap().is_not_ready());
+    let (item, left_recv) = left_recv.into_future().wait().unwrap();
+    assert_eq!(item, Some(1));
+    assert!(flag.get());
+    assert!(task.poll_future_notify(&flag, 0).unwrap().is_not_ready());
+    let (item, right_recv) = right_recv.into_future().wait().unwrap();
+    assert_eq!(item, Some(1));
+    assert!(flag.get());
+    match task.poll_future_notify(&flag, 0).unwrap() {
+        Async::Ready(_) => {
+        },
+        _ => panic!()
+    };
+    // make sure receivers live until end of test to prevent send errors
+    drop(left_recv);
+    drop(right_recv);
+}
+
+#[test]
 fn map_err() {
     {
         let (tx, _rx) = mpsc::channel(1);
         let mut tx = tx.sink_map_err(|_| ());
         assert_eq!(tx.start_send(()), Ok(AsyncSink::Ready));
         assert_eq!(tx.poll_complete(), Ok(Async::Ready(())));
     }
 
--- a/third_party/rust/futures/tests/split.rs
+++ b/third_party/rust/futures/tests/split.rs
@@ -1,12 +1,12 @@
 extern crate futures;
 
-use futures::{Future, StartSend, Sink, Stream, Poll};
-use futures::stream::iter;
+use futures::prelude::*;
+use futures::stream::iter_ok;
 
 struct Join<T, U>(T, U);
 
 impl<T: Stream, U> Stream for Join<T, U> {
     type Item = T::Item;
     type Error = T::Error;
 
     fn poll(&mut self) -> Poll<Option<T::Item>, T::Error> {
@@ -32,14 +32,16 @@ impl<T, U: Sink> Sink for Join<T, U> {
         self.1.close()
     }
 }
 
 #[test]
 fn test_split() {
     let mut dest = Vec::new();
     {
-        let j = Join(iter(vec![Ok(10), Ok(20), Ok(30)]), &mut dest);
+        let j = Join(iter_ok(vec![10, 20, 30]), &mut dest);
+        let (sink, stream) = j.split();
+        let j = sink.reunite(stream).expect("test_split: reunite error");
         let (sink, stream) = j.split();
         sink.send_all(stream).wait().unwrap();
     }
     assert_eq!(dest, vec![10, 20, 30]);
 }
--- a/third_party/rust/futures/tests/stream.rs
+++ b/third_party/rust/futures/tests/stream.rs
@@ -1,38 +1,64 @@
 #[macro_use]
 extern crate futures;
 
-use futures::{Poll, Future, Stream, Sink};
+use futures::prelude::*;
 use futures::executor;
-use futures::future::{ok, err};
-use futures::stream::{iter, Peekable, BoxStream};
+use futures::future::{err, ok};
+use futures::stream::{empty, iter_ok, poll_fn, Peekable};
 use futures::sync::oneshot;
 use futures::sync::mpsc;
 
 mod support;
 use support::*;
 
+pub struct Iter<I> {
+    iter: I,
+}
 
-fn list() -> BoxStream<i32, u32> {
+pub fn iter<J, T, E>(i: J) -> Iter<J::IntoIter>
+    where J: IntoIterator<Item=Result<T, E>>,
+{
+    Iter {
+        iter: i.into_iter(),
+    }
+}
+
+impl<I, T, E> Stream for Iter<I>
+    where I: Iterator<Item=Result<T, E>>,
+{
+    type Item = T;
+    type Error = E;
+
+    fn poll(&mut self) -> Poll<Option<T>, E> {
+        match self.iter.next() {
+            Some(Ok(e)) => Ok(Async::Ready(Some(e))),
+            Some(Err(e)) => Err(e),
+            None => Ok(Async::Ready(None)),
+        }
+    }
+}
+
+fn list() -> Box<Stream<Item=i32, Error=u32> + Send> {
     let (tx, rx) = mpsc::channel(1);
     tx.send(Ok(1))
       .and_then(|tx| tx.send(Ok(2)))
       .and_then(|tx| tx.send(Ok(3)))
       .forget();
-    rx.then(|r| r.unwrap()).boxed()
+    Box::new(rx.then(|r| r.unwrap()))
 }
 
-fn err_list() -> BoxStream<i32, u32> {
+fn err_list() -> Box<Stream<Item=i32, Error=u32> + Send> {
     let (tx, rx) = mpsc::channel(1);
     tx.send(Ok(1))
       .and_then(|tx| tx.send(Ok(2)))
       .and_then(|tx| tx.send(Err(3)))
       .forget();
-    rx.then(|r| r.unwrap()).boxed()
+    Box::new(rx.then(|r| r.unwrap()))
 }
 
 #[test]
 fn map() {
     assert_done(|| list().map(|a| a + 1).collect(), Ok(vec![2, 3, 4]));
 }
 
 #[test]
@@ -168,36 +194,36 @@ fn fuse() {
 }
 
 #[test]
 fn buffered() {
     let (tx, rx) = mpsc::channel(1);
     let (a, b) = oneshot::channel::<u32>();
     let (c, d) = oneshot::channel::<u32>();
 
-    tx.send(b.map_err(|_| ()).boxed())
-      .and_then(|tx| tx.send(d.map_err(|_| ()).boxed()))
+    tx.send(Box::new(b.map_err(|_| ())) as Box<Future<Item = _, Error = _> + Send>)
+      .and_then(|tx| tx.send(Box::new(d.map_err(|_| ()))))
       .forget();
 
     let mut rx = rx.buffered(2);
     sassert_empty(&mut rx);
     c.send(3).unwrap();
     sassert_empty(&mut rx);
     a.send(5).unwrap();
     let mut rx = rx.wait();
     assert_eq!(rx.next(), Some(Ok(5)));
     assert_eq!(rx.next(), Some(Ok(3)));
     assert_eq!(rx.next(), None);
 
     let (tx, rx) = mpsc::channel(1);
     let (a, b) = oneshot::channel::<u32>();
     let (c, d) = oneshot::channel::<u32>();
 
-    tx.send(b.map_err(|_| ()).boxed())
-      .and_then(|tx| tx.send(d.map_err(|_| ()).boxed()))
+    tx.send(Box::new(b.map_err(|_| ())) as Box<Future<Item = _, Error = _> + Send>)
+      .and_then(|tx| tx.send(Box::new(d.map_err(|_| ()))))
       .forget();
 
     let mut rx = rx.buffered(1);
     sassert_empty(&mut rx);
     c.send(3).unwrap();
     sassert_empty(&mut rx);
     a.send(5).unwrap();
     let mut rx = rx.wait();
@@ -207,35 +233,35 @@ fn buffered() {
 }
 
 #[test]
 fn unordered() {
     let (tx, rx) = mpsc::channel(1);
     let (a, b) = oneshot::channel::<u32>();
     let (c, d) = oneshot::channel::<u32>();
 
-    tx.send(b.map_err(|_| ()).boxed())
-      .and_then(|tx| tx.send(d.map_err(|_| ()).boxed()))
+    tx.send(Box::new(b.map_err(|_| ())) as Box<Future<Item = _, Error = _> + Send>)
+      .and_then(|tx| tx.send(Box::new(d.map_err(|_| ()))))
       .forget();
 
     let mut rx = rx.buffer_unordered(2);
     sassert_empty(&mut rx);
     let mut rx = rx.wait();
     c.send(3).unwrap();
     assert_eq!(rx.next(), Some(Ok(3)));
     a.send(5).unwrap();
     assert_eq!(rx.next(), Some(Ok(5)));
     assert_eq!(rx.next(), None);
 
     let (tx, rx) = mpsc::channel(1);
     let (a, b) = oneshot::channel::<u32>();
     let (c, d) = oneshot::channel::<u32>();
 
-    tx.send(b.map_err(|_| ()).boxed())
-      .and_then(|tx| tx.send(d.map_err(|_| ()).boxed()))
+    tx.send(Box::new(b.map_err(|_| ())) as Box<Future<Item = _, Error = _> + Send>)
+      .and_then(|tx| tx.send(Box::new(d.map_err(|_| ()))))
       .forget();
 
     // We don't even get to see `c` until `a` completes.
     let mut rx = rx.buffer_unordered(1);
     sassert_empty(&mut rx);
     c.send(3).unwrap();
     sassert_empty(&mut rx);
     a.send(5).unwrap();
@@ -256,17 +282,17 @@ fn zip() {
     assert_done(|| err_list().zip(list()).collect(), Err(3));
     assert_done(|| list().zip(list().map(|x| x + 1)).collect(),
                 Ok(vec![(1, 2), (2, 3), (3, 4)]));
 }
 
 #[test]
 fn peek() {
     struct Peek {
-        inner: Peekable<BoxStream<i32, u32>>
+        inner: Peekable<Box<Stream<Item = i32, Error = u32> + Send>>
     }
 
     impl Future for Peek {
         type Item = ();
         type Error = u32;
 
         fn poll(&mut self) -> Poll<(), u32> {
             {
@@ -305,42 +331,84 @@ fn chunks() {
 #[test]
 #[should_panic]
 fn chunks_panic_on_cap_zero() {
     let _ = list().chunks(0);
 }
 
 #[test]
 fn select() {
-    let a = iter(vec![Ok::<_, u32>(1), Ok(2), Ok(3)]);
-    let b = iter(vec![Ok(4), Ok(5), Ok(6)]);
+    let a = iter_ok::<_, u32>(vec![1, 2, 3]);
+    let b = iter_ok(vec![4, 5, 6]);
     assert_done(|| a.select(b).collect(), Ok(vec![1, 4, 2, 5, 3, 6]));
 
-    let a = iter(vec![Ok::<_, u32>(1), Ok(2), Ok(3)]);
-    let b = iter(vec![Ok(1), Ok(2)]);
+    let a = iter_ok::<_, u32>(vec![1, 2, 3]);
+    let b = iter_ok(vec![1, 2]);
     assert_done(|| a.select(b).collect(), Ok(vec![1, 1, 2, 2, 3]));
 
-    let a = iter(vec![Ok(1), Ok(2)]);
-    let b = iter(vec![Ok::<_, u32>(1), Ok(2), Ok(3)]);
+    let a = iter_ok(vec![1, 2]);
+    let b = iter_ok::<_, u32>(vec![1, 2, 3]);
     assert_done(|| a.select(b).collect(), Ok(vec![1, 1, 2, 2, 3]));
 }
 
 #[test]
 fn forward() {
     let v = Vec::new();
-    let v = iter(vec![Ok::<_, ()>(0), Ok(1)]).forward(v).wait().unwrap().1;
+    let v = iter_ok::<_, ()>(vec![0, 1]).forward(v).wait().unwrap().1;
     assert_eq!(v, vec![0, 1]);
 
-    let v = iter(vec![Ok::<_, ()>(2), Ok(3)]).forward(v).wait().unwrap().1;
+    let v = iter_ok::<_, ()>(vec![2, 3]).forward(v).wait().unwrap().1;
     assert_eq!(v, vec![0, 1, 2, 3]);
 
-    assert_done(move || iter(vec![Ok(4), Ok(5)]).forward(v).map(|(_, s)| s),
+    assert_done(move || iter_ok(vec![4, 5]).forward(v).map(|(_, s)| s),
                 Ok::<_, ()>(vec![0, 1, 2, 3, 4, 5]));
 }
 
 #[test]
+#[allow(deprecated)]
 fn concat() {
-    let a = iter(vec![Ok::<_, ()>(vec![1, 2, 3]), Ok(vec![4, 5, 6]), Ok(vec![7, 8, 9])]);
+    let a = iter_ok::<_, ()>(vec![vec![1, 2, 3], vec![4, 5, 6], vec![7, 8, 9]]);
     assert_done(move || a.concat(), Ok(vec![1, 2, 3, 4, 5, 6, 7, 8, 9]));
 
     let b = iter(vec![Ok::<_, ()>(vec![1, 2, 3]), Err(()), Ok(vec![7, 8, 9])]);
     assert_done(move || b.concat(), Err(()));
 }
+
+#[test]
+fn concat2() {
+    let a = iter_ok::<_, ()>(vec![vec![1, 2, 3], vec![4, 5, 6], vec![7, 8, 9]]);
+    assert_done(move || a.concat2(), Ok(vec![1, 2, 3, 4, 5, 6, 7, 8, 9]));
+
+    let b = iter(vec![Ok::<_, ()>(vec![1, 2, 3]), Err(()), Ok(vec![7, 8, 9])]);
+    assert_done(move || b.concat2(), Err(()));
+
+    let c = empty::<Vec<()>, ()>();
+    assert_done(move || c.concat2(), Ok(vec![]))
+}
+
+#[test]
+fn stream_poll_fn() {
+    let mut counter = 5usize;
+
+    let read_stream = poll_fn(move || -> Poll<Option<usize>, std::io::Error> {
+        if counter == 0 {
+            return Ok(Async::Ready(None));
+        }
+        counter -= 1;
+        Ok(Async::Ready(Some(counter)))
+    });
+
+    assert_eq!(read_stream.wait().count(), 5);
+}
+
+#[test]
+fn inspect() {
+    let mut seen = vec![];
+    assert_done(|| list().inspect(|&a| seen.push(a)).collect(), Ok(vec![1, 2, 3]));
+    assert_eq!(seen, [1, 2, 3]);
+}
+
+#[test]
+fn inspect_err() {
+    let mut seen = vec![];
+    assert_done(|| err_list().inspect_err(|&a| seen.push(a)).collect(), Err(3));
+    assert_eq!(seen, [3]);
+}
--- a/third_party/rust/futures/tests/stream_catch_unwind.rs
+++ b/third_party/rust/futures/tests/stream_catch_unwind.rs
@@ -1,31 +1,29 @@
 extern crate futures;
 
 use futures::stream;
-use futures::stream::Stream;
+use futures::prelude::*;
 
 #[test]
 fn panic_in_the_middle_of_the_stream() {
-    let stream = stream::iter::<_, Option<i32>, bool>(vec![
-        Some(10), None, Some(11)].into_iter().map(Ok));
+    let stream = stream::iter_ok::<_, bool>(vec![Some(10), None, Some(11)]);
 
     // panic on second element
     let stream_panicking = stream.map(|o| o.unwrap());
     let mut iter = stream_panicking.catch_unwind().wait();
 
     assert_eq!(Ok(10), iter.next().unwrap().ok().unwrap());
     assert!(iter.next().unwrap().is_err());
     assert!(iter.next().is_none());
 }
 
 #[test]
 fn no_panic() {
-    let stream = stream::iter::<_, _, bool>(vec![
-        10, 11, 12].into_iter().map(Ok));
+    let stream = stream::iter_ok::<_, bool>(vec![10, 11, 12]);
 
     let mut iter = stream.catch_unwind().wait();
 
     assert_eq!(Ok(10), iter.next().unwrap().ok().unwrap());
     assert_eq!(Ok(11), iter.next().unwrap().ok().unwrap());
     assert_eq!(Ok(12), iter.next().unwrap().ok().unwrap());
     assert!(iter.next().is_none());
 }
--- a/third_party/rust/futures/tests/support/local_executor.rs
+++ b/third_party/rust/futures/tests/support/local_executor.rs
@@ -1,93 +1,162 @@
 //! Execution of futures on a single thread
 //!
 //! This module has no special handling of any blocking operations other than
-//! futures-aware inter-thread communications, and should therefore probably not
-//! be used to manage IO.
+//! futures-aware inter-thread communications, and is not intended to be used to
+//! manage I/O. For futures that do I/O you'll likely want to use `tokio-core`.
 
+use std::cell::{Cell, RefCell};
 use std::sync::{Arc, Mutex, mpsc};
-use std::collections::HashMap;
-use std::collections::hash_map;
-use std::boxed::Box;
-use std::rc::Rc;
-use std::cell::RefCell;
 
+use futures::executor::{self, Spawn, Notify};
+use futures::future::{Executor, ExecuteError};
 use futures::{Future, Async};
 
-use futures::executor::{self, Spawn};
-
 /// Main loop object
 pub struct Core {
-    unpark_send: mpsc::Sender<u64>,
-    unpark: mpsc::Receiver<u64>,
-    live: HashMap<u64, Spawn<Box<Future<Item=(), Error=()>>>>,
-    next_id: u64,
+    tx: mpsc::Sender<usize>,
+    rx: mpsc::Receiver<usize>,
+    notify: Arc<MyNotify>,
+
+    // Slab of running futures used to track what's running and what slots are
+    // empty. Slot indexes are then sent along tx/rx above to indicate which
+    // future is ready to get polled.
+    tasks: RefCell<Vec<Slot>>,
+    next_vacant: Cell<usize>,
+}
+
+enum Slot {
+    Vacant { next_vacant: usize },
+    Running(Option<Spawn<Box<Future<Item = (), Error = ()>>>>),
 }
 
 impl Core {
     /// Create a new `Core`.
     pub fn new() -> Self {
-        let (send, recv) = mpsc::channel();
+        let (tx, rx) = mpsc::channel();
         Core {
-            unpark_send: send,
-            unpark: recv,
-            live: HashMap::new(),
-            next_id: 0,
+            notify: Arc::new(MyNotify {
+                tx: Mutex::new(tx.clone()),
+            }),
+            tx: tx,
+            rx: rx,
+            next_vacant: Cell::new(0),
+            tasks: RefCell::new(Vec::new()),
         }
     }
 
     /// Spawn a future to be executed by a future call to `run`.
-    pub fn spawn<F>(&mut self, f: F)
+    ///
+    /// The future `f` provided will not be executed until `run` is called
+    /// below. While futures passed to `run` are executing, the future provided
+    /// here will be executed concurrently as well.
+    pub fn spawn<F>(&self, f: F)
         where F: Future<Item=(), Error=()> + 'static
     {
-        self.live.insert(self.next_id, executor::spawn(Box::new(f)));
-        self.unpark_send.send(self.next_id).unwrap();
-        self.next_id += 1;
-    }
-
-    /// Run the loop until all futures previously passed to `spawn` complete.
-    pub fn wait(&mut self) {
-        while !self.live.is_empty() {
-            self.turn();
+        let idx = self.next_vacant.get();
+        let mut tasks = self.tasks.borrow_mut();
+        match tasks.get_mut(idx) {
+            Some(&mut Slot::Vacant { next_vacant }) => {
+                self.next_vacant.set(next_vacant);
+            }
+            Some(&mut Slot::Running (_)) => {
+                panic!("vacant points to running future")
+            }
+            None => {
+                assert_eq!(idx, tasks.len());
+                tasks.push(Slot::Vacant { next_vacant: 0 });
+                self.next_vacant.set(idx + 1);
+            }
         }
+        tasks[idx] = Slot::Running(Some(executor::spawn(Box::new(f))));
+        self.tx.send(idx).unwrap();
     }
 
     /// Run the loop until the future `f` completes.
-    pub fn run<F>(&mut self, f: F) -> Result<F::Item, F::Error>
-        where F: Future + 'static,
-              F::Item: 'static,
-              F::Error: 'static,
+    ///
+    /// This method will block the current thread until the future `f` has
+    /// resolved. While waiting on `f` to finish it will also execute any
+    /// futures spawned via `spawn` above.
+    pub fn run<F>(&self, f: F) -> Result<F::Item, F::Error>
+        where F: Future,
     {
-        let out = Rc::new(RefCell::new(None));
-        let out2 = out.clone();
-        self.spawn(f.then(move |x| { *out.borrow_mut() = Some(x); Ok(()) }));
+        let id = usize::max_value();
+        self.tx.send(id).unwrap();
+        let mut f = executor::spawn(f);
         loop {
-            self.turn();
-            if let Some(x) = out2.borrow_mut().take() {
-                return x;
+            if self.turn() {
+                match f.poll_future_notify(&self.notify, id)? {
+                    Async::Ready(e) => return Ok(e),
+                    Async::NotReady => {}
+                }
             }
         }
     }
 
-    fn turn(&mut self) {
-        let task = self.unpark.recv().unwrap(); // Safe to unwrap because self.unpark_send keeps the channel alive
-        let unpark = Arc::new(Unpark { task: task, send: Mutex::new(self.unpark_send.clone()), });
-        let mut task = if let hash_map::Entry::Occupied(x) = self.live.entry(task) { x } else { return };
-        let result = task.get_mut().poll_future(unpark);
-        match result {
-            Ok(Async::Ready(())) => { task.remove(); }
-            Err(()) => { task.remove(); }
-            Ok(Async::NotReady) => {}
+    /// "Turns" this event loop one tick.
+    ///
+    /// This'll block the current thread until something happens, and once an
+    /// event happens this will act on that event.
+    ///
+    /// # Return value
+    ///
+    /// Returns `true` if the future passed to `run` should be polled or `false`
+    /// otherwise.
+    fn turn(&self) -> bool {
+        let task_id = self.rx.recv().unwrap();
+        if task_id == usize::max_value() {
+            return true
         }
+
+        // This may be a spurious wakeup so we're not guaranteed to have a
+        // future associated with `task_id`, so do a fallible lookup.
+        //
+        // Note that we don't want to borrow `self.tasks` for too long so we
+        // try to extract the future here and leave behind a tombstone future
+        // which'll get replaced or removed later. This is how we support
+        // spawn-in-run.
+        let mut future = match self.tasks.borrow_mut().get_mut(task_id) {
+            Some(&mut Slot::Running(ref mut future)) => future.take().unwrap(),
+            Some(&mut Slot::Vacant { .. }) => return false,
+            None => return false,
+        };
+
+        // Drive this future forward. If it's done we remove it and if it's not
+        // done then we put it back in the tasks array.
+        let done = match future.poll_future_notify(&self.notify, task_id) {
+            Ok(Async::Ready(())) | Err(()) => true,
+            Ok(Async::NotReady) => false,
+        };
+        let mut tasks = self.tasks.borrow_mut();
+        if done {
+            tasks[task_id] = Slot::Vacant { next_vacant: self.next_vacant.get() };
+            self.next_vacant.set(task_id);
+        } else {
+            tasks[task_id] = Slot::Running(Some(future));
+        }
+
+        return false
     }
 }
 
-struct Unpark {
-    task: u64,
-    send: Mutex<mpsc::Sender<u64>>,
+impl<F> Executor<F> for Core
+    where F: Future<Item = (), Error = ()> + 'static,
+{
+    fn execute(&self, future: F) -> Result<(), ExecuteError<F>> {
+        self.spawn(future);
+        Ok(())
+    }
 }
 
-impl executor::Unpark for Unpark {
-    fn unpark(&self) {
-        let _ = self.send.lock().unwrap().send(self.task);
+struct MyNotify {
+    // TODO: it's pretty unfortunate to use a `Mutex` here where the `Sender`
+    //       itself is basically `Sync` as-is. Ideally this'd use something like
+    //       an off-the-shelf mpsc queue as well as `thread::park` and
+    //       `Thread::unpark`.
+    tx: Mutex<mpsc::Sender<usize>>,
+}
+
+impl Notify for MyNotify {
+    fn notify(&self, id: usize) {
+        drop(self.tx.lock().unwrap().send(id));
     }
 }
old mode 100755
new mode 100644
--- a/third_party/rust/futures/tests/support/mod.rs
+++ b/third_party/rust/futures/tests/support/mod.rs
@@ -2,17 +2,17 @@
 
 use std::fmt;
 use std::sync::Arc;
 use std::thread;
 
 use futures::{Future, IntoFuture, Async, Poll};
 use futures::future::FutureResult;
 use futures::stream::Stream;
-use futures::executor::{self, Unpark};
+use futures::executor::{self, NotifyHandle, Notify};
 use futures::task;
 
 pub mod local_executor;
 
 pub fn f_ok(a: i32) -> FutureResult<i32, u32> { Ok(a).into_future() }
 pub fn f_err(a: u32) -> FutureResult<i32, u32> { Err(a).into_future() }
 pub fn r_ok(a: i32) -> Result<i32, u32> { Ok(a) }
 pub fn r_err(a: u32) -> Result<i32, u32> { Err(a) }
@@ -22,79 +22,81 @@ pub fn assert_done<T, F>(f: F, result: R
           T::Item: Eq + fmt::Debug,
           T::Error: Eq + fmt::Debug,
           F: FnOnce() -> T,
 {
     assert_eq!(f().wait(), result);
 }
 
 pub fn assert_empty<T: Future, F: FnMut() -> T>(mut f: F) {
-    assert!(executor::spawn(f()).poll_future(unpark_panic()).ok().unwrap().is_not_ready());
+    assert!(executor::spawn(f()).poll_future_notify(&notify_panic(), 0).ok().unwrap().is_not_ready());
 }
 
 pub fn sassert_done<S: Stream>(s: &mut S) {
-    match executor::spawn(s).poll_stream(unpark_panic()) {
+    match executor::spawn(s).poll_stream_notify(&notify_panic(), 0) {
         Ok(Async::Ready(None)) => {}
         Ok(Async::Ready(Some(_))) => panic!("stream had more elements"),
         Ok(Async::NotReady) => panic!("stream wasn't ready"),
         Err(_) => panic!("stream had an error"),
     }
 }
 
 pub fn sassert_empty<S: Stream>(s: &mut S) {
-    match executor::spawn(s).poll_stream(unpark_noop()) {
+    match executor::spawn(s).poll_stream_notify(&notify_noop(), 0) {
         Ok(Async::Ready(None)) => panic!("stream is at its end"),
         Ok(Async::Ready(Some(_))) => panic!("stream had more elements"),
         Ok(Async::NotReady) => {}
         Err(_) => panic!("stream had an error"),
     }
 }
 
 pub fn sassert_next<S: Stream>(s: &mut S, item: S::Item)
     where S::Item: Eq + fmt::Debug
 {
-    match executor::spawn(s).poll_stream(unpark_panic()) {
+    match executor::spawn(s).poll_stream_notify(&notify_panic(), 0) {
         Ok(Async::Ready(None)) => panic!("stream is at its end"),
         Ok(Async::Ready(Some(e))) => assert_eq!(e, item),
         Ok(Async::NotReady) => panic!("stream wasn't ready"),
         Err(_) => panic!("stream had an error"),
     }
 }
 
 pub fn sassert_err<S: Stream>(s: &mut S, err: S::Error)
     where S::Error: Eq + fmt::Debug
 {
-    match executor::spawn(s).poll_stream(unpark_panic()) {
+    match executor::spawn(s).poll_stream_notify(&notify_panic(), 0) {
         Ok(Async::Ready(None)) => panic!("stream is at its end"),
         Ok(Async::Ready(Some(_))) => panic!("stream had more elements"),
         Ok(Async::NotReady) => panic!("stream wasn't ready"),
         Err(e) => assert_eq!(e, err),
     }
 }
 
-pub fn unpark_panic() -> Arc<Unpark> {
+pub fn notify_panic() -> NotifyHandle {
     struct Foo;
 
-    impl Unpark for Foo {
-        fn unpark(&self) {
-            panic!("should not be unparked");
+    impl Notify for Foo {
+        fn notify(&self, _id: usize) {
+            panic!("should not be notified");
         }
     }
 
-    Arc::new(Foo)
+    NotifyHandle::from(Arc::new(Foo))
 }
 
-pub fn unpark_noop() -> Arc<Unpark> {
-    struct Foo;
+pub fn notify_noop() -> NotifyHandle {
+    struct Noop;
 
-    impl Unpark for Foo {
-        fn unpark(&self) {}
+    impl Notify for Noop {
+        fn notify(&self, _id: usize) {}
     }
 
-    Arc::new(Foo)
+    const NOOP : &'static Noop = &Noop;
+
+    NotifyHandle::from(NOOP)
 }
 
 pub trait ForgetExt {
     fn forget(self);
 }
 
 impl<F> ForgetExt for F
     where F: Future + Sized + Send + 'static,
@@ -112,21 +114,21 @@ impl<F: Future> Future for DelayFuture<F
     type Item = F::Item;
     type Error = F::Error;
 
     fn poll(&mut self) -> Poll<F::Item,F::Error> {
         if self.1 {
             self.0.poll()
         } else {
             self.1 = true;
-            task::park().unpark();
+            task::current().notify();
             Ok(Async::NotReady)
         }
     }
 }
 
-/// Introduces one Ok(Async::NotReady) before polling the given future
+/// Introduces one `Ok(Async::NotReady)` before polling the given future
 pub fn delay_future<F>(f: F) -> DelayFuture<F::Future>
     where F: IntoFuture,
 {
     DelayFuture(f.into_future(), false)
 }
 
--- a/third_party/rust/futures/tests/unfold.rs
+++ b/third_party/rust/futures/tests/unfold.rs
@@ -1,13 +1,13 @@
 extern crate futures;
 
 mod support;
 
-use futures::*;
+use futures::stream;
 
 use support::*;
 
 #[test]
 fn unfold1() {
     let mut stream = stream::unfold(0, |state| {
         if state <= 2 {
             let res: Result<_,()> = Ok((state * 2, state + 1));
--- a/third_party/rust/futures/tests/unsync-oneshot.rs
+++ b/third_party/rust/futures/tests/unsync-oneshot.rs
@@ -1,13 +1,16 @@
 extern crate futures;
 
-use futures::Future;
+use futures::prelude::*;
 use futures::future;
-use futures::unsync::oneshot::{channel, Canceled};
+use futures::unsync::oneshot::{channel, Canceled, spawn};
+
+mod support;
+use support::local_executor;
 
 #[test]
 fn smoke() {
     let (tx, rx) = channel();
     tx.send(33).unwrap();
     assert_eq!(rx.wait().unwrap(), 33);
 }
 
@@ -39,8 +42,148 @@ fn tx_dropped_rx_unparked() {
     let (tx, rx) = channel::<i32>();
 
     let res = rx.join(future::lazy(move || {
         let _tx = tx;
         Ok(11)
     }));
     assert_eq!(res.wait().unwrap_err(), Canceled);
 }
+
+
+#[test]
+fn is_canceled() {
+    let (tx, rx) = channel::<u32>();
+    assert!(!tx.is_canceled());
+    drop(rx);
+    assert!(tx.is_canceled());
+}
+
+#[test]
+fn spawn_sends_items() {
+    let core = local_executor::Core::new();
+    let future = future::ok::<_, ()>(1);
+    let rx = spawn(future, &core);
+    assert_eq!(core.run(rx).unwrap(), 1);
+}
+
+#[test]
+fn spawn_kill_dead_stream() {
+    use std::thread;
+    use std::time::Duration;
+    use futures::future::Either;
+    use futures::sync::oneshot;
+
+    // a future which never returns anything (forever accepting incoming
+    // connections), but dropping it leads to observable side effects
+    // (like closing listening sockets, releasing limited resources,
+    // ...)
+    #[derive(Debug)]
+    struct Dead {
+        // when dropped you should get Err(oneshot::Canceled) on the
+        // receiving end
+        done: oneshot::Sender<()>,
+    }
+    impl Future for Dead {
+        type Item = ();
+        type Error = ();
+
+        fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
+            Ok(Async::NotReady)
+        }
+    }
+
+    // need to implement a timeout for the test, as it would hang
+    // forever right now
+    let (timeout_tx, timeout_rx) = oneshot::channel();
+    thread::spawn(move || {
+        thread::sleep(Duration::from_millis(1000));
+        let _ = timeout_tx.send(());
+    });
+
+    let core = local_executor::Core::new();
+    let (done_tx, done_rx) = oneshot::channel();
+    let future = Dead{done: done_tx};
+    let rx = spawn(future, &core);
+    let res = core.run(
+        Ok::<_, ()>(())
+        .into_future()
+        .then(move |_| {
+            // now drop the spawned future: maybe some timeout exceeded,
+            // or some connection on this end was closed by the remote
+            // end.
+            drop(rx);
+            // and wait for the spawned future to release its resources
+            done_rx
+        })
+        .select2(timeout_rx)
+    );
+    match res {
+        Err(Either::A((oneshot::Canceled, _))) => (),
+        Ok(Either::B(((), _))) => {
+            panic!("dead future wasn't canceled (timeout)");
+        },
+        _ => {
+            panic!("dead future wasn't canceled (unexpected result)");
+        },
+    }
+}
+
+#[test]
+fn spawn_dont_kill_forgot_dead_stream() {
+    use std::thread;
+    use std::time::Duration;
+    use futures::future::Either;
+    use futures::sync::oneshot;
+
+    // a future which never returns anything (forever accepting incoming
+    // connections), but dropping it leads to observable side effects
+    // (like closing listening sockets, releasing limited resources,
+    // ...)
+    #[derive(Debug)]
+    struct Dead {
+        // when dropped you should get Err(oneshot::Canceled) on the
+        // receiving end
+        done: oneshot::Sender<()>,
+    }
+    impl Future for Dead {
+        type Item = ();
+        type Error = ();
+
+        fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
+            Ok(Async::NotReady)
+        }
+    }
+
+    // need to implement a timeout for the test, as it would hang
+    // forever right now
+    let (timeout_tx, timeout_rx) = oneshot::channel();
+    thread::spawn(move || {
+        thread::sleep(Duration::from_millis(1000));
+        let _ = timeout_tx.send(());
+    });
+
+    let core = local_executor::Core::new();
+    let (done_tx, done_rx) = oneshot::channel();
+    let future = Dead{done: done_tx};
+    let rx = spawn(future, &core);
+    let res = core.run(
+        Ok::<_, ()>(())
+        .into_future()
+        .then(move |_| {
+            // forget the spawned future: should keep running, i.e. hit
+            // the timeout below.
+            rx.forget();
+            // and wait for the spawned future to release its resources
+            done_rx
+        })
+        .select2(timeout_rx)
+    );
+    match res {
+        Err(Either::A((oneshot::Canceled, _))) => {
+            panic!("forgotten dead future was canceled");
+        },
+        Ok(Either::B(((), _))) => (), // reached timeout
+        _ => {
+            panic!("forgotten dead future was canceled (unexpected result)");
+        },
+    }
+}
--- a/third_party/rust/futures/tests/unsync.rs
+++ b/third_party/rust/futures/tests/unsync.rs
@@ -1,18 +1,19 @@
 #![cfg(feature = "use_std")]
 
 extern crate futures;
 
 mod support;
 
-use futures::{Future, Stream, Sink, Async};
+use futures::prelude::*;
+use futures::unsync::oneshot;
 use futures::unsync::mpsc::{self, SendError};
 use futures::future::lazy;
-use futures::stream::iter;
+use futures::stream::{iter_ok, unfold};
 
 use support::local_executor::Core;
 
 #[test]
 fn mpsc_send_recv() {
     let (tx, rx) = mpsc::channel::<i32>(1);
     let mut rx = rx.wait();
 
@@ -38,16 +39,39 @@ fn mpsc_rx_end() {
 
     lazy(|| {
         assert_eq!(rx.poll().unwrap(), Async::Ready(None));
         Ok(()) as Result<(), ()>
     }).wait().unwrap();
 }
 
 #[test]
+fn mpsc_tx_clone_weak_rc() {
+    let (tx, mut rx) = mpsc::channel::<i32>(1); // rc = 1
+
+    let tx_clone = tx.clone(); // rc = 2
+    lazy(|| {
+        assert_eq!(rx.poll().unwrap(), Async::NotReady);
+        Ok(()) as Result<(), ()>
+    }).wait().unwrap();
+
+    drop(tx); // rc = 1
+    lazy(|| {
+        assert_eq!(rx.poll().unwrap(), Async::NotReady);
+        Ok(()) as Result<(), ()>
+    }).wait().unwrap();
+
+    drop(tx_clone); // rc = 0
+    lazy(|| {
+        assert_eq!(rx.poll().unwrap(), Async::Ready(None));
+        Ok(()) as Result<(), ()>
+    }).wait().unwrap();
+}
+
+#[test]
 fn mpsc_tx_notready() {
     let (tx, _rx) = mpsc::channel::<i32>(1);
     let tx = tx.send(1).wait().unwrap();
     lazy(move || {
         assert!(tx.send(2).poll().unwrap().is_not_ready());
         Ok(()) as Result<(), ()>
     }).wait().unwrap();
 }
@@ -60,49 +84,120 @@ fn mpsc_tx_err() {
         Ok(()) as Result<(), ()>
     }).wait().unwrap();
 }
 
 #[test]
 fn mpsc_backpressure() {
     let (tx, rx) = mpsc::channel::<i32>(1);
     lazy(move || {
-        iter(vec![1, 2, 3].into_iter().map(Ok))
+        iter_ok(vec![1, 2, 3])
             .forward(tx)
             .map_err(|e: SendError<i32>| panic!("{}", e))
             .join(rx.take(3).collect().map(|xs| {
-                assert!(xs == [1, 2, 3]);
+                assert_eq!(xs, [1, 2, 3]);
             }))
     }).wait().unwrap();
 }
 
 #[test]
 fn mpsc_unbounded() {
     let (tx, rx) = mpsc::unbounded::<i32>();
     lazy(move || {
-        iter(vec![1, 2, 3].into_iter().map(Ok))
+        iter_ok(vec![1, 2, 3])
             .forward(tx)
             .map_err(|e: SendError<i32>| panic!("{}", e))
             .join(rx.take(3).collect().map(|xs| {
-                assert!(xs == [1, 2, 3]);
+                assert_eq!(xs, [1, 2, 3]);
             }))
     }).wait().unwrap();
 }
 
 #[test]
 fn mpsc_recv_unpark() {
-    let mut core = Core::new();
+    let core = Core::new();
     let (tx, rx) = mpsc::channel::<i32>(1);
     let tx2 = tx.clone();
-    core.spawn(rx.collect().map(|xs| assert!(xs == [1, 2])));
+    core.spawn(rx.collect().map(|xs| assert_eq!(xs, [1, 2])));
     core.spawn(lazy(move || tx.send(1).map(|_| ()).map_err(|e| panic!("{}", e))));
     core.run(lazy(move || tx2.send(2))).unwrap();
 }
 
 #[test]
 fn mpsc_send_unpark() {
-    let mut core = Core::new();
+    let core = Core::new();
     let (tx, rx) = mpsc::channel::<i32>(1);
-    core.spawn(iter(vec![1, 2].into_iter().map(Ok)).forward(tx)
-               .then(|x: Result<_, SendError<i32>>| { assert!(x.is_err()); Ok(()) }));
+    let (donetx, donerx) = oneshot::channel();
+    core.spawn(iter_ok(vec![1, 2]).forward(tx)
+        .then(|x: Result<_, SendError<i32>>| {
+            assert!(x.is_err());
+            donetx.send(()).unwrap();
+            Ok(())
+        }));
     core.spawn(lazy(move || { let _ = rx; Ok(()) }));
-    core.wait();
+    core.run(donerx).unwrap();
+}
+
+#[test]
+fn spawn_sends_items() {
+    let core = Core::new();
+    let stream = unfold(0, |i| Some(Ok::<_,u8>((i, i + 1))));
+    let rx = mpsc::spawn(stream, &core, 1);
+    assert_eq!(core.run(rx.take(4).collect()).unwrap(),
+               [0, 1, 2, 3]);
 }
+
+#[test]
+fn spawn_kill_dead_stream() {
+    use std::thread;
+    use std::time::Duration;
+    use futures::future::Either;
+
+    // a stream which never returns anything (maybe a remote end isn't
+    // responding), but dropping it leads to observable side effects
+    // (like closing connections, releasing limited resources, ...)
+    #[derive(Debug)]
+    struct Dead {
+        // when dropped you should get Err(oneshot::Canceled) on the
+        // receiving end
+        done: oneshot::Sender<()>,
+    }
+    impl Stream for Dead {
+        type Item = ();
+        type Error = ();
+
+        fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
+            Ok(Async::NotReady)
+        }
+    }
+
+    // need to implement a timeout for the test, as it would hang
+    // forever right now
+    let (timeout_tx, timeout_rx) = futures::sync::oneshot::channel();
+    thread::spawn(move || {
+        thread::sleep(Duration::from_millis(1000));
+        let _ = timeout_tx.send(());
+    });
+
+    let core = Core::new();
+    let (done_tx, done_rx) = oneshot::channel();
+    let stream = Dead{done: done_tx};
+    let rx = mpsc::spawn(stream, &core, 1);
+    let res = core.run(
+        Ok::<_, ()>(())
+        .into_future()
+        .then(move |_| {
+            // now drop the spawned stream: maybe some timeout exceeded,
+            // or some connection on this end was closed by the remote
+            // end.
+            drop(rx);
+            // and wait for the spawned stream to release its resources
+            done_rx
+        })
+        .select2(timeout_rx)
+    );
+    match res {
+        Err(Either::A((oneshot::Canceled, _))) => (),
+        _ => {
+            panic!("dead stream wasn't canceled");
+        },
+    }
+}