From 2bfc571ce5e4a26aedf5a823b4e7b1499a54c2ed Mon Sep 17 00:00:00 2001
From: Mike Hommey
Date: Wed, 1 Jun 2022 06:44:07 +0000
Subject: [PATCH] Bug 1710421 - [webdriver] Update warp and hyper dependencies.
 r=mjf,webdriver-reviewers

At the same time, update mdns_service to socket2 0.4 to avoid a duplicate.

Differential Revision: https://phabricator.services.mozilla.com/D147479
---
 Cargo.lock | 205 +- Cargo.toml | 3 + build/rust/tokio-util/Cargo.toml | 23 + build/rust/tokio-util/lib.rs | 5 + .../webrtc/transport/mdns_service/Cargo.toml | 2 +- .../webrtc/transport/mdns_service/src/lib.rs | 8 +- .../mozbuild/mozbuild/vendor/vendor_rust.py | 5 +- testing/geckodriver/Cargo.toml | 2 +- testing/webdriver/Cargo.toml | 9 +- testing/webdriver/src/server.rs | 12 +- .../rust/bytes-0.5.6/.cargo-checksum.json | 1 - third_party/rust/bytes-0.5.6/CHANGELOG.md | 166 - third_party/rust/bytes-0.5.6/Cargo.toml | 37 - third_party/rust/bytes-0.5.6/README.md | 47 - third_party/rust/bytes-0.5.6/benches/buf.rs | 187 - third_party/rust/bytes-0.5.6/benches/bytes.rs | 119 - .../rust/bytes-0.5.6/benches/bytes_mut.rs | 266 -- .../rust/bytes-0.5.6/ci/test-stable.sh | 27 - third_party/rust/bytes-0.5.6/ci/tsan.sh | 13 - .../rust/bytes-0.5.6/src/buf/buf_impl.rs | 1007 ------ .../rust/bytes-0.5.6/src/buf/buf_mut.rs | 1100 ------ .../rust/bytes-0.5.6/src/buf/ext/chain.rs | 233 -- .../rust/bytes-0.5.6/src/buf/ext/limit.rs | 74 - .../rust/bytes-0.5.6/src/buf/ext/mod.rs | 186 - .../rust/bytes-0.5.6/src/buf/ext/reader.rs | 81 - .../rust/bytes-0.5.6/src/buf/ext/take.rs | 147 - .../rust/bytes-0.5.6/src/buf/ext/writer.rs | 88 - third_party/rust/bytes-0.5.6/src/buf/iter.rs | 133 - third_party/rust/bytes-0.5.6/src/buf/mod.rs | 30 - .../rust/bytes-0.5.6/src/buf/vec_deque.rs | 22 - third_party/rust/bytes-0.5.6/src/bytes.rs | 1108 ------ third_party/rust/bytes-0.5.6/src/bytes_mut.rs | 1581 --------- third_party/rust/bytes-0.5.6/src/fmt/debug.rs | 49 - third_party/rust/bytes-0.5.6/src/fmt/hex.rs | 37 - third_party/rust/bytes-0.5.6/src/fmt/mod.rs | 5 - third_party/rust/bytes-0.5.6/src/lib.rs | 117 - third_party/rust/bytes-0.5.6/src/loom.rs | 30 - third_party/rust/bytes-0.5.6/src/serde.rs | 89 - .../rust/bytes-0.5.6/tests/test_buf.rs | 103 - .../rust/bytes-0.5.6/tests/test_buf_mut.rs | 120 - .../rust/bytes-0.5.6/tests/test_bytes.rs | 962 ------ .../bytes-0.5.6/tests/test_bytes_odd_alloc.rs | 67 - .../bytes-0.5.6/tests/test_bytes_vec_alloc.rs | 79 - .../rust/bytes-0.5.6/tests/test_chain.rs | 135 - .../rust/bytes-0.5.6/tests/test_debug.rs | 35 - .../rust/bytes-0.5.6/tests/test_iter.rs | 21 - .../rust/bytes-0.5.6/tests/test_reader.rs | 29 - .../rust/bytes-0.5.6/tests/test_serde.rs | 20 - .../rust/bytes-0.5.6/tests/test_take.rs | 12 - .../rust/form_urlencoded/.cargo-checksum.json | 1 + .../Cargo.toml | 26 +- .../rust/form_urlencoded/LICENSE-APACHE | 201 ++ .../LICENSE-MIT | 2 + third_party/rust/form_urlencoded/src/lib.rs | 420 +++ third_party/rust/h2/.cargo-checksum.json | 2 +- third_party/rust/h2/CHANGELOG.md | 87 + third_party/rust/h2/Cargo.lock | 711 ++-- third_party/rust/h2/Cargo.toml | 90 +- third_party/rust/h2/README.md | 17 +- third_party/rust/h2/examples/akamai.rs | 24 +- third_party/rust/h2/examples/server.rs | 47 +- third_party/rust/h2/src/client.rs | 203 +- third_party/rust/h2/src/codec/error.rs | 55 +- third_party/rust/h2/src/codec/framed_read.rs | 501 +-- third_party/rust/h2/src/codec/framed_write.rs | 261 +- third_party/rust/h2/src/codec/mod.rs | 5 +- third_party/rust/h2/src/error.rs | 101 +- third_party/rust/h2/src/ext.rs | 55 + 
third_party/rust/h2/src/frame/data.rs | 4 +- third_party/rust/h2/src/frame/go_away.rs | 5 +- third_party/rust/h2/src/frame/head.rs | 2 +- third_party/rust/h2/src/frame/headers.rs | 285 +- third_party/rust/h2/src/frame/mod.rs | 1 - third_party/rust/h2/src/frame/ping.rs | 2 +- third_party/rust/h2/src/frame/reason.rs | 2 +- third_party/rust/h2/src/frame/reset.rs | 4 +- third_party/rust/h2/src/frame/settings.rs | 37 +- .../rust/h2/src/frame/window_update.rs | 2 +- third_party/rust/h2/src/fuzz_bridge.rs | 28 + third_party/rust/h2/src/hpack/decoder.rs | 150 +- third_party/rust/h2/src/hpack/encoder.rs | 261 +- third_party/rust/h2/src/hpack/header.rs | 28 +- third_party/rust/h2/src/hpack/huffman/mod.rs | 33 +- third_party/rust/h2/src/hpack/mod.rs | 4 +- third_party/rust/h2/src/hpack/table.rs | 3 +- third_party/rust/h2/src/hpack/test/fixture.rs | 10 +- third_party/rust/h2/src/hpack/test/fuzz.rs | 266 +- third_party/rust/h2/src/lib.rs | 41 +- third_party/rust/h2/src/proto/connection.rs | 549 +-- third_party/rust/h2/src/proto/error.rs | 92 +- third_party/rust/h2/src/proto/go_away.rs | 16 +- third_party/rust/h2/src/proto/mod.rs | 5 +- third_party/rust/h2/src/proto/peer.rs | 14 +- third_party/rust/h2/src/proto/ping_pong.rs | 51 +- third_party/rust/h2/src/proto/settings.rs | 20 +- .../rust/h2/src/proto/streams/buffer.rs | 9 - .../rust/h2/src/proto/streams/counts.rs | 16 +- .../rust/h2/src/proto/streams/flow_control.rs | 38 +- third_party/rust/h2/src/proto/streams/mod.rs | 8 +- .../rust/h2/src/proto/streams/prioritize.rs | 323 +- third_party/rust/h2/src/proto/streams/recv.rs | 298 +- third_party/rust/h2/src/proto/streams/send.rs | 116 +- .../rust/h2/src/proto/streams/state.rs | 140 +- .../rust/h2/src/proto/streams/store.rs | 28 +- .../rust/h2/src/proto/streams/stream.rs | 32 +- .../rust/h2/src/proto/streams/streams.rs | 1217 ++++--- third_party/rust/h2/src/server.rs | 414 ++- third_party/rust/h2/src/share.rs | 19 +- .../rust/http-body/.cargo-checksum.json | 2 +- third_party/rust/http-body/CHANGELOG.md | 34 + third_party/rust/http-body/Cargo.toml | 36 +- .../http-body/src/combinators/box_body.rs | 134 + .../http-body/src/combinators/map_data.rs | 94 + .../rust/http-body/src/combinators/map_err.rs | 97 + .../rust/http-body/src/combinators/mod.rs | 11 + third_party/rust/http-body/src/empty.rs | 75 + third_party/rust/http-body/src/full.rs | 151 + third_party/rust/http-body/src/lib.rs | 88 +- third_party/rust/http-body/src/limited.rs | 299 ++ third_party/rust/http/.cargo-checksum.json | 2 +- third_party/rust/http/CHANGELOG.md | 10 + third_party/rust/http/Cargo.toml | 19 +- third_party/rust/http/README.md | 10 +- third_party/rust/http/benches/header_name.rs | 139 + third_party/rust/http/src/byte_str.rs | 2 +- third_party/rust/http/src/extensions.rs | 33 + third_party/rust/http/src/header/name.rs | 884 +---- third_party/rust/http/src/header/value.rs | 5 +- third_party/rust/http/src/lib.rs | 2 +- third_party/rust/http/src/uri/mod.rs | 94 +- third_party/rust/http/tests/status_code.rs | 49 + third_party/rust/hyper/.cargo-checksum.json | 2 +- third_party/rust/hyper/Cargo.lock | 750 ++-- third_party/rust/hyper/Cargo.toml | 198 +- third_party/rust/hyper/LICENSE | 3 +- third_party/rust/hyper/src/body/aggregate.rs | 6 + third_party/rust/hyper/src/body/body.rs | 196 +- third_party/rust/hyper/src/body/length.rs | 123 + third_party/rust/hyper/src/body/mod.rs | 4 + third_party/rust/hyper/src/body/to_bytes.rs | 39 +- third_party/rust/hyper/src/cfg.rs | 44 + third_party/rust/hyper/src/client/client.rs | 1462 ++++++++ 
third_party/rust/hyper/src/client/conn.rs | 488 ++- .../rust/hyper/src/client/connect/dns.rs | 153 +- .../rust/hyper/src/client/connect/http.rs | 497 ++- .../rust/hyper/src/client/connect/mod.rs | 97 +- third_party/rust/hyper/src/client/dispatch.rs | 74 +- third_party/rust/hyper/src/client/mod.rs | 1185 +------ third_party/rust/hyper/src/client/pool.rs | 87 +- third_party/rust/hyper/src/client/service.rs | 2 + third_party/rust/hyper/src/client/tests.rs | 2 +- third_party/rust/hyper/src/common/buf.rs | 89 +- third_party/rust/hyper/src/common/date.rs | 124 + third_party/rust/hyper/src/common/drain.rs | 87 +- third_party/rust/hyper/src/common/exec.rs | 56 +- .../rust/hyper/src/common/io/rewind.rs | 44 +- third_party/rust/hyper/src/common/lazy.rs | 57 +- third_party/rust/hyper/src/common/mod.rs | 20 +- third_party/rust/hyper/src/common/never.rs | 2 +- .../rust/hyper/src/common/sync_wrapper.rs | 13 +- third_party/rust/hyper/src/common/task.rs | 2 + third_party/rust/hyper/src/error.rs | 298 +- third_party/rust/hyper/src/ext.rs | 122 + third_party/rust/hyper/src/ffi/body.rs | 229 ++ third_party/rust/hyper/src/ffi/client.rs | 162 + third_party/rust/hyper/src/ffi/error.rs | 85 + third_party/rust/hyper/src/ffi/http_types.rs | 558 +++ third_party/rust/hyper/src/ffi/io.rs | 178 + third_party/rust/hyper/src/ffi/macros.rs | 53 + third_party/rust/hyper/src/ffi/mod.rs | 94 + third_party/rust/hyper/src/ffi/task.rs | 411 +++ third_party/rust/hyper/src/headers.rs | 115 +- third_party/rust/hyper/src/lib.rs | 74 +- third_party/rust/hyper/src/proto/h1/conn.rs | 188 +- third_party/rust/hyper/src/proto/h1/date.rs | 82 - third_party/rust/hyper/src/proto/h1/decode.rs | 115 +- .../rust/hyper/src/proto/h1/dispatch.rs | 434 +-- third_party/rust/hyper/src/proto/h1/encode.rs | 81 +- third_party/rust/hyper/src/proto/h1/io.rs | 440 ++- third_party/rust/hyper/src/proto/h1/mod.rs | 42 +- third_party/rust/hyper/src/proto/h1/role.rs | 1231 ++++++- third_party/rust/hyper/src/proto/h2/client.rs | 210 +- third_party/rust/hyper/src/proto/h2/mod.rs | 288 +- third_party/rust/hyper/src/proto/h2/ping.rs | 105 +- third_party/rust/hyper/src/proto/h2/server.rs | 198 +- third_party/rust/hyper/src/proto/mod.rs | 148 +- third_party/rust/hyper/src/rt.rs | 6 +- third_party/rust/hyper/src/server/accept.rs | 18 +- third_party/rust/hyper/src/server/conn.rs | 718 ++-- third_party/rust/hyper/src/server/mod.rs | 525 +-- third_party/rust/hyper/src/server/server.rs | 560 +++ third_party/rust/hyper/src/server/shutdown.rs | 62 +- third_party/rust/hyper/src/server/tcp.rs | 137 +- third_party/rust/hyper/src/service/make.rs | 1 + third_party/rust/hyper/src/service/mod.rs | 16 +- third_party/rust/hyper/src/service/oneshot.rs | 66 +- third_party/rust/hyper/src/upgrade.rs | 227 +- .../pin-project-internal/.cargo-checksum.json | 2 +- .../rust/pin-project-internal/Cargo.toml | 9 +- .../rust/pin-project-internal/LICENSE-APACHE | 25 - .../rust/pin-project-internal/build.rs | 34 - .../rust/pin-project-internal/src/lib.rs | 391 +-- .../src/pin_project/args.rs | 254 ++ .../src/pin_project/attribute.rs | 17 +- .../src/pin_project/derive.rs | 2025 ++++++----- .../src/pin_project/mod.rs | 6 +- .../pin-project-internal/src/pinned_drop.rs | 235 +- .../rust/pin-project-internal/src/project.rs | 353 -- .../rust/pin-project-internal/src/utils.rs | 97 +- .../.cargo-checksum.json | 1 - .../rust/pin-project-lite-0.1.12/CHANGELOG.md | 111 - .../rust/pin-project-lite-0.1.12/Cargo.toml | 36 - .../pin-project-lite-0.1.12/LICENSE-APACHE | 177 - 
.../rust/pin-project-lite-0.1.12/README.md | 109 - .../rust/pin-project-lite-0.1.12/src/lib.rs | 628 ---- .../tests/compiletest.rs | 8 - .../tests/include/basic.rs | 10 - .../pin-project-lite-0.1.12/tests/lint.rs | 131 - .../tests/proper_unpin.rs | 50 - .../pin-project-lite-0.1.12/tests/test.rs | 416 --- .../tests/ui/conflict-drop.rs | 15 - .../tests/ui/conflict-drop.stderr | 16 - .../tests/ui/conflict-unpin.rs | 40 - .../tests/ui/conflict-unpin.stderr | 50 - .../tests/ui/invalid-bounds.rs | 93 - .../tests/ui/invalid-bounds.stderr | 134 - .../tests/ui/invalid.rs | 25 - .../tests/ui/invalid.stderr | 17 - .../tests/ui/overlapping_lifetime_names.rs | 10 - .../ui/overlapping_lifetime_names.stderr | 75 - .../tests/ui/overlapping_unpin_struct.rs | 19 - .../tests/ui/overlapping_unpin_struct.stderr | 11 - .../tests/ui/packed.rs | 19 - .../tests/ui/packed.stderr | 107 - .../tests/ui/unpin_sneaky.rs | 12 - .../tests/ui/unpin_sneaky.stderr | 11 - .../tests/ui/unsupported.rs | 27 - .../tests/ui/unsupported.stderr | 29 - .../rust/pin-project/.cargo-checksum.json | 2 +- third_party/rust/pin-project/CHANGELOG.md | 182 +- third_party/rust/pin-project/Cargo.lock | 136 +- third_party/rust/pin-project/Cargo.toml | 21 +- third_party/rust/pin-project/LICENSE-APACHE | 25 - third_party/rust/pin-project/README.md | 74 +- .../rust/pin-project/examples/README.md | 36 +- .../examples/enum-default-expanded.rs | 57 +- .../examples/not_unpin-expanded.rs | 78 +- .../examples/pinned_drop-expanded.rs | 74 +- .../examples/project_replace-expanded.rs | 105 +- .../examples/struct-default-expanded.rs | 89 +- .../examples/unsafe_unpin-expanded.rs | 72 +- third_party/rust/pin-project/src/lib.rs | 115 +- third_party/rust/pin-project/tests/README.md | 44 + .../tests/auxiliary/mod.rs | 0 third_party/rust/pin-project/tests/cfg.rs | 207 +- .../rust/pin-project/tests/compiletest.rs | 16 +- .../rust/pin-project/tests/drop_order.rs | 54 +- .../tests/expand/default/enum.expanded.rs | 137 + .../pin-project/tests/expand/default/enum.rs | 14 + .../tests/expand/default/struct.expanded.rs | 101 + .../tests/expand/default/struct.rs | 10 + .../expand/default/tuple_struct.expanded.rs | 89 + .../tests/expand/default/tuple_struct.rs | 6 + .../tests/expand/multifields/enum.expanded.rs | 253 ++ .../tests/expand/multifields/enum.rs | 17 + .../expand/multifields/struct.expanded.rs | 159 + .../tests/expand/multifields/struct.rs | 13 + .../multifields/tuple_struct.expanded.rs | 135 + .../tests/expand/multifields/tuple_struct.rs | 6 + .../tests/expand/naming/enum-all.expanded.rs | 193 ++ .../tests/expand/naming/enum-all.rs | 14 + .../tests/expand/naming/enum-mut.expanded.rs | 96 + .../tests/expand/naming/enum-mut.rs | 14 + .../tests/expand/naming/enum-none.expanded.rs | 59 + .../tests/expand/naming/enum-none.rs | 14 + .../tests/expand/naming/enum-own.expanded.rs | 118 + .../tests/expand/naming/enum-own.rs | 14 + .../tests/expand/naming/enum-ref.expanded.rs | 99 + .../tests/expand/naming/enum-ref.rs | 14 + .../expand/naming/struct-all.expanded.rs | 150 + .../tests/expand/naming/struct-all.rs | 10 + .../expand/naming/struct-mut.expanded.rs | 108 + .../tests/expand/naming/struct-mut.rs | 10 + .../expand/naming/struct-none.expanded.rs | 101 + .../tests/expand/naming/struct-none.rs | 10 + .../expand/naming/struct-own.expanded.rs | 134 + .../tests/expand/naming/struct-own.rs | 10 + .../expand/naming/struct-ref.expanded.rs | 110 + .../tests/expand/naming/struct-ref.rs | 10 + .../naming/tuple_struct-all.expanded.rs | 129 + 
.../tests/expand/naming/tuple_struct-all.rs | 6 + .../naming/tuple_struct-mut.expanded.rs | 93 + .../tests/expand/naming/tuple_struct-mut.rs | 6 + .../naming/tuple_struct-none.expanded.rs | 89 + .../tests/expand/naming/tuple_struct-none.rs | 6 + .../naming/tuple_struct-own.expanded.rs | 119 + .../tests/expand/naming/tuple_struct-own.rs | 6 + .../naming/tuple_struct-ref.expanded.rs | 95 + .../tests/expand/naming/tuple_struct-ref.rs | 6 + .../tests/expand/not_unpin/enum.expanded.rs | 127 + .../tests/expand/not_unpin/enum.rs | 14 + .../tests/expand/not_unpin/struct.expanded.rs | 92 + .../tests/expand/not_unpin/struct.rs | 10 + .../expand/not_unpin/tuple_struct.expanded.rs | 80 + .../tests/expand/not_unpin/tuple_struct.rs | 6 + .../tests/expand/pinned_drop/enum.expanded.rs | 149 + .../tests/expand/pinned_drop/enum.rs | 23 + .../expand/pinned_drop/struct.expanded.rs | 113 + .../tests/expand/pinned_drop/struct.rs | 19 + .../pinned_drop/tuple_struct.expanded.rs | 101 + .../tests/expand/pinned_drop/tuple_struct.rs | 15 + .../expand/project_replace/enum.expanded.rs | 118 + .../tests/expand/project_replace/enum.rs | 14 + .../expand/project_replace/struct.expanded.rs | 125 + .../tests/expand/project_replace/struct.rs | 10 + .../project_replace/tuple_struct.expanded.rs | 110 + .../expand/project_replace/tuple_struct.rs | 6 + .../tests/expand/pub/enum.expanded.rs | 137 + .../rust/pin-project/tests/expand/pub/enum.rs | 14 + .../tests/expand/pub/struct.expanded.rs | 101 + .../pin-project/tests/expand/pub/struct.rs | 10 + .../tests/expand/pub/tuple_struct.expanded.rs | 89 + .../tests/expand/pub/tuple_struct.rs | 6 + .../expand/unsafe_unpin/enum.expanded.rs | 121 + .../tests/expand/unsafe_unpin/enum.rs | 16 + .../expand/unsafe_unpin/struct.expanded.rs | 86 + .../tests/expand/unsafe_unpin/struct.rs | 12 + .../unsafe_unpin/tuple_struct.expanded.rs | 74 + .../tests/expand/unsafe_unpin/tuple_struct.rs | 8 + .../rust/pin-project/tests/expandtest.rs | 43 + .../tests/include/basic-safe-part.rs | 67 +- third_party/rust/pin-project/tests/lint.rs | 1129 ++++++- .../rust/pin-project/tests/pin_project.rs | 467 +-- .../rust/pin-project/tests/pinned_drop.rs | 226 +- third_party/rust/pin-project/tests/project.rs | 301 -- .../pin-project/tests/project_if_attr.rs.in | 45 - .../rust/pin-project/tests/project_ref.rs | 176 - .../rust/pin-project/tests/project_replace.rs | 100 - .../rust/pin-project/tests/proper_unpin.rs | 153 + .../rust/pin-project/tests/repr_packed.rs | 10 +- third_party/rust/pin-project/tests/sized.rs | 13 - .../tests/ui/cfg/cfg_attr-resolve.rs | 6 +- .../tests/ui/cfg/cfg_attr-resolve.stderr | 6 +- .../tests/ui/cfg/cfg_attr-type-mismatch.rs | 15 +- .../ui/cfg/cfg_attr-type-mismatch.stderr | 14 +- .../tests/ui/cfg/cfg_attr-unpin.rs | 21 - .../tests/ui/cfg/cfg_attr-unpin.stderr | 43 - .../ui/cfg/packed_sneaky-span-issue-1.rs | 11 +- .../ui/cfg/packed_sneaky-span-issue-1.stderr | 8 +- .../ui/cfg/packed_sneaky-span-issue-2.rs | 11 +- .../ui/cfg/packed_sneaky-span-issue-2.stderr | 8 +- .../pin-project/tests/ui/cfg/packed_sneaky.rs | 4 +- .../tests/ui/cfg/packed_sneaky.stderr | 6 +- .../pin-project/tests/ui/cfg/proper_unpin.rs | 28 - .../tests/ui/cfg/proper_unpin.stderr | 25 - .../pin-project/tests/ui/cfg/unsupported.rs | 6 +- .../tests/ui/cfg/unsupported.stderr | 19 +- .../tests/ui/not_unpin/assert-not-unpin.rs | 40 - .../ui/not_unpin/assert-not-unpin.stderr | 146 - .../tests/ui/not_unpin/conflict-unpin.rs | 12 +- .../tests/ui/not_unpin/impl-unsafe-unpin.rs | 12 +- .../ui/not_unpin/impl-unsafe-unpin.stderr | 6 
+- .../ui/pin_project/add-attr-to-struct.rs | 7 +- .../ui/pin_project/add-attr-to-struct.stderr | 8 +- .../tests/ui/pin_project/add-pinned-field.rs | 4 +- .../tests/ui/pin_project/conflict-drop.rs | 11 +- .../tests/ui/pin_project/conflict-drop.stderr | 10 +- .../tests/ui/pin_project/conflict-unpin.rs | 12 +- .../tests/ui/pin_project/impl-unsafe-unpin.rs | 12 +- .../ui/pin_project/impl-unsafe-unpin.stderr | 6 +- .../tests/ui/pin_project/import_unnamed.rs | 30 + .../ui/pin_project/import_unnamed.stderr | 29 + .../tests/ui/pin_project/invalid.rs | 45 +- .../tests/ui/pin_project/invalid.stderr | 214 +- .../pin_project/overlapping_unpin_struct.rs | 13 +- .../overlapping_unpin_struct.stderr | 28 +- .../tests/ui/pin_project/override-priv-mod.rs | 32 + .../ui/pin_project/override-priv-mod.stderr | 10 + .../tests/ui/pin_project/packed-enum.rs | 4 + .../tests/ui/pin_project/packed-enum.stderr | 52 +- .../tests/ui/pin_project/packed-name-value.rs | 19 +- .../ui/pin_project/packed-name-value.stderr | 36 +- .../tests/ui/pin_project/packed_sneaky-1.rs | 9 +- .../ui/pin_project/packed_sneaky-1.stderr | 24 +- .../tests/ui/pin_project/packed_sneaky-2.rs | 2 +- .../ui/pin_project/packed_sneaky-2.stderr | 2 +- .../tests/ui/pin_project/packed_sneaky-3.rs | 32 + .../ui/pin_project/packed_sneaky-3.stderr | 32 + .../ui/pin_project/private_in_public-enum.rs | 8 +- .../pin_project/private_in_public-enum.stderr | 12 +- .../ui/pin_project/project_replace_unsized.rs | 2 +- .../project_replace_unsized.stderr | 61 +- .../project_replace_unsized_fn_params.rs | 2 +- .../project_replace_unsized_fn_params.stderr | 61 +- .../tests/ui/pin_project/proper_unpin.rs | 38 - .../tests/ui/pin_project/proper_unpin.stderr | 87 - .../ui/pin_project/remove-attr-from-field.rs | 15 +- .../pin_project/remove-attr-from-field.stderr | 12 +- .../ui/pin_project/remove-attr-from-struct.rs | 17 +- .../remove-attr-from-struct.stderr | 82 +- .../tests/ui/pin_project/unpin_sneaky.rs | 6 +- .../tests/ui/pin_project/unpin_sneaky.stderr | 10 +- .../tests/ui/pin_project/visibility.rs | 87 +- .../tests/ui/pin_project/visibility.stderr | 70 +- .../tests/ui/pinned_drop/call-drop-inner.rs | 5 +- .../ui/pinned_drop/call-drop-inner.stderr | 11 +- .../ui/pinned_drop/conditional-drop-impl.rs | 7 +- .../pinned_drop/conditional-drop-impl.stderr | 22 +- .../ui/pinned_drop/forget-pinned-drop-impl.rs | 2 +- .../tests/ui/pinned_drop/invalid-self.rs | 6 +- .../tests/ui/pinned_drop/invalid-self.stderr | 4 +- .../tests/ui/pinned_drop/invalid.rs | 10 +- .../tests/ui/pinned_drop/invalid.stderr | 102 +- .../ui/pinned_drop/pinned-drop-no-attr-arg.rs | 10 +- .../pinned-drop-no-attr-arg.stderr | 10 +- .../pin-project/tests/ui/pinned_drop/self.rs | 16 +- .../tests/ui/pinned_drop/self.stderr | 38 +- .../tests/ui/pinned_drop/unsafe-call.rs | 11 +- .../tests/ui/pinned_drop/unsafe-call.stderr | 6 +- .../tests/ui/project/ambiguous-let.rs | 26 - .../tests/ui/project/ambiguous-let.stderr | 5 - .../tests/ui/project/deprecated.rs | 8 - .../tests/ui/project/deprecated.stderr | 23 - .../pin-project/tests/ui/project/invalid.rs | 192 -- .../tests/ui/project/invalid.stderr | 155 - .../tests/ui/project/type-mismatch.rs | 72 - .../tests/ui/project/type-mismatch.stderr | 17 - .../tests/ui/project/use-public.rs | 17 - .../tests/ui/project/use-public.stderr | 7 - .../rust/pin-project/tests/ui/project/use.rs | 19 - .../pin-project/tests/ui/project/use.stderr | 11 - .../tests/ui/unsafe_unpin/conflict-unpin.rs | 12 +- .../ui/unsafe_unpin/conflict-unpin.stderr | 6 +- 
.../not-implement-unsafe-unpin.rs | 14 - .../not-implement-unsafe-unpin.stderr | 19 - .../tests/ui/unsafe_unpin/proper_unpin.rs | 41 - .../tests/ui/unsafe_unpin/proper_unpin.stderr | 127 - .../tests/ui/unstable-features/README.md | 6 +- .../marker_trait_attr-feature-gate.rs | 7 +- .../marker_trait_attr-feature-gate.stderr | 6 +- .../ui/unstable-features/marker_trait_attr.rs | 7 +- .../marker_trait_attr.stderr | 6 +- .../overlapping_marker_traits-feature-gate.rs | 7 +- ...rlapping_marker_traits-feature-gate.stderr | 6 +- .../overlapping_marker_traits.rs | 11 +- .../overlapping_marker_traits.stderr | 6 +- .../run-pass/stmt_expr_attributes.rs | 63 - .../stmt_expr_attributes-feature-gate.rs | 57 - .../stmt_expr_attributes-feature-gate.stderr | 35 - .../trivial_bounds-feature-gate.rs | 5 +- .../trivial_bounds-feature-gate.stderr | 20 +- .../rust/pin-project/tests/unsafe_unpin.rs | 50 +- .../serde_urlencoded/.cargo-checksum.json | 2 +- third_party/rust/serde_urlencoded/Cargo.toml | 31 +- third_party/rust/serde_urlencoded/README.md | 5 +- third_party/rust/serde_urlencoded/bors.toml | 1 - .../rust/serde_urlencoded/rustfmt.toml | 2 +- third_party/rust/serde_urlencoded/src/de.rs | 9 +- third_party/rust/serde_urlencoded/src/lib.rs | 11 +- .../rust/serde_urlencoded/src/ser/key.rs | 6 +- .../rust/serde_urlencoded/src/ser/mod.rs | 73 +- .../rust/serde_urlencoded/src/ser/pair.rs | 32 +- .../rust/serde_urlencoded/src/ser/part.rs | 30 +- .../rust/serde_urlencoded/src/ser/value.rs | 18 +- .../tests/test_deserialize.rs | 9 +- .../serde_urlencoded/tests/test_serialize.rs | 27 +- third_party/rust/socket2/.cargo-checksum.json | 2 +- third_party/rust/socket2/Cargo.toml | 43 +- third_party/rust/socket2/README.md | 70 +- third_party/rust/socket2/SO_ACCEPTCONN.patch | 96 - third_party/rust/socket2/TODO | 20 - third_party/rust/socket2/check_targets.bash | 15 - third_party/rust/socket2/diff.patch | 134 - third_party/rust/socket2/src/lib.rs | 350 +- third_party/rust/socket2/src/sockaddr.rs | 408 +-- third_party/rust/socket2/src/socket.rs | 2319 ++++++++----- third_party/rust/socket2/src/sockref.rs | 147 + third_party/rust/socket2/src/sys/unix.rs | 3006 ++++++++++------- third_party/rust/socket2/src/sys/windows.rs | 1610 ++++----- third_party/rust/socket2/src/tests.rs | 62 - third_party/rust/socket2/src/utils.rs | 48 - .../rust/tokio-0.2.25/.cargo-checksum.json | 1 - third_party/rust/tokio-0.2.25/CHANGELOG.md | 697 ---- third_party/rust/tokio-0.2.25/Cargo.toml | 143 - third_party/rust/tokio-0.2.25/README.md | 174 - third_party/rust/tokio-0.2.25/src/coop.rs | 301 -- .../rust/tokio-0.2.25/src/fs/canonicalize.rs | 51 - third_party/rust/tokio-0.2.25/src/fs/copy.rs | 24 - .../rust/tokio-0.2.25/src/fs/create_dir.rs | 52 - .../tokio-0.2.25/src/fs/create_dir_all.rs | 53 - .../rust/tokio-0.2.25/src/fs/dir_builder.rs | 117 - third_party/rust/tokio-0.2.25/src/fs/file.rs | 790 ----- .../rust/tokio-0.2.25/src/fs/hard_link.rs | 46 - .../rust/tokio-0.2.25/src/fs/metadata.rs | 47 - third_party/rust/tokio-0.2.25/src/fs/mod.rs | 112 - .../rust/tokio-0.2.25/src/fs/open_options.rs | 403 --- .../rust/tokio-0.2.25/src/fs/os/mod.rs | 7 - .../src/fs/os/unix/dir_builder_ext.rs | 29 - .../rust/tokio-0.2.25/src/fs/os/unix/mod.rs | 10 - .../src/fs/os/unix/open_options_ext.rs | 79 - .../tokio-0.2.25/src/fs/os/unix/symlink.rs | 18 - .../tokio-0.2.25/src/fs/os/windows/mod.rs | 7 - .../src/fs/os/windows/symlink_dir.rs | 19 - .../src/fs/os/windows/symlink_file.rs | 19 - third_party/rust/tokio-0.2.25/src/fs/read.rs | 47 - 
.../rust/tokio-0.2.25/src/fs/read_dir.rs | 244 -- .../rust/tokio-0.2.25/src/fs/read_link.rs | 14 - .../tokio-0.2.25/src/fs/read_to_string.rs | 24 - .../rust/tokio-0.2.25/src/fs/remove_dir.rs | 12 - .../tokio-0.2.25/src/fs/remove_dir_all.rs | 14 - .../rust/tokio-0.2.25/src/fs/remove_file.rs | 18 - .../rust/tokio-0.2.25/src/fs/rename.rs | 17 - .../tokio-0.2.25/src/fs/set_permissions.rs | 15 - .../tokio-0.2.25/src/fs/symlink_metadata.rs | 15 - third_party/rust/tokio-0.2.25/src/fs/write.rs | 25 - .../tokio-0.2.25/src/future/maybe_done.rs | 76 - .../rust/tokio-0.2.25/src/future/mod.rs | 15 - .../rust/tokio-0.2.25/src/future/pending.rs | 44 - .../rust/tokio-0.2.25/src/future/poll_fn.rs | 38 - .../rust/tokio-0.2.25/src/future/ready.rs | 27 - .../rust/tokio-0.2.25/src/future/try_join.rs | 82 - .../tokio-0.2.25/src/io/async_buf_read.rs | 117 - .../rust/tokio-0.2.25/src/io/async_read.rs | 205 -- .../rust/tokio-0.2.25/src/io/async_seek.rs | 101 - .../rust/tokio-0.2.25/src/io/async_write.rs | 293 -- .../rust/tokio-0.2.25/src/io/blocking.rs | 279 -- .../rust/tokio-0.2.25/src/io/driver/mod.rs | 403 --- .../tokio-0.2.25/src/io/driver/platform.rs | 44 - .../src/io/driver/scheduled_io.rs | 141 - third_party/rust/tokio-0.2.25/src/io/mod.rs | 256 -- .../rust/tokio-0.2.25/src/io/poll_evented.rs | 461 --- .../rust/tokio-0.2.25/src/io/registration.rs | 340 -- third_party/rust/tokio-0.2.25/src/io/seek.rs | 56 - third_party/rust/tokio-0.2.25/src/io/split.rs | 195 -- .../rust/tokio-0.2.25/src/io/stderr.rs | 108 - third_party/rust/tokio-0.2.25/src/io/stdin.rs | 78 - .../rust/tokio-0.2.25/src/io/stdout.rs | 108 - .../src/io/util/async_buf_read_ext.rs | 258 -- .../src/io/util/async_read_ext.rs | 1118 ------ .../src/io/util/async_seek_ext.rs | 67 - .../src/io/util/async_write_ext.rs | 1006 ------ .../tokio-0.2.25/src/io/util/buf_reader.rs | 203 -- .../tokio-0.2.25/src/io/util/buf_stream.rs | 169 - .../tokio-0.2.25/src/io/util/buf_writer.rs | 192 -- .../rust/tokio-0.2.25/src/io/util/chain.rs | 150 - .../rust/tokio-0.2.25/src/io/util/copy.rs | 135 - .../rust/tokio-0.2.25/src/io/util/empty.rs | 87 - .../rust/tokio-0.2.25/src/io/util/flush.rs | 48 - .../rust/tokio-0.2.25/src/io/util/lines.rs | 133 - .../rust/tokio-0.2.25/src/io/util/mem.rs | 222 -- .../rust/tokio-0.2.25/src/io/util/mod.rs | 94 - .../rust/tokio-0.2.25/src/io/util/read.rs | 55 - .../rust/tokio-0.2.25/src/io/util/read_buf.rs | 38 - .../tokio-0.2.25/src/io/util/read_exact.rs | 77 - .../rust/tokio-0.2.25/src/io/util/read_int.rs | 133 - .../tokio-0.2.25/src/io/util/read_line.rs | 129 - .../tokio-0.2.25/src/io/util/read_to_end.rs | 170 - .../src/io/util/read_to_string.rs | 94 - .../tokio-0.2.25/src/io/util/read_until.rs | 89 - .../tokio-0.2.25/src/io/util/reader_stream.rs | 105 - .../rust/tokio-0.2.25/src/io/util/repeat.rs | 74 - .../rust/tokio-0.2.25/src/io/util/shutdown.rs | 48 - .../rust/tokio-0.2.25/src/io/util/sink.rs | 87 - .../rust/tokio-0.2.25/src/io/util/split.rs | 114 - .../tokio-0.2.25/src/io/util/stream_reader.rs | 184 - .../rust/tokio-0.2.25/src/io/util/take.rs | 131 - .../rust/tokio-0.2.25/src/io/util/write.rs | 37 - .../tokio-0.2.25/src/io/util/write_all.rs | 57 - .../tokio-0.2.25/src/io/util/write_buf.rs | 40 - .../tokio-0.2.25/src/io/util/write_int.rs | 132 - third_party/rust/tokio-0.2.25/src/lib.rs | 437 --- .../rust/tokio-0.2.25/src/loom/mocked.rs | 13 - third_party/rust/tokio-0.2.25/src/loom/mod.rs | 12 - .../tokio-0.2.25/src/loom/std/atomic_ptr.rs | 28 - .../tokio-0.2.25/src/loom/std/atomic_u16.rs | 44 - 
.../tokio-0.2.25/src/loom/std/atomic_u32.rs | 34 - .../tokio-0.2.25/src/loom/std/atomic_u64.rs | 60 - .../tokio-0.2.25/src/loom/std/atomic_u8.rs | 34 - .../tokio-0.2.25/src/loom/std/atomic_usize.rs | 56 - .../rust/tokio-0.2.25/src/loom/std/mod.rs | 86 - .../tokio-0.2.25/src/loom/std/parking_lot.rs | 97 - .../tokio-0.2.25/src/loom/std/unsafe_cell.rs | 16 - .../rust/tokio-0.2.25/src/macros/cfg.rs | 404 --- .../rust/tokio-0.2.25/src/macros/join.rs | 119 - .../rust/tokio-0.2.25/src/macros/loom.rs | 12 - .../rust/tokio-0.2.25/src/macros/mod.rs | 35 - .../rust/tokio-0.2.25/src/macros/pin.rs | 144 - .../rust/tokio-0.2.25/src/macros/ready.rs | 8 - .../tokio-0.2.25/src/macros/scoped_tls.rs | 79 - .../rust/tokio-0.2.25/src/macros/select.rs | 880 ----- .../rust/tokio-0.2.25/src/macros/support.rs | 8 - .../tokio-0.2.25/src/macros/thread_local.rs | 4 - .../rust/tokio-0.2.25/src/macros/try_join.rs | 132 - third_party/rust/tokio-0.2.25/src/net/addr.rs | 308 -- .../rust/tokio-0.2.25/src/net/lookup_host.rs | 38 - third_party/rust/tokio-0.2.25/src/net/mod.rs | 49 - .../rust/tokio-0.2.25/src/net/tcp/incoming.rs | 42 - .../rust/tokio-0.2.25/src/net/tcp/listener.rs | 466 --- .../rust/tokio-0.2.25/src/net/tcp/mod.rs | 16 - .../rust/tokio-0.2.25/src/net/tcp/split.rs | 186 - .../tokio-0.2.25/src/net/tcp/split_owned.rs | 272 -- .../rust/tokio-0.2.25/src/net/tcp/stream.rs | 939 ----- .../rust/tokio-0.2.25/src/net/udp/mod.rs | 7 - .../rust/tokio-0.2.25/src/net/udp/socket.rs | 454 --- .../rust/tokio-0.2.25/src/net/udp/split.rs | 148 - .../tokio-0.2.25/src/net/unix/datagram/mod.rs | 8 - .../src/net/unix/datagram/socket.rs | 350 -- .../src/net/unix/datagram/split.rs | 68 - .../src/net/unix/datagram/split_owned.rs | 148 - .../tokio-0.2.25/src/net/unix/incoming.rs | 42 - .../tokio-0.2.25/src/net/unix/listener.rs | 232 -- .../rust/tokio-0.2.25/src/net/unix/mod.rs | 21 - .../rust/tokio-0.2.25/src/net/unix/split.rs | 95 - .../tokio-0.2.25/src/net/unix/split_owned.rs | 187 - .../rust/tokio-0.2.25/src/net/unix/stream.rs | 257 -- .../rust/tokio-0.2.25/src/net/unix/ucred.rs | 151 - .../rust/tokio-0.2.25/src/park/either.rs | 72 - third_party/rust/tokio-0.2.25/src/park/mod.rs | 121 - .../rust/tokio-0.2.25/src/park/thread.rs | 329 -- third_party/rust/tokio-0.2.25/src/prelude.rs | 21 - .../rust/tokio-0.2.25/src/process/kill.rs | 13 - .../rust/tokio-0.2.25/src/process/mod.rs | 1125 ------ .../rust/tokio-0.2.25/src/process/unix/mod.rs | 227 -- .../tokio-0.2.25/src/process/unix/orphan.rs | 191 -- .../tokio-0.2.25/src/process/unix/reap.rs | 342 -- .../rust/tokio-0.2.25/src/process/windows.rs | 191 -- .../src/runtime/basic_scheduler.rs | 326 -- .../tokio-0.2.25/src/runtime/blocking/mod.rs | 43 - .../tokio-0.2.25/src/runtime/blocking/pool.rs | 329 -- .../src/runtime/blocking/schedule.rs | 24 - .../src/runtime/blocking/shutdown.rs | 71 - .../tokio-0.2.25/src/runtime/blocking/task.rs | 43 - .../rust/tokio-0.2.25/src/runtime/builder.rs | 522 --- .../rust/tokio-0.2.25/src/runtime/context.rs | 73 - .../rust/tokio-0.2.25/src/runtime/enter.rs | 225 -- .../rust/tokio-0.2.25/src/runtime/handle.rs | 371 -- .../rust/tokio-0.2.25/src/runtime/io.rs | 63 - .../rust/tokio-0.2.25/src/runtime/mod.rs | 580 ---- .../rust/tokio-0.2.25/src/runtime/park.rs | 257 -- .../rust/tokio-0.2.25/src/runtime/queue.rs | 630 ---- .../rust/tokio-0.2.25/src/runtime/shell.rs | 62 - .../rust/tokio-0.2.25/src/runtime/spawner.rs | 48 - .../tokio-0.2.25/src/runtime/task/core.rs | 289 -- .../tokio-0.2.25/src/runtime/task/error.rs | 163 - 
.../tokio-0.2.25/src/runtime/task/harness.rs | 371 -- .../tokio-0.2.25/src/runtime/task/join.rs | 156 - .../rust/tokio-0.2.25/src/runtime/task/mod.rs | 220 -- .../rust/tokio-0.2.25/src/runtime/task/raw.rs | 131 - .../tokio-0.2.25/src/runtime/task/stack.rs | 83 - .../tokio-0.2.25/src/runtime/task/state.rs | 446 --- .../tokio-0.2.25/src/runtime/task/waker.rs | 101 - .../src/runtime/tests/loom_blocking.rs | 31 - .../src/runtime/tests/loom_oneshot.rs | 49 - .../src/runtime/tests/loom_pool.rs | 380 --- .../src/runtime/tests/loom_queue.rs | 216 -- .../tokio-0.2.25/src/runtime/tests/mod.rs | 13 - .../tokio-0.2.25/src/runtime/tests/queue.rs | 202 -- .../tokio-0.2.25/src/runtime/tests/task.rs | 159 - .../src/runtime/thread_pool/atomic_cell.rs | 52 - .../src/runtime/thread_pool/idle.rs | 222 -- .../src/runtime/thread_pool/mod.rs | 121 - .../src/runtime/thread_pool/worker.rs | 804 ----- .../rust/tokio-0.2.25/src/runtime/time.rs | 59 - .../rust/tokio-0.2.25/src/signal/ctrl_c.rs | 53 - .../rust/tokio-0.2.25/src/signal/mod.rs | 60 - .../rust/tokio-0.2.25/src/signal/registry.rs | 321 -- .../rust/tokio-0.2.25/src/signal/unix.rs | 513 --- .../rust/tokio-0.2.25/src/signal/windows.rs | 297 -- .../rust/tokio-0.2.25/src/stream/all.rs | 45 - .../rust/tokio-0.2.25/src/stream/any.rs | 45 - .../rust/tokio-0.2.25/src/stream/collect.rs | 246 -- .../rust/tokio-0.2.25/src/stream/next.rs | 28 - .../rust/tokio-0.2.25/src/stream/timeout.rs | 65 - .../rust/tokio-0.2.25/src/stream/try_next.rs | 30 - .../rust/tokio-0.2.25/src/sync/barrier.rs | 136 - .../tokio-0.2.25/src/sync/batch_semaphore.rs | 561 --- .../rust/tokio-0.2.25/src/sync/broadcast.rs | 1237 ------- .../src/sync/cancellation_token.rs | 861 ----- third_party/rust/tokio-0.2.25/src/sync/mod.rs | 486 --- .../rust/tokio-0.2.25/src/sync/mpsc/block.rs | 387 --- .../tokio-0.2.25/src/sync/mpsc/bounded.rs | 479 --- .../rust/tokio-0.2.25/src/sync/mpsc/chan.rs | 543 --- .../rust/tokio-0.2.25/src/sync/mpsc/error.rs | 146 - .../rust/tokio-0.2.25/src/sync/mpsc/list.rs | 341 -- .../rust/tokio-0.2.25/src/sync/mpsc/mod.rs | 92 - .../tokio-0.2.25/src/sync/mpsc/unbounded.rs | 180 - .../rust/tokio-0.2.25/src/sync/mutex.rs | 453 --- .../rust/tokio-0.2.25/src/sync/notify.rs | 556 --- .../rust/tokio-0.2.25/src/sync/oneshot.rs | 795 ----- .../rust/tokio-0.2.25/src/sync/rwlock.rs | 559 --- .../rust/tokio-0.2.25/src/sync/semaphore.rs | 166 - .../tokio-0.2.25/src/sync/semaphore_ll.rs | 1225 ------- .../src/sync/task/atomic_waker.rs | 318 -- .../rust/tokio-0.2.25/src/sync/task/mod.rs | 4 - .../src/sync/tests/atomic_waker.rs | 34 - .../src/sync/tests/loom_atomic_waker.rs | 45 - .../src/sync/tests/loom_broadcast.rs | 180 - .../tokio-0.2.25/src/sync/tests/loom_list.rs | 48 - .../tokio-0.2.25/src/sync/tests/loom_mpsc.rs | 77 - .../src/sync/tests/loom_notify.rs | 90 - .../src/sync/tests/loom_oneshot.rs | 109 - .../src/sync/tests/loom_rwlock.rs | 78 - .../src/sync/tests/loom_semaphore_batch.rs | 215 -- .../src/sync/tests/loom_semaphore_ll.rs | 192 -- .../rust/tokio-0.2.25/src/sync/tests/mod.rs | 18 - .../src/sync/tests/semaphore_batch.rs | 250 -- .../src/sync/tests/semaphore_ll.rs | 470 --- .../rust/tokio-0.2.25/src/sync/watch.rs | 431 --- .../rust/tokio-0.2.25/src/task/blocking.rs | 132 - .../rust/tokio-0.2.25/src/task/local.rs | 589 ---- third_party/rust/tokio-0.2.25/src/task/mod.rs | 242 -- .../rust/tokio-0.2.25/src/task/spawn.rs | 135 - .../rust/tokio-0.2.25/src/task/task_local.rs | 242 -- .../rust/tokio-0.2.25/src/task/yield_now.rs | 38 - .../rust/tokio-0.2.25/src/time/clock.rs | 165 - 
.../rust/tokio-0.2.25/src/time/delay.rs | 118 - .../src/time/driver/atomic_stack.rs | 124 - .../tokio-0.2.25/src/time/driver/entry.rs | 358 -- .../tokio-0.2.25/src/time/driver/handle.rs | 39 - .../rust/tokio-0.2.25/src/time/driver/mod.rs | 415 --- .../src/time/driver/registration.rs | 56 - .../tokio-0.2.25/src/time/driver/stack.rs | 121 - .../tokio-0.2.25/src/time/driver/tests/mod.rs | 55 - .../rust/tokio-0.2.25/src/time/error.rs | 101 - .../rust/tokio-0.2.25/src/time/instant.rs | 199 -- .../rust/tokio-0.2.25/src/time/interval.rs | 175 - third_party/rust/tokio-0.2.25/src/time/mod.rs | 162 - .../rust/tokio-0.2.25/src/time/tests/mod.rs | 22 - .../tokio-0.2.25/src/time/tests/test_delay.rs | 449 --- .../rust/tokio-0.2.25/src/time/timeout.rs | 184 - third_party/rust/tokio-0.2.25/src/util/bit.rs | 85 - .../src/util/intrusive_double_linked_list.rs | 788 ----- .../rust/tokio-0.2.25/src/util/linked_list.rs | 585 ---- third_party/rust/tokio-0.2.25/src/util/mod.rs | 28 - third_party/rust/tokio-0.2.25/src/util/pad.rs | 52 - .../rust/tokio-0.2.25/src/util/rand.rs | 64 - .../rust/tokio-0.2.25/src/util/slab/addr.rs | 154 - .../rust/tokio-0.2.25/src/util/slab/entry.rs | 7 - .../tokio-0.2.25/src/util/slab/generation.rs | 32 - .../rust/tokio-0.2.25/src/util/slab/mod.rs | 107 - .../rust/tokio-0.2.25/src/util/slab/page.rs | 187 - .../rust/tokio-0.2.25/src/util/slab/shard.rs | 105 - .../rust/tokio-0.2.25/src/util/slab/slot.rs | 42 - .../rust/tokio-0.2.25/src/util/slab/stack.rs | 58 - .../src/util/slab/tests/loom_slab.rs | 327 -- .../src/util/slab/tests/loom_stack.rs | 88 - .../tokio-0.2.25/src/util/slab/tests/mod.rs | 2 - .../rust/tokio-0.2.25/src/util/trace.rs | 57 - .../rust/tokio-0.2.25/src/util/try_lock.rs | 80 - .../rust/tokio-0.2.25/src/util/wake.rs | 83 - .../rust/tokio-0.2.25/tests/_require_full.rs | 2 - .../tokio-0.2.25/tests/async_send_sync.rs | 264 -- .../rust/tokio-0.2.25/tests/buffered.rs | 51 - third_party/rust/tokio-0.2.25/tests/fs.rs | 20 - .../rust/tokio-0.2.25/tests/fs_copy.rs | 39 - third_party/rust/tokio-0.2.25/tests/fs_dir.rs | 119 - .../rust/tokio-0.2.25/tests/fs_file.rs | 87 - .../rust/tokio-0.2.25/tests/fs_file_mocked.rs | 777 ----- .../rust/tokio-0.2.25/tests/fs_link.rs | 70 - .../rust/tokio-0.2.25/tests/io_async_read.rs | 148 - .../rust/tokio-0.2.25/tests/io_chain.rs | 16 - .../rust/tokio-0.2.25/tests/io_copy.rs | 36 - .../rust/tokio-0.2.25/tests/io_driver.rs | 88 - .../rust/tokio-0.2.25/tests/io_driver_drop.rs | 53 - .../rust/tokio-0.2.25/tests/io_lines.rs | 35 - .../rust/tokio-0.2.25/tests/io_mem_stream.rs | 83 - .../rust/tokio-0.2.25/tests/io_read.rs | 60 - .../rust/tokio-0.2.25/tests/io_read_exact.rs | 15 - .../rust/tokio-0.2.25/tests/io_read_line.rs | 107 - .../rust/tokio-0.2.25/tests/io_read_to_end.rs | 15 - .../tokio-0.2.25/tests/io_read_to_string.rs | 63 - .../rust/tokio-0.2.25/tests/io_read_until.rs | 74 - .../rust/tokio-0.2.25/tests/io_split.rs | 78 - .../rust/tokio-0.2.25/tests/io_take.rs | 16 - .../rust/tokio-0.2.25/tests/io_write.rs | 58 - .../rust/tokio-0.2.25/tests/io_write_all.rs | 51 - .../rust/tokio-0.2.25/tests/io_write_int.rs | 37 - .../rust/tokio-0.2.25/tests/macros_join.rs | 72 - .../rust/tokio-0.2.25/tests/macros_pin.rs | 13 - .../rust/tokio-0.2.25/tests/macros_select.rs | 465 --- .../rust/tokio-0.2.25/tests/macros_test.rs | 19 - .../tokio-0.2.25/tests/macros_try_join.rs | 101 - .../tokio-0.2.25/tests/net_bind_resource.rs | 14 - .../tokio-0.2.25/tests/net_lookup_host.rs | 36 - third_party/rust/tokio-0.2.25/tests/no_rt.rs | 27 - 
.../tokio-0.2.25/tests/process_issue_2174.rs | 46 - .../tokio-0.2.25/tests/process_issue_42.rs | 36 - .../tests/process_kill_on_drop.rs | 42 - .../rust/tokio-0.2.25/tests/process_smoke.rs | 29 - .../rust/tokio-0.2.25/tests/rt_basic.rs | 137 - .../rust/tokio-0.2.25/tests/rt_common.rs | 1067 ------ .../rust/tokio-0.2.25/tests/rt_threaded.rs | 396 --- .../rust/tokio-0.2.25/tests/signal_ctrl_c.rs | 30 - .../tokio-0.2.25/tests/signal_drop_recv.rs | 22 - .../rust/tokio-0.2.25/tests/signal_drop_rt.rs | 45 - .../tokio-0.2.25/tests/signal_drop_signal.rs | 26 - .../tokio-0.2.25/tests/signal_multi_rt.rs | 55 - .../rust/tokio-0.2.25/tests/signal_no_rt.rs | 11 - .../tokio-0.2.25/tests/signal_notify_both.rs | 23 - .../rust/tokio-0.2.25/tests/signal_twice.rs | 22 - .../rust/tokio-0.2.25/tests/signal_usr1.rs | 23 - .../tokio-0.2.25/tests/support/mock_file.rs | 281 -- .../tokio-0.2.25/tests/support/mock_pool.rs | 66 - .../rust/tokio-0.2.25/tests/support/signal.rs | 7 - .../rust/tokio-0.2.25/tests/sync_barrier.rs | 96 - .../rust/tokio-0.2.25/tests/sync_broadcast.rs | 532 --- .../tests/sync_cancellation_token.rs | 220 -- .../rust/tokio-0.2.25/tests/sync_errors.rs | 27 - .../rust/tokio-0.2.25/tests/sync_mpsc.rs | 538 --- .../rust/tokio-0.2.25/tests/sync_mutex.rs | 163 - .../tokio-0.2.25/tests/sync_mutex_owned.rs | 121 - .../rust/tokio-0.2.25/tests/sync_notify.rs | 102 - .../rust/tokio-0.2.25/tests/sync_oneshot.rs | 234 -- .../rust/tokio-0.2.25/tests/sync_rwlock.rs | 237 -- .../rust/tokio-0.2.25/tests/sync_semaphore.rs | 81 - .../tests/sync_semaphore_owned.rs | 75 - .../rust/tokio-0.2.25/tests/sync_watch.rs | 231 -- .../rust/tokio-0.2.25/tests/task_blocking.rs | 245 -- .../rust/tokio-0.2.25/tests/task_local.rs | 31 - .../rust/tokio-0.2.25/tests/task_local_set.rs | 499 --- .../rust/tokio-0.2.25/tests/tcp_accept.rs | 101 - .../rust/tokio-0.2.25/tests/tcp_connect.rs | 229 -- .../rust/tokio-0.2.25/tests/tcp_echo.rs | 42 - .../rust/tokio-0.2.25/tests/tcp_into_split.rs | 131 - .../rust/tokio-0.2.25/tests/tcp_peek.rs | 29 - .../rust/tokio-0.2.25/tests/tcp_shutdown.rs | 29 - .../rust/tokio-0.2.25/tests/tcp_split.rs | 42 - .../rust/tokio-0.2.25/tests/test_clock.rs | 50 - .../rust/tokio-0.2.25/tests/time_delay.rs | 196 -- .../rust/tokio-0.2.25/tests/time_interval.rs | 66 - .../rust/tokio-0.2.25/tests/time_rt.rs | 93 - .../rust/tokio-0.2.25/tests/time_timeout.rs | 110 - third_party/rust/tokio-0.2.25/tests/udp.rs | 120 - .../rust/tokio-0.2.25/tests/uds_cred.rs | 30 - .../rust/tokio-0.2.25/tests/uds_datagram.rs | 133 - .../rust/tokio-0.2.25/tests/uds_split.rs | 43 - .../rust/tokio-0.2.25/tests/uds_stream.rs | 58 - .../rust/tokio-stream/.cargo-checksum.json | 1 + third_party/rust/tokio-stream/CHANGELOG.md | 89 + third_party/rust/tokio-stream/Cargo.toml | 61 + .../{bytes-0.5.6 => tokio-stream}/LICENSE | 2 +- .../src/stream => tokio-stream/src}/empty.rs | 4 +- .../src/stream => tokio-stream/src}/iter.rs | 21 +- third_party/rust/tokio-stream/src/lib.rs | 98 + third_party/rust/tokio-stream/src/macros.rs | 68 + .../src/stream => tokio-stream/src}/once.rs | 6 +- .../stream => tokio-stream/src}/pending.rs | 6 +- .../mod.rs => tokio-stream/src/stream_ext.rs} | 196 +- .../rust/tokio-stream/src/stream_ext/all.rs | 58 + .../rust/tokio-stream/src/stream_ext/any.rs | 58 + .../src/stream_ext}/chain.rs | 3 +- .../tokio-stream/src/stream_ext/collect.rs | 233 ++ .../src/stream_ext}/filter.rs | 2 +- .../src/stream_ext}/filter_map.rs | 2 +- .../src/stream_ext}/fold.rs | 8 +- .../src/stream_ext}/fuse.rs | 2 +- .../src/stream_ext}/map.rs | 2 +- 
.../src/stream_ext}/merge.rs | 3 +- .../rust/tokio-stream/src/stream_ext/next.rs | 37 + .../src/stream_ext}/skip.rs | 2 +- .../src/stream_ext}/skip_while.rs | 2 +- .../src/stream_ext}/take.rs | 2 +- .../src/stream_ext}/take_while.rs | 2 +- .../src/stream_ext}/throttle.rs | 65 +- .../tokio-stream/src/stream_ext/timeout.rs | 107 + .../tokio-stream/src/stream_ext/try_next.rs | 39 + .../stream => tokio-stream/src}/stream_map.rs | 236 +- third_party/rust/tokio-stream/src/wrappers.rs | 62 + .../tokio-stream/src/wrappers/broadcast.rs | 79 + .../tokio-stream/src/wrappers/interval.rs | 50 + .../rust/tokio-stream/src/wrappers/lines.rs | 60 + .../tokio-stream/src/wrappers/mpsc_bounded.rs | 65 + .../src/wrappers/mpsc_unbounded.rs | 59 + .../tokio-stream/src/wrappers/read_dir.rs | 47 + .../tokio-stream/src/wrappers/signal_unix.rs | 46 + .../src/wrappers/signal_windows.rs | 88 + .../rust/tokio-stream/src/wrappers/split.rs | 60 + .../tokio-stream/src/wrappers/tcp_listener.rs | 54 + .../src/wrappers/unix_listener.rs | 54 + .../rust/tokio-stream/src/wrappers/watch.rs | 102 + .../tokio-stream/tests/async_send_sync.rs | 107 + .../tests/stream_chain.rs | 19 +- .../tests/stream_collect.rs | 56 +- .../tests/stream_empty.rs | 2 +- .../tests/stream_fuse.rs | 2 +- .../tests/stream_iter.rs | 2 +- .../tests/stream_merge.rs | 19 +- .../tests/stream_once.rs | 2 +- .../tests/stream_pending.rs | 2 +- .../tests/stream_stream_map.rs | 46 +- .../tests/stream_timeout.rs | 16 +- .../rust/tokio-stream/tests/support/mpsc.rs | 15 + .../tests/time_throttle.rs | 8 +- third_party/rust/tokio-stream/tests/watch.rs | 29 + .../rust/tokio-util/.cargo-checksum.json | 2 +- third_party/rust/tokio-util/CHANGELOG.md | 231 +- third_party/rust/tokio-util/Cargo.toml | 87 +- third_party/rust/tokio-util/LICENSE | 2 +- third_party/rust/tokio-util/README.md | 2 +- third_party/rust/tokio-util/src/cfg.rs | 48 +- .../src/codec/any_delimiter_codec.rs | 263 ++ .../rust/tokio-util/src/codec/bytes_codec.rs | 12 +- .../rust/tokio-util/src/codec/decoder.rs | 23 +- .../rust/tokio-util/src/codec/framed.rs | 281 +- .../rust/tokio-util/src/codec/framed_impl.rs | 308 ++ .../rust/tokio-util/src/codec/framed_read.rs | 255 +- .../rust/tokio-util/src/codec/framed_write.rs | 284 +- .../tokio-util/src/codec/length_delimited.rs | 141 +- .../rust/tokio-util/src/codec/lines_codec.rs | 6 +- third_party/rust/tokio-util/src/codec/mod.rs | 262 +- third_party/rust/tokio-util/src/compat.rs | 95 +- third_party/rust/tokio-util/src/context.rs | 190 ++ third_party/rust/tokio-util/src/either.rs | 188 ++ third_party/rust/tokio-util/src/io/mod.rs | 24 + .../rust/tokio-util/src/io/read_buf.rs | 65 + .../rust/tokio-util/src/io/reader_stream.rs | 118 + .../rust/tokio-util/src/io/stream_reader.rs | 203 ++ .../rust/tokio-util/src/io/sync_bridge.rs | 103 + third_party/rust/tokio-util/src/lib.rs | 170 +- third_party/rust/tokio-util/src/loom.rs | 1 + third_party/rust/tokio-util/src/net/mod.rs | 97 + .../rust/tokio-util/src/net/unix/mod.rs | 18 + .../tokio-util/src/sync/cancellation_token.rs | 224 ++ .../src/sync/cancellation_token/guard.rs | 27 + .../src/sync/cancellation_token/tree_node.rs | 373 ++ third_party/rust/tokio-util/src/sync/mod.rs | 13 + third_party/rust/tokio-util/src/sync/mpsc.rs | 283 ++ .../tokio-util/src/sync/poll_semaphore.rs | 136 + .../rust/tokio-util/src/sync/reusable_box.rs | 148 + .../src/sync/tests/loom_cancellation_token.rs | 0 .../rust/tokio-util/src/sync/tests/mod.rs | 1 + third_party/rust/tokio-util/src/task/mod.rs | 4 + 
.../rust/tokio-util/src/task/spawn_pinned.rs | 307 ++ .../src/time/delay_queue.rs | 646 +++- third_party/rust/tokio-util/src/time/mod.rs | 47 + .../src/time/wheel/level.rs | 6 +- .../src/time/wheel/mod.rs | 68 +- .../src/time/wheel/stack.rs | 4 +- third_party/rust/tokio-util/src/udp/frame.rs | 150 +- third_party/rust/tokio-util/src/udp/mod.rs | 2 +- .../rust/tokio-util/tests/_require_full.rs | 2 + third_party/rust/tokio-util/tests/codecs.rs | 249 +- third_party/rust/tokio-util/tests/context.rs | 24 + third_party/rust/tokio-util/tests/framed.rs | 73 +- .../rust/tokio-util/tests/framed_read.rs | 90 +- .../rust/tokio-util/tests/framed_stream.rs | 38 + .../rust/tokio-util/tests/framed_write.rs | 39 + .../tests/io_reader_stream.rs | 21 +- .../tests/io_stream_reader.rs} | 10 +- .../rust/tokio-util/tests/io_sync_bridge.rs | 43 + .../rust/tokio-util/tests/length_delimited.rs | 33 +- third_party/rust/tokio-util/tests/mpsc.rs | 239 ++ .../rust/tokio-util/tests/poll_semaphore.rs | 36 + .../rust/tokio-util/tests/reusable_box.rs | 72 + .../rust/tokio-util/tests/spawn_pinned.rs | 193 ++ .../tests/sync_cancellation_token.rs | 400 +++ .../tests/time_delay_queue.rs | 377 ++- third_party/rust/tokio-util/tests/udp.rs | 59 +- .../tracing-attributes/.cargo-checksum.json | 1 + .../rust/tracing-attributes/CHANGELOG.md | 283 ++ .../rust/tracing-attributes/Cargo.toml | 81 + .../LICENSE | 0 third_party/rust/tracing-attributes/README.md | 91 + .../rust/tracing-attributes/src/attr.rs | 413 +++ .../rust/tracing-attributes/src/expand.rs | 756 +++++ .../rust/tracing-attributes/src/lib.rs | 656 ++++ .../rust/tracing-attributes/tests/async_fn.rs | 449 +++ .../tracing-attributes/tests/destructuring.rs | 213 ++ .../rust/tracing-attributes/tests/err.rs | 200 ++ .../rust/tracing-attributes/tests/fields.rs | 147 + .../tracing-attributes/tests/follows_from.rs | 99 + .../tracing-attributes/tests/instrument.rs | 243 ++ .../rust/tracing-attributes/tests/levels.rs | 96 + .../rust/tracing-attributes/tests/names.rs | 63 + .../rust/tracing-attributes/tests/parents.rs | 102 + .../rust/tracing-attributes/tests/ret.rs | 221 ++ .../rust/tracing-attributes/tests/targets.rs | 97 + .../rust/tracing-core/.cargo-checksum.json | 1 + third_party/rust/tracing-core/CHANGELOG.md | 397 +++ third_party/rust/tracing-core/Cargo.toml | 41 + third_party/rust/tracing-core/LICENSE | 25 + third_party/rust/tracing-core/README.md | 121 + third_party/rust/tracing-core/src/callsite.rs | 173 + .../rust/tracing-core/src/dispatcher.rs | 905 +++++ third_party/rust/tracing-core/src/event.rs | 128 + third_party/rust/tracing-core/src/field.rs | 1190 +++++++ .../rust/tracing-core/src/lazy_static/LICENSE | 26 + .../tracing-core/src/lazy_static/core_lazy.rs | 30 + .../rust/tracing-core/src/lazy_static/mod.rs | 89 + third_party/rust/tracing-core/src/lib.rs | 302 ++ third_party/rust/tracing-core/src/metadata.rs | 1062 ++++++ third_party/rust/tracing-core/src/parent.rs | 11 + third_party/rust/tracing-core/src/span.rs | 334 ++ .../src/spin}/LICENSE | 12 +- third_party/rust/tracing-core/src/spin/mod.rs | 7 + .../rust/tracing-core/src/spin/mutex.rs | 118 + .../rust/tracing-core/src/spin/once.rs | 158 + third_party/rust/tracing-core/src/stdlib.rs | 78 + .../rust/tracing-core/src/subscriber.rs | 750 ++++ .../rust/tracing-core/tests/common/mod.rs | 30 + .../rust/tracing-core/tests/dispatch.rs | 56 + .../tracing-core/tests/global_dispatch.rs | 34 + third_party/rust/tracing-core/tests/macros.rs | 48 + third_party/rust/tracing/.cargo-checksum.json | 1 + 
third_party/rust/tracing/CHANGELOG.md | 708 ++++ third_party/rust/tracing/Cargo.toml | 114 + third_party/rust/tracing/LICENSE | 25 + third_party/rust/tracing/README.md | 463 +++ .../rust/tracing/benches/global_subscriber.rs | 136 + .../rust/tracing/benches/no_subscriber.rs | 101 + .../rust/tracing/benches/subscriber.rs | 189 ++ third_party/rust/tracing/src/dispatcher.rs | 149 + third_party/rust/tracing/src/field.rs | 170 + third_party/rust/tracing/src/instrument.rs | 370 ++ third_party/rust/tracing/src/level_filters.rs | 94 + third_party/rust/tracing/src/lib.rs | 1183 +++++++ third_party/rust/tracing/src/macros.rs | 2500 ++++++++++++++ third_party/rust/tracing/src/span.rs | 1617 +++++++++ third_party/rust/tracing/src/stdlib.rs | 55 + third_party/rust/tracing/src/subscriber.rs | 68 + third_party/rust/tracing/tests/enabled.rs | 54 + third_party/rust/tracing/tests/event.rs | 476 +++ .../filter_caching_is_lexically_scoped.rs | 65 + ...s_are_not_reevaluated_for_the_same_span.rs | 70 + ...re_reevaluated_for_different_call_sites.rs | 80 + .../rust/tracing/tests/filters_dont_leak.rs | 81 + third_party/rust/tracing/tests/future_send.rs | 22 + .../rust/tracing/tests/macro_imports.rs | 23 + third_party/rust/tracing/tests/macros.rs | 963 ++++++ .../tests/macros_incompatible_concat.rs | 24 + .../tracing/tests/macros_redefined_core.rs | 18 + .../rust/tracing/tests/max_level_hint.rs | 37 + .../tracing/tests/multiple_max_level_hints.rs | 69 + .../rust/tracing/tests/no_subscriber.rs | 15 + .../tracing/tests/scoped_clobbers_default.rs | 35 + third_party/rust/tracing/tests/span.rs | 825 +++++ third_party/rust/tracing/tests/subscriber.rs | 130 + .../rust/urlencoding/.cargo-checksum.json | 1 - third_party/rust/urlencoding/README.md | 37 - third_party/rust/urlencoding/benches/bench.rs | 65 - third_party/rust/urlencoding/src/dec.rs | 100 - third_party/rust/urlencoding/src/enc.rs | 137 - third_party/rust/urlencoding/src/lib.rs | 82 - third_party/rust/warp/.cargo-checksum.json | 2 +- third_party/rust/warp/CHANGELOG.md | 55 + third_party/rust/warp/Cargo.lock | 1232 +++---- third_party/rust/warp/Cargo.toml | 86 +- third_party/rust/warp/README.md | 12 +- third_party/rust/warp/examples/README.md | 27 + third_party/rust/warp/examples/autoreload.rs | 1 - .../rust/warp/examples/custom_methods.rs | 61 + third_party/rust/warp/examples/futures.rs | 2 +- .../rust/warp/examples/handlebars_template.rs | 2 +- .../rust/warp/examples/query_string.rs | 59 + third_party/rust/warp/examples/rejections.rs | 38 +- third_party/rust/warp/examples/routing.rs | 16 +- third_party/rust/warp/examples/sse.rs | 13 +- third_party/rust/warp/examples/sse_chat.rs | 15 +- third_party/rust/warp/examples/tls.rs | 1 + third_party/rust/warp/examples/todos.rs | 6 +- third_party/rust/warp/examples/tracing.rs | 59 + third_party/rust/warp/examples/unix_socket.rs | 16 +- third_party/rust/warp/examples/websockets.rs | 2 +- .../rust/warp/examples/websockets_chat.rs | 67 +- third_party/rust/warp/examples/wrapping.rs | 31 + third_party/rust/warp/src/error.rs | 20 +- third_party/rust/warp/src/filter/and.rs | 76 +- third_party/rust/warp/src/filter/and_then.rs | 65 +- third_party/rust/warp/src/filter/boxed.rs | 4 +- third_party/rust/warp/src/filter/map.rs | 4 +- third_party/rust/warp/src/filter/map_err.rs | 4 +- third_party/rust/warp/src/filter/mod.rs | 58 +- third_party/rust/warp/src/filter/or.rs | 16 +- third_party/rust/warp/src/filter/or_else.rs | 16 +- third_party/rust/warp/src/filter/recover.rs | 16 +- third_party/rust/warp/src/filter/service.rs | 8 +- 
third_party/rust/warp/src/filter/then.rs | 95 + third_party/rust/warp/src/filter/unify.rs | 13 +- .../rust/warp/src/filter/untuple_one.rs | 4 +- third_party/rust/warp/src/filter/wrap.rs | 40 + third_party/rust/warp/src/filters/addr.rs | 2 +- third_party/rust/warp/src/filters/any.rs | 2 +- third_party/rust/warp/src/filters/body.rs | 52 +- .../rust/warp/src/filters/compression.rs | 47 +- third_party/rust/warp/src/filters/cookie.rs | 29 +- third_party/rust/warp/src/filters/cors.rs | 27 +- third_party/rust/warp/src/filters/ext.rs | 2 +- third_party/rust/warp/src/filters/fs.rs | 89 +- third_party/rust/warp/src/filters/header.rs | 18 +- third_party/rust/warp/src/filters/host.rs | 96 + third_party/rust/warp/src/filters/log.rs | 18 +- third_party/rust/warp/src/filters/method.rs | 4 +- third_party/rust/warp/src/filters/mod.rs | 4 +- .../rust/warp/src/filters/multipart.rs | 16 +- third_party/rust/warp/src/filters/path.rs | 33 +- third_party/rust/warp/src/filters/query.rs | 58 +- third_party/rust/warp/src/filters/sse.rs | 496 +-- third_party/rust/warp/src/filters/trace.rs | 305 ++ third_party/rust/warp/src/filters/ws.rs | 128 +- third_party/rust/warp/src/generic.rs | 13 + third_party/rust/warp/src/lib.rs | 12 +- third_party/rust/warp/src/redirect.rs | 70 +- third_party/rust/warp/src/reject.rs | 91 +- third_party/rust/warp/src/reply.rs | 27 +- third_party/rust/warp/src/route.rs | 12 +- third_party/rust/warp/src/server.rs | 137 +- third_party/rust/warp/src/test.rs | 102 +- third_party/rust/warp/src/tls.rs | 116 +- third_party/rust/warp/src/transport.rs | 6 +- third_party/rust/warp/tests/body.rs | 4 +- third_party/rust/warp/tests/cookie.rs | 6 +- third_party/rust/warp/tests/cors.rs | 2 +- third_party/rust/warp/tests/fs.rs | 49 +- third_party/rust/warp/tests/host.rs | 87 + third_party/rust/warp/tests/multipart.rs | 2 +- third_party/rust/warp/tests/path.rs | 19 +- third_party/rust/warp/tests/redirect.rs | 44 + third_party/rust/warp/tests/tracing.rs | 48 + third_party/rust/warp/tests/ws.rs | 84 +- 1119 files changed, 64078 insertions(+), 93118 deletions(-) create mode 100644 build/rust/tokio-util/Cargo.toml create mode 100644 build/rust/tokio-util/lib.rs delete mode 100644 third_party/rust/bytes-0.5.6/.cargo-checksum.json delete mode 100644 third_party/rust/bytes-0.5.6/CHANGELOG.md delete mode 100644 third_party/rust/bytes-0.5.6/Cargo.toml delete mode 100644 third_party/rust/bytes-0.5.6/README.md delete mode 100644 third_party/rust/bytes-0.5.6/benches/buf.rs delete mode 100644 third_party/rust/bytes-0.5.6/benches/bytes.rs delete mode 100644 third_party/rust/bytes-0.5.6/benches/bytes_mut.rs delete mode 100644 third_party/rust/bytes-0.5.6/ci/test-stable.sh delete mode 100644 third_party/rust/bytes-0.5.6/ci/tsan.sh delete mode 100644 third_party/rust/bytes-0.5.6/src/buf/buf_impl.rs delete mode 100644 third_party/rust/bytes-0.5.6/src/buf/buf_mut.rs delete mode 100644 third_party/rust/bytes-0.5.6/src/buf/ext/chain.rs delete mode 100644 third_party/rust/bytes-0.5.6/src/buf/ext/limit.rs delete mode 100644 third_party/rust/bytes-0.5.6/src/buf/ext/mod.rs delete mode 100644 third_party/rust/bytes-0.5.6/src/buf/ext/reader.rs delete mode 100644 third_party/rust/bytes-0.5.6/src/buf/ext/take.rs delete mode 100644 third_party/rust/bytes-0.5.6/src/buf/ext/writer.rs delete mode 100644 third_party/rust/bytes-0.5.6/src/buf/iter.rs delete mode 100644 third_party/rust/bytes-0.5.6/src/buf/mod.rs delete mode 100644 third_party/rust/bytes-0.5.6/src/buf/vec_deque.rs delete mode 100644 third_party/rust/bytes-0.5.6/src/bytes.rs 
delete mode 100644 third_party/rust/bytes-0.5.6/src/bytes_mut.rs delete mode 100644 third_party/rust/bytes-0.5.6/src/fmt/debug.rs delete mode 100644 third_party/rust/bytes-0.5.6/src/fmt/hex.rs delete mode 100644 third_party/rust/bytes-0.5.6/src/fmt/mod.rs delete mode 100644 third_party/rust/bytes-0.5.6/src/lib.rs delete mode 100644 third_party/rust/bytes-0.5.6/src/loom.rs delete mode 100644 third_party/rust/bytes-0.5.6/src/serde.rs delete mode 100644 third_party/rust/bytes-0.5.6/tests/test_buf.rs delete mode 100644 third_party/rust/bytes-0.5.6/tests/test_buf_mut.rs delete mode 100644 third_party/rust/bytes-0.5.6/tests/test_bytes.rs delete mode 100644 third_party/rust/bytes-0.5.6/tests/test_bytes_odd_alloc.rs delete mode 100644 third_party/rust/bytes-0.5.6/tests/test_bytes_vec_alloc.rs delete mode 100644 third_party/rust/bytes-0.5.6/tests/test_chain.rs delete mode 100644 third_party/rust/bytes-0.5.6/tests/test_debug.rs delete mode 100644 third_party/rust/bytes-0.5.6/tests/test_iter.rs delete mode 100644 third_party/rust/bytes-0.5.6/tests/test_reader.rs delete mode 100644 third_party/rust/bytes-0.5.6/tests/test_serde.rs delete mode 100644 third_party/rust/bytes-0.5.6/tests/test_take.rs create mode 100644 third_party/rust/form_urlencoded/.cargo-checksum.json rename third_party/rust/{urlencoding => form_urlencoded}/Cargo.toml (52%) create mode 100644 third_party/rust/form_urlencoded/LICENSE-APACHE rename third_party/rust/{pin-project-lite-0.1.12 => form_urlencoded}/LICENSE-MIT (95%) create mode 100644 third_party/rust/form_urlencoded/src/lib.rs create mode 100644 third_party/rust/h2/src/ext.rs create mode 100644 third_party/rust/h2/src/fuzz_bridge.rs create mode 100644 third_party/rust/http-body/src/combinators/box_body.rs create mode 100644 third_party/rust/http-body/src/combinators/map_data.rs create mode 100644 third_party/rust/http-body/src/combinators/map_err.rs create mode 100644 third_party/rust/http-body/src/combinators/mod.rs create mode 100644 third_party/rust/http-body/src/empty.rs create mode 100644 third_party/rust/http-body/src/full.rs create mode 100644 third_party/rust/http-body/src/limited.rs create mode 100644 third_party/rust/hyper/src/body/length.rs create mode 100644 third_party/rust/hyper/src/cfg.rs create mode 100644 third_party/rust/hyper/src/client/client.rs create mode 100644 third_party/rust/hyper/src/common/date.rs create mode 100644 third_party/rust/hyper/src/ext.rs create mode 100644 third_party/rust/hyper/src/ffi/body.rs create mode 100644 third_party/rust/hyper/src/ffi/client.rs create mode 100644 third_party/rust/hyper/src/ffi/error.rs create mode 100644 third_party/rust/hyper/src/ffi/http_types.rs create mode 100644 third_party/rust/hyper/src/ffi/io.rs create mode 100644 third_party/rust/hyper/src/ffi/macros.rs create mode 100644 third_party/rust/hyper/src/ffi/mod.rs create mode 100644 third_party/rust/hyper/src/ffi/task.rs delete mode 100644 third_party/rust/hyper/src/proto/h1/date.rs create mode 100644 third_party/rust/hyper/src/server/server.rs delete mode 100644 third_party/rust/pin-project-internal/build.rs create mode 100644 third_party/rust/pin-project-internal/src/pin_project/args.rs delete mode 100644 third_party/rust/pin-project-internal/src/project.rs delete mode 100644 third_party/rust/pin-project-lite-0.1.12/.cargo-checksum.json delete mode 100644 third_party/rust/pin-project-lite-0.1.12/CHANGELOG.md delete mode 100644 third_party/rust/pin-project-lite-0.1.12/Cargo.toml delete mode 100644 third_party/rust/pin-project-lite-0.1.12/LICENSE-APACHE 
delete mode 100644 third_party/rust/pin-project-lite-0.1.12/README.md delete mode 100644 third_party/rust/pin-project-lite-0.1.12/src/lib.rs delete mode 100644 third_party/rust/pin-project-lite-0.1.12/tests/compiletest.rs delete mode 100644 third_party/rust/pin-project-lite-0.1.12/tests/include/basic.rs delete mode 100644 third_party/rust/pin-project-lite-0.1.12/tests/lint.rs delete mode 100644 third_party/rust/pin-project-lite-0.1.12/tests/proper_unpin.rs delete mode 100644 third_party/rust/pin-project-lite-0.1.12/tests/test.rs delete mode 100644 third_party/rust/pin-project-lite-0.1.12/tests/ui/conflict-drop.rs delete mode 100644 third_party/rust/pin-project-lite-0.1.12/tests/ui/conflict-drop.stderr delete mode 100644 third_party/rust/pin-project-lite-0.1.12/tests/ui/conflict-unpin.rs delete mode 100644 third_party/rust/pin-project-lite-0.1.12/tests/ui/conflict-unpin.stderr delete mode 100644 third_party/rust/pin-project-lite-0.1.12/tests/ui/invalid-bounds.rs delete mode 100644 third_party/rust/pin-project-lite-0.1.12/tests/ui/invalid-bounds.stderr delete mode 100644 third_party/rust/pin-project-lite-0.1.12/tests/ui/invalid.rs delete mode 100644 third_party/rust/pin-project-lite-0.1.12/tests/ui/invalid.stderr delete mode 100644 third_party/rust/pin-project-lite-0.1.12/tests/ui/overlapping_lifetime_names.rs delete mode 100644 third_party/rust/pin-project-lite-0.1.12/tests/ui/overlapping_lifetime_names.stderr delete mode 100644 third_party/rust/pin-project-lite-0.1.12/tests/ui/overlapping_unpin_struct.rs delete mode 100644 third_party/rust/pin-project-lite-0.1.12/tests/ui/overlapping_unpin_struct.stderr delete mode 100644 third_party/rust/pin-project-lite-0.1.12/tests/ui/packed.rs delete mode 100644 third_party/rust/pin-project-lite-0.1.12/tests/ui/packed.stderr delete mode 100644 third_party/rust/pin-project-lite-0.1.12/tests/ui/unpin_sneaky.rs delete mode 100644 third_party/rust/pin-project-lite-0.1.12/tests/ui/unpin_sneaky.stderr delete mode 100644 third_party/rust/pin-project-lite-0.1.12/tests/ui/unsupported.rs delete mode 100644 third_party/rust/pin-project-lite-0.1.12/tests/ui/unsupported.stderr create mode 100644 third_party/rust/pin-project/tests/README.md rename third_party/rust/{pin-project-lite-0.1.12 => pin-project}/tests/auxiliary/mod.rs (100%) create mode 100644 third_party/rust/pin-project/tests/expand/default/enum.expanded.rs create mode 100644 third_party/rust/pin-project/tests/expand/default/enum.rs create mode 100644 third_party/rust/pin-project/tests/expand/default/struct.expanded.rs create mode 100644 third_party/rust/pin-project/tests/expand/default/struct.rs create mode 100644 third_party/rust/pin-project/tests/expand/default/tuple_struct.expanded.rs create mode 100644 third_party/rust/pin-project/tests/expand/default/tuple_struct.rs create mode 100644 third_party/rust/pin-project/tests/expand/multifields/enum.expanded.rs create mode 100644 third_party/rust/pin-project/tests/expand/multifields/enum.rs create mode 100644 third_party/rust/pin-project/tests/expand/multifields/struct.expanded.rs create mode 100644 third_party/rust/pin-project/tests/expand/multifields/struct.rs create mode 100644 third_party/rust/pin-project/tests/expand/multifields/tuple_struct.expanded.rs create mode 100644 third_party/rust/pin-project/tests/expand/multifields/tuple_struct.rs create mode 100644 third_party/rust/pin-project/tests/expand/naming/enum-all.expanded.rs create mode 100644 third_party/rust/pin-project/tests/expand/naming/enum-all.rs create mode 100644 
third_party/rust/pin-project/tests/expand/naming/enum-mut.expanded.rs create mode 100644 third_party/rust/pin-project/tests/expand/naming/enum-mut.rs create mode 100644 third_party/rust/pin-project/tests/expand/naming/enum-none.expanded.rs create mode 100644 third_party/rust/pin-project/tests/expand/naming/enum-none.rs create mode 100644 third_party/rust/pin-project/tests/expand/naming/enum-own.expanded.rs create mode 100644 third_party/rust/pin-project/tests/expand/naming/enum-own.rs create mode 100644 third_party/rust/pin-project/tests/expand/naming/enum-ref.expanded.rs create mode 100644 third_party/rust/pin-project/tests/expand/naming/enum-ref.rs create mode 100644 third_party/rust/pin-project/tests/expand/naming/struct-all.expanded.rs create mode 100644 third_party/rust/pin-project/tests/expand/naming/struct-all.rs create mode 100644 third_party/rust/pin-project/tests/expand/naming/struct-mut.expanded.rs create mode 100644 third_party/rust/pin-project/tests/expand/naming/struct-mut.rs create mode 100644 third_party/rust/pin-project/tests/expand/naming/struct-none.expanded.rs create mode 100644 third_party/rust/pin-project/tests/expand/naming/struct-none.rs create mode 100644 third_party/rust/pin-project/tests/expand/naming/struct-own.expanded.rs create mode 100644 third_party/rust/pin-project/tests/expand/naming/struct-own.rs create mode 100644 third_party/rust/pin-project/tests/expand/naming/struct-ref.expanded.rs create mode 100644 third_party/rust/pin-project/tests/expand/naming/struct-ref.rs create mode 100644 third_party/rust/pin-project/tests/expand/naming/tuple_struct-all.expanded.rs create mode 100644 third_party/rust/pin-project/tests/expand/naming/tuple_struct-all.rs create mode 100644 third_party/rust/pin-project/tests/expand/naming/tuple_struct-mut.expanded.rs create mode 100644 third_party/rust/pin-project/tests/expand/naming/tuple_struct-mut.rs create mode 100644 third_party/rust/pin-project/tests/expand/naming/tuple_struct-none.expanded.rs create mode 100644 third_party/rust/pin-project/tests/expand/naming/tuple_struct-none.rs create mode 100644 third_party/rust/pin-project/tests/expand/naming/tuple_struct-own.expanded.rs create mode 100644 third_party/rust/pin-project/tests/expand/naming/tuple_struct-own.rs create mode 100644 third_party/rust/pin-project/tests/expand/naming/tuple_struct-ref.expanded.rs create mode 100644 third_party/rust/pin-project/tests/expand/naming/tuple_struct-ref.rs create mode 100644 third_party/rust/pin-project/tests/expand/not_unpin/enum.expanded.rs create mode 100644 third_party/rust/pin-project/tests/expand/not_unpin/enum.rs create mode 100644 third_party/rust/pin-project/tests/expand/not_unpin/struct.expanded.rs create mode 100644 third_party/rust/pin-project/tests/expand/not_unpin/struct.rs create mode 100644 third_party/rust/pin-project/tests/expand/not_unpin/tuple_struct.expanded.rs create mode 100644 third_party/rust/pin-project/tests/expand/not_unpin/tuple_struct.rs create mode 100644 third_party/rust/pin-project/tests/expand/pinned_drop/enum.expanded.rs create mode 100644 third_party/rust/pin-project/tests/expand/pinned_drop/enum.rs create mode 100644 third_party/rust/pin-project/tests/expand/pinned_drop/struct.expanded.rs create mode 100644 third_party/rust/pin-project/tests/expand/pinned_drop/struct.rs create mode 100644 third_party/rust/pin-project/tests/expand/pinned_drop/tuple_struct.expanded.rs create mode 100644 third_party/rust/pin-project/tests/expand/pinned_drop/tuple_struct.rs create mode 100644 
third_party/rust/pin-project/tests/expand/project_replace/enum.expanded.rs create mode 100644 third_party/rust/pin-project/tests/expand/project_replace/enum.rs create mode 100644 third_party/rust/pin-project/tests/expand/project_replace/struct.expanded.rs create mode 100644 third_party/rust/pin-project/tests/expand/project_replace/struct.rs create mode 100644 third_party/rust/pin-project/tests/expand/project_replace/tuple_struct.expanded.rs create mode 100644 third_party/rust/pin-project/tests/expand/project_replace/tuple_struct.rs create mode 100644 third_party/rust/pin-project/tests/expand/pub/enum.expanded.rs create mode 100644 third_party/rust/pin-project/tests/expand/pub/enum.rs create mode 100644 third_party/rust/pin-project/tests/expand/pub/struct.expanded.rs create mode 100644 third_party/rust/pin-project/tests/expand/pub/struct.rs create mode 100644 third_party/rust/pin-project/tests/expand/pub/tuple_struct.expanded.rs create mode 100644 third_party/rust/pin-project/tests/expand/pub/tuple_struct.rs create mode 100644 third_party/rust/pin-project/tests/expand/unsafe_unpin/enum.expanded.rs create mode 100644 third_party/rust/pin-project/tests/expand/unsafe_unpin/enum.rs create mode 100644 third_party/rust/pin-project/tests/expand/unsafe_unpin/struct.expanded.rs create mode 100644 third_party/rust/pin-project/tests/expand/unsafe_unpin/struct.rs create mode 100644 third_party/rust/pin-project/tests/expand/unsafe_unpin/tuple_struct.expanded.rs create mode 100644 third_party/rust/pin-project/tests/expand/unsafe_unpin/tuple_struct.rs create mode 100644 third_party/rust/pin-project/tests/expandtest.rs delete mode 100644 third_party/rust/pin-project/tests/project.rs delete mode 100644 third_party/rust/pin-project/tests/project_if_attr.rs.in delete mode 100644 third_party/rust/pin-project/tests/project_ref.rs delete mode 100644 third_party/rust/pin-project/tests/project_replace.rs create mode 100644 third_party/rust/pin-project/tests/proper_unpin.rs delete mode 100644 third_party/rust/pin-project/tests/sized.rs delete mode 100644 third_party/rust/pin-project/tests/ui/cfg/cfg_attr-unpin.rs delete mode 100644 third_party/rust/pin-project/tests/ui/cfg/cfg_attr-unpin.stderr delete mode 100644 third_party/rust/pin-project/tests/ui/cfg/proper_unpin.rs delete mode 100644 third_party/rust/pin-project/tests/ui/cfg/proper_unpin.stderr delete mode 100644 third_party/rust/pin-project/tests/ui/not_unpin/assert-not-unpin.rs delete mode 100644 third_party/rust/pin-project/tests/ui/not_unpin/assert-not-unpin.stderr create mode 100644 third_party/rust/pin-project/tests/ui/pin_project/import_unnamed.rs create mode 100644 third_party/rust/pin-project/tests/ui/pin_project/import_unnamed.stderr create mode 100644 third_party/rust/pin-project/tests/ui/pin_project/override-priv-mod.rs create mode 100644 third_party/rust/pin-project/tests/ui/pin_project/override-priv-mod.stderr create mode 100644 third_party/rust/pin-project/tests/ui/pin_project/packed_sneaky-3.rs create mode 100644 third_party/rust/pin-project/tests/ui/pin_project/packed_sneaky-3.stderr delete mode 100644 third_party/rust/pin-project/tests/ui/pin_project/proper_unpin.rs delete mode 100644 third_party/rust/pin-project/tests/ui/pin_project/proper_unpin.stderr delete mode 100644 third_party/rust/pin-project/tests/ui/project/ambiguous-let.rs delete mode 100644 third_party/rust/pin-project/tests/ui/project/ambiguous-let.stderr delete mode 100644 third_party/rust/pin-project/tests/ui/project/deprecated.rs delete mode 100644 
third_party/rust/pin-project/tests/ui/project/deprecated.stderr delete mode 100644 third_party/rust/pin-project/tests/ui/project/invalid.rs delete mode 100644 third_party/rust/pin-project/tests/ui/project/invalid.stderr delete mode 100644 third_party/rust/pin-project/tests/ui/project/type-mismatch.rs delete mode 100644 third_party/rust/pin-project/tests/ui/project/type-mismatch.stderr delete mode 100644 third_party/rust/pin-project/tests/ui/project/use-public.rs delete mode 100644 third_party/rust/pin-project/tests/ui/project/use-public.stderr delete mode 100644 third_party/rust/pin-project/tests/ui/project/use.rs delete mode 100644 third_party/rust/pin-project/tests/ui/project/use.stderr delete mode 100644 third_party/rust/pin-project/tests/ui/unsafe_unpin/not-implement-unsafe-unpin.rs delete mode 100644 third_party/rust/pin-project/tests/ui/unsafe_unpin/not-implement-unsafe-unpin.stderr delete mode 100644 third_party/rust/pin-project/tests/ui/unsafe_unpin/proper_unpin.rs delete mode 100644 third_party/rust/pin-project/tests/ui/unsafe_unpin/proper_unpin.stderr delete mode 100644 third_party/rust/pin-project/tests/ui/unstable-features/run-pass/stmt_expr_attributes.rs delete mode 100644 third_party/rust/pin-project/tests/ui/unstable-features/stmt_expr_attributes-feature-gate.rs delete mode 100644 third_party/rust/pin-project/tests/ui/unstable-features/stmt_expr_attributes-feature-gate.stderr delete mode 100644 third_party/rust/serde_urlencoded/bors.toml delete mode 100644 third_party/rust/socket2/SO_ACCEPTCONN.patch delete mode 100644 third_party/rust/socket2/TODO delete mode 100755 third_party/rust/socket2/check_targets.bash delete mode 100644 third_party/rust/socket2/diff.patch create mode 100644 third_party/rust/socket2/src/sockref.rs delete mode 100644 third_party/rust/socket2/src/tests.rs delete mode 100644 third_party/rust/socket2/src/utils.rs delete mode 100644 third_party/rust/tokio-0.2.25/.cargo-checksum.json delete mode 100644 third_party/rust/tokio-0.2.25/CHANGELOG.md delete mode 100644 third_party/rust/tokio-0.2.25/Cargo.toml delete mode 100644 third_party/rust/tokio-0.2.25/README.md delete mode 100644 third_party/rust/tokio-0.2.25/src/coop.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/fs/canonicalize.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/fs/copy.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/fs/create_dir.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/fs/create_dir_all.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/fs/dir_builder.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/fs/file.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/fs/hard_link.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/fs/metadata.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/fs/mod.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/fs/open_options.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/fs/os/mod.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/fs/os/unix/dir_builder_ext.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/fs/os/unix/mod.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/fs/os/unix/open_options_ext.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/fs/os/unix/symlink.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/fs/os/windows/mod.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/fs/os/windows/symlink_dir.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/fs/os/windows/symlink_file.rs delete mode 100644 
third_party/rust/tokio-0.2.25/src/fs/read.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/fs/read_dir.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/fs/read_link.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/fs/read_to_string.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/fs/remove_dir.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/fs/remove_dir_all.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/fs/remove_file.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/fs/rename.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/fs/set_permissions.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/fs/symlink_metadata.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/fs/write.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/future/maybe_done.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/future/mod.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/future/pending.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/future/poll_fn.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/future/ready.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/future/try_join.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/io/async_buf_read.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/io/async_read.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/io/async_seek.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/io/async_write.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/io/blocking.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/io/driver/mod.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/io/driver/platform.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/io/driver/scheduled_io.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/io/mod.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/io/poll_evented.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/io/registration.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/io/seek.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/io/split.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/io/stderr.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/io/stdin.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/io/stdout.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/io/util/async_buf_read_ext.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/io/util/async_read_ext.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/io/util/async_seek_ext.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/io/util/async_write_ext.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/io/util/buf_reader.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/io/util/buf_stream.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/io/util/buf_writer.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/io/util/chain.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/io/util/copy.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/io/util/empty.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/io/util/flush.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/io/util/lines.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/io/util/mem.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/io/util/mod.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/io/util/read.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/io/util/read_buf.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/io/util/read_exact.rs delete mode 100644 
third_party/rust/tokio-0.2.25/src/io/util/read_int.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/io/util/read_line.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/io/util/read_to_end.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/io/util/read_to_string.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/io/util/read_until.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/io/util/reader_stream.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/io/util/repeat.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/io/util/shutdown.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/io/util/sink.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/io/util/split.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/io/util/stream_reader.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/io/util/take.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/io/util/write.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/io/util/write_all.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/io/util/write_buf.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/io/util/write_int.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/lib.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/loom/mocked.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/loom/mod.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/loom/std/atomic_ptr.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/loom/std/atomic_u16.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/loom/std/atomic_u32.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/loom/std/atomic_u64.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/loom/std/atomic_u8.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/loom/std/atomic_usize.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/loom/std/mod.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/loom/std/parking_lot.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/loom/std/unsafe_cell.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/macros/cfg.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/macros/join.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/macros/loom.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/macros/mod.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/macros/pin.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/macros/ready.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/macros/scoped_tls.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/macros/select.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/macros/support.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/macros/thread_local.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/macros/try_join.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/net/addr.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/net/lookup_host.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/net/mod.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/net/tcp/incoming.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/net/tcp/listener.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/net/tcp/mod.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/net/tcp/split.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/net/tcp/split_owned.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/net/tcp/stream.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/net/udp/mod.rs delete mode 100644 
third_party/rust/tokio-0.2.25/src/net/udp/socket.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/net/udp/split.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/net/unix/datagram/mod.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/net/unix/datagram/socket.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/net/unix/datagram/split.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/net/unix/datagram/split_owned.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/net/unix/incoming.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/net/unix/listener.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/net/unix/mod.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/net/unix/split.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/net/unix/split_owned.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/net/unix/stream.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/net/unix/ucred.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/park/either.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/park/mod.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/park/thread.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/prelude.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/process/kill.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/process/mod.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/process/unix/mod.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/process/unix/orphan.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/process/unix/reap.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/process/windows.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/runtime/basic_scheduler.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/runtime/blocking/mod.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/runtime/blocking/pool.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/runtime/blocking/schedule.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/runtime/blocking/shutdown.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/runtime/blocking/task.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/runtime/builder.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/runtime/context.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/runtime/enter.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/runtime/handle.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/runtime/io.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/runtime/mod.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/runtime/park.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/runtime/queue.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/runtime/shell.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/runtime/spawner.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/runtime/task/core.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/runtime/task/error.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/runtime/task/harness.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/runtime/task/join.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/runtime/task/mod.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/runtime/task/raw.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/runtime/task/stack.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/runtime/task/state.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/runtime/task/waker.rs delete mode 100644 
third_party/rust/tokio-0.2.25/src/runtime/tests/loom_blocking.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/runtime/tests/loom_oneshot.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/runtime/tests/loom_pool.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/runtime/tests/loom_queue.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/runtime/tests/mod.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/runtime/tests/queue.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/runtime/tests/task.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/runtime/thread_pool/atomic_cell.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/runtime/thread_pool/idle.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/runtime/thread_pool/mod.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/runtime/thread_pool/worker.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/runtime/time.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/signal/ctrl_c.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/signal/mod.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/signal/registry.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/signal/unix.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/signal/windows.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/stream/all.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/stream/any.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/stream/collect.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/stream/next.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/stream/timeout.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/stream/try_next.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/sync/barrier.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/sync/batch_semaphore.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/sync/broadcast.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/sync/cancellation_token.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/sync/mod.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/sync/mpsc/block.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/sync/mpsc/bounded.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/sync/mpsc/chan.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/sync/mpsc/error.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/sync/mpsc/list.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/sync/mpsc/mod.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/sync/mpsc/unbounded.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/sync/mutex.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/sync/notify.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/sync/oneshot.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/sync/rwlock.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/sync/semaphore.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/sync/semaphore_ll.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/sync/task/atomic_waker.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/sync/task/mod.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/sync/tests/atomic_waker.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/sync/tests/loom_atomic_waker.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/sync/tests/loom_broadcast.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/sync/tests/loom_list.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/sync/tests/loom_mpsc.rs delete mode 100644 
third_party/rust/tokio-0.2.25/src/sync/tests/loom_notify.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/sync/tests/loom_oneshot.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/sync/tests/loom_rwlock.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/sync/tests/loom_semaphore_batch.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/sync/tests/loom_semaphore_ll.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/sync/tests/mod.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/sync/tests/semaphore_batch.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/sync/tests/semaphore_ll.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/sync/watch.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/task/blocking.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/task/local.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/task/mod.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/task/spawn.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/task/task_local.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/task/yield_now.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/time/clock.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/time/delay.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/time/driver/atomic_stack.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/time/driver/entry.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/time/driver/handle.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/time/driver/mod.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/time/driver/registration.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/time/driver/stack.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/time/driver/tests/mod.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/time/error.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/time/instant.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/time/interval.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/time/mod.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/time/tests/mod.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/time/tests/test_delay.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/time/timeout.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/util/bit.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/util/intrusive_double_linked_list.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/util/linked_list.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/util/mod.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/util/pad.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/util/rand.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/util/slab/addr.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/util/slab/entry.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/util/slab/generation.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/util/slab/mod.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/util/slab/page.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/util/slab/shard.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/util/slab/slot.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/util/slab/stack.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/util/slab/tests/loom_slab.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/util/slab/tests/loom_stack.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/util/slab/tests/mod.rs delete mode 100644 
third_party/rust/tokio-0.2.25/src/util/trace.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/util/try_lock.rs delete mode 100644 third_party/rust/tokio-0.2.25/src/util/wake.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/_require_full.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/async_send_sync.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/buffered.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/fs.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/fs_copy.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/fs_dir.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/fs_file.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/fs_file_mocked.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/fs_link.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/io_async_read.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/io_chain.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/io_copy.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/io_driver.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/io_driver_drop.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/io_lines.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/io_mem_stream.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/io_read.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/io_read_exact.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/io_read_line.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/io_read_to_end.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/io_read_to_string.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/io_read_until.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/io_split.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/io_take.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/io_write.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/io_write_all.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/io_write_int.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/macros_join.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/macros_pin.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/macros_select.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/macros_test.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/macros_try_join.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/net_bind_resource.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/net_lookup_host.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/no_rt.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/process_issue_2174.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/process_issue_42.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/process_kill_on_drop.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/process_smoke.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/rt_basic.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/rt_common.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/rt_threaded.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/signal_ctrl_c.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/signal_drop_recv.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/signal_drop_rt.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/signal_drop_signal.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/signal_multi_rt.rs delete mode 100644 
third_party/rust/tokio-0.2.25/tests/signal_no_rt.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/signal_notify_both.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/signal_twice.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/signal_usr1.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/support/mock_file.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/support/mock_pool.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/support/signal.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/sync_barrier.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/sync_broadcast.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/sync_cancellation_token.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/sync_errors.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/sync_mpsc.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/sync_mutex.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/sync_mutex_owned.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/sync_notify.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/sync_oneshot.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/sync_rwlock.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/sync_semaphore.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/sync_semaphore_owned.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/sync_watch.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/task_blocking.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/task_local.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/task_local_set.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/tcp_accept.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/tcp_connect.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/tcp_echo.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/tcp_into_split.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/tcp_peek.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/tcp_shutdown.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/tcp_split.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/test_clock.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/time_delay.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/time_interval.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/time_rt.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/time_timeout.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/udp.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/uds_cred.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/uds_datagram.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/uds_split.rs delete mode 100644 third_party/rust/tokio-0.2.25/tests/uds_stream.rs create mode 100644 third_party/rust/tokio-stream/.cargo-checksum.json create mode 100644 third_party/rust/tokio-stream/CHANGELOG.md create mode 100644 third_party/rust/tokio-stream/Cargo.toml rename third_party/rust/{bytes-0.5.6 => tokio-stream}/LICENSE (96%) rename third_party/rust/{tokio-0.2.25/src/stream => tokio-stream/src}/empty.rs (93%) rename third_party/rust/{tokio-0.2.25/src/stream => tokio-stream/src}/iter.rs (74%) create mode 100644 third_party/rust/tokio-stream/src/lib.rs create mode 100644 third_party/rust/tokio-stream/src/macros.rs rename third_party/rust/{tokio-0.2.25/src/stream => tokio-stream/src}/once.rs (88%) rename third_party/rust/{tokio-0.2.25/src/stream => tokio-stream/src}/pending.rs (89%) rename 
third_party/rust/{tokio-0.2.25/src/stream/mod.rs => tokio-stream/src/stream_ext.rs} (82%) create mode 100644 third_party/rust/tokio-stream/src/stream_ext/all.rs create mode 100644 third_party/rust/tokio-stream/src/stream_ext/any.rs rename third_party/rust/{tokio-0.2.25/src/stream => tokio-stream/src/stream_ext}/chain.rs (95%) create mode 100644 third_party/rust/tokio-stream/src/stream_ext/collect.rs rename third_party/rust/{tokio-0.2.25/src/stream => tokio-stream/src/stream_ext}/filter.rs (98%) rename third_party/rust/{tokio-0.2.25/src/stream => tokio-stream/src/stream_ext}/filter_map.rs (98%) rename third_party/rust/{tokio-0.2.25/src/stream => tokio-stream/src/stream_ext}/fold.rs (80%) rename third_party/rust/{tokio-0.2.25/src/stream => tokio-stream/src/stream_ext}/fuse.rs (97%) rename third_party/rust/{tokio-0.2.25/src/stream => tokio-stream/src/stream_ext}/map.rs (97%) rename third_party/rust/{tokio-0.2.25/src/stream => tokio-stream/src/stream_ext}/merge.rs (97%) create mode 100644 third_party/rust/tokio-stream/src/stream_ext/next.rs rename third_party/rust/{tokio-0.2.25/src/stream => tokio-stream/src/stream_ext}/skip.rs (98%) rename third_party/rust/{tokio-0.2.25/src/stream => tokio-stream/src/stream_ext}/skip_while.rs (98%) rename third_party/rust/{tokio-0.2.25/src/stream => tokio-stream/src/stream_ext}/take.rs (98%) rename third_party/rust/{tokio-0.2.25/src/stream => tokio-stream/src/stream_ext}/take_while.rs (98%) rename third_party/rust/{tokio-0.2.25/src/time => tokio-stream/src/stream_ext}/throttle.rs (50%) create mode 100644 third_party/rust/tokio-stream/src/stream_ext/timeout.rs create mode 100644 third_party/rust/tokio-stream/src/stream_ext/try_next.rs rename third_party/rust/{tokio-0.2.25/src/stream => tokio-stream/src}/stream_map.rs (67%) create mode 100644 third_party/rust/tokio-stream/src/wrappers.rs create mode 100644 third_party/rust/tokio-stream/src/wrappers/broadcast.rs create mode 100644 third_party/rust/tokio-stream/src/wrappers/interval.rs create mode 100644 third_party/rust/tokio-stream/src/wrappers/lines.rs create mode 100644 third_party/rust/tokio-stream/src/wrappers/mpsc_bounded.rs create mode 100644 third_party/rust/tokio-stream/src/wrappers/mpsc_unbounded.rs create mode 100644 third_party/rust/tokio-stream/src/wrappers/read_dir.rs create mode 100644 third_party/rust/tokio-stream/src/wrappers/signal_unix.rs create mode 100644 third_party/rust/tokio-stream/src/wrappers/signal_windows.rs create mode 100644 third_party/rust/tokio-stream/src/wrappers/split.rs create mode 100644 third_party/rust/tokio-stream/src/wrappers/tcp_listener.rs create mode 100644 third_party/rust/tokio-stream/src/wrappers/unix_listener.rs create mode 100644 third_party/rust/tokio-stream/src/wrappers/watch.rs create mode 100644 third_party/rust/tokio-stream/tests/async_send_sync.rs rename third_party/rust/{tokio-0.2.25 => tokio-stream}/tests/stream_chain.rs (85%) rename third_party/rust/{tokio-0.2.25 => tokio-stream}/tests/stream_collect.rs (73%) rename third_party/rust/{tokio-0.2.25 => tokio-stream}/tests/stream_empty.rs (79%) rename third_party/rust/{tokio-0.2.25 => tokio-stream}/tests/stream_fuse.rs (96%) rename third_party/rust/{tokio-0.2.25 => tokio-stream}/tests/stream_iter.rs (91%) rename third_party/rust/{tokio-0.2.25 => tokio-stream}/tests/stream_merge.rs (81%) rename third_party/rust/{tokio-0.2.25 => tokio-stream}/tests/stream_once.rs (82%) rename third_party/rust/{tokio-0.2.25 => tokio-stream}/tests/stream_pending.rs (85%) rename third_party/rust/{tokio-0.2.25 => 
tokio-stream}/tests/stream_stream_map.rs (90%) rename third_party/rust/{tokio-0.2.25 => tokio-stream}/tests/stream_timeout.rs (88%) create mode 100644 third_party/rust/tokio-stream/tests/support/mpsc.rs rename third_party/rust/{tokio-0.2.25 => tokio-stream}/tests/time_throttle.rs (74%) create mode 100644 third_party/rust/tokio-stream/tests/watch.rs create mode 100644 third_party/rust/tokio-util/src/codec/any_delimiter_codec.rs create mode 100644 third_party/rust/tokio-util/src/codec/framed_impl.rs create mode 100644 third_party/rust/tokio-util/src/context.rs create mode 100644 third_party/rust/tokio-util/src/either.rs create mode 100644 third_party/rust/tokio-util/src/io/mod.rs create mode 100644 third_party/rust/tokio-util/src/io/read_buf.rs create mode 100644 third_party/rust/tokio-util/src/io/reader_stream.rs create mode 100644 third_party/rust/tokio-util/src/io/stream_reader.rs create mode 100644 third_party/rust/tokio-util/src/io/sync_bridge.rs create mode 100644 third_party/rust/tokio-util/src/loom.rs create mode 100644 third_party/rust/tokio-util/src/net/mod.rs create mode 100644 third_party/rust/tokio-util/src/net/unix/mod.rs create mode 100644 third_party/rust/tokio-util/src/sync/cancellation_token.rs create mode 100644 third_party/rust/tokio-util/src/sync/cancellation_token/guard.rs create mode 100644 third_party/rust/tokio-util/src/sync/cancellation_token/tree_node.rs create mode 100644 third_party/rust/tokio-util/src/sync/mod.rs create mode 100644 third_party/rust/tokio-util/src/sync/mpsc.rs create mode 100644 third_party/rust/tokio-util/src/sync/poll_semaphore.rs create mode 100644 third_party/rust/tokio-util/src/sync/reusable_box.rs rename third_party/rust/{tokio-0.2.25 => tokio-util}/src/sync/tests/loom_cancellation_token.rs (100%) create mode 100644 third_party/rust/tokio-util/src/sync/tests/mod.rs create mode 100644 third_party/rust/tokio-util/src/task/mod.rs create mode 100644 third_party/rust/tokio-util/src/task/spawn_pinned.rs rename third_party/rust/{tokio-0.2.25 => tokio-util}/src/time/delay_queue.rs (51%) create mode 100644 third_party/rust/tokio-util/src/time/mod.rs rename third_party/rust/{tokio-0.2.25 => tokio-util}/src/time/wheel/level.rs (99%) rename third_party/rust/{tokio-0.2.25 => tokio-util}/src/time/wheel/mod.rs (84%) rename third_party/rust/{tokio-0.2.25 => tokio-util}/src/time/wheel/stack.rs (91%) create mode 100644 third_party/rust/tokio-util/tests/_require_full.rs create mode 100644 third_party/rust/tokio-util/tests/context.rs create mode 100644 third_party/rust/tokio-util/tests/framed_stream.rs rename third_party/rust/{tokio-0.2.25 => tokio-util}/tests/io_reader_stream.rs (74%) rename third_party/rust/{tokio-0.2.25/tests/stream_reader.rs => tokio-util/tests/io_stream_reader.rs} (80%) create mode 100644 third_party/rust/tokio-util/tests/io_sync_bridge.rs create mode 100644 third_party/rust/tokio-util/tests/mpsc.rs create mode 100644 third_party/rust/tokio-util/tests/poll_semaphore.rs create mode 100644 third_party/rust/tokio-util/tests/reusable_box.rs create mode 100644 third_party/rust/tokio-util/tests/spawn_pinned.rs create mode 100644 third_party/rust/tokio-util/tests/sync_cancellation_token.rs rename third_party/rust/{tokio-0.2.25 => tokio-util}/tests/time_delay_queue.rs (53%) create mode 100644 third_party/rust/tracing-attributes/.cargo-checksum.json create mode 100644 third_party/rust/tracing-attributes/CHANGELOG.md create mode 100644 third_party/rust/tracing-attributes/Cargo.toml rename third_party/rust/{tokio-0.2.25 => 
tracing-attributes}/LICENSE (100%) create mode 100644 third_party/rust/tracing-attributes/README.md create mode 100644 third_party/rust/tracing-attributes/src/attr.rs create mode 100644 third_party/rust/tracing-attributes/src/expand.rs create mode 100644 third_party/rust/tracing-attributes/src/lib.rs create mode 100644 third_party/rust/tracing-attributes/tests/async_fn.rs create mode 100644 third_party/rust/tracing-attributes/tests/destructuring.rs create mode 100644 third_party/rust/tracing-attributes/tests/err.rs create mode 100644 third_party/rust/tracing-attributes/tests/fields.rs create mode 100644 third_party/rust/tracing-attributes/tests/follows_from.rs create mode 100644 third_party/rust/tracing-attributes/tests/instrument.rs create mode 100644 third_party/rust/tracing-attributes/tests/levels.rs create mode 100644 third_party/rust/tracing-attributes/tests/names.rs create mode 100644 third_party/rust/tracing-attributes/tests/parents.rs create mode 100644 third_party/rust/tracing-attributes/tests/ret.rs create mode 100644 third_party/rust/tracing-attributes/tests/targets.rs create mode 100644 third_party/rust/tracing-core/.cargo-checksum.json create mode 100644 third_party/rust/tracing-core/CHANGELOG.md create mode 100644 third_party/rust/tracing-core/Cargo.toml create mode 100644 third_party/rust/tracing-core/LICENSE create mode 100644 third_party/rust/tracing-core/README.md create mode 100644 third_party/rust/tracing-core/src/callsite.rs create mode 100644 third_party/rust/tracing-core/src/dispatcher.rs create mode 100644 third_party/rust/tracing-core/src/event.rs create mode 100644 third_party/rust/tracing-core/src/field.rs create mode 100644 third_party/rust/tracing-core/src/lazy_static/LICENSE create mode 100644 third_party/rust/tracing-core/src/lazy_static/core_lazy.rs create mode 100644 third_party/rust/tracing-core/src/lazy_static/mod.rs create mode 100644 third_party/rust/tracing-core/src/lib.rs create mode 100644 third_party/rust/tracing-core/src/metadata.rs create mode 100644 third_party/rust/tracing-core/src/parent.rs create mode 100644 third_party/rust/tracing-core/src/span.rs rename third_party/rust/{urlencoding => tracing-core/src/spin}/LICENSE (86%) create mode 100644 third_party/rust/tracing-core/src/spin/mod.rs create mode 100644 third_party/rust/tracing-core/src/spin/mutex.rs create mode 100644 third_party/rust/tracing-core/src/spin/once.rs create mode 100644 third_party/rust/tracing-core/src/stdlib.rs create mode 100644 third_party/rust/tracing-core/src/subscriber.rs create mode 100644 third_party/rust/tracing-core/tests/common/mod.rs create mode 100644 third_party/rust/tracing-core/tests/dispatch.rs create mode 100644 third_party/rust/tracing-core/tests/global_dispatch.rs create mode 100644 third_party/rust/tracing-core/tests/macros.rs create mode 100644 third_party/rust/tracing/.cargo-checksum.json create mode 100644 third_party/rust/tracing/CHANGELOG.md create mode 100644 third_party/rust/tracing/Cargo.toml create mode 100644 third_party/rust/tracing/LICENSE create mode 100644 third_party/rust/tracing/README.md create mode 100644 third_party/rust/tracing/benches/global_subscriber.rs create mode 100644 third_party/rust/tracing/benches/no_subscriber.rs create mode 100644 third_party/rust/tracing/benches/subscriber.rs create mode 100644 third_party/rust/tracing/src/dispatcher.rs create mode 100644 third_party/rust/tracing/src/field.rs create mode 100644 third_party/rust/tracing/src/instrument.rs create mode 100644 third_party/rust/tracing/src/level_filters.rs 
create mode 100644 third_party/rust/tracing/src/lib.rs create mode 100644 third_party/rust/tracing/src/macros.rs create mode 100644 third_party/rust/tracing/src/span.rs create mode 100644 third_party/rust/tracing/src/stdlib.rs create mode 100644 third_party/rust/tracing/src/subscriber.rs create mode 100644 third_party/rust/tracing/tests/enabled.rs create mode 100644 third_party/rust/tracing/tests/event.rs create mode 100644 third_party/rust/tracing/tests/filter_caching_is_lexically_scoped.rs create mode 100644 third_party/rust/tracing/tests/filters_are_not_reevaluated_for_the_same_span.rs create mode 100644 third_party/rust/tracing/tests/filters_are_reevaluated_for_different_call_sites.rs create mode 100644 third_party/rust/tracing/tests/filters_dont_leak.rs create mode 100644 third_party/rust/tracing/tests/future_send.rs create mode 100644 third_party/rust/tracing/tests/macro_imports.rs create mode 100644 third_party/rust/tracing/tests/macros.rs create mode 100644 third_party/rust/tracing/tests/macros_incompatible_concat.rs create mode 100644 third_party/rust/tracing/tests/macros_redefined_core.rs create mode 100644 third_party/rust/tracing/tests/max_level_hint.rs create mode 100644 third_party/rust/tracing/tests/multiple_max_level_hints.rs create mode 100644 third_party/rust/tracing/tests/no_subscriber.rs create mode 100644 third_party/rust/tracing/tests/scoped_clobbers_default.rs create mode 100644 third_party/rust/tracing/tests/span.rs create mode 100644 third_party/rust/tracing/tests/subscriber.rs delete mode 100644 third_party/rust/urlencoding/.cargo-checksum.json delete mode 100644 third_party/rust/urlencoding/README.md delete mode 100644 third_party/rust/urlencoding/benches/bench.rs delete mode 100644 third_party/rust/urlencoding/src/dec.rs delete mode 100644 third_party/rust/urlencoding/src/enc.rs delete mode 100644 third_party/rust/urlencoding/src/lib.rs create mode 100644 third_party/rust/warp/examples/custom_methods.rs create mode 100644 third_party/rust/warp/examples/query_string.rs create mode 100644 third_party/rust/warp/examples/tracing.rs create mode 100644 third_party/rust/warp/examples/wrapping.rs create mode 100644 third_party/rust/warp/src/filter/then.rs create mode 100644 third_party/rust/warp/src/filters/host.rs create mode 100644 third_party/rust/warp/src/filters/trace.rs create mode 100644 third_party/rust/warp/tests/host.rs create mode 100644 third_party/rust/warp/tests/tracing.rs diff --git a/Cargo.lock b/Cargo.lock index 430b1b6c15e6..e65828b431dd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -554,12 +554,6 @@ dependencies = [ "iovec", ] -[[package]] -name = "bytes" -version = "0.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e4cec68f03f32e44924783795810fa50a7035d8c8ebe78580ad7e6c703fba38" - [[package]] name = "bytes" version = "1.1.0" @@ -1741,6 +1735,16 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" +[[package]] +name = "form_urlencoded" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5fc25a87fa4fd2094bffb06925852034d90a17f0d1e05197d4956d3555752191" +dependencies = [ + "matches", + "percent-encoding", +] + [[package]] name = "freetype" version = "0.7.0" @@ -1866,7 +1870,7 @@ dependencies = [ "futures-sink", "futures-task", "memchr", - "pin-project-lite 0.2.9", + "pin-project-lite", "pin-utils", "slab", ] @@ -2302,21 +2306,21 @@ dependencies = [ [[package]] name 
= "h2" -version = "0.2.5" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79b7246d7e4b979c03fa093da39cfb3617a96bbeee6310af63991668d7e843ff" +checksum = "37a82c6d637fc9515a4694bbf1cb2457b79d81ce52b3108bdeea58b07dd34a57" dependencies = [ - "bytes 0.5.6", + "bytes 1.1.0", "fnv", "futures-core", "futures-sink", "futures-util", "http", "indexmap", - "log", "slab", - "tokio 0.2.25", - "tokio-util", + "tokio 1.17.0", + "tokio-util 0.7.2", + "tracing", ] [[package]] @@ -2386,23 +2390,24 @@ checksum = "dfa686283ad6dd069f105e5ab091b04c62850d3e4cf5d67debad1933f55023df" [[package]] name = "http" -version = "0.2.5" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1323096b05d41827dadeaee54c9981958c0f94e670bc94ed80037d1a7b8b186b" +checksum = "ff8670570af52249509a86f5e3e18a08c60b177071826898fde8997cf5f6bfbb" dependencies = [ "bytes 1.1.0", "fnv", - "itoa 0.4.999", + "itoa 1.0.2", ] [[package]] name = "http-body" -version = "0.3.1" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13d5ff830006f7646652e057693569bfe0d51760c0085a071769d142a205111b" +checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" dependencies = [ - "bytes 0.5.6", + "bytes 1.1.0", "http", + "pin-project-lite", ] [[package]] @@ -2452,11 +2457,11 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.13.6" +version = "0.14.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6e7655b9594024ad0ee439f3b5a7299369dc2a3f459b47c696f9ff676f9aa1f" +checksum = "b26ae0a80afebe130861d90abf98e3814a4f28a4c6ffeb5ab8ebb2be311e0ef2" dependencies = [ - "bytes 0.5.6", + "bytes 1.1.0", "futures-channel", "futures-core", "futures-util", @@ -2464,13 +2469,13 @@ dependencies = [ "http", "http-body", "httparse", - "itoa 0.4.999", - "log", - "pin-project", + "httpdate", + "itoa 1.0.2", + "pin-project-lite", "socket2", - "time 0.1.43", - "tokio 0.2.25", + "tokio 1.17.0", "tower-service", + "tracing", "want", ] @@ -2778,7 +2783,7 @@ dependencies = [ "fluent-fallback", "fluent-testing", "futures 0.3.21", - "pin-project-lite 0.2.9", + "pin-project-lite", "replace_with", "rustc-hash", "serial_test", @@ -3937,30 +3942,24 @@ dependencies = [ [[package]] name = "pin-project" -version = "0.4.29" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9615c18d31137579e9ff063499264ddc1278e7b1982757ebc111028c4d1dc909" +checksum = "58ad3879ad3baf4e44784bc6a718a8698867bb991f8ce24d1bcbe2cfb4c3a75e" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "0.4.29" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "044964427019eed9d49d9d5bbce6047ef18f37100ea400912a9fa4a3523ab12a" +checksum = "744b6f092ba29c3650faf274db506afd39944f48420f6c86b17cfe0ee1cb36bb" dependencies = [ "proc-macro2", "quote", "syn", ] -[[package]] -name = "pin-project-lite" -version = "0.1.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "257b64915a082f7811703966789728173279bdebb956b143dbcd23f6f970a777" - [[package]] name = "pin-project-lite" version = "0.2.9" @@ -4602,14 +4601,14 @@ dependencies = [ [[package]] name = "serde_urlencoded" -version = "0.6.1" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9ec5d77e2d4c73717816afac02670d5c4f534ea95ed430442cad02e7a6e32c97" +checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" dependencies = [ - "dtoa", - "itoa 0.4.999", + "form_urlencoded", + "itoa 1.0.2", + "ryu", "serde", - "url", ] [[package]] @@ -4757,11 +4756,10 @@ dependencies = [ [[package]] name = "socket2" -version = "0.3.19" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "122e570113d28d773067fab24266b66753f6ea915758651696b6e35e49f88d6e" +checksum = "66d72b759436ae32898a2af0a14218dbf55efde3feeb170eb623637db85ee1e0" dependencies = [ - "cfg-if 1.0.0", "libc", "winapi", ] @@ -5153,23 +5151,6 @@ dependencies = [ "tokio-uds", ] -[[package]] -name = "tokio" -version = "0.2.25" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6703a273949a90131b290be1fe7b039d0fc884aa1935860dfcbe056f28cd8092" -dependencies = [ - "bytes 0.5.6", - "fnv", - "futures-core", - "iovec", - "lazy_static", - "memchr", - "mio 0.6.23", - "pin-project-lite 0.1.12", - "slab", -] - [[package]] name = "tokio" version = "1.17.0" @@ -5177,10 +5158,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2af73ac49756f3f7c01172e34a23e5d0216f6c32333757c2c61feb2bbff5a5ee" dependencies = [ "bytes 1.1.0", + "libc", "memchr", + "mio 0.8.0", "num_cpus", - "pin-project-lite 0.2.9", + "pin-project-lite", + "socket2", "tokio-macros", + "winapi", ] [[package]] @@ -5261,6 +5246,17 @@ dependencies = [ "tokio-io", ] +[[package]] +name = "tokio-stream" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50145484efff8818b5ccd256697f36863f587da82cf8b409c53adf1e840798e3" +dependencies = [ + "futures-core", + "pin-project-lite", + "tokio 1.17.0", +] + [[package]] name = "tokio-tcp" version = "0.1.4" @@ -5339,16 +5335,23 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be8242891f2b6cbef26a2d7e8605133c2c554cd35b3e4948ea892d6d68436499" +version = "0.6.999" dependencies = [ - "bytes 0.5.6", + "tokio-util 0.7.2", +] + +[[package]] +name = "tokio-util" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f988a1a1adc2fb21f9c12aa96441da33a1728193ae0b95d2be22dbd17fcb4e5c" +dependencies = [ + "bytes 1.1.0", "futures-core", "futures-sink", - "log", - "pin-project-lite 0.1.12", - "tokio 0.2.25", + "pin-project-lite", + "tokio 1.17.0", + "tracing", ] [[package]] @@ -5372,6 +5375,39 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" +[[package]] +name = "tracing" +version = "0.1.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d0ecdcb44a79f0fe9844f0c4f33a342cbcbb5117de8001e6ba0dc2351327d09" +dependencies = [ + "cfg-if 1.0.0", + "log", + "pin-project-lite", + "tracing-attributes", + "tracing-core", +] + +[[package]] +name = "tracing-attributes" +version = "0.1.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc6b8ad3567499f98a1db7a752b07a7c8c7c7c34c332ec00effb2b0027974b7c" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tracing-core" +version = "0.1.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f54c8ca710e81886d498c2fd3331b56c93aa248d49de2222ad2742247c60072f" +dependencies = [ + "lazy_static", +] 
+ [[package]] name = "tracy-rs" version = "0.1.2" @@ -5537,12 +5573,6 @@ dependencies = [ "serde", ] -[[package]] -name = "urlencoding" -version = "1.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a1f0175e03a0973cf4afd476bef05c26e228520400eb1fd473ad417b1c00ffb" - [[package]] name = "uuid" version = "0.8.1" @@ -5608,26 +5638,30 @@ dependencies = [ [[package]] name = "warp" -version = "0.2.3" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e95175b7a927258ecbb816bdada3cc469cb68593e7940b96a60f4af366a9970" +checksum = "3cef4e1e9114a4b7f1ac799f16ce71c14de5778500c5450ec6b7b920c55b587e" dependencies = [ - "bytes 0.5.6", - "futures 0.3.21", + "bytes 1.1.0", + "futures-channel", + "futures-util", "headers", "http", "hyper", "log", "mime", "mime_guess", + "percent-encoding", "pin-project", "scoped-tls", "serde", "serde_json", "serde_urlencoded", - "tokio 0.2.25", + "tokio 1.17.0", + "tokio-stream", + "tokio-util 0.6.999", "tower-service", - "urlencoding", + "tracing", ] [[package]] @@ -5696,7 +5730,7 @@ name = "webdriver" version = "0.45.0" dependencies = [ "base64 0.12.3", - "bytes 0.5.6", + "bytes 1.1.0", "cookie", "http", "log", @@ -5704,7 +5738,8 @@ dependencies = [ "serde_derive", "serde_json", "time 0.3.9", - "tokio 0.2.25", + "tokio 1.17.0", + "tokio-stream", "unicode-segmentation", "url", "warp", diff --git a/Cargo.toml b/Cargo.toml index 9de1a1820aca..e275de5a2803 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -117,6 +117,9 @@ rand = { path = "build/rust/rand" } # Patch hashbrown 0.9 to 0.11 hashbrown = { path = "build/rust/hashbrown" } +# Patch tokio-util 0.6 to 0.7 +tokio-util = { path = "build/rust/tokio-util" } + # Patch autocfg to hide rustc output. Workaround for https://github.com/cuviper/autocfg/issues/30 autocfg = { path = "third_party/rust/autocfg" } diff --git a/build/rust/tokio-util/Cargo.toml b/build/rust/tokio-util/Cargo.toml new file mode 100644 index 000000000000..0550aeedf80e --- /dev/null +++ b/build/rust/tokio-util/Cargo.toml @@ -0,0 +1,23 @@ +[package] +name = "tokio-util" +version = "0.6.999" +edition = "2018" +license = "MPL-2.0" + +[lib] +path = "lib.rs" + +[dependencies] +tokio-util = "0.7" + +[features] +__docs_rs = ["tokio-util/__docs_rs"] +codec = ["tokio-util/codec"] +compat = ["tokio-util/compat"] +default = ["tokio-util/default"] +full = ["tokio-util/full"] +io = ["tokio-util/io"] +io-util = ["tokio-util/io-util"] +net = ["tokio-util/net"] +rt = ["tokio-util/rt"] +time = ["tokio-util/time"] diff --git a/build/rust/tokio-util/lib.rs b/build/rust/tokio-util/lib.rs new file mode 100644 index 000000000000..fcb10588eeba --- /dev/null +++ b/build/rust/tokio-util/lib.rs @@ -0,0 +1,5 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +pub use tokio_util::*; diff --git a/dom/media/webrtc/transport/mdns_service/Cargo.toml b/dom/media/webrtc/transport/mdns_service/Cargo.toml index 9b2ef42012a9..4051c29bc2c1 100644 --- a/dom/media/webrtc/transport/mdns_service/Cargo.toml +++ b/dom/media/webrtc/transport/mdns_service/Cargo.toml @@ -9,5 +9,5 @@ byteorder = "1.3.1" dns-parser = "0.8.0" gecko-profiler = { path = "../../../../../tools/profiler/rust-api" } log = "0.4" -socket2 = { version = "0.3.9", features = ["reuseport"] } +socket2 = { version = "0.4", features = ["all"] } uuid = { version = "0.8", features = ["v4"] } diff --git a/dom/media/webrtc/transport/mdns_service/src/lib.rs b/dom/media/webrtc/transport/mdns_service/src/lib.rs index 04432684b2ea..e4dcccd09aae 100644 --- a/dom/media/webrtc/transport/mdns_service/src/lib.rs +++ b/dom/media/webrtc/transport/mdns_service/src/lib.rs @@ -412,7 +412,7 @@ impl MDNSService { let mdns_addr = std::net::Ipv4Addr::new(224, 0, 0, 251); let port = 5353; - let socket = Socket::new(Domain::ipv4(), Type::dgram(), None)?; + let socket = Socket::new(Domain::IPV4, Type::DGRAM, None)?; socket.set_reuse_address(true)?; #[cfg(not(target_os = "windows"))] @@ -422,7 +422,7 @@ impl MDNSService { port, ))))?; - let socket = socket.into_udp_socket(); + let socket = std::net::UdpSocket::from(socket); socket.set_multicast_loop_v4(true)?; socket.set_read_timeout(Some(time::Duration::from_millis(1)))?; socket.set_write_timeout(Some(time::Duration::from_millis(1)))?; @@ -658,7 +658,7 @@ mod tests { fn listen_until(addr: &std::net::Ipv4Addr, stop: u64) -> thread::JoinHandle> { let port = 5353; - let socket = Socket::new(Domain::ipv4(), Type::dgram(), None).unwrap(); + let socket = Socket::new(Domain::IPV4, Type::DGRAM, None).unwrap(); socket.set_reuse_address(true).unwrap(); #[cfg(not(target_os = "windows"))] @@ -670,7 +670,7 @@ mod tests { )))) .unwrap(); - let socket = socket.into_udp_socket(); + let socket = std::net::UdpSocket::from(socket); socket.set_multicast_loop_v4(true).unwrap(); socket .set_read_timeout(Some(time::Duration::from_millis(10))) diff --git a/python/mozbuild/mozbuild/vendor/vendor_rust.py b/python/mozbuild/mozbuild/vendor/vendor_rust.py index 249ab9bf79d2..8f75d6c51ea0 100644 --- a/python/mozbuild/mozbuild/vendor/vendor_rust.py +++ b/python/mozbuild/mozbuild/vendor/vendor_rust.py @@ -74,7 +74,7 @@ PACKAGES_WE_ALWAYS_WANT_AN_OVERRIDE_OF = [ # add a comment as to why. TOLERATED_DUPES = { "base64": 2, - "bytes": 3, + "bytes": 2, "crossbeam-deque": 2, "crossbeam-epoch": 2, "crossbeam-utils": 3, @@ -82,12 +82,11 @@ TOLERATED_DUPES = { "libloading": 2, "memoffset": 2, "mio": 2, - "pin-project-lite": 2, # Transition from time 0.1 to 0.3 underway, but chrono is stuck on 0.1 # and hasn't been updated in 1.5 years (an hypothetical update is # expected to remove the dependency on time altogether). 
"time": 2, - "tokio": 3, + "tokio": 2, } diff --git a/testing/geckodriver/Cargo.toml b/testing/geckodriver/Cargo.toml index 853d31b170b7..85219c0c1834 100644 --- a/testing/geckodriver/Cargo.toml +++ b/testing/geckodriver/Cargo.toml @@ -13,7 +13,7 @@ edition = "2018" base64 = "0.12" chrono = "0.4.6" clap = { version = "3.1", default-features = false, features = ["cargo", "std", "suggestions", "wrap_help"] } -hyper = "0.13" +hyper = "0.14" lazy_static = "1.0" log = { version = "0.4", features = ["std"] } marionette = { path = "./marionette", version="0.2.0" } diff --git a/testing/webdriver/Cargo.toml b/testing/webdriver/Cargo.toml index e6938d8229b5..9d12bc2b32b9 100644 --- a/testing/webdriver/Cargo.toml +++ b/testing/webdriver/Cargo.toml @@ -12,11 +12,11 @@ edition = "2018" [features] default = ["server"] -server = ["tokio", "warp"] +server = ["tokio", "tokio-stream", "warp"] [dependencies] base64 = "0.12" -bytes = "0.5" +bytes = "1.0" cookie = { version = "0.16", default-features = false } http = "0.2" log = "0.4" @@ -24,7 +24,8 @@ serde = "1.0" serde_json = "1.0" serde_derive = "1.0" time = "0.3" -tokio = { version = "0.2", features = ["rt-core"], optional = true} +tokio = { version = "1.0", features = ["rt", "net"], optional = true} +tokio-stream = { version = "0.1", features = ["net"], optional = true} unicode-segmentation = "1.2" url = "2.0" -warp = { version = "0.2", default-features = false, optional = true } +warp = { version = "0.3", default-features = false, optional = true } diff --git a/testing/webdriver/src/server.rs b/testing/webdriver/src/server.rs index 3954f272015f..c0a73cb2ef9e 100644 --- a/testing/webdriver/src/server.rs +++ b/testing/webdriver/src/server.rs @@ -17,6 +17,7 @@ use std::sync::mpsc::{channel, Receiver, Sender}; use std::sync::{Arc, Mutex}; use std::thread; use tokio::net::TcpListener; +use tokio_stream::wrappers::TcpListenerStream; use url::{Host, Url}; use warp::{self, Buf, Filter, Rejection}; @@ -218,14 +219,11 @@ where let builder = thread::Builder::new().name("webdriver server".to_string()); let handle = builder.spawn(move || { - let mut rt = tokio::runtime::Builder::new() - .basic_scheduler() + let rt = tokio::runtime::Builder::new_current_thread() .enable_io() .build() .unwrap(); - let mut listener = rt - .handle() - .enter(|| TcpListener::from_std(listener).unwrap()); + let listener = TcpListener::from_std(listener).unwrap(); let wroutes = build_warp_routes( address, allow_hosts, @@ -233,7 +231,7 @@ where &extension_routes, msg_send.clone(), ); - let fut = warp::serve(wroutes).run_incoming(listener.incoming()); + let fut = warp::serve(wroutes).run_incoming(TcpListenerStream::new(listener)); rt.block_on(fut); })?; @@ -498,7 +496,7 @@ fn build_route( Some(_) | None => {} } } - let body = String::from_utf8(body.bytes().to_vec()); + let body = String::from_utf8(body.chunk().to_vec()); if body.is_err() { let err = WebDriverError::new( ErrorStatus::UnknownError, diff --git a/third_party/rust/bytes-0.5.6/.cargo-checksum.json b/third_party/rust/bytes-0.5.6/.cargo-checksum.json deleted file mode 100644 index 5f6f1d2f7a0d..000000000000 --- a/third_party/rust/bytes-0.5.6/.cargo-checksum.json +++ /dev/null @@ -1 +0,0 @@ 
-{"files":{"CHANGELOG.md":"7c1c6fe9fa6aa8a155d4a04dab5d4e3abadb349121886b2f24252db0e45fba51","Cargo.toml":"bb5072cd9bad83919ed35f49f3a7f88b608a0150d6ccdcbb4bf17dfb3c64ef3f","LICENSE":"45f522cacecb1023856e46df79ca625dfc550c94910078bd8aec6e02880b3d42","README.md":"2c2f6f1a240ad375f9dbd8e7f023510b645d98e327ea0a42ba339c94fd9baaa9","benches/buf.rs":"b0f4f1130081680f6f99d1efd49a75bd1d97d9a30117b7ad9525c96b7c8968e6","benches/bytes.rs":"dc5289a9ce82be35e71ed5853ab33aa108a30460e481135f6058fe4d2f7dc15e","benches/bytes_mut.rs":"1326fe6224b26826228e02b4133151e756f38152c2d9cfe66adf83af76c3ec98","ci/test-stable.sh":"6e010f1a95b72fea7bebdd217fda78427f3eb07b1e753f79507c71d982b2d38a","ci/tsan.sh":"466b86b19225dd26c756cf2252cb1973f87a145642c99364b462ed7ceb55c7dd","src/buf/buf_impl.rs":"fe1bc64bb9aef5b57d83901268f89bf148490e71bebc340c7ecc40ff95bcfb70","src/buf/buf_mut.rs":"d226189d9db76c9023537dcca0687aa5dd25851a9052d19154de8ee9b25bdee3","src/buf/ext/chain.rs":"337f58e1a8da5b4768e55921ff394f4ba3a0c6d476448fd5bceab6f3c1db1b3e","src/buf/ext/limit.rs":"a705d7cf38f9a11a904d6ee5e7afea83e9abdf8f454bb8e16b407b0e055dc11a","src/buf/ext/mod.rs":"ba2fa392c61b7429530c71797114e3f09d9b6b750b6f77f57fde964d2b218bc4","src/buf/ext/reader.rs":"ee4733fa2c2d893c6df8151c2333a46171619e8a45ec9bae863edc8deb438ac5","src/buf/ext/take.rs":"e92be765539b8b0c1cb67a01b691319cccd35fc098f2bb59ced3bbbe41ee0257","src/buf/ext/writer.rs":"3c52df6e73d09935d37bed9a05689c1966952f980b85b40aaab05081ec7ef6d8","src/buf/iter.rs":"a0de69367fa61d0d1c6c2ff4b4d337de9c5f4213d0c86e083226cf409666d860","src/buf/mod.rs":"4f8e3b4c4b69b7d004306d458ad835801e53659b38ca08312d7217d82da4c64f","src/buf/vec_deque.rs":"5a4063961d10380c1ab3681f8b3f6201112766d9f57a63e2861dc9f2b134668d","src/bytes.rs":"8c3aa5fe425604206ffc1b85a8bff5a9be38917786453450955984523f829cec","src/bytes_mut.rs":"e276f74da841ab65ca681cb09820de98aa2e9837dd975ed564b1a9be40440cf3","src/fmt/debug.rs":"19ebe7e5516e40ab712995f3ec2e0ba78ddfa905cce117e6d01e8eb330f3970a","src/fmt/hex.rs":"13755ec6f1b79923e1f1a05c51b179a38c03c40bb8ed2db0210e8901812e61e7","src/fmt/mod.rs":"176da4e359da99b8e5cf16e480cb7b978f574876827f1b9bb9c08da4d74ac0f5","src/lib.rs":"9b96e2a011a782ceb82428e6b71fd212a46bc186bd152102018c7b6428a0d441","src/loom.rs":"5dc97a5afce14875a66e44cbf0afa67e084c8b6b8c560bc14e7a70ef73aee96e","src/serde.rs":"3ecd7e828cd4c2b7db93c807cb1548fad209e674df493edf7cda69a7b04d405d","tests/test_buf.rs":"3ca99c58f470e7c4beb18e5dc69250ce541dd8ac96b88fb1162640510a735ada","tests/test_buf_mut.rs":"56636e439cb07af2fabdfb60a08995829680c9730a8ebe5c6ad2f54dbf208e32","tests/test_bytes.rs":"3ec0a82ce98fea633ed7d635caca21cd8035d0c9ea4287d1cc0199e167a4a3c1","tests/test_bytes_odd_alloc.rs":"87d51d4ab6ad98193b140ea8158f6631eba985a204c2ea94d34b3bb157791a16","tests/test_bytes_vec_alloc.rs":"2b686b6ab44f924e69d8270a4f256eb3626a3b4db8c1919b74bc422c10124899","tests/test_chain.rs":"71772fbc0bab72a697bd85c6c1be0eddfe7d7dc4f4737a0cd53be4ad191d076b","tests/test_debug.rs":"13299107172809e8cbbd823964ac9450cd0d6b6de79f2e6a2e0f44b9225a0593","tests/test_iter.rs":"c1f46823df26a90139645fd8728a03138edd95b2849dfec830452a80ddd9726d","tests/test_reader.rs":"9c94e164aa7de4c10966f8084ad04d06f4e9c66e156d017d194a1dac3dfc6619","tests/test_serde.rs":"2691f891796ba259de0ecf926de05c514f4912cc5fcd3e6a1591efbcd23ed4d0","tests/test_take.rs":"975aa2e216b6a3c939b31e41ecfbb3a90938096413a14a2ae986c842d2250180"},"package":"0e4cec68f03f32e44924783795810fa50a7035d8c8ebe78580ad7e6c703fba38"} \ No newline at end of file diff --git 
a/third_party/rust/bytes-0.5.6/CHANGELOG.md b/third_party/rust/bytes-0.5.6/CHANGELOG.md deleted file mode 100644 index 1b821da42864..000000000000 --- a/third_party/rust/bytes-0.5.6/CHANGELOG.md +++ /dev/null @@ -1,166 +0,0 @@ -# 0.5.6 (July 13, 2020) - -- Improve `BytesMut` to reuse buffer when fully `advance`d. -- Mark `BytesMut::{as_mut, set_len}` with `#[inline]`. -- Relax synchronization when cloning in shared vtable of `Bytes`. -- Move `loom` to `dev-dependencies`. - -# 0.5.5 (June 18, 2020) - -### Added -- Allow using the `serde` feature in `no_std` environments (#385). - -### Fix -- Fix `BufMut::advance_mut` to panic if advanced passed the capacity (#354).. -- Fix `BytesMut::freeze` ignoring amount previously `advance`d (#352). - -# 0.5.4 (January 23, 2020) - -### Added -- Make `Bytes::new` a `const fn`. -- Add `From` for `Bytes`. - -### Fix -- Fix reversed arguments in `PartialOrd` for `Bytes`. -- Fix `Bytes::truncate` losing original capacity when repr is an unshared `Vec`. -- Fix `Bytes::from(Vec)` when allocator gave `Vec` a pointer with LSB set. -- Fix panic in `Bytes::slice_ref` if argument is an empty slice. - -# 0.5.3 (December 12, 2019) - -### Added -- `must_use` attributes to `split`, `split_off`, and `split_to` methods (#337). - -### Fix -- Potential freeing of a null pointer in `Bytes` when constructed with an empty `Vec` (#341, #342). -- Calling `Bytes::truncate` with a size large than the length will no longer clear the `Bytes` (#333). - -# 0.5.2 (November 27, 2019) - -### Added -- `Limit` methods `into_inner`, `get_ref`, `get_mut`, `limit`, and `set_limit` (#325). - -# 0.5.1 (November 25, 2019) - -### Fix -- Growth documentation for `BytesMut` (#321) - -# 0.5.0 (November 25, 2019) - -### Fix -- Potential overflow in `copy_to_slice` - -### Changed -- Increased minimum supported Rust version to 1.39. -- `Bytes` is now a "trait object", allowing for custom allocation strategies (#298) -- `BytesMut` implicitly grows internal storage. `remaining_mut()` returns - `usize::MAX` (#316). -- `BufMut::bytes_mut` returns `&mut [MaybeUninit]` to reflect the unknown - initialization state (#305). -- `Buf` / `BufMut` implementations for `&[u8]` and `&mut [u8]` - respectively (#261). -- Move `Buf` / `BufMut` "extra" functions to an extension trait (#306). -- `BufMutExt::limit` (#309). -- `Bytes::slice` takes a `RangeBounds` argument (#265). -- `Bytes::from_static` is now a `const fn` (#311). -- A multitude of smaller performance optimizations. - -### Added -- `no_std` support (#281). -- `get_*`, `put_*`, `get_*_le`, and `put_*le` accessors for handling byte order. -- `BorrowMut` implementation for `BytesMut` (#185). - -### Removed -- `IntoBuf` (#288). -- `Buf` implementation for `&str` (#301). -- `byteorder` dependency (#280). -- `iovec` dependency, use `std::IoSlice` instead (#263). -- optional `either` dependency (#315). -- optional `i128` feature -- now available on stable. (#276). - -# 0.4.12 (March 6, 2019) - -### Added -- Implement `FromIterator<&'a u8>` for `BytesMut`/`Bytes` (#244). -- Implement `Buf` for `VecDeque` (#249). - -# 0.4.11 (November 17, 2018) - -* Use raw pointers for potentially racy loads (#233). -* Implement `BufRead` for `buf::Reader` (#232). -* Documentation tweaks (#234). - -# 0.4.10 (September 4, 2018) - -* impl `Buf` and `BufMut` for `Either` (#225). -* Add `Bytes::slice_ref` (#208). - -# 0.4.9 (July 12, 2018) - -* Add 128 bit number support behind a feature flag (#209). 
-* Implement `IntoBuf` for `&mut [u8]` - -# 0.4.8 (May 25, 2018) - -* Fix panic in `BytesMut` `FromIterator` implementation. -* Bytes: Recycle space when reserving space in vec mode (#197). -* Bytes: Add resize fn (#203). - -# 0.4.7 (April 27, 2018) - -* Make `Buf` and `BufMut` usable as trait objects (#186). -* impl BorrowMut for BytesMut (#185). -* Improve accessor performance (#195). - -# 0.4.6 (Janary 8, 2018) - -* Implement FromIterator for Bytes/BytesMut (#148). -* Add `advance` fn to Bytes/BytesMut (#166). -* Add `unsplit` fn to `BytesMut` (#162, #173). -* Improvements to Bytes split fns (#92). - -# 0.4.5 (August 12, 2017) - -* Fix range bug in `Take::bytes` -* Misc performance improvements -* Add extra `PartialEq` implementations. -* Add `Bytes::with_capacity` -* Implement `AsMut[u8]` for `BytesMut` - -# 0.4.4 (May 26, 2017) - -* Add serde support behind feature flag -* Add `extend_from_slice` on `Bytes` and `BytesMut` -* Add `truncate` and `clear` on `Bytes` -* Misc additional std trait implementations -* Misc performance improvements - -# 0.4.3 (April 30, 2017) - -* Fix Vec::advance_mut bug -* Bump minimum Rust version to 1.15 -* Misc performance tweaks - -# 0.4.2 (April 5, 2017) - -* Misc performance tweaks -* Improved `Debug` implementation for `Bytes` -* Avoid some incorrect assert panics - -# 0.4.1 (March 15, 2017) - -* Expose `buf` module and have most types available from there vs. root. -* Implement `IntoBuf` for `T: Buf`. -* Add `FromBuf` and `Buf::collect`. -* Add iterator adapter for `Buf`. -* Add scatter/gather support to `Buf` and `BufMut`. -* Add `Buf::chain`. -* Reduce allocations on repeated calls to `BytesMut::reserve`. -* Implement `Debug` for more types. -* Remove `Source` in favor of `IntoBuf`. -* Implement `Extend` for `BytesMut`. - - -# 0.4.0 (February 24, 2017) - -* Initial release diff --git a/third_party/rust/bytes-0.5.6/Cargo.toml b/third_party/rust/bytes-0.5.6/Cargo.toml deleted file mode 100644 index 81a7224790ef..000000000000 --- a/third_party/rust/bytes-0.5.6/Cargo.toml +++ /dev/null @@ -1,37 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies -# -# If you believe there's an error in this file please file an -# issue against the rust-lang/cargo repository. If you're -# editing this file be aware that the upstream Cargo.toml -# will likely look very different (and much more reasonable) - -[package] -edition = "2018" -name = "bytes" -version = "0.5.6" -authors = ["Carl Lerche ", "Sean McArthur "] -description = "Types and traits for working with bytes" -documentation = "https://docs.rs/bytes" -readme = "README.md" -keywords = ["buffers", "zero-copy", "io"] -categories = ["network-programming", "data-structures"] -license = "MIT" -repository = "https://github.com/tokio-rs/bytes" -[dependencies.serde] -version = "1.0.60" -features = ["alloc"] -optional = true -default-features = false -[dev-dependencies.serde_test] -version = "1.0" - -[features] -default = ["std"] -std = [] -[target."cfg(loom)".dev-dependencies.loom] -version = "0.3" diff --git a/third_party/rust/bytes-0.5.6/README.md b/third_party/rust/bytes-0.5.6/README.md deleted file mode 100644 index 73c43abc8990..000000000000 --- a/third_party/rust/bytes-0.5.6/README.md +++ /dev/null @@ -1,47 +0,0 @@ -# Bytes - -A utility library for working with bytes. 
- -[![Crates.io][crates-badge]][crates-url] -[![Build Status][ci-badge]][ci-url] - -[crates-badge]: https://img.shields.io/crates/v/bytes.svg -[crates-url]: https://crates.io/crates/bytes -[ci-badge]: https://github.com/tokio-rs/bytes/workflows/CI/badge.svg -[ci-url]: https://github.com/tokio-rs/bytes/actions - -[Documentation](https://docs.rs/bytes) - -## Usage - -To use `bytes`, first add this to your `Cargo.toml`: - -```toml -[dependencies] -bytes = "0.5" -``` - -Next, add this to your crate: - -```rust -use bytes::{Bytes, BytesMut, Buf, BufMut}; -``` - -## Serde support - -Serde support is optional and disabled by default. To enable use the feature `serde`. - -```toml -[dependencies] -bytes = { version = "0.5", features = ["serde"] } -``` - -## License - -This project is licensed under the [MIT license](LICENSE). - -### Contribution - -Unless you explicitly state otherwise, any contribution intentionally submitted -for inclusion in `bytes` by you, shall be licensed as MIT, without any additional -terms or conditions. diff --git a/third_party/rust/bytes-0.5.6/benches/buf.rs b/third_party/rust/bytes-0.5.6/benches/buf.rs deleted file mode 100644 index 77b0633eecce..000000000000 --- a/third_party/rust/bytes-0.5.6/benches/buf.rs +++ /dev/null @@ -1,187 +0,0 @@ -#![feature(test)] -#![warn(rust_2018_idioms)] - -extern crate test; - -use bytes::Buf; -use test::Bencher; - -/// Dummy Buf implementation -struct TestBuf { - buf: &'static [u8], - readlens: &'static [usize], - init_pos: usize, - pos: usize, - readlen_pos: usize, - readlen: usize, -} -impl TestBuf { - fn new(buf: &'static [u8], readlens: &'static [usize], init_pos: usize) -> TestBuf { - let mut buf = TestBuf { - buf, - readlens, - init_pos, - pos: 0, - readlen_pos: 0, - readlen: 0, - }; - buf.reset(); - buf - } - fn reset(&mut self) { - self.pos = self.init_pos; - self.readlen_pos = 0; - self.next_readlen(); - } - /// Compute the length of the next read : - /// - use the next value specified in readlens (capped by remaining) if any - /// - else the remaining - fn next_readlen(&mut self) { - self.readlen = self.buf.len() - self.pos; - if let Some(readlen) = self.readlens.get(self.readlen_pos) { - self.readlen = std::cmp::min(self.readlen, *readlen); - self.readlen_pos += 1; - } - } -} -impl Buf for TestBuf { - fn remaining(&self) -> usize { - return self.buf.len() - self.pos; - } - fn advance(&mut self, cnt: usize) { - self.pos += cnt; - assert!(self.pos <= self.buf.len()); - self.next_readlen(); - } - fn bytes(&self) -> &[u8] { - if self.readlen == 0 { - Default::default() - } else { - &self.buf[self.pos..self.pos + self.readlen] - } - } -} - -/// Dummy Buf implementation -/// version with methods forced to not be inlined (to simulate costly calls) -struct TestBufC { - inner: TestBuf, -} -impl TestBufC { - fn new(buf: &'static [u8], readlens: &'static [usize], init_pos: usize) -> TestBufC { - TestBufC { - inner: TestBuf::new(buf, readlens, init_pos), - } - } - fn reset(&mut self) { - self.inner.reset() - } -} -impl Buf for TestBufC { - #[inline(never)] - fn remaining(&self) -> usize { - self.inner.remaining() - } - #[inline(never)] - fn advance(&mut self, cnt: usize) { - self.inner.advance(cnt) - } - #[inline(never)] - fn bytes(&self) -> &[u8] { - self.inner.bytes() - } -} - -macro_rules! 
bench { - ($fname:ident, testbuf $testbuf:ident $readlens:expr, $method:ident $(,$arg:expr)*) => ( - #[bench] - fn $fname(b: &mut Bencher) { - let mut bufs = [ - $testbuf::new(&[1u8; 8+0], $readlens, 0), - $testbuf::new(&[1u8; 8+1], $readlens, 1), - $testbuf::new(&[1u8; 8+2], $readlens, 2), - $testbuf::new(&[1u8; 8+3], $readlens, 3), - $testbuf::new(&[1u8; 8+4], $readlens, 4), - $testbuf::new(&[1u8; 8+5], $readlens, 5), - $testbuf::new(&[1u8; 8+6], $readlens, 6), - $testbuf::new(&[1u8; 8+7], $readlens, 7), - ]; - b.iter(|| { - for i in 0..8 { - bufs[i].reset(); - let buf: &mut dyn Buf = &mut bufs[i]; // type erasure - test::black_box(buf.$method($($arg,)*)); - } - }) - } - ); - ($fname:ident, slice, $method:ident $(,$arg:expr)*) => ( - #[bench] - fn $fname(b: &mut Bencher) { - // buf must be long enough for one read of 8 bytes starting at pos 7 - let arr = [1u8; 8+7]; - b.iter(|| { - for i in 0..8 { - let mut buf = &arr[i..]; - let buf = &mut buf as &mut dyn Buf; // type erasure - test::black_box(buf.$method($($arg,)*)); - } - }) - } - ); - ($fname:ident, option) => ( - #[bench] - fn $fname(b: &mut Bencher) { - let data = [1u8; 1]; - b.iter(|| { - for _ in 0..8 { - let mut buf = Some(data); - let buf = &mut buf as &mut dyn Buf; // type erasure - test::black_box(buf.get_u8()); - } - }) - } - ); -} - -macro_rules! bench_group { - ($method:ident $(,$arg:expr)*) => ( - bench!(slice, slice, $method $(,$arg)*); - bench!(tbuf_1, testbuf TestBuf &[], $method $(,$arg)*); - bench!(tbuf_1_costly, testbuf TestBufC &[], $method $(,$arg)*); - bench!(tbuf_2, testbuf TestBuf &[1], $method $(,$arg)*); - bench!(tbuf_2_costly, testbuf TestBufC &[1], $method $(,$arg)*); - // bench!(tbuf_onebyone, testbuf TestBuf &[1,1,1,1,1,1,1,1], $method $(,$arg)*); - // bench!(tbuf_onebyone_costly, testbuf TestBufC &[1,1,1,1,1,1,1,1], $method $(,$arg)*); - ); -} - -mod get_u8 { - use super::*; - bench_group!(get_u8); - bench!(option, option); -} -mod get_u16 { - use super::*; - bench_group!(get_u16); -} -mod get_u32 { - use super::*; - bench_group!(get_u32); -} -mod get_u64 { - use super::*; - bench_group!(get_u64); -} -mod get_f32 { - use super::*; - bench_group!(get_f32); -} -mod get_f64 { - use super::*; - bench_group!(get_f64); -} -mod get_uint24 { - use super::*; - bench_group!(get_uint, 3); -} diff --git a/third_party/rust/bytes-0.5.6/benches/bytes.rs b/third_party/rust/bytes-0.5.6/benches/bytes.rs deleted file mode 100644 index c5b84124f173..000000000000 --- a/third_party/rust/bytes-0.5.6/benches/bytes.rs +++ /dev/null @@ -1,119 +0,0 @@ -#![feature(test)] -#![warn(rust_2018_idioms)] - -extern crate test; - -use bytes::Bytes; -use test::Bencher; - -#[bench] -fn deref_unique(b: &mut Bencher) { - let buf = Bytes::from(vec![0; 1024]); - - b.iter(|| { - for _ in 0..1024 { - test::black_box(&buf[..]); - } - }) -} - -#[bench] -fn deref_shared(b: &mut Bencher) { - let buf = Bytes::from(vec![0; 1024]); - let _b2 = buf.clone(); - - b.iter(|| { - for _ in 0..1024 { - test::black_box(&buf[..]); - } - }) -} - -#[bench] -fn deref_static(b: &mut Bencher) { - let buf = Bytes::from_static(b"hello world"); - - b.iter(|| { - for _ in 0..1024 { - test::black_box(&buf[..]); - } - }) -} - -#[bench] -fn clone_static(b: &mut Bencher) { - let bytes = - Bytes::from_static("hello world 1234567890 and have a good byte 0987654321".as_bytes()); - - b.iter(|| { - for _ in 0..1024 { - test::black_box(&bytes.clone()); - } - }) -} - -#[bench] -fn clone_shared(b: &mut Bencher) { - let bytes = Bytes::from(b"hello world 1234567890 and have a good byte 
0987654321".to_vec()); - - b.iter(|| { - for _ in 0..1024 { - test::black_box(&bytes.clone()); - } - }) -} - -#[bench] -fn clone_arc_vec(b: &mut Bencher) { - use std::sync::Arc; - let bytes = Arc::new(b"hello world 1234567890 and have a good byte 0987654321".to_vec()); - - b.iter(|| { - for _ in 0..1024 { - test::black_box(&bytes.clone()); - } - }) -} - -#[bench] -fn from_long_slice(b: &mut Bencher) { - let data = [0u8; 128]; - b.bytes = data.len() as u64; - b.iter(|| { - let buf = Bytes::copy_from_slice(&data[..]); - test::black_box(buf); - }) -} - -#[bench] -fn slice_empty(b: &mut Bencher) { - b.iter(|| { - let b = Bytes::from(vec![17; 1024]).clone(); - for i in 0..1000 { - test::black_box(b.slice(i % 100..i % 100)); - } - }) -} - -#[bench] -fn slice_short_from_arc(b: &mut Bencher) { - b.iter(|| { - // `clone` is to convert to ARC - let b = Bytes::from(vec![17; 1024]).clone(); - for i in 0..1000 { - test::black_box(b.slice(1..2 + i % 10)); - } - }) -} - -#[bench] -fn split_off_and_drop(b: &mut Bencher) { - b.iter(|| { - for _ in 0..1024 { - let v = vec![10; 200]; - let mut b = Bytes::from(v); - test::black_box(b.split_off(100)); - test::black_box(b); - } - }) -} diff --git a/third_party/rust/bytes-0.5.6/benches/bytes_mut.rs b/third_party/rust/bytes-0.5.6/benches/bytes_mut.rs deleted file mode 100644 index b06943621005..000000000000 --- a/third_party/rust/bytes-0.5.6/benches/bytes_mut.rs +++ /dev/null @@ -1,266 +0,0 @@ -#![feature(test)] -#![warn(rust_2018_idioms)] - -extern crate test; - -use bytes::{BufMut, BytesMut}; -use test::Bencher; - -#[bench] -fn alloc_small(b: &mut Bencher) { - b.iter(|| { - for _ in 0..1024 { - test::black_box(BytesMut::with_capacity(12)); - } - }) -} - -#[bench] -fn alloc_mid(b: &mut Bencher) { - b.iter(|| { - test::black_box(BytesMut::with_capacity(128)); - }) -} - -#[bench] -fn alloc_big(b: &mut Bencher) { - b.iter(|| { - test::black_box(BytesMut::with_capacity(4096)); - }) -} - -#[bench] -fn deref_unique(b: &mut Bencher) { - let mut buf = BytesMut::with_capacity(4096); - buf.put(&[0u8; 1024][..]); - - b.iter(|| { - for _ in 0..1024 { - test::black_box(&buf[..]); - } - }) -} - -#[bench] -fn deref_unique_unroll(b: &mut Bencher) { - let mut buf = BytesMut::with_capacity(4096); - buf.put(&[0u8; 1024][..]); - - b.iter(|| { - for _ in 0..128 { - test::black_box(&buf[..]); - test::black_box(&buf[..]); - test::black_box(&buf[..]); - test::black_box(&buf[..]); - test::black_box(&buf[..]); - test::black_box(&buf[..]); - test::black_box(&buf[..]); - test::black_box(&buf[..]); - } - }) -} - -#[bench] -fn deref_shared(b: &mut Bencher) { - let mut buf = BytesMut::with_capacity(4096); - buf.put(&[0u8; 1024][..]); - let _b2 = buf.split_off(1024); - - b.iter(|| { - for _ in 0..1024 { - test::black_box(&buf[..]); - } - }) -} - -#[bench] -fn deref_two(b: &mut Bencher) { - let mut buf1 = BytesMut::with_capacity(8); - buf1.put(&[0u8; 8][..]); - - let mut buf2 = BytesMut::with_capacity(4096); - buf2.put(&[0u8; 1024][..]); - - b.iter(|| { - for _ in 0..512 { - test::black_box(&buf1[..]); - test::black_box(&buf2[..]); - } - }) -} - -#[bench] -fn clone_frozen(b: &mut Bencher) { - let bytes = BytesMut::from(&b"hello world 1234567890 and have a good byte 0987654321"[..]) - .split() - .freeze(); - - b.iter(|| { - for _ in 0..1024 { - test::black_box(&bytes.clone()); - } - }) -} - -#[bench] -fn alloc_write_split_to_mid(b: &mut Bencher) { - b.iter(|| { - let mut buf = BytesMut::with_capacity(128); - buf.put_slice(&[0u8; 64]); - test::black_box(buf.split_to(64)); - }) -} - -#[bench] -fn 
drain_write_drain(b: &mut Bencher) { - let data = [0u8; 128]; - - b.iter(|| { - let mut buf = BytesMut::with_capacity(1024); - let mut parts = Vec::with_capacity(8); - - for _ in 0..8 { - buf.put(&data[..]); - parts.push(buf.split_to(128)); - } - - test::black_box(parts); - }) -} - -#[bench] -fn fmt_write(b: &mut Bencher) { - use std::fmt::Write; - let mut buf = BytesMut::with_capacity(128); - let s = "foo bar baz quux lorem ipsum dolor et"; - - b.bytes = s.len() as u64; - b.iter(|| { - let _ = write!(buf, "{}", s); - test::black_box(&buf); - unsafe { - buf.set_len(0); - } - }) -} - -#[bench] -fn bytes_mut_extend(b: &mut Bencher) { - let mut buf = BytesMut::with_capacity(256); - let data = [33u8; 32]; - - b.bytes = data.len() as u64 * 4; - b.iter(|| { - for _ in 0..4 { - buf.extend(&data); - } - test::black_box(&buf); - unsafe { - buf.set_len(0); - } - }); -} - -// BufMut for BytesMut vs Vec - -#[bench] -fn put_slice_bytes_mut(b: &mut Bencher) { - let mut buf = BytesMut::with_capacity(256); - let data = [33u8; 32]; - - b.bytes = data.len() as u64 * 4; - b.iter(|| { - for _ in 0..4 { - buf.put_slice(&data); - } - test::black_box(&buf); - unsafe { - buf.set_len(0); - } - }); -} - -#[bench] -fn put_u8_bytes_mut(b: &mut Bencher) { - let mut buf = BytesMut::with_capacity(256); - let cnt = 128; - - b.bytes = cnt as u64; - b.iter(|| { - for _ in 0..cnt { - buf.put_u8(b'x'); - } - test::black_box(&buf); - unsafe { - buf.set_len(0); - } - }); -} - -#[bench] -fn put_slice_vec(b: &mut Bencher) { - let mut buf = Vec::::with_capacity(256); - let data = [33u8; 32]; - - b.bytes = data.len() as u64 * 4; - b.iter(|| { - for _ in 0..4 { - buf.put_slice(&data); - } - test::black_box(&buf); - unsafe { - buf.set_len(0); - } - }); -} - -#[bench] -fn put_u8_vec(b: &mut Bencher) { - let mut buf = Vec::::with_capacity(256); - let cnt = 128; - - b.bytes = cnt as u64; - b.iter(|| { - for _ in 0..cnt { - buf.put_u8(b'x'); - } - test::black_box(&buf); - unsafe { - buf.set_len(0); - } - }); -} - -#[bench] -fn put_slice_vec_extend(b: &mut Bencher) { - let mut buf = Vec::::with_capacity(256); - let data = [33u8; 32]; - - b.bytes = data.len() as u64 * 4; - b.iter(|| { - for _ in 0..4 { - buf.extend_from_slice(&data); - } - test::black_box(&buf); - unsafe { - buf.set_len(0); - } - }); -} - -#[bench] -fn put_u8_vec_push(b: &mut Bencher) { - let mut buf = Vec::::with_capacity(256); - let cnt = 128; - - b.bytes = cnt as u64; - b.iter(|| { - for _ in 0..cnt { - buf.push(b'x'); - } - test::black_box(&buf); - unsafe { - buf.set_len(0); - } - }); -} diff --git a/third_party/rust/bytes-0.5.6/ci/test-stable.sh b/third_party/rust/bytes-0.5.6/ci/test-stable.sh deleted file mode 100644 index 01a32f5a661c..000000000000 --- a/third_party/rust/bytes-0.5.6/ci/test-stable.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash - -set -ex - -cmd="${1:-test}" - -# Install cargo-hack for feature flag test -cargo install cargo-hack - -# Run with each feature -# * --each-feature includes both default/no-default features -# * --optional-deps is needed for serde feature -cargo hack "${cmd}" --each-feature --optional-deps -# Run with all features -cargo "${cmd}" --all-features - -cargo doc --no-deps --all-features - -if [[ "${RUST_VERSION}" == "nightly"* ]]; then - # Check benchmarks - cargo check --benches - - # Check minimal versions - cargo clean - cargo update -Zminimal-versions - cargo check --all-features -fi diff --git a/third_party/rust/bytes-0.5.6/ci/tsan.sh b/third_party/rust/bytes-0.5.6/ci/tsan.sh deleted file mode 100644 index 
ca520bd7fd04..000000000000 --- a/third_party/rust/bytes-0.5.6/ci/tsan.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash - -set -ex - -export ASAN_OPTIONS="detect_odr_violation=0 detect_leaks=0" - -# Run address sanitizer -RUSTFLAGS="-Z sanitizer=address" \ -cargo test --target x86_64-unknown-linux-gnu --test test_bytes --test test_buf --test test_buf_mut - -# Run thread sanitizer -RUSTFLAGS="-Z sanitizer=thread" \ -cargo -Zbuild-std test --target x86_64-unknown-linux-gnu --test test_bytes --test test_buf --test test_buf_mut diff --git a/third_party/rust/bytes-0.5.6/src/buf/buf_impl.rs b/third_party/rust/bytes-0.5.6/src/buf/buf_impl.rs deleted file mode 100644 index 5cd7c686e504..000000000000 --- a/third_party/rust/bytes-0.5.6/src/buf/buf_impl.rs +++ /dev/null @@ -1,1007 +0,0 @@ -use core::{cmp, mem, ptr}; - -#[cfg(feature = "std")] -use std::io::IoSlice; - -use alloc::boxed::Box; - -macro_rules! buf_get_impl { - ($this:ident, $typ:tt::$conv:tt) => {{ - const SIZE: usize = mem::size_of::<$typ>(); - // try to convert directly from the bytes - // this Option trick is to avoid keeping a borrow on self - // when advance() is called (mut borrow) and to call bytes() only once - let ret = $this - .bytes() - .get(..SIZE) - .map(|src| unsafe { $typ::$conv(*(src as *const _ as *const [_; SIZE])) }); - - if let Some(ret) = ret { - // if the direct conversion was possible, advance and return - $this.advance(SIZE); - return ret; - } else { - // if not we copy the bytes in a temp buffer then convert - let mut buf = [0; SIZE]; - $this.copy_to_slice(&mut buf); // (do the advance) - return $typ::$conv(buf); - } - }}; - (le => $this:ident, $typ:tt, $len_to_read:expr) => {{ - debug_assert!(mem::size_of::<$typ>() >= $len_to_read); - - // The same trick as above does not improve the best case speed. - // It seems to be linked to the way the method is optimised by the compiler - let mut buf = [0; (mem::size_of::<$typ>())]; - $this.copy_to_slice(&mut buf[..($len_to_read)]); - return $typ::from_le_bytes(buf); - }}; - (be => $this:ident, $typ:tt, $len_to_read:expr) => {{ - debug_assert!(mem::size_of::<$typ>() >= $len_to_read); - - let mut buf = [0; (mem::size_of::<$typ>())]; - $this.copy_to_slice(&mut buf[mem::size_of::<$typ>() - ($len_to_read)..]); - return $typ::from_be_bytes(buf); - }}; -} - -/// Read bytes from a buffer. -/// -/// A buffer stores bytes in memory such that read operations are infallible. -/// The underlying storage may or may not be in contiguous memory. A `Buf` value -/// is a cursor into the buffer. Reading from `Buf` advances the cursor -/// position. It can be thought of as an efficient `Iterator` for collections of -/// bytes. -/// -/// The simplest `Buf` is a `&[u8]`. -/// -/// ``` -/// use bytes::Buf; -/// -/// let mut buf = &b"hello world"[..]; -/// -/// assert_eq!(b'h', buf.get_u8()); -/// assert_eq!(b'e', buf.get_u8()); -/// assert_eq!(b'l', buf.get_u8()); -/// -/// let mut rest = [0; 8]; -/// buf.copy_to_slice(&mut rest); -/// -/// assert_eq!(&rest[..], &b"lo world"[..]); -/// ``` -pub trait Buf { - /// Returns the number of bytes between the current position and the end of - /// the buffer. - /// - /// This value is greater than or equal to the length of the slice returned - /// by `bytes`. 
- /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf = &b"hello world"[..]; - /// - /// assert_eq!(buf.remaining(), 11); - /// - /// buf.get_u8(); - /// - /// assert_eq!(buf.remaining(), 10); - /// ``` - /// - /// # Implementer notes - /// - /// Implementations of `remaining` should ensure that the return value does - /// not change unless a call is made to `advance` or any other function that - /// is documented to change the `Buf`'s current position. - fn remaining(&self) -> usize; - - /// Returns a slice starting at the current position and of length between 0 - /// and `Buf::remaining()`. Note that this *can* return shorter slice (this allows - /// non-continuous internal representation). - /// - /// This is a lower level function. Most operations are done with other - /// functions. - /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf = &b"hello world"[..]; - /// - /// assert_eq!(buf.bytes(), &b"hello world"[..]); - /// - /// buf.advance(6); - /// - /// assert_eq!(buf.bytes(), &b"world"[..]); - /// ``` - /// - /// # Implementer notes - /// - /// This function should never panic. Once the end of the buffer is reached, - /// i.e., `Buf::remaining` returns 0, calls to `bytes` should return an - /// empty slice. - fn bytes(&self) -> &[u8]; - - /// Fills `dst` with potentially multiple slices starting at `self`'s - /// current position. - /// - /// If the `Buf` is backed by disjoint slices of bytes, `bytes_vectored` enables - /// fetching more than one slice at once. `dst` is a slice of `IoSlice` - /// references, enabling the slice to be directly used with [`writev`] - /// without any further conversion. The sum of the lengths of all the - /// buffers in `dst` will be less than or equal to `Buf::remaining()`. - /// - /// The entries in `dst` will be overwritten, but the data **contained** by - /// the slices **will not** be modified. If `bytes_vectored` does not fill every - /// entry in `dst`, then `dst` is guaranteed to contain all remaining slices - /// in `self. - /// - /// This is a lower level function. Most operations are done with other - /// functions. - /// - /// # Implementer notes - /// - /// This function should never panic. Once the end of the buffer is reached, - /// i.e., `Buf::remaining` returns 0, calls to `bytes_vectored` must return 0 - /// without mutating `dst`. - /// - /// Implementations should also take care to properly handle being called - /// with `dst` being a zero length slice. - /// - /// [`writev`]: http://man7.org/linux/man-pages/man2/readv.2.html - #[cfg(feature = "std")] - fn bytes_vectored<'a>(&'a self, dst: &mut [IoSlice<'a>]) -> usize { - if dst.is_empty() { - return 0; - } - - if self.has_remaining() { - dst[0] = IoSlice::new(self.bytes()); - 1 - } else { - 0 - } - } - - /// Advance the internal cursor of the Buf - /// - /// The next call to `bytes` will return a slice starting `cnt` bytes - /// further into the underlying buffer. - /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf = &b"hello world"[..]; - /// - /// assert_eq!(buf.bytes(), &b"hello world"[..]); - /// - /// buf.advance(6); - /// - /// assert_eq!(buf.bytes(), &b"world"[..]); - /// ``` - /// - /// # Panics - /// - /// This function **may** panic if `cnt > self.remaining()`. - /// - /// # Implementer notes - /// - /// It is recommended for implementations of `advance` to panic if `cnt > - /// self.remaining()`. 
If the implementation does not panic, the call must - /// behave as if `cnt == self.remaining()`. - /// - /// A call with `cnt == 0` should never panic and be a no-op. - fn advance(&mut self, cnt: usize); - - /// Returns true if there are any more bytes to consume - /// - /// This is equivalent to `self.remaining() != 0`. - /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf = &b"a"[..]; - /// - /// assert!(buf.has_remaining()); - /// - /// buf.get_u8(); - /// - /// assert!(!buf.has_remaining()); - /// ``` - fn has_remaining(&self) -> bool { - self.remaining() > 0 - } - - /// Copies bytes from `self` into `dst`. - /// - /// The cursor is advanced by the number of bytes copied. `self` must have - /// enough remaining bytes to fill `dst`. - /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf = &b"hello world"[..]; - /// let mut dst = [0; 5]; - /// - /// buf.copy_to_slice(&mut dst); - /// assert_eq!(&b"hello"[..], &dst); - /// assert_eq!(6, buf.remaining()); - /// ``` - /// - /// # Panics - /// - /// This function panics if `self.remaining() < dst.len()` - fn copy_to_slice(&mut self, dst: &mut [u8]) { - let mut off = 0; - - assert!(self.remaining() >= dst.len()); - - while off < dst.len() { - let cnt; - - unsafe { - let src = self.bytes(); - cnt = cmp::min(src.len(), dst.len() - off); - - ptr::copy_nonoverlapping(src.as_ptr(), dst[off..].as_mut_ptr(), cnt); - - off += cnt; - } - - self.advance(cnt); - } - } - - /// Gets an unsigned 8 bit integer from `self`. - /// - /// The current position is advanced by 1. - /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf = &b"\x08 hello"[..]; - /// assert_eq!(8, buf.get_u8()); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is no more remaining data in `self`. - fn get_u8(&mut self) -> u8 { - assert!(self.remaining() >= 1); - let ret = self.bytes()[0]; - self.advance(1); - ret - } - - /// Gets a signed 8 bit integer from `self`. - /// - /// The current position is advanced by 1. - /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf = &b"\x08 hello"[..]; - /// assert_eq!(8, buf.get_i8()); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is no more remaining data in `self`. - fn get_i8(&mut self) -> i8 { - assert!(self.remaining() >= 1); - let ret = self.bytes()[0] as i8; - self.advance(1); - ret - } - - /// Gets an unsigned 16 bit integer from `self` in big-endian byte order. - /// - /// The current position is advanced by 2. - /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf = &b"\x08\x09 hello"[..]; - /// assert_eq!(0x0809, buf.get_u16()); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining data in `self`. - fn get_u16(&mut self) -> u16 { - buf_get_impl!(self, u16::from_be_bytes); - } - - /// Gets an unsigned 16 bit integer from `self` in little-endian byte order. - /// - /// The current position is advanced by 2. - /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf = &b"\x09\x08 hello"[..]; - /// assert_eq!(0x0809, buf.get_u16_le()); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining data in `self`. - fn get_u16_le(&mut self) -> u16 { - buf_get_impl!(self, u16::from_le_bytes); - } - - /// Gets a signed 16 bit integer from `self` in big-endian byte order. - /// - /// The current position is advanced by 2. 
- /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf = &b"\x08\x09 hello"[..]; - /// assert_eq!(0x0809, buf.get_i16()); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining data in `self`. - fn get_i16(&mut self) -> i16 { - buf_get_impl!(self, i16::from_be_bytes); - } - - /// Gets a signed 16 bit integer from `self` in little-endian byte order. - /// - /// The current position is advanced by 2. - /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf = &b"\x09\x08 hello"[..]; - /// assert_eq!(0x0809, buf.get_i16_le()); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining data in `self`. - fn get_i16_le(&mut self) -> i16 { - buf_get_impl!(self, i16::from_le_bytes); - } - - /// Gets an unsigned 32 bit integer from `self` in the big-endian byte order. - /// - /// The current position is advanced by 4. - /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf = &b"\x08\x09\xA0\xA1 hello"[..]; - /// assert_eq!(0x0809A0A1, buf.get_u32()); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining data in `self`. - fn get_u32(&mut self) -> u32 { - buf_get_impl!(self, u32::from_be_bytes); - } - - /// Gets an unsigned 32 bit integer from `self` in the little-endian byte order. - /// - /// The current position is advanced by 4. - /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf = &b"\xA1\xA0\x09\x08 hello"[..]; - /// assert_eq!(0x0809A0A1, buf.get_u32_le()); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining data in `self`. - fn get_u32_le(&mut self) -> u32 { - buf_get_impl!(self, u32::from_le_bytes); - } - - /// Gets a signed 32 bit integer from `self` in big-endian byte order. - /// - /// The current position is advanced by 4. - /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf = &b"\x08\x09\xA0\xA1 hello"[..]; - /// assert_eq!(0x0809A0A1, buf.get_i32()); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining data in `self`. - fn get_i32(&mut self) -> i32 { - buf_get_impl!(self, i32::from_be_bytes); - } - - /// Gets a signed 32 bit integer from `self` in little-endian byte order. - /// - /// The current position is advanced by 4. - /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf = &b"\xA1\xA0\x09\x08 hello"[..]; - /// assert_eq!(0x0809A0A1, buf.get_i32_le()); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining data in `self`. - fn get_i32_le(&mut self) -> i32 { - buf_get_impl!(self, i32::from_le_bytes); - } - - /// Gets an unsigned 64 bit integer from `self` in big-endian byte order. - /// - /// The current position is advanced by 8. - /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf = &b"\x01\x02\x03\x04\x05\x06\x07\x08 hello"[..]; - /// assert_eq!(0x0102030405060708, buf.get_u64()); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining data in `self`. - fn get_u64(&mut self) -> u64 { - buf_get_impl!(self, u64::from_be_bytes); - } - - /// Gets an unsigned 64 bit integer from `self` in little-endian byte order. - /// - /// The current position is advanced by 8. 
- /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf = &b"\x08\x07\x06\x05\x04\x03\x02\x01 hello"[..]; - /// assert_eq!(0x0102030405060708, buf.get_u64_le()); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining data in `self`. - fn get_u64_le(&mut self) -> u64 { - buf_get_impl!(self, u64::from_le_bytes); - } - - /// Gets a signed 64 bit integer from `self` in big-endian byte order. - /// - /// The current position is advanced by 8. - /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf = &b"\x01\x02\x03\x04\x05\x06\x07\x08 hello"[..]; - /// assert_eq!(0x0102030405060708, buf.get_i64()); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining data in `self`. - fn get_i64(&mut self) -> i64 { - buf_get_impl!(self, i64::from_be_bytes); - } - - /// Gets a signed 64 bit integer from `self` in little-endian byte order. - /// - /// The current position is advanced by 8. - /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf = &b"\x08\x07\x06\x05\x04\x03\x02\x01 hello"[..]; - /// assert_eq!(0x0102030405060708, buf.get_i64_le()); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining data in `self`. - fn get_i64_le(&mut self) -> i64 { - buf_get_impl!(self, i64::from_le_bytes); - } - - /// Gets an unsigned 128 bit integer from `self` in big-endian byte order. - /// - /// The current position is advanced by 16. - /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf = &b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16 hello"[..]; - /// assert_eq!(0x01020304050607080910111213141516, buf.get_u128()); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining data in `self`. - fn get_u128(&mut self) -> u128 { - buf_get_impl!(self, u128::from_be_bytes); - } - - /// Gets an unsigned 128 bit integer from `self` in little-endian byte order. - /// - /// The current position is advanced by 16. - /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf = &b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01 hello"[..]; - /// assert_eq!(0x01020304050607080910111213141516, buf.get_u128_le()); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining data in `self`. - fn get_u128_le(&mut self) -> u128 { - buf_get_impl!(self, u128::from_le_bytes); - } - - /// Gets a signed 128 bit integer from `self` in big-endian byte order. - /// - /// The current position is advanced by 16. - /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf = &b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16 hello"[..]; - /// assert_eq!(0x01020304050607080910111213141516, buf.get_i128()); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining data in `self`. - fn get_i128(&mut self) -> i128 { - buf_get_impl!(self, i128::from_be_bytes); - } - - /// Gets a signed 128 bit integer from `self` in little-endian byte order. - /// - /// The current position is advanced by 16. 
- /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf = &b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01 hello"[..]; - /// assert_eq!(0x01020304050607080910111213141516, buf.get_i128_le()); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining data in `self`. - fn get_i128_le(&mut self) -> i128 { - buf_get_impl!(self, i128::from_le_bytes); - } - - /// Gets an unsigned n-byte integer from `self` in big-endian byte order. - /// - /// The current position is advanced by `nbytes`. - /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf = &b"\x01\x02\x03 hello"[..]; - /// assert_eq!(0x010203, buf.get_uint(3)); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining data in `self`. - fn get_uint(&mut self, nbytes: usize) -> u64 { - buf_get_impl!(be => self, u64, nbytes); - } - - /// Gets an unsigned n-byte integer from `self` in little-endian byte order. - /// - /// The current position is advanced by `nbytes`. - /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf = &b"\x03\x02\x01 hello"[..]; - /// assert_eq!(0x010203, buf.get_uint_le(3)); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining data in `self`. - fn get_uint_le(&mut self, nbytes: usize) -> u64 { - buf_get_impl!(le => self, u64, nbytes); - } - - /// Gets a signed n-byte integer from `self` in big-endian byte order. - /// - /// The current position is advanced by `nbytes`. - /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf = &b"\x01\x02\x03 hello"[..]; - /// assert_eq!(0x010203, buf.get_int(3)); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining data in `self`. - fn get_int(&mut self, nbytes: usize) -> i64 { - buf_get_impl!(be => self, i64, nbytes); - } - - /// Gets a signed n-byte integer from `self` in little-endian byte order. - /// - /// The current position is advanced by `nbytes`. - /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf = &b"\x03\x02\x01 hello"[..]; - /// assert_eq!(0x010203, buf.get_int_le(3)); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining data in `self`. - fn get_int_le(&mut self, nbytes: usize) -> i64 { - buf_get_impl!(le => self, i64, nbytes); - } - - /// Gets an IEEE754 single-precision (4 bytes) floating point number from - /// `self` in big-endian byte order. - /// - /// The current position is advanced by 4. - /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf = &b"\x3F\x99\x99\x9A hello"[..]; - /// assert_eq!(1.2f32, buf.get_f32()); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining data in `self`. - fn get_f32(&mut self) -> f32 { - f32::from_bits(Self::get_u32(self)) - } - - /// Gets an IEEE754 single-precision (4 bytes) floating point number from - /// `self` in little-endian byte order. - /// - /// The current position is advanced by 4. - /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf = &b"\x9A\x99\x99\x3F hello"[..]; - /// assert_eq!(1.2f32, buf.get_f32_le()); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining data in `self`. 
- fn get_f32_le(&mut self) -> f32 { - f32::from_bits(Self::get_u32_le(self)) - } - - /// Gets an IEEE754 double-precision (8 bytes) floating point number from - /// `self` in big-endian byte order. - /// - /// The current position is advanced by 8. - /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf = &b"\x3F\xF3\x33\x33\x33\x33\x33\x33 hello"[..]; - /// assert_eq!(1.2f64, buf.get_f64()); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining data in `self`. - fn get_f64(&mut self) -> f64 { - f64::from_bits(Self::get_u64(self)) - } - - /// Gets an IEEE754 double-precision (8 bytes) floating point number from - /// `self` in little-endian byte order. - /// - /// The current position is advanced by 8. - /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf = &b"\x33\x33\x33\x33\x33\x33\xF3\x3F hello"[..]; - /// assert_eq!(1.2f64, buf.get_f64_le()); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining data in `self`. - fn get_f64_le(&mut self) -> f64 { - f64::from_bits(Self::get_u64_le(self)) - } - - /// Consumes remaining bytes inside self and returns new instance of `Bytes` - /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let bytes = (&b"hello world"[..]).to_bytes(); - /// assert_eq!(&bytes[..], &b"hello world"[..]); - /// ``` - fn to_bytes(&mut self) -> crate::Bytes { - use super::BufMut; - let mut ret = crate::BytesMut::with_capacity(self.remaining()); - ret.put(self); - ret.freeze() - } -} - -macro_rules! deref_forward_buf { - () => { - fn remaining(&self) -> usize { - (**self).remaining() - } - - fn bytes(&self) -> &[u8] { - (**self).bytes() - } - - #[cfg(feature = "std")] - fn bytes_vectored<'b>(&'b self, dst: &mut [IoSlice<'b>]) -> usize { - (**self).bytes_vectored(dst) - } - - fn advance(&mut self, cnt: usize) { - (**self).advance(cnt) - } - - fn has_remaining(&self) -> bool { - (**self).has_remaining() - } - - fn copy_to_slice(&mut self, dst: &mut [u8]) { - (**self).copy_to_slice(dst) - } - - fn get_u8(&mut self) -> u8 { - (**self).get_u8() - } - - fn get_i8(&mut self) -> i8 { - (**self).get_i8() - } - - fn get_u16(&mut self) -> u16 { - (**self).get_u16() - } - - fn get_u16_le(&mut self) -> u16 { - (**self).get_u16_le() - } - - fn get_i16(&mut self) -> i16 { - (**self).get_i16() - } - - fn get_i16_le(&mut self) -> i16 { - (**self).get_i16_le() - } - - fn get_u32(&mut self) -> u32 { - (**self).get_u32() - } - - fn get_u32_le(&mut self) -> u32 { - (**self).get_u32_le() - } - - fn get_i32(&mut self) -> i32 { - (**self).get_i32() - } - - fn get_i32_le(&mut self) -> i32 { - (**self).get_i32_le() - } - - fn get_u64(&mut self) -> u64 { - (**self).get_u64() - } - - fn get_u64_le(&mut self) -> u64 { - (**self).get_u64_le() - } - - fn get_i64(&mut self) -> i64 { - (**self).get_i64() - } - - fn get_i64_le(&mut self) -> i64 { - (**self).get_i64_le() - } - - fn get_uint(&mut self, nbytes: usize) -> u64 { - (**self).get_uint(nbytes) - } - - fn get_uint_le(&mut self, nbytes: usize) -> u64 { - (**self).get_uint_le(nbytes) - } - - fn get_int(&mut self, nbytes: usize) -> i64 { - (**self).get_int(nbytes) - } - - fn get_int_le(&mut self, nbytes: usize) -> i64 { - (**self).get_int_le(nbytes) - } - - fn to_bytes(&mut self) -> crate::Bytes { - (**self).to_bytes() - } - }; -} - -impl Buf for &mut T { - deref_forward_buf!(); -} - -impl Buf for Box { - deref_forward_buf!(); -} - -impl Buf for &[u8] { - #[inline] - fn remaining(&self) -> 
usize { - self.len() - } - - #[inline] - fn bytes(&self) -> &[u8] { - self - } - - #[inline] - fn advance(&mut self, cnt: usize) { - *self = &self[cnt..]; - } -} - -impl Buf for Option<[u8; 1]> { - fn remaining(&self) -> usize { - if self.is_some() { - 1 - } else { - 0 - } - } - - fn bytes(&self) -> &[u8] { - self.as_ref() - .map(AsRef::as_ref) - .unwrap_or(Default::default()) - } - - fn advance(&mut self, cnt: usize) { - if cnt == 0 { - return; - } - - if self.is_none() { - panic!("overflow"); - } else { - assert_eq!(1, cnt); - *self = None; - } - } -} - -#[cfg(feature = "std")] -impl> Buf for std::io::Cursor { - fn remaining(&self) -> usize { - let len = self.get_ref().as_ref().len(); - let pos = self.position(); - - if pos >= len as u64 { - return 0; - } - - len - pos as usize - } - - fn bytes(&self) -> &[u8] { - let len = self.get_ref().as_ref().len(); - let pos = self.position(); - - if pos >= len as u64 { - return &[]; - } - - &self.get_ref().as_ref()[pos as usize..] - } - - fn advance(&mut self, cnt: usize) { - let pos = (self.position() as usize) - .checked_add(cnt) - .expect("overflow"); - - assert!(pos <= self.get_ref().as_ref().len()); - self.set_position(pos as u64); - } -} - -// The existence of this function makes the compiler catch if the Buf -// trait is "object-safe" or not. -fn _assert_trait_object(_b: &dyn Buf) {} diff --git a/third_party/rust/bytes-0.5.6/src/buf/buf_mut.rs b/third_party/rust/bytes-0.5.6/src/buf/buf_mut.rs deleted file mode 100644 index 628b240a385c..000000000000 --- a/third_party/rust/bytes-0.5.6/src/buf/buf_mut.rs +++ /dev/null @@ -1,1100 +0,0 @@ -use core::{ - cmp, - mem::{self, MaybeUninit}, - ptr, usize, -}; - -#[cfg(feature = "std")] -use std::fmt; - -use alloc::{boxed::Box, vec::Vec}; - -/// A trait for values that provide sequential write access to bytes. -/// -/// Write bytes to a buffer -/// -/// A buffer stores bytes in memory such that write operations are infallible. -/// The underlying storage may or may not be in contiguous memory. A `BufMut` -/// value is a cursor into the buffer. Writing to `BufMut` advances the cursor -/// position. -/// -/// The simplest `BufMut` is a `Vec`. -/// -/// ``` -/// use bytes::BufMut; -/// -/// let mut buf = vec![]; -/// -/// buf.put(&b"hello world"[..]); -/// -/// assert_eq!(buf, b"hello world"); -/// ``` -pub trait BufMut { - /// Returns the number of bytes that can be written from the current - /// position until the end of the buffer is reached. - /// - /// This value is greater than or equal to the length of the slice returned - /// by `bytes_mut`. - /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut dst = [0; 10]; - /// let mut buf = &mut dst[..]; - /// - /// let original_remaining = buf.remaining_mut(); - /// buf.put(&b"hello"[..]); - /// - /// assert_eq!(original_remaining - 5, buf.remaining_mut()); - /// ``` - /// - /// # Implementer notes - /// - /// Implementations of `remaining_mut` should ensure that the return value - /// does not change unless a call is made to `advance_mut` or any other - /// function that is documented to change the `BufMut`'s current position. - fn remaining_mut(&self) -> usize; - - /// Advance the internal cursor of the BufMut - /// - /// The next call to `bytes_mut` will return a slice starting `cnt` bytes - /// further into the underlying buffer. - /// - /// This function is unsafe because there is no guarantee that the bytes - /// being advanced past have been initialized. 
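The `Cursor` impl above is the position-aware way to read; a small sketch, again assuming a standalone consumer of bytes 0.5 rather than anything added by this patch:

```
use std::io::Cursor;
use bytes::Buf;

fn main() {
    // Cursor<&[u8]> implements Buf: position() is honoured by
    // remaining()/bytes()/advance(), unlike a bare &[u8], which is re-sliced.
    let mut cur = Cursor::new(&b"hello world"[..]);
    cur.set_position(6);

    assert_eq!(cur.remaining(), 5);
    assert_eq!(cur.bytes(), b"world");

    cur.advance(2);
    assert_eq!(cur.bytes(), b"rld");
}
```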
- /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut buf = Vec::with_capacity(16); - /// - /// unsafe { - /// // MaybeUninit::as_mut_ptr - /// buf.bytes_mut()[0].as_mut_ptr().write(b'h'); - /// buf.bytes_mut()[1].as_mut_ptr().write(b'e'); - /// - /// buf.advance_mut(2); - /// - /// buf.bytes_mut()[0].as_mut_ptr().write(b'l'); - /// buf.bytes_mut()[1].as_mut_ptr().write(b'l'); - /// buf.bytes_mut()[2].as_mut_ptr().write(b'o'); - /// - /// buf.advance_mut(3); - /// } - /// - /// assert_eq!(5, buf.len()); - /// assert_eq!(buf, b"hello"); - /// ``` - /// - /// # Panics - /// - /// This function **may** panic if `cnt > self.remaining_mut()`. - /// - /// # Implementer notes - /// - /// It is recommended for implementations of `advance_mut` to panic if - /// `cnt > self.remaining_mut()`. If the implementation does not panic, - /// the call must behave as if `cnt == self.remaining_mut()`. - /// - /// A call with `cnt == 0` should never panic and be a no-op. - unsafe fn advance_mut(&mut self, cnt: usize); - - /// Returns true if there is space in `self` for more bytes. - /// - /// This is equivalent to `self.remaining_mut() != 0`. - /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut dst = [0; 5]; - /// let mut buf = &mut dst[..]; - /// - /// assert!(buf.has_remaining_mut()); - /// - /// buf.put(&b"hello"[..]); - /// - /// assert!(!buf.has_remaining_mut()); - /// ``` - fn has_remaining_mut(&self) -> bool { - self.remaining_mut() > 0 - } - - /// Returns a mutable slice starting at the current BufMut position and of - /// length between 0 and `BufMut::remaining_mut()`. Note that this *can* be shorter than the - /// whole remainder of the buffer (this allows non-continuous implementation). - /// - /// This is a lower level function. Most operations are done with other - /// functions. - /// - /// The returned byte slice may represent uninitialized memory. - /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut buf = Vec::with_capacity(16); - /// - /// unsafe { - /// // MaybeUninit::as_mut_ptr - /// buf.bytes_mut()[0].as_mut_ptr().write(b'h'); - /// buf.bytes_mut()[1].as_mut_ptr().write(b'e'); - /// - /// buf.advance_mut(2); - /// - /// buf.bytes_mut()[0].as_mut_ptr().write(b'l'); - /// buf.bytes_mut()[1].as_mut_ptr().write(b'l'); - /// buf.bytes_mut()[2].as_mut_ptr().write(b'o'); - /// - /// buf.advance_mut(3); - /// } - /// - /// assert_eq!(5, buf.len()); - /// assert_eq!(buf, b"hello"); - /// ``` - /// - /// # Implementer notes - /// - /// This function should never panic. `bytes_mut` should return an empty - /// slice **if and only if** `remaining_mut` returns 0. In other words, - /// `bytes_mut` returning an empty slice implies that `remaining_mut` will - /// return 0 and `remaining_mut` returning 0 implies that `bytes_mut` will - /// return an empty slice. - fn bytes_mut(&mut self) -> &mut [MaybeUninit]; - - /// Fills `dst` with potentially multiple mutable slices starting at `self`'s - /// current position. - /// - /// If the `BufMut` is backed by disjoint slices of bytes, `bytes_vectored_mut` - /// enables fetching more than one slice at once. `dst` is a slice of - /// mutable `IoSliceMut` references, enabling the slice to be directly used with - /// [`readv`] without any further conversion. The sum of the lengths of all - /// the buffers in `dst` will be less than or equal to - /// `Buf::remaining_mut()`. 
- /// - /// The entries in `dst` will be overwritten, but the data **contained** by - /// the slices **will not** be modified. If `bytes_vectored_mut` does not fill every - /// entry in `dst`, then `dst` is guaranteed to contain all remaining slices - /// in `self. - /// - /// This is a lower level function. Most operations are done with other - /// functions. - /// - /// # Implementer notes - /// - /// This function should never panic. Once the end of the buffer is reached, - /// i.e., `BufMut::remaining_mut` returns 0, calls to `bytes_vectored_mut` must - /// return 0 without mutating `dst`. - /// - /// Implementations should also take care to properly handle being called - /// with `dst` being a zero length slice. - /// - /// [`readv`]: http://man7.org/linux/man-pages/man2/readv.2.html - #[cfg(feature = "std")] - fn bytes_vectored_mut<'a>(&'a mut self, dst: &mut [IoSliceMut<'a>]) -> usize { - if dst.is_empty() { - return 0; - } - - if self.has_remaining_mut() { - dst[0] = IoSliceMut::from(self.bytes_mut()); - 1 - } else { - 0 - } - } - - /// Transfer bytes into `self` from `src` and advance the cursor by the - /// number of bytes written. - /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut buf = vec![]; - /// - /// buf.put_u8(b'h'); - /// buf.put(&b"ello"[..]); - /// buf.put(&b" world"[..]); - /// - /// assert_eq!(buf, b"hello world"); - /// ``` - /// - /// # Panics - /// - /// Panics if `self` does not have enough capacity to contain `src`. - fn put(&mut self, mut src: T) - where - Self: Sized, - { - assert!(self.remaining_mut() >= src.remaining()); - - while src.has_remaining() { - let l; - - unsafe { - let s = src.bytes(); - let d = self.bytes_mut(); - l = cmp::min(s.len(), d.len()); - - ptr::copy_nonoverlapping(s.as_ptr(), d.as_mut_ptr() as *mut u8, l); - } - - src.advance(l); - unsafe { - self.advance_mut(l); - } - } - } - - /// Transfer bytes into `self` from `src` and advance the cursor by the - /// number of bytes written. - /// - /// `self` must have enough remaining capacity to contain all of `src`. - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut dst = [0; 6]; - /// - /// { - /// let mut buf = &mut dst[..]; - /// buf.put_slice(b"hello"); - /// - /// assert_eq!(1, buf.remaining_mut()); - /// } - /// - /// assert_eq!(b"hello\0", &dst); - /// ``` - fn put_slice(&mut self, src: &[u8]) { - let mut off = 0; - - assert!( - self.remaining_mut() >= src.len(), - "buffer overflow; remaining = {}; src = {}", - self.remaining_mut(), - src.len() - ); - - while off < src.len() { - let cnt; - - unsafe { - let dst = self.bytes_mut(); - cnt = cmp::min(dst.len(), src.len() - off); - - ptr::copy_nonoverlapping(src[off..].as_ptr(), dst.as_mut_ptr() as *mut u8, cnt); - - off += cnt; - } - - unsafe { - self.advance_mut(cnt); - } - } - } - - /// Writes an unsigned 8 bit integer to `self`. - /// - /// The current position is advanced by 1. - /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut buf = vec![]; - /// buf.put_u8(0x01); - /// assert_eq!(buf, b"\x01"); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining capacity in - /// `self`. - fn put_u8(&mut self, n: u8) { - let src = [n]; - self.put_slice(&src); - } - - /// Writes a signed 8 bit integer to `self`. - /// - /// The current position is advanced by 1. 
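A short sketch of `put` versus `put_slice` as documented above, under the same assumption of an external crate on bytes 0.5; the buffer contents are illustrative:

```
use bytes::{Buf, BufMut};

fn main() {
    // `put` drains another Buf into a BufMut; `put_slice` copies a plain byte slice.
    let mut src = &b"hello world"[..];
    let mut dst: Vec<u8> = Vec::new();

    dst.put(&mut src); // consumes all of `src`
    dst.put_slice(b"!");

    assert_eq!(src.remaining(), 0);
    assert_eq!(dst, b"hello world!");
}
```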
- /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut buf = vec![]; - /// buf.put_i8(0x01); - /// assert_eq!(buf, b"\x01"); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining capacity in - /// `self`. - fn put_i8(&mut self, n: i8) { - let src = [n as u8]; - self.put_slice(&src) - } - - /// Writes an unsigned 16 bit integer to `self` in big-endian byte order. - /// - /// The current position is advanced by 2. - /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut buf = vec![]; - /// buf.put_u16(0x0809); - /// assert_eq!(buf, b"\x08\x09"); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining capacity in - /// `self`. - fn put_u16(&mut self, n: u16) { - self.put_slice(&n.to_be_bytes()) - } - - /// Writes an unsigned 16 bit integer to `self` in little-endian byte order. - /// - /// The current position is advanced by 2. - /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut buf = vec![]; - /// buf.put_u16_le(0x0809); - /// assert_eq!(buf, b"\x09\x08"); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining capacity in - /// `self`. - fn put_u16_le(&mut self, n: u16) { - self.put_slice(&n.to_le_bytes()) - } - - /// Writes a signed 16 bit integer to `self` in big-endian byte order. - /// - /// The current position is advanced by 2. - /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut buf = vec![]; - /// buf.put_i16(0x0809); - /// assert_eq!(buf, b"\x08\x09"); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining capacity in - /// `self`. - fn put_i16(&mut self, n: i16) { - self.put_slice(&n.to_be_bytes()) - } - - /// Writes a signed 16 bit integer to `self` in little-endian byte order. - /// - /// The current position is advanced by 2. - /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut buf = vec![]; - /// buf.put_i16_le(0x0809); - /// assert_eq!(buf, b"\x09\x08"); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining capacity in - /// `self`. - fn put_i16_le(&mut self, n: i16) { - self.put_slice(&n.to_le_bytes()) - } - - /// Writes an unsigned 32 bit integer to `self` in big-endian byte order. - /// - /// The current position is advanced by 4. - /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut buf = vec![]; - /// buf.put_u32(0x0809A0A1); - /// assert_eq!(buf, b"\x08\x09\xA0\xA1"); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining capacity in - /// `self`. - fn put_u32(&mut self, n: u32) { - self.put_slice(&n.to_be_bytes()) - } - - /// Writes an unsigned 32 bit integer to `self` in little-endian byte order. - /// - /// The current position is advanced by 4. - /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut buf = vec![]; - /// buf.put_u32_le(0x0809A0A1); - /// assert_eq!(buf, b"\xA1\xA0\x09\x08"); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining capacity in - /// `self`. - fn put_u32_le(&mut self, n: u32) { - self.put_slice(&n.to_le_bytes()) - } - - /// Writes a signed 32 bit integer to `self` in big-endian byte order. - /// - /// The current position is advanced by 4. 
- /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut buf = vec![]; - /// buf.put_i32(0x0809A0A1); - /// assert_eq!(buf, b"\x08\x09\xA0\xA1"); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining capacity in - /// `self`. - fn put_i32(&mut self, n: i32) { - self.put_slice(&n.to_be_bytes()) - } - - /// Writes a signed 32 bit integer to `self` in little-endian byte order. - /// - /// The current position is advanced by 4. - /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut buf = vec![]; - /// buf.put_i32_le(0x0809A0A1); - /// assert_eq!(buf, b"\xA1\xA0\x09\x08"); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining capacity in - /// `self`. - fn put_i32_le(&mut self, n: i32) { - self.put_slice(&n.to_le_bytes()) - } - - /// Writes an unsigned 64 bit integer to `self` in the big-endian byte order. - /// - /// The current position is advanced by 8. - /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut buf = vec![]; - /// buf.put_u64(0x0102030405060708); - /// assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08"); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining capacity in - /// `self`. - fn put_u64(&mut self, n: u64) { - self.put_slice(&n.to_be_bytes()) - } - - /// Writes an unsigned 64 bit integer to `self` in little-endian byte order. - /// - /// The current position is advanced by 8. - /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut buf = vec![]; - /// buf.put_u64_le(0x0102030405060708); - /// assert_eq!(buf, b"\x08\x07\x06\x05\x04\x03\x02\x01"); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining capacity in - /// `self`. - fn put_u64_le(&mut self, n: u64) { - self.put_slice(&n.to_le_bytes()) - } - - /// Writes a signed 64 bit integer to `self` in the big-endian byte order. - /// - /// The current position is advanced by 8. - /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut buf = vec![]; - /// buf.put_i64(0x0102030405060708); - /// assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08"); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining capacity in - /// `self`. - fn put_i64(&mut self, n: i64) { - self.put_slice(&n.to_be_bytes()) - } - - /// Writes a signed 64 bit integer to `self` in little-endian byte order. - /// - /// The current position is advanced by 8. - /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut buf = vec![]; - /// buf.put_i64_le(0x0102030405060708); - /// assert_eq!(buf, b"\x08\x07\x06\x05\x04\x03\x02\x01"); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining capacity in - /// `self`. - fn put_i64_le(&mut self, n: i64) { - self.put_slice(&n.to_le_bytes()) - } - - /// Writes an unsigned 128 bit integer to `self` in the big-endian byte order. - /// - /// The current position is advanced by 16. - /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut buf = vec![]; - /// buf.put_u128(0x01020304050607080910111213141516); - /// assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16"); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining capacity in - /// `self`. 
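The fixed-width writers mirror the getters; a hedged sketch against the 0.5 API shown here, with made-up values:

```
use bytes::BufMut;

fn main() {
    // Big-endian and little-endian writers, mirroring the get_* readers.
    let mut buf: Vec<u8> = Vec::new();
    buf.put_u16(0x0102);          // 2 bytes, big-endian
    buf.put_u32_le(0x0809A0A1);   // 4 bytes, little-endian
    buf.put_u64(1);               // 8 bytes, big-endian

    assert_eq!(
        buf,
        b"\x01\x02\xA1\xA0\x09\x08\x00\x00\x00\x00\x00\x00\x00\x01"
    );
}
```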
- fn put_u128(&mut self, n: u128) { - self.put_slice(&n.to_be_bytes()) - } - - /// Writes an unsigned 128 bit integer to `self` in little-endian byte order. - /// - /// The current position is advanced by 16. - /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut buf = vec![]; - /// buf.put_u128_le(0x01020304050607080910111213141516); - /// assert_eq!(buf, b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01"); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining capacity in - /// `self`. - fn put_u128_le(&mut self, n: u128) { - self.put_slice(&n.to_le_bytes()) - } - - /// Writes a signed 128 bit integer to `self` in the big-endian byte order. - /// - /// The current position is advanced by 16. - /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut buf = vec![]; - /// buf.put_i128(0x01020304050607080910111213141516); - /// assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16"); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining capacity in - /// `self`. - fn put_i128(&mut self, n: i128) { - self.put_slice(&n.to_be_bytes()) - } - - /// Writes a signed 128 bit integer to `self` in little-endian byte order. - /// - /// The current position is advanced by 16. - /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut buf = vec![]; - /// buf.put_i128_le(0x01020304050607080910111213141516); - /// assert_eq!(buf, b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01"); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining capacity in - /// `self`. - fn put_i128_le(&mut self, n: i128) { - self.put_slice(&n.to_le_bytes()) - } - - /// Writes an unsigned n-byte integer to `self` in big-endian byte order. - /// - /// The current position is advanced by `nbytes`. - /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut buf = vec![]; - /// buf.put_uint(0x010203, 3); - /// assert_eq!(buf, b"\x01\x02\x03"); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining capacity in - /// `self`. - fn put_uint(&mut self, n: u64, nbytes: usize) { - self.put_slice(&n.to_be_bytes()[mem::size_of_val(&n) - nbytes..]); - } - - /// Writes an unsigned n-byte integer to `self` in the little-endian byte order. - /// - /// The current position is advanced by `nbytes`. - /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut buf = vec![]; - /// buf.put_uint_le(0x010203, 3); - /// assert_eq!(buf, b"\x03\x02\x01"); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining capacity in - /// `self`. - fn put_uint_le(&mut self, n: u64, nbytes: usize) { - self.put_slice(&n.to_le_bytes()[0..nbytes]); - } - - /// Writes a signed n-byte integer to `self` in big-endian byte order. - /// - /// The current position is advanced by `nbytes`. - /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut buf = vec![]; - /// buf.put_int(0x010203, 3); - /// assert_eq!(buf, b"\x01\x02\x03"); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining capacity in - /// `self`. - fn put_int(&mut self, n: i64, nbytes: usize) { - self.put_slice(&n.to_be_bytes()[mem::size_of_val(&n) - nbytes..]); - } - - /// Writes a signed n-byte integer to `self` in little-endian byte order. 
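For the n-byte variants, a small round-trip sketch (same bytes 0.5 assumption; the 3-byte field width is just an example):

```
use bytes::{Buf, BufMut};

fn main() {
    // 3-byte (24-bit) fields round-trip through put_uint / get_uint.
    let mut buf: Vec<u8> = Vec::new();
    buf.put_uint(0x010203, 3);     // big-endian
    buf.put_uint_le(0x010203, 3);  // little-endian
    assert_eq!(buf, b"\x01\x02\x03\x03\x02\x01");

    let mut rd = &buf[..];
    assert_eq!(rd.get_uint(3), 0x010203);
    assert_eq!(rd.get_uint_le(3), 0x010203);
}
```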
- /// - /// The current position is advanced by `nbytes`. - /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut buf = vec![]; - /// buf.put_int_le(0x010203, 3); - /// assert_eq!(buf, b"\x03\x02\x01"); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining capacity in - /// `self`. - fn put_int_le(&mut self, n: i64, nbytes: usize) { - self.put_slice(&n.to_le_bytes()[0..nbytes]); - } - - /// Writes an IEEE754 single-precision (4 bytes) floating point number to - /// `self` in big-endian byte order. - /// - /// The current position is advanced by 4. - /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut buf = vec![]; - /// buf.put_f32(1.2f32); - /// assert_eq!(buf, b"\x3F\x99\x99\x9A"); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining capacity in - /// `self`. - fn put_f32(&mut self, n: f32) { - self.put_u32(n.to_bits()); - } - - /// Writes an IEEE754 single-precision (4 bytes) floating point number to - /// `self` in little-endian byte order. - /// - /// The current position is advanced by 4. - /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut buf = vec![]; - /// buf.put_f32_le(1.2f32); - /// assert_eq!(buf, b"\x9A\x99\x99\x3F"); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining capacity in - /// `self`. - fn put_f32_le(&mut self, n: f32) { - self.put_u32_le(n.to_bits()); - } - - /// Writes an IEEE754 double-precision (8 bytes) floating point number to - /// `self` in big-endian byte order. - /// - /// The current position is advanced by 8. - /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut buf = vec![]; - /// buf.put_f64(1.2f64); - /// assert_eq!(buf, b"\x3F\xF3\x33\x33\x33\x33\x33\x33"); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining capacity in - /// `self`. - fn put_f64(&mut self, n: f64) { - self.put_u64(n.to_bits()); - } - - /// Writes an IEEE754 double-precision (8 bytes) floating point number to - /// `self` in little-endian byte order. - /// - /// The current position is advanced by 8. - /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut buf = vec![]; - /// buf.put_f64_le(1.2f64); - /// assert_eq!(buf, b"\x33\x33\x33\x33\x33\x33\xF3\x3F"); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining capacity in - /// `self`. - fn put_f64_le(&mut self, n: f64) { - self.put_u64_le(n.to_bits()); - } -} - -macro_rules! 
deref_forward_bufmut { - () => { - fn remaining_mut(&self) -> usize { - (**self).remaining_mut() - } - - fn bytes_mut(&mut self) -> &mut [MaybeUninit] { - (**self).bytes_mut() - } - - #[cfg(feature = "std")] - fn bytes_vectored_mut<'b>(&'b mut self, dst: &mut [IoSliceMut<'b>]) -> usize { - (**self).bytes_vectored_mut(dst) - } - - unsafe fn advance_mut(&mut self, cnt: usize) { - (**self).advance_mut(cnt) - } - - fn put_slice(&mut self, src: &[u8]) { - (**self).put_slice(src) - } - - fn put_u8(&mut self, n: u8) { - (**self).put_u8(n) - } - - fn put_i8(&mut self, n: i8) { - (**self).put_i8(n) - } - - fn put_u16(&mut self, n: u16) { - (**self).put_u16(n) - } - - fn put_u16_le(&mut self, n: u16) { - (**self).put_u16_le(n) - } - - fn put_i16(&mut self, n: i16) { - (**self).put_i16(n) - } - - fn put_i16_le(&mut self, n: i16) { - (**self).put_i16_le(n) - } - - fn put_u32(&mut self, n: u32) { - (**self).put_u32(n) - } - - fn put_u32_le(&mut self, n: u32) { - (**self).put_u32_le(n) - } - - fn put_i32(&mut self, n: i32) { - (**self).put_i32(n) - } - - fn put_i32_le(&mut self, n: i32) { - (**self).put_i32_le(n) - } - - fn put_u64(&mut self, n: u64) { - (**self).put_u64(n) - } - - fn put_u64_le(&mut self, n: u64) { - (**self).put_u64_le(n) - } - - fn put_i64(&mut self, n: i64) { - (**self).put_i64(n) - } - - fn put_i64_le(&mut self, n: i64) { - (**self).put_i64_le(n) - } - }; -} - -impl BufMut for &mut T { - deref_forward_bufmut!(); -} - -impl BufMut for Box { - deref_forward_bufmut!(); -} - -impl BufMut for &mut [u8] { - #[inline] - fn remaining_mut(&self) -> usize { - self.len() - } - - #[inline] - fn bytes_mut(&mut self) -> &mut [MaybeUninit] { - // MaybeUninit is repr(transparent), so safe to transmute - unsafe { mem::transmute(&mut **self) } - } - - #[inline] - unsafe fn advance_mut(&mut self, cnt: usize) { - // Lifetime dance taken from `impl Write for &mut [u8]`. - let (_, b) = core::mem::replace(self, &mut []).split_at_mut(cnt); - *self = b; - } -} - -impl BufMut for Vec { - #[inline] - fn remaining_mut(&self) -> usize { - usize::MAX - self.len() - } - - #[inline] - unsafe fn advance_mut(&mut self, cnt: usize) { - let len = self.len(); - let remaining = self.capacity() - len; - - assert!( - cnt <= remaining, - "cannot advance past `remaining_mut`: {:?} <= {:?}", - cnt, - remaining - ); - - self.set_len(len + cnt); - } - - #[inline] - fn bytes_mut(&mut self) -> &mut [MaybeUninit] { - use core::slice; - - if self.capacity() == self.len() { - self.reserve(64); // Grow the vec - } - - let cap = self.capacity(); - let len = self.len(); - - let ptr = self.as_mut_ptr() as *mut MaybeUninit; - unsafe { &mut slice::from_raw_parts_mut(ptr, cap)[len..] } - } - - // Specialize these methods so they can skip checking `remaining_mut` - // and `advance_mut`. - - fn put(&mut self, mut src: T) - where - Self: Sized, - { - // In case the src isn't contiguous, reserve upfront - self.reserve(src.remaining()); - - while src.has_remaining() { - let l; - - // a block to contain the src.bytes() borrow - { - let s = src.bytes(); - l = s.len(); - self.extend_from_slice(s); - } - - src.advance(l); - } - } - - fn put_slice(&mut self, src: &[u8]) { - self.extend_from_slice(src); - } -} - -// The existence of this function makes the compiler catch if the BufMut -// trait is "object-safe" or not. -fn _assert_trait_object(_b: &dyn BufMut) {} - -// ===== impl IoSliceMut ===== - -/// A buffer type used for `readv`. 
-/// -/// This is a wrapper around an `std::io::IoSliceMut`, but does not expose -/// the inner bytes in a safe API, as they may point at uninitialized memory. -/// -/// This is `repr(transparent)` of the `std::io::IoSliceMut`, so it is valid to -/// transmute them. However, as the memory might be uninitialized, care must be -/// taken to not *read* the internal bytes, only *write* to them. -#[repr(transparent)] -#[cfg(feature = "std")] -pub struct IoSliceMut<'a>(std::io::IoSliceMut<'a>); - -#[cfg(feature = "std")] -impl fmt::Debug for IoSliceMut<'_> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("IoSliceMut") - .field("len", &self.0.len()) - .finish() - } -} - -#[cfg(feature = "std")] -impl<'a> From<&'a mut [u8]> for IoSliceMut<'a> { - fn from(buf: &'a mut [u8]) -> IoSliceMut<'a> { - IoSliceMut(std::io::IoSliceMut::new(buf)) - } -} - -#[cfg(feature = "std")] -impl<'a> From<&'a mut [MaybeUninit]> for IoSliceMut<'a> { - fn from(buf: &'a mut [MaybeUninit]) -> IoSliceMut<'a> { - IoSliceMut(std::io::IoSliceMut::new(unsafe { - // We don't look at the contents, and `std::io::IoSliceMut` - // doesn't either. - mem::transmute::<&'a mut [MaybeUninit], &'a mut [u8]>(buf) - })) - } -} diff --git a/third_party/rust/bytes-0.5.6/src/buf/ext/chain.rs b/third_party/rust/bytes-0.5.6/src/buf/ext/chain.rs deleted file mode 100644 index e62e2f1b96b9..000000000000 --- a/third_party/rust/bytes-0.5.6/src/buf/ext/chain.rs +++ /dev/null @@ -1,233 +0,0 @@ -use crate::buf::IntoIter; -use crate::{Buf, BufMut}; - -use core::mem::MaybeUninit; - -#[cfg(feature = "std")] -use crate::buf::IoSliceMut; -#[cfg(feature = "std")] -use std::io::IoSlice; - -/// A `Chain` sequences two buffers. -/// -/// `Chain` is an adapter that links two underlying buffers and provides a -/// continuous view across both buffers. It is able to sequence either immutable -/// buffers ([`Buf`] values) or mutable buffers ([`BufMut`] values). -/// -/// This struct is generally created by calling [`Buf::chain`]. Please see that -/// function's documentation for more detail. -/// -/// # Examples -/// -/// ``` -/// use bytes::{Bytes, Buf, buf::BufExt}; -/// -/// let mut buf = (&b"hello "[..]) -/// .chain(&b"world"[..]); -/// -/// let full: Bytes = buf.to_bytes(); -/// assert_eq!(full[..], b"hello world"[..]); -/// ``` -/// -/// [`Buf::chain`]: trait.Buf.html#method.chain -/// [`Buf`]: trait.Buf.html -/// [`BufMut`]: trait.BufMut.html -#[derive(Debug)] -pub struct Chain { - a: T, - b: U, -} - -impl Chain { - /// Creates a new `Chain` sequencing the provided values. - pub fn new(a: T, b: U) -> Chain { - Chain { a, b } - } - - /// Gets a reference to the first underlying `Buf`. - /// - /// # Examples - /// - /// ``` - /// use bytes::buf::BufExt; - /// - /// let buf = (&b"hello"[..]) - /// .chain(&b"world"[..]); - /// - /// assert_eq!(buf.first_ref()[..], b"hello"[..]); - /// ``` - pub fn first_ref(&self) -> &T { - &self.a - } - - /// Gets a mutable reference to the first underlying `Buf`. - /// - /// # Examples - /// - /// ``` - /// use bytes::{Buf, buf::BufExt}; - /// - /// let mut buf = (&b"hello"[..]) - /// .chain(&b"world"[..]); - /// - /// buf.first_mut().advance(1); - /// - /// let full = buf.to_bytes(); - /// assert_eq!(full, b"elloworld"[..]); - /// ``` - pub fn first_mut(&mut self) -> &mut T { - &mut self.a - } - - /// Gets a reference to the last underlying `Buf`. 
- /// - /// # Examples - /// - /// ``` - /// use bytes::buf::BufExt; - /// - /// let buf = (&b"hello"[..]) - /// .chain(&b"world"[..]); - /// - /// assert_eq!(buf.last_ref()[..], b"world"[..]); - /// ``` - pub fn last_ref(&self) -> &U { - &self.b - } - - /// Gets a mutable reference to the last underlying `Buf`. - /// - /// # Examples - /// - /// ``` - /// use bytes::{Buf, buf::BufExt}; - /// - /// let mut buf = (&b"hello "[..]) - /// .chain(&b"world"[..]); - /// - /// buf.last_mut().advance(1); - /// - /// let full = buf.to_bytes(); - /// assert_eq!(full, b"hello orld"[..]); - /// ``` - pub fn last_mut(&mut self) -> &mut U { - &mut self.b - } - - /// Consumes this `Chain`, returning the underlying values. - /// - /// # Examples - /// - /// ``` - /// use bytes::buf::BufExt; - /// - /// let chain = (&b"hello"[..]) - /// .chain(&b"world"[..]); - /// - /// let (first, last) = chain.into_inner(); - /// assert_eq!(first[..], b"hello"[..]); - /// assert_eq!(last[..], b"world"[..]); - /// ``` - pub fn into_inner(self) -> (T, U) { - (self.a, self.b) - } -} - -impl Buf for Chain -where - T: Buf, - U: Buf, -{ - fn remaining(&self) -> usize { - self.a.remaining() + self.b.remaining() - } - - fn bytes(&self) -> &[u8] { - if self.a.has_remaining() { - self.a.bytes() - } else { - self.b.bytes() - } - } - - fn advance(&mut self, mut cnt: usize) { - let a_rem = self.a.remaining(); - - if a_rem != 0 { - if a_rem >= cnt { - self.a.advance(cnt); - return; - } - - // Consume what is left of a - self.a.advance(a_rem); - - cnt -= a_rem; - } - - self.b.advance(cnt); - } - - #[cfg(feature = "std")] - fn bytes_vectored<'a>(&'a self, dst: &mut [IoSlice<'a>]) -> usize { - let mut n = self.a.bytes_vectored(dst); - n += self.b.bytes_vectored(&mut dst[n..]); - n - } -} - -impl BufMut for Chain -where - T: BufMut, - U: BufMut, -{ - fn remaining_mut(&self) -> usize { - self.a.remaining_mut() + self.b.remaining_mut() - } - - fn bytes_mut(&mut self) -> &mut [MaybeUninit] { - if self.a.has_remaining_mut() { - self.a.bytes_mut() - } else { - self.b.bytes_mut() - } - } - - unsafe fn advance_mut(&mut self, mut cnt: usize) { - let a_rem = self.a.remaining_mut(); - - if a_rem != 0 { - if a_rem >= cnt { - self.a.advance_mut(cnt); - return; - } - - // Consume what is left of a - self.a.advance_mut(a_rem); - - cnt -= a_rem; - } - - self.b.advance_mut(cnt); - } - - #[cfg(feature = "std")] - fn bytes_vectored_mut<'a>(&'a mut self, dst: &mut [IoSliceMut<'a>]) -> usize { - let mut n = self.a.bytes_vectored_mut(dst); - n += self.b.bytes_vectored_mut(&mut dst[n..]); - n - } -} - -impl IntoIterator for Chain -where - T: Buf, - U: Buf, -{ - type Item = u8; - type IntoIter = IntoIter>; - - fn into_iter(self) -> Self::IntoIter { - IntoIter::new(self) - } -} diff --git a/third_party/rust/bytes-0.5.6/src/buf/ext/limit.rs b/third_party/rust/bytes-0.5.6/src/buf/ext/limit.rs deleted file mode 100644 index a36eceeef16e..000000000000 --- a/third_party/rust/bytes-0.5.6/src/buf/ext/limit.rs +++ /dev/null @@ -1,74 +0,0 @@ -use crate::BufMut; - -use core::{cmp, mem::MaybeUninit}; - -/// A `BufMut` adapter which limits the amount of bytes that can be written -/// to an underlying buffer. -#[derive(Debug)] -pub struct Limit { - inner: T, - limit: usize, -} - -pub(super) fn new(inner: T, limit: usize) -> Limit { - Limit { inner, limit } -} - -impl Limit { - /// Consumes this `Limit`, returning the underlying value. - pub fn into_inner(self) -> T { - self.inner - } - - /// Gets a reference to the underlying `BufMut`. 
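A compact sketch of `Chain` as used through `BufExt::chain` in the docs above, assuming a standalone bytes 0.5 consumer (`to_bytes` is the 0.5-era name):

```
use bytes::{Buf, buf::BufExt};

fn main() {
    // Chain presents two buffers as one contiguous Buf.
    let mut chained = (&b"hello "[..]).chain(&b"world"[..]);

    assert_eq!(chained.remaining(), 11);
    let all = chained.to_bytes();
    assert_eq!(&all[..], &b"hello world"[..]);
}
```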
- /// - /// It is inadvisable to directly write to the underlying `BufMut`. - pub fn get_ref(&self) -> &T { - &self.inner - } - - /// Gets a mutable reference to the underlying `BufMut`. - /// - /// It is inadvisable to directly write to the underlying `BufMut`. - pub fn get_mut(&mut self) -> &mut T { - &mut self.inner - } - - /// Returns the maximum number of bytes that can be written - /// - /// # Note - /// - /// If the inner `BufMut` has fewer bytes than indicated by this method then - /// that is the actual number of available bytes. - pub fn limit(&self) -> usize { - self.limit - } - - /// Sets the maximum number of bytes that can be written. - /// - /// # Note - /// - /// If the inner `BufMut` has fewer bytes than `lim` then that is the actual - /// number of available bytes. - pub fn set_limit(&mut self, lim: usize) { - self.limit = lim - } -} - -impl BufMut for Limit { - fn remaining_mut(&self) -> usize { - cmp::min(self.inner.remaining_mut(), self.limit) - } - - fn bytes_mut(&mut self) -> &mut [MaybeUninit] { - let bytes = self.inner.bytes_mut(); - let end = cmp::min(bytes.len(), self.limit); - &mut bytes[..end] - } - - unsafe fn advance_mut(&mut self, cnt: usize) { - assert!(cnt <= self.limit); - self.inner.advance_mut(cnt); - self.limit -= cnt; - } -} diff --git a/third_party/rust/bytes-0.5.6/src/buf/ext/mod.rs b/third_party/rust/bytes-0.5.6/src/buf/ext/mod.rs deleted file mode 100644 index 4a292676a213..000000000000 --- a/third_party/rust/bytes-0.5.6/src/buf/ext/mod.rs +++ /dev/null @@ -1,186 +0,0 @@ -//! Extra utilities for `Buf` and `BufMut` types. - -use super::{Buf, BufMut}; - -mod chain; -mod limit; -#[cfg(feature = "std")] -mod reader; -mod take; -#[cfg(feature = "std")] -mod writer; - -pub use self::chain::Chain; -pub use self::limit::Limit; -pub use self::take::Take; - -#[cfg(feature = "std")] -pub use self::{reader::Reader, writer::Writer}; - -/// Extra methods for implementations of `Buf`. -pub trait BufExt: Buf { - /// Creates an adaptor which will read at most `limit` bytes from `self`. - /// - /// This function returns a new instance of `Buf` which will read at most - /// `limit` bytes. - /// - /// # Examples - /// - /// ``` - /// use bytes::{BufMut, buf::BufExt}; - /// - /// let mut buf = b"hello world"[..].take(5); - /// let mut dst = vec![]; - /// - /// dst.put(&mut buf); - /// assert_eq!(dst, b"hello"); - /// - /// let mut buf = buf.into_inner(); - /// dst.clear(); - /// dst.put(&mut buf); - /// assert_eq!(dst, b" world"); - /// ``` - fn take(self, limit: usize) -> Take - where - Self: Sized, - { - take::new(self, limit) - } - - /// Creates an adaptor which will chain this buffer with another. - /// - /// The returned `Buf` instance will first consume all bytes from `self`. - /// Afterwards the output is equivalent to the output of next. - /// - /// # Examples - /// - /// ``` - /// use bytes::{Buf, buf::BufExt}; - /// - /// let mut chain = b"hello "[..].chain(&b"world"[..]); - /// - /// let full = chain.to_bytes(); - /// assert_eq!(full.bytes(), b"hello world"); - /// ``` - fn chain(self, next: U) -> Chain - where - Self: Sized, - { - Chain::new(self, next) - } - - /// Creates an adaptor which implements the `Read` trait for `self`. - /// - /// This function returns a new value which implements `Read` by adapting - /// the `Read` trait functions to the `Buf` trait functions. Given that - /// `Buf` operations are infallible, none of the `Read` functions will - /// return with `Err`. 
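Combining the `take` and `chain` adaptors described above; a sketch only, with assumed buffer contents and a bytes 0.5 dependency:

```
use bytes::{Buf, BufMut, buf::BufExt};

fn main() {
    // `take` caps how much of a (here: chained) Buf a consumer may read.
    let chained = (&b"hello "[..]).chain(&b"world"[..]);
    let mut capped = chained.take(8);

    let mut dst: Vec<u8> = vec![];
    dst.put(&mut capped);

    assert_eq!(dst, b"hello wo");
    // The untouched remainder is still available on the inner Buf.
    assert_eq!(capped.into_inner().remaining(), 3);
}
```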
- /// - /// # Examples - /// - /// ``` - /// use bytes::{Bytes, buf::BufExt}; - /// use std::io::Read; - /// - /// let buf = Bytes::from("hello world"); - /// - /// let mut reader = buf.reader(); - /// let mut dst = [0; 1024]; - /// - /// let num = reader.read(&mut dst).unwrap(); - /// - /// assert_eq!(11, num); - /// assert_eq!(&dst[..11], &b"hello world"[..]); - /// ``` - #[cfg(feature = "std")] - fn reader(self) -> Reader - where - Self: Sized, - { - reader::new(self) - } -} - -impl BufExt for B {} - -/// Extra methods for implementations of `BufMut`. -pub trait BufMutExt: BufMut { - /// Creates an adaptor which can write at most `limit` bytes to `self`. - /// - /// # Examples - /// - /// ``` - /// use bytes::{BufMut, buf::BufMutExt}; - /// - /// let arr = &mut [0u8; 128][..]; - /// assert_eq!(arr.remaining_mut(), 128); - /// - /// let dst = arr.limit(10); - /// assert_eq!(dst.remaining_mut(), 10); - /// ``` - fn limit(self, limit: usize) -> Limit - where - Self: Sized, - { - limit::new(self, limit) - } - - /// Creates an adaptor which implements the `Write` trait for `self`. - /// - /// This function returns a new value which implements `Write` by adapting - /// the `Write` trait functions to the `BufMut` trait functions. Given that - /// `BufMut` operations are infallible, none of the `Write` functions will - /// return with `Err`. - /// - /// # Examples - /// - /// ``` - /// use bytes::buf::BufMutExt; - /// use std::io::Write; - /// - /// let mut buf = vec![].writer(); - /// - /// let num = buf.write(&b"hello world"[..]).unwrap(); - /// assert_eq!(11, num); - /// - /// let buf = buf.into_inner(); - /// - /// assert_eq!(*buf, b"hello world"[..]); - /// ``` - #[cfg(feature = "std")] - fn writer(self) -> Writer - where - Self: Sized, - { - writer::new(self) - } - - /// Creates an adapter which will chain this buffer with another. - /// - /// The returned `BufMut` instance will first write to all bytes from - /// `self`. Afterwards, it will write to `next`. - /// - /// # Examples - /// - /// ``` - /// use bytes::{BufMut, buf::BufMutExt}; - /// - /// let mut a = [0u8; 5]; - /// let mut b = [0u8; 6]; - /// - /// let mut chain = (&mut a[..]).chain_mut(&mut b[..]); - /// - /// chain.put_slice(b"hello world"); - /// - /// assert_eq!(&a[..], b"hello"); - /// assert_eq!(&b[..], b" world"); - /// ``` - fn chain_mut(self, next: U) -> Chain - where - Self: Sized, - { - Chain::new(self, next) - } -} - -impl BufMutExt for B {} diff --git a/third_party/rust/bytes-0.5.6/src/buf/ext/reader.rs b/third_party/rust/bytes-0.5.6/src/buf/ext/reader.rs deleted file mode 100644 index dde3548bfe97..000000000000 --- a/third_party/rust/bytes-0.5.6/src/buf/ext/reader.rs +++ /dev/null @@ -1,81 +0,0 @@ -use crate::Buf; - -use std::{cmp, io}; - -/// A `Buf` adapter which implements `io::Read` for the inner value. -/// -/// This struct is generally created by calling `reader()` on `Buf`. See -/// documentation of [`reader()`](trait.Buf.html#method.reader) for more -/// details. -#[derive(Debug)] -pub struct Reader { - buf: B, -} - -pub fn new(buf: B) -> Reader { - Reader { buf } -} - -impl Reader { - /// Gets a reference to the underlying `Buf`. - /// - /// It is inadvisable to directly read from the underlying `Buf`. - /// - /// # Examples - /// - /// ```rust - /// use bytes::buf::BufExt; - /// - /// let buf = b"hello world".reader(); - /// - /// assert_eq!(b"hello world", buf.get_ref()); - /// ``` - pub fn get_ref(&self) -> &B { - &self.buf - } - - /// Gets a mutable reference to the underlying `Buf`. 
- /// - /// It is inadvisable to directly read from the underlying `Buf`. - pub fn get_mut(&mut self) -> &mut B { - &mut self.buf - } - - /// Consumes this `Reader`, returning the underlying value. - /// - /// # Examples - /// - /// ```rust - /// use bytes::{Buf, buf::BufExt}; - /// use std::io; - /// - /// let mut buf = b"hello world".reader(); - /// let mut dst = vec![]; - /// - /// io::copy(&mut buf, &mut dst).unwrap(); - /// - /// let buf = buf.into_inner(); - /// assert_eq!(0, buf.remaining()); - /// ``` - pub fn into_inner(self) -> B { - self.buf - } -} - -impl io::Read for Reader { - fn read(&mut self, dst: &mut [u8]) -> io::Result { - let len = cmp::min(self.buf.remaining(), dst.len()); - - Buf::copy_to_slice(&mut self.buf, &mut dst[0..len]); - Ok(len) - } -} - -impl io::BufRead for Reader { - fn fill_buf(&mut self) -> io::Result<&[u8]> { - Ok(self.buf.bytes()) - } - fn consume(&mut self, amt: usize) { - self.buf.advance(amt) - } -} diff --git a/third_party/rust/bytes-0.5.6/src/buf/ext/take.rs b/third_party/rust/bytes-0.5.6/src/buf/ext/take.rs deleted file mode 100644 index 1d84868bfb4d..000000000000 --- a/third_party/rust/bytes-0.5.6/src/buf/ext/take.rs +++ /dev/null @@ -1,147 +0,0 @@ -use crate::Buf; - -use core::cmp; - -/// A `Buf` adapter which limits the bytes read from an underlying buffer. -/// -/// This struct is generally created by calling `take()` on `Buf`. See -/// documentation of [`take()`](trait.BufExt.html#method.take) for more details. -#[derive(Debug)] -pub struct Take { - inner: T, - limit: usize, -} - -pub fn new(inner: T, limit: usize) -> Take { - Take { inner, limit } -} - -impl Take { - /// Consumes this `Take`, returning the underlying value. - /// - /// # Examples - /// - /// ```rust - /// use bytes::buf::{BufMut, BufExt}; - /// - /// let mut buf = b"hello world".take(2); - /// let mut dst = vec![]; - /// - /// dst.put(&mut buf); - /// assert_eq!(*dst, b"he"[..]); - /// - /// let mut buf = buf.into_inner(); - /// - /// dst.clear(); - /// dst.put(&mut buf); - /// assert_eq!(*dst, b"llo world"[..]); - /// ``` - pub fn into_inner(self) -> T { - self.inner - } - - /// Gets a reference to the underlying `Buf`. - /// - /// It is inadvisable to directly read from the underlying `Buf`. - /// - /// # Examples - /// - /// ```rust - /// use bytes::{Buf, buf::BufExt}; - /// - /// let buf = b"hello world".take(2); - /// - /// assert_eq!(11, buf.get_ref().remaining()); - /// ``` - pub fn get_ref(&self) -> &T { - &self.inner - } - - /// Gets a mutable reference to the underlying `Buf`. - /// - /// It is inadvisable to directly read from the underlying `Buf`. - /// - /// # Examples - /// - /// ```rust - /// use bytes::{Buf, BufMut, buf::BufExt}; - /// - /// let mut buf = b"hello world".take(2); - /// let mut dst = vec![]; - /// - /// buf.get_mut().advance(2); - /// - /// dst.put(&mut buf); - /// assert_eq!(*dst, b"ll"[..]); - /// ``` - pub fn get_mut(&mut self) -> &mut T { - &mut self.inner - } - - /// Returns the maximum number of bytes that can be read. - /// - /// # Note - /// - /// If the inner `Buf` has fewer bytes than indicated by this method then - /// that is the actual number of available bytes. - /// - /// # Examples - /// - /// ```rust - /// use bytes::{Buf, buf::BufExt}; - /// - /// let mut buf = b"hello world".take(2); - /// - /// assert_eq!(2, buf.limit()); - /// assert_eq!(b'h', buf.get_u8()); - /// assert_eq!(1, buf.limit()); - /// ``` - pub fn limit(&self) -> usize { - self.limit - } - - /// Sets the maximum number of bytes that can be read. 
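`Reader` also implements `BufRead`, as shown above; a brief sketch of that path (bytes 0.5 assumed, names illustrative):

```
use bytes::{Buf, Bytes, buf::BufExt};
use std::io::BufRead;

fn main() {
    // `reader()` adapts any Buf to std::io; fill_buf exposes the current
    // chunk without copying, and consume advances the inner Buf.
    let mut rd = Bytes::from("hello world").reader();

    let chunk = rd.fill_buf().unwrap();
    assert_eq!(chunk, b"hello world");
    let n = chunk.len();
    rd.consume(n);

    assert_eq!(rd.get_ref().remaining(), 0);
}
```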
- /// - /// # Note - /// - /// If the inner `Buf` has fewer bytes than `lim` then that is the actual - /// number of available bytes. - /// - /// # Examples - /// - /// ```rust - /// use bytes::{BufMut, buf::BufExt}; - /// - /// let mut buf = b"hello world".take(2); - /// let mut dst = vec![]; - /// - /// dst.put(&mut buf); - /// assert_eq!(*dst, b"he"[..]); - /// - /// dst.clear(); - /// - /// buf.set_limit(3); - /// dst.put(&mut buf); - /// assert_eq!(*dst, b"llo"[..]); - /// ``` - pub fn set_limit(&mut self, lim: usize) { - self.limit = lim - } -} - -impl Buf for Take { - fn remaining(&self) -> usize { - cmp::min(self.inner.remaining(), self.limit) - } - - fn bytes(&self) -> &[u8] { - let bytes = self.inner.bytes(); - &bytes[..cmp::min(bytes.len(), self.limit)] - } - - fn advance(&mut self, cnt: usize) { - assert!(cnt <= self.limit); - self.inner.advance(cnt); - self.limit -= cnt; - } -} diff --git a/third_party/rust/bytes-0.5.6/src/buf/ext/writer.rs b/third_party/rust/bytes-0.5.6/src/buf/ext/writer.rs deleted file mode 100644 index a14197c8132e..000000000000 --- a/third_party/rust/bytes-0.5.6/src/buf/ext/writer.rs +++ /dev/null @@ -1,88 +0,0 @@ -use crate::BufMut; - -use std::{cmp, io}; - -/// A `BufMut` adapter which implements `io::Write` for the inner value. -/// -/// This struct is generally created by calling `writer()` on `BufMut`. See -/// documentation of [`writer()`](trait.BufMut.html#method.writer) for more -/// details. -#[derive(Debug)] -pub struct Writer { - buf: B, -} - -pub fn new(buf: B) -> Writer { - Writer { buf } -} - -impl Writer { - /// Gets a reference to the underlying `BufMut`. - /// - /// It is inadvisable to directly write to the underlying `BufMut`. - /// - /// # Examples - /// - /// ```rust - /// use bytes::buf::BufMutExt; - /// - /// let buf = Vec::with_capacity(1024).writer(); - /// - /// assert_eq!(1024, buf.get_ref().capacity()); - /// ``` - pub fn get_ref(&self) -> &B { - &self.buf - } - - /// Gets a mutable reference to the underlying `BufMut`. - /// - /// It is inadvisable to directly write to the underlying `BufMut`. - /// - /// # Examples - /// - /// ```rust - /// use bytes::buf::BufMutExt; - /// - /// let mut buf = vec![].writer(); - /// - /// buf.get_mut().reserve(1024); - /// - /// assert_eq!(1024, buf.get_ref().capacity()); - /// ``` - pub fn get_mut(&mut self) -> &mut B { - &mut self.buf - } - - /// Consumes this `Writer`, returning the underlying value. - /// - /// # Examples - /// - /// ```rust - /// use bytes::buf::BufMutExt; - /// use std::io; - /// - /// let mut buf = vec![].writer(); - /// let mut src = &b"hello world"[..]; - /// - /// io::copy(&mut src, &mut buf).unwrap(); - /// - /// let buf = buf.into_inner(); - /// assert_eq!(*buf, b"hello world"[..]); - /// ``` - pub fn into_inner(self) -> B { - self.buf - } -} - -impl io::Write for Writer { - fn write(&mut self, src: &[u8]) -> io::Result { - let n = cmp::min(self.buf.remaining_mut(), src.len()); - - self.buf.put(&src[0..n]); - Ok(n) - } - - fn flush(&mut self) -> io::Result<()> { - Ok(()) - } -} diff --git a/third_party/rust/bytes-0.5.6/src/buf/iter.rs b/third_party/rust/bytes-0.5.6/src/buf/iter.rs deleted file mode 100644 index 0f9bdc04fc1b..000000000000 --- a/third_party/rust/bytes-0.5.6/src/buf/iter.rs +++ /dev/null @@ -1,133 +0,0 @@ -use crate::Buf; - -/// Iterator over the bytes contained by the buffer. -/// -/// This struct is created by the [`iter`] method on [`Buf`]. 
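Since `Writer` implements `std::io::Write`, formatting macros can target a `BufMut` through it; a speculative but self-contained sketch on bytes 0.5:

```
use bytes::buf::BufMutExt;
use std::io::Write;

fn main() {
    // `writer()` adapts a BufMut to std::io::Write, so write! works.
    let mut out = vec![].writer();
    write!(out, "hello {}", "world").unwrap();

    let buf = out.into_inner();
    assert_eq!(buf, b"hello world");
}
```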
-/// -/// # Examples -/// -/// Basic usage: -/// -/// ``` -/// use bytes::Bytes; -/// -/// let buf = Bytes::from(&b"abc"[..]); -/// let mut iter = buf.into_iter(); -/// -/// assert_eq!(iter.next(), Some(b'a')); -/// assert_eq!(iter.next(), Some(b'b')); -/// assert_eq!(iter.next(), Some(b'c')); -/// assert_eq!(iter.next(), None); -/// ``` -/// -/// [`iter`]: trait.Buf.html#method.iter -/// [`Buf`]: trait.Buf.html -#[derive(Debug)] -pub struct IntoIter { - inner: T, -} - -impl IntoIter { - /// Creates an iterator over the bytes contained by the buffer. - /// - /// # Examples - /// - /// ``` - /// use bytes::Bytes; - /// use bytes::buf::IntoIter; - /// - /// let buf = Bytes::from_static(b"abc"); - /// let mut iter = IntoIter::new(buf); - /// - /// assert_eq!(iter.next(), Some(b'a')); - /// assert_eq!(iter.next(), Some(b'b')); - /// assert_eq!(iter.next(), Some(b'c')); - /// assert_eq!(iter.next(), None); - /// ``` - pub fn new(inner: T) -> IntoIter { - IntoIter { inner } - } - - /// Consumes this `IntoIter`, returning the underlying value. - /// - /// # Examples - /// - /// ```rust - /// use bytes::{Buf, Bytes}; - /// - /// let buf = Bytes::from(&b"abc"[..]); - /// let mut iter = buf.into_iter(); - /// - /// assert_eq!(iter.next(), Some(b'a')); - /// - /// let buf = iter.into_inner(); - /// assert_eq!(2, buf.remaining()); - /// ``` - pub fn into_inner(self) -> T { - self.inner - } - - /// Gets a reference to the underlying `Buf`. - /// - /// It is inadvisable to directly read from the underlying `Buf`. - /// - /// # Examples - /// - /// ```rust - /// use bytes::{Buf, Bytes}; - /// - /// let buf = Bytes::from(&b"abc"[..]); - /// let mut iter = buf.into_iter(); - /// - /// assert_eq!(iter.next(), Some(b'a')); - /// - /// assert_eq!(2, iter.get_ref().remaining()); - /// ``` - pub fn get_ref(&self) -> &T { - &self.inner - } - - /// Gets a mutable reference to the underlying `Buf`. - /// - /// It is inadvisable to directly read from the underlying `Buf`. - /// - /// # Examples - /// - /// ```rust - /// use bytes::{Buf, BytesMut}; - /// - /// let buf = BytesMut::from(&b"abc"[..]); - /// let mut iter = buf.into_iter(); - /// - /// assert_eq!(iter.next(), Some(b'a')); - /// - /// iter.get_mut().advance(1); - /// - /// assert_eq!(iter.next(), Some(b'c')); - /// ``` - pub fn get_mut(&mut self) -> &mut T { - &mut self.inner - } -} - -impl Iterator for IntoIter { - type Item = u8; - - fn next(&mut self) -> Option { - if !self.inner.has_remaining() { - return None; - } - - let b = self.inner.bytes()[0]; - self.inner.advance(1); - - Some(b) - } - - fn size_hint(&self) -> (usize, Option) { - let rem = self.inner.remaining(); - (rem, Some(rem)) - } -} - -impl ExactSizeIterator for IntoIter {} diff --git a/third_party/rust/bytes-0.5.6/src/buf/mod.rs b/third_party/rust/bytes-0.5.6/src/buf/mod.rs deleted file mode 100644 index 1d7292c9e6eb..000000000000 --- a/third_party/rust/bytes-0.5.6/src/buf/mod.rs +++ /dev/null @@ -1,30 +0,0 @@ -//! Utilities for working with buffers. -//! -//! A buffer is any structure that contains a sequence of bytes. The bytes may -//! or may not be stored in contiguous memory. This module contains traits used -//! to abstract over buffers as well as utilities for working with buffer types. -//! -//! # `Buf`, `BufMut` -//! -//! These are the two foundational traits for abstractly working with buffers. -//! They can be thought as iterators for byte structures. They offer additional -//! performance over `Iterator` by providing an API optimized for byte slices. -//! -//! 
See [`Buf`] and [`BufMut`] for more details. -//! -//! [rope]: https://en.wikipedia.org/wiki/Rope_(data_structure) -//! [`Buf`]: trait.Buf.html -//! [`BufMut`]: trait.BufMut.html - -mod buf_impl; -mod buf_mut; -pub mod ext; -mod iter; -mod vec_deque; - -pub use self::buf_impl::Buf; -pub use self::buf_mut::BufMut; -#[cfg(feature = "std")] -pub use self::buf_mut::IoSliceMut; -pub use self::ext::{BufExt, BufMutExt}; -pub use self::iter::IntoIter; diff --git a/third_party/rust/bytes-0.5.6/src/buf/vec_deque.rs b/third_party/rust/bytes-0.5.6/src/buf/vec_deque.rs deleted file mode 100644 index 195e6897f4c3..000000000000 --- a/third_party/rust/bytes-0.5.6/src/buf/vec_deque.rs +++ /dev/null @@ -1,22 +0,0 @@ -use alloc::collections::VecDeque; - -use super::Buf; - -impl Buf for VecDeque { - fn remaining(&self) -> usize { - self.len() - } - - fn bytes(&self) -> &[u8] { - let (s1, s2) = self.as_slices(); - if s1.is_empty() { - s2 - } else { - s1 - } - } - - fn advance(&mut self, cnt: usize) { - self.drain(..cnt); - } -} diff --git a/third_party/rust/bytes-0.5.6/src/bytes.rs b/third_party/rust/bytes-0.5.6/src/bytes.rs deleted file mode 100644 index 79a09f398192..000000000000 --- a/third_party/rust/bytes-0.5.6/src/bytes.rs +++ /dev/null @@ -1,1108 +0,0 @@ -use core::iter::FromIterator; -use core::ops::{Deref, RangeBounds}; -use core::{cmp, fmt, hash, mem, ptr, slice, usize}; - -use alloc::{borrow::Borrow, boxed::Box, string::String, vec::Vec}; - -use crate::buf::IntoIter; -#[allow(unused)] -use crate::loom::sync::atomic::AtomicMut; -use crate::loom::sync::atomic::{self, AtomicPtr, AtomicUsize, Ordering}; -use crate::Buf; - -/// A reference counted contiguous slice of memory. -/// -/// `Bytes` is an efficient container for storing and operating on contiguous -/// slices of memory. It is intended for use primarily in networking code, but -/// could have applications elsewhere as well. -/// -/// `Bytes` values facilitate zero-copy network programming by allowing multiple -/// `Bytes` objects to point to the same underlying memory. This is managed by -/// using a reference count to track when the memory is no longer needed and can -/// be freed. -/// -/// ``` -/// use bytes::Bytes; -/// -/// let mut mem = Bytes::from("Hello world"); -/// let a = mem.slice(0..5); -/// -/// assert_eq!(a, "Hello"); -/// -/// let b = mem.split_to(6); -/// -/// assert_eq!(mem, "world"); -/// assert_eq!(b, "Hello "); -/// ``` -/// -/// # Memory layout -/// -/// The `Bytes` struct itself is fairly small, limited to 4 `usize` fields used -/// to track information about which segment of the underlying memory the -/// `Bytes` handle has access to. -/// -/// `Bytes` keeps both a pointer to the shared `Arc` containing the full memory -/// slice and a pointer to the start of the region visible by the handle. -/// `Bytes` also tracks the length of its view into the memory. -/// -/// # Sharing -/// -/// The memory itself is reference counted, and multiple `Bytes` objects may -/// point to the same region. Each `Bytes` handle point to different sections within -/// the memory region, and `Bytes` handle may or may not have overlapping views -/// into the memory. 
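The `VecDeque<u8>` impl above makes a queue readable as a `Buf`; a minimal sketch assuming an external crate on bytes 0.5:

```
use std::collections::VecDeque;
use bytes::Buf;

fn main() {
    // VecDeque<u8> is a Buf; its ring buffer may be split into two slices,
    // and bytes() only exposes the first non-empty one.
    let mut queue: VecDeque<u8> = VecDeque::new();
    queue.extend(b"hello world");

    assert_eq!(queue.remaining(), 11);
    assert_eq!(queue.get_u8(), b'h'); // reads and drains one byte
    assert_eq!(queue.remaining(), 10);
}
```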
-/// -/// -/// ```text -/// -/// Arc ptrs +---------+ -/// ________________________ / | Bytes 2 | -/// / +---------+ -/// / +-----------+ | | -/// |_________/ | Bytes 1 | | | -/// | +-----------+ | | -/// | | | ___/ data | tail -/// | data | tail |/ | -/// v v v v -/// +-----+---------------------------------+-----+ -/// | Arc | | | | | -/// +-----+---------------------------------+-----+ -/// ``` -pub struct Bytes { - ptr: *const u8, - len: usize, - // inlined "trait object" - data: AtomicPtr<()>, - vtable: &'static Vtable, -} - -pub(crate) struct Vtable { - /// fn(data, ptr, len) - pub clone: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> Bytes, - /// fn(data, ptr, len) - pub drop: unsafe fn(&mut AtomicPtr<()>, *const u8, usize), -} - -impl Bytes { - /// Creates a new empty `Bytes`. - /// - /// This will not allocate and the returned `Bytes` handle will be empty. - /// - /// # Examples - /// - /// ``` - /// use bytes::Bytes; - /// - /// let b = Bytes::new(); - /// assert_eq!(&b[..], b""); - /// ``` - #[inline] - #[cfg(not(all(loom, test)))] - pub const fn new() -> Bytes { - // Make it a named const to work around - // "unsizing casts are not allowed in const fn" - const EMPTY: &[u8] = &[]; - Bytes::from_static(EMPTY) - } - - #[cfg(all(loom, test))] - pub fn new() -> Bytes { - const EMPTY: &[u8] = &[]; - Bytes::from_static(EMPTY) - } - - /// Creates a new `Bytes` from a static slice. - /// - /// The returned `Bytes` will point directly to the static slice. There is - /// no allocating or copying. - /// - /// # Examples - /// - /// ``` - /// use bytes::Bytes; - /// - /// let b = Bytes::from_static(b"hello"); - /// assert_eq!(&b[..], b"hello"); - /// ``` - #[inline] - #[cfg(not(all(loom, test)))] - pub const fn from_static(bytes: &'static [u8]) -> Bytes { - Bytes { - ptr: bytes.as_ptr(), - len: bytes.len(), - data: AtomicPtr::new(ptr::null_mut()), - vtable: &STATIC_VTABLE, - } - } - - #[cfg(all(loom, test))] - pub fn from_static(bytes: &'static [u8]) -> Bytes { - Bytes { - ptr: bytes.as_ptr(), - len: bytes.len(), - data: AtomicPtr::new(ptr::null_mut()), - vtable: &STATIC_VTABLE, - } - } - - /// Returns the number of bytes contained in this `Bytes`. - /// - /// # Examples - /// - /// ``` - /// use bytes::Bytes; - /// - /// let b = Bytes::from(&b"hello"[..]); - /// assert_eq!(b.len(), 5); - /// ``` - #[inline] - pub fn len(&self) -> usize { - self.len - } - - /// Returns true if the `Bytes` has a length of 0. - /// - /// # Examples - /// - /// ``` - /// use bytes::Bytes; - /// - /// let b = Bytes::new(); - /// assert!(b.is_empty()); - /// ``` - #[inline] - pub fn is_empty(&self) -> bool { - self.len == 0 - } - - ///Creates `Bytes` instance from slice, by copying it. - pub fn copy_from_slice(data: &[u8]) -> Self { - data.to_vec().into() - } - - /// Returns a slice of self for the provided range. - /// - /// This will increment the reference count for the underlying memory and - /// return a new `Bytes` handle set to the slice. - /// - /// This operation is `O(1)`. - /// - /// # Examples - /// - /// ``` - /// use bytes::Bytes; - /// - /// let a = Bytes::from(&b"hello world"[..]); - /// let b = a.slice(2..5); - /// - /// assert_eq!(&b[..], b"llo"); - /// ``` - /// - /// # Panics - /// - /// Requires that `begin <= end` and `end <= self.len()`, otherwise slicing - /// will panic. 
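A short sketch of the zero-copy slicing described above (bytes 0.5 assumed; the literal data is illustrative):

```
use bytes::Bytes;

fn main() {
    // Slicing and cloning Bytes is O(1): the handles share the same storage.
    let data = Bytes::from_static(b"hello world");

    let hello = data.slice(0..5);
    let world = data.slice(6..);

    assert_eq!(&hello[..], b"hello");
    assert_eq!(&world[..], b"world");
    assert_eq!(data.len(), 11); // the original handle is unchanged
}
```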
- pub fn slice(&self, range: impl RangeBounds) -> Bytes { - use core::ops::Bound; - - let len = self.len(); - - let begin = match range.start_bound() { - Bound::Included(&n) => n, - Bound::Excluded(&n) => n + 1, - Bound::Unbounded => 0, - }; - - let end = match range.end_bound() { - Bound::Included(&n) => n + 1, - Bound::Excluded(&n) => n, - Bound::Unbounded => len, - }; - - assert!( - begin <= end, - "range start must not be greater than end: {:?} <= {:?}", - begin, - end, - ); - assert!( - end <= len, - "range end out of bounds: {:?} <= {:?}", - end, - len, - ); - - if end == begin { - return Bytes::new(); - } - - let mut ret = self.clone(); - - ret.len = end - begin; - ret.ptr = unsafe { ret.ptr.offset(begin as isize) }; - - ret - } - - /// Returns a slice of self that is equivalent to the given `subset`. - /// - /// When processing a `Bytes` buffer with other tools, one often gets a - /// `&[u8]` which is in fact a slice of the `Bytes`, i.e. a subset of it. - /// This function turns that `&[u8]` into another `Bytes`, as if one had - /// called `self.slice()` with the offsets that correspond to `subset`. - /// - /// This operation is `O(1)`. - /// - /// # Examples - /// - /// ``` - /// use bytes::Bytes; - /// - /// let bytes = Bytes::from(&b"012345678"[..]); - /// let as_slice = bytes.as_ref(); - /// let subset = &as_slice[2..6]; - /// let subslice = bytes.slice_ref(&subset); - /// assert_eq!(&subslice[..], b"2345"); - /// ``` - /// - /// # Panics - /// - /// Requires that the given `sub` slice is in fact contained within the - /// `Bytes` buffer; otherwise this function will panic. - pub fn slice_ref(&self, subset: &[u8]) -> Bytes { - // Empty slice and empty Bytes may have their pointers reset - // so explicitly allow empty slice to be a subslice of any slice. - if subset.is_empty() { - return Bytes::new(); - } - - let bytes_p = self.as_ptr() as usize; - let bytes_len = self.len(); - - let sub_p = subset.as_ptr() as usize; - let sub_len = subset.len(); - - assert!( - sub_p >= bytes_p, - "subset pointer ({:p}) is smaller than self pointer ({:p})", - sub_p as *const u8, - bytes_p as *const u8, - ); - assert!( - sub_p + sub_len <= bytes_p + bytes_len, - "subset is out of bounds: self = ({:p}, {}), subset = ({:p}, {})", - bytes_p as *const u8, - bytes_len, - sub_p as *const u8, - sub_len, - ); - - let sub_offset = sub_p - bytes_p; - - self.slice(sub_offset..(sub_offset + sub_len)) - } - - /// Splits the bytes into two at the given index. - /// - /// Afterwards `self` contains elements `[0, at)`, and the returned `Bytes` - /// contains elements `[at, len)`. - /// - /// This is an `O(1)` operation that just increases the reference count and - /// sets a few indices. - /// - /// # Examples - /// - /// ``` - /// use bytes::Bytes; - /// - /// let mut a = Bytes::from(&b"hello world"[..]); - /// let b = a.split_off(5); - /// - /// assert_eq!(&a[..], b"hello"); - /// assert_eq!(&b[..], b" world"); - /// ``` - /// - /// # Panics - /// - /// Panics if `at > len`. - #[must_use = "consider Bytes::truncate if you don't need the other half"] - pub fn split_off(&mut self, at: usize) -> Bytes { - assert!( - at <= self.len(), - "split_off out of bounds: {:?} <= {:?}", - at, - self.len(), - ); - - if at == self.len() { - return Bytes::new(); - } - - if at == 0 { - return mem::replace(self, Bytes::new()); - } - - let mut ret = self.clone(); - - self.len = at; - - unsafe { ret.inc_start(at) }; - - ret - } - - /// Splits the bytes into two at the given index. 
- /// - /// Afterwards `self` contains elements `[at, len)`, and the returned - /// `Bytes` contains elements `[0, at)`. - /// - /// This is an `O(1)` operation that just increases the reference count and - /// sets a few indices. - /// - /// # Examples - /// - /// ``` - /// use bytes::Bytes; - /// - /// let mut a = Bytes::from(&b"hello world"[..]); - /// let b = a.split_to(5); - /// - /// assert_eq!(&a[..], b" world"); - /// assert_eq!(&b[..], b"hello"); - /// ``` - /// - /// # Panics - /// - /// Panics if `at > len`. - #[must_use = "consider Bytes::advance if you don't need the other half"] - pub fn split_to(&mut self, at: usize) -> Bytes { - assert!( - at <= self.len(), - "split_to out of bounds: {:?} <= {:?}", - at, - self.len(), - ); - - if at == self.len() { - return mem::replace(self, Bytes::new()); - } - - if at == 0 { - return Bytes::new(); - } - - let mut ret = self.clone(); - - unsafe { self.inc_start(at) }; - - ret.len = at; - ret - } - - /// Shortens the buffer, keeping the first `len` bytes and dropping the - /// rest. - /// - /// If `len` is greater than the buffer's current length, this has no - /// effect. - /// - /// The [`split_off`] method can emulate `truncate`, but this causes the - /// excess bytes to be returned instead of dropped. - /// - /// # Examples - /// - /// ``` - /// use bytes::Bytes; - /// - /// let mut buf = Bytes::from(&b"hello world"[..]); - /// buf.truncate(5); - /// assert_eq!(buf, b"hello"[..]); - /// ``` - /// - /// [`split_off`]: #method.split_off - #[inline] - pub fn truncate(&mut self, len: usize) { - if len < self.len { - // The Vec "promotable" vtables do not store the capacity, - // so we cannot truncate while using this repr. We *have* to - // promote using `split_off` so the capacity can be stored. - if self.vtable as *const Vtable == &PROMOTABLE_EVEN_VTABLE - || self.vtable as *const Vtable == &PROMOTABLE_ODD_VTABLE - { - drop(self.split_off(len)); - } else { - self.len = len; - } - } - } - - /// Clears the buffer, removing all data. 
- /// - /// # Examples - /// - /// ``` - /// use bytes::Bytes; - /// - /// let mut buf = Bytes::from(&b"hello world"[..]); - /// buf.clear(); - /// assert!(buf.is_empty()); - /// ``` - #[inline] - pub fn clear(&mut self) { - self.truncate(0); - } - - #[inline] - pub(crate) unsafe fn with_vtable( - ptr: *const u8, - len: usize, - data: AtomicPtr<()>, - vtable: &'static Vtable, - ) -> Bytes { - Bytes { - ptr, - len, - data, - vtable, - } - } - - // private - - #[inline] - fn as_slice(&self) -> &[u8] { - unsafe { slice::from_raw_parts(self.ptr, self.len) } - } - - #[inline] - unsafe fn inc_start(&mut self, by: usize) { - // should already be asserted, but debug assert for tests - debug_assert!(self.len >= by, "internal: inc_start out of bounds"); - self.len -= by; - self.ptr = self.ptr.offset(by as isize); - } -} - -// Vtable must enforce this behavior -unsafe impl Send for Bytes {} -unsafe impl Sync for Bytes {} - -impl Drop for Bytes { - #[inline] - fn drop(&mut self) { - unsafe { (self.vtable.drop)(&mut self.data, self.ptr, self.len) } - } -} - -impl Clone for Bytes { - #[inline] - fn clone(&self) -> Bytes { - unsafe { (self.vtable.clone)(&self.data, self.ptr, self.len) } - } -} - -impl Buf for Bytes { - #[inline] - fn remaining(&self) -> usize { - self.len() - } - - #[inline] - fn bytes(&self) -> &[u8] { - self.as_slice() - } - - #[inline] - fn advance(&mut self, cnt: usize) { - assert!( - cnt <= self.len(), - "cannot advance past `remaining`: {:?} <= {:?}", - cnt, - self.len(), - ); - - unsafe { - self.inc_start(cnt); - } - } - - fn to_bytes(&mut self) -> crate::Bytes { - core::mem::replace(self, Bytes::new()) - } -} - -impl Deref for Bytes { - type Target = [u8]; - - #[inline] - fn deref(&self) -> &[u8] { - self.as_slice() - } -} - -impl AsRef<[u8]> for Bytes { - #[inline] - fn as_ref(&self) -> &[u8] { - self.as_slice() - } -} - -impl hash::Hash for Bytes { - fn hash(&self, state: &mut H) - where - H: hash::Hasher, - { - self.as_slice().hash(state); - } -} - -impl Borrow<[u8]> for Bytes { - fn borrow(&self) -> &[u8] { - self.as_slice() - } -} - -impl IntoIterator for Bytes { - type Item = u8; - type IntoIter = IntoIter; - - fn into_iter(self) -> Self::IntoIter { - IntoIter::new(self) - } -} - -impl<'a> IntoIterator for &'a Bytes { - type Item = &'a u8; - type IntoIter = core::slice::Iter<'a, u8>; - - fn into_iter(self) -> Self::IntoIter { - self.as_slice().into_iter() - } -} - -impl FromIterator for Bytes { - fn from_iter>(into_iter: T) -> Self { - Vec::from_iter(into_iter).into() - } -} - -// impl Eq - -impl PartialEq for Bytes { - fn eq(&self, other: &Bytes) -> bool { - self.as_slice() == other.as_slice() - } -} - -impl PartialOrd for Bytes { - fn partial_cmp(&self, other: &Bytes) -> Option { - self.as_slice().partial_cmp(other.as_slice()) - } -} - -impl Ord for Bytes { - fn cmp(&self, other: &Bytes) -> cmp::Ordering { - self.as_slice().cmp(other.as_slice()) - } -} - -impl Eq for Bytes {} - -impl PartialEq<[u8]> for Bytes { - fn eq(&self, other: &[u8]) -> bool { - self.as_slice() == other - } -} - -impl PartialOrd<[u8]> for Bytes { - fn partial_cmp(&self, other: &[u8]) -> Option { - self.as_slice().partial_cmp(other) - } -} - -impl PartialEq for [u8] { - fn eq(&self, other: &Bytes) -> bool { - *other == *self - } -} - -impl PartialOrd for [u8] { - fn partial_cmp(&self, other: &Bytes) -> Option { - <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other) - } -} - -impl PartialEq for Bytes { - fn eq(&self, other: &str) -> bool { - self.as_slice() == other.as_bytes() - } -} - -impl 
PartialOrd for Bytes { - fn partial_cmp(&self, other: &str) -> Option { - self.as_slice().partial_cmp(other.as_bytes()) - } -} - -impl PartialEq for str { - fn eq(&self, other: &Bytes) -> bool { - *other == *self - } -} - -impl PartialOrd for str { - fn partial_cmp(&self, other: &Bytes) -> Option { - <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other) - } -} - -impl PartialEq> for Bytes { - fn eq(&self, other: &Vec) -> bool { - *self == &other[..] - } -} - -impl PartialOrd> for Bytes { - fn partial_cmp(&self, other: &Vec) -> Option { - self.as_slice().partial_cmp(&other[..]) - } -} - -impl PartialEq for Vec { - fn eq(&self, other: &Bytes) -> bool { - *other == *self - } -} - -impl PartialOrd for Vec { - fn partial_cmp(&self, other: &Bytes) -> Option { - <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other) - } -} - -impl PartialEq for Bytes { - fn eq(&self, other: &String) -> bool { - *self == &other[..] - } -} - -impl PartialOrd for Bytes { - fn partial_cmp(&self, other: &String) -> Option { - self.as_slice().partial_cmp(other.as_bytes()) - } -} - -impl PartialEq for String { - fn eq(&self, other: &Bytes) -> bool { - *other == *self - } -} - -impl PartialOrd for String { - fn partial_cmp(&self, other: &Bytes) -> Option { - <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other) - } -} - -impl PartialEq for &[u8] { - fn eq(&self, other: &Bytes) -> bool { - *other == *self - } -} - -impl PartialOrd for &[u8] { - fn partial_cmp(&self, other: &Bytes) -> Option { - <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other) - } -} - -impl PartialEq for &str { - fn eq(&self, other: &Bytes) -> bool { - *other == *self - } -} - -impl PartialOrd for &str { - fn partial_cmp(&self, other: &Bytes) -> Option { - <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other) - } -} - -impl<'a, T: ?Sized> PartialEq<&'a T> for Bytes -where - Bytes: PartialEq, -{ - fn eq(&self, other: &&'a T) -> bool { - *self == **other - } -} - -impl<'a, T: ?Sized> PartialOrd<&'a T> for Bytes -where - Bytes: PartialOrd, -{ - fn partial_cmp(&self, other: &&'a T) -> Option { - self.partial_cmp(&**other) - } -} - -// impl From - -impl Default for Bytes { - #[inline] - fn default() -> Bytes { - Bytes::new() - } -} - -impl From<&'static [u8]> for Bytes { - fn from(slice: &'static [u8]) -> Bytes { - Bytes::from_static(slice) - } -} - -impl From<&'static str> for Bytes { - fn from(slice: &'static str) -> Bytes { - Bytes::from_static(slice.as_bytes()) - } -} - -impl From> for Bytes { - fn from(vec: Vec) -> Bytes { - // into_boxed_slice doesn't return a heap allocation for empty vectors, - // so the pointer isn't aligned enough for the KIND_VEC stashing to - // work. 
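The "KIND_VEC stashing" referred to in the comment above is plain low-bit pointer tagging: a heap pointer to an allocation aligned to at least 2 always has a zero low bit, so that bit is free to carry the one-bit `KIND_VEC`/`KIND_ARC` flag inside the `data` word. A standalone sketch of the idea under that alignment assumption; `tag` and `untag` are illustrative names, not part of this crate:

```rust
// Low-bit pointer tagging, sketched outside the crate. Assumes the
// allocation is aligned to at least 2, so the pointer's LSB is 0 and
// can carry a one-bit "kind" flag.
const KIND_MASK: usize = 0b1;

fn tag(ptr: *mut u8, kind: usize) -> usize {
    debug_assert_eq!(ptr as usize & KIND_MASK, 0, "pointer must be 2-aligned");
    ptr as usize | (kind & KIND_MASK)
}

fn untag(word: usize) -> (*mut u8, usize) {
    ((word & !KIND_MASK) as *mut u8, word & KIND_MASK)
}

fn main() {
    let raw = Box::into_raw(Box::new(0u16)) as *mut u8; // u16 => 2-byte aligned
    let tagged = tag(raw, 0b1); // e.g. the "vec" kind
    assert_eq!(untag(tagged), (raw, 0b1));
    unsafe { drop(Box::from_raw(raw as *mut u16)) }; // free the test allocation
}
```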
- if vec.is_empty() { - return Bytes::new(); - } - - let slice = vec.into_boxed_slice(); - let len = slice.len(); - let ptr = slice.as_ptr(); - drop(Box::into_raw(slice)); - - if ptr as usize & 0x1 == 0 { - let data = ptr as usize | KIND_VEC; - Bytes { - ptr, - len, - data: AtomicPtr::new(data as *mut _), - vtable: &PROMOTABLE_EVEN_VTABLE, - } - } else { - Bytes { - ptr, - len, - data: AtomicPtr::new(ptr as *mut _), - vtable: &PROMOTABLE_ODD_VTABLE, - } - } - } -} - -impl From for Bytes { - fn from(s: String) -> Bytes { - Bytes::from(s.into_bytes()) - } -} - -// ===== impl Vtable ===== - -impl fmt::Debug for Vtable { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Vtable") - .field("clone", &(self.clone as *const ())) - .field("drop", &(self.drop as *const ())) - .finish() - } -} - -// ===== impl StaticVtable ===== - -const STATIC_VTABLE: Vtable = Vtable { - clone: static_clone, - drop: static_drop, -}; - -unsafe fn static_clone(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes { - let slice = slice::from_raw_parts(ptr, len); - Bytes::from_static(slice) -} - -unsafe fn static_drop(_: &mut AtomicPtr<()>, _: *const u8, _: usize) { - // nothing to drop for &'static [u8] -} - -// ===== impl PromotableVtable ===== - -static PROMOTABLE_EVEN_VTABLE: Vtable = Vtable { - clone: promotable_even_clone, - drop: promotable_even_drop, -}; - -static PROMOTABLE_ODD_VTABLE: Vtable = Vtable { - clone: promotable_odd_clone, - drop: promotable_odd_drop, -}; - -unsafe fn promotable_even_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes { - let shared = data.load(Ordering::Acquire); - let kind = shared as usize & KIND_MASK; - - if kind == KIND_ARC { - shallow_clone_arc(shared as _, ptr, len) - } else { - debug_assert_eq!(kind, KIND_VEC); - let buf = (shared as usize & !KIND_MASK) as *mut u8; - shallow_clone_vec(data, shared, buf, ptr, len) - } -} - -unsafe fn promotable_even_drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) { - data.with_mut(|shared| { - let shared = *shared; - let kind = shared as usize & KIND_MASK; - - if kind == KIND_ARC { - release_shared(shared as *mut Shared); - } else { - debug_assert_eq!(kind, KIND_VEC); - let buf = (shared as usize & !KIND_MASK) as *mut u8; - drop(rebuild_boxed_slice(buf, ptr, len)); - } - }); -} - -unsafe fn promotable_odd_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes { - let shared = data.load(Ordering::Acquire); - let kind = shared as usize & KIND_MASK; - - if kind == KIND_ARC { - shallow_clone_arc(shared as _, ptr, len) - } else { - debug_assert_eq!(kind, KIND_VEC); - shallow_clone_vec(data, shared, shared as *mut u8, ptr, len) - } -} - -unsafe fn promotable_odd_drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) { - data.with_mut(|shared| { - let shared = *shared; - let kind = shared as usize & KIND_MASK; - - if kind == KIND_ARC { - release_shared(shared as *mut Shared); - } else { - debug_assert_eq!(kind, KIND_VEC); - - drop(rebuild_boxed_slice(shared as *mut u8, ptr, len)); - } - }); -} - -unsafe fn rebuild_boxed_slice(buf: *mut u8, offset: *const u8, len: usize) -> Box<[u8]> { - let cap = (offset as usize - buf as usize) + len; - Box::from_raw(slice::from_raw_parts_mut(buf, cap)) -} - -// ===== impl SharedVtable ===== - -struct Shared { - // holds vec for drop, but otherwise doesnt access it - _vec: Vec, - ref_cnt: AtomicUsize, -} - -// Assert that the alignment of `Shared` is divisible by 2. 
-// This is a necessary invariant since we depend on allocating `Shared` a -// shared object to implicitly carry the `KIND_ARC` flag in its pointer. -// This flag is set when the LSB is 0. -const _: [(); 0 - mem::align_of::() % 2] = []; // Assert that the alignment of `Shared` is divisible by 2. - -static SHARED_VTABLE: Vtable = Vtable { - clone: shared_clone, - drop: shared_drop, -}; - -const KIND_ARC: usize = 0b0; -const KIND_VEC: usize = 0b1; -const KIND_MASK: usize = 0b1; - -unsafe fn shared_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes { - let shared = data.load(Ordering::Relaxed); - shallow_clone_arc(shared as _, ptr, len) -} - -unsafe fn shared_drop(data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) { - data.with_mut(|shared| { - release_shared(*shared as *mut Shared); - }); -} - -unsafe fn shallow_clone_arc(shared: *mut Shared, ptr: *const u8, len: usize) -> Bytes { - let old_size = (*shared).ref_cnt.fetch_add(1, Ordering::Relaxed); - - if old_size > usize::MAX >> 1 { - crate::abort(); - } - - Bytes { - ptr, - len, - data: AtomicPtr::new(shared as _), - vtable: &SHARED_VTABLE, - } -} - -#[cold] -unsafe fn shallow_clone_vec( - atom: &AtomicPtr<()>, - ptr: *const (), - buf: *mut u8, - offset: *const u8, - len: usize, -) -> Bytes { - // If the buffer is still tracked in a `Vec`. It is time to - // promote the vec to an `Arc`. This could potentially be called - // concurrently, so some care must be taken. - - // First, allocate a new `Shared` instance containing the - // `Vec` fields. It's important to note that `ptr`, `len`, - // and `cap` cannot be mutated without having `&mut self`. - // This means that these fields will not be concurrently - // updated and since the buffer hasn't been promoted to an - // `Arc`, those three fields still are the components of the - // vector. - let vec = rebuild_boxed_slice(buf, offset, len).into_vec(); - let shared = Box::new(Shared { - _vec: vec, - // Initialize refcount to 2. One for this reference, and one - // for the new clone that will be returned from - // `shallow_clone`. - ref_cnt: AtomicUsize::new(2), - }); - - let shared = Box::into_raw(shared); - - // The pointer should be aligned, so this assert should - // always succeed. - debug_assert!( - 0 == (shared as usize & KIND_MASK), - "internal: Box should have an aligned pointer", - ); - - // Try compare & swapping the pointer into the `arc` field. - // `Release` is used synchronize with other threads that - // will load the `arc` field. - // - // If the `compare_and_swap` fails, then the thread lost the - // race to promote the buffer to shared. The `Acquire` - // ordering will synchronize with the `compare_and_swap` - // that happened in the other thread and the `Shared` - // pointed to by `actual` will be visible. - let actual = atom.compare_and_swap(ptr as _, shared as _, Ordering::AcqRel); - - if actual as usize == ptr as usize { - // The upgrade was successful, the new handle can be - // returned. - return Bytes { - ptr: offset, - len, - data: AtomicPtr::new(shared as _), - vtable: &SHARED_VTABLE, - }; - } - - // The upgrade failed, a concurrent clone happened. Release - // the allocation that was made in this thread, it will not - // be needed. - let shared = Box::from_raw(shared); - mem::forget(*shared); - - // Buffer already promoted to shared storage, so increment ref - // count. - shallow_clone_arc(actual as _, offset, len) -} - -unsafe fn release_shared(ptr: *mut Shared) { - // `Shared` storage... follow the drop steps from Arc. 
- if (*ptr).ref_cnt.fetch_sub(1, Ordering::Release) != 1 { - return; - } - - // This fence is needed to prevent reordering of use of the data and - // deletion of the data. Because it is marked `Release`, the decreasing - // of the reference count synchronizes with this `Acquire` fence. This - // means that use of the data happens before decreasing the reference - // count, which happens before this fence, which happens before the - // deletion of the data. - // - // As explained in the [Boost documentation][1], - // - // > It is important to enforce any possible access to the object in one - // > thread (through an existing reference) to *happen before* deleting - // > the object in a different thread. This is achieved by a "release" - // > operation after dropping a reference (any access to the object - // > through this reference must obviously happened before), and an - // > "acquire" operation before deleting the object. - // - // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html) - atomic::fence(Ordering::Acquire); - - // Drop the data - Box::from_raw(ptr); -} - -// compile-fails - -/// ```compile_fail -/// use bytes::Bytes; -/// #[deny(unused_must_use)] -/// { -/// let mut b1 = Bytes::from("hello world"); -/// b1.split_to(6); -/// } -/// ``` -fn _split_to_must_use() {} - -/// ```compile_fail -/// use bytes::Bytes; -/// #[deny(unused_must_use)] -/// { -/// let mut b1 = Bytes::from("hello world"); -/// b1.split_off(6); -/// } -/// ``` -fn _split_off_must_use() {} - -// fuzz tests -#[cfg(all(test, loom))] -mod fuzz { - use loom::sync::Arc; - use loom::thread; - - use super::Bytes; - #[test] - fn bytes_cloning_vec() { - loom::model(|| { - let a = Bytes::from(b"abcdefgh".to_vec()); - let addr = a.as_ptr() as usize; - - // test the Bytes::clone is Sync by putting it in an Arc - let a1 = Arc::new(a); - let a2 = a1.clone(); - - let t1 = thread::spawn(move || { - let b: Bytes = (*a1).clone(); - assert_eq!(b.as_ptr() as usize, addr); - }); - - let t2 = thread::spawn(move || { - let b: Bytes = (*a2).clone(); - assert_eq!(b.as_ptr() as usize, addr); - }); - - t1.join().unwrap(); - t2.join().unwrap(); - }); - } -} diff --git a/third_party/rust/bytes-0.5.6/src/bytes_mut.rs b/third_party/rust/bytes-0.5.6/src/bytes_mut.rs deleted file mode 100644 index a7a8e579872d..000000000000 --- a/third_party/rust/bytes-0.5.6/src/bytes_mut.rs +++ /dev/null @@ -1,1581 +0,0 @@ -use core::iter::{FromIterator, Iterator}; -use core::mem::{self, ManuallyDrop}; -use core::ops::{Deref, DerefMut}; -use core::ptr::{self, NonNull}; -use core::{cmp, fmt, hash, isize, slice, usize}; - -use alloc::{ - borrow::{Borrow, BorrowMut}, - boxed::Box, - string::String, - vec::Vec, -}; - -use crate::buf::IntoIter; -use crate::bytes::Vtable; -#[allow(unused)] -use crate::loom::sync::atomic::AtomicMut; -use crate::loom::sync::atomic::{self, AtomicPtr, AtomicUsize, Ordering}; -use crate::{Buf, BufMut, Bytes}; - -/// A unique reference to a contiguous slice of memory. -/// -/// `BytesMut` represents a unique view into a potentially shared memory region. -/// Given the uniqueness guarantee, owners of `BytesMut` handles are able to -/// mutate the memory. -/// -/// `BytesMut` can be thought of as containing a `buf: Arc>`, an offset -/// into `buf`, a slice length, and a guarantee that no other `BytesMut` for the -/// same `buf` overlaps with its slice. That guarantee means that a write lock -/// is not required. 
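Both `Bytes` and `BytesMut` free their shared storage through the Release/Acquire protocol spelled out in `release_shared` above: the decrement publishes this handle's last use of the buffer, and only the owner that sees the count hit 1 runs an `Acquire` fence before deallocating. A minimal sketch of just that protocol; `Counted` and `release` are illustrative names, not this crate's `Shared`:

```rust
use std::sync::atomic::{fence, AtomicUsize, Ordering};

// Arc-style release protocol, mirroring the steps in `release_shared`.
#[allow(dead_code)]
struct Counted<T> {
    ref_cnt: AtomicUsize,
    value: T,
}

/// # Safety
/// `ptr` must come from `Box::into_raw`, and each owner may call this
/// at most once.
unsafe fn release<T>(ptr: *mut Counted<T>) {
    // `Release` orders this handle's prior uses of `value` before the
    // decrement that other threads will observe.
    if (*ptr).ref_cnt.fetch_sub(1, Ordering::Release) != 1 {
        return;
    }
    // Pair with the other handles' `Release` decrements, so all of
    // their uses happen-before the deallocation below.
    fence(Ordering::Acquire);
    drop(Box::from_raw(ptr));
}
```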
-/// -/// # Growth -/// -/// `BytesMut`'s `BufMut` implementation will implicitly grow its buffer as -/// necessary. However, explicitly reserving the required space up-front before -/// a series of inserts will be more efficient. -/// -/// # Examples -/// -/// ``` -/// use bytes::{BytesMut, BufMut}; -/// -/// let mut buf = BytesMut::with_capacity(64); -/// -/// buf.put_u8(b'h'); -/// buf.put_u8(b'e'); -/// buf.put(&b"llo"[..]); -/// -/// assert_eq!(&buf[..], b"hello"); -/// -/// // Freeze the buffer so that it can be shared -/// let a = buf.freeze(); -/// -/// // This does not allocate, instead `b` points to the same memory. -/// let b = a.clone(); -/// -/// assert_eq!(&a[..], b"hello"); -/// assert_eq!(&b[..], b"hello"); -/// ``` -pub struct BytesMut { - ptr: NonNull, - len: usize, - cap: usize, - data: *mut Shared, -} - -// Thread-safe reference-counted container for the shared storage. This mostly -// the same as `core::sync::Arc` but without the weak counter. The ref counting -// fns are based on the ones found in `std`. -// -// The main reason to use `Shared` instead of `core::sync::Arc` is that it ends -// up making the overall code simpler and easier to reason about. This is due to -// some of the logic around setting `Inner::arc` and other ways the `arc` field -// is used. Using `Arc` ended up requiring a number of funky transmutes and -// other shenanigans to make it work. -struct Shared { - vec: Vec, - original_capacity_repr: usize, - ref_count: AtomicUsize, -} - -// Buffer storage strategy flags. -const KIND_ARC: usize = 0b0; -const KIND_VEC: usize = 0b1; -const KIND_MASK: usize = 0b1; - -// The max original capacity value. Any `Bytes` allocated with a greater initial -// capacity will default to this. -const MAX_ORIGINAL_CAPACITY_WIDTH: usize = 17; -// The original capacity algorithm will not take effect unless the originally -// allocated capacity was at least 1kb in size. -const MIN_ORIGINAL_CAPACITY_WIDTH: usize = 10; -// The original capacity is stored in powers of 2 starting at 1kb to a max of -// 64kb. Representing it as such requires only 3 bits of storage. -const ORIGINAL_CAPACITY_MASK: usize = 0b11100; -const ORIGINAL_CAPACITY_OFFSET: usize = 2; - -// When the storage is in the `Vec` representation, the pointer can be advanced -// at most this value. This is due to the amount of storage available to track -// the offset is usize - number of KIND bits and number of ORIGINAL_CAPACITY -// bits. -const VEC_POS_OFFSET: usize = 5; -const MAX_VEC_POS: usize = usize::MAX >> VEC_POS_OFFSET; -const NOT_VEC_POS_MASK: usize = 0b11111; - -#[cfg(target_pointer_width = "64")] -const PTR_WIDTH: usize = 64; -#[cfg(target_pointer_width = "32")] -const PTR_WIDTH: usize = 32; - -/* - * - * ===== BytesMut ===== - * - */ - -impl BytesMut { - /// Creates a new `BytesMut` with the specified capacity. - /// - /// The returned `BytesMut` will be able to hold at least `capacity` bytes - /// without reallocating. - /// - /// It is important to note that this function does not specify the length - /// of the returned `BytesMut`, but only the capacity. 
- /// - /// # Examples - /// - /// ``` - /// use bytes::{BytesMut, BufMut}; - /// - /// let mut bytes = BytesMut::with_capacity(64); - /// - /// // `bytes` contains no data, even though there is capacity - /// assert_eq!(bytes.len(), 0); - /// - /// bytes.put(&b"hello world"[..]); - /// - /// assert_eq!(&bytes[..], b"hello world"); - /// ``` - #[inline] - pub fn with_capacity(capacity: usize) -> BytesMut { - BytesMut::from_vec(Vec::with_capacity(capacity)) - } - - /// Creates a new `BytesMut` with default capacity. - /// - /// Resulting object has length 0 and unspecified capacity. - /// This function does not allocate. - /// - /// # Examples - /// - /// ``` - /// use bytes::{BytesMut, BufMut}; - /// - /// let mut bytes = BytesMut::new(); - /// - /// assert_eq!(0, bytes.len()); - /// - /// bytes.reserve(2); - /// bytes.put_slice(b"xy"); - /// - /// assert_eq!(&b"xy"[..], &bytes[..]); - /// ``` - #[inline] - pub fn new() -> BytesMut { - BytesMut::with_capacity(0) - } - - /// Returns the number of bytes contained in this `BytesMut`. - /// - /// # Examples - /// - /// ``` - /// use bytes::BytesMut; - /// - /// let b = BytesMut::from(&b"hello"[..]); - /// assert_eq!(b.len(), 5); - /// ``` - #[inline] - pub fn len(&self) -> usize { - self.len - } - - /// Returns true if the `BytesMut` has a length of 0. - /// - /// # Examples - /// - /// ``` - /// use bytes::BytesMut; - /// - /// let b = BytesMut::with_capacity(64); - /// assert!(b.is_empty()); - /// ``` - #[inline] - pub fn is_empty(&self) -> bool { - self.len == 0 - } - - /// Returns the number of bytes the `BytesMut` can hold without reallocating. - /// - /// # Examples - /// - /// ``` - /// use bytes::BytesMut; - /// - /// let b = BytesMut::with_capacity(64); - /// assert_eq!(b.capacity(), 64); - /// ``` - #[inline] - pub fn capacity(&self) -> usize { - self.cap - } - - /// Converts `self` into an immutable `Bytes`. - /// - /// The conversion is zero cost and is used to indicate that the slice - /// referenced by the handle will no longer be mutated. Once the conversion - /// is done, the handle can be cloned and shared across threads. - /// - /// # Examples - /// - /// ``` - /// use bytes::{BytesMut, BufMut}; - /// use std::thread; - /// - /// let mut b = BytesMut::with_capacity(64); - /// b.put(&b"hello world"[..]); - /// let b1 = b.freeze(); - /// let b2 = b1.clone(); - /// - /// let th = thread::spawn(move || { - /// assert_eq!(&b1[..], b"hello world"); - /// }); - /// - /// assert_eq!(&b2[..], b"hello world"); - /// th.join().unwrap(); - /// ``` - #[inline] - pub fn freeze(mut self) -> Bytes { - if self.kind() == KIND_VEC { - // Just re-use `Bytes` internal Vec vtable - unsafe { - let (off, _) = self.get_vec_pos(); - let vec = rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off); - mem::forget(self); - let mut b: Bytes = vec.into(); - b.advance(off); - b - } - } else { - debug_assert_eq!(self.kind(), KIND_ARC); - - let ptr = self.ptr.as_ptr(); - let len = self.len; - let data = AtomicPtr::new(self.data as _); - mem::forget(self); - unsafe { Bytes::with_vtable(ptr, len, data, &SHARED_VTABLE) } - } - } - - /// Splits the bytes into two at the given index. - /// - /// Afterwards `self` contains elements `[0, at)`, and the returned - /// `BytesMut` contains elements `[at, capacity)`. - /// - /// This is an `O(1)` operation that just increases the reference count - /// and sets a few indices. 
- /// - /// # Examples - /// - /// ``` - /// use bytes::BytesMut; - /// - /// let mut a = BytesMut::from(&b"hello world"[..]); - /// let mut b = a.split_off(5); - /// - /// a[0] = b'j'; - /// b[0] = b'!'; - /// - /// assert_eq!(&a[..], b"jello"); - /// assert_eq!(&b[..], b"!world"); - /// ``` - /// - /// # Panics - /// - /// Panics if `at > capacity`. - #[must_use = "consider BytesMut::truncate if you don't need the other half"] - pub fn split_off(&mut self, at: usize) -> BytesMut { - assert!( - at <= self.capacity(), - "split_off out of bounds: {:?} <= {:?}", - at, - self.capacity(), - ); - unsafe { - let mut other = self.shallow_clone(); - other.set_start(at); - self.set_end(at); - other - } - } - - /// Removes the bytes from the current view, returning them in a new - /// `BytesMut` handle. - /// - /// Afterwards, `self` will be empty, but will retain any additional - /// capacity that it had before the operation. This is identical to - /// `self.split_to(self.len())`. - /// - /// This is an `O(1)` operation that just increases the reference count and - /// sets a few indices. - /// - /// # Examples - /// - /// ``` - /// use bytes::{BytesMut, BufMut}; - /// - /// let mut buf = BytesMut::with_capacity(1024); - /// buf.put(&b"hello world"[..]); - /// - /// let other = buf.split(); - /// - /// assert!(buf.is_empty()); - /// assert_eq!(1013, buf.capacity()); - /// - /// assert_eq!(other, b"hello world"[..]); - /// ``` - #[must_use = "consider BytesMut::advance(len()) if you don't need the other half"] - pub fn split(&mut self) -> BytesMut { - let len = self.len(); - self.split_to(len) - } - - /// Splits the buffer into two at the given index. - /// - /// Afterwards `self` contains elements `[at, len)`, and the returned `BytesMut` - /// contains elements `[0, at)`. - /// - /// This is an `O(1)` operation that just increases the reference count and - /// sets a few indices. - /// - /// # Examples - /// - /// ``` - /// use bytes::BytesMut; - /// - /// let mut a = BytesMut::from(&b"hello world"[..]); - /// let mut b = a.split_to(5); - /// - /// a[0] = b'!'; - /// b[0] = b'j'; - /// - /// assert_eq!(&a[..], b"!world"); - /// assert_eq!(&b[..], b"jello"); - /// ``` - /// - /// # Panics - /// - /// Panics if `at > len`. - #[must_use = "consider BytesMut::advance if you don't need the other half"] - pub fn split_to(&mut self, at: usize) -> BytesMut { - assert!( - at <= self.len(), - "split_to out of bounds: {:?} <= {:?}", - at, - self.len(), - ); - - unsafe { - let mut other = self.shallow_clone(); - other.set_end(at); - self.set_start(at); - other - } - } - - /// Shortens the buffer, keeping the first `len` bytes and dropping the - /// rest. - /// - /// If `len` is greater than the buffer's current length, this has no - /// effect. - /// - /// The [`split_off`] method can emulate `truncate`, but this causes the - /// excess bytes to be returned instead of dropped. - /// - /// # Examples - /// - /// ``` - /// use bytes::BytesMut; - /// - /// let mut buf = BytesMut::from(&b"hello world"[..]); - /// buf.truncate(5); - /// assert_eq!(buf, b"hello"[..]); - /// ``` - /// - /// [`split_off`]: #method.split_off - pub fn truncate(&mut self, len: usize) { - if len <= self.len() { - unsafe { - self.set_len(len); - } - } - } - - /// Clears the buffer, removing all data. 
- /// - /// # Examples - /// - /// ``` - /// use bytes::BytesMut; - /// - /// let mut buf = BytesMut::from(&b"hello world"[..]); - /// buf.clear(); - /// assert!(buf.is_empty()); - /// ``` - pub fn clear(&mut self) { - self.truncate(0); - } - - /// Resizes the buffer so that `len` is equal to `new_len`. - /// - /// If `new_len` is greater than `len`, the buffer is extended by the - /// difference with each additional byte set to `value`. If `new_len` is - /// less than `len`, the buffer is simply truncated. - /// - /// # Examples - /// - /// ``` - /// use bytes::BytesMut; - /// - /// let mut buf = BytesMut::new(); - /// - /// buf.resize(3, 0x1); - /// assert_eq!(&buf[..], &[0x1, 0x1, 0x1]); - /// - /// buf.resize(2, 0x2); - /// assert_eq!(&buf[..], &[0x1, 0x1]); - /// - /// buf.resize(4, 0x3); - /// assert_eq!(&buf[..], &[0x1, 0x1, 0x3, 0x3]); - /// ``` - pub fn resize(&mut self, new_len: usize, value: u8) { - let len = self.len(); - if new_len > len { - let additional = new_len - len; - self.reserve(additional); - unsafe { - let dst = self.bytes_mut().as_mut_ptr(); - ptr::write_bytes(dst, value, additional); - self.set_len(new_len); - } - } else { - self.truncate(new_len); - } - } - - /// Sets the length of the buffer. - /// - /// This will explicitly set the size of the buffer without actually - /// modifying the data, so it is up to the caller to ensure that the data - /// has been initialized. - /// - /// # Examples - /// - /// ``` - /// use bytes::BytesMut; - /// - /// let mut b = BytesMut::from(&b"hello world"[..]); - /// - /// unsafe { - /// b.set_len(5); - /// } - /// - /// assert_eq!(&b[..], b"hello"); - /// - /// unsafe { - /// b.set_len(11); - /// } - /// - /// assert_eq!(&b[..], b"hello world"); - /// ``` - #[inline] - pub unsafe fn set_len(&mut self, len: usize) { - debug_assert!(len <= self.cap, "set_len out of bounds"); - self.len = len; - } - - /// Reserves capacity for at least `additional` more bytes to be inserted - /// into the given `BytesMut`. - /// - /// More than `additional` bytes may be reserved in order to avoid frequent - /// reallocations. A call to `reserve` may result in an allocation. - /// - /// Before allocating new buffer space, the function will attempt to reclaim - /// space in the existing buffer. If the current handle references a small - /// view in the original buffer and all other handles have been dropped, - /// and the requested capacity is less than or equal to the existing - /// buffer's capacity, then the current view will be copied to the front of - /// the buffer and the handle will take ownership of the full buffer. - /// - /// # Examples - /// - /// In the following example, a new buffer is allocated. - /// - /// ``` - /// use bytes::BytesMut; - /// - /// let mut buf = BytesMut::from(&b"hello"[..]); - /// buf.reserve(64); - /// assert!(buf.capacity() >= 69); - /// ``` - /// - /// In the following example, the existing buffer is reclaimed. - /// - /// ``` - /// use bytes::{BytesMut, BufMut}; - /// - /// let mut buf = BytesMut::with_capacity(128); - /// buf.put(&[0; 64][..]); - /// - /// let ptr = buf.as_ptr(); - /// let other = buf.split(); - /// - /// assert!(buf.is_empty()); - /// assert_eq!(buf.capacity(), 64); - /// - /// drop(other); - /// buf.reserve(128); - /// - /// assert_eq!(buf.capacity(), 128); - /// assert_eq!(buf.as_ptr(), ptr); - /// ``` - /// - /// # Panics - /// - /// Panics if the new capacity overflows `usize`. 
- #[inline] - pub fn reserve(&mut self, additional: usize) { - let len = self.len(); - let rem = self.capacity() - len; - - if additional <= rem { - // The handle can already store at least `additional` more bytes, so - // there is no further work needed to be done. - return; - } - - self.reserve_inner(additional); - } - - // In separate function to allow the short-circuits in `reserve` to - // be inline-able. Significant helps performance. - fn reserve_inner(&mut self, additional: usize) { - let len = self.len(); - let kind = self.kind(); - - if kind == KIND_VEC { - // If there's enough free space before the start of the buffer, then - // just copy the data backwards and reuse the already-allocated - // space. - // - // Otherwise, since backed by a vector, use `Vec::reserve` - unsafe { - let (off, prev) = self.get_vec_pos(); - - // Only reuse space if we can satisfy the requested additional space. - if self.capacity() - self.len() + off >= additional { - // There's space - reuse it - // - // Just move the pointer back to the start after copying - // data back. - let base_ptr = self.ptr.as_ptr().offset(-(off as isize)); - ptr::copy(self.ptr.as_ptr(), base_ptr, self.len); - self.ptr = vptr(base_ptr); - self.set_vec_pos(0, prev); - - // Length stays constant, but since we moved backwards we - // can gain capacity back. - self.cap += off; - } else { - // No space - allocate more - let mut v = - ManuallyDrop::new(rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off)); - v.reserve(additional); - - // Update the info - self.ptr = vptr(v.as_mut_ptr().offset(off as isize)); - self.len = v.len() - off; - self.cap = v.capacity() - off; - } - - return; - } - } - - debug_assert_eq!(kind, KIND_ARC); - let shared: *mut Shared = self.data as _; - - // Reserving involves abandoning the currently shared buffer and - // allocating a new vector with the requested capacity. - // - // Compute the new capacity - let mut new_cap = len.checked_add(additional).expect("overflow"); - - let original_capacity; - let original_capacity_repr; - - unsafe { - original_capacity_repr = (*shared).original_capacity_repr; - original_capacity = original_capacity_from_repr(original_capacity_repr); - - // First, try to reclaim the buffer. This is possible if the current - // handle is the only outstanding handle pointing to the buffer. - if (*shared).is_unique() { - // This is the only handle to the buffer. It can be reclaimed. - // However, before doing the work of copying data, check to make - // sure that the vector has enough capacity. - let v = &mut (*shared).vec; - - if v.capacity() >= new_cap { - // The capacity is sufficient, reclaim the buffer - let ptr = v.as_mut_ptr(); - - ptr::copy(self.ptr.as_ptr(), ptr, len); - - self.ptr = vptr(ptr); - self.cap = v.capacity(); - - return; - } - - // The vector capacity is not sufficient. The reserve request is - // asking for more than the initial buffer capacity. Allocate more - // than requested if `new_cap` is not much bigger than the current - // capacity. - // - // There are some situations, using `reserve_exact` that the - // buffer capacity could be below `original_capacity`, so do a - // check. 
- let double = v.capacity().checked_shl(1).unwrap_or(new_cap); - - new_cap = cmp::max(cmp::max(double, new_cap), original_capacity); - } else { - new_cap = cmp::max(new_cap, original_capacity); - } - } - - // Create a new vector to store the data - let mut v = ManuallyDrop::new(Vec::with_capacity(new_cap)); - - // Copy the bytes - v.extend_from_slice(self.as_ref()); - - // Release the shared handle. This must be done *after* the bytes are - // copied. - unsafe { release_shared(shared) }; - - // Update self - let data = (original_capacity_repr << ORIGINAL_CAPACITY_OFFSET) | KIND_VEC; - self.data = data as _; - self.ptr = vptr(v.as_mut_ptr()); - self.len = v.len(); - self.cap = v.capacity(); - } - - /// Appends given bytes to this `BytesMut`. - /// - /// If this `BytesMut` object does not have enough capacity, it is resized - /// first. - /// - /// # Examples - /// - /// ``` - /// use bytes::BytesMut; - /// - /// let mut buf = BytesMut::with_capacity(0); - /// buf.extend_from_slice(b"aaabbb"); - /// buf.extend_from_slice(b"cccddd"); - /// - /// assert_eq!(b"aaabbbcccddd", &buf[..]); - /// ``` - pub fn extend_from_slice(&mut self, extend: &[u8]) { - let cnt = extend.len(); - self.reserve(cnt); - - unsafe { - let dst = self.maybe_uninit_bytes(); - // Reserved above - debug_assert!(dst.len() >= cnt); - - ptr::copy_nonoverlapping(extend.as_ptr(), dst.as_mut_ptr() as *mut u8, cnt); - } - - unsafe { - self.advance_mut(cnt); - } - } - - /// Absorbs a `BytesMut` that was previously split off. - /// - /// If the two `BytesMut` objects were previously contiguous, i.e., if - /// `other` was created by calling `split_off` on this `BytesMut`, then - /// this is an `O(1)` operation that just decreases a reference - /// count and sets a few indices. Otherwise this method degenerates to - /// `self.extend_from_slice(other.as_ref())`. - /// - /// # Examples - /// - /// ``` - /// use bytes::BytesMut; - /// - /// let mut buf = BytesMut::with_capacity(64); - /// buf.extend_from_slice(b"aaabbbcccddd"); - /// - /// let split = buf.split_off(6); - /// assert_eq!(b"aaabbb", &buf[..]); - /// assert_eq!(b"cccddd", &split[..]); - /// - /// buf.unsplit(split); - /// assert_eq!(b"aaabbbcccddd", &buf[..]); - /// ``` - pub fn unsplit(&mut self, other: BytesMut) { - if self.is_empty() { - *self = other; - return; - } - - if let Err(other) = self.try_unsplit(other) { - self.extend_from_slice(other.as_ref()); - } - } - - // private - - // For now, use a `Vec` to manage the memory for us, but we may want to - // change that in the future to some alternate allocator strategy. - // - // Thus, we don't expose an easy way to construct from a `Vec` since an - // internal change could make a simple pattern (`BytesMut::from(vec)`) - // suddenly a lot more expensive. - #[inline] - pub(crate) fn from_vec(mut vec: Vec) -> BytesMut { - let ptr = vptr(vec.as_mut_ptr()); - let len = vec.len(); - let cap = vec.capacity(); - mem::forget(vec); - - let original_capacity_repr = original_capacity_to_repr(cap); - let data = (original_capacity_repr << ORIGINAL_CAPACITY_OFFSET) | KIND_VEC; - - BytesMut { - ptr, - len, - cap, - data: data as *mut _, - } - } - - #[inline] - fn as_slice(&self) -> &[u8] { - unsafe { slice::from_raw_parts(self.ptr.as_ptr(), self.len) } - } - - #[inline] - fn as_slice_mut(&mut self) -> &mut [u8] { - unsafe { slice::from_raw_parts_mut(self.ptr.as_ptr(), self.len) } - } - - unsafe fn set_start(&mut self, start: usize) { - // Setting the start to 0 is a no-op, so return early if this is the - // case. 
- if start == 0 { - return; - } - - debug_assert!(start <= self.cap, "internal: set_start out of bounds"); - - let kind = self.kind(); - - if kind == KIND_VEC { - // Setting the start when in vec representation is a little more - // complicated. First, we have to track how far ahead the - // "start" of the byte buffer from the beginning of the vec. We - // also have to ensure that we don't exceed the maximum shift. - let (mut pos, prev) = self.get_vec_pos(); - pos += start; - - if pos <= MAX_VEC_POS { - self.set_vec_pos(pos, prev); - } else { - // The repr must be upgraded to ARC. This will never happen - // on 64 bit systems and will only happen on 32 bit systems - // when shifting past 134,217,727 bytes. As such, we don't - // worry too much about performance here. - self.promote_to_shared(/*ref_count = */ 1); - } - } - - // Updating the start of the view is setting `ptr` to point to the - // new start and updating the `len` field to reflect the new length - // of the view. - self.ptr = vptr(self.ptr.as_ptr().offset(start as isize)); - - if self.len >= start { - self.len -= start; - } else { - self.len = 0; - } - - self.cap -= start; - } - - unsafe fn set_end(&mut self, end: usize) { - debug_assert_eq!(self.kind(), KIND_ARC); - assert!(end <= self.cap, "set_end out of bounds"); - - self.cap = end; - self.len = cmp::min(self.len, end); - } - - fn try_unsplit(&mut self, other: BytesMut) -> Result<(), BytesMut> { - if other.is_empty() { - return Ok(()); - } - - let ptr = unsafe { self.ptr.as_ptr().offset(self.len as isize) }; - if ptr == other.ptr.as_ptr() - && self.kind() == KIND_ARC - && other.kind() == KIND_ARC - && self.data == other.data - { - // Contiguous blocks, just combine directly - self.len += other.len; - self.cap += other.cap; - Ok(()) - } else { - Err(other) - } - } - - #[inline] - fn kind(&self) -> usize { - self.data as usize & KIND_MASK - } - - unsafe fn promote_to_shared(&mut self, ref_cnt: usize) { - debug_assert_eq!(self.kind(), KIND_VEC); - debug_assert!(ref_cnt == 1 || ref_cnt == 2); - - let original_capacity_repr = - (self.data as usize & ORIGINAL_CAPACITY_MASK) >> ORIGINAL_CAPACITY_OFFSET; - - // The vec offset cannot be concurrently mutated, so there - // should be no danger reading it. - let off = (self.data as usize) >> VEC_POS_OFFSET; - - // First, allocate a new `Shared` instance containing the - // `Vec` fields. It's important to note that `ptr`, `len`, - // and `cap` cannot be mutated without having `&mut self`. - // This means that these fields will not be concurrently - // updated and since the buffer hasn't been promoted to an - // `Arc`, those three fields still are the components of the - // vector. - let shared = Box::new(Shared { - vec: rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off), - original_capacity_repr, - ref_count: AtomicUsize::new(ref_cnt), - }); - - let shared = Box::into_raw(shared); - - // The pointer should be aligned, so this assert should - // always succeed. - debug_assert_eq!(shared as usize & KIND_MASK, KIND_ARC); - - self.data = shared as _; - } - - /// Makes an exact shallow clone of `self`. - /// - /// The kind of `self` doesn't matter, but this is unsafe - /// because the clone will have the same offsets. You must - /// be sure the returned value to the user doesn't allow - /// two views into the same range. 
- #[inline] - unsafe fn shallow_clone(&mut self) -> BytesMut { - if self.kind() == KIND_ARC { - increment_shared(self.data); - ptr::read(self) - } else { - self.promote_to_shared(/*ref_count = */ 2); - ptr::read(self) - } - } - - #[inline] - unsafe fn get_vec_pos(&mut self) -> (usize, usize) { - debug_assert_eq!(self.kind(), KIND_VEC); - - let prev = self.data as usize; - (prev >> VEC_POS_OFFSET, prev) - } - - #[inline] - unsafe fn set_vec_pos(&mut self, pos: usize, prev: usize) { - debug_assert_eq!(self.kind(), KIND_VEC); - debug_assert!(pos <= MAX_VEC_POS); - - self.data = ((pos << VEC_POS_OFFSET) | (prev & NOT_VEC_POS_MASK)) as *mut _; - } - - #[inline] - fn maybe_uninit_bytes(&mut self) -> &mut [mem::MaybeUninit] { - unsafe { - let ptr = self.ptr.as_ptr().offset(self.len as isize); - let len = self.cap - self.len; - - slice::from_raw_parts_mut(ptr as *mut mem::MaybeUninit, len) - } - } -} - -impl Drop for BytesMut { - fn drop(&mut self) { - let kind = self.kind(); - - if kind == KIND_VEC { - unsafe { - let (off, _) = self.get_vec_pos(); - - // Vector storage, free the vector - let _ = rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off); - } - } else if kind == KIND_ARC { - unsafe { release_shared(self.data as _) }; - } - } -} - -impl Buf for BytesMut { - #[inline] - fn remaining(&self) -> usize { - self.len() - } - - #[inline] - fn bytes(&self) -> &[u8] { - self.as_slice() - } - - #[inline] - fn advance(&mut self, cnt: usize) { - assert!( - cnt <= self.remaining(), - "cannot advance past `remaining`: {:?} <= {:?}", - cnt, - self.remaining(), - ); - unsafe { - self.set_start(cnt); - } - } - - fn to_bytes(&mut self) -> crate::Bytes { - self.split().freeze() - } -} - -impl BufMut for BytesMut { - #[inline] - fn remaining_mut(&self) -> usize { - usize::MAX - self.len() - } - - #[inline] - unsafe fn advance_mut(&mut self, cnt: usize) { - let new_len = self.len() + cnt; - assert!( - new_len <= self.cap, - "new_len = {}; capacity = {}", - new_len, - self.cap - ); - self.len = new_len; - } - - #[inline] - fn bytes_mut(&mut self) -> &mut [mem::MaybeUninit] { - if self.capacity() == self.len() { - self.reserve(64); - } - self.maybe_uninit_bytes() - } - - // Specialize these methods so they can skip checking `remaining_mut` - // and `advance_mut`. 
- - fn put(&mut self, mut src: T) - where - Self: Sized, - { - while src.has_remaining() { - let s = src.bytes(); - let l = s.len(); - self.extend_from_slice(s); - src.advance(l); - } - } - - fn put_slice(&mut self, src: &[u8]) { - self.extend_from_slice(src); - } -} - -impl AsRef<[u8]> for BytesMut { - #[inline] - fn as_ref(&self) -> &[u8] { - self.as_slice() - } -} - -impl Deref for BytesMut { - type Target = [u8]; - - #[inline] - fn deref(&self) -> &[u8] { - self.as_ref() - } -} - -impl AsMut<[u8]> for BytesMut { - #[inline] - fn as_mut(&mut self) -> &mut [u8] { - self.as_slice_mut() - } -} - -impl DerefMut for BytesMut { - #[inline] - fn deref_mut(&mut self) -> &mut [u8] { - self.as_mut() - } -} - -impl<'a> From<&'a [u8]> for BytesMut { - fn from(src: &'a [u8]) -> BytesMut { - BytesMut::from_vec(src.to_vec()) - } -} - -impl<'a> From<&'a str> for BytesMut { - fn from(src: &'a str) -> BytesMut { - BytesMut::from(src.as_bytes()) - } -} - -impl From for Bytes { - fn from(src: BytesMut) -> Bytes { - src.freeze() - } -} - -impl PartialEq for BytesMut { - fn eq(&self, other: &BytesMut) -> bool { - self.as_slice() == other.as_slice() - } -} - -impl PartialOrd for BytesMut { - fn partial_cmp(&self, other: &BytesMut) -> Option { - self.as_slice().partial_cmp(other.as_slice()) - } -} - -impl Ord for BytesMut { - fn cmp(&self, other: &BytesMut) -> cmp::Ordering { - self.as_slice().cmp(other.as_slice()) - } -} - -impl Eq for BytesMut {} - -impl Default for BytesMut { - #[inline] - fn default() -> BytesMut { - BytesMut::new() - } -} - -impl hash::Hash for BytesMut { - fn hash(&self, state: &mut H) - where - H: hash::Hasher, - { - let s: &[u8] = self.as_ref(); - s.hash(state); - } -} - -impl Borrow<[u8]> for BytesMut { - fn borrow(&self) -> &[u8] { - self.as_ref() - } -} - -impl BorrowMut<[u8]> for BytesMut { - fn borrow_mut(&mut self) -> &mut [u8] { - self.as_mut() - } -} - -impl fmt::Write for BytesMut { - #[inline] - fn write_str(&mut self, s: &str) -> fmt::Result { - if self.remaining_mut() >= s.len() { - self.put_slice(s.as_bytes()); - Ok(()) - } else { - Err(fmt::Error) - } - } - - #[inline] - fn write_fmt(&mut self, args: fmt::Arguments<'_>) -> fmt::Result { - fmt::write(self, args) - } -} - -impl Clone for BytesMut { - fn clone(&self) -> BytesMut { - BytesMut::from(&self[..]) - } -} - -impl IntoIterator for BytesMut { - type Item = u8; - type IntoIter = IntoIter; - - fn into_iter(self) -> Self::IntoIter { - IntoIter::new(self) - } -} - -impl<'a> IntoIterator for &'a BytesMut { - type Item = &'a u8; - type IntoIter = core::slice::Iter<'a, u8>; - - fn into_iter(self) -> Self::IntoIter { - self.as_ref().into_iter() - } -} - -impl Extend for BytesMut { - fn extend(&mut self, iter: T) - where - T: IntoIterator, - { - let iter = iter.into_iter(); - - let (lower, _) = iter.size_hint(); - self.reserve(lower); - - // TODO: optimize - // 1. If self.kind() == KIND_VEC, use Vec::extend - // 2. 
Make `reserve` inline-able - for b in iter { - self.reserve(1); - self.put_u8(b); - } - } -} - -impl<'a> Extend<&'a u8> for BytesMut { - fn extend(&mut self, iter: T) - where - T: IntoIterator, - { - self.extend(iter.into_iter().map(|b| *b)) - } -} - -impl FromIterator for BytesMut { - fn from_iter>(into_iter: T) -> Self { - BytesMut::from_vec(Vec::from_iter(into_iter)) - } -} - -impl<'a> FromIterator<&'a u8> for BytesMut { - fn from_iter>(into_iter: T) -> Self { - BytesMut::from_iter(into_iter.into_iter().map(|b| *b)) - } -} - -/* - * - * ===== Inner ===== - * - */ - -unsafe fn increment_shared(ptr: *mut Shared) { - let old_size = (*ptr).ref_count.fetch_add(1, Ordering::Relaxed); - - if old_size > isize::MAX as usize { - crate::abort(); - } -} - -unsafe fn release_shared(ptr: *mut Shared) { - // `Shared` storage... follow the drop steps from Arc. - if (*ptr).ref_count.fetch_sub(1, Ordering::Release) != 1 { - return; - } - - // This fence is needed to prevent reordering of use of the data and - // deletion of the data. Because it is marked `Release`, the decreasing - // of the reference count synchronizes with this `Acquire` fence. This - // means that use of the data happens before decreasing the reference - // count, which happens before this fence, which happens before the - // deletion of the data. - // - // As explained in the [Boost documentation][1], - // - // > It is important to enforce any possible access to the object in one - // > thread (through an existing reference) to *happen before* deleting - // > the object in a different thread. This is achieved by a "release" - // > operation after dropping a reference (any access to the object - // > through this reference must obviously happened before), and an - // > "acquire" operation before deleting the object. - // - // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html) - atomic::fence(Ordering::Acquire); - - // Drop the data - Box::from_raw(ptr); -} - -impl Shared { - fn is_unique(&self) -> bool { - // The goal is to check if the current handle is the only handle - // that currently has access to the buffer. This is done by - // checking if the `ref_count` is currently 1. - // - // The `Acquire` ordering synchronizes with the `Release` as - // part of the `fetch_sub` in `release_shared`. The `fetch_sub` - // operation guarantees that any mutations done in other threads - // are ordered before the `ref_count` is decremented. As such, - // this `Acquire` will guarantee that those mutations are - // visible to the current thread. 
- self.ref_count.load(Ordering::Acquire) == 1 - } -} - -fn original_capacity_to_repr(cap: usize) -> usize { - let width = PTR_WIDTH - ((cap >> MIN_ORIGINAL_CAPACITY_WIDTH).leading_zeros() as usize); - cmp::min( - width, - MAX_ORIGINAL_CAPACITY_WIDTH - MIN_ORIGINAL_CAPACITY_WIDTH, - ) -} - -fn original_capacity_from_repr(repr: usize) -> usize { - if repr == 0 { - return 0; - } - - 1 << (repr + (MIN_ORIGINAL_CAPACITY_WIDTH - 1)) -} - -/* -#[test] -fn test_original_capacity_to_repr() { - assert_eq!(original_capacity_to_repr(0), 0); - - let max_width = 32; - - for width in 1..(max_width + 1) { - let cap = 1 << width - 1; - - let expected = if width < MIN_ORIGINAL_CAPACITY_WIDTH { - 0 - } else if width < MAX_ORIGINAL_CAPACITY_WIDTH { - width - MIN_ORIGINAL_CAPACITY_WIDTH - } else { - MAX_ORIGINAL_CAPACITY_WIDTH - MIN_ORIGINAL_CAPACITY_WIDTH - }; - - assert_eq!(original_capacity_to_repr(cap), expected); - - if width > 1 { - assert_eq!(original_capacity_to_repr(cap + 1), expected); - } - - // MIN_ORIGINAL_CAPACITY_WIDTH must be bigger than 7 to pass tests below - if width == MIN_ORIGINAL_CAPACITY_WIDTH + 1 { - assert_eq!(original_capacity_to_repr(cap - 24), expected - 1); - assert_eq!(original_capacity_to_repr(cap + 76), expected); - } else if width == MIN_ORIGINAL_CAPACITY_WIDTH + 2 { - assert_eq!(original_capacity_to_repr(cap - 1), expected - 1); - assert_eq!(original_capacity_to_repr(cap - 48), expected - 1); - } - } -} - -#[test] -fn test_original_capacity_from_repr() { - assert_eq!(0, original_capacity_from_repr(0)); - - let min_cap = 1 << MIN_ORIGINAL_CAPACITY_WIDTH; - - assert_eq!(min_cap, original_capacity_from_repr(1)); - assert_eq!(min_cap * 2, original_capacity_from_repr(2)); - assert_eq!(min_cap * 4, original_capacity_from_repr(3)); - assert_eq!(min_cap * 8, original_capacity_from_repr(4)); - assert_eq!(min_cap * 16, original_capacity_from_repr(5)); - assert_eq!(min_cap * 32, original_capacity_from_repr(6)); - assert_eq!(min_cap * 64, original_capacity_from_repr(7)); -} -*/ - -unsafe impl Send for BytesMut {} -unsafe impl Sync for BytesMut {} - -/* - * - * ===== PartialEq / PartialOrd ===== - * - */ - -impl PartialEq<[u8]> for BytesMut { - fn eq(&self, other: &[u8]) -> bool { - &**self == other - } -} - -impl PartialOrd<[u8]> for BytesMut { - fn partial_cmp(&self, other: &[u8]) -> Option { - (**self).partial_cmp(other) - } -} - -impl PartialEq for [u8] { - fn eq(&self, other: &BytesMut) -> bool { - *other == *self - } -} - -impl PartialOrd for [u8] { - fn partial_cmp(&self, other: &BytesMut) -> Option { - <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other) - } -} - -impl PartialEq for BytesMut { - fn eq(&self, other: &str) -> bool { - &**self == other.as_bytes() - } -} - -impl PartialOrd for BytesMut { - fn partial_cmp(&self, other: &str) -> Option { - (**self).partial_cmp(other.as_bytes()) - } -} - -impl PartialEq for str { - fn eq(&self, other: &BytesMut) -> bool { - *other == *self - } -} - -impl PartialOrd for str { - fn partial_cmp(&self, other: &BytesMut) -> Option { - <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other) - } -} - -impl PartialEq> for BytesMut { - fn eq(&self, other: &Vec) -> bool { - *self == &other[..] 
- } -} - -impl PartialOrd> for BytesMut { - fn partial_cmp(&self, other: &Vec) -> Option { - (**self).partial_cmp(&other[..]) - } -} - -impl PartialEq for Vec { - fn eq(&self, other: &BytesMut) -> bool { - *other == *self - } -} - -impl PartialOrd for Vec { - fn partial_cmp(&self, other: &BytesMut) -> Option { - other.partial_cmp(self) - } -} - -impl PartialEq for BytesMut { - fn eq(&self, other: &String) -> bool { - *self == &other[..] - } -} - -impl PartialOrd for BytesMut { - fn partial_cmp(&self, other: &String) -> Option { - (**self).partial_cmp(other.as_bytes()) - } -} - -impl PartialEq for String { - fn eq(&self, other: &BytesMut) -> bool { - *other == *self - } -} - -impl PartialOrd for String { - fn partial_cmp(&self, other: &BytesMut) -> Option { - <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other) - } -} - -impl<'a, T: ?Sized> PartialEq<&'a T> for BytesMut -where - BytesMut: PartialEq, -{ - fn eq(&self, other: &&'a T) -> bool { - *self == **other - } -} - -impl<'a, T: ?Sized> PartialOrd<&'a T> for BytesMut -where - BytesMut: PartialOrd, -{ - fn partial_cmp(&self, other: &&'a T) -> Option { - self.partial_cmp(*other) - } -} - -impl PartialEq for &[u8] { - fn eq(&self, other: &BytesMut) -> bool { - *other == *self - } -} - -impl PartialOrd for &[u8] { - fn partial_cmp(&self, other: &BytesMut) -> Option { - <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other) - } -} - -impl PartialEq for &str { - fn eq(&self, other: &BytesMut) -> bool { - *other == *self - } -} - -impl PartialOrd for &str { - fn partial_cmp(&self, other: &BytesMut) -> Option { - other.partial_cmp(self) - } -} - -impl PartialEq for Bytes { - fn eq(&self, other: &BytesMut) -> bool { - &other[..] == &self[..] - } -} - -impl PartialEq for BytesMut { - fn eq(&self, other: &Bytes) -> bool { - &other[..] == &self[..] 
- } -} - -fn vptr(ptr: *mut u8) -> NonNull { - if cfg!(debug_assertions) { - NonNull::new(ptr).expect("Vec pointer should be non-null") - } else { - unsafe { NonNull::new_unchecked(ptr) } - } -} - -unsafe fn rebuild_vec(ptr: *mut u8, mut len: usize, mut cap: usize, off: usize) -> Vec { - let ptr = ptr.offset(-(off as isize)); - len += off; - cap += off; - - Vec::from_raw_parts(ptr, len, cap) -} - -// ===== impl SharedVtable ===== - -static SHARED_VTABLE: Vtable = Vtable { - clone: shared_v_clone, - drop: shared_v_drop, -}; - -unsafe fn shared_v_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes { - let shared = data.load(Ordering::Relaxed) as *mut Shared; - increment_shared(shared); - - let data = AtomicPtr::new(shared as _); - Bytes::with_vtable(ptr, len, data, &SHARED_VTABLE) -} - -unsafe fn shared_v_drop(data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) { - data.with_mut(|shared| { - release_shared(*shared as *mut Shared); - }); -} - -// compile-fails - -/// ```compile_fail -/// use bytes::BytesMut; -/// #[deny(unused_must_use)] -/// { -/// let mut b1 = BytesMut::from("hello world"); -/// b1.split_to(6); -/// } -/// ``` -fn _split_to_must_use() {} - -/// ```compile_fail -/// use bytes::BytesMut; -/// #[deny(unused_must_use)] -/// { -/// let mut b1 = BytesMut::from("hello world"); -/// b1.split_off(6); -/// } -/// ``` -fn _split_off_must_use() {} - -/// ```compile_fail -/// use bytes::BytesMut; -/// #[deny(unused_must_use)] -/// { -/// let mut b1 = BytesMut::from("hello world"); -/// b1.split(); -/// } -/// ``` -fn _split_must_use() {} - -// fuzz tests -#[cfg(all(test, loom))] -mod fuzz { - use loom::sync::Arc; - use loom::thread; - - use super::BytesMut; - use crate::Bytes; - - #[test] - fn bytes_mut_cloning_frozen() { - loom::model(|| { - let a = BytesMut::from(&b"abcdefgh"[..]).split().freeze(); - let addr = a.as_ptr() as usize; - - // test the Bytes::clone is Sync by putting it in an Arc - let a1 = Arc::new(a); - let a2 = a1.clone(); - - let t1 = thread::spawn(move || { - let b: Bytes = (*a1).clone(); - assert_eq!(b.as_ptr() as usize, addr); - }); - - let t2 = thread::spawn(move || { - let b: Bytes = (*a2).clone(); - assert_eq!(b.as_ptr() as usize, addr); - }); - - t1.join().unwrap(); - t2.join().unwrap(); - }); - } -} diff --git a/third_party/rust/bytes-0.5.6/src/fmt/debug.rs b/third_party/rust/bytes-0.5.6/src/fmt/debug.rs deleted file mode 100644 index a8545514e326..000000000000 --- a/third_party/rust/bytes-0.5.6/src/fmt/debug.rs +++ /dev/null @@ -1,49 +0,0 @@ -use core::fmt::{Debug, Formatter, Result}; - -use super::BytesRef; -use crate::{Bytes, BytesMut}; - -/// Alternative implementation of `std::fmt::Debug` for byte slice. -/// -/// Standard `Debug` implementation for `[u8]` is comma separated -/// list of numbers. Since large amount of byte strings are in fact -/// ASCII strings or contain a lot of ASCII strings (e. g. HTTP), -/// it is convenient to print strings as ASCII when possible. 
-impl Debug for BytesRef<'_> { - fn fmt(&self, f: &mut Formatter<'_>) -> Result { - write!(f, "b\"")?; - for &b in self.0 { - // https://doc.rust-lang.org/reference/tokens.html#byte-escapes - if b == b'\n' { - write!(f, "\\n")?; - } else if b == b'\r' { - write!(f, "\\r")?; - } else if b == b'\t' { - write!(f, "\\t")?; - } else if b == b'\\' || b == b'"' { - write!(f, "\\{}", b as char)?; - } else if b == b'\0' { - write!(f, "\\0")?; - // ASCII printable - } else if b >= 0x20 && b < 0x7f { - write!(f, "{}", b as char)?; - } else { - write!(f, "\\x{:02x}", b)?; - } - } - write!(f, "\"")?; - Ok(()) - } -} - -impl Debug for Bytes { - fn fmt(&self, f: &mut Formatter<'_>) -> Result { - Debug::fmt(&BytesRef(&self.as_ref()), f) - } -} - -impl Debug for BytesMut { - fn fmt(&self, f: &mut Formatter<'_>) -> Result { - Debug::fmt(&BytesRef(&self.as_ref()), f) - } -} diff --git a/third_party/rust/bytes-0.5.6/src/fmt/hex.rs b/third_party/rust/bytes-0.5.6/src/fmt/hex.rs deleted file mode 100644 index 97a749a33682..000000000000 --- a/third_party/rust/bytes-0.5.6/src/fmt/hex.rs +++ /dev/null @@ -1,37 +0,0 @@ -use core::fmt::{Formatter, LowerHex, Result, UpperHex}; - -use super::BytesRef; -use crate::{Bytes, BytesMut}; - -impl LowerHex for BytesRef<'_> { - fn fmt(&self, f: &mut Formatter<'_>) -> Result { - for &b in self.0 { - write!(f, "{:02x}", b)?; - } - Ok(()) - } -} - -impl UpperHex for BytesRef<'_> { - fn fmt(&self, f: &mut Formatter<'_>) -> Result { - for &b in self.0 { - write!(f, "{:02X}", b)?; - } - Ok(()) - } -} - -macro_rules! hex_impl { - ($tr:ident, $ty:ty) => { - impl $tr for $ty { - fn fmt(&self, f: &mut Formatter<'_>) -> Result { - $tr::fmt(&BytesRef(self.as_ref()), f) - } - } - }; -} - -hex_impl!(LowerHex, Bytes); -hex_impl!(LowerHex, BytesMut); -hex_impl!(UpperHex, Bytes); -hex_impl!(UpperHex, BytesMut); diff --git a/third_party/rust/bytes-0.5.6/src/fmt/mod.rs b/third_party/rust/bytes-0.5.6/src/fmt/mod.rs deleted file mode 100644 index 676d15fc21f1..000000000000 --- a/third_party/rust/bytes-0.5.6/src/fmt/mod.rs +++ /dev/null @@ -1,5 +0,0 @@ -mod debug; -mod hex; - -/// `BytesRef` is not a part of public API of bytes crate. -struct BytesRef<'a>(&'a [u8]); diff --git a/third_party/rust/bytes-0.5.6/src/lib.rs b/third_party/rust/bytes-0.5.6/src/lib.rs deleted file mode 100644 index e375c01b1ac0..000000000000 --- a/third_party/rust/bytes-0.5.6/src/lib.rs +++ /dev/null @@ -1,117 +0,0 @@ -#![warn(missing_docs, missing_debug_implementations, rust_2018_idioms)] -#![doc(test( - no_crate_inject, - attr(deny(warnings, rust_2018_idioms), allow(dead_code, unused_variables)) -))] -#![doc(html_root_url = "https://docs.rs/bytes/0.5.6")] -#![no_std] - -//! Provides abstractions for working with bytes. -//! -//! The `bytes` crate provides an efficient byte buffer structure -//! ([`Bytes`](struct.Bytes.html)) and traits for working with buffer -//! implementations ([`Buf`], [`BufMut`]). -//! -//! [`Buf`]: trait.Buf.html -//! [`BufMut`]: trait.BufMut.html -//! -//! # `Bytes` -//! -//! `Bytes` is an efficient container for storing and operating on contiguous -//! slices of memory. It is intended for use primarily in networking code, but -//! could have applications elsewhere as well. -//! -//! `Bytes` values facilitate zero-copy network programming by allowing multiple -//! `Bytes` objects to point to the same underlying memory. This is managed by -//! using a reference count to track when the memory is no longer needed and can -//! be freed. -//! -//! 
A `Bytes` handle can be created directly from an existing byte store (such as `&[u8]` -//! or `Vec`), but usually a `BytesMut` is used first and written to. For -//! example: -//! -//! ```rust -//! use bytes::{BytesMut, BufMut}; -//! -//! let mut buf = BytesMut::with_capacity(1024); -//! buf.put(&b"hello world"[..]); -//! buf.put_u16(1234); -//! -//! let a = buf.split(); -//! assert_eq!(a, b"hello world\x04\xD2"[..]); -//! -//! buf.put(&b"goodbye world"[..]); -//! -//! let b = buf.split(); -//! assert_eq!(b, b"goodbye world"[..]); -//! -//! assert_eq!(buf.capacity(), 998); -//! ``` -//! -//! In the above example, only a single buffer of 1024 is allocated. The handles -//! `a` and `b` will share the underlying buffer and maintain indices tracking -//! the view into the buffer represented by the handle. -//! -//! See the [struct docs] for more details. -//! -//! [struct docs]: struct.Bytes.html -//! -//! # `Buf`, `BufMut` -//! -//! These two traits provide read and write access to buffers. The underlying -//! storage may or may not be in contiguous memory. For example, `Bytes` is a -//! buffer that guarantees contiguous memory, but a [rope] stores the bytes in -//! disjoint chunks. `Buf` and `BufMut` maintain cursors tracking the current -//! position in the underlying byte storage. When bytes are read or written, the -//! cursor is advanced. -//! -//! [rope]: https://en.wikipedia.org/wiki/Rope_(data_structure) -//! -//! ## Relation with `Read` and `Write` -//! -//! At first glance, it may seem that `Buf` and `BufMut` overlap in -//! functionality with `std::io::Read` and `std::io::Write`. However, they -//! serve different purposes. A buffer is the value that is provided as an -//! argument to `Read::read` and `Write::write`. `Read` and `Write` may then -//! perform a syscall, which has the potential of failing. Operations on `Buf` -//! and `BufMut` are infallible. - -extern crate alloc; - -#[cfg(feature = "std")] -extern crate std; - -pub mod buf; -pub use crate::buf::{Buf, BufMut}; - -mod bytes; -mod bytes_mut; -mod fmt; -mod loom; -pub use crate::bytes::Bytes; -pub use crate::bytes_mut::BytesMut; - -// Optional Serde support -#[cfg(feature = "serde")] -mod serde; - -#[inline(never)] -#[cold] -fn abort() -> ! 
{ - #[cfg(feature = "std")] - { - std::process::abort(); - } - - #[cfg(not(feature = "std"))] - { - struct Abort; - impl Drop for Abort { - fn drop(&mut self) { - panic!(); - } - } - let _a = Abort; - panic!("abort"); - } -} diff --git a/third_party/rust/bytes-0.5.6/src/loom.rs b/third_party/rust/bytes-0.5.6/src/loom.rs deleted file mode 100644 index 1cae8812e6a2..000000000000 --- a/third_party/rust/bytes-0.5.6/src/loom.rs +++ /dev/null @@ -1,30 +0,0 @@ -#[cfg(not(all(test, loom)))] -pub(crate) mod sync { - pub(crate) mod atomic { - pub(crate) use core::sync::atomic::{fence, AtomicPtr, AtomicUsize, Ordering}; - - pub(crate) trait AtomicMut { - fn with_mut(&mut self, f: F) -> R - where - F: FnOnce(&mut *mut T) -> R; - } - - impl AtomicMut for AtomicPtr { - fn with_mut(&mut self, f: F) -> R - where - F: FnOnce(&mut *mut T) -> R, - { - f(self.get_mut()) - } - } - } -} - -#[cfg(all(test, loom))] -pub(crate) mod sync { - pub(crate) mod atomic { - pub(crate) use loom::sync::atomic::{fence, AtomicPtr, AtomicUsize, Ordering}; - - pub(crate) trait AtomicMut {} - } -} diff --git a/third_party/rust/bytes-0.5.6/src/serde.rs b/third_party/rust/bytes-0.5.6/src/serde.rs deleted file mode 100644 index 0a5bd144a960..000000000000 --- a/third_party/rust/bytes-0.5.6/src/serde.rs +++ /dev/null @@ -1,89 +0,0 @@ -use super::{Bytes, BytesMut}; -use alloc::string::String; -use alloc::vec::Vec; -use core::{cmp, fmt}; -use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; - -macro_rules! serde_impl { - ($ty:ident, $visitor_ty:ident, $from_slice:ident, $from_vec:ident) => { - impl Serialize for $ty { - #[inline] - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - serializer.serialize_bytes(&self) - } - } - - struct $visitor_ty; - - impl<'de> de::Visitor<'de> for $visitor_ty { - type Value = $ty; - - fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { - formatter.write_str("byte array") - } - - #[inline] - fn visit_seq(self, mut seq: V) -> Result - where - V: de::SeqAccess<'de>, - { - let len = cmp::min(seq.size_hint().unwrap_or(0), 4096); - let mut values: Vec = Vec::with_capacity(len); - - while let Some(value) = seq.next_element()? 
{ - values.push(value); - } - - Ok($ty::$from_vec(values)) - } - - #[inline] - fn visit_bytes(self, v: &[u8]) -> Result - where - E: de::Error, - { - Ok($ty::$from_slice(v)) - } - - #[inline] - fn visit_byte_buf(self, v: Vec) -> Result - where - E: de::Error, - { - Ok($ty::$from_vec(v)) - } - - #[inline] - fn visit_str(self, v: &str) -> Result - where - E: de::Error, - { - Ok($ty::$from_slice(v.as_bytes())) - } - - #[inline] - fn visit_string(self, v: String) -> Result - where - E: de::Error, - { - Ok($ty::$from_vec(v.into_bytes())) - } - } - - impl<'de> Deserialize<'de> for $ty { - #[inline] - fn deserialize(deserializer: D) -> Result<$ty, D::Error> - where - D: Deserializer<'de>, - { - deserializer.deserialize_byte_buf($visitor_ty) - } - } - }; -} - -serde_impl!(Bytes, BytesVisitor, copy_from_slice, from); -serde_impl!(BytesMut, BytesMutVisitor, from, from_vec); diff --git a/third_party/rust/bytes-0.5.6/tests/test_buf.rs b/third_party/rust/bytes-0.5.6/tests/test_buf.rs deleted file mode 100644 index 17bdd54e821a..000000000000 --- a/third_party/rust/bytes-0.5.6/tests/test_buf.rs +++ /dev/null @@ -1,103 +0,0 @@ -#![warn(rust_2018_idioms)] - -use bytes::Buf; -#[cfg(feature = "std")] -use std::io::IoSlice; - -#[test] -fn test_fresh_cursor_vec() { - let mut buf = &b"hello"[..]; - - assert_eq!(buf.remaining(), 5); - assert_eq!(buf.bytes(), b"hello"); - - buf.advance(2); - - assert_eq!(buf.remaining(), 3); - assert_eq!(buf.bytes(), b"llo"); - - buf.advance(3); - - assert_eq!(buf.remaining(), 0); - assert_eq!(buf.bytes(), b""); -} - -#[test] -fn test_get_u8() { - let mut buf = &b"\x21zomg"[..]; - assert_eq!(0x21, buf.get_u8()); -} - -#[test] -fn test_get_u16() { - let mut buf = &b"\x21\x54zomg"[..]; - assert_eq!(0x2154, buf.get_u16()); - let mut buf = &b"\x21\x54zomg"[..]; - assert_eq!(0x5421, buf.get_u16_le()); -} - -#[test] -#[should_panic] -fn test_get_u16_buffer_underflow() { - let mut buf = &b"\x21"[..]; - buf.get_u16(); -} - -#[cfg(feature = "std")] -#[test] -fn test_bufs_vec() { - let buf = &b"hello world"[..]; - - let b1: &[u8] = &mut []; - let b2: &[u8] = &mut []; - - let mut dst = [IoSlice::new(b1), IoSlice::new(b2)]; - - assert_eq!(1, buf.bytes_vectored(&mut dst[..])); -} - -#[test] -fn test_vec_deque() { - use std::collections::VecDeque; - - let mut buffer: VecDeque = VecDeque::new(); - buffer.extend(b"hello world"); - assert_eq!(11, buffer.remaining()); - assert_eq!(b"hello world", buffer.bytes()); - buffer.advance(6); - assert_eq!(b"world", buffer.bytes()); - buffer.extend(b" piece"); - let mut out = [0; 11]; - buffer.copy_to_slice(&mut out); - assert_eq!(b"world piece", &out[..]); -} - -#[test] -fn test_deref_buf_forwards() { - struct Special; - - impl Buf for Special { - fn remaining(&self) -> usize { - unreachable!("remaining"); - } - - fn bytes(&self) -> &[u8] { - unreachable!("bytes"); - } - - fn advance(&mut self, _: usize) { - unreachable!("advance"); - } - - fn get_u8(&mut self) -> u8 { - // specialized! 
- b'x' - } - } - - // these should all use the specialized method - assert_eq!(Special.get_u8(), b'x'); - assert_eq!((&mut Special as &mut dyn Buf).get_u8(), b'x'); - assert_eq!((Box::new(Special) as Box).get_u8(), b'x'); - assert_eq!(Box::new(Special).get_u8(), b'x'); -} diff --git a/third_party/rust/bytes-0.5.6/tests/test_buf_mut.rs b/third_party/rust/bytes-0.5.6/tests/test_buf_mut.rs deleted file mode 100644 index b91e2e511919..000000000000 --- a/third_party/rust/bytes-0.5.6/tests/test_buf_mut.rs +++ /dev/null @@ -1,120 +0,0 @@ -#![warn(rust_2018_idioms)] - -#[cfg(feature = "std")] -use bytes::buf::IoSliceMut; -use bytes::{BufMut, BytesMut}; -use core::fmt::Write; -use core::usize; - -#[test] -fn test_vec_as_mut_buf() { - let mut buf = Vec::with_capacity(64); - - assert_eq!(buf.remaining_mut(), usize::MAX); - - assert!(buf.bytes_mut().len() >= 64); - - buf.put(&b"zomg"[..]); - - assert_eq!(&buf, b"zomg"); - - assert_eq!(buf.remaining_mut(), usize::MAX - 4); - assert_eq!(buf.capacity(), 64); - - for _ in 0..16 { - buf.put(&b"zomg"[..]); - } - - assert_eq!(buf.len(), 68); -} - -#[test] -fn test_put_u8() { - let mut buf = Vec::with_capacity(8); - buf.put_u8(33); - assert_eq!(b"\x21", &buf[..]); -} - -#[test] -fn test_put_u16() { - let mut buf = Vec::with_capacity(8); - buf.put_u16(8532); - assert_eq!(b"\x21\x54", &buf[..]); - - buf.clear(); - buf.put_u16_le(8532); - assert_eq!(b"\x54\x21", &buf[..]); -} - -#[test] -#[should_panic(expected = "cannot advance")] -fn test_vec_advance_mut() { - // Verify fix for #354 - let mut buf = Vec::with_capacity(8); - unsafe { - buf.advance_mut(12); - } -} - -#[test] -fn test_clone() { - let mut buf = BytesMut::with_capacity(100); - buf.write_str("this is a test").unwrap(); - let buf2 = buf.clone(); - - buf.write_str(" of our emergency broadcast system").unwrap(); - assert!(buf != buf2); -} - -#[cfg(feature = "std")] -#[test] -fn test_bufs_vec_mut() { - let b1: &mut [u8] = &mut []; - let b2: &mut [u8] = &mut []; - let mut dst = [IoSliceMut::from(b1), IoSliceMut::from(b2)]; - - // with no capacity - let mut buf = BytesMut::new(); - assert_eq!(buf.capacity(), 0); - assert_eq!(1, buf.bytes_vectored_mut(&mut dst[..])); - - // with capacity - let mut buf = BytesMut::with_capacity(64); - assert_eq!(1, buf.bytes_vectored_mut(&mut dst[..])); -} - -#[test] -fn test_mut_slice() { - let mut v = vec![0, 0, 0, 0]; - let mut s = &mut v[..]; - s.put_u32(42); -} - -#[test] -fn test_deref_bufmut_forwards() { - struct Special; - - impl BufMut for Special { - fn remaining_mut(&self) -> usize { - unreachable!("remaining_mut"); - } - - fn bytes_mut(&mut self) -> &mut [std::mem::MaybeUninit] { - unreachable!("bytes_mut"); - } - - unsafe fn advance_mut(&mut self, _: usize) { - unreachable!("advance"); - } - - fn put_u8(&mut self, _: u8) { - // specialized! 
- } - } - - // these should all use the specialized method - Special.put_u8(b'x'); - (&mut Special as &mut dyn BufMut).put_u8(b'x'); - (Box::new(Special) as Box).put_u8(b'x'); - Box::new(Special).put_u8(b'x'); -} diff --git a/third_party/rust/bytes-0.5.6/tests/test_bytes.rs b/third_party/rust/bytes-0.5.6/tests/test_bytes.rs deleted file mode 100644 index 6b106a6bcdd1..000000000000 --- a/third_party/rust/bytes-0.5.6/tests/test_bytes.rs +++ /dev/null @@ -1,962 +0,0 @@ -#![warn(rust_2018_idioms)] - -use bytes::{Buf, BufMut, Bytes, BytesMut}; - -use std::usize; - -const LONG: &'static [u8] = b"mary had a little lamb, little lamb, little lamb"; -const SHORT: &'static [u8] = b"hello world"; - -fn is_sync() {} -fn is_send() {} - -#[test] -fn test_bounds() { - is_sync::(); - is_sync::(); - is_send::(); - is_send::(); -} - -#[test] -fn test_layout() { - use std::mem; - - assert_eq!( - mem::size_of::(), - mem::size_of::() * 4, - "Bytes size should be 4 words", - ); - assert_eq!( - mem::size_of::(), - mem::size_of::() * 4, - "BytesMut should be 4 words", - ); - - assert_eq!( - mem::size_of::(), - mem::size_of::>(), - "Bytes should be same size as Option", - ); - - assert_eq!( - mem::size_of::(), - mem::size_of::>(), - "BytesMut should be same size as Option", - ); -} - -#[test] -fn from_slice() { - let a = Bytes::from(&b"abcdefgh"[..]); - assert_eq!(a, b"abcdefgh"[..]); - assert_eq!(a, &b"abcdefgh"[..]); - assert_eq!(a, Vec::from(&b"abcdefgh"[..])); - assert_eq!(b"abcdefgh"[..], a); - assert_eq!(&b"abcdefgh"[..], a); - assert_eq!(Vec::from(&b"abcdefgh"[..]), a); - - let a = BytesMut::from(&b"abcdefgh"[..]); - assert_eq!(a, b"abcdefgh"[..]); - assert_eq!(a, &b"abcdefgh"[..]); - assert_eq!(a, Vec::from(&b"abcdefgh"[..])); - assert_eq!(b"abcdefgh"[..], a); - assert_eq!(&b"abcdefgh"[..], a); - assert_eq!(Vec::from(&b"abcdefgh"[..]), a); -} - -#[test] -fn fmt() { - let a = format!("{:?}", Bytes::from(&b"abcdefg"[..])); - let b = "b\"abcdefg\""; - - assert_eq!(a, b); - - let a = format!("{:?}", BytesMut::from(&b"abcdefg"[..])); - assert_eq!(a, b); -} - -#[test] -fn fmt_write() { - use std::fmt::Write; - use std::iter::FromIterator; - let s = String::from_iter((0..10).map(|_| "abcdefg")); - - let mut a = BytesMut::with_capacity(64); - write!(a, "{}", &s[..64]).unwrap(); - assert_eq!(a, s[..64].as_bytes()); - - let mut b = BytesMut::with_capacity(64); - write!(b, "{}", &s[..32]).unwrap(); - write!(b, "{}", &s[32..64]).unwrap(); - assert_eq!(b, s[..64].as_bytes()); - - let mut c = BytesMut::with_capacity(64); - write!(c, "{}", s).unwrap(); - assert_eq!(c, s[..].as_bytes()); -} - -#[test] -fn len() { - let a = Bytes::from(&b"abcdefg"[..]); - assert_eq!(a.len(), 7); - - let a = BytesMut::from(&b"abcdefg"[..]); - assert_eq!(a.len(), 7); - - let a = Bytes::from(&b""[..]); - assert!(a.is_empty()); - - let a = BytesMut::from(&b""[..]); - assert!(a.is_empty()); -} - -#[test] -fn index() { - let a = Bytes::from(&b"hello world"[..]); - assert_eq!(a[0..5], *b"hello"); -} - -#[test] -fn slice() { - let a = Bytes::from(&b"hello world"[..]); - - let b = a.slice(3..5); - assert_eq!(b, b"lo"[..]); - - let b = a.slice(0..0); - assert_eq!(b, b""[..]); - - let b = a.slice(3..3); - assert_eq!(b, b""[..]); - - let b = a.slice(a.len()..a.len()); - assert_eq!(b, b""[..]); - - let b = a.slice(..5); - assert_eq!(b, b"hello"[..]); - - let b = a.slice(3..); - assert_eq!(b, b"lo world"[..]); -} - -#[test] -#[should_panic] -fn slice_oob_1() { - let a = Bytes::from(&b"hello world"[..]); - a.slice(5..44); -} - -#[test] -#[should_panic] 
-fn slice_oob_2() { - let a = Bytes::from(&b"hello world"[..]); - a.slice(44..49); -} - -#[test] -fn split_off() { - let mut hello = Bytes::from(&b"helloworld"[..]); - let world = hello.split_off(5); - - assert_eq!(hello, &b"hello"[..]); - assert_eq!(world, &b"world"[..]); - - let mut hello = BytesMut::from(&b"helloworld"[..]); - let world = hello.split_off(5); - - assert_eq!(hello, &b"hello"[..]); - assert_eq!(world, &b"world"[..]); -} - -#[test] -#[should_panic] -fn split_off_oob() { - let mut hello = Bytes::from(&b"helloworld"[..]); - let _ = hello.split_off(44); -} - -#[test] -fn split_off_uninitialized() { - let mut bytes = BytesMut::with_capacity(1024); - let other = bytes.split_off(128); - - assert_eq!(bytes.len(), 0); - assert_eq!(bytes.capacity(), 128); - - assert_eq!(other.len(), 0); - assert_eq!(other.capacity(), 896); -} - -#[test] -fn split_off_to_loop() { - let s = b"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"; - - for i in 0..(s.len() + 1) { - { - let mut bytes = Bytes::from(&s[..]); - let off = bytes.split_off(i); - assert_eq!(i, bytes.len()); - let mut sum = Vec::new(); - sum.extend(bytes.iter()); - sum.extend(off.iter()); - assert_eq!(&s[..], &sum[..]); - } - { - let mut bytes = BytesMut::from(&s[..]); - let off = bytes.split_off(i); - assert_eq!(i, bytes.len()); - let mut sum = Vec::new(); - sum.extend(&bytes); - sum.extend(&off); - assert_eq!(&s[..], &sum[..]); - } - { - let mut bytes = Bytes::from(&s[..]); - let off = bytes.split_to(i); - assert_eq!(i, off.len()); - let mut sum = Vec::new(); - sum.extend(off.iter()); - sum.extend(bytes.iter()); - assert_eq!(&s[..], &sum[..]); - } - { - let mut bytes = BytesMut::from(&s[..]); - let off = bytes.split_to(i); - assert_eq!(i, off.len()); - let mut sum = Vec::new(); - sum.extend(&off); - sum.extend(&bytes); - assert_eq!(&s[..], &sum[..]); - } - } -} - -#[test] -fn split_to_1() { - // Static - let mut a = Bytes::from_static(SHORT); - let b = a.split_to(4); - - assert_eq!(SHORT[4..], a); - assert_eq!(SHORT[..4], b); - - // Allocated - let mut a = Bytes::copy_from_slice(LONG); - let b = a.split_to(4); - - assert_eq!(LONG[4..], a); - assert_eq!(LONG[..4], b); - - let mut a = Bytes::copy_from_slice(LONG); - let b = a.split_to(30); - - assert_eq!(LONG[30..], a); - assert_eq!(LONG[..30], b); -} - -#[test] -fn split_to_2() { - let mut a = Bytes::from(LONG); - assert_eq!(LONG, a); - - let b = a.split_to(1); - - assert_eq!(LONG[1..], a); - drop(b); -} - -#[test] -#[should_panic] -fn split_to_oob() { - let mut hello = Bytes::from(&b"helloworld"[..]); - let _ = hello.split_to(33); -} - -#[test] -#[should_panic] -fn split_to_oob_mut() { - let mut hello = BytesMut::from(&b"helloworld"[..]); - let _ = hello.split_to(33); -} - -#[test] -#[should_panic] -fn split_to_uninitialized() { - let mut bytes = BytesMut::with_capacity(1024); - let _other = bytes.split_to(128); -} - -#[test] -fn split_off_to_at_gt_len() { - fn make_bytes() -> Bytes { - let mut bytes = BytesMut::with_capacity(100); - bytes.put_slice(&[10, 20, 30, 40]); - bytes.freeze() - } - - use std::panic; - - let _ = make_bytes().split_to(4); - let _ = make_bytes().split_off(4); - - assert!(panic::catch_unwind(move || { - let _ = make_bytes().split_to(5); - }) - .is_err()); - - assert!(panic::catch_unwind(move || { - let _ = make_bytes().split_off(5); - }) - .is_err()); -} - -#[test] -fn truncate() { - let s = &b"helloworld"[..]; - let mut hello = Bytes::from(s); - hello.truncate(15); - assert_eq!(hello, s); - hello.truncate(10); - assert_eq!(hello, s); - 
hello.truncate(5); - assert_eq!(hello, "hello"); -} - -#[test] -fn freeze_clone_shared() { - let s = &b"abcdefgh"[..]; - let b = BytesMut::from(s).split().freeze(); - assert_eq!(b, s); - let c = b.clone(); - assert_eq!(c, s); -} - -#[test] -fn freeze_clone_unique() { - let s = &b"abcdefgh"[..]; - let b = BytesMut::from(s).freeze(); - assert_eq!(b, s); - let c = b.clone(); - assert_eq!(c, s); -} - -#[test] -fn freeze_after_advance() { - let s = &b"abcdefgh"[..]; - let mut b = BytesMut::from(s); - b.advance(1); - assert_eq!(b, s[1..]); - let b = b.freeze(); - // Verify fix for #352. Previously, freeze would ignore the start offset - // for BytesMuts in Vec mode. - assert_eq!(b, s[1..]); -} - -#[test] -fn freeze_after_advance_arc() { - let s = &b"abcdefgh"[..]; - let mut b = BytesMut::from(s); - // Make b Arc - let _ = b.split_to(0); - b.advance(1); - assert_eq!(b, s[1..]); - let b = b.freeze(); - assert_eq!(b, s[1..]); -} - -#[test] -fn freeze_after_split_to() { - let s = &b"abcdefgh"[..]; - let mut b = BytesMut::from(s); - let _ = b.split_to(1); - assert_eq!(b, s[1..]); - let b = b.freeze(); - assert_eq!(b, s[1..]); -} - -#[test] -fn freeze_after_truncate() { - let s = &b"abcdefgh"[..]; - let mut b = BytesMut::from(s); - b.truncate(7); - assert_eq!(b, s[..7]); - let b = b.freeze(); - assert_eq!(b, s[..7]); -} - -#[test] -fn freeze_after_truncate_arc() { - let s = &b"abcdefgh"[..]; - let mut b = BytesMut::from(s); - // Make b Arc - let _ = b.split_to(0); - b.truncate(7); - assert_eq!(b, s[..7]); - let b = b.freeze(); - assert_eq!(b, s[..7]); -} - -#[test] -fn freeze_after_split_off() { - let s = &b"abcdefgh"[..]; - let mut b = BytesMut::from(s); - let _ = b.split_off(7); - assert_eq!(b, s[..7]); - let b = b.freeze(); - assert_eq!(b, s[..7]); -} - -#[test] -fn fns_defined_for_bytes_mut() { - let mut bytes = BytesMut::from(&b"hello world"[..]); - - bytes.as_ptr(); - bytes.as_mut_ptr(); - - // Iterator - let v: Vec = bytes.as_ref().iter().cloned().collect(); - assert_eq!(&v[..], bytes); -} - -#[test] -fn reserve_convert() { - // Vec -> Vec - let mut bytes = BytesMut::from(LONG); - bytes.reserve(64); - assert_eq!(bytes.capacity(), LONG.len() + 64); - - // Arc -> Vec - let mut bytes = BytesMut::from(LONG); - let a = bytes.split_to(30); - - bytes.reserve(128); - assert!(bytes.capacity() >= bytes.len() + 128); - - drop(a); -} - -#[test] -fn reserve_growth() { - let mut bytes = BytesMut::with_capacity(64); - bytes.put("hello world".as_bytes()); - let _ = bytes.split(); - - bytes.reserve(65); - assert_eq!(bytes.capacity(), 128); -} - -#[test] -fn reserve_allocates_at_least_original_capacity() { - let mut bytes = BytesMut::with_capacity(1024); - - for i in 0..1020 { - bytes.put_u8(i as u8); - } - - let _other = bytes.split(); - - bytes.reserve(16); - assert_eq!(bytes.capacity(), 1024); -} - -#[test] -fn reserve_max_original_capacity_value() { - const SIZE: usize = 128 * 1024; - - let mut bytes = BytesMut::with_capacity(SIZE); - - for _ in 0..SIZE { - bytes.put_u8(0u8); - } - - let _other = bytes.split(); - - bytes.reserve(16); - assert_eq!(bytes.capacity(), 64 * 1024); -} - -#[test] -fn reserve_vec_recycling() { - let mut bytes = BytesMut::with_capacity(16); - assert_eq!(bytes.capacity(), 16); - let addr = bytes.as_ptr() as usize; - bytes.put("0123456789012345".as_bytes()); - assert_eq!(bytes.as_ptr() as usize, addr); - bytes.advance(10); - assert_eq!(bytes.capacity(), 6); - bytes.reserve(8); - assert_eq!(bytes.capacity(), 16); - assert_eq!(bytes.as_ptr() as usize, addr); -} - -#[test] -fn 
reserve_in_arc_unique_does_not_overallocate() { - let mut bytes = BytesMut::with_capacity(1000); - let _ = bytes.split(); - - // now bytes is Arc and refcount == 1 - - assert_eq!(1000, bytes.capacity()); - bytes.reserve(2001); - assert_eq!(2001, bytes.capacity()); -} - -#[test] -fn reserve_in_arc_unique_doubles() { - let mut bytes = BytesMut::with_capacity(1000); - let _ = bytes.split(); - - // now bytes is Arc and refcount == 1 - - assert_eq!(1000, bytes.capacity()); - bytes.reserve(1001); - assert_eq!(2000, bytes.capacity()); -} - -#[test] -fn reserve_in_arc_nonunique_does_not_overallocate() { - let mut bytes = BytesMut::with_capacity(1000); - let _copy = bytes.split(); - - // now bytes is Arc and refcount == 2 - - assert_eq!(1000, bytes.capacity()); - bytes.reserve(2001); - assert_eq!(2001, bytes.capacity()); -} - -#[test] -fn extend_mut() { - let mut bytes = BytesMut::with_capacity(0); - bytes.extend(LONG); - assert_eq!(*bytes, LONG[..]); -} - -#[test] -fn extend_from_slice_mut() { - for &i in &[3, 34] { - let mut bytes = BytesMut::new(); - bytes.extend_from_slice(&LONG[..i]); - bytes.extend_from_slice(&LONG[i..]); - assert_eq!(LONG[..], *bytes); - } -} - -#[test] -fn extend_mut_without_size_hint() { - let mut bytes = BytesMut::with_capacity(0); - let mut long_iter = LONG.iter(); - - // Use iter::from_fn since it doesn't know a size_hint - bytes.extend(std::iter::from_fn(|| long_iter.next())); - assert_eq!(*bytes, LONG[..]); -} - -#[test] -fn from_static() { - let mut a = Bytes::from_static(b"ab"); - let b = a.split_off(1); - - assert_eq!(a, b"a"[..]); - assert_eq!(b, b"b"[..]); -} - -#[test] -fn advance_static() { - let mut a = Bytes::from_static(b"hello world"); - a.advance(6); - assert_eq!(a, &b"world"[..]); -} - -#[test] -fn advance_vec() { - let mut a = Bytes::from(b"hello world boooo yah world zomg wat wat".to_vec()); - a.advance(16); - assert_eq!(a, b"o yah world zomg wat wat"[..]); - - a.advance(4); - assert_eq!(a, b"h world zomg wat wat"[..]); - - a.advance(6); - assert_eq!(a, b"d zomg wat wat"[..]); -} - -#[test] -fn advance_bytes_mut() { - let mut a = BytesMut::from("hello world boooo yah world zomg wat wat"); - a.advance(16); - assert_eq!(a, b"o yah world zomg wat wat"[..]); - - a.advance(4); - assert_eq!(a, b"h world zomg wat wat"[..]); - - // Reserve some space. - a.reserve(1024); - assert_eq!(a, b"h world zomg wat wat"[..]); - - a.advance(6); - assert_eq!(a, b"d zomg wat wat"[..]); -} - -#[test] -#[should_panic] -fn advance_past_len() { - let mut a = BytesMut::from("hello world"); - a.advance(20); -} - -#[test] -// Only run these tests on little endian systems. CI uses qemu for testing -// little endian... and qemu doesn't really support threading all that well. 
-#[cfg(target_endian = "little")] -fn stress() { - // Tests promoting a buffer from a vec -> shared in a concurrent situation - use std::sync::{Arc, Barrier}; - use std::thread; - - const THREADS: usize = 8; - const ITERS: usize = 1_000; - - for i in 0..ITERS { - let data = [i as u8; 256]; - let buf = Arc::new(Bytes::copy_from_slice(&data[..])); - - let barrier = Arc::new(Barrier::new(THREADS)); - let mut joins = Vec::with_capacity(THREADS); - - for _ in 0..THREADS { - let c = barrier.clone(); - let buf = buf.clone(); - - joins.push(thread::spawn(move || { - c.wait(); - let buf: Bytes = (*buf).clone(); - drop(buf); - })); - } - - for th in joins { - th.join().unwrap(); - } - - assert_eq!(*buf, data[..]); - } -} - -#[test] -fn partial_eq_bytesmut() { - let bytes = Bytes::from(&b"The quick red fox"[..]); - let bytesmut = BytesMut::from(&b"The quick red fox"[..]); - assert!(bytes == bytesmut); - assert!(bytesmut == bytes); - let bytes2 = Bytes::from(&b"Jumped over the lazy brown dog"[..]); - assert!(bytes2 != bytesmut); - assert!(bytesmut != bytes2); -} - -/* -#[test] -fn bytes_unsplit_basic() { - let buf = Bytes::from(&b"aaabbbcccddd"[..]); - - let splitted = buf.split_off(6); - assert_eq!(b"aaabbb", &buf[..]); - assert_eq!(b"cccddd", &splitted[..]); - - buf.unsplit(splitted); - assert_eq!(b"aaabbbcccddd", &buf[..]); -} - -#[test] -fn bytes_unsplit_empty_other() { - let buf = Bytes::from(&b"aaabbbcccddd"[..]); - - // empty other - let other = Bytes::new(); - - buf.unsplit(other); - assert_eq!(b"aaabbbcccddd", &buf[..]); -} - -#[test] -fn bytes_unsplit_empty_self() { - // empty self - let mut buf = Bytes::new(); - - let mut other = Bytes::with_capacity(64); - other.extend_from_slice(b"aaabbbcccddd"); - - buf.unsplit(other); - assert_eq!(b"aaabbbcccddd", &buf[..]); -} - -#[test] -fn bytes_unsplit_arc_different() { - let mut buf = Bytes::with_capacity(64); - buf.extend_from_slice(b"aaaabbbbeeee"); - - buf.split_off(8); //arc - - let mut buf2 = Bytes::with_capacity(64); - buf2.extend_from_slice(b"ccccddddeeee"); - - buf2.split_off(8); //arc - - buf.unsplit(buf2); - assert_eq!(b"aaaabbbbccccdddd", &buf[..]); -} - -#[test] -fn bytes_unsplit_arc_non_contiguous() { - let mut buf = Bytes::with_capacity(64); - buf.extend_from_slice(b"aaaabbbbeeeeccccdddd"); - - let mut buf2 = buf.split_off(8); //arc - - let buf3 = buf2.split_off(4); //arc - - buf.unsplit(buf3); - assert_eq!(b"aaaabbbbccccdddd", &buf[..]); -} - -#[test] -fn bytes_unsplit_two_split_offs() { - let mut buf = Bytes::with_capacity(64); - buf.extend_from_slice(b"aaaabbbbccccdddd"); - - let mut buf2 = buf.split_off(8); //arc - let buf3 = buf2.split_off(4); //arc - - buf2.unsplit(buf3); - buf.unsplit(buf2); - assert_eq!(b"aaaabbbbccccdddd", &buf[..]); -} - -#[test] -fn bytes_unsplit_overlapping_references() { - let mut buf = Bytes::with_capacity(64); - buf.extend_from_slice(b"abcdefghijklmnopqrstuvwxyz"); - let mut buf0010 = buf.slice(0..10); - let buf1020 = buf.slice(10..20); - let buf0515 = buf.slice(5..15); - buf0010.unsplit(buf1020); - assert_eq!(b"abcdefghijklmnopqrst", &buf0010[..]); - assert_eq!(b"fghijklmno", &buf0515[..]); -} -*/ - -#[test] -fn bytes_mut_unsplit_basic() { - let mut buf = BytesMut::with_capacity(64); - buf.extend_from_slice(b"aaabbbcccddd"); - - let splitted = buf.split_off(6); - assert_eq!(b"aaabbb", &buf[..]); - assert_eq!(b"cccddd", &splitted[..]); - - buf.unsplit(splitted); - assert_eq!(b"aaabbbcccddd", &buf[..]); -} - -#[test] -fn bytes_mut_unsplit_empty_other() { - let mut buf = BytesMut::with_capacity(64); - 
buf.extend_from_slice(b"aaabbbcccddd"); - - // empty other - let other = BytesMut::new(); - - buf.unsplit(other); - assert_eq!(b"aaabbbcccddd", &buf[..]); -} - -#[test] -fn bytes_mut_unsplit_empty_self() { - // empty self - let mut buf = BytesMut::new(); - - let mut other = BytesMut::with_capacity(64); - other.extend_from_slice(b"aaabbbcccddd"); - - buf.unsplit(other); - assert_eq!(b"aaabbbcccddd", &buf[..]); -} - -#[test] -fn bytes_mut_unsplit_arc_different() { - let mut buf = BytesMut::with_capacity(64); - buf.extend_from_slice(b"aaaabbbbeeee"); - - let _ = buf.split_off(8); //arc - - let mut buf2 = BytesMut::with_capacity(64); - buf2.extend_from_slice(b"ccccddddeeee"); - - let _ = buf2.split_off(8); //arc - - buf.unsplit(buf2); - assert_eq!(b"aaaabbbbccccdddd", &buf[..]); -} - -#[test] -fn bytes_mut_unsplit_arc_non_contiguous() { - let mut buf = BytesMut::with_capacity(64); - buf.extend_from_slice(b"aaaabbbbeeeeccccdddd"); - - let mut buf2 = buf.split_off(8); //arc - - let buf3 = buf2.split_off(4); //arc - - buf.unsplit(buf3); - assert_eq!(b"aaaabbbbccccdddd", &buf[..]); -} - -#[test] -fn bytes_mut_unsplit_two_split_offs() { - let mut buf = BytesMut::with_capacity(64); - buf.extend_from_slice(b"aaaabbbbccccdddd"); - - let mut buf2 = buf.split_off(8); //arc - let buf3 = buf2.split_off(4); //arc - - buf2.unsplit(buf3); - buf.unsplit(buf2); - assert_eq!(b"aaaabbbbccccdddd", &buf[..]); -} - -#[test] -fn from_iter_no_size_hint() { - use std::iter; - - let mut expect = vec![]; - - let actual: Bytes = iter::repeat(b'x') - .scan(100, |cnt, item| { - if *cnt >= 1 { - *cnt -= 1; - expect.push(item); - Some(item) - } else { - None - } - }) - .collect(); - - assert_eq!(&actual[..], &expect[..]); -} - -fn test_slice_ref(bytes: &Bytes, start: usize, end: usize, expected: &[u8]) { - let slice = &(bytes.as_ref()[start..end]); - let sub = bytes.slice_ref(&slice); - assert_eq!(&sub[..], expected); -} - -#[test] -fn slice_ref_works() { - let bytes = Bytes::from(&b"012345678"[..]); - - test_slice_ref(&bytes, 0, 0, b""); - test_slice_ref(&bytes, 0, 3, b"012"); - test_slice_ref(&bytes, 2, 6, b"2345"); - test_slice_ref(&bytes, 7, 9, b"78"); - test_slice_ref(&bytes, 9, 9, b""); -} - -#[test] -fn slice_ref_empty() { - let bytes = Bytes::from(&b""[..]); - let slice = &(bytes.as_ref()[0..0]); - - let sub = bytes.slice_ref(&slice); - assert_eq!(&sub[..], b""); -} - -#[test] -fn slice_ref_empty_subslice() { - let bytes = Bytes::from(&b"abcde"[..]); - let subbytes = bytes.slice(0..0); - let slice = &subbytes[..]; - // The `slice` object is derived from the original `bytes` object - // so `slice_ref` should work. 
- assert_eq!(Bytes::new(), bytes.slice_ref(slice)); -} - -#[test] -#[should_panic] -fn slice_ref_catches_not_a_subset() { - let bytes = Bytes::from(&b"012345678"[..]); - let slice = &b"012345"[0..4]; - - bytes.slice_ref(slice); -} - -#[test] -fn slice_ref_not_an_empty_subset() { - let bytes = Bytes::from(&b"012345678"[..]); - let slice = &b""[0..0]; - - assert_eq!(Bytes::new(), bytes.slice_ref(slice)); -} - -#[test] -fn empty_slice_ref_not_an_empty_subset() { - let bytes = Bytes::new(); - let slice = &b"some other slice"[0..0]; - - assert_eq!(Bytes::new(), bytes.slice_ref(slice)); -} - -#[test] -fn bytes_buf_mut_advance() { - let mut bytes = BytesMut::with_capacity(1024); - - unsafe { - let ptr = bytes.bytes_mut().as_ptr(); - assert_eq!(1024, bytes.bytes_mut().len()); - - bytes.advance_mut(10); - - let next = bytes.bytes_mut().as_ptr(); - assert_eq!(1024 - 10, bytes.bytes_mut().len()); - assert_eq!(ptr.offset(10), next); - - // advance to the end - bytes.advance_mut(1024 - 10); - - // The buffer size is doubled - assert_eq!(1024, bytes.bytes_mut().len()); - } -} - -#[test] -fn bytes_buf_mut_reuse_when_fully_consumed() { - use bytes::{Buf, BytesMut}; - let mut buf = BytesMut::new(); - buf.reserve(8192); - buf.extend_from_slice(&[0u8; 100][..]); - - let p = &buf[0] as *const u8; - buf.advance(100); - - buf.reserve(8192); - buf.extend_from_slice(b" "); - - assert_eq!(&buf[0] as *const u8, p); -} - -#[test] -#[should_panic] -fn bytes_reserve_overflow() { - let mut bytes = BytesMut::with_capacity(1024); - bytes.put_slice(b"hello world"); - - bytes.reserve(usize::MAX); -} - -#[test] -fn bytes_with_capacity_but_empty() { - // See https://github.com/tokio-rs/bytes/issues/340 - let vec = Vec::with_capacity(1); - let _ = Bytes::from(vec); -} diff --git a/third_party/rust/bytes-0.5.6/tests/test_bytes_odd_alloc.rs b/third_party/rust/bytes-0.5.6/tests/test_bytes_odd_alloc.rs deleted file mode 100644 index 4ce424b7c00a..000000000000 --- a/third_party/rust/bytes-0.5.6/tests/test_bytes_odd_alloc.rs +++ /dev/null @@ -1,67 +0,0 @@ -//! Test using `Bytes` with an allocator that hands out "odd" pointers for -//! vectors (pointers where the LSB is set). 
- -use std::alloc::{GlobalAlloc, Layout, System}; -use std::ptr; - -use bytes::Bytes; - -#[global_allocator] -static ODD: Odd = Odd; - -struct Odd; - -unsafe impl GlobalAlloc for Odd { - unsafe fn alloc(&self, layout: Layout) -> *mut u8 { - if layout.align() == 1 && layout.size() > 0 { - // Allocate slightly bigger so that we can offset the pointer by 1 - let size = layout.size() + 1; - let new_layout = match Layout::from_size_align(size, 1) { - Ok(layout) => layout, - Err(_err) => return ptr::null_mut(), - }; - let ptr = System.alloc(new_layout); - if !ptr.is_null() { - let ptr = ptr.offset(1); - ptr - } else { - ptr - } - } else { - System.alloc(layout) - } - } - - unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) { - if layout.align() == 1 && layout.size() > 0 { - let size = layout.size() + 1; - let new_layout = match Layout::from_size_align(size, 1) { - Ok(layout) => layout, - Err(_err) => std::process::abort(), - }; - System.dealloc(ptr.offset(-1), new_layout); - } else { - System.dealloc(ptr, layout); - } - } -} - -#[test] -fn sanity_check_odd_allocator() { - let vec = vec![33u8; 1024]; - let p = vec.as_ptr() as usize; - assert!(p & 0x1 == 0x1, "{:#b}", p); -} - -#[test] -fn test_bytes_from_vec_drop() { - let vec = vec![33u8; 1024]; - let _b = Bytes::from(vec); -} - -#[test] -fn test_bytes_clone_drop() { - let vec = vec![33u8; 1024]; - let b1 = Bytes::from(vec); - let _b2 = b1.clone(); -} diff --git a/third_party/rust/bytes-0.5.6/tests/test_bytes_vec_alloc.rs b/third_party/rust/bytes-0.5.6/tests/test_bytes_vec_alloc.rs deleted file mode 100644 index 418a9cd64f89..000000000000 --- a/third_party/rust/bytes-0.5.6/tests/test_bytes_vec_alloc.rs +++ /dev/null @@ -1,79 +0,0 @@ -use std::alloc::{GlobalAlloc, Layout, System}; -use std::{mem, ptr}; - -use bytes::{Buf, Bytes}; - -#[global_allocator] -static LEDGER: Ledger = Ledger; - -struct Ledger; - -const USIZE_SIZE: usize = mem::size_of::(); - -unsafe impl GlobalAlloc for Ledger { - unsafe fn alloc(&self, layout: Layout) -> *mut u8 { - if layout.align() == 1 && layout.size() > 0 { - // Allocate extra space to stash a record of - // how much space there was. 
- let orig_size = layout.size(); - let size = orig_size + USIZE_SIZE; - let new_layout = match Layout::from_size_align(size, 1) { - Ok(layout) => layout, - Err(_err) => return ptr::null_mut(), - }; - let ptr = System.alloc(new_layout); - if !ptr.is_null() { - (ptr as *mut usize).write(orig_size); - let ptr = ptr.offset(USIZE_SIZE as isize); - ptr - } else { - ptr - } - } else { - System.alloc(layout) - } - } - - unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) { - if layout.align() == 1 && layout.size() > 0 { - let off_ptr = (ptr as *mut usize).offset(-1); - let orig_size = off_ptr.read(); - if orig_size != layout.size() { - panic!( - "bad dealloc: alloc size was {}, dealloc size is {}", - orig_size, - layout.size() - ); - } - - let new_layout = match Layout::from_size_align(layout.size() + USIZE_SIZE, 1) { - Ok(layout) => layout, - Err(_err) => std::process::abort(), - }; - System.dealloc(off_ptr as *mut u8, new_layout); - } else { - System.dealloc(ptr, layout); - } - } -} -#[test] -fn test_bytes_advance() { - let mut bytes = Bytes::from(vec![10, 20, 30]); - bytes.advance(1); - drop(bytes); -} - -#[test] -fn test_bytes_truncate() { - let mut bytes = Bytes::from(vec![10, 20, 30]); - bytes.truncate(2); - drop(bytes); -} - -#[test] -fn test_bytes_truncate_and_advance() { - let mut bytes = Bytes::from(vec![10, 20, 30]); - bytes.truncate(2); - bytes.advance(1); - drop(bytes); -} diff --git a/third_party/rust/bytes-0.5.6/tests/test_chain.rs b/third_party/rust/bytes-0.5.6/tests/test_chain.rs deleted file mode 100644 index 6dbc45d04858..000000000000 --- a/third_party/rust/bytes-0.5.6/tests/test_chain.rs +++ /dev/null @@ -1,135 +0,0 @@ -#![warn(rust_2018_idioms)] - -use bytes::buf::{BufExt, BufMutExt}; -use bytes::{Buf, BufMut, Bytes}; -#[cfg(feature = "std")] -use std::io::IoSlice; - -#[test] -fn collect_two_bufs() { - let a = Bytes::from(&b"hello"[..]); - let b = Bytes::from(&b"world"[..]); - - let res = a.chain(b).to_bytes(); - assert_eq!(res, &b"helloworld"[..]); -} - -#[test] -fn writing_chained() { - let mut a = [0u8; 64]; - let mut b = [0u8; 64]; - - { - let mut buf = (&mut a[..]).chain_mut(&mut b[..]); - - for i in 0u8..128 { - buf.put_u8(i); - } - } - - for i in 0..64 { - let expect = i as u8; - assert_eq!(expect, a[i]); - assert_eq!(expect + 64, b[i]); - } -} - -#[test] -fn iterating_two_bufs() { - let a = Bytes::from(&b"hello"[..]); - let b = Bytes::from(&b"world"[..]); - - let res: Vec = a.chain(b).into_iter().collect(); - assert_eq!(res, &b"helloworld"[..]); -} - -#[cfg(feature = "std")] -#[test] -fn vectored_read() { - let a = Bytes::from(&b"hello"[..]); - let b = Bytes::from(&b"world"[..]); - - let mut buf = a.chain(b); - - { - let b1: &[u8] = &mut []; - let b2: &[u8] = &mut []; - let b3: &[u8] = &mut []; - let b4: &[u8] = &mut []; - let mut iovecs = [ - IoSlice::new(b1), - IoSlice::new(b2), - IoSlice::new(b3), - IoSlice::new(b4), - ]; - - assert_eq!(2, buf.bytes_vectored(&mut iovecs)); - assert_eq!(iovecs[0][..], b"hello"[..]); - assert_eq!(iovecs[1][..], b"world"[..]); - assert_eq!(iovecs[2][..], b""[..]); - assert_eq!(iovecs[3][..], b""[..]); - } - - buf.advance(2); - - { - let b1: &[u8] = &mut []; - let b2: &[u8] = &mut []; - let b3: &[u8] = &mut []; - let b4: &[u8] = &mut []; - let mut iovecs = [ - IoSlice::new(b1), - IoSlice::new(b2), - IoSlice::new(b3), - IoSlice::new(b4), - ]; - - assert_eq!(2, buf.bytes_vectored(&mut iovecs)); - assert_eq!(iovecs[0][..], b"llo"[..]); - assert_eq!(iovecs[1][..], b"world"[..]); - assert_eq!(iovecs[2][..], b""[..]); - 
assert_eq!(iovecs[3][..], b""[..]); - } - - buf.advance(3); - - { - let b1: &[u8] = &mut []; - let b2: &[u8] = &mut []; - let b3: &[u8] = &mut []; - let b4: &[u8] = &mut []; - let mut iovecs = [ - IoSlice::new(b1), - IoSlice::new(b2), - IoSlice::new(b3), - IoSlice::new(b4), - ]; - - assert_eq!(1, buf.bytes_vectored(&mut iovecs)); - assert_eq!(iovecs[0][..], b"world"[..]); - assert_eq!(iovecs[1][..], b""[..]); - assert_eq!(iovecs[2][..], b""[..]); - assert_eq!(iovecs[3][..], b""[..]); - } - - buf.advance(3); - - { - let b1: &[u8] = &mut []; - let b2: &[u8] = &mut []; - let b3: &[u8] = &mut []; - let b4: &[u8] = &mut []; - let mut iovecs = [ - IoSlice::new(b1), - IoSlice::new(b2), - IoSlice::new(b3), - IoSlice::new(b4), - ]; - - assert_eq!(1, buf.bytes_vectored(&mut iovecs)); - assert_eq!(iovecs[0][..], b"ld"[..]); - assert_eq!(iovecs[1][..], b""[..]); - assert_eq!(iovecs[2][..], b""[..]); - assert_eq!(iovecs[3][..], b""[..]); - } -} diff --git a/third_party/rust/bytes-0.5.6/tests/test_debug.rs b/third_party/rust/bytes-0.5.6/tests/test_debug.rs deleted file mode 100644 index 08d2f254e819..000000000000 --- a/third_party/rust/bytes-0.5.6/tests/test_debug.rs +++ /dev/null @@ -1,35 +0,0 @@ -#![warn(rust_2018_idioms)] - -use bytes::Bytes; - -#[test] -fn fmt() { - let vec: Vec<_> = (0..0x100).map(|b| b as u8).collect(); - - let expected = "b\"\ - \\0\\x01\\x02\\x03\\x04\\x05\\x06\\x07\ - \\x08\\t\\n\\x0b\\x0c\\r\\x0e\\x0f\ - \\x10\\x11\\x12\\x13\\x14\\x15\\x16\\x17\ - \\x18\\x19\\x1a\\x1b\\x1c\\x1d\\x1e\\x1f\ - \x20!\\\"#$%&'()*+,-./0123456789:;<=>?\ - @ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\\\]^_\ - `abcdefghijklmnopqrstuvwxyz{|}~\\x7f\ - \\x80\\x81\\x82\\x83\\x84\\x85\\x86\\x87\ - \\x88\\x89\\x8a\\x8b\\x8c\\x8d\\x8e\\x8f\ - \\x90\\x91\\x92\\x93\\x94\\x95\\x96\\x97\ - \\x98\\x99\\x9a\\x9b\\x9c\\x9d\\x9e\\x9f\ - \\xa0\\xa1\\xa2\\xa3\\xa4\\xa5\\xa6\\xa7\ - \\xa8\\xa9\\xaa\\xab\\xac\\xad\\xae\\xaf\ - \\xb0\\xb1\\xb2\\xb3\\xb4\\xb5\\xb6\\xb7\ - \\xb8\\xb9\\xba\\xbb\\xbc\\xbd\\xbe\\xbf\ - \\xc0\\xc1\\xc2\\xc3\\xc4\\xc5\\xc6\\xc7\ - \\xc8\\xc9\\xca\\xcb\\xcc\\xcd\\xce\\xcf\ - \\xd0\\xd1\\xd2\\xd3\\xd4\\xd5\\xd6\\xd7\ - \\xd8\\xd9\\xda\\xdb\\xdc\\xdd\\xde\\xdf\ - \\xe0\\xe1\\xe2\\xe3\\xe4\\xe5\\xe6\\xe7\ - \\xe8\\xe9\\xea\\xeb\\xec\\xed\\xee\\xef\ - \\xf0\\xf1\\xf2\\xf3\\xf4\\xf5\\xf6\\xf7\ - \\xf8\\xf9\\xfa\\xfb\\xfc\\xfd\\xfe\\xff\""; - - assert_eq!(expected, format!("{:?}", Bytes::from(vec))); -} diff --git a/third_party/rust/bytes-0.5.6/tests/test_iter.rs b/third_party/rust/bytes-0.5.6/tests/test_iter.rs deleted file mode 100644 index a5bfddddf5e9..000000000000 --- a/third_party/rust/bytes-0.5.6/tests/test_iter.rs +++ /dev/null @@ -1,21 +0,0 @@ -#![warn(rust_2018_idioms)] - -use bytes::Bytes; - -#[test] -fn iter_len() { - let buf = Bytes::from_static(b"hello world"); - let iter = buf.iter(); - - assert_eq!(iter.size_hint(), (11, Some(11))); - assert_eq!(iter.len(), 11); -} - -#[test] -fn empty_iter_len() { - let buf = Bytes::from_static(b""); - let iter = buf.iter(); - - assert_eq!(iter.size_hint(), (0, Some(0))); - assert_eq!(iter.len(), 0); -} diff --git a/third_party/rust/bytes-0.5.6/tests/test_reader.rs b/third_party/rust/bytes-0.5.6/tests/test_reader.rs deleted file mode 100644 index 10b480fcc389..000000000000 --- a/third_party/rust/bytes-0.5.6/tests/test_reader.rs +++ /dev/null @@ -1,29 +0,0 @@ -#![warn(rust_2018_idioms)] -#![cfg(feature = "std")] - -use std::io::{BufRead, Read}; - -use bytes::buf::BufExt; - -#[test] -fn read() { - let buf1 = &b"hello "[..]; - let buf2 = &b"world"[..]; - let buf = 
BufExt::chain(buf1, buf2); // Disambiguate with Read::chain - let mut buffer = Vec::new(); - buf.reader().read_to_end(&mut buffer).unwrap(); - assert_eq!(b"hello world", &buffer[..]); -} - -#[test] -fn buf_read() { - let buf1 = &b"hell"[..]; - let buf2 = &b"o\nworld"[..]; - let mut reader = BufExt::chain(buf1, buf2).reader(); - let mut line = String::new(); - reader.read_line(&mut line).unwrap(); - assert_eq!("hello\n", &line); - line.clear(); - reader.read_line(&mut line).unwrap(); - assert_eq!("world", &line); -} diff --git a/third_party/rust/bytes-0.5.6/tests/test_serde.rs b/third_party/rust/bytes-0.5.6/tests/test_serde.rs deleted file mode 100644 index cf4aeffa7854..000000000000 --- a/third_party/rust/bytes-0.5.6/tests/test_serde.rs +++ /dev/null @@ -1,20 +0,0 @@ -#![cfg(feature = "serde")] -#![warn(rust_2018_idioms)] - -use serde_test::{assert_tokens, Token}; - -#[test] -fn test_ser_de_empty() { - let b = bytes::Bytes::new(); - assert_tokens(&b, &[Token::Bytes(b"")]); - let b = bytes::BytesMut::with_capacity(0); - assert_tokens(&b, &[Token::Bytes(b"")]); -} - -#[test] -fn test_ser_de() { - let b = bytes::Bytes::from(&b"bytes"[..]); - assert_tokens(&b, &[Token::Bytes(b"bytes")]); - let b = bytes::BytesMut::from(&b"bytes"[..]); - assert_tokens(&b, &[Token::Bytes(b"bytes")]); -} diff --git a/third_party/rust/bytes-0.5.6/tests/test_take.rs b/third_party/rust/bytes-0.5.6/tests/test_take.rs deleted file mode 100644 index 0afb28bb4dcc..000000000000 --- a/third_party/rust/bytes-0.5.6/tests/test_take.rs +++ /dev/null @@ -1,12 +0,0 @@ -#![warn(rust_2018_idioms)] - -use bytes::buf::{Buf, BufExt}; - -#[test] -fn long_take() { - // Tests that get a take with a size greater than the buffer length will not - // overrun the buffer. Regression test for #138. - let buf = b"hello world".take(100); - assert_eq!(11, buf.remaining()); - assert_eq!(b"hello world", buf.bytes()); -} diff --git a/third_party/rust/form_urlencoded/.cargo-checksum.json b/third_party/rust/form_urlencoded/.cargo-checksum.json new file mode 100644 index 000000000000..33e19d14029d --- /dev/null +++ b/third_party/rust/form_urlencoded/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"Cargo.toml":"aadc4e4ba33e86861d8d1d8b848ac11a27b6f87340d082b47f762387464c61ed","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"20c7855c364d57ea4c97889a5e8d98470a9952dade37bd9248b9a54431670e5e","src/lib.rs":"5d30edec687843447c97e4ea87583983eb9fc06135ae718c8ecc0fa8cebef2df"},"package":"5fc25a87fa4fd2094bffb06925852034d90a17f0d1e05197d4956d3555752191"} \ No newline at end of file diff --git a/third_party/rust/urlencoding/Cargo.toml b/third_party/rust/form_urlencoded/Cargo.toml similarity index 52% rename from third_party/rust/urlencoding/Cargo.toml rename to third_party/rust/form_urlencoded/Cargo.toml index afe24283f933..4c9fae259778 100644 --- a/third_party/rust/urlencoding/Cargo.toml +++ b/third_party/rust/form_urlencoded/Cargo.toml @@ -12,15 +12,17 @@ [package] edition = "2018" -name = "urlencoding" -version = "1.3.3" -authors = ["Kornel ", "Bertram Truong "] -description = "A Rust library for doing URL percentage encoding." 
-homepage = "https://lib.rs/urlencoding" -readme = "README.md" -keywords = ["url", "percent", "escape", "urlencode", "urldecode"] -categories = ["encoding", "web-programming"] -license = "MIT" -repository = "https://github.com/kornelski/rust_urlencoding" -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] +name = "form_urlencoded" +version = "1.0.1" +authors = ["The rust-url developers"] +description = "Parser and serializer for the application/x-www-form-urlencoded syntax, as used by HTML forms." +license = "MIT/Apache-2.0" +repository = "https://github.com/servo/rust-url" + +[lib] +test = false +[dependencies.matches] +version = "0.1" + +[dependencies.percent-encoding] +version = "2.1.0" diff --git a/third_party/rust/form_urlencoded/LICENSE-APACHE b/third_party/rust/form_urlencoded/LICENSE-APACHE new file mode 100644 index 000000000000..16fe87b06e80 --- /dev/null +++ b/third_party/rust/form_urlencoded/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/third_party/rust/pin-project-lite-0.1.12/LICENSE-MIT b/third_party/rust/form_urlencoded/LICENSE-MIT
similarity index 95%
rename from third_party/rust/pin-project-lite-0.1.12/LICENSE-MIT
rename to third_party/rust/form_urlencoded/LICENSE-MIT
index 31aa79387f27..24de6b418eaf 100644
--- a/third_party/rust/pin-project-lite-0.1.12/LICENSE-MIT
+++ b/third_party/rust/form_urlencoded/LICENSE-MIT
@@ -1,3 +1,5 @@
+Copyright (c) 2013-2016 The rust-url developers
+
 Permission is hereby granted, free of charge, to any
 person obtaining a copy of this software and associated
 documentation files (the "Software"), to deal in the
diff --git a/third_party/rust/form_urlencoded/src/lib.rs b/third_party/rust/form_urlencoded/src/lib.rs
new file mode 100644
index 000000000000..765ee168a1cb
--- /dev/null
+++ b/third_party/rust/form_urlencoded/src/lib.rs
@@ -0,0 +1,420 @@
+// Copyright 2013-2016 The rust-url developers.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Parser and serializer for the [`application/x-www-form-urlencoded` syntax](
+//! http://url.spec.whatwg.org/#application/x-www-form-urlencoded),
+//! as used by HTML forms.
+//!
+//! Converts between a string (such as an URL’s query string)
+//! and a sequence of (name, value) pairs.
+
+#[macro_use]
+extern crate matches;
+
+use percent_encoding::{percent_decode, percent_encode_byte};
+use std::borrow::{Borrow, Cow};
+use std::str;
+
+/// Convert a byte string in the `application/x-www-form-urlencoded` syntax
+/// into a iterator of (name, value) pairs.
+///
+/// Use `parse(input.as_bytes())` to parse a `&str` string.
+///
+/// The names and values are percent-decoded. For instance, `%23first=%25try%25` will be
+/// converted to `[("#first", "%try%")]`.
+#[inline]
+pub fn parse(input: &[u8]) -> Parse<'_> {
+    Parse { input }
+}
+/// The return type of `parse()`.
+#[derive(Copy, Clone)]
+pub struct Parse<'a> {
+    input: &'a [u8],
+}
+
+impl<'a> Iterator for Parse<'a> {
+    type Item = (Cow<'a, str>, Cow<'a, str>);
+
+    fn next(&mut self) -> Option<Self::Item> {
+        loop {
+            if self.input.is_empty() {
+                return None;
+            }
+            let mut split2 = self.input.splitn(2, |&b| b == b'&');
+            let sequence = split2.next().unwrap();
+            self.input = split2.next().unwrap_or(&[][..]);
+            if sequence.is_empty() {
+                continue;
+            }
+            let mut split2 = sequence.splitn(2, |&b| b == b'=');
+            let name = split2.next().unwrap();
+            let value = split2.next().unwrap_or(&[][..]);
+            return Some((decode(name), decode(value)));
+        }
+    }
+}
+
+fn decode(input: &[u8]) -> Cow<'_, str> {
+    let replaced = replace_plus(input);
+    decode_utf8_lossy(match percent_decode(&replaced).into() {
+        Cow::Owned(vec) => Cow::Owned(vec),
+        Cow::Borrowed(_) => replaced,
+    })
+}
+
+/// Replace b'+' with b' '
+fn replace_plus(input: &[u8]) -> Cow<'_, [u8]> {
+    match input.iter().position(|&b| b == b'+') {
+        None => Cow::Borrowed(input),
+        Some(first_position) => {
+            let mut replaced = input.to_owned();
+            replaced[first_position] = b' ';
+            for byte in &mut replaced[first_position + 1..] {
+                if *byte == b'+' {
+                    *byte = b' ';
+                }
+            }
+            Cow::Owned(replaced)
+        }
+    }
+}
+
+impl<'a> Parse<'a> {
+    /// Return a new iterator that yields pairs of `String` instead of pairs of `Cow`.
+    pub fn into_owned(self) -> ParseIntoOwned<'a> {
+        ParseIntoOwned { inner: self }
+    }
+}
+
+/// Like `Parse`, but yields pairs of `String` instead of pairs of `Cow`.
+pub struct ParseIntoOwned<'a> {
+    inner: Parse<'a>,
+}
+
+impl<'a> Iterator for ParseIntoOwned<'a> {
+    type Item = (String, String);
+
+    fn next(&mut self) -> Option<Self::Item> {
+        self.inner
+            .next()
+            .map(|(k, v)| (k.into_owned(), v.into_owned()))
+    }
+}
+
+/// The [`application/x-www-form-urlencoded` byte serializer](
+/// https://url.spec.whatwg.org/#concept-urlencoded-byte-serializer).
+///
+/// Return an iterator of `&str` slices.
+pub fn byte_serialize(input: &[u8]) -> ByteSerialize<'_> {
+    ByteSerialize { bytes: input }
+}
+
+/// Return value of `byte_serialize()`.
+#[derive(Debug)]
+pub struct ByteSerialize<'a> {
+    bytes: &'a [u8],
+}
+
+fn byte_serialized_unchanged(byte: u8) -> bool {
+    matches!(byte, b'*' | b'-' | b'.' | b'0' ..= b'9' | b'A' ..= b'Z' | b'_' | b'a' ..= b'z')
+}
+
+impl<'a> Iterator for ByteSerialize<'a> {
+    type Item = &'a str;
+
+    fn next(&mut self) -> Option<&'a str> {
+        if let Some((&first, tail)) = self.bytes.split_first() {
+            if !byte_serialized_unchanged(first) {
+                self.bytes = tail;
+                return Some(if first == b' ' {
+                    "+"
+                } else {
+                    percent_encode_byte(first)
+                });
+            }
+            let position = tail.iter().position(|&b| !byte_serialized_unchanged(b));
+            let (unchanged_slice, remaining) = match position {
+                // 1 for first_byte + i unchanged in tail
+                Some(i) => self.bytes.split_at(1 + i),
+                None => (self.bytes, &[][..]),
+            };
+            self.bytes = remaining;
+            // This unsafe is appropriate because we have already checked these
+            // bytes in byte_serialized_unchanged, which checks for a subset
+            // of UTF-8. So we know these bytes are valid UTF-8, and doing
+            // another UTF-8 check would be wasteful.
+            Some(unsafe { str::from_utf8_unchecked(unchanged_slice) })
+        } else {
+            None
+        }
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        if self.bytes.is_empty() {
+            (0, Some(0))
+        } else {
+            (1, Some(self.bytes.len()))
+        }
+    }
+}
+
+/// The [`application/x-www-form-urlencoded` serializer](
+/// https://url.spec.whatwg.org/#concept-urlencoded-serializer).
+pub struct Serializer<'a, T: Target> {
+    target: Option<T>,
+    start_position: usize,
+    encoding: EncodingOverride<'a>,
+}
+
+pub trait Target {
+    fn as_mut_string(&mut self) -> &mut String;
+    fn finish(self) -> Self::Finished;
+    type Finished;
+}
+
+impl Target for String {
+    fn as_mut_string(&mut self) -> &mut String {
+        self
+    }
+    fn finish(self) -> Self {
+        self
+    }
+    type Finished = Self;
+}
+
+impl<'a> Target for &'a mut String {
+    fn as_mut_string(&mut self) -> &mut String {
+        &mut **self
+    }
+    fn finish(self) -> Self {
+        self
+    }
+    type Finished = Self;
+}
+
+impl<'a, T: Target> Serializer<'a, T> {
+    /// Create a new `application/x-www-form-urlencoded` serializer for the given target.
+    ///
+    /// If the target is non-empty,
+    /// its content is assumed to already be in `application/x-www-form-urlencoded` syntax.
+    pub fn new(target: T) -> Self {
+        Self::for_suffix(target, 0)
+    }
+
+    /// Create a new `application/x-www-form-urlencoded` serializer
+    /// for a suffix of the given target.
+    ///
+    /// If that suffix is non-empty,
+    /// its content is assumed to already be in `application/x-www-form-urlencoded` syntax.
+    pub fn for_suffix(mut target: T, start_position: usize) -> Self {
+        if target.as_mut_string().len() < start_position {
+            panic!(
+                "invalid length {} for target of length {}",
+                start_position,
+                target.as_mut_string().len()
+            );
+        }
+
+        Serializer {
+            target: Some(target),
+            start_position,
+            encoding: None,
+        }
+    }
+
+    /// Remove any existing name/value pair.
+    ///
+    /// Panics if called after `.finish()`.
+    pub fn clear(&mut self) -> &mut Self {
+        string(&mut self.target).truncate(self.start_position);
+        self
+    }
+
+    /// Set the character encoding to be used for names and values before percent-encoding.
+    pub fn encoding_override(&mut self, new: EncodingOverride<'a>) -> &mut Self {
+        self.encoding = new;
+        self
+    }
+
+    /// Serialize and append a name/value pair.
+    ///
+    /// Panics if called after `.finish()`.
+    pub fn append_pair(&mut self, name: &str, value: &str) -> &mut Self {
+        append_pair(
+            string(&mut self.target),
+            self.start_position,
+            self.encoding,
+            name,
+            value,
+        );
+        self
+    }
+
+    /// Serialize and append a name of parameter without any value.
+    ///
+    /// Panics if called after `.finish()`.
+    pub fn append_key_only(&mut self, name: &str) -> &mut Self {
+        append_key_only(
+            string(&mut self.target),
+            self.start_position,
+            self.encoding,
+            name,
+        );
+        self
+    }
+
+    /// Serialize and append a number of name/value pairs.
+    ///
+    /// This simply calls `append_pair` repeatedly.
+    /// This can be more convenient, so the user doesn’t need to introduce a block
+    /// to limit the scope of `Serializer`’s borrow of its string.
+    ///
+    /// Panics if called after `.finish()`.
+    pub fn extend_pairs<I, K, V>(&mut self, iter: I) -> &mut Self
+    where
+        I: IntoIterator,
+        I::Item: Borrow<(K, V)>,
+        K: AsRef<str>,
+        V: AsRef<str>,
+    {
+        {
+            let string = string(&mut self.target);
+            for pair in iter {
+                let &(ref k, ref v) = pair.borrow();
+                append_pair(
+                    string,
+                    self.start_position,
+                    self.encoding,
+                    k.as_ref(),
+                    v.as_ref(),
+                );
+            }
+        }
+        self
+    }
+
+    /// Serialize and append a number of names without values.
+    ///
+    /// This simply calls `append_key_only` repeatedly.
+    /// This can be more convenient, so the user doesn’t need to introduce a block
+    /// to limit the scope of `Serializer`’s borrow of its string.
+    ///
+    /// Panics if called after `.finish()`.
+    pub fn extend_keys_only<I, K>(&mut self, iter: I) -> &mut Self
+    where
+        I: IntoIterator,
+        I::Item: Borrow<K>,
+        K: AsRef<str>,
+    {
+        {
+            let string = string(&mut self.target);
+            for key in iter {
+                let k = key.borrow().as_ref();
+                append_key_only(string, self.start_position, self.encoding, k);
+            }
+        }
+        self
+    }
+
+    /// If this serializer was constructed with a string, take and return that string.
+    ///
+    /// ```rust
+    /// use form_urlencoded;
+    /// let encoded: String = form_urlencoded::Serializer::new(String::new())
+    ///     .append_pair("foo", "bar & baz")
+    ///     .append_pair("saison", "Été+hiver")
+    ///     .finish();
+    /// assert_eq!(encoded, "foo=bar+%26+baz&saison=%C3%89t%C3%A9%2Bhiver");
+    /// ```
+    ///
+    /// Panics if called more than once.
+    pub fn finish(&mut self) -> T::Finished {
+        self.target
+            .take()
+            .expect("url::form_urlencoded::Serializer double finish")
+            .finish()
+    }
+}
+
+fn append_separator_if_needed(string: &mut String, start_position: usize) {
+    if string.len() > start_position {
+        string.push('&')
+    }
+}
+
+fn string<T: Target>(target: &mut Option<T>) -> &mut String {
+    target
+        .as_mut()
+        .expect("url::form_urlencoded::Serializer finished")
+        .as_mut_string()
+}
+
+fn append_pair(
+    string: &mut String,
+    start_position: usize,
+    encoding: EncodingOverride<'_>,
+    name: &str,
+    value: &str,
+) {
+    append_separator_if_needed(string, start_position);
+    append_encoded(name, string, encoding);
+    string.push('=');
+    append_encoded(value, string, encoding);
+}
+
+fn append_key_only(
+    string: &mut String,
+    start_position: usize,
+    encoding: EncodingOverride,
+    name: &str,
+) {
+    append_separator_if_needed(string, start_position);
+    append_encoded(name, string, encoding);
+}
+
+fn append_encoded(s: &str, string: &mut String, encoding: EncodingOverride<'_>) {
+    string.extend(byte_serialize(&encode(encoding, s)))
+}
+
+pub(crate) fn encode<'a>(encoding_override: EncodingOverride<'_>, input: &'a str) -> Cow<'a, [u8]> {
+    if let Some(o) = encoding_override {
+        return o(input);
+    }
+    input.as_bytes().into()
+}
+
+pub(crate) fn decode_utf8_lossy(input: Cow<'_, [u8]>) -> Cow<'_, str> {
+    // Note: This function is duplicated in `percent_encoding/lib.rs`.
+    match input {
+        Cow::Borrowed(bytes) => String::from_utf8_lossy(bytes),
+        Cow::Owned(bytes) => {
+            match String::from_utf8_lossy(&bytes) {
+                Cow::Borrowed(utf8) => {
+                    // If from_utf8_lossy returns a Cow::Borrowed, then we can
+                    // be sure our original bytes were valid UTF-8. This is because
+                    // if the bytes were invalid UTF-8 from_utf8_lossy would have
+                    // to allocate a new owned string to back the Cow so it could
+                    // replace invalid bytes with a placeholder.
+
+                    // First we do a debug_assert to confirm our description above.
+                    let raw_utf8: *const [u8];
+                    raw_utf8 = utf8.as_bytes();
+                    debug_assert!(raw_utf8 == &*bytes as *const [u8]);
+
+                    // Given we know the original input bytes are valid UTF-8,
+                    // and we have ownership of those bytes, we re-use them and
+                    // return a Cow::Owned here.
+ Cow::Owned(unsafe { String::from_utf8_unchecked(bytes) }) + } + Cow::Owned(s) => Cow::Owned(s), + } + } + } +} + +pub type EncodingOverride<'a> = Option<&'a dyn Fn(&str) -> Cow<'_, [u8]>>; diff --git a/third_party/rust/h2/.cargo-checksum.json b/third_party/rust/h2/.cargo-checksum.json index 56de41e18b7f..7e4fb8e0955b 100644 --- a/third_party/rust/h2/.cargo-checksum.json +++ b/third_party/rust/h2/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"CHANGELOG.md":"c8d0fcbab309456259f7607712d0972230b425a3ec01aaf5d30cb7f8ac971ea7","CONTRIBUTING.md":"eff9610bd3a73e6c297b9b487a629bcdb40da9090e6e28c26e48fcfd3a899a6c","Cargo.lock":"f85d4a2bf96e76ec7d11dbae65da309ee0bc75b0f28298371da0661c4c6f7c1e","Cargo.toml":"422b404a2f8a77d5ce661538b9e9994fdef482166a407d6e93fd32423295700c","LICENSE":"b21623012e6c453d944b0342c515b631cfcbf30704c2621b291526b69c10724d","README.md":"5b720af15b5a4df034cd9a8c362ea69f499a80f952bdd983d33d5571c740bbae","examples/akamai.rs":"f8d310ba4ba0364f887746071f76f566fdffa3b0959050ec07c47afeeb8786d4","examples/client.rs":"5ad136b838e9d55ae3d1fd8801cec4af88139b58864d6438f75d0e173eb3aeb3","examples/server.rs":"254447655808eabf6ffd0718f011d445351d5c2399fe2d545b46bcddb725f2fb","src/client.rs":"f669d249560e4e757baef03d181913d0b4f82e2ae874bd45502f3a2db6b1b575","src/codec/error.rs":"408b7d394caf6fc4464d08f717cb6ed4a618b687197cebc47593ad878f5adec4","src/codec/framed_read.rs":"8a61555f9f74cf7956e9aab98be5463486f0cf071809db1ed0099607763d2554","src/codec/framed_write.rs":"cb2c2be5a90eeb13346541996ffd3e12e960e4ea6d8de73272d1771a634a5e1e","src/codec/mod.rs":"10ed96b6e187a86c827f066bb81b4047dbdff942f9cc7fab81bd37c989a8a9c1","src/error.rs":"445f810c0d9f3bdd2889792aac00d6af06363c0d8e5cb73a400ae39191cdefde","src/frame/data.rs":"dd34ef8f82345aaa1b2def2c170a317c83a56de4f37324a635251812aa15ed6e","src/frame/go_away.rs":"999d650e757897a798219bcc3a0ceb2d699d3b3ccf8402b9ed8c302e07d7c0be","src/frame/head.rs":"9cde126609db8ddd1e27b8212af3a613a1d59461166567ae1c97fcba7902f2fe","src/frame/headers.rs":"535dc0cbd7bddf0abe65db9e08c51f9da2db496bc4ca8382e30aa209002d39a3","src/frame/mod.rs":"f1baebdfff10c0f1d9937681b0f21df8631196c285aa860516235d49275ae90f","src/frame/ping.rs":"e28bdb103a8188a1e0470b3ade9e1cbb22720cc8bd773d44bf086efebb1947b2","src/frame/priority.rs":"9392b7aa2636157024dc645c92d0e450a4d3f7a53bc9de1188d3b61178c2b5fc","src/frame/reason.rs":"4337f5933bfd4064337c80d3c110f51514cbdfb97bc26f4980ee009e4f6fa773","src/frame/reset.rs":"1b9abe02a3479b7d527cb414ca534100f70325daec9c47a4ad84841c22b9bdb0","src/frame/settings.rs":"c4eb8bf15c8b505c8b5a04c63e80b75c7bd8653a888ba2a734cd3924444c47df","src/frame/stream_id.rs":"0aa72cc3d735aa31e4d0cca0a8b94bae75c97d041c3712fe8e49f687881a73fe","src/frame/util.rs":"1a1408ddefe35f9efe5faa5360cb5ecc461fc0846175d4b43031720da7f5188d","src/frame/window_update.rs":"e5ceee8953a97f1ede101212ccaf0473fa9f3adcb21e367acc3f14bfdbe51ac5","src/hpack/decoder.rs":"27469d21961fc2984b4be0cda1079964c81a4f49992f909ffae475e23048120c","src/hpack/encoder.rs":"9ca7fb55c0bdfd6d83697b960d05aa22808a54335c4a7aaf3cc26b3c788da814","src/hpack/header.rs":"d5b5ed925d4cf06b13a765d34f858004d1dd0003fd2e1d35d7927f86cf34f1dc","src/hpack/huffman/mod.rs":"04fc9b146177e7bf615156b8570fa0f97b89f68a2c02b946778711728b81e81f","src/hpack/huffman/table.rs":"6b7f94af0bb5d236d4e671eff4afe5dc254a20eaddd2d57dd6e8f53e2a60c337","src/hpack/mod.rs":"702a0b41ef5aa9e83683cec25363fb4f9c0f61c6697f9def9994967440fea378","src/hpack/table.rs":"d9f60aaf4c72e9eecaf70ebcb723a2bad8fcfb898ea9d2da1dffd6e5fa127f34","src/hpack/test/fixture.rs":"d451057b056e
20ba7723ca7a395cba7819e69fc2ae788d9f9e676558f1fba6fb","src/hpack/test/fuzz.rs":"3db484a9b73597ac59af5135599e255771f3c2ef96e9457e2d513f8cd53ea18c","src/hpack/test/mod.rs":"56ad5643e7f1e273e5bce8a6fc0552be39c326dacfffd7f9757ccdbe75e9b66e","src/lib.rs":"fe2251378cf31e8b60f79671ddb5775cdea7eaf0158312f6d089ae406bccde3a","src/proto/connection.rs":"effd5744bf9b3ee9ce363cbbd0d71f5af2d1bb93dd885dce4b2b7d67e8ffd68d","src/proto/error.rs":"7486777b6d9f13c9df3e4a921196e6d16ae45922b4e34aa15282876198d1ebf7","src/proto/go_away.rs":"16fdecca841ce046960d29ca03a3dbf61886e4b7b9532217dde493ebcdc10477","src/proto/mod.rs":"010fba0601c69ae1ad58a7d3abdb9c0090ab3e0902f76e836be55780456dec8c","src/proto/peer.rs":"02e323c3aaad49cd7833c7bc775d3ec005dc9392152cae4db9d64c7c4e84285f","src/proto/ping_pong.rs":"fa173f89ae0e325bb01dbe1270cde1de735f674c34ffaabd2ae1eb1257cd457d","src/proto/settings.rs":"acdf7c2c91cae106f8413e1b7d0391af94426ef8a9aa87cca530023037308c7e","src/proto/streams/buffer.rs":"949ac62539b144095fcda2e00002fa5dba5e32e7671735de040ccaf091911233","src/proto/streams/counts.rs":"48eef4cf24d3f030f12632c0514f7dcd81e96c5975c656d79c15e16aff6a6314","src/proto/streams/flow_control.rs":"d07f1dee3d9da62b3e998042ff1ae86bf696d64f111a8843373b7153b270ab9f","src/proto/streams/mod.rs":"27ac7844762633fdf0413ef483342fe394d5d5f5246776f39f8791e0db17befb","src/proto/streams/prioritize.rs":"a0cd7c3ec651196f1aaade6b9568706a4ea2385c2e8e4e6fcf38576fe0796127","src/proto/streams/recv.rs":"f3818cf525155b191407472aa2bf2c3d53f427ca6ab9f85560f12b63fffd73ba","src/proto/streams/send.rs":"0d118b4efedda41b92d4d5223d229cb30aa4540cd4ac99cd33803462063e0eb1","src/proto/streams/state.rs":"d03ef61cbf512851d41110bf91b9c69a796fc4e244deac149115f6ab479cb846","src/proto/streams/store.rs":"51f7c3659c1395a9c440cde03a9e0b4b5ae9635663f799780c2edf3f567791e8","src/proto/streams/stream.rs":"a9d823bee1cce247eaffa90cf9ad7cbd90dba99b1a18823a5020ab836644f623","src/proto/streams/streams.rs":"8645a27ba71ff62942486a91f4477f1707e9a874271f6435c07230e2151fec22","src/server.rs":"c4ee723c0df38af6a9bb3de8ee55c572a467ba1617aabd5dee31b1ee9bde933e","src/share.rs":"c4cc610228d6d42d803b303b2988d8bfcb1fb0bfbc5b81c45ef168b02bfdd738"},"package":"79b7246d7e4b979c03fa093da39cfb3617a96bbeee6310af63991668d7e843ff"} \ No newline at end of file 
+{"files":{"CHANGELOG.md":"e4c1822a1bdf3278c6a8a0b8750ab2c8af54a4a266cf8c22f20780f5fb6010d7","CONTRIBUTING.md":"eff9610bd3a73e6c297b9b487a629bcdb40da9090e6e28c26e48fcfd3a899a6c","Cargo.lock":"e8862f1cec21e0943ffa4ccd15e07c85a3deb861ec19bf68ec9e2f6dc0d4f6bf","Cargo.toml":"165fa6dda73f094b20380213688266a0b8240a56c143ed1fd0e0f2e2256d0879","LICENSE":"b21623012e6c453d944b0342c515b631cfcbf30704c2621b291526b69c10724d","README.md":"686a7e3e4b488fe813c4fb99b5082e00a168068cd80bdb29c496e8b68f1ce257","examples/akamai.rs":"3bb1308d0a0bf9d18b8761c5bd370fc473e2e94fa32ca85ec0152fa5224362db","examples/client.rs":"5ad136b838e9d55ae3d1fd8801cec4af88139b58864d6438f75d0e173eb3aeb3","examples/server.rs":"8cc7927c7fe98a2474c41ab0094ba2dc7a2ae6a7f58ce342a4c05bb8056a63a5","src/client.rs":"910e6857212f0eefe4abcc193e75229b9152234516a873cc713ad197612cd251","src/codec/error.rs":"e4b494d6234d8e44de22188540af5e3e0964bbaabc1d8339c7702d401c55afa2","src/codec/framed_read.rs":"c56c4698af65cc1909442a9b8bf9f746fe120dbeb73640b9efc7ec3f9a812c7f","src/codec/framed_write.rs":"d88bd4355286be8d0f054ad104ce3e49ffb41d44f187229aaddc8469c7f0d025","src/codec/mod.rs":"75b18c4dbbb7314823a00cab2843a3ca8f12343214541e23f294af6145718809","src/error.rs":"d6642e067169439f8773c071e957bff01d2a53cb9528789dae80a81c816810e5","src/ext.rs":"2fb968f754829fc50392b6ccb4b90cc4c8b88657a5e3a7259f53916e13dc1c91","src/frame/data.rs":"65fbfe306d525df7ac0ba229ca75ec3d142203ec62ddd3df0b8452da1496da3e","src/frame/go_away.rs":"a668f42887f775eeb40cdc53a6e64daf76661efb1dfc7218afaabb68f61b59ad","src/frame/head.rs":"4073eb70a8ed9ec0ca460e18fb470fca1dbe6900c1ac08d66397ab4d75414e4c","src/frame/headers.rs":"9924f8077b54ddd781677ba27492d9ac5e11ffe42988b27e926b152b4f0fe30d","src/frame/mod.rs":"8361e4a1754e50227f2e32bb85bb99a98797c2d9900f13e692e9f22e9bff9653","src/frame/ping.rs":"ff4e4059101300e7b03c23d271026b058da4315c3bd68280422e144c2aa1b9e6","src/frame/priority.rs":"9392b7aa2636157024dc645c92d0e450a4d3f7a53bc9de1188d3b61178c2b5fc","src/frame/reason.rs":"45b13202141b1d8b261d64624b276a9b034d8c3f7d9b6870ee41a204589f4e14","src/frame/reset.rs":"91c17a7391fcb516223fd0358f7770524023b33dd6489902ba23e47b8acc9a9e","src/frame/settings.rs":"5f7b4cc2ace3c5b6d1b629d57ba69d5e9abbcb44bad7ad9f5b8b5c6f891433f6","src/frame/stream_id.rs":"0aa72cc3d735aa31e4d0cca0a8b94bae75c97d041c3712fe8e49f687881a73fe","src/frame/util.rs":"1a1408ddefe35f9efe5faa5360cb5ecc461fc0846175d4b43031720da7f5188d","src/frame/window_update.rs":"05c1b84478208802d09154f5d6fb5eb886d45397f43ccc6ccbf40bf3be912819","src/fuzz_bridge.rs":"c27c716732c21a972a8bef43c00ee851a78de80259db62f0b6e793008ccf01a3","src/hpack/decoder.rs":"bb4f10992cb7f29f87c3fc460b22fc40c1c0fcafba7f0db335e5c28b4a84e475","src/hpack/encoder.rs":"5ffe9af12831c5f724a325db397f78274b6b5185c114146fe99aa76079e45ab4","src/hpack/header.rs":"e3283fdf7901f9b9b3d6b2945f5baa176638062d8bb5e294e98414565cdc4003","src/hpack/huffman/mod.rs":"fe4881780e8cd0181748a891102b2dd54c2060546d7648ec1b6435529bd0dcc2","src/hpack/huffman/table.rs":"6b7f94af0bb5d236d4e671eff4afe5dc254a20eaddd2d57dd6e8f53e2a60c337","src/hpack/mod.rs":"581033d44fd5525e9ccb546549a99f8357ab8f55d58b490d9980fb36323c5dbc","src/hpack/table.rs":"12e553873564874108b9b759424ccfc76968aee37e0819d402ae62492f0aec55","src/hpack/test/fixture.rs":"3cd07043cde78f2ea49c53040857faea8c7fd9bdf3a441b3bdf655d7b2a87876","src/hpack/test/fuzz.rs":"ad020a59f203d43d94c7eb602fa1c51a85b2cfe152629e68914d74bde1ac422c","src/hpack/test/mod.rs":"56ad5643e7f1e273e5bce8a6fc0552be39c326dacfffd7f9757ccdbe75e9b66e","src/lib.rs":"0fea926441cf4c5
f8aff4981afd20a40932dc478c3365e77d8ec86b5965afc7b","src/proto/connection.rs":"5f2a8e271788f6e1fbf3ab913daf9c2f1b4c9ae136b13fd3adbd90a023e0e2f0","src/proto/error.rs":"7cdf1fb33860af4fdc29ce793cc296b51af52da78649535e71662a224c3b4aa7","src/proto/go_away.rs":"d07d1ce28c1486e1ced968e1ab3bd89c22fe813f1497c8b94ee0c0dd78a1688f","src/proto/mod.rs":"6d71faa7ceab9951e0ac6d036e9ac682aba68446c52e491729461330faf7190e","src/proto/peer.rs":"e26317f7d0a27441e15bf8b4ca4af3e4cca0ea01803027078a088eafb83e0910","src/proto/ping_pong.rs":"eb4757f4ba7e4f323d38724e1a09476c29efda01c5606af8e1b6e91942af45e1","src/proto/settings.rs":"d937cfb44d952fe8034b67be603147360540e21654404862d18758fd80c51d22","src/proto/streams/buffer.rs":"cf2205c607f8a6b8aa8662983d9907fedeb14b5890e051d8e63d7bc2b0a960e9","src/proto/streams/counts.rs":"46e9e574d1c804b0b3a426c393a81867d8a10f553859b4964dbfd9ef9a44ee94","src/proto/streams/flow_control.rs":"e85027bcf070e4aee3d2a69bb89d3dc5823c88a358975318bc2d02abb38a1377","src/proto/streams/mod.rs":"8a9fa234199d55a5412568ebad81ec499472ef50875fa6193d1dfa502f448a01","src/proto/streams/prioritize.rs":"6af528a7aa887d6bde7ded2f06de9863e42ba19717f8f65307eba3a29f26e4a1","src/proto/streams/recv.rs":"179b1d776834e165e71e871ed1f291c8b576aa94d6c5863a352aa9e0ef277d63","src/proto/streams/send.rs":"75d336dfb6182fb80d3d05377a037dd96f2fd7eba48569550d868d5e7f7b498f","src/proto/streams/state.rs":"56f33a8f8b2f26a16add34a5cc6f60af7106ae881d33040b2b5e17b576e37f1a","src/proto/streams/store.rs":"79f166c9a5cd5dda169dab16b5994ecaa39a741033f522fe0e049229a3901494","src/proto/streams/stream.rs":"bb44879933b049d1b767a0e232728effc828e4fba1a487a086d94831e97e6a36","src/proto/streams/streams.rs":"4dee91aa2e059f4e13570e9e5280a31e35987ba10c8122869aa42642ce42a0b9","src/server.rs":"96156b002ddcaa145b1f0b71009d7def20ed834202e29075b7dda393b3740f2b","src/share.rs":"d53c93a0ed2d7f026d5bacb845dddc1a272aee0181a7660f383dd890a7089341"},"package":"37a82c6d637fc9515a4694bbf1cb2457b79d81ce52b3108bdeea58b07dd34a57"} \ No newline at end of file diff --git a/third_party/rust/h2/CHANGELOG.md b/third_party/rust/h2/CHANGELOG.md index 162e47541070..7b00632f3193 100644 --- a/third_party/rust/h2/CHANGELOG.md +++ b/third_party/rust/h2/CHANGELOG.md @@ -1,3 +1,90 @@ +# 0.3.13 (March 31, 2022) + +* Update private internal `tokio-util` dependency. + +# 0.3.12 (March 9, 2022) + +* Avoid time operations that can panic (#599) +* Bump MSRV to Rust 1.49 (#606) +* Fix header decoding error when a header name is contained at a continuation + header boundary (#589) +* Remove I/O type names from handshake `tracing` spans (#608) + +# 0.3.11 (January 26, 2022) + +* Make `SendStream::poll_capacity` never return `Ok(Some(0))` (#596) +* Fix panic when receiving already reset push promise (#597) + +# 0.3.10 (January 6, 2022) + +* Add `Error::is_go_away()` and `Error::is_remote()` methods. +* Fix panic if receiving malformed PUSH_PROMISE with stream ID of 0. + +# 0.3.9 (December 9, 2021) + +* Fix hang related to new `max_send_buffer_size`. + +# 0.3.8 (December 8, 2021) + +* Add "extended CONNECT support". Adds `h2::ext::Protocol`, which is used for request and response extensions to connect new protocols over an HTTP/2 stream. +* Add `max_send_buffer_size` options to client and server builders, and a default of ~400MB. This acts like a high-water mark for the `poll_capacity()` method. +* Fix panic if receiving malformed HEADERS with stream ID of 0. + +# 0.3.7 (October 22, 2021) + +* Fix panic if server sends a malformed frame on a stream client was about to open. 
+* Fix server to treat `:status` in a request as a stream error instead of connection error.
+
+# 0.3.6 (September 30, 2021)
+
+* Fix regression of `h2::Error` that were created via `From<io::Error>` not returning their reason code in `Error::reason()`.
+
+# 0.3.5 (September 29, 2021)
+
+* Fix sending of very large headers. Previously when a single header was too big to fit in a single `HEADERS` frame, an error was returned. Now it is broken up and sent correctly.
+* Fix buffered data field to be a bigger integer size.
+* Refactor error format to include what initiated the error (remote, local, or user), if it was a stream or connection-level error, and any received debug data.
+
+# 0.3.4 (August 20, 2021)
+
+* Fix panic when encoding header size update over a certain size.
+* Fix `SendRequest` to wake up connection when dropped.
+* Fix potential hang if `RecvStream` is placed in the request or response `extensions`.
+* Stop calling `Instant::now` if zero reset streams are configured.
+
+# 0.3.3 (April 29, 2021)
+
+* Fix client being able to make `CONNECT` requests without a `:path`.
+* Expose `RecvStream::poll_data`.
+* Fix some docs.
+
+# 0.3.2 (March 24, 2021)
+
+* Fix incorrect handling of received 1xx responses on the client when the request body is still streaming.
+
+# 0.3.1 (February 26, 2021)
+
+* Add `Connection::max_concurrent_recv_streams()` getter.
+* Add `Connection::max_concurrent_send_streams()` getter.
+* Fix client to ignore receipt of 1xx headers frames.
+* Fix incorrect calculation of pseudo header lengths when determining if a received header is too big.
+* Reduce monomorphized code size of internal code.
+
+# 0.3.0 (December 23, 2020)
+
+* Update to Tokio v1 and Bytes v1.
+* Disable `tracing`'s `log` feature. (It can still be enabled by a user in their own `Cargo.toml`.)
+
+# 0.2.7 (October 22, 2020)
+
+* Fix stream ref count when sending a push promise
+* Fix receiving empty DATA frames in response to a HEAD request
+* Fix handling of client disabling SERVER_PUSH
+
+# 0.2.6 (July 13, 2020)
+
+* Integrate `tracing` directly where `log` was used. (For 0.2.x, `log`s are still emitted by default.)
+
 # 0.2.5 (May 6, 2020)
 
 * Fix rare debug assert failure in store shutdown.
diff --git a/third_party/rust/h2/Cargo.lock b/third_party/rust/h2/Cargo.lock
index c0c765499001..220ebabf0251 100644
--- a/third_party/rust/h2/Cargo.lock
+++ b/third_party/rust/h2/Cargo.lock
@@ -1,717 +1,642 @@
 # This file is automatically @generated by Cargo.
 # It is not intended for manual editing.
-[[package]] -name = "atty" -version = "0.2.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "hermit-abi 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", -] +version = 3 [[package]] name = "autocfg" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "base64" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "bitflags" -version = "1.2.1" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" [[package]] name = "bumpalo" -version = "3.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "byteorder" -version = "1.3.4" +version = "3.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c59e7af012c713f529e7a3ee57ce9b31ddd858d4b512923602f74608b009631" [[package]] name = "bytes" -version = "0.5.4" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b700ce4376041dcd0a327fd0097c41095743c4c8af8887265942faf1100bd040" [[package]] name = "cc" -version = "1.0.50" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e70cc2f62c6ce1868963827bd677764c62d07c3d9a3e1fb1177ee1a9ab199eb2" [[package]] name = "cfg-if" -version = "0.1.10" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "env_logger" -version = "0.5.13" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b2cf0344971ee6c64c31be0d530793fba457d322dfec2810c453d0ef228f9c3" dependencies = [ - "atty 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)", - "humantime 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "termcolor 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "log", ] [[package]] name = "fnv" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "fuchsia-cprng" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "fuchsia-zircon" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "fuchsia-zircon-sys" -version = "0.3.3" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "futures-core" -version = "0.3.4" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af51b1b4a7fdff033703db39de8802c673eb91855f2e0d47dcf3bf2c0ef01f99" [[package]] name = "futures-sink" -version = "0.3.4" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"c0f30aaa67363d119812743aa5f33c201a7a66329f97d1a887022971feea4b53" [[package]] name = "futures-task" -version = "0.3.4" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbe54a98670017f3be909561f6ad13e810d9a51f3f061b902062ca3da80799f2" [[package]] name = "futures-util" -version = "0.3.4" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67eb846bfd58e44a8481a00049e82c43e0ccb5d61f8dc071057cb19249dd4d78" dependencies = [ - "futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "futures-task 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "pin-utils 0.1.0-alpha.4 (registry+https://github.com/rust-lang/crates.io-index)", + "autocfg", + "futures-core", + "futures-task", + "pin-project-lite", + "pin-utils", +] + +[[package]] +name = "getrandom" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9be70c98951c83b8d2f8f60d7065fa6d5146873094452a1008da8c2f1e4205ad" +dependencies = [ + "cfg-if", + "libc", + "wasi", ] [[package]] name = "h2" -version = "0.2.5" +version = "0.3.13" dependencies = [ - "bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", - "env_logger 0.5.13 (registry+https://github.com/rust-lang/crates.io-index)", - "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", - "futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "futures-sink 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "futures-util 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "hex 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "http 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "indexmap 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "quickcheck 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", - "rand 0.3.23 (registry+https://github.com/rust-lang/crates.io-index)", - "rustls 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.105 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.48 (registry+https://github.com/rust-lang/crates.io-index)", - "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-rustls 0.12.2 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-util 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "walkdir 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)", - "webpki 0.21.2 (registry+https://github.com/rust-lang/crates.io-index)", - "webpki-roots 0.17.0 (registry+https://github.com/rust-lang/crates.io-index)", + "bytes", + "env_logger", + "fnv", + "futures-core", + "futures-sink", + "futures-util", + "hex", + "http", + "indexmap", + "quickcheck", + "rand", + "serde", + "serde_json", + "slab", + "tokio", + "tokio-rustls", + "tokio-util", + "tracing", + "walkdir", + "webpki-roots", ] [[package]] -name = "hermit-abi" -version = "0.1.8" +name = "hashbrown" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" + +[[package]] +name = "hermit-abi" +version = "0.1.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" dependencies = [ - "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)", + "libc", ] [[package]] name = "hex" -version = "0.2.0" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "http" -version = "0.2.0" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "527e8c9ac747e28542699a951517aa9a6945af506cd1f2e1b53a576c17b6cc11" dependencies = [ - "bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", - "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", - "itoa 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "humantime" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "quick-error 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)", + "bytes", + "fnv", + "itoa", ] [[package]] name = "indexmap" -version = "1.3.2" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc633605454125dec4b66843673f01c7df2b89479b32e0ed634e43a91cff62a5" dependencies = [ - "autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "iovec" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)", + "autocfg", + "hashbrown", ] [[package]] name = "itoa" -version = "0.4.5" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736" [[package]] name = "js-sys" -version = "0.3.36" +version = "0.3.53" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e4bf49d50e2961077d9c99f4b7997d770a1114f087c3c2e0069b36c13fc2979d" dependencies = [ - "wasm-bindgen 0.2.59 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "kernel32-sys" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "wasm-bindgen", ] [[package]] name = "lazy_static" version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.68" +version = "0.2.121" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "efaa7b300f3b5fe8eb6bf21ce3895e1751d9665086af2d64b42f19701015ff4f" [[package]] name = "log" -version = "0.4.8" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710" dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if", ] [[package]] name = "memchr" -version = "2.3.3" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a" [[package]] name = "mio" -version = "0.6.21" +version = "0.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"8c2bdb6314ec10835cd3293dd268473a835c02b7b352e788be788b3c6ca6bb16" dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "fuchsia-zircon 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", - "fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", - "iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", - "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)", - "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", + "libc", + "log", + "miow", + "ntapi", + "winapi", ] [[package]] name = "miow" -version = "0.2.1" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9f1c5b025cda876f66ef43a113f91ebc9f4ccef34843000e0adf6ebbab84e21" dependencies = [ - "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", - "ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi", ] [[package]] -name = "net2" -version = "0.2.33" +name = "ntapi" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f6bb902e437b6d86e03cce10a7e2af662292c5dfef23b65899ea3ac9354ad44" dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi", ] +[[package]] +name = "num_cpus" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3" +dependencies = [ + "hermit-abi", + "libc", +] + +[[package]] +name = "once_cell" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "692fcb63b64b1758029e0a96ee63e049ce8c5948587f2f7208df04625e5f6b56" + [[package]] name = "pin-project-lite" -version = "0.1.4" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d31d11c69a6b52a174b42bdc0c30e5e11670f90788b2c471c31c1d17d449443" [[package]] name = "pin-utils" -version = "0.1.0-alpha.4" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + +[[package]] +name = "ppv-lite86" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb9f9e6e233e5c4a35559a617bf40a4ec447db2e84c20b55a6f83167b7e57872" [[package]] name = "proc-macro2" -version = "1.0.9" +version = "1.0.28" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c7ed8b8c7b886ea3ed7dde405212185f423ab44682667c8c6dd14aa1d9f6612" dependencies = [ - "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-xid", ] -[[package]] -name = "quick-error" -version = "1.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" - [[package]] name = 
"quickcheck" -version = "0.4.1" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "588f6378e4dd99458b60ec275b4477add41ce4fa9f64dcba6f15adccb19b50d6" dependencies = [ - "rand 0.3.23 (registry+https://github.com/rust-lang/crates.io-index)", + "rand", ] [[package]] name = "quote" -version = "1.0.3" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3d0b9745dc2debf507c8422de05d7226cc1f0644216dfdfead988f9b1ab32a7" dependencies = [ - "proc-macro2 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2", ] [[package]] name = "rand" -version = "0.3.23" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ - "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)", - "rand 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", + "libc", + "rand_chacha", + "rand_core", ] [[package]] -name = "rand" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "rand_core" +name = "rand_chacha" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ - "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "ppv-lite86", + "rand_core", ] [[package]] name = "rand_core" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "rdrand" -version = "0.4.0" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" dependencies = [ - "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "getrandom", ] [[package]] name = "ring" -version = "0.16.11" +version = "0.16.20" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" dependencies = [ - "cc 1.0.50 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)", - "spin 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", - "untrusted 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", - "web-sys 0.3.36 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "cc", + "libc", + "once_cell", + "spin", + "untrusted", + "web-sys", + "winapi", ] [[package]] name = "rustls" -version = "0.16.0" +version = "0.20.4" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fbfeb8d0ddb84706bc597a5574ab8912817c52a397f819e5b614e2265206921" dependencies = [ - "base64 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "ring 
0.16.11 (registry+https://github.com/rust-lang/crates.io-index)", - "sct 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", - "webpki 0.21.2 (registry+https://github.com/rust-lang/crates.io-index)", + "log", + "ring", + "sct", + "webpki", ] [[package]] name = "ryu" -version = "1.0.3" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e" [[package]] name = "same-file" -version = "0.1.3" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" dependencies = [ - "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi-util", ] [[package]] name = "sct" -version = "0.6.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" dependencies = [ - "ring 0.16.11 (registry+https://github.com/rust-lang/crates.io-index)", - "untrusted 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", + "ring", + "untrusted", ] [[package]] name = "serde" -version = "1.0.105" +version = "1.0.127" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f03b9878abf6d14e6779d3f24f07b2cfa90352cfec4acc5aab8f1ac7f146fae8" [[package]] name = "serde_json" -version = "1.0.48" +version = "1.0.66" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "336b10da19a12ad094b59d870ebde26a45402e5b470add4b5fd03c5048a32127" dependencies = [ - "itoa 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", - "ryu 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.105 (registry+https://github.com/rust-lang/crates.io-index)", + "itoa", + "ryu", + "serde", ] [[package]] name = "slab" -version = "0.4.2" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c307a32c1c5c437f38c7fd45d753050587732ba8628319fbdf12a7e289ccc590" [[package]] name = "spin" version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" [[package]] name = "syn" -version = "1.0.17" +version = "1.0.74" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1873d832550d4588c3dbc20f01361ab00bfe741048f71e3fecf145a7cc18b29c" dependencies = [ - "proc-macro2 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "termcolor" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "winapi-util 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2", + "quote", + "unicode-xid", ] [[package]] name = "tokio" -version = "0.2.13" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01cf844b23c6131f624accf65ce0e4e9956a8bb329400ea5bcc26ae3a5c20b0b" dependencies = [ - "bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", - "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", - "futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "iovec 0.1.4 
(registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "memchr 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)", - "mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)", - "pin-project-lite 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-macros 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", + "autocfg", + "bytes", + "libc", + "memchr", + "mio", + "num_cpus", + "pin-project-lite", + "tokio-macros", + "winapi", ] [[package]] name = "tokio-macros" -version = "0.2.5" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "54473be61f4ebe4efd09cec9bd5d16fa51d70ea0192213d754d2d500457db110" dependencies = [ - "proc-macro2 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.17 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2", + "quote", + "syn", ] [[package]] name = "tokio-rustls" -version = "0.12.2" +version = "0.23.3" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4151fda0cf2798550ad0b34bcfc9b9dcc2a9d2471c895c68f3a8818e54f2389e" dependencies = [ - "futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "rustls 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)", - "webpki 0.21.2 (registry+https://github.com/rust-lang/crates.io-index)", + "rustls", + "tokio", + "webpki", ] [[package]] name = "tokio-util" -version = "0.3.1" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0edfdeb067411dba2044da6d1cb2df793dd35add7888d73c16e3381ded401764" dependencies = [ - "bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", - "futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "futures-sink 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "pin-project-lite 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)", + "bytes", + "futures-core", + "futures-sink", + "pin-project-lite", + "tokio", + "tracing", +] + +[[package]] +name = "tracing" +version = "0.1.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09adeb8c97449311ccd28a427f96fb563e7fd31aabf994189879d9da2394b89d" +dependencies = [ + "cfg-if", + "pin-project-lite", + "tracing-attributes", + "tracing-core", +] + +[[package]] +name = "tracing-attributes" +version = "0.1.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c42e6fa53307c8a17e4ccd4dc81cf5ec38db9209f59b222210375b54ee40d1e2" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tracing-core" +version = "0.1.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2ca517f43f0fb96e0c3072ed5c275fe5eece87e8cb52f4a77b69226d3b1c9df8" +dependencies = [ + "lazy_static", ] [[package]] name = "unicode-xid" -version = "0.2.0" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3" [[package]] name = "untrusted" -version = "0.7.0" +version = "0.7.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" [[package]] name = "walkdir" -version = "1.0.7" +version = "2.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "808cf2735cd4b6866113f648b791c6adc5714537bc222d9347bb203386ffda56" dependencies = [ - "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "same-file 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", + "same-file", + "winapi", + "winapi-util", ] [[package]] -name = "wasm-bindgen" -version = "0.2.59" +name = "wasi" +version = "0.10.2+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" + +[[package]] +name = "wasm-bindgen" +version = "0.2.76" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ce9b1b516211d33767048e5d47fa2a381ed8b76fc48d2ce4aa39877f9f183e0" dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen-macro 0.2.59 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if", + "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.59" +version = "0.2.76" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfe8dc78e2326ba5f845f4b5bf548401604fa20b1dd1d365fb73b6c1d6364041" dependencies = [ - "bumpalo 3.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "proc-macro2 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.17 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen-shared 0.2.59 (registry+https://github.com/rust-lang/crates.io-index)", + "bumpalo", + "lazy_static", + "log", + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-macro" -version = "0.2.59" +version = "0.2.76" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44468aa53335841d9d6b6c023eaab07c0cd4bddbcfdee3e2bb1e8d2cb8069fef" dependencies = [ - "quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen-macro-support 0.2.59 (registry+https://github.com/rust-lang/crates.io-index)", + "quote", + "wasm-bindgen-macro-support", ] [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.59" +version = "0.2.76" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0195807922713af1e67dc66132c7328206ed9766af3858164fb583eedc25fbad" dependencies = [ - "proc-macro2 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.17 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen-backend 0.2.59 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen-shared 0.2.59 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-backend", + "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.59" +version = "0.2.76" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"acdb075a845574a1fa5f09fd77e43f7747599301ea3417a9fbffdeedfc1f4a29" [[package]] name = "web-sys" -version = "0.3.36" +version = "0.3.53" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "224b2f6b67919060055ef1a67807367c2066ed520c3862cc013d26cf893a783c" dependencies = [ - "js-sys 0.3.36 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen 0.2.59 (registry+https://github.com/rust-lang/crates.io-index)", + "js-sys", + "wasm-bindgen", ] [[package]] name = "webpki" -version = "0.21.2" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f095d78192e208183081cc07bc5515ef55216397af48b873e5edcd72637fa1bd" dependencies = [ - "ring 0.16.11 (registry+https://github.com/rust-lang/crates.io-index)", - "untrusted 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", + "ring", + "untrusted", ] [[package]] name = "webpki-roots" -version = "0.17.0" +version = "0.22.2" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "552ceb903e957524388c4d3475725ff2c8b7960922063af6ce53c9a43da07449" dependencies = [ - "webpki 0.21.2 (registry+https://github.com/rust-lang/crates.io-index)", + "webpki", ] [[package]] name = "winapi" -version = "0.2.8" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "winapi" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" dependencies = [ - "winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", ] -[[package]] -name = "winapi-build" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" - [[package]] name = "winapi-i686-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" -version = "0.1.3" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" dependencies = [ - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi", ] [[package]] name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "ws2_32-sys" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[metadata] -"checksum atty 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)" = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" -"checksum autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d" -"checksum base64 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0b25d992356d2eb0ed82172f5248873db5560c4721f564b13cb5193bda5e668e" -"checksum bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" -"checksum bumpalo 3.2.1 
(registry+https://github.com/rust-lang/crates.io-index)" = "12ae9db68ad7fac5fe51304d20f016c911539251075a214f8e663babefa35187" -"checksum byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de" -"checksum bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)" = "130aac562c0dd69c56b3b1cc8ffd2e17be31d0b6c25b61c96b76231aa23e39e1" -"checksum cc 1.0.50 (registry+https://github.com/rust-lang/crates.io-index)" = "95e28fa049fda1c330bcf9d723be7663a899c4679724b34c81e9f5a326aab8cd" -"checksum cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" -"checksum env_logger 0.5.13 (registry+https://github.com/rust-lang/crates.io-index)" = "15b0a4d2e39f8420210be8b27eeda28029729e2fd4291019455016c348240c38" -"checksum fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "2fad85553e09a6f881f739c29f0b00b0f01357c743266d478b68951ce23285f3" -"checksum fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba" -"checksum fuchsia-zircon 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2e9763c69ebaae630ba35f74888db465e49e259ba1bc0eda7d06f4a067615d82" -"checksum fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" -"checksum futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "f25592f769825e89b92358db00d26f965761e094951ac44d3663ef25b7ac464a" -"checksum futures-sink 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "3466821b4bc114d95b087b850a724c6f83115e929bc88f1fa98a3304a944c8a6" -"checksum futures-task 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "7b0a34e53cf6cdcd0178aa573aed466b646eb3db769570841fda0c7ede375a27" -"checksum futures-util 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "22766cf25d64306bedf0384da004d05c9974ab104fcc4528f1236181c18004c5" -"checksum hermit-abi 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "1010591b26bbfe835e9faeabeb11866061cc7dcebffd56ad7d0942d0e61aefd8" -"checksum hex 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d6a22814455d41612f41161581c2883c0c6a1c41852729b17d5ed88f01e153aa" -"checksum http 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b708cc7f06493459026f53b9a61a7a121a5d1ec6238dee58ea4941132b30156b" -"checksum humantime 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "df004cfca50ef23c36850aaaa59ad52cc70d0e90243c3c7737a4dd32dc7a3c4f" -"checksum indexmap 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "076f042c5b7b98f31d205f1249267e12a6518c1481e9dae9764af19b707d2292" -"checksum iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "b2b3ea6ff95e175473f8ffe6a7eb7c00d054240321b84c57051175fe3c1e075e" -"checksum itoa 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "b8b7a7c0c47db5545ed3fef7468ee7bb5b74691498139e4b3f6a20685dc6dd8e" -"checksum js-sys 0.3.36 (registry+https://github.com/rust-lang/crates.io-index)" = "1cb931d43e71f560c81badb0191596562bafad2be06a3f9025b845c847c60df5" -"checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" -"checksum lazy_static 1.4.0 
(registry+https://github.com/rust-lang/crates.io-index)" = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" -"checksum libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)" = "dea0c0405123bba743ee3f91f49b1c7cfb684eef0da0a50110f758ccf24cdff0" -"checksum log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)" = "14b6052be84e6b71ab17edffc2eeabf5c2c3ae1fdb464aae35ac50c67a44e1f7" -"checksum memchr 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3728d817d99e5ac407411fa471ff9800a778d88a24685968b36824eaf4bee400" -"checksum mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)" = "302dec22bcf6bae6dfb69c647187f4b4d0fb6f535521f7bc022430ce8e12008f" -"checksum miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "8c1f2f3b1cf331de6896aabf6e9d55dca90356cc9960cca7eaaf408a355ae919" -"checksum net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)" = "42550d9fb7b6684a6d404d9fa7250c2eb2646df731d1c06afc06dcee9e1bcf88" -"checksum pin-project-lite 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "237844750cfbb86f67afe27eee600dfbbcb6188d734139b534cbfbf4f96792ae" -"checksum pin-utils 0.1.0-alpha.4 (registry+https://github.com/rust-lang/crates.io-index)" = "5894c618ce612a3fa23881b152b608bafb8c56cfc22f434a3ba3120b40f7b587" -"checksum proc-macro2 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)" = "6c09721c6781493a2a492a96b5a5bf19b65917fe6728884e7c44dd0c60ca3435" -"checksum quick-error 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" -"checksum quickcheck 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "02c2411d418cea2364325b18a205664f9ef8252e06b2e911db97c0b0d98b1406" -"checksum quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2bdc6c187c65bca4260c9011c9e3132efe4909da44726bad24cf7572ae338d7f" -"checksum rand 0.3.23 (registry+https://github.com/rust-lang/crates.io-index)" = "64ac302d8f83c0c1974bf758f6b041c6c8ada916fbb44a609158ca8b064cc76c" -"checksum rand 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)" = "552840b97013b1a26992c11eac34bdd778e464601a4c2054b5f0bff7c6761293" -"checksum rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b" -"checksum rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc" -"checksum rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2" -"checksum ring 0.16.11 (registry+https://github.com/rust-lang/crates.io-index)" = "741ba1704ae21999c00942f9f5944f801e977f54302af346b596287599ad1862" -"checksum rustls 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b25a18b1bf7387f0145e7f8324e700805aade3842dd3db2e74e4cdeb4677c09e" -"checksum ryu 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "535622e6be132bccd223f4bb2b8ac8d53cda3c7a6394944d3b2b33fb974f9d76" -"checksum same-file 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "d931a44fdaa43b8637009e7632a02adc4f2b2e0733c08caa4cf00e8da4a117a7" -"checksum sct 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e3042af939fca8c3453b7af0f1c66e533a15a86169e39de2657310ade8f98d3c" -"checksum serde 1.0.105 (registry+https://github.com/rust-lang/crates.io-index)" = 
"e707fbbf255b8fc8c3b99abb91e7257a622caeb20a9818cbadbeeede4e0932ff" -"checksum serde_json 1.0.48 (registry+https://github.com/rust-lang/crates.io-index)" = "9371ade75d4c2d6cb154141b9752cf3781ec9c05e0e5cf35060e1e70ee7b9c25" -"checksum slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" -"checksum spin 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" -"checksum syn 1.0.17 (registry+https://github.com/rust-lang/crates.io-index)" = "0df0eb663f387145cab623dea85b09c2c5b4b0aef44e945d928e682fce71bb03" -"checksum termcolor 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "bb6bfa289a4d7c5766392812c0a1f4c1ba45afa1ad47803c11e1f407d846d75f" -"checksum tokio 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)" = "0fa5e81d6bc4e67fe889d5783bd2a128ab2e0cfa487e0be16b6a8d177b101616" -"checksum tokio-macros 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)" = "f0c3acc6aa564495a0f2e1d59fab677cd7f81a19994cfc7f3ad0e64301560389" -"checksum tokio-rustls 0.12.2 (registry+https://github.com/rust-lang/crates.io-index)" = "141afec0978abae6573065a48882c6bae44c5cc61db9b511ac4abf6a09bfd9cc" -"checksum tokio-util 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "be8242891f2b6cbef26a2d7e8605133c2c554cd35b3e4948ea892d6d68436499" -"checksum unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "826e7639553986605ec5979c7dd957c7895e93eabed50ab2ffa7f6128a75097c" -"checksum untrusted 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "60369ef7a31de49bcb3f6ca728d4ba7300d9a1658f94c727d4cab8c8d9f4aece" -"checksum walkdir 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)" = "bb08f9e670fab86099470b97cd2b252d6527f0b3cc1401acdb595ffc9dd288ff" -"checksum wasm-bindgen 0.2.59 (registry+https://github.com/rust-lang/crates.io-index)" = "3557c397ab5a8e347d434782bcd31fc1483d927a6826804cec05cc792ee2519d" -"checksum wasm-bindgen-backend 0.2.59 (registry+https://github.com/rust-lang/crates.io-index)" = "e0da9c9a19850d3af6df1cb9574970b566d617ecfaf36eb0b706b6f3ef9bd2f8" -"checksum wasm-bindgen-macro 0.2.59 (registry+https://github.com/rust-lang/crates.io-index)" = "0f6fde1d36e75a714b5fe0cffbb78978f222ea6baebb726af13c78869fdb4205" -"checksum wasm-bindgen-macro-support 0.2.59 (registry+https://github.com/rust-lang/crates.io-index)" = "25bda4168030a6412ea8a047e27238cadf56f0e53516e1e83fec0a8b7c786f6d" -"checksum wasm-bindgen-shared 0.2.59 (registry+https://github.com/rust-lang/crates.io-index)" = "fc9f36ad51f25b0219a3d4d13b90eb44cd075dff8b6280cca015775d7acaddd8" -"checksum web-sys 0.3.36 (registry+https://github.com/rust-lang/crates.io-index)" = "721c6263e2c66fd44501cc5efbfa2b7dfa775d13e4ea38c46299646ed1f9c70a" -"checksum webpki 0.21.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f1f50e1972865d6b1adb54167d1c8ed48606004c2c9d0ea5f1eeb34d95e863ef" -"checksum webpki-roots 0.17.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a262ae37dd9d60f60dd473d1158f9fbebf110ba7b6a5051c8160460f6043718b" -"checksum winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a" -"checksum winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "8093091eeb260906a183e6ae1abdba2ef5ef2257a21801128899c3fc699229c6" -"checksum winapi-build 0.1.1 
(registry+https://github.com/rust-lang/crates.io-index)" = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc" -"checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" -"checksum winapi-util 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "4ccfbf554c6ad11084fb7517daca16cfdcaccbdadba4fc336f032a8b12c2ad80" -"checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" -"checksum ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" diff --git a/third_party/rust/h2/Cargo.toml b/third_party/rust/h2/Cargo.toml index 2b9f0af95301..bd178fde4d08 100644 --- a/third_party/rust/h2/Cargo.toml +++ b/third_party/rust/h2/Cargo.toml @@ -3,28 +3,45 @@ # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies +# to registry (e.g., crates.io) dependencies. # -# If you believe there's an error in this file please file an -# issue against the rust-lang/cargo repository. If you're -# editing this file be aware that the upstream Cargo.toml -# will likely look very different (and much more reasonable) +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. [package] edition = "2018" name = "h2" -version = "0.2.5" -authors = ["Carl Lerche ", "Sean McArthur "] -exclude = ["fixtures/**", "ci/**"] -description = "An HTTP/2.0 client and server" -documentation = "https://docs.rs/h2/0.2.5/h2/" +version = "0.3.13" +authors = [ + "Carl Lerche ", + "Sean McArthur ", +] +exclude = [ + "fixtures/**", + "ci/**", +] +description = "An HTTP/2 client and server" +documentation = "https://docs.rs/h2" readme = "README.md" -keywords = ["http", "async", "non-blocking"] -categories = ["asynchronous", "web-programming", "network-programming"] +keywords = [ + "http", + "async", + "non-blocking", +] +categories = [ + "asynchronous", + "web-programming", + "network-programming", +] license = "MIT" repository = "https://github.com/hyperium/h2" + +[package.metadata.docs.rs] +features = ["stream"] + [dependencies.bytes] -version = "0.5.2" +version = "1" [dependencies.fnv] version = "1.0.5" @@ -45,37 +62,38 @@ default-features = false version = "0.2" [dependencies.indexmap] -version = "1.0" - -[dependencies.log] -version = "0.4.1" +version = "1.5.2" +features = ["std"] [dependencies.slab] -version = "0.4.0" +version = "0.4.2" [dependencies.tokio] -version = "0.2" +version = "1" features = ["io-util"] [dependencies.tokio-util] -version = "0.3.1" +version = "0.7.1" features = ["codec"] + +[dependencies.tracing] +version = "0.1.21" +features = ["std"] +default-features = false + [dev-dependencies.env_logger] -version = "0.5.3" +version = "0.9" default-features = false [dev-dependencies.hex] -version = "0.2.0" +version = "0.4.3" [dev-dependencies.quickcheck] -version = "0.4.1" +version = "1.0.3" default-features = false [dev-dependencies.rand] -version = "0.3.15" - -[dev-dependencies.rustls] -version = "0.16" +version = "0.8.4" 
[dev-dependencies.serde] version = "1.0.0" @@ -84,20 +102,22 @@ version = "1.0.0" version = "1.0.0" [dev-dependencies.tokio] -version = "0.2" -features = ["dns", "macros", "rt-core", "sync", "tcp"] +version = "1" +features = [ + "rt-multi-thread", + "macros", + "sync", + "net", +] [dev-dependencies.tokio-rustls] -version = "0.12.0" +version = "0.23.2" [dev-dependencies.walkdir] -version = "1.0.0" - -[dev-dependencies.webpki] -version = "0.21" +version = "2.3.2" [dev-dependencies.webpki-roots] -version = "0.17" +version = "0.22.2" [features] stream = [] diff --git a/third_party/rust/h2/README.md b/third_party/rust/h2/README.md index 21f00a500fb7..2e15999149ff 100644 --- a/third_party/rust/h2/README.md +++ b/third_party/rust/h2/README.md @@ -1,6 +1,6 @@ # H2 -A Tokio aware, HTTP/2.0 client & server implementation for Rust. +A Tokio aware, HTTP/2 client & server implementation for Rust. [![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT) [![Crates.io](https://img.shields.io/crates/v/h2.svg)](https://crates.io/crates/h2) @@ -12,24 +12,23 @@ More information about this crate can be found in the [crate documentation][dox] ## Features -* Client and server HTTP/2.0 implementation. -* Implements the full HTTP/2.0 specification. +* Client and server HTTP/2 implementation. +* Implements the full HTTP/2 specification. * Passes [h2spec](https://github.com/summerwind/h2spec). * Focus on performance and correctness. * Built on [Tokio](https://tokio.rs). ## Non goals -This crate is intended to only be an implementation of the HTTP/2.0 +This crate is intended to only be an implementation of the HTTP/2 specification. It does not handle: * Managing TCP connections * HTTP 1.0 upgrade * TLS -* Any feature not described by the HTTP/2.0 specification. +* Any feature not described by the HTTP/2 specification. -The intent is that this crate will eventually be used by -[hyper](https://github.com/hyperium/hyper), which will provide all of these features. +This crate is now used by [hyper](https://github.com/hyperium/hyper), which will provide all of these features. ## Usage @@ -37,7 +36,7 @@ To use `h2`, first add this to your `Cargo.toml`: ```toml [dependencies] -h2 = "0.2" +h2 = "0.3" ``` Next, add this to your crate: @@ -56,7 +55,7 @@ fn main() { **How does h2 compare to [solicit] or [rust-http2]?** -The h2 library has implemented more of the details of the HTTP/2.0 specification +The h2 library has implemented more of the details of the HTTP/2 specification than any other Rust library. It also passes the [h2spec] set of tests. The h2 library is rapidly approaching "production ready" quality. 
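
For orientation while reviewing the version bumps above: a minimal sketch of driving the updated h2 0.3 client on top of tokio 1, assuming `h2 = "0.3"`, `http = "0.2"`, and `tokio = { version = "1", features = ["macros", "rt-multi-thread", "net"] }` in Cargo.toml. It is not taken from the patch; it pairs with the `examples/server.rs` shown below (which binds `127.0.0.1:5928`) and only uses APIs that appear elsewhere in this diff.

```rust
use h2::client;
use http::Request;
use tokio::net::TcpStream;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Connect to the address used by examples/server.rs below.
    let tcp = TcpStream::connect("127.0.0.1:5928").await?;

    // HTTP/2 handshake; `connection` owns the socket I/O and must be polled.
    let (send_request, connection) = client::handshake(tcp).await?;
    tokio::spawn(async move {
        if let Err(e) = connection.await {
            println!("connection error: {}", e);
        }
    });

    // Wait until the connection is ready to initiate a new stream.
    let mut send_request = send_request.ready().await?;

    // Send a GET request with no body (`end_of_stream = true`).
    let request = Request::get("http://127.0.0.1:5928/").body(())?;
    let (response, _send_stream) = send_request.send_request(request, true)?;

    let response = response.await?;
    println!("status: {}", response.status());

    // Drain the response body, releasing flow-control capacity as we go,
    // mirroring what examples/server.rs does on the request body.
    let mut body = response.into_body();
    while let Some(chunk) = body.data().await {
        let chunk = chunk?;
        println!("received {} bytes", chunk.len());
        let _ = body.flow_control().release_capacity(chunk.len());
    }

    Ok(())
}
```
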
diff --git a/third_party/rust/h2/examples/akamai.rs b/third_party/rust/h2/examples/akamai.rs index 29d8a9347408..e522b37ff239 100644 --- a/third_party/rust/h2/examples/akamai.rs +++ b/third_party/rust/h2/examples/akamai.rs @@ -3,9 +3,9 @@ use http::{Method, Request}; use tokio::net::TcpStream; use tokio_rustls::TlsConnector; -use rustls::Session; -use webpki::DNSNameRef; +use tokio_rustls::rustls::{OwnedTrustAnchor, RootCertStore, ServerName}; +use std::convert::TryFrom; use std::error::Error; use std::net::ToSocketAddrs; @@ -16,9 +16,19 @@ pub async fn main() -> Result<(), Box> { let _ = env_logger::try_init(); let tls_client_config = std::sync::Arc::new({ - let mut c = rustls::ClientConfig::new(); - c.root_store - .add_server_trust_anchors(&webpki_roots::TLS_SERVER_ROOTS); + let mut root_store = RootCertStore::empty(); + root_store.add_server_trust_anchors(webpki_roots::TLS_SERVER_ROOTS.0.iter().map(|ta| { + OwnedTrustAnchor::from_subject_spki_name_constraints( + ta.subject, + ta.spki, + ta.name_constraints, + ) + })); + + let mut c = tokio_rustls::rustls::ClientConfig::builder() + .with_safe_defaults() + .with_root_certificates(root_store) + .with_no_client_auth(); c.alpn_protocols.push(ALPN_H2.as_bytes().to_owned()); c }); @@ -33,13 +43,13 @@ pub async fn main() -> Result<(), Box> { println!("ADDR: {:?}", addr); let tcp = TcpStream::connect(&addr).await?; - let dns_name = DNSNameRef::try_from_ascii_str("http2.akamai.com").unwrap(); + let dns_name = ServerName::try_from("http2.akamai.com").unwrap(); let connector = TlsConnector::from(tls_client_config); let res = connector.connect(dns_name, tcp).await; let tls = res.unwrap(); { let (_, session) = tls.get_ref(); - let negotiated_protocol = session.get_alpn_protocol(); + let negotiated_protocol = session.alpn_protocol(); assert_eq!( Some(ALPN_H2.as_bytes()), negotiated_protocol.as_ref().map(|x| &**x) diff --git a/third_party/rust/h2/examples/server.rs b/third_party/rust/h2/examples/server.rs index 1753b7a2e8fa..6d6490db0836 100644 --- a/third_party/rust/h2/examples/server.rs +++ b/third_party/rust/h2/examples/server.rs @@ -1,21 +1,23 @@ use std::error::Error; use bytes::Bytes; -use h2::server; +use h2::server::{self, SendResponse}; +use h2::RecvStream; +use http::Request; use tokio::net::{TcpListener, TcpStream}; #[tokio::main] async fn main() -> Result<(), Box> { let _ = env_logger::try_init(); - let mut listener = TcpListener::bind("127.0.0.1:5928").await?; + let listener = TcpListener::bind("127.0.0.1:5928").await?; println!("listening on {:?}", listener.local_addr()); loop { if let Ok((socket, _peer_addr)) = listener.accept().await { tokio::spawn(async move { - if let Err(e) = handle(socket).await { + if let Err(e) = serve(socket).await { println!(" -> err={:?}", e); } }); @@ -23,22 +25,41 @@ async fn main() -> Result<(), Box> { } } -async fn handle(socket: TcpStream) -> Result<(), Box> { +async fn serve(socket: TcpStream) -> Result<(), Box> { let mut connection = server::handshake(socket).await?; println!("H2 connection bound"); while let Some(result) = connection.accept().await { - let (request, mut respond) = result?; - println!("GOT request: {:?}", request); - let response = http::Response::new(()); - - let mut send = respond.send_response(response, false)?; - - println!(">>>> sending data"); - send.send_data(Bytes::from_static(b"hello world"), true)?; + let (request, respond) = result?; + tokio::spawn(async move { + if let Err(e) = handle_request(request, respond).await { + println!("error while handling request: {}", e); + } + 
}); } - println!("~~~~~~~~~~~~~~~~~~~~~~~~~~~ H2 connection CLOSE !!!!!! ~~~~~~~~~~~"); + println!("~~~~~~~~~~~ H2 connection CLOSE !!!!!! ~~~~~~~~~~~"); + Ok(()) +} + +async fn handle_request( + mut request: Request, + mut respond: SendResponse, +) -> Result<(), Box> { + println!("GOT request: {:?}", request); + + let body = request.body_mut(); + while let Some(data) = body.data().await { + let data = data?; + println!("<<<< recv {:?}", data); + let _ = body.flow_control().release_capacity(data.len()); + } + + let response = http::Response::new(()); + let mut send = respond.send_response(response, false)?; + println!(">>>> send"); + send.send_data(Bytes::from_static(b"hello "), false)?; + send.send_data(Bytes::from_static(b"world\n"), true)?; Ok(()) } diff --git a/third_party/rust/h2/src/client.rs b/third_party/rust/h2/src/client.rs index 63514e322378..e75cd3507761 100644 --- a/third_party/rust/h2/src/client.rs +++ b/third_party/rust/h2/src/client.rs @@ -1,18 +1,18 @@ -//! Client implementation of the HTTP/2.0 protocol. +//! Client implementation of the HTTP/2 protocol. //! //! # Getting started //! -//! Running an HTTP/2.0 client requires the caller to establish the underlying +//! Running an HTTP/2 client requires the caller to establish the underlying //! connection as well as get the connection to a state that is ready to begin -//! the HTTP/2.0 handshake. See [here](../index.html#handshake) for more +//! the HTTP/2 handshake. See [here](../index.html#handshake) for more //! details. //! //! This could be as basic as using Tokio's [`TcpStream`] to connect to a remote //! host, but usually it means using either ALPN or HTTP/1.1 protocol upgrades. //! //! Once a connection is obtained, it is passed to [`handshake`], which will -//! begin the [HTTP/2.0 handshake]. This returns a future that completes once -//! the handshake process is performed and HTTP/2.0 streams may be initialized. +//! begin the [HTTP/2 handshake]. This returns a future that completes once +//! the handshake process is performed and HTTP/2 streams may be initialized. //! //! [`handshake`] uses default configuration values. There are a number of //! settings that can be changed by using [`Builder`] instead. @@ -26,16 +26,16 @@ //! # Making requests //! //! Requests are made using the [`SendRequest`] handle provided by the handshake -//! future. Once a request is submitted, an HTTP/2.0 stream is initialized and +//! future. Once a request is submitted, an HTTP/2 stream is initialized and //! the request is sent to the server. //! //! A request body and request trailers are sent using [`SendRequest`] and the //! server's response is returned once the [`ResponseFuture`] future completes. //! Both the [`SendStream`] and [`ResponseFuture`] instances are returned by -//! [`SendRequest::send_request`] and are tied to the HTTP/2.0 stream +//! [`SendRequest::send_request`] and are tied to the HTTP/2 stream //! initialized by the sent request. //! -//! The [`SendRequest::poll_ready`] function returns `Ready` when a new HTTP/2.0 +//! The [`SendRequest::poll_ready`] function returns `Ready` when a new HTTP/2 //! stream can be created, i.e. as long as the current number of active streams //! is below [`MAX_CONCURRENT_STREAMS`]. If a new stream cannot be created, the //! caller will be notified once an existing stream closes, freeing capacity for @@ -131,13 +131,14 @@ //! [`SendRequest`]: struct.SendRequest.html //! [`ResponseFuture`]: struct.ResponseFuture.html //! 
[`SendRequest::poll_ready`]: struct.SendRequest.html#method.poll_ready -//! [HTTP/2.0 handshake]: http://httpwg.org/specs/rfc7540.html#ConnectionHeader +//! [HTTP/2 handshake]: http://httpwg.org/specs/rfc7540.html#ConnectionHeader //! [`Builder`]: struct.Builder.html //! [`Error`]: ../struct.Error.html -use crate::codec::{Codec, RecvError, SendError, UserError}; +use crate::codec::{Codec, SendError, UserError}; +use crate::ext::Protocol; use crate::frame::{Headers, Pseudo, Reason, Settings, StreamId}; -use crate::proto; +use crate::proto::{self, Error}; use crate::{FlowControl, PingPong, RecvStream, SendStream}; use bytes::{Buf, Bytes}; @@ -149,8 +150,9 @@ use std::task::{Context, Poll}; use std::time::Duration; use std::usize; use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt}; +use tracing::Instrument; -/// Initializes new HTTP/2.0 streams on a connection by sending a request. +/// Initializes new HTTP/2 streams on a connection by sending a request. /// /// This type does no work itself. Instead, it is a handle to the inner /// connection state held by [`Connection`]. If the associated connection @@ -160,7 +162,7 @@ use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt}; /// / threads than their associated [`Connection`] instance. Internally, there /// is a buffer used to stage requests before they get written to the /// connection. There is no guarantee that requests get written to the -/// connection in FIFO order as HTTP/2.0 prioritization logic can play a role. +/// connection in FIFO order as HTTP/2 prioritization logic can play a role. /// /// [`SendRequest`] implements [`Clone`], enabling the creation of many /// instances that are backed by a single connection. @@ -183,10 +185,10 @@ pub struct ReadySendRequest { inner: Option>, } -/// Manages all state associated with an HTTP/2.0 client connection. +/// Manages all state associated with an HTTP/2 client connection. /// /// A `Connection` is backed by an I/O resource (usually a TCP socket) and -/// implements the HTTP/2.0 client logic for that connection. It is responsible +/// implements the HTTP/2 client logic for that connection. It is responsible /// for driving the internal state forward, performing the work requested of the /// associated handles ([`SendRequest`], [`ResponseFuture`], [`SendStream`], /// [`RecvStream`]). @@ -219,7 +221,7 @@ pub struct ReadySendRequest { /// // Submit the connection handle to an executor. /// tokio::spawn(async { connection.await.expect("connection failed"); }); /// -/// // Now, use `send_request` to initialize HTTP/2.0 streams. +/// // Now, use `send_request` to initialize HTTP/2 streams. /// // ... /// # Ok(()) /// # } @@ -273,7 +275,7 @@ pub struct PushPromises { /// Methods can be chained in order to set the configuration values. /// /// The client is constructed by calling [`handshake`] and passing the I/O -/// handle that will back the HTTP/2.0 server. +/// handle that will back the HTTP/2 server. /// /// New instances of `Builder` are obtained via [`Builder::new`]. /// @@ -293,7 +295,7 @@ pub struct PushPromises { /// # async fn doc(my_io: T) /// -> Result<((SendRequest, Connection)), h2::Error> /// # { -/// // `client_fut` is a future representing the completion of the HTTP/2.0 +/// // `client_fut` is a future representing the completion of the HTTP/2 /// // handshake. /// let client_fut = Builder::new() /// .initial_window_size(1_000_000) @@ -318,6 +320,9 @@ pub struct Builder { /// Initial target window size for new connections. 
initial_target_connection_window_size: Option, + /// Maximum amount of bytes to "buffer" for writing per stream. + max_send_buffer_size: usize, + /// Maximum number of locally reset streams to keep at a time. reset_stream_max: usize, @@ -338,7 +343,7 @@ impl SendRequest where B: Buf + 'static, { - /// Returns `Ready` when the connection can initialize a new HTTP/2.0 + /// Returns `Ready` when the connection can initialize a new HTTP/2 /// stream. /// /// This function must return `Ready` before `send_request` is called. When @@ -386,16 +391,16 @@ where ReadySendRequest { inner: Some(self) } } - /// Sends a HTTP/2.0 request to the server. + /// Sends a HTTP/2 request to the server. /// - /// `send_request` initializes a new HTTP/2.0 stream on the associated + /// `send_request` initializes a new HTTP/2 stream on the associated /// connection, then sends the given request using this new stream. Only the /// request head is sent. /// /// On success, a [`ResponseFuture`] instance and [`SendStream`] instance /// are returned. The [`ResponseFuture`] instance is used to get the /// server's response and the [`SendStream`] instance is used to send a - /// request body or trailers to the server over the same HTTP/2.0 stream. + /// request body or trailers to the server over the same HTTP/2 stream. /// /// To send a request body or trailers, set `end_of_stream` to `false`. /// Then, use the returned [`SendStream`] instance to stream request body @@ -516,6 +521,19 @@ where (response, stream) }) } + + /// Returns whether the [extended CONNECT protocol][1] is enabled or not. + /// + /// This setting is configured by the server peer by sending the + /// [`SETTINGS_ENABLE_CONNECT_PROTOCOL` parameter][2] in a `SETTINGS` frame. + /// This method returns the currently acknowledged value recieved from the + /// remote. + /// + /// [1]: https://datatracker.ietf.org/doc/html/rfc8441#section-4 + /// [2]: https://datatracker.ietf.org/doc/html/rfc8441#section-3 + pub fn is_extended_connect_protocol_enabled(&self) -> bool { + self.inner.is_extended_connect_protocol_enabled() + } } impl fmt::Debug for SendRequest @@ -600,7 +618,7 @@ impl Builder { /// # async fn doc(my_io: T) /// # -> Result<((SendRequest, Connection)), h2::Error> /// # { - /// // `client_fut` is a future representing the completion of the HTTP/2.0 + /// // `client_fut` is a future representing the completion of the HTTP/2 /// // handshake. /// let client_fut = Builder::new() /// .initial_window_size(1_000_000) @@ -613,6 +631,7 @@ impl Builder { /// ``` pub fn new() -> Builder { Builder { + max_send_buffer_size: proto::DEFAULT_MAX_SEND_BUFFER_SIZE, reset_stream_duration: Duration::from_secs(proto::DEFAULT_RESET_STREAM_SECS), reset_stream_max: proto::DEFAULT_RESET_STREAM_MAX, initial_target_connection_window_size: None, @@ -642,7 +661,7 @@ impl Builder { /// # async fn doc(my_io: T) /// # -> Result<((SendRequest, Connection)), h2::Error> /// # { - /// // `client_fut` is a future representing the completion of the HTTP/2.0 + /// // `client_fut` is a future representing the completion of the HTTP/2 /// // handshake. /// let client_fut = Builder::new() /// .initial_window_size(1_000_000) @@ -677,7 +696,7 @@ impl Builder { /// # async fn doc(my_io: T) /// # -> Result<((SendRequest, Connection)), h2::Error> /// # { - /// // `client_fut` is a future representing the completion of the HTTP/2.0 + /// // `client_fut` is a future representing the completion of the HTTP/2 /// // handshake. 
/// let client_fut = Builder::new() /// .initial_connection_window_size(1_000_000) @@ -692,7 +711,7 @@ impl Builder { self } - /// Indicates the size (in octets) of the largest HTTP/2.0 frame payload that the + /// Indicates the size (in octets) of the largest HTTP/2 frame payload that the /// configured client is able to accept. /// /// The sender may send data frames that are **smaller** than this value, @@ -711,7 +730,7 @@ impl Builder { /// # async fn doc(my_io: T) /// # -> Result<((SendRequest, Connection)), h2::Error> /// # { - /// // `client_fut` is a future representing the completion of the HTTP/2.0 + /// // `client_fut` is a future representing the completion of the HTTP/2 /// // handshake. /// let client_fut = Builder::new() /// .max_frame_size(1_000_000) @@ -751,7 +770,7 @@ impl Builder { /// # async fn doc(my_io: T) /// # -> Result<((SendRequest, Connection)), h2::Error> /// # { - /// // `client_fut` is a future representing the completion of the HTTP/2.0 + /// // `client_fut` is a future representing the completion of the HTTP/2 /// // handshake. /// let client_fut = Builder::new() /// .max_header_list_size(16 * 1024) @@ -786,7 +805,7 @@ impl Builder { /// a protocol level error. Instead, the `h2` library will immediately reset /// the stream. /// - /// See [Section 5.1.2] in the HTTP/2.0 spec for more details. + /// See [Section 5.1.2] in the HTTP/2 spec for more details. /// /// [Section 5.1.2]: https://http2.github.io/http2-spec/#rfc.section.5.1.2 /// @@ -800,7 +819,7 @@ impl Builder { /// # async fn doc(my_io: T) /// # -> Result<((SendRequest, Connection)), h2::Error> /// # { - /// // `client_fut` is a future representing the completion of the HTTP/2.0 + /// // `client_fut` is a future representing the completion of the HTTP/2 /// // handshake. /// let client_fut = Builder::new() /// .max_concurrent_streams(1000) @@ -827,7 +846,7 @@ impl Builder { /// Sending streams past the limit returned by the peer will be treated /// as a stream error of type PROTOCOL_ERROR or REFUSED_STREAM. /// - /// See [Section 5.1.2] in the HTTP/2.0 spec for more details. + /// See [Section 5.1.2] in the HTTP/2 spec for more details. /// /// [Section 5.1.2]: https://http2.github.io/http2-spec/#rfc.section.5.1.2 /// @@ -841,7 +860,7 @@ impl Builder { /// # async fn doc(my_io: T) /// # -> Result<((SendRequest, Connection)), h2::Error> /// # { - /// // `client_fut` is a future representing the completion of the HTTP/2.0 + /// // `client_fut` is a future representing the completion of the HTTP/2 /// // handshake. /// let client_fut = Builder::new() /// .initial_max_send_streams(1000) @@ -858,7 +877,7 @@ impl Builder { /// Sets the maximum number of concurrent locally reset streams. /// - /// When a stream is explicitly reset, the HTTP/2.0 specification requires + /// When a stream is explicitly reset, the HTTP/2 specification requires /// that any further frames received for that stream must be ignored for /// "some time". /// @@ -886,7 +905,7 @@ impl Builder { /// # async fn doc(my_io: T) /// # -> Result<((SendRequest, Connection)), h2::Error> /// # { - /// // `client_fut` is a future representing the completion of the HTTP/2.0 + /// // `client_fut` is a future representing the completion of the HTTP/2 /// // handshake. /// let client_fut = Builder::new() /// .max_concurrent_reset_streams(1000) @@ -903,7 +922,7 @@ impl Builder { /// Sets the duration to remember locally reset streams. 
/// - /// When a stream is explicitly reset, the HTTP/2.0 specification requires + /// When a stream is explicitly reset, the HTTP/2 specification requires /// that any further frames received for that stream must be ignored for /// "some time". /// @@ -932,7 +951,7 @@ impl Builder { /// # async fn doc(my_io: T) /// # -> Result<((SendRequest, Connection)), h2::Error> /// # { - /// // `client_fut` is a future representing the completion of the HTTP/2.0 + /// // `client_fut` is a future representing the completion of the HTTP/2 /// // handshake. /// let client_fut = Builder::new() /// .reset_stream_duration(Duration::from_secs(10)) @@ -947,6 +966,24 @@ impl Builder { self } + /// Sets the maximum send buffer size per stream. + /// + /// Once a stream has buffered up to (or over) the maximum, the stream's + /// flow control will not "poll" additional capacity. Once bytes for the + /// stream have been written to the connection, the send buffer capacity + /// will be freed up again. + /// + /// The default is currently ~400MB, but may change. + /// + /// # Panics + /// + /// This function panics if `max` is larger than `u32::MAX`. + pub fn max_send_buffer_size(&mut self, max: usize) -> &mut Self { + assert!(max <= std::u32::MAX as usize); + self.max_send_buffer_size = max; + self + } + /// Enables or disables server push promises. /// /// This value is included in the initial SETTINGS handshake. When set, the @@ -954,7 +991,7 @@ impl Builder { /// false in the initial SETTINGS handshake guarantees that the remote server /// will never send a push promise. /// - /// This setting can be changed during the life of a single HTTP/2.0 + /// This setting can be changed during the life of a single HTTP/2 /// connection by sending another settings frame updating the value. /// /// Default value: `true`. @@ -970,7 +1007,7 @@ impl Builder { /// # async fn doc(my_io: T) /// # -> Result<((SendRequest, Connection)), h2::Error> /// # { - /// // `client_fut` is a future representing the completion of the HTTP/2.0 + /// // `client_fut` is a future representing the completion of the HTTP/2 /// // handshake. /// let client_fut = Builder::new() /// .enable_push(false) @@ -996,22 +1033,22 @@ impl Builder { self } - /// Creates a new configured HTTP/2.0 client backed by `io`. + /// Creates a new configured HTTP/2 client backed by `io`. /// /// It is expected that `io` already be in an appropriate state to commence - /// the [HTTP/2.0 handshake]. The handshake is completed once both the connection + /// the [HTTP/2 handshake]. The handshake is completed once both the connection /// preface and the initial settings frame is sent by the client. /// /// The handshake future does not wait for the initial settings frame from the /// server. /// /// Returns a future which resolves to the [`Connection`] / [`SendRequest`] - /// tuple once the HTTP/2.0 handshake has been completed. + /// tuple once the HTTP/2 handshake has been completed. /// /// This function also allows the caller to configure the send payload data /// type. See [Outbound data type] for more details. /// - /// [HTTP/2.0 handshake]: http://httpwg.org/specs/rfc7540.html#ConnectionHeader + /// [HTTP/2 handshake]: http://httpwg.org/specs/rfc7540.html#ConnectionHeader /// [`Connection`]: struct.Connection.html /// [`SendRequest`]: struct.SendRequest.html /// [Outbound data type]: ../index.html#outbound-data-type. 
@@ -1028,7 +1065,7 @@ impl Builder { /// # async fn doc(my_io: T) /// -> Result<((SendRequest, Connection)), h2::Error> /// # { - /// // `client_fut` is a future representing the completion of the HTTP/2.0 + /// // `client_fut` is a future representing the completion of the HTTP/2 /// // handshake. /// let client_fut = Builder::new() /// .handshake(my_io); @@ -1048,7 +1085,7 @@ impl Builder { /// # async fn doc(my_io: T) /// # -> Result<((SendRequest<&'static [u8]>, Connection)), h2::Error> /// # { - /// // `client_fut` is a future representing the completion of the HTTP/2.0 + /// // `client_fut` is a future representing the completion of the HTTP/2 /// // handshake. /// let client_fut = Builder::new() /// .handshake::<_, &'static [u8]>(my_io); @@ -1075,19 +1112,19 @@ impl Default for Builder { } } -/// Creates a new configured HTTP/2.0 client with default configuration +/// Creates a new configured HTTP/2 client with default configuration /// values backed by `io`. /// /// It is expected that `io` already be in an appropriate state to commence -/// the [HTTP/2.0 handshake]. See [Handshake] for more details. +/// the [HTTP/2 handshake]. See [Handshake] for more details. /// /// Returns a future which resolves to the [`Connection`] / [`SendRequest`] -/// tuple once the HTTP/2.0 handshake has been completed. The returned +/// tuple once the HTTP/2 handshake has been completed. The returned /// [`Connection`] instance will be using default configuration values. Use /// [`Builder`] to customize the configuration values used by a [`Connection`] /// instance. /// -/// [HTTP/2.0 handshake]: http://httpwg.org/specs/rfc7540.html#ConnectionHeader +/// [HTTP/2 handshake]: http://httpwg.org/specs/rfc7540.html#ConnectionHeader /// [Handshake]: ../index.html#handshake /// [`Connection`]: struct.Connection.html /// [`SendRequest`]: struct.SendRequest.html @@ -1102,7 +1139,7 @@ impl Default for Builder { /// # async fn doc(my_io: T) -> Result<(), h2::Error> /// # { /// let (send_request, connection) = client::handshake(my_io).await?; -/// // The HTTP/2.0 handshake has completed, now start polling +/// // The HTTP/2 handshake has completed, now start polling /// // `connection` and use `send_request` to send requests to the /// // server. 
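
For illustration, a small sketch of the new configuration surface introduced in this file, assuming the same `h2 = "0.3"` / `tokio = "1"` setup as above: it combines the new `Builder::max_send_buffer_size` with the introspection helpers added further down (`SendRequest::is_extended_connect_protocol_enabled`, `Connection::max_concurrent_send_streams`). The 1 MiB buffer size is an arbitrary placeholder, not a recommended value.

```rust
use bytes::Bytes;
use h2::client::{Builder, Connection, SendRequest};
use tokio::io::{AsyncRead, AsyncWrite};

async fn configured_handshake<T>(
    io: T,
) -> Result<(SendRequest<Bytes>, Connection<T, Bytes>), h2::Error>
where
    T: AsyncRead + AsyncWrite + Unpin,
{
    let (send_request, connection) = Builder::new()
        // New in this update: cap how many bytes a single stream may buffer
        // for writing before its flow control stops polling for capacity.
        .max_send_buffer_size(1024 * 1024)
        .handshake(io)
        .await?;

    // Both values reflect what the server peer advertises in SETTINGS, so
    // they are only meaningful once the server's settings have been received.
    println!(
        "peer allows {} concurrent streams, extended CONNECT enabled: {}",
        connection.max_concurrent_send_streams(),
        send_request.is_extended_connect_protocol_enabled(),
    );

    // The caller still has to poll `connection` (e.g. via `tokio::spawn`)
    // to drive the actual I/O.
    Ok((send_request, connection))
}
```
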
/// # Ok(()) @@ -1115,11 +1152,28 @@ where T: AsyncRead + AsyncWrite + Unpin, { let builder = Builder::new(); - builder.handshake(io).await + builder + .handshake(io) + .instrument(tracing::trace_span!("client_handshake")) + .await } // ===== impl Connection ===== +async fn bind_connection(io: &mut T) -> Result<(), crate::Error> +where + T: AsyncRead + AsyncWrite + Unpin, +{ + tracing::debug!("binding client connection"); + + let msg: &'static [u8] = b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"; + io.write_all(msg).await.map_err(crate::Error::from_io)?; + + tracing::debug!("client connection bound"); + + Ok(()) +} + impl Connection where T: AsyncRead + AsyncWrite + Unpin, @@ -1129,12 +1183,7 @@ where mut io: T, builder: Builder, ) -> Result<(SendRequest, Connection), crate::Error> { - log::debug!("binding client connection"); - - let msg: &'static [u8] = b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"; - io.write_all(msg).await.map_err(crate::Error::from_io)?; - - log::debug!("client connection bound"); + bind_connection(&mut io).await?; // Create the codec let mut codec = Codec::new(io); @@ -1157,6 +1206,7 @@ where proto::Config { next_stream_id: builder.stream_id, initial_max_send_streams: builder.initial_max_send_streams, + max_send_buffer_size: builder.max_send_buffer_size, reset_stream_duration: builder.reset_stream_duration, reset_stream_max: builder.reset_stream_max, settings: builder.settings.clone(), @@ -1224,6 +1274,33 @@ where pub fn ping_pong(&mut self) -> Option { self.inner.take_user_pings().map(PingPong::new) } + + /// Returns the maximum number of concurrent streams that may be initiated + /// by this client. + /// + /// This limit is configured by the server peer by sending the + /// [`SETTINGS_MAX_CONCURRENT_STREAMS` parameter][1] in a `SETTINGS` frame. + /// This method returns the currently acknowledged value recieved from the + /// remote. + /// + /// [1]: https://tools.ietf.org/html/rfc7540#section-5.1.2 + pub fn max_concurrent_send_streams(&self) -> usize { + self.inner.max_send_streams() + } + /// Returns the maximum number of concurrent streams that may be initiated + /// by the server on this connection. + /// + /// This returns the value of the [`SETTINGS_MAX_CONCURRENT_STREAMS` + /// parameter][1] sent in a `SETTINGS` frame that has been + /// acknowledged by the remote peer. The value to be sent is configured by + /// the [`Builder::max_concurrent_streams`][2] method before handshaking + /// with the remote peer. + /// + /// [1]: https://tools.ietf.org/html/rfc7540#section-5.1.2 + /// [2]: ../struct.Builder.html#method.max_concurrent_streams + pub fn max_concurrent_recv_streams(&self) -> usize { + self.inner.max_recv_streams() + } } impl Future for Connection @@ -1375,6 +1452,7 @@ impl Peer { pub fn convert_send_message( id: StreamId, request: Request<()>, + protocol: Option, end_of_stream: bool, ) -> Result { use http::request::Parts; @@ -1394,7 +1472,7 @@ impl Peer { // Build the set pseudo header set. All requests will include `method` // and `path`. - let mut pseudo = Pseudo::request(method, uri); + let mut pseudo = Pseudo::request(method, uri, protocol); if pseudo.scheme.is_none() { // If the scheme is not set, then there are a two options. @@ -1414,7 +1492,7 @@ impl Peer { return Err(UserError::MissingUriSchemeAndAuthority.into()); } else { // This is acceptable as per the above comment. However, - // HTTP/2.0 requires that a scheme is set. Since we are + // HTTP/2 requires that a scheme is set. Since we are // forwarding an HTTP 1.1 request, the scheme is set to // "http". 
pseudo.set_scheme(uri::Scheme::HTTP); @@ -1438,6 +1516,8 @@ impl Peer { impl proto::Peer for Peer { type Poll = Response<()>; + const NAME: &'static str = "Client"; + fn r#dyn() -> proto::DynPeer { proto::DynPeer::Client } @@ -1450,7 +1530,7 @@ impl proto::Peer for Peer { pseudo: Pseudo, fields: HeaderMap, stream_id: StreamId, - ) -> Result { + ) -> Result { let mut b = Response::builder(); b = b.version(Version::HTTP_2); @@ -1464,10 +1544,7 @@ impl proto::Peer for Peer { Err(_) => { // TODO: Should there be more specialized handling for different // kinds of errors - return Err(RecvError::Stream { - id: stream_id, - reason: Reason::PROTOCOL_ERROR, - }); + return Err(Error::library_reset(stream_id, Reason::PROTOCOL_ERROR)); } }; diff --git a/third_party/rust/h2/src/codec/error.rs b/third_party/rust/h2/src/codec/error.rs index 2c6b2961d096..0acb913e525f 100644 --- a/third_party/rust/h2/src/codec/error.rs +++ b/third_party/rust/h2/src/codec/error.rs @@ -1,26 +1,12 @@ -use crate::frame::{Reason, StreamId}; +use crate::proto::Error; use std::{error, fmt, io}; -/// Errors that are received -#[derive(Debug)] -pub enum RecvError { - Connection(Reason), - Stream { id: StreamId, reason: Reason }, - Io(io::Error), -} - /// Errors caused by sending a message #[derive(Debug)] pub enum SendError { - /// User error + Connection(Error), User(UserError), - - /// Connection error prevents sending. - Connection(Reason), - - /// I/O error - Io(io::Error), } /// Errors caused by users of the library @@ -35,9 +21,6 @@ pub enum UserError { /// The payload size is too big PayloadTooBig, - /// A header size is too big - HeaderTooBig, - /// The application attempted to initiate too many streams to remote. Rejected, @@ -63,28 +46,9 @@ pub enum UserError { /// Tries to update local SETTINGS while ACK has not been received. SendSettingsWhilePending, -} -// ===== impl RecvError ===== - -impl From for RecvError { - fn from(src: io::Error) -> Self { - RecvError::Io(src) - } -} - -impl error::Error for RecvError {} - -impl fmt::Display for RecvError { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - use self::RecvError::*; - - match *self { - Connection(ref reason) => reason.fmt(fmt), - Stream { ref reason, .. 
} => reason.fmt(fmt), - Io(ref e) => e.fmt(fmt), - } - } + /// Tries to send push promise to peer who has disabled server push + PeerDisabledServerPush, } // ===== impl SendError ===== @@ -93,19 +57,16 @@ impl error::Error for SendError {} impl fmt::Display for SendError { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - use self::SendError::*; - match *self { - User(ref e) => e.fmt(fmt), - Connection(ref reason) => reason.fmt(fmt), - Io(ref e) => e.fmt(fmt), + Self::Connection(ref e) => e.fmt(fmt), + Self::User(ref e) => e.fmt(fmt), } } } impl From for SendError { fn from(src: io::Error) -> Self { - SendError::Io(src) + Self::Connection(src.into()) } } @@ -127,7 +88,6 @@ impl fmt::Display for UserError { InactiveStreamId => "inactive stream", UnexpectedFrameType => "unexpected frame type", PayloadTooBig => "payload too big", - HeaderTooBig => "header too big", Rejected => "rejected", ReleaseCapacityTooBig => "release capacity too big", OverflowedStreamId => "stream ID overflowed", @@ -136,6 +96,7 @@ impl fmt::Display for UserError { PollResetAfterSendResponse => "poll_reset after send_response is illegal", SendPingWhilePending => "send_ping before received previous pong", SendSettingsWhilePending => "sending SETTINGS before received previous ACK", + PeerDisabledServerPush => "sending PUSH_PROMISE to peer who disabled server push", }) } } diff --git a/third_party/rust/h2/src/codec/framed_read.rs b/third_party/rust/h2/src/codec/framed_read.rs index 76a236ed2cd4..7c3bbb3ba2e2 100644 --- a/third_party/rust/h2/src/codec/framed_read.rs +++ b/third_party/rust/h2/src/codec/framed_read.rs @@ -1,8 +1,8 @@ -use crate::codec::RecvError; use crate::frame::{self, Frame, Kind, Reason}; use crate::frame::{ DEFAULT_MAX_FRAME_SIZE, DEFAULT_SETTINGS_HEADER_TABLE_SIZE, MAX_MAX_FRAME_SIZE, }; +use crate::proto::Error; use crate::hpack; @@ -59,247 +59,6 @@ impl FramedRead { } } - fn decode_frame(&mut self, mut bytes: BytesMut) -> Result, RecvError> { - use self::RecvError::*; - - log::trace!("decoding frame from {}B", bytes.len()); - - // Parse the head - let head = frame::Head::parse(&bytes); - - if self.partial.is_some() && head.kind() != Kind::Continuation { - proto_err!(conn: "expected CONTINUATION, got {:?}", head.kind()); - return Err(Connection(Reason::PROTOCOL_ERROR)); - } - - let kind = head.kind(); - - log::trace!(" -> kind={:?}", kind); - - macro_rules! header_block { - ($frame:ident, $head:ident, $bytes:ident) => ({ - // Drop the frame header - // TODO: Change to drain: carllerche/bytes#130 - let _ = $bytes.split_to(frame::HEADER_LEN); - - // Parse the header frame w/o parsing the payload - let (mut frame, mut payload) = match frame::$frame::load($head, $bytes) { - Ok(res) => res, - Err(frame::Error::InvalidDependencyId) => { - proto_err!(stream: "invalid HEADERS dependency ID"); - // A stream cannot depend on itself. An endpoint MUST - // treat this as a stream error (Section 5.4.2) of type - // `PROTOCOL_ERROR`. 
- return Err(Stream { - id: $head.stream_id(), - reason: Reason::PROTOCOL_ERROR, - }); - }, - Err(e) => { - proto_err!(conn: "failed to load frame; err={:?}", e); - return Err(Connection(Reason::PROTOCOL_ERROR)); - } - }; - - let is_end_headers = frame.is_end_headers(); - - // Load the HPACK encoded headers - match frame.load_hpack(&mut payload, self.max_header_list_size, &mut self.hpack) { - Ok(_) => {}, - Err(frame::Error::Hpack(hpack::DecoderError::NeedMore(_))) if !is_end_headers => {}, - Err(frame::Error::MalformedMessage) => { - let id = $head.stream_id(); - proto_err!(stream: "malformed header block; stream={:?}", id); - return Err(Stream { - id, - reason: Reason::PROTOCOL_ERROR, - }); - }, - Err(e) => { - proto_err!(conn: "failed HPACK decoding; err={:?}", e); - return Err(Connection(Reason::PROTOCOL_ERROR)); - } - } - - if is_end_headers { - frame.into() - } else { - log::trace!("loaded partial header block"); - // Defer returning the frame - self.partial = Some(Partial { - frame: Continuable::$frame(frame), - buf: payload, - }); - - return Ok(None); - } - }); - } - - let frame = match kind { - Kind::Settings => { - let res = frame::Settings::load(head, &bytes[frame::HEADER_LEN..]); - - res.map_err(|e| { - proto_err!(conn: "failed to load SETTINGS frame; err={:?}", e); - Connection(Reason::PROTOCOL_ERROR) - })? - .into() - } - Kind::Ping => { - let res = frame::Ping::load(head, &bytes[frame::HEADER_LEN..]); - - res.map_err(|e| { - proto_err!(conn: "failed to load PING frame; err={:?}", e); - Connection(Reason::PROTOCOL_ERROR) - })? - .into() - } - Kind::WindowUpdate => { - let res = frame::WindowUpdate::load(head, &bytes[frame::HEADER_LEN..]); - - res.map_err(|e| { - proto_err!(conn: "failed to load WINDOW_UPDATE frame; err={:?}", e); - Connection(Reason::PROTOCOL_ERROR) - })? - .into() - } - Kind::Data => { - let _ = bytes.split_to(frame::HEADER_LEN); - let res = frame::Data::load(head, bytes.freeze()); - - // TODO: Should this always be connection level? Probably not... - res.map_err(|e| { - proto_err!(conn: "failed to load DATA frame; err={:?}", e); - Connection(Reason::PROTOCOL_ERROR) - })? - .into() - } - Kind::Headers => header_block!(Headers, head, bytes), - Kind::Reset => { - let res = frame::Reset::load(head, &bytes[frame::HEADER_LEN..]); - res.map_err(|e| { - proto_err!(conn: "failed to load RESET frame; err={:?}", e); - Connection(Reason::PROTOCOL_ERROR) - })? - .into() - } - Kind::GoAway => { - let res = frame::GoAway::load(&bytes[frame::HEADER_LEN..]); - res.map_err(|e| { - proto_err!(conn: "failed to load GO_AWAY frame; err={:?}", e); - Connection(Reason::PROTOCOL_ERROR) - })? - .into() - } - Kind::PushPromise => header_block!(PushPromise, head, bytes), - Kind::Priority => { - if head.stream_id() == 0 { - // Invalid stream identifier - proto_err!(conn: "invalid stream ID 0"); - return Err(Connection(Reason::PROTOCOL_ERROR)); - } - - match frame::Priority::load(head, &bytes[frame::HEADER_LEN..]) { - Ok(frame) => frame.into(), - Err(frame::Error::InvalidDependencyId) => { - // A stream cannot depend on itself. An endpoint MUST - // treat this as a stream error (Section 5.4.2) of type - // `PROTOCOL_ERROR`. 
- let id = head.stream_id(); - proto_err!(stream: "PRIORITY invalid dependency ID; stream={:?}", id); - return Err(Stream { - id, - reason: Reason::PROTOCOL_ERROR, - }); - } - Err(e) => { - proto_err!(conn: "failed to load PRIORITY frame; err={:?};", e); - return Err(Connection(Reason::PROTOCOL_ERROR)); - } - } - } - Kind::Continuation => { - let is_end_headers = (head.flag() & 0x4) == 0x4; - - let mut partial = match self.partial.take() { - Some(partial) => partial, - None => { - proto_err!(conn: "received unexpected CONTINUATION frame"); - return Err(Connection(Reason::PROTOCOL_ERROR)); - } - }; - - // The stream identifiers must match - if partial.frame.stream_id() != head.stream_id() { - proto_err!(conn: "CONTINUATION frame stream ID does not match previous frame stream ID"); - return Err(Connection(Reason::PROTOCOL_ERROR)); - } - - // Extend the buf - if partial.buf.is_empty() { - partial.buf = bytes.split_off(frame::HEADER_LEN); - } else { - if partial.frame.is_over_size() { - // If there was left over bytes previously, they may be - // needed to continue decoding, even though we will - // be ignoring this frame. This is done to keep the HPACK - // decoder state up-to-date. - // - // Still, we need to be careful, because if a malicious - // attacker were to try to send a gigantic string, such - // that it fits over multiple header blocks, we could - // grow memory uncontrollably again, and that'd be a shame. - // - // Instead, we use a simple heuristic to determine if - // we should continue to ignore decoding, or to tell - // the attacker to go away. - if partial.buf.len() + bytes.len() > self.max_header_list_size { - proto_err!(conn: "CONTINUATION frame header block size over ignorable limit"); - return Err(Connection(Reason::COMPRESSION_ERROR)); - } - } - partial.buf.extend_from_slice(&bytes[frame::HEADER_LEN..]); - } - - match partial.frame.load_hpack( - &mut partial.buf, - self.max_header_list_size, - &mut self.hpack, - ) { - Ok(_) => {} - Err(frame::Error::Hpack(hpack::DecoderError::NeedMore(_))) - if !is_end_headers => {} - Err(frame::Error::MalformedMessage) => { - let id = head.stream_id(); - proto_err!(stream: "malformed CONTINUATION frame; stream={:?}", id); - return Err(Stream { - id, - reason: Reason::PROTOCOL_ERROR, - }); - } - Err(e) => { - proto_err!(conn: "failed HPACK decoding; err={:?}", e); - return Err(Connection(Reason::PROTOCOL_ERROR)); - } - } - - if is_end_headers { - partial.frame.into() - } else { - self.partial = Some(partial); - return Ok(None); - } - } - Kind::Unknown => { - // Unknown frames are ignored - return Ok(None); - } - }; - - Ok(Some(frame)) - } - pub fn get_ref(&self) -> &T { self.inner.get_ref() } @@ -331,35 +90,279 @@ impl FramedRead { } } +/// Decodes a frame. +/// +/// This method is intentionally de-generified and outlined because it is very large. +fn decode_frame( + hpack: &mut hpack::Decoder, + max_header_list_size: usize, + partial_inout: &mut Option, + mut bytes: BytesMut, +) -> Result, Error> { + let span = tracing::trace_span!("FramedRead::decode_frame", offset = bytes.len()); + let _e = span.enter(); + + tracing::trace!("decoding frame from {}B", bytes.len()); + + // Parse the head + let head = frame::Head::parse(&bytes); + + if partial_inout.is_some() && head.kind() != Kind::Continuation { + proto_err!(conn: "expected CONTINUATION, got {:?}", head.kind()); + return Err(Error::library_go_away(Reason::PROTOCOL_ERROR).into()); + } + + let kind = head.kind(); + + tracing::trace!(frame.kind = ?kind); + + macro_rules! 
header_block { + ($frame:ident, $head:ident, $bytes:ident) => ({ + // Drop the frame header + // TODO: Change to drain: carllerche/bytes#130 + let _ = $bytes.split_to(frame::HEADER_LEN); + + // Parse the header frame w/o parsing the payload + let (mut frame, mut payload) = match frame::$frame::load($head, $bytes) { + Ok(res) => res, + Err(frame::Error::InvalidDependencyId) => { + proto_err!(stream: "invalid HEADERS dependency ID"); + // A stream cannot depend on itself. An endpoint MUST + // treat this as a stream error (Section 5.4.2) of type + // `PROTOCOL_ERROR`. + return Err(Error::library_reset($head.stream_id(), Reason::PROTOCOL_ERROR)); + }, + Err(e) => { + proto_err!(conn: "failed to load frame; err={:?}", e); + return Err(Error::library_go_away(Reason::PROTOCOL_ERROR)); + } + }; + + let is_end_headers = frame.is_end_headers(); + + // Load the HPACK encoded headers + match frame.load_hpack(&mut payload, max_header_list_size, hpack) { + Ok(_) => {}, + Err(frame::Error::Hpack(hpack::DecoderError::NeedMore(_))) if !is_end_headers => {}, + Err(frame::Error::MalformedMessage) => { + let id = $head.stream_id(); + proto_err!(stream: "malformed header block; stream={:?}", id); + return Err(Error::library_reset(id, Reason::PROTOCOL_ERROR)); + }, + Err(e) => { + proto_err!(conn: "failed HPACK decoding; err={:?}", e); + return Err(Error::library_go_away(Reason::PROTOCOL_ERROR)); + } + } + + if is_end_headers { + frame.into() + } else { + tracing::trace!("loaded partial header block"); + // Defer returning the frame + *partial_inout = Some(Partial { + frame: Continuable::$frame(frame), + buf: payload, + }); + + return Ok(None); + } + }); + } + + let frame = match kind { + Kind::Settings => { + let res = frame::Settings::load(head, &bytes[frame::HEADER_LEN..]); + + res.map_err(|e| { + proto_err!(conn: "failed to load SETTINGS frame; err={:?}", e); + Error::library_go_away(Reason::PROTOCOL_ERROR) + })? + .into() + } + Kind::Ping => { + let res = frame::Ping::load(head, &bytes[frame::HEADER_LEN..]); + + res.map_err(|e| { + proto_err!(conn: "failed to load PING frame; err={:?}", e); + Error::library_go_away(Reason::PROTOCOL_ERROR) + })? + .into() + } + Kind::WindowUpdate => { + let res = frame::WindowUpdate::load(head, &bytes[frame::HEADER_LEN..]); + + res.map_err(|e| { + proto_err!(conn: "failed to load WINDOW_UPDATE frame; err={:?}", e); + Error::library_go_away(Reason::PROTOCOL_ERROR) + })? + .into() + } + Kind::Data => { + let _ = bytes.split_to(frame::HEADER_LEN); + let res = frame::Data::load(head, bytes.freeze()); + + // TODO: Should this always be connection level? Probably not... + res.map_err(|e| { + proto_err!(conn: "failed to load DATA frame; err={:?}", e); + Error::library_go_away(Reason::PROTOCOL_ERROR) + })? + .into() + } + Kind::Headers => header_block!(Headers, head, bytes), + Kind::Reset => { + let res = frame::Reset::load(head, &bytes[frame::HEADER_LEN..]); + res.map_err(|e| { + proto_err!(conn: "failed to load RESET frame; err={:?}", e); + Error::library_go_away(Reason::PROTOCOL_ERROR) + })? + .into() + } + Kind::GoAway => { + let res = frame::GoAway::load(&bytes[frame::HEADER_LEN..]); + res.map_err(|e| { + proto_err!(conn: "failed to load GO_AWAY frame; err={:?}", e); + Error::library_go_away(Reason::PROTOCOL_ERROR) + })? 
+ .into() + } + Kind::PushPromise => header_block!(PushPromise, head, bytes), + Kind::Priority => { + if head.stream_id() == 0 { + // Invalid stream identifier + proto_err!(conn: "invalid stream ID 0"); + return Err(Error::library_go_away(Reason::PROTOCOL_ERROR).into()); + } + + match frame::Priority::load(head, &bytes[frame::HEADER_LEN..]) { + Ok(frame) => frame.into(), + Err(frame::Error::InvalidDependencyId) => { + // A stream cannot depend on itself. An endpoint MUST + // treat this as a stream error (Section 5.4.2) of type + // `PROTOCOL_ERROR`. + let id = head.stream_id(); + proto_err!(stream: "PRIORITY invalid dependency ID; stream={:?}", id); + return Err(Error::library_reset(id, Reason::PROTOCOL_ERROR)); + } + Err(e) => { + proto_err!(conn: "failed to load PRIORITY frame; err={:?};", e); + return Err(Error::library_go_away(Reason::PROTOCOL_ERROR)); + } + } + } + Kind::Continuation => { + let is_end_headers = (head.flag() & 0x4) == 0x4; + + let mut partial = match partial_inout.take() { + Some(partial) => partial, + None => { + proto_err!(conn: "received unexpected CONTINUATION frame"); + return Err(Error::library_go_away(Reason::PROTOCOL_ERROR).into()); + } + }; + + // The stream identifiers must match + if partial.frame.stream_id() != head.stream_id() { + proto_err!(conn: "CONTINUATION frame stream ID does not match previous frame stream ID"); + return Err(Error::library_go_away(Reason::PROTOCOL_ERROR).into()); + } + + // Extend the buf + if partial.buf.is_empty() { + partial.buf = bytes.split_off(frame::HEADER_LEN); + } else { + if partial.frame.is_over_size() { + // If there was left over bytes previously, they may be + // needed to continue decoding, even though we will + // be ignoring this frame. This is done to keep the HPACK + // decoder state up-to-date. + // + // Still, we need to be careful, because if a malicious + // attacker were to try to send a gigantic string, such + // that it fits over multiple header blocks, we could + // grow memory uncontrollably again, and that'd be a shame. + // + // Instead, we use a simple heuristic to determine if + // we should continue to ignore decoding, or to tell + // the attacker to go away. 
+ if partial.buf.len() + bytes.len() > max_header_list_size { + proto_err!(conn: "CONTINUATION frame header block size over ignorable limit"); + return Err(Error::library_go_away(Reason::COMPRESSION_ERROR).into()); + } + } + partial.buf.extend_from_slice(&bytes[frame::HEADER_LEN..]); + } + + match partial + .frame + .load_hpack(&mut partial.buf, max_header_list_size, hpack) + { + Ok(_) => {} + Err(frame::Error::Hpack(hpack::DecoderError::NeedMore(_))) if !is_end_headers => {} + Err(frame::Error::MalformedMessage) => { + let id = head.stream_id(); + proto_err!(stream: "malformed CONTINUATION frame; stream={:?}", id); + return Err(Error::library_reset(id, Reason::PROTOCOL_ERROR)); + } + Err(e) => { + proto_err!(conn: "failed HPACK decoding; err={:?}", e); + return Err(Error::library_go_away(Reason::PROTOCOL_ERROR)); + } + } + + if is_end_headers { + partial.frame.into() + } else { + *partial_inout = Some(partial); + return Ok(None); + } + } + Kind::Unknown => { + // Unknown frames are ignored + return Ok(None); + } + }; + + Ok(Some(frame)) +} + impl Stream for FramedRead where T: AsyncRead + Unpin, { - type Item = Result; + type Item = Result; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let span = tracing::trace_span!("FramedRead::poll_next"); + let _e = span.enter(); loop { - log::trace!("poll"); + tracing::trace!("poll"); let bytes = match ready!(Pin::new(&mut self.inner).poll_next(cx)) { Some(Ok(bytes)) => bytes, Some(Err(e)) => return Poll::Ready(Some(Err(map_err(e)))), None => return Poll::Ready(None), }; - log::trace!("poll; bytes={}B", bytes.len()); - if let Some(frame) = self.decode_frame(bytes)? { - log::debug!("received; frame={:?}", frame); + tracing::trace!(read.bytes = bytes.len()); + let Self { + ref mut hpack, + max_header_list_size, + ref mut partial, + .. + } = *self; + if let Some(frame) = decode_frame(hpack, max_header_list_size, partial, bytes)? { + tracing::debug!(?frame, "received"); return Poll::Ready(Some(Ok(frame))); } } } } -fn map_err(err: io::Error) -> RecvError { +fn map_err(err: io::Error) -> Error { if let io::ErrorKind::InvalidData = err.kind() { if let Some(custom) = err.get_ref() { if custom.is::() { - return RecvError::Connection(Reason::FRAME_SIZE_ERROR); + return Error::library_go_away(Reason::FRAME_SIZE_ERROR); } } } diff --git a/third_party/rust/h2/src/codec/framed_write.rs b/third_party/rust/h2/src/codec/framed_write.rs index c63f122287e2..4b1b4accc455 100644 --- a/third_party/rust/h2/src/codec/framed_write.rs +++ b/third_party/rust/h2/src/codec/framed_write.rs @@ -3,15 +3,12 @@ use crate::codec::UserError::*; use crate::frame::{self, Frame, FrameSize}; use crate::hpack; -use bytes::{ - buf::{BufExt, BufMutExt}, - Buf, BufMut, BytesMut, -}; +use bytes::{Buf, BufMut, BytesMut}; use std::pin::Pin; use std::task::{Context, Poll}; -use tokio::io::{AsyncRead, AsyncWrite}; +use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; -use std::io::{self, Cursor}; +use std::io::{self, Cursor, IoSlice}; // A macro to get around a method needing to borrow &mut self macro_rules! limited_write_buf { @@ -26,6 +23,11 @@ pub struct FramedWrite { /// Upstream `AsyncWrite` inner: T, + encoder: Encoder, +} + +#[derive(Debug)] +struct Encoder { /// HPACK encoder hpack: hpack::Encoder, @@ -42,6 +44,9 @@ pub struct FramedWrite { /// Max frame size, this is specified by the peer max_frame_size: FrameSize, + + /// Whether or not the wrapped `AsyncWrite` supports vectored IO. 
+ is_write_vectored: bool, } #[derive(Debug)] @@ -50,7 +55,7 @@ enum Next { Continuation(frame::Continuation), } -/// Initialze the connection with this amount of write buffer. +/// Initialize the connection with this amount of write buffer. /// /// The minimum MAX_FRAME_SIZE is 16kb, so always be able to send a HEADERS /// frame that big. @@ -71,13 +76,17 @@ where B: Buf, { pub fn new(inner: T) -> FramedWrite { + let is_write_vectored = inner.is_write_vectored(); FramedWrite { inner, - hpack: hpack::Encoder::default(), - buf: Cursor::new(BytesMut::with_capacity(DEFAULT_BUFFER_CAPACITY)), - next: None, - last_data_frame: None, - max_frame_size: frame::DEFAULT_MAX_FRAME_SIZE, + encoder: Encoder { + hpack: hpack::Encoder::default(), + buf: Cursor::new(BytesMut::with_capacity(DEFAULT_BUFFER_CAPACITY)), + next: None, + last_data_frame: None, + max_frame_size: frame::DEFAULT_MAX_FRAME_SIZE, + is_write_vectored, + }, } } @@ -86,11 +95,11 @@ where /// Calling this function may result in the current contents of the buffer /// to be flushed to `T`. pub fn poll_ready(&mut self, cx: &mut Context) -> Poll> { - if !self.has_capacity() { + if !self.encoder.has_capacity() { // Try flushing ready!(self.flush(cx))?; - if !self.has_capacity() { + if !self.encoder.has_capacity() { return Poll::Pending; } } @@ -103,10 +112,124 @@ where /// `poll_ready` must be called first to ensure that a frame may be /// accepted. pub fn buffer(&mut self, item: Frame) -> Result<(), UserError> { + self.encoder.buffer(item) + } + + /// Flush buffered data to the wire + pub fn flush(&mut self, cx: &mut Context) -> Poll> { + let span = tracing::trace_span!("FramedWrite::flush"); + let _e = span.enter(); + + loop { + while !self.encoder.is_empty() { + match self.encoder.next { + Some(Next::Data(ref mut frame)) => { + tracing::trace!(queued_data_frame = true); + let mut buf = (&mut self.encoder.buf).chain(frame.payload_mut()); + ready!(write( + &mut self.inner, + self.encoder.is_write_vectored, + &mut buf, + cx, + ))? + } + _ => { + tracing::trace!(queued_data_frame = false); + ready!(write( + &mut self.inner, + self.encoder.is_write_vectored, + &mut self.encoder.buf, + cx, + ))? + } + } + } + + match self.encoder.unset_frame() { + ControlFlow::Continue => (), + ControlFlow::Break => break, + } + } + + tracing::trace!("flushing buffer"); + // Flush the upstream + ready!(Pin::new(&mut self.inner).poll_flush(cx))?; + + Poll::Ready(Ok(())) + } + + /// Close the codec + pub fn shutdown(&mut self, cx: &mut Context) -> Poll> { + ready!(self.flush(cx))?; + Pin::new(&mut self.inner).poll_shutdown(cx) + } +} + +fn write( + writer: &mut T, + is_write_vectored: bool, + buf: &mut B, + cx: &mut Context<'_>, +) -> Poll> +where + T: AsyncWrite + Unpin, + B: Buf, +{ + // TODO(eliza): when tokio-util 0.5.1 is released, this + // could just use `poll_write_buf`... + const MAX_IOVS: usize = 64; + let n = if is_write_vectored { + let mut bufs = [IoSlice::new(&[]); MAX_IOVS]; + let cnt = buf.chunks_vectored(&mut bufs); + ready!(Pin::new(writer).poll_write_vectored(cx, &bufs[..cnt]))? + } else { + ready!(Pin::new(writer).poll_write(cx, buf.chunk()))? 
+ }; + buf.advance(n); + Ok(()).into() +} + +#[must_use] +enum ControlFlow { + Continue, + Break, +} + +impl Encoder +where + B: Buf, +{ + fn unset_frame(&mut self) -> ControlFlow { + // Clear internal buffer + self.buf.set_position(0); + self.buf.get_mut().clear(); + + // The data frame has been written, so unset it + match self.next.take() { + Some(Next::Data(frame)) => { + self.last_data_frame = Some(frame); + debug_assert!(self.is_empty()); + ControlFlow::Break + } + Some(Next::Continuation(frame)) => { + // Buffer the continuation frame, then try to write again + let mut buf = limited_write_buf!(self); + if let Some(continuation) = frame.encode(&mut buf) { + self.next = Some(Next::Continuation(continuation)); + } + ControlFlow::Continue + } + None => ControlFlow::Break, + } + } + + fn buffer(&mut self, item: Frame) -> Result<(), UserError> { // Ensure that we have enough capacity to accept the write. assert!(self.has_capacity()); + let span = tracing::trace_span!("FramedWrite::buffer", frame = ?item); + let _e = span.enter(); - log::debug!("send; frame={:?}", item); + tracing::debug!(frame = ?item, "send"); match item { Frame::Data(mut v) => { @@ -150,103 +273,37 @@ where } Frame::Settings(v) => { v.encode(self.buf.get_mut()); - log::trace!("encoded settings; rem={:?}", self.buf.remaining()); + tracing::trace!(rem = self.buf.remaining(), "encoded settings"); } Frame::GoAway(v) => { v.encode(self.buf.get_mut()); - log::trace!("encoded go_away; rem={:?}", self.buf.remaining()); + tracing::trace!(rem = self.buf.remaining(), "encoded go_away"); } Frame::Ping(v) => { v.encode(self.buf.get_mut()); - log::trace!("encoded ping; rem={:?}", self.buf.remaining()); + tracing::trace!(rem = self.buf.remaining(), "encoded ping"); } Frame::WindowUpdate(v) => { v.encode(self.buf.get_mut()); - log::trace!("encoded window_update; rem={:?}", self.buf.remaining()); + tracing::trace!(rem = self.buf.remaining(), "encoded window_update"); } Frame::Priority(_) => { /* v.encode(self.buf.get_mut()); - log::trace!("encoded priority; rem={:?}", self.buf.remaining()); + tracing::trace!("encoded priority; rem={:?}", self.buf.remaining()); */ unimplemented!(); } Frame::Reset(v) => { v.encode(self.buf.get_mut()); - log::trace!("encoded reset; rem={:?}", self.buf.remaining()); + tracing::trace!(rem = self.buf.remaining(), "encoded reset"); } } Ok(()) } - /// Flush buffered data to the wire - pub fn flush(&mut self, cx: &mut Context) -> Poll> { - log::trace!("flush"); - - loop { - while !self.is_empty() { - match self.next { - Some(Next::Data(ref mut frame)) => { - log::trace!(" -> queued data frame"); - let mut buf = (&mut self.buf).chain(frame.payload_mut()); - ready!(Pin::new(&mut self.inner).poll_write_buf(cx, &mut buf))?; - } - _ => { - log::trace!(" -> not a queued data frame"); - ready!(Pin::new(&mut self.inner).poll_write_buf(cx, &mut self.buf))?; - } - } - } - - // Clear internal buffer - self.buf.set_position(0); - self.buf.get_mut().clear(); - - // The data frame has been written, so unset it - match self.next.take() { - Some(Next::Data(frame)) => { - self.last_data_frame = Some(frame); - debug_assert!(self.is_empty()); - break; - } - Some(Next::Continuation(frame)) => { - // Buffer the continuation frame, then try to write again - let mut buf = limited_write_buf!(self); - if let Some(continuation) = frame.encode(&mut self.hpack, &mut buf) { - // We previously had a CONTINUATION, and after encoding - // it, we got *another* one? Let's just double check - // that at least some progress is being made... 
- if self.buf.get_ref().len() == frame::HEADER_LEN { - // If *only* the CONTINUATION frame header was - // written, and *no* header fields, we're stuck - // in a loop... - panic!("CONTINUATION frame write loop; header value too big to encode"); - } - - self.next = Some(Next::Continuation(continuation)); - } - } - None => { - break; - } - } - } - - log::trace!("flushing buffer"); - // Flush the upstream - ready!(Pin::new(&mut self.inner).poll_flush(cx))?; - - Poll::Ready(Ok(())) - } - - /// Close the codec - pub fn shutdown(&mut self, cx: &mut Context) -> Poll> { - ready!(self.flush(cx))?; - Pin::new(&mut self.inner).poll_shutdown(cx) - } - fn has_capacity(&self) -> bool { self.next.is_none() && self.buf.get_ref().remaining_mut() >= MIN_BUFFER_CAPACITY } @@ -259,26 +316,32 @@ where } } +impl Encoder { + fn max_frame_size(&self) -> usize { + self.max_frame_size as usize + } +} + impl FramedWrite { /// Returns the max frame size that can be sent pub fn max_frame_size(&self) -> usize { - self.max_frame_size as usize + self.encoder.max_frame_size() } /// Set the peer's max frame size. pub fn set_max_frame_size(&mut self, val: usize) { assert!(val <= frame::MAX_MAX_FRAME_SIZE as usize); - self.max_frame_size = val as FrameSize; + self.encoder.max_frame_size = val as FrameSize; } /// Set the peer's header table size. pub fn set_header_table_size(&mut self, val: usize) { - self.hpack.update_max_size(val); + self.encoder.hpack.update_max_size(val); } /// Retrieve the last data frame that has been sent pub fn take_last_data_frame(&mut self) -> Option> { - self.last_data_frame.take() + self.encoder.last_data_frame.take() } pub fn get_mut(&mut self) -> &mut T { @@ -287,25 +350,13 @@ impl FramedWrite { } impl AsyncRead for FramedWrite { - unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [std::mem::MaybeUninit]) -> bool { - self.inner.prepare_uninitialized_buffer(buf) - } - fn poll_read( mut self: Pin<&mut Self>, cx: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { + buf: &mut ReadBuf, + ) -> Poll> { Pin::new(&mut self.inner).poll_read(cx, buf) } - - fn poll_read_buf( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut Buf, - ) -> Poll> { - Pin::new(&mut self.inner).poll_read_buf(cx, buf) - } } // We never project the Pin to `B`. diff --git a/third_party/rust/h2/src/codec/mod.rs b/third_party/rust/h2/src/codec/mod.rs index 7d0ab73d8f55..359adf6e47d4 100644 --- a/third_party/rust/h2/src/codec/mod.rs +++ b/third_party/rust/h2/src/codec/mod.rs @@ -2,12 +2,13 @@ mod error; mod framed_read; mod framed_write; -pub use self::error::{RecvError, SendError, UserError}; +pub use self::error::{SendError, UserError}; use self::framed_read::FramedRead; use self::framed_write::FramedWrite; use crate::frame::{self, Data, Frame}; +use crate::proto::Error; use bytes::Buf; use futures_core::Stream; @@ -155,7 +156,7 @@ impl Stream for Codec where T: AsyncRead + Unpin, { - type Item = Result; + type Item = Result; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { Pin::new(&mut self.inner).poll_next(cx) diff --git a/third_party/rust/h2/src/error.rs b/third_party/rust/h2/src/error.rs index 372bac2eea72..6c8f6ed80d79 100644 --- a/third_party/rust/h2/src/error.rs +++ b/third_party/rust/h2/src/error.rs @@ -1,11 +1,13 @@ use crate::codec::{SendError, UserError}; -use crate::proto; +use crate::frame::StreamId; +use crate::proto::{self, Initiator}; +use bytes::Bytes; use std::{error, fmt, io}; pub use crate::frame::Reason; -/// Represents HTTP/2.0 operation errors. 
+/// Represents HTTP/2 operation errors. /// /// `Error` covers error cases raised by protocol errors caused by the /// peer, I/O (transport) errors, and errors caused by the user of the library. @@ -22,11 +24,14 @@ pub struct Error { #[derive(Debug)] enum Kind { - /// An error caused by an action taken by the remote peer. - /// - /// This is either an error received by the peer or caused by an invalid - /// action taken by the peer (i.e. a protocol error). - Proto(Reason), + /// A RST_STREAM frame was received or sent. + Reset(StreamId, Reason, Initiator), + + /// A GO_AWAY frame was received or sent. + GoAway(Bytes, Reason, Initiator), + + /// The user created an error from a bare Reason. + Reason(Reason), /// An error resulting from an invalid action taken by the user of this /// library. @@ -45,12 +50,14 @@ impl Error { /// action taken by the peer (i.e. a protocol error). pub fn reason(&self) -> Option { match self.kind { - Kind::Proto(reason) => Some(reason), + Kind::Reset(_, reason, _) | Kind::GoAway(_, reason, _) | Kind::Reason(reason) => { + Some(reason) + } _ => None, } } - /// Returns the true if the error is an io::Error + /// Returns true if the error is an io::Error pub fn is_io(&self) -> bool { match self.kind { Kind::Io(_) => true, @@ -79,6 +86,21 @@ impl Error { kind: Kind::Io(err), } } + + /// Returns true if the error is from a `GOAWAY`. + pub fn is_go_away(&self) -> bool { + matches!(self.kind, Kind::GoAway(..)) + } + + /// Returns true if the error was received in a frame from the remote. + /// + /// Such as from a received `RST_STREAM` or `GOAWAY` frame. + pub fn is_remote(&self) -> bool { + matches!( + self.kind, + Kind::GoAway(_, _, Initiator::Remote) | Kind::Reset(_, _, Initiator::Remote) + ) + } } impl From for Error { @@ -87,8 +109,13 @@ impl From for Error { Error { kind: match src { - Proto(reason) => Kind::Proto(reason), - Io(e) => Kind::Io(e), + Reset(stream_id, reason, initiator) => Kind::Reset(stream_id, reason, initiator), + GoAway(debug_data, reason, initiator) => { + Kind::GoAway(debug_data, reason, initiator) + } + Io(kind, inner) => { + Kind::Io(inner.map_or_else(|| kind.into(), |inner| io::Error::new(kind, inner))) + } }, } } @@ -97,7 +124,7 @@ impl From for Error { impl From for Error { fn from(src: Reason) -> Error { Error { - kind: Kind::Proto(src), + kind: Kind::Reason(src), } } } @@ -106,8 +133,7 @@ impl From for Error { fn from(src: SendError) -> Error { match src { SendError::User(e) => e.into(), - SendError::Connection(reason) => reason.into(), - SendError::Io(e) => Error::from_io(e), + SendError::Connection(e) => e.into(), } } } @@ -122,14 +148,51 @@ impl From for Error { impl fmt::Display for Error { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - use self::Kind::*; + let debug_data = match self.kind { + Kind::Reset(_, reason, Initiator::User) => { + return write!(fmt, "stream error sent by user: {}", reason) + } + Kind::Reset(_, reason, Initiator::Library) => { + return write!(fmt, "stream error detected: {}", reason) + } + Kind::Reset(_, reason, Initiator::Remote) => { + return write!(fmt, "stream error received: {}", reason) + } + Kind::GoAway(ref debug_data, reason, Initiator::User) => { + write!(fmt, "connection error sent by user: {}", reason)?; + debug_data + } + Kind::GoAway(ref debug_data, reason, Initiator::Library) => { + write!(fmt, "connection error detected: {}", reason)?; + debug_data + } + Kind::GoAway(ref debug_data, reason, Initiator::Remote) => { + write!(fmt, "connection error received: {}", reason)?; + 
debug_data + } + Kind::Reason(reason) => return write!(fmt, "protocol error: {}", reason), + Kind::User(ref e) => return write!(fmt, "user error: {}", e), + Kind::Io(ref e) => return e.fmt(fmt), + }; - match self.kind { - Proto(ref reason) => write!(fmt, "protocol error: {}", reason), - User(ref e) => write!(fmt, "user error: {}", e), - Io(ref e) => fmt::Display::fmt(e, fmt), + if !debug_data.is_empty() { + write!(fmt, " ({:?})", debug_data)?; } + + Ok(()) } } impl error::Error for Error {} + +#[cfg(test)] +mod tests { + use super::Error; + use crate::Reason; + + #[test] + fn error_from_reason() { + let err = Error::from(Reason::HTTP_1_1_REQUIRED); + assert_eq!(err.reason(), Some(Reason::HTTP_1_1_REQUIRED)); + } +} diff --git a/third_party/rust/h2/src/ext.rs b/third_party/rust/h2/src/ext.rs new file mode 100644 index 000000000000..cf383a4950c7 --- /dev/null +++ b/third_party/rust/h2/src/ext.rs @@ -0,0 +1,55 @@ +//! Extensions specific to the HTTP/2 protocol. + +use crate::hpack::BytesStr; + +use bytes::Bytes; +use std::fmt; + +/// Represents the `:protocol` pseudo-header used by +/// the [Extended CONNECT Protocol]. +/// +/// [Extended CONNECT Protocol]: https://datatracker.ietf.org/doc/html/rfc8441#section-4 +#[derive(Clone, Eq, PartialEq)] +pub struct Protocol { + value: BytesStr, +} + +impl Protocol { + /// Converts a static string to a protocol name. + pub const fn from_static(value: &'static str) -> Self { + Self { + value: BytesStr::from_static(value), + } + } + + /// Returns a str representation of the header. + pub fn as_str(&self) -> &str { + self.value.as_str() + } + + pub(crate) fn try_from(bytes: Bytes) -> Result { + Ok(Self { + value: BytesStr::try_from(bytes)?, + }) + } +} + +impl<'a> From<&'a str> for Protocol { + fn from(value: &'a str) -> Self { + Self { + value: BytesStr::from(value), + } + } +} + +impl AsRef<[u8]> for Protocol { + fn as_ref(&self) -> &[u8] { + self.value.as_ref() + } +} + +impl fmt::Debug for Protocol { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.value.fmt(f) + } +} diff --git a/third_party/rust/h2/src/frame/data.rs b/third_party/rust/h2/src/frame/data.rs index 91de52df9e36..e253d5e23dcb 100644 --- a/third_party/rust/h2/src/frame/data.rs +++ b/third_party/rust/h2/src/frame/data.rs @@ -36,7 +36,7 @@ impl Data { } } - /// Returns the stream identifer that this frame is associated with. + /// Returns the stream identifier that this frame is associated with. /// /// This cannot be a zero stream identifier. pub fn stream_id(&self) -> StreamId { @@ -63,7 +63,7 @@ impl Data { } } - /// Returns whther the `PADDED` flag is set on this frame. + /// Returns whether the `PADDED` flag is set on this frame. 
#[cfg(feature = "unstable")] pub fn is_padded(&self) -> bool { self.flags.is_padded() diff --git a/third_party/rust/h2/src/frame/go_away.rs b/third_party/rust/h2/src/frame/go_away.rs index a46ba7a37cbd..91d9c4c6b5d1 100644 --- a/third_party/rust/h2/src/frame/go_away.rs +++ b/third_party/rust/h2/src/frame/go_away.rs @@ -29,8 +29,7 @@ impl GoAway { self.error_code } - #[cfg(feature = "unstable")] - pub fn debug_data(&self) -> &[u8] { + pub fn debug_data(&self) -> &Bytes { &self.debug_data } @@ -51,7 +50,7 @@ impl GoAway { } pub fn encode(&self, dst: &mut B) { - log::trace!("encoding GO_AWAY; code={:?}", self.error_code); + tracing::trace!("encoding GO_AWAY; code={:?}", self.error_code); let head = Head::new(Kind::GoAway, 0, StreamId::zero()); head.encode(8, dst); dst.put_u32(self.last_stream_id.into()); diff --git a/third_party/rust/h2/src/frame/head.rs b/third_party/rust/h2/src/frame/head.rs index 2abc08e1d2ae..38be2f6973b1 100644 --- a/third_party/rust/h2/src/frame/head.rs +++ b/third_party/rust/h2/src/frame/head.rs @@ -36,7 +36,7 @@ impl Head { } } - /// Parse an HTTP/2.0 frame header + /// Parse an HTTP/2 frame header pub fn parse(header: &[u8]) -> Head { let (stream_id, _) = StreamId::parse(&header[5..]); diff --git a/third_party/rust/h2/src/frame/headers.rs b/third_party/rust/h2/src/frame/headers.rs index 2491d8da018c..bcb9050133ea 100644 --- a/third_party/rust/h2/src/frame/headers.rs +++ b/third_party/rust/h2/src/frame/headers.rs @@ -1,21 +1,17 @@ use super::{util, StreamDependency, StreamId}; +use crate::ext::Protocol; use crate::frame::{Error, Frame, Head, Kind}; use crate::hpack::{self, BytesStr}; use http::header::{self, HeaderName, HeaderValue}; use http::{uri, HeaderMap, Method, Request, StatusCode, Uri}; -use bytes::{Bytes, BytesMut}; +use bytes::{BufMut, Bytes, BytesMut}; use std::fmt; use std::io::Cursor; -type EncodeBuf<'a> = bytes::buf::ext::Limit<&'a mut BytesMut>; - -// Minimum MAX_FRAME_SIZE is 16kb, so save some arbitrary space for frame -// head and other header bits. -const MAX_HEADER_LENGTH: usize = 1024 * 16 - 100; - +type EncodeBuf<'a> = bytes::buf::Limit<&'a mut BytesMut>; /// Header frame /// /// This could be either a request or a response. 
@@ -71,6 +67,7 @@ pub struct Pseudo { pub scheme: Option, pub authority: Option, pub path: Option, + pub protocol: Option, // Response pub status: Option, @@ -100,11 +97,7 @@ struct HeaderBlock { #[derive(Debug)] struct EncodingHeaderBlock { - /// Argument to pass to the HPACK encoder to resume encoding - hpack: Option, - - /// remaining headers to encode - headers: Iter, + hpack: Bytes, } const END_STREAM: u8 = 0x1; @@ -153,7 +146,11 @@ impl Headers { let flags = HeadersFlag(head.flag()); let mut pad = 0; - log::trace!("loading headers; flags={:?}", flags); + tracing::trace!("loading headers; flags={:?}", flags); + + if head.stream_id().is_zero() { + return Err(Error::InvalidStreamId); + } // Read the padding length if flags.is_padded() { @@ -241,10 +238,6 @@ impl Headers { self.header_block.is_over_size } - pub(crate) fn has_too_big_field(&self) -> bool { - self.header_block.has_too_big_field() - } - pub fn into_parts(self) -> (Pseudo, HeaderMap) { (self.header_block.pseudo, self.header_block.fields) } @@ -254,6 +247,11 @@ impl Headers { &mut self.header_block.pseudo } + /// Whether it has status 1xx + pub(crate) fn is_informational(&self) -> bool { + self.header_block.pseudo.is_informational() + } + pub fn fields(&self) -> &HeaderMap { &self.header_block.fields } @@ -274,8 +272,8 @@ impl Headers { let head = self.head(); self.header_block - .into_encoding() - .encode(&head, encoder, dst, |_| {}) + .into_encoding(encoder) + .encode(&head, dst, |_| {}) } fn head(&self) -> Head { @@ -296,6 +294,10 @@ impl fmt::Debug for Headers { .field("stream_id", &self.stream_id) .field("flags", &self.flags); + if let Some(ref protocol) = self.header_block.pseudo.protocol { + builder.field("protocol", protocol); + } + if let Some(ref dep) = self.stream_dep { builder.field("stream_dep", dep); } @@ -398,6 +400,10 @@ impl PushPromise { let flags = PushPromiseFlag(head.flag()); let mut pad = 0; + if head.stream_id().is_zero() { + return Err(Error::InvalidStreamId); + } + // Read the padding length if flags.is_padded() { if src.is_empty() { @@ -475,8 +481,6 @@ impl PushPromise { encoder: &mut hpack::Encoder, dst: &mut EncodeBuf<'_>, ) -> Option { - use bytes::BufMut; - // At this point, the `is_end_headers` flag should always be set debug_assert!(self.flags.is_end_headers()); @@ -484,8 +488,8 @@ impl PushPromise { let promised_id = self.promised_id; self.header_block - .into_encoding() - .encode(&head, encoder, dst, |dst| { + .into_encoding(encoder) + .encode(&head, dst, |dst| { dst.put_u32(promised_id.into()); }) } @@ -524,38 +528,39 @@ impl Continuation { Head::new(Kind::Continuation, END_HEADERS, self.stream_id) } - pub fn encode( - self, - encoder: &mut hpack::Encoder, - dst: &mut EncodeBuf<'_>, - ) -> Option { + pub fn encode(self, dst: &mut EncodeBuf<'_>) -> Option { // Get the CONTINUATION frame head let head = self.head(); - self.header_block.encode(&head, encoder, dst, |_| {}) + self.header_block.encode(&head, dst, |_| {}) } } // ===== impl Pseudo ===== impl Pseudo { - pub fn request(method: Method, uri: Uri) -> Self { + pub fn request(method: Method, uri: Uri, protocol: Option) -> Self { let parts = uri::Parts::from(uri); let mut path = parts .path_and_query - .map(|v| Bytes::copy_from_slice(v.as_str().as_bytes())) - .unwrap_or_else(Bytes::new); + .map(|v| BytesStr::from(v.as_str())) + .unwrap_or(BytesStr::from_static("")); - if path.is_empty() && method != Method::OPTIONS { - path = Bytes::from_static(b"/"); + match method { + Method::OPTIONS | Method::CONNECT => {} + _ if path.is_empty() => { + 
path = BytesStr::from_static("/"); + } + _ => {} } let mut pseudo = Pseudo { method: Some(method), scheme: None, authority: None, - path: Some(unsafe { BytesStr::from_utf8_unchecked(path) }), + path: Some(path).filter(|p| !p.is_empty()), + protocol, status: None, }; @@ -569,9 +574,7 @@ impl Pseudo { // If the URI includes an authority component, add it to the pseudo // headers if let Some(authority) = parts.authority { - pseudo.set_authority(unsafe { - BytesStr::from_utf8_unchecked(Bytes::copy_from_slice(authority.as_str().as_bytes())) - }); + pseudo.set_authority(BytesStr::from(authority.as_str())); } pseudo @@ -583,34 +586,45 @@ impl Pseudo { scheme: None, authority: None, path: None, + protocol: None, status: Some(status), } } + #[cfg(feature = "unstable")] + pub fn set_status(&mut self, value: StatusCode) { + self.status = Some(value); + } + pub fn set_scheme(&mut self, scheme: uri::Scheme) { - let bytes = match scheme.as_str() { - "http" => Bytes::from_static(b"http"), - "https" => Bytes::from_static(b"https"), - s => Bytes::copy_from_slice(s.as_bytes()), + let bytes_str = match scheme.as_str() { + "http" => BytesStr::from_static("http"), + "https" => BytesStr::from_static("https"), + s => BytesStr::from(s), }; - self.scheme = Some(unsafe { BytesStr::from_utf8_unchecked(bytes) }); + self.scheme = Some(bytes_str); + } + + #[cfg(feature = "unstable")] + pub fn set_protocol(&mut self, protocol: Protocol) { + self.protocol = Some(protocol); } pub fn set_authority(&mut self, authority: BytesStr) { self.authority = Some(authority); } + + /// Whether it has status 1xx + pub(crate) fn is_informational(&self) -> bool { + self.status + .map_or(false, |status| status.is_informational()) + } } // ===== impl EncodingHeaderBlock ===== impl EncodingHeaderBlock { - fn encode( - mut self, - head: &Head, - encoder: &mut hpack::Encoder, - dst: &mut EncodeBuf<'_>, - f: F, - ) -> Option + fn encode(mut self, head: &Head, dst: &mut EncodeBuf<'_>, f: F) -> Option where F: FnOnce(&mut EncodeBuf<'_>), { @@ -626,15 +640,17 @@ impl EncodingHeaderBlock { f(dst); // Now, encode the header payload - let continuation = match encoder.encode(self.hpack, &mut self.headers, dst) { - hpack::Encode::Full => None, - hpack::Encode::Partial(state) => Some(Continuation { + let continuation = if self.hpack.len() > dst.remaining_mut() { + dst.put_slice(&self.hpack.split_to(dst.remaining_mut())); + + Some(Continuation { stream_id: head.stream_id(), - header_block: EncodingHeaderBlock { - hpack: Some(state), - headers: self.headers, - }, - }), + header_block: self, + }) + } else { + dst.put_slice(&self.hpack); + + None }; // Compute the header block length @@ -682,6 +698,10 @@ impl Iterator for Iter { return Some(Path(path)); } + if let Some(protocol) = pseudo.protocol.take() { + return Some(Protocol(protocol)); + } + if let Some(status) = pseudo.status.take() { return Some(Status(status)); } @@ -817,19 +837,19 @@ impl HeaderBlock { macro_rules! 
set_pseudo { ($field:ident, $val:expr) => {{ if reg { - log::trace!("load_hpack; header malformed -- pseudo not at head of block"); + tracing::trace!("load_hpack; header malformed -- pseudo not at head of block"); malformed = true; } else if self.pseudo.$field.is_some() { - log::trace!("load_hpack; header malformed -- repeated pseudo"); + tracing::trace!("load_hpack; header malformed -- repeated pseudo"); malformed = true; } else { let __val = $val; headers_size += - decoded_header_size(stringify!($ident).len() + 1, __val.as_str().len()); + decoded_header_size(stringify!($field).len() + 1, __val.as_str().len()); if headers_size < max_header_list_size { self.pseudo.$field = Some(__val); } else if !self.is_over_size { - log::trace!("load_hpack; header list size over max"); + tracing::trace!("load_hpack; header list size over max"); self.is_over_size = true; } } @@ -856,10 +876,13 @@ impl HeaderBlock { || name == "keep-alive" || name == "proxy-connection" { - log::trace!("load_hpack; connection level header"); + tracing::trace!("load_hpack; connection level header"); malformed = true; } else if name == header::TE && value != "trailers" { - log::trace!("load_hpack; TE header not set to trailers; val={:?}", value); + tracing::trace!( + "load_hpack; TE header not set to trailers; val={:?}", + value + ); malformed = true; } else { reg = true; @@ -868,7 +891,7 @@ impl HeaderBlock { if headers_size < max_header_list_size { self.fields.append(name, value); } else if !self.is_over_size { - log::trace!("load_hpack; header list size over max"); + tracing::trace!("load_hpack; header list size over max"); self.is_over_size = true; } } @@ -877,30 +900,35 @@ impl HeaderBlock { Method(v) => set_pseudo!(method, v), Scheme(v) => set_pseudo!(scheme, v), Path(v) => set_pseudo!(path, v), + Protocol(v) => set_pseudo!(protocol, v), Status(v) => set_pseudo!(status, v), } }); if let Err(e) = res { - log::trace!("hpack decoding error; err={:?}", e); + tracing::trace!("hpack decoding error; err={:?}", e); return Err(e.into()); } if malformed { - log::trace!("malformed message"); + tracing::trace!("malformed message"); return Err(Error::MalformedMessage); } Ok(()) } - fn into_encoding(self) -> EncodingHeaderBlock { + fn into_encoding(self, encoder: &mut hpack::Encoder) -> EncodingHeaderBlock { + let mut hpack = BytesMut::new(); + let headers = Iter { + pseudo: Some(self.pseudo), + fields: self.fields.into_iter(), + }; + + encoder.encode(headers, &mut hpack); + EncodingHeaderBlock { - hpack: None, - headers: Iter { - pseudo: Some(self.pseudo), - fields: self.fields.into_iter(), - }, + hpack: hpack.freeze(), } } @@ -933,48 +961,79 @@ impl HeaderBlock { .map(|(name, value)| decoded_header_size(name.as_str().len(), value.len())) .sum::() } - - /// Iterate over all pseudos and headers to see if any individual pair - /// would be too large to encode. - pub(crate) fn has_too_big_field(&self) -> bool { - macro_rules! 
pseudo_size { - ($name:ident) => {{ - self.pseudo - .$name - .as_ref() - .map(|m| decoded_header_size(stringify!($name).len() + 1, m.as_str().len())) - .unwrap_or(0) - }}; - } - - if pseudo_size!(method) > MAX_HEADER_LENGTH { - return true; - } - - if pseudo_size!(scheme) > MAX_HEADER_LENGTH { - return true; - } - - if pseudo_size!(authority) > MAX_HEADER_LENGTH { - return true; - } - - if pseudo_size!(path) > MAX_HEADER_LENGTH { - return true; - } - - // skip :status, its never going to be too big - - for (name, value) in &self.fields { - if decoded_header_size(name.as_str().len(), value.len()) > MAX_HEADER_LENGTH { - return true; - } - } - - false - } } fn decoded_header_size(name: usize, value: usize) -> usize { name + value + 32 } + +#[cfg(test)] +mod test { + use std::iter::FromIterator; + + use http::HeaderValue; + + use super::*; + use crate::frame; + use crate::hpack::{huffman, Encoder}; + + #[test] + fn test_nameless_header_at_resume() { + let mut encoder = Encoder::default(); + let mut dst = BytesMut::new(); + + let headers = Headers::new( + StreamId::ZERO, + Default::default(), + HeaderMap::from_iter(vec![ + ( + HeaderName::from_static("hello"), + HeaderValue::from_static("world"), + ), + ( + HeaderName::from_static("hello"), + HeaderValue::from_static("zomg"), + ), + ( + HeaderName::from_static("hello"), + HeaderValue::from_static("sup"), + ), + ]), + ); + + let continuation = headers + .encode(&mut encoder, &mut (&mut dst).limit(frame::HEADER_LEN + 8)) + .unwrap(); + + assert_eq!(17, dst.len()); + assert_eq!([0, 0, 8, 1, 0, 0, 0, 0, 0], &dst[0..9]); + assert_eq!(&[0x40, 0x80 | 4], &dst[9..11]); + assert_eq!("hello", huff_decode(&dst[11..15])); + assert_eq!(0x80 | 4, dst[15]); + + let mut world = dst[16..17].to_owned(); + + dst.clear(); + + assert!(continuation + .encode(&mut (&mut dst).limit(frame::HEADER_LEN + 16)) + .is_none()); + + world.extend_from_slice(&dst[9..12]); + assert_eq!("world", huff_decode(&world)); + + assert_eq!(24, dst.len()); + assert_eq!([0, 0, 15, 9, 4, 0, 0, 0, 0], &dst[0..9]); + + // // Next is not indexed + assert_eq!(&[15, 47, 0x80 | 3], &dst[12..15]); + assert_eq!("zomg", huff_decode(&dst[15..18])); + assert_eq!(&[15, 47, 0x80 | 3], &dst[18..21]); + assert_eq!("sup", huff_decode(&dst[21..])); + } + + fn huff_decode(src: &[u8]) -> BytesMut { + let mut buf = BytesMut::new(); + huffman::decode(src, &mut buf).unwrap() + } +} diff --git a/third_party/rust/h2/src/frame/mod.rs b/third_party/rust/h2/src/frame/mod.rs index 4c49d6bb13ec..5a682b6346ff 100644 --- a/third_party/rust/h2/src/frame/mod.rs +++ b/third_party/rust/h2/src/frame/mod.rs @@ -15,7 +15,6 @@ use std::fmt; /// let buf: [u8; 4] = [0, 0, 0, 1]; /// assert_eq!(1u32, unpack_octets_4!(buf, 0, u32)); /// ``` -#[macro_escape] macro_rules! 
unpack_octets_4 { // TODO: Get rid of this macro ($buf:expr, $offset:expr, $tip:ty) => { diff --git a/third_party/rust/h2/src/frame/ping.rs b/third_party/rust/h2/src/frame/ping.rs index 1802ec18591b..241d06ea176f 100644 --- a/third_party/rust/h2/src/frame/ping.rs +++ b/third_party/rust/h2/src/frame/ping.rs @@ -85,7 +85,7 @@ impl Ping { pub fn encode(&self, dst: &mut B) { let sz = self.payload.len(); - log::trace!("encoding PING; ack={} len={}", self.ack, sz); + tracing::trace!("encoding PING; ack={} len={}", self.ack, sz); let flags = if self.ack { ACK_FLAG } else { 0 }; let head = Head::new(Kind::Ping, flags, StreamId::zero()); diff --git a/third_party/rust/h2/src/frame/reason.rs b/third_party/rust/h2/src/frame/reason.rs index 031b6cd928c8..ff5e2012f806 100644 --- a/third_party/rust/h2/src/frame/reason.rs +++ b/third_party/rust/h2/src/frame/reason.rs @@ -1,6 +1,6 @@ use std::fmt; -/// HTTP/2.0 error codes. +/// HTTP/2 error codes. /// /// Error codes are used in `RST_STREAM` and `GOAWAY` frames to convey the /// reasons for the stream or connection error. For example, diff --git a/third_party/rust/h2/src/frame/reset.rs b/third_party/rust/h2/src/frame/reset.rs index 6edecf1a3e57..39f6ac20229c 100644 --- a/third_party/rust/h2/src/frame/reset.rs +++ b/third_party/rust/h2/src/frame/reset.rs @@ -2,7 +2,7 @@ use crate::frame::{self, Error, Head, Kind, Reason, StreamId}; use bytes::BufMut; -#[derive(Debug, Eq, PartialEq)] +#[derive(Copy, Clone, Debug, Eq, PartialEq)] pub struct Reset { stream_id: StreamId, error_code: Reason, @@ -38,7 +38,7 @@ impl Reset { } pub fn encode(&self, dst: &mut B) { - log::trace!( + tracing::trace!( "encoding RESET; id={:?} code={:?}", self.stream_id, self.error_code diff --git a/third_party/rust/h2/src/frame/settings.rs b/third_party/rust/h2/src/frame/settings.rs index c70938144053..080d0f4e58a6 100644 --- a/third_party/rust/h2/src/frame/settings.rs +++ b/third_party/rust/h2/src/frame/settings.rs @@ -13,6 +13,7 @@ pub struct Settings { initial_window_size: Option, max_frame_size: Option, max_header_list_size: Option, + enable_connect_protocol: Option, } /// An enum that lists all valid settings that can be sent in a SETTINGS @@ -27,6 +28,7 @@ pub enum Setting { InitialWindowSize(u32), MaxFrameSize(u32), MaxHeaderListSize(u32), + EnableConnectProtocol(u32), } #[derive(Copy, Clone, Eq, PartialEq, Default)] @@ -99,14 +101,22 @@ impl Settings { self.max_header_list_size = size; } - pub fn is_push_enabled(&self) -> bool { - self.enable_push.unwrap_or(1) != 0 + pub fn is_push_enabled(&self) -> Option { + self.enable_push.map(|val| val != 0) } pub fn set_enable_push(&mut self, enable: bool) { self.enable_push = Some(enable as u32); } + pub fn is_extended_connect_protocol_enabled(&self) -> Option { + self.enable_connect_protocol.map(|val| val != 0) + } + + pub fn set_enable_connect_protocol(&mut self, val: Option) { + self.enable_connect_protocol = val; + } + pub fn header_table_size(&self) -> Option { self.header_table_size } @@ -141,7 +151,7 @@ impl Settings { // Ensure the payload length is correct, each setting is 6 bytes long. 
if payload.len() % 6 != 0 { - log::debug!("invalid settings payload length; len={:?}", payload.len()); + tracing::debug!("invalid settings payload length; len={:?}", payload.len()); return Err(Error::InvalidPayloadAckSettings); } @@ -181,6 +191,14 @@ impl Settings { Some(MaxHeaderListSize(val)) => { settings.max_header_list_size = Some(val); } + Some(EnableConnectProtocol(val)) => match val { + 0 | 1 => { + settings.enable_connect_protocol = Some(val); + } + _ => { + return Err(Error::InvalidSettingValue); + } + }, None => {} } } @@ -199,13 +217,13 @@ impl Settings { let head = Head::new(Kind::Settings, self.flags.into(), StreamId::zero()); let payload_len = self.payload_len(); - log::trace!("encoding SETTINGS; len={}", payload_len); + tracing::trace!("encoding SETTINGS; len={}", payload_len); head.encode(payload_len, dst); // Encode the settings self.for_each(|setting| { - log::trace!("encoding setting; val={:?}", setting); + tracing::trace!("encoding setting; val={:?}", setting); setting.encode(dst) }); } @@ -236,6 +254,10 @@ impl Settings { if let Some(v) = self.max_header_list_size { f(MaxHeaderListSize(v)); } + + if let Some(v) = self.enable_connect_protocol { + f(EnableConnectProtocol(v)); + } } } @@ -269,6 +291,9 @@ impl fmt::Debug for Settings { Setting::MaxHeaderListSize(v) => { builder.field("max_header_list_size", &v); } + Setting::EnableConnectProtocol(v) => { + builder.field("enable_connect_protocol", &v); + } }); builder.finish() @@ -291,6 +316,7 @@ impl Setting { 4 => Some(InitialWindowSize(val)), 5 => Some(MaxFrameSize(val)), 6 => Some(MaxHeaderListSize(val)), + 8 => Some(EnableConnectProtocol(val)), _ => None, } } @@ -322,6 +348,7 @@ impl Setting { InitialWindowSize(v) => (4, v), MaxFrameSize(v) => (5, v), MaxHeaderListSize(v) => (6, v), + EnableConnectProtocol(v) => (8, v), }; dst.put_u16(kind); diff --git a/third_party/rust/h2/src/frame/window_update.rs b/third_party/rust/h2/src/frame/window_update.rs index 72c1c2581a59..eed2ce17ec26 100644 --- a/third_party/rust/h2/src/frame/window_update.rs +++ b/third_party/rust/h2/src/frame/window_update.rs @@ -48,7 +48,7 @@ impl WindowUpdate { } pub fn encode(&self, dst: &mut B) { - log::trace!("encoding WINDOW_UPDATE; id={:?}", self.stream_id); + tracing::trace!("encoding WINDOW_UPDATE; id={:?}", self.stream_id); let head = Head::new(Kind::WindowUpdate, 0, self.stream_id); head.encode(4, dst); dst.put_u32(self.size_increment); diff --git a/third_party/rust/h2/src/fuzz_bridge.rs b/third_party/rust/h2/src/fuzz_bridge.rs new file mode 100644 index 000000000000..3ea8b591c91d --- /dev/null +++ b/third_party/rust/h2/src/fuzz_bridge.rs @@ -0,0 +1,28 @@ +#[cfg(fuzzing)] +pub mod fuzz_logic { + use crate::hpack; + use bytes::BytesMut; + use http::header::HeaderName; + use std::io::Cursor; + + pub fn fuzz_hpack(data_: &[u8]) { + let mut decoder_ = hpack::Decoder::new(0); + let mut buf = BytesMut::new(); + buf.extend(data_); + let _dec_res = decoder_.decode(&mut Cursor::new(&mut buf), |_h| {}); + + if let Ok(s) = std::str::from_utf8(data_) { + if let Ok(h) = http::Method::from_bytes(s.as_bytes()) { + let m_ = hpack::Header::Method(h); + let mut encoder = hpack::Encoder::new(0, 0); + let _res = encode(&mut encoder, vec![m_]); + } + } + } + + fn encode(e: &mut hpack::Encoder, hdrs: Vec>>) -> BytesMut { + let mut dst = BytesMut::with_capacity(1024); + e.encode(&mut hdrs.into_iter(), &mut dst); + dst + } +} diff --git a/third_party/rust/h2/src/hpack/decoder.rs b/third_party/rust/h2/src/hpack/decoder.rs index 4befa8702ae2..988b48db110b 100644 --- 
a/third_party/rust/h2/src/hpack/decoder.rs +++ b/third_party/rust/h2/src/hpack/decoder.rs @@ -142,6 +142,12 @@ struct Table { max_size: usize, } +struct StringMarker { + offset: usize, + len: usize, + string: Option, +} + // ===== impl Decoder ===== impl Decoder { @@ -183,7 +189,10 @@ impl Decoder { self.last_max_update = size; } - log::trace!("decode"); + let span = tracing::trace_span!("hpack::decode"); + let _e = span.enter(); + + tracing::trace!("decode"); while let Some(ty) = peek_u8(src) { // At this point we are always at the beginning of the next block @@ -191,14 +200,14 @@ impl Decoder { // determined from the first byte. match Representation::load(ty)? { Indexed => { - log::trace!(" Indexed; rem={:?}", src.remaining()); + tracing::trace!(rem = src.remaining(), kind = %"Indexed"); can_resize = false; let entry = self.decode_indexed(src)?; consume(src); f(entry); } LiteralWithIndexing => { - log::trace!(" LiteralWithIndexing; rem={:?}", src.remaining()); + tracing::trace!(rem = src.remaining(), kind = %"LiteralWithIndexing"); can_resize = false; let entry = self.decode_literal(src, true)?; @@ -209,14 +218,14 @@ impl Decoder { f(entry); } LiteralWithoutIndexing => { - log::trace!(" LiteralWithoutIndexing; rem={:?}", src.remaining()); + tracing::trace!(rem = src.remaining(), kind = %"LiteralWithoutIndexing"); can_resize = false; let entry = self.decode_literal(src, false)?; consume(src); f(entry); } LiteralNeverIndexed => { - log::trace!(" LiteralNeverIndexed; rem={:?}", src.remaining()); + tracing::trace!(rem = src.remaining(), kind = %"LiteralNeverIndexed"); can_resize = false; let entry = self.decode_literal(src, false)?; consume(src); @@ -226,7 +235,7 @@ impl Decoder { f(entry); } SizeUpdate => { - log::trace!(" SizeUpdate; rem={:?}", src.remaining()); + tracing::trace!(rem = src.remaining(), kind = %"SizeUpdate"); if !can_resize { return Err(DecoderError::InvalidMaxDynamicSize); } @@ -248,10 +257,10 @@ impl Decoder { return Err(DecoderError::InvalidMaxDynamicSize); } - log::debug!( - "Decoder changed max table size from {} to {}", - self.table.size(), - new_size + tracing::debug!( + from = self.table.size(), + to = new_size, + "Decoder changed max table size" ); self.table.set_max_size(new_size); @@ -276,10 +285,13 @@ impl Decoder { // First, read the header name if table_idx == 0 { + let old_pos = buf.position(); + let name_marker = self.try_decode_string(buf)?; + let value_marker = self.try_decode_string(buf)?; + buf.set_position(old_pos); // Read the name as a literal - let name = self.decode_string(buf)?; - let value = self.decode_string(buf)?; - + let name = name_marker.consume(buf); + let value = value_marker.consume(buf); Header::new(name, value) } else { let e = self.table.get(table_idx)?; @@ -289,7 +301,11 @@ impl Decoder { } } - fn decode_string(&mut self, buf: &mut Cursor<&mut BytesMut>) -> Result { + fn try_decode_string( + &mut self, + buf: &mut Cursor<&mut BytesMut>, + ) -> Result { + let old_pos = buf.position(); const HUFF_FLAG: u8 = 0b1000_0000; // The first bit in the first byte contains the huffman encoded flag. 
@@ -302,25 +318,38 @@ impl Decoder { let len = decode_int(buf, 7)?; if len > buf.remaining() { - log::trace!( - "decode_string underflow; len={}; remaining={}", - len, - buf.remaining() - ); + tracing::trace!(len, remaining = buf.remaining(), "decode_string underflow",); return Err(DecoderError::NeedMore(NeedMore::StringUnderflow)); } + let offset = (buf.position() - old_pos) as usize; if huff { let ret = { - let raw = &buf.bytes()[..len]; - huffman::decode(raw, &mut self.buffer).map(BytesMut::freeze) + let raw = &buf.chunk()[..len]; + huffman::decode(raw, &mut self.buffer).map(|buf| StringMarker { + offset, + len, + string: Some(BytesMut::freeze(buf)), + }) }; buf.advance(len); - return ret; + ret + } else { + buf.advance(len); + Ok(StringMarker { + offset, + len, + string: None, + }) } + } - Ok(take(buf, len)) + fn decode_string(&mut self, buf: &mut Cursor<&mut BytesMut>) -> Result { + let old_pos = buf.position(); + let marker = self.try_decode_string(buf)?; + buf.set_position(old_pos); + Ok(marker.consume(buf)) } } @@ -420,7 +449,7 @@ fn decode_int(buf: &mut B, prefix_size: u8) -> Result(buf: &mut B) -> Option { if buf.has_remaining() { - Some(buf.bytes()[0]) + Some(buf.chunk()[0]) } else { None } @@ -434,6 +463,19 @@ fn take(buf: &mut Cursor<&mut BytesMut>, n: usize) -> Bytes { head.freeze() } +impl StringMarker { + fn consume(self, buf: &mut Cursor<&mut BytesMut>) -> Bytes { + buf.advance(self.offset); + match self.string { + Some(string) => { + buf.advance(self.len); + string + } + None => take(buf, self.len), + } + } +} + fn consume(buf: &mut Cursor<&mut BytesMut>) { // remove bytes from the internal BytesMut when they have been successfully // decoded. This is a more permanent cursor position, which will be @@ -578,13 +620,13 @@ pub fn get_static(idx: usize) -> Header { use http::header::HeaderValue; match idx { - 1 => Header::Authority(from_static("")), + 1 => Header::Authority(BytesStr::from_static("")), 2 => Header::Method(Method::GET), 3 => Header::Method(Method::POST), - 4 => Header::Path(from_static("/")), - 5 => Header::Path(from_static("/index.html")), - 6 => Header::Scheme(from_static("http")), - 7 => Header::Scheme(from_static("https")), + 4 => Header::Path(BytesStr::from_static("/")), + 5 => Header::Path(BytesStr::from_static("/index.html")), + 6 => Header::Scheme(BytesStr::from_static("http")), + 7 => Header::Scheme(BytesStr::from_static("https")), 8 => Header::Status(StatusCode::OK), 9 => Header::Status(StatusCode::NO_CONTENT), 10 => Header::Status(StatusCode::PARTIAL_CONTENT), @@ -784,10 +826,6 @@ pub fn get_static(idx: usize) -> Header { } } -fn from_static(s: &'static str) -> BytesStr { - unsafe { BytesStr::from_utf8_unchecked(Bytes::from_static(s.as_bytes())) } -} - #[cfg(test)] mod test { use super::*; @@ -852,7 +890,51 @@ mod test { fn huff_encode(src: &[u8]) -> BytesMut { let mut buf = BytesMut::new(); - huffman::encode(src, &mut buf).unwrap(); + huffman::encode(src, &mut buf); buf } + + #[test] + fn test_decode_continuation_header_with_non_huff_encoded_name() { + let mut de = Decoder::new(0); + let value = huff_encode(b"bar"); + let mut buf = BytesMut::new(); + // header name is non_huff encoded + buf.extend(&[0b01000000, 0x00 | 3]); + buf.extend(b"foo"); + // header value is partial + buf.extend(&[0x80 | 3]); + buf.extend(&value[0..1]); + + let mut res = vec![]; + let e = de + .decode(&mut Cursor::new(&mut buf), |h| { + res.push(h); + }) + .unwrap_err(); + // decode error because the header value is partial + assert_eq!(e, 
DecoderError::NeedMore(NeedMore::StringUnderflow)); + + // extend buf with the remaining header value + buf.extend(&value[1..]); + let _ = de + .decode(&mut Cursor::new(&mut buf), |h| { + res.push(h); + }) + .unwrap(); + + assert_eq!(res.len(), 1); + assert_eq!(de.table.size(), 0); + + match res[0] { + Header::Field { + ref name, + ref value, + } => { + assert_eq!(name, "foo"); + assert_eq!(value, "bar"); + } + _ => panic!(), + } + } } diff --git a/third_party/rust/h2/src/hpack/encoder.rs b/third_party/rust/h2/src/hpack/encoder.rs index ef177485f15b..76b373830ebd 100644 --- a/third_party/rust/h2/src/hpack/encoder.rs +++ b/third_party/rust/h2/src/hpack/encoder.rs @@ -1,34 +1,15 @@ use super::table::{Index, Table}; use super::{huffman, Header}; -use bytes::{buf::ext::Limit, BufMut, BytesMut}; +use bytes::{BufMut, BytesMut}; use http::header::{HeaderName, HeaderValue}; -type DstBuf<'a> = Limit<&'a mut BytesMut>; - #[derive(Debug)] pub struct Encoder { table: Table, size_update: Option, } -#[derive(Debug)] -pub enum Encode { - Full, - Partial(EncodeState), -} - -#[derive(Debug)] -pub struct EncodeState { - index: Index, - value: Option, -} - -#[derive(Debug, PartialEq, Eq)] -pub enum EncoderError { - BufferOverflow, -} - #[derive(Debug, Copy, Clone, Eq, PartialEq)] enum SizeUpdate { One(usize), @@ -77,56 +58,24 @@ impl Encoder { } /// Encode a set of headers into the provide buffer - pub fn encode( - &mut self, - resume: Option, - headers: &mut I, - dst: &mut DstBuf<'_>, - ) -> Encode + pub fn encode(&mut self, headers: I, dst: &mut BytesMut) where - I: Iterator>>, + I: IntoIterator>>, { - let pos = position(dst); + let span = tracing::trace_span!("hpack::encode"); + let _e = span.enter(); - if let Err(e) = self.encode_size_updates(dst) { - if e == EncoderError::BufferOverflow { - rewind(dst, pos); - } - - unreachable!("encode_size_updates errored"); - } + self.encode_size_updates(dst); let mut last_index = None; - if let Some(resume) = resume { - let pos = position(dst); - - let res = match resume.value { - Some(ref value) => self.encode_header_without_name(&resume.index, value, dst), - None => self.encode_header(&resume.index, dst), - }; - - if res.is_err() { - rewind(dst, pos); - return Encode::Partial(resume); - } - last_index = Some(resume.index); - } - for header in headers { - let pos = position(dst); - match header.reify() { // The header has an associated name. In which case, try to // index it in the table. Ok(header) => { let index = self.table.index(header); - let res = self.encode_header(&index, dst); - - if res.is_err() { - rewind(dst, pos); - return Encode::Partial(EncodeState { index, value: None }); - } + self.encode_header(&index, dst); last_index = Some(index); } @@ -135,77 +84,61 @@ impl Encoder { // which case, we skip table lookup and just use the same index // as the previous entry. 
Err(value) => { - let res = self.encode_header_without_name( + self.encode_header_without_name( last_index.as_ref().unwrap_or_else(|| { panic!("encoding header without name, but no previous index to use for name"); }), &value, dst, ); - - if res.is_err() { - rewind(dst, pos); - return Encode::Partial(EncodeState { - index: last_index.unwrap(), // checked just above - value: Some(value), - }); - } } - }; + } } - - Encode::Full } - fn encode_size_updates(&mut self, dst: &mut DstBuf<'_>) -> Result<(), EncoderError> { + fn encode_size_updates(&mut self, dst: &mut BytesMut) { match self.size_update.take() { Some(SizeUpdate::One(val)) => { self.table.resize(val); - encode_size_update(val, dst)?; + encode_size_update(val, dst); } Some(SizeUpdate::Two(min, max)) => { self.table.resize(min); self.table.resize(max); - encode_size_update(min, dst)?; - encode_size_update(max, dst)?; + encode_size_update(min, dst); + encode_size_update(max, dst); } None => {} } - - Ok(()) } - fn encode_header(&mut self, index: &Index, dst: &mut DstBuf<'_>) -> Result<(), EncoderError> { + fn encode_header(&mut self, index: &Index, dst: &mut BytesMut) { match *index { Index::Indexed(idx, _) => { - encode_int(idx, 7, 0x80, dst)?; + encode_int(idx, 7, 0x80, dst); } Index::Name(idx, _) => { let header = self.table.resolve(&index); - encode_not_indexed(idx, header.value_slice(), header.is_sensitive(), dst)?; + encode_not_indexed(idx, header.value_slice(), header.is_sensitive(), dst); } Index::Inserted(_) => { let header = self.table.resolve(&index); assert!(!header.is_sensitive()); - if !dst.has_remaining_mut() { - return Err(EncoderError::BufferOverflow); - } - dst.put_u8(0b0100_0000); - encode_str(header.name().as_slice(), dst)?; - encode_str(header.value_slice(), dst)?; + encode_str(header.name().as_slice(), dst); + encode_str(header.value_slice(), dst); } Index::InsertedValue(idx, _) => { let header = self.table.resolve(&index); assert!(!header.is_sensitive()); - encode_int(idx, 6, 0b0100_0000, dst)?; - encode_str(header.value_slice(), dst)?; + encode_int(idx, 6, 0b0100_0000, dst); + encode_str(header.value_slice(), dst); } Index::NotIndexed(_) => { let header = self.table.resolve(&index); @@ -215,19 +148,17 @@ impl Encoder { header.value_slice(), header.is_sensitive(), dst, - )?; + ); } } - - Ok(()) } fn encode_header_without_name( &mut self, last: &Index, value: &HeaderValue, - dst: &mut DstBuf<'_>, - ) -> Result<(), EncoderError> { + dst: &mut BytesMut, + ) { match *last { Index::Indexed(..) | Index::Name(..) @@ -235,7 +166,7 @@ impl Encoder { | Index::InsertedValue(..) 
=> { let idx = self.table.resolve_idx(last); - encode_not_indexed(idx, value.as_ref(), value.is_sensitive(), dst)?; + encode_not_indexed(idx, value.as_ref(), value.is_sensitive(), dst); } Index::NotIndexed(_) => { let last = self.table.resolve(last); @@ -245,11 +176,9 @@ impl Encoder { value.as_ref(), value.is_sensitive(), dst, - )?; + ); } } - - Ok(()) } } @@ -259,52 +188,32 @@ impl Default for Encoder { } } -fn encode_size_update(val: usize, dst: &mut B) -> Result<(), EncoderError> { +fn encode_size_update(val: usize, dst: &mut BytesMut) { encode_int(val, 5, 0b0010_0000, dst) } -fn encode_not_indexed( - name: usize, - value: &[u8], - sensitive: bool, - dst: &mut DstBuf<'_>, -) -> Result<(), EncoderError> { +fn encode_not_indexed(name: usize, value: &[u8], sensitive: bool, dst: &mut BytesMut) { if sensitive { - encode_int(name, 4, 0b10000, dst)?; + encode_int(name, 4, 0b10000, dst); } else { - encode_int(name, 4, 0, dst)?; + encode_int(name, 4, 0, dst); } - encode_str(value, dst)?; - Ok(()) + encode_str(value, dst); } -fn encode_not_indexed2( - name: &[u8], - value: &[u8], - sensitive: bool, - dst: &mut DstBuf<'_>, -) -> Result<(), EncoderError> { - if !dst.has_remaining_mut() { - return Err(EncoderError::BufferOverflow); - } - +fn encode_not_indexed2(name: &[u8], value: &[u8], sensitive: bool, dst: &mut BytesMut) { if sensitive { dst.put_u8(0b10000); } else { dst.put_u8(0); } - encode_str(name, dst)?; - encode_str(value, dst)?; - Ok(()) + encode_str(name, dst); + encode_str(value, dst); } -fn encode_str(val: &[u8], dst: &mut DstBuf<'_>) -> Result<(), EncoderError> { - if !dst.has_remaining_mut() { - return Err(EncoderError::BufferOverflow); - } - +fn encode_str(val: &[u8], dst: &mut BytesMut) { if !val.is_empty() { let idx = position(dst); @@ -312,50 +221,43 @@ fn encode_str(val: &[u8], dst: &mut DstBuf<'_>) -> Result<(), EncoderError> { dst.put_u8(0); // Encode with huffman - huffman::encode(val, dst)?; + huffman::encode(val, dst); let huff_len = position(dst) - (idx + 1); if encode_int_one_byte(huff_len, 7) { // Write the string head - dst.get_mut()[idx] = 0x80 | huff_len as u8; + dst[idx] = 0x80 | huff_len as u8; } else { - // Write the head to a placeholer + // Write the head to a placeholder const PLACEHOLDER_LEN: usize = 8; let mut buf = [0u8; PLACEHOLDER_LEN]; let head_len = { let mut head_dst = &mut buf[..]; - encode_int(huff_len, 7, 0x80, &mut head_dst)?; + encode_int(huff_len, 7, 0x80, &mut head_dst); PLACEHOLDER_LEN - head_dst.remaining_mut() }; - if dst.remaining_mut() < head_len { - return Err(EncoderError::BufferOverflow); - } - // This is just done to reserve space in the destination dst.put_slice(&buf[1..head_len]); - let written = dst.get_mut(); // Shift the header forward for i in 0..huff_len { let src_i = idx + 1 + (huff_len - (i + 1)); let dst_i = idx + head_len + (huff_len - (i + 1)); - written[dst_i] = written[src_i]; + dst[dst_i] = dst[src_i]; } // Copy in the head for i in 0..head_len { - written[idx + i] = buf[i]; + dst[idx + i] = buf[i]; } } } else { // Write an empty string dst.put_u8(0); } - - Ok(()) } /// Encode an integer into the given destination buffer @@ -364,47 +266,25 @@ fn encode_int( prefix_bits: usize, // The number of bits in the prefix first_byte: u8, // The base upon which to start encoding the int dst: &mut B, -) -> Result<(), EncoderError> { - let mut rem = dst.remaining_mut(); - - if rem == 0 { - return Err(EncoderError::BufferOverflow); - } - +) { if encode_int_one_byte(value, prefix_bits) { dst.put_u8(first_byte | value as u8); - return 
Ok(()); + return; } let low = (1 << prefix_bits) - 1; value -= low; - if value > 0x0fff_ffff { - panic!("value out of range"); - } - dst.put_u8(first_byte | low as u8); - rem -= 1; while value >= 128 { - if rem == 0 { - return Err(EncoderError::BufferOverflow); - } - dst.put_u8(0b1000_0000 | value as u8); - rem -= 1; value >>= 7; } - if rem == 0 { - return Err(EncoderError::BufferOverflow); - } - dst.put_u8(value as u8); - - Ok(()) } /// Returns true if the in the int can be fully encoded in the first byte. @@ -412,19 +292,14 @@ fn encode_int_one_byte(value: usize, prefix_bits: usize) -> bool { value < (1 << prefix_bits) - 1 } -fn position(buf: &DstBuf<'_>) -> usize { - buf.get_ref().len() -} - -fn rewind(buf: &mut DstBuf<'_>, pos: usize) { - buf.get_mut().truncate(pos); +fn position(buf: &BytesMut) -> usize { + buf.len() } #[cfg(test)] mod test { use super::*; use crate::hpack::Header; - use bytes::buf::BufMutExt; use http::*; #[test] @@ -802,49 +677,15 @@ mod test { } #[test] - fn test_nameless_header_at_resume() { + fn test_large_size_update() { let mut encoder = Encoder::default(); - let max_len = 15; - let mut dst = BytesMut::with_capacity(64); - let mut input = vec![ - Header::Field { - name: Some("hello".parse().unwrap()), - value: HeaderValue::from_bytes(b"world").unwrap(), - }, - Header::Field { - name: None, - value: HeaderValue::from_bytes(b"zomg").unwrap(), - }, - Header::Field { - name: None, - value: HeaderValue::from_bytes(b"sup").unwrap(), - }, - ] - .into_iter(); + encoder.update_max_size(1912930560); + assert_eq!(Some(SizeUpdate::One(1912930560)), encoder.size_update); - let resume = match encoder.encode(None, &mut input, &mut (&mut dst).limit(max_len)) { - Encode::Partial(r) => r, - _ => panic!("encode should be partial"), - }; - - assert_eq!(&[0x40, 0x80 | 4], &dst[0..2]); - assert_eq!("hello", huff_decode(&dst[2..6])); - assert_eq!(0x80 | 4, dst[6]); - assert_eq!("world", huff_decode(&dst[7..11])); - - dst.clear(); - - match encoder.encode(Some(resume), &mut input, &mut (&mut dst).limit(max_len)) { - Encode::Full => {} - unexpected => panic!("resume returned unexpected: {:?}", unexpected), - } - - // Next is not indexed - assert_eq!(&[15, 47, 0x80 | 3], &dst[0..3]); - assert_eq!("zomg", huff_decode(&dst[3..6])); - assert_eq!(&[15, 47, 0x80 | 3], &dst[6..9]); - assert_eq!("sup", huff_decode(&dst[9..])); + let mut dst = BytesMut::with_capacity(6); + encoder.encode_size_updates(&mut dst); + assert_eq!([63, 225, 129, 148, 144, 7], &dst[..]); } #[test] @@ -855,7 +696,7 @@ mod test { fn encode(e: &mut Encoder, hdrs: Vec>>) -> BytesMut { let mut dst = BytesMut::with_capacity(1024); - e.encode(None, &mut hdrs.into_iter(), &mut (&mut dst).limit(1024)); + e.encode(&mut hdrs.into_iter(), &mut dst); dst } diff --git a/third_party/rust/h2/src/hpack/header.rs b/third_party/rust/h2/src/hpack/header.rs index 74369506cb67..e6df555abf21 100644 --- a/third_party/rust/h2/src/hpack/header.rs +++ b/third_party/rust/h2/src/hpack/header.rs @@ -1,11 +1,12 @@ use super::{DecoderError, NeedMore}; +use crate::ext::Protocol; use bytes::Bytes; use http::header::{HeaderName, HeaderValue}; use http::{Method, StatusCode}; use std::fmt; -/// HTTP/2.0 Header +/// HTTP/2 Header #[derive(Debug, Clone, Eq, PartialEq)] pub enum Header { Field { name: T, value: HeaderValue }, @@ -14,6 +15,7 @@ pub enum Header { Method(Method), Scheme(BytesStr), Path(BytesStr), + Protocol(Protocol), Status(StatusCode), } @@ -25,6 +27,7 @@ pub enum Name<'a> { Method, Scheme, Path, + Protocol, Status, } @@ -51,6 +54,7 @@ impl 
Header> { Method(v) => Method(v), Scheme(v) => Scheme(v), Path(v) => Path(v), + Protocol(v) => Protocol(v), Status(v) => Status(v), }) } @@ -79,6 +83,10 @@ impl Header { let value = BytesStr::try_from(value)?; Ok(Header::Path(value)) } + b"protocol" => { + let value = Protocol::try_from(value)?; + Ok(Header::Protocol(value)) + } b"status" => { let status = StatusCode::from_bytes(&value)?; Ok(Header::Status(status)) @@ -104,6 +112,7 @@ impl Header { Header::Method(ref v) => 32 + 7 + v.as_ref().len(), Header::Scheme(ref v) => 32 + 7 + v.len(), Header::Path(ref v) => 32 + 5 + v.len(), + Header::Protocol(ref v) => 32 + 9 + v.as_str().len(), Header::Status(_) => 32 + 7 + 3, } } @@ -116,6 +125,7 @@ impl Header { Header::Method(..) => Name::Method, Header::Scheme(..) => Name::Scheme, Header::Path(..) => Name::Path, + Header::Protocol(..) => Name::Protocol, Header::Status(..) => Name::Status, } } @@ -127,6 +137,7 @@ impl Header { Header::Method(ref v) => v.as_ref().as_ref(), Header::Scheme(ref v) => v.as_ref(), Header::Path(ref v) => v.as_ref(), + Header::Protocol(ref v) => v.as_ref(), Header::Status(ref v) => v.as_str().as_ref(), } } @@ -156,6 +167,10 @@ impl Header { Header::Path(ref b) => a == b, _ => false, }, + Header::Protocol(ref a) => match *other { + Header::Protocol(ref b) => a == b, + _ => false, + }, Header::Status(ref a) => match *other { Header::Status(ref b) => a == b, _ => false, @@ -205,6 +220,7 @@ impl From
for Header> { Header::Method(v) => Header::Method(v), Header::Scheme(v) => Header::Scheme(v), Header::Path(v) => Header::Path(v), + Header::Protocol(v) => Header::Protocol(v), Header::Status(v) => Header::Status(v), } } @@ -221,6 +237,7 @@ impl<'a> Name<'a> { Name::Method => Ok(Header::Method(Method::from_bytes(&*value)?)), Name::Scheme => Ok(Header::Scheme(BytesStr::try_from(value)?)), Name::Path => Ok(Header::Path(BytesStr::try_from(value)?)), + Name::Protocol => Ok(Header::Protocol(Protocol::try_from(value)?)), Name::Status => { match StatusCode::from_bytes(&value) { Ok(status) => Ok(Header::Status(status)), @@ -238,6 +255,7 @@ impl<'a> Name<'a> { Name::Method => b":method", Name::Scheme => b":scheme", Name::Path => b":path", + Name::Protocol => b":protocol", Name::Status => b":status", } } @@ -246,8 +264,12 @@ impl<'a> Name<'a> { // ===== impl BytesStr ===== impl BytesStr { - pub(crate) unsafe fn from_utf8_unchecked(bytes: Bytes) -> Self { - BytesStr(bytes) + pub(crate) const fn from_static(value: &'static str) -> Self { + BytesStr(Bytes::from_static(value.as_bytes())) + } + + pub(crate) fn from(value: &str) -> Self { + BytesStr(Bytes::copy_from_slice(value.as_bytes())) } #[doc(hidden)] diff --git a/third_party/rust/h2/src/hpack/huffman/mod.rs b/third_party/rust/h2/src/hpack/huffman/mod.rs index b8db8b4d3967..07b3fd925b0d 100644 --- a/third_party/rust/h2/src/hpack/huffman/mod.rs +++ b/third_party/rust/h2/src/hpack/huffman/mod.rs @@ -1,7 +1,7 @@ mod table; use self::table::{DECODE_TABLE, ENCODE_TABLE}; -use crate::hpack::{DecoderError, EncoderError}; +use crate::hpack::DecoderError; use bytes::{BufMut, BytesMut}; @@ -40,11 +40,9 @@ pub fn decode(src: &[u8], buf: &mut BytesMut) -> Result Ok(buf.split()) } -// TODO: return error when there is not enough room to encode the value -pub fn encode(src: &[u8], dst: &mut B) -> Result<(), EncoderError> { +pub fn encode(src: &[u8], dst: &mut BytesMut) { let mut bits: u64 = 0; let mut bits_left = 40; - let mut rem = dst.remaining_mut(); for &b in src { let (nbits, code) = ENCODE_TABLE[b as usize]; @@ -53,29 +51,18 @@ pub fn encode(src: &[u8], dst: &mut B) -> Result<(), EncoderError> { bits_left -= nbits; while bits_left <= 32 { - if rem == 0 { - return Err(EncoderError::BufferOverflow); - } - dst.put_u8((bits >> 32) as u8); bits <<= 8; bits_left += 8; - rem -= 1; } } if bits_left != 40 { - if rem == 0 { - return Err(EncoderError::BufferOverflow); - } - // This writes the EOS token bits |= (1 << bits_left) - 1; dst.put_u8((bits >> 32) as u8); } - - Ok(()) } impl Decoder { @@ -144,17 +131,17 @@ mod test { #[test] fn encode_single_byte() { - let mut dst = Vec::with_capacity(1); + let mut dst = BytesMut::with_capacity(1); - encode(b"o", &mut dst).unwrap(); + encode(b"o", &mut dst); assert_eq!(&dst[..], &[0b00111111]); dst.clear(); - encode(b"0", &mut dst).unwrap(); + encode(b"0", &mut dst); assert_eq!(&dst[..], &[0x0 + 7]); dst.clear(); - encode(b"A", &mut dst).unwrap(); + encode(b"A", &mut dst); assert_eq!(&dst[..], &[(0x21 << 2) + 3]); } @@ -185,9 +172,9 @@ mod test { ]; for s in DATA { - let mut dst = Vec::with_capacity(s.len()); + let mut dst = BytesMut::with_capacity(s.len()); - encode(s.as_bytes(), &mut dst).unwrap(); + encode(s.as_bytes(), &mut dst); let decoded = decode(&dst).unwrap(); @@ -201,9 +188,9 @@ mod test { &[b"\0", b"\0\0\0", b"\0\x01\x02\x03\x04\x05", b"\xFF\xF8"]; for s in DATA { - let mut dst = Vec::with_capacity(s.len()); + let mut dst = BytesMut::with_capacity(s.len()); - encode(s, &mut dst).unwrap(); + encode(s, &mut dst); let 
decoded = decode(&dst).unwrap(); diff --git a/third_party/rust/h2/src/hpack/mod.rs b/third_party/rust/h2/src/hpack/mod.rs index 365b0057fe95..12c75d553588 100644 --- a/third_party/rust/h2/src/hpack/mod.rs +++ b/third_party/rust/h2/src/hpack/mod.rs @@ -1,12 +1,12 @@ mod decoder; mod encoder; pub(crate) mod header; -mod huffman; +pub(crate) mod huffman; mod table; #[cfg(test)] mod test; pub use self::decoder::{Decoder, DecoderError, NeedMore}; -pub use self::encoder::{Encode, EncodeState, Encoder, EncoderError}; +pub use self::encoder::Encoder; pub use self::header::{BytesStr, Header}; diff --git a/third_party/rust/h2/src/hpack/table.rs b/third_party/rust/h2/src/hpack/table.rs index e7c8ce76096b..0124f216d5ad 100644 --- a/third_party/rust/h2/src/hpack/table.rs +++ b/third_party/rust/h2/src/hpack/table.rs @@ -597,7 +597,7 @@ impl Table { } assert!(dist <= their_dist, - "could not find entry; actual={}; desired={};" + + "could not find entry; actual={}; desired={}" + "probe={}, dist={}; their_dist={}; index={}; msg={}", actual, desired, probe, dist, their_dist, index.wrapping_sub(self.inserted), msg); @@ -751,6 +751,7 @@ fn index_static(header: &Header) -> Option<(usize, bool)> { "/index.html" => Some((5, true)), _ => Some((4, false)), }, + Header::Protocol(..) => None, Header::Status(ref v) => match u16::from(*v) { 200 => Some((8, true)), 204 => Some((9, true)), diff --git a/third_party/rust/h2/src/hpack/test/fixture.rs b/third_party/rust/h2/src/hpack/test/fixture.rs index 20ee1275b695..3428c39583f6 100644 --- a/third_party/rust/h2/src/hpack/test/fixture.rs +++ b/third_party/rust/h2/src/hpack/test/fixture.rs @@ -1,6 +1,6 @@ use crate::hpack::{Decoder, Encoder, Header}; -use bytes::{buf::BufMutExt, BytesMut}; +use bytes::BytesMut; use hex::FromHex; use serde_json::Value; @@ -107,11 +107,7 @@ fn test_story(story: Value) { }) .collect(); - encoder.encode( - None, - &mut input.clone().into_iter(), - &mut (&mut buf).limit(limit), - ); + encoder.encode(&mut input.clone().into_iter(), &mut buf); decoder .decode(&mut Cursor::new(&mut buf), |e| { @@ -138,6 +134,7 @@ fn key_str(e: &Header) -> &str { Header::Method(..) => ":method", Header::Scheme(..) => ":scheme", Header::Path(..) => ":path", + Header::Protocol(..) => ":protocol", Header::Status(..) 
=> ":status", } } @@ -149,6 +146,7 @@ fn value_str(e: &Header) -> &str { Header::Method(ref m) => m.as_str(), Header::Scheme(ref v) => &**v, Header::Path(ref v) => &**v, + Header::Protocol(ref v) => v.as_str(), Header::Status(ref v) => v.as_str(), } } diff --git a/third_party/rust/h2/src/hpack/test/fuzz.rs b/third_party/rust/h2/src/hpack/test/fuzz.rs index dbf9b3c8f5f4..ad0d47b6b1e3 100644 --- a/third_party/rust/h2/src/hpack/test/fuzz.rs +++ b/third_party/rust/h2/src/hpack/test/fuzz.rs @@ -1,14 +1,15 @@ -use crate::hpack::{Decoder, Encode, Encoder, Header}; +use crate::hpack::{Decoder, Encoder, Header}; use http::header::{HeaderName, HeaderValue}; -use bytes::{buf::BufMutExt, Bytes, BytesMut}; +use bytes::BytesMut; use quickcheck::{Arbitrary, Gen, QuickCheck, TestResult}; -use rand::{Rng, SeedableRng, StdRng}; +use rand::distributions::Slice; +use rand::rngs::StdRng; +use rand::{thread_rng, Rng, SeedableRng}; use std::io::Cursor; -const MIN_CHUNK: usize = 16; const MAX_CHUNK: usize = 2 * 1024; #[test] @@ -36,17 +37,8 @@ fn hpack_fuzz_seeded() { #[derive(Debug, Clone)] struct FuzzHpack { - // The magic seed that makes the test case reproducible - seed: [usize; 4], - // The set of headers to encode / decode frames: Vec, - - // The list of chunk sizes to do it in - chunks: Vec, - - // Number of times reduced - reduced: usize, } #[derive(Debug, Clone)] @@ -56,9 +48,9 @@ struct HeaderFrame { } impl FuzzHpack { - fn new(seed: [usize; 4]) -> FuzzHpack { + fn new(seed: [u8; 32]) -> FuzzHpack { // Seed the RNG - let mut rng = StdRng::from_seed(&seed); + let mut rng = StdRng::from_seed(seed); // Generates a bunch of source headers let mut source: Vec>> = vec![]; @@ -68,12 +60,12 @@ impl FuzzHpack { } // Actual test run headers - let num: usize = rng.gen_range(40, 500); + let num: usize = rng.gen_range(40..500); let mut frames: Vec = vec![]; let mut added = 0; - let skew: i32 = rng.gen_range(1, 5); + let skew: i32 = rng.gen_range(1..5); // Rough number of headers to add while added < num { @@ -82,24 +74,24 @@ impl FuzzHpack { headers: vec![], }; - match rng.gen_range(0, 20) { + match rng.gen_range(0..20) { 0 => { // Two resizes - let high = rng.gen_range(128, MAX_CHUNK * 2); - let low = rng.gen_range(0, high); + let high = rng.gen_range(128..MAX_CHUNK * 2); + let low = rng.gen_range(0..high); frame.resizes.extend(&[low, high]); } 1..=3 => { - frame.resizes.push(rng.gen_range(128, MAX_CHUNK * 2)); + frame.resizes.push(rng.gen_range(128..MAX_CHUNK * 2)); } _ => {} } let mut is_name_required = true; - for _ in 0..rng.gen_range(1, (num - added) + 1) { - let x: f64 = rng.gen_range(0.0, 1.0); + for _ in 0..rng.gen_range(1..(num - added) + 1) { + let x: f64 = rng.gen_range(0.0..1.0); let x = x.powi(skew); let i = (x * source.len() as f64) as usize; @@ -128,23 +120,10 @@ impl FuzzHpack { frames.push(frame); } - // Now, generate the buffer sizes used to encode - let mut chunks = vec![]; - - for _ in 0..rng.gen_range(0, 100) { - chunks.push(rng.gen_range(MIN_CHUNK, MAX_CHUNK)); - } - - FuzzHpack { - seed: seed, - frames: frames, - chunks: chunks, - reduced: 0, - } + FuzzHpack { frames } } fn run(self) { - let mut chunks = self.chunks; let frames = self.frames; let mut expect = vec![]; @@ -173,11 +152,7 @@ impl FuzzHpack { } } - let mut input = frame.headers.into_iter(); - let mut index = None; - - let mut max_chunk = chunks.pop().unwrap_or(MAX_CHUNK); - let mut buf = BytesMut::with_capacity(max_chunk); + let mut buf = BytesMut::new(); if let Some(max) = frame.resizes.iter().max() { 
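// [Editorial sketch, not part of the patch.] The fuzz helpers in this file move
// from the old rand 0.6 calls (gen_range(lo, hi), gen_weighted_bool(n),
// choose(..)) to the rand 0.8 API used throughout these hunks: half-open
// ranges, gen_ratio, and the Slice distribution. The new idioms in isolation:
fn sketch_rand_0_8_idioms() {
    use rand::distributions::Slice;
    use rand::{rngs::StdRng, Rng, SeedableRng};

    let mut rng = StdRng::from_seed([0u8; 32]);
    let n: usize = rng.gen_range(40..500); // was rng.gen_range(40, 500)
    let one_in_ten: bool = rng.gen_ratio(1, 10); // was gen_weighted_bool(10)
    let letter: u8 = *rng.sample(Slice::new(b"ABCDEF").unwrap()); // was choose(..)
    let _ = (n, one_in_ten, letter);
}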
decoder.queue_size_update(*max); @@ -188,25 +163,7 @@ impl FuzzHpack { encoder.update_max_size(*resize); } - loop { - match encoder.encode(index.take(), &mut input, &mut (&mut buf).limit(max_chunk)) { - Encode::Full => break, - Encode::Partial(i) => { - index = Some(i); - - // Decode the chunk! - decoder - .decode(&mut Cursor::new(&mut buf), |h| { - let e = expect.remove(0); - assert_eq!(h, e); - }) - .expect("partial decode"); - - max_chunk = chunks.pop().unwrap_or(MAX_CHUNK); - buf = BytesMut::with_capacity(max_chunk); - } - } - } + encoder.encode(frame.headers, &mut buf); // Decode the chunk! decoder @@ -222,31 +179,31 @@ impl FuzzHpack { } impl Arbitrary for FuzzHpack { - fn arbitrary(g: &mut G) -> Self { - FuzzHpack::new(quickcheck::Rng::gen(g)) + fn arbitrary(_: &mut Gen) -> Self { + FuzzHpack::new(thread_rng().gen()) } } fn gen_header(g: &mut StdRng) -> Header> { use http::{Method, StatusCode}; - if g.gen_weighted_bool(10) { - match g.next_u32() % 5 { + if g.gen_ratio(1, 10) { + match g.gen_range(0u32..5) { 0 => { let value = gen_string(g, 4, 20); Header::Authority(to_shared(value)) } 1 => { - let method = match g.next_u32() % 6 { + let method = match g.gen_range(0u32..6) { 0 => Method::GET, 1 => Method::POST, 2 => Method::PUT, 3 => Method::PATCH, 4 => Method::DELETE, 5 => { - let n: usize = g.gen_range(3, 7); + let n: usize = g.gen_range(3..7); let bytes: Vec = (0..n) - .map(|_| g.choose(b"ABCDEFGHIJKLMNOPQRSTUVWXYZ").unwrap().clone()) + .map(|_| *g.sample(Slice::new(b"ABCDEFGHIJKLMNOPQRSTUVWXYZ").unwrap())) .collect(); Method::from_bytes(&bytes).unwrap() @@ -257,7 +214,7 @@ fn gen_header(g: &mut StdRng) -> Header> { Header::Method(method) } 2 => { - let value = match g.next_u32() % 2 { + let value = match g.gen_range(0u32..2) { 0 => "http", 1 => "https", _ => unreachable!(), @@ -266,7 +223,7 @@ fn gen_header(g: &mut StdRng) -> Header> { Header::Scheme(to_shared(value.to_string())) } 3 => { - let value = match g.next_u32() % 100 { + let value = match g.gen_range(0u32..100) { 0 => "/".to_string(), 1 => "/index.html".to_string(), _ => gen_string(g, 2, 20), @@ -282,14 +239,14 @@ fn gen_header(g: &mut StdRng) -> Header> { _ => unreachable!(), } } else { - let name = if g.gen_weighted_bool(10) { + let name = if g.gen_ratio(1, 10) { None } else { Some(gen_header_name(g)) }; let mut value = gen_header_value(g); - if g.gen_weighted_bool(30) { + if g.gen_ratio(1, 30) { value.set_sensitive(true); } @@ -300,84 +257,86 @@ fn gen_header(g: &mut StdRng) -> Header> { fn gen_header_name(g: &mut StdRng) -> HeaderName { use http::header; - if g.gen_weighted_bool(2) { - g.choose(&[ - header::ACCEPT, - header::ACCEPT_CHARSET, - header::ACCEPT_ENCODING, - header::ACCEPT_LANGUAGE, - header::ACCEPT_RANGES, - header::ACCESS_CONTROL_ALLOW_CREDENTIALS, - header::ACCESS_CONTROL_ALLOW_HEADERS, - header::ACCESS_CONTROL_ALLOW_METHODS, - header::ACCESS_CONTROL_ALLOW_ORIGIN, - header::ACCESS_CONTROL_EXPOSE_HEADERS, - header::ACCESS_CONTROL_MAX_AGE, - header::ACCESS_CONTROL_REQUEST_HEADERS, - header::ACCESS_CONTROL_REQUEST_METHOD, - header::AGE, - header::ALLOW, - header::ALT_SVC, - header::AUTHORIZATION, - header::CACHE_CONTROL, - header::CONNECTION, - header::CONTENT_DISPOSITION, - header::CONTENT_ENCODING, - header::CONTENT_LANGUAGE, - header::CONTENT_LENGTH, - header::CONTENT_LOCATION, - header::CONTENT_RANGE, - header::CONTENT_SECURITY_POLICY, - header::CONTENT_SECURITY_POLICY_REPORT_ONLY, - header::CONTENT_TYPE, - header::COOKIE, - header::DNT, - header::DATE, - header::ETAG, - header::EXPECT, - 
header::EXPIRES, - header::FORWARDED, - header::FROM, - header::HOST, - header::IF_MATCH, - header::IF_MODIFIED_SINCE, - header::IF_NONE_MATCH, - header::IF_RANGE, - header::IF_UNMODIFIED_SINCE, - header::LAST_MODIFIED, - header::LINK, - header::LOCATION, - header::MAX_FORWARDS, - header::ORIGIN, - header::PRAGMA, - header::PROXY_AUTHENTICATE, - header::PROXY_AUTHORIZATION, - header::PUBLIC_KEY_PINS, - header::PUBLIC_KEY_PINS_REPORT_ONLY, - header::RANGE, - header::REFERER, - header::REFERRER_POLICY, - header::REFRESH, - header::RETRY_AFTER, - header::SERVER, - header::SET_COOKIE, - header::STRICT_TRANSPORT_SECURITY, - header::TE, - header::TRAILER, - header::TRANSFER_ENCODING, - header::USER_AGENT, - header::UPGRADE, - header::UPGRADE_INSECURE_REQUESTS, - header::VARY, - header::VIA, - header::WARNING, - header::WWW_AUTHENTICATE, - header::X_CONTENT_TYPE_OPTIONS, - header::X_DNS_PREFETCH_CONTROL, - header::X_FRAME_OPTIONS, - header::X_XSS_PROTECTION, - ]) - .unwrap() + if g.gen_ratio(1, 2) { + g.sample( + Slice::new(&[ + header::ACCEPT, + header::ACCEPT_CHARSET, + header::ACCEPT_ENCODING, + header::ACCEPT_LANGUAGE, + header::ACCEPT_RANGES, + header::ACCESS_CONTROL_ALLOW_CREDENTIALS, + header::ACCESS_CONTROL_ALLOW_HEADERS, + header::ACCESS_CONTROL_ALLOW_METHODS, + header::ACCESS_CONTROL_ALLOW_ORIGIN, + header::ACCESS_CONTROL_EXPOSE_HEADERS, + header::ACCESS_CONTROL_MAX_AGE, + header::ACCESS_CONTROL_REQUEST_HEADERS, + header::ACCESS_CONTROL_REQUEST_METHOD, + header::AGE, + header::ALLOW, + header::ALT_SVC, + header::AUTHORIZATION, + header::CACHE_CONTROL, + header::CONNECTION, + header::CONTENT_DISPOSITION, + header::CONTENT_ENCODING, + header::CONTENT_LANGUAGE, + header::CONTENT_LENGTH, + header::CONTENT_LOCATION, + header::CONTENT_RANGE, + header::CONTENT_SECURITY_POLICY, + header::CONTENT_SECURITY_POLICY_REPORT_ONLY, + header::CONTENT_TYPE, + header::COOKIE, + header::DNT, + header::DATE, + header::ETAG, + header::EXPECT, + header::EXPIRES, + header::FORWARDED, + header::FROM, + header::HOST, + header::IF_MATCH, + header::IF_MODIFIED_SINCE, + header::IF_NONE_MATCH, + header::IF_RANGE, + header::IF_UNMODIFIED_SINCE, + header::LAST_MODIFIED, + header::LINK, + header::LOCATION, + header::MAX_FORWARDS, + header::ORIGIN, + header::PRAGMA, + header::PROXY_AUTHENTICATE, + header::PROXY_AUTHORIZATION, + header::PUBLIC_KEY_PINS, + header::PUBLIC_KEY_PINS_REPORT_ONLY, + header::RANGE, + header::REFERER, + header::REFERRER_POLICY, + header::REFRESH, + header::RETRY_AFTER, + header::SERVER, + header::SET_COOKIE, + header::STRICT_TRANSPORT_SECURITY, + header::TE, + header::TRAILER, + header::TRANSFER_ENCODING, + header::USER_AGENT, + header::UPGRADE, + header::UPGRADE_INSECURE_REQUESTS, + header::VARY, + header::VIA, + header::WARNING, + header::WWW_AUTHENTICATE, + header::X_CONTENT_TYPE_OPTIONS, + header::X_DNS_PREFETCH_CONTROL, + header::X_FRAME_OPTIONS, + header::X_XSS_PROTECTION, + ]) + .unwrap(), + ) .clone() } else { let value = gen_string(g, 1, 25); @@ -394,9 +353,7 @@ fn gen_string(g: &mut StdRng, min: usize, max: usize) -> String { let bytes: Vec<_> = (min..max) .map(|_| { // Chars to pick from - g.choose(b"ABCDEFGHIJKLMNOPQRSTUVabcdefghilpqrstuvwxyz----") - .unwrap() - .clone() + *g.sample(Slice::new(b"ABCDEFGHIJKLMNOPQRSTUVabcdefghilpqrstuvwxyz----").unwrap()) }) .collect(); @@ -404,6 +361,5 @@ fn gen_string(g: &mut StdRng, min: usize, max: usize) -> String { } fn to_shared(src: String) -> crate::hpack::BytesStr { - let b: Bytes = src.into(); - unsafe { 
crate::hpack::BytesStr::from_utf8_unchecked(b) } + crate::hpack::BytesStr::from(src.as_str()) } diff --git a/third_party/rust/h2/src/lib.rs b/third_party/rust/h2/src/lib.rs index 8fd77b39b2a4..be42b100e0eb 100644 --- a/third_party/rust/h2/src/lib.rs +++ b/third_party/rust/h2/src/lib.rs @@ -1,6 +1,6 @@ -//! An asynchronous, HTTP/2.0 server and client implementation. +//! An asynchronous, HTTP/2 server and client implementation. //! -//! This library implements the [HTTP/2.0] specification. The implementation is +//! This library implements the [HTTP/2] specification. The implementation is //! asynchronous, using [futures] as the basis for the API. The implementation //! is also decoupled from TCP or TLS details. The user must handle ALPN and //! HTTP/1.1 upgrades themselves. @@ -11,7 +11,7 @@ //! //! ```toml //! [dependencies] -//! h2 = "0.2" +//! h2 = "0.3" //! ``` //! //! # Layout @@ -24,19 +24,19 @@ //! # Handshake //! //! Both the client and the server require a connection to already be in a state -//! ready to start the HTTP/2.0 handshake. This library does not provide +//! ready to start the HTTP/2 handshake. This library does not provide //! facilities to do this. //! -//! There are three ways to reach an appropriate state to start the HTTP/2.0 +//! There are three ways to reach an appropriate state to start the HTTP/2 //! handshake. //! //! * Opening an HTTP/1.1 connection and performing an [upgrade]. //! * Opening a connection with TLS and use ALPN to negotiate the protocol. //! * Open a connection with prior knowledge, i.e. both the client and the //! server assume that the connection is immediately ready to start the -//! HTTP/2.0 handshake once opened. +//! HTTP/2 handshake once opened. //! -//! Once the connection is ready to start the HTTP/2.0 handshake, it can be +//! Once the connection is ready to start the HTTP/2 handshake, it can be //! passed to [`server::handshake`] or [`client::handshake`]. At this point, the //! library will start the handshake process, which consists of: //! @@ -48,10 +48,10 @@ //! //! # Flow control //! -//! [Flow control] is a fundamental feature of HTTP/2.0. The `h2` library +//! [Flow control] is a fundamental feature of HTTP/2. The `h2` library //! exposes flow control to the user. //! -//! An HTTP/2.0 client or server may not send unlimited data to the peer. When a +//! An HTTP/2 client or server may not send unlimited data to the peer. When a //! stream is initiated, both the client and the server are provided with an //! initial window size for that stream. A window size is the number of bytes //! the endpoint can send to the peer. At any point in time, the peer may @@ -66,7 +66,7 @@ //! Managing flow control for outbound data is done through [`SendStream`]. See //! the struct level documentation for those two types for more details. //! -//! [HTTP/2.0]: https://http2.github.io/ +//! [HTTP/2]: https://http2.github.io/ //! [futures]: https://docs.rs/futures/ //! [`client`]: client/index.html //! [`server`]: server/index.html @@ -78,16 +78,16 @@ //! [`server::handshake`]: server/fn.handshake.html //! [`client::handshake`]: client/fn.handshake.html -#![doc(html_root_url = "https://docs.rs/h2/0.2.5")] +#![doc(html_root_url = "https://docs.rs/h2/0.3.13")] #![deny(missing_debug_implementations, missing_docs)] #![cfg_attr(test, deny(warnings))] macro_rules! 
proto_err { (conn: $($msg:tt)+) => { - log::debug!("connection error PROTOCOL_ERROR -- {};", format_args!($($msg)+)) + tracing::debug!("connection error PROTOCOL_ERROR -- {};", format_args!($($msg)+)) }; (stream: $($msg:tt)+) => { - log::debug!("stream error PROTOCOL_ERROR -- {};", format_args!($($msg)+)) + tracing::debug!("stream error PROTOCOL_ERROR -- {};", format_args!($($msg)+)) }; } @@ -104,8 +104,14 @@ macro_rules! ready { mod codec; mod error; mod hpack; + +#[cfg(not(feature = "unstable"))] mod proto; +#[cfg(feature = "unstable")] +#[allow(missing_docs)] +pub mod proto; + #[cfg(not(feature = "unstable"))] mod frame; @@ -114,19 +120,24 @@ mod frame; pub mod frame; pub mod client; +pub mod ext; pub mod server; mod share; +#[cfg(fuzzing)] +#[cfg_attr(feature = "unstable", allow(missing_docs))] +pub mod fuzz_bridge; + pub use crate::error::{Error, Reason}; pub use crate::share::{FlowControl, Ping, PingPong, Pong, RecvStream, SendStream, StreamId}; #[cfg(feature = "unstable")] -pub use codec::{Codec, RecvError, SendError, UserError}; +pub use codec::{Codec, SendError, UserError}; use std::task::Poll; // TODO: Get rid of this trait once https://github.com/rust-lang/rust/pull/63512 -// is stablized. +// is stabilized. trait PollExt { /// Changes the success value of this `Poll` with the closure provided. fn map_ok_(self, f: F) -> Poll>> diff --git a/third_party/rust/h2/src/proto/connection.rs b/third_party/rust/h2/src/proto/connection.rs index 49c123efaba0..cd011a1d583c 100644 --- a/third_party/rust/h2/src/proto/connection.rs +++ b/third_party/rust/h2/src/proto/connection.rs @@ -1,6 +1,6 @@ -use crate::codec::{RecvError, UserError}; +use crate::codec::UserError; use crate::frame::{Reason, StreamId}; -use crate::{client, frame, proto, server}; +use crate::{client, frame, server}; use crate::frame::DEFAULT_INITIAL_WINDOW_SIZE; use crate::proto::*; @@ -17,6 +17,19 @@ use tokio::io::{AsyncRead, AsyncWrite}; /// An H2 connection #[derive(Debug)] pub(crate) struct Connection +where + P: Peer, +{ + /// Read / write frame values + codec: Codec>, + + inner: ConnectionInner, +} + +// Extracted part of `Connection` which does not depend on `T`. Reduces the amount of duplicated +// method instantiations. +#[derive(Debug)] +struct ConnectionInner where P: Peer, { @@ -27,10 +40,7 @@ where /// /// This exists separately from State in order to support /// graceful shutdown. - error: Option, - - /// Read / write frame values - codec: Codec>, + error: Option, /// Pending GOAWAY frames to write. go_away: GoAway, @@ -44,14 +54,30 @@ where /// Stream state handler streams: Streams, + /// A `tracing` span tracking the lifetime of the connection. + span: tracing::Span, + /// Client or server _phantom: PhantomData
<P>
, } +struct DynConnection<'a, B: Buf = Bytes> { + state: &'a mut State, + + go_away: &'a mut GoAway, + + streams: DynStreams<'a, B>, + + error: &'a mut Option, + + ping_pong: &'a mut PingPong, +} + #[derive(Debug, Clone)] pub(crate) struct Config { pub next_stream_id: StreamId, pub initial_max_send_streams: usize, + pub max_send_buffer_size: usize, pub reset_stream_duration: Duration, pub reset_stream_max: usize, pub settings: frame::Settings, @@ -63,10 +89,10 @@ enum State { Open, /// The codec must be flushed - Closing(Reason), + Closing(Reason, Initiator), /// In a closed state - Closed(Reason), + Closed(Reason, Initiator), } impl Connection @@ -76,58 +102,92 @@ where B: Buf, { pub fn new(codec: Codec>, config: Config) -> Connection { - let streams = Streams::new(streams::Config { - local_init_window_sz: config - .settings - .initial_window_size() - .unwrap_or(DEFAULT_INITIAL_WINDOW_SIZE), - initial_max_send_streams: config.initial_max_send_streams, - local_next_stream_id: config.next_stream_id, - local_push_enabled: config.settings.is_push_enabled(), - local_reset_duration: config.reset_stream_duration, - local_reset_max: config.reset_stream_max, - remote_init_window_sz: DEFAULT_INITIAL_WINDOW_SIZE, - remote_max_initiated: config - .settings - .max_concurrent_streams() - .map(|max| max as usize), - }); + fn streams_config(config: &Config) -> streams::Config { + streams::Config { + local_init_window_sz: config + .settings + .initial_window_size() + .unwrap_or(DEFAULT_INITIAL_WINDOW_SIZE), + initial_max_send_streams: config.initial_max_send_streams, + local_max_buffer_size: config.max_send_buffer_size, + local_next_stream_id: config.next_stream_id, + local_push_enabled: config.settings.is_push_enabled().unwrap_or(true), + extended_connect_protocol_enabled: config + .settings + .is_extended_connect_protocol_enabled() + .unwrap_or(false), + local_reset_duration: config.reset_stream_duration, + local_reset_max: config.reset_stream_max, + remote_init_window_sz: DEFAULT_INITIAL_WINDOW_SIZE, + remote_max_initiated: config + .settings + .max_concurrent_streams() + .map(|max| max as usize), + } + } + let streams = Streams::new(streams_config(&config)); Connection { - state: State::Open, - error: None, codec, - go_away: GoAway::new(), - ping_pong: PingPong::new(), - settings: Settings::new(config.settings), - streams, - _phantom: PhantomData, + inner: ConnectionInner { + state: State::Open, + error: None, + go_away: GoAway::new(), + ping_pong: PingPong::new(), + settings: Settings::new(config.settings), + streams, + span: tracing::debug_span!("Connection", peer = %P::NAME), + _phantom: PhantomData, + }, } } /// connection flow control pub(crate) fn set_target_window_size(&mut self, size: WindowSize) { - self.streams.set_target_connection_window_size(size); + self.inner.streams.set_target_connection_window_size(size); } /// Send a new SETTINGS frame with an updated initial window size. pub(crate) fn set_initial_window_size(&mut self, size: WindowSize) -> Result<(), UserError> { let mut settings = frame::Settings::default(); settings.set_initial_window_size(Some(size)); - self.settings.send_settings(settings) + self.inner.settings.send_settings(settings) + } + + /// Send a new SETTINGS frame with extended CONNECT protocol enabled. 
+ pub(crate) fn set_enable_connect_protocol(&mut self) -> Result<(), UserError> { + let mut settings = frame::Settings::default(); + settings.set_enable_connect_protocol(Some(1)); + self.inner.settings.send_settings(settings) + } + + /// Returns the maximum number of concurrent streams that may be initiated + /// by this peer. + pub(crate) fn max_send_streams(&self) -> usize { + self.inner.streams.max_send_streams() + } + + /// Returns the maximum number of concurrent streams that may be initiated + /// by the remote peer. + pub(crate) fn max_recv_streams(&self) -> usize { + self.inner.streams.max_recv_streams() } /// Returns `Ready` when the connection is ready to receive a frame. /// - /// Returns `RecvError` as this may raise errors that are caused by delayed + /// Returns `Error` as this may raise errors that are caused by delayed /// processing of received frames. - fn poll_ready(&mut self, cx: &mut Context) -> Poll> { + fn poll_ready(&mut self, cx: &mut Context) -> Poll> { + let _e = self.inner.span.enter(); + let span = tracing::trace_span!("poll_ready"); + let _e = span.enter(); // The order of these calls don't really matter too much - ready!(self.ping_pong.send_pending_pong(cx, &mut self.codec))?; - ready!(self.ping_pong.send_pending_ping(cx, &mut self.codec))?; + ready!(self.inner.ping_pong.send_pending_pong(cx, &mut self.codec))?; + ready!(self.inner.ping_pong.send_pending_ping(cx, &mut self.codec))?; ready!(self + .inner .settings - .poll_send(cx, &mut self.codec, &mut self.streams))?; - ready!(self.streams.send_pending_refusal(cx, &mut self.codec))?; + .poll_send(cx, &mut self.codec, &mut self.inner.streams))?; + ready!(self.inner.streams.send_pending_refusal(cx, &mut self.codec))?; Poll::Ready(Ok(())) } @@ -137,50 +197,31 @@ where /// This will return `Some(reason)` if the connection should be closed /// afterwards. If this is a graceful shutdown, this returns `None`. fn poll_go_away(&mut self, cx: &mut Context) -> Poll>> { - self.go_away.send_pending_go_away(cx, &mut self.codec) - } - - fn go_away(&mut self, id: StreamId, e: Reason) { - let frame = frame::GoAway::new(id, e); - self.streams.send_go_away(id); - self.go_away.go_away(frame); - } - - fn go_away_now(&mut self, e: Reason) { - let last_processed_id = self.streams.last_processed_id(); - let frame = frame::GoAway::new(last_processed_id, e); - self.go_away.go_away_now(frame); + self.inner.go_away.send_pending_go_away(cx, &mut self.codec) } pub fn go_away_from_user(&mut self, e: Reason) { - let last_processed_id = self.streams.last_processed_id(); - let frame = frame::GoAway::new(last_processed_id, e); - self.go_away.go_away_from_user(frame); - - // Notify all streams of reason we're abruptly closing. - self.streams.recv_err(&proto::Error::Proto(e)); + self.inner.as_dyn().go_away_from_user(e) } - fn take_error(&mut self, ours: Reason) -> Poll> { - let reason = if let Some(theirs) = self.error.take() { - match (ours, theirs) { - // If either side reported an error, return that - // to the user. - (Reason::NO_ERROR, err) | (err, Reason::NO_ERROR) => err, - // If both sides reported an error, give their - // error back to th user. We assume our error - // was a consequence of their error, and less - // important. 
- (_, theirs) => theirs, - } - } else { - ours - }; + fn take_error(&mut self, ours: Reason, initiator: Initiator) -> Result<(), Error> { + let (debug_data, theirs) = self + .inner + .error + .take() + .as_ref() + .map_or((Bytes::new(), Reason::NO_ERROR), |frame| { + (frame.debug_data().clone(), frame.reason()) + }); - if reason == Reason::NO_ERROR { - Poll::Ready(Ok(())) - } else { - Poll::Ready(Err(proto::Error::Proto(reason))) + match (ours, theirs) { + (Reason::NO_ERROR, Reason::NO_ERROR) => return Ok(()), + (ours, Reason::NO_ERROR) => Err(Error::GoAway(Bytes::new(), ours, initiator)), + // If both sides reported an error, give their + // error back to th user. We assume our error + // was a consequence of their error, and less + // important. + (_, theirs) => Err(Error::remote_go_away(debug_data, theirs)), } } @@ -189,102 +230,71 @@ where pub fn maybe_close_connection_if_no_streams(&mut self) { // If we poll() and realize that there are no streams or references // then we can close the connection by transitioning to GOAWAY - if !self.streams.has_streams_or_other_references() { - self.go_away_now(Reason::NO_ERROR); + if !self.inner.streams.has_streams_or_other_references() { + self.inner.as_dyn().go_away_now(Reason::NO_ERROR); } } pub(crate) fn take_user_pings(&mut self) -> Option { - self.ping_pong.take_user_pings() + self.inner.ping_pong.take_user_pings() } /// Advances the internal state of the connection. - pub fn poll(&mut self, cx: &mut Context) -> Poll> { - use crate::codec::RecvError::*; + pub fn poll(&mut self, cx: &mut Context) -> Poll> { + // XXX(eliza): cloning the span is unfortunately necessary here in + // order to placate the borrow checker — `self` is mutably borrowed by + // `poll2`, which means that we can't borrow `self.span` to enter it. + // The clone is just an atomic ref bump. + let span = self.inner.span.clone(); + let _e = span.enter(); + let span = tracing::trace_span!("poll"); + let _e = span.enter(); loop { + tracing::trace!(connection.state = ?self.inner.state); // TODO: probably clean up this glob of code - match self.state { + match self.inner.state { // When open, continue to poll a frame State::Open => { - match self.poll2(cx) { - // The connection has shutdown normally - Poll::Ready(Ok(())) => self.state = State::Closing(Reason::NO_ERROR), + let result = match self.poll2(cx) { + Poll::Ready(result) => result, // The connection is not ready to make progress Poll::Pending => { // Ensure all window updates have been sent. // // This will also handle flushing `self.codec` - ready!(self.streams.poll_complete(cx, &mut self.codec))?; + ready!(self.inner.streams.poll_complete(cx, &mut self.codec))?; - if (self.error.is_some() || self.go_away.should_close_on_idle()) - && !self.streams.has_streams() + if (self.inner.error.is_some() + || self.inner.go_away.should_close_on_idle()) + && !self.inner.streams.has_streams() { - self.go_away_now(Reason::NO_ERROR); + self.inner.as_dyn().go_away_now(Reason::NO_ERROR); continue; } return Poll::Pending; } - // Attempting to read a frame resulted in a connection level - // error. This is handled by setting a GOAWAY frame followed by - // terminating the connection. - Poll::Ready(Err(Connection(e))) => { - log::debug!("Connection::poll; connection error={:?}", e); + }; - // We may have already sent a GOAWAY for this error, - // if so, don't send another, just flush and close up. 
- if let Some(reason) = self.go_away.going_away_reason() { - if reason == e { - log::trace!(" -> already going away"); - self.state = State::Closing(e); - continue; - } - } - - // Reset all active streams - self.streams.recv_err(&e.into()); - self.go_away_now(e); - } - // Attempting to read a frame resulted in a stream level error. - // This is handled by resetting the frame then trying to read - // another frame. - Poll::Ready(Err(Stream { id, reason })) => { - log::trace!("stream error; id={:?}; reason={:?}", id, reason); - self.streams.send_reset(id, reason); - } - // Attempting to read a frame resulted in an I/O error. All - // active streams must be reset. - // - // TODO: Are I/O errors recoverable? - Poll::Ready(Err(Io(e))) => { - log::debug!("Connection::poll; IO error={:?}", e); - let e = e.into(); - - // Reset all active streams - self.streams.recv_err(&e); - - // Return the error - return Poll::Ready(Err(e)); - } - } + self.inner.as_dyn().handle_poll2_result(result)? } - State::Closing(reason) => { - log::trace!("connection closing after flush"); + State::Closing(reason, initiator) => { + tracing::trace!("connection closing after flush"); // Flush/shutdown the codec ready!(self.codec.shutdown(cx))?; // Transition the state to error - self.state = State::Closed(reason); + self.inner.state = State::Closed(reason, initiator); + } + State::Closed(reason, initiator) => { + return Poll::Ready(self.take_error(reason, initiator)); } - State::Closed(reason) => return self.take_error(reason), } } } - fn poll2(&mut self, cx: &mut Context) -> Poll> { - use crate::frame::Frame::*; - + fn poll2(&mut self, cx: &mut Context) -> Poll> { // This happens outside of the loop to prevent needing to do a clock // check and then comparison of the queue possibly multiple times a // second (and thus, the clock wouldn't have changed enough to matter). @@ -297,13 +307,13 @@ where // - poll_go_away may buffer a graceful shutdown GOAWAY frame // - If it has, we've also added a PING to be sent in poll_ready if let Some(reason) = ready!(self.poll_go_away(cx)?) { - if self.go_away.should_close_now() { - if self.go_away.is_user_initiated() { + if self.inner.go_away.should_close_now() { + if self.inner.go_away.is_user_initiated() { // A user initiated abrupt shutdown shouldn't return // the same error back to the user. return Poll::Ready(Ok(())); } else { - return Poll::Ready(Err(RecvError::Connection(reason))); + return Poll::Ready(Err(Error::library_go_away(reason))); } } // Only NO_ERROR should be waiting for idle @@ -315,61 +325,20 @@ where } ready!(self.poll_ready(cx))?; - match ready!(Pin::new(&mut self.codec).poll_next(cx)?) { - Some(Headers(frame)) => { - log::trace!("recv HEADERS; frame={:?}", frame); - self.streams.recv_headers(frame)?; + match self + .inner + .as_dyn() + .recv_frame(ready!(Pin::new(&mut self.codec).poll_next(cx)?))? 
+ { + ReceivedFrame::Settings(frame) => { + self.inner.settings.recv_settings( + frame, + &mut self.codec, + &mut self.inner.streams, + )?; } - Some(Data(frame)) => { - log::trace!("recv DATA; frame={:?}", frame); - self.streams.recv_data(frame)?; - } - Some(Reset(frame)) => { - log::trace!("recv RST_STREAM; frame={:?}", frame); - self.streams.recv_reset(frame)?; - } - Some(PushPromise(frame)) => { - log::trace!("recv PUSH_PROMISE; frame={:?}", frame); - self.streams.recv_push_promise(frame)?; - } - Some(Settings(frame)) => { - log::trace!("recv SETTINGS; frame={:?}", frame); - self.settings - .recv_settings(frame, &mut self.codec, &mut self.streams)?; - } - Some(GoAway(frame)) => { - log::trace!("recv GOAWAY; frame={:?}", frame); - // This should prevent starting new streams, - // but should allow continuing to process current streams - // until they are all EOS. Once they are, State should - // transition to GoAway. - self.streams.recv_go_away(&frame)?; - self.error = Some(frame.reason()); - } - Some(Ping(frame)) => { - log::trace!("recv PING; frame={:?}", frame); - let status = self.ping_pong.recv_ping(frame); - if status.is_shutdown() { - assert!( - self.go_away.is_going_away(), - "received unexpected shutdown ping" - ); - - let last_processed_id = self.streams.last_processed_id(); - self.go_away(last_processed_id, Reason::NO_ERROR); - } - } - Some(WindowUpdate(frame)) => { - log::trace!("recv WINDOW_UPDATE; frame={:?}", frame); - self.streams.recv_window_update(frame)?; - } - Some(Priority(frame)) => { - log::trace!("recv PRIORITY; frame={:?}", frame); - // TODO: handle - } - None => { - log::trace!("codec closed"); - self.streams.recv_eof(false).expect("mutex poisoned"); + ReceivedFrame::Continue => (), + ReceivedFrame::Done => { return Poll::Ready(Ok(())); } } @@ -377,17 +346,193 @@ where } fn clear_expired_reset_streams(&mut self) { - self.streams.clear_expired_reset_streams(); + self.inner.streams.clear_expired_reset_streams(); } } +impl ConnectionInner +where + P: Peer, + B: Buf, +{ + fn as_dyn(&mut self) -> DynConnection<'_, B> { + let ConnectionInner { + state, + go_away, + streams, + error, + ping_pong, + .. + } = self; + let streams = streams.as_dyn(); + DynConnection { + state, + go_away, + streams, + error, + ping_pong, + } + } +} + +impl DynConnection<'_, B> +where + B: Buf, +{ + fn go_away(&mut self, id: StreamId, e: Reason) { + let frame = frame::GoAway::new(id, e); + self.streams.send_go_away(id); + self.go_away.go_away(frame); + } + + fn go_away_now(&mut self, e: Reason) { + let last_processed_id = self.streams.last_processed_id(); + let frame = frame::GoAway::new(last_processed_id, e); + self.go_away.go_away_now(frame); + } + + fn go_away_from_user(&mut self, e: Reason) { + let last_processed_id = self.streams.last_processed_id(); + let frame = frame::GoAway::new(last_processed_id, e); + self.go_away.go_away_from_user(frame); + + // Notify all streams of reason we're abruptly closing. + self.streams.handle_error(Error::user_go_away(e)); + } + + fn handle_poll2_result(&mut self, result: Result<(), Error>) -> Result<(), Error> { + match result { + // The connection has shutdown normally + Ok(()) => { + *self.state = State::Closing(Reason::NO_ERROR, Initiator::Library); + Ok(()) + } + // Attempting to read a frame resulted in a connection level + // error. This is handled by setting a GOAWAY frame followed by + // terminating the connection. 
+ Err(Error::GoAway(debug_data, reason, initiator)) => { + let e = Error::GoAway(debug_data, reason, initiator); + tracing::debug!(error = ?e, "Connection::poll; connection error"); + + // We may have already sent a GOAWAY for this error, + // if so, don't send another, just flush and close up. + if self + .go_away + .going_away() + .map_or(false, |frame| frame.reason() == reason) + { + tracing::trace!(" -> already going away"); + *self.state = State::Closing(reason, initiator); + return Ok(()); + } + + // Reset all active streams + self.streams.handle_error(e); + self.go_away_now(reason); + Ok(()) + } + // Attempting to read a frame resulted in a stream level error. + // This is handled by resetting the frame then trying to read + // another frame. + Err(Error::Reset(id, reason, initiator)) => { + debug_assert_eq!(initiator, Initiator::Library); + tracing::trace!(?id, ?reason, "stream error"); + self.streams.send_reset(id, reason); + Ok(()) + } + // Attempting to read a frame resulted in an I/O error. All + // active streams must be reset. + // + // TODO: Are I/O errors recoverable? + Err(Error::Io(e, inner)) => { + tracing::debug!(error = ?e, "Connection::poll; IO error"); + let e = Error::Io(e, inner); + + // Reset all active streams + self.streams.handle_error(e.clone()); + + // Return the error + Err(e) + } + } + } + + fn recv_frame(&mut self, frame: Option) -> Result { + use crate::frame::Frame::*; + match frame { + Some(Headers(frame)) => { + tracing::trace!(?frame, "recv HEADERS"); + self.streams.recv_headers(frame)?; + } + Some(Data(frame)) => { + tracing::trace!(?frame, "recv DATA"); + self.streams.recv_data(frame)?; + } + Some(Reset(frame)) => { + tracing::trace!(?frame, "recv RST_STREAM"); + self.streams.recv_reset(frame)?; + } + Some(PushPromise(frame)) => { + tracing::trace!(?frame, "recv PUSH_PROMISE"); + self.streams.recv_push_promise(frame)?; + } + Some(Settings(frame)) => { + tracing::trace!(?frame, "recv SETTINGS"); + return Ok(ReceivedFrame::Settings(frame)); + } + Some(GoAway(frame)) => { + tracing::trace!(?frame, "recv GOAWAY"); + // This should prevent starting new streams, + // but should allow continuing to process current streams + // until they are all EOS. Once they are, State should + // transition to GoAway. + self.streams.recv_go_away(&frame)?; + *self.error = Some(frame); + } + Some(Ping(frame)) => { + tracing::trace!(?frame, "recv PING"); + let status = self.ping_pong.recv_ping(frame); + if status.is_shutdown() { + assert!( + self.go_away.is_going_away(), + "received unexpected shutdown ping" + ); + + let last_processed_id = self.streams.last_processed_id(); + self.go_away(last_processed_id, Reason::NO_ERROR); + } + } + Some(WindowUpdate(frame)) => { + tracing::trace!(?frame, "recv WINDOW_UPDATE"); + self.streams.recv_window_update(frame)?; + } + Some(Priority(frame)) => { + tracing::trace!(?frame, "recv PRIORITY"); + // TODO: handle + } + None => { + tracing::trace!("codec closed"); + self.streams.recv_eof(false).expect("mutex poisoned"); + return Ok(ReceivedFrame::Done); + } + } + Ok(ReceivedFrame::Continue) + } +} + +enum ReceivedFrame { + Settings(frame::Settings), + Continue, + Done, +} + impl Connection where T: AsyncRead + AsyncWrite, B: Buf, { pub(crate) fn streams(&self) -> &Streams { - &self.streams + &self.inner.streams } } @@ -397,12 +542,12 @@ where B: Buf, { pub fn next_incoming(&mut self) -> Option> { - self.streams.next_incoming() + self.inner.streams.next_incoming() } // Graceful shutdown only makes sense for server peers. 
pub fn go_away_gracefully(&mut self) { - if self.go_away.is_going_away() { + if self.inner.go_away.is_going_away() { // No reason to start a new one. return; } @@ -418,11 +563,11 @@ where // > send another GOAWAY frame with an updated last stream identifier. // > This ensures that a connection can be cleanly shut down without // > losing requests. - self.go_away(StreamId::MAX, Reason::NO_ERROR); + self.inner.as_dyn().go_away(StreamId::MAX, Reason::NO_ERROR); // We take the advice of waiting 1 RTT literally, and wait // for a pong before proceeding. - self.ping_pong.ping_shutdown(); + self.inner.ping_pong.ping_shutdown(); } } @@ -433,6 +578,6 @@ where { fn drop(&mut self) { // Ignore errors as this indicates that the mutex is poisoned. - let _ = self.streams.recv_eof(true); + let _ = self.inner.streams.recv_eof(true); } } diff --git a/third_party/rust/h2/src/proto/error.rs b/third_party/rust/h2/src/proto/error.rs index c3ee20d03d02..197237263726 100644 --- a/third_party/rust/h2/src/proto/error.rs +++ b/third_party/rust/h2/src/proto/error.rs @@ -1,53 +1,87 @@ -use crate::codec::{RecvError, SendError}; -use crate::frame::Reason; +use crate::codec::SendError; +use crate::frame::{Reason, StreamId}; +use bytes::Bytes; +use std::fmt; use std::io; /// Either an H2 reason or an I/O error -#[derive(Debug)] +#[derive(Clone, Debug)] pub enum Error { - Proto(Reason), - Io(io::Error), + Reset(StreamId, Reason, Initiator), + GoAway(Bytes, Reason, Initiator), + Io(io::ErrorKind, Option), +} + +#[derive(Clone, Copy, Debug, PartialEq)] +pub enum Initiator { + User, + Library, + Remote, } impl Error { - /// Clone the error for internal purposes. - /// - /// `io::Error` is not `Clone`, so we only copy the `ErrorKind`. - pub(super) fn shallow_clone(&self) -> Error { + pub(crate) fn is_local(&self) -> bool { match *self { - Error::Proto(reason) => Error::Proto(reason), - Error::Io(ref io) => Error::Io(io::Error::from(io.kind())), + Self::Reset(_, _, initiator) | Self::GoAway(_, _, initiator) => initiator.is_local(), + Self::Io(..) 
=> true, + } + } + + pub(crate) fn user_go_away(reason: Reason) -> Self { + Self::GoAway(Bytes::new(), reason, Initiator::User) + } + + pub(crate) fn library_reset(stream_id: StreamId, reason: Reason) -> Self { + Self::Reset(stream_id, reason, Initiator::Library) + } + + pub(crate) fn library_go_away(reason: Reason) -> Self { + Self::GoAway(Bytes::new(), reason, Initiator::Library) + } + + pub(crate) fn remote_reset(stream_id: StreamId, reason: Reason) -> Self { + Self::Reset(stream_id, reason, Initiator::Remote) + } + + pub(crate) fn remote_go_away(debug_data: Bytes, reason: Reason) -> Self { + Self::GoAway(debug_data, reason, Initiator::Remote) + } +} + +impl Initiator { + fn is_local(&self) -> bool { + match *self { + Self::User | Self::Library => true, + Self::Remote => false, } } } -impl From for Error { - fn from(src: Reason) -> Self { - Error::Proto(src) +impl fmt::Display for Error { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + match *self { + Self::Reset(_, reason, _) | Self::GoAway(_, reason, _) => reason.fmt(fmt), + Self::Io(_, Some(ref inner)) => inner.fmt(fmt), + Self::Io(kind, None) => io::Error::from(kind).fmt(fmt), + } + } +} + +impl From for Error { + fn from(src: io::ErrorKind) -> Self { + Error::Io(src.into(), None) } } impl From for Error { fn from(src: io::Error) -> Self { - Error::Io(src) - } -} - -impl From for RecvError { - fn from(src: Error) -> RecvError { - match src { - Error::Proto(reason) => RecvError::Connection(reason), - Error::Io(e) => RecvError::Io(e), - } + Error::Io(src.kind(), src.get_ref().map(|inner| inner.to_string())) } } impl From for SendError { - fn from(src: Error) -> SendError { - match src { - Error::Proto(reason) => SendError::Connection(reason), - Error::Io(e) => SendError::Io(e), - } + fn from(src: Error) -> Self { + Self::Connection(src) } } diff --git a/third_party/rust/h2/src/proto/go_away.rs b/third_party/rust/h2/src/proto/go_away.rs index 91d37b642a1b..759427878481 100644 --- a/third_party/rust/h2/src/proto/go_away.rs +++ b/third_party/rust/h2/src/proto/go_away.rs @@ -31,7 +31,7 @@ pub(super) struct GoAway { /// well, and we wouldn't want to save that here to accidentally dump in logs, /// or waste struct space.) #[derive(Debug)] -struct GoingAway { +pub(crate) struct GoingAway { /// Stores the highest stream ID of a GOAWAY that has been sent. /// /// It's illegal to send a subsequent GOAWAY with a higher ID. @@ -98,9 +98,9 @@ impl GoAway { self.is_user_initiated } - /// Return the last Reason we've sent. - pub fn going_away_reason(&self) -> Option { - self.going_away.as_ref().map(|g| g.reason) + /// Returns the going away info, if any. + pub fn going_away(&self) -> Option<&GoingAway> { + self.going_away.as_ref() } /// Returns if the connection should close now, or wait until idle. 
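// [Editorial sketch, not part of the patch; GoAway and GoingAway are the
// crate-internal types from the hunks above.] going_away_reason() is replaced
// by going_away(), which lends out the whole GoingAway record, and callers now
// read the reason through the new accessor:
fn sketch_last_goaway_reason(go_away: &GoAway) -> Option<Reason> {
    go_away.going_away().map(|g| g.reason())
}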
@@ -141,7 +141,7 @@ impl GoAway { return Poll::Ready(Some(Ok(reason))); } else if self.should_close_now() { - return match self.going_away_reason() { + return match self.going_away().map(|going_away| going_away.reason) { Some(reason) => Poll::Ready(Some(Ok(reason))), None => Poll::Ready(None), }; @@ -150,3 +150,9 @@ impl GoAway { Poll::Ready(None) } } + +impl GoingAway { + pub(crate) fn reason(&self) -> Reason { + self.reason + } +} diff --git a/third_party/rust/h2/src/proto/mod.rs b/third_party/rust/h2/src/proto/mod.rs index f9e068b58352..5ec7bf992a85 100644 --- a/third_party/rust/h2/src/proto/mod.rs +++ b/third_party/rust/h2/src/proto/mod.rs @@ -7,10 +7,10 @@ mod settings; mod streams; pub(crate) use self::connection::{Config, Connection}; -pub(crate) use self::error::Error; +pub use self::error::{Error, Initiator}; pub(crate) use self::peer::{Dyn as DynPeer, Peer}; pub(crate) use self::ping_pong::UserPings; -pub(crate) use self::streams::{OpaqueStreamRef, StreamRef, Streams}; +pub(crate) use self::streams::{DynStreams, OpaqueStreamRef, StreamRef, Streams}; pub(crate) use self::streams::{Open, PollReset, Prioritized}; use crate::codec::Codec; @@ -33,3 +33,4 @@ pub type WindowSize = u32; pub const MAX_WINDOW_SIZE: WindowSize = (1 << 31) - 1; pub const DEFAULT_RESET_STREAM_MAX: usize = 10; pub const DEFAULT_RESET_STREAM_SECS: u64 = 30; +pub const DEFAULT_MAX_SEND_BUFFER_SIZE: usize = 1024 * 400; diff --git a/third_party/rust/h2/src/proto/peer.rs b/third_party/rust/h2/src/proto/peer.rs index 8d327fbfc607..d62d9e24e0ac 100644 --- a/third_party/rust/h2/src/proto/peer.rs +++ b/third_party/rust/h2/src/proto/peer.rs @@ -1,7 +1,6 @@ -use crate::codec::RecvError; use crate::error::Reason; use crate::frame::{Pseudo, StreamId}; -use crate::proto::Open; +use crate::proto::{Error, Open}; use http::{HeaderMap, Request, Response}; @@ -11,6 +10,7 @@ use std::fmt; pub(crate) trait Peer { /// Message type polled from the transport type Poll: fmt::Debug; + const NAME: &'static str; fn r#dyn() -> Dyn; @@ -20,7 +20,7 @@ pub(crate) trait Peer { pseudo: Pseudo, fields: HeaderMap, stream_id: StreamId, - ) -> Result; + ) -> Result; fn is_local_init(id: StreamId) -> bool { assert!(!id.is_zero()); @@ -60,7 +60,7 @@ impl Dyn { pseudo: Pseudo, fields: HeaderMap, stream_id: StreamId, - ) -> Result { + ) -> Result { if self.is_server() { crate::server::Peer::convert_poll_message(pseudo, fields, stream_id) .map(PollMessage::Server) @@ -71,12 +71,12 @@ impl Dyn { } /// Returns true if the remote peer can initiate a stream with the given ID. 
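// [Editorial sketch, not part of the patch.] ensure_can_open keeps the same
// validation below but reports failures as a library-initiated GOAWAY
// (Error::library_go_away(Reason::PROTOCOL_ERROR)) instead of
// RecvError::Connection. The parity rule it enforces, stated without the h2
// types: HTTP/2 clients open odd-numbered streams, servers even-numbered ones.
fn sketch_is_client_initiated(stream_id: u32) -> bool {
    // true when the id could only have been opened by a client
    stream_id % 2 == 1
}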
- pub fn ensure_can_open(&self, id: StreamId, mode: Open) -> Result<(), RecvError> { + pub fn ensure_can_open(&self, id: StreamId, mode: Open) -> Result<(), Error> { if self.is_server() { // Ensure that the ID is a valid client initiated ID if mode.is_push_promise() || !id.is_client_initiated() { proto_err!(conn: "cannot open stream {:?} - not client initiated", id); - return Err(RecvError::Connection(Reason::PROTOCOL_ERROR)); + return Err(Error::library_go_away(Reason::PROTOCOL_ERROR)); } Ok(()) @@ -84,7 +84,7 @@ impl Dyn { // Ensure that the ID is a valid server initiated ID if !mode.is_push_promise() || !id.is_server_initiated() { proto_err!(conn: "cannot open stream {:?} - not server initiated", id); - return Err(RecvError::Connection(Reason::PROTOCOL_ERROR)); + return Err(Error::library_go_away(Reason::PROTOCOL_ERROR)); } Ok(()) diff --git a/third_party/rust/h2/src/proto/ping_pong.rs b/third_party/rust/h2/src/proto/ping_pong.rs index 0022d4a5ba74..844c5fbb915a 100644 --- a/third_party/rust/h2/src/proto/ping_pong.rs +++ b/third_party/rust/h2/src/proto/ping_pong.rs @@ -107,7 +107,7 @@ impl PingPong { &Ping::SHUTDOWN, "pending_ping should be for shutdown", ); - log::trace!("recv PING SHUTDOWN ack"); + tracing::trace!("recv PING SHUTDOWN ack"); return ReceivedPing::Shutdown; } @@ -117,7 +117,7 @@ impl PingPong { if let Some(ref users) = self.user_pings { if ping.payload() == &Ping::USER && users.receive_pong() { - log::trace!("recv PING USER ack"); + tracing::trace!("recv PING USER ack"); return ReceivedPing::Unknown; } } @@ -125,7 +125,7 @@ impl PingPong { // else we were acked a ping we didn't send? // The spec doesn't require us to do anything about this, // so for resiliency, just ignore it for now. - log::warn!("recv PING ack that we never sent: {:?}", ping); + tracing::warn!("recv PING ack that we never sent: {:?}", ping); ReceivedPing::Unknown } else { // Save the ping's payload to be sent as an acknowledgement. @@ -211,11 +211,16 @@ impl ReceivedPing { impl UserPings { pub(crate) fn send_ping(&self) -> Result<(), Option> { - let prev = self.0.state.compare_and_swap( - USER_STATE_EMPTY, // current - USER_STATE_PENDING_PING, // new - Ordering::AcqRel, - ); + let prev = self + .0 + .state + .compare_exchange( + USER_STATE_EMPTY, // current + USER_STATE_PENDING_PING, // new + Ordering::AcqRel, + Ordering::Acquire, + ) + .unwrap_or_else(|v| v); match prev { USER_STATE_EMPTY => { @@ -234,11 +239,16 @@ impl UserPings { // Must register before checking state, in case state were to change // before we could register, and then the ping would just be lost. 
self.0.pong_task.register(cx.waker()); - let prev = self.0.state.compare_and_swap( - USER_STATE_RECEIVED_PONG, // current - USER_STATE_EMPTY, // new - Ordering::AcqRel, - ); + let prev = self + .0 + .state + .compare_exchange( + USER_STATE_RECEIVED_PONG, // current + USER_STATE_EMPTY, // new + Ordering::AcqRel, + Ordering::Acquire, + ) + .unwrap_or_else(|v| v); match prev { USER_STATE_RECEIVED_PONG => Poll::Ready(Ok(())), @@ -252,11 +262,16 @@ impl UserPings { impl UserPingsRx { fn receive_pong(&self) -> bool { - let prev = self.0.state.compare_and_swap( - USER_STATE_PENDING_PONG, // current - USER_STATE_RECEIVED_PONG, // new - Ordering::AcqRel, - ); + let prev = self + .0 + .state + .compare_exchange( + USER_STATE_PENDING_PONG, // current + USER_STATE_RECEIVED_PONG, // new + Ordering::AcqRel, + Ordering::Acquire, + ) + .unwrap_or_else(|v| v); if prev == USER_STATE_PENDING_PONG { self.0.pong_task.wake(); diff --git a/third_party/rust/h2/src/proto/settings.rs b/third_party/rust/h2/src/proto/settings.rs index b1d91e65264a..6cc6172099c5 100644 --- a/third_party/rust/h2/src/proto/settings.rs +++ b/third_party/rust/h2/src/proto/settings.rs @@ -1,4 +1,4 @@ -use crate::codec::{RecvError, UserError}; +use crate::codec::UserError; use crate::error::Reason; use crate::frame; use crate::proto::*; @@ -40,7 +40,7 @@ impl Settings { frame: frame::Settings, codec: &mut Codec, streams: &mut Streams, - ) -> Result<(), RecvError> + ) -> Result<(), Error> where T: AsyncWrite + Unpin, B: Buf, @@ -50,7 +50,7 @@ impl Settings { if frame.is_ack() { match &self.local { Local::WaitingAck(local) => { - log::debug!("received settings ACK; applying {:?}", local); + tracing::debug!("received settings ACK; applying {:?}", local); if let Some(max) = local.max_frame_size() { codec.set_max_recv_frame_size(max as usize); @@ -68,7 +68,7 @@ impl Settings { // We haven't sent any SETTINGS frames to be ACKed, so // this is very bizarre! Remote is either buggy or malicious. proto_err!(conn: "received unexpected settings ack"); - Err(RecvError::Connection(Reason::PROTOCOL_ERROR)) + Err(Error::library_go_away(Reason::PROTOCOL_ERROR)) } } } else { @@ -85,7 +85,7 @@ impl Settings { match &self.local { Local::ToSend(..) | Local::WaitingAck(..) 
=> Err(UserError::SendSettingsWhilePending), Local::Synced => { - log::trace!("queue to send local settings: {:?}", frame); + tracing::trace!("queue to send local settings: {:?}", frame); self.local = Local::ToSend(frame); Ok(()) } @@ -97,7 +97,7 @@ impl Settings { cx: &mut Context, dst: &mut Codec, streams: &mut Streams, - ) -> Poll> + ) -> Poll> where T: AsyncWrite + Unpin, B: Buf, @@ -115,7 +115,9 @@ impl Settings { // Buffer the settings frame dst.buffer(frame.into()).expect("invalid settings frame"); - log::trace!("ACK sent; applying settings"); + tracing::trace!("ACK sent; applying settings"); + + streams.apply_remote_settings(settings)?; if let Some(val) = settings.header_table_size() { dst.set_send_header_table_size(val as usize); @@ -124,8 +126,6 @@ impl Settings { if let Some(val) = settings.max_frame_size() { dst.set_max_send_frame_size(val as usize); } - - streams.apply_remote_settings(settings)?; } self.remote = None; @@ -139,7 +139,7 @@ impl Settings { // Buffer the settings frame dst.buffer(settings.clone().into()) .expect("invalid settings frame"); - log::trace!("local settings sent; waiting for ack: {:?}", settings); + tracing::trace!("local settings sent; waiting for ack: {:?}", settings); self.local = Local::WaitingAck(settings.clone()); } diff --git a/third_party/rust/h2/src/proto/streams/buffer.rs b/third_party/rust/h2/src/proto/streams/buffer.rs index 652f2eda1934..2648a410e442 100644 --- a/third_party/rust/h2/src/proto/streams/buffer.rs +++ b/third_party/rust/h2/src/proto/streams/buffer.rs @@ -92,13 +92,4 @@ impl Deque { None => None, } } - - /* - pub fn peek_front<'a, T>(&self, buf: &'a Buffer) -> Option<&'a T> { - match self.indices { - Some(idxs) => Some(&buf.slab[idxs.head].value), - None => None, - } - } - */ } diff --git a/third_party/rust/h2/src/proto/streams/counts.rs b/third_party/rust/h2/src/proto/streams/counts.rs index bcd07e814dad..70dfc7851d66 100644 --- a/third_party/rust/h2/src/proto/streams/counts.rs +++ b/third_party/rust/h2/src/proto/streams/counts.rs @@ -133,7 +133,7 @@ impl Counts { // TODO: move this to macro? pub fn transition_after(&mut self, mut stream: store::Ptr, is_reset_counted: bool) { - log::trace!( + tracing::trace!( "transition_after; stream={:?}; state={:?}; is_closed={:?}; \ pending_send_empty={:?}; buffered_send_data={}; \ num_recv={}; num_send={}", @@ -155,7 +155,7 @@ impl Counts { } if stream.is_counted { - log::trace!("dec_num_streams; stream={:?}", stream.id); + tracing::trace!("dec_num_streams; stream={:?}", stream.id); // Decrement the number of active streams. self.dec_num_streams(&mut stream); } @@ -167,6 +167,18 @@ impl Counts { } } + /// Returns the maximum number of streams that can be initiated by this + /// peer. + pub(crate) fn max_send_streams(&self) -> usize { + self.max_send_streams + } + + /// Returns the maximum number of streams that can be initiated by the + /// remote peer. 
+ pub(crate) fn max_recv_streams(&self) -> usize { + self.max_recv_streams + } + fn dec_num_streams(&mut self, stream: &mut store::Ptr) { assert!(stream.is_counted); diff --git a/third_party/rust/h2/src/proto/streams/flow_control.rs b/third_party/rust/h2/src/proto/streams/flow_control.rs index f3cea169961b..4a47f08ddf63 100644 --- a/third_party/rust/h2/src/proto/streams/flow_control.rs +++ b/third_party/rust/h2/src/proto/streams/flow_control.rs @@ -120,7 +120,7 @@ impl FlowControl { return Err(Reason::FLOW_CONTROL_ERROR); } - log::trace!( + tracing::trace!( "inc_window; sz={}; old={}; new={}", sz, self.window_size, @@ -136,7 +136,7 @@ impl FlowControl { /// This is called after receiving a SETTINGS frame with a lower /// INITIAL_WINDOW_SIZE value. pub fn dec_send_window(&mut self, sz: WindowSize) { - log::trace!( + tracing::trace!( "dec_window; sz={}; window={}, available={}", sz, self.window_size, @@ -151,7 +151,7 @@ impl FlowControl { /// This is called after receiving a SETTINGS ACK frame with a lower /// INITIAL_WINDOW_SIZE value. pub fn dec_recv_window(&mut self, sz: WindowSize) { - log::trace!( + tracing::trace!( "dec_recv_window; sz={}; window={}, available={}", sz, self.window_size, @@ -165,7 +165,7 @@ impl FlowControl { /// Decrements the window reflecting data has actually been sent. The caller /// must ensure that the window has capacity. pub fn send_data(&mut self, sz: WindowSize) { - log::trace!( + tracing::trace!( "send_data; sz={}; window={}; available={}", sz, self.window_size, @@ -173,7 +173,7 @@ impl FlowControl { ); // Ensure that the argument is correct - assert!(sz <= self.window_size); + assert!(self.window_size >= sz as usize); // Update values self.window_size -= sz; @@ -206,38 +206,22 @@ impl Window { } } -impl PartialEq for Window { - fn eq(&self, other: &WindowSize) -> bool { +impl PartialEq for Window { + fn eq(&self, other: &usize) -> bool { if self.0 < 0 { false } else { - (self.0 as WindowSize).eq(other) + (self.0 as usize).eq(other) } } } -impl PartialEq for WindowSize { - fn eq(&self, other: &Window) -> bool { - other.eq(self) - } -} - -impl PartialOrd for Window { - fn partial_cmp(&self, other: &WindowSize) -> Option<::std::cmp::Ordering> { +impl PartialOrd for Window { + fn partial_cmp(&self, other: &usize) -> Option<::std::cmp::Ordering> { if self.0 < 0 { Some(::std::cmp::Ordering::Less) } else { - (self.0 as WindowSize).partial_cmp(other) - } - } -} - -impl PartialOrd for WindowSize { - fn partial_cmp(&self, other: &Window) -> Option<::std::cmp::Ordering> { - if other.0 < 0 { - Some(::std::cmp::Ordering::Greater) - } else { - self.partial_cmp(&(other.0 as WindowSize)) + (self.0 as usize).partial_cmp(other) } } } diff --git a/third_party/rust/h2/src/proto/streams/mod.rs b/third_party/rust/h2/src/proto/streams/mod.rs index 508d9a1e39e1..de2a2c85a0c1 100644 --- a/third_party/rust/h2/src/proto/streams/mod.rs +++ b/third_party/rust/h2/src/proto/streams/mod.rs @@ -12,7 +12,7 @@ mod streams; pub(crate) use self::prioritize::Prioritized; pub(crate) use self::recv::Open; pub(crate) use self::send::PollReset; -pub(crate) use self::streams::{OpaqueStreamRef, StreamRef, Streams}; +pub(crate) use self::streams::{DynStreams, OpaqueStreamRef, StreamRef, Streams}; use self::buffer::Buffer; use self::counts::Counts; @@ -41,12 +41,18 @@ pub struct Config { /// MAX_CONCURRENT_STREAMS specified in the frame. pub initial_max_send_streams: usize, + /// Max amount of DATA bytes to buffer per stream. 
+ pub local_max_buffer_size: usize, + /// The stream ID to start the next local stream with pub local_next_stream_id: StreamId, /// If the local peer is willing to receive push promises pub local_push_enabled: bool, + /// If extended connect protocol is enabled. + pub extended_connect_protocol_enabled: bool, + /// How long a locally reset stream should ignore frames pub local_reset_duration: Duration, diff --git a/third_party/rust/h2/src/proto/streams/prioritize.rs b/third_party/rust/h2/src/proto/streams/prioritize.rs index a13393282b75..c2904aca9b38 100644 --- a/third_party/rust/h2/src/proto/streams/prioritize.rs +++ b/third_party/rust/h2/src/proto/streams/prioritize.rs @@ -6,7 +6,7 @@ use crate::frame::{Reason, StreamId}; use crate::codec::UserError; use crate::codec::UserError::*; -use bytes::buf::ext::{BufExt, Take}; +use bytes::buf::{Buf, Take}; use std::io; use std::task::{Context, Poll, Waker}; use std::{cmp, fmt, mem}; @@ -18,7 +18,7 @@ use std::{cmp, fmt, mem}; /// This is because "idle" stream IDs – those which have been initiated but /// have yet to receive frames – will be implicitly closed on receipt of a /// frame on a higher stream ID. If these queues was not ordered by stream -/// IDs, some mechanism would be necessary to ensure that the lowest-numberedh] +/// IDs, some mechanism would be necessary to ensure that the lowest-numbered] /// idle stream is opened first. #[derive(Debug)] pub(super) struct Prioritize { @@ -51,6 +51,9 @@ pub(super) struct Prioritize { /// What `DATA` frame is currently being sent in the codec. in_flight_data_frame: InFlightData, + + /// The maximum amount of bytes a stream should buffer. + max_buffer_size: usize, } #[derive(Debug, Eq, PartialEq)] @@ -84,7 +87,7 @@ impl Prioritize { flow.assign_capacity(config.remote_init_window_sz); - log::trace!("Prioritize::new; flow={:?}", flow); + tracing::trace!("Prioritize::new; flow={:?}", flow); Prioritize { pending_send: store::Queue::new(), @@ -93,9 +96,14 @@ impl Prioritize { flow, last_opened_id: StreamId::ZERO, in_flight_data_frame: InFlightData::Nothing, + max_buffer_size: config.local_max_buffer_size, } } + pub(crate) fn max_buffer_size(&self) -> usize { + self.max_buffer_size + } + /// Queue a frame to be sent to the remote pub fn queue_frame( &mut self, @@ -104,6 +112,8 @@ impl Prioritize { stream: &mut store::Ptr, task: &mut Option, ) { + let span = tracing::trace_span!("Prioritize::queue_frame", ?stream.id); + let _e = span.enter(); // Queue the frame in the buffer stream.pending_send.push_back(buffer, frame); self.schedule_send(stream, task); @@ -112,7 +122,7 @@ impl Prioritize { pub fn schedule_send(&mut self, stream: &mut store::Ptr, task: &mut Option) { // If the stream is waiting to be opened, nothing more to do. if stream.is_send_ready() { - log::trace!("schedule_send; {:?}", stream.id); + tracing::trace!(?stream.id, "schedule_send"); // Queue the stream self.pending_send.push(stream); @@ -156,20 +166,19 @@ impl Prioritize { } // Update the buffered data counter - stream.buffered_send_data += sz; + stream.buffered_send_data += sz as usize; - log::trace!( - "send_data; sz={}; buffered={}; requested={}", - sz, - stream.buffered_send_data, - stream.requested_send_capacity - ); + let span = + tracing::trace_span!("send_data", sz, requested = stream.requested_send_capacity); + let _e = span.enter(); + tracing::trace!(buffered = stream.buffered_send_data); // Implicitly request more send capacity if not enough has been // requested yet. 
- if stream.requested_send_capacity < stream.buffered_send_data { + if (stream.requested_send_capacity as usize) < stream.buffered_send_data { // Update the target requested capacity - stream.requested_send_capacity = stream.buffered_send_data; + stream.requested_send_capacity = + cmp::min(stream.buffered_send_data, WindowSize::MAX as usize) as WindowSize; self.try_assign_capacity(stream); } @@ -179,10 +188,9 @@ impl Prioritize { self.reserve_capacity(0, stream, counts); } - log::trace!( - "send_data (2); available={}; buffered={}", - stream.send_flow.available(), - stream.buffered_send_data + tracing::trace!( + available = %stream.send_flow.available(), + buffered = stream.buffered_send_data, ); // The `stream.buffered_send_data == 0` check is here so that, if a zero @@ -214,31 +222,32 @@ impl Prioritize { stream: &mut store::Ptr, counts: &mut Counts, ) { - log::trace!( - "reserve_capacity; stream={:?}; requested={:?}; effective={:?}; curr={:?}", - stream.id, - capacity, - capacity + stream.buffered_send_data, - stream.requested_send_capacity + let span = tracing::trace_span!( + "reserve_capacity", + ?stream.id, + requested = capacity, + effective = (capacity as usize) + stream.buffered_send_data, + curr = stream.requested_send_capacity ); + let _e = span.enter(); // Actual capacity is `capacity` + the current amount of buffered data. // If it were less, then we could never send out the buffered data. - let capacity = capacity + stream.buffered_send_data; + let capacity = (capacity as usize) + stream.buffered_send_data; - if capacity == stream.requested_send_capacity { + if capacity == stream.requested_send_capacity as usize { // Nothing to do - } else if capacity < stream.requested_send_capacity { + } else if capacity < stream.requested_send_capacity as usize { // Update the target requested capacity - stream.requested_send_capacity = capacity; + stream.requested_send_capacity = capacity as WindowSize; // Currently available capacity assigned to the stream let available = stream.send_flow.available().as_size(); // If the stream has more assigned capacity than requested, reclaim // some for the connection - if available > capacity { - let diff = available - capacity; + if available as usize > capacity { + let diff = available - capacity as WindowSize; stream.send_flow.claim_capacity(diff); @@ -252,7 +261,8 @@ impl Prioritize { } // Update the target requested capacity - stream.requested_send_capacity = capacity; + stream.requested_send_capacity = + cmp::min(capacity, WindowSize::MAX as usize) as WindowSize; // Try to assign additional capacity to the stream. If none is // currently available, the stream will be queued to receive some @@ -266,13 +276,14 @@ impl Prioritize { inc: WindowSize, stream: &mut store::Ptr, ) -> Result<(), Reason> { - log::trace!( - "recv_stream_window_update; stream={:?}; state={:?}; inc={}; flow={:?}", - stream.id, - stream.state, + let span = tracing::trace_span!( + "recv_stream_window_update", + ?stream.id, + ?stream.state, inc, - stream.send_flow + flow = ?stream.send_flow ); + let _e = span.enter(); if stream.state.is_send_closed() && stream.buffered_send_data == 0 { // We can't send any data, so don't bother doing anything else. 
@@ -315,8 +326,8 @@ impl Prioritize { /// it to the connection pub fn reclaim_reserved_capacity(&mut self, stream: &mut store::Ptr, counts: &mut Counts) { // only reclaim requested capacity that isn't already buffered - if stream.requested_send_capacity > stream.buffered_send_data { - let reserved = stream.requested_send_capacity - stream.buffered_send_data; + if stream.requested_send_capacity as usize > stream.buffered_send_data { + let reserved = stream.requested_send_capacity - stream.buffered_send_data as WindowSize; stream.send_flow.claim_capacity(reserved); self.assign_connection_capacity(reserved, stream, counts); @@ -324,9 +335,11 @@ impl Prioritize { } pub fn clear_pending_capacity(&mut self, store: &mut Store, counts: &mut Counts) { + let span = tracing::trace_span!("clear_pending_capacity"); + let _e = span.enter(); while let Some(stream) = self.pending_capacity.pop(store) { counts.transition(stream, |_, stream| { - log::trace!("clear_pending_capacity; stream={:?}", stream.id); + tracing::trace!(?stream.id, "clear_pending_capacity"); }) } } @@ -339,7 +352,8 @@ impl Prioritize { ) where R: Resolve, { - log::trace!("assign_connection_capacity; inc={}", inc); + let span = tracing::trace_span!("assign_connection_capacity", inc); + let _e = span.enter(); self.flow.assign_capacity(inc); @@ -373,7 +387,7 @@ impl Prioritize { // Total requested should never go below actual assigned // (Note: the window size can go lower than assigned) - debug_assert!(total_requested >= stream.send_flow.available()); + debug_assert!(stream.send_flow.available() <= total_requested as usize); // The amount of additional capacity that the stream requests. // Don't assign more than the window has available! @@ -382,15 +396,14 @@ impl Prioritize { // Can't assign more than what is available stream.send_flow.window_size() - stream.send_flow.available().as_size(), ); - - log::trace!( - "try_assign_capacity; stream={:?}, requested={}; additional={}; buffered={}; window={}; conn={}", - stream.id, - total_requested, + let span = tracing::trace_span!("try_assign_capacity", ?stream.id); + let _e = span.enter(); + tracing::trace!( + requested = total_requested, additional, - stream.buffered_send_data, - stream.send_flow.window_size(), - self.flow.available() + buffered = stream.buffered_send_data, + window = stream.send_flow.window_size(), + conn = %self.flow.available() ); if additional == 0 { @@ -416,24 +429,23 @@ impl Prioritize { // TODO: Should prioritization factor into this? 
let assign = cmp::min(conn_available, additional); - log::trace!(" assigning; stream={:?}, capacity={}", stream.id, assign,); + tracing::trace!(capacity = assign, "assigning"); // Assign the capacity to the stream - stream.assign_capacity(assign); + stream.assign_capacity(assign, self.max_buffer_size); // Claim the capacity from the connection self.flow.claim_capacity(assign); } - log::trace!( - "try_assign_capacity(2); available={}; requested={}; buffered={}; has_unavailable={:?}", - stream.send_flow.available(), - stream.requested_send_capacity, - stream.buffered_send_data, - stream.send_flow.has_unavailable() + tracing::trace!( + available = %stream.send_flow.available(), + requested = stream.requested_send_capacity, + buffered = stream.buffered_send_data, + has_unavailable = %stream.send_flow.has_unavailable() ); - if stream.send_flow.available() < stream.requested_send_capacity + if stream.send_flow.available() < stream.requested_send_capacity as usize && stream.send_flow.has_unavailable() { // The stream requires additional capacity and the stream's @@ -485,14 +497,14 @@ impl Prioritize { // The max frame length let max_frame_len = dst.max_send_frame_size(); - log::trace!("poll_complete"); + tracing::trace!("poll_complete"); loop { self.schedule_pending_open(store, counts); match self.pop_frame(buffer, store, max_frame_len, counts) { Some(frame) => { - log::trace!("writing frame={:?}", frame); + tracing::trace!(?frame, "writing"); debug_assert_eq!(self.in_flight_data_frame, InFlightData::Nothing); if let Frame::Data(ref frame) = frame { @@ -538,47 +550,62 @@ impl Prioritize { where B: Buf, { - log::trace!("try reclaim frame"); + let span = tracing::trace_span!("try_reclaim_frame"); + let _e = span.enter(); // First check if there are any data chunks to take back if let Some(frame) = dst.take_last_data_frame() { - log::trace!( - " -> reclaimed; frame={:?}; sz={}", - frame, - frame.payload().inner.get_ref().remaining() - ); + self.reclaim_frame_inner(buffer, store, frame) + } else { + false + } + } - let mut eos = false; - let key = frame.payload().stream; + fn reclaim_frame_inner( + &mut self, + buffer: &mut Buffer>, + store: &mut Store, + frame: frame::Data>, + ) -> bool + where + B: Buf, + { + tracing::trace!( + ?frame, + sz = frame.payload().inner.get_ref().remaining(), + "reclaimed" + ); - match mem::replace(&mut self.in_flight_data_frame, InFlightData::Nothing) { - InFlightData::Nothing => panic!("wasn't expecting a frame to reclaim"), - InFlightData::Drop => { - log::trace!("not reclaiming frame for cancelled stream"); - return false; - } - InFlightData::DataFrame(k) => { - debug_assert_eq!(k, key); - } + let mut eos = false; + let key = frame.payload().stream; + + match mem::replace(&mut self.in_flight_data_frame, InFlightData::Nothing) { + InFlightData::Nothing => panic!("wasn't expecting a frame to reclaim"), + InFlightData::Drop => { + tracing::trace!("not reclaiming frame for cancelled stream"); + return false; + } + InFlightData::DataFrame(k) => { + debug_assert_eq!(k, key); + } + } + + let mut frame = frame.map(|prioritized| { + // TODO: Ensure fully written + eos = prioritized.end_of_stream; + prioritized.inner.into_inner() + }); + + if frame.payload().has_remaining() { + let mut stream = store.resolve(key); + + if eos { + frame.set_end_stream(true); } - let mut frame = frame.map(|prioritized| { - // TODO: Ensure fully written - eos = prioritized.end_of_stream; - prioritized.inner.into_inner() - }); + self.push_back_frame(frame.into(), buffer, &mut stream); - if 
frame.payload().has_remaining() { - let mut stream = store.resolve(key); - - if eos { - frame.set_end_stream(true); - } - - self.push_back_frame(frame.into(), buffer, &mut stream); - - return true; - } + return true; } false @@ -603,11 +630,12 @@ impl Prioritize { } pub fn clear_queue(&mut self, buffer: &mut Buffer>, stream: &mut store::Ptr) { - log::trace!("clear_queue; stream={:?}", stream.id); + let span = tracing::trace_span!("clear_queue", ?stream.id); + let _e = span.enter(); // TODO: make this more efficient? while let Some(frame) = stream.pending_send.pop_front(buffer) { - log::trace!("dropping; frame={:?}", frame); + tracing::trace!(?frame, "dropping"); } stream.buffered_send_data = 0; @@ -644,16 +672,14 @@ impl Prioritize { where B: Buf, { - log::trace!("pop_frame"); + let span = tracing::trace_span!("pop_frame"); + let _e = span.enter(); loop { match self.pending_send.pop(store) { Some(mut stream) => { - log::trace!( - "pop_frame; stream={:?}; stream.state={:?}", - stream.id, - stream.state - ); + let span = tracing::trace_span!("popped", ?stream.id, ?stream.state); + let _e = span.enter(); // It's possible that this stream, besides having data to send, // is also queued to send a reset, and thus is already in the queue @@ -662,11 +688,7 @@ impl Prioritize { // To be safe, we just always ask the stream. let is_pending_reset = stream.is_pending_reset_expiration(); - log::trace!( - " --> stream={:?}; is_pending_reset={:?};", - stream.id, - is_pending_reset - ); + tracing::trace!(is_pending_reset); let frame = match stream.pending_send.pop_front(buffer) { Some(Frame::Data(mut frame)) => { @@ -675,25 +697,20 @@ impl Prioritize { let stream_capacity = stream.send_flow.available(); let sz = frame.payload().remaining(); - log::trace!( - " --> data frame; stream={:?}; sz={}; eos={:?}; window={}; \ - available={}; requested={}; buffered={};", - frame.stream_id(), + tracing::trace!( sz, - frame.is_end_stream(), - stream_capacity, - stream.send_flow.available(), - stream.requested_send_capacity, - stream.buffered_send_data, + eos = frame.is_end_stream(), + window = %stream_capacity, + available = %stream.send_flow.available(), + requested = stream.requested_send_capacity, + buffered = stream.buffered_send_data, + "data frame" ); // Zero length data frames always have capacity to // be sent. if sz > 0 && stream_capacity == 0 { - log::trace!( - " --> stream capacity is 0; requested={}", - stream.requested_send_capacity - ); + tracing::trace!("stream capacity is 0"); // Ensure that the stream is waiting for // connection level capacity @@ -721,34 +738,43 @@ impl Prioritize { // capacity at this point. debug_assert!(len <= self.flow.window_size()); - log::trace!(" --> sending data frame; len={}", len); + tracing::trace!(len, "sending data frame"); // Update the flow control - log::trace!(" -- updating stream flow --"); - stream.send_flow.send_data(len); + tracing::trace_span!("updating stream flow").in_scope(|| { + stream.send_flow.send_data(len); - // Decrement the stream's buffered data counter - debug_assert!(stream.buffered_send_data >= len); - stream.buffered_send_data -= len; - stream.requested_send_capacity -= len; + // Decrement the stream's buffered data counter + debug_assert!(stream.buffered_send_data >= len as usize); + stream.buffered_send_data -= len as usize; + stream.requested_send_capacity -= len; - // Assign the capacity back to the connection that - // was just consumed from the stream in the previous - // line. 
- self.flow.assign_capacity(len); + // If the capacity was limited because of the + // max_send_buffer_size, then consider waking + // the send task again... + stream.notify_if_can_buffer_more(self.max_buffer_size); - log::trace!(" -- updating connection flow --"); - self.flow.send_data(len); + // Assign the capacity back to the connection that + // was just consumed from the stream in the previous + // line. + self.flow.assign_capacity(len); + }); - // Wrap the frame's data payload to ensure that the - // correct amount of data gets written. + let (eos, len) = tracing::trace_span!("updating connection flow") + .in_scope(|| { + self.flow.send_data(len); - let eos = frame.is_end_stream(); - let len = len as usize; + // Wrap the frame's data payload to ensure that the + // correct amount of data gets written. - if frame.payload().remaining() > len { - frame.set_end_stream(false); - } + let eos = frame.is_end_stream(); + let len = len as usize; + + if frame.payload().remaining() > len { + frame.set_end_stream(false); + } + (eos, len) + }); Frame::Data(frame.map(|buf| Prioritized { inner: buf.take(len), @@ -780,7 +806,10 @@ impl Prioritize { }), None => { if let Some(reason) = stream.state.get_scheduled_reset() { - stream.state.set_reset(reason); + let stream_id = stream.id; + stream + .state + .set_reset(stream_id, reason, Initiator::Library); let frame = frame::Reset::new(stream.id, reason); Frame::Reset(frame) @@ -789,7 +818,7 @@ impl Prioritize { // had data buffered to be sent, but all the frames are cleared // in clear_queue(). Instead of doing O(N) traversal through queue // to remove, lets just ignore the stream here. - log::trace!("removing dangling stream from pending_send"); + tracing::trace!("removing dangling stream from pending_send"); // Since this should only happen as a consequence of `clear_queue`, // we must be in a closed state of some kind. 
debug_assert!(stream.state.is_closed()); @@ -799,7 +828,7 @@ impl Prioritize { } }; - log::trace!("pop_frame; frame={:?}", frame); + tracing::trace!("pop_frame; frame={:?}", frame); if cfg!(debug_assertions) && stream.state.is_idle() { debug_assert!(stream.id > self.last_opened_id); @@ -824,11 +853,11 @@ impl Prioritize { } fn schedule_pending_open(&mut self, store: &mut Store, counts: &mut Counts) { - log::trace!("schedule_pending_open"); + tracing::trace!("schedule_pending_open"); // check for any pending open streams while counts.can_inc_num_send_streams() { if let Some(mut stream) = self.pending_open.pop(store) { - log::trace!("schedule_pending_open; stream={:?}", stream.id); + tracing::trace!("schedule_pending_open; stream={:?}", stream.id); counts.inc_num_send_streams(&mut stream); self.pending_send.push(&mut stream); @@ -850,8 +879,12 @@ where self.inner.remaining() } - fn bytes(&self) -> &[u8] { - self.inner.bytes() + fn chunk(&self) -> &[u8] { + self.inner.chunk() + } + + fn chunks_vectored<'a>(&'a self, dst: &mut [std::io::IoSlice<'a>]) -> usize { + self.inner.chunks_vectored(dst) } fn advance(&mut self, cnt: usize) { diff --git a/third_party/rust/h2/src/proto/streams/recv.rs b/third_party/rust/h2/src/proto/streams/recv.rs index f0e23a4ad9b8..3af1af3a1d80 100644 --- a/third_party/rust/h2/src/proto/streams/recv.rs +++ b/third_party/rust/h2/src/proto/streams/recv.rs @@ -1,7 +1,7 @@ use super::*; -use crate::codec::{RecvError, UserError}; -use crate::frame::{PushPromiseHeaderError, Reason, DEFAULT_INITIAL_WINDOW_SIZE}; -use crate::{frame, proto}; +use crate::codec::UserError; +use crate::frame::{self, PushPromiseHeaderError, Reason, DEFAULT_INITIAL_WINDOW_SIZE}; +use crate::proto::{self, Error}; use std::task::Context; use http::{HeaderMap, Request, Response}; @@ -54,8 +54,11 @@ pub(super) struct Recv { /// Refused StreamId, this represents a frame that must be sent out. refused: Option, - /// If push promises are allowed to be recevied. + /// If push promises are allowed to be received. is_push_enabled: bool, + + /// If extended connect protocol is enabled. 
+ is_extended_connect_protocol_enabled: bool, } #[derive(Debug)] @@ -68,7 +71,7 @@ pub(super) enum Event { #[derive(Debug)] pub(super) enum RecvHeaderBlockError { Oversize(T), - State(RecvError), + State(Error), } #[derive(Debug)] @@ -77,12 +80,6 @@ pub(crate) enum Open { Headers, } -#[derive(Debug, Clone, Copy)] -struct Indices { - head: store::Key, - tail: store::Key, -} - impl Recv { pub fn new(peer: peer::Dyn, config: &Config) -> Self { let next_stream_id = if peer.is_server() { 1 } else { 2 }; @@ -109,6 +106,7 @@ impl Recv { buffer: Buffer::new(), refused: None, is_push_enabled: config.local_push_enabled, + is_extended_connect_protocol_enabled: config.extended_connect_protocol_enabled, } } @@ -130,7 +128,7 @@ impl Recv { id: StreamId, mode: Open, counts: &mut Counts, - ) -> Result, RecvError> { + ) -> Result, Error> { assert!(self.refused.is_none()); counts.peer().ensure_can_open(id, mode)?; @@ -138,7 +136,7 @@ impl Recv { let next_id = self.next_stream_id()?; if id < next_id { proto_err!(conn: "id ({:?}) < next_id ({:?})", id, next_id); - return Err(RecvError::Connection(Reason::PROTOCOL_ERROR)); + return Err(Error::library_go_away(Reason::PROTOCOL_ERROR)); } self.next_stream_id = id.next_id(); @@ -160,8 +158,8 @@ impl Recv { stream: &mut store::Ptr, counts: &mut Counts, ) -> Result<(), RecvHeaderBlockError>> { - log::trace!("opening stream; init_window={}", self.init_window_sz); - let is_initial = stream.state.recv_open(frame.is_end_stream())?; + tracing::trace!("opening stream; init_window={}", self.init_window_sz); + let is_initial = stream.state.recv_open(&frame)?; if is_initial { // TODO: be smarter about this logic @@ -182,11 +180,7 @@ impl Recv { Ok(v) => v, Err(()) => { proto_err!(stream: "could not parse content-length; stream={:?}", stream.id); - return Err(RecvError::Stream { - id: stream.id, - reason: Reason::PROTOCOL_ERROR, - } - .into()); + return Err(Error::library_reset(stream.id, Reason::PROTOCOL_ERROR).into()); } }; @@ -206,7 +200,7 @@ impl Recv { // So, if peer is a server, we'll send a 431. In either case, // an error is recorded, which will send a REFUSED_STREAM, // since we don't want any of the data frames either. - log::debug!( + tracing::debug!( "stream error REQUEST_HEADER_FIELDS_TOO_LARGE -- \ recv_headers: frame is over size; stream={:?}", stream.id @@ -226,15 +220,25 @@ impl Recv { let stream_id = frame.stream_id(); let (pseudo, fields) = frame.into_parts(); - let message = counts - .peer() - .convert_poll_message(pseudo, fields, stream_id)?; - // Push the frame onto the stream's recv buffer - stream - .pending_recv - .push_back(&mut self.buffer, Event::Headers(message)); - stream.notify_recv(); + if pseudo.protocol.is_some() { + if counts.peer().is_server() && !self.is_extended_connect_protocol_enabled { + proto_err!(stream: "cannot use :protocol if extended connect protocol is disabled; stream={:?}", stream.id); + return Err(Error::library_reset(stream.id, Reason::PROTOCOL_ERROR).into()); + } + } + + if !pseudo.is_informational() { + let message = counts + .peer() + .convert_poll_message(pseudo, fields, stream_id)?; + + // Push the frame onto the stream's recv buffer + stream + .pending_recv + .push_back(&mut self.buffer, Event::Headers(message)); + stream.notify_recv(); + } // Only servers can receive a headers frame that initiates the stream. // This is verified in `Streams` before calling this function. 
@@ -316,16 +320,13 @@ impl Recv { &mut self, frame: frame::Headers, stream: &mut store::Ptr, - ) -> Result<(), RecvError> { + ) -> Result<(), Error> { // Transition the state stream.state.recv_close()?; if stream.ensure_content_length_zero().is_err() { proto_err!(stream: "recv_trailers: content-length is not zero; stream={:?};", stream.id); - return Err(RecvError::Stream { - id: stream.id, - reason: Reason::PROTOCOL_ERROR, - }); + return Err(Error::library_reset(stream.id, Reason::PROTOCOL_ERROR)); } let trailers = frame.into_fields(); @@ -341,7 +342,7 @@ impl Recv { /// Releases capacity of the connection pub fn release_connection_capacity(&mut self, capacity: WindowSize, task: &mut Option) { - log::trace!( + tracing::trace!( "release_connection_capacity; size={}, connection in_flight_data={}", capacity, self.in_flight_data, @@ -367,7 +368,7 @@ impl Recv { stream: &mut store::Ptr, task: &mut Option, ) -> Result<(), UserError> { - log::trace!("release_capacity; size={}", capacity); + tracing::trace!("release_capacity; size={}", capacity); if capacity > stream.in_flight_recv_data { return Err(UserError::ReleaseCapacityTooBig); @@ -401,7 +402,7 @@ impl Recv { return; } - log::trace!( + tracing::trace!( "auto-release closed stream ({:?}) capacity: {:?}", stream.id, stream.in_flight_recv_data, @@ -426,7 +427,7 @@ impl Recv { /// The `task` is an optional parked task for the `Connection` that might /// be blocked on needing more window capacity. pub fn set_target_connection_window(&mut self, target: WindowSize, task: &mut Option) { - log::trace!( + tracing::trace!( "set_target_connection_window; target={}; available={}, reserved={}", target, self.flow.available(), @@ -459,61 +460,59 @@ impl Recv { &mut self, settings: &frame::Settings, store: &mut Store, - ) -> Result<(), RecvError> { - let target = if let Some(val) = settings.initial_window_size() { - val - } else { - return Ok(()); - }; - - let old_sz = self.init_window_sz; - self.init_window_sz = target; - - log::trace!("update_initial_window_size; new={}; old={}", target, old_sz,); - - // Per RFC 7540 §6.9.2: - // - // In addition to changing the flow-control window for streams that are - // not yet active, a SETTINGS frame can alter the initial flow-control - // window size for streams with active flow-control windows (that is, - // streams in the "open" or "half-closed (remote)" state). When the - // value of SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST adjust - // the size of all stream flow-control windows that it maintains by the - // difference between the new value and the old value. - // - // A change to `SETTINGS_INITIAL_WINDOW_SIZE` can cause the available - // space in a flow-control window to become negative. A sender MUST - // track the negative flow-control window and MUST NOT send new - // flow-controlled frames until it receives WINDOW_UPDATE frames that - // cause the flow-control window to become positive. - - if target < old_sz { - // We must decrease the (local) window on every open stream. - let dec = old_sz - target; - log::trace!("decrementing all windows; dec={}", dec); - - store.for_each(|mut stream| { - stream.recv_flow.dec_recv_window(dec); - Ok(()) - }) - } else if target > old_sz { - // We must increase the (local) window on every open stream. - let inc = target - old_sz; - log::trace!("incrementing all windows; inc={}", inc); - store.for_each(|mut stream| { - // XXX: Shouldn't the peer have already noticed our - // overflow and sent us a GOAWAY? 
- stream - .recv_flow - .inc_window(inc) - .map_err(RecvError::Connection)?; - stream.recv_flow.assign_capacity(inc); - Ok(()) - }) - } else { - // size is the same... so do nothing - Ok(()) + ) -> Result<(), proto::Error> { + if let Some(val) = settings.is_extended_connect_protocol_enabled() { + self.is_extended_connect_protocol_enabled = val; } + + if let Some(target) = settings.initial_window_size() { + let old_sz = self.init_window_sz; + self.init_window_sz = target; + + tracing::trace!("update_initial_window_size; new={}; old={}", target, old_sz,); + + // Per RFC 7540 §6.9.2: + // + // In addition to changing the flow-control window for streams that are + // not yet active, a SETTINGS frame can alter the initial flow-control + // window size for streams with active flow-control windows (that is, + // streams in the "open" or "half-closed (remote)" state). When the + // value of SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST adjust + // the size of all stream flow-control windows that it maintains by the + // difference between the new value and the old value. + // + // A change to `SETTINGS_INITIAL_WINDOW_SIZE` can cause the available + // space in a flow-control window to become negative. A sender MUST + // track the negative flow-control window and MUST NOT send new + // flow-controlled frames until it receives WINDOW_UPDATE frames that + // cause the flow-control window to become positive. + + if target < old_sz { + // We must decrease the (local) window on every open stream. + let dec = old_sz - target; + tracing::trace!("decrementing all windows; dec={}", dec); + + store.for_each(|mut stream| { + stream.recv_flow.dec_recv_window(dec); + }) + } else if target > old_sz { + // We must increase the (local) window on every open stream. + let inc = target - old_sz; + tracing::trace!("incrementing all windows; inc={}", inc); + store.try_for_each(|mut stream| { + // XXX: Shouldn't the peer have already noticed our + // overflow and sent us a GOAWAY? + stream + .recv_flow + .inc_window(inc) + .map_err(proto::Error::library_go_away)?; + stream.recv_flow.assign_capacity(inc); + Ok::<_, proto::Error>(()) + })?; + } + } + + Ok(()) } pub fn is_end_stream(&self, stream: &store::Ptr) -> bool { @@ -524,11 +523,7 @@ impl Recv { stream.pending_recv.is_empty() } - pub fn recv_data( - &mut self, - frame: frame::Data, - stream: &mut store::Ptr, - ) -> Result<(), RecvError> { + pub fn recv_data(&mut self, frame: frame::Data, stream: &mut store::Ptr) -> Result<(), Error> { let sz = frame.payload().len(); // This should have been enforced at the codec::FramedRead layer, so @@ -546,10 +541,10 @@ impl Recv { // Receiving a DATA frame when not expecting one is a protocol // error. proto_err!(conn: "unexpected DATA frame; stream={:?}", stream.id); - return Err(RecvError::Connection(Reason::PROTOCOL_ERROR)); + return Err(Error::library_go_away(Reason::PROTOCOL_ERROR)); } - log::trace!( + tracing::trace!( "recv_data; size={}; connection={}; stream={}", sz, self.flow.window_size(), @@ -557,11 +552,11 @@ impl Recv { ); if is_ignoring_frame { - log::trace!( + tracing::trace!( "recv_data; frame ignored on locally reset {:?} for some time", stream.id, ); - return self.ignore_data(sz); + return Ok(self.ignore_data(sz)?); } // Ensure that there is enough capacity on the connection before acting @@ -577,10 +572,7 @@ impl Recv { // So, for violating the **stream** window, we can send either a // stream or connection error. We've opted to send a stream // error. 
- return Err(RecvError::Stream { - id: stream.id, - reason: Reason::FLOW_CONTROL_ERROR, - }); + return Err(Error::library_reset(stream.id, Reason::FLOW_CONTROL_ERROR)); } if stream.dec_content_length(frame.payload().len()).is_err() { @@ -589,10 +581,7 @@ impl Recv { stream.id, frame.payload().len(), ); - return Err(RecvError::Stream { - id: stream.id, - reason: Reason::PROTOCOL_ERROR, - }); + return Err(Error::library_reset(stream.id, Reason::PROTOCOL_ERROR)); } if frame.is_end_stream() { @@ -602,15 +591,12 @@ impl Recv { stream.id, frame.payload().len(), ); - return Err(RecvError::Stream { - id: stream.id, - reason: Reason::PROTOCOL_ERROR, - }); + return Err(Error::library_reset(stream.id, Reason::PROTOCOL_ERROR)); } if stream.state.recv_close().is_err() { proto_err!(conn: "recv_data: failed to transition to closed state; stream={:?}", stream.id); - return Err(RecvError::Connection(Reason::PROTOCOL_ERROR)); + return Err(Error::library_go_away(Reason::PROTOCOL_ERROR).into()); } } @@ -629,7 +615,7 @@ impl Recv { Ok(()) } - pub fn ignore_data(&mut self, sz: WindowSize) -> Result<(), RecvError> { + pub fn ignore_data(&mut self, sz: WindowSize) -> Result<(), Error> { // Ensure that there is enough capacity on the connection... self.consume_connection_window(sz)?; @@ -645,14 +631,14 @@ impl Recv { Ok(()) } - pub fn consume_connection_window(&mut self, sz: WindowSize) -> Result<(), RecvError> { + pub fn consume_connection_window(&mut self, sz: WindowSize) -> Result<(), Error> { if self.flow.window_size() < sz { - log::debug!( + tracing::debug!( "connection error FLOW_CONTROL_ERROR -- window_size ({:?}) < sz ({:?});", self.flow.window_size(), sz, ); - return Err(RecvError::Connection(Reason::FLOW_CONTROL_ERROR)); + return Err(Error::library_go_away(Reason::FLOW_CONTROL_ERROR)); } // Update connection level flow control @@ -667,7 +653,7 @@ impl Recv { &mut self, frame: frame::PushPromise, stream: &mut store::Ptr, - ) -> Result<(), RecvError> { + ) -> Result<(), Error> { stream.state.reserve_remote()?; if frame.is_over_size() { // A frame is over size if the decoded header block was bigger than @@ -681,15 +667,15 @@ impl Recv { // So, if peer is a server, we'll send a 431. In either case, // an error is recorded, which will send a REFUSED_STREAM, // since we don't want any of the data frames either. - log::debug!( + tracing::debug!( "stream error REFUSED_STREAM -- recv_push_promise: \ headers frame is over size; promised_id={:?};", frame.promised_id(), ); - return Err(RecvError::Stream { - id: frame.promised_id(), - reason: Reason::REFUSED_STREAM, - }); + return Err(Error::library_reset( + frame.promised_id(), + Reason::REFUSED_STREAM, + )); } let promised_id = frame.promised_id(); @@ -712,10 +698,7 @@ impl Recv { promised_id, ), } - return Err(RecvError::Stream { - id: promised_id, - reason: Reason::PROTOCOL_ERROR, - }); + return Err(Error::library_reset(promised_id, Reason::PROTOCOL_ERROR)); } use super::peer::PollMessage::*; @@ -730,7 +713,7 @@ impl Recv { pub fn ensure_not_idle(&self, id: StreamId) -> Result<(), Reason> { if let Ok(next) = self.next_stream_id { if id >= next { - log::debug!( + tracing::debug!( "stream ID implicitly closed, PROTOCOL_ERROR; stream={:?}", id ); @@ -745,18 +728,16 @@ impl Recv { /// Handle remote sending an explicit RST_STREAM. 
pub fn recv_reset(&mut self, frame: frame::Reset, stream: &mut Stream) { // Notify the stream - stream - .state - .recv_reset(frame.reason(), stream.is_pending_send); + stream.state.recv_reset(frame, stream.is_pending_send); stream.notify_send(); stream.notify_recv(); } - /// Handle a received error - pub fn recv_err(&mut self, err: &proto::Error, stream: &mut Stream) { + /// Handle a connection-level error + pub fn handle_error(&mut self, err: &proto::Error, stream: &mut Stream) { // Receive an error - stream.state.recv_err(err); + stream.state.handle_error(err); // If a receiver is waiting, notify it stream.notify_send(); @@ -787,11 +768,11 @@ impl Recv { self.max_stream_id } - pub fn next_stream_id(&self) -> Result { + pub fn next_stream_id(&self) -> Result { if let Ok(id) = self.next_stream_id { Ok(id) } else { - Err(RecvError::Connection(Reason::PROTOCOL_ERROR)) + Err(Error::library_go_away(Reason::PROTOCOL_ERROR)) } } @@ -805,11 +786,21 @@ impl Recv { } } + pub(super) fn maybe_reset_next_stream_id(&mut self, id: StreamId) { + if let Ok(next_id) = self.next_stream_id { + // !Peer::is_local_init should have been called beforehand + debug_assert_eq!(id.is_server_initiated(), next_id.is_server_initiated()); + if id >= next_id { + self.next_stream_id = id.next_id(); + } + } + } + /// Returns true if the remote peer can reserve a stream with the given ID. - pub fn ensure_can_reserve(&self) -> Result<(), RecvError> { + pub fn ensure_can_reserve(&self) -> Result<(), Error> { if !self.is_push_enabled { proto_err!(conn: "recv_push_promise: push is disabled"); - return Err(RecvError::Connection(Reason::PROTOCOL_ERROR)); + return Err(Error::library_go_away(Reason::PROTOCOL_ERROR)); } Ok(()) @@ -821,7 +812,7 @@ impl Recv { return; } - log::trace!("enqueue_reset_expiration; {:?}", stream.id); + tracing::trace!("enqueue_reset_expiration; {:?}", stream.id); if !counts.can_inc_num_reset_streams() { // try to evict 1 stream if possible @@ -864,13 +855,18 @@ impl Recv { } pub fn clear_expired_reset_streams(&mut self, store: &mut Store, counts: &mut Counts) { - let now = Instant::now(); - let reset_duration = self.reset_duration; - while let Some(stream) = self.pending_reset_expired.pop_if(store, |stream| { - let reset_at = stream.reset_at.expect("reset_at must be set if in queue"); - now - reset_at > reset_duration - }) { - counts.transition_after(stream, true); + if !self.pending_reset_expired.is_empty() { + let now = Instant::now(); + let reset_duration = self.reset_duration; + while let Some(stream) = self.pending_reset_expired.pop_if(store, |stream| { + let reset_at = stream.reset_at.expect("reset_at must be set if in queue"); + // rust-lang/rust#86470 tracks a bug in the standard library where `Instant` + // subtraction can panic (because, on some platforms, `Instant` isn't actually + // monotonic). We use a saturating operation to avoid this panic here. 
+ now.saturating_duration_since(reset_at) > reset_duration + }) { + counts.transition_after(stream, true); + } } } @@ -891,7 +887,7 @@ impl Recv { fn clear_stream_window_update_queue(&mut self, store: &mut Store, counts: &mut Counts) { while let Some(stream) = self.pending_window_updates.pop(store) { counts.transition(stream, |_, stream| { - log::trace!("clear_stream_window_update_queue; stream={:?}", stream.id); + tracing::trace!("clear_stream_window_update_queue; stream={:?}", stream.id); }) } } @@ -981,7 +977,7 @@ impl Recv { }; counts.transition(stream, |_, stream| { - log::trace!("pending_window_updates -- pop; stream={:?}", stream.id); + tracing::trace!("pending_window_updates -- pop; stream={:?}", stream.id); debug_assert!(!stream.is_pending_window_update); if !stream.state.is_recv_streaming() { @@ -1094,8 +1090,8 @@ impl Open { // ===== impl RecvHeaderBlockError ===== -impl From for RecvHeaderBlockError { - fn from(err: RecvError) -> Self { +impl From for RecvHeaderBlockError { + fn from(err: Error) -> Self { RecvHeaderBlockError::State(err) } } diff --git a/third_party/rust/h2/src/proto/streams/send.rs b/third_party/rust/h2/src/proto/streams/send.rs index 4d38593ecb41..2c5a38c80150 100644 --- a/third_party/rust/h2/src/proto/streams/send.rs +++ b/third_party/rust/h2/src/proto/streams/send.rs @@ -2,8 +2,9 @@ use super::{ store, Buffer, Codec, Config, Counts, Frame, Prioritize, Prioritized, Store, Stream, StreamId, StreamIdOverflow, WindowSize, }; -use crate::codec::{RecvError, UserError}; +use crate::codec::UserError; use crate::frame::{self, Reason}; +use crate::proto::{Error, Initiator}; use bytes::Buf; use http; @@ -32,6 +33,11 @@ pub(super) struct Send { /// Prioritization layer prioritize: Prioritize, + + is_push_enabled: bool, + + /// If extended connect protocol is enabled. + is_extended_connect_protocol_enabled: bool, } /// A value to detect which public API has called `poll_reset`. 
@@ -49,6 +55,8 @@ impl Send { max_stream_id: StreamId::MAX, next_stream_id: Ok(config.local_next_stream_id), prioritize: Prioritize::new(config), + is_push_enabled: true, + is_extended_connect_protocol_enabled: false, } } @@ -77,11 +85,11 @@ impl Send { || fields.contains_key("keep-alive") || fields.contains_key("proxy-connection") { - log::debug!("illegal connection-specific headers found"); + tracing::debug!("illegal connection-specific headers found"); return Err(UserError::MalformedHeaders); } else if let Some(te) = fields.get(http::header::TE) { if te != "trailers" { - log::debug!("illegal connection-specific headers found"); + tracing::debug!("illegal connection-specific headers found"); return Err(UserError::MalformedHeaders); } } @@ -95,7 +103,11 @@ impl Send { stream: &mut store::Ptr, task: &mut Option, ) -> Result<(), UserError> { - log::trace!( + if !self.is_push_enabled { + return Err(UserError::PeerDisabledServerPush); + } + + tracing::trace!( "send_push_promise; frame={:?}; init_window={:?}", frame, self.init_window_sz @@ -118,7 +130,7 @@ impl Send { counts: &mut Counts, task: &mut Option, ) -> Result<(), UserError> { - log::trace!( + tracing::trace!( "send_headers; frame={:?}; init_window={:?}", frame, self.init_window_sz @@ -126,10 +138,6 @@ impl Send { Self::check_headers(frame.fields())?; - if frame.has_too_big_field() { - return Err(UserError::HeaderTooBig); - } - let end_stream = frame.is_end_stream(); // Update the state @@ -158,6 +166,7 @@ impl Send { pub fn send_reset( &mut self, reason: Reason, + initiator: Initiator, buffer: &mut Buffer>, stream: &mut store::Ptr, counts: &mut Counts, @@ -166,14 +175,16 @@ impl Send { let is_reset = stream.state.is_reset(); let is_closed = stream.state.is_closed(); let is_empty = stream.pending_send.is_empty(); + let stream_id = stream.id; - log::trace!( - "send_reset(..., reason={:?}, stream={:?}, ..., \ + tracing::trace!( + "send_reset(..., reason={:?}, initiator={:?}, stream={:?}, ..., \ is_reset={:?}; is_closed={:?}; pending_send.is_empty={:?}; \ state={:?} \ ", reason, - stream.id, + initiator, + stream_id, is_reset, is_closed, is_empty, @@ -182,23 +193,23 @@ impl Send { if is_reset { // Don't double reset - log::trace!( + tracing::trace!( " -> not sending RST_STREAM ({:?} is already reset)", - stream.id + stream_id ); return; } // Transition the state to reset no matter what. - stream.state.set_reset(reason); + stream.state.set_reset(stream_id, reason, initiator); // If closed AND the send queue is flushed, then the stream cannot be // reset explicitly, either. Implicit resets can still be queued. 
if is_closed && is_empty { - log::trace!( + tracing::trace!( " -> not sending explicit RST_STREAM ({:?} was closed \ and send queue was flushed)", - stream.id + stream_id ); return; } @@ -211,7 +222,7 @@ impl Send { let frame = frame::Reset::new(stream.id, reason); - log::trace!("send_reset -- queueing; frame={:?}", frame); + tracing::trace!("send_reset -- queueing; frame={:?}", frame); self.prioritize .queue_frame(frame.into(), buffer, stream, task); self.prioritize.reclaim_all_capacity(stream, counts); @@ -263,13 +274,9 @@ impl Send { return Err(UserError::UnexpectedFrameType); } - if frame.has_too_big_field() { - return Err(UserError::HeaderTooBig); - } - stream.state.send_close(); - log::trace!("send_trailers -- queuing; frame={:?}", frame); + tracing::trace!("send_trailers -- queuing; frame={:?}", frame); self.prioritize .queue_frame(frame.into(), buffer, stream, task); @@ -326,14 +333,12 @@ impl Send { /// Current available stream send capacity pub fn capacity(&self, stream: &mut store::Ptr) -> WindowSize { - let available = stream.send_flow.available().as_size(); + let available = stream.send_flow.available().as_size() as usize; let buffered = stream.buffered_send_data; - if available <= buffered { - 0 - } else { - available - buffered - } + available + .min(self.prioritize.max_buffer_size()) + .saturating_sub(buffered) as WindowSize } pub fn poll_reset( @@ -370,9 +375,16 @@ impl Send { task: &mut Option, ) -> Result<(), Reason> { if let Err(e) = self.prioritize.recv_stream_window_update(sz, stream) { - log::debug!("recv_stream_window_update !!; err={:?}", e); + tracing::debug!("recv_stream_window_update !!; err={:?}", e); - self.send_reset(Reason::FLOW_CONTROL_ERROR, buffer, stream, counts, task); + self.send_reset( + Reason::FLOW_CONTROL_ERROR, + Initiator::Library, + buffer, + stream, + counts, + task, + ); return Err(e); } @@ -380,7 +392,7 @@ impl Send { Ok(()) } - pub(super) fn recv_go_away(&mut self, last_stream_id: StreamId) -> Result<(), RecvError> { + pub(super) fn recv_go_away(&mut self, last_stream_id: StreamId) -> Result<(), Error> { if last_stream_id > self.max_stream_id { // The remote endpoint sent a `GOAWAY` frame indicating a stream // that we never sent, or that we have already terminated on account @@ -393,14 +405,14 @@ impl Send { "recv_go_away: last_stream_id ({:?}) > max_stream_id ({:?})", last_stream_id, self.max_stream_id, ); - return Err(RecvError::Connection(Reason::PROTOCOL_ERROR)); + return Err(Error::library_go_away(Reason::PROTOCOL_ERROR)); } self.max_stream_id = last_stream_id; Ok(()) } - pub fn recv_err( + pub fn handle_error( &mut self, buffer: &mut Buffer>, stream: &mut store::Ptr, @@ -418,7 +430,11 @@ impl Send { store: &mut Store, counts: &mut Counts, task: &mut Option, - ) -> Result<(), RecvError> { + ) -> Result<(), Error> { + if let Some(val) = settings.is_extended_connect_protocol_enabled() { + self.is_extended_connect_protocol_enabled = val; + } + // Applies an update to the remote endpoint's initial window size. // // Per RFC 7540 §6.9.2: @@ -443,7 +459,7 @@ impl Send { if val < old_val { // We must decrease the (remote) window on every open stream. 
let dec = old_val - val; - log::trace!("decrementing all windows; dec={}", dec); + tracing::trace!("decrementing all windows; dec={}", dec); let mut total_reclaimed = 0; store.for_each(|mut stream| { @@ -469,7 +485,7 @@ impl Send { 0 }; - log::trace!( + tracing::trace!( "decremented stream window; id={:?}; decr={}; reclaimed={}; flow={:?}", stream.id, dec, @@ -480,22 +496,24 @@ impl Send { // TODO: Should this notify the producer when the capacity // of a stream is reduced? Maybe it should if the capacity // is reduced to zero, allowing the producer to stop work. - - Ok::<_, RecvError>(()) - })?; + }); self.prioritize .assign_connection_capacity(total_reclaimed, store, counts); } else if val > old_val { let inc = val - old_val; - store.for_each(|mut stream| { + store.try_for_each(|mut stream| { self.recv_stream_window_update(inc, buffer, &mut stream, counts, task) - .map_err(RecvError::Connection) + .map_err(Error::library_go_away) })?; } } + if let Some(val) = settings.is_push_enabled() { + self.is_push_enabled = val + } + Ok(()) } @@ -530,4 +548,18 @@ impl Send { true } } + + pub(super) fn maybe_reset_next_stream_id(&mut self, id: StreamId) { + if let Ok(next_id) = self.next_stream_id { + // Peer::is_local_init should have been called beforehand + debug_assert_eq!(id.is_server_initiated(), next_id.is_server_initiated()); + if id >= next_id { + self.next_stream_id = id.next_id(); + } + } + } + + pub(crate) fn is_extended_connect_protocol_enabled(&self) -> bool { + self.is_extended_connect_protocol_enabled + } } diff --git a/third_party/rust/h2/src/proto/streams/state.rs b/third_party/rust/h2/src/proto/streams/state.rs index 26323124db48..9931d41b1cdf 100644 --- a/third_party/rust/h2/src/proto/streams/state.rs +++ b/third_party/rust/h2/src/proto/streams/state.rs @@ -1,9 +1,8 @@ use std::io; -use crate::codec::UserError::*; -use crate::codec::{RecvError, UserError}; -use crate::frame::Reason; -use crate::proto::{self, PollReset}; +use crate::codec::UserError; +use crate::frame::{self, Reason, StreamId}; +use crate::proto::{self, Error, Initiator, PollReset}; use self::Inner::*; use self::Peer::*; @@ -53,7 +52,7 @@ pub struct State { inner: Inner, } -#[derive(Debug, Clone, Copy)] +#[derive(Debug, Clone)] enum Inner { Idle, // TODO: these states shouldn't count against concurrency limits: @@ -71,12 +70,10 @@ enum Peer { Streaming, } -#[derive(Debug, Copy, Clone)] +#[derive(Debug, Clone)] enum Cause { EndStream, - Proto(Reason), - LocallyReset(Reason), - Io, + Error(Error), /// This indicates to the connection that a reset frame must be sent out /// once the send queue has been flushed. @@ -85,7 +82,7 @@ enum Cause { /// - User drops all references to a stream, so we want to CANCEL the it. /// - Header block size was too large, so we want to REFUSE, possibly /// after sending a 431 response frame. - Scheduled(Reason), + ScheduledLibraryReset(Reason), } impl State { @@ -123,7 +120,7 @@ impl State { } _ => { // All other transitions result in a protocol error - return Err(UnexpectedFrameType); + return Err(UserError::UnexpectedFrameType); } }; @@ -133,9 +130,9 @@ impl State { /// Opens the receive-half of the stream when a HEADERS frame is received. /// /// Returns true if this transitions the state to Open. 
- pub fn recv_open(&mut self, eos: bool) -> Result { - let remote = Streaming; + pub fn recv_open(&mut self, frame: &frame::Headers) -> Result { let mut initial = false; + let eos = frame.is_end_stream(); self.inner = match self.inner { Idle => { @@ -146,7 +143,12 @@ impl State { } else { Open { local: AwaitingHeaders, - remote, + remote: if frame.is_informational() { + tracing::trace!("skipping 1xx response headers"); + AwaitingHeaders + } else { + Streaming + }, } } } @@ -155,6 +157,9 @@ impl State { if eos { Closed(Cause::EndStream) + } else if frame.is_informational() { + tracing::trace!("skipping 1xx response headers"); + ReservedRemote } else { HalfClosedLocal(Streaming) } @@ -166,20 +171,31 @@ impl State { if eos { HalfClosedRemote(local) } else { - Open { local, remote } + Open { + local, + remote: if frame.is_informational() { + tracing::trace!("skipping 1xx response headers"); + AwaitingHeaders + } else { + Streaming + }, + } } } HalfClosedLocal(AwaitingHeaders) => { if eos { Closed(Cause::EndStream) + } else if frame.is_informational() { + tracing::trace!("skipping 1xx response headers"); + HalfClosedLocal(AwaitingHeaders) } else { - HalfClosedLocal(remote) + HalfClosedLocal(Streaming) } } - state => { + ref state => { // All other transitions result in a protocol error proto_err!(conn: "recv_open: in unexpected state {:?}", state); - return Err(RecvError::Connection(Reason::PROTOCOL_ERROR)); + return Err(Error::library_go_away(Reason::PROTOCOL_ERROR)); } }; @@ -187,15 +203,15 @@ impl State { } /// Transition from Idle -> ReservedRemote - pub fn reserve_remote(&mut self) -> Result<(), RecvError> { + pub fn reserve_remote(&mut self) -> Result<(), Error> { match self.inner { Idle => { self.inner = ReservedRemote; Ok(()) } - state => { + ref state => { proto_err!(conn: "reserve_remote: in unexpected state {:?}", state); - Err(RecvError::Connection(Reason::PROTOCOL_ERROR)) + Err(Error::library_go_away(Reason::PROTOCOL_ERROR)) } } } @@ -212,22 +228,22 @@ impl State { } /// Indicates that the remote side will not send more data to the local. - pub fn recv_close(&mut self) -> Result<(), RecvError> { + pub fn recv_close(&mut self) -> Result<(), Error> { match self.inner { Open { local, .. } => { // The remote side will continue to receive data. - log::trace!("recv_close: Open => HalfClosedRemote({:?})", local); + tracing::trace!("recv_close: Open => HalfClosedRemote({:?})", local); self.inner = HalfClosedRemote(local); Ok(()) } HalfClosedLocal(..) => { - log::trace!("recv_close: HalfClosedLocal => Closed"); + tracing::trace!("recv_close: HalfClosedLocal => Closed"); self.inner = Closed(Cause::EndStream); Ok(()) } - state => { + ref state => { proto_err!(conn: "recv_close: in unexpected state {:?}", state); - Err(RecvError::Connection(Reason::PROTOCOL_ERROR)) + Err(Error::library_go_away(Reason::PROTOCOL_ERROR)) } } } @@ -235,9 +251,9 @@ impl State { /// The remote explicitly sent a RST_STREAM. /// /// # Arguments - /// - `reason`: the reason field of the received RST_STREAM frame. + /// - `frame`: the received RST_STREAM frame. /// - `queued`: true if this stream has frames in the pending send queue. - pub fn recv_reset(&mut self, reason: Reason, queued: bool) { + pub fn recv_reset(&mut self, frame: frame::Reset, queued: bool) { match self.inner { // If the stream is already in a `Closed` state, do nothing, // provided that there are no frames still in the send queue. 
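The `recv_open` arms above now special-case informational responses: a 1xx HEADERS frame never carries a body and never ends the stream, so the receive half keeps waiting for the final headers. A compact sketch of that rule, assuming only that "informational" means a status in 100-199 (the enum below is illustrative, not h2's actual state type):

```rust
#[derive(Debug, PartialEq)]
enum RecvHalf {
    AwaitingHeaders,
    Streaming,
}

fn on_response_headers(status: u16, current: RecvHalf) -> RecvHalf {
    if (100..200).contains(&status) {
        // Skip 1xx: stay put until the "real" response headers arrive.
        current
    } else {
        RecvHalf::Streaming
    }
}

fn main() {
    let half = on_response_headers(100, RecvHalf::AwaitingHeaders);
    assert_eq!(half, RecvHalf::AwaitingHeaders);
    let half = on_response_headers(200, half);
    assert_eq!(half, RecvHalf::Streaming);
}
```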
@@ -256,30 +272,28 @@ impl State { // In either of these cases, we want to overwrite the stream's // previous state with the received RST_STREAM, so that the queue // will be cleared by `Prioritize::pop_frame`. - state => { - log::trace!( - "recv_reset; reason={:?}; state={:?}; queued={:?}", - reason, + ref state => { + tracing::trace!( + "recv_reset; frame={:?}; state={:?}; queued={:?}", + frame, state, queued ); - self.inner = Closed(Cause::Proto(reason)); + self.inner = Closed(Cause::Error(Error::remote_reset( + frame.stream_id(), + frame.reason(), + ))); } } } - /// We noticed a protocol error. - pub fn recv_err(&mut self, err: &proto::Error) { - use crate::proto::Error::*; - + /// Handle a connection-level error. + pub fn handle_error(&mut self, err: &proto::Error) { match self.inner { Closed(..) => {} _ => { - log::trace!("recv_err; err={:?}", err); - self.inner = Closed(match *err { - Proto(reason) => Cause::LocallyReset(reason), - Io(..) => Cause::Io, - }); + tracing::trace!("handle_error; err={:?}", err); + self.inner = Closed(Cause::Error(err.clone())); } } } @@ -287,9 +301,9 @@ impl State { pub fn recv_eof(&mut self) { match self.inner { Closed(..) => {} - s => { - log::trace!("recv_eof; state={:?}", s); - self.inner = Closed(Cause::Io); + ref state => { + tracing::trace!("recv_eof; state={:?}", state); + self.inner = Closed(Cause::Error(io::ErrorKind::BrokenPipe.into())); } } } @@ -299,46 +313,46 @@ impl State { match self.inner { Open { remote, .. } => { // The remote side will continue to receive data. - log::trace!("send_close: Open => HalfClosedLocal({:?})", remote); + tracing::trace!("send_close: Open => HalfClosedLocal({:?})", remote); self.inner = HalfClosedLocal(remote); } HalfClosedRemote(..) => { - log::trace!("send_close: HalfClosedRemote => Closed"); + tracing::trace!("send_close: HalfClosedRemote => Closed"); self.inner = Closed(Cause::EndStream); } - state => panic!("send_close: unexpected state {:?}", state), + ref state => panic!("send_close: unexpected state {:?}", state), } } /// Set the stream state to reset locally. - pub fn set_reset(&mut self, reason: Reason) { - self.inner = Closed(Cause::LocallyReset(reason)); + pub fn set_reset(&mut self, stream_id: StreamId, reason: Reason, initiator: Initiator) { + self.inner = Closed(Cause::Error(Error::Reset(stream_id, reason, initiator))); } /// Set the stream state to a scheduled reset. pub fn set_scheduled_reset(&mut self, reason: Reason) { debug_assert!(!self.is_closed()); - self.inner = Closed(Cause::Scheduled(reason)); + self.inner = Closed(Cause::ScheduledLibraryReset(reason)); } pub fn get_scheduled_reset(&self) -> Option { match self.inner { - Closed(Cause::Scheduled(reason)) => Some(reason), + Closed(Cause::ScheduledLibraryReset(reason)) => Some(reason), _ => None, } } pub fn is_scheduled_reset(&self) -> bool { match self.inner { - Closed(Cause::Scheduled(..)) => true, + Closed(Cause::ScheduledLibraryReset(..)) => true, _ => false, } } pub fn is_local_reset(&self) -> bool { match self.inner { - Closed(Cause::LocallyReset(_)) => true, - Closed(Cause::Scheduled(..)) => true, + Closed(Cause::Error(ref e)) => e.is_local(), + Closed(Cause::ScheduledLibraryReset(..)) => true, _ => false, } } @@ -417,10 +431,10 @@ impl State { pub fn ensure_recv_open(&self) -> Result { // TODO: Is this correct? 
match self.inner { - Closed(Cause::Proto(reason)) - | Closed(Cause::LocallyReset(reason)) - | Closed(Cause::Scheduled(reason)) => Err(proto::Error::Proto(reason)), - Closed(Cause::Io) => Err(proto::Error::Io(io::ErrorKind::BrokenPipe.into())), + Closed(Cause::Error(ref e)) => Err(e.clone()), + Closed(Cause::ScheduledLibraryReset(reason)) => { + Err(proto::Error::library_go_away(reason)) + } Closed(Cause::EndStream) | HalfClosedRemote(..) | ReservedLocal => Ok(false), _ => Ok(true), } @@ -429,10 +443,10 @@ impl State { /// Returns a reason if the stream has been reset. pub(super) fn ensure_reason(&self, mode: PollReset) -> Result, crate::Error> { match self.inner { - Closed(Cause::Proto(reason)) - | Closed(Cause::LocallyReset(reason)) - | Closed(Cause::Scheduled(reason)) => Ok(Some(reason)), - Closed(Cause::Io) => Err(proto::Error::Io(io::ErrorKind::BrokenPipe.into()).into()), + Closed(Cause::Error(Error::Reset(_, reason, _))) + | Closed(Cause::Error(Error::GoAway(_, reason, _))) + | Closed(Cause::ScheduledLibraryReset(reason)) => Ok(Some(reason)), + Closed(Cause::Error(ref e)) => Err(e.clone().into()), Open { local: Streaming, .. } diff --git a/third_party/rust/h2/src/proto/streams/store.rs b/third_party/rust/h2/src/proto/streams/store.rs index 09d8a64a679d..3e34b7cb293c 100644 --- a/third_party/rust/h2/src/proto/streams/store.rs +++ b/third_party/rust/h2/src/proto/streams/store.rs @@ -4,6 +4,7 @@ use slab; use indexmap::{self, IndexMap}; +use std::convert::Infallible; use std::fmt; use std::marker::PhantomData; use std::ops; @@ -128,7 +129,20 @@ impl Store { } } - pub fn for_each(&mut self, mut f: F) -> Result<(), E> + pub(crate) fn for_each(&mut self, mut f: F) + where + F: FnMut(Ptr), + { + match self.try_for_each(|ptr| { + f(ptr); + Ok::<_, Infallible>(()) + }) { + Ok(()) => (), + Err(infallible) => match infallible {}, + } + } + + pub fn try_for_each(&mut self, mut f: F) -> Result<(), E> where F: FnMut(Ptr) -> Result<(), E>, { @@ -244,10 +258,10 @@ where /// /// If the stream is already contained by the list, return `false`. pub fn push(&mut self, stream: &mut store::Ptr) -> bool { - log::trace!("Queue::push"); + tracing::trace!("Queue::push"); if N::is_queued(stream) { - log::trace!(" -> already queued"); + tracing::trace!(" -> already queued"); return false; } @@ -259,7 +273,7 @@ where // Queue the stream match self.indices { Some(ref mut idxs) => { - log::trace!(" -> existing entries"); + tracing::trace!(" -> existing entries"); // Update the current tail node to point to `stream` let key = stream.key(); @@ -269,7 +283,7 @@ where idxs.tail = stream.key(); } None => { - log::trace!(" -> first entry"); + tracing::trace!(" -> first entry"); self.indices = Some(store::Indices { head: stream.key(), tail: stream.key(), @@ -304,6 +318,10 @@ where None } + pub fn is_empty(&self) -> bool { + self.indices.is_none() + } + pub fn pop_if<'a, R, F>(&mut self, store: &'a mut R, f: F) -> Option> where R: Resolve, diff --git a/third_party/rust/h2/src/proto/streams/stream.rs b/third_party/rust/h2/src/proto/streams/stream.rs index 398672049945..36d515bad2a2 100644 --- a/third_party/rust/h2/src/proto/streams/stream.rs +++ b/third_party/rust/h2/src/proto/streams/stream.rs @@ -45,7 +45,7 @@ pub(super) struct Stream { /// Amount of data buffered at the prioritization layer. /// TODO: Technically this could be greater than the window size... - pub buffered_send_data: WindowSize, + pub buffered_send_data: usize, /// Task tracking additional send capacity (i.e. window updates). 
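The new `for_each`/`try_for_each` split in `store.rs` above uses `std::convert::Infallible` and an empty match to build infallible iteration on top of the fallible version with no runtime check. A standalone sketch of that pattern:

```rust
use std::convert::Infallible;

fn try_for_each<E>(items: &[u32], mut f: impl FnMut(u32) -> Result<(), E>) -> Result<(), E> {
    for &item in items {
        f(item)?;
    }
    Ok(())
}

fn for_each(items: &[u32], mut f: impl FnMut(u32)) {
    match try_for_each(items, |item| {
        f(item);
        Ok::<_, Infallible>(())
    }) {
        Ok(()) => (),
        // `Infallible` has no values, so this arm can never run; the empty
        // match proves that to the compiler without unwrap or unreachable!().
        Err(infallible) => match infallible {},
    }
}

fn main() {
    let mut sum = 0;
    for_each(&[1, 2, 3], |n| sum += n);
    assert_eq!(sum, 6);
}
```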
send_task: Option, @@ -260,21 +260,31 @@ impl Stream { self.ref_count == 0 && !self.state.is_closed() } - pub fn assign_capacity(&mut self, capacity: WindowSize) { + pub fn assign_capacity(&mut self, capacity: WindowSize, max_buffer_size: usize) { debug_assert!(capacity > 0); - self.send_capacity_inc = true; self.send_flow.assign_capacity(capacity); - log::trace!( - " assigned capacity to stream; available={}; buffered={}; id={:?}", + tracing::trace!( + " assigned capacity to stream; available={}; buffered={}; id={:?}; max_buffer_size={}", self.send_flow.available(), self.buffered_send_data, - self.id + self.id, + max_buffer_size ); + self.notify_if_can_buffer_more(max_buffer_size); + } + + /// If the capacity was limited because of the max_send_buffer_size, + /// then consider waking the send task again... + pub fn notify_if_can_buffer_more(&mut self, max_buffer_size: usize) { + let available = self.send_flow.available().as_size() as usize; + let buffered = self.buffered_send_data; + // Only notify if the capacity exceeds the amount of buffered data - if self.send_flow.available() > self.buffered_send_data { - log::trace!(" notifying task"); + if available.min(max_buffer_size) > buffered { + self.send_capacity_inc = true; + tracing::trace!(" notifying task"); self.notify_send(); } } @@ -286,7 +296,11 @@ impl Stream { Some(val) => *rem = val, None => return Err(()), }, - ContentLength::Head => return Err(()), + ContentLength::Head => { + if len != 0 { + return Err(()); + } + } _ => {} } diff --git a/third_party/rust/h2/src/proto/streams/streams.rs b/third_party/rust/h2/src/proto/streams/streams.rs index 8f618619412e..3e7ae97d9ea6 100644 --- a/third_party/rust/h2/src/proto/streams/streams.rs +++ b/third_party/rust/h2/src/proto/streams/streams.rs @@ -1,9 +1,10 @@ use super::recv::RecvHeaderBlockError; use super::store::{self, Entry, Resolve, Store}; use super::{Buffer, Config, Counts, Prioritized, Recv, Send, Stream, StreamId}; -use crate::codec::{Codec, RecvError, SendError, UserError}; +use crate::codec::{Codec, SendError, UserError}; +use crate::ext::Protocol; use crate::frame::{self, Frame, Reason}; -use crate::proto::{peer, Open, Peer, WindowSize}; +use crate::proto::{peer, Error, Initiator, Open, Peer, WindowSize}; use crate::{client, proto, server}; use bytes::{Buf, Bytes}; @@ -21,7 +22,7 @@ where P: Peer, { /// Holds most of the connection and stream related state for processing - /// HTTP/2.0 frames associated with streams. + /// HTTP/2 frames associated with streams. inner: Arc>, /// This is the queue of frames to be written to the wire. This is split out @@ -37,6 +38,17 @@ where _p: ::std::marker::PhantomData
<P>
, } +// Like `Streams` but with a `peer::Dyn` field instead of a static `P: Peer` type parameter. +// Ensures that the methods only get one instantiation, instead of two (client and server) +#[derive(Debug)] +pub(crate) struct DynStreams<'a, B> { + inner: &'a Mutex, + + send_buffer: &'a SendBuffer, + + peer: peer::Dyn, +} + /// Reference to the stream state #[derive(Debug)] pub(crate) struct StreamRef { @@ -101,17 +113,7 @@ where let peer = P::r#dyn(); Streams { - inner: Arc::new(Mutex::new(Inner { - counts: Counts::new(peer, &config), - actions: Actions { - recv: Recv::new(peer, &config), - send: Send::new(&config), - task: None, - conn_error: None, - }, - store: Store::new(), - refs: 1, - })), + inner: Inner::new(peer, config), send_buffer: Arc::new(SendBuffer::new()), _p: ::std::marker::PhantomData, } @@ -126,448 +128,19 @@ where .set_target_connection_window(size, &mut me.actions.task) } - /// Process inbound headers - pub fn recv_headers(&mut self, frame: frame::Headers) -> Result<(), RecvError> { - let id = frame.stream_id(); - let mut me = self.inner.lock().unwrap(); - let me = &mut *me; - - // The GOAWAY process has begun. All streams with a greater ID than - // specified as part of GOAWAY should be ignored. - if id > me.actions.recv.max_stream_id() { - log::trace!( - "id ({:?}) > max_stream_id ({:?}), ignoring HEADERS", - id, - me.actions.recv.max_stream_id() - ); - return Ok(()); - } - - let key = match me.store.find_entry(id) { - Entry::Occupied(e) => e.key(), - Entry::Vacant(e) => { - // Client: it's possible to send a request, and then send - // a RST_STREAM while the response HEADERS were in transit. - // - // Server: we can't reset a stream before having received - // the request headers, so don't allow. - if !P::is_server() { - // This may be response headers for a stream we've already - // forgotten about... - if me.actions.may_have_forgotten_stream::
<P>
(id) { - log::debug!( - "recv_headers for old stream={:?}, sending STREAM_CLOSED", - id, - ); - return Err(RecvError::Stream { - id, - reason: Reason::STREAM_CLOSED, - }); - } - } - - match me.actions.recv.open(id, Open::Headers, &mut me.counts)? { - Some(stream_id) => { - let stream = Stream::new( - stream_id, - me.actions.send.init_window_sz(), - me.actions.recv.init_window_sz(), - ); - - e.insert(stream) - } - None => return Ok(()), - } - } - }; - - let stream = me.store.resolve(key); - - if stream.state.is_local_reset() { - // Locally reset streams must ignore frames "for some time". - // This is because the remote may have sent trailers before - // receiving the RST_STREAM frame. - log::trace!("recv_headers; ignoring trailers on {:?}", stream.id); - return Ok(()); - } - - let actions = &mut me.actions; - let mut send_buffer = self.send_buffer.inner.lock().unwrap(); - let send_buffer = &mut *send_buffer; - - me.counts.transition(stream, |counts, stream| { - log::trace!( - "recv_headers; stream={:?}; state={:?}", - stream.id, - stream.state - ); - - let res = if stream.state.is_recv_headers() { - match actions.recv.recv_headers(frame, stream, counts) { - Ok(()) => Ok(()), - Err(RecvHeaderBlockError::Oversize(resp)) => { - if let Some(resp) = resp { - let sent = actions.send.send_headers( - resp, send_buffer, stream, counts, &mut actions.task); - debug_assert!(sent.is_ok(), "oversize response should not fail"); - - actions.send.schedule_implicit_reset( - stream, - Reason::REFUSED_STREAM, - counts, - &mut actions.task); - - actions.recv.enqueue_reset_expiration(stream, counts); - - Ok(()) - } else { - Err(RecvError::Stream { - id: stream.id, - reason: Reason::REFUSED_STREAM, - }) - } - }, - Err(RecvHeaderBlockError::State(err)) => Err(err), - } - } else { - if !frame.is_end_stream() { - // Receiving trailers that don't set EOS is a "malformed" - // message. Malformed messages are a stream error. - proto_err!(stream: "recv_headers: trailers frame was not EOS; stream={:?}", stream.id); - return Err(RecvError::Stream { - id: stream.id, - reason: Reason::PROTOCOL_ERROR, - }); - } - - actions.recv.recv_trailers(frame, stream) - }; - - actions.reset_on_recv_stream_err(send_buffer, stream, counts, res) - }) - } - - pub fn recv_data(&mut self, frame: frame::Data) -> Result<(), RecvError> { - let mut me = self.inner.lock().unwrap(); - let me = &mut *me; - - let id = frame.stream_id(); - - let stream = match me.store.find_mut(&id) { - Some(stream) => stream, - None => { - // The GOAWAY process has begun. All streams with a greater ID - // than specified as part of GOAWAY should be ignored. - if id > me.actions.recv.max_stream_id() { - log::trace!( - "id ({:?}) > max_stream_id ({:?}), ignoring DATA", - id, - me.actions.recv.max_stream_id() - ); - return Ok(()); - } - - if me.actions.may_have_forgotten_stream::
<P>
(id) { - log::debug!("recv_data for old stream={:?}, sending STREAM_CLOSED", id,); - - let sz = frame.payload().len(); - // This should have been enforced at the codec::FramedRead layer, so - // this is just a sanity check. - assert!(sz <= super::MAX_WINDOW_SIZE as usize); - let sz = sz as WindowSize; - - me.actions.recv.ignore_data(sz)?; - return Err(RecvError::Stream { - id, - reason: Reason::STREAM_CLOSED, - }); - } - - proto_err!(conn: "recv_data: stream not found; id={:?}", id); - return Err(RecvError::Connection(Reason::PROTOCOL_ERROR)); - } - }; - - let actions = &mut me.actions; - let mut send_buffer = self.send_buffer.inner.lock().unwrap(); - let send_buffer = &mut *send_buffer; - - me.counts.transition(stream, |counts, stream| { - let sz = frame.payload().len(); - let res = actions.recv.recv_data(frame, stream); - - // Any stream error after receiving a DATA frame means - // we won't give the data to the user, and so they can't - // release the capacity. We do it automatically. - if let Err(RecvError::Stream { .. }) = res { - actions - .recv - .release_connection_capacity(sz as WindowSize, &mut None); - } - actions.reset_on_recv_stream_err(send_buffer, stream, counts, res) - }) - } - - pub fn recv_reset(&mut self, frame: frame::Reset) -> Result<(), RecvError> { - let mut me = self.inner.lock().unwrap(); - let me = &mut *me; - - let id = frame.stream_id(); - - if id.is_zero() { - proto_err!(conn: "recv_reset: invalid stream ID 0"); - return Err(RecvError::Connection(Reason::PROTOCOL_ERROR)); - } - - // The GOAWAY process has begun. All streams with a greater ID than - // specified as part of GOAWAY should be ignored. - if id > me.actions.recv.max_stream_id() { - log::trace!( - "id ({:?}) > max_stream_id ({:?}), ignoring RST_STREAM", - id, - me.actions.recv.max_stream_id() - ); - return Ok(()); - } - - let stream = match me.store.find_mut(&id) { - Some(stream) => stream, - None => { - // TODO: Are there other error cases? - me.actions - .ensure_not_idle(me.counts.peer(), id) - .map_err(RecvError::Connection)?; - - return Ok(()); - } - }; - - let mut send_buffer = self.send_buffer.inner.lock().unwrap(); - let send_buffer = &mut *send_buffer; - - let actions = &mut me.actions; - - me.counts.transition(stream, |counts, stream| { - actions.recv.recv_reset(frame, stream); - actions.send.recv_err(send_buffer, stream, counts); - assert!(stream.state.is_closed()); - Ok(()) - }) - } - - /// Handle a received error and return the ID of the last processed stream. 
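Context for the large move that follows: the `DynStreams` wrapper introduced earlier in this hunk replaces the static `P: Peer` type parameter with a runtime `peer::Dyn` value, so the frame-handling methods are compiled once instead of once per peer (client and server). A toy illustration of that trade-off; the types here are made up for the sketch:

```rust
// Generic version: `handle_generic` is monomorphized per `P`.
trait Peer {
    fn is_server() -> bool;
}
struct Client;
struct Server;
impl Peer for Client {
    fn is_server() -> bool { false }
}
impl Peer for Server {
    fn is_server() -> bool { true }
}

fn handle_generic<P: Peer>(frame: &str) -> String {
    format!("{} handled by server={}", frame, P::is_server())
}

// Dyn version: one compiled copy, the peer kind is just data.
#[derive(Clone, Copy)]
enum DynPeer {
    Client,
    Server,
}
impl DynPeer {
    fn is_server(self) -> bool {
        matches!(self, DynPeer::Server)
    }
}

fn handle_dyn(peer: DynPeer, frame: &str) -> String {
    format!("{} handled by server={}", frame, peer.is_server())
}

fn main() {
    assert_eq!(handle_generic::<Server>("HEADERS"), handle_dyn(DynPeer::Server, "HEADERS"));
    assert_eq!(handle_generic::<Client>("DATA"), handle_dyn(DynPeer::Client, "DATA"));
}
```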
- pub fn recv_err(&mut self, err: &proto::Error) -> StreamId { - let mut me = self.inner.lock().unwrap(); - let me = &mut *me; - - let actions = &mut me.actions; - let counts = &mut me.counts; - let mut send_buffer = self.send_buffer.inner.lock().unwrap(); - let send_buffer = &mut *send_buffer; - - let last_processed_id = actions.recv.last_processed_id(); - - me.store - .for_each(|stream| { - counts.transition(stream, |counts, stream| { - actions.recv.recv_err(err, &mut *stream); - actions.send.recv_err(send_buffer, stream, counts); - Ok::<_, ()>(()) - }) - }) - .unwrap(); - - actions.conn_error = Some(err.shallow_clone()); - - last_processed_id - } - - pub fn recv_go_away(&mut self, frame: &frame::GoAway) -> Result<(), RecvError> { - let mut me = self.inner.lock().unwrap(); - let me = &mut *me; - - let actions = &mut me.actions; - let counts = &mut me.counts; - let mut send_buffer = self.send_buffer.inner.lock().unwrap(); - let send_buffer = &mut *send_buffer; - - let last_stream_id = frame.last_stream_id(); - - actions.send.recv_go_away(last_stream_id)?; - - let err = frame.reason().into(); - - me.store - .for_each(|stream| { - if stream.id > last_stream_id { - counts.transition(stream, |counts, stream| { - actions.recv.recv_err(&err, &mut *stream); - actions.send.recv_err(send_buffer, stream, counts); - Ok::<_, ()>(()) - }) - } else { - Ok::<_, ()>(()) - } - }) - .unwrap(); - - actions.conn_error = Some(err); - - Ok(()) - } - - pub fn last_processed_id(&self) -> StreamId { - self.inner.lock().unwrap().actions.recv.last_processed_id() - } - - pub fn recv_window_update(&mut self, frame: frame::WindowUpdate) -> Result<(), RecvError> { - let id = frame.stream_id(); - let mut me = self.inner.lock().unwrap(); - let me = &mut *me; - - let mut send_buffer = self.send_buffer.inner.lock().unwrap(); - let send_buffer = &mut *send_buffer; - - if id.is_zero() { - me.actions - .send - .recv_connection_window_update(frame, &mut me.store, &mut me.counts) - .map_err(RecvError::Connection)?; - } else { - // The remote may send window updates for streams that the local now - // considers closed. It's ok... - if let Some(mut stream) = me.store.find_mut(&id) { - // This result is ignored as there is nothing to do when there - // is an error. The stream is reset by the function on error and - // the error is informational. - let _ = me.actions.send.recv_stream_window_update( - frame.size_increment(), - send_buffer, - &mut stream, - &mut me.counts, - &mut me.actions.task, - ); - } else { - me.actions - .ensure_not_idle(me.counts.peer(), id) - .map_err(RecvError::Connection)?; - } - } - - Ok(()) - } - - pub fn recv_push_promise(&mut self, frame: frame::PushPromise) -> Result<(), RecvError> { - let mut me = self.inner.lock().unwrap(); - let me = &mut *me; - - let id = frame.stream_id(); - let promised_id = frame.promised_id(); - - // First, ensure that the initiating stream is still in a valid state. - let parent_key = match me.store.find_mut(&id) { - Some(stream) => { - // The GOAWAY process has begun. All streams with a greater ID - // than specified as part of GOAWAY should be ignored. 
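The surrounding hunks also migrate from `RecvError::Stream`/`RecvError::Connection` to `Error::library_reset` and `Error::library_go_away`. A toy model of the distinction those names capture: a stream-level error is answered with RST_STREAM on that one stream, while a connection-level error is answered with GOAWAY for the whole connection. Names and string output below are illustrative, not h2's real `Error` type.

```rust
#[derive(Debug)]
enum H2Error {
    Reset { stream_id: u32, reason: &'static str },
    GoAway { reason: &'static str },
}

fn frame_to_send(err: &H2Error) -> String {
    match err {
        H2Error::Reset { stream_id, reason } => {
            format!("RST_STREAM(stream={}, {})", stream_id, reason)
        }
        H2Error::GoAway { reason } => format!("GOAWAY({})", reason),
    }
}

fn main() {
    let stream_err = H2Error::Reset { stream_id: 5, reason: "STREAM_CLOSED" };
    let conn_err = H2Error::GoAway { reason: "PROTOCOL_ERROR" };
    assert_eq!(frame_to_send(&stream_err), "RST_STREAM(stream=5, STREAM_CLOSED)");
    assert_eq!(frame_to_send(&conn_err), "GOAWAY(PROTOCOL_ERROR)");
}
```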
- if id > me.actions.recv.max_stream_id() { - log::trace!( - "id ({:?}) > max_stream_id ({:?}), ignoring PUSH_PROMISE", - id, - me.actions.recv.max_stream_id() - ); - return Ok(()); - } - - // The stream must be receive open - stream.state.ensure_recv_open()?; - stream.key() - } - None => { - proto_err!(conn: "recv_push_promise: initiating stream is in an invalid state"); - return Err(RecvError::Connection(Reason::PROTOCOL_ERROR)); - } - }; - - // TODO: Streams in the reserved states do not count towards the concurrency - // limit. However, it seems like there should be a cap otherwise this - // could grow in memory indefinitely. - - // Ensure that we can reserve streams - me.actions.recv.ensure_can_reserve()?; - - // Next, open the stream. - // - // If `None` is returned, then the stream is being refused. There is no - // further work to be done. - if me - .actions - .recv - .open(promised_id, Open::PushPromise, &mut me.counts)? - .is_none() - { - return Ok(()); - } - - // Try to handle the frame and create a corresponding key for the pushed stream - // this requires a bit of indirection to make the borrow checker happy. - let child_key: Option = { - // Create state for the stream - let stream = me.store.insert(promised_id, { - Stream::new( - promised_id, - me.actions.send.init_window_sz(), - me.actions.recv.init_window_sz(), - ) - }); - - let actions = &mut me.actions; - - me.counts.transition(stream, |counts, stream| { - let stream_valid = actions.recv.recv_push_promise(frame, stream); - - match stream_valid { - Ok(()) => Ok(Some(stream.key())), - _ => { - let mut send_buffer = self.send_buffer.inner.lock().unwrap(); - actions - .reset_on_recv_stream_err( - &mut *send_buffer, - stream, - counts, - stream_valid, - ) - .map(|()| None) - } - } - })? - }; - // If we're successful, push the headers and stream... - if let Some(child) = child_key { - let mut ppp = me.store[parent_key].pending_push_promises.take(); - ppp.push(&mut me.store.resolve(child)); - - let parent = &mut me.store.resolve(parent_key); - parent.pending_push_promises = ppp; - parent.notify_recv(); - }; - - Ok(()) - } - pub fn next_incoming(&mut self) -> Option> { let mut me = self.inner.lock().unwrap(); let me = &mut *me; - let key = me.actions.recv.next_incoming(&mut me.store); - // TODO: ideally, OpaqueStreamRefs::new would do this, but we're holding - // the lock, so it can't. - me.refs += 1; - key.map(|key| { + me.actions.recv.next_incoming(&mut me.store).map(|key| { let stream = &mut me.store.resolve(key); - log::trace!( + tracing::trace!( "next_incoming; id={:?}, state={:?}", stream.id, stream.state ); + // TODO: ideally, OpaqueStreamRefs::new would do this, but we're holding + // the lock, so it can't. + me.refs += 1; StreamRef { opaque: OpaqueStreamRef::new(self.inner.clone(), stream), send_buffer: self.send_buffer.clone(), @@ -605,33 +178,10 @@ where T: AsyncWrite + Unpin, { let mut me = self.inner.lock().unwrap(); - let me = &mut *me; - - let mut send_buffer = self.send_buffer.inner.lock().unwrap(); - let send_buffer = &mut *send_buffer; - - // Send WINDOW_UPDATE frames first - // - // TODO: It would probably be better to interleave updates w/ data - // frames. 
- ready!(me - .actions - .recv - .poll_complete(cx, &mut me.store, &mut me.counts, dst))?; - - // Send any other pending frames - ready!(me - .actions - .send - .poll_complete(cx, send_buffer, &mut me.store, &mut me.counts, dst))?; - - // Nothing else to do, track the task - me.actions.task = Some(cx.waker().clone()); - - Poll::Ready(Ok(())) + me.poll_complete(&self.send_buffer, cx, dst) } - pub fn apply_remote_settings(&mut self, frame: &frame::Settings) -> Result<(), RecvError> { + pub fn apply_remote_settings(&mut self, frame: &frame::Settings) -> Result<(), Error> { let mut me = self.inner.lock().unwrap(); let me = &mut *me; @@ -649,7 +199,7 @@ where ) } - pub fn apply_local_settings(&mut self, frame: &frame::Settings) -> Result<(), RecvError> { + pub fn apply_local_settings(&mut self, frame: &frame::Settings) -> Result<(), Error> { let mut me = self.inner.lock().unwrap(); let me = &mut *me; @@ -658,13 +208,18 @@ where pub fn send_request( &mut self, - request: Request<()>, + mut request: Request<()>, end_of_stream: bool, pending: Option<&OpaqueStreamRef>, ) -> Result, SendError> { use super::stream::ContentLength; use http::Method; + let protocol = request.extensions_mut().remove::(); + + // Clear before taking lock, incase extensions contain a StreamRef. + request.extensions_mut().clear(); + // TODO: There is a hazard with assigning a stream ID before the // prioritize layer. If prioritization reorders new streams, this // implicitly closes the earlier stream IDs. @@ -709,7 +264,8 @@ where } // Convert the message - let headers = client::Peer::convert_send_message(stream_id, request, end_of_stream)?; + let headers = + client::Peer::convert_send_message(stream_id, request, protocol, end_of_stream)?; let mut stream = me.store.insert(stream.id, stream); @@ -743,31 +299,612 @@ where }) } + pub(crate) fn is_extended_connect_protocol_enabled(&self) -> bool { + self.inner + .lock() + .unwrap() + .actions + .send + .is_extended_connect_protocol_enabled() + } +} + +impl DynStreams<'_, B> { + pub fn recv_headers(&mut self, frame: frame::Headers) -> Result<(), Error> { + let mut me = self.inner.lock().unwrap(); + + me.recv_headers(self.peer, &self.send_buffer, frame) + } + + pub fn recv_data(&mut self, frame: frame::Data) -> Result<(), Error> { + let mut me = self.inner.lock().unwrap(); + me.recv_data(self.peer, &self.send_buffer, frame) + } + + pub fn recv_reset(&mut self, frame: frame::Reset) -> Result<(), Error> { + let mut me = self.inner.lock().unwrap(); + + me.recv_reset(&self.send_buffer, frame) + } + + /// Notify all streams that a connection-level error happened. 
+ pub fn handle_error(&mut self, err: proto::Error) -> StreamId { + let mut me = self.inner.lock().unwrap(); + me.handle_error(&self.send_buffer, err) + } + + pub fn recv_go_away(&mut self, frame: &frame::GoAway) -> Result<(), Error> { + let mut me = self.inner.lock().unwrap(); + me.recv_go_away(&self.send_buffer, frame) + } + + pub fn last_processed_id(&self) -> StreamId { + self.inner.lock().unwrap().actions.recv.last_processed_id() + } + + pub fn recv_window_update(&mut self, frame: frame::WindowUpdate) -> Result<(), Error> { + let mut me = self.inner.lock().unwrap(); + me.recv_window_update(&self.send_buffer, frame) + } + + pub fn recv_push_promise(&mut self, frame: frame::PushPromise) -> Result<(), Error> { + let mut me = self.inner.lock().unwrap(); + me.recv_push_promise(&self.send_buffer, frame) + } + + pub fn recv_eof(&mut self, clear_pending_accept: bool) -> Result<(), ()> { + let mut me = self.inner.lock().map_err(|_| ())?; + me.recv_eof(&self.send_buffer, clear_pending_accept) + } + pub fn send_reset(&mut self, id: StreamId, reason: Reason) { let mut me = self.inner.lock().unwrap(); - let me = &mut *me; + me.send_reset(&self.send_buffer, id, reason) + } - let key = match me.store.find_entry(id) { + pub fn send_go_away(&mut self, last_processed_id: StreamId) { + let mut me = self.inner.lock().unwrap(); + me.actions.recv.go_away(last_processed_id); + } +} + +impl Inner { + fn new(peer: peer::Dyn, config: Config) -> Arc> { + Arc::new(Mutex::new(Inner { + counts: Counts::new(peer, &config), + actions: Actions { + recv: Recv::new(peer, &config), + send: Send::new(&config), + task: None, + conn_error: None, + }, + store: Store::new(), + refs: 1, + })) + } + + fn recv_headers( + &mut self, + peer: peer::Dyn, + send_buffer: &SendBuffer, + frame: frame::Headers, + ) -> Result<(), Error> { + let id = frame.stream_id(); + + // The GOAWAY process has begun. All streams with a greater ID than + // specified as part of GOAWAY should be ignored. + if id > self.actions.recv.max_stream_id() { + tracing::trace!( + "id ({:?}) > max_stream_id ({:?}), ignoring HEADERS", + id, + self.actions.recv.max_stream_id() + ); + return Ok(()); + } + + let key = match self.store.find_entry(id) { Entry::Occupied(e) => e.key(), Entry::Vacant(e) => { + // Client: it's possible to send a request, and then send + // a RST_STREAM while the response HEADERS were in transit. + // + // Server: we can't reset a stream before having received + // the request headers, so don't allow. + if !peer.is_server() { + // This may be response headers for a stream we've already + // forgotten about... + if self.actions.may_have_forgotten_stream(peer, id) { + tracing::debug!( + "recv_headers for old stream={:?}, sending STREAM_CLOSED", + id, + ); + return Err(Error::library_reset(id, Reason::STREAM_CLOSED)); + } + } + + match self + .actions + .recv + .open(id, Open::Headers, &mut self.counts)? + { + Some(stream_id) => { + let stream = Stream::new( + stream_id, + self.actions.send.init_window_sz(), + self.actions.recv.init_window_sz(), + ); + + e.insert(stream) + } + None => return Ok(()), + } + } + }; + + let stream = self.store.resolve(key); + + if stream.state.is_local_reset() { + // Locally reset streams must ignore frames "for some time". + // This is because the remote may have sent trailers before + // receiving the RST_STREAM frame. 
+ tracing::trace!("recv_headers; ignoring trailers on {:?}", stream.id); + return Ok(()); + } + + let actions = &mut self.actions; + let mut send_buffer = send_buffer.inner.lock().unwrap(); + let send_buffer = &mut *send_buffer; + + self.counts.transition(stream, |counts, stream| { + tracing::trace!( + "recv_headers; stream={:?}; state={:?}", + stream.id, + stream.state + ); + + let res = if stream.state.is_recv_headers() { + match actions.recv.recv_headers(frame, stream, counts) { + Ok(()) => Ok(()), + Err(RecvHeaderBlockError::Oversize(resp)) => { + if let Some(resp) = resp { + let sent = actions.send.send_headers( + resp, send_buffer, stream, counts, &mut actions.task); + debug_assert!(sent.is_ok(), "oversize response should not fail"); + + actions.send.schedule_implicit_reset( + stream, + Reason::REFUSED_STREAM, + counts, + &mut actions.task); + + actions.recv.enqueue_reset_expiration(stream, counts); + + Ok(()) + } else { + Err(Error::library_reset(stream.id, Reason::REFUSED_STREAM)) + } + }, + Err(RecvHeaderBlockError::State(err)) => Err(err), + } + } else { + if !frame.is_end_stream() { + // Receiving trailers that don't set EOS is a "malformed" + // message. Malformed messages are a stream error. + proto_err!(stream: "recv_headers: trailers frame was not EOS; stream={:?}", stream.id); + return Err(Error::library_reset(stream.id, Reason::PROTOCOL_ERROR)); + } + + actions.recv.recv_trailers(frame, stream) + }; + + actions.reset_on_recv_stream_err(send_buffer, stream, counts, res) + }) + } + + fn recv_data( + &mut self, + peer: peer::Dyn, + send_buffer: &SendBuffer, + frame: frame::Data, + ) -> Result<(), Error> { + let id = frame.stream_id(); + + let stream = match self.store.find_mut(&id) { + Some(stream) => stream, + None => { + // The GOAWAY process has begun. All streams with a greater ID + // than specified as part of GOAWAY should be ignored. + if id > self.actions.recv.max_stream_id() { + tracing::trace!( + "id ({:?}) > max_stream_id ({:?}), ignoring DATA", + id, + self.actions.recv.max_stream_id() + ); + return Ok(()); + } + + if self.actions.may_have_forgotten_stream(peer, id) { + tracing::debug!("recv_data for old stream={:?}, sending STREAM_CLOSED", id,); + + let sz = frame.payload().len(); + // This should have been enforced at the codec::FramedRead layer, so + // this is just a sanity check. + assert!(sz <= super::MAX_WINDOW_SIZE as usize); + let sz = sz as WindowSize; + + self.actions.recv.ignore_data(sz)?; + return Err(Error::library_reset(id, Reason::STREAM_CLOSED)); + } + + proto_err!(conn: "recv_data: stream not found; id={:?}", id); + return Err(Error::library_go_away(Reason::PROTOCOL_ERROR)); + } + }; + + let actions = &mut self.actions; + let mut send_buffer = send_buffer.inner.lock().unwrap(); + let send_buffer = &mut *send_buffer; + + self.counts.transition(stream, |counts, stream| { + let sz = frame.payload().len(); + let res = actions.recv.recv_data(frame, stream); + + // Any stream error after receiving a DATA frame means + // we won't give the data to the user, and so they can't + // release the capacity. We do it automatically. 
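The comment in `recv_data` below spells out a flow-control subtlety: received DATA consumes connection-level capacity, and if the stream errors before the bytes reach the user, the library must release that capacity itself. A minimal model of that bookkeeping, with made-up types:

```rust
// Illustrative only: connection-level flow-control accounting.
struct ConnWindow {
    available: u32,
}

impl ConnWindow {
    fn recv_data(&mut self, len: u32) {
        self.available -= len;
    }
    fn release(&mut self, len: u32) {
        self.available += len;
    }
}

fn main() {
    let mut window = ConnWindow { available: 65_535 };
    window.recv_data(1_000);
    assert_eq!(window.available, 64_535);
    // Stream error: the data is dropped internally, so the capacity is
    // released automatically rather than waiting on the user.
    window.release(1_000);
    assert_eq!(window.available, 65_535);
}
```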
+ if let Err(Error::Reset(..)) = res { + actions + .recv + .release_connection_capacity(sz as WindowSize, &mut None); + } + actions.reset_on_recv_stream_err(send_buffer, stream, counts, res) + }) + } + + fn recv_reset( + &mut self, + send_buffer: &SendBuffer, + frame: frame::Reset, + ) -> Result<(), Error> { + let id = frame.stream_id(); + + if id.is_zero() { + proto_err!(conn: "recv_reset: invalid stream ID 0"); + return Err(Error::library_go_away(Reason::PROTOCOL_ERROR)); + } + + // The GOAWAY process has begun. All streams with a greater ID than + // specified as part of GOAWAY should be ignored. + if id > self.actions.recv.max_stream_id() { + tracing::trace!( + "id ({:?}) > max_stream_id ({:?}), ignoring RST_STREAM", + id, + self.actions.recv.max_stream_id() + ); + return Ok(()); + } + + let stream = match self.store.find_mut(&id) { + Some(stream) => stream, + None => { + // TODO: Are there other error cases? + self.actions + .ensure_not_idle(self.counts.peer(), id) + .map_err(Error::library_go_away)?; + + return Ok(()); + } + }; + + let mut send_buffer = send_buffer.inner.lock().unwrap(); + let send_buffer = &mut *send_buffer; + + let actions = &mut self.actions; + + self.counts.transition(stream, |counts, stream| { + actions.recv.recv_reset(frame, stream); + actions.send.handle_error(send_buffer, stream, counts); + assert!(stream.state.is_closed()); + Ok(()) + }) + } + + fn recv_window_update( + &mut self, + send_buffer: &SendBuffer, + frame: frame::WindowUpdate, + ) -> Result<(), Error> { + let id = frame.stream_id(); + + let mut send_buffer = send_buffer.inner.lock().unwrap(); + let send_buffer = &mut *send_buffer; + + if id.is_zero() { + self.actions + .send + .recv_connection_window_update(frame, &mut self.store, &mut self.counts) + .map_err(Error::library_go_away)?; + } else { + // The remote may send window updates for streams that the local now + // considers closed. It's ok... + if let Some(mut stream) = self.store.find_mut(&id) { + // This result is ignored as there is nothing to do when there + // is an error. The stream is reset by the function on error and + // the error is informational. 
+ let _ = self.actions.send.recv_stream_window_update( + frame.size_increment(), + send_buffer, + &mut stream, + &mut self.counts, + &mut self.actions.task, + ); + } else { + self.actions + .ensure_not_idle(self.counts.peer(), id) + .map_err(Error::library_go_away)?; + } + } + + Ok(()) + } + + fn handle_error(&mut self, send_buffer: &SendBuffer, err: proto::Error) -> StreamId { + let actions = &mut self.actions; + let counts = &mut self.counts; + let mut send_buffer = send_buffer.inner.lock().unwrap(); + let send_buffer = &mut *send_buffer; + + let last_processed_id = actions.recv.last_processed_id(); + + self.store.for_each(|stream| { + counts.transition(stream, |counts, stream| { + actions.recv.handle_error(&err, &mut *stream); + actions.send.handle_error(send_buffer, stream, counts); + }) + }); + + actions.conn_error = Some(err); + + last_processed_id + } + + fn recv_go_away( + &mut self, + send_buffer: &SendBuffer, + frame: &frame::GoAway, + ) -> Result<(), Error> { + let actions = &mut self.actions; + let counts = &mut self.counts; + let mut send_buffer = send_buffer.inner.lock().unwrap(); + let send_buffer = &mut *send_buffer; + + let last_stream_id = frame.last_stream_id(); + + actions.send.recv_go_away(last_stream_id)?; + + let err = Error::remote_go_away(frame.debug_data().clone(), frame.reason()); + + self.store.for_each(|stream| { + if stream.id > last_stream_id { + counts.transition(stream, |counts, stream| { + actions.recv.handle_error(&err, &mut *stream); + actions.send.handle_error(send_buffer, stream, counts); + }) + } + }); + + actions.conn_error = Some(err); + + Ok(()) + } + + fn recv_push_promise( + &mut self, + send_buffer: &SendBuffer, + frame: frame::PushPromise, + ) -> Result<(), Error> { + let id = frame.stream_id(); + let promised_id = frame.promised_id(); + + // First, ensure that the initiating stream is still in a valid state. + let parent_key = match self.store.find_mut(&id) { + Some(stream) => { + // The GOAWAY process has begun. All streams with a greater ID + // than specified as part of GOAWAY should be ignored. + if id > self.actions.recv.max_stream_id() { + tracing::trace!( + "id ({:?}) > max_stream_id ({:?}), ignoring PUSH_PROMISE", + id, + self.actions.recv.max_stream_id() + ); + return Ok(()); + } + + // The stream must be receive open + stream.state.ensure_recv_open()?; + stream.key() + } + None => { + proto_err!(conn: "recv_push_promise: initiating stream is in an invalid state"); + return Err(Error::library_go_away(Reason::PROTOCOL_ERROR).into()); + } + }; + + // TODO: Streams in the reserved states do not count towards the concurrency + // limit. However, it seems like there should be a cap otherwise this + // could grow in memory indefinitely. + + // Ensure that we can reserve streams + self.actions.recv.ensure_can_reserve()?; + + // Next, open the stream. + // + // If `None` is returned, then the stream is being refused. There is no + // further work to be done. + if self + .actions + .recv + .open(promised_id, Open::PushPromise, &mut self.counts)? + .is_none() + { + return Ok(()); + } + + // Try to handle the frame and create a corresponding key for the pushed stream + // this requires a bit of indirection to make the borrow checker happy. 
+ let child_key: Option = { + // Create state for the stream + let stream = self.store.insert(promised_id, { + Stream::new( + promised_id, + self.actions.send.init_window_sz(), + self.actions.recv.init_window_sz(), + ) + }); + + let actions = &mut self.actions; + + self.counts.transition(stream, |counts, stream| { + let stream_valid = actions.recv.recv_push_promise(frame, stream); + + match stream_valid { + Ok(()) => Ok(Some(stream.key())), + _ => { + let mut send_buffer = send_buffer.inner.lock().unwrap(); + actions + .reset_on_recv_stream_err( + &mut *send_buffer, + stream, + counts, + stream_valid, + ) + .map(|()| None) + } + } + })? + }; + // If we're successful, push the headers and stream... + if let Some(child) = child_key { + let mut ppp = self.store[parent_key].pending_push_promises.take(); + ppp.push(&mut self.store.resolve(child)); + + let parent = &mut self.store.resolve(parent_key); + parent.pending_push_promises = ppp; + parent.notify_recv(); + }; + + Ok(()) + } + + fn recv_eof( + &mut self, + send_buffer: &SendBuffer, + clear_pending_accept: bool, + ) -> Result<(), ()> { + let actions = &mut self.actions; + let counts = &mut self.counts; + let mut send_buffer = send_buffer.inner.lock().unwrap(); + let send_buffer = &mut *send_buffer; + + if actions.conn_error.is_none() { + actions.conn_error = Some(io::Error::from(io::ErrorKind::BrokenPipe).into()); + } + + tracing::trace!("Streams::recv_eof"); + + self.store.for_each(|stream| { + counts.transition(stream, |counts, stream| { + actions.recv.recv_eof(stream); + + // This handles resetting send state associated with the + // stream + actions.send.handle_error(send_buffer, stream, counts); + }) + }); + + actions.clear_queues(clear_pending_accept, &mut self.store, counts); + Ok(()) + } + + fn poll_complete( + &mut self, + send_buffer: &SendBuffer, + cx: &mut Context, + dst: &mut Codec>, + ) -> Poll> + where + T: AsyncWrite + Unpin, + B: Buf, + { + let mut send_buffer = send_buffer.inner.lock().unwrap(); + let send_buffer = &mut *send_buffer; + + // Send WINDOW_UPDATE frames first + // + // TODO: It would probably be better to interleave updates w/ data + // frames. + ready!(self + .actions + .recv + .poll_complete(cx, &mut self.store, &mut self.counts, dst))?; + + // Send any other pending frames + ready!(self.actions.send.poll_complete( + cx, + send_buffer, + &mut self.store, + &mut self.counts, + dst + ))?; + + // Nothing else to do, track the task + self.actions.task = Some(cx.waker().clone()); + + Poll::Ready(Ok(())) + } + + fn send_reset(&mut self, send_buffer: &SendBuffer, id: StreamId, reason: Reason) { + let key = match self.store.find_entry(id) { + Entry::Occupied(e) => e.key(), + Entry::Vacant(e) => { + // Resetting a stream we don't know about? That could be OK... + // + // 1. As a server, we just received a request, but that request + // was bad, so we're resetting before even accepting it. + // This is totally fine. + // + // 2. The remote may have sent us a frame on new stream that + // it's *not* supposed to have done, and thus, we don't know + // the stream. In that case, sending a reset will "open" the + // stream in our store. Maybe that should be a connection + // error instead? At least for now, we need to update what + // our vision of the next stream is. + if self.counts.peer().is_local_init(id) { + // We normally would open this stream, so update our + // next-send-id record. 
+ self.actions.send.maybe_reset_next_stream_id(id); + } else { + // We normally would recv this stream, so update our + // next-recv-id record. + self.actions.recv.maybe_reset_next_stream_id(id); + } + let stream = Stream::new(id, 0, 0); e.insert(stream) } }; - let stream = me.store.resolve(key); - let mut send_buffer = self.send_buffer.inner.lock().unwrap(); + let stream = self.store.resolve(key); + let mut send_buffer = send_buffer.inner.lock().unwrap(); let send_buffer = &mut *send_buffer; - me.actions - .send_reset(stream, reason, &mut me.counts, send_buffer); - } - - pub fn send_go_away(&mut self, last_processed_id: StreamId) { - let mut me = self.inner.lock().unwrap(); - let me = &mut *me; - let actions = &mut me.actions; - actions.recv.go_away(last_processed_id); + self.actions.send_reset( + stream, + reason, + Initiator::Library, + &mut self.counts, + send_buffer, + ); } } @@ -788,7 +925,7 @@ where if let Some(pending) = pending { let mut stream = me.store.resolve(pending.key); - log::trace!("poll_pending_open; stream = {:?}", stream.is_pending_open); + tracing::trace!("poll_pending_open; stream = {:?}", stream.is_pending_open); if stream.is_pending_open { stream.wait_send(cx); return Poll::Pending; @@ -802,39 +939,32 @@ impl Streams where P: Peer, { + pub fn as_dyn(&self) -> DynStreams { + let Self { + inner, + send_buffer, + _p, + } = self; + DynStreams { + inner, + send_buffer, + peer: P::r#dyn(), + } + } + /// This function is safe to call multiple times. /// /// A `Result` is returned to avoid panicking if the mutex is poisoned. pub fn recv_eof(&mut self, clear_pending_accept: bool) -> Result<(), ()> { - let mut me = self.inner.lock().map_err(|_| ())?; - let me = &mut *me; + self.as_dyn().recv_eof(clear_pending_accept) + } - let actions = &mut me.actions; - let counts = &mut me.counts; - let mut send_buffer = self.send_buffer.inner.lock().unwrap(); - let send_buffer = &mut *send_buffer; + pub(crate) fn max_send_streams(&self) -> usize { + self.inner.lock().unwrap().counts.max_send_streams() + } - if actions.conn_error.is_none() { - actions.conn_error = Some(io::Error::from(io::ErrorKind::BrokenPipe).into()); - } - - log::trace!("Streams::recv_eof"); - - me.store - .for_each(|stream| { - counts.transition(stream, |counts, stream| { - actions.recv.recv_eof(stream); - - // This handles resetting send state associated with the - // stream - actions.send.recv_err(send_buffer, stream, counts); - Ok::<_, ()>(()) - }) - }) - .expect("recv_eof"); - - actions.clear_queues(clear_pending_accept, &mut me.store, counts); - Ok(()) + pub(crate) fn max_recv_streams(&self) -> usize { + self.inner.lock().unwrap().counts.max_recv_streams() } #[cfg(feature = "unstable")] @@ -880,7 +1010,14 @@ where P: Peer, { fn drop(&mut self) { - let _ = self.inner.lock().map(|mut inner| inner.refs -= 1); + if let Ok(mut inner) = self.inner.lock() { + inner.refs -= 1; + if inner.refs == 1 { + if let Some(task) = inner.actions.task.take() { + task.wake(); + } + } + } } } @@ -940,14 +1077,16 @@ impl StreamRef { let send_buffer = &mut *send_buffer; me.actions - .send_reset(stream, reason, &mut me.counts, send_buffer); + .send_reset(stream, reason, Initiator::User, &mut me.counts, send_buffer); } pub fn send_response( &mut self, - response: Response<()>, + mut response: Response<()>, end_of_stream: bool, ) -> Result<(), UserError> { + // Clear before taking lock, incase extensions contain a StreamRef. 
+ response.extensions_mut().clear(); let mut me = self.opaque.inner.lock().unwrap(); let me = &mut *me; @@ -965,7 +1104,12 @@ impl StreamRef { }) } - pub fn send_push_promise(&mut self, request: Request<()>) -> Result, UserError> { + pub fn send_push_promise( + &mut self, + mut request: Request<()>, + ) -> Result, UserError> { + // Clear before taking lock, incase extensions contain a StreamRef. + request.extensions_mut().clear(); let mut me = self.opaque.inner.lock().unwrap(); let me = &mut *me; @@ -1006,6 +1150,7 @@ impl StreamRef { return Err(err.into()); } + me.refs += 1; let opaque = OpaqueStreamRef::new(self.opaque.inner.clone(), &mut me.store.resolve(child_key)); @@ -1265,7 +1410,7 @@ fn drop_stream_ref(inner: &Mutex, key: store::Key) { Ok(inner) => inner, Err(_) => { if ::std::thread::panicking() { - log::trace!("StreamRef::drop; mutex poisoned"); + tracing::trace!("StreamRef::drop; mutex poisoned"); return; } else { panic!("StreamRef::drop; mutex poisoned"); @@ -1277,7 +1422,7 @@ fn drop_stream_ref(inner: &Mutex, key: store::Key) { me.refs -= 1; let mut stream = me.store.resolve(key); - log::trace!("drop_stream_ref; stream={:?}", stream); + tracing::trace!("drop_stream_ref; stream={:?}", stream); // decrement the stream's ref count by 1. stream.ref_dec(); @@ -1340,12 +1485,19 @@ impl Actions { &mut self, stream: store::Ptr, reason: Reason, + initiator: Initiator, counts: &mut Counts, send_buffer: &mut Buffer>, ) { counts.transition(stream, |counts, stream| { - self.send - .send_reset(reason, send_buffer, stream, counts, &mut self.task); + self.send.send_reset( + reason, + initiator, + send_buffer, + stream, + counts, + &mut self.task, + ); self.recv.enqueue_reset_expiration(stream, counts); // if a RecvStream is parked, ensure it's notified stream.notify_recv(); @@ -1357,12 +1509,13 @@ impl Actions { buffer: &mut Buffer>, stream: &mut store::Ptr, counts: &mut Counts, - res: Result<(), RecvError>, - ) -> Result<(), RecvError> { - if let Err(RecvError::Stream { reason, .. }) = res { + res: Result<(), Error>, + ) -> Result<(), Error> { + if let Err(Error::Reset(stream_id, reason, initiator)) = res { + debug_assert_eq!(stream_id, stream.id); // Reset the stream. self.send - .send_reset(reason, buffer, stream, counts, &mut self.task); + .send_reset(reason, initiator, buffer, stream, counts, &mut self.task); Ok(()) } else { res @@ -1379,7 +1532,7 @@ impl Actions { fn ensure_no_conn_error(&self) -> Result<(), proto::Error> { if let Some(ref err) = self.conn_error { - Err(err.shallow_clone()) + Err(err.clone()) } else { Ok(()) } @@ -1394,11 +1547,11 @@ impl Actions { /// is more likely to be latency/memory constraints that caused this, /// and not a bad actor. So be less catastrophic, the spec allows /// us to send another RST_STREAM of STREAM_CLOSED. - fn may_have_forgotten_stream(&self, id: StreamId) -> bool { + fn may_have_forgotten_stream(&self, peer: peer::Dyn, id: StreamId) -> bool { if id.is_zero() { return false; } - if P::is_local_init(id) { + if peer.is_local_init(id) { self.send.may_have_created_stream(id) } else { self.recv.may_have_created_stream(id) diff --git a/third_party/rust/h2/src/server.rs b/third_party/rust/h2/src/server.rs index 59247b596198..16a50da4b315 100644 --- a/third_party/rust/h2/src/server.rs +++ b/third_party/rust/h2/src/server.rs @@ -1,10 +1,10 @@ -//! Server implementation of the HTTP/2.0 protocol. +//! Server implementation of the HTTP/2 protocol. //! //! # Getting started //! -//! 
Running an HTTP/2.0 server requires the caller to manage accepting the +//! Running an HTTP/2 server requires the caller to manage accepting the //! connections as well as getting the connections to a state that is ready to -//! begin the HTTP/2.0 handshake. See [here](../index.html#handshake) for more +//! begin the HTTP/2 handshake. See [here](../index.html#handshake) for more //! details. //! //! This could be as basic as using Tokio's [`TcpListener`] to accept @@ -12,8 +12,8 @@ //! upgrades. //! //! Once a connection is obtained, it is passed to [`handshake`], -//! which will begin the [HTTP/2.0 handshake]. This returns a future that -//! completes once the handshake process is performed and HTTP/2.0 streams may +//! which will begin the [HTTP/2 handshake]. This returns a future that +//! completes once the handshake process is performed and HTTP/2 streams may //! be received. //! //! [`handshake`] uses default configuration values. There are a number of @@ -21,12 +21,12 @@ //! //! # Inbound streams //! -//! The [`Connection`] instance is used to accept inbound HTTP/2.0 streams. It +//! The [`Connection`] instance is used to accept inbound HTTP/2 streams. It //! does this by implementing [`futures::Stream`]. When a new stream is -//! received, a call to [`Connection::poll`] will return `(request, response)`. +//! received, a call to [`Connection::accept`] will return `(request, response)`. //! The `request` handle (of type [`http::Request`]) contains the //! HTTP request head as well as provides a way to receive the inbound data -//! stream and the trailers. The `response` handle (of type [`SendStream`]) +//! stream and the trailers. The `response` handle (of type [`SendResponse`]) //! allows responding to the request, stream the response payload, send //! trailers, and send push promises. //! @@ -36,19 +36,19 @@ //! # Managing the connection //! //! The [`Connection`] instance is used to manage connection state. The caller -//! is required to call either [`Connection::poll`] or +//! is required to call either [`Connection::accept`] or //! [`Connection::poll_close`] in order to advance the connection state. Simply //! operating on [`SendStream`] or [`RecvStream`] will have no effect unless the //! connection state is advanced. //! -//! It is not required to call **both** [`Connection::poll`] and +//! It is not required to call **both** [`Connection::accept`] and //! [`Connection::poll_close`]. If the caller is ready to accept a new stream, -//! then only [`Connection::poll`] should be called. When the caller **does +//! then only [`Connection::accept`] should be called. When the caller **does //! not** want to accept a new stream, [`Connection::poll_close`] should be //! called. //! //! The [`Connection`] instance should only be dropped once -//! [`Connection::poll_close`] returns `Ready`. Once [`Connection::poll`] +//! [`Connection::poll_close`] returns `Ready`. Once [`Connection::accept`] //! returns `Ready(None)`, there will no longer be any more inbound streams. At //! this point, only [`Connection::poll_close`] should be called. //! @@ -59,9 +59,9 @@ //! //! # Example //! -//! A basic HTTP/2.0 server example that runs over TCP and assumes [prior +//! A basic HTTP/2 server example that runs over TCP and assumes [prior //! knowledge], i.e. both the client and the server assume that the TCP socket -//! will use the HTTP/2.0 protocol without prior negotiation. +//! will use the HTTP/2 protocol without prior negotiation. //! //! ```no_run //! use h2::server; @@ -77,9 +77,9 @@ //! 
if let Ok((socket, _peer_addr)) = listener.accept().await { //! // Spawn a new task to process each connection. //! tokio::spawn(async { -//! // Start the HTTP/2.0 connection handshake +//! // Start the HTTP/2 connection handshake //! let mut h2 = server::handshake(socket).await.unwrap(); -//! // Accept all inbound HTTP/2.0 streams sent over the +//! // Accept all inbound HTTP/2 streams sent over the //! // connection. //! while let Some(request) = h2.accept().await { //! let (request, mut respond) = request.unwrap(); @@ -104,7 +104,7 @@ //! //! [prior knowledge]: http://httpwg.org/specs/rfc7540.html#known-http //! [`handshake`]: fn.handshake.html -//! [HTTP/2.0 handshake]: http://httpwg.org/specs/rfc7540.html#ConnectionHeader +//! [HTTP/2 handshake]: http://httpwg.org/specs/rfc7540.html#ConnectionHeader //! [`Builder`]: struct.Builder.html //! [`Connection`]: struct.Connection.html //! [`Connection::poll`]: struct.Connection.html#method.poll @@ -115,9 +115,9 @@ //! [`SendStream`]: ../struct.SendStream.html //! [`TcpListener`]: https://docs.rs/tokio-core/0.1/tokio_core/net/struct.TcpListener.html -use crate::codec::{Codec, RecvError, UserError}; +use crate::codec::{Codec, UserError}; use crate::frame::{self, Pseudo, PushPromiseHeaderError, Reason, Settings, StreamId}; -use crate::proto::{self, Config, Prioritized}; +use crate::proto::{self, Config, Error, Prioritized}; use crate::{FlowControl, PingPong, RecvStream, SendStream}; use bytes::{Buf, Bytes}; @@ -126,10 +126,11 @@ use std::future::Future; use std::pin::Pin; use std::task::{Context, Poll}; use std::time::Duration; -use std::{convert, fmt, io, mem}; -use tokio::io::{AsyncRead, AsyncWrite}; +use std::{fmt, io}; +use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; +use tracing::instrument::{Instrument, Instrumented}; -/// In progress HTTP/2.0 connection handshake future. +/// In progress HTTP/2 connection handshake future. /// /// This type implements `Future`, yielding a `Connection` instance once the /// handshake has completed. @@ -149,12 +150,14 @@ pub struct Handshake { builder: Builder, /// The current state of the handshake. state: Handshaking, + /// Span tracking the handshake + span: tracing::Span, } -/// Accepts inbound HTTP/2.0 streams on a connection. +/// Accepts inbound HTTP/2 streams on a connection. /// /// A `Connection` is backed by an I/O resource (usually a TCP socket) and -/// implements the HTTP/2.0 server logic for that connection. It is responsible +/// implements the HTTP/2 server logic for that connection. It is responsible /// for receiving inbound streams initiated by the client as well as driving the /// internal state forward. /// @@ -179,9 +182,11 @@ pub struct Handshake { /// # async fn doc(my_io: T) { /// let mut server = server::handshake(my_io).await.unwrap(); /// while let Some(request) = server.accept().await { -/// let (request, respond) = request.unwrap(); -/// // Process the request and send the response back to the client -/// // using `respond`. +/// tokio::spawn(async move { +/// let (request, respond) = request.unwrap(); +/// // Process the request and send the response back to the client +/// // using `respond`. +/// }); /// } /// # } /// # @@ -197,7 +202,7 @@ pub struct Connection { /// Methods can be chained in order to set the configuration values. /// /// The server is constructed by calling [`handshake`] and passing the I/O -/// handle that will back the HTTP/2.0 server. +/// handle that will back the HTTP/2 server. /// /// New instances of `Builder` are obtained via [`Builder::new`]. 
/// @@ -216,7 +221,7 @@ pub struct Connection { /// # fn doc(my_io: T) /// # -> Handshake /// # { -/// // `server_fut` is a future representing the completion of the HTTP/2.0 +/// // `server_fut` is a future representing the completion of the HTTP/2 /// // handshake. /// let server_fut = Builder::new() /// .initial_window_size(1_000_000) @@ -240,6 +245,9 @@ pub struct Builder { /// Initial target window size for new connections. initial_target_connection_window_size: Option, + + /// Maximum amount of bytes to "buffer" for writing per stream. + max_send_buffer_size: usize, } /// Send a response back to the client @@ -252,7 +260,7 @@ pub struct Builder { /// stream. /// /// If the `SendResponse` instance is dropped without sending a response, then -/// the HTTP/2.0 stream will be reset. +/// the HTTP/2 stream will be reset. /// /// See [module] level docs for more details. /// @@ -271,7 +279,7 @@ pub struct SendResponse { /// It can not be used to initiate push promises. /// /// If the `SendPushedResponse` instance is dropped without sending a response, then -/// the HTTP/2.0 stream will be reset. +/// the HTTP/2 stream will be reset. /// /// See [module] level docs for more details. /// @@ -290,11 +298,11 @@ impl fmt::Debug for SendPushedResponse { /// Stages of an in-progress handshake. enum Handshaking { /// State 1. Connection is flushing pending SETTINGS frame. - Flushing(Flush>), + Flushing(Instrumented>>), /// State 2. Connection is waiting for the client preface. - ReadingPreface(ReadPreface>), - /// Dummy state for `mem::replace`. - Empty, + ReadingPreface(Instrumented>>), + /// State 3. Handshake is done, polling again would panic. + Done, } /// Flush a Sink @@ -313,18 +321,18 @@ pub(crate) struct Peer; const PREFACE: [u8; 24] = *b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"; -/// Creates a new configured HTTP/2.0 server with default configuration +/// Creates a new configured HTTP/2 server with default configuration /// values backed by `io`. /// /// It is expected that `io` already be in an appropriate state to commence -/// the [HTTP/2.0 handshake]. See [Handshake] for more details. +/// the [HTTP/2 handshake]. See [Handshake] for more details. /// /// Returns a future which resolves to the [`Connection`] instance once the -/// HTTP/2.0 handshake has been completed. The returned [`Connection`] +/// HTTP/2 handshake has been completed. The returned [`Connection`] /// instance will be using default configuration values. Use [`Builder`] to /// customize the configuration values used by a [`Connection`] instance. /// -/// [HTTP/2.0 handshake]: http://httpwg.org/specs/rfc7540.html#ConnectionHeader +/// [HTTP/2 handshake]: http://httpwg.org/specs/rfc7540.html#ConnectionHeader /// [Handshake]: ../index.html#handshake /// [`Connection`]: struct.Connection.html /// @@ -338,8 +346,8 @@ const PREFACE: [u8; 24] = *b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"; /// # async fn doc(my_io: T) /// # { /// let connection = server::handshake(my_io).await.unwrap(); -/// // The HTTP/2.0 handshake has completed, now use `connection` to -/// // accept inbound HTTP/2.0 streams. +/// // The HTTP/2 handshake has completed, now use `connection` to +/// // accept inbound HTTP/2 streams. /// # } /// # /// # pub fn main() {} @@ -359,6 +367,9 @@ where B: Buf + 'static, { fn handshake2(io: T, builder: Builder) -> Handshake { + let span = tracing::trace_span!("server_handshake"); + let entered = span.enter(); + // Create the codec. 
let mut codec = Codec::new(io); @@ -376,9 +387,16 @@ where .expect("invalid SETTINGS frame"); // Create the handshake future. - let state = Handshaking::from(codec); + let state = + Handshaking::Flushing(Flush::new(codec).instrument(tracing::trace_span!("flush"))); - Handshake { builder, state } + drop(entered); + + Handshake { + builder, + state, + span, + } } /// Accept the next incoming request on this connection. @@ -402,7 +420,7 @@ where } if let Some(inner) = self.connection.next_incoming() { - log::trace!("received incoming"); + tracing::trace!("received incoming"); let (head, _) = inner.take_request().into_parts(); let body = RecvStream::new(FlowControl::new(inner.clone_to_opaque())); @@ -456,6 +474,19 @@ where Ok(()) } + /// Enables the [extended CONNECT protocol]. + /// + /// [extended CONNECT protocol]: https://datatracker.ietf.org/doc/html/rfc8441#section-4 + /// + /// # Errors + /// + /// Returns an error if a previous call is still pending acknowledgement + /// from the remote endpoint. + pub fn enable_connect_protocol(&mut self) -> Result<(), crate::Error> { + self.connection.set_enable_connect_protocol()?; + Ok(()) + } + /// Returns `Ready` when the underlying connection has closed. /// /// If any new inbound streams are received during a call to `poll_closed`, @@ -517,6 +548,34 @@ where pub fn ping_pong(&mut self) -> Option { self.connection.take_user_pings().map(PingPong::new) } + + /// Returns the maximum number of concurrent streams that may be initiated + /// by the server on this connection. + /// + /// This limit is configured by the client peer by sending the + /// [`SETTINGS_MAX_CONCURRENT_STREAMS` parameter][1] in a `SETTINGS` frame. + /// This method returns the currently acknowledged value recieved from the + /// remote. + /// + /// [1]: https://tools.ietf.org/html/rfc7540#section-5.1.2 + pub fn max_concurrent_send_streams(&self) -> usize { + self.connection.max_send_streams() + } + + /// Returns the maximum number of concurrent streams that may be initiated + /// by the client on this connection. + /// + /// This returns the value of the [`SETTINGS_MAX_CONCURRENT_STREAMS` + /// parameter][1] sent in a `SETTINGS` frame that has been + /// acknowledged by the remote peer. The value to be sent is configured by + /// the [`Builder::max_concurrent_streams`][2] method before handshaking + /// with the remote peer. + /// + /// [1]: https://tools.ietf.org/html/rfc7540#section-5.1.2 + /// [2]: ../struct.Builder.html#method.max_concurrent_streams + pub fn max_concurrent_recv_streams(&self) -> usize { + self.connection.max_recv_streams() + } } #[cfg(feature = "stream")] @@ -561,7 +620,7 @@ impl Builder { /// # fn doc(my_io: T) /// # -> Handshake /// # { - /// // `server_fut` is a future representing the completion of the HTTP/2.0 + /// // `server_fut` is a future representing the completion of the HTTP/2 /// // handshake. /// let server_fut = Builder::new() /// .initial_window_size(1_000_000) @@ -578,6 +637,7 @@ impl Builder { reset_stream_max: proto::DEFAULT_RESET_STREAM_MAX, settings: Settings::default(), initial_target_connection_window_size: None, + max_send_buffer_size: proto::DEFAULT_MAX_SEND_BUFFER_SIZE, } } @@ -600,7 +660,7 @@ impl Builder { /// # fn doc(my_io: T) /// # -> Handshake /// # { - /// // `server_fut` is a future representing the completion of the HTTP/2.0 + /// // `server_fut` is a future representing the completion of the HTTP/2 /// // handshake. 
/// let server_fut = Builder::new() /// .initial_window_size(1_000_000) @@ -634,7 +694,7 @@ impl Builder { /// # fn doc(my_io: T) /// # -> Handshake /// # { - /// // `server_fut` is a future representing the completion of the HTTP/2.0 + /// // `server_fut` is a future representing the completion of the HTTP/2 /// // handshake. /// let server_fut = Builder::new() /// .initial_connection_window_size(1_000_000) @@ -649,7 +709,7 @@ impl Builder { self } - /// Indicates the size (in octets) of the largest HTTP/2.0 frame payload that the + /// Indicates the size (in octets) of the largest HTTP/2 frame payload that the /// configured server is able to accept. /// /// The sender may send data frames that are **smaller** than this value, @@ -667,7 +727,7 @@ impl Builder { /// # fn doc(my_io: T) /// # -> Handshake /// # { - /// // `server_fut` is a future representing the completion of the HTTP/2.0 + /// // `server_fut` is a future representing the completion of the HTTP/2 /// // handshake. /// let server_fut = Builder::new() /// .max_frame_size(1_000_000) @@ -706,7 +766,7 @@ impl Builder { /// # fn doc(my_io: T) /// # -> Handshake /// # { - /// // `server_fut` is a future representing the completion of the HTTP/2.0 + /// // `server_fut` is a future representing the completion of the HTTP/2 /// // handshake. /// let server_fut = Builder::new() /// .max_header_list_size(16 * 1024) @@ -741,7 +801,7 @@ impl Builder { /// a protocol level error. Instead, the `h2` library will immediately reset /// the stream. /// - /// See [Section 5.1.2] in the HTTP/2.0 spec for more details. + /// See [Section 5.1.2] in the HTTP/2 spec for more details. /// /// [Section 5.1.2]: https://http2.github.io/http2-spec/#rfc.section.5.1.2 /// @@ -754,7 +814,7 @@ impl Builder { /// # fn doc(my_io: T) /// # -> Handshake /// # { - /// // `server_fut` is a future representing the completion of the HTTP/2.0 + /// // `server_fut` is a future representing the completion of the HTTP/2 /// // handshake. /// let server_fut = Builder::new() /// .max_concurrent_streams(1000) @@ -773,7 +833,7 @@ impl Builder { /// /// When a stream is explicitly reset by either calling /// [`SendResponse::send_reset`] or by dropping a [`SendResponse`] instance - /// before completing the stream, the HTTP/2.0 specification requires that + /// before completing the stream, the HTTP/2 specification requires that /// any further frames received for that stream must be ignored for "some /// time". /// @@ -800,7 +860,7 @@ impl Builder { /// # fn doc(my_io: T) /// # -> Handshake /// # { - /// // `server_fut` is a future representing the completion of the HTTP/2.0 + /// // `server_fut` is a future representing the completion of the HTTP/2 /// // handshake. /// let server_fut = Builder::new() /// .max_concurrent_reset_streams(1000) @@ -815,11 +875,29 @@ impl Builder { self } + /// Sets the maximum send buffer size per stream. + /// + /// Once a stream has buffered up to (or over) the maximum, the stream's + /// flow control will not "poll" additional capacity. Once bytes for the + /// stream have been written to the connection, the send buffer capacity + /// will be freed up again. + /// + /// The default is currently ~400MB, but may change. + /// + /// # Panics + /// + /// This function panics if `max` is larger than `u32::MAX`. + pub fn max_send_buffer_size(&mut self, max: usize) -> &mut Self { + assert!(max <= std::u32::MAX as usize); + self.max_send_buffer_size = max; + self + } + /// Sets the maximum number of concurrent locally reset streams. 
/// /// When a stream is explicitly reset by either calling /// [`SendResponse::send_reset`] or by dropping a [`SendResponse`] instance - /// before completing the stream, the HTTP/2.0 specification requires that + /// before completing the stream, the HTTP/2 specification requires that /// any further frames received for that stream must be ignored for "some /// time". /// @@ -847,7 +925,7 @@ impl Builder { /// # fn doc(my_io: T) /// # -> Handshake /// # { - /// // `server_fut` is a future representing the completion of the HTTP/2.0 + /// // `server_fut` is a future representing the completion of the HTTP/2 /// // handshake. /// let server_fut = Builder::new() /// .reset_stream_duration(Duration::from_secs(10)) @@ -862,18 +940,26 @@ impl Builder { self } - /// Creates a new configured HTTP/2.0 server backed by `io`. + /// Enables the [extended CONNECT protocol]. + /// + /// [extended CONNECT protocol]: https://datatracker.ietf.org/doc/html/rfc8441#section-4 + pub fn enable_connect_protocol(&mut self) -> &mut Self { + self.settings.set_enable_connect_protocol(Some(1)); + self + } + + /// Creates a new configured HTTP/2 server backed by `io`. /// /// It is expected that `io` already be in an appropriate state to commence - /// the [HTTP/2.0 handshake]. See [Handshake] for more details. + /// the [HTTP/2 handshake]. See [Handshake] for more details. /// /// Returns a future which resolves to the [`Connection`] instance once the - /// HTTP/2.0 handshake has been completed. + /// HTTP/2 handshake has been completed. /// /// This function also allows the caller to configure the send payload data /// type. See [Outbound data type] for more details. /// - /// [HTTP/2.0 handshake]: http://httpwg.org/specs/rfc7540.html#ConnectionHeader + /// [HTTP/2 handshake]: http://httpwg.org/specs/rfc7540.html#ConnectionHeader /// [Handshake]: ../index.html#handshake /// [`Connection`]: struct.Connection.html /// [Outbound data type]: ../index.html#outbound-data-type. @@ -889,7 +975,7 @@ impl Builder { /// # fn doc(my_io: T) /// # -> Handshake /// # { - /// // `server_fut` is a future representing the completion of the HTTP/2.0 + /// // `server_fut` is a future representing the completion of the HTTP/2 /// // handshake. /// let server_fut = Builder::new() /// .handshake(my_io); @@ -909,7 +995,7 @@ impl Builder { /// # fn doc(my_io: T) /// # -> Handshake /// # { - /// // `server_fut` is a future representing the completion of the HTTP/2.0 + /// // `server_fut` is a future representing the completion of the HTTP/2 /// // handshake. /// let server_fut: Handshake<_, &'static [u8]> = Builder::new() /// .handshake(my_io); @@ -1019,7 +1105,7 @@ impl SendResponse { /// /// # Panics /// - /// If the lock on the strean store has been poisoned. + /// If the lock on the stream store has been poisoned. pub fn stream_id(&self) -> crate::StreamId { crate::StreamId::from_internal(self.inner.stream_id()) } @@ -1091,7 +1177,7 @@ impl SendPushedResponse { /// /// # Panics /// - /// If the lock on the strean store has been poisoned. + /// If the lock on the stream store has been poisoned. 
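The two `Builder` additions in this update, `max_send_buffer_size` and `enable_connect_protocol`, compose with the existing options. A small sketch (not part of the patch; `my_io` stands in for any `AsyncRead + AsyncWrite` transport):

```rust
use h2::server::{Builder, Handshake};
use tokio::io::{AsyncRead, AsyncWrite};

fn configure<T>(my_io: T) -> Handshake<T>
where
    T: AsyncRead + AsyncWrite + Unpin,
{
    Builder::new()
        // Cap per-stream write buffering at 1 MiB instead of the ~400MB default.
        .max_send_buffer_size(1024 * 1024)
        // Advertise support for RFC 8441 extended CONNECT to clients.
        .enable_connect_protocol()
        .handshake(my_io)
}
```

The same setting can also be enabled after the handshake via the `Connection::enable_connect_protocol` method added earlier in this file.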
pub fn stream_id(&self) -> crate::StreamId { self.inner.stream_id() } @@ -1146,8 +1232,10 @@ where let mut rem = PREFACE.len() - self.pos; while rem > 0 { - let n = ready!(Pin::new(self.inner_mut()).poll_read(cx, &mut buf[..rem])) + let mut buf = ReadBuf::new(&mut buf[..rem]); + ready!(Pin::new(self.inner_mut()).poll_read(cx, &mut buf)) .map_err(crate::Error::from_io)?; + let n = buf.filled().len(); if n == 0 { return Poll::Ready(Err(crate::Error::from_io(io::Error::new( io::ErrorKind::UnexpectedEof, @@ -1155,10 +1243,10 @@ where )))); } - if PREFACE[self.pos..self.pos + n] != buf[..n] { + if &PREFACE[self.pos..self.pos + n] != buf.filled() { proto_err!(conn: "read_preface: invalid preface"); // TODO: Should this just write the GO_AWAY frame directly? - return Poll::Ready(Err(Reason::PROTOCOL_ERROR.into())); + return Poll::Ready(Err(Error::library_go_away(Reason::PROTOCOL_ERROR).into())); } self.pos += n; @@ -1179,63 +1267,61 @@ where type Output = Result, crate::Error>; fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - log::trace!("Handshake::poll(); state={:?};", self.state); - use crate::server::Handshaking::*; + let span = self.span.clone(); // XXX(eliza): T_T + let _e = span.enter(); + tracing::trace!(state = ?self.state); - self.state = if let Flushing(ref mut flush) = self.state { - // We're currently flushing a pending SETTINGS frame. Poll the - // flush future, and, if it's completed, advance our state to wait - // for the client preface. - let codec = match Pin::new(flush).poll(cx)? { - Poll::Pending => { - log::trace!("Handshake::poll(); flush.poll()=Pending"); - return Poll::Pending; + loop { + match &mut self.state { + Handshaking::Flushing(flush) => { + // We're currently flushing a pending SETTINGS frame. Poll the + // flush future, and, if it's completed, advance our state to wait + // for the client preface. + let codec = match Pin::new(flush).poll(cx)? { + Poll::Pending => { + tracing::trace!(flush.poll = %"Pending"); + return Poll::Pending; + } + Poll::Ready(flushed) => { + tracing::trace!(flush.poll = %"Ready"); + flushed + } + }; + self.state = Handshaking::ReadingPreface( + ReadPreface::new(codec).instrument(tracing::trace_span!("read_preface")), + ); } - Poll::Ready(flushed) => { - log::trace!("Handshake::poll(); flush.poll()=Ready"); - flushed - } - }; - Handshaking::from(ReadPreface::new(codec)) - } else { - // Otherwise, we haven't actually advanced the state, but we have - // to replace it with itself, because we have to return a value. - // (note that the assignment to `self.state` has to be outside of - // the `if let` block above in order to placate the borrow checker). - mem::replace(&mut self.state, Handshaking::Empty) - }; - let poll = if let ReadingPreface(ref mut read) = self.state { - // We're now waiting for the client preface. Poll the `ReadPreface` - // future. If it has completed, we will create a `Connection` handle - // for the connection. - Pin::new(read).poll(cx) - // Actually creating the `Connection` has to occur outside of this - // `if let` block, because we've borrowed `self` mutably in order - // to poll the state and won't be able to borrow the SETTINGS frame - // as well until we release the borrow for `poll()`. 
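For readers following the `ReadPreface` change above: under tokio 1.x, `AsyncRead::poll_read` fills a `ReadBuf` and returns `Poll<io::Result<()>>`, so the byte count comes from `buf.filled()` rather than from the return value. A standalone sketch of that pattern (not part of the patch):

```rust
use std::pin::Pin;
use std::task::{Context, Poll};
use tokio::io::{AsyncRead, ReadBuf};

/// Poll `reader` once, returning how many bytes were placed into `dst`.
/// A return of `Ok(0)` with a non-empty `dst` indicates EOF.
fn poll_read_once<R: AsyncRead + Unpin>(
    reader: &mut R,
    cx: &mut Context<'_>,
    dst: &mut [u8],
) -> Poll<std::io::Result<usize>> {
    let mut buf = ReadBuf::new(dst);
    match Pin::new(reader).poll_read(cx, &mut buf) {
        // Progress is recorded in `buf`, not in the return value.
        Poll::Ready(Ok(())) => Poll::Ready(Ok(buf.filled().len())),
        Poll::Ready(Err(e)) => Poll::Ready(Err(e)),
        Poll::Pending => Poll::Pending,
    }
}
```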
- } else { - unreachable!("Handshake::poll() state was not advanced completely!") - }; - poll?.map(|codec| { - let connection = proto::Connection::new( - codec, - Config { - next_stream_id: 2.into(), - // Server does not need to locally initiate any streams - initial_max_send_streams: 0, - reset_stream_duration: self.builder.reset_stream_duration, - reset_stream_max: self.builder.reset_stream_max, - settings: self.builder.settings.clone(), - }, - ); + Handshaking::ReadingPreface(read) => { + let codec = ready!(Pin::new(read).poll(cx)?); - log::trace!("Handshake::poll(); connection established!"); - let mut c = Connection { connection }; - if let Some(sz) = self.builder.initial_target_connection_window_size { - c.set_target_window_size(sz); + self.state = Handshaking::Done; + + let connection = proto::Connection::new( + codec, + Config { + next_stream_id: 2.into(), + // Server does not need to locally initiate any streams + initial_max_send_streams: 0, + max_send_buffer_size: self.builder.max_send_buffer_size, + reset_stream_duration: self.builder.reset_stream_duration, + reset_stream_max: self.builder.reset_stream_max, + settings: self.builder.settings.clone(), + }, + ); + + tracing::trace!("connection established!"); + let mut c = Connection { connection }; + if let Some(sz) = self.builder.initial_target_connection_window_size { + c.set_target_window_size(sz); + } + + return Poll::Ready(Ok(c)); + } + Handshaking::Done => { + panic!("Handshaking::poll() called again after handshaking was complete") + } } - Ok(c) - }) + } } } @@ -1289,15 +1375,15 @@ impl Peer { if let Err(e) = frame::PushPromise::validate_request(&request) { use PushPromiseHeaderError::*; match e { - NotSafeAndCacheable => log::debug!( - "convert_push_message: method {} is not safe and cacheable; promised_id={:?}", + NotSafeAndCacheable => tracing::debug!( + ?promised_id, + "convert_push_message: method {} is not safe and cacheable", request.method(), - promised_id, ), - InvalidContentLength(e) => log::debug!( - "convert_push_message; promised request has invalid content-length {:?}; promised_id={:?}", + InvalidContentLength(e) => tracing::debug!( + ?promised_id, + "convert_push_message; promised request has invalid content-length {:?}", e, - promised_id, ), } return Err(UserError::MalformedHeaders); @@ -1314,7 +1400,7 @@ impl Peer { _, ) = request.into_parts(); - let pseudo = Pseudo::request(method, uri); + let pseudo = Pseudo::request(method, uri, None); Ok(frame::PushPromise::new( stream_id, @@ -1328,6 +1414,8 @@ impl Peer { impl proto::Peer for Peer { type Poll = Request<()>; + const NAME: &'static str = "Server"; + fn is_server() -> bool { true } @@ -1340,20 +1428,17 @@ impl proto::Peer for Peer { pseudo: Pseudo, fields: HeaderMap, stream_id: StreamId, - ) -> Result { + ) -> Result { use http::{uri, Version}; let mut b = Request::builder(); macro_rules! 
malformed { ($($arg:tt)*) => {{ - log::debug!($($arg)*); - return Err(RecvError::Stream { - id: stream_id, - reason: Reason::PROTOCOL_ERROR, - }); + tracing::debug!($($arg)*); + return Err(Error::library_reset(stream_id, Reason::PROTOCOL_ERROR)); }} - }; + } b = b.version(Version::HTTP_2); @@ -1365,10 +1450,13 @@ impl proto::Peer for Peer { malformed!("malformed headers: missing method"); } - // Specifying :status for a request is a protocol error + let has_protocol = pseudo.protocol.is_some(); + if !is_connect && has_protocol { + malformed!("malformed headers: :protocol on non-CONNECT request"); + } + if pseudo.status.is_some() { - log::trace!("malformed headers: :status field on request; PROTOCOL_ERROR"); - return Err(RecvError::Connection(Reason::PROTOCOL_ERROR)); + malformed!("malformed headers: :status field on request"); } // Convert the URI @@ -1389,7 +1477,7 @@ impl proto::Peer for Peer { // A :scheme is required, except CONNECT. if let Some(scheme) = pseudo.scheme { - if is_connect { + if is_connect && !has_protocol { malformed!(":scheme in CONNECT"); } let maybe_scheme = scheme.parse(); @@ -1407,12 +1495,12 @@ impl proto::Peer for Peer { if parts.authority.is_some() { parts.scheme = Some(scheme); } - } else if !is_connect { + } else if !is_connect || has_protocol { malformed!("malformed headers: missing scheme"); } if let Some(path) = pseudo.path { - if is_connect { + if is_connect && !has_protocol { malformed!(":path in CONNECT"); } @@ -1425,6 +1513,8 @@ impl proto::Peer for Peer { parts.path_and_query = Some(maybe_path.or_else(|why| { malformed!("malformed headers: malformed path ({:?}): {}", path, why,) })?); + } else if is_connect && has_protocol { + malformed!("malformed headers: missing path in extended CONNECT"); } b = b.uri(parts); @@ -1435,10 +1525,7 @@ impl proto::Peer for Peer { // TODO: Should there be more specialized handling for different // kinds of errors proto_err!(stream: "error building request: {}; stream={:?}", e, stream_id); - return Err(RecvError::Stream { - id: stream_id, - reason: Reason::PROTOCOL_ERROR, - }); + return Err(Error::library_reset(stream_id, Reason::PROTOCOL_ERROR)); } }; @@ -1457,42 +1544,9 @@ where #[inline] fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { match *self { - Handshaking::Flushing(_) => write!(f, "Handshaking::Flushing(_)"), - Handshaking::ReadingPreface(_) => write!(f, "Handshaking::ReadingPreface(_)"), - Handshaking::Empty => write!(f, "Handshaking::Empty"), + Handshaking::Flushing(_) => f.write_str("Flushing(_)"), + Handshaking::ReadingPreface(_) => f.write_str("ReadingPreface(_)"), + Handshaking::Done => f.write_str("Done"), } } } - -impl convert::From>> for Handshaking -where - T: AsyncRead + AsyncWrite, - B: Buf, -{ - #[inline] - fn from(flush: Flush>) -> Self { - Handshaking::Flushing(flush) - } -} - -impl convert::From>> for Handshaking -where - T: AsyncRead + AsyncWrite, - B: Buf, -{ - #[inline] - fn from(read: ReadPreface>) -> Self { - Handshaking::ReadingPreface(read) - } -} - -impl convert::From>> for Handshaking -where - T: AsyncRead + AsyncWrite, - B: Buf, -{ - #[inline] - fn from(codec: Codec>) -> Self { - Handshaking::from(Flush::new(codec)) - } -} diff --git a/third_party/rust/h2/src/share.rs b/third_party/rust/h2/src/share.rs index 06291068d061..2a4ff1cddcb3 100644 --- a/third_party/rust/h2/src/share.rs +++ b/third_party/rust/h2/src/share.rs @@ -16,7 +16,7 @@ use std::task::{Context, Poll}; /// # Overview /// /// A `SendStream` is provided by [`SendRequest`] and [`SendResponse`] once the -/// 
HTTP/2.0 message header has been sent sent. It is used to stream the message +/// HTTP/2 message header has been sent sent. It is used to stream the message /// body and send the message trailers. See method level documentation for more /// details. /// @@ -35,7 +35,7 @@ use std::task::{Context, Poll}; /// /// # Flow control /// -/// In HTTP/2.0, data cannot be sent to the remote peer unless there is +/// In HTTP/2, data cannot be sent to the remote peer unless there is /// available window capacity on both the stream and the connection. When a data /// frame is sent, both the stream window and the connection window are /// decremented. When the stream level window reaches zero, no further data can @@ -44,7 +44,7 @@ use std::task::{Context, Poll}; /// /// When the remote peer is ready to receive more data, it sends `WINDOW_UPDATE` /// frames. These frames increment the windows. See the [specification] for more -/// details on the principles of HTTP/2.0 flow control. +/// details on the principles of HTTP/2 flow control. /// /// The implications for sending data are that the caller **should** ensure that /// both the stream and the connection has available window capacity before @@ -115,7 +115,7 @@ pub struct StreamId(u32); /// Receives the body stream and trailers from the remote peer. /// /// A `RecvStream` is provided by [`client::ResponseFuture`] and -/// [`server::Connection`] with the received HTTP/2.0 message head (the response +/// [`server::Connection`] with the received HTTP/2 message head (the response /// and request head respectively). /// /// A `RecvStream` instance is used to receive the streaming message body and @@ -125,11 +125,6 @@ pub struct StreamId(u32); /// See method level documentation for more details on receiving data. See /// [`FlowControl`] for more details on inbound flow control. /// -/// Note that this type implements [`Stream`], yielding the received data frames. -/// When this implementation is used, the capacity is immediately released when -/// the data is yielded. It is recommended to only use this API when the data -/// will not be retained in memory for extended periods of time. -/// /// [`client::ResponseFuture`]: client/struct.ResponseFuture.html /// [`server::Connection`]: server/struct.Connection.html /// [`FlowControl`]: struct.FlowControl.html @@ -173,12 +168,12 @@ pub struct RecvStream { /// /// # Scenarios /// -/// Following is a basic scenario with an HTTP/2.0 connection containing a +/// Following is a basic scenario with an HTTP/2 connection containing a /// single active stream. /// /// * A new stream is activated. The receive window is initialized to 1024 (the /// value of the initial window size for this connection). -/// * A `DATA` frame is received containing a payload of 400 bytes. +/// * A `DATA` frame is received containing a payload of 600 bytes. /// * The receive window size is reduced to 424 bytes. /// * [`release_capacity`] is called with 200. /// * The receive window size is now 624 bytes. The peer may send no more than @@ -406,7 +401,7 @@ impl RecvStream { futures_util::future::poll_fn(move |cx| self.poll_trailers(cx)).await } - #[doc(hidden)] + /// Poll for the next data frame. 
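A short sketch (not part of the patch) of the inbound flow-control pattern the `share.rs` docs above describe: each received DATA chunk consumes window capacity, and `FlowControl::release_capacity` hands it back so the peer may keep sending.

```rust
use h2::RecvStream;

async fn drain(mut body: RecvStream) -> Result<(), h2::Error> {
    // `FlowControl` is cheap to clone and releases capacity for this stream.
    let mut flow_control = body.flow_control().clone();
    while let Some(chunk) = body.data().await {
        let chunk = chunk?;
        // ... process `chunk` here ...
        // Return the consumed bytes to the stream and connection windows.
        flow_control.release_capacity(chunk.len())?;
    }
    Ok(())
}
```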
pub fn poll_data(&mut self, cx: &mut Context<'_>) -> Poll>> { self.inner.inner.poll_data(cx).map_err_(Into::into) } diff --git a/third_party/rust/http-body/.cargo-checksum.json b/third_party/rust/http-body/.cargo-checksum.json index 1ed56997023d..dd8581af9b08 100644 --- a/third_party/rust/http-body/.cargo-checksum.json +++ b/third_party/rust/http-body/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"CHANGELOG.md":"f5b7d70cb2c2177a4c718c17ae87c452cec35525b3e43c56d9cd1c33c78c0ded","Cargo.toml":"264c2707f00a87f511e5f131ce2e4bd67d3458346e86600b15f99e647d559e0b","LICENSE":"0345e2b98685e3807fd802a2478085dcae35023e3da59b5a00f712504314d83a","README.md":"0f90f61ee419eefd4104005ef6900445fafce9a710dd1989463f3cebaf0fafe8","src/lib.rs":"6126e071569d147e36e8377135ddc456b4c89c00a7688487c84fd4e8ef6c0c17","src/next.rs":"d6863067b20c4bb42dced5c17bd954816b1338ce53e8d34ab81dbe240a1601cf","src/size_hint.rs":"017ed58c59b446b93aa4922e35b596490bf8f03af37c631610cc6576f1c21439","tests/is_end_stream.rs":"3a66d80d064f8a447bfa9fd212c2f91855604b1b41f554da3a029bc4a5be3a7e"},"package":"13d5ff830006f7646652e057693569bfe0d51760c0085a071769d142a205111b"} \ No newline at end of file +{"files":{"CHANGELOG.md":"b225eb6c706ab729255efa55603551151216b9e5f96dd722469ff5f28d843338","Cargo.toml":"037a31b6a59c860c5fe9860ba07b91f0141311453af66275b6d1336db2b1b1fb","LICENSE":"0345e2b98685e3807fd802a2478085dcae35023e3da59b5a00f712504314d83a","README.md":"0f90f61ee419eefd4104005ef6900445fafce9a710dd1989463f3cebaf0fafe8","src/combinators/box_body.rs":"d27dfa9f289c9c8d1fe714415fb5df5bdaafafb80a5cff66fbbe720841e806bf","src/combinators/map_data.rs":"3063f44d1318feeec639eff6544e7fb91ad9abf9a295770af4cc69b48a691796","src/combinators/map_err.rs":"9db485a5904579147673ac7f9f347e322d283d95a421daaf5541d048045eec7e","src/combinators/mod.rs":"c9e32f64ab2f4866d14256fff4256ba61d4c1bcfaf2748754c561de3abe1eccd","src/empty.rs":"3e44cee68410101cb8bf88c0de504885075c084357e83bcd3a6761ba5c7c58d2","src/full.rs":"efcbf6831d32271170e2ed86c328bfb887aec0c93689f1218ab5a18c319b0fa8","src/lib.rs":"140ff217ecf7e361b9f083dc664f1d65be176b235a71fdb1e8031eafda989a38","src/limited.rs":"485fc1c58bba29d2c7afdb4a032cd0e3c3578979ccd71f7459ddcd67e0f16077","src/next.rs":"d6863067b20c4bb42dced5c17bd954816b1338ce53e8d34ab81dbe240a1601cf","src/size_hint.rs":"017ed58c59b446b93aa4922e35b596490bf8f03af37c631610cc6576f1c21439","tests/is_end_stream.rs":"3a66d80d064f8a447bfa9fd212c2f91855604b1b41f554da3a029bc4a5be3a7e"},"package":"d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1"} \ No newline at end of file diff --git a/third_party/rust/http-body/CHANGELOG.md b/third_party/rust/http-body/CHANGELOG.md index 76e6dc8db405..4708a20075ee 100644 --- a/third_party/rust/http-body/CHANGELOG.md +++ b/third_party/rust/http-body/CHANGELOG.md @@ -1,3 +1,37 @@ +# Unreleased + +None. + +# 0.4.5 (May 20, 2022) + +- Add `String` impl for `Body`. +- Add `Limited` body implementation. + +# 0.4.4 (October 22, 2021) + +- Add `UnsyncBoxBody` and `Body::boxed_unsync`. + +# 0.4.3 (August 8, 2021) + +- Implement `Default` for `BoxBody`. + +# 0.4.2 (May 8, 2021) + +- Correctly override `Body::size_hint` and `Body::is_end_stream` for `Empty`. +- Add `Full` which is a body that consists of a single chunk. + +# 0.4.1 (March 18, 2021) + +- Add combinators to `Body`: + - `map_data`: Change the `Data` chunks produced by the body. + - `map_err`: Change the `Error`s produced by the body. + - `boxed`: Convert the `Body` into a boxed trait object. +- Add `Empty`. 
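The `http-body` 0.4 changelog entries above introduce several new body types and combinators; a brief sketch of how they compose (not from the patch; the helper names are illustrative):

```rust
use bytes::Bytes;
use http_body::combinators::BoxBody;
use http_body::{Body, Empty, Full, Limited};
use std::convert::Infallible;

// A fixed, single-chunk body, type-erased into a `BoxBody`.
fn fixed_body() -> BoxBody<Bytes, Infallible> {
    Full::new(Bytes::from_static(b"hello world")).boxed()
}

// An always-empty body with the same erased type.
fn empty_body() -> BoxBody<Bytes, Infallible> {
    Empty::new().boxed()
}

// Wrap any body so that reading more than 64 KiB yields a `LengthLimitError`.
fn capped<B>(inner: B) -> Limited<B> {
    Limited::new(inner, 64 * 1024)
}
```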
+ +# 0.4.0 (December 23, 2020) + +- Update `bytes` to v1.0. + # 0.3.1 (December 13, 2019) - Implement `Body` for `http::Request` and `http::Response`. diff --git a/third_party/rust/http-body/Cargo.toml b/third_party/rust/http-body/Cargo.toml index f9cc50faf8e8..202ce7de8490 100644 --- a/third_party/rust/http-body/Cargo.toml +++ b/third_party/rust/http-body/Cargo.toml @@ -3,27 +3,43 @@ # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies +# to registry (e.g., crates.io) dependencies. # -# If you believe there's an error in this file please file an -# issue against the rust-lang/cargo repository. If you're -# editing this file be aware that the upstream Cargo.toml -# will likely look very different (and much more reasonable) +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. [package] edition = "2018" name = "http-body" -version = "0.3.1" -authors = ["Carl Lerche ", "Lucio Franco ", "Sean McArthur "] -description = "Trait representing an asynchronous, streaming, HTTP request or response body.\n" -documentation = "https://docs.rs/http-body/0.3.0/http-body" +version = "0.4.5" +authors = [ + "Carl Lerche ", + "Lucio Franco ", + "Sean McArthur ", +] +description = """ +Trait representing an asynchronous, streaming, HTTP request or response body. +""" +documentation = "https://docs.rs/http-body" readme = "README.md" keywords = ["http"] categories = ["web-programming"] license = "MIT" repository = "https://github.com/hyperium/http-body" + [dependencies.bytes] -version = "0.5" +version = "1" [dependencies.http] version = "0.2" + +[dependencies.pin-project-lite] +version = "0.2" + +[dev-dependencies.tokio] +version = "1" +features = [ + "macros", + "rt", +] diff --git a/third_party/rust/http-body/src/combinators/box_body.rs b/third_party/rust/http-body/src/combinators/box_body.rs new file mode 100644 index 000000000000..97c8313fd77c --- /dev/null +++ b/third_party/rust/http-body/src/combinators/box_body.rs @@ -0,0 +1,134 @@ +use crate::Body; +use bytes::Buf; +use std::{ + fmt, + pin::Pin, + task::{Context, Poll}, +}; + +/// A boxed [`Body`] trait object. +pub struct BoxBody { + inner: Pin + Send + Sync + 'static>>, +} + +/// A boxed [`Body`] trait object that is !Sync. +pub struct UnsyncBoxBody { + inner: Pin + Send + 'static>>, +} + +impl BoxBody { + /// Create a new `BoxBody`. 
+ pub fn new(body: B) -> Self + where + B: Body + Send + Sync + 'static, + D: Buf, + { + Self { + inner: Box::pin(body), + } + } +} + +impl fmt::Debug for BoxBody { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("BoxBody").finish() + } +} + +impl Body for BoxBody +where + D: Buf, +{ + type Data = D; + type Error = E; + + fn poll_data( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll>> { + self.inner.as_mut().poll_data(cx) + } + + fn poll_trailers( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll, Self::Error>> { + self.inner.as_mut().poll_trailers(cx) + } + + fn is_end_stream(&self) -> bool { + self.inner.is_end_stream() + } + + fn size_hint(&self) -> crate::SizeHint { + self.inner.size_hint() + } +} + +impl Default for BoxBody +where + D: Buf + 'static, +{ + fn default() -> Self { + BoxBody::new(crate::Empty::new().map_err(|err| match err {})) + } +} + +// === UnsyncBoxBody === +impl UnsyncBoxBody { + /// Create a new `BoxBody`. + pub fn new(body: B) -> Self + where + B: Body + Send + 'static, + D: Buf, + { + Self { + inner: Box::pin(body), + } + } +} + +impl fmt::Debug for UnsyncBoxBody { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("UnsyncBoxBody").finish() + } +} + +impl Body for UnsyncBoxBody +where + D: Buf, +{ + type Data = D; + type Error = E; + + fn poll_data( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll>> { + self.inner.as_mut().poll_data(cx) + } + + fn poll_trailers( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll, Self::Error>> { + self.inner.as_mut().poll_trailers(cx) + } + + fn is_end_stream(&self) -> bool { + self.inner.is_end_stream() + } + + fn size_hint(&self) -> crate::SizeHint { + self.inner.size_hint() + } +} + +impl Default for UnsyncBoxBody +where + D: Buf + 'static, +{ + fn default() -> Self { + UnsyncBoxBody::new(crate::Empty::new().map_err(|err| match err {})) + } +} diff --git a/third_party/rust/http-body/src/combinators/map_data.rs b/third_party/rust/http-body/src/combinators/map_data.rs new file mode 100644 index 000000000000..6d9c5a896485 --- /dev/null +++ b/third_party/rust/http-body/src/combinators/map_data.rs @@ -0,0 +1,94 @@ +use crate::Body; +use bytes::Buf; +use pin_project_lite::pin_project; +use std::{ + any::type_name, + fmt, + pin::Pin, + task::{Context, Poll}, +}; + +pin_project! { + /// Body returned by the [`map_data`] combinator. 
+ /// + /// [`map_data`]: crate::util::BodyExt::map_data + #[derive(Clone, Copy)] + pub struct MapData { + #[pin] + inner: B, + f: F + } +} + +impl MapData { + #[inline] + pub(crate) fn new(body: B, f: F) -> Self { + Self { inner: body, f } + } + + /// Get a reference to the inner body + pub fn get_ref(&self) -> &B { + &self.inner + } + + /// Get a mutable reference to the inner body + pub fn get_mut(&mut self) -> &mut B { + &mut self.inner + } + + /// Get a pinned mutable reference to the inner body + pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut B> { + self.project().inner + } + + /// Consume `self`, returning the inner body + pub fn into_inner(self) -> B { + self.inner + } +} + +impl Body for MapData +where + B: Body, + F: FnMut(B::Data) -> B2, + B2: Buf, +{ + type Data = B2; + type Error = B::Error; + + fn poll_data( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll>> { + let this = self.project(); + match this.inner.poll_data(cx) { + Poll::Pending => Poll::Pending, + Poll::Ready(None) => Poll::Ready(None), + Poll::Ready(Some(Ok(data))) => Poll::Ready(Some(Ok((this.f)(data)))), + Poll::Ready(Some(Err(err))) => Poll::Ready(Some(Err(err))), + } + } + + fn poll_trailers( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll, Self::Error>> { + self.project().inner.poll_trailers(cx) + } + + fn is_end_stream(&self) -> bool { + self.inner.is_end_stream() + } +} + +impl fmt::Debug for MapData +where + B: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("MapData") + .field("inner", &self.inner) + .field("f", &type_name::()) + .finish() + } +} diff --git a/third_party/rust/http-body/src/combinators/map_err.rs b/third_party/rust/http-body/src/combinators/map_err.rs new file mode 100644 index 000000000000..c77168deb5a9 --- /dev/null +++ b/third_party/rust/http-body/src/combinators/map_err.rs @@ -0,0 +1,97 @@ +use crate::Body; +use pin_project_lite::pin_project; +use std::{ + any::type_name, + fmt, + pin::Pin, + task::{Context, Poll}, +}; + +pin_project! { + /// Body returned by the [`map_err`] combinator. 
+ /// + /// [`map_err`]: crate::util::BodyExt::map_err + #[derive(Clone, Copy)] + pub struct MapErr { + #[pin] + inner: B, + f: F + } +} + +impl MapErr { + #[inline] + pub(crate) fn new(body: B, f: F) -> Self { + Self { inner: body, f } + } + + /// Get a reference to the inner body + pub fn get_ref(&self) -> &B { + &self.inner + } + + /// Get a mutable reference to the inner body + pub fn get_mut(&mut self) -> &mut B { + &mut self.inner + } + + /// Get a pinned mutable reference to the inner body + pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut B> { + self.project().inner + } + + /// Consume `self`, returning the inner body + pub fn into_inner(self) -> B { + self.inner + } +} + +impl Body for MapErr +where + B: Body, + F: FnMut(B::Error) -> E, +{ + type Data = B::Data; + type Error = E; + + fn poll_data( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll>> { + let this = self.project(); + match this.inner.poll_data(cx) { + Poll::Pending => Poll::Pending, + Poll::Ready(None) => Poll::Ready(None), + Poll::Ready(Some(Ok(data))) => Poll::Ready(Some(Ok(data))), + Poll::Ready(Some(Err(err))) => Poll::Ready(Some(Err((this.f)(err)))), + } + } + + fn poll_trailers( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll, Self::Error>> { + let this = self.project(); + this.inner.poll_trailers(cx).map_err(this.f) + } + + fn is_end_stream(&self) -> bool { + self.inner.is_end_stream() + } + + fn size_hint(&self) -> crate::SizeHint { + self.inner.size_hint() + } +} + +impl fmt::Debug for MapErr +where + B: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("MapErr") + .field("inner", &self.inner) + .field("f", &type_name::()) + .finish() + } +} diff --git a/third_party/rust/http-body/src/combinators/mod.rs b/third_party/rust/http-body/src/combinators/mod.rs new file mode 100644 index 000000000000..c52f36755452 --- /dev/null +++ b/third_party/rust/http-body/src/combinators/mod.rs @@ -0,0 +1,11 @@ +//! Combinators for the `Body` trait. + +mod box_body; +mod map_data; +mod map_err; + +pub use self::{ + box_body::{BoxBody, UnsyncBoxBody}, + map_data::MapData, + map_err::MapErr, +}; diff --git a/third_party/rust/http-body/src/empty.rs b/third_party/rust/http-body/src/empty.rs new file mode 100644 index 000000000000..7d63ceb054a9 --- /dev/null +++ b/third_party/rust/http-body/src/empty.rs @@ -0,0 +1,75 @@ +use super::{Body, SizeHint}; +use bytes::Buf; +use http::HeaderMap; +use std::{ + convert::Infallible, + fmt, + marker::PhantomData, + pin::Pin, + task::{Context, Poll}, +}; + +/// A body that is always empty. +pub struct Empty { + _marker: PhantomData D>, +} + +impl Empty { + /// Create a new `Empty`. 
+ pub fn new() -> Self { + Self::default() + } +} + +impl Body for Empty { + type Data = D; + type Error = Infallible; + + #[inline] + fn poll_data( + self: Pin<&mut Self>, + _cx: &mut Context<'_>, + ) -> Poll>> { + Poll::Ready(None) + } + + #[inline] + fn poll_trailers( + self: Pin<&mut Self>, + _cx: &mut Context<'_>, + ) -> Poll, Self::Error>> { + Poll::Ready(Ok(None)) + } + + fn is_end_stream(&self) -> bool { + true + } + + fn size_hint(&self) -> SizeHint { + SizeHint::with_exact(0) + } +} + +impl fmt::Debug for Empty { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Empty").finish() + } +} + +impl Default for Empty { + fn default() -> Self { + Self { + _marker: PhantomData, + } + } +} + +impl Clone for Empty { + fn clone(&self) -> Self { + Self { + _marker: PhantomData, + } + } +} + +impl Copy for Empty {} diff --git a/third_party/rust/http-body/src/full.rs b/third_party/rust/http-body/src/full.rs new file mode 100644 index 000000000000..f1d063b0aebe --- /dev/null +++ b/third_party/rust/http-body/src/full.rs @@ -0,0 +1,151 @@ +use crate::{Body, SizeHint}; +use bytes::{Buf, Bytes}; +use http::HeaderMap; +use pin_project_lite::pin_project; +use std::borrow::Cow; +use std::convert::{Infallible, TryFrom}; +use std::pin::Pin; +use std::task::{Context, Poll}; + +pin_project! { + /// A body that consists of a single chunk. + #[derive(Clone, Copy, Debug)] + pub struct Full { + data: Option, + } +} + +impl Full +where + D: Buf, +{ + /// Create a new `Full`. + pub fn new(data: D) -> Self { + let data = if data.has_remaining() { + Some(data) + } else { + None + }; + Full { data } + } +} + +impl Body for Full +where + D: Buf, +{ + type Data = D; + type Error = Infallible; + + fn poll_data( + mut self: Pin<&mut Self>, + _cx: &mut Context<'_>, + ) -> Poll>> { + Poll::Ready(self.data.take().map(Ok)) + } + + fn poll_trailers( + self: Pin<&mut Self>, + _cx: &mut Context<'_>, + ) -> Poll, Self::Error>> { + Poll::Ready(Ok(None)) + } + + fn is_end_stream(&self) -> bool { + self.data.is_none() + } + + fn size_hint(&self) -> SizeHint { + self.data + .as_ref() + .map(|data| SizeHint::with_exact(u64::try_from(data.remaining()).unwrap())) + .unwrap_or_else(|| SizeHint::with_exact(0)) + } +} + +impl Default for Full +where + D: Buf, +{ + /// Create an empty `Full`. 
+ fn default() -> Self { + Full { data: None } + } +} + +impl From for Full +where + D: Buf + From, +{ + fn from(bytes: Bytes) -> Self { + Full::new(D::from(bytes)) + } +} + +impl From> for Full +where + D: Buf + From>, +{ + fn from(vec: Vec) -> Self { + Full::new(D::from(vec)) + } +} + +impl From<&'static [u8]> for Full +where + D: Buf + From<&'static [u8]>, +{ + fn from(slice: &'static [u8]) -> Self { + Full::new(D::from(slice)) + } +} + +impl From> for Full +where + D: Buf + From<&'static B> + From, + B: ToOwned + ?Sized, +{ + fn from(cow: Cow<'static, B>) -> Self { + match cow { + Cow::Borrowed(b) => Full::new(D::from(b)), + Cow::Owned(o) => Full::new(D::from(o)), + } + } +} + +impl From for Full +where + D: Buf + From, +{ + fn from(s: String) -> Self { + Full::new(D::from(s)) + } +} + +impl From<&'static str> for Full +where + D: Buf + From<&'static str>, +{ + fn from(slice: &'static str) -> Self { + Full::new(D::from(slice)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[tokio::test] + async fn full_returns_some() { + let mut full = Full::new(&b"hello"[..]); + assert_eq!(full.size_hint().exact(), Some(b"hello".len() as u64)); + assert_eq!(full.data().await, Some(Ok(&b"hello"[..]))); + assert!(full.data().await.is_none()); + } + + #[tokio::test] + async fn empty_full_returns_none() { + assert!(Full::<&[u8]>::default().data().await.is_none()); + assert!(Full::new(&b""[..]).data().await.is_none()); + } +} diff --git a/third_party/rust/http-body/src/lib.rs b/third_party/rust/http-body/src/lib.rs index 0d0e061d980f..84efd9169f87 100644 --- a/third_party/rust/http-body/src/lib.rs +++ b/third_party/rust/http-body/src/lib.rs @@ -1,5 +1,10 @@ -#![doc(html_root_url = "https://docs.rs/http-body/0.3.1")] -#![deny(missing_debug_implementations, missing_docs, unreachable_pub)] +#![doc(html_root_url = "https://docs.rs/http-body/0.4.5")] +#![deny( + missing_debug_implementations, + missing_docs, + unreachable_pub, + broken_intra_doc_links +)] #![cfg_attr(test, deny(warnings))] //! Asynchronous HTTP request or response body. @@ -8,14 +13,24 @@ //! //! [`Body`]: trait.Body.html +mod empty; +mod full; +mod limited; mod next; mod size_hint; +pub mod combinators; + +pub use self::empty::Empty; +pub use self::full::Full; +pub use self::limited::{LengthLimitError, Limited}; pub use self::next::{Data, Trailers}; pub use self::size_hint::SizeHint; -use bytes::Buf; +use self::combinators::{BoxBody, MapData, MapErr, UnsyncBoxBody}; +use bytes::{Buf, Bytes}; use http::HeaderMap; +use std::convert::Infallible; use std::ops; use std::pin::Pin; use std::task::{Context, Poll}; @@ -83,6 +98,41 @@ pub trait Body { { Trailers(self) } + + /// Maps this body's data value to a different value. + fn map_data(self, f: F) -> MapData + where + Self: Sized, + F: FnMut(Self::Data) -> B, + B: Buf, + { + MapData::new(self, f) + } + + /// Maps this body's error value to a different value. + fn map_err(self, f: F) -> MapErr + where + Self: Sized, + F: FnMut(Self::Error) -> E, + { + MapErr::new(self, f) + } + + /// Turn this body into a boxed trait object. + fn boxed(self) -> BoxBody + where + Self: Sized + Send + Sync + 'static, + { + BoxBody::new(self) + } + + /// Turn this body into a boxed trait object that is !Sync. 
+ fn boxed_unsync(self) -> UnsyncBoxBody + where + Self: Sized + Send + 'static, + { + UnsyncBoxBody::new(self) + } } impl Body for &mut T { @@ -236,6 +286,38 @@ impl Body for http::Response { } } +impl Body for String { + type Data = Bytes; + type Error = Infallible; + + fn poll_data( + mut self: Pin<&mut Self>, + _cx: &mut Context<'_>, + ) -> Poll>> { + if !self.is_empty() { + let s = std::mem::take(&mut *self); + Poll::Ready(Some(Ok(s.into_bytes().into()))) + } else { + Poll::Ready(None) + } + } + + fn poll_trailers( + self: Pin<&mut Self>, + _cx: &mut Context<'_>, + ) -> Poll, Self::Error>> { + Poll::Ready(Ok(None)) + } + + fn is_end_stream(&self) -> bool { + self.is_empty() + } + + fn size_hint(&self) -> SizeHint { + SizeHint::with_exact(self.len() as u64) + } +} + #[cfg(test)] fn _assert_bounds() { fn can_be_trait_object(_: &dyn Body>, Error = std::io::Error>) {} diff --git a/third_party/rust/http-body/src/limited.rs b/third_party/rust/http-body/src/limited.rs new file mode 100644 index 000000000000..a40add91f9ee --- /dev/null +++ b/third_party/rust/http-body/src/limited.rs @@ -0,0 +1,299 @@ +use crate::{Body, SizeHint}; +use bytes::Buf; +use http::HeaderMap; +use pin_project_lite::pin_project; +use std::error::Error; +use std::fmt; +use std::pin::Pin; +use std::task::{Context, Poll}; + +pin_project! { + /// A length limited body. + /// + /// This body will return an error if more than the configured number + /// of bytes are returned on polling the wrapped body. + #[derive(Clone, Copy, Debug)] + pub struct Limited { + remaining: usize, + #[pin] + inner: B, + } +} + +impl Limited { + /// Create a new `Limited`. + pub fn new(inner: B, limit: usize) -> Self { + Self { + remaining: limit, + inner, + } + } +} + +impl Body for Limited +where + B: Body, + B::Error: Into>, +{ + type Data = B::Data; + type Error = Box; + + fn poll_data( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll>> { + let this = self.project(); + let res = match this.inner.poll_data(cx) { + Poll::Pending => return Poll::Pending, + Poll::Ready(None) => None, + Poll::Ready(Some(Ok(data))) => { + if data.remaining() > *this.remaining { + *this.remaining = 0; + Some(Err(LengthLimitError.into())) + } else { + *this.remaining -= data.remaining(); + Some(Ok(data)) + } + } + Poll::Ready(Some(Err(err))) => Some(Err(err.into())), + }; + + Poll::Ready(res) + } + + fn poll_trailers( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll, Self::Error>> { + let this = self.project(); + let res = match this.inner.poll_trailers(cx) { + Poll::Pending => return Poll::Pending, + Poll::Ready(Ok(data)) => Ok(data), + Poll::Ready(Err(err)) => Err(err.into()), + }; + + Poll::Ready(res) + } + + fn is_end_stream(&self) -> bool { + self.inner.is_end_stream() + } + + fn size_hint(&self) -> SizeHint { + use std::convert::TryFrom; + match u64::try_from(self.remaining) { + Ok(n) => { + let mut hint = self.inner.size_hint(); + if hint.lower() >= n { + hint.set_exact(n) + } else if let Some(max) = hint.upper() { + hint.set_upper(n.min(max)) + } else { + hint.set_upper(n) + } + hint + } + Err(_) => self.inner.size_hint(), + } + } +} + +/// An error returned when body length exceeds the configured limit. 
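Since `Limited` erases its inner error into `Box<dyn Error + Send + Sync>`, callers detect the limit case by downcasting to `LengthLimitError`, mirroring what the tests below do. A hedged sketch (not part of the patch):

```rust
use bytes::Bytes;
use http_body::{Body, Full, LengthLimitError, Limited};

async fn read_capped(limit: usize) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    let mut body = Limited::new(Full::new(Bytes::from_static(b"hello world")), limit);
    while let Some(chunk) = body.data().await {
        match chunk {
            Ok(data) => println!("read {} bytes", data.len()),
            // The limit was exceeded; recover by downcasting the boxed error.
            Err(err) if err.downcast_ref::<LengthLimitError>().is_some() => {
                println!("body exceeded the {}-byte limit", limit);
                return Ok(());
            }
            Err(err) => return Err(err),
        }
    }
    Ok(())
}
```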
+#[derive(Debug)] +#[non_exhaustive] +pub struct LengthLimitError; + +impl fmt::Display for LengthLimitError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.write_str("length limit exceeded") + } +} + +impl Error for LengthLimitError {} + +#[cfg(test)] +mod tests { + use super::*; + use crate::Full; + use bytes::Bytes; + use std::convert::Infallible; + + #[tokio::test] + async fn read_for_body_under_limit_returns_data() { + const DATA: &[u8] = b"testing"; + let inner = Full::new(Bytes::from(DATA)); + let body = &mut Limited::new(inner, 8); + + let mut hint = SizeHint::new(); + hint.set_upper(7); + assert_eq!(body.size_hint().upper(), hint.upper()); + + let data = body.data().await.unwrap().unwrap(); + assert_eq!(data, DATA); + hint.set_upper(0); + assert_eq!(body.size_hint().upper(), hint.upper()); + + assert!(matches!(body.data().await, None)); + } + + #[tokio::test] + async fn read_for_body_over_limit_returns_error() { + const DATA: &[u8] = b"testing a string that is too long"; + let inner = Full::new(Bytes::from(DATA)); + let body = &mut Limited::new(inner, 8); + + let mut hint = SizeHint::new(); + hint.set_upper(8); + assert_eq!(body.size_hint().upper(), hint.upper()); + + let error = body.data().await.unwrap().unwrap_err(); + assert!(matches!(error.downcast_ref(), Some(LengthLimitError))); + } + + struct Chunky(&'static [&'static [u8]]); + + impl Body for Chunky { + type Data = &'static [u8]; + type Error = Infallible; + + fn poll_data( + self: Pin<&mut Self>, + _cx: &mut Context<'_>, + ) -> Poll>> { + let mut this = self; + match this.0.split_first().map(|(&head, tail)| (Ok(head), tail)) { + Some((data, new_tail)) => { + this.0 = new_tail; + + Poll::Ready(Some(data)) + } + None => Poll::Ready(None), + } + } + + fn poll_trailers( + self: Pin<&mut Self>, + _cx: &mut Context<'_>, + ) -> Poll, Self::Error>> { + Poll::Ready(Ok(Some(HeaderMap::new()))) + } + } + + #[tokio::test] + async fn read_for_chunked_body_around_limit_returns_first_chunk_but_returns_error_on_over_limit_chunk( + ) { + const DATA: &[&[u8]] = &[b"testing ", b"a string that is too long"]; + let inner = Chunky(DATA); + let body = &mut Limited::new(inner, 8); + + let mut hint = SizeHint::new(); + hint.set_upper(8); + assert_eq!(body.size_hint().upper(), hint.upper()); + + let data = body.data().await.unwrap().unwrap(); + assert_eq!(data, DATA[0]); + hint.set_upper(0); + assert_eq!(body.size_hint().upper(), hint.upper()); + + let error = body.data().await.unwrap().unwrap_err(); + assert!(matches!(error.downcast_ref(), Some(LengthLimitError))); + } + + #[tokio::test] + async fn read_for_chunked_body_over_limit_on_first_chunk_returns_error() { + const DATA: &[&[u8]] = &[b"testing a string", b" that is too long"]; + let inner = Chunky(DATA); + let body = &mut Limited::new(inner, 8); + + let mut hint = SizeHint::new(); + hint.set_upper(8); + assert_eq!(body.size_hint().upper(), hint.upper()); + + let error = body.data().await.unwrap().unwrap_err(); + assert!(matches!(error.downcast_ref(), Some(LengthLimitError))); + } + + #[tokio::test] + async fn read_for_chunked_body_under_limit_is_okay() { + const DATA: &[&[u8]] = &[b"test", b"ing!"]; + let inner = Chunky(DATA); + let body = &mut Limited::new(inner, 8); + + let mut hint = SizeHint::new(); + hint.set_upper(8); + assert_eq!(body.size_hint().upper(), hint.upper()); + + let data = body.data().await.unwrap().unwrap(); + assert_eq!(data, DATA[0]); + hint.set_upper(4); + assert_eq!(body.size_hint().upper(), hint.upper()); + + let data = 
body.data().await.unwrap().unwrap(); + assert_eq!(data, DATA[1]); + hint.set_upper(0); + assert_eq!(body.size_hint().upper(), hint.upper()); + + assert!(matches!(body.data().await, None)); + } + + #[tokio::test] + async fn read_for_trailers_propagates_inner_trailers() { + const DATA: &[&[u8]] = &[b"test", b"ing!"]; + let inner = Chunky(DATA); + let body = &mut Limited::new(inner, 8); + let trailers = body.trailers().await.unwrap(); + assert_eq!(trailers, Some(HeaderMap::new())) + } + + #[derive(Debug)] + enum ErrorBodyError { + Data, + Trailers, + } + + impl fmt::Display for ErrorBodyError { + fn fmt(&self, _f: &mut fmt::Formatter) -> fmt::Result { + Ok(()) + } + } + + impl Error for ErrorBodyError {} + + struct ErrorBody; + + impl Body for ErrorBody { + type Data = &'static [u8]; + type Error = ErrorBodyError; + + fn poll_data( + self: Pin<&mut Self>, + _cx: &mut Context<'_>, + ) -> Poll>> { + Poll::Ready(Some(Err(ErrorBodyError::Data))) + } + + fn poll_trailers( + self: Pin<&mut Self>, + _cx: &mut Context<'_>, + ) -> Poll, Self::Error>> { + Poll::Ready(Err(ErrorBodyError::Trailers)) + } + } + + #[tokio::test] + async fn read_for_body_returning_error_propagates_error() { + let body = &mut Limited::new(ErrorBody, 8); + let error = body.data().await.unwrap().unwrap_err(); + assert!(matches!(error.downcast_ref(), Some(ErrorBodyError::Data))); + } + + #[tokio::test] + async fn trailers_for_body_returning_error_propagates_error() { + let body = &mut Limited::new(ErrorBody, 8); + let error = body.trailers().await.unwrap_err(); + assert!(matches!( + error.downcast_ref(), + Some(ErrorBodyError::Trailers) + )); + } +} diff --git a/third_party/rust/http/.cargo-checksum.json b/third_party/rust/http/.cargo-checksum.json index 854b6dfa0e6d..4e38e1e1ca8c 100644 --- a/third_party/rust/http/.cargo-checksum.json +++ b/third_party/rust/http/.cargo-checksum.json @@ -1 +1 @@ 
-{"files":{"CHANGELOG.md":"696ee047ee2073a2c5419b1f8849faebe5ea13141d3ce9af0820fbc803c27467","Cargo.toml":"7d62394e0f4d480355e134f6602bc9987cd6860e8d515f54f128b93e6a015991","LICENSE-APACHE":"8bb1b50b0e5c9399ae33bd35fab2769010fa6c14e8860c729a52295d84896b7a","LICENSE-MIT":"dc91f8200e4b2a1f9261035d4c18c33c246911a6c0f7b543d75347e61b249cff","README.md":"006da76b9d578dc69162a15462840fea998b1a2fcc14f689f91d2c92cab91c8f","benches/header_map/basic.rs":"7aa1f667be9b7fd2aab249a6ca6cfa2f06229c79d3406a8354d07bf9ccf09fd8","benches/header_map/mod.rs":"c1a4c7291b94d2477c1aa6721a5cfbd3dc63242baf14b836138273830cc67990","benches/header_map/vec_map.rs":"48f4eb7a90b6f0844a445924cf24c80acfaf76f909acca4492bfeb9b1ea9e4a4","benches/header_name.rs":"ae65e76147a0251cf0191edbed5961c99cc9407f2eba006d8f59b4c2dc636c1e","benches/header_value.rs":"7aa588f2155c513b7aebfe0b1cc7f7c6b0a699fafd817fe2fee8e46fb131bd76","benches/method.rs":"1913b8c95ecf542e9e50e20ff645f4024036e9b89f04a83e06303bf423053508","benches/uri.rs":"fbc88c25bc27d39162c674c162d1e40c8139452fbfb891a7caebb3c225baa1ae","src/byte_str.rs":"f003f02bc7fa087382c1d54a4d87b16e1cc9289385a93ebf39ad79ff663ef042","src/convert.rs":"a31a4351cd3ee36a58ff4f5b30ce2c8967cde8486faea2d2673a8f8cb74b3204","src/error.rs":"8163a0b4f659a0f0070559568a7b553a5370553a4570bb496811ca3b978046bf","src/extensions.rs":"4d8c0534bd444c331b7dfe263ee84feac9306c6fc2cf5d14793f01ab0144a5e6","src/header/map.rs":"b5dba93de3b9f5035b5f8c444e8c21d1f53b3c65cc258aff7a284366a55123fa","src/header/mod.rs":"b91ee39f0f76aba6a2ca4e818126d0185272c150c29a43dfbec60891d213c45a","src/header/name.rs":"4bac13980f1162c7753345aec3b91db257859274f55da0fb5ea3d074ca286ac7","src/header/value.rs":"ffe6cce472ad21cd72cba4d60f615797f586cb16f1bcb0cd66dace7a9b4eba9f","src/lib.rs":"e28d0c142a8edcd84da28e5bb5ca8da56e8a4b3cb07ddf849217910e02432d84","src/method.rs":"1de55021d29174c462ed60042d0d263276af131a25c4d942c91afb09622644f8","src/request.rs":"8c76af67f41c62774c298016ab562a1ab0e8f6d9ab5617e0294a10c0200e5f74","src/response.rs":"5a17fafd73e6d51f2fe43af26143aa2390384c09045d8b634b68515118665bdb","src/status.rs":"fd9d1c1670bde5f94934ff2a9fa9c7f2db5bbe32a750e4e202bf2775b5c5cac3","src/uri/authority.rs":"4df1371b3bd751dd8b5c4b88a4dc67eac3c691748992223e78d7eb859832ebe0","src/uri/builder.rs":"875506b3a603a6e35557548ed0cf3beb7de0a4d1c898316e7293f3bc2ffb05c5","src/uri/mod.rs":"673e6fa2f50426e82225c5456cc5d7aa6bf8988d18ccc6ea74adbe1490c54610","src/uri/path.rs":"72a300e800a85dd9901a18dae3d3f0beacb783472d66a70615486194f43cd118","src/uri/port.rs":"a30793678abc96e833d026d96f060244183ab631e19eafbbad8e4643c7bb9d86","src/uri/scheme.rs":"59e6f12d3e1e1ee982e68a4a6556f25e94073ca3d77c372b6d8d71daf8f62f2a","src/uri/tests.rs":"61f88b73490c2442ec12cb0829aa1ddd28f1bce874b4fc6dd7a544c80280aeb1","src/version.rs":"623ef60a450203b051f3457e2f095508b66aaaa799b1447fb1b34d92cb2e7d62","tests/header_map.rs":"b81993a9042c21fd64114fb099b1666ea4685246f4f1c76bac72b825b6a3741b","tests/header_map_fuzz.rs":"a71387a8e1a3906f713a00cde443790cd7a86b7e37cafa2f9ed4d432cbf97026","tests/status_code.rs":"2e61889b07a87645465847b6245b681e022e7867a9d25f5e07f71c5ba02e7c7a"},"package":"1323096b05d41827dadeaee54c9981958c0f94e670bc94ed80037d1a7b8b186b"} \ No newline at end of file 
+{"files":{"CHANGELOG.md":"204da5d7af7b01398181567e787363a489ce8dec0c114c5b9a9c6e053b4b57f5","Cargo.toml":"a341df7544c2e37863e0dc99b8a8639824a933547e1ba0bed9dbe5a7228feea9","LICENSE-APACHE":"8bb1b50b0e5c9399ae33bd35fab2769010fa6c14e8860c729a52295d84896b7a","LICENSE-MIT":"dc91f8200e4b2a1f9261035d4c18c33c246911a6c0f7b543d75347e61b249cff","README.md":"114c0e7cc1ac2336fb83acc2d3b40add27c513fcb1e047e07e3a2ba78b16a976","benches/header_map/basic.rs":"7aa1f667be9b7fd2aab249a6ca6cfa2f06229c79d3406a8354d07bf9ccf09fd8","benches/header_map/mod.rs":"c1a4c7291b94d2477c1aa6721a5cfbd3dc63242baf14b836138273830cc67990","benches/header_map/vec_map.rs":"48f4eb7a90b6f0844a445924cf24c80acfaf76f909acca4492bfeb9b1ea9e4a4","benches/header_name.rs":"0c6ea9862807f7973b9e6577ef8959b3f41da04837cde326faa73d028caeb420","benches/header_value.rs":"7aa588f2155c513b7aebfe0b1cc7f7c6b0a699fafd817fe2fee8e46fb131bd76","benches/method.rs":"1913b8c95ecf542e9e50e20ff645f4024036e9b89f04a83e06303bf423053508","benches/uri.rs":"fbc88c25bc27d39162c674c162d1e40c8139452fbfb891a7caebb3c225baa1ae","src/byte_str.rs":"4767ad7bb6e5cda64a63e00ec549c1318e9305acb64d22d5cfbe54b8540b425b","src/convert.rs":"a31a4351cd3ee36a58ff4f5b30ce2c8967cde8486faea2d2673a8f8cb74b3204","src/error.rs":"8163a0b4f659a0f0070559568a7b553a5370553a4570bb496811ca3b978046bf","src/extensions.rs":"5f85c3e1eef53d0fcbd4a24a6c13828790dac74ad60f71cad365e14d39b196a6","src/header/map.rs":"b5dba93de3b9f5035b5f8c444e8c21d1f53b3c65cc258aff7a284366a55123fa","src/header/mod.rs":"b91ee39f0f76aba6a2ca4e818126d0185272c150c29a43dfbec60891d213c45a","src/header/name.rs":"9843c042ac6e1bf35875f44ea242f32a72ac5411756c9381891f1a71f855f654","src/header/value.rs":"ffea8236f38178fa3dd600b893d1eb8b698e3a052aaad2dbdda4a14e1b3c7108","src/lib.rs":"e39dcb7983c40e8edbdc6f2f3b86f65871991b2a13be3679db10035dba90d378","src/method.rs":"1de55021d29174c462ed60042d0d263276af131a25c4d942c91afb09622644f8","src/request.rs":"8c76af67f41c62774c298016ab562a1ab0e8f6d9ab5617e0294a10c0200e5f74","src/response.rs":"5a17fafd73e6d51f2fe43af26143aa2390384c09045d8b634b68515118665bdb","src/status.rs":"fd9d1c1670bde5f94934ff2a9fa9c7f2db5bbe32a750e4e202bf2775b5c5cac3","src/uri/authority.rs":"4df1371b3bd751dd8b5c4b88a4dc67eac3c691748992223e78d7eb859832ebe0","src/uri/builder.rs":"875506b3a603a6e35557548ed0cf3beb7de0a4d1c898316e7293f3bc2ffb05c5","src/uri/mod.rs":"a70ef96edd8b7920b404a88a5d7bf09b382bf24b93e5d28dfc82b83bd77e2ce7","src/uri/path.rs":"72a300e800a85dd9901a18dae3d3f0beacb783472d66a70615486194f43cd118","src/uri/port.rs":"a30793678abc96e833d026d96f060244183ab631e19eafbbad8e4643c7bb9d86","src/uri/scheme.rs":"59e6f12d3e1e1ee982e68a4a6556f25e94073ca3d77c372b6d8d71daf8f62f2a","src/uri/tests.rs":"61f88b73490c2442ec12cb0829aa1ddd28f1bce874b4fc6dd7a544c80280aeb1","src/version.rs":"623ef60a450203b051f3457e2f095508b66aaaa799b1447fb1b34d92cb2e7d62","tests/header_map.rs":"b81993a9042c21fd64114fb099b1666ea4685246f4f1c76bac72b825b6a3741b","tests/header_map_fuzz.rs":"a71387a8e1a3906f713a00cde443790cd7a86b7e37cafa2f9ed4d432cbf97026","tests/status_code.rs":"4c1bd08baffa6265aad5e837b189c269a3bef9031984b37980c24a8c671ac22c"},"package":"ff8670570af52249509a86f5e3e18a08c60b177071826898fde8997cf5f6bfbb"} \ No newline at end of file diff --git a/third_party/rust/http/CHANGELOG.md b/third_party/rust/http/CHANGELOG.md index a7eab4dac303..6a77a86e5834 100644 --- a/third_party/rust/http/CHANGELOG.md +++ b/third_party/rust/http/CHANGELOG.md @@ -1,3 +1,13 @@ +# 0.2.7 (April 28, 2022) + +* Add `extend()` method to `Extensions`. 
+* Add `From` and `From` impls for `Uri`. +* Make `HeaderName::from_static` a `const fn`. + +# 0.2.6 (December 30, 2021) + +* Upgrade internal `itoa` dependency to 1.0. + # 0.2.5 (September 21, 2021) * Add `is_empty()` and `len()` methods to `Extensions`. diff --git a/third_party/rust/http/Cargo.toml b/third_party/rust/http/Cargo.toml index 0a823ef265a1..ce94f50b10c8 100644 --- a/third_party/rust/http/Cargo.toml +++ b/third_party/rust/http/Cargo.toml @@ -11,15 +11,22 @@ [package] edition = "2018" +rust-version = "1.49.0" name = "http" -version = "0.2.5" -authors = ["Alex Crichton ", "Carl Lerche ", "Sean McArthur "] -description = "A set of types for representing HTTP requests and responses.\n" +version = "0.2.7" +authors = [ + "Alex Crichton ", + "Carl Lerche ", + "Sean McArthur ", +] +description = """ +A set of types for representing HTTP requests and responses. +""" documentation = "https://docs.rs/http" readme = "README.md" keywords = ["http"] categories = ["web-programming"] -license = "MIT/Apache-2.0" +license = "MIT OR Apache-2.0" repository = "https://github.com/hyperium/http" [[bench]] @@ -41,6 +48,7 @@ path = "benches/method.rs" [[bench]] name = "uri" path = "benches/uri.rs" + [dependencies.bytes] version = "1" @@ -48,7 +56,8 @@ version = "1" version = "1.0.5" [dependencies.itoa] -version = "0.4.1" +version = "1" + [dev-dependencies.doc-comment] version = "0.3" diff --git a/third_party/rust/http/README.md b/third_party/rust/http/README.md index 9a9f1542e1f8..3f26c7cec510 100644 --- a/third_party/rust/http/README.md +++ b/third_party/rust/http/README.md @@ -23,8 +23,6 @@ http = "0.2" Next, add this to your crate: ```rust -extern crate http; - use http::{Request, Response}; fn main() { @@ -37,8 +35,6 @@ fn main() { Create an HTTP request: ```rust -extern crate http; - use http::Request; fn main() { @@ -53,8 +49,6 @@ fn main() { Create an HTTP response: ```rust -extern crate http; - use http::{Response, StatusCode}; fn main() { @@ -70,8 +64,8 @@ fn main() { Licensed under either of -- Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://apache.org/licenses/LICENSE-2.0) -- MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) +- Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or https://apache.org/licenses/LICENSE-2.0) +- MIT license ([LICENSE-MIT](LICENSE-MIT) or https://opensource.org/licenses/MIT) # Contribution diff --git a/third_party/rust/http/benches/header_name.rs b/third_party/rust/http/benches/header_name.rs index d65f7d940d2e..4249f9877801 100644 --- a/third_party/rust/http/benches/header_name.rs +++ b/third_party/rust/http/benches/header_name.rs @@ -130,6 +130,128 @@ fn make_all_known_headers() -> Vec> { ] } +static ALL_KNOWN_HEADERS: &[&str] = &[ + // Standard request headers + "a-im", + "accept", + "accept-charset", + "accept-datetime", + "accept-encoding", + "accept-language", + "access-control-request-method", + "authorization", + "cache-control", + "connection", + "permanent", + "content-length", + "content-md5", + "content-type", + "cookie", + "date", + "expect", + "forwarded", + "from", + "host", + "permanent", + "http2-settings", + "if-match", + "if-modified-since", + "if-none-match", + "if-range", + "if-unmodified-since", + "max-forwards", + "origin", + "pragma", + "proxy-authorization", + "range", + "referer", + "te", + "user-agent", + "upgrade", + "via", + "warning", + // common_non_standard + "upgrade-insecure-requests", + "upgrade-insecure-requests", + "x-requested-with", + "dnt", + "x-forwarded-for", 
+ "x-forwarded-host", + "x-forwarded-proto", + "front-end-https", + "x-http-method-override", + "x-att-deviceid", + "x-wap-profile", + "proxy-connection", + "x-uidh", + "x-csrf-token", + "x-request-id", + "x-correlation-id", + "save-data", + // standard_response_headers + "accept-patch", + "accept-ranges", + "access-control-allow-credentials", + "access-control-allow-headers", + "access-control-allow-methods", + "access-control-allow-origin", + "access-control-expose-headers", + "access-control-max-age", + "age", + "allow", + "alt-svc", + "cache-control", + "connection", + "content-disposition", + "content-encoding", + "content-language", + "content-length", + "content-location", + "content-md5", + "content-range", + "content-type", + "date", + "delta-base", + "etag", + "expires", + "im", + "last-modified", + "link", + "location", + "p3p", + "permanent", + "pragma", + "proxy-authenticate", + "public-key-pins", + "retry-after", + "server", + "set-cookie", + "strict-transport-security", + "tk", + "trailer", + "transfer-encoding", + "upgrade", + "vary", + "via", + "warning", + "www-authenticate", + "x-frame-options", + // common_non_standard_response + "content-security-policy", + "refresh", + "status", + "timing-allow-origin", + "x-content-duration", + "x-content-security-policy", + "x-content-type-options", + "x-correlation-id", + "x-powered-by", + "x-request-id", + "x-ua-compatible", + "x-webkit-csp", + "x-xss-protection", +]; + #[bench] fn header_name_easy(b: &mut Bencher) { let name = b"Content-type"; @@ -138,6 +260,14 @@ fn header_name_easy(b: &mut Bencher) { }); } +#[bench] +fn header_name_custom(b: &mut Bencher) { + let name = b"Foo-Bar-Baz-Blah"; + b.iter(|| { + HeaderName::from_bytes(&name[..]).unwrap(); + }); +} + #[bench] fn header_name_bad(b: &mut Bencher) { let name = b"bad header name"; @@ -155,3 +285,12 @@ fn header_name_various(b: &mut Bencher) { } }); } + +#[bench] +fn header_name_from_static(b: &mut Bencher) { + b.iter(|| { + for name in ALL_KNOWN_HEADERS { + HeaderName::from_static(name); + } + }); +} diff --git a/third_party/rust/http/src/byte_str.rs b/third_party/rust/http/src/byte_str.rs index 04e3e15ed277..e83ff75d2024 100644 --- a/third_party/rust/http/src/byte_str.rs +++ b/third_party/rust/http/src/byte_str.rs @@ -18,7 +18,7 @@ impl ByteStr { } #[inline] - pub fn from_static(val: &'static str) -> ByteStr { + pub const fn from_static(val: &'static str) -> ByteStr { ByteStr { // Invariant: val is a str so contains vaid UTF-8. bytes: Bytes::from_static(val.as_bytes()), diff --git a/third_party/rust/http/src/extensions.rs b/third_party/rust/http/src/extensions.rs index e0b7ad1ae6a6..7e815df77264 100644 --- a/third_party/rust/http/src/extensions.rs +++ b/third_party/rust/http/src/extensions.rs @@ -188,6 +188,39 @@ impl Extensions { .as_ref() .map_or(0, |map| map.len()) } + + /// Extends `self` with another `Extensions`. + /// + /// If an instance of a specific type exists in both, the one in `self` is overwritten with the + /// one from `other`. 
+ /// + /// # Example + /// + /// ``` + /// # use http::Extensions; + /// let mut ext_a = Extensions::new(); + /// ext_a.insert(8u8); + /// ext_a.insert(16u16); + /// + /// let mut ext_b = Extensions::new(); + /// ext_b.insert(4u8); + /// ext_b.insert("hello"); + /// + /// ext_a.extend(ext_b); + /// assert_eq!(ext_a.len(), 3); + /// assert_eq!(ext_a.get::(), Some(&4u8)); + /// assert_eq!(ext_a.get::(), Some(&16u16)); + /// assert_eq!(ext_a.get::<&'static str>().copied(), Some("hello")); + /// ``` + pub fn extend(&mut self, other: Self) { + if let Some(other) = other.map { + if let Some(map) = &mut self.map { + map.extend(*other); + } else { + self.map = Some(other); + } + } + } } impl fmt::Debug for Extensions { diff --git a/third_party/rust/http/src/header/name.rs b/third_party/rust/http/src/header/name.rs index eb17a4239bdf..f8872257e2b9 100644 --- a/third_party/rust/http/src/header/name.rs +++ b/third_party/rust/http/src/header/name.rs @@ -64,7 +64,7 @@ macro_rules! standard_headers { ( $( $(#[$docs:meta])* - ($konst:ident, $upcase:ident, $name:expr); + ($konst:ident, $upcase:ident, $name_bytes:literal); )+ ) => { #[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)] @@ -85,52 +85,60 @@ macro_rules! standard_headers { #[inline] fn as_str(&self) -> &'static str { match *self { + // Safety: test_parse_standard_headers ensures these &[u8]s are &str-safe. $( - StandardHeader::$konst => $name, + StandardHeader::$konst => unsafe { std::str::from_utf8_unchecked( $name_bytes ) }, )+ } } + + const fn from_bytes(name_bytes: &[u8]) -> Option { + match name_bytes { + $( + $name_bytes => Some(StandardHeader::$konst), + )+ + _ => None, + } + } } #[cfg(test)] - const TEST_HEADERS: &'static [(StandardHeader, &'static str)] = &[ + const TEST_HEADERS: &'static [(StandardHeader, &'static [u8])] = &[ $( - (StandardHeader::$konst, $name), + (StandardHeader::$konst, $name_bytes), )+ ]; #[test] fn test_parse_standard_headers() { - for &(std, name) in TEST_HEADERS { + for &(std, name_bytes) in TEST_HEADERS { // Test lower case - assert_eq!(HeaderName::from_bytes(name.as_bytes()).unwrap(), HeaderName::from(std)); + assert_eq!(HeaderName::from_bytes(name_bytes).unwrap(), HeaderName::from(std)); // Test upper case - let upper = name.to_uppercase().to_string(); + let upper = std::str::from_utf8(name_bytes).expect("byte string constants are all utf-8").to_uppercase(); assert_eq!(HeaderName::from_bytes(upper.as_bytes()).unwrap(), HeaderName::from(std)); } } #[test] fn test_standard_headers_into_bytes() { - for &(std, name) in TEST_HEADERS { + for &(std, name_bytes) in TEST_HEADERS { + let name = std::str::from_utf8(name_bytes).unwrap(); let std = HeaderName::from(std); // Test lower case - let name_bytes = name.as_bytes(); let bytes: Bytes = HeaderName::from_bytes(name_bytes).unwrap().inner.into(); - assert_eq!(bytes, name_bytes); + assert_eq!(bytes, name); assert_eq!(HeaderName::from_bytes(name_bytes).unwrap(), std); // Test upper case - let upper = name.to_uppercase().to_string(); + let upper = name.to_uppercase(); let bytes: Bytes = HeaderName::from_bytes(upper.as_bytes()).unwrap().inner.into(); - assert_eq!(bytes, name.as_bytes()); + assert_eq!(bytes, name_bytes); assert_eq!(HeaderName::from_bytes(upper.as_bytes()).unwrap(), std); - - } } @@ -154,7 +162,7 @@ standard_headers! { /// where the request is done: when fetching a CSS stylesheet a different /// value is set for the request than when fetching an image, video or a /// script. 
- (Accept, ACCEPT, "accept"); + (Accept, ACCEPT, b"accept"); /// Advertises which character set the client is able to understand. /// @@ -169,7 +177,7 @@ standard_headers! { /// theoretically send back a 406 (Not Acceptable) error code. But, for a /// better user experience, this is rarely done and the more common way is /// to ignore the Accept-Charset header in this case. - (AcceptCharset, ACCEPT_CHARSET, "accept-charset"); + (AcceptCharset, ACCEPT_CHARSET, b"accept-charset"); /// Advertises which content encoding the client is able to understand. /// @@ -197,7 +205,7 @@ standard_headers! { /// forbidden, by an identity;q=0 or a *;q=0 without another explicitly set /// value for identity, the server must never send back a 406 Not Acceptable /// error. - (AcceptEncoding, ACCEPT_ENCODING, "accept-encoding"); + (AcceptEncoding, ACCEPT_ENCODING, b"accept-encoding"); /// Advertises which languages the client is able to understand. /// @@ -222,7 +230,7 @@ standard_headers! { /// send back a 406 (Not Acceptable) error code. But, for a better user /// experience, this is rarely done and more common way is to ignore the /// Accept-Language header in this case. - (AcceptLanguage, ACCEPT_LANGUAGE, "accept-language"); + (AcceptLanguage, ACCEPT_LANGUAGE, b"accept-language"); /// Marker used by the server to advertise partial request support. /// @@ -232,7 +240,7 @@ standard_headers! { /// /// In presence of an Accept-Ranges header, the browser may try to resume an /// interrupted download, rather than to start it from the start again. - (AcceptRanges, ACCEPT_RANGES, "accept-ranges"); + (AcceptRanges, ACCEPT_RANGES, b"accept-ranges"); /// Preflight response indicating if the response to the request can be /// exposed to the page. @@ -257,7 +265,7 @@ standard_headers! { /// be set on both sides (the Access-Control-Allow-Credentials header and in /// the XHR or Fetch request) in order for the CORS request with credentials /// to succeed. - (AccessControlAllowCredentials, ACCESS_CONTROL_ALLOW_CREDENTIALS, "access-control-allow-credentials"); + (AccessControlAllowCredentials, ACCESS_CONTROL_ALLOW_CREDENTIALS, b"access-control-allow-credentials"); /// Preflight response indicating permitted HTTP headers. /// @@ -273,33 +281,33 @@ standard_headers! { /// /// This header is required if the request has an /// Access-Control-Request-Headers header. - (AccessControlAllowHeaders, ACCESS_CONTROL_ALLOW_HEADERS, "access-control-allow-headers"); + (AccessControlAllowHeaders, ACCESS_CONTROL_ALLOW_HEADERS, b"access-control-allow-headers"); /// Preflight header response indicating permitted access methods. /// /// The Access-Control-Allow-Methods response header specifies the method or /// methods allowed when accessing the resource in response to a preflight /// request. - (AccessControlAllowMethods, ACCESS_CONTROL_ALLOW_METHODS, "access-control-allow-methods"); + (AccessControlAllowMethods, ACCESS_CONTROL_ALLOW_METHODS, b"access-control-allow-methods"); /// Indicates whether the response can be shared with resources with the /// given origin. - (AccessControlAllowOrigin, ACCESS_CONTROL_ALLOW_ORIGIN, "access-control-allow-origin"); + (AccessControlAllowOrigin, ACCESS_CONTROL_ALLOW_ORIGIN, b"access-control-allow-origin"); /// Indicates which headers can be exposed as part of the response by /// listing their names. 
- (AccessControlExposeHeaders, ACCESS_CONTROL_EXPOSE_HEADERS, "access-control-expose-headers"); + (AccessControlExposeHeaders, ACCESS_CONTROL_EXPOSE_HEADERS, b"access-control-expose-headers"); /// Indicates how long the results of a preflight request can be cached. - (AccessControlMaxAge, ACCESS_CONTROL_MAX_AGE, "access-control-max-age"); + (AccessControlMaxAge, ACCESS_CONTROL_MAX_AGE, b"access-control-max-age"); /// Informs the server which HTTP headers will be used when an actual /// request is made. - (AccessControlRequestHeaders, ACCESS_CONTROL_REQUEST_HEADERS, "access-control-request-headers"); + (AccessControlRequestHeaders, ACCESS_CONTROL_REQUEST_HEADERS, b"access-control-request-headers"); /// Informs the server know which HTTP method will be used when the actual /// request is made. - (AccessControlRequestMethod, ACCESS_CONTROL_REQUEST_METHOD, "access-control-request-method"); + (AccessControlRequestMethod, ACCESS_CONTROL_REQUEST_METHOD, b"access-control-request-method"); /// Indicates the time in seconds the object has been in a proxy cache. /// @@ -307,7 +315,7 @@ standard_headers! { /// probably just fetched from the origin server; otherwise It is usually /// calculated as a difference between the proxy's current date and the Date /// general header included in the HTTP response. - (Age, AGE, "age"); + (Age, AGE, b"age"); /// Lists the set of methods support by a resource. /// @@ -316,16 +324,16 @@ standard_headers! { /// empty Allow header indicates that the resource allows no request /// methods, which might occur temporarily for a given resource, for /// example. - (Allow, ALLOW, "allow"); + (Allow, ALLOW, b"allow"); /// Advertises the availability of alternate services to clients. - (AltSvc, ALT_SVC, "alt-svc"); + (AltSvc, ALT_SVC, b"alt-svc"); /// Contains the credentials to authenticate a user agent with a server. /// /// Usually this header is included after the server has responded with a /// 401 Unauthorized status and the WWW-Authenticate header. - (Authorization, AUTHORIZATION, "authorization"); + (Authorization, AUTHORIZATION, b"authorization"); /// Specifies directives for caching mechanisms in both requests and /// responses. @@ -333,7 +341,7 @@ standard_headers! { /// Caching directives are unidirectional, meaning that a given directive in /// a request is not implying that the same directive is to be given in the /// response. - (CacheControl, CACHE_CONTROL, "cache-control"); + (CacheControl, CACHE_CONTROL, b"cache-control"); /// Controls whether or not the network connection stays open after the /// current transaction finishes. @@ -348,7 +356,7 @@ standard_headers! { /// to consume them and not to forward them further. Standard hop-by-hop /// headers can be listed too (it is often the case of Keep-Alive, but this /// is not mandatory. - (Connection, CONNECTION, "connection"); + (Connection, CONNECTION, b"connection"); /// Indicates if the content is expected to be displayed inline. /// @@ -368,7 +376,7 @@ standard_headers! { /// to HTTP forms and POST requests. Only the value form-data, as well as /// the optional directive name and filename, can be used in the HTTP /// context. - (ContentDisposition, CONTENT_DISPOSITION, "content-disposition"); + (ContentDisposition, CONTENT_DISPOSITION, b"content-disposition"); /// Used to compress the media-type. /// @@ -380,7 +388,7 @@ standard_headers! { /// use this field, but some types of resources, like jpeg images, are /// already compressed. 
Sometimes using additional compression doesn't /// reduce payload size and can even make the payload longer. - (ContentEncoding, CONTENT_ENCODING, "content-encoding"); + (ContentEncoding, CONTENT_ENCODING, b"content-encoding"); /// Used to describe the languages intended for the audience. /// @@ -395,13 +403,13 @@ standard_headers! { /// intended for all language audiences. Multiple language tags are also /// possible, as well as applying the Content-Language header to various /// media types and not only to textual documents. - (ContentLanguage, CONTENT_LANGUAGE, "content-language"); + (ContentLanguage, CONTENT_LANGUAGE, b"content-language"); /// Indicates the size of the entity-body. /// /// The header value must be a decimal indicating the number of octets sent /// to the recipient. - (ContentLength, CONTENT_LENGTH, "content-length"); + (ContentLength, CONTENT_LENGTH, b"content-length"); /// Indicates an alternate location for the returned data. /// @@ -414,10 +422,10 @@ standard_headers! { /// without the need of further content negotiation. Location is a header /// associated with the response, while Content-Location is associated with /// the entity returned. - (ContentLocation, CONTENT_LOCATION, "content-location"); + (ContentLocation, CONTENT_LOCATION, b"content-location"); /// Indicates where in a full body message a partial message belongs. - (ContentRange, CONTENT_RANGE, "content-range"); + (ContentRange, CONTENT_RANGE, b"content-range"); /// Allows controlling resources the user agent is allowed to load for a /// given page. @@ -425,7 +433,7 @@ standard_headers! { /// With a few exceptions, policies mostly involve specifying server origins /// and script endpoints. This helps guard against cross-site scripting /// attacks (XSS). - (ContentSecurityPolicy, CONTENT_SECURITY_POLICY, "content-security-policy"); + (ContentSecurityPolicy, CONTENT_SECURITY_POLICY, b"content-security-policy"); /// Allows experimenting with policies by monitoring their effects. /// @@ -433,7 +441,7 @@ standard_headers! { /// developers to experiment with policies by monitoring (but not enforcing) /// their effects. These violation reports consist of JSON documents sent /// via an HTTP POST request to the specified URI. - (ContentSecurityPolicyReportOnly, CONTENT_SECURITY_POLICY_REPORT_ONLY, "content-security-policy-report-only"); + (ContentSecurityPolicyReportOnly, CONTENT_SECURITY_POLICY_REPORT_ONLY, b"content-security-policy-report-only"); /// Used to indicate the media type of the resource. /// @@ -445,23 +453,23 @@ standard_headers! { /// /// In requests, (such as POST or PUT), the client tells the server what /// type of data is actually sent. - (ContentType, CONTENT_TYPE, "content-type"); + (ContentType, CONTENT_TYPE, b"content-type"); /// Contains stored HTTP cookies previously sent by the server with the /// Set-Cookie header. /// /// The Cookie header might be omitted entirely, if the privacy setting of /// the browser are set to block them, for example. - (Cookie, COOKIE, "cookie"); + (Cookie, COOKIE, b"cookie"); /// Indicates the client's tracking preference. /// /// This header lets users indicate whether they would prefer privacy rather /// than personalized content. - (Dnt, DNT, "dnt"); + (Dnt, DNT, b"dnt"); /// Contains the date and time at which the message was originated. - (Date, DATE, "date"); + (Date, DATE, b"date"); /// Identifier for a specific version of a resource. /// @@ -477,7 +485,7 @@ standard_headers! 
{ /// to quickly determine whether two representations of a resource are the /// same, but they might also be set to persist indefinitely by a tracking /// server. - (Etag, ETAG, "etag"); + (Etag, ETAG, b"etag"); /// Indicates expectations that need to be fulfilled by the server in order /// to properly handle the request. @@ -496,7 +504,7 @@ standard_headers! { /// /// No common browsers send the Expect header, but some other clients such /// as cURL do so by default. - (Expect, EXPECT, "expect"); + (Expect, EXPECT, b"expect"); /// Contains the date/time after which the response is considered stale. /// @@ -505,7 +513,7 @@ standard_headers! { /// /// If there is a Cache-Control header with the "max-age" or "s-max-age" /// directive in the response, the Expires header is ignored. - (Expires, EXPIRES, "expires"); + (Expires, EXPIRES, b"expires"); /// Contains information from the client-facing side of proxy servers that /// is altered or lost when a proxy is involved in the path of the request. @@ -517,7 +525,7 @@ standard_headers! { /// location-dependent content and by design it exposes privacy sensitive /// information, such as the IP address of the client. Therefore the user's /// privacy must be kept in mind when deploying this header. - (Forwarded, FORWARDED, "forwarded"); + (Forwarded, FORWARDED, b"forwarded"); /// Contains an Internet email address for a human user who controls the /// requesting user agent. @@ -526,7 +534,7 @@ standard_headers! { /// header should be sent, so you can be contacted if problems occur on /// servers, such as if the robot is sending excessive, unwanted, or invalid /// requests. - (From, FROM, "from"); + (From, FROM, b"from"); /// Specifies the domain name of the server and (optionally) the TCP port /// number on which the server is listening. @@ -537,7 +545,7 @@ standard_headers! { /// A Host header field must be sent in all HTTP/1.1 request messages. A 400 /// (Bad Request) status code will be sent to any HTTP/1.1 request message /// that lacks a Host header field or contains more than one. - (Host, HOST, "host"); + (Host, HOST, b"host"); /// Makes a request conditional based on the E-Tag. /// @@ -562,7 +570,7 @@ standard_headers! { /// that has been done since the original resource was fetched. If the /// request cannot be fulfilled, the 412 (Precondition Failed) response is /// returned. - (IfMatch, IF_MATCH, "if-match"); + (IfMatch, IF_MATCH, b"if-match"); /// Makes a request conditional based on the modification date. /// @@ -579,7 +587,7 @@ standard_headers! { /// /// The most common use case is to update a cached entity that has no /// associated ETag. - (IfModifiedSince, IF_MODIFIED_SINCE, "if-modified-since"); + (IfModifiedSince, IF_MODIFIED_SINCE, b"if-modified-since"); /// Makes a request conditional based on the E-Tag. /// @@ -615,7 +623,7 @@ standard_headers! { /// guaranteeing that another upload didn't happen before, losing the data /// of the previous put; this problems is the variation of the lost update /// problem. - (IfNoneMatch, IF_NONE_MATCH, "if-none-match"); + (IfNoneMatch, IF_NONE_MATCH, b"if-none-match"); /// Makes a request conditional based on range. /// @@ -631,7 +639,7 @@ standard_headers! { /// The most common use case is to resume a download, to guarantee that the /// stored resource has not been modified since the last fragment has been /// received. - (IfRange, IF_RANGE, "if-range"); + (IfRange, IF_RANGE, b"if-range"); /// Makes the request conditional based on the last modification date. 
/// @@ -652,14 +660,14 @@ standard_headers! { /// * In conjunction with a range request with a If-Range header, it can be /// used to ensure that the new fragment requested comes from an unmodified /// document. - (IfUnmodifiedSince, IF_UNMODIFIED_SINCE, "if-unmodified-since"); + (IfUnmodifiedSince, IF_UNMODIFIED_SINCE, b"if-unmodified-since"); /// Content-Types that are acceptable for the response. - (LastModified, LAST_MODIFIED, "last-modified"); + (LastModified, LAST_MODIFIED, b"last-modified"); /// Allows the server to point an interested client to another resource /// containing metadata about the requested resource. - (Link, LINK, "link"); + (Link, LINK, b"link"); /// Indicates the URL to redirect a page to. /// @@ -690,11 +698,11 @@ standard_headers! { /// when content negotiation happened, without the need of further content /// negotiation. Location is a header associated with the response, while /// Content-Location is associated with the entity returned. - (Location, LOCATION, "location"); + (Location, LOCATION, b"location"); /// Indicates the max number of intermediaries the request should be sent /// through. - (MaxForwards, MAX_FORWARDS, "max-forwards"); + (MaxForwards, MAX_FORWARDS, b"max-forwards"); /// Indicates where a fetch originates from. /// @@ -702,7 +710,7 @@ standard_headers! { /// sent with CORS requests, as well as with POST requests. It is similar to /// the Referer header, but, unlike this header, it doesn't disclose the /// whole path. - (Origin, ORIGIN, "origin"); + (Origin, ORIGIN, b"origin"); /// HTTP/1.0 header usually used for backwards compatibility. /// @@ -710,7 +718,7 @@ standard_headers! { /// that may have various effects along the request-response chain. It is /// used for backwards compatibility with HTTP/1.0 caches where the /// Cache-Control HTTP/1.1 header is not yet present. - (Pragma, PRAGMA, "pragma"); + (Pragma, PRAGMA, b"pragma"); /// Defines the authentication method that should be used to gain access to /// a proxy. @@ -728,14 +736,14 @@ standard_headers! { /// /// The `proxy-authenticate` header is sent along with a `407 Proxy /// Authentication Required`. - (ProxyAuthenticate, PROXY_AUTHENTICATE, "proxy-authenticate"); + (ProxyAuthenticate, PROXY_AUTHENTICATE, b"proxy-authenticate"); /// Contains the credentials to authenticate a user agent to a proxy server. /// /// This header is usually included after the server has responded with a /// 407 Proxy Authentication Required status and the Proxy-Authenticate /// header. - (ProxyAuthorization, PROXY_AUTHORIZATION, "proxy-authorization"); + (ProxyAuthorization, PROXY_AUTHORIZATION, b"proxy-authorization"); /// Associates a specific cryptographic public key with a certain server. /// @@ -743,14 +751,14 @@ standard_headers! { /// or several keys are pinned and none of them are used by the server, the /// browser will not accept the response as legitimate, and will not display /// it. - (PublicKeyPins, PUBLIC_KEY_PINS, "public-key-pins"); + (PublicKeyPins, PUBLIC_KEY_PINS, b"public-key-pins"); /// Sends reports of pinning violation to the report-uri specified in the /// header. /// /// Unlike `Public-Key-Pins`, this header still allows browsers to connect /// to the server if the pinning is violated. - (PublicKeyPinsReportOnly, PUBLIC_KEY_PINS_REPORT_ONLY, "public-key-pins-report-only"); + (PublicKeyPinsReportOnly, PUBLIC_KEY_PINS_REPORT_ONLY, b"public-key-pins-report-only"); /// Indicates the part of a document that the server should return. /// @@ -760,7 +768,7 @@ standard_headers! 
{ /// the ranges are invalid, the server returns the 416 Range Not Satisfiable /// error. The server can also ignore the Range header and return the whole /// document with a 200 status code. - (Range, RANGE, "range"); + (Range, RANGE, b"range"); /// Contains the address of the previous web page from which a link to the /// currently requested page was followed. @@ -768,15 +776,15 @@ standard_headers! { /// The Referer header allows servers to identify where people are visiting /// them from and may use that data for analytics, logging, or optimized /// caching, for example. - (Referer, REFERER, "referer"); + (Referer, REFERER, b"referer"); /// Governs which referrer information should be included with requests /// made. - (ReferrerPolicy, REFERRER_POLICY, "referrer-policy"); + (ReferrerPolicy, REFERRER_POLICY, b"referrer-policy"); /// Informs the web browser that the current page or frame should be /// refreshed. - (Refresh, REFRESH, "refresh"); + (Refresh, REFRESH, b"refresh"); /// The Retry-After response HTTP header indicates how long the user agent /// should wait before making a follow-up request. There are two main cases @@ -788,20 +796,20 @@ standard_headers! { /// * When sent with a redirect response, such as 301 (Moved Permanently), /// it indicates the minimum time that the user agent is asked to wait /// before issuing the redirected request. - (RetryAfter, RETRY_AFTER, "retry-after"); + (RetryAfter, RETRY_AFTER, b"retry-after"); /// The |Sec-WebSocket-Accept| header field is used in the WebSocket /// opening handshake. It is sent from the server to the client to /// confirm that the server is willing to initiate the WebSocket /// connection. - (SecWebSocketAccept, SEC_WEBSOCKET_ACCEPT, "sec-websocket-accept"); + (SecWebSocketAccept, SEC_WEBSOCKET_ACCEPT, b"sec-websocket-accept"); /// The |Sec-WebSocket-Extensions| header field is used in the WebSocket /// opening handshake. It is initially sent from the client to the /// server, and then subsequently sent from the server to the client, to /// agree on a set of protocol-level extensions to use for the duration /// of the connection. - (SecWebSocketExtensions, SEC_WEBSOCKET_EXTENSIONS, "sec-websocket-extensions"); + (SecWebSocketExtensions, SEC_WEBSOCKET_EXTENSIONS, b"sec-websocket-extensions"); /// The |Sec-WebSocket-Key| header field is used in the WebSocket opening /// handshake. It is sent from the client to the server to provide part @@ -810,14 +818,14 @@ standard_headers! { /// does not accept connections from non-WebSocket clients (e.g., HTTP /// clients) that are being abused to send data to unsuspecting WebSocket /// servers. - (SecWebSocketKey, SEC_WEBSOCKET_KEY, "sec-websocket-key"); + (SecWebSocketKey, SEC_WEBSOCKET_KEY, b"sec-websocket-key"); /// The |Sec-WebSocket-Protocol| header field is used in the WebSocket /// opening handshake. It is sent from the client to the server and back /// from the server to the client to confirm the subprotocol of the /// connection. This enables scripts to both select a subprotocol and be /// sure that the server agreed to serve that subprotocol. - (SecWebSocketProtocol, SEC_WEBSOCKET_PROTOCOL, "sec-websocket-protocol"); + (SecWebSocketProtocol, SEC_WEBSOCKET_PROTOCOL, b"sec-websocket-protocol"); /// The |Sec-WebSocket-Version| header field is used in the WebSocket /// opening handshake. It is sent from the client to the server to @@ -825,7 +833,7 @@ standard_headers! 
{ /// servers to correctly interpret the opening handshake and subsequent /// data being sent from the data, and close the connection if the server /// cannot interpret that data in a safe manner. - (SecWebSocketVersion, SEC_WEBSOCKET_VERSION, "sec-websocket-version"); + (SecWebSocketVersion, SEC_WEBSOCKET_VERSION, b"sec-websocket-version"); /// Contains information about the software used by the origin server to /// handle the request. @@ -834,13 +842,13 @@ standard_headers! { /// potentially reveal internal implementation details that might make it /// (slightly) easier for attackers to find and exploit known security /// holes. - (Server, SERVER, "server"); + (Server, SERVER, b"server"); /// Used to send cookies from the server to the user agent. - (SetCookie, SET_COOKIE, "set-cookie"); + (SetCookie, SET_COOKIE, b"set-cookie"); /// Tells the client to communicate with HTTPS instead of using HTTP. - (StrictTransportSecurity, STRICT_TRANSPORT_SECURITY, "strict-transport-security"); + (StrictTransportSecurity, STRICT_TRANSPORT_SECURITY, b"strict-transport-security"); /// Informs the server of transfer encodings willing to be accepted as part /// of the response. @@ -850,11 +858,11 @@ standard_headers! { /// recipients and you that don't have to specify "chunked" using the TE /// header. However, it is useful for setting if the client is accepting /// trailer fields in a chunked transfer coding using the "trailers" value. - (Te, TE, "te"); + (Te, TE, b"te"); /// Allows the sender to include additional fields at the end of chunked /// messages. - (Trailer, TRAILER, "trailer"); + (Trailer, TRAILER, b"trailer"); /// Specifies the form of encoding used to safely transfer the entity to the /// client. @@ -868,18 +876,18 @@ standard_headers! { /// When present on a response to a `HEAD` request that has no body, it /// indicates the value that would have applied to the corresponding `GET` /// message. - (TransferEncoding, TRANSFER_ENCODING, "transfer-encoding"); + (TransferEncoding, TRANSFER_ENCODING, b"transfer-encoding"); /// Contains a string that allows identifying the requesting client's /// software. - (UserAgent, USER_AGENT, "user-agent"); + (UserAgent, USER_AGENT, b"user-agent"); /// Used as part of the exchange to upgrade the protocol. - (Upgrade, UPGRADE, "upgrade"); + (Upgrade, UPGRADE, b"upgrade"); /// Sends a signal to the server expressing the client’s preference for an /// encrypted and authenticated response. - (UpgradeInsecureRequests, UPGRADE_INSECURE_REQUESTS, "upgrade-insecure-requests"); + (UpgradeInsecureRequests, UPGRADE_INSECURE_REQUESTS, b"upgrade-insecure-requests"); /// Determines how to match future requests with cached responses. /// @@ -891,7 +899,7 @@ standard_headers! { /// /// The `vary` header should be set on a 304 Not Modified response exactly /// like it would have been set on an equivalent 200 OK response. - (Vary, VARY, "vary"); + (Vary, VARY, b"vary"); /// Added by proxies to track routing. /// @@ -900,7 +908,7 @@ standard_headers! { /// It is used for tracking message forwards, avoiding request loops, and /// identifying the protocol capabilities of senders along the /// request/response chain. - (Via, VIA, "via"); + (Via, VIA, b"via"); /// General HTTP header contains information about possible problems with /// the status of the message. @@ -908,11 +916,11 @@ standard_headers! { /// More than one `warning` header may appear in a response. 
Warning header /// fields can in general be applied to any message, however some warn-codes /// are specific to caches and can only be applied to response messages. - (Warning, WARNING, "warning"); + (Warning, WARNING, b"warning"); /// Defines the authentication method that should be used to gain access to /// a resource. - (WwwAuthenticate, WWW_AUTHENTICATE, "www-authenticate"); + (WwwAuthenticate, WWW_AUTHENTICATE, b"www-authenticate"); /// Marker used by the server to indicate that the MIME types advertised in /// the `content-type` headers should not be changed and be followed. @@ -927,7 +935,7 @@ standard_headers! { /// less aggressive. /// /// Site security testers usually expect this header to be set. - (XContentTypeOptions, X_CONTENT_TYPE_OPTIONS, "x-content-type-options"); + (XContentTypeOptions, X_CONTENT_TYPE_OPTIONS, b"x-content-type-options"); /// Controls DNS prefetching. /// @@ -940,7 +948,7 @@ standard_headers! { /// This prefetching is performed in the background, so that the DNS is /// likely to have been resolved by the time the referenced items are /// needed. This reduces latency when the user clicks a link. - (XDnsPrefetchControl, X_DNS_PREFETCH_CONTROL, "x-dns-prefetch-control"); + (XDnsPrefetchControl, X_DNS_PREFETCH_CONTROL, b"x-dns-prefetch-control"); /// Indicates whether or not a browser should be allowed to render a page in /// a frame. @@ -950,7 +958,7 @@ standard_headers! { /// /// The added security is only provided if the user accessing the document /// is using a browser supporting `x-frame-options`. - (XFrameOptions, X_FRAME_OPTIONS, "x-frame-options"); + (XFrameOptions, X_FRAME_OPTIONS, b"x-frame-options"); /// Stop pages from loading when an XSS attack is detected. /// @@ -961,7 +969,7 @@ standard_headers! { /// implement a strong Content-Security-Policy that disables the use of /// inline JavaScript ('unsafe-inline'), they can still provide protections /// for users of older web browsers that don't yet support CSP. - (XXssProtection, X_XSS_PROTECTION, "x-xss-protection"); + (XXssProtection, X_XSS_PROTECTION, b"x-xss-protection"); } /// Valid header name characters @@ -1039,602 +1047,30 @@ const HEADER_CHARS_H2: [u8; 256] = [ 0, 0, 0, 0, 0, 0 // 25x ]; -#[cfg(any(not(debug_assertions), not(target_arch = "wasm32")))] -macro_rules! eq { - (($($cmp:expr,)*) $v:ident[$n:expr] ==) => { - $($cmp) && * - }; - (($($cmp:expr,)*) $v:ident[$n:expr] == $a:tt $($rest:tt)*) => { - eq!(($($cmp,)* $v[$n] == $a,) $v[$n+1] == $($rest)*) - }; - ($v:ident == $($rest:tt)+) => { - eq!(() $v[0] == $($rest)+) - }; - ($v:ident[$n:expr] == $($rest:tt)+) => { - eq!(() $v[$n] == $($rest)+) - }; -} - -#[cfg(any(not(debug_assertions), not(target_arch = "wasm32")))] -/// This version is best under optimized mode, however in a wasm debug compile, -/// the `eq` macro expands to 1 + 1 + 1 + 1... and wasm explodes when this chain gets too long -/// See https://github.com/DenisKolodin/yew/issues/478 fn parse_hdr<'a>( data: &'a [u8], b: &'a mut [u8; 64], table: &[u8; 256], ) -> Result, InvalidHeaderName> { - use self::StandardHeader::*; - - let len = data.len(); - - let validate = |buf: &'a [u8], len: usize| { - let buf = &buf[..len]; - if buf.iter().any(|&b| b == 0) { - Err(InvalidHeaderName::new()) - } else { - Ok(HdrName::custom(buf, true)) - } - }; - - - macro_rules! 
to_lower { - ($d:ident, $src:ident, 1) => { $d[0] = table[$src[0] as usize]; }; - ($d:ident, $src:ident, 2) => { to_lower!($d, $src, 1); $d[1] = table[$src[1] as usize]; }; - ($d:ident, $src:ident, 3) => { to_lower!($d, $src, 2); $d[2] = table[$src[2] as usize]; }; - ($d:ident, $src:ident, 4) => { to_lower!($d, $src, 3); $d[3] = table[$src[3] as usize]; }; - ($d:ident, $src:ident, 5) => { to_lower!($d, $src, 4); $d[4] = table[$src[4] as usize]; }; - ($d:ident, $src:ident, 6) => { to_lower!($d, $src, 5); $d[5] = table[$src[5] as usize]; }; - ($d:ident, $src:ident, 7) => { to_lower!($d, $src, 6); $d[6] = table[$src[6] as usize]; }; - ($d:ident, $src:ident, 8) => { to_lower!($d, $src, 7); $d[7] = table[$src[7] as usize]; }; - ($d:ident, $src:ident, 9) => { to_lower!($d, $src, 8); $d[8] = table[$src[8] as usize]; }; - ($d:ident, $src:ident, 10) => { to_lower!($d, $src, 9); $d[9] = table[$src[9] as usize]; }; - ($d:ident, $src:ident, 11) => { to_lower!($d, $src, 10); $d[10] = table[$src[10] as usize]; }; - ($d:ident, $src:ident, 12) => { to_lower!($d, $src, 11); $d[11] = table[$src[11] as usize]; }; - ($d:ident, $src:ident, 13) => { to_lower!($d, $src, 12); $d[12] = table[$src[12] as usize]; }; - ($d:ident, $src:ident, 14) => { to_lower!($d, $src, 13); $d[13] = table[$src[13] as usize]; }; - ($d:ident, $src:ident, 15) => { to_lower!($d, $src, 14); $d[14] = table[$src[14] as usize]; }; - ($d:ident, $src:ident, 16) => { to_lower!($d, $src, 15); $d[15] = table[$src[15] as usize]; }; - ($d:ident, $src:ident, 17) => { to_lower!($d, $src, 16); $d[16] = table[$src[16] as usize]; }; - ($d:ident, $src:ident, 18) => { to_lower!($d, $src, 17); $d[17] = table[$src[17] as usize]; }; - ($d:ident, $src:ident, 19) => { to_lower!($d, $src, 18); $d[18] = table[$src[18] as usize]; }; - ($d:ident, $src:ident, 20) => { to_lower!($d, $src, 19); $d[19] = table[$src[19] as usize]; }; - ($d:ident, $src:ident, 21) => { to_lower!($d, $src, 20); $d[20] = table[$src[20] as usize]; }; - ($d:ident, $src:ident, 22) => { to_lower!($d, $src, 21); $d[21] = table[$src[21] as usize]; }; - ($d:ident, $src:ident, 23) => { to_lower!($d, $src, 22); $d[22] = table[$src[22] as usize]; }; - ($d:ident, $src:ident, 24) => { to_lower!($d, $src, 23); $d[23] = table[$src[23] as usize]; }; - ($d:ident, $src:ident, 25) => { to_lower!($d, $src, 24); $d[24] = table[$src[24] as usize]; }; - ($d:ident, $src:ident, 26) => { to_lower!($d, $src, 25); $d[25] = table[$src[25] as usize]; }; - ($d:ident, $src:ident, 27) => { to_lower!($d, $src, 26); $d[26] = table[$src[26] as usize]; }; - ($d:ident, $src:ident, 28) => { to_lower!($d, $src, 27); $d[27] = table[$src[27] as usize]; }; - ($d:ident, $src:ident, 29) => { to_lower!($d, $src, 28); $d[28] = table[$src[28] as usize]; }; - ($d:ident, $src:ident, 30) => { to_lower!($d, $src, 29); $d[29] = table[$src[29] as usize]; }; - ($d:ident, $src:ident, 31) => { to_lower!($d, $src, 30); $d[30] = table[$src[30] as usize]; }; - ($d:ident, $src:ident, 32) => { to_lower!($d, $src, 31); $d[31] = table[$src[31] as usize]; }; - ($d:ident, $src:ident, 33) => { to_lower!($d, $src, 32); $d[32] = table[$src[32] as usize]; }; - ($d:ident, $src:ident, 34) => { to_lower!($d, $src, 33); $d[33] = table[$src[33] as usize]; }; - ($d:ident, $src:ident, 35) => { to_lower!($d, $src, 34); $d[34] = table[$src[34] as usize]; }; - } - - match len { + match data.len() { 0 => Err(InvalidHeaderName::new()), - 2 => { - to_lower!(b, data, 2); - - if eq!(b == b't' b'e') { - Ok(Te.into()) - } else { - validate(b, len) - } - } - 3 => { - 
to_lower!(b, data, 3); - - if eq!(b == b'a' b'g' b'e') { - Ok(Age.into()) - } else if eq!(b == b'v' b'i' b'a') { - Ok(Via.into()) - } else if eq!(b == b'd' b'n' b't') { - Ok(Dnt.into()) - } else { - validate(b, len) - } - } - 4 => { - to_lower!(b, data, 4); - - if eq!(b == b'd' b'a' b't' b'e') { - Ok(Date.into()) - } else if eq!(b == b'e' b't' b'a' b'g') { - Ok(Etag.into()) - } else if eq!(b == b'f' b'r' b'o' b'm') { - Ok(From.into()) - } else if eq!(b == b'h' b'o' b's' b't') { - Ok(Host.into()) - } else if eq!(b == b'l' b'i' b'n' b'k') { - Ok(Link.into()) - } else if eq!(b == b'v' b'a' b'r' b'y') { - Ok(Vary.into()) - } else { - validate(b, len) - } - } - 5 => { - to_lower!(b, data, 5); - - if eq!(b == b'a' b'l' b'l' b'o' b'w') { - Ok(Allow.into()) - } else if eq!(b == b'r' b'a' b'n' b'g' b'e') { - Ok(Range.into()) - } else { - validate(b, len) - } - } - 6 => { - to_lower!(b, data, 6); - - if eq!(b == b'a' b'c' b'c' b'e' b'p' b't') { - return Ok(Accept.into()); - } else if eq!(b == b'c' b'o' b'o' b'k' b'i' b'e') { - return Ok(Cookie.into()); - } else if eq!(b == b'e' b'x' b'p' b'e' b'c' b't') { - return Ok(Expect.into()); - } else if eq!(b == b'o' b'r' b'i' b'g' b'i' b'n') { - return Ok(Origin.into()); - } else if eq!(b == b'p' b'r' b'a' b'g' b'm' b'a') { - return Ok(Pragma.into()); - } else if b[0] == b's' { - if eq!(b[1] == b'e' b'r' b'v' b'e' b'r') { - return Ok(Server.into()); - } - } - - validate(b, len) - } - 7 => { - to_lower!(b, data, 7); - - if eq!(b == b'a' b'l' b't' b'-' b's' b'v' b'c') { - Ok(AltSvc.into()) - } else if eq!(b == b'e' b'x' b'p' b'i' b'r' b'e' b's') { - Ok(Expires.into()) - } else if eq!(b == b'r' b'e' b'f' b'e' b'r' b'e' b'r') { - Ok(Referer.into()) - } else if eq!(b == b'r' b'e' b'f' b'r' b'e' b's' b'h') { - Ok(Refresh.into()) - } else if eq!(b == b't' b'r' b'a' b'i' b'l' b'e' b'r') { - Ok(Trailer.into()) - } else if eq!(b == b'u' b'p' b'g' b'r' b'a' b'd' b'e') { - Ok(Upgrade.into()) - } else if eq!(b == b'w' b'a' b'r' b'n' b'i' b'n' b'g') { - Ok(Warning.into()) - } else { - validate(b, len) - } - } - 8 => { - to_lower!(b, data, 8); - - if eq!(b == b'i' b'f' b'-') { - if eq!(b[3] == b'm' b'a' b't' b'c' b'h') { - return Ok(IfMatch.into()); - } else if eq!(b[3] == b'r' b'a' b'n' b'g' b'e') { - return Ok(IfRange.into()); - } - } else if eq!(b == b'l' b'o' b'c' b'a' b't' b'i' b'o' b'n') { - return Ok(Location.into()); - } - - validate(b, len) - } - 9 => { - to_lower!(b, data, 9); - - if eq!(b == b'f' b'o' b'r' b'w' b'a' b'r' b'd' b'e' b'd') { - Ok(Forwarded.into()) - } else { - validate(b, len) - } - } - 10 => { - to_lower!(b, data, 10); - - if eq!(b == b'c' b'o' b'n' b'n' b'e' b'c' b't' b'i' b'o' b'n') { - Ok(Connection.into()) - } else if eq!(b == b's' b'e' b't' b'-' b'c' b'o' b'o' b'k' b'i' b'e') { - Ok(SetCookie.into()) - } else if eq!(b == b'u' b's' b'e' b'r' b'-' b'a' b'g' b'e' b'n' b't') { - Ok(UserAgent.into()) - } else { - validate(b, len) - } - } - 11 => { - to_lower!(b, data, 11); - - if eq!(b == b'r' b'e' b't' b'r' b'y' b'-' b'a' b'f' b't' b'e' b'r') { - Ok(RetryAfter.into()) - } else { - validate(b, len) - } - } - 12 => { - to_lower!(b, data, 12); - - if eq!(b == b'c' b'o' b'n' b't' b'e' b'n' b't' b'-' b't' b'y' b'p' b'e') { - Ok(ContentType.into()) - } else if eq!(b == b'm' b'a' b'x' b'-' b'f' b'o' b'r' b'w' b'a' b'r' b'd' b's') { - Ok(MaxForwards.into()) - } else { - validate(b, len) - } - } - 13 => { - to_lower!(b, data, 13); - - if b[0] == b'a' { - if eq!(b[1] == b'c' b'c' b'e' b'p' b't' b'-' b'r' b'a' b'n' b'g' b'e' b's') { - return 
Ok(AcceptRanges.into()); - } else if eq!(b[1] == b'u' b't' b'h' b'o' b'r' b'i' b'z' b'a' b't' b'i' b'o' b'n') { - return Ok(Authorization.into()); - } - } else if b[0] == b'c' { - if eq!(b[1] == b'a' b'c' b'h' b'e' b'-' b'c' b'o' b'n' b't' b'r' b'o' b'l') { - return Ok(CacheControl.into()); - } else if eq!(b[1] == b'o' b'n' b't' b'e' b'n' b't' b'-' b'r' b'a' b'n' b'g' b'e' ) - { - return Ok(ContentRange.into()); - } - } else if eq!(b == b'i' b'f' b'-' b'n' b'o' b'n' b'e' b'-' b'm' b'a' b't' b'c' b'h') { - return Ok(IfNoneMatch.into()); - } else if eq!(b == b'l' b'a' b's' b't' b'-' b'm' b'o' b'd' b'i' b'f' b'i' b'e' b'd') { - return Ok(LastModified.into()); - } - - validate(b, len) - } - 14 => { - to_lower!(b, data, 14); - - if eq!(b == b'a' b'c' b'c' b'e' b'p' b't' b'-' b'c' b'h' b'a' b'r' b's' b'e' b't') { - Ok(AcceptCharset.into()) - } else if eq!(b == b'c' b'o' b'n' b't' b'e' b'n' b't' b'-' b'l' b'e' b'n' b'g' b't' b'h') - { - Ok(ContentLength.into()) - } else { - validate(b, len) - } - } - 15 => { - to_lower!(b, data, 15); - - if eq!(b == b'a' b'c' b'c' b'e' b'p' b't' b'-') { // accept- - if eq!(b[7] == b'e' b'n' b'c' b'o' b'd' b'i' b'n' b'g') { - return Ok(AcceptEncoding.into()) - } else if eq!(b[7] == b'l' b'a' b'n' b'g' b'u' b'a' b'g' b'e') { - return Ok(AcceptLanguage.into()) - } - } else if eq!(b == b'p' b'u' b'b' b'l' b'i' b'c' b'-' b'k' b'e' b'y' b'-' b'p' b'i' b'n' b's') { - return Ok(PublicKeyPins.into()) - } else if eq!(b == b'x' b'-' b'f' b'r' b'a' b'm' b'e' b'-' b'o' b'p' b't' b'i' b'o' b'n' b's') { - return Ok(XFrameOptions.into()) - } - else if eq!(b == b'r' b'e' b'f' b'e' b'r' b'r' b'e' b'r' b'-' b'p' b'o' b'l' b'i' b'c' b'y') { - return Ok(ReferrerPolicy.into()) - } - - validate(b, len) - } - 16 => { - to_lower!(b, data, 16); - - if eq!(b == b'c' b'o' b'n' b't' b'e' b'n' b't' b'-') { - if eq!(b[8] == b'l' b'a' b'n' b'g' b'u' b'a' b'g' b'e') { - return Ok(ContentLanguage.into()) - } else if eq!(b[8] == b'l' b'o' b'c' b'a' b't' b'i' b'o' b'n') { - return Ok(ContentLocation.into()) - } else if eq!(b[8] == b'e' b'n' b'c' b'o' b'd' b'i' b'n' b'g') { - return Ok(ContentEncoding.into()) - } - } else if eq!(b == b'w' b'w' b'w' b'-' b'a' b'u' b't' b'h' b'e' b'n' b't' b'i' b'c' b'a' b't' b'e') { - return Ok(WwwAuthenticate.into()) - } else if eq!(b == b'x' b'-' b'x' b's' b's' b'-' b'p' b'r' b'o' b't' b'e' b'c' b't' b'i' b'o' b'n') { - return Ok(XXssProtection.into()) - } - - validate(b, len) - } - 17 => { - to_lower!(b, data, 17); - - if eq!(b == b't' b'r' b'a' b'n' b's' b'f' b'e' b'r' b'-' b'e' b'n' b'c' b'o' b'd' b'i' b'n' b'g') { - Ok(TransferEncoding.into()) - } else if eq!(b == b'i' b'f' b'-' b'm' b'o' b'd' b'i' b'f' b'i' b'e' b'd' b'-' b's' b'i' b'n' b'c' b'e') { - Ok(IfModifiedSince.into()) - } else if eq!(b == b's' b'e' b'c' b'-' b'w' b'e' b'b' b's' b'o' b'c' b'k' b'e' b't' b'-' b'k' b'e' b'y') { - Ok(SecWebSocketKey.into()) - } else { - validate(b, len) - } - } - 18 => { - to_lower!(b, data, 18); - - if eq!(b == b'p' b'r' b'o' b'x' b'y' b'-' b'a' b'u' b't' b'h' b'e' b'n' b't' b'i' b'c' b'a' b't' b'e') { - Ok(ProxyAuthenticate.into()) - } else { - validate(b, len) - } - } - 19 => { - to_lower!(b, data, 19); - - if eq!(b == b'c' b'o' b'n' b't' b'e' b'n' b't' b'-' b'd' b'i' b's' b'p' b'o' b's' b'i' b't' b'i' b'o' b'n') { - Ok(ContentDisposition.into()) - } else if eq!(b == b'i' b'f' b'-' b'u' b'n' b'm' b'o' b'd' b'i' b'f' b'i' b'e' b'd' b'-' b's' b'i' b'n' b'c' b'e') { - Ok(IfUnmodifiedSince.into()) - } else if eq!(b == b'p' b'r' b'o' b'x' b'y' b'-' b'a' b'u' b't' b'h' b'o' 
b'r' b'i' b'z' b'a' b't' b'i' b'o' b'n') { - Ok(ProxyAuthorization.into()) - } else { - validate(b, len) - } - } - 20 => { - to_lower!(b, data, 20); - - if eq!(b == b's' b'e' b'c' b'-' b'w' b'e' b'b' b's' b'o' b'c' b'k' b'e' b't' b'-' b'a' b'c' b'c' b'e' b'p' b't') { - Ok(SecWebSocketAccept.into()) - } else { - validate(b, len) - } - } - 21 => { - to_lower!(b, data, 21); - - if eq!(b == b's' b'e' b'c' b'-' b'w' b'e' b'b' b's' b'o' b'c' b'k' b'e' b't' b'-' b'v' b'e' b'r' b's' b'i' b'o' b'n') { - Ok(SecWebSocketVersion.into()) - } else { - validate(b, len) - } - } - 22 => { - to_lower!(b, data, 22); - - if eq!(b == b'a' b'c' b'c' b'e' b's' b's' b'-' b'c' b'o' b'n' b't' b'r' b'o' b'l' b'-' b'm' b'a' b'x' b'-' b'a' b'g' b'e') { - Ok(AccessControlMaxAge.into()) - } else if eq!(b == b'x' b'-' b'c' b'o' b'n' b't' b'e' b'n' b't' b'-' b't' b'y' b'p' b'e' b'-' b'o' b'p' b't' b'i' b'o' b'n' b's') { - Ok(XContentTypeOptions.into()) - } else if eq!(b == b'x' b'-' b'd' b'n' b's' b'-' b'p' b'r' b'e' b'f' b'e' b't' b'c' b'h' b'-' b'c' b'o' b'n' b't' b'r' b'o' b'l') { - Ok(XDnsPrefetchControl.into()) - } else if eq!(b == b's' b'e' b'c' b'-' b'w' b'e' b'b' b's' b'o' b'c' b'k' b'e' b't' b'-' b'p' b'r' b'o' b't' b'o' b'c' b'o' b'l') { - Ok(SecWebSocketProtocol.into()) - } else { - validate(b, len) - } - } - 23 => { - to_lower!(b, data, 23); - - if eq!(b == b'c' b'o' b'n' b't' b'e' b'n' b't' b'-' b's' b'e' b'c' b'u' b'r' b'i' b't' b'y' b'-' b'p' b'o' b'l' b'i' b'c' b'y') { - Ok(ContentSecurityPolicy.into()) - } else { - validate(b, len) - } - } - 24 => { - to_lower!(b, data, 24); - - if eq!(b == b's' b'e' b'c' b'-' b'w' b'e' b'b' b's' b'o' b'c' b'k' b'e' b't' b'-' b'e' b'x' b't' b'e' b'n' b's' b'i' b'o' b'n' b's') { - Ok(SecWebSocketExtensions.into()) - } else { - validate(b, len) - } - } - 25 => { - to_lower!(b, data, 25); - - if eq!(b == b's' b't' b'r' b'i' b'c' b't' b'-' b't' b'r' b'a' b'n' b's' b'p' b'o' b'r' b't' b'-' b's' b'e' b'c' b'u' b'r' b'i' b't' b'y') { - Ok(StrictTransportSecurity.into()) - } else if eq!(b == b'u' b'p' b'g' b'r' b'a' b'd' b'e' b'-' b'i' b'n' b's' b'e' b'c' b'u' b'r' b'e' b'-' b'r' b'e' b'q' b'u' b'e' b's' b't' b's') { - Ok(UpgradeInsecureRequests.into()) - } else { - validate(b, len) - } - } - 27 => { - to_lower!(b, data, 27); - - if eq!(b == b'a' b'c' b'c' b'e' b's' b's' b'-' b'c' b'o' b'n' b't' b'r' b'o' b'l' b'-' b'a' b'l' b'l' b'o' b'w' b'-' b'o' b'r' b'i' b'g' b'i' b'n') { - Ok(AccessControlAllowOrigin.into()) - } else if eq!(b == b'p' b'u' b'b' b'l' b'i' b'c' b'-' b'k' b'e' b'y' b'-' b'p' b'i' b'n' b's' b'-' b'r' b'e' b'p' b'o' b'r' b't' b'-' b'o' b'n' b'l' b'y') { - Ok(PublicKeyPinsReportOnly.into()) - } else { - validate(b, len) - } - } - 28 => { - to_lower!(b, data, 28); - - if eq!(b == b'a' b'c' b'c' b'e' b's' b's' b'-' b'c' b'o' b'n' b't' b'r' b'o' b'l' b'-' b'a' b'l' b'l' b'o' b'w' b'-') { - if eq!(b[21] == b'h' b'e' b'a' b'd' b'e' b'r' b's') { - return Ok(AccessControlAllowHeaders.into()) - } else if eq!(b[21] == b'm' b'e' b't' b'h' b'o' b'd' b's') { - return Ok(AccessControlAllowMethods.into()) - } - } - - validate(b, len) - } - 29 => { - to_lower!(b, data, 29); - - if eq!(b == b'a' b'c' b'c' b'e' b's' b's' b'-' b'c' b'o' b'n' b't' b'r' b'o' b'l' b'-') { - if eq!(b[15] == b'e' b'x' b'p' b'o' b's' b'e' b'-' b'h' b'e' b'a' b'd' b'e' b'r' b's') { - return Ok(AccessControlExposeHeaders.into()) - } else if eq!(b[15] == b'r' b'e' b'q' b'u' b'e' b's' b't' b'-' b'm' b'e' b't' b'h' b'o' b'd') { - return Ok(AccessControlRequestMethod.into()) - } - } - - validate(b, len) - } - 
30 => { - to_lower!(b, data, 30); - - if eq!(b == b'a' b'c' b'c' b'e' b's' b's' b'-' b'c' b'o' b'n' b't' b'r' b'o' b'l' b'-' b'r' b'e' b'q' b'u' b'e' b's' b't' b'-' b'h' b'e' b'a' b'd' b'e' b'r' b's') { - Ok(AccessControlRequestHeaders.into()) - } else { - validate(b, len) - } - } - 32 => { - to_lower!(b, data, 32); - - if eq!(b == b'a' b'c' b'c' b'e' b's' b's' b'-' b'c' b'o' b'n' b't' b'r' b'o' b'l' b'-' b'a' b'l' b'l' b'o' b'w' b'-' b'c' b'r' b'e' b'd' b'e' b'n' b't' b'i' b'a' b'l' b's') { - Ok(AccessControlAllowCredentials.into()) - } else { - validate(b, len) - } - } - 35 => { - to_lower!(b, data, 35); - - if eq!(b == b'c' b'o' b'n' b't' b'e' b'n' b't' b'-' b's' b'e' b'c' b'u' b'r' b'i' b't' b'y' b'-' b'p' b'o' b'l' b'i' b'c' b'y' b'-' b'r' b'e' b'p' b'o' b'r' b't' b'-' b'o' b'n' b'l' b'y') { - Ok(ContentSecurityPolicyReportOnly.into()) - } else { - validate(b, len) - } - } - len if len < 64 => { - for i in 0..len { - b[i] = table[data[i] as usize]; - } - validate(b, len) - } - len if len <= super::MAX_HEADER_NAME_LEN => { - Ok(HdrName::custom(data, false)) - } - _ => Err(InvalidHeaderName::new()), - } -} - -#[cfg(all(debug_assertions, target_arch = "wasm32"))] -/// This version works best in debug mode in wasm -fn parse_hdr<'a>( - data: &'a [u8], - b: &'a mut [u8; 64], - table: &[u8; 256], -) -> Result, InvalidHeaderName> { - use self::StandardHeader::*; - - let len = data.len(); - - let validate = |buf: &'a [u8], len: usize| { - let buf = &buf[..len]; - if buf.iter().any(|&b| b == 0) { - Err(InvalidHeaderName::new()) - } else { - Ok(HdrName::custom(buf, true)) - } - }; - - assert!( - len < super::MAX_HEADER_NAME_LEN, - "header name too long -- max length is {}", - super::MAX_HEADER_NAME_LEN - ); - - match len { - 0 => Err(InvalidHeaderName::new()), - len if len > 64 => Ok(HdrName::custom(data, false)), - len => { + len @ 1..=64 => { // Read from data into the buffer - transforming using `table` as we go data.iter().zip(b.iter_mut()).for_each(|(index, out)| *out = table[*index as usize]); - match &b[0..len] { - b"te" => Ok(Te.into()), - b"age" => Ok(Age.into()), - b"via" => Ok(Via.into()), - b"dnt" => Ok(Dnt.into()), - b"date" => Ok(Date.into()), - b"etag" => Ok(Etag.into()), - b"from" => Ok(From.into()), - b"host" => Ok(Host.into()), - b"link" => Ok(Link.into()), - b"vary" => Ok(Vary.into()), - b"allow" => Ok(Allow.into()), - b"range" => Ok(Range.into()), - b"accept" => Ok(Accept.into()), - b"cookie" => Ok(Cookie.into()), - b"expect" => Ok(Expect.into()), - b"origin" => Ok(Origin.into()), - b"pragma" => Ok(Pragma.into()), - b"server" => Ok(Server.into()), - b"alt-svc" => Ok(AltSvc.into()), - b"expires" => Ok(Expires.into()), - b"referer" => Ok(Referer.into()), - b"refresh" => Ok(Refresh.into()), - b"trailer" => Ok(Trailer.into()), - b"upgrade" => Ok(Upgrade.into()), - b"warning" => Ok(Warning.into()), - b"if-match" => Ok(IfMatch.into()), - b"if-range" => Ok(IfRange.into()), - b"location" => Ok(Location.into()), - b"forwarded" => Ok(Forwarded.into()), - b"connection" => Ok(Connection.into()), - b"set-cookie" => Ok(SetCookie.into()), - b"user-agent" => Ok(UserAgent.into()), - b"retry-after" => Ok(RetryAfter.into()), - b"content-type" => Ok(ContentType.into()), - b"max-forwards" => Ok(MaxForwards.into()), - b"accept-ranges" => Ok(AcceptRanges.into()), - b"authorization" => Ok(Authorization.into()), - b"cache-control" => Ok(CacheControl.into()), - b"content-range" => Ok(ContentRange.into()), - b"if-none-match" => Ok(IfNoneMatch.into()), - b"last-modified" => Ok(LastModified.into()), - 
b"accept-charset" => Ok(AcceptCharset.into()), - b"content-length" => Ok(ContentLength.into()), - b"accept-encoding" => Ok(AcceptEncoding.into()), - b"accept-language" => Ok(AcceptLanguage.into()), - b"public-key-pins" => Ok(PublicKeyPins.into()), - b"x-frame-options" => Ok(XFrameOptions.into()), - b"referrer-policy" => Ok(ReferrerPolicy.into()), - b"content-language" => Ok(ContentLanguage.into()), - b"content-location" => Ok(ContentLocation.into()), - b"content-encoding" => Ok(ContentEncoding.into()), - b"www-authenticate" => Ok(WwwAuthenticate.into()), - b"x-xss-protection" => Ok(XXssProtection.into()), - b"transfer-encoding" => Ok(TransferEncoding.into()), - b"if-modified-since" => Ok(IfModifiedSince.into()), - b"sec-websocket-key" => Ok(SecWebSocketKey.into()), - b"proxy-authenticate" => Ok(ProxyAuthenticate.into()), - b"content-disposition" => Ok(ContentDisposition.into()), - b"if-unmodified-since" => Ok(IfUnmodifiedSince.into()), - b"proxy-authorization" => Ok(ProxyAuthorization.into()), - b"sec-websocket-accept" => Ok(SecWebSocketAccept.into()), - b"sec-websocket-version" => Ok(SecWebSocketVersion.into()), - b"access-control-max-age" => Ok(AccessControlMaxAge.into()), - b"x-content-type-options" => Ok(XContentTypeOptions.into()), - b"x-dns-prefetch-control" => Ok(XDnsPrefetchControl.into()), - b"sec-websocket-protocol" => Ok(SecWebSocketProtocol.into()), - b"content-security-policy" => Ok(ContentSecurityPolicy.into()), - b"sec-websocket-extensions" => Ok(SecWebSocketExtensions.into()), - b"strict-transport-security" => Ok(StrictTransportSecurity.into()), - b"upgrade-insecure-requests" => Ok(UpgradeInsecureRequests.into()), - b"access-control-allow-origin" => Ok(AccessControlAllowOrigin.into()), - b"public-key-pins-report-only" => Ok(PublicKeyPinsReportOnly.into()), - b"access-control-allow-headers" => Ok(AccessControlAllowHeaders.into()), - b"access-control-allow-methods" => Ok(AccessControlAllowMethods.into()), - b"access-control-expose-headers" => Ok(AccessControlExposeHeaders.into()), - b"access-control-request-method" => Ok(AccessControlRequestMethod.into()), - b"access-control-request-headers" => Ok(AccessControlRequestHeaders.into()), - b"access-control-allow-credentials" => Ok(AccessControlAllowCredentials.into()), - b"content-security-policy-report-only" => { - Ok(ContentSecurityPolicyReportOnly.into()) + let name = &b[0..len]; + match StandardHeader::from_bytes(name) { + Some(sh) => Ok(sh.into()), + None => { + if name.contains(&0) { + Err(InvalidHeaderName::new()) + } else { + Ok(HdrName::custom(name, true)) + } } - other => validate(other, len), } } + 65..=super::MAX_HEADER_NAME_LEN => Ok(HdrName::custom(data, false)), + _ => Err(InvalidHeaderName::new()), } } @@ -1727,12 +1163,34 @@ impl HeaderName { /// Converts a static string to a HTTP header name. /// - /// This function panics when the static string is a invalid header. - /// /// This function requires the static string to only contain lowercase /// characters, numerals and symbols, as per the HTTP/2.0 specification /// and header names internal representation within this library. /// + /// # Panics + /// + /// This function panics when the static string is a invalid header. 
+ /// + /// Until [Allow panicking in constants](https://github.com/rust-lang/rfcs/pull/2345) + /// makes its way into stable, the panic message at compile-time is + /// going to look cryptic, but should at least point at your header value: + /// + /// ```text + /// error: any use of this value will cause an error + /// --> http/src/header/name.rs:1241:13 + /// | + /// 1241 | ([] as [u8; 0])[0]; // Invalid header name + /// | ^^^^^^^^^^^^^^^^^^ + /// | | + /// | index out of bounds: the length is 0 but the index is 0 + /// | inside `http::HeaderName::from_static` at http/src/header/name.rs:1241:13 + /// | inside `INVALID_NAME` at src/main.rs:3:34 + /// | + /// ::: src/main.rs:3:1 + /// | + /// 3 | const INVALID_NAME: HeaderName = HeaderName::from_static("Capitalized"); + /// | ------------------------------------------------------------------------ + /// ``` /// /// # Examples /// @@ -1760,33 +1218,31 @@ impl HeaderName { /// let a = HeaderName::from_static("foobar"); /// let b = HeaderName::from_static("FOOBAR"); // This line panics! /// ``` - #[allow(deprecated)] - pub fn from_static(src: &'static str) -> HeaderName { - let bytes = src.as_bytes(); - #[allow(deprecated)] - let mut buf = unsafe { mem::uninitialized() }; - match parse_hdr(bytes, &mut buf, &HEADER_CHARS_H2) { - Ok(hdr_name) => match hdr_name.inner { - Repr::Standard(std) => std.into(), - Repr::Custom(MaybeLower { buf: _, lower: true }) => { - let val = ByteStr::from_static(src); - Custom(val).into() - }, - Repr::Custom(MaybeLower { buf: _, lower: false }) => { - // With lower false, the string is left unchecked by - // parse_hdr and must be validated manually. - for &b in bytes.iter() { - if HEADER_CHARS_H2[b as usize] == 0 { - panic!("invalid header name") - } - } + #[allow(unconditional_panic)] // required for the panic circumvention + pub const fn from_static(src: &'static str) -> HeaderName { + let name_bytes = src.as_bytes(); + if let Some(standard) = StandardHeader::from_bytes(name_bytes) { + return HeaderName{ + inner: Repr::Standard(standard), + }; + } - let val = ByteStr::from_static(src); - Custom(val).into() + if name_bytes.len() == 0 || name_bytes.len() > super::MAX_HEADER_NAME_LEN || { + let mut i = 0; + loop { + if i >= name_bytes.len() { + break false; + } else if HEADER_CHARS_H2[name_bytes[i] as usize] == 0 { + break true; } - }, + i += 1; + } + } { + ([] as [u8; 0])[0]; // Invalid header name + } - Err(_) => panic!("invalid header name") + HeaderName { + inner: Repr::Custom(Custom(ByteStr::from_static(src))) } } @@ -2169,24 +1625,36 @@ mod tests { } } + const ONE_TOO_LONG: &[u8] = &[b'a'; super::super::MAX_HEADER_NAME_LEN+1]; + #[test] fn test_invalid_name_lengths() { assert!( HeaderName::from_bytes(&[]).is_err(), "zero-length header name is an error", ); - let mut long = vec![b'a'; super::super::MAX_HEADER_NAME_LEN]; + + let long = &ONE_TOO_LONG[0..super::super::MAX_HEADER_NAME_LEN]; + + let long_str = std::str::from_utf8(long).unwrap(); + assert_eq!(HeaderName::from_static(long_str), long_str); // shouldn't panic! 
+ assert!( - HeaderName::from_bytes(long.as_slice()).is_ok(), + HeaderName::from_bytes(long).is_ok(), "max header name length is ok", ); - long.push(b'a'); assert!( - HeaderName::from_bytes(long.as_slice()).is_err(), + HeaderName::from_bytes(ONE_TOO_LONG).is_err(), "longer than max header name length is an error", ); } + #[test] + #[should_panic] + fn test_static_invalid_name_lengths() { + let _ = HeaderName::from_static(unsafe { std::str::from_utf8_unchecked(ONE_TOO_LONG) }); + } + #[test] fn test_from_hdr_name() { use self::StandardHeader::Vary; diff --git a/third_party/rust/http/src/header/value.rs b/third_party/rust/http/src/header/value.rs index 5b368a959edc..bf05f16f4eaf 100644 --- a/third_party/rust/http/src/header/value.rs +++ b/third_party/rust/http/src/header/value.rs @@ -2,6 +2,7 @@ use bytes::{Bytes, BytesMut}; use std::convert::TryFrom; use std::error::Error; +use std::fmt::Write; use std::str::FromStr; use std::{cmp, fmt, mem, str}; @@ -78,7 +79,7 @@ impl HeaderValue { /// assert_eq!(val, "hello"); /// ``` #[inline] - #[allow(unconditional_panic)] // required for the panic circumventon + #[allow(unconditional_panic)] // required for the panic circumvention pub const fn from_static(src: &'static str) -> HeaderValue { let bytes = src.as_bytes(); let mut i = 0; @@ -427,7 +428,7 @@ macro_rules! from_integers { // full value fits inline, so don't allocate! BytesMut::new() }; - let _ = ::itoa::fmt(&mut buf, num); + let _ = buf.write_str(::itoa::Buffer::new().format(num)); HeaderValue { inner: buf.freeze(), is_sensitive: false, diff --git a/third_party/rust/http/src/lib.rs b/third_party/rust/http/src/lib.rs index db93cad14b4f..ebf24ad9e5e8 100644 --- a/third_party/rust/http/src/lib.rs +++ b/third_party/rust/http/src/lib.rs @@ -1,4 +1,4 @@ -#![doc(html_root_url = "https://docs.rs/http/0.2.5")] +#![doc(html_root_url = "https://docs.rs/http/0.2.7")] //! A general purpose library of common HTTP types //! diff --git a/third_party/rust/http/src/uri/mod.rs b/third_party/rust/http/src/uri/mod.rs index 57c052f716b5..30be83b57073 100644 --- a/third_party/rust/http/src/uri/mod.rs +++ b/third_party/rust/http/src/uri/mod.rs @@ -201,7 +201,40 @@ impl Uri { Builder::new() } - /// Attempt to convert a `Uri` from `Parts` + /// Attempt to convert a `Parts` into a `Uri`. + /// + /// # Examples + /// + /// Relative URI + /// + /// ``` + /// # use http::uri::*; + /// let mut parts = Parts::default(); + /// parts.path_and_query = Some("/foo".parse().unwrap()); + /// + /// let uri = Uri::from_parts(parts).unwrap(); + /// + /// assert_eq!(uri.path(), "/foo"); + /// + /// assert!(uri.scheme().is_none()); + /// assert!(uri.authority().is_none()); + /// ``` + /// + /// Absolute URI + /// + /// ``` + /// # use http::uri::*; + /// let mut parts = Parts::default(); + /// parts.scheme = Some("http".parse().unwrap()); + /// parts.authority = Some("foo.com".parse().unwrap()); + /// parts.path_and_query = Some("/foo".parse().unwrap()); + /// + /// let uri = Uri::from_parts(parts).unwrap(); + /// + /// assert_eq!(uri.scheme().unwrap().as_str(), "http"); + /// assert_eq!(uri.authority().unwrap(), "foo.com"); + /// assert_eq!(uri.path(), "/foo"); + /// ``` pub fn from_parts(src: Parts) -> Result { if src.scheme.is_some() { if src.authority.is_none() { @@ -491,8 +524,6 @@ impl Uri { /// authority /// ``` /// - /// This function will be renamed to `authority` in the next semver release. 
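The `from_integers!` change above swaps the removed `itoa::fmt` helper for the buffer-based API of `itoa` 1.x, the version the updated lockfiles pin. A minimal stand-alone sketch of that API; `content_length_value` is an invented helper, not part of the patch:

```rust
// Assumes itoa 1.x as a dependency.
fn content_length_value(len: u64) -> String {
    // itoa 1.0 formats into a stack buffer and returns a &str;
    // the old free function `itoa::fmt(&mut buf, num)` no longer exists.
    let mut buf = itoa::Buffer::new();
    buf.format(len).to_owned()
}

fn main() {
    assert_eq!(content_length_value(1234), "1234");
}
```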
- /// /// # Examples /// /// Absolute URI @@ -738,40 +769,29 @@ impl<'a> TryFrom<&'a Uri> for Uri { } } -/// Convert a `Uri` from parts -/// -/// # Examples -/// -/// Relative URI -/// -/// ``` -/// # use http::uri::*; -/// let mut parts = Parts::default(); -/// parts.path_and_query = Some("/foo".parse().unwrap()); -/// -/// let uri = Uri::from_parts(parts).unwrap(); -/// -/// assert_eq!(uri.path(), "/foo"); -/// -/// assert!(uri.scheme().is_none()); -/// assert!(uri.authority().is_none()); -/// ``` -/// -/// Absolute URI -/// -/// ``` -/// # use http::uri::*; -/// let mut parts = Parts::default(); -/// parts.scheme = Some("http".parse().unwrap()); -/// parts.authority = Some("foo.com".parse().unwrap()); -/// parts.path_and_query = Some("/foo".parse().unwrap()); -/// -/// let uri = Uri::from_parts(parts).unwrap(); -/// -/// assert_eq!(uri.scheme().unwrap().as_str(), "http"); -/// assert_eq!(uri.authority().unwrap(), "foo.com"); -/// assert_eq!(uri.path(), "/foo"); -/// ``` +/// Convert an `Authority` into a `Uri`. +impl From for Uri { + fn from(authority: Authority) -> Self { + Self { + scheme: Scheme::empty(), + authority, + path_and_query: PathAndQuery::empty(), + } + } +} + +/// Convert a `PathAndQuery` into a `Uri`. +impl From for Uri { + fn from(path_and_query: PathAndQuery) -> Self { + Self { + scheme: Scheme::empty(), + authority: Authority::empty(), + path_and_query, + } + } +} + +/// Convert a `Uri` into `Parts` impl From for Parts { fn from(src: Uri) -> Self { let path_and_query = if src.has_path() { diff --git a/third_party/rust/http/tests/status_code.rs b/third_party/rust/http/tests/status_code.rs index 45875450d6f6..160df6bad5d3 100644 --- a/third_party/rust/http/tests/status_code.rs +++ b/third_party/rust/http/tests/status_code.rs @@ -31,3 +31,52 @@ fn roundtrip() { assert_eq!(sstr, status.as_str()); } } + +#[test] +fn is_informational() { + assert!(status_code(100).is_informational()); + assert!(status_code(199).is_informational()); + + assert!(!status_code(200).is_informational()); +} + +#[test] +fn is_success() { + assert!(status_code(200).is_success()); + assert!(status_code(299).is_success()); + + assert!(!status_code(199).is_success()); + assert!(!status_code(300).is_success()); +} + +#[test] +fn is_redirection() { + assert!(status_code(300).is_redirection()); + assert!(status_code(399).is_redirection()); + + assert!(!status_code(299).is_redirection()); + assert!(!status_code(400).is_redirection()); +} + +#[test] +fn is_client_error() { + assert!(status_code(400).is_client_error()); + assert!(status_code(499).is_client_error()); + + assert!(!status_code(399).is_client_error()); + assert!(!status_code(500).is_client_error()); +} + +#[test] +fn is_server_error() { + assert!(status_code(500).is_server_error()); + assert!(status_code(599).is_server_error()); + + assert!(!status_code(499).is_server_error()); + assert!(!status_code(600).is_server_error()); +} + +/// Helper method for readability +fn status_code(status_code: u16) -> StatusCode { + StatusCode::from_u16(status_code).unwrap() +} diff --git a/third_party/rust/hyper/.cargo-checksum.json b/third_party/rust/hyper/.cargo-checksum.json index eb8ff45da72c..6063c81daeec 100644 --- a/third_party/rust/hyper/.cargo-checksum.json +++ b/third_party/rust/hyper/.cargo-checksum.json @@ -1 +1 @@ 
-{"files":{"Cargo.lock":"7983382bd267d086c1c3667d8524fa6317a28023b6c92d4044842ebe88198d98","Cargo.toml":"be4870bfe8a52b49c804044d21d7d09dde4486e745ec611eb084f7a544375a58","LICENSE":"0008012e192415992a45a8161c08d7adb429f8032de62d43afc9fd2c6f0b5323","src/body/aggregate.rs":"5392490a5d17c01cb631da3d5b06daab465afcf970acef53c2aff56243010e01","src/body/body.rs":"80be09104039ecf6aef72e934e53b6da695d3fc60d1b94ed2c1eb20f0339ce81","src/body/mod.rs":"ea7048935f3ac6885ff109ef8ecc0ccec31c2781c170a7a6fc13dfc85f905154","src/body/to_bytes.rs":"d9b8236782ae12ef090758c4eed10f429c0ddeb7f2274a7ecc4555e5562c0a4b","src/client/conn.rs":"326f34a4fa11e3abc372ee2b78ee975af54c6b245717b5e0849cd3cb815c0e38","src/client/connect/dns.rs":"75b7cce1d934b471f8531e626fe45f22a0920aeeadc276cba47f48c2b20f208f","src/client/connect/http.rs":"4b974f54f99caa131f545fdd18df468dd84343ab500153e28cb1f46e742b315b","src/client/connect/mod.rs":"45eef7442c23af3197ad25ffc61f5b7d595fa12f3f5fef2328879d3e8f853a65","src/client/dispatch.rs":"7292363f982857bf4e9481183f33f969cfcc953bd8703951c0c4ab15aa524044","src/client/mod.rs":"0b1a0519318036f7ea0dc46e9642c29a9207f6a4249818f9cf0477bf1029c77e","src/client/pool.rs":"10c92f07331cd11a7f626cf6fc12fe8181b4b0bf9c6436afefb7eb3d470d8536","src/client/service.rs":"54802c50753cc6f1cb19e92e284dcadd2bce667f9c72108ae7ff7ab173d73aa3","src/client/tests.rs":"ac9783128c74fe0369c9d03716b784940e70aca08288f806358adcc67a09fb43","src/common/buf.rs":"63c4459583e27ff52689934e1ff65b3cd02fee693d1ee7836a3e409e49c9baf2","src/common/drain.rs":"6d08d7da77182c7e1746e5426c129129e9cfc3c3c8f0cfd0faca5269bd0ee7f4","src/common/exec.rs":"033112c084dc34b8181bfb6af2c3834ef5c21d01045f70d8158b67a95e9ba340","src/common/io/mod.rs":"6f8e4518df7f24d81fc59b46a2deb61557e8d92700bdc62539fe0f034066fc89","src/common/io/rewind.rs":"9b720ded0a77ff374f1e23cc7badb1147aec1f462ce26719b4426bef37d90bf5","src/common/lazy.rs":"b4913637f573a78cfc532352aebe816d8e0f9ea37edbd2c37bb4c545a008b597","src/common/mod.rs":"e028ce8954bc9033acf28b06d60c6004e7bdd29d37918ef3072b087ccfb0a335","src/common/never.rs":"4cef634f0330e8d939201b44cfbe10db8deddb9e86ce8ef1e8e2ae08e553e7bb","src/common/sync_wrapper.rs":"ca310408b04bd927fbaa2167c7aa41ac02698f3d96e614f30fd5a2ca3299e086","src/common/task.rs":"1f9b921023516456fad6a06db0863f0a23f118e692fe396166e24decd8410f5d","src/common/watch.rs":"eb6db13fbb266ec11200ffa1f5e7a9a132d7e6555373895efde1e2daea428e03","src/error.rs":"ad5fe2585baa64f1cde8df5b0443ceb196f32c5feb0a99f9fe428d337733ff91","src/headers.rs":"d59aa3584bf8038da244566450ba6a52fb8c4ef49a5badf316a358f87ed8d930","src/lib.rs":"cf72cf837137a7620ccbf021c6d1865a5b66f77eb53237c16dd21e4efaa920c9","src/mock.rs":"8b455312be74af6c4a27b3beba85a9493c86d43097947c5aad8a5e9dc0dcdbb3","src/proto/h1/conn.rs":"f3a6f150d164d678b6a1073bac735fc2656c2e0715d82caa8a7d4fd4a5f5f90b","src/proto/h1/date.rs":"e112dc662d0e468272e6d3b218c3961c69309cb0bba137064f6e0e9c85ae9dd3","src/proto/h1/decode.rs":"56bd9a88f2b80f65a7640bcbe6597036e3948e3b559322765466cc86e3b76d71","src/proto/h1/dispatch.rs":"e6ad3792f953f921e264bff29d94c6c85640a77fec1ba6b873b158a93a1b9407","src/proto/h1/encode.rs":"5b2cb6ea4ccbf82f6d910d2c0a3dffc2c0eb7a4c6794af3c3eb291d9038a2a25","src/proto/h1/io.rs":"983c60286694ae6c06d8febd39d071d5fd029e0ba8c2e59fe5d81fe920a5afe5","src/proto/h1/mod.rs":"5612a268681611fcad78171f81aae1107755dd5f7ccf51fcb8b42459b14fbce1","src/proto/h1/role.rs":"4019352ffd943e81dbf2c31eb616cc48f8149d04fe9a1f3d3e0b6631e840d23c","src/proto/h2/client.rs":"2c8c0b90d61e90a5b24891ee800aec6075008b48702008982e4fcf4c05ee499a","src/p
roto/h2/mod.rs":"1d51539198f264a5e389fdfa88355a844c1021458f1c2b4e6a388076fec2a8d6","src/proto/h2/ping.rs":"a51ef5311f66e59c8bffab44ee6034dccf6907bc349091f20cc4978b3c019a8b","src/proto/h2/server.rs":"0102ec2ec46854e3f2977f406a14edb2b54ffaecb04518dc878abf9be143465f","src/proto/mod.rs":"9735de55a78203336959d3d4c7632665a7bd2427ad77352da9ffea1871aab706","src/rt.rs":"62e32501cb2f1830abeaae463ac0fe08ac32e08ee83bcaba7db26a5d8c56db45","src/server/accept.rs":"d7f90b4429e725e3d414a24c0e1e49891373d754b42fc2b8275c92c9a6f12194","src/server/conn.rs":"227b22391c95ce7954a672448fedcbf4a4477b2845086d301a2a390a83c458a4","src/server/mod.rs":"44e4c8fab9666eeafa9811db96f8b5ddfee61c8e675e6b12ba306cc4c530aa9f","src/server/shutdown.rs":"2e901400d9eca3b1f1e3da74ced39d86c52ecd7c4aa60c3105e878f04717d5cb","src/server/tcp.rs":"f0a06367a78e81f48dcf78477b101e4780cc8a4e7a649bb9e9574de83c1e7273","src/service/http.rs":"3e7c2c49e5710f9229d7b01f6f7a4e9f5c7c7360f14870f726aa470311998e65","src/service/make.rs":"ad766245b44bf703a85fcbf840e4ff6ae95a5b7e767659e8583a32dc517a42a6","src/service/mod.rs":"715c2a6ea62571044ac15615e270b244971d0d0846c6b851b8843b5a1afe4950","src/service/oneshot.rs":"baa76056cb0a31a3f23a40ede10b3a72fe93c490124912470ecf12555812c88d","src/service/util.rs":"90452fc10c8316fdc1f0227c4fcef3cfb94e4b2d8fd52ef8c0e276cd3f1cadd5","src/upgrade.rs":"d4c180acb814efd1a1f82b25638fa7d4bd7688c686c88f0c91666b35c3d9d132"},"package":"a6e7655b9594024ad0ee439f3b5a7299369dc2a3f459b47c696f9ff676f9aa1f"} \ No newline at end of file +{"files":{"Cargo.lock":"93840ddf509f2167f5d9b661e49484526e990414fa3414f49e8313c41238edb1","Cargo.toml":"74e68f5404237647f098475b8d5fc13427b9a9900526171508fb417dcfcdd6ac","LICENSE":"25dfd9ec24ebbee73dc93e687526cd4c26deae17bc2179ea0fe3e5dc96105b9b","src/body/aggregate.rs":"bcba09b9dd91deee04f17383cb0d7ac5d876f8d462e08031cd3dfca205f1d118","src/body/body.rs":"a5e97bedaf41587346ce50dca03541d2e71f349fbe13e00173b4e123aa4ffcdf","src/body/length.rs":"2d186f50f05b0562db92d3e1d272521c60c9ad259222ccb1a9310b1ff380a64e","src/body/mod.rs":"2870b04df5eef6a307a6f4699f99dbad54f5373bc95da4e6b29277efb645058f","src/body/to_bytes.rs":"7432a884ec6f655030fe38aca9d33b75ffb2b40d9995a2ace89e81a451f1aa84","src/cfg.rs":"de5fee5bba45a982c10e8f09fc24677be5494bf6da0b39a3a132f0f6eb3fe41e","src/client/client.rs":"a57277f7992cbd992566c7423a0a4716a8b4e0ebf519c1c15705a131505b5f61","src/client/conn.rs":"5e3f686d575855518ac0b5cee3ce8d633d59015d9b252668997894c8fd117bb7","src/client/connect/dns.rs":"f3167a4e4fff56cbc2bcbd5b0eb6a15d5e3c7c002af2a11b7ecd055179535cf2","src/client/connect/http.rs":"57e8fcff426c38088f534d5118cd8bb8010936f7c50846e95a70cf84805b05d7","src/client/connect/mod.rs":"93f4c1230e2f667a19b05a1a6f428fd4cfae34a66a01361ad4f8c1538dd53d12","src/client/dispatch.rs":"41dd9d593822a58c60886b16f7ea0355e25e5915fc3db5230c1092296bc7af1a","src/client/mod.rs":"d5580cda5e7dc9c5240c72a9ea0005d34b5412c28067ab3fa2301ba4cf49d9fa","src/client/pool.rs":"bffee52b070689466f88d2fe7044f4571e59ddae4e5c91adce7dbfd298ccbd54","src/client/service.rs":"7a7e221d06b2246403b1f1269f765fb8f3e026ff7c3b73db369f5952697e262a","src/client/tests.rs":"f7eb2d1dba18e1bd69990f59e61b73a0477db5cc4de68fd64bd7cd36a9406072","src/common/buf.rs":"c762dc2688828ffc88f0549ceddeef65e57d404c245118bcacf3dd8d402bc9cc","src/common/date.rs":"f9a1a63aa7e8f5d6f5800cd56e7e6182cf07e781089930afb529263c1d8adf14","src/common/drain.rs":"262b64cc88e9146dfa3732a25ddb22dfef7cf801cebbd87fc1234b5e3e1d62bd","src/common/exec.rs":"2b9d403e06ef4838553704974350ca767e9b5451c24bd27c9faadc6e94fc486d","src/common/
io/mod.rs":"6f8e4518df7f24d81fc59b46a2deb61557e8d92700bdc62539fe0f034066fc89","src/common/io/rewind.rs":"ba6704fe0b0f3d1af1184c680c97818962b1d8251e790cacfc9d56281cef23cd","src/common/lazy.rs":"f1a7ccad912492c23238f4c854e01b77e81db864455842cdc2890d76e464ff20","src/common/mod.rs":"76576a0b5a2fae70e40e397290314181c60bd548b6d8993ffd265ce6f305f343","src/common/never.rs":"b45b6a85f827081cdb7884907e7884c78540308102072dae563320acce8185fa","src/common/sync_wrapper.rs":"76206c2a52eeb62cdba279d620a4aef52134c6ac782a9f61e741edc8b653cb50","src/common/task.rs":"fd6444762d25ea6beb3b9f73bacf3b47c9f3f87e8e2037f58788b5d54767ce94","src/common/watch.rs":"eb6db13fbb266ec11200ffa1f5e7a9a132d7e6555373895efde1e2daea428e03","src/error.rs":"1dcd2edf849ac6af9d486fa4493f6f66b199a00038253958e77bf3fc061c6517","src/ext.rs":"47cc6476fe33cd28eddadf103a05d574eec2e23260a83ef65daeea1e13a254c5","src/ffi/body.rs":"941fb8b79097e5a4eec0c611a1cd5db24bed3479f1a14cf754e33d19f6d25854","src/ffi/client.rs":"f4e0dd5fadb8ef96cf25d4edde3d86a915a897d0acd9cfaecc83c89886ffb836","src/ffi/error.rs":"de3d8c1eb3818b438ed28a9dea800dfdac47bf2dd21a7c3e5fc10cb331b6e39f","src/ffi/http_types.rs":"c242139f65d1da8449431c087d3f20e32c733188b50628e3152667eb3ec0c956","src/ffi/io.rs":"ab176e866c179a589d22e3aa7410612615e60117019d1e589f9732c36a2282da","src/ffi/macros.rs":"8e1fe58244295db1d19aceeb0e9a777fe484ccc20194fae88b54208e0cbeb515","src/ffi/mod.rs":"0e52ae3586c6a960ae68e862561aabcee690a1e23c6d5d1045fcdc3c74b7fc96","src/ffi/task.rs":"c0fcf9df3f96ea0f7170381c82fa82ed9fbd71443570a30539bff8152ca1a88e","src/headers.rs":"edc1362c894a38eb4d401cbb88485a846bcf0e748a38048f00d4e6da49d1ddf6","src/lib.rs":"fd943252250f7ba5ccac7d77a102cc37249caf0971a7c1ddf6f92452da1176f3","src/mock.rs":"8b455312be74af6c4a27b3beba85a9493c86d43097947c5aad8a5e9dc0dcdbb3","src/proto/h1/conn.rs":"36428818e6d7bbe08fb14051a2865870830c21b900d992aff59b4ad3e41a84c9","src/proto/h1/decode.rs":"73ddd8ffcec24a4c943d86996d43f7367c59bb44b57e804ee711ff8ba97593ef","src/proto/h1/dispatch.rs":"b22de5cbf1c5f98ec016b8ce968a5c5b6479438fce00de9de6ccbd2007da565c","src/proto/h1/encode.rs":"3a360356431ff3f011226cf81fb4eeb63cfe9ca25140e41e2e3907f6d64725f9","src/proto/h1/io.rs":"2efc8567f57efa3c811539b2237f5c52a73f9f870e102a73aa2551ea7e0f0fa3","src/proto/h1/mod.rs":"7f6bdd55a3055ee0cf6c5f4c20b819372e440240eb9de8f7e4cb75eb86afc400","src/proto/h1/role.rs":"3d65096783c61e0b01d969764c0f47353926b0f789df3048c0dabaa370ef1870","src/proto/h2/client.rs":"a7e88b95e5a14d07da3330ed31b9aa575c94b999d6bfeea0426b9dbfaf9256ea","src/proto/h2/mod.rs":"a142cc41c9e8403d25ad1fb4ad93fc2182705858d95859f116adb8704256263a","src/proto/h2/ping.rs":"81620d250ba4f5e5e6782937415d6eb49623bb5b2a6dd73b643a18dfdcdec2c0","src/proto/h2/server.rs":"f944b70affd4db52b053c4f5765c118c3ae0cd2e42a3bcdb53f3f05b5d0771f9","src/proto/mod.rs":"54810a69162af5ebc370e14c0f89b94bf111412470bbe3c1e341809edf2f1ad1","src/rt.rs":"1ef7d4bb3ad6637c6f37189e30d740c061a3c98ca008d266a99f865130909427","src/server/accept.rs":"9a78398b0e9d8104248288b39cbcda8f9968a866214b7d793e548a5cf359752e","src/server/conn.rs":"128dbe1443b3855a3dd48f1ff61d3aa4cb61ad38f9b8503b37d0d728fba880b5","src/server/mod.rs":"a8e7bdb2098b2a19739d5a3a07f3cffdc6f3dde661194fdb04f9f050ca4bf9c7","src/server/server.rs":"96e9ecf31ecfa45af20a2a80e0d1ce72cf7c2c383d85f885f07b7c43317f0cb0","src/server/shutdown.rs":"6fdfef4e8ed5aa1f0f5d618d53ae5f779b9da5fb7f6ef3127e0d587cdf2acf57","src/server/tcp.rs":"1c900efe8328b345312ea467cc1ef837a36e3163b3f3032f6a1c10fddb4f2c9c","src/service/http.rs":"3e7c2c49e5710f9229d7b01f6f7a4e9f
5c7c7360f14870f726aa470311998e65","src/service/make.rs":"7e3d956fd0602b47d3adc520f9a614cb047323d3d162faf9162066dccefe4d11","src/service/mod.rs":"92c05f08a175fb847868a02e7aca96176df1237458d40a17a7a6aa377476df90","src/service/oneshot.rs":"871b51c455d58dd3a6db85bd60e3a663d914ee748fe0c455c4523407533c8b1a","src/service/util.rs":"90452fc10c8316fdc1f0227c4fcef3cfb94e4b2d8fd52ef8c0e276cd3f1cadd5","src/upgrade.rs":"668ebf06e51db3bb12f804bf7ec14c2a1dda6bcc3db57d45f81d59b4aa24f6c4"},"package":"b26ae0a80afebe130861d90abf98e3814a4f28a4c6ffeb5ab8ebb2be311e0ef2"} \ No newline at end of file diff --git a/third_party/rust/hyper/Cargo.lock b/third_party/rust/hyper/Cargo.lock index 6194e5e139ca..225e630327b5 100644 --- a/third_party/rust/hyper/Cargo.lock +++ b/third_party/rust/hyper/Cargo.lock @@ -1,723 +1,769 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. +version = 3 + [[package]] name = "aho-corasick" -version = "0.7.10" +version = "0.7.18" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e37cfd5e7657ada45f742d6e99ca5788580b5c529dc78faf11ece6dc702656f" dependencies = [ - "memchr 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)", + "memchr", +] + +[[package]] +name = "async-stream" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "171374e7e3b2504e0e5236e3b59260560f9fe94bfe9ac39ba5e4e929c5590625" +dependencies = [ + "async-stream-impl", + "futures-core", +] + +[[package]] +name = "async-stream-impl" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "648ed8c8d2ce5409ccd57453d9d1b214b342a0d69376a6feda1fd6cae3299308" +dependencies = [ + "proc-macro2", + "quote", + "syn", ] [[package]] name = "atty" version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" dependencies = [ - "hermit-abi 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "hermit-abi", + "libc", + "winapi", ] [[package]] name = "autocfg" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "bitflags" -version = "1.2.1" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" [[package]] name = "bytes" -version = "0.5.4" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4872d67bab6358e59559027aa3b9157c53d9358c51423c17554809a8858e0f8" [[package]] name = "cfg-if" -version = "0.1.10" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "env_logger" version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44533bbbb3bb3c1fa17d9f2e4e38bbbaf8396ba82193c4cb1b6445d711445d36" dependencies = [ - "atty 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)", - "humantime 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 1.3.6 (registry+https://github.com/rust-lang/crates.io-index)", - "termcolor 1.1.0 
(registry+https://github.com/rust-lang/crates.io-index)", + "atty", + "humantime", + "log", + "regex", + "termcolor", ] [[package]] name = "fnv" -version = "1.0.6" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] -name = "fuchsia-zircon" -version = "0.3.3" +name = "form_urlencoded" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5fc25a87fa4fd2094bffb06925852034d90a17f0d1e05197d4956d3555752191" dependencies = [ - "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", + "matches", + "percent-encoding", ] -[[package]] -name = "fuchsia-zircon-sys" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" - [[package]] name = "futures-channel" -version = "0.3.4" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba3dda0b6588335f360afc675d0564c17a77a2bda81ca178a4b6081bd86c7f0b" dependencies = [ - "futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-core", ] [[package]] name = "futures-core" -version = "0.3.4" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0c8ff0461b82559810cdccfde3215c3f373807f5e5232b71479bff7bb2583d7" [[package]] name = "futures-sink" -version = "0.3.4" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3055baccb68d74ff6480350f8d6eb8fcfa3aa11bdc1a1ae3afdd0514617d508" [[package]] name = "futures-task" -version = "0.3.4" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ee7c6485c30167ce4dfb83ac568a849fe53274c831081476ee13e0dce1aad72" [[package]] name = "futures-util" -version = "0.3.4" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9b5cf40b47a271f77a8b1bec03ca09044d99d2372c0de244e66430761127164" dependencies = [ - "futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "futures-task 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "pin-utils 0.1.0-alpha.4 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-core", + "futures-task", + "pin-project-lite", + "pin-utils", ] [[package]] name = "h2" -version = "0.2.4" +version = "0.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c9de88456263e249e241fcd211d3954e2c9b0ef7ccfc235a444eb367cae3689" dependencies = [ - "bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", - "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", - "futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "futures-sink 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "futures-util 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "http 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "indexmap 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-util 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "bytes", + "fnv", + "futures-core", + 
"futures-sink", + "futures-util", + "http", + "indexmap", + "slab", + "tokio", + "tokio-util", + "tracing", ] [[package]] -name = "hermit-abi" -version = "0.1.10" +name = "hashbrown" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" + +[[package]] +name = "hermit-abi" +version = "0.1.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" dependencies = [ - "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)", + "libc", ] [[package]] name = "http" -version = "0.2.1" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31f4c6746584866f0feabcc69893c5b51beef3831656a968ed7ae254cdc4fd03" dependencies = [ - "bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", - "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", - "itoa 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", + "bytes", + "fnv", + "itoa", ] [[package]] name = "http-body" -version = "0.3.1" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ff4f84919677303da5f147645dbea6b1881f368d03ac84e1dc09031ebd7b2c6" dependencies = [ - "bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", - "http 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "bytes", + "http", + "pin-project-lite", ] [[package]] name = "httparse" -version = "1.3.4" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9100414882e15fb7feccb4897e5f0ff0ff1ca7d1a86a23208ada4d7a18e6c6c4" + +[[package]] +name = "httpdate" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" [[package]] name = "humantime" version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df004cfca50ef23c36850aaaa59ad52cc70d0e90243c3c7737a4dd32dc7a3c4f" dependencies = [ - "quick-error 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)", + "quick-error", ] [[package]] name = "hyper" -version = "0.13.6" +version = "0.14.18" dependencies = [ - "bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", - "futures-channel 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "futures-util 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "h2 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)", - "http 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "http-body 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "httparse 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "itoa 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", - "num_cpus 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)", - "pin-project 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "pretty_env_logger 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.105 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.105 
(registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.50 (registry+https://github.com/rust-lang/crates.io-index)", - "socket2 0.3.12 (registry+https://github.com/rust-lang/crates.io-index)", - "spmc 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-test 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-util 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "tower-service 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "tower-util 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "url 1.7.2 (registry+https://github.com/rust-lang/crates.io-index)", - "want 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "bytes", + "futures-channel", + "futures-core", + "futures-util", + "h2", + "http", + "http-body", + "httparse", + "httpdate", + "itoa", + "libc", + "matches", + "num_cpus", + "pin-project-lite", + "pnet_datalink", + "pretty_env_logger", + "serde", + "serde_json", + "socket2", + "spmc", + "tokio", + "tokio-test", + "tokio-util", + "tower", + "tower-service", + "tracing", + "url", + "want", ] [[package]] name = "idna" -version = "0.1.5" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "418a0a6fab821475f634efe3ccc45c013f742efe03d853e8d3355d5cb850ecf8" dependencies = [ - "matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-bidi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-normalization 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "matches", + "unicode-bidi", + "unicode-normalization", ] [[package]] name = "indexmap" -version = "1.3.2" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc633605454125dec4b66843673f01c7df2b89479b32e0ed634e43a91cff62a5" dependencies = [ - "autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "autocfg", + "hashbrown", ] [[package]] -name = "iovec" -version = "0.1.4" +name = "ipnetwork" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02c3eaab3ac0ede60ffa41add21970a7df7d91772c03383aac6c2c3d53cc716b" dependencies = [ - "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)", + "serde", ] [[package]] name = "itoa" -version = "0.4.5" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "kernel32-sys" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", -] +checksum = "1aab8fc367588b89dcee83ab0fd66b72b50b72fa1904d7095045ace2b0c81c35" [[package]] name = "lazy_static" version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.68" +version = "0.2.112" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b03d17f364a3a042d5e5d46b053bbbf82c92c9430c592dd4c064dc6ee997125" [[package]] name = "log" -version = "0.4.8" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710" dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if", ] [[package]] name = "matches" -version = "0.1.8" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a3e378b66a060d48947b590737b30a1be76706c8dd7b8ba0f2fe3989c68a853f" [[package]] name = "memchr" -version = "2.3.3" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a" [[package]] name = "mio" -version = "0.6.21" +version = "0.7.14" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8067b404fe97c70829f082dec8bcf4f71225d7eaea1d8645349cb76fa06205cc" dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "fuchsia-zircon 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", - "fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", - "iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", - "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)", - "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", + "libc", + "log", + "miow", + "ntapi", + "winapi", ] [[package]] name = "miow" -version = "0.2.1" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9f1c5b025cda876f66ef43a113f91ebc9f4ccef34843000e0adf6ebbab84e21" dependencies = [ - "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", - "ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi", ] [[package]] -name = "net2" -version = "0.2.33" +name = "ntapi" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f6bb902e437b6d86e03cce10a7e2af662292c5dfef23b65899ea3ac9354ad44" dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi", ] [[package]] name = "num_cpus" -version = "1.12.0" +version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19e64526ebdee182341572e50e9ad03965aa510cd94427a4549448f285e957a1" dependencies = [ - "hermit-abi 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)", + "hermit-abi", + "libc", ] [[package]] name = "percent-encoding" -version = "1.0.1" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" [[package]] name = "pin-project" -version = "0.4.8" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"58ad3879ad3baf4e44784bc6a718a8698867bb991f8ce24d1bcbe2cfb4c3a75e" dependencies = [ - "pin-project-internal 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "0.4.8" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "744b6f092ba29c3650faf274db506afd39944f48420f6c86b17cfe0ee1cb36bb" dependencies = [ - "proc-macro2 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.17 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2", + "quote", + "syn", ] [[package]] name = "pin-project-lite" -version = "0.1.4" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e280fbe77cc62c91527259e9442153f4688736748d24660126286329742b4c6c" [[package]] name = "pin-utils" -version = "0.1.0-alpha.4" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + +[[package]] +name = "pnet_base" +version = "0.27.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4e4688aa497ef62129f302a5800ebde67825f8ff129f43690ca84099f6620bed" + +[[package]] +name = "pnet_datalink" +version = "0.27.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59001c9c4d9d23bf2f61afaaf134a766fd6932ba2557c606b9112157053b9ac7" +dependencies = [ + "ipnetwork", + "libc", + "pnet_base", + "pnet_sys", + "winapi", +] + +[[package]] +name = "pnet_sys" +version = "0.27.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7589e4c4e7ed72a3ffdff8a65d3bea84e8c3a23e19d0a10e8f45efdf632fff15" +dependencies = [ + "libc", + "winapi", +] [[package]] name = "pretty_env_logger" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "926d36b9553851b8b0005f1275891b392ee4d2d833852c417ed025477350fb9d" dependencies = [ - "env_logger 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "env_logger", + "log", ] [[package]] name = "proc-macro2" -version = "1.0.10" +version = "1.0.36" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7342d5883fbccae1cc37a2353b09c87c9b0f3afd73f5fb9bba687a1f733b029" dependencies = [ - "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-xid", ] [[package]] name = "quick-error" version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quote" -version = "1.0.3" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47aa80447ce4daf1717500037052af176af5d38cc3e571d9ec1c7353fc10c87d" dependencies = [ - "proc-macro2 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2", ] -[[package]] -name = "redox_syscall" -version = "0.1.56" -source = "registry+https://github.com/rust-lang/crates.io-index" - [[package]] name = "regex" -version = "1.3.6" +version = "1.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d07a8629359eb56f1e2fb1652bb04212c072a87ba68546a04065d525673ac461" dependencies = [ - "aho-corasick 0.7.10 
(registry+https://github.com/rust-lang/crates.io-index)", - "memchr 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)", - "regex-syntax 0.6.17 (registry+https://github.com/rust-lang/crates.io-index)", - "thread_local 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "aho-corasick", + "memchr", + "regex-syntax", ] [[package]] name = "regex-syntax" -version = "0.6.17" +version = "0.6.25" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b" [[package]] name = "ryu" -version = "1.0.3" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73b4b750c782965c211b42f022f59af1fbceabdd026623714f104152f1ec149f" [[package]] name = "serde" -version = "1.0.105" +version = "1.0.133" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97565067517b60e2d1ea8b268e59ce036de907ac523ad83a0475da04e818989a" +dependencies = [ + "serde_derive", +] [[package]] name = "serde_derive" -version = "1.0.105" +version = "1.0.133" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed201699328568d8d08208fdd080e3ff594e6c422e438b6705905da01005d537" dependencies = [ - "proc-macro2 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.17 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2", + "quote", + "syn", ] [[package]] name = "serde_json" -version = "1.0.50" +version = "1.0.74" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee2bb9cd061c5865d345bb02ca49fcef1391741b672b54a0bf7b679badec3142" dependencies = [ - "itoa 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", - "ryu 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.105 (registry+https://github.com/rust-lang/crates.io-index)", + "itoa", + "ryu", + "serde", ] [[package]] name = "slab" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "smallvec" -version = "1.2.0" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9def91fd1e018fe007022791f865d0ccc9b3a0d5001e01aabb8b40e46000afb5" [[package]] name = "socket2" -version = "0.3.12" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5dc90fe6c7be1a323296982db1836d1ea9e47b6839496dde9a541bc496df3516" dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)", - "redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "libc", + "winapi", ] [[package]] name = "spmc" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02a8428da277a8e3a15271d79943e80ccc2ef254e78813a166a08d65e4c3ece5" [[package]] name = "syn" -version = "1.0.17" +version = "1.0.85" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a684ac3dcd8913827e18cd09a68384ee66c1de24157e3c556c9ab16d85695fb7" dependencies = [ - "proc-macro2 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2", + 
"quote", + "unicode-xid", ] [[package]] name = "termcolor" -version = "1.1.0" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2dfed899f0eb03f32ee8c6a0aabdb8a7949659e3466561fc0adf54e26d88c5f4" dependencies = [ - "winapi-util 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi-util", ] [[package]] -name = "thread_local" -version = "1.0.1" +name = "tinyvec" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c1c1d5a42b6245520c249549ec267180beaffcc0615401ac8e31853d4b6d8d2" dependencies = [ - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "tinyvec_macros", ] [[package]] -name = "time" -version = "0.1.42" +name = "tinyvec_macros" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)", - "redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", -] +checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "0.2.13" +version = "1.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c27a64b625de6d309e8c57716ba93021dccf1b3b5c97edd6d3dd2d2135afc0a" dependencies = [ - "bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", - "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", - "futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "memchr 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)", - "mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)", - "pin-project-lite 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", - "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-macros 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", + "bytes", + "libc", + "memchr", + "mio", + "num_cpus", + "pin-project-lite", + "tokio-macros", + "winapi", ] [[package]] name = "tokio-macros" -version = "0.2.5" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b557f72f448c511a979e2564e55d74e6c4432fc96ff4f6241bc6bded342643b7" dependencies = [ - "proc-macro2 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.17 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tokio-stream" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50145484efff8818b5ccd256697f36863f587da82cf8b409c53adf1e840798e3" +dependencies = [ + "futures-core", + "pin-project-lite", + "tokio", ] [[package]] name = "tokio-test" -version = "0.2.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53474327ae5e166530d17f2d956afcb4f8a004de581b3cae10f12006bc8163e3" dependencies = [ - "bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", - "futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)", + "async-stream", + "bytes", + 
"futures-core", + "tokio", + "tokio-stream", ] [[package]] name = "tokio-util" -version = "0.3.1" +version = "0.6.9" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e99e1983e5d376cd8eb4b66604d2e99e79f5bd988c3055891dcd8c9e2604cc0" dependencies = [ - "bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", - "futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "futures-sink 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "pin-project-lite 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)", + "bytes", + "futures-core", + "futures-sink", + "log", + "pin-project-lite", + "tokio", ] [[package]] -name = "tower-service" -version = "0.3.0" +name = "tower" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5651b5f6860a99bd1adb59dbfe1db8beb433e73709d9032b413a77e2fb7c066a" +dependencies = [ + "futures-core", + "futures-util", + "pin-project", + "pin-project-lite", + "tokio", + "tower-layer", + "tower-service", + "tracing", +] [[package]] -name = "tower-util" +name = "tower-layer" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "343bc9466d3fe6b0f960ef45960509f84480bf4fd96f92901afe7ff3df9d3a62" + +[[package]] +name = "tower-service" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" + +[[package]] +name = "tracing" +version = "0.1.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "375a639232caf30edfc78e8d89b2d4c375515393e7af7e16f01cd96917fb2105" dependencies = [ - "futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "futures-util 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "pin-project 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "tower-service 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if", + "log", + "pin-project-lite", + "tracing-attributes", + "tracing-core", +] + +[[package]] +name = "tracing-attributes" +version = "0.1.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4f480b8f81512e825f337ad51e94c1eb5d3bbdf2b363dcd01e2b19a9ffe3f8e" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tracing-core" +version = "0.1.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f4ed65637b8390770814083d20756f87bfa2c21bf2f110babdc5438351746e4" +dependencies = [ + "lazy_static", ] [[package]] name = "try-lock" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" [[package]] name = "unicode-bidi" -version = "0.3.4" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", -] +checksum = "1a01404663e3db436ed2746d9fefef640d868edae3cceb81c3b8d5732fda678f" [[package]] name = "unicode-normalization" -version = "0.1.12" +version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d54590932941a9e9266f0832deed84ebe1bf2e4c9e4a3554d393d18f5e854bf9" dependencies = [ - "smallvec 
1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "tinyvec", ] [[package]] name = "unicode-xid" -version = "0.2.0" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3" [[package]] name = "url" -version = "1.7.2" +version = "2.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a507c383b2d33b5fc35d1861e77e6b383d158b2da5e14fe51b83dfedf6fd578c" dependencies = [ - "idna 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", - "matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", - "percent-encoding 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "form_urlencoded", + "idna", + "matches", + "percent-encoding", ] [[package]] name = "want" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0" dependencies = [ - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "try-lock 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "log", + "try-lock", ] [[package]] name = "winapi" -version = "0.2.8" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "winapi" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" dependencies = [ - "winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", ] -[[package]] -name = "winapi-build" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" - [[package]] name = "winapi-i686-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" dependencies = [ - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi", ] [[package]] name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "ws2_32-sys" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[metadata] -"checksum aho-corasick 0.7.10 (registry+https://github.com/rust-lang/crates.io-index)" = "8716408b8bc624ed7f65d223ddb9ac2d044c0547b6fa4b0d554f3a9540496ada" -"checksum atty 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)" = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" -"checksum autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d" -"checksum bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" -"checksum bytes 0.5.4 
(registry+https://github.com/rust-lang/crates.io-index)" = "130aac562c0dd69c56b3b1cc8ffd2e17be31d0b6c25b61c96b76231aa23e39e1" -"checksum cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" -"checksum env_logger 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = "44533bbbb3bb3c1fa17d9f2e4e38bbbaf8396ba82193c4cb1b6445d711445d36" -"checksum fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "2fad85553e09a6f881f739c29f0b00b0f01357c743266d478b68951ce23285f3" -"checksum fuchsia-zircon 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2e9763c69ebaae630ba35f74888db465e49e259ba1bc0eda7d06f4a067615d82" -"checksum fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" -"checksum futures-channel 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "f0c77d04ce8edd9cb903932b608268b3fffec4163dc053b3b402bf47eac1f1a8" -"checksum futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "f25592f769825e89b92358db00d26f965761e094951ac44d3663ef25b7ac464a" -"checksum futures-sink 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "3466821b4bc114d95b087b850a724c6f83115e929bc88f1fa98a3304a944c8a6" -"checksum futures-task 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "7b0a34e53cf6cdcd0178aa573aed466b646eb3db769570841fda0c7ede375a27" -"checksum futures-util 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "22766cf25d64306bedf0384da004d05c9974ab104fcc4528f1236181c18004c5" -"checksum h2 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)" = "377038bf3c89d18d6ca1431e7a5027194fbd724ca10592b9487ede5e8e144f42" -"checksum hermit-abi 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "725cf19794cf90aa94e65050cb4191ff5d8fa87a498383774c47b332e3af952e" -"checksum http 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "28d569972648b2c512421b5f2a405ad6ac9666547189d0c5477a3f200f3e02f9" -"checksum http-body 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "13d5ff830006f7646652e057693569bfe0d51760c0085a071769d142a205111b" -"checksum httparse 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "cd179ae861f0c2e53da70d892f5f3029f9594be0c41dc5269cd371691b1dc2f9" -"checksum humantime 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "df004cfca50ef23c36850aaaa59ad52cc70d0e90243c3c7737a4dd32dc7a3c4f" -"checksum idna 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "38f09e0f0b1fb55fdee1f17470ad800da77af5186a1a76c026b679358b7e844e" -"checksum indexmap 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "076f042c5b7b98f31d205f1249267e12a6518c1481e9dae9764af19b707d2292" -"checksum iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "b2b3ea6ff95e175473f8ffe6a7eb7c00d054240321b84c57051175fe3c1e075e" -"checksum itoa 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "b8b7a7c0c47db5545ed3fef7468ee7bb5b74691498139e4b3f6a20685dc6dd8e" -"checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" -"checksum lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" -"checksum libc 0.2.68 
(registry+https://github.com/rust-lang/crates.io-index)" = "dea0c0405123bba743ee3f91f49b1c7cfb684eef0da0a50110f758ccf24cdff0" -"checksum log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)" = "14b6052be84e6b71ab17edffc2eeabf5c2c3ae1fdb464aae35ac50c67a44e1f7" -"checksum matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08" -"checksum memchr 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3728d817d99e5ac407411fa471ff9800a778d88a24685968b36824eaf4bee400" -"checksum mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)" = "302dec22bcf6bae6dfb69c647187f4b4d0fb6f535521f7bc022430ce8e12008f" -"checksum miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "8c1f2f3b1cf331de6896aabf6e9d55dca90356cc9960cca7eaaf408a355ae919" -"checksum net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)" = "42550d9fb7b6684a6d404d9fa7250c2eb2646df731d1c06afc06dcee9e1bcf88" -"checksum num_cpus 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)" = "46203554f085ff89c235cd12f7075f3233af9b11ed7c9e16dfe2560d03313ce6" -"checksum percent-encoding 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "31010dd2e1ac33d5b46a5b413495239882813e0369f8ed8a5e266f173602f831" -"checksum pin-project 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)" = "7804a463a8d9572f13453c516a5faea534a2403d7ced2f0c7e100eeff072772c" -"checksum pin-project-internal 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)" = "385322a45f2ecf3410c68d2a549a4a2685e8051d0f278e39743ff4e451cb9b3f" -"checksum pin-project-lite 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "237844750cfbb86f67afe27eee600dfbbcb6188d734139b534cbfbf4f96792ae" -"checksum pin-utils 0.1.0-alpha.4 (registry+https://github.com/rust-lang/crates.io-index)" = "5894c618ce612a3fa23881b152b608bafb8c56cfc22f434a3ba3120b40f7b587" -"checksum pretty_env_logger 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "926d36b9553851b8b0005f1275891b392ee4d2d833852c417ed025477350fb9d" -"checksum proc-macro2 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)" = "df246d292ff63439fea9bc8c0a270bed0e390d5ebd4db4ba15aba81111b5abe3" -"checksum quick-error 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" -"checksum quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2bdc6c187c65bca4260c9011c9e3132efe4909da44726bad24cf7572ae338d7f" -"checksum redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)" = "2439c63f3f6139d1b57529d16bc3b8bb855230c8efcc5d3a896c8bea7c3b1e84" -"checksum regex 1.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "7f6946991529684867e47d86474e3a6d0c0ab9b82d5821e314b1ede31fa3a4b3" -"checksum regex-syntax 0.6.17 (registry+https://github.com/rust-lang/crates.io-index)" = "7fe5bd57d1d7414c6b5ed48563a2c855d995ff777729dcd91c369ec7fea395ae" -"checksum ryu 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "535622e6be132bccd223f4bb2b8ac8d53cda3c7a6394944d3b2b33fb974f9d76" -"checksum serde 1.0.105 (registry+https://github.com/rust-lang/crates.io-index)" = "e707fbbf255b8fc8c3b99abb91e7257a622caeb20a9818cbadbeeede4e0932ff" -"checksum serde_derive 1.0.105 (registry+https://github.com/rust-lang/crates.io-index)" = "ac5d00fc561ba2724df6758a17de23df5914f20e41cb00f94d5b7ae42fffaff8" -"checksum serde_json 1.0.50 
(registry+https://github.com/rust-lang/crates.io-index)" = "78a7a12c167809363ec3bd7329fc0a3369056996de43c4b37ef3cd54a6ce4867" -"checksum slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" -"checksum smallvec 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5c2fb2ec9bcd216a5b0d0ccf31ab17b5ed1d627960edff65bbe95d3ce221cefc" -"checksum socket2 0.3.12 (registry+https://github.com/rust-lang/crates.io-index)" = "03088793f677dce356f3ccc2edb1b314ad191ab702a5de3faf49304f7e104918" -"checksum spmc 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "02a8428da277a8e3a15271d79943e80ccc2ef254e78813a166a08d65e4c3ece5" -"checksum syn 1.0.17 (registry+https://github.com/rust-lang/crates.io-index)" = "0df0eb663f387145cab623dea85b09c2c5b4b0aef44e945d928e682fce71bb03" -"checksum termcolor 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "bb6bfa289a4d7c5766392812c0a1f4c1ba45afa1ad47803c11e1f407d846d75f" -"checksum thread_local 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d40c6d1b69745a6ec6fb1ca717914848da4b44ae29d9b3080cbee91d72a69b14" -"checksum time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)" = "db8dcfca086c1143c9270ac42a2bbd8a7ee477b78ac8e45b19abfb0cbede4b6f" -"checksum tokio 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)" = "0fa5e81d6bc4e67fe889d5783bd2a128ab2e0cfa487e0be16b6a8d177b101616" -"checksum tokio-macros 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)" = "f0c3acc6aa564495a0f2e1d59fab677cd7f81a19994cfc7f3ad0e64301560389" -"checksum tokio-test 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "09cf9705471976fa5fc6817d3fbc9c4ff9696a6647af0e5c1870c81ca7445b05" -"checksum tokio-util 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "be8242891f2b6cbef26a2d7e8605133c2c554cd35b3e4948ea892d6d68436499" -"checksum tower-service 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e987b6bf443f4b5b3b6f38704195592cca41c5bb7aedd3c3693c7081f8289860" -"checksum tower-util 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d1093c19826d33807c72511e68f73b4a0469a3f22c2bd5f7d5212178b4b89674" -"checksum try-lock 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e604eb7b43c06650e854be16a2a03155743d3752dd1c943f6829e26b7a36e382" -"checksum unicode-bidi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "49f2bd0c6468a8230e1db229cff8029217cf623c767ea5d60bfbd42729ea54d5" -"checksum unicode-normalization 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)" = "5479532badd04e128284890390c1e876ef7a993d0570b3597ae43dfa1d59afa4" -"checksum unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "826e7639553986605ec5979c7dd957c7895e93eabed50ab2ffa7f6128a75097c" -"checksum url 1.7.2 (registry+https://github.com/rust-lang/crates.io-index)" = "dd4e7c0d531266369519a4aa4f399d748bd37043b00bde1e4ff1f60a120b355a" -"checksum want 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0" -"checksum winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a" -"checksum winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "8093091eeb260906a183e6ae1abdba2ef5ef2257a21801128899c3fc699229c6" -"checksum winapi-build 0.1.1 
(registry+https://github.com/rust-lang/crates.io-index)" = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc" -"checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" -"checksum winapi-util 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "fa515c5163a99cc82bab70fd3bfdd36d827be85de63737b40fcef2ce084a436e" -"checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" -"checksum ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" diff --git a/third_party/rust/hyper/Cargo.toml b/third_party/rust/hyper/Cargo.toml index d7d759dc1039..db2d0d42b0a2 100644 --- a/third_party/rust/hyper/Cargo.toml +++ b/third_party/rust/hyper/Cargo.toml @@ -3,29 +3,54 @@ # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies +# to registry (e.g., crates.io) dependencies. # -# If you believe there's an error in this file please file an -# issue against the rust-lang/cargo repository. If you're -# editing this file be aware that the upstream Cargo.toml -# will likely look very different (and much more reasonable) +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. [package] edition = "2018" name = "hyper" -version = "0.13.6" +version = "0.14.18" authors = ["Sean McArthur "] -include = ["Cargo.toml", "LICENSE", "src/**/*"] +include = [ + "Cargo.toml", + "LICENSE", + "src/**/*", +] description = "A fast and correct HTTP library." 
homepage = "https://hyper.rs" documentation = "https://docs.rs/hyper" readme = "README.md" -keywords = ["http", "hyper", "hyperium"] -categories = ["network-programming", "web-programming::http-client", "web-programming::http-server"] +keywords = [ + "http", + "hyper", + "hyperium", +] +categories = [ + "network-programming", + "web-programming::http-client", + "web-programming::http-server", +] license = "MIT" repository = "https://github.com/hyperium/hyper" + [package.metadata.docs.rs] -features = ["runtime", "stream"] +features = [ + "ffi", + "full", +] +rustdoc-args = [ + "--cfg", + "docsrs", + "--cfg", + "hyper_unstable_ffi", +] + +[package.metadata.playground] +features = ["full"] + [profile.bench] codegen-units = 1 incremental = false @@ -37,119 +62,125 @@ incremental = false [[example]] name = "client" path = "examples/client.rs" -required-features = ["runtime"] +required-features = ["full"] [[example]] name = "client_json" path = "examples/client_json.rs" -required-features = ["runtime"] +required-features = ["full"] [[example]] name = "echo" path = "examples/echo.rs" -required-features = ["runtime", "stream"] +required-features = ["full"] [[example]] name = "gateway" path = "examples/gateway.rs" -required-features = ["runtime"] +required-features = ["full"] [[example]] name = "hello" path = "examples/hello.rs" -required-features = ["runtime"] +required-features = ["full"] [[example]] name = "http_proxy" path = "examples/http_proxy.rs" -required-features = ["runtime"] +required-features = ["full"] [[example]] name = "multi_server" path = "examples/multi_server.rs" -required-features = ["runtime"] +required-features = ["full"] [[example]] name = "params" path = "examples/params.rs" -required-features = ["runtime", "stream"] +required-features = ["full"] [[example]] name = "send_file" path = "examples/send_file.rs" -required-features = ["runtime"] +required-features = ["full"] + +[[example]] +name = "service_struct_impl" +path = "examples/service_struct_impl.rs" +required-features = ["full"] [[example]] name = "single_threaded" path = "examples/single_threaded.rs" -required-features = ["runtime"] +required-features = ["full"] [[example]] name = "state" path = "examples/state.rs" -required-features = ["runtime"] +required-features = ["full"] [[example]] name = "tower_client" path = "examples/tower_client.rs" -required-features = ["runtime"] +required-features = ["full"] [[example]] name = "tower_server" path = "examples/tower_server.rs" -required-features = ["runtime"] +required-features = ["full"] [[example]] name = "upgrades" path = "examples/upgrades.rs" -required-features = ["runtime"] +required-features = ["full"] [[example]] name = "web_api" path = "examples/web_api.rs" -required-features = ["runtime", "stream"] +required-features = ["full"] [[test]] name = "client" path = "tests/client.rs" -required-features = ["runtime", "stream"] +required-features = ["full"] [[test]] name = "integration" path = "tests/integration.rs" -required-features = ["runtime", "stream"] +required-features = ["full"] [[test]] name = "server" path = "tests/server.rs" -required-features = ["runtime"] +required-features = ["full"] [[bench]] name = "body" path = "benches/body.rs" -required-features = ["runtime", "stream"] +required-features = ["full"] [[bench]] name = "connect" path = "benches/connect.rs" -required-features = ["runtime"] +required-features = ["full"] [[bench]] name = "end_to_end" path = "benches/end_to_end.rs" -required-features = ["runtime"] +required-features = ["full"] [[bench]] name = 
"pipeline" path = "benches/pipeline.rs" -required-features = ["runtime"] +required-features = ["full"] [[bench]] name = "server" path = "benches/server.rs" -required-features = ["runtime", "stream"] +required-features = ["full"] + [dependencies.bytes] -version = "0.5" +version = "1" [dependencies.futures-channel] version = "0.3" @@ -163,42 +194,50 @@ version = "0.3" default-features = false [dependencies.h2] -version = "0.2.2" +version = "0.3.9" +optional = true [dependencies.http] version = "0.2" [dependencies.http-body] -version = "0.3.1" +version = "0.4" [dependencies.httparse] +version = "1.6" + +[dependencies.httpdate] version = "1.0" [dependencies.itoa] -version = "0.4.1" +version = "1" -[dependencies.log] -version = "0.4" - -[dependencies.pin-project] -version = "0.4" - -[dependencies.socket2] -version = "0.3" +[dependencies.libc] +version = "0.2" optional = true -[dependencies.time] -version = "0.1" +[dependencies.pin-project-lite] +version = "0.2.4" + +[dependencies.socket2] +version = "0.4" +optional = true [dependencies.tokio] -version = "0.2.5" +version = "1" features = ["sync"] [dependencies.tower-service] version = "0.3" +[dependencies.tracing] +version = "0.1" +features = ["std"] +default-features = false + [dependencies.want] version = "0.3" + [dev-dependencies.futures-util] version = "0.3" features = ["alloc"] @@ -215,9 +254,7 @@ version = "0.4" [dev-dependencies.serde] version = "1.0" - -[dev-dependencies.serde_derive] -version = "1.0" +features = ["derive"] [dev-dependencies.serde_json] version = "1.0" @@ -226,26 +263,65 @@ version = "1.0" version = "0.3" [dev-dependencies.tokio] -version = "0.2.2" -features = ["fs", "macros", "io-std", "rt-util", "sync", "time", "test-util"] +version = "1" +features = [ + "fs", + "macros", + "io-std", + "io-util", + "rt", + "rt-multi-thread", + "sync", + "time", + "test-util", +] [dev-dependencies.tokio-test] -version = "0.2" +version = "0.4" [dev-dependencies.tokio-util] -version = "0.3" +version = "0.6" features = ["codec"] -[dev-dependencies.tower-util] -version = "0.3" +[dev-dependencies.tower] +version = "0.4" +features = [ + "make", + "util", +] [dev-dependencies.url] -version = "1.0" +version = "2.2" [features] __internal_happy_eyeballs_tests = [] -default = ["runtime", "stream"] +client = [] +default = [] +ffi = ["libc"] +full = [ + "client", + "http1", + "http2", + "server", + "stream", + "runtime", +] +http1 = [] +http2 = ["h2"] nightly = [] -runtime = ["tcp", "tokio/rt-core"] +runtime = [ + "tcp", + "tokio/rt", + "tokio/time", +] +server = [] stream = [] -tcp = ["socket2", "tokio/blocking", "tokio/tcp", "tokio/time"] +tcp = [ + "socket2", + "tokio/net", + "tokio/rt", + "tokio/time", +] + +[target."cfg(any(target_os = \"linux\", target_os = \"macos\"))".dev-dependencies.pnet_datalink] +version = "0.27.2" diff --git a/third_party/rust/hyper/LICENSE b/third_party/rust/hyper/LICENSE index ac660a41c99e..bc1e966ed96d 100644 --- a/third_party/rust/hyper/LICENSE +++ b/third_party/rust/hyper/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2014-2018 Sean McArthur +Copyright (c) 2014-2021 Sean McArthur Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -17,4 +17,3 @@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- diff --git a/third_party/rust/hyper/src/body/aggregate.rs b/third_party/rust/hyper/src/body/aggregate.rs index 97b6c2d91f2c..99662419d394 100644 --- a/third_party/rust/hyper/src/body/aggregate.rs +++ b/third_party/rust/hyper/src/body/aggregate.rs @@ -7,6 +7,12 @@ use crate::common::buf::BufList; /// /// The returned `impl Buf` groups the `Buf`s from the `HttpBody` without /// copying them. This is ideal if you don't require a contiguous buffer. +/// +/// # Note +/// +/// Care needs to be taken if the remote is untrusted. The function doesn't implement any length +/// checks and an malicious peer might make it consume arbitrary amounts of memory. Checking the +/// `Content-Length` is a possibility, but it is not strictly mandated to be present. pub async fn aggregate(body: T) -> Result where T: HttpBody, diff --git a/third_party/rust/hyper/src/body/body.rs b/third_party/rust/hyper/src/body/body.rs index 355f0d87136d..9dc1a034f980 100644 --- a/third_party/rust/hyper/src/body/body.rs +++ b/third_party/rust/hyper/src/body/body.rs @@ -4,25 +4,34 @@ use std::error::Error as StdError; use std::fmt; use bytes::Bytes; -use futures_channel::{mpsc, oneshot}; +use futures_channel::mpsc; +use futures_channel::oneshot; use futures_core::Stream; // for mpsc::Receiver #[cfg(feature = "stream")] use futures_util::TryStreamExt; use http::HeaderMap; use http_body::{Body as HttpBody, SizeHint}; +use super::DecodedLength; +#[cfg(feature = "stream")] use crate::common::sync_wrapper::SyncWrapper; -use crate::common::{task, watch, Future, Never, Pin, Poll}; +use crate::common::Future; +#[cfg(all(feature = "client", any(feature = "http1", feature = "http2")))] +use crate::common::Never; +use crate::common::{task, watch, Pin, Poll}; +#[cfg(all(feature = "http2", any(feature = "client", feature = "server")))] use crate::proto::h2::ping; -use crate::proto::DecodedLength; -use crate::upgrade::OnUpgrade; type BodySender = mpsc::Sender>; +type TrailersSender = oneshot::Sender; /// A stream of `Bytes`, used when receiving bodies. /// /// A good default [`HttpBody`](crate::body::HttpBody) to use in many /// applications. +/// +/// Note: To read the full body, use [`body::to_bytes`](crate::body::to_bytes) +/// or [`body::aggregate`](crate::body::aggregate). #[must_use = "streams do nothing unless polled"] pub struct Body { kind: Kind, @@ -36,13 +45,17 @@ enum Kind { Chan { content_length: DecodedLength, want_tx: watch::Sender, - rx: mpsc::Receiver>, + data_rx: mpsc::Receiver>, + trailers_rx: oneshot::Receiver, }, + #[cfg(all(feature = "http2", any(feature = "client", feature = "server")))] H2 { ping: ping::Recorder, content_length: DecodedLength, recv: h2::RecvStream, }, + #[cfg(feature = "ffi")] + Ffi(crate::ffi::UserBody), #[cfg(feature = "stream")] Wrapped( SyncWrapper< @@ -61,28 +74,42 @@ struct Extra { /// a brand new connection, since the pool didn't know about the idle /// connection yet. delayed_eof: Option, - on_upgrade: OnUpgrade, } +#[cfg(all(feature = "client", any(feature = "http1", feature = "http2")))] type DelayEofUntil = oneshot::Receiver; enum DelayEof { /// Initial state, stream hasn't seen EOF yet. + #[cfg(any(feature = "http1", feature = "http2"))] + #[cfg(feature = "client")] NotEof(DelayEofUntil), /// Transitions to this state once we've seen `poll` try to /// return EOF (`None`). This future is then polled, and /// when it completes, the Body finally returns EOF (`None`). 
+ #[cfg(any(feature = "http1", feature = "http2"))] + #[cfg(feature = "client")] Eof(DelayEofUntil), } -/// A sender half used with `Body::channel()`. +/// A sender half created through [`Body::channel()`]. /// -/// Useful when wanting to stream chunks from another thread. See -/// [`Body::channel`](Body::channel) for more. +/// Useful when wanting to stream chunks from another thread. +/// +/// ## Body Closing +/// +/// Note that the request body will always be closed normally when the sender is dropped (meaning +/// that the empty terminating chunk will be sent to the remote). If you desire to close the +/// connection with an incomplete response (e.g. in the case of an error during asynchronous +/// processing), call the [`Sender::abort()`] method to abort the body in an abnormal fashion. +/// +/// [`Body::channel()`]: struct.Body.html#method.channel +/// [`Sender::abort()`]: struct.Sender.html#method.abort #[must_use = "Sender does nothing unless sent on"] pub struct Sender { want_rx: watch::Receiver, - tx: BodySender, + data_tx: BodySender, + trailers_tx: Option, } const WANT_PENDING: usize = 1; @@ -113,7 +140,8 @@ impl Body { } pub(crate) fn new_channel(content_length: DecodedLength, wanter: bool) -> (Sender, Body) { - let (tx, rx) = mpsc::channel(0); + let (data_tx, data_rx) = mpsc::channel(0); + let (trailers_tx, trailers_rx) = oneshot::channel(); // If wanter is true, `Sender::poll_ready()` won't becoming ready // until the `Body` has been polled for data once. @@ -121,11 +149,16 @@ impl Body { let (want_tx, want_rx) = watch::channel(want); - let tx = Sender { want_rx, tx }; + let tx = Sender { + want_rx, + data_tx, + trailers_tx: Some(trailers_tx), + }; let rx = Body::new(Kind::Chan { content_length, want_tx, - rx, + data_rx, + trailers_rx, }); (tx, rx) @@ -153,6 +186,7 @@ impl Body { /// This function requires enabling the `stream` feature in your /// `Cargo.toml`. #[cfg(feature = "stream")] + #[cfg_attr(docsrs, doc(cfg(feature = "stream")))] pub fn wrap_stream(stream: S) -> Body where S: Stream> + Send + 'static, @@ -163,24 +197,21 @@ impl Body { Body::new(Kind::Wrapped(SyncWrapper::new(Box::pin(mapped)))) } - /// Converts this `Body` into a `Future` of a pending HTTP upgrade. - /// - /// See [the `upgrade` module](crate::upgrade) for more. - pub fn on_upgrade(self) -> OnUpgrade { - self.extra - .map(|ex| ex.on_upgrade) - .unwrap_or_else(OnUpgrade::none) - } - fn new(kind: Kind) -> Body { Body { kind, extra: None } } + #[cfg(all(feature = "http2", any(feature = "client", feature = "server")))] pub(crate) fn h2( recv: h2::RecvStream, - content_length: DecodedLength, + mut content_length: DecodedLength, ping: ping::Recorder, ) -> Self { + // If the stream is already EOS, then the "unknown length" is clearly + // actually ZERO. 
+ if !content_length.is_exact() && recv.is_end_stream() { + content_length = DecodedLength::ZERO; + } let body = Body::new(Kind::H2 { ping, content_length, @@ -190,13 +221,8 @@ impl Body { body } - pub(crate) fn set_on_upgrade(&mut self, upgrade: OnUpgrade) { - debug_assert!(!upgrade.is_none(), "set_on_upgrade with empty upgrade"); - let extra = self.extra_mut(); - debug_assert!(extra.on_upgrade.is_none(), "set_on_upgrade twice"); - extra.on_upgrade = upgrade; - } - + #[cfg(any(feature = "http1", feature = "http2"))] + #[cfg(feature = "client")] pub(crate) fn delayed_eof(&mut self, fut: DelayEofUntil) { self.extra_mut().delayed_eof = Some(DelayEof::NotEof(fut)); } @@ -207,17 +233,16 @@ impl Body { .and_then(|extra| extra.delayed_eof.take()) } + #[cfg(any(feature = "http1", feature = "http2"))] fn extra_mut(&mut self) -> &mut Extra { - self.extra.get_or_insert_with(|| { - Box::new(Extra { - delayed_eof: None, - on_upgrade: OnUpgrade::none(), - }) - }) + self.extra + .get_or_insert_with(|| Box::new(Extra { delayed_eof: None })) } fn poll_eof(&mut self, cx: &mut task::Context<'_>) -> Poll>> { match self.take_delayed_eof() { + #[cfg(any(feature = "http1", feature = "http2"))] + #[cfg(feature = "client")] Some(DelayEof::NotEof(mut delay)) => match self.poll_inner(cx) { ok @ Poll::Ready(Some(Ok(..))) | ok @ Poll::Pending => { self.extra_mut().delayed_eof = Some(DelayEof::NotEof(delay)); @@ -233,6 +258,8 @@ impl Body { }, Poll::Ready(Some(Err(e))) => Poll::Ready(Some(Err(e))), }, + #[cfg(any(feature = "http1", feature = "http2"))] + #[cfg(feature = "client")] Some(DelayEof::Eof(mut delay)) => match Pin::new(&mut delay).poll(cx) { Poll::Ready(Ok(never)) => match never {}, Poll::Pending => { @@ -241,21 +268,42 @@ impl Body { } Poll::Ready(Err(_done)) => Poll::Ready(None), }, + #[cfg(any( + not(any(feature = "http1", feature = "http2")), + not(feature = "client") + ))] + Some(delay_eof) => match delay_eof {}, None => self.poll_inner(cx), } } + #[cfg(feature = "ffi")] + pub(crate) fn as_ffi_mut(&mut self) -> &mut crate::ffi::UserBody { + match self.kind { + Kind::Ffi(ref mut body) => return body, + _ => { + self.kind = Kind::Ffi(crate::ffi::UserBody::new()); + } + } + + match self.kind { + Kind::Ffi(ref mut body) => body, + _ => unreachable!(), + } + } + fn poll_inner(&mut self, cx: &mut task::Context<'_>) -> Poll>> { match self.kind { Kind::Once(ref mut val) => Poll::Ready(val.take().map(Ok)), Kind::Chan { content_length: ref mut len, - ref mut rx, + ref mut data_rx, ref mut want_tx, + .. } => { want_tx.send(WANT_READY); - match ready!(Pin::new(rx).poll_next(cx)?) { + match ready!(Pin::new(data_rx).poll_next(cx)?) 
{ Some(chunk) => { len.sub_if(chunk.len() as u64); Poll::Ready(Some(Ok(chunk))) @@ -263,6 +311,7 @@ impl Body { None => Poll::Ready(None), } } + #[cfg(all(feature = "http2", any(feature = "client", feature = "server")))] Kind::H2 { ref ping, recv: ref mut h2, @@ -278,6 +327,9 @@ impl Body { None => Poll::Ready(None), }, + #[cfg(feature = "ffi")] + Kind::Ffi(ref mut body) => body.poll_data(cx), + #[cfg(feature = "stream")] Kind::Wrapped(ref mut s) => match ready!(s.get_mut().as_mut().poll_next(cx)) { Some(res) => Poll::Ready(Some(res.map_err(crate::Error::new_body))), @@ -286,6 +338,7 @@ impl Body { } } + #[cfg(feature = "http1")] pub(super) fn take_full_data(&mut self) -> Option { if let Kind::Once(ref mut chunk) = self.kind { chunk.take() @@ -315,10 +368,11 @@ impl HttpBody for Body { } fn poll_trailers( - mut self: Pin<&mut Self>, - cx: &mut task::Context<'_>, + #[cfg_attr(not(feature = "http2"), allow(unused_mut))] mut self: Pin<&mut Self>, + #[cfg_attr(not(feature = "http2"), allow(unused))] cx: &mut task::Context<'_>, ) -> Poll, Self::Error>> { match self.kind { + #[cfg(all(feature = "http2", any(feature = "client", feature = "server")))] Kind::H2 { recv: ref mut h2, ref ping, @@ -330,6 +384,15 @@ impl HttpBody for Body { } Err(e) => Poll::Ready(Err(crate::Error::new_h2(e))), }, + Kind::Chan { + ref mut trailers_rx, + .. + } => match ready!(Pin::new(trailers_rx).poll(cx)) { + Ok(t) => Poll::Ready(Ok(Some(t))), + Err(_) => Poll::Ready(Ok(None)), + }, + #[cfg(feature = "ffi")] + Kind::Ffi(ref mut body) => body.poll_trailers(cx), _ => Poll::Ready(Ok(None)), } } @@ -338,27 +401,38 @@ impl HttpBody for Body { match self.kind { Kind::Once(ref val) => val.is_none(), Kind::Chan { content_length, .. } => content_length == DecodedLength::ZERO, + #[cfg(all(feature = "http2", any(feature = "client", feature = "server")))] Kind::H2 { recv: ref h2, .. } => h2.is_end_stream(), + #[cfg(feature = "ffi")] + Kind::Ffi(..) => false, #[cfg(feature = "stream")] Kind::Wrapped(..) => false, } } fn size_hint(&self) -> SizeHint { + macro_rules! opt_len { + ($content_length:expr) => {{ + let mut hint = SizeHint::default(); + + if let Some(content_length) = $content_length.into_opt() { + hint.set_exact(content_length); + } + + hint + }}; + } + match self.kind { Kind::Once(Some(ref val)) => SizeHint::with_exact(val.len() as u64), Kind::Once(None) => SizeHint::with_exact(0), #[cfg(feature = "stream")] Kind::Wrapped(..) => SizeHint::default(), - Kind::Chan { content_length, .. } | Kind::H2 { content_length, .. } => { - let mut hint = SizeHint::default(); - - if let Some(content_length) = content_length.into_opt() { - hint.set_exact(content_length); - } - - hint - } + Kind::Chan { content_length, .. } => opt_len!(content_length), + #[cfg(all(feature = "http2", any(feature = "client", feature = "server")))] + Kind::H2 { content_length, .. } => opt_len!(content_length), + #[cfg(feature = "ffi")] + Kind::Ffi(..) => SizeHint::default(), } } } @@ -474,7 +548,7 @@ impl Sender { pub fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll> { // Check if the receiver end has tried polling for the body yet ready!(self.poll_want(cx)?); - self.tx + self.data_tx .poll_ready(cx) .map_err(|_| crate::Error::new_closed()) } @@ -492,14 +566,23 @@ impl Sender { futures_util::future::poll_fn(|cx| self.poll_ready(cx)).await } - /// Send data on this channel when it is ready. + /// Send data on data channel when it is ready. 
pub async fn send_data(&mut self, chunk: Bytes) -> crate::Result<()> { self.ready().await?; - self.tx + self.data_tx .try_send(Ok(chunk)) .map_err(|_| crate::Error::new_closed()) } + /// Send trailers on trailers channel. + pub async fn send_trailers(&mut self, trailers: HeaderMap) -> crate::Result<()> { + let tx = match self.trailers_tx.take() { + Some(tx) => tx, + None => return Err(crate::Error::new_closed()), + }; + tx.send(trailers).map_err(|_| crate::Error::new_closed()) + } + /// Try to send data on this channel. /// /// # Errors @@ -513,7 +596,7 @@ impl Sender { /// that doesn't have an async context. If in an async context, prefer /// `send_data()` instead. pub fn try_send_data(&mut self, chunk: Bytes) -> Result<(), Bytes> { - self.tx + self.data_tx .try_send(Ok(chunk)) .map_err(|err| err.into_inner().expect("just sent Ok")) } @@ -521,14 +604,15 @@ impl Sender { /// Aborts the body in an abnormal fashion. pub fn abort(self) { let _ = self - .tx + .data_tx // clone so the send works even if buffer is full .clone() .try_send(Err(crate::Error::new_body_write_aborted())); } + #[cfg(feature = "http1")] pub(crate) fn send_error(&mut self, err: crate::Error) { - let _ = self.tx.try_send(Err(err)); + let _ = self.data_tx.try_send(Err(err)); } } @@ -574,7 +658,7 @@ mod tests { assert_eq!( mem::size_of::(), - mem::size_of::() * 4, + mem::size_of::() * 5, "Sender" ); diff --git a/third_party/rust/hyper/src/body/length.rs b/third_party/rust/hyper/src/body/length.rs new file mode 100644 index 000000000000..e2bbee803930 --- /dev/null +++ b/third_party/rust/hyper/src/body/length.rs @@ -0,0 +1,123 @@ +use std::fmt; + +#[derive(Clone, Copy, PartialEq, Eq)] +pub(crate) struct DecodedLength(u64); + +#[cfg(any(feature = "http1", feature = "http2"))] +impl From> for DecodedLength { + fn from(len: Option) -> Self { + len.and_then(|len| { + // If the length is u64::MAX, oh well, just reported chunked. + Self::checked_new(len).ok() + }) + .unwrap_or(DecodedLength::CHUNKED) + } +} + +#[cfg(any(feature = "http1", feature = "http2", test))] +const MAX_LEN: u64 = std::u64::MAX - 2; + +impl DecodedLength { + pub(crate) const CLOSE_DELIMITED: DecodedLength = DecodedLength(::std::u64::MAX); + pub(crate) const CHUNKED: DecodedLength = DecodedLength(::std::u64::MAX - 1); + pub(crate) const ZERO: DecodedLength = DecodedLength(0); + + #[cfg(test)] + pub(crate) fn new(len: u64) -> Self { + debug_assert!(len <= MAX_LEN); + DecodedLength(len) + } + + /// Takes the length as a content-length without other checks. + /// + /// Should only be called if previously confirmed this isn't + /// CLOSE_DELIMITED or CHUNKED. + #[inline] + #[cfg(feature = "http1")] + pub(crate) fn danger_len(self) -> u64 { + debug_assert!(self.0 < Self::CHUNKED.0); + self.0 + } + + /// Converts to an Option representing a Known or Unknown length. + pub(crate) fn into_opt(self) -> Option { + match self { + DecodedLength::CHUNKED | DecodedLength::CLOSE_DELIMITED => None, + DecodedLength(known) => Some(known), + } + } + + /// Checks the `u64` is within the maximum allowed for content-length. 
+ #[cfg(any(feature = "http1", feature = "http2"))] + pub(crate) fn checked_new(len: u64) -> Result { + use tracing::warn; + + if len <= MAX_LEN { + Ok(DecodedLength(len)) + } else { + warn!("content-length bigger than maximum: {} > {}", len, MAX_LEN); + Err(crate::error::Parse::TooLarge) + } + } + + pub(crate) fn sub_if(&mut self, amt: u64) { + match *self { + DecodedLength::CHUNKED | DecodedLength::CLOSE_DELIMITED => (), + DecodedLength(ref mut known) => { + *known -= amt; + } + } + } + + /// Returns whether this represents an exact length. + /// + /// This includes 0, which of course is an exact known length. + /// + /// It would return false if "chunked" or otherwise size-unknown. + #[cfg(feature = "http2")] + pub(crate) fn is_exact(&self) -> bool { + self.0 <= MAX_LEN + } +} + +impl fmt::Debug for DecodedLength { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match *self { + DecodedLength::CLOSE_DELIMITED => f.write_str("CLOSE_DELIMITED"), + DecodedLength::CHUNKED => f.write_str("CHUNKED"), + DecodedLength(n) => f.debug_tuple("DecodedLength").field(&n).finish(), + } + } +} + +impl fmt::Display for DecodedLength { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match *self { + DecodedLength::CLOSE_DELIMITED => f.write_str("close-delimited"), + DecodedLength::CHUNKED => f.write_str("chunked encoding"), + DecodedLength::ZERO => f.write_str("empty"), + DecodedLength(n) => write!(f, "content-length ({} bytes)", n), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn sub_if_known() { + let mut len = DecodedLength::new(30); + len.sub_if(20); + + assert_eq!(len.0, 10); + } + + #[test] + fn sub_if_chunked() { + let mut len = DecodedLength::CHUNKED; + len.sub_if(20); + + assert_eq!(len, DecodedLength::CHUNKED); + } +} diff --git a/third_party/rust/hyper/src/body/mod.rs b/third_party/rust/hyper/src/body/mod.rs index ce704f5bd251..5e2181e941c0 100644 --- a/third_party/rust/hyper/src/body/mod.rs +++ b/third_party/rust/hyper/src/body/mod.rs @@ -17,18 +17,22 @@ pub use bytes::{Buf, Bytes}; pub use http_body::Body as HttpBody; +pub use http_body::SizeHint; pub use self::aggregate::aggregate; pub use self::body::{Body, Sender}; +pub(crate) use self::length::DecodedLength; pub use self::to_bytes::to_bytes; mod aggregate; mod body; +mod length; mod to_bytes; /// An optimization to try to take a full body if immediately available. /// /// This is currently limited to *only* `hyper::Body`s. +#[cfg(feature = "http1")] pub(crate) fn take_full_data(body: &mut T) -> Option { use std::any::{Any, TypeId}; diff --git a/third_party/rust/hyper/src/body/to_bytes.rs b/third_party/rust/hyper/src/body/to_bytes.rs index 4cce7857d7d9..c3c48d344002 100644 --- a/third_party/rust/hyper/src/body/to_bytes.rs +++ b/third_party/rust/hyper/src/body/to_bytes.rs @@ -7,6 +7,43 @@ use super::HttpBody; /// This may require copying the data into a single buffer. If you don't need /// a contiguous buffer, prefer the [`aggregate`](crate::body::aggregate()) /// function. +/// +/// # Note +/// +/// Care needs to be taken if the remote is untrusted. The function doesn't implement any length +/// checks and an malicious peer might make it consume arbitrary amounts of memory. Checking the +/// `Content-Length` is a possibility, but it is not strictly mandated to be present. 
+/// +/// # Example +/// +/// ``` +/// # #[cfg(all(feature = "client", any(feature = "http1", feature = "http2")))] +/// # async fn doc() -> hyper::Result<()> { +/// use hyper::{body::HttpBody}; +/// +/// # let request = hyper::Request::builder() +/// # .method(hyper::Method::POST) +/// # .uri("http://httpbin.org/post") +/// # .header("content-type", "application/json") +/// # .body(hyper::Body::from(r#"{"library":"hyper"}"#)).unwrap(); +/// # let client = hyper::Client::new(); +/// let response = client.request(request).await?; +/// +/// const MAX_ALLOWED_RESPONSE_SIZE: u64 = 1024; +/// +/// let response_content_length = match response.body().size_hint().upper() { +/// Some(v) => v, +/// None => MAX_ALLOWED_RESPONSE_SIZE + 1 // Just to protect ourselves from a malicious response +/// }; +/// +/// if response_content_length < MAX_ALLOWED_RESPONSE_SIZE { +/// let body_bytes = hyper::body::to_bytes(response.into_body()).await?; +/// println!("body: {:?}", body_bytes); +/// } +/// +/// # Ok(()) +/// # } +/// ``` pub async fn to_bytes(body: T) -> Result where T: HttpBody, @@ -23,7 +60,7 @@ where let second = if let Some(buf) = body.data().await { buf? } else { - return Ok(first.to_bytes()); + return Ok(first.copy_to_bytes(first.remaining())); }; // With more than 1 buf, we gotta flatten into a Vec first. diff --git a/third_party/rust/hyper/src/cfg.rs b/third_party/rust/hyper/src/cfg.rs new file mode 100644 index 000000000000..71a5351d213f --- /dev/null +++ b/third_party/rust/hyper/src/cfg.rs @@ -0,0 +1,44 @@ +macro_rules! cfg_feature { + ( + #![$meta:meta] + $($item:item)* + ) => { + $( + #[cfg($meta)] + #[cfg_attr(docsrs, doc(cfg($meta)))] + $item + )* + } +} + +macro_rules! cfg_proto { + ($($item:item)*) => { + cfg_feature! { + #![all( + any(feature = "http1", feature = "http2"), + any(feature = "client", feature = "server"), + )] + $($item)* + } + } +} + +cfg_proto! { + macro_rules! cfg_client { + ($($item:item)*) => { + cfg_feature! { + #![feature = "client"] + $($item)* + } + } + } + + macro_rules! cfg_server { + ($($item:item)*) => { + cfg_feature! { + #![feature = "server"] + $($item)* + } + } + } +} diff --git a/third_party/rust/hyper/src/client/client.rs b/third_party/rust/hyper/src/client/client.rs new file mode 100644 index 000000000000..cfdd267a11a4 --- /dev/null +++ b/third_party/rust/hyper/src/client/client.rs @@ -0,0 +1,1462 @@ +use std::error::Error as StdError; +use std::fmt; +use std::mem; +use std::time::Duration; + +use futures_channel::oneshot; +use futures_util::future::{self, Either, FutureExt as _, TryFutureExt as _}; +use http::header::{HeaderValue, HOST}; +use http::uri::{Port, Scheme}; +use http::{Method, Request, Response, Uri, Version}; +use tracing::{debug, trace, warn}; + +use super::conn; +use super::connect::{self, sealed::Connect, Alpn, Connected, Connection}; +use super::pool::{ + self, CheckoutIsClosedError, Key as PoolKey, Pool, Poolable, Pooled, Reservation, +}; +#[cfg(feature = "tcp")] +use super::HttpConnector; +use crate::body::{Body, HttpBody}; +use crate::common::{exec::BoxSendFuture, sync_wrapper::SyncWrapper, lazy as hyper_lazy, task, Future, Lazy, Pin, Poll}; +use crate::rt::Executor; + +/// A Client to make outgoing HTTP requests. +/// +/// `Client` is cheap to clone and cloning is the recommended way to share a `Client`. The +/// underlying connection pool will be reused. 
+#[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))] +pub struct Client { + config: Config, + conn_builder: conn::Builder, + connector: C, + pool: Pool>, +} + +#[derive(Clone, Copy, Debug)] +struct Config { + retry_canceled_requests: bool, + set_host: bool, + ver: Ver, +} + +/// A `Future` that will resolve to an HTTP Response. +/// +/// This is returned by `Client::request` (and `Client::get`). +#[must_use = "futures do nothing unless polled"] +pub struct ResponseFuture { + inner: SyncWrapper>> + Send>>>, +} + +// ===== impl Client ===== + +#[cfg(feature = "tcp")] +impl Client { + /// Create a new Client with the default [config](Builder). + /// + /// # Note + /// + /// The default connector does **not** handle TLS. Speaking to `https` + /// destinations will require [configuring a connector that implements + /// TLS](https://hyper.rs/guides/client/configuration). + #[cfg_attr(docsrs, doc(cfg(feature = "tcp")))] + #[inline] + pub fn new() -> Client { + Builder::default().build_http() + } +} + +#[cfg(feature = "tcp")] +impl Default for Client { + fn default() -> Client { + Client::new() + } +} + +impl Client<(), Body> { + /// Create a builder to configure a new `Client`. + /// + /// # Example + /// + /// ``` + /// # #[cfg(feature = "runtime")] + /// # fn run () { + /// use std::time::Duration; + /// use hyper::Client; + /// + /// let client = Client::builder() + /// .pool_idle_timeout(Duration::from_secs(30)) + /// .http2_only(true) + /// .build_http(); + /// # let infer: Client<_, hyper::Body> = client; + /// # drop(infer); + /// # } + /// # fn main() {} + /// ``` + #[inline] + pub fn builder() -> Builder { + Builder::default() + } +} + +impl Client +where + C: Connect + Clone + Send + Sync + 'static, + B: HttpBody + Send + 'static, + B::Data: Send, + B::Error: Into>, +{ + /// Send a `GET` request to the supplied `Uri`. + /// + /// # Note + /// + /// This requires that the `HttpBody` type have a `Default` implementation. + /// It *should* return an "empty" version of itself, such that + /// `HttpBody::is_end_stream` is `true`. + /// + /// # Example + /// + /// ``` + /// # #[cfg(feature = "runtime")] + /// # fn run () { + /// use hyper::{Client, Uri}; + /// + /// let client = Client::new(); + /// + /// let future = client.get(Uri::from_static("http://httpbin.org/ip")); + /// # } + /// # fn main() {} + /// ``` + pub fn get(&self, uri: Uri) -> ResponseFuture + where + B: Default, + { + let body = B::default(); + if !body.is_end_stream() { + warn!("default HttpBody used for get() does not return true for is_end_stream"); + } + + let mut req = Request::new(body); + *req.uri_mut() = uri; + self.request(req) + } + + /// Send a constructed `Request` using this `Client`. 
+ /// + /// # Example + /// + /// ``` + /// # #[cfg(feature = "runtime")] + /// # fn run () { + /// use hyper::{Body, Method, Client, Request}; + /// + /// let client = Client::new(); + /// + /// let req = Request::builder() + /// .method(Method::POST) + /// .uri("http://httpbin.org/post") + /// .body(Body::from("Hallo!")) + /// .expect("request builder"); + /// + /// let future = client.request(req); + /// # } + /// # fn main() {} + /// ``` + pub fn request(&self, mut req: Request) -> ResponseFuture { + let is_http_connect = req.method() == Method::CONNECT; + match req.version() { + Version::HTTP_11 => (), + Version::HTTP_10 => { + if is_http_connect { + warn!("CONNECT is not allowed for HTTP/1.0"); + return ResponseFuture::new(future::err( + crate::Error::new_user_unsupported_request_method(), + )); + } + } + Version::HTTP_2 => (), + // completely unsupported HTTP version (like HTTP/0.9)! + other => return ResponseFuture::error_version(other), + }; + + let pool_key = match extract_domain(req.uri_mut(), is_http_connect) { + Ok(s) => s, + Err(err) => { + return ResponseFuture::new(future::err(err)); + } + }; + + ResponseFuture::new(self.clone().retryably_send_request(req, pool_key)) + } + + async fn retryably_send_request( + self, + mut req: Request, + pool_key: PoolKey, + ) -> crate::Result> { + let uri = req.uri().clone(); + + loop { + req = match self.send_request(req, pool_key.clone()).await { + Ok(resp) => return Ok(resp), + Err(ClientError::Normal(err)) => return Err(err), + Err(ClientError::Canceled { + connection_reused, + mut req, + reason, + }) => { + if !self.config.retry_canceled_requests || !connection_reused { + // if client disabled, don't retry + // a fresh connection means we definitely can't retry + return Err(reason); + } + + trace!( + "unstarted request canceled, trying again (reason={:?})", + reason + ); + *req.uri_mut() = uri.clone(); + req + } + } + } + } + + async fn send_request( + &self, + mut req: Request, + pool_key: PoolKey, + ) -> Result, ClientError> { + let mut pooled = match self.connection_for(pool_key).await { + Ok(pooled) => pooled, + Err(ClientConnectError::Normal(err)) => return Err(ClientError::Normal(err)), + Err(ClientConnectError::H2CheckoutIsClosed(reason)) => { + return Err(ClientError::Canceled { + connection_reused: true, + req, + reason, + }) + } + }; + + if pooled.is_http1() { + if req.version() == Version::HTTP_2 { + warn!("Connection is HTTP/1, but request requires HTTP/2"); + return Err(ClientError::Normal( + crate::Error::new_user_unsupported_version(), + )); + } + + if self.config.set_host { + let uri = req.uri().clone(); + req.headers_mut().entry(HOST).or_insert_with(|| { + let hostname = uri.host().expect("authority implies host"); + if let Some(port) = get_non_default_port(&uri) { + let s = format!("{}:{}", hostname, port); + HeaderValue::from_str(&s) + } else { + HeaderValue::from_str(hostname) + } + .expect("uri host is valid header value") + }); + } + + // CONNECT always sends authority-form, so check it first... + if req.method() == Method::CONNECT { + authority_form(req.uri_mut()); + } else if pooled.conn_info.is_proxied { + absolute_form(req.uri_mut()); + } else { + origin_form(req.uri_mut()); + } + } else if req.method() == Method::CONNECT { + authority_form(req.uri_mut()); + } + + let fut = pooled + .send_request_retryable(req) + .map_err(ClientError::map_with_reused(pooled.is_reused())); + + // If the Connector included 'extra' info, add to Response... 
+ let extra_info = pooled.conn_info.extra.clone(); + let fut = fut.map_ok(move |mut res| { + if let Some(extra) = extra_info { + extra.set(res.extensions_mut()); + } + res + }); + + // As of futures@0.1.21, there is a race condition in the mpsc + // channel, such that sending when the receiver is closing can + // result in the message being stuck inside the queue. It won't + // ever notify until the Sender side is dropped. + // + // To counteract this, we must check if our senders 'want' channel + // has been closed after having tried to send. If so, error out... + if pooled.is_closed() { + return fut.await; + } + + let mut res = fut.await?; + + // If pooled is HTTP/2, we can toss this reference immediately. + // + // when pooled is dropped, it will try to insert back into the + // pool. To delay that, spawn a future that completes once the + // sender is ready again. + // + // This *should* only be once the related `Connection` has polled + // for a new request to start. + // + // It won't be ready if there is a body to stream. + if pooled.is_http2() || !pooled.is_pool_enabled() || pooled.is_ready() { + drop(pooled); + } else if !res.body().is_end_stream() { + let (delayed_tx, delayed_rx) = oneshot::channel(); + res.body_mut().delayed_eof(delayed_rx); + let on_idle = future::poll_fn(move |cx| pooled.poll_ready(cx)).map(move |_| { + // At this point, `pooled` is dropped, and had a chance + // to insert into the pool (if conn was idle) + drop(delayed_tx); + }); + + self.conn_builder.exec.execute(on_idle); + } else { + // There's no body to delay, but the connection isn't + // ready yet. Only re-insert when it's ready + let on_idle = future::poll_fn(move |cx| pooled.poll_ready(cx)).map(|_| ()); + + self.conn_builder.exec.execute(on_idle); + } + + Ok(res) + } + + async fn connection_for( + &self, + pool_key: PoolKey, + ) -> Result>, ClientConnectError> { + // This actually races 2 different futures to try to get a ready + // connection the fastest, and to reduce connection churn. + // + // - If the pool has an idle connection waiting, that's used + // immediately. + // - Otherwise, the Connector is asked to start connecting to + // the destination Uri. + // - Meanwhile, the pool Checkout is watching to see if any other + // request finishes and tries to insert an idle connection. + // - If a new connection is started, but the Checkout wins after + // (an idle connection became available first), the started + // connection future is spawned into the runtime to complete, + // and then be inserted into the pool as an idle connection. + let checkout = self.pool.checkout(pool_key.clone()); + let connect = self.connect_to(pool_key); + let is_ver_h2 = self.config.ver == Ver::Http2; + + // The order of the `select` is depended on below... + + match future::select(checkout, connect).await { + // Checkout won, connect future may have been started or not. + // + // If it has, let it finish and insert back into the pool, + // so as to not waste the socket... + Either::Left((Ok(checked_out), connecting)) => { + // This depends on the `select` above having the correct + // order, such that if the checkout future were ready + // immediately, the connect future will never have been + // started. + // + // If it *wasn't* ready yet, then the connect future will + // have been started... + if connecting.started() { + let bg = connecting + .map_err(|err| { + trace!("background connect error: {}", err); + }) + .map(|_pooled| { + // dropping here should just place it in + // the Pool for us... 
+ }); + // An execute error here isn't important, we're just trying + // to prevent a waste of a socket... + self.conn_builder.exec.execute(bg); + } + Ok(checked_out) + } + // Connect won, checkout can just be dropped. + Either::Right((Ok(connected), _checkout)) => Ok(connected), + // Either checkout or connect could get canceled: + // + // 1. Connect is canceled if this is HTTP/2 and there is + // an outstanding HTTP/2 connecting task. + // 2. Checkout is canceled if the pool cannot deliver an + // idle connection reliably. + // + // In both cases, we should just wait for the other future. + Either::Left((Err(err), connecting)) => { + if err.is_canceled() { + connecting.await.map_err(ClientConnectError::Normal) + } else { + Err(ClientConnectError::Normal(err)) + } + } + Either::Right((Err(err), checkout)) => { + if err.is_canceled() { + checkout.await.map_err(move |err| { + if is_ver_h2 + && err.is_canceled() + && err.find_source::().is_some() + { + ClientConnectError::H2CheckoutIsClosed(err) + } else { + ClientConnectError::Normal(err) + } + }) + } else { + Err(ClientConnectError::Normal(err)) + } + } + } + } + + fn connect_to( + &self, + pool_key: PoolKey, + ) -> impl Lazy>>> + Unpin { + let executor = self.conn_builder.exec.clone(); + let pool = self.pool.clone(); + #[cfg(not(feature = "http2"))] + let conn_builder = self.conn_builder.clone(); + #[cfg(feature = "http2")] + let mut conn_builder = self.conn_builder.clone(); + let ver = self.config.ver; + let is_ver_h2 = ver == Ver::Http2; + let connector = self.connector.clone(); + let dst = domain_as_uri(pool_key.clone()); + hyper_lazy(move || { + // Try to take a "connecting lock". + // + // If the pool_key is for HTTP/2, and there is already a + // connection being established, then this can't take a + // second lock. The "connect_to" future is Canceled. + let connecting = match pool.connecting(&pool_key, ver) { + Some(lock) => lock, + None => { + let canceled = + crate::Error::new_canceled().with("HTTP/2 connection in progress"); + return Either::Right(future::err(canceled)); + } + }; + Either::Left( + connector + .connect(connect::sealed::Internal, dst) + .map_err(crate::Error::new_connect) + .and_then(move |io| { + let connected = io.connected(); + // If ALPN is h2 and we aren't http2_only already, + // then we need to convert our pool checkout into + // a single HTTP2 one. + let connecting = if connected.alpn == Alpn::H2 && !is_ver_h2 { + match connecting.alpn_h2(&pool) { + Some(lock) => { + trace!("ALPN negotiated h2, updating pool"); + lock + } + None => { + // Another connection has already upgraded, + // the pool checkout should finish up for us. 
+ let canceled = crate::Error::new_canceled() + .with("ALPN upgraded to HTTP/2"); + return Either::Right(future::err(canceled)); + } + } + } else { + connecting + }; + + #[cfg_attr(not(feature = "http2"), allow(unused))] + let is_h2 = is_ver_h2 || connected.alpn == Alpn::H2; + #[cfg(feature = "http2")] + { + conn_builder.http2_only(is_h2); + } + + Either::Left(Box::pin(async move { + let (tx, conn) = conn_builder.handshake(io).await?; + + trace!("handshake complete, spawning background dispatcher task"); + executor.execute( + conn.map_err(|e| debug!("client connection error: {}", e)) + .map(|_| ()), + ); + + // Wait for 'conn' to ready up before we + // declare this tx as usable + let tx = tx.when_ready().await?; + + let tx = { + #[cfg(feature = "http2")] + { + if is_h2 { + PoolTx::Http2(tx.into_http2()) + } else { + PoolTx::Http1(tx) + } + } + #[cfg(not(feature = "http2"))] + PoolTx::Http1(tx) + }; + + Ok(pool.pooled( + connecting, + PoolClient { + conn_info: connected, + tx, + }, + )) + })) + }), + ) + }) + } +} + +impl tower_service::Service> for Client +where + C: Connect + Clone + Send + Sync + 'static, + B: HttpBody + Send + 'static, + B::Data: Send, + B::Error: Into>, +{ + type Response = Response; + type Error = crate::Error; + type Future = ResponseFuture; + + fn poll_ready(&mut self, _: &mut task::Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn call(&mut self, req: Request) -> Self::Future { + self.request(req) + } +} + +impl tower_service::Service> for &'_ Client +where + C: Connect + Clone + Send + Sync + 'static, + B: HttpBody + Send + 'static, + B::Data: Send, + B::Error: Into>, +{ + type Response = Response; + type Error = crate::Error; + type Future = ResponseFuture; + + fn poll_ready(&mut self, _: &mut task::Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn call(&mut self, req: Request) -> Self::Future { + self.request(req) + } +} + +impl Clone for Client { + fn clone(&self) -> Client { + Client { + config: self.config.clone(), + conn_builder: self.conn_builder.clone(), + connector: self.connector.clone(), + pool: self.pool.clone(), + } + } +} + +impl fmt::Debug for Client { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Client").finish() + } +} + +// ===== impl ResponseFuture ===== + +impl ResponseFuture { + fn new(value: F) -> Self + where + F: Future>> + Send + 'static, + { + Self { + inner: SyncWrapper::new(Box::pin(value)) + } + } + + fn error_version(ver: Version) -> Self { + warn!("Request has unsupported version \"{:?}\"", ver); + ResponseFuture::new(Box::pin(future::err( + crate::Error::new_user_unsupported_version(), + ))) + } +} + +impl fmt::Debug for ResponseFuture { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.pad("Future") + } +} + +impl Future for ResponseFuture { + type Output = crate::Result>; + + fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { + self.inner.get_mut().as_mut().poll(cx) + } +} + +// ===== impl PoolClient ===== + +// FIXME: allow() required due to `impl Trait` leaking types to this lint +#[allow(missing_debug_implementations)] +struct PoolClient { + conn_info: Connected, + tx: PoolTx, +} + +enum PoolTx { + Http1(conn::SendRequest), + #[cfg(feature = "http2")] + Http2(conn::Http2SendRequest), +} + +impl PoolClient { + fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll> { + match self.tx { + PoolTx::Http1(ref mut tx) => tx.poll_ready(cx), + #[cfg(feature = "http2")] + PoolTx::Http2(_) => Poll::Ready(Ok(())), + } + } + + fn is_http1(&self) -> 
bool { + !self.is_http2() + } + + fn is_http2(&self) -> bool { + match self.tx { + PoolTx::Http1(_) => false, + #[cfg(feature = "http2")] + PoolTx::Http2(_) => true, + } + } + + fn is_ready(&self) -> bool { + match self.tx { + PoolTx::Http1(ref tx) => tx.is_ready(), + #[cfg(feature = "http2")] + PoolTx::Http2(ref tx) => tx.is_ready(), + } + } + + fn is_closed(&self) -> bool { + match self.tx { + PoolTx::Http1(ref tx) => tx.is_closed(), + #[cfg(feature = "http2")] + PoolTx::Http2(ref tx) => tx.is_closed(), + } + } +} + +impl PoolClient { + fn send_request_retryable( + &mut self, + req: Request, + ) -> impl Future, (crate::Error, Option>)>> + where + B: Send, + { + match self.tx { + #[cfg(not(feature = "http2"))] + PoolTx::Http1(ref mut tx) => tx.send_request_retryable(req), + #[cfg(feature = "http2")] + PoolTx::Http1(ref mut tx) => Either::Left(tx.send_request_retryable(req)), + #[cfg(feature = "http2")] + PoolTx::Http2(ref mut tx) => Either::Right(tx.send_request_retryable(req)), + } + } +} + +impl Poolable for PoolClient +where + B: Send + 'static, +{ + fn is_open(&self) -> bool { + match self.tx { + PoolTx::Http1(ref tx) => tx.is_ready(), + #[cfg(feature = "http2")] + PoolTx::Http2(ref tx) => tx.is_ready(), + } + } + + fn reserve(self) -> Reservation { + match self.tx { + PoolTx::Http1(tx) => Reservation::Unique(PoolClient { + conn_info: self.conn_info, + tx: PoolTx::Http1(tx), + }), + #[cfg(feature = "http2")] + PoolTx::Http2(tx) => { + let b = PoolClient { + conn_info: self.conn_info.clone(), + tx: PoolTx::Http2(tx.clone()), + }; + let a = PoolClient { + conn_info: self.conn_info, + tx: PoolTx::Http2(tx), + }; + Reservation::Shared(a, b) + } + } + } + + fn can_share(&self) -> bool { + self.is_http2() + } +} + +// ===== impl ClientError ===== + +// FIXME: allow() required due to `impl Trait` leaking types to this lint +#[allow(missing_debug_implementations)] +enum ClientError { + Normal(crate::Error), + Canceled { + connection_reused: bool, + req: Request, + reason: crate::Error, + }, +} + +impl ClientError { + fn map_with_reused(conn_reused: bool) -> impl Fn((crate::Error, Option>)) -> Self { + move |(err, orig_req)| { + if let Some(req) = orig_req { + ClientError::Canceled { + connection_reused: conn_reused, + reason: err, + req, + } + } else { + ClientError::Normal(err) + } + } + } +} + +enum ClientConnectError { + Normal(crate::Error), + H2CheckoutIsClosed(crate::Error), +} + +/// A marker to identify what version a pooled connection is. +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] +pub(super) enum Ver { + Auto, + Http2, +} + +fn origin_form(uri: &mut Uri) { + let path = match uri.path_and_query() { + Some(path) if path.as_str() != "/" => { + let mut parts = ::http::uri::Parts::default(); + parts.path_and_query = Some(path.clone()); + Uri::from_parts(parts).expect("path is valid uri") + } + _none_or_just_slash => { + debug_assert!(Uri::default() == "/"); + Uri::default() + } + }; + *uri = path +} + +fn absolute_form(uri: &mut Uri) { + debug_assert!(uri.scheme().is_some(), "absolute_form needs a scheme"); + debug_assert!( + uri.authority().is_some(), + "absolute_form needs an authority" + ); + // If the URI is to HTTPS, and the connector claimed to be a proxy, + // then it *should* have tunneled, and so we don't want to send + // absolute-form in that case. 
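The URI helpers in this stretch (`origin_form` above, `absolute_form` continuing just below) only rebuild a `Uri` from selected parts. A self-contained sketch of the origin-form rewrite using just the `http` crate; `to_origin_form` is an illustrative name, not hyper's, and the assertions mirror the `test_origin_form` / `set_relative_uri_with_implicit_path` unit tests further down.

```rust
// Keep only the path-and-query of a URI (origin-form), defaulting to "/".
// Assumed dependency: http = "0.2".
use http::Uri;

fn to_origin_form(uri: &Uri) -> Uri {
    match uri.path_and_query() {
        Some(pq) if pq.as_str() != "/" => {
            let mut parts = http::uri::Parts::default();
            parts.path_and_query = Some(pq.clone());
            Uri::from_parts(parts).expect("path is a valid uri")
        }
        // No path (or just "/"): `Uri::default()` is "/".
        _ => Uri::default(),
    }
}

fn main() {
    let uri: Uri = "http://hyper.rs/guides?foo=bar".parse().unwrap();
    assert_eq!(to_origin_form(&uri).to_string(), "/guides?foo=bar");

    let root: Uri = "http://hyper.rs".parse().unwrap();
    assert_eq!(to_origin_form(&root).to_string(), "/");
}
```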
+ if uri.scheme() == Some(&Scheme::HTTPS) { + origin_form(uri); + } +} + +fn authority_form(uri: &mut Uri) { + if let Some(path) = uri.path_and_query() { + // `https://hyper.rs` would parse with `/` path, don't + // annoy people about that... + if path != "/" { + warn!("HTTP/1.1 CONNECT request stripping path: {:?}", path); + } + } + *uri = match uri.authority() { + Some(auth) => { + let mut parts = ::http::uri::Parts::default(); + parts.authority = Some(auth.clone()); + Uri::from_parts(parts).expect("authority is valid") + } + None => { + unreachable!("authority_form with relative uri"); + } + }; +} + +fn extract_domain(uri: &mut Uri, is_http_connect: bool) -> crate::Result { + let uri_clone = uri.clone(); + match (uri_clone.scheme(), uri_clone.authority()) { + (Some(scheme), Some(auth)) => Ok((scheme.clone(), auth.clone())), + (None, Some(auth)) if is_http_connect => { + let scheme = match auth.port_u16() { + Some(443) => { + set_scheme(uri, Scheme::HTTPS); + Scheme::HTTPS + } + _ => { + set_scheme(uri, Scheme::HTTP); + Scheme::HTTP + } + }; + Ok((scheme, auth.clone())) + } + _ => { + debug!("Client requires absolute-form URIs, received: {:?}", uri); + Err(crate::Error::new_user_absolute_uri_required()) + } + } +} + +fn domain_as_uri((scheme, auth): PoolKey) -> Uri { + http::uri::Builder::new() + .scheme(scheme) + .authority(auth) + .path_and_query("/") + .build() + .expect("domain is valid Uri") +} + +fn set_scheme(uri: &mut Uri, scheme: Scheme) { + debug_assert!( + uri.scheme().is_none(), + "set_scheme expects no existing scheme" + ); + let old = mem::replace(uri, Uri::default()); + let mut parts: ::http::uri::Parts = old.into(); + parts.scheme = Some(scheme); + parts.path_and_query = Some("/".parse().expect("slash is a valid path")); + *uri = Uri::from_parts(parts).expect("scheme is valid"); +} + +fn get_non_default_port(uri: &Uri) -> Option> { + match (uri.port().map(|p| p.as_u16()), is_schema_secure(uri)) { + (Some(443), true) => None, + (Some(80), false) => None, + _ => uri.port(), + } +} + +fn is_schema_secure(uri: &Uri) -> bool { + uri.scheme_str() + .map(|scheme_str| matches!(scheme_str, "wss" | "https")) + .unwrap_or_default() +} + +/// A builder to configure a new [`Client`](Client). 
+/// +/// # Example +/// +/// ``` +/// # #[cfg(feature = "runtime")] +/// # fn run () { +/// use std::time::Duration; +/// use hyper::Client; +/// +/// let client = Client::builder() +/// .pool_idle_timeout(Duration::from_secs(30)) +/// .http2_only(true) +/// .build_http(); +/// # let infer: Client<_, hyper::Body> = client; +/// # drop(infer); +/// # } +/// # fn main() {} +/// ``` +#[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))] +#[derive(Clone)] +pub struct Builder { + client_config: Config, + conn_builder: conn::Builder, + pool_config: pool::Config, +} + +impl Default for Builder { + fn default() -> Self { + Self { + client_config: Config { + retry_canceled_requests: true, + set_host: true, + ver: Ver::Auto, + }, + conn_builder: conn::Builder::new(), + pool_config: pool::Config { + idle_timeout: Some(Duration::from_secs(90)), + max_idle_per_host: std::usize::MAX, + }, + } + } +} + +impl Builder { + #[doc(hidden)] + #[deprecated( + note = "name is confusing, to disable the connection pool, call pool_max_idle_per_host(0)" + )] + pub fn keep_alive(&mut self, val: bool) -> &mut Self { + if !val { + // disable + self.pool_max_idle_per_host(0) + } else if self.pool_config.max_idle_per_host == 0 { + // enable + self.pool_max_idle_per_host(std::usize::MAX) + } else { + // already enabled + self + } + } + + #[doc(hidden)] + #[deprecated(note = "renamed to `pool_idle_timeout`")] + pub fn keep_alive_timeout(&mut self, val: D) -> &mut Self + where + D: Into>, + { + self.pool_idle_timeout(val) + } + + /// Set an optional timeout for idle sockets being kept-alive. + /// + /// Pass `None` to disable timeout. + /// + /// Default is 90 seconds. + pub fn pool_idle_timeout(&mut self, val: D) -> &mut Self + where + D: Into>, + { + self.pool_config.idle_timeout = val.into(); + self + } + + #[doc(hidden)] + #[deprecated(note = "renamed to `pool_max_idle_per_host`")] + pub fn max_idle_per_host(&mut self, max_idle: usize) -> &mut Self { + self.pool_config.max_idle_per_host = max_idle; + self + } + + /// Sets the maximum idle connection per host allowed in the pool. + /// + /// Default is `usize::MAX` (no limit). + pub fn pool_max_idle_per_host(&mut self, max_idle: usize) -> &mut Self { + self.pool_config.max_idle_per_host = max_idle; + self + } + + // HTTP/1 options + + /// Sets the exact size of the read buffer to *always* use. + /// + /// Note that setting this option unsets the `http1_max_buf_size` option. + /// + /// Default is an adaptive read buffer. + pub fn http1_read_buf_exact_size(&mut self, sz: usize) -> &mut Self { + self.conn_builder.http1_read_buf_exact_size(Some(sz)); + self + } + + /// Set the maximum buffer size for the connection. + /// + /// Default is ~400kb. + /// + /// Note that setting this option unsets the `http1_read_exact_buf_size` option. + /// + /// # Panics + /// + /// The minimum value allowed is 8192. This method panics if the passed `max` is less than the minimum. + #[cfg(feature = "http1")] + #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] + pub fn http1_max_buf_size(&mut self, max: usize) -> &mut Self { + self.conn_builder.http1_max_buf_size(max); + self + } + + /// Set whether HTTP/1 connections will accept spaces between header names + /// and the colon that follow them in responses. + /// + /// Newline codepoints (`\r` and `\n`) will be transformed to spaces when + /// parsing. + /// + /// You probably don't need this, here is what [RFC 7230 Section 3.2.4.] 
has + /// to say about it: + /// + /// > No whitespace is allowed between the header field-name and colon. In + /// > the past, differences in the handling of such whitespace have led to + /// > security vulnerabilities in request routing and response handling. A + /// > server MUST reject any received request message that contains + /// > whitespace between a header field-name and colon with a response code + /// > of 400 (Bad Request). A proxy MUST remove any such whitespace from a + /// > response message before forwarding the message downstream. + /// + /// Note that this setting does not affect HTTP/2. + /// + /// Default is false. + /// + /// [RFC 7230 Section 3.2.4.]: https://tools.ietf.org/html/rfc7230#section-3.2.4 + pub fn http1_allow_spaces_after_header_name_in_responses(&mut self, val: bool) -> &mut Self { + self.conn_builder + .http1_allow_spaces_after_header_name_in_responses(val); + self + } + + /// Set whether HTTP/1 connections will accept obsolete line folding for + /// header values. + /// + /// You probably don't need this, here is what [RFC 7230 Section 3.2.4.] has + /// to say about it: + /// + /// > A server that receives an obs-fold in a request message that is not + /// > within a message/http container MUST either reject the message by + /// > sending a 400 (Bad Request), preferably with a representation + /// > explaining that obsolete line folding is unacceptable, or replace + /// > each received obs-fold with one or more SP octets prior to + /// > interpreting the field value or forwarding the message downstream. + /// + /// > A proxy or gateway that receives an obs-fold in a response message + /// > that is not within a message/http container MUST either discard the + /// > message and replace it with a 502 (Bad Gateway) response, preferably + /// > with a representation explaining that unacceptable line folding was + /// > received, or replace each received obs-fold with one or more SP + /// > octets prior to interpreting the field value or forwarding the + /// > message downstream. + /// + /// > A user agent that receives an obs-fold in a response message that is + /// > not within a message/http container MUST replace each received + /// > obs-fold with one or more SP octets prior to interpreting the field + /// > value. + /// + /// Note that this setting does not affect HTTP/2. + /// + /// Default is false. + /// + /// [RFC 7230 Section 3.2.4.]: https://tools.ietf.org/html/rfc7230#section-3.2.4 + pub fn http1_allow_obsolete_multiline_headers_in_responses(&mut self, val: bool) -> &mut Self { + self.conn_builder + .http1_allow_obsolete_multiline_headers_in_responses(val); + self + } + + /// Set whether HTTP/1 connections should try to use vectored writes, + /// or always flatten into a single buffer. + /// + /// Note that setting this to false may mean more copies of body data, + /// but may also improve performance when an IO transport doesn't + /// support vectored writes well, such as most TLS implementations. + /// + /// Setting this to true will force hyper to use queued strategy + /// which may eliminate unnecessary cloning on some TLS backends + /// + /// Default is `auto`. In this mode hyper will try to guess which + /// mode to use + pub fn http1_writev(&mut self, enabled: bool) -> &mut Builder { + self.conn_builder.http1_writev(enabled); + self + } + + /// Set whether HTTP/1 connections will write header names as title case at + /// the socket level. + /// + /// Note that this setting does not affect HTTP/2. + /// + /// Default is false. 
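A minimal usage sketch of the HTTP/1 response-leniency switches documented above, together with `http1_title_case_headers` (whose method body follows below). It assumes hyper 0.14 built with the `client`, `http1` and `tcp` features; the helper name `lenient_client` is invented for the example.

```rust
// Hypothetical configuration of the lenient response-parsing options; the
// builder methods are the ones documented in this file, the feature set is
// an assumption of this sketch.
use hyper::{Body, Client};

fn lenient_client() -> Client<hyper::client::HttpConnector, Body> {
    Client::builder()
        // Tolerate `Header : value` (space before the colon) in responses.
        .http1_allow_spaces_after_header_name_in_responses(true)
        // Accept obsolete multi-line (obs-fold) header values in responses.
        .http1_allow_obsolete_multiline_headers_in_responses(true)
        // Write `Content-Length` rather than `content-length` on the wire.
        .http1_title_case_headers(true)
        .build_http()
}

fn main() {
    let _client = lenient_client();
}
```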
+ pub fn http1_title_case_headers(&mut self, val: bool) -> &mut Self { + self.conn_builder.http1_title_case_headers(val); + self + } + + /// Set whether to support preserving original header cases. + /// + /// Currently, this will record the original cases received, and store them + /// in a private extension on the `Response`. It will also look for and use + /// such an extension in any provided `Request`. + /// + /// Since the relevant extension is still private, there is no way to + /// interact with the original cases. The only effect this can have now is + /// to forward the cases in a proxy-like fashion. + /// + /// Note that this setting does not affect HTTP/2. + /// + /// Default is false. + pub fn http1_preserve_header_case(&mut self, val: bool) -> &mut Self { + self.conn_builder.http1_preserve_header_case(val); + self + } + + /// Set whether HTTP/0.9 responses should be tolerated. + /// + /// Default is false. + pub fn http09_responses(&mut self, val: bool) -> &mut Self { + self.conn_builder.http09_responses(val); + self + } + + /// Set whether the connection **must** use HTTP/2. + /// + /// The destination must either allow HTTP2 Prior Knowledge, or the + /// `Connect` should be configured to do use ALPN to upgrade to `h2` + /// as part of the connection process. This will not make the `Client` + /// utilize ALPN by itself. + /// + /// Note that setting this to true prevents HTTP/1 from being allowed. + /// + /// Default is false. + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn http2_only(&mut self, val: bool) -> &mut Self { + self.client_config.ver = if val { Ver::Http2 } else { Ver::Auto }; + self + } + + /// Sets the [`SETTINGS_INITIAL_WINDOW_SIZE`][spec] option for HTTP2 + /// stream-level flow control. + /// + /// Passing `None` will do nothing. + /// + /// If not set, hyper will use a default. + /// + /// [spec]: https://http2.github.io/http2-spec/#SETTINGS_INITIAL_WINDOW_SIZE + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn http2_initial_stream_window_size(&mut self, sz: impl Into>) -> &mut Self { + self.conn_builder + .http2_initial_stream_window_size(sz.into()); + self + } + + /// Sets the max connection-level flow control for HTTP2 + /// + /// Passing `None` will do nothing. + /// + /// If not set, hyper will use a default. + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn http2_initial_connection_window_size( + &mut self, + sz: impl Into>, + ) -> &mut Self { + self.conn_builder + .http2_initial_connection_window_size(sz.into()); + self + } + + /// Sets whether to use an adaptive flow control. + /// + /// Enabling this will override the limits set in + /// `http2_initial_stream_window_size` and + /// `http2_initial_connection_window_size`. + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn http2_adaptive_window(&mut self, enabled: bool) -> &mut Self { + self.conn_builder.http2_adaptive_window(enabled); + self + } + + /// Sets the maximum frame size to use for HTTP2. + /// + /// Passing `None` will do nothing. + /// + /// If not set, hyper will use a default. + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn http2_max_frame_size(&mut self, sz: impl Into>) -> &mut Self { + self.conn_builder.http2_max_frame_size(sz); + self + } + + /// Sets an interval for HTTP2 Ping frames should be sent to keep a + /// connection alive. + /// + /// Pass `None` to disable HTTP2 keep-alive. 
+ /// + /// Default is currently disabled. + /// + /// # Cargo Feature + /// + /// Requires the `runtime` cargo feature to be enabled. + #[cfg(feature = "runtime")] + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn http2_keep_alive_interval( + &mut self, + interval: impl Into>, + ) -> &mut Self { + self.conn_builder.http2_keep_alive_interval(interval); + self + } + + /// Sets a timeout for receiving an acknowledgement of the keep-alive ping. + /// + /// If the ping is not acknowledged within the timeout, the connection will + /// be closed. Does nothing if `http2_keep_alive_interval` is disabled. + /// + /// Default is 20 seconds. + /// + /// # Cargo Feature + /// + /// Requires the `runtime` cargo feature to be enabled. + #[cfg(feature = "runtime")] + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn http2_keep_alive_timeout(&mut self, timeout: Duration) -> &mut Self { + self.conn_builder.http2_keep_alive_timeout(timeout); + self + } + + /// Sets whether HTTP2 keep-alive should apply while the connection is idle. + /// + /// If disabled, keep-alive pings are only sent while there are open + /// request/responses streams. If enabled, pings are also sent when no + /// streams are active. Does nothing if `http2_keep_alive_interval` is + /// disabled. + /// + /// Default is `false`. + /// + /// # Cargo Feature + /// + /// Requires the `runtime` cargo feature to be enabled. + #[cfg(feature = "runtime")] + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn http2_keep_alive_while_idle(&mut self, enabled: bool) -> &mut Self { + self.conn_builder.http2_keep_alive_while_idle(enabled); + self + } + + /// Sets the maximum number of HTTP2 concurrent locally reset streams. + /// + /// See the documentation of [`h2::client::Builder::max_concurrent_reset_streams`] for more + /// details. + /// + /// The default value is determined by the `h2` crate. + /// + /// [`h2::client::Builder::max_concurrent_reset_streams`]: https://docs.rs/h2/client/struct.Builder.html#method.max_concurrent_reset_streams + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn http2_max_concurrent_reset_streams(&mut self, max: usize) -> &mut Self { + self.conn_builder.http2_max_concurrent_reset_streams(max); + self + } + + /// Set the maximum write buffer size for each HTTP/2 stream. + /// + /// Default is currently 1MB, but may change. + /// + /// # Panics + /// + /// The value must be no larger than `u32::MAX`. + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn http2_max_send_buf_size(&mut self, max: usize) -> &mut Self { + self.conn_builder.http2_max_send_buf_size(max); + self + } + + /// Set whether to retry requests that get disrupted before ever starting + /// to write. + /// + /// This means a request that is queued, and gets given an idle, reused + /// connection, and then encounters an error immediately as the idle + /// connection was found to be unusable. + /// + /// When this is set to `false`, the related `ResponseFuture` would instead + /// resolve to an `Error::Cancel`. + /// + /// Default is `true`. + #[inline] + pub fn retry_canceled_requests(&mut self, val: bool) -> &mut Self { + self.client_config.retry_canceled_requests = val; + self + } + + /// Set whether to automatically add the `Host` header to requests. 
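Pulling the HTTP/2 knobs from this stretch of the builder into one hedged sketch (the `set_host` documentation continues right below): the values are arbitrary rather than recommendations, and the example assumes hyper 0.14 with the `client`, `http2`, `tcp` and `runtime` features.

```rust
// Illustrative HTTP/2 tuning; the method names come from the builder
// documented above, the chosen values and feature set are assumptions.
use std::time::Duration;

use hyper::{Body, Client};

fn h2_client() -> Client<hyper::client::HttpConnector, Body> {
    Client::builder()
        .http2_only(true)
        // Let hyper size stream/connection windows from observed latency.
        .http2_adaptive_window(true)
        .http2_max_frame_size(16_384u32)
        // Ping an idle connection every 30s; drop it if the ack takes >10s.
        .http2_keep_alive_interval(Duration::from_secs(30))
        .http2_keep_alive_timeout(Duration::from_secs(10))
        .http2_keep_alive_while_idle(true)
        .build_http()
}

fn main() {
    let _client = h2_client();
}
```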
+ /// + /// If true, and a request does not include a `Host` header, one will be + /// added automatically, derived from the authority of the `Uri`. + /// + /// Default is `true`. + #[inline] + pub fn set_host(&mut self, val: bool) -> &mut Self { + self.client_config.set_host = val; + self + } + + /// Provide an executor to execute background `Connection` tasks. + pub fn executor(&mut self, exec: E) -> &mut Self + where + E: Executor + Send + Sync + 'static, + { + self.conn_builder.executor(exec); + self + } + + /// Builder a client with this configuration and the default `HttpConnector`. + #[cfg(feature = "tcp")] + pub fn build_http(&self) -> Client + where + B: HttpBody + Send, + B::Data: Send, + { + let mut connector = HttpConnector::new(); + if self.pool_config.is_enabled() { + connector.set_keepalive(self.pool_config.idle_timeout); + } + self.build(connector) + } + + /// Combine the configuration of this builder with a connector to create a `Client`. + pub fn build(&self, connector: C) -> Client + where + C: Connect + Clone, + B: HttpBody + Send, + B::Data: Send, + { + Client { + config: self.client_config, + conn_builder: self.conn_builder.clone(), + connector, + pool: Pool::new(self.pool_config, &self.conn_builder.exec), + } + } +} + +impl fmt::Debug for Builder { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Builder") + .field("client_config", &self.client_config) + .field("conn_builder", &self.conn_builder) + .field("pool_config", &self.pool_config) + .finish() + } +} + +#[cfg(test)] +mod unit_tests { + use super::*; + + #[test] + fn response_future_is_sync() { + fn assert_sync() {} + assert_sync::(); + } + + #[test] + fn set_relative_uri_with_implicit_path() { + let mut uri = "http://hyper.rs".parse().unwrap(); + origin_form(&mut uri); + assert_eq!(uri.to_string(), "/"); + } + + #[test] + fn test_origin_form() { + let mut uri = "http://hyper.rs/guides".parse().unwrap(); + origin_form(&mut uri); + assert_eq!(uri.to_string(), "/guides"); + + let mut uri = "http://hyper.rs/guides?foo=bar".parse().unwrap(); + origin_form(&mut uri); + assert_eq!(uri.to_string(), "/guides?foo=bar"); + } + + #[test] + fn test_absolute_form() { + let mut uri = "http://hyper.rs/guides".parse().unwrap(); + absolute_form(&mut uri); + assert_eq!(uri.to_string(), "http://hyper.rs/guides"); + + let mut uri = "https://hyper.rs/guides".parse().unwrap(); + absolute_form(&mut uri); + assert_eq!(uri.to_string(), "/guides"); + } + + #[test] + fn test_authority_form() { + let _ = pretty_env_logger::try_init(); + + let mut uri = "http://hyper.rs".parse().unwrap(); + authority_form(&mut uri); + assert_eq!(uri.to_string(), "hyper.rs"); + + let mut uri = "hyper.rs".parse().unwrap(); + authority_form(&mut uri); + assert_eq!(uri.to_string(), "hyper.rs"); + } + + #[test] + fn test_extract_domain_connect_no_port() { + let mut uri = "hyper.rs".parse().unwrap(); + let (scheme, host) = extract_domain(&mut uri, true).expect("extract domain"); + assert_eq!(scheme, *"http"); + assert_eq!(host, "hyper.rs"); + } + + #[test] + fn test_is_secure() { + assert_eq!( + is_schema_secure(&"http://hyper.rs".parse::().unwrap()), + false + ); + assert_eq!(is_schema_secure(&"hyper.rs".parse::().unwrap()), false); + assert_eq!( + is_schema_secure(&"wss://hyper.rs".parse::().unwrap()), + true + ); + assert_eq!( + is_schema_secure(&"ws://hyper.rs".parse::().unwrap()), + false + ); + } + + #[test] + fn test_get_non_default_port() { + assert!(get_non_default_port(&"http://hyper.rs".parse::().unwrap()).is_none()); + 
assert!(get_non_default_port(&"http://hyper.rs:80".parse::().unwrap()).is_none()); + assert!(get_non_default_port(&"https://hyper.rs:443".parse::().unwrap()).is_none()); + assert!(get_non_default_port(&"hyper.rs:80".parse::().unwrap()).is_none()); + + assert_eq!( + get_non_default_port(&"http://hyper.rs:123".parse::().unwrap()) + .unwrap() + .as_u16(), + 123 + ); + assert_eq!( + get_non_default_port(&"https://hyper.rs:80".parse::().unwrap()) + .unwrap() + .as_u16(), + 80 + ); + assert_eq!( + get_non_default_port(&"hyper.rs:123".parse::().unwrap()) + .unwrap() + .as_u16(), + 123 + ); + } +} diff --git a/third_party/rust/hyper/src/client/conn.rs b/third_party/rust/hyper/src/client/conn.rs index 713127043630..85bc366be949 100644 --- a/third_party/rust/hyper/src/client/conn.rs +++ b/third_party/rust/hyper/src/client/conn.rs @@ -7,41 +7,117 @@ //! //! If don't have need to manage connections yourself, consider using the //! higher-level [Client](super) API. +//! +//! ## Example +//! A simple example that uses the `SendRequest` struct to talk HTTP over a Tokio TCP stream +//! ```no_run +//! # #[cfg(all(feature = "client", feature = "http1", feature = "runtime"))] +//! # mod rt { +//! use tower::ServiceExt; +//! use http::{Request, StatusCode}; +//! use hyper::{client::conn, Body}; +//! use tokio::net::TcpStream; +//! +//! #[tokio::main] +//! async fn main() -> Result<(), Box> { +//! let target_stream = TcpStream::connect("example.com:80").await?; +//! +//! let (mut request_sender, connection) = conn::handshake(target_stream).await?; +//! +//! // spawn a task to poll the connection and drive the HTTP state +//! tokio::spawn(async move { +//! if let Err(e) = connection.await { +//! eprintln!("Error in connection: {}", e); +//! } +//! }); +//! +//! let request = Request::builder() +//! // We need to manually add the host header because SendRequest does not +//! .header("Host", "example.com") +//! .method("GET") +//! .body(Body::from(""))?; +//! let response = request_sender.send_request(request).await?; +//! assert!(response.status() == StatusCode::OK); +//! +//! // To send via the same connection again, it may not work as it may not be ready, +//! // so we have to wait until the request_sender becomes ready. +//! request_sender.ready().await?; +//! let request = Request::builder() +//! .header("Host", "example.com") +//! .method("GET") +//! .body(Body::from(""))?; +//! let response = request_sender.send_request(request).await?; +//! assert!(response.status() == StatusCode::OK); +//! Ok(()) +//! } +//! +//! # } +//! 
``` use std::error::Error as StdError; use std::fmt; -use std::mem; +#[cfg(not(all(feature = "http1", feature = "http2")))] +use std::marker::PhantomData; use std::sync::Arc; -#[cfg(feature = "runtime")] +#[cfg(all(feature = "runtime", feature = "http2"))] use std::time::Duration; use bytes::Bytes; use futures_util::future::{self, Either, FutureExt as _}; -use pin_project::{pin_project, project}; +use httparse::ParserConfig; +use pin_project_lite::pin_project; use tokio::io::{AsyncRead, AsyncWrite}; use tower_service::Service; +use tracing::{debug, trace}; use super::dispatch; use crate::body::HttpBody; -use crate::common::{task, BoxSendFuture, Exec, Executor, Future, Pin, Poll}; +#[cfg(not(all(feature = "http1", feature = "http2")))] +use crate::common::Never; +use crate::common::{ + exec::{BoxSendFuture, Exec}, + task, Future, Pin, Poll, +}; use crate::proto; +use crate::rt::Executor; +#[cfg(feature = "http1")] use crate::upgrade::Upgraded; use crate::{Body, Request, Response}; -type Http1Dispatcher = proto::dispatch::Dispatcher, B, T, R>; +#[cfg(feature = "http1")] +type Http1Dispatcher = + proto::dispatch::Dispatcher, B, T, proto::h1::ClientTransaction>; -#[pin_project] -enum ProtoClient -where - B: HttpBody, -{ - H1(#[pin] Http1Dispatcher), - H2(#[pin] proto::h2::ClientTask), +#[cfg(not(feature = "http1"))] +type Http1Dispatcher = (Never, PhantomData<(T, Pin>)>); + +#[cfg(feature = "http2")] +type Http2ClientTask = proto::h2::ClientTask; + +#[cfg(not(feature = "http2"))] +type Http2ClientTask = (Never, PhantomData>>); + +pin_project! { + #[project = ProtoClientProj] + enum ProtoClient + where + B: HttpBody, + { + H1 { + #[pin] + h1: Http1Dispatcher, + }, + H2 { + #[pin] + h2: Http2ClientTask, + }, + } } /// Returns a handshake future over some IO. /// /// This is a shortcut for `Builder::new().handshake(io)`. +/// See [`client::conn`](crate::client::conn) for more. pub async fn handshake( io: T, ) -> crate::Result<(SendRequest, Connection)> @@ -75,12 +151,26 @@ where #[derive(Clone, Debug)] pub struct Builder { pub(super) exec: Exec, - h1_writev: bool, + h09_responses: bool, + h1_parser_config: ParserConfig, + h1_writev: Option, h1_title_case_headers: bool, + h1_preserve_header_case: bool, h1_read_buf_exact_size: Option, h1_max_buf_size: Option, - http2: bool, + #[cfg(feature = "ffi")] + h1_headers_raw: bool, + #[cfg(feature = "http2")] h2_builder: proto::h2::client::Config, + version: Proto, +} + +#[derive(Clone, Debug)] +enum Proto { + #[cfg(feature = "http1")] + Http1, + #[cfg(feature = "http2")] + Http2, } /// A future returned by `SendRequest::send_request`. @@ -122,6 +212,7 @@ pub struct Parts { // A `SendRequest` that can be cloned to send HTTP2 requests. // private for now, probably not a great idea of a type... 
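The `ProtoClient` enum above is polled through `pin_project_lite`'s enum projection (hence the `#[project = ProtoClientProj]` attribute). A minimal, self-contained sketch of that pattern with invented names (`EitherFut`, `EitherProj`), assuming `pin-project-lite` 0.2; the `Http2SendRequest` type referred to just above follows next.

```rust
// Projecting a pinned enum so each variant's future can be polled in place.
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};

pin_project_lite::pin_project! {
    #[project = EitherProj]
    enum EitherFut<A, B> {
        Left { #[pin] fut: A },
        Right { #[pin] fut: B },
    }
}

impl<A, B, T> Future for EitherFut<A, B>
where
    A: Future<Output = T>,
    B: Future<Output = T>,
{
    type Output = T;

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<T> {
        // `project()` yields pinned access to whichever variant is active.
        match self.project() {
            EitherProj::Left { fut } => fut.poll(cx),
            EitherProj::Right { fut } => fut.poll(cx),
        }
    }
}
```

Unlike the older `pin-project` crate, `pin-project-lite` requires the projection enum to be named explicitly, which is why the attribute shows up in this diff.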
#[must_use = "futures do nothing unless polled"] +#[cfg(feature = "http2")] pub(super) struct Http2SendRequest { dispatch: dispatch::UnboundedSender, Response>, } @@ -136,12 +227,13 @@ impl SendRequest { self.dispatch.poll_ready(cx) } - pub(super) fn when_ready(self) -> impl Future> { + pub(super) async fn when_ready(self) -> crate::Result { let mut me = Some(self); future::poll_fn(move |cx| { ready!(me.as_mut().unwrap().poll_ready(cx))?; Poll::Ready(Ok(me.take().unwrap())) }) + .await } pub(super) fn is_ready(&self) -> bool { @@ -152,6 +244,7 @@ impl SendRequest { self.dispatch.is_closed() } + #[cfg(feature = "http2")] pub(super) fn into_http2(self) -> Http2SendRequest { Http2SendRequest { dispatch: self.dispatch.unbound(), @@ -217,7 +310,7 @@ where ResponseFuture { inner } } - pub(crate) fn send_request_retryable( + pub(super) fn send_request_retryable( &mut self, req: Request, ) -> impl Future, (crate::Error, Option>)>> + Unpin @@ -269,6 +362,7 @@ impl fmt::Debug for SendRequest { // ===== impl Http2SendRequest +#[cfg(feature = "http2")] impl Http2SendRequest { pub(super) fn is_ready(&self) -> bool { self.dispatch.is_ready() @@ -279,6 +373,7 @@ impl Http2SendRequest { } } +#[cfg(feature = "http2")] impl Http2SendRequest where B: HttpBody + 'static, @@ -310,12 +405,14 @@ where } } +#[cfg(feature = "http2")] impl fmt::Debug for Http2SendRequest { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Http2SendRequest").finish() } } +#[cfg(feature = "http2")] impl Clone for Http2SendRequest { fn clone(&self) -> Self { Http2SendRequest { @@ -337,17 +434,22 @@ where /// /// Only works for HTTP/1 connections. HTTP/2 connections will panic. pub fn into_parts(self) -> Parts { - let (io, read_buf, _) = match self.inner.expect("already upgraded") { - ProtoClient::H1(h1) => h1.into_inner(), - ProtoClient::H2(_h2) => { + match self.inner.expect("already upgraded") { + #[cfg(feature = "http1")] + ProtoClient::H1 { h1 } => { + let (io, read_buf, _) = h1.into_inner(); + Parts { + io, + read_buf, + _inner: (), + } + } + ProtoClient::H2 { .. } => { panic!("http2 cannot into_inner"); } - }; - Parts { - io, - read_buf, - _inner: (), + #[cfg(not(feature = "http1"))] + ProtoClient::H1 { h1 } => match h1.0 {}, } } @@ -364,8 +466,15 @@ where /// to work with this function; or use the `without_shutdown` wrapper. pub fn poll_without_shutdown(&mut self, cx: &mut task::Context<'_>) -> Poll> { match *self.inner.as_mut().expect("already upgraded") { - ProtoClient::H1(ref mut h1) => h1.poll_without_shutdown(cx), - ProtoClient::H2(ref mut h2) => Pin::new(h2).poll(cx).map_ok(|_| ()), + #[cfg(feature = "http1")] + ProtoClient::H1 { ref mut h1 } => h1.poll_without_shutdown(cx), + #[cfg(feature = "http2")] + ProtoClient::H2 { ref mut h2, .. } => Pin::new(h2).poll(cx).map_ok(|_| ()), + + #[cfg(not(feature = "http1"))] + ProtoClient::H1 { ref mut h1 } => match h1.0 {}, + #[cfg(not(feature = "http2"))] + ProtoClient::H2 { ref mut h2, .. } => match h2.0 {}, } } @@ -378,6 +487,23 @@ where Poll::Ready(Ok(conn.take().unwrap().into_parts())) }) } + + /// Returns whether the [extended CONNECT protocol][1] is enabled or not. + /// + /// This setting is configured by the server peer by sending the + /// [`SETTINGS_ENABLE_CONNECT_PROTOCOL` parameter][2] in a `SETTINGS` frame. + /// This method returns the currently acknowledged value recieved from the + /// remote. 
+ /// + /// [1]: https://datatracker.ietf.org/doc/html/rfc8441#section-4 + /// [2]: https://datatracker.ietf.org/doc/html/rfc8441#section-3 + #[cfg(feature = "http2")] + pub fn http2_is_extended_connect_protocol_enabled(&self) -> bool { + match self.inner.as_ref().unwrap() { + ProtoClient::H1 { .. } => false, + ProtoClient::H2 { h2 } => h2.is_extended_connect_protocol_enabled(), + } + } } impl Future for Connection @@ -392,16 +518,18 @@ where fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { match ready!(Pin::new(self.inner.as_mut().unwrap()).poll(cx))? { proto::Dispatched::Shutdown => Poll::Ready(Ok(())), - proto::Dispatched::Upgrade(pending) => { - let h1 = match mem::replace(&mut self.inner, None) { - Some(ProtoClient::H1(h1)) => h1, - _ => unreachable!("Upgrade expects h1"), - }; - - let (io, buf, _) = h1.into_inner(); - pending.fulfill(Upgraded::new(io, buf)); - Poll::Ready(Ok(())) - } + #[cfg(feature = "http1")] + proto::Dispatched::Upgrade(pending) => match self.inner.take() { + Some(ProtoClient::H1 { h1 }) => { + let (io, buf, _) = h1.into_inner(); + pending.fulfill(Upgraded::new(io, buf)); + Poll::Ready(Ok(())) + } + _ => { + drop(pending); + unreachable!("Upgrade expects h1"); + } + }, } } } @@ -424,12 +552,21 @@ impl Builder { pub fn new() -> Builder { Builder { exec: Exec::Default, - h1_writev: true, + h09_responses: false, + h1_writev: None, h1_read_buf_exact_size: None, + h1_parser_config: Default::default(), h1_title_case_headers: false, + h1_preserve_header_case: false, h1_max_buf_size: None, - http2: false, + #[cfg(feature = "ffi")] + h1_headers_raw: false, + #[cfg(feature = "http2")] h2_builder: Default::default(), + #[cfg(feature = "http1")] + version: Proto::Http1, + #[cfg(not(feature = "http1"))] + version: Proto::Http2, } } @@ -442,23 +579,154 @@ impl Builder { self } - pub(super) fn h1_writev(&mut self, enabled: bool) -> &mut Builder { - self.h1_writev = enabled; + /// Set whether HTTP/0.9 responses should be tolerated. + /// + /// Default is false. + pub fn http09_responses(&mut self, enabled: bool) -> &mut Builder { + self.h09_responses = enabled; self } - pub(super) fn h1_title_case_headers(&mut self, enabled: bool) -> &mut Builder { + /// Set whether HTTP/1 connections will accept spaces between header names + /// and the colon that follow them in responses. + /// + /// You probably don't need this, here is what [RFC 7230 Section 3.2.4.] has + /// to say about it: + /// + /// > No whitespace is allowed between the header field-name and colon. In + /// > the past, differences in the handling of such whitespace have led to + /// > security vulnerabilities in request routing and response handling. A + /// > server MUST reject any received request message that contains + /// > whitespace between a header field-name and colon with a response code + /// > of 400 (Bad Request). A proxy MUST remove any such whitespace from a + /// > response message before forwarding the message downstream. + /// + /// Note that this setting does not affect HTTP/2. + /// + /// Default is false. + /// + /// [RFC 7230 Section 3.2.4.]: https://tools.ietf.org/html/rfc7230#section-3.2.4 + pub fn http1_allow_spaces_after_header_name_in_responses( + &mut self, + enabled: bool, + ) -> &mut Builder { + self.h1_parser_config + .allow_spaces_after_header_name_in_responses(enabled); + self + } + + /// Set whether HTTP/1 connections will accept obsolete line folding for + /// header values. 
+ /// + /// Newline codepoints (`\r` and `\n`) will be transformed to spaces when + /// parsing. + /// + /// You probably don't need this, here is what [RFC 7230 Section 3.2.4.] has + /// to say about it: + /// + /// > A server that receives an obs-fold in a request message that is not + /// > within a message/http container MUST either reject the message by + /// > sending a 400 (Bad Request), preferably with a representation + /// > explaining that obsolete line folding is unacceptable, or replace + /// > each received obs-fold with one or more SP octets prior to + /// > interpreting the field value or forwarding the message downstream. + /// + /// > A proxy or gateway that receives an obs-fold in a response message + /// > that is not within a message/http container MUST either discard the + /// > message and replace it with a 502 (Bad Gateway) response, preferably + /// > with a representation explaining that unacceptable line folding was + /// > received, or replace each received obs-fold with one or more SP + /// > octets prior to interpreting the field value or forwarding the + /// > message downstream. + /// + /// > A user agent that receives an obs-fold in a response message that is + /// > not within a message/http container MUST replace each received + /// > obs-fold with one or more SP octets prior to interpreting the field + /// > value. + /// + /// Note that this setting does not affect HTTP/2. + /// + /// Default is false. + /// + /// [RFC 7230 Section 3.2.4.]: https://tools.ietf.org/html/rfc7230#section-3.2.4 + pub fn http1_allow_obsolete_multiline_headers_in_responses( + &mut self, + enabled: bool, + ) -> &mut Builder { + self.h1_parser_config + .allow_obsolete_multiline_headers_in_responses(enabled); + self + } + + /// Set whether HTTP/1 connections should try to use vectored writes, + /// or always flatten into a single buffer. + /// + /// Note that setting this to false may mean more copies of body data, + /// but may also improve performance when an IO transport doesn't + /// support vectored writes well, such as most TLS implementations. + /// + /// Setting this to true will force hyper to use queued strategy + /// which may eliminate unnecessary cloning on some TLS backends + /// + /// Default is `auto`. In this mode hyper will try to guess which + /// mode to use + pub fn http1_writev(&mut self, enabled: bool) -> &mut Builder { + self.h1_writev = Some(enabled); + self + } + + /// Set whether HTTP/1 connections will write header names as title case at + /// the socket level. + /// + /// Note that this setting does not affect HTTP/2. + /// + /// Default is false. + pub fn http1_title_case_headers(&mut self, enabled: bool) -> &mut Builder { self.h1_title_case_headers = enabled; self } - pub(super) fn h1_read_buf_exact_size(&mut self, sz: Option) -> &mut Builder { + /// Set whether to support preserving original header cases. + /// + /// Currently, this will record the original cases received, and store them + /// in a private extension on the `Response`. It will also look for and use + /// such an extension in any provided `Request`. + /// + /// Since the relevant extension is still private, there is no way to + /// interact with the original cases. The only effect this can have now is + /// to forward the cases in a proxy-like fashion. + /// + /// Note that this setting does not affect HTTP/2. + /// + /// Default is false. 
+ pub fn http1_preserve_header_case(&mut self, enabled: bool) -> &mut Builder { + self.h1_preserve_header_case = enabled; + self + } + + /// Sets the exact size of the read buffer to *always* use. + /// + /// Note that setting this option unsets the `http1_max_buf_size` option. + /// + /// Default is an adaptive read buffer. + pub fn http1_read_buf_exact_size(&mut self, sz: Option) -> &mut Builder { self.h1_read_buf_exact_size = sz; self.h1_max_buf_size = None; self } - pub(super) fn h1_max_buf_size(&mut self, max: usize) -> &mut Self { + /// Set the maximum buffer size for the connection. + /// + /// Default is ~400kb. + /// + /// Note that setting this option unsets the `http1_read_exact_buf_size` option. + /// + /// # Panics + /// + /// The minimum value allowed is 8192. This method panics if the passed `max` is less than the minimum. + #[cfg(feature = "http1")] + #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] + pub fn http1_max_buf_size(&mut self, max: usize) -> &mut Self { assert!( max >= proto::h1::MINIMUM_MAX_BUFFER_SIZE, "the max_buf_size cannot be smaller than the minimum that h1 specifies." @@ -469,11 +737,21 @@ impl Builder { self } + #[cfg(feature = "ffi")] + pub(crate) fn http1_headers_raw(&mut self, enabled: bool) -> &mut Self { + self.h1_headers_raw = enabled; + self + } + /// Sets whether HTTP2 is required. /// /// Default is false. + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] pub fn http2_only(&mut self, enabled: bool) -> &mut Builder { - self.http2 = enabled; + if enabled { + self.version = Proto::Http2 + } self } @@ -485,6 +763,8 @@ impl Builder { /// If not set, hyper will use a default. /// /// [spec]: https://http2.github.io/http2-spec/#SETTINGS_INITIAL_WINDOW_SIZE + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] pub fn http2_initial_stream_window_size(&mut self, sz: impl Into>) -> &mut Self { if let Some(sz) = sz.into() { self.h2_builder.adaptive_window = false; @@ -498,6 +778,8 @@ impl Builder { /// Passing `None` will do nothing. /// /// If not set, hyper will use a default. + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] pub fn http2_initial_connection_window_size( &mut self, sz: impl Into>, @@ -514,6 +796,8 @@ impl Builder { /// Enabling this will override the limits set in /// `http2_initial_stream_window_size` and /// `http2_initial_connection_window_size`. + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] pub fn http2_adaptive_window(&mut self, enabled: bool) -> &mut Self { use proto::h2::SPEC_WINDOW_SIZE; @@ -530,6 +814,8 @@ impl Builder { /// Passing `None` will do nothing. /// /// If not set, hyper will use a default. + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] pub fn http2_max_frame_size(&mut self, sz: impl Into>) -> &mut Self { if let Some(sz) = sz.into() { self.h2_builder.max_frame_size = sz; @@ -548,6 +834,8 @@ impl Builder { /// /// Requires the `runtime` cargo feature to be enabled. #[cfg(feature = "runtime")] + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] pub fn http2_keep_alive_interval( &mut self, interval: impl Into>, @@ -567,6 +855,8 @@ impl Builder { /// /// Requires the `runtime` cargo feature to be enabled. 
#[cfg(feature = "runtime")] + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] pub fn http2_keep_alive_timeout(&mut self, timeout: Duration) -> &mut Self { self.h2_builder.keep_alive_timeout = timeout; self @@ -585,12 +875,48 @@ impl Builder { /// /// Requires the `runtime` cargo feature to be enabled. #[cfg(feature = "runtime")] + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] pub fn http2_keep_alive_while_idle(&mut self, enabled: bool) -> &mut Self { self.h2_builder.keep_alive_while_idle = enabled; self } + /// Sets the maximum number of HTTP2 concurrent locally reset streams. + /// + /// See the documentation of [`h2::client::Builder::max_concurrent_reset_streams`] for more + /// details. + /// + /// The default value is determined by the `h2` crate. + /// + /// [`h2::client::Builder::max_concurrent_reset_streams`]: https://docs.rs/h2/client/struct.Builder.html#method.max_concurrent_reset_streams + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn http2_max_concurrent_reset_streams(&mut self, max: usize) -> &mut Self { + self.h2_builder.max_concurrent_reset_streams = Some(max); + self + } + + /// Set the maximum write buffer size for each HTTP/2 stream. + /// + /// Default is currently 1MB, but may change. + /// + /// # Panics + /// + /// The value must be no larger than `u32::MAX`. + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn http2_max_send_buf_size(&mut self, max: usize) -> &mut Self { + assert!(max <= std::u32::MAX as usize); + self.h2_builder.max_send_buffer_size = max; + self + } + /// Constructs a connection with the configured options and IO. + /// See [`client::conn`](crate::client::conn) for more. + /// + /// Note, if [`Connection`] is not `await`-ed, [`SendRequest`] will + /// do nothing. 
pub fn handshake( &self, io: T, @@ -604,30 +930,51 @@ impl Builder { let opts = self.clone(); async move { - trace!("client handshake HTTP/{}", if opts.http2 { 2 } else { 1 }); + trace!("client handshake {:?}", opts.version); let (tx, rx) = dispatch::channel(); - let proto = if !opts.http2 { - let mut conn = proto::Conn::new(io); - if !opts.h1_writev { - conn.set_write_strategy_flatten(); + let proto = match opts.version { + #[cfg(feature = "http1")] + Proto::Http1 => { + let mut conn = proto::Conn::new(io); + conn.set_h1_parser_config(opts.h1_parser_config); + if let Some(writev) = opts.h1_writev { + if writev { + conn.set_write_strategy_queue(); + } else { + conn.set_write_strategy_flatten(); + } + } + if opts.h1_title_case_headers { + conn.set_title_case_headers(); + } + if opts.h1_preserve_header_case { + conn.set_preserve_header_case(); + } + if opts.h09_responses { + conn.set_h09_responses(); + } + + #[cfg(feature = "ffi")] + conn.set_raw_headers(opts.h1_headers_raw); + + if let Some(sz) = opts.h1_read_buf_exact_size { + conn.set_read_buf_exact_size(sz); + } + if let Some(max) = opts.h1_max_buf_size { + conn.set_max_buf_size(max); + } + let cd = proto::h1::dispatch::Client::new(rx); + let dispatch = proto::h1::Dispatcher::new(cd, conn); + ProtoClient::H1 { h1: dispatch } } - if opts.h1_title_case_headers { - conn.set_title_case_headers(); + #[cfg(feature = "http2")] + Proto::Http2 => { + let h2 = + proto::h2::client::handshake(io, rx, &opts.h2_builder, opts.exec.clone()) + .await?; + ProtoClient::H2 { h2 } } - if let Some(sz) = opts.h1_read_buf_exact_size { - conn.set_read_buf_exact_size(sz); - } - if let Some(max) = opts.h1_max_buf_size { - conn.set_max_buf_size(max); - } - let cd = proto::h1::dispatch::Client::new(rx); - let dispatch = proto::h1::Dispatcher::new(cd, conn); - ProtoClient::H1(dispatch) - } else { - let h2 = proto::h2::client::handshake(io, rx, &opts.h2_builder, opts.exec.clone()) - .await?; - ProtoClient::H2(h2) }; Ok(( @@ -677,12 +1024,17 @@ where { type Output = crate::Result; - #[project] fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { - #[project] match self.project() { - ProtoClient::H1(c) => c.poll(cx), - ProtoClient::H2(c) => c.poll(cx), + #[cfg(feature = "http1")] + ProtoClientProj::H1 { h1 } => h1.poll(cx), + #[cfg(feature = "http2")] + ProtoClientProj::H2 { h2, .. } => h2.poll(cx), + + #[cfg(not(feature = "http1"))] + ProtoClientProj::H1 { h1 } => match h1.0 {}, + #[cfg(not(feature = "http2"))] + ProtoClientProj::H2 { h2, .. } => match h2.0 {}, } } } diff --git a/third_party/rust/hyper/src/client/connect/dns.rs b/third_party/rust/hyper/src/client/connect/dns.rs index acffb8b9e58f..e4465078b31d 100644 --- a/third_party/rust/hyper/src/client/connect/dns.rs +++ b/third_party/rust/hyper/src/client/connect/dns.rs @@ -9,21 +9,21 @@ //! # Resolvers are `Service`s //! //! A resolver is just a -//! `Service>`. +//! `Service>`. //! //! A simple resolver that ignores the name and always returns a specific //! address: //! //! ```rust,ignore -//! use std::{convert::Infallible, iter, net::IpAddr}; +//! use std::{convert::Infallible, iter, net::SocketAddr}; //! //! let resolver = tower::service_fn(|_name| async { -//! Ok::<_, Infallible>(iter::once(IpAddr::from([127, 0, 0, 1]))) +//! Ok::<_, Infallible>(iter::once(SocketAddr::from(([127, 0, 0, 1], 8080)))) //! }); //! 
``` use std::error::Error; use std::future::Future; -use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6, ToSocketAddrs}; +use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6, ToSocketAddrs}; use std::pin::Pin; use std::str::FromStr; use std::task::{self, Poll}; @@ -31,13 +31,14 @@ use std::{fmt, io, vec}; use tokio::task::JoinHandle; use tower_service::Service; +use tracing::debug; pub(super) use self::sealed::Resolve; /// A domain name to resolve into IP addresses. #[derive(Clone, Hash, Eq, PartialEq)] pub struct Name { - host: String, + host: Box, } /// A resolver using blocking `getaddrinfo` calls in a threadpool. @@ -48,16 +49,16 @@ pub struct GaiResolver { /// An iterator of IP addresses returned from `getaddrinfo`. pub struct GaiAddrs { - inner: IpAddrs, + inner: SocketAddrs, } /// A future to resolve a name returned by `GaiResolver`. pub struct GaiFuture { - inner: JoinHandle>, + inner: JoinHandle>, } impl Name { - pub(super) fn new(host: String) -> Name { + pub(super) fn new(host: Box) -> Name { Name { host } } @@ -84,7 +85,7 @@ impl FromStr for Name { fn from_str(host: &str) -> Result { // Possibly add validation later - Ok(Name::new(host.to_owned())) + Ok(Name::new(host.into())) } } @@ -121,7 +122,7 @@ impl Service for GaiResolver { debug!("resolving host={:?}", name.host); (&*name.host, 0) .to_socket_addrs() - .map(|i| IpAddrs { iter: i }) + .map(|i| SocketAddrs { iter: i }) }); GaiFuture { inner: blocking } @@ -141,7 +142,13 @@ impl Future for GaiFuture { Pin::new(&mut self.inner).poll(cx).map(|res| match res { Ok(Ok(addrs)) => Ok(GaiAddrs { inner: addrs }), Ok(Err(err)) => Err(err), - Err(join_err) => panic!("gai background task failed: {:?}", join_err), + Err(join_err) => { + if join_err.is_cancelled() { + Err(io::Error::new(io::ErrorKind::Interrupted, join_err)) + } else { + panic!("gai background task failed: {:?}", join_err) + } + } }) } } @@ -152,11 +159,17 @@ impl fmt::Debug for GaiFuture { } } +impl Drop for GaiFuture { + fn drop(&mut self) { + self.inner.abort(); + } +} + impl Iterator for GaiAddrs { - type Item = IpAddr; + type Item = SocketAddr; fn next(&mut self) -> Option { - self.inner.next().map(|sa| sa.ip()) + self.inner.next() } } @@ -166,55 +179,60 @@ impl fmt::Debug for GaiAddrs { } } -pub(super) struct IpAddrs { +pub(super) struct SocketAddrs { iter: vec::IntoIter, } -impl IpAddrs { +impl SocketAddrs { pub(super) fn new(addrs: Vec) -> Self { - IpAddrs { + SocketAddrs { iter: addrs.into_iter(), } } - pub(super) fn try_parse(host: &str, port: u16) -> Option { + pub(super) fn try_parse(host: &str, port: u16) -> Option { if let Ok(addr) = host.parse::() { let addr = SocketAddrV4::new(addr, port); - return Some(IpAddrs { + return Some(SocketAddrs { iter: vec![SocketAddr::V4(addr)].into_iter(), }); } - let host = host.trim_start_matches('[').trim_end_matches(']'); if let Ok(addr) = host.parse::() { let addr = SocketAddrV6::new(addr, port, 0, 0); - return Some(IpAddrs { + return Some(SocketAddrs { iter: vec![SocketAddr::V6(addr)].into_iter(), }); } None } - pub(super) fn split_by_preference(self, local_addr: Option) -> (IpAddrs, IpAddrs) { - if let Some(local_addr) = local_addr { - let preferred = self - .iter - .filter(|addr| addr.is_ipv6() == local_addr.is_ipv6()) - .collect(); + #[inline] + fn filter(self, predicate: impl FnMut(&SocketAddr) -> bool) -> SocketAddrs { + SocketAddrs::new(self.iter.filter(predicate).collect()) + } - (IpAddrs::new(preferred), IpAddrs::new(vec![])) - } else { - let preferring_v6 = 
self - .iter - .as_slice() - .first() - .map(SocketAddr::is_ipv6) - .unwrap_or(false); + pub(super) fn split_by_preference( + self, + local_addr_ipv4: Option, + local_addr_ipv6: Option, + ) -> (SocketAddrs, SocketAddrs) { + match (local_addr_ipv4, local_addr_ipv6) { + (Some(_), None) => (self.filter(SocketAddr::is_ipv4), SocketAddrs::new(vec![])), + (None, Some(_)) => (self.filter(SocketAddr::is_ipv6), SocketAddrs::new(vec![])), + _ => { + let preferring_v6 = self + .iter + .as_slice() + .first() + .map(SocketAddr::is_ipv6) + .unwrap_or(false); - let (preferred, fallback) = self - .iter - .partition::, _>(|addr| addr.is_ipv6() == preferring_v6); + let (preferred, fallback) = self + .iter + .partition::, _>(|addr| addr.is_ipv6() == preferring_v6); - (IpAddrs::new(preferred), IpAddrs::new(fallback)) + (SocketAddrs::new(preferred), SocketAddrs::new(fallback)) + } } } @@ -227,7 +245,7 @@ impl IpAddrs { } } -impl Iterator for IpAddrs { +impl Iterator for SocketAddrs { type Item = SocketAddr; #[inline] fn next(&mut self) -> Option { @@ -300,13 +318,13 @@ impl Future for TokioThreadpoolGaiFuture { */ mod sealed { - use super::{IpAddr, Name}; + use super::{SocketAddr, Name}; use crate::common::{task, Future, Poll}; use tower_service::Service; // "Trait alias" for `Service` pub trait Resolve { - type Addrs: Iterator; + type Addrs: Iterator; type Error: Into>; type Future: Future>; @@ -317,7 +335,7 @@ mod sealed { impl Resolve for S where S: Service, - S::Response: Iterator, + S::Response: Iterator, S::Error: Into>, { type Addrs = S::Response; @@ -334,7 +352,7 @@ mod sealed { } } -pub(crate) async fn resolve(resolver: &mut R, name: Name) -> Result +pub(super) async fn resolve(resolver: &mut R, name: Name) -> Result where R: Resolve, { @@ -349,34 +367,50 @@ mod tests { #[test] fn test_ip_addrs_split_by_preference() { - let v4_addr = (Ipv4Addr::new(127, 0, 0, 1), 80).into(); - let v6_addr = (Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1), 80).into(); + let ip_v4 = Ipv4Addr::new(127, 0, 0, 1); + let ip_v6 = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1); + let v4_addr = (ip_v4, 80).into(); + let v6_addr = (ip_v6, 80).into(); - let (mut preferred, mut fallback) = IpAddrs { + let (mut preferred, mut fallback) = SocketAddrs { iter: vec![v4_addr, v6_addr].into_iter(), } - .split_by_preference(None); + .split_by_preference(None, None); assert!(preferred.next().unwrap().is_ipv4()); assert!(fallback.next().unwrap().is_ipv6()); - let (mut preferred, mut fallback) = IpAddrs { + let (mut preferred, mut fallback) = SocketAddrs { iter: vec![v6_addr, v4_addr].into_iter(), } - .split_by_preference(None); + .split_by_preference(None, None); assert!(preferred.next().unwrap().is_ipv6()); assert!(fallback.next().unwrap().is_ipv4()); - let (mut preferred, fallback) = IpAddrs { + let (mut preferred, mut fallback) = SocketAddrs { iter: vec![v4_addr, v6_addr].into_iter(), } - .split_by_preference(Some(v4_addr.ip())); + .split_by_preference(Some(ip_v4), Some(ip_v6)); + assert!(preferred.next().unwrap().is_ipv4()); + assert!(fallback.next().unwrap().is_ipv6()); + + let (mut preferred, mut fallback) = SocketAddrs { + iter: vec![v6_addr, v4_addr].into_iter(), + } + .split_by_preference(Some(ip_v4), Some(ip_v6)); + assert!(preferred.next().unwrap().is_ipv6()); + assert!(fallback.next().unwrap().is_ipv4()); + + let (mut preferred, fallback) = SocketAddrs { + iter: vec![v4_addr, v6_addr].into_iter(), + } + .split_by_preference(Some(ip_v4), None); assert!(preferred.next().unwrap().is_ipv4()); assert!(fallback.is_empty()); - let (mut preferred, 
fallback) = IpAddrs { + let (mut preferred, fallback) = SocketAddrs { iter: vec![v4_addr, v6_addr].into_iter(), } - .split_by_preference(Some(v6_addr.ip())); + .split_by_preference(None, Some(ip_v6)); assert!(preferred.next().unwrap().is_ipv6()); assert!(fallback.is_empty()); } @@ -388,17 +422,4 @@ mod tests { assert_eq!(name.as_str(), DOMAIN); assert_eq!(name.to_string(), DOMAIN); } - - #[test] - fn ip_addrs_try_parse_v6() { - let dst = ::http::Uri::from_static("http://[::1]:8080/"); - - let mut addrs = - IpAddrs::try_parse(dst.host().expect("host"), dst.port_u16().expect("port")) - .expect("try_parse"); - - let expected = "[::1]:8080".parse::().expect("expected"); - - assert_eq!(addrs.next(), Some(expected)); - } } diff --git a/third_party/rust/hyper/src/client/connect/http.rs b/third_party/rust/hyper/src/client/connect/http.rs index 03a79a04c599..afe7b155eb76 100644 --- a/third_party/rust/hyper/src/client/connect/http.rs +++ b/third_party/rust/hyper/src/client/connect/http.rs @@ -3,7 +3,7 @@ use std::fmt; use std::future::Future; use std::io; use std::marker::PhantomData; -use std::net::{IpAddr, SocketAddr}; +use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; use std::pin::Pin; use std::sync::Arc; use std::task::{self, Poll}; @@ -11,9 +11,10 @@ use std::time::Duration; use futures_util::future::Either; use http::uri::{Scheme, Uri}; -use pin_project::pin_project; -use tokio::net::TcpStream; -use tokio::time::Delay; +use pin_project_lite::pin_project; +use tokio::net::{TcpSocket, TcpStream}; +use tokio::time::Sleep; +use tracing::{debug, trace, warn}; use super::dns::{self, resolve, GaiResolver, Resolve}; use super::{Connected, Connection}; @@ -27,6 +28,7 @@ use super::{Connected, Connection}; /// /// Sets the [`HttpInfo`](HttpInfo) value on responses, which includes /// transport information such as the remote socket address used. +#[cfg_attr(docsrs, doc(cfg(feature = "tcp")))] #[derive(Clone)] pub struct HttpConnector { config: Arc, @@ -64,6 +66,7 @@ pub struct HttpConnector { #[derive(Clone, Debug)] pub struct HttpInfo { remote_addr: SocketAddr, + local_addr: SocketAddr, } #[derive(Clone)] @@ -72,7 +75,8 @@ struct Config { enforce_http: bool, happy_eyeballs_timeout: Option, keep_alive_timeout: Option, - local_address: Option, + local_address_ipv4: Option, + local_address_ipv6: Option, nodelay: bool, reuse_address: bool, send_buffer_size: Option, @@ -103,7 +107,7 @@ impl HttpConnector { impl HttpConnector { /// Construct a new HttpConnector. /// - /// Takes a `Resolve` to handle DNS lookups. + /// Takes a [`Resolver`](crate::client::connect::dns#resolvers-are-services) to handle DNS lookups. pub fn new_with_resolver(resolver: R) -> HttpConnector { HttpConnector { config: Arc::new(Config { @@ -111,7 +115,8 @@ impl HttpConnector { enforce_http: true, happy_eyeballs_timeout: Some(Duration::from_millis(300)), keep_alive_timeout: None, - local_address: None, + local_address_ipv4: None, + local_address_ipv6: None, nodelay: false, reuse_address: false, send_buffer_size: None, @@ -166,7 +171,26 @@ impl HttpConnector { /// Default is `None`. 
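The `split_by_preference` rework above (and `set_local_address`, whose body continues below) reduces to: if exactly one local address family is configured, keep only that family; otherwise prefer whichever family the first resolved address uses. A self-contained sketch with plain `Vec`s in place of the crate-private `SocketAddrs` type:

```rust
use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr};

fn split_by_preference(
    addrs: Vec<SocketAddr>,
    local_v4: Option<Ipv4Addr>,
    local_v6: Option<Ipv6Addr>,
) -> (Vec<SocketAddr>, Vec<SocketAddr>) {
    match (local_v4, local_v6) {
        // Only one family is bound locally: the other family is unusable.
        (Some(_), None) => (
            addrs.into_iter().filter(SocketAddr::is_ipv4).collect(),
            Vec::new(),
        ),
        (None, Some(_)) => (
            addrs.into_iter().filter(SocketAddr::is_ipv6).collect(),
            Vec::new(),
        ),
        // Otherwise prefer the family of the first resolved address.
        _ => {
            let prefer_v6 = addrs.first().map(SocketAddr::is_ipv6).unwrap_or(false);
            addrs.into_iter().partition(|a| a.is_ipv6() == prefer_v6)
        }
    }
}

fn main() {
    let v4: SocketAddr = "127.0.0.1:80".parse().unwrap();
    let v6: SocketAddr = "[::1]:80".parse().unwrap();
    let (preferred, fallback) = split_by_preference(vec![v6, v4], None, None);
    assert!(preferred[0].is_ipv6() && fallback[0].is_ipv4());
}
```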
#[inline] pub fn set_local_address(&mut self, addr: Option) { - self.config_mut().local_address = addr; + let (v4, v6) = match addr { + Some(IpAddr::V4(a)) => (Some(a), None), + Some(IpAddr::V6(a)) => (None, Some(a)), + _ => (None, None), + }; + + let cfg = self.config_mut(); + + cfg.local_address_ipv4 = v4; + cfg.local_address_ipv6 = v6; + } + + /// Set that all sockets are bound to the configured IPv4 or IPv6 address (depending on host's + /// preferences) before connection. + #[inline] + pub fn set_local_addresses(&mut self, addr_ipv4: Ipv4Addr, addr_ipv6: Ipv6Addr) { + let cfg = self.config_mut(); + + cfg.local_address_ipv4 = Some(addr_ipv4); + cfg.local_address_ipv6 = Some(addr_ipv6); } /// Set the connect timeout. @@ -250,97 +274,86 @@ where } } +fn get_host_port<'u>(config: &Config, dst: &'u Uri) -> Result<(&'u str, u16), ConnectError> { + trace!( + "Http::connect; scheme={:?}, host={:?}, port={:?}", + dst.scheme(), + dst.host(), + dst.port(), + ); + + if config.enforce_http { + if dst.scheme() != Some(&Scheme::HTTP) { + return Err(ConnectError { + msg: INVALID_NOT_HTTP.into(), + cause: None, + }); + } + } else if dst.scheme().is_none() { + return Err(ConnectError { + msg: INVALID_MISSING_SCHEME.into(), + cause: None, + }); + } + + let host = match dst.host() { + Some(s) => s, + None => { + return Err(ConnectError { + msg: INVALID_MISSING_HOST.into(), + cause: None, + }) + } + }; + let port = match dst.port() { + Some(port) => port.as_u16(), + None => { + if dst.scheme() == Some(&Scheme::HTTPS) { + 443 + } else { + 80 + } + } + }; + + Ok((host, port)) +} + impl HttpConnector where R: Resolve, { async fn call_async(&mut self, dst: Uri) -> Result { - trace!( - "Http::connect; scheme={:?}, host={:?}, port={:?}", - dst.scheme(), - dst.host(), - dst.port(), - ); - - if self.config.enforce_http { - if dst.scheme() != Some(&Scheme::HTTP) { - return Err(ConnectError { - msg: INVALID_NOT_HTTP.into(), - cause: None, - }); - } - } else if dst.scheme().is_none() { - return Err(ConnectError { - msg: INVALID_MISSING_SCHEME.into(), - cause: None, - }); - } - - let host = match dst.host() { - Some(s) => s, - None => { - return Err(ConnectError { - msg: INVALID_MISSING_HOST.into(), - cause: None, - }) - } - }; - let port = match dst.port() { - Some(port) => port.as_u16(), - None => { - if dst.scheme() == Some(&Scheme::HTTPS) { - 443 - } else { - 80 - } - } - }; - let config = &self.config; + let (host, port) = get_host_port(config, &dst)?; + let host = host.trim_start_matches('[').trim_end_matches(']'); + // If the host is already an IP addr (v4 or v6), // skip resolving the dns and start connecting right away. 
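
For callers, the practical effect of the hunk above is the pair of setters on `HttpConnector`. A short usage sketch against hyper 0.14 (assuming the `client`, `http1` and `tcp` features; the addresses are placeholders):

```rust
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};

use hyper::client::HttpConnector;
use hyper::{Body, Client};

fn main() {
    let mut connector = HttpConnector::new();

    // Bind a single IPv4 source address; with only one family configured,
    // the connector restricts itself to destinations of that family.
    connector.set_local_address(Some(IpAddr::V4(Ipv4Addr::new(192, 0, 2, 10))));

    // Or configure one source address per family; whichever matches the
    // destination's family is bound before connecting.
    connector.set_local_addresses(
        Ipv4Addr::new(192, 0, 2, 10),
        Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 1),
    );

    let _client: Client<_, Body> = Client::builder().build(connector);
}
```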
- let addrs = if let Some(addrs) = dns::IpAddrs::try_parse(host, port) { + let addrs = if let Some(addrs) = dns::SocketAddrs::try_parse(host, port) { addrs } else { let addrs = resolve(&mut self.resolver, dns::Name::new(host.into())) .await .map_err(ConnectError::dns)?; - let addrs = addrs.map(|addr| SocketAddr::new(addr, port)).collect(); - dns::IpAddrs::new(addrs) + let addrs = addrs + .map(|mut addr| { + addr.set_port(port); + addr + }) + .collect(); + dns::SocketAddrs::new(addrs) }; - let c = ConnectingTcp::new( - config.local_address, - addrs, - config.connect_timeout, - config.happy_eyeballs_timeout, - config.reuse_address, - ); + let c = ConnectingTcp::new(addrs, config); - let sock = c - .connect() - .await - .map_err(ConnectError::m("tcp connect error"))?; + let sock = c.connect().await?; - if let Some(dur) = config.keep_alive_timeout { - sock.set_keepalive(Some(dur)) - .map_err(ConnectError::m("tcp set_keepalive error"))?; + if let Err(e) = sock.set_nodelay(config.nodelay) { + warn!("tcp set_nodelay error: {}", e); } - if let Some(size) = config.send_buffer_size { - sock.set_send_buffer_size(size) - .map_err(ConnectError::m("tcp set_send_buffer_size error"))?; - } - - if let Some(size) = config.recv_buffer_size { - sock.set_recv_buffer_size(size) - .map_err(ConnectError::m("tcp set_recv_buffer_size error"))?; - } - - sock.set_nodelay(config.nodelay) - .map_err(ConnectError::m("tcp set_nodelay error"))?; - Ok(sock) } } @@ -348,8 +361,8 @@ where impl Connection for TcpStream { fn connected(&self) -> Connected { let connected = Connected::new(); - if let Ok(remote_addr) = self.peer_addr() { - connected.extra(HttpInfo { remote_addr }) + if let (Ok(remote_addr), Ok(local_addr)) = (self.peer_addr(), self.local_addr()) { + connected.extra(HttpInfo { remote_addr, local_addr }) } else { connected } @@ -361,20 +374,26 @@ impl HttpInfo { pub fn remote_addr(&self) -> SocketAddr { self.remote_addr } + + /// Get the local address of the transport used. + pub fn local_addr(&self) -> SocketAddr { + self.local_addr + } } -// Not publicly exported (so missing_docs doesn't trigger). -// -// We return this `Future` instead of the `Pin>` directly -// so that users don't rely on it fitting in a `Pin>` slot -// (and thus we can change the type in the future). -#[must_use = "futures do nothing unless polled"] -#[pin_project] -#[allow(missing_debug_implementations)] -pub struct HttpConnecting { - #[pin] - fut: BoxConnecting, - _marker: PhantomData, +pin_project! { + // Not publicly exported (so missing_docs doesn't trigger). + // + // We return this `Future` instead of the `Pin>` directly + // so that users don't rely on it fitting in a `Pin>` slot + // (and thus we can change the type in the future). 
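
`HttpInfo` now carries the local socket address alongside the remote one, and both are attached to responses through the connection extras. A hedged usage sketch (hyper 0.14 with the `client`, `http1` and `tcp` features plus a tokio runtime; the URL is a placeholder):

```rust
use hyper::client::connect::HttpInfo;
use hyper::{Client, Uri};

#[tokio::main]
async fn main() -> hyper::Result<()> {
    let client = Client::new();
    let res = client.get(Uri::from_static("http://httpbin.org/ip")).await?;

    // The HttpConnector records the connection's addresses in the
    // response extensions; `local_addr()` is the newly added accessor.
    if let Some(info) = res.extensions().get::<HttpInfo>() {
        println!("remote = {}, local = {}", info.remote_addr(), info.local_addr());
    }
    Ok(())
}
```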
+ #[must_use = "futures do nothing unless polled"] + #[allow(missing_debug_implementations)] + pub struct HttpConnecting { + #[pin] + fut: BoxConnecting, + _marker: PhantomData, + } } type ConnectResult = Result; @@ -453,64 +472,55 @@ impl StdError for ConnectError { } } -struct ConnectingTcp { - local_addr: Option, +struct ConnectingTcp<'a> { preferred: ConnectingTcpRemote, fallback: Option, - reuse_address: bool, + config: &'a Config, } -impl ConnectingTcp { - fn new( - local_addr: Option, - remote_addrs: dns::IpAddrs, - connect_timeout: Option, - fallback_timeout: Option, - reuse_address: bool, - ) -> ConnectingTcp { - if let Some(fallback_timeout) = fallback_timeout { - let (preferred_addrs, fallback_addrs) = remote_addrs.split_by_preference(local_addr); +impl<'a> ConnectingTcp<'a> { + fn new(remote_addrs: dns::SocketAddrs, config: &'a Config) -> Self { + if let Some(fallback_timeout) = config.happy_eyeballs_timeout { + let (preferred_addrs, fallback_addrs) = remote_addrs + .split_by_preference(config.local_address_ipv4, config.local_address_ipv6); if fallback_addrs.is_empty() { return ConnectingTcp { - local_addr, - preferred: ConnectingTcpRemote::new(preferred_addrs, connect_timeout), + preferred: ConnectingTcpRemote::new(preferred_addrs, config.connect_timeout), fallback: None, - reuse_address, + config, }; } ConnectingTcp { - local_addr, - preferred: ConnectingTcpRemote::new(preferred_addrs, connect_timeout), + preferred: ConnectingTcpRemote::new(preferred_addrs, config.connect_timeout), fallback: Some(ConnectingTcpFallback { - delay: tokio::time::delay_for(fallback_timeout), - remote: ConnectingTcpRemote::new(fallback_addrs, connect_timeout), + delay: tokio::time::sleep(fallback_timeout), + remote: ConnectingTcpRemote::new(fallback_addrs, config.connect_timeout), }), - reuse_address, + config, } } else { ConnectingTcp { - local_addr, - preferred: ConnectingTcpRemote::new(remote_addrs, connect_timeout), + preferred: ConnectingTcpRemote::new(remote_addrs, config.connect_timeout), fallback: None, - reuse_address, + config, } } } } struct ConnectingTcpFallback { - delay: Delay, + delay: Sleep, remote: ConnectingTcpRemote, } struct ConnectingTcpRemote { - addrs: dns::IpAddrs, + addrs: dns::SocketAddrs, connect_timeout: Option, } impl ConnectingTcpRemote { - fn new(addrs: dns::IpAddrs, connect_timeout: Option) -> Self { + fn new(addrs: dns::SocketAddrs, connect_timeout: Option) -> Self { let connect_timeout = connect_timeout.map(|t| t / (addrs.len() as u32)); Self { @@ -521,15 +531,11 @@ impl ConnectingTcpRemote { } impl ConnectingTcpRemote { - async fn connect( - &mut self, - local_addr: &Option, - reuse_address: bool, - ) -> io::Result { + async fn connect(&mut self, config: &Config) -> Result { let mut err = None; for addr in &mut self.addrs { debug!("connecting to {}", addr); - match connect(&addr, local_addr, reuse_address, self.connect_timeout)?.await { + match connect(&addr, config, self.connect_timeout)?.await { Ok(tcp) => { debug!("connected to {}", addr); return Ok(tcp); @@ -541,45 +547,119 @@ impl ConnectingTcpRemote { } } - Err(err.take().expect("missing connect error")) + match err { + Some(e) => Err(e), + None => Err(ConnectError::new( + "tcp connect error", + std::io::Error::new(std::io::ErrorKind::NotConnected, "Network unreachable"), + )), + } } } +fn bind_local_address( + socket: &socket2::Socket, + dst_addr: &SocketAddr, + local_addr_ipv4: &Option, + local_addr_ipv6: &Option, +) -> io::Result<()> { + match (*dst_addr, local_addr_ipv4, local_addr_ipv6) { + 
(SocketAddr::V4(_), Some(addr), _) => { + socket.bind(&SocketAddr::new(addr.clone().into(), 0).into())?; + } + (SocketAddr::V6(_), _, Some(addr)) => { + socket.bind(&SocketAddr::new(addr.clone().into(), 0).into())?; + } + _ => { + if cfg!(windows) { + // Windows requires a socket be bound before calling connect + let any: SocketAddr = match *dst_addr { + SocketAddr::V4(_) => ([0, 0, 0, 0], 0).into(), + SocketAddr::V6(_) => ([0, 0, 0, 0, 0, 0, 0, 0], 0).into(), + }; + socket.bind(&any.into())?; + } + } + } + + Ok(()) +} + fn connect( addr: &SocketAddr, - local_addr: &Option, - reuse_address: bool, + config: &Config, connect_timeout: Option, -) -> io::Result>> { - use socket2::{Domain, Protocol, Socket, Type}; - let domain = match *addr { - SocketAddr::V4(_) => Domain::ipv4(), - SocketAddr::V6(_) => Domain::ipv6(), +) -> Result>, ConnectError> { + // TODO(eliza): if Tokio's `TcpSocket` gains support for setting the + // keepalive timeout, it would be nice to use that instead of socket2, + // and avoid the unsafe `into_raw_fd`/`from_raw_fd` dance... + use socket2::{Domain, Protocol, Socket, TcpKeepalive, Type}; + use std::convert::TryInto; + + let domain = Domain::for_address(*addr); + let socket = Socket::new(domain, Type::STREAM, Some(Protocol::TCP)) + .map_err(ConnectError::m("tcp open error"))?; + + // When constructing a Tokio `TcpSocket` from a raw fd/socket, the user is + // responsible for ensuring O_NONBLOCK is set. + socket + .set_nonblocking(true) + .map_err(ConnectError::m("tcp set_nonblocking error"))?; + + if let Some(dur) = config.keep_alive_timeout { + let conf = TcpKeepalive::new().with_time(dur); + if let Err(e) = socket.set_tcp_keepalive(&conf) { + warn!("tcp set_keepalive error: {}", e); + } + } + + bind_local_address( + &socket, + addr, + &config.local_address_ipv4, + &config.local_address_ipv6, + ) + .map_err(ConnectError::m("tcp bind local error"))?; + + #[cfg(unix)] + let socket = unsafe { + // Safety: `from_raw_fd` is only safe to call if ownership of the raw + // file descriptor is transferred. Since we call `into_raw_fd` on the + // socket2 socket, it gives up ownership of the fd and will not close + // it, so this is safe. + use std::os::unix::io::{FromRawFd, IntoRawFd}; + TcpSocket::from_raw_fd(socket.into_raw_fd()) + }; + #[cfg(windows)] + let socket = unsafe { + // Safety: `from_raw_socket` is only safe to call if ownership of the raw + // Windows SOCKET is transferred. Since we call `into_raw_socket` on the + // socket2 socket, it gives up ownership of the SOCKET and will not close + // it, so this is safe. 
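
`bind_local_address` binds the configured source address (with port 0) of the matching family before connecting, and on Windows binds an "any" address unconditionally because `connect` there requires a bound socket. Outside hyper, the same bind-then-connect flow can be written directly with tokio's `TcpSocket`; a small sketch with placeholder addresses:

```rust
use std::net::SocketAddr;

use tokio::net::{TcpSocket, TcpStream};

async fn connect_from(local: SocketAddr, remote: SocketAddr) -> std::io::Result<TcpStream> {
    let socket = if remote.is_ipv4() {
        TcpSocket::new_v4()?
    } else {
        TcpSocket::new_v6()?
    };
    // Port 0 lets the OS pick an ephemeral source port.
    socket.bind(local)?;
    socket.connect(remote).await
}

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let stream = connect_from(
        "127.0.0.1:0".parse().unwrap(),
        "127.0.0.1:8080".parse().unwrap(),
    )
    .await?;
    println!("bound local address: {}", stream.local_addr()?);
    Ok(())
}
```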
+ use std::os::windows::io::{FromRawSocket, IntoRawSocket}; + TcpSocket::from_raw_socket(socket.into_raw_socket()) }; - let socket = Socket::new(domain, Type::stream(), Some(Protocol::tcp()))?; - if reuse_address { - socket.set_reuse_address(true)?; + if config.reuse_address { + if let Err(e) = socket.set_reuseaddr(true) { + warn!("tcp set_reuse_address error: {}", e); + } } - if let Some(ref local_addr) = *local_addr { - // Caller has requested this socket be bound before calling connect - socket.bind(&SocketAddr::new(local_addr.clone(), 0).into())?; - } else if cfg!(windows) { - // Windows requires a socket be bound before calling connect - let any: SocketAddr = match *addr { - SocketAddr::V4(_) => ([0, 0, 0, 0], 0).into(), - SocketAddr::V6(_) => ([0, 0, 0, 0, 0, 0, 0, 0], 0).into(), - }; - socket.bind(&any.into())?; + if let Some(size) = config.send_buffer_size { + if let Err(e) = socket.set_send_buffer_size(size.try_into().unwrap_or(std::u32::MAX)) { + warn!("tcp set_buffer_size error: {}", e); + } } - let addr = *addr; - - let std_tcp = socket.into_tcp_stream(); + if let Some(size) = config.recv_buffer_size { + if let Err(e) = socket.set_recv_buffer_size(size.try_into().unwrap_or(std::u32::MAX)) { + warn!("tcp set_recv_buffer_size error: {}", e); + } + } + let connect = socket.connect(*addr); Ok(async move { - let connect = TcpStream::connect_std(std_tcp, &addr); match connect_timeout { Some(dur) => match tokio::time::timeout(dur, connect).await { Ok(Ok(s)) => Ok(s), @@ -588,27 +668,26 @@ fn connect( }, None => connect.await, } + .map_err(ConnectError::m("tcp connect error")) }) } -impl ConnectingTcp { - async fn connect(mut self) -> io::Result { - let Self { - ref local_addr, - reuse_address, - .. - } = self; +impl ConnectingTcp<'_> { + async fn connect(mut self) -> Result { match self.fallback { - None => self.preferred.connect(local_addr, reuse_address).await, + None => self.preferred.connect(self.config).await, Some(mut fallback) => { - let preferred_fut = self.preferred.connect(local_addr, reuse_address); + let preferred_fut = self.preferred.connect(self.config); futures_util::pin_mut!(preferred_fut); - let fallback_fut = fallback.remote.connect(local_addr, reuse_address); + let fallback_fut = fallback.remote.connect(self.config); futures_util::pin_mut!(fallback_fut); + let fallback_delay = fallback.delay; + futures_util::pin_mut!(fallback_delay); + let (result, future) = - match futures_util::future::select(preferred_fut, fallback.delay).await { + match futures_util::future::select(preferred_fut, fallback_delay).await { Either::Left((result, _fallback_delay)) => { (result, Either::Right(fallback_fut)) } @@ -639,7 +718,7 @@ mod tests { use ::http::Uri; use super::super::sealed::{Connect, ConnectSvc}; - use super::HttpConnector; + use super::{Config, ConnectError, HttpConnector}; async fn connect( connector: C, @@ -660,6 +739,32 @@ mod tests { assert_eq!(&*err.msg, super::INVALID_NOT_HTTP); } + #[cfg(any(target_os = "linux", target_os = "macos"))] + fn get_local_ips() -> (Option, Option) { + use std::net::{IpAddr, TcpListener}; + + let mut ip_v4 = None; + let mut ip_v6 = None; + + let ips = pnet_datalink::interfaces() + .into_iter() + .flat_map(|i| i.ips.into_iter().map(|n| n.ip())); + + for ip in ips { + match ip { + IpAddr::V4(ip) if TcpListener::bind((ip, 0)).is_ok() => ip_v4 = Some(ip), + IpAddr::V6(ip) if TcpListener::bind((ip, 0)).is_ok() => ip_v6 = Some(ip), + _ => (), + } + + if ip_v4.is_some() && ip_v6.is_some() { + break; + } + } + + (ip_v4, ip_v6) + } + #[tokio::test] 
async fn test_errors_missing_scheme() { let dst = "example.domain".parse().unwrap(); @@ -670,6 +775,44 @@ mod tests { assert_eq!(&*err.msg, super::INVALID_MISSING_SCHEME); } + // NOTE: pnet crate that we use in this test doesn't compile on Windows + #[cfg(any(target_os = "linux", target_os = "macos"))] + #[tokio::test] + async fn local_address() { + use std::net::{IpAddr, TcpListener}; + let _ = pretty_env_logger::try_init(); + + let (bind_ip_v4, bind_ip_v6) = get_local_ips(); + let server4 = TcpListener::bind("127.0.0.1:0").unwrap(); + let port = server4.local_addr().unwrap().port(); + let server6 = TcpListener::bind(&format!("[::1]:{}", port)).unwrap(); + + let assert_client_ip = |dst: String, server: TcpListener, expected_ip: IpAddr| async move { + let mut connector = HttpConnector::new(); + + match (bind_ip_v4, bind_ip_v6) { + (Some(v4), Some(v6)) => connector.set_local_addresses(v4, v6), + (Some(v4), None) => connector.set_local_address(Some(v4.into())), + (None, Some(v6)) => connector.set_local_address(Some(v6.into())), + _ => unreachable!(), + } + + connect(connector, dst.parse().unwrap()).await.unwrap(); + + let (_, client_addr) = server.accept().unwrap(); + + assert_eq!(client_addr.ip(), expected_ip); + }; + + if let Some(ip) = bind_ip_v4 { + assert_client_ip(format!("http://127.0.0.1:{}", port), server4, ip.into()).await; + } + + if let Some(ip) = bind_ip_v6 { + assert_client_ip(format!("http://[::1]:{}", port), server6, ip.into()).await; + } + } + #[test] #[cfg_attr(not(feature = "__internal_happy_eyeballs_tests"), ignore)] fn client_happy_eyeballs() { @@ -683,10 +826,8 @@ mod tests { let server4 = TcpListener::bind("127.0.0.1:0").unwrap(); let addr = server4.local_addr().unwrap(); let _server6 = TcpListener::bind(&format!("[::1]:{}", addr.port())).unwrap(); - let mut rt = tokio::runtime::Builder::new() - .enable_io() - .enable_time() - .basic_scheduler() + let rt = tokio::runtime::Builder::new_current_thread() + .enable_all() .build() .unwrap(); @@ -790,15 +931,21 @@ mod tests { .iter() .map(|host| (host.clone(), addr.port()).into()) .collect(); - let connecting_tcp = ConnectingTcp::new( - None, - dns::IpAddrs::new(addrs), - None, - Some(fallback_timeout), - false, - ); + let cfg = Config { + local_address_ipv4: None, + local_address_ipv6: None, + connect_timeout: None, + keep_alive_timeout: None, + happy_eyeballs_timeout: Some(fallback_timeout), + nodelay: false, + reuse_address: false, + enforce_http: false, + send_buffer_size: None, + recv_buffer_size: None, + }; + let connecting_tcp = ConnectingTcp::new(dns::SocketAddrs::new(addrs), &cfg); let start = Instant::now(); - Ok::<_, io::Error>((start, connecting_tcp.connect().await?)) + Ok::<_, ConnectError>((start, ConnectingTcp::connect(connecting_tcp).await?)) }) .unwrap(); let res = if stream.peer_addr().unwrap().is_ipv4() { diff --git a/third_party/rust/hyper/src/client/connect/mod.rs b/third_party/rust/hyper/src/client/connect/mod.rs index 01e2dcf9d3ba..862a0e65c13d 100644 --- a/third_party/rust/hyper/src/client/connect/mod.rs +++ b/third_party/rust/hyper/src/client/connect/mod.rs @@ -26,6 +26,8 @@ //! Or, fully written out: //! //! ``` +//! # #[cfg(feature = "runtime")] +//! # mod rt { //! use std::{future::Future, net::SocketAddr, pin::Pin, task::{self, Poll}}; //! use hyper::{service::Service, Uri}; //! use tokio::net::TcpStream; @@ -50,6 +52,7 @@ //! Box::pin(TcpStream::connect(SocketAddr::from(([127, 0, 0, 1], 1337)))) //! } //! } +//! # } //! ``` //! //! 
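
`ConnectingTcp::connect` (exercised by the `client_happy_eyeballs` test in this hunk) races the preferred address family against a short delay, and only starts the fallback family if the delay fires first. A simplified standalone sketch of that race using `futures_util::future::select`; it races one address per family rather than hyper's full per-address loop:

```rust
use std::net::SocketAddr;
use std::time::Duration;

use futures_util::future::{self, Either};
use futures_util::pin_mut;
use tokio::net::TcpStream;

async fn happy_eyeballs(
    preferred: SocketAddr,
    fallback: SocketAddr,
    fallback_delay: Duration,
) -> std::io::Result<TcpStream> {
    let preferred_fut = TcpStream::connect(preferred);
    pin_mut!(preferred_fut);

    let delay = tokio::time::sleep(fallback_delay);
    pin_mut!(delay);

    match future::select(preferred_fut, delay).await {
        // Preferred attempt resolved (successfully or not) before the delay.
        Either::Left((result, _delay)) => result,
        // Delay elapsed first: start the fallback and take the first success.
        Either::Right(((), preferred_fut)) => {
            let fallback_fut = TcpStream::connect(fallback);
            pin_mut!(fallback_fut);
            match future::select(preferred_fut, fallback_fut).await {
                Either::Left((Ok(stream), _other)) => Ok(stream),
                Either::Left((Err(_), other)) => other.await,
                Either::Right((Ok(stream), _other)) => Ok(stream),
                Either::Right((Err(_), other)) => other.await,
            }
        }
    }
}
```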
It's worth noting that for `TcpStream`s, the [`HttpConnector`][] is a @@ -59,31 +62,41 @@ //! `Client` like this: //! //! ``` +//! # #[cfg(feature = "runtime")] +//! # fn rt () { //! # let connector = hyper::client::HttpConnector::new(); //! // let connector = ... //! //! let client = hyper::Client::builder() //! .build::<_, hyper::Body>(connector); +//! # } //! ``` //! //! //! [`HttpConnector`]: HttpConnector //! [`Service`]: crate::service::Service -//! [`Uri`]: http::Uri +//! [`Uri`]: ::http::Uri //! [`AsyncRead`]: tokio::io::AsyncRead //! [`AsyncWrite`]: tokio::io::AsyncWrite //! [`Connection`]: Connection use std::fmt; -use ::http::Response; +use ::http::Extensions; -#[cfg(feature = "tcp")] -pub mod dns; -#[cfg(feature = "tcp")] -mod http; -#[cfg(feature = "tcp")] -pub use self::http::{HttpConnector, HttpInfo}; -pub use self::sealed::Connect; +cfg_feature! { + #![feature = "tcp"] + + pub use self::http::{HttpConnector, HttpInfo}; + + pub mod dns; + mod http; +} + +cfg_feature! { + #![any(feature = "http1", feature = "http2")] + + pub use self::sealed::Connect; +} /// Describes a type returned by a connector. pub trait Connection { @@ -143,6 +156,11 @@ impl Connected { self } + /// Determines if the connected transport is to an HTTP proxy. + pub fn is_proxied(&self) -> bool { + self.is_proxied + } + /// Set extra connection information to be set in the extensions of every `Response`. pub fn extra(mut self, extra: T) -> Connected { if let Some(prev) = self.extra { @@ -153,15 +171,27 @@ impl Connected { self } - /// Set that the connected transport negotiated HTTP/2 as it's - /// next protocol. + /// Copies the extra connection information into an `Extensions` map. + pub fn get_extras(&self, extensions: &mut Extensions) { + if let Some(extra) = &self.extra { + extra.set(extensions); + } + } + + /// Set that the connected transport negotiated HTTP/2 as its next protocol. pub fn negotiated_h2(mut self) -> Connected { self.alpn = Alpn::H2; self } + /// Determines if the connected transport negotiated HTTP/2 as its next protocol. + pub fn is_negotiated_h2(&self) -> bool { + self.alpn == Alpn::H2 + } + // Don't public expose that `Connected` is `Clone`, unsure if we want to // keep that contract... 
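
Beyond the built-in `HttpInfo`, the `Connected` extras are the general mechanism for a custom connector to pass per-connection metadata into response extensions; `is_proxied()` and `is_negotiated_h2()` are the new read-side accessors. A hedged sketch of a custom transport reporting both kinds of information (the wrapper type and `ConnectedVia` are hypothetical; the `AsyncRead`/`AsyncWrite` forwarding a real connector also needs is omitted, and the `TcpStream` impl of `Connection` assumes the `tcp` feature):

```rust
use hyper::client::connect::{Connected, Connection};
use tokio::net::TcpStream;

/// Hypothetical application metadata, readable later from `Response::extensions()`.
#[derive(Clone, Debug)]
struct ConnectedVia {
    proxy_name: &'static str,
}

/// Hypothetical wrapper around a TCP transport obtained through a proxy.
struct ProxiedTransport {
    stream: TcpStream,
}

impl Connection for ProxiedTransport {
    fn connected(&self) -> Connected {
        // Start from the underlying stream's info (remote/local addresses),
        // then mark the connection as proxied and attach our own extra.
        self.stream
            .connected()
            .proxy(true)
            .extra(ConnectedVia { proxy_name: "corp-proxy" })
    }
}
```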
+ #[cfg(feature = "http2")] pub(super) fn clone(&self) -> Connected { Connected { alpn: self.alpn.clone(), @@ -174,7 +204,7 @@ impl Connected { // ===== impl Extra ===== impl Extra { - pub(super) fn set(&self, res: &mut Response) { + pub(super) fn set(&self, res: &mut Extensions) { self.0.set(res); } } @@ -193,7 +223,7 @@ impl fmt::Debug for Extra { trait ExtraInner: Send + Sync { fn clone_box(&self) -> Box; - fn set(&self, res: &mut Response); + fn set(&self, res: &mut Extensions); } // This indirection allows the `Connected` to have a type-erased "extra" value, @@ -210,8 +240,8 @@ where Box::new(self.clone()) } - fn set(&self, res: &mut Response) { - res.extensions_mut().insert(self.0.clone()); + fn set(&self, res: &mut Extensions) { + res.insert(self.0.clone()); } } @@ -231,12 +261,13 @@ where Box::new(self.clone()) } - fn set(&self, res: &mut Response) { + fn set(&self, res: &mut Extensions) { self.0.set(res); - res.extensions_mut().insert(self.1.clone()); + res.insert(self.1.clone()); } } +#[cfg(any(feature = "http1", feature = "http2"))] pub(super) mod sealed { use std::error::Error as StdError; @@ -334,13 +365,13 @@ mod tests { fn test_connected_extra() { let c1 = Connected::new().extra(Ex1(41)); - let mut res1 = crate::Response::new(crate::Body::empty()); + let mut ex = ::http::Extensions::new(); - assert_eq!(res1.extensions().get::(), None); + assert_eq!(ex.get::(), None); - c1.extra.as_ref().expect("c1 extra").set(&mut res1); + c1.extra.as_ref().expect("c1 extra").set(&mut ex); - assert_eq!(res1.extensions().get::(), Some(&Ex1(41))); + assert_eq!(ex.get::(), Some(&Ex1(41))); } #[test] @@ -353,17 +384,17 @@ mod tests { .extra(Ex2("zoom")) .extra(Ex3("pew pew")); - let mut res1 = crate::Response::new(crate::Body::empty()); + let mut ex1 = ::http::Extensions::new(); - assert_eq!(res1.extensions().get::(), None); - assert_eq!(res1.extensions().get::(), None); - assert_eq!(res1.extensions().get::(), None); + assert_eq!(ex1.get::(), None); + assert_eq!(ex1.get::(), None); + assert_eq!(ex1.get::(), None); - c1.extra.as_ref().expect("c1 extra").set(&mut res1); + c1.extra.as_ref().expect("c1 extra").set(&mut ex1); - assert_eq!(res1.extensions().get::(), Some(&Ex1(45))); - assert_eq!(res1.extensions().get::(), Some(&Ex2("zoom"))); - assert_eq!(res1.extensions().get::(), Some(&Ex3("pew pew"))); + assert_eq!(ex1.get::(), Some(&Ex1(45))); + assert_eq!(ex1.get::(), Some(&Ex2("zoom"))); + assert_eq!(ex1.get::(), Some(&Ex3("pew pew"))); // Just like extensions, inserting the same type overrides previous type. 
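
The extras plumbing now targets `http::Extensions` directly instead of a whole `Response`, which is what lets `Connected::get_extras` copy values into any extensions map. `Extensions` is a type-keyed map, and, as the tests here rely on, inserting the same type again replaces the previous value; a minimal illustration (the `RequestTag` type is made up for the example):

```rust
use http::Extensions;

#[derive(Clone, Debug, PartialEq)]
struct RequestTag(&'static str);

fn main() {
    let mut ext = Extensions::new();

    // Values are keyed by their type; `insert` returns the replaced value, if any.
    assert_eq!(ext.insert(RequestTag("first")), None);
    assert_eq!(ext.insert(RequestTag("second")), Some(RequestTag("first")));

    assert_eq!(ext.get::<RequestTag>(), Some(&RequestTag("second")));
}
```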
let c2 = Connected::new() @@ -371,11 +402,11 @@ mod tests { .extra(Ex2("hiccup")) .extra(Ex1(99)); - let mut res2 = crate::Response::new(crate::Body::empty()); + let mut ex2 = ::http::Extensions::new(); - c2.extra.as_ref().expect("c2 extra").set(&mut res2); + c2.extra.as_ref().expect("c2 extra").set(&mut ex2); - assert_eq!(res2.extensions().get::(), Some(&Ex1(99))); - assert_eq!(res2.extensions().get::(), Some(&Ex2("hiccup"))); + assert_eq!(ex2.get::(), Some(&Ex1(99))); + assert_eq!(ex2.get::(), Some(&Ex2("hiccup"))); } } diff --git a/third_party/rust/hyper/src/client/dispatch.rs b/third_party/rust/hyper/src/client/dispatch.rs index 9a580f85ac4a..1d2b87eb00d6 100644 --- a/third_party/rust/hyper/src/client/dispatch.rs +++ b/third_party/rust/hyper/src/client/dispatch.rs @@ -1,12 +1,17 @@ -use futures_util::future; +#[cfg(feature = "http2")] +use std::future::Future; + +use futures_util::FutureExt; use tokio::sync::{mpsc, oneshot}; -use crate::common::{task, Future, Pin, Poll}; +#[cfg(feature = "http2")] +use crate::common::Pin; +use crate::common::{task, Poll}; -pub type RetryPromise = oneshot::Receiver)>>; -pub type Promise = oneshot::Receiver>; +pub(crate) type RetryPromise = oneshot::Receiver)>>; +pub(crate) type Promise = oneshot::Receiver>; -pub fn channel() -> (Sender, Receiver) { +pub(crate) fn channel() -> (Sender, Receiver) { let (tx, rx) = mpsc::unbounded_channel(); let (giver, taker) = want::new(); let tx = Sender { @@ -22,7 +27,7 @@ pub fn channel() -> (Sender, Receiver) { /// /// While the inner sender is unbounded, the Giver is used to determine /// if the Receiver is ready for another request. -pub struct Sender { +pub(crate) struct Sender { /// One message is always allowed, even if the Receiver hasn't asked /// for it yet. This boolean keeps track of whether we've sent one /// without notice. @@ -40,24 +45,25 @@ pub struct Sender { /// /// Cannot poll the Giver, but can still use it to determine if the Receiver /// has been dropped. However, this version can be cloned. -pub struct UnboundedSender { +#[cfg(feature = "http2")] +pub(crate) struct UnboundedSender { /// Only used for `is_closed`, since mpsc::UnboundedSender cannot be checked. 
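
The dispatch channel in this hunk pairs an unbounded `mpsc` with a per-message `oneshot` so each queued request can receive its own response or error. Stripped of hyper's `want`-based readiness tracking, the core pattern reduces to the following sketch (the type names here are illustrative, not hyper's internals):

```rust
use tokio::sync::{mpsc, oneshot};

/// Each queued item carries a oneshot sender for its individual reply.
struct Envelope<T, U> {
    msg: T,
    reply: oneshot::Sender<U>,
}

#[tokio::main]
async fn main() {
    let (tx, mut rx) = mpsc::unbounded_channel::<Envelope<String, usize>>();

    // Dispatcher side: answer each request on its own oneshot.
    let dispatcher = tokio::spawn(async move {
        while let Some(env) = rx.recv().await {
            let _ = env.reply.send(env.msg.len());
        }
    });

    // Caller side: send a request and await its dedicated reply channel.
    let (reply_tx, reply_rx) = oneshot::channel();
    let _ = tx.send(Envelope { msg: "ping".to_owned(), reply: reply_tx });
    assert_eq!(reply_rx.await.unwrap(), 4);

    drop(tx);
    dispatcher.await.unwrap();
}
```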
giver: want::SharedGiver, inner: mpsc::UnboundedSender>, } impl Sender { - pub fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll> { + pub(crate) fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll> { self.giver .poll_want(cx) .map_err(|_| crate::Error::new_closed()) } - pub fn is_ready(&self) -> bool { + pub(crate) fn is_ready(&self) -> bool { self.giver.is_wanting() } - pub fn is_closed(&self) -> bool { + pub(crate) fn is_closed(&self) -> bool { self.giver.is_canceled() } @@ -74,7 +80,7 @@ impl Sender { } } - pub fn try_send(&mut self, val: T) -> Result, T> { + pub(crate) fn try_send(&mut self, val: T) -> Result, T> { if !self.can_send() { return Err(val); } @@ -85,7 +91,7 @@ impl Sender { .map_err(|mut e| (e.0).0.take().expect("envelope not dropped").0) } - pub fn send(&mut self, val: T) -> Result, T> { + pub(crate) fn send(&mut self, val: T) -> Result, T> { if !self.can_send() { return Err(val); } @@ -96,7 +102,8 @@ impl Sender { .map_err(|mut e| (e.0).0.take().expect("envelope not dropped").0) } - pub fn unbound(self) -> UnboundedSender { + #[cfg(feature = "http2")] + pub(crate) fn unbound(self) -> UnboundedSender { UnboundedSender { giver: self.giver.shared(), inner: self.inner, @@ -104,16 +111,17 @@ impl Sender { } } +#[cfg(feature = "http2")] impl UnboundedSender { - pub fn is_ready(&self) -> bool { + pub(crate) fn is_ready(&self) -> bool { !self.giver.is_canceled() } - pub fn is_closed(&self) -> bool { + pub(crate) fn is_closed(&self) -> bool { self.giver.is_canceled() } - pub fn try_send(&mut self, val: T) -> Result, T> { + pub(crate) fn try_send(&mut self, val: T) -> Result, T> { let (tx, rx) = oneshot::channel(); self.inner .send(Envelope(Some((val, Callback::Retry(tx))))) @@ -122,6 +130,7 @@ impl UnboundedSender { } } +#[cfg(feature = "http2")] impl Clone for UnboundedSender { fn clone(&self) -> Self { UnboundedSender { @@ -131,13 +140,13 @@ impl Clone for UnboundedSender { } } -pub struct Receiver { +pub(crate) struct Receiver { inner: mpsc::UnboundedReceiver>, taker: want::Taker, } impl Receiver { - pub(crate) fn poll_next( + pub(crate) fn poll_recv( &mut self, cx: &mut task::Context<'_>, ) -> Poll)>> { @@ -152,15 +161,17 @@ impl Receiver { } } + #[cfg(feature = "http1")] pub(crate) fn close(&mut self) { self.taker.cancel(); self.inner.close(); } + #[cfg(feature = "http1")] pub(crate) fn try_recv(&mut self) -> Option<(T, Callback)> { - match self.inner.try_recv() { - Ok(mut env) => env.0.take(), - Err(_) => None, + match self.inner.recv().now_or_never() { + Some(Some(mut env)) => env.0.take(), + _ => None, } } } @@ -186,12 +197,13 @@ impl Drop for Envelope { } } -pub enum Callback { +pub(crate) enum Callback { Retry(oneshot::Sender)>>), NoRetry(oneshot::Sender>), } impl Callback { + #[cfg(feature = "http2")] pub(crate) fn is_canceled(&self) -> bool { match *self { Callback::Retry(ref tx) => tx.is_closed(), @@ -217,10 +229,14 @@ impl Callback { } } - pub(crate) fn send_when( + #[cfg(feature = "http2")] + pub(crate) async fn send_when( self, mut when: impl Future)>> + Unpin, - ) -> impl Future { + ) { + use futures_util::future; + use tracing::trace; + let mut cb = Some(self); // "select" on this callback being canceled, and the future completing @@ -242,6 +258,7 @@ impl Callback { } } }) + .await } } @@ -263,7 +280,7 @@ mod tests { type Output = Option<(T, Callback)>; fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - self.poll_next(cx) + self.poll_recv(cx) } } @@ -325,6 +342,7 @@ mod tests { let _ = tx.try_send(Custom(2)).expect("2 
ready"); } + #[cfg(feature = "http2")] #[test] fn unbounded_sender_doesnt_bound_on_want() { let (tx, rx) = channel::(); @@ -344,9 +362,8 @@ mod tests { fn giver_queue_throughput(b: &mut test::Bencher) { use crate::{Body, Request, Response}; - let mut rt = tokio::runtime::Builder::new() + let rt = tokio::runtime::Builder::new_current_thread() .enable_all() - .basic_scheduler() .build() .unwrap(); let (mut tx, mut rx) = channel::, Response>(); @@ -368,9 +385,8 @@ mod tests { #[cfg(feature = "nightly")] #[bench] fn giver_queue_not_ready(b: &mut test::Bencher) { - let mut rt = tokio::runtime::Builder::new() + let rt = tokio::runtime::Builder::new_current_thread() .enable_all() - .basic_scheduler() .build() .unwrap(); let (_tx, mut rx) = channel::(); diff --git a/third_party/rust/hyper/src/client/mod.rs b/third_party/rust/hyper/src/client/mod.rs index f0ccb0eb5789..734bda8819b5 100644 --- a/third_party/rust/hyper/src/client/mod.rs +++ b/third_party/rust/hyper/src/client/mod.rs @@ -27,10 +27,10 @@ //! [full client example](https://github.com/hyperium/hyper/blob/master/examples/client.rs). //! //! ``` +//! # #[cfg(all(feature = "tcp", feature = "client", any(feature = "http1", feature = "http2")))] +//! # async fn fetch_httpbin() -> hyper::Result<()> { //! use hyper::{body::HttpBody as _, Client, Uri}; //! -//! # #[cfg(feature = "tcp")] -//! # async fn fetch_httpbin() -> hyper::Result<()> { //! let client = Client::new(); //! //! // Make a GET /ip to 'http://httpbin.org' @@ -48,1180 +48,21 @@ //! # fn main () {} //! ``` -use std::error::Error as StdError; -use std::fmt; -use std::mem; -use std::time::Duration; - -use futures_channel::oneshot; -use futures_util::future::{self, Either, FutureExt as _, TryFutureExt as _}; -use http::header::{HeaderValue, HOST}; -use http::uri::Scheme; -use http::{Method, Request, Response, Uri, Version}; - -use self::connect::{sealed::Connect, Alpn, Connected, Connection}; -use self::pool::{Key as PoolKey, Pool, Poolable, Pooled, Reservation}; -use crate::body::{Body, HttpBody}; -use crate::common::{lazy as hyper_lazy, task, BoxSendFuture, Executor, Future, Lazy, Pin, Poll}; - #[cfg(feature = "tcp")] pub use self::connect::HttpConnector; -pub mod conn; pub mod connect; -pub(crate) mod dispatch; -mod pool; -pub mod service; -#[cfg(test)] +#[cfg(all(test, feature = "runtime"))] mod tests; -/// A Client to make outgoing HTTP requests. -pub struct Client { - config: Config, - conn_builder: conn::Builder, - connector: C, - pool: Pool>, -} - -#[derive(Clone, Copy, Debug)] -struct Config { - retry_canceled_requests: bool, - set_host: bool, - ver: Ver, -} - -/// A `Future` that will resolve to an HTTP Response. -/// -/// This is returned by `Client::request` (and `Client::get`). -#[must_use = "futures do nothing unless polled"] -pub struct ResponseFuture { - inner: Pin>> + Send>>, -} - -// ===== impl Client ===== - -#[cfg(feature = "tcp")] -impl Client { - /// Create a new Client with the default [config](Builder). - /// - /// # Note - /// - /// The default connector does **not** handle TLS. Speaking to `https` - /// destinations will require [configuring a connector that implements - /// TLS](https://hyper.rs/guides/client/configuration). - #[inline] - pub fn new() -> Client { - Builder::default().build_http() - } -} - -#[cfg(feature = "tcp")] -impl Default for Client { - fn default() -> Client { - Client::new() - } -} - -impl Client<(), Body> { - /// Create a builder to configure a new `Client`. 
- /// - /// # Example - /// - /// ``` - /// # #[cfg(feature = "runtime")] - /// # fn run () { - /// use std::time::Duration; - /// use hyper::Client; - /// - /// let client = Client::builder() - /// .pool_idle_timeout(Duration::from_secs(30)) - /// .http2_only(true) - /// .build_http(); - /// # let infer: Client<_, hyper::Body> = client; - /// # drop(infer); - /// # } - /// # fn main() {} - /// ``` - #[inline] - pub fn builder() -> Builder { - Builder::default() - } -} - -impl Client -where - C: Connect + Clone + Send + Sync + 'static, - B: HttpBody + Send + 'static, - B::Data: Send, - B::Error: Into>, -{ - /// Send a `GET` request to the supplied `Uri`. - /// - /// # Note - /// - /// This requires that the `HttpBody` type have a `Default` implementation. - /// It *should* return an "empty" version of itself, such that - /// `HttpBody::is_end_stream` is `true`. - /// - /// # Example - /// - /// ``` - /// # #[cfg(feature = "runtime")] - /// # fn run () { - /// use hyper::{Client, Uri}; - /// - /// let client = Client::new(); - /// - /// let future = client.get(Uri::from_static("http://httpbin.org/ip")); - /// # } - /// # fn main() {} - /// ``` - pub fn get(&self, uri: Uri) -> ResponseFuture - where - B: Default, - { - let body = B::default(); - if !body.is_end_stream() { - warn!("default HttpBody used for get() does not return true for is_end_stream"); - } - - let mut req = Request::new(body); - *req.uri_mut() = uri; - self.request(req) - } - - /// Send a constructed `Request` using this `Client`. - /// - /// # Example - /// - /// ``` - /// # #[cfg(feature = "runtime")] - /// # fn run () { - /// use hyper::{Body, Client, Request}; - /// - /// let client = Client::new(); - /// - /// let req = Request::builder() - /// .method("POST") - /// .uri("http://httpin.org/post") - /// .body(Body::from("Hallo!")) - /// .expect("request builder"); - /// - /// let future = client.request(req); - /// # } - /// # fn main() {} - /// ``` - pub fn request(&self, mut req: Request) -> ResponseFuture { - let is_http_connect = req.method() == Method::CONNECT; - match req.version() { - Version::HTTP_11 => (), - Version::HTTP_10 => { - if is_http_connect { - warn!("CONNECT is not allowed for HTTP/1.0"); - return ResponseFuture::new(Box::new(future::err( - crate::Error::new_user_unsupported_request_method(), - ))); - } - } - other_h2 @ Version::HTTP_2 => { - if self.config.ver != Ver::Http2 { - return ResponseFuture::error_version(other_h2); - } - } - // completely unsupported HTTP version (like HTTP/0.9)! 
- other => return ResponseFuture::error_version(other), - }; - - let pool_key = match extract_domain(req.uri_mut(), is_http_connect) { - Ok(s) => s, - Err(err) => { - return ResponseFuture::new(Box::new(future::err(err))); - } - }; - - ResponseFuture::new(Box::new(self.retryably_send_request(req, pool_key))) - } - - fn retryably_send_request( - &self, - req: Request, - pool_key: PoolKey, - ) -> impl Future>> { - let client = self.clone(); - let uri = req.uri().clone(); - - let mut send_fut = client.send_request(req, pool_key.clone()); - future::poll_fn(move |cx| loop { - match ready!(Pin::new(&mut send_fut).poll(cx)) { - Ok(resp) => return Poll::Ready(Ok(resp)), - Err(ClientError::Normal(err)) => return Poll::Ready(Err(err)), - Err(ClientError::Canceled { - connection_reused, - mut req, - reason, - }) => { - if !client.config.retry_canceled_requests || !connection_reused { - // if client disabled, don't retry - // a fresh connection means we definitely can't retry - return Poll::Ready(Err(reason)); - } - - trace!( - "unstarted request canceled, trying again (reason={:?})", - reason - ); - *req.uri_mut() = uri.clone(); - send_fut = client.send_request(req, pool_key.clone()); - } - } - }) - } - - fn send_request( - &self, - mut req: Request, - pool_key: PoolKey, - ) -> impl Future, ClientError>> + Unpin { - let conn = self.connection_for(pool_key); - - let set_host = self.config.set_host; - let executor = self.conn_builder.exec.clone(); - conn.and_then(move |mut pooled| { - if pooled.is_http1() { - if set_host { - let uri = req.uri().clone(); - req.headers_mut().entry(HOST).or_insert_with(|| { - let hostname = uri.host().expect("authority implies host"); - if let Some(port) = uri.port() { - let s = format!("{}:{}", hostname, port); - HeaderValue::from_str(&s) - } else { - HeaderValue::from_str(hostname) - } - .expect("uri host is valid header value") - }); - } - - // CONNECT always sends authority-form, so check it first... - if req.method() == Method::CONNECT { - authority_form(req.uri_mut()); - } else if pooled.conn_info.is_proxied { - absolute_form(req.uri_mut()); - } else { - origin_form(req.uri_mut()); - }; - } else if req.method() == Method::CONNECT { - debug!("client does not support CONNECT requests over HTTP2"); - return Either::Left(future::err(ClientError::Normal( - crate::Error::new_user_unsupported_request_method(), - ))); - } - - let fut = pooled - .send_request_retryable(req) - .map_err(ClientError::map_with_reused(pooled.is_reused())); - - // If the Connector included 'extra' info, add to Response... - let extra_info = pooled.conn_info.extra.clone(); - let fut = fut.map_ok(move |mut res| { - if let Some(extra) = extra_info { - extra.set(&mut res); - } - res - }); - - // As of futures@0.1.21, there is a race condition in the mpsc - // channel, such that sending when the receiver is closing can - // result in the message being stuck inside the queue. It won't - // ever notify until the Sender side is dropped. - // - // To counteract this, we must check if our senders 'want' channel - // has been closed after having tried to send. If so, error out... - if pooled.is_closed() { - return Either::Right(Either::Left(fut)); - } - - Either::Right(Either::Right(fut.map_ok(move |mut res| { - // If pooled is HTTP/2, we can toss this reference immediately. - // - // when pooled is dropped, it will try to insert back into the - // pool. To delay that, spawn a future that completes once the - // sender is ready again. 
- // - // This *should* only be once the related `Connection` has polled - // for a new request to start. - // - // It won't be ready if there is a body to stream. - if pooled.is_http2() || !pooled.is_pool_enabled() || pooled.is_ready() { - drop(pooled); - } else if !res.body().is_end_stream() { - let (delayed_tx, delayed_rx) = oneshot::channel(); - res.body_mut().delayed_eof(delayed_rx); - let on_idle = future::poll_fn(move |cx| pooled.poll_ready(cx)).map(move |_| { - // At this point, `pooled` is dropped, and had a chance - // to insert into the pool (if conn was idle) - drop(delayed_tx); - }); - - executor.execute(on_idle); - } else { - // There's no body to delay, but the connection isn't - // ready yet. Only re-insert when it's ready - let on_idle = future::poll_fn(move |cx| pooled.poll_ready(cx)).map(|_| ()); - - executor.execute(on_idle); - } - res - }))) - }) - } - - fn connection_for( - &self, - pool_key: PoolKey, - ) -> impl Future>, ClientError>> { - // This actually races 2 different futures to try to get a ready - // connection the fastest, and to reduce connection churn. - // - // - If the pool has an idle connection waiting, that's used - // immediately. - // - Otherwise, the Connector is asked to start connecting to - // the destination Uri. - // - Meanwhile, the pool Checkout is watching to see if any other - // request finishes and tries to insert an idle connection. - // - If a new connection is started, but the Checkout wins after - // (an idle connection became available first), the started - // connection future is spawned into the runtime to complete, - // and then be inserted into the pool as an idle connection. - let checkout = self.pool.checkout(pool_key.clone()); - let connect = self.connect_to(pool_key); - - let executor = self.conn_builder.exec.clone(); - // The order of the `select` is depended on below... - future::select(checkout, connect).then(move |either| match either { - // Checkout won, connect future may have been started or not. - // - // If it has, let it finish and insert back into the pool, - // so as to not waste the socket... - Either::Left((Ok(checked_out), connecting)) => { - // This depends on the `select` above having the correct - // order, such that if the checkout future were ready - // immediately, the connect future will never have been - // started. - // - // If it *wasn't* ready yet, then the connect future will - // have been started... - if connecting.started() { - let bg = connecting - .map_err(|err| { - trace!("background connect error: {}", err); - }) - .map(|_pooled| { - // dropping here should just place it in - // the Pool for us... - }); - // An execute error here isn't important, we're just trying - // to prevent a waste of a socket... - executor.execute(bg); - } - Either::Left(future::ok(checked_out)) - } - // Connect won, checkout can just be dropped. - Either::Right((Ok(connected), _checkout)) => Either::Left(future::ok(connected)), - // Either checkout or connect could get canceled: - // - // 1. Connect is canceled if this is HTTP/2 and there is - // an outstanding HTTP/2 connecting task. - // 2. Checkout is canceled if the pool cannot deliver an - // idle connection reliably. - // - // In both cases, we should just wait for the other future. 
- Either::Left((Err(err), connecting)) => Either::Right(Either::Left({ - if err.is_canceled() { - Either::Left(connecting.map_err(ClientError::Normal)) - } else { - Either::Right(future::err(ClientError::Normal(err))) - } - })), - Either::Right((Err(err), checkout)) => Either::Right(Either::Right({ - if err.is_canceled() { - Either::Left(checkout.map_err(ClientError::Normal)) - } else { - Either::Right(future::err(ClientError::Normal(err))) - } - })), - }) - } - - fn connect_to( - &self, - pool_key: PoolKey, - ) -> impl Lazy>>> + Unpin { - let executor = self.conn_builder.exec.clone(); - let pool = self.pool.clone(); - let mut conn_builder = self.conn_builder.clone(); - let ver = self.config.ver; - let is_ver_h2 = ver == Ver::Http2; - let connector = self.connector.clone(); - let dst = domain_as_uri(pool_key.clone()); - hyper_lazy(move || { - // Try to take a "connecting lock". - // - // If the pool_key is for HTTP/2, and there is already a - // connection being established, then this can't take a - // second lock. The "connect_to" future is Canceled. - let connecting = match pool.connecting(&pool_key, ver) { - Some(lock) => lock, - None => { - let canceled = - crate::Error::new_canceled().with("HTTP/2 connection in progress"); - return Either::Right(future::err(canceled)); - } - }; - Either::Left( - connector - .connect(connect::sealed::Internal, dst) - .map_err(crate::Error::new_connect) - .and_then(move |io| { - let connected = io.connected(); - // If ALPN is h2 and we aren't http2_only already, - // then we need to convert our pool checkout into - // a single HTTP2 one. - let connecting = if connected.alpn == Alpn::H2 && !is_ver_h2 { - match connecting.alpn_h2(&pool) { - Some(lock) => { - trace!("ALPN negotiated h2, updating pool"); - lock - } - None => { - // Another connection has already upgraded, - // the pool checkout should finish up for us. 
- let canceled = crate::Error::new_canceled() - .with("ALPN upgraded to HTTP/2"); - return Either::Right(future::err(canceled)); - } - } - } else { - connecting - }; - let is_h2 = is_ver_h2 || connected.alpn == Alpn::H2; - Either::Left(Box::pin( - conn_builder - .http2_only(is_h2) - .handshake(io) - .and_then(move |(tx, conn)| { - trace!( - "handshake complete, spawning background dispatcher task" - ); - executor.execute( - conn.map_err(|e| debug!("client connection error: {}", e)) - .map(|_| ()), - ); - - // Wait for 'conn' to ready up before we - // declare this tx as usable - tx.when_ready() - }) - .map_ok(move |tx| { - pool.pooled( - connecting, - PoolClient { - conn_info: connected, - tx: if is_h2 { - PoolTx::Http2(tx.into_http2()) - } else { - PoolTx::Http1(tx) - }, - }, - ) - }), - )) - }), - ) - }) - } -} - -impl tower_service::Service> for Client -where - C: Connect + Clone + Send + Sync + 'static, - B: HttpBody + Send + 'static, - B::Data: Send, - B::Error: Into>, -{ - type Response = Response; - type Error = crate::Error; - type Future = ResponseFuture; - - fn poll_ready(&mut self, _: &mut task::Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } - - fn call(&mut self, req: Request) -> Self::Future { - self.request(req) - } -} - -impl Clone for Client { - fn clone(&self) -> Client { - Client { - config: self.config.clone(), - conn_builder: self.conn_builder.clone(), - connector: self.connector.clone(), - pool: self.pool.clone(), - } - } -} - -impl fmt::Debug for Client { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Client").finish() - } -} - -// ===== impl ResponseFuture ===== - -impl ResponseFuture { - fn new(fut: Box>> + Send>) -> Self { - Self { inner: fut.into() } - } - - fn error_version(ver: Version) -> Self { - warn!("Request has unsupported version \"{:?}\"", ver); - ResponseFuture::new(Box::new(future::err( - crate::Error::new_user_unsupported_version(), - ))) - } -} - -impl fmt::Debug for ResponseFuture { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.pad("Future") - } -} - -impl Future for ResponseFuture { - type Output = crate::Result>; - - fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { - Pin::new(&mut self.inner).poll(cx) - } -} - -// ===== impl PoolClient ===== - -// FIXME: allow() required due to `impl Trait` leaking types to this lint -#[allow(missing_debug_implementations)] -struct PoolClient { - conn_info: Connected, - tx: PoolTx, -} - -enum PoolTx { - Http1(conn::SendRequest), - Http2(conn::Http2SendRequest), -} - -impl PoolClient { - fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll> { - match self.tx { - PoolTx::Http1(ref mut tx) => tx.poll_ready(cx), - PoolTx::Http2(_) => Poll::Ready(Ok(())), - } - } - - fn is_http1(&self) -> bool { - !self.is_http2() - } - - fn is_http2(&self) -> bool { - match self.tx { - PoolTx::Http1(_) => false, - PoolTx::Http2(_) => true, - } - } - - fn is_ready(&self) -> bool { - match self.tx { - PoolTx::Http1(ref tx) => tx.is_ready(), - PoolTx::Http2(ref tx) => tx.is_ready(), - } - } - - fn is_closed(&self) -> bool { - match self.tx { - PoolTx::Http1(ref tx) => tx.is_closed(), - PoolTx::Http2(ref tx) => tx.is_closed(), - } - } -} - -impl PoolClient { - fn send_request_retryable( - &mut self, - req: Request, - ) -> impl Future, (crate::Error, Option>)>> - where - B: Send, - { - match self.tx { - PoolTx::Http1(ref mut tx) => Either::Left(tx.send_request_retryable(req)), - PoolTx::Http2(ref mut tx) => Either::Right(tx.send_request_retryable(req)), - 
} - } -} - -impl Poolable for PoolClient -where - B: Send + 'static, -{ - fn is_open(&self) -> bool { - match self.tx { - PoolTx::Http1(ref tx) => tx.is_ready(), - PoolTx::Http2(ref tx) => tx.is_ready(), - } - } - - fn reserve(self) -> Reservation { - match self.tx { - PoolTx::Http1(tx) => Reservation::Unique(PoolClient { - conn_info: self.conn_info, - tx: PoolTx::Http1(tx), - }), - PoolTx::Http2(tx) => { - let b = PoolClient { - conn_info: self.conn_info.clone(), - tx: PoolTx::Http2(tx.clone()), - }; - let a = PoolClient { - conn_info: self.conn_info, - tx: PoolTx::Http2(tx), - }; - Reservation::Shared(a, b) - } - } - } - - fn can_share(&self) -> bool { - self.is_http2() - } -} - -// ===== impl ClientError ===== - -// FIXME: allow() required due to `impl Trait` leaking types to this lint -#[allow(missing_debug_implementations)] -enum ClientError { - Normal(crate::Error), - Canceled { - connection_reused: bool, - req: Request, - reason: crate::Error, - }, -} - -impl ClientError { - fn map_with_reused(conn_reused: bool) -> impl Fn((crate::Error, Option>)) -> Self { - move |(err, orig_req)| { - if let Some(req) = orig_req { - ClientError::Canceled { - connection_reused: conn_reused, - reason: err, - req, - } - } else { - ClientError::Normal(err) - } - } - } -} - -/// A marker to identify what version a pooled connection is. -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] -enum Ver { - Auto, - Http2, -} - -fn origin_form(uri: &mut Uri) { - let path = match uri.path_and_query() { - Some(path) if path.as_str() != "/" => { - let mut parts = ::http::uri::Parts::default(); - parts.path_and_query = Some(path.clone()); - Uri::from_parts(parts).expect("path is valid uri") - } - _none_or_just_slash => { - debug_assert!(Uri::default() == "/"); - Uri::default() - } - }; - *uri = path -} - -fn absolute_form(uri: &mut Uri) { - debug_assert!(uri.scheme().is_some(), "absolute_form needs a scheme"); - debug_assert!( - uri.authority().is_some(), - "absolute_form needs an authority" - ); - // If the URI is to HTTPS, and the connector claimed to be a proxy, - // then it *should* have tunneled, and so we don't want to send - // absolute-form in that case. - if uri.scheme() == Some(&Scheme::HTTPS) { - origin_form(uri); - } -} - -fn authority_form(uri: &mut Uri) { - if log_enabled!(::log::Level::Warn) { - if let Some(path) = uri.path_and_query() { - // `https://hyper.rs` would parse with `/` path, don't - // annoy people about that... 
- if path != "/" { - warn!("HTTP/1.1 CONNECT request stripping path: {:?}", path); - } - } - } - *uri = match uri.authority() { - Some(auth) => { - let mut parts = ::http::uri::Parts::default(); - parts.authority = Some(auth.clone()); - Uri::from_parts(parts).expect("authority is valid") - } - None => { - unreachable!("authority_form with relative uri"); - } - }; -} - -fn extract_domain(uri: &mut Uri, is_http_connect: bool) -> crate::Result { - let uri_clone = uri.clone(); - match (uri_clone.scheme(), uri_clone.authority()) { - (Some(scheme), Some(auth)) => Ok((scheme.clone(), auth.clone())), - (None, Some(auth)) if is_http_connect => { - let scheme = match auth.port_u16() { - Some(443) => { - set_scheme(uri, Scheme::HTTPS); - Scheme::HTTPS - } - _ => { - set_scheme(uri, Scheme::HTTP); - Scheme::HTTP - } - }; - Ok((scheme, auth.clone())) - } - _ => { - debug!("Client requires absolute-form URIs, received: {:?}", uri); - Err(crate::Error::new_user_absolute_uri_required()) - } - } -} - -fn domain_as_uri((scheme, auth): PoolKey) -> Uri { - http::uri::Builder::new() - .scheme(scheme) - .authority(auth) - .path_and_query("/") - .build() - .expect("domain is valid Uri") -} - -fn set_scheme(uri: &mut Uri, scheme: Scheme) { - debug_assert!( - uri.scheme().is_none(), - "set_scheme expects no existing scheme" - ); - let old = mem::replace(uri, Uri::default()); - let mut parts: ::http::uri::Parts = old.into(); - parts.scheme = Some(scheme); - parts.path_and_query = Some("/".parse().expect("slash is a valid path")); - *uri = Uri::from_parts(parts).expect("scheme is valid"); -} - -/// A builder to configure a new [`Client`](Client). -/// -/// # Example -/// -/// ``` -/// # #[cfg(feature = "runtime")] -/// # fn run () { -/// use std::time::Duration; -/// use hyper::Client; -/// -/// let client = Client::builder() -/// .pool_idle_timeout(Duration::from_secs(30)) -/// .http2_only(true) -/// .build_http(); -/// # let infer: Client<_, hyper::Body> = client; -/// # drop(infer); -/// # } -/// # fn main() {} -/// ``` -#[derive(Clone)] -pub struct Builder { - client_config: Config, - conn_builder: conn::Builder, - pool_config: pool::Config, -} - -impl Default for Builder { - fn default() -> Self { - Self { - client_config: Config { - retry_canceled_requests: true, - set_host: true, - ver: Ver::Auto, - }, - conn_builder: conn::Builder::new(), - pool_config: pool::Config { - idle_timeout: Some(Duration::from_secs(90)), - max_idle_per_host: std::usize::MAX, - }, - } - } -} - -impl Builder { - #[doc(hidden)] - #[deprecated( - note = "name is confusing, to disable the connection pool, call pool_max_idle_per_host(0)" - )] - pub fn keep_alive(&mut self, val: bool) -> &mut Self { - if !val { - // disable - self.pool_max_idle_per_host(0) - } else if self.pool_config.max_idle_per_host == 0 { - // enable - self.pool_max_idle_per_host(std::usize::MAX) - } else { - // already enabled - self - } - } - - #[doc(hidden)] - #[deprecated(note = "renamed to `pool_idle_timeout`")] - pub fn keep_alive_timeout(&mut self, val: D) -> &mut Self - where - D: Into>, - { - self.pool_idle_timeout(val) - } - - /// Set an optional timeout for idle sockets being kept-alive. - /// - /// Pass `None` to disable timeout. - /// - /// Default is 90 seconds. 
- pub fn pool_idle_timeout(&mut self, val: D) -> &mut Self - where - D: Into>, - { - self.pool_config.idle_timeout = val.into(); - self - } - - #[doc(hidden)] - #[deprecated(note = "renamed to `pool_max_idle_per_host`")] - pub fn max_idle_per_host(&mut self, max_idle: usize) -> &mut Self { - self.pool_config.max_idle_per_host = max_idle; - self - } - - /// Sets the maximum idle connection per host allowed in the pool. - /// - /// Default is `usize::MAX` (no limit). - pub fn pool_max_idle_per_host(&mut self, max_idle: usize) -> &mut Self { - self.pool_config.max_idle_per_host = max_idle; - self - } - - // HTTP/1 options - - /// Set whether HTTP/1 connections should try to use vectored writes, - /// or always flatten into a single buffer. - /// - /// Note that setting this to false may mean more copies of body data, - /// but may also improve performance when an IO transport doesn't - /// support vectored writes well, such as most TLS implementations. - /// - /// Default is `true`. - pub fn http1_writev(&mut self, val: bool) -> &mut Self { - self.conn_builder.h1_writev(val); - self - } - - /// Sets the exact size of the read buffer to *always* use. - /// - /// Note that setting this option unsets the `http1_max_buf_size` option. - /// - /// Default is an adaptive read buffer. - pub fn http1_read_buf_exact_size(&mut self, sz: usize) -> &mut Self { - self.conn_builder.h1_read_buf_exact_size(Some(sz)); - self - } - - /// Set the maximum buffer size for the connection. - /// - /// Default is ~400kb. - /// - /// Note that setting this option unsets the `http1_read_exact_buf_size` option. - /// - /// # Panics - /// - /// The minimum value allowed is 8192. This method panics if the passed `max` is less than the minimum. - pub fn http1_max_buf_size(&mut self, max: usize) -> &mut Self { - self.conn_builder.h1_max_buf_size(max); - self - } - - /// Set whether HTTP/1 connections will write header names as title case at - /// the socket level. - /// - /// Note that this setting does not affect HTTP/2. - /// - /// Default is false. - pub fn http1_title_case_headers(&mut self, val: bool) -> &mut Self { - self.conn_builder.h1_title_case_headers(val); - self - } - - /// Set whether the connection **must** use HTTP/2. - /// - /// The destination must either allow HTTP2 Prior Knowledge, or the - /// `Connect` should be configured to do use ALPN to upgrade to `h2` - /// as part of the connection process. This will not make the `Client` - /// utilize ALPN by itself. - /// - /// Note that setting this to true prevents HTTP/1 from being allowed. - /// - /// Default is false. - pub fn http2_only(&mut self, val: bool) -> &mut Self { - self.client_config.ver = if val { Ver::Http2 } else { Ver::Auto }; - self - } - - /// Sets the [`SETTINGS_INITIAL_WINDOW_SIZE`][spec] option for HTTP2 - /// stream-level flow control. - /// - /// Passing `None` will do nothing. - /// - /// If not set, hyper will use a default. - /// - /// [spec]: https://http2.github.io/http2-spec/#SETTINGS_INITIAL_WINDOW_SIZE - pub fn http2_initial_stream_window_size(&mut self, sz: impl Into>) -> &mut Self { - self.conn_builder - .http2_initial_stream_window_size(sz.into()); - self - } - - /// Sets the max connection-level flow control for HTTP2 - /// - /// Passing `None` will do nothing. - /// - /// If not set, hyper will use a default. 
- pub fn http2_initial_connection_window_size( - &mut self, - sz: impl Into>, - ) -> &mut Self { - self.conn_builder - .http2_initial_connection_window_size(sz.into()); - self - } - - /// Sets whether to use an adaptive flow control. - /// - /// Enabling this will override the limits set in - /// `http2_initial_stream_window_size` and - /// `http2_initial_connection_window_size`. - pub fn http2_adaptive_window(&mut self, enabled: bool) -> &mut Self { - self.conn_builder.http2_adaptive_window(enabled); - self - } - - /// Sets an interval for HTTP2 Ping frames should be sent to keep a - /// connection alive. - /// - /// Pass `None` to disable HTTP2 keep-alive. - /// - /// Default is currently disabled. - /// - /// # Cargo Feature - /// - /// Requires the `runtime` cargo feature to be enabled. - #[cfg(feature = "runtime")] - pub fn http2_keep_alive_interval( - &mut self, - interval: impl Into>, - ) -> &mut Self { - self.conn_builder.http2_keep_alive_interval(interval); - self - } - - /// Sets a timeout for receiving an acknowledgement of the keep-alive ping. - /// - /// If the ping is not acknowledged within the timeout, the connection will - /// be closed. Does nothing if `http2_keep_alive_interval` is disabled. - /// - /// Default is 20 seconds. - /// - /// # Cargo Feature - /// - /// Requires the `runtime` cargo feature to be enabled. - #[cfg(feature = "runtime")] - pub fn http2_keep_alive_timeout(&mut self, timeout: Duration) -> &mut Self { - self.conn_builder.http2_keep_alive_timeout(timeout); - self - } - - /// Sets whether HTTP2 keep-alive should apply while the connection is idle. - /// - /// If disabled, keep-alive pings are only sent while there are open - /// request/responses streams. If enabled, pings are also sent when no - /// streams are active. Does nothing if `http2_keep_alive_interval` is - /// disabled. - /// - /// Default is `false`. - /// - /// # Cargo Feature - /// - /// Requires the `runtime` cargo feature to be enabled. - #[cfg(feature = "runtime")] - pub fn http2_keep_alive_while_idle(&mut self, enabled: bool) -> &mut Self { - self.conn_builder.http2_keep_alive_while_idle(enabled); - self - } - - /// Set whether to retry requests that get disrupted before ever starting - /// to write. - /// - /// This means a request that is queued, and gets given an idle, reused - /// connection, and then encounters an error immediately as the idle - /// connection was found to be unusable. - /// - /// When this is set to `false`, the related `ResponseFuture` would instead - /// resolve to an `Error::Cancel`. - /// - /// Default is `true`. - #[inline] - pub fn retry_canceled_requests(&mut self, val: bool) -> &mut Self { - self.client_config.retry_canceled_requests = val; - self - } - - /// Set whether to automatically add the `Host` header to requests. - /// - /// If true, and a request does not include a `Host` header, one will be - /// added automatically, derived from the authority of the `Uri`. - /// - /// Default is `true`. - #[inline] - pub fn set_host(&mut self, val: bool) -> &mut Self { - self.client_config.set_host = val; - self - } - - /// Provide an executor to execute background `Connection` tasks. - pub fn executor(&mut self, exec: E) -> &mut Self - where - E: Executor + Send + Sync + 'static, - { - self.conn_builder.executor(exec); - self - } - - /// Builder a client with this configuration and the default `HttpConnector`. 
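
The builder options documented above (being moved into `client/client.rs` by this hunk) combine in the usual chained style; a hedged usage sketch for hyper 0.14, assuming the `client`, `http2`, `tcp` and `runtime` features for the keep-alive methods:

```rust
use std::time::Duration;

use hyper::{Body, Client};

fn main() {
    let client = Client::builder()
        // Connection pool tuning.
        .pool_idle_timeout(Duration::from_secs(30))
        .pool_max_idle_per_host(8)
        // Force HTTP/2 (prior knowledge) and keep idle connections alive
        // with PING frames.
        .http2_only(true)
        .http2_keep_alive_interval(Duration::from_secs(30))
        .http2_keep_alive_timeout(Duration::from_secs(10))
        .build_http::<Body>();

    drop(client);
}
```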
- #[cfg(feature = "tcp")] - pub fn build_http(&self) -> Client - where - B: HttpBody + Send, - B::Data: Send, - { - let mut connector = HttpConnector::new(); - if self.pool_config.is_enabled() { - connector.set_keepalive(self.pool_config.idle_timeout); - } - self.build(connector) - } - - /// Combine the configuration of this builder with a connector to create a `Client`. - pub fn build(&self, connector: C) -> Client - where - C: Connect + Clone, - B: HttpBody + Send, - B::Data: Send, - { - Client { - config: self.client_config, - conn_builder: self.conn_builder.clone(), - connector, - pool: Pool::new(self.pool_config, &self.conn_builder.exec), - } - } -} - -impl fmt::Debug for Builder { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Builder") - .field("client_config", &self.client_config) - .field("conn_builder", &self.conn_builder) - .field("pool_config", &self.pool_config) - .finish() - } -} - -#[cfg(test)] -mod unit_tests { - use super::*; - - #[test] - fn set_relative_uri_with_implicit_path() { - let mut uri = "http://hyper.rs".parse().unwrap(); - origin_form(&mut uri); - assert_eq!(uri.to_string(), "/"); - } - - #[test] - fn test_origin_form() { - let mut uri = "http://hyper.rs/guides".parse().unwrap(); - origin_form(&mut uri); - assert_eq!(uri.to_string(), "/guides"); - - let mut uri = "http://hyper.rs/guides?foo=bar".parse().unwrap(); - origin_form(&mut uri); - assert_eq!(uri.to_string(), "/guides?foo=bar"); - } - - #[test] - fn test_absolute_form() { - let mut uri = "http://hyper.rs/guides".parse().unwrap(); - absolute_form(&mut uri); - assert_eq!(uri.to_string(), "http://hyper.rs/guides"); - - let mut uri = "https://hyper.rs/guides".parse().unwrap(); - absolute_form(&mut uri); - assert_eq!(uri.to_string(), "/guides"); - } - - #[test] - fn test_authority_form() { - let _ = pretty_env_logger::try_init(); - - let mut uri = "http://hyper.rs".parse().unwrap(); - authority_form(&mut uri); - assert_eq!(uri.to_string(), "hyper.rs"); - - let mut uri = "hyper.rs".parse().unwrap(); - authority_form(&mut uri); - assert_eq!(uri.to_string(), "hyper.rs"); - } - - #[test] - fn test_extract_domain_connect_no_port() { - let mut uri = "hyper.rs".parse().unwrap(); - let (scheme, host) = extract_domain(&mut uri, true).expect("extract domain"); - assert_eq!(scheme, *"http"); - assert_eq!(host, "hyper.rs"); - } +cfg_feature! 
{ + #![any(feature = "http1", feature = "http2")] + + pub use self::client::{Builder, Client, ResponseFuture}; + + mod client; + pub mod conn; + pub(super) mod dispatch; + mod pool; + pub mod service; } diff --git a/third_party/rust/hyper/src/client/pool.rs b/third_party/rust/hyper/src/client/pool.rs index 8c1ee24c0d4b..b9772d688d3f 100644 --- a/third_party/rust/hyper/src/client/pool.rs +++ b/third_party/rust/hyper/src/client/pool.rs @@ -1,4 +1,5 @@ use std::collections::{HashMap, HashSet, VecDeque}; +use std::error::Error as StdError; use std::fmt; use std::ops::{Deref, DerefMut}; use std::sync::{Arc, Mutex, Weak}; @@ -9,9 +10,10 @@ use std::time::{Duration, Instant}; use futures_channel::oneshot; #[cfg(feature = "runtime")] use tokio::time::{Duration, Instant, Interval}; +use tracing::{debug, trace}; -use super::Ver; -use crate::common::{task, Exec, Future, Pin, Poll, Unpin}; +use super::client::Ver; +use crate::common::{exec::Exec, task, Future, Pin, Poll, Unpin}; // FIXME: allow() required due to `impl Trait` leaking types to this lint #[allow(missing_debug_implementations)] @@ -45,6 +47,7 @@ pub(super) enum Reservation { /// This connection could be used multiple times, the first one will be /// reinserted into the `idle` pool, and the second will be given to /// the `Checkout`. + #[cfg(feature = "http2")] Shared(T, T), /// This connection requires unique access. It will be returned after /// use is complete. @@ -99,7 +102,7 @@ impl Config { } impl Pool { - pub fn new(config: Config, __exec: &Exec) -> Pool { + pub(super) fn new(config: Config, __exec: &Exec) -> Pool { let inner = if config.is_enabled() { Some(Arc::new(Mutex::new(PoolInner { connecting: HashSet::new(), @@ -139,7 +142,7 @@ impl Pool { impl Pool { /// Returns a `Checkout` which is a future that resolves if an idle /// connection becomes available. - pub fn checkout(&self, key: Key) -> Checkout { + pub(super) fn checkout(&self, key: Key) -> Checkout { Checkout { key, pool: self.clone(), @@ -199,9 +202,14 @@ impl Pool { } */ - pub(super) fn pooled(&self, mut connecting: Connecting, value: T) -> Pooled { + pub(super) fn pooled( + &self, + #[cfg_attr(not(feature = "http2"), allow(unused_mut))] mut connecting: Connecting, + value: T, + ) -> Pooled { let (value, pool_ref) = if let Some(ref enabled) = self.inner { match value.reserve() { + #[cfg(feature = "http2")] Reservation::Shared(to_insert, to_return) => { let mut inner = enabled.lock().unwrap(); inner.put(connecting.key.clone(), to_insert, enabled); @@ -291,6 +299,7 @@ impl<'a, T: Poolable + 'a> IdlePopper<'a, T> { } let value = match entry.value.reserve() { + #[cfg(feature = "http2")] Reservation::Shared(to_reinsert, to_checkout) => { self.list.push(Idle { idle_at: Instant::now(), @@ -325,6 +334,7 @@ impl PoolInner { if !tx.is_canceled() { let reserved = value.take().expect("value already sent"); let reserved = match reserved.reserve() { + #[cfg(feature = "http2")] Reservation::Shared(to_keep, to_send) => { value = Some(to_keep); to_send @@ -448,7 +458,9 @@ impl PoolInner { trace!("idle interval evicting closed for {:?}", key); return false; } - if now - entry.idle_at > dur { + + // Avoid `Instant::sub` to avoid issues like rust-lang/rust#86470. 
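// Illustrative sketch, not part of the vendored sources: what the switch away
// from plain `Instant` subtraction buys. On some platforms `Instant` is not
// strictly monotonic, so `now - earlier` can panic when `earlier` reads as
// later than `now`; `saturating_duration_since` clamps that case to zero.
// `idle_expired` is an illustrative name.
use std::time::{Duration, Instant};

fn idle_expired(idle_at: Instant, now: Instant, timeout: Duration) -> bool {
    // Equivalent to `now - idle_at > timeout`, minus the panic risk.
    now.saturating_duration_since(idle_at) > timeout
}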
+ if now.saturating_duration_since(entry.idle_at) > dur { trace!("idle interval evicting expired for {:?}", key); return false; } @@ -481,11 +493,11 @@ pub(super) struct Pooled { } impl Pooled { - pub fn is_reused(&self) -> bool { + pub(super) fn is_reused(&self) -> bool { self.is_reused } - pub fn is_pool_enabled(&self) -> bool { + pub(super) fn is_pool_enabled(&self) -> bool { self.pool.0.is_some() } @@ -552,28 +564,40 @@ pub(super) struct Checkout { waiter: Option>, } +#[derive(Debug)] +pub(super) struct CheckoutIsClosedError; + +impl StdError for CheckoutIsClosedError {} + +impl fmt::Display for CheckoutIsClosedError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("checked out connection was closed") + } +} + impl Checkout { fn poll_waiter( &mut self, cx: &mut task::Context<'_>, ) -> Poll>>> { - static CANCELED: &str = "pool checkout failed"; if let Some(mut rx) = self.waiter.take() { match Pin::new(&mut rx).poll(cx) { Poll::Ready(Ok(value)) => { if value.is_open() { Poll::Ready(Some(Ok(self.pool.reuse(&self.key, value)))) } else { - Poll::Ready(Some(Err(crate::Error::new_canceled().with(CANCELED)))) + Poll::Ready(Some(Err( + crate::Error::new_canceled().with(CheckoutIsClosedError) + ))) } } Poll::Pending => { self.waiter = Some(rx); Poll::Pending } - Poll::Ready(Err(_canceled)) => { - Poll::Ready(Some(Err(crate::Error::new_canceled().with(CANCELED)))) - } + Poll::Ready(Err(_canceled)) => Poll::Ready(Some(Err( + crate::Error::new_canceled().with("request has been canceled") + ))), } } else { Poll::Ready(None) @@ -699,29 +723,35 @@ impl Expiration { fn expires(&self, instant: Instant) -> bool { match self.0 { - Some(timeout) => instant.elapsed() > timeout, + // Avoid `Instant::elapsed` to avoid issues like rust-lang/rust#86470. + Some(timeout) => Instant::now().saturating_duration_since(instant) > timeout, None => false, } } } #[cfg(feature = "runtime")] -struct IdleTask { - interval: Interval, - pool: WeakOpt>>, - // This allows the IdleTask to be notified as soon as the entire - // Pool is fully dropped, and shutdown. This channel is never sent on, - // but Err(Canceled) will be received when the Pool is dropped. - pool_drop_notifier: oneshot::Receiver, +pin_project_lite::pin_project! { + struct IdleTask { + #[pin] + interval: Interval, + pool: WeakOpt>>, + // This allows the IdleTask to be notified as soon as the entire + // Pool is fully dropped, and shutdown. This channel is never sent on, + // but Err(Canceled) will be received when the Pool is dropped. 
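// Illustrative sketch, not part of the vendored sources: the pattern behind
// `CheckoutIsClosedError` above — a zero-sized error type gives the canceled
// checkout a structured cause (reachable through `Error::source`) instead of a
// bare `&str`. `PoolCheckoutClosed` and `find_in_chain` are illustrative names.
use std::error::Error;
use std::fmt;

#[derive(Debug)]
struct PoolCheckoutClosed;

impl fmt::Display for PoolCheckoutClosed {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str("checked out connection was closed")
    }
}

impl Error for PoolCheckoutClosed {}

// Walking the source chain for a typed cause, the same way hyper's own
// `find_source` helper does.
fn find_in_chain<'a, E: Error + 'static>(top: &'a (dyn Error + 'static)) -> Option<&'a E> {
    let mut cause = Some(top);
    while let Some(err) = cause {
        if let Some(typed) = err.downcast_ref::<E>() {
            return Some(typed);
        }
        cause = err.source();
    }
    None
}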
+ #[pin] + pool_drop_notifier: oneshot::Receiver, + } } #[cfg(feature = "runtime")] impl Future for IdleTask { type Output = (); - fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { + fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { + let mut this = self.project(); loop { - match Pin::new(&mut self.pool_drop_notifier).poll(cx) { + match this.pool_drop_notifier.as_mut().poll(cx) { Poll::Ready(Ok(n)) => match n {}, Poll::Pending => (), Poll::Ready(Err(_canceled)) => { @@ -730,9 +760,9 @@ impl Future for IdleTask { } } - ready!(self.interval.poll_tick(cx)); + ready!(this.interval.as_mut().poll_tick(cx)); - if let Some(inner) = self.pool.upgrade() { + if let Some(inner) = this.pool.upgrade() { if let Ok(mut inner) = inner.lock() { trace!("idle interval checking for expired"); inner.clear_expired(); @@ -764,7 +794,7 @@ mod tests { use std::time::Duration; use super::{Connecting, Key, Pool, Poolable, Reservation, WeakOpt}; - use crate::common::{task, Exec, Future, Pin}; + use crate::common::{exec::Exec, task, Future, Pin}; /// Test unique reservations. #[derive(Debug, PartialEq, Eq)] @@ -850,7 +880,7 @@ mod tests { let pooled = pool.pooled(c(key.clone()), Uniq(41)); drop(pooled); - tokio::time::delay_for(pool.locked().timeout.unwrap()).await; + tokio::time::sleep(pool.locked().timeout.unwrap()).await; let mut checkout = pool.checkout(key); let poll_once = PollOnce(&mut checkout); let is_not_ready = poll_once.await.is_none(); @@ -871,7 +901,7 @@ mod tests { pool.locked().idle.get(&key).map(|entries| entries.len()), Some(3) ); - tokio::time::delay_for(pool.locked().timeout.unwrap()).await; + tokio::time::sleep(pool.locked().timeout.unwrap()).await; let mut checkout = pool.checkout(key.clone()); let poll_once = PollOnce(&mut checkout); @@ -978,6 +1008,7 @@ mod tests { #[derive(Debug)] struct CanClose { + #[allow(unused)] val: i32, closed: bool, } diff --git a/third_party/rust/hyper/src/client/service.rs b/third_party/rust/hyper/src/client/service.rs index 4013c5e54efe..406f61edc963 100644 --- a/third_party/rust/hyper/src/client/service.rs +++ b/third_party/rust/hyper/src/client/service.rs @@ -6,6 +6,8 @@ use std::error::Error as StdError; use std::future::Future; use std::marker::PhantomData; +use tracing::debug; + use super::conn::{Builder, SendRequest}; use crate::{ body::HttpBody, diff --git a/third_party/rust/hyper/src/client/tests.rs b/third_party/rust/hyper/src/client/tests.rs index e955cb60c60d..0a281a637d0b 100644 --- a/third_party/rust/hyper/src/client/tests.rs +++ b/third_party/rust/hyper/src/client/tests.rs @@ -7,7 +7,7 @@ use super::Client; #[tokio::test] async fn client_connect_uri_argument() { - let connector = tower_util::service_fn(|dst: http::Uri| { + let connector = tower::service_fn(|dst: http::Uri| { assert_eq!(dst.scheme(), Some(&http::uri::Scheme::HTTP)); assert_eq!(dst.host(), Some("example.local")); assert_eq!(dst.port(), None); diff --git a/third_party/rust/hyper/src/common/buf.rs b/third_party/rust/hyper/src/common/buf.rs index 8f71b7bbad84..64e9333ead43 100644 --- a/third_party/rust/hyper/src/common/buf.rs +++ b/third_party/rust/hyper/src/common/buf.rs @@ -1,7 +1,7 @@ use std::collections::VecDeque; use std::io::IoSlice; -use bytes::Buf; +use bytes::{Buf, BufMut, Bytes, BytesMut}; pub(crate) struct BufList { bufs: VecDeque, @@ -21,6 +21,7 @@ impl BufList { } #[inline] + #[cfg(feature = "http1")] pub(crate) fn bufs_cnt(&self) -> usize { self.bufs.len() } @@ -33,8 +34,8 @@ impl Buf for BufList { } #[inline] - fn bytes(&self) -> &[u8] 
{ - self.bufs.front().map(Buf::bytes).unwrap_or_default() + fn chunk(&self) -> &[u8] { + self.bufs.front().map(Buf::chunk).unwrap_or_default() } #[inline] @@ -56,17 +57,95 @@ impl Buf for BufList { } #[inline] - fn bytes_vectored<'t>(&'t self, dst: &mut [IoSlice<'t>]) -> usize { + fn chunks_vectored<'t>(&'t self, dst: &mut [IoSlice<'t>]) -> usize { if dst.is_empty() { return 0; } let mut vecs = 0; for buf in &self.bufs { - vecs += buf.bytes_vectored(&mut dst[vecs..]); + vecs += buf.chunks_vectored(&mut dst[vecs..]); if vecs == dst.len() { break; } } vecs } + + #[inline] + fn copy_to_bytes(&mut self, len: usize) -> Bytes { + // Our inner buffer may have an optimized version of copy_to_bytes, and if the whole + // request can be fulfilled by the front buffer, we can take advantage. + match self.bufs.front_mut() { + Some(front) if front.remaining() == len => { + let b = front.copy_to_bytes(len); + self.bufs.pop_front(); + b + } + Some(front) if front.remaining() > len => front.copy_to_bytes(len), + _ => { + assert!(len <= self.remaining(), "`len` greater than remaining"); + let mut bm = BytesMut::with_capacity(len); + bm.put(self.take(len)); + bm.freeze() + } + } + } +} + +#[cfg(test)] +mod tests { + use std::ptr; + + use super::*; + + fn hello_world_buf() -> BufList { + BufList { + bufs: vec![Bytes::from("Hello"), Bytes::from(" "), Bytes::from("World")].into(), + } + } + + #[test] + fn to_bytes_shorter() { + let mut bufs = hello_world_buf(); + let old_ptr = bufs.chunk().as_ptr(); + let start = bufs.copy_to_bytes(4); + assert_eq!(start, "Hell"); + assert!(ptr::eq(old_ptr, start.as_ptr())); + assert_eq!(bufs.chunk(), b"o"); + assert!(ptr::eq(old_ptr.wrapping_add(4), bufs.chunk().as_ptr())); + assert_eq!(bufs.remaining(), 7); + } + + #[test] + fn to_bytes_eq() { + let mut bufs = hello_world_buf(); + let old_ptr = bufs.chunk().as_ptr(); + let start = bufs.copy_to_bytes(5); + assert_eq!(start, "Hello"); + assert!(ptr::eq(old_ptr, start.as_ptr())); + assert_eq!(bufs.chunk(), b" "); + assert_eq!(bufs.remaining(), 6); + } + + #[test] + fn to_bytes_longer() { + let mut bufs = hello_world_buf(); + let start = bufs.copy_to_bytes(7); + assert_eq!(start, "Hello W"); + assert_eq!(bufs.remaining(), 4); + } + + #[test] + fn one_long_buf_to_bytes() { + let mut buf = BufList::new(); + buf.push(b"Hello World" as &[_]); + assert_eq!(buf.copy_to_bytes(5), "Hello"); + assert_eq!(buf.chunk(), b" World"); + } + + #[test] + #[should_panic(expected = "`len` greater than remaining")] + fn buf_to_bytes_too_many() { + hello_world_buf().copy_to_bytes(42); + } } diff --git a/third_party/rust/hyper/src/common/date.rs b/third_party/rust/hyper/src/common/date.rs new file mode 100644 index 000000000000..a436fc07c008 --- /dev/null +++ b/third_party/rust/hyper/src/common/date.rs @@ -0,0 +1,124 @@ +use std::cell::RefCell; +use std::fmt::{self, Write}; +use std::str; +use std::time::{Duration, SystemTime}; + +#[cfg(feature = "http2")] +use http::header::HeaderValue; +use httpdate::HttpDate; + +// "Sun, 06 Nov 1994 08:49:37 GMT".len() +pub(crate) const DATE_VALUE_LENGTH: usize = 29; + +#[cfg(feature = "http1")] +pub(crate) fn extend(dst: &mut Vec) { + CACHED.with(|cache| { + dst.extend_from_slice(cache.borrow().buffer()); + }) +} + +#[cfg(feature = "http1")] +pub(crate) fn update() { + CACHED.with(|cache| { + cache.borrow_mut().check(); + }) +} + +#[cfg(feature = "http2")] +pub(crate) fn update_and_header_value() -> HeaderValue { + CACHED.with(|cache| { + let mut cache = cache.borrow_mut(); + cache.check(); + 
HeaderValue::from_bytes(cache.buffer()).expect("Date format should be valid HeaderValue") + }) +} + +struct CachedDate { + bytes: [u8; DATE_VALUE_LENGTH], + pos: usize, + next_update: SystemTime, +} + +thread_local!(static CACHED: RefCell = RefCell::new(CachedDate::new())); + +impl CachedDate { + fn new() -> Self { + let mut cache = CachedDate { + bytes: [0; DATE_VALUE_LENGTH], + pos: 0, + next_update: SystemTime::now(), + }; + cache.update(cache.next_update); + cache + } + + fn buffer(&self) -> &[u8] { + &self.bytes[..] + } + + fn check(&mut self) { + let now = SystemTime::now(); + if now > self.next_update { + self.update(now); + } + } + + fn update(&mut self, now: SystemTime) { + self.render(now); + self.next_update = now + Duration::new(1, 0); + } + + fn render(&mut self, now: SystemTime) { + self.pos = 0; + let _ = write!(self, "{}", HttpDate::from(now)); + debug_assert!(self.pos == DATE_VALUE_LENGTH); + } +} + +impl fmt::Write for CachedDate { + fn write_str(&mut self, s: &str) -> fmt::Result { + let len = s.len(); + self.bytes[self.pos..self.pos + len].copy_from_slice(s.as_bytes()); + self.pos += len; + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[cfg(feature = "nightly")] + use test::Bencher; + + #[test] + fn test_date_len() { + assert_eq!(DATE_VALUE_LENGTH, "Sun, 06 Nov 1994 08:49:37 GMT".len()); + } + + #[cfg(feature = "nightly")] + #[bench] + fn bench_date_check(b: &mut Bencher) { + let mut date = CachedDate::new(); + // cache the first update + date.check(); + + b.iter(|| { + date.check(); + }); + } + + #[cfg(feature = "nightly")] + #[bench] + fn bench_date_render(b: &mut Bencher) { + let mut date = CachedDate::new(); + let now = SystemTime::now(); + date.render(now); + b.bytes = date.buffer().len() as u64; + + b.iter(|| { + date.render(now); + test::black_box(&date); + }); + } +} diff --git a/third_party/rust/hyper/src/common/drain.rs b/third_party/rust/hyper/src/common/drain.rs index 7abb9f9dedf9..174da876dfb3 100644 --- a/third_party/rust/hyper/src/common/drain.rs +++ b/third_party/rust/hyper/src/common/drain.rs @@ -1,52 +1,35 @@ use std::mem; -use pin_project::pin_project; -use tokio::sync::{mpsc, watch}; +use pin_project_lite::pin_project; +use tokio::sync::watch; -use super::{task, Future, Never, Pin, Poll}; +use super::{task, Future, Pin, Poll}; -// Sentinel value signaling that the watch is still open -#[derive(Clone, Copy)] -enum Action { - Open, - // Closed isn't sent via the `Action` type, but rather once - // the watch::Sender is dropped. +pub(crate) fn channel() -> (Signal, Watch) { + let (tx, rx) = watch::channel(()); + (Signal { tx }, Watch { rx }) } -pub fn channel() -> (Signal, Watch) { - let (tx, rx) = watch::channel(Action::Open); - let (drained_tx, drained_rx) = mpsc::channel(1); - ( - Signal { - drained_rx, - _tx: tx, - }, - Watch { drained_tx, rx }, - ) +pub(crate) struct Signal { + tx: watch::Sender<()>, } -pub struct Signal { - drained_rx: mpsc::Receiver, - _tx: watch::Sender, -} - -pub struct Draining { - drained_rx: mpsc::Receiver, -} +pub(crate) struct Draining(Pin + Send + Sync>>); #[derive(Clone)] -pub struct Watch { - drained_tx: mpsc::Sender, - rx: watch::Receiver, +pub(crate) struct Watch { + rx: watch::Receiver<()>, } -#[allow(missing_debug_implementations)] -#[pin_project] -pub struct Watching { - #[pin] - future: F, - state: State, - watch: Watch, +pin_project! 
{ + #[allow(missing_debug_implementations)] + pub struct Watching { + #[pin] + future: F, + state: State, + watch: Pin + Send + Sync>>, + _rx: watch::Receiver<()>, + } } enum State { @@ -55,11 +38,9 @@ enum State { } impl Signal { - pub fn drain(self) -> Draining { - // Simply dropping `self.tx` will signal the watchers - Draining { - drained_rx: self.drained_rx, - } + pub(crate) fn drain(self) -> Draining { + let _ = self.tx.send(()); + Draining(Box::pin(async move { self.tx.closed().await })) } } @@ -67,23 +48,27 @@ impl Future for Draining { type Output = (); fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { - match ready!(self.drained_rx.poll_recv(cx)) { - Some(never) => match never {}, - None => Poll::Ready(()), - } + Pin::new(&mut self.as_mut().0).poll(cx) } } impl Watch { - pub fn watch(self, future: F, on_drain: FN) -> Watching + pub(crate) fn watch(self, future: F, on_drain: FN) -> Watching where F: Future, FN: FnOnce(Pin<&mut F>), { + let Self { mut rx } = self; + let _rx = rx.clone(); Watching { future, state: State::Watch(on_drain), - watch: self, + watch: Box::pin(async move { + let _ = rx.changed().await; + }), + // Keep the receiver alive until the future completes, so that + // dropping it can signal that draining has completed. + _rx, } } } @@ -100,12 +85,12 @@ where loop { match mem::replace(me.state, State::Draining) { State::Watch(on_drain) => { - match me.watch.rx.poll_recv_ref(cx) { - Poll::Ready(None) => { + match Pin::new(&mut me.watch).poll(cx) { + Poll::Ready(()) => { // Drain has been triggered! on_drain(me.future.as_mut()); } - Poll::Ready(Some(_ /*State::Open*/)) | Poll::Pending => { + Poll::Pending => { *me.state = State::Watch(on_drain); return me.future.poll(cx); } diff --git a/third_party/rust/hyper/src/common/exec.rs b/third_party/rust/hyper/src/common/exec.rs index f4e80ead5aef..b6da9a276b66 100644 --- a/third_party/rust/hyper/src/common/exec.rs +++ b/third_party/rust/hyper/src/common/exec.rs @@ -3,26 +3,29 @@ use std::future::Future; use std::pin::Pin; use std::sync::Arc; -use crate::body::{Body, HttpBody}; +#[cfg(all(feature = "server", any(feature = "http1", feature = "http2")))] +use crate::body::Body; +#[cfg(feature = "server")] +use crate::body::HttpBody; +#[cfg(all(feature = "http2", feature = "server"))] use crate::proto::h2::server::H2Stream; +use crate::rt::Executor; +#[cfg(all(feature = "server", any(feature = "http1", feature = "http2")))] use crate::server::conn::spawn_all::{NewSvcTask, Watcher}; +#[cfg(all(feature = "server", any(feature = "http1", feature = "http2")))] use crate::service::HttpService; -/// An executor of futures. -pub trait Executor { - /// Place the future into the executor to be run. - fn execute(&self, fut: Fut); -} - -pub trait H2Exec: Clone { +#[cfg(feature = "server")] +pub trait ConnStreamExec: Clone { fn execute_h2stream(&mut self, fut: H2Stream); } +#[cfg(all(feature = "server", any(feature = "http1", feature = "http2")))] pub trait NewSvcExec, E, W: Watcher>: Clone { fn execute_new_svc(&mut self, fut: NewSvcTask); } -pub type BoxSendFuture = Pin + Send>>; +pub(crate) type BoxSendFuture = Pin + Send>>; // Either the user provides an executor for background tasks, or we use // `tokio::spawn`. 
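// Illustrative sketch, not part of the vendored sources: the shape of the drain
// primitive rebuilt above on `tokio::sync::watch`. `Signal::drain` sends the
// notification and then waits on `closed()`, which resolves once every receiver
// (i.e. every watched connection) has been dropped. `drain_demo` is an
// illustrative name; running it requires a tokio 1.x runtime.
use tokio::sync::watch;

async fn drain_demo() {
    let (tx, rx) = watch::channel(());

    let mut conn_rx = rx.clone();
    let conn = tokio::spawn(async move {
        // A watched task parks here until the drain signal fires...
        let _ = conn_rx.changed().await;
        // ...then finishes its graceful shutdown and drops `conn_rx`.
    });

    drop(rx); // keep only the connection's receiver alive
    let _ = tx.send(()); // trigger draining
    tx.closed().await; // completes once all receivers are gone
    let _ = conn.await;
}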
@@ -64,7 +67,8 @@ impl fmt::Debug for Exec { } } -impl H2Exec for Exec +#[cfg(feature = "server")] +impl ConnStreamExec for Exec where H2Stream: Future + Send + 'static, B: HttpBody, @@ -74,6 +78,7 @@ where } } +#[cfg(all(feature = "server", any(feature = "http1", feature = "http2")))] impl NewSvcExec for Exec where NewSvcTask: Future + Send + 'static, @@ -87,7 +92,8 @@ where // ==== impl Executor ===== -impl H2Exec for E +#[cfg(feature = "server")] +impl ConnStreamExec for E where E: Executor> + Clone, H2Stream: Future, @@ -98,6 +104,7 @@ where } } +#[cfg(all(feature = "server", any(feature = "http1", feature = "http2")))] impl NewSvcExec for E where E: Executor> + Clone, @@ -109,3 +116,30 @@ where self.execute(fut) } } + +// If http2 is not enable, we just have a stub here, so that the trait bounds +// that *would* have been needed are still checked. Why? +// +// Because enabling `http2` shouldn't suddenly add new trait bounds that cause +// a compilation error. +#[cfg(not(feature = "http2"))] +#[allow(missing_debug_implementations)] +pub struct H2Stream(std::marker::PhantomData<(F, B)>); + +#[cfg(not(feature = "http2"))] +impl Future for H2Stream +where + F: Future, E>>, + B: crate::body::HttpBody, + B::Error: Into>, + E: Into>, +{ + type Output = (); + + fn poll( + self: Pin<&mut Self>, + _cx: &mut std::task::Context<'_>, + ) -> std::task::Poll { + unreachable!() + } +} diff --git a/third_party/rust/hyper/src/common/io/rewind.rs b/third_party/rust/hyper/src/common/io/rewind.rs index 14650697c31b..58f1de6c89fc 100644 --- a/third_party/rust/hyper/src/common/io/rewind.rs +++ b/third_party/rust/hyper/src/common/io/rewind.rs @@ -2,7 +2,7 @@ use std::marker::Unpin; use std::{cmp, io}; use bytes::{Buf, Bytes}; -use tokio::io::{AsyncRead, AsyncWrite}; +use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; use crate::common::{task, Pin, Poll}; @@ -14,6 +14,7 @@ pub(crate) struct Rewind { } impl Rewind { + #[cfg(any(all(feature = "http2", feature = "server"), test))] pub(crate) fn new(io: T) -> Self { Rewind { pre: None, @@ -28,6 +29,7 @@ impl Rewind { } } + #[cfg(any(all(feature = "http1", feature = "http2", feature = "server"), test))] pub(crate) fn rewind(&mut self, bs: Bytes) { debug_assert!(self.pre.is_none()); self.pre = Some(bs); @@ -37,36 +39,33 @@ impl Rewind { (self.inner, self.pre.unwrap_or_else(Bytes::new)) } - pub(crate) fn get_mut(&mut self) -> &mut T { - &mut self.inner - } + // pub(crate) fn get_mut(&mut self) -> &mut T { + // &mut self.inner + // } } impl AsyncRead for Rewind where T: AsyncRead + Unpin, { - #[inline] - unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [std::mem::MaybeUninit]) -> bool { - self.inner.prepare_uninitialized_buffer(buf) - } - fn poll_read( mut self: Pin<&mut Self>, cx: &mut task::Context<'_>, - buf: &mut [u8], - ) -> Poll> { + buf: &mut ReadBuf<'_>, + ) -> Poll> { if let Some(mut prefix) = self.pre.take() { // If there are no remaining bytes, let the bytes get dropped. if !prefix.is_empty() { - let copy_len = cmp::min(prefix.len(), buf.len()); - prefix.copy_to_slice(&mut buf[..copy_len]); + let copy_len = cmp::min(prefix.len(), buf.remaining()); + // TODO: There should be a way to do following two lines cleaner... 
+ buf.put_slice(&prefix[..copy_len]); + prefix.advance(copy_len); // Put back whats left if !prefix.is_empty() { self.pre = Some(prefix); } - return Poll::Ready(Ok(copy_len)); + return Poll::Ready(Ok(())); } } Pin::new(&mut self.inner).poll_read(cx, buf) @@ -85,6 +84,14 @@ where Pin::new(&mut self.inner).poll_write(cx, buf) } + fn poll_write_vectored( + mut self: Pin<&mut Self>, + cx: &mut task::Context<'_>, + bufs: &[io::IoSlice<'_>], + ) -> Poll> { + Pin::new(&mut self.inner).poll_write_vectored(cx, bufs) + } + fn poll_flush(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll> { Pin::new(&mut self.inner).poll_flush(cx) } @@ -93,13 +100,8 @@ where Pin::new(&mut self.inner).poll_shutdown(cx) } - #[inline] - fn poll_write_buf( - mut self: Pin<&mut Self>, - cx: &mut task::Context<'_>, - buf: &mut B, - ) -> Poll> { - Pin::new(&mut self.inner).poll_write_buf(cx, buf) + fn is_write_vectored(&self) -> bool { + self.inner.is_write_vectored() } } diff --git a/third_party/rust/hyper/src/common/lazy.rs b/third_party/rust/hyper/src/common/lazy.rs index 4d2e322c2c4d..272207730364 100644 --- a/third_party/rust/hyper/src/common/lazy.rs +++ b/third_party/rust/hyper/src/common/lazy.rs @@ -1,4 +1,4 @@ -use std::mem; +use pin_project_lite::pin_project; use super::{task, Future, Pin, Poll}; @@ -12,31 +12,38 @@ where R: Future + Unpin, { Lazy { - inner: Inner::Init(func), + inner: Inner::Init { func }, } } // FIXME: allow() required due to `impl Trait` leaking types to this lint -#[allow(missing_debug_implementations)] -pub(crate) struct Lazy { - inner: Inner, +pin_project! { + #[allow(missing_debug_implementations)] + pub(crate) struct Lazy { + #[pin] + inner: Inner, + } } -enum Inner { - Init(F), - Fut(R), - Empty, +pin_project! { + #[project = InnerProj] + #[project_replace = InnerProjReplace] + enum Inner { + Init { func: F }, + Fut { #[pin] fut: R }, + Empty, + } } impl Started for Lazy where F: FnOnce() -> R, - R: Future + Unpin, + R: Future, { fn started(&self) -> bool { match self.inner { - Inner::Init(_) => false, - Inner::Fut(_) | Inner::Empty => true, + Inner::Init { .. } => false, + Inner::Fut { .. } | Inner::Empty => true, } } } @@ -44,26 +51,26 @@ where impl Future for Lazy where F: FnOnce() -> R, - R: Future + Unpin, + R: Future, { type Output = R::Output; - fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { - if let Inner::Fut(ref mut f) = self.inner { - return Pin::new(f).poll(cx); + fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { + let mut this = self.project(); + + if let InnerProj::Fut { fut } = this.inner.as_mut().project() { + return fut.poll(cx); } - match mem::replace(&mut self.inner, Inner::Empty) { - Inner::Init(func) => { - let mut fut = func(); - let ret = Pin::new(&mut fut).poll(cx); - self.inner = Inner::Fut(fut); - ret + match this.inner.as_mut().project_replace(Inner::Empty) { + InnerProjReplace::Init { func } => { + this.inner.set(Inner::Fut { fut: func() }); + if let InnerProj::Fut { fut } = this.inner.project() { + return fut.poll(cx); + } + unreachable!() } _ => unreachable!("lazy state wrong"), } } } - -// The closure `F` is never pinned -impl Unpin for Lazy {} diff --git a/third_party/rust/hyper/src/common/mod.rs b/third_party/rust/hyper/src/common/mod.rs index d9d62bc2bace..e38c6f5c7ac3 100644 --- a/third_party/rust/hyper/src/common/mod.rs +++ b/third_party/rust/hyper/src/common/mod.rs @@ -8,20 +8,32 @@ macro_rules! 
ready { } pub(crate) mod buf; +#[cfg(all(feature = "server", any(feature = "http1", feature = "http2")))] +pub(crate) mod date; +#[cfg(all(feature = "server", any(feature = "http1", feature = "http2")))] pub(crate) mod drain; +#[cfg(any(feature = "http1", feature = "http2", feature = "server"))] pub(crate) mod exec; pub(crate) mod io; +#[cfg(all(feature = "client", any(feature = "http1", feature = "http2")))] mod lazy; mod never; +#[cfg(any( + feature = "stream", + all(feature = "client", any(feature = "http1", feature = "http2")) +))] pub(crate) mod sync_wrapper; pub(crate) mod task; pub(crate) mod watch; -pub use self::exec::Executor; -pub(crate) use self::exec::{BoxSendFuture, Exec}; +#[cfg(all(feature = "client", any(feature = "http1", feature = "http2")))] pub(crate) use self::lazy::{lazy, Started as Lazy}; -pub use self::never::Never; +#[cfg(any(feature = "http1", feature = "http2", feature = "runtime"))] +pub(crate) use self::never::Never; pub(crate) use self::task::Poll; // group up types normally needed for `Future` -pub(crate) use std::{future::Future, marker::Unpin, pin::Pin}; +cfg_proto! { + pub(crate) use std::marker::Unpin; +} +pub(crate) use std::{future::Future, pin::Pin}; diff --git a/third_party/rust/hyper/src/common/never.rs b/third_party/rust/hyper/src/common/never.rs index f4fdb95ddd91..f143caf60fce 100644 --- a/third_party/rust/hyper/src/common/never.rs +++ b/third_party/rust/hyper/src/common/never.rs @@ -6,7 +6,7 @@ use std::error::Error; use std::fmt; #[derive(Debug)] -pub enum Never {} +pub(crate) enum Never {} impl fmt::Display for Never { fn fmt(&self, _: &mut fmt::Formatter<'_>) -> fmt::Result { diff --git a/third_party/rust/hyper/src/common/sync_wrapper.rs b/third_party/rust/hyper/src/common/sync_wrapper.rs index 1e4aa4039ccc..704d1a6712b1 100644 --- a/third_party/rust/hyper/src/common/sync_wrapper.rs +++ b/third_party/rust/hyper/src/common/sync_wrapper.rs @@ -1,11 +1,6 @@ /* * This is a copy of the sync_wrapper crate. */ -//! A mutual exclusion primitive that relies on static type information only -//! -//! This library is inspired by [this discussion](https://internals.rust-lang.org/t/what-shall-sync-mean-across-an-await/12020/2). -#![doc(html_logo_url = "https://developer.actyx.com/img/logo.svg")] -#![doc(html_favicon_url = "https://developer.actyx.com/img/favicon.ico")] /// A mutual exclusion primitive that relies on static type information only /// @@ -46,7 +41,7 @@ /// [`poll`]: https://doc.rust-lang.org/std/future/trait.Future.html#method.poll /// [`Sync`]: https://doc.rust-lang.org/std/marker/trait.Sync.html #[repr(transparent)] -pub struct SyncWrapper(T); +pub(crate) struct SyncWrapper(T); impl SyncWrapper { /// Creates a new SyncWrapper containing the given value. 
@@ -58,7 +53,7 @@ impl SyncWrapper { /// /// let wrapped = SyncWrapper::new(42); /// ``` - pub fn new(value: T) -> Self { + pub(crate) fn new(value: T) -> Self { Self(value) } @@ -82,7 +77,7 @@ impl SyncWrapper { /// *value = 0; /// assert_eq!(*wrapped.get_mut(), 0); /// ``` - pub fn get_mut(&mut self) -> &mut T { + pub(crate) fn get_mut(&mut self) -> &mut T { &mut self.0 } @@ -105,7 +100,7 @@ impl SyncWrapper { /// assert_eq!(wrapped.into_inner(), 42); /// ``` #[allow(dead_code)] - pub fn into_inner(self) -> T { + pub(crate) fn into_inner(self) -> T { self.0 } } diff --git a/third_party/rust/hyper/src/common/task.rs b/third_party/rust/hyper/src/common/task.rs index bfccfe3bfe49..ec70c957d646 100644 --- a/third_party/rust/hyper/src/common/task.rs +++ b/third_party/rust/hyper/src/common/task.rs @@ -1,9 +1,11 @@ +#[cfg(feature = "http1")] use super::Never; pub(crate) use std::task::{Context, Poll}; /// A function to help "yield" a future, such that it is re-scheduled immediately. /// /// Useful for spin counts, so a future doesn't hog too much time. +#[cfg(feature = "http1")] pub(crate) fn yield_now(cx: &mut Context<'_>) -> Poll { cx.waker().wake_by_ref(); Poll::Pending diff --git a/third_party/rust/hyper/src/error.rs b/third_party/rust/hyper/src/error.rs index 2cffe3d9e4f3..20acf3a7a568 100644 --- a/third_party/rust/hyper/src/error.rs +++ b/third_party/rust/hyper/src/error.rs @@ -1,7 +1,6 @@ //! Error and Result module. use std::error::Error as StdError; use std::fmt; -use std::io; /// Result type often returned from methods that can have hyper `Error`s. pub type Result = std::result::Result; @@ -18,123 +17,183 @@ struct ErrorImpl { cause: Option, } -#[derive(Debug, PartialEq)] -pub(crate) enum Kind { +#[derive(Debug)] +pub(super) enum Kind { Parse(Parse), User(User), /// A message reached EOF, but is not complete. + #[allow(unused)] IncompleteMessage, /// A connection received a message (or bytes) when not waiting for one. + #[cfg(feature = "http1")] UnexpectedMessage, /// A pending item was dropped before ever being processed. Canceled, /// Indicates a channel (client or body sender) is closed. ChannelClosed, /// An `io::Error` that occurred while trying to read or write to a network stream. + #[cfg(any(feature = "http1", feature = "http2"))] Io, /// Error occurred while connecting. + #[allow(unused)] Connect, /// Error creating a TcpListener. - #[cfg(feature = "tcp")] + #[cfg(all(feature = "tcp", feature = "server"))] Listen, /// Error accepting on an Incoming stream. + #[cfg(any(feature = "http1", feature = "http2"))] + #[cfg(feature = "server")] Accept, + /// User took too long to send headers + #[cfg(all(feature = "http1", feature = "server", feature = "runtime"))] + HeaderTimeout, /// Error while reading a body from connection. + #[cfg(any(feature = "http1", feature = "http2", feature = "stream"))] Body, /// Error while writing a body to connection. + #[cfg(any(feature = "http1", feature = "http2"))] BodyWrite, - /// The body write was aborted. - BodyWriteAborted, /// Error calling AsyncWrite::shutdown() + #[cfg(feature = "http1")] Shutdown, /// A general error from h2. 
+ #[cfg(feature = "http2")] Http2, } -#[derive(Debug, PartialEq)] -pub(crate) enum Parse { +#[derive(Debug)] +pub(super) enum Parse { Method, Version, + #[cfg(feature = "http1")] VersionH2, Uri, - Header, + #[cfg_attr(not(all(feature = "http1", feature = "server")), allow(unused))] + UriTooLong, + Header(Header), TooLarge, Status, + #[cfg_attr(debug_assertions, allow(unused))] + Internal, } -#[derive(Debug, PartialEq)] -pub(crate) enum User { +#[derive(Debug)] +pub(super) enum Header { + Token, + #[cfg(feature = "http1")] + ContentLengthInvalid, + #[cfg(all(feature = "http1", feature = "server"))] + TransferEncodingInvalid, + #[cfg(feature = "http1")] + TransferEncodingUnexpected, +} + +#[derive(Debug)] +pub(super) enum User { /// Error calling user's HttpBody::poll_data(). + #[cfg(any(feature = "http1", feature = "http2"))] Body, + /// The user aborted writing of the outgoing body. + BodyWriteAborted, /// Error calling user's MakeService. + #[cfg(any(feature = "http1", feature = "http2"))] + #[cfg(feature = "server")] MakeService, /// Error from future of user's Service. + #[cfg(any(feature = "http1", feature = "http2"))] Service, /// User tried to send a certain header in an unexpected context. /// /// For example, sending both `content-length` and `transfer-encoding`. + #[cfg(any(feature = "http1", feature = "http2"))] + #[cfg(feature = "server")] UnexpectedHeader, /// User tried to create a Request with bad version. + #[cfg(any(feature = "http1", feature = "http2"))] + #[cfg(feature = "client")] UnsupportedVersion, /// User tried to create a CONNECT Request with the Client. + #[cfg(any(feature = "http1", feature = "http2"))] + #[cfg(feature = "client")] UnsupportedRequestMethod, /// User tried to respond with a 1xx (not 101) response code. + #[cfg(feature = "http1")] + #[cfg(feature = "server")] UnsupportedStatusCode, /// User tried to send a Request with Client with non-absolute URI. + #[cfg(any(feature = "http1", feature = "http2"))] + #[cfg(feature = "client")] AbsoluteUriRequired, /// User tried polling for an upgrade that doesn't exist. NoUpgrade, /// User polled for an upgrade, but low-level API is not using upgrades. + #[cfg(feature = "http1")] ManualUpgrade, + + /// User called `server::Connection::without_shutdown()` on an HTTP/2 conn. + #[cfg(feature = "server")] + WithoutShutdownNonHttp1, + + /// User aborted in an FFI callback. + #[cfg(feature = "ffi")] + AbortedByCallback, } // Sentinel type to indicate the error was caused by a timeout. #[derive(Debug)] -pub(crate) struct TimedOut; +pub(super) struct TimedOut; impl Error { /// Returns true if this was an HTTP parse error. pub fn is_parse(&self) -> bool { - match self.inner.kind { - Kind::Parse(_) => true, - _ => false, - } + matches!(self.inner.kind, Kind::Parse(_)) + } + + /// Returns true if this was an HTTP parse error caused by a message that was too large. + pub fn is_parse_too_large(&self) -> bool { + matches!( + self.inner.kind, + Kind::Parse(Parse::TooLarge) | Kind::Parse(Parse::UriTooLong) + ) + } + + /// Returns true if this was an HTTP parse error caused by an invalid response status code or + /// reason phrase. + pub fn is_parse_status(&self) -> bool { + matches!(self.inner.kind, Kind::Parse(Parse::Status)) } /// Returns true if this error was caused by user code. pub fn is_user(&self) -> bool { - match self.inner.kind { - Kind::User(_) => true, - _ => false, - } + matches!(self.inner.kind, Kind::User(_)) } /// Returns true if this was about a `Request` that was canceled. 
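// Illustrative sketch, not part of the vendored sources: how downstream code
// might use the predicates introduced above (hyper 0.14-style API). `classify`
// is an illustrative name.
fn classify(err: &hyper::Error) -> &'static str {
    if err.is_parse_too_large() {
        "message head or URI exceeded the configured limits"
    } else if err.is_parse_status() {
        "peer sent an invalid status line"
    } else if err.is_canceled() {
        "request was canceled before it completed"
    } else if err.is_closed() {
        "sender channel closed"
    } else if err.is_incomplete_message() {
        "connection closed mid-message"
    } else {
        "other hyper error"
    }
}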
pub fn is_canceled(&self) -> bool { - self.inner.kind == Kind::Canceled + matches!(self.inner.kind, Kind::Canceled) } /// Returns true if a sender's channel is closed. pub fn is_closed(&self) -> bool { - self.inner.kind == Kind::ChannelClosed + matches!(self.inner.kind, Kind::ChannelClosed) } /// Returns true if this was an error from `Connect`. pub fn is_connect(&self) -> bool { - self.inner.kind == Kind::Connect + matches!(self.inner.kind, Kind::Connect) } /// Returns true if the connection closed before a message could complete. pub fn is_incomplete_message(&self) -> bool { - self.inner.kind == Kind::IncompleteMessage + matches!(self.inner.kind, Kind::IncompleteMessage) } /// Returns true if the body write was aborted. pub fn is_body_write_aborted(&self) -> bool { - self.inner.kind == Kind::BodyWriteAborted + matches!(self.inner.kind, Kind::User(User::BodyWriteAborted)) } /// Returns true if the error was caused by a timeout. @@ -147,22 +206,23 @@ impl Error { self.inner.cause } - pub(crate) fn new(kind: Kind) -> Error { + pub(super) fn new(kind: Kind) -> Error { Error { inner: Box::new(ErrorImpl { kind, cause: None }), } } - pub(crate) fn with>(mut self, cause: C) -> Error { + pub(super) fn with>(mut self, cause: C) -> Error { self.inner.cause = Some(cause.into()); self } - pub(crate) fn kind(&self) -> &Kind { + #[cfg(any(all(feature = "http1", feature = "server"), feature = "ffi"))] + pub(super) fn kind(&self) -> &Kind { &self.inner.kind } - fn find_source(&self) -> Option<&E> { + pub(crate) fn find_source(&self) -> Option<&E> { let mut cause = self.source(); while let Some(err) = cause { if let Some(ref typed) = err.downcast_ref() { @@ -175,7 +235,8 @@ impl Error { None } - pub(crate) fn h2_reason(&self) -> h2::Reason { + #[cfg(feature = "http2")] + pub(super) fn h2_reason(&self) -> h2::Reason { // Find an h2::Reason somewhere in the cause stack, if it exists, // otherwise assume an INTERNAL_ERROR. 
self.find_source::() @@ -183,108 +244,151 @@ impl Error { .unwrap_or(h2::Reason::INTERNAL_ERROR) } - pub(crate) fn new_canceled() -> Error { + pub(super) fn new_canceled() -> Error { Error::new(Kind::Canceled) } - pub(crate) fn new_incomplete() -> Error { + #[cfg(feature = "http1")] + pub(super) fn new_incomplete() -> Error { Error::new(Kind::IncompleteMessage) } - pub(crate) fn new_too_large() -> Error { + #[cfg(feature = "http1")] + pub(super) fn new_too_large() -> Error { Error::new(Kind::Parse(Parse::TooLarge)) } - pub(crate) fn new_version_h2() -> Error { + #[cfg(feature = "http1")] + pub(super) fn new_version_h2() -> Error { Error::new(Kind::Parse(Parse::VersionH2)) } - pub(crate) fn new_unexpected_message() -> Error { + #[cfg(feature = "http1")] + pub(super) fn new_unexpected_message() -> Error { Error::new(Kind::UnexpectedMessage) } - pub(crate) fn new_io(cause: io::Error) -> Error { + #[cfg(any(feature = "http1", feature = "http2"))] + pub(super) fn new_io(cause: std::io::Error) -> Error { Error::new(Kind::Io).with(cause) } - #[cfg(feature = "tcp")] - pub(crate) fn new_listen>(cause: E) -> Error { + #[cfg(all(feature = "server", feature = "tcp"))] + pub(super) fn new_listen>(cause: E) -> Error { Error::new(Kind::Listen).with(cause) } - pub(crate) fn new_accept>(cause: E) -> Error { + #[cfg(any(feature = "http1", feature = "http2"))] + #[cfg(feature = "server")] + pub(super) fn new_accept>(cause: E) -> Error { Error::new(Kind::Accept).with(cause) } - pub(crate) fn new_connect>(cause: E) -> Error { + #[cfg(any(feature = "http1", feature = "http2"))] + #[cfg(feature = "client")] + pub(super) fn new_connect>(cause: E) -> Error { Error::new(Kind::Connect).with(cause) } - pub(crate) fn new_closed() -> Error { + pub(super) fn new_closed() -> Error { Error::new(Kind::ChannelClosed) } - pub(crate) fn new_body>(cause: E) -> Error { + #[cfg(any(feature = "http1", feature = "http2", feature = "stream"))] + pub(super) fn new_body>(cause: E) -> Error { Error::new(Kind::Body).with(cause) } - pub(crate) fn new_body_write>(cause: E) -> Error { + #[cfg(any(feature = "http1", feature = "http2"))] + pub(super) fn new_body_write>(cause: E) -> Error { Error::new(Kind::BodyWrite).with(cause) } - pub(crate) fn new_body_write_aborted() -> Error { - Error::new(Kind::BodyWriteAborted) + pub(super) fn new_body_write_aborted() -> Error { + Error::new(Kind::User(User::BodyWriteAborted)) } fn new_user(user: User) -> Error { Error::new(Kind::User(user)) } - pub(crate) fn new_user_header() -> Error { + #[cfg(any(feature = "http1", feature = "http2"))] + #[cfg(feature = "server")] + pub(super) fn new_user_header() -> Error { Error::new_user(User::UnexpectedHeader) } - pub(crate) fn new_user_unsupported_version() -> Error { + #[cfg(all(feature = "http1", feature = "server", feature = "runtime"))] + pub(super) fn new_header_timeout() -> Error { + Error::new(Kind::HeaderTimeout) + } + + #[cfg(any(feature = "http1", feature = "http2"))] + #[cfg(feature = "client")] + pub(super) fn new_user_unsupported_version() -> Error { Error::new_user(User::UnsupportedVersion) } - pub(crate) fn new_user_unsupported_request_method() -> Error { + #[cfg(any(feature = "http1", feature = "http2"))] + #[cfg(feature = "client")] + pub(super) fn new_user_unsupported_request_method() -> Error { Error::new_user(User::UnsupportedRequestMethod) } - pub(crate) fn new_user_unsupported_status_code() -> Error { + #[cfg(feature = "http1")] + #[cfg(feature = "server")] + pub(super) fn new_user_unsupported_status_code() -> Error { 
Error::new_user(User::UnsupportedStatusCode) } - pub(crate) fn new_user_absolute_uri_required() -> Error { + #[cfg(any(feature = "http1", feature = "http2"))] + #[cfg(feature = "client")] + pub(super) fn new_user_absolute_uri_required() -> Error { Error::new_user(User::AbsoluteUriRequired) } - pub(crate) fn new_user_no_upgrade() -> Error { + pub(super) fn new_user_no_upgrade() -> Error { Error::new_user(User::NoUpgrade) } - pub(crate) fn new_user_manual_upgrade() -> Error { + #[cfg(feature = "http1")] + pub(super) fn new_user_manual_upgrade() -> Error { Error::new_user(User::ManualUpgrade) } - pub(crate) fn new_user_make_service>(cause: E) -> Error { + #[cfg(any(feature = "http1", feature = "http2"))] + #[cfg(feature = "server")] + pub(super) fn new_user_make_service>(cause: E) -> Error { Error::new_user(User::MakeService).with(cause) } - pub(crate) fn new_user_service>(cause: E) -> Error { + #[cfg(any(feature = "http1", feature = "http2"))] + pub(super) fn new_user_service>(cause: E) -> Error { Error::new_user(User::Service).with(cause) } - pub(crate) fn new_user_body>(cause: E) -> Error { + #[cfg(any(feature = "http1", feature = "http2"))] + pub(super) fn new_user_body>(cause: E) -> Error { Error::new_user(User::Body).with(cause) } - pub(crate) fn new_shutdown(cause: io::Error) -> Error { + #[cfg(feature = "server")] + pub(super) fn new_without_shutdown_not_h1() -> Error { + Error::new(Kind::User(User::WithoutShutdownNonHttp1)) + } + + #[cfg(feature = "http1")] + pub(super) fn new_shutdown(cause: std::io::Error) -> Error { Error::new(Kind::Shutdown).with(cause) } - pub(crate) fn new_h2(cause: ::h2::Error) -> Error { + #[cfg(feature = "ffi")] + pub(super) fn new_user_aborted_by_callback() -> Error { + Error::new_user(User::AbortedByCallback) + } + + #[cfg(feature = "http2")] + pub(super) fn new_h2(cause: ::h2::Error) -> Error { if cause.is_io() { Error::new_io(cause.into_io().expect("h2::Error::is_io")) } else { @@ -292,42 +396,95 @@ impl Error { } } + /// The error's standalone message, without the message from the source. 
+ pub fn message(&self) -> impl fmt::Display + '_ { + self.description() + } + fn description(&self) -> &str { match self.inner.kind { Kind::Parse(Parse::Method) => "invalid HTTP method parsed", Kind::Parse(Parse::Version) => "invalid HTTP version parsed", + #[cfg(feature = "http1")] Kind::Parse(Parse::VersionH2) => "invalid HTTP version parsed (found HTTP2 preface)", Kind::Parse(Parse::Uri) => "invalid URI", - Kind::Parse(Parse::Header) => "invalid HTTP header parsed", + Kind::Parse(Parse::UriTooLong) => "URI too long", + Kind::Parse(Parse::Header(Header::Token)) => "invalid HTTP header parsed", + #[cfg(feature = "http1")] + Kind::Parse(Parse::Header(Header::ContentLengthInvalid)) => { + "invalid content-length parsed" + } + #[cfg(all(feature = "http1", feature = "server"))] + Kind::Parse(Parse::Header(Header::TransferEncodingInvalid)) => { + "invalid transfer-encoding parsed" + } + #[cfg(feature = "http1")] + Kind::Parse(Parse::Header(Header::TransferEncodingUnexpected)) => { + "unexpected transfer-encoding parsed" + } Kind::Parse(Parse::TooLarge) => "message head is too large", Kind::Parse(Parse::Status) => "invalid HTTP status-code parsed", + Kind::Parse(Parse::Internal) => { + "internal error inside Hyper and/or its dependencies, please report" + } Kind::IncompleteMessage => "connection closed before message completed", + #[cfg(feature = "http1")] Kind::UnexpectedMessage => "received unexpected message from connection", Kind::ChannelClosed => "channel closed", Kind::Connect => "error trying to connect", Kind::Canceled => "operation was canceled", - #[cfg(feature = "tcp")] + #[cfg(all(feature = "server", feature = "tcp"))] Kind::Listen => "error creating server listener", + #[cfg(any(feature = "http1", feature = "http2"))] + #[cfg(feature = "server")] Kind::Accept => "error accepting connection", + #[cfg(all(feature = "http1", feature = "server", feature = "runtime"))] + Kind::HeaderTimeout => "read header from client timeout", + #[cfg(any(feature = "http1", feature = "http2", feature = "stream"))] Kind::Body => "error reading a body from connection", + #[cfg(any(feature = "http1", feature = "http2"))] Kind::BodyWrite => "error writing a body to connection", - Kind::BodyWriteAborted => "body write aborted", + #[cfg(feature = "http1")] Kind::Shutdown => "error shutting down connection", + #[cfg(feature = "http2")] Kind::Http2 => "http2 error", + #[cfg(any(feature = "http1", feature = "http2"))] Kind::Io => "connection error", + #[cfg(any(feature = "http1", feature = "http2"))] Kind::User(User::Body) => "error from user's HttpBody stream", + Kind::User(User::BodyWriteAborted) => "user body write aborted", + #[cfg(any(feature = "http1", feature = "http2"))] + #[cfg(feature = "server")] Kind::User(User::MakeService) => "error from user's MakeService", + #[cfg(any(feature = "http1", feature = "http2"))] Kind::User(User::Service) => "error from user's Service", + #[cfg(any(feature = "http1", feature = "http2"))] + #[cfg(feature = "server")] Kind::User(User::UnexpectedHeader) => "user sent unexpected header", + #[cfg(any(feature = "http1", feature = "http2"))] + #[cfg(feature = "client")] Kind::User(User::UnsupportedVersion) => "request has unsupported HTTP version", + #[cfg(any(feature = "http1", feature = "http2"))] + #[cfg(feature = "client")] Kind::User(User::UnsupportedRequestMethod) => "request has unsupported HTTP method", + #[cfg(feature = "http1")] + #[cfg(feature = "server")] Kind::User(User::UnsupportedStatusCode) => { "response has 1xx status code, not supported by server" } + 
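// Illustrative sketch, not part of the vendored sources: what the new
// `message()` accessor gives callers compared to `Display`, which also appends
// the wrapped cause. `log_hyper_error` is an illustrative name.
use std::error::Error as _;

fn log_hyper_error(err: &hyper::Error) {
    // hyper's own description only, e.g. "connection error".
    eprintln!("hyper: {}", err.message());
    // The wrapped cause, if any (io error, h2 error, user body error, ...).
    if let Some(cause) = err.source() {
        eprintln!("caused by: {}", cause);
    }
}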
#[cfg(any(feature = "http1", feature = "http2"))] + #[cfg(feature = "client")] Kind::User(User::AbsoluteUriRequired) => "client requires absolute-form URIs", Kind::User(User::NoUpgrade) => "no upgrade available", + #[cfg(feature = "http1")] Kind::User(User::ManualUpgrade) => "upgrade expected but low level API in use", + #[cfg(feature = "server")] + Kind::User(User::WithoutShutdownNonHttp1) => { + "without_shutdown() called on a non-HTTP/1 connection" + } + #[cfg(feature = "ffi")] + Kind::User(User::AbortedByCallback) => "operation aborted by an application callback", } } } @@ -369,13 +526,29 @@ impl From for Error { } } +#[cfg(feature = "http1")] +impl Parse { + pub(crate) fn content_length_invalid() -> Self { + Parse::Header(Header::ContentLengthInvalid) + } + + #[cfg(all(feature = "http1", feature = "server"))] + pub(crate) fn transfer_encoding_invalid() -> Self { + Parse::Header(Header::TransferEncodingInvalid) + } + + pub(crate) fn transfer_encoding_unexpected() -> Self { + Parse::Header(Header::TransferEncodingUnexpected) + } +} + impl From for Parse { fn from(err: httparse::Error) -> Parse { match err { httparse::Error::HeaderName | httparse::Error::HeaderValue | httparse::Error::NewLine - | httparse::Error::Token => Parse::Header, + | httparse::Error::Token => Parse::Header(Header::Token), httparse::Error::Status => Parse::Status, httparse::Error::TooManyHeaders => Parse::TooLarge, httparse::Error::Version => Parse::Version, @@ -432,18 +605,21 @@ mod tests { assert_eq!(mem::size_of::(), mem::size_of::()); } + #[cfg(feature = "http2")] #[test] fn h2_reason_unknown() { let closed = Error::new_closed(); assert_eq!(closed.h2_reason(), h2::Reason::INTERNAL_ERROR); } + #[cfg(feature = "http2")] #[test] fn h2_reason_one_level() { let body_err = Error::new_user_body(h2::Error::from(h2::Reason::ENHANCE_YOUR_CALM)); assert_eq!(body_err.h2_reason(), h2::Reason::ENHANCE_YOUR_CALM); } + #[cfg(feature = "http2")] #[test] fn h2_reason_nested() { let recvd = Error::new_h2(h2::Error::from(h2::Reason::HTTP_1_1_REQUIRED)); diff --git a/third_party/rust/hyper/src/ext.rs b/third_party/rust/hyper/src/ext.rs new file mode 100644 index 000000000000..e9d458778423 --- /dev/null +++ b/third_party/rust/hyper/src/ext.rs @@ -0,0 +1,122 @@ +//! HTTP extensions. + +use bytes::Bytes; +#[cfg(feature = "http1")] +use http::header::{HeaderName, IntoHeaderName, ValueIter}; +use http::HeaderMap; +#[cfg(feature = "http2")] +use std::fmt; + +#[cfg(feature = "http2")] +/// Represents the `:protocol` pseudo-header used by +/// the [Extended CONNECT Protocol]. +/// +/// [Extended CONNECT Protocol]: https://datatracker.ietf.org/doc/html/rfc8441#section-4 +#[derive(Clone, Eq, PartialEq)] +pub struct Protocol { + inner: h2::ext::Protocol, +} + +#[cfg(feature = "http2")] +impl Protocol { + /// Converts a static string to a protocol name. + pub const fn from_static(value: &'static str) -> Self { + Self { + inner: h2::ext::Protocol::from_static(value), + } + } + + /// Returns a str representation of the header. 
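// Illustrative sketch, not part of the vendored sources: reading the
// `:protocol` pseudo-header surfaced by the `Protocol` extension above on a
// server handling HTTP/2 Extended CONNECT (RFC 8441). Assumes the `http2`
// feature and that hyper exposes the value through the request's extensions;
// `is_websocket_connect` is an illustrative name.
fn is_websocket_connect(req: &hyper::Request<hyper::Body>) -> bool {
    req.method() == &hyper::Method::CONNECT
        && req
            .extensions()
            .get::<hyper::ext::Protocol>()
            .map(|p| p.as_str() == "websocket")
            .unwrap_or(false)
}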
+ pub fn as_str(&self) -> &str { + self.inner.as_str() + } + + pub(crate) fn from_inner(inner: h2::ext::Protocol) -> Self { + Self { inner } + } + + pub(crate) fn into_inner(self) -> h2::ext::Protocol { + self.inner + } +} + +#[cfg(feature = "http2")] +impl<'a> From<&'a str> for Protocol { + fn from(value: &'a str) -> Self { + Self { + inner: h2::ext::Protocol::from(value), + } + } +} + +#[cfg(feature = "http2")] +impl AsRef<[u8]> for Protocol { + fn as_ref(&self) -> &[u8] { + self.inner.as_ref() + } +} + +#[cfg(feature = "http2")] +impl fmt::Debug for Protocol { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.inner.fmt(f) + } +} + +/// A map from header names to their original casing as received in an HTTP message. +/// +/// If an HTTP/1 response `res` is parsed on a connection whose option +/// [`http1_preserve_header_case`] was set to true and the response included +/// the following headers: +/// +/// ```ignore +/// x-Bread: Baguette +/// X-BREAD: Pain +/// x-bread: Ficelle +/// ``` +/// +/// Then `res.extensions().get::()` will return a map with: +/// +/// ```ignore +/// HeaderCaseMap({ +/// "x-bread": ["x-Bread", "X-BREAD", "x-bread"], +/// }) +/// ``` +/// +/// [`http1_preserve_header_case`]: /client/struct.Client.html#method.http1_preserve_header_case +#[derive(Clone, Debug)] +pub(crate) struct HeaderCaseMap(HeaderMap); + +#[cfg(feature = "http1")] +impl HeaderCaseMap { + /// Returns a view of all spellings associated with that header name, + /// in the order they were found. + pub(crate) fn get_all<'a>( + &'a self, + name: &HeaderName, + ) -> impl Iterator + 'a> + 'a { + self.get_all_internal(name).into_iter() + } + + /// Returns a view of all spellings associated with that header name, + /// in the order they were found. + pub(crate) fn get_all_internal<'a>(&'a self, name: &HeaderName) -> ValueIter<'_, Bytes> { + self.0.get_all(name).into_iter() + } + + pub(crate) fn default() -> Self { + Self(Default::default()) + } + + #[cfg(any(test, feature = "ffi"))] + pub(crate) fn insert(&mut self, name: HeaderName, orig: Bytes) { + self.0.insert(name, orig); + } + + pub(crate) fn append(&mut self, name: N, orig: Bytes) + where + N: IntoHeaderName, + { + self.0.append(name, orig); + } +} diff --git a/third_party/rust/hyper/src/ffi/body.rs b/third_party/rust/hyper/src/ffi/body.rs new file mode 100644 index 000000000000..39ba5beffb54 --- /dev/null +++ b/third_party/rust/hyper/src/ffi/body.rs @@ -0,0 +1,229 @@ +use std::ffi::c_void; +use std::mem::ManuallyDrop; +use std::ptr; +use std::task::{Context, Poll}; + +use http::HeaderMap; +use libc::{c_int, size_t}; + +use super::task::{hyper_context, hyper_task, hyper_task_return_type, AsTaskType}; +use super::{UserDataPointer, HYPER_ITER_CONTINUE}; +use crate::body::{Body, Bytes, HttpBody as _}; + +/// A streaming HTTP body. +pub struct hyper_body(pub(super) Body); + +/// A buffer of bytes that is sent or received on a `hyper_body`. +pub struct hyper_buf(pub(crate) Bytes); + +pub(crate) struct UserBody { + data_func: hyper_body_data_callback, + userdata: *mut c_void, +} + +// ===== Body ===== + +type hyper_body_foreach_callback = extern "C" fn(*mut c_void, *const hyper_buf) -> c_int; + +type hyper_body_data_callback = + extern "C" fn(*mut c_void, *mut hyper_context<'_>, *mut *mut hyper_buf) -> c_int; + +ffi_fn! { + /// Create a new "empty" body. + /// + /// If not configured, this body acts as an empty payload. 
+ fn hyper_body_new() -> *mut hyper_body { + Box::into_raw(Box::new(hyper_body(Body::empty()))) + } ?= ptr::null_mut() +} + +ffi_fn! { + /// Free a `hyper_body *`. + fn hyper_body_free(body: *mut hyper_body) { + drop(non_null!(Box::from_raw(body) ?= ())); + } +} + +ffi_fn! { + /// Return a task that will poll the body for the next buffer of data. + /// + /// The task value may have different types depending on the outcome: + /// + /// - `HYPER_TASK_BUF`: Success, and more data was received. + /// - `HYPER_TASK_ERROR`: An error retrieving the data. + /// - `HYPER_TASK_EMPTY`: The body has finished streaming data. + /// + /// This does not consume the `hyper_body *`, so it may be used to again. + /// However, it MUST NOT be used or freed until the related task completes. + fn hyper_body_data(body: *mut hyper_body) -> *mut hyper_task { + // This doesn't take ownership of the Body, so don't allow destructor + let mut body = ManuallyDrop::new(non_null!(Box::from_raw(body) ?= ptr::null_mut())); + + Box::into_raw(hyper_task::boxed(async move { + body.0.data().await.map(|res| res.map(hyper_buf)) + })) + } ?= ptr::null_mut() +} + +ffi_fn! { + /// Return a task that will poll the body and execute the callback with each + /// body chunk that is received. + /// + /// The `hyper_buf` pointer is only a borrowed reference, it cannot live outside + /// the execution of the callback. You must make a copy to retain it. + /// + /// The callback should return `HYPER_ITER_CONTINUE` to continue iterating + /// chunks as they are received, or `HYPER_ITER_BREAK` to cancel. + /// + /// This will consume the `hyper_body *`, you shouldn't use it anymore or free it. + fn hyper_body_foreach(body: *mut hyper_body, func: hyper_body_foreach_callback, userdata: *mut c_void) -> *mut hyper_task { + let mut body = non_null!(Box::from_raw(body) ?= ptr::null_mut()); + let userdata = UserDataPointer(userdata); + + Box::into_raw(hyper_task::boxed(async move { + while let Some(item) = body.0.data().await { + let chunk = item?; + if HYPER_ITER_CONTINUE != func(userdata.0, &hyper_buf(chunk)) { + return Err(crate::Error::new_user_aborted_by_callback()); + } + } + Ok(()) + })) + } ?= ptr::null_mut() +} + +ffi_fn! { + /// Set userdata on this body, which will be passed to callback functions. + fn hyper_body_set_userdata(body: *mut hyper_body, userdata: *mut c_void) { + let b = non_null!(&mut *body ?= ()); + b.0.as_ffi_mut().userdata = userdata; + } +} + +ffi_fn! { + /// Set the data callback for this body. + /// + /// The callback is called each time hyper needs to send more data for the + /// body. It is passed the value from `hyper_body_set_userdata`. + /// + /// If there is data available, the `hyper_buf **` argument should be set + /// to a `hyper_buf *` containing the data, and `HYPER_POLL_READY` should + /// be returned. + /// + /// Returning `HYPER_POLL_READY` while the `hyper_buf **` argument points + /// to `NULL` will indicate the body has completed all data. + /// + /// If there is more data to send, but it isn't yet available, a + /// `hyper_waker` should be saved from the `hyper_context *` argument, and + /// `HYPER_POLL_PENDING` should be returned. You must wake the saved waker + /// to signal the task when data is available. + /// + /// If some error has occurred, you can return `HYPER_POLL_ERROR` to abort + /// the body. 
+ fn hyper_body_set_data_func(body: *mut hyper_body, func: hyper_body_data_callback) { + let b = non_null!{ &mut *body ?= () }; + b.0.as_ffi_mut().data_func = func; + } +} + +// ===== impl UserBody ===== + +impl UserBody { + pub(crate) fn new() -> UserBody { + UserBody { + data_func: data_noop, + userdata: std::ptr::null_mut(), + } + } + + pub(crate) fn poll_data(&mut self, cx: &mut Context<'_>) -> Poll>> { + let mut out = std::ptr::null_mut(); + match (self.data_func)(self.userdata, hyper_context::wrap(cx), &mut out) { + super::task::HYPER_POLL_READY => { + if out.is_null() { + Poll::Ready(None) + } else { + let buf = unsafe { Box::from_raw(out) }; + Poll::Ready(Some(Ok(buf.0))) + } + } + super::task::HYPER_POLL_PENDING => Poll::Pending, + super::task::HYPER_POLL_ERROR => { + Poll::Ready(Some(Err(crate::Error::new_body_write_aborted()))) + } + unexpected => Poll::Ready(Some(Err(crate::Error::new_body_write(format!( + "unexpected hyper_body_data_func return code {}", + unexpected + ))))), + } + } + + pub(crate) fn poll_trailers( + &mut self, + _cx: &mut Context<'_>, + ) -> Poll>> { + Poll::Ready(Ok(None)) + } +} + +/// cbindgen:ignore +extern "C" fn data_noop( + _userdata: *mut c_void, + _: *mut hyper_context<'_>, + _: *mut *mut hyper_buf, +) -> c_int { + super::task::HYPER_POLL_READY +} + +unsafe impl Send for UserBody {} +unsafe impl Sync for UserBody {} + +// ===== Bytes ===== + +ffi_fn! { + /// Create a new `hyper_buf *` by copying the provided bytes. + /// + /// This makes an owned copy of the bytes, so the `buf` argument can be + /// freed or changed afterwards. + /// + /// This returns `NULL` if allocating a new buffer fails. + fn hyper_buf_copy(buf: *const u8, len: size_t) -> *mut hyper_buf { + let slice = unsafe { + std::slice::from_raw_parts(buf, len) + }; + Box::into_raw(Box::new(hyper_buf(Bytes::copy_from_slice(slice)))) + } ?= ptr::null_mut() +} + +ffi_fn! { + /// Get a pointer to the bytes in this buffer. + /// + /// This should be used in conjunction with `hyper_buf_len` to get the length + /// of the bytes data. + /// + /// This pointer is borrowed data, and not valid once the `hyper_buf` is + /// consumed/freed. + fn hyper_buf_bytes(buf: *const hyper_buf) -> *const u8 { + unsafe { (*buf).0.as_ptr() } + } ?= ptr::null() +} + +ffi_fn! { + /// Get the length of the bytes this buffer contains. + fn hyper_buf_len(buf: *const hyper_buf) -> size_t { + unsafe { (*buf).0.len() } + } +} + +ffi_fn! { + /// Free this buffer. + fn hyper_buf_free(buf: *mut hyper_buf) { + drop(unsafe { Box::from_raw(buf) }); + } +} + +unsafe impl AsTaskType for hyper_buf { + fn as_task_type(&self) -> hyper_task_return_type { + hyper_task_return_type::HYPER_TASK_BUF + } +} diff --git a/third_party/rust/hyper/src/ffi/client.rs b/third_party/rust/hyper/src/ffi/client.rs new file mode 100644 index 000000000000..1e5f29d548be --- /dev/null +++ b/third_party/rust/hyper/src/ffi/client.rs @@ -0,0 +1,162 @@ +use std::ptr; +use std::sync::Arc; + +use libc::c_int; + +use crate::client::conn; +use crate::rt::Executor as _; + +use super::error::hyper_code; +use super::http_types::{hyper_request, hyper_response}; +use super::io::hyper_io; +use super::task::{hyper_executor, hyper_task, hyper_task_return_type, AsTaskType, WeakExec}; + +/// An options builder to configure an HTTP client connection. +pub struct hyper_clientconn_options { + builder: conn::Builder, + /// Use a `Weak` to prevent cycles. + exec: WeakExec, +} + +/// An HTTP client connection handle. 
+/// +/// These are used to send a request on a single connection. It's possible to +/// send multiple requests on a single connection, such as when HTTP/1 +/// keep-alive or HTTP/2 is used. +pub struct hyper_clientconn { + tx: conn::SendRequest, +} + +// ===== impl hyper_clientconn ===== + +ffi_fn! { + /// Starts an HTTP client connection handshake using the provided IO transport + /// and options. + /// + /// Both the `io` and the `options` are consumed in this function call. + /// + /// The returned `hyper_task *` must be polled with an executor until the + /// handshake completes, at which point the value can be taken. + fn hyper_clientconn_handshake(io: *mut hyper_io, options: *mut hyper_clientconn_options) -> *mut hyper_task { + let options = non_null! { Box::from_raw(options) ?= ptr::null_mut() }; + let io = non_null! { Box::from_raw(io) ?= ptr::null_mut() }; + + Box::into_raw(hyper_task::boxed(async move { + options.builder.handshake::<_, crate::Body>(io) + .await + .map(|(tx, conn)| { + options.exec.execute(Box::pin(async move { + let _ = conn.await; + })); + hyper_clientconn { tx } + }) + })) + } ?= std::ptr::null_mut() +} + +ffi_fn! { + /// Send a request on the client connection. + /// + /// Returns a task that needs to be polled until it is ready. When ready, the + /// task yields a `hyper_response *`. + fn hyper_clientconn_send(conn: *mut hyper_clientconn, req: *mut hyper_request) -> *mut hyper_task { + let mut req = non_null! { Box::from_raw(req) ?= ptr::null_mut() }; + + // Update request with original-case map of headers + req.finalize_request(); + + let fut = non_null! { &mut *conn ?= ptr::null_mut() }.tx.send_request(req.0); + + let fut = async move { + fut.await.map(hyper_response::wrap) + }; + + Box::into_raw(hyper_task::boxed(fut)) + } ?= std::ptr::null_mut() +} + +ffi_fn! { + /// Free a `hyper_clientconn *`. + fn hyper_clientconn_free(conn: *mut hyper_clientconn) { + drop(non_null! { Box::from_raw(conn) ?= () }); + } +} + +unsafe impl AsTaskType for hyper_clientconn { + fn as_task_type(&self) -> hyper_task_return_type { + hyper_task_return_type::HYPER_TASK_CLIENTCONN + } +} + +// ===== impl hyper_clientconn_options ===== + +ffi_fn! { + /// Creates a new set of HTTP clientconn options to be used in a handshake. + fn hyper_clientconn_options_new() -> *mut hyper_clientconn_options { + let mut builder = conn::Builder::new(); + builder.http1_preserve_header_case(true); + + Box::into_raw(Box::new(hyper_clientconn_options { + builder, + exec: WeakExec::new(), + })) + } ?= std::ptr::null_mut() +} + +ffi_fn! { + /// Free a `hyper_clientconn_options *`. + fn hyper_clientconn_options_free(opts: *mut hyper_clientconn_options) { + drop(non_null! { Box::from_raw(opts) ?= () }); + } +} + +ffi_fn! { + /// Set the client background task executor. + /// + /// This does not consume the `options` or the `exec`. + fn hyper_clientconn_options_exec(opts: *mut hyper_clientconn_options, exec: *const hyper_executor) { + let opts = non_null! { &mut *opts ?= () }; + + let exec = non_null! { Arc::from_raw(exec) ?= () }; + let weak_exec = hyper_executor::downgrade(&exec); + std::mem::forget(exec); + + opts.builder.executor(weak_exec.clone()); + opts.exec = weak_exec; + } +} + +ffi_fn! { + /// Set the whether to use HTTP2. + /// + /// Pass `0` to disable, `1` to enable. + fn hyper_clientconn_options_http2(opts: *mut hyper_clientconn_options, enabled: c_int) -> hyper_code { + #[cfg(feature = "http2")] + { + let opts = non_null! 
{ &mut *opts ?= hyper_code::HYPERE_INVALID_ARG }; + opts.builder.http2_only(enabled != 0); + hyper_code::HYPERE_OK + } + + #[cfg(not(feature = "http2"))] + { + drop(opts); + drop(enabled); + hyper_code::HYPERE_FEATURE_NOT_ENABLED + } + } +} + +ffi_fn! { + /// Set the whether to include a copy of the raw headers in responses + /// received on this connection. + /// + /// Pass `0` to disable, `1` to enable. + /// + /// If enabled, see `hyper_response_headers_raw()` for usage. + fn hyper_clientconn_options_headers_raw(opts: *mut hyper_clientconn_options, enabled: c_int) -> hyper_code { + let opts = non_null! { &mut *opts ?= hyper_code::HYPERE_INVALID_ARG }; + opts.builder.http1_headers_raw(enabled != 0); + hyper_code::HYPERE_OK + } +} diff --git a/third_party/rust/hyper/src/ffi/error.rs b/third_party/rust/hyper/src/ffi/error.rs new file mode 100644 index 000000000000..015e595aee14 --- /dev/null +++ b/third_party/rust/hyper/src/ffi/error.rs @@ -0,0 +1,85 @@ +use libc::size_t; + +/// A more detailed error object returned by some hyper functions. +pub struct hyper_error(crate::Error); + +/// A return code for many of hyper's methods. +#[repr(C)] +pub enum hyper_code { + /// All is well. + HYPERE_OK, + /// General error, details in the `hyper_error *`. + HYPERE_ERROR, + /// A function argument was invalid. + HYPERE_INVALID_ARG, + /// The IO transport returned an EOF when one wasn't expected. + /// + /// This typically means an HTTP request or response was expected, but the + /// connection closed cleanly without sending (all of) it. + HYPERE_UNEXPECTED_EOF, + /// Aborted by a user supplied callback. + HYPERE_ABORTED_BY_CALLBACK, + /// An optional hyper feature was not enabled. + #[cfg_attr(feature = "http2", allow(unused))] + HYPERE_FEATURE_NOT_ENABLED, + /// The peer sent an HTTP message that could not be parsed. + HYPERE_INVALID_PEER_MESSAGE, +} + +// ===== impl hyper_error ===== + +impl hyper_error { + fn code(&self) -> hyper_code { + use crate::error::Kind as ErrorKind; + use crate::error::User; + + match self.0.kind() { + ErrorKind::Parse(_) => hyper_code::HYPERE_INVALID_PEER_MESSAGE, + ErrorKind::IncompleteMessage => hyper_code::HYPERE_UNEXPECTED_EOF, + ErrorKind::User(User::AbortedByCallback) => hyper_code::HYPERE_ABORTED_BY_CALLBACK, + // TODO: add more variants + _ => hyper_code::HYPERE_ERROR, + } + } + + fn print_to(&self, dst: &mut [u8]) -> usize { + use std::io::Write; + + let mut dst = std::io::Cursor::new(dst); + + // A write! error doesn't matter. As much as possible will have been + // written, and the Cursor position will know how far that is (even + // if that is zero). + let _ = write!(dst, "{}", &self.0); + dst.position() as usize + } +} + +ffi_fn! { + /// Frees a `hyper_error`. + fn hyper_error_free(err: *mut hyper_error) { + drop(non_null!(Box::from_raw(err) ?= ())); + } +} + +ffi_fn! { + /// Get an equivalent `hyper_code` from this error. + fn hyper_error_code(err: *const hyper_error) -> hyper_code { + non_null!(&*err ?= hyper_code::HYPERE_INVALID_ARG).code() + } +} + +ffi_fn! { + /// Print the details of this error to a buffer. + /// + /// The `dst_len` value must be the maximum length that the buffer can + /// store. + /// + /// The return value is number of bytes that were written to `dst`. 
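The `print_to` helper above is a small, reusable pattern: write into a `Cursor` over the caller's buffer and report the cursor position, so truncation is handled for free. A standalone sketch of just that pattern:

```rust
use std::io::Write;

// Write `msg` into a fixed-size buffer, returning how many bytes were written.
fn print_to(msg: &str, dst: &mut [u8]) -> usize {
    let mut dst = std::io::Cursor::new(dst);
    // A write! error only means the buffer filled up; the cursor position
    // still records how much made it in (possibly zero).
    let _ = write!(dst, "{}", msg);
    dst.position() as usize
}

fn main() {
    let mut buf = [0u8; 8];
    let n = print_to("parse error", &mut buf);
    assert_eq!(n, 8); // truncated to the buffer length
    assert_eq!(&buf[..n], b"parse er");
}
```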
+ fn hyper_error_print(err: *const hyper_error, dst: *mut u8, dst_len: size_t) -> size_t { + let dst = unsafe { + std::slice::from_raw_parts_mut(dst, dst_len) + }; + non_null!(&*err ?= 0).print_to(dst) + } +} diff --git a/third_party/rust/hyper/src/ffi/http_types.rs b/third_party/rust/hyper/src/ffi/http_types.rs new file mode 100644 index 000000000000..f6d32947bfc3 --- /dev/null +++ b/third_party/rust/hyper/src/ffi/http_types.rs @@ -0,0 +1,558 @@ +use bytes::Bytes; +use libc::{c_int, size_t}; +use std::ffi::c_void; + +use super::body::{hyper_body, hyper_buf}; +use super::error::hyper_code; +use super::task::{hyper_task_return_type, AsTaskType}; +use super::{UserDataPointer, HYPER_ITER_CONTINUE}; +use crate::ext::HeaderCaseMap; +use crate::header::{HeaderName, HeaderValue}; +use crate::{Body, HeaderMap, Method, Request, Response, Uri}; + +/// An HTTP request. +pub struct hyper_request(pub(super) Request); + +/// An HTTP response. +pub struct hyper_response(pub(super) Response); + +/// An HTTP header map. +/// +/// These can be part of a request or response. +pub struct hyper_headers { + pub(super) headers: HeaderMap, + orig_casing: HeaderCaseMap, +} + +#[derive(Debug)] +pub(crate) struct ReasonPhrase(pub(crate) Bytes); + +pub(crate) struct RawHeaders(pub(crate) hyper_buf); + +pub(crate) struct OnInformational { + func: hyper_request_on_informational_callback, + data: UserDataPointer, +} + +type hyper_request_on_informational_callback = extern "C" fn(*mut c_void, *mut hyper_response); + +// ===== impl hyper_request ===== + +ffi_fn! { + /// Construct a new HTTP request. + fn hyper_request_new() -> *mut hyper_request { + Box::into_raw(Box::new(hyper_request(Request::new(Body::empty())))) + } ?= std::ptr::null_mut() +} + +ffi_fn! { + /// Free an HTTP request if not going to send it on a client. + fn hyper_request_free(req: *mut hyper_request) { + drop(non_null!(Box::from_raw(req) ?= ())); + } +} + +ffi_fn! { + /// Set the HTTP Method of the request. + fn hyper_request_set_method(req: *mut hyper_request, method: *const u8, method_len: size_t) -> hyper_code { + let bytes = unsafe { + std::slice::from_raw_parts(method, method_len as usize) + }; + let req = non_null!(&mut *req ?= hyper_code::HYPERE_INVALID_ARG); + match Method::from_bytes(bytes) { + Ok(m) => { + *req.0.method_mut() = m; + hyper_code::HYPERE_OK + }, + Err(_) => { + hyper_code::HYPERE_INVALID_ARG + } + } + } +} + +ffi_fn! { + /// Set the URI of the request. + /// + /// The request's URI is best described as the `request-target` from the RFCs. So in HTTP/1, + /// whatever is set will get sent as-is in the first line (GET $uri HTTP/1.1). It + /// supports the 4 defined variants, origin-form, absolute-form, authority-form, and + /// asterisk-form. + /// + /// The underlying type was built to efficiently support HTTP/2 where the request-target is + /// split over :scheme, :authority, and :path. As such, each part can be set explicitly, or the + /// type can parse a single contiguous string and if a scheme is found, that slot is "set". If + /// the string just starts with a path, only the path portion is set. All pseudo headers that + /// have been parsed/set are sent when the connection type is HTTP/2. + /// + /// To set each slot explicitly, use `hyper_request_set_uri_parts`. 
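For readers unfamiliar with `http::Uri`, a standalone sketch of the behaviour the documentation above relies on: an absolute-form string populates scheme, authority and path, an origin-form string populates only the path, and the same parts can be assembled explicitly, which is what `hyper_request_set_uri_parts` (below) does:

```rust
use http::Uri;

fn main() {
    // absolute-form: scheme, authority and path are all "set".
    let absolute: Uri = "https://example.com/base?query=1".parse().unwrap();
    assert_eq!(absolute.scheme_str(), Some("https"));
    assert_eq!(absolute.authority().map(|a| a.as_str()), Some("example.com"));
    assert_eq!(absolute.path(), "/base");

    // origin-form: only the path_and_query slot is "set".
    let origin_form: Uri = "/index.html".parse().unwrap();
    assert!(origin_form.scheme_str().is_none());
    assert!(origin_form.authority().is_none());
    assert_eq!(origin_form.path(), "/index.html");

    // Setting each slot explicitly, as hyper_request_set_uri_parts does.
    let built = Uri::builder()
        .scheme("https")
        .authority("example.com")
        .path_and_query("/base?query=1")
        .build()
        .unwrap();
    assert_eq!(built.to_string(), "https://example.com/base?query=1");
}
```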
+ fn hyper_request_set_uri(req: *mut hyper_request, uri: *const u8, uri_len: size_t) -> hyper_code { + let bytes = unsafe { + std::slice::from_raw_parts(uri, uri_len as usize) + }; + let req = non_null!(&mut *req ?= hyper_code::HYPERE_INVALID_ARG); + match Uri::from_maybe_shared(bytes) { + Ok(u) => { + *req.0.uri_mut() = u; + hyper_code::HYPERE_OK + }, + Err(_) => { + hyper_code::HYPERE_INVALID_ARG + } + } + } +} + +ffi_fn! { + /// Set the URI of the request with separate scheme, authority, and + /// path/query strings. + /// + /// Each of `scheme`, `authority`, and `path_and_query` should either be + /// null, to skip providing a component, or point to a UTF-8 encoded + /// string. If any string pointer argument is non-null, its corresponding + /// `len` parameter must be set to the string's length. + fn hyper_request_set_uri_parts( + req: *mut hyper_request, + scheme: *const u8, + scheme_len: size_t, + authority: *const u8, + authority_len: size_t, + path_and_query: *const u8, + path_and_query_len: size_t + ) -> hyper_code { + let mut builder = Uri::builder(); + if !scheme.is_null() { + let scheme_bytes = unsafe { + std::slice::from_raw_parts(scheme, scheme_len as usize) + }; + builder = builder.scheme(scheme_bytes); + } + if !authority.is_null() { + let authority_bytes = unsafe { + std::slice::from_raw_parts(authority, authority_len as usize) + }; + builder = builder.authority(authority_bytes); + } + if !path_and_query.is_null() { + let path_and_query_bytes = unsafe { + std::slice::from_raw_parts(path_and_query, path_and_query_len as usize) + }; + builder = builder.path_and_query(path_and_query_bytes); + } + match builder.build() { + Ok(u) => { + *unsafe { &mut *req }.0.uri_mut() = u; + hyper_code::HYPERE_OK + }, + Err(_) => { + hyper_code::HYPERE_INVALID_ARG + } + } + } +} + +ffi_fn! { + /// Set the preferred HTTP version of the request. + /// + /// The version value should be one of the `HYPER_HTTP_VERSION_` constants. + /// + /// Note that this won't change the major HTTP version of the connection, + /// since that is determined at the handshake step. + fn hyper_request_set_version(req: *mut hyper_request, version: c_int) -> hyper_code { + use http::Version; + + let req = non_null!(&mut *req ?= hyper_code::HYPERE_INVALID_ARG); + *req.0.version_mut() = match version { + super::HYPER_HTTP_VERSION_NONE => Version::HTTP_11, + super::HYPER_HTTP_VERSION_1_0 => Version::HTTP_10, + super::HYPER_HTTP_VERSION_1_1 => Version::HTTP_11, + super::HYPER_HTTP_VERSION_2 => Version::HTTP_2, + _ => { + // We don't know this version + return hyper_code::HYPERE_INVALID_ARG; + } + }; + hyper_code::HYPERE_OK + } +} + +ffi_fn! { + /// Gets a reference to the HTTP headers of this request + /// + /// This is not an owned reference, so it should not be accessed after the + /// `hyper_request` has been consumed. + fn hyper_request_headers(req: *mut hyper_request) -> *mut hyper_headers { + hyper_headers::get_or_default(unsafe { &mut *req }.0.extensions_mut()) + } ?= std::ptr::null_mut() +} + +ffi_fn! { + /// Set the body of the request. + /// + /// The default is an empty body. + /// + /// This takes ownership of the `hyper_body *`, you must not use it or + /// free it after setting it on the request. + fn hyper_request_set_body(req: *mut hyper_request, body: *mut hyper_body) -> hyper_code { + let body = non_null!(Box::from_raw(body) ?= hyper_code::HYPERE_INVALID_ARG); + let req = non_null!(&mut *req ?= hyper_code::HYPERE_INVALID_ARG); + *req.0.body_mut() = body.0; + hyper_code::HYPERE_OK + } +} + +ffi_fn! 
{ + /// Set an informational (1xx) response callback. + /// + /// The callback is called each time hyper receives an informational (1xx) + /// response for this request. + /// + /// The third argument is an opaque user data pointer, which is passed to + /// the callback each time. + /// + /// The callback is passed the `void *` data pointer, and a + /// `hyper_response *` which can be inspected as any other response. The + /// body of the response will always be empty. + /// + /// NOTE: The `hyper_response *` is just borrowed data, and will not + /// be valid after the callback finishes. You must copy any data you wish + /// to persist. + fn hyper_request_on_informational(req: *mut hyper_request, callback: hyper_request_on_informational_callback, data: *mut c_void) -> hyper_code { + let ext = OnInformational { + func: callback, + data: UserDataPointer(data), + }; + let req = non_null!(&mut *req ?= hyper_code::HYPERE_INVALID_ARG); + req.0.extensions_mut().insert(ext); + hyper_code::HYPERE_OK + } +} + +impl hyper_request { + pub(super) fn finalize_request(&mut self) { + if let Some(headers) = self.0.extensions_mut().remove::() { + *self.0.headers_mut() = headers.headers; + self.0.extensions_mut().insert(headers.orig_casing); + } + } +} + +// ===== impl hyper_response ===== + +ffi_fn! { + /// Free an HTTP response after using it. + fn hyper_response_free(resp: *mut hyper_response) { + drop(non_null!(Box::from_raw(resp) ?= ())); + } +} + +ffi_fn! { + /// Get the HTTP-Status code of this response. + /// + /// It will always be within the range of 100-599. + fn hyper_response_status(resp: *const hyper_response) -> u16 { + non_null!(&*resp ?= 0).0.status().as_u16() + } +} + +ffi_fn! { + /// Get a pointer to the reason-phrase of this response. + /// + /// This buffer is not null-terminated. + /// + /// This buffer is owned by the response, and should not be used after + /// the response has been freed. + /// + /// Use `hyper_response_reason_phrase_len()` to get the length of this + /// buffer. + fn hyper_response_reason_phrase(resp: *const hyper_response) -> *const u8 { + non_null!(&*resp ?= std::ptr::null()).reason_phrase().as_ptr() + } ?= std::ptr::null() +} + +ffi_fn! { + /// Get the length of the reason-phrase of this response. + /// + /// Use `hyper_response_reason_phrase()` to get the buffer pointer. + fn hyper_response_reason_phrase_len(resp: *const hyper_response) -> size_t { + non_null!(&*resp ?= 0).reason_phrase().len() + } +} + +ffi_fn! { + /// Get a reference to the full raw headers of this response. + /// + /// You must have enabled `hyper_clientconn_options_headers_raw()`, or this + /// will return NULL. + /// + /// The returned `hyper_buf *` is just a reference, owned by the response. + /// You need to make a copy if you wish to use it after freeing the + /// response. + /// + /// The buffer is not null-terminated, see the `hyper_buf` functions for + /// getting the bytes and length. + fn hyper_response_headers_raw(resp: *const hyper_response) -> *const hyper_buf { + let resp = non_null!(&*resp ?= std::ptr::null()); + match resp.0.extensions().get::() { + Some(raw) => &raw.0, + None => std::ptr::null(), + } + } ?= std::ptr::null() +} + +ffi_fn! { + /// Get the HTTP version used by this response. + /// + /// The returned value could be: + /// + /// - `HYPER_HTTP_VERSION_1_0` + /// - `HYPER_HTTP_VERSION_1_1` + /// - `HYPER_HTTP_VERSION_2` + /// - `HYPER_HTTP_VERSION_NONE` if newer (or older). 
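A standalone sketch of the mapping the function below implements, using the integer values that `ffi/mod.rs` defines later in this patch (`0`, `10`, `11`, `20`):

```rust
use http::Version;

const HYPER_HTTP_VERSION_NONE: i32 = 0;
const HYPER_HTTP_VERSION_1_0: i32 = 10;
const HYPER_HTTP_VERSION_1_1: i32 = 11;
const HYPER_HTTP_VERSION_2: i32 = 20;

fn version_to_c(v: Version) -> i32 {
    match v {
        Version::HTTP_10 => HYPER_HTTP_VERSION_1_0,
        Version::HTTP_11 => HYPER_HTTP_VERSION_1_1,
        Version::HTTP_2 => HYPER_HTTP_VERSION_2,
        // HTTP/0.9, HTTP/3, or anything newer is reported as NONE.
        _ => HYPER_HTTP_VERSION_NONE,
    }
}

fn main() {
    assert_eq!(version_to_c(Version::HTTP_11), 11);
    assert_eq!(version_to_c(Version::HTTP_09), 0);
}
```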
+ fn hyper_response_version(resp: *const hyper_response) -> c_int { + use http::Version; + + match non_null!(&*resp ?= 0).0.version() { + Version::HTTP_10 => super::HYPER_HTTP_VERSION_1_0, + Version::HTTP_11 => super::HYPER_HTTP_VERSION_1_1, + Version::HTTP_2 => super::HYPER_HTTP_VERSION_2, + _ => super::HYPER_HTTP_VERSION_NONE, + } + } +} + +ffi_fn! { + /// Gets a reference to the HTTP headers of this response. + /// + /// This is not an owned reference, so it should not be accessed after the + /// `hyper_response` has been freed. + fn hyper_response_headers(resp: *mut hyper_response) -> *mut hyper_headers { + hyper_headers::get_or_default(unsafe { &mut *resp }.0.extensions_mut()) + } ?= std::ptr::null_mut() +} + +ffi_fn! { + /// Take ownership of the body of this response. + /// + /// It is safe to free the response even after taking ownership of its body. + fn hyper_response_body(resp: *mut hyper_response) -> *mut hyper_body { + let body = std::mem::take(non_null!(&mut *resp ?= std::ptr::null_mut()).0.body_mut()); + Box::into_raw(Box::new(hyper_body(body))) + } ?= std::ptr::null_mut() +} + +impl hyper_response { + pub(super) fn wrap(mut resp: Response) -> hyper_response { + let headers = std::mem::take(resp.headers_mut()); + let orig_casing = resp + .extensions_mut() + .remove::() + .unwrap_or_else(HeaderCaseMap::default); + resp.extensions_mut().insert(hyper_headers { + headers, + orig_casing, + }); + + hyper_response(resp) + } + + fn reason_phrase(&self) -> &[u8] { + if let Some(reason) = self.0.extensions().get::() { + return &reason.0; + } + + if let Some(reason) = self.0.status().canonical_reason() { + return reason.as_bytes(); + } + + &[] + } +} + +unsafe impl AsTaskType for hyper_response { + fn as_task_type(&self) -> hyper_task_return_type { + hyper_task_return_type::HYPER_TASK_RESPONSE + } +} + +// ===== impl Headers ===== + +type hyper_headers_foreach_callback = + extern "C" fn(*mut c_void, *const u8, size_t, *const u8, size_t) -> c_int; + +impl hyper_headers { + pub(super) fn get_or_default(ext: &mut http::Extensions) -> &mut hyper_headers { + if let None = ext.get_mut::() { + ext.insert(hyper_headers::default()); + } + + ext.get_mut::().unwrap() + } +} + +ffi_fn! { + /// Iterates the headers passing each name and value pair to the callback. + /// + /// The `userdata` pointer is also passed to the callback. + /// + /// The callback should return `HYPER_ITER_CONTINUE` to keep iterating, or + /// `HYPER_ITER_BREAK` to stop. + fn hyper_headers_foreach(headers: *const hyper_headers, func: hyper_headers_foreach_callback, userdata: *mut c_void) { + let headers = non_null!(&*headers ?= ()); + // For each header name/value pair, there may be a value in the casemap + // that corresponds to the HeaderValue. So, we iterator all the keys, + // and for each one, try to pair the originally cased name with the value. + // + // TODO: consider adding http::HeaderMap::entries() iterator + for name in headers.headers.keys() { + let mut names = headers.orig_casing.get_all(name); + + for value in headers.headers.get_all(name) { + let (name_ptr, name_len) = if let Some(orig_name) = names.next() { + (orig_name.as_ref().as_ptr(), orig_name.as_ref().len()) + } else { + ( + name.as_str().as_bytes().as_ptr(), + name.as_str().as_bytes().len(), + ) + }; + + let val_ptr = value.as_bytes().as_ptr(); + let val_len = value.as_bytes().len(); + + if HYPER_ITER_CONTINUE != func(userdata, name_ptr, name_len, val_ptr, val_len) { + return; + } + } + } + } +} + +ffi_fn! 
{ + /// Sets the header with the provided name to the provided value. + /// + /// This overwrites any previous value set for the header. + fn hyper_headers_set(headers: *mut hyper_headers, name: *const u8, name_len: size_t, value: *const u8, value_len: size_t) -> hyper_code { + let headers = non_null!(&mut *headers ?= hyper_code::HYPERE_INVALID_ARG); + match unsafe { raw_name_value(name, name_len, value, value_len) } { + Ok((name, value, orig_name)) => { + headers.headers.insert(&name, value); + headers.orig_casing.insert(name, orig_name); + hyper_code::HYPERE_OK + } + Err(code) => code, + } + } +} + +ffi_fn! { + /// Adds the provided value to the list of the provided name. + /// + /// If there were already existing values for the name, this will append the + /// new value to the internal list. + fn hyper_headers_add(headers: *mut hyper_headers, name: *const u8, name_len: size_t, value: *const u8, value_len: size_t) -> hyper_code { + let headers = non_null!(&mut *headers ?= hyper_code::HYPERE_INVALID_ARG); + + match unsafe { raw_name_value(name, name_len, value, value_len) } { + Ok((name, value, orig_name)) => { + headers.headers.append(&name, value); + headers.orig_casing.append(name, orig_name); + hyper_code::HYPERE_OK + } + Err(code) => code, + } + } +} + +impl Default for hyper_headers { + fn default() -> Self { + Self { + headers: Default::default(), + orig_casing: HeaderCaseMap::default(), + } + } +} + +unsafe fn raw_name_value( + name: *const u8, + name_len: size_t, + value: *const u8, + value_len: size_t, +) -> Result<(HeaderName, HeaderValue, Bytes), hyper_code> { + let name = std::slice::from_raw_parts(name, name_len); + let orig_name = Bytes::copy_from_slice(name); + let name = match HeaderName::from_bytes(name) { + Ok(name) => name, + Err(_) => return Err(hyper_code::HYPERE_INVALID_ARG), + }; + let value = std::slice::from_raw_parts(value, value_len); + let value = match HeaderValue::from_bytes(value) { + Ok(val) => val, + Err(_) => return Err(hyper_code::HYPERE_INVALID_ARG), + }; + + Ok((name, value, orig_name)) +} + +// ===== impl OnInformational ===== + +impl OnInformational { + pub(crate) fn call(&mut self, resp: Response) { + let mut resp = hyper_response::wrap(resp); + (self.func)(self.data.0, &mut resp); + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_headers_foreach_cases_preserved() { + let mut headers = hyper_headers::default(); + + let name1 = b"Set-CookiE"; + let value1 = b"a=b"; + hyper_headers_add( + &mut headers, + name1.as_ptr(), + name1.len(), + value1.as_ptr(), + value1.len(), + ); + + let name2 = b"SET-COOKIE"; + let value2 = b"c=d"; + hyper_headers_add( + &mut headers, + name2.as_ptr(), + name2.len(), + value2.as_ptr(), + value2.len(), + ); + + let mut vec = Vec::::new(); + hyper_headers_foreach(&headers, concat, &mut vec as *mut _ as *mut c_void); + + assert_eq!(vec, b"Set-CookiE: a=b\r\nSET-COOKIE: c=d\r\n"); + + extern "C" fn concat( + vec: *mut c_void, + name: *const u8, + name_len: usize, + value: *const u8, + value_len: usize, + ) -> c_int { + unsafe { + let vec = &mut *(vec as *mut Vec); + let name = std::slice::from_raw_parts(name, name_len); + let value = std::slice::from_raw_parts(value, value_len); + vec.extend(name); + vec.extend(b": "); + vec.extend(value); + vec.extend(b"\r\n"); + } + HYPER_ITER_CONTINUE + } + } +} diff --git a/third_party/rust/hyper/src/ffi/io.rs b/third_party/rust/hyper/src/ffi/io.rs new file mode 100644 index 000000000000..bff666dbcf78 --- /dev/null +++ b/third_party/rust/hyper/src/ffi/io.rs @@ -0,0 
+1,178 @@ +use std::ffi::c_void; +use std::pin::Pin; +use std::task::{Context, Poll}; + +use libc::size_t; +use tokio::io::{AsyncRead, AsyncWrite}; + +use super::task::hyper_context; + +/// Sentinel value to return from a read or write callback that the operation +/// is pending. +pub const HYPER_IO_PENDING: size_t = 0xFFFFFFFF; +/// Sentinel value to return from a read or write callback that the operation +/// has errored. +pub const HYPER_IO_ERROR: size_t = 0xFFFFFFFE; + +type hyper_io_read_callback = + extern "C" fn(*mut c_void, *mut hyper_context<'_>, *mut u8, size_t) -> size_t; +type hyper_io_write_callback = + extern "C" fn(*mut c_void, *mut hyper_context<'_>, *const u8, size_t) -> size_t; + +/// An IO object used to represent a socket or similar concept. +pub struct hyper_io { + read: hyper_io_read_callback, + write: hyper_io_write_callback, + userdata: *mut c_void, +} + +ffi_fn! { + /// Create a new IO type used to represent a transport. + /// + /// The read and write functions of this transport should be set with + /// `hyper_io_set_read` and `hyper_io_set_write`. + fn hyper_io_new() -> *mut hyper_io { + Box::into_raw(Box::new(hyper_io { + read: read_noop, + write: write_noop, + userdata: std::ptr::null_mut(), + })) + } ?= std::ptr::null_mut() +} + +ffi_fn! { + /// Free an unused `hyper_io *`. + /// + /// This is typically only useful if you aren't going to pass ownership + /// of the IO handle to hyper, such as with `hyper_clientconn_handshake()`. + fn hyper_io_free(io: *mut hyper_io) { + drop(non_null!(Box::from_raw(io) ?= ())); + } +} + +ffi_fn! { + /// Set the user data pointer for this IO to some value. + /// + /// This value is passed as an argument to the read and write callbacks. + fn hyper_io_set_userdata(io: *mut hyper_io, data: *mut c_void) { + non_null!(&mut *io ?= ()).userdata = data; + } +} + +ffi_fn! { + /// Set the read function for this IO transport. + /// + /// Data that is read from the transport should be put in the `buf` pointer, + /// up to `buf_len` bytes. The number of bytes read should be the return value. + /// + /// It is undefined behavior to try to access the bytes in the `buf` pointer, + /// unless you have already written them yourself. It is also undefined behavior + /// to return that more bytes have been written than actually set on the `buf`. + /// + /// If there is no data currently available, a waker should be claimed from + /// the `ctx` and registered with whatever polling mechanism is used to signal + /// when data is available later on. The return value should be + /// `HYPER_IO_PENDING`. + /// + /// If there is an irrecoverable error reading data, then `HYPER_IO_ERROR` + /// should be the return value. + fn hyper_io_set_read(io: *mut hyper_io, func: hyper_io_read_callback) { + non_null!(&mut *io ?= ()).read = func; + } +} + +ffi_fn! { + /// Set the write function for this IO transport. + /// + /// Data from the `buf` pointer should be written to the transport, up to + /// `buf_len` bytes. The number of bytes written should be the return value. + /// + /// If no data can currently be written, the `waker` should be cloned and + /// registered with whatever polling mechanism is used to signal when data + /// is available later on. The return value should be `HYPER_IO_PENDING`. + /// + /// Yeet. + /// + /// If there is an irrecoverable error reading data, then `HYPER_IO_ERROR` + /// should be the return value. 
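A standalone model of the read/write contract described above: the `size_t` return value is either a byte count or one of the two sentinel values, and the `AsyncRead`/`AsyncWrite` impls just below translate that into a `Poll`:

```rust
use std::task::Poll;

// Same sentinel values as HYPER_IO_PENDING / HYPER_IO_ERROR above.
const HYPER_IO_PENDING: usize = 0xFFFF_FFFF;
const HYPER_IO_ERROR: usize = 0xFFFF_FFFE;

fn interpret_io(ret: usize) -> Poll<std::io::Result<usize>> {
    match ret {
        HYPER_IO_PENDING => Poll::Pending,
        HYPER_IO_ERROR => Poll::Ready(Err(std::io::Error::new(
            std::io::ErrorKind::Other,
            "io error",
        ))),
        // Anything else is the number of bytes read or written.
        n => Poll::Ready(Ok(n)),
    }
}

fn main() {
    assert!(matches!(interpret_io(16), Poll::Ready(Ok(16))));
    assert!(matches!(interpret_io(HYPER_IO_PENDING), Poll::Pending));
    assert!(matches!(interpret_io(HYPER_IO_ERROR), Poll::Ready(Err(_))));
}
```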
+ fn hyper_io_set_write(io: *mut hyper_io, func: hyper_io_write_callback) { + non_null!(&mut *io ?= ()).write = func; + } +} + +/// cbindgen:ignore +extern "C" fn read_noop( + _userdata: *mut c_void, + _: *mut hyper_context<'_>, + _buf: *mut u8, + _buf_len: size_t, +) -> size_t { + 0 +} + +/// cbindgen:ignore +extern "C" fn write_noop( + _userdata: *mut c_void, + _: *mut hyper_context<'_>, + _buf: *const u8, + _buf_len: size_t, +) -> size_t { + 0 +} + +impl AsyncRead for hyper_io { + fn poll_read( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut tokio::io::ReadBuf<'_>, + ) -> Poll> { + let buf_ptr = unsafe { buf.unfilled_mut() }.as_mut_ptr() as *mut u8; + let buf_len = buf.remaining(); + + match (self.read)(self.userdata, hyper_context::wrap(cx), buf_ptr, buf_len) { + HYPER_IO_PENDING => Poll::Pending, + HYPER_IO_ERROR => Poll::Ready(Err(std::io::Error::new( + std::io::ErrorKind::Other, + "io error", + ))), + ok => { + // We have to trust that the user's read callback actually + // filled in that many bytes... :( + unsafe { buf.assume_init(ok) }; + buf.advance(ok); + Poll::Ready(Ok(())) + } + } + } +} + +impl AsyncWrite for hyper_io { + fn poll_write( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll> { + let buf_ptr = buf.as_ptr(); + let buf_len = buf.len(); + + match (self.write)(self.userdata, hyper_context::wrap(cx), buf_ptr, buf_len) { + HYPER_IO_PENDING => Poll::Pending, + HYPER_IO_ERROR => Poll::Ready(Err(std::io::Error::new( + std::io::ErrorKind::Other, + "io error", + ))), + ok => Poll::Ready(Ok(ok)), + } + } + + fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn poll_shutdown(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } +} + +unsafe impl Send for hyper_io {} +unsafe impl Sync for hyper_io {} diff --git a/third_party/rust/hyper/src/ffi/macros.rs b/third_party/rust/hyper/src/ffi/macros.rs new file mode 100644 index 000000000000..022711baaa53 --- /dev/null +++ b/third_party/rust/hyper/src/ffi/macros.rs @@ -0,0 +1,53 @@ +macro_rules! ffi_fn { + ($(#[$doc:meta])* fn $name:ident($($arg:ident: $arg_ty:ty),*) -> $ret:ty $body:block ?= $default:expr) => { + $(#[$doc])* + #[no_mangle] + pub extern fn $name($($arg: $arg_ty),*) -> $ret { + use std::panic::{self, AssertUnwindSafe}; + + match panic::catch_unwind(AssertUnwindSafe(move || $body)) { + Ok(v) => v, + Err(_) => { + $default + } + } + } + }; + + ($(#[$doc:meta])* fn $name:ident($($arg:ident: $arg_ty:ty),*) -> $ret:ty $body:block) => { + ffi_fn!($(#[$doc])* fn $name($($arg: $arg_ty),*) -> $ret $body ?= { + eprintln!("panic unwind caught, aborting"); + std::process::abort() + }); + }; + + ($(#[$doc:meta])* fn $name:ident($($arg:ident: $arg_ty:ty),*) $body:block ?= $default:expr) => { + ffi_fn!($(#[$doc])* fn $name($($arg: $arg_ty),*) -> () $body ?= $default); + }; + + ($(#[$doc:meta])* fn $name:ident($($arg:ident: $arg_ty:ty),*) $body:block) => { + ffi_fn!($(#[$doc])* fn $name($($arg: $arg_ty),*) -> () $body); + }; +} + +macro_rules! 
non_null { + ($ptr:ident, $eval:expr, $err:expr) => {{ + debug_assert!(!$ptr.is_null(), "{:?} must not be null", stringify!($ptr)); + if $ptr.is_null() { + return $err; + } + unsafe { $eval } + }}; + (&*$ptr:ident ?= $err:expr) => {{ + non_null!($ptr, &*$ptr, $err) + }}; + (&mut *$ptr:ident ?= $err:expr) => {{ + non_null!($ptr, &mut *$ptr, $err) + }}; + (Box::from_raw($ptr:ident) ?= $err:expr) => {{ + non_null!($ptr, Box::from_raw($ptr), $err) + }}; + (Arc::from_raw($ptr:ident) ?= $err:expr) => {{ + non_null!($ptr, Arc::from_raw($ptr), $err) + }}; +} diff --git a/third_party/rust/hyper/src/ffi/mod.rs b/third_party/rust/hyper/src/ffi/mod.rs new file mode 100644 index 000000000000..fd67a880a621 --- /dev/null +++ b/third_party/rust/hyper/src/ffi/mod.rs @@ -0,0 +1,94 @@ +// We have a lot of c-types in here, stop warning about their names! +#![allow(non_camel_case_types)] +// fmt::Debug isn't helpful on FFI types +#![allow(missing_debug_implementations)] +// unreachable_pub warns `#[no_mangle] pub extern fn` in private mod. +#![allow(unreachable_pub)] + +//! # hyper C API +//! +//! This part of the documentation describes the C API for hyper. That is, how +//! to *use* the hyper library in C code. This is **not** a regular Rust +//! module, and thus it is not accessible in Rust. +//! +//! ## Unstable +//! +//! The C API of hyper is currently **unstable**, which means it's not part of +//! the semver contract as the rest of the Rust API is. Because of that, it's +//! only accessible if `--cfg hyper_unstable_ffi` is passed to `rustc` when +//! compiling. The easiest way to do that is setting the `RUSTFLAGS` +//! environment variable. +//! +//! ## Building +//! +//! The C API is part of the Rust library, but isn't compiled by default. Using +//! `cargo`, it can be compiled with the following command: +//! +//! ```notrust +//! RUSTFLAGS="--cfg hyper_unstable_ffi" cargo build --features client,http1,http2,ffi +//! ``` + +// We may eventually allow the FFI to be enabled without `client` or `http1`, +// that is why we don't auto enable them as `ffi = ["client", "http1"]` in +// the `Cargo.toml`. +// +// But for now, give a clear message that this compile error is expected. +#[cfg(not(all(feature = "client", feature = "http1")))] +compile_error!("The `ffi` feature currently requires the `client` and `http1` features."); + +#[cfg(not(hyper_unstable_ffi))] +compile_error!( + "\ + The `ffi` feature is unstable, and requires the \ + `RUSTFLAGS='--cfg hyper_unstable_ffi'` environment variable to be set.\ +" +); + +#[macro_use] +mod macros; + +mod body; +mod client; +mod error; +mod http_types; +mod io; +mod task; + +pub use self::body::*; +pub use self::client::*; +pub use self::error::*; +pub use self::http_types::*; +pub use self::io::*; +pub use self::task::*; + +/// Return in iter functions to continue iterating. +pub const HYPER_ITER_CONTINUE: libc::c_int = 0; +/// Return in iter functions to stop iterating. +#[allow(unused)] +pub const HYPER_ITER_BREAK: libc::c_int = 1; + +/// An HTTP Version that is unspecified. +pub const HYPER_HTTP_VERSION_NONE: libc::c_int = 0; +/// The HTTP/1.0 version. +pub const HYPER_HTTP_VERSION_1_0: libc::c_int = 10; +/// The HTTP/1.1 version. +pub const HYPER_HTTP_VERSION_1_1: libc::c_int = 11; +/// The HTTP/2 version. +pub const HYPER_HTTP_VERSION_2: libc::c_int = 20; + +struct UserDataPointer(*mut std::ffi::c_void); + +// We don't actually know anything about this pointer, it's up to the user +// to do the right thing. 
+unsafe impl Send for UserDataPointer {} +unsafe impl Sync for UserDataPointer {} + +/// cbindgen:ignore +static VERSION_CSTR: &str = concat!(env!("CARGO_PKG_VERSION"), "\0"); + +ffi_fn! { + /// Returns a static ASCII (null terminated) string of the hyper version. + fn hyper_version() -> *const libc::c_char { + VERSION_CSTR.as_ptr() as _ + } ?= std::ptr::null() +} diff --git a/third_party/rust/hyper/src/ffi/task.rs b/third_party/rust/hyper/src/ffi/task.rs new file mode 100644 index 000000000000..e951e0dacc50 --- /dev/null +++ b/third_party/rust/hyper/src/ffi/task.rs @@ -0,0 +1,411 @@ +use std::ffi::c_void; +use std::future::Future; +use std::pin::Pin; +use std::ptr; +use std::sync::{ + atomic::{AtomicBool, Ordering}, + Arc, Mutex, Weak, +}; +use std::task::{Context, Poll}; + +use futures_util::stream::{FuturesUnordered, Stream}; +use libc::c_int; + +use super::error::hyper_code; +use super::UserDataPointer; + +type BoxFuture = Pin + Send>>; +type BoxAny = Box; + +/// Return in a poll function to indicate it was ready. +pub const HYPER_POLL_READY: c_int = 0; +/// Return in a poll function to indicate it is still pending. +/// +/// The passed in `hyper_waker` should be registered to wake up the task at +/// some later point. +pub const HYPER_POLL_PENDING: c_int = 1; +/// Return in a poll function indicate an error. +pub const HYPER_POLL_ERROR: c_int = 3; + +/// A task executor for `hyper_task`s. +pub struct hyper_executor { + /// The executor of all task futures. + /// + /// There should never be contention on the mutex, as it is only locked + /// to drive the futures. However, we cannot gaurantee proper usage from + /// `hyper_executor_poll()`, which in C could potentially be called inside + /// one of the stored futures. The mutex isn't re-entrant, so doing so + /// would result in a deadlock, but that's better than data corruption. + driver: Mutex>, + + /// The queue of futures that need to be pushed into the `driver`. + /// + /// This is has a separate mutex since `spawn` could be called from inside + /// a future, which would mean the driver's mutex is already locked. + spawn_queue: Mutex>, + + /// This is used to track when a future calls `wake` while we are within + /// `hyper_executor::poll_next`. + is_woken: Arc, +} + +#[derive(Clone)] +pub(crate) struct WeakExec(Weak); + +struct ExecWaker(AtomicBool); + +/// An async task. +pub struct hyper_task { + future: BoxFuture, + output: Option, + userdata: UserDataPointer, +} + +struct TaskFuture { + task: Option>, +} + +/// An async context for a task that contains the related waker. +pub struct hyper_context<'a>(Context<'a>); + +/// A waker that is saved and used to waken a pending task. +pub struct hyper_waker { + waker: std::task::Waker, +} + +/// A descriptor for what type a `hyper_task` value is. +#[repr(C)] +pub enum hyper_task_return_type { + /// The value of this task is null (does not imply an error). + HYPER_TASK_EMPTY, + /// The value of this task is `hyper_error *`. + HYPER_TASK_ERROR, + /// The value of this task is `hyper_clientconn *`. + HYPER_TASK_CLIENTCONN, + /// The value of this task is `hyper_response *`. + HYPER_TASK_RESPONSE, + /// The value of this task is `hyper_buf *`. 
+ HYPER_TASK_BUF, +} + +pub(crate) unsafe trait AsTaskType { + fn as_task_type(&self) -> hyper_task_return_type; +} + +pub(crate) trait IntoDynTaskType { + fn into_dyn_task_type(self) -> BoxAny; +} + +// ===== impl hyper_executor ===== + +impl hyper_executor { + fn new() -> Arc { + Arc::new(hyper_executor { + driver: Mutex::new(FuturesUnordered::new()), + spawn_queue: Mutex::new(Vec::new()), + is_woken: Arc::new(ExecWaker(AtomicBool::new(false))), + }) + } + + pub(crate) fn downgrade(exec: &Arc) -> WeakExec { + WeakExec(Arc::downgrade(exec)) + } + + fn spawn(&self, task: Box) { + self.spawn_queue + .lock() + .unwrap() + .push(TaskFuture { task: Some(task) }); + } + + fn poll_next(&self) -> Option> { + // Drain the queue first. + self.drain_queue(); + + let waker = futures_util::task::waker_ref(&self.is_woken); + let mut cx = Context::from_waker(&waker); + + loop { + match Pin::new(&mut *self.driver.lock().unwrap()).poll_next(&mut cx) { + Poll::Ready(val) => return val, + Poll::Pending => { + // Check if any of the pending tasks tried to spawn + // some new tasks. If so, drain into the driver and loop. + if self.drain_queue() { + continue; + } + + // If the driver called `wake` while we were polling, + // we should poll again immediately! + if self.is_woken.0.swap(false, Ordering::SeqCst) { + continue; + } + + return None; + } + } + } + } + + fn drain_queue(&self) -> bool { + let mut queue = self.spawn_queue.lock().unwrap(); + if queue.is_empty() { + return false; + } + + let driver = self.driver.lock().unwrap(); + + for task in queue.drain(..) { + driver.push(task); + } + + true + } +} + +impl futures_util::task::ArcWake for ExecWaker { + fn wake_by_ref(me: &Arc) { + me.0.store(true, Ordering::SeqCst); + } +} + +// ===== impl WeakExec ===== + +impl WeakExec { + pub(crate) fn new() -> Self { + WeakExec(Weak::new()) + } +} + +impl crate::rt::Executor> for WeakExec { + fn execute(&self, fut: BoxFuture<()>) { + if let Some(exec) = self.0.upgrade() { + exec.spawn(hyper_task::boxed(fut)); + } + } +} + +ffi_fn! { + /// Creates a new task executor. + fn hyper_executor_new() -> *const hyper_executor { + Arc::into_raw(hyper_executor::new()) + } ?= ptr::null() +} + +ffi_fn! { + /// Frees an executor and any incomplete tasks still part of it. + fn hyper_executor_free(exec: *const hyper_executor) { + drop(non_null!(Arc::from_raw(exec) ?= ())); + } +} + +ffi_fn! { + /// Push a task onto the executor. + /// + /// The executor takes ownership of the task, it should not be accessed + /// again unless returned back to the user with `hyper_executor_poll`. + fn hyper_executor_push(exec: *const hyper_executor, task: *mut hyper_task) -> hyper_code { + let exec = non_null!(&*exec ?= hyper_code::HYPERE_INVALID_ARG); + let task = non_null!(Box::from_raw(task) ?= hyper_code::HYPERE_INVALID_ARG); + exec.spawn(task); + hyper_code::HYPERE_OK + } +} + +ffi_fn! { + /// Polls the executor, trying to make progress on any tasks that have notified + /// that they are ready again. + /// + /// If ready, returns a task from the executor that has completed. + /// + /// If there are no ready tasks, this returns `NULL`. 
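A minimal standalone sketch (using the `futures-util` crate, outside this patch) of the polling arrangement `hyper_executor::poll_next` above builds: a `FuturesUnordered` driven with a flag-setting `ArcWake` waker, where a completed future is handed back to the caller and an empty set yields nothing:

```rust
use std::pin::Pin;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::task::{Context, Poll};

use futures_util::stream::{FuturesUnordered, Stream};
use futures_util::task::{self, ArcWake};

// A waker that just records "something woke us", like ExecWaker above.
struct Flag(AtomicBool);

impl ArcWake for Flag {
    fn wake_by_ref(me: &Arc<Self>) {
        me.0.store(true, Ordering::SeqCst);
    }
}

fn main() {
    let mut ready: FuturesUnordered<_> = FuturesUnordered::new();
    ready.push(async { 7u32 });

    let flag = Arc::new(Flag(AtomicBool::new(false)));
    let waker = task::waker(flag.clone());
    let mut cx = Context::from_waker(&waker);

    // An already-ready future completes on the first poll.
    match Pin::new(&mut ready).poll_next(&mut cx) {
        Poll::Ready(Some(v)) => assert_eq!(v, 7),
        other => panic!("unexpected poll result: {:?}", other),
    }

    // With nothing left to drive, poll_next reports None, which the FFI
    // layer surfaces to C as a NULL hyper_task pointer.
    assert!(matches!(Pin::new(&mut ready).poll_next(&mut cx), Poll::Ready(None)));
}
```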
+ fn hyper_executor_poll(exec: *const hyper_executor) -> *mut hyper_task { + let exec = non_null!(&*exec ?= ptr::null_mut()); + match exec.poll_next() { + Some(task) => Box::into_raw(task), + None => ptr::null_mut(), + } + } ?= ptr::null_mut() +} + +// ===== impl hyper_task ===== + +impl hyper_task { + pub(crate) fn boxed(fut: F) -> Box + where + F: Future + Send + 'static, + F::Output: IntoDynTaskType + Send + Sync + 'static, + { + Box::new(hyper_task { + future: Box::pin(async move { fut.await.into_dyn_task_type() }), + output: None, + userdata: UserDataPointer(ptr::null_mut()), + }) + } + + fn output_type(&self) -> hyper_task_return_type { + match self.output { + None => hyper_task_return_type::HYPER_TASK_EMPTY, + Some(ref val) => val.as_task_type(), + } + } +} + +impl Future for TaskFuture { + type Output = Box; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + match Pin::new(&mut self.task.as_mut().unwrap().future).poll(cx) { + Poll::Ready(val) => { + let mut task = self.task.take().unwrap(); + task.output = Some(val); + Poll::Ready(task) + } + Poll::Pending => Poll::Pending, + } + } +} + +ffi_fn! { + /// Free a task. + fn hyper_task_free(task: *mut hyper_task) { + drop(non_null!(Box::from_raw(task) ?= ())); + } +} + +ffi_fn! { + /// Takes the output value of this task. + /// + /// This must only be called once polling the task on an executor has finished + /// this task. + /// + /// Use `hyper_task_type` to determine the type of the `void *` return value. + fn hyper_task_value(task: *mut hyper_task) -> *mut c_void { + let task = non_null!(&mut *task ?= ptr::null_mut()); + + if let Some(val) = task.output.take() { + let p = Box::into_raw(val) as *mut c_void; + // protect from returning fake pointers to empty types + if p == std::ptr::NonNull::::dangling().as_ptr() { + ptr::null_mut() + } else { + p + } + } else { + ptr::null_mut() + } + } ?= ptr::null_mut() +} + +ffi_fn! { + /// Query the return type of this task. + fn hyper_task_type(task: *mut hyper_task) -> hyper_task_return_type { + // instead of blowing up spectacularly, just say this null task + // doesn't have a value to retrieve. + non_null!(&*task ?= hyper_task_return_type::HYPER_TASK_EMPTY).output_type() + } +} + +ffi_fn! { + /// Set a user data pointer to be associated with this task. + /// + /// This value will be passed to task callbacks, and can be checked later + /// with `hyper_task_userdata`. + fn hyper_task_set_userdata(task: *mut hyper_task, userdata: *mut c_void) { + if task.is_null() { + return; + } + + unsafe { (*task).userdata = UserDataPointer(userdata) }; + } +} + +ffi_fn! { + /// Retrieve the userdata that has been set via `hyper_task_set_userdata`. 
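One subtle detail in `hyper_task_value` above: boxing a zero-sized value performs no allocation, so `Box::into_raw` yields the aligned dangling sentinel rather than a real heap address, and that pointer must not be handed to C as though it pointed at data. A small illustration of why the comparison against `NonNull::dangling()` works in practice:

```rust
use std::ffi::c_void;
use std::ptr::NonNull;

fn main() {
    // Boxing `()` does not allocate; the raw pointer is a dangling,
    // well-aligned sentinel rather than a real heap address.
    let raw = Box::into_raw(Box::new(())) as *mut c_void;
    let dangling = NonNull::<()>::dangling().as_ptr() as *mut c_void;

    // In practice the two compare equal, which is the check hyper_task_value
    // relies on to avoid returning a fake pointer to C.
    println!("raw = {:?}, dangling = {:?}, equal = {}", raw, dangling, raw == dangling);

    // Re-box to keep ownership balanced (a no-op for a zero-sized type).
    unsafe { drop(Box::from_raw(raw as *mut ())) };
}
```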
+ fn hyper_task_userdata(task: *mut hyper_task) -> *mut c_void { + non_null!(&*task ?= ptr::null_mut()).userdata.0 + } ?= ptr::null_mut() +} + +// ===== impl AsTaskType ===== + +unsafe impl AsTaskType for () { + fn as_task_type(&self) -> hyper_task_return_type { + hyper_task_return_type::HYPER_TASK_EMPTY + } +} + +unsafe impl AsTaskType for crate::Error { + fn as_task_type(&self) -> hyper_task_return_type { + hyper_task_return_type::HYPER_TASK_ERROR + } +} + +impl IntoDynTaskType for T +where + T: AsTaskType + Send + Sync + 'static, +{ + fn into_dyn_task_type(self) -> BoxAny { + Box::new(self) + } +} + +impl IntoDynTaskType for crate::Result +where + T: IntoDynTaskType + Send + Sync + 'static, +{ + fn into_dyn_task_type(self) -> BoxAny { + match self { + Ok(val) => val.into_dyn_task_type(), + Err(err) => Box::new(err), + } + } +} + +impl IntoDynTaskType for Option +where + T: IntoDynTaskType + Send + Sync + 'static, +{ + fn into_dyn_task_type(self) -> BoxAny { + match self { + Some(val) => val.into_dyn_task_type(), + None => ().into_dyn_task_type(), + } + } +} + +// ===== impl hyper_context ===== + +impl hyper_context<'_> { + pub(crate) fn wrap<'a, 'b>(cx: &'a mut Context<'b>) -> &'a mut hyper_context<'b> { + // A struct with only one field has the same layout as that field. + unsafe { std::mem::transmute::<&mut Context<'_>, &mut hyper_context<'_>>(cx) } + } +} + +ffi_fn! { + /// Copies a waker out of the task context. + fn hyper_context_waker(cx: *mut hyper_context<'_>) -> *mut hyper_waker { + let waker = non_null!(&mut *cx ?= ptr::null_mut()).0.waker().clone(); + Box::into_raw(Box::new(hyper_waker { waker })) + } ?= ptr::null_mut() +} + +// ===== impl hyper_waker ===== + +ffi_fn! { + /// Free a waker that hasn't been woken. + fn hyper_waker_free(waker: *mut hyper_waker) { + drop(non_null!(Box::from_raw(waker) ?= ())); + } +} + +ffi_fn! { + /// Wake up the task associated with a waker. + /// + /// NOTE: This consumes the waker. You should not use or free the waker afterwards. 
+ fn hyper_waker_wake(waker: *mut hyper_waker) { + let waker = non_null!(Box::from_raw(waker) ?= ()); + waker.waker.wake(); + } +} diff --git a/third_party/rust/hyper/src/headers.rs b/third_party/rust/hyper/src/headers.rs index 5375e78287f3..8407be185fa0 100644 --- a/third_party/rust/hyper/src/headers.rs +++ b/third_party/rust/hyper/src/headers.rs @@ -1,17 +1,22 @@ +#[cfg(feature = "http1")] use bytes::BytesMut; -use http::header::{HeaderValue, OccupiedEntry, ValueIter}; -use http::header::{CONTENT_LENGTH, TRANSFER_ENCODING}; -use http::method::Method; +use http::header::CONTENT_LENGTH; +use http::header::{HeaderValue, ValueIter}; use http::HeaderMap; +#[cfg(all(feature = "http2", feature = "client"))] +use http::Method; -pub fn connection_keep_alive(value: &HeaderValue) -> bool { +#[cfg(feature = "http1")] +pub(super) fn connection_keep_alive(value: &HeaderValue) -> bool { connection_has(value, "keep-alive") } -pub fn connection_close(value: &HeaderValue) -> bool { +#[cfg(feature = "http1")] +pub(super) fn connection_close(value: &HeaderValue) -> bool { connection_has(value, "close") } +#[cfg(feature = "http1")] fn connection_has(value: &HeaderValue, needle: &str) -> bool { if let Ok(s) = value.to_str() { for val in s.split(',') { @@ -23,59 +28,91 @@ fn connection_has(value: &HeaderValue, needle: &str) -> bool { false } -pub fn content_length_parse(value: &HeaderValue) -> Option { - value.to_str().ok().and_then(|s| s.parse().ok()) +#[cfg(all(feature = "http1", feature = "server"))] +pub(super) fn content_length_parse(value: &HeaderValue) -> Option { + from_digits(value.as_bytes()) } -pub fn content_length_parse_all(headers: &HeaderMap) -> Option { +pub(super) fn content_length_parse_all(headers: &HeaderMap) -> Option { content_length_parse_all_values(headers.get_all(CONTENT_LENGTH).into_iter()) } -pub fn content_length_parse_all_values(values: ValueIter<'_, HeaderValue>) -> Option { +pub(super) fn content_length_parse_all_values(values: ValueIter<'_, HeaderValue>) -> Option { // If multiple Content-Length headers were sent, everything can still // be alright if they all contain the same value, and all parse // correctly. If not, then it's an error. - let folded = values.fold(None, |prev, line| match prev { - Some(Ok(prev)) => Some( - line.to_str() - .map_err(|_| ()) - .and_then(|s| s.parse().map_err(|_| ())) - .and_then(|n| if prev == n { Ok(n) } else { Err(()) }), - ), - None => Some( - line.to_str() - .map_err(|_| ()) - .and_then(|s| s.parse().map_err(|_| ())), - ), - Some(Err(())) => Some(Err(())), - }); - - if let Some(Ok(n)) = folded { - Some(n) - } else { - None + let mut content_length: Option = None; + for h in values { + if let Ok(line) = h.to_str() { + for v in line.split(',') { + if let Some(n) = from_digits(v.trim().as_bytes()) { + if content_length.is_none() { + content_length = Some(n) + } else if content_length != Some(n) { + return None; + } + } else { + return None + } + } + } else { + return None + } } + + return content_length } -pub fn method_has_defined_payload_semantics(method: &Method) -> bool { +fn from_digits(bytes: &[u8]) -> Option { + // cannot use FromStr for u64, since it allows a signed prefix + let mut result = 0u64; + const RADIX: u64 = 10; + + if bytes.is_empty() { + return None; + } + + for &b in bytes { + // can't use char::to_digit, since we haven't verified these bytes + // are utf-8. + match b { + b'0'..=b'9' => { + result = result.checked_mul(RADIX)?; + result = result.checked_add((b - b'0') as u64)?; + }, + _ => { + // not a DIGIT, get outta here! 
+ return None; + } + } + } + + Some(result) +} + +#[cfg(all(feature = "http2", feature = "client"))] +pub(super) fn method_has_defined_payload_semantics(method: &Method) -> bool { match *method { Method::GET | Method::HEAD | Method::DELETE | Method::CONNECT => false, _ => true, } } -pub fn set_content_length_if_missing(headers: &mut HeaderMap, len: u64) { +#[cfg(feature = "http2")] +pub(super) fn set_content_length_if_missing(headers: &mut HeaderMap, len: u64) { headers .entry(CONTENT_LENGTH) .or_insert_with(|| HeaderValue::from(len)); } -pub fn transfer_encoding_is_chunked(headers: &HeaderMap) -> bool { - is_chunked(headers.get_all(TRANSFER_ENCODING).into_iter()) +#[cfg(feature = "http1")] +pub(super) fn transfer_encoding_is_chunked(headers: &HeaderMap) -> bool { + is_chunked(headers.get_all(http::header::TRANSFER_ENCODING).into_iter()) } -pub fn is_chunked(mut encodings: ValueIter<'_, HeaderValue>) -> bool { +#[cfg(feature = "http1")] +pub(super) fn is_chunked(mut encodings: ValueIter<'_, HeaderValue>) -> bool { // chunked must always be the last encoding, according to spec if let Some(line) = encodings.next_back() { return is_chunked_(line); @@ -84,7 +121,8 @@ pub fn is_chunked(mut encodings: ValueIter<'_, HeaderValue>) -> bool { false } -pub fn is_chunked_(value: &HeaderValue) -> bool { +#[cfg(feature = "http1")] +pub(super) fn is_chunked_(value: &HeaderValue) -> bool { // chunked must always be the last encoding, according to spec if let Ok(s) = value.to_str() { if let Some(encoding) = s.rsplit(',').next() { @@ -95,16 +133,17 @@ pub fn is_chunked_(value: &HeaderValue) -> bool { false } -pub fn add_chunked(mut entry: OccupiedEntry<'_, HeaderValue>) { +#[cfg(feature = "http1")] +pub(super) fn add_chunked(mut entry: http::header::OccupiedEntry<'_, HeaderValue>) { const CHUNKED: &str = "chunked"; if let Some(line) = entry.iter_mut().next_back() { // + 2 for ", " let new_cap = line.as_bytes().len() + CHUNKED.len() + 2; let mut buf = BytesMut::with_capacity(new_cap); - buf.copy_from_slice(line.as_bytes()); - buf.copy_from_slice(b", "); - buf.copy_from_slice(CHUNKED.as_bytes()); + buf.extend_from_slice(line.as_bytes()); + buf.extend_from_slice(b", "); + buf.extend_from_slice(CHUNKED.as_bytes()); *line = HeaderValue::from_maybe_shared(buf.freeze()) .expect("original header value plus ascii is valid"); diff --git a/third_party/rust/hyper/src/lib.rs b/third_party/rust/hyper/src/lib.rs index 068b9bae189e..f7a93a195979 100644 --- a/third_party/rust/hyper/src/lib.rs +++ b/third_party/rust/hyper/src/lib.rs @@ -1,9 +1,10 @@ -#![doc(html_root_url = "https://docs.rs/hyper/0.13.6")] #![deny(missing_docs)] #![deny(missing_debug_implementations)] #![cfg_attr(test, deny(rust_2018_idioms))] +#![cfg_attr(all(test, feature = "full"), deny(unreachable_pub))] #![cfg_attr(test, deny(warnings))] #![cfg_attr(all(test, feature = "nightly"), feature(test))] +#![cfg_attr(docsrs, feature(doc_cfg))] //! # hyper //! @@ -31,41 +32,78 @@ //! //! # Optional Features //! +//! hyper uses a set of [feature flags] to reduce the amount of compiled code. +//! It is possible to just enable certain features over others. By default, +//! hyper does not enable any features but allows one to enable a subset for +//! their use case. Below is a list of the available feature flags. You may +//! also notice above each function, struct and trait there is listed one or +//! more feature flags that are required for that item to be used. +//! +//! If you are new to hyper it is possible to enable the `full` feature flag +//! 
which will enable all public APIs. Beware though that this will pull in +//! many extra dependencies that you may not need. +//! //! The following optional features are available: //! -//! - `runtime` (*enabled by default*): Enables convenient integration with -//! `tokio`, providing connectors and acceptors for TCP, and a default -//! executor. -//! - `tcp` (*enabled by default*): Enables convenient implementations over -//! TCP (using tokio). -//! - `stream` (*enabled by default*): Provides `futures::Stream` capabilities. +//! - `http1`: Enables HTTP/1 support. +//! - `http2`: Enables HTTP/2 support. +//! - `client`: Enables the HTTP `client`. +//! - `server`: Enables the HTTP `server`. +//! - `runtime`: Enables convenient integration with `tokio`, providing +//! connectors and acceptors for TCP, and a default executor. +//! - `tcp`: Enables convenient implementations over TCP (using tokio). +//! - `stream`: Provides `futures::Stream` capabilities. +//! +//! [feature flags]: https://doc.rust-lang.org/cargo/reference/manifest.html#the-features-section #[doc(hidden)] pub use http; -#[macro_use] -extern crate log; #[cfg(all(test, feature = "nightly"))] extern crate test; -pub use http::{header, HeaderMap, Method, Request, Response, StatusCode, Uri, Version}; +pub use crate::http::{header, Method, Request, Response, StatusCode, Uri, Version}; + +#[doc(no_inline)] +pub use crate::http::HeaderMap; pub use crate::body::Body; -pub use crate::client::Client; pub use crate::error::{Error, Result}; -pub use crate::server::Server; +#[macro_use] +mod cfg; #[macro_use] mod common; pub mod body; -pub mod client; -#[doc(hidden)] // Mistakenly public... -pub mod error; -mod headers; +mod error; +pub mod ext; #[cfg(test)] mod mock; -mod proto; pub mod rt; -pub mod server; pub mod service; pub mod upgrade; + +#[cfg(feature = "ffi")] +pub mod ffi; + +cfg_proto! { + mod headers; + mod proto; +} + +cfg_feature! { + #![feature = "client"] + + pub mod client; + #[cfg(any(feature = "http1", feature = "http2"))] + #[doc(no_inline)] + pub use crate::client::Client; +} + +cfg_feature! 
{ + #![feature = "server"] + + pub mod server; + #[doc(no_inline)] + pub use crate::server::Server; +} diff --git a/third_party/rust/hyper/src/proto/h1/conn.rs b/third_party/rust/hyper/src/proto/h1/conn.rs index c8b355cd63d0..66b2cdacc3fd 100644 --- a/third_party/rust/hyper/src/proto/h1/conn.rs +++ b/third_party/rust/hyper/src/proto/h1/conn.rs @@ -1,17 +1,24 @@ use std::fmt; -use std::io::{self}; +use std::io; use std::marker::PhantomData; +#[cfg(all(feature = "server", feature = "runtime"))] +use std::time::Duration; use bytes::{Buf, Bytes}; use http::header::{HeaderValue, CONNECTION}; use http::{HeaderMap, Method, Version}; +use httparse::ParserConfig; use tokio::io::{AsyncRead, AsyncWrite}; +#[cfg(all(feature = "server", feature = "runtime"))] +use tokio::time::Sleep; +use tracing::{debug, error, trace}; use super::io::Buffered; use super::{Decoder, Encode, EncodedBuf, Encoder, Http1Transaction, ParseContext, Wants}; +use crate::body::DecodedLength; use crate::common::{task, Pin, Poll, Unpin}; use crate::headers::connection_keep_alive; -use crate::proto::{BodyLength, DecodedLength, MessageHead}; +use crate::proto::{BodyLength, MessageHead}; const H2_PREFACE: &[u8] = b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"; @@ -34,7 +41,7 @@ where B: Buf, T: Http1Transaction, { - pub fn new(io: I) -> Conn { + pub(crate) fn new(io: I) -> Conn { Conn { io: Buffered::new(io), state: State { @@ -43,7 +50,20 @@ where error: None, keep_alive: KA::Busy, method: None, + h1_parser_config: ParserConfig::default(), + #[cfg(all(feature = "server", feature = "runtime"))] + h1_header_read_timeout: None, + #[cfg(all(feature = "server", feature = "runtime"))] + h1_header_read_timeout_fut: None, + #[cfg(all(feature = "server", feature = "runtime"))] + h1_header_read_timeout_running: false, + preserve_header_case: false, title_case_headers: false, + h09_responses: false, + #[cfg(feature = "ffi")] + on_informational: None, + #[cfg(feature = "ffi")] + raw_headers: false, notify_read: false, reading: Reading::Init, writing: Writing::Init, @@ -56,47 +76,78 @@ where } } - pub fn set_flush_pipeline(&mut self, enabled: bool) { + #[cfg(feature = "server")] + pub(crate) fn set_flush_pipeline(&mut self, enabled: bool) { self.io.set_flush_pipeline(enabled); } - pub fn set_max_buf_size(&mut self, max: usize) { + pub(crate) fn set_write_strategy_queue(&mut self) { + self.io.set_write_strategy_queue(); + } + + pub(crate) fn set_max_buf_size(&mut self, max: usize) { self.io.set_max_buf_size(max); } - pub fn set_read_buf_exact_size(&mut self, sz: usize) { + #[cfg(feature = "client")] + pub(crate) fn set_read_buf_exact_size(&mut self, sz: usize) { self.io.set_read_buf_exact_size(sz); } - pub fn set_write_strategy_flatten(&mut self) { + pub(crate) fn set_write_strategy_flatten(&mut self) { self.io.set_write_strategy_flatten(); } - pub fn set_title_case_headers(&mut self) { + #[cfg(feature = "client")] + pub(crate) fn set_h1_parser_config(&mut self, parser_config: ParserConfig) { + self.state.h1_parser_config = parser_config; + } + + pub(crate) fn set_title_case_headers(&mut self) { self.state.title_case_headers = true; } + pub(crate) fn set_preserve_header_case(&mut self) { + self.state.preserve_header_case = true; + } + + #[cfg(feature = "client")] + pub(crate) fn set_h09_responses(&mut self) { + self.state.h09_responses = true; + } + + #[cfg(all(feature = "server", feature = "runtime"))] + pub(crate) fn set_http1_header_read_timeout(&mut self, val: Duration) { + self.state.h1_header_read_timeout = Some(val); + } + + #[cfg(feature = 
"server")] pub(crate) fn set_allow_half_close(&mut self) { self.state.allow_half_close = true; } - pub fn into_inner(self) -> (I, Bytes) { + #[cfg(feature = "ffi")] + pub(crate) fn set_raw_headers(&mut self, enabled: bool) { + self.state.raw_headers = enabled; + } + + pub(crate) fn into_inner(self) -> (I, Bytes) { self.io.into_inner() } - pub fn pending_upgrade(&mut self) -> Option { + pub(crate) fn pending_upgrade(&mut self) -> Option { self.state.upgrade.take() } - pub fn is_read_closed(&self) -> bool { + pub(crate) fn is_read_closed(&self) -> bool { self.state.is_read_closed() } - pub fn is_write_closed(&self) -> bool { + pub(crate) fn is_write_closed(&self) -> bool { self.state.is_write_closed() } - pub fn can_read_head(&self) -> bool { + pub(crate) fn can_read_head(&self) -> bool { match self.state.reading { Reading::Init => { if T::should_read_first() { @@ -112,7 +163,7 @@ where } } - pub fn can_read_body(&self) -> bool { + pub(crate) fn can_read_body(&self) -> bool { match self.state.reading { Reading::Body(..) | Reading::Continue(..) => true, _ => false, @@ -141,6 +192,19 @@ where ParseContext { cached_headers: &mut self.state.cached_headers, req_method: &mut self.state.method, + h1_parser_config: self.state.h1_parser_config.clone(), + #[cfg(all(feature = "server", feature = "runtime"))] + h1_header_read_timeout: self.state.h1_header_read_timeout, + #[cfg(all(feature = "server", feature = "runtime"))] + h1_header_read_timeout_fut: &mut self.state.h1_header_read_timeout_fut, + #[cfg(all(feature = "server", feature = "runtime"))] + h1_header_read_timeout_running: &mut self.state.h1_header_read_timeout_running, + preserve_header_case: self.state.preserve_header_case, + h09_responses: self.state.h09_responses, + #[cfg(feature = "ffi")] + on_informational: &mut self.state.on_informational, + #[cfg(feature = "ffi")] + raw_headers: self.state.raw_headers, } )) { Ok(msg) => msg, @@ -152,6 +216,15 @@ where debug!("incoming body is {}", msg.decode); + // Prevent accepting HTTP/0.9 responses after the initial one, if any. + self.state.h09_responses = false; + + // Drop any OnInformational callbacks, we're done there! 
+ #[cfg(feature = "ffi")] + { + self.state.on_informational = None; + } + self.state.busy(); self.state.keep_alive &= msg.keep_alive; self.state.version = msg.head.version; @@ -206,7 +279,7 @@ where } } - pub fn poll_read_body( + pub(crate) fn poll_read_body( &mut self, cx: &mut task::Context<'_>, ) -> Poll>> { @@ -214,8 +287,8 @@ where let (reading, ret) = match self.state.reading { Reading::Body(ref mut decoder) => { - match decoder.decode(cx, &mut self.io) { - Poll::Ready(Ok(slice)) => { + match ready!(decoder.decode(cx, &mut self.io)) { + Ok(slice) => { let (reading, chunk) = if decoder.is_eof() { debug!("incoming body completed"); ( @@ -237,8 +310,7 @@ where }; (reading, Poll::Ready(chunk)) } - Poll::Pending => return Poll::Pending, - Poll::Ready(Err(e)) => { + Err(e) => { debug!("incoming body decode error: {}", e); (Reading::Closed, Poll::Ready(Some(Err(e)))) } @@ -264,13 +336,16 @@ where ret } - pub fn wants_read_again(&mut self) -> bool { + pub(crate) fn wants_read_again(&mut self) -> bool { let ret = self.state.notify_read; self.state.notify_read = false; ret } - pub fn poll_read_keep_alive(&mut self, cx: &mut task::Context<'_>) -> Poll> { + pub(crate) fn poll_read_keep_alive( + &mut self, + cx: &mut task::Context<'_>, + ) -> Poll> { debug_assert!(!self.can_read_head() && !self.can_read_body()); if self.is_read_closed() { @@ -408,30 +483,30 @@ where self.maybe_notify(cx); } - pub fn can_write_head(&self) -> bool { + pub(crate) fn can_write_head(&self) -> bool { if !T::should_read_first() { if let Reading::Closed = self.state.reading { return false; } } match self.state.writing { - Writing::Init => true, + Writing::Init => self.io.can_headers_buf(), _ => false, } } - pub fn can_write_body(&self) -> bool { + pub(crate) fn can_write_body(&self) -> bool { match self.state.writing { Writing::Body(..) => true, Writing::Init | Writing::KeepAlive | Writing::Closed => false, } } - pub fn can_buffer_body(&self) -> bool { + pub(crate) fn can_buffer_body(&self) -> bool { self.io.can_buffer() } - pub fn write_head(&mut self, head: MessageHead, body: Option) { + pub(crate) fn write_head(&mut self, head: MessageHead, body: Option) { if let Some(encoder) = self.encode_head(head, body) { self.state.writing = if !encoder.is_eof() { Writing::Body(encoder) @@ -443,7 +518,7 @@ where } } - pub fn write_full_msg(&mut self, head: MessageHead, body: B) { + pub(crate) fn write_full_msg(&mut self, head: MessageHead, body: B) { if let Some(encoder) = self.encode_head(head, Some(BodyLength::Known(body.remaining() as u64))) { @@ -475,10 +550,11 @@ where self.enforce_version(&mut head); let buf = self.io.headers_buf(); - match T::encode( + match super::role::encode_headers::( Encode { head: &mut head, body, + #[cfg(feature = "server")] keep_alive: self.state.wants_keep_alive(), req_method: &mut self.state.method, title_case_headers: self.state.title_case_headers, @@ -489,6 +565,13 @@ where debug_assert!(self.state.cached_headers.is_none()); debug_assert!(head.headers.is_empty()); self.state.cached_headers = Some(head.headers); + + #[cfg(feature = "ffi")] + { + self.state.on_informational = + head.extensions.remove::(); + } + Some(encoder) } Err(err) => { @@ -540,7 +623,7 @@ where // the user's headers be. 
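The header-read-timeout plumbing threaded through Conn and ParseContext in this hunk boils down to keeping a pinned tokio Sleep next to the parser state: arm it when header reading starts, poll it while the head is incomplete, and once the head parses push its deadline far into the future rather than dropping it, so the stale timer never wakes the task. A rough, standalone sketch of that pattern (the struct and method names are made up, and it assumes tokio with the rt, macros and time features):

    use std::pin::Pin;
    use std::time::Duration;
    use tokio::time::{sleep, Instant, Sleep};

    struct HeaderTimeout {
        fut: Option<Pin<Box<Sleep>>>,
        running: bool,
    }

    impl HeaderTimeout {
        // Arm the deadline when we start reading a new request head.
        fn arm(&mut self, dur: Duration) {
            self.fut = Some(Box::pin(sleep(dur)));
            self.running = true;
        }

        // Once the head has parsed, park the timer instead of dropping it,
        // mirroring the "reset ~30 days out" trick in the patch, so the
        // already-registered wakeup does not fire spuriously.
        fn disarm(&mut self) {
            self.running = false;
            if let Some(fut) = self.fut.as_mut() {
                fut.as_mut()
                    .reset(Instant::now() + Duration::from_secs(30 * 24 * 60 * 60));
            }
        }
    }

    #[tokio::main]
    async fn main() {
        let mut timeout = HeaderTimeout { fut: None, running: false };
        timeout.arm(Duration::from_secs(5));
        // ...poll the socket for header bytes; if the Sleep completes first
        // while `running` is set, fail the read with a timeout error...
        timeout.disarm();
    }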
} - pub fn write_body(&mut self, chunk: B) { + pub(crate) fn write_body(&mut self, chunk: B) { debug_assert!(self.can_write_body() && self.can_buffer_body()); // empty chunks should be discarded at Dispatcher level debug_assert!(chunk.remaining() != 0); @@ -565,7 +648,7 @@ where self.state.writing = state; } - pub fn write_body_and_end(&mut self, chunk: B) { + pub(crate) fn write_body_and_end(&mut self, chunk: B) { debug_assert!(self.can_write_body() && self.can_buffer_body()); // empty chunks should be discarded at Dispatcher level debug_assert!(chunk.remaining() != 0); @@ -585,9 +668,10 @@ where self.state.writing = state; } - pub fn end_body(&mut self) { + pub(crate) fn end_body(&mut self) -> crate::Result<()> { debug_assert!(self.can_write_body()); + let mut res = Ok(()); let state = match self.state.writing { Writing::Body(ref mut encoder) => { // end of stream, that means we should try to eof @@ -596,19 +680,23 @@ where if let Some(end) = end { self.io.buffer(end); } - if encoder.is_last() { + if encoder.is_last() || encoder.is_close_delimited() { Writing::Closed } else { Writing::KeepAlive } } - Err(_not_eof) => Writing::Closed, + Err(not_eof) => { + res = Err(crate::Error::new_body_write_aborted().with(not_eof)); + Writing::Closed + } } } - _ => return, + _ => return Ok(()), }; self.state.writing = state; + res } // When we get a parse error, depending on what side we are, we might be able @@ -635,14 +723,14 @@ where Err(err) } - pub fn poll_flush(&mut self, cx: &mut task::Context<'_>) -> Poll> { + pub(crate) fn poll_flush(&mut self, cx: &mut task::Context<'_>) -> Poll> { ready!(Pin::new(&mut self.io).poll_flush(cx))?; self.try_keep_alive(cx); trace!("flushed({}): {:?}", T::LOG, self.state); Poll::Ready(Ok(())) } - pub fn poll_shutdown(&mut self, cx: &mut task::Context<'_>) -> Poll> { + pub(crate) fn poll_shutdown(&mut self, cx: &mut task::Context<'_>) -> Poll> { match ready!(Pin::new(self.io.io_mut()).poll_shutdown(cx)) { Ok(()) => { trace!("shut down IO complete"); @@ -669,15 +757,16 @@ where } } - pub fn close_read(&mut self) { + pub(crate) fn close_read(&mut self) { self.state.close_read(); } - pub fn close_write(&mut self) { + pub(crate) fn close_write(&mut self) { self.state.close_write(); } - pub fn disable_keep_alive(&mut self) { + #[cfg(feature = "server")] + pub(crate) fn disable_keep_alive(&mut self) { if self.state.is_idle() { trace!("disable_keep_alive; closing idle connection"); self.state.close(); @@ -687,7 +776,7 @@ where } } - pub fn take_error(&mut self) -> crate::Result<()> { + pub(crate) fn take_error(&mut self) -> crate::Result<()> { if let Some(err) = self.state.error.take() { Err(err) } else { @@ -727,7 +816,23 @@ struct State { /// This is used to know things such as if the message can include /// a body or not. method: Option, + h1_parser_config: ParserConfig, + #[cfg(all(feature = "server", feature = "runtime"))] + h1_header_read_timeout: Option, + #[cfg(all(feature = "server", feature = "runtime"))] + h1_header_read_timeout_fut: Option>>, + #[cfg(all(feature = "server", feature = "runtime"))] + h1_header_read_timeout_running: bool, + preserve_header_case: bool, title_case_headers: bool, + h09_responses: bool, + /// If set, called with each 1xx informational response received for + /// the current request. MUST be unset after a non-1xx response is + /// received. + #[cfg(feature = "ffi")] + on_informational: Option, + #[cfg(feature = "ffi")] + raw_headers: bool, /// Set to true when the Dispatcher should poll read operations /// again. 
See the `maybe_notify` method for more. notify_read: bool, @@ -958,9 +1063,8 @@ mod tests { *conn.io.read_buf_mut() = ::bytes::BytesMut::from(&s[..]); conn.state.cached_headers = Some(HeaderMap::with_capacity(2)); - let mut rt = tokio::runtime::Builder::new() + let rt = tokio::runtime::Builder::new_current_thread() .enable_all() - .basic_scheduler() .build() .unwrap(); diff --git a/third_party/rust/hyper/src/proto/h1/date.rs b/third_party/rust/hyper/src/proto/h1/date.rs deleted file mode 100644 index 3e972d6e00ed..000000000000 --- a/third_party/rust/hyper/src/proto/h1/date.rs +++ /dev/null @@ -1,82 +0,0 @@ -use std::cell::RefCell; -use std::fmt::{self, Write}; -use std::str; - -use http::header::HeaderValue; -use time::{self, Duration}; - -// "Sun, 06 Nov 1994 08:49:37 GMT".len() -pub const DATE_VALUE_LENGTH: usize = 29; - -pub fn extend(dst: &mut Vec) { - CACHED.with(|cache| { - dst.extend_from_slice(cache.borrow().buffer()); - }) -} - -pub fn update() { - CACHED.with(|cache| { - cache.borrow_mut().check(); - }) -} - -pub(crate) fn update_and_header_value() -> HeaderValue { - CACHED.with(|cache| { - let mut cache = cache.borrow_mut(); - cache.check(); - HeaderValue::from_bytes(cache.buffer()).expect("Date format should be valid HeaderValue") - }) -} - -struct CachedDate { - bytes: [u8; DATE_VALUE_LENGTH], - pos: usize, - next_update: time::Timespec, -} - -thread_local!(static CACHED: RefCell = RefCell::new(CachedDate::new())); - -impl CachedDate { - fn new() -> Self { - let mut cache = CachedDate { - bytes: [0; DATE_VALUE_LENGTH], - pos: 0, - next_update: time::Timespec::new(0, 0), - }; - cache.update(time::get_time()); - cache - } - - fn buffer(&self) -> &[u8] { - &self.bytes[..] - } - - fn check(&mut self) { - let now = time::get_time(); - if now > self.next_update { - self.update(now); - } - } - - fn update(&mut self, now: time::Timespec) { - self.pos = 0; - let _ = write!(self, "{}", time::at_utc(now).rfc822()); - debug_assert!(self.pos == DATE_VALUE_LENGTH); - self.next_update = now + Duration::seconds(1); - self.next_update.nsec = 0; - } -} - -impl fmt::Write for CachedDate { - fn write_str(&mut self, s: &str) -> fmt::Result { - let len = s.len(); - self.bytes[self.pos..self.pos + len].copy_from_slice(s.as_bytes()); - self.pos += len; - Ok(()) - } -} - -#[test] -fn test_date_len() { - assert_eq!(DATE_VALUE_LENGTH, "Sun, 06 Nov 1994 08:49:37 GMT".len()); -} diff --git a/third_party/rust/hyper/src/proto/h1/decode.rs b/third_party/rust/hyper/src/proto/h1/decode.rs index beaf9aff7ad7..1e3a38effc44 100644 --- a/third_party/rust/hyper/src/proto/h1/decode.rs +++ b/third_party/rust/hyper/src/proto/h1/decode.rs @@ -4,6 +4,7 @@ use std::io; use std::usize; use bytes::Bytes; +use tracing::{debug, trace}; use crate::common::{task, Poll}; @@ -17,7 +18,7 @@ use self::Kind::{Chunked, Eof, Length}; /// If a message body does not include a Transfer-Encoding, it *should* /// include a Content-Length header. 
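The test hunk above also shows the tokio 0.2 to 1.x runtime-builder rename this update carries throughout: Builder::new().basic_scheduler() becomes Builder::new_current_thread(), and block_on no longer needs a mutable runtime. As a standalone snippet (same shape as the vendored test helper):

    fn new_runtime() -> tokio::runtime::Runtime {
        tokio::runtime::Builder::new_current_thread()
            .enable_all()
            .build()
            .expect("rt build")
    }

    fn main() {
        let rt = new_runtime();
        rt.block_on(async {
            println!("running on a current-thread tokio 1.x runtime");
        });
    }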
#[derive(Clone, PartialEq)] -pub struct Decoder { +pub(crate) struct Decoder { kind: Kind, } @@ -55,6 +56,8 @@ enum ChunkedState { Body, BodyCr, BodyLf, + Trailer, + TrailerLf, EndCr, EndLf, End, @@ -63,19 +66,19 @@ enum ChunkedState { impl Decoder { // constructors - pub fn length(x: u64) -> Decoder { + pub(crate) fn length(x: u64) -> Decoder { Decoder { kind: Kind::Length(x), } } - pub fn chunked() -> Decoder { + pub(crate) fn chunked() -> Decoder { Decoder { kind: Kind::Chunked(ChunkedState::Size, 0), } } - pub fn eof() -> Decoder { + pub(crate) fn eof() -> Decoder { Decoder { kind: Kind::Eof(false), } @@ -91,14 +94,11 @@ impl Decoder { // methods - pub fn is_eof(&self) -> bool { - match self.kind { - Length(0) | Chunked(ChunkedState::End, _) | Eof(true) => true, - _ => false, - } + pub(crate) fn is_eof(&self) -> bool { + matches!(self.kind, Length(0) | Chunked(ChunkedState::End, _) | Eof(true)) } - pub fn decode( + pub(crate) fn decode( &mut self, cx: &mut task::Context<'_>, body: &mut R, @@ -196,6 +196,8 @@ impl ChunkedState { Body => ChunkedState::read_body(cx, body, size, buf), BodyCr => ChunkedState::read_body_cr(cx, body), BodyLf => ChunkedState::read_body_lf(cx, body), + Trailer => ChunkedState::read_trailer(cx, body), + TrailerLf => ChunkedState::read_trailer_lf(cx, body), EndCr => ChunkedState::read_end_cr(cx, body), EndLf => ChunkedState::read_end_lf(cx, body), End => Poll::Ready(Ok(ChunkedState::End)), @@ -207,19 +209,32 @@ impl ChunkedState { size: &mut u64, ) -> Poll> { trace!("Read chunk hex size"); + + macro_rules! or_overflow { + ($e:expr) => ( + match $e { + Some(val) => val, + None => return Poll::Ready(Err(io::Error::new( + io::ErrorKind::InvalidData, + "invalid chunk size: overflow", + ))), + } + ) + } + let radix = 16; match byte!(rdr, cx) { b @ b'0'..=b'9' => { - *size *= radix; - *size += (b - b'0') as u64; + *size = or_overflow!(size.checked_mul(radix)); + *size = or_overflow!(size.checked_add((b - b'0') as u64)); } b @ b'a'..=b'f' => { - *size *= radix; - *size += (b + 10 - b'a') as u64; + *size = or_overflow!(size.checked_mul(radix)); + *size = or_overflow!(size.checked_add((b + 10 - b'a') as u64)); } b @ b'A'..=b'F' => { - *size *= radix; - *size += (b + 10 - b'A') as u64; + *size = or_overflow!(size.checked_mul(radix)); + *size = or_overflow!(size.checked_add((b + 10 - b'A') as u64)); } b'\t' | b' ' => return Poll::Ready(Ok(ChunkedState::SizeLws)), b';' => return Poll::Ready(Ok(ChunkedState::Extension)), @@ -254,8 +269,18 @@ impl ChunkedState { rdr: &mut R, ) -> Poll> { trace!("read_extension"); + // We don't care about extensions really at all. Just ignore them. + // They "end" at the next CRLF. + // + // However, some implementations may not check for the CR, so to save + // them from themselves, we reject extensions containing plain LF as + // well. 
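The or_overflow! guard added to read_size above amounts to accumulating the hex chunk size with checked arithmetic and surfacing InvalidData instead of silently wrapping. A standalone sketch of the same idea (the helper name is made up):

    use std::io;

    fn push_hex_digit(size: u64, b: u8) -> io::Result<u64> {
        let digit = match b {
            b'0'..=b'9' => (b - b'0') as u64,
            b'a'..=b'f' => (b - b'a') as u64 + 10,
            b'A'..=b'F' => (b - b'A') as u64 + 10,
            _ => {
                return Err(io::Error::new(
                    io::ErrorKind::InvalidInput,
                    "invalid chunk size line",
                ))
            }
        };
        size.checked_mul(16)
            .and_then(|s| s.checked_add(digit))
            .ok_or_else(|| {
                io::Error::new(io::ErrorKind::InvalidData, "invalid chunk size: overflow")
            })
    }

    fn main() {
        let mut size = 0u64;
        let mut err = None;
        for &b in b"f0000000000000003" {
            match push_hex_digit(size, b) {
                Ok(s) => size = s,
                Err(e) => {
                    err = Some(e);
                    break;
                }
            }
        }
        // The 17th hex digit overflows u64, matching the new
        // read_err("f0000000000000003\r\n", InvalidData) test below.
        assert_eq!(err.expect("overflow").kind(), io::ErrorKind::InvalidData);
    }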
match byte!(rdr, cx) { b'\r' => Poll::Ready(Ok(ChunkedState::SizeLf)), + b'\n' => Poll::Ready(Err(io::Error::new( + io::ErrorKind::InvalidData, + "invalid chunk extension contains newline", + ))), _ => Poll::Ready(Ok(ChunkedState::Extension)), // no supported extensions } } @@ -340,16 +365,36 @@ impl ChunkedState { } } + fn read_trailer( + cx: &mut task::Context<'_>, + rdr: &mut R, + ) -> Poll> { + trace!("read_trailer"); + match byte!(rdr, cx) { + b'\r' => Poll::Ready(Ok(ChunkedState::TrailerLf)), + _ => Poll::Ready(Ok(ChunkedState::Trailer)), + } + } + fn read_trailer_lf( + cx: &mut task::Context<'_>, + rdr: &mut R, + ) -> Poll> { + match byte!(rdr, cx) { + b'\n' => Poll::Ready(Ok(ChunkedState::EndCr)), + _ => Poll::Ready(Err(io::Error::new( + io::ErrorKind::InvalidInput, + "Invalid trailer end LF", + ))), + } + } + fn read_end_cr( cx: &mut task::Context<'_>, rdr: &mut R, ) -> Poll> { match byte!(rdr, cx) { b'\r' => Poll::Ready(Ok(ChunkedState::EndLf)), - _ => Poll::Ready(Err(io::Error::new( - io::ErrorKind::InvalidInput, - "Invalid chunk end CR", - ))), + _ => Poll::Ready(Ok(ChunkedState::Trailer)), } } fn read_end_lf( @@ -382,7 +427,7 @@ mod tests { use super::*; use std::pin::Pin; use std::time::Duration; - use tokio::io::AsyncRead; + use tokio::io::{AsyncRead, ReadBuf}; impl<'a> MemRead for &'a [u8] { fn read_mem(&mut self, _: &mut task::Context<'_>, len: usize) -> Poll> { @@ -401,8 +446,9 @@ mod tests { impl<'a> MemRead for &'a mut (dyn AsyncRead + Unpin) { fn read_mem(&mut self, cx: &mut task::Context<'_>, len: usize) -> Poll> { let mut v = vec![0; len]; - let n = ready!(Pin::new(self).poll_read(cx, &mut v)?); - Poll::Ready(Ok(Bytes::copy_from_slice(&v[..n]))) + let mut buf = ReadBuf::new(&mut v); + ready!(Pin::new(self).poll_read(cx, &mut buf)?); + Poll::Ready(Ok(Bytes::copy_from_slice(&buf.filled()))) } } @@ -427,7 +473,7 @@ mod tests { #[tokio::test] async fn test_read_chunk_size() { - use std::io::ErrorKind::{InvalidInput, UnexpectedEof}; + use std::io::ErrorKind::{InvalidData, InvalidInput, UnexpectedEof}; async fn read(s: &str) -> u64 { let mut state = ChunkedState::Size; @@ -468,7 +514,7 @@ mod tests { } }; if state == ChunkedState::Body || state == ChunkedState::End { - panic!(format!("Was Ok. Expected Err for {:?}", s)); + panic!("Was Ok. 
Expected Err for {:?}", s); } } } @@ -502,6 +548,9 @@ mod tests { read_err("1 invalid extension\r\n", InvalidInput).await; read_err("1 A\r\n", InvalidInput).await; read_err("1;no CRLF", UnexpectedEof).await; + read_err("1;reject\nnewlines\r\n", InvalidData).await; + // Overflow + read_err("f0000000000000003\r\n", InvalidData).await; } #[tokio::test] @@ -537,6 +586,15 @@ mod tests { assert_eq!("1234567890abcdef", &result); } + #[tokio::test] + async fn test_read_chunked_trailer_with_missing_lf() { + let mut mock_buf = &b"10\r\n1234567890abcdef\r\n0\r\nbad\r\r\n"[..]; + let mut decoder = Decoder::chunked(); + decoder.decode_fut(&mut mock_buf).await.expect("decode"); + let e = decoder.decode_fut(&mut mock_buf).await.unwrap_err(); + assert_eq!(e.kind(), io::ErrorKind::InvalidInput); + } + #[tokio::test] async fn test_read_chunked_after_eof() { let mut mock_buf = &b"10\r\n1234567890abcdef\r\n0\r\n\r\n"[..]; @@ -623,7 +681,7 @@ mod tests { #[cfg(feature = "nightly")] #[bench] fn bench_decode_chunked_1kb(b: &mut test::Bencher) { - let mut rt = new_runtime(); + let rt = new_runtime(); const LEN: usize = 1024; let mut vec = Vec::new(); @@ -647,7 +705,7 @@ mod tests { #[cfg(feature = "nightly")] #[bench] fn bench_decode_length_1kb(b: &mut test::Bencher) { - let mut rt = new_runtime(); + let rt = new_runtime(); const LEN: usize = 1024; let content = Bytes::from(&[0; LEN][..]); @@ -665,9 +723,8 @@ mod tests { #[cfg(feature = "nightly")] fn new_runtime() -> tokio::runtime::Runtime { - tokio::runtime::Builder::new() + tokio::runtime::Builder::new_current_thread() .enable_all() - .basic_scheduler() .build() .expect("rt build") } diff --git a/third_party/rust/hyper/src/proto/h1/dispatch.rs b/third_party/rust/hyper/src/proto/h1/dispatch.rs index a878651f0bb0..677131bfdd32 100644 --- a/third_party/rust/hyper/src/proto/h1/dispatch.rs +++ b/third_party/rust/hyper/src/proto/h1/dispatch.rs @@ -1,17 +1,17 @@ use std::error::Error as StdError; use bytes::{Buf, Bytes}; -use http::{Request, Response, StatusCode}; +use http::Request; use tokio::io::{AsyncRead, AsyncWrite}; +use tracing::{debug, trace}; use super::{Http1Transaction, Wants}; -use crate::body::{Body, HttpBody}; -use crate::common::{task, Future, Never, Pin, Poll, Unpin}; +use crate::body::{Body, DecodedLength, HttpBody}; +use crate::common::{task, Future, Pin, Poll, Unpin}; use crate::proto::{ - BodyLength, Conn, DecodedLength, Dispatched, MessageHead, RequestHead, RequestLine, - ResponseHead, + BodyLength, Conn, Dispatched, MessageHead, RequestHead, }; -use crate::service::HttpService; +use crate::upgrade::OnUpgrade; pub(crate) struct Dispatcher { conn: Conn, @@ -27,7 +27,7 @@ pub(crate) trait Dispatch { type PollError; type RecvItem; fn poll_msg( - &mut self, + self: Pin<&mut Self>, cx: &mut task::Context<'_>, ) -> Poll>>; fn recv_msg(&mut self, msg: crate::Result<(Self::RecvItem, Body)>) -> crate::Result<()>; @@ -35,33 +35,42 @@ pub(crate) trait Dispatch { fn should_poll(&self) -> bool; } -pub struct Server, B> { - in_flight: Pin>>, - pub(crate) service: S, +cfg_server! { + use crate::service::HttpService; + + pub(crate) struct Server, B> { + in_flight: Pin>>, + pub(crate) service: S, + } } -pub struct Client { - callback: Option, Response>>, - rx: ClientRx, - rx_closed: bool, -} +cfg_client! { + pin_project_lite::pin_project! 
{ + pub(crate) struct Client { + callback: Option, http::Response>>, + #[pin] + rx: ClientRx, + rx_closed: bool, + } + } -type ClientRx = crate::client::dispatch::Receiver, Response>; + type ClientRx = crate::client::dispatch::Receiver, http::Response>; +} impl Dispatcher where D: Dispatch< - PollItem = MessageHead, - PollBody = Bs, - RecvItem = MessageHead, - > + Unpin, + PollItem = MessageHead, + PollBody = Bs, + RecvItem = MessageHead, + > + Unpin, D::PollError: Into>, I: AsyncRead + AsyncWrite + Unpin, T: Http1Transaction + Unpin, Bs: HttpBody + 'static, Bs::Error: Into>, { - pub fn new(dispatch: D, conn: Conn) -> Self { + pub(crate) fn new(dispatch: D, conn: Conn) -> Self { Dispatcher { conn, dispatch, @@ -71,14 +80,15 @@ where } } - pub fn disable_keep_alive(&mut self) { + #[cfg(feature = "server")] + pub(crate) fn disable_keep_alive(&mut self) { self.conn.disable_keep_alive(); if self.conn.is_write_closed() { self.close(); } } - pub fn into_inner(self) -> (I, Bytes, D) { + pub(crate) fn into_inner(self) -> (I, Bytes, D) { let (io, buf) = self.conn.into_inner(); (io, buf, self.dispatch) } @@ -236,8 +246,8 @@ where } // dispatch is ready for a message, try to read one match ready!(self.conn.poll_read_head(cx)) { - Some(Ok((head, body_len, wants))) => { - let mut body = match body_len { + Some(Ok((mut head, body_len, wants))) => { + let body = match body_len { DecodedLength::ZERO => Body::empty(), other => { let (tx, rx) = Body::new_channel(other, wants.contains(Wants::EXPECT)); @@ -246,7 +256,10 @@ where } }; if wants.contains(Wants::UPGRADE) { - body.set_on_upgrade(self.conn.on_upgrade()); + let upgrade = self.conn.on_upgrade(); + debug_assert!(!upgrade.is_none(), "empty upgrade"); + debug_assert!(head.extensions.get::().is_none(), "OnUpgrade already set"); + head.extensions.insert(upgrade); } self.dispatch.recv_msg(Ok((head, body)))?; Poll::Ready(Ok(())) @@ -281,7 +294,7 @@ where && self.conn.can_write_head() && self.dispatch.should_poll() { - if let Some(msg) = ready!(self.dispatch.poll_msg(cx)) { + if let Some(msg) = ready!(Pin::new(&mut self.dispatch).poll_msg(cx)) { let (head, mut body) = msg.map_err(crate::Error::new_user_service)?; // Check if the body knows its full data immediately. @@ -338,7 +351,7 @@ where *clear_body = true; if chunk.remaining() == 0 { trace!("discarding empty chunk"); - self.conn.end_body(); + self.conn.end_body()?; } else { self.conn.write_body_and_end(chunk); } @@ -351,7 +364,7 @@ where } } else { *clear_body = true; - self.conn.end_body(); + self.conn.end_body()?; } } else { return Poll::Pending; @@ -394,10 +407,10 @@ where impl Future for Dispatcher where D: Dispatch< - PollItem = MessageHead, - PollBody = Bs, - RecvItem = MessageHead, - > + Unpin, + PollItem = MessageHead, + PollBody = Bs, + RecvItem = MessageHead, + > + Unpin, D::PollError: Into>, I: AsyncRead + AsyncWrite + Unpin, T: Http1Transaction + Unpin, @@ -438,196 +451,202 @@ impl<'a, T> Drop for OptGuard<'a, T> { // ===== impl Server ===== -impl Server -where - S: HttpService, -{ - pub fn new(service: S) -> Server { - Server { - in_flight: Box::pin(None), - service, +cfg_server! 
{ + impl Server + where + S: HttpService, + { + pub(crate) fn new(service: S) -> Server { + Server { + in_flight: Box::pin(None), + service, + } + } + + pub(crate) fn into_service(self) -> S { + self.service } } - pub fn into_service(self) -> S { - self.service - } -} + // Service is never pinned + impl, B> Unpin for Server {} -// Service is never pinned -impl, B> Unpin for Server {} + impl Dispatch for Server + where + S: HttpService, + S::Error: Into>, + Bs: HttpBody, + { + type PollItem = MessageHead; + type PollBody = Bs; + type PollError = S::Error; + type RecvItem = RequestHead; -impl Dispatch for Server -where - S: HttpService, - S::Error: Into>, - Bs: HttpBody, -{ - type PollItem = MessageHead; - type PollBody = Bs; - type PollError = S::Error; - type RecvItem = RequestHead; - - fn poll_msg( - &mut self, - cx: &mut task::Context<'_>, - ) -> Poll>> { - let ret = if let Some(ref mut fut) = self.in_flight.as_mut().as_pin_mut() { - let resp = ready!(fut.as_mut().poll(cx)?); - let (parts, body) = resp.into_parts(); - let head = MessageHead { - version: parts.version, - subject: parts.status, - headers: parts.headers, + fn poll_msg( + mut self: Pin<&mut Self>, + cx: &mut task::Context<'_>, + ) -> Poll>> { + let mut this = self.as_mut(); + let ret = if let Some(ref mut fut) = this.in_flight.as_mut().as_pin_mut() { + let resp = ready!(fut.as_mut().poll(cx)?); + let (parts, body) = resp.into_parts(); + let head = MessageHead { + version: parts.version, + subject: parts.status, + headers: parts.headers, + extensions: parts.extensions, + }; + Poll::Ready(Some(Ok((head, body)))) + } else { + unreachable!("poll_msg shouldn't be called if no inflight"); }; - Poll::Ready(Some(Ok((head, body)))) - } else { - unreachable!("poll_msg shouldn't be called if no inflight"); - }; - // Since in_flight finished, remove it - self.in_flight.set(None); - ret - } - - fn recv_msg(&mut self, msg: crate::Result<(Self::RecvItem, Body)>) -> crate::Result<()> { - let (msg, body) = msg?; - let mut req = Request::new(body); - *req.method_mut() = msg.subject.0; - *req.uri_mut() = msg.subject.1; - *req.headers_mut() = msg.headers; - *req.version_mut() = msg.version; - let fut = self.service.call(req); - self.in_flight.set(Some(fut)); - Ok(()) - } - - fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll> { - if self.in_flight.is_some() { - Poll::Pending - } else { - self.service.poll_ready(cx).map_err(|_e| { - // FIXME: return error value. - trace!("service closed"); - }) + // Since in_flight finished, remove it + this.in_flight.set(None); + ret } - } - fn should_poll(&self) -> bool { - self.in_flight.is_some() + fn recv_msg(&mut self, msg: crate::Result<(Self::RecvItem, Body)>) -> crate::Result<()> { + let (msg, body) = msg?; + let mut req = Request::new(body); + *req.method_mut() = msg.subject.0; + *req.uri_mut() = msg.subject.1; + *req.headers_mut() = msg.headers; + *req.version_mut() = msg.version; + *req.extensions_mut() = msg.extensions; + let fut = self.service.call(req); + self.in_flight.set(Some(fut)); + Ok(()) + } + + fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll> { + if self.in_flight.is_some() { + Poll::Pending + } else { + self.service.poll_ready(cx).map_err(|_e| { + // FIXME: return error value. 
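Dispatch::poll_msg now takes Pin<&mut Self>, and the client half of the dispatcher (in the cfg_client! block) wraps its receiver with pin_project_lite so the pinned field can be polled without unsafe code. A minimal, hypothetical illustration of that pattern (Wrapper and its fields are invented for the example, not hyper types):

    use std::future::Future;
    use std::pin::Pin;
    use std::task::{Context, Poll};

    pin_project_lite::pin_project! {
        struct Wrapper<F> {
            #[pin]
            inner: F,    // polled through the projection as Pin<&mut F>
            polled: u32, // plain field, projected as &mut u32
        }
    }

    impl<F: Future> Future for Wrapper<F> {
        type Output = F::Output;

        fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<F::Output> {
            let this = self.project();
            *this.polled += 1;
            this.inner.poll(cx)
        }
    }

    #[tokio::main]
    async fn main() {
        let fut = Wrapper { inner: async { 21 * 2 }, polled: 0 };
        assert_eq!(fut.await, 42);
    }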
+ trace!("service closed"); + }) + } + } + + fn should_poll(&self) -> bool { + self.in_flight.is_some() + } } } // ===== impl Client ===== -impl Client { - pub fn new(rx: ClientRx) -> Client { - Client { - callback: None, - rx, - rx_closed: false, - } - } -} - -impl Dispatch for Client -where - B: HttpBody, -{ - type PollItem = RequestHead; - type PollBody = B; - type PollError = Never; - type RecvItem = ResponseHead; - - fn poll_msg( - &mut self, - cx: &mut task::Context<'_>, - ) -> Poll>> { - debug_assert!(!self.rx_closed); - match self.rx.poll_next(cx) { - Poll::Ready(Some((req, mut cb))) => { - // check that future hasn't been canceled already - match cb.poll_canceled(cx) { - Poll::Ready(()) => { - trace!("request canceled"); - Poll::Ready(None) - } - Poll::Pending => { - let (parts, body) = req.into_parts(); - let head = RequestHead { - version: parts.version, - subject: RequestLine(parts.method, parts.uri), - headers: parts.headers, - }; - self.callback = Some(cb); - Poll::Ready(Some(Ok((head, body)))) - } - } +cfg_client! { + impl Client { + pub(crate) fn new(rx: ClientRx) -> Client { + Client { + callback: None, + rx, + rx_closed: false, } - Poll::Ready(None) => { - // user has dropped sender handle - trace!("client tx closed"); - self.rx_closed = true; - Poll::Ready(None) - } - Poll::Pending => Poll::Pending, } } - fn recv_msg(&mut self, msg: crate::Result<(Self::RecvItem, Body)>) -> crate::Result<()> { - match msg { - Ok((msg, body)) => { - if let Some(cb) = self.callback.take() { - let mut res = Response::new(body); - *res.status_mut() = msg.subject; - *res.headers_mut() = msg.headers; - *res.version_mut() = msg.version; - cb.send(Ok(res)); - Ok(()) - } else { - // Getting here is likely a bug! An error should have happened - // in Conn::require_empty_read() before ever parsing a - // full message! 
- Err(crate::Error::new_unexpected_message()) + impl Dispatch for Client + where + B: HttpBody, + { + type PollItem = RequestHead; + type PollBody = B; + type PollError = crate::common::Never; + type RecvItem = crate::proto::ResponseHead; + + fn poll_msg( + mut self: Pin<&mut Self>, + cx: &mut task::Context<'_>, + ) -> Poll>> { + let mut this = self.as_mut(); + debug_assert!(!this.rx_closed); + match this.rx.poll_recv(cx) { + Poll::Ready(Some((req, mut cb))) => { + // check that future hasn't been canceled already + match cb.poll_canceled(cx) { + Poll::Ready(()) => { + trace!("request canceled"); + Poll::Ready(None) + } + Poll::Pending => { + let (parts, body) = req.into_parts(); + let head = RequestHead { + version: parts.version, + subject: crate::proto::RequestLine(parts.method, parts.uri), + headers: parts.headers, + extensions: parts.extensions, + }; + this.callback = Some(cb); + Poll::Ready(Some(Ok((head, body)))) + } + } } + Poll::Ready(None) => { + // user has dropped sender handle + trace!("client tx closed"); + this.rx_closed = true; + Poll::Ready(None) + } + Poll::Pending => Poll::Pending, } - Err(err) => { - if let Some(cb) = self.callback.take() { - cb.send(Err((err, None))); - Ok(()) - } else if !self.rx_closed { - self.rx.close(); - if let Some((req, cb)) = self.rx.try_recv() { - trace!("canceling queued request with connection error: {}", err); - // in this case, the message was never even started, so it's safe to tell - // the user that the request was completely canceled - cb.send(Err((crate::Error::new_canceled().with(err), Some(req)))); + } + + fn recv_msg(&mut self, msg: crate::Result<(Self::RecvItem, Body)>) -> crate::Result<()> { + match msg { + Ok((msg, body)) => { + if let Some(cb) = self.callback.take() { + let res = msg.into_response(body); + cb.send(Ok(res)); Ok(()) + } else { + // Getting here is likely a bug! An error should have happened + // in Conn::require_empty_read() before ever parsing a + // full message! + Err(crate::Error::new_unexpected_message()) + } + } + Err(err) => { + if let Some(cb) = self.callback.take() { + cb.send(Err((err, None))); + Ok(()) + } else if !self.rx_closed { + self.rx.close(); + if let Some((req, cb)) = self.rx.try_recv() { + trace!("canceling queued request with connection error: {}", err); + // in this case, the message was never even started, so it's safe to tell + // the user that the request was completely canceled + cb.send(Err((crate::Error::new_canceled().with(err), Some(req)))); + Ok(()) + } else { + Err(err) + } } else { Err(err) } - } else { - Err(err) } } } - } - fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll> { - match self.callback { - Some(ref mut cb) => match cb.poll_canceled(cx) { - Poll::Ready(()) => { - trace!("callback receiver has dropped"); - Poll::Ready(Err(())) - } - Poll::Pending => Poll::Ready(Ok(())), - }, - None => Poll::Ready(Err(())), + fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll> { + match self.callback { + Some(ref mut cb) => match cb.poll_canceled(cx) { + Poll::Ready(()) => { + trace!("callback receiver has dropped"); + Poll::Ready(Err(())) + } + Poll::Pending => Poll::Ready(Ok(())), + }, + None => Poll::Ready(Err(())), + } } - } - fn should_poll(&self) -> bool { - self.callback.is_none() + fn should_poll(&self) -> bool { + self.callback.is_none() + } } } @@ -646,7 +665,6 @@ mod tests { // Block at 0 for now, but we will release this response before // the request is ready to write later... 
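When the connection fails with requests still queued, the branch above completes their callbacks with Error::new_canceled(), which reaches callers as hyper::Error::is_canceled(). A hypothetical caller-side sketch (not part of this patch; assumes the client, http1 and tcp features):

    use hyper::{client::HttpConnector, Client, Uri};

    async fn send_once(client: &Client<HttpConnector>, uri: Uri) {
        match client.get(uri).await {
            Ok(resp) => println!("status: {}", resp.status()),
            // A canceled request was never started, so it is safe to retry.
            Err(e) if e.is_canceled() => println!("request canceled before being sent"),
            Err(e) => println!("request failed: {}", e),
        }
    }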
- //let io = AsyncIo::new_buf(b"HTTP/1.1 200 OK\r\n\r\n".to_vec(), 0); let (mut tx, rx) = crate::client::dispatch::channel(); let conn = Conn::<_, bytes::Bytes, ClientTransaction>::new(io); let mut dispatcher = Dispatcher::new(Client::new(rx), conn); @@ -673,6 +691,34 @@ mod tests { }); } + #[tokio::test] + async fn client_flushing_is_not_ready_for_next_request() { + let _ = pretty_env_logger::try_init(); + + let (io, _handle) = tokio_test::io::Builder::new() + .write(b"POST / HTTP/1.1\r\ncontent-length: 4\r\n\r\n") + .read(b"HTTP/1.1 200 OK\r\ncontent-length: 0\r\n\r\n") + .wait(std::time::Duration::from_secs(2)) + .build_with_handle(); + + let (mut tx, rx) = crate::client::dispatch::channel(); + let mut conn = Conn::<_, bytes::Bytes, ClientTransaction>::new(io); + conn.set_write_strategy_queue(); + + let dispatcher = Dispatcher::new(Client::new(rx), conn); + let _dispatcher = tokio::spawn(async move { dispatcher.await }); + + let req = crate::Request::builder() + .method("POST") + .body(crate::Body::from("reee")) + .unwrap(); + + let res = tx.try_send(req).unwrap().await.expect("response"); + drop(res); + + assert!(!tx.is_ready()); + } + #[tokio::test] async fn body_empty_chunks_ignored() { let _ = pretty_env_logger::try_init(); diff --git a/third_party/rust/hyper/src/proto/h1/encode.rs b/third_party/rust/hyper/src/proto/h1/encode.rs index e480945e6f29..f0aa261a4f8d 100644 --- a/third_party/rust/hyper/src/proto/h1/encode.rs +++ b/third_party/rust/hyper/src/proto/h1/encode.rs @@ -1,8 +1,9 @@ use std::fmt; use std::io::IoSlice; -use bytes::buf::ext::{BufExt, Chain, Take}; +use bytes::buf::{Chain, Take}; use bytes::Buf; +use tracing::trace; use super::io::WriteBuf; @@ -10,18 +11,18 @@ type StaticBuf = &'static [u8]; /// Encoders to handle different Transfer-Encodings. #[derive(Debug, Clone, PartialEq)] -pub struct Encoder { +pub(crate) struct Encoder { kind: Kind, is_last: bool, } #[derive(Debug)] -pub struct EncodedBuf { +pub(crate) struct EncodedBuf { kind: BufKind, } #[derive(Debug)] -pub struct NotEof; +pub(crate) struct NotEof(u64); #[derive(Debug, PartialEq, Clone)] enum Kind { @@ -35,6 +36,7 @@ enum Kind { /// /// This is mostly only used with HTTP/1.0 with a length. This kind requires /// the connection to be closed when the body is finished. 
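The Kind variants in this Encoder hunk map onto the three ways an HTTP/1 message body can be framed. A toy mapping, separate from hyper's actual role.rs logic (the enum and function here are illustrative only):

    #[derive(Debug)]
    enum Framing {
        Length(u64),    // Content-Length known up front
        Chunked,        // HTTP/1.1 with unknown length
        CloseDelimited, // HTTP/1.0 fallback: body ends when the connection closes
    }

    fn pick_framing(http_11: bool, content_length: Option<u64>) -> Framing {
        match (content_length, http_11) {
            (Some(len), _) => Framing::Length(len),
            (None, true) => Framing::Chunked,
            (None, false) => Framing::CloseDelimited,
        }
    }

    fn main() {
        println!("{:?}", pick_framing(true, Some(42))); // Length(42)
        println!("{:?}", pick_framing(true, None));     // Chunked
        println!("{:?}", pick_framing(false, None));    // CloseDelimited
    }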
+ #[cfg(feature = "server")] CloseDelimited, } @@ -53,45 +55,54 @@ impl Encoder { is_last: false, } } - pub fn chunked() -> Encoder { + pub(crate) fn chunked() -> Encoder { Encoder::new(Kind::Chunked) } - pub fn length(len: u64) -> Encoder { + pub(crate) fn length(len: u64) -> Encoder { Encoder::new(Kind::Length(len)) } - pub fn close_delimited() -> Encoder { + #[cfg(feature = "server")] + pub(crate) fn close_delimited() -> Encoder { Encoder::new(Kind::CloseDelimited) } - pub fn is_eof(&self) -> bool { - match self.kind { - Kind::Length(0) => true, - _ => false, - } + pub(crate) fn is_eof(&self) -> bool { + matches!(self.kind, Kind::Length(0)) } - pub fn set_last(mut self, is_last: bool) -> Self { + #[cfg(feature = "server")] + pub(crate) fn set_last(mut self, is_last: bool) -> Self { self.is_last = is_last; self } - pub fn is_last(&self) -> bool { + pub(crate) fn is_last(&self) -> bool { self.is_last } - pub fn end(&self) -> Result>, NotEof> { + pub(crate) fn is_close_delimited(&self) -> bool { + match self.kind { + #[cfg(feature = "server")] + Kind::CloseDelimited => true, + _ => false, + } + } + + pub(crate) fn end(&self) -> Result>, NotEof> { match self.kind { Kind::Length(0) => Ok(None), Kind::Chunked => Ok(Some(EncodedBuf { kind: BufKind::ChunkedEnd(b"0\r\n\r\n"), })), - _ => Err(NotEof), + #[cfg(feature = "server")] + Kind::CloseDelimited => Ok(None), + Kind::Length(n) => Err(NotEof(n)), } } - pub fn encode(&mut self, msg: B) -> EncodedBuf + pub(crate) fn encode(&mut self, msg: B) -> EncodedBuf where B: Buf, { @@ -117,6 +128,7 @@ impl Encoder { BufKind::Exact(msg) } } + #[cfg(feature = "server")] Kind::CloseDelimited => { trace!("close delimited write {}B", len); BufKind::Exact(msg) @@ -160,6 +172,7 @@ impl Encoder { } } } + #[cfg(feature = "server")] Kind::CloseDelimited => { trace!("close delimited write {}B", len); dst.buffer(msg); @@ -217,12 +230,12 @@ where } #[inline] - fn bytes(&self) -> &[u8] { + fn chunk(&self) -> &[u8] { match self.kind { - BufKind::Exact(ref b) => b.bytes(), - BufKind::Limited(ref b) => b.bytes(), - BufKind::Chunked(ref b) => b.bytes(), - BufKind::ChunkedEnd(ref b) => b.bytes(), + BufKind::Exact(ref b) => b.chunk(), + BufKind::Limited(ref b) => b.chunk(), + BufKind::Chunked(ref b) => b.chunk(), + BufKind::ChunkedEnd(ref b) => b.chunk(), } } @@ -237,12 +250,12 @@ where } #[inline] - fn bytes_vectored<'t>(&'t self, dst: &mut [IoSlice<'t>]) -> usize { + fn chunks_vectored<'t>(&'t self, dst: &mut [IoSlice<'t>]) -> usize { match self.kind { - BufKind::Exact(ref b) => b.bytes_vectored(dst), - BufKind::Limited(ref b) => b.bytes_vectored(dst), - BufKind::Chunked(ref b) => b.bytes_vectored(dst), - BufKind::ChunkedEnd(ref b) => b.bytes_vectored(dst), + BufKind::Exact(ref b) => b.chunks_vectored(dst), + BufKind::Limited(ref b) => b.chunks_vectored(dst), + BufKind::Chunked(ref b) => b.chunks_vectored(dst), + BufKind::ChunkedEnd(ref b) => b.chunks_vectored(dst), } } } @@ -283,7 +296,7 @@ impl Buf for ChunkSize { } #[inline] - fn bytes(&self) -> &[u8] { + fn chunk(&self) -> &[u8] { &self.bytes[self.pos.into()..self.len.into()] } @@ -338,6 +351,14 @@ impl From, StaticBuf>> for EncodedBuf { } } +impl fmt::Display for NotEof { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "early end, expected {} more bytes", self.0) + } +} + +impl std::error::Error for NotEof {} + #[cfg(test)] mod tests { use bytes::BufMut; @@ -405,7 +426,7 @@ mod tests { assert_eq!(dst, b"foo bar"); assert!(!encoder.is_eof()); - encoder.end::<()>().unwrap_err(); + 
encoder.end::<()>().unwrap(); let msg2 = b"baz".as_ref(); let buf2 = encoder.encode(msg2); @@ -413,6 +434,6 @@ mod tests { assert_eq!(dst, b"foo barbaz"); assert!(!encoder.is_eof()); - encoder.end::<()>().unwrap_err(); + encoder.end::<()>().unwrap(); } } diff --git a/third_party/rust/hyper/src/proto/h1/io.rs b/third_party/rust/hyper/src/proto/h1/io.rs index 00f4f64f47de..08a399368434 100644 --- a/third_party/rust/hyper/src/proto/h1/io.rs +++ b/third_party/rust/hyper/src/proto/h1/io.rs @@ -1,20 +1,28 @@ -use std::cell::Cell; use std::cmp; use std::fmt; use std::io::{self, IoSlice}; +use std::marker::Unpin; +use std::mem::MaybeUninit; +#[cfg(all(feature = "server", feature = "runtime"))] +use std::future::Future; +#[cfg(all(feature = "server", feature = "runtime"))] +use std::time::Duration; +#[cfg(all(feature = "server", feature = "runtime"))] +use tokio::time::Instant; use bytes::{Buf, BufMut, Bytes, BytesMut}; -use tokio::io::{AsyncRead, AsyncWrite}; +use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; +use tracing::{debug, trace}; use super::{Http1Transaction, ParseContext, ParsedMessage}; use crate::common::buf::BufList; -use crate::common::{task, Pin, Poll, Unpin}; +use crate::common::{task, Pin, Poll}; /// The initial buffer size allocated before trying to read from IO. pub(crate) const INIT_BUFFER_SIZE: usize = 8192; /// The minimum value that can be set to max buffer size. -pub const MINIMUM_MAX_BUFFER_SIZE: usize = INIT_BUFFER_SIZE; +pub(crate) const MINIMUM_MAX_BUFFER_SIZE: usize = INIT_BUFFER_SIZE; /// The default maximum read buffer size. If the buffer gets this big and /// a message is still not complete, a `TooLarge` error is triggered. @@ -28,7 +36,7 @@ pub(crate) const DEFAULT_MAX_BUFFER_SIZE: usize = 8192 + 4096 * 100; /// forces a flush if the queue gets this big. 
const MAX_BUF_LIST_BUFFERS: usize = 16; -pub struct Buffered { +pub(crate) struct Buffered { flush_pipeline: bool, io: T, read_blocked: bool, @@ -54,18 +62,25 @@ where T: AsyncRead + AsyncWrite + Unpin, B: Buf, { - pub fn new(io: T) -> Buffered { + pub(crate) fn new(io: T) -> Buffered { + let strategy = if io.is_write_vectored() { + WriteStrategy::Queue + } else { + WriteStrategy::Flatten + }; + let write_buf = WriteBuf::new(strategy); Buffered { flush_pipeline: false, io, read_blocked: false, read_buf: BytesMut::with_capacity(0), read_buf_strategy: ReadStrategy::default(), - write_buf: WriteBuf::new(), + write_buf, } } - pub fn set_flush_pipeline(&mut self, enabled: bool) { + #[cfg(feature = "server")] + pub(crate) fn set_flush_pipeline(&mut self, enabled: bool) { debug_assert!(!self.write_buf.has_remaining()); self.flush_pipeline = enabled; if enabled { @@ -73,7 +88,7 @@ where } } - pub fn set_max_buf_size(&mut self, max: usize) { + pub(crate) fn set_max_buf_size(&mut self, max: usize) { assert!( max >= MINIMUM_MAX_BUFFER_SIZE, "The max_buf_size cannot be smaller than {}.", @@ -83,18 +98,26 @@ where self.write_buf.max_buf_size = max; } - pub fn set_read_buf_exact_size(&mut self, sz: usize) { + #[cfg(feature = "client")] + pub(crate) fn set_read_buf_exact_size(&mut self, sz: usize) { self.read_buf_strategy = ReadStrategy::Exact(sz); } - pub fn set_write_strategy_flatten(&mut self) { + pub(crate) fn set_write_strategy_flatten(&mut self) { // this should always be called only at construction time, // so this assert is here to catch myself debug_assert!(self.write_buf.queue.bufs_cnt() == 0); self.write_buf.set_strategy(WriteStrategy::Flatten); } - pub fn read_buf(&self) -> &[u8] { + pub(crate) fn set_write_strategy_queue(&mut self) { + // this should always be called only at construction time, + // so this assert is here to catch myself + debug_assert!(self.write_buf.queue.bufs_cnt() == 0); + self.write_buf.set_strategy(WriteStrategy::Queue); + } + + pub(crate) fn read_buf(&self) -> &[u8] { self.read_buf.as_ref() } @@ -110,7 +133,16 @@ where self.read_buf.capacity() - self.read_buf.len() } - pub fn headers_buf(&mut self) -> &mut Vec { + /// Return whether we can append to the headers buffer. + /// + /// Reasons we can't: + /// - The write buf is in queue mode, and some of the past body is still + /// needing to be flushed. 
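With tokio 1.x, Buffered::new above can ask the transport whether it handles vectored writes (AsyncWrite::is_write_vectored) and pick a write strategy up front, which is why the old runtime Auto detection disappears further down. A standalone sketch of that selection (the enum mirrors the vendored one but is redeclared here):

    use tokio::io::AsyncWrite;

    #[derive(Debug)]
    enum WriteStrategy {
        Flatten, // copy every chunk into one contiguous buffer
        Queue,   // keep chunks separate and rely on vectored writes
    }

    fn pick_strategy<W: AsyncWrite>(io: &W) -> WriteStrategy {
        if io.is_write_vectored() {
            WriteStrategy::Queue
        } else {
            WriteStrategy::Flatten
        }
    }

    fn main() {
        // DuplexStream is only used here as something to probe; transports
        // with efficient vectored writes (e.g. TcpStream) report true.
        let (a, _b) = tokio::io::duplex(64);
        println!("{:?}", pick_strategy(&a));
    }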
+ pub(crate) fn can_headers_buf(&self) -> bool { + !self.write_buf.queue.has_remaining() + } + + pub(crate) fn headers_buf(&mut self) -> &mut Vec { let buf = self.write_buf.headers_mut(); &mut buf.bytes } @@ -119,15 +151,15 @@ where &mut self.write_buf } - pub fn buffer>(&mut self, buf: BB) { + pub(crate) fn buffer>(&mut self, buf: BB) { self.write_buf.buffer(buf) } - pub fn can_buffer(&self) -> bool { + pub(crate) fn can_buffer(&self) -> bool { self.flush_pipeline || self.write_buf.can_buffer() } - pub fn consume_leading_lines(&mut self) { + pub(crate) fn consume_leading_lines(&mut self) { if !self.read_buf.is_empty() { let mut i = 0; while i < self.read_buf.len() { @@ -149,15 +181,38 @@ where S: Http1Transaction, { loop { - match S::parse( + match super::role::parse_headers::( &mut self.read_buf, ParseContext { cached_headers: parse_ctx.cached_headers, req_method: parse_ctx.req_method, + h1_parser_config: parse_ctx.h1_parser_config.clone(), + #[cfg(all(feature = "server", feature = "runtime"))] + h1_header_read_timeout: parse_ctx.h1_header_read_timeout, + #[cfg(all(feature = "server", feature = "runtime"))] + h1_header_read_timeout_fut: parse_ctx.h1_header_read_timeout_fut, + #[cfg(all(feature = "server", feature = "runtime"))] + h1_header_read_timeout_running: parse_ctx.h1_header_read_timeout_running, + preserve_header_case: parse_ctx.preserve_header_case, + h09_responses: parse_ctx.h09_responses, + #[cfg(feature = "ffi")] + on_informational: parse_ctx.on_informational, + #[cfg(feature = "ffi")] + raw_headers: parse_ctx.raw_headers, }, )? { Some(msg) => { debug!("parsed {} headers", msg.head.headers.len()); + + #[cfg(all(feature = "server", feature = "runtime"))] + { + *parse_ctx.h1_header_read_timeout_running = false; + + if let Some(h1_header_read_timeout_fut) = parse_ctx.h1_header_read_timeout_fut { + // Reset the timer in order to avoid woken up when the timeout finishes + h1_header_read_timeout_fut.as_mut().reset(Instant::now() + Duration::from_secs(30 * 24 * 60 * 60)); + } + } return Poll::Ready(Ok(msg)); } None => { @@ -166,6 +221,18 @@ where debug!("max_buf_size ({}) reached, closing", max); return Poll::Ready(Err(crate::Error::new_too_large())); } + + #[cfg(all(feature = "server", feature = "runtime"))] + if *parse_ctx.h1_header_read_timeout_running { + if let Some(h1_header_read_timeout_fut) = parse_ctx.h1_header_read_timeout_fut { + if Pin::new( h1_header_read_timeout_fut).poll(cx).is_ready() { + *parse_ctx.h1_header_read_timeout_running = false; + + tracing::warn!("read header from client timeout"); + return Poll::Ready(Err(crate::Error::new_header_timeout())) + } + } + } } } if ready!(self.poll_read_from_io(cx)).map_err(crate::Error::new_io)? 
== 0 { @@ -175,15 +242,29 @@ where } } - pub fn poll_read_from_io(&mut self, cx: &mut task::Context<'_>) -> Poll> { + pub(crate) fn poll_read_from_io( + &mut self, + cx: &mut task::Context<'_>, + ) -> Poll> { self.read_blocked = false; let next = self.read_buf_strategy.next(); if self.read_buf_remaining_mut() < next { self.read_buf.reserve(next); } - match Pin::new(&mut self.io).poll_read_buf(cx, &mut self.read_buf) { - Poll::Ready(Ok(n)) => { - debug!("read {} bytes", n); + + let dst = self.read_buf.chunk_mut(); + let dst = unsafe { &mut *(dst as *mut _ as *mut [MaybeUninit]) }; + let mut buf = ReadBuf::uninit(dst); + match Pin::new(&mut self.io).poll_read(cx, &mut buf) { + Poll::Ready(Ok(_)) => { + let n = buf.filled().len(); + trace!("received {} bytes", n); + unsafe { + // Safety: we just read that many bytes into the + // uninitialized part of the buffer, so this is okay. + // @tokio pls give me back `poll_read_buf` thanks + self.read_buf.advance_mut(n); + } self.read_buf_strategy.record(n); Poll::Ready(Ok(n)) } @@ -195,19 +276,19 @@ where } } - pub fn into_inner(self) -> (T, Bytes) { + pub(crate) fn into_inner(self) -> (T, Bytes) { (self.io, self.read_buf.freeze()) } - pub fn io_mut(&mut self) -> &mut T { + pub(crate) fn io_mut(&mut self) -> &mut T { &mut self.io } - pub fn is_read_blocked(&self) -> bool { + pub(crate) fn is_read_blocked(&self) -> bool { self.read_blocked } - pub fn poll_flush(&mut self, cx: &mut task::Context<'_>) -> Poll> { + pub(crate) fn poll_flush(&mut self, cx: &mut task::Context<'_>) -> Poll> { if self.flush_pipeline && !self.read_buf.is_empty() { Poll::Ready(Ok(())) } else if self.write_buf.remaining() == 0 { @@ -216,9 +297,18 @@ where if let WriteStrategy::Flatten = self.write_buf.strategy { return self.poll_flush_flattened(cx); } + + const MAX_WRITEV_BUFS: usize = 64; loop { - let n = - ready!(Pin::new(&mut self.io).poll_write_buf(cx, &mut self.write_buf.auto()))?; + let n = { + let mut iovs = [IoSlice::new(&[]); MAX_WRITEV_BUFS]; + let len = self.write_buf.chunks_vectored(&mut iovs); + ready!(Pin::new(&mut self.io).poll_write_vectored(cx, &iovs[..len]))? + }; + // TODO(eliza): we have to do this manually because + // `poll_write_buf` doesn't exist in Tokio 0.3 yet...when + // `poll_write_buf` comes back, the manual advance will need to leave! + self.write_buf.advance(n); debug!("flushed {} bytes", n); if self.write_buf.remaining() == 0 { break; @@ -240,7 +330,7 @@ where /// that skips some bookkeeping around using multiple buffers. fn poll_flush_flattened(&mut self, cx: &mut task::Context<'_>) -> Poll> { loop { - let n = ready!(Pin::new(&mut self.io).poll_write(cx, self.write_buf.headers.bytes()))?; + let n = ready!(Pin::new(&mut self.io).poll_write(cx, self.write_buf.headers.chunk()))?; debug!("flushed {} bytes", n); self.write_buf.headers.advance(n); if self.write_buf.headers.remaining() == 0 { @@ -267,7 +357,7 @@ where impl Unpin for Buffered {} // TODO: This trait is old... at least rename to PollBytes or something... -pub trait MemRead { +pub(crate) trait MemRead { fn read_mem(&mut self, cx: &mut task::Context<'_>, len: usize) -> Poll>; } @@ -294,6 +384,7 @@ enum ReadStrategy { next: usize, max: usize, }, + #[cfg(feature = "client")] Exact(usize), } @@ -309,6 +400,7 @@ impl ReadStrategy { fn next(&self) -> usize { match *self { ReadStrategy::Adaptive { next, .. 
} => next, + #[cfg(feature = "client")] ReadStrategy::Exact(exact) => exact, } } @@ -316,38 +408,42 @@ impl ReadStrategy { fn max(&self) -> usize { match *self { ReadStrategy::Adaptive { max, .. } => max, + #[cfg(feature = "client")] ReadStrategy::Exact(exact) => exact, } } fn record(&mut self, bytes_read: usize) { - if let ReadStrategy::Adaptive { - ref mut decrease_now, - ref mut next, - max, - .. - } = *self - { - if bytes_read >= *next { - *next = cmp::min(incr_power_of_two(*next), max); - *decrease_now = false; - } else { - let decr_to = prev_power_of_two(*next); - if bytes_read < decr_to { - if *decrease_now { - *next = cmp::max(decr_to, INIT_BUFFER_SIZE); - *decrease_now = false; - } else { - // Decreasing is a two "record" process. - *decrease_now = true; - } - } else { - // A read within the current range should cancel - // a potential decrease, since we just saw proof - // that we still need this size. + match *self { + ReadStrategy::Adaptive { + ref mut decrease_now, + ref mut next, + max, + .. + } => { + if bytes_read >= *next { + *next = cmp::min(incr_power_of_two(*next), max); *decrease_now = false; + } else { + let decr_to = prev_power_of_two(*next); + if bytes_read < decr_to { + if *decrease_now { + *next = cmp::max(decr_to, INIT_BUFFER_SIZE); + *decrease_now = false; + } else { + // Decreasing is a two "record" process. + *decrease_now = true; + } + } else { + // A read within the current range should cancel + // a potential decrease, since we just saw proof + // that we still need this size. + *decrease_now = false; + } } } + #[cfg(feature = "client")] + ReadStrategy::Exact(_) => (), } } } @@ -370,7 +466,7 @@ impl Default for ReadStrategy { } #[derive(Clone)] -pub struct Cursor { +pub(crate) struct Cursor { bytes: T, pos: usize, } @@ -383,6 +479,24 @@ impl> Cursor { } impl Cursor> { + /// If we've advanced the position a bit in this cursor, and wish to + /// extend the underlying vector, we may wish to unshift the "read" bytes + /// off, and move everything else over. + fn maybe_unshift(&mut self, additional: usize) { + if self.pos == 0 { + // nothing to do + return; + } + + if self.bytes.capacity() - self.bytes.len() >= additional { + // there's room! + return; + } + + self.bytes.drain(0..self.pos); + self.pos = 0; + } + fn reset(&mut self) { self.pos = 0; self.bytes.clear(); @@ -405,7 +519,7 @@ impl> Buf for Cursor { } #[inline] - fn bytes(&self) -> &[u8] { + fn chunk(&self) -> &[u8] { &self.bytes.as_ref()[self.pos..] } @@ -427,12 +541,12 @@ pub(super) struct WriteBuf { } impl WriteBuf { - fn new() -> WriteBuf { + fn new(strategy: WriteStrategy) -> WriteBuf { WriteBuf { headers: Cursor::new(Vec::with_capacity(INIT_BUFFER_SIZE)), max_buf_size: DEFAULT_MAX_BUFFER_SIZE, queue: BufList::new(), - strategy: WriteStrategy::Auto, + strategy, } } } @@ -445,21 +559,23 @@ where self.strategy = strategy; } - #[inline] - fn auto(&mut self) -> WriteBufAuto<'_, B> { - WriteBufAuto::new(self) - } - pub(super) fn buffer>(&mut self, mut buf: BB) { debug_assert!(buf.has_remaining()); match self.strategy { WriteStrategy::Flatten => { let head = self.headers_mut(); + + head.maybe_unshift(buf.remaining()); + trace!( + self.len = head.remaining(), + buf.len = buf.remaining(), + "buffer.flatten" + ); //perf: This is a little faster than >::put, //but accomplishes the same result. 
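Cursor::maybe_unshift above keeps the flattened headers buffer from growing without bound once part of it has been flushed: before extending, it drops the already-written prefix if there is not enough spare capacity. The same idea as a standalone, runnable sketch (the struct is a stand-in for the vendored Cursor<Vec<u8>>):

    struct VecCursor {
        bytes: Vec<u8>,
        pos: usize, // how much of `bytes` has already been written out
    }

    impl VecCursor {
        fn maybe_unshift(&mut self, additional: usize) {
            if self.pos == 0 {
                return; // nothing consumed yet
            }
            if self.bytes.capacity() - self.bytes.len() >= additional {
                return; // enough spare capacity already
            }
            // Drop the consumed prefix and shift the rest to the front.
            self.bytes.drain(0..self.pos);
            self.pos = 0;
        }
    }

    fn main() {
        let mut c = VecCursor { bytes: b"hello world".to_vec(), pos: 6 };
        c.maybe_unshift(1024);
        assert_eq!(&c.bytes, b"world");
        assert_eq!(c.pos, 0);
    }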
loop { let adv = { - let slice = buf.bytes(); + let slice = buf.chunk(); if slice.is_empty() { return; } @@ -469,7 +585,12 @@ where buf.advance(adv); } } - WriteStrategy::Auto | WriteStrategy::Queue => { + WriteStrategy::Queue => { + trace!( + self.len = self.remaining(), + buf.len = buf.remaining(), + "buffer.queue" + ); self.queue.push(buf.into()); } } @@ -478,7 +599,7 @@ where fn can_buffer(&self) -> bool { match self.strategy { WriteStrategy::Flatten => self.remaining() < self.max_buf_size, - WriteStrategy::Auto | WriteStrategy::Queue => { + WriteStrategy::Queue => { self.queue.bufs_cnt() < MAX_BUF_LIST_BUFFERS && self.remaining() < self.max_buf_size } } @@ -506,12 +627,12 @@ impl Buf for WriteBuf { } #[inline] - fn bytes(&self) -> &[u8] { - let headers = self.headers.bytes(); + fn chunk(&self) -> &[u8] { + let headers = self.headers.chunk(); if !headers.is_empty() { headers } else { - self.queue.bytes() + self.queue.chunk() } } @@ -531,71 +652,14 @@ impl Buf for WriteBuf { } #[inline] - fn bytes_vectored<'t>(&'t self, dst: &mut [IoSlice<'t>]) -> usize { - let n = self.headers.bytes_vectored(dst); - self.queue.bytes_vectored(&mut dst[n..]) + n - } -} - -/// Detects when wrapped `WriteBuf` is used for vectored IO, and -/// adjusts the `WriteBuf` strategy if not. -struct WriteBufAuto<'a, B: Buf> { - bytes_called: Cell, - bytes_vec_called: Cell, - inner: &'a mut WriteBuf, -} - -impl<'a, B: Buf> WriteBufAuto<'a, B> { - fn new(inner: &'a mut WriteBuf) -> WriteBufAuto<'a, B> { - WriteBufAuto { - bytes_called: Cell::new(false), - bytes_vec_called: Cell::new(false), - inner, - } - } -} - -impl<'a, B: Buf> Buf for WriteBufAuto<'a, B> { - #[inline] - fn remaining(&self) -> usize { - self.inner.remaining() - } - - #[inline] - fn bytes(&self) -> &[u8] { - self.bytes_called.set(true); - self.inner.bytes() - } - - #[inline] - fn advance(&mut self, cnt: usize) { - self.inner.advance(cnt) - } - - #[inline] - fn bytes_vectored<'t>(&'t self, dst: &mut [IoSlice<'t>]) -> usize { - self.bytes_vec_called.set(true); - self.inner.bytes_vectored(dst) - } -} - -impl<'a, B: Buf + 'a> Drop for WriteBufAuto<'a, B> { - fn drop(&mut self) { - if let WriteStrategy::Auto = self.inner.strategy { - if self.bytes_vec_called.get() { - self.inner.strategy = WriteStrategy::Queue; - } else if self.bytes_called.get() { - trace!("detected no usage of vectored write, flattening"); - self.inner.strategy = WriteStrategy::Flatten; - self.inner.headers.bytes.put(&mut self.inner.queue); - } - } + fn chunks_vectored<'t>(&'t self, dst: &mut [IoSlice<'t>]) -> usize { + let n = self.headers.chunks_vectored(dst); + self.queue.chunks_vectored(&mut dst[n..]) + n } } #[derive(Debug)] enum WriteStrategy { - Auto, Flatten, Queue, } @@ -607,8 +671,8 @@ mod tests { use tokio_test::io::Builder as Mock; - #[cfg(feature = "nightly")] - use test::Bencher; + // #[cfg(feature = "nightly")] + // use test::Bencher; /* impl MemRead for AsyncIo { @@ -621,28 +685,31 @@ mod tests { */ #[tokio::test] + #[ignore] async fn iobuf_write_empty_slice() { - // First, let's just check that the Mock would normally return an - // error on an unexpected write, even if the buffer is empty... 
- let mut mock = Mock::new().build(); - futures_util::future::poll_fn(|cx| { - Pin::new(&mut mock).poll_write_buf(cx, &mut Cursor::new(&[])) - }) - .await - .expect_err("should be a broken pipe"); + // TODO(eliza): can i have writev back pls T_T + // // First, let's just check that the Mock would normally return an + // // error on an unexpected write, even if the buffer is empty... + // let mut mock = Mock::new().build(); + // futures_util::future::poll_fn(|cx| { + // Pin::new(&mut mock).poll_write_buf(cx, &mut Cursor::new(&[])) + // }) + // .await + // .expect_err("should be a broken pipe"); - // underlying io will return the logic error upon write, - // so we are testing that the io_buf does not trigger a write - // when there is nothing to flush - let mock = Mock::new().build(); - let mut io_buf = Buffered::<_, Cursor>>::new(mock); - io_buf.flush().await.expect("should short-circuit flush"); + // // underlying io will return the logic error upon write, + // // so we are testing that the io_buf does not trigger a write + // // when there is nothing to flush + // let mock = Mock::new().build(); + // let mut io_buf = Buffered::<_, Cursor>>::new(mock); + // io_buf.flush().await.expect("should short-circuit flush"); } #[tokio::test] async fn parse_reads_until_blocked() { use crate::proto::h1::ClientTransaction; + let _ = pretty_env_logger::try_init(); let mock = Mock::new() // Split over multiple reads will read all of it .read(b"HTTP/1.1 200 OK\r\n") @@ -659,6 +726,16 @@ mod tests { let parse_ctx = ParseContext { cached_headers: &mut None, req_method: &mut None, + h1_parser_config: Default::default(), + h1_header_read_timeout: None, + h1_header_read_timeout_fut: &mut None, + h1_header_read_timeout_running: &mut false, + preserve_header_case: false, + h09_responses: false, + #[cfg(feature = "ffi")] + on_informational: &mut None, + #[cfg(feature = "ffi")] + raw_headers: false, }; assert!(buffered .parse::(cx, parse_ctx) @@ -818,7 +895,6 @@ mod tests { let _ = pretty_env_logger::try_init(); let mock = Mock::new() - // Just a single write .write(b"hello world, it's hyper!") .build(); @@ -834,31 +910,39 @@ mod tests { buffered.flush().await.expect("flush"); } - #[tokio::test] - async fn write_buf_auto_flatten() { + #[test] + fn write_buf_flatten_partially_flushed() { let _ = pretty_env_logger::try_init(); - let mock = Mock::new() - // Expects write_buf to only consume first buffer - .write(b"hello ") - // And then the Auto strategy will have flattened - .write(b"world, it's hyper!") - .build(); + let b = |s: &str| Cursor::new(s.as_bytes().to_vec()); - let mut buffered = Buffered::<_, Cursor>>::new(mock); + let mut write_buf = WriteBuf::>>::new(WriteStrategy::Flatten); - // we have 4 buffers, but hope to detect that vectored IO isn't - // being used, and switch to flattening automatically, - // resulting in only 2 writes - buffered.headers_buf().extend(b"hello "); - buffered.buffer(Cursor::new(b"world, ".to_vec())); - buffered.buffer(Cursor::new(b"it's ".to_vec())); - buffered.buffer(Cursor::new(b"hyper!".to_vec())); - assert_eq!(buffered.write_buf.queue.bufs_cnt(), 3); + write_buf.buffer(b("hello ")); + write_buf.buffer(b("world, ")); - buffered.flush().await.expect("flush"); + assert_eq!(write_buf.chunk(), b"hello world, "); - assert_eq!(buffered.write_buf.queue.bufs_cnt(), 0); + // advance most of the way, but not all + write_buf.advance(11); + + assert_eq!(write_buf.chunk(), b", "); + assert_eq!(write_buf.headers.pos, 11); + assert_eq!(write_buf.headers.bytes.capacity(), INIT_BUFFER_SIZE); + 
+ // there's still room in the headers buffer, so just push on the end + write_buf.buffer(b("it's hyper!")); + + assert_eq!(write_buf.chunk(), b", it's hyper!"); + assert_eq!(write_buf.headers.pos, 11); + + let rem1 = write_buf.remaining(); + let cap = write_buf.headers.bytes.capacity(); + + // but when this would go over capacity, don't copy the old bytes + write_buf.buffer(Cursor::new(vec![b'X'; cap])); + assert_eq!(write_buf.remaining(), cap + rem1); + assert_eq!(write_buf.headers.pos, 0); } #[tokio::test] @@ -889,19 +973,19 @@ mod tests { assert_eq!(buffered.write_buf.queue.bufs_cnt(), 0); } - #[cfg(feature = "nightly")] - #[bench] - fn bench_write_buf_flatten_buffer_chunk(b: &mut Bencher) { - let s = "Hello, World!"; - b.bytes = s.len() as u64; + // #[cfg(feature = "nightly")] + // #[bench] + // fn bench_write_buf_flatten_buffer_chunk(b: &mut Bencher) { + // let s = "Hello, World!"; + // b.bytes = s.len() as u64; - let mut write_buf = WriteBuf::::new(); - write_buf.set_strategy(WriteStrategy::Flatten); - b.iter(|| { - let chunk = bytes::Bytes::from(s); - write_buf.buffer(chunk); - ::test::black_box(&write_buf); - write_buf.headers.bytes.clear(); - }) - } + // let mut write_buf = WriteBuf::::new(); + // write_buf.set_strategy(WriteStrategy::Flatten); + // b.iter(|| { + // let chunk = bytes::Bytes::from(s); + // write_buf.buffer(chunk); + // ::test::black_box(&write_buf); + // write_buf.headers.bytes.clear(); + // }) + // } } diff --git a/third_party/rust/hyper/src/proto/h1/mod.rs b/third_party/rust/hyper/src/proto/h1/mod.rs index 2d0bf39bc9e5..06d03bf5f164 100644 --- a/third_party/rust/hyper/src/proto/h1/mod.rs +++ b/third_party/rust/hyper/src/proto/h1/mod.rs @@ -1,25 +1,37 @@ +#[cfg(all(feature = "server", feature = "runtime"))] +use std::{pin::Pin, time::Duration}; + use bytes::BytesMut; use http::{HeaderMap, Method}; +use httparse::ParserConfig; +#[cfg(all(feature = "server", feature = "runtime"))] +use tokio::time::Sleep; -use crate::proto::{BodyLength, DecodedLength, MessageHead}; +use crate::body::DecodedLength; +use crate::proto::{BodyLength, MessageHead}; pub(crate) use self::conn::Conn; -pub use self::decode::Decoder; +pub(crate) use self::decode::Decoder; pub(crate) use self::dispatch::Dispatcher; -pub use self::encode::{EncodedBuf, Encoder}; -pub use self::io::Cursor; //TODO: move out of h1::io -pub use self::io::MINIMUM_MAX_BUFFER_SIZE; +pub(crate) use self::encode::{EncodedBuf, Encoder}; + //TODO: move out of h1::io +pub(crate) use self::io::MINIMUM_MAX_BUFFER_SIZE; mod conn; -pub(super) mod date; mod decode; pub(crate) mod dispatch; mod encode; mod io; mod role; -pub(crate) type ServerTransaction = role::Server; -pub(crate) type ClientTransaction = role::Client; + +cfg_client! { + pub(crate) type ClientTransaction = role::Client; +} + +cfg_server! 
{ + pub(crate) type ServerTransaction = role::Server; +} pub(crate) trait Http1Transaction { type Incoming; @@ -64,12 +76,26 @@ pub(crate) struct ParsedMessage { pub(crate) struct ParseContext<'a> { cached_headers: &'a mut Option, req_method: &'a mut Option, + h1_parser_config: ParserConfig, + #[cfg(all(feature = "server", feature = "runtime"))] + h1_header_read_timeout: Option, + #[cfg(all(feature = "server", feature = "runtime"))] + h1_header_read_timeout_fut: &'a mut Option>>, + #[cfg(all(feature = "server", feature = "runtime"))] + h1_header_read_timeout_running: &'a mut bool, + preserve_header_case: bool, + h09_responses: bool, + #[cfg(feature = "ffi")] + on_informational: &'a mut Option, + #[cfg(feature = "ffi")] + raw_headers: bool, } /// Passed to Http1Transaction::encode pub(crate) struct Encode<'a, T> { head: &'a mut MessageHead, body: Option, + #[cfg(feature = "server")] keep_alive: bool, req_method: &'a mut Option, title_case_headers: bool, diff --git a/third_party/rust/hyper/src/proto/h1/role.rs b/third_party/rust/hyper/src/proto/h1/role.rs index 46b8b2646e13..968b63cb8eae 100644 --- a/third_party/rust/hyper/src/proto/h1/role.rs +++ b/third_party/rust/hyper/src/proto/h1/role.rs @@ -1,78 +1,129 @@ -// `mem::uninitialized` replaced with `mem::MaybeUninit`, -// can't upgrade yet -#![allow(deprecated)] - use std::fmt::{self, Write}; -use std::mem; +use std::mem::MaybeUninit; +#[cfg(all(feature = "server", feature = "runtime"))] +use tokio::time::Instant; +#[cfg(any(test, feature = "server", feature = "ffi"))] +use bytes::Bytes; use bytes::BytesMut; +#[cfg(feature = "server")] +use http::header::ValueIter; use http::header::{self, Entry, HeaderName, HeaderValue}; use http::{HeaderMap, Method, StatusCode, Version}; +use tracing::{debug, error, trace, trace_span, warn}; +use crate::body::DecodedLength; +#[cfg(feature = "server")] +use crate::common::date; use crate::error::Parse; +use crate::ext::HeaderCaseMap; use crate::headers; use crate::proto::h1::{ - date, Encode, Encoder, Http1Transaction, ParseContext, ParseResult, ParsedMessage, + Encode, Encoder, Http1Transaction, ParseContext, ParseResult, ParsedMessage, }; -use crate::proto::{BodyLength, DecodedLength, MessageHead, RequestHead, RequestLine}; +use crate::proto::{BodyLength, MessageHead, RequestHead, RequestLine}; const MAX_HEADERS: usize = 100; const AVERAGE_HEADER_SIZE: usize = 30; // totally scientific +#[cfg(feature = "server")] +const MAX_URI_LEN: usize = (u16::MAX - 1) as usize; macro_rules! header_name { ($bytes:expr) => {{ - #[cfg(debug_assertions)] { match HeaderName::from_bytes($bytes) { Ok(name) => name, - Err(_) => panic!( - "illegal header name from httparse: {:?}", - ::bytes::Bytes::copy_from_slice($bytes) - ), + Err(e) => maybe_panic!(e), } } - - #[cfg(not(debug_assertions))] - { - HeaderName::from_bytes($bytes).expect("header name validated by httparse") - } }}; } macro_rules! header_value { ($bytes:expr) => {{ - #[cfg(debug_assertions)] { - let __hvb: ::bytes::Bytes = $bytes; - match HeaderValue::from_maybe_shared(__hvb.clone()) { - Ok(name) => name, - Err(_) => panic!("illegal header value from httparse: {:?}", __hvb), - } - } - - #[cfg(not(debug_assertions))] - { - // Unsafe: httparse already validated header value unsafe { HeaderValue::from_maybe_shared_unchecked($bytes) } } }}; } +macro_rules! 
maybe_panic { + ($($arg:tt)*) => ({ + let _err = ($($arg)*); + if cfg!(debug_assertions) { + panic!("{:?}", _err); + } else { + error!("Internal Hyper error, please report {:?}", _err); + return Err(Parse::Internal) + } + }) +} + +pub(super) fn parse_headers( + bytes: &mut BytesMut, + ctx: ParseContext<'_>, +) -> ParseResult +where + T: Http1Transaction, +{ + // If the buffer is empty, don't bother entering the span, it's just noise. + if bytes.is_empty() { + return Ok(None); + } + + let span = trace_span!("parse_headers"); + let _s = span.enter(); + + #[cfg(all(feature = "server", feature = "runtime"))] + if !*ctx.h1_header_read_timeout_running { + if let Some(h1_header_read_timeout) = ctx.h1_header_read_timeout { + let deadline = Instant::now() + h1_header_read_timeout; + + match ctx.h1_header_read_timeout_fut { + Some(h1_header_read_timeout_fut) => { + debug!("resetting h1 header read timeout timer"); + h1_header_read_timeout_fut.as_mut().reset(deadline); + } + None => { + debug!("setting h1 header read timeout timer"); + *ctx.h1_header_read_timeout_fut = + Some(Box::pin(tokio::time::sleep_until(deadline))); + } + } + } + } + + T::parse(bytes, ctx) +} + +pub(super) fn encode_headers( + enc: Encode<'_, T::Outgoing>, + dst: &mut Vec, +) -> crate::Result +where + T: Http1Transaction, +{ + let span = trace_span!("encode_headers"); + let _s = span.enter(); + T::encode(enc, dst) +} + // There are 2 main roles, Client and Server. +#[cfg(feature = "client")] pub(crate) enum Client {} +#[cfg(feature = "server")] pub(crate) enum Server {} +#[cfg(feature = "server")] impl Http1Transaction for Server { type Incoming = RequestLine; type Outgoing = StatusCode; const LOG: &'static str = "{role=server}"; fn parse(buf: &mut BytesMut, ctx: ParseContext<'_>) -> ParseResult { - if buf.is_empty() { - return Ok(None); - } + debug_assert!(!buf.is_empty(), "parse called with empty buf"); let mut keep_alive; let is_http_11; @@ -85,23 +136,28 @@ impl Http1Transaction for Server { // but we *never* read any of it until after httparse has assigned // values into it. By not zeroing out the stack memory, this saves // a good ~5% on pipeline benchmarks. 
- let mut headers_indices: [HeaderIndices; MAX_HEADERS] = unsafe { mem::uninitialized() }; + let mut headers_indices: [MaybeUninit; MAX_HEADERS] = unsafe { + // SAFETY: We can go safely from MaybeUninit array to array of MaybeUninit + MaybeUninit::uninit().assume_init() + }; { - let mut headers: [httparse::Header<'_>; MAX_HEADERS] = unsafe { mem::uninitialized() }; - trace!( - "Request.parse([Header; {}], [u8; {}])", - headers.len(), - buf.len() - ); - let mut req = httparse::Request::new(&mut headers); + /* SAFETY: it is safe to go from MaybeUninit array to array of MaybeUninit */ + let mut headers: [MaybeUninit>; MAX_HEADERS] = + unsafe { MaybeUninit::uninit().assume_init() }; + trace!(bytes = buf.len(), "Request.parse"); + let mut req = httparse::Request::new(&mut []); let bytes = buf.as_ref(); - match req.parse(bytes) { + match req.parse_with_uninit_headers(bytes, &mut headers) { Ok(httparse::Status::Complete(parsed_len)) => { trace!("Request.parse Complete({})", parsed_len); len = parsed_len; + let uri = req.path.unwrap(); + if uri.len() > MAX_URI_LEN { + return Err(Parse::UriTooLong); + } subject = RequestLine( Method::from_bytes(req.method.unwrap().as_bytes())?, - req.path.unwrap().parse()?, + uri.parse()?, ); version = if req.version.unwrap() == 1 { keep_alive = true; @@ -152,11 +208,19 @@ impl Http1Transaction for Server { let mut is_te_chunked = false; let mut wants_upgrade = subject.0 == Method::CONNECT; + let mut header_case_map = if ctx.preserve_header_case { + Some(HeaderCaseMap::default()) + } else { + None + }; + let mut headers = ctx.cached_headers.take().unwrap_or_else(HeaderMap::new); headers.reserve(headers_len); for header in &headers_indices[..headers_len] { + // SAFETY: array is valid up to `headers_len` + let header = unsafe { &*header.as_ptr() }; let name = header_name!(&slice[header.name.0..header.name.1]); let value = header_value!(slice.slice(header.value.0..header.value.1)); @@ -168,29 +232,29 @@ impl Http1Transaction for Server { // malformed. A server should respond with 400 Bad Request. 
if !is_http_11 { debug!("HTTP/1.0 cannot have Transfer-Encoding header"); - return Err(Parse::Header); + return Err(Parse::transfer_encoding_unexpected()); } is_te = true; if headers::is_chunked_(&value) { is_te_chunked = true; decoder = DecodedLength::CHUNKED; + } else { + is_te_chunked = false; } } header::CONTENT_LENGTH => { if is_te { continue; } - let len = value - .to_str() - .map_err(|_| Parse::Header) - .and_then(|s| s.parse().map_err(|_| Parse::Header))?; + let len = headers::content_length_parse(&value) + .ok_or_else(Parse::content_length_invalid)?; if let Some(prev) = con_len { if prev != len { debug!( "multiple Content-Length headers with different values: [{}, {}]", prev, len, ); - return Err(Parse::Header); + return Err(Parse::content_length_invalid()); } // we don't need to append this secondary length continue; @@ -209,7 +273,10 @@ impl Http1Transaction for Server { } } header::EXPECT => { - expect_continue = value.as_bytes() == b"100-continue"; + // According to https://datatracker.ietf.org/doc/html/rfc2616#section-14.20 + // Comparison of expectation values is case-insensitive for unquoted tokens + // (including the 100-continue token) + expect_continue = value.as_bytes().eq_ignore_ascii_case(b"100-continue"); } header::UPGRADE => { // Upgrades are only allowed with HTTP/1.1 @@ -219,12 +286,22 @@ impl Http1Transaction for Server { _ => (), } + if let Some(ref mut header_case_map) = header_case_map { + header_case_map.append(&name, slice.slice(header.name.0..header.name.1)); + } + headers.append(name, value); } if is_te && !is_te_chunked { debug!("request with transfer-encoding header, but not chunked, bad request"); - return Err(Parse::Header); + return Err(Parse::transfer_encoding_invalid()); + } + + let mut extensions = http::Extensions::default(); + + if let Some(header_case_map) = header_case_map { + extensions.insert(header_case_map); } *ctx.req_method = Some(subject.0.clone()); @@ -234,6 +311,7 @@ impl Http1Transaction for Server { version, subject, headers, + extensions, }, decode: decoder, expect_continue, @@ -242,20 +320,13 @@ impl Http1Transaction for Server { })) } - fn encode( - mut msg: Encode<'_, Self::Outgoing>, - mut dst: &mut Vec, - ) -> crate::Result { + fn encode(mut msg: Encode<'_, Self::Outgoing>, dst: &mut Vec) -> crate::Result { trace!( "Server::encode status={:?}, body={:?}, req_method={:?}", msg.head.subject, msg.body, msg.req_method ); - debug_assert!( - !msg.title_case_headers, - "no server config for title case headers" - ); let mut wrote_len = false; @@ -263,7 +334,7 @@ impl Http1Transaction for Server { // This is because Service only allows returning a single Response, and // so if you try to reply with a e.g. 100 Continue, you have no way of // replying with the latter status code response. - let (ret, mut is_last) = if msg.head.subject == StatusCode::SWITCHING_PROTOCOLS { + let (ret, is_last) = if msg.head.subject == StatusCode::SWITCHING_PROTOCOLS { (Ok(()), true) } else if msg.req_method == &Some(Method::CONNECT) && msg.head.subject.is_success() { // Sending content-length or transfer-encoding header on 2xx response @@ -284,9 +355,6 @@ impl Http1Transaction for Server { // pushing some bytes onto the `dst`. In those cases, we don't want to send // the half-pushed message, so rewind to before. 
let orig_len = dst.len(); - let rewind = |dst: &mut Vec| { - dst.truncate(orig_len); - }; let init_cap = 30 + msg.head.headers.len() * AVERAGE_HEADER_SIZE; dst.reserve(init_cap); @@ -297,7 +365,7 @@ impl Http1Transaction for Server { Version::HTTP_10 => extend(dst, b"HTTP/1.0 "), Version::HTTP_11 => extend(dst, b"HTTP/1.1 "), Version::HTTP_2 => { - warn!("response with HTTP2 version coerced to HTTP/1.1"); + debug!("response with HTTP2 version coerced to HTTP/1.1"); extend(dst, b"HTTP/1.1 "); } other => panic!("unexpected response version: {:?}", other), @@ -317,6 +385,218 @@ impl Http1Transaction for Server { extend(dst, b"\r\n"); } + let orig_headers; + let extensions = std::mem::take(&mut msg.head.extensions); + let orig_headers = match extensions.get::() { + None if msg.title_case_headers => { + orig_headers = HeaderCaseMap::default(); + Some(&orig_headers) + } + orig_headers => orig_headers, + }; + let encoder = if let Some(orig_headers) = orig_headers { + Self::encode_headers_with_original_case( + msg, + dst, + is_last, + orig_len, + wrote_len, + orig_headers, + )? + } else { + Self::encode_headers_with_lower_case(msg, dst, is_last, orig_len, wrote_len)? + }; + + ret.map(|()| encoder) + } + + fn on_error(err: &crate::Error) -> Option> { + use crate::error::Kind; + let status = match *err.kind() { + Kind::Parse(Parse::Method) + | Kind::Parse(Parse::Header(_)) + | Kind::Parse(Parse::Uri) + | Kind::Parse(Parse::Version) => StatusCode::BAD_REQUEST, + Kind::Parse(Parse::TooLarge) => StatusCode::REQUEST_HEADER_FIELDS_TOO_LARGE, + Kind::Parse(Parse::UriTooLong) => StatusCode::URI_TOO_LONG, + _ => return None, + }; + + debug!("sending automatic response ({}) for parse error", status); + let mut msg = MessageHead::default(); + msg.subject = status; + Some(msg) + } + + fn is_server() -> bool { + true + } + + fn update_date() { + date::update(); + } +} + +#[cfg(feature = "server")] +impl Server { + fn can_have_body(method: &Option, status: StatusCode) -> bool { + Server::can_chunked(method, status) + } + + fn can_chunked(method: &Option, status: StatusCode) -> bool { + if method == &Some(Method::HEAD) || method == &Some(Method::CONNECT) && status.is_success() + { + false + } else if status.is_informational() { + false + } else { + match status { + StatusCode::NO_CONTENT | StatusCode::NOT_MODIFIED => false, + _ => true, + } + } + } + + fn can_have_content_length(method: &Option, status: StatusCode) -> bool { + if status.is_informational() || method == &Some(Method::CONNECT) && status.is_success() { + false + } else { + match status { + StatusCode::NO_CONTENT | StatusCode::NOT_MODIFIED => false, + _ => true, + } + } + } + + fn encode_headers_with_lower_case( + msg: Encode<'_, StatusCode>, + dst: &mut Vec, + is_last: bool, + orig_len: usize, + wrote_len: bool, + ) -> crate::Result { + struct LowercaseWriter; + + impl HeaderNameWriter for LowercaseWriter { + #[inline] + fn write_full_header_line( + &mut self, + dst: &mut Vec, + line: &str, + _: (HeaderName, &str), + ) { + extend(dst, line.as_bytes()) + } + + #[inline] + fn write_header_name_with_colon( + &mut self, + dst: &mut Vec, + name_with_colon: &str, + _: HeaderName, + ) { + extend(dst, name_with_colon.as_bytes()) + } + + #[inline] + fn write_header_name(&mut self, dst: &mut Vec, name: &HeaderName) { + extend(dst, name.as_str().as_bytes()) + } + } + + Self::encode_headers(msg, dst, is_last, orig_len, wrote_len, LowercaseWriter) + } + + #[cold] + #[inline(never)] + fn encode_headers_with_original_case( + msg: Encode<'_, StatusCode>, + dst: &mut 
Vec, + is_last: bool, + orig_len: usize, + wrote_len: bool, + orig_headers: &HeaderCaseMap, + ) -> crate::Result { + struct OrigCaseWriter<'map> { + map: &'map HeaderCaseMap, + current: Option<(HeaderName, ValueIter<'map, Bytes>)>, + title_case_headers: bool, + } + + impl HeaderNameWriter for OrigCaseWriter<'_> { + #[inline] + fn write_full_header_line( + &mut self, + dst: &mut Vec, + _: &str, + (name, rest): (HeaderName, &str), + ) { + self.write_header_name(dst, &name); + extend(dst, rest.as_bytes()); + } + + #[inline] + fn write_header_name_with_colon( + &mut self, + dst: &mut Vec, + _: &str, + name: HeaderName, + ) { + self.write_header_name(dst, &name); + extend(dst, b": "); + } + + #[inline] + fn write_header_name(&mut self, dst: &mut Vec, name: &HeaderName) { + let Self { + map, + ref mut current, + title_case_headers, + } = *self; + if current.as_ref().map_or(true, |(last, _)| last != name) { + *current = None; + } + let (_, values) = + current.get_or_insert_with(|| (name.clone(), map.get_all_internal(name))); + + if let Some(orig_name) = values.next() { + extend(dst, orig_name); + } else if title_case_headers { + title_case(dst, name.as_str().as_bytes()); + } else { + extend(dst, name.as_str().as_bytes()); + } + } + } + + let header_name_writer = OrigCaseWriter { + map: orig_headers, + current: None, + title_case_headers: msg.title_case_headers, + }; + + Self::encode_headers(msg, dst, is_last, orig_len, wrote_len, header_name_writer) + } + + #[inline] + fn encode_headers( + msg: Encode<'_, StatusCode>, + dst: &mut Vec, + mut is_last: bool, + orig_len: usize, + mut wrote_len: bool, + mut header_name_writer: W, + ) -> crate::Result + where + W: HeaderNameWriter, + { + // In some error cases, we don't know about the invalid message until already + // pushing some bytes onto the `dst`. In those cases, we don't want to send + // the half-pushed message, so rewind to before. + let rewind = |dst: &mut Vec| { + dst.truncate(orig_len); + }; + let mut encoder = Encoder::length(0); let mut wrote_date = false; let mut cur_name = None; @@ -380,7 +660,11 @@ impl Http1Transaction for Server { if !is_name_written { encoder = Encoder::length(known_len); - extend(dst, b"content-length: "); + header_name_writer.write_header_name_with_colon( + dst, + "content-length: ", + header::CONTENT_LENGTH, + ); extend(dst, value.as_bytes()); wrote_len = true; is_name_written = true; @@ -408,7 +692,11 @@ impl Http1Transaction for Server { } else { // we haven't written content-length yet! 
encoder = Encoder::length(len); - extend(dst, b"content-length: "); + header_name_writer.write_header_name_with_colon( + dst, + "content-length: ", + header::CONTENT_LENGTH, + ); extend(dst, value.as_bytes()); wrote_len = true; is_name_written = true; @@ -463,7 +751,11 @@ impl Http1Transaction for Server { if !is_name_written { encoder = Encoder::chunked(); is_name_written = true; - extend(dst, b"transfer-encoding: "); + header_name_writer.write_header_name_with_colon( + dst, + "transfer-encoding: ", + header::TRANSFER_ENCODING, + ); extend(dst, value.as_bytes()); } else { extend(dst, b", "); @@ -477,7 +769,11 @@ impl Http1Transaction for Server { } if !is_name_written { is_name_written = true; - extend(dst, b"connection: "); + header_name_writer.write_header_name_with_colon( + dst, + "connection: ", + header::CONNECTION, + ); extend(dst, value.as_bytes()); } else { extend(dst, b", "); @@ -499,7 +795,7 @@ impl Http1Transaction for Server { "{:?} set is_name_written and didn't continue loop", name, ); - extend(dst, name.as_str().as_bytes()); + header_name_writer.write_header_name(dst, name); extend(dst, b": "); extend(dst, value.as_bytes()); extend(dst, b"\r\n"); @@ -515,22 +811,34 @@ impl Http1Transaction for Server { { Encoder::close_delimited() } else { - extend(dst, b"transfer-encoding: chunked\r\n"); + header_name_writer.write_full_header_line( + dst, + "transfer-encoding: chunked\r\n", + (header::TRANSFER_ENCODING, ": chunked\r\n"), + ); Encoder::chunked() } } None | Some(BodyLength::Known(0)) => { - if msg.head.subject != StatusCode::NOT_MODIFIED { - extend(dst, b"content-length: 0\r\n"); + if Server::can_have_content_length(msg.req_method, msg.head.subject) { + header_name_writer.write_full_header_line( + dst, + "content-length: 0\r\n", + (header::CONTENT_LENGTH, ": 0\r\n"), + ) } Encoder::length(0) } Some(BodyLength::Known(len)) => { - if msg.head.subject == StatusCode::NOT_MODIFIED { + if !Server::can_have_content_length(msg.req_method, msg.head.subject) { Encoder::length(0) } else { - extend(dst, b"content-length: "); - let _ = ::itoa::write(&mut dst, len); + header_name_writer.write_header_name_with_colon( + dst, + "content-length: ", + header::CONTENT_LENGTH, + ); + extend(dst, ::itoa::Buffer::new().format(len).as_bytes()); extend(dst, b"\r\n"); Encoder::length(len) } @@ -550,91 +858,79 @@ impl Http1Transaction for Server { // cached date is much faster than formatting every request if !wrote_date { dst.reserve(date::DATE_VALUE_LENGTH + 8); - extend(dst, b"date: "); + header_name_writer.write_header_name_with_colon(dst, "date: ", header::DATE); date::extend(dst); extend(dst, b"\r\n\r\n"); } else { extend(dst, b"\r\n"); } - ret.map(|()| encoder.set_last(is_last)) - } - - fn on_error(err: &crate::Error) -> Option> { - use crate::error::Kind; - let status = match *err.kind() { - Kind::Parse(Parse::Method) - | Kind::Parse(Parse::Header) - | Kind::Parse(Parse::Uri) - | Kind::Parse(Parse::Version) => StatusCode::BAD_REQUEST, - Kind::Parse(Parse::TooLarge) => StatusCode::REQUEST_HEADER_FIELDS_TOO_LARGE, - _ => return None, - }; - - debug!("sending automatic response ({}) for parse error", status); - let mut msg = MessageHead::default(); - msg.subject = status; - Some(msg) - } - - fn is_server() -> bool { - true - } - - fn update_date() { - date::update(); + Ok(encoder.set_last(is_last)) } } -impl Server { - fn can_have_body(method: &Option, status: StatusCode) -> bool { - Server::can_chunked(method, status) - } - - fn can_chunked(method: &Option, status: StatusCode) -> bool { - if 
method == &Some(Method::HEAD) || method == &Some(Method::CONNECT) && status.is_success() - { - false - } else { - match status { - // TODO: support for 1xx codes needs improvement everywhere - // would be 100...199 => false - StatusCode::SWITCHING_PROTOCOLS - | StatusCode::NO_CONTENT - | StatusCode::NOT_MODIFIED => false, - _ => true, - } - } - } +#[cfg(feature = "server")] +trait HeaderNameWriter { + fn write_full_header_line( + &mut self, + dst: &mut Vec, + line: &str, + name_value_pair: (HeaderName, &str), + ); + fn write_header_name_with_colon( + &mut self, + dst: &mut Vec, + name_with_colon: &str, + name: HeaderName, + ); + fn write_header_name(&mut self, dst: &mut Vec, name: &HeaderName); } +#[cfg(feature = "client")] impl Http1Transaction for Client { type Incoming = StatusCode; type Outgoing = RequestLine; const LOG: &'static str = "{role=client}"; fn parse(buf: &mut BytesMut, ctx: ParseContext<'_>) -> ParseResult { + debug_assert!(!buf.is_empty(), "parse called with empty buf"); + // Loop to skip information status code headers (100 Continue, etc). loop { - if buf.is_empty() { - return Ok(None); - } // Unsafe: see comment in Server Http1Transaction, above. - let mut headers_indices: [HeaderIndices; MAX_HEADERS] = unsafe { mem::uninitialized() }; - let (len, status, version, headers_len) = { - let mut headers: [httparse::Header<'_>; MAX_HEADERS] = - unsafe { mem::uninitialized() }; - trace!( - "Response.parse([Header; {}], [u8; {}])", - headers.len(), - buf.len() - ); - let mut res = httparse::Response::new(&mut headers); + let mut headers_indices: [MaybeUninit; MAX_HEADERS] = unsafe { + // SAFETY: We can go safely from MaybeUninit array to array of MaybeUninit + MaybeUninit::uninit().assume_init() + }; + let (len, status, reason, version, headers_len) = { + // SAFETY: We can go safely from MaybeUninit array to array of MaybeUninit + let mut headers: [MaybeUninit>; MAX_HEADERS] = + unsafe { MaybeUninit::uninit().assume_init() }; + trace!(bytes = buf.len(), "Response.parse"); + let mut res = httparse::Response::new(&mut []); let bytes = buf.as_ref(); - match res.parse(bytes)? 
{ - httparse::Status::Complete(len) => { + match ctx.h1_parser_config.parse_response_with_uninit_headers( + &mut res, + bytes, + &mut headers, + ) { + Ok(httparse::Status::Complete(len)) => { trace!("Response.parse Complete({})", len); let status = StatusCode::from_u16(res.code.unwrap())?; + + #[cfg(not(feature = "ffi"))] + let reason = (); + #[cfg(feature = "ffi")] + let reason = { + let reason = res.reason.unwrap(); + // Only save the reason phrase if it isnt the canonical reason + if Some(reason) != status.canonical_reason() { + Some(Bytes::copy_from_slice(reason.as_bytes())) + } else { + None + } + }; + let version = if res.version.unwrap() == 1 { Version::HTTP_11 } else { @@ -642,20 +938,53 @@ impl Http1Transaction for Client { }; record_header_indices(bytes, &res.headers, &mut headers_indices)?; let headers_len = res.headers.len(); - (len, status, version, headers_len) + (len, status, reason, version, headers_len) } - httparse::Status::Partial => return Ok(None), + Ok(httparse::Status::Partial) => return Ok(None), + Err(httparse::Error::Version) if ctx.h09_responses => { + trace!("Response.parse accepted HTTP/0.9 response"); + + #[cfg(not(feature = "ffi"))] + let reason = (); + #[cfg(feature = "ffi")] + let reason = None; + + (0, StatusCode::OK, reason, Version::HTTP_09, 0) + } + Err(e) => return Err(e.into()), } }; - let slice = buf.split_to(len).freeze(); + let mut slice = buf.split_to(len); + + if ctx.h1_parser_config.obsolete_multiline_headers_in_responses_are_allowed() { + for header in &headers_indices[..headers_len] { + // SAFETY: array is valid up to `headers_len` + let header = unsafe { &*header.as_ptr() }; + for b in &mut slice[header.value.0..header.value.1] { + if *b == b'\r' || *b == b'\n' { + *b = b' '; + } + } + } + } + + let slice = slice.freeze(); let mut headers = ctx.cached_headers.take().unwrap_or_else(HeaderMap::new); let mut keep_alive = version == Version::HTTP_11; + let mut header_case_map = if ctx.preserve_header_case { + Some(HeaderCaseMap::default()) + } else { + None + }; + headers.reserve(headers_len); for header in &headers_indices[..headers_len] { + // SAFETY: array is valid up to `headers_len` + let header = unsafe { &*header.as_ptr() }; let name = header_name!(&slice[header.name.0..header.name.1]); let value = header_value!(slice.slice(header.value.0..header.value.1)); @@ -669,13 +998,37 @@ impl Http1Transaction for Client { keep_alive = headers::connection_keep_alive(&value); } } + + if let Some(ref mut header_case_map) = header_case_map { + header_case_map.append(&name, slice.slice(header.name.0..header.name.1)); + } + headers.append(name, value); } + let mut extensions = http::Extensions::default(); + + if let Some(header_case_map) = header_case_map { + extensions.insert(header_case_map); + } + + #[cfg(feature = "ffi")] + if let Some(reason) = reason { + extensions.insert(crate::ffi::ReasonPhrase(reason)); + } + #[cfg(not(feature = "ffi"))] + drop(reason); + + #[cfg(feature = "ffi")] + if ctx.raw_headers { + extensions.insert(crate::ffi::RawHeaders(crate::ffi::hyper_buf(slice))); + } + let head = MessageHead { version, subject: status, headers, + extensions, }; if let Some((decode, is_upgrade)) = Client::decoder(&head, ctx.req_method)? 
{ return Ok(Some(ParsedMessage { @@ -688,6 +1041,19 @@ impl Http1Transaction for Client { wants_upgrade: is_upgrade, })); } + + #[cfg(feature = "ffi")] + if head.subject.is_informational() { + if let Some(callback) = ctx.on_informational { + callback.call(head.into_response(crate::Body::empty())); + } + } + + // Parsing a 1xx response could have consumed the buffer, check if + // it is empty now... + if buf.is_empty() { + return Ok(None); + } } } @@ -714,18 +1080,26 @@ impl Http1Transaction for Client { Version::HTTP_10 => extend(dst, b"HTTP/1.0"), Version::HTTP_11 => extend(dst, b"HTTP/1.1"), Version::HTTP_2 => { - warn!("request with HTTP2 version coerced to HTTP/1.1"); + debug!("request with HTTP2 version coerced to HTTP/1.1"); extend(dst, b"HTTP/1.1"); } other => panic!("unexpected request version: {:?}", other), } extend(dst, b"\r\n"); - if msg.title_case_headers { + if let Some(orig_headers) = msg.head.extensions.get::() { + write_headers_original_case( + &msg.head.headers, + orig_headers, + dst, + msg.title_case_headers, + ); + } else if msg.title_case_headers { write_headers_title_case(&msg.head.headers, dst); } else { write_headers(&msg.head.headers, dst); } + extend(dst, b"\r\n"); msg.head.headers.clear(); //TODO: remove when switching to drain() @@ -742,6 +1116,7 @@ impl Http1Transaction for Client { } } +#[cfg(feature = "client")] impl Client { /// Returns Some(length, wants_upgrade) if successful. /// @@ -792,7 +1167,7 @@ impl Client { // malformed. A server should respond with 400 Bad Request. if inc.version == Version::HTTP_10 { debug!("HTTP/1.0 cannot have Transfer-Encoding header"); - Err(Parse::Header) + Err(Parse::transfer_encoding_unexpected()) } else if headers::transfer_encoding_is_chunked(&inc.headers) { Ok(Some((DecodedLength::CHUNKED, false))) } else { @@ -803,15 +1178,12 @@ impl Client { Ok(Some((DecodedLength::checked_new(len)?, false))) } else if inc.headers.contains_key(header::CONTENT_LENGTH) { debug!("illegal Content-Length header"); - Err(Parse::Header) + Err(Parse::content_length_invalid()) } else { trace!("neither Transfer-Encoding nor Content-Length"); Ok(Some((DecodedLength::CLOSE_DELIMITED, false))) } } -} - -impl Client { fn set_length(head: &mut RequestHead, body: Option) -> Encoder { let body = if let Some(body) = body { body @@ -967,7 +1339,7 @@ struct HeaderIndices { fn record_header_indices( bytes: &[u8], headers: &[httparse::Header<'_>], - indices: &mut [HeaderIndices], + indices: &mut [MaybeUninit], ) -> Result<(), crate::error::Parse> { let bytes_ptr = bytes.as_ptr() as usize; @@ -978,45 +1350,36 @@ fn record_header_indices( } let name_start = header.name.as_ptr() as usize - bytes_ptr; let name_end = name_start + header.name.len(); - indices.name = (name_start, name_end); let value_start = header.value.as_ptr() as usize - bytes_ptr; let value_end = value_start + header.value.len(); - indices.value = (value_start, value_end); + + // FIXME(maybe_uninit_extra) + // FIXME(addr_of) + // Currently we don't have `ptr::addr_of_mut` in stable rust or + // MaybeUninit::write, so this is some way of assigning into a MaybeUninit + // safely + let new_header_indices = HeaderIndices { + name: (name_start, name_end), + value: (value_start, value_end), + }; + *indices = MaybeUninit::new(new_header_indices); } Ok(()) } -// Write header names as title case. The header name is assumed to be ASCII, -// therefore it is trivial to convert an ASCII character from lowercase to -// uppercase. It is as simple as XORing the lowercase character byte with -// space. 
+// Write header names as title case. The header name is assumed to be ASCII. fn title_case(dst: &mut Vec, name: &[u8]) { dst.reserve(name.len()); - let mut iter = name.iter(); - - // Uppercase the first character - if let Some(c) = iter.next() { - if *c >= b'a' && *c <= b'z' { - dst.push(*c ^ b' '); - } else { - dst.push(*c); - } - } - - while let Some(c) = iter.next() { - dst.push(*c); - - if *c == b'-' { - if let Some(c) = iter.next() { - if *c >= b'a' && *c <= b'z' { - dst.push(*c ^ b' '); - } else { - dst.push(*c); - } - } + // Ensure first character is uppercased + let mut prev = b'-'; + for &(mut c) in name { + if prev == b'-' { + c.make_ascii_uppercase(); } + dst.push(c); + prev = c; } } @@ -1038,6 +1401,42 @@ fn write_headers(headers: &HeaderMap, dst: &mut Vec) { } } +#[cold] +fn write_headers_original_case( + headers: &HeaderMap, + orig_case: &HeaderCaseMap, + dst: &mut Vec, + title_case_headers: bool, +) { + // For each header name/value pair, there may be a value in the casemap + // that corresponds to the HeaderValue. So, we iterator all the keys, + // and for each one, try to pair the originally cased name with the value. + // + // TODO: consider adding http::HeaderMap::entries() iterator + for name in headers.keys() { + let mut names = orig_case.get_all(name); + + for value in headers.get_all(name) { + if let Some(orig_name) = names.next() { + extend(dst, orig_name.as_ref()); + } else if title_case_headers { + title_case(dst, name.as_str().as_bytes()); + } else { + extend(dst, name.as_str().as_bytes()); + } + + // Wanted for curl test cases that send `X-Custom-Header:\r\n` + if value.is_empty() { + extend(dst, b":\r\n"); + } else { + extend(dst, b": "); + extend(dst, value.as_bytes()); + extend(dst, b"\r\n"); + } + } + } +} + struct FastWrite<'a>(&'a mut Vec); impl<'a> fmt::Write for FastWrite<'a> { @@ -1074,6 +1473,16 @@ mod tests { ParseContext { cached_headers: &mut None, req_method: &mut method, + h1_parser_config: Default::default(), + h1_header_read_timeout: None, + h1_header_read_timeout_fut: &mut None, + h1_header_read_timeout_running: &mut false, + preserve_header_case: false, + h09_responses: false, + #[cfg(feature = "ffi")] + on_informational: &mut None, + #[cfg(feature = "ffi")] + raw_headers: false, }, ) .unwrap() @@ -1094,6 +1503,16 @@ mod tests { let ctx = ParseContext { cached_headers: &mut None, req_method: &mut Some(crate::Method::GET), + h1_parser_config: Default::default(), + h1_header_read_timeout: None, + h1_header_read_timeout_fut: &mut None, + h1_header_read_timeout_running: &mut false, + preserve_header_case: false, + h09_responses: false, + #[cfg(feature = "ffi")] + on_informational: &mut None, + #[cfg(feature = "ffi")] + raw_headers: false, }; let msg = Client::parse(&mut raw, ctx).unwrap().unwrap(); assert_eq!(raw.len(), 0); @@ -1109,10 +1528,163 @@ mod tests { let ctx = ParseContext { cached_headers: &mut None, req_method: &mut None, + h1_parser_config: Default::default(), + h1_header_read_timeout: None, + h1_header_read_timeout_fut: &mut None, + h1_header_read_timeout_running: &mut false, + preserve_header_case: false, + h09_responses: false, + #[cfg(feature = "ffi")] + on_informational: &mut None, + #[cfg(feature = "ffi")] + raw_headers: false, }; Server::parse(&mut raw, ctx).unwrap_err(); } + const H09_RESPONSE: &'static str = "Baguettes are super delicious, don't you agree?"; + + #[test] + fn test_parse_response_h09_allowed() { + let _ = pretty_env_logger::try_init(); + let mut raw = BytesMut::from(H09_RESPONSE); + let ctx = ParseContext { + 
cached_headers: &mut None, + req_method: &mut Some(crate::Method::GET), + h1_parser_config: Default::default(), + h1_header_read_timeout: None, + h1_header_read_timeout_fut: &mut None, + h1_header_read_timeout_running: &mut false, + preserve_header_case: false, + h09_responses: true, + #[cfg(feature = "ffi")] + on_informational: &mut None, + #[cfg(feature = "ffi")] + raw_headers: false, + }; + let msg = Client::parse(&mut raw, ctx).unwrap().unwrap(); + assert_eq!(raw, H09_RESPONSE); + assert_eq!(msg.head.subject, crate::StatusCode::OK); + assert_eq!(msg.head.version, crate::Version::HTTP_09); + assert_eq!(msg.head.headers.len(), 0); + } + + #[test] + fn test_parse_response_h09_rejected() { + let _ = pretty_env_logger::try_init(); + let mut raw = BytesMut::from(H09_RESPONSE); + let ctx = ParseContext { + cached_headers: &mut None, + req_method: &mut Some(crate::Method::GET), + h1_parser_config: Default::default(), + h1_header_read_timeout: None, + h1_header_read_timeout_fut: &mut None, + h1_header_read_timeout_running: &mut false, + preserve_header_case: false, + h09_responses: false, + #[cfg(feature = "ffi")] + on_informational: &mut None, + #[cfg(feature = "ffi")] + raw_headers: false, + }; + Client::parse(&mut raw, ctx).unwrap_err(); + assert_eq!(raw, H09_RESPONSE); + } + + const RESPONSE_WITH_WHITESPACE_BETWEEN_HEADER_NAME_AND_COLON: &'static str = + "HTTP/1.1 200 OK\r\nAccess-Control-Allow-Credentials : true\r\n\r\n"; + + #[test] + fn test_parse_allow_response_with_spaces_before_colons() { + use httparse::ParserConfig; + + let _ = pretty_env_logger::try_init(); + let mut raw = BytesMut::from(RESPONSE_WITH_WHITESPACE_BETWEEN_HEADER_NAME_AND_COLON); + let mut h1_parser_config = ParserConfig::default(); + h1_parser_config.allow_spaces_after_header_name_in_responses(true); + let ctx = ParseContext { + cached_headers: &mut None, + req_method: &mut Some(crate::Method::GET), + h1_parser_config, + h1_header_read_timeout: None, + h1_header_read_timeout_fut: &mut None, + h1_header_read_timeout_running: &mut false, + preserve_header_case: false, + h09_responses: false, + #[cfg(feature = "ffi")] + on_informational: &mut None, + #[cfg(feature = "ffi")] + raw_headers: false, + }; + let msg = Client::parse(&mut raw, ctx).unwrap().unwrap(); + assert_eq!(raw.len(), 0); + assert_eq!(msg.head.subject, crate::StatusCode::OK); + assert_eq!(msg.head.version, crate::Version::HTTP_11); + assert_eq!(msg.head.headers.len(), 1); + assert_eq!(msg.head.headers["Access-Control-Allow-Credentials"], "true"); + } + + #[test] + fn test_parse_reject_response_with_spaces_before_colons() { + let _ = pretty_env_logger::try_init(); + let mut raw = BytesMut::from(RESPONSE_WITH_WHITESPACE_BETWEEN_HEADER_NAME_AND_COLON); + let ctx = ParseContext { + cached_headers: &mut None, + req_method: &mut Some(crate::Method::GET), + h1_parser_config: Default::default(), + h1_header_read_timeout: None, + h1_header_read_timeout_fut: &mut None, + h1_header_read_timeout_running: &mut false, + preserve_header_case: false, + h09_responses: false, + #[cfg(feature = "ffi")] + on_informational: &mut None, + #[cfg(feature = "ffi")] + raw_headers: false, + }; + Client::parse(&mut raw, ctx).unwrap_err(); + } + + #[test] + fn test_parse_preserve_header_case_in_request() { + let mut raw = + BytesMut::from("GET / HTTP/1.1\r\nHost: hyper.rs\r\nX-BREAD: baguette\r\n\r\n"); + let ctx = ParseContext { + cached_headers: &mut None, + req_method: &mut None, + h1_parser_config: Default::default(), + h1_header_read_timeout: None, + h1_header_read_timeout_fut: 
&mut None, + h1_header_read_timeout_running: &mut false, + preserve_header_case: true, + h09_responses: false, + #[cfg(feature = "ffi")] + on_informational: &mut None, + #[cfg(feature = "ffi")] + raw_headers: false, + }; + let parsed_message = Server::parse(&mut raw, ctx).unwrap().unwrap(); + let orig_headers = parsed_message + .head + .extensions + .get::() + .unwrap(); + assert_eq!( + orig_headers + .get_all_internal(&HeaderName::from_static("host")) + .into_iter() + .collect::>(), + vec![&Bytes::from("Host")] + ); + assert_eq!( + orig_headers + .get_all_internal(&HeaderName::from_static("x-bread")) + .into_iter() + .collect::>(), + vec![&Bytes::from("X-BREAD")] + ); + } + #[test] fn test_decoder_request() { fn parse(s: &str) -> ParsedMessage { @@ -1122,6 +1694,16 @@ mod tests { ParseContext { cached_headers: &mut None, req_method: &mut None, + h1_parser_config: Default::default(), + h1_header_read_timeout: None, + h1_header_read_timeout_fut: &mut None, + h1_header_read_timeout_running: &mut false, + preserve_header_case: false, + h09_responses: false, + #[cfg(feature = "ffi")] + on_informational: &mut None, + #[cfg(feature = "ffi")] + raw_headers: false, }, ) .expect("parse ok") @@ -1135,6 +1717,16 @@ mod tests { ParseContext { cached_headers: &mut None, req_method: &mut None, + h1_parser_config: Default::default(), + h1_header_read_timeout: None, + h1_header_read_timeout_fut: &mut None, + h1_header_read_timeout_running: &mut false, + preserve_header_case: false, + h09_responses: false, + #[cfg(feature = "ffi")] + on_informational: &mut None, + #[cfg(feature = "ffi")] + raw_headers: false, }, ) .expect_err(comment) @@ -1280,6 +1872,16 @@ mod tests { "multiple content-lengths", ); + // content-length with prefix is not allowed + parse_err( + "\ + POST / HTTP/1.1\r\n\ + content-length: +10\r\n\ + \r\n\ + ", + "prefixed content-length", + ); + // transfer-encoding that isn't chunked is an error parse_err( "\ @@ -1299,6 +1901,16 @@ mod tests { "transfer-encoding doesn't end in chunked", ); + parse_err( + "\ + POST / HTTP/1.1\r\n\ + transfer-encoding: chunked\r\n\ + transfer-encoding: afterlol\r\n\ + \r\n\ + ", + "transfer-encoding multiple lines doesn't end in chunked", + ); + // http/1.0 assert_eq!( @@ -1337,6 +1949,16 @@ mod tests { ParseContext { cached_headers: &mut None, req_method: &mut Some(Method::GET), + h1_parser_config: Default::default(), + h1_header_read_timeout: None, + h1_header_read_timeout_fut: &mut None, + h1_header_read_timeout_running: &mut false, + preserve_header_case: false, + h09_responses: false, + #[cfg(feature = "ffi")] + on_informational: &mut None, + #[cfg(feature = "ffi")] + raw_headers: false, } ) .expect("parse ok") @@ -1350,6 +1972,16 @@ mod tests { ParseContext { cached_headers: &mut None, req_method: &mut Some(m), + h1_parser_config: Default::default(), + h1_header_read_timeout: None, + h1_header_read_timeout_fut: &mut None, + h1_header_read_timeout_running: &mut false, + preserve_header_case: false, + h09_responses: false, + #[cfg(feature = "ffi")] + on_informational: &mut None, + #[cfg(feature = "ffi")] + raw_headers: false, }, ) .expect("parse ok") @@ -1363,6 +1995,16 @@ mod tests { ParseContext { cached_headers: &mut None, req_method: &mut Some(Method::GET), + h1_parser_config: Default::default(), + h1_header_read_timeout: None, + h1_header_read_timeout_fut: &mut None, + h1_header_read_timeout_running: &mut false, + preserve_header_case: false, + h09_responses: false, + #[cfg(feature = "ffi")] + on_informational: &mut None, + #[cfg(feature = "ffi")] + 
raw_headers: false, }, ) .expect_err("parse should err") @@ -1438,6 +2080,14 @@ mod tests { ", ); + parse_err( + "\ + HTTP/1.1 200 OK\r\n\ + content-length: +8\r\n\ + \r\n\ + ", + ); + // transfer-encoding: chunked assert_eq!( parse( @@ -1648,6 +2298,75 @@ mod tests { assert_eq!(vec, b"GET / HTTP/1.1\r\nContent-Length: 10\r\nContent-Type: application/json\r\n*-*: o_o\r\n\r\n".to_vec()); } + #[test] + fn test_client_request_encode_orig_case() { + use crate::proto::BodyLength; + use http::header::{HeaderValue, CONTENT_LENGTH}; + + let mut head = MessageHead::default(); + head.headers + .insert("content-length", HeaderValue::from_static("10")); + head.headers + .insert("content-type", HeaderValue::from_static("application/json")); + + let mut orig_headers = HeaderCaseMap::default(); + orig_headers.insert(CONTENT_LENGTH, "CONTENT-LENGTH".into()); + head.extensions.insert(orig_headers); + + let mut vec = Vec::new(); + Client::encode( + Encode { + head: &mut head, + body: Some(BodyLength::Known(10)), + keep_alive: true, + req_method: &mut None, + title_case_headers: false, + }, + &mut vec, + ) + .unwrap(); + + assert_eq!( + &*vec, + b"GET / HTTP/1.1\r\nCONTENT-LENGTH: 10\r\ncontent-type: application/json\r\n\r\n" + .as_ref(), + ); + } + #[test] + fn test_client_request_encode_orig_and_title_case() { + use crate::proto::BodyLength; + use http::header::{HeaderValue, CONTENT_LENGTH}; + + let mut head = MessageHead::default(); + head.headers + .insert("content-length", HeaderValue::from_static("10")); + head.headers + .insert("content-type", HeaderValue::from_static("application/json")); + + let mut orig_headers = HeaderCaseMap::default(); + orig_headers.insert(CONTENT_LENGTH, "CONTENT-LENGTH".into()); + head.extensions.insert(orig_headers); + + let mut vec = Vec::new(); + Client::encode( + Encode { + head: &mut head, + body: Some(BodyLength::Known(10)), + keep_alive: true, + req_method: &mut None, + title_case_headers: true, + }, + &mut vec, + ) + .unwrap(); + + assert_eq!( + &*vec, + b"GET / HTTP/1.1\r\nCONTENT-LENGTH: 10\r\nContent-Type: application/json\r\n\r\n" + .as_ref(), + ); + } + #[test] fn test_server_encode_connect_method() { let mut head = MessageHead::default(); @@ -1668,6 +2387,106 @@ mod tests { assert!(encoder.is_last()); } + #[test] + fn test_server_response_encode_title_case() { + use crate::proto::BodyLength; + use http::header::HeaderValue; + + let mut head = MessageHead::default(); + head.headers + .insert("content-length", HeaderValue::from_static("10")); + head.headers + .insert("content-type", HeaderValue::from_static("application/json")); + head.headers + .insert("weird--header", HeaderValue::from_static("")); + + let mut vec = Vec::new(); + Server::encode( + Encode { + head: &mut head, + body: Some(BodyLength::Known(10)), + keep_alive: true, + req_method: &mut None, + title_case_headers: true, + }, + &mut vec, + ) + .unwrap(); + + let expected_response = + b"HTTP/1.1 200 OK\r\nContent-Length: 10\r\nContent-Type: application/json\r\nWeird--Header: \r\n"; + + assert_eq!(&vec[..expected_response.len()], &expected_response[..]); + } + + #[test] + fn test_server_response_encode_orig_case() { + use crate::proto::BodyLength; + use http::header::{HeaderValue, CONTENT_LENGTH}; + + let mut head = MessageHead::default(); + head.headers + .insert("content-length", HeaderValue::from_static("10")); + head.headers + .insert("content-type", HeaderValue::from_static("application/json")); + + let mut orig_headers = HeaderCaseMap::default(); + orig_headers.insert(CONTENT_LENGTH, 
"CONTENT-LENGTH".into()); + head.extensions.insert(orig_headers); + + let mut vec = Vec::new(); + Server::encode( + Encode { + head: &mut head, + body: Some(BodyLength::Known(10)), + keep_alive: true, + req_method: &mut None, + title_case_headers: false, + }, + &mut vec, + ) + .unwrap(); + + let expected_response = + b"HTTP/1.1 200 OK\r\nCONTENT-LENGTH: 10\r\ncontent-type: application/json\r\ndate: "; + + assert_eq!(&vec[..expected_response.len()], &expected_response[..]); + } + + #[test] + fn test_server_response_encode_orig_and_title_case() { + use crate::proto::BodyLength; + use http::header::{HeaderValue, CONTENT_LENGTH}; + + let mut head = MessageHead::default(); + head.headers + .insert("content-length", HeaderValue::from_static("10")); + head.headers + .insert("content-type", HeaderValue::from_static("application/json")); + + let mut orig_headers = HeaderCaseMap::default(); + orig_headers.insert(CONTENT_LENGTH, "CONTENT-LENGTH".into()); + head.extensions.insert(orig_headers); + + let mut vec = Vec::new(); + Server::encode( + Encode { + head: &mut head, + body: Some(BodyLength::Known(10)), + keep_alive: true, + req_method: &mut None, + title_case_headers: true, + }, + &mut vec, + ) + .unwrap(); + + let expected_response = + b"HTTP/1.1 200 OK\r\nCONTENT-LENGTH: 10\r\nContent-Type: application/json\r\nDate: "; + + assert_eq!(&vec[..expected_response.len()], &expected_response[..]); + } + #[test] fn parse_header_htabs() { let mut bytes = BytesMut::from("HTTP/1.1 200 OK\r\nserver: hello\tworld\r\n\r\n"); @@ -1676,6 +2495,16 @@ mod tests { ParseContext { cached_headers: &mut None, req_method: &mut Some(Method::GET), + h1_parser_config: Default::default(), + h1_header_read_timeout: None, + h1_header_read_timeout_fut: &mut None, + h1_header_read_timeout_running: &mut false, + preserve_header_case: false, + h09_responses: false, + #[cfg(feature = "ffi")] + on_informational: &mut None, + #[cfg(feature = "ffi")] + raw_headers: false, }, ) .expect("parse ok") @@ -1684,6 +2513,40 @@ mod tests { assert_eq!(parsed.head.headers["server"], "hello\tworld"); } + #[test] + fn test_write_headers_orig_case_empty_value() { + let mut headers = HeaderMap::new(); + let name = http::header::HeaderName::from_static("x-empty"); + headers.insert(&name, "".parse().expect("parse empty")); + let mut orig_cases = HeaderCaseMap::default(); + orig_cases.insert(name, Bytes::from_static(b"X-EmptY")); + + let mut dst = Vec::new(); + super::write_headers_original_case(&headers, &orig_cases, &mut dst, false); + + assert_eq!( + dst, b"X-EmptY:\r\n", + "there should be no space between the colon and CRLF" + ); + } + + #[test] + fn test_write_headers_orig_case_multiple_entries() { + let mut headers = HeaderMap::new(); + let name = http::header::HeaderName::from_static("x-empty"); + headers.insert(&name, "a".parse().unwrap()); + headers.append(&name, "b".parse().unwrap()); + + let mut orig_cases = HeaderCaseMap::default(); + orig_cases.insert(name.clone(), Bytes::from_static(b"X-Empty")); + orig_cases.append(name, Bytes::from_static(b"X-EMPTY")); + + let mut dst = Vec::new(); + super::write_headers_original_case(&headers, &orig_cases, &mut dst, false); + + assert_eq!(dst, b"X-Empty: a\r\nX-EMPTY: b\r\n"); + } + #[cfg(feature = "nightly")] use test::Bencher; @@ -1719,6 +2582,16 @@ mod tests { ParseContext { cached_headers: &mut headers, req_method: &mut None, + h1_parser_config: Default::default(), + h1_header_read_timeout: None, + h1_header_read_timeout_fut: &mut None, + h1_header_read_timeout_running: &mut false, + 
preserve_header_case: false, + h09_responses: false, + #[cfg(feature = "ffi")] + on_informational: &mut None, + #[cfg(feature = "ffi")] + raw_headers: false, }, ) .unwrap() @@ -1752,6 +2625,16 @@ mod tests { ParseContext { cached_headers: &mut headers, req_method: &mut None, + h1_parser_config: Default::default(), + h1_header_read_timeout: None, + h1_header_read_timeout_fut: &mut None, + h1_header_read_timeout_running: &mut false, + preserve_header_case: false, + h09_responses: false, + #[cfg(feature = "ffi")] + on_informational: &mut None, + #[cfg(feature = "ffi")] + raw_headers: false, }, ) .unwrap() diff --git a/third_party/rust/hyper/src/proto/h2/client.rs b/third_party/rust/hyper/src/proto/h2/client.rs index 41b8e1b0c86d..013f6fb5a87b 100644 --- a/third_party/rust/hyper/src/proto/h2/client.rs +++ b/third_party/rust/hyper/src/proto/h2/client.rs @@ -2,17 +2,23 @@ use std::error::Error as StdError; #[cfg(feature = "runtime")] use std::time::Duration; +use bytes::Bytes; use futures_channel::{mpsc, oneshot}; use futures_util::future::{self, Either, FutureExt as _, TryFutureExt as _}; use futures_util::stream::StreamExt as _; use h2::client::{Builder, SendRequest}; +use http::{Method, StatusCode}; use tokio::io::{AsyncRead, AsyncWrite}; +use tracing::{debug, trace, warn}; -use super::{decode_content_length, ping, PipeToSendStream, SendBuf}; +use super::{ping, H2Upgraded, PipeToSendStream, SendBuf}; use crate::body::HttpBody; -use crate::common::{task, Exec, Future, Never, Pin, Poll}; +use crate::common::{exec::Exec, task, Future, Never, Pin, Poll}; +use crate::ext::Protocol; use crate::headers; +use crate::proto::h2::UpgradedSendStream; use crate::proto::Dispatched; +use crate::upgrade::Upgraded; use crate::{Body, Request, Response}; type ClientRx = crate::client::dispatch::Receiver, Response>; @@ -31,6 +37,7 @@ type ConnEof = oneshot::Receiver; const DEFAULT_CONN_WINDOW: u32 = 1024 * 1024 * 5; // 5mb const DEFAULT_STREAM_WINDOW: u32 = 1024 * 1024 * 2; // 2mb const DEFAULT_MAX_FRAME_SIZE: u32 = 1024 * 16; // 16kb +const DEFAULT_MAX_SEND_BUF_SIZE: usize = 1024 * 1024; // 1mb #[derive(Clone, Debug)] pub(crate) struct Config { @@ -44,6 +51,8 @@ pub(crate) struct Config { pub(crate) keep_alive_timeout: Duration, #[cfg(feature = "runtime")] pub(crate) keep_alive_while_idle: bool, + pub(crate) max_concurrent_reset_streams: Option, + pub(crate) max_send_buffer_size: usize, } impl Default for Config { @@ -59,10 +68,42 @@ impl Default for Config { keep_alive_timeout: Duration::from_secs(20), #[cfg(feature = "runtime")] keep_alive_while_idle: false, + max_concurrent_reset_streams: None, + max_send_buffer_size: DEFAULT_MAX_SEND_BUF_SIZE, } } } +fn new_builder(config: &Config) -> Builder { + let mut builder = Builder::default(); + builder + .initial_window_size(config.initial_stream_window_size) + .initial_connection_window_size(config.initial_conn_window_size) + .max_frame_size(config.max_frame_size) + .max_send_buffer_size(config.max_send_buffer_size) + .enable_push(false); + if let Some(max) = config.max_concurrent_reset_streams { + builder.max_concurrent_reset_streams(max); + } + builder +} + +fn new_ping_config(config: &Config) -> ping::Config { + ping::Config { + bdp_initial_window: if config.adaptive_window { + Some(config.initial_stream_window_size) + } else { + None + }, + #[cfg(feature = "runtime")] + keep_alive_interval: config.keep_alive_interval, + #[cfg(feature = "runtime")] + keep_alive_timeout: config.keep_alive_timeout, + #[cfg(feature = "runtime")] + keep_alive_while_idle: 
config.keep_alive_while_idle, + } +} + pub(crate) async fn handshake( io: T, req_rx: ClientRx, @@ -74,11 +115,7 @@ where B: HttpBody, B::Data: Send + 'static, { - let (h2_tx, mut conn) = Builder::default() - .initial_window_size(config.initial_stream_window_size) - .initial_connection_window_size(config.initial_conn_window_size) - .max_frame_size(config.max_frame_size) - .enable_push(false) + let (h2_tx, mut conn) = new_builder(config) .handshake::<_, SendBuf>(io) .await .map_err(crate::Error::new_h2)?; @@ -96,21 +133,9 @@ where } }); - let ping_config = ping::Config { - bdp_initial_window: if config.adaptive_window { - Some(config.initial_stream_window_size) - } else { - None - }, - #[cfg(feature = "runtime")] - keep_alive_interval: config.keep_alive_interval, - #[cfg(feature = "runtime")] - keep_alive_timeout: config.keep_alive_timeout, - #[cfg(feature = "runtime")] - keep_alive_while_idle: config.keep_alive_while_idle, - }; + let ping_config = new_ping_config(&config); - let ping = if ping_config.is_enabled() { + let (conn, ping) = if ping_config.is_enabled() { let pp = conn.ping_pong().expect("conn.ping_pong"); let (recorder, mut ponger) = ping::channel(pp, ping_config); @@ -130,16 +155,13 @@ where Pin::new(&mut conn).poll(cx) }); - let conn = conn.map_err(|e| debug!("connection error: {}", e)); - - exec.execute(conn_task(conn, conn_drop_rx, cancel_tx)); - recorder + (Either::Left(conn), recorder) } else { - let conn = conn.map_err(|e| debug!("connection error: {}", e)); - - exec.execute(conn_task(conn, conn_drop_rx, cancel_tx)); - ping::disabled() + (Either::Right(conn), ping::disabled()) }; + let conn = conn.map_err(|e| debug!("connection error: {}", e)); + + exec.execute(conn_task(conn, conn_drop_rx, cancel_tx)); Ok(ClientTask { ping, @@ -183,6 +205,15 @@ where req_rx: ClientRx, } +impl ClientTask +where + B: HttpBody + 'static, +{ + pub(crate) fn is_extended_connect_protocol_enabled(&self) -> bool { + self.h2_tx.is_extended_connect_protocol_enabled() + } +} + impl Future for ClientTask where B: HttpBody + Send + 'static, @@ -206,7 +237,7 @@ where } }; - match Pin::new(&mut self.req_rx).poll_next(cx) { + match self.req_rx.poll_recv(cx) { Poll::Ready(Some((req, cb))) => { // check that future hasn't been canceled already if cb.is_canceled() { @@ -221,8 +252,29 @@ where headers::set_content_length_if_missing(req.headers_mut(), len); } } + + let is_connect = req.method() == Method::CONNECT; let eos = body.is_end_stream(); - let (fut, body_tx) = match self.h2_tx.send_request(req, eos) { + let ping = self.ping.clone(); + + if is_connect { + if headers::content_length_parse_all(req.headers()) + .map_or(false, |len| len != 0) + { + warn!("h2 connect request with non-zero body not supported"); + cb.send(Err(( + crate::Error::new_h2(h2::Reason::INTERNAL_ERROR.into()), + None, + ))); + continue; + } + } + + if let Some(protocol) = req.extensions_mut().remove::() { + req.extensions_mut().insert(protocol.into_inner()); + } + + let (fut, body_tx) = match self.h2_tx.send_request(req, !is_connect && eos) { Ok(ok) => ok, Err(err) => { debug!("client send request error: {}", err); @@ -231,45 +283,81 @@ where } }; - let ping = self.ping.clone(); - if !eos { - let mut pipe = Box::pin(PipeToSendStream::new(body, body_tx)).map(|res| { - if let Err(e) = res { - debug!("client request body error: {}", e); - } - }); - - // eagerly see if the body pipe is ready and - // can thus skip allocating in the executor - match Pin::new(&mut pipe).poll(cx) { - Poll::Ready(_) => (), - Poll::Pending => { - let 
conn_drop_ref = self.conn_drop_ref.clone(); - // keep the ping recorder's knowledge of an - // "open stream" alive while this body is - // still sending... - let ping = ping.clone(); - let pipe = pipe.map(move |x| { - drop(conn_drop_ref); - drop(ping); - x + let send_stream = if !is_connect { + if !eos { + let mut pipe = + Box::pin(PipeToSendStream::new(body, body_tx)).map(|res| { + if let Err(e) = res { + debug!("client request body error: {}", e); + } }); - self.executor.execute(pipe); + + // eagerly see if the body pipe is ready and + // can thus skip allocating in the executor + match Pin::new(&mut pipe).poll(cx) { + Poll::Ready(_) => (), + Poll::Pending => { + let conn_drop_ref = self.conn_drop_ref.clone(); + // keep the ping recorder's knowledge of an + // "open stream" alive while this body is + // still sending... + let ping = ping.clone(); + let pipe = pipe.map(move |x| { + drop(conn_drop_ref); + drop(ping); + x + }); + self.executor.execute(pipe); + } } } - } + + None + } else { + Some(body_tx) + }; let fut = fut.map(move |result| match result { Ok(res) => { // record that we got the response headers ping.record_non_data(); - let content_length = decode_content_length(res.headers()); - let res = res.map(|stream| { - let ping = ping.for_stream(&stream); - crate::Body::h2(stream, content_length, ping) - }); - Ok(res) + let content_length = headers::content_length_parse_all(res.headers()); + if let (Some(mut send_stream), StatusCode::OK) = + (send_stream, res.status()) + { + if content_length.map_or(false, |len| len != 0) { + warn!("h2 connect response with non-zero body not supported"); + + send_stream.send_reset(h2::Reason::INTERNAL_ERROR); + return Err(( + crate::Error::new_h2(h2::Reason::INTERNAL_ERROR.into()), + None, + )); + } + let (parts, recv_stream) = res.into_parts(); + let mut res = Response::from_parts(parts, Body::empty()); + + let (pending, on_upgrade) = crate::upgrade::pending(); + let io = H2Upgraded { + ping, + send_stream: unsafe { UpgradedSendStream::new(send_stream) }, + recv_stream, + buf: Bytes::new(), + }; + let upgraded = Upgraded::new(io, Bytes::new()); + + pending.fulfill(upgraded); + res.extensions_mut().insert(on_upgrade); + + Ok(res) + } else { + let res = res.map(|stream| { + let ping = ping.for_stream(&stream); + crate::Body::h2(stream, content_length.into(), ping) + }); + Ok(res) + } } Err(err) => { ping.ensure_not_timed_out().map_err(|e| (e, None))?; diff --git a/third_party/rust/hyper/src/proto/h2/mod.rs b/third_party/rust/hyper/src/proto/h2/mod.rs index 565f4e6135b1..5857c919d141 100644 --- a/third_party/rust/hyper/src/proto/h2/mod.rs +++ b/third_party/rust/hyper/src/proto/h2/mod.rs @@ -1,24 +1,30 @@ -use bytes::Buf; -use h2::SendStream; -use http::header::{ - HeaderName, CONNECTION, PROXY_AUTHENTICATE, PROXY_AUTHORIZATION, TE, TRAILER, - TRANSFER_ENCODING, UPGRADE, -}; +use bytes::{Buf, Bytes}; +use h2::{Reason, RecvStream, SendStream}; +use http::header::{HeaderName, CONNECTION, TE, TRAILER, TRANSFER_ENCODING, UPGRADE}; use http::HeaderMap; -use pin_project::pin_project; +use pin_project_lite::pin_project; use std::error::Error as StdError; +use std::io::{self, Cursor, IoSlice}; +use std::mem; +use std::task::Context; +use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; +use tracing::{debug, trace, warn}; -use super::DecodedLength; use crate::body::HttpBody; use crate::common::{task, Future, Pin, Poll}; -use crate::headers::content_length_parse_all; +use crate::proto::h2::ping::Recorder; -pub(crate) mod client; pub(crate) mod ping; -pub(crate) mod 
server; -pub(crate) use self::client::ClientTask; -pub(crate) use self::server::Server; +cfg_client! { + pub(crate) mod client; + pub(crate) use self::client::ClientTask; +} + +cfg_server! { + pub(crate) mod server; + pub(crate) use self::server::Server; +} /// Default initial stream window size defined in HTTP2 spec. pub(crate) const SPEC_WINDOW_SIZE: u32 = 65_535; @@ -32,8 +38,6 @@ fn strip_connection_headers(headers: &mut HeaderMap, is_request: bool) { let connection_headers = [ HeaderName::from_lowercase(b"keep-alive").unwrap(), HeaderName::from_lowercase(b"proxy-connection").unwrap(), - PROXY_AUTHENTICATE, - PROXY_AUTHORIZATION, TRAILER, TRANSFER_ENCODING, UPGRADE, @@ -78,26 +82,18 @@ fn strip_connection_headers(headers: &mut HeaderMap, is_request: bool) { } } -fn decode_content_length(headers: &HeaderMap) -> DecodedLength { - if let Some(len) = content_length_parse_all(headers) { - // If the length is u64::MAX, oh well, just reported chunked. - DecodedLength::checked_new(len).unwrap_or_else(|_| DecodedLength::CHUNKED) - } else { - DecodedLength::CHUNKED - } -} - // body adapters used by both Client and Server -#[pin_project] -struct PipeToSendStream -where - S: HttpBody, -{ - body_tx: SendStream>, - data_done: bool, - #[pin] - stream: S, +pin_project! { + struct PipeToSendStream + where + S: HttpBody, + { + body_tx: SendStream>, + data_done: bool, + #[pin] + stream: S, + } } impl PipeToSendStream @@ -167,7 +163,7 @@ where is_eos, ); - let buf = SendBuf(Some(chunk)); + let buf = SendBuf::Buf(chunk); me.body_tx .send_data(buf, is_eos) .map_err(crate::Error::new_body_write)?; @@ -238,28 +234,238 @@ impl SendStreamExt for SendStream> { fn send_eos_frame(&mut self) -> crate::Result<()> { trace!("send body eos"); - self.send_data(SendBuf(None), true) + self.send_data(SendBuf::None, true) .map_err(crate::Error::new_body_write) } } -struct SendBuf(Option); +#[repr(usize)] +enum SendBuf { + Buf(B), + Cursor(Cursor>), + None, +} impl Buf for SendBuf { #[inline] fn remaining(&self) -> usize { - self.0.as_ref().map(|b| b.remaining()).unwrap_or(0) + match *self { + Self::Buf(ref b) => b.remaining(), + Self::Cursor(ref c) => Buf::remaining(c), + Self::None => 0, + } } #[inline] - fn bytes(&self) -> &[u8] { - self.0.as_ref().map(|b| b.bytes()).unwrap_or(&[]) + fn chunk(&self) -> &[u8] { + match *self { + Self::Buf(ref b) => b.chunk(), + Self::Cursor(ref c) => c.chunk(), + Self::None => &[], + } } #[inline] fn advance(&mut self, cnt: usize) { - if let Some(b) = self.0.as_mut() { - b.advance(cnt) + match *self { + Self::Buf(ref mut b) => b.advance(cnt), + Self::Cursor(ref mut c) => c.advance(cnt), + Self::None => {} + } + } + + fn chunks_vectored<'a>(&'a self, dst: &mut [IoSlice<'a>]) -> usize { + match *self { + Self::Buf(ref b) => b.chunks_vectored(dst), + Self::Cursor(ref c) => c.chunks_vectored(dst), + Self::None => 0, } } } + +struct H2Upgraded +where + B: Buf, +{ + ping: Recorder, + send_stream: UpgradedSendStream, + recv_stream: RecvStream, + buf: Bytes, +} + +impl AsyncRead for H2Upgraded +where + B: Buf, +{ + fn poll_read( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + read_buf: &mut ReadBuf<'_>, + ) -> Poll> { + if self.buf.is_empty() { + self.buf = loop { + match ready!(self.recv_stream.poll_data(cx)) { + None => return Poll::Ready(Ok(())), + Some(Ok(buf)) if buf.is_empty() && !self.recv_stream.is_end_stream() => { + continue + } + Some(Ok(buf)) => { + self.ping.record_data(buf.len()); + break buf; + } + Some(Err(e)) => { + return Poll::Ready(match e.reason() { + 
Some(Reason::NO_ERROR) | Some(Reason::CANCEL) => Ok(()), + Some(Reason::STREAM_CLOSED) => { + Err(io::Error::new(io::ErrorKind::BrokenPipe, e)) + } + _ => Err(h2_to_io_error(e)), + }) + } + } + }; + } + let cnt = std::cmp::min(self.buf.len(), read_buf.remaining()); + read_buf.put_slice(&self.buf[..cnt]); + self.buf.advance(cnt); + let _ = self.recv_stream.flow_control().release_capacity(cnt); + Poll::Ready(Ok(())) + } +} + +impl AsyncWrite for H2Upgraded +where + B: Buf, +{ + fn poll_write( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll> { + if buf.is_empty() { + return Poll::Ready(Ok(0)); + } + self.send_stream.reserve_capacity(buf.len()); + + // We ignore all errors returned by `poll_capacity` and `write`, as we + // will get the correct from `poll_reset` anyway. + let cnt = match ready!(self.send_stream.poll_capacity(cx)) { + None => Some(0), + Some(Ok(cnt)) => self + .send_stream + .write(&buf[..cnt], false) + .ok() + .map(|()| cnt), + Some(Err(_)) => None, + }; + + if let Some(cnt) = cnt { + return Poll::Ready(Ok(cnt)); + } + + Poll::Ready(Err(h2_to_io_error( + match ready!(self.send_stream.poll_reset(cx)) { + Ok(Reason::NO_ERROR) | Ok(Reason::CANCEL) | Ok(Reason::STREAM_CLOSED) => { + return Poll::Ready(Err(io::ErrorKind::BrokenPipe.into())) + } + Ok(reason) => reason.into(), + Err(e) => e, + }, + ))) + } + + fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn poll_shutdown( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll> { + if self.send_stream.write(&[], true).is_ok() { + return Poll::Ready(Ok(())) + } + + Poll::Ready(Err(h2_to_io_error( + match ready!(self.send_stream.poll_reset(cx)) { + Ok(Reason::NO_ERROR) => { + return Poll::Ready(Ok(())) + } + Ok(Reason::CANCEL) | Ok(Reason::STREAM_CLOSED) => { + return Poll::Ready(Err(io::ErrorKind::BrokenPipe.into())) + } + Ok(reason) => reason.into(), + Err(e) => e, + }, + ))) + } +} + +fn h2_to_io_error(e: h2::Error) -> io::Error { + if e.is_io() { + e.into_io().unwrap() + } else { + io::Error::new(io::ErrorKind::Other, e) + } +} + +struct UpgradedSendStream(SendStream>>); + +impl UpgradedSendStream +where + B: Buf, +{ + unsafe fn new(inner: SendStream>) -> Self { + assert_eq!(mem::size_of::(), mem::size_of::>()); + Self(mem::transmute(inner)) + } + + fn reserve_capacity(&mut self, cnt: usize) { + unsafe { self.as_inner_unchecked().reserve_capacity(cnt) } + } + + fn poll_capacity(&mut self, cx: &mut Context<'_>) -> Poll>> { + unsafe { self.as_inner_unchecked().poll_capacity(cx) } + } + + fn poll_reset(&mut self, cx: &mut Context<'_>) -> Poll> { + unsafe { self.as_inner_unchecked().poll_reset(cx) } + } + + fn write(&mut self, buf: &[u8], end_of_stream: bool) -> Result<(), io::Error> { + let send_buf = SendBuf::Cursor(Cursor::new(buf.into())); + unsafe { + self.as_inner_unchecked() + .send_data(send_buf, end_of_stream) + .map_err(h2_to_io_error) + } + } + + unsafe fn as_inner_unchecked(&mut self) -> &mut SendStream> { + &mut *(&mut self.0 as *mut _ as *mut _) + } +} + +#[repr(transparent)] +struct Neutered { + _inner: B, + impossible: Impossible, +} + +enum Impossible {} + +unsafe impl Send for Neutered {} + +impl Buf for Neutered { + fn remaining(&self) -> usize { + match self.impossible {} + } + + fn chunk(&self) -> &[u8] { + match self.impossible {} + } + + fn advance(&mut self, _cnt: usize) { + match self.impossible {} + } +} diff --git a/third_party/rust/hyper/src/proto/h2/ping.rs b/third_party/rust/hyper/src/proto/h2/ping.rs index 
c4fe2dd15cea..1e8386497ce3 100644 --- a/third_party/rust/hyper/src/proto/h2/ping.rs +++ b/third_party/rust/hyper/src/proto/h2/ping.rs @@ -33,7 +33,8 @@ use std::time::Instant; use h2::{Ping, PingPong}; #[cfg(feature = "runtime")] -use tokio::time::{Delay, Instant}; +use tokio::time::{Instant, Sleep}; +use tracing::{debug, trace}; type WindowSize = u32; @@ -51,16 +52,22 @@ pub(super) fn channel(ping_pong: PingPong, config: Config) -> (Recorder, Ponger) bdp: wnd, max_bandwidth: 0.0, rtt: 0.0, + ping_delay: Duration::from_millis(100), + stable_count: 0, }); - let bytes = bdp.as_ref().map(|_| 0); + let (bytes, next_bdp_at) = if bdp.is_some() { + (Some(0), Some(Instant::now())) + } else { + (None, None) + }; #[cfg(feature = "runtime")] let keep_alive = config.keep_alive_interval.map(|interval| KeepAlive { interval, timeout: config.keep_alive_timeout, while_idle: config.keep_alive_while_idle, - timer: tokio::time::delay_for(interval), + timer: Box::pin(tokio::time::sleep(interval)), state: KeepAliveState::Init, }); @@ -75,6 +82,7 @@ pub(super) fn channel(ping_pong: PingPong, config: Config) -> (Recorder, Ponger) is_keep_alive_timed_out: false, ping_pong, ping_sent_at: None, + next_bdp_at, })); ( @@ -125,6 +133,9 @@ struct Shared { /// If `Some`, bdp is enabled, and this tracks how many bytes have been /// read during the current sample. bytes: Option, + /// We delay a variable amount of time between BDP pings. This allows us + /// to send less pings as the bandwidth stabilizes. + next_bdp_at: Option, // keep-alive /// If `Some`, keep-alive is enabled, and the Instant is how long ago @@ -143,6 +154,12 @@ struct Bdp { max_bandwidth: f64, /// Round trip time in seconds rtt: f64, + /// Delay the next ping by this amount. + /// + /// This will change depending on how stable the current bandwidth is. + ping_delay: Duration, + /// The count of ping round trips where BDP has stayed the same. + stable_count: u32, } #[cfg(feature = "runtime")] @@ -156,7 +173,7 @@ struct KeepAlive { while_idle: bool, state: KeepAliveState, - timer: Delay, + timer: Pin>, } #[cfg(feature = "runtime")] @@ -207,6 +224,17 @@ impl Recorder { #[cfg(feature = "runtime")] locked.update_last_read_at(); + // are we ready to send another bdp ping? + // if not, we don't need to record bytes either + + if let Some(ref next_bdp_at) = locked.next_bdp_at { + if Instant::now() < *next_bdp_at { + return; + } else { + locked.next_bdp_at = None; + } + } + if let Some(ref mut bytes) = locked.bytes { *bytes += len; } else { @@ -236,6 +264,7 @@ impl Recorder { /// If the incoming stream is already closed, convert self into /// a disabled reporter. 
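// Illustrative sketch of the tokio 1.x pattern behind the keep-alive timer change
// in this ping.rs hunk: `tokio::time::Sleep` (the successor of tokio 0.2's `Delay`)
// is not `Unpin`, so the keep-alive timer is stored as `Pin<Box<Sleep>>` and rearmed
// in place with `as_mut().reset(..)` instead of being recreated. The struct and
// method names below are hypothetical; only the `Box::pin(sleep(..))` / `reset(..)`
// calls mirror the vendored code.

use std::pin::Pin;
use std::time::Duration;
use tokio::time::{sleep, Instant, Sleep};

struct KeepAliveTimer {
    timer: Pin<Box<Sleep>>,
    interval: Duration,
}

impl KeepAliveTimer {
    fn new(interval: Duration) -> Self {
        Self {
            timer: Box::pin(sleep(interval)),
            interval,
        }
    }

    /// Re-arm the existing timer without allocating a new `Sleep`.
    fn rearm(&mut self, last_read_at: Instant) {
        self.timer.as_mut().reset(last_read_at + self.interval);
    }
}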
+ #[cfg(feature = "client")] pub(super) fn for_stream(self, stream: &h2::RecvStream) -> Self { if stream.is_end_stream() { disabled() @@ -264,6 +293,7 @@ impl Recorder { impl Ponger { pub(super) fn poll(&mut self, cx: &mut task::Context<'_>) -> Poll { + let now = Instant::now(); let mut locked = self.shared.lock().unwrap(); #[cfg(feature = "runtime")] let is_idle = self.is_idle(); @@ -281,13 +311,13 @@ impl Ponger { return Poll::Pending; } - let (bytes, rtt) = match locked.ping_pong.poll_pong(cx) { + match locked.ping_pong.poll_pong(cx) { Poll::Ready(Ok(_pong)) => { - let rtt = locked + let start = locked .ping_sent_at - .expect("pong received implies ping_sent_at") - .elapsed(); + .expect("pong received implies ping_sent_at"); locked.ping_sent_at = None; + let rtt = now - start; trace!("recv pong"); #[cfg(feature = "runtime")] @@ -298,19 +328,20 @@ impl Ponger { } } - if self.bdp.is_some() { + if let Some(ref mut bdp) = self.bdp { let bytes = locked.bytes.expect("bdp enabled implies bytes"); locked.bytes = Some(0); // reset trace!("received BDP ack; bytes = {}, rtt = {:?}", bytes, rtt); - (bytes, rtt) - } else { - // no bdp, done! - return Poll::Pending; + + let update = bdp.calculate(bytes, rtt); + locked.next_bdp_at = Some(now + bdp.ping_delay); + if let Some(update) = update { + return Poll::Ready(Ponged::SizeUpdate(update)) + } } } Poll::Ready(Err(e)) => { debug!("pong error: {}", e); - return Poll::Pending; } Poll::Pending => { #[cfg(feature = "runtime")] @@ -323,19 +354,11 @@ impl Ponger { } } } - - return Poll::Pending; } - }; - - drop(locked); - - if let Some(bdp) = self.bdp.as_mut().and_then(|bdp| bdp.calculate(bytes, rtt)) { - Poll::Ready(Ponged::SizeUpdate(bdp)) - } else { - // XXX: this doesn't register a waker...? - Poll::Pending } + + // XXX: this doesn't register a waker...? + Poll::Pending } #[cfg(feature = "runtime")] @@ -385,6 +408,7 @@ impl Bdp { fn calculate(&mut self, bytes: usize, rtt: Duration) -> Option { // No need to do any math if we're at the limit. 
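// Illustrative sketch: a simplified, standalone restatement of the bandwidth-delay-
// product heuristic that `Bdp::calculate` and `Bdp::stabilize_delay` implement in
// this hunk. Bandwidth is estimated as bytes / rtt, the window doubles when a sample
// fills at least two thirds of the current estimate, and the delay between BDP pings
// shrinks while the estimate is still growing and backs off once it is stable. The
// names and the `BDP_LIMIT` value here are assumptions; only the arithmetic mirrors
// the vendored code.

use std::time::Duration;

const BDP_LIMIT: usize = 1024 * 1024 * 16; // assumed cap for the sketch

struct BdpSketch {
    window: u32,          // current bandwidth-delay-product estimate, in bytes
    max_bandwidth: f64,   // fastest bandwidth observed so far, bytes/sec
    ping_delay: Duration, // pause before the next BDP ping
    stable_count: u32,    // consecutive samples without growth
}

impl BdpSketch {
    fn sample(&mut self, bytes: usize, rtt: Duration) -> Option<u32> {
        if self.window as usize == BDP_LIMIT {
            self.stabilize();
            return None;
        }
        // Guard against a zero rtt in this standalone version.
        let bw = bytes as f64 / rtt.as_secs_f64().max(f64::EPSILON);
        if bw < self.max_bandwidth {
            // Not faster than what we have already seen; treat as a stable sample.
            self.stabilize();
            return None;
        }
        self.max_bandwidth = bw;
        if bytes >= self.window as usize * 2 / 3 {
            // The sample nearly filled the window: double it and ping again sooner.
            self.window = (bytes * 2).min(BDP_LIMIT) as u32;
            self.stable_count = 0;
            self.ping_delay /= 2;
            Some(self.window)
        } else {
            self.stabilize();
            None
        }
    }

    fn stabilize(&mut self) {
        // After two stable samples, quadruple the ping delay (capped near 10s),
        // so a steady or idle connection sends far fewer BDP pings.
        if self.ping_delay < Duration::from_secs(10) {
            self.stable_count += 1;
            if self.stable_count >= 2 {
                self.ping_delay *= 4;
                self.stable_count = 0;
            }
        }
    }
}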
if self.bdp as usize == BDP_LIMIT { + self.stabilize_delay(); return None; } @@ -404,6 +428,7 @@ impl Bdp { if bw < self.max_bandwidth { // not a faster bandwidth, so don't update + self.stabilize_delay(); return None; } else { self.max_bandwidth = bw; @@ -414,11 +439,26 @@ impl Bdp { if bytes >= self.bdp as usize * 2 / 3 { self.bdp = (bytes * 2).min(BDP_LIMIT) as WindowSize; trace!("BDP increased to {}", self.bdp); + + self.stable_count = 0; + self.ping_delay /= 2; Some(self.bdp) } else { + self.stabilize_delay(); None } } + + fn stabilize_delay(&mut self) { + if self.ping_delay < Duration::from_secs(10) { + self.stable_count += 1; + + if self.stable_count >= 2 { + self.ping_delay *= 4; + self.stable_count = 0; + } + } + } } fn seconds(dur: Duration) -> f64 { @@ -440,9 +480,18 @@ impl KeepAlive { self.state = KeepAliveState::Scheduled; let interval = shared.last_read_at() + self.interval; - self.timer.reset(interval); + self.timer.as_mut().reset(interval); } - KeepAliveState::Scheduled | KeepAliveState::PingSent => (), + KeepAliveState::PingSent => { + if shared.is_ping_sent() { + return; + } + + self.state = KeepAliveState::Scheduled; + let interval = shared.last_read_at() + self.interval; + self.timer.as_mut().reset(interval); + } + KeepAliveState::Scheduled => (), } } @@ -462,7 +511,7 @@ impl KeepAlive { shared.send_ping(); self.state = KeepAliveState::PingSent; let timeout = Instant::now() + self.timeout; - self.timer.reset(timeout); + self.timer.as_mut().reset(timeout); } KeepAliveState::Init | KeepAliveState::PingSent => (), } diff --git a/third_party/rust/hyper/src/proto/h2/server.rs b/third_party/rust/hyper/src/proto/h2/server.rs index 4762a9209aab..b9037ee3ddb3 100644 --- a/third_party/rust/hyper/src/proto/h2/server.rs +++ b/third_party/rust/hyper/src/proto/h2/server.rs @@ -3,19 +3,26 @@ use std::marker::Unpin; #[cfg(feature = "runtime")] use std::time::Duration; +use bytes::Bytes; use h2::server::{Connection, Handshake, SendResponse}; -use h2::Reason; -use pin_project::{pin_project, project}; +use h2::{Reason, RecvStream}; +use http::{Method, Request}; +use pin_project_lite::pin_project; use tokio::io::{AsyncRead, AsyncWrite}; +use tracing::{debug, trace, warn}; -use super::{decode_content_length, ping, PipeToSendStream, SendBuf}; +use super::{ping, PipeToSendStream, SendBuf}; use crate::body::HttpBody; -use crate::common::exec::H2Exec; -use crate::common::{task, Future, Pin, Poll}; +use crate::common::exec::ConnStreamExec; +use crate::common::{date, task, Future, Pin, Poll}; +use crate::ext::Protocol; use crate::headers; +use crate::proto::h2::ping::Recorder; +use crate::proto::h2::{H2Upgraded, UpgradedSendStream}; use crate::proto::Dispatched; use crate::service::HttpService; +use crate::upgrade::{OnUpgrade, Pending, Upgraded}; use crate::{Body, Response}; // Our defaults are chosen for the "majority" case, which usually are not @@ -27,6 +34,7 @@ use crate::{Body, Response}; const DEFAULT_CONN_WINDOW: u32 = 1024 * 1024; // 1mb const DEFAULT_STREAM_WINDOW: u32 = 1024 * 1024; // 1mb const DEFAULT_MAX_FRAME_SIZE: u32 = 1024 * 16; // 16kb +const DEFAULT_MAX_SEND_BUF_SIZE: usize = 1024 * 400; // 400kb #[derive(Clone, Debug)] pub(crate) struct Config { @@ -34,11 +42,13 @@ pub(crate) struct Config { pub(crate) initial_conn_window_size: u32, pub(crate) initial_stream_window_size: u32, pub(crate) max_frame_size: u32, + pub(crate) enable_connect_protocol: bool, pub(crate) max_concurrent_streams: Option, #[cfg(feature = "runtime")] pub(crate) keep_alive_interval: Option, #[cfg(feature = 
"runtime")] pub(crate) keep_alive_timeout: Duration, + pub(crate) max_send_buffer_size: usize, } impl Default for Config { @@ -48,24 +58,27 @@ impl Default for Config { initial_conn_window_size: DEFAULT_CONN_WINDOW, initial_stream_window_size: DEFAULT_STREAM_WINDOW, max_frame_size: DEFAULT_MAX_FRAME_SIZE, + enable_connect_protocol: false, max_concurrent_streams: None, #[cfg(feature = "runtime")] keep_alive_interval: None, #[cfg(feature = "runtime")] keep_alive_timeout: Duration::from_secs(20), + max_send_buffer_size: DEFAULT_MAX_SEND_BUF_SIZE, } } } -#[pin_project] -pub(crate) struct Server -where - S: HttpService, - B: HttpBody, -{ - exec: E, - service: S, - state: State, +pin_project! { + pub(crate) struct Server + where + S: HttpService, + B: HttpBody, + { + exec: E, + service: S, + state: State, + } } enum State @@ -95,17 +108,21 @@ where S: HttpService, S::Error: Into>, B: HttpBody + 'static, - E: H2Exec, + E: ConnStreamExec, { pub(crate) fn new(io: T, service: S, config: &Config, exec: E) -> Server { let mut builder = h2::server::Builder::default(); builder .initial_window_size(config.initial_stream_window_size) .initial_connection_window_size(config.initial_conn_window_size) - .max_frame_size(config.max_frame_size); + .max_frame_size(config.max_frame_size) + .max_send_buffer_size(config.max_send_buffer_size); if let Some(max) = config.max_concurrent_streams { builder.max_concurrent_streams(max); } + if config.enable_connect_protocol { + builder.enable_connect_protocol(); + } let handshake = builder.handshake(io); let bdp = if config.adaptive_window { @@ -136,7 +153,7 @@ where } } - pub fn graceful_shutdown(&mut self) { + pub(crate) fn graceful_shutdown(&mut self) { trace!("graceful_shutdown"); match self.state { State::Handshaking { .. } => { @@ -162,7 +179,7 @@ where S: HttpService, S::Error: Into>, B: HttpBody + 'static, - E: H2Exec, + E: ConnStreamExec, { type Output = crate::Result; @@ -216,7 +233,7 @@ where where S: HttpService, S::Error: Into>, - E: H2Exec, + E: ConnStreamExec, { if self.closing.is_none() { loop { @@ -255,9 +272,9 @@ where // When the service is ready, accepts an incoming request. 
match ready!(self.conn.poll_accept(cx)) { - Some(Ok((req, respond))) => { + Some(Ok((req, mut respond))) => { trace!("incoming request"); - let content_length = decode_content_length(req.headers()); + let content_length = headers::content_length_parse_all(req.headers()); let ping = self .ping .as_ref() @@ -267,8 +284,40 @@ where // Record the headers received ping.record_non_data(); - let req = req.map(|stream| crate::Body::h2(stream, content_length, ping)); - let fut = H2Stream::new(service.call(req), respond); + let is_connect = req.method() == Method::CONNECT; + let (mut parts, stream) = req.into_parts(); + let (mut req, connect_parts) = if !is_connect { + ( + Request::from_parts( + parts, + crate::Body::h2(stream, content_length.into(), ping), + ), + None, + ) + } else { + if content_length.map_or(false, |len| len != 0) { + warn!("h2 connect request with non-zero body not supported"); + respond.send_reset(h2::Reason::INTERNAL_ERROR); + return Poll::Ready(Ok(())); + } + let (pending, upgrade) = crate::upgrade::pending(); + debug_assert!(parts.extensions.get::().is_none()); + parts.extensions.insert(upgrade); + ( + Request::from_parts(parts, crate::Body::empty()), + Some(ConnectParts { + pending, + ping, + recv_stream: stream, + }), + ) + }; + + if let Some(protocol) = req.extensions_mut().remove::() { + req.extensions_mut().insert(Protocol::from_inner(protocol)); + } + + let fut = H2Stream::new(service.call(req), connect_parts, respond); exec.execute_h2stream(fut); } Some(Err(e)) => { @@ -315,34 +364,54 @@ where } } -#[allow(missing_debug_implementations)] -#[pin_project] -pub struct H2Stream -where - B: HttpBody, -{ - reply: SendResponse>, - #[pin] - state: H2StreamState, +pin_project! { + #[allow(missing_debug_implementations)] + pub struct H2Stream + where + B: HttpBody, + { + reply: SendResponse>, + #[pin] + state: H2StreamState, + } } -#[pin_project] -enum H2StreamState -where - B: HttpBody, -{ - Service(#[pin] F), - Body(#[pin] PipeToSendStream), +pin_project! { + #[project = H2StreamStateProj] + enum H2StreamState + where + B: HttpBody, + { + Service { + #[pin] + fut: F, + connect_parts: Option, + }, + Body { + #[pin] + pipe: PipeToSendStream, + }, + } +} + +struct ConnectParts { + pending: Pending, + ping: Recorder, + recv_stream: RecvStream, } impl H2Stream where B: HttpBody, { - fn new(fut: F, respond: SendResponse>) -> H2Stream { + fn new( + fut: F, + connect_parts: Option, + respond: SendResponse>, + ) -> H2Stream { H2Stream { reply: respond, - state: H2StreamState::Service(fut), + state: H2StreamState::Service { fut, connect_parts }, } } } @@ -364,16 +433,18 @@ impl H2Stream where F: Future, E>>, B: HttpBody, + B::Data: 'static, B::Error: Into>, E: Into>, { - #[project] fn poll2(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll> { let mut me = self.project(); loop { - #[project] let next = match me.state.as_mut().project() { - H2StreamState::Service(h) => { + H2StreamStateProj::Service { + fut: h, + connect_parts, + } => { let res = match h.poll(cx) { Poll::Ready(Ok(r)) => r, Poll::Pending => { @@ -402,22 +473,48 @@ where // set Date header if it isn't already set... res.headers_mut() .entry(::http::header::DATE) - .or_insert_with(crate::proto::h1::date::update_and_header_value); + .or_insert_with(date::update_and_header_value); - // automatically set Content-Length from body... 
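// Illustrative usage sketch for the server-side CONNECT plumbing in these hunks:
// the h2 server inserts an `OnUpgrade` extension into a CONNECT request and, once
// the service returns a successful response with an empty body, fulfills the
// pending upgrade with the `H2Upgraded` I/O. A service can claim that tunnel with
// `hyper::upgrade::on`. The handler name and the echo loop below are hypothetical
// stand-ins (a real proxy would connect to the CONNECT target instead); extended
// CONNECT additionally requires `http2_enable_connect_protocol` on the server.

use hyper::{Body, Method, Request, Response, StatusCode};
use tokio::io::{AsyncReadExt, AsyncWriteExt};

async fn handle(mut req: Request<Body>) -> Result<Response<Body>, hyper::Error> {
    if req.method() == Method::CONNECT {
        // Claim the upgrade before responding; it resolves only after the
        // response has been sent back to the peer.
        let on_upgrade = hyper::upgrade::on(&mut req);
        tokio::spawn(async move {
            match on_upgrade.await {
                Ok(mut upgraded) => {
                    // `upgraded` is AsyncRead + AsyncWrite over the h2 stream;
                    // echo bytes back as a stand-in for a real tunnel.
                    let mut buf = [0u8; 4096];
                    while let Ok(n) = upgraded.read(&mut buf).await {
                        if n == 0 || upgraded.write_all(&buf[..n]).await.is_err() {
                            break;
                        }
                    }
                }
                Err(e) => eprintln!("upgrade failed: {}", e),
            }
        });
        // Per the code in this hunk, a successful CONNECT response must have an
        // empty body, otherwise the stream is reset.
        Ok(Response::new(Body::empty()))
    } else {
        Ok(Response::builder()
            .status(StatusCode::NOT_FOUND)
            .body(Body::empty())
            .unwrap())
    }
}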
- if let Some(len) = body.size_hint().exact() { - headers::set_content_length_if_missing(res.headers_mut(), len); + if let Some(connect_parts) = connect_parts.take() { + if res.status().is_success() { + if headers::content_length_parse_all(res.headers()) + .map_or(false, |len| len != 0) + { + warn!("h2 successful response to CONNECT request with body not supported"); + me.reply.send_reset(h2::Reason::INTERNAL_ERROR); + return Poll::Ready(Err(crate::Error::new_user_header())); + } + let send_stream = reply!(me, res, false); + connect_parts.pending.fulfill(Upgraded::new( + H2Upgraded { + ping: connect_parts.ping, + recv_stream: connect_parts.recv_stream, + send_stream: unsafe { UpgradedSendStream::new(send_stream) }, + buf: Bytes::new(), + }, + Bytes::new(), + )); + return Poll::Ready(Ok(())); + } } + if !body.is_end_stream() { + // automatically set Content-Length from body... + if let Some(len) = body.size_hint().exact() { + headers::set_content_length_if_missing(res.headers_mut(), len); + } + let body_tx = reply!(me, res, false); - H2StreamState::Body(PipeToSendStream::new(body, body_tx)) + H2StreamState::Body { + pipe: PipeToSendStream::new(body, body_tx), + } } else { reply!(me, res, true); return Poll::Ready(Ok(())); } } - H2StreamState::Body(pipe) => { + H2StreamStateProj::Body { pipe } => { return pipe.poll(cx); } }; @@ -430,6 +527,7 @@ impl Future for H2Stream where F: Future, E>>, B: HttpBody, + B::Data: 'static, B::Error: Into>, E: Into>, { diff --git a/third_party/rust/hyper/src/proto/mod.rs b/third_party/rust/hyper/src/proto/mod.rs index 7268e2126582..f938bf532baa 100644 --- a/third_party/rust/hyper/src/proto/mod.rs +++ b/third_party/rust/hyper/src/proto/mod.rs @@ -1,34 +1,49 @@ //! Pieces pertaining to the HTTP message protocol. -use http::{HeaderMap, Method, StatusCode, Uri, Version}; -pub(crate) use self::body_length::DecodedLength; -pub(crate) use self::h1::{dispatch, Conn, ServerTransaction}; +cfg_feature! { + #![feature = "http1"] -pub(crate) mod h1; + pub(crate) mod h1; + + pub(crate) use self::h1::Conn; + + #[cfg(feature = "client")] + pub(crate) use self::h1::dispatch; + #[cfg(feature = "server")] + pub(crate) use self::h1::ServerTransaction; +} + +#[cfg(feature = "http2")] pub(crate) mod h2; /// An Incoming Message head. Includes request/status line, and headers. -#[derive(Clone, Debug, Default, PartialEq)] -pub struct MessageHead { +#[derive(Debug, Default)] +pub(crate) struct MessageHead { /// HTTP version of the message. - pub version: Version, + pub(crate) version: http::Version, /// Subject (request line or status line) of Incoming message. - pub subject: S, + pub(crate) subject: S, /// Headers of the Incoming message. - pub headers: HeaderMap, + pub(crate) headers: http::HeaderMap, + /// Extensions. + extensions: http::Extensions, } /// An incoming request message. -pub type RequestHead = MessageHead; +#[cfg(feature = "http1")] +pub(crate) type RequestHead = MessageHead; #[derive(Debug, Default, PartialEq)] -pub struct RequestLine(pub Method, pub Uri); +#[cfg(feature = "http1")] +pub(crate) struct RequestLine(pub(crate) http::Method, pub(crate) http::Uri); /// An incoming response message. 
-pub type ResponseHead = MessageHead; +#[cfg(all(feature = "http1", feature = "client"))] +pub(crate) type ResponseHead = MessageHead; #[derive(Debug)] -pub enum BodyLength { +#[cfg(feature = "http1")] +pub(crate) enum BodyLength { /// Content-Length Known(u64), /// Transfer-Encoding: chunked (if h1) @@ -40,106 +55,17 @@ pub(crate) enum Dispatched { /// Dispatcher completely shutdown connection. Shutdown, /// Dispatcher has pending upgrade, and so did not shutdown. + #[cfg(feature = "http1")] Upgrade(crate::upgrade::Pending), } -/// A separate module to encapsulate the invariants of the DecodedLength type. -mod body_length { - use std::fmt; - - #[derive(Clone, Copy, PartialEq, Eq)] - pub(crate) struct DecodedLength(u64); - - const MAX_LEN: u64 = std::u64::MAX - 2; - - impl DecodedLength { - pub(crate) const CLOSE_DELIMITED: DecodedLength = DecodedLength(::std::u64::MAX); - pub(crate) const CHUNKED: DecodedLength = DecodedLength(::std::u64::MAX - 1); - pub(crate) const ZERO: DecodedLength = DecodedLength(0); - - #[cfg(test)] - pub(crate) fn new(len: u64) -> Self { - debug_assert!(len <= MAX_LEN); - DecodedLength(len) - } - - /// Takes the length as a content-length without other checks. - /// - /// Should only be called if previously confirmed this isn't - /// CLOSE_DELIMITED or CHUNKED. - #[inline] - pub(crate) fn danger_len(self) -> u64 { - debug_assert!(self.0 < Self::CHUNKED.0); - self.0 - } - - /// Converts to an Option representing a Known or Unknown length. - pub(crate) fn into_opt(self) -> Option { - match self { - DecodedLength::CHUNKED | DecodedLength::CLOSE_DELIMITED => None, - DecodedLength(known) => Some(known), - } - } - - /// Checks the `u64` is within the maximum allowed for content-length. - pub(crate) fn checked_new(len: u64) -> Result { - if len <= MAX_LEN { - Ok(DecodedLength(len)) - } else { - warn!("content-length bigger than maximum: {} > {}", len, MAX_LEN); - Err(crate::error::Parse::TooLarge) - } - } - - pub(crate) fn sub_if(&mut self, amt: u64) { - match *self { - DecodedLength::CHUNKED | DecodedLength::CLOSE_DELIMITED => (), - DecodedLength(ref mut known) => { - *known -= amt; - } - } - } - } - - impl fmt::Debug for DecodedLength { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match *self { - DecodedLength::CLOSE_DELIMITED => f.write_str("CLOSE_DELIMITED"), - DecodedLength::CHUNKED => f.write_str("CHUNKED"), - DecodedLength(n) => f.debug_tuple("DecodedLength").field(&n).finish(), - } - } - } - - impl fmt::Display for DecodedLength { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match *self { - DecodedLength::CLOSE_DELIMITED => f.write_str("close-delimited"), - DecodedLength::CHUNKED => f.write_str("chunked encoding"), - DecodedLength::ZERO => f.write_str("empty"), - DecodedLength(n) => write!(f, "content-length ({} bytes)", n), - } - } - } - - #[cfg(test)] - mod tests { - use super::*; - - #[test] - fn sub_if_known() { - let mut len = DecodedLength::new(30); - len.sub_if(20); - - assert_eq!(len.0, 10); - } - - #[test] - fn sub_if_chunked() { - let mut len = DecodedLength::CHUNKED; - len.sub_if(20); - - assert_eq!(len, DecodedLength::CHUNKED); - } +impl MessageHead { + fn into_response(self, body: B) -> http::Response { + let mut res = http::Response::new(body); + *res.status_mut() = self.subject; + *res.headers_mut() = self.headers; + *res.version_mut() = self.version; + *res.extensions_mut() = self.extensions; + res } } diff --git a/third_party/rust/hyper/src/rt.rs b/third_party/rust/hyper/src/rt.rs index 
4e60139a8722..2614b591127c 100644 --- a/third_party/rust/hyper/src/rt.rs +++ b/third_party/rust/hyper/src/rt.rs @@ -5,4 +5,8 @@ //! If the `runtime` feature is disabled, the types in this module can be used //! to plug in other runtimes. -pub use crate::common::Executor; +/// An executor of futures. +pub trait Executor { + /// Place the future into the executor to be run. + fn execute(&self, fut: Fut); +} diff --git a/third_party/rust/hyper/src/server/accept.rs b/third_party/rust/hyper/src/server/accept.rs index e56e3acf8419..4b7a1487dd64 100644 --- a/third_party/rust/hyper/src/server/accept.rs +++ b/third_party/rust/hyper/src/server/accept.rs @@ -8,6 +8,8 @@ #[cfg(feature = "stream")] use futures_core::Stream; +#[cfg(feature = "stream")] +use pin_project_lite::pin_project; use crate::common::{ task::{self, Poll}, @@ -53,6 +55,9 @@ where { struct PollFn(F); + // The closure `F` is never pinned + impl Unpin for PollFn {} + impl Accept for PollFn where F: FnMut(&mut task::Context<'_>) -> Poll>>, @@ -63,7 +68,7 @@ where self: Pin<&mut Self>, cx: &mut task::Context<'_>, ) -> Poll>> { - unsafe { (self.get_unchecked_mut().0)(cx) } + (self.get_mut().0)(cx) } } @@ -81,7 +86,12 @@ pub fn from_stream(stream: S) -> impl Accept where S: Stream>, { - struct FromStream(S); + pin_project! { + struct FromStream { + #[pin] + stream: S, + } + } impl Accept for FromStream where @@ -93,9 +103,9 @@ where self: Pin<&mut Self>, cx: &mut task::Context<'_>, ) -> Poll>> { - unsafe { Pin::new_unchecked(&mut self.get_unchecked_mut().0).poll_next(cx) } + self.project().stream.poll_next(cx) } } - FromStream(stream) + FromStream { stream } } diff --git a/third_party/rust/hyper/src/server/conn.rs b/third_party/rust/hyper/src/server/conn.rs index 76fac003bc7e..de765b3a1500 100644 --- a/third_party/rust/hyper/src/server/conn.rs +++ b/third_party/rust/hyper/src/server/conn.rs @@ -7,33 +7,82 @@ //! //! If you don't have need to manage connections yourself, consider using the //! higher-level [Server](super) API. +//! +//! ## Example +//! A simple example that uses the `Http` struct to talk HTTP over a Tokio TCP stream +//! ```no_run +//! # #[cfg(all(feature = "http1", feature = "runtime"))] +//! # mod rt { +//! use http::{Request, Response, StatusCode}; +//! use hyper::{server::conn::Http, service::service_fn, Body}; +//! use std::{net::SocketAddr, convert::Infallible}; +//! use tokio::net::TcpListener; +//! +//! #[tokio::main] +//! async fn main() -> Result<(), Box> { +//! let addr: SocketAddr = ([127, 0, 0, 1], 8080).into(); +//! +//! let mut tcp_listener = TcpListener::bind(addr).await?; +//! loop { +//! let (tcp_stream, _) = tcp_listener.accept().await?; +//! tokio::task::spawn(async move { +//! if let Err(http_err) = Http::new() +//! .http1_only(true) +//! .http1_keep_alive(true) +//! .serve_connection(tcp_stream, service_fn(hello)) +//! .await { +//! eprintln!("Error while serving HTTP connection: {}", http_err); +//! } +//! }); +//! } +//! } +//! +//! async fn hello(_req: Request) -> Result, Infallible> { +//! Ok(Response::new(Body::from("Hello World!"))) +//! } +//! # } +//! 
``` -use std::error::Error as StdError; -use std::fmt; -use std::mem; +#[cfg(all( + any(feature = "http1", feature = "http2"), + not(all(feature = "http1", feature = "http2")) +))] +use std::marker::PhantomData; #[cfg(feature = "tcp")] use std::net::SocketAddr; -#[cfg(feature = "runtime")] use std::time::Duration; -use bytes::Bytes; -use pin_project::{pin_project, project}; -use tokio::io::{AsyncRead, AsyncWrite}; - -use super::Accept; -use crate::body::{Body, HttpBody}; -use crate::common::exec::{Exec, H2Exec, NewSvcExec}; +#[cfg(feature = "http2")] use crate::common::io::Rewind; -use crate::common::{task, Future, Pin, Poll, Unpin}; +#[cfg(all(feature = "http1", feature = "http2"))] use crate::error::{Kind, Parse}; -use crate::proto; -use crate::service::{HttpService, MakeServiceRef}; +#[cfg(feature = "http1")] use crate::upgrade::Upgraded; -use self::spawn_all::NewSvcTask; -pub(super) use self::spawn_all::NoopWatcher; -pub(super) use self::spawn_all::Watcher; -pub(super) use self::upgrades::UpgradeableConnection; +cfg_feature! { + #![any(feature = "http1", feature = "http2")] + + use std::error::Error as StdError; + use std::fmt; + + use bytes::Bytes; + use pin_project_lite::pin_project; + use tokio::io::{AsyncRead, AsyncWrite}; + use tracing::trace; + + use super::accept::Accept; + use crate::body::{Body, HttpBody}; + use crate::common::{task, Future, Pin, Poll, Unpin}; + #[cfg(not(all(feature = "http1", feature = "http2")))] + use crate::common::Never; + use crate::common::exec::{ConnStreamExec, Exec, NewSvcExec}; + use crate::proto; + use crate::service::{HttpService, MakeServiceRef}; + use self::spawn_all::NewSvcTask; + + pub(super) use self::spawn_all::{NoopWatcher, Watcher}; + pub(super) use self::upgrades::UpgradeableConnection; +} #[cfg(feature = "tcp")] pub use super::tcp::{AddrIncoming, AddrStream}; @@ -45,11 +94,18 @@ pub use super::tcp::{AddrIncoming, AddrStream}; /// If you don't have need to manage connections yourself, consider using the /// higher-level [Server](super) API. #[derive(Clone, Debug)] +#[cfg(any(feature = "http1", feature = "http2"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))] pub struct Http { exec: E, h1_half_close: bool, h1_keep_alive: bool, - h1_writev: bool, + h1_title_case_headers: bool, + h1_preserve_header_case: bool, + #[cfg(all(feature = "http1", feature = "runtime"))] + h1_header_read_timeout: Option, + h1_writev: Option, + #[cfg(feature = "http2")] h2_builder: proto::h2::server::Config, mode: ConnectionMode, max_buf_size: Option, @@ -57,91 +113,130 @@ pub struct Http { } /// The internal mode of HTTP protocol which indicates the behavior when a parse error occurs. +#[cfg(any(feature = "http1", feature = "http2"))] #[derive(Clone, Debug, PartialEq)] enum ConnectionMode { /// Always use HTTP/1 and do not upgrade when a parse error occurs. + #[cfg(feature = "http1")] H1Only, /// Always use HTTP/2. + #[cfg(feature = "http2")] H2Only, /// Use HTTP/1 and try to upgrade to h2 when a parse error occurs. + #[cfg(all(feature = "http1", feature = "http2"))] Fallback, } -/// A stream mapping incoming IOs to new services. -/// -/// Yields `Connecting`s that are futures that should be put on a reactor. -#[must_use = "streams do nothing unless polled"] -#[pin_project] -#[derive(Debug)] -pub(super) struct Serve { - #[pin] - incoming: I, - make_service: S, - protocol: Http, -} - -/// A future building a new `Service` to a `Connection`. -/// -/// Wraps the future returned from `MakeService` into one that returns -/// a `Connection`. 
-#[must_use = "futures do nothing unless polled"] -#[pin_project] -#[derive(Debug)] -pub struct Connecting { - #[pin] - future: F, - io: Option, - protocol: Http, -} - -#[must_use = "futures do nothing unless polled"] -#[pin_project] -#[derive(Debug)] -pub(super) struct SpawnAll { - // TODO: re-add `pub(super)` once rustdoc can handle this. - // - // See https://github.com/rust-lang/rust/issues/64705 - #[pin] - pub serve: Serve, -} - -/// A future binding a connection with a Service. -/// -/// Polling this future will drive HTTP forward. -#[must_use = "futures do nothing unless polled"] -#[pin_project] -pub struct Connection -where - S: HttpService, -{ - pub(super) conn: Option>, - fallback: Fallback, -} - -#[pin_project] -pub(super) enum ProtoServer -where - S: HttpService, - B: HttpBody, -{ - H1( +#[cfg(any(feature = "http1", feature = "http2"))] +pin_project! { + /// A stream mapping incoming IOs to new services. + /// + /// Yields `Connecting`s that are futures that should be put on a reactor. + #[must_use = "streams do nothing unless polled"] + #[derive(Debug)] + pub(super) struct Serve { #[pin] - proto::h1::Dispatcher< - proto::h1::dispatch::Server, - B, - T, - proto::ServerTransaction, - >, - ), - H2(#[pin] proto::h2::Server, S, B, E>), + incoming: I, + make_service: S, + protocol: Http, + } } +#[cfg(any(feature = "http1", feature = "http2"))] +pin_project! { + /// A future building a new `Service` to a `Connection`. + /// + /// Wraps the future returned from `MakeService` into one that returns + /// a `Connection`. + #[must_use = "futures do nothing unless polled"] + #[derive(Debug)] + #[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))] + pub struct Connecting { + #[pin] + future: F, + io: Option, + protocol: Http, + } +} + +#[cfg(any(feature = "http1", feature = "http2"))] +pin_project! { + #[must_use = "futures do nothing unless polled"] + #[derive(Debug)] + pub(super) struct SpawnAll { + // TODO: re-add `pub(super)` once rustdoc can handle this. + // + // See https://github.com/rust-lang/rust/issues/64705 + #[pin] + pub(super) serve: Serve, + } +} + +#[cfg(any(feature = "http1", feature = "http2"))] +pin_project! { + /// A future binding a connection with a Service. + /// + /// Polling this future will drive HTTP forward. + #[must_use = "futures do nothing unless polled"] + #[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))] + pub struct Connection + where + S: HttpService, + { + pub(super) conn: Option>, + fallback: Fallback, + } +} + +#[cfg(feature = "http1")] +type Http1Dispatcher = + proto::h1::Dispatcher, B, T, proto::ServerTransaction>; + +#[cfg(all(not(feature = "http1"), feature = "http2"))] +type Http1Dispatcher = (Never, PhantomData<(T, Box>, Box>)>); + +#[cfg(feature = "http2")] +type Http2Server = proto::h2::Server, S, B, E>; + +#[cfg(all(not(feature = "http2"), feature = "http1"))] +type Http2Server = ( + Never, + PhantomData<(T, Box>, Box>, Box>)>, +); + +#[cfg(any(feature = "http1", feature = "http2"))] +pin_project! 
{ + #[project = ProtoServerProj] + pub(super) enum ProtoServer + where + S: HttpService, + B: HttpBody, + { + H1 { + #[pin] + h1: Http1Dispatcher, + }, + H2 { + #[pin] + h2: Http2Server, + }, + } +} + +#[cfg(all(feature = "http1", feature = "http2"))] #[derive(Clone, Debug)] enum Fallback { ToHttp2(proto::h2::server::Config, E), Http1Only, } +#[cfg(all( + any(feature = "http1", feature = "http2"), + not(all(feature = "http1", feature = "http2")) +))] +type Fallback = PhantomData; + +#[cfg(all(feature = "http1", feature = "http2"))] impl Fallback { fn to_h2(&self) -> bool { match *self { @@ -151,6 +246,7 @@ impl Fallback { } } +#[cfg(all(feature = "http1", feature = "http2"))] impl Unpin for Fallback {} /// Deconstructed parts of a `Connection`. @@ -158,6 +254,8 @@ impl Unpin for Fallback {} /// This allows taking apart a `Connection` at a later time, in order to /// reclaim the IO object, and additional related pieces. #[derive(Debug)] +#[cfg(any(feature = "http1", feature = "http2"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))] pub struct Parts { /// The original IO object used in the handshake. pub io: T, @@ -177,6 +275,7 @@ pub struct Parts { // ===== impl Http ===== +#[cfg(any(feature = "http1", feature = "http2"))] impl Http { /// Creates a new instance of the HTTP protocol, ready to spawn a server or /// start accepting connections. @@ -185,24 +284,35 @@ impl Http { exec: Exec::Default, h1_half_close: false, h1_keep_alive: true, - h1_writev: true, + h1_title_case_headers: false, + h1_preserve_header_case: false, + #[cfg(all(feature = "http1", feature = "runtime"))] + h1_header_read_timeout: None, + h1_writev: None, + #[cfg(feature = "http2")] h2_builder: Default::default(), - mode: ConnectionMode::Fallback, + mode: ConnectionMode::default(), max_buf_size: None, pipeline_flush: false, } } } +#[cfg(any(feature = "http1", feature = "http2"))] impl Http { /// Sets whether HTTP1 is required. /// /// Default is false + #[cfg(feature = "http1")] + #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] pub fn http1_only(&mut self, val: bool) -> &mut Self { if val { self.mode = ConnectionMode::H1Only; } else { - self.mode = ConnectionMode::Fallback; + #[cfg(feature = "http2")] + { + self.mode = ConnectionMode::Fallback; + } } self } @@ -215,6 +325,8 @@ impl Http { /// detects an EOF in the middle of a request. /// /// Default is `false`. + #[cfg(feature = "http1")] + #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] pub fn http1_half_close(&mut self, val: bool) -> &mut Self { self.h1_half_close = val; self @@ -223,16 +335,55 @@ impl Http { /// Enables or disables HTTP/1 keep-alive. /// /// Default is true. + #[cfg(feature = "http1")] + #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] pub fn http1_keep_alive(&mut self, val: bool) -> &mut Self { self.h1_keep_alive = val; self } - // renamed due different semantics of http2 keep alive - #[doc(hidden)] - #[deprecated(note = "renamed to `http1_keep_alive`")] - pub fn keep_alive(&mut self, val: bool) -> &mut Self { - self.http1_keep_alive(val) + /// Set whether HTTP/1 connections will write header names as title case at + /// the socket level. + /// + /// Note that this setting does not affect HTTP/2. + /// + /// Default is false. + #[cfg(feature = "http1")] + #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] + pub fn http1_title_case_headers(&mut self, enabled: bool) -> &mut Self { + self.h1_title_case_headers = enabled; + self + } + + /// Set whether to support preserving original header cases. 
+ /// + /// Currently, this will record the original cases received, and store them + /// in a private extension on the `Request`. It will also look for and use + /// such an extension in any provided `Response`. + /// + /// Since the relevant extension is still private, there is no way to + /// interact with the original cases. The only effect this can have now is + /// to forward the cases in a proxy-like fashion. + /// + /// Note that this setting does not affect HTTP/2. + /// + /// Default is false. + #[cfg(feature = "http1")] + #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] + pub fn http1_preserve_header_case(&mut self, enabled: bool) -> &mut Self { + self.h1_preserve_header_case = enabled; + self + } + + /// Set a timeout for reading client request headers. If a client does not + /// transmit the entire header within this time, the connection is closed. + /// + /// Default is None. + #[cfg(all(feature = "http1", feature = "runtime"))] + #[cfg_attr(docsrs, doc(cfg(all(feature = "http1", feature = "runtime"))))] + pub fn http1_header_read_timeout(&mut self, read_timeout: Duration) -> &mut Self { + self.h1_header_read_timeout = Some(read_timeout); + self } /// Set whether HTTP/1 connections should try to use vectored writes, @@ -242,21 +393,32 @@ impl Http { /// but may also improve performance when an IO transport doesn't /// support vectored writes well, such as most TLS implementations. /// - /// Default is `true`. + /// Setting this to true will force hyper to use queued strategy + /// which may eliminate unnecessary cloning on some TLS backends + /// + /// Default is `auto`. In this mode hyper will try to guess which + /// mode to use #[inline] + #[cfg(feature = "http1")] + #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] pub fn http1_writev(&mut self, val: bool) -> &mut Self { - self.h1_writev = val; + self.h1_writev = Some(val); self } /// Sets whether HTTP2 is required. /// /// Default is false + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] pub fn http2_only(&mut self, val: bool) -> &mut Self { if val { self.mode = ConnectionMode::H2Only; } else { - self.mode = ConnectionMode::Fallback; + #[cfg(feature = "http1")] + { + self.mode = ConnectionMode::Fallback; + } } self } @@ -269,6 +431,8 @@ impl Http { /// If not set, hyper will use a default. /// /// [spec]: https://http2.github.io/http2-spec/#SETTINGS_INITIAL_WINDOW_SIZE + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] pub fn http2_initial_stream_window_size(&mut self, sz: impl Into>) -> &mut Self { if let Some(sz) = sz.into() { self.h2_builder.adaptive_window = false; @@ -282,6 +446,8 @@ impl Http { /// Passing `None` will do nothing. /// /// If not set, hyper will use a default. + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] pub fn http2_initial_connection_window_size( &mut self, sz: impl Into>, @@ -298,6 +464,8 @@ impl Http { /// Enabling this will override the limits set in /// `http2_initial_stream_window_size` and /// `http2_initial_connection_window_size`. + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] pub fn http2_adaptive_window(&mut self, enabled: bool) -> &mut Self { use proto::h2::SPEC_WINDOW_SIZE; @@ -314,6 +482,8 @@ impl Http { /// Passing `None` will do nothing. /// /// If not set, hyper will use a default. 
+ #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] pub fn http2_max_frame_size(&mut self, sz: impl Into>) -> &mut Self { if let Some(sz) = sz.into() { self.h2_builder.max_frame_size = sz; @@ -327,6 +497,8 @@ impl Http { /// Default is no limit (`std::u32::MAX`). Passing `None` will do nothing. /// /// [spec]: https://http2.github.io/http2-spec/#SETTINGS_MAX_CONCURRENT_STREAMS + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] pub fn http2_max_concurrent_streams(&mut self, max: impl Into>) -> &mut Self { self.h2_builder.max_concurrent_streams = max.into(); self @@ -343,6 +515,8 @@ impl Http { /// /// Requires the `runtime` cargo feature to be enabled. #[cfg(feature = "runtime")] + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] pub fn http2_keep_alive_interval( &mut self, interval: impl Into>, @@ -362,11 +536,37 @@ impl Http { /// /// Requires the `runtime` cargo feature to be enabled. #[cfg(feature = "runtime")] + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] pub fn http2_keep_alive_timeout(&mut self, timeout: Duration) -> &mut Self { self.h2_builder.keep_alive_timeout = timeout; self } + /// Set the maximum write buffer size for each HTTP/2 stream. + /// + /// Default is currently ~400KB, but may change. + /// + /// # Panics + /// + /// The value must be no larger than `u32::MAX`. + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn http2_max_send_buf_size(&mut self, max: usize) -> &mut Self { + assert!(max <= std::u32::MAX as usize); + self.h2_builder.max_send_buffer_size = max; + self + } + + /// Enables the [extended CONNECT protocol]. + /// + /// [extended CONNECT protocol]: https://datatracker.ietf.org/doc/html/rfc8441#section-4 + #[cfg(feature = "http2")] + pub fn http2_enable_connect_protocol(&mut self) -> &mut Self { + self.h2_builder.enable_connect_protocol = true; + self + } + /// Set the maximum buffer size for the connection. /// /// Default is ~400kb. @@ -374,6 +574,8 @@ impl Http { /// # Panics /// /// The minimum value allowed is 8192. This method panics if the passed `max` is less than the minimum. + #[cfg(feature = "http1")] + #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] pub fn max_buf_size(&mut self, max: usize) -> &mut Self { assert!( max >= proto::h1::MINIMUM_MAX_BUFFER_SIZE, @@ -401,7 +603,12 @@ impl Http { exec, h1_half_close: self.h1_half_close, h1_keep_alive: self.h1_keep_alive, + h1_title_case_headers: self.h1_title_case_headers, + h1_preserve_header_case: self.h1_preserve_header_case, + #[cfg(all(feature = "http1", feature = "runtime"))] + h1_header_read_timeout: self.h1_header_read_timeout, h1_writev: self.h1_writev, + #[cfg(feature = "http2")] h2_builder: self.h2_builder, mode: self.mode, max_buf_size: self.max_buf_size, @@ -444,10 +651,11 @@ impl Http { Bd: HttpBody + 'static, Bd::Error: Into>, I: AsyncRead + AsyncWrite + Unpin, - E: H2Exec, + E: ConnStreamExec, { - let proto = match self.mode { - ConnectionMode::H1Only | ConnectionMode::Fallback => { + #[cfg(feature = "http1")] + macro_rules! 
h1 { + () => {{ let mut conn = proto::Conn::new(io); if !self.h1_keep_alive { conn.disable_keep_alive(); @@ -455,31 +663,60 @@ impl Http { if self.h1_half_close { conn.set_allow_half_close(); } - if !self.h1_writev { - conn.set_write_strategy_flatten(); + if self.h1_title_case_headers { + conn.set_title_case_headers(); + } + if self.h1_preserve_header_case { + conn.set_preserve_header_case(); + } + #[cfg(all(feature = "http1", feature = "runtime"))] + if let Some(header_read_timeout) = self.h1_header_read_timeout { + conn.set_http1_header_read_timeout(header_read_timeout); + } + if let Some(writev) = self.h1_writev { + if writev { + conn.set_write_strategy_queue(); + } else { + conn.set_write_strategy_flatten(); + } } conn.set_flush_pipeline(self.pipeline_flush); if let Some(max) = self.max_buf_size { conn.set_max_buf_size(max); } let sd = proto::h1::dispatch::Server::new(service); - ProtoServer::H1(proto::h1::Dispatcher::new(sd, conn)) - } + ProtoServer::H1 { + h1: proto::h1::Dispatcher::new(sd, conn), + } + }}; + } + + let proto = match self.mode { + #[cfg(feature = "http1")] + #[cfg(not(feature = "http2"))] + ConnectionMode::H1Only => h1!(), + #[cfg(feature = "http2")] + #[cfg(feature = "http1")] + ConnectionMode::H1Only | ConnectionMode::Fallback => h1!(), + #[cfg(feature = "http2")] ConnectionMode::H2Only => { let rewind_io = Rewind::new(io); let h2 = proto::h2::Server::new(rewind_io, service, &self.h2_builder, self.exec.clone()); - ProtoServer::H2(h2) + ProtoServer::H2 { h2 } } }; Connection { conn: Some(proto), + #[cfg(all(feature = "http1", feature = "http2"))] fallback: if self.mode == ConnectionMode::Fallback { Fallback::ToHttp2(self.h2_builder.clone(), self.exec.clone()) } else { Fallback::Http1Only }, + #[cfg(not(all(feature = "http1", feature = "http2")))] + fallback: PhantomData, } } @@ -491,7 +728,7 @@ impl Http { S: MakeServiceRef, S::Error: Into>, Bd: HttpBody, - E: H2Exec<>::Future, Bd>, + E: ConnStreamExec<>::Future, Bd>, { Serve { incoming, @@ -503,6 +740,7 @@ impl Http { // ===== impl Connection ===== +#[cfg(any(feature = "http1", feature = "http2"))] impl Connection where S: HttpService, @@ -510,7 +748,7 @@ where I: AsyncRead + AsyncWrite + Unpin, B: HttpBody + 'static, B::Error: Into>, - E: H2Exec, + E: ConnStreamExec, { /// Start a graceful shutdown process for this connection. /// @@ -522,15 +760,22 @@ where /// This should only be called while the `Connection` future is still /// pending. If called after `Connection::poll` has resolved, this does /// nothing. - pub fn graceful_shutdown(self: Pin<&mut Self>) { - match self.project().conn { - Some(ProtoServer::H1(ref mut h1)) => { + pub fn graceful_shutdown(mut self: Pin<&mut Self>) { + match self.conn { + #[cfg(feature = "http1")] + Some(ProtoServer::H1 { ref mut h1, .. }) => { h1.disable_keep_alive(); } - Some(ProtoServer::H2(ref mut h2)) => { + #[cfg(feature = "http2")] + Some(ProtoServer::H2 { ref mut h2 }) => { h2.graceful_shutdown(); } None => (), + + #[cfg(not(feature = "http1"))] + Some(ProtoServer::H1 { ref mut h1, .. }) => match h1.0 {}, + #[cfg(not(feature = "http2"))] + Some(ProtoServer::H2 { ref mut h2 }) => match h2.0 {}, } } @@ -553,7 +798,8 @@ where /// This method will return a `None` if this connection is using an h2 protocol. pub fn try_into_parts(self) -> Option> { match self.conn.unwrap() { - ProtoServer::H1(h1) => { + #[cfg(feature = "http1")] + ProtoServer::H1 { h1, .. 
} => { let (io, read_buf, dispatch) = h1.into_inner(); Some(Parts { io, @@ -562,7 +808,10 @@ where _inner: (), }) } - ProtoServer::H2(_h2) => None, + ProtoServer::H2 { .. } => None, + + #[cfg(not(feature = "http1"))] + ProtoServer::H1 { h1, .. } => match h1.0 {}, } } @@ -573,10 +822,6 @@ where /// upgrade. Once the upgrade is completed, the connection would be "done", /// but it is not desired to actually shutdown the IO object. Instead you /// would take it back using `into_parts`. - /// - /// Use [`poll_fn`](https://docs.rs/futures/0.1.25/futures/future/fn.poll_fn.html) - /// and [`try_ready!`](https://docs.rs/futures/0.1.25/futures/macro.try_ready.html) - /// to work with this function; or use the `without_shutdown` wrapper. pub fn poll_without_shutdown(&mut self, cx: &mut task::Context<'_>) -> Poll> where S: Unpin, @@ -584,25 +829,40 @@ where B: Unpin, { loop { - let polled = match *self.conn.as_mut().unwrap() { - ProtoServer::H1(ref mut h1) => h1.poll_without_shutdown(cx), - ProtoServer::H2(ref mut h2) => return Pin::new(h2).poll(cx).map_ok(|_| ()), - }; - match ready!(polled) { - Ok(()) => return Poll::Ready(Ok(())), - Err(e) => match *e.kind() { - Kind::Parse(Parse::VersionH2) if self.fallback.to_h2() => { - self.upgrade_h2(); - continue; + match *self.conn.as_mut().unwrap() { + #[cfg(feature = "http1")] + ProtoServer::H1 { ref mut h1, .. } => match ready!(h1.poll_without_shutdown(cx)) { + Ok(()) => return Poll::Ready(Ok(())), + Err(e) => { + #[cfg(feature = "http2")] + match *e.kind() { + Kind::Parse(Parse::VersionH2) if self.fallback.to_h2() => { + self.upgrade_h2(); + continue; + } + _ => (), + } + + return Poll::Ready(Err(e)); } - _ => return Poll::Ready(Err(e)), }, - } + #[cfg(feature = "http2")] + ProtoServer::H2 { ref mut h2 } => return Pin::new(h2).poll(cx).map_ok(|_| ()), + + #[cfg(not(feature = "http1"))] + ProtoServer::H1 { ref mut h1, .. } => match h1.0 {}, + #[cfg(not(feature = "http2"))] + ProtoServer::H2 { ref mut h2 } => match h2.0 {}, + }; } } /// Prevent shutdown of the underlying IO object at the end of service the request, /// instead run `into_parts`. This is a convenience wrapper over `poll_without_shutdown`. + /// + /// # Error + /// + /// This errors if the underlying connection protocol is not HTTP/1. pub fn without_shutdown(self) -> impl Future>> where S: Unpin, @@ -612,17 +872,18 @@ where let mut conn = Some(self); futures_util::future::poll_fn(move |cx| { ready!(conn.as_mut().unwrap().poll_without_shutdown(cx))?; - Poll::Ready(Ok(conn.take().unwrap().into_parts())) + Poll::Ready(conn.take().unwrap().try_into_parts().ok_or_else(crate::Error::new_without_shutdown_not_h1)) }) } + #[cfg(all(feature = "http1", feature = "http2"))] fn upgrade_h2(&mut self) { trace!("Trying to upgrade connection to h2"); let conn = self.conn.take(); let (io, read_buf, dispatch) = match conn.unwrap() { - ProtoServer::H1(h1) => h1.into_inner(), - ProtoServer::H2(_h2) => { + ProtoServer::H1 { h1, .. } => h1.into_inner(), + ProtoServer::H2 { .. } => { panic!("h2 cannot into_inner"); } }; @@ -635,7 +896,7 @@ where let h2 = proto::h2::Server::new(rewind_io, dispatch.into_service(), builder, exec.clone()); debug_assert!(self.conn.is_none()); - self.conn = Some(ProtoServer::H2(h2)); + self.conn = Some(ProtoServer::H2 { h2 }); } /// Enable this connection to support higher-level HTTP upgrades. 
@@ -649,6 +910,7 @@ where } } +#[cfg(any(feature = "http1", feature = "http2"))] impl Future for Connection where S: HttpService, @@ -656,7 +918,7 @@ where I: AsyncRead + AsyncWrite + Unpin + 'static, B: HttpBody + 'static, B::Error: Into>, - E: H2Exec, + E: ConnStreamExec, { type Output = crate::Result<()>; @@ -664,27 +926,38 @@ where loop { match ready!(Pin::new(self.conn.as_mut().unwrap()).poll(cx)) { Ok(done) => { - if let proto::Dispatched::Upgrade(pending) = done { - // With no `Send` bound on `I`, we can't try to do - // upgrades here. In case a user was trying to use - // `Body::on_upgrade` with this API, send a special - // error letting them know about that. - pending.manual(); - } + match done { + proto::Dispatched::Shutdown => {} + #[cfg(feature = "http1")] + proto::Dispatched::Upgrade(pending) => { + // With no `Send` bound on `I`, we can't try to do + // upgrades here. In case a user was trying to use + // `Body::on_upgrade` with this API, send a special + // error letting them know about that. + pending.manual(); + } + }; return Poll::Ready(Ok(())); } - Err(e) => match *e.kind() { - Kind::Parse(Parse::VersionH2) if self.fallback.to_h2() => { - self.upgrade_h2(); - continue; + Err(e) => { + #[cfg(feature = "http1")] + #[cfg(feature = "http2")] + match *e.kind() { + Kind::Parse(Parse::VersionH2) if self.fallback.to_h2() => { + self.upgrade_h2(); + continue; + } + _ => (), } - _ => return Poll::Ready(Err(e)), - }, + + return Poll::Ready(Err(e)); + } } } } } +#[cfg(any(feature = "http1", feature = "http2"))] impl fmt::Debug for Connection where S: HttpService, @@ -693,12 +966,34 @@ where f.debug_struct("Connection").finish() } } + +// ===== impl ConnectionMode ===== + +#[cfg(any(feature = "http1", feature = "http2"))] +impl Default for ConnectionMode { + #[cfg(all(feature = "http1", feature = "http2"))] + fn default() -> ConnectionMode { + ConnectionMode::Fallback + } + + #[cfg(all(feature = "http1", not(feature = "http2")))] + fn default() -> ConnectionMode { + ConnectionMode::H1Only + } + + #[cfg(all(not(feature = "http1"), feature = "http2"))] + fn default() -> ConnectionMode { + ConnectionMode::H2Only + } +} + // ===== impl Serve ===== +#[cfg(any(feature = "http1", feature = "http2"))] impl Serve { /// Get a reference to the incoming stream. 
#[inline] - pub fn incoming_ref(&self) -> &I { + pub(super) fn incoming_ref(&self) -> &I { &self.incoming } @@ -716,6 +1011,7 @@ impl Serve { } } +#[cfg(any(feature = "http1", feature = "http2"))] impl Serve where I: Accept, @@ -723,7 +1019,7 @@ where IE: Into>, S: MakeServiceRef, B: HttpBody, - E: H2Exec<>::Future, B>, + E: ConnStreamExec<>::Future, B>, { fn poll_next_( self: Pin<&mut Self>, @@ -754,6 +1050,7 @@ where // ===== impl Connecting ===== +#[cfg(any(feature = "http1", feature = "http2"))] impl Future for Connecting where I: AsyncRead + AsyncWrite + Unpin, @@ -761,33 +1058,35 @@ where S: HttpService, B: HttpBody + 'static, B::Error: Into>, - E: H2Exec, + E: ConnStreamExec, { type Output = Result, FE>; fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { - let me = self.project(); + let mut me = self.project(); let service = ready!(me.future.poll(cx))?; - let io = me.io.take().expect("polled after complete"); + let io = Option::take(&mut me.io).expect("polled after complete"); Poll::Ready(Ok(me.protocol.serve_connection(io, service))) } } // ===== impl SpawnAll ===== -#[cfg(feature = "tcp")] +#[cfg(all(feature = "tcp", any(feature = "http1", feature = "http2")))] impl SpawnAll { pub(super) fn local_addr(&self) -> SocketAddr { self.serve.incoming.local_addr() } } +#[cfg(any(feature = "http1", feature = "http2"))] impl SpawnAll { pub(super) fn incoming_ref(&self) -> &I { self.serve.incoming_ref() } } +#[cfg(any(feature = "http1", feature = "http2"))] impl SpawnAll where I: Accept, @@ -795,7 +1094,7 @@ where IO: AsyncRead + AsyncWrite + Unpin + Send + 'static, S: MakeServiceRef, B: HttpBody, - E: H2Exec<>::Future, B>, + E: ConnStreamExec<>::Future, B>, { pub(super) fn poll_watch( self: Pin<&mut Self>, @@ -825,6 +1124,7 @@ where // ===== impl ProtoServer ===== +#[cfg(any(feature = "http1", feature = "http2"))] impl Future for ProtoServer where T: AsyncRead + AsyncWrite + Unpin, @@ -832,30 +1132,37 @@ where S::Error: Into>, B: HttpBody + 'static, B::Error: Into>, - E: H2Exec, + E: ConnStreamExec, { type Output = crate::Result; - #[project] fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { - #[project] match self.project() { - ProtoServer::H1(s) => s.poll(cx), - ProtoServer::H2(s) => s.poll(cx), + #[cfg(feature = "http1")] + ProtoServerProj::H1 { h1, .. } => h1.poll(cx), + #[cfg(feature = "http2")] + ProtoServerProj::H2 { h2 } => h2.poll(cx), + + #[cfg(not(feature = "http1"))] + ProtoServerProj::H1 { h1, .. } => match h1.0 {}, + #[cfg(not(feature = "http2"))] + ProtoServerProj::H2 { h2 } => match h2.0 {}, } } } +#[cfg(any(feature = "http1", feature = "http2"))] pub(crate) mod spawn_all { use std::error::Error as StdError; use tokio::io::{AsyncRead, AsyncWrite}; + use tracing::debug; use super::{Connecting, UpgradeableConnection}; use crate::body::{Body, HttpBody}; - use crate::common::exec::H2Exec; + use crate::common::exec::ConnStreamExec; use crate::common::{task, Future, Pin, Poll, Unpin}; use crate::service::HttpService; - use pin_project::{pin_project, project}; + use pin_project_lite::pin_project; // Used by `SpawnAll` to optionally watch a `Connection` future. // @@ -879,7 +1186,7 @@ pub(crate) mod spawn_all { where I: AsyncRead + AsyncWrite + Unpin + Send + 'static, S: HttpService, - E: H2Exec, + E: ConnStreamExec, S::ResBody: 'static, ::Error: Into>, { @@ -900,23 +1207,36 @@ pub(crate) mod spawn_all { // Users cannot import this type, nor the associated `NewSvcExec`. Instead, // a blanket implementation for `Executor` is sufficient. 
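// A minimal sketch of the `pin_project_lite` enum-projection pattern adopted in
// the hunk below, assuming only the `pin_project_lite` crate; `Demo` and
// `DemoProj` are illustrative names, not hyper types. The `#[project = ...]`
// attribute names the generated projection enum, which is what lets state be
// matched and replaced without `unsafe`.
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};

use pin_project_lite::pin_project;

pin_project! {
    #[project = DemoProj]
    enum Demo<F> {
        // Still waiting on the pinned inner future.
        Waiting { #[pin] fut: F },
        // Terminal state once the inner future has completed.
        Done,
    }
}

impl<F: Future<Output = ()>> Future for Demo<F> {
    type Output = ();

    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
        // Compute the next state first and only then call `set`, so the
        // projection's borrow of `self` ends before the state is replaced.
        let next = match self.as_mut().project() {
            DemoProj::Waiting { fut } => match fut.poll(cx) {
                Poll::Ready(()) => Demo::Done,
                Poll::Pending => return Poll::Pending,
            },
            DemoProj::Done => return Poll::Ready(()),
        };
        self.set(next);
        Poll::Ready(())
    }
}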
- #[pin_project] - #[allow(missing_debug_implementations)] - pub struct NewSvcTask, E, W: Watcher> { - #[pin] - state: State, + pin_project! { + #[allow(missing_debug_implementations)] + pub struct NewSvcTask, E, W: Watcher> { + #[pin] + state: State, + } } - #[pin_project] - pub enum State, E, W: Watcher> { - Connecting(#[pin] Connecting, W), - Connected(#[pin] W::Future), + pin_project! { + #[project = StateProj] + pub(super) enum State, E, W: Watcher> { + Connecting { + #[pin] + connecting: Connecting, + watcher: W, + }, + Connected { + #[pin] + future: W::Future, + }, + } } impl, E, W: Watcher> NewSvcTask { pub(super) fn new(connecting: Connecting, watcher: W) -> Self { NewSvcTask { - state: State::Connecting(connecting, watcher), + state: State::Connecting { + connecting, + watcher, + }, } } } @@ -929,12 +1249,11 @@ pub(crate) mod spawn_all { S: HttpService, B: HttpBody + 'static, B::Error: Into>, - E: H2Exec, + E: ConnStreamExec, W: Watcher, { type Output = (); - #[project] fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { // If it weren't for needing to name this type so the `Send` bounds // could be projected to the `Serve` executor, this could just be @@ -943,9 +1262,11 @@ pub(crate) mod spawn_all { let mut me = self.project(); loop { let next = { - #[project] match me.state.as_mut().project() { - State::Connecting(connecting, watcher) => { + StateProj::Connecting { + connecting, + watcher, + } => { let res = ready!(connecting.poll(cx)); let conn = match res { Ok(conn) => conn, @@ -955,10 +1276,10 @@ pub(crate) mod spawn_all { return Poll::Ready(()); } }; - let connected = watcher.watch(conn.with_upgrades()); - State::Connected(connected) + let future = watcher.watch(conn.with_upgrades()); + State::Connected { future } } - State::Connected(future) => { + StateProj::Connected { future } => { return future.poll(cx).map(|res| { if let Err(err) = res { debug!("connection error: {}", err); @@ -974,6 +1295,7 @@ pub(crate) mod spawn_all { } } +#[cfg(any(feature = "http1", feature = "http2"))] mod upgrades { use super::*; @@ -997,7 +1319,7 @@ mod upgrades { I: AsyncRead + AsyncWrite + Unpin, B: HttpBody + 'static, B::Error: Into>, - E: H2Exec, + E: ConnStreamExec, { /// Start a graceful shutdown process for this connection. /// @@ -1015,7 +1337,7 @@ mod upgrades { I: AsyncRead + AsyncWrite + Unpin + Send + 'static, B: HttpBody + 'static, B::Error: Into>, - E: super::H2Exec, + E: ConnStreamExec, { type Output = crate::Result<()>; @@ -1023,23 +1345,33 @@ mod upgrades { loop { match ready!(Pin::new(self.inner.conn.as_mut().unwrap()).poll(cx)) { Ok(proto::Dispatched::Shutdown) => return Poll::Ready(Ok(())), + #[cfg(feature = "http1")] Ok(proto::Dispatched::Upgrade(pending)) => { - let h1 = match mem::replace(&mut self.inner.conn, None) { - Some(ProtoServer::H1(h1)) => h1, - _ => unreachable!("Upgrade expects h1"), + match self.inner.conn.take() { + Some(ProtoServer::H1 { h1, .. 
}) => { + let (io, buf, _) = h1.into_inner(); + pending.fulfill(Upgraded::new(io, buf)); + return Poll::Ready(Ok(())); + } + _ => { + drop(pending); + unreachable!("Upgrade expects h1") + } }; - - let (io, buf, _) = h1.into_inner(); - pending.fulfill(Upgraded::new(io, buf)); - return Poll::Ready(Ok(())); } - Err(e) => match *e.kind() { - Kind::Parse(Parse::VersionH2) if self.inner.fallback.to_h2() => { - self.inner.upgrade_h2(); - continue; + Err(e) => { + #[cfg(feature = "http1")] + #[cfg(feature = "http2")] + match *e.kind() { + Kind::Parse(Parse::VersionH2) if self.inner.fallback.to_h2() => { + self.inner.upgrade_h2(); + continue; + } + _ => (), } - _ => return Poll::Ready(Err(e)), - }, + + return Poll::Ready(Err(e)); + } } } } diff --git a/third_party/rust/hyper/src/server/mod.rs b/third_party/rust/hyper/src/server/mod.rs index a57eaebbf423..a97944f518b6 100644 --- a/third_party/rust/hyper/src/server/mod.rs +++ b/third_party/rust/hyper/src/server/mod.rs @@ -16,7 +16,7 @@ //! //! [`Server`](Server) accepts connections in both HTTP1 and HTTP2 by default. //! -//! ## Example +//! ## Examples //! //! ```no_run //! use std::convert::Infallible; @@ -50,434 +50,115 @@ //! # #[cfg(not(feature = "runtime"))] //! # fn main() {} //! ``` +//! +//! If you don't need the connection and your service implements `Clone` you can use +//! [`tower::make::Shared`] instead of `make_service_fn` which is a bit simpler: +//! +//! ```no_run +//! # use std::convert::Infallible; +//! # use std::net::SocketAddr; +//! # use hyper::{Body, Request, Response, Server}; +//! # use hyper::service::{make_service_fn, service_fn}; +//! # use tower::make::Shared; +//! # async fn handle(_req: Request) -> Result, Infallible> { +//! # Ok(Response::new(Body::from("Hello World"))) +//! # } +//! # #[cfg(feature = "runtime")] +//! #[tokio::main] +//! async fn main() { +//! // Construct our SocketAddr to listen on... +//! let addr = SocketAddr::from(([127, 0, 0, 1], 3000)); +//! +//! // Shared is a MakeService that produces services by cloning an inner service... +//! let make_service = Shared::new(service_fn(handle)); +//! +//! // Then bind and serve... +//! let server = Server::bind(&addr).serve(make_service); +//! +//! // And run forever... +//! if let Err(e) = server.await { +//! eprintln!("server error: {}", e); +//! } +//! } +//! # #[cfg(not(feature = "runtime"))] +//! # fn main() {} +//! ``` +//! +//! Passing data to your request handler can be done like so: +//! +//! ```no_run +//! use std::convert::Infallible; +//! use std::net::SocketAddr; +//! use hyper::{Body, Request, Response, Server}; +//! use hyper::service::{make_service_fn, service_fn}; +//! use hyper::server::conn::AddrStream; +//! +//! #[derive(Clone)] +//! struct AppContext { +//! // Whatever data your application needs can go here +//! } +//! +//! async fn handle( +//! context: AppContext, +//! addr: SocketAddr, +//! req: Request +//! ) -> Result, Infallible> { +//! Ok(Response::new(Body::from("Hello World"))) +//! } +//! +//! # #[cfg(feature = "runtime")] +//! #[tokio::main] +//! async fn main() { +//! let context = AppContext { +//! // ... +//! }; +//! +//! // A `MakeService` that produces a `Service` to handle each connection. +//! let make_service = make_service_fn(move |conn: &AddrStream| { +//! // We have to clone the context to share it with each invocation of +//! // `make_service`. If your data doesn't implement `Clone` consider using +//! // an `std::sync::Arc`. +//! let context = context.clone(); +//! +//! 
// You can grab the address of the incoming connection like so. +//! let addr = conn.remote_addr(); +//! +//! // Create a `Service` for responding to the request. +//! let service = service_fn(move |req| { +//! handle(context.clone(), addr, req) +//! }); +//! +//! // Return the service to hyper. +//! async move { Ok::<_, Infallible>(service) } +//! }); +//! +//! // Run the server like above... +//! let addr = SocketAddr::from(([127, 0, 0, 1], 3000)); +//! +//! let server = Server::bind(&addr).serve(make_service); +//! +//! if let Err(e) = server.await { +//! eprintln!("server error: {}", e); +//! } +//! } +//! # #[cfg(not(feature = "runtime"))] +//! # fn main() {} +//! ``` +//! +//! [`tower::make::Shared`]: https://docs.rs/tower/latest/tower/make/struct.Shared.html pub mod accept; pub mod conn; -mod shutdown; +mod server; #[cfg(feature = "tcp")] mod tcp; -use std::error::Error as StdError; -use std::fmt; -#[cfg(feature = "tcp")] -use std::net::{SocketAddr, TcpListener as StdTcpListener}; +pub use self::server::Server; -#[cfg(feature = "tcp")] -use std::time::Duration; +cfg_feature! { + #![any(feature = "http1", feature = "http2")] -use pin_project::pin_project; -use tokio::io::{AsyncRead, AsyncWrite}; + pub use self::server::Builder; -use self::accept::Accept; -use crate::body::{Body, HttpBody}; -use crate::common::exec::{Exec, H2Exec, NewSvcExec}; -use crate::common::{task, Future, Pin, Poll, Unpin}; -use crate::service::{HttpService, MakeServiceRef}; -// Renamed `Http` as `Http_` for now so that people upgrading don't see an -// error that `hyper::server::Http` is private... -use self::conn::{Http as Http_, NoopWatcher, SpawnAll}; -use self::shutdown::{Graceful, GracefulWatcher}; -#[cfg(feature = "tcp")] -use self::tcp::AddrIncoming; - -/// A listening HTTP server that accepts connections in both HTTP1 and HTTP2 by default. -/// -/// `Server` is a `Future` mapping a bound listener with a set of service -/// handlers. It is built using the [`Builder`](Builder), and the future -/// completes when the server has been shutdown. It should be run by an -/// `Executor`. -#[pin_project] -pub struct Server { - #[pin] - spawn_all: SpawnAll, -} - -/// A builder for a [`Server`](Server). -#[derive(Debug)] -pub struct Builder { - incoming: I, - protocol: Http_, -} - -// ===== impl Server ===== - -impl Server { - /// Starts a [`Builder`](Builder) with the provided incoming stream. - pub fn builder(incoming: I) -> Builder { - Builder { - incoming, - protocol: Http_::new(), - } - } -} - -#[cfg(feature = "tcp")] -impl Server { - /// Binds to the provided address, and returns a [`Builder`](Builder). - /// - /// # Panics - /// - /// This method will panic if binding to the address fails. For a method - /// to bind to an address and return a `Result`, see `Server::try_bind`. - pub fn bind(addr: &SocketAddr) -> Builder { - let incoming = AddrIncoming::new(addr).unwrap_or_else(|e| { - panic!("error binding to {}: {}", addr, e); - }); - Server::builder(incoming) - } - - /// Tries to bind to the provided address, and returns a [`Builder`](Builder). - pub fn try_bind(addr: &SocketAddr) -> crate::Result> { - AddrIncoming::new(addr).map(Server::builder) - } - - /// Create a new instance from a `std::net::TcpListener` instance. - pub fn from_tcp(listener: StdTcpListener) -> Result, crate::Error> { - AddrIncoming::from_std(listener).map(Server::builder) - } -} - -#[cfg(feature = "tcp")] -impl Server { - /// Returns the local address that this server is bound to. 
- pub fn local_addr(&self) -> SocketAddr { - self.spawn_all.local_addr() - } -} - -impl Server -where - I: Accept, - IE: Into>, - IO: AsyncRead + AsyncWrite + Unpin + Send + 'static, - S: MakeServiceRef, - S::Error: Into>, - B: HttpBody + Send + Sync + 'static, - B::Error: Into>, - E: H2Exec<>::Future, B>, - E: NewSvcExec, -{ - /// Prepares a server to handle graceful shutdown when the provided future - /// completes. - /// - /// # Example - /// - /// ``` - /// # fn main() {} - /// # #[cfg(feature = "tcp")] - /// # async fn run() { - /// # use hyper::{Body, Response, Server, Error}; - /// # use hyper::service::{make_service_fn, service_fn}; - /// # let make_service = make_service_fn(|_| async { - /// # Ok::<_, Error>(service_fn(|_req| async { - /// # Ok::<_, Error>(Response::new(Body::from("Hello World"))) - /// # })) - /// # }); - /// // Make a server from the previous examples... - /// let server = Server::bind(&([127, 0, 0, 1], 3000).into()) - /// .serve(make_service); - /// - /// // Prepare some signal for when the server should start shutting down... - /// let (tx, rx) = tokio::sync::oneshot::channel::<()>(); - /// let graceful = server - /// .with_graceful_shutdown(async { - /// rx.await.ok(); - /// }); - /// - /// // Await the `server` receiving the signal... - /// if let Err(e) = graceful.await { - /// eprintln!("server error: {}", e); - /// } - /// - /// // And later, trigger the signal by calling `tx.send(())`. - /// let _ = tx.send(()); - /// # } - /// ``` - pub fn with_graceful_shutdown(self, signal: F) -> Graceful - where - F: Future, - { - Graceful::new(self.spawn_all, signal) - } -} - -impl Future for Server -where - I: Accept, - IE: Into>, - IO: AsyncRead + AsyncWrite + Unpin + Send + 'static, - S: MakeServiceRef, - S::Error: Into>, - B: HttpBody + 'static, - B::Error: Into>, - E: H2Exec<>::Future, B>, - E: NewSvcExec, -{ - type Output = crate::Result<()>; - - fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { - self.project().spawn_all.poll_watch(cx, &NoopWatcher) - } -} - -impl fmt::Debug for Server { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Server") - .field("listener", &self.spawn_all.incoming_ref()) - .finish() - } -} - -// ===== impl Builder ===== - -impl Builder { - /// Start a new builder, wrapping an incoming stream and low-level options. - /// - /// For a more convenient constructor, see [`Server::bind`](Server::bind). - pub fn new(incoming: I, protocol: Http_) -> Self { - Builder { incoming, protocol } - } - - /// Sets whether to use keep-alive for HTTP/1 connections. - /// - /// Default is `true`. - pub fn http1_keepalive(mut self, val: bool) -> Self { - self.protocol.http1_keep_alive(val); - self - } - - /// Set whether HTTP/1 connections should support half-closures. - /// - /// Clients can chose to shutdown their write-side while waiting - /// for the server to respond. Setting this to `true` will - /// prevent closing the connection immediately if `read` - /// detects an EOF in the middle of a request. - /// - /// Default is `false`. - pub fn http1_half_close(mut self, val: bool) -> Self { - self.protocol.http1_half_close(val); - self - } - - /// Set the maximum buffer size. - /// - /// Default is ~ 400kb. - pub fn http1_max_buf_size(mut self, val: usize) -> Self { - self.protocol.max_buf_size(val); - self - } - - // Sets whether to bunch up HTTP/1 writes until the read buffer is empty. - // - // This isn't really desirable in most cases, only really being useful in - // silly pipeline benchmarks. 
- #[doc(hidden)] - pub fn http1_pipeline_flush(mut self, val: bool) -> Self { - self.protocol.pipeline_flush(val); - self - } - - /// Set whether HTTP/1 connections should try to use vectored writes, - /// or always flatten into a single buffer. - /// - /// # Note - /// - /// Setting this to `false` may mean more copies of body data, - /// but may also improve performance when an IO transport doesn't - /// support vectored writes well, such as most TLS implementations. - /// - /// Default is `true`. - pub fn http1_writev(mut self, val: bool) -> Self { - self.protocol.http1_writev(val); - self - } - - /// Sets whether HTTP/1 is required. - /// - /// Default is `false`. - pub fn http1_only(mut self, val: bool) -> Self { - self.protocol.http1_only(val); - self - } - - /// Sets whether HTTP/2 is required. - /// - /// Default is `false`. - pub fn http2_only(mut self, val: bool) -> Self { - self.protocol.http2_only(val); - self - } - - /// Sets the [`SETTINGS_INITIAL_WINDOW_SIZE`][spec] option for HTTP2 - /// stream-level flow control. - /// - /// Passing `None` will do nothing. - /// - /// If not set, hyper will use a default. - /// - /// [spec]: https://http2.github.io/http2-spec/#SETTINGS_INITIAL_WINDOW_SIZE - pub fn http2_initial_stream_window_size(mut self, sz: impl Into>) -> Self { - self.protocol.http2_initial_stream_window_size(sz.into()); - self - } - - /// Sets the max connection-level flow control for HTTP2 - /// - /// Passing `None` will do nothing. - /// - /// If not set, hyper will use a default. - pub fn http2_initial_connection_window_size(mut self, sz: impl Into>) -> Self { - self.protocol - .http2_initial_connection_window_size(sz.into()); - self - } - - /// Sets whether to use an adaptive flow control. - /// - /// Enabling this will override the limits set in - /// `http2_initial_stream_window_size` and - /// `http2_initial_connection_window_size`. - pub fn http2_adaptive_window(mut self, enabled: bool) -> Self { - self.protocol.http2_adaptive_window(enabled); - self - } - - /// Sets the [`SETTINGS_MAX_CONCURRENT_STREAMS`][spec] option for HTTP2 - /// connections. - /// - /// Default is no limit (`std::u32::MAX`). Passing `None` will do nothing. - /// - /// [spec]: https://http2.github.io/http2-spec/#SETTINGS_MAX_CONCURRENT_STREAMS - pub fn http2_max_concurrent_streams(mut self, max: impl Into>) -> Self { - self.protocol.http2_max_concurrent_streams(max.into()); - self - } - - /// Sets an interval for HTTP2 Ping frames should be sent to keep a - /// connection alive. - /// - /// Pass `None` to disable HTTP2 keep-alive. - /// - /// Default is currently disabled. - /// - /// # Cargo Feature - /// - /// Requires the `runtime` cargo feature to be enabled. - #[cfg(feature = "runtime")] - pub fn http2_keep_alive_interval(mut self, interval: impl Into>) -> Self { - self.protocol.http2_keep_alive_interval(interval); - self - } - - /// Sets a timeout for receiving an acknowledgement of the keep-alive ping. - /// - /// If the ping is not acknowledged within the timeout, the connection will - /// be closed. Does nothing if `http2_keep_alive_interval` is disabled. - /// - /// Default is 20 seconds. - /// - /// # Cargo Feature - /// - /// Requires the `runtime` cargo feature to be enabled. - #[cfg(feature = "runtime")] - pub fn http2_keep_alive_timeout(mut self, timeout: Duration) -> Self { - self.protocol.http2_keep_alive_timeout(timeout); - self - } - - /// Sets the `Executor` to deal with connection tasks. - /// - /// Default is `tokio::spawn`. 
- pub fn executor(self, executor: E2) -> Builder { - Builder { - incoming: self.incoming, - protocol: self.protocol.with_executor(executor), - } - } - - /// Consume this `Builder`, creating a [`Server`](Server). - /// - /// # Example - /// - /// ``` - /// # #[cfg(feature = "tcp")] - /// # async fn run() { - /// use hyper::{Body, Error, Response, Server}; - /// use hyper::service::{make_service_fn, service_fn}; - /// - /// // Construct our SocketAddr to listen on... - /// let addr = ([127, 0, 0, 1], 3000).into(); - /// - /// // And a MakeService to handle each connection... - /// let make_svc = make_service_fn(|_| async { - /// Ok::<_, Error>(service_fn(|_req| async { - /// Ok::<_, Error>(Response::new(Body::from("Hello World"))) - /// })) - /// }); - /// - /// // Then bind and serve... - /// let server = Server::bind(&addr) - /// .serve(make_svc); - /// - /// // Run forever-ish... - /// if let Err(err) = server.await { - /// eprintln!("server error: {}", err); - /// } - /// # } - /// ``` - pub fn serve(self, new_service: S) -> Server - where - I: Accept, - I::Error: Into>, - I::Conn: AsyncRead + AsyncWrite + Unpin + Send + 'static, - S: MakeServiceRef, - S::Error: Into>, - B: HttpBody + 'static, - B::Error: Into>, - E: NewSvcExec, - E: H2Exec<>::Future, B>, - { - let serve = self.protocol.serve(self.incoming, new_service); - let spawn_all = serve.spawn_all(); - Server { spawn_all } - } -} - -#[cfg(feature = "tcp")] -impl Builder { - /// Set whether TCP keepalive messages are enabled on accepted connections. - /// - /// If `None` is specified, keepalive is disabled, otherwise the duration - /// specified will be the time to remain idle before sending TCP keepalive - /// probes. - pub fn tcp_keepalive(mut self, keepalive: Option) -> Self { - self.incoming.set_keepalive(keepalive); - self - } - - /// Set the value of `TCP_NODELAY` option for accepted connections. - pub fn tcp_nodelay(mut self, enabled: bool) -> Self { - self.incoming.set_nodelay(enabled); - self - } - - /// Set whether to sleep on accept errors. - /// - /// A possible scenario is that the process has hit the max open files - /// allowed, and so trying to accept a new connection will fail with - /// EMFILE. In some cases, it's preferable to just wait for some time, if - /// the application will likely close some files (or connections), and try - /// to accept the connection again. If this option is true, the error will - /// be logged at the error level, since it is still a big deal, and then - /// the listener will sleep for 1 second. - /// - /// In other cases, hitting the max open files should be treat similarly - /// to being out-of-memory, and simply error (and shutdown). Setting this - /// option to false will allow that. - /// - /// For more details see [`AddrIncoming::set_sleep_on_errors`] - pub fn tcp_sleep_on_accept_errors(mut self, val: bool) -> Self { - self.incoming.set_sleep_on_errors(val); - self - } + mod shutdown; } diff --git a/third_party/rust/hyper/src/server/server.rs b/third_party/rust/hyper/src/server/server.rs new file mode 100644 index 000000000000..c48582c7fde7 --- /dev/null +++ b/third_party/rust/hyper/src/server/server.rs @@ -0,0 +1,560 @@ +use std::fmt; +#[cfg(feature = "tcp")] +use std::net::{SocketAddr, TcpListener as StdTcpListener}; +#[cfg(any(feature = "tcp", feature = "http1"))] +use std::time::Duration; + +#[cfg(all(feature = "tcp", any(feature = "http1", feature = "http2")))] +use super::tcp::AddrIncoming; +use crate::common::exec::Exec; + +cfg_feature! 
{ + #![any(feature = "http1", feature = "http2")] + + use std::error::Error as StdError; + + use pin_project_lite::pin_project; + use tokio::io::{AsyncRead, AsyncWrite}; + + use super::accept::Accept; + use crate::body::{Body, HttpBody}; + use crate::common::{task, Future, Pin, Poll, Unpin}; + use crate::common::exec::{ConnStreamExec, NewSvcExec}; + // Renamed `Http` as `Http_` for now so that people upgrading don't see an + // error that `hyper::server::Http` is private... + use super::conn::{Http as Http_, NoopWatcher, SpawnAll}; + use super::shutdown::{Graceful, GracefulWatcher}; + use crate::service::{HttpService, MakeServiceRef}; +} + +#[cfg(any(feature = "http1", feature = "http2"))] +pin_project! { + /// A listening HTTP server that accepts connections in both HTTP1 and HTTP2 by default. + /// + /// `Server` is a `Future` mapping a bound listener with a set of service + /// handlers. It is built using the [`Builder`](Builder), and the future + /// completes when the server has been shutdown. It should be run by an + /// `Executor`. + pub struct Server { + #[pin] + spawn_all: SpawnAll, + } +} + +/// A listening HTTP server that accepts connections in both HTTP1 and HTTP2 by default. +/// +/// Needs at least one of the `http1` and `http2` features to be activated to actually be useful. +#[cfg(not(any(feature = "http1", feature = "http2")))] +pub struct Server { + _marker: std::marker::PhantomData<(I, S, E)>, +} + +/// A builder for a [`Server`](Server). +#[derive(Debug)] +#[cfg(any(feature = "http1", feature = "http2"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))] +pub struct Builder { + incoming: I, + protocol: Http_, +} + +// ===== impl Server ===== + +#[cfg(any(feature = "http1", feature = "http2"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))] +impl Server { + /// Starts a [`Builder`](Builder) with the provided incoming stream. + pub fn builder(incoming: I) -> Builder { + Builder { + incoming, + protocol: Http_::new(), + } + } +} + +cfg_feature! { + #![all(feature = "tcp", any(feature = "http1", feature = "http2"))] + + impl Server { + /// Binds to the provided address, and returns a [`Builder`](Builder). + /// + /// # Panics + /// + /// This method will panic if binding to the address fails. For a method + /// to bind to an address and return a `Result`, see `Server::try_bind`. + pub fn bind(addr: &SocketAddr) -> Builder { + let incoming = AddrIncoming::new(addr).unwrap_or_else(|e| { + panic!("error binding to {}: {}", addr, e); + }); + Server::builder(incoming) + } + + /// Tries to bind to the provided address, and returns a [`Builder`](Builder). + pub fn try_bind(addr: &SocketAddr) -> crate::Result> { + AddrIncoming::new(addr).map(Server::builder) + } + + /// Create a new instance from a `std::net::TcpListener` instance. + pub fn from_tcp(listener: StdTcpListener) -> Result, crate::Error> { + AddrIncoming::from_std(listener).map(Server::builder) + } + } +} + +cfg_feature! { + #![all(feature = "tcp", any(feature = "http1", feature = "http2"))] + + impl Server { + /// Returns the local address that this server is bound to. 
+ pub fn local_addr(&self) -> SocketAddr { + self.spawn_all.local_addr() + } + } +} + +#[cfg(any(feature = "http1", feature = "http2"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))] +impl Server +where + I: Accept, + IE: Into>, + IO: AsyncRead + AsyncWrite + Unpin + Send + 'static, + S: MakeServiceRef, + S::Error: Into>, + B: HttpBody + 'static, + B::Error: Into>, + E: ConnStreamExec<>::Future, B>, + E: NewSvcExec, +{ + /// Prepares a server to handle graceful shutdown when the provided future + /// completes. + /// + /// # Example + /// + /// ``` + /// # fn main() {} + /// # #[cfg(feature = "tcp")] + /// # async fn run() { + /// # use hyper::{Body, Response, Server, Error}; + /// # use hyper::service::{make_service_fn, service_fn}; + /// # let make_service = make_service_fn(|_| async { + /// # Ok::<_, Error>(service_fn(|_req| async { + /// # Ok::<_, Error>(Response::new(Body::from("Hello World"))) + /// # })) + /// # }); + /// // Make a server from the previous examples... + /// let server = Server::bind(&([127, 0, 0, 1], 3000).into()) + /// .serve(make_service); + /// + /// // Prepare some signal for when the server should start shutting down... + /// let (tx, rx) = tokio::sync::oneshot::channel::<()>(); + /// let graceful = server + /// .with_graceful_shutdown(async { + /// rx.await.ok(); + /// }); + /// + /// // Await the `server` receiving the signal... + /// if let Err(e) = graceful.await { + /// eprintln!("server error: {}", e); + /// } + /// + /// // And later, trigger the signal by calling `tx.send(())`. + /// let _ = tx.send(()); + /// # } + /// ``` + pub fn with_graceful_shutdown(self, signal: F) -> Graceful + where + F: Future, + { + Graceful::new(self.spawn_all, signal) + } +} + +#[cfg(any(feature = "http1", feature = "http2"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))] +impl Future for Server +where + I: Accept, + IE: Into>, + IO: AsyncRead + AsyncWrite + Unpin + Send + 'static, + S: MakeServiceRef, + S::Error: Into>, + B: HttpBody + 'static, + B::Error: Into>, + E: ConnStreamExec<>::Future, B>, + E: NewSvcExec, +{ + type Output = crate::Result<()>; + + fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { + self.project().spawn_all.poll_watch(cx, &NoopWatcher) + } +} + +impl fmt::Debug for Server { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let mut st = f.debug_struct("Server"); + #[cfg(any(feature = "http1", feature = "http2"))] + st.field("listener", &self.spawn_all.incoming_ref()); + st.finish() + } +} + +// ===== impl Builder ===== + +#[cfg(any(feature = "http1", feature = "http2"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))] +impl Builder { + /// Start a new builder, wrapping an incoming stream and low-level options. + /// + /// For a more convenient constructor, see [`Server::bind`](Server::bind). + pub fn new(incoming: I, protocol: Http_) -> Self { + Builder { incoming, protocol } + } + + /// Sets whether to use keep-alive for HTTP/1 connections. + /// + /// Default is `true`. + #[cfg(feature = "http1")] + #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] + pub fn http1_keepalive(mut self, val: bool) -> Self { + self.protocol.http1_keep_alive(val); + self + } + + /// Set whether HTTP/1 connections should support half-closures. + /// + /// Clients can chose to shutdown their write-side while waiting + /// for the server to respond. 
Setting this to `true` will + /// prevent closing the connection immediately if `read` + /// detects an EOF in the middle of a request. + /// + /// Default is `false`. + #[cfg(feature = "http1")] + #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] + pub fn http1_half_close(mut self, val: bool) -> Self { + self.protocol.http1_half_close(val); + self + } + + /// Set the maximum buffer size. + /// + /// Default is ~ 400kb. + #[cfg(feature = "http1")] + #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] + pub fn http1_max_buf_size(mut self, val: usize) -> Self { + self.protocol.max_buf_size(val); + self + } + + // Sets whether to bunch up HTTP/1 writes until the read buffer is empty. + // + // This isn't really desirable in most cases, only really being useful in + // silly pipeline benchmarks. + #[doc(hidden)] + #[cfg(feature = "http1")] + pub fn http1_pipeline_flush(mut self, val: bool) -> Self { + self.protocol.pipeline_flush(val); + self + } + + /// Set whether HTTP/1 connections should try to use vectored writes, + /// or always flatten into a single buffer. + /// + /// Note that setting this to false may mean more copies of body data, + /// but may also improve performance when an IO transport doesn't + /// support vectored writes well, such as most TLS implementations. + /// + /// Setting this to true will force hyper to use queued strategy + /// which may eliminate unnecessary cloning on some TLS backends + /// + /// Default is `auto`. In this mode hyper will try to guess which + /// mode to use + #[cfg(feature = "http1")] + pub fn http1_writev(mut self, enabled: bool) -> Self { + self.protocol.http1_writev(enabled); + self + } + + /// Set whether HTTP/1 connections will write header names as title case at + /// the socket level. + /// + /// Note that this setting does not affect HTTP/2. + /// + /// Default is false. + #[cfg(feature = "http1")] + #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] + pub fn http1_title_case_headers(mut self, val: bool) -> Self { + self.protocol.http1_title_case_headers(val); + self + } + + /// Set whether to support preserving original header cases. + /// + /// Currently, this will record the original cases received, and store them + /// in a private extension on the `Request`. It will also look for and use + /// such an extension in any provided `Response`. + /// + /// Since the relevant extension is still private, there is no way to + /// interact with the original cases. The only effect this can have now is + /// to forward the cases in a proxy-like fashion. + /// + /// Note that this setting does not affect HTTP/2. + /// + /// Default is false. + #[cfg(feature = "http1")] + #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] + pub fn http1_preserve_header_case(mut self, val: bool) -> Self { + self.protocol.http1_preserve_header_case(val); + self + } + + /// Set a timeout for reading client request headers. If a client does not + /// transmit the entire header within this time, the connection is closed. + /// + /// Default is None. + #[cfg(all(feature = "http1", feature = "runtime"))] + #[cfg_attr(docsrs, doc(cfg(all(feature = "http1", feature = "runtime"))))] + pub fn http1_header_read_timeout(mut self, read_timeout: Duration) -> Self { + self.protocol.http1_header_read_timeout(read_timeout); + self + } + + /// Sets whether HTTP/1 is required. + /// + /// Default is `false`. 
+ #[cfg(feature = "http1")] + #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] + pub fn http1_only(mut self, val: bool) -> Self { + self.protocol.http1_only(val); + self + } + + /// Sets whether HTTP/2 is required. + /// + /// Default is `false`. + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn http2_only(mut self, val: bool) -> Self { + self.protocol.http2_only(val); + self + } + + /// Sets the [`SETTINGS_INITIAL_WINDOW_SIZE`][spec] option for HTTP2 + /// stream-level flow control. + /// + /// Passing `None` will do nothing. + /// + /// If not set, hyper will use a default. + /// + /// [spec]: https://http2.github.io/http2-spec/#SETTINGS_INITIAL_WINDOW_SIZE + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn http2_initial_stream_window_size(mut self, sz: impl Into>) -> Self { + self.protocol.http2_initial_stream_window_size(sz.into()); + self + } + + /// Sets the max connection-level flow control for HTTP2 + /// + /// Passing `None` will do nothing. + /// + /// If not set, hyper will use a default. + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn http2_initial_connection_window_size(mut self, sz: impl Into>) -> Self { + self.protocol + .http2_initial_connection_window_size(sz.into()); + self + } + + /// Sets whether to use an adaptive flow control. + /// + /// Enabling this will override the limits set in + /// `http2_initial_stream_window_size` and + /// `http2_initial_connection_window_size`. + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn http2_adaptive_window(mut self, enabled: bool) -> Self { + self.protocol.http2_adaptive_window(enabled); + self + } + + /// Sets the maximum frame size to use for HTTP2. + /// + /// Passing `None` will do nothing. + /// + /// If not set, hyper will use a default. + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn http2_max_frame_size(mut self, sz: impl Into>) -> Self { + self.protocol.http2_max_frame_size(sz); + self + } + + /// Sets the [`SETTINGS_MAX_CONCURRENT_STREAMS`][spec] option for HTTP2 + /// connections. + /// + /// Default is no limit (`std::u32::MAX`). Passing `None` will do nothing. + /// + /// [spec]: https://http2.github.io/http2-spec/#SETTINGS_MAX_CONCURRENT_STREAMS + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn http2_max_concurrent_streams(mut self, max: impl Into>) -> Self { + self.protocol.http2_max_concurrent_streams(max.into()); + self + } + + /// Sets an interval for HTTP2 Ping frames should be sent to keep a + /// connection alive. + /// + /// Pass `None` to disable HTTP2 keep-alive. + /// + /// Default is currently disabled. + /// + /// # Cargo Feature + /// + /// Requires the `runtime` cargo feature to be enabled. + #[cfg(all(feature = "runtime", feature = "http2"))] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn http2_keep_alive_interval(mut self, interval: impl Into>) -> Self { + self.protocol.http2_keep_alive_interval(interval); + self + } + + /// Sets a timeout for receiving an acknowledgement of the keep-alive ping. + /// + /// If the ping is not acknowledged within the timeout, the connection will + /// be closed. Does nothing if `http2_keep_alive_interval` is disabled. + /// + /// Default is 20 seconds. + /// + /// # Cargo Feature + /// + /// Requires the `runtime` cargo feature to be enabled. 
+ #[cfg(all(feature = "runtime", feature = "http2"))] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn http2_keep_alive_timeout(mut self, timeout: Duration) -> Self { + self.protocol.http2_keep_alive_timeout(timeout); + self + } + + /// Set the maximum write buffer size for each HTTP/2 stream. + /// + /// Default is currently ~400KB, but may change. + /// + /// # Panics + /// + /// The value must be no larger than `u32::MAX`. + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn http2_max_send_buf_size(mut self, max: usize) -> Self { + self.protocol.http2_max_send_buf_size(max); + self + } + + /// Enables the [extended CONNECT protocol]. + /// + /// [extended CONNECT protocol]: https://datatracker.ietf.org/doc/html/rfc8441#section-4 + #[cfg(feature = "http2")] + pub fn http2_enable_connect_protocol(mut self) -> Self { + self.protocol.http2_enable_connect_protocol(); + self + } + + /// Sets the `Executor` to deal with connection tasks. + /// + /// Default is `tokio::spawn`. + pub fn executor(self, executor: E2) -> Builder { + Builder { + incoming: self.incoming, + protocol: self.protocol.with_executor(executor), + } + } + + /// Consume this `Builder`, creating a [`Server`](Server). + /// + /// # Example + /// + /// ``` + /// # #[cfg(feature = "tcp")] + /// # async fn run() { + /// use hyper::{Body, Error, Response, Server}; + /// use hyper::service::{make_service_fn, service_fn}; + /// + /// // Construct our SocketAddr to listen on... + /// let addr = ([127, 0, 0, 1], 3000).into(); + /// + /// // And a MakeService to handle each connection... + /// let make_svc = make_service_fn(|_| async { + /// Ok::<_, Error>(service_fn(|_req| async { + /// Ok::<_, Error>(Response::new(Body::from("Hello World"))) + /// })) + /// }); + /// + /// // Then bind and serve... + /// let server = Server::bind(&addr) + /// .serve(make_svc); + /// + /// // Run forever-ish... + /// if let Err(err) = server.await { + /// eprintln!("server error: {}", err); + /// } + /// # } + /// ``` + pub fn serve(self, new_service: S) -> Server + where + I: Accept, + I::Error: Into>, + I::Conn: AsyncRead + AsyncWrite + Unpin + Send + 'static, + S: MakeServiceRef, + S::Error: Into>, + B: HttpBody + 'static, + B::Error: Into>, + E: NewSvcExec, + E: ConnStreamExec<>::Future, B>, + { + let serve = self.protocol.serve(self.incoming, new_service); + let spawn_all = serve.spawn_all(); + Server { spawn_all } + } +} + +#[cfg(all(feature = "tcp", any(feature = "http1", feature = "http2")))] +impl Builder { + /// Set whether TCP keepalive messages are enabled on accepted connections. + /// + /// If `None` is specified, keepalive is disabled, otherwise the duration + /// specified will be the time to remain idle before sending TCP keepalive + /// probes. + pub fn tcp_keepalive(mut self, keepalive: Option) -> Self { + self.incoming.set_keepalive(keepalive); + self + } + + /// Set the value of `TCP_NODELAY` option for accepted connections. + pub fn tcp_nodelay(mut self, enabled: bool) -> Self { + self.incoming.set_nodelay(enabled); + self + } + + /// Set whether to sleep on accept errors. + /// + /// A possible scenario is that the process has hit the max open files + /// allowed, and so trying to accept a new connection will fail with + /// EMFILE. In some cases, it's preferable to just wait for some time, if + /// the application will likely close some files (or connections), and try + /// to accept the connection again. 
If this option is true, the error will + /// be logged at the error level, since it is still a big deal, and then + /// the listener will sleep for 1 second. + /// + /// In other cases, hitting the max open files should be treat similarly + /// to being out-of-memory, and simply error (and shutdown). Setting this + /// option to false will allow that. + /// + /// For more details see [`AddrIncoming::set_sleep_on_errors`] + pub fn tcp_sleep_on_accept_errors(mut self, val: bool) -> Self { + self.incoming.set_sleep_on_errors(val); + self + } +} diff --git a/third_party/rust/hyper/src/server/shutdown.rs b/third_party/rust/hyper/src/server/shutdown.rs index 6a8d32cc0b0d..2277a4096489 100644 --- a/third_party/rust/hyper/src/server/shutdown.rs +++ b/third_party/rust/hyper/src/server/shutdown.rs @@ -1,33 +1,37 @@ use std::error::Error as StdError; -use pin_project::{pin_project, project}; +use pin_project_lite::pin_project; use tokio::io::{AsyncRead, AsyncWrite}; +use tracing::debug; +use super::accept::Accept; use super::conn::{SpawnAll, UpgradeableConnection, Watcher}; -use super::Accept; use crate::body::{Body, HttpBody}; use crate::common::drain::{self, Draining, Signal, Watch, Watching}; -use crate::common::exec::{H2Exec, NewSvcExec}; +use crate::common::exec::{ConnStreamExec, NewSvcExec}; use crate::common::{task, Future, Pin, Poll, Unpin}; use crate::service::{HttpService, MakeServiceRef}; -#[allow(missing_debug_implementations)] -#[pin_project] -pub struct Graceful { - #[pin] - state: State, +pin_project! { + #[allow(missing_debug_implementations)] + pub struct Graceful { + #[pin] + state: State, + } } -#[pin_project] -pub(super) enum State { - Running { - drain: Option<(Signal, Watch)>, - #[pin] - spawn_all: SpawnAll, - #[pin] - signal: F, - }, - Draining(Draining), +pin_project! 
{ + #[project = StateProj] + pub(super) enum State { + Running { + drain: Option<(Signal, Watch)>, + #[pin] + spawn_all: SpawnAll, + #[pin] + signal: F, + }, + Draining { draining: Draining }, + } } impl Graceful { @@ -50,22 +54,20 @@ where IO: AsyncRead + AsyncWrite + Unpin + Send + 'static, S: MakeServiceRef, S::Error: Into>, - B: HttpBody + Send + Sync + 'static, + B: HttpBody + 'static, B::Error: Into>, F: Future, - E: H2Exec<>::Future, B>, + E: ConnStreamExec<>::Future, B>, E: NewSvcExec, { type Output = crate::Result<()>; - #[project] fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { let mut me = self.project(); loop { let next = { - #[project] match me.state.as_mut().project() { - State::Running { + StateProj::Running { drain, spawn_all, signal, @@ -73,14 +75,16 @@ where Poll::Ready(()) => { debug!("signal received, starting graceful shutdown"); let sig = drain.take().expect("drain channel").0; - State::Draining(sig.drain()) + State::Draining { + draining: sig.drain(), + } } Poll::Pending => { let watch = drain.as_ref().expect("drain channel").1.clone(); return spawn_all.poll_watch(cx, &GracefulWatcher(watch)); } }, - State::Draining(ref mut draining) => { + StateProj::Draining { ref mut draining } => { return Pin::new(draining).poll(cx).map(Ok); } } @@ -98,8 +102,8 @@ impl Watcher for GracefulWatcher where I: AsyncRead + AsyncWrite + Unpin + Send + 'static, S: HttpService, - E: H2Exec, - S::ResBody: Send + Sync + 'static, + E: ConnStreamExec, + S::ResBody: 'static, ::Error: Into>, { type Future = @@ -115,9 +119,9 @@ where S: HttpService, S::Error: Into>, I: AsyncRead + AsyncWrite + Unpin, - S::ResBody: HttpBody + Send + 'static, + S::ResBody: HttpBody + 'static, ::Error: Into>, - E: H2Exec, + E: ConnStreamExec, { conn.graceful_shutdown() } diff --git a/third_party/rust/hyper/src/server/tcp.rs b/third_party/rust/hyper/src/server/tcp.rs index b823818693e4..013bdaea1d6c 100644 --- a/third_party/rust/hyper/src/server/tcp.rs +++ b/third_party/rust/hyper/src/server/tcp.rs @@ -3,14 +3,15 @@ use std::io; use std::net::{SocketAddr, TcpListener as StdTcpListener}; use std::time::Duration; -use futures_util::FutureExt as _; use tokio::net::TcpListener; -use tokio::time::Delay; +use tokio::time::Sleep; +use tracing::{debug, error, trace}; use crate::common::{task, Future, Pin, Poll}; +#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 pub use self::addr_stream::AddrStream; -use super::Accept; +use super::accept::Accept; /// A stream of connections from binding to an address. #[must_use = "streams do nothing unless polled"] @@ -20,7 +21,7 @@ pub struct AddrIncoming { sleep_on_errors: bool, tcp_keepalive_timeout: Option, tcp_nodelay: bool, - timeout: Option, + timeout: Option>>, } impl AddrIncoming { @@ -31,7 +32,21 @@ impl AddrIncoming { } pub(super) fn from_std(std_listener: StdTcpListener) -> crate::Result { + // TcpListener::from_std doesn't set O_NONBLOCK + std_listener + .set_nonblocking(true) + .map_err(crate::Error::new_listen)?; let listener = TcpListener::from_std(std_listener).map_err(crate::Error::new_listen)?; + AddrIncoming::from_listener(listener) + } + + /// Creates a new `AddrIncoming` binding to provided socket address. + pub fn bind(addr: &SocketAddr) -> crate::Result { + AddrIncoming::new(addr) + } + + /// Creates a new `AddrIncoming` from an existing `tokio::net::TcpListener`. 
+ pub fn from_listener(listener: TcpListener) -> crate::Result { let addr = listener.local_addr().map_err(crate::Error::new_listen)?; Ok(AddrIncoming { listener, @@ -43,11 +58,6 @@ impl AddrIncoming { }) } - /// Creates a new `AddrIncoming` binding to provided socket address. - pub fn bind(addr: &SocketAddr) -> crate::Result { - AddrIncoming::new(addr) - } - /// Get the local address bound to this listener. pub fn local_addr(&self) -> SocketAddr { self.addr @@ -91,21 +101,17 @@ impl AddrIncoming { fn poll_next_(&mut self, cx: &mut task::Context<'_>) -> Poll> { // Check if a previous timeout is active that was set by IO errors. if let Some(ref mut to) = self.timeout { - match Pin::new(to).poll(cx) { - Poll::Ready(()) => {} - Poll::Pending => return Poll::Pending, - } + ready!(Pin::new(to).poll(cx)); } self.timeout = None; - let accept = self.listener.accept(); - futures_util::pin_mut!(accept); - loop { - match accept.poll_unpin(cx) { - Poll::Ready(Ok((socket, addr))) => { + match ready!(self.listener.poll_accept(cx)) { + Ok((socket, addr)) => { if let Some(dur) = self.tcp_keepalive_timeout { - if let Err(e) = socket.set_keepalive(Some(dur)) { + let socket = socket2::SockRef::from(&socket); + let conf = socket2::TcpKeepalive::new().with_time(dur); + if let Err(e) = socket.set_tcp_keepalive(&conf) { trace!("error trying to set TCP keepalive: {}", e); } } @@ -114,8 +120,7 @@ impl AddrIncoming { } return Poll::Ready(Ok(AddrStream::new(socket, addr))); } - Poll::Pending => return Poll::Pending, - Poll::Ready(Err(e)) => { + Err(e) => { // Connection errors can be ignored directly, continue by // accepting the next request. if is_connection_error(&e) { @@ -127,9 +132,9 @@ impl AddrIncoming { error!("accept error: {}", e); // Sleep 1s. - let mut timeout = tokio::time::delay_for(Duration::from_secs(1)); + let mut timeout = Box::pin(tokio::time::sleep(Duration::from_secs(1))); - match Pin::new(&mut timeout).poll(cx) { + match timeout.as_mut().poll(cx) { Poll::Ready(()) => { // Wow, it's been a second already? Ok then... continue; @@ -169,12 +174,9 @@ impl Accept for AddrIncoming { /// The timeout is useful to handle resource exhaustion errors like ENFILE /// and EMFILE. Otherwise, could enter into tight loop. fn is_connection_error(e: &io::Error) -> bool { - match e.kind() { - io::ErrorKind::ConnectionRefused + matches!(e.kind(), io::ErrorKind::ConnectionRefused | io::ErrorKind::ConnectionAborted - | io::ErrorKind::ConnectionReset => true, - _ => false, - } + | io::ErrorKind::ConnectionReset) } impl fmt::Debug for AddrIncoming { @@ -189,19 +191,23 @@ impl fmt::Debug for AddrIncoming { } mod addr_stream { - use bytes::{Buf, BufMut}; use std::io; use std::net::SocketAddr; - use tokio::io::{AsyncRead, AsyncWrite}; + #[cfg(unix)] + use std::os::unix::io::{AsRawFd, RawFd}; + use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; use tokio::net::TcpStream; use crate::common::{task, Pin, Poll}; - /// A transport returned yieled by `AddrIncoming`. - #[derive(Debug)] - pub struct AddrStream { - inner: TcpStream, - pub(super) remote_addr: SocketAddr, + pin_project_lite::pin_project! { + /// A transport returned yieled by `AddrIncoming`. 
+ #[derive(Debug)] + pub struct AddrStream { + #[pin] + inner: TcpStream, + pub(super) remote_addr: SocketAddr, + } } impl AddrStream { @@ -230,56 +236,40 @@ mod addr_stream { pub fn poll_peek( &mut self, cx: &mut task::Context<'_>, - buf: &mut [u8], + buf: &mut tokio::io::ReadBuf<'_>, ) -> Poll> { self.inner.poll_peek(cx, buf) } } impl AsyncRead for AddrStream { - unsafe fn prepare_uninitialized_buffer( - &self, - buf: &mut [std::mem::MaybeUninit], - ) -> bool { - self.inner.prepare_uninitialized_buffer(buf) - } - #[inline] fn poll_read( - mut self: Pin<&mut Self>, + self: Pin<&mut Self>, cx: &mut task::Context<'_>, - buf: &mut [u8], - ) -> Poll> { - Pin::new(&mut self.inner).poll_read(cx, buf) - } - - #[inline] - fn poll_read_buf( - mut self: Pin<&mut Self>, - cx: &mut task::Context<'_>, - buf: &mut B, - ) -> Poll> { - Pin::new(&mut self.inner).poll_read_buf(cx, buf) + buf: &mut ReadBuf<'_>, + ) -> Poll> { + self.project().inner.poll_read(cx, buf) } } impl AsyncWrite for AddrStream { #[inline] fn poll_write( - mut self: Pin<&mut Self>, + self: Pin<&mut Self>, cx: &mut task::Context<'_>, buf: &[u8], ) -> Poll> { - Pin::new(&mut self.inner).poll_write(cx, buf) + self.project().inner.poll_write(cx, buf) } #[inline] - fn poll_write_buf( - mut self: Pin<&mut Self>, + fn poll_write_vectored( + self: Pin<&mut Self>, cx: &mut task::Context<'_>, - buf: &mut B, + bufs: &[io::IoSlice<'_>], ) -> Poll> { - Pin::new(&mut self.inner).poll_write_buf(cx, buf) + self.project().inner.poll_write_vectored(cx, bufs) } #[inline] @@ -289,11 +279,24 @@ mod addr_stream { } #[inline] - fn poll_shutdown( - mut self: Pin<&mut Self>, - cx: &mut task::Context<'_>, - ) -> Poll> { - Pin::new(&mut self.inner).poll_shutdown(cx) + fn poll_shutdown(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll> { + self.project().inner.poll_shutdown(cx) + } + + #[inline] + fn is_write_vectored(&self) -> bool { + // Note that since `self.inner` is a `TcpStream`, this could + // *probably* be hard-coded to return `true`...but it seems more + // correct to ask it anyway (maybe we're on some platform without + // scatter-gather IO?) + self.inner.is_write_vectored() + } + } + + #[cfg(unix)] + impl AsRawFd for AddrStream { + fn as_raw_fd(&self) -> RawFd { + self.inner.as_raw_fd() } } } diff --git a/third_party/rust/hyper/src/service/make.rs b/third_party/rust/hyper/src/service/make.rs index 074d66f1b9d8..63e6f298f14a 100644 --- a/third_party/rust/hyper/src/service/make.rs +++ b/third_party/rust/hyper/src/service/make.rs @@ -177,6 +177,7 @@ impl fmt::Debug for MakeServiceFn { mod sealed { pub trait Sealed {} + #[allow(unreachable_pub)] // This is intentional. pub trait CantImpl {} #[allow(missing_debug_implementations)] diff --git a/third_party/rust/hyper/src/service/mod.rs b/third_party/rust/hyper/src/service/mod.rs index bb5a77406f92..22f850ca47a4 100644 --- a/third_party/rust/hyper/src/service/mod.rs +++ b/third_party/rust/hyper/src/service/mod.rs @@ -21,8 +21,9 @@ //! to a single connection. It defines how to respond to **all** requests that //! connection will receive. //! -//! While it's possible to implement `Service` for a type manually, the helper -//! [`service_fn`](service_fn) should be sufficient for most cases. +//! The helper [`service_fn`](service_fn) should be sufficient for most cases, but +//! if you need to implement `Service` for a type manually, you can follow the example +//! in `service_struct_impl.rs`. //! //! # MakeService //! 
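// A minimal, self-contained sketch of implementing `Service` by hand, as the
// revised module docs above describe; this is not the `service_struct_impl.rs`
// example itself, and the `Hello` type is illustrative. It assumes hyper 0.14
// with its `tower_service::Service` re-export.
use std::convert::Infallible;
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};

use hyper::service::Service;
use hyper::{Body, Request, Response};

struct Hello;

impl Service<Request<Body>> for Hello {
    type Response = Response<Body>;
    type Error = Infallible;
    // Boxing lets us name the future type without `async fn` in traits.
    type Future = Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send>>;

    fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        // Always ready; a real service might apply backpressure here.
        Poll::Ready(Ok(()))
    }

    fn call(&mut self, _req: Request<Body>) -> Self::Future {
        Box::pin(async { Ok(Response::new(Body::from("Hello World"))) })
    }
}

// Such a hand-written service could then be produced per connection by a
// `MakeService` (for example via `make_service_fn`) just like a `service_fn` closure.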
@@ -38,12 +39,17 @@ pub use tower_service::Service; mod http; mod make; +#[cfg(all(any(feature = "http1", feature = "http2"), feature = "client"))] mod oneshot; mod util; -pub(crate) use self::http::HttpService; -pub(crate) use self::make::{MakeConnection, MakeServiceRef}; -pub(crate) use self::oneshot::{oneshot, Oneshot}; +pub(super) use self::http::HttpService; +#[cfg(all(any(feature = "http1", feature = "http2"), feature = "client"))] +pub(super) use self::make::MakeConnection; +#[cfg(all(any(feature = "http1", feature = "http2"), feature = "server"))] +pub(super) use self::make::MakeServiceRef; +#[cfg(all(any(feature = "http1", feature = "http2"), feature = "client"))] +pub(super) use self::oneshot::{oneshot, Oneshot}; pub use self::make::make_service_fn; pub use self::util::service_fn; diff --git a/third_party/rust/hyper/src/service/oneshot.rs b/third_party/rust/hyper/src/service/oneshot.rs index 94f4b43a8066..2697af8f4cbd 100644 --- a/third_party/rust/hyper/src/service/oneshot.rs +++ b/third_party/rust/hyper/src/service/oneshot.rs @@ -1,8 +1,6 @@ // TODO: Eventually to be replaced with tower_util::Oneshot. -use std::marker::Unpin; -use std::mem; - +use pin_project_lite::pin_project; use tower_service::Service; use crate::common::{task, Future, Pin, Poll}; @@ -12,30 +10,35 @@ where S: Service, { Oneshot { - state: State::NotReady(svc, req), + state: State::NotReady { svc, req }, } } -// A `Future` consuming a `Service` and request, waiting until the `Service` -// is ready, and then calling `Service::call` with the request, and -// waiting for that `Future`. -#[allow(missing_debug_implementations)] -pub struct Oneshot, Req> { - state: State, +pin_project! { + // A `Future` consuming a `Service` and request, waiting until the `Service` + // is ready, and then calling `Service::call` with the request, and + // waiting for that `Future`. + #[allow(missing_debug_implementations)] + pub struct Oneshot, Req> { + #[pin] + state: State, + } } -enum State, Req> { - NotReady(S, Req), - Called(S::Future), - Tmp, -} - -// Unpin is projected to S::Future, but never S. -impl Unpin for Oneshot -where - S: Service, - S::Future: Unpin, -{ +pin_project! { + #[project = StateProj] + #[project_replace = StateProjOwn] + enum State, Req> { + NotReady { + svc: S, + req: Req, + }, + Called { + #[pin] + fut: S::Future, + }, + Tmp, + } } impl Future for Oneshot @@ -45,24 +48,23 @@ where type Output = Result; fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { - // Safety: The service's future is never moved once we get one. - let mut me = unsafe { Pin::get_unchecked_mut(self) }; + let mut me = self.project(); loop { - match me.state { - State::NotReady(ref mut svc, _) => { + match me.state.as_mut().project() { + StateProj::NotReady { ref mut svc, .. 
} => { ready!(svc.poll_ready(cx))?; // fallthrough out of the match's borrow } - State::Called(ref mut fut) => { - return unsafe { Pin::new_unchecked(fut) }.poll(cx); + StateProj::Called { fut } => { + return fut.poll(cx); } - State::Tmp => unreachable!(), + StateProj::Tmp => unreachable!(), } - match mem::replace(&mut me.state, State::Tmp) { - State::NotReady(mut svc, req) => { - me.state = State::Called(svc.call(req)); + match me.state.as_mut().project_replace(State::Tmp) { + StateProjOwn::NotReady { mut svc, req } => { + me.state.set(State::Called { fut: svc.call(req) }); } _ => unreachable!(), } diff --git a/third_party/rust/hyper/src/upgrade.rs b/third_party/rust/hyper/src/upgrade.rs index 55f390431fd5..1c7b5b01cdb1 100644 --- a/third_party/rust/hyper/src/upgrade.rs +++ b/third_party/rust/hyper/src/upgrade.rs @@ -1,5 +1,39 @@ //! HTTP Upgrades //! +//! This module deals with managing [HTTP Upgrades][mdn] in hyper. Since +//! several concepts in HTTP allow for first talking HTTP, and then converting +//! to a different protocol, this module conflates them into a single API. +//! Those include: +//! +//! - HTTP/1.1 Upgrades +//! - HTTP `CONNECT` +//! +//! You are responsible for any other pre-requisites to establish an upgrade, +//! such as sending the appropriate headers, methods, and status codes. You can +//! then use [`on`][] to grab a `Future` which will resolve to the upgraded +//! connection object, or an error if the upgrade fails. +//! +//! [mdn]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Protocol_upgrade_mechanism +//! +//! # Client +//! +//! Sending an HTTP upgrade from the [`client`](super::client) involves setting +//! either the appropriate method, if wanting to `CONNECT`, or headers such as +//! `Upgrade` and `Connection`, on the `http::Request`. Once receiving the +//! `http::Response` back, you must check for the specific information that the +//! upgrade is agreed upon by the server (such as a `101` status code), and then +//! get the `Future` from the `Response`. +//! +//! # Server +//! +//! Receiving upgrade requests in a server requires you to check the relevant +//! headers in a `Request`, and if an upgrade should be done, you then send the +//! corresponding headers in a response. To then wait for hyper to finish the +//! upgrade, you call `on()` with the `Request`, and then can spawn a task +//! awaiting it. +//! +//! # Example +//! //! See [this example][example] showing how upgrades work with both //! Clients and Servers. //! @@ -11,9 +45,11 @@ use std::fmt; use std::io; use std::marker::Unpin; -use bytes::{Buf, Bytes}; -use tokio::io::{AsyncRead, AsyncWrite}; +use bytes::Bytes; +use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; use tokio::sync::oneshot; +#[cfg(any(feature = "http1", feature = "http2"))] +use tracing::trace; use crate::common::io::Rewind; use crate::common::{task, Future, Pin, Poll}; @@ -57,18 +93,25 @@ pub struct Parts { _inner: (), } -pub(crate) struct Pending { +/// Gets a pending HTTP upgrade from this message. +/// +/// This can be called on the following types: +/// +/// - `http::Request` +/// - `http::Response` +/// - `&mut http::Request` +/// - `&mut http::Response` +pub fn on(msg: T) -> OnUpgrade { + msg.on_upgrade() +} + +#[cfg(any(feature = "http1", feature = "http2"))] +pub(super) struct Pending { tx: oneshot::Sender>, } -/// Error cause returned when an upgrade was expected but canceled -/// for whatever reason. -/// -/// This likely means the actual `Conn` future wasn't polled and upgraded. 
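The new module docs above describe the server half of an upgrade: verify the request, answer with the agreed status and headers, then await `hyper::upgrade::on`. A minimal sketch of that flow, not part of the patch (the `"foobar"` protocol name is a placeholder):

```rust
use hyper::header::{HeaderValue, CONNECTION, UPGRADE};
use hyper::{Body, Request, Response, StatusCode};

async fn server_upgrade(mut req: Request<Body>) -> Result<Response<Body>, hyper::Error> {
    // Await the upgrade on a separate task; it only resolves after the
    // response below has been sent and the connection handed over.
    tokio::spawn(async move {
        match hyper::upgrade::on(&mut req).await {
            Ok(upgraded) => {
                // `upgraded` implements AsyncRead + AsyncWrite: speak the new protocol here.
                let _ = upgraded;
            }
            Err(e) => eprintln!("upgrade error: {}", e),
        }
    });

    // The caller is responsible for these prerequisites, as the docs note.
    let mut res = Response::new(Body::empty());
    *res.status_mut() = StatusCode::SWITCHING_PROTOCOLS;
    res.headers_mut().insert(UPGRADE, HeaderValue::from_static("foobar"));
    res.headers_mut().insert(CONNECTION, HeaderValue::from_static("upgrade"));
    Ok(res)
}
```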
-#[derive(Debug)] -struct UpgradeExpected(()); - -pub(crate) fn pending() -> (Pending, OnUpgrade) { +#[cfg(any(feature = "http1", feature = "http2"))] +pub(super) fn pending() -> (Pending, OnUpgrade) { let (tx, rx) = oneshot::channel(); (Pending { tx }, OnUpgrade { rx: Some(rx) }) } @@ -76,12 +119,13 @@ pub(crate) fn pending() -> (Pending, OnUpgrade) { // ===== impl Upgraded ===== impl Upgraded { - pub(crate) fn new(io: T, read_buf: Bytes) -> Self + #[cfg(any(feature = "http1", feature = "http2", test))] + pub(super) fn new(io: T, read_buf: Bytes) -> Self where T: AsyncRead + AsyncWrite + Unpin + Send + 'static, { Upgraded { - io: Rewind::new_buffered(Box::new(ForwardsWriteBuf(io)), read_buf), + io: Rewind::new_buffered(Box::new(io), read_buf), } } @@ -91,9 +135,9 @@ impl Upgraded { /// `Upgraded` back. pub fn downcast(self) -> Result, Self> { let (io, buf) = self.io.into_inner(); - match io.__hyper_downcast::>() { + match io.__hyper_downcast() { Ok(t) => Ok(Parts { - io: t.0, + io: *t, read_buf: buf, _inner: (), }), @@ -105,15 +149,11 @@ impl Upgraded { } impl AsyncRead for Upgraded { - unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [std::mem::MaybeUninit]) -> bool { - self.io.prepare_uninitialized_buffer(buf) - } - fn poll_read( mut self: Pin<&mut Self>, cx: &mut task::Context<'_>, - buf: &mut [u8], - ) -> Poll> { + buf: &mut ReadBuf<'_>, + ) -> Poll> { Pin::new(&mut self.io).poll_read(cx, buf) } } @@ -127,12 +167,12 @@ impl AsyncWrite for Upgraded { Pin::new(&mut self.io).poll_write(cx, buf) } - fn poll_write_buf( + fn poll_write_vectored( mut self: Pin<&mut Self>, cx: &mut task::Context<'_>, - buf: &mut B, + bufs: &[io::IoSlice<'_>], ) -> Poll> { - Pin::new(self.io.get_mut()).poll_write_dyn_buf(cx, buf) + Pin::new(&mut self.io).poll_write_vectored(cx, bufs) } fn poll_flush(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll> { @@ -142,6 +182,10 @@ impl AsyncWrite for Upgraded { fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll> { Pin::new(&mut self.io).poll_shutdown(cx) } + + fn is_write_vectored(&self) -> bool { + self.io.is_write_vectored() + } } impl fmt::Debug for Upgraded { @@ -153,11 +197,12 @@ impl fmt::Debug for Upgraded { // ===== impl OnUpgrade ===== impl OnUpgrade { - pub(crate) fn none() -> Self { + pub(super) fn none() -> Self { OnUpgrade { rx: None } } - pub(crate) fn is_none(&self) -> bool { + #[cfg(feature = "http1")] + pub(super) fn is_none(&self) -> bool { self.rx.is_none() } } @@ -170,9 +215,7 @@ impl Future for OnUpgrade { Some(ref mut rx) => Pin::new(rx).poll(cx).map(|res| match res { Ok(Ok(upgraded)) => Ok(upgraded), Ok(Err(err)) => Err(err), - Err(_oneshot_canceled) => { - Err(crate::Error::new_canceled().with(UpgradeExpected(()))) - } + Err(_oneshot_canceled) => Err(crate::Error::new_canceled().with(UpgradeExpected)), }), None => Poll::Ready(Err(crate::Error::new_user_no_upgrade())), } @@ -187,15 +230,17 @@ impl fmt::Debug for OnUpgrade { // ===== impl Pending ===== +#[cfg(any(feature = "http1", feature = "http2"))] impl Pending { - pub(crate) fn fulfill(self, upgraded: Upgraded) { + pub(super) fn fulfill(self, upgraded: Upgraded) { trace!("pending upgrade fulfill"); let _ = self.tx.send(Ok(upgraded)); } + #[cfg(feature = "http1")] /// Don't fulfill the pending Upgrade, but instead signal that /// upgrades are handled manually. 
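And the client half described in the same docs: send the `Upgrade`/`Connection` headers, check for `101`, then hand the response to `on`. A hypothetical sketch assuming hyper's default client feature set; the URI and protocol name are placeholders, not part of the patch:

```rust
use hyper::header::{HeaderValue, CONNECTION, UPGRADE};
use hyper::{Body, Client, Request, StatusCode};

async fn client_upgrade() -> Result<(), Box<dyn std::error::Error>> {
    let req = Request::builder()
        .uri("http://127.0.0.1:3000/")
        .header(UPGRADE, HeaderValue::from_static("foobar"))
        .header(CONNECTION, HeaderValue::from_static("upgrade"))
        .body(Body::empty())?;

    let res = Client::new().request(req).await?;
    if res.status() != StatusCode::SWITCHING_PROTOCOLS {
        return Err("server did not agree to the upgrade".into());
    }

    // Consumes the response and resolves to the upgraded I/O object.
    let upgraded = hyper::upgrade::on(res).await?;
    let _ = upgraded;
    Ok(())
}
```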
- pub(crate) fn manual(self) { + pub(super) fn manual(self) { trace!("pending upgrade handled manually"); let _ = self.tx.send(Err(crate::Error::new_user_manual_upgrade())); } @@ -203,9 +248,16 @@ impl Pending { // ===== impl UpgradeExpected ===== +/// Error cause returned when an upgrade was expected but canceled +/// for whatever reason. +/// +/// This likely means the actual `Conn` future wasn't polled and upgraded. +#[derive(Debug)] +struct UpgradeExpected; + impl fmt::Display for UpgradeExpected { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "upgrade expected but not completed") + f.write_str("upgrade expected but not completed") } } @@ -213,20 +265,14 @@ impl StdError for UpgradeExpected {} // ===== impl Io ===== -struct ForwardsWriteBuf(T); - -pub(crate) trait Io: AsyncRead + AsyncWrite + Unpin + 'static { - fn poll_write_dyn_buf( - &mut self, - cx: &mut task::Context<'_>, - buf: &mut dyn Buf, - ) -> Poll>; - +pub(super) trait Io: AsyncRead + AsyncWrite + Unpin + 'static { fn __hyper_type_id(&self) -> TypeId { TypeId::of::() } } +impl Io for T {} + impl dyn Io + Send { fn __hyper_is(&self) -> bool { let t = TypeId::of::(); @@ -246,60 +292,49 @@ impl dyn Io + Send { } } -impl AsyncRead for ForwardsWriteBuf { - unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [std::mem::MaybeUninit]) -> bool { - self.0.prepare_uninitialized_buffer(buf) +mod sealed { + use super::OnUpgrade; + + pub trait CanUpgrade { + fn on_upgrade(self) -> OnUpgrade; } - fn poll_read( - mut self: Pin<&mut Self>, - cx: &mut task::Context<'_>, - buf: &mut [u8], - ) -> Poll> { - Pin::new(&mut self.0).poll_read(cx, buf) - } -} - -impl AsyncWrite for ForwardsWriteBuf { - fn poll_write( - mut self: Pin<&mut Self>, - cx: &mut task::Context<'_>, - buf: &[u8], - ) -> Poll> { - Pin::new(&mut self.0).poll_write(cx, buf) + impl CanUpgrade for http::Request { + fn on_upgrade(mut self) -> OnUpgrade { + self.extensions_mut() + .remove::() + .unwrap_or_else(OnUpgrade::none) + } } - fn poll_write_buf( - mut self: Pin<&mut Self>, - cx: &mut task::Context<'_>, - buf: &mut B, - ) -> Poll> { - Pin::new(&mut self.0).poll_write_buf(cx, buf) + impl CanUpgrade for &'_ mut http::Request { + fn on_upgrade(self) -> OnUpgrade { + self.extensions_mut() + .remove::() + .unwrap_or_else(OnUpgrade::none) + } } - fn poll_flush(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll> { - Pin::new(&mut self.0).poll_flush(cx) + impl CanUpgrade for http::Response { + fn on_upgrade(mut self) -> OnUpgrade { + self.extensions_mut() + .remove::() + .unwrap_or_else(OnUpgrade::none) + } } - fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll> { - Pin::new(&mut self.0).poll_shutdown(cx) - } -} - -impl Io for ForwardsWriteBuf { - fn poll_write_dyn_buf( - &mut self, - cx: &mut task::Context<'_>, - mut buf: &mut dyn Buf, - ) -> Poll> { - Pin::new(&mut self.0).poll_write_buf(cx, &mut buf) + impl CanUpgrade for &'_ mut http::Response { + fn on_upgrade(self) -> OnUpgrade { + self.extensions_mut() + .remove::() + .unwrap_or_else(OnUpgrade::none) + } } } #[cfg(test)] mod tests { use super::*; - use tokio::io::AsyncWriteExt; #[test] fn upgraded_downcast() { @@ -310,15 +345,6 @@ mod tests { upgraded.downcast::().unwrap(); } - #[tokio::test] - async fn upgraded_forwards_write_buf() { - // sanity check that the underlying IO implements write_buf - Mock.write_buf(&mut "hello".as_bytes()).await.unwrap(); - - let mut upgraded = Upgraded::new(Mock, Bytes::new()); - upgraded.write_buf(&mut 
"hello".as_bytes()).await.unwrap(); - } - // TODO: replace with tokio_test::io when it can test write_buf struct Mock; @@ -326,8 +352,8 @@ mod tests { fn poll_read( self: Pin<&mut Self>, _cx: &mut task::Context<'_>, - _buf: &mut [u8], - ) -> Poll> { + _buf: &mut ReadBuf<'_>, + ) -> Poll> { unreachable!("Mock::poll_read") } } @@ -335,20 +361,11 @@ mod tests { impl AsyncWrite for Mock { fn poll_write( self: Pin<&mut Self>, - _cx: &mut task::Context<'_>, - _buf: &[u8], + _: &mut task::Context<'_>, + buf: &[u8], ) -> Poll> { - panic!("poll_write shouldn't be called"); - } - - fn poll_write_buf( - self: Pin<&mut Self>, - _cx: &mut task::Context<'_>, - buf: &mut B, - ) -> Poll> { - let n = buf.remaining(); - buf.advance(n); - Poll::Ready(Ok(n)) + // panic!("poll_write shouldn't be called"); + Poll::Ready(Ok(buf.len())) } fn poll_flush(self: Pin<&mut Self>, _cx: &mut task::Context<'_>) -> Poll> { diff --git a/third_party/rust/pin-project-internal/.cargo-checksum.json b/third_party/rust/pin-project-internal/.cargo-checksum.json index def6c9f85033..fa585bc1da7f 100644 --- a/third_party/rust/pin-project-internal/.cargo-checksum.json +++ b/third_party/rust/pin-project-internal/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"Cargo.toml":"5258b88a2b398ff9c68a822446c6d524ac475545f38d39140ba17e54277f82e5","LICENSE-APACHE":"cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","build.rs":"ff90cd6a515a5eab716f63f3af90db47c411ac3a1629800731fb73de832495aa","src/lib.rs":"e3e8f00f9c9ada23bb4783ff964b08e24dd2d6e3e159ec2b0b788370b38a50a7","src/pin_project/attribute.rs":"4bf467e46315e87a181ad4ef99c8a9f1936505c4f3d72d57ab737244bf957fd7","src/pin_project/derive.rs":"caae2a40a61e556f75718d80338f550610c21c36876e3e6b42ccfa1b88f28c2d","src/pin_project/mod.rs":"149bda261825c80b1043ce1a26f944f69d939d86e5fc37fd1f71969469202b2f","src/pinned_drop.rs":"cd028eb8f588dbfe3239ef1287a655de17c224cfdc19b9803b795d231e2f10e3","src/project.rs":"bebfed7481d93661d5af28fcadf8919ec90bdfd9e588cbe2955165a2b2e00ce4","src/utils.rs":"637418e3b2df45427c5047caa57e4bb65a632d5d78d03b300d3e4b9e1125df41"},"package":"044964427019eed9d49d9d5bbce6047ef18f37100ea400912a9fa4a3523ab12a"} \ No newline at end of file +{"files":{"Cargo.toml":"3fae4fd22b78dc36d4a467f3edb70b3dc42efb171200dc4ba917e2d5f9a40910","LICENSE-APACHE":"0d542e0c8804e39aa7f37eb00da5a762149dc682d7829451287e11b938e94594","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","src/lib.rs":"d33dc65378ce2152b90a8e6cfe786d822daf6ba14346054c54b92bf66379d00c","src/pin_project/args.rs":"903be7b22a2eda4ed59d890feb75bd4c98e243b4faaa809aff0621d15cd06431","src/pin_project/attribute.rs":"d901baca05c9b336a8c9944b36111d8d45e2e8b162d3c4ba9fac1416aa1b1086","src/pin_project/derive.rs":"877bed8ca39cd6d31404518246aa188803e25f3357b86bcc24e663388c831d36","src/pin_project/mod.rs":"83e6fc982a8c136811332512abc7d368e5d09b94f245de5d19490f835e85943a","src/pinned_drop.rs":"f3d386e00ce2fe25fc817ac57f07569f9e43a519e12d977db39f4c239be4dcf4","src/utils.rs":"24372d39be74fb9b6728bca08d74ed0a8ed7915de97ada6b20ebb6243ae6eed0"},"package":"744b6f092ba29c3650faf274db506afd39944f48420f6c86b17cfe0ee1cb36bb"} \ No newline at end of file diff --git a/third_party/rust/pin-project-internal/Cargo.toml b/third_party/rust/pin-project-internal/Cargo.toml index eea89594ef28..1d5cfef32134 100644 --- a/third_party/rust/pin-project-internal/Cargo.toml +++ b/third_party/rust/pin-project-internal/Cargo.toml @@ -11,9 +11,8 @@ 
[package] edition = "2018" -rust-version = "1.34" name = "pin-project-internal" -version = "0.4.29" +version = "1.0.10" description = "Implementation detail of the `pin-project` crate.\n" keywords = ["pin", "macros", "attribute"] categories = ["no-std", "rust-patterns"] @@ -31,7 +30,7 @@ version = "1" version = "1" [dependencies.syn] -version = "1.0.44" +version = "1.0.56" features = ["full", "visit-mut"] -[dev-dependencies.rustversion] -version = "1" + +[dev-dependencies] diff --git a/third_party/rust/pin-project-internal/LICENSE-APACHE b/third_party/rust/pin-project-internal/LICENSE-APACHE index d64569567334..f433b1a53f5b 100644 --- a/third_party/rust/pin-project-internal/LICENSE-APACHE +++ b/third_party/rust/pin-project-internal/LICENSE-APACHE @@ -175,28 +175,3 @@ of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/third_party/rust/pin-project-internal/build.rs b/third_party/rust/pin-project-internal/build.rs deleted file mode 100644 index b4d314c9d8a6..000000000000 --- a/third_party/rust/pin-project-internal/build.rs +++ /dev/null @@ -1,34 +0,0 @@ -use std::{env, process::Command, str}; - -// The rustc-cfg strings below are *not* public API. Please let us know by -// opening a GitHub issue if your build environment requires some way to enable -// these cfgs other than by executing our build script. 
-fn main() { - let minor = match rustc_minor_version() { - Some(minor) => minor, - None => return, - }; - - // Underscore const names requires Rust 1.37: - // https://github.com/rust-lang/rust/pull/61347 - if minor >= 37 { - println!("cargo:rustc-cfg=underscore_consts"); - } - - // #[deprecated] on proc-macro requires Rust 1.40: - // https://github.com/rust-lang/rust/pull/65666 - if minor >= 40 { - println!("cargo:rustc-cfg=deprecated_proc_macro"); - } -} - -fn rustc_minor_version() -> Option { - let rustc = env::var_os("RUSTC")?; - let output = Command::new(rustc).arg("--version").output().ok()?; - let version = str::from_utf8(&output.stdout).ok()?; - let mut pieces = version.split('.'); - if pieces.next() != Some("rustc 1") { - return None; - } - pieces.next()?.parse().ok() -} diff --git a/third_party/rust/pin-project-internal/src/lib.rs b/third_party/rust/pin-project-internal/src/lib.rs index 0f46d3b533e0..04c4ce250bfb 100644 --- a/third_party/rust/pin-project-internal/src/lib.rs +++ b/third_party/rust/pin-project-internal/src/lib.rs @@ -1,14 +1,15 @@ -//! An internal crate to support pin_project - **do not use directly** +//! Implementation detail of the `pin-project` crate. - **do not use directly** #![doc(test( no_crate_inject, - attr(deny(warnings, rust_2018_idioms, single_use_lifetimes), allow(dead_code)) + attr( + deny(warnings, rust_2018_idioms, single_use_lifetimes), + allow(dead_code, unused_variables) + ) ))] #![warn(unsafe_code)] #![warn(rust_2018_idioms, single_use_lifetimes, unreachable_pub)] #![warn(clippy::default_trait_access, clippy::wildcard_imports)] -// mem::take and #[non_exhaustive] requires Rust 1.40 -#![allow(clippy::mem_replace_with_default, clippy::manual_non_exhaustive)] #![allow(clippy::needless_doctest_main)] // older compilers require explicit `extern crate`. @@ -20,20 +21,17 @@ mod utils; mod pin_project; mod pinned_drop; -mod project; use proc_macro::TokenStream; -use crate::utils::ProjKind; - /// An attribute that creates projection types covering all the fields of /// struct or enum. /// /// This attribute creates projection types according to the following rules: /// -/// * For the fields that use `#[pin]` attribute, create the pinned reference to +/// - For the fields that use `#[pin]` attribute, create the pinned reference to /// the field. -/// * For the other fields, create a normal reference to the field. +/// - For the other fields, create a normal reference to the field. /// /// And the following methods are implemented on the original type: /// @@ -48,24 +46,25 @@ use crate::utils::ProjKind; /// ``` /// /// By passing an argument with the same name as the method to the attribute, -/// you can name the projection type returned from the method: +/// you can name the projection type returned from the method. This allows you +/// to use pattern matching on the projected types. 
/// /// ```rust -/// use std::pin::Pin; -/// -/// use pin_project::pin_project; -/// -/// #[pin_project(project = StructProj)] -/// struct Struct { -/// #[pin] -/// field: T, +/// # use pin_project::pin_project; +/// # use std::pin::Pin; +/// #[pin_project(project = EnumProj)] +/// enum Enum { +/// Variant(#[pin] T), /// } /// -/// impl Struct { +/// impl Enum { /// fn method(self: Pin<&mut Self>) { -/// let this: StructProj<'_, T> = self.project(); -/// let StructProj { field } = this; -/// let _: Pin<&mut T> = field; +/// let this: EnumProj<'_, T> = self.project(); +/// match this { +/// EnumProj::Variant(x) => { +/// let _: Pin<&mut T> = x; +/// } +/// } /// } /// } /// ``` @@ -73,10 +72,15 @@ use crate::utils::ProjKind; /// Note that the projection types returned by `project` and `project_ref` have /// an additional lifetime at the beginning of generics. /// -/// The visibility of the projected type and projection method is based on the +/// ```text +/// let this: EnumProj<'_, T> = self.project(); +/// ^^ +/// ``` +/// +/// The visibility of the projected types and projection methods is based on the /// original type. However, if the visibility of the original type is `pub`, the -/// visibility of the projected type and the projection method is downgraded to -/// `pub(crate)`. +/// visibility of the projected types and the projection methods is downgraded +/// to `pub(crate)`. /// /// # Safety /// @@ -123,7 +127,7 @@ use crate::utils::ProjKind; /// your struct - that is, [`Pin`]`<&mut MyStruct>` where `MyStruct` is the /// type of your struct. /// -/// You can call `project()` on this type as usual, along with any other +/// You can call `.project()` on this type as usual, along with any other /// methods you have defined. Because your code is never provided with /// a `&mut MyStruct`, it is impossible to move out of pin-projectable /// fields in safe code in your destructor. @@ -149,8 +153,8 @@ use crate::utils::ProjKind; /// code. /// /// Pin projections are also incompatible with [`#[repr(packed)]`][repr-packed] -/// structs. Attempting to use this attribute on a -/// [`#[repr(packed)]`][repr-packed] struct results in a compile-time error. +/// types. Attempting to use this attribute on a `#[repr(packed)]` type results +/// in a compile-time error. /// /// # Examples /// @@ -224,7 +228,33 @@ use crate::utils::ProjKind; /// } /// ``` /// -/// If you want to call the `project()` method multiple times or later use the +/// When `#[pin_project]` is used on enums, only named projection types and +/// methods are generated because there is no way to access variants of +/// projected types without naming it. +/// For example, in the above example, only the `project` method is generated, +/// and the `project_ref` method is not generated. +/// (When `#[pin_project]` is used on structs, both methods are always generated.) +/// +/// ```rust,compile_fail,E0599 +/// # use pin_project::pin_project; +/// # use std::pin::Pin; +/// # +/// # #[pin_project(project = EnumProj)] +/// # enum Enum { +/// # Tuple(#[pin] T), +/// # Struct { field: U }, +/// # Unit, +/// # } +/// # +/// impl Enum { +/// fn call_project_ref(self: Pin<&Self>) { +/// let _this = self.project_ref(); +/// //~^ ERROR no method named `project_ref` found for struct `Pin<&Enum>` in the current scope +/// } +/// } +/// ``` +/// +/// If you want to call `.project()` multiple times or later use the /// original [`Pin`] type, it needs to use [`.as_mut()`][`Pin::as_mut`] to avoid /// consuming the [`Pin`]. 
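The `.as_mut()` note above is easier to see in code; a short sketch (the type names are arbitrary, and this is not part of the patch):

```rust
use std::pin::Pin;

use pin_project::pin_project;

#[pin_project]
struct Struct<T> {
    #[pin]
    field: T,
}

impl<T> Struct<T> {
    fn call_project_twice(mut self: Pin<&mut Self>) {
        // `as_mut()` reborrows the Pin, so the first projection does not consume it.
        let _ = self.as_mut().project();
        let _ = self.as_mut().project();
    }
}
```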
/// @@ -273,7 +303,7 @@ use crate::utils::ProjKind; /// #[pin_project] /// struct Struct { /// field: T, -/// #[pin] +/// #[pin] // <------ This `#[pin]` is required to make `Struct` to `!Unpin`. /// _pin: PhantomPinned, /// } /// ``` @@ -321,8 +351,8 @@ use crate::utils::ProjKind; /// This impl block acts just like a normal [`Drop`] impl, /// except for the following two: /// -/// * `drop` method takes [`Pin`]`<&mut Self>` -/// * Name of the trait is `PinnedDrop`. +/// - `drop` method takes [`Pin`]`<&mut Self>` +/// - Name of the trait is `PinnedDrop`. /// /// ```rust /// # use std::pin::Pin; @@ -346,14 +376,14 @@ use crate::utils::ProjKind; /// use pin_project::{pin_project, pinned_drop}; /// /// #[pin_project(PinnedDrop)] -/// struct Struct { +/// struct PrintOnDrop { /// #[pin] /// pinned_field: T, /// unpin_field: U, /// } /// /// #[pinned_drop] -/// impl PinnedDrop for Struct { +/// impl PinnedDrop for PrintOnDrop { /// fn drop(self: Pin<&mut Self>) { /// println!("Dropping pinned field: {:?}", self.pinned_field); /// println!("Dropping unpin field: {:?}", self.unpin_field); @@ -361,17 +391,17 @@ use crate::utils::ProjKind; /// } /// /// fn main() { -/// let _x = Struct { pinned_field: true, unpin_field: 40 }; +/// let _x = PrintOnDrop { pinned_field: true, unpin_field: 40 }; /// } /// ``` /// -/// See also [`#[pinned_drop]`][`pinned_drop`] attribute. +/// See also [`#[pinned_drop]`][macro@pinned_drop] attribute. /// -/// # `project_replace()` +/// # `project_replace` method /// -/// In addition to the `project()` and `project_ref()` methods which are always +/// In addition to the `project` and `project_ref` methods which are always /// provided when you use the `#[pin_project]` attribute, there is a third -/// method, `project_replace()` which can be useful in some situations. It is +/// method, `project_replace` which can be useful in some situations. It is /// equivalent to [`Pin::set`], except that the unpinned fields are moved and /// returned, instead of being dropped in-place. /// @@ -415,8 +445,8 @@ use crate::utils::ProjKind; /// ``` /// /// By passing the value to the `project_replace` argument, you can name the -/// returned type of `project_replace()`. This is necessary whenever -/// destructuring the return type of `project_replace()`, and work in exactly +/// returned type of the `project_replace` method. This is necessary whenever +/// destructuring the return type of the `project_replace` method, and work in exactly /// the same way as the `project` and `project_ref` arguments. 
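A short sketch of a named `project_replace` being destructured, since the doc example itself falls outside this hunk (names are arbitrary; not part of the patch):

```rust
use std::pin::Pin;

use pin_project::pin_project;

#[pin_project(project_replace = EnumProjOwn)]
enum Enum<T, U> {
    A {
        #[pin]
        pinned: T,
        unpinned: U,
    },
    B,
}

fn take_unpinned<T, U>(x: Pin<&mut Enum<T, U>>) -> Option<U> {
    // Pinned fields come back as `PhantomData`; unpinned ones are moved out.
    match x.project_replace(Enum::B) {
        EnumProjOwn::A { unpinned, .. } => Some(unpinned),
        EnumProjOwn::B => None,
    }
}
```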
/// /// ```rust @@ -445,29 +475,28 @@ use crate::utils::ProjKind; /// [`Pin::as_mut`]: core::pin::Pin::as_mut /// [`Pin::set`]: core::pin::Pin::set /// [`Pin`]: core::pin::Pin -/// [`UnsafeUnpin`]: https://docs.rs/pin-project/0.4/pin_project/trait.UnsafeUnpin.html -/// [`pinned_drop`]: ./attr.pinned_drop.html -/// [drop-guarantee]: https://doc.rust-lang.org/nightly/std/pin/index.html#drop-guarantee -/// [pin-projection]: https://doc.rust-lang.org/nightly/std/pin/index.html#projections-and-structural-pinning -/// [pinned-drop]: ./attr.pin_project.html#pinned_drop +/// [`UnsafeUnpin`]: https://docs.rs/pin-project/1/pin_project/trait.UnsafeUnpin.html +/// [drop-guarantee]: core::pin#drop-guarantee +/// [pin-projection]: core::pin#projections-and-structural-pinning +/// [pinned-drop]: macro@pin_project#pinned_drop /// [repr-packed]: https://doc.rust-lang.org/nomicon/other-reprs.html#reprpacked /// [undefined-behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html -/// [unsafe-unpin]: ./attr.pin_project.html#unsafeunpin +/// [unsafe-unpin]: macro@pin_project#unsafeunpin #[proc_macro_attribute] pub fn pin_project(args: TokenStream, input: TokenStream) -> TokenStream { pin_project::attribute(&args.into(), input.into()).into() } -/// An attribute for annotating an impl block that implements [`Drop`]. +/// An attribute used for custom implementations of [`Drop`]. /// -/// This attribute is only needed when you wish to provide a [`Drop`] -/// impl for your type. +/// This attribute is used in conjunction with the `PinnedDrop` argument to +/// the [`#[pin_project]`][macro@pin_project] attribute. /// -/// This impl block acts just like a normal [`Drop`] impl, -/// except for the following two: +/// The impl block annotated with this attribute acts just like a normal +/// [`Drop`] impl, except for the following two: /// -/// * `drop` method takes [`Pin`]`<&mut Self>` -/// * Name of the trait is `PinnedDrop`. +/// - `drop` method takes [`Pin`]`<&mut Self>` +/// - Name of the trait is `PinnedDrop`. /// /// ```rust /// # use std::pin::Pin; @@ -483,7 +512,7 @@ pub fn pin_project(args: TokenStream, input: TokenStream) -> TokenStream { /// In particular, it will never be called more than once, just like /// [`Drop::drop`]. /// -/// # Example +/// # Examples /// /// ```rust /// use std::pin::Pin; @@ -491,20 +520,20 @@ pub fn pin_project(args: TokenStream, input: TokenStream) -> TokenStream { /// use pin_project::{pin_project, pinned_drop}; /// /// #[pin_project(PinnedDrop)] -/// struct Foo { +/// struct PrintOnDrop { /// #[pin] /// field: u8, /// } /// /// #[pinned_drop] -/// impl PinnedDrop for Foo { +/// impl PinnedDrop for PrintOnDrop { /// fn drop(self: Pin<&mut Self>) { /// println!("Dropping: {}", self.field); /// } /// } /// /// fn main() { -/// let _x = Foo { field: 50 }; +/// let _x = PrintOnDrop { field: 50 }; /// } /// ``` /// @@ -513,13 +542,13 @@ pub fn pin_project(args: TokenStream, input: TokenStream) -> TokenStream { /// # Why `#[pinned_drop]` attribute is needed? /// /// Implementing `PinnedDrop::drop` is safe, but calling it is not safe. -// This is because destructors can be called multiple times in safe code and -/// [double dropping is unsound](https://github.com/rust-lang/rust/pull/62360). +/// This is because destructors can be called multiple times in safe code and +/// [double dropping is unsound][rust-lang/rust#62360]. /// /// Ideally, it would be desirable to be able to forbid manual calls in /// the same way as [`Drop::drop`], but the library cannot do it. 
So, by using -/// macros and replacing them with private traits like the following, we prevent -/// users from calling `PinnedDrop::drop` in safe code. +/// macros and replacing them with private traits like the following, +/// this crate prevent users from calling `PinnedDrop::drop` in safe code. /// /// ```rust /// # use std::pin::Pin; @@ -532,248 +561,16 @@ pub fn pin_project(args: TokenStream, input: TokenStream) -> TokenStream { /// Also by using the [`drop`] function just like dropping a type that directly /// implements [`Drop`], can drop safely a type that implements `PinnedDrop`. /// +/// [rust-lang/rust#62360]: https://github.com/rust-lang/rust/pull/62360 /// [`Pin`]: core::pin::Pin -/// [pinned-drop]: ./attr.pin_project.html#pinned_drop +/// [pinned-drop]: macro@pin_project#pinned_drop #[proc_macro_attribute] pub fn pinned_drop(args: TokenStream, input: TokenStream) -> TokenStream { let input = syn::parse_macro_input!(input); pinned_drop::attribute(&args.into(), input).into() } -/// (deprecated) An attribute to provide way to refer to the projected type returned by -/// `project` method. -/// -/// **This attribute is deprecated. Consider naming projected type by passing -/// `project` argument to `#[pin_project]` attribute instead, see [release note] -/// for details** -/// -/// The following syntaxes are supported. -/// -/// # `let` bindings -/// -/// *The attribute at the expression position is not stable, so you need to use -/// a dummy `#[project]` attribute for the function.* -/// -/// ## Examples -/// -/// ```rust -/// # #![allow(deprecated)] -/// use std::pin::Pin; -/// -/// use pin_project::{pin_project, project}; -/// -/// #[pin_project] -/// struct Foo { -/// #[pin] -/// future: T, -/// field: U, -/// } -/// -/// impl Foo { -/// #[project] // Nightly does not need a dummy attribute to the function. -/// fn baz(self: Pin<&mut Self>) { -/// #[project] -/// let Foo { future, field } = self.project(); -/// -/// let _: Pin<&mut T> = future; -/// let _: &mut U = field; -/// } -/// } -/// ``` -/// -/// # `match` expressions -/// -/// *The attribute at the expression position is not stable, so you need to use -/// a dummy `#[project]` attribute for the function.* -/// -/// ## Examples -/// -/// ```rust -/// # #![allow(deprecated)] -/// use std::pin::Pin; -/// -/// use pin_project::{pin_project, project}; -/// -/// #[pin_project] -/// enum Enum { -/// Tuple(#[pin] A, B), -/// Struct { field: C }, -/// Unit, -/// } -/// -/// impl Enum { -/// #[project] // Nightly does not need a dummy attribute to the function. -/// fn baz(self: Pin<&mut Self>) { -/// #[project] -/// match self.project() { -/// Enum::Tuple(x, y) => { -/// let _: Pin<&mut A> = x; -/// let _: &mut B = y; -/// } -/// Enum::Struct { field } => { -/// let _: &mut C = field; -/// } -/// Enum::Unit => {} -/// } -/// } -/// } -/// ``` -/// -/// # `impl` blocks -/// -/// All methods and associated functions in `#[project] impl` block become -/// methods of the projected type. If you want to implement methods on the -/// original type, you need to create another (non-`#[project]`) `impl` block. -/// -/// To call a method implemented in `#[project] impl` block, you need to first -/// get the projected-type with `let this = self.project();`. 
-/// -/// ## Examples -/// -/// ```rust -/// # #![allow(deprecated)] -/// use std::pin::Pin; -/// -/// use pin_project::{pin_project, project}; -/// -/// #[pin_project] -/// struct Foo { -/// #[pin] -/// future: T, -/// field: U, -/// } -/// -/// // impl for the original type -/// impl Foo { -/// fn bar(self: Pin<&mut Self>) { -/// self.project().baz() -/// } -/// } -/// -/// // impl for the projected type -/// #[project] -/// impl Foo { -/// fn baz(self) { -/// let Self { future, field } = self; -/// -/// let _: Pin<&mut T> = future; -/// let _: &mut U = field; -/// } -/// } -/// ``` -/// -/// # `use` statements -/// -/// ## Examples -/// -/// ```rust -/// # #![allow(deprecated)] -/// # mod dox { -/// use pin_project::pin_project; -/// -/// #[pin_project] -/// struct Foo { -/// #[pin] -/// field: A, -/// } -/// -/// mod bar { -/// use std::pin::Pin; -/// -/// use pin_project::project; -/// -/// use super::Foo; -/// #[project] -/// use super::Foo; -/// -/// #[project] -/// fn baz(foo: Pin<&mut Foo>) { -/// #[project] -/// let Foo { field } = foo.project(); -/// let _: Pin<&mut A> = field; -/// } -/// } -/// # } -/// ``` -/// -/// [release note]: https://github.com/taiki-e/pin-project/releases/tag/v0.4.21 -#[cfg_attr( - deprecated_proc_macro, - deprecated( - since = "0.4.21", - note = "consider naming projected type by passing `project` \ - argument to #[pin_project] attribute instead, see release note \ - \ - for details" - ) -)] -#[proc_macro_attribute] -pub fn project(args: TokenStream, input: TokenStream) -> TokenStream { - let input = syn::parse_macro_input!(input); - project::attribute(&args.into(), input, ProjKind::Mutable).into() -} - -/// (deprecated) An attribute to provide way to refer to the projected type returned by -/// `project_ref` method. -/// -/// **This attribute is deprecated. Consider naming projected type by passing -/// `project_ref` argument to `#[pin_project]` attribute instead, see [release note] -/// for details** -/// -/// This is the same as [`#[project]`][`project`] attribute except it refers to -/// the projected type returned by the `project_ref` method. -/// -/// See [`#[project]`][`project`] attribute for more details. -/// -/// [release note]: https://github.com/taiki-e/pin-project/releases/tag/v0.4.21 -/// [`project`]: ./attr.project.html -#[cfg_attr( - deprecated_proc_macro, - deprecated( - since = "0.4.21", - note = "consider naming projected type by passing `project_ref` \ - argument to #[pin_project] attribute instead, see release note \ - \ - for details" - ) -)] -#[proc_macro_attribute] -pub fn project_ref(args: TokenStream, input: TokenStream) -> TokenStream { - let input = syn::parse_macro_input!(input); - project::attribute(&args.into(), input, ProjKind::Immutable).into() -} - -/// (deprecated) An attribute to provide way to refer to the projected type returned by -/// `project_replace` method. -/// -/// **This attribute is deprecated. Consider naming projected type by passing -/// `project_replace` argument to `#[pin_project]` attribute instead, see [release note] -/// for details** -/// -/// This is the same as [`#[project]`][`project`] attribute except it refers to -/// the projected type returned by the `project_replace` method. -/// -/// See [`#[project]`][`project`] attribute for more details. 
-/// -/// [release note]: https://github.com/taiki-e/pin-project/releases/tag/v0.4.21 -/// [`project`]: ./attr.project.html -#[cfg_attr( - deprecated_proc_macro, - deprecated( - since = "0.4.21", - note = "consider naming projected type by passing `project_replace` \ - argument to #[pin_project] attribute instead, see release note \ - \ - for details" - ) -)] -#[proc_macro_attribute] -pub fn project_replace(args: TokenStream, input: TokenStream) -> TokenStream { - let input = syn::parse_macro_input!(input); - project::attribute(&args.into(), input, ProjKind::Owned).into() -} - -// An internal helper macro. Not public API. +// Not public API. #[doc(hidden)] #[proc_macro_derive(__PinProjectInternalDerive, attributes(pin))] pub fn __pin_project_internal_derive(input: TokenStream) -> TokenStream { diff --git a/third_party/rust/pin-project-internal/src/pin_project/args.rs b/third_party/rust/pin-project-internal/src/pin_project/args.rs new file mode 100644 index 000000000000..d0d4f362f575 --- /dev/null +++ b/third_party/rust/pin-project-internal/src/pin_project/args.rs @@ -0,0 +1,254 @@ +use proc_macro2::{Span, TokenStream}; +use quote::quote; +use syn::{ + parse::{Parse, ParseStream}, + spanned::Spanned, + Attribute, Error, Ident, Result, Token, +}; + +use super::PIN; +use crate::utils::{ParseBufferExt, SliceExt}; + +pub(super) fn parse_args(attrs: &[Attribute]) -> Result { + // `(__private())` -> `` + struct Input(Option); + + impl Parse for Input { + fn parse(input: ParseStream<'_>) -> Result { + Ok(Self((|| { + let content = input.parenthesized().ok()?; + let private = content.parse::().ok()?; + if private == "__private" { + content.parenthesized().ok()?.parse::().ok() + } else { + None + } + })())) + } + } + + if let Some(attr) = attrs.find("pin_project") { + bail!(attr, "duplicate #[pin_project] attribute"); + } + + let mut attrs = attrs.iter().filter(|attr| attr.path.is_ident(PIN)); + + let prev = if let Some(attr) = attrs.next() { + (attr, syn::parse2::(attr.tokens.clone()).unwrap().0) + } else { + // This only fails if another macro removes `#[pin]`. + bail!(TokenStream::new(), "#[pin_project] attribute has been removed"); + }; + + if let Some(attr) = attrs.next() { + let (prev_attr, prev_res) = &prev; + // As the `#[pin]` attribute generated by `#[pin_project]` + // has the same span as `#[pin_project]`, it is possible + // that a useless error message will be generated. + // So, use the span of `prev_attr` if it is not a valid attribute. + let res = syn::parse2::(attr.tokens.clone()).unwrap().0; + let span = match (prev_res, res) { + (Some(_), _) => attr, + (None, _) => prev_attr, + }; + bail!(span, "duplicate #[pin] attribute"); + } + // This `unwrap` only fails if another macro removes `#[pin]` and inserts own `#[pin]`. + syn::parse2(prev.1.unwrap()) +} + +pub(super) struct Args { + /// `PinnedDrop` argument. + pub(super) pinned_drop: Option, + /// `UnsafeUnpin` or `!Unpin` argument. + pub(super) unpin_impl: UnpinImpl, + /// `project = ` argument. + pub(super) project: Option, + /// `project_ref = ` argument. + pub(super) project_ref: Option, + /// `project_replace [= ]` argument. + pub(super) project_replace: ProjReplace, +} + +impl Parse for Args { + fn parse(input: ParseStream<'_>) -> Result { + mod kw { + syn::custom_keyword!(Unpin); + } + + /// Parses `= ` in ` = ` and returns value and span of name-value pair. 
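The new `args.rs` parses the attribute's own arguments; from the user's side, the surface it accepts looks roughly like this (an illustrative sketch, not part of the patch):

```rust
use pin_project::pin_project;

// `project`/`project_ref` name the projection types and `!Unpin` opts out of
// the generated Unpin impl. `PinnedDrop` and `project_replace` (not shown) are
// also accepted, but `parse_args` rejects mutually exclusive combinations such
// as `UnsafeUnpin` together with `!Unpin`.
#[pin_project(project = StructProj, project_ref = StructProjRef, !Unpin)]
struct Struct<T> {
    #[pin]
    field: T,
}
```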
+ fn parse_value( + input: ParseStream<'_>, + name: &Ident, + has_prev: bool, + ) -> Result<(Ident, TokenStream)> { + if input.is_empty() { + bail!(name, "expected `{0} = `, found `{0}`", name); + } + let eq_token: Token![=] = input.parse()?; + if input.is_empty() { + let span = quote!(#name #eq_token); + bail!(span, "expected `{0} = `, found `{0} =`", name); + } + let value: Ident = input.parse()?; + let span = quote!(#name #value); + if has_prev { + bail!(span, "duplicate `{}` argument", name); + } + Ok((value, span)) + } + + let mut pinned_drop = None; + let mut unsafe_unpin = None; + let mut not_unpin = None; + let mut project = None; + let mut project_ref = None; + let mut project_replace_value = None; + let mut project_replace_span = None; + + while !input.is_empty() { + if input.peek(Token![!]) { + let bang: Token![!] = input.parse()?; + if input.is_empty() { + bail!(bang, "expected `!Unpin`, found `!`"); + } + let unpin: kw::Unpin = input.parse()?; + let span = quote!(#bang #unpin); + if not_unpin.replace(span.span()).is_some() { + bail!(span, "duplicate `!Unpin` argument"); + } + } else { + let token = input.parse::()?; + match &*token.to_string() { + "PinnedDrop" => { + if pinned_drop.replace(token.span()).is_some() { + bail!(token, "duplicate `PinnedDrop` argument"); + } + } + "UnsafeUnpin" => { + if unsafe_unpin.replace(token.span()).is_some() { + bail!(token, "duplicate `UnsafeUnpin` argument"); + } + } + "project" => { + project = Some(parse_value(input, &token, project.is_some())?.0); + } + "project_ref" => { + project_ref = Some(parse_value(input, &token, project_ref.is_some())?.0); + } + "project_replace" => { + if input.peek(Token![=]) { + let (value, span) = + parse_value(input, &token, project_replace_span.is_some())?; + project_replace_value = Some(value); + project_replace_span = Some(span.span()); + } else if project_replace_span.is_some() { + bail!(token, "duplicate `project_replace` argument"); + } else { + project_replace_span = Some(token.span()); + } + } + "Replace" => { + bail!( + token, + "`Replace` argument was removed, use `project_replace` argument instead" + ); + } + _ => bail!(token, "unexpected argument: {}", token), + } + } + + if input.is_empty() { + break; + } + let _: Token![,] = input.parse()?; + } + + if project.is_some() || project_ref.is_some() { + if project == project_ref { + bail!( + project_ref, + "name `{}` is already specified by `project` argument", + project_ref.as_ref().unwrap() + ); + } + if let Some(ident) = &project_replace_value { + if project == project_replace_value { + bail!(ident, "name `{}` is already specified by `project` argument", ident); + } else if project_ref == project_replace_value { + bail!(ident, "name `{}` is already specified by `project_ref` argument", ident); + } + } + } + + if let Some(span) = pinned_drop { + if project_replace_span.is_some() { + return Err(Error::new( + span, + "arguments `PinnedDrop` and `project_replace` are mutually exclusive", + )); + } + } + let project_replace = match (project_replace_span, project_replace_value) { + (None, _) => ProjReplace::None, + (Some(span), Some(ident)) => ProjReplace::Named { ident, span }, + (Some(span), None) => ProjReplace::Unnamed { span }, + }; + let unpin_impl = match (unsafe_unpin, not_unpin) { + (None, None) => UnpinImpl::Default, + (Some(span), None) => UnpinImpl::Unsafe(span), + (None, Some(span)) => UnpinImpl::Negative(span), + (Some(span), Some(_)) => { + return Err(Error::new( + span, + "arguments `UnsafeUnpin` and `!Unpin` are mutually exclusive", + 
)); + } + }; + + Ok(Self { pinned_drop, unpin_impl, project, project_ref, project_replace }) + } +} + +/// `UnsafeUnpin` or `!Unpin` argument. +#[derive(Clone, Copy)] +pub(super) enum UnpinImpl { + Default, + /// `UnsafeUnpin`. + Unsafe(Span), + /// `!Unpin`. + Negative(Span), +} + +/// `project_replace [= ]` argument. +pub(super) enum ProjReplace { + None, + /// `project_replace`. + Unnamed { + span: Span, + }, + /// `project_replace = `. + #[allow(dead_code)] // false positive that fixed in Rust 1.38 + Named { + span: Span, + ident: Ident, + }, +} + +impl ProjReplace { + /// Return the span of this argument. + pub(super) fn span(&self) -> Option { + match self { + Self::None => None, + Self::Named { span, .. } | Self::Unnamed { span, .. } => Some(*span), + } + } + + pub(super) fn ident(&self) -> Option<&Ident> { + if let Self::Named { ident, .. } = self { + Some(ident) + } else { + None + } + } +} diff --git a/third_party/rust/pin-project-internal/src/pin_project/attribute.rs b/third_party/rust/pin-project-internal/src/pin_project/attribute.rs index eee58a8e7679..c8811cb43dee 100644 --- a/third_party/rust/pin-project-internal/src/pin_project/attribute.rs +++ b/third_party/rust/pin-project-internal/src/pin_project/attribute.rs @@ -17,9 +17,9 @@ use crate::utils::SliceExt; // // At this stage, only attributes are parsed and the following attributes are // added to the attributes of the item. -// * `#[derive(InternalDerive)]` - An internal helper macro that does the above +// - `#[derive(InternalDerive)]` - An internal helper macro that does the above // processing. -// * `#[pin(__private(#args))]` - Pass the argument of `#[pin_project]` to +// - `#[pin(__private(#args))]` - Pass the argument of `#[pin_project]` to // proc-macro-derive (`InternalDerive`). pub(super) fn parse_attribute(args: &TokenStream, input: TokenStream) -> Result { @@ -36,7 +36,7 @@ pub(super) fn parse_attribute(args: &TokenStream, input: TokenStream) -> Result< }) } -#[allow(dead_code)] // https://github.com/rust-lang/rust/issues/56750 +#[allow(dead_code)] // false positive that fixed in Rust 1.39 struct Input { attrs: Vec, body: TokenStream, @@ -51,16 +51,15 @@ impl Parse for Input { if !ahead.peek(Token![struct]) && !ahead.peek(Token![enum]) { // If we check this only on proc-macro-derive, it may generate unhelpful error // messages. So it is preferable to be able to detect it here. - Err(error!( + bail!( input.parse::()?, "#[pin_project] attribute may only be used on structs or enums" - )) + ); } else if let Some(attr) = attrs.find(PIN) { - Err(error!(attr, "#[pin] attribute may only be used on fields of structs or variants")) + bail!(attr, "#[pin] attribute may only be used on fields of structs or variants"); } else if let Some(attr) = attrs.find("pin_project") { - Err(error!(attr, "duplicate #[pin_project] attribute")) - } else { - Ok(Self { attrs, body: input.parse()? }) + bail!(attr, "duplicate #[pin_project] attribute"); } + Ok(Self { attrs, body: input.parse()? 
}) } } diff --git a/third_party/rust/pin-project-internal/src/pin_project/derive.rs b/third_party/rust/pin-project-internal/src/pin_project/derive.rs index 7357da524d20..3e578f782038 100644 --- a/third_party/rust/pin-project-internal/src/pin_project/derive.rs +++ b/third_party/rust/pin-project-internal/src/pin_project/derive.rs @@ -1,25 +1,26 @@ use proc_macro2::{Delimiter, Group, Span, TokenStream}; use quote::{format_ident, quote, quote_spanned, ToTokens}; use syn::{ - parse::{Parse, ParseStream}, - parse_quote, - spanned::Spanned, - token, - visit_mut::VisitMut, - Attribute, Data, DataEnum, DataStruct, DeriveInput, Error, Field, Fields, FieldsNamed, - FieldsUnnamed, Generics, Ident, Index, Lifetime, LifetimeDef, Meta, MetaList, NestedMeta, - Result, Token, Type, Variant, Visibility, WhereClause, + parse_quote, token, visit_mut::VisitMut, Attribute, Data, DataEnum, DeriveInput, Error, Field, + Fields, FieldsNamed, FieldsUnnamed, Generics, Ident, Index, Lifetime, LifetimeDef, Meta, + MetaList, MetaNameValue, NestedMeta, Result, Token, Type, Variant, Visibility, WhereClause, }; -use super::PIN; +use super::{ + args::{parse_args, Args, ProjReplace, UnpinImpl}, + PIN, +}; use crate::utils::{ - determine_lifetime_name, determine_visibility, insert_lifetime_and_bound, ParseBufferExt, - ProjKind, ReplaceReceiver, SliceExt, Variants, + determine_lifetime_name, determine_visibility, insert_lifetime_and_bound, ReplaceReceiver, + SliceExt, Variants, }; pub(super) fn parse_derive(input: TokenStream) -> Result { let mut input: DeriveInput = syn::parse2(input)?; + let mut cx; + let mut generate = GenerateTokens::default(); + let ident = &input.ident; let ty_generics = input.generics.split_for_impl().1; let self_ty = parse_quote!(#ident #ty_generics); @@ -27,345 +28,219 @@ pub(super) fn parse_derive(input: TokenStream) -> Result { visitor.visit_generics_mut(&mut input.generics); visitor.visit_data_mut(&mut input.data); - let mut cx = Context::new(&input.attrs, &input.vis, &input.ident, &mut input.generics)?; - let packed_check; - - let (mut items, scoped_items) = match &input.data { + match &input.data { Data::Struct(data) => { - // Do this first for a better error message. - packed_check = Some(cx.ensure_not_packed(&data.fields)?); - cx.parse_struct(data)? + cx = Context::new(&input.attrs, &input.vis, ident, &mut input.generics, Struct)?; + parse_struct(&mut cx, &data.fields, &mut generate)?; } Data::Enum(data) => { - // We don't need to check for `#[repr(packed)]`, - // since it does not apply to enums. - packed_check = None; - cx.parse_enum(data)? + cx = Context::new(&input.attrs, &input.vis, ident, &mut input.generics, Enum)?; + parse_enum(&mut cx, data, &mut generate)?; } Data::Union(_) => { - return Err(error!( - input, - "#[pin_project] attribute may only be used on structs or enums" - )); + bail!(input, "#[pin_project] attribute may only be used on structs or enums"); } - }; + } - let unpin_impl = cx.make_unpin_impl(); - let drop_impl = cx.make_drop_impl(); - let dummy_const = if cfg!(underscore_consts) { - format_ident!("_") - } else { - format_ident!("__SCOPE_{}", ident) - }; - items.extend(quote! { - // All items except projected types are generated inside a `const` scope. - // This makes it impossible for user code to refer to these types. - // However, this prevents Rustdoc from displaying docs for any - // of our types. 
In particular, users cannot see the - // automatically generated `Unpin` impl for the '__UnpinStruct' types - // - // Previously, we provided a flag to correctly document the - // automatically generated `Unpin` impl by using def-site hygiene, - // but it is now removed. - // - // Refs: - // * https://github.com/rust-lang/rust/issues/63281 - // * https://github.com/taiki-e/pin-project/pull/53#issuecomment-525906867 - // * https://github.com/taiki-e/pin-project/pull/70 - #[doc(hidden)] - #[allow(non_upper_case_globals)] - #[allow(single_use_lifetimes)] // https://github.com/rust-lang/rust/issues/55058 - #[allow(clippy::used_underscore_binding)] - const #dummy_const: () = { - #scoped_items - #unpin_impl - #drop_impl - #packed_check - }; - }); - Ok(items) + Ok(generate.into_tokens(&cx)) } -fn validate_struct(ident: &Ident, fields: &Fields) -> Result<()> { - if fields.is_empty() { - let msg = "#[pin_project] attribute may not be used on structs with zero fields"; - if let Fields::Unit = fields { Err(error!(ident, msg)) } else { Err(error!(fields, msg)) } - } else { - Ok(()) - } +#[derive(Default)] +struct GenerateTokens { + exposed: TokenStream, + scoped: TokenStream, } -fn validate_enum(brace_token: token::Brace, variants: &Variants) -> Result<()> { - if variants.is_empty() { - return Err(Error::new( - brace_token.span, - "#[pin_project] attribute may not be used on enums without variants", - )); - } - let has_field = variants.iter().try_fold(false, |has_field, v| { - if let Some((_, e)) = &v.discriminant { - Err(error!(e, "#[pin_project] attribute may not be used on enums with discriminants")) - } else if let Some(attr) = v.attrs.find(PIN) { - Err(error!(attr, "#[pin] attribute may only be used on fields of structs or variants")) - } else if v.fields.is_empty() { - Ok(has_field) +impl GenerateTokens { + fn extend(&mut self, expose: bool, tokens: TokenStream) { + if expose { + self.exposed.extend(tokens); } else { - Ok(true) + self.scoped.extend(tokens); } - })?; - if has_field { - Ok(()) - } else { - Err(error!(variants, "#[pin_project] attribute may not be used on enums with zero fields")) + } + + fn into_tokens(self, cx: &Context<'_>) -> TokenStream { + let mut tokens = self.exposed; + let scoped = self.scoped; + + let unpin_impl = make_unpin_impl(cx); + let drop_impl = make_drop_impl(cx); + let allowed_lints = global_allowed_lints(); + + tokens.extend(quote! { + // All items except projected types are generated inside a `const` scope. + // This makes it impossible for user code to refer to these types. + // However, this prevents Rustdoc from displaying docs for any + // of our types. In particular, users cannot see the + // automatically generated `Unpin` impl for the '__UnpinStruct' types + // + // Previously, we provided a flag to correctly document the + // automatically generated `Unpin` impl by using def-site hygiene, + // but it is now removed. + // + // Refs: + // - https://github.com/rust-lang/rust/issues/63281 + // - https://github.com/taiki-e/pin-project/pull/53#issuecomment-525906867 + // - https://github.com/taiki-e/pin-project/pull/70 + #allowed_lints + #[allow(unused_qualifications)] + #[allow(clippy::semicolon_if_nothing_returned)] + #[allow(clippy::use_self)] + #[allow(clippy::used_underscore_binding)] + const _: () = { + #[allow(unused_extern_crates)] + extern crate pin_project as _pin_project; + #scoped + #unpin_impl + #drop_impl + }; + }); + tokens } } -struct Args { +/// Returns attributes that should be applied to all generated code. 
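`GenerateTokens::into_tokens` above wraps everything except the projected types in an unnamed `const` block. A tiny sketch of why that hides the generated helpers (hypothetical names, not part of the patch):

```rust
pub struct Struct<T> {
    field: T,
}

const _: () = {
    // Items declared here are visible to the impls in the same block...
    pub struct __Helper;

    impl<T> Struct<T> {
        pub fn get(&self) -> &T {
            let _hidden = __Helper;
            &self.field
        }
    }
};

// ...but user code cannot name them:
// let _x: __Helper; // error[E0412]: cannot find type `__Helper` in this scope
```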
+fn global_allowed_lints() -> TokenStream { + quote! { + #[allow(box_pointers)] // This lint warns use of the `Box` type. + #[allow(deprecated)] + #[allow(explicit_outlives_requirements)] // https://github.com/rust-lang/rust/issues/60993 + #[allow(single_use_lifetimes)] // https://github.com/rust-lang/rust/issues/55058 + #[allow(unreachable_pub)] // This lint warns `pub` field in private struct. + // This lint warns of `clippy::*` generated by external macros. + // We allow this lint for compatibility with older compilers. + #[allow(clippy::unknown_clippy_lints)] + #[allow(clippy::pattern_type_mismatch)] + #[allow(clippy::redundant_pub_crate)] // This lint warns `pub(crate)` field in private struct. + #[allow(clippy::type_repetition_in_bounds)] // https://github.com/rust-lang/rust-clippy/issues/4326 + } +} + +/// Returns attributes used on projected types. +fn proj_allowed_lints(cx: &Context<'_>) -> (TokenStream, TokenStream, TokenStream) { + let large_enum_variant = if cx.kind == Enum { + Some(quote! { + #[allow(variant_size_differences)] + #[allow(clippy::large_enum_variant)] + }) + } else { + None + }; + let global_allowed_lints = global_allowed_lints(); + let proj_mut_allowed_lints = if cx.project { Some(&global_allowed_lints) } else { None }; + let proj_mut = quote! { + #proj_mut_allowed_lints + #[allow(dead_code)] // This lint warns unused fields/variants. + #[allow(clippy::mut_mut)] // This lint warns `&mut &mut `. + }; + let proj_ref_allowed_lints = if cx.project_ref { Some(&global_allowed_lints) } else { None }; + let proj_ref = quote! { + #proj_ref_allowed_lints + #[allow(dead_code)] // This lint warns unused fields/variants. + #[allow(clippy::ref_option_ref)] // This lint warns `&Option<&>`. + }; + let proj_own_allowed_lints = + if cx.project_replace.ident().is_some() { Some(&global_allowed_lints) } else { None }; + let proj_own = quote! { + #proj_own_allowed_lints + #[allow(dead_code)] // This lint warns unused fields/variants. + #large_enum_variant + }; + (proj_mut, proj_ref, proj_own) +} + +struct Context<'a> { + /// The original type. + orig: OriginalType<'a>, + /// The projected types. + proj: ProjectedType, + /// Types of the pinned fields. + pinned_fields: Vec<&'a Type>, + /// Kind of the original type: struct or enum + kind: TypeKind, + /// `PinnedDrop` argument. pinned_drop: Option, /// `UnsafeUnpin` or `!Unpin` argument. unpin_impl: UnpinImpl, - /// `project = ` argument. - project: Option, - /// `project_ref = ` argument. - project_ref: Option, - /// `project_replace [= ]` or `Replace` argument. + /// `project` argument. + project: bool, + /// `project_ref` argument. + project_ref: bool, + /// `project_replace [= ]` argument. project_replace: ProjReplace, } -enum ProjReplace { - None, - /// `project_replace` or `Replace`. - Unnamed { - span: Span, - }, - /// `project_replace = `. - Named { - span: Span, - ident: Ident, - }, -} +impl<'a> Context<'a> { + fn new( + attrs: &'a [Attribute], + vis: &'a Visibility, + ident: &'a Ident, + generics: &'a mut Generics, + kind: TypeKind, + ) -> Result { + let Args { pinned_drop, unpin_impl, project, project_ref, project_replace } = + parse_args(attrs)?; -impl ProjReplace { - fn span(&self) -> Option { - match self { - ProjReplace::None => None, - ProjReplace::Named { span, .. } | ProjReplace::Unnamed { span, .. 
} => Some(*span), + if let Some(name) = [project.as_ref(), project_ref.as_ref(), project_replace.ident()] + .iter() + .filter_map(Option::as_ref) + .find(|name| **name == ident) + { + bail!(name, "name `{}` is the same as the original type name", name); } - } - fn ident(&self) -> Option<&Ident> { - if let ProjReplace::Named { ident, .. } = self { Some(ident) } else { None } + let mut lifetime_name = String::from("'pin"); + determine_lifetime_name(&mut lifetime_name, generics); + let lifetime = Lifetime::new(&lifetime_name, Span::call_site()); + + let ty_generics = generics.split_for_impl().1; + let ty_generics_as_generics = parse_quote!(#ty_generics); + let mut proj_generics = generics.clone(); + let pred = insert_lifetime_and_bound( + &mut proj_generics, + lifetime.clone(), + &ty_generics_as_generics, + ident, + ); + let mut where_clause = generics.make_where_clause().clone(); + where_clause.predicates.push(pred); + + let own_ident = project_replace + .ident() + .cloned() + .unwrap_or_else(|| format_ident!("__{}ProjectionOwned", ident)); + + Ok(Self { + kind, + pinned_drop, + unpin_impl, + project: project.is_some(), + project_ref: project_ref.is_some(), + project_replace, + proj: ProjectedType { + vis: determine_visibility(vis), + mut_ident: project.unwrap_or_else(|| format_ident!("__{}Projection", ident)), + ref_ident: project_ref.unwrap_or_else(|| format_ident!("__{}ProjectionRef", ident)), + own_ident, + lifetime, + generics: proj_generics, + where_clause, + }, + orig: OriginalType { attrs, vis, ident, generics }, + pinned_fields: Vec::new(), + }) } } -const DUPLICATE_PIN: &str = "duplicate #[pin] attribute"; - -impl Args { - fn get(attrs: &[Attribute]) -> Result { - // `(__private())` -> `` - struct Input(Option); - - impl Parse for Input { - fn parse(input: ParseStream<'_>) -> Result { - Ok(Self((|| { - let content = input.parenthesized().ok()?; - let private = content.parse::().ok()?; - if private == "__private" { - content.parenthesized().ok()?.parse::().ok() - } else { - None - } - })())) - } - } - - if let Some(attr) = attrs.find("pin_project") { - return Err(error!(attr, "duplicate #[pin_project] attribute")); - } - - let mut attrs = attrs.iter().filter(|attr| attr.path.is_ident(PIN)); - - let prev = if let Some(attr) = attrs.next() { - (attr, syn::parse2::(attr.tokens.clone()).unwrap().0) - } else { - // This only fails if another macro removes `#[pin]`. - return Err(error!(TokenStream::new(), "#[pin_project] attribute has been removed")); - }; - - if let Some(attr) = attrs.next() { - let (prev_attr, prev_res) = &prev; - // As the `#[pin]` attribute generated by `#[pin_project]` - // has the same span as `#[pin_project]`, it is possible - // that a useless error message will be generated. - let res = syn::parse2::(attr.tokens.clone()).unwrap().0; - let span = match (&prev_res, res) { - (Some(_), _) => attr, - (_, Some(_)) => prev_attr, - (None, None) => prev_attr, - }; - Err(error!(span, DUPLICATE_PIN)) - } else { - // This `unwrap` only fails if another macro removes `#[pin]` and inserts own `#[pin]`. - syn::parse2(prev.1.unwrap()) - } - } +#[derive(Copy, Clone, Eq, PartialEq)] +enum TypeKind { + Enum, + Struct, } -impl Parse for Args { - fn parse(input: ParseStream<'_>) -> Result { - mod kw { - syn::custom_keyword!(Unpin); - } - - // Parses `= ` in ` = ` and returns value and span of name-value pair. 
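The arguments checked above (`project = ...`, `project_ref = ...`, `project_replace = ...`) are the user-facing names for the projected types, which is why a collision with the original type name is rejected. As a hedged illustration only (not part of this patch), here is a small usage sketch; `Message`, `EnumProj`, and `EnumProjRef` are names invented for the example.

```rust
use std::pin::Pin;

use pin_project::pin_project;

// Naming the projection types makes them usable outside the generated `const`
// scope, which is exactly what the name checks above guard.
#[pin_project(project = EnumProj, project_ref = EnumProjRef)]
enum Message<T> {
    Pending {
        #[pin]
        future: T,
    },
    Done(usize),
}

fn poll_len<T>(msg: Pin<&mut Message<T>>) -> Option<usize> {
    // `project()` returns the named `EnumProj` type, so it can be matched on here.
    match msg.project() {
        EnumProj::Pending { .. } => None,
        EnumProj::Done(len) => Some(*len),
    }
}

fn main() {
    let mut msg: Message<()> = Message::Done(3);
    // `Message<()>` is `Unpin`, so `Pin::new` is enough for the demo.
    assert_eq!(poll_len(Pin::new(&mut msg)), Some(3));
}
```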
- fn parse_value( - input: ParseStream<'_>, - name: &Ident, - has_prev: bool, - ) -> Result<(Ident, TokenStream)> { - if input.is_empty() { - return Err(error!(name, "expected `{0} = `, found `{0}`", name)); - } - let eq_token: Token![=] = input.parse()?; - if input.is_empty() { - let span = quote!(#name #eq_token); - return Err(error!(span, "expected `{0} = `, found `{0} =`", name)); - } - let value: Ident = input.parse()?; - let span = quote!(#name #value); - if has_prev { - Err(error!(span, "duplicate `{}` argument", name)) - } else { - Ok((value, span)) - } - } - - let mut pinned_drop = None; - let mut unsafe_unpin = None; - let mut not_unpin = None; - let mut project = None; - let mut project_ref = None; - - let mut replace = None; - let mut project_replace_value = None; - let mut project_replace_span = None; - - while !input.is_empty() { - if input.peek(Token![!]) { - let bang: Token![!] = input.parse()?; - if input.is_empty() { - return Err(error!(bang, "expected `!Unpin`, found `!`")); - } - let unpin: kw::Unpin = input.parse()?; - let span = quote!(#bang #unpin); - if not_unpin.replace(span.span()).is_some() { - return Err(error!(span, "duplicate `!Unpin` argument")); - } - } else { - let token = input.parse::()?; - match &*token.to_string() { - "PinnedDrop" => { - if pinned_drop.replace(token.span()).is_some() { - return Err(error!(token, "duplicate `PinnedDrop` argument")); - } - } - "UnsafeUnpin" => { - if unsafe_unpin.replace(token.span()).is_some() { - return Err(error!(token, "duplicate `UnsafeUnpin` argument")); - } - } - "project" => { - project = Some(parse_value(input, &token, project.is_some())?.0); - } - "project_ref" => { - project_ref = Some(parse_value(input, &token, project_ref.is_some())?.0); - } - "project_replace" => { - if input.peek(Token![=]) { - let (value, span) = - parse_value(input, &token, project_replace_span.is_some())?; - project_replace_value = Some(value); - project_replace_span = Some(span.span()); - } else if project_replace_span.is_some() { - return Err(error!(token, "duplicate `project_replace` argument")); - } else { - project_replace_span = Some(token.span()); - } - } - "Replace" => { - if replace.replace(token.span()).is_some() { - return Err(error!(token, "duplicate `Replace` argument")); - } - } - _ => return Err(error!(token, "unexpected argument: {}", token)), - } - } - - if input.is_empty() { - break; - } - let _: Token![,] = input.parse()?; - } - - if project.is_some() || project_ref.is_some() { - if project == project_ref { - return Err(error!( - project_ref, - "name `{}` is already specified by `project` argument", - project_ref.as_ref().unwrap() - )); - } - if let Some(ident) = &project_replace_value { - if project == project_replace_value { - return Err(error!( - ident, - "name `{}` is already specified by `project` argument", ident - )); - } else if project_ref == project_replace_value { - return Err(error!( - ident, - "name `{}` is already specified by `project_ref` argument", ident - )); - } - } - } - - if let Some(span) = pinned_drop { - if project_replace_span.is_some() { - return Err(Error::new( - span, - "arguments `PinnedDrop` and `project_replace` are mutually exclusive", - )); - } else if replace.is_some() { - return Err(Error::new( - span, - "arguments `PinnedDrop` and `Replace` are mutually exclusive", - )); - } - } - let project_replace = match (project_replace_span, project_replace_value, replace) { - (None, _, None) => ProjReplace::None, - // If both `project_replace` and `Replace` are specified, - // We always prefer 
`project_replace`'s span, - (Some(span), Some(ident), _) => ProjReplace::Named { ident, span }, - (Some(span), ..) | (None, _, Some(span)) => ProjReplace::Unnamed { span }, - }; - let unpin_impl = match (unsafe_unpin, not_unpin) { - (None, None) => UnpinImpl::Default, - (Some(span), None) => UnpinImpl::Unsafe(span), - (None, Some(span)) => UnpinImpl::Negative(span), - (Some(span), Some(_)) => { - return Err(Error::new( - span, - "arguments `UnsafeUnpin` and `!Unpin` are mutually exclusive", - )); - } - }; - - Ok(Self { pinned_drop, unpin_impl, project, project_ref, project_replace }) - } -} +use TypeKind::{Enum, Struct}; struct OriginalType<'a> { /// Attributes of the original type. @@ -415,121 +290,218 @@ struct ProjectedFields { proj_own_fields: TokenStream, } -struct Context<'a> { - /// The original type. - orig: OriginalType<'a>, - /// The projected types. - proj: ProjectedType, - /// Types of the pinned fields. - pinned_fields: Vec, - - /// `PinnedDrop` argument. - pinned_drop: Option, - /// `UnsafeUnpin` or `!Unpin` argument. - unpin_impl: UnpinImpl, - /// `project` argument. - project: bool, - /// `project_ref` argument. - project_ref: bool, - /// `project_replace [= ]` or `Replace` argument. - project_replace: ProjReplace, +fn validate_struct(ident: &Ident, fields: &Fields) -> Result<()> { + if fields.is_empty() { + let msg = "#[pin_project] attribute may not be used on structs with zero fields"; + if let Fields::Unit = fields { + bail!(ident, msg) + } + bail!(fields, msg) + } + Ok(()) } -#[derive(Clone, Copy)] -enum UnpinImpl { - Default, - /// `UnsafeUnpin`. - Unsafe(Span), - /// `!Unpin`. - Negative(Span), +fn validate_enum(brace_token: token::Brace, variants: &Variants) -> Result<()> { + if variants.is_empty() { + return Err(Error::new( + brace_token.span, + "#[pin_project] attribute may not be used on enums without variants", + )); + } + let has_field = variants.iter().try_fold(false, |has_field, v| { + if let Some((_, e)) = &v.discriminant { + bail!(e, "#[pin_project] attribute may not be used on enums with discriminants"); + } else if let Some(attr) = v.attrs.find(PIN) { + bail!(attr, "#[pin] attribute may only be used on fields of structs or variants"); + } else if v.fields.is_empty() { + Ok(has_field) + } else { + Ok(true) + } + })?; + if has_field { + Ok(()) + } else { + bail!(variants, "#[pin_project] attribute may not be used on enums with zero fields"); + } } -impl<'a> Context<'a> { - fn new( - attrs: &'a [Attribute], - vis: &'a Visibility, - ident: &'a Ident, - generics: &'a mut Generics, - ) -> Result { - let Args { pinned_drop, unpin_impl, project, project_ref, project_replace } = - Args::get(attrs)?; +fn parse_struct<'a>( + cx: &mut Context<'a>, + fields: &'a Fields, + generate: &mut GenerateTokens, +) -> Result<()> { + // Do this first for a better error message. 
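The `(unsafe_unpin, not_unpin)` match above selects between the three `Unpin` strategies. A hedged sketch of what the `!Unpin` argument means from the user's side; the type names `Movable` and `Blocked` are invented for this example.

```rust
use pin_project::pin_project;

fn assert_unpin<T: Unpin>() {}

#[pin_project]
struct Movable {
    #[pin]
    value: u32, // `u32: Unpin`, so the default strategy makes `Movable: Unpin`
}

#[pin_project(!Unpin)]
struct Blocked {
    #[pin]
    value: u32, // same field type, but `!Unpin` opts out of the automatic impl
}

fn main() {
    assert_unpin::<Movable>();
    // assert_unpin::<Blocked>(); // would fail to compile: `Blocked` is not `Unpin`
}
```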
+ let packed_check = ensure_not_packed(&cx.orig, Some(fields))?; - let mut lifetime_name = String::from("'pin"); - determine_lifetime_name(&mut lifetime_name, generics); - let lifetime = Lifetime::new(&lifetime_name, Span::call_site()); + validate_struct(cx.orig.ident, fields)?; - let ty_generics = generics.split_for_impl().1; - let ty_generics_as_generics = parse_quote!(#ty_generics); - let mut proj_generics = generics.clone(); - let pred = insert_lifetime_and_bound( - &mut proj_generics, - lifetime.clone(), - &ty_generics_as_generics, - ident, - ); - let mut where_clause = generics.make_where_clause().clone(); - where_clause.predicates.push(pred); + let ProjectedFields { + proj_pat, + proj_body, + proj_fields, + proj_ref_fields, + proj_own_fields, + proj_own_body, + } = match fields { + Fields::Named(_) => visit_fields(cx, None, fields, Delimiter::Brace)?, + Fields::Unnamed(_) => visit_fields(cx, None, fields, Delimiter::Parenthesis)?, + Fields::Unit => unreachable!(), + }; - let own_ident = - project_replace.ident().cloned().unwrap_or_else(|| ProjKind::Owned.proj_ident(ident)); + let proj_ident = &cx.proj.mut_ident; + let proj_ref_ident = &cx.proj.ref_ident; + let proj_own_ident = &cx.proj.own_ident; + let vis = &cx.proj.vis; + let mut orig_generics = cx.orig.generics.clone(); + let orig_where_clause = orig_generics.where_clause.take(); + let proj_generics = &cx.proj.generics; + let proj_where_clause = &cx.proj.where_clause; - Ok(Self { - pinned_drop, - unpin_impl, - project: project.is_some(), - project_ref: project_ref.is_some(), - project_replace, - proj: ProjectedType { - vis: determine_visibility(vis), - mut_ident: project.unwrap_or_else(|| ProjKind::Mutable.proj_ident(ident)), - ref_ident: project_ref.unwrap_or_else(|| ProjKind::Immutable.proj_ident(ident)), - own_ident, - lifetime, - generics: proj_generics, - where_clause, - }, - orig: OriginalType { attrs, vis, ident, generics }, - pinned_fields: Vec::new(), - }) + // For tuple structs, we need to generate `(T1, T2) where Foo: Bar` + // For non-tuple structs, we need to generate `where Foo: Bar { field1: T }` + let (where_clause_fields, where_clause_ref_fields, where_clause_own_fields) = match fields { + Fields::Named(_) => ( + quote!(#proj_where_clause #proj_fields), + quote!(#proj_where_clause #proj_ref_fields), + quote!(#orig_where_clause #proj_own_fields), + ), + Fields::Unnamed(_) => ( + quote!(#proj_fields #proj_where_clause;), + quote!(#proj_ref_fields #proj_where_clause;), + quote!(#proj_own_fields #orig_where_clause;), + ), + Fields::Unit => unreachable!(), + }; + + let (proj_attrs, proj_ref_attrs, proj_own_attrs) = proj_allowed_lints(cx); + generate.extend(cx.project, quote! { + #proj_attrs + #vis struct #proj_ident #proj_generics #where_clause_fields + }); + generate.extend(cx.project_ref, quote! { + #proj_ref_attrs + #vis struct #proj_ref_ident #proj_generics #where_clause_ref_fields + }); + if cx.project_replace.span().is_some() { + generate.extend(cx.project_replace.ident().is_some(), quote! { + #proj_own_attrs + #vis struct #proj_own_ident #orig_generics #where_clause_own_fields + }); } - /// Returns attributes used on projected types. - fn proj_attrs(&self) -> (TokenStream, TokenStream, TokenStream) { - // If the user gave it a name, it should appear in the document. 
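To make the field-type and where-clause juggling above concrete, here is a simplified, hand-written sketch of the shape `parse_struct` produces for a small named struct. It is not the exact macro output (the real names are prefixed and hidden, and the generated items carry extra bounds and lint attributes); `Example` and its fields are invented for the illustration.

```rust
use std::pin::Pin;

struct Example<T, U> {
    fut: T,   // imagine this field carries #[pin]
    extra: U, // unpinned field
}

// Mutable projection: pinned fields become `Pin<&'pin mut T>`, unpinned `&'pin mut U`.
struct ExampleProjection<'pin, T, U> {
    fut: Pin<&'pin mut T>,
    extra: &'pin mut U,
}

// Immutable projection: the same shape with shared references.
#[allow(dead_code)]
struct ExampleProjectionRef<'pin, T, U> {
    fut: Pin<&'pin T>,
    extra: &'pin U,
}

impl<T, U> Example<T, U> {
    fn project<'pin>(self: Pin<&'pin mut Self>) -> ExampleProjection<'pin, T, U> {
        // SAFETY: mirrors the structural-pinning rules the real macro enforces:
        // `fut` is never moved out of or exposed unpinned, `extra` is never pinned.
        unsafe {
            let this = self.get_unchecked_mut();
            ExampleProjection {
                fut: Pin::new_unchecked(&mut this.fut),
                extra: &mut this.extra,
            }
        }
    }
}

fn main() {
    let mut ex = Example { fut: 7_u32, extra: String::from("meta") };
    // `Example<u32, String>` is `Unpin`, so `Pin::new` suffices for the demo.
    let proj = Pin::new(&mut ex).project();
    *proj.extra = String::from("updated");
    let _pinned_field: Pin<&mut u32> = proj.fut;
}
```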
- let doc_attr = quote!(#[doc(hidden)]); - let doc_proj = if self.project { None } else { Some(&doc_attr) }; - let doc_proj_ref = if self.project_ref { None } else { Some(&doc_attr) }; - let doc_proj_own = - if self.project_replace.ident().is_some() { None } else { Some(&doc_attr) }; + let proj_mut_body = quote! { + let Self #proj_pat = self.get_unchecked_mut(); + #proj_ident #proj_body + }; + let proj_ref_body = quote! { + let Self #proj_pat = self.get_ref(); + #proj_ref_ident #proj_body + }; + let proj_own_body = quote! { + let Self #proj_pat = &mut *__self_ptr; + #proj_own_body + }; + generate.extend(false, make_proj_impl(cx, &proj_mut_body, &proj_ref_body, &proj_own_body)); - let proj_mut = quote! { - #doc_proj - #[allow(dead_code)] // This lint warns unused fields/variants. - #[allow(single_use_lifetimes)] // https://github.com/rust-lang/rust/issues/55058 - #[allow(clippy::mut_mut)] // This lint warns `&mut &mut `. - #[allow(clippy::type_repetition_in_bounds)] // https://github.com/rust-lang/rust-clippy/issues/4326} - }; - let proj_ref = quote! { - #doc_proj_ref - #[allow(dead_code)] // This lint warns unused fields/variants. - #[allow(single_use_lifetimes)] // https://github.com/rust-lang/rust/issues/55058 - #[allow(clippy::type_repetition_in_bounds)] // https://github.com/rust-lang/rust-clippy/issues/4326 - }; - let proj_own = quote! { - #doc_proj_own - #[allow(dead_code)] // This lint warns unused fields/variants. - #[allow(single_use_lifetimes)] // https://github.com/rust-lang/rust/issues/55058 - #[allow(unreachable_pub)] // This lint warns `pub` field in private struct. - }; - (proj_mut, proj_ref, proj_own) + generate.extend(false, packed_check); + Ok(()) +} + +fn parse_enum<'a>( + cx: &mut Context<'a>, + DataEnum { brace_token, variants, .. }: &'a DataEnum, + generate: &mut GenerateTokens, +) -> Result<()> { + if let ProjReplace::Unnamed { span } = &cx.project_replace { + return Err(Error::new( + *span, + "`project_replace` argument requires a value when used on enums", + )); } - fn parse_struct( - &mut self, - DataStruct { fields, .. }: &DataStruct, - ) -> Result<(TokenStream, TokenStream)> { - validate_struct(self.orig.ident, fields)?; + // #[repr(packed)] cannot be apply on enums and will be rejected by rustc. + // However, we should not rely on the behavior of rustc that rejects this. + // https://github.com/taiki-e/pin-project/pull/324#discussion_r612388001 + // + // Do this first for a better error message. + ensure_not_packed(&cx.orig, None)?; + validate_enum(*brace_token, variants)?; + + let ProjectedVariants { + proj_variants, + proj_ref_variants, + proj_own_variants, + proj_arms, + proj_ref_arms, + proj_own_arms, + } = visit_variants(cx, variants)?; + + let proj_ident = &cx.proj.mut_ident; + let proj_ref_ident = &cx.proj.ref_ident; + let proj_own_ident = &cx.proj.own_ident; + let vis = &cx.proj.vis; + let mut orig_generics = cx.orig.generics.clone(); + let orig_where_clause = orig_generics.where_clause.take(); + let proj_generics = &cx.proj.generics; + let proj_where_clause = &cx.proj.where_clause; + + let (proj_attrs, proj_ref_attrs, proj_own_attrs) = proj_allowed_lints(cx); + if cx.project { + generate.extend(true, quote! { + #proj_attrs + #vis enum #proj_ident #proj_generics #proj_where_clause { + #proj_variants + } + }); + } + if cx.project_ref { + generate.extend(true, quote! 
{ + #proj_ref_attrs + #vis enum #proj_ref_ident #proj_generics #proj_where_clause { + #proj_ref_variants + } + }); + } + if cx.project_replace.ident().is_some() { + generate.extend(true, quote! { + #proj_own_attrs + #vis enum #proj_own_ident #orig_generics #orig_where_clause { + #proj_own_variants + } + }); + } + + let proj_mut_body = quote! { + match self.get_unchecked_mut() { + #proj_arms + } + }; + let proj_ref_body = quote! { + match self.get_ref() { + #proj_ref_arms + } + }; + let proj_own_body = quote! { + match &mut *__self_ptr { + #proj_own_arms + } + }; + generate.extend(false, make_proj_impl(cx, &proj_mut_body, &proj_ref_body, &proj_own_body)); + + Ok(()) +} + +fn visit_variants<'a>(cx: &mut Context<'a>, variants: &'a Variants) -> Result { + let mut proj_variants = TokenStream::new(); + let mut proj_ref_variants = TokenStream::new(); + let mut proj_own_variants = TokenStream::new(); + let mut proj_arms = TokenStream::new(); + let mut proj_ref_arms = TokenStream::new(); + let mut proj_own_arms = TokenStream::new(); + + for Variant { ident, fields, .. } in variants { let ProjectedFields { proj_pat, proj_body, @@ -538,710 +510,615 @@ impl<'a> Context<'a> { proj_own_fields, proj_own_body, } = match fields { - Fields::Named(_) => self.visit_fields(None, fields, Delimiter::Brace)?, - Fields::Unnamed(_) => self.visit_fields(None, fields, Delimiter::Parenthesis)?, - Fields::Unit => unreachable!(), + Fields::Named(_) => visit_fields(cx, Some(ident), fields, Delimiter::Brace)?, + Fields::Unnamed(_) => visit_fields(cx, Some(ident), fields, Delimiter::Parenthesis)?, + Fields::Unit => ProjectedFields { + proj_own_body: proj_own_body(cx, Some(ident), None, &[]), + ..ProjectedFields::default() + }, }; - let proj_ident = &self.proj.mut_ident; - let proj_ref_ident = &self.proj.ref_ident; - let proj_own_ident = &self.proj.own_ident; - let vis = &self.proj.vis; - let mut orig_generics = self.orig.generics.clone(); - let orig_where_clause = orig_generics.where_clause.take(); - let proj_generics = &self.proj.generics; - let proj_where_clause = &self.proj.where_clause; - - // For tuple structs, we need to generate `(T1, T2) where Foo: Bar` - // For non-tuple structs, we need to generate `where Foo: Bar { field1: T }` - let (where_clause_fields, where_clause_ref_fields, where_clause_own_fields) = match fields { - Fields::Named(_) => ( - quote!(#proj_where_clause #proj_fields), - quote!(#proj_where_clause #proj_ref_fields), - quote!(#orig_where_clause #proj_own_fields), - ), - Fields::Unnamed(_) => ( - quote!(#proj_fields #proj_where_clause;), - quote!(#proj_ref_fields #proj_where_clause;), - quote!(#proj_own_fields #orig_where_clause;), - ), - Fields::Unit => unreachable!(), - }; - - let (proj_attrs, proj_ref_attrs, proj_own_attrs) = self.proj_attrs(); - let mut proj_items = quote! { - #proj_attrs - #vis struct #proj_ident #proj_generics #where_clause_fields - #proj_ref_attrs - #vis struct #proj_ref_ident #proj_generics #where_clause_ref_fields - }; - if self.project_replace.span().is_some() { - proj_items.extend(quote! { - #proj_own_attrs - #vis struct #proj_own_ident #orig_generics #where_clause_own_fields - }); - } - - let proj_mut_body = quote! { - let Self #proj_pat = self.get_unchecked_mut(); - #proj_ident #proj_body - }; - let proj_ref_body = quote! { - let Self #proj_pat = self.get_ref(); - #proj_ref_ident #proj_body - }; - let proj_own_body = quote! 
{ - let __self_ptr: *mut Self = self.get_unchecked_mut(); - let Self #proj_pat = &mut *__self_ptr; - #proj_own_body - }; - let proj_impl = self.make_proj_impl(&proj_mut_body, &proj_ref_body, &proj_own_body); - - Ok((proj_items, proj_impl)) + let proj_ident = &cx.proj.mut_ident; + let proj_ref_ident = &cx.proj.ref_ident; + proj_variants.extend(quote! { + #ident #proj_fields, + }); + proj_ref_variants.extend(quote! { + #ident #proj_ref_fields, + }); + proj_own_variants.extend(quote! { + #ident #proj_own_fields, + }); + proj_arms.extend(quote! { + Self::#ident #proj_pat => #proj_ident::#ident #proj_body, + }); + proj_ref_arms.extend(quote! { + Self::#ident #proj_pat => #proj_ref_ident::#ident #proj_body, + }); + proj_own_arms.extend(quote! { + Self::#ident #proj_pat => { #proj_own_body } + }); } - fn parse_enum( - &mut self, - DataEnum { brace_token, variants, .. }: &DataEnum, - ) -> Result<(TokenStream, TokenStream)> { - validate_enum(*brace_token, variants)?; + Ok(ProjectedVariants { + proj_variants, + proj_ref_variants, + proj_own_variants, + proj_arms, + proj_ref_arms, + proj_own_arms, + }) +} - let ProjectedVariants { - proj_variants, - proj_ref_variants, - proj_own_variants, - proj_arms, - proj_ref_arms, - proj_own_arms, - } = self.visit_variants(variants)?; - - let proj_ident = &self.proj.mut_ident; - let proj_ref_ident = &self.proj.ref_ident; - let proj_own_ident = &self.proj.own_ident; - let vis = &self.proj.vis; - let mut orig_generics = self.orig.generics.clone(); - let orig_where_clause = orig_generics.where_clause.take(); - let proj_generics = &self.proj.generics; - let proj_where_clause = &self.proj.where_clause; - - let (proj_attrs, proj_ref_attrs, proj_own_attrs) = self.proj_attrs(); - let mut proj_items = quote! { - #proj_attrs - #vis enum #proj_ident #proj_generics #proj_where_clause { - #proj_variants - } - #proj_ref_attrs - #vis enum #proj_ref_ident #proj_generics #proj_where_clause { - #proj_ref_variants - } - }; - if self.project_replace.span().is_some() { - proj_items.extend(quote! { - #proj_own_attrs - #vis enum #proj_own_ident #orig_generics #orig_where_clause { - #proj_own_variants - } - }); - } - - let proj_mut_body = quote! { - match self.get_unchecked_mut() { - #proj_arms - } - }; - let proj_ref_body = quote! { - match self.get_ref() { - #proj_ref_arms - } - }; - let proj_own_body = quote! { - let __self_ptr: *mut Self = self.get_unchecked_mut(); - match &mut *__self_ptr { - #proj_own_arms - } - }; - let proj_impl = self.make_proj_impl(&proj_mut_body, &proj_ref_body, &proj_own_body); - - Ok((proj_items, proj_impl)) +fn visit_fields<'a>( + cx: &mut Context<'a>, + variant_ident: Option<&Ident>, + fields: &'a Fields, + delim: Delimiter, +) -> Result { + fn surround(delim: Delimiter, tokens: TokenStream) -> TokenStream { + Group::new(delim, tokens).into_token_stream() } - fn visit_variants(&mut self, variants: &Variants) -> Result { - let mut proj_variants = TokenStream::new(); - let mut proj_ref_variants = TokenStream::new(); - let mut proj_own_variants = TokenStream::new(); - let mut proj_arms = TokenStream::new(); - let mut proj_ref_arms = TokenStream::new(); - let mut proj_own_arms = TokenStream::new(); + let mut proj_pat = TokenStream::new(); + let mut proj_body = TokenStream::new(); + let mut proj_fields = TokenStream::new(); + let mut proj_ref_fields = TokenStream::new(); + let mut proj_own_fields = TokenStream::new(); + let mut proj_move = TokenStream::new(); + let mut pinned_bindings = Vec::with_capacity(fields.len()); - for Variant { ident, fields, .. 
} in variants { - let ProjectedFields { - proj_pat, - proj_body, - proj_fields, - proj_ref_fields, - proj_own_fields, - proj_own_body, - } = match fields { - Fields::Named(_) => self.visit_fields(Some(ident), fields, Delimiter::Brace)?, - Fields::Unnamed(_) => { - self.visit_fields(Some(ident), fields, Delimiter::Parenthesis)? - } - Fields::Unit => ProjectedFields { - proj_own_body: self.proj_own_body(Some(ident), None, &[]), - ..ProjectedFields::default() - }, - }; - - let orig_ident = self.orig.ident; - let proj_ident = &self.proj.mut_ident; - let proj_ref_ident = &self.proj.ref_ident; - proj_variants.extend(quote! { - #ident #proj_fields, + for (i, Field { attrs, vis, ident, colon_token, ty }) in fields.iter().enumerate() { + let binding = ident.clone().unwrap_or_else(|| format_ident!("_{}", i)); + proj_pat.extend(quote!(#binding,)); + let lifetime = &cx.proj.lifetime; + if attrs.position_exact(PIN)?.is_some() { + proj_fields.extend(quote! { + #vis #ident #colon_token ::pin_project::__private::Pin<&#lifetime mut (#ty)>, }); - proj_ref_variants.extend(quote! { - #ident #proj_ref_fields, + proj_ref_fields.extend(quote! { + #vis #ident #colon_token ::pin_project::__private::Pin<&#lifetime (#ty)>, }); - proj_own_variants.extend(quote! { - #ident #proj_own_fields, + proj_own_fields.extend(quote! { + #vis #ident #colon_token ::pin_project::__private::PhantomData<#ty>, }); - proj_arms.extend(quote! { - #orig_ident::#ident #proj_pat => { - #proj_ident::#ident #proj_body - } + proj_body.extend(quote! { + #ident #colon_token _pin_project::__private::Pin::new_unchecked(#binding), }); - proj_ref_arms.extend(quote! { - #orig_ident::#ident #proj_pat => { - #proj_ref_ident::#ident #proj_body - } + proj_move.extend(quote! { + #ident #colon_token _pin_project::__private::PhantomData, }); - proj_own_arms.extend(quote! { - #orig_ident::#ident #proj_pat => { - #proj_own_body - } + + cx.pinned_fields.push(ty); + pinned_bindings.push(binding); + } else { + proj_fields.extend(quote! { + #vis #ident #colon_token &#lifetime mut (#ty), + }); + proj_ref_fields.extend(quote! { + #vis #ident #colon_token &#lifetime (#ty), + }); + proj_own_fields.extend(quote! { + #vis #ident #colon_token #ty, + }); + proj_body.extend(quote! { + #binding, + }); + proj_move.extend(quote! { + #ident #colon_token _pin_project::__private::ptr::read(#binding), }); - } - - Ok(ProjectedVariants { - proj_variants, - proj_ref_variants, - proj_own_variants, - proj_arms, - proj_ref_arms, - proj_own_arms, - }) - } - - fn visit_fields( - &mut self, - variant_ident: Option<&Ident>, - fields: &Fields, - delim: Delimiter, - ) -> Result { - let mut proj_pat = TokenStream::new(); - let mut proj_body = TokenStream::new(); - let mut proj_fields = TokenStream::new(); - let mut proj_ref_fields = TokenStream::new(); - let mut proj_own_fields = TokenStream::new(); - let mut proj_move = TokenStream::new(); - let mut pinned_bindings = Vec::with_capacity(fields.len()); - - for (i, Field { attrs, vis, ident, colon_token, ty, .. }) in fields.iter().enumerate() { - let binding = ident.clone().unwrap_or_else(|| format_ident!("_{}", i)); - proj_pat.extend(quote!(#binding,)); - if attrs.position_exact(PIN)?.is_some() { - let lifetime = &self.proj.lifetime; - proj_fields.extend(quote! { - #vis #ident #colon_token ::pin_project::__private::Pin<&#lifetime mut (#ty)>, - }); - proj_ref_fields.extend(quote! { - #vis #ident #colon_token ::pin_project::__private::Pin<&#lifetime (#ty)>, - }); - proj_own_fields.extend(quote! 
{ - #vis #ident #colon_token ::pin_project::__private::PhantomData<#ty>, - }); - proj_body.extend(quote! { - #ident #colon_token ::pin_project::__private::Pin::new_unchecked(#binding), - }); - proj_move.extend(quote! { - #ident #colon_token ::pin_project::__private::PhantomData, - }); - - self.pinned_fields.push(ty.clone()); - pinned_bindings.push(binding); - } else { - let lifetime = &self.proj.lifetime; - proj_fields.extend(quote! { - #vis #ident #colon_token &#lifetime mut (#ty), - }); - proj_ref_fields.extend(quote! { - #vis #ident #colon_token &#lifetime (#ty), - }); - proj_own_fields.extend(quote! { - #vis #ident #colon_token #ty, - }); - proj_body.extend(quote! { - #binding, - }); - proj_move.extend(quote! { - #ident #colon_token ::pin_project::__private::ptr::read(#binding), - }); - } - } - - fn surround(delim: Delimiter, tokens: TokenStream) -> TokenStream { - Group::new(delim, tokens).into_token_stream() - } - - let proj_pat = surround(delim, proj_pat); - let proj_body = surround(delim, proj_body); - let proj_fields = surround(delim, proj_fields); - let proj_ref_fields = surround(delim, proj_ref_fields); - let proj_own_fields = surround(delim, proj_own_fields); - - let proj_move = Group::new(delim, proj_move); - let proj_own_body = self.proj_own_body(variant_ident, Some(proj_move), &pinned_bindings); - - Ok(ProjectedFields { - proj_pat, - proj_body, - proj_own_body, - proj_fields, - proj_ref_fields, - proj_own_fields, - }) - } - - /// Generates the processing that `project_replace` does for the struct or each variant. - /// - /// Note: `pinned_fields` must be in declaration order. - fn proj_own_body( - &self, - variant_ident: Option<&'a Ident>, - proj_move: Option, - pinned_fields: &[Ident], - ) -> TokenStream { - let ident = &self.proj.own_ident; - let proj_own = match variant_ident { - Some(variant_ident) => quote!(#ident::#variant_ident), - None => quote!(#ident), - }; - // The fields of the struct and the active enum variant are dropped - // in declaration order. - // Refs: https://doc.rust-lang.org/reference/destructors.html - let pinned_fields = pinned_fields.iter().rev(); - - quote! { - // First, extract all the unpinned fields. - let __result = #proj_own #proj_move; - - // Destructors will run in reverse order, so next create a guard to overwrite - // `self` with the replacement value without calling destructors. - let __guard = ::pin_project::__private::UnsafeOverwriteGuard { - target: __self_ptr, - value: ::pin_project::__private::ManuallyDrop::new(__replacement), - }; - - // Now create guards to drop all the pinned fields. - // - // Due to a compiler bug (https://github.com/rust-lang/rust/issues/47949) - // this must be in its own scope, or else `__result` will not be dropped - // if any of the destructors panic. - { - #( - let __guard = ::pin_project::__private::UnsafeDropInPlaceGuard(#pinned_fields); - )* - } - - // Finally, return the result. - __result } } - /// Creates `Unpin` implementation for original type. - fn make_unpin_impl(&self) -> TokenStream { - match self.unpin_impl { - UnpinImpl::Unsafe(span) => { - let mut proj_generics = self.proj.generics.clone(); - let orig_ident = self.orig.ident; - let lifetime = &self.proj.lifetime; + let proj_pat = surround(delim, proj_pat); + let proj_body = surround(delim, proj_body); + let proj_fields = surround(delim, proj_fields); + let proj_ref_fields = surround(delim, proj_ref_fields); + let proj_own_fields = surround(delim, proj_own_fields); - // Make the error message highlight `UnsafeUnpin` argument. 
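The `UnsafeUnpin` branch that begins here relies on the user supplying their own `unsafe impl`. A hedged sketch of that user-facing contract: the `Guarded` type is invented for the example; only the `pin_project::UnsafeUnpin` trait itself is the library's documented API.

```rust
use pin_project::{pin_project, UnsafeUnpin};

#[pin_project(UnsafeUnpin)]
struct Guarded<K> {
    #[pin]
    field: K,
}

// With the `UnsafeUnpin` strategy, `Guarded<K>: Unpin` only holds when an impl like
// this one applies; whoever writes it takes over the pinning guarantees.
unsafe impl<K: Unpin> UnsafeUnpin for Guarded<K> {}

fn assert_unpin<T: Unpin>() {}

fn main() {
    assert_unpin::<Guarded<u32>>();
}
```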
- proj_generics.make_where_clause().predicates.push(parse_quote_spanned! { span => - ::pin_project::__private::Wrapper<#lifetime, Self>: ::pin_project::UnsafeUnpin - }); + let proj_move = Group::new(delim, proj_move); + let proj_own_body = proj_own_body(cx, variant_ident, Some(&proj_move), &pinned_bindings); - let (impl_generics, _, where_clause) = proj_generics.split_for_impl(); - let ty_generics = self.orig.generics.split_for_impl().1; + Ok(ProjectedFields { + proj_pat, + proj_body, + proj_own_body, + proj_fields, + proj_ref_fields, + proj_own_fields, + }) +} - quote_spanned! { span => - impl #impl_generics ::pin_project::__private::Unpin for #orig_ident #ty_generics - #where_clause - { - } - } - } - UnpinImpl::Negative(span) => { - let mut proj_generics = self.proj.generics.clone(); - let orig_ident = self.orig.ident; - let lifetime = &self.proj.lifetime; +/// Generates the processing that `project_replace` does for the struct or each variant. +/// +/// Note: `pinned_fields` must be in declaration order. +fn proj_own_body( + cx: &Context<'_>, + variant_ident: Option<&Ident>, + proj_move: Option<&Group>, + pinned_fields: &[Ident], +) -> TokenStream { + let ident = &cx.proj.own_ident; + let proj_own = match variant_ident { + Some(variant_ident) => quote!(#ident::#variant_ident), + None => quote!(#ident), + }; - proj_generics.make_where_clause().predicates.push(parse_quote! { - ::pin_project::__private::Wrapper< - #lifetime, ::pin_project::__private::PhantomPinned - >: ::pin_project::__private::Unpin - }); + // The fields of the struct and the active enum variant are dropped + // in declaration order. + // Refs: https://doc.rust-lang.org/reference/destructors.html + let pinned_fields = pinned_fields.iter().rev(); - let (proj_impl_generics, _, proj_where_clause) = proj_generics.split_for_impl(); - let (impl_generics, ty_generics, orig_where_clause) = - self.orig.generics.split_for_impl(); + quote! { + // First, extract all the unpinned fields. + let __result = #proj_own #proj_move; - // For interoperability with `forbid(unsafe_code)`, `unsafe` token should be - // call-site span. - let unsafety = ::default(); - quote_spanned! { span => - impl #proj_impl_generics ::pin_project::__private::Unpin - for #orig_ident #ty_generics - #proj_where_clause - { - } + // Now create guards to drop all the pinned fields. + // + // Due to a compiler bug (https://github.com/rust-lang/rust/issues/47949) + // this must be in its own scope, or else `__result` will not be dropped + // if any of the destructors panic. + { + #( + let __guard = _pin_project::__private::UnsafeDropInPlaceGuard::new(#pinned_fields); + )* + } - // A dummy impl of `UnsafeUnpin`, to ensure that the user cannot implement it. - // - // To ensure that users don't accidentally write a non-functional `UnsafeUnpin` - // impls, we emit one ourselves. If the user ends up writing an `UnsafeUnpin` - // impl, they'll get a "conflicting implementations of trait" error when - // coherence checks are run. - #unsafety impl #impl_generics ::pin_project::UnsafeUnpin - for #orig_ident #ty_generics - #orig_where_clause - { - } - } - } - UnpinImpl::Default => { - let mut full_where_clause = self.orig.generics.where_clause.clone().unwrap(); + // Finally, return the result. + __result + } +} - // Generate a field in our new struct for every - // pinned field in the original type. 
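`proj_own_body` above is what backs the generated `project_replace` method: the replacement is written behind the pin via an overwrite guard, and the old pinned fields are dropped in place, in declaration order. A hedged usage sketch of that method; `Slot` and its fields are invented for the example.

```rust
use std::pin::Pin;

use pin_project::pin_project;

#[pin_project(project_replace)]
struct Slot<T> {
    #[pin]
    inner: T,
    generation: u32,
}

fn reset(slot: Pin<&mut Slot<String>>) -> u32 {
    // `project_replace` drops the old pinned field in place and writes the
    // replacement behind the pin, without ever moving the pinned data.
    let old = slot.project_replace(Slot { inner: String::new(), generation: 0 });
    // In the owned projection, pinned fields come back as `PhantomData`;
    // unpinned fields (like `generation`) are moved out intact.
    old.generation
}

fn main() {
    let mut slot = Slot { inner: String::from("hello"), generation: 7 };
    // `Slot<String>` is `Unpin`, so `Pin::new` suffices for the demo.
    assert_eq!(reset(Pin::new(&mut slot)), 7);
}
```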
- let fields = self.pinned_fields.iter().enumerate().map(|(i, ty)| { - let field_ident = format_ident!("__field{}", i); - quote!(#field_ident: #ty) - }); +/// Creates `Unpin` implementation for the original type. +/// +/// The kind of `Unpin` impl generated depends on `unpin_impl` field: +/// - `UnpinImpl::Unsafe` - Implements `Unpin` via `UnsafeUnpin` impl. +/// - `UnpinImpl::Negative` - Generates `Unpin` impl with bounds that will never be true. +/// - `UnpinImpl::Default` - Generates `Unpin` impl that requires `Unpin` for all pinned fields. +fn make_unpin_impl(cx: &Context<'_>) -> TokenStream { + match cx.unpin_impl { + UnpinImpl::Unsafe(span) => { + let mut proj_generics = cx.proj.generics.clone(); + let orig_ident = cx.orig.ident; + let lifetime = &cx.proj.lifetime; - // We could try to determine the subset of type parameters - // and lifetimes that are actually used by the pinned fields - // (as opposed to those only used by unpinned fields). - // However, this would be tricky and error-prone, since - // it's possible for users to create types that would alias - // with generic parameters (e.g. 'struct T'). - // - // Instead, we generate a use of every single type parameter - // and lifetime used in the original struct. For type parameters, - // we generate code like this: - // - // ```rust - // struct AlwaysUnpin(PhantomData) {} - // impl Unpin for AlwaysUnpin {} - // - // ... - // _field: AlwaysUnpin<(A, B, C)> - // ``` - // - // This ensures that any unused type parameters - // don't end up with `Unpin` bounds. - let lifetime_fields = self.orig.generics.lifetimes().enumerate().map( - |(i, LifetimeDef { lifetime, .. })| { - let field_ident = format_ident!("__lifetime{}", i); - quote!(#field_ident: &#lifetime ()) - }, - ); + // Make the error message highlight `UnsafeUnpin` argument. + proj_generics.make_where_clause().predicates.push(parse_quote_spanned! { span => + _pin_project::__private::Wrapper<#lifetime, Self>: _pin_project::UnsafeUnpin + }); - let orig_ident = self.orig.ident; - let struct_ident = format_ident!("__{}", orig_ident); - let vis = self.orig.vis; - let lifetime = &self.proj.lifetime; - let type_params = self.orig.generics.type_params().map(|t| &t.ident); - let proj_generics = &self.proj.generics; - let (proj_impl_generics, proj_ty_generics, _) = proj_generics.split_for_impl(); - let (impl_generics, ty_generics, where_clause) = - self.orig.generics.split_for_impl(); + let (impl_generics, _, where_clause) = proj_generics.split_for_impl(); + let ty_generics = cx.orig.generics.split_for_impl().1; - full_where_clause.predicates.push(parse_quote! { - #struct_ident #proj_ty_generics: ::pin_project::__private::Unpin - }); - - quote! { - // This needs to have the same visibility as the original type, - // due to the limitations of the 'public in private' error. - // - // Our goal is to implement the public trait `Unpin` for - // a potentially public user type. Because of this, rust - // requires that any types mentioned in the where clause of - // our `Unpin` impl also be public. This means that our generated - // `__UnpinStruct` type must also be public. - // However, we ensure that the user can never actually reference - // this 'public' type by creating this type in the inside of `const`. 
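As a rough sketch of the "public type hidden inside an unnamed const" technique described in the comment above, written by hand and heavily simplified: `Demo` and `__DemoUnpinHelper` are invented names, and the real expansion adds many more bounds and lint attributes.

```rust
use std::marker::PhantomData;
use std::pin::Pin;

pub struct Demo<T> {
    fut: T, // imagine this field carries #[pin]
}

const _: () = {
    // `pub` in name only: nothing outside this `const` block can spell this type,
    // yet it is public enough to appear in the public `Unpin` impl below.
    #[allow(dead_code)]
    pub struct __DemoUnpinHelper<'pin, T> {
        __lifetime: PhantomData<&'pin ()>,
        __field0: T, // one field per pinned field of `Demo`
    }

    // `Demo<T>` is `Unpin` exactly when all of its pinned field types are.
    impl<'pin, T> Unpin for Demo<T> where __DemoUnpinHelper<'pin, T>: Unpin {}
};

fn assert_unpin<T: Unpin>(_: &T) {}

fn main() {
    let mut value = Demo { fut: 0_u32 };
    assert_unpin(&value);
    // `Pin::new` requires `Unpin`, which holds here because `u32: Unpin`.
    let _pinned: Pin<&mut Demo<u32>> = Pin::new(&mut value);
}
```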
- #vis struct #struct_ident #proj_generics #where_clause { - __pin_project_use_generics: ::pin_project::__private::AlwaysUnpin< - #lifetime, (#(::pin_project::__private::PhantomData<#type_params>),*) - >, - - #(#fields,)* - #(#lifetime_fields,)* - } - - impl #proj_impl_generics ::pin_project::__private::Unpin - for #orig_ident #ty_generics - #full_where_clause - { - } - - // A dummy impl of `UnsafeUnpin`, to ensure that the user cannot implement it. - // - // To ensure that users don't accidentally write a non-functional `UnsafeUnpin` - // impls, we emit one ourselves. If the user ends up writing an `UnsafeUnpin` - // impl, they'll get a "conflicting implementations of trait" error when - // coherence checks are run. - unsafe impl #impl_generics ::pin_project::UnsafeUnpin - for #orig_ident #ty_generics - #where_clause - { - } + quote_spanned! { span => + impl #impl_generics _pin_project::__private::Unpin for #orig_ident #ty_generics + #where_clause + { } } } - } + UnpinImpl::Negative(span) => { + let mut proj_generics = cx.proj.generics.clone(); + let orig_ident = cx.orig.ident; + let lifetime = &cx.proj.lifetime; - /// Creates `Drop` implementation for original type. - fn make_drop_impl(&self) -> TokenStream { - let ident = self.orig.ident; - let (impl_generics, ty_generics, where_clause) = self.orig.generics.split_for_impl(); + proj_generics.make_where_clause().predicates.push(parse_quote! { + _pin_project::__private::Wrapper< + #lifetime, _pin_project::__private::PhantomPinned + >: _pin_project::__private::Unpin + }); + + let (proj_impl_generics, _, proj_where_clause) = proj_generics.split_for_impl(); + let ty_generics = cx.orig.generics.split_for_impl().1; - if let Some(span) = self.pinned_drop { // For interoperability with `forbid(unsafe_code)`, `unsafe` token should be // call-site span. let unsafety = ::default(); quote_spanned! { span => - impl #impl_generics ::pin_project::__private::Drop for #ident #ty_generics - #where_clause + impl #proj_impl_generics _pin_project::__private::Unpin + for #orig_ident #ty_generics + #proj_where_clause { - fn drop(&mut self) { + } + + // Generate a dummy impl of `UnsafeUnpin`, to ensure that the user cannot implement it. + // + // To ensure that users don't accidentally write a non-functional `UnsafeUnpin` + // impls, we emit one ourselves. If the user ends up writing an `UnsafeUnpin` + // impl, they'll get a "conflicting implementations of trait" error when + // coherence checks are run. + #[doc(hidden)] + #unsafety impl #proj_impl_generics _pin_project::UnsafeUnpin + for #orig_ident #ty_generics + #proj_where_clause + { + } + } + } + UnpinImpl::Default => { + let mut full_where_clause = cx.orig.generics.where_clause.clone().unwrap(); + + // Generate a field in our new struct for every + // pinned field in the original type. + let fields = cx.pinned_fields.iter().enumerate().map(|(i, ty)| { + let field_ident = format_ident!("__field{}", i); + quote!(#field_ident: #ty) + }); + + // We could try to determine the subset of type parameters + // and lifetimes that are actually used by the pinned fields + // (as opposed to those only used by unpinned fields). + // However, this would be tricky and error-prone, since + // it's possible for users to create types that would alias + // with generic parameters (e.g. 'struct T'). + // + // Instead, we generate a use of every single type parameter + // and lifetime used in the original struct. 
For type parameters, + // we generate code like this: + // + // ```rust + // struct AlwaysUnpin(PhantomData) {} + // impl Unpin for AlwaysUnpin {} + // + // ... + // _field: AlwaysUnpin<(A, B, C)> + // ``` + // + // This ensures that any unused type parameters + // don't end up with `Unpin` bounds. + let lifetime_fields = cx.orig.generics.lifetimes().enumerate().map( + |(i, LifetimeDef { lifetime, .. })| { + let field_ident = format_ident!("__lifetime{}", i); + quote!(#field_ident: &#lifetime ()) + }, + ); + + let orig_ident = cx.orig.ident; + let struct_ident = format_ident!("__{}", orig_ident); + let vis = cx.orig.vis; + let lifetime = &cx.proj.lifetime; + let type_params = cx.orig.generics.type_params().map(|t| &t.ident); + let proj_generics = &cx.proj.generics; + let (proj_impl_generics, proj_ty_generics, _) = proj_generics.split_for_impl(); + let (_, ty_generics, where_clause) = cx.orig.generics.split_for_impl(); + + full_where_clause.predicates.push(parse_quote! { + #struct_ident #proj_ty_generics: _pin_project::__private::Unpin + }); + + quote! { + // This needs to have the same visibility as the original type, + // due to the limitations of the 'public in private' error. + // + // Our goal is to implement the public trait `Unpin` for + // a potentially public user type. Because of this, rust + // requires that any types mentioned in the where clause of + // our `Unpin` impl also be public. This means that our generated + // `__UnpinStruct` type must also be public. + // However, we ensure that the user can never actually reference + // this 'public' type by creating this type in the inside of `const`. + #[allow(missing_debug_implementations)] + #vis struct #struct_ident #proj_generics #where_clause { + __pin_project_use_generics: _pin_project::__private::AlwaysUnpin< + #lifetime, (#(_pin_project::__private::PhantomData<#type_params>),*) + >, + + #(#fields,)* + #(#lifetime_fields,)* + } + + impl #proj_impl_generics _pin_project::__private::Unpin + for #orig_ident #ty_generics + #full_where_clause + { + } + + // Generate a dummy impl of `UnsafeUnpin`, to ensure that the user cannot implement it. + // + // To ensure that users don't accidentally write a non-functional `UnsafeUnpin` + // impls, we emit one ourselves. If the user ends up writing an `UnsafeUnpin` + // impl, they'll get a "conflicting implementations of trait" error when + // coherence checks are run. + #[doc(hidden)] + unsafe impl #proj_impl_generics _pin_project::UnsafeUnpin + for #orig_ident #ty_generics + #full_where_clause + { + } + } + } + } +} + +/// Creates `Drop` implementation for the original type. +/// +/// The kind of `Drop` impl generated depends on `pinned_drop` field: +/// - `Some` - implements `Drop` via `PinnedDrop` impl. +/// - `None` - generates code that ensures that `Drop` trait is not implemented, +/// instead of generating `Drop` impl. +fn make_drop_impl(cx: &Context<'_>) -> TokenStream { + let ident = cx.orig.ident; + let (impl_generics, ty_generics, where_clause) = cx.orig.generics.split_for_impl(); + + if let Some(span) = cx.pinned_drop { + // For interoperability with `forbid(unsafe_code)`, `unsafe` token should be + // call-site span. + let unsafety = ::default(); + quote_spanned! { span => + impl #impl_generics _pin_project::__private::Drop for #ident #ty_generics + #where_clause + { + fn drop(&mut self) { + #unsafety { // Safety - we're in 'drop', so we know that 'self' will // never move again. 
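The `Drop` impl generated just above only forwards, once, to the user's `PinnedDrop` impl through `Pin::new_unchecked`. A hedged sketch of the user-facing side of that contract; the `Connection` type is invented, while the `#[pinned_drop]` attribute and `PinnedDrop` pattern follow pin-project's documented usage.

```rust
use std::pin::Pin;

use pin_project::{pin_project, pinned_drop};

#[pin_project(PinnedDrop)]
struct Connection {
    #[pin]
    state: String,
}

#[pinned_drop]
impl PinnedDrop for Connection {
    // Unlike `Drop::drop(&mut self)`, the receiver stays pinned here, so pinned
    // fields may still be projected (but never moved) during teardown.
    fn drop(self: Pin<&mut Self>) {
        println!("closing connection in state {:?}", self.state);
    }
}

fn main() {
    let _conn = Connection { state: String::from("open") };
    // When `_conn` goes out of scope, the generated `Drop` impl pins `self` and
    // calls the `PinnedDrop` impl above exactly once.
}
```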
- let pinned_self = #unsafety { - ::pin_project::__private::Pin::new_unchecked(self) - }; + let __pinned_self = _pin_project::__private::Pin::new_unchecked(self); // We call `pinned_drop` only once. Since `PinnedDrop::drop` // is an unsafe method and a private API, it is never called again in safe // code *unless the user uses a maliciously crafted macro*. - #unsafety { - ::pin_project::__private::PinnedDrop::drop(pinned_self); - } + _pin_project::__private::PinnedDrop::drop(__pinned_self); } } } - } else { - // If the user does not provide a `PinnedDrop` impl, - // we need to ensure that they don't provide a `Drop` impl of their - // own. - // Based on https://github.com/upsuper/assert-impl/blob/f503255b292ab0ba8d085b657f4065403cfa46eb/src/lib.rs#L80-L87 - // - // We create a new identifier for each struct, so that the traits - // for different types do not conflict with each other. - // - // Another approach would be to provide an empty Drop impl, - // which would conflict with a user-provided Drop impl. - // However, this would trigger the compiler's special handling - // of Drop types (e.g. fields cannot be moved out of a Drop type). - // This approach prevents the creation of needless Drop impls, - // giving users more flexibility. - let trait_ident = format_ident!("{}MustNotImplDrop", ident); - - quote! { - // There are two possible cases: - // 1. The user type does not implement Drop. In this case, - // the first blanked impl will not apply to it. This code - // will compile, as there is only one impl of MustNotImplDrop for the user type - // 2. The user type does impl Drop. This will make the blanket impl applicable, - // which will then conflict with the explicit MustNotImplDrop impl below. - // This will result in a compilation error, which is exactly what we want. - trait #trait_ident {} - #[allow(clippy::drop_bounds, drop_bounds)] - impl #trait_ident for T {} - impl #impl_generics #trait_ident for #ident #ty_generics #where_clause {} - - // A dummy impl of `PinnedDrop`, to ensure that the user cannot implement it. - // Since the user did not pass `PinnedDrop` to `#[pin_project]`, any `PinnedDrop` - // impl will not actually be called. Unfortunately, we can't detect this situation - // directly from either the `#[pin_project]` or `#[pinned_drop]` attributes, since - // we don't know what other attirbutes/impl may exist. - // - // To ensure that users don't accidentally write a non-functional `PinnedDrop` - // impls, we emit one ourselves. If the user ends up writing a `PinnedDrop` impl, - // they'll get a "conflicting implementations of trait" error when coherence - // checks are run. - impl #impl_generics ::pin_project::__private::PinnedDrop for #ident #ty_generics - #where_clause - { - unsafe fn drop(self: ::pin_project::__private::Pin<&mut Self>) {} - } - } } - } - - /// Creates an implementation of the projection method. 
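The `MustNotImplDrop` device described in the comments above also works outside the macro. A minimal standalone sketch, with invented names, showing why a user-written `Drop` impl turns into a coherence error:

```rust
struct Target {
    _payload: Vec<u8>,
}

trait TargetMustNotImplDrop {}

// Blanket impl: covers every type that implements `Drop`. (`drop_bounds` normally
// lints against using `Drop` as a bound, which is why the generated code allows it.)
#[allow(drop_bounds)]
impl<T: Drop> TargetMustNotImplDrop for T {}

// Specific impl: overlaps with the blanket impl if and only if `Target: Drop`.
impl TargetMustNotImplDrop for Target {}

// Uncommenting this makes the two impls above conflict, which is the intended error:
// impl Drop for Target {
//     fn drop(&mut self) {}
// }

fn main() {
    let _target = Target { _payload: Vec::new() };
}
```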
- fn make_proj_impl( - &self, - proj_body: &TokenStream, - proj_ref_body: &TokenStream, - proj_own_body: &TokenStream, - ) -> TokenStream { - let vis = &self.proj.vis; - let lifetime = &self.proj.lifetime; - let orig_ident = self.orig.ident; - let proj_ident = &self.proj.mut_ident; - let proj_ref_ident = &self.proj.ref_ident; - let proj_own_ident = &self.proj.own_ident; - - let orig_ty_generics = self.orig.generics.split_for_impl().1; - let proj_ty_generics = self.proj.generics.split_for_impl().1; - let (impl_generics, ty_generics, where_clause) = self.orig.generics.split_for_impl(); - - let replace_impl = self.project_replace.span().map(|span| { - // For interoperability with `forbid(unsafe_code)`, `unsafe` token should be - // call-site span. - let unsafety = ::default(); - quote_spanned! { span => - #vis fn project_replace( - self: ::pin_project::__private::Pin<&mut Self>, - __replacement: Self, - ) -> #proj_own_ident #orig_ty_generics { - #unsafety { - #proj_own_body - } - } - } - }); + } else { + // If the user does not provide a `PinnedDrop` impl, + // we need to ensure that they don't provide a `Drop` impl of their + // own. + // Based on https://github.com/upsuper/assert-impl/blob/f503255b292ab0ba8d085b657f4065403cfa46eb/src/lib.rs#L80-L87 + // + // We create a new identifier for each struct, so that the traits + // for different types do not conflict with each other. + // + // Another approach would be to provide an empty Drop impl, + // which would conflict with a user-provided Drop impl. + // However, this would trigger the compiler's special handling + // of Drop types (e.g. fields cannot be moved out of a Drop type). + // This approach prevents the creation of needless Drop impls, + // giving users more flexibility. + let trait_ident = format_ident!("{}MustNotImplDrop", ident); quote! { - impl #impl_generics #orig_ident #ty_generics #where_clause { - #vis fn project<#lifetime>( - self: ::pin_project::__private::Pin<&#lifetime mut Self>, - ) -> #proj_ident #proj_ty_generics { - unsafe { - #proj_body - } - } - #vis fn project_ref<#lifetime>( - self: ::pin_project::__private::Pin<&#lifetime Self>, - ) -> #proj_ref_ident #proj_ty_generics { - unsafe { - #proj_ref_body - } - } - #replace_impl + // There are two possible cases: + // 1. The user type does not implement Drop. In this case, + // the first blanked impl will not apply to it. This code + // will compile, as there is only one impl of MustNotImplDrop for the user type + // 2. The user type does impl Drop. This will make the blanket impl applicable, + // which will then conflict with the explicit MustNotImplDrop impl below. + // This will result in a compilation error, which is exactly what we want. + trait #trait_ident {} + #[allow(clippy::drop_bounds, drop_bounds)] + impl #trait_ident for T {} + impl #impl_generics #trait_ident for #ident #ty_generics #where_clause {} + + // Generate a dummy impl of `PinnedDrop`, to ensure that the user cannot implement it. + // Since the user did not pass `PinnedDrop` to `#[pin_project]`, any `PinnedDrop` + // impl will not actually be called. Unfortunately, we can't detect this situation + // directly from either the `#[pin_project]` or `#[pinned_drop]` attributes, since + // we don't know what other attributes/impl may exist. + // + // To ensure that users don't accidentally write a non-functional `PinnedDrop` + // impls, we emit one ourselves. 
If the user ends up writing a `PinnedDrop` impl, + // they'll get a "conflicting implementations of trait" error when coherence + // checks are run. + #[doc(hidden)] + impl #impl_generics _pin_project::__private::PinnedDrop for #ident #ty_generics + #where_clause + { + unsafe fn drop(self: _pin_project::__private::Pin<&mut Self>) {} } } } - - fn ensure_not_packed(&self, fields: &Fields) -> Result { - for meta in self.orig.attrs.iter().filter_map(|attr| attr.parse_meta().ok()) { - if let Meta::List(list) = meta { - if list.path.is_ident("repr") { - for repr in list.nested.iter() { - match repr { - NestedMeta::Meta(Meta::Path(path)) - | NestedMeta::Meta(Meta::List(MetaList { path, .. })) - if path.is_ident("packed") => - { - return Err(error!( - repr, - "#[pin_project] attribute may not be used on #[repr(packed)] types" - )); - } - _ => {} - } - } - } - } - } - - // Workaround for https://github.com/taiki-e/pin-project/issues/32 - // Through the tricky use of proc macros, it's possible to bypass - // the above check for the `repr` attribute. - // To ensure that it's impossible to use pin projections on a `#[repr(packed)]` - // struct, we generate code like this: - // - // ```rust - // #[forbid(unaligned_references)] - // fn assert_not_repr_packed(val: &MyStruct) { - // let _field1 = &val.field1; - // let _field2 = &val.field2; - // ... - // let _fieldn = &val.fieldn; - // } - // ``` - // - // Taking a reference to a packed field is UB, and applying - // `#[forbid(unaligned_references)]` makes sure that doing this is a hard error. - // - // If the struct ends up having `#[repr(packed)]` applied somehow, - // this will generate an (unfriendly) error message. Under all reasonable - // circumstances, we'll detect the `#[repr(packed)]` attribute, and generate - // a much nicer error above. - // - // There is one exception: If the type of a struct field has an alignment of 1 - // (e.g. u8), it is always safe to take a reference to it, even if the struct - // is `#[repr(packed)]`. If the struct is composed entirely of types of - // alignment 1, our generated method will not trigger an error if the - // struct is `#[repr(packed)]`. - // - // Fortunately, this should have no observable consequence - `#[repr(packed)]` - // is essentially a no-op on such a type. Nevertheless, we include a test - // to ensure that the compiler doesn't ever try to copy the fields on - // such a struct when trying to drop it - which is reason we prevent - // `#[repr(packed)]` in the first place. - // - // See also https://github.com/taiki-e/pin-project/pull/34. - // - // Note: - // - pin-project v0.4.3 or later (ttps://github.com/taiki-e/pin-project/pull/135, - // v0.4.0-v0.4.2 are already yanked for another reason) is internally - // proc-macro-derive, so they are not affected by the problem that the - // struct definition is rewritten by another macro after the - // #[pin_project] is expanded. So this is probably no longer necessary, - // but it keeps it for now. - // - // - Lint-based tricks aren't perfect, but they're much better than nothing: - // https://github.com/taiki-e/pin-project-lite/issues/26 - // - // - Forbid both unaligned_references and safe_packed_borrows lints - // because unaligned_references lint does not exist in older compilers: - // https://github.com/taiki-e/pin-project-lite/pull/55 - // https://github.com/rust-lang/rust/pull/82525 - let mut field_refs = vec![]; - match fields { - Fields::Named(FieldsNamed { named, .. }) => { - for Field { ident, .. 
} in named { - field_refs.push(quote!(&this.#ident)); - } - } - Fields::Unnamed(FieldsUnnamed { unnamed, .. }) => { - for (index, _) in unnamed.iter().enumerate() { - let index = Index::from(index); - field_refs.push(quote!(&this.#index)); - } - } - Fields::Unit => {} - } - - let (impl_generics, ty_generics, where_clause) = self.orig.generics.split_for_impl(); - let ident = self.orig.ident; - Ok(quote! { - #[forbid(unaligned_references, safe_packed_borrows)] - fn __assert_not_repr_packed #impl_generics (this: &#ident #ty_generics) #where_clause { - #(let _ = #field_refs;)* - } - }) - } +} + +/// Creates an implementation of the projection methods. +/// +/// On structs, both the `project` and `project_ref` methods are always generated, +/// and the `project_replace` method is only generated if `ProjReplace::span` is `Some`. +/// +/// On enums, only methods that the returned projected type is named will be generated. +fn make_proj_impl( + cx: &Context<'_>, + proj_body: &TokenStream, + proj_ref_body: &TokenStream, + proj_own_body: &TokenStream, +) -> TokenStream { + let vis = &cx.proj.vis; + let lifetime = &cx.proj.lifetime; + let orig_ident = cx.orig.ident; + let proj_ident = &cx.proj.mut_ident; + let proj_ref_ident = &cx.proj.ref_ident; + let proj_own_ident = &cx.proj.own_ident; + + let orig_ty_generics = cx.orig.generics.split_for_impl().1; + let proj_ty_generics = cx.proj.generics.split_for_impl().1; + let (impl_generics, ty_generics, where_clause) = cx.orig.generics.split_for_impl(); + + let mut project = Some(quote! { + #vis fn project<#lifetime>( + self: _pin_project::__private::Pin<&#lifetime mut Self>, + ) -> #proj_ident #proj_ty_generics { + unsafe { + #proj_body + } + } + }); + let mut project_ref = Some(quote! { + #[allow(clippy::missing_const_for_fn)] + #vis fn project_ref<#lifetime>( + self: _pin_project::__private::Pin<&#lifetime Self>, + ) -> #proj_ref_ident #proj_ty_generics { + unsafe { + #proj_ref_body + } + } + }); + let mut project_replace = cx.project_replace.span().map(|span| { + // It is enough to only set the span of the signature. + let sig = quote_spanned! { span => + #vis fn project_replace( + self: _pin_project::__private::Pin<&mut Self>, + __replacement: Self, + ) -> #proj_own_ident #orig_ty_generics + }; + quote! { + #sig { + unsafe { + let __self_ptr: *mut Self = self.get_unchecked_mut(); + + // Destructors will run in reverse order, so next create a guard to overwrite + // `self` with the replacement value without calling destructors. + let __guard = _pin_project::__private::UnsafeOverwriteGuard::new( + __self_ptr, + __replacement, + ); + + #proj_own_body + } + } + } + }); + + if cx.kind == Enum { + if !cx.project { + project = None; + } + if !cx.project_ref { + project_ref = None; + } + if cx.project_replace.ident().is_none() { + project_replace = None; + } + } + + quote! { + impl #impl_generics #orig_ident #ty_generics #where_clause { + #project + #project_ref + #project_replace + } + } +} + +/// Checks that the `[repr(packed)]` attribute is not included. +/// +/// This currently does two checks: +/// - Checks the attributes of structs to ensure there is no `[repr(packed)]`. +/// - Generates a function that borrows fields without an unsafe block and +/// forbidding `unaligned_references` lint. 
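For concreteness, this is roughly what the guard function described in the doc comment above amounts to for a small hand-written struct. Names are invented; the lint names mirror what the code below emits, and on newer compilers taking such a reference out of a packed layout is a hard error regardless.

```rust
struct Plain {
    a: u64,
    b: u8,
}

// If `Plain` were `#[repr(packed)]`, `&this.a` could be an unaligned reference and
// the forbidden lints below would reject this function at compile time.
#[forbid(unaligned_references, safe_packed_borrows)]
fn __assert_not_repr_packed(this: &Plain) {
    let _ = &this.a;
    let _ = &this.b;
}

fn main() {
    __assert_not_repr_packed(&Plain { a: 1, b: 2 });
}
```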
+fn ensure_not_packed(orig: &OriginalType<'_>, fields: Option<&Fields>) -> Result<TokenStream> { + for meta in orig.attrs.iter().filter_map(|attr| attr.parse_meta().ok()) { + if let Meta::List(list) = meta { + if list.path.is_ident("repr") { + for repr in list.nested.iter() { + match repr { + NestedMeta::Meta(Meta::Path(path)) + | NestedMeta::Meta(Meta::List(MetaList { path, .. })) + | NestedMeta::Meta(Meta::NameValue(MetaNameValue { path, .. })) => { + if path.is_ident("packed") { + let msg = if fields.is_none() { + // #[repr(packed)] cannot be applied to enums and will be rejected by rustc. + // However, we should not rely on the behavior of rustc that rejects this. + // https://github.com/taiki-e/pin-project/pull/324#discussion_r612388001 + "#[repr(packed)] attribute should be applied to a struct or union" + } else if let NestedMeta::Meta(Meta::NameValue(..)) = repr { + // #[repr(packed = "")] is not a valid form of #[repr(packed)] and will be + // rejected by rustc. + // However, we should not rely on the behavior of rustc that rejects this. + // https://github.com/taiki-e/pin-project/pull/324#discussion_r612388001 + "#[repr(packed)] attribute should not be name-value pair" + } else { + "#[pin_project] attribute may not be used on #[repr(packed)] types" + }; + bail!(repr, msg); + } + } + NestedMeta::Lit(..) => {} + } + } + } + } + } + + let fields = match fields { + Some(fields) => fields, + None => return Ok(TokenStream::new()), + }; + + // Workaround for https://github.com/taiki-e/pin-project/issues/32 + // Through the tricky use of proc macros, it's possible to bypass + // the above check for the `repr` attribute. + // To ensure that it's impossible to use pin projections on a `#[repr(packed)]` + // struct, we generate code like this: + // + // ```rust + // #[forbid(unaligned_references)] + // fn assert_not_repr_packed(val: &MyStruct) { + // let _field1 = &val.field1; + // let _field2 = &val.field2; + // ... + // let _fieldn = &val.fieldn; + // } + // ``` + // + // Taking a reference to a packed field is UB, and applying + // `#[forbid(unaligned_references)]` makes sure that doing this is a hard error. + // + // If the struct ends up having `#[repr(packed)]` applied somehow, + // this will generate an (unfriendly) error message. Under all reasonable + // circumstances, we'll detect the `#[repr(packed)]` attribute, and generate + // a much nicer error above. + // + // There is one exception: If the type of a struct field has an alignment of 1 + // (e.g. u8), it is always safe to take a reference to it, even if the struct + // is `#[repr(packed)]`. If the struct is composed entirely of types of + // alignment 1, our generated method will not trigger an error if the + // struct is `#[repr(packed)]`. + // + // Fortunately, this should have no observable consequence - `#[repr(packed)]` + // is essentially a no-op on such a type. Nevertheless, we include a test + // to ensure that the compiler doesn't ever try to copy the fields on + // such a struct when trying to drop it - which is the reason we prevent + // `#[repr(packed)]` in the first place. + // + // See also https://github.com/taiki-e/pin-project/pull/34. + // + // Note: + // - pin-project v0.4.3 or later (#135, v0.4.0-v0.4.2 are already yanked for + // another reason) is internally proc-macro-derive, so it is not + // affected by the problem that the struct definition is rewritten by + // another macro after the #[pin_project] is expanded. + // So this is probably no longer necessary, but we keep it for now.
+ // + // - Lint-based tricks aren't perfect, but they're much better than nothing: + // https://github.com/taiki-e/pin-project-lite/issues/26 + // + // - Enable both unaligned_references and safe_packed_borrows lints + // because unaligned_references lint does not exist in older compilers: + // https://github.com/taiki-e/pin-project-lite/pull/55 + // https://github.com/rust-lang/rust/pull/82525 + let mut field_refs = vec![]; + match fields { + Fields::Named(FieldsNamed { named, .. }) => { + for Field { ident, .. } in named { + field_refs.push(quote!(&this.#ident)); + } + } + Fields::Unnamed(FieldsUnnamed { unnamed, .. }) => { + for (index, _) in unnamed.iter().enumerate() { + let index = Index::from(index); + field_refs.push(quote!(&this.#index)); + } + } + Fields::Unit => {} + } + + let (impl_generics, ty_generics, where_clause) = orig.generics.split_for_impl(); + let ident = orig.ident; + Ok(quote! { + #[forbid(unaligned_references, safe_packed_borrows)] + fn __assert_not_repr_packed #impl_generics (this: &#ident #ty_generics) #where_clause { + #(let _ = #field_refs;)* + } + }) } diff --git a/third_party/rust/pin-project-internal/src/pin_project/mod.rs b/third_party/rust/pin-project-internal/src/pin_project/mod.rs index 3c8e29e8ea7b..2dce78f37fb3 100644 --- a/third_party/rust/pin-project-internal/src/pin_project/mod.rs +++ b/third_party/rust/pin-project-internal/src/pin_project/mod.rs @@ -1,15 +1,17 @@ +mod args; mod attribute; mod derive; use proc_macro2::TokenStream; +use syn::Error; /// The annotation for pinned type. const PIN: &str = "pin"; pub(crate) fn attribute(args: &TokenStream, input: TokenStream) -> TokenStream { - attribute::parse_attribute(args, input).unwrap_or_else(|e| e.to_compile_error()) + attribute::parse_attribute(args, input).unwrap_or_else(Error::into_compile_error) } pub(crate) fn derive(input: TokenStream) -> TokenStream { - derive::parse_derive(input).unwrap_or_else(|e| e.to_compile_error()) + derive::parse_derive(input).unwrap_or_else(Error::into_compile_error) } diff --git a/third_party/rust/pin-project-internal/src/pinned_drop.rs b/third_party/rust/pin-project-internal/src/pinned_drop.rs index c65a2c4b516e..912989dd4168 100644 --- a/third_party/rust/pin-project-internal/src/pinned_drop.rs +++ b/third_party/rust/pin-project-internal/src/pinned_drop.rs @@ -1,20 +1,27 @@ use proc_macro2::TokenStream; -use quote::{quote, ToTokens}; +use quote::{format_ident, quote, ToTokens}; use syn::{ - parse_quote, spanned::Spanned, visit_mut::VisitMut, Error, FnArg, GenericArgument, Ident, - ImplItem, ImplItemMethod, ItemImpl, Pat, Path, PathArguments, Result, ReturnType, Token, Type, + parse_quote, spanned::Spanned, visit_mut::VisitMut, Error, FnArg, GenericArgument, ImplItem, + ItemImpl, Pat, PatIdent, Path, PathArguments, Result, ReturnType, Signature, Token, Type, TypePath, TypeReference, }; use crate::utils::{parse_as_empty, prepend_underscore_to_self, ReplaceReceiver, SliceExt}; pub(crate) fn attribute(args: &TokenStream, mut input: ItemImpl) -> TokenStream { - if let Err(e) = parse_as_empty(args).and_then(|()| parse(&mut input)) { + let res = (|| -> Result<()> { + parse_as_empty(args)?; + validate_impl(&input)?; + expand_impl(&mut input); + Ok(()) + })(); + + if let Err(e) = res { let mut tokens = e.to_compile_error(); if let Type::Path(self_ty) = &*input.self_ty { let (impl_generics, _, where_clause) = input.generics.split_for_impl(); - // A dummy impl of `PinnedDrop`. + // Generate a dummy impl of `PinnedDrop`. 
// In many cases, `#[pinned_drop] impl` is declared after `#[pin_project]`. // Therefore, if `pinned_drop` compile fails, you will also get an error // about `PinnedDrop` not being implemented. @@ -24,7 +31,8 @@ pub(crate) fn attribute(args: &TokenStream, mut input: ItemImpl) -> TokenStream // accidentally compile successfully. // // However, if `input.self_ty` is not Type::Path, there is a high possibility that - // the type does not exist, so do not generate a dummy impl. + // the type does not exist (since #[pin_project] can only be used on struct/enum + // definitions), so do not generate a dummy impl. tokens.extend(quote! { impl #impl_generics ::pin_project::__private::PinnedDrop for #self_ty #where_clause @@ -39,36 +47,90 @@ pub(crate) fn attribute(args: &TokenStream, mut input: ItemImpl) -> TokenStream } } -fn parse_method(method: &ImplItemMethod) -> Result<()> { +/// Validates the signature of given `PinnedDrop` impl. +fn validate_impl(item: &ItemImpl) -> Result<()> { + const INVALID_ITEM: &str = + "#[pinned_drop] may only be used on implementation for the `PinnedDrop` trait"; + + if let Some(attr) = item.attrs.find("pinned_drop") { + bail!(attr, "duplicate #[pinned_drop] attribute"); + } + + if let Some((_, path, _)) = &item.trait_ { + if !path.is_ident("PinnedDrop") { + bail!(path, INVALID_ITEM); + } + } else { + bail!(item.self_ty, INVALID_ITEM); + } + + if item.unsafety.is_some() { + bail!(item.unsafety, "implementing the trait `PinnedDrop` is not unsafe"); + } + if item.items.is_empty() { + bail!(item, "not all trait items implemented, missing: `drop`"); + } + + match &*item.self_ty { + Type::Path(_) => {} + ty => { + bail!(ty, "implementing the trait `PinnedDrop` on this type is unsupported"); + } + } + + item.items.iter().enumerate().try_for_each(|(i, item)| match item { + ImplItem::Const(item) => { + bail!(item, "const `{}` is not a member of trait `PinnedDrop`", item.ident) + } + ImplItem::Type(item) => { + bail!(item, "type `{}` is not a member of trait `PinnedDrop`", item.ident) + } + ImplItem::Method(method) => { + validate_sig(&method.sig)?; + if i == 0 { + Ok(()) + } else { + bail!(method, "duplicate definitions with name `drop`") + } + } + _ => unreachable!("unexpected ImplItem"), + }) +} + +/// Validates the signature of given `PinnedDrop::drop` method. 
+/// +/// The correct signature is: `(mut) self: (::)Pin<&mut Self>` +fn validate_sig(sig: &Signature) -> Result<()> { fn get_ty_path(ty: &Type) -> Option<&Path> { - if let Type::Path(TypePath { qself: None, path }) = ty { Some(path) } else { None } + if let Type::Path(TypePath { qself: None, path }) = ty { + Some(path) + } else { + None + } } const INVALID_ARGUMENT: &str = "method `drop` must take an argument `self: Pin<&mut Self>`"; - if method.sig.ident != "drop" { - return Err(error!( - method.sig.ident, - "method `{}` is not a member of trait `PinnedDrop", method.sig.ident, - )); + if sig.ident != "drop" { + bail!(sig.ident, "method `{}` is not a member of trait `PinnedDrop", sig.ident,); } - if let ReturnType::Type(_, ty) = &method.sig.output { + if let ReturnType::Type(_, ty) = &sig.output { match &**ty { Type::Tuple(ty) if ty.elems.is_empty() => {} - _ => return Err(error!(ty, "method `drop` must return the unit type")), + _ => bail!(ty, "method `drop` must return the unit type"), } } - match method.sig.inputs.len() { + match sig.inputs.len() { 1 => {} - 0 => return Err(Error::new(method.sig.paren_token.span, INVALID_ARGUMENT)), - _ => return Err(error!(method.sig.inputs, INVALID_ARGUMENT)), + 0 => return Err(Error::new(sig.paren_token.span, INVALID_ARGUMENT)), + _ => bail!(sig.inputs, INVALID_ARGUMENT), } - if let Some(FnArg::Typed(pat)) = method.sig.receiver() { + if let Some(FnArg::Typed(arg)) = sig.receiver() { // (mut) self: - if let Some(path) = get_ty_path(&pat.ty) { + if let Some(path) = get_ty_path(&arg.ty) { let ty = path.segments.last().unwrap(); if let PathArguments::AngleBracketed(args) = &ty.arguments { // (mut) self: (::)<&mut ..> @@ -83,11 +145,8 @@ fn parse_method(method: &ImplItemMethod) -> Result<()> { && ty.ident == "Pin" && get_ty_path(elem).map_or(false, |path| path.is_ident("Self")) { - if method.sig.unsafety.is_some() { - return Err(error!( - method.sig.unsafety, - "implementing the method `drop` is not unsafe" - )); + if sig.unsafety.is_some() { + bail!(sig.unsafety, "implementing the method `drop` is not unsafe"); } return Ok(()); } @@ -96,73 +155,13 @@ fn parse_method(method: &ImplItemMethod) -> Result<()> { } } - Err(error!(method.sig.inputs[0], INVALID_ARGUMENT)) -} - -fn parse(item: &mut ItemImpl) -> Result<()> { - const INVALID_ITEM: &str = - "#[pinned_drop] may only be used on implementation for the `PinnedDrop` trait"; - - if let Some(attr) = item.attrs.find("pinned_drop") { - return Err(error!(attr, "duplicate #[pinned_drop] attribute")); - } - - if let Some((_, path, _)) = &mut item.trait_ { - if path.is_ident("PinnedDrop") { - *path = parse_quote_spanned! 
{ path.span() => - ::pin_project::__private::PinnedDrop - }; - } else { - return Err(error!(path, INVALID_ITEM)); - } - } else { - return Err(error!(item.self_ty, INVALID_ITEM)); - } - - if item.unsafety.is_some() { - return Err(error!(item.unsafety, "implementing the trait `PinnedDrop` is not unsafe")); - } - if item.items.is_empty() { - return Err(error!(item, "not all trait items implemented, missing: `drop`")); - } - - match &*item.self_ty { - Type::Path(_) => {} - ty => { - return Err(error!( - ty, - "implementing the trait `PinnedDrop` on this type is unsupported" - )); - } - } - - item.items - .iter() - .enumerate() - .try_for_each(|(i, item)| match item { - ImplItem::Const(item) => { - Err(error!(item, "const `{}` is not a member of trait `PinnedDrop`", item.ident)) - } - ImplItem::Type(item) => { - Err(error!(item, "type `{}` is not a member of trait `PinnedDrop`", item.ident)) - } - ImplItem::Method(method) => { - parse_method(method)?; - if i == 0 { - Ok(()) - } else { - Err(error!(method, "duplicate definitions with name `drop`")) - } - } - _ => unreachable!("unexpected ImplItem"), - }) - .map(|()| expand_item(item)) + bail!(sig.inputs[0], INVALID_ARGUMENT) } // from: // // fn drop(self: Pin<&mut Self>) { -// // something +// // ... // } // // into: @@ -170,42 +169,54 @@ fn parse(item: &mut ItemImpl) -> Result<()> { // unsafe fn drop(self: Pin<&mut Self>) { // fn __drop_inner(__self: Pin<&mut Foo<'_, T>>) { // fn __drop_inner() {} -// // something +// // ... // } // __drop_inner(self); // } // -fn expand_item(item: &mut ItemImpl) { +fn expand_impl(item: &mut ItemImpl) { + fn get_arg_pat(arg: &mut FnArg) -> Option<&mut PatIdent> { + if let FnArg::Typed(arg) = arg { + if let Pat::Ident(ident) = &mut *arg.pat { + return Some(ident); + } + } + None + } + + // `PinnedDrop` is a private trait and should not appear in docs. + item.attrs.push(parse_quote!(#[doc(hidden)])); + + let path = &mut item.trait_.as_mut().unwrap().1; + *path = parse_quote_spanned! { path.span() => + ::pin_project::__private::PinnedDrop + }; + let method = if let ImplItem::Method(method) = &mut item.items[0] { method } else { unreachable!() }; - let mut drop_inner = method.clone(); // `fn drop(mut self: Pin<&mut Self>)` -> `fn __drop_inner(mut __self: Pin<&mut Receiver>)` - let ident = Ident::new("__drop_inner", drop_inner.sig.ident.span()); - // Add a dummy `__drop_inner` function to prevent users call outer `__drop_inner`. - drop_inner.block.stmts.insert(0, parse_quote!(fn #ident() {})); - drop_inner.sig.ident = ident; - drop_inner.sig.generics = item.generics.clone(); - if let FnArg::Typed(arg) = &mut drop_inner.sig.inputs[0] { - if let Pat::Ident(ident) = &mut *arg.pat { - prepend_underscore_to_self(&mut ident.ident); - } - } - let self_ty = if let Type::Path(ty) = &*item.self_ty { ty } else { unreachable!() }; - let mut visitor = ReplaceReceiver(self_ty); - visitor.visit_signature_mut(&mut drop_inner.sig); - visitor.visit_block_mut(&mut drop_inner.block); + let drop_inner = { + let mut drop_inner = method.clone(); + let ident = format_ident!("__drop_inner"); + // Add a dummy `__drop_inner` function to prevent users call outer `__drop_inner`. 
+ drop_inner.block.stmts.insert(0, parse_quote!(fn #ident() {})); + drop_inner.sig.ident = ident; + drop_inner.sig.generics = item.generics.clone(); + let self_pat = get_arg_pat(&mut drop_inner.sig.inputs[0]).unwrap(); + prepend_underscore_to_self(&mut self_pat.ident); + let self_ty = if let Type::Path(ty) = &*item.self_ty { ty } else { unreachable!() }; + let mut visitor = ReplaceReceiver(self_ty); + visitor.visit_signature_mut(&mut drop_inner.sig); + visitor.visit_block_mut(&mut drop_inner.block); + drop_inner + }; // `fn drop(mut self: Pin<&mut Self>)` -> `unsafe fn drop(self: Pin<&mut Self>)` method.sig.unsafety = Some(::default()); - let mut self_token = None; - if let FnArg::Typed(arg) = &mut method.sig.inputs[0] { - if let Pat::Ident(ident) = &mut *arg.pat { - ident.mutability = None; - self_token = Some(&ident.ident); - } - } - assert!(self_token.is_some()); + let self_pat = get_arg_pat(&mut method.sig.inputs[0]).unwrap(); + self_pat.mutability = None; + let self_token = &self_pat.ident; method.block.stmts = parse_quote! { #[allow(clippy::needless_pass_by_value)] // This lint does not warn the receiver. diff --git a/third_party/rust/pin-project-internal/src/project.rs b/third_party/rust/pin-project-internal/src/project.rs deleted file mode 100644 index f593d9f719b2..000000000000 --- a/third_party/rust/pin-project-internal/src/project.rs +++ /dev/null @@ -1,353 +0,0 @@ -use proc_macro2::{Span, TokenStream}; -use quote::ToTokens; -use syn::{ - parse_quote, - visit_mut::{self, VisitMut}, - Expr, ExprLet, ExprMatch, Ident, ImplItem, Item, ItemFn, ItemImpl, ItemUse, Lifetime, Local, - Pat, PatBox, PatIdent, PatOr, PatPath, PatReference, PatStruct, PatTupleStruct, PatType, Path, - PathArguments, PathSegment, Result, Stmt, Type, TypePath, UseTree, -}; - -use crate::utils::{ - determine_lifetime_name, insert_lifetime, parse_as_empty, ProjKind, SliceExt, VecExt, -}; - -pub(crate) fn attribute(args: &TokenStream, input: Stmt, kind: ProjKind) -> TokenStream { - parse_as_empty(args).and_then(|()| parse(input, kind)).unwrap_or_else(|e| e.to_compile_error()) -} - -fn replace_expr(expr: &mut Expr, kind: ProjKind) { - match expr { - Expr::Match(expr) => { - Context::new(kind).replace_expr_match(expr); - } - Expr::If(expr_if) => { - let mut expr_if = expr_if; - while let Expr::Let(ref mut expr) = &mut *expr_if.cond { - Context::new(kind).replace_expr_let(expr); - if let Some((_, ref mut expr)) = expr_if.else_branch { - if let Expr::If(new_expr_if) = &mut **expr { - expr_if = new_expr_if; - continue; - } - } - break; - } - } - _ => {} - } -} - -fn parse(mut stmt: Stmt, kind: ProjKind) -> Result { - match &mut stmt { - Stmt::Expr(expr) | Stmt::Semi(expr, _) => replace_expr(expr, kind), - Stmt::Local(local) => Context::new(kind).replace_local(local)?, - Stmt::Item(Item::Fn(item)) => replace_item_fn(item, kind)?, - Stmt::Item(Item::Impl(item)) => replace_item_impl(item, kind)?, - Stmt::Item(Item::Use(item)) => replace_item_use(item, kind)?, - _ => {} - } - - Ok(stmt.into_token_stream()) -} - -struct Context { - register: Option<(Ident, usize)>, - replaced: bool, - kind: ProjKind, -} - -impl Context { - fn new(kind: ProjKind) -> Self { - Self { register: None, replaced: false, kind } - } - - fn update(&mut self, ident: &Ident, len: usize) { - self.register.get_or_insert_with(|| (ident.clone(), len)); - } - - fn compare_paths(&self, ident: &Ident, len: usize) -> bool { - match &self.register { - Some((i, l)) => *l == len && i == ident, - None => false, - } - } - - fn replace_local(&mut self, local: &mut 
Local) -> Result<()> { - if let Some(attr) = local.attrs.find(self.kind.method_name()) { - return Err(error!(attr, "duplicate #[{}] attribute", self.kind.method_name())); - } - - if let Some(Expr::Match(expr)) = local.init.as_mut().map(|(_, expr)| &mut **expr) { - self.replace_expr_match(expr); - } - - if self.replaced { - if is_replaceable(&local.pat, false) { - return Err(error!( - local.pat, - "Both initializer expression and pattern are replaceable, \ - you need to split the initializer expression into separate let bindings \ - to avoid ambiguity" - )); - } - } else { - self.replace_pat(&mut local.pat, false); - } - - Ok(()) - } - - fn replace_expr_let(&mut self, expr: &mut ExprLet) { - self.replace_pat(&mut expr.pat, true) - } - - fn replace_expr_match(&mut self, expr: &mut ExprMatch) { - expr.arms.iter_mut().for_each(|arm| self.replace_pat(&mut arm.pat, true)) - } - - fn replace_pat(&mut self, pat: &mut Pat, allow_pat_path: bool) { - match pat { - Pat::Ident(PatIdent { subpat: Some((_, pat)), .. }) - | Pat::Reference(PatReference { pat, .. }) - | Pat::Box(PatBox { pat, .. }) - | Pat::Type(PatType { pat, .. }) => self.replace_pat(pat, allow_pat_path), - - Pat::Or(PatOr { cases, .. }) => { - cases.iter_mut().for_each(|pat| self.replace_pat(pat, allow_pat_path)) - } - - Pat::Struct(PatStruct { path, .. }) | Pat::TupleStruct(PatTupleStruct { path, .. }) => { - self.replace_path(path) - } - Pat::Path(PatPath { qself: None, path, .. }) if allow_pat_path => { - self.replace_path(path) - } - _ => {} - } - } - - fn replace_path(&mut self, path: &mut Path) { - let len = match path.segments.len() { - // 1: struct - // 2: enum - len @ 1 | len @ 2 => len, - // other path - _ => return, - }; - - if self.register.is_none() || self.compare_paths(&path.segments[0].ident, len) { - self.update(&path.segments[0].ident, len); - self.replaced = true; - replace_ident(&mut path.segments[0].ident, self.kind); - } - } -} - -fn is_replaceable(pat: &Pat, allow_pat_path: bool) -> bool { - match pat { - Pat::Ident(PatIdent { subpat: Some((_, pat)), .. }) - | Pat::Reference(PatReference { pat, .. }) - | Pat::Box(PatBox { pat, .. }) - | Pat::Type(PatType { pat, .. }) => is_replaceable(pat, allow_pat_path), - - Pat::Or(PatOr { cases, .. }) => cases.iter().any(|pat| is_replaceable(pat, allow_pat_path)), - - Pat::Struct(_) | Pat::TupleStruct(_) => true, - Pat::Path(PatPath { qself: None, .. 
}) => allow_pat_path, - _ => false, - } -} - -fn replace_ident(ident: &mut Ident, kind: ProjKind) { - *ident = kind.proj_ident(ident); -} - -fn replace_item_impl(item: &mut ItemImpl, kind: ProjKind) -> Result<()> { - if let Some(attr) = item.attrs.find(kind.method_name()) { - return Err(error!(attr, "duplicate #[{}] attribute", kind.method_name())); - } - - let PathSegment { ident, arguments } = match &mut *item.self_ty { - Type::Path(TypePath { qself: None, path }) => path.segments.last_mut().unwrap(), - _ => return Ok(()), - }; - - replace_ident(ident, kind); - - let mut lifetime_name = String::from("'pin"); - determine_lifetime_name(&mut lifetime_name, &mut item.generics); - item.items - .iter_mut() - .filter_map(|i| if let ImplItem::Method(i) = i { Some(i) } else { None }) - .for_each(|item| determine_lifetime_name(&mut lifetime_name, &mut item.sig.generics)); - let lifetime = Lifetime::new(&lifetime_name, Span::call_site()); - - insert_lifetime(&mut item.generics, lifetime.clone()); - - match arguments { - PathArguments::None => { - *arguments = PathArguments::AngleBracketed(parse_quote!(<#lifetime>)); - } - PathArguments::AngleBracketed(args) => { - args.args.insert(0, parse_quote!(#lifetime)); - } - PathArguments::Parenthesized(_) => unreachable!(), - } - Ok(()) -} - -fn replace_item_fn(item: &mut ItemFn, kind: ProjKind) -> Result<()> { - struct FnVisitor(Result<()>); - - impl FnVisitor { - fn visit_stmt(&mut self, node: &mut Stmt) -> Result<()> { - match node { - Stmt::Expr(expr) | Stmt::Semi(expr, _) => self.visit_expr(expr), - Stmt::Local(local) => { - visit_mut::visit_local_mut(self, local); - - let mut prev = None; - for &kind in &ProjKind::ALL { - if let Some(attr) = local.attrs.find_remove(kind.method_name())? { - if let Some(prev) = prev.replace(kind) { - return Err(error!( - attr, - "attributes `{}` and `{}` are mutually exclusive", - prev.method_name(), - kind.method_name(), - )); - } - Context::new(kind).replace_local(local)?; - } - } - - Ok(()) - } - // Do not recurse into nested items. - Stmt::Item(_) => Ok(()), - } - } - - fn visit_expr(&mut self, node: &mut Expr) -> Result<()> { - visit_mut::visit_expr_mut(self, node); - match node { - Expr::Match(expr) => { - let mut prev = None; - for &kind in &ProjKind::ALL { - if let Some(attr) = expr.attrs.find_remove(kind.method_name())? { - if let Some(prev) = prev.replace(kind) { - return Err(error!( - attr, - "attributes `{}` and `{}` are mutually exclusive", - prev.method_name(), - kind.method_name(), - )); - } - } - } - if let Some(kind) = prev { - replace_expr(node, kind); - } - } - Expr::If(expr_if) => { - if let Expr::Let(_) = &*expr_if.cond { - let mut prev = None; - for &kind in &ProjKind::ALL { - if let Some(attr) = expr_if.attrs.find_remove(kind.method_name())? { - if let Some(prev) = prev.replace(kind) { - return Err(error!( - attr, - "attributes `{}` and `{}` are mutually exclusive", - prev.method_name(), - kind.method_name(), - )); - } - } - } - if let Some(kind) = prev { - replace_expr(node, kind); - } - } - } - _ => {} - } - Ok(()) - } - } - - impl VisitMut for FnVisitor { - fn visit_stmt_mut(&mut self, node: &mut Stmt) { - if self.0.is_err() { - return; - } - if let Err(e) = self.visit_stmt(node) { - self.0 = Err(e) - } - } - - fn visit_expr_mut(&mut self, node: &mut Expr) { - if self.0.is_err() { - return; - } - if let Err(e) = self.visit_expr(node) { - self.0 = Err(e) - } - } - - fn visit_item_mut(&mut self, _: &mut Item) { - // Do not recurse into nested items. 
- } - } - - if let Some(attr) = item.attrs.find(kind.method_name()) { - return Err(error!(attr, "duplicate #[{}] attribute", kind.method_name())); - } - - let mut visitor = FnVisitor(Ok(())); - visitor.visit_block_mut(&mut item.block); - visitor.0 -} - -fn replace_item_use(item: &mut ItemUse, kind: ProjKind) -> Result<()> { - struct UseTreeVisitor { - res: Result<()>, - kind: ProjKind, - } - - impl VisitMut for UseTreeVisitor { - fn visit_use_tree_mut(&mut self, node: &mut UseTree) { - if self.res.is_err() { - return; - } - - match node { - // Desugar `use tree::` into `tree::__Projection`. - UseTree::Name(name) => replace_ident(&mut name.ident, self.kind), - UseTree::Glob(glob) => { - self.res = Err(error!( - glob, - "#[{}] attribute may not be used on glob imports", - self.kind.method_name() - )); - } - UseTree::Rename(rename) => { - self.res = Err(error!( - rename, - "#[{}] attribute may not be used on renamed imports", - self.kind.method_name() - )); - } - UseTree::Path(_) | UseTree::Group(_) => visit_mut::visit_use_tree_mut(self, node), - } - } - } - - if let Some(attr) = item.attrs.find(kind.method_name()) { - return Err(error!(attr, "duplicate #[{}] attribute", kind.method_name())); - } - - let mut visitor = UseTreeVisitor { res: Ok(()), kind }; - visitor.visit_item_use_mut(item); - visitor.res -} diff --git a/third_party/rust/pin-project-internal/src/utils.rs b/third_party/rust/pin-project-internal/src/utils.rs index 1c1d671f1359..27373efbae72 100644 --- a/third_party/rust/pin-project-internal/src/utils.rs +++ b/third_party/rust/pin-project-internal/src/utils.rs @@ -1,7 +1,7 @@ use std::{iter::FromIterator, mem}; use proc_macro2::{Group, Spacing, Span, TokenStream, TokenTree}; -use quote::{format_ident, quote, quote_spanned, ToTokens}; +use quote::{quote, quote_spanned, ToTokens}; use syn::{ parse::{Parse, ParseBuffer, ParseStream}, parse_quote, @@ -15,12 +15,18 @@ use syn::{ pub(crate) type Variants = Punctuated; -macro_rules! error { - ($span:expr, $msg:expr) => { - syn::Error::new_spanned(&$span, $msg) +macro_rules! format_err { + ($span:expr, $msg:expr $(,)?) => { + syn::Error::new_spanned(&$span as &dyn quote::ToTokens, &$msg as &dyn std::fmt::Display) }; ($span:expr, $($tt:tt)*) => { - error!($span, format!($($tt)*)) + format_err!($span, format!($($tt)*)) + }; +} + +macro_rules! bail { + ($($tt:tt)*) => { + return Err(format_err!($($tt)*)) }; } @@ -30,36 +36,6 @@ macro_rules! parse_quote_spanned { }; } -#[derive(Clone, Copy, Eq, PartialEq)] -pub(crate) enum ProjKind { - Mutable, - Immutable, - Owned, -} - -impl ProjKind { - pub(crate) const ALL: [Self; 3] = [ProjKind::Mutable, ProjKind::Immutable, ProjKind::Owned]; - - /// Returns the name of the projection method. - pub(crate) fn method_name(self) -> &'static str { - match self { - ProjKind::Mutable => "project", - ProjKind::Immutable => "project_ref", - ProjKind::Owned => "project_replace", - } - } - - /// Creates the ident of the projected type from the ident of the original - /// type. - pub(crate) fn proj_ident(self, ident: &Ident) -> Ident { - match self { - ProjKind::Mutable => format_ident!("__{}Projection", ident), - ProjKind::Immutable => format_ident!("__{}ProjectionRef", ident), - ProjKind::Owned => format_ident!("__{}ProjectionOwned", ident), - } - } -} - /// Determines the lifetime names. Ensure it doesn't overlap with any existing /// lifetime names. 
pub(crate) fn determine_lifetime_name(lifetime_name: &mut String, generics: &mut Generics) { @@ -111,7 +87,10 @@ pub(crate) fn insert_lifetime(generics: &mut Generics, lifetime: Lifetime) { generics.params.insert(0, LifetimeDef::new(lifetime).into()); } -/// Determines the visibility of the projected type and projection method. +/// Determines the visibility of the projected types and projection methods. +/// +/// If given visibility is `pub`, returned visibility is `pub(crate)`. +/// Otherwise, returned visibility is the same as given visibility. pub(crate) fn determine_visibility(vis: &Visibility) -> Visibility { if let Visibility::Public(token) = vis { parse_quote_spanned!(token.pub_token.span => pub(crate)) @@ -120,11 +99,16 @@ pub(crate) fn determine_visibility(vis: &Visibility) -> Visibility { } } -/// Check if `tokens` is an empty `TokenStream`. +/// Checks if `tokens` is an empty `TokenStream`. +/// /// This is almost equivalent to `syn::parse2::<Nothing>()`, but produces /// a better error message and does not require ownership of `tokens`. pub(crate) fn parse_as_empty(tokens: &TokenStream) -> Result<()> { - if tokens.is_empty() { Ok(()) } else { Err(error!(tokens, "unexpected token: {}", tokens)) } + if tokens.is_empty() { + Ok(()) + } else { + bail!(tokens, "unexpected token: `{}`", tokens) + } } pub(crate) fn respan<T>(node: &T, span: Span) -> T where @@ -154,17 +138,17 @@ pub(crate) trait SliceExt { fn find(&self, ident: &str) -> Option<&Attribute>; } -pub(crate) trait VecExt { - fn find_remove(&mut self, ident: &str) -> Result<Option<Attribute>>; -} - impl SliceExt for [Attribute] { + /// # Errors + /// + /// - There are multiple specified attributes. + /// - The `Attribute::tokens` field of the specified attribute is not empty. fn position_exact(&self, ident: &str) -> Result<Option<usize>> { self.iter() .try_fold((0, None), |(i, mut prev), attr| { if attr.path.is_ident(ident) { if prev.replace(i).is_some() { - return Err(error!(attr, "duplicate #[{}] attribute", ident)); + bail!(attr, "duplicate #[{}] attribute", ident); } parse_as_empty(&attr.tokens)?; } @@ -174,13 +158,7 @@ impl SliceExt for [Attribute] { } fn find(&self, ident: &str) -> Option<&Attribute> { - self.iter().position(|attr| attr.path.is_ident(ident)).and_then(|i| self.get(i)) - } -} - -impl VecExt for Vec<Attribute> { - fn find_remove(&mut self, ident: &str) -> Result<Option<Attribute>> { - self.position_exact(ident).map(|pos| pos.map(|i| self.remove(i))) + self.iter().position(|attr| attr.path.is_ident(ident)).map(|i| &self[i]) } } @@ -208,7 +186,9 @@ impl<'a> ParseBufferExt<'a> for ParseBuffer<'a> { // visitors // Replace `self`/`Self` with `__self`/`self_ty`.
-// Based on https://github.com/dtolnay/async-trait/blob/0.1.35/src/receiver.rs +// Based on: +// - https://github.com/dtolnay/async-trait/blob/0.1.35/src/receiver.rs +// - https://github.com/dtolnay/async-trait/commit/6029cbf375c562ca98fa5748e9d950a8ff93b0e7 pub(crate) struct ReplaceReceiver<'a>(pub(crate) &'a TypePath); @@ -291,7 +271,7 @@ impl ReplaceReceiver<'_> { match iter.peek() { Some(TokenTree::Punct(p)) if p.as_char() == ':' => { let span = ident.span(); - out.extend(quote_spanned!(span=> <#self_ty>)) + out.extend(quote_spanned!(span=> <#self_ty>)); } _ => out.extend(quote!(#self_ty)), } @@ -345,7 +325,6 @@ impl VisitMut for ReplaceReceiver<'_> { // `Self::method` -> `::method` fn visit_expr_path_mut(&mut self, expr: &mut ExprPath) { if expr.qself.is_none() { - prepend_underscore_to_self(&mut expr.path.segments[0].ident); self.self_to_qself(&mut expr.qself, &mut expr.path); } visit_mut::visit_expr_path_mut(self, expr); @@ -373,11 +352,21 @@ impl VisitMut for ReplaceReceiver<'_> { visit_mut::visit_pat_tuple_struct_mut(self, pat); } + fn visit_path_mut(&mut self, path: &mut Path) { + if path.segments.len() == 1 { + // Replace `self`, but not `self::function`. + prepend_underscore_to_self(&mut path.segments[0].ident); + } + for segment in &mut path.segments { + self.visit_path_arguments_mut(&mut segment.arguments); + } + } + fn visit_item_mut(&mut self, item: &mut Item) { match item { // Visit `macro_rules!` because locally defined macros can refer to `self`. Item::Macro(item) if item.mac.path.is_ident("macro_rules") => { - self.visit_macro_mut(&mut item.mac) + self.visit_macro_mut(&mut item.mac); } // Otherwise, do not recurse into nested items. _ => {} diff --git a/third_party/rust/pin-project-lite-0.1.12/.cargo-checksum.json b/third_party/rust/pin-project-lite-0.1.12/.cargo-checksum.json deleted file mode 100644 index 8c0640578b03..000000000000 --- a/third_party/rust/pin-project-lite-0.1.12/.cargo-checksum.json +++ /dev/null @@ -1 +0,0 @@ 
-{"files":{"CHANGELOG.md":"8eb961687eb18a203d2bf746a24f2be25c61f5fff3d0b378bfd2024afcc25620","Cargo.toml":"57bcf2cf00d18ed9ea2abbcd7bfaab33512c58628d4765d2483ccda2ca64f9f1","LICENSE-APACHE":"0d542e0c8804e39aa7f37eb00da5a762149dc682d7829451287e11b938e94594","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"00c5f78218b433dcb02ee227b583d23a1528514a307579d411a9d9920be53a19","src/lib.rs":"36c5eab3d62ff2043a125bf7878753585e0c7ff4a279cf75f6aa196da2446340","tests/auxiliary/mod.rs":"7e263e987e09b77e384f734384a00a71c5b70230bb1b5376446ef003a8da9372","tests/compiletest.rs":"39d2a829c224453bb4eab9779001a919f5d97e865ca666afc448b8782ac3f733","tests/include/basic.rs":"c3f9a1703b7a4f370842e0678b4ecd739f8452aec184b1115f11e0c3822e79e0","tests/lint.rs":"4664659c071e9d77c7e076a5e4762583312751cc5e9b9034366c232310eacd78","tests/proper_unpin.rs":"b8cb75c403a96ddd1f58c31e04f6299c69b7ceda59b67b481cff7d461841d68c","tests/test.rs":"eb3a0b3fe756b9ccf379100393b08347459ad994ab7eec1bd53ec2b25b8ba2f0","tests/ui/conflict-drop.rs":"55e6809b5f59dd81e32c2c89b742c0c76db6b099a1d2621e5b882c0d20f92837","tests/ui/conflict-drop.stderr":"b29a458533f0312b2b04e9a0dfea17fce2ba423ef764ec3e2996a315c00a4cf8","tests/ui/conflict-unpin.rs":"51b3b6720fa581b63167d6ac941a1ea1bf739b09954931b2bc2f7abe2d934f26","tests/ui/conflict-unpin.stderr":"ab8e9acab4ffcdff1d0065e9504d64a0220267ab9f18b2840c97da8c0bbbf984","tests/ui/invalid-bounds.rs":"f86f23d377df015182f2f5dae6464a4f98c37f2198e0646f721fedc4017cb82c","tests/ui/invalid-bounds.stderr":"59c3b887b5f2d87804c25c54f4427e616947821c8f3fc5642dfc1ced67520b71","tests/ui/invalid.rs":"7304bd4a5bac1419382742432cfa8af83535d7be8cfad52c661410e0e9e8207a","tests/ui/invalid.stderr":"cb935ec370a87ba511aeef64613e7351b2eac9e574b5a256c7cb3c30f4bd74a6","tests/ui/overlapping_lifetime_names.rs":"a64c42cc56fa4751d73a1db80a7deb568427dc96f30c1a29665067d8c0ecb2c9","tests/ui/overlapping_lifetime_names.stderr":"ebfd5c08e7c472f49d49d67de9308e647a6a6b1e289308bf2d60d79d72521457","tests/ui/overlapping_unpin_struct.rs":"5fde23ef628d2dbd27377ffa472fc19b1c6873122f38c3fb1f84fda8602f55f3","tests/ui/overlapping_unpin_struct.stderr":"e83e99bacdf6564727c77dafe5eaf000b4de6e784e6032b9b0ecca831ecc528c","tests/ui/packed.rs":"1f1a34aafbff9a59b94cdf3a53df03e9fc661d9e27e0f9962bad7f9bdad03b14","tests/ui/packed.stderr":"4acb1514ca0a2c3b9448b28a71ab81d1dce37ba42f1af8b3220f926f0ca556e4","tests/ui/unpin_sneaky.rs":"12e97a387ce1af6ee6a567687674aab70e96962a48f2433c39976d0b3e2c3341","tests/ui/unpin_sneaky.stderr":"7b2bc76faca39f77968c798674f2321111a74a5754512149320ea4cdd3e278d5","tests/ui/unsupported.rs":"14defa90e736f314bbbc219973929b77bdd22e5f7e4c4c88403db764f4d167d6","tests/ui/unsupported.stderr":"310a8a7ed4e8120fa570957800e6cc86ff5561580a241ab808092e99a1f3b8b2"},"package":"257b64915a082f7811703966789728173279bdebb956b143dbcd23f6f970a777"} \ No newline at end of file diff --git a/third_party/rust/pin-project-lite-0.1.12/CHANGELOG.md b/third_party/rust/pin-project-lite-0.1.12/CHANGELOG.md deleted file mode 100644 index 2a6245b0ceca..000000000000 --- a/third_party/rust/pin-project-lite-0.1.12/CHANGELOG.md +++ /dev/null @@ -1,111 +0,0 @@ -# Changelog - -All notable changes to this project will be documented in this file. - -This project adheres to [Semantic Versioning](https://semver.org). 
- -## [Unreleased] - -## [0.1.12] - 2021-03-02 - -- [Prepare for removal of `safe_packed_borrows` lint.](https://github.com/taiki-e/pin-project-lite/pull/55) See [#55](https://github.com/taiki-e/pin-project-lite/pull/55) for details. - -## [0.1.11] - 2020-10-20 - -**Note: This release has been yanked.** See [#55](https://github.com/taiki-e/pin-project-lite/pull/55) for details. - -- Suppress `clippy::redundant_pub_crate` lint in generated code. - -- Documentation improvements. - -## [0.1.10] - 2020-10-01 - -**Note: This release has been yanked.** See [#55](https://github.com/taiki-e/pin-project-lite/pull/55) for details. - -- Suppress `drop_bounds` lint, which will be added to rustc in the future. See [taiki-e/pin-project#272](https://github.com/taiki-e/pin-project/issues/272) for more details. - -## [0.1.9] - 2020-09-29 - -**Note: This release has been yanked.** See [#55](https://github.com/taiki-e/pin-project-lite/pull/55) for details. - -- [Fix trailing comma support in generics.](https://github.com/taiki-e/pin-project-lite/pull/32) - -## [0.1.8] - 2020-09-26 - -**Note: This release has been yanked.** See [#55](https://github.com/taiki-e/pin-project-lite/pull/55) for details. - -- [Fix compatibility of generated code with `forbid(future_incompatible)`.](https://github.com/taiki-e/pin-project-lite/pull/30) - - Note: This does not guarantee compatibility with `forbid(future_incompatible)` in the future. - If rustc adds a new lint, we may not be able to keep this. - -## [0.1.7] - 2020-06-04 - -**Note: This release has been yanked.** See [#55](https://github.com/taiki-e/pin-project-lite/pull/55) for details. - -- [Support `?Sized` bounds in where clauses.](https://github.com/taiki-e/pin-project-lite/pull/22) - -- [Fix lifetime inference error when an associated type is used in fields.](https://github.com/taiki-e/pin-project-lite/pull/20) - -- Suppress `clippy::used_underscore_binding` lint in generated code. - -- Documentation improvements. - -## [0.1.6] - 2020-05-31 - -**Note: This release has been yanked.** See [#55](https://github.com/taiki-e/pin-project-lite/pull/55) for details. - -- [Support lifetime bounds in where clauses.](https://github.com/taiki-e/pin-project-lite/pull/18) - -- Documentation improvements. - -## [0.1.5] - 2020-05-07 - -**Note: This release has been yanked.** See [#55](https://github.com/taiki-e/pin-project-lite/pull/55) for details. - -- [Support overwriting the name of `core` crate.](https://github.com/taiki-e/pin-project-lite/pull/14) - -## [0.1.4] - 2020-01-20 - -**Note: This release has been yanked.** See [#55](https://github.com/taiki-e/pin-project-lite/pull/55) for details. - -- [Support ?Sized bounds in generic parameters.](https://github.com/taiki-e/pin-project-lite/pull/9) - -## [0.1.3] - 2020-01-20 - -**Note: This release has been yanked.** See [#55](https://github.com/taiki-e/pin-project-lite/pull/55) for details. - -- [Support lifetime bounds in generic parameters.](https://github.com/taiki-e/pin-project-lite/pull/7) - -## [0.1.2] - 2020-01-05 - -**Note: This release has been yanked.** See [#55](https://github.com/taiki-e/pin-project-lite/pull/55) for details. - -- [Support recognizing default generic parameters.](https://github.com/taiki-e/pin-project-lite/pull/6) - -## [0.1.1] - 2019-11-15 - -**Note: This release has been yanked.** See [#55](https://github.com/taiki-e/pin-project-lite/pull/55) for details. 
- -- [`pin_project!` macro now determines the visibility of the projection type/method is based on the original type.](https://github.com/taiki-e/pin-project-lite/pull/5) - -## [0.1.0] - 2019-10-22 - -**Note: This release has been yanked.** See [#55](https://github.com/taiki-e/pin-project-lite/pull/55) for details. - -Initial release - -[Unreleased]: https://github.com/taiki-e/pin-project-lite/compare/v0.1.12...HEAD -[0.1.12]: https://github.com/taiki-e/pin-project-lite/compare/v0.1.11...v0.1.12 -[0.1.11]: https://github.com/taiki-e/pin-project-lite/compare/v0.1.10...v0.1.11 -[0.1.10]: https://github.com/taiki-e/pin-project-lite/compare/v0.1.9...v0.1.10 -[0.1.9]: https://github.com/taiki-e/pin-project-lite/compare/v0.1.8...v0.1.9 -[0.1.8]: https://github.com/taiki-e/pin-project-lite/compare/v0.1.7...v0.1.8 -[0.1.7]: https://github.com/taiki-e/pin-project-lite/compare/v0.1.6...v0.1.7 -[0.1.6]: https://github.com/taiki-e/pin-project-lite/compare/v0.1.5...v0.1.6 -[0.1.5]: https://github.com/taiki-e/pin-project-lite/compare/v0.1.4...v0.1.5 -[0.1.4]: https://github.com/taiki-e/pin-project-lite/compare/v0.1.3...v0.1.4 -[0.1.3]: https://github.com/taiki-e/pin-project-lite/compare/v0.1.2...v0.1.3 -[0.1.2]: https://github.com/taiki-e/pin-project-lite/compare/v0.1.1...v0.1.2 -[0.1.1]: https://github.com/taiki-e/pin-project-lite/compare/v0.1.0...v0.1.1 -[0.1.0]: https://github.com/taiki-e/pin-project-lite/releases/tag/v0.1.0 diff --git a/third_party/rust/pin-project-lite-0.1.12/Cargo.toml b/third_party/rust/pin-project-lite-0.1.12/Cargo.toml deleted file mode 100644 index f3ab911669bd..000000000000 --- a/third_party/rust/pin-project-lite-0.1.12/Cargo.toml +++ /dev/null @@ -1,36 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies -# -# If you believe there's an error in this file please file an -# issue against the rust-lang/cargo repository. If you're -# editing this file be aware that the upstream Cargo.toml -# will likely look very different (and much more reasonable) - -[package] -edition = "2018" -name = "pin-project-lite" -version = "0.1.12" -authors = ["Taiki Endo "] -exclude = ["/.*", "/scripts"] -description = "A lightweight version of pin-project written with declarative macros.\n" -documentation = "https://docs.rs/pin-project-lite" -keywords = ["pin", "macros"] -categories = ["no-std", "rust-patterns"] -license = "Apache-2.0 OR MIT" -repository = "https://github.com/taiki-e/pin-project-lite" -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[dependencies] -[dev-dependencies.rustversion] -version = "1" - -[dev-dependencies.static_assertions] -version = "1" - -[dev-dependencies.trybuild] -version = "1" diff --git a/third_party/rust/pin-project-lite-0.1.12/LICENSE-APACHE b/third_party/rust/pin-project-lite-0.1.12/LICENSE-APACHE deleted file mode 100644 index f433b1a53f5b..000000000000 --- a/third_party/rust/pin-project-lite-0.1.12/LICENSE-APACHE +++ /dev/null @@ -1,177 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. 
- - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS diff --git a/third_party/rust/pin-project-lite-0.1.12/README.md b/third_party/rust/pin-project-lite-0.1.12/README.md deleted file mode 100644 index b6c5b3ad7613..000000000000 --- a/third_party/rust/pin-project-lite-0.1.12/README.md +++ /dev/null @@ -1,109 +0,0 @@ -# pin-project-lite - -[![crates-badge]][crates-url] -[![docs-badge]][docs-url] -[![license-badge]][license] -[![rustc-badge]][rustc-url] - -[crates-badge]: https://img.shields.io/crates/v/pin-project-lite.svg -[crates-url]: https://crates.io/crates/pin-project-lite -[docs-badge]: https://docs.rs/pin-project-lite/badge.svg -[docs-url]: https://docs.rs/pin-project-lite -[license-badge]: https://img.shields.io/badge/license-Apache--2.0%20OR%20MIT-blue.svg -[license]: #license -[rustc-badge]: https://img.shields.io/badge/rustc-1.37+-lightgray.svg -[rustc-url]: https://blog.rust-lang.org/2019/08/15/Rust-1.37.0.html - -A lightweight version of [pin-project] written with declarative macros. - -## Usage - -Add this to your `Cargo.toml`: - -```toml -[dependencies] -pin-project-lite = "0.1" -``` - -The current pin-project-lite requires Rust 1.37 or later. - -## Examples - -[`pin_project!`] macro creates a projection type covering all the fields of struct. 
- -```rust -use pin_project_lite::pin_project; -use std::pin::Pin; - -pin_project! { - struct Struct { - #[pin] - pinned: T, - unpinned: U, - } -} - -impl Struct { - fn method(self: Pin<&mut Self>) { - let this = self.project(); - let _: Pin<&mut T> = this.pinned; // Pinned reference to the field - let _: &mut U = this.unpinned; // Normal reference to the field - } -} -``` - -## [pin-project] vs pin-project-lite - -Here are some similarities and differences compared to [pin-project]. - -### Similar: Safety - -pin-project-lite guarantees safety in much the same way as [pin-project]. Both are completely safe unless you write other unsafe code. - -### Different: Minimal design - -This library does not tackle as expansive of a range of use cases as [pin-project] does. If your use case is not already covered, please use [pin-project]. - -### Different: No proc-macro related dependencies - -This is the **only** reason to use this crate. However, **if you already have proc-macro related dependencies in your crate's dependency graph, there is no benefit from using this crate.** (Note: There is almost no difference in the amount of code generated between [pin-project] and pin-project-lite.) - -### Different: No useful error messages - -This macro does not handle any invalid input. So error messages are not to be useful in most cases. If you do need useful error messages, then upon error you can pass the same input to [pin-project] to receive a helpful description of the compile error. - -### Different: Structs only - -pin-project-lite will refuse anything other than a braced struct with named fields. Enums and tuple structs are not supported. - -### Different: No support for custom Drop implementation - -pin-project supports this by [`#[pinned_drop]`][pinned-drop]. - -### Different: No support for custom Unpin implementation - -pin-project supports this by [`UnsafeUnpin`][unsafe-unpin] and [`!Unpin`][not-unpin]. - -### Different: No support for pattern matching and destructing - -[pin-project supports this.][naming] - -[`pin_project!`]: https://docs.rs/pin-project-lite/0.1/pin_project_lite/macro.pin_project.html -[naming]: https://docs.rs/pin-project/1/pin_project/attr.pin_project.html -[not-unpin]: https://docs.rs/pin-project/1/pin_project/attr.pin_project.html#unpin -[pin-project]: https://github.com/taiki-e/pin-project -[pinned-drop]: https://docs.rs/pin-project/1/pin_project/attr.pin_project.html#pinned_drop -[unsafe-unpin]: https://docs.rs/pin-project/1/pin_project/attr.pin_project.html#unsafeunpin - -## License - -Licensed under either of - -* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or ) -* MIT license ([LICENSE-MIT](LICENSE-MIT) or ) - -at your option. - -### Contribution - -Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any additional terms or conditions. diff --git a/third_party/rust/pin-project-lite-0.1.12/src/lib.rs b/third_party/rust/pin-project-lite-0.1.12/src/lib.rs deleted file mode 100644 index ca3e1cfe5df5..000000000000 --- a/third_party/rust/pin-project-lite-0.1.12/src/lib.rs +++ /dev/null @@ -1,628 +0,0 @@ -//! A lightweight version of [pin-project] written with declarative macros. -//! -//! # Examples -//! -//! [`pin_project!`] macro creates a projection type covering all the fields of struct. -//! -//! ```rust -//! use std::pin::Pin; -//! -//! use pin_project_lite::pin_project; -//! -//! pin_project! { -//! 
struct Struct { -//! #[pin] -//! pinned: T, -//! unpinned: U, -//! } -//! } -//! -//! impl Struct { -//! fn method(self: Pin<&mut Self>) { -//! let this = self.project(); -//! let _: Pin<&mut T> = this.pinned; // Pinned reference to the field -//! let _: &mut U = this.unpinned; // Normal reference to the field -//! } -//! } -//! ``` -//! -//! # [pin-project] vs pin-project-lite -//! -//! Here are some similarities and differences compared to [pin-project]. -//! -//! ## Similar: Safety -//! -//! pin-project-lite guarantees safety in much the same way as [pin-project]. Both are completely safe unless you write other unsafe code. -//! -//! ## Different: Minimal design -//! -//! This library does not tackle as expansive of a range of use cases as [pin-project] does. If your use case is not already covered, please use [pin-project]. -//! -//! ## Different: No proc-macro related dependencies -//! -//! This is the **only** reason to use this crate. However, **if you already have proc-macro related dependencies in your crate's dependency graph, there is no benefit from using this crate.** (Note: There is almost no difference in the amount of code generated between [pin-project] and pin-project-lite.) -//! -//! ## Different: No useful error messages -//! -//! This macro does not handle any invalid input. So error messages are not to be useful in most cases. If you do need useful error messages, then upon error you can pass the same input to [pin-project] to receive a helpful description of the compile error. -//! -//! ## Different: Structs only -//! -//! pin-project-lite will refuse anything other than a braced struct with named fields. Enums and tuple structs are not supported. -//! -//! ## Different: No support for custom Drop implementation -//! -//! pin-project supports this by [`#[pinned_drop]`][pinned-drop]. -//! -//! ## Different: No support for custom Unpin implementation -//! -//! pin-project supports this by [`UnsafeUnpin`][unsafe-unpin] and [`!Unpin`][not-unpin]. -//! -//! ## Different: No support for pattern matching and destructing -//! -//! [pin-project supports this.][naming] -//! -//! [naming]: https://docs.rs/pin-project/1/pin_project/attr.pin_project.html -//! [not-unpin]: https://docs.rs/pin-project/1/pin_project/attr.pin_project.html#unpin -//! [pin-project]: https://github.com/taiki-e/pin-project -//! [pinned-drop]: https://docs.rs/pin-project/1/pin_project/attr.pin_project.html#pinned_drop -//! [unsafe-unpin]: https://docs.rs/pin-project/1/pin_project/attr.pin_project.html#unsafeunpin - -#![no_std] -#![doc(test( - no_crate_inject, - attr(deny(warnings, rust_2018_idioms, single_use_lifetimes), allow(dead_code)) -))] -#![warn(unsafe_code)] -#![warn(future_incompatible, rust_2018_idioms, single_use_lifetimes, unreachable_pub)] -#![warn(clippy::all, clippy::default_trait_access)] -// mem::take and #[non_exhaustive] requires Rust 1.40, matches! requires Rust 1.42 -#![allow( - clippy::mem_replace_with_default, - clippy::manual_non_exhaustive, - clippy::match_like_matches_macro -)] - -/// A macro that creates a projection type covering all the fields of struct. -/// -/// This macro creates a projection type according to the following rules: -/// -/// * For the field that uses `#[pin]` attribute, makes the pinned reference to the field. -/// * For the other fields, makes the unpinned reference to the field. 
-/// -/// And the following methods are implemented on the original type: -/// -/// ```rust -/// # use std::pin::Pin; -/// # type Projection<'a> = &'a (); -/// # type ProjectionRef<'a> = &'a (); -/// # trait Dox { -/// fn project(self: Pin<&mut Self>) -> Projection<'_>; -/// fn project_ref(self: Pin<&Self>) -> ProjectionRef<'_>; -/// # } -/// ``` -/// -/// The visibility of the projected type and projection method is based on the -/// original type. However, if the visibility of the original type is `pub`, -/// the visibility of the projected type and the projection method is `pub(crate)`. -/// -/// # Safety -/// -/// `pin_project!` macro guarantees safety in much the same way as [pin-project] crate. -/// Both are completely safe unless you write other unsafe code. -/// -/// See [pin-project] crate for more details. -/// -/// # Examples -/// -/// ```rust -/// use std::pin::Pin; -/// -/// use pin_project_lite::pin_project; -/// -/// pin_project! { -/// struct Struct { -/// #[pin] -/// pinned: T, -/// unpinned: U, -/// } -/// } -/// -/// impl Struct { -/// fn method(self: Pin<&mut Self>) { -/// let this = self.project(); -/// let _: Pin<&mut T> = this.pinned; // Pinned reference to the field -/// let _: &mut U = this.unpinned; // Normal reference to the field -/// } -/// } -/// ``` -/// -/// If you want to call the `project()` method multiple times or later use the -/// original [`Pin`] type, it needs to use [`.as_mut()`][`Pin::as_mut`] to avoid -/// consuming the [`Pin`]. -/// -/// ```rust -/// use std::pin::Pin; -/// -/// use pin_project_lite::pin_project; -/// -/// pin_project! { -/// struct Struct { -/// #[pin] -/// field: T, -/// } -/// } -/// -/// impl Struct { -/// fn call_project_twice(mut self: Pin<&mut Self>) { -/// // `project` consumes `self`, so reborrow the `Pin<&mut Self>` via `as_mut`. -/// self.as_mut().project(); -/// self.as_mut().project(); -/// } -/// } -/// ``` -/// -/// # `!Unpin` -/// -/// If you want to ensure that [`Unpin`] is not implemented, use `#[pin]` -/// attribute for a [`PhantomPinned`] field. -/// -/// ```rust -/// use std::marker::PhantomPinned; -/// -/// use pin_project_lite::pin_project; -/// -/// pin_project! { -/// struct Struct { -/// field: T, -/// #[pin] // <------ This `#[pin]` is required to make `Struct` to `!Unpin`. -/// _pin: PhantomPinned, -/// } -/// } -/// ``` -/// -/// Note that using [`PhantomPinned`] without `#[pin]` attribute has no effect. -/// -/// [`PhantomPinned`]: core::marker::PhantomPinned -/// [`Pin::as_mut`]: core::pin::Pin::as_mut -/// [`Pin`]: core::pin::Pin -/// [pin-project]: https://github.com/taiki-e/pin-project -#[macro_export] -macro_rules! pin_project { - ($($tt:tt)*) => { - $crate::__pin_project_internal! { $($tt)* } - }; -} - -// limitations: -// * no support for tuple structs and enums. -// * no support for naming the projection types. -// * no support for multiple trait/lifetime bounds. -// * no support for `Self` in where clauses. (wontfix) -// * no support for overlapping lifetime names. (wontfix) -// * no interoperability with other field attributes. -// * no useful error messages. (wontfix) -// etc... - -// Not public API. -#[doc(hidden)] -#[macro_export] -macro_rules! __pin_project_internal { - // ============================================================================================= - // main: struct - (@struct_internal; - [$proj_vis:vis] - [$(#[$attrs:meta])* $vis:vis struct $ident:ident] - [$($def_generics:tt)*] - [$($impl_generics:tt)*] [$($ty_generics:tt)*] [$(where $($where_clause:tt)*)?] 
- { - $( - $(#[$pin:ident])? - $field_vis:vis $field:ident: $field_ty:ty - ),+ - } - ) => { - $(#[$attrs])* - $vis struct $ident $($def_generics)* - $(where - $($where_clause)*)? - { - $( - $field_vis $field: $field_ty - ),+ - } - - #[allow(explicit_outlives_requirements)] - #[allow(single_use_lifetimes)] // https://github.com/rust-lang/rust/issues/55058 - #[allow(clippy::redundant_pub_crate)] - #[allow(clippy::used_underscore_binding)] - const _: () = { - $crate::__pin_project_internal! { @make_proj_ty_struct; - [$proj_vis] - [$vis struct $ident] - [$($impl_generics)*] [$($ty_generics)*] [$(where $($where_clause)*)?] - { - $( - $(#[$pin])? - $field_vis $field: $field_ty - ),+ - } - } - - impl <$($impl_generics)*> $ident <$($ty_generics)*> - $(where - $($where_clause)*)? - { - $proj_vis fn project<'__pin>( - self: $crate::__private::Pin<&'__pin mut Self>, - ) -> Projection <'__pin, $($ty_generics)*> { - unsafe { - let Self { $($field),* } = self.get_unchecked_mut(); - Projection { - $( - $field: $crate::__pin_project_internal!(@make_unsafe_field_proj; - $(#[$pin])? $field - ) - ),+ - } - } - } - $proj_vis fn project_ref<'__pin>( - self: $crate::__private::Pin<&'__pin Self>, - ) -> ProjectionRef <'__pin, $($ty_generics)*> { - unsafe { - let Self { $($field),* } = self.get_ref(); - ProjectionRef { - $( - $field: $crate::__pin_project_internal!(@make_unsafe_field_proj; - $(#[$pin])? $field - ) - ),+ - } - } - } - } - - $crate::__pin_project_internal! { @make_unpin_impl; - [$vis $ident] - [$($impl_generics)*] [$($ty_generics)*] [$(where $($where_clause)*)?] - $( - $field: $crate::__pin_project_internal!(@make_unpin_bound; - $(#[$pin])? $field_ty - ) - ),+ - } - - $crate::__pin_project_internal! { @make_drop_impl; - [$ident] - [$($impl_generics)*] [$($ty_generics)*] [$(where $($where_clause)*)?] - } - - // Ensure that it's impossible to use pin projections on a #[repr(packed)] struct. - // - // Taking a reference to a packed field is UB, and applying - // `#[forbid(unaligned_references)]` makes sure that doing this is a hard error. - // - // If the struct ends up having #[repr(packed)] applied somehow, - // this will generate an (unfriendly) error message. Under all reasonable - // circumstances, we'll detect the #[repr(packed)] attribute, and generate - // a much nicer error above. - // - // See https://github.com/taiki-e/pin-project/pull/34 for more details. - // - // Note: - // - Lint-based tricks aren't perfect, but they're much better than nothing: - // https://github.com/taiki-e/pin-project-lite/issues/26 - // - // - Enable both unaligned_references and safe_packed_borrows lints - // because unaligned_references lint does not exist in older compilers: - // https://github.com/taiki-e/pin-project-lite/pull/55 - // https://github.com/rust-lang/rust/pull/82525 - #[forbid(unaligned_references, safe_packed_borrows)] - fn __assert_not_repr_packed <$($impl_generics)*> (this: &$ident <$($ty_generics)*>) - $(where - $($where_clause)*)? - { - $( - let _ = &this.$field; - )+ - } - }; - }; - - // ============================================================================================= - // make_proj_ty: struct - (@make_proj_ty_struct; - [$proj_vis:vis] - [$vis:vis struct $ident:ident] - [$($impl_generics:tt)*] [$($ty_generics:tt)*] [$(where $($where_clause:tt)* )?] - { - $( - $(#[$pin:ident])? - $field_vis:vis $field:ident: $field_ty:ty - ),+ - } - ) => { - #[allow(dead_code)] // This lint warns unused fields/variants. - #[allow(clippy::mut_mut)] // This lint warns `&mut &mut `. 
- #[allow(clippy::type_repetition_in_bounds)] // https://github.com/rust-lang/rust-clippy/issues/4326 - $proj_vis struct Projection <'__pin, $($impl_generics)*> - where - $ident <$($ty_generics)*>: '__pin - $(, $($where_clause)*)? - { - $( - $field_vis $field: $crate::__pin_project_internal!(@make_proj_field; - $(#[$pin])? $field_ty; mut - ) - ),+ - } - #[allow(dead_code)] // This lint warns unused fields/variants. - #[allow(clippy::type_repetition_in_bounds)] // https://github.com/rust-lang/rust-clippy/issues/4326 - $proj_vis struct ProjectionRef <'__pin, $($impl_generics)*> - where - $ident <$($ty_generics)*>: '__pin - $(, $($where_clause)*)? - { - $( - $field_vis $field: $crate::__pin_project_internal!(@make_proj_field; - $(#[$pin])? $field_ty; - ) - ),+ - } - }; - - // ============================================================================================= - // make_unpin_impl - (@make_unpin_impl; - [$vis:vis $ident:ident] - [$($impl_generics:tt)*] [$($ty_generics:tt)*] [$(where $($where_clause:tt)* )?] - $($field:tt)* - ) => { - // Automatically create the appropriate conditional `Unpin` implementation. - // - // Basically this is equivalent to the following code: - // ```rust - // impl Unpin for Struct where T: Unpin {} - // ``` - // - // However, if struct is public and there is a private type field, - // this would cause an E0446 (private type in public interface). - // - // When RFC 2145 is implemented (rust-lang/rust#48054), - // this will become a lint, rather then a hard error. - // - // As a workaround for this, we generate a new struct, containing all of the pinned - // fields from our #[pin_project] type. This struct is delcared within - // a function, which makes it impossible to be named by user code. - // This guarnatees that it will use the default auto-trait impl for Unpin - - // that is, it will implement Unpin iff all of its fields implement Unpin. - // This type can be safely declared as 'public', satisfiying the privacy - // checker without actually allowing user code to access it. - // - // This allows users to apply the #[pin_project] attribute to types - // regardless of the privacy of the types of their fields. - // - // See also https://github.com/taiki-e/pin-project/pull/53. - $vis struct __Origin <'__pin, $($impl_generics)*> - $(where - $($where_clause)*)? - { - __dummy_lifetime: $crate::__private::PhantomData<&'__pin ()>, - $($field)* - } - impl <'__pin, $($impl_generics)*> $crate::__private::Unpin for $ident <$($ty_generics)*> - where - __Origin <'__pin, $($ty_generics)*>: $crate::__private::Unpin - $(, $($where_clause)*)? - { - } - }; - - // ============================================================================================= - // make_drop_impl - (@make_drop_impl; - [$ident:ident] - [$($impl_generics:tt)*] [$($ty_generics:tt)*] [$(where $($where_clause:tt)* )?] - ) => { - // Ensure that struct does not implement `Drop`. - // - // There are two possible cases: - // 1. The user type does not implement Drop. In this case, - // the first blanked impl will not apply to it. This code - // will compile, as there is only one impl of MustNotImplDrop for the user type - // 2. The user type does impl Drop. This will make the blanket impl applicable, - // which will then comflict with the explicit MustNotImplDrop impl below. - // This will result in a compilation error, which is exactly what we want. 
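The comment above describes why a user-supplied `Drop` impl has to be rejected. A minimal standalone sketch of that blanket-impl trick follows; `MyStruct` and the surrounding framing are invented for illustration, while the macro emits the equivalent pair with the projected struct's own generics and where clause, as the `@make_drop_impl` arm just below shows.

```rust
// Guard trait in the spirit of the comment above: the blanket impl covers every
// type that implements `Drop`, so the direct impl below can only coexist with it
// while `MyStruct` does NOT implement `Drop`.
trait MustNotImplDrop {}

#[allow(drop_bounds)] // bounding on `Drop` is deliberate here
impl<T: Drop> MustNotImplDrop for T {}

struct MyStruct {
    field: u8,
}

// Adding `impl Drop for MyStruct { .. }` elsewhere would make this impl conflict
// with the blanket one above (error E0119), turning "this type must not
// implement Drop" into a compile-time check.
impl MustNotImplDrop for MyStruct {}

fn main() {
    println!("{}", MyStruct { field: 7 }.field);
}
```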
- trait MustNotImplDrop {} - #[allow(clippy::drop_bounds, drop_bounds)] - impl MustNotImplDrop for T {} - impl <$($impl_generics)*> MustNotImplDrop for $ident <$($ty_generics)*> - $(where - $($where_clause)*)? - { - } - }; - - // ============================================================================================= - // make_unpin_bound - (@make_unpin_bound; - #[pin] - $field_ty:ty - ) => { - $field_ty - }; - (@make_unpin_bound; - $field_ty:ty - ) => { - $crate::__private::AlwaysUnpin<$field_ty> - }; - - // ============================================================================================= - // make_unsafe_field_proj - (@make_unsafe_field_proj; - #[pin] - $field:ident - ) => { - $crate::__private::Pin::new_unchecked($field) - }; - (@make_unsafe_field_proj; - $field:ident - ) => { - $field - }; - - // ============================================================================================= - // make_proj_field - (@make_proj_field; - #[pin] - $field_ty:ty; - $($mut:ident)? - ) => { - $crate::__private::Pin<&'__pin $($mut)? ($field_ty)> - }; - (@make_proj_field; - $field_ty:ty; - $($mut:ident)? - ) => { - &'__pin $($mut)? ($field_ty) - }; - - // ============================================================================================= - // Parses input and determines visibility - ( - $(#[$attrs:meta])* - pub struct $ident:ident $(< - $( $lifetime:lifetime $(: $lifetime_bound:lifetime)? ),* $(,)? - $( $generics:ident - $(: $generics_bound:path)? - $(: ?$generics_unsized_bound:path)? - $(: $generics_lifetime_bound:lifetime)? - $(= $generics_default:ty)? - ),* $(,)? - >)? - $(where - $( $where_clause_ty:ty - $(: $where_clause_bound:path)? - $(: ?$where_clause_unsized_bound:path)? - $(: $where_clause_lifetime_bound:lifetime)? - ),* $(,)? - )? - { - $( - $(#[$pin:ident])? - $field_vis:vis $field:ident: $field_ty:ty - ),+ $(,)? - } - ) => { - $crate::__pin_project_internal! { @struct_internal; - [pub(crate)] - [$(#[$attrs])* pub struct $ident] - [$(< - $( $lifetime $(: $lifetime_bound)? ,)* - $( $generics - $(: $generics_bound)? - $(: ?$generics_unsized_bound)? - $(: $generics_lifetime_bound)? - $(= $generics_default)? - ),* - >)?] - [$( - $( $lifetime $(: $lifetime_bound)? ,)* - $( $generics - $(: $generics_bound)? - $(: ?$generics_unsized_bound)? - $(: $generics_lifetime_bound)? - ),* - )?] - [$( $( $lifetime ,)* $( $generics ),* )?] - [$(where $( $where_clause_ty - $(: $where_clause_bound)? - $(: ?$where_clause_unsized_bound)? - $(: $where_clause_lifetime_bound)? - ),* )?] - { - $( - $(#[$pin])? - $field_vis $field: $field_ty - ),+ - } - } - }; - ( - $(#[$attrs:meta])* - $vis:vis struct $ident:ident $(< - $( $lifetime:lifetime $(: $lifetime_bound:lifetime)? ),* $(,)? - $( $generics:ident - $(: $generics_bound:path)? - $(: ?$generics_unsized_bound:path)? - $(: $generics_lifetime_bound:lifetime)? - $(= $generics_default:ty)? - ),* $(,)? - >)? - $(where - $( $where_clause_ty:ty - $(: $where_clause_bound:path)? - $(: ?$where_clause_unsized_bound:path)? - $(: $where_clause_lifetime_bound:lifetime)? - ),* $(,)? - )? - { - $( - $(#[$pin:ident])? - $field_vis:vis $field:ident: $field_ty:ty - ),+ $(,)? - } - ) => { - $crate::__pin_project_internal! { @struct_internal; - [$vis] - [$(#[$attrs])* $vis struct $ident] - [$(< - $( $lifetime $(: $lifetime_bound)? ,)* - $( $generics - $(: $generics_bound)? - $(: ?$generics_unsized_bound)? - $(: $generics_lifetime_bound)? - $(= $generics_default)? - ),* - >)?] - [$( - $( $lifetime $(: $lifetime_bound)? 
,)* - $( $generics - $(: $generics_bound)? - $(: ?$generics_unsized_bound)? - $(: $generics_lifetime_bound)? - ),* - )?] - [$( $( $lifetime ,)* $( $generics ),* )?] - [$(where $( $where_clause_ty - $(: $where_clause_bound)? - $(: ?$where_clause_unsized_bound)? - $(: $where_clause_lifetime_bound)? - ),* )?] - { - $( - $(#[$pin])? - $field_vis $field: $field_ty - ),+ - } - } - }; -} - -// Not public API. -#[doc(hidden)] -pub mod __private { - #[doc(hidden)] - pub use core::{ - marker::{PhantomData, Unpin}, - ops::Drop, - pin::Pin, - }; - - // This is an internal helper struct used by `pin_project!`. - #[doc(hidden)] - pub struct AlwaysUnpin(PhantomData); - - impl Unpin for AlwaysUnpin {} -} diff --git a/third_party/rust/pin-project-lite-0.1.12/tests/compiletest.rs b/third_party/rust/pin-project-lite-0.1.12/tests/compiletest.rs deleted file mode 100644 index d18149101856..000000000000 --- a/third_party/rust/pin-project-lite-0.1.12/tests/compiletest.rs +++ /dev/null @@ -1,8 +0,0 @@ -#![warn(rust_2018_idioms, single_use_lifetimes)] - -#[rustversion::attr(not(nightly), ignore)] -#[test] -fn ui() { - let t = trybuild::TestCases::new(); - t.compile_fail("tests/ui/*.rs"); -} diff --git a/third_party/rust/pin-project-lite-0.1.12/tests/include/basic.rs b/third_party/rust/pin-project-lite-0.1.12/tests/include/basic.rs deleted file mode 100644 index 967cf81d6e56..000000000000 --- a/third_party/rust/pin-project-lite-0.1.12/tests/include/basic.rs +++ /dev/null @@ -1,10 +0,0 @@ -// default pin_project! is completely safe. - -::pin_project_lite::pin_project! { - #[derive(Debug)] - pub struct DefaultStruct { - #[pin] - pub pinned: T, - pub unpinned: U, - } -} diff --git a/third_party/rust/pin-project-lite-0.1.12/tests/lint.rs b/third_party/rust/pin-project-lite-0.1.12/tests/lint.rs deleted file mode 100644 index dc0649dfe0a8..000000000000 --- a/third_party/rust/pin-project-lite-0.1.12/tests/lint.rs +++ /dev/null @@ -1,131 +0,0 @@ -#![forbid(unsafe_code)] -#![warn(nonstandard_style, rust_2018_idioms, rustdoc, unused)] -// Note: This does not guarantee compatibility with forbidding these lints in the future. -// If rustc adds a new lint, we may not be able to keep this. -#![forbid(future_incompatible, rust_2018_compatibility)] -#![allow(unknown_lints)] // for old compilers -#![warn( - box_pointers, - deprecated_in_future, - elided_lifetimes_in_paths, - explicit_outlives_requirements, - macro_use_extern_crate, - meta_variable_misuse, - missing_copy_implementations, - missing_crate_level_docs, - missing_debug_implementations, - missing_docs, - non_ascii_idents, - single_use_lifetimes, - trivial_casts, - trivial_numeric_casts, - unaligned_references, - unreachable_pub, - unused_extern_crates, - unused_import_braces, - unused_lifetimes, - unused_qualifications, - unused_results, - variant_size_differences -)] -// absolute_paths_not_starting_with_crate, anonymous_parameters, keyword_idents, pointer_structural_match: forbidden as a part of future_incompatible -// missing_doc_code_examples, private_doc_tests, invalid_html_tags: warned as a part of rustdoc -// unsafe_block_in_unsafe_fn: unstable -// unsafe_code: forbidden -// unstable_features: deprecated: https://doc.rust-lang.org/beta/rustc/lints/listing/allowed-by-default.html#unstable-features -// unused_crate_dependencies: unrelated -#![warn(clippy::all, clippy::pedantic, clippy::nursery)] -#![warn(clippy::restriction)] -#![allow(clippy::blanket_clippy_restriction_lints)] // this is a test, so enable all restriction lints intentionally. 
- -// Check interoperability with rustc and clippy lints. - -pub mod basic { - include!("include/basic.rs"); -} - -pub mod box_pointers { - use pin_project_lite::pin_project; - - pin_project! { - #[derive(Debug)] - pub struct Struct { - #[pin] - pub p: Box, - pub u: Box, - } - } -} - -pub mod explicit_outlives_requirements { - use pin_project_lite::pin_project; - - pin_project! { - #[derive(Debug)] - pub struct Struct<'a, T, U> - where - T: ?Sized, - U: ?Sized, - { - #[pin] - pub pinned: &'a mut T, - pub unpinned: &'a mut U, - } - } -} - -pub mod clippy_mut_mut { - use pin_project_lite::pin_project; - - pin_project! { - #[derive(Debug)] - pub struct Struct<'a, T, U> { - #[pin] - pub pinned: &'a mut T, - pub unpinned: &'a mut U, - } - } -} - -#[allow(unreachable_pub)] -mod clippy_redundant_pub_crate { - use pin_project_lite::pin_project; - - pin_project! { - #[derive(Debug)] - pub struct Struct { - #[pin] - pub pinned: T, - pub unpinned: U, - } - } -} - -pub mod clippy_type_repetition_in_bounds { - use pin_project_lite::pin_project; - - pin_project! { - #[derive(Debug)] - pub struct Struct - where - Struct: Sized, - { - #[pin] - pub pinned: T, - pub unpinned: U, - } - } -} - -pub mod clippy_used_underscore_binding { - use pin_project_lite::pin_project; - - pin_project! { - #[derive(Debug)] - pub struct Struct { - #[pin] - pub _pinned: T, - pub _unpinned: U, - } - } -} diff --git a/third_party/rust/pin-project-lite-0.1.12/tests/proper_unpin.rs b/third_party/rust/pin-project-lite-0.1.12/tests/proper_unpin.rs deleted file mode 100644 index bea139e705f3..000000000000 --- a/third_party/rust/pin-project-lite-0.1.12/tests/proper_unpin.rs +++ /dev/null @@ -1,50 +0,0 @@ -#![warn(rust_2018_idioms, single_use_lifetimes)] -#![allow(dead_code)] - -#[macro_use] -mod auxiliary; - -pub mod default { - use std::marker::PhantomPinned; - - use pin_project_lite::pin_project; - - struct Inner { - f: T, - } - - assert_unpin!(Inner<()>); - assert_not_unpin!(Inner); - - pin_project! { - struct Foo { - #[pin] - f1: Inner, - f2: U, - } - } - - assert_unpin!(Foo<(), ()>); - assert_unpin!(Foo<(), PhantomPinned>); - assert_not_unpin!(Foo); - assert_not_unpin!(Foo); - - pin_project! { - struct TrivialBounds { - #[pin] - f: PhantomPinned, - } - } - - assert_not_unpin!(TrivialBounds); - - pin_project! { - struct Bar<'a, T, U> { - #[pin] - f1: &'a mut Inner, - f2: U, - } - } - - assert_unpin!(Bar<'_, PhantomPinned, PhantomPinned>); -} diff --git a/third_party/rust/pin-project-lite-0.1.12/tests/test.rs b/third_party/rust/pin-project-lite-0.1.12/tests/test.rs deleted file mode 100644 index 34256c975709..000000000000 --- a/third_party/rust/pin-project-lite-0.1.12/tests/test.rs +++ /dev/null @@ -1,416 +0,0 @@ -#![warn(rust_2018_idioms, single_use_lifetimes)] -#![allow(dead_code)] - -#[macro_use] -mod auxiliary; - -use core::{marker::PhantomPinned, pin::Pin}; - -use pin_project_lite::pin_project; - -#[test] -fn projection() { - pin_project! { - struct Struct { - #[pin] - f1: T, - f2: U, - } - } - - let mut s = Struct { f1: 1, f2: 2 }; - let mut s_orig = Pin::new(&mut s); - let s = s_orig.as_mut().project(); - - let x: Pin<&mut i32> = s.f1; - assert_eq!(*x, 1); - - let y: &mut i32 = s.f2; - assert_eq!(*y, 2); - - assert_eq!(s_orig.as_ref().f1, 1); - assert_eq!(s_orig.as_ref().f2, 2); - - let mut s = Struct { f1: 1, f2: 2 }; - - let s = Pin::new(&mut s).project(); - - let _: Pin<&mut i32> = s.f1; - let _: &mut i32 = s.f2; -} - -#[test] -fn where_clause() { - pin_project! 
{ - struct Struct - where - T: Copy, - { - f: T, - } - } -} - -#[test] -fn where_clause_and_associated_type_field() { - pin_project! { - struct Struct1 - where - I: Iterator, - { - #[pin] - f1: I, - f2: I::Item, - } - } - - pin_project! { - struct Struct2 - where - I: Iterator, - { - #[pin] - f1: I, - f2: J, - } - } - - pin_project! { - pub struct Struct3 - where - T: 'static, - { - f: T, - } - } - - trait Static: 'static {} - - impl Static for Struct3 {} -} - -#[test] -fn derive_copy() { - pin_project! { - #[derive(Clone, Copy)] - struct Struct { - f: T, - } - } - - fn is_copy() {} - - is_copy::>(); -} - -#[test] -fn move_out() { - struct NotCopy; - - pin_project! { - struct Struct { - f: NotCopy, - } - } - - let x = Struct { f: NotCopy }; - let _val: NotCopy = x.f; -} - -#[test] -fn trait_bounds_on_type_generics() { - pin_project! { - pub struct Struct1<'a, T: ?Sized> { - f: &'a mut T, - } - } - - pin_project! { - pub struct Struct2<'a, T: ::core::fmt::Debug> { - f: &'a mut T, - } - } - - pin_project! { - pub struct Struct3<'a, T: core::fmt::Debug> { - f: &'a mut T, - } - } - - // pin_project! { - // pub struct Struct4<'a, T: core::fmt::Debug + core::fmt::Display> { - // f: &'a mut T, - // } - // } - - // pin_project! { - // pub struct Struct5<'a, T: core::fmt::Debug + ?Sized> { - // f: &'a mut T, - // } - // } - - pin_project! { - pub struct Struct6<'a, T: core::fmt::Debug = [u8; 16]> { - f: &'a mut T, - } - } - - let _: Struct6<'_> = Struct6 { f: &mut [0u8; 16] }; - - pin_project! { - pub struct Struct7 { - f: T, - } - } - - trait Static: 'static {} - - impl Static for Struct7 {} - - pin_project! { - pub struct Struct8<'a, 'b: 'a> { - f1: &'a u8, - f2: &'b u8, - } - } -} - -#[test] -fn private_type_in_public_type() { - pin_project! { - pub struct PublicStruct { - #[pin] - inner: PrivateStruct, - } - } - - struct PrivateStruct(T); -} - -#[allow(clippy::needless_lifetimes)] -#[test] -fn lifetime_project() { - pin_project! { - struct Struct1 { - #[pin] - pinned: T, - unpinned: U, - } - } - - pin_project! { - struct Struct2<'a, T, U> { - #[pin] - pinned: &'a mut T, - unpinned: U, - } - } - - impl Struct1 { - fn get_pin_ref<'a>(self: Pin<&'a Self>) -> Pin<&'a T> { - self.project_ref().pinned - } - fn get_pin_mut<'a>(self: Pin<&'a mut Self>) -> Pin<&'a mut T> { - self.project().pinned - } - fn get_pin_ref_elided(self: Pin<&Self>) -> Pin<&T> { - self.project_ref().pinned - } - fn get_pin_mut_elided(self: Pin<&mut Self>) -> Pin<&mut T> { - self.project().pinned - } - } - - impl<'b, T, U> Struct2<'b, T, U> { - fn get_pin_ref<'a>(self: Pin<&'a Self>) -> Pin<&'a &'b mut T> { - self.project_ref().pinned - } - fn get_pin_mut<'a>(self: Pin<&'a mut Self>) -> Pin<&'a mut &'b mut T> { - self.project().pinned - } - fn get_pin_ref_elided(self: Pin<&Self>) -> Pin<&&'b mut T> { - self.project_ref().pinned - } - fn get_pin_mut_elided(self: Pin<&mut Self>) -> Pin<&mut &'b mut T> { - self.project().pinned - } - } -} - -mod visibility { - use pin_project_lite::pin_project; - - pin_project! { - pub(crate) struct S { - pub f: u8, - } - } -} - -#[test] -fn visibility() { - let mut x = visibility::S { f: 0 }; - let x = Pin::new(&mut x); - let y = x.as_ref().project_ref(); - let _: &u8 = y.f; - let y = x.project(); - let _: &mut u8 = y.f; -} - -#[test] -fn trivial_bounds() { - pin_project! { - pub struct NoGenerics { - #[pin] - f: PhantomPinned, - } - } - - assert_not_unpin!(NoGenerics); -} - -#[test] -fn dst() { - pin_project! 
{ - pub struct Struct1 { - f: T, - } - } - - let mut x = Struct1 { f: 0_u8 }; - let x: Pin<&mut Struct1> = Pin::new(&mut x as _); - let _y: &mut (dyn core::fmt::Debug) = x.project().f; - - pin_project! { - pub struct Struct2 { - #[pin] - f: T, - } - } - - let mut x = Struct2 { f: 0_u8 }; - let x: Pin<&mut Struct2> = Pin::new(&mut x as _); - let _y: Pin<&mut (dyn core::fmt::Debug + Unpin)> = x.project().f; - - pin_project! { - struct Struct3 - where - T: ?Sized, - { - f: T, - } - } - - pin_project! { - struct Struct4 - where - T: ?Sized, - { - #[pin] - f: T, - } - } - - pin_project! { - struct Struct11<'a, T: ?Sized, U: ?Sized> { - f1: &'a mut T, - f2: U, - } - } -} - -#[test] -fn dyn_type() { - pin_project! { - struct Struct1 { - f: dyn core::fmt::Debug, - } - } - - pin_project! { - struct Struct2 { - #[pin] - f: dyn core::fmt::Debug, - } - } - - pin_project! { - struct Struct3 { - f: dyn core::fmt::Debug + Send, - } - } - - pin_project! { - struct Struct4 { - #[pin] - f: dyn core::fmt::Debug + Send, - } - } -} - -#[test] -fn no_infer_outlives() { - trait Trait { - type Y; - } - - struct Struct1(A); - - impl Trait for Struct1 { - type Y = Option; - } - - pin_project! { - struct Struct2 { - _f: as Trait>::Y, - } - } -} - -// https://github.com/taiki-e/pin-project-lite/issues/31 -#[test] -fn trailing_comma() { - pub trait T {} - - pin_project! { - pub struct S1< - A: T, - B: T, - > { - f: (A, B), - } - } - - pin_project! { - pub struct S2< - A, - B, - > - where - A: T, - B: T, - { - f: (A, B), - } - } - - pin_project! { - #[allow(explicit_outlives_requirements)] - pub struct S3< - 'a, - A: 'a, - B: 'a, - > { - f: &'a (A, B), - } - } - - // pin_project! { - // pub struct S4< - // 'a, - // 'b: 'a, // <----- - // > { - // f: &'a &'b (), - // } - // } -} diff --git a/third_party/rust/pin-project-lite-0.1.12/tests/ui/conflict-drop.rs b/third_party/rust/pin-project-lite-0.1.12/tests/ui/conflict-drop.rs deleted file mode 100644 index 870059d62f98..000000000000 --- a/third_party/rust/pin-project-lite-0.1.12/tests/ui/conflict-drop.rs +++ /dev/null @@ -1,15 +0,0 @@ -use pin_project_lite::pin_project; - -pin_project! { //~ ERROR E0119 - struct Foo { - #[pin] - future: T, - field: U, - } -} - -impl Drop for Foo { - fn drop(&mut self) {} -} - -fn main() {} diff --git a/third_party/rust/pin-project-lite-0.1.12/tests/ui/conflict-drop.stderr b/third_party/rust/pin-project-lite-0.1.12/tests/ui/conflict-drop.stderr deleted file mode 100644 index f97c92b6739a..000000000000 --- a/third_party/rust/pin-project-lite-0.1.12/tests/ui/conflict-drop.stderr +++ /dev/null @@ -1,16 +0,0 @@ -error[E0119]: conflicting implementations of trait `_::MustNotImplDrop` for type `Foo<_, _>`: - --> $DIR/conflict-drop.rs:3:1 - | -3 | / pin_project! { //~ ERROR E0119 -4 | | struct Foo { -5 | | #[pin] -6 | | future: T, -7 | | field: U, -8 | | } -9 | | } - | | ^ - | | | - | |_first implementation here - | conflicting implementation for `Foo<_, _>` - | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/third_party/rust/pin-project-lite-0.1.12/tests/ui/conflict-unpin.rs b/third_party/rust/pin-project-lite-0.1.12/tests/ui/conflict-unpin.rs deleted file mode 100644 index f702f064deb6..000000000000 --- a/third_party/rust/pin-project-lite-0.1.12/tests/ui/conflict-unpin.rs +++ /dev/null @@ -1,40 +0,0 @@ -use pin_project_lite::pin_project; - -// The same implementation. - -pin_project! 
{ //~ ERROR E0119 - struct Foo { - #[pin] - future: T, - field: U, - } -} - -// conflicting implementations -impl Unpin for Foo where T: Unpin {} // Conditional Unpin impl - -// The implementation that under different conditions. - -pin_project! { //~ ERROR E0119 - struct Bar { - #[pin] - future: T, - field: U, - } -} - -// conflicting implementations -impl Unpin for Bar {} // Non-conditional Unpin impl - -pin_project! { //~ ERROR E0119 - struct Baz { - #[pin] - future: T, - field: U, - } -} - -// conflicting implementations -impl Unpin for Baz {} // Conditional Unpin impl - -fn main() {} diff --git a/third_party/rust/pin-project-lite-0.1.12/tests/ui/conflict-unpin.stderr b/third_party/rust/pin-project-lite-0.1.12/tests/ui/conflict-unpin.stderr deleted file mode 100644 index 546dafdbbe03..000000000000 --- a/third_party/rust/pin-project-lite-0.1.12/tests/ui/conflict-unpin.stderr +++ /dev/null @@ -1,50 +0,0 @@ -error[E0119]: conflicting implementations of trait `std::marker::Unpin` for type `Foo<_, _>`: - --> $DIR/conflict-unpin.rs:5:1 - | -5 | / pin_project! { //~ ERROR E0119 -6 | | struct Foo { -7 | | #[pin] -8 | | future: T, -9 | | field: U, -10 | | } -11 | | } - | |_^ conflicting implementation for `Foo<_, _>` -... -14 | impl Unpin for Foo where T: Unpin {} // Conditional Unpin impl - | --------------------------------------------- first implementation here - | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) - -error[E0119]: conflicting implementations of trait `std::marker::Unpin` for type `Bar<_, _>`: - --> $DIR/conflict-unpin.rs:18:1 - | -18 | / pin_project! { //~ ERROR E0119 -19 | | struct Bar { -20 | | #[pin] -21 | | future: T, -22 | | field: U, -23 | | } -24 | | } - | |_^ conflicting implementation for `Bar<_, _>` -... -27 | impl Unpin for Bar {} // Non-conditional Unpin impl - | ------------------------------ first implementation here - | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) - -error[E0119]: conflicting implementations of trait `std::marker::Unpin` for type `Baz<_, _>`: - --> $DIR/conflict-unpin.rs:29:1 - | -29 | / pin_project! { //~ ERROR E0119 -30 | | struct Baz { -31 | | #[pin] -32 | | future: T, -33 | | field: U, -34 | | } -35 | | } - | |_^ conflicting implementation for `Baz<_, _>` -... -38 | impl Unpin for Baz {} // Conditional Unpin impl - | -------------------------------------------- first implementation here - | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/third_party/rust/pin-project-lite-0.1.12/tests/ui/invalid-bounds.rs b/third_party/rust/pin-project-lite-0.1.12/tests/ui/invalid-bounds.rs deleted file mode 100644 index 64b397a37ed1..000000000000 --- a/third_party/rust/pin-project-lite-0.1.12/tests/ui/invalid-bounds.rs +++ /dev/null @@ -1,93 +0,0 @@ -use pin_project_lite::pin_project; - -pin_project! { - struct Generics1 { //~ ERROR no rules expected the token `:` - field: T, - } -} - -pin_project! { - struct Generics2 { //~ ERROR no rules expected the token `:` - field: T, - } -} - -pin_project! { - struct Generics3 { //~ ERROR expected one of `+`, `,`, `=`, or `>`, found `:` - field: T, - } -} - -pin_project! { - struct Generics4 { //~ ERROR expected one of `+`, `,`, `=`, or `>`, found `:` - field: T, - } -} - -pin_project! { - struct Generics5 { //~ ERROR expected one of `+`, `,`, `=`, or `>`, found `:` - field: T, - } -} - -pin_project! 
{ - struct Generics6 { //~ ERROR no rules expected the token `Sized` - field: T, - } -} - -pin_project! { - struct WhereClause1 - where - T: 'static : Sized //~ ERROR no rules expected the token `:` - { - field: T, - } -} - -pin_project! { - struct WhereClause2 - where - T: 'static : ?Sized //~ ERROR no rules expected the token `:` - { - field: T, - } -} - -pin_project! { - struct WhereClause3 - where - T: Sized : 'static //~ ERROR expected `where`, or `{` after struct name, found `:` - { - field: T, - } -} - -pin_project! { - struct WhereClause4 - where - T: ?Sized : 'static //~ ERROR expected `where`, or `{` after struct name, found `:` - { - field: T, - } -} - -pin_project! { - struct WhereClause5 - where - T: Sized : ?Sized //~ ERROR expected `where`, or `{` after struct name, found `:` - { - field: T, - } -} - -pin_project! { - struct WhereClause6 - where - T: ?Sized : Sized //~ ERROR no rules expected the token `Sized` - { - field: T, - } -} - -fn main() {} diff --git a/third_party/rust/pin-project-lite-0.1.12/tests/ui/invalid-bounds.stderr b/third_party/rust/pin-project-lite-0.1.12/tests/ui/invalid-bounds.stderr deleted file mode 100644 index 59e9b136a879..000000000000 --- a/third_party/rust/pin-project-lite-0.1.12/tests/ui/invalid-bounds.stderr +++ /dev/null @@ -1,134 +0,0 @@ -error: no rules expected the token `:` - --> $DIR/invalid-bounds.rs:4:33 - | -4 | struct Generics1 { //~ ERROR no rules expected the token `:` - | ^ no rules expected this token in macro call - -error: no rules expected the token `:` - --> $DIR/invalid-bounds.rs:10:33 - | -10 | struct Generics2 { //~ ERROR no rules expected the token `:` - | ^ no rules expected this token in macro call - -error: expected one of `+`, `,`, `=`, or `>`, found `:` - --> $DIR/invalid-bounds.rs:15:1 - | -15 | / pin_project! { -16 | | struct Generics3 { //~ ERROR expected one of `+`, `,`, `=`, or `>`, found `:` -17 | | field: T, -18 | | } -19 | | } - | | ^ - | | | - | | expected one of `+`, `,`, `=`, or `>` - | |_unexpected token - | in this macro invocation - | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) - -error: expected one of `+`, `,`, `=`, or `>`, found `:` - --> $DIR/invalid-bounds.rs:21:1 - | -21 | / pin_project! { -22 | | struct Generics4 { //~ ERROR expected one of `+`, `,`, `=`, or `>`, found `:` -23 | | field: T, -24 | | } -25 | | } - | | ^ - | | | - | | expected one of `+`, `,`, `=`, or `>` - | |_unexpected token - | in this macro invocation - | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) - -error: expected one of `+`, `,`, `=`, or `>`, found `:` - --> $DIR/invalid-bounds.rs:27:1 - | -27 | / pin_project! 
{ -28 | | struct Generics5 { //~ ERROR expected one of `+`, `,`, `=`, or `>`, found `:` -29 | | field: T, -30 | | } -31 | | } - | | ^ - | | | - | | expected one of `+`, `,`, `=`, or `>` - | |_unexpected token - | in this macro invocation - | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) - -error: no rules expected the token `Sized` - --> $DIR/invalid-bounds.rs:34:34 - | -34 | struct Generics6 { //~ ERROR no rules expected the token `Sized` - | ^^^^^ no rules expected this token in macro call - -error: no rules expected the token `:` - --> $DIR/invalid-bounds.rs:42:20 - | -42 | T: 'static : Sized //~ ERROR no rules expected the token `:` - | ^ no rules expected this token in macro call - -error: no rules expected the token `:` - --> $DIR/invalid-bounds.rs:51:20 - | -51 | T: 'static : ?Sized //~ ERROR no rules expected the token `:` - | ^ no rules expected this token in macro call - -error: expected `where`, or `{` after struct name, found `:` - --> $DIR/invalid-bounds.rs:57:1 - | -57 | / pin_project! { -58 | | struct WhereClause3 -59 | | where -60 | | T: Sized : 'static //~ ERROR expected `where`, or `{` after struct name, found `:` -... | -63 | | } -64 | | } - | | ^ - | | | - | |_expected `where`, or `{` after struct name - | in this macro invocation - | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) - -error: expected `where`, or `{` after struct name, found `:` - --> $DIR/invalid-bounds.rs:66:1 - | -66 | / pin_project! { -67 | | struct WhereClause4 -68 | | where -69 | | T: ?Sized : 'static //~ ERROR expected `where`, or `{` after struct name, found `:` -... | -72 | | } -73 | | } - | | ^ - | | | - | |_expected `where`, or `{` after struct name - | in this macro invocation - | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) - -error: expected `where`, or `{` after struct name, found `:` - --> $DIR/invalid-bounds.rs:75:1 - | -75 | / pin_project! { -76 | | struct WhereClause5 -77 | | where -78 | | T: Sized : ?Sized //~ ERROR expected `where`, or `{` after struct name, found `:` -... | -81 | | } -82 | | } - | | ^ - | | | - | |_expected `where`, or `{` after struct name - | in this macro invocation - | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) - -error: no rules expected the token `Sized` - --> $DIR/invalid-bounds.rs:87:21 - | -87 | T: ?Sized : Sized //~ ERROR no rules expected the token `Sized` - | ^^^^^ no rules expected this token in macro call diff --git a/third_party/rust/pin-project-lite-0.1.12/tests/ui/invalid.rs b/third_party/rust/pin-project-lite-0.1.12/tests/ui/invalid.rs deleted file mode 100644 index e0ea61d4f77a..000000000000 --- a/third_party/rust/pin-project-lite-0.1.12/tests/ui/invalid.rs +++ /dev/null @@ -1,25 +0,0 @@ -use pin_project_lite::pin_project; - -pin_project! { - struct A { - #[pin()] //~ ERROR no rules expected the token `(` - pinned: T, - } -} - -pin_project! { - #[pin] //~ ERROR cannot find attribute `pin` in this scope - struct B { - pinned: T, - } -} - -pin_project! 
{ - struct C { - #[pin] - #[pin] //~ ERROR no rules expected the token `#` - pinned: T, - } -} - -fn main() {} diff --git a/third_party/rust/pin-project-lite-0.1.12/tests/ui/invalid.stderr b/third_party/rust/pin-project-lite-0.1.12/tests/ui/invalid.stderr deleted file mode 100644 index f780e2e69af9..000000000000 --- a/third_party/rust/pin-project-lite-0.1.12/tests/ui/invalid.stderr +++ /dev/null @@ -1,17 +0,0 @@ -error: no rules expected the token `(` - --> $DIR/invalid.rs:5:14 - | -5 | #[pin()] //~ ERROR no rules expected the token `(` - | ^ no rules expected this token in macro call - -error: no rules expected the token `#` - --> $DIR/invalid.rs:20:9 - | -20 | #[pin] //~ ERROR no rules expected the token `#` - | ^ no rules expected this token in macro call - -error: cannot find attribute `pin` in this scope - --> $DIR/invalid.rs:11:7 - | -11 | #[pin] //~ ERROR cannot find attribute `pin` in this scope - | ^^^ diff --git a/third_party/rust/pin-project-lite-0.1.12/tests/ui/overlapping_lifetime_names.rs b/third_party/rust/pin-project-lite-0.1.12/tests/ui/overlapping_lifetime_names.rs deleted file mode 100644 index 87a737e2fa1d..000000000000 --- a/third_party/rust/pin-project-lite-0.1.12/tests/ui/overlapping_lifetime_names.rs +++ /dev/null @@ -1,10 +0,0 @@ -use pin_project_lite::pin_project; - -pin_project! { //~ ERROR E0496 - pub struct Foo<'__pin, T> { //~ ERROR E0263 - #[pin] - field: &'__pin mut T, - } -} - -fn main() {} diff --git a/third_party/rust/pin-project-lite-0.1.12/tests/ui/overlapping_lifetime_names.stderr b/third_party/rust/pin-project-lite-0.1.12/tests/ui/overlapping_lifetime_names.stderr deleted file mode 100644 index 8a9bb4fc91b1..000000000000 --- a/third_party/rust/pin-project-lite-0.1.12/tests/ui/overlapping_lifetime_names.stderr +++ /dev/null @@ -1,75 +0,0 @@ -error[E0496]: lifetime name `'__pin` shadows a lifetime name that is already in scope - --> $DIR/overlapping_lifetime_names.rs:3:1 - | -3 | / pin_project! { //~ ERROR E0496 -4 | | pub struct Foo<'__pin, T> { //~ ERROR E0263 - | | ------ first declared here -5 | | #[pin] -6 | | field: &'__pin mut T, -7 | | } -8 | | } - | |_^ lifetime `'__pin` already in scope - | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) - -error[E0496]: lifetime name `'__pin` shadows a lifetime name that is already in scope - --> $DIR/overlapping_lifetime_names.rs:3:1 - | -3 | / pin_project! { //~ ERROR E0496 -4 | | pub struct Foo<'__pin, T> { //~ ERROR E0263 - | | ------ first declared here -5 | | #[pin] -6 | | field: &'__pin mut T, -7 | | } -8 | | } - | |_^ lifetime `'__pin` already in scope - | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) - -error[E0263]: lifetime name `'__pin` declared twice in the same scope - --> $DIR/overlapping_lifetime_names.rs:4:20 - | -3 | / pin_project! { //~ ERROR E0496 -4 | | pub struct Foo<'__pin, T> { //~ ERROR E0263 - | | ^^^^^^ declared twice -5 | | #[pin] -6 | | field: &'__pin mut T, -7 | | } -8 | | } - | |_- previous declaration here - -error[E0263]: lifetime name `'__pin` declared twice in the same scope - --> $DIR/overlapping_lifetime_names.rs:4:20 - | -3 | / pin_project! 
{ //~ ERROR E0496 -4 | | pub struct Foo<'__pin, T> { //~ ERROR E0263 - | | ^^^^^^ declared twice -5 | | #[pin] -6 | | field: &'__pin mut T, -7 | | } -8 | | } - | |_- previous declaration here - -error[E0263]: lifetime name `'__pin` declared twice in the same scope - --> $DIR/overlapping_lifetime_names.rs:4:20 - | -3 | / pin_project! { //~ ERROR E0496 -4 | | pub struct Foo<'__pin, T> { //~ ERROR E0263 - | | ^^^^^^ declared twice -5 | | #[pin] -6 | | field: &'__pin mut T, -7 | | } -8 | | } - | |_- previous declaration here - -error[E0263]: lifetime name `'__pin` declared twice in the same scope - --> $DIR/overlapping_lifetime_names.rs:4:20 - | -3 | / pin_project! { //~ ERROR E0496 -4 | | pub struct Foo<'__pin, T> { //~ ERROR E0263 - | | ^^^^^^ declared twice -5 | | #[pin] -6 | | field: &'__pin mut T, -7 | | } -8 | | } - | |_- previous declaration here diff --git a/third_party/rust/pin-project-lite-0.1.12/tests/ui/overlapping_unpin_struct.rs b/third_party/rust/pin-project-lite-0.1.12/tests/ui/overlapping_unpin_struct.rs deleted file mode 100644 index 13385243073f..000000000000 --- a/third_party/rust/pin-project-lite-0.1.12/tests/ui/overlapping_unpin_struct.rs +++ /dev/null @@ -1,19 +0,0 @@ -use pin_project_lite::pin_project; -use std::marker::PhantomPinned; - -pin_project! { - struct Foo { - #[pin] - inner: T, - } -} - -struct __Origin {} - -impl Unpin for __Origin {} - -fn is_unpin() {} - -fn main() { - is_unpin::>(); //~ ERROR E0277 -} diff --git a/third_party/rust/pin-project-lite-0.1.12/tests/ui/overlapping_unpin_struct.stderr b/third_party/rust/pin-project-lite-0.1.12/tests/ui/overlapping_unpin_struct.stderr deleted file mode 100644 index ab76f81d9bdf..000000000000 --- a/third_party/rust/pin-project-lite-0.1.12/tests/ui/overlapping_unpin_struct.stderr +++ /dev/null @@ -1,11 +0,0 @@ -error[E0277]: `PhantomPinned` cannot be unpinned - --> $DIR/overlapping_unpin_struct.rs:18:5 - | -15 | fn is_unpin() {} - | ----- required by this bound in `is_unpin` -... -18 | is_unpin::>(); //~ ERROR E0277 - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ within `_::__Origin<'_, PhantomPinned>`, the trait `Unpin` is not implemented for `PhantomPinned` - | - = note: required because it appears within the type `_::__Origin<'_, PhantomPinned>` - = note: required because of the requirements on the impl of `Unpin` for `Foo` diff --git a/third_party/rust/pin-project-lite-0.1.12/tests/ui/packed.rs b/third_party/rust/pin-project-lite-0.1.12/tests/ui/packed.rs deleted file mode 100644 index 507a0385fb16..000000000000 --- a/third_party/rust/pin-project-lite-0.1.12/tests/ui/packed.rs +++ /dev/null @@ -1,19 +0,0 @@ -use pin_project_lite::pin_project; - -pin_project! { //~ ERROR reference to packed field is unaligned - #[repr(packed, C)] - struct Packed { - #[pin] - field: u16, - } -} - -pin_project! { //~ ERROR reference to packed field is unaligned - #[repr(packed(2))] - struct PackedN { - #[pin] - field: u32, - } -} - -fn main() {} diff --git a/third_party/rust/pin-project-lite-0.1.12/tests/ui/packed.stderr b/third_party/rust/pin-project-lite-0.1.12/tests/ui/packed.stderr deleted file mode 100644 index 14ffc86d4244..000000000000 --- a/third_party/rust/pin-project-lite-0.1.12/tests/ui/packed.stderr +++ /dev/null @@ -1,107 +0,0 @@ -error: reference to packed field is unaligned - --> $DIR/packed.rs:3:1 - | -3 | / pin_project! 
{ //~ ERROR reference to packed field is unaligned -4 | | #[repr(packed, C)] -5 | | struct Packed { -6 | | #[pin] -7 | | field: u16, -8 | | } -9 | | } - | |_^ - | -note: the lint level is defined here - --> $DIR/packed.rs:3:1 - | -3 | / pin_project! { //~ ERROR reference to packed field is unaligned -4 | | #[repr(packed, C)] -5 | | struct Packed { -6 | | #[pin] -7 | | field: u16, -8 | | } -9 | | } - | |_^ - = note: fields of packed structs are not properly aligned, and creating a misaligned reference is undefined behavior (even if that reference is never dereferenced) - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) - -error: reference to packed field is unaligned - --> $DIR/packed.rs:11:1 - | -11 | / pin_project! { //~ ERROR reference to packed field is unaligned -12 | | #[repr(packed(2))] -13 | | struct PackedN { -14 | | #[pin] -15 | | field: u32, -16 | | } -17 | | } - | |_^ - | -note: the lint level is defined here - --> $DIR/packed.rs:11:1 - | -11 | / pin_project! { //~ ERROR reference to packed field is unaligned -12 | | #[repr(packed(2))] -13 | | struct PackedN { -14 | | #[pin] -15 | | field: u32, -16 | | } -17 | | } - | |_^ - = note: fields of packed structs are not properly aligned, and creating a misaligned reference is undefined behavior (even if that reference is never dereferenced) - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) - -error: borrow of packed field is unsafe and requires unsafe function or block (error E0133) - --> $DIR/packed.rs:3:1 - | -3 | / pin_project! { //~ ERROR reference to packed field is unaligned -4 | | #[repr(packed, C)] -5 | | struct Packed { -6 | | #[pin] -7 | | field: u16, -8 | | } -9 | | } - | |_^ - | -note: the lint level is defined here - --> $DIR/packed.rs:3:1 - | -3 | / pin_project! { //~ ERROR reference to packed field is unaligned -4 | | #[repr(packed, C)] -5 | | struct Packed { -6 | | #[pin] -7 | | field: u16, -8 | | } -9 | | } - | |_^ - = warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release! - = note: for more information, see issue #46043 - = note: fields of packed structs might be misaligned: dereferencing a misaligned pointer or even just creating a misaligned reference is undefined behavior - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) - -error: borrow of packed field is unsafe and requires unsafe function or block (error E0133) - --> $DIR/packed.rs:11:1 - | -11 | / pin_project! { //~ ERROR reference to packed field is unaligned -12 | | #[repr(packed(2))] -13 | | struct PackedN { -14 | | #[pin] -15 | | field: u32, -16 | | } -17 | | } - | |_^ - | -note: the lint level is defined here - --> $DIR/packed.rs:11:1 - | -11 | / pin_project! { //~ ERROR reference to packed field is unaligned -12 | | #[repr(packed(2))] -13 | | struct PackedN { -14 | | #[pin] -15 | | field: u32, -16 | | } -17 | | } - | |_^ - = warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release! 
- = note: for more information, see issue #46043 - = note: fields of packed structs might be misaligned: dereferencing a misaligned pointer or even just creating a misaligned reference is undefined behavior - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/third_party/rust/pin-project-lite-0.1.12/tests/ui/unpin_sneaky.rs b/third_party/rust/pin-project-lite-0.1.12/tests/ui/unpin_sneaky.rs deleted file mode 100644 index 984cc2a21909..000000000000 --- a/third_party/rust/pin-project-lite-0.1.12/tests/ui/unpin_sneaky.rs +++ /dev/null @@ -1,12 +0,0 @@ -use pin_project_lite::pin_project; - -pin_project! { - struct Foo { - #[pin] - inner: u8, - } -} - -impl Unpin for __Origin {} //~ ERROR E0412,E0321 - -fn main() {} diff --git a/third_party/rust/pin-project-lite-0.1.12/tests/ui/unpin_sneaky.stderr b/third_party/rust/pin-project-lite-0.1.12/tests/ui/unpin_sneaky.stderr deleted file mode 100644 index 39a7745ed72e..000000000000 --- a/third_party/rust/pin-project-lite-0.1.12/tests/ui/unpin_sneaky.stderr +++ /dev/null @@ -1,11 +0,0 @@ -error[E0412]: cannot find type `__Origin` in this scope - --> $DIR/unpin_sneaky.rs:10:16 - | -10 | impl Unpin for __Origin {} //~ ERROR E0412,E0321 - | ^^^^^^^^ not found in this scope - -error[E0321]: cross-crate traits with a default impl, like `Unpin`, can only be implemented for a struct/enum type, not `[type error]` - --> $DIR/unpin_sneaky.rs:10:1 - | -10 | impl Unpin for __Origin {} //~ ERROR E0412,E0321 - | ^^^^^^^^^^^^^^^^^^^^^^^ can't implement cross-crate trait with a default impl for non-struct/enum type diff --git a/third_party/rust/pin-project-lite-0.1.12/tests/ui/unsupported.rs b/third_party/rust/pin-project-lite-0.1.12/tests/ui/unsupported.rs deleted file mode 100644 index 2f808362756e..000000000000 --- a/third_party/rust/pin-project-lite-0.1.12/tests/ui/unsupported.rs +++ /dev/null @@ -1,27 +0,0 @@ -use pin_project_lite::pin_project; - -pin_project! { - struct Struct1 {} //~ ERROR no rules expected the token `}` -} - -pin_project! { - struct Struct2(); //~ ERROR no rules expected the token `(` -} - -pin_project! { - struct Struct3; //~ ERROR no rules expected the token `;` -} - -pin_project! { - enum Enum { //~ ERROR no rules expected the token `enum` - A(u8) - } -} - -pin_project! 
{ - union Union { //~ ERROR no rules expected the token `union` - x: u8, - } -} - -fn main() {} diff --git a/third_party/rust/pin-project-lite-0.1.12/tests/ui/unsupported.stderr b/third_party/rust/pin-project-lite-0.1.12/tests/ui/unsupported.stderr deleted file mode 100644 index 4f7b1aed0978..000000000000 --- a/third_party/rust/pin-project-lite-0.1.12/tests/ui/unsupported.stderr +++ /dev/null @@ -1,29 +0,0 @@ -error: no rules expected the token `}` - --> $DIR/unsupported.rs:4:21 - | -4 | struct Struct1 {} //~ ERROR no rules expected the token `}` - | ^ no rules expected this token in macro call - -error: no rules expected the token `(` - --> $DIR/unsupported.rs:8:19 - | -8 | struct Struct2(); //~ ERROR no rules expected the token `(` - | ^ no rules expected this token in macro call - -error: no rules expected the token `;` - --> $DIR/unsupported.rs:12:19 - | -12 | struct Struct3; //~ ERROR no rules expected the token `;` - | ^ no rules expected this token in macro call - -error: no rules expected the token `enum` - --> $DIR/unsupported.rs:16:5 - | -16 | enum Enum { //~ ERROR no rules expected the token `enum` - | ^^^^ no rules expected this token in macro call - -error: no rules expected the token `union` - --> $DIR/unsupported.rs:22:5 - | -22 | union Union { //~ ERROR no rules expected the token `union` - | ^^^^^ no rules expected this token in macro call diff --git a/third_party/rust/pin-project/.cargo-checksum.json b/third_party/rust/pin-project/.cargo-checksum.json index a546c5bc4572..a4ac3ada8d54 100644 --- a/third_party/rust/pin-project/.cargo-checksum.json +++ b/third_party/rust/pin-project/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"CHANGELOG.md":"7f45ef95d3102b25f9c4c9181d60c361ce9f9ea3e689f5c1e7f4748a78fe07cd","Cargo.lock":"b0abc49a7c56c1b8303b75ac3903242d0ac8585e3d42e0aee47baf272d9e8ce1","Cargo.toml":"68e94c2bb6e737bd6d1aecff34f3d2223c1f6f672901290993f63a0c4f4f0964","LICENSE-APACHE":"cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"18eac0021ee384fa8f377583887ba5da3ffe5255e8b9539dde8933aa555c6aa4","examples/README.md":"86f688e188b258f706e9344b31d28bcf15e22e130acd84bb7af7201fb62e16f7","examples/enum-default-expanded.rs":"b0b334914c4a8f6edf8f84fbfcad2fca263d6842c8c614a8f1d9c521e90f6096","examples/enum-default.rs":"01e1d285ffbb87aa70950c0ec184b395f7faf0667a1fc874b4de509e3e3c8d5c","examples/not_unpin-expanded.rs":"36e303e622b3588b8f0f5bbda69d6e7382c99cf9fb09242fbaf4f0e97cc074c8","examples/not_unpin.rs":"3e43439c521089f7f58a411fb823827433c7476a0b41aecd26a6ce5f0c99e405","examples/pinned_drop-expanded.rs":"9c19f3bb73753b7dd189db64b0a984b8b7e2028651ec575fb843ee03aad5579a","examples/pinned_drop.rs":"8913e9b0b7851d470c3f15e40930af16c79f5ee8b4a947cac8235361b427db30","examples/project_replace-expanded.rs":"46364eb6ad9849fe921f1cf50a351a409eab0ba60eb901eec24f044af45ae5cb","examples/project_replace.rs":"352d10d7a2db3de0101faedd04d69c24b9bb5021c768199a379c233886a3881c","examples/struct-default-expanded.rs":"589dd3d2475dcf67261620fd5ec6cf8fccc92188df01a3e72fa41ac5561a6949","examples/struct-default.rs":"eb60ea5412e83ac9eba96794b31772afe0798bef304b26bff65b6845638bb729","examples/unsafe_unpin-expanded.rs":"87f0e32b62d489dcfe899b26d07deee5ea60764306346b4eb802999dba5064f8","examples/unsafe_unpin.rs":"7da585f423999dcbe03289a72b14a29fed41a45a774de1af2fe93cb20aa8d426","src/lib.rs":"763089653ee589367a02f920b8736756aeacde43a58c70472265dc4796a197ca","tests/cfg.rs":"31a3c409275c86f9dcfd314eaacdd312
76fdbd1a05760d190b189b7a5a597aaf","tests/compiletest.rs":"0bd788b5e52bd2778ba817292e1ba6174f74e779eb87489788ca5f5ee1a433e7","tests/drop_order.rs":"3a35a1a109dda4b5cd47aaf5e6e506f52165f023c9553c6bd61581857c0e2d82","tests/include/basic-safe-part.rs":"dc7ae2a983e05028e55b6b00eba7490399991bec304762477b433bf59f06296a","tests/include/basic.rs":"855d9cc4657be080337c07f8bb5f82747a72a22e63e1103b00914a4448937970","tests/lint.rs":"88bfb95432ae83bf280dbef60e2e4d4565b30ae0ac1735717a97d0917a243371","tests/pin_project.rs":"f730e98acacda83f40fd4efcf15289ccf0cf5964dc4f83be214756ffd9138678","tests/pinned_drop.rs":"2951d0e117cacc0b1b2c778d1735289ec123f262560fde2bda51f3994dee7c36","tests/project.rs":"22b77e4474bfc7b76f1f508e53b74b5b560d5dca9422dc35a3f7810a35654dcc","tests/project_if_attr.rs.in":"2030a358cbd6c722c173c84615ce07087cf67d9ab85cbff19fb2d36789acea56","tests/project_ref.rs":"787c4f374203821ba3dda31d8c3e225b78ce16e5bae9c6c356ca5a7e123b3e21","tests/project_replace.rs":"9537b0d0db20c2a1ae4523fc7dcf801379031866708be41c150e80f603ac849a","tests/repr_packed.rs":"3845a63e1de4176fd8c415c4a2ba7236daa143776e7d86d1ae37e32db36b8bf8","tests/sized.rs":"d976a125da3691805383b32aaac9153145250bd0d013f3e9aad16faa5c7a604c","tests/ui/cfg/cfg_attr-resolve.rs":"f0807b6db56e2842d87cf694f929fd6695addf1ac4e8ce5fb83552447e1cb58d","tests/ui/cfg/cfg_attr-resolve.stderr":"36d01583be7282b5ad28ee462858ed646d09ee5d964ab01f9623a0de7a2bf504","tests/ui/cfg/cfg_attr-type-mismatch.rs":"ef82a8dc600bad0fe861419119145d8f117cbe6ddbdbc5338dad8b7182756a2c","tests/ui/cfg/cfg_attr-type-mismatch.stderr":"dc5da98a65d3cea3263d920f2bd49362fa4d183345393822c8ad4a146677df2a","tests/ui/cfg/cfg_attr-unpin.rs":"02cd72d7b6a9e08e85ec8cf3a325d773422c5ebe712213dc0907544d9976711e","tests/ui/cfg/cfg_attr-unpin.stderr":"0712f0d7f0743de276e2a83a3d144d0639600acb0139b939b4759e04044f1fe3","tests/ui/cfg/packed_sneaky-span-issue-1.rs":"22688e4dacaed9437d9fbcd18c2044a8f037bf77bd0bdf498a5118bda99813a7","tests/ui/cfg/packed_sneaky-span-issue-1.stderr":"deca5d37561748bb950b7cb2c9819b6018ba3e693c3d88ad24247f2ab8dbb8f5","tests/ui/cfg/packed_sneaky-span-issue-2.rs":"42d00141ddc12f6e7f7c779f51157cd8154c11f8eff54d53a9d93391c4caf51b","tests/ui/cfg/packed_sneaky-span-issue-2.stderr":"522489490b907e2e76ad382020726b49ba5a3ec95ab6b4585fd3327e9fd7ebe9","tests/ui/cfg/packed_sneaky.rs":"ebd6d5ccc813ad3939f25a20008ee97201db1d4dc6808461e98e46f2c83f56c3","tests/ui/cfg/packed_sneaky.stderr":"269fb48452b440985e17f0fe071a90a04bcd42be180116a6870ac3067a9cfd1f","tests/ui/cfg/proper_unpin.rs":"3e6ddbe86e8ee2c4366f83ce7ef89428bba73b2bb1b8ede166b1ce3bb1e88696","tests/ui/cfg/proper_unpin.stderr":"423d4eb94d3831bf498624968fdaac9a35a8f1df620f26ef80149684ca694589","tests/ui/cfg/unsupported.rs":"b774675e0cee115c5e64ddd59008671de313d15b3b14de825f904bd25e1c3dea","tests/ui/cfg/unsupported.stderr":"53e6245576c14f7d1d6fd4e2981ebcfe443d8ae708110ab1ed3f18f51340f4cd","tests/ui/not_unpin/assert-not-unpin.rs":"5b2f25d54a869eaf5840755c05cb2c74a1c12ca0470db1264442f6933185a2ad","tests/ui/not_unpin/assert-not-unpin.stderr":"8f37bd5ad84eb47f1473f023c4f3e85792d6c74f8fce06160579b7b9ff509156","tests/ui/not_unpin/conflict-unpin.rs":"e83eaaa14879ed2cbfe64a81bb9da752d9cb90003f6f4f39195767f3edbc3aaf","tests/ui/not_unpin/conflict-unpin.stderr":"1827ee49af6c08ad079d58ccd44f486cff08c88de68df60729f3838ff1ec2ee9","tests/ui/not_unpin/impl-unsafe-unpin.rs":"5da7d3f5e5ee95e18040b9a524515ec1af0e8b2fd78c017bf32e60d17e92234e","tests/ui/not_unpin/impl-unsafe-unpin.stderr":"bbc43a42e912d802e1c4b7b72db8172856293a5e7e01ff99287469bf018c162a
","tests/ui/pin_project/add-attr-to-struct.rs":"46db75d07e8e66078062fe9976f1341a82b5daac05cee04485970c23ce1ef4e5","tests/ui/pin_project/add-attr-to-struct.stderr":"2979bcc33581a846b1ffebf851de4b9135ee972965144a26050d9f37bda3bf7b","tests/ui/pin_project/add-pinned-field.rs":"8b55ad123ed35191b988341e4747a2aa3ae115db307ff0bb3aecceff013aa710","tests/ui/pin_project/add-pinned-field.stderr":"8478c41754c866c0662ba3d1162226f9e01c111935a0b3a9a8f976e7c9f3dbe5","tests/ui/pin_project/conflict-drop.rs":"93137ac53f3114038550037b304ee97c8f0e01c6f517bfdec32f9be867c5533b","tests/ui/pin_project/conflict-drop.stderr":"4a5ed245afad8bc48c77086ed92e3f7c268b7414610f56c81e1bc7e31c52ed17","tests/ui/pin_project/conflict-unpin.rs":"56d8f0eb22d547f5f9954bd37ca31c5b58b9d60bc5dc5b8c70f7d73cf8844649","tests/ui/pin_project/conflict-unpin.stderr":"62139ec929df0668eacdb69d9304b61e4726527e0199133cc79ef59225aa281d","tests/ui/pin_project/impl-unsafe-unpin.rs":"0b8a032993740be80ba1510b21f0c664e9776421395707712dee12fbfc637efb","tests/ui/pin_project/impl-unsafe-unpin.stderr":"49def6e0a5cff7a5f0852697a7b153616195e814cbac486edd49f1239fb2af5d","tests/ui/pin_project/invalid.rs":"9ebc49e9038cc09ea92091cc7897b131b294520941d9b4eef47ccebe3243651d","tests/ui/pin_project/invalid.stderr":"b4b62860978f7b6d2f782ce976a2d16bbaee509d50852f4dcd7750a839c54f9f","tests/ui/pin_project/overlapping_unpin_struct.rs":"d563525d945e970841002cc1f93822f50cfb9c81586363e2038a9aabf0a66ad3","tests/ui/pin_project/overlapping_unpin_struct.stderr":"16a1181520be1c63aacc53b4f755de390819835e58eecb74f87a9b9edca9d875","tests/ui/pin_project/packed-enum.rs":"31739a04fab97a3300c50ccefd5ddce7b8e200fab0e48fb7160fb8a54c0365da","tests/ui/pin_project/packed-enum.stderr":"f9aae214e1e4d77261aa5a5f27e6ece24c4a5a551106e77c3681b16f9caacaa5","tests/ui/pin_project/packed-name-value.rs":"cb0b6043326074d49398f091215fd7e69202e0d7a132539aec0e302d59b99237","tests/ui/pin_project/packed-name-value.stderr":"7ad936f770b874a15a5820ec0417da3374bb9780093c2121c4d4c549b0514ccd","tests/ui/pin_project/packed.rs":"7a66b7c2ca8253cfd6774a339d74400966e85c73ef59f54ad66d9f1b02ea5db7","tests/ui/pin_project/packed.stderr":"2b9e7a0818e21f958536738035452d760b9fbd3784d2034b1732d2c8d4b85812","tests/ui/pin_project/packed_sneaky-1.rs":"96a01014669ccff0f4ea68c1695b885bdb4ebda88084396d15485534864255d6","tests/ui/pin_project/packed_sneaky-1.stderr":"208f36f5cee9839a37b3b465e9e375987e718124825a6e20a79b429bc1483c25","tests/ui/pin_project/packed_sneaky-2.rs":"72d32a127f4e1430e7f964b574f224baf1a65aa7794cb9a6dd3d4d6669af4a12","tests/ui/pin_project/packed_sneaky-2.stderr":"87c56be98d0c78a2ad8dfb00b1082174f0df1f533efdf14d429e1db991b37a3b","tests/ui/pin_project/private_in_public-enum.rs":"6c882b9c77773fe47ec083bfeff9624267b1efa1ef38438806c3fb9073c6c1a9","tests/ui/pin_project/private_in_public-enum.stderr":"350eda0292ffb6a2c2383922cc474887c14aae600a877af32419c574b58029e6","tests/ui/pin_project/project_replace_unsized.rs":"96b7ba5a511cac1fc94a942bbe0c922c9647584b7323f78b54f13f73047d8c7a","tests/ui/pin_project/project_replace_unsized.stderr":"a05c80ee426686cf18e9fdc50e5d576d1c5d8b4f1d09e5936337e9f412aa2ba9","tests/ui/pin_project/project_replace_unsized_fn_params.rs":"5ae9886df3c6f6fee6346afa6241e4a3d55206dc3643f45b2d9bd0b6d25a6673","tests/ui/pin_project/project_replace_unsized_fn_params.stderr":"38c3d6e790f7489eabca2e44a038184df77e00b92858285a732292805750f8df","tests/ui/pin_project/proper_unpin.rs":"8a5d0990f2f300f8cd33d93da93340e2a73a3e277d780865a9c8f018f19d25f5","tests/ui/pin_project/proper_unpin.stderr":"cd6183a2c35d36f16d913
502f7b2a8b3084a064a1325fe53adef290223f68620","tests/ui/pin_project/remove-attr-from-field.rs":"2334cb1658ec4ecec34f9dea8bc47e1653436b08213291a57b436e111312eafc","tests/ui/pin_project/remove-attr-from-field.stderr":"60bc68dd47592cd7ff2795723912e3ab7961b60dc026cec56bef50237f50a013","tests/ui/pin_project/remove-attr-from-struct.rs":"567deece4945ebad3f3e667ce0290e040649bfa2987f71769006cb8e13b39e4d","tests/ui/pin_project/remove-attr-from-struct.stderr":"7978a8c657c3630665aa41157c4d85cd9c109133dc992447369fe9358438af6f","tests/ui/pin_project/safe_packed_borrows.rs":"313d671d4d6ad74afd4e3ef0eee62bc7c6d3fa9e28a26d09a9c2ba0ccb5eb902","tests/ui/pin_project/safe_packed_borrows.stderr":"f56c3357538b202de22e14366a3054407541496eb08a2df6bf11dadb93b5373a","tests/ui/pin_project/unaligned_references.rs":"a7e7949b01787e7584448f1f6b90a817473c02cb3c5bac2b725dbfa77de3d4b1","tests/ui/pin_project/unaligned_references.stderr":"e928dbc6ed3aa15d6be261b3c32ccec5585721c39285d0f55ddcc912fed92567","tests/ui/pin_project/unpin_sneaky.rs":"7920d4e05142a0277bd9daeb40457586ec3be27372d797087aa9b693607efacd","tests/ui/pin_project/unpin_sneaky.stderr":"9fc89c23b0e4dabf72b31af5632bdf3d894fb9120ce5968ee63c297f38a189e4","tests/ui/pin_project/visibility.rs":"6f5ba3342d500623d81cb719135b7f8c15f7927d7d31c71bb506c1bd97e646f4","tests/ui/pin_project/visibility.stderr":"b82958da6ea01e8a2815dd01fe5b5020de015b698414c5a8100bcef645e1821d","tests/ui/pinned_drop/call-drop-inner.rs":"4ae96a18e16a1fe81132a2f5ed9a4a3c4ccda686b5b523bb061d0691559c84a0","tests/ui/pinned_drop/call-drop-inner.stderr":"d5c0160d12e6c4bace65e1c3bf43fce4d77363a81ab4f711aca1a2f3b6b1ebc4","tests/ui/pinned_drop/conditional-drop-impl.rs":"c0d1633612f652d659d90fed585db7f95af6b3f09150a6d548ea247677c4537c","tests/ui/pinned_drop/conditional-drop-impl.stderr":"479b45a9c499f902fab55e6f12044714b80031b4bc979bf9b83daa5e8c62bbae","tests/ui/pinned_drop/forget-pinned-drop-impl.rs":"329ceecba3e51ead54aeb66d48553e988a00475c37d71c9c72c9a09094199131","tests/ui/pinned_drop/forget-pinned-drop-impl.stderr":"d45a218e8107ad11a59a75f30bbb2a018e153ecdead28880b09cf63f3d64f98f","tests/ui/pinned_drop/invalid-self.rs":"77f671353e055f3673a4a864d99dafe2930a4d62c90454ab550427c4c062355f","tests/ui/pinned_drop/invalid-self.stderr":"4d8453b318babdcbcaaecd5df79998622caf3eeb43db64d15e86c7f7eb23e279","tests/ui/pinned_drop/invalid.rs":"e1ca69decc33bb26c971718e6c9a6cafd89d6644f41086879b27c034161de3c4","tests/ui/pinned_drop/invalid.stderr":"4b686b1e4b273d794f0185e53ef549a25abd9e9242555e444d920a9ba6043eb6","tests/ui/pinned_drop/pinned-drop-no-attr-arg.rs":"6097441094e69ae18887171c8528900ae018707d3cfa03e2d3dc13fd06c58837","tests/ui/pinned_drop/pinned-drop-no-attr-arg.stderr":"f8d1dd1dc5f06d30e58b2b7c9a57b02c93a74693aee869c6f40342b98386c58c","tests/ui/pinned_drop/self.rs":"fcbea30041edafeb21aa18760aad87db38cda611ad936175fe164a164c771a92","tests/ui/pinned_drop/self.stderr":"6c48784713ecd9e4f42521f2ef2d2a8d863e401aea72065c94fc683858a3f09d","tests/ui/pinned_drop/unsafe-call.rs":"1b23d07e843c9324c85472482ab3301af7d99c425cd275f2abfbeb5838281403","tests/ui/pinned_drop/unsafe-call.stderr":"68fcaa4f52a3d5bda6e65f4e44247a2c9ef86bcd6db32dc160a676c9584111bd","tests/ui/project/ambiguous-let.rs":"17fdbc0edf5156653ee4fb7687aab49457bd4e6bd20d98e30506fdd292dfb672","tests/ui/project/ambiguous-let.stderr":"5bb90a71ebc92c2520c37a381a345fbbd98b4de6a30479e2689e2fbe2b6b5722","tests/ui/project/deprecated.rs":"afa0585ca3296572c10a2fdbb7ff8ae532311ed4384712886d6e9465281b546e","tests/ui/project/deprecated.stderr":"5bb75152a3412e8d3855c652e68d8c
8c88f2ada5983cafca544e2101531fcb99","tests/ui/project/invalid.rs":"72f196c25eda76828ef9f1217e786c019a831e9ca71f7d9292dde77171d83ae4","tests/ui/project/invalid.stderr":"071388c07560ee868326c2be19c3798ab375f36fe9b04b7eed73834399d8cada","tests/ui/project/type-mismatch.rs":"bea9bded3e610d338248a197e6159b0ebecdebe4095601ddb3eef0a43bee327b","tests/ui/project/type-mismatch.stderr":"c7a87ef686378f8914bed64f03ea49eea0134c8a98f8192663610162047116ce","tests/ui/project/use-public.rs":"43fa6391c3b3018950d9d0a6dee29e9484e3d37a690f4c36c2d424b545ff949d","tests/ui/project/use-public.stderr":"89d23ce9ea68fb0ac7e53fd96d9ee0be7a10434c5542fbec8f93a351342cc3fa","tests/ui/project/use.rs":"bd87883ff31fa949dca57d05bcc27373accee8ee2c437ce78f1d3b74b8a33218","tests/ui/project/use.stderr":"6b12a8269919c4e57bd377ab7b2625965aad5f39c3eb369e6b7c89997c8862f4","tests/ui/unsafe_unpin/conflict-unpin.rs":"5752570223ddfa9aebdbb27d466e4440aef94b2795b144e0af4935acdbf5677d","tests/ui/unsafe_unpin/conflict-unpin.stderr":"9d3e25209f19fffcc8d03342481ae501dcf890fce61057bbc587010f66b13064","tests/ui/unsafe_unpin/not-implement-unsafe-unpin.rs":"cc63daa89bae170521449219e3b5b964decdcb4f5e7180d4df84ccc0031c0170","tests/ui/unsafe_unpin/not-implement-unsafe-unpin.stderr":"21baea965abd8e74838d04f2539328fcc757e53843286e7bfc0c67fbf1856019","tests/ui/unsafe_unpin/proper_unpin.rs":"86c99f092bd3118e36c17f637e4884e90c9a7406b0c092c092c6cd292d611749","tests/ui/unsafe_unpin/proper_unpin.stderr":"41906d656c9417a1518abf665a780d852723636afa6181fe6b4a66c12e8fc274","tests/ui/unstable-features/README.md":"4d828aa223e725f5177513eb72fd0c9670f467aecdfbdd3d9758e35ab17f7daf","tests/ui/unstable-features/marker_trait_attr-feature-gate.rs":"e403718e5f23f6ffb6afbaee87c291b80392fcc4462ac874250dcc089e9147fd","tests/ui/unstable-features/marker_trait_attr-feature-gate.stderr":"08ea254ef04f139fc6c174048108993eb37870a40949a00dca39137ae975f17a","tests/ui/unstable-features/marker_trait_attr.rs":"f4a5aabb76983ac286f26fbf29127ead115700adcf4373b7e74f78e6162ff28c","tests/ui/unstable-features/marker_trait_attr.stderr":"0d356dfdd5c718fdde804369eafec77600b732356eb6f9fd9d08151e5875a4de","tests/ui/unstable-features/overlapping_marker_traits-feature-gate.rs":"59f36a4b4e91fd97367679fd5decac8d603fc91a970df22e3f4628a5a02151da","tests/ui/unstable-features/overlapping_marker_traits-feature-gate.stderr":"6358b4e4ab3396f5700a028a8d220b369974b67d0ee65b32f32d52e147d50fb4","tests/ui/unstable-features/overlapping_marker_traits.rs":"30b3192c9bee17542edc9e1ba50010e2ba62e5a3bfc7b58adf7d10876e4159e1","tests/ui/unstable-features/overlapping_marker_traits.stderr":"2667541926916046cc25d275b4aa5877172ea847d130e14c10eef57d1b14164c","tests/ui/unstable-features/run-pass/stmt_expr_attributes.rs":"86f941e6f8924c5fc5c7fc821f9335601139d4d3e8b6c923fbab5673f8749eb7","tests/ui/unstable-features/stmt_expr_attributes-feature-gate.rs":"01b9a6f8eae9686c1803008c97e341fd1e7e4922e3cf88861a0df7ca53296efb","tests/ui/unstable-features/stmt_expr_attributes-feature-gate.stderr":"39090f4bd0c8b82babbf296261924d180ccb72b7cf32edac1197b5d63883280c","tests/ui/unstable-features/trivial_bounds-feature-gate.rs":"3b73733424449390a4083dfe222c728fcf964316efe6aa9794db384ffdf35686","tests/ui/unstable-features/trivial_bounds-feature-gate.stderr":"d9897f2aeca61dbbd07e3c5122237e7b34a891fb71b9bacf1462fe2f6c7e8298","tests/ui/unstable-features/trivial_bounds.rs":"b12e91f4f24b841bb83b4831849795999a2095b25ab62c8966948ed03da3d327","tests/ui/unstable-features/trivial_bounds.stderr":"96d4f84e3a14064a495a57e32fe31b1c3fc819253a95c83b2a2e5ae48bd4fdc3",
"tests/unsafe_unpin.rs":"c6287ef01d4ba0338d2bdf6f9a30673435665a939dce2a74cb6a71820826b789"},"package":"9615c18d31137579e9ff063499264ddc1278e7b1982757ebc111028c4d1dc909"} \ No newline at end of file +{"files":{"CHANGELOG.md":"4e8ad30f08e7deffe30cddae396370090d8ef85d5027be881d3f73eb496b120d","Cargo.lock":"60fd10d661f0a2a56ff331d0fee263e540346a5235ce4df3cea79848e7e194b4","Cargo.toml":"893a2b117568219f5bd6ebb82973e1c9f4f17ada38b8d5f6321e9e713aa3bab5","LICENSE-APACHE":"0d542e0c8804e39aa7f37eb00da5a762149dc682d7829451287e11b938e94594","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"767057f6c71898058fd048ad3677687486f117c04477dd8f8c599d79588e3b96","examples/README.md":"6dc8e65b9d76f49caedc1d7a6aadcf5cc48a21e365123b144637bd3d6b44e7b6","examples/enum-default-expanded.rs":"92414c9b48f3e280ba10994a426629da799d125d764d1b3857676b21cb42dbbd","examples/enum-default.rs":"01e1d285ffbb87aa70950c0ec184b395f7faf0667a1fc874b4de509e3e3c8d5c","examples/not_unpin-expanded.rs":"19e944c6159b6a57a19a653adb7a30d27ddc098351bd311a91ada586eb8e6ee7","examples/not_unpin.rs":"3e43439c521089f7f58a411fb823827433c7476a0b41aecd26a6ce5f0c99e405","examples/pinned_drop-expanded.rs":"6b2bb25bcb3404d1b03276be4ae3f6c768a98b5e6675a33b24bf57bff9a9cfaa","examples/pinned_drop.rs":"8913e9b0b7851d470c3f15e40930af16c79f5ee8b4a947cac8235361b427db30","examples/project_replace-expanded.rs":"0b5adc4f83b3b56febc1a721f334b5bea26ec4aea407f35f4cce5cdde76ddd30","examples/project_replace.rs":"352d10d7a2db3de0101faedd04d69c24b9bb5021c768199a379c233886a3881c","examples/struct-default-expanded.rs":"2e290064aa48e001625f3badce5fda7e2fa7a8ce613cbe1788892b1230885895","examples/struct-default.rs":"eb60ea5412e83ac9eba96794b31772afe0798bef304b26bff65b6845638bb729","examples/unsafe_unpin-expanded.rs":"71d13e6bb284642d81520fce7adf5c1e9510451295c1806a3703dae481e64ee3","examples/unsafe_unpin.rs":"7da585f423999dcbe03289a72b14a29fed41a45a774de1af2fe93cb20aa8d426","src/lib.rs":"1622ef62254dff7db1c57d7bfc6a5602bcd8e547908ea5458bedf4f807883426","tests/README.md":"2b6cf365e06fae7dc3260504351177855d675452cc61e398a9998d5a317ae0dc","tests/auxiliary/mod.rs":"7e263e987e09b77e384f734384a00a71c5b70230bb1b5376446ef003a8da9372","tests/cfg.rs":"de3ad3baf0715163d7b941bc988d67219e9c4dfb4c9068f1c60f5214649fa6ee","tests/compiletest.rs":"f764ff03c73c8a21c35ee30cf851b9266b2987e88d646d9bd086fc71944eb945","tests/drop_order.rs":"d87cce198587f1c2a4167b347e199496b177ced30769c51c2bea175a8d7f4445","tests/expand/default/enum.expanded.rs":"02dc844f6d2392db17d691b7645a039cdf7642debee5d45b0d042833dc1f7478","tests/expand/default/enum.rs":"e23fac8939fd21c96afaf4b10613a1b8fbfff655f76128902c2cbe8577c13d08","tests/expand/default/struct.expanded.rs":"a648afedb3ff9ded62f67ae84750e52ce409c84718e1a4ec5f03f69ca2874ccb","tests/expand/default/struct.rs":"a0c95b1441b93b3ef346a47dc1e3d8113a6e1df90e577464d3f12a4e50654c4f","tests/expand/default/tuple_struct.expanded.rs":"874804e6dc9d041d06274ad35634dd6125d61d3ac95239fc1cf2736324890c93","tests/expand/default/tuple_struct.rs":"1132f9700ef58079666f25173076a009c23c4f5e0ad80af80687a2e5958db6e9","tests/expand/multifields/enum.expanded.rs":"9dbd33f15dcd044867a0e48dee03886975b080e91e54cdffd6caea64fc97a610","tests/expand/multifields/enum.rs":"a909ee4c44aef0d6959d46343a2927033acb665f6051f37e088367778af90c78","tests/expand/multifields/struct.expanded.rs":"469e9b4bc2e05100de1459ddb30efd94cc66d3e038c35ef7f808e22e1ae4f430","tests/expand/multifields/struct.rs":"9f99e89352d212e3d4ed9cd6be2741ea13658267de6b557bdc8302c4902c72a9","test
s/expand/multifields/tuple_struct.expanded.rs":"c90932b83aa9326eda6c4cc706ab6d21d96bf5cad216af82190df17712332432","tests/expand/multifields/tuple_struct.rs":"9ec0b313d829717bf7d3484428136a3012a996dbd97d3ecc8a7b0ba682e5db0b","tests/expand/naming/enum-all.expanded.rs":"47661ea6b898fba73571c3beded975c86079dc98cb8ce006264a50f865204091","tests/expand/naming/enum-all.rs":"c46d8ff27879a26afb31d71df8ab69a871b2fd509ba90668cffaadafb7a54940","tests/expand/naming/enum-mut.expanded.rs":"f8390c5589193f97b5cf26ebf5030014daef883c20c78b121750422cd8b2b172","tests/expand/naming/enum-mut.rs":"9df4e0e1fd8bec8ab471ef419be312c8e15158c7017e964a8337c3108f6c601b","tests/expand/naming/enum-none.expanded.rs":"eb6c72355e3a7907d9870b0eed4cb15142c1f935a8a25729dca6f9766ddea1ff","tests/expand/naming/enum-none.rs":"487f9469127b9375f53d6242062455eac38ccdaa30c168ed2ea755ad7287f02f","tests/expand/naming/enum-own.expanded.rs":"9c61764e2dd91dda88c7049b9d12117e2c52787e149917e7b5dfd19822e0387e","tests/expand/naming/enum-own.rs":"5bb51375842f2956cceb685511cc34a099665439310f01a5cc02a1d4264897a6","tests/expand/naming/enum-ref.expanded.rs":"5deff7bfe636614e0ed1ff1c1369b8e79783498d79f23b5e0c821486bac703b3","tests/expand/naming/enum-ref.rs":"3764e8471702f968e79f633a1a471974b7a726bcc09ce4443e0bce65194f8930","tests/expand/naming/struct-all.expanded.rs":"a2a5d97c98678bf7aa5c515089eb0e837345a5fe28e7cda324a13452ecd9d947","tests/expand/naming/struct-all.rs":"a8d70a576ff5325b848d14dc8083d0967e1b3b4486fd061281484b95adade136","tests/expand/naming/struct-mut.expanded.rs":"8956c9480b7aaf9db5a20aff64f3ee7bc60944855c54a6ad9e9822152879af12","tests/expand/naming/struct-mut.rs":"e793dc13594ba2836881ab2580a73d73c40299af94e24a442f704e76552ce7fb","tests/expand/naming/struct-none.expanded.rs":"a648afedb3ff9ded62f67ae84750e52ce409c84718e1a4ec5f03f69ca2874ccb","tests/expand/naming/struct-none.rs":"a0c95b1441b93b3ef346a47dc1e3d8113a6e1df90e577464d3f12a4e50654c4f","tests/expand/naming/struct-own.expanded.rs":"d0ea125b4191c0e689da5d58832111f78f19c2ce8455513e9e17be15f8099846","tests/expand/naming/struct-own.rs":"caa714f00757980ef60640a90cba289886293320524395a753b876e3050018e1","tests/expand/naming/struct-ref.expanded.rs":"86a87c40cb6a24466ae5a1dcd312188d4df618fad3f84583066c8cddf4bc0afd","tests/expand/naming/struct-ref.rs":"f0ce9bb2ebb0c501cce8eaa24c2657a87e58109d5bde8319e7b5d3c0bae4ad86","tests/expand/naming/tuple_struct-all.expanded.rs":"80a43d2f541975723179e30fdac1dc63898ee5b3813a352a8d3d5c238317a431","tests/expand/naming/tuple_struct-all.rs":"a77e3d5d2409f9016bb8df7ca0387fa512d3383833c9591e64435b689d3710c7","tests/expand/naming/tuple_struct-mut.expanded.rs":"86f5be9829fbfbc5a9c8a1e82f27b316b200937fd8383b00d0d44af3df1e47df","tests/expand/naming/tuple_struct-mut.rs":"06b87b86b6bed07ddfb96067772e9aaf9c1db2d3f871e248658593bd22c4a17c","tests/expand/naming/tuple_struct-none.expanded.rs":"874804e6dc9d041d06274ad35634dd6125d61d3ac95239fc1cf2736324890c93","tests/expand/naming/tuple_struct-none.rs":"1132f9700ef58079666f25173076a009c23c4f5e0ad80af80687a2e5958db6e9","tests/expand/naming/tuple_struct-own.expanded.rs":"282b6f6c5a693a04a0ac910ba403c433dd285d04eda61b749c15c8b5d660ae42","tests/expand/naming/tuple_struct-own.rs":"89ccd509482a95e74733c767b34f5d6bc8d4128cedc911834aa28aef08e7dc8e","tests/expand/naming/tuple_struct-ref.expanded.rs":"1c348f6b361a1b6c76acd0b62605f4538f9be6e3c13041d00e7e1550e9c8eab8","tests/expand/naming/tuple_struct-ref.rs":"2718b96b3e2e6cdef7f8293719d8914e3fd81e92d272b90f019863fa19584a29","tests/expand/not_unpin/enum.expanded.rs":"556889143af7231e5
16cf00a52bd1a836e3aedc801295dbf41e954f652191bfb","tests/expand/not_unpin/enum.rs":"60134857b6c3c2aca42e9eb2b4c6dbb9149701f115d0de30a2477395ce54fdfa","tests/expand/not_unpin/struct.expanded.rs":"1e590b68991cb278eef5b5e586d9d1323094fe9863b2b3d33135a5648d3a6707","tests/expand/not_unpin/struct.rs":"bf7468f2b42284897648e2545cf0da92a576a748cd5b75893b85f03eb2a52ba4","tests/expand/not_unpin/tuple_struct.expanded.rs":"06f6d4554ba662099fd65a4e5b290bb4b4b61dc2f3347bfb273bae112a4ab1da","tests/expand/not_unpin/tuple_struct.rs":"a3e2c2c484699087a570b1b247ce21bc40203fad889e7c0e9737d03c7ca4bd4e","tests/expand/pinned_drop/enum.expanded.rs":"6cb130dfe6f8b0d1096ac073f6ddd66e1cb2e855ab03f2cd8519ca5f755750f5","tests/expand/pinned_drop/enum.rs":"66f98ea8a567dcdeb58838df68fcba3594aea8a959826ff54fb4493fe06c4086","tests/expand/pinned_drop/struct.expanded.rs":"5b1263b613428b9586228e7ba220937d479df3103fbb026192b43cfa6178e4d5","tests/expand/pinned_drop/struct.rs":"44321ea6b494149e416d413185c2d23ed9d96982d1c4f378381b18e969abc16b","tests/expand/pinned_drop/tuple_struct.expanded.rs":"8acf9cc6685605b79a79bf43bb6547beca1675bd785ca53d53c2bccbec62aea8","tests/expand/pinned_drop/tuple_struct.rs":"e0532400f0bf77be198928c120e9efd9fd1b5d34f5fc9c902eb3b778c412a83d","tests/expand/project_replace/enum.expanded.rs":"bc6dc22b273b922378c73984fc17e08564c1e1fdc191f859fea35a51f0a1d8f6","tests/expand/project_replace/enum.rs":"ce2f5ddff8efd37b1b0028172fde7ee7fba4eff7622c8747cd61591d81c0f503","tests/expand/project_replace/struct.expanded.rs":"458189cc46bcc88db60cb44469ff1b9063bc6f68552b047040a7528cfbf9c3a9","tests/expand/project_replace/struct.rs":"f8c2915e03b557b9f11a6ea63c971cfb19b09e3a9916ab304f0ce62626e35895","tests/expand/project_replace/tuple_struct.expanded.rs":"207194e7c9b0e0325a0f60a5ab1707c504e388c0ef58fbb7952ef0e076ef5355","tests/expand/project_replace/tuple_struct.rs":"8ca1cd1d5feadb23999d8f4e7307f91d1932fff7e8d38889d3889d6ba4d43430","tests/expand/pub/enum.expanded.rs":"e0649e4100019bb3d7e28bacc8eabdf25601a9d93d52cdfad6721ac9538a450d","tests/expand/pub/enum.rs":"64ca05d529227157ba4cdce1c526d67d15f06108fd71f5955749d236c002471f","tests/expand/pub/struct.expanded.rs":"086ce3fcecf284b42165e243d13e6ac650ee33d9040921c42a440b997bac19e8","tests/expand/pub/struct.rs":"379b8c4c01a3fc2aa3f020578a6dd836d455f4c48604e1dad91f631146b9e6ec","tests/expand/pub/tuple_struct.expanded.rs":"292ada99231bb56ac9722bda3bd99fd265ed73df20c779f3ae83d3a018653e3e","tests/expand/pub/tuple_struct.rs":"77cc812220e956a2255ef129dec3b16d6973b7e8b1bc02a6f65bd9fa56040564","tests/expand/unsafe_unpin/enum.expanded.rs":"38fc3c54c8ce2d4af460db98d2642ffbd07cde94537b59f828a24428fd86d3b8","tests/expand/unsafe_unpin/enum.rs":"00fad3fed0f7d2d0eae56020726818ab91895d6eaed7c4c7cc5d497b5efa7cfd","tests/expand/unsafe_unpin/struct.expanded.rs":"906bdd18eee4de016f65be2d1569dfd56d6ef2198dcd54454ee9ac3e5c050507","tests/expand/unsafe_unpin/struct.rs":"a41bed68966abb874f31ad02a860e15a2714e2e459ef73475a908e71141e96f0","tests/expand/unsafe_unpin/tuple_struct.expanded.rs":"ebd43686132b7930345ae056ae509e7def537fe2245ca36b554760964b0402e9","tests/expand/unsafe_unpin/tuple_struct.rs":"bddd36be1795be2f445d7baec269ad8a5a2647b95af7f0b130331ab7a3c64baf","tests/expandtest.rs":"898df948c7ee82df0e0823cbde16ccc2b9dba455475665e6d2c7c0a84493e9fb","tests/include/basic-safe-part.rs":"6fb5accb7476f85233ef919eedaff83c30015e186414589db6e44d614016ad3e","tests/include/basic.rs":"855d9cc4657be080337c07f8bb5f82747a72a22e63e1103b00914a4448937970","tests/lint.rs":"25ece0e67517861cc54eff51957dc2347536764dbc0a6fab5d
71d6e78c560d47","tests/pin_project.rs":"f5a8c9e3964f360a0f93c549d79b0e1c4351790234bac3e3eaa59ff37a5a03c5","tests/pinned_drop.rs":"e9a41a4a2d286835a7be72f7c22edf33e1a34834121eb53e3f38af0c35ef0bb0","tests/proper_unpin.rs":"435b9c7243ab896c2546c1e402276e93ef38cd93de50cc7369f486fe22698a02","tests/repr_packed.rs":"f54a553a579dbce7f80f666f0228ec2dd2a80282ac5da5f715bb122e7e17712e","tests/ui/cfg/cfg_attr-resolve.rs":"bb924ea9ceb6579e80774ef7fee1bb99ae08efc7d909e8d1edff40c3c1adaa7f","tests/ui/cfg/cfg_attr-resolve.stderr":"720f806ac06d598753a6a90c95b942410413d8860f60a7251fbde3e1fa575934","tests/ui/cfg/cfg_attr-type-mismatch.rs":"25e8337f9fd5533799dd990df6e016d18e2a00468de3386aa7d45aa98e39a0f9","tests/ui/cfg/cfg_attr-type-mismatch.stderr":"79e2a14cc486d9c8665d89d41460ab1e1ea8edc87b01ea630affea012d067618","tests/ui/cfg/packed_sneaky-span-issue-1.rs":"768762cf1831b9b09868404d9d7fd394ed30fb4495bd8388034ee9bf3823d11b","tests/ui/cfg/packed_sneaky-span-issue-1.stderr":"235ad4c1c784e9116c480502e813a0f0254f3b0e624c89712bafa56d43eaa2c4","tests/ui/cfg/packed_sneaky-span-issue-2.rs":"229d91b2531ace7c1d644125b77ee044fc639e9b981aaede5fda6f5e38154a4d","tests/ui/cfg/packed_sneaky-span-issue-2.stderr":"dae8adcb5b6ac12be55da9f4d6d04c1a790907dc5ee23a16d86a2a370daf596a","tests/ui/cfg/packed_sneaky.rs":"785e77f597bfc0cdb7bebc040cf11b17b1e2aa727b0fc369b7fe073f8441cad0","tests/ui/cfg/packed_sneaky.stderr":"1674d0f108f91cc21f9009338bde1b343b65a68d81d6bb3b6aecd919846cc6e0","tests/ui/cfg/unsupported.rs":"45d6eddef59e67dfca3733450249632dd8135283cedafa663e7bfa2b232ca13e","tests/ui/cfg/unsupported.stderr":"72421d6c14eb7d4f7af7eea1e0701343df691985d1d58325e91412e749705d3f","tests/ui/not_unpin/conflict-unpin.rs":"5709b68bbf26c0b2b87ee7a0bbf83ae9e4f1bacd2266114114b4dcb8706d0080","tests/ui/not_unpin/conflict-unpin.stderr":"1827ee49af6c08ad079d58ccd44f486cff08c88de68df60729f3838ff1ec2ee9","tests/ui/not_unpin/impl-unsafe-unpin.rs":"088374540354c4052a2daf2e97cdf49fc54287e0d943bf34bbb70298d62e8c9b","tests/ui/not_unpin/impl-unsafe-unpin.stderr":"a924bd04b2704e256aa5dc1d9e34afe8f125f4471ab7fb5ef581498136874588","tests/ui/pin_project/add-attr-to-struct.rs":"975ab96e5660587eced2f4d2e133d331550ccc7db0e56fd3be6895fcfd1085a2","tests/ui/pin_project/add-attr-to-struct.stderr":"c3004aef263914c9abb43a5956058c5c0022bb9074b862b26753df3764d351fc","tests/ui/pin_project/add-pinned-field.rs":"791df5b427ba412fb25836569963e6a03ccacea9fcefa4bf3f789ee4de90762d","tests/ui/pin_project/add-pinned-field.stderr":"8478c41754c866c0662ba3d1162226f9e01c111935a0b3a9a8f976e7c9f3dbe5","tests/ui/pin_project/conflict-drop.rs":"c97bedbed751436994fec33003dca06761cc2cbf9fcc832c120321aa8fc0be7b","tests/ui/pin_project/conflict-drop.stderr":"830248bc54590be23c9ffa4e24ae4197fe72a8c2db0434064e191f9673654a67","tests/ui/pin_project/conflict-unpin.rs":"9e3b06ce53d97ebd79620d729b525fac1c87c67ed44b91d05dd4c3d48be455e3","tests/ui/pin_project/conflict-unpin.stderr":"62139ec929df0668eacdb69d9304b61e4726527e0199133cc79ef59225aa281d","tests/ui/pin_project/impl-unsafe-unpin.rs":"d24d630abd82e08aea529d358bf998011ead0e939c82dca410160764fc24e06b","tests/ui/pin_project/impl-unsafe-unpin.stderr":"9ce045122b4b10e5bcf529dcaf84f337b393ad428dfabc0c10c4d9d635b510e3","tests/ui/pin_project/import_unnamed.rs":"09fb23d22c044321f9bf3a526d16d694916adb7b5defeb6a353cdaff46df7efb","tests/ui/pin_project/import_unnamed.stderr":"2358b70ea4b4c797816cf3b47920f72e0eb7ad0ff11d9d7e8f9f0faed27cbd93","tests/ui/pin_project/invalid.rs":"bbb60256e5b0b35950161935a569ae3e623e51486419170452c73c032f97ed1f","tests/ui/pin_proje
ct/invalid.stderr":"3c027a4ab513d477d4877b540a58d319159c4d20b69e643d475b0b54abac7e67","tests/ui/pin_project/overlapping_unpin_struct.rs":"3f459dda0c01e36f989a1413b2a43f4d95f9ae2524653b61f0561acff28ad9a6","tests/ui/pin_project/overlapping_unpin_struct.stderr":"80663b2f53656616e016d90cf617306671d5885a0fadb2215af9113388c3c4a6","tests/ui/pin_project/override-priv-mod.rs":"e33ac45fac3f366737364040ab967a75e6f8f3aa1c093d044380a1cc4a0096bd","tests/ui/pin_project/override-priv-mod.stderr":"a6da3ab8e3aeff94dfa27ca52be0ce00104ca319d0e70232dbd627b4dca81030","tests/ui/pin_project/packed-enum.rs":"7784ff49119daa5ae562f0fa84fdf4e65fe8abaf62ecff7d2ead184748e16b9b","tests/ui/pin_project/packed-enum.stderr":"3c710e899c68e5db4707405546c9c81b819ba6d3bdb83d3578e34c1254fbf1e3","tests/ui/pin_project/packed-name-value.rs":"64cbf4ef3c01709d302f91d98e92c980f2858b14ddaf047646c4de57707541b1","tests/ui/pin_project/packed-name-value.stderr":"f8beabf2de5cdd913903eed642d886841ad14a719f562769f525c79a5df2fc76","tests/ui/pin_project/packed.rs":"7a66b7c2ca8253cfd6774a339d74400966e85c73ef59f54ad66d9f1b02ea5db7","tests/ui/pin_project/packed.stderr":"2b9e7a0818e21f958536738035452d760b9fbd3784d2034b1732d2c8d4b85812","tests/ui/pin_project/packed_sneaky-1.rs":"e8fed4a7450b81835479f898d21fe596438f5ca10634eeafaf43070f558eee17","tests/ui/pin_project/packed_sneaky-1.stderr":"6d28614daa2d5a7ddc9e434962c76ccf41c7d94626efcea1388b9a1da71b38a2","tests/ui/pin_project/packed_sneaky-2.rs":"f1601aa4f642ed4aaaab2dd2e0328b7af145be9a3a7460ad36339b47b4d7ce14","tests/ui/pin_project/packed_sneaky-2.stderr":"5df9f0c90032016856c4797f07c8e8c172ed0ded2c7078c404f80b5be675d33a","tests/ui/pin_project/packed_sneaky-3.rs":"d9ee3366b5e3849f3ec0d0bd62d365cff0c6e8f0eb3434d70fb84e62a1976eb2","tests/ui/pin_project/packed_sneaky-3.stderr":"15dca7e5e4e9cf2635d19c1c66c5385b9c749607a3b8ca07f56194e63e662a22","tests/ui/pin_project/private_in_public-enum.rs":"79a6ac31dc36cdf4a72c464d4f2702570724b3d33fe22e5b6c05c69129ea4458","tests/ui/pin_project/private_in_public-enum.stderr":"d951fb2b9b80ff07848723439da190cb91f776fbef0e27db025dcd24186f80aa","tests/ui/pin_project/project_replace_unsized.rs":"7c35e6c8ebf8337b6abf6a7fa7106bd519cebbe32d3f33b8865fa251820e0e5a","tests/ui/pin_project/project_replace_unsized.stderr":"77ca0400388be78a3d738be4dd7a6487b900209b4b2807e6d944638e85c70111","tests/ui/pin_project/project_replace_unsized_fn_params.rs":"db7c72e96119f245535627b887c1289b064dd657fbb524c0f6635287033b85e2","tests/ui/pin_project/project_replace_unsized_fn_params.stderr":"5248a6f166e9dd3d0238d41d8f8fe2dfcba1886ac3d775236be6fb5f42ffc23f","tests/ui/pin_project/remove-attr-from-field.rs":"9faac7ca8220c7069b1a7b2f43420efd3d73377f0ac0558ef67cd5664a9c54c1","tests/ui/pin_project/remove-attr-from-field.stderr":"bedbeba9a7fdb49ffcbe55ad1fc8893cba4e46a6415ccbdd4c68f1fa28854b19","tests/ui/pin_project/remove-attr-from-struct.rs":"f47a6cfbb5a4fa5bb233c0427be255110749621bed7cfa7a8f45e80451aa8615","tests/ui/pin_project/remove-attr-from-struct.stderr":"72813d275cc89198d1808d1dbc427d0e7c2d79eb45c8b9941297fbdf32ab2296","tests/ui/pin_project/safe_packed_borrows.rs":"313d671d4d6ad74afd4e3ef0eee62bc7c6d3fa9e28a26d09a9c2ba0ccb5eb902","tests/ui/pin_project/safe_packed_borrows.stderr":"f56c3357538b202de22e14366a3054407541496eb08a2df6bf11dadb93b5373a","tests/ui/pin_project/unaligned_references.rs":"a7e7949b01787e7584448f1f6b90a817473c02cb3c5bac2b725dbfa77de3d4b1","tests/ui/pin_project/unaligned_references.stderr":"e928dbc6ed3aa15d6be261b3c32ccec5585721c39285d0f55ddcc912fed92567","tests/ui/pin_project/unp
in_sneaky.rs":"8d49b2dcb1695b4ae26669de58dd6dc73bd836f3cd836078d6be2a0ac2cc56f3","tests/ui/pin_project/unpin_sneaky.stderr":"1f7e6e59afb094f38ed74bbd9c091bad1690f9b4a69a7892d34d441b8599cd8a","tests/ui/pin_project/visibility.rs":"4345aa1fd270a99e631510097ab7fea51aa7cbe15e155cf19f406c63735e3daa","tests/ui/pin_project/visibility.stderr":"9d56584accaad71c65e38823b0200b8ee229c027558f4461168c869ac53655b2","tests/ui/pinned_drop/call-drop-inner.rs":"a155a5b4cf7434ad6c2c4f217beb37b57edae74c5ae809627a50ea8d6ab5da50","tests/ui/pinned_drop/call-drop-inner.stderr":"39bc626a67fe768bf1b29a56d53fa80505baf7f727d8b0e18f0becab63e3dc18","tests/ui/pinned_drop/conditional-drop-impl.rs":"5d4c147d48d87a306fa875e033c4962ecd9d7666e6859da5b22a34fd552f0fc6","tests/ui/pinned_drop/conditional-drop-impl.stderr":"3a1fa63780f4d9c7fedf01bd80a1ff72aa1d6cdbfd33903db2979daf9d96a9b8","tests/ui/pinned_drop/forget-pinned-drop-impl.rs":"9a6d08c98f1214d11aac8bbf67586130ad62d07d03f4ba6aae474fe30695b536","tests/ui/pinned_drop/forget-pinned-drop-impl.stderr":"d45a218e8107ad11a59a75f30bbb2a018e153ecdead28880b09cf63f3d64f98f","tests/ui/pinned_drop/invalid-self.rs":"9c7e431b0808204d6a3bf3541668655cb72c76e8ebe8f4a04518d0c6dcdd1bd5","tests/ui/pinned_drop/invalid-self.stderr":"a2004c3f04e13bbc17446e751acb1b3985f5a6bfeb846e1b8a661951e40adb12","tests/ui/pinned_drop/invalid.rs":"881a93cfdcaeeb807314a28eea41be26e5476835467237b63e9fe2cbad52e574","tests/ui/pinned_drop/invalid.stderr":"f66265c3d0d251df73b7f678b46ae07bb10fed05ff2c7178e14af63e4b3e986f","tests/ui/pinned_drop/pinned-drop-no-attr-arg.rs":"cc406f6ffa1998896931d33cfdab79466a1494750b55519f5a8ad7fe13e08657","tests/ui/pinned_drop/pinned-drop-no-attr-arg.stderr":"7b9e301cf51d37a0cbd39228cbea3450b0ef68497f7d0d276694313b7f7045d7","tests/ui/pinned_drop/self.rs":"db7da7c136a16c34b0492dbaeed680cdf0a1bdeb186b2d4a1324dd11b43bbe2b","tests/ui/pinned_drop/self.stderr":"cc6bb620e0753abd0398a9fb3b41a320f81af16f182d9b1cd825ee414b86b770","tests/ui/pinned_drop/unsafe-call.rs":"2ecdfd7b658c0aeb85ff0feed8d6c1776a06166c287d3b8925f7fc4c699ee74a","tests/ui/pinned_drop/unsafe-call.stderr":"b92ae55c2752129fd1090539216171cd5fe4b1a693dfe1c770dc285a880cb121","tests/ui/unsafe_unpin/conflict-unpin.rs":"a22831379eb1f2d597d5f8088130f7c91add5ec69bade962d555d1b49145f56a","tests/ui/unsafe_unpin/conflict-unpin.stderr":"18a7107c3d5a9d934bd86f64079e6b57e25a86eca5db3fddcafb3ce01f39b225","tests/ui/unstable-features/README.md":"f9dcf95e9c68fe15345f493a3fb4f54726d36c76d1a5a1e2c9dfa25540d4b642","tests/ui/unstable-features/marker_trait_attr-feature-gate.rs":"35596063ddbc8a289182c3a18d98d595583e6a0b2b3324d3eec4f416de06ea4b","tests/ui/unstable-features/marker_trait_attr-feature-gate.stderr":"9e22be3282c29e976ae39f48b6bd866ef6fb45694867800991d445dc1cecb8ca","tests/ui/unstable-features/marker_trait_attr.rs":"f2a5275a5b80249d2a0e7a4e9e7d3f573ffd5f2a183dfced314e91a7d7945007","tests/ui/unstable-features/marker_trait_attr.stderr":"fbfd1b4f4a4daed395da86d31c5f305eaa026d111df2cd2a62fa2ed66ebf7b67","tests/ui/unstable-features/overlapping_marker_traits-feature-gate.rs":"dc6b854cc60ea03344f8ca3fa518a2bdc0b9aa69f2368162f4b6ad24d31bc4f0","tests/ui/unstable-features/overlapping_marker_traits-feature-gate.stderr":"9d4e7b12b19fafb2eb0b924c924ce563d99f6b787ed7663b4a2e66e6e9d3e1b6","tests/ui/unstable-features/overlapping_marker_traits.rs":"8af0d8a5afe9dcaa02fa67b394d376f9933cc99361d68f64185769326700bf7c","tests/ui/unstable-features/overlapping_marker_traits.stderr":"ed49c06dcdc05160b9cf8bdc450b27d3bb981c96e271cda1fa977d205ef5b0c8","tests/ui/unstable-f
eatures/trivial_bounds-feature-gate.rs":"658aab9de2e52a54b9e4731faae6a87172b1b4b78f3853c9fd4b565066b74e64","tests/ui/unstable-features/trivial_bounds-feature-gate.stderr":"f95c11b531c1d2bc9d7a7d64d70bc62217608498ed468ed6cc6acd03d70205ca","tests/ui/unstable-features/trivial_bounds.rs":"b12e91f4f24b841bb83b4831849795999a2095b25ab62c8966948ed03da3d327","tests/ui/unstable-features/trivial_bounds.stderr":"96d4f84e3a14064a495a57e32fe31b1c3fc819253a95c83b2a2e5ae48bd4fdc3","tests/unsafe_unpin.rs":"5ec1a97b7a54fe1cdc72f8e2c71ea8101caa0426d286532946c324be4eabead8"},"package":"58ad3879ad3baf4e44784bc6a718a8698867bb991f8ce24d1bcbe2cfb4c3a75e"} \ No newline at end of file diff --git a/third_party/rust/pin-project/CHANGELOG.md b/third_party/rust/pin-project/CHANGELOG.md index 46b1c96d854e..ebac043a1a0c 100644 --- a/third_party/rust/pin-project/CHANGELOG.md +++ b/third_party/rust/pin-project/CHANGELOG.md @@ -10,32 +10,160 @@ NOTE: In this file, do not use the hard wrap in the middle of a sentence for com ## [Unreleased] +## [1.0.10] - 2021-12-31 + +- Revert the increase of the minimal version of `syn` that was done in 1.0.9. + +## [1.0.9] - 2021-12-26 + +- [Prevent abuse of private module.](https://github.com/taiki-e/pin-project/pull/336) + +- Update minimal version of `syn` to 1.0.84. + +## [1.0.8] - 2021-07-21 + +- [Suppress `clippy::use_self` and `clippy::type_repetition_in_bounds` lints in generated code.](https://github.com/taiki-e/pin-project/pull/331) + +## [1.0.7] - 2021-04-16 + +- [Fix compile error when using `self::` as prefix of path inside `#[pinned_drop]` impl.](https://github.com/taiki-e/pin-project/pull/326) + +## [1.0.6] - 2021-03-25 + +- [Suppress `clippy::semicolon_if_nothing_returned` lint in generated code.](https://github.com/taiki-e/pin-project/pull/318) + +## [1.0.5] - 2021-02-03 + +- [Suppress `deprecated` lint in generated code.](https://github.com/taiki-e/pin-project/pull/313) + +## [1.0.4] - 2021-01-09 + +- [Suppress `clippy::ref_option_ref` lint in generated code.](https://github.com/taiki-e/pin-project/pull/308) + +## [1.0.3] - 2021-01-05 + +- Exclude unneeded files from crates.io. + +## [1.0.2] - 2020-11-18 + +- [Suppress `clippy::unknown_clippy_lints` lint in generated code.](https://github.com/taiki-e/pin-project/pull/303) + +## [1.0.1] - 2020-10-15 + +**NOTE:** This release has been yanked because it [failed to compile with syn 1.0.84 and later](https://github.com/taiki-e/pin-project/commit/ddcd88079ba2d82857c365f2a3543ad146ade54c). + +- [Fix warnings when `#[pin_project]` attribute used within `macro_rules!` macros.](https://github.com/taiki-e/pin-project/pull/298) + +## [1.0.0] - 2020-10-13 + +**NOTE:** This release has been yanked because it [failed to compile with syn 1.0.84 and later](https://github.com/taiki-e/pin-project/commit/ddcd88079ba2d82857c365f2a3543ad146ade54c). + +- [Remove deprecated `#[project]`, `#[project_ref]`, and `#[project_replace]` attributes.](https://github.com/taiki-e/pin-project/pull/265) + + Name the projected type by passing an argument with the same name as the method to the `#[pin_project]` attribute instead: + + ```diff + - #[pin_project] + + #[pin_project(project = EnumProj)] + enum Enum { + Variant(#[pin] T), + } + + - #[project] + fn func(x: Pin<&mut Enum>) { + - #[project] + match x.project() { + - Enum::Variant(_) => { /* ... */ } + + EnumProj::Variant(_) => { /* ... 
*/ } + } + } + ``` + +- [Remove deprecated `Replace` argument from `#[pin_project]` attribute.](https://github.com/taiki-e/pin-project/pull/266) Use `project_replace` argument instead. + +- [Optimize code generation when used on enums.](https://github.com/taiki-e/pin-project/pull/270) + +- [Raise the minimum supported Rust version of this crate from Rust 1.34 to Rust 1.37.](https://github.com/taiki-e/pin-project/pull/292) + +- Suppress `explicit_outlives_requirements`, `box_pointers`, `clippy::large_enum_variant`, `clippy::pattern_type_mismatch`, `clippy::implicit_return`, and `clippy::redundant_pub_crate` lints in generated code. ([#276](https://github.com/taiki-e/pin-project/pull/276), [#277](https://github.com/taiki-e/pin-project/pull/277), [#284](https://github.com/taiki-e/pin-project/pull/284)) + +- Diagnostic improvements. + +Changes since the 1.0.0-alpha.1 release: + +- [Fix drop order of pinned fields in `project_replace`.](https://github.com/taiki-e/pin-project/pull/287) + +- Update minimal version of `syn` to 1.0.44. + +## [1.0.0-alpha.1] - 2020-09-22 + +- [Remove deprecated `#[project]`, `#[project_ref]`, and `#[project_replace]` attributes.](https://github.com/taiki-e/pin-project/pull/265) + + Name the projected type by passing an argument with the same name as the method to the `#[pin_project]` attribute instead: + + ```diff + - #[pin_project] + + #[pin_project(project = EnumProj)] + enum Enum { + Variant(#[pin] T), + } + + - #[project] + fn func(x: Pin<&mut Enum>) { + - #[project] + match x.project() { + - Enum::Variant(_) => { /* ... */ } + + EnumProj::Variant(_) => { /* ... */ } + } + } + ``` + +- [Remove deprecated `Replace` argument from `#[pin_project]` attribute.](https://github.com/taiki-e/pin-project/pull/266) Use `project_replace` argument instead. + +- [Optimize code generation when used on enums.](https://github.com/taiki-e/pin-project/pull/270) + +- Suppress `explicit_outlives_requirements`, `box_pointers`, `clippy::large_enum_variant`, `clippy::pattern_type_mismatch`, and `clippy::implicit_return` lints in generated code. ([#276](https://github.com/taiki-e/pin-project/pull/276), [#277](https://github.com/taiki-e/pin-project/pull/277)) + +- Diagnostic improvements. + +See also [tracking issue for 1.0 release](https://github.com/taiki-e/pin-project/issues/264). + ## [0.4.29] - 2021-12-26 - [Fix compile error with `syn` 1.0.84 and later.](https://github.com/taiki-e/pin-project/pull/335) ## [0.4.28] - 2021-03-28 +**NOTE:** This release has been yanked because it [failed to compile with syn 1.0.84 and later](https://github.com/taiki-e/pin-project/pull/335). + - [Fix `unused_must_use` warning on unused borrows, which will be added to rustc in the future.](https://github.com/taiki-e/pin-project/pull/322) See [#322](https://github.com/taiki-e/pin-project/pull/322) for more details. (NOTE: 1.0 does not have this problem.) ## [0.4.27] - 2020-10-11 -- Update minimal version of `syn` to 1.0.44 +**NOTE:** This release has been yanked because it [failed to compile with syn 1.0.84 and later](https://github.com/taiki-e/pin-project/pull/335). + +- Update minimal version of `syn` to 1.0.44. ## [0.4.26] - 2020-10-04 +**NOTE:** This release has been yanked because it [failed to compile with syn 1.0.84 and later](https://github.com/taiki-e/pin-project/pull/335). 
+ - [Fix drop order of pinned fields in `project_replace`.](https://github.com/taiki-e/pin-project/pull/287) ## [0.4.25] - 2020-10-01 +**NOTE:** This release has been yanked because it [failed to compile with syn 1.0.84 and later](https://github.com/taiki-e/pin-project/pull/335). + - [Suppress `drop_bounds` lint, which will be added to rustc in the future.](https://github.com/taiki-e/pin-project/pull/273) See [#272](https://github.com/taiki-e/pin-project/issues/272) for more details. (NOTE: 1.0.0-alpha.1 already contains this change.) ## [0.4.24] - 2020-09-26 +**NOTE:** This release has been yanked because it [failed to compile with syn 1.0.84 and later](https://github.com/taiki-e/pin-project/pull/335). + - [Fix compatibility of generated code with `forbid(future_incompatible)`.](https://github.com/taiki-e/pin-project/pull/282) NOTE: This does not guarantee compatibility with `forbid(future_incompatible)` in the future. @@ -43,14 +171,20 @@ NOTE: In this file, do not use the hard wrap in the middle of a sentence for com ## [0.4.23] - 2020-07-27 +**NOTE:** This release has been yanked because it [failed to compile with syn 1.0.84 and later](https://github.com/taiki-e/pin-project/pull/335). + - [Fix compile error with `?Sized` type parameters.](https://github.com/taiki-e/pin-project/pull/263) ## [0.4.22] - 2020-06-14 +**NOTE:** This release has been yanked because it [failed to compile with syn 1.0.84 and later](https://github.com/taiki-e/pin-project/pull/335). + - Documentation improvements. ## [0.4.21] - 2020-06-13 +**NOTE:** This release has been yanked because it [failed to compile with syn 1.0.84 and later](https://github.com/taiki-e/pin-project/pull/335). + - [Deprecated `#[project]`, `#[project_ref]`, and `#[project_replace]` attributes due to some unfixable limitations.](https://github.com/taiki-e/pin-project/pull/244) Consider naming the projected type by passing an argument with the same name as the method to the `#[pin_project]` attribute instead. @@ -80,6 +214,8 @@ NOTE: In this file, do not use the hard wrap in the middle of a sentence for com ## [0.4.20] - 2020-06-07 +**NOTE:** This release has been yanked because it [failed to compile with syn 1.0.84 and later](https://github.com/taiki-e/pin-project/pull/335). + - [You can now use `project_replace` argument without Replace argument.](https://github.com/taiki-e/pin-project/pull/243) This used to require you to specify both. @@ -106,10 +242,14 @@ NOTE: In this file, do not use the hard wrap in the middle of a sentence for com ## [0.4.19] - 2020-06-04 +**NOTE:** This release has been yanked because it [failed to compile with syn 1.0.84 and later](https://github.com/taiki-e/pin-project/pull/335). + - [Suppress `unused_results` lint in generated code.](https://github.com/taiki-e/pin-project/pull/239) ## [0.4.18] - 2020-06-04 +**NOTE:** This release has been yanked because it [failed to compile with syn 1.0.84 and later](https://github.com/taiki-e/pin-project/pull/335). + - [Support `Self` in more syntax positions inside `#[pinned_drop]` impl.](https://github.com/taiki-e/pin-project/pull/230) - [Suppress `clippy::type_repetition_in_bounds` and `clippy::used_underscore_binding` lints in generated code.](https://github.com/taiki-e/pin-project/pull/233) @@ -120,6 +260,8 @@ NOTE: In this file, do not use the hard wrap in the middle of a sentence for com ## [0.4.17] - 2020-05-18 +**NOTE:** This release has been yanked because it [failed to compile with syn 1.0.84 and later](https://github.com/taiki-e/pin-project/pull/335). 
+ - [Support naming the projection types.](https://github.com/taiki-e/pin-project/pull/202) By passing an argument with the same name as the method to the attribute, you can name the projection type returned from the method: @@ -141,14 +283,20 @@ NOTE: In this file, do not use the hard wrap in the middle of a sentence for com ## [0.4.16] - 2020-05-11 +**NOTE:** This release has been yanked because it [failed to compile with syn 1.0.84 and later](https://github.com/taiki-e/pin-project/pull/335). + - [Fix an issue that users can call internal function generated by `#[pinned_drop]`.](https://github.com/taiki-e/pin-project/pull/223) ## [0.4.15] - 2020-05-10 +**NOTE:** This release has been yanked because it [failed to compile with syn 1.0.84 and later](https://github.com/taiki-e/pin-project/pull/335). + - [`#[project]` attribute can now handle all `project*` attributes in that scope with one wrapper attribute.](https://github.com/taiki-e/pin-project/pull/220) ## [0.4.14] - 2020-05-09 +**NOTE:** This release has been yanked because it [failed to compile with syn 1.0.84 and later](https://github.com/taiki-e/pin-project/pull/335). + - [Add `!Unpin` option to `#[pin_project]` attribute for guarantee the type is `!Unpin`.](https://github.com/taiki-e/pin-project/pull/219) ```rust @@ -183,6 +331,8 @@ NOTE: In this file, do not use the hard wrap in the middle of a sentence for com ## [0.4.13] - 2020-05-07 +**NOTE:** This release has been yanked because it [failed to compile with syn 1.0.84 and later](https://github.com/taiki-e/pin-project/pull/335). + - [Fix a regression in 0.4.11.](https://github.com/taiki-e/pin-project/pull/207) Changes from [0.4.10](https://github.com/taiki-e/pin-project/releases/tag/v0.4.10) and [0.4.12](https://github.com/taiki-e/pin-project/releases/tag/v0.4.12): @@ -199,6 +349,8 @@ NOTE: In this file, do not use the hard wrap in the middle of a sentence for com ## [0.4.12] - 2020-05-07 +**NOTE:** This release has been yanked because it [failed to compile with syn 1.0.84 and later](https://github.com/taiki-e/pin-project/pull/335). + - A release to avoid [a regression in 0.4.11](https://github.com/taiki-e/pin-project/issues/206). No code changes from [0.4.10](https://github.com/taiki-e/pin-project/releases/tag/v0.4.10). ## [0.4.11] - 2020-05-07 @@ -217,6 +369,8 @@ NOTE: In this file, do not use the hard wrap in the middle of a sentence for com ## [0.4.10] - 2020-05-04 +**NOTE:** This release has been yanked because it [failed to compile with syn 1.0.84 and later](https://github.com/taiki-e/pin-project/pull/335). + - [Add `project_replace` method and `#[project_replace]` attribute.](https://github.com/taiki-e/pin-project/pull/194) `project_replace` method is optional and can be enabled by passing the `Replace` argument to `#[pin_project]` attribute. See [the documentation](https://docs.rs/pin-project/0.4/pin_project/attr.pin_project.html#project_replace) for more details. @@ -227,6 +381,8 @@ NOTE: In this file, do not use the hard wrap in the middle of a sentence for com ## [0.4.9] - 2020-04-14 +**NOTE:** This release has been yanked because it [failed to compile with syn 1.0.84 and later](https://github.com/taiki-e/pin-project/pull/335). 
+ - [Fix lifetime inference error when associated types are used in fields.](https://github.com/taiki-e/pin-project/pull/188) - [Fix compile error with tuple structs with `where` clauses.](https://github.com/taiki-e/pin-project/pull/186) @@ -235,24 +391,34 @@ NOTE: In this file, do not use the hard wrap in the middle of a sentence for com ## [0.4.8] - 2020-01-27 +**NOTE:** This release has been yanked because it [failed to compile with syn 1.0.84 and later](https://github.com/taiki-e/pin-project/pull/335). + - [Ensure that users cannot implement `PinnedDrop` without proper attribute argument.](https://github.com/taiki-e/pin-project/pull/180) - [Fix use of `Self` in expression position inside `#[pinned_drop]` impl.](https://github.com/taiki-e/pin-project/pull/177) ## [0.4.7] - 2020-01-20 +**NOTE:** This release has been yanked because it [failed to compile with syn 1.0.84 and later](https://github.com/taiki-e/pin-project/pull/335). + - [Fix support for lifetime bounds.](https://github.com/taiki-e/pin-project/pull/176) ## [0.4.6] - 2019-11-20 +**NOTE:** This release has been yanked because it [failed to compile with syn 1.0.84 and later](https://github.com/taiki-e/pin-project/pull/335). + - [Fix compile error when there is `Self` in the where clause.](https://github.com/taiki-e/pin-project/pull/169) ## [0.4.5] - 2019-10-21 +**NOTE:** This release has been yanked because it [failed to compile with syn 1.0.84 and later](https://github.com/taiki-e/pin-project/pull/335). + - [Fix compile error with `dyn` types.](https://github.com/taiki-e/pin-project/pull/158) ## [0.4.4] - 2019-10-17 +**NOTE:** This release has been yanked because it [failed to compile with syn 1.0.84 and later](https://github.com/taiki-e/pin-project/pull/335). + - [Fix an issue where `PinnedDrop` implementations can call unsafe code without an unsafe block.](https://github.com/taiki-e/pin-project/pull/149) ## [0.4.3] - 2019-10-15 @@ -502,7 +668,19 @@ See also [tracking issue for 0.4 release](https://github.com/taiki-e/pin-project Initial release -[Unreleased]: https://github.com/taiki-e/pin-project/compare/v0.4.29...v0.4 +[Unreleased]: https://github.com/taiki-e/pin-project/compare/v1.0.10...HEAD +[1.0.10]: https://github.com/taiki-e/pin-project/compare/v1.0.9...v1.0.10 +[1.0.9]: https://github.com/taiki-e/pin-project/compare/v1.0.8...v1.0.9 +[1.0.8]: https://github.com/taiki-e/pin-project/compare/v1.0.7...v1.0.8 +[1.0.7]: https://github.com/taiki-e/pin-project/compare/v1.0.6...v1.0.7 +[1.0.6]: https://github.com/taiki-e/pin-project/compare/v1.0.5...v1.0.6 +[1.0.5]: https://github.com/taiki-e/pin-project/compare/v1.0.4...v1.0.5 +[1.0.4]: https://github.com/taiki-e/pin-project/compare/v1.0.3...v1.0.4 +[1.0.3]: https://github.com/taiki-e/pin-project/compare/v1.0.2...v1.0.3 +[1.0.2]: https://github.com/taiki-e/pin-project/compare/v1.0.1...v1.0.2 +[1.0.1]: https://github.com/taiki-e/pin-project/compare/v1.0.0...v1.0.1 +[1.0.0]: https://github.com/taiki-e/pin-project/compare/v1.0.0-alpha.1...v1.0.0 +[1.0.0-alpha.1]: https://github.com/taiki-e/pin-project/compare/v0.4.23...v1.0.0-alpha.1 [0.4.29]: https://github.com/taiki-e/pin-project/compare/v0.4.28...v0.4.29 [0.4.28]: https://github.com/taiki-e/pin-project/compare/v0.4.27...v0.4.28 [0.4.27]: https://github.com/taiki-e/pin-project/compare/v0.4.26...v0.4.27 diff --git a/third_party/rust/pin-project/Cargo.lock b/third_party/rust/pin-project/Cargo.lock index 5cb7226d3716..9b36dd519c13 100644 --- a/third_party/rust/pin-project/Cargo.lock +++ b/third_party/rust/pin-project/Cargo.lock @@ 
-2,6 +2,29 @@ # It is not intended for manual editing. version = 3 +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "diff" +version = "0.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e25ea47919b1560c4e3b7fe0aaab9becf5b84a10325ddf7db0f0ba5e1026499" + +[[package]] +name = "getrandom" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" +dependencies = [ + "cfg-if", + "libc", + "wasi", +] + [[package]] name = "glob" version = "0.3.0" @@ -10,9 +33,9 @@ checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" [[package]] name = "itoa" -version = "0.4.8" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4" +checksum = "1aab8fc367588b89dcee83ab0fd66b72b50b72fa1904d7095045ace2b0c81c35" [[package]] name = "lazy_static" @@ -21,21 +44,41 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] -name = "pin-project" -version = "0.4.29" +name = "libc" +version = "0.2.112" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b03d17f364a3a042d5e5d46b053bbbf82c92c9430c592dd4c064dc6ee997125" + +[[package]] +name = "macrotest" +version = "1.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2cb3d2a66da81f6c300d4fd5370aeea9beb3ab5b5256ef22e62c0aa524cb31b" dependencies = [ + "diff", + "glob", + "rand", + "serde", + "serde_json", + "toml", +] + +[[package]] +name = "pin-project" +version = "1.0.10" +dependencies = [ + "macrotest", "pin-project-internal", "rustversion", - "ryu", - "serde_json", + "static_assertions", "trybuild", ] [[package]] name = "pin-project-internal" -version = "0.4.29" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "044964427019eed9d49d9d5bbce6047ef18f37100ea400912a9fa4a3523ab12a" +checksum = "744b6f092ba29c3650faf274db506afd39944f48420f6c86b17cfe0ee1cb36bb" dependencies = [ "proc-macro2", "quote", @@ -43,23 +86,70 @@ dependencies = [ ] [[package]] -name = "proc-macro2" -version = "1.0.34" +name = "ppv-lite86" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f84e92c0f7c9d58328b85a78557813e4bd845130db68d7184635344399423b1" +checksum = "eb9f9e6e233e5c4a35559a617bf40a4ec447db2e84c20b55a6f83167b7e57872" + +[[package]] +name = "proc-macro2" +version = "1.0.36" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7342d5883fbccae1cc37a2353b09c87c9b0f3afd73f5fb9bba687a1f733b029" dependencies = [ "unicode-xid", ] [[package]] name = "quote" -version = "1.0.10" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38bc8cc6a5f2e3655e0899c1b848643b2562f853f114bfec7be120678e3ace05" +checksum = "47aa80447ce4daf1717500037052af176af5d38cc3e571d9ec1c7353fc10c87d" dependencies = [ "proc-macro2", ] +[[package]] +name = "rand" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" +dependencies = [ + "getrandom", + "libc", + 
"rand_chacha", + "rand_core", + "rand_hc", +] + +[[package]] +name = "rand_chacha" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" +dependencies = [ + "ppv-lite86", + "rand_core", +] + +[[package]] +name = "rand_core" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" +dependencies = [ + "getrandom", +] + +[[package]] +name = "rand_hc" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" +dependencies = [ + "rand_core", +] + [[package]] name = "rustversion" version = "1.0.6" @@ -68,9 +158,9 @@ checksum = "f2cc38e8fa666e2de3c4aba7edeb5ffc5246c1c2ed0e3d17e560aeeba736b23f" [[package]] name = "ryu" -version = "1.0.6" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c9613b5a66ab9ba26415184cfc41156594925a9cf3a2057e57f31ff145f6568" +checksum = "73b4b750c782965c211b42f022f59af1fbceabdd026623714f104152f1ec149f" [[package]] name = "serde" @@ -94,15 +184,21 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.72" +version = "1.0.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0ffa0837f2dfa6fb90868c2b5468cad482e175f7dad97e7421951e663f2b527" +checksum = "bcbd0344bc6533bc7ec56df11d42fb70f1b912351c0825ccb7211b59d8af7cf5" dependencies = [ "itoa", "ryu", "serde", ] +[[package]] +name = "static_assertions" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" + [[package]] name = "syn" version = "1.0.84" @@ -152,6 +248,12 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3" +[[package]] +name = "wasi" +version = "0.9.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" + [[package]] name = "winapi" version = "0.3.9" diff --git a/third_party/rust/pin-project/Cargo.toml b/third_party/rust/pin-project/Cargo.toml index ebf65041ef9a..cc234e1783de 100644 --- a/third_party/rust/pin-project/Cargo.toml +++ b/third_party/rust/pin-project/Cargo.toml @@ -11,9 +11,9 @@ [package] edition = "2018" -rust-version = "1.34" +rust-version = "1.37" name = "pin-project" -version = "0.4.29" +version = "1.0.10" exclude = ["/.*", "/ci", "/tools"] description = "A crate for safe and ergonomic pin-projection.\n" keywords = ["pin", "macros", "attribute"] @@ -23,16 +23,15 @@ repository = "https://github.com/taiki-e/pin-project" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies.pin-project-internal] -version = "=0.4.29" -default-features = false +version = "=1.0.10" +[dev-dependencies.macrotest] +version = "1.0.8" + [dev-dependencies.rustversion] -version = "1.0" +version = "1" -[dev-dependencies.ryu] -version = "=1.0.6" - -[dev-dependencies.serde_json] -version = "=1.0.72" +[dev-dependencies.static_assertions] +version = "1" [dev-dependencies.trybuild] -version = "1.0" +version = "1.0.49" diff --git a/third_party/rust/pin-project/LICENSE-APACHE b/third_party/rust/pin-project/LICENSE-APACHE index d64569567334..f433b1a53f5b 100644 --- 
a/third_party/rust/pin-project/LICENSE-APACHE +++ b/third_party/rust/pin-project/LICENSE-APACHE @@ -175,28 +175,3 @@ of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/third_party/rust/pin-project/README.md b/third_party/rust/pin-project/README.md index caee4d3d9d28..0e21654fad6a 100644 --- a/third_party/rust/pin-project/README.md +++ b/third_party/rust/pin-project/README.md @@ -1,18 +1,10 @@ # pin-project -[![crates-badge]][crates-url] -[![docs-badge]][docs-url] -[![license-badge]][license] -[![rustc-badge]][rustc-url] - -[crates-badge]: https://img.shields.io/crates/v/pin-project.svg -[crates-url]: https://crates.io/crates/pin-project -[docs-badge]: https://docs.rs/pin-project/badge.svg -[docs-url]: https://docs.rs/pin-project -[license-badge]: https://img.shields.io/badge/license-Apache--2.0%20OR%20MIT-blue.svg -[license]: #license -[rustc-badge]: https://img.shields.io/badge/rustc-1.34+-lightgray.svg -[rustc-url]: https://blog.rust-lang.org/2019/04/11/Rust-1.34.0.html +[![crates.io](https://img.shields.io/crates/v/pin-project?style=flat-square&logo=rust)](https://crates.io/crates/pin-project) +[![docs.rs](https://img.shields.io/badge/docs.rs-pin--project-blue?style=flat-square)](https://docs.rs/pin-project) +[![license](https://img.shields.io/badge/license-Apache--2.0_OR_MIT-blue?style=flat-square)](#license) +[![rustc](https://img.shields.io/badge/rustc-1.37+-blue?style=flat-square&logo=rust)](https://www.rust-lang.org) +[![build status](https://img.shields.io/github/workflow/status/taiki-e/pin-project/CI/main?style=flat-square&logo=github)](https://github.com/taiki-e/pin-project/actions) A crate for safe and ergonomic [pin-projection]. @@ -22,10 +14,10 @@ Add this to your `Cargo.toml`: ```toml [dependencies] -pin-project = "0.4" +pin-project = "1" ``` -The current pin-project requires Rust 1.34 or later. +*Compiler support: requires rustc 1.37+* ## Examples @@ -54,29 +46,55 @@ impl Struct { [*code like this will be generated*][struct-default-expanded] -See [documentation][docs-url] for more details, and +To use `#[pin_project]` on enums, you need to name the projection type +returned from the method. 
+ +```rust +use pin_project::pin_project; +use std::pin::Pin; + +#[pin_project(project = EnumProj)] +enum Enum { + Pinned(#[pin] T), + Unpinned(U), +} + +impl Enum { + fn method(self: Pin<&mut Self>) { + match self.project() { + EnumProj::Pinned(x) => { + let _: Pin<&mut T> = x; + } + EnumProj::Unpinned(y) => { + let _: &mut U = y; + } + } + } +} +``` + +[*code like this will be generated*][enum-default-expanded] + +See [documentation](https://docs.rs/pin-project) for more details, and see [examples] directory for more examples and generated code. -[`pin_project`]: https://docs.rs/pin-project/0.4/pin_project/attr.pin_project.html +[`pin_project`]: https://docs.rs/pin-project/1/pin_project/attr.pin_project.html +[enum-default-expanded]: examples/enum-default-expanded.rs [examples]: examples/README.md -[pin-projection]: https://doc.rust-lang.org/nightly/std/pin/index.html#projections-and-structural-pinning +[pin-projection]: https://doc.rust-lang.org/std/pin/index.html#projections-and-structural-pinning [struct-default-expanded]: examples/struct-default-expanded.rs ## Related Projects -* [pin-project-lite]: A lightweight version of pin-project written with declarative macros. +- [pin-project-lite]: A lightweight version of pin-project written with declarative macros. [pin-project-lite]: https://github.com/taiki-e/pin-project-lite ## License -Licensed under either of +Licensed under either of [Apache License, Version 2.0](LICENSE-APACHE) or +[MIT license](LICENSE-MIT) at your option. -* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or ) -* MIT license ([LICENSE-MIT](LICENSE-MIT) or ) - -at your option. - -### Contribution - -Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any additional terms or conditions. +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in the work by you, as defined in the Apache-2.0 license, shall +be dual licensed as above, without any additional terms or conditions. 
diff --git a/third_party/rust/pin-project/examples/README.md b/third_party/rust/pin-project/examples/README.md index 94f49b5ae1f9..9324dc62ea91 100644 --- a/third_party/rust/pin-project/examples/README.md +++ b/third_party/rust/pin-project/examples/README.md @@ -2,34 +2,38 @@ ### Basic usage of `#[pin_project]` on structs - * [example](struct-default.rs) - * [generated code](struct-default-expanded.rs) +- [example](struct-default.rs) +- [generated code](struct-default-expanded.rs) ### Basic usage of `#[pin_project]` on enums - * [example](enum-default.rs) - * [generated code](enum-default-expanded.rs) +- [example](enum-default.rs) +- [generated code](enum-default-expanded.rs) ### Manual implementation of `Unpin` by `UnsafeUnpin` - * [example](unsafe_unpin.rs) - * [generated code](unsafe_unpin-expanded.rs) - * [`UnsafeUnpin` documentation](https://docs.rs/pin-project/0.4/pin_project/trait.UnsafeUnpin.html) +- [example](unsafe_unpin.rs) +- [generated code](unsafe_unpin-expanded.rs) +- [`UnsafeUnpin` documentation](https://docs.rs/pin-project/1/pin_project/trait.UnsafeUnpin.html) ### Manual implementation of `Drop` by `#[pinned_drop]` - * [example](pinned_drop.rs) - * [generated code](pinned_drop-expanded.rs) - * [`#[pinned_drop]` documentation](https://docs.rs/pin-project/0.4/pin_project/attr.pinned_drop.html) +- [example](pinned_drop.rs) +- [generated code](pinned_drop-expanded.rs) +- [`#[pinned_drop]` documentation](https://docs.rs/pin-project/1/pin_project/attr.pinned_drop.html) ### `project_replace()` method - * [example](project_replace.rs) - * [generated code](project_replace-expanded.rs) - * [`project_replace()` documentation](https://docs.rs/pin-project/0.4/pin_project/attr.pin_project.html#project_replace) +- [example](project_replace.rs) +- [generated code](project_replace-expanded.rs) +- [`project_replace()` documentation](https://docs.rs/pin-project/1/pin_project/attr.pin_project.html#project_replace) ### Ensure `!Unpin` by `#[pin_project(!Unpin)]` - * [example](not_unpin.rs) - * [generated code](not_unpin-expanded.rs) - * [`!Unpin` documentation](https://docs.rs/pin-project/0.4/pin_project/attr.pin_project.html#unpin) +- [example](not_unpin.rs) +- [generated code](not_unpin-expanded.rs) +- [`!Unpin` documentation](https://docs.rs/pin-project/1/pin_project/attr.pin_project.html#unpin) + +Note: These generated code examples are the little simplified version of the +actual generated code. See [expansion tests](../tests/expand/README.md) if you +want to see the exact version of the actual generated code. 
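Reviewer note (illustrative only, not part of the patch): the examples README above links to the generated code for `project_replace()` but shows no usage; a minimal sketch under the bare `#[pin_project(project_replace)]` form it describes might look like the following. `Slot`, `value`, `generation`, and `reset` are hypothetical names.

```rust
use std::pin::Pin;

use pin_project::pin_project;

#[pin_project(project_replace)]
struct Slot<T> {
    #[pin]
    value: T,
    generation: u32,
}

fn reset<T: Default>(mut slot: Pin<&mut Slot<T>>) {
    // project_replace() overwrites the pinned contents in place with the
    // replacement value and returns an owned projection of the old value;
    // pinned fields come back as PhantomData, since moving them out of a
    // pinned location would violate the pinning guarantee.
    let _old = slot.as_mut().project_replace(Slot {
        value: T::default(),
        generation: 0,
    });
}
```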
diff --git a/third_party/rust/pin-project/examples/enum-default-expanded.rs b/third_party/rust/pin-project/examples/enum-default-expanded.rs index 48d013179772..459ca39b0324 100644 --- a/third_party/rust/pin-project/examples/enum-default-expanded.rs +++ b/third_party/rust/pin-project/examples/enum-default-expanded.rs @@ -15,19 +15,20 @@ // ``` #![allow(dead_code, unused_imports, unused_parens, unknown_lints, renamed_and_removed_lints)] -#![allow(clippy::needless_lifetimes, clippy::just_underscores_and_digits)] +#![allow( + clippy::needless_lifetimes, + clippy::just_underscores_and_digits, + clippy::used_underscore_binding +)] use pin_project::pin_project; +// #[pin_project(project = EnumProj)] enum Enum { Pinned(/* #[pin] */ T), Unpinned(U), } -#[allow(dead_code)] -#[allow(single_use_lifetimes)] -#[allow(clippy::mut_mut)] -#[allow(clippy::type_repetition_in_bounds)] enum EnumProj<'pin, T, U> where Enum: 'pin, @@ -35,45 +36,23 @@ where Pinned(::pin_project::__private::Pin<&'pin mut (T)>), Unpinned(&'pin mut (U)), } -#[doc(hidden)] -#[allow(dead_code)] -#[allow(single_use_lifetimes)] -#[allow(clippy::type_repetition_in_bounds)] -enum __EnumProjectionRef<'pin, T, U> -where - Enum: 'pin, -{ - Pinned(::pin_project::__private::Pin<&'pin (T)>), - Unpinned(&'pin (U)), -} -#[doc(hidden)] -#[allow(non_upper_case_globals)] -#[allow(single_use_lifetimes)] -#[allow(clippy::used_underscore_binding)] const _: () = { + // When `#[pin_project]` is used on enums, only named projection types and + // methods are generated because there is no way to access variants of + // projected types without naming it. + // (When `#[pin_project]` is used on structs, both methods are always generated.) + impl Enum { fn project<'pin>( self: ::pin_project::__private::Pin<&'pin mut Self>, ) -> EnumProj<'pin, T, U> { unsafe { match self.get_unchecked_mut() { - Enum::Pinned(_0) => { + Self::Pinned(_0) => { EnumProj::Pinned(::pin_project::__private::Pin::new_unchecked(_0)) } - Enum::Unpinned(_0) => EnumProj::Unpinned(_0), - } - } - } - fn project_ref<'pin>( - self: ::pin_project::__private::Pin<&'pin Self>, - ) -> __EnumProjectionRef<'pin, T, U> { - unsafe { - match self.get_ref() { - Enum::Pinned(_0) => __EnumProjectionRef::Pinned( - ::pin_project::__private::Pin::new_unchecked(_0), - ), - Enum::Unpinned(_0) => __EnumProjectionRef::Unpinned(_0), + Self::Unpinned(_0) => EnumProj::Unpinned(_0), } } } @@ -94,7 +73,12 @@ const _: () = { __Enum<'pin, T, U>: ::pin_project::__private::Unpin { } - unsafe impl ::pin_project::UnsafeUnpin for Enum {} + // A dummy impl of `UnsafeUnpin`, to ensure that the user cannot implement it. + #[doc(hidden)] + unsafe impl<'pin, T, U> ::pin_project::UnsafeUnpin for Enum where + __Enum<'pin, T, U>: ::pin_project::__private::Unpin + { + } // Ensure that enum does not implement `Drop`. // @@ -103,6 +87,9 @@ const _: () = { #[allow(clippy::drop_bounds, drop_bounds)] impl EnumMustNotImplDrop for T {} impl EnumMustNotImplDrop for Enum {} + // A dummy impl of `PinnedDrop`, to ensure that users don't accidentally + // write a non-functional `PinnedDrop` impls. 
+ #[doc(hidden)] impl ::pin_project::__private::PinnedDrop for Enum { unsafe fn drop(self: ::pin_project::__private::Pin<&mut Self>) {} } diff --git a/third_party/rust/pin-project/examples/not_unpin-expanded.rs b/third_party/rust/pin-project/examples/not_unpin-expanded.rs index f146db452532..5700c120a5ae 100644 --- a/third_party/rust/pin-project/examples/not_unpin-expanded.rs +++ b/third_party/rust/pin-project/examples/not_unpin-expanded.rs @@ -19,45 +19,33 @@ // ``` #![allow(dead_code, unused_imports, unused_parens, unknown_lints, renamed_and_removed_lints)] -#![allow(clippy::no_effect, clippy::needless_lifetimes)] +#![allow(clippy::needless_lifetimes)] use pin_project::pin_project; +// #[pin_project(!Unpin)] pub struct Struct { // #[pin] pinned: T, unpinned: U, } -#[doc(hidden)] -#[allow(dead_code)] -#[allow(single_use_lifetimes)] -#[allow(clippy::mut_mut)] -#[allow(clippy::type_repetition_in_bounds)] -pub(crate) struct __StructProjection<'pin, T, U> -where - Struct: 'pin, -{ - pinned: ::pin_project::__private::Pin<&'pin mut (T)>, - unpinned: &'pin mut (U), -} -#[doc(hidden)] -#[allow(dead_code)] -#[allow(single_use_lifetimes)] -#[allow(clippy::type_repetition_in_bounds)] -pub(crate) struct __StructProjectionRef<'pin, T, U> -where - Struct: 'pin, -{ - pinned: ::pin_project::__private::Pin<&'pin (T)>, - unpinned: &'pin (U), -} - -#[doc(hidden)] -#[allow(non_upper_case_globals)] -#[allow(single_use_lifetimes)] -#[allow(clippy::used_underscore_binding)] const _: () = { + pub(crate) struct __StructProjection<'pin, T, U> + where + Struct: 'pin, + { + pinned: ::pin_project::__private::Pin<&'pin mut (T)>, + unpinned: &'pin mut (U), + } + pub(crate) struct __StructProjectionRef<'pin, T, U> + where + Struct: 'pin, + { + pinned: ::pin_project::__private::Pin<&'pin (T)>, + unpinned: &'pin (U), + } + impl Struct { pub(crate) fn project<'pin>( self: ::pin_project::__private::Pin<&'pin mut Self>, @@ -83,6 +71,17 @@ const _: () = { } } + // Ensure that it's impossible to use pin projections on a #[repr(packed)] + // struct. + // + // See ./struct-default-expanded.rs and https://github.com/taiki-e/pin-project/pull/34 + // for details. + #[forbid(unaligned_references, safe_packed_borrows)] + fn __assert_not_repr_packed(this: &Struct) { + let _ = &this.pinned; + let _ = &this.unpinned; + } + // Create `Unpin` impl that has trivial `Unpin` bounds. // // See https://github.com/taiki-e/pin-project/issues/102#issuecomment-540472282 @@ -98,7 +97,12 @@ const _: () = { // impls, we emit one ourselves. If the user ends up writing an `UnsafeUnpin` // impl, they'll get a "conflicting implementations of trait" error when // coherence checks are run. - unsafe impl ::pin_project::UnsafeUnpin for Struct {} + #[doc(hidden)] + unsafe impl<'pin, T, U> ::pin_project::UnsafeUnpin for Struct where + ::pin_project::__private::Wrapper<'pin, ::pin_project::__private::PhantomPinned>: + ::pin_project::__private::Unpin + { + } // Ensure that struct does not implement `Drop`. // @@ -107,20 +111,12 @@ const _: () = { #[allow(clippy::drop_bounds, drop_bounds)] impl StructMustNotImplDrop for T {} impl StructMustNotImplDrop for Struct {} + // A dummy impl of `PinnedDrop`, to ensure that users don't accidentally + // write a non-functional `PinnedDrop` impls. + #[doc(hidden)] impl ::pin_project::__private::PinnedDrop for Struct { unsafe fn drop(self: ::pin_project::__private::Pin<&mut Self>) {} } - - // Ensure that it's impossible to use pin projections on a #[repr(packed)] - // struct. 
- // - // See ./struct-default-expanded.rs and https://github.com/taiki-e/pin-project/pull/34 - // for details. - #[forbid(unaligned_references, safe_packed_borrows)] - fn __assert_not_repr_packed(this: &Struct) { - let _ = &this.pinned; - let _ = &this.unpinned; - } }; fn main() { diff --git a/third_party/rust/pin-project/examples/pinned_drop-expanded.rs b/third_party/rust/pin-project/examples/pinned_drop-expanded.rs index 44dde4dd2026..82207b60b161 100644 --- a/third_party/rust/pin-project/examples/pinned_drop-expanded.rs +++ b/third_party/rust/pin-project/examples/pinned_drop-expanded.rs @@ -23,47 +23,35 @@ // ``` #![allow(dead_code, unused_imports, unused_parens, unknown_lints, renamed_and_removed_lints)] -#![allow(clippy::no_effect, clippy::needless_lifetimes)] +#![allow(clippy::needless_lifetimes, clippy::mut_mut)] use std::pin::Pin; use pin_project::{pin_project, pinned_drop}; +// #[pin_project(PinnedDrop)] pub struct Struct<'a, T> { was_dropped: &'a mut bool, // #[pin] field: T, } -#[doc(hidden)] -#[allow(dead_code)] -#[allow(single_use_lifetimes)] -#[allow(clippy::mut_mut)] -#[allow(clippy::type_repetition_in_bounds)] -pub(crate) struct __StructProjection<'pin, 'a, T> -where - Struct<'a, T>: 'pin, -{ - was_dropped: &'pin mut (&'a mut bool), - field: ::pin_project::__private::Pin<&'pin mut (T)>, -} -#[doc(hidden)] -#[allow(dead_code)] -#[allow(single_use_lifetimes)] -#[allow(clippy::type_repetition_in_bounds)] -pub(crate) struct __StructProjectionRef<'pin, 'a, T> -where - Struct<'a, T>: 'pin, -{ - was_dropped: &'pin (&'a mut bool), - field: ::pin_project::__private::Pin<&'pin (T)>, -} - -#[doc(hidden)] -#[allow(non_upper_case_globals)] -#[allow(single_use_lifetimes)] -#[allow(clippy::used_underscore_binding)] const _: () = { + pub(crate) struct __StructProjection<'pin, 'a, T> + where + Struct<'a, T>: 'pin, + { + was_dropped: &'pin mut (&'a mut bool), + field: ::pin_project::__private::Pin<&'pin mut (T)>, + } + pub(crate) struct __StructProjectionRef<'pin, 'a, T> + where + Struct<'a, T>: 'pin, + { + was_dropped: &'pin (&'a mut bool), + field: ::pin_project::__private::Pin<&'pin (T)>, + } + impl<'a, T> Struct<'a, T> { pub(crate) fn project<'pin>( self: ::pin_project::__private::Pin<&'pin mut Self>, @@ -89,6 +77,17 @@ const _: () = { } } + // Ensure that it's impossible to use pin projections on a #[repr(packed)] + // struct. + // + // See ./struct-default-expanded.rs and https://github.com/taiki-e/pin-project/pull/34 + // for details. + #[forbid(unaligned_references, safe_packed_borrows)] + fn __assert_not_repr_packed<'a, T>(this: &Struct<'a, T>) { + let _ = &this.was_dropped; + let _ = &this.field; + } + impl<'a, T> ::pin_project::__private::Drop for Struct<'a, T> { fn drop(&mut self) { // Safety - we're in 'drop', so we know that 'self' will @@ -116,17 +115,11 @@ const _: () = { __Struct<'pin, 'a, T>: ::pin_project::__private::Unpin { } - unsafe impl<'a, T> ::pin_project::UnsafeUnpin for Struct<'a, T> {} - - // Ensure that it's impossible to use pin projections on a #[repr(packed)] - // struct. - // - // See ./struct-default-expanded.rs and https://github.com/taiki-e/pin-project/pull/34 - // for details. - #[forbid(unaligned_references, safe_packed_borrows)] - fn __assert_not_repr_packed<'a, T>(this: &Struct<'a, T>) { - let _ = &this.was_dropped; - let _ = &this.field; + // A dummy impl of `UnsafeUnpin`, to ensure that the user cannot implement it. 
+ #[doc(hidden)] + unsafe impl<'pin, 'a, T> ::pin_project::UnsafeUnpin for Struct<'a, T> where + __Struct<'pin, 'a, T>: ::pin_project::__private::Unpin + { } }; @@ -142,6 +135,7 @@ const _: () = { // Users can implement [`Drop`] safely using `#[pinned_drop]` and can drop a // type that implements `PinnedDrop` using the [`drop`] function safely. // **Do not call or implement this trait directly.** +#[doc(hidden)] impl ::pin_project::__private::PinnedDrop for Struct<'_, T> { // Since calling it twice on the same object would be UB, // this method is unsafe. diff --git a/third_party/rust/pin-project/examples/project_replace-expanded.rs b/third_party/rust/pin-project/examples/project_replace-expanded.rs index 472bafc45bcf..445e5705e1a5 100644 --- a/third_party/rust/pin-project/examples/project_replace-expanded.rs +++ b/third_party/rust/pin-project/examples/project_replace-expanded.rs @@ -16,53 +16,37 @@ // ``` #![allow(dead_code, unused_imports, unused_parens, unknown_lints, renamed_and_removed_lints)] -#![allow(clippy::no_effect, clippy::needless_lifetimes)] +#![allow(clippy::needless_lifetimes)] use pin_project::pin_project; +// #[pin_project(project_replace)] struct Struct { // #[pin] pinned: T, unpinned: U, } -#[doc(hidden)] -#[allow(dead_code)] -#[allow(single_use_lifetimes)] -#[allow(clippy::mut_mut)] -#[allow(clippy::type_repetition_in_bounds)] -struct __StructProjection<'pin, T, U> -where - Struct: 'pin, -{ - pinned: ::pin_project::__private::Pin<&'pin mut (T)>, - unpinned: &'pin mut (U), -} -#[doc(hidden)] -#[allow(dead_code)] -#[allow(single_use_lifetimes)] -#[allow(clippy::type_repetition_in_bounds)] -struct __StructProjectionRef<'pin, T, U> -where - Struct: 'pin, -{ - pinned: ::pin_project::__private::Pin<&'pin (T)>, - unpinned: &'pin (U), -} -#[doc(hidden)] -#[allow(dead_code)] -#[allow(unreachable_pub)] -#[allow(single_use_lifetimes)] -struct __StructProjectionOwned { - pinned: ::pin_project::__private::PhantomData, - unpinned: U, -} - -#[doc(hidden)] -#[allow(non_upper_case_globals)] -#[allow(single_use_lifetimes)] -#[allow(clippy::used_underscore_binding)] const _: () = { + struct __StructProjection<'pin, T, U> + where + Struct: 'pin, + { + pinned: ::pin_project::__private::Pin<&'pin mut (T)>, + unpinned: &'pin mut (U), + } + struct __StructProjectionRef<'pin, T, U> + where + Struct: 'pin, + { + pinned: ::pin_project::__private::Pin<&'pin (T)>, + unpinned: &'pin (U), + } + struct __StructProjectionOwned { + pinned: ::pin_project::__private::PhantomData, + unpinned: U, + } + impl Struct { fn project<'pin>( self: ::pin_project::__private::Pin<&'pin mut Self>, @@ -92,6 +76,12 @@ const _: () = { ) -> __StructProjectionOwned { unsafe { let __self_ptr: *mut Self = self.get_unchecked_mut(); + + // Destructors will run in reverse order, so next create a guard to overwrite + // `self` with the replacement value without calling destructors. + let __guard = + ::pin_project::__private::UnsafeOverwriteGuard::new(__self_ptr, __replacement); + let Self { pinned, unpinned } = &mut *__self_ptr; // First, extract all the unpinned fields @@ -100,20 +90,13 @@ const _: () = { unpinned: ::pin_project::__private::ptr::read(unpinned), }; - // Destructors will run in reverse order, so next create a guard to overwrite - // `self` with the replacement value without calling destructors. 
- let __guard = ::pin_project::__private::UnsafeOverwriteGuard { - target: __self_ptr, - value: ::pin_project::__private::ManuallyDrop::new(__replacement), - }; - // Now create guards to drop all the pinned fields // // Due to a compiler bug (https://github.com/rust-lang/rust/issues/47949) // this must be in its own scope, or else `__result` will not be dropped // if any of the destructors panic. { - let __guard = ::pin_project::__private::UnsafeDropInPlaceGuard(pinned); + let __guard = ::pin_project::__private::UnsafeDropInPlaceGuard::new(pinned); } // Finally, return the result @@ -122,6 +105,17 @@ const _: () = { } } + // Ensure that it's impossible to use pin projections on a #[repr(packed)] + // struct. + // + // See ./struct-default-expanded.rs and https://github.com/taiki-e/pin-project/pull/34 + // for details. + #[forbid(unaligned_references, safe_packed_borrows)] + fn __assert_not_repr_packed(this: &Struct) { + let _ = &this.pinned; + let _ = &this.unpinned; + } + // Automatically create the appropriate conditional `Unpin` implementation. // // See ./struct-default-expanded.rs and https://github.com/taiki-e/pin-project/pull/53. @@ -137,7 +131,12 @@ const _: () = { __Struct<'pin, T, U>: ::pin_project::__private::Unpin { } - unsafe impl ::pin_project::UnsafeUnpin for Struct {} + // A dummy impl of `UnsafeUnpin`, to ensure that the user cannot implement it. + #[doc(hidden)] + unsafe impl<'pin, T, U> ::pin_project::UnsafeUnpin for Struct where + __Struct<'pin, T, U>: ::pin_project::__private::Unpin + { + } // Ensure that struct does not implement `Drop`. // @@ -146,20 +145,12 @@ const _: () = { #[allow(clippy::drop_bounds, drop_bounds)] impl StructMustNotImplDrop for T {} impl StructMustNotImplDrop for Struct {} + // A dummy impl of `PinnedDrop`, to ensure that users don't accidentally + // write a non-functional `PinnedDrop` impls. + #[doc(hidden)] impl ::pin_project::__private::PinnedDrop for Struct { unsafe fn drop(self: ::pin_project::__private::Pin<&mut Self>) {} } - - // Ensure that it's impossible to use pin projections on a #[repr(packed)] - // struct. - // - // See ./struct-default-expanded.rs and https://github.com/taiki-e/pin-project/pull/34 - // for details. 
- #[forbid(unaligned_references, safe_packed_borrows)] - fn __assert_not_repr_packed(this: &Struct) { - let _ = &this.pinned; - let _ = &this.unpinned; - } }; fn main() {} diff --git a/third_party/rust/pin-project/examples/struct-default-expanded.rs b/third_party/rust/pin-project/examples/struct-default-expanded.rs index 818beaf0c3b4..d6610993d3b9 100644 --- a/third_party/rust/pin-project/examples/struct-default-expanded.rs +++ b/third_party/rust/pin-project/examples/struct-default-expanded.rs @@ -16,45 +16,33 @@ // ``` #![allow(dead_code, unused_imports, unused_parens, unknown_lints, renamed_and_removed_lints)] -#![allow(clippy::no_effect, clippy::needless_lifetimes)] +#![allow(clippy::needless_lifetimes)] use pin_project::pin_project; +// #[pin_project] struct Struct { // #[pin] pinned: T, unpinned: U, } -#[doc(hidden)] -#[allow(dead_code)] -#[allow(single_use_lifetimes)] -#[allow(clippy::mut_mut)] -#[allow(clippy::type_repetition_in_bounds)] -struct __StructProjection<'pin, T, U> -where - Struct: 'pin, -{ - pinned: ::pin_project::__private::Pin<&'pin mut (T)>, - unpinned: &'pin mut (U), -} -#[doc(hidden)] -#[allow(dead_code)] -#[allow(single_use_lifetimes)] -#[allow(clippy::type_repetition_in_bounds)] -struct __StructProjectionRef<'pin, T, U> -where - Struct: 'pin, -{ - pinned: ::pin_project::__private::Pin<&'pin (T)>, - unpinned: &'pin (U), -} - -#[doc(hidden)] -#[allow(non_upper_case_globals)] -#[allow(single_use_lifetimes)] -#[allow(clippy::used_underscore_binding)] const _: () = { + struct __StructProjection<'pin, T, U> + where + Struct: 'pin, + { + pinned: ::pin_project::__private::Pin<&'pin mut (T)>, + unpinned: &'pin mut (U), + } + struct __StructProjectionRef<'pin, T, U> + where + Struct: 'pin, + { + pinned: ::pin_project::__private::Pin<&'pin (T)>, + unpinned: &'pin (U), + } + impl Struct { fn project<'pin>( self: ::pin_project::__private::Pin<&'pin mut Self>, @@ -80,6 +68,24 @@ const _: () = { } } + // Ensure that it's impossible to use pin projections on a #[repr(packed)] + // struct. + // + // Taking a reference to a packed field is UB, and applying + // `#[forbid(unaligned_references)]` makes sure that doing this is a hard error. + // + // If the struct ends up having #[repr(packed)] applied somehow, + // this will generate an (unfriendly) error message. Under all reasonable + // circumstances, we'll detect the #[repr(packed)] attribute, and generate + // a much nicer error above. + // + // See https://github.com/taiki-e/pin-project/pull/34 for more details. + #[forbid(unaligned_references, safe_packed_borrows)] + fn __assert_not_repr_packed(this: &Struct) { + let _ = &this.pinned; + let _ = &this.unpinned; + } + // Automatically create the appropriate conditional `Unpin` implementation. // // Basically this is equivalent to the following code: @@ -123,7 +129,11 @@ const _: () = { // impls, we emit one ourselves. If the user ends up writing an `UnsafeUnpin` // impl, they'll get a "conflicting implementations of trait" error when // coherence checks are run. - unsafe impl ::pin_project::UnsafeUnpin for Struct {} + #[doc(hidden)] + unsafe impl<'pin, T, U> ::pin_project::UnsafeUnpin for Struct where + __Struct<'pin, T, U>: ::pin_project::__private::Unpin + { + } // Ensure that struct does not implement `Drop`. // @@ -136,27 +146,10 @@ const _: () = { impl StructMustNotImplDrop for Struct {} // A dummy impl of `PinnedDrop`, to ensure that users don't accidentally // write a non-functional `PinnedDrop` impls. 
+ #[doc(hidden)] impl ::pin_project::__private::PinnedDrop for Struct { unsafe fn drop(self: ::pin_project::__private::Pin<&mut Self>) {} } - - // Ensure that it's impossible to use pin projections on a #[repr(packed)] - // struct. - // - // Taking a reference to a packed field is UB, and applying - // `#[forbid(unaligned_references)]` makes sure that doing this is a hard error. - // - // If the struct ends up having #[repr(packed)] applied somehow, - // this will generate an (unfriendly) error message. Under all reasonable - // circumstances, we'll detect the #[repr(packed)] attribute, and generate - // a much nicer error above. - // - // See https://github.com/taiki-e/pin-project/pull/34 for more details. - #[forbid(unaligned_references, safe_packed_borrows)] - fn __assert_not_repr_packed(this: &Struct) { - let _ = &this.pinned; - let _ = &this.unpinned; - } }; fn main() {} diff --git a/third_party/rust/pin-project/examples/unsafe_unpin-expanded.rs b/third_party/rust/pin-project/examples/unsafe_unpin-expanded.rs index 3c18e49439e2..e9c7abceddce 100644 --- a/third_party/rust/pin-project/examples/unsafe_unpin-expanded.rs +++ b/third_party/rust/pin-project/examples/unsafe_unpin-expanded.rs @@ -18,45 +18,33 @@ // ``` #![allow(dead_code, unused_imports, unused_parens, unknown_lints, renamed_and_removed_lints)] -#![allow(clippy::no_effect, clippy::needless_lifetimes)] +#![allow(clippy::needless_lifetimes)] use pin_project::{pin_project, UnsafeUnpin}; +// #[pin_project(UnsafeUnpin)] pub struct Struct { // #[pin] pinned: T, unpinned: U, } -#[doc(hidden)] -#[allow(dead_code)] -#[allow(single_use_lifetimes)] -#[allow(clippy::mut_mut)] -#[allow(clippy::type_repetition_in_bounds)] -pub(crate) struct __StructProjection<'pin, T, U> -where - Struct: 'pin, -{ - pinned: ::pin_project::__private::Pin<&'pin mut (T)>, - unpinned: &'pin mut (U), -} -#[doc(hidden)] -#[allow(dead_code)] -#[allow(single_use_lifetimes)] -#[allow(clippy::type_repetition_in_bounds)] -pub(crate) struct __StructProjectionRef<'pin, T, U> -where - Struct: 'pin, -{ - pinned: ::pin_project::__private::Pin<&'pin (T)>, - unpinned: &'pin (U), -} - -#[doc(hidden)] -#[allow(non_upper_case_globals)] -#[allow(single_use_lifetimes)] -#[allow(clippy::used_underscore_binding)] const _: () = { + pub(crate) struct __StructProjection<'pin, T, U> + where + Struct: 'pin, + { + pinned: ::pin_project::__private::Pin<&'pin mut (T)>, + unpinned: &'pin mut (U), + } + pub(crate) struct __StructProjectionRef<'pin, T, U> + where + Struct: 'pin, + { + pinned: ::pin_project::__private::Pin<&'pin (T)>, + unpinned: &'pin (U), + } + impl Struct { pub(crate) fn project<'pin>( self: ::pin_project::__private::Pin<&'pin mut Self>, @@ -82,6 +70,18 @@ const _: () = { } } + // Ensure that it's impossible to use pin projections on a #[repr(packed)] + // struct. + // + // See ./struct-default-expanded.rs and https://github.com/taiki-e/pin-project/pull/34 + // for details. + #[forbid(unaligned_references, safe_packed_borrows)] + fn __assert_not_repr_packed(this: &Struct) { + let _ = &this.pinned; + let _ = &this.unpinned; + } + + // Implement `Unpin` via `UnsafeUnpin`. 
impl<'pin, T, U> ::pin_project::__private::Unpin for Struct where ::pin_project::__private::Wrapper<'pin, Self>: ::pin_project::UnsafeUnpin { @@ -94,20 +94,12 @@ const _: () = { #[allow(clippy::drop_bounds, drop_bounds)] impl StructMustNotImplDrop for T {} impl StructMustNotImplDrop for Struct {} + // A dummy impl of `PinnedDrop`, to ensure that users don't accidentally + // write a non-functional `PinnedDrop` impls. + #[doc(hidden)] impl ::pin_project::__private::PinnedDrop for Struct { unsafe fn drop(self: ::pin_project::__private::Pin<&mut Self>) {} } - - // Ensure that it's impossible to use pin projections on a #[repr(packed)] - // struct. - // - // See ./struct-default-expanded.rs and https://github.com/taiki-e/pin-project/pull/34 - // for details. - #[forbid(unaligned_references, safe_packed_borrows)] - fn __assert_not_repr_packed(this: &Struct) { - let _ = &this.pinned; - let _ = &this.unpinned; - } }; unsafe impl UnsafeUnpin for Struct {} diff --git a/third_party/rust/pin-project/src/lib.rs b/third_party/rust/pin-project/src/lib.rs index 4b08e35da0db..240655136e2e 100644 --- a/third_party/rust/pin-project/src/lib.rs +++ b/third_party/rust/pin-project/src/lib.rs @@ -28,46 +28,71 @@ //! //! [*code like this will be generated*][struct-default-expanded] //! +//! To use `#[pin_project]` on enums, you need to name the projection type +//! returned from the method. +//! +//! ```rust +//! use std::pin::Pin; +//! +//! use pin_project::pin_project; +//! +//! #[pin_project(project = EnumProj)] +//! enum Enum { +//! Pinned(#[pin] T), +//! Unpinned(U), +//! } +//! +//! impl Enum { +//! fn method(self: Pin<&mut Self>) { +//! match self.project() { +//! EnumProj::Pinned(x) => { +//! let _: Pin<&mut T> = x; +//! } +//! EnumProj::Unpinned(y) => { +//! let _: &mut U = y; +//! } +//! } +//! } +//! } +//! ``` +//! +//! [*code like this will be generated*][enum-default-expanded] +//! //! See [`#[pin_project]`][`pin_project`] attribute for more details, and //! see [examples] directory for more examples and generated code. //! -//! [`pin_project`]: attr.pin_project.html -//! [examples]: https://github.com/taiki-e/pin-project/blob/master/examples/README.md -//! [pin-projection]: https://doc.rust-lang.org/nightly/std/pin/index.html#projections-and-structural-pinning -//! [struct-default-expanded]: https://github.com/taiki-e/pin-project/blob/master/examples/struct-default-expanded.rs +//! [examples]: https://github.com/taiki-e/pin-project/blob/HEAD/examples/README.md +//! [enum-default-expanded]: https://github.com/taiki-e/pin-project/blob/HEAD/examples/enum-default-expanded.rs +//! [pin-projection]: core::pin#projections-and-structural-pinning +//! 
[struct-default-expanded]: https://github.com/taiki-e/pin-project/blob/HEAD/examples/struct-default-expanded.rs #![no_std] #![doc(test( no_crate_inject, - attr(deny(warnings, rust_2018_idioms, single_use_lifetimes), allow(dead_code)) + attr( + deny(warnings, rust_2018_idioms, single_use_lifetimes), + allow(dead_code, unused_variables) + ) ))] #![warn(missing_docs, rust_2018_idioms, single_use_lifetimes, unreachable_pub)] #![warn(clippy::default_trait_access, clippy::wildcard_imports)] -// mem::take and #[non_exhaustive] requires Rust 1.40 -#![allow(clippy::mem_replace_with_default, clippy::manual_non_exhaustive)] #![allow(clippy::needless_doctest_main)] #[doc(inline)] pub use pin_project_internal::pin_project; #[doc(inline)] pub use pin_project_internal::pinned_drop; -#[allow(deprecated)] -#[doc(inline)] -pub use pin_project_internal::project; -#[allow(deprecated)] -#[doc(inline)] -pub use pin_project_internal::project_ref; -#[allow(deprecated)] -#[doc(inline)] -pub use pin_project_internal::project_replace; /// A trait used for custom implementations of [`Unpin`]. -/// This trait is used in conjunction with the `UnsafeUnpin` -/// argument to [`#[pin_project]`][`pin_project`] +/// +/// This trait is used in conjunction with the `UnsafeUnpin` argument to +/// the [`#[pin_project]`][macro@pin_project] attribute. +/// +/// # Safety /// /// The Rust [`Unpin`] trait is safe to implement - by itself, -/// implementing it cannot lead to undefined behavior. Undefined -/// behavior can only occur when other unsafe code is used. +/// implementing it cannot lead to [undefined behavior][undefined-behavior]. +/// Undefined behavior can only occur when other unsafe code is used. /// /// It turns out that using pin projections, which requires unsafe code, /// imposes additional requirements on an [`Unpin`] impl. Normally, all of this @@ -105,28 +130,28 @@ pub use pin_project_internal::project_replace; /// use pin_project::{pin_project, UnsafeUnpin}; /// /// #[pin_project(UnsafeUnpin)] -/// struct Foo { +/// struct Struct { /// #[pin] /// field_1: K, /// field_2: V, /// } /// -/// unsafe impl UnsafeUnpin for Foo where K: Unpin + Clone {} +/// unsafe impl UnsafeUnpin for Struct where K: Unpin + Clone {} /// ``` /// /// [`PhantomPinned`]: core::marker::PhantomPinned -/// [`pin_project`]: attr.pin_project.html -/// [pin-projection]: https://doc.rust-lang.org/nightly/std/pin/index.html#projections-and-structural-pinning /// [cargo-geiger]: https://github.com/rust-secure-code/cargo-geiger +/// [pin-projection]: core::pin#projections-and-structural-pinning +/// [undefined-behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html pub unsafe trait UnsafeUnpin {} // Not public API. #[doc(hidden)] pub mod __private { + use core::mem::ManuallyDrop; #[doc(hidden)] pub use core::{ marker::{PhantomData, PhantomPinned, Unpin}, - mem::ManuallyDrop, ops::Drop, pin::Pin, ptr, @@ -137,18 +162,26 @@ pub mod __private { use super::UnsafeUnpin; + // An internal trait used for custom implementations of [`Drop`]. + // + // **Do not call or implement this trait directly.** + // + // # Why this trait is private and `#[pinned_drop]` attribute is needed? + // // Implementing `PinnedDrop::drop` is safe, but calling it is not safe. // This is because destructors can be called multiple times in safe code and - // [double dropping is unsound](https://github.com/rust-lang/rust/pull/62360). + // [double dropping is unsound][rust-lang/rust#62360]. 
// // Ideally, it would be desirable to be able to forbid manual calls in // the same way as [`Drop::drop`], but the library cannot do it. So, by using - // macros and replacing them with private traits, we prevent users from - // calling `PinnedDrop::drop`. + // macros and replacing them with private traits, + // this crate prevent users from calling `PinnedDrop::drop` in safe code. // - // Users can implement [`Drop`] safely using `#[pinned_drop]` and can drop a - // type that implements `PinnedDrop` using the [`drop`] function safely. - // **Do not call or implement this trait directly.** + // This allows implementing [`Drop`] safely using `#[pinned_drop]`. + // Also by using the [`drop`] function just like dropping a type that directly + // implements [`Drop`], can drop safely a type that implements `PinnedDrop`. + // + // [rust-lang/rust#62360]: https://github.com/rust-lang/rust/pull/62360 #[doc(hidden)] pub trait PinnedDrop { #[doc(hidden)] @@ -222,7 +255,14 @@ pub mod __private { // This is an internal helper used to ensure a value is dropped. #[doc(hidden)] - pub struct UnsafeDropInPlaceGuard(pub *mut T); + pub struct UnsafeDropInPlaceGuard(*mut T); + + impl UnsafeDropInPlaceGuard { + #[doc(hidden)] + pub unsafe fn new(ptr: *mut T) -> Self { + Self(ptr) + } + } impl Drop for UnsafeDropInPlaceGuard { fn drop(&mut self) { @@ -236,8 +276,15 @@ pub mod __private { // its destructor being called. #[doc(hidden)] pub struct UnsafeOverwriteGuard { - pub value: ManuallyDrop, - pub target: *mut T, + target: *mut T, + value: ManuallyDrop, + } + + impl UnsafeOverwriteGuard { + #[doc(hidden)] + pub unsafe fn new(target: *mut T, value: T) -> Self { + Self { target, value: ManuallyDrop::new(value) } + } } impl Drop for UnsafeOverwriteGuard { diff --git a/third_party/rust/pin-project/tests/README.md b/third_party/rust/pin-project/tests/README.md new file mode 100644 index 000000000000..b109e7153460 --- /dev/null +++ b/third_party/rust/pin-project/tests/README.md @@ -0,0 +1,44 @@ +# Tests + +To run all tests, run the following command: + +```sh +cargo +nightly test --all +``` + +## UI tests (`ui`, `compiletest.rs`) + +This checks errors detected by the macro or the Rust compiler in the resulting +expanded code. + +To run this test, run the following command: + +```sh +cargo +nightly test --test compiletest +``` + +Locally, this test updates the files in the `ui` directory if there are +changes to the generated code. If there are any changes to the files in the +`ui` directory after running the test, please commit them. + +See also [`trybuild` documentation](https://docs.rs/trybuild). + +## Expansion tests (`expand`, `expandtest.rs`) + +Similar to ui tests, but instead of checking the compiler output, this checks +the code generated by macros. + +See [examples](../examples/README.md) for descriptions of what the generated +code does, and why it needs to be generated. + +To run this test, run the following command: + +```sh +cargo +nightly test --test expandtest +``` + +Locally, this test updates the files in the `expand` directory if there are +changes to the generated code. If there are any changes to the files in the +`expand` directory after running the test, please commit them. + +See also [`macrotest` documentation](https://docs.rs/macrotest). 
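Reviewer note (illustrative only, not part of the vendored tests): the expansion-test section of `tests/README.md` above describes `expandtest.rs` without showing it. A minimal harness in that style might look like the sketch below, assuming `macrotest`'s glob-based `expand()` entry point and the same `rustversion` nightly gate used by `compiletest.rs` later in this patch; the exact vendored file may differ.

```rust
// Expand everything under tests/expand/ with cargo-expand and compare the
// output against the checked-in *.expanded.rs files, updating them locally
// when the generated code changes (as described in tests/README.md).
#[rustversion::attr(not(nightly), ignore)]
#[test]
fn expandtest() {
    macrotest::expand("tests/expand/**/*.rs");
}
```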
diff --git a/third_party/rust/pin-project-lite-0.1.12/tests/auxiliary/mod.rs b/third_party/rust/pin-project/tests/auxiliary/mod.rs similarity index 100% rename from third_party/rust/pin-project-lite-0.1.12/tests/auxiliary/mod.rs rename to third_party/rust/pin-project/tests/auxiliary/mod.rs diff --git a/third_party/rust/pin-project/tests/cfg.rs b/third_party/rust/pin-project/tests/cfg.rs index 5f28bbf01973..2f5387b671df 100644 --- a/third_party/rust/pin-project/tests/cfg.rs +++ b/third_party/rust/pin-project/tests/cfg.rs @@ -1,22 +1,19 @@ #![warn(rust_2018_idioms, single_use_lifetimes)] #![allow(dead_code)] -// Refs: https://doc.rust-lang.org/nightly/reference/attributes.html +// Refs: https://doc.rust-lang.org/reference/attributes.html + +#[macro_use] +mod auxiliary; use std::{marker::PhantomPinned, pin::Pin}; use pin_project::pin_project; -fn is_unpin() {} - -#[cfg(target_os = "linux")] -struct Linux; -#[cfg(not(target_os = "linux"))] -struct Other; +struct Always; // Use this type to check that `cfg(any())` is working properly. -// If `cfg(any())` is not working properly, `is_unpin` will fail. -struct Any(PhantomPinned); +struct Never(PhantomPinned); #[test] fn cfg() { @@ -24,201 +21,142 @@ fn cfg() { #[pin_project(project_replace)] struct SameName { - #[cfg(target_os = "linux")] + #[cfg(not(any()))] #[pin] - inner: Linux, - #[cfg(not(target_os = "linux"))] - #[pin] - inner: Other, + inner: Always, #[cfg(any())] #[pin] - any: Any, + inner: Never, } - is_unpin::(); + assert_unpin!(SameName); - #[cfg(target_os = "linux")] - let _x = SameName { inner: Linux }; - #[cfg(not(target_os = "linux"))] - let _x = SameName { inner: Other }; + let _ = SameName { inner: Always }; #[pin_project(project_replace)] struct DifferentName { - #[cfg(target_os = "linux")] + #[cfg(not(any()))] #[pin] - l: Linux, - #[cfg(not(target_os = "linux"))] - #[pin] - o: Other, + a: Always, #[cfg(any())] #[pin] - a: Any, + n: Never, } - is_unpin::(); + assert_unpin!(DifferentName); - #[cfg(target_os = "linux")] - let _x = DifferentName { l: Linux }; - #[cfg(not(target_os = "linux"))] - let _x = DifferentName { o: Other }; + let _ = DifferentName { a: Always }; #[pin_project(project_replace)] struct TupleStruct( - #[cfg(target_os = "linux")] + #[cfg(not(any()))] #[pin] - Linux, - #[cfg(not(target_os = "linux"))] - #[pin] - Other, + Always, #[cfg(any())] #[pin] - Any, + Never, ); - is_unpin::(); + assert_unpin!(TupleStruct); - #[cfg(target_os = "linux")] - let _x = TupleStruct(Linux); - #[cfg(not(target_os = "linux"))] - let _x = TupleStruct(Other); + let _ = TupleStruct(Always); // enums - #[pin_project(project_replace)] + #[pin_project( + project = VariantProj, + project_ref = VariantProjRef, + project_replace = VariantProjOwn, + )] enum Variant { - #[cfg(target_os = "linux")] - Inner(#[pin] Linux), - #[cfg(not(target_os = "linux"))] - Inner(#[pin] Other), - - #[cfg(target_os = "linux")] - Linux(#[pin] Linux), - #[cfg(not(target_os = "linux"))] - Other(#[pin] Other), + #[cfg(not(any()))] + Inner(#[pin] Always), #[cfg(any())] - Any(#[pin] Any), + Inner(#[pin] Never), + + #[cfg(not(any()))] + A(#[pin] Always), + #[cfg(any())] + N(#[pin] Never), } - is_unpin::(); + assert_unpin!(Variant); - #[cfg(target_os = "linux")] - let _x = Variant::Inner(Linux); - #[cfg(not(target_os = "linux"))] - let _x = Variant::Inner(Other); + let _ = Variant::Inner(Always); + let _ = Variant::A(Always); - #[cfg(target_os = "linux")] - let _x = Variant::Linux(Linux); - #[cfg(not(target_os = "linux"))] - let _x = Variant::Other(Other); - - 
#[pin_project(project_replace)] + #[pin_project( + project = FieldProj, + project_ref = FieldProjRef, + project_replace = FieldProjOwn, + )] enum Field { SameName { - #[cfg(target_os = "linux")] + #[cfg(not(any()))] #[pin] - inner: Linux, - #[cfg(not(target_os = "linux"))] - #[pin] - inner: Other, + inner: Always, #[cfg(any())] #[pin] - any: Any, + inner: Never, }, DifferentName { - #[cfg(target_os = "linux")] + #[cfg(not(any()))] #[pin] - l: Linux, - #[cfg(not(target_os = "linux"))] - #[pin] - w: Other, + a: Always, #[cfg(any())] #[pin] - any: Any, + n: Never, }, TupleVariant( - #[cfg(target_os = "linux")] + #[cfg(not(any()))] #[pin] - Linux, - #[cfg(not(target_os = "linux"))] - #[pin] - Other, + Always, #[cfg(any())] #[pin] - Any, + Never, ), } - is_unpin::(); + assert_unpin!(Field); - #[cfg(target_os = "linux")] - let _x = Field::SameName { inner: Linux }; - #[cfg(not(target_os = "linux"))] - let _x = Field::SameName { inner: Other }; - - #[cfg(target_os = "linux")] - let _x = Field::DifferentName { l: Linux }; - #[cfg(not(target_os = "linux"))] - let _x = Field::DifferentName { w: Other }; - - #[cfg(target_os = "linux")] - let _x = Field::TupleVariant(Linux); - #[cfg(not(target_os = "linux"))] - let _x = Field::TupleVariant(Other); + let _ = Field::SameName { inner: Always }; + let _ = Field::DifferentName { a: Always }; + let _ = Field::TupleVariant(Always); } #[test] fn cfg_attr() { #[pin_project(project_replace)] struct SameCfg { - #[cfg(target_os = "linux")] - #[cfg_attr(target_os = "linux", pin)] - inner: Linux, - #[cfg(not(target_os = "linux"))] - #[cfg_attr(not(target_os = "linux"), pin)] - inner: Other, + #[cfg(not(any()))] + #[cfg_attr(not(any()), pin)] + inner: Always, #[cfg(any())] #[cfg_attr(any(), pin)] - any: Any, + inner: Never, } - is_unpin::(); - - #[cfg(target_os = "linux")] - let mut x = SameCfg { inner: Linux }; - #[cfg(not(target_os = "linux"))] - let mut x = SameCfg { inner: Other }; + assert_unpin!(SameCfg); + let mut x = SameCfg { inner: Always }; let x = Pin::new(&mut x).project(); - #[cfg(target_os = "linux")] - let _: Pin<&mut Linux> = x.inner; - #[cfg(not(target_os = "linux"))] - let _: Pin<&mut Other> = x.inner; + let _: Pin<&mut Always> = x.inner; #[pin_project(project_replace)] struct DifferentCfg { - #[cfg(target_os = "linux")] - #[cfg_attr(target_os = "linux", pin)] - inner: Linux, - #[cfg(not(target_os = "linux"))] - #[cfg_attr(target_os = "linux", pin)] - inner: Other, - #[cfg(any())] + #[cfg(not(any()))] #[cfg_attr(any(), pin)] - any: Any, + inner: Always, + #[cfg(any())] + #[cfg_attr(not(any()), pin)] + inner: Never, } - is_unpin::(); - - #[cfg(target_os = "linux")] - let mut x = DifferentCfg { inner: Linux }; - #[cfg(not(target_os = "linux"))] - let mut x = DifferentCfg { inner: Other }; + assert_unpin!(DifferentCfg); + let mut x = DifferentCfg { inner: Always }; let x = Pin::new(&mut x).project(); - #[cfg(target_os = "linux")] - let _: Pin<&mut Linux> = x.inner; - #[cfg(not(target_os = "linux"))] - let _: &mut Other = x.inner; + let _: &mut Always = x.inner; #[cfg_attr(not(any()), pin_project)] struct Foo { @@ -226,6 +164,9 @@ fn cfg_attr() { inner: T, } + assert_unpin!(Foo<()>); + assert_not_unpin!(Foo); + let mut x = Foo { inner: 0_u8 }; let x = Pin::new(&mut x).project(); let _: Pin<&mut u8> = x.inner; @@ -238,6 +179,6 @@ fn cfg_attr_any_packed() { #[cfg_attr(any(), repr(packed))] struct Struct { #[pin] - field: u32, + f: u32, } } diff --git a/third_party/rust/pin-project/tests/compiletest.rs b/third_party/rust/pin-project/tests/compiletest.rs 
index e78b3dc0a539..70d2358943a1 100644 --- a/third_party/rust/pin-project/tests/compiletest.rs +++ b/third_party/rust/pin-project/tests/compiletest.rs @@ -1,15 +1,15 @@ +#![cfg(not(miri))] #![warn(rust_2018_idioms, single_use_lifetimes)] +use std::env; + #[rustversion::attr(not(nightly), ignore)] #[test] fn ui() { + if env::var_os("CI").is_none() { + env::set_var("TRYBUILD", "overwrite"); + } + let t = trybuild::TestCases::new(); - t.compile_fail("tests/ui/cfg/*.rs"); - t.compile_fail("tests/ui/not_unpin/*.rs"); - t.compile_fail("tests/ui/pin_project/*.rs"); - t.compile_fail("tests/ui/pinned_drop/*.rs"); - t.compile_fail("tests/ui/project/*.rs"); - t.compile_fail("tests/ui/unsafe_unpin/*.rs"); - t.compile_fail("tests/ui/unstable-features/*.rs"); - t.pass("tests/ui/unstable-features/run-pass/*.rs"); + t.compile_fail("tests/ui/**/*.rs"); } diff --git a/third_party/rust/pin-project/tests/drop_order.rs b/third_party/rust/pin-project/tests/drop_order.rs index 40a538a89ded..8ced56e75a36 100644 --- a/third_party/rust/pin-project/tests/drop_order.rs +++ b/third_party/rust/pin-project/tests/drop_order.rs @@ -1,5 +1,7 @@ #![warn(rust_2018_idioms, single_use_lifetimes)] +// Refs: https://doc.rust-lang.org/reference/destructors.html + use std::{cell::Cell, pin::Pin, thread}; use pin_project::pin_project; @@ -35,14 +37,16 @@ struct TuplePinned<'a>(#[pin] D<'a>, #[pin] D<'a>); #[pin_project(project_replace)] struct TupleUnpinned<'a>(D<'a>, D<'a>); -#[pin_project(project_replace= EnumProj)] +#[pin_project(project_replace = EnumProj)] enum Enum<'a> { + #[allow(dead_code)] // false positive that fixed in Rust 1.38 StructPinned { #[pin] f1: D<'a>, #[pin] f2: D<'a>, }, + #[allow(dead_code)] // false positive that fixed in Rust 1.38 StructUnpinned { f1: D<'a>, f2: D<'a>, @@ -59,9 +63,9 @@ fn struct_pinned() { } { let c = Cell::new(0); - let mut _x = StructPinned { f1: D(&c, 1), f2: D(&c, 2) }; - let _y = Pin::new(&mut _x); - let _z = _y.project_replace(StructPinned { f1: D(&c, 3), f2: D(&c, 4) }); + let mut x = StructPinned { f1: D(&c, 1), f2: D(&c, 2) }; + let y = Pin::new(&mut x); + let _z = y.project_replace(StructPinned { f1: D(&c, 3), f2: D(&c, 4) }); } } @@ -73,9 +77,9 @@ fn struct_unpinned() { } { let c = Cell::new(0); - let mut _x = StructUnpinned { f1: D(&c, 1), f2: D(&c, 2) }; - let _y = Pin::new(&mut _x); - let _z = _y.project_replace(StructUnpinned { f1: D(&c, 3), f2: D(&c, 4) }); + let mut x = StructUnpinned { f1: D(&c, 1), f2: D(&c, 2) }; + let y = Pin::new(&mut x); + let _z = y.project_replace(StructUnpinned { f1: D(&c, 3), f2: D(&c, 4) }); } } @@ -87,9 +91,9 @@ fn tuple_pinned() { } { let c = Cell::new(0); - let mut _x = TuplePinned(D(&c, 1), D(&c, 2)); - let _y = Pin::new(&mut _x); - let _z = _y.project_replace(TuplePinned(D(&c, 3), D(&c, 4))); + let mut x = TuplePinned(D(&c, 1), D(&c, 2)); + let y = Pin::new(&mut x); + let _z = y.project_replace(TuplePinned(D(&c, 3), D(&c, 4))); } } @@ -101,9 +105,9 @@ fn tuple_unpinned() { } { let c = Cell::new(0); - let mut _x = TupleUnpinned(D(&c, 1), D(&c, 2)); - let _y = Pin::new(&mut _x); - let _z = _y.project_replace(TupleUnpinned(D(&c, 3), D(&c, 4))); + let mut x = TupleUnpinned(D(&c, 1), D(&c, 2)); + let y = Pin::new(&mut x); + let _z = y.project_replace(TupleUnpinned(D(&c, 3), D(&c, 4))); } } @@ -115,9 +119,9 @@ fn enum_struct() { } { let c = Cell::new(0); - let mut _x = Enum::StructPinned { f1: D(&c, 1), f2: D(&c, 2) }; - let _y = Pin::new(&mut _x); - let _z = _y.project_replace(Enum::StructPinned { f1: D(&c, 3), f2: D(&c, 4) }); + let mut x = 
Enum::StructPinned { f1: D(&c, 1), f2: D(&c, 2) }; + let y = Pin::new(&mut x); + let _z = y.project_replace(Enum::StructPinned { f1: D(&c, 3), f2: D(&c, 4) }); } { @@ -126,9 +130,9 @@ fn enum_struct() { } { let c = Cell::new(0); - let mut _x = Enum::StructUnpinned { f1: D(&c, 1), f2: D(&c, 2) }; - let _y = Pin::new(&mut _x); - let _z = _y.project_replace(Enum::StructUnpinned { f1: D(&c, 3), f2: D(&c, 4) }); + let mut x = Enum::StructUnpinned { f1: D(&c, 1), f2: D(&c, 2) }; + let y = Pin::new(&mut x); + let _z = y.project_replace(Enum::StructUnpinned { f1: D(&c, 3), f2: D(&c, 4) }); } } @@ -140,9 +144,9 @@ fn enum_tuple() { } { let c = Cell::new(0); - let mut _x = Enum::TuplePinned(D(&c, 1), D(&c, 2)); - let _y = Pin::new(&mut _x); - let _z = _y.project_replace(Enum::TuplePinned(D(&c, 3), D(&c, 4))); + let mut x = Enum::TuplePinned(D(&c, 1), D(&c, 2)); + let y = Pin::new(&mut x); + let _z = y.project_replace(Enum::TuplePinned(D(&c, 3), D(&c, 4))); } { @@ -151,8 +155,8 @@ fn enum_tuple() { } { let c = Cell::new(0); - let mut _x = Enum::TupleUnpinned(D(&c, 1), D(&c, 2)); - let _y = Pin::new(&mut _x); - let _z = _y.project_replace(Enum::TupleUnpinned(D(&c, 3), D(&c, 4))); + let mut x = Enum::TupleUnpinned(D(&c, 1), D(&c, 2)); + let y = Pin::new(&mut x); + let _z = y.project_replace(Enum::TupleUnpinned(D(&c, 3), D(&c, 4))); } } diff --git a/third_party/rust/pin-project/tests/expand/default/enum.expanded.rs b/third_party/rust/pin-project/tests/expand/default/enum.expanded.rs new file mode 100644 index 000000000000..a3b03385a808 --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/default/enum.expanded.rs @@ -0,0 +1,137 @@ +use pin_project::pin_project; +# [pin (__private (project = EnumProj , project_ref = EnumProjRef))] +enum Enum { + Struct { + #[pin] + pinned: T, + unpinned: U, + }, + Tuple(#[pin] T, U), + Unit, +} +#[allow(box_pointers)] +#[allow(deprecated)] +#[allow(explicit_outlives_requirements)] +#[allow(single_use_lifetimes)] +#[allow(unreachable_pub)] +#[allow(clippy::unknown_clippy_lints)] +#[allow(clippy::pattern_type_mismatch)] +#[allow(clippy::redundant_pub_crate)] +#[allow(clippy::type_repetition_in_bounds)] +#[allow(dead_code)] +#[allow(clippy::mut_mut)] +enum EnumProj<'pin, T, U> +where + Enum: 'pin, +{ + Struct { + pinned: ::pin_project::__private::Pin<&'pin mut (T)>, + unpinned: &'pin mut (U), + }, + Tuple(::pin_project::__private::Pin<&'pin mut (T)>, &'pin mut (U)), + Unit, +} +#[allow(box_pointers)] +#[allow(deprecated)] +#[allow(explicit_outlives_requirements)] +#[allow(single_use_lifetimes)] +#[allow(unreachable_pub)] +#[allow(clippy::unknown_clippy_lints)] +#[allow(clippy::pattern_type_mismatch)] +#[allow(clippy::redundant_pub_crate)] +#[allow(clippy::type_repetition_in_bounds)] +#[allow(dead_code)] +#[allow(clippy::ref_option_ref)] +enum EnumProjRef<'pin, T, U> +where + Enum: 'pin, +{ + Struct { + pinned: ::pin_project::__private::Pin<&'pin (T)>, + unpinned: &'pin (U), + }, + Tuple(::pin_project::__private::Pin<&'pin (T)>, &'pin (U)), + Unit, +} +#[allow(box_pointers)] +#[allow(deprecated)] +#[allow(explicit_outlives_requirements)] +#[allow(single_use_lifetimes)] +#[allow(unreachable_pub)] +#[allow(clippy::unknown_clippy_lints)] +#[allow(clippy::pattern_type_mismatch)] +#[allow(clippy::redundant_pub_crate)] +#[allow(clippy::type_repetition_in_bounds)] +#[allow(unused_qualifications)] +#[allow(clippy::semicolon_if_nothing_returned)] +#[allow(clippy::use_self)] +#[allow(clippy::used_underscore_binding)] +const _: () = { + #[allow(unused_extern_crates)] + extern 
crate pin_project as _pin_project; + impl Enum { + fn project<'pin>( + self: _pin_project::__private::Pin<&'pin mut Self>, + ) -> EnumProj<'pin, T, U> { + unsafe { + match self.get_unchecked_mut() { + Self::Struct { pinned, unpinned } => EnumProj::Struct { + pinned: _pin_project::__private::Pin::new_unchecked(pinned), + unpinned, + }, + Self::Tuple(_0, _1) => { + EnumProj::Tuple(_pin_project::__private::Pin::new_unchecked(_0), _1) + } + Self::Unit => EnumProj::Unit, + } + } + } + #[allow(clippy::missing_const_for_fn)] + fn project_ref<'pin>( + self: _pin_project::__private::Pin<&'pin Self>, + ) -> EnumProjRef<'pin, T, U> { + unsafe { + match self.get_ref() { + Self::Struct { pinned, unpinned } => EnumProjRef::Struct { + pinned: _pin_project::__private::Pin::new_unchecked(pinned), + unpinned, + }, + Self::Tuple(_0, _1) => { + EnumProjRef::Tuple(_pin_project::__private::Pin::new_unchecked(_0), _1) + } + Self::Unit => EnumProjRef::Unit, + } + } + } + } + #[allow(missing_debug_implementations)] + struct __Enum<'pin, T, U> { + __pin_project_use_generics: _pin_project::__private::AlwaysUnpin< + 'pin, + ( + _pin_project::__private::PhantomData, + _pin_project::__private::PhantomData, + ), + >, + __field0: T, + __field1: T, + } + impl<'pin, T, U> _pin_project::__private::Unpin for Enum where + __Enum<'pin, T, U>: _pin_project::__private::Unpin + { + } + #[doc(hidden)] + unsafe impl<'pin, T, U> _pin_project::UnsafeUnpin for Enum where + __Enum<'pin, T, U>: _pin_project::__private::Unpin + { + } + trait EnumMustNotImplDrop {} + #[allow(clippy::drop_bounds, drop_bounds)] + impl EnumMustNotImplDrop for T {} + impl EnumMustNotImplDrop for Enum {} + #[doc(hidden)] + impl _pin_project::__private::PinnedDrop for Enum { + unsafe fn drop(self: _pin_project::__private::Pin<&mut Self>) {} + } +}; +fn main() {} diff --git a/third_party/rust/pin-project/tests/expand/default/enum.rs b/third_party/rust/pin-project/tests/expand/default/enum.rs new file mode 100644 index 000000000000..ff056150adcc --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/default/enum.rs @@ -0,0 +1,14 @@ +use pin_project::pin_project; + +#[pin_project(project = EnumProj, project_ref = EnumProjRef)] +enum Enum { + Struct { + #[pin] + pinned: T, + unpinned: U, + }, + Tuple(#[pin] T, U), + Unit, +} + +fn main() {} diff --git a/third_party/rust/pin-project/tests/expand/default/struct.expanded.rs b/third_party/rust/pin-project/tests/expand/default/struct.expanded.rs new file mode 100644 index 000000000000..3089a545d576 --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/default/struct.expanded.rs @@ -0,0 +1,101 @@ +use pin_project::pin_project; +#[pin(__private())] +struct Struct { + #[pin] + pinned: T, + unpinned: U, +} +#[allow(box_pointers)] +#[allow(deprecated)] +#[allow(explicit_outlives_requirements)] +#[allow(single_use_lifetimes)] +#[allow(unreachable_pub)] +#[allow(clippy::unknown_clippy_lints)] +#[allow(clippy::pattern_type_mismatch)] +#[allow(clippy::redundant_pub_crate)] +#[allow(clippy::type_repetition_in_bounds)] +#[allow(unused_qualifications)] +#[allow(clippy::semicolon_if_nothing_returned)] +#[allow(clippy::use_self)] +#[allow(clippy::used_underscore_binding)] +const _: () = { + #[allow(unused_extern_crates)] + extern crate pin_project as _pin_project; + #[allow(dead_code)] + #[allow(clippy::mut_mut)] + struct __StructProjection<'pin, T, U> + where + Struct: 'pin, + { + pinned: ::pin_project::__private::Pin<&'pin mut (T)>, + unpinned: &'pin mut (U), + } + #[allow(dead_code)] + 
#[allow(clippy::ref_option_ref)] + struct __StructProjectionRef<'pin, T, U> + where + Struct: 'pin, + { + pinned: ::pin_project::__private::Pin<&'pin (T)>, + unpinned: &'pin (U), + } + impl Struct { + fn project<'pin>( + self: _pin_project::__private::Pin<&'pin mut Self>, + ) -> __StructProjection<'pin, T, U> { + unsafe { + let Self { pinned, unpinned } = self.get_unchecked_mut(); + __StructProjection { + pinned: _pin_project::__private::Pin::new_unchecked(pinned), + unpinned, + } + } + } + #[allow(clippy::missing_const_for_fn)] + fn project_ref<'pin>( + self: _pin_project::__private::Pin<&'pin Self>, + ) -> __StructProjectionRef<'pin, T, U> { + unsafe { + let Self { pinned, unpinned } = self.get_ref(); + __StructProjectionRef { + pinned: _pin_project::__private::Pin::new_unchecked(pinned), + unpinned, + } + } + } + } + #[forbid(unaligned_references, safe_packed_borrows)] + fn __assert_not_repr_packed(this: &Struct) { + let _ = &this.pinned; + let _ = &this.unpinned; + } + #[allow(missing_debug_implementations)] + struct __Struct<'pin, T, U> { + __pin_project_use_generics: _pin_project::__private::AlwaysUnpin< + 'pin, + ( + _pin_project::__private::PhantomData, + _pin_project::__private::PhantomData, + ), + >, + __field0: T, + } + impl<'pin, T, U> _pin_project::__private::Unpin for Struct where + __Struct<'pin, T, U>: _pin_project::__private::Unpin + { + } + #[doc(hidden)] + unsafe impl<'pin, T, U> _pin_project::UnsafeUnpin for Struct where + __Struct<'pin, T, U>: _pin_project::__private::Unpin + { + } + trait StructMustNotImplDrop {} + #[allow(clippy::drop_bounds, drop_bounds)] + impl StructMustNotImplDrop for T {} + impl StructMustNotImplDrop for Struct {} + #[doc(hidden)] + impl _pin_project::__private::PinnedDrop for Struct { + unsafe fn drop(self: _pin_project::__private::Pin<&mut Self>) {} + } +}; +fn main() {} diff --git a/third_party/rust/pin-project/tests/expand/default/struct.rs b/third_party/rust/pin-project/tests/expand/default/struct.rs new file mode 100644 index 000000000000..474f0a11612e --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/default/struct.rs @@ -0,0 +1,10 @@ +use pin_project::pin_project; + +#[pin_project] +struct Struct { + #[pin] + pinned: T, + unpinned: U, +} + +fn main() {} diff --git a/third_party/rust/pin-project/tests/expand/default/tuple_struct.expanded.rs b/third_party/rust/pin-project/tests/expand/default/tuple_struct.expanded.rs new file mode 100644 index 000000000000..cc9b75e67bea --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/default/tuple_struct.expanded.rs @@ -0,0 +1,89 @@ +use pin_project::pin_project; +#[pin(__private())] +struct TupleStruct(#[pin] T, U); +#[allow(box_pointers)] +#[allow(deprecated)] +#[allow(explicit_outlives_requirements)] +#[allow(single_use_lifetimes)] +#[allow(unreachable_pub)] +#[allow(clippy::unknown_clippy_lints)] +#[allow(clippy::pattern_type_mismatch)] +#[allow(clippy::redundant_pub_crate)] +#[allow(clippy::type_repetition_in_bounds)] +#[allow(unused_qualifications)] +#[allow(clippy::semicolon_if_nothing_returned)] +#[allow(clippy::use_self)] +#[allow(clippy::used_underscore_binding)] +const _: () = { + #[allow(unused_extern_crates)] + extern crate pin_project as _pin_project; + #[allow(dead_code)] + #[allow(clippy::mut_mut)] + struct __TupleStructProjection<'pin, T, U>( + ::pin_project::__private::Pin<&'pin mut (T)>, + &'pin mut (U), + ) + where + TupleStruct: 'pin; + #[allow(dead_code)] + #[allow(clippy::ref_option_ref)] + struct __TupleStructProjectionRef<'pin, T, U>( + 
::pin_project::__private::Pin<&'pin (T)>, + &'pin (U), + ) + where + TupleStruct: 'pin; + impl TupleStruct { + fn project<'pin>( + self: _pin_project::__private::Pin<&'pin mut Self>, + ) -> __TupleStructProjection<'pin, T, U> { + unsafe { + let Self(_0, _1) = self.get_unchecked_mut(); + __TupleStructProjection(_pin_project::__private::Pin::new_unchecked(_0), _1) + } + } + #[allow(clippy::missing_const_for_fn)] + fn project_ref<'pin>( + self: _pin_project::__private::Pin<&'pin Self>, + ) -> __TupleStructProjectionRef<'pin, T, U> { + unsafe { + let Self(_0, _1) = self.get_ref(); + __TupleStructProjectionRef(_pin_project::__private::Pin::new_unchecked(_0), _1) + } + } + } + #[forbid(unaligned_references, safe_packed_borrows)] + fn __assert_not_repr_packed(this: &TupleStruct) { + let _ = &this.0; + let _ = &this.1; + } + #[allow(missing_debug_implementations)] + struct __TupleStruct<'pin, T, U> { + __pin_project_use_generics: _pin_project::__private::AlwaysUnpin< + 'pin, + ( + _pin_project::__private::PhantomData, + _pin_project::__private::PhantomData, + ), + >, + __field0: T, + } + impl<'pin, T, U> _pin_project::__private::Unpin for TupleStruct where + __TupleStruct<'pin, T, U>: _pin_project::__private::Unpin + { + } + #[doc(hidden)] + unsafe impl<'pin, T, U> _pin_project::UnsafeUnpin for TupleStruct where + __TupleStruct<'pin, T, U>: _pin_project::__private::Unpin + { + } + trait TupleStructMustNotImplDrop {} + #[allow(clippy::drop_bounds, drop_bounds)] + impl TupleStructMustNotImplDrop for T {} + impl TupleStructMustNotImplDrop for TupleStruct {} + #[doc(hidden)] + impl _pin_project::__private::PinnedDrop for TupleStruct { + unsafe fn drop(self: _pin_project::__private::Pin<&mut Self>) {} + } +}; +fn main() {} diff --git a/third_party/rust/pin-project/tests/expand/default/tuple_struct.rs b/third_party/rust/pin-project/tests/expand/default/tuple_struct.rs new file mode 100644 index 000000000000..398b14f3a507 --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/default/tuple_struct.rs @@ -0,0 +1,6 @@ +use pin_project::pin_project; + +#[pin_project] +struct TupleStruct(#[pin] T, U); + +fn main() {} diff --git a/third_party/rust/pin-project/tests/expand/multifields/enum.expanded.rs b/third_party/rust/pin-project/tests/expand/multifields/enum.expanded.rs new file mode 100644 index 000000000000..fd31201b9ac0 --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/multifields/enum.expanded.rs @@ -0,0 +1,253 @@ +use pin_project::pin_project; +# [pin (__private (project = EnumProj , project_ref = EnumProjRef , project_replace = EnumProjOwn))] +enum Enum { + Struct { + #[pin] + pinned1: T, + #[pin] + pinned2: T, + unpinned1: U, + unpinned2: U, + }, + Tuple(#[pin] T, #[pin] T, U, U), + Unit, +} +#[allow(box_pointers)] +#[allow(deprecated)] +#[allow(explicit_outlives_requirements)] +#[allow(single_use_lifetimes)] +#[allow(unreachable_pub)] +#[allow(clippy::unknown_clippy_lints)] +#[allow(clippy::pattern_type_mismatch)] +#[allow(clippy::redundant_pub_crate)] +#[allow(clippy::type_repetition_in_bounds)] +#[allow(dead_code)] +#[allow(clippy::mut_mut)] +enum EnumProj<'pin, T, U> +where + Enum: 'pin, +{ + Struct { + pinned1: ::pin_project::__private::Pin<&'pin mut (T)>, + pinned2: ::pin_project::__private::Pin<&'pin mut (T)>, + unpinned1: &'pin mut (U), + unpinned2: &'pin mut (U), + }, + Tuple( + ::pin_project::__private::Pin<&'pin mut (T)>, + ::pin_project::__private::Pin<&'pin mut (T)>, + &'pin mut (U), + &'pin mut (U), + ), + Unit, +} +#[allow(box_pointers)] +#[allow(deprecated)] 
+#[allow(explicit_outlives_requirements)] +#[allow(single_use_lifetimes)] +#[allow(unreachable_pub)] +#[allow(clippy::unknown_clippy_lints)] +#[allow(clippy::pattern_type_mismatch)] +#[allow(clippy::redundant_pub_crate)] +#[allow(clippy::type_repetition_in_bounds)] +#[allow(dead_code)] +#[allow(clippy::ref_option_ref)] +enum EnumProjRef<'pin, T, U> +where + Enum: 'pin, +{ + Struct { + pinned1: ::pin_project::__private::Pin<&'pin (T)>, + pinned2: ::pin_project::__private::Pin<&'pin (T)>, + unpinned1: &'pin (U), + unpinned2: &'pin (U), + }, + Tuple( + ::pin_project::__private::Pin<&'pin (T)>, + ::pin_project::__private::Pin<&'pin (T)>, + &'pin (U), + &'pin (U), + ), + Unit, +} +#[allow(box_pointers)] +#[allow(deprecated)] +#[allow(explicit_outlives_requirements)] +#[allow(single_use_lifetimes)] +#[allow(unreachable_pub)] +#[allow(clippy::unknown_clippy_lints)] +#[allow(clippy::pattern_type_mismatch)] +#[allow(clippy::redundant_pub_crate)] +#[allow(clippy::type_repetition_in_bounds)] +#[allow(dead_code)] +#[allow(variant_size_differences)] +#[allow(clippy::large_enum_variant)] +enum EnumProjOwn { + Struct { + pinned1: ::pin_project::__private::PhantomData, + pinned2: ::pin_project::__private::PhantomData, + unpinned1: U, + unpinned2: U, + }, + Tuple( + ::pin_project::__private::PhantomData, + ::pin_project::__private::PhantomData, + U, + U, + ), + Unit, +} +#[allow(box_pointers)] +#[allow(deprecated)] +#[allow(explicit_outlives_requirements)] +#[allow(single_use_lifetimes)] +#[allow(unreachable_pub)] +#[allow(clippy::unknown_clippy_lints)] +#[allow(clippy::pattern_type_mismatch)] +#[allow(clippy::redundant_pub_crate)] +#[allow(clippy::type_repetition_in_bounds)] +#[allow(unused_qualifications)] +#[allow(clippy::semicolon_if_nothing_returned)] +#[allow(clippy::use_self)] +#[allow(clippy::used_underscore_binding)] +const _: () = { + #[allow(unused_extern_crates)] + extern crate pin_project as _pin_project; + impl Enum { + fn project<'pin>( + self: _pin_project::__private::Pin<&'pin mut Self>, + ) -> EnumProj<'pin, T, U> { + unsafe { + match self.get_unchecked_mut() { + Self::Struct { + pinned1, + pinned2, + unpinned1, + unpinned2, + } => EnumProj::Struct { + pinned1: _pin_project::__private::Pin::new_unchecked(pinned1), + pinned2: _pin_project::__private::Pin::new_unchecked(pinned2), + unpinned1, + unpinned2, + }, + Self::Tuple(_0, _1, _2, _3) => EnumProj::Tuple( + _pin_project::__private::Pin::new_unchecked(_0), + _pin_project::__private::Pin::new_unchecked(_1), + _2, + _3, + ), + Self::Unit => EnumProj::Unit, + } + } + } + #[allow(clippy::missing_const_for_fn)] + fn project_ref<'pin>( + self: _pin_project::__private::Pin<&'pin Self>, + ) -> EnumProjRef<'pin, T, U> { + unsafe { + match self.get_ref() { + Self::Struct { + pinned1, + pinned2, + unpinned1, + unpinned2, + } => EnumProjRef::Struct { + pinned1: _pin_project::__private::Pin::new_unchecked(pinned1), + pinned2: _pin_project::__private::Pin::new_unchecked(pinned2), + unpinned1, + unpinned2, + }, + Self::Tuple(_0, _1, _2, _3) => EnumProjRef::Tuple( + _pin_project::__private::Pin::new_unchecked(_0), + _pin_project::__private::Pin::new_unchecked(_1), + _2, + _3, + ), + Self::Unit => EnumProjRef::Unit, + } + } + } + fn project_replace( + self: _pin_project::__private::Pin<&mut Self>, + __replacement: Self, + ) -> EnumProjOwn { + unsafe { + let __self_ptr: *mut Self = self.get_unchecked_mut(); + let __guard = + _pin_project::__private::UnsafeOverwriteGuard::new(__self_ptr, __replacement); + match &mut *__self_ptr { + Self::Struct { + 
pinned1, + pinned2, + unpinned1, + unpinned2, + } => { + let __result = EnumProjOwn::Struct { + pinned1: _pin_project::__private::PhantomData, + pinned2: _pin_project::__private::PhantomData, + unpinned1: _pin_project::__private::ptr::read(unpinned1), + unpinned2: _pin_project::__private::ptr::read(unpinned2), + }; + { + let __guard = + _pin_project::__private::UnsafeDropInPlaceGuard::new(pinned2); + let __guard = + _pin_project::__private::UnsafeDropInPlaceGuard::new(pinned1); + } + __result + } + Self::Tuple(_0, _1, _2, _3) => { + let __result = EnumProjOwn::Tuple( + _pin_project::__private::PhantomData, + _pin_project::__private::PhantomData, + _pin_project::__private::ptr::read(_2), + _pin_project::__private::ptr::read(_3), + ); + { + let __guard = _pin_project::__private::UnsafeDropInPlaceGuard::new(_1); + let __guard = _pin_project::__private::UnsafeDropInPlaceGuard::new(_0); + } + __result + } + Self::Unit => { + let __result = EnumProjOwn::Unit; + {} + __result + } + } + } + } + } + #[allow(missing_debug_implementations)] + struct __Enum<'pin, T, U> { + __pin_project_use_generics: _pin_project::__private::AlwaysUnpin< + 'pin, + ( + _pin_project::__private::PhantomData, + _pin_project::__private::PhantomData, + ), + >, + __field0: T, + __field1: T, + __field2: T, + __field3: T, + } + impl<'pin, T, U> _pin_project::__private::Unpin for Enum where + __Enum<'pin, T, U>: _pin_project::__private::Unpin + { + } + #[doc(hidden)] + unsafe impl<'pin, T, U> _pin_project::UnsafeUnpin for Enum where + __Enum<'pin, T, U>: _pin_project::__private::Unpin + { + } + trait EnumMustNotImplDrop {} + #[allow(clippy::drop_bounds, drop_bounds)] + impl EnumMustNotImplDrop for T {} + impl EnumMustNotImplDrop for Enum {} + #[doc(hidden)] + impl _pin_project::__private::PinnedDrop for Enum { + unsafe fn drop(self: _pin_project::__private::Pin<&mut Self>) {} + } +}; +fn main() {} diff --git a/third_party/rust/pin-project/tests/expand/multifields/enum.rs b/third_party/rust/pin-project/tests/expand/multifields/enum.rs new file mode 100644 index 000000000000..754d48c9f964 --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/multifields/enum.rs @@ -0,0 +1,17 @@ +use pin_project::pin_project; + +#[pin_project(project = EnumProj, project_ref = EnumProjRef, project_replace = EnumProjOwn)] +enum Enum { + Struct { + #[pin] + pinned1: T, + #[pin] + pinned2: T, + unpinned1: U, + unpinned2: U, + }, + Tuple(#[pin] T, #[pin] T, U, U), + Unit, +} + +fn main() {} diff --git a/third_party/rust/pin-project/tests/expand/multifields/struct.expanded.rs b/third_party/rust/pin-project/tests/expand/multifields/struct.expanded.rs new file mode 100644 index 000000000000..7ed1f3e734d9 --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/multifields/struct.expanded.rs @@ -0,0 +1,159 @@ +use pin_project::pin_project; +#[pin(__private(project_replace))] +struct Struct { + #[pin] + pinned1: T, + #[pin] + pinned2: T, + unpinned1: U, + unpinned2: U, +} +#[allow(box_pointers)] +#[allow(deprecated)] +#[allow(explicit_outlives_requirements)] +#[allow(single_use_lifetimes)] +#[allow(unreachable_pub)] +#[allow(clippy::unknown_clippy_lints)] +#[allow(clippy::pattern_type_mismatch)] +#[allow(clippy::redundant_pub_crate)] +#[allow(clippy::type_repetition_in_bounds)] +#[allow(unused_qualifications)] +#[allow(clippy::semicolon_if_nothing_returned)] +#[allow(clippy::use_self)] +#[allow(clippy::used_underscore_binding)] +const _: () = { + #[allow(unused_extern_crates)] + extern crate pin_project as _pin_project; + #[allow(dead_code)] + 
#[allow(clippy::mut_mut)] + struct __StructProjection<'pin, T, U> + where + Struct: 'pin, + { + pinned1: ::pin_project::__private::Pin<&'pin mut (T)>, + pinned2: ::pin_project::__private::Pin<&'pin mut (T)>, + unpinned1: &'pin mut (U), + unpinned2: &'pin mut (U), + } + #[allow(dead_code)] + #[allow(clippy::ref_option_ref)] + struct __StructProjectionRef<'pin, T, U> + where + Struct: 'pin, + { + pinned1: ::pin_project::__private::Pin<&'pin (T)>, + pinned2: ::pin_project::__private::Pin<&'pin (T)>, + unpinned1: &'pin (U), + unpinned2: &'pin (U), + } + #[allow(dead_code)] + struct __StructProjectionOwned { + pinned1: ::pin_project::__private::PhantomData, + pinned2: ::pin_project::__private::PhantomData, + unpinned1: U, + unpinned2: U, + } + impl Struct { + fn project<'pin>( + self: _pin_project::__private::Pin<&'pin mut Self>, + ) -> __StructProjection<'pin, T, U> { + unsafe { + let Self { + pinned1, + pinned2, + unpinned1, + unpinned2, + } = self.get_unchecked_mut(); + __StructProjection { + pinned1: _pin_project::__private::Pin::new_unchecked(pinned1), + pinned2: _pin_project::__private::Pin::new_unchecked(pinned2), + unpinned1, + unpinned2, + } + } + } + #[allow(clippy::missing_const_for_fn)] + fn project_ref<'pin>( + self: _pin_project::__private::Pin<&'pin Self>, + ) -> __StructProjectionRef<'pin, T, U> { + unsafe { + let Self { + pinned1, + pinned2, + unpinned1, + unpinned2, + } = self.get_ref(); + __StructProjectionRef { + pinned1: _pin_project::__private::Pin::new_unchecked(pinned1), + pinned2: _pin_project::__private::Pin::new_unchecked(pinned2), + unpinned1, + unpinned2, + } + } + } + fn project_replace( + self: _pin_project::__private::Pin<&mut Self>, + __replacement: Self, + ) -> __StructProjectionOwned { + unsafe { + let __self_ptr: *mut Self = self.get_unchecked_mut(); + let __guard = + _pin_project::__private::UnsafeOverwriteGuard::new(__self_ptr, __replacement); + let Self { + pinned1, + pinned2, + unpinned1, + unpinned2, + } = &mut *__self_ptr; + let __result = __StructProjectionOwned { + pinned1: _pin_project::__private::PhantomData, + pinned2: _pin_project::__private::PhantomData, + unpinned1: _pin_project::__private::ptr::read(unpinned1), + unpinned2: _pin_project::__private::ptr::read(unpinned2), + }; + { + let __guard = _pin_project::__private::UnsafeDropInPlaceGuard::new(pinned2); + let __guard = _pin_project::__private::UnsafeDropInPlaceGuard::new(pinned1); + } + __result + } + } + } + #[forbid(unaligned_references, safe_packed_borrows)] + fn __assert_not_repr_packed(this: &Struct) { + let _ = &this.pinned1; + let _ = &this.pinned2; + let _ = &this.unpinned1; + let _ = &this.unpinned2; + } + #[allow(missing_debug_implementations)] + struct __Struct<'pin, T, U> { + __pin_project_use_generics: _pin_project::__private::AlwaysUnpin< + 'pin, + ( + _pin_project::__private::PhantomData, + _pin_project::__private::PhantomData, + ), + >, + __field0: T, + __field1: T, + } + impl<'pin, T, U> _pin_project::__private::Unpin for Struct where + __Struct<'pin, T, U>: _pin_project::__private::Unpin + { + } + #[doc(hidden)] + unsafe impl<'pin, T, U> _pin_project::UnsafeUnpin for Struct where + __Struct<'pin, T, U>: _pin_project::__private::Unpin + { + } + trait StructMustNotImplDrop {} + #[allow(clippy::drop_bounds, drop_bounds)] + impl StructMustNotImplDrop for T {} + impl StructMustNotImplDrop for Struct {} + #[doc(hidden)] + impl _pin_project::__private::PinnedDrop for Struct { + unsafe fn drop(self: _pin_project::__private::Pin<&mut Self>) {} + } +}; +fn main() {} diff --git 
a/third_party/rust/pin-project/tests/expand/multifields/struct.rs b/third_party/rust/pin-project/tests/expand/multifields/struct.rs new file mode 100644 index 000000000000..3b319bf81308 --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/multifields/struct.rs @@ -0,0 +1,13 @@ +use pin_project::pin_project; + +#[pin_project(project_replace)] +struct Struct { + #[pin] + pinned1: T, + #[pin] + pinned2: T, + unpinned1: U, + unpinned2: U, +} + +fn main() {} diff --git a/third_party/rust/pin-project/tests/expand/multifields/tuple_struct.expanded.rs b/third_party/rust/pin-project/tests/expand/multifields/tuple_struct.expanded.rs new file mode 100644 index 000000000000..351c084c2bb3 --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/multifields/tuple_struct.expanded.rs @@ -0,0 +1,135 @@ +use pin_project::pin_project; +#[pin(__private(project_replace))] +struct TupleStruct(#[pin] T, #[pin] T, U, U); +#[allow(box_pointers)] +#[allow(deprecated)] +#[allow(explicit_outlives_requirements)] +#[allow(single_use_lifetimes)] +#[allow(unreachable_pub)] +#[allow(clippy::unknown_clippy_lints)] +#[allow(clippy::pattern_type_mismatch)] +#[allow(clippy::redundant_pub_crate)] +#[allow(clippy::type_repetition_in_bounds)] +#[allow(unused_qualifications)] +#[allow(clippy::semicolon_if_nothing_returned)] +#[allow(clippy::use_self)] +#[allow(clippy::used_underscore_binding)] +const _: () = { + #[allow(unused_extern_crates)] + extern crate pin_project as _pin_project; + #[allow(dead_code)] + #[allow(clippy::mut_mut)] + struct __TupleStructProjection<'pin, T, U>( + ::pin_project::__private::Pin<&'pin mut (T)>, + ::pin_project::__private::Pin<&'pin mut (T)>, + &'pin mut (U), + &'pin mut (U), + ) + where + TupleStruct: 'pin; + #[allow(dead_code)] + #[allow(clippy::ref_option_ref)] + struct __TupleStructProjectionRef<'pin, T, U>( + ::pin_project::__private::Pin<&'pin (T)>, + ::pin_project::__private::Pin<&'pin (T)>, + &'pin (U), + &'pin (U), + ) + where + TupleStruct: 'pin; + #[allow(dead_code)] + struct __TupleStructProjectionOwned( + ::pin_project::__private::PhantomData, + ::pin_project::__private::PhantomData, + U, + U, + ); + impl TupleStruct { + fn project<'pin>( + self: _pin_project::__private::Pin<&'pin mut Self>, + ) -> __TupleStructProjection<'pin, T, U> { + unsafe { + let Self(_0, _1, _2, _3) = self.get_unchecked_mut(); + __TupleStructProjection( + _pin_project::__private::Pin::new_unchecked(_0), + _pin_project::__private::Pin::new_unchecked(_1), + _2, + _3, + ) + } + } + #[allow(clippy::missing_const_for_fn)] + fn project_ref<'pin>( + self: _pin_project::__private::Pin<&'pin Self>, + ) -> __TupleStructProjectionRef<'pin, T, U> { + unsafe { + let Self(_0, _1, _2, _3) = self.get_ref(); + __TupleStructProjectionRef( + _pin_project::__private::Pin::new_unchecked(_0), + _pin_project::__private::Pin::new_unchecked(_1), + _2, + _3, + ) + } + } + fn project_replace( + self: _pin_project::__private::Pin<&mut Self>, + __replacement: Self, + ) -> __TupleStructProjectionOwned { + unsafe { + let __self_ptr: *mut Self = self.get_unchecked_mut(); + let __guard = + _pin_project::__private::UnsafeOverwriteGuard::new(__self_ptr, __replacement); + let Self(_0, _1, _2, _3) = &mut *__self_ptr; + let __result = __TupleStructProjectionOwned( + _pin_project::__private::PhantomData, + _pin_project::__private::PhantomData, + _pin_project::__private::ptr::read(_2), + _pin_project::__private::ptr::read(_3), + ); + { + let __guard = _pin_project::__private::UnsafeDropInPlaceGuard::new(_1); + let __guard = 
_pin_project::__private::UnsafeDropInPlaceGuard::new(_0); + } + __result + } + } + } + #[forbid(unaligned_references, safe_packed_borrows)] + fn __assert_not_repr_packed(this: &TupleStruct) { + let _ = &this.0; + let _ = &this.1; + let _ = &this.2; + let _ = &this.3; + } + #[allow(missing_debug_implementations)] + struct __TupleStruct<'pin, T, U> { + __pin_project_use_generics: _pin_project::__private::AlwaysUnpin< + 'pin, + ( + _pin_project::__private::PhantomData, + _pin_project::__private::PhantomData, + ), + >, + __field0: T, + __field1: T, + } + impl<'pin, T, U> _pin_project::__private::Unpin for TupleStruct where + __TupleStruct<'pin, T, U>: _pin_project::__private::Unpin + { + } + #[doc(hidden)] + unsafe impl<'pin, T, U> _pin_project::UnsafeUnpin for TupleStruct where + __TupleStruct<'pin, T, U>: _pin_project::__private::Unpin + { + } + trait TupleStructMustNotImplDrop {} + #[allow(clippy::drop_bounds, drop_bounds)] + impl TupleStructMustNotImplDrop for T {} + impl TupleStructMustNotImplDrop for TupleStruct {} + #[doc(hidden)] + impl _pin_project::__private::PinnedDrop for TupleStruct { + unsafe fn drop(self: _pin_project::__private::Pin<&mut Self>) {} + } +}; +fn main() {} diff --git a/third_party/rust/pin-project/tests/expand/multifields/tuple_struct.rs b/third_party/rust/pin-project/tests/expand/multifields/tuple_struct.rs new file mode 100644 index 000000000000..bc92eee52c96 --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/multifields/tuple_struct.rs @@ -0,0 +1,6 @@ +use pin_project::pin_project; + +#[pin_project(project_replace)] +struct TupleStruct(#[pin] T, #[pin] T, U, U); + +fn main() {} diff --git a/third_party/rust/pin-project/tests/expand/naming/enum-all.expanded.rs b/third_party/rust/pin-project/tests/expand/naming/enum-all.expanded.rs new file mode 100644 index 000000000000..7259a13a069f --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/naming/enum-all.expanded.rs @@ -0,0 +1,193 @@ +use pin_project::pin_project; +# [pin (__private (project = Proj , project_ref = ProjRef , project_replace = ProjOwn))] +enum Enum { + Struct { + #[pin] + pinned: T, + unpinned: U, + }, + Tuple(#[pin] T, U), + Unit, +} +#[allow(box_pointers)] +#[allow(deprecated)] +#[allow(explicit_outlives_requirements)] +#[allow(single_use_lifetimes)] +#[allow(unreachable_pub)] +#[allow(clippy::unknown_clippy_lints)] +#[allow(clippy::pattern_type_mismatch)] +#[allow(clippy::redundant_pub_crate)] +#[allow(clippy::type_repetition_in_bounds)] +#[allow(dead_code)] +#[allow(clippy::mut_mut)] +enum Proj<'pin, T, U> +where + Enum: 'pin, +{ + Struct { + pinned: ::pin_project::__private::Pin<&'pin mut (T)>, + unpinned: &'pin mut (U), + }, + Tuple(::pin_project::__private::Pin<&'pin mut (T)>, &'pin mut (U)), + Unit, +} +#[allow(box_pointers)] +#[allow(deprecated)] +#[allow(explicit_outlives_requirements)] +#[allow(single_use_lifetimes)] +#[allow(unreachable_pub)] +#[allow(clippy::unknown_clippy_lints)] +#[allow(clippy::pattern_type_mismatch)] +#[allow(clippy::redundant_pub_crate)] +#[allow(clippy::type_repetition_in_bounds)] +#[allow(dead_code)] +#[allow(clippy::ref_option_ref)] +enum ProjRef<'pin, T, U> +where + Enum: 'pin, +{ + Struct { + pinned: ::pin_project::__private::Pin<&'pin (T)>, + unpinned: &'pin (U), + }, + Tuple(::pin_project::__private::Pin<&'pin (T)>, &'pin (U)), + Unit, +} +#[allow(box_pointers)] +#[allow(deprecated)] +#[allow(explicit_outlives_requirements)] +#[allow(single_use_lifetimes)] +#[allow(unreachable_pub)] +#[allow(clippy::unknown_clippy_lints)] 
+#[allow(clippy::pattern_type_mismatch)] +#[allow(clippy::redundant_pub_crate)] +#[allow(clippy::type_repetition_in_bounds)] +#[allow(dead_code)] +#[allow(variant_size_differences)] +#[allow(clippy::large_enum_variant)] +enum ProjOwn { + Struct { + pinned: ::pin_project::__private::PhantomData, + unpinned: U, + }, + Tuple(::pin_project::__private::PhantomData, U), + Unit, +} +#[allow(box_pointers)] +#[allow(deprecated)] +#[allow(explicit_outlives_requirements)] +#[allow(single_use_lifetimes)] +#[allow(unreachable_pub)] +#[allow(clippy::unknown_clippy_lints)] +#[allow(clippy::pattern_type_mismatch)] +#[allow(clippy::redundant_pub_crate)] +#[allow(clippy::type_repetition_in_bounds)] +#[allow(unused_qualifications)] +#[allow(clippy::semicolon_if_nothing_returned)] +#[allow(clippy::use_self)] +#[allow(clippy::used_underscore_binding)] +const _: () = { + #[allow(unused_extern_crates)] + extern crate pin_project as _pin_project; + impl Enum { + fn project<'pin>(self: _pin_project::__private::Pin<&'pin mut Self>) -> Proj<'pin, T, U> { + unsafe { + match self.get_unchecked_mut() { + Self::Struct { pinned, unpinned } => Proj::Struct { + pinned: _pin_project::__private::Pin::new_unchecked(pinned), + unpinned, + }, + Self::Tuple(_0, _1) => { + Proj::Tuple(_pin_project::__private::Pin::new_unchecked(_0), _1) + } + Self::Unit => Proj::Unit, + } + } + } + #[allow(clippy::missing_const_for_fn)] + fn project_ref<'pin>( + self: _pin_project::__private::Pin<&'pin Self>, + ) -> ProjRef<'pin, T, U> { + unsafe { + match self.get_ref() { + Self::Struct { pinned, unpinned } => ProjRef::Struct { + pinned: _pin_project::__private::Pin::new_unchecked(pinned), + unpinned, + }, + Self::Tuple(_0, _1) => { + ProjRef::Tuple(_pin_project::__private::Pin::new_unchecked(_0), _1) + } + Self::Unit => ProjRef::Unit, + } + } + } + fn project_replace( + self: _pin_project::__private::Pin<&mut Self>, + __replacement: Self, + ) -> ProjOwn { + unsafe { + let __self_ptr: *mut Self = self.get_unchecked_mut(); + let __guard = + _pin_project::__private::UnsafeOverwriteGuard::new(__self_ptr, __replacement); + match &mut *__self_ptr { + Self::Struct { pinned, unpinned } => { + let __result = ProjOwn::Struct { + pinned: _pin_project::__private::PhantomData, + unpinned: _pin_project::__private::ptr::read(unpinned), + }; + { + let __guard = + _pin_project::__private::UnsafeDropInPlaceGuard::new(pinned); + } + __result + } + Self::Tuple(_0, _1) => { + let __result = ProjOwn::Tuple( + _pin_project::__private::PhantomData, + _pin_project::__private::ptr::read(_1), + ); + { + let __guard = _pin_project::__private::UnsafeDropInPlaceGuard::new(_0); + } + __result + } + Self::Unit => { + let __result = ProjOwn::Unit; + {} + __result + } + } + } + } + } + #[allow(missing_debug_implementations)] + struct __Enum<'pin, T, U> { + __pin_project_use_generics: _pin_project::__private::AlwaysUnpin< + 'pin, + ( + _pin_project::__private::PhantomData, + _pin_project::__private::PhantomData, + ), + >, + __field0: T, + __field1: T, + } + impl<'pin, T, U> _pin_project::__private::Unpin for Enum where + __Enum<'pin, T, U>: _pin_project::__private::Unpin + { + } + #[doc(hidden)] + unsafe impl<'pin, T, U> _pin_project::UnsafeUnpin for Enum where + __Enum<'pin, T, U>: _pin_project::__private::Unpin + { + } + trait EnumMustNotImplDrop {} + #[allow(clippy::drop_bounds, drop_bounds)] + impl EnumMustNotImplDrop for T {} + impl EnumMustNotImplDrop for Enum {} + #[doc(hidden)] + impl _pin_project::__private::PinnedDrop for Enum { + unsafe fn drop(self: 
_pin_project::__private::Pin<&mut Self>) {} + } +}; +fn main() {} diff --git a/third_party/rust/pin-project/tests/expand/naming/enum-all.rs b/third_party/rust/pin-project/tests/expand/naming/enum-all.rs new file mode 100644 index 000000000000..dd513e6c3622 --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/naming/enum-all.rs @@ -0,0 +1,14 @@ +use pin_project::pin_project; + +#[pin_project(project = Proj, project_ref = ProjRef, project_replace = ProjOwn)] +enum Enum { + Struct { + #[pin] + pinned: T, + unpinned: U, + }, + Tuple(#[pin] T, U), + Unit, +} + +fn main() {} diff --git a/third_party/rust/pin-project/tests/expand/naming/enum-mut.expanded.rs b/third_party/rust/pin-project/tests/expand/naming/enum-mut.expanded.rs new file mode 100644 index 000000000000..09271fdcc3ae --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/naming/enum-mut.expanded.rs @@ -0,0 +1,96 @@ +use pin_project::pin_project; +# [pin (__private (project = Proj))] +enum Enum { + Struct { + #[pin] + pinned: T, + unpinned: U, + }, + Tuple(#[pin] T, U), + Unit, +} +#[allow(box_pointers)] +#[allow(deprecated)] +#[allow(explicit_outlives_requirements)] +#[allow(single_use_lifetimes)] +#[allow(unreachable_pub)] +#[allow(clippy::unknown_clippy_lints)] +#[allow(clippy::pattern_type_mismatch)] +#[allow(clippy::redundant_pub_crate)] +#[allow(clippy::type_repetition_in_bounds)] +#[allow(dead_code)] +#[allow(clippy::mut_mut)] +enum Proj<'pin, T, U> +where + Enum: 'pin, +{ + Struct { + pinned: ::pin_project::__private::Pin<&'pin mut (T)>, + unpinned: &'pin mut (U), + }, + Tuple(::pin_project::__private::Pin<&'pin mut (T)>, &'pin mut (U)), + Unit, +} +#[allow(box_pointers)] +#[allow(deprecated)] +#[allow(explicit_outlives_requirements)] +#[allow(single_use_lifetimes)] +#[allow(unreachable_pub)] +#[allow(clippy::unknown_clippy_lints)] +#[allow(clippy::pattern_type_mismatch)] +#[allow(clippy::redundant_pub_crate)] +#[allow(clippy::type_repetition_in_bounds)] +#[allow(unused_qualifications)] +#[allow(clippy::semicolon_if_nothing_returned)] +#[allow(clippy::use_self)] +#[allow(clippy::used_underscore_binding)] +const _: () = { + #[allow(unused_extern_crates)] + extern crate pin_project as _pin_project; + impl Enum { + fn project<'pin>(self: _pin_project::__private::Pin<&'pin mut Self>) -> Proj<'pin, T, U> { + unsafe { + match self.get_unchecked_mut() { + Self::Struct { pinned, unpinned } => Proj::Struct { + pinned: _pin_project::__private::Pin::new_unchecked(pinned), + unpinned, + }, + Self::Tuple(_0, _1) => { + Proj::Tuple(_pin_project::__private::Pin::new_unchecked(_0), _1) + } + Self::Unit => Proj::Unit, + } + } + } + } + #[allow(missing_debug_implementations)] + struct __Enum<'pin, T, U> { + __pin_project_use_generics: _pin_project::__private::AlwaysUnpin< + 'pin, + ( + _pin_project::__private::PhantomData, + _pin_project::__private::PhantomData, + ), + >, + __field0: T, + __field1: T, + } + impl<'pin, T, U> _pin_project::__private::Unpin for Enum where + __Enum<'pin, T, U>: _pin_project::__private::Unpin + { + } + #[doc(hidden)] + unsafe impl<'pin, T, U> _pin_project::UnsafeUnpin for Enum where + __Enum<'pin, T, U>: _pin_project::__private::Unpin + { + } + trait EnumMustNotImplDrop {} + #[allow(clippy::drop_bounds, drop_bounds)] + impl EnumMustNotImplDrop for T {} + impl EnumMustNotImplDrop for Enum {} + #[doc(hidden)] + impl _pin_project::__private::PinnedDrop for Enum { + unsafe fn drop(self: _pin_project::__private::Pin<&mut Self>) {} + } +}; +fn main() {} diff --git 
a/third_party/rust/pin-project/tests/expand/naming/enum-mut.rs b/third_party/rust/pin-project/tests/expand/naming/enum-mut.rs new file mode 100644 index 000000000000..818276f39c25 --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/naming/enum-mut.rs @@ -0,0 +1,14 @@ +use pin_project::pin_project; + +#[pin_project(project = Proj)] +enum Enum { + Struct { + #[pin] + pinned: T, + unpinned: U, + }, + Tuple(#[pin] T, U), + Unit, +} + +fn main() {} diff --git a/third_party/rust/pin-project/tests/expand/naming/enum-none.expanded.rs b/third_party/rust/pin-project/tests/expand/naming/enum-none.expanded.rs new file mode 100644 index 000000000000..54cd1f8ccd6e --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/naming/enum-none.expanded.rs @@ -0,0 +1,59 @@ +use pin_project::pin_project; +#[pin(__private())] +enum Enum { + Struct { + #[pin] + pinned: T, + unpinned: U, + }, + Tuple(#[pin] T, U), + Unit, +} +#[allow(box_pointers)] +#[allow(deprecated)] +#[allow(explicit_outlives_requirements)] +#[allow(single_use_lifetimes)] +#[allow(unreachable_pub)] +#[allow(clippy::unknown_clippy_lints)] +#[allow(clippy::pattern_type_mismatch)] +#[allow(clippy::redundant_pub_crate)] +#[allow(clippy::type_repetition_in_bounds)] +#[allow(unused_qualifications)] +#[allow(clippy::semicolon_if_nothing_returned)] +#[allow(clippy::use_self)] +#[allow(clippy::used_underscore_binding)] +const _: () = { + #[allow(unused_extern_crates)] + extern crate pin_project as _pin_project; + impl Enum {} + #[allow(missing_debug_implementations)] + struct __Enum<'pin, T, U> { + __pin_project_use_generics: _pin_project::__private::AlwaysUnpin< + 'pin, + ( + _pin_project::__private::PhantomData, + _pin_project::__private::PhantomData, + ), + >, + __field0: T, + __field1: T, + } + impl<'pin, T, U> _pin_project::__private::Unpin for Enum where + __Enum<'pin, T, U>: _pin_project::__private::Unpin + { + } + #[doc(hidden)] + unsafe impl<'pin, T, U> _pin_project::UnsafeUnpin for Enum where + __Enum<'pin, T, U>: _pin_project::__private::Unpin + { + } + trait EnumMustNotImplDrop {} + #[allow(clippy::drop_bounds, drop_bounds)] + impl EnumMustNotImplDrop for T {} + impl EnumMustNotImplDrop for Enum {} + #[doc(hidden)] + impl _pin_project::__private::PinnedDrop for Enum { + unsafe fn drop(self: _pin_project::__private::Pin<&mut Self>) {} + } +}; +fn main() {} diff --git a/third_party/rust/pin-project/tests/expand/naming/enum-none.rs b/third_party/rust/pin-project/tests/expand/naming/enum-none.rs new file mode 100644 index 000000000000..a87438db3805 --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/naming/enum-none.rs @@ -0,0 +1,14 @@ +use pin_project::pin_project; + +#[pin_project] +enum Enum { + Struct { + #[pin] + pinned: T, + unpinned: U, + }, + Tuple(#[pin] T, U), + Unit, +} + +fn main() {} diff --git a/third_party/rust/pin-project/tests/expand/naming/enum-own.expanded.rs b/third_party/rust/pin-project/tests/expand/naming/enum-own.expanded.rs new file mode 100644 index 000000000000..19ec570c1b46 --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/naming/enum-own.expanded.rs @@ -0,0 +1,118 @@ +use pin_project::pin_project; +# [pin (__private (project_replace = ProjOwn))] +enum Enum { + Struct { + #[pin] + pinned: T, + unpinned: U, + }, + Tuple(#[pin] T, U), + Unit, +} +#[allow(box_pointers)] +#[allow(deprecated)] +#[allow(explicit_outlives_requirements)] +#[allow(single_use_lifetimes)] +#[allow(unreachable_pub)] +#[allow(clippy::unknown_clippy_lints)] +#[allow(clippy::pattern_type_mismatch)] 
+#[allow(clippy::redundant_pub_crate)] +#[allow(clippy::type_repetition_in_bounds)] +#[allow(dead_code)] +#[allow(variant_size_differences)] +#[allow(clippy::large_enum_variant)] +enum ProjOwn { + Struct { + pinned: ::pin_project::__private::PhantomData, + unpinned: U, + }, + Tuple(::pin_project::__private::PhantomData, U), + Unit, +} +#[allow(box_pointers)] +#[allow(deprecated)] +#[allow(explicit_outlives_requirements)] +#[allow(single_use_lifetimes)] +#[allow(unreachable_pub)] +#[allow(clippy::unknown_clippy_lints)] +#[allow(clippy::pattern_type_mismatch)] +#[allow(clippy::redundant_pub_crate)] +#[allow(clippy::type_repetition_in_bounds)] +#[allow(unused_qualifications)] +#[allow(clippy::semicolon_if_nothing_returned)] +#[allow(clippy::use_self)] +#[allow(clippy::used_underscore_binding)] +const _: () = { + #[allow(unused_extern_crates)] + extern crate pin_project as _pin_project; + impl Enum { + fn project_replace( + self: _pin_project::__private::Pin<&mut Self>, + __replacement: Self, + ) -> ProjOwn { + unsafe { + let __self_ptr: *mut Self = self.get_unchecked_mut(); + let __guard = + _pin_project::__private::UnsafeOverwriteGuard::new(__self_ptr, __replacement); + match &mut *__self_ptr { + Self::Struct { pinned, unpinned } => { + let __result = ProjOwn::Struct { + pinned: _pin_project::__private::PhantomData, + unpinned: _pin_project::__private::ptr::read(unpinned), + }; + { + let __guard = + _pin_project::__private::UnsafeDropInPlaceGuard::new(pinned); + } + __result + } + Self::Tuple(_0, _1) => { + let __result = ProjOwn::Tuple( + _pin_project::__private::PhantomData, + _pin_project::__private::ptr::read(_1), + ); + { + let __guard = _pin_project::__private::UnsafeDropInPlaceGuard::new(_0); + } + __result + } + Self::Unit => { + let __result = ProjOwn::Unit; + {} + __result + } + } + } + } + } + #[allow(missing_debug_implementations)] + struct __Enum<'pin, T, U> { + __pin_project_use_generics: _pin_project::__private::AlwaysUnpin< + 'pin, + ( + _pin_project::__private::PhantomData, + _pin_project::__private::PhantomData, + ), + >, + __field0: T, + __field1: T, + } + impl<'pin, T, U> _pin_project::__private::Unpin for Enum where + __Enum<'pin, T, U>: _pin_project::__private::Unpin + { + } + #[doc(hidden)] + unsafe impl<'pin, T, U> _pin_project::UnsafeUnpin for Enum where + __Enum<'pin, T, U>: _pin_project::__private::Unpin + { + } + trait EnumMustNotImplDrop {} + #[allow(clippy::drop_bounds, drop_bounds)] + impl EnumMustNotImplDrop for T {} + impl EnumMustNotImplDrop for Enum {} + #[doc(hidden)] + impl _pin_project::__private::PinnedDrop for Enum { + unsafe fn drop(self: _pin_project::__private::Pin<&mut Self>) {} + } +}; +fn main() {} diff --git a/third_party/rust/pin-project/tests/expand/naming/enum-own.rs b/third_party/rust/pin-project/tests/expand/naming/enum-own.rs new file mode 100644 index 000000000000..cf886974db19 --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/naming/enum-own.rs @@ -0,0 +1,14 @@ +use pin_project::pin_project; + +#[pin_project(project_replace = ProjOwn)] +enum Enum { + Struct { + #[pin] + pinned: T, + unpinned: U, + }, + Tuple(#[pin] T, U), + Unit, +} + +fn main() {} diff --git a/third_party/rust/pin-project/tests/expand/naming/enum-ref.expanded.rs b/third_party/rust/pin-project/tests/expand/naming/enum-ref.expanded.rs new file mode 100644 index 000000000000..4565d084db84 --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/naming/enum-ref.expanded.rs @@ -0,0 +1,99 @@ +use pin_project::pin_project; +# [pin (__private (project_ref = 
ProjRef))] +enum Enum { + Struct { + #[pin] + pinned: T, + unpinned: U, + }, + Tuple(#[pin] T, U), + Unit, +} +#[allow(box_pointers)] +#[allow(deprecated)] +#[allow(explicit_outlives_requirements)] +#[allow(single_use_lifetimes)] +#[allow(unreachable_pub)] +#[allow(clippy::unknown_clippy_lints)] +#[allow(clippy::pattern_type_mismatch)] +#[allow(clippy::redundant_pub_crate)] +#[allow(clippy::type_repetition_in_bounds)] +#[allow(dead_code)] +#[allow(clippy::ref_option_ref)] +enum ProjRef<'pin, T, U> +where + Enum: 'pin, +{ + Struct { + pinned: ::pin_project::__private::Pin<&'pin (T)>, + unpinned: &'pin (U), + }, + Tuple(::pin_project::__private::Pin<&'pin (T)>, &'pin (U)), + Unit, +} +#[allow(box_pointers)] +#[allow(deprecated)] +#[allow(explicit_outlives_requirements)] +#[allow(single_use_lifetimes)] +#[allow(unreachable_pub)] +#[allow(clippy::unknown_clippy_lints)] +#[allow(clippy::pattern_type_mismatch)] +#[allow(clippy::redundant_pub_crate)] +#[allow(clippy::type_repetition_in_bounds)] +#[allow(unused_qualifications)] +#[allow(clippy::semicolon_if_nothing_returned)] +#[allow(clippy::use_self)] +#[allow(clippy::used_underscore_binding)] +const _: () = { + #[allow(unused_extern_crates)] + extern crate pin_project as _pin_project; + impl Enum { + #[allow(clippy::missing_const_for_fn)] + fn project_ref<'pin>( + self: _pin_project::__private::Pin<&'pin Self>, + ) -> ProjRef<'pin, T, U> { + unsafe { + match self.get_ref() { + Self::Struct { pinned, unpinned } => ProjRef::Struct { + pinned: _pin_project::__private::Pin::new_unchecked(pinned), + unpinned, + }, + Self::Tuple(_0, _1) => { + ProjRef::Tuple(_pin_project::__private::Pin::new_unchecked(_0), _1) + } + Self::Unit => ProjRef::Unit, + } + } + } + } + #[allow(missing_debug_implementations)] + struct __Enum<'pin, T, U> { + __pin_project_use_generics: _pin_project::__private::AlwaysUnpin< + 'pin, + ( + _pin_project::__private::PhantomData, + _pin_project::__private::PhantomData, + ), + >, + __field0: T, + __field1: T, + } + impl<'pin, T, U> _pin_project::__private::Unpin for Enum where + __Enum<'pin, T, U>: _pin_project::__private::Unpin + { + } + #[doc(hidden)] + unsafe impl<'pin, T, U> _pin_project::UnsafeUnpin for Enum where + __Enum<'pin, T, U>: _pin_project::__private::Unpin + { + } + trait EnumMustNotImplDrop {} + #[allow(clippy::drop_bounds, drop_bounds)] + impl EnumMustNotImplDrop for T {} + impl EnumMustNotImplDrop for Enum {} + #[doc(hidden)] + impl _pin_project::__private::PinnedDrop for Enum { + unsafe fn drop(self: _pin_project::__private::Pin<&mut Self>) {} + } +}; +fn main() {} diff --git a/third_party/rust/pin-project/tests/expand/naming/enum-ref.rs b/third_party/rust/pin-project/tests/expand/naming/enum-ref.rs new file mode 100644 index 000000000000..b1ff805ce5bb --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/naming/enum-ref.rs @@ -0,0 +1,14 @@ +use pin_project::pin_project; + +#[pin_project(project_ref = ProjRef)] +enum Enum { + Struct { + #[pin] + pinned: T, + unpinned: U, + }, + Tuple(#[pin] T, U), + Unit, +} + +fn main() {} diff --git a/third_party/rust/pin-project/tests/expand/naming/struct-all.expanded.rs b/third_party/rust/pin-project/tests/expand/naming/struct-all.expanded.rs new file mode 100644 index 000000000000..4b46e34930eb --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/naming/struct-all.expanded.rs @@ -0,0 +1,150 @@ +use pin_project::pin_project; +# [pin (__private (project = Proj , project_ref = ProjRef , project_replace = ProjOwn))] +struct Struct { + #[pin] + pinned: T, + 
unpinned: U, +} +#[allow(box_pointers)] +#[allow(deprecated)] +#[allow(explicit_outlives_requirements)] +#[allow(single_use_lifetimes)] +#[allow(unreachable_pub)] +#[allow(clippy::unknown_clippy_lints)] +#[allow(clippy::pattern_type_mismatch)] +#[allow(clippy::redundant_pub_crate)] +#[allow(clippy::type_repetition_in_bounds)] +#[allow(dead_code)] +#[allow(clippy::mut_mut)] +struct Proj<'pin, T, U> +where + Struct: 'pin, +{ + pinned: ::pin_project::__private::Pin<&'pin mut (T)>, + unpinned: &'pin mut (U), +} +#[allow(box_pointers)] +#[allow(deprecated)] +#[allow(explicit_outlives_requirements)] +#[allow(single_use_lifetimes)] +#[allow(unreachable_pub)] +#[allow(clippy::unknown_clippy_lints)] +#[allow(clippy::pattern_type_mismatch)] +#[allow(clippy::redundant_pub_crate)] +#[allow(clippy::type_repetition_in_bounds)] +#[allow(dead_code)] +#[allow(clippy::ref_option_ref)] +struct ProjRef<'pin, T, U> +where + Struct: 'pin, +{ + pinned: ::pin_project::__private::Pin<&'pin (T)>, + unpinned: &'pin (U), +} +#[allow(box_pointers)] +#[allow(deprecated)] +#[allow(explicit_outlives_requirements)] +#[allow(single_use_lifetimes)] +#[allow(unreachable_pub)] +#[allow(clippy::unknown_clippy_lints)] +#[allow(clippy::pattern_type_mismatch)] +#[allow(clippy::redundant_pub_crate)] +#[allow(clippy::type_repetition_in_bounds)] +#[allow(dead_code)] +struct ProjOwn { + pinned: ::pin_project::__private::PhantomData, + unpinned: U, +} +#[allow(box_pointers)] +#[allow(deprecated)] +#[allow(explicit_outlives_requirements)] +#[allow(single_use_lifetimes)] +#[allow(unreachable_pub)] +#[allow(clippy::unknown_clippy_lints)] +#[allow(clippy::pattern_type_mismatch)] +#[allow(clippy::redundant_pub_crate)] +#[allow(clippy::type_repetition_in_bounds)] +#[allow(unused_qualifications)] +#[allow(clippy::semicolon_if_nothing_returned)] +#[allow(clippy::use_self)] +#[allow(clippy::used_underscore_binding)] +const _: () = { + #[allow(unused_extern_crates)] + extern crate pin_project as _pin_project; + impl Struct { + fn project<'pin>(self: _pin_project::__private::Pin<&'pin mut Self>) -> Proj<'pin, T, U> { + unsafe { + let Self { pinned, unpinned } = self.get_unchecked_mut(); + Proj { + pinned: _pin_project::__private::Pin::new_unchecked(pinned), + unpinned, + } + } + } + #[allow(clippy::missing_const_for_fn)] + fn project_ref<'pin>( + self: _pin_project::__private::Pin<&'pin Self>, + ) -> ProjRef<'pin, T, U> { + unsafe { + let Self { pinned, unpinned } = self.get_ref(); + ProjRef { + pinned: _pin_project::__private::Pin::new_unchecked(pinned), + unpinned, + } + } + } + fn project_replace( + self: _pin_project::__private::Pin<&mut Self>, + __replacement: Self, + ) -> ProjOwn { + unsafe { + let __self_ptr: *mut Self = self.get_unchecked_mut(); + let __guard = + _pin_project::__private::UnsafeOverwriteGuard::new(__self_ptr, __replacement); + let Self { pinned, unpinned } = &mut *__self_ptr; + let __result = ProjOwn { + pinned: _pin_project::__private::PhantomData, + unpinned: _pin_project::__private::ptr::read(unpinned), + }; + { + let __guard = _pin_project::__private::UnsafeDropInPlaceGuard::new(pinned); + } + __result + } + } + } + #[forbid(unaligned_references, safe_packed_borrows)] + fn __assert_not_repr_packed(this: &Struct) { + let _ = &this.pinned; + let _ = &this.unpinned; + } + #[allow(missing_debug_implementations)] + struct __Struct<'pin, T, U> { + __pin_project_use_generics: _pin_project::__private::AlwaysUnpin< + 'pin, + ( + _pin_project::__private::PhantomData, + _pin_project::__private::PhantomData, + ), + >, + __field0: 
T, + } + impl<'pin, T, U> _pin_project::__private::Unpin for Struct where + __Struct<'pin, T, U>: _pin_project::__private::Unpin + { + } + #[doc(hidden)] + unsafe impl<'pin, T, U> _pin_project::UnsafeUnpin for Struct where + __Struct<'pin, T, U>: _pin_project::__private::Unpin + { + } + trait StructMustNotImplDrop {} + #[allow(clippy::drop_bounds, drop_bounds)] + impl StructMustNotImplDrop for T {} + impl StructMustNotImplDrop for Struct {} + #[doc(hidden)] + impl _pin_project::__private::PinnedDrop for Struct { + unsafe fn drop(self: _pin_project::__private::Pin<&mut Self>) {} + } +}; +fn main() {} diff --git a/third_party/rust/pin-project/tests/expand/naming/struct-all.rs b/third_party/rust/pin-project/tests/expand/naming/struct-all.rs new file mode 100644 index 000000000000..c229ba4e0ed4 --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/naming/struct-all.rs @@ -0,0 +1,10 @@ +use pin_project::pin_project; + +#[pin_project(project = Proj, project_ref = ProjRef, project_replace = ProjOwn)] +struct Struct { + #[pin] + pinned: T, + unpinned: U, +} + +fn main() {} diff --git a/third_party/rust/pin-project/tests/expand/naming/struct-mut.expanded.rs b/third_party/rust/pin-project/tests/expand/naming/struct-mut.expanded.rs new file mode 100644 index 000000000000..5803d6b26041 --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/naming/struct-mut.expanded.rs @@ -0,0 +1,108 @@ +use pin_project::pin_project; +# [pin (__private (project = Proj))] +struct Struct { + #[pin] + pinned: T, + unpinned: U, +} +#[allow(box_pointers)] +#[allow(deprecated)] +#[allow(explicit_outlives_requirements)] +#[allow(single_use_lifetimes)] +#[allow(unreachable_pub)] +#[allow(clippy::unknown_clippy_lints)] +#[allow(clippy::pattern_type_mismatch)] +#[allow(clippy::redundant_pub_crate)] +#[allow(clippy::type_repetition_in_bounds)] +#[allow(dead_code)] +#[allow(clippy::mut_mut)] +struct Proj<'pin, T, U> +where + Struct: 'pin, +{ + pinned: ::pin_project::__private::Pin<&'pin mut (T)>, + unpinned: &'pin mut (U), +} +#[allow(box_pointers)] +#[allow(deprecated)] +#[allow(explicit_outlives_requirements)] +#[allow(single_use_lifetimes)] +#[allow(unreachable_pub)] +#[allow(clippy::unknown_clippy_lints)] +#[allow(clippy::pattern_type_mismatch)] +#[allow(clippy::redundant_pub_crate)] +#[allow(clippy::type_repetition_in_bounds)] +#[allow(unused_qualifications)] +#[allow(clippy::semicolon_if_nothing_returned)] +#[allow(clippy::use_self)] +#[allow(clippy::used_underscore_binding)] +const _: () = { + #[allow(unused_extern_crates)] + extern crate pin_project as _pin_project; + #[allow(dead_code)] + #[allow(clippy::ref_option_ref)] + struct __StructProjectionRef<'pin, T, U> + where + Struct: 'pin, + { + pinned: ::pin_project::__private::Pin<&'pin (T)>, + unpinned: &'pin (U), + } + impl Struct { + fn project<'pin>(self: _pin_project::__private::Pin<&'pin mut Self>) -> Proj<'pin, T, U> { + unsafe { + let Self { pinned, unpinned } = self.get_unchecked_mut(); + Proj { + pinned: _pin_project::__private::Pin::new_unchecked(pinned), + unpinned, + } + } + } + #[allow(clippy::missing_const_for_fn)] + fn project_ref<'pin>( + self: _pin_project::__private::Pin<&'pin Self>, + ) -> __StructProjectionRef<'pin, T, U> { + unsafe { + let Self { pinned, unpinned } = self.get_ref(); + __StructProjectionRef { + pinned: _pin_project::__private::Pin::new_unchecked(pinned), + unpinned, + } + } + } + } + #[forbid(unaligned_references, safe_packed_borrows)] + fn __assert_not_repr_packed(this: &Struct) { + let _ = &this.pinned; + let _ = 
&this.unpinned; + } + #[allow(missing_debug_implementations)] + struct __Struct<'pin, T, U> { + __pin_project_use_generics: _pin_project::__private::AlwaysUnpin< + 'pin, + ( + _pin_project::__private::PhantomData, + _pin_project::__private::PhantomData, + ), + >, + __field0: T, + } + impl<'pin, T, U> _pin_project::__private::Unpin for Struct where + __Struct<'pin, T, U>: _pin_project::__private::Unpin + { + } + #[doc(hidden)] + unsafe impl<'pin, T, U> _pin_project::UnsafeUnpin for Struct where + __Struct<'pin, T, U>: _pin_project::__private::Unpin + { + } + trait StructMustNotImplDrop {} + #[allow(clippy::drop_bounds, drop_bounds)] + impl StructMustNotImplDrop for T {} + impl StructMustNotImplDrop for Struct {} + #[doc(hidden)] + impl _pin_project::__private::PinnedDrop for Struct { + unsafe fn drop(self: _pin_project::__private::Pin<&mut Self>) {} + } +}; +fn main() {} diff --git a/third_party/rust/pin-project/tests/expand/naming/struct-mut.rs b/third_party/rust/pin-project/tests/expand/naming/struct-mut.rs new file mode 100644 index 000000000000..2f554d324dfb --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/naming/struct-mut.rs @@ -0,0 +1,10 @@ +use pin_project::pin_project; + +#[pin_project(project = Proj)] +struct Struct { + #[pin] + pinned: T, + unpinned: U, +} + +fn main() {} diff --git a/third_party/rust/pin-project/tests/expand/naming/struct-none.expanded.rs b/third_party/rust/pin-project/tests/expand/naming/struct-none.expanded.rs new file mode 100644 index 000000000000..3089a545d576 --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/naming/struct-none.expanded.rs @@ -0,0 +1,101 @@ +use pin_project::pin_project; +#[pin(__private())] +struct Struct { + #[pin] + pinned: T, + unpinned: U, +} +#[allow(box_pointers)] +#[allow(deprecated)] +#[allow(explicit_outlives_requirements)] +#[allow(single_use_lifetimes)] +#[allow(unreachable_pub)] +#[allow(clippy::unknown_clippy_lints)] +#[allow(clippy::pattern_type_mismatch)] +#[allow(clippy::redundant_pub_crate)] +#[allow(clippy::type_repetition_in_bounds)] +#[allow(unused_qualifications)] +#[allow(clippy::semicolon_if_nothing_returned)] +#[allow(clippy::use_self)] +#[allow(clippy::used_underscore_binding)] +const _: () = { + #[allow(unused_extern_crates)] + extern crate pin_project as _pin_project; + #[allow(dead_code)] + #[allow(clippy::mut_mut)] + struct __StructProjection<'pin, T, U> + where + Struct: 'pin, + { + pinned: ::pin_project::__private::Pin<&'pin mut (T)>, + unpinned: &'pin mut (U), + } + #[allow(dead_code)] + #[allow(clippy::ref_option_ref)] + struct __StructProjectionRef<'pin, T, U> + where + Struct: 'pin, + { + pinned: ::pin_project::__private::Pin<&'pin (T)>, + unpinned: &'pin (U), + } + impl Struct { + fn project<'pin>( + self: _pin_project::__private::Pin<&'pin mut Self>, + ) -> __StructProjection<'pin, T, U> { + unsafe { + let Self { pinned, unpinned } = self.get_unchecked_mut(); + __StructProjection { + pinned: _pin_project::__private::Pin::new_unchecked(pinned), + unpinned, + } + } + } + #[allow(clippy::missing_const_for_fn)] + fn project_ref<'pin>( + self: _pin_project::__private::Pin<&'pin Self>, + ) -> __StructProjectionRef<'pin, T, U> { + unsafe { + let Self { pinned, unpinned } = self.get_ref(); + __StructProjectionRef { + pinned: _pin_project::__private::Pin::new_unchecked(pinned), + unpinned, + } + } + } + } + #[forbid(unaligned_references, safe_packed_borrows)] + fn __assert_not_repr_packed(this: &Struct) { + let _ = &this.pinned; + let _ = &this.unpinned; + } + 
#[allow(missing_debug_implementations)] + struct __Struct<'pin, T, U> { + __pin_project_use_generics: _pin_project::__private::AlwaysUnpin< + 'pin, + ( + _pin_project::__private::PhantomData, + _pin_project::__private::PhantomData, + ), + >, + __field0: T, + } + impl<'pin, T, U> _pin_project::__private::Unpin for Struct where + __Struct<'pin, T, U>: _pin_project::__private::Unpin + { + } + #[doc(hidden)] + unsafe impl<'pin, T, U> _pin_project::UnsafeUnpin for Struct where + __Struct<'pin, T, U>: _pin_project::__private::Unpin + { + } + trait StructMustNotImplDrop {} + #[allow(clippy::drop_bounds, drop_bounds)] + impl StructMustNotImplDrop for T {} + impl StructMustNotImplDrop for Struct {} + #[doc(hidden)] + impl _pin_project::__private::PinnedDrop for Struct { + unsafe fn drop(self: _pin_project::__private::Pin<&mut Self>) {} + } +}; +fn main() {} diff --git a/third_party/rust/pin-project/tests/expand/naming/struct-none.rs b/third_party/rust/pin-project/tests/expand/naming/struct-none.rs new file mode 100644 index 000000000000..474f0a11612e --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/naming/struct-none.rs @@ -0,0 +1,10 @@ +use pin_project::pin_project; + +#[pin_project] +struct Struct { + #[pin] + pinned: T, + unpinned: U, +} + +fn main() {} diff --git a/third_party/rust/pin-project/tests/expand/naming/struct-own.expanded.rs b/third_party/rust/pin-project/tests/expand/naming/struct-own.expanded.rs new file mode 100644 index 000000000000..d05c7d6e4c93 --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/naming/struct-own.expanded.rs @@ -0,0 +1,134 @@ +use pin_project::pin_project; +# [pin (__private (project_replace = ProjOwn))] +struct Struct { + #[pin] + pinned: T, + unpinned: U, +} +#[allow(box_pointers)] +#[allow(deprecated)] +#[allow(explicit_outlives_requirements)] +#[allow(single_use_lifetimes)] +#[allow(unreachable_pub)] +#[allow(clippy::unknown_clippy_lints)] +#[allow(clippy::pattern_type_mismatch)] +#[allow(clippy::redundant_pub_crate)] +#[allow(clippy::type_repetition_in_bounds)] +#[allow(dead_code)] +struct ProjOwn { + pinned: ::pin_project::__private::PhantomData, + unpinned: U, +} +#[allow(box_pointers)] +#[allow(deprecated)] +#[allow(explicit_outlives_requirements)] +#[allow(single_use_lifetimes)] +#[allow(unreachable_pub)] +#[allow(clippy::unknown_clippy_lints)] +#[allow(clippy::pattern_type_mismatch)] +#[allow(clippy::redundant_pub_crate)] +#[allow(clippy::type_repetition_in_bounds)] +#[allow(unused_qualifications)] +#[allow(clippy::semicolon_if_nothing_returned)] +#[allow(clippy::use_self)] +#[allow(clippy::used_underscore_binding)] +const _: () = { + #[allow(unused_extern_crates)] + extern crate pin_project as _pin_project; + #[allow(dead_code)] + #[allow(clippy::mut_mut)] + struct __StructProjection<'pin, T, U> + where + Struct: 'pin, + { + pinned: ::pin_project::__private::Pin<&'pin mut (T)>, + unpinned: &'pin mut (U), + } + #[allow(dead_code)] + #[allow(clippy::ref_option_ref)] + struct __StructProjectionRef<'pin, T, U> + where + Struct: 'pin, + { + pinned: ::pin_project::__private::Pin<&'pin (T)>, + unpinned: &'pin (U), + } + impl Struct { + fn project<'pin>( + self: _pin_project::__private::Pin<&'pin mut Self>, + ) -> __StructProjection<'pin, T, U> { + unsafe { + let Self { pinned, unpinned } = self.get_unchecked_mut(); + __StructProjection { + pinned: _pin_project::__private::Pin::new_unchecked(pinned), + unpinned, + } + } + } + #[allow(clippy::missing_const_for_fn)] + fn project_ref<'pin>( + self: _pin_project::__private::Pin<&'pin 
Self>, + ) -> __StructProjectionRef<'pin, T, U> { + unsafe { + let Self { pinned, unpinned } = self.get_ref(); + __StructProjectionRef { + pinned: _pin_project::__private::Pin::new_unchecked(pinned), + unpinned, + } + } + } + fn project_replace( + self: _pin_project::__private::Pin<&mut Self>, + __replacement: Self, + ) -> ProjOwn { + unsafe { + let __self_ptr: *mut Self = self.get_unchecked_mut(); + let __guard = + _pin_project::__private::UnsafeOverwriteGuard::new(__self_ptr, __replacement); + let Self { pinned, unpinned } = &mut *__self_ptr; + let __result = ProjOwn { + pinned: _pin_project::__private::PhantomData, + unpinned: _pin_project::__private::ptr::read(unpinned), + }; + { + let __guard = _pin_project::__private::UnsafeDropInPlaceGuard::new(pinned); + } + __result + } + } + } + #[forbid(unaligned_references, safe_packed_borrows)] + fn __assert_not_repr_packed(this: &Struct) { + let _ = &this.pinned; + let _ = &this.unpinned; + } + #[allow(missing_debug_implementations)] + struct __Struct<'pin, T, U> { + __pin_project_use_generics: _pin_project::__private::AlwaysUnpin< + 'pin, + ( + _pin_project::__private::PhantomData, + _pin_project::__private::PhantomData, + ), + >, + __field0: T, + } + impl<'pin, T, U> _pin_project::__private::Unpin for Struct where + __Struct<'pin, T, U>: _pin_project::__private::Unpin + { + } + #[doc(hidden)] + unsafe impl<'pin, T, U> _pin_project::UnsafeUnpin for Struct where + __Struct<'pin, T, U>: _pin_project::__private::Unpin + { + } + trait StructMustNotImplDrop {} + #[allow(clippy::drop_bounds, drop_bounds)] + impl StructMustNotImplDrop for T {} + impl StructMustNotImplDrop for Struct {} + #[doc(hidden)] + impl _pin_project::__private::PinnedDrop for Struct { + unsafe fn drop(self: _pin_project::__private::Pin<&mut Self>) {} + } +}; +fn main() {} diff --git a/third_party/rust/pin-project/tests/expand/naming/struct-own.rs b/third_party/rust/pin-project/tests/expand/naming/struct-own.rs new file mode 100644 index 000000000000..4924362ba070 --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/naming/struct-own.rs @@ -0,0 +1,10 @@ +use pin_project::pin_project; + +#[pin_project(project_replace = ProjOwn)] +struct Struct { + #[pin] + pinned: T, + unpinned: U, +} + +fn main() {} diff --git a/third_party/rust/pin-project/tests/expand/naming/struct-ref.expanded.rs b/third_party/rust/pin-project/tests/expand/naming/struct-ref.expanded.rs new file mode 100644 index 000000000000..c131ec49fc29 --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/naming/struct-ref.expanded.rs @@ -0,0 +1,110 @@ +use pin_project::pin_project; +# [pin (__private (project_ref = ProjRef))] +struct Struct { + #[pin] + pinned: T, + unpinned: U, +} +#[allow(box_pointers)] +#[allow(deprecated)] +#[allow(explicit_outlives_requirements)] +#[allow(single_use_lifetimes)] +#[allow(unreachable_pub)] +#[allow(clippy::unknown_clippy_lints)] +#[allow(clippy::pattern_type_mismatch)] +#[allow(clippy::redundant_pub_crate)] +#[allow(clippy::type_repetition_in_bounds)] +#[allow(dead_code)] +#[allow(clippy::ref_option_ref)] +struct ProjRef<'pin, T, U> +where + Struct: 'pin, +{ + pinned: ::pin_project::__private::Pin<&'pin (T)>, + unpinned: &'pin (U), +} +#[allow(box_pointers)] +#[allow(deprecated)] +#[allow(explicit_outlives_requirements)] +#[allow(single_use_lifetimes)] +#[allow(unreachable_pub)] +#[allow(clippy::unknown_clippy_lints)] +#[allow(clippy::pattern_type_mismatch)] +#[allow(clippy::redundant_pub_crate)] +#[allow(clippy::type_repetition_in_bounds)] 
+#[allow(unused_qualifications)] +#[allow(clippy::semicolon_if_nothing_returned)] +#[allow(clippy::use_self)] +#[allow(clippy::used_underscore_binding)] +const _: () = { + #[allow(unused_extern_crates)] + extern crate pin_project as _pin_project; + #[allow(dead_code)] + #[allow(clippy::mut_mut)] + struct __StructProjection<'pin, T, U> + where + Struct: 'pin, + { + pinned: ::pin_project::__private::Pin<&'pin mut (T)>, + unpinned: &'pin mut (U), + } + impl Struct { + fn project<'pin>( + self: _pin_project::__private::Pin<&'pin mut Self>, + ) -> __StructProjection<'pin, T, U> { + unsafe { + let Self { pinned, unpinned } = self.get_unchecked_mut(); + __StructProjection { + pinned: _pin_project::__private::Pin::new_unchecked(pinned), + unpinned, + } + } + } + #[allow(clippy::missing_const_for_fn)] + fn project_ref<'pin>( + self: _pin_project::__private::Pin<&'pin Self>, + ) -> ProjRef<'pin, T, U> { + unsafe { + let Self { pinned, unpinned } = self.get_ref(); + ProjRef { + pinned: _pin_project::__private::Pin::new_unchecked(pinned), + unpinned, + } + } + } + } + #[forbid(unaligned_references, safe_packed_borrows)] + fn __assert_not_repr_packed(this: &Struct) { + let _ = &this.pinned; + let _ = &this.unpinned; + } + #[allow(missing_debug_implementations)] + struct __Struct<'pin, T, U> { + __pin_project_use_generics: _pin_project::__private::AlwaysUnpin< + 'pin, + ( + _pin_project::__private::PhantomData, + _pin_project::__private::PhantomData, + ), + >, + __field0: T, + } + impl<'pin, T, U> _pin_project::__private::Unpin for Struct where + __Struct<'pin, T, U>: _pin_project::__private::Unpin + { + } + #[doc(hidden)] + unsafe impl<'pin, T, U> _pin_project::UnsafeUnpin for Struct where + __Struct<'pin, T, U>: _pin_project::__private::Unpin + { + } + trait StructMustNotImplDrop {} + #[allow(clippy::drop_bounds, drop_bounds)] + impl StructMustNotImplDrop for T {} + impl StructMustNotImplDrop for Struct {} + #[doc(hidden)] + impl _pin_project::__private::PinnedDrop for Struct { + unsafe fn drop(self: _pin_project::__private::Pin<&mut Self>) {} + } +}; +fn main() {} diff --git a/third_party/rust/pin-project/tests/expand/naming/struct-ref.rs b/third_party/rust/pin-project/tests/expand/naming/struct-ref.rs new file mode 100644 index 000000000000..4e29a162d490 --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/naming/struct-ref.rs @@ -0,0 +1,10 @@ +use pin_project::pin_project; + +#[pin_project(project_ref = ProjRef)] +struct Struct { + #[pin] + pinned: T, + unpinned: U, +} + +fn main() {} diff --git a/third_party/rust/pin-project/tests/expand/naming/tuple_struct-all.expanded.rs b/third_party/rust/pin-project/tests/expand/naming/tuple_struct-all.expanded.rs new file mode 100644 index 000000000000..042a798c6c03 --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/naming/tuple_struct-all.expanded.rs @@ -0,0 +1,129 @@ +use pin_project::pin_project; +# [pin (__private (project = Proj , project_ref = ProjRef , project_replace = ProjOwn))] +struct TupleStruct(#[pin] T, U); +#[allow(box_pointers)] +#[allow(deprecated)] +#[allow(explicit_outlives_requirements)] +#[allow(single_use_lifetimes)] +#[allow(unreachable_pub)] +#[allow(clippy::unknown_clippy_lints)] +#[allow(clippy::pattern_type_mismatch)] +#[allow(clippy::redundant_pub_crate)] +#[allow(clippy::type_repetition_in_bounds)] +#[allow(dead_code)] +#[allow(clippy::mut_mut)] +struct Proj<'pin, T, U>(::pin_project::__private::Pin<&'pin mut (T)>, &'pin mut (U)) +where + TupleStruct: 'pin; +#[allow(box_pointers)] +#[allow(deprecated)] 
+#[allow(explicit_outlives_requirements)] +#[allow(single_use_lifetimes)] +#[allow(unreachable_pub)] +#[allow(clippy::unknown_clippy_lints)] +#[allow(clippy::pattern_type_mismatch)] +#[allow(clippy::redundant_pub_crate)] +#[allow(clippy::type_repetition_in_bounds)] +#[allow(dead_code)] +#[allow(clippy::ref_option_ref)] +struct ProjRef<'pin, T, U>(::pin_project::__private::Pin<&'pin (T)>, &'pin (U)) +where + TupleStruct: 'pin; +#[allow(box_pointers)] +#[allow(deprecated)] +#[allow(explicit_outlives_requirements)] +#[allow(single_use_lifetimes)] +#[allow(unreachable_pub)] +#[allow(clippy::unknown_clippy_lints)] +#[allow(clippy::pattern_type_mismatch)] +#[allow(clippy::redundant_pub_crate)] +#[allow(clippy::type_repetition_in_bounds)] +#[allow(dead_code)] +struct ProjOwn(::pin_project::__private::PhantomData, U); +#[allow(box_pointers)] +#[allow(deprecated)] +#[allow(explicit_outlives_requirements)] +#[allow(single_use_lifetimes)] +#[allow(unreachable_pub)] +#[allow(clippy::unknown_clippy_lints)] +#[allow(clippy::pattern_type_mismatch)] +#[allow(clippy::redundant_pub_crate)] +#[allow(clippy::type_repetition_in_bounds)] +#[allow(unused_qualifications)] +#[allow(clippy::semicolon_if_nothing_returned)] +#[allow(clippy::use_self)] +#[allow(clippy::used_underscore_binding)] +const _: () = { + #[allow(unused_extern_crates)] + extern crate pin_project as _pin_project; + impl TupleStruct { + fn project<'pin>(self: _pin_project::__private::Pin<&'pin mut Self>) -> Proj<'pin, T, U> { + unsafe { + let Self(_0, _1) = self.get_unchecked_mut(); + Proj(_pin_project::__private::Pin::new_unchecked(_0), _1) + } + } + #[allow(clippy::missing_const_for_fn)] + fn project_ref<'pin>( + self: _pin_project::__private::Pin<&'pin Self>, + ) -> ProjRef<'pin, T, U> { + unsafe { + let Self(_0, _1) = self.get_ref(); + ProjRef(_pin_project::__private::Pin::new_unchecked(_0), _1) + } + } + fn project_replace( + self: _pin_project::__private::Pin<&mut Self>, + __replacement: Self, + ) -> ProjOwn { + unsafe { + let __self_ptr: *mut Self = self.get_unchecked_mut(); + let __guard = + _pin_project::__private::UnsafeOverwriteGuard::new(__self_ptr, __replacement); + let Self(_0, _1) = &mut *__self_ptr; + let __result = ProjOwn( + _pin_project::__private::PhantomData, + _pin_project::__private::ptr::read(_1), + ); + { + let __guard = _pin_project::__private::UnsafeDropInPlaceGuard::new(_0); + } + __result + } + } + } + #[forbid(unaligned_references, safe_packed_borrows)] + fn __assert_not_repr_packed(this: &TupleStruct) { + let _ = &this.0; + let _ = &this.1; + } + #[allow(missing_debug_implementations)] + struct __TupleStruct<'pin, T, U> { + __pin_project_use_generics: _pin_project::__private::AlwaysUnpin< + 'pin, + ( + _pin_project::__private::PhantomData, + _pin_project::__private::PhantomData, + ), + >, + __field0: T, + } + impl<'pin, T, U> _pin_project::__private::Unpin for TupleStruct where + __TupleStruct<'pin, T, U>: _pin_project::__private::Unpin + { + } + #[doc(hidden)] + unsafe impl<'pin, T, U> _pin_project::UnsafeUnpin for TupleStruct where + __TupleStruct<'pin, T, U>: _pin_project::__private::Unpin + { + } + trait TupleStructMustNotImplDrop {} + #[allow(clippy::drop_bounds, drop_bounds)] + impl TupleStructMustNotImplDrop for T {} + impl TupleStructMustNotImplDrop for TupleStruct {} + #[doc(hidden)] + impl _pin_project::__private::PinnedDrop for TupleStruct { + unsafe fn drop(self: _pin_project::__private::Pin<&mut Self>) {} + } +}; +fn main() {} diff --git 
a/third_party/rust/pin-project/tests/expand/naming/tuple_struct-all.rs b/third_party/rust/pin-project/tests/expand/naming/tuple_struct-all.rs new file mode 100644 index 000000000000..0d95cb0080cd --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/naming/tuple_struct-all.rs @@ -0,0 +1,6 @@ +use pin_project::pin_project; + +#[pin_project(project = Proj, project_ref = ProjRef, project_replace = ProjOwn)] +struct TupleStruct(#[pin] T, U); + +fn main() {} diff --git a/third_party/rust/pin-project/tests/expand/naming/tuple_struct-mut.expanded.rs b/third_party/rust/pin-project/tests/expand/naming/tuple_struct-mut.expanded.rs new file mode 100644 index 000000000000..60218d6c65f1 --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/naming/tuple_struct-mut.expanded.rs @@ -0,0 +1,93 @@ +use pin_project::pin_project; +# [pin (__private (project = Proj))] +struct TupleStruct(#[pin] T, U); +#[allow(box_pointers)] +#[allow(deprecated)] +#[allow(explicit_outlives_requirements)] +#[allow(single_use_lifetimes)] +#[allow(unreachable_pub)] +#[allow(clippy::unknown_clippy_lints)] +#[allow(clippy::pattern_type_mismatch)] +#[allow(clippy::redundant_pub_crate)] +#[allow(clippy::type_repetition_in_bounds)] +#[allow(dead_code)] +#[allow(clippy::mut_mut)] +struct Proj<'pin, T, U>(::pin_project::__private::Pin<&'pin mut (T)>, &'pin mut (U)) +where + TupleStruct: 'pin; +#[allow(box_pointers)] +#[allow(deprecated)] +#[allow(explicit_outlives_requirements)] +#[allow(single_use_lifetimes)] +#[allow(unreachable_pub)] +#[allow(clippy::unknown_clippy_lints)] +#[allow(clippy::pattern_type_mismatch)] +#[allow(clippy::redundant_pub_crate)] +#[allow(clippy::type_repetition_in_bounds)] +#[allow(unused_qualifications)] +#[allow(clippy::semicolon_if_nothing_returned)] +#[allow(clippy::use_self)] +#[allow(clippy::used_underscore_binding)] +const _: () = { + #[allow(unused_extern_crates)] + extern crate pin_project as _pin_project; + #[allow(dead_code)] + #[allow(clippy::ref_option_ref)] + struct __TupleStructProjectionRef<'pin, T, U>( + ::pin_project::__private::Pin<&'pin (T)>, + &'pin (U), + ) + where + TupleStruct: 'pin; + impl TupleStruct { + fn project<'pin>(self: _pin_project::__private::Pin<&'pin mut Self>) -> Proj<'pin, T, U> { + unsafe { + let Self(_0, _1) = self.get_unchecked_mut(); + Proj(_pin_project::__private::Pin::new_unchecked(_0), _1) + } + } + #[allow(clippy::missing_const_for_fn)] + fn project_ref<'pin>( + self: _pin_project::__private::Pin<&'pin Self>, + ) -> __TupleStructProjectionRef<'pin, T, U> { + unsafe { + let Self(_0, _1) = self.get_ref(); + __TupleStructProjectionRef(_pin_project::__private::Pin::new_unchecked(_0), _1) + } + } + } + #[forbid(unaligned_references, safe_packed_borrows)] + fn __assert_not_repr_packed(this: &TupleStruct) { + let _ = &this.0; + let _ = &this.1; + } + #[allow(missing_debug_implementations)] + struct __TupleStruct<'pin, T, U> { + __pin_project_use_generics: _pin_project::__private::AlwaysUnpin< + 'pin, + ( + _pin_project::__private::PhantomData, + _pin_project::__private::PhantomData, + ), + >, + __field0: T, + } + impl<'pin, T, U> _pin_project::__private::Unpin for TupleStruct where + __TupleStruct<'pin, T, U>: _pin_project::__private::Unpin + { + } + #[doc(hidden)] + unsafe impl<'pin, T, U> _pin_project::UnsafeUnpin for TupleStruct where + __TupleStruct<'pin, T, U>: _pin_project::__private::Unpin + { + } + trait TupleStructMustNotImplDrop {} + #[allow(clippy::drop_bounds, drop_bounds)] + impl TupleStructMustNotImplDrop for T {} + impl 
TupleStructMustNotImplDrop for TupleStruct {} + #[doc(hidden)] + impl _pin_project::__private::PinnedDrop for TupleStruct { + unsafe fn drop(self: _pin_project::__private::Pin<&mut Self>) {} + } +}; +fn main() {} diff --git a/third_party/rust/pin-project/tests/expand/naming/tuple_struct-mut.rs b/third_party/rust/pin-project/tests/expand/naming/tuple_struct-mut.rs new file mode 100644 index 000000000000..e9779a6cade1 --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/naming/tuple_struct-mut.rs @@ -0,0 +1,6 @@ +use pin_project::pin_project; + +#[pin_project(project = Proj)] +struct TupleStruct(#[pin] T, U); + +fn main() {} diff --git a/third_party/rust/pin-project/tests/expand/naming/tuple_struct-none.expanded.rs b/third_party/rust/pin-project/tests/expand/naming/tuple_struct-none.expanded.rs new file mode 100644 index 000000000000..cc9b75e67bea --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/naming/tuple_struct-none.expanded.rs @@ -0,0 +1,89 @@ +use pin_project::pin_project; +#[pin(__private())] +struct TupleStruct(#[pin] T, U); +#[allow(box_pointers)] +#[allow(deprecated)] +#[allow(explicit_outlives_requirements)] +#[allow(single_use_lifetimes)] +#[allow(unreachable_pub)] +#[allow(clippy::unknown_clippy_lints)] +#[allow(clippy::pattern_type_mismatch)] +#[allow(clippy::redundant_pub_crate)] +#[allow(clippy::type_repetition_in_bounds)] +#[allow(unused_qualifications)] +#[allow(clippy::semicolon_if_nothing_returned)] +#[allow(clippy::use_self)] +#[allow(clippy::used_underscore_binding)] +const _: () = { + #[allow(unused_extern_crates)] + extern crate pin_project as _pin_project; + #[allow(dead_code)] + #[allow(clippy::mut_mut)] + struct __TupleStructProjection<'pin, T, U>( + ::pin_project::__private::Pin<&'pin mut (T)>, + &'pin mut (U), + ) + where + TupleStruct: 'pin; + #[allow(dead_code)] + #[allow(clippy::ref_option_ref)] + struct __TupleStructProjectionRef<'pin, T, U>( + ::pin_project::__private::Pin<&'pin (T)>, + &'pin (U), + ) + where + TupleStruct: 'pin; + impl TupleStruct { + fn project<'pin>( + self: _pin_project::__private::Pin<&'pin mut Self>, + ) -> __TupleStructProjection<'pin, T, U> { + unsafe { + let Self(_0, _1) = self.get_unchecked_mut(); + __TupleStructProjection(_pin_project::__private::Pin::new_unchecked(_0), _1) + } + } + #[allow(clippy::missing_const_for_fn)] + fn project_ref<'pin>( + self: _pin_project::__private::Pin<&'pin Self>, + ) -> __TupleStructProjectionRef<'pin, T, U> { + unsafe { + let Self(_0, _1) = self.get_ref(); + __TupleStructProjectionRef(_pin_project::__private::Pin::new_unchecked(_0), _1) + } + } + } + #[forbid(unaligned_references, safe_packed_borrows)] + fn __assert_not_repr_packed(this: &TupleStruct) { + let _ = &this.0; + let _ = &this.1; + } + #[allow(missing_debug_implementations)] + struct __TupleStruct<'pin, T, U> { + __pin_project_use_generics: _pin_project::__private::AlwaysUnpin< + 'pin, + ( + _pin_project::__private::PhantomData, + _pin_project::__private::PhantomData, + ), + >, + __field0: T, + } + impl<'pin, T, U> _pin_project::__private::Unpin for TupleStruct where + __TupleStruct<'pin, T, U>: _pin_project::__private::Unpin + { + } + #[doc(hidden)] + unsafe impl<'pin, T, U> _pin_project::UnsafeUnpin for TupleStruct where + __TupleStruct<'pin, T, U>: _pin_project::__private::Unpin + { + } + trait TupleStructMustNotImplDrop {} + #[allow(clippy::drop_bounds, drop_bounds)] + impl TupleStructMustNotImplDrop for T {} + impl TupleStructMustNotImplDrop for TupleStruct {} + #[doc(hidden)] + impl 
_pin_project::__private::PinnedDrop for TupleStruct { + unsafe fn drop(self: _pin_project::__private::Pin<&mut Self>) {} + } +}; +fn main() {} diff --git a/third_party/rust/pin-project/tests/expand/naming/tuple_struct-none.rs b/third_party/rust/pin-project/tests/expand/naming/tuple_struct-none.rs new file mode 100644 index 000000000000..398b14f3a507 --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/naming/tuple_struct-none.rs @@ -0,0 +1,6 @@ +use pin_project::pin_project; + +#[pin_project] +struct TupleStruct(#[pin] T, U); + +fn main() {} diff --git a/third_party/rust/pin-project/tests/expand/naming/tuple_struct-own.expanded.rs b/third_party/rust/pin-project/tests/expand/naming/tuple_struct-own.expanded.rs new file mode 100644 index 000000000000..21e12de64a58 --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/naming/tuple_struct-own.expanded.rs @@ -0,0 +1,119 @@ +use pin_project::pin_project; +# [pin (__private (project_replace = ProjOwn))] +struct TupleStruct(#[pin] T, U); +#[allow(box_pointers)] +#[allow(deprecated)] +#[allow(explicit_outlives_requirements)] +#[allow(single_use_lifetimes)] +#[allow(unreachable_pub)] +#[allow(clippy::unknown_clippy_lints)] +#[allow(clippy::pattern_type_mismatch)] +#[allow(clippy::redundant_pub_crate)] +#[allow(clippy::type_repetition_in_bounds)] +#[allow(dead_code)] +struct ProjOwn(::pin_project::__private::PhantomData, U); +#[allow(box_pointers)] +#[allow(deprecated)] +#[allow(explicit_outlives_requirements)] +#[allow(single_use_lifetimes)] +#[allow(unreachable_pub)] +#[allow(clippy::unknown_clippy_lints)] +#[allow(clippy::pattern_type_mismatch)] +#[allow(clippy::redundant_pub_crate)] +#[allow(clippy::type_repetition_in_bounds)] +#[allow(unused_qualifications)] +#[allow(clippy::semicolon_if_nothing_returned)] +#[allow(clippy::use_self)] +#[allow(clippy::used_underscore_binding)] +const _: () = { + #[allow(unused_extern_crates)] + extern crate pin_project as _pin_project; + #[allow(dead_code)] + #[allow(clippy::mut_mut)] + struct __TupleStructProjection<'pin, T, U>( + ::pin_project::__private::Pin<&'pin mut (T)>, + &'pin mut (U), + ) + where + TupleStruct: 'pin; + #[allow(dead_code)] + #[allow(clippy::ref_option_ref)] + struct __TupleStructProjectionRef<'pin, T, U>( + ::pin_project::__private::Pin<&'pin (T)>, + &'pin (U), + ) + where + TupleStruct: 'pin; + impl TupleStruct { + fn project<'pin>( + self: _pin_project::__private::Pin<&'pin mut Self>, + ) -> __TupleStructProjection<'pin, T, U> { + unsafe { + let Self(_0, _1) = self.get_unchecked_mut(); + __TupleStructProjection(_pin_project::__private::Pin::new_unchecked(_0), _1) + } + } + #[allow(clippy::missing_const_for_fn)] + fn project_ref<'pin>( + self: _pin_project::__private::Pin<&'pin Self>, + ) -> __TupleStructProjectionRef<'pin, T, U> { + unsafe { + let Self(_0, _1) = self.get_ref(); + __TupleStructProjectionRef(_pin_project::__private::Pin::new_unchecked(_0), _1) + } + } + fn project_replace( + self: _pin_project::__private::Pin<&mut Self>, + __replacement: Self, + ) -> ProjOwn { + unsafe { + let __self_ptr: *mut Self = self.get_unchecked_mut(); + let __guard = + _pin_project::__private::UnsafeOverwriteGuard::new(__self_ptr, __replacement); + let Self(_0, _1) = &mut *__self_ptr; + let __result = ProjOwn( + _pin_project::__private::PhantomData, + _pin_project::__private::ptr::read(_1), + ); + { + let __guard = _pin_project::__private::UnsafeDropInPlaceGuard::new(_0); + } + __result + } + } + } + #[forbid(unaligned_references, safe_packed_borrows)] + fn 
__assert_not_repr_packed(this: &TupleStruct) { + let _ = &this.0; + let _ = &this.1; + } + #[allow(missing_debug_implementations)] + struct __TupleStruct<'pin, T, U> { + __pin_project_use_generics: _pin_project::__private::AlwaysUnpin< + 'pin, + ( + _pin_project::__private::PhantomData, + _pin_project::__private::PhantomData, + ), + >, + __field0: T, + } + impl<'pin, T, U> _pin_project::__private::Unpin for TupleStruct where + __TupleStruct<'pin, T, U>: _pin_project::__private::Unpin + { + } + #[doc(hidden)] + unsafe impl<'pin, T, U> _pin_project::UnsafeUnpin for TupleStruct where + __TupleStruct<'pin, T, U>: _pin_project::__private::Unpin + { + } + trait TupleStructMustNotImplDrop {} + #[allow(clippy::drop_bounds, drop_bounds)] + impl TupleStructMustNotImplDrop for T {} + impl TupleStructMustNotImplDrop for TupleStruct {} + #[doc(hidden)] + impl _pin_project::__private::PinnedDrop for TupleStruct { + unsafe fn drop(self: _pin_project::__private::Pin<&mut Self>) {} + } +}; +fn main() {} diff --git a/third_party/rust/pin-project/tests/expand/naming/tuple_struct-own.rs b/third_party/rust/pin-project/tests/expand/naming/tuple_struct-own.rs new file mode 100644 index 000000000000..a15ad4094a4a --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/naming/tuple_struct-own.rs @@ -0,0 +1,6 @@ +use pin_project::pin_project; + +#[pin_project(project_replace = ProjOwn)] +struct TupleStruct(#[pin] T, U); + +fn main() {} diff --git a/third_party/rust/pin-project/tests/expand/naming/tuple_struct-ref.expanded.rs b/third_party/rust/pin-project/tests/expand/naming/tuple_struct-ref.expanded.rs new file mode 100644 index 000000000000..ea11b6a2f494 --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/naming/tuple_struct-ref.expanded.rs @@ -0,0 +1,95 @@ +use pin_project::pin_project; +# [pin (__private (project_ref = ProjRef))] +struct TupleStruct(#[pin] T, U); +#[allow(box_pointers)] +#[allow(deprecated)] +#[allow(explicit_outlives_requirements)] +#[allow(single_use_lifetimes)] +#[allow(unreachable_pub)] +#[allow(clippy::unknown_clippy_lints)] +#[allow(clippy::pattern_type_mismatch)] +#[allow(clippy::redundant_pub_crate)] +#[allow(clippy::type_repetition_in_bounds)] +#[allow(dead_code)] +#[allow(clippy::ref_option_ref)] +struct ProjRef<'pin, T, U>(::pin_project::__private::Pin<&'pin (T)>, &'pin (U)) +where + TupleStruct: 'pin; +#[allow(box_pointers)] +#[allow(deprecated)] +#[allow(explicit_outlives_requirements)] +#[allow(single_use_lifetimes)] +#[allow(unreachable_pub)] +#[allow(clippy::unknown_clippy_lints)] +#[allow(clippy::pattern_type_mismatch)] +#[allow(clippy::redundant_pub_crate)] +#[allow(clippy::type_repetition_in_bounds)] +#[allow(unused_qualifications)] +#[allow(clippy::semicolon_if_nothing_returned)] +#[allow(clippy::use_self)] +#[allow(clippy::used_underscore_binding)] +const _: () = { + #[allow(unused_extern_crates)] + extern crate pin_project as _pin_project; + #[allow(dead_code)] + #[allow(clippy::mut_mut)] + struct __TupleStructProjection<'pin, T, U>( + ::pin_project::__private::Pin<&'pin mut (T)>, + &'pin mut (U), + ) + where + TupleStruct: 'pin; + impl TupleStruct { + fn project<'pin>( + self: _pin_project::__private::Pin<&'pin mut Self>, + ) -> __TupleStructProjection<'pin, T, U> { + unsafe { + let Self(_0, _1) = self.get_unchecked_mut(); + __TupleStructProjection(_pin_project::__private::Pin::new_unchecked(_0), _1) + } + } + #[allow(clippy::missing_const_for_fn)] + fn project_ref<'pin>( + self: _pin_project::__private::Pin<&'pin Self>, + ) -> ProjRef<'pin, T, U> { + 
unsafe { + let Self(_0, _1) = self.get_ref(); + ProjRef(_pin_project::__private::Pin::new_unchecked(_0), _1) + } + } + } + #[forbid(unaligned_references, safe_packed_borrows)] + fn __assert_not_repr_packed(this: &TupleStruct) { + let _ = &this.0; + let _ = &this.1; + } + #[allow(missing_debug_implementations)] + struct __TupleStruct<'pin, T, U> { + __pin_project_use_generics: _pin_project::__private::AlwaysUnpin< + 'pin, + ( + _pin_project::__private::PhantomData, + _pin_project::__private::PhantomData, + ), + >, + __field0: T, + } + impl<'pin, T, U> _pin_project::__private::Unpin for TupleStruct where + __TupleStruct<'pin, T, U>: _pin_project::__private::Unpin + { + } + #[doc(hidden)] + unsafe impl<'pin, T, U> _pin_project::UnsafeUnpin for TupleStruct where + __TupleStruct<'pin, T, U>: _pin_project::__private::Unpin + { + } + trait TupleStructMustNotImplDrop {} + #[allow(clippy::drop_bounds, drop_bounds)] + impl TupleStructMustNotImplDrop for T {} + impl TupleStructMustNotImplDrop for TupleStruct {} + #[doc(hidden)] + impl _pin_project::__private::PinnedDrop for TupleStruct { + unsafe fn drop(self: _pin_project::__private::Pin<&mut Self>) {} + } +}; +fn main() {} diff --git a/third_party/rust/pin-project/tests/expand/naming/tuple_struct-ref.rs b/third_party/rust/pin-project/tests/expand/naming/tuple_struct-ref.rs new file mode 100644 index 000000000000..cc61edfb7941 --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/naming/tuple_struct-ref.rs @@ -0,0 +1,6 @@ +use pin_project::pin_project; + +#[pin_project(project_ref = ProjRef)] +struct TupleStruct(#[pin] T, U); + +fn main() {} diff --git a/third_party/rust/pin-project/tests/expand/not_unpin/enum.expanded.rs b/third_party/rust/pin-project/tests/expand/not_unpin/enum.expanded.rs new file mode 100644 index 000000000000..5173b545293e --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/not_unpin/enum.expanded.rs @@ -0,0 +1,127 @@ +use pin_project::pin_project; +# [pin (__private (! 
Unpin , project = EnumProj , project_ref = EnumProjRef))] +enum Enum { + Struct { + #[pin] + pinned: T, + unpinned: U, + }, + Tuple(#[pin] T, U), + Unit, +} +#[allow(box_pointers)] +#[allow(deprecated)] +#[allow(explicit_outlives_requirements)] +#[allow(single_use_lifetimes)] +#[allow(unreachable_pub)] +#[allow(clippy::unknown_clippy_lints)] +#[allow(clippy::pattern_type_mismatch)] +#[allow(clippy::redundant_pub_crate)] +#[allow(clippy::type_repetition_in_bounds)] +#[allow(dead_code)] +#[allow(clippy::mut_mut)] +enum EnumProj<'pin, T, U> +where + Enum: 'pin, +{ + Struct { + pinned: ::pin_project::__private::Pin<&'pin mut (T)>, + unpinned: &'pin mut (U), + }, + Tuple(::pin_project::__private::Pin<&'pin mut (T)>, &'pin mut (U)), + Unit, +} +#[allow(box_pointers)] +#[allow(deprecated)] +#[allow(explicit_outlives_requirements)] +#[allow(single_use_lifetimes)] +#[allow(unreachable_pub)] +#[allow(clippy::unknown_clippy_lints)] +#[allow(clippy::pattern_type_mismatch)] +#[allow(clippy::redundant_pub_crate)] +#[allow(clippy::type_repetition_in_bounds)] +#[allow(dead_code)] +#[allow(clippy::ref_option_ref)] +enum EnumProjRef<'pin, T, U> +where + Enum: 'pin, +{ + Struct { + pinned: ::pin_project::__private::Pin<&'pin (T)>, + unpinned: &'pin (U), + }, + Tuple(::pin_project::__private::Pin<&'pin (T)>, &'pin (U)), + Unit, +} +#[allow(box_pointers)] +#[allow(deprecated)] +#[allow(explicit_outlives_requirements)] +#[allow(single_use_lifetimes)] +#[allow(unreachable_pub)] +#[allow(clippy::unknown_clippy_lints)] +#[allow(clippy::pattern_type_mismatch)] +#[allow(clippy::redundant_pub_crate)] +#[allow(clippy::type_repetition_in_bounds)] +#[allow(unused_qualifications)] +#[allow(clippy::semicolon_if_nothing_returned)] +#[allow(clippy::use_self)] +#[allow(clippy::used_underscore_binding)] +const _: () = { + #[allow(unused_extern_crates)] + extern crate pin_project as _pin_project; + impl Enum { + fn project<'pin>( + self: _pin_project::__private::Pin<&'pin mut Self>, + ) -> EnumProj<'pin, T, U> { + unsafe { + match self.get_unchecked_mut() { + Self::Struct { pinned, unpinned } => EnumProj::Struct { + pinned: _pin_project::__private::Pin::new_unchecked(pinned), + unpinned, + }, + Self::Tuple(_0, _1) => { + EnumProj::Tuple(_pin_project::__private::Pin::new_unchecked(_0), _1) + } + Self::Unit => EnumProj::Unit, + } + } + } + #[allow(clippy::missing_const_for_fn)] + fn project_ref<'pin>( + self: _pin_project::__private::Pin<&'pin Self>, + ) -> EnumProjRef<'pin, T, U> { + unsafe { + match self.get_ref() { + Self::Struct { pinned, unpinned } => EnumProjRef::Struct { + pinned: _pin_project::__private::Pin::new_unchecked(pinned), + unpinned, + }, + Self::Tuple(_0, _1) => { + EnumProjRef::Tuple(_pin_project::__private::Pin::new_unchecked(_0), _1) + } + Self::Unit => EnumProjRef::Unit, + } + } + } + } + impl<'pin, T, U> _pin_project::__private::Unpin for Enum where + _pin_project::__private::Wrapper<'pin, _pin_project::__private::PhantomPinned>: + _pin_project::__private::Unpin + { + } + #[doc(hidden)] + unsafe impl<'pin, T, U> _pin_project::UnsafeUnpin for Enum where + _pin_project::__private::Wrapper<'pin, _pin_project::__private::PhantomPinned>: + _pin_project::__private::Unpin + { + } + trait EnumMustNotImplDrop {} + #[allow(clippy::drop_bounds, drop_bounds)] + impl EnumMustNotImplDrop for T {} + impl EnumMustNotImplDrop for Enum {} + #[doc(hidden)] + impl _pin_project::__private::PinnedDrop for Enum { + unsafe fn drop(self: _pin_project::__private::Pin<&mut Self>) {} + } +}; +fn main() {} diff --git 
a/third_party/rust/pin-project/tests/expand/not_unpin/enum.rs b/third_party/rust/pin-project/tests/expand/not_unpin/enum.rs new file mode 100644 index 000000000000..ac0b3b80c036 --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/not_unpin/enum.rs @@ -0,0 +1,14 @@ +use pin_project::pin_project; + +#[pin_project(!Unpin, project = EnumProj, project_ref = EnumProjRef)] +enum Enum { + Struct { + #[pin] + pinned: T, + unpinned: U, + }, + Tuple(#[pin] T, U), + Unit, +} + +fn main() {} diff --git a/third_party/rust/pin-project/tests/expand/not_unpin/struct.expanded.rs b/third_party/rust/pin-project/tests/expand/not_unpin/struct.expanded.rs new file mode 100644 index 000000000000..e9d892254e8f --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/not_unpin/struct.expanded.rs @@ -0,0 +1,92 @@ +use pin_project::pin_project; +# [pin (__private (! Unpin))] +struct Struct { + #[pin] + pinned: T, + unpinned: U, +} +#[allow(box_pointers)] +#[allow(deprecated)] +#[allow(explicit_outlives_requirements)] +#[allow(single_use_lifetimes)] +#[allow(unreachable_pub)] +#[allow(clippy::unknown_clippy_lints)] +#[allow(clippy::pattern_type_mismatch)] +#[allow(clippy::redundant_pub_crate)] +#[allow(clippy::type_repetition_in_bounds)] +#[allow(unused_qualifications)] +#[allow(clippy::semicolon_if_nothing_returned)] +#[allow(clippy::use_self)] +#[allow(clippy::used_underscore_binding)] +const _: () = { + #[allow(unused_extern_crates)] + extern crate pin_project as _pin_project; + #[allow(dead_code)] + #[allow(clippy::mut_mut)] + struct __StructProjection<'pin, T, U> + where + Struct: 'pin, + { + pinned: ::pin_project::__private::Pin<&'pin mut (T)>, + unpinned: &'pin mut (U), + } + #[allow(dead_code)] + #[allow(clippy::ref_option_ref)] + struct __StructProjectionRef<'pin, T, U> + where + Struct: 'pin, + { + pinned: ::pin_project::__private::Pin<&'pin (T)>, + unpinned: &'pin (U), + } + impl Struct { + fn project<'pin>( + self: _pin_project::__private::Pin<&'pin mut Self>, + ) -> __StructProjection<'pin, T, U> { + unsafe { + let Self { pinned, unpinned } = self.get_unchecked_mut(); + __StructProjection { + pinned: _pin_project::__private::Pin::new_unchecked(pinned), + unpinned, + } + } + } + #[allow(clippy::missing_const_for_fn)] + fn project_ref<'pin>( + self: _pin_project::__private::Pin<&'pin Self>, + ) -> __StructProjectionRef<'pin, T, U> { + unsafe { + let Self { pinned, unpinned } = self.get_ref(); + __StructProjectionRef { + pinned: _pin_project::__private::Pin::new_unchecked(pinned), + unpinned, + } + } + } + } + #[forbid(unaligned_references, safe_packed_borrows)] + fn __assert_not_repr_packed(this: &Struct) { + let _ = &this.pinned; + let _ = &this.unpinned; + } + impl<'pin, T, U> _pin_project::__private::Unpin for Struct where + _pin_project::__private::Wrapper<'pin, _pin_project::__private::PhantomPinned>: + _pin_project::__private::Unpin + { + } + #[doc(hidden)] + unsafe impl<'pin, T, U> _pin_project::UnsafeUnpin for Struct where + _pin_project::__private::Wrapper<'pin, _pin_project::__private::PhantomPinned>: + _pin_project::__private::Unpin + { + } + trait StructMustNotImplDrop {} + #[allow(clippy::drop_bounds, drop_bounds)] + impl StructMustNotImplDrop for T {} + impl StructMustNotImplDrop for Struct {} + #[doc(hidden)] + impl _pin_project::__private::PinnedDrop for Struct { + unsafe fn drop(self: _pin_project::__private::Pin<&mut Self>) {} + } +}; +fn main() {} diff --git a/third_party/rust/pin-project/tests/expand/not_unpin/struct.rs 
b/third_party/rust/pin-project/tests/expand/not_unpin/struct.rs new file mode 100644 index 000000000000..233e6d41f8ba --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/not_unpin/struct.rs @@ -0,0 +1,10 @@ +use pin_project::pin_project; + +#[pin_project(!Unpin)] +struct Struct { + #[pin] + pinned: T, + unpinned: U, +} + +fn main() {} diff --git a/third_party/rust/pin-project/tests/expand/not_unpin/tuple_struct.expanded.rs b/third_party/rust/pin-project/tests/expand/not_unpin/tuple_struct.expanded.rs new file mode 100644 index 000000000000..cefb61db9b06 --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/not_unpin/tuple_struct.expanded.rs @@ -0,0 +1,80 @@ +use pin_project::pin_project; +# [pin (__private (! Unpin))] +struct TupleStruct(#[pin] T, U); +#[allow(box_pointers)] +#[allow(deprecated)] +#[allow(explicit_outlives_requirements)] +#[allow(single_use_lifetimes)] +#[allow(unreachable_pub)] +#[allow(clippy::unknown_clippy_lints)] +#[allow(clippy::pattern_type_mismatch)] +#[allow(clippy::redundant_pub_crate)] +#[allow(clippy::type_repetition_in_bounds)] +#[allow(unused_qualifications)] +#[allow(clippy::semicolon_if_nothing_returned)] +#[allow(clippy::use_self)] +#[allow(clippy::used_underscore_binding)] +const _: () = { + #[allow(unused_extern_crates)] + extern crate pin_project as _pin_project; + #[allow(dead_code)] + #[allow(clippy::mut_mut)] + struct __TupleStructProjection<'pin, T, U>( + ::pin_project::__private::Pin<&'pin mut (T)>, + &'pin mut (U), + ) + where + TupleStruct: 'pin; + #[allow(dead_code)] + #[allow(clippy::ref_option_ref)] + struct __TupleStructProjectionRef<'pin, T, U>( + ::pin_project::__private::Pin<&'pin (T)>, + &'pin (U), + ) + where + TupleStruct: 'pin; + impl TupleStruct { + fn project<'pin>( + self: _pin_project::__private::Pin<&'pin mut Self>, + ) -> __TupleStructProjection<'pin, T, U> { + unsafe { + let Self(_0, _1) = self.get_unchecked_mut(); + __TupleStructProjection(_pin_project::__private::Pin::new_unchecked(_0), _1) + } + } + #[allow(clippy::missing_const_for_fn)] + fn project_ref<'pin>( + self: _pin_project::__private::Pin<&'pin Self>, + ) -> __TupleStructProjectionRef<'pin, T, U> { + unsafe { + let Self(_0, _1) = self.get_ref(); + __TupleStructProjectionRef(_pin_project::__private::Pin::new_unchecked(_0), _1) + } + } + } + #[forbid(unaligned_references, safe_packed_borrows)] + fn __assert_not_repr_packed(this: &TupleStruct) { + let _ = &this.0; + let _ = &this.1; + } + impl<'pin, T, U> _pin_project::__private::Unpin for TupleStruct where + _pin_project::__private::Wrapper<'pin, _pin_project::__private::PhantomPinned>: + _pin_project::__private::Unpin + { + } + #[doc(hidden)] + unsafe impl<'pin, T, U> _pin_project::UnsafeUnpin for TupleStruct where + _pin_project::__private::Wrapper<'pin, _pin_project::__private::PhantomPinned>: + _pin_project::__private::Unpin + { + } + trait TupleStructMustNotImplDrop {} + #[allow(clippy::drop_bounds, drop_bounds)] + impl TupleStructMustNotImplDrop for T {} + impl TupleStructMustNotImplDrop for TupleStruct {} + #[doc(hidden)] + impl _pin_project::__private::PinnedDrop for TupleStruct { + unsafe fn drop(self: _pin_project::__private::Pin<&mut Self>) {} + } +}; +fn main() {} diff --git a/third_party/rust/pin-project/tests/expand/not_unpin/tuple_struct.rs b/third_party/rust/pin-project/tests/expand/not_unpin/tuple_struct.rs new file mode 100644 index 000000000000..c8065db6ade1 --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/not_unpin/tuple_struct.rs @@ -0,0 +1,6 @@ +use 
pin_project::pin_project; + +#[pin_project(!Unpin)] +struct TupleStruct(#[pin] T, U); + +fn main() {} diff --git a/third_party/rust/pin-project/tests/expand/pinned_drop/enum.expanded.rs b/third_party/rust/pin-project/tests/expand/pinned_drop/enum.expanded.rs new file mode 100644 index 000000000000..e1bc486bb71d --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/pinned_drop/enum.expanded.rs @@ -0,0 +1,149 @@ +use std::pin::Pin; +use pin_project::{pin_project, pinned_drop}; +# [pin (__private (PinnedDrop , project = EnumProj , project_ref = EnumProjRef))] +enum Enum { + Struct { + #[pin] + pinned: T, + unpinned: U, + }, + Tuple(#[pin] T, U), + Unit, +} +#[allow(box_pointers)] +#[allow(deprecated)] +#[allow(explicit_outlives_requirements)] +#[allow(single_use_lifetimes)] +#[allow(unreachable_pub)] +#[allow(clippy::unknown_clippy_lints)] +#[allow(clippy::pattern_type_mismatch)] +#[allow(clippy::redundant_pub_crate)] +#[allow(clippy::type_repetition_in_bounds)] +#[allow(dead_code)] +#[allow(clippy::mut_mut)] +enum EnumProj<'pin, T, U> +where + Enum: 'pin, +{ + Struct { + pinned: ::pin_project::__private::Pin<&'pin mut (T)>, + unpinned: &'pin mut (U), + }, + Tuple(::pin_project::__private::Pin<&'pin mut (T)>, &'pin mut (U)), + Unit, +} +#[allow(box_pointers)] +#[allow(deprecated)] +#[allow(explicit_outlives_requirements)] +#[allow(single_use_lifetimes)] +#[allow(unreachable_pub)] +#[allow(clippy::unknown_clippy_lints)] +#[allow(clippy::pattern_type_mismatch)] +#[allow(clippy::redundant_pub_crate)] +#[allow(clippy::type_repetition_in_bounds)] +#[allow(dead_code)] +#[allow(clippy::ref_option_ref)] +enum EnumProjRef<'pin, T, U> +where + Enum: 'pin, +{ + Struct { + pinned: ::pin_project::__private::Pin<&'pin (T)>, + unpinned: &'pin (U), + }, + Tuple(::pin_project::__private::Pin<&'pin (T)>, &'pin (U)), + Unit, +} +#[allow(box_pointers)] +#[allow(deprecated)] +#[allow(explicit_outlives_requirements)] +#[allow(single_use_lifetimes)] +#[allow(unreachable_pub)] +#[allow(clippy::unknown_clippy_lints)] +#[allow(clippy::pattern_type_mismatch)] +#[allow(clippy::redundant_pub_crate)] +#[allow(clippy::type_repetition_in_bounds)] +#[allow(unused_qualifications)] +#[allow(clippy::semicolon_if_nothing_returned)] +#[allow(clippy::use_self)] +#[allow(clippy::used_underscore_binding)] +const _: () = { + #[allow(unused_extern_crates)] + extern crate pin_project as _pin_project; + impl Enum { + fn project<'pin>( + self: _pin_project::__private::Pin<&'pin mut Self>, + ) -> EnumProj<'pin, T, U> { + unsafe { + match self.get_unchecked_mut() { + Self::Struct { pinned, unpinned } => EnumProj::Struct { + pinned: _pin_project::__private::Pin::new_unchecked(pinned), + unpinned, + }, + Self::Tuple(_0, _1) => { + EnumProj::Tuple(_pin_project::__private::Pin::new_unchecked(_0), _1) + } + Self::Unit => EnumProj::Unit, + } + } + } + #[allow(clippy::missing_const_for_fn)] + fn project_ref<'pin>( + self: _pin_project::__private::Pin<&'pin Self>, + ) -> EnumProjRef<'pin, T, U> { + unsafe { + match self.get_ref() { + Self::Struct { pinned, unpinned } => EnumProjRef::Struct { + pinned: _pin_project::__private::Pin::new_unchecked(pinned), + unpinned, + }, + Self::Tuple(_0, _1) => { + EnumProjRef::Tuple(_pin_project::__private::Pin::new_unchecked(_0), _1) + } + Self::Unit => EnumProjRef::Unit, + } + } + } + } + #[allow(missing_debug_implementations)] + struct __Enum<'pin, T, U> { + __pin_project_use_generics: _pin_project::__private::AlwaysUnpin< + 'pin, + ( + _pin_project::__private::PhantomData, + 
_pin_project::__private::PhantomData, + ), + >, + __field0: T, + __field1: T, + } + impl<'pin, T, U> _pin_project::__private::Unpin for Enum where + __Enum<'pin, T, U>: _pin_project::__private::Unpin + { + } + #[doc(hidden)] + unsafe impl<'pin, T, U> _pin_project::UnsafeUnpin for Enum where + __Enum<'pin, T, U>: _pin_project::__private::Unpin + { + } + impl _pin_project::__private::Drop for Enum { + fn drop(&mut self) { + unsafe { + let __pinned_self = _pin_project::__private::Pin::new_unchecked(self); + _pin_project::__private::PinnedDrop::drop(__pinned_self); + } + } + } +}; +#[doc(hidden)] +impl ::pin_project::__private::PinnedDrop for Enum { + unsafe fn drop(self: Pin<&mut Self>) { + #[allow(clippy::needless_pass_by_value)] + fn __drop_inner(__self: Pin<&mut Enum>) { + fn __drop_inner() {} + let _ = __self; + } + __drop_inner(self); + } +} +fn main() {} diff --git a/third_party/rust/pin-project/tests/expand/pinned_drop/enum.rs b/third_party/rust/pin-project/tests/expand/pinned_drop/enum.rs new file mode 100644 index 000000000000..c162ef6b2b8f --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/pinned_drop/enum.rs @@ -0,0 +1,23 @@ +use std::pin::Pin; + +use pin_project::{pin_project, pinned_drop}; + +#[pin_project(PinnedDrop, project = EnumProj, project_ref = EnumProjRef)] +enum Enum { + Struct { + #[pin] + pinned: T, + unpinned: U, + }, + Tuple(#[pin] T, U), + Unit, +} + +#[pinned_drop] +impl PinnedDrop for Enum { + fn drop(self: Pin<&mut Self>) { + let _ = self; + } +} + +fn main() {} diff --git a/third_party/rust/pin-project/tests/expand/pinned_drop/struct.expanded.rs b/third_party/rust/pin-project/tests/expand/pinned_drop/struct.expanded.rs new file mode 100644 index 000000000000..22274429dd7b --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/pinned_drop/struct.expanded.rs @@ -0,0 +1,113 @@ +use std::pin::Pin; +use pin_project::{pin_project, pinned_drop}; +#[pin(__private(PinnedDrop))] +struct Struct { + #[pin] + pinned: T, + unpinned: U, +} +#[allow(box_pointers)] +#[allow(deprecated)] +#[allow(explicit_outlives_requirements)] +#[allow(single_use_lifetimes)] +#[allow(unreachable_pub)] +#[allow(clippy::unknown_clippy_lints)] +#[allow(clippy::pattern_type_mismatch)] +#[allow(clippy::redundant_pub_crate)] +#[allow(clippy::type_repetition_in_bounds)] +#[allow(unused_qualifications)] +#[allow(clippy::semicolon_if_nothing_returned)] +#[allow(clippy::use_self)] +#[allow(clippy::used_underscore_binding)] +const _: () = { + #[allow(unused_extern_crates)] + extern crate pin_project as _pin_project; + #[allow(dead_code)] + #[allow(clippy::mut_mut)] + struct __StructProjection<'pin, T, U> + where + Struct: 'pin, + { + pinned: ::pin_project::__private::Pin<&'pin mut (T)>, + unpinned: &'pin mut (U), + } + #[allow(dead_code)] + #[allow(clippy::ref_option_ref)] + struct __StructProjectionRef<'pin, T, U> + where + Struct: 'pin, + { + pinned: ::pin_project::__private::Pin<&'pin (T)>, + unpinned: &'pin (U), + } + impl Struct { + fn project<'pin>( + self: _pin_project::__private::Pin<&'pin mut Self>, + ) -> __StructProjection<'pin, T, U> { + unsafe { + let Self { pinned, unpinned } = self.get_unchecked_mut(); + __StructProjection { + pinned: _pin_project::__private::Pin::new_unchecked(pinned), + unpinned, + } + } + } + #[allow(clippy::missing_const_for_fn)] + fn project_ref<'pin>( + self: _pin_project::__private::Pin<&'pin Self>, + ) -> __StructProjectionRef<'pin, T, U> { + unsafe { + let Self { pinned, unpinned } = self.get_ref(); + __StructProjectionRef { + pinned: 
_pin_project::__private::Pin::new_unchecked(pinned), + unpinned, + } + } + } + } + #[forbid(unaligned_references, safe_packed_borrows)] + fn __assert_not_repr_packed(this: &Struct) { + let _ = &this.pinned; + let _ = &this.unpinned; + } + #[allow(missing_debug_implementations)] + struct __Struct<'pin, T, U> { + __pin_project_use_generics: _pin_project::__private::AlwaysUnpin< + 'pin, + ( + _pin_project::__private::PhantomData, + _pin_project::__private::PhantomData, + ), + >, + __field0: T, + } + impl<'pin, T, U> _pin_project::__private::Unpin for Struct where + __Struct<'pin, T, U>: _pin_project::__private::Unpin + { + } + #[doc(hidden)] + unsafe impl<'pin, T, U> _pin_project::UnsafeUnpin for Struct where + __Struct<'pin, T, U>: _pin_project::__private::Unpin + { + } + impl _pin_project::__private::Drop for Struct { + fn drop(&mut self) { + unsafe { + let __pinned_self = _pin_project::__private::Pin::new_unchecked(self); + _pin_project::__private::PinnedDrop::drop(__pinned_self); + } + } + } +}; +#[doc(hidden)] +impl ::pin_project::__private::PinnedDrop for Struct { + unsafe fn drop(self: Pin<&mut Self>) { + #[allow(clippy::needless_pass_by_value)] + fn __drop_inner(__self: Pin<&mut Struct>) { + fn __drop_inner() {} + let _ = __self; + } + __drop_inner(self); + } +} +fn main() {} diff --git a/third_party/rust/pin-project/tests/expand/pinned_drop/struct.rs b/third_party/rust/pin-project/tests/expand/pinned_drop/struct.rs new file mode 100644 index 000000000000..691d3cb4a0fb --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/pinned_drop/struct.rs @@ -0,0 +1,19 @@ +use std::pin::Pin; + +use pin_project::{pin_project, pinned_drop}; + +#[pin_project(PinnedDrop)] +struct Struct { + #[pin] + pinned: T, + unpinned: U, +} + +#[pinned_drop] +impl PinnedDrop for Struct { + fn drop(self: Pin<&mut Self>) { + let _ = self; + } +} + +fn main() {} diff --git a/third_party/rust/pin-project/tests/expand/pinned_drop/tuple_struct.expanded.rs b/third_party/rust/pin-project/tests/expand/pinned_drop/tuple_struct.expanded.rs new file mode 100644 index 000000000000..8c7433e78e98 --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/pinned_drop/tuple_struct.expanded.rs @@ -0,0 +1,101 @@ +use std::pin::Pin; +use pin_project::{pin_project, pinned_drop}; +#[pin(__private(PinnedDrop))] +struct TupleStruct(#[pin] T, U); +#[allow(box_pointers)] +#[allow(deprecated)] +#[allow(explicit_outlives_requirements)] +#[allow(single_use_lifetimes)] +#[allow(unreachable_pub)] +#[allow(clippy::unknown_clippy_lints)] +#[allow(clippy::pattern_type_mismatch)] +#[allow(clippy::redundant_pub_crate)] +#[allow(clippy::type_repetition_in_bounds)] +#[allow(unused_qualifications)] +#[allow(clippy::semicolon_if_nothing_returned)] +#[allow(clippy::use_self)] +#[allow(clippy::used_underscore_binding)] +const _: () = { + #[allow(unused_extern_crates)] + extern crate pin_project as _pin_project; + #[allow(dead_code)] + #[allow(clippy::mut_mut)] + struct __TupleStructProjection<'pin, T, U>( + ::pin_project::__private::Pin<&'pin mut (T)>, + &'pin mut (U), + ) + where + TupleStruct: 'pin; + #[allow(dead_code)] + #[allow(clippy::ref_option_ref)] + struct __TupleStructProjectionRef<'pin, T, U>( + ::pin_project::__private::Pin<&'pin (T)>, + &'pin (U), + ) + where + TupleStruct: 'pin; + impl TupleStruct { + fn project<'pin>( + self: _pin_project::__private::Pin<&'pin mut Self>, + ) -> __TupleStructProjection<'pin, T, U> { + unsafe { + let Self(_0, _1) = self.get_unchecked_mut(); + 
__TupleStructProjection(_pin_project::__private::Pin::new_unchecked(_0), _1) + } + } + #[allow(clippy::missing_const_for_fn)] + fn project_ref<'pin>( + self: _pin_project::__private::Pin<&'pin Self>, + ) -> __TupleStructProjectionRef<'pin, T, U> { + unsafe { + let Self(_0, _1) = self.get_ref(); + __TupleStructProjectionRef(_pin_project::__private::Pin::new_unchecked(_0), _1) + } + } + } + #[forbid(unaligned_references, safe_packed_borrows)] + fn __assert_not_repr_packed(this: &TupleStruct) { + let _ = &this.0; + let _ = &this.1; + } + #[allow(missing_debug_implementations)] + struct __TupleStruct<'pin, T, U> { + __pin_project_use_generics: _pin_project::__private::AlwaysUnpin< + 'pin, + ( + _pin_project::__private::PhantomData, + _pin_project::__private::PhantomData, + ), + >, + __field0: T, + } + impl<'pin, T, U> _pin_project::__private::Unpin for TupleStruct where + __TupleStruct<'pin, T, U>: _pin_project::__private::Unpin + { + } + #[doc(hidden)] + unsafe impl<'pin, T, U> _pin_project::UnsafeUnpin for TupleStruct where + __TupleStruct<'pin, T, U>: _pin_project::__private::Unpin + { + } + impl _pin_project::__private::Drop for TupleStruct { + fn drop(&mut self) { + unsafe { + let __pinned_self = _pin_project::__private::Pin::new_unchecked(self); + _pin_project::__private::PinnedDrop::drop(__pinned_self); + } + } + } +}; +#[doc(hidden)] +impl ::pin_project::__private::PinnedDrop for TupleStruct { + unsafe fn drop(self: Pin<&mut Self>) { + #[allow(clippy::needless_pass_by_value)] + fn __drop_inner(__self: Pin<&mut TupleStruct>) { + fn __drop_inner() {} + let _ = __self; + } + __drop_inner(self); + } +} +fn main() {} diff --git a/third_party/rust/pin-project/tests/expand/pinned_drop/tuple_struct.rs b/third_party/rust/pin-project/tests/expand/pinned_drop/tuple_struct.rs new file mode 100644 index 000000000000..1f4917c22115 --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/pinned_drop/tuple_struct.rs @@ -0,0 +1,15 @@ +use std::pin::Pin; + +use pin_project::{pin_project, pinned_drop}; + +#[pin_project(PinnedDrop)] +struct TupleStruct(#[pin] T, U); + +#[pinned_drop] +impl PinnedDrop for TupleStruct { + fn drop(self: Pin<&mut Self>) { + let _ = self; + } +} + +fn main() {} diff --git a/third_party/rust/pin-project/tests/expand/project_replace/enum.expanded.rs b/third_party/rust/pin-project/tests/expand/project_replace/enum.expanded.rs new file mode 100644 index 000000000000..d2b2094a766c --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/project_replace/enum.expanded.rs @@ -0,0 +1,118 @@ +use pin_project::pin_project; +# [pin (__private (project_replace = EnumProjOwn))] +enum Enum { + Struct { + #[pin] + pinned: T, + unpinned: U, + }, + Tuple(#[pin] T, U), + Unit, +} +#[allow(box_pointers)] +#[allow(deprecated)] +#[allow(explicit_outlives_requirements)] +#[allow(single_use_lifetimes)] +#[allow(unreachable_pub)] +#[allow(clippy::unknown_clippy_lints)] +#[allow(clippy::pattern_type_mismatch)] +#[allow(clippy::redundant_pub_crate)] +#[allow(clippy::type_repetition_in_bounds)] +#[allow(dead_code)] +#[allow(variant_size_differences)] +#[allow(clippy::large_enum_variant)] +enum EnumProjOwn { + Struct { + pinned: ::pin_project::__private::PhantomData, + unpinned: U, + }, + Tuple(::pin_project::__private::PhantomData, U), + Unit, +} +#[allow(box_pointers)] +#[allow(deprecated)] +#[allow(explicit_outlives_requirements)] +#[allow(single_use_lifetimes)] +#[allow(unreachable_pub)] +#[allow(clippy::unknown_clippy_lints)] +#[allow(clippy::pattern_type_mismatch)] 
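The project_replace expansion for enums around this point is driven by the fixture that follows it. As a hedged illustration of how the generated project_replace method is typically used (not part of the vendored patch; Slot, take_unpinned, and the field names are hypothetical):

    use std::pin::Pin;
    use pin_project::pin_project;

    #[pin_project(project_replace = SlotProjOwn)]
    enum Slot<T, U> {
        Occupied {
            #[pin]
            pinned: T,
            unpinned: U,
        },
        Empty,
    }

    // Moves the unpinned payload out of an occupied slot, leaving it Empty.
    fn take_unpinned<T, U>(slot: Pin<&mut Slot<T, U>>) -> Option<U> {
        match slot.project_replace(Slot::Empty) {
            // The pinned field is dropped in place; only `unpinned` is moved out.
            SlotProjOwn::Occupied { unpinned, .. } => Some(unpinned),
            SlotProjOwn::Empty => None,
        }
    }

    fn main() {
        let mut slot = Box::pin(Slot::Occupied { pinned: 1u8, unpinned: "payload" });
        assert_eq!(take_unpinned(slot.as_mut()), Some("payload"));
        assert!(matches!(
            slot.as_mut().project_replace(Slot::Empty),
            SlotProjOwn::Empty
        ));
    }

The owned projection replaces pinned fields with PhantomData because a pinned value may never be moved out from behind its Pin; it can only be dropped in place, which is exactly what the guards in the expanded code arrange.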
+#[allow(clippy::redundant_pub_crate)] +#[allow(clippy::type_repetition_in_bounds)] +#[allow(unused_qualifications)] +#[allow(clippy::semicolon_if_nothing_returned)] +#[allow(clippy::use_self)] +#[allow(clippy::used_underscore_binding)] +const _: () = { + #[allow(unused_extern_crates)] + extern crate pin_project as _pin_project; + impl Enum { + fn project_replace( + self: _pin_project::__private::Pin<&mut Self>, + __replacement: Self, + ) -> EnumProjOwn { + unsafe { + let __self_ptr: *mut Self = self.get_unchecked_mut(); + let __guard = + _pin_project::__private::UnsafeOverwriteGuard::new(__self_ptr, __replacement); + match &mut *__self_ptr { + Self::Struct { pinned, unpinned } => { + let __result = EnumProjOwn::Struct { + pinned: _pin_project::__private::PhantomData, + unpinned: _pin_project::__private::ptr::read(unpinned), + }; + { + let __guard = + _pin_project::__private::UnsafeDropInPlaceGuard::new(pinned); + } + __result + } + Self::Tuple(_0, _1) => { + let __result = EnumProjOwn::Tuple( + _pin_project::__private::PhantomData, + _pin_project::__private::ptr::read(_1), + ); + { + let __guard = _pin_project::__private::UnsafeDropInPlaceGuard::new(_0); + } + __result + } + Self::Unit => { + let __result = EnumProjOwn::Unit; + {} + __result + } + } + } + } + } + #[allow(missing_debug_implementations)] + struct __Enum<'pin, T, U> { + __pin_project_use_generics: _pin_project::__private::AlwaysUnpin< + 'pin, + ( + _pin_project::__private::PhantomData, + _pin_project::__private::PhantomData, + ), + >, + __field0: T, + __field1: T, + } + impl<'pin, T, U> _pin_project::__private::Unpin for Enum where + __Enum<'pin, T, U>: _pin_project::__private::Unpin + { + } + #[doc(hidden)] + unsafe impl<'pin, T, U> _pin_project::UnsafeUnpin for Enum where + __Enum<'pin, T, U>: _pin_project::__private::Unpin + { + } + trait EnumMustNotImplDrop {} + #[allow(clippy::drop_bounds, drop_bounds)] + impl EnumMustNotImplDrop for T {} + impl EnumMustNotImplDrop for Enum {} + #[doc(hidden)] + impl _pin_project::__private::PinnedDrop for Enum { + unsafe fn drop(self: _pin_project::__private::Pin<&mut Self>) {} + } +}; +fn main() {} diff --git a/third_party/rust/pin-project/tests/expand/project_replace/enum.rs b/third_party/rust/pin-project/tests/expand/project_replace/enum.rs new file mode 100644 index 000000000000..d737f649bf85 --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/project_replace/enum.rs @@ -0,0 +1,14 @@ +use pin_project::pin_project; + +#[pin_project(project_replace = EnumProjOwn)] +enum Enum { + Struct { + #[pin] + pinned: T, + unpinned: U, + }, + Tuple(#[pin] T, U), + Unit, +} + +fn main() {} diff --git a/third_party/rust/pin-project/tests/expand/project_replace/struct.expanded.rs b/third_party/rust/pin-project/tests/expand/project_replace/struct.expanded.rs new file mode 100644 index 000000000000..aa5fd54f6782 --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/project_replace/struct.expanded.rs @@ -0,0 +1,125 @@ +use pin_project::pin_project; +#[pin(__private(project_replace))] +struct Struct { + #[pin] + pinned: T, + unpinned: U, +} +#[allow(box_pointers)] +#[allow(deprecated)] +#[allow(explicit_outlives_requirements)] +#[allow(single_use_lifetimes)] +#[allow(unreachable_pub)] +#[allow(clippy::unknown_clippy_lints)] +#[allow(clippy::pattern_type_mismatch)] +#[allow(clippy::redundant_pub_crate)] +#[allow(clippy::type_repetition_in_bounds)] +#[allow(unused_qualifications)] +#[allow(clippy::semicolon_if_nothing_returned)] +#[allow(clippy::use_self)] 
+#[allow(clippy::used_underscore_binding)] +const _: () = { + #[allow(unused_extern_crates)] + extern crate pin_project as _pin_project; + #[allow(dead_code)] + #[allow(clippy::mut_mut)] + struct __StructProjection<'pin, T, U> + where + Struct: 'pin, + { + pinned: ::pin_project::__private::Pin<&'pin mut (T)>, + unpinned: &'pin mut (U), + } + #[allow(dead_code)] + #[allow(clippy::ref_option_ref)] + struct __StructProjectionRef<'pin, T, U> + where + Struct: 'pin, + { + pinned: ::pin_project::__private::Pin<&'pin (T)>, + unpinned: &'pin (U), + } + #[allow(dead_code)] + struct __StructProjectionOwned { + pinned: ::pin_project::__private::PhantomData, + unpinned: U, + } + impl Struct { + fn project<'pin>( + self: _pin_project::__private::Pin<&'pin mut Self>, + ) -> __StructProjection<'pin, T, U> { + unsafe { + let Self { pinned, unpinned } = self.get_unchecked_mut(); + __StructProjection { + pinned: _pin_project::__private::Pin::new_unchecked(pinned), + unpinned, + } + } + } + #[allow(clippy::missing_const_for_fn)] + fn project_ref<'pin>( + self: _pin_project::__private::Pin<&'pin Self>, + ) -> __StructProjectionRef<'pin, T, U> { + unsafe { + let Self { pinned, unpinned } = self.get_ref(); + __StructProjectionRef { + pinned: _pin_project::__private::Pin::new_unchecked(pinned), + unpinned, + } + } + } + fn project_replace( + self: _pin_project::__private::Pin<&mut Self>, + __replacement: Self, + ) -> __StructProjectionOwned { + unsafe { + let __self_ptr: *mut Self = self.get_unchecked_mut(); + let __guard = + _pin_project::__private::UnsafeOverwriteGuard::new(__self_ptr, __replacement); + let Self { pinned, unpinned } = &mut *__self_ptr; + let __result = __StructProjectionOwned { + pinned: _pin_project::__private::PhantomData, + unpinned: _pin_project::__private::ptr::read(unpinned), + }; + { + let __guard = _pin_project::__private::UnsafeDropInPlaceGuard::new(pinned); + } + __result + } + } + } + #[forbid(unaligned_references, safe_packed_borrows)] + fn __assert_not_repr_packed(this: &Struct) { + let _ = &this.pinned; + let _ = &this.unpinned; + } + #[allow(missing_debug_implementations)] + struct __Struct<'pin, T, U> { + __pin_project_use_generics: _pin_project::__private::AlwaysUnpin< + 'pin, + ( + _pin_project::__private::PhantomData, + _pin_project::__private::PhantomData, + ), + >, + __field0: T, + } + impl<'pin, T, U> _pin_project::__private::Unpin for Struct where + __Struct<'pin, T, U>: _pin_project::__private::Unpin + { + } + #[doc(hidden)] + unsafe impl<'pin, T, U> _pin_project::UnsafeUnpin for Struct where + __Struct<'pin, T, U>: _pin_project::__private::Unpin + { + } + trait StructMustNotImplDrop {} + #[allow(clippy::drop_bounds, drop_bounds)] + impl StructMustNotImplDrop for T {} + impl StructMustNotImplDrop for Struct {} + #[doc(hidden)] + impl _pin_project::__private::PinnedDrop for Struct { + unsafe fn drop(self: _pin_project::__private::Pin<&mut Self>) {} + } +}; +fn main() {} diff --git a/third_party/rust/pin-project/tests/expand/project_replace/struct.rs b/third_party/rust/pin-project/tests/expand/project_replace/struct.rs new file mode 100644 index 000000000000..5865526a58fb --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/project_replace/struct.rs @@ -0,0 +1,10 @@ +use pin_project::pin_project; + +#[pin_project(project_replace)] +struct Struct { + #[pin] + pinned: T, + unpinned: U, +} + +fn main() {} diff --git a/third_party/rust/pin-project/tests/expand/project_replace/tuple_struct.expanded.rs 
b/third_party/rust/pin-project/tests/expand/project_replace/tuple_struct.expanded.rs new file mode 100644 index 000000000000..529f0b24c055 --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/project_replace/tuple_struct.expanded.rs @@ -0,0 +1,110 @@ +use pin_project::pin_project; +#[pin(__private(project_replace))] +struct TupleStruct(#[pin] T, U); +#[allow(box_pointers)] +#[allow(deprecated)] +#[allow(explicit_outlives_requirements)] +#[allow(single_use_lifetimes)] +#[allow(unreachable_pub)] +#[allow(clippy::unknown_clippy_lints)] +#[allow(clippy::pattern_type_mismatch)] +#[allow(clippy::redundant_pub_crate)] +#[allow(clippy::type_repetition_in_bounds)] +#[allow(unused_qualifications)] +#[allow(clippy::semicolon_if_nothing_returned)] +#[allow(clippy::use_self)] +#[allow(clippy::used_underscore_binding)] +const _: () = { + #[allow(unused_extern_crates)] + extern crate pin_project as _pin_project; + #[allow(dead_code)] + #[allow(clippy::mut_mut)] + struct __TupleStructProjection<'pin, T, U>( + ::pin_project::__private::Pin<&'pin mut (T)>, + &'pin mut (U), + ) + where + TupleStruct: 'pin; + #[allow(dead_code)] + #[allow(clippy::ref_option_ref)] + struct __TupleStructProjectionRef<'pin, T, U>( + ::pin_project::__private::Pin<&'pin (T)>, + &'pin (U), + ) + where + TupleStruct: 'pin; + #[allow(dead_code)] + struct __TupleStructProjectionOwned(::pin_project::__private::PhantomData, U); + impl TupleStruct { + fn project<'pin>( + self: _pin_project::__private::Pin<&'pin mut Self>, + ) -> __TupleStructProjection<'pin, T, U> { + unsafe { + let Self(_0, _1) = self.get_unchecked_mut(); + __TupleStructProjection(_pin_project::__private::Pin::new_unchecked(_0), _1) + } + } + #[allow(clippy::missing_const_for_fn)] + fn project_ref<'pin>( + self: _pin_project::__private::Pin<&'pin Self>, + ) -> __TupleStructProjectionRef<'pin, T, U> { + unsafe { + let Self(_0, _1) = self.get_ref(); + __TupleStructProjectionRef(_pin_project::__private::Pin::new_unchecked(_0), _1) + } + } + fn project_replace( + self: _pin_project::__private::Pin<&mut Self>, + __replacement: Self, + ) -> __TupleStructProjectionOwned { + unsafe { + let __self_ptr: *mut Self = self.get_unchecked_mut(); + let __guard = + _pin_project::__private::UnsafeOverwriteGuard::new(__self_ptr, __replacement); + let Self(_0, _1) = &mut *__self_ptr; + let __result = __TupleStructProjectionOwned( + _pin_project::__private::PhantomData, + _pin_project::__private::ptr::read(_1), + ); + { + let __guard = _pin_project::__private::UnsafeDropInPlaceGuard::new(_0); + } + __result + } + } + } + #[forbid(unaligned_references, safe_packed_borrows)] + fn __assert_not_repr_packed(this: &TupleStruct) { + let _ = &this.0; + let _ = &this.1; + } + #[allow(missing_debug_implementations)] + struct __TupleStruct<'pin, T, U> { + __pin_project_use_generics: _pin_project::__private::AlwaysUnpin< + 'pin, + ( + _pin_project::__private::PhantomData, + _pin_project::__private::PhantomData, + ), + >, + __field0: T, + } + impl<'pin, T, U> _pin_project::__private::Unpin for TupleStruct where + __TupleStruct<'pin, T, U>: _pin_project::__private::Unpin + { + } + #[doc(hidden)] + unsafe impl<'pin, T, U> _pin_project::UnsafeUnpin for TupleStruct where + __TupleStruct<'pin, T, U>: _pin_project::__private::Unpin + { + } + trait TupleStructMustNotImplDrop {} + #[allow(clippy::drop_bounds, drop_bounds)] + impl TupleStructMustNotImplDrop for T {} + impl TupleStructMustNotImplDrop for TupleStruct {} + #[doc(hidden)] + impl _pin_project::__private::PinnedDrop for TupleStruct { + unsafe 
fn drop(self: _pin_project::__private::Pin<&mut Self>) {} + } +}; +fn main() {} diff --git a/third_party/rust/pin-project/tests/expand/project_replace/tuple_struct.rs b/third_party/rust/pin-project/tests/expand/project_replace/tuple_struct.rs new file mode 100644 index 000000000000..c4d05f5faaaf --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/project_replace/tuple_struct.rs @@ -0,0 +1,6 @@ +use pin_project::pin_project; + +#[pin_project(project_replace)] +struct TupleStruct(#[pin] T, U); + +fn main() {} diff --git a/third_party/rust/pin-project/tests/expand/pub/enum.expanded.rs b/third_party/rust/pin-project/tests/expand/pub/enum.expanded.rs new file mode 100644 index 000000000000..530eca949d6a --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/pub/enum.expanded.rs @@ -0,0 +1,137 @@ +use pin_project::pin_project; +# [pin (__private (project = EnumProj , project_ref = EnumProjRef))] +pub enum Enum { + Struct { + #[pin] + pinned: T, + unpinned: U, + }, + Tuple(#[pin] T, U), + Unit, +} +#[allow(box_pointers)] +#[allow(deprecated)] +#[allow(explicit_outlives_requirements)] +#[allow(single_use_lifetimes)] +#[allow(unreachable_pub)] +#[allow(clippy::unknown_clippy_lints)] +#[allow(clippy::pattern_type_mismatch)] +#[allow(clippy::redundant_pub_crate)] +#[allow(clippy::type_repetition_in_bounds)] +#[allow(dead_code)] +#[allow(clippy::mut_mut)] +pub(crate) enum EnumProj<'pin, T, U> +where + Enum: 'pin, +{ + Struct { + pinned: ::pin_project::__private::Pin<&'pin mut (T)>, + unpinned: &'pin mut (U), + }, + Tuple(::pin_project::__private::Pin<&'pin mut (T)>, &'pin mut (U)), + Unit, +} +#[allow(box_pointers)] +#[allow(deprecated)] +#[allow(explicit_outlives_requirements)] +#[allow(single_use_lifetimes)] +#[allow(unreachable_pub)] +#[allow(clippy::unknown_clippy_lints)] +#[allow(clippy::pattern_type_mismatch)] +#[allow(clippy::redundant_pub_crate)] +#[allow(clippy::type_repetition_in_bounds)] +#[allow(dead_code)] +#[allow(clippy::ref_option_ref)] +pub(crate) enum EnumProjRef<'pin, T, U> +where + Enum: 'pin, +{ + Struct { + pinned: ::pin_project::__private::Pin<&'pin (T)>, + unpinned: &'pin (U), + }, + Tuple(::pin_project::__private::Pin<&'pin (T)>, &'pin (U)), + Unit, +} +#[allow(box_pointers)] +#[allow(deprecated)] +#[allow(explicit_outlives_requirements)] +#[allow(single_use_lifetimes)] +#[allow(unreachable_pub)] +#[allow(clippy::unknown_clippy_lints)] +#[allow(clippy::pattern_type_mismatch)] +#[allow(clippy::redundant_pub_crate)] +#[allow(clippy::type_repetition_in_bounds)] +#[allow(unused_qualifications)] +#[allow(clippy::semicolon_if_nothing_returned)] +#[allow(clippy::use_self)] +#[allow(clippy::used_underscore_binding)] +const _: () = { + #[allow(unused_extern_crates)] + extern crate pin_project as _pin_project; + impl Enum { + pub(crate) fn project<'pin>( + self: _pin_project::__private::Pin<&'pin mut Self>, + ) -> EnumProj<'pin, T, U> { + unsafe { + match self.get_unchecked_mut() { + Self::Struct { pinned, unpinned } => EnumProj::Struct { + pinned: _pin_project::__private::Pin::new_unchecked(pinned), + unpinned, + }, + Self::Tuple(_0, _1) => { + EnumProj::Tuple(_pin_project::__private::Pin::new_unchecked(_0), _1) + } + Self::Unit => EnumProj::Unit, + } + } + } + #[allow(clippy::missing_const_for_fn)] + pub(crate) fn project_ref<'pin>( + self: _pin_project::__private::Pin<&'pin Self>, + ) -> EnumProjRef<'pin, T, U> { + unsafe { + match self.get_ref() { + Self::Struct { pinned, unpinned } => EnumProjRef::Struct { + pinned: 
_pin_project::__private::Pin::new_unchecked(pinned), + unpinned, + }, + Self::Tuple(_0, _1) => { + EnumProjRef::Tuple(_pin_project::__private::Pin::new_unchecked(_0), _1) + } + Self::Unit => EnumProjRef::Unit, + } + } + } + } + #[allow(missing_debug_implementations)] + pub struct __Enum<'pin, T, U> { + __pin_project_use_generics: _pin_project::__private::AlwaysUnpin< + 'pin, + ( + _pin_project::__private::PhantomData, + _pin_project::__private::PhantomData, + ), + >, + __field0: T, + __field1: T, + } + impl<'pin, T, U> _pin_project::__private::Unpin for Enum where + __Enum<'pin, T, U>: _pin_project::__private::Unpin + { + } + #[doc(hidden)] + unsafe impl<'pin, T, U> _pin_project::UnsafeUnpin for Enum where + __Enum<'pin, T, U>: _pin_project::__private::Unpin + { + } + trait EnumMustNotImplDrop {} + #[allow(clippy::drop_bounds, drop_bounds)] + impl EnumMustNotImplDrop for T {} + impl EnumMustNotImplDrop for Enum {} + #[doc(hidden)] + impl _pin_project::__private::PinnedDrop for Enum { + unsafe fn drop(self: _pin_project::__private::Pin<&mut Self>) {} + } +}; +fn main() {} diff --git a/third_party/rust/pin-project/tests/expand/pub/enum.rs b/third_party/rust/pin-project/tests/expand/pub/enum.rs new file mode 100644 index 000000000000..7c8577fd1b06 --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/pub/enum.rs @@ -0,0 +1,14 @@ +use pin_project::pin_project; + +#[pin_project(project = EnumProj, project_ref = EnumProjRef)] +pub enum Enum { + Struct { + #[pin] + pinned: T, + unpinned: U, + }, + Tuple(#[pin] T, U), + Unit, +} + +fn main() {} diff --git a/third_party/rust/pin-project/tests/expand/pub/struct.expanded.rs b/third_party/rust/pin-project/tests/expand/pub/struct.expanded.rs new file mode 100644 index 000000000000..71ae3a3b93b5 --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/pub/struct.expanded.rs @@ -0,0 +1,101 @@ +use pin_project::pin_project; +#[pin(__private())] +pub struct Struct { + #[pin] + pub pinned: T, + pub unpinned: U, +} +#[allow(box_pointers)] +#[allow(deprecated)] +#[allow(explicit_outlives_requirements)] +#[allow(single_use_lifetimes)] +#[allow(unreachable_pub)] +#[allow(clippy::unknown_clippy_lints)] +#[allow(clippy::pattern_type_mismatch)] +#[allow(clippy::redundant_pub_crate)] +#[allow(clippy::type_repetition_in_bounds)] +#[allow(unused_qualifications)] +#[allow(clippy::semicolon_if_nothing_returned)] +#[allow(clippy::use_self)] +#[allow(clippy::used_underscore_binding)] +const _: () = { + #[allow(unused_extern_crates)] + extern crate pin_project as _pin_project; + #[allow(dead_code)] + #[allow(clippy::mut_mut)] + pub(crate) struct __StructProjection<'pin, T, U> + where + Struct: 'pin, + { + pub pinned: ::pin_project::__private::Pin<&'pin mut (T)>, + pub unpinned: &'pin mut (U), + } + #[allow(dead_code)] + #[allow(clippy::ref_option_ref)] + pub(crate) struct __StructProjectionRef<'pin, T, U> + where + Struct: 'pin, + { + pub pinned: ::pin_project::__private::Pin<&'pin (T)>, + pub unpinned: &'pin (U), + } + impl Struct { + pub(crate) fn project<'pin>( + self: _pin_project::__private::Pin<&'pin mut Self>, + ) -> __StructProjection<'pin, T, U> { + unsafe { + let Self { pinned, unpinned } = self.get_unchecked_mut(); + __StructProjection { + pinned: _pin_project::__private::Pin::new_unchecked(pinned), + unpinned, + } + } + } + #[allow(clippy::missing_const_for_fn)] + pub(crate) fn project_ref<'pin>( + self: _pin_project::__private::Pin<&'pin Self>, + ) -> __StructProjectionRef<'pin, T, U> { + unsafe { + let Self { pinned, unpinned } = self.get_ref(); 
+ __StructProjectionRef { + pinned: _pin_project::__private::Pin::new_unchecked(pinned), + unpinned, + } + } + } + } + #[forbid(unaligned_references, safe_packed_borrows)] + fn __assert_not_repr_packed(this: &Struct) { + let _ = &this.pinned; + let _ = &this.unpinned; + } + #[allow(missing_debug_implementations)] + pub struct __Struct<'pin, T, U> { + __pin_project_use_generics: _pin_project::__private::AlwaysUnpin< + 'pin, + ( + _pin_project::__private::PhantomData, + _pin_project::__private::PhantomData, + ), + >, + __field0: T, + } + impl<'pin, T, U> _pin_project::__private::Unpin for Struct where + __Struct<'pin, T, U>: _pin_project::__private::Unpin + { + } + #[doc(hidden)] + unsafe impl<'pin, T, U> _pin_project::UnsafeUnpin for Struct where + __Struct<'pin, T, U>: _pin_project::__private::Unpin + { + } + trait StructMustNotImplDrop {} + #[allow(clippy::drop_bounds, drop_bounds)] + impl StructMustNotImplDrop for T {} + impl StructMustNotImplDrop for Struct {} + #[doc(hidden)] + impl _pin_project::__private::PinnedDrop for Struct { + unsafe fn drop(self: _pin_project::__private::Pin<&mut Self>) {} + } +}; +fn main() {} diff --git a/third_party/rust/pin-project/tests/expand/pub/struct.rs b/third_party/rust/pin-project/tests/expand/pub/struct.rs new file mode 100644 index 000000000000..f50d1e927293 --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/pub/struct.rs @@ -0,0 +1,10 @@ +use pin_project::pin_project; + +#[pin_project] +pub struct Struct { + #[pin] + pub pinned: T, + pub unpinned: U, +} + +fn main() {} diff --git a/third_party/rust/pin-project/tests/expand/pub/tuple_struct.expanded.rs b/third_party/rust/pin-project/tests/expand/pub/tuple_struct.expanded.rs new file mode 100644 index 000000000000..02c3f24db752 --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/pub/tuple_struct.expanded.rs @@ -0,0 +1,89 @@ +use pin_project::pin_project; +#[pin(__private())] +pub struct TupleStruct(#[pin] pub T, pub U); +#[allow(box_pointers)] +#[allow(deprecated)] +#[allow(explicit_outlives_requirements)] +#[allow(single_use_lifetimes)] +#[allow(unreachable_pub)] +#[allow(clippy::unknown_clippy_lints)] +#[allow(clippy::pattern_type_mismatch)] +#[allow(clippy::redundant_pub_crate)] +#[allow(clippy::type_repetition_in_bounds)] +#[allow(unused_qualifications)] +#[allow(clippy::semicolon_if_nothing_returned)] +#[allow(clippy::use_self)] +#[allow(clippy::used_underscore_binding)] +const _: () = { + #[allow(unused_extern_crates)] + extern crate pin_project as _pin_project; + #[allow(dead_code)] + #[allow(clippy::mut_mut)] + pub(crate) struct __TupleStructProjection<'pin, T, U>( + pub ::pin_project::__private::Pin<&'pin mut (T)>, + pub &'pin mut (U), + ) + where + TupleStruct: 'pin; + #[allow(dead_code)] + #[allow(clippy::ref_option_ref)] + pub(crate) struct __TupleStructProjectionRef<'pin, T, U>( + pub ::pin_project::__private::Pin<&'pin (T)>, + pub &'pin (U), + ) + where + TupleStruct: 'pin; + impl TupleStruct { + pub(crate) fn project<'pin>( + self: _pin_project::__private::Pin<&'pin mut Self>, + ) -> __TupleStructProjection<'pin, T, U> { + unsafe { + let Self(_0, _1) = self.get_unchecked_mut(); + __TupleStructProjection(_pin_project::__private::Pin::new_unchecked(_0), _1) + } + } + #[allow(clippy::missing_const_for_fn)] + pub(crate) fn project_ref<'pin>( + self: _pin_project::__private::Pin<&'pin Self>, + ) -> __TupleStructProjectionRef<'pin, T, U> { + unsafe { + let Self(_0, _1) = self.get_ref(); + __TupleStructProjectionRef(_pin_project::__private::Pin::new_unchecked(_0), _1) + 
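+ // Both projected elements borrow from the original for the 'pin lifetime: the #[pin] element as Pin<&T>, the other as &U.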
} + } + } + #[forbid(unaligned_references, safe_packed_borrows)] + fn __assert_not_repr_packed(this: &TupleStruct) { + let _ = &this.0; + let _ = &this.1; + } + #[allow(missing_debug_implementations)] + pub struct __TupleStruct<'pin, T, U> { + __pin_project_use_generics: _pin_project::__private::AlwaysUnpin< + 'pin, + ( + _pin_project::__private::PhantomData, + _pin_project::__private::PhantomData, + ), + >, + __field0: T, + } + impl<'pin, T, U> _pin_project::__private::Unpin for TupleStruct where + __TupleStruct<'pin, T, U>: _pin_project::__private::Unpin + { + } + #[doc(hidden)] + unsafe impl<'pin, T, U> _pin_project::UnsafeUnpin for TupleStruct where + __TupleStruct<'pin, T, U>: _pin_project::__private::Unpin + { + } + trait TupleStructMustNotImplDrop {} + #[allow(clippy::drop_bounds, drop_bounds)] + impl TupleStructMustNotImplDrop for T {} + impl TupleStructMustNotImplDrop for TupleStruct {} + #[doc(hidden)] + impl _pin_project::__private::PinnedDrop for TupleStruct { + unsafe fn drop(self: _pin_project::__private::Pin<&mut Self>) {} + } +}; +fn main() {} diff --git a/third_party/rust/pin-project/tests/expand/pub/tuple_struct.rs b/third_party/rust/pin-project/tests/expand/pub/tuple_struct.rs new file mode 100644 index 000000000000..5756aafcc83f --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/pub/tuple_struct.rs @@ -0,0 +1,6 @@ +use pin_project::pin_project; + +#[pin_project] +pub struct TupleStruct(#[pin] pub T, pub U); + +fn main() {} diff --git a/third_party/rust/pin-project/tests/expand/unsafe_unpin/enum.expanded.rs b/third_party/rust/pin-project/tests/expand/unsafe_unpin/enum.expanded.rs new file mode 100644 index 000000000000..3d53a1e8556f --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/unsafe_unpin/enum.expanded.rs @@ -0,0 +1,121 @@ +use pin_project::{pin_project, UnsafeUnpin}; +# [pin (__private (UnsafeUnpin , project = EnumProj , project_ref = EnumProjRef))] +enum Enum { + Struct { + #[pin] + pinned: T, + unpinned: U, + }, + Tuple(#[pin] T, U), + Unit, +} +#[allow(box_pointers)] +#[allow(deprecated)] +#[allow(explicit_outlives_requirements)] +#[allow(single_use_lifetimes)] +#[allow(unreachable_pub)] +#[allow(clippy::unknown_clippy_lints)] +#[allow(clippy::pattern_type_mismatch)] +#[allow(clippy::redundant_pub_crate)] +#[allow(clippy::type_repetition_in_bounds)] +#[allow(dead_code)] +#[allow(clippy::mut_mut)] +enum EnumProj<'pin, T, U> +where + Enum: 'pin, +{ + Struct { + pinned: ::pin_project::__private::Pin<&'pin mut (T)>, + unpinned: &'pin mut (U), + }, + Tuple(::pin_project::__private::Pin<&'pin mut (T)>, &'pin mut (U)), + Unit, +} +#[allow(box_pointers)] +#[allow(deprecated)] +#[allow(explicit_outlives_requirements)] +#[allow(single_use_lifetimes)] +#[allow(unreachable_pub)] +#[allow(clippy::unknown_clippy_lints)] +#[allow(clippy::pattern_type_mismatch)] +#[allow(clippy::redundant_pub_crate)] +#[allow(clippy::type_repetition_in_bounds)] +#[allow(dead_code)] +#[allow(clippy::ref_option_ref)] +enum EnumProjRef<'pin, T, U> +where + Enum: 'pin, +{ + Struct { + pinned: ::pin_project::__private::Pin<&'pin (T)>, + unpinned: &'pin (U), + }, + Tuple(::pin_project::__private::Pin<&'pin (T)>, &'pin (U)), + Unit, +} +#[allow(box_pointers)] +#[allow(deprecated)] +#[allow(explicit_outlives_requirements)] +#[allow(single_use_lifetimes)] +#[allow(unreachable_pub)] +#[allow(clippy::unknown_clippy_lints)] +#[allow(clippy::pattern_type_mismatch)] +#[allow(clippy::redundant_pub_crate)] +#[allow(clippy::type_repetition_in_bounds)] +#[allow(unused_qualifications)] 
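+// The allow attributes above and below exist so that this machine-generated const block does not trip lints that downstream crates may enable with #![warn] or #![deny].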
+#[allow(clippy::semicolon_if_nothing_returned)] +#[allow(clippy::use_self)] +#[allow(clippy::used_underscore_binding)] +const _: () = { + #[allow(unused_extern_crates)] + extern crate pin_project as _pin_project; + impl Enum { + fn project<'pin>( + self: _pin_project::__private::Pin<&'pin mut Self>, + ) -> EnumProj<'pin, T, U> { + unsafe { + match self.get_unchecked_mut() { + Self::Struct { pinned, unpinned } => EnumProj::Struct { + pinned: _pin_project::__private::Pin::new_unchecked(pinned), + unpinned, + }, + Self::Tuple(_0, _1) => { + EnumProj::Tuple(_pin_project::__private::Pin::new_unchecked(_0), _1) + } + Self::Unit => EnumProj::Unit, + } + } + } + #[allow(clippy::missing_const_for_fn)] + fn project_ref<'pin>( + self: _pin_project::__private::Pin<&'pin Self>, + ) -> EnumProjRef<'pin, T, U> { + unsafe { + match self.get_ref() { + Self::Struct { pinned, unpinned } => EnumProjRef::Struct { + pinned: _pin_project::__private::Pin::new_unchecked(pinned), + unpinned, + }, + Self::Tuple(_0, _1) => { + EnumProjRef::Tuple(_pin_project::__private::Pin::new_unchecked(_0), _1) + } + Self::Unit => EnumProjRef::Unit, + } + } + } + } + impl<'pin, T, U> _pin_project::__private::Unpin for Enum where + _pin_project::__private::Wrapper<'pin, Self>: _pin_project::UnsafeUnpin + { + } + trait EnumMustNotImplDrop {} + #[allow(clippy::drop_bounds, drop_bounds)] + impl EnumMustNotImplDrop for T {} + impl EnumMustNotImplDrop for Enum {} + #[doc(hidden)] + impl _pin_project::__private::PinnedDrop for Enum { + unsafe fn drop(self: _pin_project::__private::Pin<&mut Self>) {} + } +}; +unsafe impl UnsafeUnpin for Enum {} +fn main() {} diff --git a/third_party/rust/pin-project/tests/expand/unsafe_unpin/enum.rs b/third_party/rust/pin-project/tests/expand/unsafe_unpin/enum.rs new file mode 100644 index 000000000000..d368d7132b3d --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/unsafe_unpin/enum.rs @@ -0,0 +1,16 @@ +use pin_project::{pin_project, UnsafeUnpin}; + +#[pin_project(UnsafeUnpin, project = EnumProj, project_ref = EnumProjRef)] +enum Enum { + Struct { + #[pin] + pinned: T, + unpinned: U, + }, + Tuple(#[pin] T, U), + Unit, +} + +unsafe impl UnsafeUnpin for Enum {} + +fn main() {} diff --git a/third_party/rust/pin-project/tests/expand/unsafe_unpin/struct.expanded.rs b/third_party/rust/pin-project/tests/expand/unsafe_unpin/struct.expanded.rs new file mode 100644 index 000000000000..a866ab3c4036 --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/unsafe_unpin/struct.expanded.rs @@ -0,0 +1,86 @@ +use pin_project::{pin_project, UnsafeUnpin}; +#[pin(__private(UnsafeUnpin))] +struct Struct { + #[pin] + pinned: T, + unpinned: U, +} +#[allow(box_pointers)] +#[allow(deprecated)] +#[allow(explicit_outlives_requirements)] +#[allow(single_use_lifetimes)] +#[allow(unreachable_pub)] +#[allow(clippy::unknown_clippy_lints)] +#[allow(clippy::pattern_type_mismatch)] +#[allow(clippy::redundant_pub_crate)] +#[allow(clippy::type_repetition_in_bounds)] +#[allow(unused_qualifications)] +#[allow(clippy::semicolon_if_nothing_returned)] +#[allow(clippy::use_self)] +#[allow(clippy::used_underscore_binding)] +const _: () = { + #[allow(unused_extern_crates)] + extern crate pin_project as _pin_project; + #[allow(dead_code)] + #[allow(clippy::mut_mut)] + struct __StructProjection<'pin, T, U> + where + Struct: 'pin, + { + pinned: ::pin_project::__private::Pin<&'pin mut (T)>, + unpinned: &'pin mut (U), + } + #[allow(dead_code)] + #[allow(clippy::ref_option_ref)] + struct __StructProjectionRef<'pin, T, U> + where + 
Struct: 'pin, + { + pinned: ::pin_project::__private::Pin<&'pin (T)>, + unpinned: &'pin (U), + } + impl Struct { + fn project<'pin>( + self: _pin_project::__private::Pin<&'pin mut Self>, + ) -> __StructProjection<'pin, T, U> { + unsafe { + let Self { pinned, unpinned } = self.get_unchecked_mut(); + __StructProjection { + pinned: _pin_project::__private::Pin::new_unchecked(pinned), + unpinned, + } + } + } + #[allow(clippy::missing_const_for_fn)] + fn project_ref<'pin>( + self: _pin_project::__private::Pin<&'pin Self>, + ) -> __StructProjectionRef<'pin, T, U> { + unsafe { + let Self { pinned, unpinned } = self.get_ref(); + __StructProjectionRef { + pinned: _pin_project::__private::Pin::new_unchecked(pinned), + unpinned, + } + } + } + } + #[forbid(unaligned_references, safe_packed_borrows)] + fn __assert_not_repr_packed(this: &Struct) { + let _ = &this.pinned; + let _ = &this.unpinned; + } + impl<'pin, T, U> _pin_project::__private::Unpin for Struct where + _pin_project::__private::Wrapper<'pin, Self>: _pin_project::UnsafeUnpin + { + } + trait StructMustNotImplDrop {} + #[allow(clippy::drop_bounds, drop_bounds)] + impl StructMustNotImplDrop for T {} + impl StructMustNotImplDrop for Struct {} + #[doc(hidden)] + impl _pin_project::__private::PinnedDrop for Struct { + unsafe fn drop(self: _pin_project::__private::Pin<&mut Self>) {} + } +}; +unsafe impl UnsafeUnpin for Struct {} +fn main() {} diff --git a/third_party/rust/pin-project/tests/expand/unsafe_unpin/struct.rs b/third_party/rust/pin-project/tests/expand/unsafe_unpin/struct.rs new file mode 100644 index 000000000000..b0851b1ad689 --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/unsafe_unpin/struct.rs @@ -0,0 +1,12 @@ +use pin_project::{pin_project, UnsafeUnpin}; + +#[pin_project(UnsafeUnpin)] +struct Struct { + #[pin] + pinned: T, + unpinned: U, +} + +unsafe impl UnsafeUnpin for Struct {} + +fn main() {} diff --git a/third_party/rust/pin-project/tests/expand/unsafe_unpin/tuple_struct.expanded.rs b/third_party/rust/pin-project/tests/expand/unsafe_unpin/tuple_struct.expanded.rs new file mode 100644 index 000000000000..1a419768c114 --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/unsafe_unpin/tuple_struct.expanded.rs @@ -0,0 +1,74 @@ +use pin_project::{pin_project, UnsafeUnpin}; +#[pin(__private(UnsafeUnpin))] +struct TupleStruct(#[pin] T, U); +#[allow(box_pointers)] +#[allow(deprecated)] +#[allow(explicit_outlives_requirements)] +#[allow(single_use_lifetimes)] +#[allow(unreachable_pub)] +#[allow(clippy::unknown_clippy_lints)] +#[allow(clippy::pattern_type_mismatch)] +#[allow(clippy::redundant_pub_crate)] +#[allow(clippy::type_repetition_in_bounds)] +#[allow(unused_qualifications)] +#[allow(clippy::semicolon_if_nothing_returned)] +#[allow(clippy::use_self)] +#[allow(clippy::used_underscore_binding)] +const _: () = { + #[allow(unused_extern_crates)] + extern crate pin_project as _pin_project; + #[allow(dead_code)] + #[allow(clippy::mut_mut)] + struct __TupleStructProjection<'pin, T, U>( + ::pin_project::__private::Pin<&'pin mut (T)>, + &'pin mut (U), + ) + where + TupleStruct: 'pin; + #[allow(dead_code)] + #[allow(clippy::ref_option_ref)] + struct __TupleStructProjectionRef<'pin, T, U>( + ::pin_project::__private::Pin<&'pin (T)>, + &'pin (U), + ) + where + TupleStruct: 'pin; + impl TupleStruct { + fn project<'pin>( + self: _pin_project::__private::Pin<&'pin mut Self>, + ) -> __TupleStructProjection<'pin, T, U> { + unsafe { + let Self(_0, _1) = self.get_unchecked_mut(); + 
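+ // Mutable projection: _0 carries #[pin] and is re-wrapped in Pin<&mut T>; _1 is handed out as a plain &mut U.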
__TupleStructProjection(_pin_project::__private::Pin::new_unchecked(_0), _1) + } + } + #[allow(clippy::missing_const_for_fn)] + fn project_ref<'pin>( + self: _pin_project::__private::Pin<&'pin Self>, + ) -> __TupleStructProjectionRef<'pin, T, U> { + unsafe { + let Self(_0, _1) = self.get_ref(); + __TupleStructProjectionRef(_pin_project::__private::Pin::new_unchecked(_0), _1) + } + } + } + #[forbid(unaligned_references, safe_packed_borrows)] + fn __assert_not_repr_packed(this: &TupleStruct) { + let _ = &this.0; + let _ = &this.1; + } + impl<'pin, T, U> _pin_project::__private::Unpin for TupleStruct where + _pin_project::__private::Wrapper<'pin, Self>: _pin_project::UnsafeUnpin + { + } + trait TupleStructMustNotImplDrop {} + #[allow(clippy::drop_bounds, drop_bounds)] + impl TupleStructMustNotImplDrop for T {} + impl TupleStructMustNotImplDrop for TupleStruct {} + #[doc(hidden)] + impl _pin_project::__private::PinnedDrop for TupleStruct { + unsafe fn drop(self: _pin_project::__private::Pin<&mut Self>) {} + } +}; +unsafe impl UnsafeUnpin for Struct {} +fn main() {} diff --git a/third_party/rust/pin-project/tests/expand/unsafe_unpin/tuple_struct.rs b/third_party/rust/pin-project/tests/expand/unsafe_unpin/tuple_struct.rs new file mode 100644 index 000000000000..964617a1c930 --- /dev/null +++ b/third_party/rust/pin-project/tests/expand/unsafe_unpin/tuple_struct.rs @@ -0,0 +1,8 @@ +use pin_project::{pin_project, UnsafeUnpin}; + +#[pin_project(UnsafeUnpin)] +struct TupleStruct(#[pin] T, U); + +unsafe impl UnsafeUnpin for Struct {} + +fn main() {} diff --git a/third_party/rust/pin-project/tests/expandtest.rs b/third_party/rust/pin-project/tests/expandtest.rs new file mode 100644 index 000000000000..3f0d5c11214a --- /dev/null +++ b/third_party/rust/pin-project/tests/expandtest.rs @@ -0,0 +1,43 @@ +#![cfg(not(miri))] +#![warn(rust_2018_idioms, single_use_lifetimes)] + +use std::{ + env, + process::{Command, ExitStatus, Stdio}, +}; + +const PATH: &str = "tests/expand/**/*.rs"; + +#[rustversion::attr(not(nightly), ignore)] +#[test] +fn expandtest() { + let is_ci = env::var_os("CI").is_some(); + let cargo = &*env::var("CARGO").unwrap_or_else(|_| "cargo".into()); + if !has_command(&[cargo, "expand"]) || !has_command(&[cargo, "fmt"]) { + if is_ci { + panic!("expandtest requires rustfmt and cargo-expand"); + } + return; + } + + let args = &["--all-features"]; + if is_ci { + macrotest::expand_without_refresh_args(PATH, args); + } else { + env::set_var("MACROTEST", "overwrite"); + macrotest::expand_args(PATH, args); + } +} + +fn has_command(command: &[&str]) -> bool { + Command::new(command[0]) + .args(&command[1..]) + .arg("--version") + .stdin(Stdio::null()) + .stdout(Stdio::null()) + .stderr(Stdio::null()) + .status() + .as_ref() + .map(ExitStatus::success) + .unwrap_or(false) +} diff --git a/third_party/rust/pin-project/tests/include/basic-safe-part.rs b/third_party/rust/pin-project/tests/include/basic-safe-part.rs index fefc92458c6d..0b7c43e87372 100644 --- a/third_party/rust/pin-project/tests/include/basic-safe-part.rs +++ b/third_party/rust/pin-project/tests/include/basic-safe-part.rs @@ -8,11 +8,32 @@ pub struct DefaultStruct { pub unpinned: U, } +#[::pin_project::pin_project( + project = DefaultStructNamedProj, + project_ref = DefaultStructNamedProjRef, +)] +#[derive(Debug)] +pub struct DefaultStructNamed { + #[pin] + pub pinned: T, + pub unpinned: U, +} + #[::pin_project::pin_project] #[derive(Debug)] pub struct DefaultTupleStruct(#[pin] pub T, pub U); -#[::pin_project::pin_project] 
+#[::pin_project::pin_project( + project = DefaultTupleStructNamedProj, + project_ref = DefaultTupleStructNamedProjRef, +)] +#[derive(Debug)] +pub struct DefaultTupleStructNamed(#[pin] pub T, pub U); + +#[::pin_project::pin_project( + project = DefaultEnumProj, + project_ref = DefaultEnumProjRef, +)] #[derive(Debug)] pub enum DefaultEnum { Struct { @@ -46,7 +67,11 @@ impl PinnedDrop for PinnedDropTupleStruct { fn drop(self: ::pin_project::__private::Pin<&mut Self>) {} } -#[::pin_project::pin_project(PinnedDrop)] +#[::pin_project::pin_project( + PinnedDrop, + project = PinnedDropEnumProj, + project_ref = PinnedDropEnumProjRef, +)] #[derive(Debug)] pub enum PinnedDropEnum { Struct { @@ -71,11 +96,35 @@ pub struct ReplaceStruct { pub unpinned: U, } +#[::pin_project::pin_project( + project = ReplaceStructNamedProj, + project_ref = ReplaceStructNamedProjRef, + project_replace = ReplaceStructNamedProjOwn, +)] +#[derive(Debug)] +pub struct ReplaceStructNamed { + #[pin] + pub pinned: T, + pub unpinned: U, +} + #[::pin_project::pin_project(project_replace)] #[derive(Debug)] pub struct ReplaceTupleStruct(#[pin] pub T, pub U); -#[::pin_project::pin_project(project_replace)] +#[::pin_project::pin_project( + project = ReplaceTupleStructNamedProj, + project_ref = ReplaceTupleStructNamedProjRef, + project_replace = ReplaceTupleStructNamedProjOwn, +)] +#[derive(Debug)] +pub struct ReplaceTupleStructNamed(#[pin] pub T, pub U); + +#[::pin_project::pin_project( + project = ReplaceEnumProj, + project_ref = ReplaceEnumProjRef, + project_replace = ReplaceEnumProjOwn, +)] #[derive(Debug)] pub enum ReplaceEnum { Struct { @@ -99,7 +148,11 @@ pub struct UnsafeUnpinStruct { #[derive(Debug)] pub struct UnsafeUnpinTupleStruct(#[pin] pub T, pub U); -#[::pin_project::pin_project(UnsafeUnpin)] +#[::pin_project::pin_project( + UnsafeUnpin, + project = UnsafeUnpinEnumProj, + project_ref = UnsafeUnpinEnumProjRef, +)] #[derive(Debug)] pub enum UnsafeUnpinEnum { Struct { @@ -123,7 +176,11 @@ pub struct NotUnpinStruct { #[derive(Debug)] pub struct NotUnpinTupleStruct(#[pin] pub T, pub U); -#[::pin_project::pin_project(!Unpin)] +#[::pin_project::pin_project( + !Unpin, + project = NotUnpinEnumProj, + project_ref = NotUnpinEnumProjRef, +)] #[derive(Debug)] pub enum NotUnpinEnum { Struct { diff --git a/third_party/rust/pin-project/tests/lint.rs b/third_party/rust/pin-project/tests/lint.rs index 1d874979a2de..6a75461d65c5 100644 --- a/third_party/rust/pin-project/tests/lint.rs +++ b/third_party/rust/pin-project/tests/lint.rs @@ -1,70 +1,698 @@ +// Check interoperability with rustc and clippy lints. + +// for old compilers +#![allow(unknown_lints)] #![warn(nonstandard_style, rust_2018_idioms, unused)] // Note: This does not guarantee compatibility with forbidding these lints in the future. // If rustc adds a new lint, we may not be able to keep this. -#![forbid(future_incompatible, rust_2018_compatibility)] -#![allow(unknown_lints)] // for old compilers +#![forbid(future_incompatible, rust_2018_compatibility, rust_2021_compatibility)] +// lints forbidden as a part of future_incompatible, rust_2018_compatibility, and rust_2021_compatibility are not included in the list below. +// elided_lifetimes_in_paths, explicit_outlives_requirements, unused_extern_crates: as a part of rust_2018_idioms +// unsafe_block_in_unsafe_fn: requires Rust 1.52. and, we don't generate unsafe fn. +// non_exhaustive_omitted_patterns: unstable +// unstable_features: no way to generate #![feature(..)] by macros, expect for unstable inner attribute. 
and this lint is deprecated: https://doc.rust-lang.org/rustc/lints/listing/allowed-by-default.html#unstable-features +// unused_crate_dependencies: unrelated +// unsafe_code: checked in forbid_unsafe module #![warn( box_pointers, deprecated_in_future, - elided_lifetimes_in_paths, - explicit_outlives_requirements, macro_use_extern_crate, meta_variable_misuse, + missing_abi, missing_copy_implementations, missing_debug_implementations, missing_docs, non_ascii_idents, + noop_method_call, single_use_lifetimes, trivial_casts, trivial_numeric_casts, unreachable_pub, - unused_extern_crates, unused_import_braces, unused_lifetimes, unused_qualifications, unused_results, variant_size_differences )] -// absolute_paths_not_starting_with_crate, anonymous_parameters, keyword_idents, pointer_structural_match: forbidden as a part of future_incompatible -// unsafe_block_in_unsafe_fn: unstable: https://github.com/rust-lang/rust/issues/71668 -// unsafe_code: checked in forbid_unsafe module -// unstable_features: deprecated: https://doc.rust-lang.org/beta/rustc/lints/listing/allowed-by-default.html#unstable-features -// unused_crate_dependencies: unrelated -#![warn(clippy::all, clippy::pedantic, clippy::nursery)] - -// Check interoperability with rustc and clippy lints. +#![warn(clippy::all, clippy::pedantic, clippy::nursery, clippy::restriction)] +#![allow(clippy::blanket_clippy_restriction_lints)] // this is a test, so enable all restriction lints intentionally. +#![allow(clippy::exhaustive_structs, clippy::exhaustive_enums)] // TODO pub mod basic { include!("include/basic.rs"); + + pub mod inside_macro { + #[rustfmt::skip] + macro_rules! mac { + () => { + #[::pin_project::pin_project] + #[derive(Debug)] + pub struct DefaultStruct { + #[pin] + pub pinned: T, + pub unpinned: U, + } + + #[::pin_project::pin_project( + project = DefaultStructNamedProj, + project_ref = DefaultStructNamedProjRef, + )] + #[derive(Debug)] + pub struct DefaultStructNamed { + #[pin] + pub pinned: T, + pub unpinned: U, + } + + #[::pin_project::pin_project] + #[derive(Debug)] + pub struct DefaultTupleStruct(#[pin] pub T, pub U); + + #[::pin_project::pin_project( + project = DefaultTupleStructNamedProj, + project_ref = DefaultTupleStructNamedProjRef, + )] + #[derive(Debug)] + pub struct DefaultTupleStructNamed(#[pin] pub T, pub U); + + #[::pin_project::pin_project( + project = DefaultEnumProj, + project_ref = DefaultEnumProjRef, + )] + #[derive(Debug)] + pub enum DefaultEnum { + Struct { + #[pin] + pinned: T, + unpinned: U, + }, + Tuple(#[pin] T, U), + Unit, + } + + #[::pin_project::pin_project(PinnedDrop)] + #[derive(Debug)] + pub struct PinnedDropStruct { + #[pin] + pub pinned: T, + pub unpinned: U, + } + + #[::pin_project::pinned_drop] + impl PinnedDrop for PinnedDropStruct { + fn drop(self: ::pin_project::__private::Pin<&mut Self>) {} + } + + #[::pin_project::pin_project(PinnedDrop)] + #[derive(Debug)] + pub struct PinnedDropTupleStruct(#[pin] pub T, pub U); + + #[::pin_project::pinned_drop] + impl PinnedDrop for PinnedDropTupleStruct { + fn drop(self: ::pin_project::__private::Pin<&mut Self>) {} + } + + #[::pin_project::pin_project( + PinnedDrop, + project = PinnedDropEnumProj, + project_ref = PinnedDropEnumProjRef, + )] + #[derive(Debug)] + pub enum PinnedDropEnum { + Struct { + #[pin] + pinned: T, + unpinned: U, + }, + Tuple(#[pin] T, U), + Unit, + } + + #[::pin_project::pinned_drop] + impl PinnedDrop for PinnedDropEnum { + fn drop(self: ::pin_project::__private::Pin<&mut Self>) {} + } + + 
#[::pin_project::pin_project(project_replace)] + #[derive(Debug)] + pub struct ReplaceStruct { + #[pin] + pub pinned: T, + pub unpinned: U, + } + + #[::pin_project::pin_project( + project = ReplaceStructNamedProj, + project_ref = ReplaceStructNamedProjRef, + project_replace = ReplaceStructNamedProjOwn, + )] + #[derive(Debug)] + pub struct ReplaceStructNamed { + #[pin] + pub pinned: T, + pub unpinned: U, + } + + #[::pin_project::pin_project(project_replace)] + #[derive(Debug)] + pub struct ReplaceTupleStruct(#[pin] pub T, pub U); + + #[::pin_project::pin_project( + project = ReplaceTupleStructNamedProj, + project_ref = ReplaceTupleStructNamedProjRef, + project_replace = ReplaceTupleStructNamedProjOwn, + )] + #[derive(Debug)] + pub struct ReplaceTupleStructNamed(#[pin] pub T, pub U); + + #[::pin_project::pin_project( + project = ReplaceEnumProj, + project_ref = ReplaceEnumProjRef, + project_replace = ReplaceEnumProjOwn, + )] + #[derive(Debug)] + pub enum ReplaceEnum { + Struct { + #[pin] + pinned: T, + unpinned: U, + }, + Tuple(#[pin] T, U), + Unit, + } + + #[::pin_project::pin_project(UnsafeUnpin)] + #[derive(Debug)] + pub struct UnsafeUnpinStruct { + #[pin] + pub pinned: T, + pub unpinned: U, + } + + #[::pin_project::pin_project(UnsafeUnpin)] + #[derive(Debug)] + pub struct UnsafeUnpinTupleStruct(#[pin] pub T, pub U); + + #[::pin_project::pin_project( + UnsafeUnpin, + project = UnsafeUnpinEnumProj, + project_ref = UnsafeUnpinEnumProjRef, + )] + #[derive(Debug)] + pub enum UnsafeUnpinEnum { + Struct { + #[pin] + pinned: T, + unpinned: U, + }, + Tuple(#[pin] T, U), + Unit, + } + + #[::pin_project::pin_project(!Unpin)] + #[derive(Debug)] + pub struct NotUnpinStruct { + #[pin] + pub pinned: T, + pub unpinned: U, + } + + #[::pin_project::pin_project(!Unpin)] + #[derive(Debug)] + pub struct NotUnpinTupleStruct(#[pin] pub T, pub U); + + #[::pin_project::pin_project( + !Unpin, + project = NotUnpinEnumProj, + project_ref = NotUnpinEnumProjRef, + )] + #[derive(Debug)] + pub enum NotUnpinEnum { + Struct { + #[pin] + pinned: T, + unpinned: U, + }, + Tuple(#[pin] T, U), + Unit, + } + + unsafe impl + ::pin_project::UnsafeUnpin for UnsafeUnpinStruct + { + } + unsafe impl + ::pin_project::UnsafeUnpin for UnsafeUnpinTupleStruct + { + } + unsafe impl + ::pin_project::UnsafeUnpin for UnsafeUnpinEnum + { + } + }; + } + + mac!(); + } } pub mod forbid_unsafe { #![forbid(unsafe_code)] include!("include/basic-safe-part.rs"); + + pub mod inside_macro { + #[rustfmt::skip] + macro_rules! 
mac { + () => { + #[::pin_project::pin_project] + #[derive(Debug)] + pub struct DefaultStruct { + #[pin] + pub pinned: T, + pub unpinned: U, + } + + #[::pin_project::pin_project( + project = DefaultStructNamedProj, + project_ref = DefaultStructNamedProjRef, + )] + #[derive(Debug)] + pub struct DefaultStructNamed { + #[pin] + pub pinned: T, + pub unpinned: U, + } + + #[::pin_project::pin_project] + #[derive(Debug)] + pub struct DefaultTupleStruct(#[pin] pub T, pub U); + + #[::pin_project::pin_project( + project = DefaultTupleStructNamedProj, + project_ref = DefaultTupleStructNamedProjRef, + )] + #[derive(Debug)] + pub struct DefaultTupleStructNamed(#[pin] pub T, pub U); + + #[::pin_project::pin_project( + project = DefaultEnumProj, + project_ref = DefaultEnumProjRef, + )] + #[derive(Debug)] + pub enum DefaultEnum { + Struct { + #[pin] + pinned: T, + unpinned: U, + }, + Tuple(#[pin] T, U), + Unit, + } + + #[::pin_project::pin_project(PinnedDrop)] + #[derive(Debug)] + pub struct PinnedDropStruct { + #[pin] + pub pinned: T, + pub unpinned: U, + } + + #[::pin_project::pinned_drop] + impl PinnedDrop for PinnedDropStruct { + fn drop(self: ::pin_project::__private::Pin<&mut Self>) {} + } + + #[::pin_project::pin_project(PinnedDrop)] + #[derive(Debug)] + pub struct PinnedDropTupleStruct(#[pin] pub T, pub U); + + #[::pin_project::pinned_drop] + impl PinnedDrop for PinnedDropTupleStruct { + fn drop(self: ::pin_project::__private::Pin<&mut Self>) {} + } + + #[::pin_project::pin_project( + PinnedDrop, + project = PinnedDropEnumProj, + project_ref = PinnedDropEnumProjRef, + )] + #[derive(Debug)] + pub enum PinnedDropEnum { + Struct { + #[pin] + pinned: T, + unpinned: U, + }, + Tuple(#[pin] T, U), + Unit, + } + + #[::pin_project::pinned_drop] + impl PinnedDrop for PinnedDropEnum { + fn drop(self: ::pin_project::__private::Pin<&mut Self>) {} + } + + #[::pin_project::pin_project(project_replace)] + #[derive(Debug)] + pub struct ReplaceStruct { + #[pin] + pub pinned: T, + pub unpinned: U, + } + + #[::pin_project::pin_project( + project = ReplaceStructNamedProj, + project_ref = ReplaceStructNamedProjRef, + project_replace = ReplaceStructNamedProjOwn, + )] + #[derive(Debug)] + pub struct ReplaceStructNamed { + #[pin] + pub pinned: T, + pub unpinned: U, + } + + #[::pin_project::pin_project(project_replace)] + #[derive(Debug)] + pub struct ReplaceTupleStruct(#[pin] pub T, pub U); + + #[::pin_project::pin_project( + project = ReplaceTupleStructNamedProj, + project_ref = ReplaceTupleStructNamedProjRef, + project_replace = ReplaceTupleStructNamedProjOwn, + )] + #[derive(Debug)] + pub struct ReplaceTupleStructNamed(#[pin] pub T, pub U); + + #[::pin_project::pin_project( + project = ReplaceEnumProj, + project_ref = ReplaceEnumProjRef, + project_replace = ReplaceEnumProjOwn, + )] + #[derive(Debug)] + pub enum ReplaceEnum { + Struct { + #[pin] + pinned: T, + unpinned: U, + }, + Tuple(#[pin] T, U), + Unit, + } + + #[::pin_project::pin_project(UnsafeUnpin)] + #[derive(Debug)] + pub struct UnsafeUnpinStruct { + #[pin] + pub pinned: T, + pub unpinned: U, + } + + #[::pin_project::pin_project(UnsafeUnpin)] + #[derive(Debug)] + pub struct UnsafeUnpinTupleStruct(#[pin] pub T, pub U); + + #[::pin_project::pin_project( + UnsafeUnpin, + project = UnsafeUnpinEnumProj, + project_ref = UnsafeUnpinEnumProjRef, + )] + #[derive(Debug)] + pub enum UnsafeUnpinEnum { + Struct { + #[pin] + pinned: T, + unpinned: U, + }, + Tuple(#[pin] T, U), + Unit, + } + + #[::pin_project::pin_project(!Unpin)] + #[derive(Debug)] + pub struct 
NotUnpinStruct { + #[pin] + pub pinned: T, + pub unpinned: U, + } + + #[::pin_project::pin_project(!Unpin)] + #[derive(Debug)] + pub struct NotUnpinTupleStruct(#[pin] pub T, pub U); + + #[::pin_project::pin_project( + !Unpin, + project = NotUnpinEnumProj, + project_ref = NotUnpinEnumProjRef, + )] + #[derive(Debug)] + pub enum NotUnpinEnum { + Struct { + #[pin] + pinned: T, + unpinned: U, + }, + Tuple(#[pin] T, U), + Unit, + } + }; + } + + mac!(); + } } -pub mod clippy { +pub mod box_pointers { use pin_project::pin_project; - #[rustversion::attr(before(1.37), allow(single_use_lifetimes))] // https://github.com/rust-lang/rust/issues/53738 + #[allow(box_pointers)] // for the type itself #[pin_project(project_replace)] #[derive(Debug)] - pub struct MutMutStruct<'a, T, U> { + pub struct Struct { + #[pin] + pub p: Box, + pub u: Box, + } + + #[allow(box_pointers)] // for the type itself + #[pin_project(project_replace)] + #[derive(Debug)] + pub struct TupleStruct(#[pin] pub Box, pub Box); + + #[allow(box_pointers)] // for the type itself + #[pin_project( + project = EnumProj, + project_ref = EnumProjRef, + project_replace = EnumProjOwn, + )] + #[derive(Debug)] + pub enum Enum { + Struct { + #[pin] + p: Box, + u: Box, + }, + Tuple(#[pin] Box, Box), + Unit, + } + + pub mod inside_macro { + use pin_project::pin_project; + + #[rustfmt::skip] + macro_rules! mac { + () => { + #[allow(box_pointers)] // for the type itself + #[pin_project(project_replace)] + #[derive(Debug)] + pub struct Struct { + #[pin] + pub p: Box, + pub u: Box, + } + + #[allow(box_pointers)] // for the type itself + #[pin_project(project_replace)] + #[derive(Debug)] + pub struct TupleStruct(#[pin] pub Box, pub Box); + + #[allow(box_pointers)] // for the type itself + #[pin_project( + project = EnumProj, + project_ref = EnumProjRef, + project_replace = EnumProjOwn, + )] + #[derive(Debug)] + pub enum Enum { + Struct { + #[pin] + p: Box, + u: Box, + }, + Tuple(#[pin] Box, Box), + Unit, + } + }; + } + + mac!(); + } +} + +pub mod deprecated { + use pin_project::pin_project; + + #[allow(deprecated)] // for the type itself + #[pin_project(project_replace)] + #[derive(Debug, Clone, Copy)] + #[deprecated] + pub struct Struct { + #[deprecated] + #[pin] + pub p: (), + #[deprecated] + pub u: (), + } + + #[allow(deprecated)] // for the type itself + #[pin_project(project_replace)] + #[derive(Debug, Clone, Copy)] + #[deprecated] + pub struct TupleStruct( + #[deprecated] + #[pin] + pub (), + #[deprecated] pub (), + ); + + #[allow(deprecated)] // for the type itself + #[pin_project( + project = EnumProj, + project_ref = EnumProjRef, + project_replace = EnumProjOwn, + )] + #[derive(Debug, Clone, Copy)] + #[deprecated] + pub enum Enum { + #[deprecated] + Struct { + #[deprecated] + #[pin] + p: (), + #[deprecated] + u: (), + }, + #[deprecated] + Tuple( + #[deprecated] + #[pin] + (), + #[deprecated] (), + ), + #[deprecated] + Unit, + } + + pub mod inside_macro { + use pin_project::pin_project; + + #[rustfmt::skip] + macro_rules! 
mac { + () => { + #[allow(deprecated)] // for the type itself + #[pin_project(project_replace)] + #[derive(Debug, Clone, Copy)] + #[deprecated] + pub struct Struct { + #[deprecated] + #[pin] + pub p: (), + #[deprecated] + pub u: (), + } + + #[allow(deprecated)] // for the type itself + #[pin_project(project_replace)] + #[derive(Debug, Clone, Copy)] + #[deprecated] + pub struct TupleStruct( + #[deprecated] + #[pin] + pub (), + #[deprecated] pub (), + ); + + #[allow(deprecated)] // for the type itself + #[pin_project( + project = EnumProj, + project_ref = EnumProjRef, + project_replace = EnumProjOwn, + )] + #[derive(Debug, Clone, Copy)] + #[deprecated] + pub enum Enum { + #[deprecated] + Struct { + #[deprecated] + #[pin] + p: (), + #[deprecated] + u: (), + }, + #[deprecated] + Tuple( + #[deprecated] + #[pin] + (), + #[deprecated] (), + ), + #[deprecated] + Unit, + } + }; + } + + mac!(); + } +} + +pub mod explicit_outlives_requirements { + use pin_project::pin_project; + + #[allow(explicit_outlives_requirements)] // for the type itself: https://github.com/rust-lang/rust/issues/60993 + #[pin_project(project_replace)] + #[derive(Debug)] + pub struct Struct<'a, T, U> + where + T: ?Sized, + U: ?Sized, + { #[pin] pub pinned: &'a mut T, pub unpinned: &'a mut U, } - #[rustversion::attr(before(1.37), allow(single_use_lifetimes))] // https://github.com/rust-lang/rust/issues/53738 + #[allow(explicit_outlives_requirements)] // for the type itself: https://github.com/rust-lang/rust/issues/60993 #[pin_project(project_replace)] #[derive(Debug)] - pub struct MutMutTupleStruct<'a, T, U>(#[pin] &'a mut T, &'a mut U); + pub struct TupleStruct<'a, T, U>(#[pin] pub &'a mut T, pub &'a mut U) + where + T: ?Sized, + U: ?Sized; - #[rustversion::attr(before(1.37), allow(single_use_lifetimes))] // https://github.com/rust-lang/rust/issues/53738 - #[pin_project(project_replace)] + #[allow(explicit_outlives_requirements)] // for the type itself: https://github.com/rust-lang/rust/issues/60993 + #[pin_project( + project = EnumProj, + project_ref = EnumProjRef, + project_replace = EnumProjOwn, + )] #[derive(Debug)] - pub enum MutMutEnum<'a, T, U> { + pub enum Enum<'a, T, U> + where + T: ?Sized, + U: ?Sized, + { Struct { #[pin] pinned: &'a mut T, @@ -74,9 +702,301 @@ pub mod clippy { Unit, } + pub mod inside_macro { + use pin_project::pin_project; + + #[rustfmt::skip] + macro_rules! 
mac { + () => { + #[allow(explicit_outlives_requirements)] // for the type itself: https://github.com/rust-lang/rust/issues/60993 + #[pin_project(project_replace)] + #[derive(Debug)] + pub struct Struct<'a, T, U> + where + T: ?Sized, + U: ?Sized, + { + #[pin] + pub pinned: &'a mut T, + pub unpinned: &'a mut U, + } + + #[allow(explicit_outlives_requirements)] // for the type itself: https://github.com/rust-lang/rust/issues/60993 + #[pin_project(project_replace)] + #[derive(Debug)] + pub struct TupleStruct<'a, T, U>(#[pin] pub &'a mut T, pub &'a mut U) + where + T: ?Sized, + U: ?Sized; + + #[allow(explicit_outlives_requirements)] // for the type itself: https://github.com/rust-lang/rust/issues/60993 + #[pin_project( + project = EnumProj, + project_ref = EnumProjRef, + project_replace = EnumProjOwn, + )] + #[derive(Debug)] + pub enum Enum<'a, T, U> + where + T: ?Sized, + U: ?Sized, + { + Struct { + #[pin] + pinned: &'a mut T, + unpinned: &'a mut U, + }, + Tuple(#[pin] &'a mut T, &'a mut U), + Unit, + } + }; + } + + mac!(); + } +} + +pub mod single_use_lifetimes { + use pin_project::pin_project; + + #[allow(unused_lifetimes)] + pub trait Trait<'a> {} + + #[allow(unused_lifetimes)] // for the type itself + #[allow(single_use_lifetimes)] // for the type itself: https://github.com/rust-lang/rust/issues/55058 #[pin_project(project_replace)] #[derive(Debug)] - pub struct TypeRepetitionInBoundsStruct + pub struct Hrtb<'pin___, T> + where + for<'pin> &'pin T: Unpin, + T: for<'pin> Trait<'pin>, + for<'pin, 'pin_, 'pin__> &'pin &'pin_ &'pin__ T: Unpin, + { + #[pin] + _f: &'pin___ mut T, + } + + pub mod inside_macro { + use pin_project::pin_project; + + #[rustfmt::skip] + macro_rules! mac { + () => { + #[allow(unused_lifetimes)] + pub trait Trait<'a> {} + + #[allow(unused_lifetimes)] // for the type itself + #[allow(single_use_lifetimes)] // for the type itself: https://github.com/rust-lang/rust/issues/55058 + #[pin_project(project_replace)] + #[derive(Debug)] + pub struct Hrtb<'pin___, T> + where + for<'pin> &'pin T: Unpin, + T: for<'pin> Trait<'pin>, + for<'pin, 'pin_, 'pin__> &'pin &'pin_ &'pin__ T: Unpin, + { + #[pin] + _f: &'pin___ mut T, + } + }; + } + + mac!(); + } +} + +pub mod variant_size_differences { + use pin_project::pin_project; + + #[allow(missing_debug_implementations, missing_copy_implementations)] // https://github.com/rust-lang/rust/pull/74060 + #[allow(variant_size_differences)] // for the type itself + #[allow(clippy::large_enum_variant)] // for the type itself + #[pin_project( + project = EnumProj, + project_ref = EnumProjRef, + project_replace = EnumProjOwn, + )] + pub enum Enum { + V1(u8), + V2([u8; 1024]), + } + + pub mod inside_macro { + use pin_project::pin_project; + + #[rustfmt::skip] + macro_rules! 
mac { + () => { + #[allow(missing_debug_implementations, missing_copy_implementations)] // https://github.com/rust-lang/rust/pull/74060 + #[allow(variant_size_differences)] // for the type itself + #[allow(clippy::large_enum_variant)] // for the type itself + #[pin_project( + project = EnumProj, + project_ref = EnumProjRef, + project_replace = EnumProjOwn, + )] + pub enum Enum { + V1(u8), + V2([u8; 1024]), + } + }; + } + + mac!(); + } +} + +pub mod clippy_mut_mut { + use pin_project::pin_project; + + #[pin_project(project_replace)] + #[derive(Debug)] + pub struct Struct<'a, T, U> { + #[pin] + pub pinned: &'a mut T, + pub unpinned: &'a mut U, + } + + #[pin_project(project_replace)] + #[derive(Debug)] + pub struct TupleStruct<'a, T, U>(#[pin] &'a mut T, &'a mut U); + + #[pin_project( + project = EnumProj, + project_ref = EnumProjRef, + project_replace = EnumProjOwn, + )] + #[derive(Debug)] + pub enum Enum<'a, T, U> { + Struct { + #[pin] + pinned: &'a mut T, + unpinned: &'a mut U, + }, + Tuple(#[pin] &'a mut T, &'a mut U), + Unit, + } + + pub mod inside_macro { + use pin_project::pin_project; + + #[rustfmt::skip] + macro_rules! mac { + () => { + #[pin_project(project_replace)] + #[derive(Debug)] + pub struct Struct<'a, T, U> { + #[pin] + pub pinned: &'a mut T, + pub unpinned: &'a mut U, + } + + #[pin_project(project_replace)] + #[derive(Debug)] + pub struct TupleStruct<'a, T, U>(#[pin] &'a mut T, &'a mut U); + + #[pin_project( + project = EnumProj, + project_ref = EnumProjRef, + project_replace = EnumProjOwn, + )] + #[derive(Debug)] + pub enum Enum<'a, T, U> { + Struct { + #[pin] + pinned: &'a mut T, + unpinned: &'a mut U, + }, + Tuple(#[pin] &'a mut T, &'a mut U), + Unit, + } + }; + } + + mac!(); + } +} + +#[allow(unreachable_pub)] +mod clippy_redundant_pub_crate { + use pin_project::pin_project; + + #[pin_project(project_replace)] + #[derive(Debug)] + pub struct Struct { + #[pin] + pub pinned: T, + pub unpinned: U, + } + + #[pin_project(project_replace)] + #[derive(Debug)] + pub struct TupleStruct(#[pin] pub T, pub U); + + #[allow(dead_code)] + #[pin_project( + project = EnumProj, + project_ref = EnumProjRef, + project_replace = EnumProjOwn, + )] + #[derive(Debug)] + pub enum Enum { + Struct { + #[pin] + pinned: T, + unpinned: U, + }, + Tuple(#[pin] T, U), + Unit, + } + + pub mod inside_macro { + use pin_project::pin_project; + + #[rustfmt::skip] + macro_rules! 
mac { + () => { + #[pin_project(project_replace)] + #[derive(Debug)] + pub struct Struct { + #[pin] + pub pinned: T, + pub unpinned: U, + } + + #[pin_project(project_replace)] + #[derive(Debug)] + pub struct TupleStruct(#[pin] pub T, pub U); + + #[allow(dead_code)] + #[pin_project( + project = EnumProj, + project_ref = EnumProjRef, + project_replace = EnumProjOwn, + )] + #[derive(Debug)] + pub enum Enum { + Struct { + #[pin] + pinned: T, + unpinned: U, + }, + Tuple(#[pin] T, U), + Unit, + } + }; + } + + mac!(); + } +} + +pub mod clippy_type_repetition_in_bounds { + use pin_project::pin_project; + + #[pin_project(project_replace)] + #[derive(Debug)] + pub struct Struct where Self: Sized, { @@ -87,13 +1007,17 @@ pub mod clippy { #[pin_project(project_replace)] #[derive(Debug)] - pub struct TypeRepetitionInBoundsTupleStruct(#[pin] T, U) + pub struct TupleStruct(#[pin] T, U) where Self: Sized; - #[pin_project(project_replace)] + #[pin_project( + project = EnumProj, + project_ref = EnumProjRef, + project_replace = EnumProjOwn, + )] #[derive(Debug)] - pub enum TypeRepetitionInBoundsEnum + pub enum Enum where Self: Sized, { @@ -106,21 +1030,170 @@ pub mod clippy { Unit, } + pub mod inside_macro { + use pin_project::pin_project; + + #[rustfmt::skip] + macro_rules! mac { + () => { + #[pin_project(project_replace)] + #[derive(Debug)] + pub struct Struct + where + Self: Sized, + { + #[pin] + pub pinned: T, + pub unpinned: U, + } + + #[pin_project(project_replace)] + #[derive(Debug)] + pub struct TupleStruct(#[pin] T, U) + where + Self: Sized; + + #[pin_project( + project = EnumProj, + project_ref = EnumProjRef, + project_replace = EnumProjOwn, + )] + #[derive(Debug)] + pub enum Enum + where + Self: Sized, + { + Struct { + #[pin] + pinned: T, + unpinned: U, + }, + Tuple(#[pin] T, U), + Unit, + } + }; + } + + mac!(); + } +} + +pub mod clippy_use_self { + use pin_project::pin_project; + + pub trait Trait { + type Assoc; + } + #[pin_project(project_replace)] #[derive(Debug)] - pub struct UsedUnderscoreBindingStruct { + pub struct Generics> + where + Self: Trait, + { + _f: T, + } + + pub mod inside_macro { + use pin_project::pin_project; + + use super::Trait; + + #[rustfmt::skip] + macro_rules! mac { + () => { + #[pin_project(project_replace)] + #[derive(Debug)] + pub struct Generics> + where + Self: Trait, + { + _f: T, + } + }; + } + + mac!(); + } +} + +pub mod clippy_used_underscore_binding { + use pin_project::pin_project; + + #[pin_project(project_replace)] + #[derive(Debug)] + pub struct Struct { #[pin] pub _pinned: T, pub _unpinned: U, } - #[pin_project(project_replace)] + #[pin_project( + project = EnumProj, + project_ref = EnumProjRef, + project_replace = EnumProjOwn, + )] #[derive(Debug)] - pub enum UsedUnderscoreBindingEnum { + pub enum Enum { Struct { #[pin] _pinned: T, _unpinned: U, }, } + + pub mod inside_macro { + use pin_project::pin_project; + + #[rustfmt::skip] + macro_rules! 
mac { + () => { + #[pin_project(project_replace)] + #[derive(Debug)] + pub struct Struct { + #[pin] + pub _pinned: T, + pub _unpinned: U, + } + + #[pin_project( + project = EnumProj, + project_ref = EnumProjRef, + project_replace = EnumProjOwn, + )] + #[derive(Debug)] + pub enum Enum { + Struct { + #[pin] + _pinned: T, + _unpinned: U, + }, + } + }; + } + + mac!(); + } +} + +pub mod clippy_ref_option_ref { + use pin_project::pin_project; + + #[pin_project] + #[derive(Debug)] + pub struct Struct<'a> { + #[pin] + pub _pinned: Option<&'a ()>, + pub _unpinned: Option<&'a ()>, + } + + #[pin_project(project = EnumProj, project_ref = EnumProjRef)] + #[derive(Debug)] + pub enum Enum<'a> { + Struct { + #[pin] + _pinned: Option<&'a ()>, + _unpinned: Option<&'a ()>, + }, + } } diff --git a/third_party/rust/pin-project/tests/pin_project.rs b/third_party/rust/pin-project/tests/pin_project.rs index b6631f2c83ba..aa08056e81d3 100644 --- a/third_party/rust/pin-project/tests/pin_project.rs +++ b/third_party/rust/pin-project/tests/pin_project.rs @@ -1,8 +1,12 @@ #![warn(rust_2018_idioms, single_use_lifetimes)] #![allow(dead_code)] +#[macro_use] +mod auxiliary; + use std::{ marker::{PhantomData, PhantomPinned}, + panic, pin::Pin, }; @@ -17,41 +21,42 @@ fn projection() { )] struct Struct { #[pin] - field1: T, - field2: U, + f1: T, + f2: U, } - let mut s = Struct { field1: 1, field2: 2 }; + let mut s = Struct { f1: 1, f2: 2 }; let mut s_orig = Pin::new(&mut s); let s = s_orig.as_mut().project(); - let x: Pin<&mut i32> = s.field1; - assert_eq!(*x, 1); + let _: Pin<&mut i32> = s.f1; + assert_eq!(*s.f1, 1); + let _: &mut i32 = s.f2; + assert_eq!(*s.f2, 2); - let y: &mut i32 = s.field2; - assert_eq!(*y, 2); - - assert_eq!(s_orig.as_ref().field1, 1); - assert_eq!(s_orig.as_ref().field2, 2); - - let mut s = Struct { field1: 1, field2: 2 }; - - let StructProj { field1, field2 } = Pin::new(&mut s).project(); - let _: Pin<&mut i32> = field1; - let _: &mut i32 = field2; - - let StructProjRef { field1, field2 } = Pin::new(&s).project_ref(); - let _: Pin<&i32> = field1; - let _: &i32 = field2; + assert_eq!(s_orig.as_ref().f1, 1); + assert_eq!(s_orig.as_ref().f2, 2); + let mut s = Struct { f1: 1, f2: 2 }; let mut s = Pin::new(&mut s); - let StructProjOwn { field1, field2 } = - s.as_mut().project_replace(Struct { field1: 3, field2: 4 }); - let _: PhantomData = field1; - let _: i32 = field2; - assert_eq!(field2, 2); - assert_eq!(s.field1, 3); - assert_eq!(s.field2, 4); + { + let StructProj { f1, f2 } = s.as_mut().project(); + let _: Pin<&mut i32> = f1; + let _: &mut i32 = f2; + } + { + let StructProjRef { f1, f2 } = s.as_ref().project_ref(); + let _: Pin<&i32> = f1; + let _: &i32 = f2; + } + { + let StructProjOwn { f1, f2 } = s.as_mut().project_replace(Struct { f1: 3, f2: 4 }); + let _: PhantomData = f1; + let _: i32 = f2; + assert_eq!(f2, 2); + assert_eq!(s.f1, 3); + assert_eq!(s.f2, 4); + } #[pin_project(project_replace)] struct TupleStruct(#[pin] T, U); @@ -59,94 +64,91 @@ fn projection() { let mut s = TupleStruct(1, 2); let s = Pin::new(&mut s).project(); - let x: Pin<&mut i32> = s.0; - assert_eq!(*x, 1); + let _: Pin<&mut i32> = s.0; + assert_eq!(*s.0, 1); + let _: &mut i32 = s.1; + assert_eq!(*s.1, 2); - let y: &mut i32 = s.1; - assert_eq!(*y, 2); - - #[pin_project(project_replace, project = EnumProj)] + #[pin_project(project = EnumProj, project_ref = EnumProjRef, project_replace = EnumProjOwn)] #[derive(Eq, PartialEq, Debug)] enum Enum { - Variant1(#[pin] A, B), - Variant2 { + Tuple(#[pin] A, B), + Struct { #[pin] - 
field1: C, - field2: D, + f1: C, + f2: D, }, - None, + Unit, } - let mut e = Enum::Variant1(1, 2); - let mut e_orig = Pin::new(&mut e); - let e = e_orig.as_mut().project(); + let mut e = Enum::Tuple(1, 2); + let mut e = Pin::new(&mut e); - match e { - EnumProj::Variant1(x, y) => { + match e.as_mut().project() { + EnumProj::Tuple(x, y) => { let x: Pin<&mut i32> = x; assert_eq!(*x, 1); - let y: &mut i32 = y; assert_eq!(*y, 2); } - EnumProj::Variant2 { field1, field2 } => { - let _x: Pin<&mut i32> = field1; - let _y: &mut i32 = field2; + EnumProj::Struct { f1, f2 } => { + let _: Pin<&mut i32> = f1; + let _: &mut i32 = f2; + unreachable!(); } - EnumProj::None => {} + EnumProj::Unit => unreachable!(), } - assert_eq!(Pin::into_ref(e_orig).get_ref(), &Enum::Variant1(1, 2)); + assert_eq!(&*e, &Enum::Tuple(1, 2)); - let mut e = Enum::Variant2 { field1: 3, field2: 4 }; - let mut e = Pin::new(&mut e).project(); + let mut e = Enum::Struct { f1: 3, f2: 4 }; + let mut e = Pin::new(&mut e); - match &mut e { - EnumProj::Variant1(x, y) => { - let _x: &mut Pin<&mut i32> = x; - let _y: &mut &mut i32 = y; + match e.as_mut().project() { + EnumProj::Tuple(x, y) => { + let _: Pin<&mut i32> = x; + let _: &mut i32 = y; + unreachable!(); } - EnumProj::Variant2 { field1, field2 } => { - let x: &mut Pin<&mut i32> = field1; - assert_eq!(**x, 3); - - let y: &mut &mut i32 = field2; - assert_eq!(**y, 4); + EnumProj::Struct { f1, f2 } => { + let _: Pin<&mut i32> = f1; + assert_eq!(*f1, 3); + let _: &mut i32 = f2; + assert_eq!(*f2, 4); } - EnumProj::None => {} + EnumProj::Unit => unreachable!(), } - if let EnumProj::Variant2 { field1, field2 } = e { - let x: Pin<&mut i32> = field1; - assert_eq!(*x, 3); - - let y: &mut i32 = field2; - assert_eq!(*y, 4); + if let EnumProj::Struct { f1, f2 } = e.as_mut().project() { + let _: Pin<&mut i32> = f1; + assert_eq!(*f1, 3); + let _: &mut i32 = f2; + assert_eq!(*f2, 4); } } #[test] fn enum_project_set() { - #[pin_project(project_replace, project = EnumProj)] + #[pin_project(project = EnumProj, project_ref = EnumProjRef, project_replace = EnumProjOwn)] #[derive(Eq, PartialEq, Debug)] enum Enum { - Variant1(#[pin] u8), - Variant2(bool), + V1(#[pin] u8), + V2(bool), } - let mut e = Enum::Variant1(25); + let mut e = Enum::V1(25); let mut e_orig = Pin::new(&mut e); let e_proj = e_orig.as_mut().project(); match e_proj { - EnumProj::Variant1(val) => { - let new_e = Enum::Variant2(val.as_ref().get_ref() == &25); + EnumProj::V1(val) => { + let new_e = Enum::V2(val.as_ref().get_ref() == &25); e_orig.set(new_e); } - _ => unreachable!(), + EnumProj::V2(_) => unreachable!(), } - assert_eq!(e, Enum::Variant2(true)); + assert_eq!(e, Enum::V2(true)); } #[test] @@ -156,7 +158,7 @@ fn where_clause() { where T: Copy, { - field: T, + f: T, } #[pin_project] @@ -164,12 +166,12 @@ fn where_clause() { where T: Copy; - #[pin_project] - enum EnumWhere + #[pin_project(project = EnumProj, project_ref = EnumProjRef, project_replace = EnumProjOwn)] + enum Enum where T: Copy, { - Variant(T), + V(T), } } @@ -181,8 +183,8 @@ fn where_clause_and_associated_type_field() { I: Iterator, { #[pin] - field1: I, - field2: I::Item, + f1: I, + f2: I::Item, } #[pin_project(project_replace)] @@ -191,8 +193,8 @@ fn where_clause_and_associated_type_field() { I: Iterator, { #[pin] - field1: I, - field2: J, + f1: I, + f2: J, } #[pin_project(project_replace)] @@ -200,7 +202,7 @@ fn where_clause_and_associated_type_field() { where T: 'static, { - field: T, + f: T, } trait Static: 'static {} @@ -212,13 +214,13 @@ fn 
where_clause_and_associated_type_field() { where I: Iterator; - #[pin_project(project_replace)] + #[pin_project(project = EnumProj, project_ref = EnumProjRef, project_replace = EnumProjOwn)] enum Enum where I: Iterator, { - Variant1(#[pin] I), - Variant2(I::Item), + V1(#[pin] I), + V2(I::Item), } } @@ -227,7 +229,7 @@ fn derive_copy() { #[pin_project(project_replace)] #[derive(Clone, Copy)] struct Struct { - val: T, + f: T, } fn is_copy() {} @@ -241,21 +243,21 @@ fn move_out() { #[pin_project(project_replace)] struct Struct { - val: NotCopy, + f: NotCopy, } - let x = Struct { val: NotCopy }; - let _val: NotCopy = x.val; + let x = Struct { f: NotCopy }; + let _val: NotCopy = x.f; - #[pin_project(project_replace)] + #[pin_project(project = EnumProj, project_ref = EnumProjRef, project_replace = EnumProjOwn)] enum Enum { - Variant(NotCopy), + V(NotCopy), } - let x = Enum::Variant(NotCopy); + let x = Enum::V(NotCopy); #[allow(clippy::infallible_destructuring_match)] let _val: NotCopy = match x { - Enum::Variant(val) => val, + Enum::V(val) => val, }; } @@ -263,39 +265,39 @@ fn move_out() { fn trait_bounds_on_type_generics() { #[pin_project(project_replace)] pub struct Struct1<'a, T: ?Sized> { - field: &'a mut T, + f: &'a mut T, } #[pin_project(project_replace)] pub struct Struct2<'a, T: ::core::fmt::Debug> { - field: &'a mut T, + f: &'a mut T, } #[pin_project(project_replace)] pub struct Struct3<'a, T: core::fmt::Debug> { - field: &'a mut T, + f: &'a mut T, } #[pin_project(project_replace)] pub struct Struct4<'a, T: core::fmt::Debug + core::fmt::Display> { - field: &'a mut T, + f: &'a mut T, } #[pin_project(project_replace)] pub struct Struct5<'a, T: core::fmt::Debug + ?Sized> { - field: &'a mut T, + f: &'a mut T, } #[pin_project(project_replace)] pub struct Struct6<'a, T: core::fmt::Debug = [u8; 16]> { - field: &'a mut T, + f: &'a mut T, } - let _: Struct6<'_> = Struct6 { field: &mut [0u8; 16] }; + let _: Struct6<'_> = Struct6 { f: &mut [0_u8; 16] }; #[pin_project(project_replace)] pub struct Struct7 { - field: T, + f: T, } trait Static: 'static {} @@ -304,16 +306,16 @@ fn trait_bounds_on_type_generics() { #[pin_project(project_replace)] pub struct Struct8<'a, 'b: 'a> { - field1: &'a u8, - field2: &'b u8, + f1: &'a u8, + f2: &'b u8, } #[pin_project(project_replace)] pub struct TupleStruct<'a, T: ?Sized>(&'a mut T); - #[pin_project(project_replace)] + #[pin_project(project = EnumProj, project_ref = EnumProjRef, project_replace = EnumProjOwn)] enum Enum<'a, T: ?Sized> { - Variant(&'a mut T), + V(&'a mut T), } } @@ -322,27 +324,52 @@ fn overlapping_lifetime_names() { #[pin_project(project_replace)] pub struct Struct1<'pin, T> { #[pin] - field: &'pin mut T, + f: &'pin mut T, } #[pin_project(project_replace)] pub struct Struct2<'pin, 'pin_, 'pin__> { #[pin] - field: &'pin &'pin_ &'pin__ (), + f: &'pin &'pin_ &'pin__ (), } - pub trait A<'a> {} + pub trait Trait<'a> {} #[allow(single_use_lifetimes)] // https://github.com/rust-lang/rust/issues/55058 #[pin_project(project_replace)] - pub struct HRTB<'pin___, T> + pub struct Hrtb<'pin___, T> where for<'pin> &'pin T: Unpin, - T: for<'pin> A<'pin>, + T: for<'pin> Trait<'pin>, for<'pin, 'pin_, 'pin__> &'pin &'pin_ &'pin__ T: Unpin, { #[pin] - field: &'pin___ mut T, + f: &'pin___ mut T, + } + + #[pin_project(PinnedDrop)] + pub struct PinnedDropStruct<'pin> { + #[pin] + f: &'pin (), + } + + #[pinned_drop] + impl PinnedDrop for PinnedDropStruct<'_> { + fn drop(self: Pin<&mut Self>) {} + } + + #[pin_project(UnsafeUnpin)] + pub struct UnsafeUnpinStruct<'pin> { + 
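+ // Uses a user-written 'pin lifetime on purpose: the test checks that it does not collide with the 'pin lifetime introduced by the generated projection types.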
#[pin] + f: &'pin (), + } + + unsafe impl UnsafeUnpin for UnsafeUnpinStruct<'_> {} + + #[pin_project(!Unpin)] + pub struct NotUnpinStruct<'pin> { + #[pin] + f: &'pin (), } } @@ -351,7 +378,7 @@ fn combine() { #[pin_project(PinnedDrop, UnsafeUnpin)] pub struct PinnedDropWithUnsafeUnpin { #[pin] - field: T, + f: T, } #[pinned_drop] @@ -364,7 +391,7 @@ fn combine() { #[pin_project(PinnedDrop, !Unpin)] pub struct PinnedDropWithNotUnpin { #[pin] - field: T, + f: T, } #[pinned_drop] @@ -375,7 +402,7 @@ fn combine() { #[pin_project(UnsafeUnpin, project_replace)] pub struct UnsafeUnpinWithReplace { #[pin] - field: T, + f: T, } unsafe impl UnsafeUnpin for UnsafeUnpinWithReplace {} @@ -383,7 +410,7 @@ fn combine() { #[pin_project(!Unpin, project_replace)] pub struct NotUnpinWithReplace { #[pin] - field: T, + f: T, } } @@ -411,13 +438,13 @@ fn lifetime_project() { #[pin_project(project_replace)] struct Struct2<'a, T, U> { #[pin] - pinned: &'a mut T, + pinned: &'a T, unpinned: U, } - #[pin_project(project_replace, project = EnumProj, project_ref = EnumProjRef)] + #[pin_project(project = EnumProj, project_ref = EnumProjRef, project_replace = EnumProjOwn)] enum Enum { - Variant { + V { #[pin] pinned: T, unpinned: U, @@ -431,13 +458,25 @@ fn lifetime_project() { fn get_pin_mut<'a>(self: Pin<&'a mut Self>) -> Pin<&'a mut T> { self.project().pinned } + fn get_pin_ref_elided(self: Pin<&Self>) -> Pin<&T> { + self.project_ref().pinned + } + fn get_pin_mut_elided(self: Pin<&mut Self>) -> Pin<&mut T> { + self.project().pinned + } } impl<'b, T, U> Struct2<'b, T, U> { - fn get_pin_ref<'a>(self: Pin<&'a Self>) -> Pin<&'a &'b mut T> { + fn get_pin_ref<'a>(self: Pin<&'a Self>) -> Pin<&'a &'b T> { self.project_ref().pinned } - fn get_pin_mut<'a>(self: Pin<&'a mut Self>) -> Pin<&'a mut &'b mut T> { + fn get_pin_mut<'a>(self: Pin<&'a mut Self>) -> Pin<&'a mut &'b T> { + self.project().pinned + } + fn get_pin_ref_elided(self: Pin<&Self>) -> Pin<&&'b T> { + self.project_ref().pinned + } + fn get_pin_mut_elided(self: Pin<&mut Self>) -> Pin<&mut &'b T> { self.project().pinned } } @@ -445,70 +484,22 @@ fn lifetime_project() { impl Enum { fn get_pin_ref<'a>(self: Pin<&'a Self>) -> Pin<&'a T> { match self.project_ref() { - EnumProjRef::Variant { pinned, .. } => pinned, + EnumProjRef::V { pinned, .. } => pinned, } } fn get_pin_mut<'a>(self: Pin<&'a mut Self>) -> Pin<&'a mut T> { match self.project() { - EnumProj::Variant { pinned, .. } => pinned, + EnumProj::V { pinned, .. 
} => pinned, } } - } -} - -#[rustversion::since(1.36)] // https://github.com/rust-lang/rust/pull/61207 -#[test] -fn lifetime_project_elided() { - #[pin_project(project_replace)] - struct Struct1 { - #[pin] - pinned: T, - unpinned: U, - } - - #[pin_project(project_replace)] - struct Struct2<'a, T, U> { - #[pin] - pinned: &'a mut T, - unpinned: U, - } - - #[pin_project(project_replace, project = EnumProj, project_ref = EnumProjRef)] - enum Enum { - Variant { - #[pin] - pinned: T, - unpinned: U, - }, - } - - impl Struct1 { - fn get_pin_ref(self: Pin<&Self>) -> Pin<&T> { - self.project_ref().pinned - } - fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut T> { - self.project().pinned - } - } - - impl<'b, T, U> Struct2<'b, T, U> { - fn get_pin_ref(self: Pin<&Self>) -> Pin<&&'b mut T> { - self.project_ref().pinned - } - fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut &'b mut T> { - self.project().pinned - } - } - - impl Enum { - fn get_pin_ref(self: Pin<&Self>) -> Pin<&T> { + fn get_pin_ref_elided(self: Pin<&Self>) -> Pin<&T> { match self.project_ref() { - EnumProjRef::Variant { pinned, .. } => pinned, + EnumProjRef::V { pinned, .. } => pinned, } } - fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut T> { + fn get_pin_mut_elided(self: Pin<&mut Self>) -> Pin<&mut T> { match self.project() { - EnumProj::Variant { pinned, .. } => pinned, + EnumProj::V { pinned, .. } => pinned, } } } @@ -518,19 +509,19 @@ mod visibility { use pin_project::pin_project; #[pin_project(project_replace)] - pub(crate) struct A { - pub b: u8, + pub(crate) struct S { + pub f: u8, } } #[test] fn visibility() { - let mut x = visibility::A { b: 0 }; + let mut x = visibility::S { f: 0 }; let x = Pin::new(&mut x); let y = x.as_ref().project_ref(); - let _: &u8 = y.b; + let _: &u8 = y.f; let y = x.project(); - let _: &mut u8 = y.b; + let _: &mut u8 = y.f; } #[test] @@ -538,45 +529,66 @@ fn trivial_bounds() { #[pin_project(project_replace)] pub struct NoGenerics { #[pin] - field: PhantomPinned, + f: PhantomPinned, } + + assert_not_unpin!(NoGenerics); } #[test] fn dst() { #[pin_project] struct Struct1 { - x: T, + f: T, } - let mut x = Struct1 { x: 0_u8 }; + let mut x = Struct1 { f: 0_u8 }; let x: Pin<&mut Struct1> = Pin::new(&mut x as _); - let _y: &mut (dyn core::fmt::Debug) = x.project().x; + let _: &mut (dyn core::fmt::Debug) = x.project().f; #[pin_project] struct Struct2 { #[pin] - x: T, + f: T, } - let mut x = Struct2 { x: 0_u8 }; + let mut x = Struct2 { f: 0_u8 }; let x: Pin<&mut Struct2> = Pin::new(&mut x as _); - let _y: Pin<&mut (dyn core::fmt::Debug + Unpin)> = x.project().x; + let _: Pin<&mut (dyn core::fmt::Debug + Unpin)> = x.project().f; + + #[allow(explicit_outlives_requirements)] // https://github.com/rust-lang/rust/issues/60993 + #[pin_project] + struct Struct3 + where + T: ?Sized, + { + f: T, + } + + #[allow(explicit_outlives_requirements)] // https://github.com/rust-lang/rust/issues/60993 + #[pin_project] + struct Struct4 + where + T: ?Sized, + { + #[pin] + f: T, + } #[pin_project(UnsafeUnpin)] struct Struct5 { - x: T, + f: T, } #[pin_project(UnsafeUnpin)] struct Struct6 { #[pin] - x: T, + f: T, } #[pin_project(PinnedDrop)] struct Struct7 { - x: T, + f: T, } #[pinned_drop] @@ -587,7 +599,7 @@ fn dst() { #[pin_project(PinnedDrop)] struct Struct8 { #[pin] - x: T, + f: T, } #[pinned_drop] @@ -597,13 +609,19 @@ fn dst() { #[pin_project(!Unpin)] struct Struct9 { - x: T, + f: T, } #[pin_project(!Unpin)] struct Struct10 { #[pin] - x: T, + f: T, + } + + #[pin_project] + struct Struct11<'a, T: ?Sized, U: ?Sized> { + f1: &'a 
mut T, + f2: U, } #[pin_project] @@ -612,6 +630,18 @@ fn dst() { #[pin_project] struct TupleStruct2(#[pin] T); + #[allow(explicit_outlives_requirements)] // https://github.com/rust-lang/rust/issues/60993 + #[pin_project] + struct TupleStruct3(T) + where + T: ?Sized; + + #[allow(explicit_outlives_requirements)] // https://github.com/rust-lang/rust/issues/60993 + #[pin_project] + struct TupleStruct4(#[pin] T) + where + T: ?Sized; + #[pin_project(UnsafeUnpin)] struct TupleStruct5(T); @@ -639,37 +669,9 @@ fn dst() { #[pin_project(!Unpin)] struct TupleStruct10(#[pin] T); -} - -#[allow(explicit_outlives_requirements)] // https://github.com/rust-lang/rust/issues/60993 -#[test] -fn unsized_in_where_clause() { - #[pin_project] - struct Struct3 - where - T: ?Sized, - { - x: T, - } #[pin_project] - struct Struct4 - where - T: ?Sized, - { - #[pin] - x: T, - } - - #[pin_project] - struct TupleStruct3(T) - where - T: ?Sized; - - #[pin_project] - struct TupleStruct4(#[pin] T) - where - T: ?Sized; + struct TupleStruct11<'a, T: ?Sized, U: ?Sized>(&'a mut T, U); } #[test] @@ -721,6 +723,7 @@ fn parse_self() { type Assoc; } + #[allow(clippy::type_repetition_in_bounds)] #[pin_project(project_replace)] pub struct Generics> where @@ -777,7 +780,7 @@ fn parse_self() { type Assoc = Self; } - #[pin_project(project_replace)] + #[pin_project(project = EnumProj, project_ref = EnumProjRef, project_replace = EnumProjOwn)] enum Enum { Struct { _f1: Box, @@ -811,19 +814,19 @@ fn parse_self() { #[test] fn no_infer_outlives() { - trait Bar { + trait Trait { type Y; } - struct Example(A); + struct Struct1(A); - impl Bar for Example { + impl Trait for Struct1 { type Y = Option; } #[pin_project(project_replace)] - struct Foo { - _x: as Bar>::Y, + struct Struct2 { + _f: as Trait>::Y, } } @@ -832,8 +835,6 @@ fn no_infer_outlives() { #[allow(clippy::many_single_char_names)] #[test] fn project_replace_panic() { - use std::panic; - #[pin_project(project_replace)] struct S { #[pin] @@ -846,7 +847,7 @@ fn project_replace_panic() { fn drop(&mut self) { *self.0 = true; if self.1 { - panic!() + panic!(); } } } diff --git a/third_party/rust/pin-project/tests/pinned_drop.rs b/third_party/rust/pin-project/tests/pinned_drop.rs index ab4ffd66a24a..99273c4234fb 100644 --- a/third_party/rust/pin-project/tests/pinned_drop.rs +++ b/third_party/rust/pin-project/tests/pinned_drop.rs @@ -25,66 +25,10 @@ fn safe_project() { assert!(was_dropped); } -#[test] -fn self_argument_in_macro() { - use std::pin::Pin; - - use pin_project::{pin_project, pinned_drop}; - - #[pin_project(PinnedDrop)] - struct Struct { - x: (), - } - - #[pinned_drop] - impl PinnedDrop for Struct { - fn drop(self: Pin<&mut Self>) { - let _: Vec<_> = vec![self.x]; - } - } -} - -#[test] -fn self_in_macro_containing_fn() { - use std::pin::Pin; - - use pin_project::{pin_project, pinned_drop}; - - macro_rules! 
mac { - ($($tt:tt)*) => { - $($tt)* - }; - } - - #[pin_project(PinnedDrop)] - pub struct Struct { - _x: (), - } - - #[pinned_drop] - impl PinnedDrop for Struct { - fn drop(self: Pin<&mut Self>) { - let _ = mac!({ - impl Struct { - pub fn _f(self) -> Self { - self - } - } - }); - } - } -} - #[test] fn self_call() { - use std::pin::Pin; - - use pin_project::{pin_project, pinned_drop}; - #[pin_project(PinnedDrop)] - pub struct Struct { - _x: T, - } + pub struct S(T); trait Trait { fn self_ref(&self) {} @@ -94,10 +38,10 @@ fn self_call() { fn assoc_fn(_this: Pin<&mut Self>) {} } - impl Trait for Struct {} + impl Trait for S {} #[pinned_drop] - impl PinnedDrop for Struct { + impl PinnedDrop for S { fn drop(mut self: Pin<&mut Self>) { self.self_ref(); self.as_ref().self_pin_ref(); @@ -110,14 +54,10 @@ fn self_call() { } #[test] -fn self_struct() { - use std::pin::Pin; - - use pin_project::{pin_project, pinned_drop}; - +fn self_ty() { #[pin_project(PinnedDrop)] pub struct Struct { - pub x: (), + pub f: (), } #[pinned_drop] @@ -126,14 +66,14 @@ fn self_struct() { #[allow(clippy::match_single_binding)] fn drop(mut self: Pin<&mut Self>) { // expr - let _: Self = Self { x: () }; + let _: Self = Self { f: () }; // pat match *self { - Self { x: _ } => {} + Self { f: () } => {} } - if let Self { x: _ } = *self {} - let Self { x: _ } = *self; + if let Self { f: () } = *self {} + let Self { f: () } = *self; } } @@ -155,35 +95,56 @@ fn self_struct() { let Self(_) = *self; } } -} -#[rustversion::since(1.37)] // type_alias_enum_variants requires Rust 1.37 -#[test] -fn self_enum() { - use std::pin::Pin; - - use pin_project::{pin_project, pinned_drop}; - - #[pin_project(PinnedDrop)] + #[pin_project(PinnedDrop, project = EnumProj, project_ref = EnumProjRef)] pub enum Enum { - Struct { x: () }, + Struct { f: () }, Tuple(()), + Unit, } #[pinned_drop] impl PinnedDrop for Enum { fn drop(mut self: Pin<&mut Self>) { // expr - let _: Self = Self::Struct { x: () }; + let _: Self = Self::Struct { f: () }; let _: Self = Self::Tuple(()); + let _: Self = Self::Unit; // pat match *self { - Self::Struct { x: _ } => {} + Self::Struct { f: () } => {} Self::Tuple(_) => {} + Self::Unit => {} } - if let Self::Struct { x: _ } = *self {} + if let Self::Struct { f: () } = *self {} if let Self::Tuple(_) = *self {} + if let Self::Unit = *self {} + } + } +} + +#[test] +fn self_inside_macro_containing_fn() { + macro_rules! mac { + ($($tt:tt)*) => { + $($tt)* + }; + } + + #[pin_project(PinnedDrop)] + pub struct S(()); + + #[pinned_drop] + impl PinnedDrop for S { + fn drop(self: Pin<&mut Self>) { + mac!({ + impl S { + pub fn _f(self) -> Self { + self + } + } + }); } } } @@ -191,22 +152,17 @@ fn self_enum() { // See also `ui/pinned_drop/self.rs`. #[rustversion::since(1.40)] // https://github.com/rust-lang/rust/pull/64690 #[test] -fn self_in_macro_def() { - use std::pin::Pin; - - use pin_project::{pin_project, pinned_drop}; - +fn self_inside_macro_def() { #[pin_project(PinnedDrop)] - pub struct Struct { - _x: (), - } + pub struct S(()); #[pinned_drop] - impl PinnedDrop for Struct { + impl PinnedDrop for S { fn drop(self: Pin<&mut Self>) { macro_rules! 
mac { () => {{ let _ = self; + let _ = Self(()); }}; } mac!(); @@ -215,11 +171,22 @@ fn self_in_macro_def() { } #[test] -fn self_inside_macro() { - use std::pin::Pin; +fn self_arg_inside_macro_call() { + #[pin_project(PinnedDrop)] + struct Struct { + f: (), + } - use pin_project::{pin_project, pinned_drop}; + #[pinned_drop] + impl PinnedDrop for Struct { + fn drop(self: Pin<&mut Self>) { + let _: Vec<_> = vec![self.f]; + } + } +} +#[test] +fn self_ty_inside_macro_call() { macro_rules! mac { ($($tt:tt)*) => { $($tt)* @@ -231,24 +198,24 @@ fn self_inside_macro() { where mac!(Self): Send, { - _x: T, + _f: T, } impl Struct { - const ASSOCIATED1: &'static str = "1"; - fn associated1() {} + const ASSOC1: usize = 1; + fn assoc1() {} } trait Trait { - type Associated2; - const ASSOCIATED2: &'static str; - fn associated2(); + type Assoc2; + const ASSOC2: usize; + fn assoc2(); } impl Trait for Struct { - type Associated2 = (); - const ASSOCIATED2: &'static str = "2"; - fn associated2() {} + type Assoc2 = u8; + const ASSOC2: usize = 2; + fn assoc2() {} } #[pinned_drop] @@ -260,39 +227,34 @@ fn self_inside_macro() { #[allow(clippy::no_effect)] fn drop(self: Pin<&mut Self>) { // inherent items - mac!(Self::ASSOCIATED1;); - mac!(::ASSOCIATED1;); - mac!(Self::associated1();); - mac!(::associated1();); + mac!(Self::ASSOC1;); + mac!(::ASSOC1;); + mac!(Self::assoc1();); + mac!(::assoc1();); // trait items - mac!(let _: ::Associated2;); - mac!(Self::ASSOCIATED2;); - mac!(::ASSOCIATED2;); - mac!(::ASSOCIATED2;); - mac!(Self::associated2();); - mac!(::associated2();); - mac!(::associated2();); + mac!(let _: ::Assoc2;); + mac!(Self::ASSOC2;); + mac!(::ASSOC2;); + mac!(::ASSOC2;); + mac!(Self::assoc2();); + mac!(::assoc2();); + mac!(::assoc2();); } } } #[test] fn inside_macro() { - use std::pin::Pin; - - use pin_project::{pin_project, pinned_drop}; - #[pin_project(PinnedDrop)] - struct Struct(()); + struct S(()); macro_rules! mac { ($expr:expr) => { #[pinned_drop] - impl PinnedDrop for Struct { - #[allow(clippy::no_effect)] + impl PinnedDrop for S { fn drop(self: Pin<&mut Self>) { - $expr; + let _ = $expr; } } }; @@ -300,3 +262,23 @@ fn inside_macro() { mac!(1); } + +pub mod self_path { + use super::*; + + #[pin_project(PinnedDrop)] + pub struct S(T); + + fn f() {} + + #[pinned_drop] + impl PinnedDrop for self::S { + fn drop(mut self: Pin<&mut Self>) { + self::f(); + let _: self::S<()> = self::S(()); + let _: self::S> = self::S(self.as_mut()); + let self::S(()) = self::S(()); + let self::S(&mut Self(_)) = self::S(&mut *self); + } + } +} diff --git a/third_party/rust/pin-project/tests/project.rs b/third_party/rust/pin-project/tests/project.rs deleted file mode 100644 index c1f5b4dc9195..000000000000 --- a/third_party/rust/pin-project/tests/project.rs +++ /dev/null @@ -1,301 +0,0 @@ -#![warn(rust_2018_idioms, single_use_lifetimes)] -#![allow(dead_code)] -#![allow(deprecated)] - -// Ceurrently, `#[attr] if true {}` doesn't even *parse* on MSRV, -// which means that it will error even behind a `#[rustversion::since(..)]` -// -// This trick makes sure that we don't even attempt to parse -// the `#[project] if let _` test on MSRV. -#[rustversion::since(1.43)] -include!("project_if_attr.rs.in"); - -use std::pin::Pin; - -use pin_project::{pin_project, project, project_ref, project_replace}; - -#[project] // Nightly does not need a dummy attribute to the function. 
-#[test] -fn project_stmt_expr() { - #[pin_project] - struct Struct { - #[pin] - field1: T, - field2: U, - } - - let mut s = Struct { field1: 1, field2: 2 }; - - #[project] - let Struct { field1, field2 } = Pin::new(&mut s).project(); - - let x: Pin<&mut i32> = field1; - assert_eq!(*x, 1); - - let y: &mut i32 = field2; - assert_eq!(*y, 2); - - #[pin_project] - struct TupleStruct(#[pin] T, U); - - let mut s = TupleStruct(1, 2); - - #[project] - let TupleStruct(x, y) = Pin::new(&mut s).project(); - - let x: Pin<&mut i32> = x; - assert_eq!(*x, 1); - - let y: &mut i32 = y; - assert_eq!(*y, 2); - - #[pin_project] - enum Enum { - Variant1(#[pin] A, B), - Variant2 { - #[pin] - field1: C, - field2: D, - }, - None, - } - - let mut e = Enum::Variant1(1, 2); - - let mut e = Pin::new(&mut e).project(); - - #[project] - match &mut e { - Enum::Variant1(x, y) => { - let x: &mut Pin<&mut i32> = x; - assert_eq!(**x, 1); - - let y: &mut &mut i32 = y; - assert_eq!(**y, 2); - } - Enum::Variant2 { field1, field2 } => { - let _x: &mut Pin<&mut i32> = field1; - let _y: &mut &mut i32 = field2; - } - Enum::None => {} - } - - #[project] - let val = match &mut e { - Enum::Variant1(_, _) => true, - Enum::Variant2 { .. } => false, - Enum::None => false, - }; - assert_eq!(val, true); -} - -#[test] -fn project_impl() { - #[pin_project] - struct HasGenerics { - #[pin] - field1: T, - field2: U, - } - - #[project] - impl HasGenerics { - fn a(self) { - let Self { field1, field2 } = self; - - let _x: Pin<&mut T> = field1; - let _y: &mut U = field2; - } - } - - #[pin_project] - struct NoneGenerics { - #[pin] - field1: i32, - field2: u32, - } - - #[project] - impl NoneGenerics {} - - #[pin_project] - struct HasLifetimes<'a, T, U> { - #[pin] - field1: &'a mut T, - field2: U, - } - - #[project] - impl HasLifetimes<'_, T, U> {} - - #[pin_project] - struct HasOverlappingLifetimes<'pin, T, U> { - #[pin] - field1: &'pin mut T, - field2: U, - } - - #[allow(single_use_lifetimes)] - #[project] - impl<'pin, T, U> HasOverlappingLifetimes<'pin, T, U> {} - - #[pin_project] - struct HasOverlappingLifetimes2 { - #[pin] - field1: T, - field2: U, - } - - #[allow(single_use_lifetimes)] - #[allow(clippy::needless_lifetimes)] - #[project] - impl HasOverlappingLifetimes2 { - fn foo<'pin>(&'pin self) {} - } -} - -#[pin_project] -struct A { - #[pin] - field: u8, -} - -mod project_use_1 { - use std::pin::Pin; - - use pin_project::project; - - use crate::A; - #[project] - use crate::A; - - #[project] - #[test] - fn project_use() { - let mut x = A { field: 0 }; - #[project] - let A { field } = Pin::new(&mut x).project(); - let _: Pin<&mut u8> = field; - } -} - -mod project_use_2 { - use pin_project::project; - - #[project] - use crate::A; - - #[project] - impl A { - fn project_use(self) {} - } -} - -#[allow(clippy::unnecessary_operation, clippy::unit_arg)] -#[test] -#[project] -fn non_stmt_expr_match() { - #[pin_project] - enum Enum { - Variant(#[pin] A), - } - - let mut x = Enum::Variant(1); - let x = Pin::new(&mut x).project(); - - Some( - #[project] - match x { - Enum::Variant(_x) => {} - }, - ); -} - -// https://github.com/taiki-e/pin-project/issues/206 -#[allow(clippy::unnecessary_operation, clippy::unit_arg)] -#[test] -#[project] -fn issue_206() { - #[pin_project] - enum Enum { - Variant(#[pin] A), - } - - let mut x = Enum::Variant(1); - let x = Pin::new(&mut x).project(); - - Some({ - #[project] - match &x { - Enum::Variant(_) => {} - } - }); - - #[allow(clippy::never_loop)] - loop { - let _ = { - #[project] - match &x { - Enum::Variant(_) => {} 
- } - }; - break; - } -} - -#[project] -#[test] -fn combine() { - #[pin_project(project_replace)] - enum Enum { - V1(#[pin] A), - V2, - } - - let mut x = Enum::V1(1); - #[project] - match Pin::new(&mut x).project() { - Enum::V1(_) => {} - Enum::V2 => unreachable!(), - } - #[project_ref] - match Pin::new(&x).project_ref() { - Enum::V1(_) => {} - Enum::V2 => unreachable!(), - } - #[project_replace] - match Pin::new(&mut x).project_replace(Enum::V2) { - Enum::V1(_) => {} - Enum::V2 => unreachable!(), - } -} - -// FIXME: This should be denied, but allowed for compatibility at this time. -#[project] -#[project_ref] -#[project_replace] -#[test] -fn combine_compat() { - #[pin_project(project_replace)] - enum Enum { - V1(#[pin] A), - V2, - } - - let mut x = Enum::V1(1); - #[project] - match Pin::new(&mut x).project() { - Enum::V1(_) => {} - Enum::V2 => unreachable!(), - } - #[project_ref] - match Pin::new(&x).project_ref() { - Enum::V1(_) => {} - Enum::V2 => unreachable!(), - } - #[project_replace] - match Pin::new(&mut x).project_replace(Enum::V2) { - Enum::V1(_) => {} - Enum::V2 => unreachable!(), - } -} diff --git a/third_party/rust/pin-project/tests/project_if_attr.rs.in b/third_party/rust/pin-project/tests/project_if_attr.rs.in deleted file mode 100644 index 7bc236dbfc33..000000000000 --- a/third_party/rust/pin-project/tests/project_if_attr.rs.in +++ /dev/null @@ -1,45 +0,0 @@ -#[test] -#[project] -fn project_if_let() { - #[pin_project] - enum Foo { - Variant1(#[pin] A), - Variant2(u8), - Variant3 { - #[pin] - field: B, - }, - } - - let mut x: Foo = Foo::Variant1(true); - let x = Pin::new(&mut x).project(); - - #[project] - if let Foo::Variant1(a) = x { - let a: Pin<&mut bool> = a; - assert_eq!(*a, true); - } else if let Foo::Variant2(_) = x { - unreachable!(); - } else if let Foo::Variant3 { .. } = x { - unreachable!(); - } -} - -#[allow(clippy::unnecessary_operation, clippy::unit_arg)] -#[test] -#[project] -fn non_stmt_expr_if_let() { - #[pin_project] - enum Enum { - Variant(#[pin] A), - } - - let mut x = Enum::Variant(1); - let x = Pin::new(&mut x).project(); - - #[allow(irrefutable_let_patterns)] - Some( - #[project] - if let Enum::Variant(_x) = x {}, - ); -} diff --git a/third_party/rust/pin-project/tests/project_ref.rs b/third_party/rust/pin-project/tests/project_ref.rs deleted file mode 100644 index c6cbc02ffc03..000000000000 --- a/third_party/rust/pin-project/tests/project_ref.rs +++ /dev/null @@ -1,176 +0,0 @@ -#![warn(rust_2018_idioms, single_use_lifetimes)] -#![allow(dead_code)] -#![allow(deprecated)] - -use std::pin::Pin; - -use pin_project::{pin_project, project_ref}; - -#[project_ref] // Nightly does not need a dummy attribute to the function. 
-#[test] -fn project_stmt_expr() { - #[pin_project] - struct Struct { - #[pin] - field1: T, - field2: U, - } - - let s = Struct { field1: 1, field2: 2 }; - - #[project_ref] - let Struct { field1, field2 } = Pin::new(&s).project_ref(); - - let x: Pin<&i32> = field1; - assert_eq!(*x, 1); - - let y: &i32 = field2; - assert_eq!(*y, 2); - - // tuple struct - - #[pin_project] - struct TupleStruct(#[pin] T, U); - - let s = TupleStruct(1, 2); - - #[project_ref] - let TupleStruct(x, y) = Pin::new(&s).project_ref(); - - let x: Pin<&i32> = x; - assert_eq!(*x, 1); - - let y: &i32 = y; - assert_eq!(*y, 2); - - #[pin_project] - enum Enum { - Variant1(#[pin] A, B), - Variant2 { - #[pin] - field1: C, - field2: D, - }, - None, - } - - let e = Enum::Variant1(1, 2); - - let e = Pin::new(&e).project_ref(); - - #[project_ref] - match &e { - Enum::Variant1(x, y) => { - let x: &Pin<&i32> = x; - assert_eq!(**x, 1); - - let y: &&i32 = y; - assert_eq!(**y, 2); - } - Enum::Variant2 { field1, field2 } => { - let _x: &Pin<&i32> = field1; - let _y: &&i32 = field2; - } - Enum::None => {} - } - - #[project_ref] - let val = match &e { - Enum::Variant1(_, _) => true, - Enum::Variant2 { .. } => false, - Enum::None => false, - }; - assert_eq!(val, true); -} - -#[test] -fn project_impl() { - #[pin_project] - struct HasGenerics { - #[pin] - field1: T, - field2: U, - } - - #[project_ref] - impl HasGenerics { - fn a(self) { - let Self { field1, field2 } = self; - - let _x: Pin<&T> = field1; - let _y: &U = field2; - } - } - - #[pin_project] - struct NoneGenerics { - #[pin] - field1: i32, - field2: u32, - } - - #[project_ref] - impl NoneGenerics {} - - #[pin_project] - struct HasLifetimes<'a, T, U> { - #[pin] - field1: &'a mut T, - field2: U, - } - - #[project_ref] - impl HasLifetimes<'_, T, U> {} - - #[pin_project] - struct HasOverlappingLifetimes<'pin, T, U> { - #[pin] - field1: &'pin mut T, - field2: U, - } - - #[allow(single_use_lifetimes)] - #[project_ref] - impl<'pin, T, U> HasOverlappingLifetimes<'pin, T, U> {} - - #[pin_project] - struct HasOverlappingLifetimes2 { - #[pin] - field1: T, - field2: U, - } - - #[allow(single_use_lifetimes)] - #[allow(clippy::needless_lifetimes)] - #[project_ref] - impl HasOverlappingLifetimes2 { - fn foo<'pin>(&'pin self) {} - } -} - -#[project_ref] -#[test] -fn combine() { - #[pin_project(project_replace)] - enum Enum { - V1(#[pin] A), - V2, - } - - let mut x = Enum::V1(1); - #[project] - match Pin::new(&mut x).project() { - Enum::V1(_) => {} - Enum::V2 => unreachable!(), - } - #[project_ref] - match Pin::new(&x).project_ref() { - Enum::V1(_) => {} - Enum::V2 => unreachable!(), - } - #[project_replace] - match Pin::new(&mut x).project_replace(Enum::V2) { - Enum::V1(_) => {} - Enum::V2 => unreachable!(), - } -} diff --git a/third_party/rust/pin-project/tests/project_replace.rs b/third_party/rust/pin-project/tests/project_replace.rs deleted file mode 100644 index 6e80b34e15a0..000000000000 --- a/third_party/rust/pin-project/tests/project_replace.rs +++ /dev/null @@ -1,100 +0,0 @@ -#![warn(rust_2018_idioms, single_use_lifetimes)] -#![allow(dead_code)] -#![allow(deprecated)] - -use std::{marker::PhantomData, pin::Pin}; - -use pin_project::{pin_project, project_replace}; - -#[project_replace] // Nightly does not need a dummy attribute to the function. 
-#[test] -fn project_replace_stmt_expr() { - #[pin_project(project_replace)] - struct Struct { - #[pin] - field1: T, - field2: U, - } - - let mut s = Struct { field1: 1, field2: 2 }; - - #[project_replace] - let Struct { field1, field2 } = - Pin::new(&mut s).project_replace(Struct { field1: 42, field2: 43 }); - - let _x: PhantomData = field1; - - let y: i32 = field2; - assert_eq!(y, 2); - - // tuple struct - - #[pin_project(project_replace)] - struct TupleStruct(#[pin] T, U); - - let mut s = TupleStruct(1, 2); - - #[project_replace] - let TupleStruct(x, y) = Pin::new(&mut s).project_replace(TupleStruct(42, 43)); - - let _x: PhantomData = x; - let y: i32 = y; - assert_eq!(y, 2); - - #[pin_project(project_replace)] - enum Enum { - Variant1(#[pin] A, B), - Variant2 { - #[pin] - field1: C, - field2: D, - }, - None, - } - - let mut e = Enum::Variant1(1, 2); - - let e = Pin::new(&mut e).project_replace(Enum::None); - - #[project_replace] - match e { - Enum::Variant1(x, y) => { - let _x: PhantomData = x; - let y: i32 = y; - assert_eq!(y, 2); - } - Enum::Variant2 { field1, field2 } => { - let _x: PhantomData = field1; - let _y: i32 = field2; - panic!() - } - Enum::None => panic!(), - } -} - -#[project_replace] -#[test] -fn combine() { - #[pin_project(project_replace)] - enum Enum { - V1(#[pin] A), - V2, - } - - let mut x = Enum::V1(1); - #[project] - match Pin::new(&mut x).project() { - Enum::V1(_) => {} - Enum::V2 => unreachable!(), - } - #[project_ref] - match Pin::new(&x).project_ref() { - Enum::V1(_) => {} - Enum::V2 => unreachable!(), - } - #[project_replace] - match Pin::new(&mut x).project_replace(Enum::V2) { - Enum::V1(_) => {} - Enum::V2 => unreachable!(), - } -} diff --git a/third_party/rust/pin-project/tests/proper_unpin.rs b/third_party/rust/pin-project/tests/proper_unpin.rs new file mode 100644 index 000000000000..0ba3ce3b5dbe --- /dev/null +++ b/third_party/rust/pin-project/tests/proper_unpin.rs @@ -0,0 +1,153 @@ +#![warn(rust_2018_idioms, single_use_lifetimes)] +#![allow(dead_code)] + +#[macro_use] +mod auxiliary; + +pub mod default { + use std::marker::PhantomPinned; + + use pin_project::pin_project; + + struct Inner { + f: T, + } + + assert_unpin!(Inner<()>); + assert_not_unpin!(Inner); + + #[pin_project] + struct Struct { + #[pin] + f1: Inner, + f2: U, + } + + assert_unpin!(Struct<(), ()>); + assert_unpin!(Struct<(), PhantomPinned>); + assert_not_unpin!(Struct); + assert_not_unpin!(Struct); + + #[pin_project(project = EnumProj, project_ref = EnumProjRef)] + enum Enum { + V1 { + #[pin] + f1: Inner, + f2: U, + }, + } + + assert_unpin!(Enum<(), ()>); + assert_unpin!(Enum<(), PhantomPinned>); + assert_not_unpin!(Enum); + assert_not_unpin!(Enum); + + #[pin_project] + struct TrivialBounds { + #[pin] + f: PhantomPinned, + } + + assert_not_unpin!(TrivialBounds); + + #[pin_project] + struct PinRef<'a, T, U> { + #[pin] + f1: &'a mut Inner, + f2: U, + } + + assert_unpin!(PinRef<'_, PhantomPinned, PhantomPinned>); +} + +pub mod cfg { + use std::marker::PhantomPinned; + + use pin_project::pin_project; + + #[pin_project] + struct Foo { + #[cfg(any())] + #[pin] + f: T, + #[cfg(not(any()))] + f: T, + } + + assert_unpin!(Foo); + + #[pin_project] + struct Bar { + #[cfg(any())] + f: T, + #[cfg(not(any()))] + #[pin] + f: T, + } + + assert_unpin!(Bar<()>); + assert_not_unpin!(Bar); +} + +pub mod cfg_attr { + use std::marker::PhantomPinned; + + use pin_project::pin_project; + + #[cfg_attr(any(), pin_project)] + struct Foo { + f: T, + } + + assert_unpin!(Foo<()>); + assert_not_unpin!(Foo); + + 
#[cfg_attr(not(any()), pin_project)] + struct Bar { + #[cfg_attr(not(any()), pin)] + f: T, + } + + assert_unpin!(Bar<()>); + assert_not_unpin!(Bar); +} + +// pin_project(!Unpin) +pub mod not_unpin { + use std::marker::PhantomPinned; + + use pin_project::pin_project; + + struct Inner { + f: T, + } + + #[pin_project(!Unpin)] + struct Struct { + #[pin] + inner: Inner, + other: U, + } + + assert_not_unpin!(Struct<(), ()>); + assert_not_unpin!(Struct<(), PhantomPinned>); + assert_not_unpin!(Struct); + assert_not_unpin!(Struct); + + #[pin_project(!Unpin)] + struct TrivialBounds { + #[pin] + f: PhantomPinned, + } + + assert_not_unpin!(TrivialBounds); + + #[pin_project(!Unpin)] + struct PinRef<'a, T, U> { + #[pin] + inner: &'a mut Inner, + other: U, + } + + assert_not_unpin!(PinRef<'_, (), ()>); +} diff --git a/third_party/rust/pin-project/tests/repr_packed.rs b/third_party/rust/pin-project/tests/repr_packed.rs index 599f95dd7a9d..be7cab975fe3 100644 --- a/third_party/rust/pin-project/tests/repr_packed.rs +++ b/third_party/rust/pin-project/tests/repr_packed.rs @@ -25,15 +25,15 @@ fn weird_repr_packed() { } #[repr(packed)] - struct Foo { + struct Struct { field: u8, } - impl Drop for Foo { + impl Drop for Struct { fn drop(&mut self) { FIELD_ADDR.with(|f| { f.set(&self.field as *const u8 as usize); - }) + }); } } @@ -44,9 +44,9 @@ fn weird_repr_packed() { // Calling drop(foo) causes 'foo' to be moved // into the 'drop' function, resulting in a different // address. - let x = Foo { field: 27 }; + let x = Struct { field: 27 }; let field_addr = &x.field as *const u8 as usize; field_addr }; - assert_eq!(field_addr, FIELD_ADDR.with(|f| f.get())); + assert_eq!(field_addr, FIELD_ADDR.with(Cell::get)); } diff --git a/third_party/rust/pin-project/tests/sized.rs b/third_party/rust/pin-project/tests/sized.rs deleted file mode 100644 index 9fd7e2ba6a04..000000000000 --- a/third_party/rust/pin-project/tests/sized.rs +++ /dev/null @@ -1,13 +0,0 @@ -#![warn(rust_2018_idioms, single_use_lifetimes)] -#![allow(dead_code)] - -use pin_project::pin_project; - -#[pin_project] -struct Foo<'a, I: ?Sized, Item> -where - I: Iterator, -{ - iter: &'a mut I, - item: Option, -} diff --git a/third_party/rust/pin-project/tests/ui/cfg/cfg_attr-resolve.rs b/third_party/rust/pin-project/tests/ui/cfg/cfg_attr-resolve.rs index e16f3e8de257..e36cc9593ad1 100644 --- a/third_party/rust/pin-project/tests/ui/cfg/cfg_attr-resolve.rs +++ b/third_party/rust/pin-project/tests/ui/cfg/cfg_attr-resolve.rs @@ -2,10 +2,10 @@ use std::pin::Pin; #[cfg_attr(any(), pin_project::pin_project)] struct Foo { - inner: T, + f: T, } fn main() { - let mut x = Foo { inner: 0_u8 }; - let _x = Pin::new(&mut x).project(); //~ ERROR E0599 + let mut x = Foo { f: 0_u8 }; + let _ = Pin::new(&mut x).project(); //~ ERROR E0599 } diff --git a/third_party/rust/pin-project/tests/ui/cfg/cfg_attr-resolve.stderr b/third_party/rust/pin-project/tests/ui/cfg/cfg_attr-resolve.stderr index c473e8a1a50b..0393c143fe06 100644 --- a/third_party/rust/pin-project/tests/ui/cfg/cfg_attr-resolve.stderr +++ b/third_party/rust/pin-project/tests/ui/cfg/cfg_attr-resolve.stderr @@ -1,5 +1,5 @@ error[E0599]: no method named `project` found for struct `Pin<&mut Foo>` in the current scope - --> tests/ui/cfg/cfg_attr-resolve.rs:10:31 + --> tests/ui/cfg/cfg_attr-resolve.rs:10:30 | -10 | let _x = Pin::new(&mut x).project(); //~ ERROR E0599 - | ^^^^^^^ method not found in `Pin<&mut Foo>` +10 | let _ = Pin::new(&mut x).project(); //~ ERROR E0599 + | ^^^^^^^ method not found in `Pin<&mut Foo>` diff --git 
a/third_party/rust/pin-project/tests/ui/cfg/cfg_attr-type-mismatch.rs b/third_party/rust/pin-project/tests/ui/cfg/cfg_attr-type-mismatch.rs index 2807c8768ba5..1b9664b5447e 100644 --- a/third_party/rust/pin-project/tests/ui/cfg/cfg_attr-type-mismatch.rs +++ b/third_party/rust/pin-project/tests/ui/cfg/cfg_attr-type-mismatch.rs @@ -1,24 +1,25 @@ -use pin_project::pin_project; use std::pin::Pin; +use pin_project::pin_project; + #[cfg_attr(not(any()), pin_project)] struct Foo { #[cfg_attr(any(), pin)] - inner: T, + f: T, } #[cfg_attr(not(any()), pin_project)] struct Bar { #[cfg_attr(not(any()), pin)] - inner: T, + f: T, } fn main() { - let mut x = Foo { inner: 0_u8 }; + let mut x = Foo { f: 0_u8 }; let x = Pin::new(&mut x).project(); - let _: Pin<&mut u8> = x.inner; //~ ERROR E0308 + let _: Pin<&mut u8> = x.f; //~ ERROR E0308 - let mut x = Bar { inner: 0_u8 }; + let mut x = Bar { f: 0_u8 }; let x = Pin::new(&mut x).project(); - let _: &mut u8 = x.inner; //~ ERROR E0308 + let _: &mut u8 = x.f; //~ ERROR E0308 } diff --git a/third_party/rust/pin-project/tests/ui/cfg/cfg_attr-type-mismatch.stderr b/third_party/rust/pin-project/tests/ui/cfg/cfg_attr-type-mismatch.stderr index 166a32c0bb5f..366d9c74cbde 100644 --- a/third_party/rust/pin-project/tests/ui/cfg/cfg_attr-type-mismatch.stderr +++ b/third_party/rust/pin-project/tests/ui/cfg/cfg_attr-type-mismatch.stderr @@ -1,8 +1,8 @@ error[E0308]: mismatched types - --> tests/ui/cfg/cfg_attr-type-mismatch.rs:19:27 + --> tests/ui/cfg/cfg_attr-type-mismatch.rs:20:27 | -19 | let _: Pin<&mut u8> = x.inner; //~ ERROR E0308 - | ------------ ^^^^^^^ expected struct `Pin`, found `&mut u8` +20 | let _: Pin<&mut u8> = x.f; //~ ERROR E0308 + | ------------ ^^^ expected struct `Pin`, found `&mut u8` | | | expected due to this | @@ -10,13 +10,13 @@ error[E0308]: mismatched types found mutable reference `&mut u8` error[E0308]: mismatched types - --> tests/ui/cfg/cfg_attr-type-mismatch.rs:23:22 + --> tests/ui/cfg/cfg_attr-type-mismatch.rs:24:22 | -23 | let _: &mut u8 = x.inner; //~ ERROR E0308 - | ------- ^^^^^^^ +24 | let _: &mut u8 = x.f; //~ ERROR E0308 + | ------- ^^^ | | | | | expected `&mut u8`, found struct `Pin` - | | help: consider mutably borrowing here: `&mut x.inner` + | | help: consider mutably borrowing here: `&mut x.f` | expected due to this | = note: expected mutable reference `&mut u8` diff --git a/third_party/rust/pin-project/tests/ui/cfg/cfg_attr-unpin.rs b/third_party/rust/pin-project/tests/ui/cfg/cfg_attr-unpin.rs deleted file mode 100644 index 7b882055423e..000000000000 --- a/third_party/rust/pin-project/tests/ui/cfg/cfg_attr-unpin.rs +++ /dev/null @@ -1,21 +0,0 @@ -use pin_project::pin_project; -use std::marker::PhantomPinned; - -#[cfg_attr(any(), pin_project)] -struct Foo { - inner: T, -} - -#[cfg_attr(not(any()), pin_project)] -struct Bar { - #[cfg_attr(not(any()), pin)] - inner: T, -} - -fn is_unpin() {} - -fn main() { - is_unpin::>(); // ERROR E0277 - is_unpin::>(); // Ok - is_unpin::>(); //~ ERROR E0277 -} diff --git a/third_party/rust/pin-project/tests/ui/cfg/cfg_attr-unpin.stderr b/third_party/rust/pin-project/tests/ui/cfg/cfg_attr-unpin.stderr deleted file mode 100644 index b59e644d64e1..000000000000 --- a/third_party/rust/pin-project/tests/ui/cfg/cfg_attr-unpin.stderr +++ /dev/null @@ -1,43 +0,0 @@ -error[E0277]: `PhantomPinned` cannot be unpinned - --> tests/ui/cfg/cfg_attr-unpin.rs:18:5 - | -18 | is_unpin::>(); // ERROR E0277 - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ within `Foo`, the trait `Unpin` is not implemented for `PhantomPinned` - | 
- = note: consider using `Box::pin` -note: required because it appears within the type `Foo` - --> tests/ui/cfg/cfg_attr-unpin.rs:5:8 - | -5 | struct Foo { - | ^^^ -note: required by a bound in `is_unpin` - --> tests/ui/cfg/cfg_attr-unpin.rs:15:16 - | -15 | fn is_unpin() {} - | ^^^^^ required by this bound in `is_unpin` - -error[E0277]: `PhantomPinned` cannot be unpinned - --> tests/ui/cfg/cfg_attr-unpin.rs:20:5 - | -20 | is_unpin::>(); //~ ERROR E0277 - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ within `__Bar<'_, PhantomPinned>`, the trait `Unpin` is not implemented for `PhantomPinned` - | - = note: consider using `Box::pin` -note: required because it appears within the type `__Bar<'_, PhantomPinned>` - --> tests/ui/cfg/cfg_attr-unpin.rs:10:8 - | -10 | struct Bar { - | ^^^ -note: required because of the requirements on the impl of `Unpin` for `Bar` - --> tests/ui/cfg/cfg_attr-unpin.rs:9:24 - | -9 | #[cfg_attr(not(any()), pin_project)] - | ^^^^^^^^^^^ -10 | struct Bar { - | ^^^^^^ -note: required by a bound in `is_unpin` - --> tests/ui/cfg/cfg_attr-unpin.rs:15:16 - | -15 | fn is_unpin() {} - | ^^^^^ required by this bound in `is_unpin` - = note: this error originates in the derive macro `::pin_project::__private::__PinProjectInternalDerive` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/third_party/rust/pin-project/tests/ui/cfg/packed_sneaky-span-issue-1.rs b/third_party/rust/pin-project/tests/ui/cfg/packed_sneaky-span-issue-1.rs index e0775d79cecb..7e19952b6d71 100644 --- a/third_party/rust/pin-project/tests/ui/cfg/packed_sneaky-span-issue-1.rs +++ b/third_party/rust/pin-project/tests/ui/cfg/packed_sneaky-span-issue-1.rs @@ -1,18 +1,15 @@ use auxiliary_macro::hidden_repr; use pin_project::pin_project; -//~ ERROR may not be used on #[repr(packed)] types -// span is lost. -// Refs: https://github.com/rust-lang/rust/issues/43081 #[pin_project] -#[hidden_repr(packed)] -struct Foo { +#[hidden_repr(packed)] //~ ERROR may not be used on #[repr(packed)] types +struct S { #[cfg(not(any()))] #[pin] - field: u32, + f: u32, #[cfg(any())] #[pin] - field: u8, + f: u8, } fn main() {} diff --git a/third_party/rust/pin-project/tests/ui/cfg/packed_sneaky-span-issue-1.stderr b/third_party/rust/pin-project/tests/ui/cfg/packed_sneaky-span-issue-1.stderr index 1783adb39f3a..4f3acc34941e 100644 --- a/third_party/rust/pin-project/tests/ui/cfg/packed_sneaky-span-issue-1.stderr +++ b/third_party/rust/pin-project/tests/ui/cfg/packed_sneaky-span-issue-1.stderr @@ -1,7 +1,5 @@ error: #[pin_project] attribute may not be used on #[repr(packed)] types - --> tests/ui/cfg/packed_sneaky-span-issue-1.rs:8:1 + --> tests/ui/cfg/packed_sneaky-span-issue-1.rs:5:15 | -8 | #[hidden_repr(packed)] - | ^^^^^^^^^^^^^^^^^^^^^^ - | - = note: this error originates in the attribute macro `hidden_repr` (in Nightly builds, run with -Z macro-backtrace for more info) +5 | #[hidden_repr(packed)] //~ ERROR may not be used on #[repr(packed)] types + | ^^^^^^ diff --git a/third_party/rust/pin-project/tests/ui/cfg/packed_sneaky-span-issue-2.rs b/third_party/rust/pin-project/tests/ui/cfg/packed_sneaky-span-issue-2.rs index 40eceaab9e59..fcea76bab80b 100644 --- a/third_party/rust/pin-project/tests/ui/cfg/packed_sneaky-span-issue-2.rs +++ b/third_party/rust/pin-project/tests/ui/cfg/packed_sneaky-span-issue-2.rs @@ -1,18 +1,15 @@ use auxiliary_macro::hidden_repr; use pin_project::pin_project; -//~ ERROR may not be used on #[repr(packed)] types -// span is lost. 
-// Refs: https://github.com/rust-lang/rust/issues/43081 #[pin_project] -#[hidden_repr(packed)] -struct Foo { +#[hidden_repr(packed)] //~ ERROR may not be used on #[repr(packed)] types +struct S { #[cfg(any())] #[pin] - field: u32, + f: u32, #[cfg(not(any()))] #[pin] - field: u8, + f: u8, } fn main() {} diff --git a/third_party/rust/pin-project/tests/ui/cfg/packed_sneaky-span-issue-2.stderr b/third_party/rust/pin-project/tests/ui/cfg/packed_sneaky-span-issue-2.stderr index ff418101ca16..cc2795ac68c1 100644 --- a/third_party/rust/pin-project/tests/ui/cfg/packed_sneaky-span-issue-2.stderr +++ b/third_party/rust/pin-project/tests/ui/cfg/packed_sneaky-span-issue-2.stderr @@ -1,7 +1,5 @@ error: #[pin_project] attribute may not be used on #[repr(packed)] types - --> tests/ui/cfg/packed_sneaky-span-issue-2.rs:8:1 + --> tests/ui/cfg/packed_sneaky-span-issue-2.rs:5:15 | -8 | #[hidden_repr(packed)] - | ^^^^^^^^^^^^^^^^^^^^^^ - | - = note: this error originates in the attribute macro `hidden_repr` (in Nightly builds, run with -Z macro-backtrace for more info) +5 | #[hidden_repr(packed)] //~ ERROR may not be used on #[repr(packed)] types + | ^^^^^^ diff --git a/third_party/rust/pin-project/tests/ui/cfg/packed_sneaky.rs b/third_party/rust/pin-project/tests/ui/cfg/packed_sneaky.rs index ab98b0650be2..0b01dc90e24f 100644 --- a/third_party/rust/pin-project/tests/ui/cfg/packed_sneaky.rs +++ b/third_party/rust/pin-project/tests/ui/cfg/packed_sneaky.rs @@ -4,9 +4,9 @@ use pin_project::pin_project; // `#[hidden_repr_cfg_not_any(packed)]` generates `#[cfg_attr(not(any()), repr(packed))]`. #[pin_project] #[hidden_repr_cfg_not_any(packed)] //~ ERROR may not be used on #[repr(packed)] types -struct Foo { +struct S { #[pin] - field: u32, + f: u32, } fn main() {} diff --git a/third_party/rust/pin-project/tests/ui/cfg/packed_sneaky.stderr b/third_party/rust/pin-project/tests/ui/cfg/packed_sneaky.stderr index 9c30192d317e..a54c2ec2d2ba 100644 --- a/third_party/rust/pin-project/tests/ui/cfg/packed_sneaky.stderr +++ b/third_party/rust/pin-project/tests/ui/cfg/packed_sneaky.stderr @@ -1,7 +1,5 @@ error: #[pin_project] attribute may not be used on #[repr(packed)] types - --> tests/ui/cfg/packed_sneaky.rs:6:1 + --> tests/ui/cfg/packed_sneaky.rs:6:27 | 6 | #[hidden_repr_cfg_not_any(packed)] //~ ERROR may not be used on #[repr(packed)] types - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - | - = note: this error originates in the attribute macro `hidden_repr_cfg_not_any` (in Nightly builds, run with -Z macro-backtrace for more info) + | ^^^^^^ diff --git a/third_party/rust/pin-project/tests/ui/cfg/proper_unpin.rs b/third_party/rust/pin-project/tests/ui/cfg/proper_unpin.rs deleted file mode 100644 index b7bb04de802e..000000000000 --- a/third_party/rust/pin-project/tests/ui/cfg/proper_unpin.rs +++ /dev/null @@ -1,28 +0,0 @@ -use pin_project::pin_project; -use std::marker::PhantomPinned; - -#[pin_project] -struct Foo { - #[cfg(any())] - #[pin] - inner: T, - #[cfg(not(any()))] - inner: T, -} - -#[pin_project] -struct Bar { - #[cfg(any())] - inner: T, - #[cfg(not(any()))] - #[pin] - inner: T, -} - -fn is_unpin() {} - -fn main() { - is_unpin::>(); // Ok - is_unpin::>(); // Ok - is_unpin::>(); //~ ERROR E0277 -} diff --git a/third_party/rust/pin-project/tests/ui/cfg/proper_unpin.stderr b/third_party/rust/pin-project/tests/ui/cfg/proper_unpin.stderr deleted file mode 100644 index af448c0064b0..000000000000 --- a/third_party/rust/pin-project/tests/ui/cfg/proper_unpin.stderr +++ /dev/null @@ -1,25 +0,0 @@ -error[E0277]: `PhantomPinned` 
cannot be unpinned - --> tests/ui/cfg/proper_unpin.rs:27:5 - | -27 | is_unpin::>(); //~ ERROR E0277 - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ within `__Bar<'_, PhantomPinned>`, the trait `Unpin` is not implemented for `PhantomPinned` - | - = note: consider using `Box::pin` -note: required because it appears within the type `__Bar<'_, PhantomPinned>` - --> tests/ui/cfg/proper_unpin.rs:14:8 - | -14 | struct Bar { - | ^^^ -note: required because of the requirements on the impl of `Unpin` for `Bar` - --> tests/ui/cfg/proper_unpin.rs:13:1 - | -13 | #[pin_project] - | ^^^^^^^^^^^^^^ -14 | struct Bar { - | ^^^^^^ -note: required by a bound in `is_unpin` - --> tests/ui/cfg/proper_unpin.rs:22:16 - | -22 | fn is_unpin() {} - | ^^^^^ required by this bound in `is_unpin` - = note: this error originates in the derive macro `::pin_project::__private::__PinProjectInternalDerive` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/third_party/rust/pin-project/tests/ui/cfg/unsupported.rs b/third_party/rust/pin-project/tests/ui/cfg/unsupported.rs index 5205307d221d..b950d4b828de 100644 --- a/third_party/rust/pin-project/tests/ui/cfg/unsupported.rs +++ b/third_party/rust/pin-project/tests/ui/cfg/unsupported.rs @@ -1,10 +1,8 @@ use pin_project::pin_project; -//~ ERROR may not be used on structs with zero fields -// span is lost. -// Refs: https://github.com/rust-lang/rust/issues/43081 #[pin_project] -struct Struct { +struct S { + //~^ ERROR may not be used on structs with zero fields #[cfg(any())] #[pin] f: u8, diff --git a/third_party/rust/pin-project/tests/ui/cfg/unsupported.stderr b/third_party/rust/pin-project/tests/ui/cfg/unsupported.stderr index 127df5706377..e1c871c01144 100644 --- a/third_party/rust/pin-project/tests/ui/cfg/unsupported.stderr +++ b/third_party/rust/pin-project/tests/ui/cfg/unsupported.stderr @@ -1,10 +1,11 @@ error: #[pin_project] attribute may not be used on structs with zero fields - --> tests/ui/cfg/unsupported.rs:7:15 - | -7 | struct Struct { - | _______________^ -8 | | #[cfg(any())] -9 | | #[pin] -10 | | f: u8, -11 | | } - | |_^ + --> tests/ui/cfg/unsupported.rs:4:10 + | +4 | struct S { + | __________^ +5 | | //~^ ERROR may not be used on structs with zero fields +6 | | #[cfg(any())] +7 | | #[pin] +8 | | f: u8, +9 | | } + | |_^ diff --git a/third_party/rust/pin-project/tests/ui/not_unpin/assert-not-unpin.rs b/third_party/rust/pin-project/tests/ui/not_unpin/assert-not-unpin.rs deleted file mode 100644 index b8f8238e8b98..000000000000 --- a/third_party/rust/pin-project/tests/ui/not_unpin/assert-not-unpin.rs +++ /dev/null @@ -1,40 +0,0 @@ -use pin_project::pin_project; -use std::marker::PhantomPinned; - -struct Inner { - val: T, -} - -#[pin_project(!Unpin)] -struct Foo { - #[pin] - inner: Inner, - other: U, -} - -#[pin_project(!Unpin)] -struct TrivialBounds { - #[pin] - field1: PhantomPinned, -} - -#[pin_project(!Unpin)] -struct Bar<'a, T, U> { - #[pin] - inner: &'a mut Inner, - other: U, -} - -fn is_unpin() {} - -fn main() { - is_unpin::>(); //~ ERROR E0277 - is_unpin::>(); //~ ERROR E0277 - is_unpin::>(); //~ ERROR E0277 - is_unpin::>(); //~ ERROR E0277 - - is_unpin::(); //~ ERROR E0277 - - is_unpin::>(); //~ ERROR E0277 - is_unpin::>(); //~ ERROR E0277 -} diff --git a/third_party/rust/pin-project/tests/ui/not_unpin/assert-not-unpin.stderr b/third_party/rust/pin-project/tests/ui/not_unpin/assert-not-unpin.stderr deleted file mode 100644 index be5b0e67b02c..000000000000 --- a/third_party/rust/pin-project/tests/ui/not_unpin/assert-not-unpin.stderr +++ /dev/null @@ 
-1,146 +0,0 @@ -error[E0277]: `PhantomPinned` cannot be unpinned - --> tests/ui/not_unpin/assert-not-unpin.rs:31:5 - | -31 | is_unpin::>(); //~ ERROR E0277 - | ^^^^^^^^^^^^^^^^^^^^^^^ within `Wrapper<'_, PhantomPinned>`, the trait `Unpin` is not implemented for `PhantomPinned` - | - = note: consider using `Box::pin` - = note: required because it appears within the type `Wrapper<'_, PhantomPinned>` -note: required because of the requirements on the impl of `Unpin` for `Foo<(), ()>` - --> tests/ui/not_unpin/assert-not-unpin.rs:8:15 - | -8 | #[pin_project(!Unpin)] - | ^^^^^^ -9 | struct Foo { - | ^^^^^^^^^ -note: required by a bound in `is_unpin` - --> tests/ui/not_unpin/assert-not-unpin.rs:28:16 - | -28 | fn is_unpin() {} - | ^^^^^ required by this bound in `is_unpin` - -error[E0277]: `PhantomPinned` cannot be unpinned - --> tests/ui/not_unpin/assert-not-unpin.rs:32:5 - | -32 | is_unpin::>(); //~ ERROR E0277 - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ within `Wrapper<'_, PhantomPinned>`, the trait `Unpin` is not implemented for `PhantomPinned` - | - = note: consider using `Box::pin` - = note: required because it appears within the type `Wrapper<'_, PhantomPinned>` -note: required because of the requirements on the impl of `Unpin` for `Foo` - --> tests/ui/not_unpin/assert-not-unpin.rs:8:15 - | -8 | #[pin_project(!Unpin)] - | ^^^^^^ -9 | struct Foo { - | ^^^^^^^^^ -note: required by a bound in `is_unpin` - --> tests/ui/not_unpin/assert-not-unpin.rs:28:16 - | -28 | fn is_unpin() {} - | ^^^^^ required by this bound in `is_unpin` - -error[E0277]: `PhantomPinned` cannot be unpinned - --> tests/ui/not_unpin/assert-not-unpin.rs:33:5 - | -33 | is_unpin::>(); //~ ERROR E0277 - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ within `Wrapper<'_, PhantomPinned>`, the trait `Unpin` is not implemented for `PhantomPinned` - | - = note: consider using `Box::pin` - = note: required because it appears within the type `Wrapper<'_, PhantomPinned>` -note: required because of the requirements on the impl of `Unpin` for `Foo<(), PhantomPinned>` - --> tests/ui/not_unpin/assert-not-unpin.rs:8:15 - | -8 | #[pin_project(!Unpin)] - | ^^^^^^ -9 | struct Foo { - | ^^^^^^^^^ -note: required by a bound in `is_unpin` - --> tests/ui/not_unpin/assert-not-unpin.rs:28:16 - | -28 | fn is_unpin() {} - | ^^^^^ required by this bound in `is_unpin` - -error[E0277]: `PhantomPinned` cannot be unpinned - --> tests/ui/not_unpin/assert-not-unpin.rs:34:5 - | -34 | is_unpin::>(); //~ ERROR E0277 - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ within `Wrapper<'_, PhantomPinned>`, the trait `Unpin` is not implemented for `PhantomPinned` - | - = note: consider using `Box::pin` - = note: required because it appears within the type `Wrapper<'_, PhantomPinned>` -note: required because of the requirements on the impl of `Unpin` for `Foo` - --> tests/ui/not_unpin/assert-not-unpin.rs:8:15 - | -8 | #[pin_project(!Unpin)] - | ^^^^^^ -9 | struct Foo { - | ^^^^^^^^^ -note: required by a bound in `is_unpin` - --> tests/ui/not_unpin/assert-not-unpin.rs:28:16 - | -28 | fn is_unpin() {} - | ^^^^^ required by this bound in `is_unpin` - -error[E0277]: `PhantomPinned` cannot be unpinned - --> tests/ui/not_unpin/assert-not-unpin.rs:36:5 - | -36 | is_unpin::(); //~ ERROR E0277 - | ^^^^^^^^^^^^^^^^^^^^^^^^^ within `Wrapper<'_, PhantomPinned>`, the trait `Unpin` is not implemented for `PhantomPinned` - | - = note: consider using `Box::pin` - = note: required because it appears within the type `Wrapper<'_, PhantomPinned>` -note: required because of the requirements on the impl 
of `Unpin` for `TrivialBounds` - --> tests/ui/not_unpin/assert-not-unpin.rs:15:15 - | -15 | #[pin_project(!Unpin)] - | ^^^^^^ -16 | struct TrivialBounds { - | ^^^^^^^^^^^^^ -note: required by a bound in `is_unpin` - --> tests/ui/not_unpin/assert-not-unpin.rs:28:16 - | -28 | fn is_unpin() {} - | ^^^^^ required by this bound in `is_unpin` - -error[E0277]: `PhantomPinned` cannot be unpinned - --> tests/ui/not_unpin/assert-not-unpin.rs:38:5 - | -38 | is_unpin::>(); //~ ERROR E0277 - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ within `Wrapper<'_, PhantomPinned>`, the trait `Unpin` is not implemented for `PhantomPinned` - | - = note: consider using `Box::pin` - = note: required because it appears within the type `Wrapper<'_, PhantomPinned>` -note: required because of the requirements on the impl of `Unpin` for `Bar<'_, (), ()>` - --> tests/ui/not_unpin/assert-not-unpin.rs:21:15 - | -21 | #[pin_project(!Unpin)] - | ^^^^^^ -22 | struct Bar<'a, T, U> { - | ^^^^^^^^^^^^^ -note: required by a bound in `is_unpin` - --> tests/ui/not_unpin/assert-not-unpin.rs:28:16 - | -28 | fn is_unpin() {} - | ^^^^^ required by this bound in `is_unpin` - -error[E0277]: `PhantomPinned` cannot be unpinned - --> tests/ui/not_unpin/assert-not-unpin.rs:39:5 - | -39 | is_unpin::>(); //~ ERROR E0277 - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ within `Wrapper<'_, PhantomPinned>`, the trait `Unpin` is not implemented for `PhantomPinned` - | - = note: consider using `Box::pin` - = note: required because it appears within the type `Wrapper<'_, PhantomPinned>` -note: required because of the requirements on the impl of `Unpin` for `Bar<'_, PhantomPinned, PhantomPinned>` - --> tests/ui/not_unpin/assert-not-unpin.rs:21:15 - | -21 | #[pin_project(!Unpin)] - | ^^^^^^ -22 | struct Bar<'a, T, U> { - | ^^^^^^^^^^^^^ -note: required by a bound in `is_unpin` - --> tests/ui/not_unpin/assert-not-unpin.rs:28:16 - | -28 | fn is_unpin() {} - | ^^^^^ required by this bound in `is_unpin` diff --git a/third_party/rust/pin-project/tests/ui/not_unpin/conflict-unpin.rs b/third_party/rust/pin-project/tests/ui/not_unpin/conflict-unpin.rs index f259f6c01471..8985f371035f 100644 --- a/third_party/rust/pin-project/tests/ui/not_unpin/conflict-unpin.rs +++ b/third_party/rust/pin-project/tests/ui/not_unpin/conflict-unpin.rs @@ -3,8 +3,8 @@ use pin_project::pin_project; #[pin_project(!Unpin)] //~ ERROR E0119 struct Foo { #[pin] - future: T, - field: U, + f1: T, + f2: U, } impl Unpin for Foo where T: Unpin {} @@ -12,8 +12,8 @@ impl Unpin for Foo where T: Unpin {} #[pin_project(!Unpin)] //~ ERROR E0119 struct Bar { #[pin] - future: T, - field: U, + f1: T, + f2: U, } impl Unpin for Bar {} @@ -21,8 +21,8 @@ impl Unpin for Bar {} #[pin_project(!Unpin)] //~ ERROR E0119 struct Baz { #[pin] - future: T, - field: U, + f1: T, + f2: U, } impl Unpin for Baz {} diff --git a/third_party/rust/pin-project/tests/ui/not_unpin/impl-unsafe-unpin.rs b/third_party/rust/pin-project/tests/ui/not_unpin/impl-unsafe-unpin.rs index 625dc29075d9..2c078c71ff8f 100644 --- a/third_party/rust/pin-project/tests/ui/not_unpin/impl-unsafe-unpin.rs +++ b/third_party/rust/pin-project/tests/ui/not_unpin/impl-unsafe-unpin.rs @@ -3,8 +3,8 @@ use pin_project::{pin_project, UnsafeUnpin}; #[pin_project(!Unpin)] //~ ERROR E0119 struct Foo { #[pin] - future: T, - field: U, + f1: T, + f2: U, } unsafe impl UnsafeUnpin for Foo where T: Unpin {} @@ -12,8 +12,8 @@ unsafe impl UnsafeUnpin for Foo where T: Unpin {} #[pin_project(!Unpin)] //~ ERROR E0119 struct Bar { #[pin] - future: T, - field: U, + f1: T, + f2: U, 
} unsafe impl UnsafeUnpin for Bar {} @@ -21,8 +21,8 @@ unsafe impl UnsafeUnpin for Bar {} #[pin_project(!Unpin)] //~ ERROR E0119 struct Baz { #[pin] - future: T, - field: U, + f1: T, + f2: U, } unsafe impl UnsafeUnpin for Baz {} diff --git a/third_party/rust/pin-project/tests/ui/not_unpin/impl-unsafe-unpin.stderr b/third_party/rust/pin-project/tests/ui/not_unpin/impl-unsafe-unpin.stderr index efc519847c13..a944e95970ca 100644 --- a/third_party/rust/pin-project/tests/ui/not_unpin/impl-unsafe-unpin.stderr +++ b/third_party/rust/pin-project/tests/ui/not_unpin/impl-unsafe-unpin.stderr @@ -1,4 +1,4 @@ -error[E0119]: conflicting implementations of trait `pin_project::UnsafeUnpin` for type `Foo<_, _>` +error[E0119]: conflicting implementations of trait `_::_pin_project::UnsafeUnpin` for type `Foo<_, _>` --> tests/ui/not_unpin/impl-unsafe-unpin.rs:3:1 | 3 | #[pin_project(!Unpin)] //~ ERROR E0119 @@ -9,7 +9,7 @@ error[E0119]: conflicting implementations of trait `pin_project::UnsafeUnpin` fo | = note: this error originates in the derive macro `::pin_project::__private::__PinProjectInternalDerive` (in Nightly builds, run with -Z macro-backtrace for more info) -error[E0119]: conflicting implementations of trait `pin_project::UnsafeUnpin` for type `Bar<_, _>` +error[E0119]: conflicting implementations of trait `_::_pin_project::UnsafeUnpin` for type `Bar<_, _>` --> tests/ui/not_unpin/impl-unsafe-unpin.rs:12:1 | 12 | #[pin_project(!Unpin)] //~ ERROR E0119 @@ -20,7 +20,7 @@ error[E0119]: conflicting implementations of trait `pin_project::UnsafeUnpin` fo | = note: this error originates in the derive macro `::pin_project::__private::__PinProjectInternalDerive` (in Nightly builds, run with -Z macro-backtrace for more info) -error[E0119]: conflicting implementations of trait `pin_project::UnsafeUnpin` for type `Baz<_, _>` +error[E0119]: conflicting implementations of trait `_::_pin_project::UnsafeUnpin` for type `Baz<_, _>` --> tests/ui/not_unpin/impl-unsafe-unpin.rs:21:1 | 21 | #[pin_project(!Unpin)] //~ ERROR E0119 diff --git a/third_party/rust/pin-project/tests/ui/pin_project/add-attr-to-struct.rs b/third_party/rust/pin-project/tests/ui/pin_project/add-attr-to-struct.rs index aea2cbff23c2..045e79664d53 100644 --- a/third_party/rust/pin-project/tests/ui/pin_project/add-attr-to-struct.rs +++ b/third_party/rust/pin-project/tests/ui/pin_project/add-attr-to-struct.rs @@ -1,19 +1,20 @@ +use std::marker::PhantomPinned; + use auxiliary_macro::add_pin_attr; use pin_project::pin_project; -use std::marker::PhantomPinned; #[pin_project] #[add_pin_attr(struct)] //~ ERROR duplicate #[pin] attribute struct Foo { #[pin] - field: PhantomPinned, + f: PhantomPinned, } #[add_pin_attr(struct)] //~ ERROR #[pin] attribute may only be used on fields of structs or variants #[pin_project] struct Bar { #[pin] - field: PhantomPinned, + f: PhantomPinned, } fn main() {} diff --git a/third_party/rust/pin-project/tests/ui/pin_project/add-attr-to-struct.stderr b/third_party/rust/pin-project/tests/ui/pin_project/add-attr-to-struct.stderr index c148410243f6..6fb88e80593c 100644 --- a/third_party/rust/pin-project/tests/ui/pin_project/add-attr-to-struct.stderr +++ b/third_party/rust/pin-project/tests/ui/pin_project/add-attr-to-struct.stderr @@ -1,15 +1,15 @@ error: duplicate #[pin] attribute - --> tests/ui/pin_project/add-attr-to-struct.rs:6:1 + --> tests/ui/pin_project/add-attr-to-struct.rs:7:1 | -6 | #[add_pin_attr(struct)] //~ ERROR duplicate #[pin] attribute +7 | #[add_pin_attr(struct)] //~ ERROR duplicate #[pin] attribute | 
^^^^^^^^^^^^^^^^^^^^^^^ | = note: this error originates in the attribute macro `add_pin_attr` (in Nightly builds, run with -Z macro-backtrace for more info) error: #[pin] attribute may only be used on fields of structs or variants - --> tests/ui/pin_project/add-attr-to-struct.rs:12:1 + --> tests/ui/pin_project/add-attr-to-struct.rs:13:1 | -12 | #[add_pin_attr(struct)] //~ ERROR #[pin] attribute may only be used on fields of structs or variants +13 | #[add_pin_attr(struct)] //~ ERROR #[pin] attribute may only be used on fields of structs or variants | ^^^^^^^^^^^^^^^^^^^^^^^ | = note: this error originates in the attribute macro `add_pin_attr` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/third_party/rust/pin-project/tests/ui/pin_project/add-pinned-field.rs b/third_party/rust/pin-project/tests/ui/pin_project/add-pinned-field.rs index c4e1e3ba3cd7..c415f9c90491 100644 --- a/third_party/rust/pin-project/tests/ui/pin_project/add-pinned-field.rs +++ b/third_party/rust/pin-project/tests/ui/pin_project/add-pinned-field.rs @@ -7,14 +7,14 @@ fn is_unpin() {} #[add_pinned_field] struct Foo { #[pin] - field: u32, + f: u32, } #[add_pinned_field] #[pin_project] struct Bar { #[pin] - field: u32, + f: u32, } fn main() { diff --git a/third_party/rust/pin-project/tests/ui/pin_project/conflict-drop.rs b/third_party/rust/pin-project/tests/ui/pin_project/conflict-drop.rs index c9651848a420..4fdb118d06ff 100644 --- a/third_party/rust/pin-project/tests/ui/pin_project/conflict-drop.rs +++ b/third_party/rust/pin-project/tests/ui/pin_project/conflict-drop.rs @@ -1,11 +1,12 @@ -use pin_project::{pin_project, pinned_drop}; use std::pin::Pin; +use pin_project::{pin_project, pinned_drop}; + #[pin_project] //~ ERROR E0119 struct Foo { #[pin] - future: T, - field: U, + f1: T, + f2: U, } impl Drop for Foo { @@ -15,8 +16,8 @@ impl Drop for Foo { #[pin_project(PinnedDrop)] //~ ERROR E0119 struct Bar { #[pin] - future: T, - field: U, + f1: T, + f2: U, } #[pinned_drop] diff --git a/third_party/rust/pin-project/tests/ui/pin_project/conflict-drop.stderr b/third_party/rust/pin-project/tests/ui/pin_project/conflict-drop.stderr index 6f7c3f1797ae..4ae628da75da 100644 --- a/third_party/rust/pin-project/tests/ui/pin_project/conflict-drop.stderr +++ b/third_party/rust/pin-project/tests/ui/pin_project/conflict-drop.stderr @@ -1,7 +1,7 @@ error[E0119]: conflicting implementations of trait `_::FooMustNotImplDrop` for type `Foo<_, _>` - --> tests/ui/pin_project/conflict-drop.rs:4:1 + --> tests/ui/pin_project/conflict-drop.rs:5:1 | -4 | #[pin_project] //~ ERROR E0119 +5 | #[pin_project] //~ ERROR E0119 | ^^^^^^^^^^^^^^ | | | first implementation here @@ -10,10 +10,10 @@ error[E0119]: conflicting implementations of trait `_::FooMustNotImplDrop` for t = note: this error originates in the derive macro `::pin_project::__private::__PinProjectInternalDerive` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0119]: conflicting implementations of trait `std::ops::Drop` for type `Bar<_, _>` - --> tests/ui/pin_project/conflict-drop.rs:15:15 + --> tests/ui/pin_project/conflict-drop.rs:16:15 | -15 | #[pin_project(PinnedDrop)] //~ ERROR E0119 +16 | #[pin_project(PinnedDrop)] //~ ERROR E0119 | ^^^^^^^^^^ conflicting implementation for `Bar<_, _>` ... 
-27 | impl Drop for Bar { +28 | impl Drop for Bar { | ----------------------------- first implementation here diff --git a/third_party/rust/pin-project/tests/ui/pin_project/conflict-unpin.rs b/third_party/rust/pin-project/tests/ui/pin_project/conflict-unpin.rs index 0c48d27907c7..f58c45e09fa9 100644 --- a/third_party/rust/pin-project/tests/ui/pin_project/conflict-unpin.rs +++ b/third_party/rust/pin-project/tests/ui/pin_project/conflict-unpin.rs @@ -5,8 +5,8 @@ use pin_project::pin_project; #[pin_project] //~ ERROR E0119 struct Foo { #[pin] - future: T, - field: U, + f1: T, + f2: U, } // conflicting implementations @@ -17,8 +17,8 @@ impl Unpin for Foo where T: Unpin {} // Conditional Unpin impl #[pin_project] //~ ERROR E0119 struct Bar { #[pin] - future: T, - field: U, + f1: T, + f2: U, } // conflicting implementations @@ -27,8 +27,8 @@ impl Unpin for Bar {} // Non-conditional Unpin impl #[pin_project] //~ ERROR E0119 struct Baz { #[pin] - future: T, - field: U, + f1: T, + f2: U, } // conflicting implementations diff --git a/third_party/rust/pin-project/tests/ui/pin_project/impl-unsafe-unpin.rs b/third_party/rust/pin-project/tests/ui/pin_project/impl-unsafe-unpin.rs index 94af322a3f95..562c9b64f675 100644 --- a/third_party/rust/pin-project/tests/ui/pin_project/impl-unsafe-unpin.rs +++ b/third_party/rust/pin-project/tests/ui/pin_project/impl-unsafe-unpin.rs @@ -3,8 +3,8 @@ use pin_project::{pin_project, UnsafeUnpin}; #[pin_project] //~ ERROR E0119 struct Foo { #[pin] - future: T, - field: U, + f1: T, + f2: U, } unsafe impl UnsafeUnpin for Foo where T: Unpin {} @@ -12,8 +12,8 @@ unsafe impl UnsafeUnpin for Foo where T: Unpin {} #[pin_project] //~ ERROR E0119 struct Bar { #[pin] - future: T, - field: U, + f1: T, + f2: U, } unsafe impl UnsafeUnpin for Bar {} @@ -21,8 +21,8 @@ unsafe impl UnsafeUnpin for Bar {} #[pin_project] //~ ERROR E0119 struct Baz { #[pin] - future: T, - field: U, + f1: T, + f2: U, } unsafe impl UnsafeUnpin for Baz {} diff --git a/third_party/rust/pin-project/tests/ui/pin_project/impl-unsafe-unpin.stderr b/third_party/rust/pin-project/tests/ui/pin_project/impl-unsafe-unpin.stderr index 5680daafaa28..ba4230383c47 100644 --- a/third_party/rust/pin-project/tests/ui/pin_project/impl-unsafe-unpin.stderr +++ b/third_party/rust/pin-project/tests/ui/pin_project/impl-unsafe-unpin.stderr @@ -1,4 +1,4 @@ -error[E0119]: conflicting implementations of trait `pin_project::UnsafeUnpin` for type `Foo<_, _>` +error[E0119]: conflicting implementations of trait `_::_pin_project::UnsafeUnpin` for type `Foo<_, _>` --> tests/ui/pin_project/impl-unsafe-unpin.rs:3:1 | 3 | #[pin_project] //~ ERROR E0119 @@ -9,7 +9,7 @@ error[E0119]: conflicting implementations of trait `pin_project::UnsafeUnpin` fo | = note: this error originates in the derive macro `::pin_project::__private::__PinProjectInternalDerive` (in Nightly builds, run with -Z macro-backtrace for more info) -error[E0119]: conflicting implementations of trait `pin_project::UnsafeUnpin` for type `Bar<_, _>` +error[E0119]: conflicting implementations of trait `_::_pin_project::UnsafeUnpin` for type `Bar<_, _>` --> tests/ui/pin_project/impl-unsafe-unpin.rs:12:1 | 12 | #[pin_project] //~ ERROR E0119 @@ -20,7 +20,7 @@ error[E0119]: conflicting implementations of trait `pin_project::UnsafeUnpin` fo | = note: this error originates in the derive macro `::pin_project::__private::__PinProjectInternalDerive` (in Nightly builds, run with -Z macro-backtrace for more info) -error[E0119]: conflicting implementations of trait `pin_project::UnsafeUnpin` for 
type `Baz<_, _>` +error[E0119]: conflicting implementations of trait `_::_pin_project::UnsafeUnpin` for type `Baz<_, _>` --> tests/ui/pin_project/impl-unsafe-unpin.rs:21:1 | 21 | #[pin_project] //~ ERROR E0119 diff --git a/third_party/rust/pin-project/tests/ui/pin_project/import_unnamed.rs b/third_party/rust/pin-project/tests/ui/pin_project/import_unnamed.rs new file mode 100644 index 000000000000..7926e61a6356 --- /dev/null +++ b/third_party/rust/pin-project/tests/ui/pin_project/import_unnamed.rs @@ -0,0 +1,30 @@ +/// Only named projected types can be imported. +/// See visibility.rs for named projected types. + +mod pub_ { + use pin_project::pin_project; + + #[pin_project] + pub struct Default(()); + + #[pin_project(project_replace)] + pub struct Replace(()); +} +#[allow(unused_imports)] +pub mod use_ { + #[rustfmt::skip] + use crate::pub_::__DefaultProjection; //~ ERROR E0432 + #[rustfmt::skip] + use crate::pub_::__DefaultProjectionRef; //~ ERROR E0432 + #[rustfmt::skip] + use crate::pub_::__ReplaceProjection; //~ ERROR E0432 + #[rustfmt::skip] + use crate::pub_::__ReplaceProjectionOwned; //~ ERROR E0432 + #[rustfmt::skip] + use crate::pub_::__ReplaceProjectionRef; //~ ERROR E0432 + + // Confirm that the visibility of the original type is not changed. + pub use crate::pub_::{Default, Replace}; +} + +fn main() {} diff --git a/third_party/rust/pin-project/tests/ui/pin_project/import_unnamed.stderr b/third_party/rust/pin-project/tests/ui/pin_project/import_unnamed.stderr new file mode 100644 index 000000000000..260a35a5c023 --- /dev/null +++ b/third_party/rust/pin-project/tests/ui/pin_project/import_unnamed.stderr @@ -0,0 +1,29 @@ +error[E0432]: unresolved import `crate::pub_::__DefaultProjection` + --> tests/ui/pin_project/import_unnamed.rs:16:9 + | +16 | use crate::pub_::__DefaultProjection; //~ ERROR E0432 + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ no `__DefaultProjection` in `pub_` + +error[E0432]: unresolved import `crate::pub_::__DefaultProjectionRef` + --> tests/ui/pin_project/import_unnamed.rs:18:9 + | +18 | use crate::pub_::__DefaultProjectionRef; //~ ERROR E0432 + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ no `__DefaultProjectionRef` in `pub_` + +error[E0432]: unresolved import `crate::pub_::__ReplaceProjection` + --> tests/ui/pin_project/import_unnamed.rs:20:9 + | +20 | use crate::pub_::__ReplaceProjection; //~ ERROR E0432 + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ no `__ReplaceProjection` in `pub_` + +error[E0432]: unresolved import `crate::pub_::__ReplaceProjectionOwned` + --> tests/ui/pin_project/import_unnamed.rs:22:9 + | +22 | use crate::pub_::__ReplaceProjectionOwned; //~ ERROR E0432 + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ no `__ReplaceProjectionOwned` in `pub_` + +error[E0432]: unresolved import `crate::pub_::__ReplaceProjectionRef` + --> tests/ui/pin_project/import_unnamed.rs:24:9 + | +24 | use crate::pub_::__ReplaceProjectionRef; //~ ERROR E0432 + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ no `__ReplaceProjectionRef` in `pub_` diff --git a/third_party/rust/pin-project/tests/ui/pin_project/invalid.rs b/third_party/rust/pin-project/tests/ui/pin_project/invalid.rs index 9c51bec83a69..d39a1fd4c94f 100644 --- a/third_party/rust/pin-project/tests/ui/pin_project/invalid.rs +++ b/third_party/rust/pin-project/tests/ui/pin_project/invalid.rs @@ -4,7 +4,7 @@ mod pin_argument { #[pin_project] struct Struct { #[pin()] //~ ERROR unexpected token - field: (), + f: (), } #[pin_project] @@ -19,7 +19,7 @@ mod pin_argument { enum EnumStruct { V { #[pin(foo)] //~ ERROR unexpected token - field: (), + f: (), 
}, } } @@ -31,7 +31,7 @@ mod pin_attribute { struct DuplicateStruct { #[pin] #[pin] //~ ERROR duplicate #[pin] attribute - field: (), + f: (), } #[pin_project] @@ -57,7 +57,7 @@ mod pin_attribute { V { #[pin] #[pin] //~ ERROR duplicate #[pin] attribute - field: (), + f: (), }, } } @@ -69,7 +69,7 @@ mod pin_item { #[pin] //~ ERROR may only be used on fields of structs or variants struct Struct { #[pin] - field: (), + f: (), } #[pin_project] @@ -88,6 +88,9 @@ mod pin_item { mod pin_project_argument { use pin_project::pin_project; + #[pin_project(Replace)] //~ ERROR `Replace` argument was removed, use `project_replace` argument instead + struct RemovedReplace(#[pin] ()); + #[pin_project(UnsafeUnpin,,)] //~ ERROR expected identifier struct Unexpected1(#[pin] ()); @@ -106,9 +109,6 @@ mod pin_project_argument { #[pin_project(PinnedDrop, PinnedDrop)] //~ ERROR duplicate `PinnedDrop` argument struct DuplicatePinnedDrop(#[pin] ()); - #[pin_project(Replace, Replace)] //~ ERROR duplicate `Replace` argument - struct DuplicateReplace(#[pin] ()); - #[pin_project(UnsafeUnpin, UnsafeUnpin)] //~ ERROR duplicate `UnsafeUnpin` argument struct DuplicateUnsafeUnpin(#[pin] ()); @@ -142,24 +142,12 @@ mod pin_project_argument { #[pin_project(project_replace = A)] // Ok struct ProjectReplaceWithoutReplace(#[pin] ()); - #[pin_project(PinnedDrop, Replace)] //~ ERROR arguments `PinnedDrop` and `Replace` are mutually exclusive - struct PinnedDropWithReplace1(#[pin] ()); - - #[pin_project(Replace, UnsafeUnpin, PinnedDrop)] //~ ERROR arguments `PinnedDrop` and `Replace` are mutually exclusive - struct PinnedDropWithReplace2(#[pin] ()); - #[pin_project(PinnedDrop, project_replace)] //~ ERROR arguments `PinnedDrop` and `project_replace` are mutually exclusive struct PinnedDropWithProjectReplace1(#[pin] ()); #[pin_project(project_replace, UnsafeUnpin, PinnedDrop)] //~ ERROR arguments `PinnedDrop` and `project_replace` are mutually exclusive struct PinnedDropWithProjectReplace2(#[pin] ()); - #[pin_project(project_replace, Replace)] // Ok - struct ProjectReplaceWithReplace1(#[pin] ()); - - #[pin_project(project_replace = B, Replace)] // Ok - struct ProjectReplaceWithReplace2(#[pin] ()); - #[pin_project(UnsafeUnpin, !Unpin)] //~ ERROR arguments `UnsafeUnpin` and `!Unpin` are mutually exclusive struct UnsafeUnpinWithNotUnpin1(#[pin] ()); @@ -198,11 +186,25 @@ mod pin_project_argument { #[pin_project(project_replace = !)] //~ ERROR expected identifier struct ProjectReplace3(#[pin] ()); + + #[pin_project(project_replace)] //~ ERROR `project_replace` argument requires a value when used on enums + enum ProjectReplaceEnum { + V(#[pin] ()), + } } mod pin_project_conflict_naming { use pin_project::pin_project; + #[pin_project(project = OrigAndProj)] //~ ERROR name `OrigAndProj` is the same as the original type name + struct OrigAndProj(#[pin] ()); + + #[pin_project(project_ref = OrigAndProjRef)] //~ ERROR name `OrigAndProjRef` is the same as the original type name + struct OrigAndProjRef(#[pin] ()); + + #[pin_project(project_replace = OrigAndProjOwn)] //~ ERROR name `OrigAndProjOwn` is the same as the original type name + struct OrigAndProjOwn(#[pin] ()); + #[pin_project(project = A, project_ref = A)] //~ ERROR name `A` is already specified by `project` argument struct ProjAndProjRef(#[pin] ()); @@ -253,6 +255,9 @@ mod pin_project_item { //~^ ERROR may only be used on structs or enums f: (), } + + #[pin_project] + impl Impl {} //~ ERROR may only be used on structs or enums } // #[repr(packed)] is always detected first, even on 
unsupported structs. diff --git a/third_party/rust/pin-project/tests/ui/pin_project/invalid.stderr b/third_party/rust/pin-project/tests/ui/pin_project/invalid.stderr index b4406652256a..c43d363de8f9 100644 --- a/third_party/rust/pin-project/tests/ui/pin_project/invalid.stderr +++ b/third_party/rust/pin-project/tests/ui/pin_project/invalid.stderr @@ -1,22 +1,22 @@ -error: unexpected token: () +error: unexpected token: `()` --> tests/ui/pin_project/invalid.rs:6:14 | 6 | #[pin()] //~ ERROR unexpected token | ^^ -error: unexpected token: (foo) +error: unexpected token: `(foo)` --> tests/ui/pin_project/invalid.rs:11:29 | 11 | struct TupleStruct(#[pin(foo)] ()); //~ ERROR unexpected token | ^^^^^ -error: unexpected token: (foo) +error: unexpected token: `(foo)` --> tests/ui/pin_project/invalid.rs:15:16 | 15 | V(#[pin(foo)] ()), //~ ERROR unexpected token | ^^^^^ -error: unexpected token: (foo) +error: unexpected token: `(foo)` --> tests/ui/pin_project/invalid.rs:21:18 | 21 | #[pin(foo)] //~ ERROR unexpected token @@ -64,42 +64,42 @@ error: #[pin] attribute may only be used on fields of structs or variants 82 | #[pin] //~ ERROR may only be used on fields of structs or variants | ^^^^^^ -error: expected identifier - --> tests/ui/pin_project/invalid.rs:91:31 +error: `Replace` argument was removed, use `project_replace` argument instead + --> tests/ui/pin_project/invalid.rs:91:19 | -91 | #[pin_project(UnsafeUnpin,,)] //~ ERROR expected identifier +91 | #[pin_project(Replace)] //~ ERROR `Replace` argument was removed, use `project_replace` argument instead + | ^^^^^^^ + +error: expected identifier + --> tests/ui/pin_project/invalid.rs:94:31 + | +94 | #[pin_project(UnsafeUnpin,,)] //~ ERROR expected identifier | ^ error: unexpected argument: Foo - --> tests/ui/pin_project/invalid.rs:94:19 + --> tests/ui/pin_project/invalid.rs:97:19 | -94 | #[pin_project(Foo)] //~ ERROR unexpected argument +97 | #[pin_project(Foo)] //~ ERROR unexpected argument | ^^^ error: expected identifier - --> tests/ui/pin_project/invalid.rs:97:19 - | -97 | #[pin_project(,UnsafeUnpin)] //~ ERROR expected identifier - | ^ + --> tests/ui/pin_project/invalid.rs:100:19 + | +100 | #[pin_project(,UnsafeUnpin)] //~ ERROR expected identifier + | ^ error: expected `,` - --> tests/ui/pin_project/invalid.rs:103:30 + --> tests/ui/pin_project/invalid.rs:106:30 | -103 | #[pin_project(PinnedDrop PinnedDrop)] //~ ERROR expected `,` +106 | #[pin_project(PinnedDrop PinnedDrop)] //~ ERROR expected `,` | ^^^^^^^^^^ error: duplicate `PinnedDrop` argument - --> tests/ui/pin_project/invalid.rs:106:31 + --> tests/ui/pin_project/invalid.rs:109:31 | -106 | #[pin_project(PinnedDrop, PinnedDrop)] //~ ERROR duplicate `PinnedDrop` argument +109 | #[pin_project(PinnedDrop, PinnedDrop)] //~ ERROR duplicate `PinnedDrop` argument | ^^^^^^^^^^ -error: duplicate `Replace` argument - --> tests/ui/pin_project/invalid.rs:109:28 - | -109 | #[pin_project(Replace, Replace)] //~ ERROR duplicate `Replace` argument - | ^^^^^^^ - error: duplicate `UnsafeUnpin` argument --> tests/ui/pin_project/invalid.rs:112:32 | @@ -160,187 +160,205 @@ error: duplicate `project_replace` argument 139 | #[pin_project(project_replace = A, project_replace)] //~ ERROR duplicate `project_replace` argument | ^^^^^^^^^^^^^^^ -error: arguments `PinnedDrop` and `Replace` are mutually exclusive +error: arguments `PinnedDrop` and `project_replace` are mutually exclusive --> tests/ui/pin_project/invalid.rs:145:19 | -145 | #[pin_project(PinnedDrop, Replace)] //~ ERROR arguments `PinnedDrop` and `Replace` 
are mutually exclusive - | ^^^^^^^^^^ - -error: arguments `PinnedDrop` and `Replace` are mutually exclusive - --> tests/ui/pin_project/invalid.rs:148:41 - | -148 | #[pin_project(Replace, UnsafeUnpin, PinnedDrop)] //~ ERROR arguments `PinnedDrop` and `Replace` are mutually exclusive - | ^^^^^^^^^^ - -error: arguments `PinnedDrop` and `project_replace` are mutually exclusive - --> tests/ui/pin_project/invalid.rs:151:19 - | -151 | #[pin_project(PinnedDrop, project_replace)] //~ ERROR arguments `PinnedDrop` and `project_replace` are mutually exclusive +145 | #[pin_project(PinnedDrop, project_replace)] //~ ERROR arguments `PinnedDrop` and `project_replace` are mutually exclusive | ^^^^^^^^^^ error: arguments `PinnedDrop` and `project_replace` are mutually exclusive - --> tests/ui/pin_project/invalid.rs:154:49 + --> tests/ui/pin_project/invalid.rs:148:49 | -154 | #[pin_project(project_replace, UnsafeUnpin, PinnedDrop)] //~ ERROR arguments `PinnedDrop` and `project_replace` are mutually exclusive +148 | #[pin_project(project_replace, UnsafeUnpin, PinnedDrop)] //~ ERROR arguments `PinnedDrop` and `project_replace` are mutually exclusive | ^^^^^^^^^^ error: arguments `UnsafeUnpin` and `!Unpin` are mutually exclusive - --> tests/ui/pin_project/invalid.rs:163:19 + --> tests/ui/pin_project/invalid.rs:151:19 | -163 | #[pin_project(UnsafeUnpin, !Unpin)] //~ ERROR arguments `UnsafeUnpin` and `!Unpin` are mutually exclusive +151 | #[pin_project(UnsafeUnpin, !Unpin)] //~ ERROR arguments `UnsafeUnpin` and `!Unpin` are mutually exclusive | ^^^^^^^^^^^ error: arguments `UnsafeUnpin` and `!Unpin` are mutually exclusive - --> tests/ui/pin_project/invalid.rs:166:39 + --> tests/ui/pin_project/invalid.rs:154:39 | -166 | #[pin_project(!Unpin, PinnedDrop, UnsafeUnpin)] //~ ERROR arguments `UnsafeUnpin` and `!Unpin` are mutually exclusive +154 | #[pin_project(!Unpin, PinnedDrop, UnsafeUnpin)] //~ ERROR arguments `UnsafeUnpin` and `!Unpin` are mutually exclusive | ^^^^^^^^^^^ error: expected `!Unpin`, found `!` - --> tests/ui/pin_project/invalid.rs:169:19 + --> tests/ui/pin_project/invalid.rs:157:19 | -169 | #[pin_project(!)] //~ ERROR expected `!Unpin`, found `!` +157 | #[pin_project(!)] //~ ERROR expected `!Unpin`, found `!` | ^ error: unexpected argument: Unpin - --> tests/ui/pin_project/invalid.rs:172:19 + --> tests/ui/pin_project/invalid.rs:160:19 | -172 | #[pin_project(Unpin)] //~ ERROR unexpected argument +160 | #[pin_project(Unpin)] //~ ERROR unexpected argument | ^^^^^ error: expected `project = `, found `project` - --> tests/ui/pin_project/invalid.rs:175:19 + --> tests/ui/pin_project/invalid.rs:163:19 | -175 | #[pin_project(project)] //~ ERROR expected `project = `, found `project` +163 | #[pin_project(project)] //~ ERROR expected `project = `, found `project` | ^^^^^^^ error: expected `project = `, found `project =` - --> tests/ui/pin_project/invalid.rs:178:19 + --> tests/ui/pin_project/invalid.rs:166:19 | -178 | #[pin_project(project = )] //~ ERROR expected `project = `, found `project =` +166 | #[pin_project(project = )] //~ ERROR expected `project = `, found `project =` | ^^^^^^^^^ error: expected identifier - --> tests/ui/pin_project/invalid.rs:181:29 + --> tests/ui/pin_project/invalid.rs:169:29 | -181 | #[pin_project(project = !)] //~ ERROR expected identifier +169 | #[pin_project(project = !)] //~ ERROR expected identifier | ^ error: expected `project_ref = `, found `project_ref` - --> tests/ui/pin_project/invalid.rs:184:19 + --> tests/ui/pin_project/invalid.rs:172:19 | -184 | 
#[pin_project(project_ref)] //~ ERROR expected `project_ref = `, found `project_ref` +172 | #[pin_project(project_ref)] //~ ERROR expected `project_ref = `, found `project_ref` | ^^^^^^^^^^^ error: expected `project_ref = `, found `project_ref =` - --> tests/ui/pin_project/invalid.rs:187:19 + --> tests/ui/pin_project/invalid.rs:175:19 | -187 | #[pin_project(project_ref = )] //~ ERROR expected `project_ref = `, found `project_ref =` +175 | #[pin_project(project_ref = )] //~ ERROR expected `project_ref = `, found `project_ref =` | ^^^^^^^^^^^^^ error: expected identifier - --> tests/ui/pin_project/invalid.rs:190:33 + --> tests/ui/pin_project/invalid.rs:178:33 | -190 | #[pin_project(project_ref = !)] //~ ERROR expected identifier +178 | #[pin_project(project_ref = !)] //~ ERROR expected identifier | ^ error: expected `project_replace = `, found `project_replace =` - --> tests/ui/pin_project/invalid.rs:196:19 + --> tests/ui/pin_project/invalid.rs:184:19 | -196 | #[pin_project(project_replace = )] //~ ERROR expected `project_replace = `, found `project_replace =` +184 | #[pin_project(project_replace = )] //~ ERROR expected `project_replace = `, found `project_replace =` | ^^^^^^^^^^^^^^^^^ error: expected identifier - --> tests/ui/pin_project/invalid.rs:199:37 + --> tests/ui/pin_project/invalid.rs:187:37 | -199 | #[pin_project(project_replace = !)] //~ ERROR expected identifier +187 | #[pin_project(project_replace = !)] //~ ERROR expected identifier | ^ -error: name `A` is already specified by `project` argument - --> tests/ui/pin_project/invalid.rs:206:46 +error: `project_replace` argument requires a value when used on enums + --> tests/ui/pin_project/invalid.rs:190:19 | -206 | #[pin_project(project = A, project_ref = A)] //~ ERROR name `A` is already specified by `project` argument +190 | #[pin_project(project_replace)] //~ ERROR `project_replace` argument requires a value when used on enums + | ^^^^^^^^^^^^^^^ + +error: name `OrigAndProj` is the same as the original type name + --> tests/ui/pin_project/invalid.rs:199:29 + | +199 | #[pin_project(project = OrigAndProj)] //~ ERROR name `OrigAndProj` is the same as the original type name + | ^^^^^^^^^^^ + +error: name `OrigAndProjRef` is the same as the original type name + --> tests/ui/pin_project/invalid.rs:202:33 + | +202 | #[pin_project(project_ref = OrigAndProjRef)] //~ ERROR name `OrigAndProjRef` is the same as the original type name + | ^^^^^^^^^^^^^^ + +error: name `OrigAndProjOwn` is the same as the original type name + --> tests/ui/pin_project/invalid.rs:205:37 + | +205 | #[pin_project(project_replace = OrigAndProjOwn)] //~ ERROR name `OrigAndProjOwn` is the same as the original type name + | ^^^^^^^^^^^^^^ + +error: name `A` is already specified by `project` argument + --> tests/ui/pin_project/invalid.rs:208:46 + | +208 | #[pin_project(project = A, project_ref = A)] //~ ERROR name `A` is already specified by `project` argument | ^ error: name `A` is already specified by `project` argument - --> tests/ui/pin_project/invalid.rs:209:50 + --> tests/ui/pin_project/invalid.rs:211:50 | -209 | #[pin_project(project = A, project_replace = A)] //~ ERROR name `A` is already specified by `project` argument +211 | #[pin_project(project = A, project_replace = A)] //~ ERROR name `A` is already specified by `project` argument | ^ error: name `A` is already specified by `project_ref` argument - --> tests/ui/pin_project/invalid.rs:212:54 + --> tests/ui/pin_project/invalid.rs:214:54 | -212 | #[pin_project(project_ref = A, project_replace = A)] //~ ERROR 
name `A` is already specified by `project_ref` argument +214 | #[pin_project(project_ref = A, project_replace = A)] //~ ERROR name `A` is already specified by `project_ref` argument | ^ error: duplicate #[pin_project] attribute - --> tests/ui/pin_project/invalid.rs:220:5 + --> tests/ui/pin_project/invalid.rs:222:5 | -220 | #[pin_project] //~ ERROR duplicate #[pin_project] attribute +222 | #[pin_project] //~ ERROR duplicate #[pin_project] attribute | ^^^^^^^^^^^^^^ error: #[pin_project] attribute may not be used on structs with zero fields - --> tests/ui/pin_project/invalid.rs:228:19 + --> tests/ui/pin_project/invalid.rs:230:19 | -228 | struct Struct {} //~ ERROR may not be used on structs with zero fields +230 | struct Struct {} //~ ERROR may not be used on structs with zero fields | ^^ error: #[pin_project] attribute may not be used on structs with zero fields - --> tests/ui/pin_project/invalid.rs:231:23 + --> tests/ui/pin_project/invalid.rs:233:23 | -231 | struct TupleStruct(); //~ ERROR may not be used on structs with zero fields +233 | struct TupleStruct(); //~ ERROR may not be used on structs with zero fields | ^^ error: #[pin_project] attribute may not be used on structs with zero fields - --> tests/ui/pin_project/invalid.rs:234:12 + --> tests/ui/pin_project/invalid.rs:236:12 | -234 | struct UnitStruct; //~ ERROR may not be used on structs with zero fields +236 | struct UnitStruct; //~ ERROR may not be used on structs with zero fields | ^^^^^^^^^^ error: #[pin_project] attribute may not be used on enums without variants - --> tests/ui/pin_project/invalid.rs:237:20 + --> tests/ui/pin_project/invalid.rs:239:20 | -237 | enum EnumEmpty {} //~ ERROR may not be used on enums without variants +239 | enum EnumEmpty {} //~ ERROR may not be used on enums without variants | ^^ error: #[pin_project] attribute may not be used on enums with discriminants - --> tests/ui/pin_project/invalid.rs:241:13 + --> tests/ui/pin_project/invalid.rs:243:13 | -241 | V = 2, //~ ERROR may not be used on enums with discriminants +243 | V = 2, //~ ERROR may not be used on enums with discriminants | ^ error: #[pin_project] attribute may not be used on enums with zero fields - --> tests/ui/pin_project/invalid.rs:246:9 + --> tests/ui/pin_project/invalid.rs:248:9 | -246 | / Unit, //~ ERROR may not be used on enums with zero fields -247 | | Tuple(), -248 | | Struct {}, +248 | / Unit, //~ ERROR may not be used on enums with zero fields +249 | | Tuple(), +250 | | Struct {}, | |__________________^ error: #[pin_project] attribute may only be used on structs or enums - --> tests/ui/pin_project/invalid.rs:252:5 + --> tests/ui/pin_project/invalid.rs:254:5 | -252 | / union Union { -253 | | //~^ ERROR may only be used on structs or enums -254 | | f: (), -255 | | } +254 | / union Union { +255 | | //~^ ERROR may only be used on structs or enums +256 | | f: (), +257 | | } | |_____^ -error: #[pin_project] attribute may not be used on #[repr(packed)] types - --> tests/ui/pin_project/invalid.rs:263:12 +error: #[pin_project] attribute may only be used on structs or enums + --> tests/ui/pin_project/invalid.rs:260:5 | -263 | #[repr(packed)] +260 | impl Impl {} //~ ERROR may only be used on structs or enums + | ^^^^^^^^^^^^ + +error: #[pin_project] attribute may not be used on #[repr(packed)] types + --> tests/ui/pin_project/invalid.rs:268:12 + | +268 | #[repr(packed)] | ^^^^^^ error: #[pin_project] attribute may not be used on #[repr(packed)] types - --> tests/ui/pin_project/invalid.rs:267:12 + --> tests/ui/pin_project/invalid.rs:272:12 | 
-267 | #[repr(packed)] +272 | #[repr(packed)] | ^^^^^^ error: #[pin_project] attribute may not be used on #[repr(packed)] types - --> tests/ui/pin_project/invalid.rs:271:12 + --> tests/ui/pin_project/invalid.rs:276:12 | -271 | #[repr(packed)] +276 | #[repr(packed)] | ^^^^^^ diff --git a/third_party/rust/pin-project/tests/ui/pin_project/overlapping_unpin_struct.rs b/third_party/rust/pin-project/tests/ui/pin_project/overlapping_unpin_struct.rs index 00fef3cc3b35..abfd5d134424 100644 --- a/third_party/rust/pin-project/tests/ui/pin_project/overlapping_unpin_struct.rs +++ b/third_party/rust/pin-project/tests/ui/pin_project/overlapping_unpin_struct.rs @@ -1,18 +1,19 @@ -use pin_project::pin_project; use std::marker::PhantomPinned; +use pin_project::pin_project; + #[pin_project] -struct Foo { +struct S { #[pin] - inner: T, + f: T, } -struct __Foo {} +struct __S {} -impl Unpin for __Foo {} +impl Unpin for __S {} fn is_unpin() {} fn main() { - is_unpin::>(); //~ ERROR E0277 + is_unpin::>(); //~ ERROR E0277 } diff --git a/third_party/rust/pin-project/tests/ui/pin_project/overlapping_unpin_struct.stderr b/third_party/rust/pin-project/tests/ui/pin_project/overlapping_unpin_struct.stderr index 63cabfeaef06..1bd200d9c8e4 100644 --- a/third_party/rust/pin-project/tests/ui/pin_project/overlapping_unpin_struct.stderr +++ b/third_party/rust/pin-project/tests/ui/pin_project/overlapping_unpin_struct.stderr @@ -1,25 +1,25 @@ error[E0277]: `PhantomPinned` cannot be unpinned - --> tests/ui/pin_project/overlapping_unpin_struct.rs:17:5 + --> tests/ui/pin_project/overlapping_unpin_struct.rs:18:5 | -17 | is_unpin::>(); //~ ERROR E0277 - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ within `_::__Foo<'_, PhantomPinned>`, the trait `Unpin` is not implemented for `PhantomPinned` +18 | is_unpin::>(); //~ ERROR E0277 + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ within `_::__S<'_, PhantomPinned>`, the trait `Unpin` is not implemented for `PhantomPinned` | = note: consider using `Box::pin` -note: required because it appears within the type `_::__Foo<'_, PhantomPinned>` - --> tests/ui/pin_project/overlapping_unpin_struct.rs:5:8 +note: required because it appears within the type `_::__S<'_, PhantomPinned>` + --> tests/ui/pin_project/overlapping_unpin_struct.rs:6:8 | -5 | struct Foo { - | ^^^ -note: required because of the requirements on the impl of `Unpin` for `Foo` - --> tests/ui/pin_project/overlapping_unpin_struct.rs:4:1 +6 | struct S { + | ^ +note: required because of the requirements on the impl of `Unpin` for `S` + --> tests/ui/pin_project/overlapping_unpin_struct.rs:5:1 | -4 | #[pin_project] +5 | #[pin_project] | ^^^^^^^^^^^^^^ -5 | struct Foo { - | ^^^^^^ +6 | struct S { + | ^^^^ note: required by a bound in `is_unpin` - --> tests/ui/pin_project/overlapping_unpin_struct.rs:14:16 + --> tests/ui/pin_project/overlapping_unpin_struct.rs:15:16 | -14 | fn is_unpin() {} +15 | fn is_unpin() {} | ^^^^^ required by this bound in `is_unpin` = note: this error originates in the derive macro `::pin_project::__private::__PinProjectInternalDerive` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/third_party/rust/pin-project/tests/ui/pin_project/override-priv-mod.rs b/third_party/rust/pin-project/tests/ui/pin_project/override-priv-mod.rs new file mode 100644 index 000000000000..890fd5b8dea8 --- /dev/null +++ b/third_party/rust/pin-project/tests/ui/pin_project/override-priv-mod.rs @@ -0,0 +1,32 @@ +// https://discord.com/channels/273534239310479360/512792629516173323/870075511009857617 + +extern crate pin_project as 
pin_project_orig; +extern crate self as pin_project; + +pub use ::pin_project_orig::*; +mod __private { + pub use ::pin_project_orig::__private::*; + pub trait Drop {} +} + +use std::{marker::PhantomPinned, mem}; + +#[pin_project] //~ ERROR conflicting implementations of trait `_::FooMustNotImplDrop` +struct S { + #[pin] + f: (u8, PhantomPinned), +} + +impl Drop for S { + fn drop(&mut self) { + let prev = &self.f.0 as *const _ as usize; + let moved = mem::take(&mut self.f); // move pinned field + let moved = &moved.0 as *const _ as usize; + assert_eq!(prev, moved); // panic + } +} + +fn main() { + let mut x = Box::pin(S { f: (1, PhantomPinned) }); + let _f = x.as_mut().project().f; // first mutable access +} diff --git a/third_party/rust/pin-project/tests/ui/pin_project/override-priv-mod.stderr b/third_party/rust/pin-project/tests/ui/pin_project/override-priv-mod.stderr new file mode 100644 index 000000000000..8a3fb9b61aa4 --- /dev/null +++ b/third_party/rust/pin-project/tests/ui/pin_project/override-priv-mod.stderr @@ -0,0 +1,10 @@ +error[E0119]: conflicting implementations of trait `_::SMustNotImplDrop` for type `S` + --> tests/ui/pin_project/override-priv-mod.rs:14:1 + | +14 | #[pin_project] //~ ERROR conflicting implementations of trait `_::FooMustNotImplDrop` + | ^^^^^^^^^^^^^^ + | | + | first implementation here + | conflicting implementation for `S` + | + = note: this error originates in the derive macro `::pin_project::__private::__PinProjectInternalDerive` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/third_party/rust/pin-project/tests/ui/pin_project/packed-enum.rs b/third_party/rust/pin-project/tests/ui/pin_project/packed-enum.rs index 9d4a4c3150d2..023c08d14fca 100644 --- a/third_party/rust/pin-project/tests/ui/pin_project/packed-enum.rs +++ b/third_party/rust/pin-project/tests/ui/pin_project/packed-enum.rs @@ -1,5 +1,9 @@ use pin_project::pin_project; +// #[repr(packed)] cannot be apply on enums and will be rejected by rustc. +// However, we should not rely on the behavior of rustc that rejects this. 
+// https://github.com/taiki-e/pin-project/pull/324#discussion_r612388001 + #[repr(packed)] //~ ERROR E0517 enum E1 { V(()), diff --git a/third_party/rust/pin-project/tests/ui/pin_project/packed-enum.stderr b/third_party/rust/pin-project/tests/ui/pin_project/packed-enum.stderr index 1ce52bdab20a..187221120638 100644 --- a/third_party/rust/pin-project/tests/ui/pin_project/packed-enum.stderr +++ b/third_party/rust/pin-project/tests/ui/pin_project/packed-enum.stderr @@ -1,30 +1,42 @@ -error[E0517]: attribute should be applied to a struct or union - --> tests/ui/pin_project/packed-enum.rs:3:8 - | -3 | #[repr(packed)] //~ ERROR E0517 - | ^^^^^^ -4 | / enum E1 { -5 | | V(()), -6 | | } - | |_- not a struct or union +error: #[repr(packed)] attribute should be applied to a struct or union + --> tests/ui/pin_project/packed-enum.rs:13:8 + | +13 | #[repr(packed)] //~ ERROR E0517 + | ^^^^^^ + +error: #[repr(packed)] attribute should be applied to a struct or union + --> tests/ui/pin_project/packed-enum.rs:18:8 + | +18 | #[repr(packed)] //~ ERROR E0517 + | ^^^^^^ error[E0517]: attribute should be applied to a struct or union - --> tests/ui/pin_project/packed-enum.rs:9:8 + --> tests/ui/pin_project/packed-enum.rs:7:8 | -9 | #[repr(packed)] //~ ERROR E0517 +7 | #[repr(packed)] //~ ERROR E0517 | ^^^^^^ -10 | / enum E2 { -11 | | V(()), -12 | | } +8 | / enum E1 { +9 | | V(()), +10 | | } | |_- not a struct or union error[E0517]: attribute should be applied to a struct or union - --> tests/ui/pin_project/packed-enum.rs:14:8 + --> tests/ui/pin_project/packed-enum.rs:13:8 | -14 | #[repr(packed)] //~ ERROR E0517 +13 | #[repr(packed)] //~ ERROR E0517 | ^^^^^^ -15 | #[pin_project] -16 | / enum E3 { -17 | | V(()), -18 | | } +14 | / enum E2 { +15 | | V(()), +16 | | } + | |_- not a struct or union + +error[E0517]: attribute should be applied to a struct or union + --> tests/ui/pin_project/packed-enum.rs:18:8 + | +18 | #[repr(packed)] //~ ERROR E0517 + | ^^^^^^ +19 | #[pin_project] +20 | / enum E3 { +21 | | V(()), +22 | | } | |_- not a struct or union diff --git a/third_party/rust/pin-project/tests/ui/pin_project/packed-name-value.rs b/third_party/rust/pin-project/tests/ui/pin_project/packed-name-value.rs index ed819ca431fe..dedc4030facc 100644 --- a/third_party/rust/pin-project/tests/ui/pin_project/packed-name-value.rs +++ b/third_party/rust/pin-project/tests/ui/pin_project/packed-name-value.rs @@ -1,17 +1,24 @@ use pin_project::pin_project; -#[repr(packed = "")] //~ ERROR E0552 -struct S1 { - f: (), -} +// #[repr(packed = "")] is not valid format of #[repr(packed)] and will be +// rejected by rustc. +// However, we should not rely on the behavior of rustc that rejects this. 
+// https://github.com/taiki-e/pin-project/pull/324#discussion_r612388001 + +// https://github.com/taiki-e/pin-project/pull/324#discussion_r612388001 +// https://github.com/rust-lang/rust/issues/83921 +// #[repr(packed = "")] //~ ERROR E0552 +// struct S1 { +// f: (), +// } #[pin_project] -#[repr(packed = "")] //~ ERROR E0552 +#[repr(packed = "")] //~ ERROR attribute should not be name-value pair struct S2 { f: (), } -#[repr(packed = "")] //~ ERROR E0552 +#[repr(packed = "")] //~ ERROR attribute should not be name-value pair #[pin_project] struct S3 { f: (), diff --git a/third_party/rust/pin-project/tests/ui/pin_project/packed-name-value.stderr b/third_party/rust/pin-project/tests/ui/pin_project/packed-name-value.stderr index 1f40a2d2729f..d8b2194713e9 100644 --- a/third_party/rust/pin-project/tests/ui/pin_project/packed-name-value.stderr +++ b/third_party/rust/pin-project/tests/ui/pin_project/packed-name-value.stderr @@ -1,17 +1,23 @@ -error[E0693]: incorrect `repr(packed)` attribute format - --> tests/ui/pin_project/packed-name-value.rs:3:8 - | -3 | #[repr(packed = "")] //~ ERROR E0552 - | ^^^^^^^^^^^ help: use parentheses instead: `packed()` - -error[E0693]: incorrect `repr(packed)` attribute format - --> tests/ui/pin_project/packed-name-value.rs:9:8 - | -9 | #[repr(packed = "")] //~ ERROR E0552 - | ^^^^^^^^^^^ help: use parentheses instead: `packed()` - -error[E0693]: incorrect `repr(packed)` attribute format - --> tests/ui/pin_project/packed-name-value.rs:14:8 +error: #[repr(packed)] attribute should not be name-value pair + --> tests/ui/pin_project/packed-name-value.rs:16:8 | -14 | #[repr(packed = "")] //~ ERROR E0552 +16 | #[repr(packed = "")] //~ ERROR attribute should not be name-value pair + | ^^^^^^^^^^^ + +error: #[repr(packed)] attribute should not be name-value pair + --> tests/ui/pin_project/packed-name-value.rs:21:8 + | +21 | #[repr(packed = "")] //~ ERROR attribute should not be name-value pair + | ^^^^^^^^^^^ + +error[E0693]: incorrect `repr(packed)` attribute format + --> tests/ui/pin_project/packed-name-value.rs:16:8 + | +16 | #[repr(packed = "")] //~ ERROR attribute should not be name-value pair + | ^^^^^^^^^^^ help: use parentheses instead: `packed()` + +error[E0693]: incorrect `repr(packed)` attribute format + --> tests/ui/pin_project/packed-name-value.rs:21:8 + | +21 | #[repr(packed = "")] //~ ERROR attribute should not be name-value pair | ^^^^^^^^^^^ help: use parentheses instead: `packed()` diff --git a/third_party/rust/pin-project/tests/ui/pin_project/packed_sneaky-1.rs b/third_party/rust/pin-project/tests/ui/pin_project/packed_sneaky-1.rs index d7b7f2b4b366..72d3d3e87534 100644 --- a/third_party/rust/pin-project/tests/ui/pin_project/packed_sneaky-1.rs +++ b/third_party/rust/pin-project/tests/ui/pin_project/packed_sneaky-1.rs @@ -1,19 +1,20 @@ +use std::pin::Pin; + use auxiliary_macro::hidden_repr; use pin_project::{pin_project, pinned_drop, UnsafeUnpin}; -use std::pin::Pin; #[pin_project] //~ ERROR may not be used on #[repr(packed)] types #[hidden_repr(packed)] struct A { #[pin] - field: u32, + f: u32, } #[pin_project(UnsafeUnpin)] //~ ERROR may not be used on #[repr(packed)] types #[hidden_repr(packed)] struct C { #[pin] - field: u32, + f: u32, } unsafe impl UnsafeUnpin for C {} @@ -22,7 +23,7 @@ unsafe impl UnsafeUnpin for C {} #[hidden_repr(packed)] struct D { #[pin] - field: u32, + f: u32, } #[pinned_drop] diff --git a/third_party/rust/pin-project/tests/ui/pin_project/packed_sneaky-1.stderr 
b/third_party/rust/pin-project/tests/ui/pin_project/packed_sneaky-1.stderr index 6b40123b1384..32fe4074792c 100644 --- a/third_party/rust/pin-project/tests/ui/pin_project/packed_sneaky-1.stderr +++ b/third_party/rust/pin-project/tests/ui/pin_project/packed_sneaky-1.stderr @@ -1,23 +1,17 @@ error: #[pin_project] attribute may not be used on #[repr(packed)] types - --> tests/ui/pin_project/packed_sneaky-1.rs:6:1 + --> tests/ui/pin_project/packed_sneaky-1.rs:7:15 | -6 | #[hidden_repr(packed)] - | ^^^^^^^^^^^^^^^^^^^^^^ - | - = note: this error originates in the attribute macro `hidden_repr` (in Nightly builds, run with -Z macro-backtrace for more info) +7 | #[hidden_repr(packed)] + | ^^^^^^ error: #[pin_project] attribute may not be used on #[repr(packed)] types - --> tests/ui/pin_project/packed_sneaky-1.rs:13:1 + --> tests/ui/pin_project/packed_sneaky-1.rs:14:15 | -13 | #[hidden_repr(packed)] - | ^^^^^^^^^^^^^^^^^^^^^^ - | - = note: this error originates in the attribute macro `hidden_repr` (in Nightly builds, run with -Z macro-backtrace for more info) +14 | #[hidden_repr(packed)] + | ^^^^^^ error: #[pin_project] attribute may not be used on #[repr(packed)] types - --> tests/ui/pin_project/packed_sneaky-1.rs:22:1 + --> tests/ui/pin_project/packed_sneaky-1.rs:23:15 | -22 | #[hidden_repr(packed)] - | ^^^^^^^^^^^^^^^^^^^^^^ - | - = note: this error originates in the attribute macro `hidden_repr` (in Nightly builds, run with -Z macro-backtrace for more info) +23 | #[hidden_repr(packed)] + | ^^^^^^ diff --git a/third_party/rust/pin-project/tests/ui/pin_project/packed_sneaky-2.rs b/third_party/rust/pin-project/tests/ui/pin_project/packed_sneaky-2.rs index 9627f58f2611..b098358e6f4f 100644 --- a/third_party/rust/pin-project/tests/ui/pin_project/packed_sneaky-2.rs +++ b/third_party/rust/pin-project/tests/ui/pin_project/packed_sneaky-2.rs @@ -5,7 +5,7 @@ hidden_repr_macro! { //~ ERROR may not be used on #[repr(packed)] types #[pin_project] struct B { #[pin] - field: u32, + f: u32, } } diff --git a/third_party/rust/pin-project/tests/ui/pin_project/packed_sneaky-2.stderr b/third_party/rust/pin-project/tests/ui/pin_project/packed_sneaky-2.stderr index 5e11071c9d5c..d643052fdab4 100644 --- a/third_party/rust/pin-project/tests/ui/pin_project/packed_sneaky-2.stderr +++ b/third_party/rust/pin-project/tests/ui/pin_project/packed_sneaky-2.stderr @@ -5,7 +5,7 @@ error: #[pin_project] attribute may not be used on #[repr(packed)] types 5 | | #[pin_project] 6 | | struct B { 7 | | #[pin] -8 | | field: u32, +8 | | f: u32, 9 | | } 10 | | } | |_^ diff --git a/third_party/rust/pin-project/tests/ui/pin_project/packed_sneaky-3.rs b/third_party/rust/pin-project/tests/ui/pin_project/packed_sneaky-3.rs new file mode 100644 index 000000000000..d3f00f3d2318 --- /dev/null +++ b/third_party/rust/pin-project/tests/ui/pin_project/packed_sneaky-3.rs @@ -0,0 +1,32 @@ +use auxiliary_macro::{hidden_repr_macro, HiddenRepr}; +use pin_project::pin_project; + +hidden_repr_macro! {} //~ ERROR expected item after attributes +#[pin_project] +struct S1 { + #[pin] + f: u32, +} + +macro_rules! hidden_repr_macro2 { + () => { + #[repr(packed)] //~ ERROR expected item after attributes + }; +} + +hidden_repr_macro2! 
{} +#[pin_project] +struct S2 { + #[pin] + f: u32, +} + +#[derive(HiddenRepr)] //~ ERROR expected item after attributes +struct S3 {} +#[pin_project] +struct S4 { + #[pin] + f: u32, +} + +fn main() {} diff --git a/third_party/rust/pin-project/tests/ui/pin_project/packed_sneaky-3.stderr b/third_party/rust/pin-project/tests/ui/pin_project/packed_sneaky-3.stderr new file mode 100644 index 000000000000..c97f18b75a0d --- /dev/null +++ b/third_party/rust/pin-project/tests/ui/pin_project/packed_sneaky-3.stderr @@ -0,0 +1,32 @@ +error: expected item after attributes + --> tests/ui/pin_project/packed_sneaky-3.rs:4:1 + | +4 | hidden_repr_macro! {} //~ ERROR expected item after attributes + | ^^^^^^^^^^^^^^^^^^^^^ + | + = note: this error originates in the macro `hidden_repr_macro` (in Nightly builds, run with -Z macro-backtrace for more info) + +error: expected item after attributes + --> tests/ui/pin_project/packed_sneaky-3.rs:13:9 + | +13 | #[repr(packed)] //~ ERROR expected item after attributes + | ^^^^^^^^^^^^^^^ +... +17 | hidden_repr_macro2! {} + | ---------------------- in this macro invocation + | + = note: this error originates in the macro `hidden_repr_macro2` (in Nightly builds, run with -Z macro-backtrace for more info) + +error: expected item after attributes + --> tests/ui/pin_project/packed_sneaky-3.rs:24:10 + | +24 | #[derive(HiddenRepr)] //~ ERROR expected item after attributes + | ^^^^^^^^^^ + | + = note: this error originates in the derive macro `HiddenRepr` (in Nightly builds, run with -Z macro-backtrace for more info) + +error: proc-macro derive produced unparseable tokens + --> tests/ui/pin_project/packed_sneaky-3.rs:24:10 + | +24 | #[derive(HiddenRepr)] //~ ERROR expected item after attributes + | ^^^^^^^^^^ diff --git a/third_party/rust/pin-project/tests/ui/pin_project/private_in_public-enum.rs b/third_party/rust/pin-project/tests/ui/pin_project/private_in_public-enum.rs index cbffa2049de7..15a82a9a928c 100644 --- a/third_party/rust/pin-project/tests/ui/pin_project/private_in_public-enum.rs +++ b/third_party/rust/pin-project/tests/ui/pin_project/private_in_public-enum.rs @@ -3,20 +3,20 @@ #![allow(private_in_public)] pub enum PublicEnum { - Variant(PrivateEnum), //~ ERROR E0446 + V(PrivateEnum), //~ ERROR E0446 } enum PrivateEnum { - Variant(u8), + V(u8), } mod foo { pub(crate) enum CrateEnum { - Variant(PrivateEnum), //~ ERROR E0446 + V(PrivateEnum), //~ ERROR E0446 } enum PrivateEnum { - Variant(u8), + V(u8), } } diff --git a/third_party/rust/pin-project/tests/ui/pin_project/private_in_public-enum.stderr b/third_party/rust/pin-project/tests/ui/pin_project/private_in_public-enum.stderr index 208e004d550c..c93b265916f8 100644 --- a/third_party/rust/pin-project/tests/ui/pin_project/private_in_public-enum.stderr +++ b/third_party/rust/pin-project/tests/ui/pin_project/private_in_public-enum.stderr @@ -1,17 +1,17 @@ error[E0446]: private type `PrivateEnum` in public interface - --> tests/ui/pin_project/private_in_public-enum.rs:6:13 + --> tests/ui/pin_project/private_in_public-enum.rs:6:7 | -6 | Variant(PrivateEnum), //~ ERROR E0446 - | ^^^^^^^^^^^ can't leak private type +6 | V(PrivateEnum), //~ ERROR E0446 + | ^^^^^^^^^^^ can't leak private type ... 
9 | enum PrivateEnum { | ---------------- `PrivateEnum` declared as private error[E0446]: private type `foo::PrivateEnum` in public interface - --> tests/ui/pin_project/private_in_public-enum.rs:15:17 + --> tests/ui/pin_project/private_in_public-enum.rs:15:11 | -15 | Variant(PrivateEnum), //~ ERROR E0446 - | ^^^^^^^^^^^ can't leak private type +15 | V(PrivateEnum), //~ ERROR E0446 + | ^^^^^^^^^^^ can't leak private type ... 18 | enum PrivateEnum { | ---------------- `foo::PrivateEnum` declared as private diff --git a/third_party/rust/pin-project/tests/ui/pin_project/project_replace_unsized.rs b/third_party/rust/pin-project/tests/ui/pin_project/project_replace_unsized.rs index 706a0c1d3f42..20dde12bd3d4 100644 --- a/third_party/rust/pin-project/tests/ui/pin_project/project_replace_unsized.rs +++ b/third_party/rust/pin-project/tests/ui/pin_project/project_replace_unsized.rs @@ -2,7 +2,7 @@ use pin_project::pin_project; #[pin_project(project_replace)] //~ ERROR E0277 struct Struct { - x: T, + f: T, } #[pin_project(project_replace)] //~ ERROR E0277 diff --git a/third_party/rust/pin-project/tests/ui/pin_project/project_replace_unsized.stderr b/third_party/rust/pin-project/tests/ui/pin_project/project_replace_unsized.stderr index caea4044fc58..86cc14889ff7 100644 --- a/third_party/rust/pin-project/tests/ui/pin_project/project_replace_unsized.stderr +++ b/third_party/rust/pin-project/tests/ui/pin_project/project_replace_unsized.stderr @@ -22,6 +22,31 @@ help: function arguments must have a statically known size, borrowed types alway 3 | #[pin_project(&project_replace)] //~ ERROR E0277 | + +error[E0277]: the size for values of type `T` cannot be known at compilation time + --> tests/ui/pin_project/project_replace_unsized.rs:3:1 + | +3 | #[pin_project(project_replace)] //~ ERROR E0277 + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time +4 | struct Struct { + | - this type parameter needs to be `std::marker::Sized` + | +note: required because it appears within the type `Struct` + --> tests/ui/pin_project/project_replace_unsized.rs:4:8 + | +4 | struct Struct { + | ^^^^^^ +note: required by a bound in `UnsafeOverwriteGuard::::new` + --> src/lib.rs + | + | impl UnsafeOverwriteGuard { + | ^ required by this bound in `UnsafeOverwriteGuard::::new` + = note: this error originates in the derive macro `::pin_project::__private::__PinProjectInternalDerive` (in Nightly builds, run with -Z macro-backtrace for more info) +help: consider removing the `?Sized` bound to make the type parameter `Sized` + | +4 - struct Struct { +4 + struct Struct { + | + error[E0277]: the size for values of type `T` cannot be known at compilation time --> tests/ui/pin_project/project_replace_unsized.rs:5:5 | @@ -29,7 +54,7 @@ error[E0277]: the size for values of type `T` cannot be known at compilation tim | ------------------------------- required by a bound introduced by this call 4 | struct Struct { | - this type parameter needs to be `std::marker::Sized` -5 | x: T, +5 | f: T, | ^ doesn't have a size known at compile-time | note: required by a bound in `std::ptr::read` @@ -43,27 +68,6 @@ help: consider removing the `?Sized` bound to make the type parameter `Sized` 4 + struct Struct { | -error[E0277]: the size for values of type `T` cannot be known at compilation time - --> tests/ui/pin_project/project_replace_unsized.rs:3:1 - | -3 | #[pin_project(project_replace)] //~ ERROR E0277 - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time -4 | struct Struct { - | - this type parameter 
needs to be `std::marker::Sized` - | -note: required because it appears within the type `__StructProjectionOwned` - --> tests/ui/pin_project/project_replace_unsized.rs:4:8 - | -4 | struct Struct { - | ^^^^^^ - = note: structs must have a statically known size to be initialized - = note: this error originates in the derive macro `::pin_project::__private::__PinProjectInternalDerive` (in Nightly builds, run with -Z macro-backtrace for more info) -help: consider removing the `?Sized` bound to make the type parameter `Sized` - | -4 - struct Struct { -4 + struct Struct { - | - error[E0277]: the size for values of type `T` cannot be known at compilation time --> tests/ui/pin_project/project_replace_unsized.rs:8:15 | @@ -96,11 +100,16 @@ error[E0277]: the size for values of type `T` cannot be known at compilation tim 9 | struct TupleStruct(T); | - this type parameter needs to be `std::marker::Sized` | -note: required by a bound in `std::ptr::read` - --> $RUST/core/src/ptr/mod.rs +note: required because it appears within the type `TupleStruct` + --> tests/ui/pin_project/project_replace_unsized.rs:9:8 | - | pub const unsafe fn read(src: *const T) -> T { - | ^ required by this bound in `std::ptr::read` +9 | struct TupleStruct(T); + | ^^^^^^^^^^^ +note: required by a bound in `UnsafeOverwriteGuard::::new` + --> src/lib.rs + | + | impl UnsafeOverwriteGuard { + | ^ required by this bound in `UnsafeOverwriteGuard::::new` = note: this error originates in the derive macro `::pin_project::__private::__PinProjectInternalDerive` (in Nightly builds, run with -Z macro-backtrace for more info) help: consider removing the `?Sized` bound to make the type parameter `Sized` | diff --git a/third_party/rust/pin-project/tests/ui/pin_project/project_replace_unsized_fn_params.rs b/third_party/rust/pin-project/tests/ui/pin_project/project_replace_unsized_fn_params.rs index 0b311c00888c..e0fa25bf7d6d 100644 --- a/third_party/rust/pin-project/tests/ui/pin_project/project_replace_unsized_fn_params.rs +++ b/third_party/rust/pin-project/tests/ui/pin_project/project_replace_unsized_fn_params.rs @@ -4,7 +4,7 @@ use pin_project::pin_project; #[pin_project(project_replace)] //~ ERROR E0277 struct Struct { - x: T, + f: T, } #[pin_project(project_replace)] //~ ERROR E0277 diff --git a/third_party/rust/pin-project/tests/ui/pin_project/project_replace_unsized_fn_params.stderr b/third_party/rust/pin-project/tests/ui/pin_project/project_replace_unsized_fn_params.stderr index ebdabb7ae2aa..e398ca4e161b 100644 --- a/third_party/rust/pin-project/tests/ui/pin_project/project_replace_unsized_fn_params.stderr +++ b/third_party/rust/pin-project/tests/ui/pin_project/project_replace_unsized_fn_params.stderr @@ -19,6 +19,31 @@ help: consider removing the `?Sized` bound to make the type parameter `Sized` 6 + struct Struct { | +error[E0277]: the size for values of type `T` cannot be known at compilation time + --> tests/ui/pin_project/project_replace_unsized_fn_params.rs:5:1 + | +5 | #[pin_project(project_replace)] //~ ERROR E0277 + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time +6 | struct Struct { + | - this type parameter needs to be `std::marker::Sized` + | +note: required because it appears within the type `Struct` + --> tests/ui/pin_project/project_replace_unsized_fn_params.rs:6:8 + | +6 | struct Struct { + | ^^^^^^ +note: required by a bound in `UnsafeOverwriteGuard::::new` + --> src/lib.rs + | + | impl UnsafeOverwriteGuard { + | ^ required by this bound in `UnsafeOverwriteGuard::::new` + = note: this error 
originates in the derive macro `::pin_project::__private::__PinProjectInternalDerive` (in Nightly builds, run with -Z macro-backtrace for more info) +help: consider removing the `?Sized` bound to make the type parameter `Sized` + | +6 - struct Struct { +6 + struct Struct { + | + error[E0277]: the size for values of type `T` cannot be known at compilation time --> tests/ui/pin_project/project_replace_unsized_fn_params.rs:7:5 | @@ -26,7 +51,7 @@ error[E0277]: the size for values of type `T` cannot be known at compilation tim | ------------------------------- required by a bound introduced by this call 6 | struct Struct { | - this type parameter needs to be `std::marker::Sized` -7 | x: T, +7 | f: T, | ^ doesn't have a size known at compile-time | note: required by a bound in `std::ptr::read` @@ -40,27 +65,6 @@ help: consider removing the `?Sized` bound to make the type parameter `Sized` 6 + struct Struct { | -error[E0277]: the size for values of type `T` cannot be known at compilation time - --> tests/ui/pin_project/project_replace_unsized_fn_params.rs:5:1 - | -5 | #[pin_project(project_replace)] //~ ERROR E0277 - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time -6 | struct Struct { - | - this type parameter needs to be `std::marker::Sized` - | -note: required because it appears within the type `__StructProjectionOwned` - --> tests/ui/pin_project/project_replace_unsized_fn_params.rs:6:8 - | -6 | struct Struct { - | ^^^^^^ - = note: structs must have a statically known size to be initialized - = note: this error originates in the derive macro `::pin_project::__private::__PinProjectInternalDerive` (in Nightly builds, run with -Z macro-backtrace for more info) -help: consider removing the `?Sized` bound to make the type parameter `Sized` - | -6 - struct Struct { -6 + struct Struct { - | - error[E0277]: the size for values of type `T` cannot be known at compilation time --> tests/ui/pin_project/project_replace_unsized_fn_params.rs:11:8 | @@ -90,11 +94,16 @@ error[E0277]: the size for values of type `T` cannot be known at compilation tim 11 | struct TupleStruct(T); | - this type parameter needs to be `std::marker::Sized` | -note: required by a bound in `std::ptr::read` - --> $RUST/core/src/ptr/mod.rs +note: required because it appears within the type `TupleStruct` + --> tests/ui/pin_project/project_replace_unsized_fn_params.rs:11:8 | - | pub const unsafe fn read(src: *const T) -> T { - | ^ required by this bound in `std::ptr::read` +11 | struct TupleStruct(T); + | ^^^^^^^^^^^ +note: required by a bound in `UnsafeOverwriteGuard::::new` + --> src/lib.rs + | + | impl UnsafeOverwriteGuard { + | ^ required by this bound in `UnsafeOverwriteGuard::::new` = note: this error originates in the derive macro `::pin_project::__private::__PinProjectInternalDerive` (in Nightly builds, run with -Z macro-backtrace for more info) help: consider removing the `?Sized` bound to make the type parameter `Sized` | diff --git a/third_party/rust/pin-project/tests/ui/pin_project/proper_unpin.rs b/third_party/rust/pin-project/tests/ui/pin_project/proper_unpin.rs deleted file mode 100644 index e61789ba9ece..000000000000 --- a/third_party/rust/pin-project/tests/ui/pin_project/proper_unpin.rs +++ /dev/null @@ -1,38 +0,0 @@ -use pin_project::pin_project; -use std::marker::PhantomPinned; - -struct Inner { - val: T, -} - -#[pin_project] -struct Foo { - #[pin] - inner: Inner, - other: U, -} - -#[pin_project] -struct TrivialBounds { - #[pin] - field1: PhantomPinned, -} - -#[pin_project] -struct Bar<'a, T, 
U> { - #[pin] - inner: &'a mut Inner, - other: U, -} - -fn is_unpin() {} - -fn main() { - is_unpin::>(); //~ ERROR E0277 - is_unpin::>(); // Ok - is_unpin::>(); //~ ERROR E0277 - - is_unpin::(); //~ ERROR E0277 - - is_unpin::>(); // Ok -} diff --git a/third_party/rust/pin-project/tests/ui/pin_project/proper_unpin.stderr b/third_party/rust/pin-project/tests/ui/pin_project/proper_unpin.stderr deleted file mode 100644 index 112aa8edd0c0..000000000000 --- a/third_party/rust/pin-project/tests/ui/pin_project/proper_unpin.stderr +++ /dev/null @@ -1,87 +0,0 @@ -error[E0277]: `PhantomPinned` cannot be unpinned - --> tests/ui/pin_project/proper_unpin.rs:31:5 - | -31 | is_unpin::>(); //~ ERROR E0277 - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ within `__Foo<'_, PhantomPinned, ()>`, the trait `Unpin` is not implemented for `PhantomPinned` - | - = note: consider using `Box::pin` -note: required because it appears within the type `Inner` - --> tests/ui/pin_project/proper_unpin.rs:4:8 - | -4 | struct Inner { - | ^^^^^ -note: required because it appears within the type `__Foo<'_, PhantomPinned, ()>` - --> tests/ui/pin_project/proper_unpin.rs:9:8 - | -9 | struct Foo { - | ^^^ -note: required because of the requirements on the impl of `Unpin` for `Foo` - --> tests/ui/pin_project/proper_unpin.rs:8:1 - | -8 | #[pin_project] - | ^^^^^^^^^^^^^^ -9 | struct Foo { - | ^^^^^^^^^ -note: required by a bound in `is_unpin` - --> tests/ui/pin_project/proper_unpin.rs:28:16 - | -28 | fn is_unpin() {} - | ^^^^^ required by this bound in `is_unpin` - = note: this error originates in the derive macro `::pin_project::__private::__PinProjectInternalDerive` (in Nightly builds, run with -Z macro-backtrace for more info) - -error[E0277]: `PhantomPinned` cannot be unpinned - --> tests/ui/pin_project/proper_unpin.rs:33:5 - | -33 | is_unpin::>(); //~ ERROR E0277 - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ within `__Foo<'_, PhantomPinned, PhantomPinned>`, the trait `Unpin` is not implemented for `PhantomPinned` - | - = note: consider using `Box::pin` -note: required because it appears within the type `Inner` - --> tests/ui/pin_project/proper_unpin.rs:4:8 - | -4 | struct Inner { - | ^^^^^ -note: required because it appears within the type `__Foo<'_, PhantomPinned, PhantomPinned>` - --> tests/ui/pin_project/proper_unpin.rs:9:8 - | -9 | struct Foo { - | ^^^ -note: required because of the requirements on the impl of `Unpin` for `Foo` - --> tests/ui/pin_project/proper_unpin.rs:8:1 - | -8 | #[pin_project] - | ^^^^^^^^^^^^^^ -9 | struct Foo { - | ^^^^^^^^^ -note: required by a bound in `is_unpin` - --> tests/ui/pin_project/proper_unpin.rs:28:16 - | -28 | fn is_unpin() {} - | ^^^^^ required by this bound in `is_unpin` - = note: this error originates in the derive macro `::pin_project::__private::__PinProjectInternalDerive` (in Nightly builds, run with -Z macro-backtrace for more info) - -error[E0277]: `PhantomPinned` cannot be unpinned - --> tests/ui/pin_project/proper_unpin.rs:35:5 - | -35 | is_unpin::(); //~ ERROR E0277 - | ^^^^^^^^^^^^^^^^^^^^^^^^^ within `__TrivialBounds<'_>`, the trait `Unpin` is not implemented for `PhantomPinned` - | - = note: consider using `Box::pin` -note: required because it appears within the type `__TrivialBounds<'_>` - --> tests/ui/pin_project/proper_unpin.rs:16:8 - | -16 | struct TrivialBounds { - | ^^^^^^^^^^^^^ -note: required because of the requirements on the impl of `Unpin` for `TrivialBounds` - --> tests/ui/pin_project/proper_unpin.rs:15:1 - | -15 | #[pin_project] - | ^^^^^^^^^^^^^^ -16 | struct 
TrivialBounds { - | ^^^^^^^^^^^^^ -note: required by a bound in `is_unpin` - --> tests/ui/pin_project/proper_unpin.rs:28:16 - | -28 | fn is_unpin() {} - | ^^^^^ required by this bound in `is_unpin` - = note: this error originates in the derive macro `::pin_project::__private::__PinProjectInternalDerive` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/third_party/rust/pin-project/tests/ui/pin_project/remove-attr-from-field.rs b/third_party/rust/pin-project/tests/ui/pin_project/remove-attr-from-field.rs index 88a0b16e4a70..fd14da37c154 100644 --- a/third_party/rust/pin-project/tests/ui/pin_project/remove-attr-from-field.rs +++ b/third_party/rust/pin-project/tests/ui/pin_project/remove-attr-from-field.rs @@ -1,6 +1,7 @@ +use std::{marker::PhantomPinned, pin::Pin}; + use auxiliary_macro::remove_attr; use pin_project::pin_project; -use std::{marker::PhantomPinned, pin::Pin}; fn is_unpin() {} @@ -8,25 +9,25 @@ fn is_unpin() {} #[remove_attr(field_all)] struct A { #[pin] - field: PhantomPinned, + f: PhantomPinned, } #[remove_attr(field_all)] #[pin_project] struct B { #[pin] - field: PhantomPinned, + f: PhantomPinned, } fn main() { is_unpin::(); is_unpin::(); - let mut x = A { field: PhantomPinned }; + let mut x = A { f: PhantomPinned }; let x = Pin::new(&mut x).project(); - let _: Pin<&mut PhantomPinned> = x.field; //~ ERROR E0308 + let _: Pin<&mut PhantomPinned> = x.f; //~ ERROR E0308 - let mut x = B { field: PhantomPinned }; + let mut x = B { f: PhantomPinned }; let x = Pin::new(&mut x).project(); - let _: Pin<&mut PhantomPinned> = x.field; //~ ERROR E0308 + let _: Pin<&mut PhantomPinned> = x.f; //~ ERROR E0308 } diff --git a/third_party/rust/pin-project/tests/ui/pin_project/remove-attr-from-field.stderr b/third_party/rust/pin-project/tests/ui/pin_project/remove-attr-from-field.stderr index fc81de420517..697cd635498c 100644 --- a/third_party/rust/pin-project/tests/ui/pin_project/remove-attr-from-field.stderr +++ b/third_party/rust/pin-project/tests/ui/pin_project/remove-attr-from-field.stderr @@ -1,8 +1,8 @@ error[E0308]: mismatched types - --> tests/ui/pin_project/remove-attr-from-field.rs:27:38 + --> tests/ui/pin_project/remove-attr-from-field.rs:28:38 | -27 | let _: Pin<&mut PhantomPinned> = x.field; //~ ERROR E0308 - | ----------------------- ^^^^^^^ expected struct `Pin`, found `&mut PhantomPinned` +28 | let _: Pin<&mut PhantomPinned> = x.f; //~ ERROR E0308 + | ----------------------- ^^^ expected struct `Pin`, found `&mut PhantomPinned` | | | expected due to this | @@ -10,10 +10,10 @@ error[E0308]: mismatched types found mutable reference `&mut PhantomPinned` error[E0308]: mismatched types - --> tests/ui/pin_project/remove-attr-from-field.rs:31:38 + --> tests/ui/pin_project/remove-attr-from-field.rs:32:38 | -31 | let _: Pin<&mut PhantomPinned> = x.field; //~ ERROR E0308 - | ----------------------- ^^^^^^^ expected struct `Pin`, found `&mut PhantomPinned` +32 | let _: Pin<&mut PhantomPinned> = x.f; //~ ERROR E0308 + | ----------------------- ^^^ expected struct `Pin`, found `&mut PhantomPinned` | | | expected due to this | diff --git a/third_party/rust/pin-project/tests/ui/pin_project/remove-attr-from-struct.rs b/third_party/rust/pin-project/tests/ui/pin_project/remove-attr-from-struct.rs index e7c07f2c3373..cbe5aba2344a 100644 --- a/third_party/rust/pin-project/tests/ui/pin_project/remove-attr-from-struct.rs +++ b/third_party/rust/pin-project/tests/ui/pin_project/remove-attr-from-struct.rs @@ -1,6 +1,7 @@ +use std::{marker::PhantomPinned, pin::Pin}; + use 
auxiliary_macro::remove_attr; use pin_project::pin_project; -use std::{marker::PhantomPinned, pin::Pin}; fn is_unpin() {} @@ -8,26 +9,26 @@ fn is_unpin() {} #[remove_attr(struct_all)] struct A { #[pin] //~ ERROR cannot find attribute `pin` in this scope - field: PhantomPinned, + f: PhantomPinned, } #[remove_attr(struct_all)] #[pin_project] struct B { #[pin] //~ ERROR cannot find attribute `pin` in this scope - field: PhantomPinned, + f: PhantomPinned, } #[pin_project] //~ ERROR has been removed #[remove_attr(struct_pin)] struct C { - field: PhantomPinned, + f: PhantomPinned, } #[remove_attr(struct_pin)] #[pin_project] // Ok struct D { - field: PhantomPinned, + f: PhantomPinned, } fn main() { @@ -35,12 +36,12 @@ fn main() { is_unpin::(); //~ ERROR E0277 is_unpin::(); // Ok - let mut x = A { field: PhantomPinned }; + let mut x = A { f: PhantomPinned }; let _ = Pin::new(&mut x).project(); //~ ERROR E0277,E0599 - let mut x = B { field: PhantomPinned }; + let mut x = B { f: PhantomPinned }; let _ = Pin::new(&mut x).project(); //~ ERROR E0277,E0599 - let mut x = D { field: PhantomPinned }; + let mut x = D { f: PhantomPinned }; let _ = Pin::new(&mut x).project(); //~ Ok } diff --git a/third_party/rust/pin-project/tests/ui/pin_project/remove-attr-from-struct.stderr b/third_party/rust/pin-project/tests/ui/pin_project/remove-attr-from-struct.stderr index e794ba8af7f1..1a9cd42ce20b 100644 --- a/third_party/rust/pin-project/tests/ui/pin_project/remove-attr-from-struct.stderr +++ b/third_party/rust/pin-project/tests/ui/pin_project/remove-attr-from-struct.stderr @@ -1,72 +1,72 @@ error: #[pin_project] attribute has been removed - --> tests/ui/pin_project/remove-attr-from-struct.rs:21:1 + --> tests/ui/pin_project/remove-attr-from-struct.rs:22:1 | -21 | #[pin_project] //~ ERROR has been removed +22 | #[pin_project] //~ ERROR has been removed | ^^^^^^^^^^^^^^ | = note: this error originates in the derive macro `::pin_project::__private::__PinProjectInternalDerive` (in Nightly builds, run with -Z macro-backtrace for more info) error: cannot find attribute `pin` in this scope - --> tests/ui/pin_project/remove-attr-from-struct.rs:17:7 + --> tests/ui/pin_project/remove-attr-from-struct.rs:18:7 | -17 | #[pin] //~ ERROR cannot find attribute `pin` in this scope +18 | #[pin] //~ ERROR cannot find attribute `pin` in this scope | ^^^ error: cannot find attribute `pin` in this scope - --> tests/ui/pin_project/remove-attr-from-struct.rs:10:7 + --> tests/ui/pin_project/remove-attr-from-struct.rs:11:7 | -10 | #[pin] //~ ERROR cannot find attribute `pin` in this scope +11 | #[pin] //~ ERROR cannot find attribute `pin` in this scope | ^^^ -error[E0277]: `PhantomPinned` cannot be unpinned - --> tests/ui/pin_project/remove-attr-from-struct.rs:34:5 - | -34 | is_unpin::(); //~ ERROR E0277 - | ^^^^^^^^^^^^^ within `A`, the trait `Unpin` is not implemented for `PhantomPinned` - | - = note: consider using `Box::pin` -note: required because it appears within the type `A` - --> tests/ui/pin_project/remove-attr-from-struct.rs:9:8 - | -9 | struct A { - | ^ -note: required by a bound in `is_unpin` - --> tests/ui/pin_project/remove-attr-from-struct.rs:5:16 - | -5 | fn is_unpin() {} - | ^^^^^ required by this bound in `is_unpin` - error[E0277]: `PhantomPinned` cannot be unpinned --> tests/ui/pin_project/remove-attr-from-struct.rs:35:5 | -35 | is_unpin::(); //~ ERROR E0277 +35 | is_unpin::(); //~ ERROR E0277 + | ^^^^^^^^^^^^^ within `A`, the trait `Unpin` is not implemented for `PhantomPinned` + | + = note: consider using `Box::pin` 
+note: required because it appears within the type `A` + --> tests/ui/pin_project/remove-attr-from-struct.rs:10:8 + | +10 | struct A { + | ^ +note: required by a bound in `is_unpin` + --> tests/ui/pin_project/remove-attr-from-struct.rs:6:16 + | +6 | fn is_unpin() {} + | ^^^^^ required by this bound in `is_unpin` + +error[E0277]: `PhantomPinned` cannot be unpinned + --> tests/ui/pin_project/remove-attr-from-struct.rs:36:5 + | +36 | is_unpin::(); //~ ERROR E0277 | ^^^^^^^^^^^^^ within `B`, the trait `Unpin` is not implemented for `PhantomPinned` | = note: consider using `Box::pin` note: required because it appears within the type `B` - --> tests/ui/pin_project/remove-attr-from-struct.rs:16:8 + --> tests/ui/pin_project/remove-attr-from-struct.rs:17:8 | -16 | struct B { +17 | struct B { | ^ note: required by a bound in `is_unpin` - --> tests/ui/pin_project/remove-attr-from-struct.rs:5:16 + --> tests/ui/pin_project/remove-attr-from-struct.rs:6:16 | -5 | fn is_unpin() {} +6 | fn is_unpin() {} | ^^^^^ required by this bound in `is_unpin` error[E0277]: `PhantomPinned` cannot be unpinned - --> tests/ui/pin_project/remove-attr-from-struct.rs:39:22 + --> tests/ui/pin_project/remove-attr-from-struct.rs:40:22 | -39 | let _ = Pin::new(&mut x).project(); //~ ERROR E0277,E0599 +40 | let _ = Pin::new(&mut x).project(); //~ ERROR E0277,E0599 | -------- ^^^^^^ within `A`, the trait `Unpin` is not implemented for `PhantomPinned` | | | required by a bound introduced by this call | = note: consider using `Box::pin` note: required because it appears within the type `A` - --> tests/ui/pin_project/remove-attr-from-struct.rs:9:8 + --> tests/ui/pin_project/remove-attr-from-struct.rs:10:8 | -9 | struct A { +10 | struct A { | ^ note: required by a bound in `Pin::

<P>::new` --> $RUST/core/src/pin.rs @@ -75,24 +75,24 @@ note: required by a bound in `Pin::<P>::new` | ^^^^^ required by this bound in `Pin::<P>::new` error[E0599]: no method named `project` found for struct `Pin<&mut A>` in the current scope - --> tests/ui/pin_project/remove-attr-from-struct.rs:39:30 + --> tests/ui/pin_project/remove-attr-from-struct.rs:40:30 | -39 | let _ = Pin::new(&mut x).project(); //~ ERROR E0277,E0599 +40 | let _ = Pin::new(&mut x).project(); //~ ERROR E0277,E0599 | ^^^^^^^ method not found in `Pin<&mut A>` error[E0277]: `PhantomPinned` cannot be unpinned - --> tests/ui/pin_project/remove-attr-from-struct.rs:42:22 + --> tests/ui/pin_project/remove-attr-from-struct.rs:43:22 | -42 | let _ = Pin::new(&mut x).project(); //~ ERROR E0277,E0599 +43 | let _ = Pin::new(&mut x).project(); //~ ERROR E0277,E0599 | -------- ^^^^^^ within `B`, the trait `Unpin` is not implemented for `PhantomPinned` | | | required by a bound introduced by this call | = note: consider using `Box::pin` note: required because it appears within the type `B` - --> tests/ui/pin_project/remove-attr-from-struct.rs:16:8 + --> tests/ui/pin_project/remove-attr-from-struct.rs:17:8 | -16 | struct B { +17 | struct B { | ^ note: required by a bound in `Pin::<P>::new` --> $RUST/core/src/pin.rs @@ -101,7 +101,7 @@ note: required by a bound in `Pin::<P>::new` | ^^^^^ required by this bound in `Pin::<P>
::new` error[E0599]: no method named `project` found for struct `Pin<&mut B>` in the current scope - --> tests/ui/pin_project/remove-attr-from-struct.rs:42:30 + --> tests/ui/pin_project/remove-attr-from-struct.rs:43:30 | -42 | let _ = Pin::new(&mut x).project(); //~ ERROR E0277,E0599 +43 | let _ = Pin::new(&mut x).project(); //~ ERROR E0277,E0599 | ^^^^^^^ method not found in `Pin<&mut B>` diff --git a/third_party/rust/pin-project/tests/ui/pin_project/unpin_sneaky.rs b/third_party/rust/pin-project/tests/ui/pin_project/unpin_sneaky.rs index 3ccb1a95d1e2..3f5f32be6947 100644 --- a/third_party/rust/pin-project/tests/ui/pin_project/unpin_sneaky.rs +++ b/third_party/rust/pin-project/tests/ui/pin_project/unpin_sneaky.rs @@ -1,11 +1,11 @@ use pin_project::pin_project; #[pin_project] -struct Foo { +struct S { #[pin] - inner: u8, + f: u8, } -impl Unpin for __Foo {} //~ ERROR E0412,E0321 +impl Unpin for __S {} //~ ERROR E0412,E0321 fn main() {} diff --git a/third_party/rust/pin-project/tests/ui/pin_project/unpin_sneaky.stderr b/third_party/rust/pin-project/tests/ui/pin_project/unpin_sneaky.stderr index 66bb03fad55c..ddf700deb6b2 100644 --- a/third_party/rust/pin-project/tests/ui/pin_project/unpin_sneaky.stderr +++ b/third_party/rust/pin-project/tests/ui/pin_project/unpin_sneaky.stderr @@ -1,11 +1,11 @@ -error[E0412]: cannot find type `__Foo` in this scope +error[E0412]: cannot find type `__S` in this scope --> tests/ui/pin_project/unpin_sneaky.rs:9:16 | -9 | impl Unpin for __Foo {} //~ ERROR E0412,E0321 - | ^^^^^ not found in this scope +9 | impl Unpin for __S {} //~ ERROR E0412,E0321 + | ^^^ not found in this scope error[E0321]: cross-crate traits with a default impl, like `Unpin`, can only be implemented for a struct/enum type, not `[type error]` --> tests/ui/pin_project/unpin_sneaky.rs:9:1 | -9 | impl Unpin for __Foo {} //~ ERROR E0412,E0321 - | ^^^^^^^^^^^^^^^^^^^^ can't implement cross-crate trait with a default impl for non-struct/enum type +9 | impl Unpin for __S {} //~ ERROR E0412,E0321 + | ^^^^^^^^^^^^^^^^^^ can't implement cross-crate trait with a default impl for non-struct/enum type diff --git a/third_party/rust/pin-project/tests/ui/pin_project/visibility.rs b/third_party/rust/pin-project/tests/ui/pin_project/visibility.rs index 01c0831b42b1..fdff5a678333 100644 --- a/third_party/rust/pin-project/tests/ui/pin_project/visibility.rs +++ b/third_party/rust/pin-project/tests/ui/pin_project/visibility.rs @@ -1,82 +1,49 @@ +/// Only named projected types can be imported. +/// See import_unnamed.rs for unnamed projected types. + mod pub_ { use pin_project::pin_project; - #[pin_project] - pub struct Default(()); - - #[pin_project(project_replace)] - pub struct Replace(()); -} -pub mod pub_use { - #[rustfmt::skip] - pub use crate::pub_::__DefaultProjection; //~ ERROR E0365 - #[rustfmt::skip] - pub use crate::pub_::__DefaultProjectionRef; //~ ERROR E0365 - #[rustfmt::skip] - pub use crate::pub_::__ReplaceProjection; //~ ERROR E0365 - #[rustfmt::skip] - pub use crate::pub_::__ReplaceProjectionOwned; //~ ERROR E0365 - #[rustfmt::skip] - pub use crate::pub_::__ReplaceProjectionRef; //~ ERROR E0365 - - // Confirm that the visibility of the original type is not changed. 
- pub use crate::pub_::{Default, Replace}; -} -pub mod pub_use2 { - // Ok - #[allow(unused_imports)] - pub(crate) use crate::pub_::{ - __DefaultProjection, __DefaultProjectionRef, __ReplaceProjection, __ReplaceProjectionOwned, - __ReplaceProjectionRef, - }; -} - -mod pub_crate { - use pin_project::pin_project; - - #[pin_project] - pub(crate) struct Default(()); - - #[pin_project(project_replace)] - pub(crate) struct Replace(()); -} -pub mod pub_crate_use { - // Ok - #[allow(unused_imports)] - pub(crate) use crate::pub_crate::{ - __DefaultProjection, __DefaultProjectionRef, __ReplaceProjection, __ReplaceProjectionOwned, - __ReplaceProjectionRef, - }; -} - -mod pub_renamed { - use pin_project::pin_project; - #[pin_project(project = DProj, project_ref = DProjRef)] pub struct Default(()); #[pin_project(project = RProj, project_ref = RProjRef, project_replace = RProjOwn)] pub struct Replace(()); } -pub mod pub_renamed_use { +pub mod pub_use { #[rustfmt::skip] - pub use crate::pub_renamed::DProj; //~ ERROR E0365 + pub use crate::pub_::DProj; //~ ERROR E0365 #[rustfmt::skip] - pub use crate::pub_renamed::DProjRef; //~ ERROR E0365 + pub use crate::pub_::DProjRef; //~ ERROR E0365 #[rustfmt::skip] - pub use crate::pub_renamed::RProj; //~ ERROR E0365 + pub use crate::pub_::RProj; //~ ERROR E0365 #[rustfmt::skip] - pub use crate::pub_renamed::RProjOwn; //~ ERROR E0365 + pub use crate::pub_::RProjOwn; //~ ERROR E0365 #[rustfmt::skip] - pub use crate::pub_renamed::RProjRef; //~ ERROR E0365 + pub use crate::pub_::RProjRef; //~ ERROR E0365 // Confirm that the visibility of the original type is not changed. - pub use crate::pub_renamed::{Default, Replace}; + pub use crate::pub_::{Default, Replace}; } -pub mod pub_renamed_use2 { +pub mod pub_use2 { // Ok #[allow(unused_imports)] - pub(crate) use crate::pub_renamed::{DProj, DProjRef, RProj, RProjOwn, RProjRef}; + pub(crate) use crate::pub_::{DProj, DProjRef, RProj, RProjOwn, RProjRef}; +} + +mod pub_crate { + use pin_project::pin_project; + + #[pin_project(project = DProj, project_ref = DProjRef)] + pub(crate) struct Default(()); + + #[pin_project(project = RProj, project_ref = RProjRef, project_replace = RProjOwn)] + pub(crate) struct Replace(()); +} +pub mod pub_crate_use { + // Ok + #[allow(unused_imports)] + pub(crate) use crate::pub_crate::{DProj, DProjRef, RProj, RProjOwn, RProjRef}; } fn main() {} diff --git a/third_party/rust/pin-project/tests/ui/pin_project/visibility.stderr b/third_party/rust/pin-project/tests/ui/pin_project/visibility.stderr index 6fbee1542ce4..4d1b7229a6a6 100644 --- a/third_party/rust/pin-project/tests/ui/pin_project/visibility.stderr +++ b/third_party/rust/pin-project/tests/ui/pin_project/visibility.stderr @@ -1,79 +1,39 @@ -error[E0365]: `__DefaultProjection` is only public within the crate, and cannot be re-exported outside - --> tests/ui/pin_project/visibility.rs:12:13 - | -12 | pub use crate::pub_::__DefaultProjection; //~ ERROR E0365 - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ re-export of crate public `__DefaultProjection` - | - = note: consider declaring type or module `__DefaultProjection` with `pub` - -error[E0365]: `__DefaultProjectionRef` is only public within the crate, and cannot be re-exported outside - --> tests/ui/pin_project/visibility.rs:14:13 - | -14 | pub use crate::pub_::__DefaultProjectionRef; //~ ERROR E0365 - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ re-export of crate public `__DefaultProjectionRef` - | - = note: consider declaring type or module `__DefaultProjectionRef` with `pub` - -error[E0365]: 
`__ReplaceProjection` is only public within the crate, and cannot be re-exported outside - --> tests/ui/pin_project/visibility.rs:16:13 - | -16 | pub use crate::pub_::__ReplaceProjection; //~ ERROR E0365 - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ re-export of crate public `__ReplaceProjection` - | - = note: consider declaring type or module `__ReplaceProjection` with `pub` - -error[E0365]: `__ReplaceProjectionOwned` is only public within the crate, and cannot be re-exported outside - --> tests/ui/pin_project/visibility.rs:18:13 - | -18 | pub use crate::pub_::__ReplaceProjectionOwned; //~ ERROR E0365 - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ re-export of crate public `__ReplaceProjectionOwned` - | - = note: consider declaring type or module `__ReplaceProjectionOwned` with `pub` - -error[E0365]: `__ReplaceProjectionRef` is only public within the crate, and cannot be re-exported outside - --> tests/ui/pin_project/visibility.rs:20:13 - | -20 | pub use crate::pub_::__ReplaceProjectionRef; //~ ERROR E0365 - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ re-export of crate public `__ReplaceProjectionRef` - | - = note: consider declaring type or module `__ReplaceProjectionRef` with `pub` - error[E0365]: `DProj` is only public within the crate, and cannot be re-exported outside - --> tests/ui/pin_project/visibility.rs:63:13 + --> tests/ui/pin_project/visibility.rs:15:13 | -63 | pub use crate::pub_renamed::DProj; //~ ERROR E0365 - | ^^^^^^^^^^^^^^^^^^^^^^^^^ re-export of crate public `DProj` +15 | pub use crate::pub_::DProj; //~ ERROR E0365 + | ^^^^^^^^^^^^^^^^^^ re-export of crate public `DProj` | = note: consider declaring type or module `DProj` with `pub` error[E0365]: `DProjRef` is only public within the crate, and cannot be re-exported outside - --> tests/ui/pin_project/visibility.rs:65:13 + --> tests/ui/pin_project/visibility.rs:17:13 | -65 | pub use crate::pub_renamed::DProjRef; //~ ERROR E0365 - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ re-export of crate public `DProjRef` +17 | pub use crate::pub_::DProjRef; //~ ERROR E0365 + | ^^^^^^^^^^^^^^^^^^^^^ re-export of crate public `DProjRef` | = note: consider declaring type or module `DProjRef` with `pub` error[E0365]: `RProj` is only public within the crate, and cannot be re-exported outside - --> tests/ui/pin_project/visibility.rs:67:13 + --> tests/ui/pin_project/visibility.rs:19:13 | -67 | pub use crate::pub_renamed::RProj; //~ ERROR E0365 - | ^^^^^^^^^^^^^^^^^^^^^^^^^ re-export of crate public `RProj` +19 | pub use crate::pub_::RProj; //~ ERROR E0365 + | ^^^^^^^^^^^^^^^^^^ re-export of crate public `RProj` | = note: consider declaring type or module `RProj` with `pub` error[E0365]: `RProjOwn` is only public within the crate, and cannot be re-exported outside - --> tests/ui/pin_project/visibility.rs:69:13 + --> tests/ui/pin_project/visibility.rs:21:13 | -69 | pub use crate::pub_renamed::RProjOwn; //~ ERROR E0365 - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ re-export of crate public `RProjOwn` +21 | pub use crate::pub_::RProjOwn; //~ ERROR E0365 + | ^^^^^^^^^^^^^^^^^^^^^ re-export of crate public `RProjOwn` | = note: consider declaring type or module `RProjOwn` with `pub` error[E0365]: `RProjRef` is only public within the crate, and cannot be re-exported outside - --> tests/ui/pin_project/visibility.rs:71:13 + --> tests/ui/pin_project/visibility.rs:23:13 | -71 | pub use crate::pub_renamed::RProjRef; //~ ERROR E0365 - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ re-export of crate public `RProjRef` +23 | pub use crate::pub_::RProjRef; //~ ERROR E0365 + | ^^^^^^^^^^^^^^^^^^^^^ re-export of crate 
public `RProjRef` | = note: consider declaring type or module `RProjRef` with `pub` diff --git a/third_party/rust/pin-project/tests/ui/pinned_drop/call-drop-inner.rs b/third_party/rust/pin-project/tests/ui/pinned_drop/call-drop-inner.rs index c953acbbb430..9f89942b2148 100644 --- a/third_party/rust/pin-project/tests/ui/pinned_drop/call-drop-inner.rs +++ b/third_party/rust/pin-project/tests/ui/pinned_drop/call-drop-inner.rs @@ -1,9 +1,10 @@ -use pin_project::{pin_project, pinned_drop}; use std::pin::Pin; +use pin_project::{pin_project, pinned_drop}; + #[pin_project(PinnedDrop)] struct Struct { - dropped: bool, + f: bool, } #[pinned_drop] diff --git a/third_party/rust/pin-project/tests/ui/pinned_drop/call-drop-inner.stderr b/third_party/rust/pin-project/tests/ui/pinned_drop/call-drop-inner.stderr index ce753b1a135d..b11bee50baa2 100644 --- a/third_party/rust/pin-project/tests/ui/pinned_drop/call-drop-inner.stderr +++ b/third_party/rust/pin-project/tests/ui/pinned_drop/call-drop-inner.stderr @@ -1,13 +1,14 @@ error[E0061]: this function takes 0 arguments but 1 argument was supplied - --> tests/ui/pinned_drop/call-drop-inner.rs:12:9 + --> tests/ui/pinned_drop/call-drop-inner.rs:13:9 | -12 | __drop_inner(__self); +13 | __drop_inner(__self); | ^^^^^^^^^^^^ ------ supplied 1 argument | | | expected 0 arguments | note: function defined here - --> tests/ui/pinned_drop/call-drop-inner.rs:11:8 + --> tests/ui/pinned_drop/call-drop-inner.rs:10:1 | -11 | fn drop(mut self: Pin<&mut Self>) { - | ^^^^ +10 | #[pinned_drop] + | ^^^^^^^^^^^^^^ + = note: this error originates in the attribute macro `pinned_drop` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/third_party/rust/pin-project/tests/ui/pinned_drop/conditional-drop-impl.rs b/third_party/rust/pin-project/tests/ui/pinned_drop/conditional-drop-impl.rs index 42d18b79d5cd..23d756d4dc1b 100644 --- a/third_party/rust/pin-project/tests/ui/pinned_drop/conditional-drop-impl.rs +++ b/third_party/rust/pin-project/tests/ui/pinned_drop/conditional-drop-impl.rs @@ -1,10 +1,11 @@ -use pin_project::{pin_project, pinned_drop}; use std::pin::Pin; +use pin_project::{pin_project, pinned_drop}; + // In `Drop` impl, the implementor must specify the same requirement as type definition. 
struct DropImpl { - field: T, + f: T, } impl Drop for DropImpl { @@ -15,7 +16,7 @@ impl Drop for DropImpl { #[pin_project(PinnedDrop)] //~ ERROR E0277 struct PinnedDropImpl { #[pin] - field: T, + f: T, } #[pinned_drop] diff --git a/third_party/rust/pin-project/tests/ui/pinned_drop/conditional-drop-impl.stderr b/third_party/rust/pin-project/tests/ui/pinned_drop/conditional-drop-impl.stderr index cc47c44e0ef5..0c55bc2a3bf6 100644 --- a/third_party/rust/pin-project/tests/ui/pinned_drop/conditional-drop-impl.stderr +++ b/third_party/rust/pin-project/tests/ui/pinned_drop/conditional-drop-impl.stderr @@ -1,30 +1,30 @@ error[E0367]: `Drop` impl requires `T: Unpin` but the struct it is implemented for does not - --> tests/ui/pinned_drop/conditional-drop-impl.rs:10:9 + --> tests/ui/pinned_drop/conditional-drop-impl.rs:11:9 | -10 | impl Drop for DropImpl { +11 | impl Drop for DropImpl { | ^^^^^ | note: the implementor must specify the same requirement - --> tests/ui/pinned_drop/conditional-drop-impl.rs:6:1 + --> tests/ui/pinned_drop/conditional-drop-impl.rs:7:1 | -6 | / struct DropImpl { -7 | | field: T, -8 | | } +7 | / struct DropImpl { +8 | | f: T, +9 | | } | |_^ error[E0277]: `T` cannot be unpinned - --> tests/ui/pinned_drop/conditional-drop-impl.rs:15:15 + --> tests/ui/pinned_drop/conditional-drop-impl.rs:16:15 | -15 | #[pin_project(PinnedDrop)] //~ ERROR E0277 +16 | #[pin_project(PinnedDrop)] //~ ERROR E0277 | ^^^^^^^^^^ the trait `Unpin` is not implemented for `T` | = note: consider using `Box::pin` note: required because of the requirements on the impl of `PinnedDrop` for `PinnedDropImpl` - --> tests/ui/pinned_drop/conditional-drop-impl.rs:22:16 + --> tests/ui/pinned_drop/conditional-drop-impl.rs:23:16 | -22 | impl PinnedDrop for PinnedDropImpl { +23 | impl PinnedDrop for PinnedDropImpl { | ^^^^^^^^^^ ^^^^^^^^^^^^^^^^^ help: consider restricting type parameter `T` | -16 | struct PinnedDropImpl { +17 | struct PinnedDropImpl { | ++++++++++++++++++++ diff --git a/third_party/rust/pin-project/tests/ui/pinned_drop/forget-pinned-drop-impl.rs b/third_party/rust/pin-project/tests/ui/pinned_drop/forget-pinned-drop-impl.rs index 6c9f718e649b..e31f46f262e7 100644 --- a/third_party/rust/pin-project/tests/ui/pinned_drop/forget-pinned-drop-impl.rs +++ b/third_party/rust/pin-project/tests/ui/pinned_drop/forget-pinned-drop-impl.rs @@ -3,7 +3,7 @@ use pin_project::pin_project; #[pin_project(PinnedDrop)] //~ ERROR E0277 struct Struct { #[pin] - field: u8, + f: u8, } fn main() {} diff --git a/third_party/rust/pin-project/tests/ui/pinned_drop/invalid-self.rs b/third_party/rust/pin-project/tests/ui/pinned_drop/invalid-self.rs index 73d3b43e47a4..783167fc0d80 100644 --- a/third_party/rust/pin-project/tests/ui/pinned_drop/invalid-self.rs +++ b/third_party/rust/pin-project/tests/ui/pinned_drop/invalid-self.rs @@ -2,13 +2,13 @@ use std::pin::Pin; -struct Struct {} +struct S {} -impl Struct { +impl S { fn take_ref_self(ref self: Pin<&mut Self>) {} //~ ERROR expected identifier, found keyword `self` fn take_ref_mut_self(ref mut self: Pin<&mut Self>) {} //~ ERROR expected identifier, found keyword `self` - fn self_subpat(self @ Struct {}: Self) {} //~ ERROR expected one of `)`, `,`, or `:`, found `@` + fn self_subpat(self @ S {}: Self) {} //~ ERROR expected one of `)`, `,`, or `:`, found `@` } fn main() {} diff --git a/third_party/rust/pin-project/tests/ui/pinned_drop/invalid-self.stderr b/third_party/rust/pin-project/tests/ui/pinned_drop/invalid-self.stderr index a3f26960784f..464be5e5b553 100644 --- 
a/third_party/rust/pin-project/tests/ui/pinned_drop/invalid-self.stderr +++ b/third_party/rust/pin-project/tests/ui/pinned_drop/invalid-self.stderr @@ -13,13 +13,13 @@ error: expected identifier, found keyword `self` error: expected parameter name, found `@` --> tests/ui/pinned_drop/invalid-self.rs:11:25 | -11 | fn self_subpat(self @ Struct {}: Self) {} //~ ERROR expected one of `)`, `,`, or `:`, found `@` +11 | fn self_subpat(self @ S {}: Self) {} //~ ERROR expected one of `)`, `,`, or `:`, found `@` | ^ expected parameter name error: expected one of `)`, `,`, or `:`, found `@` --> tests/ui/pinned_drop/invalid-self.rs:11:25 | -11 | fn self_subpat(self @ Struct {}: Self) {} //~ ERROR expected one of `)`, `,`, or `:`, found `@` +11 | fn self_subpat(self @ S {}: Self) {} //~ ERROR expected one of `)`, `,`, or `:`, found `@` | -^ expected one of `)`, `,`, or `:` | | | help: missing `,` diff --git a/third_party/rust/pin-project/tests/ui/pinned_drop/invalid.rs b/third_party/rust/pin-project/tests/ui/pinned_drop/invalid.rs index 7065ec9ba5a0..fdadf8a51631 100644 --- a/third_party/rust/pin-project/tests/ui/pinned_drop/invalid.rs +++ b/third_party/rust/pin-project/tests/ui/pinned_drop/invalid.rs @@ -1,7 +1,8 @@ mod argument { - use pin_project::{pin_project, pinned_drop}; use std::pin::Pin; + use pin_project::{pin_project, pinned_drop}; + #[pin_project(PinnedDrop)] struct UnexpectedArg1(()); @@ -48,7 +49,7 @@ mod item { impl InherentImpl {} //~ ERROR may only be used on implementation for the `PinnedDrop` trait #[pinned_drop] - fn drop(_: Pin<&mut ()>) {} //~ ERROR expected `impl` + fn func(_: Pin<&mut ()>) {} //~ ERROR expected `impl` } mod unsafety { @@ -128,9 +129,10 @@ mod assoc_item { } mod method { - use pin_project::{pin_project, pinned_drop}; use std::pin::Pin; + use pin_project::{pin_project, pinned_drop}; + #[pin_project(PinnedDrop)] struct RetUnit(()); @@ -200,7 +202,7 @@ mod method { #[pinned_drop] impl PinnedDrop for InvalidName { - fn pinned_drop(&mut self) {} //~ ERROR method `pinned_drop` is not a member of trait `PinnedDrop + fn pinned_drop(self: Pin<&mut Self>) {} //~ ERROR method `pinned_drop` is not a member of trait `PinnedDrop } } diff --git a/third_party/rust/pin-project/tests/ui/pinned_drop/invalid.stderr b/third_party/rust/pin-project/tests/ui/pinned_drop/invalid.stderr index 679842018909..d509964b24f3 100644 --- a/third_party/rust/pin-project/tests/ui/pinned_drop/invalid.stderr +++ b/third_party/rust/pin-project/tests/ui/pinned_drop/invalid.stderr @@ -1,143 +1,143 @@ -error: unexpected token: foo - --> tests/ui/pinned_drop/invalid.rs:8:19 +error: unexpected token: `foo` + --> tests/ui/pinned_drop/invalid.rs:9:19 | -8 | #[pinned_drop(foo)] //~ ERROR unexpected token +9 | #[pinned_drop(foo)] //~ ERROR unexpected token | ^^^ error: duplicate #[pinned_drop] attribute - --> tests/ui/pinned_drop/invalid.rs:29:5 + --> tests/ui/pinned_drop/invalid.rs:30:5 | -29 | #[pinned_drop] //~ ERROR duplicate #[pinned_drop] attribute +30 | #[pinned_drop] //~ ERROR duplicate #[pinned_drop] attribute | ^^^^^^^^^^^^^^ error: #[pinned_drop] may only be used on implementation for the `PinnedDrop` trait - --> tests/ui/pinned_drop/invalid.rs:42:10 + --> tests/ui/pinned_drop/invalid.rs:43:10 | -42 | impl Drop for TraitImpl {} //~ ERROR may only be used on implementation for the `PinnedDrop` trait +43 | impl Drop for TraitImpl {} //~ ERROR may only be used on implementation for the `PinnedDrop` trait | ^^^^ error: #[pinned_drop] may only be used on implementation for the `PinnedDrop` trait - --> 
tests/ui/pinned_drop/invalid.rs:48:10 + --> tests/ui/pinned_drop/invalid.rs:49:10 | -48 | impl InherentImpl {} //~ ERROR may only be used on implementation for the `PinnedDrop` trait +49 | impl InherentImpl {} //~ ERROR may only be used on implementation for the `PinnedDrop` trait | ^^^^^^^^^^^^ error: expected `impl` - --> tests/ui/pinned_drop/invalid.rs:51:5 + --> tests/ui/pinned_drop/invalid.rs:52:5 | -51 | fn drop(_: Pin<&mut ()>) {} //~ ERROR expected `impl` +52 | fn func(_: Pin<&mut ()>) {} //~ ERROR expected `impl` | ^^ error: implementing the trait `PinnedDrop` is not unsafe - --> tests/ui/pinned_drop/invalid.rs:61:5 + --> tests/ui/pinned_drop/invalid.rs:62:5 | -61 | unsafe impl PinnedDrop for Impl { +62 | unsafe impl PinnedDrop for Impl { | ^^^^^^ error: implementing the method `drop` is not unsafe - --> tests/ui/pinned_drop/invalid.rs:71:9 + --> tests/ui/pinned_drop/invalid.rs:72:9 | -71 | unsafe fn drop(self: Pin<&mut Self>) {} //~ ERROR implementing the method `drop` is not unsafe +72 | unsafe fn drop(self: Pin<&mut Self>) {} //~ ERROR implementing the method `drop` is not unsafe | ^^^^^^ error: not all trait items implemented, missing: `drop` - --> tests/ui/pinned_drop/invalid.rs:82:5 + --> tests/ui/pinned_drop/invalid.rs:83:5 | -82 | impl PinnedDrop for Empty {} //~ ERROR not all trait items implemented, missing: `drop` +83 | impl PinnedDrop for Empty {} //~ ERROR not all trait items implemented, missing: `drop` | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ error: const `A` is not a member of trait `PinnedDrop` - --> tests/ui/pinned_drop/invalid.rs:89:9 + --> tests/ui/pinned_drop/invalid.rs:90:9 | -89 | const A: u8 = 0; //~ ERROR const `A` is not a member of trait `PinnedDrop` +90 | const A: u8 = 0; //~ ERROR const `A` is not a member of trait `PinnedDrop` | ^^^^^^^^^^^^^^^^ error: const `A` is not a member of trait `PinnedDrop` - --> tests/ui/pinned_drop/invalid.rs:99:9 - | -99 | const A: u8 = 0; //~ ERROR const `A` is not a member of trait `PinnedDrop` - | ^^^^^^^^^^^^^^^^ + --> tests/ui/pinned_drop/invalid.rs:100:9 + | +100 | const A: u8 = 0; //~ ERROR const `A` is not a member of trait `PinnedDrop` + | ^^^^^^^^^^^^^^^^ error: type `A` is not a member of trait `PinnedDrop` - --> tests/ui/pinned_drop/invalid.rs:107:9 + --> tests/ui/pinned_drop/invalid.rs:108:9 | -107 | type A = u8; //~ ERROR type `A` is not a member of trait `PinnedDrop` +108 | type A = u8; //~ ERROR type `A` is not a member of trait `PinnedDrop` | ^^^^^^^^^^^^ error: type `A` is not a member of trait `PinnedDrop` - --> tests/ui/pinned_drop/invalid.rs:117:9 + --> tests/ui/pinned_drop/invalid.rs:118:9 | -117 | type A = u8; //~ ERROR type `A` is not a member of trait `PinnedDrop` +118 | type A = u8; //~ ERROR type `A` is not a member of trait `PinnedDrop` | ^^^^^^^^^^^^ error: duplicate definitions with name `drop` - --> tests/ui/pinned_drop/invalid.rs:126:9 + --> tests/ui/pinned_drop/invalid.rs:127:9 | -126 | fn drop(self: Pin<&mut Self>) {} //~ ERROR duplicate definitions with name `drop` +127 | fn drop(self: Pin<&mut Self>) {} //~ ERROR duplicate definitions with name `drop` | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ error: method `drop` must return the unit type - --> tests/ui/pinned_drop/invalid.rs:147:42 + --> tests/ui/pinned_drop/invalid.rs:149:42 | -147 | fn drop(self: Pin<&mut Self>) -> Self {} //~ ERROR method `drop` must return the unit type +149 | fn drop(self: Pin<&mut Self>) -> Self {} //~ ERROR method `drop` must return the unit type | ^^^^ error: method `drop` must take an argument `self: Pin<&mut Self>` - --> 
tests/ui/pinned_drop/invalid.rs:155:16 + --> tests/ui/pinned_drop/invalid.rs:157:16 | -155 | fn drop() {} //~ ERROR method `drop` must take an argument `self: Pin<&mut Self>` +157 | fn drop() {} //~ ERROR method `drop` must take an argument `self: Pin<&mut Self>` | ^^ error: method `drop` must take an argument `self: Pin<&mut Self>` - --> tests/ui/pinned_drop/invalid.rs:163:17 + --> tests/ui/pinned_drop/invalid.rs:165:17 | -163 | fn drop(self: Pin<&mut Self>, _: ()) {} //~ ERROR method `drop` must take an argument `self: Pin<&mut Self>` +165 | fn drop(self: Pin<&mut Self>, _: ()) {} //~ ERROR method `drop` must take an argument `self: Pin<&mut Self>` | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ error: method `drop` must take an argument `self: Pin<&mut Self>` - --> tests/ui/pinned_drop/invalid.rs:171:17 + --> tests/ui/pinned_drop/invalid.rs:173:17 | -171 | fn drop(&mut self) {} //~ ERROR method `drop` must take an argument `self: Pin<&mut Self>` +173 | fn drop(&mut self) {} //~ ERROR method `drop` must take an argument `self: Pin<&mut Self>` | ^^^^^^^^^ error: method `drop` must take an argument `self: Pin<&mut Self>` - --> tests/ui/pinned_drop/invalid.rs:179:17 + --> tests/ui/pinned_drop/invalid.rs:181:17 | -179 | fn drop(_: Pin<&mut Self>) {} //~ ERROR method `drop` must take an argument `self: Pin<&mut Self>` +181 | fn drop(_: Pin<&mut Self>) {} //~ ERROR method `drop` must take an argument `self: Pin<&mut Self>` | ^^^^^^^^^^^^^^^^^ error: method `drop` must take an argument `self: Pin<&mut Self>` - --> tests/ui/pinned_drop/invalid.rs:187:17 + --> tests/ui/pinned_drop/invalid.rs:189:17 | -187 | fn drop(self: Pin<&Self>) {} //~ ERROR method `drop` must take an argument `self: Pin<&mut Self>` +189 | fn drop(self: Pin<&Self>) {} //~ ERROR method `drop` must take an argument `self: Pin<&mut Self>` | ^^^^^^^^^^^^^^^^ error: method `drop` must take an argument `self: Pin<&mut Self>` - --> tests/ui/pinned_drop/invalid.rs:195:17 + --> tests/ui/pinned_drop/invalid.rs:197:17 | -195 | fn drop(self: Pin<&mut ()>) {} //~ ERROR method `drop` must take an argument `self: Pin<&mut Self>` +197 | fn drop(self: Pin<&mut ()>) {} //~ ERROR method `drop` must take an argument `self: Pin<&mut Self>` | ^^^^^^^^^^^^^^^^^^ error: method `pinned_drop` is not a member of trait `PinnedDrop - --> tests/ui/pinned_drop/invalid.rs:203:12 + --> tests/ui/pinned_drop/invalid.rs:205:12 | -203 | fn pinned_drop(&mut self) {} //~ ERROR method `pinned_drop` is not a member of trait `PinnedDrop +205 | fn pinned_drop(self: Pin<&mut Self>) {} //~ ERROR method `pinned_drop` is not a member of trait `PinnedDrop | ^^^^^^^^^^^ error: implementing the trait `PinnedDrop` on this type is unsupported - --> tests/ui/pinned_drop/invalid.rs:211:25 + --> tests/ui/pinned_drop/invalid.rs:213:25 | -211 | impl PinnedDrop for () { +213 | impl PinnedDrop for () { | ^^ error: implementing the trait `PinnedDrop` on this type is unsupported - --> tests/ui/pinned_drop/invalid.rs:217:25 + --> tests/ui/pinned_drop/invalid.rs:219:25 | -217 | impl PinnedDrop for &mut A { +219 | impl PinnedDrop for &mut A { | ^^^^^^ error: implementing the trait `PinnedDrop` on this type is unsupported - --> tests/ui/pinned_drop/invalid.rs:223:25 + --> tests/ui/pinned_drop/invalid.rs:225:25 | -223 | impl PinnedDrop for [A] { +225 | impl PinnedDrop for [A] { | ^^^ diff --git a/third_party/rust/pin-project/tests/ui/pinned_drop/pinned-drop-no-attr-arg.rs b/third_party/rust/pin-project/tests/ui/pinned_drop/pinned-drop-no-attr-arg.rs index 1241b5be845f..391f2901ed1f 100644 --- 
a/third_party/rust/pin-project/tests/ui/pinned_drop/pinned-drop-no-attr-arg.rs +++ b/third_party/rust/pin-project/tests/ui/pinned_drop/pinned-drop-no-attr-arg.rs @@ -1,14 +1,16 @@ -use pin_project::{pin_project, pinned_drop}; use std::pin::Pin; +use pin_project::{pin_project, pinned_drop}; + #[pin_project] -struct Foo { +struct S { #[pin] - field: u8, + f: u8, } #[pinned_drop] -impl PinnedDrop for Foo { //~ ERROR E0119 +impl PinnedDrop for S { + //~^ ERROR E0119 fn drop(self: Pin<&mut Self>) {} } diff --git a/third_party/rust/pin-project/tests/ui/pinned_drop/pinned-drop-no-attr-arg.stderr b/third_party/rust/pin-project/tests/ui/pinned_drop/pinned-drop-no-attr-arg.stderr index f907e96074aa..254225454a1d 100644 --- a/third_party/rust/pin-project/tests/ui/pinned_drop/pinned-drop-no-attr-arg.stderr +++ b/third_party/rust/pin-project/tests/ui/pinned_drop/pinned-drop-no-attr-arg.stderr @@ -1,8 +1,8 @@ -error[E0119]: conflicting implementations of trait `pin_project::__private::PinnedDrop` for type `Foo` - --> tests/ui/pinned_drop/pinned-drop-no-attr-arg.rs:11:1 +error[E0119]: conflicting implementations of trait `_::_pin_project::__private::PinnedDrop` for type `S` + --> tests/ui/pinned_drop/pinned-drop-no-attr-arg.rs:12:1 | -4 | #[pin_project] +5 | #[pin_project] | -------------- first implementation here ... -11 | impl PinnedDrop for Foo { //~ ERROR E0119 - | ^^^^^^^^^^^^^^^^^^^^^^^ conflicting implementation for `Foo` +12 | impl PinnedDrop for S { + | ^^^^^^^^^^^^^^^^^^^^^ conflicting implementation for `S` diff --git a/third_party/rust/pin-project/tests/ui/pinned_drop/self.rs b/third_party/rust/pin-project/tests/ui/pinned_drop/self.rs index 91760662d305..ff634022e572 100644 --- a/third_party/rust/pin-project/tests/ui/pinned_drop/self.rs +++ b/third_party/rust/pin-project/tests/ui/pinned_drop/self.rs @@ -1,14 +1,15 @@ pub mod self_in_macro_def { - use pin_project::{pin_project, pinned_drop}; use std::pin::Pin; + use pin_project::{pin_project, pinned_drop}; + #[pin_project(PinnedDrop)] - pub struct Struct { - x: (), + pub struct S { + f: (), } #[pinned_drop] - impl PinnedDrop for Struct { + impl PinnedDrop for S { fn drop(self: Pin<&mut Self>) { macro_rules! t { () => {{ @@ -23,12 +24,13 @@ pub mod self_in_macro_def { } pub mod self_span { - use pin_project::{pin_project, pinned_drop}; use std::pin::Pin; + use pin_project::{pin_project, pinned_drop}; + #[pin_project(PinnedDrop)] pub struct S { - x: (), + f: (), } #[pinned_drop] @@ -41,7 +43,7 @@ pub mod self_span { #[pin_project(PinnedDrop)] pub enum E { - V { x: () }, + V { f: () }, } #[pinned_drop] diff --git a/third_party/rust/pin-project/tests/ui/pinned_drop/self.stderr b/third_party/rust/pin-project/tests/ui/pinned_drop/self.stderr index 76cf1e173c86..0f96dc9cecb6 100644 --- a/third_party/rust/pin-project/tests/ui/pinned_drop/self.stderr +++ b/third_party/rust/pin-project/tests/ui/pinned_drop/self.stderr @@ -1,53 +1,53 @@ error: `self` parameter is only allowed in associated functions - --> tests/ui/pinned_drop/self.rs:17:26 + --> tests/ui/pinned_drop/self.rs:18:26 | -17 | fn f(self: ()) {} //~ ERROR `self` parameter is only allowed in associated functions +18 | fn f(self: ()) {} //~ ERROR `self` parameter is only allowed in associated functions | ^^^^ not semantically valid as function parameter ... 
-20 | t!(); +21 | t!(); | ---- in this macro invocation | = note: associated functions are those in `impl` or `trait` definitions = note: this error originates in the macro `t` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0434]: can't capture dynamic environment in a fn item - --> tests/ui/pinned_drop/self.rs:15:29 + --> tests/ui/pinned_drop/self.rs:16:29 | -15 | let _ = self; //~ ERROR E0434 +16 | let _ = self; //~ ERROR E0434 | ^^^^ ... -20 | t!(); +21 | t!(); | ---- in this macro invocation | = help: use the `|| { ... }` closure form instead = note: this error originates in the macro `t` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0423]: expected value, found struct `S` - --> tests/ui/pinned_drop/self.rs:38:27 + --> tests/ui/pinned_drop/self.rs:40:27 | -30 | / pub struct S { -31 | | x: (), -32 | | } +32 | / pub struct S { +33 | | f: (), +34 | | } | |_____- `S` defined here ... -38 | let _: Self = Self; //~ ERROR E0423 - | ^^^^ help: use struct literal syntax instead: `S { x: val }` +40 | let _: Self = Self; //~ ERROR E0423 + | ^^^^ help: use struct literal syntax instead: `S { f: val }` error[E0308]: mismatched types - --> tests/ui/pinned_drop/self.rs:37:25 + --> tests/ui/pinned_drop/self.rs:39:25 | -37 | let _: () = self; //~ ERROR E0308 +39 | let _: () = self; //~ ERROR E0308 | -- ^^^^ expected `()`, found struct `Pin` | | | expected due to this | = note: expected unit type `()` - found struct `Pin<&mut S>` + found struct `Pin<&mut self_span::S>` error[E0308]: mismatched types - --> tests/ui/pinned_drop/self.rs:50:25 + --> tests/ui/pinned_drop/self.rs:52:25 | -50 | let _: () = self; //~ ERROR E0308 +52 | let _: () = self; //~ ERROR E0308 | -- ^^^^ expected `()`, found struct `Pin` | | | expected due to this @@ -56,7 +56,7 @@ error[E0308]: mismatched types found struct `Pin<&mut E>` error[E0533]: expected unit struct, unit variant or constant, found struct variant `Self::V` - --> tests/ui/pinned_drop/self.rs:51:27 + --> tests/ui/pinned_drop/self.rs:53:27 | -51 | let _: Self = Self::V; //~ ERROR E0533 +53 | let _: Self = Self::V; //~ ERROR E0533 | ^^^^^^^ diff --git a/third_party/rust/pin-project/tests/ui/pinned_drop/unsafe-call.rs b/third_party/rust/pin-project/tests/ui/pinned_drop/unsafe-call.rs index 2f400c1de89e..3ee2b56c37e5 100644 --- a/third_party/rust/pin-project/tests/ui/pinned_drop/unsafe-call.rs +++ b/third_party/rust/pin-project/tests/ui/pinned_drop/unsafe-call.rs @@ -1,16 +1,17 @@ -use pin_project::{pin_project, pinned_drop}; use std::pin::Pin; +use pin_project::{pin_project, pinned_drop}; + #[pin_project(PinnedDrop)] -struct Struct { +struct S { #[pin] - field: u8, + f: u8, } #[pinned_drop] -impl PinnedDrop for Struct { +impl PinnedDrop for S { fn drop(self: Pin<&mut Self>) { - self.project().field.get_unchecked_mut(); //~ ERROR call to unsafe function is unsafe and requires unsafe function or block [E0133] + self.project().f.get_unchecked_mut(); //~ ERROR call to unsafe function is unsafe and requires unsafe function or block [E0133] } } diff --git a/third_party/rust/pin-project/tests/ui/pinned_drop/unsafe-call.stderr b/third_party/rust/pin-project/tests/ui/pinned_drop/unsafe-call.stderr index e02c10a7affe..9f7dd440a539 100644 --- a/third_party/rust/pin-project/tests/ui/pinned_drop/unsafe-call.stderr +++ b/third_party/rust/pin-project/tests/ui/pinned_drop/unsafe-call.stderr @@ -1,7 +1,7 @@ error[E0133]: call to unsafe function is unsafe and requires unsafe function or block - --> tests/ui/pinned_drop/unsafe-call.rs:13:9 + 
--> tests/ui/pinned_drop/unsafe-call.rs:14:9 | -13 | self.project().field.get_unchecked_mut(); //~ ERROR call to unsafe function is unsafe and requires unsafe function or block [E0133] - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ call to unsafe function +14 | self.project().f.get_unchecked_mut(); //~ ERROR call to unsafe function is unsafe and requires unsafe function or block [E0133] + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ call to unsafe function | = note: consult the function's documentation for information on how to avoid undefined behavior diff --git a/third_party/rust/pin-project/tests/ui/project/ambiguous-let.rs b/third_party/rust/pin-project/tests/ui/project/ambiguous-let.rs deleted file mode 100644 index bbb3a2c26743..000000000000 --- a/third_party/rust/pin-project/tests/ui/project/ambiguous-let.rs +++ /dev/null @@ -1,26 +0,0 @@ -#![allow(deprecated)] - -use pin_project::{pin_project, project}; - -#[pin_project] -enum Enum { - A(#[pin] A), - B(B), -} - -struct Struct(T); - -#[project] -fn foo() { - let mut foo: Enum = Enum::A(true); - - #[project] - let Struct(x) = match Pin::new(&mut foo).project() { - //~^ ERROR Both initializer expression and pattern are replaceable, you need to split the initializer expression into separate let bindings to avoid ambiguity - Enum::A(_) => Struct(true), - Enum::B(_) => unreachable!(), - }; - assert!(x); -} - -fn main() {} diff --git a/third_party/rust/pin-project/tests/ui/project/ambiguous-let.stderr b/third_party/rust/pin-project/tests/ui/project/ambiguous-let.stderr deleted file mode 100644 index f808c6fdd0d4..000000000000 --- a/third_party/rust/pin-project/tests/ui/project/ambiguous-let.stderr +++ /dev/null @@ -1,5 +0,0 @@ -error: Both initializer expression and pattern are replaceable, you need to split the initializer expression into separate let bindings to avoid ambiguity - --> tests/ui/project/ambiguous-let.rs:18:9 - | -18 | let Struct(x) = match Pin::new(&mut foo).project() { - | ^^^^^^^^^ diff --git a/third_party/rust/pin-project/tests/ui/project/deprecated.rs b/third_party/rust/pin-project/tests/ui/project/deprecated.rs deleted file mode 100644 index 78d593d808c3..000000000000 --- a/third_party/rust/pin-project/tests/ui/project/deprecated.rs +++ /dev/null @@ -1,8 +0,0 @@ -#![deny(deprecated)] - -use pin_project::{project, project_ref, project_replace}; - -#[project] -#[project_ref] -#[project_replace] -fn main() {} diff --git a/third_party/rust/pin-project/tests/ui/project/deprecated.stderr b/third_party/rust/pin-project/tests/ui/project/deprecated.stderr deleted file mode 100644 index 5d67e8959e09..000000000000 --- a/third_party/rust/pin-project/tests/ui/project/deprecated.stderr +++ /dev/null @@ -1,23 +0,0 @@ -error: use of deprecated macro `project`: consider naming projected type by passing `project` argument to #[pin_project] attribute instead, see release note for details - --> tests/ui/project/deprecated.rs:5:3 - | -5 | #[project] - | ^^^^^^^ - | -note: the lint level is defined here - --> tests/ui/project/deprecated.rs:1:9 - | -1 | #![deny(deprecated)] - | ^^^^^^^^^^ - -error: use of deprecated macro `project_ref`: consider naming projected type by passing `project_ref` argument to #[pin_project] attribute instead, see release note for details - --> tests/ui/project/deprecated.rs:6:3 - | -6 | #[project_ref] - | ^^^^^^^^^^^ - -error: use of deprecated macro `project_replace`: consider naming projected type by passing `project_replace` argument to #[pin_project] attribute instead, see release note for details - --> 
tests/ui/project/deprecated.rs:7:3 - | -7 | #[project_replace] - | ^^^^^^^^^^^^^^^ diff --git a/third_party/rust/pin-project/tests/ui/project/invalid.rs b/third_party/rust/pin-project/tests/ui/project/invalid.rs deleted file mode 100644 index e72f84c3fff1..000000000000 --- a/third_party/rust/pin-project/tests/ui/project/invalid.rs +++ /dev/null @@ -1,192 +0,0 @@ -#![allow(deprecated)] - -mod argument { - use pin_project::{pin_project, project}; - - #[pin_project] - struct A(#[pin] ()); - - #[project] - fn unexpected_local1() { - let mut x = A(()); - #[project()] //~ ERROR unexpected token - let A(_) = Pin::new(&mut x).project(); - } - - #[project] - fn unexpected_local1() { - let mut x = A(()); - #[project(foo)] //~ ERROR unexpected token - let A(_) = Pin::new(&mut x).project(); - } - - #[project] - fn unexpected_expr1() { - let mut x = A(()); - #[project()] //~ ERROR unexpected token - match Pin::new(&mut x).project() { - A(_) => {} - } - } - - #[project] - fn unexpected_expr1() { - let mut x = A(()); - #[project(foo)] //~ ERROR unexpected token - match Pin::new(&mut x).project() { - A(_) => {} - } - } - - #[project()] // Ok - fn unexpected_item1() {} - - #[project(foo)] //~ ERROR unexpected token - fn unexpected_item2() {} -} - -mod attribute { - use pin_project::{pin_project, project, project_ref, project_replace}; - - #[pin_project(project_replace)] - struct A(#[pin] ()); - - #[project] - fn duplicate_stmt_project() { - let mut x = A(()); - #[project] - #[project] //~ ERROR duplicate #[project] attribute - let A(_) = Pin::new(&mut x).project(); - } - - #[project_ref] - fn duplicate_stmt_project_ref() { - let mut x = A(()); - #[project_ref] - #[project_ref] //~ ERROR duplicate #[project_ref] attribute - let A(_) = Pin::new(&mut x).project(); - } - - #[project_replace] - fn duplicate_stmt_project_replace() { - let mut x = A(()); - #[project_replace] - #[project_replace] //~ ERROR duplicate #[project_replace] attribute - let A(_) = Pin::new(&mut x).project(); - } - - #[project] - fn combine_stmt_project1() { - let mut x = A(()); - #[project] - #[project_ref] //~ ERROR are mutually exclusive - let A(_) = Pin::new(&mut x).project(); - } - - #[project] - fn combine_stmt_project2() { - let mut x = A(()); - #[project] - #[project_replace] //~ ERROR are mutually exclusive - let A(_) = Pin::new(&mut x).project(); - } - - #[project] - fn combine_stmt_project3() { - let mut x = A(()); - #[project_ref] - #[project_replace] //~ ERROR are mutually exclusive - let A(_) = Pin::new(&mut x).project(); - } - - #[project_ref] - fn combine_stmt_project_ref1() { - let mut x = A(()); - #[project] - #[project_ref] //~ ERROR are mutually exclusive - let A(_) = Pin::new(&mut x).project(); - } - - #[project_ref] - fn combine_stmt_project_ref2() { - let mut x = A(()); - #[project] - #[project_replace] //~ ERROR are mutually exclusive - let A(_) = Pin::new(&mut x).project(); - } - - #[project_ref] - fn combine_stmt_project_ref3() { - let mut x = A(()); - #[project_ref] - #[project_replace] //~ ERROR are mutually exclusive - let A(_) = Pin::new(&mut x).project(); - } - - #[project_replace] - fn combine_stmt_project_replace1() { - let mut x = A(()); - #[project] - #[project_ref] //~ ERROR are mutually exclusive - let A(_) = Pin::new(&mut x).project(); - } - - #[project_replace] - fn combine_stmt_project_replace2() { - let mut x = A(()); - #[project] - #[project_replace] //~ ERROR are mutually exclusive - let A(_) = Pin::new(&mut x).project(); - } - - #[project_replace] - fn combine_stmt_project_replace3() { - let mut 
x = A(()); - #[project_ref] - #[project_replace] //~ ERROR are mutually exclusive - let A(_) = Pin::new(&mut x).project(); - } - - #[project] - #[project] //~ ERROR duplicate #[project] attribute - fn duplicate_fn_project() {} - - #[project_ref] - #[project_ref] //~ ERROR duplicate #[project_ref] attribute - fn duplicate_fn_project_ref() {} - - #[project_replace] - #[project_replace] //~ ERROR duplicate #[project_replace] attribute - fn duplicate_fn_project_replace() {} - - #[project] - #[project] //~ ERROR duplicate #[project] attribute - impl A {} - - #[project_ref] - #[project_ref] //~ ERROR duplicate #[project_ref] attribute - impl A {} - - #[project_replace] - #[project_replace] //~ ERROR duplicate #[project_replace] attribute - impl A {} - - #[allow(unused_imports)] - mod use_ { - use pin_project::{project, project_ref, project_replace}; - - #[project] - #[project] //~ ERROR duplicate #[project] attribute - use super::A; - - #[project_ref] - #[project_ref] //~ ERROR duplicate #[project_ref] attribute - use super::A; - - #[project_replace] - #[project_replace] //~ ERROR duplicate #[project_replace] attribute - use super::A; - } -} - -fn main() {} diff --git a/third_party/rust/pin-project/tests/ui/project/invalid.stderr b/third_party/rust/pin-project/tests/ui/project/invalid.stderr deleted file mode 100644 index 6cbce48651ad..000000000000 --- a/third_party/rust/pin-project/tests/ui/project/invalid.stderr +++ /dev/null @@ -1,155 +0,0 @@ -error: unexpected token: () - --> tests/ui/project/invalid.rs:12:18 - | -12 | #[project()] //~ ERROR unexpected token - | ^^ - -error: unexpected token: (foo) - --> tests/ui/project/invalid.rs:19:18 - | -19 | #[project(foo)] //~ ERROR unexpected token - | ^^^^^ - -error: unexpected token: () - --> tests/ui/project/invalid.rs:26:18 - | -26 | #[project()] //~ ERROR unexpected token - | ^^ - -error: unexpected token: (foo) - --> tests/ui/project/invalid.rs:35:18 - | -35 | #[project(foo)] //~ ERROR unexpected token - | ^^^^^ - -error: unexpected token: foo - --> tests/ui/project/invalid.rs:44:15 - | -44 | #[project(foo)] //~ ERROR unexpected token - | ^^^ - -error: duplicate #[project] attribute - --> tests/ui/project/invalid.rs:58:9 - | -58 | #[project] //~ ERROR duplicate #[project] attribute - | ^^^^^^^^^^ - -error: duplicate #[project_ref] attribute - --> tests/ui/project/invalid.rs:66:9 - | -66 | #[project_ref] //~ ERROR duplicate #[project_ref] attribute - | ^^^^^^^^^^^^^^ - -error: duplicate #[project_replace] attribute - --> tests/ui/project/invalid.rs:74:9 - | -74 | #[project_replace] //~ ERROR duplicate #[project_replace] attribute - | ^^^^^^^^^^^^^^^^^^ - -error: attributes `project` and `project_ref` are mutually exclusive - --> tests/ui/project/invalid.rs:82:9 - | -82 | #[project_ref] //~ ERROR are mutually exclusive - | ^^^^^^^^^^^^^^ - -error: attributes `project` and `project_replace` are mutually exclusive - --> tests/ui/project/invalid.rs:90:9 - | -90 | #[project_replace] //~ ERROR are mutually exclusive - | ^^^^^^^^^^^^^^^^^^ - -error: attributes `project_ref` and `project_replace` are mutually exclusive - --> tests/ui/project/invalid.rs:98:9 - | -98 | #[project_replace] //~ ERROR are mutually exclusive - | ^^^^^^^^^^^^^^^^^^ - -error: attributes `project` and `project_ref` are mutually exclusive - --> tests/ui/project/invalid.rs:106:9 - | -106 | #[project_ref] //~ ERROR are mutually exclusive - | ^^^^^^^^^^^^^^ - -error: attributes `project` and `project_replace` are mutually exclusive - --> tests/ui/project/invalid.rs:114:9 - | -114 | 
#[project_replace] //~ ERROR are mutually exclusive - | ^^^^^^^^^^^^^^^^^^ - -error: attributes `project_ref` and `project_replace` are mutually exclusive - --> tests/ui/project/invalid.rs:122:9 - | -122 | #[project_replace] //~ ERROR are mutually exclusive - | ^^^^^^^^^^^^^^^^^^ - -error: attributes `project` and `project_ref` are mutually exclusive - --> tests/ui/project/invalid.rs:130:9 - | -130 | #[project_ref] //~ ERROR are mutually exclusive - | ^^^^^^^^^^^^^^ - -error: attributes `project` and `project_replace` are mutually exclusive - --> tests/ui/project/invalid.rs:138:9 - | -138 | #[project_replace] //~ ERROR are mutually exclusive - | ^^^^^^^^^^^^^^^^^^ - -error: attributes `project_ref` and `project_replace` are mutually exclusive - --> tests/ui/project/invalid.rs:146:9 - | -146 | #[project_replace] //~ ERROR are mutually exclusive - | ^^^^^^^^^^^^^^^^^^ - -error: duplicate #[project] attribute - --> tests/ui/project/invalid.rs:151:5 - | -151 | #[project] //~ ERROR duplicate #[project] attribute - | ^^^^^^^^^^ - -error: duplicate #[project_ref] attribute - --> tests/ui/project/invalid.rs:155:5 - | -155 | #[project_ref] //~ ERROR duplicate #[project_ref] attribute - | ^^^^^^^^^^^^^^ - -error: duplicate #[project_replace] attribute - --> tests/ui/project/invalid.rs:159:5 - | -159 | #[project_replace] //~ ERROR duplicate #[project_replace] attribute - | ^^^^^^^^^^^^^^^^^^ - -error: duplicate #[project] attribute - --> tests/ui/project/invalid.rs:163:5 - | -163 | #[project] //~ ERROR duplicate #[project] attribute - | ^^^^^^^^^^ - -error: duplicate #[project_ref] attribute - --> tests/ui/project/invalid.rs:167:5 - | -167 | #[project_ref] //~ ERROR duplicate #[project_ref] attribute - | ^^^^^^^^^^^^^^ - -error: duplicate #[project_replace] attribute - --> tests/ui/project/invalid.rs:171:5 - | -171 | #[project_replace] //~ ERROR duplicate #[project_replace] attribute - | ^^^^^^^^^^^^^^^^^^ - -error: duplicate #[project] attribute - --> tests/ui/project/invalid.rs:179:9 - | -179 | #[project] //~ ERROR duplicate #[project] attribute - | ^^^^^^^^^^ - -error: duplicate #[project_ref] attribute - --> tests/ui/project/invalid.rs:183:9 - | -183 | #[project_ref] //~ ERROR duplicate #[project_ref] attribute - | ^^^^^^^^^^^^^^ - -error: duplicate #[project_replace] attribute - --> tests/ui/project/invalid.rs:187:9 - | -187 | #[project_replace] //~ ERROR duplicate #[project_replace] attribute - | ^^^^^^^^^^^^^^^^^^ diff --git a/third_party/rust/pin-project/tests/ui/project/type-mismatch.rs b/third_party/rust/pin-project/tests/ui/project/type-mismatch.rs deleted file mode 100644 index 0e40c836f1a0..000000000000 --- a/third_party/rust/pin-project/tests/ui/project/type-mismatch.rs +++ /dev/null @@ -1,72 +0,0 @@ -#![allow(deprecated)] -#![feature(proc_macro_hygiene, stmt_expr_attributes)] - -use pin_project::{pin_project, project}; -use std::pin::Pin; - -#[project] -fn type_mismatch() { - #[pin_project] - enum Enum { - Variant1(#[pin] A, B), - Variant2 { - #[pin] - field1: C, - field2: D, - }, - None, - } - - let mut foo = Enum::Variant1(1, 2); - let mut foo = Pin::new(&mut foo).project(); - - #[project] - match &mut foo { - Enum::Variant1(x, y) => { - let x: &mut Pin<&mut i32> = x; - assert_eq!(**x, 1); - - let y: &mut &mut i32 = y; - assert_eq!(**y, 2); - } - Enum::Variant2 { field1, field2 } => { - let _x: &mut Pin<&mut i32> = field1; - let _y: &mut &mut i32 = field2; - } - None => {} //~ ERROR mismatched types - } -} - -fn type_mismatch_span_issue() { - #[pin_project] - enum Enum { - 
Variant1(#[pin] A, B), - Variant2 { - #[pin] - field1: C, - field2: D, - }, - None, - } - - let mut foo = Enum::Variant1(1, 2); - let mut foo = Pin::new(&mut foo).project(); - - #[project] - match &mut foo { - Enum::Variant1(x, y) => { - let x: &mut Pin<&mut i32> = x; - assert_eq!(**x, 1); - - let y: &mut &mut i32 = y; - assert_eq!(**y, 2); - } - Enum::Variant2 { field1, field2 } => { - let _x: &mut Pin<&mut i32> = field1; - let _y: &mut &mut i32 = field2; - } - None => {} //~ ERROR mismatched types - } -} - -fn main() {} diff --git a/third_party/rust/pin-project/tests/ui/project/type-mismatch.stderr b/third_party/rust/pin-project/tests/ui/project/type-mismatch.stderr deleted file mode 100644 index 4a5c421f4a9a..000000000000 --- a/third_party/rust/pin-project/tests/ui/project/type-mismatch.stderr +++ /dev/null @@ -1,17 +0,0 @@ -error[E0308]: mismatched types - --> tests/ui/project/type-mismatch.rs:68:9 - | -68 | None => {} //~ ERROR mismatched types - | ^^^^ expected enum `type_mismatch_span_issue::__EnumProjection`, found enum `Option` - | - = note: expected enum `type_mismatch_span_issue::__EnumProjection<'_, {integer}, {integer}, _, _>` - found enum `Option<_>` - -error[E0308]: mismatched types - --> tests/ui/project/type-mismatch.rs:36:9 - | -36 | None => {} //~ ERROR mismatched types - | ^^^^ expected enum `type_mismatch::__EnumProjection`, found enum `Option` - | - = note: expected enum `type_mismatch::__EnumProjection<'_, {integer}, {integer}, _, _>` - found enum `Option<_>` diff --git a/third_party/rust/pin-project/tests/ui/project/use-public.rs b/third_party/rust/pin-project/tests/ui/project/use-public.rs deleted file mode 100644 index aa82a95a38f3..000000000000 --- a/third_party/rust/pin-project/tests/ui/project/use-public.rs +++ /dev/null @@ -1,17 +0,0 @@ -#![allow(deprecated)] - -use pin_project::pin_project; - -#[pin_project] -struct A { - field: u8, -} - -pub mod b { - use pin_project::project; - - #[project] - pub use crate::A; //~ ERROR E0365 -} - -fn main() {} diff --git a/third_party/rust/pin-project/tests/ui/project/use-public.stderr b/third_party/rust/pin-project/tests/ui/project/use-public.stderr deleted file mode 100644 index 621f19c6bba0..000000000000 --- a/third_party/rust/pin-project/tests/ui/project/use-public.stderr +++ /dev/null @@ -1,7 +0,0 @@ -error[E0365]: `__AProjection` is only public within the crate, and cannot be re-exported outside - --> tests/ui/project/use-public.rs:14:13 - | -14 | pub use crate::A; //~ ERROR E0365 - | ^^^^^^^^ re-export of crate public `__AProjection` - | - = note: consider declaring type or module `__AProjection` with `pub` diff --git a/third_party/rust/pin-project/tests/ui/project/use.rs b/third_party/rust/pin-project/tests/ui/project/use.rs deleted file mode 100644 index ba5638201178..000000000000 --- a/third_party/rust/pin-project/tests/ui/project/use.rs +++ /dev/null @@ -1,19 +0,0 @@ -#![allow(deprecated)] - -use pin_project::pin_project; - -#[pin_project] -struct A { - field: u8, -} - -mod b { - use pin_project::project; - - #[project] - use crate::A as B; //~ ERROR #[project] attribute may not be used on renamed imports - #[project] - use crate::*; //~ ERROR #[project] attribute may not be used on glob imports -} - -fn main() {} diff --git a/third_party/rust/pin-project/tests/ui/project/use.stderr b/third_party/rust/pin-project/tests/ui/project/use.stderr deleted file mode 100644 index db89e225bc61..000000000000 --- a/third_party/rust/pin-project/tests/ui/project/use.stderr +++ /dev/null @@ -1,11 +0,0 @@ -error: #[project] 
attribute may not be used on renamed imports - --> tests/ui/project/use.rs:14:16 - | -14 | use crate::A as B; //~ ERROR #[project] attribute may not be used on renamed imports - | ^^^^^^ - -error: #[project] attribute may not be used on glob imports - --> tests/ui/project/use.rs:16:16 - | -16 | use crate::*; //~ ERROR #[project] attribute may not be used on glob imports - | ^ diff --git a/third_party/rust/pin-project/tests/ui/unsafe_unpin/conflict-unpin.rs b/third_party/rust/pin-project/tests/ui/unsafe_unpin/conflict-unpin.rs index e0c8a7b1a3b8..ac9d1f868240 100644 --- a/third_party/rust/pin-project/tests/ui/unsafe_unpin/conflict-unpin.rs +++ b/third_party/rust/pin-project/tests/ui/unsafe_unpin/conflict-unpin.rs @@ -3,8 +3,8 @@ use pin_project::pin_project; #[pin_project(UnsafeUnpin)] //~ ERROR E0119 struct Foo { #[pin] - future: T, - field: U, + f1: T, + f2: U, } impl Unpin for Foo where T: Unpin {} @@ -12,8 +12,8 @@ impl Unpin for Foo where T: Unpin {} #[pin_project(UnsafeUnpin)] //~ ERROR E0119 struct Bar { #[pin] - future: T, - field: U, + f1: T, + f2: U, } impl Unpin for Bar {} @@ -21,8 +21,8 @@ impl Unpin for Bar {} #[pin_project(UnsafeUnpin)] //~ ERROR E0119 struct Baz { #[pin] - future: T, - field: U, + f1: T, + f2: U, } impl Unpin for Baz {} diff --git a/third_party/rust/pin-project/tests/ui/unsafe_unpin/conflict-unpin.stderr b/third_party/rust/pin-project/tests/ui/unsafe_unpin/conflict-unpin.stderr index 6add4ab8dcb4..20d4e0885be8 100644 --- a/third_party/rust/pin-project/tests/ui/unsafe_unpin/conflict-unpin.stderr +++ b/third_party/rust/pin-project/tests/ui/unsafe_unpin/conflict-unpin.stderr @@ -7,7 +7,7 @@ error[E0119]: conflicting implementations of trait `std::marker::Unpin` for type 10 | impl Unpin for Foo where T: Unpin {} | --------------------------------------------- first implementation here | - = note: upstream crates may add a new impl of trait `pin_project::UnsafeUnpin` for type `pin_project::__private::Wrapper<'_, Foo<_, _>>` in future versions + = note: upstream crates may add a new impl of trait `_::_pin_project::UnsafeUnpin` for type `_::_pin_project::__private::Wrapper<'_, Foo<_, _>>` in future versions error[E0119]: conflicting implementations of trait `std::marker::Unpin` for type `Bar<_, _>` --> tests/ui/unsafe_unpin/conflict-unpin.rs:12:15 @@ -18,7 +18,7 @@ error[E0119]: conflicting implementations of trait `std::marker::Unpin` for type 19 | impl Unpin for Bar {} | ------------------------------ first implementation here | - = note: upstream crates may add a new impl of trait `pin_project::UnsafeUnpin` for type `pin_project::__private::Wrapper<'_, Bar<_, _>>` in future versions + = note: upstream crates may add a new impl of trait `_::_pin_project::UnsafeUnpin` for type `_::_pin_project::__private::Wrapper<'_, Bar<_, _>>` in future versions error[E0119]: conflicting implementations of trait `std::marker::Unpin` for type `Baz<_, _>` --> tests/ui/unsafe_unpin/conflict-unpin.rs:21:15 @@ -29,4 +29,4 @@ error[E0119]: conflicting implementations of trait `std::marker::Unpin` for type 28 | impl Unpin for Baz {} | -------------------------------------------- first implementation here | - = note: upstream crates may add a new impl of trait `pin_project::UnsafeUnpin` for type `pin_project::__private::Wrapper<'_, Baz<_, _>>` in future versions + = note: upstream crates may add a new impl of trait `_::_pin_project::UnsafeUnpin` for type `_::_pin_project::__private::Wrapper<'_, Baz<_, _>>` in future versions diff --git 
a/third_party/rust/pin-project/tests/ui/unsafe_unpin/not-implement-unsafe-unpin.rs b/third_party/rust/pin-project/tests/ui/unsafe_unpin/not-implement-unsafe-unpin.rs deleted file mode 100644 index 429d60f04460..000000000000 --- a/third_party/rust/pin-project/tests/ui/unsafe_unpin/not-implement-unsafe-unpin.rs +++ /dev/null @@ -1,14 +0,0 @@ -use pin_project::pin_project; - -#[pin_project(UnsafeUnpin)] -struct Struct { - #[pin] - inner: T, - other: U, -} - -fn is_unpin() {} - -fn main() { - is_unpin::>(); //~ ERROR E0277 -} diff --git a/third_party/rust/pin-project/tests/ui/unsafe_unpin/not-implement-unsafe-unpin.stderr b/third_party/rust/pin-project/tests/ui/unsafe_unpin/not-implement-unsafe-unpin.stderr deleted file mode 100644 index 65fec0aaa354..000000000000 --- a/third_party/rust/pin-project/tests/ui/unsafe_unpin/not-implement-unsafe-unpin.stderr +++ /dev/null @@ -1,19 +0,0 @@ -error[E0277]: the trait bound `Struct<(), ()>: UnsafeUnpin` is not satisfied - --> tests/ui/unsafe_unpin/not-implement-unsafe-unpin.rs:13:16 - | -13 | is_unpin::>(); //~ ERROR E0277 - | ^^^^^^^^^^^^^^ the trait `UnsafeUnpin` is not implemented for `Struct<(), ()>` - | - = note: required because of the requirements on the impl of `UnsafeUnpin` for `Wrapper<'_, Struct<(), ()>>` -note: required because of the requirements on the impl of `Unpin` for `Struct<(), ()>` - --> tests/ui/unsafe_unpin/not-implement-unsafe-unpin.rs:3:15 - | -3 | #[pin_project(UnsafeUnpin)] - | ^^^^^^^^^^^ -4 | struct Struct { - | ^^^^^^^^^^^^ -note: required by a bound in `is_unpin` - --> tests/ui/unsafe_unpin/not-implement-unsafe-unpin.rs:10:16 - | -10 | fn is_unpin() {} - | ^^^^^ required by this bound in `is_unpin` diff --git a/third_party/rust/pin-project/tests/ui/unsafe_unpin/proper_unpin.rs b/third_party/rust/pin-project/tests/ui/unsafe_unpin/proper_unpin.rs deleted file mode 100644 index 6573aec95ff7..000000000000 --- a/third_party/rust/pin-project/tests/ui/unsafe_unpin/proper_unpin.rs +++ /dev/null @@ -1,41 +0,0 @@ -use pin_project::{pin_project, UnsafeUnpin}; -use std::marker::PhantomPinned; - -fn is_unpin() {} - -#[pin_project(UnsafeUnpin)] -struct Blah { - field1: U, - #[pin] - field2: T, -} - -unsafe impl UnsafeUnpin for Blah {} - -#[pin_project(UnsafeUnpin)] -struct TrivialBounds { - #[pin] - field1: PhantomPinned, -} - -#[pin_project(UnsafeUnpin)] -struct OverlappingLifetimeNames<'pin, T, U> { - #[pin] - field1: U, - #[pin] - field2: Option, - field3: &'pin (), -} - -unsafe impl UnsafeUnpin for OverlappingLifetimeNames<'_, T, U> {} - -fn main() { - is_unpin::>(); //~ ERROR E0277 - is_unpin::>(); // Ok - is_unpin::>(); //~ ERROR E0277 - - is_unpin::(); //~ ERROR E0277 - - is_unpin::>(); //~ ERROR E0277 - is_unpin::>(); //~ ERROR E0277 -} diff --git a/third_party/rust/pin-project/tests/ui/unsafe_unpin/proper_unpin.stderr b/third_party/rust/pin-project/tests/ui/unsafe_unpin/proper_unpin.stderr deleted file mode 100644 index 59e44b39736f..000000000000 --- a/third_party/rust/pin-project/tests/ui/unsafe_unpin/proper_unpin.stderr +++ /dev/null @@ -1,127 +0,0 @@ -error[E0277]: `PhantomPinned` cannot be unpinned - --> tests/ui/unsafe_unpin/proper_unpin.rs:33:5 - | -33 | is_unpin::>(); //~ ERROR E0277 - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ the trait `Unpin` is not implemented for `PhantomPinned` - | - = note: consider using `Box::pin` -note: required because of the requirements on the impl of `UnsafeUnpin` for `Blah` - --> tests/ui/unsafe_unpin/proper_unpin.rs:13:26 - | -13 | unsafe impl UnsafeUnpin for Blah {} - | ^^^^^^^^^^^ ^^^^^^^^^^ 
- = note: 1 redundant requirement hidden - = note: required because of the requirements on the impl of `UnsafeUnpin` for `Wrapper<'_, Blah>` -note: required because of the requirements on the impl of `Unpin` for `Blah` - --> tests/ui/unsafe_unpin/proper_unpin.rs:6:15 - | -6 | #[pin_project(UnsafeUnpin)] - | ^^^^^^^^^^^ -7 | struct Blah { - | ^^^^^^^^^^ -note: required by a bound in `is_unpin` - --> tests/ui/unsafe_unpin/proper_unpin.rs:4:16 - | -4 | fn is_unpin() {} - | ^^^^^ required by this bound in `is_unpin` - -error[E0277]: `PhantomPinned` cannot be unpinned - --> tests/ui/unsafe_unpin/proper_unpin.rs:35:5 - | -35 | is_unpin::>(); //~ ERROR E0277 - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ the trait `Unpin` is not implemented for `PhantomPinned` - | - = note: consider using `Box::pin` -note: required because of the requirements on the impl of `UnsafeUnpin` for `Blah` - --> tests/ui/unsafe_unpin/proper_unpin.rs:13:26 - | -13 | unsafe impl UnsafeUnpin for Blah {} - | ^^^^^^^^^^^ ^^^^^^^^^^ - = note: 1 redundant requirement hidden - = note: required because of the requirements on the impl of `UnsafeUnpin` for `Wrapper<'_, Blah>` -note: required because of the requirements on the impl of `Unpin` for `Blah` - --> tests/ui/unsafe_unpin/proper_unpin.rs:6:15 - | -6 | #[pin_project(UnsafeUnpin)] - | ^^^^^^^^^^^ -7 | struct Blah { - | ^^^^^^^^^^ -note: required by a bound in `is_unpin` - --> tests/ui/unsafe_unpin/proper_unpin.rs:4:16 - | -4 | fn is_unpin() {} - | ^^^^^ required by this bound in `is_unpin` - -error[E0277]: the trait bound `TrivialBounds: UnsafeUnpin` is not satisfied - --> tests/ui/unsafe_unpin/proper_unpin.rs:37:16 - | -37 | is_unpin::(); //~ ERROR E0277 - | ^^^^^^^^^^^^^ the trait `UnsafeUnpin` is not implemented for `TrivialBounds` - | - = note: required because of the requirements on the impl of `UnsafeUnpin` for `Wrapper<'_, TrivialBounds>` -note: required because of the requirements on the impl of `Unpin` for `TrivialBounds` - --> tests/ui/unsafe_unpin/proper_unpin.rs:15:15 - | -15 | #[pin_project(UnsafeUnpin)] - | ^^^^^^^^^^^ -16 | struct TrivialBounds { - | ^^^^^^^^^^^^^ -note: required by a bound in `is_unpin` - --> tests/ui/unsafe_unpin/proper_unpin.rs:4:16 - | -4 | fn is_unpin() {} - | ^^^^^ required by this bound in `is_unpin` - -error[E0277]: `PhantomPinned` cannot be unpinned - --> tests/ui/unsafe_unpin/proper_unpin.rs:39:5 - | -39 | is_unpin::>(); //~ ERROR E0277 - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ the trait `Unpin` is not implemented for `PhantomPinned` - | - = note: consider using `Box::pin` -note: required because of the requirements on the impl of `UnsafeUnpin` for `OverlappingLifetimeNames<'_, PhantomPinned, ()>` - --> tests/ui/unsafe_unpin/proper_unpin.rs:30:33 - | -30 | unsafe impl UnsafeUnpin for OverlappingLifetimeNames<'_, T, U> {} - | ^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - = note: 1 redundant requirement hidden - = note: required because of the requirements on the impl of `UnsafeUnpin` for `Wrapper<'_, OverlappingLifetimeNames<'_, PhantomPinned, ()>>` -note: required because of the requirements on the impl of `Unpin` for `OverlappingLifetimeNames<'_, PhantomPinned, ()>` - --> tests/ui/unsafe_unpin/proper_unpin.rs:21:15 - | -21 | #[pin_project(UnsafeUnpin)] - | ^^^^^^^^^^^ -22 | struct OverlappingLifetimeNames<'pin, T, U> { - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -note: required by a bound in `is_unpin` - --> tests/ui/unsafe_unpin/proper_unpin.rs:4:16 - | -4 | fn is_unpin() {} - | ^^^^^ required by this bound 
in `is_unpin` - -error[E0277]: `PhantomPinned` cannot be unpinned - --> tests/ui/unsafe_unpin/proper_unpin.rs:40:5 - | -40 | is_unpin::>(); //~ ERROR E0277 - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ the trait `Unpin` is not implemented for `PhantomPinned` - | - = note: consider using `Box::pin` -note: required because of the requirements on the impl of `UnsafeUnpin` for `OverlappingLifetimeNames<'_, (), PhantomPinned>` - --> tests/ui/unsafe_unpin/proper_unpin.rs:30:33 - | -30 | unsafe impl UnsafeUnpin for OverlappingLifetimeNames<'_, T, U> {} - | ^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - = note: 1 redundant requirement hidden - = note: required because of the requirements on the impl of `UnsafeUnpin` for `Wrapper<'_, OverlappingLifetimeNames<'_, (), PhantomPinned>>` -note: required because of the requirements on the impl of `Unpin` for `OverlappingLifetimeNames<'_, (), PhantomPinned>` - --> tests/ui/unsafe_unpin/proper_unpin.rs:21:15 - | -21 | #[pin_project(UnsafeUnpin)] - | ^^^^^^^^^^^ -22 | struct OverlappingLifetimeNames<'pin, T, U> { - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -note: required by a bound in `is_unpin` - --> tests/ui/unsafe_unpin/proper_unpin.rs:4:16 - | -4 | fn is_unpin() {} - | ^^^^^ required by this bound in `is_unpin` diff --git a/third_party/rust/pin-project/tests/ui/unstable-features/README.md b/third_party/rust/pin-project/tests/ui/unstable-features/README.md index b9215b6500c9..96f370ca77bd 100644 --- a/third_party/rust/pin-project/tests/ui/unstable-features/README.md +++ b/third_party/rust/pin-project/tests/ui/unstable-features/README.md @@ -1,5 +1,7 @@ # UI tests for unstable features -These tests check how the guarantees and features provided by pin-project interact with unstable language features. +These tests check how the guarantees and features provided by pin-project +interact with unstable language features. -The names of the files contained in this directory need to begin with the name of the feature. +The names of the files contained in this directory need to begin with the name +of the feature. diff --git a/third_party/rust/pin-project/tests/ui/unstable-features/marker_trait_attr-feature-gate.rs b/third_party/rust/pin-project/tests/ui/unstable-features/marker_trait_attr-feature-gate.rs index fa4b01ea04e2..542250bd902b 100644 --- a/third_party/rust/pin-project/tests/ui/unstable-features/marker_trait_attr-feature-gate.rs +++ b/third_party/rust/pin-project/tests/ui/unstable-features/marker_trait_attr-feature-gate.rs @@ -1,12 +1,13 @@ -// NB: If you change this test, change 'marker_trait_attr.rs' at the same time. +// Note: If you change this test, change 'marker_trait_attr.rs' at the same time. 
+ +use std::marker::PhantomPinned; use pin_project::pin_project; -use std::marker::PhantomPinned; #[pin_project] //~ ERROR E0119 struct Struct { #[pin] - x: T, + f: T, } // unsound Unpin impl diff --git a/third_party/rust/pin-project/tests/ui/unstable-features/marker_trait_attr-feature-gate.stderr b/third_party/rust/pin-project/tests/ui/unstable-features/marker_trait_attr-feature-gate.stderr index ef03a00a6767..3412f2e224c6 100644 --- a/third_party/rust/pin-project/tests/ui/unstable-features/marker_trait_attr-feature-gate.stderr +++ b/third_party/rust/pin-project/tests/ui/unstable-features/marker_trait_attr-feature-gate.stderr @@ -1,10 +1,10 @@ error[E0119]: conflicting implementations of trait `std::marker::Unpin` for type `Struct<_>` - --> tests/ui/unstable-features/marker_trait_attr-feature-gate.rs:6:1 + --> tests/ui/unstable-features/marker_trait_attr-feature-gate.rs:7:1 | -6 | #[pin_project] //~ ERROR E0119 +7 | #[pin_project] //~ ERROR E0119 | ^^^^^^^^^^^^^^ conflicting implementation for `Struct<_>` ... -13 | impl Unpin for Struct {} +14 | impl Unpin for Struct {} | --------------------------- first implementation here | = note: this error originates in the derive macro `::pin_project::__private::__PinProjectInternalDerive` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/third_party/rust/pin-project/tests/ui/unstable-features/marker_trait_attr.rs b/third_party/rust/pin-project/tests/ui/unstable-features/marker_trait_attr.rs index 0b8b30ad6461..9c8e6643e0de 100644 --- a/third_party/rust/pin-project/tests/ui/unstable-features/marker_trait_attr.rs +++ b/third_party/rust/pin-project/tests/ui/unstable-features/marker_trait_attr.rs @@ -1,4 +1,4 @@ -// NB: If you change this test, change 'marker_trait_attr-feature-gate.rs' at the same time. +// Note: If you change this test, change 'marker_trait_attr-feature-gate.rs' at the same time. // marker_trait_attr // Tracking issue: https://github.com/rust-lang/rust/issues/29864 @@ -6,13 +6,14 @@ // See https://github.com/taiki-e/pin-project/issues/105#issuecomment-535355974 -use pin_project::pin_project; use std::marker::PhantomPinned; +use pin_project::pin_project; + #[pin_project] //~ ERROR E0119 struct Struct { #[pin] - x: T, + f: T, } // unsound Unpin impl diff --git a/third_party/rust/pin-project/tests/ui/unstable-features/marker_trait_attr.stderr b/third_party/rust/pin-project/tests/ui/unstable-features/marker_trait_attr.stderr index 8974f971e877..2b68c80ffbb4 100644 --- a/third_party/rust/pin-project/tests/ui/unstable-features/marker_trait_attr.stderr +++ b/third_party/rust/pin-project/tests/ui/unstable-features/marker_trait_attr.stderr @@ -1,10 +1,10 @@ error[E0119]: conflicting implementations of trait `std::marker::Unpin` for type `Struct<_>` - --> tests/ui/unstable-features/marker_trait_attr.rs:12:1 + --> tests/ui/unstable-features/marker_trait_attr.rs:13:1 | -12 | #[pin_project] //~ ERROR E0119 +13 | #[pin_project] //~ ERROR E0119 | ^^^^^^^^^^^^^^ conflicting implementation for `Struct<_>` ... 
-19 | impl Unpin for Struct {} +20 | impl Unpin for Struct {} | --------------------------- first implementation here | = note: this error originates in the derive macro `::pin_project::__private::__PinProjectInternalDerive` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/third_party/rust/pin-project/tests/ui/unstable-features/overlapping_marker_traits-feature-gate.rs b/third_party/rust/pin-project/tests/ui/unstable-features/overlapping_marker_traits-feature-gate.rs index 0bd4a32cc1ab..012c8709bd4a 100644 --- a/third_party/rust/pin-project/tests/ui/unstable-features/overlapping_marker_traits-feature-gate.rs +++ b/third_party/rust/pin-project/tests/ui/unstable-features/overlapping_marker_traits-feature-gate.rs @@ -1,12 +1,13 @@ -// NB: If you change this test, change 'overlapping_marker_traits.rs' at the same time. +// Note: If you change this test, change 'overlapping_marker_traits.rs' at the same time. + +use std::marker::PhantomPinned; use pin_project::pin_project; -use std::marker::PhantomPinned; #[pin_project] //~ ERROR E0119 struct Struct { #[pin] - x: T, + f: T, } // unsound Unpin impl diff --git a/third_party/rust/pin-project/tests/ui/unstable-features/overlapping_marker_traits-feature-gate.stderr b/third_party/rust/pin-project/tests/ui/unstable-features/overlapping_marker_traits-feature-gate.stderr index 624a396d4152..918d804d0e95 100644 --- a/third_party/rust/pin-project/tests/ui/unstable-features/overlapping_marker_traits-feature-gate.stderr +++ b/third_party/rust/pin-project/tests/ui/unstable-features/overlapping_marker_traits-feature-gate.stderr @@ -1,10 +1,10 @@ error[E0119]: conflicting implementations of trait `std::marker::Unpin` for type `Struct<_>` - --> tests/ui/unstable-features/overlapping_marker_traits-feature-gate.rs:6:1 + --> tests/ui/unstable-features/overlapping_marker_traits-feature-gate.rs:7:1 | -6 | #[pin_project] //~ ERROR E0119 +7 | #[pin_project] //~ ERROR E0119 | ^^^^^^^^^^^^^^ conflicting implementation for `Struct<_>` ... -13 | impl Unpin for Struct {} +14 | impl Unpin for Struct {} | --------------------------- first implementation here | = note: this error originates in the derive macro `::pin_project::__private::__PinProjectInternalDerive` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/third_party/rust/pin-project/tests/ui/unstable-features/overlapping_marker_traits.rs b/third_party/rust/pin-project/tests/ui/unstable-features/overlapping_marker_traits.rs index 27d37a3114e6..8dc27c1dbad9 100644 --- a/third_party/rust/pin-project/tests/ui/unstable-features/overlapping_marker_traits.rs +++ b/third_party/rust/pin-project/tests/ui/unstable-features/overlapping_marker_traits.rs @@ -1,22 +1,23 @@ -// NB: If you change this test, change 'overlapping_marker_traits-feature-gate.rs' at the same time. +// Note: If you change this test, change 'overlapping_marker_traits-feature-gate.rs' at the same time. // This feature could break the guarantee for Unpin provided by pin-project, // but was removed in https://github.com/rust-lang/rust/pull/68544 (nightly-2020-02-06). // Refs: -// * https://github.com/rust-lang/rust/issues/29864#issuecomment-515780867. 
-// * https://github.com/taiki-e/pin-project/issues/105 +// - https://github.com/rust-lang/rust/issues/29864#issuecomment-515780867 +// - https://github.com/taiki-e/pin-project/issues/105 // overlapping_marker_traits // Tracking issue: https://github.com/rust-lang/rust/issues/29864 #![feature(overlapping_marker_traits)] -use pin_project::pin_project; use std::marker::PhantomPinned; +use pin_project::pin_project; + #[pin_project] struct Struct { #[pin] - x: T, + f: T, } // unsound Unpin impl diff --git a/third_party/rust/pin-project/tests/ui/unstable-features/overlapping_marker_traits.stderr b/third_party/rust/pin-project/tests/ui/unstable-features/overlapping_marker_traits.stderr index a9f3a6e92ebb..3e8411d4b096 100644 --- a/third_party/rust/pin-project/tests/ui/unstable-features/overlapping_marker_traits.stderr +++ b/third_party/rust/pin-project/tests/ui/unstable-features/overlapping_marker_traits.stderr @@ -7,12 +7,12 @@ error[E0557]: feature has been removed = note: removed in favor of `#![feature(marker_trait_attr)]` error[E0119]: conflicting implementations of trait `std::marker::Unpin` for type `Struct<_>` - --> tests/ui/unstable-features/overlapping_marker_traits.rs:16:1 + --> tests/ui/unstable-features/overlapping_marker_traits.rs:17:1 | -16 | #[pin_project] +17 | #[pin_project] | ^^^^^^^^^^^^^^ conflicting implementation for `Struct<_>` ... -23 | impl Unpin for Struct {} +24 | impl Unpin for Struct {} | --------------------------- first implementation here | = note: this error originates in the derive macro `::pin_project::__private::__PinProjectInternalDerive` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/third_party/rust/pin-project/tests/ui/unstable-features/run-pass/stmt_expr_attributes.rs b/third_party/rust/pin-project/tests/ui/unstable-features/run-pass/stmt_expr_attributes.rs deleted file mode 100644 index 2b6377a6feb6..000000000000 --- a/third_party/rust/pin-project/tests/ui/unstable-features/run-pass/stmt_expr_attributes.rs +++ /dev/null @@ -1,63 +0,0 @@ -// NB: If you change this test, change 'stmt_expr_attributes-feature-gate.rs' at the same time. 
- -#![allow(deprecated)] -// proc_macro_hygiene -// Tracking issue: https://github.com/rust-lang/rust/issues/54727 -#![feature(proc_macro_hygiene)] -// stmt_expr_attributes -// Tracking issue: https://github.com/rust-lang/rust/issues/15701 -#![feature(stmt_expr_attributes)] - -use pin_project::{pin_project, project}; -use std::pin::Pin; - -fn project_stmt_expr_nightly() { - #[pin_project] - enum Baz { - Variant1(#[pin] A, B), - Variant2 { - #[pin] - field1: C, - field2: D, - }, - None, - } - - let mut baz = Baz::Variant1(1, 2); - - let mut baz = Pin::new(&mut baz).project(); - - #[project] - match &mut baz { - Baz::Variant1(x, y) => { - let x: &mut Pin<&mut i32> = x; - assert_eq!(**x, 1); - - let y: &mut &mut i32 = y; - assert_eq!(**y, 2); - } - Baz::Variant2 { field1, field2 } => { - let _x: &mut Pin<&mut i32> = field1; - let _y: &mut &mut i32 = field2; - } - Baz::None => {} - } - - let () = #[project] - match &mut baz { - Baz::Variant1(x, y) => { - let x: &mut Pin<&mut i32> = x; - assert_eq!(**x, 1); - - let y: &mut &mut i32 = y; - assert_eq!(**y, 2); - } - Baz::Variant2 { field1, field2 } => { - let _x: &mut Pin<&mut i32> = field1; - let _y: &mut &mut i32 = field2; - } - Baz::None => {} - }; -} - -fn main() {} diff --git a/third_party/rust/pin-project/tests/ui/unstable-features/stmt_expr_attributes-feature-gate.rs b/third_party/rust/pin-project/tests/ui/unstable-features/stmt_expr_attributes-feature-gate.rs deleted file mode 100644 index 5dbe52307a43..000000000000 --- a/third_party/rust/pin-project/tests/ui/unstable-features/stmt_expr_attributes-feature-gate.rs +++ /dev/null @@ -1,57 +0,0 @@ -// NB: If you change this test, change 'stmt_expr_attributes.rs' at the same time. - -#![allow(deprecated)] - -use pin_project::{pin_project, project}; -use std::pin::Pin; - -fn project_stmt_expr_nightly() { - #[pin_project] - enum Enum { - Variant1(#[pin] A, B), - Variant2 { - #[pin] - field1: C, - field2: D, - }, - None, - } - - let mut baz = Enum::Variant1(1, 2); - - let mut baz = Pin::new(&mut baz).project(); - - #[project] //~ ERROR E0658 - match &mut baz { - Enum::Variant1(x, y) => { - let x: &mut Pin<&mut i32> = x; - assert_eq!(**x, 1); - - let y: &mut &mut i32 = y; - assert_eq!(**y, 2); - } - Enum::Variant2 { field1, field2 } => { - let _x: &mut Pin<&mut i32> = field1; - let _y: &mut &mut i32 = field2; - } - Enum::None => {} - } - - let () = #[project] //~ ERROR E0658 - match &mut baz { - Enum::Variant1(x, y) => { - let x: &mut Pin<&mut i32> = x; - assert_eq!(**x, 1); - - let y: &mut &mut i32 = y; - assert_eq!(**y, 2); - } - Enum::Variant2 { field1, field2 } => { - let _x: &mut Pin<&mut i32> = field1; - let _y: &mut &mut i32 = field2; - } - Enum::None => {} - }; -} - -fn main() {} diff --git a/third_party/rust/pin-project/tests/ui/unstable-features/stmt_expr_attributes-feature-gate.stderr b/third_party/rust/pin-project/tests/ui/unstable-features/stmt_expr_attributes-feature-gate.stderr deleted file mode 100644 index 76ed4f096bfd..000000000000 --- a/third_party/rust/pin-project/tests/ui/unstable-features/stmt_expr_attributes-feature-gate.stderr +++ /dev/null @@ -1,35 +0,0 @@ -error[E0658]: attributes on expressions are experimental - --> tests/ui/unstable-features/stmt_expr_attributes-feature-gate.rs:24:5 - | -24 | #[project] //~ ERROR E0658 - | ^^^^^^^^^^ - | - = note: see issue #15701 for more information - = help: add `#![feature(stmt_expr_attributes)]` to the crate attributes to enable - -error[E0658]: attributes on expressions are experimental - --> 
tests/ui/unstable-features/stmt_expr_attributes-feature-gate.rs:40:14 - | -40 | let () = #[project] //~ ERROR E0658 - | ^^^^^^^^^^ - | - = note: see issue #15701 for more information - = help: add `#![feature(stmt_expr_attributes)]` to the crate attributes to enable - -error[E0658]: custom attributes cannot be applied to expressions - --> tests/ui/unstable-features/stmt_expr_attributes-feature-gate.rs:24:5 - | -24 | #[project] //~ ERROR E0658 - | ^^^^^^^^^^ - | - = note: see issue #54727 for more information - = help: add `#![feature(proc_macro_hygiene)]` to the crate attributes to enable - -error[E0658]: custom attributes cannot be applied to expressions - --> tests/ui/unstable-features/stmt_expr_attributes-feature-gate.rs:40:14 - | -40 | let () = #[project] //~ ERROR E0658 - | ^^^^^^^^^^ - | - = note: see issue #54727 for more information - = help: add `#![feature(proc_macro_hygiene)]` to the crate attributes to enable diff --git a/third_party/rust/pin-project/tests/ui/unstable-features/trivial_bounds-feature-gate.rs b/third_party/rust/pin-project/tests/ui/unstable-features/trivial_bounds-feature-gate.rs index 0453a3fd47f6..f8467b082e5b 100644 --- a/third_party/rust/pin-project/tests/ui/unstable-features/trivial_bounds-feature-gate.rs +++ b/third_party/rust/pin-project/tests/ui/unstable-features/trivial_bounds-feature-gate.rs @@ -1,4 +1,4 @@ -// NB: If you change this test, change 'trivial_bounds.rs' at the same time. +// Note: If you change this test, change 'trivial_bounds.rs' at the same time. mod phantom_pinned { use std::marker::{PhantomData, PhantomPinned}; @@ -21,8 +21,7 @@ mod phantom_pinned { struct C(PhantomPinned); - impl<'a> Unpin for C where WrapperWithLifetime<'a, PhantomPinned>: Unpin {} - // Ok + impl<'a> Unpin for C where WrapperWithLifetime<'a, PhantomPinned>: Unpin {} // Ok } mod inner { diff --git a/third_party/rust/pin-project/tests/ui/unstable-features/trivial_bounds-feature-gate.stderr b/third_party/rust/pin-project/tests/ui/unstable-features/trivial_bounds-feature-gate.stderr index 0d5ed55bdfff..2e316585ed2d 100644 --- a/third_party/rust/pin-project/tests/ui/unstable-features/trivial_bounds-feature-gate.stderr +++ b/third_party/rust/pin-project/tests/ui/unstable-features/trivial_bounds-feature-gate.stderr @@ -24,36 +24,36 @@ note: required because of the requirements on the impl of `Unpin` for `phantom_p = help: add `#![feature(trivial_bounds)]` to the crate attributes to enable error[E0277]: `PhantomPinned` cannot be unpinned - --> tests/ui/unstable-features/trivial_bounds-feature-gate.rs:35:28 + --> tests/ui/unstable-features/trivial_bounds-feature-gate.rs:34:28 | -35 | impl Unpin for A where Inner: Unpin {} //~ ERROR E0277 +34 | impl Unpin for A where Inner: Unpin {} //~ ERROR E0277 | ^^^^^^^^^^^^ within `Inner`, the trait `Unpin` is not implemented for `PhantomPinned` | = note: consider using `Box::pin` note: required because it appears within the type `Inner` - --> tests/ui/unstable-features/trivial_bounds-feature-gate.rs:31:12 + --> tests/ui/unstable-features/trivial_bounds-feature-gate.rs:30:12 | -31 | struct Inner(PhantomPinned); +30 | struct Inner(PhantomPinned); | ^^^^^ = help: see issue #48214 = help: add `#![feature(trivial_bounds)]` to the crate attributes to enable error[E0277]: `PhantomPinned` cannot be unpinned - --> tests/ui/unstable-features/trivial_bounds-feature-gate.rs:43:28 + --> tests/ui/unstable-features/trivial_bounds-feature-gate.rs:42:28 | -43 | impl Unpin for B where Wrapper: Unpin {} //~ ERROR E0277 +42 | impl Unpin for B where 
Wrapper: Unpin {} //~ ERROR E0277 | ^^^^^^^^^^^^^^^^^^^^^ within `Inner`, the trait `Unpin` is not implemented for `PhantomPinned` | = note: consider using `Box::pin` note: required because it appears within the type `Inner` - --> tests/ui/unstable-features/trivial_bounds-feature-gate.rs:31:12 + --> tests/ui/unstable-features/trivial_bounds-feature-gate.rs:30:12 | -31 | struct Inner(PhantomPinned); +30 | struct Inner(PhantomPinned); | ^^^^^ note: required because of the requirements on the impl of `Unpin` for `inner::Wrapper` - --> tests/ui/unstable-features/trivial_bounds-feature-gate.rs:39:13 + --> tests/ui/unstable-features/trivial_bounds-feature-gate.rs:38:13 | -39 | impl Unpin for Wrapper where T: Unpin {} +38 | impl Unpin for Wrapper where T: Unpin {} | ^^^^^ ^^^^^^^^^^ = help: see issue #48214 = help: add `#![feature(trivial_bounds)]` to the crate attributes to enable diff --git a/third_party/rust/pin-project/tests/unsafe_unpin.rs b/third_party/rust/pin-project/tests/unsafe_unpin.rs index 1085b56358c1..8b6411c775d3 100644 --- a/third_party/rust/pin-project/tests/unsafe_unpin.rs +++ b/third_party/rust/pin-project/tests/unsafe_unpin.rs @@ -1,54 +1,50 @@ #![warn(rust_2018_idioms, single_use_lifetimes)] #![allow(dead_code)] -use std::{marker::PhantomPinned, pin::Pin}; +#[macro_use] +mod auxiliary; + +use std::marker::PhantomPinned; use pin_project::{pin_project, UnsafeUnpin}; -fn is_unpin() {} - #[pin_project(UnsafeUnpin)] pub struct Blah { - field1: U, + f1: U, #[pin] - field2: T, + f2: T, } unsafe impl UnsafeUnpin for Blah {} +assert_unpin!(Blah<(), ()>); +assert_unpin!(Blah<(), PhantomPinned>); +assert_not_unpin!(Blah); +assert_not_unpin!(Blah); + #[pin_project(UnsafeUnpin)] -pub struct OverlappingLifetimeNames<'pin, T, U> { +struct OverlappingLifetimeNames<'pin, T, U> { #[pin] - field1: T, - field2: U, - field3: &'pin (), + f1: U, + #[pin] + f2: Option, + f3: &'pin (), } -unsafe impl UnsafeUnpin for OverlappingLifetimeNames<'_, T, U> {} +unsafe impl UnsafeUnpin for OverlappingLifetimeNames<'_, T, U> {} -#[test] -fn unsafe_unpin() { - is_unpin::>(); - is_unpin::>(); -} +assert_unpin!(OverlappingLifetimeNames<'_, (), ()>); +assert_not_unpin!(OverlappingLifetimeNames<'_, PhantomPinned, ()>); +assert_not_unpin!(OverlappingLifetimeNames<'_, (), PhantomPinned>); +assert_not_unpin!(OverlappingLifetimeNames<'_, PhantomPinned, PhantomPinned>); #[test] fn trivial_bounds() { #[pin_project(UnsafeUnpin)] pub struct NotImplementUnsafUnpin { #[pin] - field: PhantomPinned, + f: PhantomPinned, } -} -#[test] -fn test() { - let mut x = OverlappingLifetimeNames { field1: 0, field2: 1, field3: &() }; - let x = Pin::new(&mut x); - let y = x.as_ref().project_ref(); - let _: Pin<&u8> = y.field1; - let _: &u8 = y.field2; - let y = x.project(); - let _: Pin<&mut u8> = y.field1; - let _: &mut u8 = y.field2; + assert_not_unpin!(NotImplementUnsafUnpin); } diff --git a/third_party/rust/serde_urlencoded/.cargo-checksum.json b/third_party/rust/serde_urlencoded/.cargo-checksum.json index d9c93ff825bc..126414d161b0 100644 --- a/third_party/rust/serde_urlencoded/.cargo-checksum.json +++ b/third_party/rust/serde_urlencoded/.cargo-checksum.json @@ -1 +1 @@ 
-{"files":{"Cargo.toml":"7f9a0ac0673db3e3894a56da2cdb7cb410075a09a1ef2162bbdc97fbe9ed73db","LICENSE-APACHE":"62c7a1e35f56406896d7aa7ca52d0cc0d272ac022b5d2796e7d6905db8a3636a","LICENSE-MIT":"b9eb266294324f672cbe945fe8f2e32f85024f0d61a1a7d14382cdde0ac44769","README.md":"22d4c36a0b84f531cd8c254287593f6d963437cfb6df80839eb6989c240d3d7d","bors.toml":"1d8a7a56c5c76925a3daa8c50a40cc82cbfc638f521f864106bd60b1e8a219a2","rustfmt.toml":"b9eb3a50d2c594712da21780876526d5a9f79fdd2d795becaeb0f7016303006f","src/de.rs":"034b769aac212a93895cdfd72987b4f924cb6c9937ffd29cde9c1336ed5aface","src/lib.rs":"b69d313ea5c2f5f2da8b89dd0e268921ccf2d6e14d4f570e0c97ede26a8db2a8","src/ser/key.rs":"f0a4bd175b78b3127c3fbe050c22f45371c17b84968ea8a8a0a6df7847f43f5d","src/ser/mod.rs":"2d360e7e465d88bd76cbf059e7d8aa1f80bc5d66d7e91bfed0dbd80c2702819d","src/ser/pair.rs":"2ce910603e2f83a72b2789377ba27d1b66c49c6a07eff2f13f806da54ebc8bd7","src/ser/part.rs":"5b09e3c60c6eaad94f68e91e8712246d89215b94913772b3ef1e8ba3fcdda871","src/ser/value.rs":"e589e2b35e19d4f7f866bdb706686ea350c8d6a63dbe775e6433c502d27a716d","tests/test_deserialize.rs":"3f3bae6903150757f1dbaf42eeccdd2fcccba5e277bce83c7a3d0eafcc60a79e","tests/test_serialize.rs":"2bb829519c35ea9873d7cc18ebff7aa4b09f09be5a554963f4d18bc6e6c3348f"},"package":"9ec5d77e2d4c73717816afac02670d5c4f534ea95ed430442cad02e7a6e32c97"} \ No newline at end of file +{"files":{"Cargo.toml":"776416273e0e2004aaf8869df552bd0ff39858184730540604dad69e4dd17873","LICENSE-APACHE":"62c7a1e35f56406896d7aa7ca52d0cc0d272ac022b5d2796e7d6905db8a3636a","LICENSE-MIT":"b9eb266294324f672cbe945fe8f2e32f85024f0d61a1a7d14382cdde0ac44769","README.md":"0adc2e76e529922436075eddeaaf8decf04f1a642f0e6d9c513b634e72c00699","rustfmt.toml":"5dab9ecd7e76bc1f49cc5a6985196912b9ac8086dfb70833b6251721ba5bf74c","src/de.rs":"3e7ed20d227e2dab88c201f9fda80ebc39b8219fe66ceaf6bef478ca5c9bd891","src/lib.rs":"83718fe61b847408cd08da3515ce4f4ec45998615605b6e878461313986d8571","src/ser/key.rs":"4651a34088cf08b948b27e885efe5c1f6876dbd70f502892bbb7710ce76c878f","src/ser/mod.rs":"2a2eeaf30790e24fbee6bb2ba140dc29343ef797bf3668587892f2c6d2644648","src/ser/pair.rs":"be19e319092dba66aac06bae47f77d2ef39c4c308bbd67c13d031ab88c0e68e7","src/ser/part.rs":"8da25ff5a5159a05dd4b221a757aac514975243b4860aa5eee4ad4500a46d48b","src/ser/value.rs":"5eacb91e054476b982c5fa1b0b38179e844ca79842170d47665aca9e4515552a","tests/test_deserialize.rs":"4525a4d05fd86b2535d50857adab101a02bb5c61a6cb9c138513c8687eca32a7","tests/test_serialize.rs":"2f57ffda172d84573c5d9abe2d700b756fa844639ff0f1bf295441429ddd662b"},"package":"d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd"} \ No newline at end of file diff --git a/third_party/rust/serde_urlencoded/Cargo.toml b/third_party/rust/serde_urlencoded/Cargo.toml index 4dad7187fcc0..dfe8cc1ff288 100644 --- a/third_party/rust/serde_urlencoded/Cargo.toml +++ b/third_party/rust/serde_urlencoded/Cargo.toml @@ -3,19 +3,20 @@ # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies +# to registry (e.g., crates.io) dependencies. # -# If you believe there's an error in this file please file an -# issue against the rust-lang/cargo repository. 
If you're -# editing this file be aware that the upstream Cargo.toml -# will likely look very different (and much more reasonable) +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. [package] +edition = "2018" name = "serde_urlencoded" -version = "0.6.1" +version = "0.7.1" authors = ["Anthony Ramine "] +exclude = ["/.travis.yml", "/bors.toml"] description = "`x-www-form-urlencoded` meets Serde" -documentation = "https://docs.rs/serde_urlencoded" +documentation = "https://docs.rs/serde_urlencoded/0.7.1/serde_urlencoded/" keywords = ["serde", "serialization", "urlencoded"] categories = ["encoding", "web-programming"] license = "MIT/Apache-2.0" @@ -23,18 +24,18 @@ repository = "https://github.com/nox/serde_urlencoded" [lib] test = false -[dependencies.dtoa] -version = "0.4.0" +[dependencies.form_urlencoded] +version = "1" [dependencies.itoa] -version = "0.4.0" +version = "1" + +[dependencies.ryu] +version = "1" [dependencies.serde] -version = "1.0.0" - -[dependencies.url] -version = "2.0.0" +version = "1.0.69" [dev-dependencies.serde_derive] -version = "1.0" +version = "1" [badges.travis-ci] repository = "nox/serde_urlencoded" diff --git a/third_party/rust/serde_urlencoded/README.md b/third_party/rust/serde_urlencoded/README.md index e55a86c21727..92cacb65b8a7 100644 --- a/third_party/rust/serde_urlencoded/README.md +++ b/third_party/rust/serde_urlencoded/README.md @@ -18,10 +18,13 @@ This crate works with Cargo and can be found on ```toml [dependencies] -serde_urlencoded = "0.5.1" +serde_urlencoded = "0.7" ``` +The documentation is available on [docs.rs]. + [crates.io]: https://crates.io/crates/serde_urlencoded +[docs.rs]: https://docs.rs/serde_urlencoded/0.7.1/serde_urlencoded/ ## Getting help diff --git a/third_party/rust/serde_urlencoded/bors.toml b/third_party/rust/serde_urlencoded/bors.toml deleted file mode 100644 index 359f8947bac9..000000000000 --- a/third_party/rust/serde_urlencoded/bors.toml +++ /dev/null @@ -1 +0,0 @@ -status = ["continuous-integration/travis-ci/push"] diff --git a/third_party/rust/serde_urlencoded/rustfmt.toml b/third_party/rust/serde_urlencoded/rustfmt.toml index 2b7608b300c6..f80ce4e9c8a5 100644 --- a/third_party/rust/serde_urlencoded/rustfmt.toml +++ b/third_party/rust/serde_urlencoded/rustfmt.toml @@ -1,4 +1,4 @@ -match_block_trailing_comma = true +match_block_trailing_comma = false max_width = 80 newline_style = "Unix" reorder_imports = true diff --git a/third_party/rust/serde_urlencoded/src/de.rs b/third_party/rust/serde_urlencoded/src/de.rs index 3558f3e2be47..d906eaae2a3d 100644 --- a/third_party/rust/serde_urlencoded/src/de.rs +++ b/third_party/rust/serde_urlencoded/src/de.rs @@ -1,17 +1,18 @@ //! Deserialization support for the `application/x-www-form-urlencoded` format. +use form_urlencoded::parse; +use form_urlencoded::Parse as UrlEncodedParse; use serde::de::value::MapDeserializer; use serde::de::Error as de_Error; use serde::de::{self, IntoDeserializer}; +use serde::forward_to_deserialize_any; use std::borrow::Cow; use std::io::Read; -use url::form_urlencoded::parse; -use url::form_urlencoded::Parse as UrlEncodedParse; #[doc(inline)] pub use serde::de::value::Error; -/// Deserializes a `application/x-wwww-url-encoded` value from a `&[u8]`. +/// Deserializes a `application/x-www-form-urlencoded` value from a `&[u8]`. 
/// /// ``` /// let meal = vec![ @@ -33,7 +34,7 @@ where T::deserialize(Deserializer::new(parse(input))) } -/// Deserializes a `application/x-wwww-url-encoded` value from a `&str`. +/// Deserializes a `application/x-www-form-urlencoded` value from a `&str`. /// /// ``` /// let meal = vec![ diff --git a/third_party/rust/serde_urlencoded/src/lib.rs b/third_party/rust/serde_urlencoded/src/lib.rs index 776ae54ab1ac..b7ccc783f59d 100644 --- a/third_party/rust/serde_urlencoded/src/lib.rs +++ b/third_party/rust/serde_urlencoded/src/lib.rs @@ -1,17 +1,12 @@ //! `x-www-form-urlencoded` meets Serde #![warn(unused_extern_crates)] - -extern crate dtoa; -extern crate itoa; -#[macro_use] -extern crate serde; -extern crate url; +#![forbid(unsafe_code)] pub mod de; pub mod ser; #[doc(inline)] -pub use de::{from_bytes, from_reader, from_str, Deserializer}; +pub use crate::de::{from_bytes, from_reader, from_str, Deserializer}; #[doc(inline)] -pub use ser::{to_string, Serializer}; +pub use crate::ser::{to_string, Serializer}; diff --git a/third_party/rust/serde_urlencoded/src/ser/key.rs b/third_party/rust/serde_urlencoded/src/ser/key.rs index 2a2e63ac0365..8128a64ebbbe 100644 --- a/third_party/rust/serde_urlencoded/src/ser/key.rs +++ b/third_party/rust/serde_urlencoded/src/ser/key.rs @@ -1,5 +1,5 @@ -use ser::part::Sink; -use ser::Error; +use crate::ser::part::Sink; +use crate::ser::Error; use serde::Serialize; use std::borrow::Cow; use std::ops::Deref; @@ -38,7 +38,7 @@ where End: for<'key> FnOnce(Key<'key>) -> Result, { pub fn new(end: End) -> Self { - KeySink { end: end } + KeySink { end } } } diff --git a/third_party/rust/serde_urlencoded/src/ser/mod.rs b/third_party/rust/serde_urlencoded/src/ser/mod.rs index 598b3a4dff3d..d75b9022b2c2 100644 --- a/third_party/rust/serde_urlencoded/src/ser/mod.rs +++ b/third_party/rust/serde_urlencoded/src/ser/mod.rs @@ -5,15 +5,15 @@ mod pair; mod part; mod value; +use form_urlencoded::Serializer as UrlEncodedSerializer; +use form_urlencoded::Target as UrlEncodedTarget; use serde::ser; use std::borrow::Cow; use std::error; use std::fmt; use std::str; -use url::form_urlencoded::Serializer as UrlEncodedSerializer; -use url::form_urlencoded::Target as UrlEncodedTarget; -/// Serializes a value into a `application/x-wwww-url-encoded` `String` buffer. +/// Serializes a value into a `application/x-www-form-urlencoded` `String` buffer. /// /// ``` /// let meal = &[ @@ -42,16 +42,18 @@ pub fn to_string(input: T) -> Result { /// unit structs and unit variants. /// /// * Newtype structs defer to their inner values. -pub struct Serializer<'input, 'output, Target: 'output + UrlEncodedTarget> { +pub struct Serializer<'input, 'output, Target: UrlEncodedTarget> { urlencoder: &'output mut UrlEncodedSerializer<'input, Target>, } -impl<'input, 'output, Target: 'output + UrlEncodedTarget> Serializer<'input, 'output, Target> { +impl<'input, 'output, Target: 'output + UrlEncodedTarget> + Serializer<'input, 'output, Target> +{ /// Returns a new `Serializer`. 
- pub fn new(urlencoder: &'output mut UrlEncodedSerializer<'input, Target>) -> Self { - Serializer { - urlencoder: urlencoder, - } + pub fn new( + urlencoder: &'output mut UrlEncodedSerializer<'input, Target>, + ) -> Self { + Serializer { urlencoder } } } @@ -63,7 +65,7 @@ pub enum Error { } impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { Error::Custom(ref msg) => msg.fmt(f), Error::Utf8(ref err) => write!(f, "invalid UTF-8: {}", err), @@ -80,7 +82,15 @@ impl error::Error for Error { } /// The lower-level cause of this error, in the case of a `Utf8` error. - fn cause(&self) -> Option<&error::Error> { + fn cause(&self) -> Option<&dyn error::Error> { + match *self { + Error::Custom(_) => None, + Error::Utf8(ref err) => Some(err), + } + } + + /// The lower-level source of this error, in the case of a `Utf8` error. + fn source(&self) -> Option<&(dyn error::Error + 'static)> { match *self { Error::Custom(_) => None, Error::Utf8(ref err) => Some(err), @@ -95,50 +105,51 @@ impl ser::Error for Error { } /// Sequence serializer. -pub struct SeqSerializer<'input, 'output, Target: 'output + UrlEncodedTarget> { +pub struct SeqSerializer<'input, 'output, Target: UrlEncodedTarget> { urlencoder: &'output mut UrlEncodedSerializer<'input, Target>, } /// Tuple serializer. /// /// Mostly used for arrays. -pub struct TupleSerializer<'input, 'output, Target: 'output + UrlEncodedTarget> { +pub struct TupleSerializer<'input, 'output, Target: UrlEncodedTarget> { urlencoder: &'output mut UrlEncodedSerializer<'input, Target>, } /// Tuple struct serializer. /// /// Never instantiated, tuple structs are not supported. -pub struct TupleStructSerializer<'input, 'output, T: 'output + UrlEncodedTarget> { +pub struct TupleStructSerializer<'input, 'output, T: UrlEncodedTarget> { inner: ser::Impossible<&'output mut UrlEncodedSerializer<'input, T>, Error>, } /// Tuple variant serializer. /// /// Never instantiated, tuple variants are not supported. -pub struct TupleVariantSerializer<'input, 'output, T: 'output + UrlEncodedTarget> { +pub struct TupleVariantSerializer<'input, 'output, T: UrlEncodedTarget> { inner: ser::Impossible<&'output mut UrlEncodedSerializer<'input, T>, Error>, } /// Map serializer. -pub struct MapSerializer<'input, 'output, Target: 'output + UrlEncodedTarget> { +pub struct MapSerializer<'input, 'output, Target: UrlEncodedTarget> { urlencoder: &'output mut UrlEncodedSerializer<'input, Target>, key: Option>, } /// Struct serializer. -pub struct StructSerializer<'input, 'output, Target: 'output + UrlEncodedTarget> { +pub struct StructSerializer<'input, 'output, Target: UrlEncodedTarget> { urlencoder: &'output mut UrlEncodedSerializer<'input, Target>, } /// Struct variant serializer. /// /// Never instantiated, struct variants are not supported. 
-pub struct StructVariantSerializer<'input, 'output, T: 'output + UrlEncodedTarget> { +pub struct StructVariantSerializer<'input, 'output, T: UrlEncodedTarget> { inner: ser::Impossible<&'output mut UrlEncodedSerializer<'input, T>, Error>, } -impl<'input, 'output, Target> ser::Serializer for Serializer<'input, 'output, Target> +impl<'input, 'output, Target> ser::Serializer + for Serializer<'input, 'output, Target> where Target: 'output + UrlEncodedTarget, { @@ -147,10 +158,12 @@ where type SerializeSeq = SeqSerializer<'input, 'output, Target>; type SerializeTuple = TupleSerializer<'input, 'output, Target>; type SerializeTupleStruct = TupleStructSerializer<'input, 'output, Target>; - type SerializeTupleVariant = TupleVariantSerializer<'input, 'output, Target>; + type SerializeTupleVariant = + TupleVariantSerializer<'input, 'output, Target>; type SerializeMap = MapSerializer<'input, 'output, Target>; type SerializeStruct = StructSerializer<'input, 'output, Target>; - type SerializeStructVariant = StructVariantSerializer<'input, 'output, Target>; + type SerializeStructVariant = + StructVariantSerializer<'input, 'output, Target>; /// Returns an error. fn serialize_bool(self, _v: bool) -> Result { @@ -222,9 +235,9 @@ where Err(Error::top_level()) } - /// Returns an error. + /// Returns `Ok`. fn serialize_unit(self) -> Result { - Err(Error::top_level()) + Ok(self.urlencoder) } /// Returns `Ok`. @@ -352,7 +365,8 @@ where } } -impl<'input, 'output, Target> ser::SerializeSeq for SeqSerializer<'input, 'output, Target> +impl<'input, 'output, Target> ser::SerializeSeq + for SeqSerializer<'input, 'output, Target> where Target: 'output + UrlEncodedTarget, { @@ -371,7 +385,8 @@ where } } -impl<'input, 'output, Target> ser::SerializeTuple for TupleSerializer<'input, 'output, Target> +impl<'input, 'output, Target> ser::SerializeTuple + for TupleSerializer<'input, 'output, Target> where Target: 'output + UrlEncodedTarget, { @@ -430,7 +445,8 @@ where } } -impl<'input, 'output, Target> ser::SerializeMap for MapSerializer<'input, 'output, Target> +impl<'input, 'output, Target> ser::SerializeMap + for MapSerializer<'input, 'output, Target> where Target: 'output + UrlEncodedTarget, { @@ -470,7 +486,7 @@ where value: &T, ) -> Result<(), Error> { { - let key = self.key.as_ref().ok_or_else(|| Error::no_key())?; + let key = self.key.as_ref().ok_or_else(Error::no_key)?; let value_sink = value::ValueSink::new(self.urlencoder, &key); value.serialize(part::PartSerializer::new(value_sink))?; } @@ -483,7 +499,8 @@ where } } -impl<'input, 'output, Target> ser::SerializeStruct for StructSerializer<'input, 'output, Target> +impl<'input, 'output, Target> ser::SerializeStruct + for StructSerializer<'input, 'output, Target> where Target: 'output + UrlEncodedTarget, { diff --git a/third_party/rust/serde_urlencoded/src/ser/pair.rs b/third_party/rust/serde_urlencoded/src/ser/pair.rs index e7235e43e3da..429ce4b2bdd1 100644 --- a/third_party/rust/serde_urlencoded/src/ser/pair.rs +++ b/third_party/rust/serde_urlencoded/src/ser/pair.rs @@ -1,14 +1,14 @@ -use ser::key::KeySink; -use ser::part::PartSerializer; -use ser::value::ValueSink; -use ser::Error; +use crate::ser::key::KeySink; +use crate::ser::part::PartSerializer; +use crate::ser::value::ValueSink; +use crate::ser::Error; +use form_urlencoded::Serializer as UrlEncodedSerializer; +use form_urlencoded::Target as UrlEncodedTarget; use serde::ser; use std::borrow::Cow; use std::mem; -use url::form_urlencoded::Serializer as UrlEncodedSerializer; -use url::form_urlencoded::Target as 
UrlEncodedTarget; -pub struct PairSerializer<'input, 'target, Target: 'target + UrlEncodedTarget> { +pub struct PairSerializer<'input, 'target, Target: UrlEncodedTarget> { urlencoder: &'target mut UrlEncodedSerializer<'input, Target>, state: PairState, } @@ -17,15 +17,18 @@ impl<'input, 'target, Target> PairSerializer<'input, 'target, Target> where Target: 'target + UrlEncodedTarget, { - pub fn new(urlencoder: &'target mut UrlEncodedSerializer<'input, Target>) -> Self { + pub fn new( + urlencoder: &'target mut UrlEncodedSerializer<'input, Target>, + ) -> Self { PairSerializer { - urlencoder: urlencoder, + urlencoder, state: PairState::WaitingForKey, } } } -impl<'input, 'target, Target> ser::Serializer for PairSerializer<'input, 'target, Target> +impl<'input, 'target, Target> ser::Serializer + for PairSerializer<'input, 'target, Target> where Target: 'target + UrlEncodedTarget, { @@ -200,7 +203,8 @@ where } } -impl<'input, 'target, Target> ser::SerializeTuple for PairSerializer<'input, 'target, Target> +impl<'input, 'target, Target> ser::SerializeTuple + for PairSerializer<'input, 'target, Target> where Target: 'target + UrlEncodedTarget, { @@ -219,7 +223,7 @@ where key: value.serialize(key_serializer)?, }; Ok(()) - }, + } PairState::WaitingForValue { key } => { let result = { let value_sink = ValueSink::new(self.urlencoder, &key); @@ -229,10 +233,10 @@ where if result.is_ok() { self.state = PairState::Done; } else { - self.state = PairState::WaitingForValue { key: key }; + self.state = PairState::WaitingForValue { key }; } result - }, + } PairState::Done => Err(Error::done()), } } diff --git a/third_party/rust/serde_urlencoded/src/ser/part.rs b/third_party/rust/serde_urlencoded/src/ser/part.rs index f72846cc0067..1deffa54b5c7 100644 --- a/third_party/rust/serde_urlencoded/src/ser/part.rs +++ b/third_party/rust/serde_urlencoded/src/ser/part.rs @@ -1,6 +1,4 @@ -use dtoa; -use itoa; -use ser::Error; +use crate::ser::Error; use serde::ser; use std::str; @@ -10,7 +8,7 @@ pub struct PartSerializer { impl PartSerializer { pub fn new(sink: S) -> Self { - PartSerializer { sink: sink } + PartSerializer { sink } } } @@ -82,6 +80,14 @@ impl ser::Serializer for PartSerializer { self.serialize_integer(v) } + fn serialize_u128(self, v: u128) -> Result { + self.serialize_integer(v) + } + + fn serialize_i128(self, v: i128) -> Result { + self.serialize_integer(v) + } + fn serialize_f32(self, v: f32) -> Result { self.serialize_floating(v) } @@ -110,7 +116,7 @@ impl ser::Serializer for PartSerializer { } fn serialize_unit_struct(self, name: &'static str) -> Result { - self.sink.serialize_static_str(name.into()) + self.sink.serialize_static_str(name) } fn serialize_unit_variant( @@ -119,7 +125,7 @@ impl ser::Serializer for PartSerializer { _variant_index: u32, variant: &'static str, ) -> Result { - self.sink.serialize_static_str(variant.into()) + self.sink.serialize_static_str(variant) } fn serialize_newtype_struct( @@ -214,19 +220,17 @@ impl PartSerializer { where I: itoa::Integer, { - let mut buf = [b'\0'; 20]; - let len = itoa::write(&mut buf[..], value).unwrap(); - let part = unsafe { str::from_utf8_unchecked(&buf[0..len]) }; + let mut buf = itoa::Buffer::new(); + let part = buf.format(value); ser::Serializer::serialize_str(self, part) } fn serialize_floating(self, value: F) -> Result where - F: dtoa::Floating, + F: ryu::Float, { - let mut buf = [b'\0'; 24]; - let len = dtoa::write(&mut buf[..], value).unwrap(); - let part = unsafe { str::from_utf8_unchecked(&buf[0..len]) }; + let mut buf = 
ryu::Buffer::new(); + let part = buf.format(value); ser::Serializer::serialize_str(self, part) } } diff --git a/third_party/rust/serde_urlencoded/src/ser/value.rs b/third_party/rust/serde_urlencoded/src/ser/value.rs index fc12076ae08f..e8823ce70381 100644 --- a/third_party/rust/serde_urlencoded/src/ser/value.rs +++ b/third_party/rust/serde_urlencoded/src/ser/value.rs @@ -1,13 +1,13 @@ -use ser::part::{PartSerializer, Sink}; -use ser::Error; +use crate::ser::part::{PartSerializer, Sink}; +use crate::ser::Error; +use form_urlencoded::Serializer as UrlEncodedSerializer; +use form_urlencoded::Target as UrlEncodedTarget; use serde::ser::Serialize; use std::str; -use url::form_urlencoded::Serializer as UrlEncodedSerializer; -use url::form_urlencoded::Target as UrlEncodedTarget; pub struct ValueSink<'input, 'key, 'target, Target> where - Target: 'target + UrlEncodedTarget, + Target: UrlEncodedTarget, { urlencoder: &'target mut UrlEncodedSerializer<'input, Target>, key: &'key str, @@ -21,14 +21,12 @@ where urlencoder: &'target mut UrlEncodedSerializer<'input, Target>, key: &'key str, ) -> Self { - ValueSink { - urlencoder: urlencoder, - key: key, - } + ValueSink { urlencoder, key } } } -impl<'input, 'key, 'target, Target> Sink for ValueSink<'input, 'key, 'target, Target> +impl<'input, 'key, 'target, Target> Sink + for ValueSink<'input, 'key, 'target, Target> where Target: 'target + UrlEncodedTarget, { diff --git a/third_party/rust/serde_urlencoded/tests/test_deserialize.rs b/third_party/rust/serde_urlencoded/tests/test_deserialize.rs index 6910599547ce..00700d306033 100644 --- a/third_party/rust/serde_urlencoded/tests/test_deserialize.rs +++ b/third_party/rust/serde_urlencoded/tests/test_deserialize.rs @@ -1,6 +1,4 @@ -extern crate serde_urlencoded; -#[macro_use] -extern crate serde_derive; +use serde_derive::Deserialize; #[derive(Deserialize, Debug, PartialEq)] struct NewType(T); @@ -83,3 +81,8 @@ fn deserialize_unit_enum() { Ok(result) ); } + +#[test] +fn deserialize_unit_type() { + assert_eq!(serde_urlencoded::from_str(""), Ok(())); +} diff --git a/third_party/rust/serde_urlencoded/tests/test_serialize.rs b/third_party/rust/serde_urlencoded/tests/test_serialize.rs index b0276d2ee324..abb4907a46e4 100644 --- a/third_party/rust/serde_urlencoded/tests/test_serialize.rs +++ b/third_party/rust/serde_urlencoded/tests/test_serialize.rs @@ -1,6 +1,4 @@ -extern crate serde_urlencoded; -#[macro_use] -extern crate serde_derive; +use serde_derive::Serialize; #[derive(Serialize)] struct NewType(T); @@ -14,6 +12,24 @@ fn serialize_newtype_i32() { ); } +#[test] +fn serialize_newtype_u128() { + let params = &[("field", Some(NewType(u128::MAX)))]; + assert_eq!( + serde_urlencoded::to_string(params), + Ok(format!("field={}", u128::MAX)) + ); +} + +#[test] +fn serialize_newtype_i128() { + let params = &[("field", Some(NewType(i128::MIN)))]; + assert_eq!( + serde_urlencoded::to_string(params), + Ok(format!("field={}", i128::MIN)) + ); +} + #[test] fn serialize_option_map_int() { let params = &[("first", Some(23)), ("middle", None), ("last", Some(42))]; @@ -81,3 +97,8 @@ struct Unit; fn serialize_unit_struct() { assert_eq!(serde_urlencoded::to_string(Unit), Ok("".to_owned())); } + +#[test] +fn serialize_unit_type() { + assert_eq!(serde_urlencoded::to_string(()), Ok("".to_owned())); +} diff --git a/third_party/rust/socket2/.cargo-checksum.json b/third_party/rust/socket2/.cargo-checksum.json index d395b486a4c7..40fcf7f38c74 100644 --- a/third_party/rust/socket2/.cargo-checksum.json +++ 
b/third_party/rust/socket2/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"Cargo.toml":"54fecae223099432de172b711c6f23f4bb46d99e8fcb3f63f52ae1960c23b8c6","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"378f5840b258e2779c39418f3f2d7b2ba96f1c7917dd6be0713f88305dbda397","README.md":"9c3d94f57a3464986df30edf46250fa70200b40b9c70d8cac6b2fc3a76d24631","SO_ACCEPTCONN.patch":"e942fd2208a41d2b895354a6d80bc0c4aed875d48e6f5a66fa94cf193233c5c2","TODO":"4d0c6a6a858204015a6eac72f2e6d6fc8638ee78595c5600a07a8015975fdaf2","check_targets.bash":"dd9de020e3991988c8183327ab1d1b7c0cfbb21364676a6c4b8a4a0f8fafd6ef","diff.patch":"acb27c495149dab10f9436efcca5f5637d2f4a1527028eeeb58858a3e1ad13e0","src/lib.rs":"1f2159502b551883652e32a02a6f4d60b284ad916cd970994c8beb40b4cc9118","src/sockaddr.rs":"109824754ba77687a47e51a3ed5f8b0abecbe397a633a99fc94cc637bc698d76","src/socket.rs":"6aeb83003381e4a4424c2c7b08bd01bbd837fd4edcec156983df07316e1dca57","src/sys/unix.rs":"10e3d4d2e4e460f45acc670c7fb9ad24f3f60cdf0971bf4ed6f94e158e0551a8","src/sys/windows.rs":"5180e6fbe029fcfc710e1d3498646f4a13b39a7eb62a2b3b0b01c29a7e9dc411","src/tests.rs":"d17e44cc61c646f41b4a18a7f7cb4e632ea34dc1c7d347928e0ed6295840a0ae","src/utils.rs":"53968de8c8e078a650fa316438622be9bb78bca95ed07c4f8da2c940ae27e0ae"},"package":"122e570113d28d773067fab24266b66753f6ea915758651696b6e35e49f88d6e"} \ No newline at end of file +{"files":{"Cargo.toml":"a8878d08e3b1b2f217ca564f02ffb908d07ddc8584edf29ac33738693be7e1a9","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"378f5840b258e2779c39418f3f2d7b2ba96f1c7917dd6be0713f88305dbda397","README.md":"b870d442505bc8047b641815c8283f29fc213dcfd1331894802e6ae55cc09f1b","src/lib.rs":"b4409a10c9b4c1e16f20cc4b6cd087ad5fa0ec8fc701653bc93fedeb158583d2","src/sockaddr.rs":"bed988fb306072fdb67718f3a215180d18f78f32321fecf64797f4c1d6bc7d84","src/socket.rs":"2beb154dfbadeb79408d987d074acd4a8daaf28e2dcadac0357e2d4fd4be7526","src/sockref.rs":"02de263cce039aaddaee5d6c2bb3940bdfae5697a3fc9df47d226fb3cac03cd9","src/sys/unix.rs":"b784e64a55ce666d8b3c8e30949b6d4965d6961c1bb964017f7fc54c109b7ad7","src/sys/windows.rs":"85840bf5c99fc984c9126cebfb065caefa7a8158b1c6bbc1d876f347187eefdb"},"package":"66d72b759436ae32898a2af0a14218dbf55efde3feeb170eb623637db85ee1e0"} \ No newline at end of file diff --git a/third_party/rust/socket2/Cargo.toml b/third_party/rust/socket2/Cargo.toml index 7894d061e3f6..8fd3884cd487 100644 --- a/third_party/rust/socket2/Cargo.toml +++ b/third_party/rust/socket2/Cargo.toml @@ -3,38 +3,37 @@ # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies +# to registry (e.g., crates.io) dependencies. # -# If you believe there's an error in this file please file an -# issue against the rust-lang/cargo repository. If you're -# editing this file be aware that the upstream Cargo.toml -# will likely look very different (and much more reasonable) +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. 
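The serde_urlencoded src/ser/part.rs hunk above drops dtoa and the old itoa::write interface in favour of the buffer-based formatters from itoa 1.0 and ryu, which hand back a &str directly and remove the unsafe from_utf8_unchecked step. A minimal standalone sketch of that API, assuming itoa and ryu as direct dependencies; the helper names here are illustrative, not part of any crate:

// Buffer-based formatting as used by serialize_integer/serialize_floating
// above: format() borrows from the stack-allocated buffer and returns &str.
fn format_integer<I: itoa::Integer>(value: I) -> String {
    let mut buf = itoa::Buffer::new();
    buf.format(value).to_owned()
}

fn format_float<F: ryu::Float>(value: F) -> String {
    let mut buf = ryu::Buffer::new();
    buf.format(value).to_owned()
}

fn main() {
    assert_eq!(format_integer(u128::MAX), u128::MAX.to_string());
    assert_eq!(format_float(1.5f64), "1.5");
}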
[package] edition = "2018" name = "socket2" -version = "0.3.19" -authors = ["Alex Crichton "] +version = "0.4.4" +authors = ["Alex Crichton ", "Thomas de Zeeuw "] +include = ["Cargo.toml", "LICENSE-APACHE", "LICENSE-MIT", "README.md", "src/**/*.rs"] description = "Utilities for handling networking sockets with a maximal amount of configuration\npossible intended.\n" -homepage = "https://github.com/alexcrichton/socket2-rs" +homepage = "https://github.com/rust-lang/socket2" +documentation = "https://docs.rs/socket2" readme = "README.md" -license = "MIT/Apache-2.0" -repository = "https://github.com/alexcrichton/socket2-rs" +keywords = ["io", "socket", "network"] +categories = ["api-bindings", "network-programming"] +license = "MIT OR Apache-2.0" +repository = "https://github.com/rust-lang/socket2" [package.metadata.docs.rs] all-features = true -[dev-dependencies.tempdir] -version = "0.3" +rustdoc-args = ["--cfg", "docsrs"] + +[package.metadata.playground] +features = ["all"] [features] -pair = [] -reuseport = [] -unix = [] -[target."cfg(unix)".dependencies.cfg-if] -version = "1.0" - +all = [] [target."cfg(unix)".dependencies.libc] -version = "0.2.66" -features = ["align"] +version = "0.2.114" [target."cfg(windows)".dependencies.winapi] -version = "0.3.3" -features = ["handleapi", "ws2def", "ws2ipdef", "ws2tcpip", "minwindef"] +version = "0.3.9" +features = ["handleapi", "ws2ipdef", "ws2tcpip"] diff --git a/third_party/rust/socket2/README.md b/third_party/rust/socket2/README.md index 10d9b1c81ea7..45f4cf84bd7c 100644 --- a/third_party/rust/socket2/README.md +++ b/third_party/rust/socket2/README.md @@ -1,6 +1,70 @@ -# socket2-rs +# Socket2 -[Documentation](https://docs.rs/socket2) +Socket2 is a crate that provides utilities for creating and using sockets. + +The goal of this crate is to create and use a socket using advanced +configuration options (those that are not available in the types in the standard +library) without using any unsafe code. + +This crate provides as direct as possible access to the system's functionality +for sockets, this means little effort to provide cross-platform utilities. It is +up to the user to know how to use sockets when using this crate. *If you don't +know how to create a socket using libc/system calls then this crate is not for +you*. Most, if not all, functions directly relate to the equivalent system call +with no error handling applied, so no handling errors such as `EINTR`. As a +result using this crate can be a little wordy, but it should give you maximal +flexibility over configuration of sockets. + +See the [API documentation] for more. + +[API documentation]: https://docs.rs/socket2 + +# Two branches + +Currently Socket2 supports two versions: v0.4 and v0.3. Version 0.4 is developed +in the master branch, version 0.3 in the [v0.3.x branch]. + +[v0.3.x branch]: https://github.com/rust-lang/socket2/tree/v0.3.x + +# OS support + +Socket2 attempts to support the same OS/architectures as Rust does, see +https://doc.rust-lang.org/nightly/rustc/platform-support.html. However this is +not always possible, below is current list of support OSs. + +*If your favorite OS is not on the list consider contributing it! See [issue +#78].* + +[issue #78]: https://github.com/rust-lang/socket2/issues/78 + +### Tier 1 + +These OSs are tested with each commit in the CI and must always pass the tests. +All functions/types/etc., excluding ones behind the `all` feature, must work on +these OSs. 
+ +* Linux +* macOS +* Windows + +### Tier 2 + +These OSs are currently build in the CI, but not tested. Not all +functions/types/etc. may work on these OSs, even ones **not** behind the `all` +feature flag. + +* Android +* FreeBSD +* Fuchsia +* iOS +* illumos +* NetBSD +* Redox +* Solaris + +# Minimum Supported Rust Version (MSRV) + +Socket2 uses 1.46.0 as MSRV. # License @@ -17,4 +81,4 @@ at your option. Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in this project by you, as defined in the Apache-2.0 license, - shall be dual licensed as above, without any additional terms or conditions. +shall be dual licensed as above, without any additional terms or conditions. diff --git a/third_party/rust/socket2/SO_ACCEPTCONN.patch b/third_party/rust/socket2/SO_ACCEPTCONN.patch deleted file mode 100644 index 116951af0d8d..000000000000 --- a/third_party/rust/socket2/SO_ACCEPTCONN.patch +++ /dev/null @@ -1,96 +0,0 @@ -diff --git a/src/socket.rs b/src/socket.rs -index 725ad2a..3f7b499 100644 ---- a/src/socket.rs -+++ b/src/socket.rs -@@ -963,6 +963,26 @@ fn inner(&self) -> &sys::Socket { - } - } - -+/// Socket options get/set using `SOL_SOCKET`. -+/// -+/// Additional documentation can be found in documentation of the OS. -+/// * Linux: -+/// * Windows: -+impl Socket { -+ /// Gets the value of the `SO_ACCEPTCONN` option on this socket. -+ /// -+ /// Returns `true` if this socket has been marked to accept connections with -+ /// [`listen`]. -+ /// -+ /// [`listen`]: Socket::listen -+ pub fn is_listener(&self) -> io::Result { -+ unsafe { -+ getsockopt::(self.inner, sys::SOL_SOCKET, sys::SO_ACCEPTCONN) -+ .map(|is_listener| is_listener != 0) -+ } -+ } -+} -+ - /// Socket options for TCP socket, get/set using `IPPROTO_TCP`. - /// - /// Additional documentation can be found in documentation of the OS. -diff --git a/src/sys/unix.rs b/src/sys/unix.rs -index 60593a1..51b916a 100644 ---- a/src/sys/unix.rs -+++ b/src/sys/unix.rs -@@ -54,8 +54,8 @@ - pub(crate) use libc::MSG_OOB; - pub(crate) use libc::{ - IPPROTO_IP, IPPROTO_IPV6, IPV6_MULTICAST_HOPS, IPV6_MULTICAST_LOOP, IPV6_UNICAST_HOPS, -- IPV6_V6ONLY, IP_MULTICAST_LOOP, IP_MULTICAST_TTL, IP_TTL, MSG_PEEK, SOL_SOCKET, SO_BROADCAST, -- SO_ERROR, TCP_NODELAY, -+ IPV6_V6ONLY, IP_MULTICAST_LOOP, IP_MULTICAST_TTL, IP_TTL, MSG_PEEK, SOL_SOCKET, SO_ACCEPTCONN, -+ SO_BROADCAST, SO_ERROR, TCP_NODELAY, - }; - - // See this type in the Windows file. -diff --git a/src/sys/windows.rs b/src/sys/windows.rs -index 663b63f..a212525 100644 ---- a/src/sys/windows.rs -+++ b/src/sys/windows.rs -@@ -60,7 +60,7 @@ - pub(crate) use winapi::um::ws2tcpip::socklen_t; - // Used in `Socket`. - pub(crate) use winapi::shared::ws2def::{ -- IPPROTO_IP, SOL_SOCKET, SO_BROADCAST, SO_ERROR, TCP_NODELAY, -+ IPPROTO_IP, SOL_SOCKET, SO_ACCEPTCONN, SO_BROADCAST, SO_ERROR, TCP_NODELAY, - }; - pub(crate) use winapi::shared::ws2ipdef::{ - IPV6_MULTICAST_HOPS, IPV6_MULTICAST_LOOP, IPV6_UNICAST_HOPS, IPV6_V6ONLY, IP_MULTICAST_LOOP, -diff --git a/tests/options.rs b/tests/options.rs -index 9c3d1c2..2d256a7 100644 ---- a/tests/options.rs -+++ b/tests/options.rs -@@ -1,6 +1,8 @@ - //! Tests for getting and setting socket options. - --use socket2::{Domain, Socket, Type}; -+use std::net::SocketAddr; -+ -+use socket2::{Domain, Protocol, Socket, Type}; - - /// Macro to create a simple test to set and get a socket option. - macro_rules! test { -@@ -86,6 +88,22 @@ fn $get_fn() { - set_mark(123) - ); - -+#[test] -+fn is_listener() { -+ // TODO: IPv6. 
-+ -+ let socket = Socket::new(Domain::IPV4, Type::STREAM, Some(Protocol::TCP)) -+ .expect("failed to create `Socket`"); -+ //assert_eq!(socket.is_listener().unwrap(), false); -+ -+ let addr: SocketAddr = "127.0.0.1:0".parse().unwrap(); -+ let addr = addr.into(); -+ socket.bind(&addr).unwrap(); -+ socket.listen(1).unwrap(); -+ dbg!(socket.is_listener()); -+ assert_eq!(socket.is_listener().unwrap(), true); -+} -+ - test!(IPv4 ttl, set_ttl(40)); - #[cfg(not(windows))] // TODO: returns `WSAENOPROTOOPT` (10042) on Windows. - test!(IPv4 broadcast, set_broadcast(true)); diff --git a/third_party/rust/socket2/TODO b/third_party/rust/socket2/TODO deleted file mode 100644 index ce1f1fa5ba57..000000000000 --- a/third_party/rust/socket2/TODO +++ /dev/null @@ -1,20 +0,0 @@ -# Refactor - -* [ ] CONTRIBUTING.md -* [ ] Move tests from src/*.rs to tests dir. -* [ ] Tracking issue for tests. Check all functions/coverage (with `rustcov`). -* [ ] Look at `SockAddr`. - * [ ] Add docs about its size. - -## Maybe add socket options - -# SOL_SOCKET -SO_ACCEPTCONN -SO_DOMAIN -SO_TYPE -SO_INCOMING_CPU // Linux only? -SO_INCOMING_NAPI_ID // Linux only? - -# No-op/error returned on Windows: -SO_RCVLOWAT -SO_SNDLOWAT diff --git a/third_party/rust/socket2/check_targets.bash b/third_party/rust/socket2/check_targets.bash deleted file mode 100755 index 343b42a25f98..000000000000 --- a/third_party/rust/socket2/check_targets.bash +++ /dev/null @@ -1,15 +0,0 @@ -#!/usr/bin/env bash - -set -eu - -declare -a targets=( - "x86_64-apple-darwin" - "x86_64-unknown-freebsd" - "x86_64-unknown-linux-gnu" - "x86_64-pc-windows-gnu" -) - -for target in "${targets[@]}"; do - cargo check --target "$target" --all-targets --examples --bins --tests --no-default-features - cargo check --target "$target" --all-targets --examples --bins --tests --all-features -done diff --git a/third_party/rust/socket2/diff.patch b/third_party/rust/socket2/diff.patch deleted file mode 100644 index 7c4badacdfec..000000000000 --- a/third_party/rust/socket2/diff.patch +++ /dev/null @@ -1,134 +0,0 @@ -commit 3851430dec41c204d6219a84c47ca6885622a98e -Author: kolapapa -Date: Sun Dec 20 14:02:52 2020 +0100 - - Add Socket::(bind_)device - - Co-authored-by: Thomas de Zeeuw - -diff --git a/src/sys/unix.rs b/src/sys/unix.rs -index 72097ae..1a0f24e 100644 ---- a/src/sys/unix.rs -+++ b/src/sys/unix.rs -@@ -7,6 +7,8 @@ - // except according to those terms. - - use std::cmp::min; -+#[cfg(all(feature = "all", target_os = "linux"))] -+use std::ffi::{CStr, CString}; - #[cfg(not(target_os = "redox"))] - use std::io::{IoSlice, IoSliceMut}; - use std::mem::{self, size_of, MaybeUninit}; -@@ -19,6 +21,8 @@ - use std::os::unix::net::{UnixDatagram, UnixListener, UnixStream}; - #[cfg(feature = "all")] - use std::path::Path; -+#[cfg(all(feature = "all", target_os = "linux"))] -+use std::slice; - use std::time::Duration; - use std::{io, ptr}; - -@@ -867,6 +871,73 @@ pub fn set_mark(&self, mark: u32) -> io::Result<()> { - unsafe { setsockopt::(self.inner, libc::SOL_SOCKET, libc::SO_MARK, mark as c_int) } - } - -+ /// Gets the value for the `SO_BINDTODEVICE` option on this socket. -+ /// -+ /// This value gets the socket binded device's interface name. -+ /// -+ /// This function is only available on Linux. -+ #[cfg(all(feature = "all", target_os = "linux"))] -+ pub fn device(&self) -> io::Result> { -+ // TODO: replace with `MaybeUninit::uninit_array` once stable. 
-+ let mut buf: [MaybeUninit; libc::IFNAMSIZ] = -+ unsafe { MaybeUninit::<[MaybeUninit; libc::IFNAMSIZ]>::uninit().assume_init() }; -+ let mut len = buf.len() as libc::socklen_t; -+ unsafe { -+ syscall!(getsockopt( -+ self.inner, -+ libc::SOL_SOCKET, -+ libc::SO_BINDTODEVICE, -+ buf.as_mut_ptr().cast(), -+ &mut len, -+ ))?; -+ } -+ if len == 0 { -+ Ok(None) -+ } else { -+ // Allocate a buffer for `CString` with the length including the -+ // null terminator. -+ let len = len as usize; -+ let mut name = Vec::with_capacity(len); -+ -+ // TODO: use `MaybeUninit::slice_assume_init_ref` once stable. -+ // Safety: `len` bytes are writen by the OS, this includes a null -+ // terminator. However we don't copy the null terminator because -+ // `CString::from_vec_unchecked` adds its own null terminator. -+ let buf = unsafe { slice::from_raw_parts(buf.as_ptr().cast(), len - 1) }; -+ name.extend_from_slice(buf); -+ -+ // Safety: the OS initialised the string for us, which shouldn't -+ // include any null bytes. -+ Ok(Some(unsafe { CString::from_vec_unchecked(name) })) -+ } -+ } -+ -+ /// Sets the value for the `SO_BINDTODEVICE` option on this socket. -+ /// -+ /// If a socket is bound to an interface, only packets received from that -+ /// particular interface are processed by the socket. Note that this only -+ /// works for some socket types, particularly `AF_INET` sockets. -+ /// -+ /// If `interface` is `None` or an empty string it removes the binding. -+ /// -+ /// This function is only available on Linux. -+ #[cfg(all(feature = "all", target_os = "linux"))] -+ pub fn bind_device(&self, interface: Option<&CStr>) -> io::Result<()> { -+ let (value, len) = if let Some(interface) = interface { -+ (interface.as_ptr(), interface.to_bytes_with_nul().len()) -+ } else { -+ (ptr::null(), 0) -+ }; -+ syscall!(setsockopt( -+ self.inner, -+ libc::SOL_SOCKET, -+ libc::SO_BINDTODEVICE, -+ value.cast(), -+ len as libc::socklen_t, -+ )) -+ .map(|_| ()) -+ } -+ - /// Get the value of the `SO_REUSEPORT` option on this socket. - /// - /// For more information about this option, see [`set_reuse_port`]. -diff --git a/tests/socket.rs b/tests/socket.rs -index 11a3d9c..75890af 100644 ---- a/tests/socket.rs -+++ b/tests/socket.rs -@@ -1,3 +1,5 @@ -+#[cfg(all(feature = "all", target_os = "linux"))] -+use std::ffi::CStr; - #[cfg(any(windows, target_vendor = "apple"))] - use std::io; - #[cfg(unix)] -@@ -271,3 +273,19 @@ fn keepalive() { - ))] - assert_eq!(socket.keepalive_retries().unwrap(), 10); - } -+ -+#[cfg(all(feature = "all", target_os = "linux"))] -+#[test] -+fn device() { -+ const INTERFACE: &str = "lo0\0"; -+ let interface = CStr::from_bytes_with_nul(INTERFACE.as_bytes()).unwrap(); -+ let socket = Socket::new(Domain::IPV4, Type::STREAM, None).unwrap(); -+ -+ assert_eq!(socket.device().unwrap(), None); -+ -+ socket.bind_device(Some(interface)).unwrap(); -+ assert_eq!(socket.device().unwrap().as_deref(), Some(interface)); -+ -+ socket.bind_device(None).unwrap(); -+ assert_eq!(socket.device().unwrap(), None); -+} diff --git a/third_party/rust/socket2/src/lib.rs b/third_party/rust/socket2/src/lib.rs index c0354f125125..d01b652ce554 100644 --- a/third_party/rust/socket2/src/lib.rs +++ b/third_party/rust/socket2/src/lib.rs @@ -1,6 +1,4 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. +// Copyright 2015 The Rust Project Developers. 
// // Licensed under the Apache License, Version 2.0 or the MIT license @@ -8,36 +6,64 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -//! Utilities for handling sockets +//! Utilities for creating and using sockets. //! -//! This crate is sort of an evolution of the `net2` crate after seeing the -//! issues on it over time. The intention of this crate is to provide as direct -//! as possible access to the system's functionality for sockets as possible. No -//! extra fluff (e.g. multiple syscalls or builders) provided in this crate. As -//! a result using this crate can be a little wordy, but it should give you -//! maximal flexibility over configuration of sockets. +//! The goal of this crate is to create and use a socket using advanced +//! configuration options (those that are not available in the types in the +//! standard library) without using any unsafe code. +//! +//! This crate provides as direct as possible access to the system's +//! functionality for sockets, this means little effort to provide +//! cross-platform utilities. It is up to the user to know how to use sockets +//! when using this crate. *If you don't know how to create a socket using +//! libc/system calls then this crate is not for you*. Most, if not all, +//! functions directly relate to the equivalent system call with no error +//! handling applied, so no handling errors such as [`EINTR`]. As a result using +//! this crate can be a little wordy, but it should give you maximal flexibility +//! over configuration of sockets. +//! +//! [`EINTR`]: std::io::ErrorKind::Interrupted //! //! # Examples //! //! ```no_run -//! use std::net::SocketAddr; +//! # fn main() -> std::io::Result<()> { +//! use std::net::{SocketAddr, TcpListener}; //! use socket2::{Socket, Domain, Type}; //! -//! // create a TCP listener bound to two addresses -//! let socket = Socket::new(Domain::ipv6(), Type::stream(), None).unwrap(); +//! // Create a TCP listener bound to two addresses. +//! let socket = Socket::new(Domain::IPV6, Type::STREAM, None)?; //! -//! socket.bind(&"[::1]:12345".parse::().unwrap().into()).unwrap(); -//! socket.set_only_v6(false); -//! socket.listen(128).unwrap(); +//! socket.set_only_v6(false)?; +//! let address: SocketAddr = "[::1]:12345".parse().unwrap(); +//! socket.bind(&address.into())?; +//! socket.listen(128)?; //! -//! let listener = socket.into_tcp_listener(); +//! let listener: TcpListener = socket.into(); //! // ... +//! # drop(listener); +//! # Ok(()) } //! ``` +//! +//! ## Features +//! +//! This crate has a single feature `all`, which enables all functions even ones +//! that are not available on all OSs. #![doc(html_root_url = "https://docs.rs/socket2/0.3")] -#![deny(missing_docs)] +#![deny(missing_docs, missing_debug_implementations, rust_2018_idioms)] +// Show required OS/features on docs.rs. +#![cfg_attr(docsrs, feature(doc_cfg))] +// Disallow warnings when running tests. +#![cfg_attr(test, deny(warnings))] +// Disallow warnings in examples. +#![doc(test(attr(deny(warnings))))] -use crate::utils::NetInt; +use std::fmt; +use std::mem::MaybeUninit; +use std::net::SocketAddr; +use std::ops::{Deref, DerefMut}; +use std::time::Duration; /// Macro to implement `fmt::Debug` for a type, printing the constant names /// rather than a number. @@ -71,46 +97,75 @@ macro_rules! impl_debug { }; } +/// Macro to convert from one network type to another. +macro_rules! 
from { + ($from: ty, $for: ty) => { + impl From<$from> for $for { + fn from(socket: $from) -> $for { + #[cfg(unix)] + unsafe { + <$for>::from_raw_fd(socket.into_raw_fd()) + } + #[cfg(windows)] + unsafe { + <$for>::from_raw_socket(socket.into_raw_socket()) + } + } + } + }; +} + mod sockaddr; mod socket; -mod utils; +mod sockref; -#[cfg(test)] -mod tests; +#[cfg_attr(unix, path = "sys/unix.rs")] +#[cfg_attr(windows, path = "sys/windows.rs")] +mod sys; -#[cfg(unix)] -#[path = "sys/unix.rs"] -mod sys; -#[cfg(windows)] -#[path = "sys/windows.rs"] -mod sys; +#[cfg(not(any(windows, unix)))] +compile_error!("Socket2 doesn't support the compile target"); use sys::c_int; pub use sockaddr::SockAddr; pub use socket::Socket; +pub use sockref::SockRef; + +#[cfg(not(any( + target_os = "haiku", + target_os = "illumos", + target_os = "netbsd", + target_os = "redox", + target_os = "solaris", +)))] +pub use socket::InterfaceIndexOrAddress; /// Specification of the communication domain for a socket. /// /// This is a newtype wrapper around an integer which provides a nicer API in -/// addition to an injection point for documentation. Convenience constructors -/// such as `Domain::ipv4`, `Domain::ipv6`, etc, are provided to avoid reaching +/// addition to an injection point for documentation. Convenience constants such +/// as [`Domain::IPV4`], [`Domain::IPV6`], etc, are provided to avoid reaching /// into libc for various constants. /// /// This type is freely interconvertible with C's `int` type, however, if a raw /// value needs to be provided. -#[derive(Copy, Clone)] +#[derive(Copy, Clone, Eq, PartialEq)] pub struct Domain(c_int); impl Domain { /// Domain for IPv4 communication, corresponding to `AF_INET`. - pub fn ipv4() -> Domain { - Domain(sys::AF_INET) - } + pub const IPV4: Domain = Domain(sys::AF_INET); /// Domain for IPv6 communication, corresponding to `AF_INET6`. - pub fn ipv6() -> Domain { - Domain(sys::AF_INET6) + pub const IPV6: Domain = Domain(sys::AF_INET6); + + /// Returns the correct domain for `address`. + pub const fn for_address(address: SocketAddr) -> Domain { + match address { + SocketAddr::V4(_) => Domain::IPV4, + SocketAddr::V6(_) => Domain::IPV6, + } } } @@ -129,40 +184,35 @@ impl From for c_int { /// Specification of communication semantics on a socket. /// /// This is a newtype wrapper around an integer which provides a nicer API in -/// addition to an injection point for documentation. Convenience constructors -/// such as `Type::stream`, `Type::dgram`, etc, are provided to avoid reaching +/// addition to an injection point for documentation. Convenience constants such +/// as [`Type::STREAM`], [`Type::DGRAM`], etc, are provided to avoid reaching /// into libc for various constants. /// /// This type is freely interconvertible with C's `int` type, however, if a raw /// value needs to be provided. -#[derive(Copy, Clone)] +#[derive(Copy, Clone, Eq, PartialEq)] pub struct Type(c_int); impl Type { /// Type corresponding to `SOCK_STREAM`. /// /// Used for protocols such as TCP. - pub fn stream() -> Type { - Type(sys::SOCK_STREAM) - } + pub const STREAM: Type = Type(sys::SOCK_STREAM); /// Type corresponding to `SOCK_DGRAM`. /// /// Used for protocols such as UDP. - pub fn dgram() -> Type { - Type(sys::SOCK_DGRAM) - } + pub const DGRAM: Type = Type(sys::SOCK_DGRAM); /// Type corresponding to `SOCK_SEQPACKET`. 
- pub fn seqpacket() -> Type { - Type(sys::SOCK_SEQPACKET) - } + #[cfg(feature = "all")] + #[cfg_attr(docsrs, doc(cfg(feature = "all")))] + pub const SEQPACKET: Type = Type(sys::SOCK_SEQPACKET); /// Type corresponding to `SOCK_RAW`. - #[cfg(not(target_os = "redox"))] - pub fn raw() -> Type { - Type(sys::SOCK_RAW) - } + #[cfg(all(feature = "all", not(target_os = "redox")))] + #[cfg_attr(docsrs, doc(cfg(all(feature = "all", not(target_os = "redox")))))] + pub const RAW: Type = Type(sys::SOCK_RAW); } impl From for Type { @@ -184,29 +234,21 @@ impl From for c_int { /// /// This type is freely interconvertible with C's `int` type, however, if a raw /// value needs to be provided. -#[derive(Copy, Clone)] +#[derive(Copy, Clone, Eq, PartialEq)] pub struct Protocol(c_int); impl Protocol { /// Protocol corresponding to `ICMPv4`. - pub fn icmpv4() -> Self { - Protocol(sys::IPPROTO_ICMP) - } + pub const ICMPV4: Protocol = Protocol(sys::IPPROTO_ICMP); /// Protocol corresponding to `ICMPv6`. - pub fn icmpv6() -> Self { - Protocol(sys::IPPROTO_ICMPV6) - } + pub const ICMPV6: Protocol = Protocol(sys::IPPROTO_ICMPV6); /// Protocol corresponding to `TCP`. - pub fn tcp() -> Self { - Protocol(sys::IPPROTO_TCP) - } + pub const TCP: Protocol = Protocol(sys::IPPROTO_TCP); /// Protocol corresponding to `UDP`. - pub fn udp() -> Self { - Protocol(sys::IPPROTO_UDP) - } + pub const UDP: Protocol = Protocol(sys::IPPROTO_UDP); } impl From for Protocol { @@ -221,10 +263,180 @@ impl From for c_int { } } -fn hton(i: I) -> I { - i.to_be() +/// Flags for incoming messages. +/// +/// Flags provide additional information about incoming messages. +#[cfg(not(target_os = "redox"))] +#[cfg_attr(docsrs, doc(cfg(not(target_os = "redox"))))] +#[derive(Copy, Clone, Eq, PartialEq)] +pub struct RecvFlags(c_int); + +#[cfg(not(target_os = "redox"))] +impl RecvFlags { + /// Check if the message contains a truncated datagram. + /// + /// This flag is only used for datagram-based sockets, + /// not for stream sockets. + /// + /// On Unix this corresponds to the `MSG_TRUNC` flag. + /// On Windows this corresponds to the `WSAEMSGSIZE` error code. + pub const fn is_truncated(self) -> bool { + self.0 & sys::MSG_TRUNC != 0 + } } -fn ntoh(i: I) -> I { - I::from_be(i) +/// A version of [`IoSliceMut`] that allows the buffer to be uninitialised. +/// +/// [`IoSliceMut`]: std::io::IoSliceMut +#[repr(transparent)] +pub struct MaybeUninitSlice<'a>(sys::MaybeUninitSlice<'a>); + +impl<'a> fmt::Debug for MaybeUninitSlice<'a> { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Debug::fmt(self.0.as_slice(), fmt) + } +} + +impl<'a> MaybeUninitSlice<'a> { + /// Creates a new `MaybeUninitSlice` wrapping a byte slice. + /// + /// # Panics + /// + /// Panics on Windows if the slice is larger than 4GB. + pub fn new(buf: &'a mut [MaybeUninit]) -> MaybeUninitSlice<'a> { + MaybeUninitSlice(sys::MaybeUninitSlice::new(buf)) + } +} + +impl<'a> Deref for MaybeUninitSlice<'a> { + type Target = [MaybeUninit]; + + fn deref(&self) -> &[MaybeUninit] { + self.0.as_slice() + } +} + +impl<'a> DerefMut for MaybeUninitSlice<'a> { + fn deref_mut(&mut self) -> &mut [MaybeUninit] { + self.0.as_mut_slice() + } +} + +/// Configures a socket's TCP keepalive parameters. +/// +/// See [`Socket::set_tcp_keepalive`]. 
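The lib.rs hunks above replace socket2 0.3's constructor functions (Domain::ipv4(), Type::stream(), Protocol::tcp() and friends) with associated constants and add Domain::for_address. A minimal sketch of a call site updated for 0.4, in the spirit of the mdns_service change this patch makes; the loopback address is only illustrative:

use std::net::SocketAddr;
use socket2::{Domain, Protocol, Socket, Type};

fn main() -> std::io::Result<()> {
    // 0.3: Socket::new(Domain::ipv4(), Type::dgram(), Some(Protocol::udp()))
    // 0.4: associated constants, plus Domain::for_address to pick AF_INET
    //      or AF_INET6 to match the address.
    let address: SocketAddr = "127.0.0.1:0".parse().unwrap();
    let socket = Socket::new(Domain::for_address(address), Type::DGRAM, Some(Protocol::UDP))?;
    socket.bind(&address.into())?;
    Ok(())
}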
+#[derive(Debug, Clone)] +pub struct TcpKeepalive { + time: Option, + #[cfg(not(any(target_os = "redox", target_os = "solaris")))] + interval: Option, + #[cfg(not(any(target_os = "redox", target_os = "solaris", target_os = "windows")))] + retries: Option, +} + +impl TcpKeepalive { + /// Returns a new, empty set of TCP keepalive parameters. + pub const fn new() -> TcpKeepalive { + TcpKeepalive { + time: None, + #[cfg(not(any(target_os = "redox", target_os = "solaris")))] + interval: None, + #[cfg(not(any(target_os = "redox", target_os = "solaris", target_os = "windows")))] + retries: None, + } + } + + /// Set the amount of time after which TCP keepalive probes will be sent on + /// idle connections. + /// + /// This will set `TCP_KEEPALIVE` on macOS and iOS, and + /// `TCP_KEEPIDLE` on all other Unix operating systems, except + /// OpenBSD and Haiku which don't support any way to set this + /// option. On Windows, this sets the value of the `tcp_keepalive` + /// struct's `keepalivetime` field. + /// + /// Some platforms specify this value in seconds, so sub-second + /// specifications may be omitted. + pub const fn with_time(self, time: Duration) -> Self { + Self { + time: Some(time), + ..self + } + } + + /// Set the value of the `TCP_KEEPINTVL` option. On Windows, this sets the + /// value of the `tcp_keepalive` struct's `keepaliveinterval` field. + /// + /// Sets the time interval between TCP keepalive probes. + /// + /// Some platforms specify this value in seconds, so sub-second + /// specifications may be omitted. + #[cfg(all( + feature = "all", + any( + target_os = "dragonfly", + target_os = "freebsd", + target_os = "fuchsia", + target_os = "linux", + target_os = "netbsd", + target_vendor = "apple", + windows, + ) + ))] + #[cfg_attr( + docsrs, + doc(cfg(all( + feature = "all", + any( + target_os = "freebsd", + target_os = "fuchsia", + target_os = "linux", + target_os = "netbsd", + target_vendor = "apple", + windows, + ) + ))) + )] + pub const fn with_interval(self, interval: Duration) -> Self { + Self { + interval: Some(interval), + ..self + } + } + + /// Set the value of the `TCP_KEEPCNT` option. + /// + /// Set the maximum number of TCP keepalive probes that will be sent before + /// dropping a connection, if TCP keepalive is enabled on this socket. 
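The TcpKeepalive type added above is a small const builder: start from TcpKeepalive::new() and chain whichever with_* setters exist for the target. A usage sketch, assuming the crate's all feature is enabled (with_interval is gated on it above) and assuming Socket::set_tcp_keepalive, which the doc comment above references but this hunk does not show:

use std::time::Duration;
use socket2::{Domain, Socket, TcpKeepalive, Type};

fn main() -> std::io::Result<()> {
    let socket = Socket::new(Domain::IPV4, Type::STREAM, None)?;
    // First probe after 60 seconds of idle time, then every 10 seconds.
    let keepalive = TcpKeepalive::new()
        .with_time(Duration::from_secs(60))
        .with_interval(Duration::from_secs(10));
    socket.set_tcp_keepalive(&keepalive)?;
    Ok(())
}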
+ #[cfg(all( + feature = "all", + any( + doc, + target_os = "dragonfly", + target_os = "freebsd", + target_os = "fuchsia", + target_os = "linux", + target_os = "netbsd", + target_vendor = "apple", + ) + ))] + #[cfg_attr( + docsrs, + doc(cfg(all( + feature = "all", + any( + target_os = "freebsd", + target_os = "fuchsia", + target_os = "linux", + target_os = "netbsd", + target_vendor = "apple", + ) + ))) + )] + pub const fn with_retries(self, retries: u32) -> Self { + Self { + retries: Some(retries), + ..self + } + } } diff --git a/third_party/rust/socket2/src/sockaddr.rs b/third_party/rust/socket2/src/sockaddr.rs index dd8ffc016890..55951b63d257 100644 --- a/third_party/rust/socket2/src/sockaddr.rs +++ b/third_party/rust/socket2/src/sockaddr.rs @@ -1,171 +1,179 @@ -use std::fmt; -use std::mem::{self, MaybeUninit}; -use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6}; -use std::ptr; +use std::mem::{self, size_of, MaybeUninit}; +use std::net::{SocketAddr, SocketAddrV4, SocketAddrV6}; +use std::{fmt, io}; -#[cfg(any(unix, target_os = "redox"))] -use libc::{ - in6_addr, in_addr, sa_family_t, sockaddr, sockaddr_in, sockaddr_in6, sockaddr_storage, - socklen_t, AF_INET, AF_INET6, +use crate::sys::{ + sa_family_t, sockaddr, sockaddr_in, sockaddr_in6, sockaddr_storage, socklen_t, AF_INET, + AF_INET6, }; #[cfg(windows)] -use winapi::shared::in6addr::{in6_addr_u, IN6_ADDR as in6_addr}; -#[cfg(windows)] -use winapi::shared::inaddr::{in_addr_S_un, IN_ADDR as in_addr}; -#[cfg(windows)] -use winapi::shared::ws2def::{ - ADDRESS_FAMILY as sa_family_t, AF_INET, AF_INET6, SOCKADDR as sockaddr, - SOCKADDR_IN as sockaddr_in, SOCKADDR_STORAGE as sockaddr_storage, -}; -#[cfg(windows)] -use winapi::shared::ws2ipdef::{SOCKADDR_IN6_LH_u, SOCKADDR_IN6_LH as sockaddr_in6}; -#[cfg(windows)] -use winapi::um::ws2tcpip::socklen_t; +use winapi::shared::ws2ipdef::SOCKADDR_IN6_LH_u; /// The address of a socket. /// /// `SockAddr`s may be constructed directly to and from the standard library -/// `SocketAddr`, `SocketAddrV4`, and `SocketAddrV6` types. +/// [`SocketAddr`], [`SocketAddrV4`], and [`SocketAddrV6`] types. pub struct SockAddr { storage: sockaddr_storage, len: socklen_t, } -impl fmt::Debug for SockAddr { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - let mut builder = fmt.debug_struct("SockAddr"); - builder.field("family", &self.family()); - if let Some(addr) = self.as_inet() { - builder.field("inet", &addr); - } else if let Some(addr) = self.as_inet6() { - builder.field("inet6", &addr); - } - builder.finish() - } -} - +#[allow(clippy::len_without_is_empty)] impl SockAddr { - /// Constructs a `SockAddr` from its raw components. - pub unsafe fn from_raw_parts(addr: *const sockaddr, len: socklen_t) -> SockAddr { - let mut storage = MaybeUninit::::zeroed(); - ptr::copy_nonoverlapping( - addr as *const _ as *const u8, - storage.as_mut_ptr() as *mut u8, - len as usize, - ); - - SockAddr { - // This is safe as we written the address to `storage` above. - storage: storage.assume_init(), - len, - } + /// Create a `SockAddr` from the underlying storage and its length. + /// + /// # Safety + /// + /// Caller must ensure that the address family and length match the type of + /// storage address. For example if `storage.ss_family` is set to `AF_INET` + /// the `storage` must be initialised as `sockaddr_in`, setting the content + /// and length appropriately. 
+ /// + /// # Examples + /// + /// ``` + /// # fn main() -> std::io::Result<()> { + /// # #[cfg(unix)] { + /// use std::io; + /// use std::mem; + /// use std::os::unix::io::AsRawFd; + /// + /// use socket2::{SockAddr, Socket, Domain, Type}; + /// + /// let socket = Socket::new(Domain::IPV4, Type::STREAM, None)?; + /// + /// // Initialise a `SocketAddr` byte calling `getsockname(2)`. + /// let mut addr_storage: libc::sockaddr_storage = unsafe { mem::zeroed() }; + /// let mut len = mem::size_of_val(&addr_storage) as libc::socklen_t; + /// + /// // The `getsockname(2)` system call will intiliase `storage` for + /// // us, setting `len` to the correct length. + /// let res = unsafe { + /// libc::getsockname( + /// socket.as_raw_fd(), + /// (&mut addr_storage as *mut libc::sockaddr_storage).cast(), + /// &mut len, + /// ) + /// }; + /// if res == -1 { + /// return Err(io::Error::last_os_error()); + /// } + /// + /// let address = unsafe { SockAddr::new(addr_storage, len) }; + /// # drop(address); + /// # } + /// # Ok(()) + /// # } + /// ``` + pub const unsafe fn new(storage: sockaddr_storage, len: socklen_t) -> SockAddr { + SockAddr { storage, len } } - /// Constructs a `SockAddr` with the family `AF_UNIX` and the provided path. + /// Initialise a `SockAddr` by calling the function `init`. /// - /// This function is only available on Unix when the `unix` feature is - /// enabled. + /// The type of the address storage and length passed to the function `init` + /// is OS/architecture specific. /// - /// # Failure + /// The address is zeroed before `init` is called and is thus valid to + /// dereference and read from. The length initialised to the maximum length + /// of the storage. /// - /// Returns an error if the path is longer than `SUN_LEN`. - #[cfg(all(unix, feature = "unix"))] - pub fn unix

(path: P) -> ::std::io::Result + /// # Safety + /// + /// Caller must ensure that the address family and length match the type of + /// storage address. For example if `storage.ss_family` is set to `AF_INET` + /// the `storage` must be initialised as `sockaddr_in`, setting the content + /// and length appropriately. + /// + /// # Examples + /// + /// ``` + /// # fn main() -> std::io::Result<()> { + /// # #[cfg(unix)] { + /// use std::io; + /// use std::os::unix::io::AsRawFd; + /// + /// use socket2::{SockAddr, Socket, Domain, Type}; + /// + /// let socket = Socket::new(Domain::IPV4, Type::STREAM, None)?; + /// + /// // Initialise a `SocketAddr` byte calling `getsockname(2)`. + /// let (_, address) = unsafe { + /// SockAddr::init(|addr_storage, len| { + /// // The `getsockname(2)` system call will intiliase `storage` for + /// // us, setting `len` to the correct length. + /// if libc::getsockname(socket.as_raw_fd(), addr_storage.cast(), len) == -1 { + /// Err(io::Error::last_os_error()) + /// } else { + /// Ok(()) + /// } + /// }) + /// }?; + /// # drop(address); + /// # } + /// # Ok(()) + /// # } + /// ``` + pub unsafe fn init(init: F) -> io::Result<(T, SockAddr)> where - P: AsRef<::std::path::Path>, + F: FnOnce(*mut sockaddr_storage, *mut socklen_t) -> io::Result, { - use libc::{c_char, sockaddr_un, AF_UNIX}; - use std::cmp::Ordering; - use std::io; - use std::os::unix::ffi::OsStrExt; - - unsafe { - let mut addr = mem::zeroed::(); - addr.sun_family = AF_UNIX as sa_family_t; - - let bytes = path.as_ref().as_os_str().as_bytes(); - - match (bytes.get(0), bytes.len().cmp(&addr.sun_path.len())) { - // Abstract paths don't need a null terminator - (Some(&0), Ordering::Greater) => { - return Err(io::Error::new( - io::ErrorKind::InvalidInput, - "path must be no longer than SUN_LEN", - )); - } - (Some(&0), _) => {} - (_, Ordering::Greater) | (_, Ordering::Equal) => { - return Err(io::Error::new( - io::ErrorKind::InvalidInput, - "path must be shorter than SUN_LEN", - )); - } - _ => {} - } - - for (dst, src) in addr.sun_path.iter_mut().zip(bytes) { - *dst = *src as c_char; - } - // null byte for pathname is already there since we zeroed up front - - let base = &addr as *const _ as usize; - let path = &addr.sun_path as *const _ as usize; - let sun_path_offset = path - base; - - let mut len = sun_path_offset + bytes.len(); - match bytes.get(0) { - Some(&0) | None => {} - Some(_) => len += 1, - } - Ok(SockAddr::from_raw_parts( - &addr as *const _ as *const _, - len as socklen_t, - )) - } + const STORAGE_SIZE: socklen_t = size_of::() as socklen_t; + // NOTE: `SockAddr::unix` depends on the storage being zeroed before + // calling `init`. + // NOTE: calling `recvfrom` with an empty buffer also depends on the + // storage being zeroed before calling `init` as the OS might not + // initialise it. + let mut storage = MaybeUninit::::zeroed(); + let mut len = STORAGE_SIZE; + init(storage.as_mut_ptr(), &mut len).map(|res| { + debug_assert!(len <= STORAGE_SIZE, "overflown address storage"); + let addr = SockAddr { + // Safety: zeroed-out `sockaddr_storage` is valid, caller must + // ensure at least `len` bytes are valid. + storage: storage.assume_init(), + len, + }; + (res, addr) + }) } - /// Returns this address as a `SocketAddrV4` if it is in the `AF_INET` - /// family. - pub fn as_inet(&self) -> Option { - match self.as_std() { - Some(SocketAddr::V4(addr)) => Some(addr), - _ => None, - } + /// Returns this address's family. 
+ pub const fn family(&self) -> sa_family_t { + self.storage.ss_family } - /// Returns this address as a `SocketAddrV6` if it is in the `AF_INET6` - /// family. - pub fn as_inet6(&self) -> Option { - match self.as_std() { - Some(SocketAddr::V6(addr)) => Some(addr), - _ => None, - } + /// Returns the size of this address in bytes. + pub const fn len(&self) -> socklen_t { + self.len } - /// Returns this address as a `SocketAddr` if it is in the `AF_INET` - /// or `AF_INET6` family, otherwise returns `None`. - pub fn as_std(&self) -> Option { + /// Returns a raw pointer to the address. + pub const fn as_ptr(&self) -> *const sockaddr { + &self.storage as *const _ as *const _ + } + + /// Returns a raw pointer to the address storage. + #[cfg(all(unix, not(target_os = "redox")))] + pub(crate) const fn as_storage_ptr(&self) -> *const sockaddr_storage { + &self.storage + } + + /// Returns this address as a `SocketAddr` if it is in the `AF_INET` (IPv4) + /// or `AF_INET6` (IPv6) family, otherwise returns `None`. + pub fn as_socket(&self) -> Option { if self.storage.ss_family == AF_INET as sa_family_t { // Safety: if the ss_family field is AF_INET then storage must be a sockaddr_in. let addr = unsafe { &*(&self.storage as *const _ as *const sockaddr_in) }; - #[cfg(unix)] - let ip = Ipv4Addr::from(addr.sin_addr.s_addr.to_ne_bytes()); - #[cfg(windows)] - let ip = { - let ip_bytes = unsafe { addr.sin_addr.S_un.S_un_b() }; - Ipv4Addr::from([ip_bytes.s_b1, ip_bytes.s_b2, ip_bytes.s_b3, ip_bytes.s_b4]) - }; + let ip = crate::sys::from_in_addr(addr.sin_addr); let port = u16::from_be(addr.sin_port); Some(SocketAddr::V4(SocketAddrV4::new(ip, port))) } else if self.storage.ss_family == AF_INET6 as sa_family_t { // Safety: if the ss_family field is AF_INET6 then storage must be a sockaddr_in6. let addr = unsafe { &*(&self.storage as *const _ as *const sockaddr_in6) }; - #[cfg(unix)] - let ip = Ipv6Addr::from(addr.sin6_addr.s6_addr); - #[cfg(windows)] - let ip = Ipv6Addr::from(*unsafe { addr.sin6_addr.u.Byte() }); + let ip = crate::sys::from_in6_addr(addr.sin6_addr); let port = u16::from_be(addr.sin6_port); Some(SocketAddr::V6(SocketAddrV6::new( ip, @@ -183,39 +191,40 @@ impl SockAddr { } } - /// Returns this address's family. - pub fn family(&self) -> sa_family_t { - self.storage.ss_family + /// Returns this address as a [`SocketAddrV4`] if it is in the `AF_INET` + /// family. + pub fn as_socket_ipv4(&self) -> Option { + match self.as_socket() { + Some(SocketAddr::V4(addr)) => Some(addr), + _ => None, + } } - /// Returns the size of this address in bytes. - pub fn len(&self) -> socklen_t { - self.len + /// Returns this address as a [`SocketAddrV6`] if it is in the `AF_INET6` + /// family. + pub fn as_socket_ipv6(&self) -> Option { + match self.as_socket() { + Some(SocketAddr::V6(addr)) => Some(addr), + _ => None, + } } +} - /// Returns a raw pointer to the address. 
- pub fn as_ptr(&self) -> *const sockaddr { - &self.storage as *const _ as *const _ +impl From for SockAddr { + fn from(addr: SocketAddr) -> SockAddr { + match addr { + SocketAddr::V4(addr) => addr.into(), + SocketAddr::V6(addr) => addr.into(), + } } } impl From for SockAddr { fn from(addr: SocketAddrV4) -> SockAddr { - #[cfg(unix)] - let sin_addr = in_addr { - s_addr: u32::from_ne_bytes(addr.ip().octets()), - }; - #[cfg(windows)] - let sin_addr = unsafe { - let mut s_un = mem::zeroed::(); - *s_un.S_addr_mut() = u32::from_ne_bytes(addr.ip().octets()); - in_addr { S_un: s_un } - }; - let sockaddr_in = sockaddr_in { sin_family: AF_INET as sa_family_t, sin_port: addr.port().to_be(), - sin_addr, + sin_addr: crate::sys::to_in_addr(addr.ip()), sin_zero: Default::default(), #[cfg(any( target_os = "dragonfly", @@ -240,16 +249,6 @@ impl From for SockAddr { impl From for SockAddr { fn from(addr: SocketAddrV6) -> SockAddr { - #[cfg(unix)] - let sin6_addr = in6_addr { - s6_addr: addr.ip().octets(), - }; - #[cfg(windows)] - let sin6_addr = unsafe { - let mut u = mem::zeroed::(); - *u.Byte_mut() = addr.ip().octets(); - in6_addr { u } - }; #[cfg(windows)] let u = unsafe { let mut u = mem::zeroed::(); @@ -260,7 +259,7 @@ impl From for SockAddr { let sockaddr_in6 = sockaddr_in6 { sin6_family: AF_INET6 as sa_family_t, sin6_port: addr.port().to_be(), - sin6_addr, + sin6_addr: crate::sys::to_in6_addr(addr.ip()), sin6_flowinfo: addr.flowinfo(), #[cfg(unix)] sin6_scope_id: addr.scope_id(), @@ -289,36 +288,61 @@ impl From for SockAddr { } } -impl From for SockAddr { - fn from(addr: SocketAddr) -> SockAddr { - match addr { - SocketAddr::V4(addr) => addr.into(), - SocketAddr::V6(addr) => addr.into(), - } +impl fmt::Debug for SockAddr { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + let mut f = fmt.debug_struct("SockAddr"); + #[cfg(any( + target_os = "dragonfly", + target_os = "freebsd", + target_os = "haiku", + target_os = "hermit", + target_os = "ios", + target_os = "macos", + target_os = "netbsd", + target_os = "openbsd", + target_os = "vxworks", + ))] + f.field("ss_len", &self.storage.ss_len); + f.field("ss_family", &self.storage.ss_family) + .field("len", &self.len) + .finish() } } -#[cfg(test)] -mod test { - use super::*; +#[test] +fn ipv4() { + use std::net::Ipv4Addr; + let std = SocketAddrV4::new(Ipv4Addr::new(1, 2, 3, 4), 9876); + let addr = SockAddr::from(std); + assert_eq!(addr.family(), AF_INET as sa_family_t); + assert_eq!(addr.len(), size_of::() as socklen_t); + assert_eq!(addr.as_socket(), Some(SocketAddr::V4(std))); + assert_eq!(addr.as_socket_ipv4(), Some(std)); + assert!(addr.as_socket_ipv6().is_none()); - #[test] - fn inet() { - let raw = "127.0.0.1:80".parse::().unwrap(); - let addr = SockAddr::from(raw); - assert!(addr.as_inet6().is_none()); - let addr = addr.as_inet().unwrap(); - assert_eq!(raw, addr); - } - - #[test] - fn inet6() { - let raw = "[2001:db8::ff00:42:8329]:80" - .parse::() - .unwrap(); - let addr = SockAddr::from(raw); - assert!(addr.as_inet().is_none()); - let addr = addr.as_inet6().unwrap(); - assert_eq!(raw, addr); - } + let addr = SockAddr::from(SocketAddr::from(std)); + assert_eq!(addr.family(), AF_INET as sa_family_t); + assert_eq!(addr.len(), size_of::() as socklen_t); + assert_eq!(addr.as_socket(), Some(SocketAddr::V4(std))); + assert_eq!(addr.as_socket_ipv4(), Some(std)); + assert!(addr.as_socket_ipv6().is_none()); +} + +#[test] +fn ipv6() { + use std::net::Ipv6Addr; + let std = SocketAddrV6::new(Ipv6Addr::new(1, 2, 3, 4, 5, 6, 7, 8), 9876, 11, 12); + 
let addr = SockAddr::from(std); + assert_eq!(addr.family(), AF_INET6 as sa_family_t); + assert_eq!(addr.len(), size_of::() as socklen_t); + assert_eq!(addr.as_socket(), Some(SocketAddr::V6(std))); + assert!(addr.as_socket_ipv4().is_none()); + assert_eq!(addr.as_socket_ipv6(), Some(std)); + + let addr = SockAddr::from(SocketAddr::from(std)); + assert_eq!(addr.family(), AF_INET6 as sa_family_t); + assert_eq!(addr.len(), size_of::() as socklen_t); + assert_eq!(addr.as_socket(), Some(SocketAddr::V6(std))); + assert!(addr.as_socket_ipv4().is_none()); + assert_eq!(addr.as_socket_ipv6(), Some(std)); } diff --git a/third_party/rust/socket2/src/socket.rs b/third_party/rust/socket2/src/socket.rs index 1f1fcc92b258..028c4aed5dfe 100644 --- a/third_party/rust/socket2/src/socket.rs +++ b/third_party/rust/socket2/src/socket.rs @@ -1,6 +1,4 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. +// Copyright 2015 The Rust Project Developers. // // Licensed under the Apache License, Version 2.0 or the MIT license @@ -8,24 +6,24 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#[cfg(target_os = "linux")] -use std::ffi::{CStr, CString}; use std::fmt; use std::io::{self, Read, Write}; +#[cfg(not(target_os = "redox"))] +use std::io::{IoSlice, IoSliceMut}; +use std::mem::MaybeUninit; use std::net::{self, Ipv4Addr, Ipv6Addr, Shutdown}; -#[cfg(all(unix, feature = "unix"))] -use std::os::unix::net::{UnixDatagram, UnixListener, UnixStream}; +#[cfg(unix)] +use std::os::unix::io::{FromRawFd, IntoRawFd}; +#[cfg(windows)] +use std::os::windows::io::{FromRawSocket, IntoRawSocket}; use std::time::Duration; -#[cfg(any(unix, target_os = "redox"))] -use libc::MSG_OOB; -#[cfg(windows)] -use winapi::um::winsock2::MSG_OOB; +use crate::sys::{self, c_int, getsockopt, setsockopt, Bool}; +use crate::{Domain, Protocol, SockAddr, TcpKeepalive, Type}; +#[cfg(not(target_os = "redox"))] +use crate::{MaybeUninitSlice, RecvFlags}; -use crate::sys; -use crate::{Domain, Protocol, SockAddr, Type}; - -/// Newtype, owned, wrapper around a system socket. +/// Owned wrapper around a system socket. /// /// This type simply wraps an instance of a file descriptor (`c_int`) on Unix /// and an instance of `SOCKET` on Windows. This is the main type exported by @@ -34,108 +32,171 @@ use crate::{Domain, Protocol, SockAddr, Type}; /// precisely one libc or OS API call which is essentially just a "Rustic /// translation" of what's below. /// -/// # Examples +/// ## Converting to and from other types /// +/// This type can be freely converted into the network primitives provided by +/// the standard library, such as [`TcpStream`] or [`UdpSocket`], using the +/// [`From`] trait, see the example below. +/// +/// [`TcpStream`]: std::net::TcpStream +/// [`UdpSocket`]: std::net::UdpSocket +/// +/// # Notes +/// +/// Some methods that set options on `Socket` require two system calls to set +/// there options without overwriting previously set options. We do this by +/// first getting the current settings, applying the desired changes and than +/// updating the settings. This means that the operation is **not** atomic. This +/// can lead to a data race when two threads are changing options in parallel. 
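The sockaddr.rs hunks above replace as_inet/as_inet6 with SockAddr::as_socket and friends, and the new Socket docs above lean on std From conversions instead of the 0.3 into_* helpers (removed further down). A minimal sketch of the 0.4 idiom, with the loopback listener chosen purely for illustration:

use std::net::{SocketAddr, TcpListener};
use socket2::{Domain, SockAddr, Socket, Type};

fn main() -> std::io::Result<()> {
    let socket = Socket::new(Domain::IPV4, Type::STREAM, None)?;
    let address: SocketAddr = "127.0.0.1:0".parse().unwrap();
    socket.bind(&SockAddr::from(address))?;
    socket.listen(128)?;

    // 0.3: socket.into_tcp_listener()   0.4: the From impls generated above.
    let listener: TcpListener = socket.into();

    // 0.3: addr.as_inet()               0.4: as_socket()/as_socket_ipv4().
    let local: SockAddr = listener.local_addr()?.into();
    assert!(local.as_socket_ipv4().is_some());
    Ok(())
}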
+/// +/// # Examples /// ```no_run -/// use std::net::SocketAddr; -/// use socket2::{Socket, Domain, Type, SockAddr}; +/// # fn main() -> std::io::Result<()> { +/// use std::net::{SocketAddr, TcpListener}; +/// use socket2::{Socket, Domain, Type}; /// /// // create a TCP listener bound to two addresses -/// let socket = Socket::new(Domain::ipv4(), Type::stream(), None).unwrap(); +/// let socket = Socket::new(Domain::IPV4, Type::STREAM, None)?; /// -/// socket.bind(&"127.0.0.1:12345".parse::().unwrap().into()).unwrap(); -/// socket.bind(&"127.0.0.1:12346".parse::().unwrap().into()).unwrap(); -/// socket.listen(128).unwrap(); +/// let address: SocketAddr = "[::1]:12345".parse().unwrap(); +/// let address = address.into(); +/// socket.bind(&address)?; +/// socket.bind(&address)?; +/// socket.listen(128)?; /// -/// let listener = socket.into_tcp_listener(); +/// let listener: TcpListener = socket.into(); /// // ... +/// # drop(listener); +/// # Ok(()) } /// ``` pub struct Socket { - // The `sys` module most have access to the socket. - pub(crate) inner: sys::Socket, + inner: Inner, } +/// Store a `TcpStream` internally to take advantage of its niche optimizations on Unix platforms. +pub(crate) type Inner = std::net::TcpStream; + impl Socket { + /// # Safety + /// + /// The caller must ensure `raw` is a valid file descriptor/socket. NOTE: + /// this should really be marked `unsafe`, but this being an internal + /// function, often passed as mapping function, it's makes it very + /// inconvenient to mark it as `unsafe`. + pub(crate) fn from_raw(raw: sys::Socket) -> Socket { + Socket { + inner: unsafe { + // SAFETY: the caller must ensure that `raw` is a valid file + // descriptor, but when it isn't it could return I/O errors, or + // potentially close a fd it doesn't own. All of that isn't + // memory unsafe, so it's not desired but never memory unsafe or + // causes UB. + // + // However there is one exception. We use `TcpStream` to + // represent the `Socket` internally (see `Inner` type), + // `TcpStream` has a layout optimisation that doesn't allow for + // negative file descriptors (as those are always invalid). + // Violating this assumption (fd never negative) causes UB, + // something we don't want. So check for that we have this + // `assert!`. + #[cfg(unix)] + assert!(raw >= 0, "tried to create a `Socket` with an invalid fd"); + sys::socket_from_raw(raw) + }, + } + } + + pub(crate) fn as_raw(&self) -> sys::Socket { + sys::socket_as_raw(&self.inner) + } + + pub(crate) fn into_raw(self) -> sys::Socket { + sys::socket_into_raw(self.inner) + } + + /// Creates a new socket and sets common flags. + /// + /// This function corresponds to `socket(2)` on Unix and `WSASocketW` on + /// Windows. + /// + /// On Unix-like systems, the close-on-exec flag is set on the new socket. + /// Additionally, on Apple platforms `SOCK_NOSIGPIPE` is set. On Windows, + /// the socket is made non-inheritable. + /// + /// [`Socket::new_raw`] can be used if you don't want these flags to be set. + pub fn new(domain: Domain, ty: Type, protocol: Option) -> io::Result { + let ty = set_common_type(ty); + Socket::new_raw(domain, ty, protocol).and_then(set_common_flags) + } + /// Creates a new socket ready to be configured. /// - /// This function corresponds to `socket(2)` and simply creates a new - /// socket, no other configuration is done and further functions must be - /// invoked to configure this socket. 
- pub fn new(domain: Domain, type_: Type, protocol: Option) -> io::Result { + /// This function corresponds to `socket(2)` on Unix and `WSASocketW` on + /// Windows and simply creates a new socket, no other configuration is done. + pub fn new_raw(domain: Domain, ty: Type, protocol: Option) -> io::Result { let protocol = protocol.map(|p| p.0).unwrap_or(0); - Ok(Socket { - inner: sys::Socket::new(domain.0, type_.0, protocol)?, - }) + sys::socket(domain.0, ty.0, protocol).map(Socket::from_raw) } /// Creates a pair of sockets which are connected to each other. /// /// This function corresponds to `socketpair(2)`. /// - /// This function is only available on Unix when the `pair` feature is - /// enabled. - #[cfg(all(unix, feature = "pair"))] + /// This function sets the same flags as in done for [`Socket::new`], + /// [`Socket::pair_raw`] can be used if you don't want to set those flags. + #[cfg(any(doc, all(feature = "all", unix)))] + #[cfg_attr(docsrs, doc(cfg(all(feature = "all", unix))))] pub fn pair( domain: Domain, - type_: Type, + ty: Type, + protocol: Option, + ) -> io::Result<(Socket, Socket)> { + let ty = set_common_type(ty); + let (a, b) = Socket::pair_raw(domain, ty, protocol)?; + let a = set_common_flags(a)?; + let b = set_common_flags(b)?; + Ok((a, b)) + } + + /// Creates a pair of sockets which are connected to each other. + /// + /// This function corresponds to `socketpair(2)`. + #[cfg(any(doc, all(feature = "all", unix)))] + #[cfg_attr(docsrs, doc(cfg(all(feature = "all", unix))))] + pub fn pair_raw( + domain: Domain, + ty: Type, protocol: Option, ) -> io::Result<(Socket, Socket)> { let protocol = protocol.map(|p| p.0).unwrap_or(0); - let sockets = sys::Socket::pair(domain.0, type_.0, protocol)?; - Ok((Socket { inner: sockets.0 }, Socket { inner: sockets.1 })) + sys::socketpair(domain.0, ty.0, protocol) + .map(|[a, b]| (Socket::from_raw(a), Socket::from_raw(b))) } - /// Consumes this `Socket`, converting it to a `TcpStream`. - pub fn into_tcp_stream(self) -> net::TcpStream { - self.into() - } - - /// Consumes this `Socket`, converting it to a `TcpListener`. - pub fn into_tcp_listener(self) -> net::TcpListener { - self.into() - } - - /// Consumes this `Socket`, converting it to a `UdpSocket`. - pub fn into_udp_socket(self) -> net::UdpSocket { - self.into() - } - - /// Consumes this `Socket`, converting it into a `UnixStream`. + /// Binds this socket to the specified address. /// - /// This function is only available on Unix when the `unix` feature is - /// enabled. - #[cfg(all(unix, feature = "unix"))] - pub fn into_unix_stream(self) -> UnixStream { - self.into() - } - - /// Consumes this `Socket`, converting it into a `UnixListener`. - /// - /// This function is only available on Unix when the `unix` feature is - /// enabled. - #[cfg(all(unix, feature = "unix"))] - pub fn into_unix_listener(self) -> UnixListener { - self.into() - } - - /// Consumes this `Socket`, converting it into a `UnixDatagram`. - /// - /// This function is only available on Unix when the `unix` feature is - /// enabled. - #[cfg(all(unix, feature = "unix"))] - pub fn into_unix_datagram(self) -> UnixDatagram { - self.into() + /// This function directly corresponds to the `bind(2)` function on Windows + /// and Unix. + pub fn bind(&self, address: &SockAddr) -> io::Result<()> { + sys::bind(self.as_raw(), address) } /// Initiate a connection on this socket to the specified address. /// - /// This function directly corresponds to the connect(2) function on Windows - /// and Unix. 
+ /// This function directly corresponds to the `connect(2)` function on + /// Windows and Unix. /// /// An error will be returned if `listen` or `connect` has already been /// called on this builder. - pub fn connect(&self, addr: &SockAddr) -> io::Result<()> { - self.inner.connect(addr) + /// + /// # Notes + /// + /// When using a non-blocking connect (by setting the socket into + /// non-blocking mode before calling this function), socket option can't be + /// set *while connecting*. This will cause errors on Windows. Socket + /// options can be safely set before and after connecting the socket. + pub fn connect(&self, address: &SockAddr) -> io::Result<()> { + sys::connect(self.as_raw(), address) } /// Initiate a connection on this socket to the specified address, only @@ -151,82 +212,147 @@ impl Socket { /// /// # Warnings /// - /// The nonblocking state of the socket is overridden by this function - + /// The non-blocking state of the socket is overridden by this function - /// it will be returned in blocking mode on success, and in an indeterminate /// state on failure. /// /// If the connection request times out, it may still be processing in the /// background - a second call to `connect` or `connect_timeout` may fail. pub fn connect_timeout(&self, addr: &SockAddr, timeout: Duration) -> io::Result<()> { - self.inner.connect_timeout(addr, timeout) - } + self.set_nonblocking(true)?; + let res = self.connect(addr); + self.set_nonblocking(false)?; - /// Binds this socket to the specified address. - /// - /// This function directly corresponds to the bind(2) function on Windows - /// and Unix. - pub fn bind(&self, addr: &SockAddr) -> io::Result<()> { - self.inner.bind(addr) + match res { + Ok(()) => return Ok(()), + Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {} + #[cfg(unix)] + Err(ref e) if e.raw_os_error() == Some(libc::EINPROGRESS) => {} + Err(e) => return Err(e), + } + + sys::poll_connect(self, timeout) } /// Mark a socket as ready to accept incoming connection requests using - /// accept() + /// [`Socket::accept()`]. /// - /// This function directly corresponds to the listen(2) function on Windows - /// and Unix. + /// This function directly corresponds to the `listen(2)` function on + /// Windows and Unix. /// /// An error will be returned if `listen` or `connect` has already been /// called on this builder. - pub fn listen(&self, backlog: i32) -> io::Result<()> { - self.inner.listen(backlog) + pub fn listen(&self, backlog: c_int) -> io::Result<()> { + sys::listen(self.as_raw(), backlog) } /// Accept a new incoming connection from this listener. /// - /// This function will block the calling thread until a new connection is - /// established. When established, the corresponding `Socket` and the - /// remote peer's address will be returned. + /// This function uses `accept4(2)` on platforms that support it and + /// `accept(2)` platforms that do not. + /// + /// This function sets the same flags as in done for [`Socket::new`], + /// [`Socket::accept_raw`] can be used if you don't want to set those flags. pub fn accept(&self) -> io::Result<(Socket, SockAddr)> { - self.inner - .accept() - .map(|(socket, addr)| (Socket { inner: socket }, addr)) + // Use `accept4` on platforms that support it. 
+ #[cfg(any( + target_os = "android", + target_os = "dragonfly", + target_os = "freebsd", + target_os = "fuchsia", + target_os = "illumos", + target_os = "linux", + target_os = "netbsd", + target_os = "openbsd", + ))] + return self._accept4(libc::SOCK_CLOEXEC); + + // Fall back to `accept` on platforms that do not support `accept4`. + #[cfg(not(any( + target_os = "android", + target_os = "dragonfly", + target_os = "freebsd", + target_os = "fuchsia", + target_os = "illumos", + target_os = "linux", + target_os = "netbsd", + target_os = "openbsd", + )))] + { + let (socket, addr) = self.accept_raw()?; + let socket = set_common_flags(socket)?; + // `set_common_flags` does not disable inheritance on Windows because `Socket::new` + // unlike `accept` is able to create the socket with inheritance disabled. + #[cfg(windows)] + socket._set_no_inherit(true)?; + Ok((socket, addr)) + } } - /// Returns the socket address of the local half of this TCP connection. + /// Accept a new incoming connection from this listener. + /// + /// This function directly corresponds to the `accept(2)` function on + /// Windows and Unix. + pub fn accept_raw(&self) -> io::Result<(Socket, SockAddr)> { + sys::accept(self.as_raw()).map(|(inner, addr)| (Socket::from_raw(inner), addr)) + } + + /// Returns the socket address of the local half of this socket. + /// + /// # Notes + /// + /// Depending on the OS this may return an error if the socket is not + /// [bound]. + /// + /// [bound]: Socket::bind pub fn local_addr(&self) -> io::Result { - self.inner.local_addr() + sys::getsockname(self.as_raw()) } - /// Returns the socket address of the remote peer of this TCP connection. + /// Returns the socket address of the remote peer of this socket. + /// + /// # Notes + /// + /// This returns an error if the socket is not [`connect`ed]. + /// + /// [`connect`ed]: Socket::connect pub fn peer_addr(&self) -> io::Result { - self.inner.peer_addr() + sys::getpeername(self.as_raw()) + } + + /// Returns the [`Type`] of this socket by checking the `SO_TYPE` option on + /// this socket. + pub fn r#type(&self) -> io::Result { + unsafe { getsockopt::(self.as_raw(), sys::SOL_SOCKET, sys::SO_TYPE).map(Type) } } /// Creates a new independently owned handle to the underlying socket. /// - /// The returned `TcpStream` is a reference to the same stream that this - /// object references. Both handles will read and write the same stream of - /// data, and options set on one stream will be propagated to the other - /// stream. - pub fn try_clone(&self) -> io::Result { - self.inner.try_clone().map(|s| Socket { inner: s }) - } - - /// Get the value of the `SO_ERROR` option on this socket. + /// # Notes /// - /// This will retrieve the stored error in the underlying socket, clearing - /// the field in the process. This can be useful for checking errors between - /// calls. - pub fn take_error(&self) -> io::Result> { - self.inner.take_error() + /// On Unix this uses `F_DUPFD_CLOEXEC` and thus sets the `FD_CLOEXEC` on + /// the returned socket. + /// + /// On Windows this uses `WSA_FLAG_NO_HANDLE_INHERIT` setting inheriting to + /// false. + /// + /// On Windows this can **not** be used function cannot be used on a + /// QOS-enabled socket, see + /// . + pub fn try_clone(&self) -> io::Result { + sys::try_clone(self.as_raw()).map(Socket::from_raw) } /// Moves this TCP stream into or out of nonblocking mode. /// - /// On Unix this corresponds to calling fcntl, and on Windows this - /// corresponds to calling ioctlsocket. 
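The bind/listen/accept path above, together with `local_addr`/`peer_addr`, can be exercised end to end on loopback without extra threads. A sketch assuming the vendored socket2 0.4 API:

```
use std::net::SocketAddr;

use socket2::{Domain, SockAddr, Socket, Type};

fn main() -> std::io::Result<()> {
    // Listener on an ephemeral loopback port.
    let listener = Socket::new(Domain::IPV4, Type::STREAM, None)?;
    listener.bind(&SockAddr::from("127.0.0.1:0".parse::<SocketAddr>().unwrap()))?;
    listener.listen(1)?;
    let server_addr = listener.local_addr()?;

    // On loopback the handshake completes via the listen backlog, so a
    // single-threaded accept() afterwards returns promptly.
    let client = Socket::new(Domain::IPV4, Type::STREAM, None)?;
    client.connect(&server_addr)?;

    let (conn, peer) = listener.accept()?; // same flags as `Socket::new`
    println!(
        "accepted {:?} -> {:?}",
        peer.as_socket(),
        conn.local_addr()?.as_socket()
    );
    assert_eq!(client.peer_addr()?.as_socket(), server_addr.as_socket());
    Ok(())
}
```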
+ /// # Notes + /// + /// On Unix this corresponds to calling `fcntl` (un)setting `O_NONBLOCK`. + /// + /// On Windows this corresponds to calling `ioctlsocket` (un)setting + /// `FIONBIO`. pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> { - self.inner.set_nonblocking(nonblocking) + sys::set_nonblocking(self.as_raw(), nonblocking) } /// Shuts down the read, write, or both halves of this connection. @@ -234,26 +360,31 @@ impl Socket { /// This function will cause all pending and future I/O on the specified /// portions to return immediately with an appropriate value. pub fn shutdown(&self, how: Shutdown) -> io::Result<()> { - self.inner.shutdown(how) + sys::shutdown(self.as_raw(), how) } /// Receives data on the socket from the remote address to which it is /// connected. /// - /// The [`connect`] method will connect this socket to a remote address. This - /// method will fail if the socket is not connected. + /// The [`connect`] method will connect this socket to a remote address. + /// This method might fail if the socket is not connected. /// - /// [`connect`]: #method.connect - pub fn recv(&self, buf: &mut [u8]) -> io::Result { - self.inner.recv(buf, 0) - } - - /// Identical to [`recv`] but allows for specification of arbitrary flags to the underlying - /// `recv` call. + /// [`connect`]: Socket::connect /// - /// [`recv`]: #method.recv - pub fn recv_with_flags(&self, buf: &mut [u8], flags: i32) -> io::Result { - self.inner.recv(buf, flags) + /// # Safety + /// + /// Normally casting a `&mut [u8]` to `&mut [MaybeUninit]` would be + /// unsound, as that allows us to write uninitialised bytes to the buffer. + /// However this implementation promises to not write uninitialised bytes to + /// the `buf`fer and passes it directly to `recv(2)` system call. This + /// promise ensures that this function can be called using a `buf`fer of + /// type `&mut [u8]`. + /// + /// Note that the [`io::Read::read`] implementation calls this function with + /// a `buf`fer of type `&mut [u8]`, allowing initialised buffers to be used + /// without using `unsafe`. + pub fn recv(&self, buf: &mut [MaybeUninit]) -> io::Result { + self.recv_with_flags(buf, 0) } /// Receives out-of-band (OOB) data on the socket from the remote address to @@ -261,10 +392,77 @@ impl Socket { /// /// For more information, see [`recv`], [`out_of_band_inline`]. /// - /// [`recv`]: #method.recv - /// [`out_of_band_inline`]: #method.out_of_band_inline - pub fn recv_out_of_band(&self, buf: &mut [u8]) -> io::Result { - self.inner.recv(buf, MSG_OOB) + /// [`recv`]: Socket::recv + /// [`out_of_band_inline`]: Socket::out_of_band_inline + pub fn recv_out_of_band(&self, buf: &mut [MaybeUninit]) -> io::Result { + self.recv_with_flags(buf, sys::MSG_OOB) + } + + /// Identical to [`recv`] but allows for specification of arbitrary flags to + /// the underlying `recv` call. + /// + /// [`recv`]: Socket::recv + pub fn recv_with_flags( + &self, + buf: &mut [MaybeUninit], + flags: sys::c_int, + ) -> io::Result { + sys::recv(self.as_raw(), buf, flags) + } + + /// Receives data on the socket from the remote address to which it is + /// connected. Unlike [`recv`] this allows passing multiple buffers. + /// + /// The [`connect`] method will connect this socket to a remote address. + /// This method might fail if the socket is not connected. + /// + /// In addition to the number of bytes read, this function returns the flags + /// for the received message. See [`RecvFlags`] for more information about + /// the returned flags. 
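A sketch of how the non-blocking mode and the `MaybeUninit` receive buffers fit together; the `WouldBlock` match is the usual caller-side pattern, not something this hunk prescribes, and it assumes the vendored socket2 0.4 API:

```
use std::io;
use std::mem::MaybeUninit;
use std::net::SocketAddr;

use socket2::{Domain, SockAddr, Socket, Type};

fn main() -> io::Result<()> {
    let socket = Socket::new(Domain::IPV4, Type::DGRAM, None)?;
    socket.bind(&SockAddr::from("127.0.0.1:0".parse::<SocketAddr>().unwrap()))?;
    socket.set_nonblocking(true)?;

    // Nothing has been sent to this socket yet, so a non-blocking receive
    // should report `WouldBlock` instead of blocking the thread.
    let mut buf = [MaybeUninit::<u8>::uninit(); 64];
    match socket.recv_from(&mut buf) {
        Ok((n, from)) => println!("got {} bytes from {:?}", n, from.as_socket()),
        Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
            println!("no data queued yet");
        }
        Err(e) => return Err(e),
    }
    Ok(())
}
```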
+ /// + /// [`recv`]: Socket::recv + /// [`connect`]: Socket::connect + /// + /// # Safety + /// + /// Normally casting a `IoSliceMut` to `MaybeUninitSlice` would be unsound, + /// as that allows us to write uninitialised bytes to the buffer. However + /// this implementation promises to not write uninitialised bytes to the + /// `bufs` and passes it directly to `recvmsg(2)` system call. This promise + /// ensures that this function can be called using `bufs` of type `&mut + /// [IoSliceMut]`. + /// + /// Note that the [`io::Read::read_vectored`] implementation calls this + /// function with `buf`s of type `&mut [IoSliceMut]`, allowing initialised + /// buffers to be used without using `unsafe`. + #[cfg(not(target_os = "redox"))] + #[cfg_attr(docsrs, doc(cfg(not(target_os = "redox"))))] + pub fn recv_vectored( + &self, + bufs: &mut [MaybeUninitSlice<'_>], + ) -> io::Result<(usize, RecvFlags)> { + self.recv_vectored_with_flags(bufs, 0) + } + + /// Identical to [`recv_vectored`] but allows for specification of arbitrary + /// flags to the underlying `recvmsg`/`WSARecv` call. + /// + /// [`recv_vectored`]: Socket::recv_vectored + /// + /// # Safety + /// + /// `recv_from_vectored` makes the same safety guarantees regarding `bufs` + /// as [`recv_vectored`]. + /// + /// [`recv_vectored`]: Socket::recv_vectored + #[cfg(not(target_os = "redox"))] + #[cfg_attr(docsrs, doc(cfg(not(target_os = "redox"))))] + pub fn recv_vectored_with_flags( + &self, + bufs: &mut [MaybeUninitSlice<'_>], + flags: c_int, + ) -> io::Result<(usize, RecvFlags)> { + sys::recv_vectored(self.as_raw(), bufs, flags) } /// Receives data on the socket from the remote adress to which it is @@ -273,26 +471,82 @@ impl Socket { /// /// Successive calls return the same data. This is accomplished by passing /// `MSG_PEEK` as a flag to the underlying `recv` system call. - pub fn peek(&self, buf: &mut [u8]) -> io::Result { - self.inner.peek(buf) + /// + /// # Safety + /// + /// `peek` makes the same safety guarantees regarding the `buf`fer as + /// [`recv`]. + /// + /// [`recv`]: Socket::recv + pub fn peek(&self, buf: &mut [MaybeUninit]) -> io::Result { + self.recv_with_flags(buf, sys::MSG_PEEK) } /// Receives data from the socket. On success, returns the number of bytes /// read and the address from whence the data came. - pub fn recv_from(&self, buf: &mut [u8]) -> io::Result<(usize, SockAddr)> { - self.inner.recv_from(buf, 0) + /// + /// # Safety + /// + /// `recv_from` makes the same safety guarantees regarding the `buf`fer as + /// [`recv`]. + /// + /// [`recv`]: Socket::recv + pub fn recv_from(&self, buf: &mut [MaybeUninit]) -> io::Result<(usize, SockAddr)> { + self.recv_from_with_flags(buf, 0) } - /// Identical to [`recv_from`] but allows for specification of arbitrary flags to the underlying - /// `recvfrom` call. + /// Identical to [`recv_from`] but allows for specification of arbitrary + /// flags to the underlying `recvfrom` call. /// - /// [`recv_from`]: #method.recv_from + /// [`recv_from`]: Socket::recv_from pub fn recv_from_with_flags( &self, - buf: &mut [u8], - flags: i32, + buf: &mut [MaybeUninit], + flags: c_int, ) -> io::Result<(usize, SockAddr)> { - self.inner.recv_from(buf, flags) + sys::recv_from(self.as_raw(), buf, flags) + } + + /// Receives data from the socket. Returns the amount of bytes read, the + /// [`RecvFlags`] and the remote address from the data is coming. Unlike + /// [`recv_from`] this allows passing multiple buffers. 
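To show how data comes back out of a `MaybeUninit` buffer, here is a loopback round-trip on a single UDP socket. The final `from_raw_parts` view of the initialised prefix is the caller's own step (an assumption of this sketch, relying on the "no uninitialised bytes are written" promise documented above), not part of the vendored API:

```
use std::mem::MaybeUninit;
use std::net::SocketAddr;
use std::slice;

use socket2::{Domain, SockAddr, Socket, Type};

fn main() -> std::io::Result<()> {
    let socket = Socket::new(Domain::IPV4, Type::DGRAM, None)?;
    socket.bind(&SockAddr::from("127.0.0.1:0".parse::<SocketAddr>().unwrap()))?;
    let addr = socket.local_addr()?;

    // Send a datagram to ourselves, then receive it.
    socket.send_to(b"ping", &addr)?;
    let mut buf = [MaybeUninit::<u8>::uninit(); 64];
    let (n, _from) = socket.recv_from(&mut buf)?;

    // `recv_from` only writes initialised bytes into `buf`, so viewing the
    // first `n` bytes as `u8` is sound.
    let data = unsafe { slice::from_raw_parts(buf.as_ptr() as *const u8, n) };
    assert_eq!(data, b"ping");
    Ok(())
}
```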
+ /// + /// [`recv_from`]: Socket::recv_from + /// + /// # Safety + /// + /// `recv_from_vectored` makes the same safety guarantees regarding `bufs` + /// as [`recv_vectored`]. + /// + /// [`recv_vectored`]: Socket::recv_vectored + #[cfg(not(target_os = "redox"))] + #[cfg_attr(docsrs, doc(cfg(not(target_os = "redox"))))] + pub fn recv_from_vectored( + &self, + bufs: &mut [MaybeUninitSlice<'_>], + ) -> io::Result<(usize, RecvFlags, SockAddr)> { + self.recv_from_vectored_with_flags(bufs, 0) + } + + /// Identical to [`recv_from_vectored`] but allows for specification of + /// arbitrary flags to the underlying `recvmsg`/`WSARecvFrom` call. + /// + /// [`recv_from_vectored`]: Socket::recv_from_vectored + /// + /// # Safety + /// + /// `recv_from_vectored` makes the same safety guarantees regarding `bufs` + /// as [`recv_vectored`]. + /// + /// [`recv_vectored`]: Socket::recv_vectored + #[cfg(not(target_os = "redox"))] + #[cfg_attr(docsrs, doc(cfg(not(target_os = "redox"))))] + pub fn recv_from_vectored_with_flags( + &self, + bufs: &mut [MaybeUninitSlice<'_>], + flags: c_int, + ) -> io::Result<(usize, RecvFlags, SockAddr)> { + sys::recv_from_vectored(self.as_raw(), bufs, flags) } /// Receives data from the socket, without removing it from the queue. @@ -302,8 +556,15 @@ impl Socket { /// /// On success, returns the number of bytes peeked and the address from /// whence the data came. - pub fn peek_from(&self, buf: &mut [u8]) -> io::Result<(usize, SockAddr)> { - self.inner.peek_from(buf) + /// + /// # Safety + /// + /// `peek_from` makes the same safety guarantees regarding the `buf`fer as + /// [`recv`]. + /// + /// [`recv`]: Socket::recv + pub fn peek_from(&self, buf: &mut [MaybeUninit]) -> io::Result<(usize, SockAddr)> { + self.recv_from_with_flags(buf, sys::MSG_PEEK) } /// Sends data on the socket to a connected peer. @@ -313,15 +574,36 @@ impl Socket { /// /// On success returns the number of bytes that were sent. pub fn send(&self, buf: &[u8]) -> io::Result { - self.inner.send(buf, 0) + self.send_with_flags(buf, 0) } /// Identical to [`send`] but allows for specification of arbitrary flags to the underlying /// `send` call. /// /// [`send`]: #method.send - pub fn send_with_flags(&self, buf: &[u8], flags: i32) -> io::Result { - self.inner.send(buf, flags) + pub fn send_with_flags(&self, buf: &[u8], flags: c_int) -> io::Result { + sys::send(self.as_raw(), buf, flags) + } + + /// Send data to the connected peer. Returns the amount of bytes written. + #[cfg(not(target_os = "redox"))] + #[cfg_attr(docsrs, doc(cfg(not(target_os = "redox"))))] + pub fn send_vectored(&self, bufs: &[IoSlice<'_>]) -> io::Result { + self.send_vectored_with_flags(bufs, 0) + } + + /// Identical to [`send_vectored`] but allows for specification of arbitrary + /// flags to the underlying `sendmsg`/`WSASend` call. + /// + /// [`send_vectored`]: Socket::send_vectored + #[cfg(not(target_os = "redox"))] + #[cfg_attr(docsrs, doc(cfg(not(target_os = "redox"))))] + pub fn send_vectored_with_flags( + &self, + bufs: &[IoSlice<'_>], + flags: c_int, + ) -> io::Result { + sys::send_vectored(self.as_raw(), bufs, flags) } /// Sends out-of-band (OOB) data on the socket to connected peer @@ -332,387 +614,313 @@ impl Socket { /// [`send`]: #method.send /// [`out_of_band_inline`]: #method.out_of_band_inline pub fn send_out_of_band(&self, buf: &[u8]) -> io::Result { - self.inner.send(buf, MSG_OOB) + self.send_with_flags(buf, sys::MSG_OOB) } /// Sends data on the socket to the given address. 
On success, returns the /// number of bytes written. /// - /// This is typically used on UDP or datagram-oriented sockets. On success - /// returns the number of bytes that were sent. + /// This is typically used on UDP or datagram-oriented sockets. pub fn send_to(&self, buf: &[u8], addr: &SockAddr) -> io::Result { - self.inner.send_to(buf, 0, addr) + self.send_to_with_flags(buf, addr, 0) } - /// Identical to [`send_to`] but allows for specification of arbitrary flags to the underlying - /// `sendto` call. + /// Identical to [`send_to`] but allows for specification of arbitrary flags + /// to the underlying `sendto` call. /// - /// [`send_to`]: #method.send_to - pub fn send_to_with_flags(&self, buf: &[u8], addr: &SockAddr, flags: i32) -> io::Result { - self.inner.send_to(buf, flags, addr) + /// [`send_to`]: Socket::send_to + pub fn send_to_with_flags( + &self, + buf: &[u8], + addr: &SockAddr, + flags: c_int, + ) -> io::Result { + sys::send_to(self.as_raw(), buf, addr, flags) } - // ================================================ - - /// Gets the value of the `IP_TTL` option for this socket. - /// - /// For more information about this option, see [`set_ttl`][link]. - /// - /// [link]: #method.set_ttl - pub fn ttl(&self) -> io::Result { - self.inner.ttl() + /// Send data to a peer listening on `addr`. Returns the amount of bytes + /// written. + #[cfg(not(target_os = "redox"))] + #[cfg_attr(docsrs, doc(cfg(not(target_os = "redox"))))] + pub fn send_to_vectored(&self, bufs: &[IoSlice<'_>], addr: &SockAddr) -> io::Result { + self.send_to_vectored_with_flags(bufs, addr, 0) } - /// Sets the value for the `IP_TTL` option on this socket. + /// Identical to [`send_to_vectored`] but allows for specification of + /// arbitrary flags to the underlying `sendmsg`/`WSASendTo` call. /// - /// This value sets the time-to-live field that is used in every packet sent - /// from this socket. - pub fn set_ttl(&self, ttl: u32) -> io::Result<()> { - self.inner.set_ttl(ttl) + /// [`send_to_vectored`]: Socket::send_to_vectored + #[cfg(not(target_os = "redox"))] + #[cfg_attr(docsrs, doc(cfg(not(target_os = "redox"))))] + pub fn send_to_vectored_with_flags( + &self, + bufs: &[IoSlice<'_>], + addr: &SockAddr, + flags: c_int, + ) -> io::Result { + sys::send_to_vectored(self.as_raw(), bufs, addr, flags) + } +} + +/// Set `SOCK_CLOEXEC` and `NO_HANDLE_INHERIT` on the `ty`pe on platforms that +/// support it. +#[inline(always)] +fn set_common_type(ty: Type) -> Type { + // On platforms that support it set `SOCK_CLOEXEC`. + #[cfg(any( + target_os = "android", + target_os = "dragonfly", + target_os = "freebsd", + target_os = "fuchsia", + target_os = "illumos", + target_os = "linux", + target_os = "netbsd", + target_os = "openbsd", + ))] + let ty = ty._cloexec(); + + // On windows set `NO_HANDLE_INHERIT`. + #[cfg(windows)] + let ty = ty._no_inherit(); + + ty +} + +/// Set `FD_CLOEXEC` and `NOSIGPIPE` on the `socket` for platforms that need it. +#[inline(always)] +#[allow(clippy::unnecessary_wraps)] +fn set_common_flags(socket: Socket) -> io::Result { + // On platforms that don't have `SOCK_CLOEXEC` use `FD_CLOEXEC`. + #[cfg(all( + unix, + not(any( + target_os = "android", + target_os = "dragonfly", + target_os = "freebsd", + target_os = "fuchsia", + target_os = "illumos", + target_os = "linux", + target_os = "netbsd", + target_os = "openbsd", + )) + ))] + socket._set_cloexec(true)?; + + // On Apple platforms set `NOSIGPIPE`. 
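The vectored send/receive pairs introduced above can be sketched over a loopback UDP socket. This assumes socket2 0.4's `MaybeUninitSlice::new` constructor and, like the vendored functions, is not applicable on Redox:

```
use std::io::IoSlice;
use std::mem::MaybeUninit;
use std::net::SocketAddr;

use socket2::{Domain, MaybeUninitSlice, SockAddr, Socket, Type};

fn main() -> std::io::Result<()> {
    let socket = Socket::new(Domain::IPV4, Type::DGRAM, None)?;
    socket.bind(&SockAddr::from("127.0.0.1:0".parse::<SocketAddr>().unwrap()))?;
    let addr = socket.local_addr()?;

    // Gather two buffers into one datagram...
    let sent =
        socket.send_to_vectored(&[IoSlice::new(b"hello "), IoSlice::new(b"world")], &addr)?;
    assert_eq!(sent, 11);

    // ...and scatter it back across two receive buffers.
    let mut head = [MaybeUninit::<u8>::uninit(); 6];
    let mut tail = [MaybeUninit::<u8>::uninit(); 16];
    let mut bufs = [MaybeUninitSlice::new(&mut head), MaybeUninitSlice::new(&mut tail)];
    let (n, _flags, _from) = socket.recv_from_vectored(&mut bufs)?;
    assert_eq!(n, 11);
    Ok(())
}
```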
+ #[cfg(target_vendor = "apple")] + socket._set_nosigpipe(true)?; + + Ok(socket) +} + +/// A local interface specified by its index or an address assigned to it. +/// +/// `Index(0)` and `Address(Ipv4Addr::UNSPECIFIED)` are equivalent and indicate +/// that an appropriate interface should be selected by the system. +#[cfg(not(any( + target_os = "haiku", + target_os = "illumos", + target_os = "netbsd", + target_os = "redox", + target_os = "solaris", +)))] +#[derive(Debug)] +pub enum InterfaceIndexOrAddress { + /// An interface index. + Index(u32), + /// An address assigned to an interface. + Address(Ipv4Addr), +} + +/// Socket options get/set using `SOL_SOCKET`. +/// +/// Additional documentation can be found in documentation of the OS. +/// * Linux: +/// * Windows: +impl Socket { + /// Get the value of the `SO_BROADCAST` option for this socket. + /// + /// For more information about this option, see [`set_broadcast`]. + /// + /// [`set_broadcast`]: Socket::set_broadcast + pub fn broadcast(&self) -> io::Result { + unsafe { + getsockopt::(self.as_raw(), sys::SOL_SOCKET, sys::SO_BROADCAST) + .map(|broadcast| broadcast != 0) + } } - /// Gets the value of the `TCP_MAXSEG` option on this socket. - /// - /// The `TCP_MAXSEG` option denotes the TCP Maximum Segment - /// Size and is only available on TCP sockets. - #[cfg(all(unix, not(target_os = "redox")))] - pub fn mss(&self) -> io::Result { - self.inner.mss() - } - - /// Sets the value of the `TCP_MAXSEG` option on this socket. - /// - /// The `TCP_MAXSEG` option denotes the TCP Maximum Segment - /// Size and is only available on TCP sockets. - #[cfg(all(unix, not(target_os = "redox")))] - pub fn set_mss(&self, mss: u32) -> io::Result<()> { - self.inner.set_mss(mss) - } - - /// Gets the value for the `SO_MARK` option on this socket. - /// - /// This value gets the socket mark field for each packet sent through - /// this socket. - /// - /// This function is only available on Linux and requires the - /// `CAP_NET_ADMIN` capability. - #[cfg(target_os = "linux")] - pub fn mark(&self) -> io::Result { - self.inner.mark() - } - - /// Sets the value for the `SO_MARK` option on this socket. - /// - /// This value sets the socket mark field for each packet sent through - /// this socket. Changing the mark can be used for mark-based routing - /// without netfilter or for packet filtering. - /// - /// This function is only available on Linux and requires the - /// `CAP_NET_ADMIN` capability. - #[cfg(target_os = "linux")] - pub fn set_mark(&self, mark: u32) -> io::Result<()> { - self.inner.set_mark(mark) - } - - /// Gets the value for the `SO_BINDTODEVICE` option on this socket. - /// - /// This value gets the socket binded device's interface name. - /// - /// This function is only available on Linux. - #[cfg(target_os = "linux")] - pub fn device(&self) -> io::Result> { - self.inner.device() - } - - /// Sets the value for the `SO_BINDTODEVICE` option on this socket. - /// - /// If a socket is bound to an interface, only packets received from that - /// particular interface are processed by the socket. Note that this only - /// works for some socket types, particularly `AF_INET` sockets. - /// - /// If `interface` is `None` or an empty string it removes the binding. - /// - /// This function is only available on Linux. - #[cfg(target_os = "linux")] - pub fn bind_device(&self, interface: Option<&CStr>) -> io::Result<()> { - self.inner.bind_device(interface) - } - - /// Gets the value of the `IPV6_UNICAST_HOPS` option for this socket. 
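The `SOL_SOCKET` accessors that follow all use the same get/set pattern; a small sketch with `SO_BROADCAST`, assuming the vendored socket2 0.4 API:

```
use socket2::{Domain, Socket, Type};

fn main() -> std::io::Result<()> {
    let socket = Socket::new(Domain::IPV4, Type::DGRAM, None)?;

    // SO_BROADCAST defaults to off; enable it and read the value back.
    assert!(!socket.broadcast()?);
    socket.set_broadcast(true)?;
    assert!(socket.broadcast()?);
    Ok(())
}
```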
- /// - /// Specifies the hop limit for ipv6 unicast packets - pub fn unicast_hops_v6(&self) -> io::Result { - self.inner.unicast_hops_v6() - } - - /// Sets the value for the `IPV6_UNICAST_HOPS` option on this socket. - /// - /// Specifies the hop limit for ipv6 unicast packets - pub fn set_unicast_hops_v6(&self, ttl: u32) -> io::Result<()> { - self.inner.set_unicast_hops_v6(ttl) - } - - /// Gets the value of the `IPV6_V6ONLY` option for this socket. - /// - /// For more information about this option, see [`set_only_v6`][link]. - /// - /// [link]: #method.set_only_v6 - pub fn only_v6(&self) -> io::Result { - self.inner.only_v6() - } - - /// Sets the value for the `IPV6_V6ONLY` option on this socket. - /// - /// If this is set to `true` then the socket is restricted to sending and - /// receiving IPv6 packets only. In this case two IPv4 and IPv6 applications - /// can bind the same port at the same time. - /// - /// If this is set to `false` then the socket can be used to send and - /// receive packets from an IPv4-mapped IPv6 address. - pub fn set_only_v6(&self, only_v6: bool) -> io::Result<()> { - self.inner.set_only_v6(only_v6) - } - - /// Returns the read timeout of this socket. - /// - /// If the timeout is `None`, then `read` calls will block indefinitely. - pub fn read_timeout(&self) -> io::Result> { - self.inner.read_timeout() - } - - /// Sets the read timeout to the timeout specified. - /// - /// If the value specified is `None`, then `read` calls will block - /// indefinitely. It is an error to pass the zero `Duration` to this - /// method. - pub fn set_read_timeout(&self, dur: Option) -> io::Result<()> { - self.inner.set_read_timeout(dur) - } - - /// Returns the write timeout of this socket. - /// - /// If the timeout is `None`, then `write` calls will block indefinitely. - pub fn write_timeout(&self) -> io::Result> { - self.inner.write_timeout() - } - - /// Sets the write timeout to the timeout specified. - /// - /// If the value specified is `None`, then `write` calls will block - /// indefinitely. It is an error to pass the zero `Duration` to this - /// method. - pub fn set_write_timeout(&self, dur: Option) -> io::Result<()> { - self.inner.set_write_timeout(dur) - } - - /// Gets the value of the `TCP_NODELAY` option on this socket. - /// - /// For more information about this option, see [`set_nodelay`][link]. - /// - /// [link]: #method.set_nodelay - pub fn nodelay(&self) -> io::Result { - self.inner.nodelay() - } - - /// Sets the value of the `TCP_NODELAY` option on this socket. - /// - /// If set, this option disables the Nagle algorithm. This means that - /// segments are always sent as soon as possible, even if there is only a - /// small amount of data. When not set, data is buffered until there is a - /// sufficient amount to send out, thereby avoiding the frequent sending of - /// small packets. - pub fn set_nodelay(&self, nodelay: bool) -> io::Result<()> { - self.inner.set_nodelay(nodelay) - } - - /// Sets the value of the `SO_BROADCAST` option for this socket. + /// Set the value of the `SO_BROADCAST` option for this socket. /// /// When enabled, this socket is allowed to send packets to a broadcast /// address. - pub fn broadcast(&self) -> io::Result { - self.inner.broadcast() - } - - /// Gets the value of the `SO_BROADCAST` option for this socket. - /// - /// For more information about this option, see - /// [`set_broadcast`][link]. 
- /// - /// [link]: #method.set_broadcast pub fn set_broadcast(&self, broadcast: bool) -> io::Result<()> { - self.inner.set_broadcast(broadcast) + unsafe { + setsockopt( + self.as_raw(), + sys::SOL_SOCKET, + sys::SO_BROADCAST, + broadcast as c_int, + ) + } } - /// Gets the value of the `IP_MULTICAST_LOOP` option for this socket. + /// Get the value of the `SO_ERROR` option on this socket. /// - /// For more information about this option, see - /// [`set_multicast_loop_v4`][link]. - /// - /// [link]: #method.set_multicast_loop_v4 - pub fn multicast_loop_v4(&self) -> io::Result { - self.inner.multicast_loop_v4() + /// This will retrieve the stored error in the underlying socket, clearing + /// the field in the process. This can be useful for checking errors between + /// calls. + pub fn take_error(&self) -> io::Result> { + match unsafe { getsockopt::(self.as_raw(), sys::SOL_SOCKET, sys::SO_ERROR) } { + Ok(0) => Ok(None), + Ok(errno) => Ok(Some(io::Error::from_raw_os_error(errno))), + Err(err) => Err(err), + } } - /// Sets the value of the `IP_MULTICAST_LOOP` option for this socket. + /// Get the value of the `SO_KEEPALIVE` option on this socket. /// - /// If enabled, multicast packets will be looped back to the local socket. - /// Note that this may not have any affect on IPv6 sockets. - pub fn set_multicast_loop_v4(&self, multicast_loop_v4: bool) -> io::Result<()> { - self.inner.set_multicast_loop_v4(multicast_loop_v4) + /// For more information about this option, see [`set_keepalive`]. + /// + /// [`set_keepalive`]: Socket::set_keepalive + pub fn keepalive(&self) -> io::Result { + unsafe { + getsockopt::(self.as_raw(), sys::SOL_SOCKET, sys::SO_KEEPALIVE) + .map(|keepalive| keepalive != 0) + } } - /// Gets the value of the `IP_MULTICAST_TTL` option for this socket. + /// Set value for the `SO_KEEPALIVE` option on this socket. /// - /// For more information about this option, see - /// [`set_multicast_ttl_v4`][link]. - /// - /// [link]: #method.set_multicast_ttl_v4 - pub fn multicast_ttl_v4(&self) -> io::Result { - self.inner.multicast_ttl_v4() + /// Enable sending of keep-alive messages on connection-oriented sockets. + pub fn set_keepalive(&self, keepalive: bool) -> io::Result<()> { + unsafe { + setsockopt( + self.as_raw(), + sys::SOL_SOCKET, + sys::SO_KEEPALIVE, + keepalive as c_int, + ) + } } - /// Sets the value of the `IP_MULTICAST_TTL` option for this socket. + /// Get the value of the `SO_LINGER` option on this socket. /// - /// Indicates the time-to-live value of outgoing multicast packets for - /// this socket. The default value is 1 which means that multicast packets - /// don't leave the local network unless explicitly requested. + /// For more information about this option, see [`set_linger`]. /// - /// Note that this may not have any affect on IPv6 sockets. - pub fn set_multicast_ttl_v4(&self, multicast_ttl_v4: u32) -> io::Result<()> { - self.inner.set_multicast_ttl_v4(multicast_ttl_v4) - } - - /// Gets the value of the `IPV6_MULTICAST_HOPS` option for this socket - /// - /// For more information about this option, see - /// [`set_multicast_hops_v6`][link]. - /// - /// [link]: #method.set_multicast_hops_v6 - pub fn multicast_hops_v6(&self) -> io::Result { - self.inner.multicast_hops_v6() - } - - /// Sets the value of the `IPV6_MULTICAST_HOPS` option for this socket - /// - /// Indicates the number of "routers" multicast packets will transit for - /// this socket. 
The default value is 1 which means that multicast packets - /// don't leave the local network unless explicitly requested. - pub fn set_multicast_hops_v6(&self, hops: u32) -> io::Result<()> { - self.inner.set_multicast_hops_v6(hops) - } - - /// Gets the value of the `IP_MULTICAST_IF` option for this socket. - /// - /// For more information about this option, see - /// [`set_multicast_if_v4`][link]. - /// - /// [link]: #method.set_multicast_if_v4 - /// - /// Returns the interface to use for routing multicast packets. - pub fn multicast_if_v4(&self) -> io::Result { - self.inner.multicast_if_v4() - } - - /// Sets the value of the `IP_MULTICAST_IF` option for this socket. - /// - /// Specifies the interface to use for routing multicast packets. - pub fn set_multicast_if_v4(&self, interface: &Ipv4Addr) -> io::Result<()> { - self.inner.set_multicast_if_v4(interface) - } - - /// Gets the value of the `IPV6_MULTICAST_IF` option for this socket. - /// - /// For more information about this option, see - /// [`set_multicast_if_v6`][link]. - /// - /// [link]: #method.set_multicast_if_v6 - /// - /// Returns the interface to use for routing multicast packets. - pub fn multicast_if_v6(&self) -> io::Result { - self.inner.multicast_if_v6() - } - - /// Sets the value of the `IPV6_MULTICAST_IF` option for this socket. - /// - /// Specifies the interface to use for routing multicast packets. Unlike ipv4, this - /// is generally required in ipv6 contexts where network routing prefixes may - /// overlap. - pub fn set_multicast_if_v6(&self, interface: u32) -> io::Result<()> { - self.inner.set_multicast_if_v6(interface) - } - - /// Gets the value of the `IPV6_MULTICAST_LOOP` option for this socket. - /// - /// For more information about this option, see - /// [`set_multicast_loop_v6`][link]. - /// - /// [link]: #method.set_multicast_loop_v6 - pub fn multicast_loop_v6(&self) -> io::Result { - self.inner.multicast_loop_v6() - } - - /// Sets the value of the `IPV6_MULTICAST_LOOP` option for this socket. - /// - /// Controls whether this socket sees the multicast packets it sends itself. - /// Note that this may not have any affect on IPv4 sockets. - pub fn set_multicast_loop_v6(&self, multicast_loop_v6: bool) -> io::Result<()> { - self.inner.set_multicast_loop_v6(multicast_loop_v6) - } - - /// Executes an operation of the `IP_ADD_MEMBERSHIP` type. - /// - /// This function specifies a new multicast group for this socket to join. - /// The address must be a valid multicast address, and `interface` is the - /// address of the local interface with which the system should join the - /// multicast group. If it's equal to `INADDR_ANY` then an appropriate - /// interface is chosen by the system. - pub fn join_multicast_v4(&self, multiaddr: &Ipv4Addr, interface: &Ipv4Addr) -> io::Result<()> { - self.inner.join_multicast_v4(multiaddr, interface) - } - - /// Executes an operation of the `IPV6_ADD_MEMBERSHIP` type. - /// - /// This function specifies a new multicast group for this socket to join. - /// The address must be a valid multicast address, and `interface` is the - /// index of the interface to join/leave (or 0 to indicate any interface). - pub fn join_multicast_v6(&self, multiaddr: &Ipv6Addr, interface: u32) -> io::Result<()> { - self.inner.join_multicast_v6(multiaddr, interface) - } - - /// Executes an operation of the `IP_DROP_MEMBERSHIP` type. - /// - /// For more information about this option, see - /// [`join_multicast_v4`][link]. 
- /// - /// [link]: #method.join_multicast_v4 - pub fn leave_multicast_v4(&self, multiaddr: &Ipv4Addr, interface: &Ipv4Addr) -> io::Result<()> { - self.inner.leave_multicast_v4(multiaddr, interface) - } - - /// Executes an operation of the `IPV6_DROP_MEMBERSHIP` type. - /// - /// For more information about this option, see - /// [`join_multicast_v6`][link]. - /// - /// [link]: #method.join_multicast_v6 - pub fn leave_multicast_v6(&self, multiaddr: &Ipv6Addr, interface: u32) -> io::Result<()> { - self.inner.leave_multicast_v6(multiaddr, interface) - } - - /// Reads the linger duration for this socket by getting the SO_LINGER - /// option + /// [`set_linger`]: Socket::set_linger pub fn linger(&self) -> io::Result> { - self.inner.linger() + unsafe { + getsockopt::(self.as_raw(), sys::SOL_SOCKET, sys::SO_LINGER) + .map(from_linger) + } } - /// Sets the linger duration of this socket by setting the SO_LINGER option - pub fn set_linger(&self, dur: Option) -> io::Result<()> { - self.inner.set_linger(dur) + /// Set value for the `SO_LINGER` option on this socket. + /// + /// If `linger` is not `None`, a close(2) or shutdown(2) will not return + /// until all queued messages for the socket have been successfully sent or + /// the linger timeout has been reached. Otherwise, the call returns + /// immediately and the closing is done in the background. When the socket + /// is closed as part of exit(2), it always lingers in the background. + /// + /// # Notes + /// + /// On most OSs the duration only has a precision of seconds and will be + /// silently truncated. + /// + /// On Apple platforms (e.g. macOS, iOS, etc) this uses `SO_LINGER_SEC`. + pub fn set_linger(&self, linger: Option) -> io::Result<()> { + let linger = into_linger(linger); + unsafe { setsockopt(self.as_raw(), sys::SOL_SOCKET, sys::SO_LINGER, linger) } } - /// Check the `SO_REUSEADDR` option on this socket. + /// Get value for the `SO_OOBINLINE` option on this socket. + /// + /// For more information about this option, see [`set_out_of_band_inline`]. + /// + /// [`set_out_of_band_inline`]: Socket::set_out_of_band_inline + #[cfg(not(target_os = "redox"))] + #[cfg_attr(docsrs, doc(cfg(not(target_os = "redox"))))] + pub fn out_of_band_inline(&self) -> io::Result { + unsafe { + getsockopt::(self.as_raw(), sys::SOL_SOCKET, sys::SO_OOBINLINE) + .map(|oob_inline| oob_inline != 0) + } + } + + /// Set value for the `SO_OOBINLINE` option on this socket. + /// + /// If this option is enabled, out-of-band data is directly placed into the + /// receive data stream. Otherwise, out-of-band data is passed only when the + /// `MSG_OOB` flag is set during receiving. As per RFC6093, TCP sockets + /// using the Urgent mechanism are encouraged to set this flag. + #[cfg(not(target_os = "redox"))] + #[cfg_attr(docsrs, doc(cfg(not(target_os = "redox"))))] + pub fn set_out_of_band_inline(&self, oob_inline: bool) -> io::Result<()> { + unsafe { + setsockopt( + self.as_raw(), + sys::SOL_SOCKET, + sys::SO_OOBINLINE, + oob_inline as c_int, + ) + } + } + + /// Get value for the `SO_RCVBUF` option on this socket. + /// + /// For more information about this option, see [`set_recv_buffer_size`]. + /// + /// [`set_recv_buffer_size`]: Socket::set_recv_buffer_size + pub fn recv_buffer_size(&self) -> io::Result { + unsafe { + getsockopt::(self.as_raw(), sys::SOL_SOCKET, sys::SO_RCVBUF) + .map(|size| size as usize) + } + } + + /// Set value for the `SO_RCVBUF` option on this socket. 
+ /// + /// Changes the size of the operating system's receive buffer associated + /// with the socket. + pub fn set_recv_buffer_size(&self, size: usize) -> io::Result<()> { + unsafe { + setsockopt( + self.as_raw(), + sys::SOL_SOCKET, + sys::SO_RCVBUF, + size as c_int, + ) + } + } + + /// Get value for the `SO_RCVTIMEO` option on this socket. + /// + /// If the returned timeout is `None`, then `read` and `recv` calls will + /// block indefinitely. + pub fn read_timeout(&self) -> io::Result> { + sys::timeout_opt(self.as_raw(), sys::SOL_SOCKET, sys::SO_RCVTIMEO) + } + + /// Set value for the `SO_RCVTIMEO` option on this socket. + /// + /// If `timeout` is `None`, then `read` and `recv` calls will block + /// indefinitely. + pub fn set_read_timeout(&self, duration: Option) -> io::Result<()> { + sys::set_timeout_opt(self.as_raw(), sys::SOL_SOCKET, sys::SO_RCVTIMEO, duration) + } + + /// Get the value of the `SO_REUSEADDR` option on this socket. + /// + /// For more information about this option, see [`set_reuse_address`]. + /// + /// [`set_reuse_address`]: Socket::set_reuse_address pub fn reuse_address(&self) -> io::Result { - self.inner.reuse_address() + unsafe { + getsockopt::(self.as_raw(), sys::SOL_SOCKET, sys::SO_REUSEADDR) + .map(|reuse| reuse != 0) + } } /// Set value for the `SO_REUSEADDR` option on this socket. @@ -721,408 +929,853 @@ impl Socket { /// addresses. For IPv4 sockets this means that a socket may bind even when /// there's a socket already listening on this port. pub fn set_reuse_address(&self, reuse: bool) -> io::Result<()> { - self.inner.set_reuse_address(reuse) + unsafe { + setsockopt( + self.as_raw(), + sys::SOL_SOCKET, + sys::SO_REUSEADDR, + reuse as c_int, + ) + } } - /// Gets the value of the `SO_RCVBUF` option on this socket. + /// Get the value of the `SO_SNDBUF` option on this socket. /// - /// For more information about this option, see - /// [`set_recv_buffer_size`][link]. + /// For more information about this option, see [`set_send_buffer_size`]. /// - /// [link]: #method.set_recv_buffer_size - pub fn recv_buffer_size(&self) -> io::Result { - self.inner.recv_buffer_size() - } - - /// Sets the value of the `SO_RCVBUF` option on this socket. - /// - /// Changes the size of the operating system's receive buffer associated - /// with the socket. - pub fn set_recv_buffer_size(&self, size: usize) -> io::Result<()> { - self.inner.set_recv_buffer_size(size) - } - - /// Gets the value of the `SO_SNDBUF` option on this socket. - /// - /// For more information about this option, see [`set_send_buffer`][link]. - /// - /// [link]: #method.set_send_buffer + /// [`set_send_buffer_size`]: Socket::set_send_buffer_size pub fn send_buffer_size(&self) -> io::Result { - self.inner.send_buffer_size() + unsafe { + getsockopt::(self.as_raw(), sys::SOL_SOCKET, sys::SO_SNDBUF) + .map(|size| size as usize) + } } - /// Sets the value of the `SO_SNDBUF` option on this socket. + /// Set value for the `SO_SNDBUF` option on this socket. /// /// Changes the size of the operating system's send buffer associated with /// the socket. pub fn set_send_buffer_size(&self, size: usize) -> io::Result<()> { - self.inner.set_send_buffer_size(size) + unsafe { + setsockopt( + self.as_raw(), + sys::SOL_SOCKET, + sys::SO_SNDBUF, + size as c_int, + ) + } } - /// Returns whether keepalive messages are enabled on this socket, and if so - /// the duration of time between them. + /// Get value for the `SO_SNDTIMEO` option on this socket. 
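A sketch combining several of the socket-level options above (reuse, timeouts, buffer size, linger), assuming the vendored socket2 0.4 API; the chosen sizes and durations are illustrative and the OS may round or clamp them:

```
use std::time::Duration;

use socket2::{Domain, Socket, Type};

fn main() -> std::io::Result<()> {
    let socket = Socket::new(Domain::IPV4, Type::STREAM, None)?;

    // Allow quick rebinding of the address and bound the blocking calls.
    socket.set_reuse_address(true)?;
    socket.set_read_timeout(Some(Duration::from_secs(5)))?;
    socket.set_write_timeout(Some(Duration::from_secs(5)))?;

    // Ask for a larger receive buffer; the OS may adjust the value.
    socket.set_recv_buffer_size(64 * 1024)?;
    println!(
        "SO_RCVBUF = {}, SO_REUSEADDR = {}",
        socket.recv_buffer_size()?,
        socket.reuse_address()?
    );

    // SO_LINGER with second precision, as noted above.
    socket.set_linger(Some(Duration::from_secs(1)))?;
    Ok(())
}
```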
/// - /// For more information about this option, see [`set_keepalive`][link]. - /// - /// [link]: #method.set_keepalive - pub fn keepalive(&self) -> io::Result> { - self.inner.keepalive() + /// If the returned timeout is `None`, then `write` and `send` calls will + /// block indefinitely. + pub fn write_timeout(&self) -> io::Result> { + sys::timeout_opt(self.as_raw(), sys::SOL_SOCKET, sys::SO_SNDTIMEO) } - /// Sets whether keepalive messages are enabled to be sent on this socket. + /// Set value for the `SO_SNDTIMEO` option on this socket. /// - /// On Unix, this option will set the `SO_KEEPALIVE` as well as the - /// `TCP_KEEPALIVE` or `TCP_KEEPIDLE` option (depending on your platform). - /// On Windows, this will set the `SIO_KEEPALIVE_VALS` option. + /// If `timeout` is `None`, then `write` and `send` calls will block + /// indefinitely. + pub fn set_write_timeout(&self, duration: Option) -> io::Result<()> { + sys::set_timeout_opt(self.as_raw(), sys::SOL_SOCKET, sys::SO_SNDTIMEO, duration) + } +} + +fn from_linger(linger: sys::linger) -> Option { + if linger.l_onoff == 0 { + None + } else { + Some(Duration::from_secs(linger.l_linger as u64)) + } +} + +fn into_linger(duration: Option) -> sys::linger { + match duration { + Some(duration) => sys::linger { + l_onoff: 1, + l_linger: duration.as_secs() as _, + }, + None => sys::linger { + l_onoff: 0, + l_linger: 0, + }, + } +} + +/// Socket options for IPv4 sockets, get/set using `IPPROTO_IP`. +/// +/// Additional documentation can be found in documentation of the OS. +/// * Linux: +/// * Windows: +impl Socket { + /// Get the value of the `IP_HDRINCL` option on this socket. /// - /// If `None` is specified then keepalive messages are disabled, otherwise - /// the duration specified will be the time to remain idle before sending a - /// TCP keepalive probe. + /// For more information about this option, see [`set_header_included`]. /// - /// Some platforms specify this value in seconds, so sub-second - /// specifications may be omitted. - pub fn set_keepalive(&self, keepalive: Option) -> io::Result<()> { - self.inner.set_keepalive(keepalive) + /// [`set_header_included`]: Socket::set_header_included + #[cfg(all(feature = "all", not(target_os = "redox")))] + #[cfg_attr(docsrs, doc(all(feature = "all", not(target_os = "redox"))))] + pub fn header_included(&self) -> io::Result { + unsafe { + getsockopt::(self.as_raw(), sys::IPPROTO_IP, sys::IP_HDRINCL) + .map(|included| included != 0) + } } - /// Returns the value of the `SO_OOBINLINE` flag of the underlying socket. - /// For more information about this option, see [`set_out_of_band_inline`][link]. + /// Set the value of the `IP_HDRINCL` option on this socket. /// - /// [link]: #method.set_out_of_band_inline - pub fn out_of_band_inline(&self) -> io::Result { - self.inner.out_of_band_inline() + /// If enabled, the user supplies an IP header in front of the user data. + /// Valid only for [`SOCK_RAW`] sockets; see [raw(7)] for more information. + /// When this flag is enabled, the values set by `IP_OPTIONS`, [`IP_TTL`], + /// and [`IP_TOS`] are ignored. 
+ /// + /// [`SOCK_RAW`]: Type::RAW + /// [raw(7)]: https://man7.org/linux/man-pages/man7/raw.7.html + /// [`IP_TTL`]: Socket::set_ttl + /// [`IP_TOS`]: Socket::set_tos + #[cfg(all(feature = "all", not(target_os = "redox")))] + #[cfg_attr(docsrs, doc(all(feature = "all", not(target_os = "redox"))))] + pub fn set_header_included(&self, included: bool) -> io::Result<()> { + unsafe { + setsockopt( + self.as_raw(), + sys::IPPROTO_IP, + sys::IP_HDRINCL, + included as c_int, + ) + } } - /// Sets the `SO_OOBINLINE` flag of the underlying socket. - /// as per RFC6093, TCP sockets using the Urgent mechanism - /// are encouraged to set this flag. + /// Get the value of the `IP_TRANSPARENT` option on this socket. /// - /// If this flag is not set, the `MSG_OOB` flag is needed - /// while `recv`ing to aquire the out-of-band data. - pub fn set_out_of_band_inline(&self, oob_inline: bool) -> io::Result<()> { - self.inner.set_out_of_band_inline(oob_inline) + /// For more information about this option, see [`set_ip_transparent`]. + /// + /// [`set_ip_transparent`]: Socket::set_ip_transparent + #[cfg(any(doc, all(feature = "all", target_os = "linux")))] + #[cfg_attr(docsrs, doc(cfg(all(feature = "all", target_os = "linux"))))] + pub fn ip_transparent(&self) -> io::Result { + unsafe { + getsockopt::(self.as_raw(), sys::IPPROTO_IP, libc::IP_TRANSPARENT) + .map(|transparent| transparent != 0) + } } - /// Check the value of the `SO_REUSEPORT` option on this socket. + /// Set the value of the `IP_TRANSPARENT` option on this socket. /// - /// This function is only available on Unix when the `reuseport` feature is - /// enabled. - #[cfg(all( - unix, - not(any(target_os = "solaris", target_os = "illumos")), - feature = "reuseport" + /// Setting this boolean option enables transparent proxying + /// on this socket. This socket option allows the calling + /// application to bind to a nonlocal IP address and operate + /// both as a client and a server with the foreign address as + /// the local endpoint. NOTE: this requires that routing be + /// set up in a way that packets going to the foreign address + /// are routed through the TProxy box (i.e., the system + /// hosting the application that employs the IP_TRANSPARENT + /// socket option). Enabling this socket option requires + /// superuser privileges (the `CAP_NET_ADMIN` capability). + /// + /// TProxy redirection with the iptables TPROXY target also + /// requires that this option be set on the redirected socket. + #[cfg(any(doc, all(feature = "all", target_os = "linux")))] + #[cfg_attr(docsrs, doc(cfg(all(feature = "all", target_os = "linux"))))] + pub fn set_ip_transparent(&self, transparent: bool) -> io::Result<()> { + unsafe { + setsockopt( + self.as_raw(), + sys::IPPROTO_IP, + libc::IP_TRANSPARENT, + transparent as c_int, + ) + } + } + + /// Join a multicast group using `IP_ADD_MEMBERSHIP` option on this socket. + /// + /// This function specifies a new multicast group for this socket to join. + /// The address must be a valid multicast address, and `interface` is the + /// address of the local interface with which the system should join the + /// multicast group. If it's [`Ipv4Addr::UNSPECIFIED`] (`INADDR_ANY`) then + /// an appropriate interface is chosen by the system. 
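An IPv4 multicast sketch assuming the vendored socket2 0.4 API; the group address is illustrative and the join can fail at runtime on hosts without a multicast-capable interface or route, so treat this as a sketch rather than a guaranteed sequence:

```
use std::net::Ipv4Addr;

use socket2::{Domain, Socket, Type};

fn main() -> std::io::Result<()> {
    let socket = Socket::new(Domain::IPV4, Type::DGRAM, None)?;

    // Keep multicast traffic on the local network and loop it back to us.
    socket.set_multicast_ttl_v4(1)?;
    socket.set_multicast_loop_v4(true)?;

    // Join an (illustrative) group on whatever interface the system picks,
    // then leave it again.
    let group = Ipv4Addr::new(224, 0, 0, 123);
    socket.join_multicast_v4(&group, &Ipv4Addr::UNSPECIFIED)?;
    socket.leave_multicast_v4(&group, &Ipv4Addr::UNSPECIFIED)?;
    Ok(())
}
```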
+ pub fn join_multicast_v4(&self, multiaddr: &Ipv4Addr, interface: &Ipv4Addr) -> io::Result<()> { + let mreq = sys::IpMreq { + imr_multiaddr: sys::to_in_addr(multiaddr), + imr_interface: sys::to_in_addr(interface), + }; + unsafe { setsockopt(self.as_raw(), sys::IPPROTO_IP, sys::IP_ADD_MEMBERSHIP, mreq) } + } + + /// Leave a multicast group using `IP_DROP_MEMBERSHIP` option on this socket. + /// + /// For more information about this option, see [`join_multicast_v4`]. + /// + /// [`join_multicast_v4`]: Socket::join_multicast_v4 + pub fn leave_multicast_v4(&self, multiaddr: &Ipv4Addr, interface: &Ipv4Addr) -> io::Result<()> { + let mreq = sys::IpMreq { + imr_multiaddr: sys::to_in_addr(multiaddr), + imr_interface: sys::to_in_addr(interface), + }; + unsafe { + setsockopt( + self.as_raw(), + sys::IPPROTO_IP, + sys::IP_DROP_MEMBERSHIP, + mreq, + ) + } + } + + /// Join a multicast group using `IP_ADD_MEMBERSHIP` option on this socket. + /// + /// This function specifies a new multicast group for this socket to join. + /// The address must be a valid multicast address, and `interface` specifies + /// the local interface with which the system should join the multicast + /// group. See [`InterfaceIndexOrAddress`]. + #[cfg(not(any( + target_os = "haiku", + target_os = "illumos", + target_os = "netbsd", + target_os = "redox", + target_os = "solaris", + )))] + pub fn join_multicast_v4_n( + &self, + multiaddr: &Ipv4Addr, + interface: &InterfaceIndexOrAddress, + ) -> io::Result<()> { + let mreqn = sys::to_mreqn(multiaddr, interface); + unsafe { + setsockopt( + self.as_raw(), + sys::IPPROTO_IP, + sys::IP_ADD_MEMBERSHIP, + mreqn, + ) + } + } + + /// Leave a multicast group using `IP_DROP_MEMBERSHIP` option on this socket. + /// + /// For more information about this option, see [`join_multicast_v4_n`]. + /// + /// [`join_multicast_v4_n`]: Socket::join_multicast_v4_n + #[cfg(not(any( + target_os = "haiku", + target_os = "illumos", + target_os = "netbsd", + target_os = "redox", + target_os = "solaris", + )))] + pub fn leave_multicast_v4_n( + &self, + multiaddr: &Ipv4Addr, + interface: &InterfaceIndexOrAddress, + ) -> io::Result<()> { + let mreqn = sys::to_mreqn(multiaddr, interface); + unsafe { + setsockopt( + self.as_raw(), + sys::IPPROTO_IP, + sys::IP_DROP_MEMBERSHIP, + mreqn, + ) + } + } + + /// Get the value of the `IP_MULTICAST_IF` option for this socket. + /// + /// For more information about this option, see [`set_multicast_if_v4`]. + /// + /// [`set_multicast_if_v4`]: Socket::set_multicast_if_v4 + pub fn multicast_if_v4(&self) -> io::Result { + unsafe { + getsockopt(self.as_raw(), sys::IPPROTO_IP, sys::IP_MULTICAST_IF).map(sys::from_in_addr) + } + } + + /// Set the value of the `IP_MULTICAST_IF` option for this socket. + /// + /// Specifies the interface to use for routing multicast packets. + pub fn set_multicast_if_v4(&self, interface: &Ipv4Addr) -> io::Result<()> { + let interface = sys::to_in_addr(interface); + unsafe { + setsockopt( + self.as_raw(), + sys::IPPROTO_IP, + sys::IP_MULTICAST_IF, + interface, + ) + } + } + + /// Get the value of the `IP_MULTICAST_LOOP` option for this socket. + /// + /// For more information about this option, see [`set_multicast_loop_v4`]. + /// + /// [`set_multicast_loop_v4`]: Socket::set_multicast_loop_v4 + pub fn multicast_loop_v4(&self) -> io::Result { + unsafe { + getsockopt::(self.as_raw(), sys::IPPROTO_IP, sys::IP_MULTICAST_LOOP) + .map(|loop_v4| loop_v4 != 0) + } + } + + /// Set the value of the `IP_MULTICAST_LOOP` option for this socket. 
+ /// + /// If enabled, multicast packets will be looped back to the local socket. + /// Note that this may not have any affect on IPv6 sockets. + pub fn set_multicast_loop_v4(&self, loop_v4: bool) -> io::Result<()> { + unsafe { + setsockopt( + self.as_raw(), + sys::IPPROTO_IP, + sys::IP_MULTICAST_LOOP, + loop_v4 as c_int, + ) + } + } + + /// Get the value of the `IP_MULTICAST_TTL` option for this socket. + /// + /// For more information about this option, see [`set_multicast_ttl_v4`]. + /// + /// [`set_multicast_ttl_v4`]: Socket::set_multicast_ttl_v4 + pub fn multicast_ttl_v4(&self) -> io::Result { + unsafe { + getsockopt::(self.as_raw(), sys::IPPROTO_IP, sys::IP_MULTICAST_TTL) + .map(|ttl| ttl as u32) + } + } + + /// Set the value of the `IP_MULTICAST_TTL` option for this socket. + /// + /// Indicates the time-to-live value of outgoing multicast packets for + /// this socket. The default value is 1 which means that multicast packets + /// don't leave the local network unless explicitly requested. + /// + /// Note that this may not have any affect on IPv6 sockets. + pub fn set_multicast_ttl_v4(&self, ttl: u32) -> io::Result<()> { + unsafe { + setsockopt( + self.as_raw(), + sys::IPPROTO_IP, + sys::IP_MULTICAST_TTL, + ttl as c_int, + ) + } + } + + /// Get the value of the `IP_TTL` option for this socket. + /// + /// For more information about this option, see [`set_ttl`]. + /// + /// [`set_ttl`]: Socket::set_ttl + pub fn ttl(&self) -> io::Result { + unsafe { + getsockopt::(self.as_raw(), sys::IPPROTO_IP, sys::IP_TTL).map(|ttl| ttl as u32) + } + } + + /// Set the value of the `IP_TTL` option for this socket. + /// + /// This value sets the time-to-live field that is used in every packet sent + /// from this socket. + pub fn set_ttl(&self, ttl: u32) -> io::Result<()> { + unsafe { setsockopt(self.as_raw(), sys::IPPROTO_IP, sys::IP_TTL, ttl as c_int) } + } + + /// Set the value of the `IP_TOS` option for this socket. + /// + /// This value sets the type-of-service field that is used in every packet + /// sent from this socket. + /// + /// NOTE: + /// documents that not all versions of windows support `IP_TOS`. + #[cfg(not(any( + target_os = "fuschia", + target_os = "redox", + target_os = "solaris", + target_os = "illumos", + )))] + pub fn set_tos(&self, tos: u32) -> io::Result<()> { + unsafe { setsockopt(self.as_raw(), sys::IPPROTO_IP, sys::IP_TOS, tos as c_int) } + } + + /// Get the value of the `IP_TOS` option for this socket. + /// + /// For more information about this option, see [`set_tos`]. + /// + /// NOTE: + /// documents that not all versions of windows support `IP_TOS`. + /// + /// [`set_tos`]: Socket::set_tos + #[cfg(not(any( + target_os = "fuschia", + target_os = "redox", + target_os = "solaris", + target_os = "illumos", + )))] + pub fn tos(&self) -> io::Result { + unsafe { + getsockopt::(self.as_raw(), sys::IPPROTO_IP, sys::IP_TOS).map(|tos| tos as u32) + } + } +} + +/// Socket options for IPv6 sockets, get/set using `IPPROTO_IPV6`. +/// +/// Additional documentation can be found in documentation of the OS. +/// * Linux: +/// * Windows: +impl Socket { + /// Join a multicast group using `IPV6_ADD_MEMBERSHIP` option on this socket. + /// + /// Some OSs use `IPV6_JOIN_GROUP` for this option. + /// + /// This function specifies a new multicast group for this socket to join. + /// The address must be a valid multicast address, and `interface` is the + /// index of the interface to join/leave (or 0 to indicate any interface). 
+ pub fn join_multicast_v6(&self, multiaddr: &Ipv6Addr, interface: u32) -> io::Result<()> { + let mreq = sys::Ipv6Mreq { + ipv6mr_multiaddr: sys::to_in6_addr(multiaddr), + // NOTE: some OSs use `c_int`, others use `c_uint`. + ipv6mr_interface: interface as _, + }; + unsafe { + setsockopt( + self.as_raw(), + sys::IPPROTO_IPV6, + sys::IPV6_ADD_MEMBERSHIP, + mreq, + ) + } + } + + /// Leave a multicast group using `IPV6_DROP_MEMBERSHIP` option on this socket. + /// + /// Some OSs use `IPV6_LEAVE_GROUP` for this option. + /// + /// For more information about this option, see [`join_multicast_v6`]. + /// + /// [`join_multicast_v6`]: Socket::join_multicast_v6 + pub fn leave_multicast_v6(&self, multiaddr: &Ipv6Addr, interface: u32) -> io::Result<()> { + let mreq = sys::Ipv6Mreq { + ipv6mr_multiaddr: sys::to_in6_addr(multiaddr), + // NOTE: some OSs use `c_int`, others use `c_uint`. + ipv6mr_interface: interface as _, + }; + unsafe { + setsockopt( + self.as_raw(), + sys::IPPROTO_IPV6, + sys::IPV6_DROP_MEMBERSHIP, + mreq, + ) + } + } + + /// Get the value of the `IPV6_MULTICAST_HOPS` option for this socket + /// + /// For more information about this option, see [`set_multicast_hops_v6`]. + /// + /// [`set_multicast_hops_v6`]: Socket::set_multicast_hops_v6 + pub fn multicast_hops_v6(&self) -> io::Result { + unsafe { + getsockopt::(self.as_raw(), sys::IPPROTO_IPV6, sys::IPV6_MULTICAST_HOPS) + .map(|hops| hops as u32) + } + } + + /// Set the value of the `IPV6_MULTICAST_HOPS` option for this socket + /// + /// Indicates the number of "routers" multicast packets will transit for + /// this socket. The default value is 1 which means that multicast packets + /// don't leave the local network unless explicitly requested. + pub fn set_multicast_hops_v6(&self, hops: u32) -> io::Result<()> { + unsafe { + setsockopt( + self.as_raw(), + sys::IPPROTO_IPV6, + sys::IPV6_MULTICAST_HOPS, + hops as c_int, + ) + } + } + + /// Get the value of the `IPV6_MULTICAST_IF` option for this socket. + /// + /// For more information about this option, see [`set_multicast_if_v6`]. + /// + /// [`set_multicast_if_v6`]: Socket::set_multicast_if_v6 + pub fn multicast_if_v6(&self) -> io::Result { + unsafe { + getsockopt::(self.as_raw(), sys::IPPROTO_IPV6, sys::IPV6_MULTICAST_IF) + .map(|interface| interface as u32) + } + } + + /// Set the value of the `IPV6_MULTICAST_IF` option for this socket. + /// + /// Specifies the interface to use for routing multicast packets. Unlike + /// ipv4, this is generally required in ipv6 contexts where network routing + /// prefixes may overlap. + pub fn set_multicast_if_v6(&self, interface: u32) -> io::Result<()> { + unsafe { + setsockopt( + self.as_raw(), + sys::IPPROTO_IPV6, + sys::IPV6_MULTICAST_IF, + interface as c_int, + ) + } + } + + /// Get the value of the `IPV6_MULTICAST_LOOP` option for this socket. + /// + /// For more information about this option, see [`set_multicast_loop_v6`]. + /// + /// [`set_multicast_loop_v6`]: Socket::set_multicast_loop_v6 + pub fn multicast_loop_v6(&self) -> io::Result { + unsafe { + getsockopt::(self.as_raw(), sys::IPPROTO_IPV6, sys::IPV6_MULTICAST_LOOP) + .map(|loop_v6| loop_v6 != 0) + } + } + + /// Set the value of the `IPV6_MULTICAST_LOOP` option for this socket. + /// + /// Controls whether this socket sees the multicast packets it sends itself. + /// Note that this may not have any affect on IPv4 sockets. 
+ pub fn set_multicast_loop_v6(&self, loop_v6: bool) -> io::Result<()> { + unsafe { + setsockopt( + self.as_raw(), + sys::IPPROTO_IPV6, + sys::IPV6_MULTICAST_LOOP, + loop_v6 as c_int, + ) + } + } + + /// Get the value of the `IPV6_UNICAST_HOPS` option for this socket. + /// + /// Specifies the hop limit for ipv6 unicast packets + pub fn unicast_hops_v6(&self) -> io::Result { + unsafe { + getsockopt::(self.as_raw(), sys::IPPROTO_IPV6, sys::IPV6_UNICAST_HOPS) + .map(|hops| hops as u32) + } + } + + /// Set the value for the `IPV6_UNICAST_HOPS` option on this socket. + /// + /// Specifies the hop limit for ipv6 unicast packets + pub fn set_unicast_hops_v6(&self, hops: u32) -> io::Result<()> { + unsafe { + setsockopt( + self.as_raw(), + sys::IPPROTO_IPV6, + sys::IPV6_UNICAST_HOPS, + hops as c_int, + ) + } + } + + /// Get the value of the `IPV6_V6ONLY` option for this socket. + /// + /// For more information about this option, see [`set_only_v6`]. + /// + /// [`set_only_v6`]: Socket::set_only_v6 + pub fn only_v6(&self) -> io::Result { + unsafe { + getsockopt::(self.as_raw(), sys::IPPROTO_IPV6, sys::IPV6_V6ONLY) + .map(|only_v6| only_v6 != 0) + } + } + + /// Set the value for the `IPV6_V6ONLY` option on this socket. + /// + /// If this is set to `true` then the socket is restricted to sending and + /// receiving IPv6 packets only. In this case two IPv4 and IPv6 applications + /// can bind the same port at the same time. + /// + /// If this is set to `false` then the socket can be used to send and + /// receive packets from an IPv4-mapped IPv6 address. + pub fn set_only_v6(&self, only_v6: bool) -> io::Result<()> { + unsafe { + setsockopt( + self.as_raw(), + sys::IPPROTO_IPV6, + sys::IPV6_V6ONLY, + only_v6 as c_int, + ) + } + } +} + +/// Socket options for TCP sockets, get/set using `IPPROTO_TCP`. +/// +/// Additional documentation can be found in documentation of the OS. +/// * Linux: +/// * Windows: +impl Socket { + /// Get the value of the `TCP_KEEPIDLE` option on this socket. + /// + /// This returns the value of `TCP_KEEPALIVE` on macOS and iOS and `TCP_KEEPIDLE` on all other + /// supported Unix operating systems. + #[cfg(any( + doc, + all( + feature = "all", + not(any(windows, target_os = "haiku", target_os = "openbsd")) + ) ))] - pub fn reuse_port(&self) -> io::Result { - self.inner.reuse_port() + #[cfg_attr( + docsrs, + doc(cfg(all( + feature = "all", + not(any(windows, target_os = "haiku", target_os = "openbsd")) + ))) + )] + pub fn keepalive_time(&self) -> io::Result { + sys::keepalive_time(self.as_raw()) } - /// Set value for the `SO_REUSEPORT` option on this socket. + /// Get the value of the `TCP_KEEPINTVL` option on this socket. /// - /// This indicates that further calls to `bind` may allow reuse of local - /// addresses. For IPv4 sockets this means that a socket may bind even when - /// there's a socket already listening on this port. + /// For more information about this option, see [`set_tcp_keepalive`]. /// - /// This function is only available on Unix when the `reuseport` feature is - /// enabled. 
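A dual-stack sketch for the `IPV6_V6ONLY` option above, assuming the vendored socket2 0.4 API; whether clearing the flag actually yields IPv4-mapped traffic depends on the OS:

```
use std::net::SocketAddr;

use socket2::{Domain, SockAddr, Socket, Type};

fn main() -> std::io::Result<()> {
    // Clear IPV6_V6ONLY *before* binding so the listener can also accept
    // IPv4-mapped connections where the OS supports it.
    let socket = Socket::new(Domain::IPV6, Type::STREAM, None)?;
    socket.set_only_v6(false)?;
    socket.bind(&SockAddr::from("[::]:0".parse::<SocketAddr>().unwrap()))?;
    socket.listen(128)?;
    println!("IPV6_V6ONLY = {}", socket.only_v6()?);
    Ok(())
}
```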
+ /// [`set_tcp_keepalive`]: Socket::set_tcp_keepalive #[cfg(all( - unix, - not(any(target_os = "solaris", target_os = "illumos")), - feature = "reuseport" + feature = "all", + any( + doc, + target_os = "android", + target_os = "dragonfly", + target_os = "freebsd", + target_os = "fuchsia", + target_os = "illumos", + target_os = "linux", + target_os = "netbsd", + target_vendor = "apple", + ) ))] - pub fn set_reuse_port(&self, reuse: bool) -> io::Result<()> { - self.inner.set_reuse_port(reuse) + #[cfg_attr( + docsrs, + doc(cfg(all( + feature = "all", + any( + target_os = "android", + target_os = "dragonfly", + target_os = "freebsd", + target_os = "fuchsia", + target_os = "illumos", + target_os = "linux", + target_os = "netbsd", + target_vendor = "apple", + ) + ))) + )] + pub fn keepalive_interval(&self) -> io::Result { + unsafe { + getsockopt::(self.as_raw(), sys::IPPROTO_TCP, sys::TCP_KEEPINTVL) + .map(|secs| Duration::from_secs(secs as u64)) + } + } + + /// Get the value of the `TCP_KEEPCNT` option on this socket. + /// + /// For more information about this option, see [`set_tcp_keepalive`]. + /// + /// [`set_tcp_keepalive`]: Socket::set_tcp_keepalive + #[cfg(all( + feature = "all", + any( + doc, + target_os = "android", + target_os = "dragonfly", + target_os = "freebsd", + target_os = "fuchsia", + target_os = "illumos", + target_os = "linux", + target_os = "netbsd", + target_vendor = "apple", + ) + ))] + #[cfg_attr( + docsrs, + doc(cfg(all( + feature = "all", + any( + target_os = "android", + target_os = "dragonfly", + target_os = "freebsd", + target_os = "fuchsia", + target_os = "illumos", + target_os = "linux", + target_os = "netbsd", + target_vendor = "apple", + ) + ))) + )] + pub fn keepalive_retries(&self) -> io::Result { + unsafe { + getsockopt::(self.as_raw(), sys::IPPROTO_TCP, sys::TCP_KEEPCNT) + .map(|retries| retries as u32) + } + } + + /// Set parameters configuring TCP keepalive probes for this socket. + /// + /// The supported parameters depend on the operating system, and are + /// configured using the [`TcpKeepalive`] struct. At a minimum, all systems + /// support configuring the [keepalive time]: the time after which the OS + /// will start sending keepalive messages on an idle connection. + /// + /// [keepalive time]: TcpKeepalive::with_time + /// + /// # Notes + /// + /// * This will enable `SO_KEEPALIVE` on this socket, if it is not already + /// enabled. + /// * On some platforms, such as Windows, any keepalive parameters *not* + /// configured by the `TcpKeepalive` struct passed to this function may be + /// overwritten with their default values. Therefore, this function should + /// either only be called once per socket, or the same parameters should + /// be passed every time it is called. + /// + /// # Examples + /// + /// ``` + /// use std::time::Duration; + /// + /// use socket2::{Socket, TcpKeepalive, Domain, Type}; + /// + /// # fn main() -> std::io::Result<()> { + /// let socket = Socket::new(Domain::IPV4, Type::STREAM, None)?; + /// let keepalive = TcpKeepalive::new() + /// .with_time(Duration::from_secs(4)); + /// // Depending on the target operating system, we may also be able to + /// // configure the keepalive probe interval and/or the number of + /// // retries here as well. 
+ /// + /// socket.set_tcp_keepalive(&keepalive)?; + /// # Ok(()) } + /// ``` + /// + pub fn set_tcp_keepalive(&self, params: &TcpKeepalive) -> io::Result<()> { + self.set_keepalive(true)?; + sys::set_tcp_keepalive(self.as_raw(), params) + } + + /// Get the value of the `TCP_NODELAY` option on this socket. + /// + /// For more information about this option, see [`set_nodelay`]. + /// + /// [`set_nodelay`]: Socket::set_nodelay + pub fn nodelay(&self) -> io::Result { + unsafe { + getsockopt::(self.as_raw(), sys::IPPROTO_TCP, sys::TCP_NODELAY) + .map(|nodelay| nodelay != 0) + } + } + + /// Set the value of the `TCP_NODELAY` option on this socket. + /// + /// If set, this option disables the Nagle algorithm. This means that + /// segments are always sent as soon as possible, even if there is only a + /// small amount of data. When not set, data is buffered until there is a + /// sufficient amount to send out, thereby avoiding the frequent sending of + /// small packets. + pub fn set_nodelay(&self, nodelay: bool) -> io::Result<()> { + unsafe { + setsockopt( + self.as_raw(), + sys::IPPROTO_TCP, + sys::TCP_NODELAY, + nodelay as c_int, + ) + } } } impl Read for Socket { fn read(&mut self, buf: &mut [u8]) -> io::Result { - self.inner.read(buf) + // Safety: the `recv` implementation promises not to write uninitialised + // bytes to the `buf`fer, so this casting is safe. + let buf = unsafe { &mut *(buf as *mut [u8] as *mut [MaybeUninit]) }; + self.recv(buf) + } + + #[cfg(not(target_os = "redox"))] + fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result { + // Safety: both `IoSliceMut` and `MaybeUninitSlice` promise to have the + // same layout, that of `iovec`/`WSABUF`. Furthermore `recv_vectored` + // promises to not write unitialised bytes to the `bufs` and pass it + // directly to the `recvmsg` system call, so this is safe. + let bufs = unsafe { &mut *(bufs as *mut [IoSliceMut<'_>] as *mut [MaybeUninitSlice<'_>]) }; + self.recv_vectored(bufs).map(|(n, _)| n) } } impl<'a> Read for &'a Socket { fn read(&mut self, buf: &mut [u8]) -> io::Result { - (&self.inner).read(buf) + // Safety: see other `Read::read` impl. + let buf = unsafe { &mut *(buf as *mut [u8] as *mut [MaybeUninit]) }; + self.recv(buf) + } + + #[cfg(not(target_os = "redox"))] + fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result { + // Safety: see other `Read::read` impl. 
+ let bufs = unsafe { &mut *(bufs as *mut [IoSliceMut<'_>] as *mut [MaybeUninitSlice<'_>]) }; + self.recv_vectored(bufs).map(|(n, _)| n) } } impl Write for Socket { fn write(&mut self, buf: &[u8]) -> io::Result { - self.inner.write(buf) + self.send(buf) + } + + #[cfg(not(target_os = "redox"))] + fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result { + self.send_vectored(bufs) } fn flush(&mut self) -> io::Result<()> { - self.inner.flush() + Ok(()) } } impl<'a> Write for &'a Socket { fn write(&mut self, buf: &[u8]) -> io::Result { - (&self.inner).write(buf) + self.send(buf) + } + + #[cfg(not(target_os = "redox"))] + fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result { + self.send_vectored(bufs) } fn flush(&mut self) -> io::Result<()> { - (&self.inner).flush() + Ok(()) } } impl fmt::Debug for Socket { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.inner.fmt(f) + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Socket") + .field("raw", &self.as_raw()) + .field("local_addr", &self.local_addr().ok()) + .field("peer_addr", &self.peer_addr().ok()) + .finish() } } -impl From for Socket { - fn from(socket: net::TcpStream) -> Socket { - Socket { - inner: socket.into(), - } - } -} - -impl From for Socket { - fn from(socket: net::TcpListener) -> Socket { - Socket { - inner: socket.into(), - } - } -} - -impl From for Socket { - fn from(socket: net::UdpSocket) -> Socket { - Socket { - inner: socket.into(), - } - } -} - -#[cfg(all(unix, feature = "unix"))] -impl From for Socket { - fn from(socket: UnixStream) -> Socket { - Socket { - inner: socket.into(), - } - } -} - -#[cfg(all(unix, feature = "unix"))] -impl From for Socket { - fn from(socket: UnixListener) -> Socket { - Socket { - inner: socket.into(), - } - } -} - -#[cfg(all(unix, feature = "unix"))] -impl From for Socket { - fn from(socket: UnixDatagram) -> Socket { - Socket { - inner: socket.into(), - } - } -} - -impl From for net::TcpStream { - fn from(socket: Socket) -> net::TcpStream { - socket.inner.into() - } -} - -impl From for net::TcpListener { - fn from(socket: Socket) -> net::TcpListener { - socket.inner.into() - } -} - -impl From for net::UdpSocket { - fn from(socket: Socket) -> net::UdpSocket { - socket.inner.into() - } -} - -#[cfg(all(unix, feature = "unix"))] -impl From for UnixStream { - fn from(socket: Socket) -> UnixStream { - socket.inner.into() - } -} - -#[cfg(all(unix, feature = "unix"))] -impl From for UnixListener { - fn from(socket: Socket) -> UnixListener { - socket.inner.into() - } -} - -#[cfg(all(unix, feature = "unix"))] -impl From for UnixDatagram { - fn from(socket: Socket) -> UnixDatagram { - socket.inner.into() - } -} - -#[cfg(test)] -mod test { - use std::net::SocketAddr; - - use super::*; - - #[test] - fn connect_timeout_unrouteable() { - // this IP is unroutable, so connections should always time out - let addr = "10.255.255.1:80".parse::().unwrap().into(); - - let socket = Socket::new(Domain::ipv4(), Type::stream(), None).unwrap(); - match socket.connect_timeout(&addr, Duration::from_millis(250)) { - Ok(_) => panic!("unexpected success"), - Err(ref e) if e.kind() == io::ErrorKind::TimedOut => {} - Err(e) => panic!("unexpected error {}", e), - } - } - - #[test] - fn connect_timeout_unbound() { - // bind and drop a socket to track down a "probably unassigned" port - let socket = Socket::new(Domain::ipv4(), Type::stream(), None).unwrap(); - let addr = "127.0.0.1:0".parse::().unwrap().into(); - socket.bind(&addr).unwrap(); - let addr = 
socket.local_addr().unwrap(); - drop(socket); - - let socket = Socket::new(Domain::ipv4(), Type::stream(), None).unwrap(); - match socket.connect_timeout(&addr, Duration::from_millis(250)) { - Ok(_) => panic!("unexpected success"), - Err(ref e) - if e.kind() == io::ErrorKind::ConnectionRefused - || e.kind() == io::ErrorKind::TimedOut => {} - Err(e) => panic!("unexpected error {}", e), - } - } - - #[test] - fn connect_timeout_valid() { - let socket = Socket::new(Domain::ipv4(), Type::stream(), None).unwrap(); - socket - .bind(&"127.0.0.1:0".parse::().unwrap().into()) - .unwrap(); - socket.listen(128).unwrap(); - - let addr = socket.local_addr().unwrap(); - - let socket = Socket::new(Domain::ipv4(), Type::stream(), None).unwrap(); - socket - .connect_timeout(&addr, Duration::from_millis(250)) - .unwrap(); - } - - #[test] - #[cfg(all(unix, feature = "pair", feature = "unix"))] - fn pair() { - let (mut a, mut b) = Socket::pair(Domain::unix(), Type::stream(), None).unwrap(); - a.write_all(b"hello world").unwrap(); - let mut buf = [0; 11]; - b.read_exact(&mut buf).unwrap(); - assert_eq!(buf, &b"hello world"[..]); - } - - #[test] - #[cfg(all(unix, feature = "unix"))] - fn unix() { - use tempdir::TempDir; - - let dir = TempDir::new("unix").unwrap(); - let addr = SockAddr::unix(dir.path().join("sock")).unwrap(); - - let listener = Socket::new(Domain::unix(), Type::stream(), None).unwrap(); - listener.bind(&addr).unwrap(); - listener.listen(10).unwrap(); - - let mut a = Socket::new(Domain::unix(), Type::stream(), None).unwrap(); - a.connect(&addr).unwrap(); - - let mut b = listener.accept().unwrap().0; - - a.write_all(b"hello world").unwrap(); - let mut buf = [0; 11]; - b.read_exact(&mut buf).unwrap(); - assert_eq!(buf, &b"hello world"[..]); - } - - #[test] - fn keepalive() { - let socket = Socket::new(Domain::ipv4(), Type::stream(), None).unwrap(); - socket.set_keepalive(Some(Duration::from_secs(7))).unwrap(); - // socket.keepalive() doesn't work on Windows #24 - #[cfg(unix)] - assert_eq!(socket.keepalive().unwrap(), Some(Duration::from_secs(7))); - socket.set_keepalive(None).unwrap(); - #[cfg(unix)] - assert_eq!(socket.keepalive().unwrap(), None); - } - - #[test] - fn nodelay() { - let socket = Socket::new(Domain::ipv4(), Type::stream(), None).unwrap(); - - assert!(socket.set_nodelay(true).is_ok()); - - let result = socket.nodelay(); - - assert!(result.is_ok()); - assert!(result.unwrap()); - } - - #[test] - fn out_of_band_inline() { - let socket = Socket::new(Domain::ipv4(), Type::stream(), None).unwrap(); - - assert_eq!(socket.out_of_band_inline().unwrap(), false); - - socket.set_out_of_band_inline(true).unwrap(); - assert_eq!(socket.out_of_band_inline().unwrap(), true); - } - - #[test] - #[cfg(any(target_os = "windows", target_os = "linux"))] - fn out_of_band_send_recv() { - let s1 = Socket::new(Domain::ipv4(), Type::stream(), None).unwrap(); - s1.bind(&"127.0.0.1:0".parse::().unwrap().into()) - .unwrap(); - let s1_addr = s1.local_addr().unwrap(); - s1.listen(1).unwrap(); - - let s2 = Socket::new(Domain::ipv4(), Type::stream(), None).unwrap(); - s2.connect(&s1_addr).unwrap(); - - let (s3, _) = s1.accept().unwrap(); - - let mut buf = [0; 10]; - // send some plain inband data - s2.send(&mut buf).unwrap(); - // send a single out of band byte - assert_eq!(s2.send_out_of_band(&mut [b"!"[0]]).unwrap(), 1); - // recv the OOB data first - assert_eq!(s3.recv_out_of_band(&mut buf).unwrap(), 1); - assert_eq!(buf[0], b"!"[0]); - assert_eq!(s3.recv(&mut buf).unwrap(), 10); - } - - #[test] - fn tcp() { - let 
s1 = Socket::new(Domain::ipv4(), Type::stream(), None).unwrap(); - s1.bind(&"127.0.0.1:0".parse::().unwrap().into()) - .unwrap(); - let s1_addr = s1.local_addr().unwrap(); - s1.listen(1).unwrap(); - - let s2 = Socket::new(Domain::ipv4(), Type::stream(), None).unwrap(); - s2.connect(&s1_addr).unwrap(); - - let (s3, _) = s1.accept().unwrap(); - - let mut buf = [0; 11]; - assert_eq!(s2.send(&mut buf).unwrap(), 11); - assert_eq!(s3.recv(&mut buf).unwrap(), 11); - } -} +from!(net::TcpStream, Socket); +from!(net::TcpListener, Socket); +from!(net::UdpSocket, Socket); +from!(Socket, net::TcpStream); +from!(Socket, net::TcpListener); +from!(Socket, net::UdpSocket); diff --git a/third_party/rust/socket2/src/sockref.rs b/third_party/rust/socket2/src/sockref.rs new file mode 100644 index 000000000000..257323beb356 --- /dev/null +++ b/third_party/rust/socket2/src/sockref.rs @@ -0,0 +1,147 @@ +use std::fmt; +use std::marker::PhantomData; +use std::mem::ManuallyDrop; +use std::ops::Deref; +#[cfg(unix)] +use std::os::unix::io::{AsRawFd, FromRawFd}; +#[cfg(windows)] +use std::os::windows::io::{AsRawSocket, FromRawSocket}; + +use crate::Socket; + +/// A reference to a [`Socket`] that can be used to configure socket types other +/// than the `Socket` type itself. +/// +/// This allows for example a [`TcpStream`], found in the standard library, to +/// be configured using all the additional methods found in the [`Socket`] API. +/// +/// `SockRef` can be created from any socket type that implements [`AsRawFd`] +/// (Unix) or [`AsRawSocket`] (Windows) using the [`From`] implementation, but +/// the caller must ensure the file descriptor/socket is a valid. +/// +/// [`TcpStream`]: std::net::TcpStream +// Don't use intra-doc links because they won't build on every platform. +/// [`AsRawFd`]: https://doc.rust-lang.org/stable/std/os/unix/io/trait.AsRawFd.html +/// [`AsRawSocket`]: https://doc.rust-lang.org/stable/std/os/windows/io/trait.AsRawSocket.html +/// +/// # Examples +/// +/// Below is an example of converting a [`TcpStream`] into a [`SockRef`]. +/// +/// ``` +/// use std::net::{TcpStream, SocketAddr}; +/// +/// use socket2::SockRef; +/// +/// # fn main() -> Result<(), Box> { +/// // Create `TcpStream` from the standard library. +/// let address: SocketAddr = "127.0.0.1:1234".parse()?; +/// # let b1 = std::sync::Arc::new(std::sync::Barrier::new(2)); +/// # let b2 = b1.clone(); +/// # let handle = std::thread::spawn(move || { +/// # let listener = std::net::TcpListener::bind(address).unwrap(); +/// # b2.wait(); +/// # let (stream, _) = listener.accept().unwrap(); +/// # std::thread::sleep(std::time::Duration::from_millis(10)); +/// # drop(stream); +/// # }); +/// # b1.wait(); +/// let stream = TcpStream::connect(address)?; +/// +/// // Create a `SockRef`erence to the stream. +/// let socket_ref = SockRef::from(&stream); +/// // Use `Socket::set_nodelay` on the stream. +/// socket_ref.set_nodelay(true)?; +/// drop(socket_ref); +/// +/// assert_eq!(stream.nodelay()?, true); +/// # handle.join().unwrap(); +/// # Ok(()) +/// # } +/// ``` +/// +/// Below is an example of **incorrect usage** of `SockRef::from`, which is +/// currently possible (but not intended and will be fixed in future versions). +/// +/// ```compile_fail +/// use socket2::SockRef; +/// +/// # fn main() -> Result<(), Box> { +/// /// THIS USAGE IS NOT VALID! 
+/// let socket_ref = SockRef::from(&123); +/// // The above line is overseen possibility when using `SockRef::from`, it +/// // uses the `RawFd` (on Unix), which is a type alias for `c_int`/`i32`, +/// // which implements `AsRawFd`. However it may be clear that this usage is +/// // invalid as it doesn't guarantee that `123` is a valid file descriptor. +/// +/// // Using `Socket::set_nodelay` now will call it on a file descriptor we +/// // don't own! We don't even not if the file descriptor is valid or a socket. +/// socket_ref.set_nodelay(true)?; +/// drop(socket_ref); +/// # Ok(()) +/// # } +/// # DO_NOT_COMPILE +/// ``` +pub struct SockRef<'s> { + /// Because this is a reference we don't own the `Socket`, however `Socket` + /// closes itself when dropped, so we use `ManuallyDrop` to prevent it from + /// closing itself. + socket: ManuallyDrop, + /// Because we don't own the socket we need to ensure the socket remains + /// open while we have a "reference" to it, the lifetime `'s` ensures this. + _lifetime: PhantomData<&'s Socket>, +} + +impl<'s> Deref for SockRef<'s> { + type Target = Socket; + + fn deref(&self) -> &Self::Target { + &self.socket + } +} + +/// On Windows, a corresponding `From<&impl AsRawSocket>` implementation exists. +#[cfg(unix)] +#[cfg_attr(docsrs, doc(cfg(unix)))] +impl<'s, S> From<&'s S> for SockRef<'s> +where + S: AsRawFd, +{ + /// The caller must ensure `S` is actually a socket. + fn from(socket: &'s S) -> Self { + let fd = socket.as_raw_fd(); + assert!(fd >= 0); + SockRef { + socket: ManuallyDrop::new(unsafe { Socket::from_raw_fd(fd) }), + _lifetime: PhantomData, + } + } +} + +/// On Unix, a corresponding `From<&impl AsRawFd>` implementation exists. +#[cfg(windows)] +#[cfg_attr(docsrs, doc(cfg(windows)))] +impl<'s, S> From<&'s S> for SockRef<'s> +where + S: AsRawSocket, +{ + /// See the `From<&impl AsRawFd>` implementation. + fn from(socket: &'s S) -> Self { + let socket = socket.as_raw_socket(); + assert!(socket != winapi::um::winsock2::INVALID_SOCKET as _); + SockRef { + socket: ManuallyDrop::new(unsafe { Socket::from_raw_socket(socket) }), + _lifetime: PhantomData, + } + } +} + +impl fmt::Debug for SockRef<'_> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("SockRef") + .field("raw", &self.socket.as_raw()) + .field("local_addr", &self.socket.local_addr().ok()) + .field("peer_addr", &self.socket.peer_addr().ok()) + .finish() + } +} diff --git a/third_party/rust/socket2/src/sys/unix.rs b/third_party/rust/socket2/src/sys/unix.rs index 87794002e27a..873b3238a472 100644 --- a/third_party/rust/socket2/src/sys/unix.rs +++ b/third_party/rust/socket2/src/sys/unix.rs @@ -1,6 +1,4 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. +// Copyright 2015 The Rust Project Developers. // // Licensed under the Apache License, Version 2.0 or the MIT license @@ -8,86 +6,229 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
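The crate-level docs above show `SockRef` with a `TcpStream`; the same borrowing pattern applies to any type implementing `AsRawFd`/`AsRawSocket`. A small sketch with a std `UdpSocket` bound to an arbitrary loopback port, assuming socket2 0.4's public `SockRef` API:

use std::net::UdpSocket;

use socket2::SockRef;

fn main() -> std::io::Result<()> {
    // Any socket type implementing AsRawFd (Unix) or AsRawSocket (Windows).
    let socket = UdpSocket::bind("127.0.0.1:0")?;

    // Borrow it as a socket2 Socket without taking ownership; the
    // ManuallyDrop inside SockRef keeps the descriptor open.
    let sock_ref = SockRef::from(&socket);
    sock_ref.set_broadcast(true)?;
    assert!(sock_ref.broadcast()?);
    Ok(())
}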
-use std::cmp; -#[cfg(target_os = "linux")] -use std::ffi::{CStr, CString}; -use std::fmt; -use std::io; -use std::io::{ErrorKind, Read, Write}; -use std::mem; -#[cfg(target_os = "linux")] -use std::mem::MaybeUninit; +use std::cmp::min; +#[cfg(not(target_os = "redox"))] +use std::io::IoSlice; +use std::marker::PhantomData; +use std::mem::{self, size_of, MaybeUninit}; use std::net::Shutdown; -use std::net::{self, Ipv4Addr, Ipv6Addr}; -use std::ops::Neg; -#[cfg(feature = "unix")] +use std::net::{Ipv4Addr, Ipv6Addr}; +#[cfg(all(feature = "all", target_vendor = "apple"))] +use std::num::NonZeroU32; +#[cfg(all( + feature = "all", + any( + target_os = "android", + target_os = "freebsd", + target_os = "linux", + target_vendor = "apple", + ) +))] +use std::num::NonZeroUsize; +#[cfg(feature = "all")] +use std::os::unix::ffi::OsStrExt; +#[cfg(all( + feature = "all", + any( + target_os = "android", + target_os = "freebsd", + target_os = "linux", + target_vendor = "apple", + ) +))] +use std::os::unix::io::RawFd; +use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd}; +#[cfg(feature = "all")] use std::os::unix::net::{UnixDatagram, UnixListener, UnixStream}; -use std::os::unix::prelude::*; -#[cfg(target_os = "linux")] +#[cfg(feature = "all")] +use std::path::Path; +#[cfg(not(all(target_os = "redox", not(feature = "all"))))] use std::ptr; -#[cfg(target_os = "linux")] -use std::slice; -use std::sync::atomic::{AtomicBool, Ordering}; use std::time::{Duration, Instant}; +use std::{io, slice}; -use libc::{self, c_void, socklen_t, ssize_t}; +#[cfg(not(target_vendor = "apple"))] +use libc::ssize_t; +use libc::{c_void, in6_addr, in_addr}; -use crate::{Domain, Type}; +#[cfg(not(target_os = "redox"))] +use crate::RecvFlags; +use crate::{Domain, Protocol, SockAddr, TcpKeepalive, Type}; -pub use libc::c_int; +pub(crate) use libc::c_int; // Used in `Domain`. pub(crate) use libc::{AF_INET, AF_INET6}; // Used in `Type`. -#[cfg(not(target_os = "redox"))] +#[cfg(all(feature = "all", not(target_os = "redox")))] pub(crate) use libc::SOCK_RAW; -pub(crate) use libc::{SOCK_DGRAM, SOCK_SEQPACKET, SOCK_STREAM}; +#[cfg(feature = "all")] +pub(crate) use libc::SOCK_SEQPACKET; +pub(crate) use libc::{SOCK_DGRAM, SOCK_STREAM}; // Used in `Protocol`. pub(crate) use libc::{IPPROTO_ICMP, IPPROTO_ICMPV6, IPPROTO_TCP, IPPROTO_UDP}; +// Used in `SockAddr`. +pub(crate) use libc::{ + sa_family_t, sockaddr, sockaddr_in, sockaddr_in6, sockaddr_storage, socklen_t, +}; +// Used in `RecvFlags`. +#[cfg(not(target_os = "redox"))] +pub(crate) use libc::{MSG_TRUNC, SO_OOBINLINE}; +// Used in `Socket`. 
+#[cfg(all(feature = "all", not(target_os = "redox")))] +pub(crate) use libc::IP_HDRINCL; +#[cfg(not(any( + target_os = "fuschia", + target_os = "redox", + target_os = "solaris", + target_os = "illumos", +)))] +pub(crate) use libc::IP_TOS; +#[cfg(not(target_vendor = "apple"))] +pub(crate) use libc::SO_LINGER; +#[cfg(target_vendor = "apple")] +pub(crate) use libc::SO_LINGER_SEC as SO_LINGER; +pub(crate) use libc::{ + ip_mreq as IpMreq, ipv6_mreq as Ipv6Mreq, linger, IPPROTO_IP, IPPROTO_IPV6, + IPV6_MULTICAST_HOPS, IPV6_MULTICAST_IF, IPV6_MULTICAST_LOOP, IPV6_UNICAST_HOPS, IPV6_V6ONLY, + IP_ADD_MEMBERSHIP, IP_DROP_MEMBERSHIP, IP_MULTICAST_IF, IP_MULTICAST_LOOP, IP_MULTICAST_TTL, + IP_TTL, MSG_OOB, MSG_PEEK, SOL_SOCKET, SO_BROADCAST, SO_ERROR, SO_KEEPALIVE, SO_RCVBUF, + SO_RCVTIMEO, SO_REUSEADDR, SO_SNDBUF, SO_SNDTIMEO, SO_TYPE, TCP_NODELAY, +}; +#[cfg(not(any( + target_os = "dragonfly", + target_os = "freebsd", + target_os = "haiku", + target_os = "illumos", + target_os = "netbsd", + target_os = "openbsd", + target_os = "solaris", + target_vendor = "apple" +)))] +pub(crate) use libc::{IPV6_ADD_MEMBERSHIP, IPV6_DROP_MEMBERSHIP}; +#[cfg(any( + target_os = "dragonfly", + target_os = "freebsd", + target_os = "haiku", + target_os = "illumos", + target_os = "netbsd", + target_os = "openbsd", + target_os = "solaris", + target_vendor = "apple", +))] +pub(crate) use libc::{ + IPV6_JOIN_GROUP as IPV6_ADD_MEMBERSHIP, IPV6_LEAVE_GROUP as IPV6_DROP_MEMBERSHIP, +}; +#[cfg(all( + feature = "all", + any( + target_os = "android", + target_os = "dragonfly", + target_os = "freebsd", + target_os = "fuchsia", + target_os = "illumos", + target_os = "linux", + target_os = "netbsd", + target_vendor = "apple", + ) +))] +pub(crate) use libc::{TCP_KEEPCNT, TCP_KEEPINTVL}; -cfg_if::cfg_if! { - if #[cfg(any(target_os = "dragonfly", target_os = "freebsd", - target_os = "ios", target_os = "macos", - target_os = "openbsd", target_os = "netbsd", - target_os = "solaris", target_os = "illumos", - target_os = "haiku"))] { - use libc::IPV6_JOIN_GROUP as IPV6_ADD_MEMBERSHIP; - use libc::IPV6_LEAVE_GROUP as IPV6_DROP_MEMBERSHIP; - } else { - use libc::IPV6_ADD_MEMBERSHIP; - use libc::IPV6_DROP_MEMBERSHIP; - } +// See this type in the Windows file. +pub(crate) type Bool = c_int; + +#[cfg(target_vendor = "apple")] +use libc::TCP_KEEPALIVE as KEEPALIVE_TIME; +#[cfg(not(any(target_vendor = "apple", target_os = "haiku", target_os = "openbsd")))] +use libc::TCP_KEEPIDLE as KEEPALIVE_TIME; + +/// Helper macro to execute a system call that returns an `io::Result`. +macro_rules! syscall { + ($fn: ident ( $($arg: expr),* $(,)* ) ) => {{ + #[allow(unused_unsafe)] + let res = unsafe { libc::$fn($($arg, )*) }; + if res == -1 { + Err(std::io::Error::last_os_error()) + } else { + Ok(res) + } + }}; } -cfg_if::cfg_if! { - if #[cfg(any(target_os = "macos", target_os = "ios"))] { - use libc::TCP_KEEPALIVE as KEEPALIVE_OPTION; - } else if #[cfg(any(target_os = "openbsd", target_os = "netbsd", target_os = "haiku"))] { - use libc::SO_KEEPALIVE as KEEPALIVE_OPTION; - } else { - use libc::TCP_KEEPIDLE as KEEPALIVE_OPTION; - } -} +/// Maximum size of a buffer passed to system call like `recv` and `send`. +#[cfg(not(target_vendor = "apple"))] +const MAX_BUF_LEN: usize = ::max_value() as usize; -use crate::utils::One; -use crate::SockAddr; +// The maximum read limit on most posix-like systems is `SSIZE_MAX`, with the +// man page quoting that if the count of bytes to read is greater than +// `SSIZE_MAX` the result is "unspecified". 
+// +// On macOS, however, apparently the 64-bit libc is either buggy or +// intentionally showing odd behavior by rejecting any read with a size larger +// than or equal to INT_MAX. To handle both of these the read size is capped on +// both platforms. +#[cfg(target_vendor = "apple")] +const MAX_BUF_LEN: usize = ::max_value() as usize - 1; + +#[cfg(any( + all( + target_os = "linux", + any( + target_env = "gnu", + all(target_env = "uclibc", target_pointer_width = "64") + ) + ), + target_os = "android", +))] +type IovLen = usize; + +#[cfg(any( + all( + target_os = "linux", + any( + target_env = "musl", + all(target_env = "uclibc", target_pointer_width = "32") + ) + ), + target_os = "dragonfly", + target_os = "freebsd", + target_os = "fuchsia", + target_os = "haiku", + target_os = "illumos", + target_os = "netbsd", + target_os = "openbsd", + target_os = "solaris", + target_vendor = "apple", +))] +type IovLen = c_int; /// Unix only API. impl Domain { /// Domain for Unix socket communication, corresponding to `AF_UNIX`. - pub fn unix() -> Domain { - Domain(libc::AF_UNIX) - } + #[cfg_attr(docsrs, doc(cfg(unix)))] + pub const UNIX: Domain = Domain(libc::AF_UNIX); /// Domain for low-level packet interface, corresponding to `AF_PACKET`. - /// - /// # Notes - /// - /// This function is only available on Linux. - #[cfg(target_os = "linux")] - pub fn packet() -> Domain { - Domain(libc::AF_PACKET) - } + #[cfg(all( + feature = "all", + any(target_os = "android", target_os = "fuchsia", target_os = "linux") + ))] + #[cfg_attr( + docsrs, + doc(cfg(all( + feature = "all", + any(target_os = "android", target_os = "fuchsia", target_os = "linux") + ))) + )] + pub const PACKET: Domain = Domain(libc::AF_PACKET); + + /// Domain for low-level VSOCK interface, corresponding to `AF_VSOCK`. + #[cfg(all(feature = "all", any(target_os = "android", target_os = "linux")))] + #[cfg_attr( + docsrs, + doc(cfg(all(feature = "all", any(target_os = "android", target_os = "linux")))) + )] + pub const VSOCK: Domain = Domain(libc::AF_VSOCK); } impl_debug!( @@ -95,55 +236,110 @@ impl_debug!( libc::AF_INET, libc::AF_INET6, libc::AF_UNIX, + #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))] + #[cfg_attr( + docsrs, + doc(cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))) + )] + libc::AF_PACKET, + #[cfg(any(target_os = "android", target_os = "linux"))] + #[cfg_attr(docsrs, doc(cfg(any(target_os = "android", target_os = "linux"))))] + libc::AF_VSOCK, libc::AF_UNSPEC, // = 0. ); /// Unix only API. impl Type { /// Set `SOCK_NONBLOCK` on the `Type`. - /// - /// # Notes - /// - /// This function is only available on Android, DragonFlyBSD, FreeBSD, - /// Linux, NetBSD and OpenBSD. 
- #[cfg(any( - target_os = "android", - target_os = "dragonfly", - target_os = "freebsd", - target_os = "linux", - target_os = "netbsd", - target_os = "openbsd" + #[cfg(all( + feature = "all", + any( + target_os = "android", + target_os = "dragonfly", + target_os = "freebsd", + target_os = "fuchsia", + target_os = "illumos", + target_os = "linux", + target_os = "netbsd", + target_os = "openbsd" + ) ))] - pub fn non_blocking(self) -> Type { + #[cfg_attr( + docsrs, + doc(cfg(all( + feature = "all", + any( + target_os = "android", + target_os = "dragonfly", + target_os = "freebsd", + target_os = "fuchsia", + target_os = "illumos", + target_os = "linux", + target_os = "netbsd", + target_os = "openbsd" + ) + ))) + )] + pub const fn nonblocking(self) -> Type { Type(self.0 | libc::SOCK_NONBLOCK) } /// Set `SOCK_CLOEXEC` on the `Type`. - /// - /// # Notes - /// - /// This function is only available on Android, DragonFlyBSD, FreeBSD, - /// Linux, NetBSD and OpenBSD. + #[cfg(all( + feature = "all", + any( + target_os = "android", + target_os = "dragonfly", + target_os = "freebsd", + target_os = "fuchsia", + target_os = "illumos", + target_os = "linux", + target_os = "netbsd", + target_os = "openbsd" + ) + ))] + #[cfg_attr( + docsrs, + doc(cfg(all( + feature = "all", + any( + target_os = "android", + target_os = "dragonfly", + target_os = "freebsd", + target_os = "fuchsia", + target_os = "illumos", + target_os = "linux", + target_os = "netbsd", + target_os = "openbsd" + ) + ))) + )] + pub const fn cloexec(self) -> Type { + self._cloexec() + } + #[cfg(any( target_os = "android", target_os = "dragonfly", target_os = "freebsd", + target_os = "fuchsia", + target_os = "illumos", target_os = "linux", target_os = "netbsd", target_os = "openbsd" ))] - pub fn cloexec(self) -> Type { + pub(crate) const fn _cloexec(self) -> Type { Type(self.0 | libc::SOCK_CLOEXEC) } } impl_debug!( - crate::Type, + Type, libc::SOCK_STREAM, libc::SOCK_DGRAM, #[cfg(not(target_os = "redox"))] libc::SOCK_RAW, - #[cfg(not(any(target_os = "haiku", target_os = "redox")))] + #[cfg(not(any(target_os = "redox", target_os = "haiku")))] libc::SOCK_RDM, libc::SOCK_SEQPACKET, /* TODO: add these optional bit OR-ed flags: @@ -151,6 +347,7 @@ impl_debug!( target_os = "android", target_os = "dragonfly", target_os = "freebsd", + target_os = "fuchsia", target_os = "linux", target_os = "netbsd", target_os = "openbsd" @@ -160,6 +357,7 @@ impl_debug!( target_os = "android", target_os = "dragonfly", target_os = "freebsd", + target_os = "fuchsia", target_os = "linux", target_os = "netbsd", target_os = "openbsd" @@ -169,1149 +367,1687 @@ impl_debug!( ); impl_debug!( - crate::Protocol, + Protocol, libc::IPPROTO_ICMP, libc::IPPROTO_ICMPV6, libc::IPPROTO_TCP, libc::IPPROTO_UDP, ); -pub struct Socket { - fd: c_int, +/// Unix-only API. +#[cfg(not(target_os = "redox"))] +impl RecvFlags { + /// Check if the message terminates a record. + /// + /// Not all socket types support the notion of records. + /// For socket types that do support it (such as [`SEQPACKET`][Type::SEQPACKET]), + /// a record is terminated by sending a message with the end-of-record flag set. + /// + /// On Unix this corresponds to the MSG_EOR flag. + pub const fn is_end_of_record(self) -> bool { + self.0 & libc::MSG_EOR != 0 + } + + /// Check if the message contains out-of-band data. + /// + /// This is useful for protocols where you receive out-of-band data + /// mixed in with the normal data stream. + /// + /// On Unix this corresponds to the MSG_OOB flag. 
+ pub const fn is_out_of_band(self) -> bool { + self.0 & libc::MSG_OOB != 0 + } } -impl Socket { - pub fn new(family: c_int, ty: c_int, protocol: c_int) -> io::Result { - unsafe { - // On linux we first attempt to pass the SOCK_CLOEXEC flag to - // atomically create the socket and set it as CLOEXEC. Support for - // this option, however, was added in 2.6.27, and we still support - // 2.6.18 as a kernel, so if the returned error is EINVAL we - // fallthrough to the fallback. - #[cfg(target_os = "linux")] - { - match cvt(libc::socket(family, ty | libc::SOCK_CLOEXEC, protocol)) { - Ok(fd) => return Ok(Socket::from_raw_fd(fd)), - Err(ref e) if e.raw_os_error() == Some(libc::EINVAL) => {} - Err(e) => return Err(e), - } - } - - let fd = cvt(libc::socket(family, ty, protocol))?; - let fd = Socket::from_raw_fd(fd); - set_cloexec(fd.as_raw_fd())?; - #[cfg(any(target_os = "macos", target_os = "ios"))] - { - fd.setsockopt(libc::SOL_SOCKET, libc::SO_NOSIGPIPE, 1i32)?; - } - Ok(fd) - } +#[cfg(not(target_os = "redox"))] +impl std::fmt::Debug for RecvFlags { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("RecvFlags") + .field("is_end_of_record", &self.is_end_of_record()) + .field("is_out_of_band", &self.is_out_of_band()) + .field("is_truncated", &self.is_truncated()) + .finish() } +} - pub fn pair(family: c_int, ty: c_int, protocol: c_int) -> io::Result<(Socket, Socket)> { - unsafe { - let mut fds = [0, 0]; - cvt(libc::socketpair(family, ty, protocol, fds.as_mut_ptr()))?; - let fds = (Socket::from_raw_fd(fds[0]), Socket::from_raw_fd(fds[1])); - set_cloexec(fds.0.as_raw_fd())?; - set_cloexec(fds.1.as_raw_fd())?; - #[cfg(any(target_os = "macos", target_os = "ios"))] - { - fds.0 - .setsockopt(libc::SOL_SOCKET, libc::SO_NOSIGPIPE, 1i32)?; - fds.1 - .setsockopt(libc::SOL_SOCKET, libc::SO_NOSIGPIPE, 1i32)?; - } - Ok(fds) - } - } +#[repr(transparent)] +pub struct MaybeUninitSlice<'a> { + vec: libc::iovec, + _lifetime: PhantomData<&'a mut [MaybeUninit]>, +} - pub fn bind(&self, addr: &SockAddr) -> io::Result<()> { - unsafe { cvt(libc::bind(self.fd, addr.as_ptr(), addr.len() as _)).map(|_| ()) } - } +unsafe impl<'a> Send for MaybeUninitSlice<'a> {} - pub fn listen(&self, backlog: i32) -> io::Result<()> { - unsafe { cvt(libc::listen(self.fd, backlog)).map(|_| ()) } - } +unsafe impl<'a> Sync for MaybeUninitSlice<'a> {} - pub fn connect(&self, addr: &SockAddr) -> io::Result<()> { - unsafe { cvt(libc::connect(self.fd, addr.as_ptr(), addr.len())).map(|_| ()) } - } - - pub fn connect_timeout(&self, addr: &SockAddr, timeout: Duration) -> io::Result<()> { - self.set_nonblocking(true)?; - let r = self.connect(addr); - self.set_nonblocking(false)?; - - match r { - Ok(()) => return Ok(()), - // there's no io::ErrorKind conversion registered for EINPROGRESS :( - Err(ref e) if e.raw_os_error() == Some(libc::EINPROGRESS) => {} - Err(e) => return Err(e), - } - - let mut pollfd = libc::pollfd { - fd: self.fd, - events: libc::POLLOUT, - revents: 0, - }; - - if timeout.as_secs() == 0 && timeout.subsec_nanos() == 0 { - return Err(io::Error::new( - io::ErrorKind::InvalidInput, - "cannot set a 0 duration timeout", - )); - } - - let start = Instant::now(); - - loop { - let elapsed = start.elapsed(); - if elapsed >= timeout { - return Err(io::Error::new( - io::ErrorKind::TimedOut, - "connection timed out", - )); - } - - let timeout = timeout - elapsed; - let mut timeout = timeout - .as_secs() - .saturating_mul(1_000) - .saturating_add(timeout.subsec_nanos() as u64 / 1_000_000); - if timeout == 0 
{ - timeout = 1; - } - - let timeout = cmp::min(timeout, c_int::max_value() as u64) as c_int; - - match unsafe { libc::poll(&mut pollfd, 1, timeout) } { - -1 => { - let err = io::Error::last_os_error(); - if err.kind() != io::ErrorKind::Interrupted { - return Err(err); - } - } - 0 => { - return Err(io::Error::new( - io::ErrorKind::TimedOut, - "connection timed out", - )) - } - _ => { - // linux returns POLLOUT|POLLERR|POLLHUP for refused connections (!), so look - // for POLLHUP rather than read readiness - if pollfd.revents & libc::POLLHUP != 0 { - let e = self.take_error()?.unwrap_or_else(|| { - io::Error::new(io::ErrorKind::Other, "no error set after POLLHUP") - }); - return Err(e); - } - return Ok(()); - } - } - } - } - - pub fn local_addr(&self) -> io::Result { - unsafe { - let mut storage: libc::sockaddr_storage = mem::zeroed(); - let mut len = mem::size_of_val(&storage) as libc::socklen_t; - cvt(libc::getsockname( - self.fd, - &mut storage as *mut _ as *mut _, - &mut len, - ))?; - Ok(SockAddr::from_raw_parts( - &storage as *const _ as *const _, - len, - )) - } - } - - pub fn peer_addr(&self) -> io::Result { - unsafe { - let mut storage: libc::sockaddr_storage = mem::zeroed(); - let mut len = mem::size_of_val(&storage) as libc::socklen_t; - cvt(libc::getpeername( - self.fd, - &mut storage as *mut _ as *mut _, - &mut len, - ))?; - Ok(SockAddr::from_raw_parts( - &storage as *const _ as *const _, - len, - )) - } - } - - pub fn try_clone(&self) -> io::Result { - // implementation lifted from libstd - #[cfg(any(target_os = "android", target_os = "haiku"))] - use libc::F_DUPFD as F_DUPFD_CLOEXEC; - #[cfg(not(any(target_os = "android", target_os = "haiku")))] - use libc::F_DUPFD_CLOEXEC; - - static CLOEXEC_FAILED: AtomicBool = AtomicBool::new(false); - unsafe { - if !CLOEXEC_FAILED.load(Ordering::Relaxed) { - match cvt(libc::fcntl(self.fd, F_DUPFD_CLOEXEC, 0)) { - Ok(fd) => { - let fd = Socket::from_raw_fd(fd); - if cfg!(target_os = "linux") { - set_cloexec(fd.as_raw_fd())?; - } - return Ok(fd); - } - Err(ref e) if e.raw_os_error() == Some(libc::EINVAL) => { - CLOEXEC_FAILED.store(true, Ordering::Relaxed); - } - Err(e) => return Err(e), - } - } - let fd = cvt(libc::fcntl(self.fd, libc::F_DUPFD, 0))?; - let fd = Socket::from_raw_fd(fd); - set_cloexec(fd.as_raw_fd())?; - Ok(fd) - } - } - - #[allow(unused_mut)] - pub fn accept(&self) -> io::Result<(Socket, SockAddr)> { - let mut storage: libc::sockaddr_storage = unsafe { mem::zeroed() }; - let mut len = mem::size_of_val(&storage) as socklen_t; - - let mut socket = None; - #[cfg(target_os = "linux")] - { - let res = cvt_r(|| unsafe { - libc::syscall( - libc::SYS_accept4, - self.fd as libc::c_long, - &mut storage as *mut _ as libc::c_long, - &mut len, - libc::SOCK_CLOEXEC as libc::c_long, - ) as libc::c_int - }); - match res { - Ok(fd) => socket = Some(Socket { fd: fd }), - Err(ref e) if e.raw_os_error() == Some(libc::ENOSYS) => {} - Err(e) => return Err(e), - } - } - - let socket = match socket { - Some(socket) => socket, - None => unsafe { - let fd = - cvt_r(|| libc::accept(self.fd, &mut storage as *mut _ as *mut _, &mut len))?; - let fd = Socket::from_raw_fd(fd); - set_cloexec(fd.as_raw_fd())?; - fd +impl<'a> MaybeUninitSlice<'a> { + pub(crate) fn new(buf: &'a mut [MaybeUninit]) -> MaybeUninitSlice<'a> { + MaybeUninitSlice { + vec: libc::iovec { + iov_base: buf.as_mut_ptr().cast(), + iov_len: buf.len(), }, - }; - let addr = unsafe { SockAddr::from_raw_parts(&storage as *const _ as *const _, len) }; - Ok((socket, addr)) - } - - pub fn 
take_error(&self) -> io::Result> { - unsafe { - let raw: c_int = self.getsockopt(libc::SOL_SOCKET, libc::SO_ERROR)?; - if raw == 0 { - Ok(None) - } else { - Ok(Some(io::Error::from_raw_os_error(raw as i32))) - } + _lifetime: PhantomData, } } - pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> { - unsafe { - let previous = cvt(libc::fcntl(self.fd, libc::F_GETFL))?; - let new = if nonblocking { - previous | libc::O_NONBLOCK - } else { - previous & !libc::O_NONBLOCK - }; - if new != previous { - cvt(libc::fcntl(self.fd, libc::F_SETFL, new))?; - } - Ok(()) - } + pub(crate) fn as_slice(&self) -> &[MaybeUninit] { + unsafe { slice::from_raw_parts(self.vec.iov_base.cast(), self.vec.iov_len) } } - pub fn shutdown(&self, how: Shutdown) -> io::Result<()> { - let how = match how { - Shutdown::Write => libc::SHUT_WR, - Shutdown::Read => libc::SHUT_RD, - Shutdown::Both => libc::SHUT_RDWR, - }; - cvt(unsafe { libc::shutdown(self.fd, how) })?; - Ok(()) + pub(crate) fn as_mut_slice(&mut self) -> &mut [MaybeUninit] { + unsafe { slice::from_raw_parts_mut(self.vec.iov_base.cast(), self.vec.iov_len) } } +} - pub fn recv(&self, buf: &mut [u8], flags: c_int) -> io::Result { - unsafe { - let n = cvt({ - libc::recv( - self.fd, - buf.as_mut_ptr() as *mut c_void, - cmp::min(buf.len(), max_len()), - flags, - ) - })?; - Ok(n as usize) - } - } - - pub fn peek(&self, buf: &mut [u8]) -> io::Result { - unsafe { - let n = cvt({ - libc::recv( - self.fd, - buf.as_mut_ptr() as *mut c_void, - cmp::min(buf.len(), max_len()), - libc::MSG_PEEK, - ) - })?; - Ok(n as usize) - } - } - - pub fn peek_from(&self, buf: &mut [u8]) -> io::Result<(usize, SockAddr)> { - self.recv_from(buf, libc::MSG_PEEK) - } - - pub fn recv_from(&self, buf: &mut [u8], flags: c_int) -> io::Result<(usize, SockAddr)> { - unsafe { - let mut storage: libc::sockaddr_storage = mem::zeroed(); - let mut addrlen = mem::size_of_val(&storage) as socklen_t; - - let n = cvt({ - libc::recvfrom( - self.fd, - buf.as_mut_ptr() as *mut c_void, - cmp::min(buf.len(), max_len()), - flags, - &mut storage as *mut _ as *mut _, - &mut addrlen, - ) - })?; - let addr = SockAddr::from_raw_parts(&storage as *const _ as *const _, addrlen); - Ok((n as usize, addr)) - } - } - - pub fn send(&self, buf: &[u8], flags: c_int) -> io::Result { - unsafe { - let n = cvt({ - libc::send( - self.fd, - buf.as_ptr() as *const c_void, - cmp::min(buf.len(), max_len()), - flags, - ) - })?; - Ok(n as usize) - } - } - - pub fn send_to(&self, buf: &[u8], flags: c_int, addr: &SockAddr) -> io::Result { - unsafe { - let n = cvt({ - libc::sendto( - self.fd, - buf.as_ptr() as *const c_void, - cmp::min(buf.len(), max_len()), - flags, - addr.as_ptr(), - addr.len(), - ) - })?; - Ok(n as usize) - } - } - - // ================================================ - - pub fn ttl(&self) -> io::Result { - unsafe { - let raw: c_int = self.getsockopt(libc::IPPROTO_IP, libc::IP_TTL)?; - Ok(raw as u32) - } - } - - pub fn set_ttl(&self, ttl: u32) -> io::Result<()> { - unsafe { self.setsockopt(libc::IPPROTO_IP, libc::IP_TTL, ttl as c_int) } - } - - #[cfg(not(target_os = "redox"))] - pub fn mss(&self) -> io::Result { - unsafe { - let raw: c_int = self.getsockopt(libc::IPPROTO_TCP, libc::TCP_MAXSEG)?; - Ok(raw as u32) - } - } - - #[cfg(not(target_os = "redox"))] - pub fn set_mss(&self, mss: u32) -> io::Result<()> { - unsafe { self.setsockopt(libc::IPPROTO_TCP, libc::TCP_MAXSEG, mss as c_int) } - } - - #[cfg(target_os = "linux")] - pub fn mark(&self) -> io::Result { - unsafe { - let raw: c_int = 
self.getsockopt(libc::SOL_SOCKET, libc::SO_MARK)?; - Ok(raw as u32) - } - } - - #[cfg(target_os = "linux")] - pub fn set_mark(&self, mark: u32) -> io::Result<()> { - unsafe { self.setsockopt(libc::SOL_SOCKET, libc::SO_MARK, mark as c_int) } - } - - #[cfg(target_os = "linux")] - pub fn device(&self) -> io::Result> { - // TODO: replace with `MaybeUninit::uninit_array` once stable. - let mut buf: [MaybeUninit; libc::IFNAMSIZ] = - unsafe { MaybeUninit::<[MaybeUninit; libc::IFNAMSIZ]>::uninit().assume_init() }; - let mut len = buf.len() as libc::socklen_t; - let len = unsafe { - cvt(libc::getsockopt( - self.fd, - libc::SOL_SOCKET, - libc::SO_BINDTODEVICE, - buf.as_mut_ptr().cast(), - &mut len, - ))? - }; - if len == 0 { - Ok(None) - } else { - // Allocate a buffer for `CString` with the length including the - // null terminator. - let len = len as usize; - let mut name = Vec::with_capacity(len); - - // TODO: use `MaybeUninit::slice_assume_init_ref` once stable. - // Safety: `len` bytes are writen by the OS, this includes a null - // terminator. However we don't copy the null terminator because - // `CString::from_vec_unchecked` adds its own null terminator. - let buf = unsafe { slice::from_raw_parts(buf.as_ptr().cast(), len - 1) }; - name.extend_from_slice(buf); - - // Safety: the OS initialised the string for us, which shouldn't - // include any null bytes. - Ok(Some(unsafe { CString::from_vec_unchecked(name) })) - } - } - - #[cfg(target_os = "linux")] - pub fn bind_device(&self, interface: Option<&CStr>) -> io::Result<()> { - let (value, len) = if let Some(interface) = interface { - (interface.as_ptr(), interface.to_bytes_with_nul().len()) - } else { - (ptr::null(), 0) - }; - - unsafe { - cvt(libc::setsockopt( - self.fd, - libc::SOL_SOCKET, - libc::SO_BINDTODEVICE, - value.cast(), - len as libc::socklen_t, - )) - .map(|_| ()) - } - } - - pub fn unicast_hops_v6(&self) -> io::Result { - unsafe { - let raw: c_int = self.getsockopt(libc::IPPROTO_IPV6, libc::IPV6_UNICAST_HOPS)?; - Ok(raw as u32) - } - } - - pub fn set_unicast_hops_v6(&self, hops: u32) -> io::Result<()> { - unsafe { - self.setsockopt( - libc::IPPROTO_IPV6 as c_int, - libc::IPV6_UNICAST_HOPS, - hops as c_int, - ) - } - } - - pub fn only_v6(&self) -> io::Result { - unsafe { - let raw: c_int = self.getsockopt(libc::IPPROTO_IPV6, libc::IPV6_V6ONLY)?; - Ok(raw != 0) - } - } - - pub fn set_only_v6(&self, only_v6: bool) -> io::Result<()> { - unsafe { self.setsockopt(libc::IPPROTO_IPV6, libc::IPV6_V6ONLY, only_v6 as c_int) } - } - - pub fn read_timeout(&self) -> io::Result> { - unsafe { - Ok(timeval2dur( - self.getsockopt(libc::SOL_SOCKET, libc::SO_RCVTIMEO)?, - )) - } - } - - pub fn set_read_timeout(&self, dur: Option) -> io::Result<()> { - unsafe { self.setsockopt(libc::SOL_SOCKET, libc::SO_RCVTIMEO, dur2timeval(dur)?) } - } - - pub fn write_timeout(&self) -> io::Result> { - unsafe { - Ok(timeval2dur( - self.getsockopt(libc::SOL_SOCKET, libc::SO_SNDTIMEO)?, - )) - } - } - - pub fn set_write_timeout(&self, dur: Option) -> io::Result<()> { - unsafe { self.setsockopt(libc::SOL_SOCKET, libc::SO_SNDTIMEO, dur2timeval(dur)?) 
} - } - - pub fn nodelay(&self) -> io::Result { - unsafe { - let raw: c_int = self.getsockopt(libc::IPPROTO_TCP, libc::TCP_NODELAY)?; - Ok(raw != 0) - } - } - - pub fn set_nodelay(&self, nodelay: bool) -> io::Result<()> { - unsafe { self.setsockopt(libc::IPPROTO_TCP, libc::TCP_NODELAY, nodelay as c_int) } - } - - pub fn broadcast(&self) -> io::Result { - unsafe { - let raw: c_int = self.getsockopt(libc::SOL_SOCKET, libc::SO_BROADCAST)?; - Ok(raw != 0) - } - } - - pub fn set_broadcast(&self, broadcast: bool) -> io::Result<()> { - unsafe { self.setsockopt(libc::SOL_SOCKET, libc::SO_BROADCAST, broadcast as c_int) } - } - - pub fn multicast_loop_v4(&self) -> io::Result { - unsafe { - let raw: c_int = self.getsockopt(libc::IPPROTO_IP, libc::IP_MULTICAST_LOOP)?; - Ok(raw != 0) - } - } - - pub fn set_multicast_loop_v4(&self, multicast_loop_v4: bool) -> io::Result<()> { - unsafe { - self.setsockopt( - libc::IPPROTO_IP, - libc::IP_MULTICAST_LOOP, - multicast_loop_v4 as c_int, - ) - } - } - - pub fn multicast_ttl_v4(&self) -> io::Result { - unsafe { - let raw: c_int = self.getsockopt(libc::IPPROTO_IP, libc::IP_MULTICAST_TTL)?; - Ok(raw as u32) - } - } - - pub fn set_multicast_ttl_v4(&self, multicast_ttl_v4: u32) -> io::Result<()> { - unsafe { - self.setsockopt( - libc::IPPROTO_IP, - libc::IP_MULTICAST_TTL, - multicast_ttl_v4 as c_int, - ) - } - } - - pub fn multicast_hops_v6(&self) -> io::Result { - unsafe { - let raw: c_int = self.getsockopt(libc::IPPROTO_IPV6, libc::IPV6_MULTICAST_HOPS)?; - Ok(raw as u32) - } - } - - pub fn set_multicast_hops_v6(&self, hops: u32) -> io::Result<()> { - unsafe { self.setsockopt(libc::IPPROTO_IPV6, libc::IPV6_MULTICAST_HOPS, hops as c_int) } - } - - pub fn multicast_if_v4(&self) -> io::Result { - unsafe { - let imr_interface: libc::in_addr = - self.getsockopt(libc::IPPROTO_IP, libc::IP_MULTICAST_IF)?; - Ok(from_s_addr(imr_interface.s_addr)) - } - } - - pub fn set_multicast_if_v4(&self, interface: &Ipv4Addr) -> io::Result<()> { - let interface = to_s_addr(interface); - let imr_interface = libc::in_addr { s_addr: interface }; - - unsafe { self.setsockopt(libc::IPPROTO_IP, libc::IP_MULTICAST_IF, imr_interface) } - } - - pub fn multicast_if_v6(&self) -> io::Result { - unsafe { - let raw: c_int = self.getsockopt(libc::IPPROTO_IPV6, libc::IPV6_MULTICAST_IF)?; - Ok(raw as u32) - } - } - - pub fn set_multicast_if_v6(&self, interface: u32) -> io::Result<()> { - unsafe { - self.setsockopt( - libc::IPPROTO_IPV6, - libc::IPV6_MULTICAST_IF, - interface as c_int, - ) - } - } - - pub fn multicast_loop_v6(&self) -> io::Result { - unsafe { - let raw: c_int = self.getsockopt(libc::IPPROTO_IPV6, libc::IPV6_MULTICAST_LOOP)?; - Ok(raw != 0) - } - } - - pub fn set_multicast_loop_v6(&self, multicast_loop_v6: bool) -> io::Result<()> { - unsafe { - self.setsockopt( - libc::IPPROTO_IPV6, - libc::IPV6_MULTICAST_LOOP, - multicast_loop_v6 as c_int, - ) - } - } - - pub fn join_multicast_v4(&self, multiaddr: &Ipv4Addr, interface: &Ipv4Addr) -> io::Result<()> { - let multiaddr = to_s_addr(multiaddr); - let interface = to_s_addr(interface); - let mreq = libc::ip_mreq { - imr_multiaddr: libc::in_addr { s_addr: multiaddr }, - imr_interface: libc::in_addr { s_addr: interface }, - }; - unsafe { self.setsockopt(libc::IPPROTO_IP, libc::IP_ADD_MEMBERSHIP, mreq) } - } - - pub fn join_multicast_v6(&self, multiaddr: &Ipv6Addr, interface: u32) -> io::Result<()> { - let multiaddr = to_in6_addr(multiaddr); - let mreq = libc::ipv6_mreq { - ipv6mr_multiaddr: multiaddr, - ipv6mr_interface: 
to_ipv6mr_interface(interface), - }; - unsafe { self.setsockopt(libc::IPPROTO_IPV6, IPV6_ADD_MEMBERSHIP, mreq) } - } - - pub fn leave_multicast_v4(&self, multiaddr: &Ipv4Addr, interface: &Ipv4Addr) -> io::Result<()> { - let multiaddr = to_s_addr(multiaddr); - let interface = to_s_addr(interface); - let mreq = libc::ip_mreq { - imr_multiaddr: libc::in_addr { s_addr: multiaddr }, - imr_interface: libc::in_addr { s_addr: interface }, - }; - unsafe { self.setsockopt(libc::IPPROTO_IP, libc::IP_DROP_MEMBERSHIP, mreq) } - } - - pub fn leave_multicast_v6(&self, multiaddr: &Ipv6Addr, interface: u32) -> io::Result<()> { - let multiaddr = to_in6_addr(multiaddr); - let mreq = libc::ipv6_mreq { - ipv6mr_multiaddr: multiaddr, - ipv6mr_interface: to_ipv6mr_interface(interface), - }; - unsafe { self.setsockopt(libc::IPPROTO_IPV6, IPV6_DROP_MEMBERSHIP, mreq) } - } - - pub fn linger(&self) -> io::Result> { - unsafe { - Ok(linger2dur( - self.getsockopt(libc::SOL_SOCKET, libc::SO_LINGER)?, - )) - } - } - - pub fn set_linger(&self, dur: Option) -> io::Result<()> { - unsafe { self.setsockopt(libc::SOL_SOCKET, libc::SO_LINGER, dur2linger(dur)) } - } - - pub fn set_reuse_address(&self, reuse: bool) -> io::Result<()> { - unsafe { self.setsockopt(libc::SOL_SOCKET, libc::SO_REUSEADDR, reuse as c_int) } - } - - pub fn reuse_address(&self) -> io::Result { - unsafe { - let raw: c_int = self.getsockopt(libc::SOL_SOCKET, libc::SO_REUSEADDR)?; - Ok(raw != 0) - } - } - - pub fn recv_buffer_size(&self) -> io::Result { - unsafe { - let raw: c_int = self.getsockopt(libc::SOL_SOCKET, libc::SO_RCVBUF)?; - Ok(raw as usize) - } - } - - pub fn set_recv_buffer_size(&self, size: usize) -> io::Result<()> { - unsafe { - // TODO: casting usize to a c_int should be a checked cast - self.setsockopt(libc::SOL_SOCKET, libc::SO_RCVBUF, size as c_int) - } - } - - pub fn send_buffer_size(&self) -> io::Result { - unsafe { - let raw: c_int = self.getsockopt(libc::SOL_SOCKET, libc::SO_SNDBUF)?; - Ok(raw as usize) - } - } - - pub fn set_send_buffer_size(&self, size: usize) -> io::Result<()> { - unsafe { - // TODO: casting usize to a c_int should be a checked cast - self.setsockopt(libc::SOL_SOCKET, libc::SO_SNDBUF, size as c_int) - } - } - - pub fn keepalive(&self) -> io::Result> { - unsafe { - let raw: c_int = self.getsockopt(libc::SOL_SOCKET, libc::SO_KEEPALIVE)?; - if raw == 0 { - return Ok(None); - } - let secs: c_int = self.getsockopt(libc::IPPROTO_TCP, KEEPALIVE_OPTION)?; - Ok(Some(Duration::new(secs as u64, 0))) - } - } - - pub fn set_keepalive(&self, keepalive: Option) -> io::Result<()> { - unsafe { - self.setsockopt( - libc::SOL_SOCKET, - libc::SO_KEEPALIVE, - keepalive.is_some() as c_int, - )?; - if let Some(dur) = keepalive { - // TODO: checked cast here - self.setsockopt(libc::IPPROTO_TCP, KEEPALIVE_OPTION, dur.as_secs() as c_int)?; - } - Ok(()) - } - } - - #[cfg(all( - unix, - not(any(target_os = "solaris", target_os = "illumos")), - feature = "reuseport" - ))] - pub fn reuse_port(&self) -> io::Result { - unsafe { - let raw: c_int = self.getsockopt(libc::SOL_SOCKET, libc::SO_REUSEPORT)?; - Ok(raw != 0) - } - } - - #[cfg(all( - unix, - not(any(target_os = "solaris", target_os = "illumos")), - feature = "reuseport" - ))] - pub fn set_reuse_port(&self, reuse: bool) -> io::Result<()> { - unsafe { self.setsockopt(libc::SOL_SOCKET, libc::SO_REUSEPORT, reuse as c_int) } - } - - pub fn out_of_band_inline(&self) -> io::Result { - unsafe { - let raw: c_int = self.getsockopt(libc::SOL_SOCKET, libc::SO_OOBINLINE)?; - Ok(raw != 0) - } - } - - pub 
fn set_out_of_band_inline(&self, oob_inline: bool) -> io::Result<()> { - unsafe { self.setsockopt(libc::SOL_SOCKET, libc::SO_OOBINLINE, oob_inline as c_int) } - } - - unsafe fn setsockopt(&self, opt: c_int, val: c_int, payload: T) -> io::Result<()> +/// Unix only API. +impl SockAddr { + /// Constructs a `SockAddr` with the family `AF_UNIX` and the provided path. + /// + /// # Failure + /// + /// Returns an error if the path is longer than `SUN_LEN`. + #[cfg(feature = "all")] + #[cfg_attr(docsrs, doc(cfg(all(unix, feature = "all"))))] + #[allow(unused_unsafe)] // TODO: replace with `unsafe_op_in_unsafe_fn` once stable. + pub fn unix

(path: P) -> io::Result where - T: Copy, + P: AsRef, { - let payload = &payload as *const T as *const c_void; - cvt(libc::setsockopt( - self.fd, - opt, - val, - payload, - mem::size_of::() as libc::socklen_t, - ))?; - Ok(()) - } - - unsafe fn getsockopt(&self, opt: c_int, val: c_int) -> io::Result { - let mut slot: T = mem::zeroed(); - let mut len = mem::size_of::() as libc::socklen_t; - cvt(libc::getsockopt( - self.fd, - opt, - val, - &mut slot as *mut _ as *mut _, - &mut len, - ))?; - assert_eq!(len as usize, mem::size_of::()); - Ok(slot) - } -} - -impl Read for Socket { - fn read(&mut self, buf: &mut [u8]) -> io::Result { - <&Socket>::read(&mut &*self, buf) - } -} - -impl<'a> Read for &'a Socket { - fn read(&mut self, buf: &mut [u8]) -> io::Result { unsafe { - let n = cvt({ - libc::read( - self.fd, - buf.as_mut_ptr() as *mut c_void, - cmp::min(buf.len(), max_len()), - ) - })?; - Ok(n as usize) + SockAddr::init(|storage, len| { + // Safety: `SockAddr::init` zeros the address, which is a valid + // representation. + let storage: &mut libc::sockaddr_un = unsafe { &mut *storage.cast() }; + let len: &mut socklen_t = unsafe { &mut *len }; + + let bytes = path.as_ref().as_os_str().as_bytes(); + let too_long = match bytes.first() { + None => false, + // linux abstract namespaces aren't null-terminated + Some(&0) => bytes.len() > storage.sun_path.len(), + Some(_) => bytes.len() >= storage.sun_path.len(), + }; + if too_long { + return Err(io::Error::new( + io::ErrorKind::InvalidInput, + "path must be shorter than SUN_LEN", + )); + } + + storage.sun_family = libc::AF_UNIX as sa_family_t; + // Safety: `bytes` and `addr.sun_path` are not overlapping and + // both point to valid memory. + // `SockAddr::init` zeroes the memory, so the path is already + // null terminated. 
+ unsafe { + ptr::copy_nonoverlapping( + bytes.as_ptr(), + storage.sun_path.as_mut_ptr() as *mut u8, + bytes.len(), + ) + }; + + let base = storage as *const _ as usize; + let path = &storage.sun_path as *const _ as usize; + let sun_path_offset = path - base; + let length = sun_path_offset + + bytes.len() + + match bytes.first() { + Some(&0) | None => 0, + Some(_) => 1, + }; + *len = length as socklen_t; + + Ok(()) + }) } + .map(|(_, addr)| addr) } } -impl Write for Socket { - fn write(&mut self, buf: &[u8]) -> io::Result { - <&Socket>::write(&mut &*self, buf) - } - - fn flush(&mut self) -> io::Result<()> { - <&Socket>::flush(&mut &*self) - } -} - -impl<'a> Write for &'a Socket { - fn write(&mut self, buf: &[u8]) -> io::Result { - self.send(buf, 0) - } - - fn flush(&mut self) -> io::Result<()> { - Ok(()) - } -} - -impl fmt::Debug for Socket { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let mut f = f.debug_struct("Socket"); - f.field("fd", &self.fd); - if let Ok(addr) = self.local_addr() { - f.field("local_addr", &addr); - } - if let Ok(addr) = self.peer_addr() { - f.field("peer_addr", &addr); - } - f.finish() - } -} - -impl AsRawFd for Socket { - fn as_raw_fd(&self) -> c_int { - self.fd - } -} - -impl IntoRawFd for Socket { - fn into_raw_fd(self) -> c_int { - let fd = self.fd; - mem::forget(self); - return fd; - } -} - -impl FromRawFd for Socket { - unsafe fn from_raw_fd(fd: c_int) -> Socket { - Socket { fd: fd } - } -} - -impl AsRawFd for crate::Socket { - fn as_raw_fd(&self) -> c_int { - self.inner.as_raw_fd() - } -} - -impl IntoRawFd for crate::Socket { - fn into_raw_fd(self) -> c_int { - self.inner.into_raw_fd() - } -} - -impl FromRawFd for crate::Socket { - unsafe fn from_raw_fd(fd: c_int) -> crate::Socket { - crate::Socket { - inner: Socket::from_raw_fd(fd), - } - } -} - -impl Drop for Socket { - fn drop(&mut self) { +impl SockAddr { + /// Constructs a `SockAddr` with the family `AF_VSOCK` and the provided CID/port. + /// + /// # Errors + /// + /// This function can never fail. In a future version of this library it will be made + /// infallible. + #[allow(unused_unsafe)] // TODO: replace with `unsafe_op_in_unsafe_fn` once stable. + #[cfg(all(feature = "all", any(target_os = "android", target_os = "linux")))] + #[cfg_attr( + docsrs, + doc(cfg(all(feature = "all", any(target_os = "android", target_os = "linux")))) + )] + pub fn vsock(cid: u32, port: u32) -> io::Result { unsafe { - let _ = libc::close(self.fd); + SockAddr::init(|storage, len| { + // Safety: `SockAddr::init` zeros the address, which is a valid + // representation. + let storage: &mut libc::sockaddr_vm = unsafe { &mut *storage.cast() }; + let len: &mut socklen_t = unsafe { &mut *len }; + + storage.svm_family = libc::AF_VSOCK as sa_family_t; + storage.svm_cid = cid; + storage.svm_port = port; + + *len = mem::size_of::() as socklen_t; + + Ok(()) + }) + } + .map(|(_, addr)| addr) + } + + /// Returns this address VSOCK CID/port if it is in the `AF_VSOCK` family, + /// otherwise return `None`. + #[cfg(all(feature = "all", any(target_os = "android", target_os = "linux")))] + #[cfg_attr( + docsrs, + doc(cfg(all(feature = "all", any(target_os = "android", target_os = "linux")))) + )] + pub fn vsock_address(&self) -> Option<(u32, u32)> { + if self.family() == libc::AF_VSOCK as sa_family_t { + // Safety: if the ss_family field is AF_VSOCK then storage must be a sockaddr_vm. 
+ let addr = unsafe { &*(self.as_ptr() as *const libc::sockaddr_vm) }; + Some((addr.svm_cid, addr.svm_port)) + } else { + None } } } -impl From for net::TcpStream { - fn from(socket: Socket) -> net::TcpStream { - unsafe { net::TcpStream::from_raw_fd(socket.into_raw_fd()) } - } +pub(crate) type Socket = c_int; + +pub(crate) unsafe fn socket_from_raw(socket: Socket) -> crate::socket::Inner { + crate::socket::Inner::from_raw_fd(socket) } -impl From for net::TcpListener { - fn from(socket: Socket) -> net::TcpListener { - unsafe { net::TcpListener::from_raw_fd(socket.into_raw_fd()) } - } +pub(crate) fn socket_as_raw(socket: &crate::socket::Inner) -> Socket { + socket.as_raw_fd() } -impl From for net::UdpSocket { - fn from(socket: Socket) -> net::UdpSocket { - unsafe { net::UdpSocket::from_raw_fd(socket.into_raw_fd()) } - } +pub(crate) fn socket_into_raw(socket: crate::socket::Inner) -> Socket { + socket.into_raw_fd() } -#[cfg(all(unix, feature = "unix"))] -impl From for UnixStream { - fn from(socket: Socket) -> UnixStream { - unsafe { UnixStream::from_raw_fd(socket.into_raw_fd()) } - } +pub(crate) fn socket(family: c_int, ty: c_int, protocol: c_int) -> io::Result { + syscall!(socket(family, ty, protocol)) } -#[cfg(all(unix, feature = "unix"))] -impl From for UnixListener { - fn from(socket: Socket) -> UnixListener { - unsafe { UnixListener::from_raw_fd(socket.into_raw_fd()) } - } +#[cfg(feature = "all")] +pub(crate) fn socketpair(family: c_int, ty: c_int, protocol: c_int) -> io::Result<[Socket; 2]> { + let mut fds = [0, 0]; + syscall!(socketpair(family, ty, protocol, fds.as_mut_ptr())).map(|_| fds) } -#[cfg(all(unix, feature = "unix"))] -impl From for UnixDatagram { - fn from(socket: Socket) -> UnixDatagram { - unsafe { UnixDatagram::from_raw_fd(socket.into_raw_fd()) } - } +pub(crate) fn bind(fd: Socket, addr: &SockAddr) -> io::Result<()> { + syscall!(bind(fd, addr.as_ptr(), addr.len() as _)).map(|_| ()) } -impl From for Socket { - fn from(socket: net::TcpStream) -> Socket { - unsafe { Socket::from_raw_fd(socket.into_raw_fd()) } - } +pub(crate) fn connect(fd: Socket, addr: &SockAddr) -> io::Result<()> { + syscall!(connect(fd, addr.as_ptr(), addr.len())).map(|_| ()) } -impl From for Socket { - fn from(socket: net::TcpListener) -> Socket { - unsafe { Socket::from_raw_fd(socket.into_raw_fd()) } - } -} +pub(crate) fn poll_connect(socket: &crate::Socket, timeout: Duration) -> io::Result<()> { + let start = Instant::now(); -impl From for Socket { - fn from(socket: net::UdpSocket) -> Socket { - unsafe { Socket::from_raw_fd(socket.into_raw_fd()) } - } -} + let mut pollfd = libc::pollfd { + fd: socket.as_raw(), + events: libc::POLLIN | libc::POLLOUT, + revents: 0, + }; -#[cfg(all(unix, feature = "unix"))] -impl From for Socket { - fn from(socket: UnixStream) -> Socket { - unsafe { Socket::from_raw_fd(socket.into_raw_fd()) } - } -} - -#[cfg(all(unix, feature = "unix"))] -impl From for Socket { - fn from(socket: UnixListener) -> Socket { - unsafe { Socket::from_raw_fd(socket.into_raw_fd()) } - } -} - -#[cfg(all(unix, feature = "unix"))] -impl From for Socket { - fn from(socket: UnixDatagram) -> Socket { - unsafe { Socket::from_raw_fd(socket.into_raw_fd()) } - } -} - -fn max_len() -> usize { - // The maximum read limit on most posix-like systems is `SSIZE_MAX`, - // with the man page quoting that if the count of bytes to read is - // greater than `SSIZE_MAX` the result is "unspecified". 
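`SockAddr::vsock` and `vsock_address` above add `AF_VSOCK` addresses for guest/host communication on Linux and Android. A sketch of the caller side, assuming `features = ["all"]`, that `Domain::VSOCK` is exposed on this target, and a VM environment where CID 2 (the well-known host CID) is reachable; the port is arbitrary:

```rust
use std::io;

use socket2::{Domain, SockAddr, Socket, Type};

fn connect_to_host_vsock() -> io::Result<Socket> {
    // CID 2 is VMADDR_CID_HOST; port 5000 is just an example service.
    let addr = SockAddr::vsock(2, 5000)?;
    let socket = Socket::new(Domain::VSOCK, Type::STREAM, None)?;
    socket.connect(&addr)?;
    Ok(socket)
}
```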
- // - // On macOS, however, apparently the 64-bit libc is either buggy or - // intentionally showing odd behavior by rejecting any read with a size - // larger than or equal to INT_MAX. To handle both of these the read - // size is capped on both platforms. - if cfg!(target_os = "macos") { - ::max_value() as usize - 1 - } else { - ::max_value() as usize - } -} - -fn cvt>(t: T) -> io::Result { - let one: T = T::one(); - if t == -one { - Err(io::Error::last_os_error()) - } else { - Ok(t) - } -} - -fn cvt_r(mut f: F) -> io::Result -where - F: FnMut() -> T, - T: One + PartialEq + Neg, -{ loop { - match cvt(f()) { - Err(ref e) if e.kind() == ErrorKind::Interrupted => {} - other => return other, + let elapsed = start.elapsed(); + if elapsed >= timeout { + return Err(io::ErrorKind::TimedOut.into()); + } + + let timeout = (timeout - elapsed).as_millis(); + let timeout = clamp(timeout, 1, c_int::max_value() as u128) as c_int; + + match syscall!(poll(&mut pollfd, 1, timeout)) { + Ok(0) => return Err(io::ErrorKind::TimedOut.into()), + Ok(_) => { + // Error or hang up indicates an error (or failure to connect). + if (pollfd.revents & libc::POLLHUP) != 0 || (pollfd.revents & libc::POLLERR) != 0 { + match socket.take_error() { + Ok(Some(err)) => return Err(err), + Ok(None) => { + return Err(io::Error::new( + io::ErrorKind::Other, + "no error set after POLLHUP", + )) + } + Err(err) => return Err(err), + } + } + return Ok(()); + } + // Got interrupted, try again. + Err(ref err) if err.kind() == io::ErrorKind::Interrupted => continue, + Err(err) => return Err(err), } } } -fn set_cloexec(fd: c_int) -> io::Result<()> { +// TODO: use clamp from std lib, stable since 1.50. +fn clamp(value: T, min: T, max: T) -> T +where + T: Ord, +{ + if value <= min { + min + } else if value >= max { + max + } else { + value + } +} + +pub(crate) fn listen(fd: Socket, backlog: c_int) -> io::Result<()> { + syscall!(listen(fd, backlog)).map(|_| ()) +} + +pub(crate) fn accept(fd: Socket) -> io::Result<(Socket, SockAddr)> { + // Safety: `accept` initialises the `SockAddr` for us. + unsafe { SockAddr::init(|storage, len| syscall!(accept(fd, storage.cast(), len))) } +} + +pub(crate) fn getsockname(fd: Socket) -> io::Result { + // Safety: `accept` initialises the `SockAddr` for us. + unsafe { SockAddr::init(|storage, len| syscall!(getsockname(fd, storage.cast(), len))) } + .map(|(_, addr)| addr) +} + +pub(crate) fn getpeername(fd: Socket) -> io::Result { + // Safety: `accept` initialises the `SockAddr` for us. 
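`poll_connect` above is the waiting half of `Socket::connect_timeout`: it polls the socket for writability and turns `POLLHUP`/`POLLERR` into the pending `SO_ERROR`. A rough usage sketch, assuming socket2 0.4 and an IPv4 peer; address and timeout are placeholders:

```rust
use std::io;
use std::net::SocketAddr;
use std::time::Duration;

use socket2::{Domain, SockAddr, Socket, Type};

fn connect_with_deadline(addr: SocketAddr) -> io::Result<Socket> {
    let socket = Socket::new(Domain::IPV4, Type::STREAM, None)?;
    // Internally: switch to non-blocking, issue connect(2), then poll
    // until writable or until the deadline passes (TimedOut on expiry).
    socket.connect_timeout(&SockAddr::from(addr), Duration::from_secs(5))?;
    Ok(socket)
}
```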
+ unsafe { SockAddr::init(|storage, len| syscall!(getpeername(fd, storage.cast(), len))) } + .map(|(_, addr)| addr) +} + +pub(crate) fn try_clone(fd: Socket) -> io::Result { + syscall!(fcntl(fd, libc::F_DUPFD_CLOEXEC, 0)) +} + +pub(crate) fn set_nonblocking(fd: Socket, nonblocking: bool) -> io::Result<()> { + if nonblocking { + fcntl_add(fd, libc::F_GETFL, libc::F_SETFL, libc::O_NONBLOCK) + } else { + fcntl_remove(fd, libc::F_GETFL, libc::F_SETFL, libc::O_NONBLOCK) + } +} + +pub(crate) fn shutdown(fd: Socket, how: Shutdown) -> io::Result<()> { + let how = match how { + Shutdown::Write => libc::SHUT_WR, + Shutdown::Read => libc::SHUT_RD, + Shutdown::Both => libc::SHUT_RDWR, + }; + syscall!(shutdown(fd, how)).map(|_| ()) +} + +pub(crate) fn recv(fd: Socket, buf: &mut [MaybeUninit], flags: c_int) -> io::Result { + syscall!(recv( + fd, + buf.as_mut_ptr().cast(), + min(buf.len(), MAX_BUF_LEN), + flags, + )) + .map(|n| n as usize) +} + +pub(crate) fn recv_from( + fd: Socket, + buf: &mut [MaybeUninit], + flags: c_int, +) -> io::Result<(usize, SockAddr)> { + // Safety: `recvfrom` initialises the `SockAddr` for us. unsafe { - let previous = cvt(libc::fcntl(fd, libc::F_GETFD))?; - let new = previous | libc::FD_CLOEXEC; - if new != previous { - cvt(libc::fcntl(fd, libc::F_SETFD, new))?; - } - Ok(()) + SockAddr::init(|addr, addrlen| { + syscall!(recvfrom( + fd, + buf.as_mut_ptr().cast(), + min(buf.len(), MAX_BUF_LEN), + flags, + addr.cast(), + addrlen + )) + .map(|n| n as usize) + }) } } -fn dur2timeval(dur: Option) -> io::Result { - match dur { - Some(dur) => { - if dur.as_secs() == 0 && dur.subsec_nanos() == 0 { - return Err(io::Error::new( - io::ErrorKind::InvalidInput, - "cannot set a 0 duration timeout", - )); - } - - let secs = if dur.as_secs() > libc::time_t::max_value() as u64 { - libc::time_t::max_value() - } else { - dur.as_secs() as libc::time_t - }; - let mut timeout = libc::timeval { - tv_sec: secs, - tv_usec: (dur.subsec_nanos() / 1000) as libc::suseconds_t, - }; - if timeout.tv_sec == 0 && timeout.tv_usec == 0 { - timeout.tv_usec = 1; - } - Ok(timeout) - } - None => Ok(libc::timeval { - tv_sec: 0, - tv_usec: 0, - }), - } +#[cfg(not(target_os = "redox"))] +pub(crate) fn recv_vectored( + fd: Socket, + bufs: &mut [crate::MaybeUninitSlice<'_>], + flags: c_int, +) -> io::Result<(usize, RecvFlags)> { + recvmsg(fd, ptr::null_mut(), bufs, flags).map(|(n, _, recv_flags)| (n, recv_flags)) } -fn timeval2dur(raw: libc::timeval) -> Option { - if raw.tv_sec == 0 && raw.tv_usec == 0 { +#[cfg(not(target_os = "redox"))] +pub(crate) fn recv_from_vectored( + fd: Socket, + bufs: &mut [crate::MaybeUninitSlice<'_>], + flags: c_int, +) -> io::Result<(usize, RecvFlags, SockAddr)> { + // Safety: `recvmsg` initialises the address storage and we set the length + // manually. + unsafe { + SockAddr::init(|storage, len| { + recvmsg(fd, storage, bufs, flags).map(|(n, addrlen, recv_flags)| { + // Set the correct address length. + *len = addrlen; + (n, recv_flags) + }) + }) + } + .map(|((n, recv_flags), addr)| (n, recv_flags, addr)) +} + +/// Returns the (bytes received, sending address len, `RecvFlags`). +#[cfg(not(target_os = "redox"))] +fn recvmsg( + fd: Socket, + msg_name: *mut sockaddr_storage, + bufs: &mut [crate::MaybeUninitSlice<'_>], + flags: c_int, +) -> io::Result<(usize, libc::socklen_t, RecvFlags)> { + let msg_namelen = if msg_name.is_null() { + 0 + } else { + size_of::() as libc::socklen_t + }; + // libc::msghdr contains unexported padding fields on Fuchsia. 
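The reworked `recv`/`recv_from` wrappers take `&mut [MaybeUninit<u8>]`, so callers no longer need to zero a buffer before handing it to the kernel. A small sketch of the matching public call, assuming socket2 0.4:

```rust
use std::io;
use std::mem::MaybeUninit;

use socket2::Socket;

fn read_some(socket: &Socket) -> io::Result<Vec<u8>> {
    // The buffer may stay uninitialised; only the first `n` bytes are
    // guaranteed to have been written by the kernel.
    let mut buf = [MaybeUninit::<u8>::uninit(); 4096];
    let n = socket.recv(&mut buf)?;
    // Safety: `recv` initialised `buf[..n]`.
    let filled = unsafe { std::slice::from_raw_parts(buf.as_ptr().cast::<u8>(), n) };
    Ok(filled.to_vec())
}
```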
+ let mut msg: libc::msghdr = unsafe { mem::zeroed() }; + msg.msg_name = msg_name.cast(); + msg.msg_namelen = msg_namelen; + msg.msg_iov = bufs.as_mut_ptr().cast(); + msg.msg_iovlen = min(bufs.len(), IovLen::MAX as usize) as IovLen; + syscall!(recvmsg(fd, &mut msg, flags)) + .map(|n| (n as usize, msg.msg_namelen, RecvFlags(msg.msg_flags))) +} + +pub(crate) fn send(fd: Socket, buf: &[u8], flags: c_int) -> io::Result { + syscall!(send( + fd, + buf.as_ptr().cast(), + min(buf.len(), MAX_BUF_LEN), + flags, + )) + .map(|n| n as usize) +} + +#[cfg(not(target_os = "redox"))] +pub(crate) fn send_vectored(fd: Socket, bufs: &[IoSlice<'_>], flags: c_int) -> io::Result { + sendmsg(fd, ptr::null(), 0, bufs, flags) +} + +pub(crate) fn send_to(fd: Socket, buf: &[u8], addr: &SockAddr, flags: c_int) -> io::Result { + syscall!(sendto( + fd, + buf.as_ptr().cast(), + min(buf.len(), MAX_BUF_LEN), + flags, + addr.as_ptr(), + addr.len(), + )) + .map(|n| n as usize) +} + +#[cfg(not(target_os = "redox"))] +pub(crate) fn send_to_vectored( + fd: Socket, + bufs: &[IoSlice<'_>], + addr: &SockAddr, + flags: c_int, +) -> io::Result { + sendmsg(fd, addr.as_storage_ptr(), addr.len(), bufs, flags) +} + +/// Returns the (bytes received, sending address len, `RecvFlags`). +#[cfg(not(target_os = "redox"))] +fn sendmsg( + fd: Socket, + msg_name: *const sockaddr_storage, + msg_namelen: socklen_t, + bufs: &[IoSlice<'_>], + flags: c_int, +) -> io::Result { + // libc::msghdr contains unexported padding fields on Fuchsia. + let mut msg: libc::msghdr = unsafe { mem::zeroed() }; + // Safety: we're creating a `*mut` pointer from a reference, which is UB + // once actually used. However the OS should not write to it in the + // `sendmsg` system call. + msg.msg_name = (msg_name as *mut sockaddr_storage).cast(); + msg.msg_namelen = msg_namelen; + // Safety: Same as above about `*const` -> `*mut`. + msg.msg_iov = bufs.as_ptr() as *mut _; + msg.msg_iovlen = min(bufs.len(), IovLen::MAX as usize) as IovLen; + syscall!(sendmsg(fd, &msg, flags)).map(|n| n as usize) +} + +/// Wrapper around `getsockopt` to deal with platform specific timeouts. +pub(crate) fn timeout_opt(fd: Socket, opt: c_int, val: c_int) -> io::Result> { + unsafe { getsockopt(fd, opt, val).map(from_timeval) } +} + +fn from_timeval(duration: libc::timeval) -> Option { + if duration.tv_sec == 0 && duration.tv_usec == 0 { None } else { - let sec = raw.tv_sec as u64; - let nsec = (raw.tv_usec as u32) * 1000; + let sec = duration.tv_sec as u64; + let nsec = (duration.tv_usec as u32) * 1000; Some(Duration::new(sec, nsec)) } } -fn to_s_addr(addr: &Ipv4Addr) -> libc::in_addr_t { - let octets = addr.octets(); - crate::hton( - ((octets[0] as libc::in_addr_t) << 24) - | ((octets[1] as libc::in_addr_t) << 16) - | ((octets[2] as libc::in_addr_t) << 8) - | ((octets[3] as libc::in_addr_t) << 0), - ) +/// Wrapper around `setsockopt` to deal with platform specific timeouts. 
+pub(crate) fn set_timeout_opt( + fd: Socket, + opt: c_int, + val: c_int, + duration: Option, +) -> io::Result<()> { + let duration = into_timeval(duration); + unsafe { setsockopt(fd, opt, val, duration) } } -fn from_s_addr(in_addr: libc::in_addr_t) -> Ipv4Addr { - let h_addr = crate::ntoh(in_addr); - - let a: u8 = (h_addr >> 24) as u8; - let b: u8 = (h_addr >> 16) as u8; - let c: u8 = (h_addr >> 8) as u8; - let d: u8 = (h_addr >> 0) as u8; - - Ipv4Addr::new(a, b, c, d) +fn into_timeval(duration: Option) -> libc::timeval { + match duration { + // https://github.com/rust-lang/libc/issues/1848 + #[cfg_attr(target_env = "musl", allow(deprecated))] + Some(duration) => libc::timeval { + tv_sec: min(duration.as_secs(), libc::time_t::max_value() as u64) as libc::time_t, + tv_usec: duration.subsec_micros() as libc::suseconds_t, + }, + None => libc::timeval { + tv_sec: 0, + tv_usec: 0, + }, + } } -fn to_in6_addr(addr: &Ipv6Addr) -> libc::in6_addr { - let mut ret: libc::in6_addr = unsafe { mem::zeroed() }; - ret.s6_addr = addr.octets(); - return ret; +#[cfg(feature = "all")] +#[cfg(not(any(target_os = "haiku", target_os = "openbsd")))] +pub(crate) fn keepalive_time(fd: Socket) -> io::Result { + unsafe { + getsockopt::(fd, IPPROTO_TCP, KEEPALIVE_TIME) + .map(|secs| Duration::from_secs(secs as u64)) + } } -#[cfg(target_os = "android")] -fn to_ipv6mr_interface(value: u32) -> c_int { - value as c_int +#[allow(unused_variables)] +pub(crate) fn set_tcp_keepalive(fd: Socket, keepalive: &TcpKeepalive) -> io::Result<()> { + #[cfg(not(any(target_os = "haiku", target_os = "openbsd")))] + if let Some(time) = keepalive.time { + let secs = into_secs(time); + unsafe { setsockopt(fd, libc::IPPROTO_TCP, KEEPALIVE_TIME, secs)? } + } + + #[cfg(any( + target_os = "android", + target_os = "dragonfly", + target_os = "freebsd", + target_os = "fuchsia", + target_os = "illumos", + target_os = "linux", + target_os = "netbsd", + target_vendor = "apple", + ))] + { + if let Some(interval) = keepalive.interval { + let secs = into_secs(interval); + unsafe { setsockopt(fd, libc::IPPROTO_TCP, libc::TCP_KEEPINTVL, secs)? } + } + + if let Some(retries) = keepalive.retries { + unsafe { setsockopt(fd, libc::IPPROTO_TCP, libc::TCP_KEEPCNT, retries as c_int)? } + } + } + + Ok(()) } -#[cfg(not(target_os = "android"))] -fn to_ipv6mr_interface(value: u32) -> libc::c_uint { - value as libc::c_uint +#[cfg(not(any(target_os = "haiku", target_os = "openbsd")))] +fn into_secs(duration: Duration) -> c_int { + min(duration.as_secs(), c_int::max_value() as u64) as c_int } -fn linger2dur(linger_opt: libc::linger) -> Option { - if linger_opt.l_onoff == 0 { - None +/// Add `flag` to the current set flags of `F_GETFD`. +fn fcntl_add(fd: Socket, get_cmd: c_int, set_cmd: c_int, flag: c_int) -> io::Result<()> { + let previous = syscall!(fcntl(fd, get_cmd))?; + let new = previous | flag; + if new != previous { + syscall!(fcntl(fd, set_cmd, new)).map(|_| ()) } else { - Some(Duration::from_secs(linger_opt.l_linger as u64)) + // Flag was already set. + Ok(()) } } -fn dur2linger(dur: Option) -> libc::linger { - match dur { - Some(d) => libc::linger { - l_onoff: 1, - l_linger: d.as_secs() as c_int, +/// Remove `flag` to the current set flags of `F_GETFD`. +fn fcntl_remove(fd: Socket, get_cmd: c_int, set_cmd: c_int, flag: c_int) -> io::Result<()> { + let previous = syscall!(fcntl(fd, get_cmd))?; + let new = previous & !flag; + if new != previous { + syscall!(fcntl(fd, set_cmd, new)).map(|_| ()) + } else { + // Flag was already set. 
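`set_tcp_keepalive` above fans a single `TcpKeepalive` value out to the platform's keep-alive time, interval and retry-count options. A hedged sketch of the caller side, assuming socket2 0.4 on Linux (where `with_interval` is available); the durations are arbitrary:

```rust
use std::io;
use std::time::Duration;

use socket2::{Socket, TcpKeepalive};

fn enable_keepalive(socket: &Socket) -> io::Result<()> {
    let params = TcpKeepalive::new()
        .with_time(Duration::from_secs(60))      // idle time before probes
        .with_interval(Duration::from_secs(10)); // gap between probes
    socket.set_keepalive(true)?;                 // SO_KEEPALIVE on
    socket.set_tcp_keepalive(&params)
}
```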
+ Ok(()) + } +} + +/// Caller must ensure `T` is the correct type for `opt` and `val`. +pub(crate) unsafe fn getsockopt(fd: Socket, opt: c_int, val: c_int) -> io::Result { + let mut payload: MaybeUninit = MaybeUninit::uninit(); + let mut len = size_of::() as libc::socklen_t; + syscall!(getsockopt( + fd, + opt, + val, + payload.as_mut_ptr().cast(), + &mut len, + )) + .map(|_| { + debug_assert_eq!(len as usize, size_of::()); + // Safety: `getsockopt` initialised `payload` for us. + payload.assume_init() + }) +} + +/// Caller must ensure `T` is the correct type for `opt` and `val`. +pub(crate) unsafe fn setsockopt( + fd: Socket, + opt: c_int, + val: c_int, + payload: T, +) -> io::Result<()> { + let payload = &payload as *const T as *const c_void; + syscall!(setsockopt( + fd, + opt, + val, + payload, + mem::size_of::() as libc::socklen_t, + )) + .map(|_| ()) +} + +pub(crate) fn to_in_addr(addr: &Ipv4Addr) -> in_addr { + // `s_addr` is stored as BE on all machines, and the array is in BE order. + // So the native endian conversion method is used so that it's never + // swapped. + in_addr { + s_addr: u32::from_ne_bytes(addr.octets()), + } +} + +pub(crate) fn from_in_addr(in_addr: in_addr) -> Ipv4Addr { + Ipv4Addr::from(in_addr.s_addr.to_ne_bytes()) +} + +pub(crate) fn to_in6_addr(addr: &Ipv6Addr) -> in6_addr { + in6_addr { + s6_addr: addr.octets(), + } +} + +pub(crate) fn from_in6_addr(addr: in6_addr) -> Ipv6Addr { + Ipv6Addr::from(addr.s6_addr) +} + +#[cfg(not(any( + target_os = "haiku", + target_os = "illumos", + target_os = "netbsd", + target_os = "redox", + target_os = "solaris", +)))] +pub(crate) fn to_mreqn( + multiaddr: &Ipv4Addr, + interface: &crate::socket::InterfaceIndexOrAddress, +) -> libc::ip_mreqn { + match interface { + crate::socket::InterfaceIndexOrAddress::Index(interface) => libc::ip_mreqn { + imr_multiaddr: to_in_addr(multiaddr), + imr_address: to_in_addr(&Ipv4Addr::UNSPECIFIED), + imr_ifindex: *interface as _, }, - None => libc::linger { - l_onoff: 0, - l_linger: 0, + crate::socket::InterfaceIndexOrAddress::Address(interface) => libc::ip_mreqn { + imr_multiaddr: to_in_addr(multiaddr), + imr_address: to_in_addr(interface), + imr_ifindex: 0, }, } } +/// Unix only API. +impl crate::Socket { + /// Accept a new incoming connection from this listener. + /// + /// This function directly corresponds to the `accept4(2)` function. + /// + /// This function will block the calling thread until a new connection is + /// established. When established, the corresponding `Socket` and the remote + /// peer's address will be returned. 
+ #[cfg(all( + feature = "all", + any( + target_os = "android", + target_os = "dragonfly", + target_os = "freebsd", + target_os = "fuchsia", + target_os = "illumos", + target_os = "linux", + target_os = "netbsd", + target_os = "openbsd" + ) + ))] + #[cfg_attr( + docsrs, + doc(cfg(all( + feature = "all", + any( + target_os = "android", + target_os = "dragonfly", + target_os = "freebsd", + target_os = "fuchsia", + target_os = "illumos", + target_os = "linux", + target_os = "netbsd", + target_os = "openbsd" + ) + ))) + )] + pub fn accept4(&self, flags: c_int) -> io::Result<(crate::Socket, SockAddr)> { + self._accept4(flags) + } + + #[cfg(any( + target_os = "android", + target_os = "dragonfly", + target_os = "freebsd", + target_os = "fuchsia", + target_os = "illumos", + target_os = "linux", + target_os = "netbsd", + target_os = "openbsd" + ))] + pub(crate) fn _accept4(&self, flags: c_int) -> io::Result<(crate::Socket, SockAddr)> { + // Safety: `accept4` initialises the `SockAddr` for us. + unsafe { + SockAddr::init(|storage, len| { + syscall!(accept4(self.as_raw(), storage.cast(), len, flags)) + .map(crate::Socket::from_raw) + }) + } + } + + /// Sets `CLOEXEC` on the socket. + /// + /// # Notes + /// + /// On supported platforms you can use [`Type::cloexec`]. + #[cfg(feature = "all")] + #[cfg_attr(docsrs, doc(cfg(all(feature = "all", unix))))] + pub fn set_cloexec(&self, close_on_exec: bool) -> io::Result<()> { + self._set_cloexec(close_on_exec) + } + + pub(crate) fn _set_cloexec(&self, close_on_exec: bool) -> io::Result<()> { + if close_on_exec { + fcntl_add( + self.as_raw(), + libc::F_GETFD, + libc::F_SETFD, + libc::FD_CLOEXEC, + ) + } else { + fcntl_remove( + self.as_raw(), + libc::F_GETFD, + libc::F_SETFD, + libc::FD_CLOEXEC, + ) + } + } + + /// Sets `SO_NOSIGPIPE` on the socket. + #[cfg(all(feature = "all", any(doc, target_vendor = "apple")))] + #[cfg_attr(docsrs, doc(cfg(all(feature = "all", target_vendor = "apple"))))] + pub fn set_nosigpipe(&self, nosigpipe: bool) -> io::Result<()> { + self._set_nosigpipe(nosigpipe) + } + + #[cfg(target_vendor = "apple")] + pub(crate) fn _set_nosigpipe(&self, nosigpipe: bool) -> io::Result<()> { + unsafe { + setsockopt( + self.as_raw(), + libc::SOL_SOCKET, + libc::SO_NOSIGPIPE, + nosigpipe as c_int, + ) + } + } + + /// Gets the value of the `TCP_MAXSEG` option on this socket. + /// + /// For more information about this option, see [`set_mss`]. + /// + /// [`set_mss`]: crate::Socket::set_mss + #[cfg(all(feature = "all", not(target_os = "redox")))] + #[cfg_attr(docsrs, doc(cfg(all(feature = "all", unix, not(target_os = "redox")))))] + pub fn mss(&self) -> io::Result { + unsafe { + getsockopt::(self.as_raw(), libc::IPPROTO_TCP, libc::TCP_MAXSEG) + .map(|mss| mss as u32) + } + } + + /// Sets the value of the `TCP_MAXSEG` option on this socket. + /// + /// The `TCP_MAXSEG` option denotes the TCP Maximum Segment Size and is only + /// available on TCP sockets. + #[cfg(all(feature = "all", not(target_os = "redox")))] + #[cfg_attr(docsrs, doc(cfg(all(feature = "all", unix, not(target_os = "redox")))))] + pub fn set_mss(&self, mss: u32) -> io::Result<()> { + unsafe { + setsockopt( + self.as_raw(), + libc::IPPROTO_TCP, + libc::TCP_MAXSEG, + mss as c_int, + ) + } + } + + /// Returns `true` if `listen(2)` was called on this socket by checking the + /// `SO_ACCEPTCONN` option on this socket. 
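`accept4` above is the `all`-feature, Unix-only extension that forwards caller flags straight to `accept4(2)`. A sketch of accepting with close-on-exec and non-blocking set atomically, assuming Linux, `features = ["all"]`, and the `libc` crate for the flag constants:

```rust
use std::io;

use socket2::{SockAddr, Socket};

fn accept_cloexec(listener: &Socket) -> io::Result<(Socket, SockAddr)> {
    // Equivalent to accept(2) followed by fcntl(FD_CLOEXEC) and
    // fcntl(O_NONBLOCK), but without the race window in between.
    listener.accept4(libc::SOCK_CLOEXEC | libc::SOCK_NONBLOCK)
}
```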
+ #[cfg(all( + feature = "all", + any( + target_os = "android", + target_os = "freebsd", + target_os = "fuchsia", + target_os = "linux", + ) + ))] + #[cfg_attr( + docsrs, + doc(cfg(all( + feature = "all", + any( + target_os = "android", + target_os = "freebsd", + target_os = "fuchsia", + target_os = "linux", + ) + ))) + )] + pub fn is_listener(&self) -> io::Result { + unsafe { + getsockopt::(self.as_raw(), libc::SOL_SOCKET, libc::SO_ACCEPTCONN) + .map(|v| v != 0) + } + } + + /// Returns the [`Domain`] of this socket by checking the `SO_DOMAIN` option + /// on this socket. + #[cfg(all( + feature = "all", + any( + target_os = "android", + // TODO: add FreeBSD. + // target_os = "freebsd", + target_os = "fuchsia", + target_os = "linux", + ) + ))] + #[cfg_attr(docsrs, doc(cfg(all( + feature = "all", + any( + target_os = "android", + // TODO: add FreeBSD. + // target_os = "freebsd", + target_os = "fuchsia", + target_os = "linux", + ) + ))))] + pub fn domain(&self) -> io::Result { + unsafe { getsockopt::(self.as_raw(), libc::SOL_SOCKET, libc::SO_DOMAIN).map(Domain) } + } + + /// Returns the [`Protocol`] of this socket by checking the `SO_PROTOCOL` + /// option on this socket. + #[cfg(all( + feature = "all", + any( + target_os = "android", + target_os = "freebsd", + target_os = "fuchsia", + target_os = "linux", + ) + ))] + #[cfg_attr( + docsrs, + doc(cfg(all( + feature = "all", + any( + target_os = "android", + target_os = "freebsd", + target_os = "fuchsia", + target_os = "linux", + ) + ))) + )] + pub fn protocol(&self) -> io::Result> { + unsafe { + getsockopt::(self.as_raw(), libc::SOL_SOCKET, libc::SO_PROTOCOL).map(|v| match v + { + 0 => None, + p => Some(Protocol(p)), + }) + } + } + + /// Gets the value for the `SO_MARK` option on this socket. + /// + /// This value gets the socket mark field for each packet sent through + /// this socket. + /// + /// On Linux this function requires the `CAP_NET_ADMIN` capability. + #[cfg(all( + feature = "all", + any(target_os = "android", target_os = "fuchsia", target_os = "linux") + ))] + #[cfg_attr( + docsrs, + doc(cfg(all( + feature = "all", + any(target_os = "android", target_os = "fuchsia", target_os = "linux") + ))) + )] + pub fn mark(&self) -> io::Result { + unsafe { + getsockopt::(self.as_raw(), libc::SOL_SOCKET, libc::SO_MARK) + .map(|mark| mark as u32) + } + } + + /// Sets the value for the `SO_MARK` option on this socket. + /// + /// This value sets the socket mark field for each packet sent through + /// this socket. Changing the mark can be used for mark-based routing + /// without netfilter or for packet filtering. + /// + /// On Linux this function requires the `CAP_NET_ADMIN` capability. + #[cfg(all( + feature = "all", + any(target_os = "android", target_os = "fuchsia", target_os = "linux") + ))] + #[cfg_attr( + docsrs, + doc(cfg(all( + feature = "all", + any(target_os = "android", target_os = "fuchsia", target_os = "linux") + ))) + )] + pub fn set_mark(&self, mark: u32) -> io::Result<()> { + unsafe { + setsockopt::( + self.as_raw(), + libc::SOL_SOCKET, + libc::SO_MARK, + mark as c_int, + ) + } + } + + /// Get the value of the `TCP_CORK` option on this socket. + /// + /// For more information about this option, see [`set_cork`]. 
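`is_listener`, `domain`, `protocol` and `mark` above are thin `getsockopt` wrappers over `SO_ACCEPTCONN`, `SO_DOMAIN`, `SO_PROTOCOL` and `SO_MARK`. A sketch of querying them, assuming Linux and `features = ["all"]`; per the docs above, `SO_MARK` needs `CAP_NET_ADMIN`:

```rust
use std::io;

use socket2::Socket;

fn describe(socket: &Socket) -> io::Result<()> {
    println!("listening: {}", socket.is_listener()?);  // SO_ACCEPTCONN
    println!("domain:    {:?}", socket.domain()?);     // e.g. Domain::IPV4
    println!("protocol:  {:?}", socket.protocol()?);   // None if unspecified
    println!("mark:      {}", socket.mark()?);         // SO_MARK
    Ok(())
}
```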
+ /// + /// [`set_cork`]: Socket::set_cork + #[cfg(all( + feature = "all", + any(target_os = "android", target_os = "fuchsia", target_os = "linux") + ))] + #[cfg_attr( + docsrs, + doc(cfg(all( + feature = "all", + any(target_os = "android", target_os = "fuchsia", target_os = "linux") + ))) + )] + pub fn cork(&self) -> io::Result { + unsafe { + getsockopt::(self.as_raw(), libc::IPPROTO_TCP, libc::TCP_CORK) + .map(|cork| cork != 0) + } + } + + /// Set the value of the `TCP_CORK` option on this socket. + /// + /// If set, don't send out partial frames. All queued partial frames are + /// sent when the option is cleared again. There is a 200 millisecond ceiling on + /// the time for which output is corked by `TCP_CORK`. If this ceiling is reached, + /// then queued data is automatically transmitted. + #[cfg(all( + feature = "all", + any(target_os = "android", target_os = "fuchsia", target_os = "linux") + ))] + #[cfg_attr( + docsrs, + doc(cfg(all( + feature = "all", + any(target_os = "android", target_os = "fuchsia", target_os = "linux") + ))) + )] + pub fn set_cork(&self, cork: bool) -> io::Result<()> { + unsafe { + setsockopt( + self.as_raw(), + libc::IPPROTO_TCP, + libc::TCP_CORK, + cork as c_int, + ) + } + } + + /// Get the value of the `TCP_QUICKACK` option on this socket. + /// + /// For more information about this option, see [`set_quickack`]. + /// + /// [`set_quickack`]: Socket::set_quickack + #[cfg(all( + feature = "all", + any(target_os = "android", target_os = "fuchsia", target_os = "linux") + ))] + #[cfg_attr( + docsrs, + doc(cfg(all( + feature = "all", + any(target_os = "android", target_os = "fuchsia", target_os = "linux") + ))) + )] + pub fn quickack(&self) -> io::Result { + unsafe { + getsockopt::(self.as_raw(), libc::IPPROTO_TCP, libc::TCP_QUICKACK) + .map(|quickack| quickack != 0) + } + } + + /// Set the value of the `TCP_QUICKACK` option on this socket. + /// + /// If set, acks are sent immediately, rather than delayed if needed in accordance to normal + /// TCP operation. This flag is not permanent, it only enables a switch to or from quickack mode. + /// Subsequent operation of the TCP protocol will once again enter/leave quickack mode depending on + /// internal protocol processing and factors such as delayed ack timeouts occurring and data transfer. + #[cfg(all( + feature = "all", + any(target_os = "android", target_os = "fuchsia", target_os = "linux") + ))] + #[cfg_attr( + docsrs, + doc(cfg(all( + feature = "all", + any(target_os = "android", target_os = "fuchsia", target_os = "linux") + ))) + )] + pub fn set_quickack(&self, quickack: bool) -> io::Result<()> { + unsafe { + setsockopt( + self.as_raw(), + libc::IPPROTO_TCP, + libc::TCP_QUICKACK, + quickack as c_int, + ) + } + } + + /// Get the value of the `TCP_THIN_LINEAR_TIMEOUTS` option on this socket. + /// + /// For more information about this option, see [`set_thin_linear_timeouts`]. + /// + /// [`set_thin_linear_timeouts`]: Socket::set_thin_linear_timeouts + #[cfg(all( + feature = "all", + any(target_os = "android", target_os = "fuchsia", target_os = "linux") + ))] + #[cfg_attr( + docsrs, + doc(cfg(all( + feature = "all", + any(target_os = "android", target_os = "fuchsia", target_os = "linux") + ))) + )] + pub fn thin_linear_timeouts(&self) -> io::Result { + unsafe { + getsockopt::( + self.as_raw(), + libc::IPPROTO_TCP, + libc::TCP_THIN_LINEAR_TIMEOUTS, + ) + .map(|timeouts| timeouts != 0) + } + } + + /// Set the value of the `TCP_THIN_LINEAR_TIMEOUTS` option on this socket. 
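`cork`/`set_cork` and `quickack`/`set_quickack` wrap the Linux-only `TCP_CORK` and `TCP_QUICKACK` options. A sketch of corking a response that is written in several pieces, assuming Linux and `features = ["all"]`; `header` and `body` are placeholders, and short writes are ignored for brevity:

```rust
use std::io;

use socket2::Socket;

fn send_corked(socket: &Socket, header: &[u8], body: &[u8]) -> io::Result<()> {
    socket.set_cork(true)?;  // hold partial frames in the kernel
    socket.send(header)?;
    socket.send(body)?;
    socket.set_cork(false)?; // flush whatever is still queued
    Ok(())
}
```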
+ /// + /// If set, the kernel will dynamically detect a thin-stream connection if there are less than four packets in flight. + /// With less than four packets in flight the normal TCP fast retransmission will not be effective. + /// The kernel will modify the retransmission to avoid the very high latencies that thin stream suffer because of exponential backoff. + #[cfg(all( + feature = "all", + any(target_os = "android", target_os = "fuchsia", target_os = "linux") + ))] + #[cfg_attr( + docsrs, + doc(cfg(all( + feature = "all", + any(target_os = "android", target_os = "fuchsia", target_os = "linux") + ))) + )] + pub fn set_thin_linear_timeouts(&self, timeouts: bool) -> io::Result<()> { + unsafe { + setsockopt( + self.as_raw(), + libc::IPPROTO_TCP, + libc::TCP_THIN_LINEAR_TIMEOUTS, + timeouts as c_int, + ) + } + } + + /// Gets the value for the `SO_BINDTODEVICE` option on this socket. + /// + /// This value gets the socket binded device's interface name. + #[cfg(all( + feature = "all", + any(target_os = "android", target_os = "fuchsia", target_os = "linux") + ))] + #[cfg_attr( + docsrs, + doc(cfg(all( + feature = "all", + any(target_os = "android", target_os = "fuchsia", target_os = "linux") + ))) + )] + pub fn device(&self) -> io::Result>> { + // TODO: replace with `MaybeUninit::uninit_array` once stable. + let mut buf: [MaybeUninit; libc::IFNAMSIZ] = + unsafe { MaybeUninit::uninit().assume_init() }; + let mut len = buf.len() as libc::socklen_t; + unsafe { + syscall!(getsockopt( + self.as_raw(), + libc::SOL_SOCKET, + libc::SO_BINDTODEVICE, + buf.as_mut_ptr().cast(), + &mut len, + ))?; + } + if len == 0 { + Ok(None) + } else { + let buf = &buf[..len as usize - 1]; + // TODO: use `MaybeUninit::slice_assume_init_ref` once stable. + Ok(Some(unsafe { &*(buf as *const [_] as *const [u8]) }.into())) + } + } + + /// Sets the value for the `SO_BINDTODEVICE` option on this socket. + /// + /// If a socket is bound to an interface, only packets received from that + /// particular interface are processed by the socket. Note that this only + /// works for some socket types, particularly `AF_INET` sockets. + /// + /// If `interface` is `None` or an empty string it removes the binding. + #[cfg(all( + feature = "all", + any(target_os = "android", target_os = "fuchsia", target_os = "linux") + ))] + #[cfg_attr( + docsrs, + doc(cfg(all( + feature = "all", + any(target_os = "android", target_os = "fuchsia", target_os = "linux") + ))) + )] + pub fn bind_device(&self, interface: Option<&[u8]>) -> io::Result<()> { + let (value, len) = if let Some(interface) = interface { + (interface.as_ptr(), interface.len()) + } else { + (ptr::null(), 0) + }; + syscall!(setsockopt( + self.as_raw(), + libc::SOL_SOCKET, + libc::SO_BINDTODEVICE, + value.cast(), + len as libc::socklen_t, + )) + .map(|_| ()) + } + + /// Sets the value for the `SO_SETFIB` option on this socket. + /// + /// Bind socket to the specified forwarding table (VRF) on a FreeBSD. + #[cfg(all(feature = "all", any(target_os = "freebsd")))] + #[cfg_attr(docsrs, doc(cfg(all(feature = "all", any(target_os = "freebsd")))))] + pub fn set_fib(&self, fib: u32) -> io::Result<()> { + syscall!(setsockopt( + self.as_raw(), + libc::SOL_SOCKET, + libc::SO_SETFIB, + (&fib as *const u32).cast(), + mem::size_of::() as libc::socklen_t, + )) + .map(|_| ()) + } + + /// Sets the value for `IP_BOUND_IF` option on this socket. + /// + /// If a socket is bound to an interface, only packets received from that + /// particular interface are processed by the socket. 
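`device`/`bind_device` above read and write `SO_BINDTODEVICE` as a raw interface-name byte string (no trailing NUL). A sketch, assuming Linux, `features = ["all"]`, sufficient privileges (`CAP_NET_RAW`), and that an interface named `eth0` exists:

```rust
use std::io;

use socket2::Socket;

fn pin_to_eth0(socket: &Socket) -> io::Result<()> {
    // `None` (or an empty name) clears the binding again.
    socket.bind_device(Some(&b"eth0"[..]))?;
    assert_eq!(socket.device()?.as_deref(), Some(&b"eth0"[..]));
    Ok(())
}
```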
+ /// + /// If `interface` is `None`, the binding is removed. If the `interface` + /// index is not valid, an error is returned. + /// + /// One can use `libc::if_nametoindex` to convert an interface alias to an + /// index. + #[cfg(all(feature = "all", target_vendor = "apple"))] + #[cfg_attr(docsrs, doc(cfg(all(feature = "all", target_vendor = "apple"))))] + pub fn bind_device_by_index(&self, interface: Option) -> io::Result<()> { + let index = interface.map(NonZeroU32::get).unwrap_or(0); + unsafe { setsockopt(self.as_raw(), IPPROTO_IP, libc::IP_BOUND_IF, index) } + } + + /// Gets the value for `IP_BOUND_IF` option on this socket, i.e. the index + /// for the interface to which the socket is bound. + /// + /// Returns `None` if the socket is not bound to any interface, otherwise + /// returns an interface index. + #[cfg(all(feature = "all", target_vendor = "apple"))] + #[cfg_attr(docsrs, doc(cfg(all(feature = "all", target_vendor = "apple"))))] + pub fn device_index(&self) -> io::Result> { + let index = + unsafe { getsockopt::(self.as_raw(), IPPROTO_IP, libc::IP_BOUND_IF)? }; + Ok(NonZeroU32::new(index)) + } + + /// Get the value of the `SO_INCOMING_CPU` option on this socket. + /// + /// For more information about this option, see [`set_cpu_affinity`]. + /// + /// [`set_cpu_affinity`]: crate::Socket::set_cpu_affinity + #[cfg(all(feature = "all", target_os = "linux"))] + #[cfg_attr(docsrs, doc(cfg(all(feature = "all", target_os = "linux"))))] + pub fn cpu_affinity(&self) -> io::Result { + unsafe { + getsockopt::(self.as_raw(), libc::SOL_SOCKET, libc::SO_INCOMING_CPU) + .map(|cpu| cpu as usize) + } + } + + /// Set value for the `SO_INCOMING_CPU` option on this socket. + /// + /// Sets the CPU affinity of the socket. + #[cfg(all(feature = "all", target_os = "linux"))] + #[cfg_attr(docsrs, doc(cfg(all(feature = "all", target_os = "linux"))))] + pub fn set_cpu_affinity(&self, cpu: usize) -> io::Result<()> { + unsafe { + setsockopt( + self.as_raw(), + libc::SOL_SOCKET, + libc::SO_INCOMING_CPU, + cpu as c_int, + ) + } + } + + /// Get the value of the `SO_REUSEPORT` option on this socket. + /// + /// For more information about this option, see [`set_reuse_port`]. + /// + /// [`set_reuse_port`]: crate::Socket::set_reuse_port + #[cfg(all( + feature = "all", + not(any(target_os = "solaris", target_os = "illumos")) + ))] + #[cfg_attr( + docsrs, + doc(cfg(all( + feature = "all", + unix, + not(any(target_os = "solaris", target_os = "illumos")) + ))) + )] + pub fn reuse_port(&self) -> io::Result { + unsafe { + getsockopt::(self.as_raw(), libc::SOL_SOCKET, libc::SO_REUSEPORT) + .map(|reuse| reuse != 0) + } + } + + /// Set value for the `SO_REUSEPORT` option on this socket. + /// + /// This indicates that further calls to `bind` may allow reuse of local + /// addresses. For IPv4 sockets this means that a socket may bind even when + /// there's a socket already listening on this port. + #[cfg(all( + feature = "all", + not(any(target_os = "solaris", target_os = "illumos")) + ))] + #[cfg_attr( + docsrs, + doc(cfg(all( + feature = "all", + unix, + not(any(target_os = "solaris", target_os = "illumos")) + ))) + )] + pub fn set_reuse_port(&self, reuse: bool) -> io::Result<()> { + unsafe { + setsockopt( + self.as_raw(), + libc::SOL_SOCKET, + libc::SO_REUSEPORT, + reuse as c_int, + ) + } + } + + /// Get the value of the `IP_FREEBIND` option on this socket. + /// + /// For more information about this option, see [`set_freebind`]. 
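`reuse_port`/`set_reuse_port` above expose `SO_REUSEPORT`, which lets several sockets bind the same address and port so the kernel can spread incoming connections across them. A sketch of one member of such a group, assuming Linux and `features = ["all"]`; the port is arbitrary:

```rust
use std::io;
use std::net::SocketAddr;

use socket2::{Domain, Socket, Type};

fn shared_listener() -> io::Result<Socket> {
    let addr: SocketAddr = "0.0.0.0:8080".parse().unwrap();
    let socket = Socket::new(Domain::IPV4, Type::STREAM, None)?;
    // Must be set on every socket in the group *before* bind.
    socket.set_reuse_port(true)?;
    socket.bind(&addr.into())?;
    socket.listen(1024)?;
    Ok(socket)
}
```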
+ /// + /// [`set_freebind`]: crate::Socket::set_freebind + #[cfg(all( + feature = "all", + any(target_os = "android", target_os = "fuchsia", target_os = "linux") + ))] + #[cfg_attr( + docsrs, + doc(cfg(all( + feature = "all", + any(target_os = "android", target_os = "fuchsia", target_os = "linux") + ))) + )] + pub fn freebind(&self) -> io::Result { + unsafe { + getsockopt::(self.as_raw(), libc::SOL_IP, libc::IP_FREEBIND) + .map(|freebind| freebind != 0) + } + } + + /// Set value for the `IP_FREEBIND` option on this socket. + /// + /// If enabled, this boolean option allows binding to an IP address that is + /// nonlocal or does not (yet) exist. This permits listening on a socket, + /// without requiring the underlying network interface or the specified + /// dynamic IP address to be up at the time that the application is trying + /// to bind to it. + #[cfg(all( + feature = "all", + any(target_os = "android", target_os = "fuchsia", target_os = "linux") + ))] + #[cfg_attr( + docsrs, + doc(cfg(all( + feature = "all", + any(target_os = "android", target_os = "fuchsia", target_os = "linux") + ))) + )] + pub fn set_freebind(&self, freebind: bool) -> io::Result<()> { + unsafe { + setsockopt( + self.as_raw(), + libc::SOL_IP, + libc::IP_FREEBIND, + freebind as c_int, + ) + } + } + + /// Get the value of the `IPV6_FREEBIND` option on this socket. + /// + /// This is an IPv6 counterpart of `IP_FREEBIND` socket option on + /// Android/Linux. For more information about this option, see + /// [`set_freebind`]. + /// + /// [`set_freebind`]: crate::Socket::set_freebind + #[cfg(all(feature = "all", any(target_os = "android", target_os = "linux")))] + #[cfg_attr( + docsrs, + doc(cfg(all(feature = "all", any(target_os = "android", target_os = "linux")))) + )] + pub fn freebind_ipv6(&self) -> io::Result { + unsafe { + getsockopt::(self.as_raw(), libc::SOL_IPV6, libc::IPV6_FREEBIND) + .map(|freebind| freebind != 0) + } + } + + /// Set value for the `IPV6_FREEBIND` option on this socket. + /// + /// This is an IPv6 counterpart of `IP_FREEBIND` socket option on + /// Android/Linux. For more information about this option, see + /// [`set_freebind`]. + /// + /// [`set_freebind`]: crate::Socket::set_freebind + /// + /// # Examples + /// + /// On Linux: + /// + /// ``` + /// use socket2::{Domain, Socket, Type}; + /// use std::io::{self, Error, ErrorKind}; + /// + /// fn enable_freebind(socket: &Socket) -> io::Result<()> { + /// match socket.domain()? { + /// Domain::IPV4 => socket.set_freebind(true)?, + /// Domain::IPV6 => socket.set_freebind_ipv6(true)?, + /// _ => return Err(Error::new(ErrorKind::Other, "unsupported domain")), + /// }; + /// Ok(()) + /// } + /// + /// # fn main() -> io::Result<()> { + /// # let socket = Socket::new(Domain::IPV6, Type::STREAM, None)?; + /// # enable_freebind(&socket) + /// # } + /// ``` + #[cfg(all(feature = "all", any(target_os = "android", target_os = "linux")))] + #[cfg_attr( + docsrs, + doc(cfg(all(feature = "all", any(target_os = "android", target_os = "linux")))) + )] + pub fn set_freebind_ipv6(&self, freebind: bool) -> io::Result<()> { + unsafe { + setsockopt( + self.as_raw(), + libc::SOL_IPV6, + libc::IPV6_FREEBIND, + freebind as c_int, + ) + } + } + + /// Copies data between a `file` and this socket using the `sendfile(2)` + /// system call. Because this copying is done within the kernel, + /// `sendfile()` is more efficient than the combination of `read(2)` and + /// `write(2)`, which would require transferring data to and from user + /// space. 
+ /// + /// Different OSs support different kinds of `file`s, see the OS + /// documentation for what kind of files are supported. Generally *regular* + /// files are supported by all OSs. + /// + /// The `offset` is the absolute offset into the `file` to use as starting + /// point. + /// + /// Depending on the OS this function *may* change the offset of `file`. For + /// the best results reset the offset of the file before using it again. + /// + /// The `length` determines how many bytes to send, where a length of `None` + /// means it will try to send all bytes. + #[cfg(all( + feature = "all", + any( + target_os = "android", + target_os = "freebsd", + target_os = "linux", + target_vendor = "apple", + ) + ))] + #[cfg_attr( + docsrs, + doc(cfg(all( + feature = "all", + any( + target_os = "android", + target_os = "freebsd", + target_os = "linux", + target_vendor = "apple", + ) + ))) + )] + pub fn sendfile( + &self, + file: &F, + offset: usize, + length: Option, + ) -> io::Result + where + F: AsRawFd, + { + self._sendfile(file.as_raw_fd(), offset as _, length) + } + + #[cfg(all(feature = "all", target_vendor = "apple"))] + fn _sendfile( + &self, + file: RawFd, + offset: libc::off_t, + length: Option, + ) -> io::Result { + // On macOS `length` is value-result parameter. It determines the number + // of bytes to write and returns the number of bytes written. + let mut length = match length { + Some(n) => n.get() as libc::off_t, + // A value of `0` means send all bytes. + None => 0, + }; + syscall!(sendfile( + file, + self.as_raw(), + offset, + &mut length, + ptr::null_mut(), + 0, + )) + .map(|_| length as usize) + } + + #[cfg(all(feature = "all", any(target_os = "android", target_os = "linux")))] + fn _sendfile( + &self, + file: RawFd, + offset: libc::off_t, + length: Option, + ) -> io::Result { + let count = match length { + Some(n) => n.get() as libc::size_t, + // The maximum the Linux kernel will write in a single call. + None => 0x7ffff000, // 2,147,479,552 bytes. + }; + let mut offset = offset; + syscall!(sendfile(self.as_raw(), file, &mut offset, count)).map(|n| n as usize) + } + + #[cfg(all(feature = "all", target_os = "freebsd"))] + fn _sendfile( + &self, + file: RawFd, + offset: libc::off_t, + length: Option, + ) -> io::Result { + let nbytes = match length { + Some(n) => n.get() as libc::size_t, + // A value of `0` means send all bytes. + None => 0, + }; + let mut sbytes: libc::off_t = 0; + syscall!(sendfile( + file, + self.as_raw(), + offset, + nbytes, + ptr::null_mut(), + &mut sbytes, + 0, + )) + .map(|_| sbytes as usize) + } + + /// Set the value of the `TCP_USER_TIMEOUT` option on this socket. + /// + /// If set, this specifies the maximum amount of time that transmitted data may remain + /// unacknowledged or buffered data may remain untransmitted before TCP will forcibly close the + /// corresponding connection. + /// + /// Setting `timeout` to `None` or a zero duration causes the system default timeouts to + /// be used. If `timeout` in milliseconds is larger than `c_uint::MAX`, the timeout is clamped + /// to `c_uint::MAX`. For example, when `c_uint` is a 32-bit value, this limits the timeout to + /// approximately 49.71 days. 
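The `sendfile` wrapper above papers over the three incompatible `sendfile(2)` signatures (macOS, Linux/Android, FreeBSD). A sketch of streaming a file over an already connected TCP socket, assuming `features = ["all"]` on one of those platforms; the path argument is a placeholder:

```rust
use std::fs::File;
use std::io;

use socket2::Socket;

fn send_file(socket: &Socket, path: &str) -> io::Result<usize> {
    let file = File::open(path)?;
    // offset 0, length `None` = send as much as the kernel accepts in one
    // call; the return value is the number of bytes actually queued, so
    // callers may need to loop for very large files.
    socket.sendfile(&file, 0, None)
}
```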
+ #[cfg(all( + feature = "all", + any(target_os = "android", target_os = "fuchsia", target_os = "linux") + ))] + #[cfg_attr( + docsrs, + doc(cfg(all( + feature = "all", + any(target_os = "android", target_os = "fuchsia", target_os = "linux") + ))) + )] + pub fn set_tcp_user_timeout(&self, timeout: Option) -> io::Result<()> { + let timeout = timeout + .map(|to| min(to.as_millis(), libc::c_uint::MAX as u128) as libc::c_uint) + .unwrap_or(0); + unsafe { + setsockopt( + self.as_raw(), + libc::IPPROTO_TCP, + libc::TCP_USER_TIMEOUT, + timeout, + ) + } + } + + /// Get the value of the `TCP_USER_TIMEOUT` option on this socket. + /// + /// For more information about this option, see [`set_tcp_user_timeout`]. + /// + /// [`set_tcp_user_timeout`]: Socket::set_tcp_user_timeout + #[cfg(all( + feature = "all", + any(target_os = "android", target_os = "fuchsia", target_os = "linux") + ))] + #[cfg_attr( + docsrs, + doc(cfg(all( + feature = "all", + any(target_os = "android", target_os = "fuchsia", target_os = "linux") + ))) + )] + pub fn tcp_user_timeout(&self) -> io::Result> { + unsafe { + getsockopt::(self.as_raw(), libc::IPPROTO_TCP, libc::TCP_USER_TIMEOUT) + .map(|millis| { + if millis == 0 { + None + } else { + Some(Duration::from_millis(millis as u64)) + } + }) + } + } + + /// Attach Berkeley Packet Filter(BPF) on this socket. + /// + /// BPF allows a user-space program to attach a filter onto any socket + /// and allow or disallow certain types of data to come through the socket. + /// + /// For more information about this option, see [filter](https://www.kernel.org/doc/html/v5.12/networking/filter.html) + #[cfg(all(feature = "all", any(target_os = "linux", target_os = "android")))] + pub fn attach_filter(&self, filters: &[libc::sock_filter]) -> io::Result<()> { + let prog = libc::sock_fprog { + len: filters.len() as u16, + filter: filters.as_ptr() as *mut _, + }; + + unsafe { + setsockopt( + self.as_raw(), + libc::SOL_SOCKET, + libc::SO_ATTACH_FILTER, + prog, + ) + } + } + + /// Detach Berkeley Packet Filter(BPF) from this socket. + /// + /// For more information about this option, see [`attach_filter`] + #[cfg(all(feature = "all", any(target_os = "linux", target_os = "android")))] + pub fn detach_filter(&self) -> io::Result<()> { + unsafe { setsockopt(self.as_raw(), libc::SOL_SOCKET, libc::SO_DETACH_FILTER, 0) } + } +} + +#[cfg_attr(docsrs, doc(cfg(unix)))] +impl AsRawFd for crate::Socket { + fn as_raw_fd(&self) -> c_int { + self.as_raw() + } +} + +#[cfg_attr(docsrs, doc(cfg(unix)))] +impl IntoRawFd for crate::Socket { + fn into_raw_fd(self) -> c_int { + self.into_raw() + } +} + +#[cfg_attr(docsrs, doc(cfg(unix)))] +impl FromRawFd for crate::Socket { + unsafe fn from_raw_fd(fd: c_int) -> crate::Socket { + crate::Socket::from_raw(fd) + } +} + +#[cfg(feature = "all")] +from!(UnixStream, crate::Socket); +#[cfg(feature = "all")] +from!(UnixListener, crate::Socket); +#[cfg(feature = "all")] +from!(UnixDatagram, crate::Socket); +#[cfg(feature = "all")] +from!(crate::Socket, UnixStream); +#[cfg(feature = "all")] +from!(crate::Socket, UnixListener); +#[cfg(feature = "all")] +from!(crate::Socket, UnixDatagram); + #[test] -fn test_ip() { +fn in_addr_convertion() { let ip = Ipv4Addr::new(127, 0, 0, 1); - assert_eq!(ip, from_s_addr(to_s_addr(&ip))); + let raw = to_in_addr(&ip); + // NOTE: `in_addr` is packed on NetBSD and it's unsafe to borrow. 
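`set_tcp_user_timeout`/`tcp_user_timeout` above clamp the duration to `c_uint` milliseconds before handing it to `TCP_USER_TIMEOUT`. A sketch, assuming Linux and `features = ["all"]`; 30 seconds is an arbitrary choice:

```rust
use std::io;
use std::time::Duration;

use socket2::Socket;

fn bound_unacked_time(socket: &Socket) -> io::Result<()> {
    // Abort the connection if transmitted data stays unacknowledged for
    // roughly 30 seconds; `None` (or zero) restores the system default.
    socket.set_tcp_user_timeout(Some(Duration::from_secs(30)))?;
    assert_eq!(socket.tcp_user_timeout()?, Some(Duration::from_secs(30)));
    Ok(())
}
```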
+ let a = raw.s_addr; + assert_eq!(a, u32::from_ne_bytes([127, 0, 0, 1])); + assert_eq!(from_in_addr(raw), ip); + + let ip = Ipv4Addr::new(127, 34, 4, 12); + let raw = to_in_addr(&ip); + let a = raw.s_addr; + assert_eq!(a, u32::from_ne_bytes([127, 34, 4, 12])); + assert_eq!(from_in_addr(raw), ip); } #[test] -fn test_out_of_band_inline() { - let tcp = Socket::new(libc::AF_INET, libc::SOCK_STREAM, 0).unwrap(); - assert_eq!(tcp.out_of_band_inline().unwrap(), false); - - tcp.set_out_of_band_inline(true).unwrap(); - assert_eq!(tcp.out_of_band_inline().unwrap(), true); +fn in6_addr_convertion() { + let ip = Ipv6Addr::new(0x2000, 1, 2, 3, 4, 5, 6, 7); + let raw = to_in6_addr(&ip); + let want = [32, 0, 0, 1, 0, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7]; + assert_eq!(raw.s6_addr, want); + assert_eq!(from_in6_addr(raw), ip); } diff --git a/third_party/rust/socket2/src/sys/windows.rs b/third_party/rust/socket2/src/sys/windows.rs index 443ef265e2a7..ab598399a8ce 100644 --- a/third_party/rust/socket2/src/sys/windows.rs +++ b/third_party/rust/socket2/src/sys/windows.rs @@ -1,6 +1,4 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. +// Copyright 2015 The Rust Project Developers. // // Licensed under the Apache License, Version 2.0 or the MIT license @@ -8,51 +6,101 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use std::cmp; -use std::fmt; -use std::io; -use std::io::{Read, Write}; -use std::mem; -use std::net::Shutdown; -use std::net::{self, Ipv4Addr, Ipv6Addr}; +use std::cmp::min; +use std::io::{self, IoSlice}; +use std::marker::PhantomData; +use std::mem::{self, size_of, MaybeUninit}; +use std::net::{self, Ipv4Addr, Ipv6Addr, Shutdown}; use std::os::windows::prelude::*; -use std::ptr; use std::sync::Once; -use std::time::Duration; +use std::time::{Duration, Instant}; +use std::{ptr, slice}; -use winapi::ctypes::{c_char, c_long, c_ulong}; +use winapi::ctypes::c_long; use winapi::shared::in6addr::*; use winapi::shared::inaddr::*; use winapi::shared::minwindef::DWORD; -use winapi::shared::ntdef::{HANDLE, ULONG}; -use winapi::shared::ws2def::{self, *}; -use winapi::shared::ws2ipdef::*; +use winapi::shared::minwindef::ULONG; +use winapi::shared::mstcpip::{tcp_keepalive, SIO_KEEPALIVE_VALS}; +use winapi::shared::ntdef::HANDLE; +use winapi::shared::ws2def; +use winapi::shared::ws2def::WSABUF; use winapi::um::handleapi::SetHandleInformation; use winapi::um::processthreadsapi::GetCurrentProcessId; -use winapi::um::winbase::INFINITE; -use winapi::um::winsock2 as sock; +use winapi::um::winbase::{self, INFINITE}; +use winapi::um::winsock2::{ + self as sock, u_long, POLLERR, POLLHUP, POLLRDNORM, POLLWRNORM, SD_BOTH, SD_RECEIVE, SD_SEND, + WSAPOLLFD, +}; -use crate::SockAddr; +use crate::{RecvFlags, SockAddr, TcpKeepalive, Type}; -const HANDLE_FLAG_INHERIT: DWORD = 0x00000001; -const MSG_PEEK: c_int = 0x2; -const SD_BOTH: c_int = 2; -const SD_RECEIVE: c_int = 0; -const SD_SEND: c_int = 1; -const SIO_KEEPALIVE_VALS: DWORD = 0x98000004; -const WSA_FLAG_OVERLAPPED: DWORD = 0x01; +pub(crate) use winapi::ctypes::c_int; -pub use winapi::ctypes::c_int; +/// Fake MSG_TRUNC flag for the [`RecvFlags`] struct. +/// +/// The flag is enabled when a `WSARecv[From]` call returns `WSAEMSGSIZE`. The +/// value of the flag is defined by us. +pub(crate) const MSG_TRUNC: c_int = 0x01; // Used in `Domain`. 
pub(crate) use winapi::shared::ws2def::{AF_INET, AF_INET6}; // Used in `Type`. -pub(crate) use winapi::shared::ws2def::{SOCK_DGRAM, SOCK_RAW, SOCK_SEQPACKET, SOCK_STREAM}; +pub(crate) use winapi::shared::ws2def::{SOCK_DGRAM, SOCK_STREAM}; +#[cfg(feature = "all")] +pub(crate) use winapi::shared::ws2def::{SOCK_RAW, SOCK_SEQPACKET}; // Used in `Protocol`. pub(crate) const IPPROTO_ICMP: c_int = winapi::shared::ws2def::IPPROTO_ICMP as c_int; pub(crate) const IPPROTO_ICMPV6: c_int = winapi::shared::ws2def::IPPROTO_ICMPV6 as c_int; pub(crate) const IPPROTO_TCP: c_int = winapi::shared::ws2def::IPPROTO_TCP as c_int; pub(crate) const IPPROTO_UDP: c_int = winapi::shared::ws2def::IPPROTO_UDP as c_int; +// Used in `SockAddr`. +pub(crate) use winapi::shared::ws2def::{ + ADDRESS_FAMILY as sa_family_t, SOCKADDR as sockaddr, SOCKADDR_IN as sockaddr_in, + SOCKADDR_STORAGE as sockaddr_storage, +}; +pub(crate) use winapi::shared::ws2ipdef::SOCKADDR_IN6_LH as sockaddr_in6; +pub(crate) use winapi::um::ws2tcpip::socklen_t; +// Used in `Socket`. +pub(crate) use winapi::shared::ws2def::{ + IPPROTO_IP, SOL_SOCKET, SO_BROADCAST, SO_ERROR, SO_KEEPALIVE, SO_LINGER, SO_OOBINLINE, + SO_RCVBUF, SO_RCVTIMEO, SO_REUSEADDR, SO_SNDBUF, SO_SNDTIMEO, SO_TYPE, TCP_NODELAY, +}; +#[cfg(feature = "all")] +pub(crate) use winapi::shared::ws2ipdef::IP_HDRINCL; +pub(crate) use winapi::shared::ws2ipdef::{ + IPV6_ADD_MEMBERSHIP, IPV6_DROP_MEMBERSHIP, IPV6_MREQ as Ipv6Mreq, IPV6_MULTICAST_HOPS, + IPV6_MULTICAST_IF, IPV6_MULTICAST_LOOP, IPV6_UNICAST_HOPS, IPV6_V6ONLY, IP_ADD_MEMBERSHIP, + IP_DROP_MEMBERSHIP, IP_MREQ as IpMreq, IP_MULTICAST_IF, IP_MULTICAST_LOOP, IP_MULTICAST_TTL, + IP_TOS, IP_TTL, +}; +pub(crate) use winapi::um::winsock2::{linger, MSG_OOB, MSG_PEEK}; +pub(crate) const IPPROTO_IPV6: c_int = winapi::shared::ws2def::IPPROTO_IPV6 as c_int; + +/// Type used in set/getsockopt to retrieve the `TCP_NODELAY` option. +/// +/// NOTE: +/// documents that options such as `TCP_NODELAY` and `SO_KEEPALIVE` expect a +/// `BOOL` (alias for `c_int`, 4 bytes), however in practice this turns out to +/// be false (or misleading) as a `BOOLEAN` (`c_uchar`, 1 byte) is returned by +/// `getsockopt`. +pub(crate) type Bool = winapi::shared::ntdef::BOOLEAN; + +/// Maximum size of a buffer passed to system call like `recv` and `send`. +const MAX_BUF_LEN: usize = ::max_value() as usize; + +/// Helper macro to execute a system call that returns an `io::Result`. +macro_rules! syscall { + ($fn: ident ( $($arg: expr),* $(,)* ), $err_test: path, $err_value: expr) => {{ + #[allow(unused_unsafe)] + let res = unsafe { sock::$fn($($arg, )*) }; + if $err_test(&res, &$err_value) { + Err(io::Error::last_os_error()) + } else { + Ok(res) + } + }}; +} impl_debug!( crate::Domain, @@ -62,6 +110,24 @@ impl_debug!( ws2def::AF_UNSPEC, // = 0. ); +/// Windows only API. +impl Type { + /// Our custom flag to set `WSA_FLAG_NO_HANDLE_INHERIT` on socket creation. + /// Trying to mimic `Type::cloexec` on windows. + const NO_INHERIT: c_int = 1 << ((size_of::() * 8) - 1); // Last bit. + + /// Set `WSA_FLAG_NO_HANDLE_INHERIT` on the socket. 
+ #[cfg(feature = "all")] + #[cfg_attr(docsrs, doc(cfg(all(windows, feature = "all"))))] + pub const fn no_inherit(self) -> Type { + self._no_inherit() + } + + pub(crate) const fn _no_inherit(self) -> Type { + Type(self.0 | Type::NO_INHERIT) + } +} + impl_debug!( crate::Type, ws2def::SOCK_STREAM, @@ -79,11 +145,43 @@ impl_debug!( self::IPPROTO_UDP, ); -#[repr(C)] -struct tcp_keepalive { - onoff: c_ulong, - keepalivetime: c_ulong, - keepaliveinterval: c_ulong, +impl std::fmt::Debug for RecvFlags { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("RecvFlags") + .field("is_truncated", &self.is_truncated()) + .finish() + } +} + +#[repr(transparent)] +pub struct MaybeUninitSlice<'a> { + vec: WSABUF, + _lifetime: PhantomData<&'a mut [MaybeUninit]>, +} + +unsafe impl<'a> Send for MaybeUninitSlice<'a> {} + +unsafe impl<'a> Sync for MaybeUninitSlice<'a> {} + +impl<'a> MaybeUninitSlice<'a> { + pub fn new(buf: &'a mut [MaybeUninit]) -> MaybeUninitSlice<'a> { + assert!(buf.len() <= ULONG::MAX as usize); + MaybeUninitSlice { + vec: WSABUF { + len: buf.len() as ULONG, + buf: buf.as_mut_ptr().cast(), + }, + _lifetime: PhantomData, + } + } + + pub fn as_slice(&self) -> &[MaybeUninit] { + unsafe { slice::from_raw_parts(self.vec.buf.cast(), self.vec.len as usize) } + } + + pub fn as_mut_slice(&mut self) -> &mut [MaybeUninit] { + unsafe { slice::from_raw_parts_mut(self.vec.buf.cast(), self.vec.len as usize) } + } } fn init() { @@ -97,887 +195,541 @@ fn init() { }); } -fn last_error() -> io::Error { - io::Error::from_raw_os_error(unsafe { sock::WSAGetLastError() }) +pub(crate) type Socket = sock::SOCKET; + +pub(crate) unsafe fn socket_from_raw(socket: Socket) -> crate::socket::Inner { + crate::socket::Inner::from_raw_socket(socket as RawSocket) } -pub struct Socket { - socket: sock::SOCKET, +pub(crate) fn socket_as_raw(socket: &crate::socket::Inner) -> Socket { + socket.as_raw_socket() as Socket } -impl Socket { - pub fn new(family: c_int, ty: c_int, protocol: c_int) -> io::Result { - init(); - unsafe { - let socket = match sock::WSASocketW( - family, - ty, - protocol, - ptr::null_mut(), - 0, - WSA_FLAG_OVERLAPPED, - ) { - sock::INVALID_SOCKET => return Err(last_error()), - socket => socket, - }; - let socket = Socket::from_raw_socket(socket as RawSocket); - socket.set_no_inherit()?; - Ok(socket) - } - } +pub(crate) fn socket_into_raw(socket: crate::socket::Inner) -> Socket { + socket.into_raw_socket() as Socket +} - pub fn bind(&self, addr: &SockAddr) -> io::Result<()> { - unsafe { - if sock::bind(self.socket, addr.as_ptr(), addr.len()) == 0 { - Ok(()) - } else { - Err(last_error()) - } - } - } +pub(crate) fn socket(family: c_int, mut ty: c_int, protocol: c_int) -> io::Result { + init(); - pub fn listen(&self, backlog: i32) -> io::Result<()> { - unsafe { - if sock::listen(self.socket, backlog) == 0 { - Ok(()) - } else { - Err(last_error()) - } - } - } + // Check if we set our custom flag. 
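`Type::no_inherit` stores a private marker in the top bit of the type value, and the `socket()` wrapper below turns it into `WSA_FLAG_NO_HANDLE_INHERIT`, mirroring `Type::cloexec` on Unix. A sketch of the Windows-side call, assuming socket2 0.4 with `features = ["all"]`:

```rust
use std::io;

use socket2::{Domain, Protocol, Socket, Type};

#[cfg(windows)]
fn non_inheritable_tcp() -> io::Result<Socket> {
    // The handle is created with WSA_FLAG_NO_HANDLE_INHERIT, so child
    // processes spawned later do not inherit it.
    Socket::new(Domain::IPV4, Type::STREAM.no_inherit(), Some(Protocol::TCP))
}
```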
+ let flags = if ty & Type::NO_INHERIT != 0 { + ty = ty & !Type::NO_INHERIT; + sock::WSA_FLAG_NO_HANDLE_INHERIT + } else { + 0 + }; - pub fn connect(&self, addr: &SockAddr) -> io::Result<()> { - unsafe { - if sock::connect(self.socket, addr.as_ptr(), addr.len()) == 0 { - Ok(()) - } else { - Err(last_error()) - } - } - } + syscall!( + WSASocketW( + family, + ty, + protocol, + ptr::null_mut(), + 0, + sock::WSA_FLAG_OVERLAPPED | flags, + ), + PartialEq::eq, + sock::INVALID_SOCKET + ) +} - pub fn connect_timeout(&self, addr: &SockAddr, timeout: Duration) -> io::Result<()> { - self.set_nonblocking(true)?; - let r = self.connect(addr); - self.set_nonblocking(false)?; +pub(crate) fn bind(socket: Socket, addr: &SockAddr) -> io::Result<()> { + syscall!(bind(socket, addr.as_ptr(), addr.len()), PartialEq::ne, 0).map(|_| ()) +} - match r { - Ok(()) => return Ok(()), - Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {} - Err(e) => return Err(e), +pub(crate) fn connect(socket: Socket, addr: &SockAddr) -> io::Result<()> { + syscall!(connect(socket, addr.as_ptr(), addr.len()), PartialEq::ne, 0).map(|_| ()) +} + +pub(crate) fn poll_connect(socket: &crate::Socket, timeout: Duration) -> io::Result<()> { + let start = Instant::now(); + + let mut fd_array = WSAPOLLFD { + fd: socket.as_raw(), + events: POLLRDNORM | POLLWRNORM, + revents: 0, + }; + + loop { + let elapsed = start.elapsed(); + if elapsed >= timeout { + return Err(io::ErrorKind::TimedOut.into()); } - if timeout.as_secs() == 0 && timeout.subsec_nanos() == 0 { - return Err(io::Error::new( - io::ErrorKind::InvalidInput, - "cannot set a 0 duration timeout", - )); - } + let timeout = (timeout - elapsed).as_millis(); + let timeout = clamp(timeout, 1, c_int::max_value() as u128) as c_int; - let mut timeout = sock::timeval { - tv_sec: timeout.as_secs() as c_long, - tv_usec: (timeout.subsec_nanos() / 1000) as c_long, - }; - if timeout.tv_sec == 0 && timeout.tv_usec == 0 { - timeout.tv_usec = 1; - } - - let fds = unsafe { - let mut fds = mem::zeroed::(); - fds.fd_count = 1; - fds.fd_array[0] = self.socket; - fds - }; - - let mut writefds = fds; - let mut errorfds = fds; - - match unsafe { sock::select(1, ptr::null_mut(), &mut writefds, &mut errorfds, &timeout) } { - sock::SOCKET_ERROR => return Err(io::Error::last_os_error()), - 0 => { - return Err(io::Error::new( - io::ErrorKind::TimedOut, - "connection timed out", - )) - } - _ => { - if writefds.fd_count != 1 { - if let Some(e) = self.take_error()? { - return Err(e); + match syscall!( + WSAPoll(&mut fd_array, 1, timeout), + PartialEq::eq, + sock::SOCKET_ERROR + ) { + Ok(0) => return Err(io::ErrorKind::TimedOut.into()), + Ok(_) => { + // Error or hang up indicates an error (or failure to connect). + if (fd_array.revents & POLLERR) != 0 || (fd_array.revents & POLLHUP) != 0 { + match socket.take_error() { + Ok(Some(err)) => return Err(err), + Ok(None) => { + return Err(io::Error::new( + io::ErrorKind::Other, + "no error set after POLLHUP", + )) + } + Err(err) => return Err(err), } } - Ok(()) + return Ok(()); } + // Got interrupted, try again. 
+ Err(ref err) if err.kind() == io::ErrorKind::Interrupted => continue, + Err(err) => return Err(err), } } +} - pub fn local_addr(&self) -> io::Result<SockAddr> { - unsafe { - let mut storage: SOCKADDR_STORAGE = mem::zeroed(); - let mut len = mem::size_of_val(&storage) as c_int; - if sock::getsockname(self.socket, &mut storage as *mut _ as *mut _, &mut len) != 0 { - return Err(last_error()); - } - Ok(SockAddr::from_raw_parts( - &storage as *const _ as *const _, - len, - )) - } +// TODO: use clamp from std lib, stable since 1.50. +fn clamp<T>(value: T, min: T, max: T) -> T +where + T: Ord, +{ + if value <= min { + min + } else if value >= max { + max + } else { + value } +} - pub fn peer_addr(&self) -> io::Result<SockAddr> { - unsafe { - let mut storage: SOCKADDR_STORAGE = mem::zeroed(); - let mut len = mem::size_of_val(&storage) as c_int; - if sock::getpeername(self.socket, &mut storage as *mut _ as *mut _, &mut len) != 0 { - return Err(last_error()); - } - Ok(SockAddr::from_raw_parts( - &storage as *const _ as *const _, - len, - )) - } +pub(crate) fn listen(socket: Socket, backlog: c_int) -> io::Result<()> { + syscall!(listen(socket, backlog), PartialEq::ne, 0).map(|_| ()) +} + +pub(crate) fn accept(socket: Socket) -> io::Result<(Socket, SockAddr)> { + // Safety: `accept` initialises the `SockAddr` for us. + unsafe { + SockAddr::init(|storage, len| { + syscall!( + accept(socket, storage.cast(), len), + PartialEq::eq, + sock::INVALID_SOCKET + ) + }) } +} - pub fn try_clone(&self) -> io::Result<Socket> { - unsafe { - let mut info: sock::WSAPROTOCOL_INFOW = mem::zeroed(); - let r = sock::WSADuplicateSocketW(self.socket, GetCurrentProcessId(), &mut info); - if r != 0 { - return Err(io::Error::last_os_error()); - } - let socket = sock::WSASocketW( - info.iAddressFamily, - info.iSocketType, - info.iProtocol, - &mut info, - 0, - WSA_FLAG_OVERLAPPED, +pub(crate) fn getsockname(socket: Socket) -> io::Result<SockAddr> { + // Safety: `getsockname` initialises the `SockAddr` for us. + unsafe { + SockAddr::init(|storage, len| { + syscall!( + getsockname(socket, storage.cast(), len), + PartialEq::eq, + sock::SOCKET_ERROR + ) + }) + } + .map(|(_, addr)| addr) +} + +pub(crate) fn getpeername(socket: Socket) -> io::Result<SockAddr> { + // Safety: `getpeername` initialises the `SockAddr` for us. + unsafe { + SockAddr::init(|storage, len| { + syscall!( + getpeername(socket, storage.cast(), len), + PartialEq::eq, + sock::SOCKET_ERROR + ) + }) + } + .map(|(_, addr)| addr) +} + +pub(crate) fn try_clone(socket: Socket) -> io::Result<Socket> { + let mut info: MaybeUninit<sock::WSAPROTOCOL_INFOW> = MaybeUninit::uninit(); + syscall!( + WSADuplicateSocketW(socket, GetCurrentProcessId(), info.as_mut_ptr()), + PartialEq::eq, + sock::SOCKET_ERROR + )?; + // Safety: `WSADuplicateSocketW` initialised `info` for us.
+ let mut info = unsafe { info.assume_init() }; + + syscall!( + WSASocketW( + info.iAddressFamily, + info.iSocketType, + info.iProtocol, + &mut info, + 0, + sock::WSA_FLAG_OVERLAPPED | sock::WSA_FLAG_NO_HANDLE_INHERIT, + ), + PartialEq::eq, + sock::INVALID_SOCKET + ) +} + +pub(crate) fn set_nonblocking(socket: Socket, nonblocking: bool) -> io::Result<()> { + let mut nonblocking = nonblocking as u_long; + ioctlsocket(socket, sock::FIONBIO, &mut nonblocking) +} + +pub(crate) fn shutdown(socket: Socket, how: Shutdown) -> io::Result<()> { + let how = match how { + Shutdown::Write => SD_SEND, + Shutdown::Read => SD_RECEIVE, + Shutdown::Both => SD_BOTH, + }; + syscall!(shutdown(socket, how), PartialEq::eq, sock::SOCKET_ERROR).map(|_| ()) +} + +pub(crate) fn recv(socket: Socket, buf: &mut [MaybeUninit<u8>], flags: c_int) -> io::Result<usize> { + let res = syscall!( + recv( + socket, + buf.as_mut_ptr().cast(), + min(buf.len(), MAX_BUF_LEN) as c_int, + flags, + ), + PartialEq::eq, + sock::SOCKET_ERROR + ); + match res { + Ok(n) => Ok(n as usize), + Err(ref err) if err.raw_os_error() == Some(sock::WSAESHUTDOWN as i32) => Ok(0), + Err(err) => Err(err), + } +} + +pub(crate) fn recv_vectored( + socket: Socket, + bufs: &mut [crate::MaybeUninitSlice<'_>], + flags: c_int, +) -> io::Result<(usize, RecvFlags)> { + let mut nread = 0; + let mut flags = flags as DWORD; + let res = syscall!( + WSARecv( + socket, + bufs.as_mut_ptr().cast(), + min(bufs.len(), DWORD::max_value() as usize) as DWORD, + &mut nread, + &mut flags, + ptr::null_mut(), + None, + ), + PartialEq::eq, + sock::SOCKET_ERROR + ); + match res { + Ok(_) => Ok((nread as usize, RecvFlags(0))), + Err(ref err) if err.raw_os_error() == Some(sock::WSAESHUTDOWN as i32) => { + Ok((0, RecvFlags(0))) + } + Err(ref err) if err.raw_os_error() == Some(sock::WSAEMSGSIZE as i32) => { + Ok((nread as usize, RecvFlags(MSG_TRUNC))) + } + Err(err) => Err(err), + } +} + +pub(crate) fn recv_from( + socket: Socket, + buf: &mut [MaybeUninit<u8>], + flags: c_int, +) -> io::Result<(usize, SockAddr)> { + // Safety: `recvfrom` initialises the `SockAddr` for us.
+ unsafe { + SockAddr::init(|storage, addrlen| { + let res = syscall!( + recvfrom( + socket, + buf.as_mut_ptr().cast(), + min(buf.len(), MAX_BUF_LEN) as c_int, + flags, + storage.cast(), + addrlen, + ), + PartialEq::eq, + sock::SOCKET_ERROR ); - let socket = match socket { - sock::INVALID_SOCKET => return Err(last_error()), - n => Socket::from_raw_socket(n as RawSocket), - }; - socket.set_no_inherit()?; - Ok(socket) - } - } - - pub fn accept(&self) -> io::Result<(Socket, SockAddr)> { - unsafe { - let mut storage: SOCKADDR_STORAGE = mem::zeroed(); - let mut len = mem::size_of_val(&storage) as c_int; - let socket = { sock::accept(self.socket, &mut storage as *mut _ as *mut _, &mut len) }; - let socket = match socket { - sock::INVALID_SOCKET => return Err(last_error()), - socket => Socket::from_raw_socket(socket as RawSocket), - }; - socket.set_no_inherit()?; - let addr = SockAddr::from_raw_parts(&storage as *const _ as *const _, len); - Ok((socket, addr)) - } - } - - pub fn take_error(&self) -> io::Result> { - unsafe { - let raw: c_int = self.getsockopt(SOL_SOCKET, SO_ERROR)?; - if raw == 0 { - Ok(None) - } else { - Ok(Some(io::Error::from_raw_os_error(raw as i32))) + match res { + Ok(n) => Ok(n as usize), + Err(ref err) if err.raw_os_error() == Some(sock::WSAESHUTDOWN as i32) => Ok(0), + Err(err) => Err(err), } - } + }) } +} - pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> { - unsafe { - let mut nonblocking = nonblocking as c_ulong; - let r = sock::ioctlsocket(self.socket, sock::FIONBIO as c_int, &mut nonblocking); - if r == 0 { - Ok(()) - } else { - Err(io::Error::last_os_error()) +pub(crate) fn recv_from_vectored( + socket: Socket, + bufs: &mut [crate::MaybeUninitSlice<'_>], + flags: c_int, +) -> io::Result<(usize, RecvFlags, SockAddr)> { + // Safety: `recvfrom` initialises the `SockAddr` for us. 
+ unsafe { + SockAddr::init(|storage, addrlen| { + let mut nread = 0; + let mut flags = flags as DWORD; + let res = syscall!( + WSARecvFrom( + socket, + bufs.as_mut_ptr().cast(), + min(bufs.len(), DWORD::max_value() as usize) as DWORD, + &mut nread, + &mut flags, + storage.cast(), + addrlen, + ptr::null_mut(), + None, + ), + PartialEq::eq, + sock::SOCKET_ERROR + ); + match res { + Ok(_) => Ok((nread as usize, RecvFlags(0))), + Err(ref err) if err.raw_os_error() == Some(sock::WSAESHUTDOWN as i32) => { + Ok((nread as usize, RecvFlags(0))) + } + Err(ref err) if err.raw_os_error() == Some(sock::WSAEMSGSIZE as i32) => { + Ok((nread as usize, RecvFlags(MSG_TRUNC))) + } + Err(err) => Err(err), } - } - } - - pub fn shutdown(&self, how: Shutdown) -> io::Result<()> { - let how = match how { - Shutdown::Write => SD_SEND, - Shutdown::Read => SD_RECEIVE, - Shutdown::Both => SD_BOTH, - }; - if unsafe { sock::shutdown(self.socket, how) == 0 } { - Ok(()) - } else { - Err(last_error()) - } - } - - pub fn recv(&self, buf: &mut [u8], flags: c_int) -> io::Result { - unsafe { - let n = { - sock::recv( - self.socket, - buf.as_mut_ptr() as *mut c_char, - clamp(buf.len()), - flags, - ) - }; - match n { - sock::SOCKET_ERROR if sock::WSAGetLastError() == sock::WSAESHUTDOWN as i32 => Ok(0), - sock::SOCKET_ERROR => Err(last_error()), - n => Ok(n as usize), - } - } - } - - pub fn peek(&self, buf: &mut [u8]) -> io::Result { - unsafe { - let n = { - sock::recv( - self.socket, - buf.as_mut_ptr() as *mut c_char, - clamp(buf.len()), - MSG_PEEK, - ) - }; - match n { - sock::SOCKET_ERROR if sock::WSAGetLastError() == sock::WSAESHUTDOWN as i32 => Ok(0), - sock::SOCKET_ERROR => Err(last_error()), - n => Ok(n as usize), - } - } - } - - pub fn peek_from(&self, buf: &mut [u8]) -> io::Result<(usize, SockAddr)> { - self.recv_from(buf, MSG_PEEK) - } - - pub fn recv_from(&self, buf: &mut [u8], flags: c_int) -> io::Result<(usize, SockAddr)> { - unsafe { - let mut storage: SOCKADDR_STORAGE = mem::zeroed(); - let mut addrlen = mem::size_of_val(&storage) as c_int; - - let n = { - sock::recvfrom( - self.socket, - buf.as_mut_ptr() as *mut c_char, - clamp(buf.len()), - flags, - &mut storage as *mut _ as *mut _, - &mut addrlen, - ) - }; - let n = match n { - sock::SOCKET_ERROR if sock::WSAGetLastError() == sock::WSAESHUTDOWN as i32 => 0, - sock::SOCKET_ERROR => return Err(last_error()), - n => n as usize, - }; - let addr = SockAddr::from_raw_parts(&storage as *const _ as *const _, addrlen); - Ok((n, addr)) - } - } - - pub fn send(&self, buf: &[u8], flags: c_int) -> io::Result { - unsafe { - let n = { - sock::send( - self.socket, - buf.as_ptr() as *const c_char, - clamp(buf.len()), - flags, - ) - }; - if n == sock::SOCKET_ERROR { - Err(last_error()) - } else { - Ok(n as usize) - } - } - } - - pub fn send_to(&self, buf: &[u8], flags: c_int, addr: &SockAddr) -> io::Result { - unsafe { - let n = { - sock::sendto( - self.socket, - buf.as_ptr() as *const c_char, - clamp(buf.len()), - flags, - addr.as_ptr(), - addr.len(), - ) - }; - if n == sock::SOCKET_ERROR { - Err(last_error()) - } else { - Ok(n as usize) - } - } - } - - // ================================================ - - pub fn ttl(&self) -> io::Result { - unsafe { - let raw: c_int = self.getsockopt(IPPROTO_IP, IP_TTL)?; - Ok(raw as u32) - } - } - - pub fn set_ttl(&self, ttl: u32) -> io::Result<()> { - unsafe { self.setsockopt(IPPROTO_IP, IP_TTL, ttl as c_int) } - } - - pub fn unicast_hops_v6(&self) -> io::Result { - unsafe { - let raw: c_int = self.getsockopt(IPPROTO_IPV6 as c_int, 
IPV6_UNICAST_HOPS)?; - Ok(raw as u32) - } - } - - pub fn set_unicast_hops_v6(&self, hops: u32) -> io::Result<()> { - unsafe { self.setsockopt(IPPROTO_IPV6 as c_int, IPV6_UNICAST_HOPS, hops as c_int) } - } - - pub fn only_v6(&self) -> io::Result { - unsafe { - let raw: c_int = self.getsockopt(IPPROTO_IPV6 as c_int, IPV6_V6ONLY)?; - Ok(raw != 0) - } - } - - pub fn set_only_v6(&self, only_v6: bool) -> io::Result<()> { - unsafe { self.setsockopt(IPPROTO_IPV6 as c_int, IPV6_V6ONLY, only_v6 as c_int) } - } - - pub fn read_timeout(&self) -> io::Result> { - unsafe { Ok(ms2dur(self.getsockopt(SOL_SOCKET, SO_RCVTIMEO)?)) } - } - - pub fn set_read_timeout(&self, dur: Option) -> io::Result<()> { - unsafe { self.setsockopt(SOL_SOCKET, SO_RCVTIMEO, dur2ms(dur)?) } - } - - pub fn write_timeout(&self) -> io::Result> { - unsafe { Ok(ms2dur(self.getsockopt(SOL_SOCKET, SO_SNDTIMEO)?)) } - } - - pub fn set_write_timeout(&self, dur: Option) -> io::Result<()> { - unsafe { self.setsockopt(SOL_SOCKET, SO_SNDTIMEO, dur2ms(dur)?) } - } - - pub fn nodelay(&self) -> io::Result { - unsafe { - let raw: c_char = self.getsockopt(IPPROTO_TCP, TCP_NODELAY)?; - Ok(raw != 0) - } - } - - pub fn set_nodelay(&self, nodelay: bool) -> io::Result<()> { - unsafe { self.setsockopt(IPPROTO_TCP, TCP_NODELAY, nodelay as c_char) } - } - - pub fn broadcast(&self) -> io::Result { - unsafe { - let raw: c_int = self.getsockopt(SOL_SOCKET, SO_BROADCAST)?; - Ok(raw != 0) - } - } - - pub fn set_broadcast(&self, broadcast: bool) -> io::Result<()> { - unsafe { self.setsockopt(SOL_SOCKET, SO_BROADCAST, broadcast as c_int) } - } - - pub fn multicast_loop_v4(&self) -> io::Result { - unsafe { - let raw: c_int = self.getsockopt(IPPROTO_IP, IP_MULTICAST_LOOP)?; - Ok(raw != 0) - } - } - - pub fn set_multicast_loop_v4(&self, multicast_loop_v4: bool) -> io::Result<()> { - unsafe { self.setsockopt(IPPROTO_IP, IP_MULTICAST_LOOP, multicast_loop_v4 as c_int) } - } - - pub fn multicast_ttl_v4(&self) -> io::Result { - unsafe { - let raw: c_int = self.getsockopt(IPPROTO_IP, IP_MULTICAST_TTL)?; - Ok(raw as u32) - } - } - - pub fn set_multicast_ttl_v4(&self, multicast_ttl_v4: u32) -> io::Result<()> { - unsafe { self.setsockopt(IPPROTO_IP, IP_MULTICAST_TTL, multicast_ttl_v4 as c_int) } - } - - pub fn multicast_hops_v6(&self) -> io::Result { - unsafe { - let raw: c_int = self.getsockopt(IPPROTO_IPV6 as c_int, IPV6_MULTICAST_HOPS)?; - Ok(raw as u32) - } - } - - pub fn set_multicast_hops_v6(&self, hops: u32) -> io::Result<()> { - unsafe { self.setsockopt(IPPROTO_IPV6 as c_int, IPV6_MULTICAST_HOPS, hops as c_int) } - } - - pub fn multicast_if_v4(&self) -> io::Result { - unsafe { - let imr_interface: IN_ADDR = self.getsockopt(IPPROTO_IP, IP_MULTICAST_IF)?; - Ok(from_s_addr(imr_interface.S_un)) - } - } - - pub fn set_multicast_if_v4(&self, interface: &Ipv4Addr) -> io::Result<()> { - let interface = to_s_addr(interface); - let imr_interface = IN_ADDR { S_un: interface }; - - unsafe { self.setsockopt(IPPROTO_IP, IP_MULTICAST_IF, imr_interface) } - } - - pub fn multicast_if_v6(&self) -> io::Result { - unsafe { - let raw: c_int = self.getsockopt(IPPROTO_IPV6 as c_int, IPV6_MULTICAST_IF)?; - Ok(raw as u32) - } - } - - pub fn set_multicast_if_v6(&self, interface: u32) -> io::Result<()> { - unsafe { self.setsockopt(IPPROTO_IPV6 as c_int, IPV6_MULTICAST_IF, interface as c_int) } - } - - pub fn multicast_loop_v6(&self) -> io::Result { - unsafe { - let raw: c_int = self.getsockopt(IPPROTO_IPV6 as c_int, IPV6_MULTICAST_LOOP)?; - Ok(raw != 0) - } - } - - pub fn 
set_multicast_loop_v6(&self, multicast_loop_v6: bool) -> io::Result<()> { - unsafe { - self.setsockopt( - IPPROTO_IPV6 as c_int, - IPV6_MULTICAST_LOOP, - multicast_loop_v6 as c_int, - ) - } - } - - pub fn join_multicast_v4(&self, multiaddr: &Ipv4Addr, interface: &Ipv4Addr) -> io::Result<()> { - let multiaddr = to_s_addr(multiaddr); - let interface = to_s_addr(interface); - let mreq = IP_MREQ { - imr_multiaddr: IN_ADDR { S_un: multiaddr }, - imr_interface: IN_ADDR { S_un: interface }, - }; - unsafe { self.setsockopt(IPPROTO_IP, IP_ADD_MEMBERSHIP, mreq) } - } - - pub fn join_multicast_v6(&self, multiaddr: &Ipv6Addr, interface: u32) -> io::Result<()> { - let multiaddr = to_in6_addr(multiaddr); - let mreq = IPV6_MREQ { - ipv6mr_multiaddr: multiaddr, - ipv6mr_interface: interface, - }; - unsafe { self.setsockopt(IPPROTO_IP, IPV6_ADD_MEMBERSHIP, mreq) } - } - - pub fn leave_multicast_v4(&self, multiaddr: &Ipv4Addr, interface: &Ipv4Addr) -> io::Result<()> { - let multiaddr = to_s_addr(multiaddr); - let interface = to_s_addr(interface); - let mreq = IP_MREQ { - imr_multiaddr: IN_ADDR { S_un: multiaddr }, - imr_interface: IN_ADDR { S_un: interface }, - }; - unsafe { self.setsockopt(IPPROTO_IP, IP_DROP_MEMBERSHIP, mreq) } - } - - pub fn leave_multicast_v6(&self, multiaddr: &Ipv6Addr, interface: u32) -> io::Result<()> { - let multiaddr = to_in6_addr(multiaddr); - let mreq = IPV6_MREQ { - ipv6mr_multiaddr: multiaddr, - ipv6mr_interface: interface, - }; - unsafe { self.setsockopt(IPPROTO_IP, IPV6_DROP_MEMBERSHIP, mreq) } - } - - pub fn linger(&self) -> io::Result> { - unsafe { Ok(linger2dur(self.getsockopt(SOL_SOCKET, SO_LINGER)?)) } - } - - pub fn set_linger(&self, dur: Option) -> io::Result<()> { - unsafe { self.setsockopt(SOL_SOCKET, SO_LINGER, dur2linger(dur)) } - } - - pub fn set_reuse_address(&self, reuse: bool) -> io::Result<()> { - unsafe { self.setsockopt(SOL_SOCKET, SO_REUSEADDR, reuse as c_int) } - } - - pub fn reuse_address(&self) -> io::Result { - unsafe { - let raw: c_int = self.getsockopt(SOL_SOCKET, SO_REUSEADDR)?; - Ok(raw != 0) - } - } - - pub fn recv_buffer_size(&self) -> io::Result { - unsafe { - let raw: c_int = self.getsockopt(SOL_SOCKET, SO_RCVBUF)?; - Ok(raw as usize) - } - } - - pub fn set_recv_buffer_size(&self, size: usize) -> io::Result<()> { - unsafe { - // TODO: casting usize to a c_int should be a checked cast - self.setsockopt(SOL_SOCKET, SO_RCVBUF, size as c_int) - } - } - - pub fn send_buffer_size(&self) -> io::Result { - unsafe { - let raw: c_int = self.getsockopt(SOL_SOCKET, SO_SNDBUF)?; - Ok(raw as usize) - } - } - - pub fn set_send_buffer_size(&self, size: usize) -> io::Result<()> { - unsafe { - // TODO: casting usize to a c_int should be a checked cast - self.setsockopt(SOL_SOCKET, SO_SNDBUF, size as c_int) - } - } - - pub fn keepalive(&self) -> io::Result> { - let mut ka = tcp_keepalive { - onoff: 0, - keepalivetime: 0, - keepaliveinterval: 0, - }; - let n = unsafe { - sock::WSAIoctl( - self.socket, - SIO_KEEPALIVE_VALS, - 0 as *mut _, - 0, - &mut ka as *mut _ as *mut _, - mem::size_of_val(&ka) as DWORD, - 0 as *mut _, - 0 as *mut _, - None, - ) - }; - if n == 0 { - Ok(if ka.onoff == 0 { - None - } else if ka.keepaliveinterval == 0 { - None - } else { - let seconds = ka.keepaliveinterval / 1000; - let nanos = (ka.keepaliveinterval % 1000) * 1_000_000; - Some(Duration::new(seconds as u64, nanos as u32)) - }) - } else { - Err(last_error()) - } - } - - pub fn set_keepalive(&self, keepalive: Option) -> io::Result<()> { - let ms = dur2ms(keepalive)?; - // TODO: 
checked casts here - let ka = tcp_keepalive { - onoff: keepalive.is_some() as c_ulong, - keepalivetime: ms as c_ulong, - keepaliveinterval: ms as c_ulong, - }; - let mut out = 0; - let n = unsafe { - sock::WSAIoctl( - self.socket, - SIO_KEEPALIVE_VALS, - &ka as *const _ as *mut _, - mem::size_of_val(&ka) as DWORD, - 0 as *mut _, - 0, - &mut out, - 0 as *mut _, - None, - ) - }; - if n == 0 { - Ok(()) - } else { - Err(last_error()) - } - } - - pub fn out_of_band_inline(&self) -> io::Result { - unsafe { - let raw: c_int = self.getsockopt(SOL_SOCKET, SO_OOBINLINE)?; - Ok(raw != 0) - } - } - - pub fn set_out_of_band_inline(&self, oob_inline: bool) -> io::Result<()> { - unsafe { self.setsockopt(SOL_SOCKET, SO_OOBINLINE, oob_inline as c_int) } - } - - unsafe fn setsockopt(&self, opt: c_int, val: c_int, payload: T) -> io::Result<()> - where - T: Copy, - { - let payload = &payload as *const T as *const c_char; - if sock::setsockopt(self.socket, opt, val, payload, mem::size_of::() as c_int) == 0 { - Ok(()) - } else { - Err(last_error()) - } - } - - unsafe fn getsockopt(&self, opt: c_int, val: c_int) -> io::Result { - let mut slot: T = mem::zeroed(); - let mut len = mem::size_of::() as c_int; - if sock::getsockopt( - self.socket, - opt, - val, - &mut slot as *mut _ as *mut _, - &mut len, - ) == 0 - { - assert_eq!(len as usize, mem::size_of::()); - Ok(slot) - } else { - Err(last_error()) - } - } - - fn set_no_inherit(&self) -> io::Result<()> { - unsafe { - let r = SetHandleInformation(self.socket as HANDLE, HANDLE_FLAG_INHERIT, 0); - if r == 0 { - Err(io::Error::last_os_error()) - } else { - Ok(()) - } - } + }) } + .map(|((n, recv_flags), addr)| (n, recv_flags, addr)) } -impl Read for Socket { - fn read(&mut self, buf: &mut [u8]) -> io::Result { - <&Socket>::read(&mut &*self, buf) - } +pub(crate) fn send(socket: Socket, buf: &[u8], flags: c_int) -> io::Result { + syscall!( + send( + socket, + buf.as_ptr().cast(), + min(buf.len(), MAX_BUF_LEN) as c_int, + flags, + ), + PartialEq::eq, + sock::SOCKET_ERROR + ) + .map(|n| n as usize) } -impl<'a> Read for &'a Socket { - fn read(&mut self, buf: &mut [u8]) -> io::Result { - self.recv(buf, 0) - } -} - -impl Write for Socket { - fn write(&mut self, buf: &[u8]) -> io::Result { - <&Socket>::write(&mut &*self, buf) - } - - fn flush(&mut self) -> io::Result<()> { - <&Socket>::flush(&mut &*self) - } -} - -impl<'a> Write for &'a Socket { - fn write(&mut self, buf: &[u8]) -> io::Result { - self.send(buf, 0) - } - - fn flush(&mut self) -> io::Result<()> { - Ok(()) - } -} - -impl fmt::Debug for Socket { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let mut f = f.debug_struct("Socket"); - f.field("socket", &self.socket); - if let Ok(addr) = self.local_addr() { - f.field("local_addr", &addr); - } - if let Ok(addr) = self.peer_addr() { - f.field("peer_addr", &addr); - } - f.finish() - } -} - -impl AsRawSocket for Socket { - fn as_raw_socket(&self) -> RawSocket { - self.socket as RawSocket - } -} - -impl IntoRawSocket for Socket { - fn into_raw_socket(self) -> RawSocket { - let socket = self.socket; - mem::forget(self); - socket as RawSocket - } -} - -impl FromRawSocket for Socket { - unsafe fn from_raw_socket(socket: RawSocket) -> Socket { - Socket { - socket: socket as sock::SOCKET, - } - } -} - -impl AsRawSocket for crate::Socket { - fn as_raw_socket(&self) -> RawSocket { - self.inner.as_raw_socket() - } -} - -impl IntoRawSocket for crate::Socket { - fn into_raw_socket(self) -> RawSocket { - self.inner.into_raw_socket() - } -} - -impl FromRawSocket for 
crate::Socket { - unsafe fn from_raw_socket(socket: RawSocket) -> crate::Socket { - crate::Socket { - inner: Socket::from_raw_socket(socket), - } - } -} - -impl Drop for Socket { - fn drop(&mut self) { - unsafe { - let _ = sock::closesocket(self.socket); - } - } -} - -impl From<Socket> for net::TcpStream { - fn from(socket: Socket) -> net::TcpStream { - unsafe { net::TcpStream::from_raw_socket(socket.into_raw_socket()) } - } -} - -impl From<Socket> for net::TcpListener { - fn from(socket: Socket) -> net::TcpListener { - unsafe { net::TcpListener::from_raw_socket(socket.into_raw_socket()) } - } -} - -impl From<Socket> for net::UdpSocket { - fn from(socket: Socket) -> net::UdpSocket { - unsafe { net::UdpSocket::from_raw_socket(socket.into_raw_socket()) } - } -} - -impl From<net::TcpStream> for Socket { - fn from(socket: net::TcpStream) -> Socket { - unsafe { Socket::from_raw_socket(socket.into_raw_socket()) } - } -} - -impl From<net::TcpListener> for Socket { - fn from(socket: net::TcpListener) -> Socket { - unsafe { Socket::from_raw_socket(socket.into_raw_socket()) } - } -} - -impl From<net::UdpSocket> for Socket { - fn from(socket: net::UdpSocket) -> Socket { - unsafe { Socket::from_raw_socket(socket.into_raw_socket()) } - } -} - -fn clamp(input: usize) -> c_int { - cmp::min(input, <c_int>::max_value() as usize) as c_int -} - -fn dur2ms(dur: Option<Duration>) -> io::Result<DWORD> { - match dur { - Some(dur) => { - // Note that a duration is a (u64, u32) (seconds, nanoseconds) - // pair, and the timeouts in windows APIs are typically u32 - // milliseconds. To translate, we have two pieces to take care of: +pub(crate) fn send_vectored( + socket: Socket, + bufs: &[IoSlice<'_>], + flags: c_int, +) -> io::Result<usize> { + let mut nsent = 0; + syscall!( + WSASend( + socket, + // FIXME: From the `WSASend` docs [1]: + // > For a Winsock application, once the WSASend function is called, + // > the system owns these buffers and the application may not + // > access them. // - // * Nanosecond precision is rounded up - // * Greater than u32::MAX milliseconds (50 days) is rounded up to - // INFINITE (never time out). - let ms = dur - .as_secs() - .checked_mul(1000) - .and_then(|ms| ms.checked_add((dur.subsec_nanos() as u64) / 1_000_000)) - .and_then(|ms| { - ms.checked_add(if dur.subsec_nanos() % 1_000_000 > 0 { - 1 - } else { - 0 - }) - }) - .map(|ms| { - if ms > <DWORD>::max_value() as u64 { - INFINITE - } else { - ms as DWORD - } - }) - .unwrap_or(INFINITE); - if ms == 0 { - return Err(io::Error::new( - io::ErrorKind::InvalidInput, - "cannot set a 0 duration timeout", - )); - } - Ok(ms) - } - None => Ok(0), - } + // So what we're doing is actually UB as `bufs` needs to be `&mut + // [IoSlice<'_>]`. + // + // Tracking issue: https://github.com/rust-lang/socket2-rs/issues/129. + // + // NOTE: `send_to_vectored` has the same problem.
+ // + // [1] https://docs.microsoft.com/en-us/windows/win32/api/winsock2/nf-winsock2-wsasend + bufs.as_ptr() as *mut _, + min(bufs.len(), DWORD::max_value() as usize) as DWORD, + &mut nsent, + flags as DWORD, + std::ptr::null_mut(), + None, + ), + PartialEq::eq, + sock::SOCKET_ERROR + ) + .map(|_| nsent as usize) } -fn ms2dur(raw: DWORD) -> Option<Duration> { +pub(crate) fn send_to( + socket: Socket, + buf: &[u8], + addr: &SockAddr, + flags: c_int, +) -> io::Result<usize> { + syscall!( + sendto( + socket, + buf.as_ptr().cast(), + min(buf.len(), MAX_BUF_LEN) as c_int, + flags, + addr.as_ptr(), + addr.len(), + ), + PartialEq::eq, + sock::SOCKET_ERROR + ) + .map(|n| n as usize) +} + +pub(crate) fn send_to_vectored( + socket: Socket, + bufs: &[IoSlice<'_>], + addr: &SockAddr, + flags: c_int, +) -> io::Result<usize> { + let mut nsent = 0; + syscall!( + WSASendTo( + socket, + // FIXME: Same problem as in `send_vectored`. + bufs.as_ptr() as *mut _, + bufs.len().min(DWORD::MAX as usize) as DWORD, + &mut nsent, + flags as DWORD, + addr.as_ptr(), + addr.len(), + ptr::null_mut(), + None, + ), + PartialEq::eq, + sock::SOCKET_ERROR + ) + .map(|_| nsent as usize) +} + +/// Wrapper around `getsockopt` to deal with platform specific timeouts. +pub(crate) fn timeout_opt(fd: Socket, lvl: c_int, name: c_int) -> io::Result<Option<Duration>> { + unsafe { getsockopt(fd, lvl, name).map(from_ms) } +} + +fn from_ms(duration: DWORD) -> Option<Duration> { + if duration == 0 { None } else { - let secs = raw / 1000; - let nsec = (raw % 1000) * 1000000; + let secs = duration / 1000; + let nsec = (duration % 1000) * 1000000; Some(Duration::new(secs as u64, nsec as u32)) } } -fn to_s_addr(addr: &Ipv4Addr) -> in_addr_S_un { - let octets = addr.octets(); - let res = crate::hton( - ((octets[0] as ULONG) << 24) - | ((octets[1] as ULONG) << 16) - | ((octets[2] as ULONG) << 8) - | ((octets[3] as ULONG) << 0), - ); - let mut new_addr: in_addr_S_un = unsafe { mem::zeroed() }; - unsafe { *(new_addr.S_addr_mut()) = res }; - new_addr +/// Wrapper around `setsockopt` to deal with platform specific timeouts. +pub(crate) fn set_timeout_opt( + fd: Socket, + level: c_int, + optname: c_int, + duration: Option<Duration>, +) -> io::Result<()> { + let duration = into_ms(duration); + unsafe { setsockopt(fd, level, optname, duration) } } -fn from_s_addr(in_addr: in_addr_S_un) -> Ipv4Addr { - let h_addr = crate::ntoh(unsafe { *in_addr.S_addr() }); - - let a: u8 = (h_addr >> 24) as u8; - let b: u8 = (h_addr >> 16) as u8; - let c: u8 = (h_addr >> 8) as u8; - let d: u8 = (h_addr >> 0) as u8; - - Ipv4Addr::new(a, b, c, d) +fn into_ms(duration: Option<Duration>) -> DWORD { + // Note that a duration is a (u64, u32) (seconds, nanoseconds) pair, and the + // timeouts in windows APIs are typically u32 milliseconds. To translate, we + // have two pieces to take care of: + // + // * Nanosecond precision is rounded up + // * Greater than u32::MAX milliseconds (50 days) is rounded up to + // INFINITE (never time out).
+ duration + .map(|duration| min(duration.as_millis(), INFINITE as u128) as DWORD) + .unwrap_or(0) } +pub(crate) fn set_tcp_keepalive(socket: Socket, keepalive: &TcpKeepalive) -> io::Result<()> { + let mut keepalive = tcp_keepalive { + onoff: 1, + keepalivetime: into_ms(keepalive.time), + keepaliveinterval: into_ms(keepalive.interval), + }; + let mut out = 0; + syscall!( + WSAIoctl( + socket, + SIO_KEEPALIVE_VALS, + &mut keepalive as *mut _ as *mut _, + size_of::<tcp_keepalive>() as _, + ptr::null_mut(), + 0, + &mut out, + ptr::null_mut(), + None, + ), + PartialEq::eq, + sock::SOCKET_ERROR + ) + .map(|_| ()) +} + +/// Caller must ensure `T` is the correct type for `level` and `optname`. +pub(crate) unsafe fn getsockopt<T>(socket: Socket, level: c_int, optname: c_int) -> io::Result<T> { + let mut optval: MaybeUninit<T> = MaybeUninit::uninit(); + let mut optlen = mem::size_of::<T>() as c_int; + syscall!( + getsockopt( + socket, + level, + optname, + optval.as_mut_ptr().cast(), + &mut optlen, + ), + PartialEq::eq, + sock::SOCKET_ERROR + ) + .map(|_| { + debug_assert_eq!(optlen as usize, mem::size_of::<T>()); + // Safety: `getsockopt` initialised `optval` for us. + optval.assume_init() + }) +} + +/// Caller must ensure `T` is the correct type for `level` and `optname`. +pub(crate) unsafe fn setsockopt<T>( + socket: Socket, + level: c_int, + optname: c_int, + optval: T, +) -> io::Result<()> { + syscall!( + setsockopt( + socket, + level, + optname, + (&optval as *const T).cast(), + mem::size_of::<T>() as c_int, + ), + PartialEq::eq, + sock::SOCKET_ERROR + ) + .map(|_| ()) +} + +fn ioctlsocket(socket: Socket, cmd: c_long, payload: &mut u_long) -> io::Result<()> { + syscall!( + ioctlsocket(socket, cmd, payload), + PartialEq::eq, + sock::SOCKET_ERROR + ) + .map(|_| ()) +} + +pub(crate) fn to_in_addr(addr: &Ipv4Addr) -> IN_ADDR { + let mut s_un: in_addr_S_un = unsafe { mem::zeroed() }; + // `S_un` is stored as BE on all machines, and the array is in BE order. So + // the native endian conversion method is used so that it's never swapped. + unsafe { *(s_un.S_addr_mut()) = u32::from_ne_bytes(addr.octets()) }; + IN_ADDR { S_un: s_un } +} + +pub(crate) fn from_in_addr(in_addr: IN_ADDR) -> Ipv4Addr { + Ipv4Addr::from(unsafe { *in_addr.S_un.S_addr() }.to_ne_bytes()) +} + +pub(crate) fn to_in6_addr(addr: &Ipv6Addr) -> in6_addr { let mut ret_addr: in6_addr_u = unsafe { mem::zeroed() }; unsafe { *(ret_addr.Byte_mut()) = addr.octets() }; let mut ret: in6_addr = unsafe { mem::zeroed() }; @@ -985,38 +737,112 @@ fn to_in6_addr(addr: &Ipv6Addr) -> in6_addr { ret } -fn linger2dur(linger_opt: sock::linger) -> Option<Duration> { - if linger_opt.l_onoff == 0 { - None - } else { - Some(Duration::from_secs(linger_opt.l_linger as u64)) +pub(crate) fn from_in6_addr(addr: in6_addr) -> Ipv6Addr { + Ipv6Addr::from(*unsafe { addr.u.Byte() }) +} + +pub(crate) fn to_mreqn( + multiaddr: &Ipv4Addr, + interface: &crate::socket::InterfaceIndexOrAddress, +) -> IpMreq { + IpMreq { + imr_multiaddr: to_in_addr(multiaddr), + // Per https://docs.microsoft.com/en-us/windows/win32/api/ws2ipdef/ns-ws2ipdef-ip_mreq#members: + // + // imr_interface + // + // The local IPv4 address of the interface or the interface index on + // which the multicast group should be joined or dropped. This value is + // in network byte order. If this member specifies an IPv4 address of + // 0.0.0.0, the default IPv4 multicast interface is used. + // + // To use an interface index of 1 would be the same as an IP address of + // 0.0.0.1.
+ imr_interface: match interface { + crate::socket::InterfaceIndexOrAddress::Index(interface) => { + to_in_addr(&(*interface).into()) + } + crate::socket::InterfaceIndexOrAddress::Address(interface) => to_in_addr(interface), + }, } } -fn dur2linger(dur: Option) -> sock::linger { - match dur { - Some(d) => sock::linger { - l_onoff: 1, - l_linger: d.as_secs() as u16, - }, - None => sock::linger { - l_onoff: 0, - l_linger: 0, - }, +/// Windows only API. +impl crate::Socket { + /// Sets `HANDLE_FLAG_INHERIT` using `SetHandleInformation`. + #[cfg(feature = "all")] + #[cfg_attr(docsrs, doc(cfg(all(windows, feature = "all"))))] + pub fn set_no_inherit(&self, no_inherit: bool) -> io::Result<()> { + self._set_no_inherit(no_inherit) + } + + pub(crate) fn _set_no_inherit(&self, no_inherit: bool) -> io::Result<()> { + // NOTE: can't use `syscall!` because it expects the function in the + // `sock::` path. + let res = unsafe { + SetHandleInformation( + self.as_raw() as HANDLE, + winbase::HANDLE_FLAG_INHERIT, + !no_inherit as _, + ) + }; + if res == 0 { + // Zero means error. + Err(io::Error::last_os_error()) + } else { + Ok(()) + } + } +} + +impl AsRawSocket for crate::Socket { + fn as_raw_socket(&self) -> RawSocket { + self.as_raw() as RawSocket + } +} + +impl IntoRawSocket for crate::Socket { + fn into_raw_socket(self) -> RawSocket { + self.into_raw() as RawSocket + } +} + +impl FromRawSocket for crate::Socket { + unsafe fn from_raw_socket(socket: RawSocket) -> crate::Socket { + crate::Socket::from_raw(socket as Socket) } } #[test] -fn test_ip() { +fn in_addr_convertion() { let ip = Ipv4Addr::new(127, 0, 0, 1); - assert_eq!(ip, from_s_addr(to_s_addr(&ip))); + let raw = to_in_addr(&ip); + assert_eq!(unsafe { *raw.S_un.S_addr() }, 127 << 0 | 1 << 24); + assert_eq!(from_in_addr(raw), ip); + + let ip = Ipv4Addr::new(127, 34, 4, 12); + let raw = to_in_addr(&ip); + assert_eq!( + unsafe { *raw.S_un.S_addr() }, + 127 << 0 | 34 << 8 | 4 << 16 | 12 << 24 + ); + assert_eq!(from_in_addr(raw), ip); } #[test] -fn test_out_of_band_inline() { - let tcp = Socket::new(AF_INET, SOCK_STREAM, 0).unwrap(); - assert_eq!(tcp.out_of_band_inline().unwrap(), false); - - tcp.set_out_of_band_inline(true).unwrap(); - assert_eq!(tcp.out_of_band_inline().unwrap(), true); +fn in6_addr_convertion() { + let ip = Ipv6Addr::new(0x2000, 1, 2, 3, 4, 5, 6, 7); + let raw = to_in6_addr(&ip); + let want = [ + 0x2000u16.to_be(), + 1u16.to_be(), + 2u16.to_be(), + 3u16.to_be(), + 4u16.to_be(), + 5u16.to_be(), + 6u16.to_be(), + 7u16.to_be(), + ]; + assert_eq!(unsafe { *raw.u.Word() }, want); + assert_eq!(from_in6_addr(raw), ip); } diff --git a/third_party/rust/socket2/src/tests.rs b/third_party/rust/socket2/src/tests.rs deleted file mode 100644 index 4a2f1f8ee452..000000000000 --- a/third_party/rust/socket2/src/tests.rs +++ /dev/null @@ -1,62 +0,0 @@ -use std::io::Write; -use std::str; - -use crate::{Domain, Protocol, Type}; - -#[test] -fn domain_fmt_debug() { - let tests = &[ - (Domain::ipv4(), "AF_INET"), - (Domain::ipv6(), "AF_INET6"), - #[cfg(unix)] - (Domain::unix(), "AF_UNIX"), - (0.into(), "AF_UNSPEC"), - (500.into(), "500"), - ]; - - let mut buf = Vec::new(); - for (input, want) in tests { - buf.clear(); - write!(buf, "{:?}", input).unwrap(); - let got = str::from_utf8(&buf).unwrap(); - assert_eq!(got, *want); - } -} - -#[test] -fn type_fmt_debug() { - let tests = &[ - (Type::stream(), "SOCK_STREAM"), - (Type::dgram(), "SOCK_DGRAM"), - (Type::seqpacket(), "SOCK_SEQPACKET"), - (Type::raw(), "SOCK_RAW"), - (500.into(), "500"), - ]; - - let 
mut buf = Vec::new(); - for (input, want) in tests { - buf.clear(); - write!(buf, "{:?}", input).unwrap(); - let got = str::from_utf8(&buf).unwrap(); - assert_eq!(got, *want); - } -} - -#[test] -fn protocol_fmt_debug() { - let tests = &[ - (Protocol::icmpv4(), "IPPROTO_ICMP"), - (Protocol::icmpv6(), "IPPROTO_ICMPV6"), - (Protocol::tcp(), "IPPROTO_TCP"), - (Protocol::udp(), "IPPROTO_UDP"), - (500.into(), "500"), - ]; - - let mut buf = Vec::new(); - for (input, want) in tests { - buf.clear(); - write!(buf, "{:?}", input).unwrap(); - let got = str::from_utf8(&buf).unwrap(); - assert_eq!(got, *want); - } -} diff --git a/third_party/rust/socket2/src/utils.rs b/third_party/rust/socket2/src/utils.rs deleted file mode 100644 index e676489f35fd..000000000000 --- a/third_party/rust/socket2/src/utils.rs +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -#[doc(hidden)] -pub trait NetInt { - fn from_be(i: Self) -> Self; - fn to_be(&self) -> Self; -} -macro_rules! doit { - ($($t:ident)*) => ($(impl NetInt for $t { - fn from_be(i: Self) -> Self { <$t>::from_be(i) } - fn to_be(&self) -> Self { <$t>::to_be(*self) } - })*) -} -doit! { i8 i16 i32 i64 isize u8 u16 u32 u64 usize } - -#[doc(hidden)] -pub trait One { - fn one() -> Self; -} - -macro_rules! one { - ($($t:ident)*) => ($( - impl One for $t { fn one() -> $t { 1 } } - )*) -} - -one! { i8 i16 i32 i64 isize u8 u16 u32 u64 usize } - -#[doc(hidden)] -pub trait Zero { - fn zero() -> Self; -} - -macro_rules! zero { - ($($t:ident)*) => ($( - impl Zero for $t { fn zero() -> $t { 0 } } - )*) -} - -zero! 
{ i8 i16 i32 i64 isize u8 u16 u32 u64 usize } diff --git a/third_party/rust/tokio-0.2.25/.cargo-checksum.json b/third_party/rust/tokio-0.2.25/.cargo-checksum.json deleted file mode 100644 index c7ee5fb76da8..000000000000 --- a/third_party/rust/tokio-0.2.25/.cargo-checksum.json +++ /dev/null @@ -1 +0,0 @@ -{"files":{"CHANGELOG.md":"8c09df8363c29ac878d79c289525b40d81573a421f703fa5c2978d0d336776d6","Cargo.toml":"1adcbeedb2762f5f1a11df9430629bff34832950e020070cec333d0f48927a0b","LICENSE":"898b1ae9821e98daf8964c8d6c7f61641f5f5aa78ad500020771c0939ee0dea1","README.md":"db560230d144afc32dec5677a43b9722fb5c6573a3705a114145473edcdced09","src/coop.rs":"2884a7593e43bdc309a5d90fcf36bf780c75ef5c8fe4584c1c6646165ef96ded","src/fs/canonicalize.rs":"93c64b72abdca17877d6ab61d50a43765d6aef9e0a9f7aaf41b6b0b7d9a8a380","src/fs/copy.rs":"6c9ba20cba87eea6806cff998a3326f4f505f0f8eddda50363cd005da9e149fa","src/fs/create_dir.rs":"b279bf045c5168eb14ad04c91732f575fecd96190c74706989980e6484ba52e6","src/fs/create_dir_all.rs":"56081d541caadca0fc59e84d55e78e702fe9373679598016224ad0b072b189a7","src/fs/dir_builder.rs":"9a15afdc3ae550428720f6361e9b019e9a2ee45af485ed319ee6e539dbd40744","src/fs/file.rs":"4b44bf50f4c11f66664692b726146d606d8c11eeb81a8f7f59b514bad6bef5ab","src/fs/hard_link.rs":"98cccbbb3719baee11c232e79723ab1cb3d6c8056bddb109c4990fe2c236c1fb","src/fs/metadata.rs":"782a1a5dbc2cd6c40e928579fbfcf39e5f1de28def78781590c0280acdf02960","src/fs/mod.rs":"156696a9ceb12f47f4f1948f074f1c041b6a314fc7fc7cc42698019e3c5c7cf5","src/fs/open_options.rs":"d1e549f4e6ce5e88350714b6eed381e42b454b488f14a087b99d99e8fc66549d","src/fs/os/mod.rs":"65d0bd0e2b2c7e142daf6e0f24f3f7fe964dd0fb222a320dfb54435ef05d70ee","src/fs/os/unix/dir_builder_ext.rs":"65d391f16da23d152957d9a7f4fdf3dce28a52407f2fe25d757763735a3b3fae","src/fs/os/unix/mod.rs":"e16f12a976f45497ea8ecae2608a5fc4dd3f13726b6be5627ded93db73777a13","src/fs/os/unix/open_options_ext.rs":"fbce771fd45adbed2a322013a366d654a06744d124d366a556672b365d0cd8da","src/fs/os/unix/symlink.rs":"32cf3e906531d30ebe6d8be7ee3bfe049949759b566015b56d0851f51abcff50","src/fs/os/windows/mod.rs":"ca6b3a8a287fdeeff2468e29ae38822f3d0ac23e46b88246536dc85a46250335","src/fs/os/windows/symlink_dir.rs":"5fbd05365555ba7942ffc1c2dfddf201ddad2cf9b005be2ea99849a473fe982b","src/fs/os/windows/symlink_file.rs":"a1170fd40a000dc449de972267f579a0d14f50dbb39466f985f183fdcd1d3438","src/fs/read.rs":"d5b9261d4deba4e43fd6002c12eff7ddd813319c23634795f4551e3c3f89756b","src/fs/read_dir.rs":"251be63189e9d287597591fda447a498ffad56e522e1de226167592e84e0d66b","src/fs/read_link.rs":"93c104a21253372fef7056ab82e6065b0a1a8fc9be8b7329dfd5a8dd07c618b0","src/fs/read_to_string.rs":"7b4c1760bda91a294da19f2154c39a866a761a494af411258df5d29e6fe98601","src/fs/remove_dir.rs":"96475771e9c52678768288d8df6814216e0801bebc848481597ad34e829a5854","src/fs/remove_dir_all.rs":"b85abd05c7ab64ee8dc6cf5663a11e713aa51b357759ef660ef3cae3365ccc42","src/fs/remove_file.rs":"1cdf8bf16b3a164c594dac8773b7d1f9ebb28de169343184d34d6aac3b3a7eaa","src/fs/rename.rs":"a97875e92626fa46e23fece7b8698c9c4cea2bae8f1be8726f30ae6fe80ae0c7","src/fs/set_permissions.rs":"8adccafa475dcfc1bc3989af73374d90683c1be4953ef812e5fd606f968d7b7a","src/fs/symlink_metadata.rs":"f5ce1e05f137da995e3e0d9582bae0a5f7ef4251285c64e912b0eedbb068b395","src/fs/write.rs":"062aa6c0c4862ca7071171f505182202b88d5e919806e61ef83b24a4a4bda36c","src/future/maybe_done.rs":"8098e69168044c2bbdba95a3a3da339fbb2d8f95edd9e74db3ae744a4b6c379f","src/future/mod.rs":"f83ace028a163a352e32ab38a8065886421179a9cd5edba0aece889af9a3145f","
src/future/pending.rs":"2bfa584d25299f8d610cae8ebd652f1840dace5086b6d50c64400f4d8680ddd6","src/future/poll_fn.rs":"1ace3c3f0f392589d6454b56218c825cdbf11d72d95b170986782a413a929627","src/future/ready.rs":"168e965951c21794b21e0f151b29614c07371caa040271fc51541f97451fe94c","src/future/try_join.rs":"a12b2c22ef92b7887a7e30410a8947fa0f03e3bccf0c754a1f1e4e3c89e23599","src/io/async_buf_read.rs":"b37caa8f6c974b3c97327c635218803e573c531d4197950840549aa794357c99","src/io/async_read.rs":"da37ab8f60acef8864d0ceaad70f666369358c96d7aec2b5678de1a090076ddc","src/io/async_seek.rs":"3c75e1b51c9865bb87557f1616bc97611c32010911a013c4288c7f8221ec86b6","src/io/async_write.rs":"b0a892b9be7bd1ed9812b52e930d8c593a68a4399a0528398b094f28d3b9760f","src/io/blocking.rs":"30378c4d6806047ac0f34cb333f158c3dc147d65757bd4452a5fe580c6d9c3c0","src/io/driver/mod.rs":"e89c41b5b3334ce4e2a994fcc88638c63c32468231a69b50d5b010b1276d0070","src/io/driver/platform.rs":"023acd3f2703d241b3e91ab6e4d4c0bc5ccc3451655fffd9f37938224f915494","src/io/driver/scheduled_io.rs":"f8270a5274314cf131cd2ac07b77cbd0a6f500f07a8de1248dbe7974d1be307b","src/io/mod.rs":"56b6ff59759189d33d47c21a766c2346937081bbfd1c9eef95d908ff86492f32","src/io/poll_evented.rs":"6576918df48177db8b2f38c5781f30c985daa5abd077df663d43bf56ef7febdd","src/io/registration.rs":"3057480839485674046b823c3d84569df4186accd12fc40de7aea9e5ad60d049","src/io/seek.rs":"5a33c2d4ef28fbd8e409603222462da88a2c21c8d1419be3ac16c95aac1b2259","src/io/split.rs":"da44112c70436841d8ce04695ba61aec2c1fe41d449bb6c098f7d40c875bb32b","src/io/stderr.rs":"b9a5a87305740bc2220f2838168dda6ed10390a2f990ddcdc61eb79040547265","src/io/stdin.rs":"35b02f30ecf22c8274fdb674a2c208c5781efe9c1bcb7290e66f077ce061ae74","src/io/stdout.rs":"35e751837fd6311180d360b007e7cb77cf314118441347c1037aeca59bdbf53f","src/io/util/async_buf_read_ext.rs":"9ba5c10964369415cdb58891ee2a883faa31b62a56657c6dfea4226c8e06cbed","src/io/util/async_read_ext.rs":"69d206c24c3e0119d3b9609c492b56654d0db00b180ab1e0246fc030dbb8bb4d","src/io/util/async_seek_ext.rs":"e1703662ddcc3069c594d35e5a60ba4a6bcfff1b4fc83d7a33517894dd8ecd84","src/io/util/async_write_ext.rs":"e976b1f9681f87f1fceec50f4da5df6828fb46c54a242652ef7a795ed4f1a205","src/io/util/buf_reader.rs":"6d739a53160878a70238ff1cd457f4a01f51ffb192ac712726ba25f5d20494c1","src/io/util/buf_stream.rs":"405557d37265e4459fac7d57be9cc634fcf083dba9cc098436f916c750f53661","src/io/util/buf_writer.rs":"d31562223daac0c3992b5e1ee1ab247040410d99c046a83e15d06817b13d08ee","src/io/util/chain.rs":"7c724503f750a793271eae6df1fcaacea30af6ebba4024adf86d013baa437ae2","src/io/util/copy.rs":"ac44563dc13bf8e9b2116a6ceec14e2027dd0a2034ead5d6a937b05dd15ff9c4","src/io/util/empty.rs":"d293f96af822558e7647c0a9433be4a4a7509fe38430cc16d6c72156aed70487","src/io/util/flush.rs":"44f40cce603575dfd938eb9b33be47e54714149e786817bc0b4c163a56ffd00f","src/io/util/lines.rs":"6265bc2ad87ad6b813218ef710515f97dbc6fcf38f061be824868f181f267389","src/io/util/mem.rs":"9b0767ea3964c98f9a017fa474a68fc23e144ab58b2e5405db1e017d38ca7f6a","src/io/util/mod.rs":"f90bd9645f43c34281991eab018a5a4d4982e779bbd4de4c4063ddbb94def9a2","src/io/util/read.rs":"0d79b5939801dd5ed54332432d55a965e5e99f898cb1955d30a36b12bd0cccda","src/io/util/read_buf.rs":"12b5e3cef39415f99b2f1aab187520b7933b5e33c860807eaac8f66cf6d64aed","src/io/util/read_exact.rs":"4d2e31f451169ace9dd438f0d8d31423804c6a0d80ff0baeb741490092e7b2af","src/io/util/read_int.rs":"73fe441637e3988b7d96263fdb24474f99a9b4a226db1690f3ab81d0116b550d","src/io/util/read_line.rs":"d66fea4ca10b33bfbee0fc441a3a5c7b9408ffd4d842bf1299b89d
8bf74bccac","src/io/util/read_to_end.rs":"4e2b4b868fccd8b054ecf31c61f958a3c852a57c27f5194883138f0e3b80610a","src/io/util/read_to_string.rs":"828b9d2a46c700fc5896eb6a380f6075b243cdfd40b8ac6a68ff8177bcfff0dd","src/io/util/read_until.rs":"a5450512b5dfdac9e9d908d229fd2dfec7c9342fa8bc34c6529a6a0f114a20a5","src/io/util/reader_stream.rs":"b32393a9b00e24ecf3c8025b5d577439faab8fde9bc337755d0d8fc8a02d035f","src/io/util/repeat.rs":"da881955fb7123fc1d69a081cff2c2dc1909747dbeca51224e97f2321d7a5fb5","src/io/util/shutdown.rs":"bcbe5e38c8fcb392116e7dbba2ae6ae5284c0958d1cc83f89dbc9149a91ab11f","src/io/util/sink.rs":"0dcb794e48ca9b1c28e5f9f2051073ea0951a54c9c7dfc903ce9e5489d3d8cd7","src/io/util/split.rs":"eabcc4a0704a6a38237a2ca54b3401fee3f1810970e18f0ab467dfae27f1ab36","src/io/util/stream_reader.rs":"853bdd3518eae4612231d68301a51f27abd8a23000a05ff0b87e2c087af415b2","src/io/util/take.rs":"836051a1976ef3aea336005b883c3c787059038e82b4291898af93e16de8e6cb","src/io/util/write.rs":"566fa287807cbb760660cb4979d975aa742673bc5a384df0b4420e753093b22c","src/io/util/write_all.rs":"ff9ea6e45ce6ee23507c8d01e746cf304fd6d3c96a9f4329501f7f06c17ce874","src/io/util/write_buf.rs":"e7bbcf17b57c067f7276be6786ac5a8d951149188904a9cb6248d1aae427ac53","src/io/util/write_int.rs":"c537ffbed20443f2eb83c336270223eda5682a0668aef3a74933202155a6794f","src/lib.rs":"787099cbae6fd8ef0d1ebde8847020a2a63da894534224ad81158ec05f5350f6","src/loom/mocked.rs":"7b0cc296fbbc42ffe5fec908cf75ded902dea3be7041f511f15c9a695a4de292","src/loom/mod.rs":"1e4d6445ed9c33495086dbfa640ab8626d86b71927e102c8aa97932c3635eb56","src/loom/std/atomic_ptr.rs":"dcb87a0dddf13fdac9788d99aa07b6e96c2e1b23f468d858f2ad8470bd0d73c0","src/loom/std/atomic_u16.rs":"70a016e77d99248ca7fc2b60352ff13ca9747b88fde15cfc30dcd03074aa7259","src/loom/std/atomic_u32.rs":"9fdc8d389f156510b43274b3230cbc6659aa7497f504cf51190c9400f43d981a","src/loom/std/atomic_u64.rs":"ebcc370af4116e4ad0a3bfb33f693cbc4ce5b254ff270eae26670d971e10cee2","src/loom/std/atomic_u8.rs":"07730a5eeea1034cbba4fcec0a7fcf5a91a40bdc9b5f68e9c12df7867707b564","src/loom/std/atomic_usize.rs":"cdaa249cfb53d42421bbfd3f491e294b24edfe55ee26ddf511935765f2d63c91","src/loom/std/mod.rs":"88d5c281a932ce14d83307492704012b190ae52ee3256db593a061dcab222c24","src/loom/std/parking_lot.rs":"ea7a3004324261e47be741319d7bb99d960a2a7303cd47da5ce045916d5951b2","src/loom/std/unsafe_cell.rs":"5a153493edc71b28a2f6867349e5f9903b198bc2faf7ae61528f08b09bf9fe04","src/macros/cfg.rs":"6421ecc6c9f290a50a76d2e9d5e1b2335fa320d88ab8f65aa7c8812ff587e514","src/macros/join.rs":"ab10af3f968d26ca82257c2cccf8efa9c4ea21077682660a2c49b88d45152580","src/macros/loom.rs":"80d2e4af9fc50d0bda1b20b95f8873b2f59c3c0e70f2e812a6207855df76204e","src/macros/mod.rs":"f46caf9b639449db666d8537932660179269284840214201017e84fce7b4eac2","src/macros/pin.rs":"791dd5c6133eb8070ac86997448c75399f2232636fca6a2a810b0b1e019f351b","src/macros/ready.rs":"6efd4c866c4718c3a9a7b5564b435e2d13e9c1ae91fd98b1313d5e7c182942d6","src/macros/scoped_tls.rs":"9b2f8f73b71048c6a330fb674bbd7d384f6cf6fcf27c57631a4c1f1c35dcf06f","src/macros/select.rs":"4f185b209895124d05a4f02b1df0fd79328f6604f8c4d7b9747387ab1d881ccb","src/macros/support.rs":"b25ae54e6a21180d1c11b19899ad64f51e4f95026df443e1019b52708c4d9762","src/macros/thread_local.rs":"8602495ed102b63e3048a261eda7483dc9a24b15a74d7059c31635e8f45de19a","src/macros/try_join.rs":"8e87155b3ef4717783128b5af1c1d4fb6f883ed70df7adec1dce75f1ad5734b1","src/net/addr.rs":"32e8b56f3150bbb10ff0800e1e3eb2d255f61eb89d7f1831da475584e10b13e1","src/net/lookup_host.rs":"178ec6165315702fce5a
ad5ef5e715959945cf33ca9d57c1f3fbc454694a8b05","src/net/mod.rs":"fea7fdbae63cac82fea3a7e58729e59f5efe483f795c4c59b55dff3f2aa6c161","src/net/tcp/incoming.rs":"aefda2a41273a833faec35e4e064f21bdf79811d269a5d419ad0d4984827eb7e","src/net/tcp/listener.rs":"ac4412b221665d07d1a237770b3f8676b2c1a3f091c00e559de599ddd0aa7f63","src/net/tcp/mod.rs":"3400e304bf32903deb4f8859fff819d5b2b0c61dff63ec00f0140aebadfe42bc","src/net/tcp/split.rs":"a2e49f715b3394588a5152d6749996303e1b7113277b1f1e54560f8bf60650d8","src/net/tcp/split_owned.rs":"4dcfcc7c19cee713e3c526a724789201bcf95bde898d91c059c74a63e15449bf","src/net/tcp/stream.rs":"863de5f38445d05829d482e2cbc01851927e423185ea9527e93d14f193fe689a","src/net/udp/mod.rs":"eecf3139b6a5f8cd8e02c5db18d4f7ce3cb005448e490b48f88c0e80e19c5ee0","src/net/udp/socket.rs":"52c11314f10a5dbfe92b68c343843a49227c01cadf3b1ffd0b905da519372734","src/net/udp/split.rs":"c01b3193b2832604b87b3fe57404334218d67f52a35744e90c5fd392e85ab6ee","src/net/unix/datagram/mod.rs":"466574c652a83ed8d6bd997a80cf9cbe5bf3b0965a7315ef69c49c4218f48dab","src/net/unix/datagram/socket.rs":"ab705d8a28b412e8ce349abc93f53d42c534969bf91fe988e4865f5b0edcb133","src/net/unix/datagram/split.rs":"741b6bd97ec1b63cc7c4ed023e3595ac79c43514a2aac93c53e89f9a370f19ac","src/net/unix/datagram/split_owned.rs":"211018872b537cfea71a86861c50a4a74aa0ec9696b0e41dbbf21100bf12321f","src/net/unix/incoming.rs":"597cf15c347f4a48285cc20db5c2a82c46fd033e529c30378988ffd27b68616e","src/net/unix/listener.rs":"34a4e403801e24da44055d258034204c0ad8ae5ca5c4e2bd20398b4202c8c910","src/net/unix/mod.rs":"b6d316ee7385d32ed356eb596f4f7fbdb3047907ab020b7acfc02c33d8bdb6de","src/net/unix/split.rs":"55131f0e32db762d462736e956a6f19561b356d9e762546ac09fbb60733e1311","src/net/unix/split_owned.rs":"569ba680c914adc5e69336e3aff836fd8262cf0ce80fc5ab025ba25900993580","src/net/unix/stream.rs":"afc59e3a6deb958aff7aeab9728e04a2e8d5e83b4f8a980dcc979016f4434f03","src/net/unix/ucred.rs":"e56ea09bf2c011f156bbfb886e06a30433fef1adc4d2fffabff30bf8dc6f2a03","src/park/either.rs":"20847f6ac12fdfbcf2881948865b0974846503f1e77407c9ba136eb29604a6b3","src/park/mod.rs":"a3040a37c51c079788cd801032e5470f3732ea5fc0cced2e8d0dfa8a0ce75308","src/park/thread.rs":"0a122331875b3d623d8278921200170bf8d64b58825648152f368b968b25288b","src/prelude.rs":"939b8305ef2c1b74425b1a50c6f25b0a5bed14215fc296385777e60d42d6361d","src/process/kill.rs":"2f98bd1bd28ab37bedc34ab7b737760407ab5315420538acbd18da31d2662d94","src/process/mod.rs":"c1450fcbb1b4e741d4ef96b629427e586fcd5149637e2b7abd55b530a2e0807b","src/process/unix/mod.rs":"1c4e2c8c2bb0a34fb584ca3553f97d25ca3cee25adf7f6239efe38742e2fc22b","src/process/unix/orphan.rs":"54c137c389ec981d59975721d52b7e01cc51100522759fa0fb5bb9c252b937b9","src/process/unix/reap.rs":"9b1c7c5d2f50f7e7d2ce4f4ef9e662a01a577b63c45ba428fc4bd088ec92a75a","src/process/windows.rs":"1ed0999f5e7e3d8c081d55b1f1dd977bca6f20024c1a55de49d374c0dce25605","src/runtime/basic_scheduler.rs":"cc29036ed72f2945aabdd2ce034ce73b26909c8a031c20b04aa6ccf6dda6dc7b","src/runtime/blocking/mod.rs":"9715a6c90fa39d3dbd60fab44e15cf12aa25cf936fc7c6b8a1982e00453a3bc5","src/runtime/blocking/pool.rs":"648aa9e86d696b1e69a47d2d78a7576e50f76543b7366eb975263058349e0ae3","src/runtime/blocking/schedule.rs":"9c00803a2ec9a5e3a749378e17aa7ff8055abb8b357fad5217e7a6863a093ff7","src/runtime/blocking/shutdown.rs":"3a68c0795ab736741e95182bca80701c5fb758a3c64cc56f7976742d1cff32df","src/runtime/blocking/task.rs":"676f154529d26186f8ef9e974e7f21da151e3757fdca39769a44f7afdbb4c32b","src/runtime/builder.rs":"ba02ca365e063189240a85105051853e
0360375442b1dbcc06d5712e4a6b1cb3","src/runtime/context.rs":"917597c3a9c5850808b34762e16c310a6eeb09d19383af83de0e0fa0a77f7412","src/runtime/enter.rs":"20c66edb3db9f01b9e29bf7682a95540370fba3434a073566ac4ffd1bb9a9fd6","src/runtime/handle.rs":"2689b6008babae2916eee0ae4a3303e5ad477ffad0ef9c280dc6d586d977d49e","src/runtime/io.rs":"647031bab22ecc9db5eaacdd77d83748cd34f56f49040fe6280db6d90f6851ca","src/runtime/mod.rs":"1f16d39ebec726bac46920ab95dc5e98fa34dd4d78aa7825dc78b40dc12a19f3","src/runtime/park.rs":"9684177367b919bcc34d8ed560b2aa5b26457e049d8ab5b2987e26741ce76b78","src/runtime/queue.rs":"0fe349c5efaa038064fc65ccc67b376b31f4b6df5dd797cca754d25ad65a0da2","src/runtime/shell.rs":"e0b702170964541b4f829a9dd80a8afa95b7044b114472a3c22f2c48bbb6fbb0","src/runtime/spawner.rs":"7de47d33f4ea7f12e3e5b9fa8d57a47a9b57f89b293d53e8d1fe1bdc719b0aef","src/runtime/task/core.rs":"866a5ae4373276679988b28d7b3b995a61801479fa6ecde08056655e14932f88","src/runtime/task/error.rs":"c50e7e0945ccd046803d745655566c8f5b83438b10168e01e9e972146941214b","src/runtime/task/harness.rs":"be9cece0e218ea461e8b1358a6f661ae67688e4e0438337d7f9c667a0f931ab7","src/runtime/task/join.rs":"07500fe8a10b7f0ce42f537f856bed93413ceabc60db32702496608e2caecec5","src/runtime/task/mod.rs":"b7c258ef949f2146714c20c2df75f90a04de7b82722a98cc7d99b15d917bd738","src/runtime/task/raw.rs":"8ca0d5e199a0c87ad8dbe5fe1ec7dca38ac18f5088f37e99587e752bb2945083","src/runtime/task/stack.rs":"6f1204e3f96f0b9375c53dce0f061dff7a91e9f111e0be24fe07a5490f6f1720","src/runtime/task/state.rs":"e7c9577e66990704e36f8ccad9b3db41a30cf42aaa74611d648541895fac5146","src/runtime/task/waker.rs":"06d937d78301589f7b6baccdb84bce1266edf0e97462719d8816fdcecb0ebb42","src/runtime/tests/loom_blocking.rs":"38ea6c220fab212a3436c682375e19bbbbf2a49bcd9aeb1adfa200fd205f82f3","src/runtime/tests/loom_oneshot.rs":"82e21a3ae98f937e64c5f5c7357537f60ba2738f312feed5e1c9ff727ee3ee39","src/runtime/tests/loom_pool.rs":"a42836021d5843be97cbd88bd9a8e561426004b15655391395bc54026f433e54","src/runtime/tests/loom_queue.rs":"022527b961e310b6c624a118b71c1f69b3b3cb7b179005b3ccf55b330b0c0a8e","src/runtime/tests/mod.rs":"8d73d4e756ee657bc0ee0f123a9ea0c007a42b9f1a96a521528ab14d51a8c1f1","src/runtime/tests/queue.rs":"4ebc75356991564bc083fbd7b6e823bcce2c9c6ba30bff13eeb1b8afd718de77","src/runtime/tests/task.rs":"a013950824ed3ad940d9a8929b3cf3f914115ed21393e16cc366a7fdd87ee907","src/runtime/thread_pool/atomic_cell.rs":"caa3b2262f68a46a8d9be7f3a2e52025b2595767fa6f8ff2b511504ac32cb246","src/runtime/thread_pool/idle.rs":"c8a7864b00c75baada20a8b57df732f23337428d958da8837951d76af577bb98","src/runtime/thread_pool/mod.rs":"1307ca5a1625fe1098e605b879d32af8849f063c31718dbf2b7aa29483bd13d7","src/runtime/thread_pool/worker.rs":"09d61686b7da5ed5ed8e7f4125f54e0294c270b46c93727e13c83bf2d988aef0","src/runtime/time.rs":"ef51c298a438fadead903a97b9e508ea3013af8ca8198db95922ed787a19b5cc","src/signal/ctrl_c.rs":"d58397d8057c86408156a5f2e7322de2d99995aba54cdc185bcfff1a702f84e9","src/signal/mod.rs":"d2094a189aa9c7a4b450adaa78b5b6df5f73fb8f474fc3aec04b9338fcc60133","src/signal/registry.rs":"24ce538cf5393cfe34aaba46ab9c081f01803dc7431b9b5da0acab89fa7fb991","src/signal/unix.rs":"a1291e4e2e8582422f09052fad66540f6f1a2804fa80f1c56bbbecf717a88b89","src/signal/windows.rs":"7cdebd8924aaa81ba98e047db08a1bca29f844974f2a1feb59a1ebaf3cbd0434","src/stream/all.rs":"124d4f3846e9a79dbc128c37f2180ea72e6d48ca760e22eb42bf835e31638fa8","src/stream/any.rs":"cc6638cc89c20969ff8df7cde21d5158c1d4f64135cddd56dcc178f7d56ccc8a","src/stream/chain.rs":"4f7bd88e3adeba8b29ae4b
4d96e64600d3a24bbfffcb942b3cc1110aec64ed4c","src/stream/collect.rs":"bec115678f3975fa8a1e8fd088a39cd55c384edc7158cf33a4b225c60ba1756f","src/stream/empty.rs":"e91a2217b2a7ca2d2e5ee9fe5e8642bafde5343d107f69630695ce2c75ff68b3","src/stream/filter.rs":"2191e2c734ec97c9b70da380c808483d62e1099d7feccdb8449192a09dc72ac4","src/stream/filter_map.rs":"70934237d4acc3b21393ff73876dd6c28fb17d9a350c9827fe19261438970a7f","src/stream/fold.rs":"f81518d6f7058f249889fe107f7bf322436f28ff99317682f093ccb41094affe","src/stream/fuse.rs":"a374a9c268cea72a787dde18193f3420133a2acc80e9a242f766430ac72d06cc","src/stream/iter.rs":"b370acfc6ce108da97f6ff486710a795fb228745294c9c4616172c92408edf62","src/stream/map.rs":"c8c84658bd00f1e5a32d2d7b2066f7b9c0f3a5ef76cfccc0a40afde3732b139c","src/stream/merge.rs":"daac1e1429dd7aac4b2673032ce6e0d592c547ff51790ed9697b66c3e65b81c8","src/stream/mod.rs":"e0b9c66d7fdeb3f4bb7de98bc7e6b1e04635b71385f530bd8cdd5f25bdaa241d","src/stream/next.rs":"b2cadcfd467a8673e6de5023b56131ff23ee5508847c806b778cf48123664407","src/stream/once.rs":"5d99a3b9a397c519a5d7d8cb4aabef0a0c3a927b828dcfb5c469c63f92174bfd","src/stream/pending.rs":"551f30ae19fcde725671cc4329fb1104fe0959085c14b79d21b870a054b214ec","src/stream/skip.rs":"f3f54b1afc5d353f9480f3e0ad0272f82da458610f772c4becead03983d34b87","src/stream/skip_while.rs":"1a6d9332ff7e5d530e442a2661bd0a30a75674b7bb2c16fb5fdfdb671f42a1de","src/stream/stream_map.rs":"4fa77eebcb58cc79976dd9d04bd497f733b526736bd1e19d4a8a6130d3ee0291","src/stream/take.rs":"4bfbb36f50cc1b90ecc49494e063e113a366468e7daa7643781821668a8fb2ec","src/stream/take_while.rs":"35523b54651a31de22cabdffe09a4ce1cbb548ea121f76625744dad3c0366d97","src/stream/timeout.rs":"28e4d7a18cbf6291f9dedaeac47439db0d0756c3295307a145f494561ab53697","src/stream/try_next.rs":"2a26cd73d624bc936840f8cb2c894ac69dd47ae612f45bfcd47c36447aa1318f","src/sync/barrier.rs":"f5953c6117ec0b0813639d636e4a8e9ece87854f496711605e8dccc7aa7331b3","src/sync/batch_semaphore.rs":"8b2c906fb283ce33ca09149b99c1f8b2410d2e33b495105720e60e0d14cb3542","src/sync/broadcast.rs":"70037ef87ba1c9070bfe6e683e9c40e35d9fb4c3dbd73baae2a2addad0500e19","src/sync/cancellation_token.rs":"dfbcfc65f16e3cdb1a8a83616beb8119f2ce1d187243829b72a079e2ffd9c6ce","src/sync/mod.rs":"41a30fff829bc02a855a89575ec683cf2cf579b4d48b59f933c4a30f805a9c35","src/sync/mpsc/block.rs":"afb7210cac1bbdc6940b5f64376692152c769989c40028d7ce5480ffbcda816e","src/sync/mpsc/bounded.rs":"a6abf8814b4bd222c54e29b27d4ed511c9376a416375fd3d652f460d41b471f1","src/sync/mpsc/chan.rs":"7600bae4e8437e8d8cf44a4962737e03673011d8b0e7e3313c67e6c478aeb5e7","src/sync/mpsc/error.rs":"74e5cb0f3d037b3cecd42c59260bca806a2fdee230e5af150e0fee95ead6b6ef","src/sync/mpsc/list.rs":"8a186b3f3cdfd39f4c9af7079104a0daf1bf08eb07dd638ecffc53494cf25db5","src/sync/mpsc/mod.rs":"eaef7d081dbab6e03f05b18d41ea673127afd47ad9fa8c1ff19e9dede5ab49d9","src/sync/mpsc/unbounded.rs":"97dc583a6c97c936ce9c19303f59a5b5d2514eb31a7888636e909c7f5d24bf5a","src/sync/mutex.rs":"7f1708a9d7bc96758504fa182864fc677ac80869cd93372bbae8f871e5675852","src/sync/notify.rs":"6ec7e93d8ce4f1fb413aceb84c22ca1541074fc3e92a24cf69d68bf98a63bc58","src/sync/oneshot.rs":"9be4ecb84136628dc0cb395fbf9fca112b185bac79e878d5d81ba9d976c17df2","src/sync/rwlock.rs":"0cce6e60ad30966e478fbfaf27d039ec3a8718d63b7e74bf7d401968d5a4ccf8","src/sync/semaphore.rs":"e5681d434c4473d78ae5510bfc100fc9b7217e71751a5463e61a0a8f640cf8c9","src/sync/semaphore_ll.rs":"6dfe56703eede7e6fee2612924fc4c643f5aa10897c8d933e0d1613b3c82743d","src/sync/task/atomic_waker.rs":"ee1191ce709e650a7a9ae43cf5e2529e82
013460c1a50fb34dd4d1d9522caf5d","src/sync/task/mod.rs":"f5e38105c7f8a942c0e49b973bad0a8c2a1df81deea19f3c5228edc4896c1725","src/sync/tests/atomic_waker.rs":"aa0184eaef7fb5098d9120c941c282e5400dcce8ad0880d3568644eff134ee94","src/sync/tests/loom_atomic_waker.rs":"d6c110bd8cc99989f1f4160cea929ee6faca695d0bac1a71d257898faaad5d27","src/sync/tests/loom_broadcast.rs":"f251c32d8f5c959707c36b29ab4b0bde518b45205d2e02512a3d2963ae452549","src/sync/tests/loom_cancellation_token.rs":"6393c5a12f09abef9300be4b66bb039bf02a63a04d6175fb7cfe68464784bdbd","src/sync/tests/loom_list.rs":"f0ce15a0f965fe558a21bca24863c712156eaeb10feb8ef91031a6d6e3cc5dba","src/sync/tests/loom_mpsc.rs":"94a62f2c727b1dce8cbfbcd65303550fffcb5a302359b6f51cd475ba3f5d97b7","src/sync/tests/loom_notify.rs":"36d918142044b2e617330ab3d156d0c3f8f0398bf5d534683428e30f8115fec7","src/sync/tests/loom_oneshot.rs":"32583f9b711b79a74886a23241773819a473ba7e95abfb7d6531f8ddff18bbec","src/sync/tests/loom_rwlock.rs":"a1aca5bb8bdd073d8f8ae1dd921c95f301e6d4aa26eb31332c3c10a93fe2dbdf","src/sync/tests/loom_semaphore_batch.rs":"c6f69b8d5b2e6842287ed34638a9045095d9f94c86ba6bb84c1224bbe10026ff","src/sync/tests/loom_semaphore_ll.rs":"d56bc913f1bce3ee1254cea943dfb6b202ead552fd6fa69418e78cbcc36cfd9c","src/sync/tests/mod.rs":"26d789e2b0c2f51776e77d8195bb85444324aa6b70225d3de5d882434bb5340d","src/sync/tests/semaphore_batch.rs":"a2eda9631a34c1c12dc39cfb584c4135b7da5b1ce3a00751c6e51ef81ef81e2b","src/sync/tests/semaphore_ll.rs":"9f00c8cc55ab788f63765e11f9d76f10a9a11186d8dd5cf585eaff42415e743c","src/sync/watch.rs":"341e5b48064fad2e529ad4d435de359bb38dec8b615fcedb7fea4182262fba31","src/task/blocking.rs":"34a9cb4667d5dde8f856d1a4ffc742343ec6c093a92ba6e30baa54b3ccc05297","src/task/local.rs":"eea031fea545cf4aa1fa38c44f23e54938c145fe0c5b89e08e8474fbab440525","src/task/mod.rs":"41117cf8803340679aa07ff853c94456e41b2f6f6c373c052463f330e2dfcaaf","src/task/spawn.rs":"e06ae9fcd3c0c2a4e295103d8defa1a29a79b0e7f48a9f46b96ef5e900ff0d7e","src/task/task_local.rs":"25e32885e204a419a205027deacc959d0d690ad140b1e1c77366701316389da0","src/task/yield_now.rs":"d8cb414972c7e867ce44a854dfd799e965868390c63e003616e4b132b38211a9","src/time/clock.rs":"85cb74d5c0fff8cc2d472fd127b27f30d2b364d449594f0064358ceb078aa04f","src/time/delay.rs":"3ca1387b77e835cb509c1ab0457d0f4933acd57415574fa817bf81bad50529ec","src/time/delay_queue.rs":"15cb1de1ad92b6db92b3f2745bb5c61e08b1fda662c109b7a766ff3679e0ac25","src/time/driver/atomic_stack.rs":"16031b8f83df4b48de4954252fc1f6b740dd4795ae92b71a125138ef6d89e309","src/time/driver/entry.rs":"8cf83ffcc5027b2d2cbc2a6fe210328a40297b5af69f8b75542134d443d29f6f","src/time/driver/handle.rs":"71b1325852f3c17ee996852af2546603eaba7816e847731627080fa79ce7502b","src/time/driver/mod.rs":"bf9d73a32e62f471b0b53111a52206ee6326ff90cf99bd2647a36103f18cc2ec","src/time/driver/registration.rs":"c8ca64c50339032764d54d3af650deb0021e9f96b424c2e0d8661519e7b144c1","src/time/driver/stack.rs":"ebfeaa2bc3fbb2ebd0dea0b0e7faccce2786e79a48c81e45809a29142e150260","src/time/driver/tests/mod.rs":"26da5216d385f3d4bedfb0228e698f3a41a92c2f58dec0e371077e6bfc73fd7c","src/time/error.rs":"42d3bc1e88e09ce68283a891eaaf1d27892ee0af3be8c3e932b44dc477341478","src/time/instant.rs":"406116d79117135b9717f71d585e3e92139d615d5cd03071beaa9dc14ac913ea","src/time/interval.rs":"8668bbe57f8c44ca9ac56a58277835b7e95a66c4a7821d5c32a95b06b3a41302","src/time/mod.rs":"b3bb132088e60206169229615c79015d7e874df3af820bfefcefa54c16546255","src/time/tests/mod.rs":"bcbf2373c1b950da5b1418b7e2100f6df492c081cde606e132edae5b22ef37a8","src/time/tes
ts/test_delay.rs":"5f18a883ba8c1174619c0355e856f11fdeab8d9d2c7782521c3448a1e4e789d4","src/time/throttle.rs":"a4fad4be170b445b1cdec8ac1ca5dccf5c96babb15f881114433339436826cc9","src/time/timeout.rs":"557bae886ffbc18c5071ede6f5703d2192fbefc72a9e7b5b7b91b782f175b46d","src/time/wheel/level.rs":"bfa1096cc91b9fa7c58328179194d606c1cc850f953a3ea344f8517c4cb25d72","src/time/wheel/mod.rs":"e49465f9ffbfa61283ce6eb766cf70edc62721cb033d8b98915e61d3814dd316","src/time/wheel/stack.rs":"ac5fd0f6cac1bfdd9497b6eaa4f64beb816bc5605ecae463bd10a5e4c48e78c0","src/util/bit.rs":"df2987358940db917437ac5269d38dce75837cdaae3b5f124cb7cf5fbd806fa8","src/util/intrusive_double_linked_list.rs":"58d5bfb2ce1a2ddb9eeced387c6ca244e820f72c145d857c838ec6d81c3ca4c9","src/util/linked_list.rs":"5d4023dfbe9410190c7176b66883550813bcf299d146c51646631f4c5974233b","src/util/mod.rs":"b02c93d9678c14292823fca80f66a501b87fda96241905c2eded742d9a28fb56","src/util/pad.rs":"5dc99dbb3d3d16fecd6228fd2e2f67e5301b2d426e6149f79f93c1af1b4d1d90","src/util/rand.rs":"df99cdedfdc191b9b138c0b8f2bf7b200da09e646f7977706b918593d7f08335","src/util/slab/addr.rs":"1df854b698a1124cad1111113900755ebd8ccdfc282558ff85dec66403cb69d4","src/util/slab/entry.rs":"718e2a7e19c78fe5693376d9152bb16435e5cb898e5191aa7265646e2f19a1d6","src/util/slab/generation.rs":"4cbe46a38c41a3c670ebef7d18862ab00a03b36a2c2597467e68cfa7af5a2374","src/util/slab/mod.rs":"27f921fc35aab54fc9affe7093a0bcace4763c907d105a0d3cfad3961f8aa6ff","src/util/slab/page.rs":"13ca549b1ee909d0dd575fbd3981b1a639663f2f43b3867fd790cdc0b57c459c","src/util/slab/shard.rs":"491db737b4033ae21a3f44d2ed437f4a2aa9f1fd52df3999acb0d27a6b06cba8","src/util/slab/slot.rs":"49e83978919c7fdb9a6fe3cdcd03e83114de6e926c409046b279a2ee95fa3d11","src/util/slab/stack.rs":"82e3c8bd907ae25d05a51a638c48cb2a0cf39312198c5c32498fbcb325654e06","src/util/slab/tests/loom_slab.rs":"f8c787e4ea2976889a355b91c9e594dbacbcecc26dfdda94767e927dfefed6c7","src/util/slab/tests/loom_stack.rs":"d29db751ba9c48908ded814ef957456a054988136b07cd26c97f62d3ba5745e6","src/util/slab/tests/mod.rs":"0423b219f919b8ad7f0c9b6978aaebd7d01139063d68445c8383ddca011cec3f","src/util/trace.rs":"ad5f971888933eac34e5ce7285a0be2355712e5498aad70db60bdc182ad03d9f","src/util/try_lock.rs":"c4ee49e1751ee0a7df1a8cbd4f8d36ea1d7355e3ac584fdb8697a94cd7a7a8f8","src/util/wake.rs":"3bdc5724211c852ed19a9ac1676ac8e12dd46381fa5bcc899cd5ecdc33528a79","tests/_require_full.rs":"a7d828d85aee8507a0292fe74ced5f4e75b0aaf3a2789cf0bddd276e9fa12dca","tests/async_send_sync.rs":"1417a9496ad9778302daf4ad9d197cfe7ec09fe925861f8f8f63f42882ff8a60","tests/buffered.rs":"001ffebf7a37cfc718a6e73838e6199000ff710ce387def7f4407b733998bf4e","tests/fs.rs":"b4902aaff2c28ef4d2676462381b04559fb4f7cdc0ecf46c46bccbb6276feb5d","tests/fs_copy.rs":"83448b19bdc332ec315024d4903b0a2ae81221895725a8b750025b47a43b0e79","tests/fs_dir.rs":"5cd196805dfa07fedc0acda1ddcf129f839ec9dc0f87c21c0c6ae17d2391d45c","tests/fs_file.rs":"9804b09f7c824f58ad3f06db7c769e8f0808d297ac8d78f68b32503eb40b3036","tests/fs_file_mocked.rs":"644ef206cf8a2e4e103c1e4fae10af9a26dc66ae27451faa7d4799bc61c6c518","tests/fs_link.rs":"f4cc85530965d97916073fbcaa81c65832b9a29ac4261e086546a11a89364052","tests/io_async_read.rs":"5dfbbb40cd9fb20a8b7af60a62126d34a218062f8f6117b2dc1282b6e2fb2538","tests/io_chain.rs":"f5d3ddc9f6e8152ceb08b5dda2ca3168b174f1f67ff28a4c5983bcbad69d8af6","tests/io_copy.rs":"6b94cd77e6034f39865c93611d65870a70dbae9d3b8e33253095b9d5d16410ad","tests/io_driver.rs":"e945fd55ece9fe5d3c2ef695d151e9799fee645f18b8eddde72c2a9d3336eb3c","tests/io_driver_drop.rs":"
168417ec9f256d2604eb022a73a64a88ab5b6fe8d0f4be1d3460dc41d5083571","tests/io_lines.rs":"6660dfaf3d789ed383be6b54bd19c7fcc8a6b58cf685ec17a0d9588333a5099a","tests/io_mem_stream.rs":"8b9d775df283fdc7150f5fd12834b1ce78f585b3756444bbccd63d7a147b8453","tests/io_read.rs":"900e56103d816575bf5647188823bf2ca66e7c9cad29154a9f0b3e9c280c611b","tests/io_read_exact.rs":"b6387dbeb0baceb7a1f74a9a3a8b4a654894465368be27c3bbf4352b79fc4314","tests/io_read_line.rs":"8296624b4f5e162c79024f3beab2f561f4195a244cfd4c53e4d06282f56a31bf","tests/io_read_to_end.rs":"7d50b76452c84822650225095d2cc83c8a162973470418819ae89268056c8523","tests/io_read_to_string.rs":"c9ebfee5cb262d822119c2881ea1cc0c73598b13c517c297663e35bb120a089d","tests/io_read_until.rs":"b6c0df9e4852766910ec68affcd92fbfbc280018b7f9c16cf5f4830f9b8389f0","tests/io_reader_stream.rs":"7b510365449fb6b0b01cf37ecf6ad56bfc5b32a032fd756ddfb2192e9c36d50f","tests/io_split.rs":"35b3de189ff171d69715fc929b74be0f181cb6dbe4b2392272ff5172e012548c","tests/io_take.rs":"8f4bfc9182539335704b6370a66998ef2a75f508fcdb73a7f8aa50baf0f4aea6","tests/io_write.rs":"98668a8c8feae0f85714df1dfecfcd94fba4ba347bdc3d8aaa4ea8b175055c69","tests/io_write_all.rs":"e171af1ecab45a439b384c3bae7198959c3f5e2e998967dbd9296760b52951b7","tests/io_write_int.rs":"3f4b50345f7d7d558e71ac7f2a8c1c4b7b771dad09fe2e1fbf9a17d4fb93c001","tests/macros_join.rs":"107e8b96a8654ed1e3ec1d9c7f41f44f2542f802c6324e993f0fbd6679912d87","tests/macros_pin.rs":"572d65b3894858ad8c2491b6a5f8ffdb3b37ec71d2996831b2ad929c4e47d067","tests/macros_select.rs":"f8c8629d5a796af807d30a5fe8d46bc11945f26565cbecb9eafe9313a5ddee43","tests/macros_test.rs":"fda9c1fe4a47f938ec0ee38cd28569d77647d21e702c508f5f4de72f0fd59cfc","tests/macros_try_join.rs":"3b60eacc2afa5d39a2e447e96d5ae2d4bf0ea71e213a249bc0db3b8d234a80a1","tests/net_bind_resource.rs":"3abdf9457ebc9f8262c03fa5834f1ceb6312d4a1573b6bdd4e2f584e3cf76b66","tests/net_lookup_host.rs":"fa43c0127277dd0cf750b17157fdc1d438a8009dd890458f9c566b3a1302a461","tests/no_rt.rs":"3f5be6964c08f850b565e9d14ca698395e72d607da69f34aa1ea1d127f5b5f42","tests/process_issue_2174.rs":"2ca9267533ec563df9b8a9572b21f557d38e1ec95a5c4ab8a0c77269b89a832a","tests/process_issue_42.rs":"b2a491c9093f67df000fcaf550ad06a5b09247a61255c7e708418ad7bb0500fa","tests/process_kill_on_drop.rs":"f3c67649fdf69ed83a81906b93774f5c529f2196b4f8b781870f51d5d9160223","tests/process_smoke.rs":"032ff4ab64ffd36da41a453d1d22ecd5dc9f2294048cdf94c62f015e22ce608e","tests/rt_basic.rs":"0d3320873d292238dd7bed5276539da411bf7e2c73e24035bbf39ab1e1991e7f","tests/rt_common.rs":"0300b79c41132d0342a5c75538d773f1fdc18a127faf3460a0253889d0317517","tests/rt_threaded.rs":"6dd06813d0f231b8dde123e67dccb1362d7aff106b732f7b207f624c2205e093","tests/signal_ctrl_c.rs":"9b53065781b37f3db5f7c67938239b0f3b0ebbc5938c14a5b730ad7ec07415d2","tests/signal_drop_recv.rs":"d1ec97213d9c6fd9fb25ea8c2b015c9e9ee1a62fe0853fc558bc8801e5a3a841","tests/signal_drop_rt.rs":"afd272df50241c16c72d0e3cbd26a6d90e94420ceb90314008ee5fd53d95da2a","tests/signal_drop_signal.rs":"041940550863250f359630dc67ef133874d809ddaf0a6c1238cee1565a19efec","tests/signal_multi_rt.rs":"52a964ebb7963f8b84816c2e8019056901abf7cb42a9f9c612611d8327091f05","tests/signal_no_rt.rs":"99714bf488a26b6b394d93e61639c4b6807f9d756c8d5836f31111a30d42609b","tests/signal_notify_both.rs":"bf0b9def20f530d146ee865305833d8e9bee07a0515e66573d7ff30e2c631123","tests/signal_twice.rs":"bce33093eed151955d13c334d6d8a5bc5ca67cf5b37c246e435a24c15bc166a0","tests/signal_usr1.rs":"86ad07594b09d35e71011d1e12a1fa2c477bfbc4a2a36df1421b6594a0930074","tests
/stream_chain.rs":"0277c268b25f2418d8d48c0626d8770cee51e448cfd7f65c37962450b9f9f097","tests/stream_collect.rs":"27edfaea1c757dd29e53c253f639fe6d78244042721e1a571bb4e88c6b35ea28","tests/stream_empty.rs":"71c9e7f6040f128341b49a39c08b9616afc6137e764de2f6ab5b4d8f2c601873","tests/stream_fuse.rs":"13af891fa873ec942d8c87decad9419ed5f22a7903de6af1382e8be1dc1a0df6","tests/stream_iter.rs":"621f6b99e39e1e1c42220b04862b7d24584deb251e4de163d1f429a69873b0b9","tests/stream_merge.rs":"9aa72fbd88efebba1c9637155ad1a6932b22bdc6901332945bd2b858e63b9986","tests/stream_once.rs":"58fcd541bfa4ae6d4df1b83c6aa1f303cf3b7410a5fdc7a7049d4682edb9d7d3","tests/stream_pending.rs":"2c1e798dfd15f69b7fd2518999473f835fab77d36c504fee573f23ca65351522","tests/stream_reader.rs":"2d5a02f00e10804452016455f56e6321bbfe7f0c5ee6179aad592c8e17db6b8a","tests/stream_stream_map.rs":"2aceac06bd22c9d0366faa44ed7b285b89087e883f18073c9d417bce0f581221","tests/stream_timeout.rs":"69d440f463c4031a9447409ba72839f1ff826679696dfbfdd863616020ca0894","tests/support/mock_file.rs":"9373a85e4dee4c5aa380e8a499402db357e92a5783c82a2d3ac348afbdc78abb","tests/support/mock_pool.rs":"1ddbf09d10787c5e413880322b70ec5d01ed4ee4b866c14a888d3cabc0275ea5","tests/support/signal.rs":"83531afa2e8e71cfd90cd4e1fc821490ffa824f0f9f0c9c4a027c08fed6b8712","tests/sync_barrier.rs":"7771f9e75ecf24d1a8ff0a8731a6bfced34cc129aba0e6f7b9b73d6a1d6df999","tests/sync_broadcast.rs":"b1e3c869a347ada7ede16e8efe459c13903fe49f5ec66b610108c87fb1808828","tests/sync_cancellation_token.rs":"dbfc695a24fcf577d7550e314d796d4310249bba9b5a45a753cefa00d125c1a9","tests/sync_errors.rs":"d75d041f946ef8b0a726f4fcb3da29d7248485b5a28f0fd68adcffadd8852549","tests/sync_mpsc.rs":"6232b0763d650191723aec5f74682cb5f9268e04dc90d3b4b1626a01a322576b","tests/sync_mutex.rs":"585c0fd5023f0a10f6c95f53bd5dcb0aa16ee5cd4976296cac28bb476de729b4","tests/sync_mutex_owned.rs":"11c0fb1cf6eeaf07cc9529261e91868476c64d907c62f8e09fbf218bee081bdd","tests/sync_notify.rs":"ea0da778622c8105dbcdefc709984f61fd36e54f39e3c80d9eaedcf036ef3086","tests/sync_oneshot.rs":"da46ed4514aa465e832c12dc00223e2da6a2eec7d0b3bfa7d1186f4d4741b627","tests/sync_rwlock.rs":"1a54885310a87a2f7b2408732530adb0ead97775ebeea336b614ba3960e9e6ed","tests/sync_semaphore.rs":"a86b2839571490b8a83db2fc727ecfe85c8c1a11baee92e7f212ff128f73400c","tests/sync_semaphore_owned.rs":"67cd4b0217295c31b943c9f541b3bf8fd72dac7e202defb4bdea96a4a6543e63","tests/sync_watch.rs":"51a84de8da804e883ddc97ccbea222168be1dcd9e0a54263839a5b7850dfd050","tests/task_blocking.rs":"774c0bd8c9836adf9d489a682c5dbe00083083e958ef1b9716850ba7ac6dbb22","tests/task_local.rs":"09ecfac3dab2d92d443f0267aebbd484c99474617174459621692185ad366d6d","tests/task_local_set.rs":"c15e9029a06e5a408b6ef9e31cb8f20ba9d23b272664a7aa7b13fb6fbf8b67a7","tests/tcp_accept.rs":"f67ce288c5dccfa88489d6af9ed2b2acbbe700f2097d7e3083194a206265d312","tests/tcp_connect.rs":"bcdefff6cec1f8accca910c27af6bf999265c10629287678f8a89ce08f203b05","tests/tcp_echo.rs":"4943257ba23c326a52b4f6f984259b471165d07a302c4dd92ccc90d2694e78a3","tests/tcp_into_split.rs":"4ee1ef03600f9ae47ede2e80810c3a589f026a139b4771503b15b5c8bb8bb486","tests/tcp_peek.rs":"ea904d05f9684e6108a698bdbbd856c9849c1a51eb334cf5bd45ef74c8fe585c","tests/tcp_shutdown.rs":"48c626375cb25c77cda4f0893f066348ee000969f0e5fd4e6adf2b56619c4dea","tests/tcp_split.rs":"e967b01bb90e2081e5e08e8cfd619cbaf0d1dcd08c4e77dbd5bae893698cae85","tests/test_clock.rs":"d5c9bf7bb5d926e2b29b43e47b5bb051b0f761bfe44d5fef349ed442ea7b416f","tests/time_delay.rs":"0ab5f3c25070a5f473cfb98812e77a0d5788207d120233b7ea40f1a
80f05ab3e","tests/time_delay_queue.rs":"1c9452e7d6e644bc806272717d0a6d96459c30975ebb234366d63f44eef36c00","tests/time_interval.rs":"71e262819ff32948e7131ae2a945b2dbf053a5a3ae3c7f30a1cf8f2bc4db843a","tests/time_rt.rs":"730728a3206d5f8ed28dd656bd225dcadda478f5ae773e789336814dca638845","tests/time_throttle.rs":"de3e7374076ab3c05d3deb1af10911f236e39537a254f78ccbcb542bd7b998b1","tests/time_timeout.rs":"cb0bc0f4312040fa0d8f10a12bfca081355e22cb6e490577f942a8de4aed43b8","tests/udp.rs":"4b1eb9a9ccf883c300cfaada80cf5b75b579aa1d8962120f8491ee4824b4f6cf","tests/uds_cred.rs":"e57a552dd9e70e9ad71b930f6d130e6df939b48b899384f7434cfb7a2539713f","tests/uds_datagram.rs":"2018b23ba2d7ba494108749a25e539d105fae302a6c9739fe3423021cf8e970f","tests/uds_split.rs":"9b7cb3b0fde549279133367003606485058b9f8505d7f956eb26e7792dc32402","tests/uds_stream.rs":"af789c0198ee71749178c5784cf21762c1324f2b1888449da0d15e846571b8d7"},"package":"6703a273949a90131b290be1fe7b039d0fc884aa1935860dfcbe056f28cd8092"} \ No newline at end of file diff --git a/third_party/rust/tokio-0.2.25/CHANGELOG.md b/third_party/rust/tokio-0.2.25/CHANGELOG.md deleted file mode 100644 index 025da0143c7c..000000000000 --- a/third_party/rust/tokio-0.2.25/CHANGELOG.md +++ /dev/null @@ -1,697 +0,0 @@ -# 0.2.25 (January 28, 2021) - -### Changes - -- chore: upgrade mio dependency (#3207) -- runtime: update panic messages to include version (#3460) - -### Fixes - -- task: add missing feature flags for `task_local` (#3236) - -# 0.2.24 (December 7, 2020) - -### Fixes - - - sync: fix mpsc bug related to closing the channel (#3215) - -# 0.2.23 (November 12, 2020) - -### Fixes - - - time: report correct error for timers that exceed max duration (#2023) - - time: fix resetting expired timers causing panics (#2587) - - macros: silence `unreachable_code` warning in `select!` (#2678) - - rt: fix potential leak during runtime shutdown (#2649) - - sync: fix missing notification during mpsc close (#2854) - -### Changes - - - io: always re-export `std::io` (#2606) - - dependencies: update `parking_lot` dependency to 0.11.0 (#2676) - - io: rewrite `read_to_end` and `read_to_string` (#2560) - - coop: reset coop budget when blocking in `block_on` (#2711) - - sync: better Debug for Mutex (#2725) - - net: make `UnixListener::poll_accept` public (#2880) - - dep: raise `lazy_static` to `1.4.0` (#3132) - - dep: raise `slab` to `0.4.2` (#3132) - -### Added - - - io: add `io::duplex()` as bidirectional reader/writer (#2661) - - net: introduce split and `into_split` on `UnixDatagram` (#2557) - - net: ensure that unix sockets have both `split` and `into_split` (#2687) - - net: add `try_recv`/`from` & `try_send`/`to` to UnixDatagram (#1677) - - net: Add `UdpSocket::{try_send,try_send_to}` methods (#1979) - - net: implement `ToSocketAddrs` for `(String, u16)` (#2724) - - io: add `ReaderStream` (#2714) - - sync: implement map methods (#2771) - -# 0.2.22 (July 21, 2020) - -### Fixes -- docs: misc improvements (#2572, #2658, #2663, #2656, #2647, #2630, #2487, #2621, - #2624, #2600, #2623, #2622, #2577, #2569, #2589, #2575, #2540, #2564, #2567, - #2520, #2521, #2493) -- rt: allow calls to `block_on` inside calls to `block_in_place` that are - themselves inside `block_on` (#2645) -- net: fix non-portable behavior when dropping `TcpStream` `OwnedWriteHalf` (#2597) -- io: improve stack usage by allocating large buffers on directly on the heap - (#2634) -- io: fix unsound pin projection in `AsyncReadExt::read_buf` and - `AsyncWriteExt::write_buf` (#2612) -- io: fix unnecessary zeroing for 
`AsyncRead` implementors (#2525) -- io: Fix `BufReader` not correctly forwarding `poll_write_buf` (#2654) -- io: fix panic in `AsyncReadExt::read_line` (#2541) - -### Changes -- coop: returning `Poll::Pending` no longer decrements the task budget (#2549) - -### Added -- io: little-endian variants of `AsyncReadExt` and `AsyncWriteExt` methods - (#1915) -- task: add [`tracing`] instrumentation to spawned tasks (#2655) -- sync: allow unsized types in `Mutex` and `RwLock` (via `default` constructors) - (#2615) -- net: add `ToSocketAddrs` implementation for `&[SocketAddr]` (#2604) -- fs: add `OpenOptionsExt` for `OpenOptions` (#2515) -- fs: add `DirBuilder` (#2524) - -[`tracing`]: https://crates.io/crates/tracing - -# 0.2.21 (May 13, 2020) - -### Fixes - -- macros: disambiguate built-in `#[test]` attribute in macro expansion (#2503) -- rt: `LocalSet` and task budgeting (#2462). -- rt: task budgeting with `block_in_place` (#2502). -- sync: release `broadcast` channel memory without sending a value (#2509). -- time: notify when resetting a `Delay` to a time in the past (#2290) - -### Added -- io: `get_mut`, `get_ref`, and `into_inner` to `Lines` (#2450). -- io: `mio::Ready` argument to `PollEvented` (#2419). -- os: illumos support (#2486). -- rt: `Handle::spawn_blocking` (#2501). -- sync: `OwnedMutexGuard` for `Arc>` (#2455). - -# 0.2.20 (April 28, 2020) - -### Fixes -- sync: `broadcast` closing the channel no longer requires capacity (#2448). -- rt: regression when configuring runtime with `max_threads` less than number of CPUs (#2457). - -# 0.2.19 (April 24, 2020) - -### Fixes -- docs: misc improvements (#2400, #2405, #2414, #2420, #2423, #2426, #2427, #2434, #2436, #2440). -- rt: support `block_in_place` in more contexts (#2409, #2410). -- stream: no panic in `merge()` and `chain()` when using `size_hint()` (#2430). -- task: include visibility modifier when defining a task-local (#2416). - -### Added -- rt: `runtime::Handle::block_on` (#2437). -- sync: owned `Semaphore` permit (#2421). -- tcp: owned split (#2270). - -# 0.2.18 (April 12, 2020) - -### Fixes -- task: `LocalSet` was incorrectly marked as `Send` (#2398) -- io: correctly report `WriteZero` failure in `write_int` (#2334) - -# 0.2.17 (April 9, 2020) - -### Fixes -- rt: bug in work-stealing queue (#2387) - -### Changes -- rt: threadpool uses logical CPU count instead of physical by default (#2391) - -# 0.2.16 (April 3, 2020) - -### Fixes - -- sync: fix a regression where `Mutex`, `Semaphore`, and `RwLock` futures no - longer implement `Sync` ([#2375]) -- fs: fix `fs::copy` not copying file permissions ([#2354]) - -### Added - -- time: added `deadline` method to `delay_queue::Expired` ([#2300]) -- io: added `StreamReader` ([#2052]) - -# 0.2.15 (April 2, 2020) - -### Fixes - -- rt: fix queue regression ([#2362]). - -### Added - -- sync: Add disarm to `mpsc::Sender` ([#2358]). - -# 0.2.14 (April 1, 2020) - -### Fixes -- rt: concurrency bug in scheduler ([#2273]). -- rt: concurrency bug with shell runtime ([#2333]). -- test-util: correct pause/resume of time ([#2253]). -- time: `DelayQueue` correct wakeup after `insert` ([#2285]). - -### Added -- io: impl `RawFd`, `AsRawHandle` for std io types ([#2335]). -- rt: automatic cooperative task yielding (#2160, #2343, #2349). -- sync: `RwLock::into_inner` ([#2321]). - -### Changed -- sync: semaphore, mutex internals rewritten to avoid allocations ([#2325]). - -# 0.2.13 (February 28, 2020) - -### Fixes -- macros: unresolved import in `pin!` ([#2281]). 
- -# 0.2.12 (February 27, 2020) - -### Fixes -- net: `UnixStream::poll_shutdown` should call `shutdown(Write)` ([#2245]). -- process: Wake up read and write on `EPOLLERR` ([#2218]). -- rt: potential deadlock when using `block_in_place` and shutting down the - runtime ([#2119]). -- rt: only detect number of CPUs if `core_threads` not specified ([#2238]). -- sync: reduce `watch::Receiver` struct size ([#2191]). -- time: succeed when setting delay of `$MAX-1` ([#2184]). -- time: avoid having to poll `DelayQueue` after inserting new delay ([#2217]). - -### Added -- macros: `pin!` variant that assigns to identifier and pins ([#2274]). -- net: impl `Stream` for `Listener` types ([#2275]). -- rt: `Runtime::shutdown_timeout` waits for runtime to shutdown for specified - duration ([#2186]). -- stream: `StreamMap` merges streams and can insert / remove streams at - runtime ([#2185]). -- stream: `StreamExt::skip()` skips a fixed number of items ([#2204]). -- stream: `StreamExt::skip_while()` skips items based on a predicate ([#2205]). -- sync: `Notify` provides basic `async` / `await` task notification ([#2210]). -- sync: `Mutex::into_inner` retrieves guarded data ([#2250]). -- sync: `mpsc::Sender::send_timeout` sends, waiting for up to specified duration - for channel capacity ([#2227]). -- time: impl `Ord` and `Hash` for `Instant` ([#2239]). - -# 0.2.11 (January 27, 2020) - -### Fixes -- docs: misc fixes and tweaks (#2155, #2103, #2027, #2167, #2175). -- macros: handle generics in `#[tokio::main]` method ([#2177]). -- sync: `broadcast` potential lost notifications ([#2135]). -- rt: improve "no runtime" panic messages ([#2145]). - -### Added -- optional support for using `parking_lot` internally ([#2164]). -- fs: `fs::copy`, an async version of `std::fs::copy` ([#2079]). -- macros: `select!` waits for the first branch to complete ([#2152]). -- macros: `join!` waits for all branches to complete ([#2158]). -- macros: `try_join!` waits for all branches to complete or the first error ([#2169]). -- macros: `pin!` pins a value to the stack ([#2163]). -- net: `ReadHalf::poll()` and `ReadHalf::poll_peak` ([#2151]) -- stream: `StreamExt::timeout()` sets a per-item max duration ([#2149]). -- stream: `StreamExt::fold()` applies a function, producing a single value. ([#2122]). -- sync: impl `Eq`, `PartialEq` for `oneshot::RecvError` ([#2168]). -- task: methods for inspecting the `JoinError` cause ([#2051]). - -# 0.2.10 (January 21, 2020) - -### Fixes -- `#[tokio::main]` when `rt-core` feature flag is not enabled ([#2139]). -- remove `AsyncBufRead` from `BufStream` impl block ([#2108]). -- potential undefined behavior when implementing `AsyncRead` incorrectly ([#2030]). - -### Added -- `BufStream::with_capacity` ([#2125]). -- impl `From` and `Default` for `RwLock` ([#2089]). -- `io::ReadHalf::is_pair_of` checks if provided `WriteHalf` is for the same - underlying object (#1762, #2144). -- `runtime::Handle::try_current()` returns a handle to the current runtime ([#2118]). -- `stream::empty()` returns an immediately ready empty stream ([#2092]). -- `stream::once(val)` returns a stream that yields a single value: `val` ([#2094]). -- `stream::pending()` returns a stream that never becomes ready ([#2092]). -- `StreamExt::chain()` sequences a second stream after the first completes ([#2093]). -- `StreamExt::collect()` transform a stream into a collection ([#2109]). -- `StreamExt::fuse` ends the stream after the first `None` ([#2085]). 
-- `StreamExt::merge` combines two streams, yielding values as they become ready ([#2091]). -- Task-local storage ([#2126]). - -# 0.2.9 (January 9, 2020) - -### Fixes -- `AsyncSeek` impl for `File` ([#1986]). -- rt: shutdown deadlock in `threaded_scheduler` (#2074, #2082). -- rt: memory ordering when dropping `JoinHandle` ([#2044]). -- docs: misc API documentation fixes and improvements. - -# 0.2.8 (January 7, 2020) - -### Fixes -- depend on new version of `tokio-macros`. - -# 0.2.7 (January 7, 2020) - -### Fixes -- potential deadlock when dropping `basic_scheduler` Runtime. -- calling `spawn_blocking` from within a `spawn_blocking` ([#2006]). -- storing a `Runtime` instance in a thread-local ([#2011]). -- miscellaneous documentation fixes. -- rt: fix `Waker::will_wake` to return true when tasks match ([#2045]). -- test-util: `time::advance` runs pending tasks before changing the time ([#2059]). - -### Added -- `net::lookup_host` maps a `T: ToSocketAddrs` to a stream of `SocketAddrs` ([#1870]). -- `process::Child` fields are made public to match `std` ([#2014]). -- impl `Stream` for `sync::broadcast::Receiver` ([#2012]). -- `sync::RwLock` provides an asynchonous read-write lock ([#1699]). -- `runtime::Handle::current` returns the handle for the current runtime ([#2040]). -- `StreamExt::filter` filters stream values according to a predicate ([#2001]). -- `StreamExt::filter_map` simultaneously filter and map stream values ([#2001]). -- `StreamExt::try_next` convenience for streams of `Result` ([#2005]). -- `StreamExt::take` limits a stream to a specified number of values ([#2025]). -- `StreamExt::take_while` limits a stream based on a predicate ([#2029]). -- `StreamExt::all` tests if every element of the stream matches a predicate ([#2035]). -- `StreamExt::any` tests if any element of the stream matches a predicate ([#2034]). -- `task::LocalSet.await` runs spawned tasks until the set is idle ([#1971]). -- `time::DelayQueue::len` returns the number entries in the queue ([#1755]). -- expose runtime options from the `#[tokio::main]` and `#[tokio::test]` ([#2022]). - -# 0.2.6 (December 19, 2019) - -### Fixes -- `fs::File::seek` API regression ([#1991]). - -# 0.2.5 (December 18, 2019) - -### Added -- `io::AsyncSeek` trait ([#1924]). -- `Mutex::try_lock` ([#1939]) -- `mpsc::Receiver::try_recv` and `mpsc::UnboundedReceiver::try_recv` ([#1939]). -- `writev` support for `TcpStream` ([#1956]). -- `time::throttle` for throttling streams ([#1949]). -- implement `Stream` for `time::DelayQueue` ([#1975]). -- `sync::broadcast` provides a fan-out channel ([#1943]). -- `sync::Semaphore` provides an async semaphore ([#1973]). -- `stream::StreamExt` provides stream utilities ([#1962]). - -### Fixes -- deadlock risk while shutting down the runtime ([#1972]). -- panic while shutting down the runtime ([#1978]). -- `sync::MutexGuard` debug output ([#1961]). -- misc doc improvements (#1933, #1934, #1940, #1942). - -### Changes -- runtime threads are configured with `runtime::Builder::core_threads` and - `runtime::Builder::max_threads`. `runtime::Builder::num_threads` is - deprecated ([#1977]). - -# 0.2.4 (December 6, 2019) - -### Fixes -- `sync::Mutex` deadlock when `lock()` future is dropped early ([#1898]). - -# 0.2.3 (December 6, 2019) - -### Added -- read / write integers using `AsyncReadExt` and `AsyncWriteExt` ([#1863]). -- `read_buf` / `write_buf` for reading / writing `Buf` / `BufMut` ([#1881]). -- `TcpStream::poll_peek` - pollable API for performing TCP peek ([#1864]). 
-- `sync::oneshot::error::TryRecvError` provides variants to detect the error - kind ([#1874]). -- `LocalSet::block_on` accepts `!'static` task ([#1882]). -- `task::JoinError` is now `Sync` ([#1888]). -- impl conversions between `tokio::time::Instant` and - `std::time::Instant` ([#1904]). - -### Fixes -- calling `spawn_blocking` after runtime shutdown ([#1875]). -- `LocalSet` drop inifinite loop ([#1892]). -- `LocalSet` hang under load ([#1905]). -- improved documentation (#1865, #1866, #1868, #1874, #1876, #1911). - -# 0.2.2 (November 29, 2019) - -### Fixes -- scheduling with `basic_scheduler` ([#1861]). -- update `spawn` panic message to specify that a task scheduler is required ([#1839]). -- API docs example for `runtime::Builder` to include a task scheduler ([#1841]). -- general documentation ([#1834]). -- building on illumos/solaris ([#1772]). -- panic when dropping `LocalSet` ([#1843]). -- API docs mention the required Cargo features for `Builder::{basic, threaded}_scheduler` ([#1858]). - -### Added -- impl `Stream` for `signal::unix::Signal` ([#1849]). -- API docs for platform specific behavior of `signal::ctrl_c` and `signal::unix::Signal` ([#1854]). -- API docs for `signal::unix::Signal::{recv, poll_recv}` and `signal::windows::CtrlBreak::{recv, poll_recv}` ([#1854]). -- `File::into_std` and `File::try_into_std` methods ([#1856]). - -# 0.2.1 (November 26, 2019) - -### Fixes -- API docs for `TcpListener::incoming`, `UnixListener::incoming` ([#1831]). - -### Added -- `tokio::task::LocalSet` provides a strategy for spawning `!Send` tasks ([#1733]). -- export `tokio::time::Elapsed` ([#1826]). -- impl `AsRawFd`, `AsRawHandle` for `tokio::fs::File` ([#1827]). - -# 0.2.0 (November 26, 2019) - -A major breaking change. Most implementation and APIs have changed one way or -another. This changelog entry contains a highlight - -### Changed -- APIs are updated to use `async / await`. -- most `tokio-*` crates are collapsed into this crate. -- Scheduler is rewritten. -- `tokio::spawn` returns a `JoinHandle`. -- A single I/O / timer is used per runtime. -- I/O driver uses a concurrent slab for allocating state. -- components are made available via feature flag. -- Use `bytes` 0.5 -- `tokio::codec` is moved to `tokio-util`. - -### Removed -- Standalone `timer` and `net` drivers are removed, use `Runtime` instead -- `current_thread` runtime is removed, use `tokio::runtime::Runtime` with - `basic_scheduler` instead. - -# 0.1.21 (May 30, 2019) - -### Changed -- Bump `tokio-trace-core` version to 0.2 ([#1111]). - -# 0.1.20 (May 14, 2019) - -### Added -- `tokio::runtime::Builder::panic_handler` allows configuring handling - panics on the runtime ([#1055]). - -# 0.1.19 (April 22, 2019) - -### Added -- Re-export `tokio::sync::Mutex` primitive ([#964]). - -# 0.1.18 (March 22, 2019) - -### Added -- `TypedExecutor` re-export and implementations ([#993]). - -# 0.1.17 (March 13, 2019) - -### Added -- Propagate trace subscriber in the runtime ([#966]). - -# 0.1.16 (March 1, 2019) - -### Fixed -- async-await: track latest nightly changes ([#940]). - -### Added -- `sync::Watch`, a single value broadcast channel ([#922]). -- Async equivalent of read / write file helpers being added to `std` ([#896]). - -# 0.1.15 (January 24, 2019) - -### Added -- Re-export tokio-sync APIs ([#839]). -- Stream enumerate combinator ([#832]). - -# 0.1.14 (January 6, 2019) - -* Use feature flags to break up the crate, allowing users to pick & choose - components ([#808]). -* Export `UnixDatagram` and `UnixDatagramFramed` ([#772]). 
- -# 0.1.13 (November 21, 2018) - -* Fix `Runtime::reactor()` when no tasks are spawned ([#721]). -* `runtime::Builder` no longer uses deprecated methods ([#749]). -* Provide `after_start` and `before_stop` configuration settings for - `Runtime` ([#756]). -* Implement throttle stream combinator ([#736]). - -# 0.1.12 (October 23, 2018) - -* runtime: expose `keep_alive` on runtime builder ([#676]). -* runtime: create a reactor per worker thread ([#660]). -* codec: fix panic in `LengthDelimitedCodec` ([#682]). -* io: re-export `tokio_io::io::read` function ([#689]). -* runtime: check for executor re-entry in more places ([#708]). - -# 0.1.11 (September 28, 2018) - -* Fix `tokio-async-await` dependency ([#675]). - -# 0.1.10 (September 27, 2018) - -* Fix minimal versions - -# 0.1.9 (September 27, 2018) - -* Experimental async/await improvements ([#661]). -* Re-export `TaskExecutor` from `tokio-current-thread` ([#652]). -* Improve `Runtime` builder API ([#645]). -* `tokio::run` panics when called from the context of an executor - ([#646]). -* Introduce `StreamExt` with a `timeout` helper ([#573]). -* Move `length_delimited` into `tokio` ([#575]). -* Re-organize `tokio::net` module ([#548]). -* Re-export `tokio-current-thread::spawn` in current_thread runtime - ([#579]). - -# 0.1.8 (August 23, 2018) - -* Extract tokio::executor::current_thread to a sub crate ([#370]) -* Add `Runtime::block_on` ([#398]) -* Add `runtime::current_thread::block_on_all` ([#477]) -* Misc documentation improvements ([#450]) -* Implement `std::error::Error` for error types ([#501]) - -# 0.1.7 (June 6, 2018) - -* Add `Runtime::block_on` for concurrent runtime ([#391]). -* Provide handle to `current_thread::Runtime` that allows spawning tasks from - other threads ([#340]). -* Provide `clock::now()`, a configurable source of time ([#381]). - -# 0.1.6 (May 2, 2018) - -* Add asynchronous filesystem APIs ([#323]). -* Add "current thread" runtime variant ([#308]). -* `CurrentThread`: Expose inner `Park` instance. -* Improve fairness of `CurrentThread` executor ([#313]). - -# 0.1.5 (March 30, 2018) - -* Provide timer API ([#266]) - -# 0.1.4 (March 22, 2018) - -* Fix build on FreeBSD ([#218]) -* Shutdown the Runtime when the handle is dropped ([#214]) -* Set Runtime thread name prefix for worker threads ([#232]) -* Add builder for Runtime ([#234]) -* Extract TCP and UDP types into separate crates ([#224]) -* Optionally support futures 0.2. - -# 0.1.3 (March 09, 2018) - -* Fix `CurrentThread::turn` to block on idle ([#212]). - -# 0.1.2 (March 09, 2018) - -* Introduce Tokio Runtime ([#141]) -* Provide `CurrentThread` for more flexible usage of current thread executor ([#141]). -* Add Lio for platforms that support it ([#142]). -* I/O resources now lazily bind to the reactor ([#160]). -* Extract Reactor to dedicated crate ([#169]) -* Add facade to sub crates and add prelude ([#166]). -* Switch TCP/UDP fns to poll_ -> Poll<...> style ([#175]) - -# 0.1.1 (February 09, 2018) - -* Doc fixes - -# 0.1.0 (February 07, 2018) - -* Initial crate released based on [RFC](https://github.com/tokio-rs/tokio-rfcs/pull/3). 
- -[#2375]: https://github.com/tokio-rs/tokio/pull/2375 -[#2362]: https://github.com/tokio-rs/tokio/pull/2362 -[#2358]: https://github.com/tokio-rs/tokio/pull/2358 -[#2354]: https://github.com/tokio-rs/tokio/pull/2354 -[#2335]: https://github.com/tokio-rs/tokio/pull/2335 -[#2333]: https://github.com/tokio-rs/tokio/pull/2333 -[#2325]: https://github.com/tokio-rs/tokio/pull/2325 -[#2321]: https://github.com/tokio-rs/tokio/pull/2321 -[#2300]: https://github.com/tokio-rs/tokio/pull/2300 -[#2285]: https://github.com/tokio-rs/tokio/pull/2285 -[#2281]: https://github.com/tokio-rs/tokio/pull/2281 -[#2275]: https://github.com/tokio-rs/tokio/pull/2275 -[#2274]: https://github.com/tokio-rs/tokio/pull/2274 -[#2273]: https://github.com/tokio-rs/tokio/pull/2273 -[#2253]: https://github.com/tokio-rs/tokio/pull/2253 -[#2250]: https://github.com/tokio-rs/tokio/pull/2250 -[#2245]: https://github.com/tokio-rs/tokio/pull/2245 -[#2239]: https://github.com/tokio-rs/tokio/pull/2239 -[#2238]: https://github.com/tokio-rs/tokio/pull/2238 -[#2227]: https://github.com/tokio-rs/tokio/pull/2227 -[#2218]: https://github.com/tokio-rs/tokio/pull/2218 -[#2217]: https://github.com/tokio-rs/tokio/pull/2217 -[#2210]: https://github.com/tokio-rs/tokio/pull/2210 -[#2205]: https://github.com/tokio-rs/tokio/pull/2205 -[#2204]: https://github.com/tokio-rs/tokio/pull/2204 -[#2191]: https://github.com/tokio-rs/tokio/pull/2191 -[#2186]: https://github.com/tokio-rs/tokio/pull/2186 -[#2185]: https://github.com/tokio-rs/tokio/pull/2185 -[#2184]: https://github.com/tokio-rs/tokio/pull/2184 -[#2177]: https://github.com/tokio-rs/tokio/pull/2177 -[#2169]: https://github.com/tokio-rs/tokio/pull/2169 -[#2168]: https://github.com/tokio-rs/tokio/pull/2168 -[#2164]: https://github.com/tokio-rs/tokio/pull/2164 -[#2163]: https://github.com/tokio-rs/tokio/pull/2163 -[#2158]: https://github.com/tokio-rs/tokio/pull/2158 -[#2152]: https://github.com/tokio-rs/tokio/pull/2152 -[#2151]: https://github.com/tokio-rs/tokio/pull/2151 -[#2149]: https://github.com/tokio-rs/tokio/pull/2149 -[#2145]: https://github.com/tokio-rs/tokio/pull/2145 -[#2139]: https://github.com/tokio-rs/tokio/pull/2139 -[#2135]: https://github.com/tokio-rs/tokio/pull/2135 -[#2126]: https://github.com/tokio-rs/tokio/pull/2126 -[#2125]: https://github.com/tokio-rs/tokio/pull/2125 -[#2122]: https://github.com/tokio-rs/tokio/pull/2122 -[#2119]: https://github.com/tokio-rs/tokio/pull/2119 -[#2118]: https://github.com/tokio-rs/tokio/pull/2118 -[#2109]: https://github.com/tokio-rs/tokio/pull/2109 -[#2108]: https://github.com/tokio-rs/tokio/pull/2108 -[#2094]: https://github.com/tokio-rs/tokio/pull/2094 -[#2093]: https://github.com/tokio-rs/tokio/pull/2093 -[#2092]: https://github.com/tokio-rs/tokio/pull/2092 -[#2091]: https://github.com/tokio-rs/tokio/pull/2091 -[#2089]: https://github.com/tokio-rs/tokio/pull/2089 -[#2085]: https://github.com/tokio-rs/tokio/pull/2085 -[#2079]: https://github.com/tokio-rs/tokio/pull/2079 -[#2059]: https://github.com/tokio-rs/tokio/pull/2059 -[#2052]: https://github.com/tokio-rs/tokio/pull/2052 -[#2051]: https://github.com/tokio-rs/tokio/pull/2051 -[#2045]: https://github.com/tokio-rs/tokio/pull/2045 -[#2044]: https://github.com/tokio-rs/tokio/pull/2044 -[#2040]: https://github.com/tokio-rs/tokio/pull/2040 -[#2035]: https://github.com/tokio-rs/tokio/pull/2035 -[#2034]: https://github.com/tokio-rs/tokio/pull/2034 -[#2030]: https://github.com/tokio-rs/tokio/pull/2030 -[#2029]: https://github.com/tokio-rs/tokio/pull/2029 -[#2025]: 
https://github.com/tokio-rs/tokio/pull/2025 -[#2022]: https://github.com/tokio-rs/tokio/pull/2022 -[#2014]: https://github.com/tokio-rs/tokio/pull/2014 -[#2012]: https://github.com/tokio-rs/tokio/pull/2012 -[#2011]: https://github.com/tokio-rs/tokio/pull/2011 -[#2006]: https://github.com/tokio-rs/tokio/pull/2006 -[#2005]: https://github.com/tokio-rs/tokio/pull/2005 -[#2001]: https://github.com/tokio-rs/tokio/pull/2001 -[#1991]: https://github.com/tokio-rs/tokio/pull/1991 -[#1986]: https://github.com/tokio-rs/tokio/pull/1986 -[#1978]: https://github.com/tokio-rs/tokio/pull/1978 -[#1977]: https://github.com/tokio-rs/tokio/pull/1977 -[#1975]: https://github.com/tokio-rs/tokio/pull/1975 -[#1973]: https://github.com/tokio-rs/tokio/pull/1973 -[#1972]: https://github.com/tokio-rs/tokio/pull/1972 -[#1971]: https://github.com/tokio-rs/tokio/pull/1971 -[#1962]: https://github.com/tokio-rs/tokio/pull/1962 -[#1961]: https://github.com/tokio-rs/tokio/pull/1961 -[#1956]: https://github.com/tokio-rs/tokio/pull/1956 -[#1949]: https://github.com/tokio-rs/tokio/pull/1949 -[#1943]: https://github.com/tokio-rs/tokio/pull/1943 -[#1939]: https://github.com/tokio-rs/tokio/pull/1939 -[#1924]: https://github.com/tokio-rs/tokio/pull/1924 -[#1905]: https://github.com/tokio-rs/tokio/pull/1905 -[#1904]: https://github.com/tokio-rs/tokio/pull/1904 -[#1898]: https://github.com/tokio-rs/tokio/pull/1898 -[#1892]: https://github.com/tokio-rs/tokio/pull/1892 -[#1888]: https://github.com/tokio-rs/tokio/pull/1888 -[#1882]: https://github.com/tokio-rs/tokio/pull/1882 -[#1881]: https://github.com/tokio-rs/tokio/pull/1881 -[#1875]: https://github.com/tokio-rs/tokio/pull/1875 -[#1874]: https://github.com/tokio-rs/tokio/pull/1874 -[#1870]: https://github.com/tokio-rs/tokio/pull/1870 -[#1864]: https://github.com/tokio-rs/tokio/pull/1864 -[#1863]: https://github.com/tokio-rs/tokio/pull/1863 -[#1861]: https://github.com/tokio-rs/tokio/pull/1861 -[#1858]: https://github.com/tokio-rs/tokio/pull/1858 -[#1856]: https://github.com/tokio-rs/tokio/pull/1856 -[#1854]: https://github.com/tokio-rs/tokio/pull/1854 -[#1849]: https://github.com/tokio-rs/tokio/pull/1849 -[#1843]: https://github.com/tokio-rs/tokio/pull/1843 -[#1841]: https://github.com/tokio-rs/tokio/pull/1841 -[#1839]: https://github.com/tokio-rs/tokio/pull/1839 -[#1834]: https://github.com/tokio-rs/tokio/pull/1834 -[#1831]: https://github.com/tokio-rs/tokio/pull/1831 -[#1827]: https://github.com/tokio-rs/tokio/pull/1827 -[#1826]: https://github.com/tokio-rs/tokio/pull/1826 -[#1772]: https://github.com/tokio-rs/tokio/pull/1772 -[#1755]: https://github.com/tokio-rs/tokio/pull/1755 -[#1733]: https://github.com/tokio-rs/tokio/pull/1733 -[#1699]: https://github.com/tokio-rs/tokio/pull/1699 -[#1111]: https://github.com/tokio-rs/tokio/pull/1111 -[#1055]: https://github.com/tokio-rs/tokio/pull/1055 -[#993]: https://github.com/tokio-rs/tokio/pull/993 -[#966]: https://github.com/tokio-rs/tokio/pull/966 -[#964]: https://github.com/tokio-rs/tokio/pull/964 -[#940]: https://github.com/tokio-rs/tokio/pull/940 -[#922]: https://github.com/tokio-rs/tokio/pull/922 -[#896]: https://github.com/tokio-rs/tokio/pull/896 -[#839]: https://github.com/tokio-rs/tokio/pull/839 -[#832]: https://github.com/tokio-rs/tokio/pull/832 -[#808]: https://github.com/tokio-rs/tokio/pull/808 -[#772]: https://github.com/tokio-rs/tokio/pull/772 -[#756]: https://github.com/tokio-rs/tokio/pull/756 -[#749]: https://github.com/tokio-rs/tokio/pull/749 -[#736]: https://github.com/tokio-rs/tokio/pull/736 -[#721]: 
https://github.com/tokio-rs/tokio/pull/721 -[#708]: https://github.com/tokio-rs/tokio/pull/708 -[#689]: https://github.com/tokio-rs/tokio/pull/689 -[#682]: https://github.com/tokio-rs/tokio/pull/682 -[#676]: https://github.com/tokio-rs/tokio/pull/676 -[#675]: https://github.com/tokio-rs/tokio/pull/675 -[#661]: https://github.com/tokio-rs/tokio/pull/661 -[#660]: https://github.com/tokio-rs/tokio/pull/660 -[#652]: https://github.com/tokio-rs/tokio/pull/652 -[#646]: https://github.com/tokio-rs/tokio/pull/646 -[#645]: https://github.com/tokio-rs/tokio/pull/645 -[#579]: https://github.com/tokio-rs/tokio/pull/579 -[#575]: https://github.com/tokio-rs/tokio/pull/575 -[#573]: https://github.com/tokio-rs/tokio/pull/573 -[#548]: https://github.com/tokio-rs/tokio/pull/548 -[#501]: https://github.com/tokio-rs/tokio/pull/501 -[#477]: https://github.com/tokio-rs/tokio/pull/477 -[#450]: https://github.com/tokio-rs/tokio/pull/450 -[#398]: https://github.com/tokio-rs/tokio/pull/398 -[#391]: https://github.com/tokio-rs/tokio/pull/391 -[#381]: https://github.com/tokio-rs/tokio/pull/381 -[#370]: https://github.com/tokio-rs/tokio/pull/370 -[#340]: https://github.com/tokio-rs/tokio/pull/340 -[#323]: https://github.com/tokio-rs/tokio/pull/323 -[#313]: https://github.com/tokio-rs/tokio/pull/313 -[#308]: https://github.com/tokio-rs/tokio/pull/308 -[#266]: https://github.com/tokio-rs/tokio/pull/266 -[#234]: https://github.com/tokio-rs/tokio/pull/234 -[#232]: https://github.com/tokio-rs/tokio/pull/232 -[#224]: https://github.com/tokio-rs/tokio/pull/224 -[#218]: https://github.com/tokio-rs/tokio/pull/218 -[#214]: https://github.com/tokio-rs/tokio/pull/214 -[#212]: https://github.com/tokio-rs/tokio/pull/212 -[#175]: https://github.com/tokio-rs/tokio/pull/175 -[#169]: https://github.com/tokio-rs/tokio/pull/169 -[#166]: https://github.com/tokio-rs/tokio/pull/166 -[#160]: https://github.com/tokio-rs/tokio/pull/160 -[#142]: https://github.com/tokio-rs/tokio/pull/142 -[#141]: https://github.com/tokio-rs/tokio/pull/141 diff --git a/third_party/rust/tokio-0.2.25/Cargo.toml b/third_party/rust/tokio-0.2.25/Cargo.toml deleted file mode 100644 index 1332b0ced9d4..000000000000 --- a/third_party/rust/tokio-0.2.25/Cargo.toml +++ /dev/null @@ -1,143 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies -# -# If you believe there's an error in this file please file an -# issue against the rust-lang/cargo repository. 
If you're -# editing this file be aware that the upstream Cargo.toml -# will likely look very different (and much more reasonable) - -[package] -edition = "2018" -name = "tokio" -version = "0.2.25" -authors = ["Tokio Contributors "] -description = "An event-driven, non-blocking I/O platform for writing asynchronous I/O\nbacked applications.\n" -homepage = "https://tokio.rs" -documentation = "https://docs.rs/tokio/0.2.25/tokio/" -readme = "README.md" -keywords = ["io", "async", "non-blocking", "futures"] -categories = ["asynchronous", "network-programming"] -license = "MIT" -repository = "https://github.com/tokio-rs/tokio" -[package.metadata.docs.rs] -all-features = true -rustdoc-args = ["--cfg", "docsrs"] - -[package.metadata.playground] -features = ["full"] -[dependencies.bytes] -version = "0.5.0" - -[dependencies.fnv] -version = "1.0.6" -optional = true - -[dependencies.futures-core] -version = "0.3.0" -optional = true - -[dependencies.iovec] -version = "0.1.4" -optional = true - -[dependencies.lazy_static] -version = "1.4.0" -optional = true - -[dependencies.memchr] -version = "2.2" -optional = true - -[dependencies.mio] -version = "0.6.23" -optional = true - -[dependencies.num_cpus] -version = "1.8.0" -optional = true - -[dependencies.parking_lot] -version = "0.11.0" -optional = true - -[dependencies.pin-project-lite] -version = "0.1.1" - -[dependencies.slab] -version = "0.4.2" -optional = true - -[dependencies.tokio-macros] -version = "0.2.6" -optional = true - -[dependencies.tracing] -version = "0.1.16" -features = ["std"] -optional = true -default-features = false -[dev-dependencies.futures] -version = "0.3.0" -features = ["async-await"] - -[dev-dependencies.futures-test] -version = "0.3.0" - -[dev-dependencies.proptest] -version = "0.9.4" - -[dev-dependencies.tempfile] -version = "3.1.0" - -[dev-dependencies.tokio-test] -version = "0.2.0" - -[features] -blocking = ["rt-core"] -default = [] -dns = ["rt-core"] -fs = ["rt-core", "io-util"] -full = ["blocking", "dns", "fs", "io-driver", "io-util", "io-std", "macros", "net", "process", "rt-core", "rt-util", "rt-threaded", "signal", "stream", "sync", "time"] -io-driver = ["mio", "lazy_static"] -io-std = ["rt-core"] -io-util = ["memchr"] -macros = ["tokio-macros"] -net = ["dns", "tcp", "udp", "uds"] -process = ["io-driver", "libc", "mio-named-pipes", "signal", "winapi/consoleapi", "winapi/minwindef", "winapi/threadpoollegacyapiset", "winapi/winerror"] -rt-core = ["slab"] -rt-threaded = ["num_cpus", "rt-core"] -rt-util = [] -signal = ["io-driver", "lazy_static", "libc", "mio-uds", "signal-hook-registry", "winapi/consoleapi", "winapi/minwindef"] -stream = ["futures-core"] -sync = ["fnv"] -tcp = ["io-driver", "iovec"] -test-util = [] -time = ["slab"] -udp = ["io-driver"] -uds = ["io-driver", "mio-uds", "libc"] -[target."cfg(loom)".dev-dependencies.loom] -version = "0.3.5" -features = ["futures", "checkpoint"] -[target."cfg(unix)".dependencies.libc] -version = "0.2.42" -optional = true - -[target."cfg(unix)".dependencies.mio-uds] -version = "0.6.5" -optional = true - -[target."cfg(unix)".dependencies.signal-hook-registry] -version = "1.1.1" -optional = true -[target."cfg(windows)".dependencies.mio-named-pipes] -version = "0.1.6" -optional = true - -[target."cfg(windows)".dependencies.winapi] -version = "0.3.8" -optional = true -default-features = false diff --git a/third_party/rust/tokio-0.2.25/README.md b/third_party/rust/tokio-0.2.25/README.md deleted file mode 100644 index da9078c58248..000000000000 --- 
a/third_party/rust/tokio-0.2.25/README.md +++ /dev/null @@ -1,174 +0,0 @@ -# Tokio - -A runtime for writing reliable, asynchronous, and slim applications with -the Rust programming language. It is: - -* **Fast**: Tokio's zero-cost abstractions give you bare-metal - performance. - -* **Reliable**: Tokio leverages Rust's ownership, type system, and - concurrency model to reduce bugs and ensure thread safety. - -* **Scalable**: Tokio has a minimal footprint, and handles backpressure - and cancellation naturally. - -[![Crates.io][crates-badge]][crates-url] -[![MIT licensed][mit-badge]][mit-url] -[![Build Status][azure-badge]][azure-url] -[![Discord chat][discord-badge]][discord-url] - -[crates-badge]: https://img.shields.io/crates/v/tokio.svg -[crates-url]: https://crates.io/crates/tokio -[mit-badge]: https://img.shields.io/badge/license-MIT-blue.svg -[mit-url]: https://github.com/tokio-rs/tokio/blob/master/LICENSE -[azure-badge]: https://dev.azure.com/tokio-rs/Tokio/_apis/build/status/tokio-rs.tokio?branchName=master -[azure-url]: https://dev.azure.com/tokio-rs/Tokio/_build/latest?definitionId=1&branchName=master -[discord-badge]: https://img.shields.io/discord/500028886025895936.svg?logo=discord&style=flat-square -[discord-url]: https://discord.gg/tokio - -[Website](https://tokio.rs) | -[Guides](https://tokio.rs/tokio/tutorial) | -[API Docs](https://docs.rs/tokio/latest/tokio) | -[Roadmap](https://github.com/tokio-rs/tokio/blob/master/ROADMAP.md) | -[Chat](https://discord.gg/tokio) - -## Overview - -Tokio is an event-driven, non-blocking I/O platform for writing -asynchronous applications with the Rust programming language. At a high -level, it provides a few major components: - -* A multithreaded, work-stealing based task [scheduler]. -* A reactor backed by the operating system's event queue (epoll, kqueue, - IOCP, etc...). -* Asynchronous [TCP and UDP][net] sockets. - -These components provide the runtime components necessary for building -an asynchronous application. - -[net]: https://docs.rs/tokio/latest/tokio/net/index.html -[scheduler]: https://docs.rs/tokio/latest/tokio/runtime/index.html - -## Example - -A basic TCP echo server with Tokio: - -```rust,no_run -use tokio::net::TcpListener; -use tokio::prelude::*; - -#[tokio::main] -async fn main() -> Result<(), Box> { - let mut listener = TcpListener::bind("127.0.0.1:8080").await?; - - loop { - let (mut socket, _) = listener.accept().await?; - - tokio::spawn(async move { - let mut buf = [0; 1024]; - - // In a loop, read data from the socket and write the data back. - loop { - let n = match socket.read(&mut buf).await { - // socket closed - Ok(n) if n == 0 => return, - Ok(n) => n, - Err(e) => { - eprintln!("failed to read from socket; err = {:?}", e); - return; - } - }; - - // Write the data back - if let Err(e) = socket.write_all(&buf[0..n]).await { - eprintln!("failed to write to socket; err = {:?}", e); - return; - } - } - }); - } -} -``` - -More examples can be found [here][examples]. For a larger "real world" example, see the -[mini-redis] repository. - -[examples]: https://github.com/tokio-rs/tokio/tree/master/examples -[mini-redis]: https://github.com/tokio-rs/mini-redis/ - -To see a list of the available features flags that can be enabled, check our -[docs][feature-flag-docs]. - -## Getting Help - -First, see if the answer to your question can be found in the [Guides] or the -[API documentation]. If the answer is not there, there is an active community in -the [Tokio Discord server][chat]. 
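A small, self-contained reproduction usually gets the quickest answers. As a rough starting point, the sketch below pairs the echo server shown above with a minimal tokio 0.2 client; the address is the one from the example, while the message and buffer size are illustrative assumptions rather than part of the original README.

```rust,no_run
use tokio::net::TcpStream;
use tokio::prelude::*; // brings the `AsyncReadExt` / `AsyncWriteExt` methods into scope

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Connect to the echo server from the example above (assumed to be running).
    let mut stream = TcpStream::connect("127.0.0.1:8080").await?;

    // Send a message and read back whatever the server echoes.
    stream.write_all(b"hello tokio").await?;

    let mut buf = [0u8; 1024];
    let n = stream.read(&mut buf).await?;
    println!("echoed back: {:?}", &buf[..n]);

    Ok(())
}
```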
We would be happy to try to answer your -question. You can also ask your question on [the discussions page][discussions]. - -[Guides]: https://tokio.rs/tokio/tutorial -[API documentation]: https://docs.rs/tokio/latest/tokio -[chat]: https://discord.gg/tokio -[discussions]: https://github.com/tokio-rs/tokio/discussions -[feature-flag-docs]: https://docs.rs/tokio/#feature-flags - -## Contributing - -:balloon: Thanks for your help improving the project! We are so happy to have -you! We have a [contributing guide][guide] to help you get involved in the Tokio -project. - -[guide]: https://github.com/tokio-rs/tokio/blob/master/CONTRIBUTING.md - -## Related Projects - -In addition to the crates in this repository, the Tokio project also maintains -several other libraries, including: - -* [`hyper`]: A fast and correct HTTP/1.1 and HTTP/2 implementation for Rust. - -* [`tonic`]: A gRPC over HTTP/2 implementation focused on high performance, interoperability, and flexibility. - -* [`warp`]: A super-easy, composable, web server framework for warp speeds. - -* [`tower`]: A library of modular and reusable components for building robust networking clients and servers. - -* [`tracing`] (formerly `tokio-trace`): A framework for application-level - tracing and async-aware diagnostics. - -* [`rdbc`]: A Rust database connectivity library for MySQL, Postgres and SQLite. - -* [`mio`]: A low-level, cross-platform abstraction over OS I/O APIs that powers - `tokio`. - -* [`bytes`]: Utilities for working with bytes, including efficient byte buffers. - -* [`loom`]: A testing tool for concurrent Rust code - -[`warp`]: https://github.com/seanmonstar/warp -[`hyper`]: https://github.com/hyperium/hyper -[`tonic`]: https://github.com/hyperium/tonic -[`tower`]: https://github.com/tower-rs/tower -[`loom`]: https://github.com/tokio-rs/loom -[`rdbc`]: https://github.com/tokio-rs/rdbc -[`tracing`]: https://github.com/tokio-rs/tracing -[`mio`]: https://github.com/tokio-rs/mio -[`bytes`]: https://github.com/tokio-rs/bytes - -## Supported Rust Versions - -Tokio is built against the latest stable release. The minimum supported version is 1.39. -The current Tokio version is not guaranteed to build on Rust versions earlier than the -minimum supported version. - -## License - -This project is licensed under the [MIT license]. - -[MIT license]: https://github.com/tokio-rs/tokio/blob/master/LICENSE - -### Contribution - -Unless you explicitly state otherwise, any contribution intentionally submitted -for inclusion in Tokio by you, shall be licensed as MIT, without any additional -terms or conditions. diff --git a/third_party/rust/tokio-0.2.25/src/coop.rs b/third_party/rust/tokio-0.2.25/src/coop.rs deleted file mode 100644 index 27e969c59d40..000000000000 --- a/third_party/rust/tokio-0.2.25/src/coop.rs +++ /dev/null @@ -1,301 +0,0 @@ -//! Opt-in yield points for improved cooperative scheduling. -//! -//! A single call to [`poll`] on a top-level task may potentially do a lot of -//! work before it returns `Poll::Pending`. If a task runs for a long period of -//! time without yielding back to the executor, it can starve other tasks -//! waiting on that executor to execute them, or drive underlying resources. -//! Since Rust does not have a runtime, it is difficult to forcibly preempt a -//! long-running task. Instead, this module provides an opt-in mechanism for -//! futures to collaborate with the executor to avoid starvation. -//! -//! Consider a future like this one: -//! -//! ``` -//! # use tokio::stream::{Stream, StreamExt}; -//! 
async fn drop_all(mut input: I) { -//! while let Some(_) = input.next().await {} -//! } -//! ``` -//! -//! It may look harmless, but consider what happens under heavy load if the -//! input stream is _always_ ready. If we spawn `drop_all`, the task will never -//! yield, and will starve other tasks and resources on the same executor. With -//! opt-in yield points, this problem is alleviated: -//! -//! ```ignore -//! # use tokio::stream::{Stream, StreamExt}; -//! async fn drop_all(mut input: I) { -//! while let Some(_) = input.next().await { -//! tokio::coop::proceed().await; -//! } -//! } -//! ``` -//! -//! The `proceed` future will coordinate with the executor to make sure that -//! every so often control is yielded back to the executor so it can run other -//! tasks. -//! -//! # Placing yield points -//! -//! Voluntary yield points should be placed _after_ at least some work has been -//! done. If they are not, a future sufficiently deep in the task hierarchy may -//! end up _never_ getting to run because of the number of yield points that -//! inevitably appear before it is reached. In general, you will want yield -//! points to only appear in "leaf" futures -- those that do not themselves poll -//! other futures. By doing this, you avoid double-counting each iteration of -//! the outer future against the cooperating budget. -//! -//! [`poll`]: method@std::future::Future::poll - -// NOTE: The doctests in this module are ignored since the whole module is (currently) private. - -use std::cell::Cell; - -thread_local! { - static CURRENT: Cell = Cell::new(Budget::unconstrained()); -} - -/// Opaque type tracking the amount of "work" a task may still do before -/// yielding back to the scheduler. -#[derive(Debug, Copy, Clone)] -pub(crate) struct Budget(Option); - -impl Budget { - /// Budget assigned to a task on each poll. - /// - /// The value itself is chosen somewhat arbitrarily. It needs to be high - /// enough to amortize wakeup and scheduling costs, but low enough that we - /// do not starve other tasks for too long. The value also needs to be high - /// enough that particularly deep tasks are able to do at least some useful - /// work at all. - /// - /// Note that as more yield points are added in the ecosystem, this value - /// will probably also have to be raised. - const fn initial() -> Budget { - Budget(Some(128)) - } - - /// Returns an unconstrained budget. Operations will not be limited. - const fn unconstrained() -> Budget { - Budget(None) - } -} - -cfg_rt_threaded! { - impl Budget { - fn has_remaining(self) -> bool { - self.0.map(|budget| budget > 0).unwrap_or(true) - } - } -} - -/// Run the given closure with a cooperative task budget. When the function -/// returns, the budget is reset to the value prior to calling the function. -#[inline(always)] -pub(crate) fn budget(f: impl FnOnce() -> R) -> R { - with_budget(Budget::initial(), f) -} - -cfg_rt_threaded! { - /// Set the current task's budget - #[cfg(feature = "blocking")] - pub(crate) fn set(budget: Budget) { - CURRENT.with(|cell| cell.set(budget)) - } -} - -#[inline(always)] -fn with_budget(budget: Budget, f: impl FnOnce() -> R) -> R { - struct ResetGuard<'a> { - cell: &'a Cell, - prev: Budget, - } - - impl<'a> Drop for ResetGuard<'a> { - fn drop(&mut self) { - self.cell.set(self.prev); - } - } - - CURRENT.with(move |cell| { - let prev = cell.get(); - - cell.set(budget); - - let _guard = ResetGuard { cell, prev }; - - f() - }) -} - -cfg_rt_threaded! 
{ - #[inline(always)] - pub(crate) fn has_budget_remaining() -> bool { - CURRENT.with(|cell| cell.get().has_remaining()) - } -} - -cfg_blocking_impl! { - /// Forcibly remove the budgeting constraints early. - /// - /// Returns the remaining budget - pub(crate) fn stop() -> Budget { - CURRENT.with(|cell| { - let prev = cell.get(); - cell.set(Budget::unconstrained()); - prev - }) - } -} - -cfg_coop! { - use std::task::{Context, Poll}; - - #[must_use] - pub(crate) struct RestoreOnPending(Cell); - - impl RestoreOnPending { - pub(crate) fn made_progress(&self) { - self.0.set(Budget::unconstrained()); - } - } - - impl Drop for RestoreOnPending { - fn drop(&mut self) { - // Don't reset if budget was unconstrained or if we made progress. - // They are both represented as the remembered budget being unconstrained. - let budget = self.0.get(); - if !budget.is_unconstrained() { - CURRENT.with(|cell| { - cell.set(budget); - }); - } - } - } - - /// Returns `Poll::Pending` if the current task has exceeded its budget and should yield. - /// - /// When you call this method, the current budget is decremented. However, to ensure that - /// progress is made every time a task is polled, the budget is automatically restored to its - /// former value if the returned `RestoreOnPending` is dropped. It is the caller's - /// responsibility to call `RestoreOnPending::made_progress` if it made progress, to ensure - /// that the budget empties appropriately. - /// - /// Note that `RestoreOnPending` restores the budget **as it was before `poll_proceed`**. - /// Therefore, if the budget is _further_ adjusted between when `poll_proceed` returns and - /// `RestRestoreOnPending` is dropped, those adjustments are erased unless the caller indicates - /// that progress was made. - #[inline] - pub(crate) fn poll_proceed(cx: &mut Context<'_>) -> Poll { - CURRENT.with(|cell| { - let mut budget = cell.get(); - - if budget.decrement() { - let restore = RestoreOnPending(Cell::new(cell.get())); - cell.set(budget); - Poll::Ready(restore) - } else { - cx.waker().wake_by_ref(); - Poll::Pending - } - }) - } - - impl Budget { - /// Decrement the budget. Returns `true` if successful. Decrementing fails - /// when there is not enough remaining budget. 
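A hedged, self-contained sketch of the restore-on-pending idea described above (a charge against a thread-local budget is refunded on drop unless the caller reports progress). This is illustrative only; the names `BUDGET`, `Charge`, and `charge` are not tokio's internal API.

```rust
use std::cell::Cell;

thread_local! {
    // Illustrative per-thread budget; `None` means unconstrained.
    static BUDGET: Cell<Option<u8>> = Cell::new(None);
}

/// Guard that refunds the charged unit unless progress was reported.
struct Charge {
    refund: Cell<Option<u8>>, // remembered value to restore on drop
}

impl Charge {
    fn made_progress(&self) {
        // Forget the remembered value so Drop leaves the decrement in place.
        self.refund.set(None);
    }
}

impl Drop for Charge {
    fn drop(&mut self) {
        if let Some(prev) = self.refund.get() {
            BUDGET.with(|b| b.set(Some(prev)));
        }
    }
}

/// Charge one unit of budget; returns `None` when the budget is exhausted
/// and the caller should yield back to the scheduler.
fn charge() -> Option<Charge> {
    BUDGET.with(|b| match b.get() {
        None => Some(Charge { refund: Cell::new(None) }), // unconstrained
        Some(0) => None,                                  // out of budget
        Some(n) => {
            b.set(Some(n - 1));
            // Remember the value before the charge so a fruitless poll can refund it.
            Some(Charge { refund: Cell::new(Some(n)) })
        }
    })
}

fn main() {
    BUDGET.with(|b| b.set(Some(2)));

    let c = charge().expect("budget available");
    drop(c); // no progress reported, so the unit is refunded
    assert_eq!(BUDGET.with(|b| b.get()), Some(2));

    let c = charge().expect("budget available");
    c.made_progress();
    drop(c); // progress reported, so the decrement sticks
    assert_eq!(BUDGET.with(|b| b.get()), Some(1));
}
```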
- fn decrement(&mut self) -> bool { - if let Some(num) = &mut self.0 { - if *num > 0 { - *num -= 1; - true - } else { - false - } - } else { - true - } - } - - fn is_unconstrained(self) -> bool { - self.0.is_none() - } - } -} - -#[cfg(all(test, not(loom)))] -mod test { - use super::*; - - fn get() -> Budget { - CURRENT.with(|cell| cell.get()) - } - - #[test] - fn bugeting() { - use futures::future::poll_fn; - use tokio_test::*; - - assert!(get().0.is_none()); - - let coop = assert_ready!(task::spawn(()).enter(|cx, _| poll_proceed(cx))); - - assert!(get().0.is_none()); - drop(coop); - assert!(get().0.is_none()); - - budget(|| { - assert_eq!(get().0, Budget::initial().0); - - let coop = assert_ready!(task::spawn(()).enter(|cx, _| poll_proceed(cx))); - assert_eq!(get().0.unwrap(), Budget::initial().0.unwrap() - 1); - drop(coop); - // we didn't make progress - assert_eq!(get().0, Budget::initial().0); - - let coop = assert_ready!(task::spawn(()).enter(|cx, _| poll_proceed(cx))); - assert_eq!(get().0.unwrap(), Budget::initial().0.unwrap() - 1); - coop.made_progress(); - drop(coop); - // we _did_ make progress - assert_eq!(get().0.unwrap(), Budget::initial().0.unwrap() - 1); - - let coop = assert_ready!(task::spawn(()).enter(|cx, _| poll_proceed(cx))); - assert_eq!(get().0.unwrap(), Budget::initial().0.unwrap() - 2); - coop.made_progress(); - drop(coop); - assert_eq!(get().0.unwrap(), Budget::initial().0.unwrap() - 2); - - budget(|| { - assert_eq!(get().0, Budget::initial().0); - - let coop = assert_ready!(task::spawn(()).enter(|cx, _| poll_proceed(cx))); - assert_eq!(get().0.unwrap(), Budget::initial().0.unwrap() - 1); - coop.made_progress(); - drop(coop); - assert_eq!(get().0.unwrap(), Budget::initial().0.unwrap() - 1); - }); - - assert_eq!(get().0.unwrap(), Budget::initial().0.unwrap() - 2); - }); - - assert!(get().0.is_none()); - - budget(|| { - let n = get().0.unwrap(); - - for _ in 0..n { - let coop = assert_ready!(task::spawn(()).enter(|cx, _| poll_proceed(cx))); - coop.made_progress(); - } - - let mut task = task::spawn(poll_fn(|cx| { - let coop = ready!(poll_proceed(cx)); - coop.made_progress(); - Poll::Ready(()) - })); - - assert_pending!(task.poll()); - }); - } -} diff --git a/third_party/rust/tokio-0.2.25/src/fs/canonicalize.rs b/third_party/rust/tokio-0.2.25/src/fs/canonicalize.rs deleted file mode 100644 index 403662685c49..000000000000 --- a/third_party/rust/tokio-0.2.25/src/fs/canonicalize.rs +++ /dev/null @@ -1,51 +0,0 @@ -use crate::fs::asyncify; - -use std::io; -use std::path::{Path, PathBuf}; - -/// Returns the canonical, absolute form of a path with all intermediate -/// components normalized and symbolic links resolved. -/// -/// This is an async version of [`std::fs::canonicalize`][std] -/// -/// [std]: std::fs::canonicalize -/// -/// # Platform-specific behavior -/// -/// This function currently corresponds to the `realpath` function on Unix -/// and the `CreateFile` and `GetFinalPathNameByHandle` functions on Windows. -/// Note that, this [may change in the future][changes]. -/// -/// On Windows, this converts the path to use [extended length path][path] -/// syntax, which allows your program to use longer path names, but means you -/// can only join backslash-delimited paths to it, and it may be incompatible -/// with other applications (if passed to the application on the command-line, -/// or written to a file another application may read). 
-/// -/// [changes]: https://doc.rust-lang.org/std/io/index.html#platform-specific-behavior -/// [path]: https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx#maxpath -/// -/// # Errors -/// -/// This function will return an error in the following situations, but is not -/// limited to just these cases: -/// -/// * `path` does not exist. -/// * A non-final component in path is not a directory. -/// -/// # Examples -/// -/// ```no_run -/// use tokio::fs; -/// use std::io; -/// -/// #[tokio::main] -/// async fn main() -> io::Result<()> { -/// let path = fs::canonicalize("../a/../foo.txt").await?; -/// Ok(()) -/// } -/// ``` -pub async fn canonicalize(path: impl AsRef) -> io::Result { - let path = path.as_ref().to_owned(); - asyncify(move || std::fs::canonicalize(path)).await -} diff --git a/third_party/rust/tokio-0.2.25/src/fs/copy.rs b/third_party/rust/tokio-0.2.25/src/fs/copy.rs deleted file mode 100644 index d4d4d29c8508..000000000000 --- a/third_party/rust/tokio-0.2.25/src/fs/copy.rs +++ /dev/null @@ -1,24 +0,0 @@ -use crate::fs::asyncify; -use std::path::Path; - -/// Copies the contents of one file to another. This function will also copy the permission bits of the original file to the destination file. -/// This function will overwrite the contents of to. -/// -/// This is the async equivalent of `std::fs::copy`. -/// -/// # Examples -/// -/// ```no_run -/// use tokio::fs; -/// -/// # async fn dox() -> std::io::Result<()> { -/// fs::copy("foo.txt", "bar.txt").await?; -/// # Ok(()) -/// # } -/// ``` - -pub async fn copy, Q: AsRef>(from: P, to: Q) -> Result { - let from = from.as_ref().to_owned(); - let to = to.as_ref().to_owned(); - asyncify(|| std::fs::copy(from, to)).await -} diff --git a/third_party/rust/tokio-0.2.25/src/fs/create_dir.rs b/third_party/rust/tokio-0.2.25/src/fs/create_dir.rs deleted file mode 100644 index e03b04dc4b00..000000000000 --- a/third_party/rust/tokio-0.2.25/src/fs/create_dir.rs +++ /dev/null @@ -1,52 +0,0 @@ -use crate::fs::asyncify; - -use std::io; -use std::path::Path; - -/// Creates a new, empty directory at the provided path -/// -/// This is an async version of [`std::fs::create_dir`][std] -/// -/// [std]: std::fs::create_dir -/// -/// # Platform-specific behavior -/// -/// This function currently corresponds to the `mkdir` function on Unix -/// and the `CreateDirectory` function on Windows. -/// Note that, this [may change in the future][changes]. -/// -/// [changes]: https://doc.rust-lang.org/std/io/index.html#platform-specific-behavior -/// -/// **NOTE**: If a parent of the given path doesn't exist, this function will -/// return an error. To create a directory and all its missing parents at the -/// same time, use the [`create_dir_all`] function. -/// -/// # Errors -/// -/// This function will return an error in the following situations, but is not -/// limited to just these cases: -/// -/// * User lacks permissions to create directory at `path`. -/// * A parent of the given path doesn't exist. (To create a directory and all -/// its missing parents at the same time, use the [`create_dir_all`] -/// function.) -/// * `path` already exists. 
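Each of these `tokio::fs` wrappers follows the same shape: take an owned copy of the arguments, run the corresponding `std::fs` call on the blocking thread pool, and await the result. A minimal sketch of that pattern using the public `tokio::task::spawn_blocking`; the helper name `asyncify_read` is illustrative, not a tokio API.

```rust
use std::io;
use std::path::{Path, PathBuf};

// Illustrative re-implementation of the wrapper pattern used above.
async fn asyncify_read(path: impl AsRef<Path>) -> io::Result<Vec<u8>> {
    // Own the data so the closure can be `'static` and move to another thread.
    let path: PathBuf = path.as_ref().to_owned();

    // Run the blocking std call on the blocking pool and await its completion.
    match tokio::task::spawn_blocking(move || std::fs::read(path)).await {
        Ok(res) => res,
        // The blocking task itself failed (e.g. it panicked).
        Err(_) => Err(io::Error::new(io::ErrorKind::Other, "background task failed")),
    }
}

#[tokio::main]
async fn main() -> io::Result<()> {
    let bytes = asyncify_read("Cargo.toml").await?;
    println!("read {} bytes", bytes.len());
    Ok(())
}
```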
-/// -/// [`create_dir_all`]: super::create_dir_all() -/// -/// # Examples -/// -/// ```no_run -/// use tokio::fs; -/// use std::io; -/// -/// #[tokio::main] -/// async fn main() -> io::Result<()> { -/// fs::create_dir("/some/dir").await?; -/// Ok(()) -/// } -/// ``` -pub async fn create_dir(path: impl AsRef) -> io::Result<()> { - let path = path.as_ref().to_owned(); - asyncify(move || std::fs::create_dir(path)).await -} diff --git a/third_party/rust/tokio-0.2.25/src/fs/create_dir_all.rs b/third_party/rust/tokio-0.2.25/src/fs/create_dir_all.rs deleted file mode 100644 index 21f0c82d1139..000000000000 --- a/third_party/rust/tokio-0.2.25/src/fs/create_dir_all.rs +++ /dev/null @@ -1,53 +0,0 @@ -use crate::fs::asyncify; - -use std::io; -use std::path::Path; - -/// Recursively creates a directory and all of its parent components if they -/// are missing. -/// -/// This is an async version of [`std::fs::create_dir_all`][std] -/// -/// [std]: std::fs::create_dir_all -/// -/// # Platform-specific behavior -/// -/// This function currently corresponds to the `mkdir` function on Unix -/// and the `CreateDirectory` function on Windows. -/// Note that, this [may change in the future][changes]. -/// -/// [changes]: https://doc.rust-lang.org/std/io/index.html#platform-specific-behavior -/// -/// # Errors -/// -/// This function will return an error in the following situations, but is not -/// limited to just these cases: -/// -/// * If any directory in the path specified by `path` does not already exist -/// and it could not be created otherwise. The specific error conditions for -/// when a directory is being created (after it is determined to not exist) are -/// outlined by [`fs::create_dir`]. -/// -/// Notable exception is made for situations where any of the directories -/// specified in the `path` could not be created as it was being created concurrently. -/// Such cases are considered to be successful. That is, calling `create_dir_all` -/// concurrently from multiple threads or processes is guaranteed not to fail -/// due to a race condition with itself. -/// -/// [`fs::create_dir`]: std::fs::create_dir -/// -/// # Examples -/// -/// ```no_run -/// use tokio::fs; -/// -/// #[tokio::main] -/// async fn main() -> std::io::Result<()> { -/// fs::create_dir_all("/some/dir").await?; -/// Ok(()) -/// } -/// ``` -pub async fn create_dir_all(path: impl AsRef) -> io::Result<()> { - let path = path.as_ref().to_owned(); - asyncify(move || std::fs::create_dir_all(path)).await -} diff --git a/third_party/rust/tokio-0.2.25/src/fs/dir_builder.rs b/third_party/rust/tokio-0.2.25/src/fs/dir_builder.rs deleted file mode 100644 index 8752a3716aa0..000000000000 --- a/third_party/rust/tokio-0.2.25/src/fs/dir_builder.rs +++ /dev/null @@ -1,117 +0,0 @@ -use crate::fs::asyncify; - -use std::io; -use std::path::Path; - -/// A builder for creating directories in various manners. -/// -/// Additional Unix-specific options are available via importing the -/// [`DirBuilderExt`] trait. -/// -/// This is a specialized version of [`std::fs::DirBuilder`] for usage on -/// the Tokio runtime. -/// -/// [std::fs::DirBuilder]: std::fs::DirBuilder -/// [`DirBuilderExt`]: crate::fs::os::unix::DirBuilderExt -#[derive(Debug, Default)] -pub struct DirBuilder { - /// Indicates whether to create parent directories if they are missing. - recursive: bool, - - /// Set the Unix mode for newly created directories. 
- #[cfg(unix)] - pub(super) mode: Option, -} - -impl DirBuilder { - /// Creates a new set of options with default mode/security settings for all - /// platforms and also non-recursive. - /// - /// This is an async version of [`std::fs::DirBuilder::new`][std] - /// - /// [std]: std::fs::DirBuilder::new - /// - /// # Examples - /// - /// ```no_run - /// use tokio::fs::DirBuilder; - /// - /// let builder = DirBuilder::new(); - /// ``` - pub fn new() -> Self { - Default::default() - } - - /// Indicates whether to create directories recursively (including all parent directories). - /// Parents that do not exist are created with the same security and permissions settings. - /// - /// This option defaults to `false`. - /// - /// This is an async version of [`std::fs::DirBuilder::recursive`][std] - /// - /// [std]: std::fs::DirBuilder::recursive - /// - /// # Examples - /// - /// ```no_run - /// use tokio::fs::DirBuilder; - /// - /// let mut builder = DirBuilder::new(); - /// builder.recursive(true); - /// ``` - pub fn recursive(&mut self, recursive: bool) -> &mut Self { - self.recursive = recursive; - self - } - - /// Creates the specified directory with the configured options. - /// - /// It is considered an error if the directory already exists unless - /// recursive mode is enabled. - /// - /// This is an async version of [`std::fs::DirBuilder::create`][std] - /// - /// [std]: std::fs::DirBuilder::create - /// - /// # Errors - /// - /// An error will be returned under the following circumstances: - /// - /// * Path already points to an existing file. - /// * Path already points to an existing directory and the mode is - /// non-recursive. - /// * The calling process doesn't have permissions to create the directory - /// or its missing parents. - /// * Other I/O error occurred. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::fs::DirBuilder; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// DirBuilder::new() - /// .recursive(true) - /// .create("/tmp/foo/bar/baz") - /// .await?; - /// - /// Ok(()) - /// } - /// ``` - pub async fn create>(&self, path: P) -> io::Result<()> { - let path = path.as_ref().to_owned(); - let mut builder = std::fs::DirBuilder::new(); - builder.recursive(self.recursive); - - #[cfg(unix)] - { - if let Some(mode) = self.mode { - std::os::unix::fs::DirBuilderExt::mode(&mut builder, mode); - } - } - - asyncify(move || builder.create(path)).await - } -} diff --git a/third_party/rust/tokio-0.2.25/src/fs/file.rs b/third_party/rust/tokio-0.2.25/src/fs/file.rs deleted file mode 100644 index f3bc98546a97..000000000000 --- a/third_party/rust/tokio-0.2.25/src/fs/file.rs +++ /dev/null @@ -1,790 +0,0 @@ -//! Types for working with [`File`]. -//! -//! [`File`]: File - -use self::State::*; -use crate::fs::{asyncify, sys}; -use crate::io::blocking::Buf; -use crate::io::{AsyncRead, AsyncSeek, AsyncWrite}; - -use std::fmt; -use std::fs::{Metadata, Permissions}; -use std::future::Future; -use std::io::{self, Seek, SeekFrom}; -use std::path::Path; -use std::pin::Pin; -use std::sync::Arc; -use std::task::Context; -use std::task::Poll; -use std::task::Poll::*; - -/// A reference to an open file on the filesystem. -/// -/// This is a specialized version of [`std::fs::File`][std] for usage from the -/// Tokio runtime. -/// -/// An instance of a `File` can be read and/or written depending on what options -/// it was opened with. Files also implement [`AsyncSeek`] to alter the logical -/// cursor that the file contains internally. 
-/// -/// A file will not be closed immediately when it goes out of scope if there -/// are any IO operations that have not yet completed. To ensure that a file is -/// closed immediately when it is dropped, you should call [`flush`] before -/// dropping it. Note that this does not ensure that the file has been fully -/// written to disk; the operating system might keep the changes around in an -/// in-memory buffer. See the [`sync_all`] method for telling the OS to write -/// the data to disk. -/// -/// Reading and writing to a `File` is usually done using the convenience -/// methods found on the [`AsyncReadExt`] and [`AsyncWriteExt`] traits. Examples -/// import these traits through [the prelude]. -/// -/// [std]: struct@std::fs::File -/// [`AsyncSeek`]: trait@crate::io::AsyncSeek -/// [`flush`]: fn@crate::io::AsyncWriteExt::flush -/// [`sync_all`]: fn@crate::fs::File::sync_all -/// [`AsyncReadExt`]: trait@crate::io::AsyncReadExt -/// [`AsyncWriteExt`]: trait@crate::io::AsyncWriteExt -/// [the prelude]: crate::prelude -/// -/// # Examples -/// -/// Create a new file and asynchronously write bytes to it: -/// -/// ```no_run -/// use tokio::fs::File; -/// use tokio::prelude::*; // for write_all() -/// -/// # async fn dox() -> std::io::Result<()> { -/// let mut file = File::create("foo.txt").await?; -/// file.write_all(b"hello, world!").await?; -/// # Ok(()) -/// # } -/// ``` -/// -/// Read the contents of a file into a buffer -/// -/// ```no_run -/// use tokio::fs::File; -/// use tokio::prelude::*; // for read_to_end() -/// -/// # async fn dox() -> std::io::Result<()> { -/// let mut file = File::open("foo.txt").await?; -/// -/// let mut contents = vec![]; -/// file.read_to_end(&mut contents).await?; -/// -/// println!("len = {}", contents.len()); -/// # Ok(()) -/// # } -/// ``` -pub struct File { - std: Arc, - state: State, - - /// Errors from writes/flushes are returned in write/flush calls. If a write - /// error is observed while performing a read, it is saved until the next - /// write / flush call. - last_write_err: Option, -} - -#[derive(Debug)] -enum State { - Idle(Option), - Busy(sys::Blocking<(Operation, Buf)>), -} - -#[derive(Debug)] -enum Operation { - Read(io::Result), - Write(io::Result<()>), - Seek(io::Result), -} - -impl File { - /// Attempts to open a file in read-only mode. - /// - /// See [`OpenOptions`] for more details. - /// - /// [`OpenOptions`]: super::OpenOptions - /// - /// # Errors - /// - /// This function will return an error if called from outside of the Tokio - /// runtime or if path does not already exist. Other errors may also be - /// returned according to OpenOptions::open. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::fs::File; - /// use tokio::prelude::*; - /// - /// # async fn dox() -> std::io::Result<()> { - /// let mut file = File::open("foo.txt").await?; - /// - /// let mut contents = vec![]; - /// file.read_to_end(&mut contents).await?; - /// - /// println!("len = {}", contents.len()); - /// # Ok(()) - /// # } - /// ``` - /// - /// The [`read_to_end`] method is defined on the [`AsyncReadExt`] trait. - /// - /// [`read_to_end`]: fn@crate::io::AsyncReadExt::read_to_end - /// [`AsyncReadExt`]: trait@crate::io::AsyncReadExt - pub async fn open(path: impl AsRef) -> io::Result { - let path = path.as_ref().to_owned(); - let std = asyncify(|| sys::File::open(path)).await?; - - Ok(File::from_std(std)) - } - - /// Opens a file in write-only mode. 
- /// - /// This function will create a file if it does not exist, and will truncate - /// it if it does. - /// - /// See [`OpenOptions`] for more details. - /// - /// [`OpenOptions`]: super::OpenOptions - /// - /// # Errors - /// - /// Results in an error if called from outside of the Tokio runtime or if - /// the underlying [`create`] call results in an error. - /// - /// [`create`]: std::fs::File::create - /// - /// # Examples - /// - /// ```no_run - /// use tokio::fs::File; - /// use tokio::prelude::*; - /// - /// # async fn dox() -> std::io::Result<()> { - /// let mut file = File::create("foo.txt").await?; - /// file.write_all(b"hello, world!").await?; - /// # Ok(()) - /// # } - /// ``` - /// - /// The [`write_all`] method is defined on the [`AsyncWriteExt`] trait. - /// - /// [`write_all`]: fn@crate::io::AsyncWriteExt::write_all - /// [`AsyncWriteExt`]: trait@crate::io::AsyncWriteExt - pub async fn create(path: impl AsRef) -> io::Result { - let path = path.as_ref().to_owned(); - let std_file = asyncify(move || sys::File::create(path)).await?; - Ok(File::from_std(std_file)) - } - - /// Converts a [`std::fs::File`][std] to a [`tokio::fs::File`][file]. - /// - /// [std]: std::fs::File - /// [file]: File - /// - /// # Examples - /// - /// ```no_run - /// // This line could block. It is not recommended to do this on the Tokio - /// // runtime. - /// let std_file = std::fs::File::open("foo.txt").unwrap(); - /// let file = tokio::fs::File::from_std(std_file); - /// ``` - pub fn from_std(std: sys::File) -> File { - File { - std: Arc::new(std), - state: State::Idle(Some(Buf::with_capacity(0))), - last_write_err: None, - } - } - - /// Seeks to an offset, in bytes, in a stream. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::fs::File; - /// use tokio::prelude::*; - /// - /// use std::io::SeekFrom; - /// - /// # async fn dox() -> std::io::Result<()> { - /// let mut file = File::open("foo.txt").await?; - /// file.seek(SeekFrom::Start(6)).await?; - /// - /// let mut contents = vec![0u8; 10]; - /// file.read_exact(&mut contents).await?; - /// # Ok(()) - /// # } - /// ``` - /// - /// The [`read_exact`] method is defined on the [`AsyncReadExt`] trait. - /// - /// [`read_exact`]: fn@crate::io::AsyncReadExt::read_exact - /// [`AsyncReadExt`]: trait@crate::io::AsyncReadExt - pub async fn seek(&mut self, mut pos: SeekFrom) -> io::Result { - self.complete_inflight().await; - - let mut buf = match self.state { - Idle(ref mut buf_cell) => buf_cell.take().unwrap(), - _ => unreachable!(), - }; - - // Factor in any unread data from the buf - if !buf.is_empty() { - let n = buf.discard_read(); - - if let SeekFrom::Current(ref mut offset) = pos { - *offset += n; - } - } - - let std = self.std.clone(); - - // Start the operation - self.state = Busy(sys::run(move || { - let res = (&*std).seek(pos); - (Operation::Seek(res), buf) - })); - - let (op, buf) = match self.state { - Idle(_) => unreachable!(), - Busy(ref mut rx) => rx.await.unwrap(), - }; - - self.state = Idle(Some(buf)); - - match op { - Operation::Seek(res) => res, - _ => unreachable!(), - } - } - - /// Attempts to sync all OS-internal metadata to disk. - /// - /// This function will attempt to ensure that all in-core data reaches the - /// filesystem before returning. 
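The `Idle`/`Busy` round trip used by `seek` (and by the read/write paths later in this file) can be illustrated in isolation: the reusable buffer is moved into a blocking task, and polling the returned join handle hands it back together with the result. The sketch below is a simplified stand-in under those assumptions, not the crate's internals; `BlockingReader` and `read_chunk` are invented names.

```rust
use std::io::{self, Read};
use std::sync::Arc;
use tokio::task::JoinHandle;

enum State {
    // We hold the reusable buffer while no blocking work is in flight.
    Idle(Option<Vec<u8>>),
    // A blocking task owns the buffer and hands it back with the result.
    Busy(JoinHandle<(io::Result<usize>, Vec<u8>)>),
}

struct BlockingReader {
    std: Arc<std::fs::File>,
    state: State,
}

impl BlockingReader {
    async fn read_chunk(&mut self) -> io::Result<Vec<u8>> {
        loop {
            match &mut self.state {
                State::Idle(buf_cell) => {
                    let mut buf = buf_cell.take().expect("buffer present while idle");
                    buf.resize(8 * 1024, 0);
                    let std = Arc::clone(&self.std);
                    // Move the buffer into a blocking task; it comes back with the result.
                    self.state = State::Busy(tokio::task::spawn_blocking(move || {
                        let res = (&*std).read(&mut buf);
                        (res, buf)
                    }));
                }
                State::Busy(handle) => {
                    let (res, mut buf) = handle.await.expect("blocking read task panicked");
                    match res {
                        Ok(n) => {
                            // Hand the filled bytes to the caller and park a fresh
                            // buffer for the next call.
                            buf.truncate(n);
                            self.state = State::Idle(Some(Vec::new()));
                            return Ok(buf);
                        }
                        Err(e) => {
                            self.state = State::Idle(Some(buf));
                            return Err(e);
                        }
                    }
                }
            }
        }
    }
}

#[tokio::main]
async fn main() -> io::Result<()> {
    let mut reader = BlockingReader {
        std: Arc::new(std::fs::File::open("Cargo.toml")?),
        state: State::Idle(Some(Vec::new())),
    };
    let chunk = reader.read_chunk().await?;
    println!("read {} bytes", chunk.len());
    Ok(())
}
```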
- /// - /// # Examples - /// - /// ```no_run - /// use tokio::fs::File; - /// use tokio::prelude::*; - /// - /// # async fn dox() -> std::io::Result<()> { - /// let mut file = File::create("foo.txt").await?; - /// file.write_all(b"hello, world!").await?; - /// file.sync_all().await?; - /// # Ok(()) - /// # } - /// ``` - /// - /// The [`write_all`] method is defined on the [`AsyncWriteExt`] trait. - /// - /// [`write_all`]: fn@crate::io::AsyncWriteExt::write_all - /// [`AsyncWriteExt`]: trait@crate::io::AsyncWriteExt - pub async fn sync_all(&mut self) -> io::Result<()> { - self.complete_inflight().await; - - let std = self.std.clone(); - asyncify(move || std.sync_all()).await - } - - /// This function is similar to `sync_all`, except that it may not - /// synchronize file metadata to the filesystem. - /// - /// This is intended for use cases that must synchronize content, but don't - /// need the metadata on disk. The goal of this method is to reduce disk - /// operations. - /// - /// Note that some platforms may simply implement this in terms of `sync_all`. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::fs::File; - /// use tokio::prelude::*; - /// - /// # async fn dox() -> std::io::Result<()> { - /// let mut file = File::create("foo.txt").await?; - /// file.write_all(b"hello, world!").await?; - /// file.sync_data().await?; - /// # Ok(()) - /// # } - /// ``` - /// - /// The [`write_all`] method is defined on the [`AsyncWriteExt`] trait. - /// - /// [`write_all`]: fn@crate::io::AsyncWriteExt::write_all - /// [`AsyncWriteExt`]: trait@crate::io::AsyncWriteExt - pub async fn sync_data(&mut self) -> io::Result<()> { - self.complete_inflight().await; - - let std = self.std.clone(); - asyncify(move || std.sync_data()).await - } - - /// Truncates or extends the underlying file, updating the size of this file to become size. - /// - /// If the size is less than the current file's size, then the file will be - /// shrunk. If it is greater than the current file's size, then the file - /// will be extended to size and have all of the intermediate data filled in - /// with 0s. - /// - /// # Errors - /// - /// This function will return an error if the file is not opened for - /// writing. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::fs::File; - /// use tokio::prelude::*; - /// - /// # async fn dox() -> std::io::Result<()> { - /// let mut file = File::create("foo.txt").await?; - /// file.write_all(b"hello, world!").await?; - /// file.set_len(10).await?; - /// # Ok(()) - /// # } - /// ``` - /// - /// The [`write_all`] method is defined on the [`AsyncWriteExt`] trait. 
- /// - /// [`write_all`]: fn@crate::io::AsyncWriteExt::write_all - /// [`AsyncWriteExt`]: trait@crate::io::AsyncWriteExt - pub async fn set_len(&mut self, size: u64) -> io::Result<()> { - self.complete_inflight().await; - - let mut buf = match self.state { - Idle(ref mut buf_cell) => buf_cell.take().unwrap(), - _ => unreachable!(), - }; - - let seek = if !buf.is_empty() { - Some(SeekFrom::Current(buf.discard_read())) - } else { - None - }; - - let std = self.std.clone(); - - self.state = Busy(sys::run(move || { - let res = if let Some(seek) = seek { - (&*std).seek(seek).and_then(|_| std.set_len(size)) - } else { - std.set_len(size) - } - .map(|_| 0); // the value is discarded later - - // Return the result as a seek - (Operation::Seek(res), buf) - })); - - let (op, buf) = match self.state { - Idle(_) => unreachable!(), - Busy(ref mut rx) => rx.await?, - }; - - self.state = Idle(Some(buf)); - - match op { - Operation::Seek(res) => res.map(|_| ()), - _ => unreachable!(), - } - } - - /// Queries metadata about the underlying file. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::fs::File; - /// - /// # async fn dox() -> std::io::Result<()> { - /// let file = File::open("foo.txt").await?; - /// let metadata = file.metadata().await?; - /// - /// println!("{:?}", metadata); - /// # Ok(()) - /// # } - /// ``` - pub async fn metadata(&self) -> io::Result { - let std = self.std.clone(); - asyncify(move || std.metadata()).await - } - - /// Create a new `File` instance that shares the same underlying file handle - /// as the existing `File` instance. Reads, writes, and seeks will affect both - /// File instances simultaneously. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::fs::File; - /// - /// # async fn dox() -> std::io::Result<()> { - /// let file = File::open("foo.txt").await?; - /// let file_clone = file.try_clone().await?; - /// # Ok(()) - /// # } - /// ``` - pub async fn try_clone(&self) -> io::Result { - let std = self.std.clone(); - let std_file = asyncify(move || std.try_clone()).await?; - Ok(File::from_std(std_file)) - } - - /// Destructures `File` into a [`std::fs::File`][std]. This function is - /// async to allow any in-flight operations to complete. - /// - /// Use `File::try_into_std` to attempt conversion immediately. - /// - /// [std]: std::fs::File - /// - /// # Examples - /// - /// ```no_run - /// use tokio::fs::File; - /// - /// # async fn dox() -> std::io::Result<()> { - /// let tokio_file = File::open("foo.txt").await?; - /// let std_file = tokio_file.into_std().await; - /// # Ok(()) - /// # } - /// ``` - pub async fn into_std(mut self) -> sys::File { - self.complete_inflight().await; - Arc::try_unwrap(self.std).expect("Arc::try_unwrap failed") - } - - /// Tries to immediately destructure `File` into a [`std::fs::File`][std]. - /// - /// [std]: std::fs::File - /// - /// # Errors - /// - /// This function will return an error containing the file if some - /// operation is in-flight. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::fs::File; - /// - /// # async fn dox() -> std::io::Result<()> { - /// let tokio_file = File::open("foo.txt").await?; - /// let std_file = tokio_file.try_into_std().unwrap(); - /// # Ok(()) - /// # } - /// ``` - pub fn try_into_std(mut self) -> Result { - match Arc::try_unwrap(self.std) { - Ok(file) => Ok(file), - Err(std_file_arc) => { - self.std = std_file_arc; - Err(self) - } - } - } - - /// Changes the permissions on the underlying file. 
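A usage sketch for the two conversions above: when `try_into_std` fails because an operation is still in flight, the file is returned in the `Err` variant, so the caller can fall back to the awaiting `into_std`. The path `foo.txt` mirrors the surrounding `no_run` examples and is illustrative.

```rust
use tokio::fs::File;

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let file = File::open("foo.txt").await?;

    // Prefer the non-blocking conversion, but fall back to `into_std`,
    // which first waits for any in-flight operation to complete.
    let std_file = match file.try_into_std() {
        Ok(std_file) => std_file,
        Err(still_busy) => still_busy.into_std().await,
    };

    println!("len = {}", std_file.metadata()?.len());
    Ok(())
}
```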
- /// - /// # Platform-specific behavior - /// - /// This function currently corresponds to the `fchmod` function on Unix and - /// the `SetFileInformationByHandle` function on Windows. Note that, this - /// [may change in the future][changes]. - /// - /// [changes]: https://doc.rust-lang.org/std/io/index.html#platform-specific-behavior - /// - /// # Errors - /// - /// This function will return an error if the user lacks permission change - /// attributes on the underlying file. It may also return an error in other - /// os-specific unspecified cases. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::fs::File; - /// - /// # async fn dox() -> std::io::Result<()> { - /// let file = File::open("foo.txt").await?; - /// let mut perms = file.metadata().await?.permissions(); - /// perms.set_readonly(true); - /// file.set_permissions(perms).await?; - /// # Ok(()) - /// # } - /// ``` - pub async fn set_permissions(&self, perm: Permissions) -> io::Result<()> { - let std = self.std.clone(); - asyncify(move || std.set_permissions(perm)).await - } - - async fn complete_inflight(&mut self) { - use crate::future::poll_fn; - - if let Err(e) = poll_fn(|cx| Pin::new(&mut *self).poll_flush(cx)).await { - self.last_write_err = Some(e.kind()); - } - } -} - -impl AsyncRead for File { - unsafe fn prepare_uninitialized_buffer(&self, _buf: &mut [std::mem::MaybeUninit]) -> bool { - // https://github.com/rust-lang/rust/blob/09c817eeb29e764cfc12d0a8d94841e3ffe34023/src/libstd/fs.rs#L668 - false - } - - fn poll_read( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - dst: &mut [u8], - ) -> Poll> { - loop { - match self.state { - Idle(ref mut buf_cell) => { - let mut buf = buf_cell.take().unwrap(); - - if !buf.is_empty() { - let n = buf.copy_to(dst); - *buf_cell = Some(buf); - return Ready(Ok(n)); - } - - buf.ensure_capacity_for(dst); - let std = self.std.clone(); - - self.state = Busy(sys::run(move || { - let res = buf.read_from(&mut &*std); - (Operation::Read(res), buf) - })); - } - Busy(ref mut rx) => { - let (op, mut buf) = ready!(Pin::new(rx).poll(cx))?; - - match op { - Operation::Read(Ok(_)) => { - let n = buf.copy_to(dst); - self.state = Idle(Some(buf)); - return Ready(Ok(n)); - } - Operation::Read(Err(e)) => { - assert!(buf.is_empty()); - - self.state = Idle(Some(buf)); - return Ready(Err(e)); - } - Operation::Write(Ok(_)) => { - assert!(buf.is_empty()); - self.state = Idle(Some(buf)); - continue; - } - Operation::Write(Err(e)) => { - assert!(self.last_write_err.is_none()); - self.last_write_err = Some(e.kind()); - self.state = Idle(Some(buf)); - } - Operation::Seek(_) => { - assert!(buf.is_empty()); - self.state = Idle(Some(buf)); - continue; - } - } - } - } - } - } -} - -impl AsyncSeek for File { - fn start_seek( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - mut pos: SeekFrom, - ) -> Poll> { - loop { - match self.state { - Idle(ref mut buf_cell) => { - let mut buf = buf_cell.take().unwrap(); - - // Factor in any unread data from the buf - if !buf.is_empty() { - let n = buf.discard_read(); - - if let SeekFrom::Current(ref mut offset) = pos { - *offset += n; - } - } - - let std = self.std.clone(); - - self.state = Busy(sys::run(move || { - let res = (&*std).seek(pos); - (Operation::Seek(res), buf) - })); - - return Ready(Ok(())); - } - Busy(ref mut rx) => { - let (op, buf) = ready!(Pin::new(rx).poll(cx))?; - self.state = Idle(Some(buf)); - - match op { - Operation::Read(_) => {} - Operation::Write(Err(e)) => { - assert!(self.last_write_err.is_none()); - self.last_write_err = 
Some(e.kind()); - } - Operation::Write(_) => {} - Operation::Seek(_) => {} - } - } - } - } - } - - fn poll_complete(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - loop { - match self.state { - Idle(_) => panic!("must call start_seek before calling poll_complete"), - Busy(ref mut rx) => { - let (op, buf) = ready!(Pin::new(rx).poll(cx))?; - self.state = Idle(Some(buf)); - - match op { - Operation::Read(_) => {} - Operation::Write(Err(e)) => { - assert!(self.last_write_err.is_none()); - self.last_write_err = Some(e.kind()); - } - Operation::Write(_) => {} - Operation::Seek(res) => return Ready(res), - } - } - } - } - } -} - -impl AsyncWrite for File { - fn poll_write( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - src: &[u8], - ) -> Poll> { - if let Some(e) = self.last_write_err.take() { - return Ready(Err(e.into())); - } - - loop { - match self.state { - Idle(ref mut buf_cell) => { - let mut buf = buf_cell.take().unwrap(); - - let seek = if !buf.is_empty() { - Some(SeekFrom::Current(buf.discard_read())) - } else { - None - }; - - let n = buf.copy_from(src); - let std = self.std.clone(); - - self.state = Busy(sys::run(move || { - let res = if let Some(seek) = seek { - (&*std).seek(seek).and_then(|_| buf.write_to(&mut &*std)) - } else { - buf.write_to(&mut &*std) - }; - - (Operation::Write(res), buf) - })); - - return Ready(Ok(n)); - } - Busy(ref mut rx) => { - let (op, buf) = ready!(Pin::new(rx).poll(cx))?; - self.state = Idle(Some(buf)); - - match op { - Operation::Read(_) => { - // We don't care about the result here. The fact - // that the cursor has advanced will be reflected in - // the next iteration of the loop - continue; - } - Operation::Write(res) => { - // If the previous write was successful, continue. - // Otherwise, error. - res?; - continue; - } - Operation::Seek(_) => { - // Ignore the seek - continue; - } - } - } - } - } - } - - fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - if let Some(e) = self.last_write_err.take() { - return Ready(Err(e.into())); - } - - let (op, buf) = match self.state { - Idle(_) => return Ready(Ok(())), - Busy(ref mut rx) => ready!(Pin::new(rx).poll(cx))?, - }; - - // The buffer is not used here - self.state = Idle(Some(buf)); - - match op { - Operation::Read(_) => Ready(Ok(())), - Operation::Write(res) => Ready(res), - Operation::Seek(_) => Ready(Ok(())), - } - } - - fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } -} - -impl From for File { - fn from(std: sys::File) -> Self { - Self::from_std(std) - } -} - -impl fmt::Debug for File { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("tokio::fs::File") - .field("std", &self.std) - .finish() - } -} - -#[cfg(unix)] -impl std::os::unix::io::AsRawFd for File { - fn as_raw_fd(&self) -> std::os::unix::io::RawFd { - self.std.as_raw_fd() - } -} - -#[cfg(windows)] -impl std::os::windows::io::AsRawHandle for File { - fn as_raw_handle(&self) -> std::os::windows::io::RawHandle { - self.std.as_raw_handle() - } -} diff --git a/third_party/rust/tokio-0.2.25/src/fs/hard_link.rs b/third_party/rust/tokio-0.2.25/src/fs/hard_link.rs deleted file mode 100644 index 50cc17d2861a..000000000000 --- a/third_party/rust/tokio-0.2.25/src/fs/hard_link.rs +++ /dev/null @@ -1,46 +0,0 @@ -use crate::fs::asyncify; - -use std::io; -use std::path::Path; - -/// Creates a new hard link on the filesystem. 
-/// -/// This is an async version of [`std::fs::hard_link`][std] -/// -/// [std]: std::fs::hard_link -/// -/// The `dst` path will be a link pointing to the `src` path. Note that systems -/// often require these two paths to both be located on the same filesystem. -/// -/// # Platform-specific behavior -/// -/// This function currently corresponds to the `link` function on Unix -/// and the `CreateHardLink` function on Windows. -/// Note that, this [may change in the future][changes]. -/// -/// [changes]: https://doc.rust-lang.org/std/io/index.html#platform-specific-behavior -/// -/// # Errors -/// -/// This function will return an error in the following situations, but is not -/// limited to just these cases: -/// -/// * The `src` path is not a file or doesn't exist. -/// -/// # Examples -/// -/// ```no_run -/// use tokio::fs; -/// -/// #[tokio::main] -/// async fn main() -> std::io::Result<()> { -/// fs::hard_link("a.txt", "b.txt").await?; // Hard link a.txt to b.txt -/// Ok(()) -/// } -/// ``` -pub async fn hard_link(src: impl AsRef, dst: impl AsRef) -> io::Result<()> { - let src = src.as_ref().to_owned(); - let dst = dst.as_ref().to_owned(); - - asyncify(move || std::fs::hard_link(src, dst)).await -} diff --git a/third_party/rust/tokio-0.2.25/src/fs/metadata.rs b/third_party/rust/tokio-0.2.25/src/fs/metadata.rs deleted file mode 100644 index ff9cded79a24..000000000000 --- a/third_party/rust/tokio-0.2.25/src/fs/metadata.rs +++ /dev/null @@ -1,47 +0,0 @@ -use crate::fs::asyncify; - -use std::fs::Metadata; -use std::io; -use std::path::Path; - -/// Given a path, queries the file system to get information about a file, -/// directory, etc. -/// -/// This is an async version of [`std::fs::metadata`][std] -/// -/// This function will traverse symbolic links to query information about the -/// destination file. -/// -/// # Platform-specific behavior -/// -/// This function currently corresponds to the `stat` function on Unix and the -/// `GetFileAttributesEx` function on Windows. Note that, this [may change in -/// the future][changes]. -/// -/// [std]: std::fs::metadata -/// [changes]: https://doc.rust-lang.org/std/io/index.html#platform-specific-behavior -/// -/// # Errors -/// -/// This function will return an error in the following situations, but is not -/// limited to just these cases: -/// -/// * The user lacks permissions to perform `metadata` call on `path`. -/// * `path` does not exist. -/// -/// # Examples -/// -/// ```rust,no_run -/// use tokio::fs; -/// -/// #[tokio::main] -/// async fn main() -> std::io::Result<()> { -/// let attr = fs::metadata("/some/file/path.txt").await?; -/// // inspect attr ... -/// Ok(()) -/// } -/// ``` -pub async fn metadata(path: impl AsRef) -> io::Result { - let path = path.as_ref().to_owned(); - asyncify(|| std::fs::metadata(path)).await -} diff --git a/third_party/rust/tokio-0.2.25/src/fs/mod.rs b/third_party/rust/tokio-0.2.25/src/fs/mod.rs deleted file mode 100644 index a2b062b1a305..000000000000 --- a/third_party/rust/tokio-0.2.25/src/fs/mod.rs +++ /dev/null @@ -1,112 +0,0 @@ -#![cfg(not(loom))] - -//! Asynchronous file and standard stream adaptation. -//! -//! This module contains utility methods and adapter types for input/output to -//! files or standard streams (`Stdin`, `Stdout`, `Stderr`), and -//! filesystem manipulation, for use within (and only within) a Tokio runtime. -//! -//! Tasks run by *worker* threads should not block, as this could delay -//! servicing reactor events. Portable filesystem operations are blocking, -//! however. 
This module offers adapters which use a `blocking` annotation -//! to inform the runtime that a blocking operation is required. When -//! necessary, this allows the runtime to convert the current thread from a -//! *worker* to a *backup* thread, where blocking is acceptable. -//! -//! ## Usage -//! -//! Where possible, users should prefer the provided asynchronous-specific -//! traits such as [`AsyncRead`], or methods returning a `Future` or `Poll` -//! type. Adaptions also extend to traits like `std::io::Read` where methods -//! return `std::io::Result`. Be warned that these adapted methods may return -//! `std::io::ErrorKind::WouldBlock` if a *worker* thread can not be converted -//! to a *backup* thread immediately. -//! -//! [`AsyncRead`]: https://docs.rs/tokio-io/0.1/tokio_io/trait.AsyncRead.html - -mod canonicalize; -pub use self::canonicalize::canonicalize; - -mod create_dir; -pub use self::create_dir::create_dir; - -mod create_dir_all; -pub use self::create_dir_all::create_dir_all; - -mod dir_builder; -pub use self::dir_builder::DirBuilder; - -mod file; -pub use self::file::File; - -mod hard_link; -pub use self::hard_link::hard_link; - -mod metadata; -pub use self::metadata::metadata; - -mod open_options; -pub use self::open_options::OpenOptions; - -pub mod os; - -mod read; -pub use self::read::read; - -mod read_dir; -pub use self::read_dir::{read_dir, DirEntry, ReadDir}; - -mod read_link; -pub use self::read_link::read_link; - -mod read_to_string; -pub use self::read_to_string::read_to_string; - -mod remove_dir; -pub use self::remove_dir::remove_dir; - -mod remove_dir_all; -pub use self::remove_dir_all::remove_dir_all; - -mod remove_file; -pub use self::remove_file::remove_file; - -mod rename; -pub use self::rename::rename; - -mod set_permissions; -pub use self::set_permissions::set_permissions; - -mod symlink_metadata; -pub use self::symlink_metadata::symlink_metadata; - -mod write; -pub use self::write::write; - -mod copy; -pub use self::copy::copy; - -use std::io; - -pub(crate) async fn asyncify(f: F) -> io::Result -where - F: FnOnce() -> io::Result + Send + 'static, - T: Send + 'static, -{ - match sys::run(f).await { - Ok(res) => res, - Err(_) => Err(io::Error::new( - io::ErrorKind::Other, - "background task failed", - )), - } -} - -/// Types in this module can be mocked out in tests. -mod sys { - pub(crate) use std::fs::File; - - // TODO: don't rename - pub(crate) use crate::runtime::spawn_blocking as run; - pub(crate) use crate::task::JoinHandle as Blocking; -} diff --git a/third_party/rust/tokio-0.2.25/src/fs/open_options.rs b/third_party/rust/tokio-0.2.25/src/fs/open_options.rs deleted file mode 100644 index ba3d9a6cf674..000000000000 --- a/third_party/rust/tokio-0.2.25/src/fs/open_options.rs +++ /dev/null @@ -1,403 +0,0 @@ -use crate::fs::{asyncify, File}; - -use std::io; -use std::path::Path; - -/// Options and flags which can be used to configure how a file is opened. -/// -/// This builder exposes the ability to configure how a [`File`] is opened and -/// what operations are permitted on the open file. The [`File::open`] and -/// [`File::create`] methods are aliases for commonly used options using this -/// builder. -/// -/// Generally speaking, when using `OpenOptions`, you'll first call [`new`], -/// then chain calls to methods to set each option, then call [`open`], passing -/// the path of the file you're trying to open. This will give you a -/// [`io::Result`][result] with a [`File`] inside that you can further operate -/// on. 
-/// -/// This is a specialized version of [`std::fs::OpenOptions`] for usage from -/// the Tokio runtime. -/// -/// `From` is implemented for more advanced configuration -/// than the methods provided here. -/// -/// [`new`]: OpenOptions::new -/// [`open`]: OpenOptions::open -/// [result]: std::io::Result -/// [`File`]: File -/// [`File::open`]: File::open -/// [`File::create`]: File::create -/// [`std::fs::OpenOptions`]: std::fs::OpenOptions -/// -/// # Examples -/// -/// Opening a file to read: -/// -/// ```no_run -/// use tokio::fs::OpenOptions; -/// use std::io; -/// -/// #[tokio::main] -/// async fn main() -> io::Result<()> { -/// let file = OpenOptions::new() -/// .read(true) -/// .open("foo.txt") -/// .await?; -/// -/// Ok(()) -/// } -/// ``` -/// -/// Opening a file for both reading and writing, as well as creating it if it -/// doesn't exist: -/// -/// ```no_run -/// use tokio::fs::OpenOptions; -/// use std::io; -/// -/// #[tokio::main] -/// async fn main() -> io::Result<()> { -/// let file = OpenOptions::new() -/// .read(true) -/// .write(true) -/// .create(true) -/// .open("foo.txt") -/// .await?; -/// -/// Ok(()) -/// } -/// ``` -#[derive(Clone, Debug)] -pub struct OpenOptions(std::fs::OpenOptions); - -impl OpenOptions { - /// Creates a blank new set of options ready for configuration. - /// - /// All options are initially set to `false`. - /// - /// This is an async version of [`std::fs::OpenOptions::new`][std] - /// - /// [std]: std::fs::OpenOptions::new - /// - /// # Examples - /// - /// ```no_run - /// use tokio::fs::OpenOptions; - /// - /// let mut options = OpenOptions::new(); - /// let future = options.read(true).open("foo.txt"); - /// ``` - pub fn new() -> OpenOptions { - OpenOptions(std::fs::OpenOptions::new()) - } - - /// Sets the option for read access. - /// - /// This option, when true, will indicate that the file should be - /// `read`-able if opened. - /// - /// This is an async version of [`std::fs::OpenOptions::read`][std] - /// - /// [std]: std::fs::OpenOptions::read - /// - /// # Examples - /// - /// ```no_run - /// use tokio::fs::OpenOptions; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let file = OpenOptions::new() - /// .read(true) - /// .open("foo.txt") - /// .await?; - /// - /// Ok(()) - /// } - /// ``` - pub fn read(&mut self, read: bool) -> &mut OpenOptions { - self.0.read(read); - self - } - - /// Sets the option for write access. - /// - /// This option, when true, will indicate that the file should be - /// `write`-able if opened. - /// - /// This is an async version of [`std::fs::OpenOptions::write`][std] - /// - /// [std]: std::fs::OpenOptions::write - /// - /// # Examples - /// - /// ```no_run - /// use tokio::fs::OpenOptions; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let file = OpenOptions::new() - /// .write(true) - /// .open("foo.txt") - /// .await?; - /// - /// Ok(()) - /// } - /// ``` - pub fn write(&mut self, write: bool) -> &mut OpenOptions { - self.0.write(write); - self - } - - /// Sets the option for the append mode. - /// - /// This option, when true, means that writes will append to a file instead - /// of overwriting previous contents. Note that setting - /// `.write(true).append(true)` has the same effect as setting only - /// `.append(true)`. - /// - /// For most filesystems, the operating system guarantees that all writes are - /// atomic: no writes get mangled because another process writes at the same - /// time. 
- /// - /// One maybe obvious note when using append-mode: make sure that all data - /// that belongs together is written to the file in one operation. This - /// can be done by concatenating strings before passing them to [`write()`], - /// or using a buffered writer (with a buffer of adequate size), - /// and calling [`flush()`] when the message is complete. - /// - /// If a file is opened with both read and append access, beware that after - /// opening, and after every write, the position for reading may be set at the - /// end of the file. So, before writing, save the current position (using - /// [`seek`]`(`[`SeekFrom`]`::`[`Current`]`(0))`), and restore it before the next read. - /// - /// This is an async version of [`std::fs::OpenOptions::append`][std] - /// - /// [std]: std::fs::OpenOptions::append - /// - /// ## Note - /// - /// This function doesn't create the file if it doesn't exist. Use the [`create`] - /// method to do so. - /// - /// [`write()`]: crate::io::AsyncWriteExt::write - /// [`flush()`]: crate::io::AsyncWriteExt::flush - /// [`seek`]: crate::io::AsyncSeekExt::seek - /// [`SeekFrom`]: std::io::SeekFrom - /// [`Current`]: std::io::SeekFrom::Current - /// [`create`]: OpenOptions::create - /// - /// # Examples - /// - /// ```no_run - /// use tokio::fs::OpenOptions; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let file = OpenOptions::new() - /// .append(true) - /// .open("foo.txt") - /// .await?; - /// - /// Ok(()) - /// } - /// ``` - pub fn append(&mut self, append: bool) -> &mut OpenOptions { - self.0.append(append); - self - } - - /// Sets the option for truncating a previous file. - /// - /// If a file is successfully opened with this option set it will truncate - /// the file to 0 length if it already exists. - /// - /// The file must be opened with write access for truncate to work. - /// - /// This is an async version of [`std::fs::OpenOptions::truncate`][std] - /// - /// [std]: std::fs::OpenOptions::truncate - /// - /// # Examples - /// - /// ```no_run - /// use tokio::fs::OpenOptions; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let file = OpenOptions::new() - /// .write(true) - /// .truncate(true) - /// .open("foo.txt") - /// .await?; - /// - /// Ok(()) - /// } - /// ``` - pub fn truncate(&mut self, truncate: bool) -> &mut OpenOptions { - self.0.truncate(truncate); - self - } - - /// Sets the option for creating a new file. - /// - /// This option indicates whether a new file will be created if the file - /// does not yet already exist. - /// - /// In order for the file to be created, [`write`] or [`append`] access must - /// be used. - /// - /// This is an async version of [`std::fs::OpenOptions::create`][std] - /// - /// [std]: std::fs::OpenOptions::create - /// [`write`]: OpenOptions::write - /// [`append`]: OpenOptions::append - /// - /// # Examples - /// - /// ```no_run - /// use tokio::fs::OpenOptions; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let file = OpenOptions::new() - /// .write(true) - /// .create(true) - /// .open("foo.txt") - /// .await?; - /// - /// Ok(()) - /// } - /// ``` - pub fn create(&mut self, create: bool) -> &mut OpenOptions { - self.0.create(create); - self - } - - /// Sets the option to always create a new file. - /// - /// This option indicates whether a new file will be created. No file is - /// allowed to exist at the target location, also no (dangling) symlink. 
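To make the append-mode caveat above concrete, here is a hedged sketch for a file opened with both read and append access: remember the read position before writing, write the whole record in one call, and seek back afterwards. It follows the tokio 0.2 convention of the surrounding examples (`tokio::prelude::*` for the I/O extension traits); the path `log.txt` and its contents are illustrative.

```rust
use std::io::SeekFrom;
use tokio::fs::OpenOptions;
use tokio::prelude::*; // AsyncReadExt / AsyncWriteExt for read_to_end, write_all, flush

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let mut file = OpenOptions::new()
        .read(true)
        .append(true)
        .create(true)
        .open("log.txt")
        .await?;

    // Remember where we were reading from before the append moves the cursor.
    let read_pos = file.seek(SeekFrom::Current(0)).await?;

    // Write the whole record in one call so it is appended as one unit.
    file.write_all(b"one complete log line\n").await?;
    file.flush().await?;

    // Restore the saved position before reading again.
    file.seek(SeekFrom::Start(read_pos)).await?;
    let mut rest = Vec::new();
    file.read_to_end(&mut rest).await?;

    println!("read {} bytes after the saved position", rest.len());
    Ok(())
}
```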
- /// - /// This option is useful because it is atomic. Otherwise between checking - /// whether a file exists and creating a new one, the file may have been - /// created by another process (a TOCTOU race condition / attack). - /// - /// If `.create_new(true)` is set, [`.create()`] and [`.truncate()`] are - /// ignored. - /// - /// The file must be opened with write or append access in order to create a - /// new file. - /// - /// This is an async version of [`std::fs::OpenOptions::create_new`][std] - /// - /// [std]: std::fs::OpenOptions::create_new - /// [`.create()`]: OpenOptions::create - /// [`.truncate()`]: OpenOptions::truncate - /// - /// # Examples - /// - /// ```no_run - /// use tokio::fs::OpenOptions; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let file = OpenOptions::new() - /// .write(true) - /// .create_new(true) - /// .open("foo.txt") - /// .await?; - /// - /// Ok(()) - /// } - /// ``` - pub fn create_new(&mut self, create_new: bool) -> &mut OpenOptions { - self.0.create_new(create_new); - self - } - - /// Opens a file at `path` with the options specified by `self`. - /// - /// This is an async version of [`std::fs::OpenOptions::open`][std] - /// - /// [std]: std::fs::OpenOptions::open - /// - /// # Errors - /// - /// This function will return an error under a number of different - /// circumstances. Some of these error conditions are listed here, together - /// with their [`ErrorKind`]. The mapping to [`ErrorKind`]s is not part of - /// the compatibility contract of the function, especially the `Other` kind - /// might change to more specific kinds in the future. - /// - /// * [`NotFound`]: The specified file does not exist and neither `create` - /// or `create_new` is set. - /// * [`NotFound`]: One of the directory components of the file path does - /// not exist. - /// * [`PermissionDenied`]: The user lacks permission to get the specified - /// access rights for the file. - /// * [`PermissionDenied`]: The user lacks permission to open one of the - /// directory components of the specified path. - /// * [`AlreadyExists`]: `create_new` was specified and the file already - /// exists. - /// * [`InvalidInput`]: Invalid combinations of open options (truncate - /// without write access, no access mode set, etc.). - /// * [`Other`]: One of the directory components of the specified file path - /// was not, in fact, a directory. - /// * [`Other`]: Filesystem-level errors: full disk, write permission - /// requested on a read-only file system, exceeded disk quota, too many - /// open files, too long filename, too many symbolic links in the - /// specified path (Unix-like systems only), etc. 
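Following the list of error kinds above, a short sketch of distinguishing them at the call site; the path `config.toml` and the fallback behaviour are illustrative.

```rust
use std::io::ErrorKind;
use tokio::fs::OpenOptions;

#[tokio::main]
async fn main() {
    match OpenOptions::new().read(true).open("config.toml").await {
        Ok(file) => {
            println!("opened: {:?}", file);
        }
        Err(e) if e.kind() == ErrorKind::NotFound => {
            eprintln!("config.toml does not exist; falling back to defaults");
        }
        Err(e) if e.kind() == ErrorKind::PermissionDenied => {
            eprintln!("no permission to read config.toml");
        }
        Err(e) => {
            eprintln!("failed to open config.toml: {}", e);
        }
    }
}
```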
- /// - /// # Examples - /// - /// ```no_run - /// use tokio::fs::OpenOptions; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let file = OpenOptions::new().open("foo.txt").await?; - /// Ok(()) - /// } - /// ``` - /// - /// [`ErrorKind`]: std::io::ErrorKind - /// [`AlreadyExists`]: std::io::ErrorKind::AlreadyExists - /// [`InvalidInput`]: std::io::ErrorKind::InvalidInput - /// [`NotFound`]: std::io::ErrorKind::NotFound - /// [`Other`]: std::io::ErrorKind::Other - /// [`PermissionDenied`]: std::io::ErrorKind::PermissionDenied - pub async fn open(&self, path: impl AsRef) -> io::Result { - let path = path.as_ref().to_owned(); - let opts = self.0.clone(); - - let std = asyncify(move || opts.open(path)).await?; - Ok(File::from_std(std)) - } - - /// Returns a mutable reference to the the underlying std::fs::OpenOptions - #[cfg(unix)] - pub(super) fn as_inner_mut(&mut self) -> &mut std::fs::OpenOptions { - &mut self.0 - } -} - -impl From for OpenOptions { - fn from(options: std::fs::OpenOptions) -> OpenOptions { - OpenOptions(options) - } -} - -impl Default for OpenOptions { - fn default() -> Self { - Self::new() - } -} diff --git a/third_party/rust/tokio-0.2.25/src/fs/os/mod.rs b/third_party/rust/tokio-0.2.25/src/fs/os/mod.rs deleted file mode 100644 index f4b8bfb617df..000000000000 --- a/third_party/rust/tokio-0.2.25/src/fs/os/mod.rs +++ /dev/null @@ -1,7 +0,0 @@ -//! OS-specific functionality. - -#[cfg(unix)] -pub mod unix; - -#[cfg(windows)] -pub mod windows; diff --git a/third_party/rust/tokio-0.2.25/src/fs/os/unix/dir_builder_ext.rs b/third_party/rust/tokio-0.2.25/src/fs/os/unix/dir_builder_ext.rs deleted file mode 100644 index e9a25b959c6c..000000000000 --- a/third_party/rust/tokio-0.2.25/src/fs/os/unix/dir_builder_ext.rs +++ /dev/null @@ -1,29 +0,0 @@ -use crate::fs::dir_builder::DirBuilder; - -/// Unix-specific extensions to [`DirBuilder`]. -/// -/// [`DirBuilder`]: crate::fs::DirBuilder -pub trait DirBuilderExt { - /// Sets the mode to create new directories with. - /// - /// This option defaults to 0o777. - /// - /// # Examples - /// - /// - /// ```no_run - /// use tokio::fs::DirBuilder; - /// use tokio::fs::os::unix::DirBuilderExt; - /// - /// let mut builder = DirBuilder::new(); - /// builder.mode(0o775); - /// ``` - fn mode(&mut self, mode: u32) -> &mut Self; -} - -impl DirBuilderExt for DirBuilder { - fn mode(&mut self, mode: u32) -> &mut Self { - self.mode = Some(mode); - self - } -} diff --git a/third_party/rust/tokio-0.2.25/src/fs/os/unix/mod.rs b/third_party/rust/tokio-0.2.25/src/fs/os/unix/mod.rs deleted file mode 100644 index 826222ebf230..000000000000 --- a/third_party/rust/tokio-0.2.25/src/fs/os/unix/mod.rs +++ /dev/null @@ -1,10 +0,0 @@ -//! Unix-specific extensions to primitives in the `tokio_fs` module. - -mod symlink; -pub use self::symlink::symlink; - -mod open_options_ext; -pub use self::open_options_ext::OpenOptionsExt; - -mod dir_builder_ext; -pub use self::dir_builder_ext::DirBuilderExt; diff --git a/third_party/rust/tokio-0.2.25/src/fs/os/unix/open_options_ext.rs b/third_party/rust/tokio-0.2.25/src/fs/os/unix/open_options_ext.rs deleted file mode 100644 index ff8927588047..000000000000 --- a/third_party/rust/tokio-0.2.25/src/fs/os/unix/open_options_ext.rs +++ /dev/null @@ -1,79 +0,0 @@ -use crate::fs::open_options::OpenOptions; -use std::os::unix::fs::OpenOptionsExt as StdOpenOptionsExt; - -/// Unix-specific extensions to [`fs::OpenOptions`]. 
-/// -/// This mirrors the definition of [`std::os::unix::fs::OpenOptionsExt`]. -/// -/// -/// [`fs::OpenOptions`]: crate::fs::OpenOptions -/// [`std::os::unix::fs::OpenOptionsExt`]: std::os::unix::fs::OpenOptionsExt -pub trait OpenOptionsExt { - /// Sets the mode bits that a new file will be created with. - /// - /// If a new file is created as part of an `OpenOptions::open` call then this - /// specified `mode` will be used as the permission bits for the new file. - /// If no `mode` is set, the default of `0o666` will be used. - /// The operating system masks out bits with the system's `umask`, to produce - /// the final permissions. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::fs::OpenOptions; - /// use tokio::fs::os::unix::OpenOptionsExt; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut options = OpenOptions::new(); - /// options.mode(0o644); // Give read/write for owner and read for others. - /// let file = options.open("foo.txt").await?; - /// - /// Ok(()) - /// } - /// ``` - fn mode(&mut self, mode: u32) -> &mut Self; - - /// Pass custom flags to the `flags` argument of `open`. - /// - /// The bits that define the access mode are masked out with `O_ACCMODE`, to - /// ensure they do not interfere with the access mode set by Rusts options. - /// - /// Custom flags can only set flags, not remove flags set by Rusts options. - /// This options overwrites any previously set custom flags. - /// - /// # Examples - /// - /// ```no_run - /// use libc; - /// use tokio::fs::OpenOptions; - /// use tokio::fs::os::unix::OpenOptionsExt; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut options = OpenOptions::new(); - /// options.write(true); - /// if cfg!(unix) { - /// options.custom_flags(libc::O_NOFOLLOW); - /// } - /// let file = options.open("foo.txt").await?; - /// - /// Ok(()) - /// } - /// ``` - fn custom_flags(&mut self, flags: i32) -> &mut Self; -} - -impl OpenOptionsExt for OpenOptions { - fn mode(&mut self, mode: u32) -> &mut OpenOptions { - self.as_inner_mut().mode(mode); - self - } - - fn custom_flags(&mut self, flags: i32) -> &mut OpenOptions { - self.as_inner_mut().custom_flags(flags); - self - } -} diff --git a/third_party/rust/tokio-0.2.25/src/fs/os/unix/symlink.rs b/third_party/rust/tokio-0.2.25/src/fs/os/unix/symlink.rs deleted file mode 100644 index 22ece7250fdf..000000000000 --- a/third_party/rust/tokio-0.2.25/src/fs/os/unix/symlink.rs +++ /dev/null @@ -1,18 +0,0 @@ -use crate::fs::asyncify; - -use std::io; -use std::path::Path; - -/// Creates a new symbolic link on the filesystem. -/// -/// The `dst` path will be a symbolic link pointing to the `src` path. -/// -/// This is an async version of [`std::os::unix::fs::symlink`][std] -/// -/// [std]: std::os::unix::fs::symlink -pub async fn symlink(src: impl AsRef, dst: impl AsRef) -> io::Result<()> { - let src = src.as_ref().to_owned(); - let dst = dst.as_ref().to_owned(); - - asyncify(move || std::os::unix::fs::symlink(src, dst)).await -} diff --git a/third_party/rust/tokio-0.2.25/src/fs/os/windows/mod.rs b/third_party/rust/tokio-0.2.25/src/fs/os/windows/mod.rs deleted file mode 100644 index 42eb7bdb92f1..000000000000 --- a/third_party/rust/tokio-0.2.25/src/fs/os/windows/mod.rs +++ /dev/null @@ -1,7 +0,0 @@ -//! Windows-specific extensions for the primitives in the `tokio_fs` module. 
- -mod symlink_dir; -pub use self::symlink_dir::symlink_dir; - -mod symlink_file; -pub use self::symlink_file::symlink_file; diff --git a/third_party/rust/tokio-0.2.25/src/fs/os/windows/symlink_dir.rs b/third_party/rust/tokio-0.2.25/src/fs/os/windows/symlink_dir.rs deleted file mode 100644 index 736e762b48d0..000000000000 --- a/third_party/rust/tokio-0.2.25/src/fs/os/windows/symlink_dir.rs +++ /dev/null @@ -1,19 +0,0 @@ -use crate::fs::asyncify; - -use std::io; -use std::path::Path; - -/// Creates a new directory symlink on the filesystem. -/// -/// The `dst` path will be a directory symbolic link pointing to the `src` -/// path. -/// -/// This is an async version of [`std::os::windows::fs::symlink_dir`][std] -/// -/// [std]: std::os::windows::fs::symlink_dir -pub async fn symlink_dir(src: impl AsRef, dst: impl AsRef) -> io::Result<()> { - let src = src.as_ref().to_owned(); - let dst = dst.as_ref().to_owned(); - - asyncify(move || std::os::windows::fs::symlink_dir(src, dst)).await -} diff --git a/third_party/rust/tokio-0.2.25/src/fs/os/windows/symlink_file.rs b/third_party/rust/tokio-0.2.25/src/fs/os/windows/symlink_file.rs deleted file mode 100644 index 07d8e604192f..000000000000 --- a/third_party/rust/tokio-0.2.25/src/fs/os/windows/symlink_file.rs +++ /dev/null @@ -1,19 +0,0 @@ -use crate::fs::asyncify; - -use std::io; -use std::path::Path; - -/// Creates a new file symbolic link on the filesystem. -/// -/// The `dst` path will be a file symbolic link pointing to the `src` -/// path. -/// -/// This is an async version of [`std::os::windows::fs::symlink_file`][std] -/// -/// [std]: std::os::windows::fs::symlink_file -pub async fn symlink_file(src: impl AsRef, dst: impl AsRef) -> io::Result<()> { - let src = src.as_ref().to_owned(); - let dst = dst.as_ref().to_owned(); - - asyncify(move || std::os::windows::fs::symlink_file(src, dst)).await -} diff --git a/third_party/rust/tokio-0.2.25/src/fs/read.rs b/third_party/rust/tokio-0.2.25/src/fs/read.rs deleted file mode 100644 index 2d80eb5bd349..000000000000 --- a/third_party/rust/tokio-0.2.25/src/fs/read.rs +++ /dev/null @@ -1,47 +0,0 @@ -use crate::fs::asyncify; - -use std::{io, path::Path}; - -/// Reads the entire contents of a file into a bytes vector. -/// -/// This is an async version of [`std::fs::read`][std] -/// -/// [std]: std::fs::read -/// -/// This is a convenience function for using [`File::open`] and [`read_to_end`] -/// with fewer imports and without an intermediate variable. It pre-allocates a -/// buffer based on the file size when available, so it is generally faster than -/// reading into a vector created with `Vec::new()`. -/// -/// [`File::open`]: super::File::open -/// [`read_to_end`]: crate::io::AsyncReadExt::read_to_end -/// -/// # Errors -/// -/// This function will return an error if `path` does not already exist. -/// Other errors may also be returned according to [`OpenOptions::open`]. -/// -/// [`OpenOptions::open`]: super::OpenOptions::open -/// -/// It will also return an error if it encounters while reading an error -/// of a kind other than [`ErrorKind::Interrupted`]. 
-/// -/// [`ErrorKind::Interrupted`]: std::io::ErrorKind::Interrupted -/// -/// # Examples -/// -/// ```no_run -/// use tokio::fs; -/// use std::net::SocketAddr; -/// -/// #[tokio::main] -/// async fn main() -> Result<(), Box> { -/// let contents = fs::read("address.txt").await?; -/// let foo: SocketAddr = String::from_utf8_lossy(&contents).parse()?; -/// Ok(()) -/// } -/// ``` -pub async fn read(path: impl AsRef) -> io::Result> { - let path = path.as_ref().to_owned(); - asyncify(move || std::fs::read(path)).await -} diff --git a/third_party/rust/tokio-0.2.25/src/fs/read_dir.rs b/third_party/rust/tokio-0.2.25/src/fs/read_dir.rs deleted file mode 100644 index f9b16c66c5de..000000000000 --- a/third_party/rust/tokio-0.2.25/src/fs/read_dir.rs +++ /dev/null @@ -1,244 +0,0 @@ -use crate::fs::{asyncify, sys}; - -use std::ffi::OsString; -use std::fs::{FileType, Metadata}; -use std::future::Future; -use std::io; -#[cfg(unix)] -use std::os::unix::fs::DirEntryExt; -use std::path::{Path, PathBuf}; -use std::pin::Pin; -use std::sync::Arc; -use std::task::Context; -use std::task::Poll; - -/// Returns a stream over the entries within a directory. -/// -/// This is an async version of [`std::fs::read_dir`](std::fs::read_dir) -pub async fn read_dir(path: impl AsRef) -> io::Result { - let path = path.as_ref().to_owned(); - let std = asyncify(|| std::fs::read_dir(path)).await?; - - Ok(ReadDir(State::Idle(Some(std)))) -} - -/// Stream of the entries in a directory. -/// -/// This stream is returned from the [`read_dir`] function of this module and -/// will yield instances of [`DirEntry`]. Through a [`DirEntry`] -/// information like the entry's path and possibly other metadata can be -/// learned. -/// -/// # Errors -/// -/// This [`Stream`] will return an [`Err`] if there's some sort of intermittent -/// IO error during iteration. -/// -/// [`read_dir`]: read_dir -/// [`DirEntry`]: DirEntry -/// [`Stream`]: crate::stream::Stream -/// [`Err`]: std::result::Result::Err -#[derive(Debug)] -#[must_use = "streams do nothing unless polled"] -pub struct ReadDir(State); - -#[derive(Debug)] -enum State { - Idle(Option), - Pending(sys::Blocking<(Option>, std::fs::ReadDir)>), -} - -impl ReadDir { - /// Returns the next entry in the directory stream. - pub async fn next_entry(&mut self) -> io::Result> { - use crate::future::poll_fn; - poll_fn(|cx| self.poll_next_entry(cx)).await - } - - #[doc(hidden)] - pub fn poll_next_entry(&mut self, cx: &mut Context<'_>) -> Poll>> { - loop { - match self.0 { - State::Idle(ref mut std) => { - let mut std = std.take().unwrap(); - - self.0 = State::Pending(sys::run(move || { - let ret = std.next(); - (ret, std) - })); - } - State::Pending(ref mut rx) => { - let (ret, std) = ready!(Pin::new(rx).poll(cx))?; - self.0 = State::Idle(Some(std)); - - let ret = match ret { - Some(Ok(std)) => Ok(Some(DirEntry(Arc::new(std)))), - Some(Err(e)) => Err(e), - None => Ok(None), - }; - - return Poll::Ready(ret); - } - } - } - } -} - -#[cfg(feature = "stream")] -impl crate::stream::Stream for ReadDir { - type Item = io::Result; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Poll::Ready(match ready!(self.poll_next_entry(cx)) { - Ok(Some(entry)) => Some(Ok(entry)), - Ok(None) => None, - Err(err) => Some(Err(err)), - }) - } -} - -/// Entries returned by the [`ReadDir`] stream. -/// -/// [`ReadDir`]: struct@ReadDir -/// -/// This is a specialized version of [`std::fs::DirEntry`] for usage from the -/// Tokio runtime. 
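Besides `next_entry`, the `ReadDir` shown above also implements `Stream` when the `stream` feature is enabled, so it can be consumed with `StreamExt::next`. A sketch of that usage, assuming tokio 0.2 with the `fs`, `stream`, and `macros` features:

```rust
use tokio::fs;
use tokio::stream::StreamExt;

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let mut entries = fs::read_dir(".").await?;
    // Each item is an io::Result<DirEntry>, matching the Stream impl above.
    while let Some(entry) = entries.next().await {
        let entry = entry?;
        println!("{:?}", entry.file_name());
    }
    Ok(())
}
```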
-/// -/// An instance of `DirEntry` represents an entry inside of a directory on the -/// filesystem. Each entry can be inspected via methods to learn about the full -/// path or possibly other metadata through per-platform extension traits. -#[derive(Debug)] -pub struct DirEntry(Arc); - -impl DirEntry { - /// Returns the full path to the file that this entry represents. - /// - /// The full path is created by joining the original path to `read_dir` - /// with the filename of this entry. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::fs; - /// - /// # async fn dox() -> std::io::Result<()> { - /// let mut entries = fs::read_dir(".").await?; - /// - /// while let Some(entry) = entries.next_entry().await? { - /// println!("{:?}", entry.path()); - /// } - /// # Ok(()) - /// # } - /// ``` - /// - /// This prints output like: - /// - /// ```text - /// "./whatever.txt" - /// "./foo.html" - /// "./hello_world.rs" - /// ``` - /// - /// The exact text, of course, depends on what files you have in `.`. - pub fn path(&self) -> PathBuf { - self.0.path() - } - - /// Returns the bare file name of this directory entry without any other - /// leading path component. - /// - /// # Examples - /// - /// ``` - /// use tokio::fs; - /// - /// # async fn dox() -> std::io::Result<()> { - /// let mut entries = fs::read_dir(".").await?; - /// - /// while let Some(entry) = entries.next_entry().await? { - /// println!("{:?}", entry.file_name()); - /// } - /// # Ok(()) - /// # } - /// ``` - pub fn file_name(&self) -> OsString { - self.0.file_name() - } - - /// Returns the metadata for the file that this entry points at. - /// - /// This function will not traverse symlinks if this entry points at a - /// symlink. - /// - /// # Platform-specific behavior - /// - /// On Windows this function is cheap to call (no extra system calls - /// needed), but on Unix platforms this function is the equivalent of - /// calling `symlink_metadata` on the path. - /// - /// # Examples - /// - /// ``` - /// use tokio::fs; - /// - /// # async fn dox() -> std::io::Result<()> { - /// let mut entries = fs::read_dir(".").await?; - /// - /// while let Some(entry) = entries.next_entry().await? { - /// if let Ok(metadata) = entry.metadata().await { - /// // Now let's show our entry's permissions! - /// println!("{:?}: {:?}", entry.path(), metadata.permissions()); - /// } else { - /// println!("Couldn't get file type for {:?}", entry.path()); - /// } - /// } - /// # Ok(()) - /// # } - /// ``` - pub async fn metadata(&self) -> io::Result { - let std = self.0.clone(); - asyncify(move || std.metadata()).await - } - - /// Returns the file type for the file that this entry points at. - /// - /// This function will not traverse symlinks if this entry points at a - /// symlink. - /// - /// # Platform-specific behavior - /// - /// On Windows and most Unix platforms this function is free (no extra - /// system calls needed), but some Unix platforms may require the equivalent - /// call to `symlink_metadata` to learn about the target file type. - /// - /// # Examples - /// - /// ``` - /// use tokio::fs; - /// - /// # async fn dox() -> std::io::Result<()> { - /// let mut entries = fs::read_dir(".").await?; - /// - /// while let Some(entry) = entries.next_entry().await? { - /// if let Ok(file_type) = entry.file_type().await { - /// // Now let's show our entry's file type! 
- /// println!("{:?}: {:?}", entry.path(), file_type); - /// } else { - /// println!("Couldn't get file type for {:?}", entry.path()); - /// } - /// } - /// # Ok(()) - /// # } - /// ``` - pub async fn file_type(&self) -> io::Result { - let std = self.0.clone(); - asyncify(move || std.file_type()).await - } -} - -#[cfg(unix)] -impl DirEntryExt for DirEntry { - fn ino(&self) -> u64 { - self.0.ino() - } -} diff --git a/third_party/rust/tokio-0.2.25/src/fs/read_link.rs b/third_party/rust/tokio-0.2.25/src/fs/read_link.rs deleted file mode 100644 index 6c48c5e15685..000000000000 --- a/third_party/rust/tokio-0.2.25/src/fs/read_link.rs +++ /dev/null @@ -1,14 +0,0 @@ -use crate::fs::asyncify; - -use std::io; -use std::path::{Path, PathBuf}; - -/// Reads a symbolic link, returning the file that the link points to. -/// -/// This is an async version of [`std::fs::read_link`][std] -/// -/// [std]: std::fs::read_link -pub async fn read_link(path: impl AsRef) -> io::Result { - let path = path.as_ref().to_owned(); - asyncify(move || std::fs::read_link(path)).await -} diff --git a/third_party/rust/tokio-0.2.25/src/fs/read_to_string.rs b/third_party/rust/tokio-0.2.25/src/fs/read_to_string.rs deleted file mode 100644 index c743bb4ddc46..000000000000 --- a/third_party/rust/tokio-0.2.25/src/fs/read_to_string.rs +++ /dev/null @@ -1,24 +0,0 @@ -use crate::fs::asyncify; - -use std::{io, path::Path}; - -/// Creates a future which will open a file for reading and read the entire -/// contents into a string and return said string. -/// -/// This is the async equivalent of `std::fs::read_to_string`. -/// -/// # Examples -/// -/// ```no_run -/// use tokio::fs; -/// -/// # async fn dox() -> std::io::Result<()> { -/// let contents = fs::read_to_string("foo.txt").await?; -/// println!("foo.txt contains {} bytes", contents.len()); -/// # Ok(()) -/// # } -/// ``` -pub async fn read_to_string(path: impl AsRef) -> io::Result { - let path = path.as_ref().to_owned(); - asyncify(move || std::fs::read_to_string(path)).await -} diff --git a/third_party/rust/tokio-0.2.25/src/fs/remove_dir.rs b/third_party/rust/tokio-0.2.25/src/fs/remove_dir.rs deleted file mode 100644 index 6e7cbd08f6bb..000000000000 --- a/third_party/rust/tokio-0.2.25/src/fs/remove_dir.rs +++ /dev/null @@ -1,12 +0,0 @@ -use crate::fs::asyncify; - -use std::io; -use std::path::Path; - -/// Removes an existing, empty directory. -/// -/// This is an async version of [`std::fs::remove_dir`](std::fs::remove_dir) -pub async fn remove_dir(path: impl AsRef) -> io::Result<()> { - let path = path.as_ref().to_owned(); - asyncify(move || std::fs::remove_dir(path)).await -} diff --git a/third_party/rust/tokio-0.2.25/src/fs/remove_dir_all.rs b/third_party/rust/tokio-0.2.25/src/fs/remove_dir_all.rs deleted file mode 100644 index 0a237550f9c6..000000000000 --- a/third_party/rust/tokio-0.2.25/src/fs/remove_dir_all.rs +++ /dev/null @@ -1,14 +0,0 @@ -use crate::fs::asyncify; - -use std::io; -use std::path::Path; - -/// Removes a directory at this path, after removing all its contents. Use carefully! 
-/// -/// This is an async version of [`std::fs::remove_dir_all`][std] -/// -/// [std]: fn@std::fs::remove_dir_all -pub async fn remove_dir_all(path: impl AsRef) -> io::Result<()> { - let path = path.as_ref().to_owned(); - asyncify(move || std::fs::remove_dir_all(path)).await -} diff --git a/third_party/rust/tokio-0.2.25/src/fs/remove_file.rs b/third_party/rust/tokio-0.2.25/src/fs/remove_file.rs deleted file mode 100644 index d22a5bfc88a0..000000000000 --- a/third_party/rust/tokio-0.2.25/src/fs/remove_file.rs +++ /dev/null @@ -1,18 +0,0 @@ -use crate::fs::asyncify; - -use std::io; -use std::path::Path; - -/// Removes a file from the filesystem. -/// -/// Note that there is no guarantee that the file is immediately deleted (e.g. -/// depending on platform, other open file descriptors may prevent immediate -/// removal). -/// -/// This is an async version of [`std::fs::remove_file`][std] -/// -/// [std]: std::fs::remove_file -pub async fn remove_file(path: impl AsRef) -> io::Result<()> { - let path = path.as_ref().to_owned(); - asyncify(move || std::fs::remove_file(path)).await -} diff --git a/third_party/rust/tokio-0.2.25/src/fs/rename.rs b/third_party/rust/tokio-0.2.25/src/fs/rename.rs deleted file mode 100644 index 4f980821d2fe..000000000000 --- a/third_party/rust/tokio-0.2.25/src/fs/rename.rs +++ /dev/null @@ -1,17 +0,0 @@ -use crate::fs::asyncify; - -use std::io; -use std::path::Path; - -/// Renames a file or directory to a new name, replacing the original file if -/// `to` already exists. -/// -/// This will not work if the new name is on a different mount point. -/// -/// This is an async version of [`std::fs::rename`](std::fs::rename) -pub async fn rename(from: impl AsRef, to: impl AsRef) -> io::Result<()> { - let from = from.as_ref().to_owned(); - let to = to.as_ref().to_owned(); - - asyncify(move || std::fs::rename(from, to)).await -} diff --git a/third_party/rust/tokio-0.2.25/src/fs/set_permissions.rs b/third_party/rust/tokio-0.2.25/src/fs/set_permissions.rs deleted file mode 100644 index 09be02ea013d..000000000000 --- a/third_party/rust/tokio-0.2.25/src/fs/set_permissions.rs +++ /dev/null @@ -1,15 +0,0 @@ -use crate::fs::asyncify; - -use std::fs::Permissions; -use std::io; -use std::path::Path; - -/// Changes the permissions found on a file or a directory. -/// -/// This is an async version of [`std::fs::set_permissions`][std] -/// -/// [std]: fn@std::fs::set_permissions -pub async fn set_permissions(path: impl AsRef, perm: Permissions) -> io::Result<()> { - let path = path.as_ref().to_owned(); - asyncify(|| std::fs::set_permissions(path, perm)).await -} diff --git a/third_party/rust/tokio-0.2.25/src/fs/symlink_metadata.rs b/third_party/rust/tokio-0.2.25/src/fs/symlink_metadata.rs deleted file mode 100644 index 1d0df1257605..000000000000 --- a/third_party/rust/tokio-0.2.25/src/fs/symlink_metadata.rs +++ /dev/null @@ -1,15 +0,0 @@ -use crate::fs::asyncify; - -use std::fs::Metadata; -use std::io; -use std::path::Path; - -/// Queries the file system metadata for a path. 
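The wrappers above (`remove_file`, `rename`, `set_permissions`, and friends) all follow the same pattern: take ownership of the arguments and run the corresponding `std::fs` call through `asyncify`. A short sketch combining a few of them, assuming the `fs` and `macros` features; the paths are hypothetical:

```rust
use tokio::fs;

#[tokio::main]
async fn main() -> std::io::Result<()> {
    // Each call runs the blocking std::fs operation on the blocking pool.
    fs::rename("draft.txt", "final.txt").await?;

    let mut perms = fs::metadata("final.txt").await?.permissions();
    perms.set_readonly(true);
    fs::set_permissions("final.txt", perms).await?;

    fs::remove_file("obsolete.txt").await?;
    Ok(())
}
```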
-/// -/// This is an async version of [`std::fs::symlink_metadata`][std] -/// -/// [std]: fn@std::fs::symlink_metadata -pub async fn symlink_metadata(path: impl AsRef) -> io::Result { - let path = path.as_ref().to_owned(); - asyncify(|| std::fs::symlink_metadata(path)).await -} diff --git a/third_party/rust/tokio-0.2.25/src/fs/write.rs b/third_party/rust/tokio-0.2.25/src/fs/write.rs deleted file mode 100644 index 0114cab8a83a..000000000000 --- a/third_party/rust/tokio-0.2.25/src/fs/write.rs +++ /dev/null @@ -1,25 +0,0 @@ -use crate::fs::asyncify; - -use std::{io, path::Path}; - -/// Creates a future that will open a file for writing and write the entire -/// contents of `contents` to it. -/// -/// This is the async equivalent of `std::fs::write`. -/// -/// # Examples -/// -/// ```no_run -/// use tokio::fs; -/// -/// # async fn dox() -> std::io::Result<()> { -/// fs::write("foo.txt", b"Hello world!").await?; -/// # Ok(()) -/// # } -/// ``` -pub async fn write + Unpin>(path: impl AsRef, contents: C) -> io::Result<()> { - let path = path.as_ref().to_owned(); - let contents = contents.as_ref().to_owned(); - - asyncify(move || std::fs::write(path, contents)).await -} diff --git a/third_party/rust/tokio-0.2.25/src/future/maybe_done.rs b/third_party/rust/tokio-0.2.25/src/future/maybe_done.rs deleted file mode 100644 index 1e083ad7fd51..000000000000 --- a/third_party/rust/tokio-0.2.25/src/future/maybe_done.rs +++ /dev/null @@ -1,76 +0,0 @@ -//! Definition of the MaybeDone combinator - -use std::future::Future; -use std::mem; -use std::pin::Pin; -use std::task::{Context, Poll}; - -/// A future that may have completed. -#[derive(Debug)] -pub enum MaybeDone { - /// A not-yet-completed future - Future(Fut), - /// The output of the completed future - Done(Fut::Output), - /// The empty variant after the result of a [`MaybeDone`] has been - /// taken using the [`take_output`](MaybeDone::take_output) method. - Gone, -} - -// Safe because we never generate `Pin<&mut Fut::Output>` -impl Unpin for MaybeDone {} - -/// Wraps a future into a `MaybeDone` -pub fn maybe_done(future: Fut) -> MaybeDone { - MaybeDone::Future(future) -} - -impl MaybeDone { - /// Returns an [`Option`] containing a mutable reference to the output of the future. - /// The output of this method will be [`Some`] if and only if the inner - /// future has been completed and [`take_output`](MaybeDone::take_output) - /// has not yet been called. - pub fn output_mut(self: Pin<&mut Self>) -> Option<&mut Fut::Output> { - unsafe { - let this = self.get_unchecked_mut(); - match this { - MaybeDone::Done(res) => Some(res), - _ => None, - } - } - } - - /// Attempts to take the output of a `MaybeDone` without driving it - /// towards completion. 
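`MaybeDone` here backs the internal `try_join3` helper; the `futures` crate ships the same combinator publicly with an identical `take_output` contract, which the following sketch uses to illustrate the state transitions (Future, then Done, then Gone):

```rust
use futures::future::maybe_done;

#[tokio::main]
async fn main() {
    let mut fut = Box::pin(maybe_done(async { 21 * 2 }));
    // Polling the wrapper to completion stores the inner output (Done state).
    (&mut fut).await;
    // take_output moves the value out, leaving the Gone state behind.
    assert_eq!(fut.as_mut().take_output(), Some(42));
    assert_eq!(fut.as_mut().take_output(), None);
}
```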
- #[inline] - pub fn take_output(self: Pin<&mut Self>) -> Option { - unsafe { - let this = self.get_unchecked_mut(); - match this { - MaybeDone::Done(_) => {} - MaybeDone::Future(_) | MaybeDone::Gone => return None, - }; - if let MaybeDone::Done(output) = mem::replace(this, MaybeDone::Gone) { - Some(output) - } else { - unreachable!() - } - } - } -} - -impl Future for MaybeDone { - type Output = (); - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let res = unsafe { - match self.as_mut().get_unchecked_mut() { - MaybeDone::Future(a) => ready!(Pin::new_unchecked(a).poll(cx)), - MaybeDone::Done(_) => return Poll::Ready(()), - MaybeDone::Gone => panic!("MaybeDone polled after value taken"), - } - }; - self.set(MaybeDone::Done(res)); - Poll::Ready(()) - } -} diff --git a/third_party/rust/tokio-0.2.25/src/future/mod.rs b/third_party/rust/tokio-0.2.25/src/future/mod.rs deleted file mode 100644 index 770753f31916..000000000000 --- a/third_party/rust/tokio-0.2.25/src/future/mod.rs +++ /dev/null @@ -1,15 +0,0 @@ -#![allow(unused_imports, dead_code)] - -//! Asynchronous values. - -mod maybe_done; -pub use maybe_done::{maybe_done, MaybeDone}; - -mod poll_fn; -pub use poll_fn::poll_fn; - -mod ready; -pub(crate) use ready::{ok, Ready}; - -mod try_join; -pub(crate) use try_join::try_join3; diff --git a/third_party/rust/tokio-0.2.25/src/future/pending.rs b/third_party/rust/tokio-0.2.25/src/future/pending.rs deleted file mode 100644 index 287e836fd3ca..000000000000 --- a/third_party/rust/tokio-0.2.25/src/future/pending.rs +++ /dev/null @@ -1,44 +0,0 @@ -use sdt::pin::Pin; -use std::future::Future; -use std::marker; -use std::task::{Context, Poll}; - -/// Future for the [`pending()`] function. -#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -struct Pending { - _data: marker::PhantomData, -} - -/// Creates a future which never resolves, representing a computation that never -/// finishes. -/// -/// The returned future will forever return [`Poll::Pending`]. -/// -/// # Examples -/// -/// ```no_run -/// use tokio::future; -/// -/// #[tokio::main] -/// async fn main { -/// future::pending().await; -/// unreachable!(); -/// } -/// ``` -pub async fn pending() -> ! { - Pending { - _data: marker::PhantomData, - } - .await -} - -impl Future for Pending { - type Output = !; - - fn poll(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll { - Poll::Pending - } -} - -impl Unpin for Pending {} diff --git a/third_party/rust/tokio-0.2.25/src/future/poll_fn.rs b/third_party/rust/tokio-0.2.25/src/future/poll_fn.rs deleted file mode 100644 index 9b3d1370ea92..000000000000 --- a/third_party/rust/tokio-0.2.25/src/future/poll_fn.rs +++ /dev/null @@ -1,38 +0,0 @@ -//! Definition of the `PollFn` adapter combinator - -use std::fmt; -use std::future::Future; -use std::pin::Pin; -use std::task::{Context, Poll}; - -/// Future for the [`poll_fn`] function. -pub struct PollFn { - f: F, -} - -impl Unpin for PollFn {} - -/// Creates a new future wrapping around a function returning [`Poll`]. 
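This `poll_fn` is used internally by tokio; the `futures` crate exposes an equivalent combinator publicly, which the following minimal sketch uses. The closure is invoked on every poll with the task's `Context` and here resolves immediately:

```rust
use futures::future::poll_fn;
use std::task::Poll;

#[tokio::main]
async fn main() {
    // The closure acts as the future's poll implementation.
    let answer = poll_fn(|_cx| Poll::Ready(42)).await;
    assert_eq!(answer, 42);
}
```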
-pub fn poll_fn(f: F) -> PollFn -where - F: FnMut(&mut Context<'_>) -> Poll, -{ - PollFn { f } -} - -impl fmt::Debug for PollFn { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("PollFn").finish() - } -} - -impl Future for PollFn -where - F: FnMut(&mut Context<'_>) -> Poll, -{ - type Output = T; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - (&mut self.f)(cx) - } -} diff --git a/third_party/rust/tokio-0.2.25/src/future/ready.rs b/third_party/rust/tokio-0.2.25/src/future/ready.rs deleted file mode 100644 index de2d60c13a2a..000000000000 --- a/third_party/rust/tokio-0.2.25/src/future/ready.rs +++ /dev/null @@ -1,27 +0,0 @@ -use std::future::Future; -use std::pin::Pin; -use std::task::{Context, Poll}; - -/// Future for the [`ok`](ok()) function. -/// -/// `pub` in order to use the future as an associated type in a sealed trait. -#[derive(Debug)] -// Used as an associated type in a "sealed" trait. -#[allow(unreachable_pub)] -pub struct Ready(Option); - -impl Unpin for Ready {} - -impl Future for Ready { - type Output = T; - - #[inline] - fn poll(mut self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll { - Poll::Ready(self.0.take().unwrap()) - } -} - -/// Creates a future that is immediately ready with a success value. -pub(crate) fn ok(t: T) -> Ready> { - Ready(Some(Ok(t))) -} diff --git a/third_party/rust/tokio-0.2.25/src/future/try_join.rs b/third_party/rust/tokio-0.2.25/src/future/try_join.rs deleted file mode 100644 index 5bd80dc89a2b..000000000000 --- a/third_party/rust/tokio-0.2.25/src/future/try_join.rs +++ /dev/null @@ -1,82 +0,0 @@ -use crate::future::{maybe_done, MaybeDone}; - -use pin_project_lite::pin_project; -use std::future::Future; -use std::pin::Pin; -use std::task::{Context, Poll}; - -pub(crate) fn try_join3( - future1: F1, - future2: F2, - future3: F3, -) -> TryJoin3 -where - F1: Future>, - F2: Future>, - F3: Future>, -{ - TryJoin3 { - future1: maybe_done(future1), - future2: maybe_done(future2), - future3: maybe_done(future3), - } -} - -pin_project! 
{ - pub(crate) struct TryJoin3 - where - F1: Future, - F2: Future, - F3: Future, - { - #[pin] - future1: MaybeDone, - #[pin] - future2: MaybeDone, - #[pin] - future3: MaybeDone, - } -} - -impl Future for TryJoin3 -where - F1: Future>, - F2: Future>, - F3: Future>, -{ - type Output = Result<(T1, T2, T3), E>; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let mut all_done = true; - - let mut me = self.project(); - - if me.future1.as_mut().poll(cx).is_pending() { - all_done = false; - } else if me.future1.as_mut().output_mut().unwrap().is_err() { - return Poll::Ready(Err(me.future1.take_output().unwrap().err().unwrap())); - } - - if me.future2.as_mut().poll(cx).is_pending() { - all_done = false; - } else if me.future2.as_mut().output_mut().unwrap().is_err() { - return Poll::Ready(Err(me.future2.take_output().unwrap().err().unwrap())); - } - - if me.future3.as_mut().poll(cx).is_pending() { - all_done = false; - } else if me.future3.as_mut().output_mut().unwrap().is_err() { - return Poll::Ready(Err(me.future3.take_output().unwrap().err().unwrap())); - } - - if all_done { - Poll::Ready(Ok(( - me.future1.take_output().unwrap().ok().unwrap(), - me.future2.take_output().unwrap().ok().unwrap(), - me.future3.take_output().unwrap().ok().unwrap(), - ))) - } else { - Poll::Pending - } - } -} diff --git a/third_party/rust/tokio-0.2.25/src/io/async_buf_read.rs b/third_party/rust/tokio-0.2.25/src/io/async_buf_read.rs deleted file mode 100644 index ecaafba4c274..000000000000 --- a/third_party/rust/tokio-0.2.25/src/io/async_buf_read.rs +++ /dev/null @@ -1,117 +0,0 @@ -use crate::io::AsyncRead; - -use std::io; -use std::ops::DerefMut; -use std::pin::Pin; -use std::task::{Context, Poll}; - -/// Reads bytes asynchronously. -/// -/// This trait is analogous to [`std::io::BufRead`], but integrates with -/// the asynchronous task system. In particular, the [`poll_fill_buf`] method, -/// unlike [`BufRead::fill_buf`], will automatically queue the current task for wakeup -/// and return if data is not yet available, rather than blocking the calling -/// thread. -/// -/// Utilities for working with `AsyncBufRead` values are provided by -/// [`AsyncBufReadExt`]. -/// -/// [`std::io::BufRead`]: std::io::BufRead -/// [`poll_fill_buf`]: AsyncBufRead::poll_fill_buf -/// [`BufRead::fill_buf`]: std::io::BufRead::fill_buf -/// [`AsyncBufReadExt`]: crate::io::AsyncBufReadExt -pub trait AsyncBufRead: AsyncRead { - /// Attempts to return the contents of the internal buffer, filling it with more data - /// from the inner reader if it is empty. - /// - /// On success, returns `Poll::Ready(Ok(buf))`. - /// - /// If no data is available for reading, the method returns - /// `Poll::Pending` and arranges for the current task (via - /// `cx.waker().wake_by_ref()`) to receive a notification when the object becomes - /// readable or is closed. - /// - /// This function is a lower-level call. It needs to be paired with the - /// [`consume`] method to function properly. When calling this - /// method, none of the contents will be "read" in the sense that later - /// calling [`poll_read`] may return the same contents. As such, [`consume`] must - /// be called with the number of bytes that are consumed from this buffer to - /// ensure that the bytes are never returned twice. - /// - /// An empty buffer returned indicates that the stream has reached EOF. 
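The `poll_fill_buf`/`consume` pair described above is usually driven indirectly through `AsyncBufReadExt` helpers such as `read_line`. A small sketch, assuming tokio 0.2 with the `io-util` and `macros` features, reading one line through a `BufReader` over an in-memory slice:

```rust
use tokio::io::{AsyncBufReadExt, BufReader};

#[tokio::main]
async fn main() -> std::io::Result<()> {
    // &[u8] implements AsyncRead, so it stands in for a file or socket here.
    let mut reader = BufReader::new(&b"first line\nsecond line\n"[..]);
    let mut line = String::new();
    let n = reader.read_line(&mut line).await?;
    assert_eq!(n, 11);
    assert_eq!(line, "first line\n");
    Ok(())
}
```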
- /// - /// [`poll_read`]: AsyncRead::poll_read - /// [`consume`]: AsyncBufRead::consume - fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll>; - - /// Tells this buffer that `amt` bytes have been consumed from the buffer, - /// so they should no longer be returned in calls to [`poll_read`]. - /// - /// This function is a lower-level call. It needs to be paired with the - /// [`poll_fill_buf`] method to function properly. This function does - /// not perform any I/O, it simply informs this object that some amount of - /// its buffer, returned from [`poll_fill_buf`], has been consumed and should - /// no longer be returned. As such, this function may do odd things if - /// [`poll_fill_buf`] isn't called before calling it. - /// - /// The `amt` must be `<=` the number of bytes in the buffer returned by - /// [`poll_fill_buf`]. - /// - /// [`poll_read`]: AsyncRead::poll_read - /// [`poll_fill_buf`]: AsyncBufRead::poll_fill_buf - fn consume(self: Pin<&mut Self>, amt: usize); -} - -macro_rules! deref_async_buf_read { - () => { - fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Pin::new(&mut **self.get_mut()).poll_fill_buf(cx) - } - - fn consume(mut self: Pin<&mut Self>, amt: usize) { - Pin::new(&mut **self).consume(amt) - } - }; -} - -impl AsyncBufRead for Box { - deref_async_buf_read!(); -} - -impl AsyncBufRead for &mut T { - deref_async_buf_read!(); -} - -impl

<P> AsyncBufRead for Pin<P>

-where - P: DerefMut + Unpin, - P::Target: AsyncBufRead, -{ - fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.get_mut().as_mut().poll_fill_buf(cx) - } - - fn consume(self: Pin<&mut Self>, amt: usize) { - self.get_mut().as_mut().consume(amt) - } -} - -impl AsyncBufRead for &[u8] { - fn poll_fill_buf(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(*self)) - } - - fn consume(mut self: Pin<&mut Self>, amt: usize) { - *self = &self[amt..]; - } -} - -impl + Unpin> AsyncBufRead for io::Cursor { - fn poll_fill_buf(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { - Poll::Ready(io::BufRead::fill_buf(self.get_mut())) - } - - fn consume(self: Pin<&mut Self>, amt: usize) { - io::BufRead::consume(self.get_mut(), amt) - } -} diff --git a/third_party/rust/tokio-0.2.25/src/io/async_read.rs b/third_party/rust/tokio-0.2.25/src/io/async_read.rs deleted file mode 100644 index 1aef41501663..000000000000 --- a/third_party/rust/tokio-0.2.25/src/io/async_read.rs +++ /dev/null @@ -1,205 +0,0 @@ -use bytes::BufMut; -use std::io; -use std::mem::MaybeUninit; -use std::ops::DerefMut; -use std::pin::Pin; -use std::task::{Context, Poll}; - -/// Reads bytes from a source. -/// -/// This trait is analogous to the [`std::io::Read`] trait, but integrates with -/// the asynchronous task system. In particular, the [`poll_read`] method, -/// unlike [`Read::read`], will automatically queue the current task for wakeup -/// and return if data is not yet available, rather than blocking the calling -/// thread. -/// -/// Specifically, this means that the `poll_read` function will return one of -/// the following: -/// -/// * `Poll::Ready(Ok(n))` means that `n` bytes of data was immediately read -/// and placed into the output buffer, where `n` == 0 implies that EOF has -/// been reached. -/// -/// * `Poll::Pending` means that no data was read into the buffer -/// provided. The I/O object is not currently readable but may become readable -/// in the future. Most importantly, **the current future's task is scheduled -/// to get unparked when the object is readable**. This means that like -/// `Future::poll` you'll receive a notification when the I/O object is -/// readable again. -/// -/// * `Poll::Ready(Err(e))` for other errors are standard I/O errors coming from the -/// underlying object. -/// -/// This trait importantly means that the `read` method only works in the -/// context of a future's task. The object may panic if used outside of a task. -/// -/// Utilities for working with `AsyncRead` values are provided by -/// [`AsyncReadExt`]. -/// -/// [`poll_read`]: AsyncRead::poll_read -/// [`std::io::Read`]: std::io::Read -/// [`Read::read`]: std::io::Read::read -/// [`AsyncReadExt`]: crate::io::AsyncReadExt -pub trait AsyncRead { - /// Prepares an uninitialized buffer to be safe to pass to `read`. Returns - /// `true` if the supplied buffer was zeroed out. - /// - /// While it would be highly unusual, implementations of [`io::Read`] are - /// able to read data from the buffer passed as an argument. Because of - /// this, the buffer passed to [`io::Read`] must be initialized memory. In - /// situations where large numbers of buffers are used, constantly having to - /// zero out buffers can be expensive. - /// - /// This function does any necessary work to prepare an uninitialized buffer - /// to be safe to pass to `read`. 
If `read` guarantees to never attempt to - /// read data out of the supplied buffer, then `prepare_uninitialized_buffer` - /// doesn't need to do any work. - /// - /// If this function returns `true`, then the memory has been zeroed out. - /// This allows implementations of `AsyncRead` which are composed of - /// multiple subimplementations to efficiently implement - /// `prepare_uninitialized_buffer`. - /// - /// This function isn't actually `unsafe` to call but `unsafe` to implement. - /// The implementer must ensure that either the whole `buf` has been zeroed - /// or `poll_read_buf()` overwrites the buffer without reading it and returns - /// correct value. - /// - /// This function is called from [`poll_read_buf`]. - /// - /// # Safety - /// - /// Implementations that return `false` must never read from data slices - /// that they did not write to. - /// - /// [`io::Read`]: std::io::Read - /// [`poll_read_buf`]: method@Self::poll_read_buf - unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [MaybeUninit]) -> bool { - for x in buf { - *x = MaybeUninit::new(0); - } - - true - } - - /// Attempts to read from the `AsyncRead` into `buf`. - /// - /// On success, returns `Poll::Ready(Ok(num_bytes_read))`. - /// - /// If no data is available for reading, the method returns - /// `Poll::Pending` and arranges for the current task (via - /// `cx.waker()`) to receive a notification when the object becomes - /// readable or is closed. - fn poll_read( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll>; - - /// Pulls some bytes from this source into the specified `BufMut`, returning - /// how many bytes were read. - /// - /// The `buf` provided will have bytes read into it and the internal cursor - /// will be advanced if any bytes were read. Note that this method typically - /// will not reallocate the buffer provided. - fn poll_read_buf( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut B, - ) -> Poll> - where - Self: Sized, - { - if !buf.has_remaining_mut() { - return Poll::Ready(Ok(0)); - } - - unsafe { - let n = { - let b = buf.bytes_mut(); - - self.prepare_uninitialized_buffer(b); - - // Convert to `&mut [u8]` - let b = &mut *(b as *mut [MaybeUninit] as *mut [u8]); - - let n = ready!(self.poll_read(cx, b))?; - assert!(n <= b.len(), "Bad AsyncRead implementation, more bytes were reported as read than the buffer can hold"); - n - }; - - buf.advance_mut(n); - Poll::Ready(Ok(n)) - } - } -} - -macro_rules! deref_async_read { - () => { - unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [MaybeUninit]) -> bool { - (**self).prepare_uninitialized_buffer(buf) - } - - fn poll_read( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { - Pin::new(&mut **self).poll_read(cx, buf) - } - }; -} - -impl AsyncRead for Box { - deref_async_read!(); -} - -impl AsyncRead for &mut T { - deref_async_read!(); -} - -impl

<P> AsyncRead for Pin<P>

-where - P: DerefMut + Unpin, - P::Target: AsyncRead, -{ - unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [MaybeUninit]) -> bool { - (**self).prepare_uninitialized_buffer(buf) - } - - fn poll_read( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { - self.get_mut().as_mut().poll_read(cx, buf) - } -} - -impl AsyncRead for &[u8] { - unsafe fn prepare_uninitialized_buffer(&self, _buf: &mut [MaybeUninit]) -> bool { - false - } - - fn poll_read( - self: Pin<&mut Self>, - _cx: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { - Poll::Ready(io::Read::read(self.get_mut(), buf)) - } -} - -impl + Unpin> AsyncRead for io::Cursor { - unsafe fn prepare_uninitialized_buffer(&self, _buf: &mut [MaybeUninit]) -> bool { - false - } - - fn poll_read( - self: Pin<&mut Self>, - _cx: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { - Poll::Ready(io::Read::read(self.get_mut(), buf)) - } -} diff --git a/third_party/rust/tokio-0.2.25/src/io/async_seek.rs b/third_party/rust/tokio-0.2.25/src/io/async_seek.rs deleted file mode 100644 index 32ed0a22ab9f..000000000000 --- a/third_party/rust/tokio-0.2.25/src/io/async_seek.rs +++ /dev/null @@ -1,101 +0,0 @@ -use std::io::{self, SeekFrom}; -use std::ops::DerefMut; -use std::pin::Pin; -use std::task::{Context, Poll}; - -/// Seek bytes asynchronously. -/// -/// This trait is analogous to the [`std::io::Seek`] trait, but integrates -/// with the asynchronous task system. In particular, the `start_seek` -/// method, unlike [`Seek::seek`], will not block the calling thread. -/// -/// Utilities for working with `AsyncSeek` values are provided by -/// [`AsyncSeekExt`]. -/// -/// [`std::io::Seek`]: std::io::Seek -/// [`Seek::seek`]: std::io::Seek::seek() -/// [`AsyncSeekExt`]: crate::io::AsyncSeekExt -pub trait AsyncSeek { - /// Attempts to seek to an offset, in bytes, in a stream. - /// - /// A seek beyond the end of a stream is allowed, but behavior is defined - /// by the implementation. - /// - /// If this function returns successfully, then the job has been submitted. - /// To find out when it completes, call `poll_complete`. - fn start_seek( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - position: SeekFrom, - ) -> Poll>; - - /// Waits for a seek operation to complete. - /// - /// If the seek operation completed successfully, - /// this method returns the new position from the start of the stream. - /// That position can be used later with [`SeekFrom::Start`]. - /// - /// # Errors - /// - /// Seeking to a negative offset is considered an error. - /// - /// # Panics - /// - /// Calling this method without calling `start_seek` first is an error. - fn poll_complete(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll>; -} - -macro_rules! deref_async_seek { - () => { - fn start_seek( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - pos: SeekFrom, - ) -> Poll> { - Pin::new(&mut **self).start_seek(cx, pos) - } - - fn poll_complete(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Pin::new(&mut **self).poll_complete(cx) - } - }; -} - -impl AsyncSeek for Box { - deref_async_seek!(); -} - -impl AsyncSeek for &mut T { - deref_async_seek!(); -} - -impl

<P> AsyncSeek for Pin<P>

-where - P: DerefMut + Unpin, - P::Target: AsyncSeek, -{ - fn start_seek( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - pos: SeekFrom, - ) -> Poll> { - self.get_mut().as_mut().start_seek(cx, pos) - } - - fn poll_complete(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.get_mut().as_mut().poll_complete(cx) - } -} - -impl + Unpin> AsyncSeek for io::Cursor { - fn start_seek( - mut self: Pin<&mut Self>, - _: &mut Context<'_>, - pos: SeekFrom, - ) -> Poll> { - Poll::Ready(io::Seek::seek(&mut *self, pos).map(drop)) - } - fn poll_complete(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(self.get_mut().position())) - } -} diff --git a/third_party/rust/tokio-0.2.25/src/io/async_write.rs b/third_party/rust/tokio-0.2.25/src/io/async_write.rs deleted file mode 100644 index ecf7575b1289..000000000000 --- a/third_party/rust/tokio-0.2.25/src/io/async_write.rs +++ /dev/null @@ -1,293 +0,0 @@ -use bytes::Buf; -use std::io; -use std::ops::DerefMut; -use std::pin::Pin; -use std::task::{Context, Poll}; - -/// Writes bytes asynchronously. -/// -/// The trait inherits from [`std::io::Write`] and indicates that an I/O object is -/// **nonblocking**. All non-blocking I/O objects must return an error when -/// bytes cannot be written instead of blocking the current thread. -/// -/// Specifically, this means that the [`poll_write`] function will return one of -/// the following: -/// -/// * `Poll::Ready(Ok(n))` means that `n` bytes of data was immediately -/// written. -/// -/// * `Poll::Pending` means that no data was written from the buffer -/// provided. The I/O object is not currently writable but may become writable -/// in the future. Most importantly, **the current future's task is scheduled -/// to get unparked when the object is writable**. This means that like -/// `Future::poll` you'll receive a notification when the I/O object is -/// writable again. -/// -/// * `Poll::Ready(Err(e))` for other errors are standard I/O errors coming from the -/// underlying object. -/// -/// This trait importantly means that the [`write`][stdwrite] method only works in -/// the context of a future's task. The object may panic if used outside of a task. -/// -/// Note that this trait also represents that the [`Write::flush`][stdflush] method -/// works very similarly to the `write` method, notably that `Ok(())` means that the -/// writer has successfully been flushed, a "would block" error means that the -/// current task is ready to receive a notification when flushing can make more -/// progress, and otherwise normal errors can happen as well. -/// -/// Utilities for working with `AsyncWrite` values are provided by -/// [`AsyncWriteExt`]. -/// -/// [`std::io::Write`]: std::io::Write -/// [`poll_write`]: AsyncWrite::poll_write() -/// [stdwrite]: std::io::Write::write() -/// [stdflush]: std::io::Write::flush() -/// [`AsyncWriteExt`]: crate::io::AsyncWriteExt -pub trait AsyncWrite { - /// Attempt to write bytes from `buf` into the object. - /// - /// On success, returns `Poll::Ready(Ok(num_bytes_written))`. - /// - /// If the object is not ready for writing, the method returns - /// `Poll::Pending` and arranges for the current task (via - /// `cx.waker()`) to receive a notification when the object becomes - /// writable or is closed. - fn poll_write( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll>; - - /// Attempts to flush the object, ensuring that any buffered data reach - /// their destination. - /// - /// On success, returns `Poll::Ready(Ok(()))`. 
- /// - /// If flushing cannot immediately complete, this method returns - /// `Poll::Pending` and arranges for the current task (via - /// `cx.waker()`) to receive a notification when the object can make - /// progress towards flushing. - fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll>; - - /// Initiates or attempts to shut down this writer, returning success when - /// the I/O connection has completely shut down. - /// - /// This method is intended to be used for asynchronous shutdown of I/O - /// connections. For example this is suitable for implementing shutdown of a - /// TLS connection or calling `TcpStream::shutdown` on a proxied connection. - /// Protocols sometimes need to flush out final pieces of data or otherwise - /// perform a graceful shutdown handshake, reading/writing more data as - /// appropriate. This method is the hook for such protocols to implement the - /// graceful shutdown logic. - /// - /// This `shutdown` method is required by implementers of the - /// `AsyncWrite` trait. Wrappers typically just want to proxy this call - /// through to the wrapped type, and base types will typically implement - /// shutdown logic here or just return `Ok(().into())`. Note that if you're - /// wrapping an underlying `AsyncWrite` a call to `shutdown` implies that - /// transitively the entire stream has been shut down. After your wrapper's - /// shutdown logic has been executed you should shut down the underlying - /// stream. - /// - /// Invocation of a `shutdown` implies an invocation of `flush`. Once this - /// method returns `Ready` it implies that a flush successfully happened - /// before the shutdown happened. That is, callers don't need to call - /// `flush` before calling `shutdown`. They can rely that by calling - /// `shutdown` any pending buffered data will be written out. - /// - /// # Return value - /// - /// This function returns a `Poll>` classified as such: - /// - /// * `Poll::Ready(Ok(()))` - indicates that the connection was - /// successfully shut down and is now safe to deallocate/drop/close - /// resources associated with it. This method means that the current task - /// will no longer receive any notifications due to this method and the - /// I/O object itself is likely no longer usable. - /// - /// * `Poll::Pending` - indicates that shutdown is initiated but could - /// not complete just yet. This may mean that more I/O needs to happen to - /// continue this shutdown operation. The current task is scheduled to - /// receive a notification when it's otherwise ready to continue the - /// shutdown operation. When woken up this method should be called again. - /// - /// * `Poll::Ready(Err(e))` - indicates a fatal error has happened with shutdown, - /// indicating that the shutdown operation did not complete successfully. - /// This typically means that the I/O object is no longer usable. - /// - /// # Errors - /// - /// This function can return normal I/O errors through `Err`, described - /// above. Additionally this method may also render the underlying - /// `Write::write` method no longer usable (e.g. will return errors in the - /// future). It's recommended that once `shutdown` is called the - /// `write` method is no longer called. - /// - /// # Panics - /// - /// This function will panic if not called within the context of a future's - /// task. - fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll>; - - /// Writes a `Buf` into this value, returning how many bytes were written. 
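Since `shutdown` implies a flush, as described above, callers normally finish a writer with a single `AsyncWriteExt::shutdown` call. A minimal sketch, assuming the `io-util` and `macros` features, using `Vec<u8>` as an in-memory `AsyncWrite` sink:

```rust
use tokio::io::AsyncWriteExt;

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let mut sink: Vec<u8> = Vec::new();
    sink.write_all(b"hello").await?;
    // shutdown() flushes first; once it returns Ready the writer is finished.
    sink.shutdown().await?;
    assert_eq!(sink, b"hello".to_vec());
    Ok(())
}
```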
- /// - /// Note that this method will advance the `buf` provided automatically by - /// the number of bytes written. - fn poll_write_buf( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut B, - ) -> Poll> - where - Self: Sized, - { - if !buf.has_remaining() { - return Poll::Ready(Ok(0)); - } - - let n = ready!(self.poll_write(cx, buf.bytes()))?; - buf.advance(n); - Poll::Ready(Ok(n)) - } -} - -macro_rules! deref_async_write { - () => { - fn poll_write( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - Pin::new(&mut **self).poll_write(cx, buf) - } - - fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Pin::new(&mut **self).poll_flush(cx) - } - - fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Pin::new(&mut **self).poll_shutdown(cx) - } - }; -} - -impl AsyncWrite for Box { - deref_async_write!(); -} - -impl AsyncWrite for &mut T { - deref_async_write!(); -} - -impl

<P> AsyncWrite for Pin<P>

-where - P: DerefMut + Unpin, - P::Target: AsyncWrite, -{ - fn poll_write( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - self.get_mut().as_mut().poll_write(cx, buf) - } - - fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.get_mut().as_mut().poll_flush(cx) - } - - fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.get_mut().as_mut().poll_shutdown(cx) - } -} - -impl AsyncWrite for Vec { - fn poll_write( - self: Pin<&mut Self>, - _cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - self.get_mut().extend_from_slice(buf); - Poll::Ready(Ok(buf.len())) - } - - fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } - - fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } -} - -impl AsyncWrite for io::Cursor<&mut [u8]> { - fn poll_write( - mut self: Pin<&mut Self>, - _: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - Poll::Ready(io::Write::write(&mut *self, buf)) - } - - fn poll_flush(mut self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - Poll::Ready(io::Write::flush(&mut *self)) - } - - fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.poll_flush(cx) - } -} - -impl AsyncWrite for io::Cursor<&mut Vec> { - fn poll_write( - mut self: Pin<&mut Self>, - _: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - Poll::Ready(io::Write::write(&mut *self, buf)) - } - - fn poll_flush(mut self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - Poll::Ready(io::Write::flush(&mut *self)) - } - - fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.poll_flush(cx) - } -} - -impl AsyncWrite for io::Cursor> { - fn poll_write( - mut self: Pin<&mut Self>, - _: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - Poll::Ready(io::Write::write(&mut *self, buf)) - } - - fn poll_flush(mut self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - Poll::Ready(io::Write::flush(&mut *self)) - } - - fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.poll_flush(cx) - } -} - -impl AsyncWrite for io::Cursor> { - fn poll_write( - mut self: Pin<&mut Self>, - _: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - Poll::Ready(io::Write::write(&mut *self, buf)) - } - - fn poll_flush(mut self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - Poll::Ready(io::Write::flush(&mut *self)) - } - - fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.poll_flush(cx) - } -} diff --git a/third_party/rust/tokio-0.2.25/src/io/blocking.rs b/third_party/rust/tokio-0.2.25/src/io/blocking.rs deleted file mode 100644 index 2491039a3f30..000000000000 --- a/third_party/rust/tokio-0.2.25/src/io/blocking.rs +++ /dev/null @@ -1,279 +0,0 @@ -use crate::io::sys; -use crate::io::{AsyncRead, AsyncWrite}; - -use std::cmp; -use std::future::Future; -use std::io; -use std::io::prelude::*; -use std::pin::Pin; -use std::task::Poll::*; -use std::task::{Context, Poll}; - -use self::State::*; - -/// `T` should not implement _both_ Read and Write. -#[derive(Debug)] -pub(crate) struct Blocking { - inner: Option, - state: State, - /// `true` if the lower IO layer needs flushing - need_flush: bool, -} - -#[derive(Debug)] -pub(crate) struct Buf { - buf: Vec, - pos: usize, -} - -pub(crate) const MAX_BUF: usize = 16 * 1024; - -#[derive(Debug)] -enum State { - Idle(Option), - Busy(sys::Blocking<(io::Result, Buf, T)>), -} - -cfg_io_std! 
{ - impl Blocking { - pub(crate) fn new(inner: T) -> Blocking { - Blocking { - inner: Some(inner), - state: State::Idle(Some(Buf::with_capacity(0))), - need_flush: false, - } - } - } -} - -impl AsyncRead for Blocking -where - T: Read + Unpin + Send + 'static, -{ - fn poll_read( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - dst: &mut [u8], - ) -> Poll> { - loop { - match self.state { - Idle(ref mut buf_cell) => { - let mut buf = buf_cell.take().unwrap(); - - if !buf.is_empty() { - let n = buf.copy_to(dst); - *buf_cell = Some(buf); - return Ready(Ok(n)); - } - - buf.ensure_capacity_for(dst); - let mut inner = self.inner.take().unwrap(); - - self.state = Busy(sys::run(move || { - let res = buf.read_from(&mut inner); - (res, buf, inner) - })); - } - Busy(ref mut rx) => { - let (res, mut buf, inner) = ready!(Pin::new(rx).poll(cx))?; - self.inner = Some(inner); - - match res { - Ok(_) => { - let n = buf.copy_to(dst); - self.state = Idle(Some(buf)); - return Ready(Ok(n)); - } - Err(e) => { - assert!(buf.is_empty()); - - self.state = Idle(Some(buf)); - return Ready(Err(e)); - } - } - } - } - } - } -} - -impl AsyncWrite for Blocking -where - T: Write + Unpin + Send + 'static, -{ - fn poll_write( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - src: &[u8], - ) -> Poll> { - loop { - match self.state { - Idle(ref mut buf_cell) => { - let mut buf = buf_cell.take().unwrap(); - - assert!(buf.is_empty()); - - let n = buf.copy_from(src); - let mut inner = self.inner.take().unwrap(); - - self.state = Busy(sys::run(move || { - let n = buf.len(); - let res = buf.write_to(&mut inner).map(|_| n); - - (res, buf, inner) - })); - self.need_flush = true; - - return Ready(Ok(n)); - } - Busy(ref mut rx) => { - let (res, buf, inner) = ready!(Pin::new(rx).poll(cx))?; - self.state = Idle(Some(buf)); - self.inner = Some(inner); - - // If error, return - res?; - } - } - } - } - - fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - loop { - let need_flush = self.need_flush; - match self.state { - // The buffer is not used here - Idle(ref mut buf_cell) => { - if need_flush { - let buf = buf_cell.take().unwrap(); - let mut inner = self.inner.take().unwrap(); - - self.state = Busy(sys::run(move || { - let res = inner.flush().map(|_| 0); - (res, buf, inner) - })); - - self.need_flush = false; - } else { - return Ready(Ok(())); - } - } - Busy(ref mut rx) => { - let (res, buf, inner) = ready!(Pin::new(rx).poll(cx))?; - self.state = Idle(Some(buf)); - self.inner = Some(inner); - - // If error, return - res?; - } - } - } - } - - fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } -} - -/// Repeates operations that are interrupted -macro_rules! 
uninterruptibly { - ($e:expr) => {{ - loop { - match $e { - Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {} - res => break res, - } - } - }}; -} - -impl Buf { - pub(crate) fn with_capacity(n: usize) -> Buf { - Buf { - buf: Vec::with_capacity(n), - pos: 0, - } - } - - pub(crate) fn is_empty(&self) -> bool { - self.len() == 0 - } - - pub(crate) fn len(&self) -> usize { - self.buf.len() - self.pos - } - - pub(crate) fn copy_to(&mut self, dst: &mut [u8]) -> usize { - let n = cmp::min(self.len(), dst.len()); - dst[..n].copy_from_slice(&self.bytes()[..n]); - self.pos += n; - - if self.pos == self.buf.len() { - self.buf.truncate(0); - self.pos = 0; - } - - n - } - - pub(crate) fn copy_from(&mut self, src: &[u8]) -> usize { - assert!(self.is_empty()); - - let n = cmp::min(src.len(), MAX_BUF); - - self.buf.extend_from_slice(&src[..n]); - n - } - - pub(crate) fn bytes(&self) -> &[u8] { - &self.buf[self.pos..] - } - - pub(crate) fn ensure_capacity_for(&mut self, bytes: &[u8]) { - assert!(self.is_empty()); - - let len = cmp::min(bytes.len(), MAX_BUF); - - if self.buf.len() < len { - self.buf.reserve(len - self.buf.len()); - } - - unsafe { - self.buf.set_len(len); - } - } - - pub(crate) fn read_from(&mut self, rd: &mut T) -> io::Result { - let res = uninterruptibly!(rd.read(&mut self.buf)); - - if let Ok(n) = res { - self.buf.truncate(n); - } else { - self.buf.clear(); - } - - assert_eq!(self.pos, 0); - - res - } - - pub(crate) fn write_to(&mut self, wr: &mut T) -> io::Result<()> { - assert_eq!(self.pos, 0); - - // `write_all` already ignores interrupts - let res = wr.write_all(&self.buf); - self.buf.clear(); - res - } -} - -cfg_fs! { - impl Buf { - pub(crate) fn discard_read(&mut self) -> i64 { - let ret = -(self.bytes().len() as i64); - self.pos = 0; - self.buf.truncate(0); - ret - } - } -} diff --git a/third_party/rust/tokio-0.2.25/src/io/driver/mod.rs b/third_party/rust/tokio-0.2.25/src/io/driver/mod.rs deleted file mode 100644 index 3809cf40a021..000000000000 --- a/third_party/rust/tokio-0.2.25/src/io/driver/mod.rs +++ /dev/null @@ -1,403 +0,0 @@ -pub(crate) mod platform; - -mod scheduled_io; -pub(crate) use scheduled_io::ScheduledIo; // pub(crate) for tests - -use crate::loom::sync::atomic::AtomicUsize; -use crate::park::{Park, Unpark}; -use crate::runtime::context; -use crate::util::slab::{Address, Slab}; - -use mio::event::Evented; -use std::fmt; -use std::io; -use std::sync::atomic::Ordering::SeqCst; -use std::sync::{Arc, Weak}; -use std::task::Waker; -use std::time::Duration; - -/// I/O driver, backed by Mio -pub(crate) struct Driver { - /// Reuse the `mio::Events` value across calls to poll. - events: mio::Events, - - /// State shared between the reactor and the handles. - inner: Arc, - - _wakeup_registration: mio::Registration, -} - -/// A reference to an I/O driver -#[derive(Clone)] -pub(crate) struct Handle { - inner: Weak, -} - -pub(super) struct Inner { - /// The underlying system event queue. - io: mio::Poll, - - /// Dispatch slabs for I/O and futures events - pub(super) io_dispatch: Slab, - - /// The number of sources in `io_dispatch`. 
- n_sources: AtomicUsize, - - /// Used to wake up the reactor from a call to `turn` - wakeup: mio::SetReadiness, -} - -#[derive(Debug, Eq, PartialEq, Clone, Copy)] -pub(super) enum Direction { - Read, - Write, -} - -const TOKEN_WAKEUP: mio::Token = mio::Token(Address::NULL); - -fn _assert_kinds() { - fn _assert() {} - - _assert::(); -} - -// ===== impl Driver ===== - -impl Driver { - /// Creates a new event loop, returning any error that happened during the - /// creation. - pub(crate) fn new() -> io::Result { - let io = mio::Poll::new()?; - let wakeup_pair = mio::Registration::new2(); - - io.register( - &wakeup_pair.0, - TOKEN_WAKEUP, - mio::Ready::readable(), - mio::PollOpt::level(), - )?; - - Ok(Driver { - events: mio::Events::with_capacity(1024), - _wakeup_registration: wakeup_pair.0, - inner: Arc::new(Inner { - io, - io_dispatch: Slab::new(), - n_sources: AtomicUsize::new(0), - wakeup: wakeup_pair.1, - }), - }) - } - - /// Returns a handle to this event loop which can be sent across threads - /// and can be used as a proxy to the event loop itself. - /// - /// Handles are cloneable and clones always refer to the same event loop. - /// This handle is typically passed into functions that create I/O objects - /// to bind them to this event loop. - pub(crate) fn handle(&self) -> Handle { - Handle { - inner: Arc::downgrade(&self.inner), - } - } - - fn turn(&mut self, max_wait: Option) -> io::Result<()> { - // Block waiting for an event to happen, peeling out how many events - // happened. - match self.inner.io.poll(&mut self.events, max_wait) { - Ok(_) => {} - Err(e) => return Err(e), - } - - // Process all the events that came in, dispatching appropriately - - for event in self.events.iter() { - let token = event.token(); - - if token == TOKEN_WAKEUP { - self.inner - .wakeup - .set_readiness(mio::Ready::empty()) - .unwrap(); - } else { - self.dispatch(token, event.readiness()); - } - } - - Ok(()) - } - - fn dispatch(&self, token: mio::Token, ready: mio::Ready) { - let mut rd = None; - let mut wr = None; - - let address = Address::from_usize(token.0); - - let io = match self.inner.io_dispatch.get(address) { - Some(io) => io, - None => return, - }; - - if io - .set_readiness(address, |curr| curr | ready.as_usize()) - .is_err() - { - // token no longer valid! - return; - } - - if ready.is_writable() || platform::is_hup(ready) || platform::is_error(ready) { - wr = io.writer.take_waker(); - } - - if !(ready & (!mio::Ready::writable())).is_empty() { - rd = io.reader.take_waker(); - } - - if let Some(w) = rd { - w.wake(); - } - - if let Some(w) = wr { - w.wake(); - } - } -} - -impl Park for Driver { - type Unpark = Handle; - type Error = io::Error; - - fn unpark(&self) -> Self::Unpark { - self.handle() - } - - fn park(&mut self) -> io::Result<()> { - self.turn(None)?; - Ok(()) - } - - fn park_timeout(&mut self, duration: Duration) -> io::Result<()> { - self.turn(Some(duration))?; - Ok(()) - } - - fn shutdown(&mut self) {} -} - -impl fmt::Debug for Driver { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "Driver") - } -} - -// ===== impl Handle ===== - -impl Handle { - /// Returns a handle to the current reactor - /// - /// # Panics - /// - /// This function panics if there is no current reactor set. 
- pub(super) fn current() -> Self { - context::io_handle().expect( - "there is no reactor running, must be called from the context of a Tokio 0.2.x runtime", - ) - } - - /// Forces a reactor blocked in a call to `turn` to wakeup, or otherwise - /// makes the next call to `turn` return immediately. - /// - /// This method is intended to be used in situations where a notification - /// needs to otherwise be sent to the main reactor. If the reactor is - /// currently blocked inside of `turn` then it will wake up and soon return - /// after this method has been called. If the reactor is not currently - /// blocked in `turn`, then the next call to `turn` will not block and - /// return immediately. - fn wakeup(&self) { - if let Some(inner) = self.inner() { - inner.wakeup.set_readiness(mio::Ready::readable()).unwrap(); - } - } - - pub(super) fn inner(&self) -> Option> { - self.inner.upgrade() - } -} - -impl Unpark for Handle { - fn unpark(&self) { - self.wakeup(); - } -} - -impl fmt::Debug for Handle { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "Handle") - } -} - -// ===== impl Inner ===== - -impl Inner { - /// Registers an I/O resource with the reactor for a given `mio::Ready` state. - /// - /// The registration token is returned. - pub(super) fn add_source( - &self, - source: &dyn Evented, - ready: mio::Ready, - ) -> io::Result
<Address>
{ - let address = self.io_dispatch.alloc().ok_or_else(|| { - io::Error::new( - io::ErrorKind::Other, - "reactor at max registered I/O resources", - ) - })?; - - self.n_sources.fetch_add(1, SeqCst); - - self.io.register( - source, - mio::Token(address.to_usize()), - ready, - mio::PollOpt::edge(), - )?; - - Ok(address) - } - - /// Deregisters an I/O resource from the reactor. - pub(super) fn deregister_source(&self, source: &dyn Evented) -> io::Result<()> { - self.io.deregister(source) - } - - pub(super) fn drop_source(&self, address: Address) { - self.io_dispatch.remove(address); - self.n_sources.fetch_sub(1, SeqCst); - } - - /// Registers interest in the I/O resource associated with `token`. - pub(super) fn register(&self, token: Address, dir: Direction, w: Waker) { - let sched = self - .io_dispatch - .get(token) - .unwrap_or_else(|| panic!("IO resource for token {:?} does not exist!", token)); - - let waker = match dir { - Direction::Read => &sched.reader, - Direction::Write => &sched.writer, - }; - - waker.register(w); - } -} - -impl Direction { - pub(super) fn mask(self) -> mio::Ready { - match self { - Direction::Read => { - // Everything except writable is signaled through read. - mio::Ready::all() - mio::Ready::writable() - } - Direction::Write => mio::Ready::writable() | platform::hup() | platform::error(), - } - } -} - -#[cfg(all(test, loom))] -mod tests { - use super::*; - use loom::thread; - - // No-op `Evented` impl just so we can have something to pass to `add_source`. - struct NotEvented; - - impl Evented for NotEvented { - fn register( - &self, - _: &mio::Poll, - _: mio::Token, - _: mio::Ready, - _: mio::PollOpt, - ) -> io::Result<()> { - Ok(()) - } - - fn reregister( - &self, - _: &mio::Poll, - _: mio::Token, - _: mio::Ready, - _: mio::PollOpt, - ) -> io::Result<()> { - Ok(()) - } - - fn deregister(&self, _: &mio::Poll) -> io::Result<()> { - Ok(()) - } - } - - #[test] - fn tokens_unique_when_dropped() { - loom::model(|| { - let reactor = Driver::new().unwrap(); - let inner = reactor.inner; - let inner2 = inner.clone(); - - let token_1 = inner.add_source(&NotEvented, mio::Ready::all()).unwrap(); - let thread = thread::spawn(move || { - inner2.drop_source(token_1); - }); - - let token_2 = inner.add_source(&NotEvented, mio::Ready::all()).unwrap(); - thread.join().unwrap(); - - assert!(token_1 != token_2); - }) - } - - #[test] - fn tokens_unique_when_dropped_on_full_page() { - loom::model(|| { - let reactor = Driver::new().unwrap(); - let inner = reactor.inner; - let inner2 = inner.clone(); - // add sources to fill up the first page so that the dropped index - // may be reused. 
- for _ in 0..31 { - inner.add_source(&NotEvented, mio::Ready::all()).unwrap(); - } - - let token_1 = inner.add_source(&NotEvented, mio::Ready::all()).unwrap(); - let thread = thread::spawn(move || { - inner2.drop_source(token_1); - }); - - let token_2 = inner.add_source(&NotEvented, mio::Ready::all()).unwrap(); - thread.join().unwrap(); - - assert!(token_1 != token_2); - }) - } - - #[test] - fn tokens_unique_concurrent_add() { - loom::model(|| { - let reactor = Driver::new().unwrap(); - let inner = reactor.inner; - let inner2 = inner.clone(); - - let thread = thread::spawn(move || { - let token_2 = inner2.add_source(&NotEvented, mio::Ready::all()).unwrap(); - token_2 - }); - - let token_1 = inner.add_source(&NotEvented, mio::Ready::all()).unwrap(); - let token_2 = thread.join().unwrap(); - - assert!(token_1 != token_2); - }) - } -} diff --git a/third_party/rust/tokio-0.2.25/src/io/driver/platform.rs b/third_party/rust/tokio-0.2.25/src/io/driver/platform.rs deleted file mode 100644 index 6b27988ce61a..000000000000 --- a/third_party/rust/tokio-0.2.25/src/io/driver/platform.rs +++ /dev/null @@ -1,44 +0,0 @@ -pub(crate) use self::sys::*; - -#[cfg(unix)] -mod sys { - use mio::unix::UnixReady; - use mio::Ready; - - pub(crate) fn hup() -> Ready { - UnixReady::hup().into() - } - - pub(crate) fn is_hup(ready: Ready) -> bool { - UnixReady::from(ready).is_hup() - } - - pub(crate) fn error() -> Ready { - UnixReady::error().into() - } - - pub(crate) fn is_error(ready: Ready) -> bool { - UnixReady::from(ready).is_error() - } -} - -#[cfg(windows)] -mod sys { - use mio::Ready; - - pub(crate) fn hup() -> Ready { - Ready::empty() - } - - pub(crate) fn is_hup(_: Ready) -> bool { - false - } - - pub(crate) fn error() -> Ready { - Ready::empty() - } - - pub(crate) fn is_error(_: Ready) -> bool { - false - } -} diff --git a/third_party/rust/tokio-0.2.25/src/io/driver/scheduled_io.rs b/third_party/rust/tokio-0.2.25/src/io/driver/scheduled_io.rs deleted file mode 100644 index 7f6446e3f58f..000000000000 --- a/third_party/rust/tokio-0.2.25/src/io/driver/scheduled_io.rs +++ /dev/null @@ -1,141 +0,0 @@ -use crate::loom::future::AtomicWaker; -use crate::loom::sync::atomic::AtomicUsize; -use crate::util::bit; -use crate::util::slab::{Address, Entry, Generation}; - -use std::sync::atomic::Ordering::{AcqRel, Acquire, SeqCst}; - -#[derive(Debug)] -pub(crate) struct ScheduledIo { - readiness: AtomicUsize, - pub(crate) reader: AtomicWaker, - pub(crate) writer: AtomicWaker, -} - -const PACK: bit::Pack = bit::Pack::most_significant(Generation::WIDTH); - -impl Entry for ScheduledIo { - fn generation(&self) -> Generation { - unpack_generation(self.readiness.load(SeqCst)) - } - - fn reset(&self, generation: Generation) -> bool { - let mut current = self.readiness.load(Acquire); - - loop { - if unpack_generation(current) != generation { - return false; - } - - let next = PACK.pack(generation.next().to_usize(), 0); - - match self - .readiness - .compare_exchange(current, next, AcqRel, Acquire) - { - Ok(_) => break, - Err(actual) => current = actual, - } - } - - drop(self.reader.take_waker()); - drop(self.writer.take_waker()); - - true - } -} - -impl Default for ScheduledIo { - fn default() -> ScheduledIo { - ScheduledIo { - readiness: AtomicUsize::new(0), - reader: AtomicWaker::new(), - writer: AtomicWaker::new(), - } - } -} - -impl ScheduledIo { - #[cfg(all(test, loom))] - /// Returns the current readiness value of this `ScheduledIo`, if the - /// provided `token` is still a valid access. 
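The removed `ScheduledIo` above packs a slab generation into the most significant bits of its atomic `readiness` word, so a single CAS can both validate the generation and update the readiness bits. A self-contained sketch of that packing idea with plain shifts; the 7-bit width and the helper names are illustrative choices, not tokio's:

```rust
// Generation in the most significant bits, readiness bits below it.
const GEN_WIDTH: u32 = 7;
const GEN_SHIFT: u32 = usize::BITS - GEN_WIDTH;
const READY_MASK: usize = (1 << GEN_SHIFT) - 1;

fn pack(generation: usize, readiness: usize) -> usize {
    debug_assert!(generation < (1 << GEN_WIDTH));
    debug_assert!(readiness <= READY_MASK);
    (generation << GEN_SHIFT) | readiness
}

fn unpack_generation(word: usize) -> usize {
    word >> GEN_SHIFT
}

fn unpack_readiness(word: usize) -> usize {
    word & READY_MASK
}

fn main() {
    let word = pack(5, 0b110);
    assert_eq!(unpack_generation(word), 5);
    assert_eq!(unpack_readiness(word), 0b110);
}
```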
- /// - /// # Returns - /// - /// If the given token's generation no longer matches the `ScheduledIo`'s - /// generation, then the corresponding IO resource has been removed and - /// replaced with a new resource. In that case, this method returns `None`. - /// Otherwise, this returns the current readiness. - pub(crate) fn get_readiness(&self, address: Address) -> Option { - let ready = self.readiness.load(Acquire); - - if unpack_generation(ready) != address.generation() { - return None; - } - - Some(ready & !PACK.mask()) - } - - /// Sets the readiness on this `ScheduledIo` by invoking the given closure on - /// the current value, returning the previous readiness value. - /// - /// # Arguments - /// - `token`: the token for this `ScheduledIo`. - /// - `f`: a closure returning a new readiness value given the previous - /// readiness. - /// - /// # Returns - /// - /// If the given token's generation no longer matches the `ScheduledIo`'s - /// generation, then the corresponding IO resource has been removed and - /// replaced with a new resource. In that case, this method returns `Err`. - /// Otherwise, this returns the previous readiness. - pub(crate) fn set_readiness( - &self, - address: Address, - f: impl Fn(usize) -> usize, - ) -> Result { - let generation = address.generation(); - - let mut current = self.readiness.load(Acquire); - - loop { - // Check that the generation for this access is still the current - // one. - if unpack_generation(current) != generation { - return Err(()); - } - // Mask out the generation bits so that the modifying function - // doesn't see them. - let current_readiness = current & mio::Ready::all().as_usize(); - let new = f(current_readiness); - - debug_assert!( - new <= !PACK.max_value(), - "new readiness value would overwrite generation bits!" - ); - - match self.readiness.compare_exchange( - current, - PACK.pack(generation.to_usize(), new), - AcqRel, - Acquire, - ) { - Ok(_) => return Ok(current), - // we lost the race, retry! - Err(actual) => current = actual, - } - } - } -} - -impl Drop for ScheduledIo { - fn drop(&mut self) { - self.writer.wake(); - self.reader.wake(); - } -} - -fn unpack_generation(src: usize) -> Generation { - Generation::new(PACK.unpack(src)) -} diff --git a/third_party/rust/tokio-0.2.25/src/io/mod.rs b/third_party/rust/tokio-0.2.25/src/io/mod.rs deleted file mode 100644 index 37da942ff3d4..000000000000 --- a/third_party/rust/tokio-0.2.25/src/io/mod.rs +++ /dev/null @@ -1,256 +0,0 @@ -#![cfg_attr(loom, allow(dead_code, unreachable_pub))] - -//! Traits, helpers, and type definitions for asynchronous I/O functionality. -//! -//! This module is the asynchronous version of `std::io`. Primarily, it -//! defines two traits, [`AsyncRead`] and [`AsyncWrite`], which are asynchronous -//! versions of the [`Read`] and [`Write`] traits in the standard library. -//! -//! # AsyncRead and AsyncWrite -//! -//! Like the standard library's [`Read`] and [`Write`] traits, [`AsyncRead`] and -//! [`AsyncWrite`] provide the most general interface for reading and writing -//! input and output. Unlike the standard library's traits, however, they are -//! _asynchronous_ — meaning that reading from or writing to a `tokio::io` -//! type will _yield_ to the Tokio scheduler when IO is not ready, rather than -//! blocking. This allows other tasks to run while waiting on IO. -//! -//! Another difference is that `AsyncRead` and `AsyncWrite` only contain -//! core methods needed to provide asynchronous reading and writing -//! functionality. 
Instead, utility methods are defined in the [`AsyncReadExt`] -//! and [`AsyncWriteExt`] extension traits. These traits are automatically -//! implemented for all values that implement `AsyncRead` and `AsyncWrite` -//! respectively. -//! -//! End users will rarely interact directly with `AsyncRead` and -//! `AsyncWrite`. Instead, they will use the async functions defined in the -//! extension traits. Library authors are expected to implement `AsyncRead` -//! and `AsyncWrite` in order to provide types that behave like byte streams. -//! -//! Even with these differences, Tokio's `AsyncRead` and `AsyncWrite` traits -//! can be used in almost exactly the same manner as the standard library's -//! `Read` and `Write`. Most types in the standard library that implement `Read` -//! and `Write` have asynchronous equivalents in `tokio` that implement -//! `AsyncRead` and `AsyncWrite`, such as [`File`] and [`TcpStream`]. -//! -//! For example, the standard library documentation introduces `Read` by -//! [demonstrating][std_example] reading some bytes from a [`std::fs::File`]. We -//! can do the same with [`tokio::fs::File`][`File`]: -//! -//! ```no_run -//! use tokio::io::{self, AsyncReadExt}; -//! use tokio::fs::File; -//! -//! #[tokio::main] -//! async fn main() -> io::Result<()> { -//! let mut f = File::open("foo.txt").await?; -//! let mut buffer = [0; 10]; -//! -//! // read up to 10 bytes -//! let n = f.read(&mut buffer).await?; -//! -//! println!("The bytes: {:?}", &buffer[..n]); -//! Ok(()) -//! } -//! ``` -//! -//! [`File`]: crate::fs::File -//! [`TcpStream`]: crate::net::TcpStream -//! [`std::fs::File`]: std::fs::File -//! [std_example]: std::io#read-and-write -//! -//! ## Buffered Readers and Writers -//! -//! Byte-based interfaces are unwieldy and can be inefficient, as we'd need to be -//! making near-constant calls to the operating system. To help with this, -//! `std::io` comes with [support for _buffered_ readers and writers][stdbuf], -//! and therefore, `tokio::io` does as well. -//! -//! Tokio provides an async version of the [`std::io::BufRead`] trait, -//! [`AsyncBufRead`]; and async [`BufReader`] and [`BufWriter`] structs, which -//! wrap readers and writers. These wrappers use a buffer, reducing the number -//! of calls and providing nicer methods for accessing exactly what you want. -//! -//! For example, [`BufReader`] works with the [`AsyncBufRead`] trait to add -//! extra methods to any async reader: -//! -//! ```no_run -//! use tokio::io::{self, BufReader, AsyncBufReadExt}; -//! use tokio::fs::File; -//! -//! #[tokio::main] -//! async fn main() -> io::Result<()> { -//! let f = File::open("foo.txt").await?; -//! let mut reader = BufReader::new(f); -//! let mut buffer = String::new(); -//! -//! // read a line into buffer -//! reader.read_line(&mut buffer).await?; -//! -//! println!("{}", buffer); -//! Ok(()) -//! } -//! ``` -//! -//! [`BufWriter`] doesn't add any new ways of writing; it just buffers every call -//! to [`write`](crate::io::AsyncWriteExt::write). However, you **must** flush -//! [`BufWriter`] to ensure that any buffered data is written. -//! -//! ```no_run -//! use tokio::io::{self, BufWriter, AsyncWriteExt}; -//! use tokio::fs::File; -//! -//! #[tokio::main] -//! async fn main() -> io::Result<()> { -//! let f = File::create("foo.txt").await?; -//! { -//! let mut writer = BufWriter::new(f); -//! -//! // Write a byte to the buffer. -//! writer.write(&[42u8]).await?; -//! -//! // Flush the buffer before it goes out of scope. -//! writer.flush().await?; -//! -//! 
} // Unless flushed or shut down, the contents of the buffer is discarded on drop. -//! -//! Ok(()) -//! } -//! ``` -//! -//! [stdbuf]: std::io#bufreader-and-bufwriter -//! [`std::io::BufRead`]: std::io::BufRead -//! [`AsyncBufRead`]: crate::io::AsyncBufRead -//! [`BufReader`]: crate::io::BufReader -//! [`BufWriter`]: crate::io::BufWriter -//! -//! ## Implementing AsyncRead and AsyncWrite -//! -//! Because they are traits, we can implement [`AsyncRead`] and [`AsyncWrite`] for -//! our own types, as well. Note that these traits must only be implemented for -//! non-blocking I/O types that integrate with the futures type system. In -//! other words, these types must never block the thread, and instead the -//! current task is notified when the I/O resource is ready. -//! -//! ## Conversion to and from Sink/Stream -//! -//! It is often convenient to encapsulate the reading and writing of -//! bytes and instead work with a [`Sink`] or [`Stream`] of some data -//! type that is encoded as bytes and/or decoded from bytes. Tokio -//! provides some utility traits in the [tokio-util] crate that -//! abstract the asynchronous buffering that is required and allows -//! you to write [`Encoder`] and [`Decoder`] functions working with a -//! buffer of bytes, and then use that ["codec"] to transform anything -//! that implements [`AsyncRead`] and [`AsyncWrite`] into a `Sink`/`Stream` of -//! your structured data. -//! -//! [tokio-util]: https://docs.rs/tokio-util/0.3/tokio_util/codec/index.html -//! -//! # Standard input and output -//! -//! Tokio provides asynchronous APIs to standard [input], [output], and [error]. -//! These APIs are very similar to the ones provided by `std`, but they also -//! implement [`AsyncRead`] and [`AsyncWrite`]. -//! -//! Note that the standard input / output APIs **must** be used from the -//! context of the Tokio runtime, as they require Tokio-specific features to -//! function. Calling these functions outside of a Tokio runtime will panic. -//! -//! [input]: fn@stdin -//! [output]: fn@stdout -//! [error]: fn@stderr -//! -//! # `std` re-exports -//! -//! Additionally, [`Error`], [`ErrorKind`], [`Result`], and [`SeekFrom`] are -//! re-exported from `std::io` for ease of use. -//! -//! [`AsyncRead`]: trait@AsyncRead -//! [`AsyncWrite`]: trait@AsyncWrite -//! [`AsyncReadExt`]: trait@AsyncReadExt -//! [`AsyncWriteExt`]: trait@AsyncWriteExt -//! ["codec"]: https://docs.rs/tokio-util/0.3/tokio_util/codec/index.html -//! [`Encoder`]: https://docs.rs/tokio-util/0.3/tokio_util/codec/trait.Encoder.html -//! [`Decoder`]: https://docs.rs/tokio-util/0.3/tokio_util/codec/trait.Decoder.html -//! [`Error`]: struct@Error -//! [`ErrorKind`]: enum@ErrorKind -//! [`Result`]: type@Result -//! [`Read`]: std::io::Read -//! [`SeekFrom`]: enum@SeekFrom -//! [`Sink`]: https://docs.rs/futures/0.3/futures/sink/trait.Sink.html -//! [`Stream`]: crate::stream::Stream -//! [`Write`]: std::io::Write -cfg_io_blocking! { - pub(crate) mod blocking; -} - -mod async_buf_read; -pub use self::async_buf_read::AsyncBufRead; - -mod async_read; -pub use self::async_read::AsyncRead; - -mod async_seek; -pub use self::async_seek::AsyncSeek; - -mod async_write; -pub use self::async_write::AsyncWrite; - -// Re-export some types from `std::io` so that users don't have to deal -// with conflicts when `use`ing `tokio::io` and `std::io`. -pub use std::io::{Error, ErrorKind, Result, SeekFrom}; - -cfg_io_driver! 
{ - pub(crate) mod driver; - - mod poll_evented; - pub use poll_evented::PollEvented; - - mod registration; - pub use registration::Registration; -} - -cfg_io_std! { - mod stderr; - pub use stderr::{stderr, Stderr}; - - mod stdin; - pub use stdin::{stdin, Stdin}; - - mod stdout; - pub use stdout::{stdout, Stdout}; -} - -cfg_io_util! { - mod split; - pub use split::{split, ReadHalf, WriteHalf}; - - pub(crate) mod seek; - pub use self::seek::Seek; - - pub(crate) mod util; - pub use util::{ - copy, duplex, empty, repeat, sink, AsyncBufReadExt, AsyncReadExt, AsyncSeekExt, AsyncWriteExt, - BufReader, BufStream, BufWriter, DuplexStream, Copy, Empty, Lines, Repeat, Sink, Split, Take, - }; - - cfg_stream! { - pub use util::{stream_reader, StreamReader}; - pub use util::{reader_stream, ReaderStream}; - } -} - -cfg_not_io_util! { - cfg_process! { - pub(crate) mod util; - } -} - -cfg_io_blocking! { - /// Types in this module can be mocked out in tests. - mod sys { - // TODO: don't rename - pub(crate) use crate::runtime::spawn_blocking as run; - pub(crate) use crate::task::JoinHandle as Blocking; - } -} diff --git a/third_party/rust/tokio-0.2.25/src/io/poll_evented.rs b/third_party/rust/tokio-0.2.25/src/io/poll_evented.rs deleted file mode 100644 index 5295bd71ad85..000000000000 --- a/third_party/rust/tokio-0.2.25/src/io/poll_evented.rs +++ /dev/null @@ -1,461 +0,0 @@ -use crate::io::driver::platform; -use crate::io::{AsyncRead, AsyncWrite, Registration}; - -use mio::event::Evented; -use std::fmt; -use std::io::{self, Read, Write}; -use std::marker::Unpin; -use std::pin::Pin; -use std::sync::atomic::AtomicUsize; -use std::sync::atomic::Ordering::Relaxed; -use std::task::{Context, Poll}; - -cfg_io_driver! { - /// Associates an I/O resource that implements the [`std::io::Read`] and/or - /// [`std::io::Write`] traits with the reactor that drives it. - /// - /// `PollEvented` uses [`Registration`] internally to take a type that - /// implements [`mio::Evented`] as well as [`std::io::Read`] and or - /// [`std::io::Write`] and associate it with a reactor that will drive it. - /// - /// Once the [`mio::Evented`] type is wrapped by `PollEvented`, it can be - /// used from within the future's execution model. As such, the - /// `PollEvented` type provides [`AsyncRead`] and [`AsyncWrite`] - /// implementations using the underlying I/O resource as well as readiness - /// events provided by the reactor. - /// - /// **Note**: While `PollEvented` is `Sync` (if the underlying I/O type is - /// `Sync`), the caller must ensure that there are at most two tasks that - /// use a `PollEvented` instance concurrently. One for reading and one for - /// writing. While violating this requirement is "safe" from a Rust memory - /// model point of view, it will result in unexpected behavior in the form - /// of lost notifications and tasks hanging. - /// - /// ## Readiness events - /// - /// Besides just providing [`AsyncRead`] and [`AsyncWrite`] implementations, - /// this type also supports access to the underlying readiness event stream. - /// While similar in function to what [`Registration`] provides, the - /// semantics are a bit different. - /// - /// Two functions are provided to access the readiness events: - /// [`poll_read_ready`] and [`poll_write_ready`]. These functions return the - /// current readiness state of the `PollEvented` instance. If - /// [`poll_read_ready`] indicates read readiness, immediately calling - /// [`poll_read_ready`] again will also indicate read readiness. 
- /// - /// When the operation is attempted and is unable to succeed due to the I/O - /// resource not being ready, the caller must call [`clear_read_ready`] or - /// [`clear_write_ready`]. This clears the readiness state until a new - /// readiness event is received. - /// - /// This allows the caller to implement additional functions. For example, - /// [`TcpListener`] implements poll_accept by using [`poll_read_ready`] and - /// [`clear_read_ready`]. - /// - /// ```rust - /// use tokio::io::PollEvented; - /// - /// use futures::ready; - /// use mio::Ready; - /// use mio::net::{TcpStream, TcpListener}; - /// use std::io; - /// use std::task::{Context, Poll}; - /// - /// struct MyListener { - /// poll_evented: PollEvented, - /// } - /// - /// impl MyListener { - /// pub fn poll_accept(&mut self, cx: &mut Context<'_>) -> Poll> { - /// let ready = Ready::readable(); - /// - /// ready!(self.poll_evented.poll_read_ready(cx, ready))?; - /// - /// match self.poll_evented.get_ref().accept() { - /// Ok((socket, _)) => Poll::Ready(Ok(socket)), - /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - /// self.poll_evented.clear_read_ready(cx, ready)?; - /// Poll::Pending - /// } - /// Err(e) => Poll::Ready(Err(e)), - /// } - /// } - /// } - /// ``` - /// - /// ## Platform-specific events - /// - /// `PollEvented` also allows receiving platform-specific `mio::Ready` events. - /// These events are included as part of the read readiness event stream. The - /// write readiness event stream is only for `Ready::writable()` events. - /// - /// [`std::io::Read`]: trait@std::io::Read - /// [`std::io::Write`]: trait@std::io::Write - /// [`AsyncRead`]: trait@AsyncRead - /// [`AsyncWrite`]: trait@AsyncWrite - /// [`mio::Evented`]: trait@mio::Evented - /// [`Registration`]: struct@Registration - /// [`TcpListener`]: struct@crate::net::TcpListener - /// [`clear_read_ready`]: method@Self::clear_read_ready - /// [`clear_write_ready`]: method@Self::clear_write_ready - /// [`poll_read_ready`]: method@Self::poll_read_ready - /// [`poll_write_ready`]: method@Self::poll_write_ready - pub struct PollEvented { - io: Option, - inner: Inner, - } -} - -struct Inner { - registration: Registration, - - /// Currently visible read readiness - read_readiness: AtomicUsize, - - /// Currently visible write readiness - write_readiness: AtomicUsize, -} - -// ===== impl PollEvented ===== - -macro_rules! poll_ready { - ($me:expr, $mask:expr, $cache:ident, $take:ident, $poll:expr) => {{ - // Load cached & encoded readiness. - let mut cached = $me.inner.$cache.load(Relaxed); - let mask = $mask | platform::hup() | platform::error(); - - // See if the current readiness matches any bits. - let mut ret = mio::Ready::from_usize(cached) & $mask; - - if ret.is_empty() { - // Readiness does not match, consume the registration's readiness - // stream. This happens in a loop to ensure that the stream gets - // drained. - loop { - let ready = match $poll? { - Poll::Ready(v) => v, - Poll::Pending => return Poll::Pending, - }; - cached |= ready.as_usize(); - - // Update the cache store - $me.inner.$cache.store(cached, Relaxed); - - ret |= ready & mask; - - if !ret.is_empty() { - return Poll::Ready(Ok(ret)); - } - } - } else { - // Check what's new with the registration stream. This will not - // request to be notified - if let Some(ready) = $me.inner.registration.$take()? 
{ - cached |= ready.as_usize(); - $me.inner.$cache.store(cached, Relaxed); - } - - Poll::Ready(Ok(mio::Ready::from_usize(cached))) - } - }}; -} - -impl PollEvented -where - E: Evented, -{ - /// Creates a new `PollEvented` associated with the default reactor. - /// - /// # Panics - /// - /// This function panics if thread-local runtime is not set. - /// - /// The runtime is usually set implicitly when this function is called - /// from a future driven by a tokio runtime, otherwise runtime can be set - /// explicitly with [`Handle::enter`](crate::runtime::Handle::enter) function. - pub fn new(io: E) -> io::Result { - PollEvented::new_with_ready(io, mio::Ready::all()) - } - - /// Creates a new `PollEvented` associated with the default reactor, for specific `mio::Ready` - /// state. `new_with_ready` should be used over `new` when you need control over the readiness - /// state, such as when a file descriptor only allows reads. This does not add `hup` or `error` - /// so if you are interested in those states, you will need to add them to the readiness state - /// passed to this function. - /// - /// An example to listen to read only - /// - /// ```rust - /// ##[cfg(unix)] - /// mio::Ready::from_usize( - /// mio::Ready::readable().as_usize() - /// | mio::unix::UnixReady::error().as_usize() - /// | mio::unix::UnixReady::hup().as_usize() - /// ); - /// ``` - /// - /// # Panics - /// - /// This function panics if thread-local runtime is not set. - /// - /// The runtime is usually set implicitly when this function is called - /// from a future driven by a tokio runtime, otherwise runtime can be set - /// explicitly with [`Handle::enter`](crate::runtime::Handle::enter) function. - pub fn new_with_ready(io: E, ready: mio::Ready) -> io::Result { - let registration = Registration::new_with_ready(&io, ready)?; - Ok(Self { - io: Some(io), - inner: Inner { - registration, - read_readiness: AtomicUsize::new(0), - write_readiness: AtomicUsize::new(0), - }, - }) - } - - /// Returns a shared reference to the underlying I/O object this readiness - /// stream is wrapping. - pub fn get_ref(&self) -> &E { - self.io.as_ref().unwrap() - } - - /// Returns a mutable reference to the underlying I/O object this readiness - /// stream is wrapping. - pub fn get_mut(&mut self) -> &mut E { - self.io.as_mut().unwrap() - } - - /// Consumes self, returning the inner I/O object - /// - /// This function will deregister the I/O resource from the reactor before - /// returning. If the deregistration operation fails, an error is returned. - /// - /// Note that deregistering does not guarantee that the I/O resource can be - /// registered with a different reactor. Some I/O resource types can only be - /// associated with a single reactor instance for their lifetime. - pub fn into_inner(mut self) -> io::Result { - let io = self.io.take().unwrap(); - self.inner.registration.deregister(&io)?; - Ok(io) - } - - /// Checks the I/O resource's read readiness state. - /// - /// The mask argument allows specifying what readiness to notify on. This - /// can be any value, including platform specific readiness, **except** - /// `writable`. HUP is always implicitly included on platforms that support - /// it. - /// - /// If the resource is not ready for a read then `Poll::Pending` is returned - /// and the current task is notified once a new event is received. - /// - /// The I/O resource will remain in a read-ready state until readiness is - /// cleared by calling [`clear_read_ready`]. 
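The removed `poll_ready!` macro above keeps a per-direction readiness cache in an `AtomicUsize` and only consumes the registration's event stream when the cache has no matching bits. A minimal sketch of that caching scheme, with illustrative bit values rather than mio's encoding:

```rust
use std::sync::atomic::{AtomicUsize, Ordering::Relaxed};

const READABLE: usize = 0b01;
const WRITABLE: usize = 0b10;

struct Cache(AtomicUsize);

impl Cache {
    // OR newly observed readiness into the cache.
    fn observe(&self, ready: usize) {
        self.0.fetch_or(ready, Relaxed);
    }
    // Return the cached bits matching `mask`, if any, before hitting the driver.
    fn check(&self, mask: usize) -> Option<usize> {
        let hit = self.0.load(Relaxed) & mask;
        (hit != 0).then(|| hit)
    }
    // `clear_read_ready`-style reset: drop the given bits from the cache.
    fn clear(&self, mask: usize) {
        self.0.fetch_and(!mask, Relaxed);
    }
}

fn main() {
    let cache = Cache(AtomicUsize::new(0));
    assert!(cache.check(READABLE).is_none());
    cache.observe(READABLE | WRITABLE);
    assert_eq!(cache.check(READABLE), Some(READABLE));
    cache.clear(READABLE);
    assert!(cache.check(READABLE).is_none());
}
```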
- /// - /// [`clear_read_ready`]: method@Self::clear_read_ready - /// - /// # Panics - /// - /// This function panics if: - /// - /// * `ready` includes writable. - /// * called from outside of a task context. - /// - /// # Warning - /// - /// This method may not be called concurrently. It takes `&self` to allow - /// calling it concurrently with `poll_write_ready`. - pub fn poll_read_ready( - &self, - cx: &mut Context<'_>, - mask: mio::Ready, - ) -> Poll> { - assert!(!mask.is_writable(), "cannot poll for write readiness"); - poll_ready!( - self, - mask, - read_readiness, - take_read_ready, - self.inner.registration.poll_read_ready(cx) - ) - } - - /// Clears the I/O resource's read readiness state and registers the current - /// task to be notified once a read readiness event is received. - /// - /// After calling this function, `poll_read_ready` will return - /// `Poll::Pending` until a new read readiness event has been received. - /// - /// The `mask` argument specifies the readiness bits to clear. This may not - /// include `writable` or `hup`. - /// - /// # Panics - /// - /// This function panics if: - /// - /// * `ready` includes writable or HUP - /// * called from outside of a task context. - pub fn clear_read_ready(&self, cx: &mut Context<'_>, ready: mio::Ready) -> io::Result<()> { - // Cannot clear write readiness - assert!(!ready.is_writable(), "cannot clear write readiness"); - assert!(!platform::is_hup(ready), "cannot clear HUP readiness"); - - self.inner - .read_readiness - .fetch_and(!ready.as_usize(), Relaxed); - - if self.poll_read_ready(cx, ready)?.is_ready() { - // Notify the current task - cx.waker().wake_by_ref(); - } - - Ok(()) - } - - /// Checks the I/O resource's write readiness state. - /// - /// This always checks for writable readiness and also checks for HUP - /// readiness on platforms that support it. - /// - /// If the resource is not ready for a write then `Poll::Pending` is - /// returned and the current task is notified once a new event is received. - /// - /// The I/O resource will remain in a write-ready state until readiness is - /// cleared by calling [`clear_write_ready`]. - /// - /// [`clear_write_ready`]: method@Self::clear_write_ready - /// - /// # Panics - /// - /// This function panics if: - /// - /// * `ready` contains bits besides `writable` and `hup`. - /// * called from outside of a task context. - /// - /// # Warning - /// - /// This method may not be called concurrently. It takes `&self` to allow - /// calling it concurrently with `poll_read_ready`. - pub fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll> { - poll_ready!( - self, - mio::Ready::writable(), - write_readiness, - take_write_ready, - self.inner.registration.poll_write_ready(cx) - ) - } - - /// Resets the I/O resource's write readiness state and registers the current - /// task to be notified once a write readiness event is received. - /// - /// This only clears writable readiness. HUP (on platforms that support HUP) - /// cannot be cleared as it is a final state. - /// - /// After calling this function, `poll_write_ready(Ready::writable())` will - /// return `NotReady` until a new write readiness event has been received. - /// - /// # Panics - /// - /// This function will panic if called from outside of a task context. 
- pub fn clear_write_ready(&self, cx: &mut Context<'_>) -> io::Result<()> { - let ready = mio::Ready::writable(); - - self.inner - .write_readiness - .fetch_and(!ready.as_usize(), Relaxed); - - if self.poll_write_ready(cx)?.is_ready() { - // Notify the current task - cx.waker().wake_by_ref(); - } - - Ok(()) - } -} - -// ===== Read / Write impls ===== - -impl AsyncRead for PollEvented -where - E: Evented + Read + Unpin, -{ - fn poll_read( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { - ready!(self.poll_read_ready(cx, mio::Ready::readable()))?; - - let r = (*self).get_mut().read(buf); - - if is_wouldblock(&r) { - self.clear_read_ready(cx, mio::Ready::readable())?; - return Poll::Pending; - } - - Poll::Ready(r) - } -} - -impl AsyncWrite for PollEvented -where - E: Evented + Write + Unpin, -{ - fn poll_write( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - ready!(self.poll_write_ready(cx))?; - - let r = (*self).get_mut().write(buf); - - if is_wouldblock(&r) { - self.clear_write_ready(cx)?; - return Poll::Pending; - } - - Poll::Ready(r) - } - - fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - ready!(self.poll_write_ready(cx))?; - - let r = (*self).get_mut().flush(); - - if is_wouldblock(&r) { - self.clear_write_ready(cx)?; - return Poll::Pending; - } - - Poll::Ready(r) - } - - fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } -} - -fn is_wouldblock(r: &io::Result) -> bool { - match *r { - Ok(_) => false, - Err(ref e) => e.kind() == io::ErrorKind::WouldBlock, - } -} - -impl fmt::Debug for PollEvented { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("PollEvented").field("io", &self.io).finish() - } -} - -impl Drop for PollEvented { - fn drop(&mut self) { - if let Some(io) = self.io.take() { - // Ignore errors - let _ = self.inner.registration.deregister(&io); - } - } -} diff --git a/third_party/rust/tokio-0.2.25/src/io/registration.rs b/third_party/rust/tokio-0.2.25/src/io/registration.rs deleted file mode 100644 index 77fe6dbc7235..000000000000 --- a/third_party/rust/tokio-0.2.25/src/io/registration.rs +++ /dev/null @@ -1,340 +0,0 @@ -use crate::io::driver::{platform, Direction, Handle}; -use crate::util::slab::Address; - -use mio::{self, Evented}; -use std::io; -use std::task::{Context, Poll}; - -cfg_io_driver! { - /// Associates an I/O resource with the reactor instance that drives it. - /// - /// A registration represents an I/O resource registered with a Reactor such - /// that it will receive task notifications on readiness. This is the lowest - /// level API for integrating with a reactor. - /// - /// The association between an I/O resource is made by calling [`new`]. Once - /// the association is established, it remains established until the - /// registration instance is dropped. - /// - /// A registration instance represents two separate readiness streams. One - /// for the read readiness and one for write readiness. These streams are - /// independent and can be consumed from separate tasks. - /// - /// **Note**: while `Registration` is `Sync`, the caller must ensure that - /// there are at most two tasks that use a registration instance - /// concurrently. One task for [`poll_read_ready`] and one task for - /// [`poll_write_ready`]. 
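The removed `AsyncRead`/`AsyncWrite` impls for `PollEvented` above all follow the same pattern: attempt the operation, and on `WouldBlock` clear the cached readiness and return `Poll::Pending`. A small sketch of the `WouldBlock` check they rely on, using only the standard library:

```rust
use std::io;

// A `WouldBlock` result means the resource was not actually ready, so the
// caller clears its cached readiness and returns `Poll::Pending`.
fn is_wouldblock<T>(r: &io::Result<T>) -> bool {
    matches!(r, Err(e) if e.kind() == io::ErrorKind::WouldBlock)
}

fn main() {
    let ok: io::Result<usize> = Ok(3);
    let block: io::Result<usize> = Err(io::Error::from(io::ErrorKind::WouldBlock));
    assert!(!is_wouldblock(&ok));
    assert!(is_wouldblock(&block));
}
```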
While violating this requirement is "safe" from a - /// Rust memory safety point of view, it will result in unexpected behavior - /// in the form of lost notifications and tasks hanging. - /// - /// ## Platform-specific events - /// - /// `Registration` also allows receiving platform-specific `mio::Ready` - /// events. These events are included as part of the read readiness event - /// stream. The write readiness event stream is only for `Ready::writable()` - /// events. - /// - /// [`new`]: method@Self::new - /// [`poll_read_ready`]: method@Self::poll_read_ready` - /// [`poll_write_ready`]: method@Self::poll_write_ready` - #[derive(Debug)] - pub struct Registration { - handle: Handle, - address: Address, - } -} - -// ===== impl Registration ===== - -impl Registration { - /// Registers the I/O resource with the default reactor. - /// - /// # Return - /// - /// - `Ok` if the registration happened successfully - /// - `Err` if an error was encountered during registration - /// - /// - /// # Panics - /// - /// This function panics if thread-local runtime is not set. - /// - /// The runtime is usually set implicitly when this function is called - /// from a future driven by a tokio runtime, otherwise runtime can be set - /// explicitly with [`Handle::enter`](crate::runtime::Handle::enter) function. - pub fn new(io: &T) -> io::Result - where - T: Evented, - { - Registration::new_with_ready(io, mio::Ready::all()) - } - - /// Registers the I/O resource with the default reactor, for a specific `mio::Ready` state. - /// `new_with_ready` should be used over `new` when you need control over the readiness state, - /// such as when a file descriptor only allows reads. This does not add `hup` or `error` so if - /// you are interested in those states, you will need to add them to the readiness state passed - /// to this function. - /// - /// An example to listen to read only - /// - /// ```rust - /// ##[cfg(unix)] - /// mio::Ready::from_usize( - /// mio::Ready::readable().as_usize() - /// | mio::unix::UnixReady::error().as_usize() - /// | mio::unix::UnixReady::hup().as_usize() - /// ); - /// ``` - /// - /// # Return - /// - /// - `Ok` if the registration happened successfully - /// - `Err` if an error was encountered during registration - /// - /// - /// # Panics - /// - /// This function panics if thread-local runtime is not set. - /// - /// The runtime is usually set implicitly when this function is called - /// from a future driven by a tokio runtime, otherwise runtime can be set - /// explicitly with [`Handle::enter`](crate::runtime::Handle::enter) function. - pub fn new_with_ready(io: &T, ready: mio::Ready) -> io::Result - where - T: Evented, - { - let handle = Handle::current(); - let address = if let Some(inner) = handle.inner() { - inner.add_source(io, ready)? - } else { - return Err(io::Error::new( - io::ErrorKind::Other, - "failed to find event loop", - )); - }; - - Ok(Registration { handle, address }) - } - - /// Deregisters the I/O resource from the reactor it is associated with. - /// - /// This function must be called before the I/O resource associated with the - /// registration is dropped. - /// - /// Note that deregistering does not guarantee that the I/O resource can be - /// registered with a different reactor. Some I/O resource types can only be - /// associated with a single reactor instance for their lifetime. - /// - /// # Return - /// - /// If the deregistration was successful, `Ok` is returned. 
Any calls to - /// `Reactor::turn` that happen after a successful call to `deregister` will - /// no longer result in notifications getting sent for this registration. - /// - /// `Err` is returned if an error is encountered. - pub fn deregister(&mut self, io: &T) -> io::Result<()> - where - T: Evented, - { - let inner = match self.handle.inner() { - Some(inner) => inner, - None => return Err(io::Error::new(io::ErrorKind::Other, "reactor gone")), - }; - inner.deregister_source(io) - } - - /// Polls for events on the I/O resource's read readiness stream. - /// - /// If the I/O resource receives a new read readiness event since the last - /// call to `poll_read_ready`, it is returned. If it has not, the current - /// task is notified once a new event is received. - /// - /// All events except `HUP` are [edge-triggered]. Once `HUP` is returned, - /// the function will always return `Ready(HUP)`. This should be treated as - /// the end of the readiness stream. - /// - /// # Return value - /// - /// There are several possible return values: - /// - /// * `Poll::Ready(Ok(readiness))` means that the I/O resource has received - /// a new readiness event. The readiness value is included. - /// - /// * `Poll::Pending` means that no new readiness events have been received - /// since the last call to `poll_read_ready`. - /// - /// * `Poll::Ready(Err(err))` means that the registration has encountered an - /// error. This could represent a permanent internal error for example. - /// - /// [edge-triggered]: struct@mio::Poll#edge-triggered-and-level-triggered - /// - /// # Panics - /// - /// This function will panic if called from outside of a task context. - pub fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll> { - // Keep track of task budget - let coop = ready!(crate::coop::poll_proceed(cx)); - - let v = self.poll_ready(Direction::Read, Some(cx)).map_err(|e| { - coop.made_progress(); - e - })?; - match v { - Some(v) => { - coop.made_progress(); - Poll::Ready(Ok(v)) - } - None => Poll::Pending, - } - } - - /// Consume any pending read readiness event. - /// - /// This function is identical to [`poll_read_ready`] **except** that it - /// will not notify the current task when a new event is received. As such, - /// it is safe to call this function from outside of a task context. - /// - /// [`poll_read_ready`]: method@Self::poll_read_ready - pub fn take_read_ready(&self) -> io::Result> { - self.poll_ready(Direction::Read, None) - } - - /// Polls for events on the I/O resource's write readiness stream. - /// - /// If the I/O resource receives a new write readiness event since the last - /// call to `poll_write_ready`, it is returned. If it has not, the current - /// task is notified once a new event is received. - /// - /// All events except `HUP` are [edge-triggered]. Once `HUP` is returned, - /// the function will always return `Ready(HUP)`. This should be treated as - /// the end of the readiness stream. - /// - /// # Return value - /// - /// There are several possible return values: - /// - /// * `Poll::Ready(Ok(readiness))` means that the I/O resource has received - /// a new readiness event. The readiness value is included. - /// - /// * `Poll::Pending` means that no new readiness events have been received - /// since the last call to `poll_write_ready`. - /// - /// * `Poll::Ready(Err(err))` means that the registration has encountered an - /// error. This could represent a permanent internal error for example. 
- /// - /// [edge-triggered]: struct@mio::Poll#edge-triggered-and-level-triggered - /// - /// # Panics - /// - /// This function will panic if called from outside of a task context. - pub fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll> { - // Keep track of task budget - let coop = ready!(crate::coop::poll_proceed(cx)); - - let v = self.poll_ready(Direction::Write, Some(cx)).map_err(|e| { - coop.made_progress(); - e - })?; - match v { - Some(v) => { - coop.made_progress(); - Poll::Ready(Ok(v)) - } - None => Poll::Pending, - } - } - - /// Consumes any pending write readiness event. - /// - /// This function is identical to [`poll_write_ready`] **except** that it - /// will not notify the current task when a new event is received. As such, - /// it is safe to call this function from outside of a task context. - /// - /// [`poll_write_ready`]: method@Self::poll_write_ready - pub fn take_write_ready(&self) -> io::Result> { - self.poll_ready(Direction::Write, None) - } - - /// Polls for events on the I/O resource's `direction` readiness stream. - /// - /// If called with a task context, notify the task when a new event is - /// received. - fn poll_ready( - &self, - direction: Direction, - cx: Option<&mut Context<'_>>, - ) -> io::Result> { - let inner = match self.handle.inner() { - Some(inner) => inner, - None => return Err(io::Error::new(io::ErrorKind::Other, "reactor gone")), - }; - - // If the task should be notified about new events, ensure that it has - // been registered - if let Some(ref cx) = cx { - inner.register(self.address, direction, cx.waker().clone()) - } - - let mask = direction.mask(); - let mask_no_hup = (mask - platform::hup() - platform::error()).as_usize(); - - let sched = inner.io_dispatch.get(self.address).unwrap(); - - // This consumes the current readiness state **except** for HUP and - // error. HUP and error are excluded because a) they are final states - // and never transitition out and b) both the read AND the write - // directions need to be able to obvserve these states. - // - // # Platform-specific behavior - // - // HUP and error readiness are platform-specific. On epoll platforms, - // HUP has specific conditions that must be met by both peers of a - // connection in order to be triggered. - // - // On epoll platforms, `EPOLLERR` is signaled through - // `UnixReady::error()` and is important to be observable by both read - // AND write. A specific case that `EPOLLERR` occurs is when the read - // end of a pipe is closed. When this occurs, a peer blocked by - // writing to the pipe should be notified. 
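The comment above explains why `poll_ready` never consumes HUP/error readiness: those are final states that both the read and the write direction must keep observing. A toy sketch of that consumption rule, with stand-in bit constants rather than mio's:

```rust
const READABLE: usize = 0b001;
const WRITABLE: usize = 0b010;
const HUP: usize = 0b100; // stand-in for the platform HUP/error bits

// Observe the bits selected by `mask`, clearing the consumable ones but
// leaving final states (HUP here) set for future polls in either direction.
fn consume(current: &mut usize, mask: usize) -> usize {
    let mask_no_final = mask & !HUP;
    let observed = *current & mask;
    *current &= !mask_no_final;
    observed
}

fn main() {
    let mut state = READABLE | HUP;
    // A read-direction poll sees both bits...
    assert_eq!(consume(&mut state, READABLE | HUP), READABLE | HUP);
    // ...and a second poll still sees HUP, because final states are not consumed.
    assert_eq!(consume(&mut state, READABLE | HUP), HUP);
    // Write readiness was never set, but HUP stays visible to the writer too.
    assert_eq!(consume(&mut state, WRITABLE | HUP), HUP);
}
```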
- let curr_ready = sched - .set_readiness(self.address, |curr| curr & (!mask_no_hup)) - .unwrap_or_else(|_| panic!("address {:?} no longer valid!", self.address)); - - let mut ready = mask & mio::Ready::from_usize(curr_ready); - - if ready.is_empty() { - if let Some(cx) = cx { - // Update the task info - match direction { - Direction::Read => sched.reader.register_by_ref(cx.waker()), - Direction::Write => sched.writer.register_by_ref(cx.waker()), - } - - // Try again - let curr_ready = sched - .set_readiness(self.address, |curr| curr & (!mask_no_hup)) - .unwrap_or_else(|_| panic!("address {:?} no longer valid!", self.address)); - ready = mask & mio::Ready::from_usize(curr_ready); - } - } - - if ready.is_empty() { - Ok(None) - } else { - Ok(Some(ready)) - } - } -} - -unsafe impl Send for Registration {} -unsafe impl Sync for Registration {} - -impl Drop for Registration { - fn drop(&mut self) { - let inner = match self.handle.inner() { - Some(inner) => inner, - None => return, - }; - inner.drop_source(self.address); - } -} diff --git a/third_party/rust/tokio-0.2.25/src/io/seek.rs b/third_party/rust/tokio-0.2.25/src/io/seek.rs deleted file mode 100644 index e3b5bf6b6f4e..000000000000 --- a/third_party/rust/tokio-0.2.25/src/io/seek.rs +++ /dev/null @@ -1,56 +0,0 @@ -use crate::io::AsyncSeek; -use std::future::Future; -use std::io::{self, SeekFrom}; -use std::pin::Pin; -use std::task::{Context, Poll}; - -/// Future for the [`seek`](crate::io::AsyncSeekExt::seek) method. -#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct Seek<'a, S: ?Sized> { - seek: &'a mut S, - pos: Option, -} - -pub(crate) fn seek(seek: &mut S, pos: SeekFrom) -> Seek<'_, S> -where - S: AsyncSeek + ?Sized + Unpin, -{ - Seek { - seek, - pos: Some(pos), - } -} - -impl Future for Seek<'_, S> -where - S: AsyncSeek + ?Sized + Unpin, -{ - type Output = io::Result; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let me = &mut *self; - match me.pos { - Some(pos) => match Pin::new(&mut me.seek).start_seek(cx, pos) { - Poll::Ready(Ok(())) => { - me.pos = None; - Pin::new(&mut me.seek).poll_complete(cx) - } - Poll::Ready(Err(e)) => Poll::Ready(Err(e)), - Poll::Pending => Poll::Pending, - }, - None => Pin::new(&mut me.seek).poll_complete(cx), - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn assert_unpin() { - use std::marker::PhantomPinned; - crate::is_unpin::>(); - } -} diff --git a/third_party/rust/tokio-0.2.25/src/io/split.rs b/third_party/rust/tokio-0.2.25/src/io/split.rs deleted file mode 100644 index 134b937a5f1f..000000000000 --- a/third_party/rust/tokio-0.2.25/src/io/split.rs +++ /dev/null @@ -1,195 +0,0 @@ -//! Split a single value implementing `AsyncRead + AsyncWrite` into separate -//! `AsyncRead` and `AsyncWrite` handles. -//! -//! To restore this read/write object from its `split::ReadHalf` and -//! `split::WriteHalf` use `unsplit`. - -use crate::io::{AsyncRead, AsyncWrite}; - -use bytes::{Buf, BufMut}; -use std::cell::UnsafeCell; -use std::fmt; -use std::io; -use std::pin::Pin; -use std::sync::atomic::AtomicBool; -use std::sync::atomic::Ordering::{Acquire, Release}; -use std::sync::Arc; -use std::task::{Context, Poll}; - -cfg_io_util! { - /// The readable half of a value returned from [`split`](split()). - pub struct ReadHalf { - inner: Arc>, - } - - /// The writable half of a value returned from [`split`](split()). 
- pub struct WriteHalf { - inner: Arc>, - } - - /// Splits a single value implementing `AsyncRead + AsyncWrite` into separate - /// `AsyncRead` and `AsyncWrite` handles. - /// - /// To restore this read/write object from its `ReadHalf` and - /// `WriteHalf` use [`unsplit`](ReadHalf::unsplit()). - pub fn split(stream: T) -> (ReadHalf, WriteHalf) - where - T: AsyncRead + AsyncWrite, - { - let inner = Arc::new(Inner { - locked: AtomicBool::new(false), - stream: UnsafeCell::new(stream), - }); - - let rd = ReadHalf { - inner: inner.clone(), - }; - - let wr = WriteHalf { inner }; - - (rd, wr) - } -} - -struct Inner { - locked: AtomicBool, - stream: UnsafeCell, -} - -struct Guard<'a, T> { - inner: &'a Inner, -} - -impl ReadHalf { - /// Checks if this `ReadHalf` and some `WriteHalf` were split from the same - /// stream. - pub fn is_pair_of(&self, other: &WriteHalf) -> bool { - other.is_pair_of(&self) - } - - /// Reunites with a previously split `WriteHalf`. - /// - /// # Panics - /// - /// If this `ReadHalf` and the given `WriteHalf` do not originate from the - /// same `split` operation this method will panic. - /// This can be checked ahead of time by comparing the stream ID - /// of the two halves. - pub fn unsplit(self, wr: WriteHalf) -> T { - if self.is_pair_of(&wr) { - drop(wr); - - let inner = Arc::try_unwrap(self.inner) - .ok() - .expect("`Arc::try_unwrap` failed"); - - inner.stream.into_inner() - } else { - panic!("Unrelated `split::Write` passed to `split::Read::unsplit`.") - } - } -} - -impl WriteHalf { - /// Check if this `WriteHalf` and some `ReadHalf` were split from the same - /// stream. - pub fn is_pair_of(&self, other: &ReadHalf) -> bool { - Arc::ptr_eq(&self.inner, &other.inner) - } -} - -impl AsyncRead for ReadHalf { - fn poll_read( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { - let mut inner = ready!(self.inner.poll_lock(cx)); - inner.stream_pin().poll_read(cx, buf) - } - - fn poll_read_buf( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut B, - ) -> Poll> { - let mut inner = ready!(self.inner.poll_lock(cx)); - inner.stream_pin().poll_read_buf(cx, buf) - } -} - -impl AsyncWrite for WriteHalf { - fn poll_write( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - let mut inner = ready!(self.inner.poll_lock(cx)); - inner.stream_pin().poll_write(cx, buf) - } - - fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut inner = ready!(self.inner.poll_lock(cx)); - inner.stream_pin().poll_flush(cx) - } - - fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut inner = ready!(self.inner.poll_lock(cx)); - inner.stream_pin().poll_shutdown(cx) - } - - fn poll_write_buf( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut B, - ) -> Poll> { - let mut inner = ready!(self.inner.poll_lock(cx)); - inner.stream_pin().poll_write_buf(cx, buf) - } -} - -impl Inner { - fn poll_lock(&self, cx: &mut Context<'_>) -> Poll> { - if !self.locked.compare_and_swap(false, true, Acquire) { - Poll::Ready(Guard { inner: self }) - } else { - // Spin... but investigate a better strategy - - std::thread::yield_now(); - cx.waker().wake_by_ref(); - - Poll::Pending - } - } -} - -impl Guard<'_, T> { - fn stream_pin(&mut self) -> Pin<&mut T> { - // safety: the stream is pinned in `Arc` and the `Guard` ensures mutual - // exclusion. 
- unsafe { Pin::new_unchecked(&mut *self.inner.stream.get()) } - } -} - -impl Drop for Guard<'_, T> { - fn drop(&mut self) { - self.inner.locked.store(false, Release); - } -} - -unsafe impl Send for ReadHalf {} -unsafe impl Send for WriteHalf {} -unsafe impl Sync for ReadHalf {} -unsafe impl Sync for WriteHalf {} - -impl fmt::Debug for ReadHalf { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("split::ReadHalf").finish() - } -} - -impl fmt::Debug for WriteHalf { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("split::WriteHalf").finish() - } -} diff --git a/third_party/rust/tokio-0.2.25/src/io/stderr.rs b/third_party/rust/tokio-0.2.25/src/io/stderr.rs deleted file mode 100644 index 99607dc6044e..000000000000 --- a/third_party/rust/tokio-0.2.25/src/io/stderr.rs +++ /dev/null @@ -1,108 +0,0 @@ -use crate::io::blocking::Blocking; -use crate::io::AsyncWrite; - -use std::io; -use std::pin::Pin; -use std::task::Context; -use std::task::Poll; - -cfg_io_std! { - /// A handle to the standard error stream of a process. - /// - /// Concurrent writes to stderr must be executed with care: Only individual - /// writes to this [`AsyncWrite`] are guaranteed to be intact. In particular - /// you should be aware that writes using [`write_all`] are not guaranteed - /// to occur as a single write, so multiple threads writing data with - /// [`write_all`] may result in interleaved output. - /// - /// Created by the [`stderr`] function. - /// - /// [`stderr`]: stderr() - /// [`AsyncWrite`]: AsyncWrite - /// [`write_all`]: crate::io::AsyncWriteExt::write_all() - /// - /// # Examples - /// - /// ``` - /// use tokio::io::{self, AsyncWriteExt}; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut stderr = io::stdout(); - /// stderr.write_all(b"Print some error here.").await?; - /// Ok(()) - /// } - /// ``` - #[derive(Debug)] - pub struct Stderr { - std: Blocking, - } - - /// Constructs a new handle to the standard error of the current process. - /// - /// The returned handle allows writing to standard error from the within the - /// Tokio runtime. - /// - /// Concurrent writes to stderr must be executed with care: Only individual - /// writes to this [`AsyncWrite`] are guaranteed to be intact. In particular - /// you should be aware that writes using [`write_all`] are not guaranteed - /// to occur as a single write, so multiple threads writing data with - /// [`write_all`] may result in interleaved output. 
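A possible usage sketch for the removed `split`/`unsplit` API above, assuming tokio 0.2 with the "full" feature so that `duplex` (also re-exported above) and the `#[tokio::main]` macro are available:

```rust
use tokio::io::{duplex, split, AsyncReadExt, AsyncWriteExt};

#[tokio::main]
async fn main() -> std::io::Result<()> {
    // An in-memory stream that is both AsyncRead and AsyncWrite.
    let (client, mut server) = duplex(64);

    // Split one end into independently usable halves.
    let (mut rd, mut wr) = split(client);

    wr.write_all(b"ping").await?;
    let mut buf = [0u8; 4];
    server.read_exact(&mut buf).await?;
    assert_eq!(&buf, b"ping");

    server.write_all(b"pong").await?;
    rd.read_exact(&mut buf).await?;
    assert_eq!(&buf, b"pong");

    // The halves can be reunited as long as they came from the same `split`.
    let _client = rd.unsplit(wr);
    Ok(())
}
```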
- /// - /// [`AsyncWrite`]: AsyncWrite - /// [`write_all`]: crate::io::AsyncWriteExt::write_all() - /// - /// # Examples - /// - /// ``` - /// use tokio::io::{self, AsyncWriteExt}; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut stderr = io::stdout(); - /// stderr.write_all(b"Print some error here.").await?; - /// Ok(()) - /// } - /// ``` - pub fn stderr() -> Stderr { - let std = io::stderr(); - Stderr { - std: Blocking::new(std), - } - } -} - -#[cfg(unix)] -impl std::os::unix::io::AsRawFd for Stderr { - fn as_raw_fd(&self) -> std::os::unix::io::RawFd { - std::io::stderr().as_raw_fd() - } -} - -#[cfg(windows)] -impl std::os::windows::io::AsRawHandle for Stderr { - fn as_raw_handle(&self) -> std::os::windows::io::RawHandle { - std::io::stderr().as_raw_handle() - } -} - -impl AsyncWrite for Stderr { - fn poll_write( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - Pin::new(&mut self.std).poll_write(cx, buf) - } - - fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Pin::new(&mut self.std).poll_flush(cx) - } - - fn poll_shutdown( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll> { - Pin::new(&mut self.std).poll_shutdown(cx) - } -} diff --git a/third_party/rust/tokio-0.2.25/src/io/stdin.rs b/third_party/rust/tokio-0.2.25/src/io/stdin.rs deleted file mode 100644 index 325b8757ec17..000000000000 --- a/third_party/rust/tokio-0.2.25/src/io/stdin.rs +++ /dev/null @@ -1,78 +0,0 @@ -use crate::io::blocking::Blocking; -use crate::io::AsyncRead; - -use std::io; -use std::pin::Pin; -use std::task::Context; -use std::task::Poll; - -cfg_io_std! { - /// A handle to the standard input stream of a process. - /// - /// The handle implements the [`AsyncRead`] trait, but beware that concurrent - /// reads of `Stdin` must be executed with care. - /// - /// This handle is best used for non-interactive uses, such as when a file - /// is piped into the application. For technical reasons, `stdin` is - /// implemented by using an ordinary blocking read on a separate thread, and - /// it is impossible to cancel that read. This can make shutdown of the - /// runtime hang until the user presses enter. - /// - /// For interactive uses, it is recommended to spawn a thread dedicated to - /// user input and use blocking IO directly in that thread. - /// - /// Created by the [`stdin`] function. - /// - /// [`stdin`]: fn@stdin - /// [`AsyncRead`]: trait@AsyncRead - #[derive(Debug)] - pub struct Stdin { - std: Blocking, - } - - /// Constructs a new handle to the standard input of the current process. - /// - /// This handle is best used for non-interactive uses, such as when a file - /// is piped into the application. For technical reasons, `stdin` is - /// implemented by using an ordinary blocking read on a separate thread, and - /// it is impossible to cancel that read. This can make shutdown of the - /// runtime hang until the user presses enter. - /// - /// For interactive uses, it is recommended to spawn a thread dedicated to - /// user input and use blocking IO directly in that thread. 
- pub fn stdin() -> Stdin { - let std = io::stdin(); - Stdin { - std: Blocking::new(std), - } - } -} - -#[cfg(unix)] -impl std::os::unix::io::AsRawFd for Stdin { - fn as_raw_fd(&self) -> std::os::unix::io::RawFd { - std::io::stdin().as_raw_fd() - } -} - -#[cfg(windows)] -impl std::os::windows::io::AsRawHandle for Stdin { - fn as_raw_handle(&self) -> std::os::windows::io::RawHandle { - std::io::stdin().as_raw_handle() - } -} - -impl AsyncRead for Stdin { - unsafe fn prepare_uninitialized_buffer(&self, _buf: &mut [std::mem::MaybeUninit]) -> bool { - // https://github.com/rust-lang/rust/blob/09c817eeb29e764cfc12d0a8d94841e3ffe34023/src/libstd/io/stdio.rs#L97 - false - } - - fn poll_read( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { - Pin::new(&mut self.std).poll_read(cx, buf) - } -} diff --git a/third_party/rust/tokio-0.2.25/src/io/stdout.rs b/third_party/rust/tokio-0.2.25/src/io/stdout.rs deleted file mode 100644 index 5377993a466e..000000000000 --- a/third_party/rust/tokio-0.2.25/src/io/stdout.rs +++ /dev/null @@ -1,108 +0,0 @@ -use crate::io::blocking::Blocking; -use crate::io::AsyncWrite; - -use std::io; -use std::pin::Pin; -use std::task::Context; -use std::task::Poll; - -cfg_io_std! { - /// A handle to the standard output stream of a process. - /// - /// Concurrent writes to stdout must be executed with care: Only individual - /// writes to this [`AsyncWrite`] are guaranteed to be intact. In particular - /// you should be aware that writes using [`write_all`] are not guaranteed - /// to occur as a single write, so multiple threads writing data with - /// [`write_all`] may result in interleaved output. - /// - /// Created by the [`stdout`] function. - /// - /// [`stdout`]: stdout() - /// [`AsyncWrite`]: AsyncWrite - /// [`write_all`]: crate::io::AsyncWriteExt::write_all() - /// - /// # Examples - /// - /// ``` - /// use tokio::io::{self, AsyncWriteExt}; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut stdout = io::stdout(); - /// stdout.write_all(b"Hello world!").await?; - /// Ok(()) - /// } - /// ``` - #[derive(Debug)] - pub struct Stdout { - std: Blocking, - } - - /// Constructs a new handle to the standard output of the current process. - /// - /// The returned handle allows writing to standard out from the within the - /// Tokio runtime. - /// - /// Concurrent writes to stdout must be executed with care: Only individual - /// writes to this [`AsyncWrite`] are guaranteed to be intact. In particular - /// you should be aware that writes using [`write_all`] are not guaranteed - /// to occur as a single write, so multiple threads writing data with - /// [`write_all`] may result in interleaved output. 
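The interleaving caveat repeated for `Stderr` and `Stdout` above is not illustrated anywhere in these files. One common mitigation is to funnel all output through a single writer task; this is an editorial sketch against the tokio 0.2 API, not a pattern the crate itself prescribes:

```rust
// Editorial sketch, not part of the vendored tokio 0.2.25 sources.
use tokio::io::{self, AsyncWriteExt};
use tokio::sync::mpsc;

#[tokio::main]
async fn main() -> io::Result<()> {
    let (tx, mut rx) = mpsc::channel::<String>(16);

    // A single task owns stdout, so its write_all calls cannot interleave.
    let writer = tokio::spawn(async move {
        let mut stdout = io::stdout();
        while let Some(msg) = rx.recv().await {
            if stdout.write_all(msg.as_bytes()).await.is_err() {
                break;
            }
        }
    });

    // Producers send complete messages instead of writing to stdout directly.
    for i in 0..3 {
        let mut tx = tx.clone();
        tokio::spawn(async move {
            let _ = tx.send(format!("task {} says hello\n", i)).await;
        });
    }
    drop(tx);

    writer.await.unwrap();
    Ok(())
}
```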
- /// - /// [`AsyncWrite`]: AsyncWrite - /// [`write_all`]: crate::io::AsyncWriteExt::write_all() - /// - /// # Examples - /// - /// ``` - /// use tokio::io::{self, AsyncWriteExt}; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut stdout = io::stdout(); - /// stdout.write_all(b"Hello world!").await?; - /// Ok(()) - /// } - /// ``` - pub fn stdout() -> Stdout { - let std = io::stdout(); - Stdout { - std: Blocking::new(std), - } - } -} - -#[cfg(unix)] -impl std::os::unix::io::AsRawFd for Stdout { - fn as_raw_fd(&self) -> std::os::unix::io::RawFd { - std::io::stdout().as_raw_fd() - } -} - -#[cfg(windows)] -impl std::os::windows::io::AsRawHandle for Stdout { - fn as_raw_handle(&self) -> std::os::windows::io::RawHandle { - std::io::stdout().as_raw_handle() - } -} - -impl AsyncWrite for Stdout { - fn poll_write( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - Pin::new(&mut self.std).poll_write(cx, buf) - } - - fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Pin::new(&mut self.std).poll_flush(cx) - } - - fn poll_shutdown( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll> { - Pin::new(&mut self.std).poll_shutdown(cx) - } -} diff --git a/third_party/rust/tokio-0.2.25/src/io/util/async_buf_read_ext.rs b/third_party/rust/tokio-0.2.25/src/io/util/async_buf_read_ext.rs deleted file mode 100644 index 1bfab90220f9..000000000000 --- a/third_party/rust/tokio-0.2.25/src/io/util/async_buf_read_ext.rs +++ /dev/null @@ -1,258 +0,0 @@ -use crate::io::util::lines::{lines, Lines}; -use crate::io::util::read_line::{read_line, ReadLine}; -use crate::io::util::read_until::{read_until, ReadUntil}; -use crate::io::util::split::{split, Split}; -use crate::io::AsyncBufRead; - -cfg_io_util! { - /// An extension trait which adds utility methods to [`AsyncBufRead`] types. - /// - /// [`AsyncBufRead`]: crate::io::AsyncBufRead - pub trait AsyncBufReadExt: AsyncBufRead { - /// Reads all bytes into `buf` until the delimiter `byte` or EOF is reached. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn read_until(&mut self, buf: &mut Vec) -> io::Result; - /// ``` - /// - /// This function will read bytes from the underlying stream until the - /// delimiter or EOF is found. Once found, all bytes up to, and including, - /// the delimiter (if found) will be appended to `buf`. - /// - /// If successful, this function will return the total number of bytes read. - /// - /// # Errors - /// - /// This function will ignore all instances of [`ErrorKind::Interrupted`] and - /// will otherwise return any errors returned by [`fill_buf`]. - /// - /// If an I/O error is encountered then all bytes read so far will be - /// present in `buf` and its length will have been adjusted appropriately. - /// - /// [`fill_buf`]: AsyncBufRead::poll_fill_buf - /// [`ErrorKind::Interrupted`]: std::io::ErrorKind::Interrupted - /// - /// # Examples - /// - /// [`std::io::Cursor`][`Cursor`] is a type that implements `BufRead`. 
In - /// this example, we use [`Cursor`] to read all the bytes in a byte slice - /// in hyphen delimited segments: - /// - /// [`Cursor`]: std::io::Cursor - /// - /// ``` - /// use tokio::io::AsyncBufReadExt; - /// - /// use std::io::Cursor; - /// - /// #[tokio::main] - /// async fn main() { - /// let mut cursor = Cursor::new(b"lorem-ipsum"); - /// let mut buf = vec![]; - /// - /// // cursor is at 'l' - /// let num_bytes = cursor.read_until(b'-', &mut buf) - /// .await - /// .expect("reading from cursor won't fail"); - /// - /// assert_eq!(num_bytes, 6); - /// assert_eq!(buf, b"lorem-"); - /// buf.clear(); - /// - /// // cursor is at 'i' - /// let num_bytes = cursor.read_until(b'-', &mut buf) - /// .await - /// .expect("reading from cursor won't fail"); - /// - /// assert_eq!(num_bytes, 5); - /// assert_eq!(buf, b"ipsum"); - /// buf.clear(); - /// - /// // cursor is at EOF - /// let num_bytes = cursor.read_until(b'-', &mut buf) - /// .await - /// .expect("reading from cursor won't fail"); - /// assert_eq!(num_bytes, 0); - /// assert_eq!(buf, b""); - /// } - /// ``` - fn read_until<'a>(&'a mut self, byte: u8, buf: &'a mut Vec) -> ReadUntil<'a, Self> - where - Self: Unpin, - { - read_until(self, byte, buf) - } - - /// Reads all bytes until a newline (the 0xA byte) is reached, and append - /// them to the provided buffer. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn read_line(&mut self, buf: &mut String) -> io::Result; - /// ``` - /// - /// This function will read bytes from the underlying stream until the - /// newline delimiter (the 0xA byte) or EOF is found. Once found, all bytes - /// up to, and including, the delimiter (if found) will be appended to - /// `buf`. - /// - /// If successful, this function will return the total number of bytes read. - /// - /// If this function returns `Ok(0)`, the stream has reached EOF. - /// - /// # Errors - /// - /// This function has the same error semantics as [`read_until`] and will - /// also return an error if the read bytes are not valid UTF-8. If an I/O - /// error is encountered then `buf` may contain some bytes already read in - /// the event that all data read so far was valid UTF-8. - /// - /// [`read_until`]: AsyncBufReadExt::read_until - /// - /// # Examples - /// - /// [`std::io::Cursor`][`Cursor`] is a type that implements - /// `AsyncBufRead`. 
In this example, we use [`Cursor`] to read all the - /// lines in a byte slice: - /// - /// [`Cursor`]: std::io::Cursor - /// - /// ``` - /// use tokio::io::AsyncBufReadExt; - /// - /// use std::io::Cursor; - /// - /// #[tokio::main] - /// async fn main() { - /// let mut cursor = Cursor::new(b"foo\nbar"); - /// let mut buf = String::new(); - /// - /// // cursor is at 'f' - /// let num_bytes = cursor.read_line(&mut buf) - /// .await - /// .expect("reading from cursor won't fail"); - /// - /// assert_eq!(num_bytes, 4); - /// assert_eq!(buf, "foo\n"); - /// buf.clear(); - /// - /// // cursor is at 'b' - /// let num_bytes = cursor.read_line(&mut buf) - /// .await - /// .expect("reading from cursor won't fail"); - /// - /// assert_eq!(num_bytes, 3); - /// assert_eq!(buf, "bar"); - /// buf.clear(); - /// - /// // cursor is at EOF - /// let num_bytes = cursor.read_line(&mut buf) - /// .await - /// .expect("reading from cursor won't fail"); - /// - /// assert_eq!(num_bytes, 0); - /// assert_eq!(buf, ""); - /// } - /// ``` - fn read_line<'a>(&'a mut self, buf: &'a mut String) -> ReadLine<'a, Self> - where - Self: Unpin, - { - read_line(self, buf) - } - - /// Returns a stream of the contents of this reader split on the byte - /// `byte`. - /// - /// This method is the asynchronous equivalent to - /// [`BufRead::split`](std::io::BufRead::split). - /// - /// The stream returned from this function will yield instances of - /// [`io::Result`]`<`[`Vec`]`>`. Each vector returned will *not* have - /// the delimiter byte at the end. - /// - /// [`io::Result`]: std::io::Result - /// [`Vec`]: std::vec::Vec - /// - /// # Errors - /// - /// Each item of the stream has the same error semantics as - /// [`AsyncBufReadExt::read_until`](AsyncBufReadExt::read_until). - /// - /// # Examples - /// - /// ``` - /// # use tokio::io::AsyncBufRead; - /// use tokio::io::AsyncBufReadExt; - /// - /// # async fn dox(my_buf_read: impl AsyncBufRead + Unpin) -> std::io::Result<()> { - /// let mut segments = my_buf_read.split(b'f'); - /// - /// while let Some(segment) = segments.next_segment().await? { - /// println!("length = {}", segment.len()) - /// } - /// # Ok(()) - /// # } - /// ``` - fn split(self, byte: u8) -> Split - where - Self: Sized + Unpin, - { - split(self, byte) - } - - /// Returns a stream over the lines of this reader. - /// This method is the async equivalent to [`BufRead::lines`](std::io::BufRead::lines). - /// - /// The stream returned from this function will yield instances of - /// [`io::Result`]`<`[`String`]`>`. Each string returned will *not* have a newline - /// byte (the 0xA byte) or CRLF (0xD, 0xA bytes) at the end. - /// - /// [`io::Result`]: std::io::Result - /// [`String`]: String - /// - /// # Errors - /// - /// Each line of the stream has the same error semantics as [`AsyncBufReadExt::read_line`]. - /// - /// # Examples - /// - /// [`std::io::Cursor`][`Cursor`] is a type that implements `BufRead`. In - /// this example, we use [`Cursor`] to iterate over all the lines in a byte - /// slice. 
- /// - /// [`Cursor`]: std::io::Cursor - /// - /// ``` - /// use tokio::io::AsyncBufReadExt; - /// use tokio::stream::StreamExt; - /// - /// use std::io::Cursor; - /// - /// #[tokio::main] - /// async fn main() { - /// let cursor = Cursor::new(b"lorem\nipsum\r\ndolor"); - /// - /// let mut lines = cursor.lines().map(|res| res.unwrap()); - /// - /// assert_eq!(lines.next().await, Some(String::from("lorem"))); - /// assert_eq!(lines.next().await, Some(String::from("ipsum"))); - /// assert_eq!(lines.next().await, Some(String::from("dolor"))); - /// assert_eq!(lines.next().await, None); - /// } - /// ``` - /// - /// [`AsyncBufReadExt::read_line`]: AsyncBufReadExt::read_line - fn lines(self) -> Lines - where - Self: Sized, - { - lines(self) - } - } -} - -impl AsyncBufReadExt for R {} diff --git a/third_party/rust/tokio-0.2.25/src/io/util/async_read_ext.rs b/third_party/rust/tokio-0.2.25/src/io/util/async_read_ext.rs deleted file mode 100644 index 0ab66c286d3f..000000000000 --- a/third_party/rust/tokio-0.2.25/src/io/util/async_read_ext.rs +++ /dev/null @@ -1,1118 +0,0 @@ -use crate::io::util::chain::{chain, Chain}; -use crate::io::util::read::{read, Read}; -use crate::io::util::read_buf::{read_buf, ReadBuf}; -use crate::io::util::read_exact::{read_exact, ReadExact}; -use crate::io::util::read_int::{ - ReadI128, ReadI128Le, ReadI16, ReadI16Le, ReadI32, ReadI32Le, ReadI64, ReadI64Le, ReadI8, -}; -use crate::io::util::read_int::{ - ReadU128, ReadU128Le, ReadU16, ReadU16Le, ReadU32, ReadU32Le, ReadU64, ReadU64Le, ReadU8, -}; -use crate::io::util::read_to_end::{read_to_end, ReadToEnd}; -use crate::io::util::read_to_string::{read_to_string, ReadToString}; -use crate::io::util::take::{take, Take}; -use crate::io::AsyncRead; - -use bytes::BufMut; - -cfg_io_util! { - /// Defines numeric reader - macro_rules! read_impl { - ( - $( - $(#[$outer:meta])* - fn $name:ident(&mut self) -> $($fut:ident)*; - )* - ) => { - $( - $(#[$outer])* - fn $name<'a>(&'a mut self) -> $($fut)*<&'a mut Self> where Self: Unpin { - $($fut)*::new(self) - } - )* - } - } - - /// Reads bytes from a source. - /// - /// Implemented as an extention trait, adding utility methods to all - /// [`AsyncRead`] types. Callers will tend to import this trait instead of - /// [`AsyncRead`]. - /// - /// As a convenience, this trait may be imported using the [`prelude`]: - /// - /// ```no_run - /// use tokio::fs::File; - /// use tokio::prelude::*; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut f = File::open("foo.txt").await?; - /// let mut buffer = [0; 10]; - /// - /// // The `read` method is defined by this trait. - /// let n = f.read(&mut buffer[..]).await?; - /// - /// Ok(()) - /// } - /// ``` - /// - /// See [module][crate::io] documentation for more details. - /// - /// [`AsyncRead`]: AsyncRead - /// [`prelude`]: crate::prelude - pub trait AsyncReadExt: AsyncRead { - /// Creates a new `AsyncRead` instance that chains this stream with - /// `next`. - /// - /// The returned `AsyncRead` instance will first read all bytes from this object - /// until EOF is encountered. Afterwards the output is equivalent to the - /// output of `next`. 
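As a self-contained companion to the file-based example below, this editorial sketch chains two in-memory readers; no byte of the second cursor is produced until the first reaches EOF:

```rust
// Editorial sketch, not part of the vendored tokio 0.2.25 sources.
use std::io::Cursor;
use tokio::io::{self, AsyncReadExt};

#[tokio::main]
async fn main() -> io::Result<()> {
    let first = Cursor::new(b"hello, ".to_vec());
    let second = Cursor::new(b"world".to_vec());

    // `first` is drained to EOF before `second` contributes any bytes.
    let mut chained = first.chain(second);

    let mut out = String::new();
    chained.read_to_string(&mut out).await?;
    assert_eq!(out, "hello, world");
    Ok(())
}
```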
- /// - /// # Examples - /// - /// [`File`][crate::fs::File]s implement `AsyncRead`: - /// - /// ```no_run - /// use tokio::fs::File; - /// use tokio::io::{self, AsyncReadExt}; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let f1 = File::open("foo.txt").await?; - /// let f2 = File::open("bar.txt").await?; - /// - /// let mut handle = f1.chain(f2); - /// let mut buffer = String::new(); - /// - /// // read the value into a String. We could use any AsyncRead - /// // method here, this is just one example. - /// handle.read_to_string(&mut buffer).await?; - /// Ok(()) - /// } - /// ``` - fn chain(self, next: R) -> Chain - where - Self: Sized, - R: AsyncRead, - { - chain(self, next) - } - - /// Pulls some bytes from this source into the specified buffer, - /// returning how many bytes were read. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn read(&mut self, buf: &mut [u8]) -> io::Result; - /// ``` - /// - /// This function does not provide any guarantees about whether it - /// completes immediately or asynchronously - /// - /// If the return value of this method is `Ok(n)`, then it must be - /// guaranteed that `0 <= n <= buf.len()`. A nonzero `n` value indicates - /// that the buffer `buf` has been filled in with `n` bytes of data from - /// this source. If `n` is `0`, then it can indicate one of two - /// scenarios: - /// - /// 1. This reader has reached its "end of file" and will likely no longer - /// be able to produce bytes. Note that this does not mean that the - /// reader will *always* no longer be able to produce bytes. - /// 2. The buffer specified was 0 bytes in length. - /// - /// No guarantees are provided about the contents of `buf` when this - /// function is called, implementations cannot rely on any property of the - /// contents of `buf` being `true`. It is recommended that *implementations* - /// only write data to `buf` instead of reading its contents. - /// - /// Correspondingly, however, *callers* of this method may not assume - /// any guarantees about how the implementation uses `buf`. It is - /// possible that the code that's supposed to write to the buffer might - /// also read from it. It is your responsibility to make sure that `buf` - /// is initialized before calling `read`. - /// - /// # Errors - /// - /// If this function encounters any form of I/O or other error, an error - /// variant will be returned. If an error is returned then it must be - /// guaranteed that no bytes were read. - /// - /// # Examples - /// - /// [`File`][crate::fs::File]s implement `Read`: - /// - /// ```no_run - /// use tokio::fs::File; - /// use tokio::io::{self, AsyncReadExt}; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut f = File::open("foo.txt").await?; - /// let mut buffer = [0; 10]; - /// - /// // read up to 10 bytes - /// let n = f.read(&mut buffer[..]).await?; - /// - /// println!("The bytes: {:?}", &buffer[..n]); - /// Ok(()) - /// } - /// ``` - fn read<'a>(&'a mut self, buf: &'a mut [u8]) -> Read<'a, Self> - where - Self: Unpin, - { - read(self, buf) - } - - /// Pulls some bytes from this source into the specified buffer, - /// advancing the buffer's internal cursor. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn read_buf(&mut self, buf: &mut B) -> io::Result; - /// ``` - /// - /// Usually, only a single `read` syscall is issued, even if there is - /// more space in the supplied buffer. 
- /// - /// This function does not provide any guarantees about whether it - /// completes immediately or asynchronously - /// - /// # Return - /// - /// On a successful read, the number of read bytes is returned. If the - /// supplied buffer is not empty and the function returns `Ok(0)` then - /// the source as reached an "end-of-file" event. - /// - /// # Errors - /// - /// If this function encounters any form of I/O or other error, an error - /// variant will be returned. If an error is returned then it must be - /// guaranteed that no bytes were read. - /// - /// # Examples - /// - /// [`File`] implements `Read` and [`BytesMut`] implements [`BufMut`]: - /// - /// [`File`]: crate::fs::File - /// [`BytesMut`]: bytes::BytesMut - /// [`BufMut`]: bytes::BufMut - /// - /// ```no_run - /// use tokio::fs::File; - /// use tokio::io::{self, AsyncReadExt}; - /// - /// use bytes::BytesMut; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut f = File::open("foo.txt").await?; - /// let mut buffer = BytesMut::with_capacity(10); - /// - /// assert!(buffer.is_empty()); - /// - /// // read up to 10 bytes, note that the return value is not needed - /// // to access the data that was read as `buffer`'s internal - /// // cursor is updated. - /// f.read_buf(&mut buffer).await?; - /// - /// println!("The bytes: {:?}", &buffer[..]); - /// Ok(()) - /// } - /// ``` - fn read_buf<'a, B>(&'a mut self, buf: &'a mut B) -> ReadBuf<'a, Self, B> - where - Self: Sized + Unpin, - B: BufMut, - { - read_buf(self, buf) - } - - /// Reads the exact number of bytes required to fill `buf`. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn read_exact(&mut self, buf: &mut [u8]) -> io::Result; - /// ``` - /// - /// This function reads as many bytes as necessary to completely fill - /// the specified buffer `buf`. - /// - /// # Errors - /// - /// If the operation encounters an "end of file" before completely - /// filling the buffer, it returns an error of the kind - /// [`ErrorKind::UnexpectedEof`]. The contents of `buf` are unspecified - /// in this case. - /// - /// If any other read error is encountered then the operation - /// immediately returns. The contents of `buf` are unspecified in this - /// case. - /// - /// If this operation returns an error, it is unspecified how many bytes - /// it has read, but it will never read more than would be necessary to - /// completely fill the buffer. - /// - /// # Examples - /// - /// [`File`][crate::fs::File]s implement `Read`: - /// - /// ```no_run - /// use tokio::fs::File; - /// use tokio::io::{self, AsyncReadExt}; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut f = File::open("foo.txt").await?; - /// let mut buffer = [0; 10]; - /// - /// // read exactly 10 bytes - /// f.read_exact(&mut buffer).await?; - /// Ok(()) - /// } - /// ``` - /// - /// [`ErrorKind::UnexpectedEof`]: std::io::ErrorKind::UnexpectedEof - fn read_exact<'a>(&'a mut self, buf: &'a mut [u8]) -> ReadExact<'a, Self> - where - Self: Unpin, - { - read_exact(self, buf) - } - - read_impl! { - /// Reads an unsigned 8 bit integer from the underlying reader. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn read_u8(&mut self) -> io::Result; - /// ``` - /// - /// It is recommended to use a buffered reader to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncReadExt::read_exact`]. 
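The error forwarding described above is easiest to see when the reader runs dry. An editorial sketch in the same `Cursor` doctest style, relying on the `ErrorKind::UnexpectedEof` behaviour documented for `read_exact`:

```rust
// Editorial sketch, not part of the vendored tokio 0.2.25 sources.
use std::io::{Cursor, ErrorKind};
use tokio::io::{self, AsyncReadExt};

#[tokio::main]
async fn main() -> io::Result<()> {
    let mut reader = Cursor::new(vec![0x2a]);

    // One byte is available, so the first read succeeds...
    assert_eq!(42, reader.read_u8().await?);

    // ...but the next read hits end-of-file before a full value is available,
    // which surfaces as ErrorKind::UnexpectedEof, as documented for read_exact.
    let err = reader.read_u8().await.unwrap_err();
    assert_eq!(err.kind(), ErrorKind::UnexpectedEof);
    Ok(())
}
```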
- /// - /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact - /// - /// # Examples - /// - /// Read unsigned 8 bit integers from an `AsyncRead`: - /// - /// ```rust - /// use tokio::io::{self, AsyncReadExt}; - /// - /// use std::io::Cursor; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut reader = Cursor::new(vec![2, 5]); - /// - /// assert_eq!(2, reader.read_u8().await?); - /// assert_eq!(5, reader.read_u8().await?); - /// - /// Ok(()) - /// } - /// ``` - fn read_u8(&mut self) -> ReadU8; - - /// Reads a signed 8 bit integer from the underlying reader. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn read_i8(&mut self) -> io::Result; - /// ``` - /// - /// It is recommended to use a buffered reader to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncReadExt::read_exact`]. - /// - /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact - /// - /// # Examples - /// - /// Read unsigned 8 bit integers from an `AsyncRead`: - /// - /// ```rust - /// use tokio::io::{self, AsyncReadExt}; - /// - /// use std::io::Cursor; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut reader = Cursor::new(vec![0x02, 0xfb]); - /// - /// assert_eq!(2, reader.read_i8().await?); - /// assert_eq!(-5, reader.read_i8().await?); - /// - /// Ok(()) - /// } - /// ``` - fn read_i8(&mut self) -> ReadI8; - - /// Reads an unsigned 16-bit integer in big-endian order from the - /// underlying reader. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn read_u16(&mut self) -> io::Result; - /// ``` - /// - /// It is recommended to use a buffered reader to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncReadExt::read_exact`]. - /// - /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact - /// - /// # Examples - /// - /// Read unsigned 16 bit big-endian integers from a `AsyncRead`: - /// - /// ```rust - /// use tokio::io::{self, AsyncReadExt}; - /// - /// use std::io::Cursor; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut reader = Cursor::new(vec![2, 5, 3, 0]); - /// - /// assert_eq!(517, reader.read_u16().await?); - /// assert_eq!(768, reader.read_u16().await?); - /// Ok(()) - /// } - /// ``` - fn read_u16(&mut self) -> ReadU16; - - /// Reads a signed 16-bit integer in big-endian order from the - /// underlying reader. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn read_i16(&mut self) -> io::Result; - /// ``` - /// - /// It is recommended to use a buffered reader to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncReadExt::read_exact`]. - /// - /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact - /// - /// # Examples - /// - /// Read signed 16 bit big-endian integers from a `AsyncRead`: - /// - /// ```rust - /// use tokio::io::{self, AsyncReadExt}; - /// - /// use std::io::Cursor; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut reader = Cursor::new(vec![0x00, 0xc1, 0xff, 0x7c]); - /// - /// assert_eq!(193, reader.read_i16().await?); - /// assert_eq!(-132, reader.read_i16().await?); - /// Ok(()) - /// } - /// ``` - fn read_i16(&mut self) -> ReadI16; - - /// Reads an unsigned 32-bit integer in big-endian order from the - /// underlying reader. 
- /// - /// Equivalent to: - /// - /// ```ignore - /// async fn read_u32(&mut self) -> io::Result; - /// ``` - /// - /// It is recommended to use a buffered reader to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncReadExt::read_exact`]. - /// - /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact - /// - /// # Examples - /// - /// Read unsigned 32-bit big-endian integers from a `AsyncRead`: - /// - /// ```rust - /// use tokio::io::{self, AsyncReadExt}; - /// - /// use std::io::Cursor; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut reader = Cursor::new(vec![0x00, 0x00, 0x01, 0x0b]); - /// - /// assert_eq!(267, reader.read_u32().await?); - /// Ok(()) - /// } - /// ``` - fn read_u32(&mut self) -> ReadU32; - - /// Reads a signed 32-bit integer in big-endian order from the - /// underlying reader. - /// - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn read_i32(&mut self) -> io::Result; - /// ``` - /// - /// It is recommended to use a buffered reader to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncReadExt::read_exact`]. - /// - /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact - /// - /// # Examples - /// - /// Read signed 32-bit big-endian integers from a `AsyncRead`: - /// - /// ```rust - /// use tokio::io::{self, AsyncReadExt}; - /// - /// use std::io::Cursor; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut reader = Cursor::new(vec![0xff, 0xff, 0x7a, 0x33]); - /// - /// assert_eq!(-34253, reader.read_i32().await?); - /// Ok(()) - /// } - /// ``` - fn read_i32(&mut self) -> ReadI32; - - /// Reads an unsigned 64-bit integer in big-endian order from the - /// underlying reader. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn read_u64(&mut self) -> io::Result; - /// ``` - /// - /// It is recommended to use a buffered reader to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncReadExt::read_exact`]. - /// - /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact - /// - /// # Examples - /// - /// Read unsigned 64-bit big-endian integers from a `AsyncRead`: - /// - /// ```rust - /// use tokio::io::{self, AsyncReadExt}; - /// - /// use std::io::Cursor; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut reader = Cursor::new(vec![ - /// 0x00, 0x03, 0x43, 0x95, 0x4d, 0x60, 0x86, 0x83 - /// ]); - /// - /// assert_eq!(918733457491587, reader.read_u64().await?); - /// Ok(()) - /// } - /// ``` - fn read_u64(&mut self) -> ReadU64; - - /// Reads an signed 64-bit integer in big-endian order from the - /// underlying reader. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn read_i64(&mut self) -> io::Result; - /// ``` - /// - /// It is recommended to use a buffered reader to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncReadExt::read_exact`]. 
- /// - /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact - /// - /// # Examples - /// - /// Read signed 64-bit big-endian integers from a `AsyncRead`: - /// - /// ```rust - /// use tokio::io::{self, AsyncReadExt}; - /// - /// use std::io::Cursor; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut reader = Cursor::new(vec![0x80, 0, 0, 0, 0, 0, 0, 0]); - /// - /// assert_eq!(i64::min_value(), reader.read_i64().await?); - /// Ok(()) - /// } - /// ``` - fn read_i64(&mut self) -> ReadI64; - - /// Reads an unsigned 128-bit integer in big-endian order from the - /// underlying reader. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn read_u128(&mut self) -> io::Result; - /// ``` - /// - /// It is recommended to use a buffered reader to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncReadExt::read_exact`]. - /// - /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact - /// - /// # Examples - /// - /// Read unsigned 128-bit big-endian integers from a `AsyncRead`: - /// - /// ```rust - /// use tokio::io::{self, AsyncReadExt}; - /// - /// use std::io::Cursor; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut reader = Cursor::new(vec![ - /// 0x00, 0x03, 0x43, 0x95, 0x4d, 0x60, 0x86, 0x83, - /// 0x00, 0x03, 0x43, 0x95, 0x4d, 0x60, 0x86, 0x83 - /// ]); - /// - /// assert_eq!(16947640962301618749969007319746179, reader.read_u128().await?); - /// Ok(()) - /// } - /// ``` - fn read_u128(&mut self) -> ReadU128; - - /// Reads an signed 128-bit integer in big-endian order from the - /// underlying reader. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn read_i128(&mut self) -> io::Result; - /// ``` - /// - /// It is recommended to use a buffered reader to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncReadExt::read_exact`]. - /// - /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact - /// - /// # Examples - /// - /// Read signed 128-bit big-endian integers from a `AsyncRead`: - /// - /// ```rust - /// use tokio::io::{self, AsyncReadExt}; - /// - /// use std::io::Cursor; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut reader = Cursor::new(vec![ - /// 0x80, 0, 0, 0, 0, 0, 0, 0, - /// 0, 0, 0, 0, 0, 0, 0, 0 - /// ]); - /// - /// assert_eq!(i128::min_value(), reader.read_i128().await?); - /// Ok(()) - /// } - /// ``` - fn read_i128(&mut self) -> ReadI128; - - /// Reads an unsigned 16-bit integer in little-endian order from the - /// underlying reader. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn read_u16_le(&mut self) -> io::Result; - /// ``` - /// - /// It is recommended to use a buffered reader to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncReadExt::read_exact`]. 
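Because the big-endian and little-endian variants are documented separately, a side-by-side editorial sketch may help: the same two bytes decode to different values depending on which method is called:

```rust
// Editorial sketch, not part of the vendored tokio 0.2.25 sources.
use std::io::Cursor;
use tokio::io::{self, AsyncReadExt};

#[tokio::main]
async fn main() -> io::Result<()> {
    // The same byte sequence, read once as big-endian and once as little-endian.
    let mut big_endian = Cursor::new(vec![0x12, 0x34]);
    let mut little_endian = Cursor::new(vec![0x12, 0x34]);

    assert_eq!(0x1234, big_endian.read_u16().await?);
    assert_eq!(0x3412, little_endian.read_u16_le().await?);
    Ok(())
}
```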
- /// - /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact - /// - /// # Examples - /// - /// Read unsigned 16 bit little-endian integers from a `AsyncRead`: - /// - /// ```rust - /// use tokio::io::{self, AsyncReadExt}; - /// - /// use std::io::Cursor; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut reader = Cursor::new(vec![2, 5, 3, 0]); - /// - /// assert_eq!(1282, reader.read_u16_le().await?); - /// assert_eq!(3, reader.read_u16_le().await?); - /// Ok(()) - /// } - /// ``` - fn read_u16_le(&mut self) -> ReadU16Le; - - /// Reads a signed 16-bit integer in little-endian order from the - /// underlying reader. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn read_i16_le(&mut self) -> io::Result; - /// ``` - /// - /// It is recommended to use a buffered reader to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncReadExt::read_exact`]. - /// - /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact - /// - /// # Examples - /// - /// Read signed 16 bit little-endian integers from a `AsyncRead`: - /// - /// ```rust - /// use tokio::io::{self, AsyncReadExt}; - /// - /// use std::io::Cursor; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut reader = Cursor::new(vec![0x00, 0xc1, 0xff, 0x7c]); - /// - /// assert_eq!(-16128, reader.read_i16_le().await?); - /// assert_eq!(31999, reader.read_i16_le().await?); - /// Ok(()) - /// } - /// ``` - fn read_i16_le(&mut self) -> ReadI16Le; - - /// Reads an unsigned 32-bit integer in little-endian order from the - /// underlying reader. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn read_u32_le(&mut self) -> io::Result; - /// ``` - /// - /// It is recommended to use a buffered reader to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncReadExt::read_exact`]. - /// - /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact - /// - /// # Examples - /// - /// Read unsigned 32-bit little-endian integers from a `AsyncRead`: - /// - /// ```rust - /// use tokio::io::{self, AsyncReadExt}; - /// - /// use std::io::Cursor; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut reader = Cursor::new(vec![0x00, 0x00, 0x01, 0x0b]); - /// - /// assert_eq!(184614912, reader.read_u32_le().await?); - /// Ok(()) - /// } - /// ``` - fn read_u32_le(&mut self) -> ReadU32Le; - - /// Reads a signed 32-bit integer in little-endian order from the - /// underlying reader. - /// - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn read_i32_le(&mut self) -> io::Result; - /// ``` - /// - /// It is recommended to use a buffered reader to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncReadExt::read_exact`]. - /// - /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact - /// - /// # Examples - /// - /// Read signed 32-bit little-endian integers from a `AsyncRead`: - /// - /// ```rust - /// use tokio::io::{self, AsyncReadExt}; - /// - /// use std::io::Cursor; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut reader = Cursor::new(vec![0xff, 0xff, 0x7a, 0x33]); - /// - /// assert_eq!(863698943, reader.read_i32_le().await?); - /// Ok(()) - /// } - /// ``` - fn read_i32_le(&mut self) -> ReadI32Le; - - /// Reads an unsigned 64-bit integer in little-endian order from the - /// underlying reader. 
- /// - /// Equivalent to: - /// - /// ```ignore - /// async fn read_u64_le(&mut self) -> io::Result; - /// ``` - /// - /// It is recommended to use a buffered reader to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncReadExt::read_exact`]. - /// - /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact - /// - /// # Examples - /// - /// Read unsigned 64-bit little-endian integers from a `AsyncRead`: - /// - /// ```rust - /// use tokio::io::{self, AsyncReadExt}; - /// - /// use std::io::Cursor; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut reader = Cursor::new(vec![ - /// 0x00, 0x03, 0x43, 0x95, 0x4d, 0x60, 0x86, 0x83 - /// ]); - /// - /// assert_eq!(9477368352180732672, reader.read_u64_le().await?); - /// Ok(()) - /// } - /// ``` - fn read_u64_le(&mut self) -> ReadU64Le; - - /// Reads an signed 64-bit integer in little-endian order from the - /// underlying reader. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn read_i64_le(&mut self) -> io::Result; - /// ``` - /// - /// It is recommended to use a buffered reader to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncReadExt::read_exact`]. - /// - /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact - /// - /// # Examples - /// - /// Read signed 64-bit little-endian integers from a `AsyncRead`: - /// - /// ```rust - /// use tokio::io::{self, AsyncReadExt}; - /// - /// use std::io::Cursor; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut reader = Cursor::new(vec![0x80, 0, 0, 0, 0, 0, 0, 0]); - /// - /// assert_eq!(128, reader.read_i64_le().await?); - /// Ok(()) - /// } - /// ``` - fn read_i64_le(&mut self) -> ReadI64Le; - - /// Reads an unsigned 128-bit integer in little-endian order from the - /// underlying reader. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn read_u128_le(&mut self) -> io::Result; - /// ``` - /// - /// It is recommended to use a buffered reader to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncReadExt::read_exact`]. - /// - /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact - /// - /// # Examples - /// - /// Read unsigned 128-bit little-endian integers from a `AsyncRead`: - /// - /// ```rust - /// use tokio::io::{self, AsyncReadExt}; - /// - /// use std::io::Cursor; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut reader = Cursor::new(vec![ - /// 0x00, 0x03, 0x43, 0x95, 0x4d, 0x60, 0x86, 0x83, - /// 0x00, 0x03, 0x43, 0x95, 0x4d, 0x60, 0x86, 0x83 - /// ]); - /// - /// assert_eq!(174826588484952389081207917399662330624, reader.read_u128_le().await?); - /// Ok(()) - /// } - /// ``` - fn read_u128_le(&mut self) -> ReadU128Le; - - /// Reads an signed 128-bit integer in little-endian order from the - /// underlying reader. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn read_i128_le(&mut self) -> io::Result; - /// ``` - /// - /// It is recommended to use a buffered reader to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncReadExt::read_exact`]. 
- /// - /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact - /// - /// # Examples - /// - /// Read signed 128-bit little-endian integers from a `AsyncRead`: - /// - /// ```rust - /// use tokio::io::{self, AsyncReadExt}; - /// - /// use std::io::Cursor; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut reader = Cursor::new(vec![ - /// 0x80, 0, 0, 0, 0, 0, 0, 0, - /// 0, 0, 0, 0, 0, 0, 0, 0 - /// ]); - /// - /// assert_eq!(128, reader.read_i128_le().await?); - /// Ok(()) - /// } - /// ``` - fn read_i128_le(&mut self) -> ReadI128Le; - } - - /// Reads all bytes until EOF in this source, placing them into `buf`. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn read_to_end(&mut self, buf: &mut Vec) -> io::Result; - /// ``` - /// - /// All bytes read from this source will be appended to the specified - /// buffer `buf`. This function will continuously call [`read()`] to - /// append more data to `buf` until [`read()`] returns `Ok(0)`. - /// - /// If successful, the total number of bytes read is returned. - /// - /// [`read()`]: AsyncReadExt::read - /// - /// # Errors - /// - /// If a read error is encountered then the `read_to_end` operation - /// immediately completes. Any bytes which have already been read will - /// be appended to `buf`. - /// - /// # Examples - /// - /// [`File`][crate::fs::File]s implement `Read`: - /// - /// ```no_run - /// use tokio::io::{self, AsyncReadExt}; - /// use tokio::fs::File; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut f = File::open("foo.txt").await?; - /// let mut buffer = Vec::new(); - /// - /// // read the whole file - /// f.read_to_end(&mut buffer).await?; - /// Ok(()) - /// } - /// ``` - /// - /// (See also the [`tokio::fs::read`] convenience function for reading from a - /// file.) - /// - /// [`tokio::fs::read`]: fn@crate::fs::read - fn read_to_end<'a>(&'a mut self, buf: &'a mut Vec) -> ReadToEnd<'a, Self> - where - Self: Unpin, - { - read_to_end(self, buf) - } - - /// Reads all bytes until EOF in this source, appending them to `buf`. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn read_to_string(&mut self, buf: &mut String) -> io::Result; - /// ``` - /// - /// If successful, the number of bytes which were read and appended to - /// `buf` is returned. - /// - /// # Errors - /// - /// If the data in this stream is *not* valid UTF-8 then an error is - /// returned and `buf` is unchanged. - /// - /// See [`read_to_end`][AsyncReadExt::read_to_end] for other error semantics. - /// - /// # Examples - /// - /// [`File`][crate::fs::File]s implement `Read`: - /// - /// ```no_run - /// use tokio::io::{self, AsyncReadExt}; - /// use tokio::fs::File; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut f = File::open("foo.txt").await?; - /// let mut buffer = String::new(); - /// - /// f.read_to_string(&mut buffer).await?; - /// Ok(()) - /// } - /// ``` - /// - /// (See also the [`crate::fs::read_to_string`] convenience function for - /// reading from a file.) - /// - /// [`crate::fs::read_to_string`]: fn@crate::fs::read_to_string - fn read_to_string<'a>(&'a mut self, dst: &'a mut String) -> ReadToString<'a, Self> - where - Self: Unpin, - { - read_to_string(self, dst) - } - - /// Creates an adaptor which reads at most `limit` bytes from it. - /// - /// This function returns a new instance of `AsyncRead` which will read - /// at most `limit` bytes, after which it will always return EOF - /// (`Ok(0)`). 
Any read errors will not count towards the number of - /// bytes read and future calls to [`read()`] may succeed. - /// - /// [`read()`]: fn@crate::io::AsyncReadExt::read - /// - /// [read]: AsyncReadExt::read - /// - /// # Examples - /// - /// [`File`][crate::fs::File]s implement `Read`: - /// - /// ```no_run - /// use tokio::io::{self, AsyncReadExt}; - /// use tokio::fs::File; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let f = File::open("foo.txt").await?; - /// let mut buffer = [0; 5]; - /// - /// // read at most five bytes - /// let mut handle = f.take(5); - /// - /// handle.read(&mut buffer).await?; - /// Ok(()) - /// } - /// ``` - fn take(self, limit: u64) -> Take - where - Self: Sized, - { - take(self, limit) - } - } -} - -impl AsyncReadExt for R {} diff --git a/third_party/rust/tokio-0.2.25/src/io/util/async_seek_ext.rs b/third_party/rust/tokio-0.2.25/src/io/util/async_seek_ext.rs deleted file mode 100644 index c7a0f72fb81f..000000000000 --- a/third_party/rust/tokio-0.2.25/src/io/util/async_seek_ext.rs +++ /dev/null @@ -1,67 +0,0 @@ -use crate::io::seek::{seek, Seek}; -use crate::io::AsyncSeek; -use std::io::SeekFrom; - -/// An extension trait which adds utility methods to [`AsyncSeek`] types. -/// -/// As a convenience, this trait may be imported using the [`prelude`]: -/// -/// # Examples -/// -/// ``` -/// use std::io::{Cursor, SeekFrom}; -/// use tokio::prelude::*; -/// -/// #[tokio::main] -/// async fn main() -> io::Result<()> { -/// let mut cursor = Cursor::new(b"abcdefg"); -/// -/// // the `seek` method is defined by this trait -/// cursor.seek(SeekFrom::Start(3)).await?; -/// -/// let mut buf = [0; 1]; -/// let n = cursor.read(&mut buf).await?; -/// assert_eq!(n, 1); -/// assert_eq!(buf, [b'd']); -/// -/// Ok(()) -/// } -/// ``` -/// -/// See [module][crate::io] documentation for more details. -/// -/// [`AsyncSeek`]: AsyncSeek -/// [`prelude`]: crate::prelude -pub trait AsyncSeekExt: AsyncSeek { - /// Creates a future which will seek an IO object, and then yield the - /// new position in the object and the object itself. - /// - /// In the case of an error the buffer and the object will be discarded, with - /// the error yielded. 
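The description above notes that the returned future resolves to the new position. An editorial sketch modelled on the `Cursor` example earlier in this trait's docs; the concrete offsets are my own:

```rust
// Editorial sketch, not part of the vendored tokio 0.2.25 sources.
use std::io::{Cursor, SeekFrom};
use tokio::io::{self, AsyncReadExt, AsyncSeekExt};

#[tokio::main]
async fn main() -> io::Result<()> {
    let mut cursor = Cursor::new(b"abcdefg");

    // The future resolves to the new absolute position.
    let pos = cursor.seek(SeekFrom::Start(3)).await?;
    assert_eq!(pos, 3);

    // Seeking relative to the current position also reports the absolute offset.
    let pos = cursor.seek(SeekFrom::Current(2)).await?;
    assert_eq!(pos, 5);

    let mut rest = Vec::new();
    cursor.read_to_end(&mut rest).await?;
    assert_eq!(rest, b"fg");
    Ok(())
}
```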
- /// - /// # Examples - /// - /// ```no_run - /// use tokio::fs::File; - /// use tokio::prelude::*; - /// - /// use std::io::SeekFrom; - /// - /// # async fn dox() -> std::io::Result<()> { - /// let mut file = File::open("foo.txt").await?; - /// file.seek(SeekFrom::Start(6)).await?; - /// - /// let mut contents = vec![0u8; 10]; - /// file.read_exact(&mut contents).await?; - /// # Ok(()) - /// # } - /// ``` - fn seek(&mut self, pos: SeekFrom) -> Seek<'_, Self> - where - Self: Unpin, - { - seek(self, pos) - } -} - -impl AsyncSeekExt for S {} diff --git a/third_party/rust/tokio-0.2.25/src/io/util/async_write_ext.rs b/third_party/rust/tokio-0.2.25/src/io/util/async_write_ext.rs deleted file mode 100644 index 321301e28977..000000000000 --- a/third_party/rust/tokio-0.2.25/src/io/util/async_write_ext.rs +++ /dev/null @@ -1,1006 +0,0 @@ -use crate::io::util::flush::{flush, Flush}; -use crate::io::util::shutdown::{shutdown, Shutdown}; -use crate::io::util::write::{write, Write}; -use crate::io::util::write_all::{write_all, WriteAll}; -use crate::io::util::write_buf::{write_buf, WriteBuf}; -use crate::io::util::write_int::{ - WriteI128, WriteI128Le, WriteI16, WriteI16Le, WriteI32, WriteI32Le, WriteI64, WriteI64Le, - WriteI8, -}; -use crate::io::util::write_int::{ - WriteU128, WriteU128Le, WriteU16, WriteU16Le, WriteU32, WriteU32Le, WriteU64, WriteU64Le, - WriteU8, -}; -use crate::io::AsyncWrite; - -use bytes::Buf; - -cfg_io_util! { - /// Defines numeric writer - macro_rules! write_impl { - ( - $( - $(#[$outer:meta])* - fn $name:ident(&mut self, n: $ty:ty) -> $($fut:ident)*; - )* - ) => { - $( - $(#[$outer])* - fn $name<'a>(&'a mut self, n: $ty) -> $($fut)*<&'a mut Self> where Self: Unpin { - $($fut)*::new(self, n) - } - )* - } - } - - /// Writes bytes to a sink. - /// - /// Implemented as an extention trait, adding utility methods to all - /// [`AsyncWrite`] types. Callers will tend to import this trait instead of - /// [`AsyncWrite`]. - /// - /// As a convenience, this trait may be imported using the [`prelude`]: - /// - /// ```no_run - /// use tokio::prelude::*; - /// use tokio::fs::File; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let data = b"some bytes"; - /// - /// let mut pos = 0; - /// let mut buffer = File::create("foo.txt").await?; - /// - /// while pos < data.len() { - /// let bytes_written = buffer.write(&data[pos..]).await?; - /// pos += bytes_written; - /// } - /// - /// Ok(()) - /// } - /// ``` - /// - /// See [module][crate::io] documentation for more details. - /// - /// [`AsyncWrite`]: AsyncWrite - /// [`prelude`]: crate::prelude - pub trait AsyncWriteExt: AsyncWrite { - /// Writes a buffer into this writer, returning how many bytes were - /// written. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn write(&mut self, buf: &[u8]) -> io::Result; - /// ``` - /// - /// This function will attempt to write the entire contents of `buf`, but - /// the entire write may not succeed, or the write may also generate an - /// error. A call to `write` represents *at most one* attempt to write to - /// any wrapped object. - /// - /// # Return - /// - /// If the return value is `Ok(n)` then it must be guaranteed that `n <= - /// buf.len()`. A return value of `0` typically means that the - /// underlying object is no longer able to accept bytes and will likely - /// not be able to in the future as well, or that the buffer provided is - /// empty. 
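The `Ok(0)` case in the paragraph above is easiest to demonstrate with an empty input buffer. An editorial sketch in the `Vec`-as-writer style used by the numeric examples further down:

```rust
// Editorial sketch, not part of the vendored tokio 0.2.25 sources.
use tokio::io::{self, AsyncWriteExt};

#[tokio::main]
async fn main() -> io::Result<()> {
    let mut writer = Vec::new();

    // A non-empty buffer: some prefix of it (for Vec, all of it) is accepted.
    let n = writer.write(b"some bytes").await?;
    assert!(n > 0 && n <= 10);

    // An empty buffer: Ok(0) here reflects the empty input, not a broken writer.
    let n = writer.write(b"").await?;
    assert_eq!(n, 0);
    Ok(())
}
```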
- /// - /// # Errors - /// - /// Each call to `write` may generate an I/O error indicating that the - /// operation could not be completed. If an error is returned then no bytes - /// in the buffer were written to this writer. - /// - /// It is **not** considered an error if the entire buffer could not be - /// written to this writer. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::io::{self, AsyncWriteExt}; - /// use tokio::fs::File; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut file = File::create("foo.txt").await?; - /// - /// // Writes some prefix of the byte string, not necessarily all of it. - /// file.write(b"some bytes").await?; - /// Ok(()) - /// } - /// ``` - fn write<'a>(&'a mut self, src: &'a [u8]) -> Write<'a, Self> - where - Self: Unpin, - { - write(self, src) - } - - /// Writes a buffer into this writer, advancing the buffer's internal - /// cursor. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn write_buf(&mut self, buf: &mut B) -> io::Result; - /// ``` - /// - /// This function will attempt to write the entire contents of `buf`, but - /// the entire write may not succeed, or the write may also generate an - /// error. After the operation completes, the buffer's - /// internal cursor is advanced by the number of bytes written. A - /// subsequent call to `write_buf` using the **same** `buf` value will - /// resume from the point that the first call to `write_buf` completed. - /// A call to `write` represents *at most one* attempt to write to any - /// wrapped object. - /// - /// # Return - /// - /// If the return value is `Ok(n)` then it must be guaranteed that `n <= - /// buf.len()`. A return value of `0` typically means that the - /// underlying object is no longer able to accept bytes and will likely - /// not be able to in the future as well, or that the buffer provided is - /// empty. - /// - /// # Errors - /// - /// Each call to `write` may generate an I/O error indicating that the - /// operation could not be completed. If an error is returned then no bytes - /// in the buffer were written to this writer. - /// - /// It is **not** considered an error if the entire buffer could not be - /// written to this writer. - /// - /// # Examples - /// - /// [`File`] implements `Read` and [`Cursor<&[u8]>`] implements [`Buf`]: - /// - /// [`File`]: crate::fs::File - /// [`Buf`]: bytes::Buf - /// - /// ```no_run - /// use tokio::io::{self, AsyncWriteExt}; - /// use tokio::fs::File; - /// - /// use bytes::Buf; - /// use std::io::Cursor; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut file = File::create("foo.txt").await?; - /// let mut buffer = Cursor::new(b"data to write"); - /// - /// // Loop until the entire contents of the buffer are written to - /// // the file. - /// while buffer.has_remaining() { - /// // Writes some prefix of the byte string, not necessarily - /// // all of it. - /// file.write_buf(&mut buffer).await?; - /// } - /// - /// Ok(()) - /// } - /// ``` - fn write_buf<'a, B>(&'a mut self, src: &'a mut B) -> WriteBuf<'a, Self, B> - where - Self: Sized + Unpin, - B: Buf, - { - write_buf(self, src) - } - - /// Attempts to write an entire buffer into this writer. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn write_all(&mut self, buf: &[u8]) -> io::Result<()>; - /// ``` - /// - /// This method will continuously call [`write`] until there is no more data - /// to be written. 
This method will not return until the entire buffer - /// has been successfully written or such an error occurs. The first - /// error generated from this method will be returned. - /// - /// # Errors - /// - /// This function will return the first error that [`write`] returns. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::io::{self, AsyncWriteExt}; - /// use tokio::fs::File; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut buffer = File::create("foo.txt").await?; - /// - /// buffer.write_all(b"some bytes").await?; - /// Ok(()) - /// } - /// ``` - /// - /// [`write`]: AsyncWriteExt::write - fn write_all<'a>(&'a mut self, src: &'a [u8]) -> WriteAll<'a, Self> - where - Self: Unpin, - { - write_all(self, src) - } - - write_impl! { - /// Writes an unsigned 8-bit integer to the underlying writer. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn write_u8(&mut self, n: u8) -> io::Result<()>; - /// ``` - /// - /// It is recommended to use a buffered writer to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncWriteExt::write_all`]. - /// - /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all - /// - /// # Examples - /// - /// Write unsigned 8 bit integers to a `AsyncWrite`: - /// - /// ```rust - /// use tokio::io::{self, AsyncWriteExt}; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut writer = Vec::new(); - /// - /// writer.write_u8(2).await?; - /// writer.write_u8(5).await?; - /// - /// assert_eq!(writer, b"\x02\x05"); - /// Ok(()) - /// } - /// ``` - fn write_u8(&mut self, n: u8) -> WriteU8; - - /// Writes an unsigned 8-bit integer to the underlying writer. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn write_i8(&mut self, n: i8) -> io::Result<()>; - /// ``` - /// - /// It is recommended to use a buffered writer to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncWriteExt::write_all`]. - /// - /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all - /// - /// # Examples - /// - /// Write unsigned 8 bit integers to a `AsyncWrite`: - /// - /// ```rust - /// use tokio::io::{self, AsyncWriteExt}; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut writer = Vec::new(); - /// - /// writer.write_u8(2).await?; - /// writer.write_u8(5).await?; - /// - /// assert_eq!(writer, b"\x02\x05"); - /// Ok(()) - /// } - /// ``` - fn write_i8(&mut self, n: i8) -> WriteI8; - - /// Writes an unsigned 16-bit integer in big-endian order to the - /// underlying writer. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn write_u16(&mut self, n: u16) -> io::Result<()>; - /// ``` - /// - /// It is recommended to use a buffered writer to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncWriteExt::write_all`]. 
- /// - /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all - /// - /// # Examples - /// - /// Write unsigned 16-bit integers to a `AsyncWrite`: - /// - /// ```rust - /// use tokio::io::{self, AsyncWriteExt}; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut writer = Vec::new(); - /// - /// writer.write_u16(517).await?; - /// writer.write_u16(768).await?; - /// - /// assert_eq!(writer, b"\x02\x05\x03\x00"); - /// Ok(()) - /// } - /// ``` - fn write_u16(&mut self, n: u16) -> WriteU16; - - /// Writes a signed 16-bit integer in big-endian order to the - /// underlying writer. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn write_i16(&mut self, n: i16) -> io::Result<()>; - /// ``` - /// - /// It is recommended to use a buffered writer to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncWriteExt::write_all`]. - /// - /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all - /// - /// # Examples - /// - /// Write signed 16-bit integers to a `AsyncWrite`: - /// - /// ```rust - /// use tokio::io::{self, AsyncWriteExt}; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut writer = Vec::new(); - /// - /// writer.write_i16(193).await?; - /// writer.write_i16(-132).await?; - /// - /// assert_eq!(writer, b"\x00\xc1\xff\x7c"); - /// Ok(()) - /// } - /// ``` - fn write_i16(&mut self, n: i16) -> WriteI16; - - /// Writes an unsigned 32-bit integer in big-endian order to the - /// underlying writer. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn write_u32(&mut self, n: u32) -> io::Result<()>; - /// ``` - /// - /// It is recommended to use a buffered writer to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncWriteExt::write_all`]. - /// - /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all - /// - /// # Examples - /// - /// Write unsigned 32-bit integers to a `AsyncWrite`: - /// - /// ```rust - /// use tokio::io::{self, AsyncWriteExt}; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut writer = Vec::new(); - /// - /// writer.write_u32(267).await?; - /// writer.write_u32(1205419366).await?; - /// - /// assert_eq!(writer, b"\x00\x00\x01\x0b\x47\xd9\x3d\x66"); - /// Ok(()) - /// } - /// ``` - fn write_u32(&mut self, n: u32) -> WriteU32; - - /// Writes a signed 32-bit integer in big-endian order to the - /// underlying writer. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn write_i32(&mut self, n: i32) -> io::Result<()>; - /// ``` - /// - /// It is recommended to use a buffered writer to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncWriteExt::write_all`]. - /// - /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all - /// - /// # Examples - /// - /// Write signed 32-bit integers to a `AsyncWrite`: - /// - /// ```rust - /// use tokio::io::{self, AsyncWriteExt}; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut writer = Vec::new(); - /// - /// writer.write_i32(267).await?; - /// writer.write_i32(1205419366).await?; - /// - /// assert_eq!(writer, b"\x00\x00\x01\x0b\x47\xd9\x3d\x66"); - /// Ok(()) - /// } - /// ``` - fn write_i32(&mut self, n: i32) -> WriteI32; - - /// Writes an unsigned 64-bit integer in big-endian order to the - /// underlying writer. 
- /// - /// Equivalent to: - /// - /// ```ignore - /// async fn write_u64(&mut self, n: u64) -> io::Result<()>; - /// ``` - /// - /// It is recommended to use a buffered writer to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncWriteExt::write_all`]. - /// - /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all - /// - /// # Examples - /// - /// Write unsigned 64-bit integers to a `AsyncWrite`: - /// - /// ```rust - /// use tokio::io::{self, AsyncWriteExt}; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut writer = Vec::new(); - /// - /// writer.write_u64(918733457491587).await?; - /// writer.write_u64(143).await?; - /// - /// assert_eq!(writer, b"\x00\x03\x43\x95\x4d\x60\x86\x83\x00\x00\x00\x00\x00\x00\x00\x8f"); - /// Ok(()) - /// } - /// ``` - fn write_u64(&mut self, n: u64) -> WriteU64; - - /// Writes an signed 64-bit integer in big-endian order to the - /// underlying writer. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn write_i64(&mut self, n: i64) -> io::Result<()>; - /// ``` - /// - /// It is recommended to use a buffered writer to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncWriteExt::write_all`]. - /// - /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all - /// - /// # Examples - /// - /// Write signed 64-bit integers to a `AsyncWrite`: - /// - /// ```rust - /// use tokio::io::{self, AsyncWriteExt}; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut writer = Vec::new(); - /// - /// writer.write_i64(i64::min_value()).await?; - /// writer.write_i64(i64::max_value()).await?; - /// - /// assert_eq!(writer, b"\x80\x00\x00\x00\x00\x00\x00\x00\x7f\xff\xff\xff\xff\xff\xff\xff"); - /// Ok(()) - /// } - /// ``` - fn write_i64(&mut self, n: i64) -> WriteI64; - - /// Writes an unsigned 128-bit integer in big-endian order to the - /// underlying writer. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn write_u128(&mut self, n: u128) -> io::Result<()>; - /// ``` - /// - /// It is recommended to use a buffered writer to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncWriteExt::write_all`]. - /// - /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all - /// - /// # Examples - /// - /// Write unsigned 128-bit integers to a `AsyncWrite`: - /// - /// ```rust - /// use tokio::io::{self, AsyncWriteExt}; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut writer = Vec::new(); - /// - /// writer.write_u128(16947640962301618749969007319746179).await?; - /// - /// assert_eq!(writer, vec![ - /// 0x00, 0x03, 0x43, 0x95, 0x4d, 0x60, 0x86, 0x83, - /// 0x00, 0x03, 0x43, 0x95, 0x4d, 0x60, 0x86, 0x83 - /// ]); - /// Ok(()) - /// } - /// ``` - fn write_u128(&mut self, n: u128) -> WriteU128; - - /// Writes an signed 128-bit integer in big-endian order to the - /// underlying writer. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn write_i128(&mut self, n: i128) -> io::Result<()>; - /// ``` - /// - /// It is recommended to use a buffered writer to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncWriteExt::write_all`]. 
- /// - /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all - /// - /// # Examples - /// - /// Write signed 128-bit integers to a `AsyncWrite`: - /// - /// ```rust - /// use tokio::io::{self, AsyncWriteExt}; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut writer = Vec::new(); - /// - /// writer.write_i128(i128::min_value()).await?; - /// - /// assert_eq!(writer, vec![ - /// 0x80, 0, 0, 0, 0, 0, 0, 0, - /// 0, 0, 0, 0, 0, 0, 0, 0 - /// ]); - /// Ok(()) - /// } - /// ``` - fn write_i128(&mut self, n: i128) -> WriteI128; - - - /// Writes an unsigned 16-bit integer in little-endian order to the - /// underlying writer. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn write_u16_le(&mut self, n: u16) -> io::Result<()>; - /// ``` - /// - /// It is recommended to use a buffered writer to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncWriteExt::write_all`]. - /// - /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all - /// - /// # Examples - /// - /// Write unsigned 16-bit integers to a `AsyncWrite`: - /// - /// ```rust - /// use tokio::io::{self, AsyncWriteExt}; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut writer = Vec::new(); - /// - /// writer.write_u16_le(517).await?; - /// writer.write_u16_le(768).await?; - /// - /// assert_eq!(writer, b"\x05\x02\x00\x03"); - /// Ok(()) - /// } - /// ``` - fn write_u16_le(&mut self, n: u16) -> WriteU16Le; - - /// Writes a signed 16-bit integer in little-endian order to the - /// underlying writer. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn write_i16_le(&mut self, n: i16) -> io::Result<()>; - /// ``` - /// - /// It is recommended to use a buffered writer to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncWriteExt::write_all`]. - /// - /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all - /// - /// # Examples - /// - /// Write signed 16-bit integers to a `AsyncWrite`: - /// - /// ```rust - /// use tokio::io::{self, AsyncWriteExt}; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut writer = Vec::new(); - /// - /// writer.write_i16_le(193).await?; - /// writer.write_i16_le(-132).await?; - /// - /// assert_eq!(writer, b"\xc1\x00\x7c\xff"); - /// Ok(()) - /// } - /// ``` - fn write_i16_le(&mut self, n: i16) -> WriteI16Le; - - /// Writes an unsigned 32-bit integer in little-endian order to the - /// underlying writer. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn write_u32_le(&mut self, n: u32) -> io::Result<()>; - /// ``` - /// - /// It is recommended to use a buffered writer to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncWriteExt::write_all`]. - /// - /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all - /// - /// # Examples - /// - /// Write unsigned 32-bit integers to a `AsyncWrite`: - /// - /// ```rust - /// use tokio::io::{self, AsyncWriteExt}; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut writer = Vec::new(); - /// - /// writer.write_u32_le(267).await?; - /// writer.write_u32_le(1205419366).await?; - /// - /// assert_eq!(writer, b"\x0b\x01\x00\x00\x66\x3d\xd9\x47"); - /// Ok(()) - /// } - /// ``` - fn write_u32_le(&mut self, n: u32) -> WriteU32Le; - - /// Writes a signed 32-bit integer in little-endian order to the - /// underlying writer. 
- /// - /// Equivalent to: - /// - /// ```ignore - /// async fn write_i32_le(&mut self, n: i32) -> io::Result<()>; - /// ``` - /// - /// It is recommended to use a buffered writer to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncWriteExt::write_all`]. - /// - /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all - /// - /// # Examples - /// - /// Write signed 32-bit integers to a `AsyncWrite`: - /// - /// ```rust - /// use tokio::io::{self, AsyncWriteExt}; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut writer = Vec::new(); - /// - /// writer.write_i32_le(267).await?; - /// writer.write_i32_le(1205419366).await?; - /// - /// assert_eq!(writer, b"\x0b\x01\x00\x00\x66\x3d\xd9\x47"); - /// Ok(()) - /// } - /// ``` - fn write_i32_le(&mut self, n: i32) -> WriteI32Le; - - /// Writes an unsigned 64-bit integer in little-endian order to the - /// underlying writer. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn write_u64_le(&mut self, n: u64) -> io::Result<()>; - /// ``` - /// - /// It is recommended to use a buffered writer to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncWriteExt::write_all`]. - /// - /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all - /// - /// # Examples - /// - /// Write unsigned 64-bit integers to a `AsyncWrite`: - /// - /// ```rust - /// use tokio::io::{self, AsyncWriteExt}; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut writer = Vec::new(); - /// - /// writer.write_u64_le(918733457491587).await?; - /// writer.write_u64_le(143).await?; - /// - /// assert_eq!(writer, b"\x83\x86\x60\x4d\x95\x43\x03\x00\x8f\x00\x00\x00\x00\x00\x00\x00"); - /// Ok(()) - /// } - /// ``` - fn write_u64_le(&mut self, n: u64) -> WriteU64Le; - - /// Writes an signed 64-bit integer in little-endian order to the - /// underlying writer. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn write_i64_le(&mut self, n: i64) -> io::Result<()>; - /// ``` - /// - /// It is recommended to use a buffered writer to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncWriteExt::write_all`]. - /// - /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all - /// - /// # Examples - /// - /// Write signed 64-bit integers to a `AsyncWrite`: - /// - /// ```rust - /// use tokio::io::{self, AsyncWriteExt}; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut writer = Vec::new(); - /// - /// writer.write_i64_le(i64::min_value()).await?; - /// writer.write_i64_le(i64::max_value()).await?; - /// - /// assert_eq!(writer, b"\x00\x00\x00\x00\x00\x00\x00\x80\xff\xff\xff\xff\xff\xff\xff\x7f"); - /// Ok(()) - /// } - /// ``` - fn write_i64_le(&mut self, n: i64) -> WriteI64Le; - - /// Writes an unsigned 128-bit integer in little-endian order to the - /// underlying writer. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn write_u128_le(&mut self, n: u128) -> io::Result<()>; - /// ``` - /// - /// It is recommended to use a buffered writer to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncWriteExt::write_all`]. 
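To make the byte-order difference concrete, a small sketch contrasting the big-endian and little-endian variants on the same value (same assumptions as the previous sketch; the value is arbitrary):

```rust
use tokio::io::{self, AsyncWriteExt};

#[tokio::main]
async fn main() -> io::Result<()> {
    let mut be = Vec::new();
    let mut le = Vec::new();

    // The same integer serialized in both byte orders.
    be.write_u32(0x0102_0304).await?;
    le.write_u32_le(0x0102_0304).await?;

    assert_eq!(be, b"\x01\x02\x03\x04");
    assert_eq!(le, b"\x04\x03\x02\x01");
    Ok(())
}
```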
- /// - /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all - /// - /// # Examples - /// - /// Write unsigned 128-bit integers to a `AsyncWrite`: - /// - /// ```rust - /// use tokio::io::{self, AsyncWriteExt}; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut writer = Vec::new(); - /// - /// writer.write_u128_le(16947640962301618749969007319746179).await?; - /// - /// assert_eq!(writer, vec![ - /// 0x83, 0x86, 0x60, 0x4d, 0x95, 0x43, 0x03, 0x00, - /// 0x83, 0x86, 0x60, 0x4d, 0x95, 0x43, 0x03, 0x00, - /// ]); - /// Ok(()) - /// } - /// ``` - fn write_u128_le(&mut self, n: u128) -> WriteU128Le; - - /// Writes an signed 128-bit integer in little-endian order to the - /// underlying writer. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn write_i128_le(&mut self, n: i128) -> io::Result<()>; - /// ``` - /// - /// It is recommended to use a buffered writer to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncWriteExt::write_all`]. - /// - /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all - /// - /// # Examples - /// - /// Write signed 128-bit integers to a `AsyncWrite`: - /// - /// ```rust - /// use tokio::io::{self, AsyncWriteExt}; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut writer = Vec::new(); - /// - /// writer.write_i128_le(i128::min_value()).await?; - /// - /// assert_eq!(writer, vec![ - /// 0, 0, 0, 0, 0, 0, 0, - /// 0, 0, 0, 0, 0, 0, 0, 0, 0x80 - /// ]); - /// Ok(()) - /// } - /// ``` - fn write_i128_le(&mut self, n: i128) -> WriteI128Le; - } - - /// Flushes this output stream, ensuring that all intermediately buffered - /// contents reach their destination. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn flush(&mut self) -> io::Result<()>; - /// ``` - /// - /// # Errors - /// - /// It is considered an error if not all bytes could be written due to - /// I/O errors or EOF being reached. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::io::{self, BufWriter, AsyncWriteExt}; - /// use tokio::fs::File; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let f = File::create("foo.txt").await?; - /// let mut buffer = BufWriter::new(f); - /// - /// buffer.write_all(b"some bytes").await?; - /// buffer.flush().await?; - /// Ok(()) - /// } - /// ``` - fn flush(&mut self) -> Flush<'_, Self> - where - Self: Unpin, - { - flush(self) - } - - /// Shuts down the output stream, ensuring that the value can be dropped - /// cleanly. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn shutdown(&mut self) -> io::Result<()>; - /// ``` - /// - /// Similar to [`flush`], all intermediately buffered is written to the - /// underlying stream. Once the operation completes, the caller should - /// no longer attempt to write to the stream. For example, the - /// `TcpStream` implementation will issue a `shutdown(Write)` sys call. 
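The `flush` example above is `no_run` because it touches the filesystem; the same idea can be exercised fully in memory. A hedged sketch, assuming `BufWriter` and the `AsyncWrite` impl for `Vec<u8>` (the contents are illustrative):

```rust
use tokio::io::{AsyncWriteExt, BufWriter};

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let mut buffer = BufWriter::new(Vec::new());

    buffer.write_all(b"some bytes").await?;
    // The bytes are still sitting in the BufWriter's buffer...
    assert!(buffer.get_ref().is_empty());

    buffer.flush().await?;
    // ...and only reach the inner Vec once flush is called.
    assert_eq!(buffer.get_ref(), b"some bytes");
    Ok(())
}
```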
- /// - /// [`flush`]: fn@crate::io::AsyncWriteExt::flush - /// - /// # Examples - /// - /// ```no_run - /// use tokio::io::{self, BufWriter, AsyncWriteExt}; - /// use tokio::fs::File; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let f = File::create("foo.txt").await?; - /// let mut buffer = BufWriter::new(f); - /// - /// buffer.write_all(b"some bytes").await?; - /// buffer.shutdown().await?; - /// Ok(()) - /// } - /// ``` - fn shutdown(&mut self) -> Shutdown<'_, Self> - where - Self: Unpin, - { - shutdown(self) - } - } -} - -impl AsyncWriteExt for W {} diff --git a/third_party/rust/tokio-0.2.25/src/io/util/buf_reader.rs b/third_party/rust/tokio-0.2.25/src/io/util/buf_reader.rs deleted file mode 100644 index a1c5990a6444..000000000000 --- a/third_party/rust/tokio-0.2.25/src/io/util/buf_reader.rs +++ /dev/null @@ -1,203 +0,0 @@ -use crate::io::util::DEFAULT_BUF_SIZE; -use crate::io::{AsyncBufRead, AsyncRead, AsyncWrite}; - -use bytes::Buf; -use pin_project_lite::pin_project; -use std::io::{self, Read}; -use std::mem::MaybeUninit; -use std::pin::Pin; -use std::task::{Context, Poll}; -use std::{cmp, fmt}; - -pin_project! { - /// The `BufReader` struct adds buffering to any reader. - /// - /// It can be excessively inefficient to work directly with a [`AsyncRead`] - /// instance. A `BufReader` performs large, infrequent reads on the underlying - /// [`AsyncRead`] and maintains an in-memory buffer of the results. - /// - /// `BufReader` can improve the speed of programs that make *small* and - /// *repeated* read calls to the same file or network socket. It does not - /// help when reading very large amounts at once, or reading just one or a few - /// times. It also provides no advantage when reading from a source that is - /// already in memory, like a `Vec`. - /// - /// When the `BufReader` is dropped, the contents of its buffer will be - /// discarded. Creating multiple instances of a `BufReader` on the same - /// stream can cause data loss. - #[cfg_attr(docsrs, doc(cfg(feature = "io-util")))] - pub struct BufReader { - #[pin] - pub(super) inner: R, - pub(super) buf: Box<[u8]>, - pub(super) pos: usize, - pub(super) cap: usize, - } -} - -impl BufReader { - /// Creates a new `BufReader` with a default buffer capacity. The default is currently 8 KB, - /// but may change in the future. - pub fn new(inner: R) -> Self { - Self::with_capacity(DEFAULT_BUF_SIZE, inner) - } - - /// Creates a new `BufReader` with the specified buffer capacity. - pub fn with_capacity(capacity: usize, inner: R) -> Self { - unsafe { - let mut buffer = Vec::with_capacity(capacity); - buffer.set_len(capacity); - - { - // Convert to MaybeUninit - let b = &mut *(&mut buffer[..] as *mut [u8] as *mut [MaybeUninit]); - inner.prepare_uninitialized_buffer(b); - } - Self { - inner, - buf: buffer.into_boxed_slice(), - pos: 0, - cap: 0, - } - } - } - - /// Gets a reference to the underlying reader. - /// - /// It is inadvisable to directly read from the underlying reader. - pub fn get_ref(&self) -> &R { - &self.inner - } - - /// Gets a mutable reference to the underlying reader. - /// - /// It is inadvisable to directly read from the underlying reader. - pub fn get_mut(&mut self) -> &mut R { - &mut self.inner - } - - /// Gets a pinned mutable reference to the underlying reader. - /// - /// It is inadvisable to directly read from the underlying reader. - pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut R> { - self.project().inner - } - - /// Consumes this `BufReader`, returning the underlying reader. 
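A short, runnable sketch of the `BufReader` described above, assuming the `AsyncRead`/`AsyncBufRead` impls for `&[u8]` and the `io-util` feature; the input bytes are illustrative:

```rust
use tokio::io::{AsyncBufReadExt, BufReader};

#[tokio::main]
async fn main() -> std::io::Result<()> {
    // Any AsyncRead can be wrapped; an in-memory slice keeps this runnable.
    let data: &[u8] = b"first line\nsecond line\n";
    let mut reader = BufReader::new(data);

    let mut line = String::new();
    reader.read_line(&mut line).await?;
    assert_eq!(line, "first line\n");

    line.clear();
    reader.read_line(&mut line).await?;
    assert_eq!(line, "second line\n");
    Ok(())
}
```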
- /// - /// Note that any leftover data in the internal buffer is lost. - pub fn into_inner(self) -> R { - self.inner - } - - /// Returns a reference to the internally buffered data. - /// - /// Unlike `fill_buf`, this will not attempt to fill the buffer if it is empty. - pub fn buffer(&self) -> &[u8] { - &self.buf[self.pos..self.cap] - } - - /// Invalidates all data in the internal buffer. - #[inline] - fn discard_buffer(self: Pin<&mut Self>) { - let me = self.project(); - *me.pos = 0; - *me.cap = 0; - } -} - -impl AsyncRead for BufReader { - fn poll_read( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { - // If we don't have any buffered data and we're doing a massive read - // (larger than our internal buffer), bypass our internal buffer - // entirely. - if self.pos == self.cap && buf.len() >= self.buf.len() { - let res = ready!(self.as_mut().get_pin_mut().poll_read(cx, buf)); - self.discard_buffer(); - return Poll::Ready(res); - } - let mut rem = ready!(self.as_mut().poll_fill_buf(cx))?; - let nread = rem.read(buf)?; - self.consume(nread); - Poll::Ready(Ok(nread)) - } - - // we can't skip unconditionally because of the large buffer case in read. - unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [MaybeUninit]) -> bool { - self.inner.prepare_uninitialized_buffer(buf) - } -} - -impl AsyncBufRead for BufReader { - fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let me = self.project(); - - // If we've reached the end of our internal buffer then we need to fetch - // some more data from the underlying reader. - // Branch using `>=` instead of the more correct `==` - // to tell the compiler that the pos..cap slice is always valid. - if *me.pos >= *me.cap { - debug_assert!(*me.pos == *me.cap); - *me.cap = ready!(me.inner.poll_read(cx, me.buf))?; - *me.pos = 0; - } - Poll::Ready(Ok(&me.buf[*me.pos..*me.cap])) - } - - fn consume(self: Pin<&mut Self>, amt: usize) { - let me = self.project(); - *me.pos = cmp::min(*me.pos + amt, *me.cap); - } -} - -impl AsyncWrite for BufReader { - fn poll_write( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - self.get_pin_mut().poll_write(cx, buf) - } - - fn poll_write_buf( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut B, - ) -> Poll> { - self.get_pin_mut().poll_write_buf(cx, buf) - } - - fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.get_pin_mut().poll_flush(cx) - } - - fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.get_pin_mut().poll_shutdown(cx) - } -} - -impl fmt::Debug for BufReader { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("BufReader") - .field("reader", &self.inner) - .field( - "buffer", - &format_args!("{}/{}", self.cap - self.pos, self.buf.len()), - ) - .finish() - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn assert_unpin() { - crate::is_unpin::>(); - } -} diff --git a/third_party/rust/tokio-0.2.25/src/io/util/buf_stream.rs b/third_party/rust/tokio-0.2.25/src/io/util/buf_stream.rs deleted file mode 100644 index a56a4517fa47..000000000000 --- a/third_party/rust/tokio-0.2.25/src/io/util/buf_stream.rs +++ /dev/null @@ -1,169 +0,0 @@ -use crate::io::util::{BufReader, BufWriter}; -use crate::io::{AsyncBufRead, AsyncRead, AsyncWrite}; - -use pin_project_lite::pin_project; -use std::io; -use std::mem::MaybeUninit; -use std::pin::Pin; -use std::task::{Context, Poll}; - -pin_project! 
{ - /// Wraps a type that is [`AsyncWrite`] and [`AsyncRead`], and buffers its input and output. - /// - /// It can be excessively inefficient to work directly with something that implements [`AsyncWrite`] - /// and [`AsyncRead`]. For example, every `write`, however small, has to traverse the syscall - /// interface, and similarly, every read has to do the same. The [`BufWriter`] and [`BufReader`] - /// types aid with these problems respectively, but do so in only one direction. `BufStream` wraps - /// one in the other so that both directions are buffered. See their documentation for details. - #[derive(Debug)] - #[cfg_attr(docsrs, doc(cfg(feature = "io-util")))] - pub struct BufStream { - #[pin] - inner: BufReader>, - } -} - -impl BufStream { - /// Wraps a type in both [`BufWriter`] and [`BufReader`]. - /// - /// See the documentation for those types and [`BufStream`] for details. - pub fn new(stream: RW) -> BufStream { - BufStream { - inner: BufReader::new(BufWriter::new(stream)), - } - } - - /// Creates a `BufStream` with the specified [`BufReader`] capacity and [`BufWriter`] - /// capacity. - /// - /// See the documentation for those types and [`BufStream`] for details. - pub fn with_capacity( - reader_capacity: usize, - writer_capacity: usize, - stream: RW, - ) -> BufStream { - BufStream { - inner: BufReader::with_capacity( - reader_capacity, - BufWriter::with_capacity(writer_capacity, stream), - ), - } - } - - /// Gets a reference to the underlying I/O object. - /// - /// It is inadvisable to directly read from the underlying I/O object. - pub fn get_ref(&self) -> &RW { - self.inner.get_ref().get_ref() - } - - /// Gets a mutable reference to the underlying I/O object. - /// - /// It is inadvisable to directly read from the underlying I/O object. - pub fn get_mut(&mut self) -> &mut RW { - self.inner.get_mut().get_mut() - } - - /// Gets a pinned mutable reference to the underlying I/O object. - /// - /// It is inadvisable to directly read from the underlying I/O object. - pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut RW> { - self.project().inner.get_pin_mut().get_pin_mut() - } - - /// Consumes this `BufStream`, returning the underlying I/O object. - /// - /// Note that any leftover data in the internal buffer is lost. - pub fn into_inner(self) -> RW { - self.inner.into_inner().into_inner() - } -} - -impl From>> for BufStream { - fn from(b: BufReader>) -> Self { - BufStream { inner: b } - } -} - -impl From>> for BufStream { - fn from(b: BufWriter>) -> Self { - // we need to "invert" the reader and writer - let BufWriter { - inner: - BufReader { - inner, - buf: rbuf, - pos, - cap, - }, - buf: wbuf, - written, - } = b; - - BufStream { - inner: BufReader { - inner: BufWriter { - inner, - buf: wbuf, - written, - }, - buf: rbuf, - pos, - cap, - }, - } - } -} - -impl AsyncWrite for BufStream { - fn poll_write( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - self.project().inner.poll_write(cx, buf) - } - - fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.project().inner.poll_flush(cx) - } - - fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.project().inner.poll_shutdown(cx) - } -} - -impl AsyncRead for BufStream { - fn poll_read( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { - self.project().inner.poll_read(cx, buf) - } - - // we can't skip unconditionally because of the large buffer case in read. 
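A sketch of `BufStream` wrapping one end of the in-memory `duplex` pipe defined later in this module, so both buffered directions can be exercised without a socket (assumes the `io-util` feature; names and payloads are illustrative):

```rust
use tokio::io::{AsyncReadExt, AsyncWriteExt, BufStream};

#[tokio::main]
async fn main() -> std::io::Result<()> {
    // An in-memory connection; one end is wrapped so that both its reads
    // and its writes go through buffers.
    let (a, mut b) = tokio::io::duplex(64);
    let mut buffered = BufStream::new(a);

    buffered.write_all(b"hello").await?;
    buffered.flush().await?; // push the buffered bytes to the peer

    let mut buf = [0u8; 5];
    b.read_exact(&mut buf).await?;
    assert_eq!(&buf, b"hello");

    // The read half is buffered as well.
    b.write_all(b"world").await?;
    let mut reply = [0u8; 5];
    buffered.read_exact(&mut reply).await?;
    assert_eq!(&reply, b"world");
    Ok(())
}
```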
- unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [MaybeUninit]) -> bool { - self.inner.prepare_uninitialized_buffer(buf) - } -} - -impl AsyncBufRead for BufStream { - fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.project().inner.poll_fill_buf(cx) - } - - fn consume(self: Pin<&mut Self>, amt: usize) { - self.project().inner.consume(amt) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn assert_unpin() { - crate::is_unpin::>(); - } -} diff --git a/third_party/rust/tokio-0.2.25/src/io/util/buf_writer.rs b/third_party/rust/tokio-0.2.25/src/io/util/buf_writer.rs deleted file mode 100644 index efd053ebac6d..000000000000 --- a/third_party/rust/tokio-0.2.25/src/io/util/buf_writer.rs +++ /dev/null @@ -1,192 +0,0 @@ -use crate::io::util::DEFAULT_BUF_SIZE; -use crate::io::{AsyncBufRead, AsyncRead, AsyncWrite}; - -use pin_project_lite::pin_project; -use std::fmt; -use std::io::{self, Write}; -use std::mem::MaybeUninit; -use std::pin::Pin; -use std::task::{Context, Poll}; - -pin_project! { - /// Wraps a writer and buffers its output. - /// - /// It can be excessively inefficient to work directly with something that - /// implements [`AsyncWrite`]. A `BufWriter` keeps an in-memory buffer of data and - /// writes it to an underlying writer in large, infrequent batches. - /// - /// `BufWriter` can improve the speed of programs that make *small* and - /// *repeated* write calls to the same file or network socket. It does not - /// help when writing very large amounts at once, or writing just one or a few - /// times. It also provides no advantage when writing to a destination that is - /// in memory, like a `Vec`. - /// - /// When the `BufWriter` is dropped, the contents of its buffer will be - /// discarded. Creating multiple instances of a `BufWriter` on the same - /// stream can cause data loss. If you need to write out the contents of its - /// buffer, you must manually call flush before the writer is dropped. - /// - /// [`AsyncWrite`]: AsyncWrite - /// [`flush`]: super::AsyncWriteExt::flush - /// - #[cfg_attr(docsrs, doc(cfg(feature = "io-util")))] - pub struct BufWriter { - #[pin] - pub(super) inner: W, - pub(super) buf: Vec, - pub(super) written: usize, - } -} - -impl BufWriter { - /// Creates a new `BufWriter` with a default buffer capacity. The default is currently 8 KB, - /// but may change in the future. - pub fn new(inner: W) -> Self { - Self::with_capacity(DEFAULT_BUF_SIZE, inner) - } - - /// Creates a new `BufWriter` with the specified buffer capacity. - pub fn with_capacity(cap: usize, inner: W) -> Self { - Self { - inner, - buf: Vec::with_capacity(cap), - written: 0, - } - } - - fn flush_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut me = self.project(); - - let len = me.buf.len(); - let mut ret = Ok(()); - while *me.written < len { - match ready!(me.inner.as_mut().poll_write(cx, &me.buf[*me.written..])) { - Ok(0) => { - ret = Err(io::Error::new( - io::ErrorKind::WriteZero, - "failed to write the buffered data", - )); - break; - } - Ok(n) => *me.written += n, - Err(e) => { - ret = Err(e); - break; - } - } - } - if *me.written > 0 { - me.buf.drain(..*me.written); - } - *me.written = 0; - Poll::Ready(ret) - } - - /// Gets a reference to the underlying writer. - pub fn get_ref(&self) -> &W { - &self.inner - } - - /// Gets a mutable reference to the underlying writer. - /// - /// It is inadvisable to directly write to the underlying writer. 
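The `poll_write` implementation just below buffers small writes and, after flushing, passes writes at or above the buffer capacity straight through to the inner writer. A sketch exercising both paths via the public API (the tiny capacity is chosen purely to make the behaviour observable and is otherwise an arbitrary choice):

```rust
use tokio::io::{AsyncWriteExt, BufWriter};

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let mut writer = BufWriter::with_capacity(8, Vec::new());

    // Small write: stays in BufWriter's internal buffer.
    writer.write_all(b"abc").await?;
    assert_eq!(writer.buffer(), b"abc");
    assert!(writer.get_ref().is_empty());

    // Large write (>= capacity): the buffer is flushed, then the payload is
    // written straight through to the inner writer.
    writer.write_all(b"0123456789abcdef").await?;
    assert!(writer.buffer().is_empty());
    assert_eq!(writer.get_ref(), b"abc0123456789abcdef");
    Ok(())
}
```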
- pub fn get_mut(&mut self) -> &mut W { - &mut self.inner - } - - /// Gets a pinned mutable reference to the underlying writer. - /// - /// It is inadvisable to directly write to the underlying writer. - pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut W> { - self.project().inner - } - - /// Consumes this `BufWriter`, returning the underlying writer. - /// - /// Note that any leftover data in the internal buffer is lost. - pub fn into_inner(self) -> W { - self.inner - } - - /// Returns a reference to the internally buffered data. - pub fn buffer(&self) -> &[u8] { - &self.buf - } -} - -impl AsyncWrite for BufWriter { - fn poll_write( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - if self.buf.len() + buf.len() > self.buf.capacity() { - ready!(self.as_mut().flush_buf(cx))?; - } - - let me = self.project(); - if buf.len() >= me.buf.capacity() { - me.inner.poll_write(cx, buf) - } else { - Poll::Ready(me.buf.write(buf)) - } - } - - fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - ready!(self.as_mut().flush_buf(cx))?; - self.get_pin_mut().poll_flush(cx) - } - - fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - ready!(self.as_mut().flush_buf(cx))?; - self.get_pin_mut().poll_shutdown(cx) - } -} - -impl AsyncRead for BufWriter { - fn poll_read( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { - self.get_pin_mut().poll_read(cx, buf) - } - - // we can't skip unconditionally because of the large buffer case in read. - unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [MaybeUninit]) -> bool { - self.get_ref().prepare_uninitialized_buffer(buf) - } -} - -impl AsyncBufRead for BufWriter { - fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.get_pin_mut().poll_fill_buf(cx) - } - - fn consume(self: Pin<&mut Self>, amt: usize) { - self.get_pin_mut().consume(amt) - } -} - -impl fmt::Debug for BufWriter { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("BufWriter") - .field("writer", &self.inner) - .field( - "buffer", - &format_args!("{}/{}", self.buf.len(), self.buf.capacity()), - ) - .field("written", &self.written) - .finish() - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn assert_unpin() { - crate::is_unpin::>(); - } -} diff --git a/third_party/rust/tokio-0.2.25/src/io/util/chain.rs b/third_party/rust/tokio-0.2.25/src/io/util/chain.rs deleted file mode 100644 index 8ba9194f5de7..000000000000 --- a/third_party/rust/tokio-0.2.25/src/io/util/chain.rs +++ /dev/null @@ -1,150 +0,0 @@ -use crate::io::{AsyncBufRead, AsyncRead}; - -use pin_project_lite::pin_project; -use std::fmt; -use std::io; -use std::pin::Pin; -use std::task::{Context, Poll}; - -pin_project! { - /// Stream for the [`chain`](super::AsyncReadExt::chain) method. - #[must_use = "streams do nothing unless polled"] - #[cfg_attr(docsrs, doc(cfg(feature = "io-util")))] - pub struct Chain { - #[pin] - first: T, - #[pin] - second: U, - done_first: bool, - } -} - -pub(super) fn chain(first: T, second: U) -> Chain -where - T: AsyncRead, - U: AsyncRead, -{ - Chain { - first, - second, - done_first: false, - } -} - -impl Chain -where - T: AsyncRead, - U: AsyncRead, -{ - /// Gets references to the underlying readers in this `Chain`. - pub fn get_ref(&self) -> (&T, &U) { - (&self.first, &self.second) - } - - /// Gets mutable references to the underlying readers in this `Chain`. 
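Before the remaining `Chain` impls below, a usage sketch for the adapter introduced above (it backs `AsyncReadExt::chain`), assuming the `AsyncRead` impl for `&[u8]`; the inputs are illustrative:

```rust
use tokio::io::AsyncReadExt;

#[tokio::main]
async fn main() -> std::io::Result<()> {
    // Two readers glued together: once the first reports EOF, reads
    // continue transparently from the second.
    let first: &[u8] = b"hello ";
    let second: &[u8] = b"world";

    let mut chained = first.chain(second);
    let mut out = String::new();
    chained.read_to_string(&mut out).await?;

    assert_eq!(out, "hello world");
    Ok(())
}
```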
- /// - /// Care should be taken to avoid modifying the internal I/O state of the - /// underlying readers as doing so may corrupt the internal state of this - /// `Chain`. - pub fn get_mut(&mut self) -> (&mut T, &mut U) { - (&mut self.first, &mut self.second) - } - - /// Gets pinned mutable references to the underlying readers in this `Chain`. - /// - /// Care should be taken to avoid modifying the internal I/O state of the - /// underlying readers as doing so may corrupt the internal state of this - /// `Chain`. - pub fn get_pin_mut(self: Pin<&mut Self>) -> (Pin<&mut T>, Pin<&mut U>) { - let me = self.project(); - (me.first, me.second) - } - - /// Consumes the `Chain`, returning the wrapped readers. - pub fn into_inner(self) -> (T, U) { - (self.first, self.second) - } -} - -impl fmt::Debug for Chain -where - T: fmt::Debug, - U: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Chain") - .field("t", &self.first) - .field("u", &self.second) - .finish() - } -} - -impl AsyncRead for Chain -where - T: AsyncRead, - U: AsyncRead, -{ - unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [std::mem::MaybeUninit]) -> bool { - if self.first.prepare_uninitialized_buffer(buf) { - return true; - } - if self.second.prepare_uninitialized_buffer(buf) { - return true; - } - false - } - fn poll_read( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { - let me = self.project(); - - if !*me.done_first { - match ready!(me.first.poll_read(cx, buf)?) { - 0 if !buf.is_empty() => *me.done_first = true, - n => return Poll::Ready(Ok(n)), - } - } - me.second.poll_read(cx, buf) - } -} - -impl AsyncBufRead for Chain -where - T: AsyncBufRead, - U: AsyncBufRead, -{ - fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let me = self.project(); - - if !*me.done_first { - match ready!(me.first.poll_fill_buf(cx)?) { - buf if buf.is_empty() => { - *me.done_first = true; - } - buf => return Poll::Ready(Ok(buf)), - } - } - me.second.poll_fill_buf(cx) - } - - fn consume(self: Pin<&mut Self>, amt: usize) { - let me = self.project(); - if !*me.done_first { - me.first.consume(amt) - } else { - me.second.consume(amt) - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn assert_unpin() { - crate::is_unpin::>(); - } -} diff --git a/third_party/rust/tokio-0.2.25/src/io/util/copy.rs b/third_party/rust/tokio-0.2.25/src/io/util/copy.rs deleted file mode 100644 index 7bfe296941e2..000000000000 --- a/third_party/rust/tokio-0.2.25/src/io/util/copy.rs +++ /dev/null @@ -1,135 +0,0 @@ -use crate::io::{AsyncRead, AsyncWrite}; - -use std::future::Future; -use std::io; -use std::pin::Pin; -use std::task::{Context, Poll}; - -cfg_io_util! { - /// A future that asynchronously copies the entire contents of a reader into a - /// writer. - /// - /// This struct is generally created by calling [`copy`][copy]. Please - /// see the documentation of `copy()` for more details. - /// - /// [copy]: copy() - #[derive(Debug)] - #[must_use = "futures do nothing unless you `.await` or poll them"] - pub struct Copy<'a, R: ?Sized, W: ?Sized> { - reader: &'a mut R, - read_done: bool, - writer: &'a mut W, - pos: usize, - cap: usize, - amt: u64, - buf: Box<[u8]>, - } - - /// Asynchronously copies the entire contents of a reader into a writer. - /// - /// This function returns a future that will continuously read data from - /// `reader` and then write it into `writer` in a streaming fashion until - /// `reader` returns EOF. 
- /// - /// On success, the total number of bytes that were copied from `reader` to - /// `writer` is returned. - /// - /// This is an asynchronous version of [`std::io::copy`][std]. - /// - /// [std]: std::io::copy - /// - /// # Errors - /// - /// The returned future will finish with an error will return an error - /// immediately if any call to `poll_read` or `poll_write` returns an error. - /// - /// # Examples - /// - /// ``` - /// use tokio::io; - /// - /// # async fn dox() -> std::io::Result<()> { - /// let mut reader: &[u8] = b"hello"; - /// let mut writer: Vec = vec![]; - /// - /// io::copy(&mut reader, &mut writer).await?; - /// - /// assert_eq!(&b"hello"[..], &writer[..]); - /// # Ok(()) - /// # } - /// ``` - pub fn copy<'a, R, W>(reader: &'a mut R, writer: &'a mut W) -> Copy<'a, R, W> - where - R: AsyncRead + Unpin + ?Sized, - W: AsyncWrite + Unpin + ?Sized, - { - Copy { - reader, - read_done: false, - writer, - amt: 0, - pos: 0, - cap: 0, - buf: vec![0; 2048].into_boxed_slice(), - } - } -} - -impl Future for Copy<'_, R, W> -where - R: AsyncRead + Unpin + ?Sized, - W: AsyncWrite + Unpin + ?Sized, -{ - type Output = io::Result; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - loop { - // If our buffer is empty, then we need to read some data to - // continue. - if self.pos == self.cap && !self.read_done { - let me = &mut *self; - let n = ready!(Pin::new(&mut *me.reader).poll_read(cx, &mut me.buf))?; - if n == 0 { - self.read_done = true; - } else { - self.pos = 0; - self.cap = n; - } - } - - // If our buffer has some data, let's write it out! - while self.pos < self.cap { - let me = &mut *self; - let i = ready!(Pin::new(&mut *me.writer).poll_write(cx, &me.buf[me.pos..me.cap]))?; - if i == 0 { - return Poll::Ready(Err(io::Error::new( - io::ErrorKind::WriteZero, - "write zero byte into writer", - ))); - } else { - self.pos += i; - self.amt += i as u64; - } - } - - // If we've written all the data and we've seen EOF, flush out the - // data and finish the transfer. - if self.pos == self.cap && self.read_done { - let me = &mut *self; - ready!(Pin::new(&mut *me.writer).poll_flush(cx))?; - return Poll::Ready(Ok(self.amt)); - } - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn assert_unpin() { - use std::marker::PhantomPinned; - crate::is_unpin::>(); - } -} diff --git a/third_party/rust/tokio-0.2.25/src/io/util/empty.rs b/third_party/rust/tokio-0.2.25/src/io/util/empty.rs deleted file mode 100644 index 576058d52d12..000000000000 --- a/third_party/rust/tokio-0.2.25/src/io/util/empty.rs +++ /dev/null @@ -1,87 +0,0 @@ -use crate::io::{AsyncBufRead, AsyncRead}; - -use std::fmt; -use std::io; -use std::pin::Pin; -use std::task::{Context, Poll}; - -cfg_io_util! { - /// An async reader which is always at EOF. - /// - /// This struct is generally created by calling [`empty`]. Please see - /// the documentation of [`empty()`][`empty`] for more details. - /// - /// This is an asynchronous version of [`std::io::empty`][std]. - /// - /// [`empty`]: fn@empty - /// [std]: std::io::empty - pub struct Empty { - _p: (), - } - - /// Creates a new empty async reader. - /// - /// All reads from the returned reader will return `Poll::Ready(Ok(0))`. - /// - /// This is an asynchronous version of [`std::io::empty`][std]. 
- /// - /// [std]: std::io::empty - /// - /// # Examples - /// - /// A slightly sad example of not reading anything into a buffer: - /// - /// ``` - /// use tokio::io::{self, AsyncReadExt}; - /// - /// #[tokio::main] - /// async fn main() { - /// let mut buffer = String::new(); - /// io::empty().read_to_string(&mut buffer).await.unwrap(); - /// assert!(buffer.is_empty()); - /// } - /// ``` - pub fn empty() -> Empty { - Empty { _p: () } - } -} - -impl AsyncRead for Empty { - unsafe fn prepare_uninitialized_buffer(&self, _buf: &mut [std::mem::MaybeUninit]) -> bool { - false - } - #[inline] - fn poll_read( - self: Pin<&mut Self>, - _: &mut Context<'_>, - _: &mut [u8], - ) -> Poll> { - Poll::Ready(Ok(0)) - } -} - -impl AsyncBufRead for Empty { - #[inline] - fn poll_fill_buf(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(&[])) - } - - #[inline] - fn consume(self: Pin<&mut Self>, _: usize) {} -} - -impl fmt::Debug for Empty { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.pad("Empty { .. }") - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn assert_unpin() { - crate::is_unpin::(); - } -} diff --git a/third_party/rust/tokio-0.2.25/src/io/util/flush.rs b/third_party/rust/tokio-0.2.25/src/io/util/flush.rs deleted file mode 100644 index 534a5160c1a3..000000000000 --- a/third_party/rust/tokio-0.2.25/src/io/util/flush.rs +++ /dev/null @@ -1,48 +0,0 @@ -use crate::io::AsyncWrite; - -use std::future::Future; -use std::io; -use std::pin::Pin; -use std::task::{Context, Poll}; - -cfg_io_util! { - /// A future used to fully flush an I/O object. - /// - /// Created by the [`AsyncWriteExt::flush`][flush] function. - /// [flush]: crate::io::AsyncWriteExt::flush - #[derive(Debug)] - pub struct Flush<'a, A: ?Sized> { - a: &'a mut A, - } -} - -/// Creates a future which will entirely flush an I/O object. -pub(super) fn flush(a: &mut A) -> Flush<'_, A> -where - A: AsyncWrite + Unpin + ?Sized, -{ - Flush { a } -} - -impl Future for Flush<'_, A> -where - A: AsyncWrite + Unpin + ?Sized, -{ - type Output = io::Result<()>; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let me = &mut *self; - Pin::new(&mut *me.a).poll_flush(cx) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn assert_unpin() { - use std::marker::PhantomPinned; - crate::is_unpin::>(); - } -} diff --git a/third_party/rust/tokio-0.2.25/src/io/util/lines.rs b/third_party/rust/tokio-0.2.25/src/io/util/lines.rs deleted file mode 100644 index ee27400c9de7..000000000000 --- a/third_party/rust/tokio-0.2.25/src/io/util/lines.rs +++ /dev/null @@ -1,133 +0,0 @@ -use crate::io::util::read_line::read_line_internal; -use crate::io::AsyncBufRead; - -use pin_project_lite::pin_project; -use std::io; -use std::mem; -use std::pin::Pin; -use std::task::{Context, Poll}; - -pin_project! { - /// Stream for the [`lines`](crate::io::AsyncBufReadExt::lines) method. - #[derive(Debug)] - #[must_use = "streams do nothing unless polled"] - #[cfg_attr(docsrs, doc(cfg(feature = "io-util")))] - pub struct Lines { - #[pin] - reader: R, - buf: String, - bytes: Vec, - read: usize, - } -} - -pub(crate) fn lines(reader: R) -> Lines -where - R: AsyncBufRead, -{ - Lines { - reader, - buf: String::new(), - bytes: Vec::new(), - read: 0, - } -} - -impl Lines -where - R: AsyncBufRead + Unpin, -{ - /// Returns the next line in the stream. 
- /// - /// # Examples - /// - /// ``` - /// # use tokio::io::AsyncBufRead; - /// use tokio::io::AsyncBufReadExt; - /// - /// # async fn dox(my_buf_read: impl AsyncBufRead + Unpin) -> std::io::Result<()> { - /// let mut lines = my_buf_read.lines(); - /// - /// while let Some(line) = lines.next_line().await? { - /// println!("length = {}", line.len()) - /// } - /// # Ok(()) - /// # } - /// ``` - pub async fn next_line(&mut self) -> io::Result> { - use crate::future::poll_fn; - - poll_fn(|cx| Pin::new(&mut *self).poll_next_line(cx)).await - } - - /// Obtain a mutable reference to the underlying reader - pub fn get_mut(&mut self) -> &mut R { - &mut self.reader - } - - /// Obtain a reference to the underlying reader - pub fn get_ref(&mut self) -> &R { - &self.reader - } - - /// Unwraps this `Lines`, returning the underlying reader. - /// - /// Note that any leftover data in the internal buffer is lost. - /// Therefore, a following read from the underlying reader may lead to data loss. - pub fn into_inner(self) -> R { - self.reader - } -} - -impl Lines -where - R: AsyncBufRead, -{ - #[doc(hidden)] - pub fn poll_next_line( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll>> { - let me = self.project(); - - let n = ready!(read_line_internal(me.reader, cx, me.buf, me.bytes, me.read))?; - debug_assert_eq!(*me.read, 0); - - if n == 0 && me.buf.is_empty() { - return Poll::Ready(Ok(None)); - } - - if me.buf.ends_with('\n') { - me.buf.pop(); - - if me.buf.ends_with('\r') { - me.buf.pop(); - } - } - - Poll::Ready(Ok(Some(mem::replace(me.buf, String::new())))) - } -} - -#[cfg(feature = "stream")] -impl crate::stream::Stream for Lines { - type Item = io::Result; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Poll::Ready(match ready!(self.poll_next_line(cx)) { - Ok(Some(line)) => Some(Ok(line)), - Ok(None) => None, - Err(err) => Some(Err(err)), - }) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn assert_unpin() { - crate::is_unpin::>(); - } -} diff --git a/third_party/rust/tokio-0.2.25/src/io/util/mem.rs b/third_party/rust/tokio-0.2.25/src/io/util/mem.rs deleted file mode 100644 index 02ba6aa7e91a..000000000000 --- a/third_party/rust/tokio-0.2.25/src/io/util/mem.rs +++ /dev/null @@ -1,222 +0,0 @@ -//! In-process memory IO types. - -use crate::io::{AsyncRead, AsyncWrite}; -use crate::loom::sync::Mutex; - -use bytes::{Buf, BytesMut}; -use std::{ - pin::Pin, - sync::Arc, - task::{self, Poll, Waker}, -}; - -/// A bidirectional pipe to read and write bytes in memory. -/// -/// A pair of `DuplexStream`s are created together, and they act as a "channel" -/// that can be used as in-memory IO types. Writing to one of the pairs will -/// allow that data to be read from the other, and vice versa. -/// -/// # Example -/// -/// ``` -/// # async fn ex() -> std::io::Result<()> { -/// # use tokio::io::{AsyncReadExt, AsyncWriteExt}; -/// let (mut client, mut server) = tokio::io::duplex(64); -/// -/// client.write_all(b"ping").await?; -/// -/// let mut buf = [0u8; 4]; -/// server.read_exact(&mut buf).await?; -/// assert_eq!(&buf, b"ping"); -/// -/// server.write_all(b"pong").await?; -/// -/// client.read_exact(&mut buf).await?; -/// assert_eq!(&buf, b"pong"); -/// # Ok(()) -/// # } -/// ``` -#[derive(Debug)] -pub struct DuplexStream { - read: Arc>, - write: Arc>, -} - -/// A unidirectional IO over a piece of memory. -/// -/// Data can be written to the pipe, and reading will return that data. 
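Rounding off the `Lines` adapter above before turning to the in-memory pipe: a sketch of the CR/LF trimming performed by `poll_next_line`, assuming the `AsyncBufRead` impl for `&[u8]` (the input is illustrative):

```rust
use tokio::io::AsyncBufReadExt;

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let input: &[u8] = b"first\r\nsecond\nlast";
    let mut lines = input.lines();

    // Trailing "\n" and "\r\n" are stripped; a final unterminated line is
    // still yielded, and EOF is reported as None.
    assert_eq!(lines.next_line().await?, Some("first".to_string()));
    assert_eq!(lines.next_line().await?, Some("second".to_string()));
    assert_eq!(lines.next_line().await?, Some("last".to_string()));
    assert_eq!(lines.next_line().await?, None);
    Ok(())
}
```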
-#[derive(Debug)] -struct Pipe { - /// The buffer storing the bytes written, also read from. - /// - /// Using a `BytesMut` because it has efficient `Buf` and `BufMut` - /// functionality already. Additionally, it can try to copy data in the - /// same buffer if there read index has advanced far enough. - buffer: BytesMut, - /// Determines if the write side has been closed. - is_closed: bool, - /// The maximum amount of bytes that can be written before returning - /// `Poll::Pending`. - max_buf_size: usize, - /// If the `read` side has been polled and is pending, this is the waker - /// for that parked task. - read_waker: Option, - /// If the `write` side has filled the `max_buf_size` and returned - /// `Poll::Pending`, this is the waker for that parked task. - write_waker: Option, -} - -// ===== impl DuplexStream ===== - -/// Create a new pair of `DuplexStream`s that act like a pair of connected sockets. -/// -/// The `max_buf_size` argument is the maximum amount of bytes that can be -/// written to a side before the write returns `Poll::Pending`. -pub fn duplex(max_buf_size: usize) -> (DuplexStream, DuplexStream) { - let one = Arc::new(Mutex::new(Pipe::new(max_buf_size))); - let two = Arc::new(Mutex::new(Pipe::new(max_buf_size))); - - ( - DuplexStream { - read: one.clone(), - write: two.clone(), - }, - DuplexStream { - read: two, - write: one, - }, - ) -} - -impl AsyncRead for DuplexStream { - // Previous rustc required this `self` to be `mut`, even though newer - // versions recognize it isn't needed to call `lock()`. So for - // compatibility, we include the `mut` and `allow` the lint. - // - // See https://github.com/rust-lang/rust/issues/73592 - #[allow(unused_mut)] - fn poll_read( - mut self: Pin<&mut Self>, - cx: &mut task::Context<'_>, - buf: &mut [u8], - ) -> Poll> { - Pin::new(&mut *self.read.lock().unwrap()).poll_read(cx, buf) - } -} - -impl AsyncWrite for DuplexStream { - #[allow(unused_mut)] - fn poll_write( - mut self: Pin<&mut Self>, - cx: &mut task::Context<'_>, - buf: &[u8], - ) -> Poll> { - Pin::new(&mut *self.write.lock().unwrap()).poll_write(cx, buf) - } - - #[allow(unused_mut)] - fn poll_flush( - mut self: Pin<&mut Self>, - cx: &mut task::Context<'_>, - ) -> Poll> { - Pin::new(&mut *self.write.lock().unwrap()).poll_flush(cx) - } - - #[allow(unused_mut)] - fn poll_shutdown( - mut self: Pin<&mut Self>, - cx: &mut task::Context<'_>, - ) -> Poll> { - Pin::new(&mut *self.write.lock().unwrap()).poll_shutdown(cx) - } -} - -impl Drop for DuplexStream { - fn drop(&mut self) { - // notify the other side of the closure - self.write.lock().unwrap().close(); - } -} - -// ===== impl Pipe ===== - -impl Pipe { - fn new(max_buf_size: usize) -> Self { - Pipe { - buffer: BytesMut::new(), - is_closed: false, - max_buf_size, - read_waker: None, - write_waker: None, - } - } - - fn close(&mut self) { - self.is_closed = true; - if let Some(waker) = self.read_waker.take() { - waker.wake(); - } - } -} - -impl AsyncRead for Pipe { - fn poll_read( - mut self: Pin<&mut Self>, - cx: &mut task::Context<'_>, - buf: &mut [u8], - ) -> Poll> { - if self.buffer.has_remaining() { - let max = self.buffer.remaining().min(buf.len()); - self.buffer.copy_to_slice(&mut buf[..max]); - if max > 0 { - // The passed `buf` might have been empty, don't wake up if - // no bytes have been moved. 
- if let Some(waker) = self.write_waker.take() { - waker.wake(); - } - } - Poll::Ready(Ok(max)) - } else if self.is_closed { - Poll::Ready(Ok(0)) - } else { - self.read_waker = Some(cx.waker().clone()); - Poll::Pending - } - } -} - -impl AsyncWrite for Pipe { - fn poll_write( - mut self: Pin<&mut Self>, - cx: &mut task::Context<'_>, - buf: &[u8], - ) -> Poll> { - if self.is_closed { - return Poll::Ready(Err(std::io::ErrorKind::BrokenPipe.into())); - } - let avail = self.max_buf_size - self.buffer.len(); - if avail == 0 { - self.write_waker = Some(cx.waker().clone()); - return Poll::Pending; - } - - let len = buf.len().min(avail); - self.buffer.extend_from_slice(&buf[..len]); - if let Some(waker) = self.read_waker.take() { - waker.wake(); - } - Poll::Ready(Ok(len)) - } - - fn poll_flush(self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } - - fn poll_shutdown( - mut self: Pin<&mut Self>, - _: &mut task::Context<'_>, - ) -> Poll> { - self.close(); - Poll::Ready(Ok(())) - } -} diff --git a/third_party/rust/tokio-0.2.25/src/io/util/mod.rs b/third_party/rust/tokio-0.2.25/src/io/util/mod.rs deleted file mode 100644 index 782a02a8f741..000000000000 --- a/third_party/rust/tokio-0.2.25/src/io/util/mod.rs +++ /dev/null @@ -1,94 +0,0 @@ -#![allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 - -cfg_io_util! { - mod async_buf_read_ext; - pub use async_buf_read_ext::AsyncBufReadExt; - - mod async_read_ext; - pub use async_read_ext::AsyncReadExt; - - mod async_seek_ext; - pub use async_seek_ext::AsyncSeekExt; - - mod async_write_ext; - pub use async_write_ext::AsyncWriteExt; - - mod buf_reader; - pub use buf_reader::BufReader; - - mod buf_stream; - pub use buf_stream::BufStream; - - mod buf_writer; - pub use buf_writer::BufWriter; - - mod chain; - - mod copy; - pub use copy::{copy, Copy}; - - mod empty; - pub use empty::{empty, Empty}; - - mod flush; - - mod lines; - pub use lines::Lines; - - mod mem; - pub use mem::{duplex, DuplexStream}; - - mod read; - mod read_buf; - mod read_exact; - mod read_int; - mod read_line; - - mod read_to_end; - cfg_process! { - pub(crate) use read_to_end::read_to_end; - } - - mod read_to_string; - mod read_until; - - mod repeat; - pub use repeat::{repeat, Repeat}; - - mod shutdown; - - mod sink; - pub use sink::{sink, Sink}; - - mod split; - pub use split::Split; - - cfg_stream! { - mod stream_reader; - pub use stream_reader::{stream_reader, StreamReader}; - - mod reader_stream; - pub use reader_stream::{reader_stream, ReaderStream}; - } - - mod take; - pub use take::Take; - - mod write; - mod write_all; - mod write_buf; - mod write_int; - - - // used by `BufReader` and `BufWriter` - // https://github.com/rust-lang/rust/blob/master/src/libstd/sys_common/io.rs#L1 - const DEFAULT_BUF_SIZE: usize = 8 * 1024; -} - -cfg_not_io_util! { - cfg_process! { - mod read_to_end; - // Used by process - pub(crate) use read_to_end::read_to_end; - } -} diff --git a/third_party/rust/tokio-0.2.25/src/io/util/read.rs b/third_party/rust/tokio-0.2.25/src/io/util/read.rs deleted file mode 100644 index a8ca370ea87d..000000000000 --- a/third_party/rust/tokio-0.2.25/src/io/util/read.rs +++ /dev/null @@ -1,55 +0,0 @@ -use crate::io::AsyncRead; - -use std::future::Future; -use std::io; -use std::marker::Unpin; -use std::pin::Pin; -use std::task::{Context, Poll}; - -/// Tries to read some bytes directly into the given `buf` in asynchronous -/// manner, returning a future type. 
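The `max_buf_size` backpressure implemented by `Pipe::poll_write` above can be observed through the public `duplex` API. A hedged sketch with a deliberately tiny capacity (the payload and task structure are illustrative; assumes a runtime with `tokio::spawn` available):

```rust
use tokio::io::{AsyncReadExt, AsyncWriteExt};

#[tokio::main]
async fn main() -> std::io::Result<()> {
    // Capacity of 4 bytes: the writer must wait for the reader to drain
    // the pipe before it can finish sending 8 bytes.
    let (mut tx, mut rx) = tokio::io::duplex(4);

    let writer = tokio::spawn(async move {
        tx.write_all(b"12345678").await.unwrap();
        // Dropping tx closes the pipe, so the reader eventually sees EOF.
    });

    let mut received = Vec::new();
    rx.read_to_end(&mut received).await?;
    writer.await.unwrap();

    assert_eq!(received, b"12345678");
    Ok(())
}
```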
-/// -/// The returned future will resolve to both the I/O stream and the buffer -/// as well as the number of bytes read once the read operation is completed. -pub(crate) fn read<'a, R>(reader: &'a mut R, buf: &'a mut [u8]) -> Read<'a, R> -where - R: AsyncRead + Unpin + ?Sized, -{ - Read { reader, buf } -} - -cfg_io_util! { - /// A future which can be used to easily read available number of bytes to fill - /// a buffer. - /// - /// Created by the [`read`] function. - #[derive(Debug)] - #[must_use = "futures do nothing unless you `.await` or poll them"] - pub struct Read<'a, R: ?Sized> { - reader: &'a mut R, - buf: &'a mut [u8], - } -} - -impl Future for Read<'_, R> -where - R: AsyncRead + Unpin + ?Sized, -{ - type Output = io::Result; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let me = &mut *self; - Pin::new(&mut *me.reader).poll_read(cx, me.buf) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn assert_unpin() { - use std::marker::PhantomPinned; - crate::is_unpin::>(); - } -} diff --git a/third_party/rust/tokio-0.2.25/src/io/util/read_buf.rs b/third_party/rust/tokio-0.2.25/src/io/util/read_buf.rs deleted file mode 100644 index 6ee3d249f825..000000000000 --- a/third_party/rust/tokio-0.2.25/src/io/util/read_buf.rs +++ /dev/null @@ -1,38 +0,0 @@ -use crate::io::AsyncRead; - -use bytes::BufMut; -use std::future::Future; -use std::io; -use std::pin::Pin; -use std::task::{Context, Poll}; - -pub(crate) fn read_buf<'a, R, B>(reader: &'a mut R, buf: &'a mut B) -> ReadBuf<'a, R, B> -where - R: AsyncRead + Unpin, - B: BufMut, -{ - ReadBuf { reader, buf } -} - -cfg_io_util! { - /// Future returned by [`read_buf`](crate::io::AsyncReadExt::read_buf). - #[derive(Debug)] - #[must_use = "futures do nothing unless you `.await` or poll them"] - pub struct ReadBuf<'a, R, B> { - reader: &'a mut R, - buf: &'a mut B, - } -} - -impl Future for ReadBuf<'_, R, B> -where - R: AsyncRead + Unpin, - B: BufMut, -{ - type Output = io::Result; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let me = &mut *self; - Pin::new(&mut *me.reader).poll_read_buf(cx, me.buf) - } -} diff --git a/third_party/rust/tokio-0.2.25/src/io/util/read_exact.rs b/third_party/rust/tokio-0.2.25/src/io/util/read_exact.rs deleted file mode 100644 index 86b8412954ba..000000000000 --- a/third_party/rust/tokio-0.2.25/src/io/util/read_exact.rs +++ /dev/null @@ -1,77 +0,0 @@ -use crate::io::AsyncRead; - -use std::future::Future; -use std::io; -use std::marker::Unpin; -use std::pin::Pin; -use std::task::{Context, Poll}; - -/// A future which can be used to easily read exactly enough bytes to fill -/// a buffer. -/// -/// Created by the [`AsyncReadExt::read_exact`][read_exact]. -/// [read_exact]: [crate::io::AsyncReadExt::read_exact] -pub(crate) fn read_exact<'a, A>(reader: &'a mut A, buf: &'a mut [u8]) -> ReadExact<'a, A> -where - A: AsyncRead + Unpin + ?Sized, -{ - ReadExact { - reader, - buf, - pos: 0, - } -} - -cfg_io_util! { - /// Creates a future which will read exactly enough bytes to fill `buf`, - /// returning an error if EOF is hit sooner. 
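A usage sketch for `read_exact` as exposed through `AsyncReadExt`, assuming the `AsyncRead` impl for `&[u8]`: it either fills the buffer completely or fails with `UnexpectedEof`:

```rust
use tokio::io::AsyncReadExt;

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let mut reader: &[u8] = b"abcdef";
    let mut buf = [0u8; 4];

    // Fills the whole buffer or fails; a short read is never returned.
    reader.read_exact(&mut buf).await?;
    assert_eq!(&buf, b"abcd");

    // Only two bytes are left, so asking for four hits early EOF.
    let mut rest = [0u8; 4];
    assert!(reader.read_exact(&mut rest).await.is_err());
    Ok(())
}
```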
- /// - /// On success the number of bytes is returned - #[derive(Debug)] - #[must_use = "futures do nothing unless you `.await` or poll them"] - pub struct ReadExact<'a, A: ?Sized> { - reader: &'a mut A, - buf: &'a mut [u8], - pos: usize, - } -} - -fn eof() -> io::Error { - io::Error::new(io::ErrorKind::UnexpectedEof, "early eof") -} - -impl Future for ReadExact<'_, A> -where - A: AsyncRead + Unpin + ?Sized, -{ - type Output = io::Result; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - loop { - // if our buffer is empty, then we need to read some data to continue. - if self.pos < self.buf.len() { - let me = &mut *self; - let n = ready!(Pin::new(&mut *me.reader).poll_read(cx, &mut me.buf[me.pos..]))?; - me.pos += n; - if n == 0 { - return Err(eof()).into(); - } - } - - if self.pos >= self.buf.len() { - return Poll::Ready(Ok(self.pos)); - } - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn assert_unpin() { - use std::marker::PhantomPinned; - crate::is_unpin::>(); - } -} diff --git a/third_party/rust/tokio-0.2.25/src/io/util/read_int.rs b/third_party/rust/tokio-0.2.25/src/io/util/read_int.rs deleted file mode 100644 index 9d37dc7a4009..000000000000 --- a/third_party/rust/tokio-0.2.25/src/io/util/read_int.rs +++ /dev/null @@ -1,133 +0,0 @@ -use crate::io::AsyncRead; - -use bytes::Buf; -use pin_project_lite::pin_project; -use std::future::Future; -use std::io; -use std::io::ErrorKind::UnexpectedEof; -use std::mem::size_of; -use std::pin::Pin; -use std::task::{Context, Poll}; - -macro_rules! reader { - ($name:ident, $ty:ty, $reader:ident) => { - reader!($name, $ty, $reader, size_of::<$ty>()); - }; - ($name:ident, $ty:ty, $reader:ident, $bytes:expr) => { - pin_project! { - #[doc(hidden)] - pub struct $name { - #[pin] - src: R, - buf: [u8; $bytes], - read: u8, - } - } - - impl $name { - pub(crate) fn new(src: R) -> Self { - $name { - src, - buf: [0; $bytes], - read: 0, - } - } - } - - impl Future for $name - where - R: AsyncRead, - { - type Output = io::Result<$ty>; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let mut me = self.project(); - - if *me.read == $bytes as u8 { - return Poll::Ready(Ok(Buf::$reader(&mut &me.buf[..]))); - } - - while *me.read < $bytes as u8 { - *me.read += match me - .src - .as_mut() - .poll_read(cx, &mut me.buf[*me.read as usize..]) - { - Poll::Pending => return Poll::Pending, - Poll::Ready(Err(e)) => return Poll::Ready(Err(e.into())), - Poll::Ready(Ok(0)) => { - return Poll::Ready(Err(UnexpectedEof.into())); - } - Poll::Ready(Ok(n)) => n as u8, - }; - } - - let num = Buf::$reader(&mut &me.buf[..]); - - Poll::Ready(Ok(num)) - } - } - }; -} - -macro_rules! reader8 { - ($name:ident, $ty:ty) => { - pin_project! 
{ - /// Future returned from `read_u8` - #[doc(hidden)] - pub struct $name { - #[pin] - reader: R, - } - } - - impl $name { - pub(crate) fn new(reader: R) -> $name { - $name { reader } - } - } - - impl Future for $name - where - R: AsyncRead, - { - type Output = io::Result<$ty>; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let me = self.project(); - - let mut buf = [0; 1]; - match me.reader.poll_read(cx, &mut buf[..]) { - Poll::Pending => Poll::Pending, - Poll::Ready(Err(e)) => Poll::Ready(Err(e.into())), - Poll::Ready(Ok(0)) => Poll::Ready(Err(UnexpectedEof.into())), - Poll::Ready(Ok(1)) => Poll::Ready(Ok(buf[0] as $ty)), - Poll::Ready(Ok(_)) => unreachable!(), - } - } - } - }; -} - -reader8!(ReadU8, u8); -reader8!(ReadI8, i8); - -reader!(ReadU16, u16, get_u16); -reader!(ReadU32, u32, get_u32); -reader!(ReadU64, u64, get_u64); -reader!(ReadU128, u128, get_u128); - -reader!(ReadI16, i16, get_i16); -reader!(ReadI32, i32, get_i32); -reader!(ReadI64, i64, get_i64); -reader!(ReadI128, i128, get_i128); - -reader!(ReadU16Le, u16, get_u16_le); -reader!(ReadU32Le, u32, get_u32_le); -reader!(ReadU64Le, u64, get_u64_le); -reader!(ReadU128Le, u128, get_u128_le); - -reader!(ReadI16Le, i16, get_i16_le); -reader!(ReadI32Le, i32, get_i32_le); -reader!(ReadI64Le, i64, get_i64_le); -reader!(ReadI128Le, i128, get_i128_le); diff --git a/third_party/rust/tokio-0.2.25/src/io/util/read_line.rs b/third_party/rust/tokio-0.2.25/src/io/util/read_line.rs deleted file mode 100644 index d1f66f3807d5..000000000000 --- a/third_party/rust/tokio-0.2.25/src/io/util/read_line.rs +++ /dev/null @@ -1,129 +0,0 @@ -use crate::io::util::read_until::read_until_internal; -use crate::io::AsyncBufRead; - -use std::future::Future; -use std::io; -use std::mem; -use std::pin::Pin; -use std::string::FromUtf8Error; -use std::task::{Context, Poll}; - -cfg_io_util! { - /// Future for the [`read_line`](crate::io::AsyncBufReadExt::read_line) method. - #[derive(Debug)] - #[must_use = "futures do nothing unless you `.await` or poll them"] - pub struct ReadLine<'a, R: ?Sized> { - reader: &'a mut R, - /// This is the buffer we were provided. It will be replaced with an empty string - /// while reading to postpone utf-8 handling until after reading. - output: &'a mut String, - /// The actual allocation of the string is moved into this vector instead. - buf: Vec, - /// The number of bytes appended to buf. This can be less than buf.len() if - /// the buffer was not empty when the operation was started. - read: usize, - } -} - -pub(crate) fn read_line<'a, R>(reader: &'a mut R, string: &'a mut String) -> ReadLine<'a, R> -where - R: AsyncBufRead + ?Sized + Unpin, -{ - ReadLine { - reader, - buf: mem::replace(string, String::new()).into_bytes(), - output: string, - read: 0, - } -} - -fn put_back_original_data(output: &mut String, mut vector: Vec, num_bytes_read: usize) { - let original_len = vector.len() - num_bytes_read; - vector.truncate(original_len); - *output = String::from_utf8(vector).expect("The original data must be valid utf-8."); -} - -/// This handles the various failure cases and puts the string back into `output`. -/// -/// The `truncate_on_io_error` bool is necessary because `read_to_string` and `read_line` -/// disagree on what should happen when an IO error occurs. 
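Stepping back from the macro internals: the generated `Read*` futures back the `AsyncReadExt::read_u*`/`read_i*` methods, the decoding counterparts of the writers earlier in this patch. A sketch of their use (values are arbitrary; assumes the `AsyncRead` impl for `&[u8]`):

```rust
use tokio::io::AsyncReadExt;

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let mut data: &[u8] = b"\x00\x00\x01\x0b\x05\x02";

    assert_eq!(data.read_u32().await?, 267);    // big-endian
    assert_eq!(data.read_u16_le().await?, 517); // little-endian
    assert!(data.read_u8().await.is_err());     // EOF -> UnexpectedEof
    Ok(())
}
```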
-pub(super) fn finish_string_read( - io_res: io::Result, - utf8_res: Result, - read: usize, - output: &mut String, - truncate_on_io_error: bool, -) -> Poll> { - match (io_res, utf8_res) { - (Ok(num_bytes), Ok(string)) => { - debug_assert_eq!(read, 0); - *output = string; - Poll::Ready(Ok(num_bytes)) - } - (Err(io_err), Ok(string)) => { - *output = string; - if truncate_on_io_error { - let original_len = output.len() - read; - output.truncate(original_len); - } - Poll::Ready(Err(io_err)) - } - (Ok(num_bytes), Err(utf8_err)) => { - debug_assert_eq!(read, 0); - put_back_original_data(output, utf8_err.into_bytes(), num_bytes); - - Poll::Ready(Err(io::Error::new( - io::ErrorKind::InvalidData, - "stream did not contain valid UTF-8", - ))) - } - (Err(io_err), Err(utf8_err)) => { - put_back_original_data(output, utf8_err.into_bytes(), read); - - Poll::Ready(Err(io_err)) - } - } -} - -pub(super) fn read_line_internal( - reader: Pin<&mut R>, - cx: &mut Context<'_>, - output: &mut String, - buf: &mut Vec, - read: &mut usize, -) -> Poll> { - let io_res = ready!(read_until_internal(reader, cx, b'\n', buf, read)); - let utf8_res = String::from_utf8(mem::replace(buf, Vec::new())); - - // At this point both buf and output are empty. The allocation is in utf8_res. - - debug_assert!(buf.is_empty()); - debug_assert!(output.is_empty()); - finish_string_read(io_res, utf8_res, *read, output, false) -} - -impl Future for ReadLine<'_, R> { - type Output = io::Result; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let Self { - reader, - output, - buf, - read, - } = &mut *self; - - read_line_internal(Pin::new(reader), cx, output, buf, read) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn assert_unpin() { - use std::marker::PhantomPinned; - crate::is_unpin::>(); - } -} diff --git a/third_party/rust/tokio-0.2.25/src/io/util/read_to_end.rs b/third_party/rust/tokio-0.2.25/src/io/util/read_to_end.rs deleted file mode 100644 index 29b8b811f729..000000000000 --- a/third_party/rust/tokio-0.2.25/src/io/util/read_to_end.rs +++ /dev/null @@ -1,170 +0,0 @@ -use crate::io::AsyncRead; - -use std::future::Future; -use std::io; -use std::mem::{self, MaybeUninit}; -use std::pin::Pin; -use std::task::{Context, Poll}; - -#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -#[cfg_attr(docsrs, doc(cfg(feature = "io-util")))] -pub struct ReadToEnd<'a, R: ?Sized> { - reader: &'a mut R, - buf: &'a mut Vec, - /// The number of bytes appended to buf. This can be less than buf.len() if - /// the buffer was not empty when the operation was started. - read: usize, -} - -pub(crate) fn read_to_end<'a, R>(reader: &'a mut R, buffer: &'a mut Vec) -> ReadToEnd<'a, R> -where - R: AsyncRead + Unpin + ?Sized, -{ - prepare_buffer(buffer, reader); - ReadToEnd { - reader, - buf: buffer, - read: 0, - } -} - -/// # Safety -/// -/// Before first calling this method, the unused capacity must have been -/// prepared for use with the provided AsyncRead. This can be done using the -/// `prepare_buffer` function later in this file. -pub(super) unsafe fn read_to_end_internal( - buf: &mut Vec, - mut reader: Pin<&mut R>, - num_read: &mut usize, - cx: &mut Context<'_>, -) -> Poll> { - loop { - // safety: The caller promised to prepare the buffer. 
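The `finish_string_read` helper above is what gives `read_to_string` its "restore the caller's string on failure" behavior: on a UTF-8 error the bytes read are discarded and the original contents are put back. A small sketch of the observable effect, assuming tokio 0.2 with the `io-util` and `macros` features:

```rust
use tokio::io::AsyncReadExt;

#[tokio::main]
async fn main() {
    let mut bad: &[u8] = &[0xff, 0xfe]; // not valid UTF-8
    let mut out = String::from("prefix");

    // The read succeeds at the I/O level but fails UTF-8 validation, so the
    // future reports InvalidData and puts the original contents back into `out`.
    let err = bad.read_to_string(&mut out).await.unwrap_err();
    assert_eq!(err.kind(), std::io::ErrorKind::InvalidData);
    assert_eq!(out, "prefix");
}
```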
- let ret = ready!(poll_read_to_end(buf, reader.as_mut(), cx)); - match ret { - Err(err) => return Poll::Ready(Err(err)), - Ok(0) => return Poll::Ready(Ok(mem::replace(num_read, 0))), - Ok(num) => { - *num_read += num; - } - } - } -} - -/// Tries to read from the provided AsyncRead. -/// -/// The length of the buffer is increased by the number of bytes read. -/// -/// # Safety -/// -/// The caller ensures that the buffer has been prepared for use with the -/// AsyncRead before calling this function. This can be done using the -/// `prepare_buffer` function later in this file. -unsafe fn poll_read_to_end( - buf: &mut Vec, - read: Pin<&mut R>, - cx: &mut Context<'_>, -) -> Poll> { - // This uses an adaptive system to extend the vector when it fills. We want to - // avoid paying to allocate and zero a huge chunk of memory if the reader only - // has 4 bytes while still making large reads if the reader does have a ton - // of data to return. Simply tacking on an extra DEFAULT_BUF_SIZE space every - // time is 4,500 times (!) slower than this if the reader has a very small - // amount of data to return. - reserve(buf, &*read, 32); - - let unused_capacity: &mut [MaybeUninit] = get_unused_capacity(buf); - - // safety: The buffer has been prepared for use with the AsyncRead before - // calling this function. - let slice: &mut [u8] = &mut *(unused_capacity as *mut [MaybeUninit] as *mut [u8]); - - let res = ready!(read.poll_read(cx, slice)); - if let Ok(num) = res { - // safety: There are two situations: - // - // 1. The AsyncRead has not overriden `prepare_uninitialized_buffer`. - // - // In this situation, the default implementation of that method will have - // zeroed the unused capacity. This means that setting the length will - // never expose uninitialized memory in the vector. - // - // Note that the assert! below ensures that we don't set the length to - // something larger than the capacity, which malicious implementors might - // try to have us do. - // - // 2. The AsyncRead has overriden `prepare_uninitialized_buffer`. - // - // In this case, the safety of the `set_len` call below relies on this - // guarantee from the documentation on `prepare_uninitialized_buffer`: - // - // > This function isn't actually unsafe to call but unsafe to implement. - // > The implementer must ensure that either the whole buf has been zeroed - // > or poll_read() overwrites the buffer without reading it and returns - // > correct value. - // - // Note that `prepare_uninitialized_buffer` is unsafe to implement, so this - // is a guarantee we can rely on in unsafe code. - // - // The assert!() is technically only necessary in the first case. - let new_len = buf.len() + num; - assert!(new_len <= buf.capacity()); - - buf.set_len(new_len); - } - Poll::Ready(res) -} - -/// This function prepares the unused capacity for use with the provided AsyncRead. -pub(super) fn prepare_buffer(buf: &mut Vec, read: &R) { - let buffer = get_unused_capacity(buf); - - // safety: This function is only unsafe to implement. - unsafe { - read.prepare_uninitialized_buffer(buffer); - } -} - -/// Allocates more memory and ensures that the unused capacity is prepared for use -/// with the `AsyncRead`. -fn reserve(buf: &mut Vec, read: &R, bytes: usize) { - if buf.capacity() - buf.len() >= bytes { - return; - } - buf.reserve(bytes); - // The call above has reallocated the buffer, so we must reinitialize the entire - // unused capacity, even if we already initialized some of it before the resize. 
- prepare_buffer(buf, read); -} - -/// Returns the unused capacity of the provided vector. -fn get_unused_capacity(buf: &mut Vec) -> &mut [MaybeUninit] { - bytes::BufMut::bytes_mut(buf) -} - -impl Future for ReadToEnd<'_, A> -where - A: AsyncRead + ?Sized + Unpin, -{ - type Output = io::Result; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let Self { reader, buf, read } = &mut *self; - - // safety: The constructor of ReadToEnd calls `prepare_buffer` - unsafe { read_to_end_internal(buf, Pin::new(*reader), read, cx) } - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn assert_unpin() { - use std::marker::PhantomPinned; - crate::is_unpin::>(); - } -} diff --git a/third_party/rust/tokio-0.2.25/src/io/util/read_to_string.rs b/third_party/rust/tokio-0.2.25/src/io/util/read_to_string.rs deleted file mode 100644 index 4ef50be308ca..000000000000 --- a/third_party/rust/tokio-0.2.25/src/io/util/read_to_string.rs +++ /dev/null @@ -1,94 +0,0 @@ -use crate::io::util::read_line::finish_string_read; -use crate::io::util::read_to_end::{prepare_buffer, read_to_end_internal}; -use crate::io::AsyncRead; - -use std::future::Future; -use std::pin::Pin; -use std::task::{Context, Poll}; -use std::{io, mem}; - -cfg_io_util! { - /// Future for the [`read_to_string`](super::AsyncReadExt::read_to_string) method. - #[derive(Debug)] - #[must_use = "futures do nothing unless you `.await` or poll them"] - pub struct ReadToString<'a, R: ?Sized> { - reader: &'a mut R, - /// This is the buffer we were provided. It will be replaced with an empty string - /// while reading to postpone utf-8 handling until after reading. - output: &'a mut String, - /// The actual allocation of the string is moved into this vector instead. - buf: Vec, - /// The number of bytes appended to buf. This can be less than buf.len() if - /// the buffer was not empty when the operation was started. - read: usize, - } -} - -pub(crate) fn read_to_string<'a, R>( - reader: &'a mut R, - string: &'a mut String, -) -> ReadToString<'a, R> -where - R: AsyncRead + ?Sized + Unpin, -{ - let mut buf = mem::replace(string, String::new()).into_bytes(); - prepare_buffer(&mut buf, reader); - ReadToString { - reader, - buf, - output: string, - read: 0, - } -} - -/// # Safety -/// -/// Before first calling this method, the unused capacity must have been -/// prepared for use with the provided AsyncRead. This can be done using the -/// `prepare_buffer` function in `read_to_end.rs`. -unsafe fn read_to_string_internal( - reader: Pin<&mut R>, - output: &mut String, - buf: &mut Vec, - read: &mut usize, - cx: &mut Context<'_>, -) -> Poll> { - let io_res = ready!(read_to_end_internal(buf, reader, read, cx)); - let utf8_res = String::from_utf8(mem::replace(buf, Vec::new())); - - // At this point both buf and output are empty. The allocation is in utf8_res. - - debug_assert!(buf.is_empty()); - debug_assert!(output.is_empty()); - finish_string_read(io_res, utf8_res, *read, output, true) -} - -impl Future for ReadToString<'_, A> -where - A: AsyncRead + ?Sized + Unpin, -{ - type Output = io::Result; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let Self { - reader, - buf, - output, - read, - } = &mut *self; - - // safety: The constructor of ReadToString called `prepare_buffer`. 
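As the `read` field comments in these hunks note, `read_to_end` counts only the bytes it appended, not the bytes already present in the destination vector. A sketch of that behavior against the tokio 0.2 API, with the `io-util` and `macros` features assumed:

```rust
use tokio::io::AsyncReadExt;

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let mut data: &[u8] = b"hello";
    let mut buf = vec![1, 2, 3]; // not empty when the read starts

    // The return value counts only the newly appended bytes.
    let n = data.read_to_end(&mut buf).await?;
    assert_eq!(n, 5);
    assert_eq!(buf, vec![1, 2, 3, b'h', b'e', b'l', b'l', b'o']);
    Ok(())
}
```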
- unsafe { read_to_string_internal(Pin::new(*reader), output, buf, read, cx) } - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn assert_unpin() { - use std::marker::PhantomPinned; - crate::is_unpin::>(); - } -} diff --git a/third_party/rust/tokio-0.2.25/src/io/util/read_until.rs b/third_party/rust/tokio-0.2.25/src/io/util/read_until.rs deleted file mode 100644 index 78dac8c2a14a..000000000000 --- a/third_party/rust/tokio-0.2.25/src/io/util/read_until.rs +++ /dev/null @@ -1,89 +0,0 @@ -use crate::io::AsyncBufRead; - -use std::future::Future; -use std::io; -use std::mem; -use std::pin::Pin; -use std::task::{Context, Poll}; - -cfg_io_util! { - /// Future for the [`read_until`](crate::io::AsyncBufReadExt::read_until) method. - /// The delimeter is included in the resulting vector. - #[derive(Debug)] - #[must_use = "futures do nothing unless you `.await` or poll them"] - pub struct ReadUntil<'a, R: ?Sized> { - reader: &'a mut R, - delimeter: u8, - buf: &'a mut Vec, - /// The number of bytes appended to buf. This can be less than buf.len() if - /// the buffer was not empty when the operation was started. - read: usize, - } -} - -pub(crate) fn read_until<'a, R>( - reader: &'a mut R, - delimeter: u8, - buf: &'a mut Vec, -) -> ReadUntil<'a, R> -where - R: AsyncBufRead + ?Sized + Unpin, -{ - ReadUntil { - reader, - delimeter, - buf, - read: 0, - } -} - -pub(super) fn read_until_internal( - mut reader: Pin<&mut R>, - cx: &mut Context<'_>, - delimeter: u8, - buf: &mut Vec, - read: &mut usize, -) -> Poll> { - loop { - let (done, used) = { - let available = ready!(reader.as_mut().poll_fill_buf(cx))?; - if let Some(i) = memchr::memchr(delimeter, available) { - buf.extend_from_slice(&available[..=i]); - (true, i + 1) - } else { - buf.extend_from_slice(available); - (false, available.len()) - } - }; - reader.as_mut().consume(used); - *read += used; - if done || used == 0 { - return Poll::Ready(Ok(mem::replace(read, 0))); - } - } -} - -impl Future for ReadUntil<'_, R> { - type Output = io::Result; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let Self { - reader, - delimeter, - buf, - read, - } = &mut *self; - read_until_internal(Pin::new(reader), cx, *delimeter, buf, read) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn assert_unpin() { - use std::marker::PhantomPinned; - crate::is_unpin::>(); - } -} diff --git a/third_party/rust/tokio-0.2.25/src/io/util/reader_stream.rs b/third_party/rust/tokio-0.2.25/src/io/util/reader_stream.rs deleted file mode 100644 index 51651cede4d8..000000000000 --- a/third_party/rust/tokio-0.2.25/src/io/util/reader_stream.rs +++ /dev/null @@ -1,105 +0,0 @@ -use crate::io::AsyncRead; -use crate::stream::Stream; -use bytes::{Bytes, BytesMut}; -use pin_project_lite::pin_project; -use std::pin::Pin; -use std::task::{Context, Poll}; - -pin_project! { - /// Convert an [`AsyncRead`] implementor into a - /// [`Stream`] of Result<[`Bytes`], std::io::Error>. - /// After first error it will stop. - /// Additionally, this stream is fused: after it returns None at some - /// moment, it is guaranteed that further `next()`, `poll_next()` and - /// similar functions will instantly return None. 
- /// - /// This type can be created using the [`reader_stream`] function - /// - /// [`AsyncRead`]: crate::io::AsyncRead - /// [`Stream`]: crate::stream::Stream - /// [`Bytes`]: bytes::Bytes - /// [`reader_stream`]: crate::io::reader_stream - #[derive(Debug)] - #[cfg_attr(docsrs, doc(cfg(feature = "stream")))] - #[cfg_attr(docsrs, doc(cfg(feature = "io-util")))] - pub struct ReaderStream { - // Reader itself. - // None if we had error reading from the `reader` in the past. - #[pin] - reader: Option, - // Working buffer, used to optimize allocations. - // # Capacity behavior - // Initially `buf` is empty. Also it's getting smaller and smaller - // during polls (because its chunks are returned to stream user). - // But when it's capacity reaches 0, it is growed. - buf: BytesMut, - } -} - -/// Convert an [`AsyncRead`] implementor into a -/// [`Stream`] of Result<[`Bytes`], std::io::Error>. -/// -/// # Example -/// -/// ``` -/// # #[tokio::main] -/// # async fn main() -> std::io::Result<()> { -/// use tokio::stream::StreamExt; -/// -/// let data: &[u8] = b"hello, world!"; -/// let mut stream = tokio::io::reader_stream(data); -/// let mut stream_contents = Vec::new(); -/// while let Some(chunk) = stream.next().await { -/// stream_contents.extend_from_slice(chunk?.as_ref()); -/// } -/// assert_eq!(stream_contents, data); -/// # Ok(()) -/// # } -/// ``` -/// -/// [`AsyncRead`]: crate::io::AsyncRead -/// [`Stream`]: crate::stream::Stream -/// [`Bytes`]: bytes::Bytes -pub fn reader_stream(reader: R) -> ReaderStream -where - R: AsyncRead, -{ - ReaderStream { - reader: Some(reader), - buf: BytesMut::new(), - } -} - -const CAPACITY: usize = 4096; - -impl Stream for ReaderStream -where - R: AsyncRead, -{ - type Item = std::io::Result; - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut this = self.as_mut().project(); - let reader = match this.reader.as_pin_mut() { - Some(r) => r, - None => return Poll::Ready(None), - }; - if this.buf.capacity() == 0 { - this.buf.reserve(CAPACITY); - } - match reader.poll_read_buf(cx, &mut this.buf) { - Poll::Pending => Poll::Pending, - Poll::Ready(Err(err)) => { - self.project().reader.set(None); - Poll::Ready(Some(Err(err))) - } - Poll::Ready(Ok(0)) => { - self.project().reader.set(None); - Poll::Ready(None) - } - Poll::Ready(Ok(_)) => { - let chunk = this.buf.split(); - Poll::Ready(Some(Ok(chunk.freeze()))) - } - } - } -} diff --git a/third_party/rust/tokio-0.2.25/src/io/util/repeat.rs b/third_party/rust/tokio-0.2.25/src/io/util/repeat.rs deleted file mode 100644 index eeef7cc187bf..000000000000 --- a/third_party/rust/tokio-0.2.25/src/io/util/repeat.rs +++ /dev/null @@ -1,74 +0,0 @@ -use crate::io::AsyncRead; - -use std::io; -use std::pin::Pin; -use std::task::{Context, Poll}; - -cfg_io_util! { - /// An async reader which yields one byte over and over and over and over and - /// over and... - /// - /// This struct is generally created by calling [`repeat`][repeat]. Please - /// see the documentation of `repeat()` for more details. - /// - /// This is an asynchronous version of [`std::io::Repeat`][std]. - /// - /// [repeat]: fn@repeat - /// [std]: std::io::Repeat - #[derive(Debug)] - pub struct Repeat { - byte: u8, - } - - /// Creates an instance of an async reader that infinitely repeats one byte. - /// - /// All reads from this reader will succeed by filling the specified buffer with - /// the given byte. - /// - /// This is an asynchronous version of [`std::io::repeat`][std]. 
- /// - /// [std]: std::io::repeat - /// - /// # Examples - /// - /// ``` - /// use tokio::io::{self, AsyncReadExt}; - /// - /// #[tokio::main] - /// async fn main() { - /// let mut buffer = [0; 3]; - /// io::repeat(0b101).read_exact(&mut buffer).await.unwrap(); - /// assert_eq!(buffer, [0b101, 0b101, 0b101]); - /// } - /// ``` - pub fn repeat(byte: u8) -> Repeat { - Repeat { byte } - } -} - -impl AsyncRead for Repeat { - unsafe fn prepare_uninitialized_buffer(&self, _buf: &mut [std::mem::MaybeUninit]) -> bool { - false - } - #[inline] - fn poll_read( - self: Pin<&mut Self>, - _: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { - for byte in &mut *buf { - *byte = self.byte; - } - Poll::Ready(Ok(buf.len())) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn assert_unpin() { - crate::is_unpin::(); - } -} diff --git a/third_party/rust/tokio-0.2.25/src/io/util/shutdown.rs b/third_party/rust/tokio-0.2.25/src/io/util/shutdown.rs deleted file mode 100644 index 33ac0ac0db7a..000000000000 --- a/third_party/rust/tokio-0.2.25/src/io/util/shutdown.rs +++ /dev/null @@ -1,48 +0,0 @@ -use crate::io::AsyncWrite; - -use std::future::Future; -use std::io; -use std::pin::Pin; -use std::task::{Context, Poll}; - -cfg_io_util! { - /// A future used to shutdown an I/O object. - /// - /// Created by the [`AsyncWriteExt::shutdown`][shutdown] function. - /// [shutdown]: crate::io::AsyncWriteExt::shutdown - #[derive(Debug)] - pub struct Shutdown<'a, A: ?Sized> { - a: &'a mut A, - } -} - -/// Creates a future which will shutdown an I/O object. -pub(super) fn shutdown(a: &mut A) -> Shutdown<'_, A> -where - A: AsyncWrite + Unpin + ?Sized, -{ - Shutdown { a } -} - -impl Future for Shutdown<'_, A> -where - A: AsyncWrite + Unpin + ?Sized, -{ - type Output = io::Result<()>; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let me = &mut *self; - Pin::new(&mut *me.a).poll_shutdown(cx) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn assert_unpin() { - use std::marker::PhantomPinned; - crate::is_unpin::>(); - } -} diff --git a/third_party/rust/tokio-0.2.25/src/io/util/sink.rs b/third_party/rust/tokio-0.2.25/src/io/util/sink.rs deleted file mode 100644 index 05ee773fa381..000000000000 --- a/third_party/rust/tokio-0.2.25/src/io/util/sink.rs +++ /dev/null @@ -1,87 +0,0 @@ -use crate::io::AsyncWrite; - -use std::fmt; -use std::io; -use std::pin::Pin; -use std::task::{Context, Poll}; - -cfg_io_util! { - /// An async writer which will move data into the void. - /// - /// This struct is generally created by calling [`sink`][sink]. Please - /// see the documentation of `sink()` for more details. - /// - /// This is an asynchronous version of [`std::io::Sink`][std]. - /// - /// [sink]: sink() - /// [std]: std::io::Sink - pub struct Sink { - _p: (), - } - - /// Creates an instance of an async writer which will successfully consume all - /// data. - /// - /// All calls to [`poll_write`] on the returned instance will return - /// `Poll::Ready(Ok(buf.len()))` and the contents of the buffer will not be - /// inspected. - /// - /// This is an asynchronous version of [`std::io::sink`][std]. 
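The `Shutdown` future and `Sink` writer removed in these hunks pair naturally: every write to a sink succeeds with the full buffer length, and `shutdown` simply drives `poll_shutdown`, which is a no-op for `Sink`. A short sketch through `AsyncWriteExt`, assuming tokio 0.2 with the `io-util` and `macros` features:

```rust
use tokio::io::{self, AsyncWriteExt};

#[tokio::main]
async fn main() -> io::Result<()> {
    let mut writer = io::sink();

    // Every write to a sink succeeds and reports the full buffer length.
    let n = writer.write(b"discarded").await?;
    assert_eq!(n, 9);

    // shutdown() drives poll_shutdown, which is a no-op for Sink.
    writer.shutdown().await?;
    Ok(())
}
```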
- /// - /// [`poll_write`]: crate::io::AsyncWrite::poll_write() - /// [std]: std::io::sink - /// - /// # Examples - /// - /// ``` - /// use tokio::io::{self, AsyncWriteExt}; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let buffer = vec![1, 2, 3, 5, 8]; - /// let num_bytes = io::sink().write(&buffer).await?; - /// assert_eq!(num_bytes, 5); - /// Ok(()) - /// } - /// ``` - pub fn sink() -> Sink { - Sink { _p: () } - } -} - -impl AsyncWrite for Sink { - #[inline] - fn poll_write( - self: Pin<&mut Self>, - _: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - Poll::Ready(Ok(buf.len())) - } - - #[inline] - fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } - - #[inline] - fn poll_shutdown(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } -} - -impl fmt::Debug for Sink { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.pad("Sink { .. }") - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn assert_unpin() { - crate::is_unpin::(); - } -} diff --git a/third_party/rust/tokio-0.2.25/src/io/util/split.rs b/third_party/rust/tokio-0.2.25/src/io/util/split.rs deleted file mode 100644 index f552ed503d17..000000000000 --- a/third_party/rust/tokio-0.2.25/src/io/util/split.rs +++ /dev/null @@ -1,114 +0,0 @@ -use crate::io::util::read_until::read_until_internal; -use crate::io::AsyncBufRead; - -use pin_project_lite::pin_project; -use std::io; -use std::mem; -use std::pin::Pin; -use std::task::{Context, Poll}; - -pin_project! { - /// Stream for the [`split`](crate::io::AsyncBufReadExt::split) method. - #[derive(Debug)] - #[must_use = "streams do nothing unless polled"] - #[cfg_attr(docsrs, doc(cfg(feature = "io-util")))] - pub struct Split { - #[pin] - reader: R, - buf: Vec, - delim: u8, - read: usize, - } -} - -pub(crate) fn split(reader: R, delim: u8) -> Split -where - R: AsyncBufRead, -{ - Split { - reader, - buf: Vec::new(), - delim, - read: 0, - } -} - -impl Split -where - R: AsyncBufRead + Unpin, -{ - /// Returns the next segment in the stream. - /// - /// # Examples - /// - /// ``` - /// # use tokio::io::AsyncBufRead; - /// use tokio::io::AsyncBufReadExt; - /// - /// # async fn dox(my_buf_read: impl AsyncBufRead + Unpin) -> std::io::Result<()> { - /// let mut segments = my_buf_read.split(b'f'); - /// - /// while let Some(segment) = segments.next_segment().await? 
{ - /// println!("length = {}", segment.len()) - /// } - /// # Ok(()) - /// # } - /// ``` - pub async fn next_segment(&mut self) -> io::Result>> { - use crate::future::poll_fn; - - poll_fn(|cx| Pin::new(&mut *self).poll_next_segment(cx)).await - } -} - -impl Split -where - R: AsyncBufRead, -{ - #[doc(hidden)] - pub fn poll_next_segment( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll>>> { - let me = self.project(); - - let n = ready!(read_until_internal( - me.reader, cx, *me.delim, me.buf, me.read, - ))?; - // read_until_internal resets me.read to zero once it finds the delimeter - debug_assert_eq!(*me.read, 0); - - if n == 0 && me.buf.is_empty() { - return Poll::Ready(Ok(None)); - } - - if me.buf.last() == Some(me.delim) { - me.buf.pop(); - } - - Poll::Ready(Ok(Some(mem::replace(me.buf, Vec::new())))) - } -} - -#[cfg(feature = "stream")] -impl crate::stream::Stream for Split { - type Item = io::Result>; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Poll::Ready(match ready!(self.poll_next_segment(cx)) { - Ok(Some(segment)) => Some(Ok(segment)), - Ok(None) => None, - Err(err) => Some(Err(err)), - }) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn assert_unpin() { - crate::is_unpin::>(); - } -} diff --git a/third_party/rust/tokio-0.2.25/src/io/util/stream_reader.rs b/third_party/rust/tokio-0.2.25/src/io/util/stream_reader.rs deleted file mode 100644 index b98f8bdfc281..000000000000 --- a/third_party/rust/tokio-0.2.25/src/io/util/stream_reader.rs +++ /dev/null @@ -1,184 +0,0 @@ -use crate::io::{AsyncBufRead, AsyncRead}; -use crate::stream::Stream; -use bytes::{Buf, BufMut}; -use pin_project_lite::pin_project; -use std::io; -use std::mem::MaybeUninit; -use std::pin::Pin; -use std::task::{Context, Poll}; - -pin_project! { - /// Convert a stream of byte chunks into an [`AsyncRead`]. - /// - /// This type is usually created using the [`stream_reader`] function. - /// - /// [`AsyncRead`]: crate::io::AsyncRead - /// [`stream_reader`]: crate::io::stream_reader - #[derive(Debug)] - #[cfg_attr(docsrs, doc(cfg(feature = "stream")))] - #[cfg_attr(docsrs, doc(cfg(feature = "io-util")))] - pub struct StreamReader { - #[pin] - inner: S, - chunk: Option, - } -} - -/// Convert a stream of byte chunks into an [`AsyncRead`](crate::io::AsyncRead). -/// -/// # Example -/// -/// ``` -/// use bytes::Bytes; -/// use tokio::io::{stream_reader, AsyncReadExt}; -/// # #[tokio::main] -/// # async fn main() -> std::io::Result<()> { -/// -/// // Create a stream from an iterator. -/// let stream = tokio::stream::iter(vec![ -/// Ok(Bytes::from_static(&[0, 1, 2, 3])), -/// Ok(Bytes::from_static(&[4, 5, 6, 7])), -/// Ok(Bytes::from_static(&[8, 9, 10, 11])), -/// ]); -/// -/// // Convert it to an AsyncRead. -/// let mut read = stream_reader(stream); -/// -/// // Read five bytes from the stream. -/// let mut buf = [0; 5]; -/// read.read_exact(&mut buf).await?; -/// assert_eq!(buf, [0, 1, 2, 3, 4]); -/// -/// // Read the rest of the current chunk. -/// assert_eq!(read.read(&mut buf).await?, 3); -/// assert_eq!(&buf[..3], [5, 6, 7]); -/// -/// // Read the next chunk. -/// assert_eq!(read.read(&mut buf).await?, 4); -/// assert_eq!(&buf[..4], [8, 9, 10, 11]); -/// -/// // We have now reached the end. 
-/// assert_eq!(read.read(&mut buf).await?, 0); -/// -/// # Ok(()) -/// # } -/// ``` -#[cfg_attr(docsrs, doc(cfg(feature = "stream")))] -#[cfg_attr(docsrs, doc(cfg(feature = "io-util")))] -pub fn stream_reader(stream: S) -> StreamReader -where - S: Stream>, - B: Buf, -{ - StreamReader::new(stream) -} - -impl StreamReader -where - S: Stream>, - B: Buf, -{ - /// Convert the provided stream into an `AsyncRead`. - fn new(stream: S) -> Self { - Self { - inner: stream, - chunk: None, - } - } - /// Do we have a chunk and is it non-empty? - fn has_chunk(self: Pin<&mut Self>) -> bool { - if let Some(chunk) = self.project().chunk { - chunk.remaining() > 0 - } else { - false - } - } -} - -impl AsyncRead for StreamReader -where - S: Stream>, - B: Buf, -{ - fn poll_read( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { - if buf.is_empty() { - return Poll::Ready(Ok(0)); - } - - let inner_buf = match self.as_mut().poll_fill_buf(cx) { - Poll::Ready(Ok(buf)) => buf, - Poll::Ready(Err(err)) => return Poll::Ready(Err(err)), - Poll::Pending => return Poll::Pending, - }; - let len = std::cmp::min(inner_buf.len(), buf.len()); - (&mut buf[..len]).copy_from_slice(&inner_buf[..len]); - - self.consume(len); - Poll::Ready(Ok(len)) - } - fn poll_read_buf( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut BM, - ) -> Poll> - where - Self: Sized, - { - if !buf.has_remaining_mut() { - return Poll::Ready(Ok(0)); - } - - let inner_buf = match self.as_mut().poll_fill_buf(cx) { - Poll::Ready(Ok(buf)) => buf, - Poll::Ready(Err(err)) => return Poll::Ready(Err(err)), - Poll::Pending => return Poll::Pending, - }; - let len = std::cmp::min(inner_buf.len(), buf.remaining_mut()); - buf.put_slice(&inner_buf[..len]); - - self.consume(len); - Poll::Ready(Ok(len)) - } - unsafe fn prepare_uninitialized_buffer(&self, _buf: &mut [MaybeUninit]) -> bool { - false - } -} - -impl AsyncBufRead for StreamReader -where - S: Stream>, - B: Buf, -{ - fn poll_fill_buf(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - loop { - if self.as_mut().has_chunk() { - // This unwrap is very sad, but it can't be avoided. - let buf = self.project().chunk.as_ref().unwrap().bytes(); - return Poll::Ready(Ok(buf)); - } else { - match self.as_mut().project().inner.poll_next(cx) { - Poll::Ready(Some(Ok(chunk))) => { - // Go around the loop in case the chunk is empty. - *self.as_mut().project().chunk = Some(chunk); - } - Poll::Ready(Some(Err(err))) => return Poll::Ready(Err(err)), - Poll::Ready(None) => return Poll::Ready(Ok(&[])), - Poll::Pending => return Poll::Pending, - } - } - } - } - fn consume(self: Pin<&mut Self>, amt: usize) { - if amt > 0 { - self.project() - .chunk - .as_mut() - .expect("No chunk present") - .advance(amt); - } - } -} diff --git a/third_party/rust/tokio-0.2.25/src/io/util/take.rs b/third_party/rust/tokio-0.2.25/src/io/util/take.rs deleted file mode 100644 index 5d6bd90aa31a..000000000000 --- a/third_party/rust/tokio-0.2.25/src/io/util/take.rs +++ /dev/null @@ -1,131 +0,0 @@ -use crate::io::{AsyncBufRead, AsyncRead}; - -use pin_project_lite::pin_project; -use std::mem::MaybeUninit; -use std::pin::Pin; -use std::task::{Context, Poll}; -use std::{cmp, io}; - -pin_project! { - /// Stream for the [`take`](super::AsyncReadExt::take) method. - #[derive(Debug)] - #[must_use = "streams do nothing unless you `.await` or poll them"] - #[cfg_attr(docsrs, doc(cfg(feature = "io-util")))] - pub struct Take { - #[pin] - inner: R, - // Add '_' to avoid conflicts with `limit` method. 
- limit_: u64, - } -} - -pub(super) fn take(inner: R, limit: u64) -> Take { - Take { - inner, - limit_: limit, - } -} - -impl Take { - /// Returns the remaining number of bytes that can be - /// read before this instance will return EOF. - /// - /// # Note - /// - /// This instance may reach `EOF` after reading fewer bytes than indicated by - /// this method if the underlying [`AsyncRead`] instance reaches EOF. - pub fn limit(&self) -> u64 { - self.limit_ - } - - /// Sets the number of bytes that can be read before this instance will - /// return EOF. This is the same as constructing a new `Take` instance, so - /// the amount of bytes read and the previous limit value don't matter when - /// calling this method. - pub fn set_limit(&mut self, limit: u64) { - self.limit_ = limit - } - - /// Gets a reference to the underlying reader. - pub fn get_ref(&self) -> &R { - &self.inner - } - - /// Gets a mutable reference to the underlying reader. - /// - /// Care should be taken to avoid modifying the internal I/O state of the - /// underlying reader as doing so may corrupt the internal limit of this - /// `Take`. - pub fn get_mut(&mut self) -> &mut R { - &mut self.inner - } - - /// Gets a pinned mutable reference to the underlying reader. - /// - /// Care should be taken to avoid modifying the internal I/O state of the - /// underlying reader as doing so may corrupt the internal limit of this - /// `Take`. - pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut R> { - self.project().inner - } - - /// Consumes the `Take`, returning the wrapped reader. - pub fn into_inner(self) -> R { - self.inner - } -} - -impl AsyncRead for Take { - unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [MaybeUninit]) -> bool { - self.inner.prepare_uninitialized_buffer(buf) - } - - fn poll_read( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { - if self.limit_ == 0 { - return Poll::Ready(Ok(0)); - } - - let me = self.project(); - let max = std::cmp::min(buf.len() as u64, *me.limit_) as usize; - let n = ready!(me.inner.poll_read(cx, &mut buf[..max]))?; - *me.limit_ -= n as u64; - Poll::Ready(Ok(n)) - } -} - -impl AsyncBufRead for Take { - fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let me = self.project(); - - // Don't call into inner reader at all at EOF because it may still block - if *me.limit_ == 0 { - return Poll::Ready(Ok(&[])); - } - - let buf = ready!(me.inner.poll_fill_buf(cx)?); - let cap = cmp::min(buf.len() as u64, *me.limit_) as usize; - Poll::Ready(Ok(&buf[..cap])) - } - - fn consume(self: Pin<&mut Self>, amt: usize) { - let me = self.project(); - // Don't let callers reset the limit by passing an overlarge value - let amt = cmp::min(amt as u64, *me.limit_) as usize; - *me.limit_ -= amt as u64; - me.inner.consume(amt); - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn assert_unpin() { - crate::is_unpin::>(); - } -} diff --git a/third_party/rust/tokio-0.2.25/src/io/util/write.rs b/third_party/rust/tokio-0.2.25/src/io/util/write.rs deleted file mode 100644 index 433a421d3456..000000000000 --- a/third_party/rust/tokio-0.2.25/src/io/util/write.rs +++ /dev/null @@ -1,37 +0,0 @@ -use crate::io::AsyncWrite; - -use std::future::Future; -use std::io; -use std::pin::Pin; -use std::task::{Context, Poll}; - -cfg_io_util! { - /// A future to write some of the buffer to an `AsyncWrite`. 
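The `Take` adapter above enforces a byte limit on any `AsyncRead`; once the limit has been consumed, further reads report EOF and `limit()` returns 0. A usage sketch against the tokio 0.2 API, with the `io-util` and `macros` features assumed:

```rust
use tokio::io::AsyncReadExt;

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let data: &[u8] = b"hello world";

    // take(5) caps the reader at five bytes; after that it reports EOF.
    let mut limited = data.take(5);
    let mut buf = Vec::new();
    limited.read_to_end(&mut buf).await?;

    assert_eq!(buf, b"hello".to_vec());
    assert_eq!(limited.limit(), 0);
    Ok(())
}
```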
- #[derive(Debug)] - #[must_use = "futures do nothing unless you `.await` or poll them"] - pub struct Write<'a, W: ?Sized> { - writer: &'a mut W, - buf: &'a [u8], - } -} - -/// Tries to write some bytes from the given `buf` to the writer in an -/// asynchronous manner, returning a future. -pub(crate) fn write<'a, W>(writer: &'a mut W, buf: &'a [u8]) -> Write<'a, W> -where - W: AsyncWrite + Unpin + ?Sized, -{ - Write { writer, buf } -} - -impl Future for Write<'_, W> -where - W: AsyncWrite + Unpin + ?Sized, -{ - type Output = io::Result; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let me = &mut *self; - Pin::new(&mut *me.writer).poll_write(cx, me.buf) - } -} diff --git a/third_party/rust/tokio-0.2.25/src/io/util/write_all.rs b/third_party/rust/tokio-0.2.25/src/io/util/write_all.rs deleted file mode 100644 index 898006c56ca8..000000000000 --- a/third_party/rust/tokio-0.2.25/src/io/util/write_all.rs +++ /dev/null @@ -1,57 +0,0 @@ -use crate::io::AsyncWrite; - -use std::future::Future; -use std::io; -use std::mem; -use std::pin::Pin; -use std::task::{Context, Poll}; - -cfg_io_util! { - #[derive(Debug)] - #[must_use = "futures do nothing unless you `.await` or poll them"] - pub struct WriteAll<'a, W: ?Sized> { - writer: &'a mut W, - buf: &'a [u8], - } -} - -pub(crate) fn write_all<'a, W>(writer: &'a mut W, buf: &'a [u8]) -> WriteAll<'a, W> -where - W: AsyncWrite + Unpin + ?Sized, -{ - WriteAll { writer, buf } -} - -impl Future for WriteAll<'_, W> -where - W: AsyncWrite + Unpin + ?Sized, -{ - type Output = io::Result<()>; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let me = &mut *self; - while !me.buf.is_empty() { - let n = ready!(Pin::new(&mut me.writer).poll_write(cx, me.buf))?; - { - let (_, rest) = mem::replace(&mut me.buf, &[]).split_at(n); - me.buf = rest; - } - if n == 0 { - return Poll::Ready(Err(io::ErrorKind::WriteZero.into())); - } - } - - Poll::Ready(Ok(())) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn assert_unpin() { - use std::marker::PhantomPinned; - crate::is_unpin::>(); - } -} diff --git a/third_party/rust/tokio-0.2.25/src/io/util/write_buf.rs b/third_party/rust/tokio-0.2.25/src/io/util/write_buf.rs deleted file mode 100644 index cedfde64e6e6..000000000000 --- a/third_party/rust/tokio-0.2.25/src/io/util/write_buf.rs +++ /dev/null @@ -1,40 +0,0 @@ -use crate::io::AsyncWrite; - -use bytes::Buf; -use std::future::Future; -use std::io; -use std::pin::Pin; -use std::task::{Context, Poll}; - -cfg_io_util! { - /// A future to write some of the buffer to an `AsyncWrite`. - #[derive(Debug)] - #[must_use = "futures do nothing unless you `.await` or poll them"] - pub struct WriteBuf<'a, W, B> { - writer: &'a mut W, - buf: &'a mut B, - } -} - -/// Tries to write some bytes from the given `buf` to the writer in an -/// asynchronous manner, returning a future. 
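The `WriteAll` future above keeps issuing `poll_write` until the whole buffer has been accepted, and fails with `ErrorKind::WriteZero` if the writer ever accepts zero bytes. A minimal sketch of the happy path, assuming tokio 0.2 with the `io-util` and `macros` features:

```rust
use tokio::io::{self, AsyncWriteExt};

#[tokio::main]
async fn main() -> io::Result<()> {
    let mut out = io::sink();

    // write_all loops internally until every byte has been handed to the
    // writer, returning Err(WriteZero) if the writer stops making progress.
    out.write_all(b"all or nothing").await?;
    Ok(())
}
```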
-pub(crate) fn write_buf<'a, W, B>(writer: &'a mut W, buf: &'a mut B) -> WriteBuf<'a, W, B> -where - W: AsyncWrite + Unpin, - B: Buf, -{ - WriteBuf { writer, buf } -} - -impl Future for WriteBuf<'_, W, B> -where - W: AsyncWrite + Unpin, - B: Buf, -{ - type Output = io::Result; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let me = &mut *self; - Pin::new(&mut *me.writer).poll_write_buf(cx, me.buf) - } -} diff --git a/third_party/rust/tokio-0.2.25/src/io/util/write_int.rs b/third_party/rust/tokio-0.2.25/src/io/util/write_int.rs deleted file mode 100644 index ee992de1832e..000000000000 --- a/third_party/rust/tokio-0.2.25/src/io/util/write_int.rs +++ /dev/null @@ -1,132 +0,0 @@ -use crate::io::AsyncWrite; - -use bytes::BufMut; -use pin_project_lite::pin_project; -use std::future::Future; -use std::io; -use std::mem::size_of; -use std::pin::Pin; -use std::task::{Context, Poll}; - -macro_rules! writer { - ($name:ident, $ty:ty, $writer:ident) => { - writer!($name, $ty, $writer, size_of::<$ty>()); - }; - ($name:ident, $ty:ty, $writer:ident, $bytes:expr) => { - pin_project! { - #[doc(hidden)] - pub struct $name { - #[pin] - dst: W, - buf: [u8; $bytes], - written: u8, - } - } - - impl $name { - pub(crate) fn new(w: W, value: $ty) -> Self { - let mut writer = $name { - buf: [0; $bytes], - written: 0, - dst: w, - }; - BufMut::$writer(&mut &mut writer.buf[..], value); - writer - } - } - - impl Future for $name - where - W: AsyncWrite, - { - type Output = io::Result<()>; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let mut me = self.project(); - - if *me.written == $bytes as u8 { - return Poll::Ready(Ok(())); - } - - while *me.written < $bytes as u8 { - *me.written += match me - .dst - .as_mut() - .poll_write(cx, &me.buf[*me.written as usize..]) - { - Poll::Pending => return Poll::Pending, - Poll::Ready(Err(e)) => return Poll::Ready(Err(e.into())), - Poll::Ready(Ok(0)) => { - return Poll::Ready(Err(io::ErrorKind::WriteZero.into())); - } - Poll::Ready(Ok(n)) => n as u8, - }; - } - Poll::Ready(Ok(())) - } - } - }; -} - -macro_rules! writer8 { - ($name:ident, $ty:ty) => { - pin_project! 
{ - #[doc(hidden)] - pub struct $name { - #[pin] - dst: W, - byte: $ty, - } - } - - impl $name { - pub(crate) fn new(dst: W, byte: $ty) -> Self { - Self { dst, byte } - } - } - - impl Future for $name - where - W: AsyncWrite, - { - type Output = io::Result<()>; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let me = self.project(); - - let buf = [*me.byte as u8]; - - match me.dst.poll_write(cx, &buf[..]) { - Poll::Pending => Poll::Pending, - Poll::Ready(Err(e)) => Poll::Ready(Err(e.into())), - Poll::Ready(Ok(0)) => Poll::Ready(Err(io::ErrorKind::WriteZero.into())), - Poll::Ready(Ok(1)) => Poll::Ready(Ok(())), - Poll::Ready(Ok(_)) => unreachable!(), - } - } - } - }; -} - -writer8!(WriteU8, u8); -writer8!(WriteI8, i8); - -writer!(WriteU16, u16, put_u16); -writer!(WriteU32, u32, put_u32); -writer!(WriteU64, u64, put_u64); -writer!(WriteU128, u128, put_u128); - -writer!(WriteI16, i16, put_i16); -writer!(WriteI32, i32, put_i32); -writer!(WriteI64, i64, put_i64); -writer!(WriteI128, i128, put_i128); - -writer!(WriteU16Le, u16, put_u16_le); -writer!(WriteU32Le, u32, put_u32_le); -writer!(WriteU64Le, u64, put_u64_le); -writer!(WriteU128Le, u128, put_u128_le); - -writer!(WriteI16Le, i16, put_i16_le); -writer!(WriteI32Le, i32, put_i32_le); -writer!(WriteI64Le, i64, put_i64_le); -writer!(WriteI128Le, i128, put_i128_le); diff --git a/third_party/rust/tokio-0.2.25/src/lib.rs b/third_party/rust/tokio-0.2.25/src/lib.rs deleted file mode 100644 index 24b1d48cf4c6..000000000000 --- a/third_party/rust/tokio-0.2.25/src/lib.rs +++ /dev/null @@ -1,437 +0,0 @@ -#![doc(html_root_url = "https://docs.rs/tokio/0.2.25")] -#![allow( - clippy::cognitive_complexity, - clippy::large_enum_variant, - clippy::needless_doctest_main, - clippy::match_like_matches_macro, - clippy::stable_sort_primitive -)] -#![warn( - missing_debug_implementations, - missing_docs, - rust_2018_idioms, - unreachable_pub -)] -#![cfg_attr(docsrs, deny(broken_intra_doc_links))] -#![doc(test( - no_crate_inject, - attr(deny(warnings, rust_2018_idioms), allow(dead_code, unused_variables)) -))] -#![cfg_attr(docsrs, feature(doc_cfg))] - -//! A runtime for writing reliable, asynchronous, and slim applications. -//! -//! Tokio is an event-driven, non-blocking I/O platform for writing asynchronous -//! applications with the Rust programming language. At a high level, it -//! provides a few major components: -//! -//! * Tools for [working with asynchronous tasks][tasks], including -//! [synchronization primitives and channels][sync] and [timeouts, delays, and -//! intervals][time]. -//! * APIs for [performing asynchronous I/O][io], including [TCP and UDP][net] sockets, -//! [filesystem][fs] operations, and [process] and [signal] management. -//! * A [runtime] for executing asynchronous code, including a task scheduler, -//! an I/O driver backed by the operating system's event queue (epoll, kqueue, -//! IOCP, etc...), and a high performance timer. -//! -//! Guide level documentation is found on the [website]. -//! -//! [tasks]: #working-with-tasks -//! [sync]: crate::sync -//! [time]: crate::time -//! [io]: #asynchronous-io -//! [net]: crate::net -//! [fs]: crate::fs -//! [process]: crate::process -//! [signal]: crate::signal -//! [fs]: crate::fs -//! [runtime]: crate::runtime -//! [website]: https://tokio.rs/tokio/tutorial -//! -//! # A Tour of Tokio -//! -//! Tokio consists of a number of modules that provide a range of functionality -//! essential for implementing asynchronous applications in Rust. In this -//! 
section, we will take a brief tour of Tokio, summarizing the major APIs and -//! their uses. -//! -//! The easiest way to get started is to enable all features. Do this by -//! enabling the `full` feature flag: -//! -//! ```toml -//! tokio = { version = "0.2", features = ["full"] } -//! ``` -//! -//! ## Feature flags -//! -//! Tokio uses a set of [feature flags] to reduce the amount of compiled code. It -//! is possible to just enable certain features over others. By default, Tokio -//! does not enable any features but allows one to enable a subset for their use -//! case. Below is a list of the available feature flags. You may also notice -//! above each function, struct and trait there is listed one or more feature flags -//! that are required for that item to be used. If you are new to Tokio it is -//! recommended that you use the `full` feature flag which will enable all public APIs. -//! Beware though that this will pull in many extra dependencies that you may not -//! need. -//! -//! - `full`: Enables all Tokio public API features listed below. -//! - `rt-core`: Enables `tokio::spawn` and the basic (single-threaded) scheduler. -//! - `rt-threaded`: Enables the heavier, multi-threaded, work-stealing scheduler. -//! - `rt-util`: Enables non-scheduler utilities. -//! - `io-driver`: Enables the `mio` based IO driver. -//! - `io-util`: Enables the IO based `Ext` traits. -//! - `io-std`: Enable `Stdout`, `Stdin` and `Stderr` types. -//! - `net`: Enables `tokio::net` types such as `TcpStream`, `UnixStream` and `UdpSocket`. -//! - `tcp`: Enables all `tokio::net::tcp` types. -//! - `udp`: Enables all `tokio::net::udp` types. -//! - `uds`: Enables all `tokio::net::unix` types. -//! - `time`: Enables `tokio::time` types and allows the schedulers to enable -//! the built in timer. -//! - `process`: Enables `tokio::process` types. -//! - `macros`: Enables `#[tokio::main]` and `#[tokio::test]` macros. -//! - `sync`: Enables all `tokio::sync` types. -//! - `stream`: Enables optional `Stream` implementations for types within Tokio. -//! - `signal`: Enables all `tokio::signal` types. -//! - `fs`: Enables `tokio::fs` types. -//! - `dns`: Enables async `tokio::net::ToSocketAddrs`. -//! - `test-util`: Enables testing based infrastructure for the Tokio runtime. -//! - `blocking`: Enables `block_in_place` and `spawn_blocking`. -//! -//! _Note: `AsyncRead` and `AsyncWrite` traits do not require any features and are -//! always available._ -//! -//! ### Internal features -//! -//! These features do not expose any new API, but influence internal -//! implementation aspects of Tokio, and can pull in additional -//! dependencies. They are not included in `full`: -//! -//! - `parking_lot`: As a potential optimization, use the _parking_lot_ crate's -//! synchronization primitives internally. MSRV may increase according to the -//! _parking_lot_ release in use. -//! -//! [feature flags]: https://doc.rust-lang.org/cargo/reference/manifest.html#the-features-section -//! -//! ### Authoring applications -//! -//! Tokio is great for writing applications and most users in this case shouldn't -//! worry too much about what features they should pick. If you're unsure, we suggest -//! going with `full` to ensure that you don't run into any road blocks while you're -//! building your application. -//! -//! #### Example -//! -//! This example shows the quickest way to get started with Tokio. -//! -//! ```toml -//! tokio = { version = "0.2", features = ["full"] } -//! ``` -//! -//! ### Authoring libraries -//! -//! 
As a library author your goal should be to provide the lighest weight crate -//! that is based on Tokio. To achieve this you should ensure that you only enable -//! the features you need. This allows users to pick up your crate without having -//! to enable unnecessary features. -//! -//! #### Example -//! -//! This example shows how you may want to import features for a library that just -//! needs to `tokio::spawn` and use a `TcpStream`. -//! -//! ```toml -//! tokio = { version = "0.2", features = ["rt-core", "tcp"] } -//! ``` -//! -//! ## Working With Tasks -//! -//! Asynchronous programs in Rust are based around lightweight, non-blocking -//! units of execution called [_tasks_][tasks]. The [`tokio::task`] module provides -//! important tools for working with tasks: -//! -//! * The [`spawn`] function and [`JoinHandle`] type, for scheduling a new task -//! on the Tokio runtime and awaiting the output of a spawned task, respectively, -//! * Functions for [running blocking operations][blocking] in an asynchronous -//! task context. -//! -//! The [`tokio::task`] module is present only when the "rt-core" feature flag -//! is enabled. -//! -//! [tasks]: task/index.html#what-are-tasks -//! [`tokio::task`]: crate::task -//! [`spawn`]: crate::task::spawn() -//! [`JoinHandle`]: crate::task::JoinHandle -//! [blocking]: task/index.html#blocking-and-yielding -//! -//! The [`tokio::sync`] module contains synchronization primitives to use when -//! needing to communicate or share data. These include: -//! -//! * channels ([`oneshot`], [`mpsc`], and [`watch`]), for sending values -//! between tasks, -//! * a non-blocking [`Mutex`], for controlling access to a shared, mutable -//! value, -//! * an asynchronous [`Barrier`] type, for multiple tasks to synchronize before -//! beginning a computation. -//! -//! The `tokio::sync` module is present only when the "sync" feature flag is -//! enabled. -//! -//! [`tokio::sync`]: crate::sync -//! [`Mutex`]: crate::sync::Mutex -//! [`Barrier`]: crate::sync::Barrier -//! [`oneshot`]: crate::sync::oneshot -//! [`mpsc`]: crate::sync::mpsc -//! [`watch`]: crate::sync::watch -//! -//! The [`tokio::time`] module provides utilities for tracking time and -//! scheduling work. This includes functions for setting [timeouts][timeout] for -//! tasks, [delaying][delay] work to run in the future, or [repeating an operation at an -//! interval][interval]. -//! -//! In order to use `tokio::time`, the "time" feature flag must be enabled. -//! -//! [`tokio::time`]: crate::time -//! [delay]: crate::time::delay_for() -//! [interval]: crate::time::interval() -//! [timeout]: crate::time::timeout() -//! -//! Finally, Tokio provides a _runtime_ for executing asynchronous tasks. Most -//! applications can use the [`#[tokio::main]`][main] macro to run their code on the -//! Tokio runtime. In use-cases where manual control over the runtime is -//! required, the [`tokio::runtime`] module provides APIs for configuring and -//! managing runtimes. -//! -//! Using the runtime requires the "rt-core" or "rt-threaded" feature flags, to -//! enable the basic [single-threaded scheduler][rt-core] and the [thread-pool -//! scheduler][rt-threaded], respectively. See the [`runtime` module -//! documentation][rt-features] for details. In addition, the "macros" feature -//! flag enables the `#[tokio::main]` and `#[tokio::test]` attributes. -//! -//! [main]: attr.main.html -//! [`tokio::runtime`]: crate::runtime -//! [`Builder`]: crate::runtime::Builder -//! [`Runtime`]: crate::runtime::Runtime -//! 
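The task-spawning prose above describes `spawn` and `JoinHandle` without an inline example; a minimal sketch of spawning a task and awaiting its handle, assuming the `rt-core` and `macros` features of tokio 0.2:

```rust
#[tokio::main]
async fn main() {
    // Spawn a task onto the runtime; the JoinHandle resolves to
    // Result<T, JoinError>, where Err means the task panicked or was cancelled.
    let handle = tokio::spawn(async { 2 + 2 });
    let result = handle.await.expect("the spawned task panicked");
    assert_eq!(result, 4);
}
```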
[rt-core]: runtime/index.html#basic-scheduler -//! [rt-threaded]: runtime/index.html#threaded-scheduler -//! [rt-features]: runtime/index.html#runtime-scheduler -//! -//! ## CPU-bound tasks and blocking code -//! -//! Tokio is able to concurrently run many tasks on a few threads by repeatedly -//! swapping the currently running task on each thread. However, this kind of -//! swapping can only happen at `.await` points, so code that spends a long time -//! without reaching an `.await` will prevent other tasks from running. To -//! combat this, Tokio provides two kinds of threads: Core threads and blocking -//! threads. The core threads are where all asynchronous code runs, and Tokio -//! will by default spawn one for each CPU core. The blocking threads are -//! spawned on demand, and can be used to run blocking code that would otherwise -//! block other tasks from running. Since it is not possible for Tokio to swap -//! out blocking tasks, like it can do with asynchronous code, the upper limit -//! on the number of blocking threads is very large. These limits can be -//! configured on the [`Builder`]. -//! -//! To spawn a blocking task, you should use the [`spawn_blocking`] function. -//! -//! [`Builder`]: crate::runtime::Builder -//! [`spawn_blocking`]: crate::task::spawn_blocking() -//! -//! ``` -//! #[tokio::main] -//! async fn main() { -//! // This is running on a core thread. -//! -//! let blocking_task = tokio::task::spawn_blocking(|| { -//! // This is running on a blocking thread. -//! // Blocking here is ok. -//! }); -//! -//! // We can wait for the blocking task like this: -//! // If the blocking task panics, the unwrap below will propagate the -//! // panic. -//! blocking_task.await.unwrap(); -//! } -//! ``` -//! -//! If your code is CPU-bound and you wish to limit the number of threads used -//! to run it, you should run it on another thread pool such as [rayon]. You -//! can use an [`oneshot`] channel to send the result back to Tokio when the -//! rayon task finishes. -//! -//! [rayon]: https://docs.rs/rayon -//! [`oneshot`]: crate::sync::oneshot -//! -//! ## Asynchronous IO -//! -//! As well as scheduling and running tasks, Tokio provides everything you need -//! to perform input and output asynchronously. -//! -//! The [`tokio::io`] module provides Tokio's asynchronous core I/O primitives, -//! the [`AsyncRead`], [`AsyncWrite`], and [`AsyncBufRead`] traits. In addition, -//! when the "io-util" feature flag is enabled, it also provides combinators and -//! functions for working with these traits, forming as an asynchronous -//! counterpart to [`std::io`]. When the "io-driver" feature flag is enabled, it -//! also provides utilities for library authors implementing I/O resources. -//! -//! Tokio also includes APIs for performing various kinds of I/O and interacting -//! with the operating system asynchronously. These include: -//! -//! * [`tokio::net`], which contains non-blocking versions of [TCP], [UDP], and -//! [Unix Domain Sockets][UDS] (enabled by the "net" feature flag), -//! * [`tokio::fs`], similar to [`std::fs`] but for performing filesystem I/O -//! asynchronously (enabled by the "fs" feature flag), -//! * [`tokio::signal`], for asynchronously handling Unix and Windows OS signals -//! (enabled by the "signal" feature flag), -//! * [`tokio::process`], for spawning and managing child processes (enabled by -//! the "process" feature flag). -//! -//! [`tokio::io`]: crate::io -//! [`AsyncRead`]: crate::io::AsyncRead -//! [`AsyncWrite`]: crate::io::AsyncWrite -//! 
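The CPU-bound guidance above suggests handing heavy work to a dedicated pool such as rayon and sending the result back over a oneshot channel, but gives no example. A sketch of that pattern follows; `expensive` and `run_on_rayon` are hypothetical names, and the rayon crate plus tokio 0.2's `sync` feature are assumed as dependencies:

```rust
use tokio::sync::oneshot;

// Hypothetical CPU-heavy computation to run off the Tokio core threads.
fn expensive(n: u64) -> u64 {
    (0..n).sum()
}

// Hypothetical wrapper: run `expensive` on the rayon pool and await the result.
async fn run_on_rayon(n: u64) -> u64 {
    let (tx, rx) = oneshot::channel();
    rayon::spawn(move || {
        // A send error only means the receiver was dropped; ignore it.
        let _ = tx.send(expensive(n));
    });
    rx.await.expect("the rayon task panicked before sending a result")
}
```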
[`AsyncBufRead`]: crate::io::AsyncBufRead -//! [`std::io`]: std::io -//! [`tokio::net`]: crate::net -//! [TCP]: crate::net::tcp -//! [UDP]: crate::net::udp -//! [UDS]: crate::net::unix -//! [`tokio::fs`]: crate::fs -//! [`std::fs`]: std::fs -//! [`tokio::signal`]: crate::signal -//! [`tokio::process`]: crate::process -//! -//! # Examples -//! -//! A simple TCP echo server: -//! -//! ```no_run -//! use tokio::net::TcpListener; -//! use tokio::prelude::*; -//! -//! #[tokio::main] -//! async fn main() -> Result<(), Box> { -//! let mut listener = TcpListener::bind("127.0.0.1:8080").await?; -//! -//! loop { -//! let (mut socket, _) = listener.accept().await?; -//! -//! tokio::spawn(async move { -//! let mut buf = [0; 1024]; -//! -//! // In a loop, read data from the socket and write the data back. -//! loop { -//! let n = match socket.read(&mut buf).await { -//! // socket closed -//! Ok(n) if n == 0 => return, -//! Ok(n) => n, -//! Err(e) => { -//! eprintln!("failed to read from socket; err = {:?}", e); -//! return; -//! } -//! }; -//! -//! // Write the data back -//! if let Err(e) = socket.write_all(&buf[0..n]).await { -//! eprintln!("failed to write to socket; err = {:?}", e); -//! return; -//! } -//! } -//! }); -//! } -//! } -//! ``` - -// Includes re-exports used by macros. -// -// This module is not intended to be part of the public API. In general, any -// `doc(hidden)` code is not part of Tokio's public and stable API. -#[macro_use] -#[doc(hidden)] -pub mod macros; - -cfg_fs! { - pub mod fs; -} - -#[doc(hidden)] -pub mod future; - -pub mod io; -pub mod net; - -mod loom; -mod park; - -pub mod prelude; - -cfg_process! { - pub mod process; -} - -pub mod runtime; - -pub(crate) mod coop; - -cfg_signal! { - pub mod signal; -} - -cfg_stream! { - pub mod stream; -} - -cfg_sync! { - pub mod sync; -} -cfg_not_sync! { - mod sync; -} - -cfg_rt_core! { - pub mod task; - pub use task::spawn; -} - -cfg_time! { - pub mod time; -} - -mod util; - -cfg_macros! { - /// Implementation detail of the `select!` macro. This macro is **not** - /// intended to be used as part of the public API and is permitted to - /// change. - #[doc(hidden)] - pub use tokio_macros::select_priv_declare_output_enum; - - doc_rt_core! { - cfg_rt_threaded! { - // This is the docs.rs case (with all features) so make sure macros - // is included in doc(cfg). - - #[cfg(not(test))] // Work around for rust-lang/rust#62127 - #[cfg_attr(docsrs, doc(cfg(feature = "macros")))] - pub use tokio_macros::main_threaded as main; - - #[cfg_attr(docsrs, doc(cfg(feature = "macros")))] - pub use tokio_macros::test_threaded as test; - } - - cfg_not_rt_threaded! { - #[cfg(not(test))] // Work around for rust-lang/rust#62127 - pub use tokio_macros::main_basic as main; - pub use tokio_macros::test_basic as test; - } - } - - // Maintains old behavior - cfg_not_rt_core! 
{ - #[cfg(not(test))] - pub use tokio_macros::main; - pub use tokio_macros::test; - } -} - -// TODO: rm -#[cfg(feature = "io-util")] -#[cfg(test)] -fn is_unpin() {} diff --git a/third_party/rust/tokio-0.2.25/src/loom/mocked.rs b/third_party/rust/tokio-0.2.25/src/loom/mocked.rs deleted file mode 100644 index 78913952256a..000000000000 --- a/third_party/rust/tokio-0.2.25/src/loom/mocked.rs +++ /dev/null @@ -1,13 +0,0 @@ -pub(crate) use loom::*; - -pub(crate) mod rand { - pub(crate) fn seed() -> u64 { - 1 - } -} - -pub(crate) mod sys { - pub(crate) fn num_cpus() -> usize { - 2 - } -} diff --git a/third_party/rust/tokio-0.2.25/src/loom/mod.rs b/third_party/rust/tokio-0.2.25/src/loom/mod.rs deleted file mode 100644 index 56a41f25a05f..000000000000 --- a/third_party/rust/tokio-0.2.25/src/loom/mod.rs +++ /dev/null @@ -1,12 +0,0 @@ -//! This module abstracts over `loom` and `std::sync` depending on whether we -//! are running tests or not. - -#[cfg(not(all(test, loom)))] -mod std; -#[cfg(not(all(test, loom)))] -pub(crate) use self::std::*; - -#[cfg(all(test, loom))] -mod mocked; -#[cfg(all(test, loom))] -pub(crate) use self::mocked::*; diff --git a/third_party/rust/tokio-0.2.25/src/loom/std/atomic_ptr.rs b/third_party/rust/tokio-0.2.25/src/loom/std/atomic_ptr.rs deleted file mode 100644 index f7fd56cc69bb..000000000000 --- a/third_party/rust/tokio-0.2.25/src/loom/std/atomic_ptr.rs +++ /dev/null @@ -1,28 +0,0 @@ -use std::fmt; -use std::ops::Deref; - -/// `AtomicPtr` providing an additional `load_unsync` function. -pub(crate) struct AtomicPtr { - inner: std::sync::atomic::AtomicPtr, -} - -impl AtomicPtr { - pub(crate) fn new(ptr: *mut T) -> AtomicPtr { - let inner = std::sync::atomic::AtomicPtr::new(ptr); - AtomicPtr { inner } - } -} - -impl Deref for AtomicPtr { - type Target = std::sync::atomic::AtomicPtr; - - fn deref(&self) -> &Self::Target { - &self.inner - } -} - -impl fmt::Debug for AtomicPtr { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - self.deref().fmt(fmt) - } -} diff --git a/third_party/rust/tokio-0.2.25/src/loom/std/atomic_u16.rs b/third_party/rust/tokio-0.2.25/src/loom/std/atomic_u16.rs deleted file mode 100644 index 70390972b4b6..000000000000 --- a/third_party/rust/tokio-0.2.25/src/loom/std/atomic_u16.rs +++ /dev/null @@ -1,44 +0,0 @@ -use std::cell::UnsafeCell; -use std::fmt; -use std::ops::Deref; - -/// `AtomicU16` providing an additional `load_unsync` function. -pub(crate) struct AtomicU16 { - inner: UnsafeCell, -} - -unsafe impl Send for AtomicU16 {} -unsafe impl Sync for AtomicU16 {} - -impl AtomicU16 { - pub(crate) fn new(val: u16) -> AtomicU16 { - let inner = UnsafeCell::new(std::sync::atomic::AtomicU16::new(val)); - AtomicU16 { inner } - } - - /// Performs an unsynchronized load. - /// - /// # Safety - /// - /// All mutations must have happened before the unsynchronized load. - /// Additionally, there must be no concurrent mutations. - pub(crate) unsafe fn unsync_load(&self) -> u16 { - *(*self.inner.get()).get_mut() - } -} - -impl Deref for AtomicU16 { - type Target = std::sync::atomic::AtomicU16; - - fn deref(&self) -> &Self::Target { - // safety: it is always safe to access `&self` fns on the inner value as - // we never perform unsafe mutations. 
- unsafe { &*self.inner.get() } - } -} - -impl fmt::Debug for AtomicU16 { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - self.deref().fmt(fmt) - } -} diff --git a/third_party/rust/tokio-0.2.25/src/loom/std/atomic_u32.rs b/third_party/rust/tokio-0.2.25/src/loom/std/atomic_u32.rs deleted file mode 100644 index 6f786c519f19..000000000000 --- a/third_party/rust/tokio-0.2.25/src/loom/std/atomic_u32.rs +++ /dev/null @@ -1,34 +0,0 @@ -use std::cell::UnsafeCell; -use std::fmt; -use std::ops::Deref; - -/// `AtomicU32` providing an additional `load_unsync` function. -pub(crate) struct AtomicU32 { - inner: UnsafeCell, -} - -unsafe impl Send for AtomicU32 {} -unsafe impl Sync for AtomicU32 {} - -impl AtomicU32 { - pub(crate) fn new(val: u32) -> AtomicU32 { - let inner = UnsafeCell::new(std::sync::atomic::AtomicU32::new(val)); - AtomicU32 { inner } - } -} - -impl Deref for AtomicU32 { - type Target = std::sync::atomic::AtomicU32; - - fn deref(&self) -> &Self::Target { - // safety: it is always safe to access `&self` fns on the inner value as - // we never perform unsafe mutations. - unsafe { &*self.inner.get() } - } -} - -impl fmt::Debug for AtomicU32 { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - self.deref().fmt(fmt) - } -} diff --git a/third_party/rust/tokio-0.2.25/src/loom/std/atomic_u64.rs b/third_party/rust/tokio-0.2.25/src/loom/std/atomic_u64.rs deleted file mode 100644 index 206954fcc38b..000000000000 --- a/third_party/rust/tokio-0.2.25/src/loom/std/atomic_u64.rs +++ /dev/null @@ -1,60 +0,0 @@ -//! Implementation of an atomic u64 cell. On 64 bit platforms, this is a -//! re-export of `AtomicU64`. On 32 bit platforms, this is implemented using a -//! `Mutex`. - -pub(crate) use self::imp::AtomicU64; - -// `AtomicU64` can only be used on targets with `target_has_atomic` is 64 or greater. -// Once `cfg_target_has_atomic` feature is stable, we can replace it with -// `#[cfg(target_has_atomic = "64")]`. -// Refs: https://github.com/rust-lang/rust/tree/master/src/librustc_target -#[cfg(not(any(target_arch = "arm", target_arch = "mips", target_arch = "powerpc")))] -mod imp { - pub(crate) use std::sync::atomic::AtomicU64; -} - -#[cfg(any(target_arch = "arm", target_arch = "mips", target_arch = "powerpc"))] -mod imp { - use std::sync::atomic::Ordering; - use std::sync::Mutex; - - #[derive(Debug)] - pub(crate) struct AtomicU64 { - inner: Mutex, - } - - impl AtomicU64 { - pub(crate) fn new(val: u64) -> AtomicU64 { - AtomicU64 { - inner: Mutex::new(val), - } - } - - pub(crate) fn load(&self, _: Ordering) -> u64 { - *self.inner.lock().unwrap() - } - - pub(crate) fn store(&self, val: u64, _: Ordering) { - *self.inner.lock().unwrap() = val; - } - - pub(crate) fn fetch_or(&self, val: u64, _: Ordering) -> u64 { - let mut lock = self.inner.lock().unwrap(); - let prev = *lock; - *lock = prev | val; - prev - } - - pub(crate) fn compare_and_swap(&self, old: u64, new: u64, _: Ordering) -> u64 { - let mut lock = self.inner.lock().unwrap(); - let prev = *lock; - - if prev != old { - return prev; - } - - *lock = new; - prev - } - } -} diff --git a/third_party/rust/tokio-0.2.25/src/loom/std/atomic_u8.rs b/third_party/rust/tokio-0.2.25/src/loom/std/atomic_u8.rs deleted file mode 100644 index 4fcd0df3d45b..000000000000 --- a/third_party/rust/tokio-0.2.25/src/loom/std/atomic_u8.rs +++ /dev/null @@ -1,34 +0,0 @@ -use std::cell::UnsafeCell; -use std::fmt; -use std::ops::Deref; - -/// `AtomicU8` providing an additional `load_unsync` function. 
-pub(crate) struct AtomicU8 { - inner: UnsafeCell, -} - -unsafe impl Send for AtomicU8 {} -unsafe impl Sync for AtomicU8 {} - -impl AtomicU8 { - pub(crate) fn new(val: u8) -> AtomicU8 { - let inner = UnsafeCell::new(std::sync::atomic::AtomicU8::new(val)); - AtomicU8 { inner } - } -} - -impl Deref for AtomicU8 { - type Target = std::sync::atomic::AtomicU8; - - fn deref(&self) -> &Self::Target { - // safety: it is always safe to access `&self` fns on the inner value as - // we never perform unsafe mutations. - unsafe { &*self.inner.get() } - } -} - -impl fmt::Debug for AtomicU8 { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - self.deref().fmt(fmt) - } -} diff --git a/third_party/rust/tokio-0.2.25/src/loom/std/atomic_usize.rs b/third_party/rust/tokio-0.2.25/src/loom/std/atomic_usize.rs deleted file mode 100644 index 0fe998f1f9e1..000000000000 --- a/third_party/rust/tokio-0.2.25/src/loom/std/atomic_usize.rs +++ /dev/null @@ -1,56 +0,0 @@ -use std::cell::UnsafeCell; -use std::fmt; -use std::ops; - -/// `AtomicUsize` providing an additional `load_unsync` function. -pub(crate) struct AtomicUsize { - inner: UnsafeCell, -} - -unsafe impl Send for AtomicUsize {} -unsafe impl Sync for AtomicUsize {} - -impl AtomicUsize { - pub(crate) fn new(val: usize) -> AtomicUsize { - let inner = UnsafeCell::new(std::sync::atomic::AtomicUsize::new(val)); - AtomicUsize { inner } - } - - /// Performs an unsynchronized load. - /// - /// # Safety - /// - /// All mutations must have happened before the unsynchronized load. - /// Additionally, there must be no concurrent mutations. - pub(crate) unsafe fn unsync_load(&self) -> usize { - *(*self.inner.get()).get_mut() - } - - pub(crate) fn with_mut(&mut self, f: impl FnOnce(&mut usize) -> R) -> R { - // safety: we have mutable access - f(unsafe { (*self.inner.get()).get_mut() }) - } -} - -impl ops::Deref for AtomicUsize { - type Target = std::sync::atomic::AtomicUsize; - - fn deref(&self) -> &Self::Target { - // safety: it is always safe to access `&self` fns on the inner value as - // we never perform unsafe mutations. 
- unsafe { &*self.inner.get() } - } -} - -impl ops::DerefMut for AtomicUsize { - fn deref_mut(&mut self) -> &mut Self::Target { - // safety: we hold `&mut self` - unsafe { &mut *self.inner.get() } - } -} - -impl fmt::Debug for AtomicUsize { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - (**self).fmt(fmt) - } -} diff --git a/third_party/rust/tokio-0.2.25/src/loom/std/mod.rs b/third_party/rust/tokio-0.2.25/src/loom/std/mod.rs deleted file mode 100644 index 60ee56ad2025..000000000000 --- a/third_party/rust/tokio-0.2.25/src/loom/std/mod.rs +++ /dev/null @@ -1,86 +0,0 @@ -#![cfg_attr(any(not(feature = "full"), loom), allow(unused_imports, dead_code))] - -mod atomic_ptr; -mod atomic_u16; -mod atomic_u32; -mod atomic_u64; -mod atomic_u8; -mod atomic_usize; -#[cfg(feature = "parking_lot")] -mod parking_lot; -mod unsafe_cell; - -pub(crate) mod cell { - pub(crate) use super::unsafe_cell::UnsafeCell; -} - -#[cfg(any(feature = "sync", feature = "io-driver"))] -pub(crate) mod future { - pub(crate) use crate::sync::AtomicWaker; -} - -pub(crate) mod rand { - use std::collections::hash_map::RandomState; - use std::hash::{BuildHasher, Hash, Hasher}; - use std::sync::atomic::AtomicU32; - use std::sync::atomic::Ordering::Relaxed; - - static COUNTER: AtomicU32 = AtomicU32::new(1); - - pub(crate) fn seed() -> u64 { - let rand_state = RandomState::new(); - - let mut hasher = rand_state.build_hasher(); - - // Hash some unique-ish data to generate some new state - COUNTER.fetch_add(1, Relaxed).hash(&mut hasher); - - // Get the seed - hasher.finish() - } -} - -pub(crate) mod sync { - pub(crate) use std::sync::Arc; - - // Below, make sure all the feature-influenced types are exported for - // internal use. Note however that some are not _currently_ named by - // consuming code. - - #[cfg(feature = "parking_lot")] - #[allow(unused_imports)] - pub(crate) use crate::loom::std::parking_lot::{ - Condvar, Mutex, MutexGuard, RwLock, RwLockReadGuard, WaitTimeoutResult, - }; - - #[cfg(not(feature = "parking_lot"))] - #[allow(unused_imports)] - pub(crate) use std::sync::{ - Condvar, Mutex, MutexGuard, RwLock, RwLockReadGuard, WaitTimeoutResult, - }; - - pub(crate) mod atomic { - pub(crate) use crate::loom::std::atomic_ptr::AtomicPtr; - pub(crate) use crate::loom::std::atomic_u16::AtomicU16; - pub(crate) use crate::loom::std::atomic_u32::AtomicU32; - pub(crate) use crate::loom::std::atomic_u64::AtomicU64; - pub(crate) use crate::loom::std::atomic_u8::AtomicU8; - pub(crate) use crate::loom::std::atomic_usize::AtomicUsize; - - pub(crate) use std::sync::atomic::{spin_loop_hint, AtomicBool}; - } -} - -pub(crate) mod sys { - #[cfg(feature = "rt-threaded")] - pub(crate) fn num_cpus() -> usize { - usize::max(1, num_cpus::get()) - } - - #[cfg(not(feature = "rt-threaded"))] - pub(crate) fn num_cpus() -> usize { - 1 - } -} - -pub(crate) use std::thread; diff --git a/third_party/rust/tokio-0.2.25/src/loom/std/parking_lot.rs b/third_party/rust/tokio-0.2.25/src/loom/std/parking_lot.rs deleted file mode 100644 index 25d94af44f53..000000000000 --- a/third_party/rust/tokio-0.2.25/src/loom/std/parking_lot.rs +++ /dev/null @@ -1,97 +0,0 @@ -//! A minimal adaption of the `parking_lot` synchronization primitives to the -//! equivalent `std::sync` types. -//! -//! This can be extended to additional types/methods as required. 
- -use std::sync::{LockResult, TryLockError, TryLockResult}; -use std::time::Duration; - -// Types that do not need wrapping -pub(crate) use parking_lot::{MutexGuard, RwLockReadGuard, RwLockWriteGuard, WaitTimeoutResult}; - -/// Adapter for `parking_lot::Mutex` to the `std::sync::Mutex` interface. -#[derive(Debug)] -pub(crate) struct Mutex(parking_lot::Mutex); - -#[derive(Debug)] -pub(crate) struct RwLock(parking_lot::RwLock); - -/// Adapter for `parking_lot::Condvar` to the `std::sync::Condvar` interface. -#[derive(Debug)] -pub(crate) struct Condvar(parking_lot::Condvar); - -impl Mutex { - #[inline] - pub(crate) fn new(t: T) -> Mutex { - Mutex(parking_lot::Mutex::new(t)) - } - - #[inline] - pub(crate) fn lock(&self) -> LockResult> { - Ok(self.0.lock()) - } - - #[inline] - pub(crate) fn try_lock(&self) -> TryLockResult> { - match self.0.try_lock() { - Some(guard) => Ok(guard), - None => Err(TryLockError::WouldBlock), - } - } - - // Note: Additional methods `is_poisoned` and `into_inner`, can be - // provided here as needed. -} - -impl RwLock { - pub(crate) fn new(t: T) -> RwLock { - RwLock(parking_lot::RwLock::new(t)) - } - - pub(crate) fn read(&self) -> LockResult> { - Ok(self.0.read()) - } - - pub(crate) fn write(&self) -> LockResult> { - Ok(self.0.write()) - } -} - -impl Condvar { - #[inline] - pub(crate) fn new() -> Condvar { - Condvar(parking_lot::Condvar::new()) - } - - #[inline] - pub(crate) fn notify_one(&self) { - self.0.notify_one(); - } - - #[inline] - pub(crate) fn notify_all(&self) { - self.0.notify_all(); - } - - #[inline] - pub(crate) fn wait<'a, T>( - &self, - mut guard: MutexGuard<'a, T>, - ) -> LockResult> { - self.0.wait(&mut guard); - Ok(guard) - } - - #[inline] - pub(crate) fn wait_timeout<'a, T>( - &self, - mut guard: MutexGuard<'a, T>, - timeout: Duration, - ) -> LockResult<(MutexGuard<'a, T>, WaitTimeoutResult)> { - let wtr = self.0.wait_for(&mut guard, timeout); - Ok((guard, wtr)) - } - - // Note: Additional methods `wait_timeout_ms`, `wait_timeout_until`, - // `wait_until` can be provided here as needed. -} diff --git a/third_party/rust/tokio-0.2.25/src/loom/std/unsafe_cell.rs b/third_party/rust/tokio-0.2.25/src/loom/std/unsafe_cell.rs deleted file mode 100644 index f2b03d8dc2a3..000000000000 --- a/third_party/rust/tokio-0.2.25/src/loom/std/unsafe_cell.rs +++ /dev/null @@ -1,16 +0,0 @@ -#[derive(Debug)] -pub(crate) struct UnsafeCell(std::cell::UnsafeCell); - -impl UnsafeCell { - pub(crate) fn new(data: T) -> UnsafeCell { - UnsafeCell(std::cell::UnsafeCell::new(data)) - } - - pub(crate) fn with(&self, f: impl FnOnce(*const T) -> R) -> R { - f(self.0.get()) - } - - pub(crate) fn with_mut(&self, f: impl FnOnce(*mut T) -> R) -> R { - f(self.0.get()) - } -} diff --git a/third_party/rust/tokio-0.2.25/src/macros/cfg.rs b/third_party/rust/tokio-0.2.25/src/macros/cfg.rs deleted file mode 100644 index 4b77544eb5c3..000000000000 --- a/third_party/rust/tokio-0.2.25/src/macros/cfg.rs +++ /dev/null @@ -1,404 +0,0 @@ -#![allow(unused_macros)] - -macro_rules! cfg_resource_drivers { - ($($item:item)*) => { - $( - #[cfg(any(feature = "io-driver", feature = "time"))] - $item - )* - } -} - -macro_rules! cfg_blocking { - ($($item:item)*) => { - $( - #[cfg(feature = "blocking")] - #[cfg_attr(docsrs, doc(cfg(feature = "blocking")))] - $item - )* - } -} - -/// Enables blocking API internals -macro_rules! 
cfg_blocking_impl { - ($($item:item)*) => { - $( - #[cfg(any( - feature = "blocking", - feature = "fs", - feature = "dns", - feature = "io-std", - feature = "rt-threaded", - ))] - $item - )* - } -} - -/// Enables blocking API internals -macro_rules! cfg_blocking_impl_or_task { - ($($item:item)*) => { - $( - #[cfg(any( - feature = "blocking", - feature = "fs", - feature = "dns", - feature = "io-std", - feature = "rt-threaded", - feature = "task", - ))] - $item - )* - } -} - -/// Enables enter::block_on -macro_rules! cfg_block_on { - ($($item:item)*) => { - $( - #[cfg(any( - feature = "blocking", - feature = "fs", - feature = "dns", - feature = "io-std", - feature = "rt-core", - ))] - $item - )* - } -} - -/// Enables blocking API internals -macro_rules! cfg_not_blocking_impl { - ($($item:item)*) => { - $( - #[cfg(not(any( - feature = "blocking", - feature = "fs", - feature = "dns", - feature = "io-std", - feature = "rt-threaded", - )))] - $item - )* - } -} - -/// Enables internal `AtomicWaker` impl -macro_rules! cfg_atomic_waker_impl { - ($($item:item)*) => { - $( - #[cfg(any( - feature = "io-driver", - feature = "time", - all(feature = "rt-core", feature = "rt-util") - ))] - #[cfg(not(loom))] - $item - )* - } -} - -macro_rules! cfg_dns { - ($($item:item)*) => { - $( - #[cfg(feature = "dns")] - #[cfg_attr(docsrs, doc(cfg(feature = "dns")))] - $item - )* - } -} - -macro_rules! cfg_fs { - ($($item:item)*) => { - $( - #[cfg(feature = "fs")] - #[cfg_attr(docsrs, doc(cfg(feature = "fs")))] - $item - )* - } -} - -macro_rules! cfg_io_blocking { - ($($item:item)*) => { - $( #[cfg(any(feature = "io-std", feature = "fs"))] $item )* - } -} - -macro_rules! cfg_io_driver { - ($($item:item)*) => { - $( - #[cfg(feature = "io-driver")] - #[cfg_attr(docsrs, doc(cfg(feature = "io-driver")))] - $item - )* - } -} - -macro_rules! cfg_not_io_driver { - ($($item:item)*) => { - $( - #[cfg(not(feature = "io-driver"))] - $item - )* - } -} - -macro_rules! cfg_io_std { - ($($item:item)*) => { - $( - #[cfg(feature = "io-std")] - #[cfg_attr(docsrs, doc(cfg(feature = "io-std")))] - $item - )* - } -} - -macro_rules! cfg_io_util { - ($($item:item)*) => { - $( - #[cfg(feature = "io-util")] - #[cfg_attr(docsrs, doc(cfg(feature = "io-util")))] - $item - )* - } -} - -macro_rules! cfg_not_io_util { - ($($item:item)*) => { - $( #[cfg(not(feature = "io-util"))] $item )* - } -} - -macro_rules! cfg_loom { - ($($item:item)*) => { - $( #[cfg(loom)] $item )* - } -} - -macro_rules! cfg_not_loom { - ($($item:item)*) => { - $( #[cfg(not(loom))] $item )* - } -} - -macro_rules! cfg_macros { - ($($item:item)*) => { - $( - #[cfg(feature = "macros")] - #[cfg_attr(docsrs, doc(cfg(feature = "macros")))] - #[doc(inline)] - $item - )* - } -} - -macro_rules! cfg_process { - ($($item:item)*) => { - $( - #[cfg(feature = "process")] - #[cfg_attr(docsrs, doc(cfg(feature = "process")))] - #[cfg(not(loom))] - $item - )* - } -} - -macro_rules! cfg_signal { - ($($item:item)*) => { - $( - #[cfg(feature = "signal")] - #[cfg_attr(docsrs, doc(cfg(feature = "signal")))] - #[cfg(not(loom))] - $item - )* - } -} - -macro_rules! cfg_stream { - ($($item:item)*) => { - $( - #[cfg(feature = "stream")] - #[cfg_attr(docsrs, doc(cfg(feature = "stream")))] - $item - )* - } -} - -macro_rules! cfg_sync { - ($($item:item)*) => { - $( - #[cfg(feature = "sync")] - #[cfg_attr(docsrs, doc(cfg(feature = "sync")))] - $item - )* - } -} - -macro_rules! cfg_not_sync { - ($($item:item)*) => { - $( #[cfg(not(feature = "sync"))] $item )* - } -} - -macro_rules! 
cfg_rt_core { - ($($item:item)*) => { - $( - #[cfg(feature = "rt-core")] - $item - )* - } -} - -macro_rules! doc_rt_core { - ($($item:item)*) => { - $( - #[cfg(feature = "rt-core")] - #[cfg_attr(docsrs, doc(cfg(feature = "rt-core")))] - $item - )* - } -} - -macro_rules! cfg_not_rt_core { - ($($item:item)*) => { - $( #[cfg(not(feature = "rt-core"))] $item )* - } -} - -macro_rules! cfg_rt_threaded { - ($($item:item)*) => { - $( - #[cfg(feature = "rt-threaded")] - #[cfg_attr(docsrs, doc(cfg(feature = "rt-threaded")))] - $item - )* - } -} - -macro_rules! cfg_rt_util { - ($($item:item)*) => { - $( - #[cfg(feature = "rt-util")] - #[cfg_attr(docsrs, doc(cfg(feature = "rt-util")))] - $item - )* - } -} - -macro_rules! cfg_not_rt_threaded { - ($($item:item)*) => { - $( #[cfg(not(feature = "rt-threaded"))] $item )* - } -} - -macro_rules! cfg_tcp { - ($($item:item)*) => { - $( - #[cfg(feature = "tcp")] - #[cfg_attr(docsrs, doc(cfg(feature = "tcp")))] - $item - )* - } -} - -macro_rules! cfg_test_util { - ($($item:item)*) => { - $( - #[cfg(feature = "test-util")] - #[cfg_attr(docsrs, doc(cfg(feature = "test-util")))] - $item - )* - } -} - -macro_rules! cfg_not_test_util { - ($($item:item)*) => { - $( #[cfg(not(feature = "test-util"))] $item )* - } -} - -macro_rules! cfg_time { - ($($item:item)*) => { - $( - #[cfg(feature = "time")] - #[cfg_attr(docsrs, doc(cfg(feature = "time")))] - $item - )* - } -} - -macro_rules! cfg_not_time { - ($($item:item)*) => { - $( #[cfg(not(feature = "time"))] $item )* - } -} - -macro_rules! cfg_udp { - ($($item:item)*) => { - $( - #[cfg(feature = "udp")] - #[cfg_attr(docsrs, doc(cfg(feature = "udp")))] - $item - )* - } -} - -macro_rules! cfg_uds { - ($($item:item)*) => { - $( - #[cfg(all(unix, feature = "uds"))] - #[cfg_attr(docsrs, doc(cfg(feature = "uds")))] - $item - )* - } -} - -macro_rules! cfg_unstable { - ($($item:item)*) => { - $( - #[cfg(tokio_unstable)] - #[cfg_attr(docsrs, doc(cfg(tokio_unstable)))] - $item - )* - } -} - -macro_rules! cfg_trace { - ($($item:item)*) => { - $( - #[cfg(feature = "tracing")] - #[cfg_attr(docsrs, doc(cfg(feature = "tracing")))] - $item - )* - } -} - -macro_rules! cfg_not_trace { - ($($item:item)*) => { - $( - #[cfg(not(feature = "tracing"))] - $item - )* - } -} - -macro_rules! cfg_coop { - ($($item:item)*) => { - $( - #[cfg(any( - feature = "blocking", - feature = "dns", - feature = "fs", - feature = "io-driver", - feature = "io-std", - feature = "process", - feature = "rt-core", - feature = "sync", - feature = "stream", - feature = "time" - ))] - $item - )* - } -} diff --git a/third_party/rust/tokio-0.2.25/src/macros/join.rs b/third_party/rust/tokio-0.2.25/src/macros/join.rs deleted file mode 100644 index 5f37af510d3c..000000000000 --- a/third_party/rust/tokio-0.2.25/src/macros/join.rs +++ /dev/null @@ -1,119 +0,0 @@ -/// Wait on multiple concurrent branches, returning when **all** branches -/// complete. -/// -/// The `join!` macro must be used inside of async functions, closures, and -/// blocks. -/// -/// The `join!` macro takes a list of async expressions and evaluates them -/// concurrently on the same task. Each async expression evaluates to a future -/// and the futures from each expression are multiplexed on the current task. -/// -/// When working with async expressions returning `Result`, `join!` will wait -/// for **all** branches complete regardless if any complete with `Err`. Use -/// [`try_join!`] to return early when `Err` is encountered. 
-/// -/// [`try_join!`]: macro@try_join -/// -/// # Notes -/// -/// The supplied futures are stored inline and does not require allocating a -/// `Vec`. -/// -/// ### Runtime characteristics -/// -/// By running all async expressions on the current task, the expressions are -/// able to run **concurrently** but not in **parallel**. This means all -/// expressions are run on the same thread and if one branch blocks the thread, -/// all other expressions will be unable to continue. If parallelism is -/// required, spawn each async expression using [`tokio::spawn`] and pass the -/// join handle to `join!`. -/// -/// [`tokio::spawn`]: crate::spawn -/// -/// # Examples -/// -/// Basic join with two branches -/// -/// ``` -/// async fn do_stuff_async() { -/// // async work -/// } -/// -/// async fn more_async_work() { -/// // more here -/// } -/// -/// #[tokio::main] -/// async fn main() { -/// let (first, second) = tokio::join!( -/// do_stuff_async(), -/// more_async_work()); -/// -/// // do something with the values -/// } -/// ``` -#[macro_export] -#[cfg_attr(docsrs, doc(cfg(feature = "macros")))] -macro_rules! join { - (@ { - // One `_` for each branch in the `join!` macro. This is not used once - // normalization is complete. - ( $($count:tt)* ) - - // Normalized join! branches - $( ( $($skip:tt)* ) $e:expr, )* - - }) => {{ - use $crate::macros::support::{maybe_done, poll_fn, Future, Pin}; - use $crate::macros::support::Poll::{Ready, Pending}; - - // Safety: nothing must be moved out of `futures`. This is to satisfy - // the requirement of `Pin::new_unchecked` called below. - let mut futures = ( $( maybe_done($e), )* ); - - poll_fn(move |cx| { - let mut is_pending = false; - - $( - // Extract the future for this branch from the tuple. - let ( $($skip,)* fut, .. ) = &mut futures; - - // Safety: future is stored on the stack above - // and never moved. - let mut fut = unsafe { Pin::new_unchecked(fut) }; - - // Try polling - if fut.poll(cx).is_pending() { - is_pending = true; - } - )* - - if is_pending { - Pending - } else { - Ready(($({ - // Extract the future for this branch from the tuple. - let ( $($skip,)* fut, .. ) = &mut futures; - - // Safety: future is stored on the stack above - // and never moved. - let mut fut = unsafe { Pin::new_unchecked(fut) }; - - fut.take_output().expect("expected completed future") - },)*)) - } - }).await - }}; - - // ===== Normalize ===== - - (@ { ( $($s:tt)* ) $($t:tt)* } $e:expr, $($r:tt)* ) => { - $crate::join!(@{ ($($s)* _) $($t)* ($($s)*) $e, } $($r)*) - }; - - // ===== Entry point ===== - - ( $($e:expr),* $(,)?) => { - $crate::join!(@{ () } $($e,)*) - }; -} diff --git a/third_party/rust/tokio-0.2.25/src/macros/loom.rs b/third_party/rust/tokio-0.2.25/src/macros/loom.rs deleted file mode 100644 index d57d9fb0f7b2..000000000000 --- a/third_party/rust/tokio-0.2.25/src/macros/loom.rs +++ /dev/null @@ -1,12 +0,0 @@ -macro_rules! 
if_loom { - ($($t:tt)*) => {{ - #[cfg(loom)] - const LOOM: bool = true; - #[cfg(not(loom))] - const LOOM: bool = false; - - if LOOM { - $($t)* - } - }} -} diff --git a/third_party/rust/tokio-0.2.25/src/macros/mod.rs b/third_party/rust/tokio-0.2.25/src/macros/mod.rs deleted file mode 100644 index 2643c360189c..000000000000 --- a/third_party/rust/tokio-0.2.25/src/macros/mod.rs +++ /dev/null @@ -1,35 +0,0 @@ -#![cfg_attr(not(feature = "full"), allow(unused_macros))] - -#[macro_use] -mod cfg; - -#[macro_use] -mod loom; - -#[macro_use] -mod pin; - -#[macro_use] -mod ready; - -#[macro_use] -mod thread_local; - -#[macro_use] -#[cfg(feature = "rt-core")] -pub(crate) mod scoped_tls; - -cfg_macros! { - #[macro_use] - mod select; - - #[macro_use] - mod join; - - #[macro_use] - mod try_join; -} - -// Includes re-exports needed to implement macros -#[doc(hidden)] -pub mod support; diff --git a/third_party/rust/tokio-0.2.25/src/macros/pin.rs b/third_party/rust/tokio-0.2.25/src/macros/pin.rs deleted file mode 100644 index ed844ef7d112..000000000000 --- a/third_party/rust/tokio-0.2.25/src/macros/pin.rs +++ /dev/null @@ -1,144 +0,0 @@ -/// Pins a value on the stack. -/// -/// Calls to `async fn` return anonymous [`Future`] values that are `!Unpin`. -/// These values must be pinned before they can be polled. Calling `.await` will -/// handle this, but consumes the future. If it is required to call `.await` on -/// a `&mut _` reference, the caller is responsible for pinning the future. -/// -/// Pinning may be done by allocating with [`Box::pin`] or by using the stack -/// with the `pin!` macro. -/// -/// The following will **fail to compile**: -/// -/// ```compile_fail -/// async fn my_async_fn() { -/// // async logic here -/// } -/// -/// #[tokio::main] -/// async fn main() { -/// let mut future = my_async_fn(); -/// (&mut future).await; -/// } -/// ``` -/// -/// To make this work requires pinning: -/// -/// ``` -/// use tokio::pin; -/// -/// async fn my_async_fn() { -/// // async logic here -/// } -/// -/// #[tokio::main] -/// async fn main() { -/// let future = my_async_fn(); -/// pin!(future); -/// -/// (&mut future).await; -/// } -/// ``` -/// -/// Pinning is useful when using `select!` and stream operators that require `T: -/// Stream + Unpin`. -/// -/// [`Future`]: trait@std::future::Future -/// [`Box::pin`]: # -/// -/// # Usage -/// -/// The `pin!` macro takes **identifiers** as arguments. It does **not** work -/// with expressions. -/// -/// The following does not compile as an expression is passed to `pin!`. -/// -/// ```compile_fail -/// async fn my_async_fn() { -/// // async logic here -/// } -/// -/// #[tokio::main] -/// async fn main() { -/// let mut future = pin!(my_async_fn()); -/// (&mut future).await; -/// } -/// ``` -/// -/// # Examples -/// -/// Using with select: -/// -/// ``` -/// use tokio::{pin, select}; -/// use tokio::stream::{self, StreamExt}; -/// -/// async fn my_async_fn() { -/// // async logic here -/// } -/// -/// #[tokio::main] -/// async fn main() { -/// let mut stream = stream::iter(vec![1, 2, 3, 4]); -/// -/// let future = my_async_fn(); -/// pin!(future); -/// -/// loop { -/// select! { -/// _ = &mut future => { -/// // Stop looping `future` will be polled after completion -/// break; -/// } -/// Some(val) = stream.next() => { -/// println!("got value = {}", val); -/// } -/// } -/// } -/// } -/// ``` -/// -/// Because assigning to a variable followed by pinning is common, there is also -/// a variant of the macro that supports doing both in one go. 
-/// -/// ``` -/// use tokio::{pin, select}; -/// -/// async fn my_async_fn() { -/// // async logic here -/// } -/// -/// #[tokio::main] -/// async fn main() { -/// pin! { -/// let future1 = my_async_fn(); -/// let future2 = my_async_fn(); -/// } -/// -/// select! { -/// _ = &mut future1 => {} -/// _ = &mut future2 => {} -/// } -/// } -/// ``` -#[macro_export] -macro_rules! pin { - ($($x:ident),*) => { $( - // Move the value to ensure that it is owned - let mut $x = $x; - // Shadow the original binding so that it can't be directly accessed - // ever again. - #[allow(unused_mut)] - let mut $x = unsafe { - $crate::macros::support::Pin::new_unchecked(&mut $x) - }; - )* }; - ($( - let $x:ident = $init:expr; - )*) => { - $( - let $x = $init; - $crate::pin!($x); - )* - }; -} diff --git a/third_party/rust/tokio-0.2.25/src/macros/ready.rs b/third_party/rust/tokio-0.2.25/src/macros/ready.rs deleted file mode 100644 index 1f48623b8012..000000000000 --- a/third_party/rust/tokio-0.2.25/src/macros/ready.rs +++ /dev/null @@ -1,8 +0,0 @@ -macro_rules! ready { - ($e:expr $(,)?) => { - match $e { - std::task::Poll::Ready(t) => t, - std::task::Poll::Pending => return std::task::Poll::Pending, - } - }; -} diff --git a/third_party/rust/tokio-0.2.25/src/macros/scoped_tls.rs b/third_party/rust/tokio-0.2.25/src/macros/scoped_tls.rs deleted file mode 100644 index 886f9d44b0e8..000000000000 --- a/third_party/rust/tokio-0.2.25/src/macros/scoped_tls.rs +++ /dev/null @@ -1,79 +0,0 @@ -use crate::loom::thread::LocalKey; - -use std::cell::Cell; -use std::marker; - -/// Set a reference as a thread-local -macro_rules! scoped_thread_local { - ($(#[$attrs:meta])* $vis:vis static $name:ident: $ty:ty) => ( - $(#[$attrs])* - $vis static $name: $crate::macros::scoped_tls::ScopedKey<$ty> - = $crate::macros::scoped_tls::ScopedKey { - inner: { - thread_local!(static FOO: ::std::cell::Cell<*const ()> = { - std::cell::Cell::new(::std::ptr::null()) - }); - &FOO - }, - _marker: ::std::marker::PhantomData, - }; - ) -} - -/// Type representing a thread local storage key corresponding to a reference -/// to the type parameter `T`. -pub(crate) struct ScopedKey { - #[doc(hidden)] - pub(crate) inner: &'static LocalKey>, - #[doc(hidden)] - pub(crate) _marker: marker::PhantomData, -} - -unsafe impl Sync for ScopedKey {} - -impl ScopedKey { - /// Inserts a value into this scoped thread local storage slot for a - /// duration of a closure. - pub(crate) fn set(&'static self, t: &T, f: F) -> R - where - F: FnOnce() -> R, - { - struct Reset { - key: &'static LocalKey>, - val: *const (), - } - - impl Drop for Reset { - fn drop(&mut self) { - self.key.with(|c| c.set(self.val)); - } - } - - let prev = self.inner.with(|c| { - let prev = c.get(); - c.set(t as *const _ as *const ()); - prev - }); - - let _reset = Reset { - key: self.inner, - val: prev, - }; - - f() - } - - /// Gets a value out of this scoped variable. - pub(crate) fn with(&'static self, f: F) -> R - where - F: FnOnce(Option<&T>) -> R, - { - let val = self.inner.with(|c| c.get()); - - if val.is_null() { - f(None) - } else { - unsafe { f(Some(&*(val as *const T))) } - } - } -} diff --git a/third_party/rust/tokio-0.2.25/src/macros/select.rs b/third_party/rust/tokio-0.2.25/src/macros/select.rs deleted file mode 100644 index 6497a510e989..000000000000 --- a/third_party/rust/tokio-0.2.25/src/macros/select.rs +++ /dev/null @@ -1,880 +0,0 @@ -/// Wait on multiple concurrent branches, returning when the **first** branch -/// completes, cancelling the remaining branches. 
-/// -/// The `select!` macro must be used inside of async functions, closures, and -/// blocks. -/// -/// The `select!` macro accepts one or more branches with the following pattern: -/// -/// ```text -/// = (, if )? => , -/// ``` -/// -/// Additionally, the `select!` macro may include a single, optional `else` -/// branch, which evaluates if none of the other branches match their patterns: -/// -/// ```text -/// else -/// ``` -/// -/// The macro aggregates all `` expressions and runs them -/// concurrently on the **current** task. Once the **first** expression -/// completes with a value that matches its ``, the `select!` macro -/// returns the result of evaluating the completed branch's `` -/// expression. -/// -/// Additionally, each branch may include an optional `if` precondition. This -/// precondition is evaluated **before** the ``. If the -/// precondition returns `false`, the branch is entirely disabled. This -/// capability is useful when using `select!` within a loop. -/// -/// The complete lifecycle of a `select!` expression is as follows: -/// -/// 1. Evaluate all provided `` expressions. If the precondition -/// returns `false`, disable the branch for the remainder of the current call -/// to `select!`. Re-entering `select!` due to a loop clears the "disabled" -/// state. -/// 2. Aggregate the ``s from each branch, including the -/// disabled ones. If the branch is disabled, `` is still -/// evaluated, but the resulting future is not polled. -/// 3. Concurrently await on the results for all remaining ``s. -/// 4. Once an `` returns a value, attempt to apply the value -/// to the provided ``, if the pattern matches, evaluate `` -/// and return. If the pattern **does not** match, disable the current branch -/// and for the remainder of the current call to `select!`. Continue from step 3. -/// 5. If **all** branches are disabled, evaluate the `else` expression. If none -/// is provided, panic. -/// -/// # Notes -/// -/// ### Runtime characteristics -/// -/// By running all async expressions on the current task, the expressions are -/// able to run **concurrently** but not in **parallel**. This means all -/// expressions are run on the same thread and if one branch blocks the thread, -/// all other expressions will be unable to continue. If parallelism is -/// required, spawn each async expression using [`tokio::spawn`] and pass the -/// join handle to `select!`. -/// -/// [`tokio::spawn`]: crate::spawn -/// -/// ### Avoid racy `if` preconditions -/// -/// Given that `if` preconditions are used to disable `select!` branches, some -/// caution must be used to avoid missing values. -/// -/// For example, here is **incorrect** usage of `delay` with `if`. The objective -/// is to repeatedly run an asynchronous task for up to 50 milliseconds. -/// However, there is a potential for the `delay` completion to be missed. -/// -/// ```no_run -/// use tokio::time::{self, Duration}; -/// -/// async fn some_async_work() { -/// // do work -/// } -/// -/// #[tokio::main] -/// async fn main() { -/// let mut delay = time::delay_for(Duration::from_millis(50)); -/// -/// while !delay.is_elapsed() { -/// tokio::select! { -/// _ = &mut delay, if !delay.is_elapsed() => { -/// println!("operation timed out"); -/// } -/// _ = some_async_work() => { -/// println!("operation completed"); -/// } -/// } -/// } -/// } -/// ``` -/// -/// In the above example, `delay.is_elapsed()` may return `true` even if -/// `delay.poll()` never returned `Ready`. 
This opens up a potential race -/// condition where `delay` expires between the `while !delay.is_elapsed()` -/// check and the call to `select!` resulting in the `some_async_work()` call to -/// run uninterrupted despite the delay having elapsed. -/// -/// One way to write the above example without the race would be: -/// -/// ``` -/// use tokio::time::{self, Duration}; -/// -/// async fn some_async_work() { -/// # time::delay_for(Duration::from_millis(10)).await; -/// // do work -/// } -/// -/// #[tokio::main] -/// async fn main() { -/// let mut delay = time::delay_for(Duration::from_millis(50)); -/// -/// loop { -/// tokio::select! { -/// _ = &mut delay => { -/// println!("operation timed out"); -/// break; -/// } -/// _ = some_async_work() => { -/// println!("operation completed"); -/// } -/// } -/// } -/// } -/// ``` -/// -/// ### Fairness -/// -/// `select!` randomly picks a branch to check first. This provides some level -/// of fairness when calling `select!` in a loop with branches that are always -/// ready. -/// -/// # Panics -/// -/// `select!` panics if all branches are disabled **and** there is no provided -/// `else` branch. A branch is disabled when the provided `if` precondition -/// returns `false` **or** when the pattern does not match the result of `. -/// -/// # Examples -/// -/// Basic select with two branches. -/// -/// ``` -/// async fn do_stuff_async() { -/// // async work -/// } -/// -/// async fn more_async_work() { -/// // more here -/// } -/// -/// #[tokio::main] -/// async fn main() { -/// tokio::select! { -/// _ = do_stuff_async() => { -/// println!("do_stuff_async() completed first") -/// } -/// _ = more_async_work() => { -/// println!("more_async_work() completed first") -/// } -/// }; -/// } -/// ``` -/// -/// Basic stream selecting. -/// -/// ``` -/// use tokio::stream::{self, StreamExt}; -/// -/// #[tokio::main] -/// async fn main() { -/// let mut stream1 = stream::iter(vec![1, 2, 3]); -/// let mut stream2 = stream::iter(vec![4, 5, 6]); -/// -/// let next = tokio::select! { -/// v = stream1.next() => v.unwrap(), -/// v = stream2.next() => v.unwrap(), -/// }; -/// -/// assert!(next == 1 || next == 4); -/// } -/// ``` -/// -/// Collect the contents of two streams. In this example, we rely on pattern -/// matching and the fact that `stream::iter` is "fused", i.e. once the stream -/// is complete, all calls to `next()` return `None`. -/// -/// ``` -/// use tokio::stream::{self, StreamExt}; -/// -/// #[tokio::main] -/// async fn main() { -/// let mut stream1 = stream::iter(vec![1, 2, 3]); -/// let mut stream2 = stream::iter(vec![4, 5, 6]); -/// -/// let mut values = vec![]; -/// -/// loop { -/// tokio::select! { -/// Some(v) = stream1.next() => values.push(v), -/// Some(v) = stream2.next() => values.push(v), -/// else => break, -/// } -/// } -/// -/// values.sort(); -/// assert_eq!(&[1, 2, 3, 4, 5, 6], &values[..]); -/// } -/// ``` -/// -/// Using the same future in multiple `select!` expressions can be done by passing -/// a reference to the future. Doing so requires the future to be [`Unpin`]. A -/// future can be made [`Unpin`] by either using [`Box::pin`] or stack pinning. -/// -/// [`Unpin`]: std::marker::Unpin -/// [`Box::pin`]: std::boxed::Box::pin -/// -/// Here, a stream is consumed for at most 1 second. 
-/// -/// ``` -/// use tokio::stream::{self, StreamExt}; -/// use tokio::time::{self, Duration}; -/// -/// #[tokio::main] -/// async fn main() { -/// let mut stream = stream::iter(vec![1, 2, 3]); -/// let mut delay = time::delay_for(Duration::from_secs(1)); -/// -/// loop { -/// tokio::select! { -/// maybe_v = stream.next() => { -/// if let Some(v) = maybe_v { -/// println!("got = {}", v); -/// } else { -/// break; -/// } -/// } -/// _ = &mut delay => { -/// println!("timeout"); -/// break; -/// } -/// } -/// } -/// } -/// ``` -/// -/// Joining two values using `select!`. -/// -/// ``` -/// use tokio::sync::oneshot; -/// -/// #[tokio::main] -/// async fn main() { -/// let (tx1, mut rx1) = oneshot::channel(); -/// let (tx2, mut rx2) = oneshot::channel(); -/// -/// tokio::spawn(async move { -/// tx1.send("first").unwrap(); -/// }); -/// -/// tokio::spawn(async move { -/// tx2.send("second").unwrap(); -/// }); -/// -/// let mut a = None; -/// let mut b = None; -/// -/// while a.is_none() || b.is_none() { -/// tokio::select! { -/// v1 = (&mut rx1), if a.is_none() => a = Some(v1.unwrap()), -/// v2 = (&mut rx2), if b.is_none() => b = Some(v2.unwrap()), -/// } -/// } -/// -/// let res = (a.unwrap(), b.unwrap()); -/// -/// assert_eq!(res.0, "first"); -/// assert_eq!(res.1, "second"); -/// } -/// ``` -#[macro_export] -#[cfg_attr(docsrs, doc(cfg(feature = "macros")))] -macro_rules! select { - // Uses a declarative macro to do **most** of the work. While it is possible - // to implement fully with a declarative macro, a procedural macro is used - // to enable improved error messages. - // - // The macro is structured as a tt-muncher. All branches are processed and - // normalized. Once the input is normalized, it is passed to the top-most - // rule. When entering the macro, `@{ }` is inserted at the front. This is - // used to collect the normalized input. - // - // The macro only recurses once per branch. This allows using `select!` - // without requiring the user to increase the recursion limit. - - // All input is normalized, now transform. - (@ { - // One `_` for each branch in the `select!` macro. Passing this to - // `count!` converts $skip to an integer. - ( $($count:tt)* ) - - // Normalized select branches. `( $skip )` is a set of `_` characters. - // There is one `_` for each select branch **before** this one. Given - // that all input futures are stored in a tuple, $skip is useful for - // generating a pattern to reference the future for the current branch. - // $skip is also used as an argument to `count!`, returning the index of - // the current select branch. - $( ( $($skip:tt)* ) $bind:pat = $fut:expr, if $c:expr => $handle:expr, )+ - - // Fallback expression used when all select branches have been disabled. - ; $else:expr - - }) => {{ - // Enter a context where stable "function-like" proc macros can be used. - // - // This module is defined within a scope and should not leak out of this - // macro. - mod util { - // Generate an enum with one variant per select branch - $crate::select_priv_declare_output_enum!( ( $($count)* ) ); - } - - // `tokio::macros::support` is a public, but doc(hidden) module - // including a re-export of all types needed by this macro. - use $crate::macros::support::Future; - use $crate::macros::support::Pin; - use $crate::macros::support::Poll::{Ready, Pending}; - - const BRANCHES: u32 = $crate::count!( $($count)* ); - - let mut disabled: util::Mask = Default::default(); - - // First, invoke all the pre-conditions. 
For any that return true, - // set the appropriate bit in `disabled`. - $( - if !$c { - let mask = 1 << $crate::count!( $($skip)* ); - disabled |= mask; - } - )* - - // Create a scope to separate polling from handling the output. This - // adds borrow checker flexibility when using the macro. - let mut output = { - // Safety: Nothing must be moved out of `futures`. This is to - // satisfy the requirement of `Pin::new_unchecked` called below. - let mut futures = ( $( $fut , )+ ); - - $crate::macros::support::poll_fn(|cx| { - // Track if any branch returns pending. If no branch completes - // **or** returns pending, this implies that all branches are - // disabled. - let mut is_pending = false; - - // Randomly generate a starting point. This makes `select!` a - // bit more fair and avoids always polling the first future. - let start = $crate::macros::support::thread_rng_n(BRANCHES); - - for i in 0..BRANCHES { - let branch; - #[allow(clippy::modulo_one)] - { - branch = (start + i) % BRANCHES; - } - match branch { - $( - #[allow(unreachable_code)] - $crate::count!( $($skip)* ) => { - // First, if the future has previously been - // disabled, do not poll it again. This is done - // by checking the associated bit in the - // `disabled` bit field. - let mask = 1 << branch; - - if disabled & mask == mask { - // The future has been disabled. - continue; - } - - // Extract the future for this branch from the - // tuple - let ( $($skip,)* fut, .. ) = &mut futures; - - // Safety: future is stored on the stack above - // and never moved. - let mut fut = unsafe { Pin::new_unchecked(fut) }; - - // Try polling it - let out = match fut.poll(cx) { - Ready(out) => out, - Pending => { - // Track that at least one future is - // still pending and continue polling. - is_pending = true; - continue; - } - }; - - // Disable the future from future polling. - disabled |= mask; - - // The future returned a value, check if matches - // the specified pattern. - #[allow(unused_variables)] - match &out { - $bind => {} - _ => continue, - } - - // The select is complete, return the value - return Ready($crate::select_variant!(util::Out, ($($skip)*))(out)); - } - )* - _ => unreachable!("reaching this means there probably is an off by one bug"), - } - } - - if is_pending { - Pending - } else { - // All branches have been disabled. - Ready(util::Out::Disabled) - } - }).await - }; - - match output { - $( - $crate::select_variant!(util::Out, ($($skip)*) ($bind)) => $handle, - )* - util::Out::Disabled => $else, - _ => unreachable!("failed to match bind"), - } - }}; - - // ==== Normalize ===== - - // These rules match a single `select!` branch and normalize it for - // processing by the first rule. - - (@ { $($t:tt)* } ) => { - // No `else` branch - $crate::select!(@{ $($t)*; unreachable!() }) - }; - (@ { $($t:tt)* } else => $else:expr $(,)?) 
=> { - $crate::select!(@{ $($t)*; $else }) - }; - (@ { ( $($s:tt)* ) $($t:tt)* } $p:pat = $f:expr, if $c:expr => $h:block, $($r:tt)* ) => { - $crate::select!(@{ ($($s)* _) $($t)* ($($s)*) $p = $f, if $c => $h, } $($r)*) - }; - (@ { ( $($s:tt)* ) $($t:tt)* } $p:pat = $f:expr => $h:block, $($r:tt)* ) => { - $crate::select!(@{ ($($s)* _) $($t)* ($($s)*) $p = $f, if true => $h, } $($r)*) - }; - (@ { ( $($s:tt)* ) $($t:tt)* } $p:pat = $f:expr, if $c:expr => $h:block $($r:tt)* ) => { - $crate::select!(@{ ($($s)* _) $($t)* ($($s)*) $p = $f, if $c => $h, } $($r)*) - }; - (@ { ( $($s:tt)* ) $($t:tt)* } $p:pat = $f:expr => $h:block $($r:tt)* ) => { - $crate::select!(@{ ($($s)* _) $($t)* ($($s)*) $p = $f, if true => $h, } $($r)*) - }; - (@ { ( $($s:tt)* ) $($t:tt)* } $p:pat = $f:expr, if $c:expr => $h:expr ) => { - $crate::select!(@{ ($($s)* _) $($t)* ($($s)*) $p = $f, if $c => $h, }) - }; - (@ { ( $($s:tt)* ) $($t:tt)* } $p:pat = $f:expr => $h:expr ) => { - $crate::select!(@{ ($($s)* _) $($t)* ($($s)*) $p = $f, if true => $h, }) - }; - (@ { ( $($s:tt)* ) $($t:tt)* } $p:pat = $f:expr, if $c:expr => $h:expr, $($r:tt)* ) => { - $crate::select!(@{ ($($s)* _) $($t)* ($($s)*) $p = $f, if $c => $h, } $($r)*) - }; - (@ { ( $($s:tt)* ) $($t:tt)* } $p:pat = $f:expr => $h:expr, $($r:tt)* ) => { - $crate::select!(@{ ($($s)* _) $($t)* ($($s)*) $p = $f, if true => $h, } $($r)*) - }; - - // ===== Entry point ===== - - ( $p:pat = $($t:tt)* ) => { - $crate::select!(@{ () } $p = $($t)*) - }; - () => { - compile_error!("select! requires at least one branch.") - }; -} - -// And here... we manually list out matches for up to 64 branches... I'm not -// happy about it either, but this is how we manage to use a declarative macro! - -#[macro_export] -#[doc(hidden)] -macro_rules! count { - () => { - 0 - }; - (_) => { - 1 - }; - (_ _) => { - 2 - }; - (_ _ _) => { - 3 - }; - (_ _ _ _) => { - 4 - }; - (_ _ _ _ _) => { - 5 - }; - (_ _ _ _ _ _) => { - 6 - }; - (_ _ _ _ _ _ _) => { - 7 - }; - (_ _ _ _ _ _ _ _) => { - 8 - }; - (_ _ _ _ _ _ _ _ _) => { - 9 - }; - (_ _ _ _ _ _ _ _ _ _) => { - 10 - }; - (_ _ _ _ _ _ _ _ _ _ _) => { - 11 - }; - (_ _ _ _ _ _ _ _ _ _ _ _) => { - 12 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _) => { - 13 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 14 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 15 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 16 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 17 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 18 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 19 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 20 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 21 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 22 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 23 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 24 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 25 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 26 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 27 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 28 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 29 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 30 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 31 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 32 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 33 - }; - (_ _ _ _ _ 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 34 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 35 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 36 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 37 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 38 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 39 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 40 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 41 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 42 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 43 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 44 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 45 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 46 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 47 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 48 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 49 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 50 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 51 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 52 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 53 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 54 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 55 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 56 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 57 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 58 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 59 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 60 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 61 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 62 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 63 - }; -} - -#[macro_export] -#[doc(hidden)] -macro_rules! 
select_variant { - ($($p:ident)::*, () $($t:tt)*) => { - $($p)::*::_0 $($t)* - }; - ($($p:ident)::*, (_) $($t:tt)*) => { - $($p)::*::_1 $($t)* - }; - ($($p:ident)::*, (_ _) $($t:tt)*) => { - $($p)::*::_2 $($t)* - }; - ($($p:ident)::*, (_ _ _) $($t:tt)*) => { - $($p)::*::_3 $($t)* - }; - ($($p:ident)::*, (_ _ _ _) $($t:tt)*) => { - $($p)::*::_4 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _) $($t:tt)*) => { - $($p)::*::_5 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_6 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_7 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_8 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_9 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_10 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_11 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_12 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_13 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_14 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_15 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_16 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_17 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_18 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_19 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_20 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_21 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_22 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_23 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_24 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_25 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_26 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_27 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_28 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_29 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_30 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_31 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_32 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_33 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_34 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
_ _ _ _) $($t:tt)*) => { - $($p)::*::_35 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_36 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_37 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_38 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_39 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_40 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_41 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_42 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_43 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_44 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_45 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_46 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_47 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_48 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_49 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_50 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_51 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_52 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_53 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_54 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_55 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_56 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_57 $($t)* - }; - ($($p:ident)::*, (_ _ _ 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_58 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_59 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_60 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_61 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_62 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_63 $($t)* - }; -} diff --git a/third_party/rust/tokio-0.2.25/src/macros/support.rs b/third_party/rust/tokio-0.2.25/src/macros/support.rs deleted file mode 100644 index fc1cdfcfa001..000000000000 --- a/third_party/rust/tokio-0.2.25/src/macros/support.rs +++ /dev/null @@ -1,8 +0,0 @@ -cfg_macros! { - pub use crate::future::{maybe_done, poll_fn}; - pub use crate::util::thread_rng_n; -} - -pub use std::future::Future; -pub use std::pin::Pin; -pub use std::task::Poll; diff --git a/third_party/rust/tokio-0.2.25/src/macros/thread_local.rs b/third_party/rust/tokio-0.2.25/src/macros/thread_local.rs deleted file mode 100644 index d848947350dd..000000000000 --- a/third_party/rust/tokio-0.2.25/src/macros/thread_local.rs +++ /dev/null @@ -1,4 +0,0 @@ -#[cfg(all(loom, test))] -macro_rules! thread_local { - ($($tts:tt)+) => { loom::thread_local!{ $($tts)+ } } -} diff --git a/third_party/rust/tokio-0.2.25/src/macros/try_join.rs b/third_party/rust/tokio-0.2.25/src/macros/try_join.rs deleted file mode 100644 index fa5850ef0e18..000000000000 --- a/third_party/rust/tokio-0.2.25/src/macros/try_join.rs +++ /dev/null @@ -1,132 +0,0 @@ -/// Wait on multiple concurrent branches, returning when **all** branches -/// complete with `Ok(_)` or on the first `Err(_)`. -/// -/// The `try_join!` macro must be used inside of async functions, closures, and -/// blocks. -/// -/// Similar to [`join!`], the `try_join!` macro takes a list of async -/// expressions and evaluates them concurrently on the same task. Each async -/// expression evaluates to a future and the futures from each expression are -/// multiplexed on the current task. The `try_join!` macro returns when **all** -/// branches return with `Ok` or when the **first** branch returns with `Err`. -/// -/// [`join!`]: macro@join -/// -/// # Notes -/// -/// The supplied futures are stored inline and does not require allocating a -/// `Vec`. -/// -/// ### Runtime characteristics -/// -/// By running all async expressions on the current task, the expressions are -/// able to run **concurrently** but not in **parallel**. This means all -/// expressions are run on the same thread and if one branch blocks the thread, -/// all other expressions will be unable to continue. If parallelism is -/// required, spawn each async expression using [`tokio::spawn`] and pass the -/// join handle to `try_join!`. -/// -/// [`tokio::spawn`]: crate::spawn -/// -/// # Examples -/// -/// Basic try_join with two branches. 
-/// -/// ``` -/// async fn do_stuff_async() -> Result<(), &'static str> { -/// // async work -/// # Ok(()) -/// } -/// -/// async fn more_async_work() -> Result<(), &'static str> { -/// // more here -/// # Ok(()) -/// } -/// -/// #[tokio::main] -/// async fn main() { -/// let res = tokio::try_join!( -/// do_stuff_async(), -/// more_async_work()); -/// -/// match res { -/// Ok((first, second)) => { -/// // do something with the values -/// } -/// Err(err) => { -/// println!("processing failed; error = {}", err); -/// } -/// } -/// } -/// ``` -#[macro_export] -#[cfg_attr(docsrs, doc(cfg(feature = "macros")))] -macro_rules! try_join { - (@ { - // One `_` for each branch in the `try_join!` macro. This is not used once - // normalization is complete. - ( $($count:tt)* ) - - // Normalized try_join! branches - $( ( $($skip:tt)* ) $e:expr, )* - - }) => {{ - use $crate::macros::support::{maybe_done, poll_fn, Future, Pin}; - use $crate::macros::support::Poll::{Ready, Pending}; - - // Safety: nothing must be moved out of `futures`. This is to satisfy - // the requirement of `Pin::new_unchecked` called below. - let mut futures = ( $( maybe_done($e), )* ); - - poll_fn(move |cx| { - let mut is_pending = false; - - $( - // Extract the future for this branch from the tuple. - let ( $($skip,)* fut, .. ) = &mut futures; - - // Safety: future is stored on the stack above - // and never moved. - let mut fut = unsafe { Pin::new_unchecked(fut) }; - - // Try polling - if fut.as_mut().poll(cx).is_pending() { - is_pending = true; - } else if fut.as_mut().output_mut().expect("expected completed future").is_err() { - return Ready(Err(fut.take_output().expect("expected completed future").err().unwrap())) - } - )* - - if is_pending { - Pending - } else { - Ready(Ok(($({ - // Extract the future for this branch from the tuple. - let ( $($skip,)* fut, .. ) = &mut futures; - - // Safety: future is stored on the stack above - // and never moved. - let mut fut = unsafe { Pin::new_unchecked(fut) }; - - fut - .take_output() - .expect("expected completed future") - .ok() - .expect("expected Ok(_)") - },)*))) - } - }).await - }}; - - // ===== Normalize ===== - - (@ { ( $($s:tt)* ) $($t:tt)* } $e:expr, $($r:tt)* ) => { - $crate::try_join!(@{ ($($s)* _) $($t)* ($($s)*) $e, } $($r)*) - }; - - // ===== Entry point ===== - - ( $($e:expr),* $(,)?) => { - $crate::try_join!(@{ () } $($e,)*) - }; -} diff --git a/third_party/rust/tokio-0.2.25/src/net/addr.rs b/third_party/rust/tokio-0.2.25/src/net/addr.rs deleted file mode 100644 index ed00ca4a7c60..000000000000 --- a/third_party/rust/tokio-0.2.25/src/net/addr.rs +++ /dev/null @@ -1,308 +0,0 @@ -use crate::future; - -use std::io; -use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6}; - -/// Converts or resolves without blocking to one or more `SocketAddr` values. -/// -/// # DNS -/// -/// Implementations of `ToSocketAddrs` for string types require a DNS lookup. -/// These implementations are only provided when Tokio is used with the -/// **`dns`** feature flag. -/// -/// # Calling -/// -/// Currently, this trait is only used as an argument to Tokio functions that -/// need to reference a target socket address. To perform a `SocketAddr` -/// conversion directly, use [`lookup_host()`](super::lookup_host()). -/// -/// This trait is sealed and is intended to be opaque. The details of the trait -/// will change. Stabilization is pending enhancements to the Rust language. 
-pub trait ToSocketAddrs: sealed::ToSocketAddrsPriv {} - -type ReadyFuture = future::Ready>; - -// ===== impl &impl ToSocketAddrs ===== - -impl ToSocketAddrs for &T {} - -impl sealed::ToSocketAddrsPriv for &T -where - T: sealed::ToSocketAddrsPriv + ?Sized, -{ - type Iter = T::Iter; - type Future = T::Future; - - fn to_socket_addrs(&self) -> Self::Future { - (**self).to_socket_addrs() - } -} - -// ===== impl SocketAddr ===== - -impl ToSocketAddrs for SocketAddr {} - -impl sealed::ToSocketAddrsPriv for SocketAddr { - type Iter = std::option::IntoIter; - type Future = ReadyFuture; - - fn to_socket_addrs(&self) -> Self::Future { - let iter = Some(*self).into_iter(); - future::ok(iter) - } -} - -// ===== impl SocketAddrV4 ===== - -impl ToSocketAddrs for SocketAddrV4 {} - -impl sealed::ToSocketAddrsPriv for SocketAddrV4 { - type Iter = std::option::IntoIter; - type Future = ReadyFuture; - - fn to_socket_addrs(&self) -> Self::Future { - SocketAddr::V4(*self).to_socket_addrs() - } -} - -// ===== impl SocketAddrV6 ===== - -impl ToSocketAddrs for SocketAddrV6 {} - -impl sealed::ToSocketAddrsPriv for SocketAddrV6 { - type Iter = std::option::IntoIter; - type Future = ReadyFuture; - - fn to_socket_addrs(&self) -> Self::Future { - SocketAddr::V6(*self).to_socket_addrs() - } -} - -// ===== impl (IpAddr, u16) ===== - -impl ToSocketAddrs for (IpAddr, u16) {} - -impl sealed::ToSocketAddrsPriv for (IpAddr, u16) { - type Iter = std::option::IntoIter; - type Future = ReadyFuture; - - fn to_socket_addrs(&self) -> Self::Future { - let iter = Some(SocketAddr::from(*self)).into_iter(); - future::ok(iter) - } -} - -// ===== impl (Ipv4Addr, u16) ===== - -impl ToSocketAddrs for (Ipv4Addr, u16) {} - -impl sealed::ToSocketAddrsPriv for (Ipv4Addr, u16) { - type Iter = std::option::IntoIter; - type Future = ReadyFuture; - - fn to_socket_addrs(&self) -> Self::Future { - let (ip, port) = *self; - SocketAddrV4::new(ip, port).to_socket_addrs() - } -} - -// ===== impl (Ipv6Addr, u16) ===== - -impl ToSocketAddrs for (Ipv6Addr, u16) {} - -impl sealed::ToSocketAddrsPriv for (Ipv6Addr, u16) { - type Iter = std::option::IntoIter; - type Future = ReadyFuture; - - fn to_socket_addrs(&self) -> Self::Future { - let (ip, port) = *self; - SocketAddrV6::new(ip, port, 0, 0).to_socket_addrs() - } -} - -// ===== impl &[SocketAddr] ===== - -impl ToSocketAddrs for &[SocketAddr] {} - -impl sealed::ToSocketAddrsPriv for &[SocketAddr] { - type Iter = std::vec::IntoIter; - type Future = ReadyFuture; - - fn to_socket_addrs(&self) -> Self::Future { - let iter = self.to_vec().into_iter(); - future::ok(iter) - } -} - -cfg_dns! 
{ - // ===== impl str ===== - - impl ToSocketAddrs for str {} - - impl sealed::ToSocketAddrsPriv for str { - type Iter = sealed::OneOrMore; - type Future = sealed::MaybeReady; - - fn to_socket_addrs(&self) -> Self::Future { - use crate::runtime::spawn_blocking; - use sealed::MaybeReady; - - // First check if the input parses as a socket address - let res: Result = self.parse(); - - if let Ok(addr) = res { - return MaybeReady::Ready(Some(addr)); - } - - // Run DNS lookup on the blocking pool - let s = self.to_owned(); - - MaybeReady::Blocking(spawn_blocking(move || { - std::net::ToSocketAddrs::to_socket_addrs(&s) - })) - } - } - - // ===== impl (&str, u16) ===== - - impl ToSocketAddrs for (&str, u16) {} - - impl sealed::ToSocketAddrsPriv for (&str, u16) { - type Iter = sealed::OneOrMore; - type Future = sealed::MaybeReady; - - fn to_socket_addrs(&self) -> Self::Future { - use crate::runtime::spawn_blocking; - use sealed::MaybeReady; - - let (host, port) = *self; - - // try to parse the host as a regular IP address first - if let Ok(addr) = host.parse::() { - let addr = SocketAddrV4::new(addr, port); - let addr = SocketAddr::V4(addr); - - return MaybeReady::Ready(Some(addr)); - } - - if let Ok(addr) = host.parse::() { - let addr = SocketAddrV6::new(addr, port, 0, 0); - let addr = SocketAddr::V6(addr); - - return MaybeReady::Ready(Some(addr)); - } - - let host = host.to_owned(); - - MaybeReady::Blocking(spawn_blocking(move || { - std::net::ToSocketAddrs::to_socket_addrs(&(&host[..], port)) - })) - } - } - - // ===== impl (String, u16) ===== - - impl ToSocketAddrs for (String, u16) {} - - impl sealed::ToSocketAddrsPriv for (String, u16) { - type Iter = sealed::OneOrMore; - type Future = sealed::MaybeReady; - - fn to_socket_addrs(&self) -> Self::Future { - (self.0.as_str(), self.1).to_socket_addrs() - } - } - - // ===== impl String ===== - - impl ToSocketAddrs for String {} - - impl sealed::ToSocketAddrsPriv for String { - type Iter = ::Iter; - type Future = ::Future; - - fn to_socket_addrs(&self) -> Self::Future { - (&self[..]).to_socket_addrs() - } - } -} - -pub(crate) mod sealed { - //! The contents of this trait are intended to remain private and __not__ - //! part of the `ToSocketAddrs` public API. The details will change over - //! time. - - use std::future::Future; - use std::io; - use std::net::SocketAddr; - - cfg_dns! { - use crate::task::JoinHandle; - - use std::option; - use std::pin::Pin; - use std::task::{Context, Poll}; - use std::vec; - } - - #[doc(hidden)] - pub trait ToSocketAddrsPriv { - type Iter: Iterator + Send + 'static; - type Future: Future> + Send + 'static; - - fn to_socket_addrs(&self) -> Self::Future; - } - - cfg_dns! 
{ - #[doc(hidden)] - #[derive(Debug)] - pub enum MaybeReady { - Ready(Option), - Blocking(JoinHandle>>), - } - - #[doc(hidden)] - #[derive(Debug)] - pub enum OneOrMore { - One(option::IntoIter), - More(vec::IntoIter), - } - - impl Future for MaybeReady { - type Output = io::Result; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - match *self { - MaybeReady::Ready(ref mut i) => { - let iter = OneOrMore::One(i.take().into_iter()); - Poll::Ready(Ok(iter)) - } - MaybeReady::Blocking(ref mut rx) => { - let res = ready!(Pin::new(rx).poll(cx))?.map(OneOrMore::More); - - Poll::Ready(res) - } - } - } - } - - impl Iterator for OneOrMore { - type Item = SocketAddr; - - fn next(&mut self) -> Option { - match self { - OneOrMore::One(i) => i.next(), - OneOrMore::More(i) => i.next(), - } - } - - fn size_hint(&self) -> (usize, Option) { - match self { - OneOrMore::One(i) => i.size_hint(), - OneOrMore::More(i) => i.size_hint(), - } - } - } - } -} diff --git a/third_party/rust/tokio-0.2.25/src/net/lookup_host.rs b/third_party/rust/tokio-0.2.25/src/net/lookup_host.rs deleted file mode 100644 index 3098b463e38d..000000000000 --- a/third_party/rust/tokio-0.2.25/src/net/lookup_host.rs +++ /dev/null @@ -1,38 +0,0 @@ -cfg_dns! { - use crate::net::addr::ToSocketAddrs; - - use std::io; - use std::net::SocketAddr; - - /// Performs a DNS resolution. - /// - /// The returned iterator may not actually yield any values depending on the - /// outcome of any resolution performed. - /// - /// This API is not intended to cover all DNS use cases. Anything beyond the - /// basic use case should be done with a specialized library. - /// - /// # Examples - /// - /// To resolve a DNS entry: - /// - /// ```no_run - /// use tokio::net; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// for addr in net::lookup_host("localhost:3000").await? { - /// println!("socket address is {}", addr); - /// } - /// - /// Ok(()) - /// } - /// ``` - pub async fn lookup_host(host: T) -> io::Result> - where - T: ToSocketAddrs - { - host.to_socket_addrs().await - } -} diff --git a/third_party/rust/tokio-0.2.25/src/net/mod.rs b/third_party/rust/tokio-0.2.25/src/net/mod.rs deleted file mode 100644 index da6ad1fc4a31..000000000000 --- a/third_party/rust/tokio-0.2.25/src/net/mod.rs +++ /dev/null @@ -1,49 +0,0 @@ -#![cfg(not(loom))] - -//! TCP/UDP/Unix bindings for `tokio`. -//! -//! This module contains the TCP/UDP/Unix networking types, similar to the standard -//! library, which can be used to implement networking protocols. -//! -//! # Organization -//! -//! * [`TcpListener`] and [`TcpStream`] provide functionality for communication over TCP -//! * [`UdpSocket`] provides functionality for communication over UDP -//! * [`UnixListener`] and [`UnixStream`] provide functionality for communication over a -//! Unix Domain Stream Socket **(available on Unix only)** -//! * [`UnixDatagram`] provides functionality for communication -//! over Unix Domain Datagram Socket **(available on Unix only)** - -//! -//! [`TcpListener`]: TcpListener -//! [`TcpStream`]: TcpStream -//! [`UdpSocket`]: UdpSocket -//! [`UnixListener`]: UnixListener -//! [`UnixStream`]: UnixStream -//! [`UnixDatagram`]: UnixDatagram - -mod addr; -pub use addr::ToSocketAddrs; - -cfg_dns! { - mod lookup_host; - pub use lookup_host::lookup_host; -} - -cfg_tcp! { - pub mod tcp; - pub use tcp::listener::TcpListener; - pub use tcp::stream::TcpStream; -} - -cfg_udp! { - pub mod udp; - pub use udp::socket::UdpSocket; -} - -cfg_uds! 
{ - pub mod unix; - pub use unix::datagram::socket::UnixDatagram; - pub use unix::listener::UnixListener; - pub use unix::stream::UnixStream; -} diff --git a/third_party/rust/tokio-0.2.25/src/net/tcp/incoming.rs b/third_party/rust/tokio-0.2.25/src/net/tcp/incoming.rs deleted file mode 100644 index 062be1e9cf97..000000000000 --- a/third_party/rust/tokio-0.2.25/src/net/tcp/incoming.rs +++ /dev/null @@ -1,42 +0,0 @@ -use crate::net::tcp::{TcpListener, TcpStream}; - -use std::io; -use std::pin::Pin; -use std::task::{Context, Poll}; - -/// Stream returned by the `TcpListener::incoming` function representing the -/// stream of sockets received from a listener. -#[must_use = "streams do nothing unless polled"] -#[derive(Debug)] -pub struct Incoming<'a> { - inner: &'a mut TcpListener, -} - -impl Incoming<'_> { - pub(crate) fn new(listener: &mut TcpListener) -> Incoming<'_> { - Incoming { inner: listener } - } - - /// Attempts to poll `TcpStream` by polling inner `TcpListener` to accept - /// connection. - /// - /// If `TcpListener` isn't ready yet, `Poll::Pending` is returned and - /// current task will be notified by a waker. - pub fn poll_accept( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll> { - let (socket, _) = ready!(self.inner.poll_accept(cx))?; - Poll::Ready(Ok(socket)) - } -} - -#[cfg(feature = "stream")] -impl crate::stream::Stream for Incoming<'_> { - type Item = io::Result; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let (socket, _) = ready!(self.inner.poll_accept(cx))?; - Poll::Ready(Some(Ok(socket))) - } -} diff --git a/third_party/rust/tokio-0.2.25/src/net/tcp/listener.rs b/third_party/rust/tokio-0.2.25/src/net/tcp/listener.rs deleted file mode 100644 index bb25db306bdd..000000000000 --- a/third_party/rust/tokio-0.2.25/src/net/tcp/listener.rs +++ /dev/null @@ -1,466 +0,0 @@ -use crate::future::poll_fn; -use crate::io::PollEvented; -use crate::net::tcp::{Incoming, TcpStream}; -use crate::net::ToSocketAddrs; - -use std::convert::TryFrom; -use std::fmt; -use std::io; -use std::net::{self, SocketAddr}; -use std::task::{Context, Poll}; - -cfg_tcp! { - /// A TCP socket server, listening for connections. - /// - /// You can accept a new connection by using the [`accept`](`TcpListener::accept`) method. Alternatively `TcpListener` - /// implements the [`Stream`](`crate::stream::Stream`) trait, which allows you to use the listener in places that want a - /// stream. The stream will never return `None` and will also not yield the peer's `SocketAddr` structure. Iterating over - /// it is equivalent to calling accept in a loop. - /// - /// # Errors - /// - /// Note that accepting a connection can lead to various errors and not all - /// of them are necessarily fatal ‒ for example having too many open file - /// descriptors or the other side closing the connection while it waits in - /// an accept queue. These would terminate the stream if not handled in any - /// way. 
- /// - /// # Examples - /// - /// Using `accept`: - /// ```no_run - /// use tokio::net::TcpListener; - /// - /// use std::io; - /// - /// async fn process_socket(socket: T) { - /// # drop(socket); - /// // do work with socket here - /// } - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut listener = TcpListener::bind("127.0.0.1:8080").await?; - /// - /// loop { - /// let (socket, _) = listener.accept().await?; - /// process_socket(socket).await; - /// } - /// } - /// ``` - /// - /// Using `impl Stream`: - /// ```no_run - /// use tokio::{net::TcpListener, stream::StreamExt}; - /// - /// #[tokio::main] - /// async fn main() { - /// let mut listener = TcpListener::bind("127.0.0.1:8080").await.unwrap(); - /// while let Some(stream) = listener.next().await { - /// match stream { - /// Ok(stream) => { - /// println!("new client!"); - /// } - /// Err(e) => { /* connection failed */ } - /// } - /// } - /// } - /// ``` - pub struct TcpListener { - io: PollEvented, - } -} - -impl TcpListener { - /// Creates a new TcpListener, which will be bound to the specified address. - /// - /// The returned listener is ready for accepting connections. - /// - /// Binding with a port number of 0 will request that the OS assigns a port - /// to this listener. The port allocated can be queried via the `local_addr` - /// method. - /// - /// The address type can be any implementor of the [`ToSocketAddrs`] trait. - /// Note that strings only implement this trait when the **`dns`** feature - /// is enabled, as strings may contain domain names that need to be resolved. - /// - /// If `addr` yields multiple addresses, bind will be attempted with each of - /// the addresses until one succeeds and returns the listener. If none of - /// the addresses succeed in creating a listener, the error returned from - /// the last attempt (the last address) is returned. - /// - /// This function sets the `SO_REUSEADDR` option on the socket. - /// - /// [`ToSocketAddrs`]: trait@crate::net::ToSocketAddrs - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::TcpListener; - /// - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let listener = TcpListener::bind("127.0.0.1:2345").await?; - /// - /// // use the listener - /// - /// # let _ = listener; - /// Ok(()) - /// } - /// ``` - /// - /// Without the `dns` feature: - /// - /// ```no_run - /// use tokio::net::TcpListener; - /// use std::net::Ipv4Addr; - /// - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let listener = TcpListener::bind((Ipv4Addr::new(127, 0, 0, 1), 2345)).await?; - /// - /// // use the listener - /// - /// # let _ = listener; - /// Ok(()) - /// } - /// ``` - pub async fn bind(addr: A) -> io::Result { - let addrs = addr.to_socket_addrs().await?; - - let mut last_err = None; - - for addr in addrs { - match TcpListener::bind_addr(addr) { - Ok(listener) => return Ok(listener), - Err(e) => last_err = Some(e), - } - } - - Err(last_err.unwrap_or_else(|| { - io::Error::new( - io::ErrorKind::InvalidInput, - "could not resolve to any address", - ) - })) - } - - fn bind_addr(addr: SocketAddr) -> io::Result { - let listener = mio::net::TcpListener::bind(&addr)?; - TcpListener::new(listener) - } - - /// Accepts a new incoming connection from this listener. - /// - /// This function will yield once a new TCP connection is established. When - /// established, the corresponding [`TcpStream`] and the remote peer's - /// address will be returned. 
- /// - /// [`TcpStream`]: struct@crate::net::TcpStream - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::TcpListener; - /// - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut listener = TcpListener::bind("127.0.0.1:8080").await?; - /// - /// match listener.accept().await { - /// Ok((_socket, addr)) => println!("new client: {:?}", addr), - /// Err(e) => println!("couldn't get client: {:?}", e), - /// } - /// - /// Ok(()) - /// } - /// ``` - pub async fn accept(&mut self) -> io::Result<(TcpStream, SocketAddr)> { - poll_fn(|cx| self.poll_accept(cx)).await - } - - /// Polls to accept a new incoming connection to this listener. - /// - /// If there is no connection to accept, `Poll::Pending` is returned and - /// the current task will be notified by a waker. - pub fn poll_accept( - &mut self, - cx: &mut Context<'_>, - ) -> Poll> { - let (io, addr) = ready!(self.poll_accept_std(cx))?; - - let io = mio::net::TcpStream::from_stream(io)?; - let io = TcpStream::new(io)?; - - Poll::Ready(Ok((io, addr))) - } - - fn poll_accept_std( - &mut self, - cx: &mut Context<'_>, - ) -> Poll> { - ready!(self.io.poll_read_ready(cx, mio::Ready::readable()))?; - - match self.io.get_ref().accept_std() { - Ok(pair) => Poll::Ready(Ok(pair)), - Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - self.io.clear_read_ready(cx, mio::Ready::readable())?; - Poll::Pending - } - Err(e) => Poll::Ready(Err(e)), - } - } - - /// Creates a new TCP listener from the standard library's TCP listener. - /// - /// This method can be used when the `Handle::tcp_listen` method isn't - /// sufficient because perhaps some more configuration is needed in terms of - /// before the calls to `bind` and `listen`. - /// - /// This API is typically paired with the `net2` crate and the `TcpBuilder` - /// type to build up and customize a listener before it's shipped off to the - /// backing event loop. This allows configuration of options like - /// `SO_REUSEPORT`, binding to multiple addresses, etc. - /// - /// The `addr` argument here is one of the addresses that `listener` is - /// bound to and the listener will only be guaranteed to accept connections - /// of the same address type currently. - /// - /// The platform specific behavior of this function looks like: - /// - /// * On Unix, the socket is placed into nonblocking mode and connections - /// can be accepted as normal - /// - /// * On Windows, the address is stored internally and all future accepts - /// will only be for the same IP version as `addr` specified. That is, if - /// `addr` is an IPv4 address then all sockets accepted will be IPv4 as - /// well (same for IPv6). - /// - /// # Examples - /// - /// ```rust,no_run - /// use std::error::Error; - /// use tokio::net::TcpListener; - /// - /// #[tokio::main] - /// async fn main() -> Result<(), Box> { - /// let std_listener = std::net::TcpListener::bind("127.0.0.1:0")?; - /// let listener = TcpListener::from_std(std_listener)?; - /// Ok(()) - /// } - /// ``` - /// - /// # Panics - /// - /// This function panics if thread-local runtime is not set. - /// - /// The runtime is usually set implicitly when this function is called - /// from a future driven by a tokio runtime, otherwise runtime can be set - /// explicitly with [`Handle::enter`](crate::runtime::Handle::enter) function. 
- pub fn from_std(listener: net::TcpListener) -> io::Result { - let io = mio::net::TcpListener::from_std(listener)?; - let io = PollEvented::new(io)?; - Ok(TcpListener { io }) - } - - fn new(listener: mio::net::TcpListener) -> io::Result { - let io = PollEvented::new(listener)?; - Ok(TcpListener { io }) - } - - /// Returns the local address that this listener is bound to. - /// - /// This can be useful, for example, when binding to port 0 to figure out - /// which port was actually bound. - /// - /// # Examples - /// - /// ```rust,no_run - /// use tokio::net::TcpListener; - /// - /// use std::io; - /// use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4}; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let listener = TcpListener::bind("127.0.0.1:8080").await?; - /// - /// assert_eq!(listener.local_addr()?, - /// SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 8080))); - /// - /// Ok(()) - /// } - /// ``` - pub fn local_addr(&self) -> io::Result { - self.io.get_ref().local_addr() - } - - /// Returns a stream over the connections being received on this listener. - /// - /// Note that `TcpListener` also directly implements `Stream`. - /// - /// The returned stream will never return `None` and will also not yield the - /// peer's `SocketAddr` structure. Iterating over it is equivalent to - /// calling accept in a loop. - /// - /// # Errors - /// - /// Note that accepting a connection can lead to various errors and not all - /// of them are necessarily fatal ‒ for example having too many open file - /// descriptors or the other side closing the connection while it waits in - /// an accept queue. These would terminate the stream if not handled in any - /// way. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::{net::TcpListener, stream::StreamExt}; - /// - /// #[tokio::main] - /// async fn main() { - /// let mut listener = TcpListener::bind("127.0.0.1:8080").await.unwrap(); - /// let mut incoming = listener.incoming(); - /// - /// while let Some(stream) = incoming.next().await { - /// match stream { - /// Ok(stream) => { - /// println!("new client!"); - /// } - /// Err(e) => { /* connection failed */ } - /// } - /// } - /// } - /// ``` - pub fn incoming(&mut self) -> Incoming<'_> { - Incoming::new(self) - } - - /// Gets the value of the `IP_TTL` option for this socket. - /// - /// For more information about this option, see [`set_ttl`]. - /// - /// [`set_ttl`]: method@Self::set_ttl - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::TcpListener; - /// - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let listener = TcpListener::bind("127.0.0.1:0").await?; - /// - /// listener.set_ttl(100).expect("could not set TTL"); - /// assert_eq!(listener.ttl()?, 100); - /// - /// Ok(()) - /// } - /// ``` - pub fn ttl(&self) -> io::Result { - self.io.get_ref().ttl() - } - - /// Sets the value for the `IP_TTL` option on this socket. - /// - /// This value sets the time-to-live field that is used in every packet sent - /// from this socket. 
- /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::TcpListener; - /// - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let listener = TcpListener::bind("127.0.0.1:0").await?; - /// - /// listener.set_ttl(100).expect("could not set TTL"); - /// - /// Ok(()) - /// } - /// ``` - pub fn set_ttl(&self, ttl: u32) -> io::Result<()> { - self.io.get_ref().set_ttl(ttl) - } -} - -#[cfg(feature = "stream")] -impl crate::stream::Stream for TcpListener { - type Item = io::Result; - - fn poll_next( - mut self: std::pin::Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll> { - let (socket, _) = ready!(self.poll_accept(cx))?; - Poll::Ready(Some(Ok(socket))) - } -} - -impl TryFrom for mio::net::TcpListener { - type Error = io::Error; - - /// Consumes value, returning the mio I/O object. - /// - /// See [`PollEvented::into_inner`] for more details about - /// resource deregistration that happens during the call. - /// - /// [`PollEvented::into_inner`]: crate::io::PollEvented::into_inner - fn try_from(value: TcpListener) -> Result { - value.io.into_inner() - } -} - -impl TryFrom for TcpListener { - type Error = io::Error; - - /// Consumes stream, returning the tokio I/O object. - /// - /// This is equivalent to - /// [`TcpListener::from_std(stream)`](TcpListener::from_std). - fn try_from(stream: net::TcpListener) -> Result { - Self::from_std(stream) - } -} - -impl fmt::Debug for TcpListener { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.io.get_ref().fmt(f) - } -} - -#[cfg(unix)] -mod sys { - use super::TcpListener; - use std::os::unix::prelude::*; - - impl AsRawFd for TcpListener { - fn as_raw_fd(&self) -> RawFd { - self.io.get_ref().as_raw_fd() - } - } -} - -#[cfg(windows)] -mod sys { - // TODO: let's land these upstream with mio and then we can add them here. - // - // use std::os::windows::prelude::*; - // use super::{TcpListener; - // - // impl AsRawHandle for TcpListener { - // fn as_raw_handle(&self) -> RawHandle { - // self.listener.io().as_raw_handle() - // } - // } -} diff --git a/third_party/rust/tokio-0.2.25/src/net/tcp/mod.rs b/third_party/rust/tokio-0.2.25/src/net/tcp/mod.rs deleted file mode 100644 index 7ad36eb0b11c..000000000000 --- a/third_party/rust/tokio-0.2.25/src/net/tcp/mod.rs +++ /dev/null @@ -1,16 +0,0 @@ -//! TCP utility types - -pub(crate) mod listener; -pub(crate) use listener::TcpListener; - -mod incoming; -pub use incoming::Incoming; - -mod split; -pub use split::{ReadHalf, WriteHalf}; - -mod split_owned; -pub use split_owned::{OwnedReadHalf, OwnedWriteHalf, ReuniteError}; - -pub(crate) mod stream; -pub(crate) use stream::TcpStream; diff --git a/third_party/rust/tokio-0.2.25/src/net/tcp/split.rs b/third_party/rust/tokio-0.2.25/src/net/tcp/split.rs deleted file mode 100644 index 0c1e359f72de..000000000000 --- a/third_party/rust/tokio-0.2.25/src/net/tcp/split.rs +++ /dev/null @@ -1,186 +0,0 @@ -//! `TcpStream` split support. -//! -//! A `TcpStream` can be split into a `ReadHalf` and a -//! `WriteHalf` with the `TcpStream::split` method. `ReadHalf` -//! implements `AsyncRead` while `WriteHalf` implements `AsyncWrite`. -//! -//! Compared to the generic split of `AsyncRead + AsyncWrite`, this specialized -//! split has no associated overhead and enforces all invariants at the type -//! level. 
- -use crate::future::poll_fn; -use crate::io::{AsyncRead, AsyncWrite}; -use crate::net::TcpStream; - -use bytes::Buf; -use std::io; -use std::mem::MaybeUninit; -use std::net::Shutdown; -use std::pin::Pin; -use std::task::{Context, Poll}; - -/// Borrowed read half of a [`TcpStream`], created by [`split`]. -/// -/// Reading from a `ReadHalf` is usually done using the convenience methods found on the -/// [`AsyncReadExt`] trait. Examples import this trait through [the prelude]. -/// -/// [`TcpStream`]: TcpStream -/// [`split`]: TcpStream::split() -/// [`AsyncReadExt`]: trait@crate::io::AsyncReadExt -/// [the prelude]: crate::prelude -#[derive(Debug)] -pub struct ReadHalf<'a>(&'a TcpStream); - -/// Borrowed write half of a [`TcpStream`], created by [`split`]. -/// -/// Note that in the [`AsyncWrite`] implemenation of this type, [`poll_shutdown`] will -/// shut down the TCP stream in the write direction. -/// -/// Writing to an `WriteHalf` is usually done using the convenience methods found -/// on the [`AsyncWriteExt`] trait. Examples import this trait through [the prelude]. -/// -/// [`TcpStream`]: TcpStream -/// [`split`]: TcpStream::split() -/// [`AsyncWrite`]: trait@crate::io::AsyncWrite -/// [`poll_shutdown`]: fn@crate::io::AsyncWrite::poll_shutdown -/// [`AsyncWriteExt`]: trait@crate::io::AsyncWriteExt -/// [the prelude]: crate::prelude -#[derive(Debug)] -pub struct WriteHalf<'a>(&'a TcpStream); - -pub(crate) fn split(stream: &mut TcpStream) -> (ReadHalf<'_>, WriteHalf<'_>) { - (ReadHalf(&*stream), WriteHalf(&*stream)) -} - -impl ReadHalf<'_> { - /// Attempt to receive data on the socket, without removing that data from - /// the queue, registering the current task for wakeup if data is not yet - /// available. - /// - /// See the [`TcpStream::poll_peek`] level documenation for more details. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::io; - /// use tokio::net::TcpStream; - /// - /// use futures::future::poll_fn; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut stream = TcpStream::connect("127.0.0.1:8000").await?; - /// let (mut read_half, _) = stream.split(); - /// let mut buf = [0; 10]; - /// - /// poll_fn(|cx| { - /// read_half.poll_peek(cx, &mut buf) - /// }).await?; - /// - /// Ok(()) - /// } - /// ``` - /// - /// [`TcpStream::poll_peek`]: TcpStream::poll_peek - pub fn poll_peek(&mut self, cx: &mut Context<'_>, buf: &mut [u8]) -> Poll> { - self.0.poll_peek2(cx, buf) - } - - /// Receives data on the socket from the remote address to which it is - /// connected, without removing that data from the queue. On success, - /// returns the number of bytes peeked. - /// - /// See the [`TcpStream::peek`] level documenation for more details. - /// - /// [`TcpStream::peek`]: TcpStream::peek - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::TcpStream; - /// use tokio::prelude::*; - /// use std::error::Error; - /// - /// #[tokio::main] - /// async fn main() -> Result<(), Box> { - /// // Connect to a peer - /// let mut stream = TcpStream::connect("127.0.0.1:8080").await?; - /// let (mut read_half, _) = stream.split(); - /// - /// let mut b1 = [0; 10]; - /// let mut b2 = [0; 10]; - /// - /// // Peek at the data - /// let n = read_half.peek(&mut b1).await?; - /// - /// // Read the data - /// assert_eq!(n, read_half.read(&mut b2[..n]).await?); - /// assert_eq!(&b1[..n], &b2[..n]); - /// - /// Ok(()) - /// } - /// ``` - /// - /// The [`read`] method is defined on the [`AsyncReadExt`] trait. 
- /// - /// [`read`]: fn@crate::io::AsyncReadExt::read - /// [`AsyncReadExt`]: trait@crate::io::AsyncReadExt - pub async fn peek(&mut self, buf: &mut [u8]) -> io::Result { - poll_fn(|cx| self.poll_peek(cx, buf)).await - } -} - -impl AsyncRead for ReadHalf<'_> { - unsafe fn prepare_uninitialized_buffer(&self, _: &mut [MaybeUninit]) -> bool { - false - } - - fn poll_read( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { - self.0.poll_read_priv(cx, buf) - } -} - -impl AsyncWrite for WriteHalf<'_> { - fn poll_write( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - self.0.poll_write_priv(cx, buf) - } - - fn poll_write_buf( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut B, - ) -> Poll> { - self.0.poll_write_buf_priv(cx, buf) - } - - #[inline] - fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - // tcp flush is a no-op - Poll::Ready(Ok(())) - } - - // `poll_shutdown` on a write half shutdowns the stream in the "write" direction. - fn poll_shutdown(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - self.0.shutdown(Shutdown::Write).into() - } -} - -impl AsRef for ReadHalf<'_> { - fn as_ref(&self) -> &TcpStream { - self.0 - } -} - -impl AsRef for WriteHalf<'_> { - fn as_ref(&self) -> &TcpStream { - self.0 - } -} diff --git a/third_party/rust/tokio-0.2.25/src/net/tcp/split_owned.rs b/third_party/rust/tokio-0.2.25/src/net/tcp/split_owned.rs deleted file mode 100644 index 6c2b9e6977ef..000000000000 --- a/third_party/rust/tokio-0.2.25/src/net/tcp/split_owned.rs +++ /dev/null @@ -1,272 +0,0 @@ -//! `TcpStream` owned split support. -//! -//! A `TcpStream` can be split into an `OwnedReadHalf` and a `OwnedWriteHalf` -//! with the `TcpStream::into_split` method. `OwnedReadHalf` implements -//! `AsyncRead` while `OwnedWriteHalf` implements `AsyncWrite`. -//! -//! Compared to the generic split of `AsyncRead + AsyncWrite`, this specialized -//! split has no associated overhead and enforces all invariants at the type -//! level. - -use crate::future::poll_fn; -use crate::io::{AsyncRead, AsyncWrite}; -use crate::net::TcpStream; - -use bytes::Buf; -use std::error::Error; -use std::mem::MaybeUninit; -use std::net::Shutdown; -use std::pin::Pin; -use std::sync::Arc; -use std::task::{Context, Poll}; -use std::{fmt, io}; - -/// Owned read half of a [`TcpStream`], created by [`into_split`]. -/// -/// Reading from an `OwnedReadHalf` is usually done using the convenience methods found -/// on the [`AsyncReadExt`] trait. Examples import this trait through [the prelude]. -/// -/// [`TcpStream`]: TcpStream -/// [`into_split`]: TcpStream::into_split() -/// [`AsyncReadExt`]: trait@crate::io::AsyncReadExt -/// [the prelude]: crate::prelude -#[derive(Debug)] -pub struct OwnedReadHalf { - inner: Arc, -} - -/// Owned write half of a [`TcpStream`], created by [`into_split`]. -/// -/// Note that in the [`AsyncWrite`] implementation of this type, [`poll_shutdown`] will -/// shut down the TCP stream in the write direction. Dropping the write half -/// will also shut down the write half of the TCP stream. -/// -/// Writing to an `OwnedWriteHalf` is usually done using the convenience methods found -/// on the [`AsyncWriteExt`] trait. Examples import this trait through [the prelude]. 
-/// -/// [`TcpStream`]: TcpStream -/// [`into_split`]: TcpStream::into_split() -/// [`AsyncWrite`]: trait@crate::io::AsyncWrite -/// [`poll_shutdown`]: fn@crate::io::AsyncWrite::poll_shutdown -/// [`AsyncWriteExt`]: trait@crate::io::AsyncWriteExt -/// [the prelude]: crate::prelude -#[derive(Debug)] -pub struct OwnedWriteHalf { - inner: Arc, - shutdown_on_drop: bool, -} - -pub(crate) fn split_owned(stream: TcpStream) -> (OwnedReadHalf, OwnedWriteHalf) { - let arc = Arc::new(stream); - let read = OwnedReadHalf { - inner: Arc::clone(&arc), - }; - let write = OwnedWriteHalf { - inner: arc, - shutdown_on_drop: true, - }; - (read, write) -} - -pub(crate) fn reunite( - read: OwnedReadHalf, - write: OwnedWriteHalf, -) -> Result { - if Arc::ptr_eq(&read.inner, &write.inner) { - write.forget(); - // This unwrap cannot fail as the api does not allow creating more than two Arcs, - // and we just dropped the other half. - Ok(Arc::try_unwrap(read.inner).expect("TcpStream: try_unwrap failed in reunite")) - } else { - Err(ReuniteError(read, write)) - } -} - -/// Error indicating that two halves were not from the same socket, and thus could -/// not be reunited. -#[derive(Debug)] -pub struct ReuniteError(pub OwnedReadHalf, pub OwnedWriteHalf); - -impl fmt::Display for ReuniteError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!( - f, - "tried to reunite halves that are not from the same socket" - ) - } -} - -impl Error for ReuniteError {} - -impl OwnedReadHalf { - /// Attempts to put the two halves of a `TcpStream` back together and - /// recover the original socket. Succeeds only if the two halves - /// originated from the same call to [`into_split`]. - /// - /// [`into_split`]: TcpStream::into_split() - pub fn reunite(self, other: OwnedWriteHalf) -> Result { - reunite(self, other) - } - - /// Attempt to receive data on the socket, without removing that data from - /// the queue, registering the current task for wakeup if data is not yet - /// available. - /// - /// See the [`TcpStream::poll_peek`] level documenation for more details. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::io; - /// use tokio::net::TcpStream; - /// - /// use futures::future::poll_fn; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let stream = TcpStream::connect("127.0.0.1:8000").await?; - /// let (mut read_half, _) = stream.into_split(); - /// let mut buf = [0; 10]; - /// - /// poll_fn(|cx| { - /// read_half.poll_peek(cx, &mut buf) - /// }).await?; - /// - /// Ok(()) - /// } - /// ``` - /// - /// [`TcpStream::poll_peek`]: TcpStream::poll_peek - pub fn poll_peek(&mut self, cx: &mut Context<'_>, buf: &mut [u8]) -> Poll> { - self.inner.poll_peek2(cx, buf) - } - - /// Receives data on the socket from the remote address to which it is - /// connected, without removing that data from the queue. On success, - /// returns the number of bytes peeked. - /// - /// See the [`TcpStream::peek`] level documenation for more details. 
- /// - /// [`TcpStream::peek`]: TcpStream::peek - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::TcpStream; - /// use tokio::prelude::*; - /// use std::error::Error; - /// - /// #[tokio::main] - /// async fn main() -> Result<(), Box> { - /// // Connect to a peer - /// let stream = TcpStream::connect("127.0.0.1:8080").await?; - /// let (mut read_half, _) = stream.into_split(); - /// - /// let mut b1 = [0; 10]; - /// let mut b2 = [0; 10]; - /// - /// // Peek at the data - /// let n = read_half.peek(&mut b1).await?; - /// - /// // Read the data - /// assert_eq!(n, read_half.read(&mut b2[..n]).await?); - /// assert_eq!(&b1[..n], &b2[..n]); - /// - /// Ok(()) - /// } - /// ``` - /// - /// The [`read`] method is defined on the [`AsyncReadExt`] trait. - /// - /// [`read`]: fn@crate::io::AsyncReadExt::read - /// [`AsyncReadExt`]: trait@crate::io::AsyncReadExt - pub async fn peek(&mut self, buf: &mut [u8]) -> io::Result { - poll_fn(|cx| self.poll_peek(cx, buf)).await - } -} - -impl AsyncRead for OwnedReadHalf { - unsafe fn prepare_uninitialized_buffer(&self, _: &mut [MaybeUninit]) -> bool { - false - } - - fn poll_read( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { - self.inner.poll_read_priv(cx, buf) - } -} - -impl OwnedWriteHalf { - /// Attempts to put the two halves of a `TcpStream` back together and - /// recover the original socket. Succeeds only if the two halves - /// originated from the same call to [`into_split`]. - /// - /// [`into_split`]: TcpStream::into_split() - pub fn reunite(self, other: OwnedReadHalf) -> Result { - reunite(other, self) - } - - /// Destroy the write half, but don't close the write half of the stream - /// until the read half is dropped. If the read half has already been - /// dropped, this closes the stream. - pub fn forget(mut self) { - self.shutdown_on_drop = false; - drop(self); - } -} - -impl Drop for OwnedWriteHalf { - fn drop(&mut self) { - if self.shutdown_on_drop { - let _ = self.inner.shutdown(Shutdown::Write); - } - } -} - -impl AsyncWrite for OwnedWriteHalf { - fn poll_write( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - self.inner.poll_write_priv(cx, buf) - } - - fn poll_write_buf( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut B, - ) -> Poll> { - self.inner.poll_write_buf_priv(cx, buf) - } - - #[inline] - fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - // tcp flush is a no-op - Poll::Ready(Ok(())) - } - - // `poll_shutdown` on a write half shutdowns the stream in the "write" direction. 
- fn poll_shutdown(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - let res = self.inner.shutdown(Shutdown::Write); - if res.is_ok() { - Pin::into_inner(self).shutdown_on_drop = false; - } - res.into() - } -} - -impl AsRef for OwnedReadHalf { - fn as_ref(&self) -> &TcpStream { - &*self.inner - } -} - -impl AsRef for OwnedWriteHalf { - fn as_ref(&self) -> &TcpStream { - &*self.inner - } -} diff --git a/third_party/rust/tokio-0.2.25/src/net/tcp/stream.rs b/third_party/rust/tokio-0.2.25/src/net/tcp/stream.rs deleted file mode 100644 index 02b5262723e3..000000000000 --- a/third_party/rust/tokio-0.2.25/src/net/tcp/stream.rs +++ /dev/null @@ -1,939 +0,0 @@ -use crate::future::poll_fn; -use crate::io::{AsyncRead, AsyncWrite, PollEvented}; -use crate::net::tcp::split::{split, ReadHalf, WriteHalf}; -use crate::net::tcp::split_owned::{split_owned, OwnedReadHalf, OwnedWriteHalf}; -use crate::net::ToSocketAddrs; - -use bytes::Buf; -use iovec::IoVec; -use std::convert::TryFrom; -use std::fmt; -use std::io::{self, Read, Write}; -use std::mem::MaybeUninit; -use std::net::{self, Shutdown, SocketAddr}; -use std::pin::Pin; -use std::task::{Context, Poll}; -use std::time::Duration; - -cfg_tcp! { - /// A TCP stream between a local and a remote socket. - /// - /// A TCP stream can either be created by connecting to an endpoint, via the - /// [`connect`] method, or by [accepting] a connection from a [listener]. - /// - /// Reading and writing to a `TcpStream` is usually done using the - /// convenience methods found on the [`AsyncReadExt`] and [`AsyncWriteExt`] - /// traits. Examples import these traits through [the prelude]. - /// - /// [`connect`]: method@TcpStream::connect - /// [accepting]: method@super::TcpListener::accept - /// [listener]: struct@super::TcpListener - /// [`AsyncReadExt`]: trait@crate::io::AsyncReadExt - /// [`AsyncWriteExt`]: trait@crate::io::AsyncWriteExt - /// [the prelude]: crate::prelude - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::TcpStream; - /// use tokio::prelude::*; - /// use std::error::Error; - /// - /// #[tokio::main] - /// async fn main() -> Result<(), Box> { - /// // Connect to a peer - /// let mut stream = TcpStream::connect("127.0.0.1:8080").await?; - /// - /// // Write some data. - /// stream.write_all(b"hello world!").await?; - /// - /// Ok(()) - /// } - /// ``` - /// - /// The [`write_all`] method is defined on the [`AsyncWriteExt`] trait. - /// - /// [`write_all`]: fn@crate::io::AsyncWriteExt::write_all - /// [`AsyncWriteExt`]: trait@crate::io::AsyncWriteExt - pub struct TcpStream { - io: PollEvented, - } -} - -impl TcpStream { - /// Opens a TCP connection to a remote host. - /// - /// `addr` is an address of the remote host. Anything which implements the - /// [`ToSocketAddrs`] trait can be supplied as the address. Note that - /// strings only implement this trait when the **`dns`** feature is enabled, - /// as strings may contain domain names that need to be resolved. - /// - /// If `addr` yields multiple addresses, connect will be attempted with each - /// of the addresses until a connection is successful. If none of the - /// addresses result in a successful connection, the error returned from the - /// last connection attempt (the last address) is returned. 
- /// - /// [`ToSocketAddrs`]: trait@crate::net::ToSocketAddrs - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::TcpStream; - /// use tokio::prelude::*; - /// use std::error::Error; - /// - /// #[tokio::main] - /// async fn main() -> Result<(), Box> { - /// // Connect to a peer - /// let mut stream = TcpStream::connect("127.0.0.1:8080").await?; - /// - /// // Write some data. - /// stream.write_all(b"hello world!").await?; - /// - /// Ok(()) - /// } - /// ``` - /// - /// Without the `dns` feature: - /// - /// ```no_run - /// use tokio::net::TcpStream; - /// use tokio::prelude::*; - /// use std::error::Error; - /// use std::net::Ipv4Addr; - /// - /// #[tokio::main] - /// async fn main() -> Result<(), Box> { - /// // Connect to a peer - /// let mut stream = TcpStream::connect((Ipv4Addr::new(127, 0, 0, 1), 8080)).await?; - /// - /// // Write some data. - /// stream.write_all(b"hello world!").await?; - /// - /// Ok(()) - /// } - /// ``` - /// - /// The [`write_all`] method is defined on the [`AsyncWriteExt`] trait. - /// - /// [`write_all`]: fn@crate::io::AsyncWriteExt::write_all - /// [`AsyncWriteExt`]: trait@crate::io::AsyncWriteExt - pub async fn connect(addr: A) -> io::Result { - let addrs = addr.to_socket_addrs().await?; - - let mut last_err = None; - - for addr in addrs { - match TcpStream::connect_addr(addr).await { - Ok(stream) => return Ok(stream), - Err(e) => last_err = Some(e), - } - } - - Err(last_err.unwrap_or_else(|| { - io::Error::new( - io::ErrorKind::InvalidInput, - "could not resolve to any address", - ) - })) - } - - /// Establishes a connection to the specified `addr`. - async fn connect_addr(addr: SocketAddr) -> io::Result { - let sys = mio::net::TcpStream::connect(&addr)?; - let stream = TcpStream::new(sys)?; - - // Once we've connected, wait for the stream to be writable as - // that's when the actual connection has been initiated. Once we're - // writable we check for `take_socket_error` to see if the connect - // actually hit an error or not. - // - // If all that succeeded then we ship everything on up. - poll_fn(|cx| stream.io.poll_write_ready(cx)).await?; - - if let Some(e) = stream.io.get_ref().take_error()? { - return Err(e); - } - - Ok(stream) - } - - pub(crate) fn new(connected: mio::net::TcpStream) -> io::Result { - let io = PollEvented::new(connected)?; - Ok(TcpStream { io }) - } - - /// Creates new `TcpStream` from a `std::net::TcpStream`. - /// - /// This function will convert a TCP stream created by the standard library - /// to a TCP stream ready to be used with the provided event loop handle. - /// - /// # Examples - /// - /// ```rust,no_run - /// use std::error::Error; - /// use tokio::net::TcpStream; - /// - /// #[tokio::main] - /// async fn main() -> Result<(), Box> { - /// let std_stream = std::net::TcpStream::connect("127.0.0.1:34254")?; - /// let stream = TcpStream::from_std(std_stream)?; - /// Ok(()) - /// } - /// ``` - /// - /// # Panics - /// - /// This function panics if thread-local runtime is not set. - /// - /// The runtime is usually set implicitly when this function is called - /// from a future driven by a tokio runtime, otherwise runtime can be set - /// explicitly with [`Handle::enter`](crate::runtime::Handle::enter) function. - pub fn from_std(stream: net::TcpStream) -> io::Result { - let io = mio::net::TcpStream::from_stream(stream)?; - let io = PollEvented::new(io)?; - Ok(TcpStream { io }) - } - - // Connects `TcpStream` asynchronously that may be built with a net2 `TcpBuilder`. 
- // - // This should be removed in favor of some in-crate TcpSocket builder API. - #[doc(hidden)] - pub async fn connect_std(stream: net::TcpStream, addr: &SocketAddr) -> io::Result { - let io = mio::net::TcpStream::connect_stream(stream, addr)?; - let io = PollEvented::new(io)?; - let stream = TcpStream { io }; - - // Once we've connected, wait for the stream to be writable as - // that's when the actual connection has been initiated. Once we're - // writable we check for `take_socket_error` to see if the connect - // actually hit an error or not. - // - // If all that succeeded then we ship everything on up. - poll_fn(|cx| stream.io.poll_write_ready(cx)).await?; - - if let Some(e) = stream.io.get_ref().take_error()? { - return Err(e); - } - - Ok(stream) - } - - /// Returns the local address that this stream is bound to. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::TcpStream; - /// - /// # async fn dox() -> Result<(), Box> { - /// let stream = TcpStream::connect("127.0.0.1:8080").await?; - /// - /// println!("{:?}", stream.local_addr()?); - /// # Ok(()) - /// # } - /// ``` - pub fn local_addr(&self) -> io::Result { - self.io.get_ref().local_addr() - } - - /// Returns the remote address that this stream is connected to. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::TcpStream; - /// - /// # async fn dox() -> Result<(), Box> { - /// let stream = TcpStream::connect("127.0.0.1:8080").await?; - /// - /// println!("{:?}", stream.peer_addr()?); - /// # Ok(()) - /// # } - /// ``` - pub fn peer_addr(&self) -> io::Result { - self.io.get_ref().peer_addr() - } - - /// Attempts to receive data on the socket, without removing that data from - /// the queue, registering the current task for wakeup if data is not yet - /// available. - /// - /// # Return value - /// - /// The function returns: - /// - /// * `Poll::Pending` if data is not yet available. - /// * `Poll::Ready(Ok(n))` if data is available. `n` is the number of bytes peeked. - /// * `Poll::Ready(Err(e))` if an error is encountered. - /// - /// # Errors - /// - /// This function may encounter any standard I/O error except `WouldBlock`. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::io; - /// use tokio::net::TcpStream; - /// - /// use futures::future::poll_fn; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut stream = TcpStream::connect("127.0.0.1:8000").await?; - /// let mut buf = [0; 10]; - /// - /// poll_fn(|cx| { - /// stream.poll_peek(cx, &mut buf) - /// }).await?; - /// - /// Ok(()) - /// } - /// ``` - pub fn poll_peek(&mut self, cx: &mut Context<'_>, buf: &mut [u8]) -> Poll> { - self.poll_peek2(cx, buf) - } - - pub(super) fn poll_peek2( - &self, - cx: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { - ready!(self.io.poll_read_ready(cx, mio::Ready::readable()))?; - - match self.io.get_ref().peek(buf) { - Ok(ret) => Poll::Ready(Ok(ret)), - Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - self.io.clear_read_ready(cx, mio::Ready::readable())?; - Poll::Pending - } - Err(e) => Poll::Ready(Err(e)), - } - } - - /// Receives data on the socket from the remote address to which it is - /// connected, without removing that data from the queue. On success, - /// returns the number of bytes peeked. - /// - /// Successive calls return the same data. This is accomplished by passing - /// `MSG_PEEK` as a flag to the underlying recv system call. 
- /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::TcpStream; - /// use tokio::prelude::*; - /// use std::error::Error; - /// - /// #[tokio::main] - /// async fn main() -> Result<(), Box> { - /// // Connect to a peer - /// let mut stream = TcpStream::connect("127.0.0.1:8080").await?; - /// - /// let mut b1 = [0; 10]; - /// let mut b2 = [0; 10]; - /// - /// // Peek at the data - /// let n = stream.peek(&mut b1).await?; - /// - /// // Read the data - /// assert_eq!(n, stream.read(&mut b2[..n]).await?); - /// assert_eq!(&b1[..n], &b2[..n]); - /// - /// Ok(()) - /// } - /// ``` - /// - /// The [`read`] method is defined on the [`AsyncReadExt`] trait. - /// - /// [`read`]: fn@crate::io::AsyncReadExt::read - /// [`AsyncReadExt`]: trait@crate::io::AsyncReadExt - pub async fn peek(&mut self, buf: &mut [u8]) -> io::Result { - poll_fn(|cx| self.poll_peek(cx, buf)).await - } - - /// Shuts down the read, write, or both halves of this connection. - /// - /// This function will cause all pending and future I/O on the specified - /// portions to return immediately with an appropriate value (see the - /// documentation of `Shutdown`). - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::TcpStream; - /// use std::error::Error; - /// use std::net::Shutdown; - /// - /// #[tokio::main] - /// async fn main() -> Result<(), Box> { - /// // Connect to a peer - /// let stream = TcpStream::connect("127.0.0.1:8080").await?; - /// - /// // Shutdown the stream - /// stream.shutdown(Shutdown::Write)?; - /// - /// Ok(()) - /// } - /// ``` - pub fn shutdown(&self, how: Shutdown) -> io::Result<()> { - self.io.get_ref().shutdown(how) - } - - /// Gets the value of the `TCP_NODELAY` option on this socket. - /// - /// For more information about this option, see [`set_nodelay`]. - /// - /// [`set_nodelay`]: TcpStream::set_nodelay - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::TcpStream; - /// - /// # async fn dox() -> Result<(), Box> { - /// let stream = TcpStream::connect("127.0.0.1:8080").await?; - /// - /// println!("{:?}", stream.nodelay()?); - /// # Ok(()) - /// # } - /// ``` - pub fn nodelay(&self) -> io::Result { - self.io.get_ref().nodelay() - } - - /// Sets the value of the `TCP_NODELAY` option on this socket. - /// - /// If set, this option disables the Nagle algorithm. This means that - /// segments are always sent as soon as possible, even if there is only a - /// small amount of data. When not set, data is buffered until there is a - /// sufficient amount to send out, thereby avoiding the frequent sending of - /// small packets. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::TcpStream; - /// - /// # async fn dox() -> Result<(), Box> { - /// let stream = TcpStream::connect("127.0.0.1:8080").await?; - /// - /// stream.set_nodelay(true)?; - /// # Ok(()) - /// # } - /// ``` - pub fn set_nodelay(&self, nodelay: bool) -> io::Result<()> { - self.io.get_ref().set_nodelay(nodelay) - } - - /// Gets the value of the `SO_RCVBUF` option on this socket. - /// - /// For more information about this option, see [`set_recv_buffer_size`]. 
- /// - /// [`set_recv_buffer_size`]: TcpStream::set_recv_buffer_size - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::TcpStream; - /// - /// # async fn dox() -> Result<(), Box> { - /// let stream = TcpStream::connect("127.0.0.1:8080").await?; - /// - /// println!("{:?}", stream.recv_buffer_size()?); - /// # Ok(()) - /// # } - /// ``` - pub fn recv_buffer_size(&self) -> io::Result { - self.io.get_ref().recv_buffer_size() - } - - /// Sets the value of the `SO_RCVBUF` option on this socket. - /// - /// Changes the size of the operating system's receive buffer associated - /// with the socket. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::TcpStream; - /// - /// # async fn dox() -> Result<(), Box> { - /// let stream = TcpStream::connect("127.0.0.1:8080").await?; - /// - /// stream.set_recv_buffer_size(100)?; - /// # Ok(()) - /// # } - /// ``` - pub fn set_recv_buffer_size(&self, size: usize) -> io::Result<()> { - self.io.get_ref().set_recv_buffer_size(size) - } - - /// Gets the value of the `SO_SNDBUF` option on this socket. - /// - /// For more information about this option, see [`set_send_buffer_size`]. - /// - /// [`set_send_buffer_size`]: TcpStream::set_send_buffer_size - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::TcpStream; - /// - /// # async fn dox() -> Result<(), Box> { - /// let stream = TcpStream::connect("127.0.0.1:8080").await?; - /// - /// println!("{:?}", stream.send_buffer_size()?); - /// # Ok(()) - /// # } - /// ``` - pub fn send_buffer_size(&self) -> io::Result { - self.io.get_ref().send_buffer_size() - } - - /// Sets the value of the `SO_SNDBUF` option on this socket. - /// - /// Changes the size of the operating system's send buffer associated with - /// the socket. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::TcpStream; - /// - /// # async fn dox() -> Result<(), Box> { - /// let stream = TcpStream::connect("127.0.0.1:8080").await?; - /// - /// stream.set_send_buffer_size(100)?; - /// # Ok(()) - /// # } - /// ``` - pub fn set_send_buffer_size(&self, size: usize) -> io::Result<()> { - self.io.get_ref().set_send_buffer_size(size) - } - - /// Returns whether keepalive messages are enabled on this socket, and if so - /// the duration of time between them. - /// - /// For more information about this option, see [`set_keepalive`]. - /// - /// [`set_keepalive`]: TcpStream::set_keepalive - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::TcpStream; - /// - /// # async fn dox() -> Result<(), Box> { - /// let stream = TcpStream::connect("127.0.0.1:8080").await?; - /// - /// println!("{:?}", stream.keepalive()?); - /// # Ok(()) - /// # } - /// ``` - pub fn keepalive(&self) -> io::Result> { - self.io.get_ref().keepalive() - } - - /// Sets whether keepalive messages are enabled to be sent on this socket. - /// - /// On Unix, this option will set the `SO_KEEPALIVE` as well as the - /// `TCP_KEEPALIVE` or `TCP_KEEPIDLE` option (depending on your platform). - /// On Windows, this will set the `SIO_KEEPALIVE_VALS` option. - /// - /// If `None` is specified then keepalive messages are disabled, otherwise - /// the duration specified will be the time to remain idle before sending a - /// TCP keepalive probe. - /// - /// Some platforms specify this value in seconds, so sub-second - /// specifications may be omitted. 
- /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::TcpStream; - /// - /// # async fn dox() -> Result<(), Box> { - /// let stream = TcpStream::connect("127.0.0.1:8080").await?; - /// - /// stream.set_keepalive(None)?; - /// # Ok(()) - /// # } - /// ``` - pub fn set_keepalive(&self, keepalive: Option) -> io::Result<()> { - self.io.get_ref().set_keepalive(keepalive) - } - - /// Gets the value of the `IP_TTL` option for this socket. - /// - /// For more information about this option, see [`set_ttl`]. - /// - /// [`set_ttl`]: TcpStream::set_ttl - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::TcpStream; - /// - /// # async fn dox() -> Result<(), Box> { - /// let stream = TcpStream::connect("127.0.0.1:8080").await?; - /// - /// println!("{:?}", stream.ttl()?); - /// # Ok(()) - /// # } - /// ``` - pub fn ttl(&self) -> io::Result { - self.io.get_ref().ttl() - } - - /// Sets the value for the `IP_TTL` option on this socket. - /// - /// This value sets the time-to-live field that is used in every packet sent - /// from this socket. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::TcpStream; - /// - /// # async fn dox() -> Result<(), Box> { - /// let stream = TcpStream::connect("127.0.0.1:8080").await?; - /// - /// stream.set_ttl(123)?; - /// # Ok(()) - /// # } - /// ``` - pub fn set_ttl(&self, ttl: u32) -> io::Result<()> { - self.io.get_ref().set_ttl(ttl) - } - - /// Reads the linger duration for this socket by getting the `SO_LINGER` - /// option. - /// - /// For more information about this option, see [`set_linger`]. - /// - /// [`set_linger`]: TcpStream::set_linger - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::TcpStream; - /// - /// # async fn dox() -> Result<(), Box> { - /// let stream = TcpStream::connect("127.0.0.1:8080").await?; - /// - /// println!("{:?}", stream.linger()?); - /// # Ok(()) - /// # } - /// ``` - pub fn linger(&self) -> io::Result> { - self.io.get_ref().linger() - } - - /// Sets the linger duration of this socket by setting the `SO_LINGER` - /// option. - /// - /// This option controls the action taken when a stream has unsent messages - /// and the stream is closed. If `SO_LINGER` is set, the system - /// shall block the process until it can transmit the data or until the - /// time expires. - /// - /// If `SO_LINGER` is not specified, and the stream is closed, the system - /// handles the call in a way that allows the process to continue as quickly - /// as possible. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::TcpStream; - /// - /// # async fn dox() -> Result<(), Box> { - /// let stream = TcpStream::connect("127.0.0.1:8080").await?; - /// - /// stream.set_linger(None)?; - /// # Ok(()) - /// # } - /// ``` - pub fn set_linger(&self, dur: Option) -> io::Result<()> { - self.io.get_ref().set_linger(dur) - } - - // These lifetime markers also appear in the generated documentation, and make - // it more clear that this is a *borrowed* split. - #[allow(clippy::needless_lifetimes)] - /// Splits a `TcpStream` into a read half and a write half, which can be used - /// to read and write the stream concurrently. - /// - /// This method is more efficient than [`into_split`], but the halves cannot be - /// moved into independently spawned tasks. 
- /// - /// [`into_split`]: TcpStream::into_split() - pub fn split<'a>(&'a mut self) -> (ReadHalf<'a>, WriteHalf<'a>) { - split(self) - } - - /// Splits a `TcpStream` into a read half and a write half, which can be used - /// to read and write the stream concurrently. - /// - /// Unlike [`split`], the owned halves can be moved to separate tasks, however - /// this comes at the cost of a heap allocation. - /// - /// **Note:** Dropping the write half will shut down the write half of the TCP - /// stream. This is equivalent to calling [`shutdown(Write)`] on the `TcpStream`. - /// - /// [`split`]: TcpStream::split() - /// [`shutdown(Write)`]: fn@crate::net::TcpStream::shutdown - pub fn into_split(self) -> (OwnedReadHalf, OwnedWriteHalf) { - split_owned(self) - } - - // == Poll IO functions that takes `&self` == - // - // They are not public because (taken from the doc of `PollEvented`): - // - // While `PollEvented` is `Sync` (if the underlying I/O type is `Sync`), the - // caller must ensure that there are at most two tasks that use a - // `PollEvented` instance concurrently. One for reading and one for writing. - // While violating this requirement is "safe" from a Rust memory model point - // of view, it will result in unexpected behavior in the form of lost - // notifications and tasks hanging. - - pub(crate) fn poll_read_priv( - &self, - cx: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { - ready!(self.io.poll_read_ready(cx, mio::Ready::readable()))?; - - match self.io.get_ref().read(buf) { - Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - self.io.clear_read_ready(cx, mio::Ready::readable())?; - Poll::Pending - } - x => Poll::Ready(x), - } - } - - pub(super) fn poll_write_priv( - &self, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - ready!(self.io.poll_write_ready(cx))?; - - match self.io.get_ref().write(buf) { - Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - self.io.clear_write_ready(cx)?; - Poll::Pending - } - x => Poll::Ready(x), - } - } - - pub(super) fn poll_write_buf_priv( - &self, - cx: &mut Context<'_>, - buf: &mut B, - ) -> Poll> { - use std::io::IoSlice; - - ready!(self.io.poll_write_ready(cx))?; - - // The `IoVec` (v0.1.x) type can't have a zero-length size, so create - // a dummy version from a 1-length slice which we'll overwrite with - // the `bytes_vectored` method. 
- static S: &[u8] = &[0]; - const MAX_BUFS: usize = 64; - - // IoSlice isn't Copy, so we must expand this manually ;_; - let mut slices: [IoSlice<'_>; MAX_BUFS] = [ - IoSlice::new(S), - IoSlice::new(S), - IoSlice::new(S), - IoSlice::new(S), - IoSlice::new(S), - IoSlice::new(S), - IoSlice::new(S), - IoSlice::new(S), - IoSlice::new(S), - IoSlice::new(S), - IoSlice::new(S), - IoSlice::new(S), - IoSlice::new(S), - IoSlice::new(S), - IoSlice::new(S), - IoSlice::new(S), - IoSlice::new(S), - IoSlice::new(S), - IoSlice::new(S), - IoSlice::new(S), - IoSlice::new(S), - IoSlice::new(S), - IoSlice::new(S), - IoSlice::new(S), - IoSlice::new(S), - IoSlice::new(S), - IoSlice::new(S), - IoSlice::new(S), - IoSlice::new(S), - IoSlice::new(S), - IoSlice::new(S), - IoSlice::new(S), - IoSlice::new(S), - IoSlice::new(S), - IoSlice::new(S), - IoSlice::new(S), - IoSlice::new(S), - IoSlice::new(S), - IoSlice::new(S), - IoSlice::new(S), - IoSlice::new(S), - IoSlice::new(S), - IoSlice::new(S), - IoSlice::new(S), - IoSlice::new(S), - IoSlice::new(S), - IoSlice::new(S), - IoSlice::new(S), - IoSlice::new(S), - IoSlice::new(S), - IoSlice::new(S), - IoSlice::new(S), - IoSlice::new(S), - IoSlice::new(S), - IoSlice::new(S), - IoSlice::new(S), - IoSlice::new(S), - IoSlice::new(S), - IoSlice::new(S), - IoSlice::new(S), - IoSlice::new(S), - IoSlice::new(S), - IoSlice::new(S), - IoSlice::new(S), - ]; - let cnt = buf.bytes_vectored(&mut slices); - - let iovec = <&IoVec>::from(S); - let mut vecs = [iovec; MAX_BUFS]; - for i in 0..cnt { - vecs[i] = (*slices[i]).into(); - } - - match self.io.get_ref().write_bufs(&vecs[..cnt]) { - Ok(n) => { - buf.advance(n); - Poll::Ready(Ok(n)) - } - Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - self.io.clear_write_ready(cx)?; - Poll::Pending - } - Err(e) => Poll::Ready(Err(e)), - } - } -} - -impl TryFrom for mio::net::TcpStream { - type Error = io::Error; - - /// Consumes value, returning the mio I/O object. - /// - /// See [`PollEvented::into_inner`] for more details about - /// resource deregistration that happens during the call. - /// - /// [`PollEvented::into_inner`]: crate::io::PollEvented::into_inner - fn try_from(value: TcpStream) -> Result { - value.io.into_inner() - } -} - -impl TryFrom for TcpStream { - type Error = io::Error; - - /// Consumes stream, returning the tokio I/O object. - /// - /// This is equivalent to - /// [`TcpStream::from_std(stream)`](TcpStream::from_std). 
- fn try_from(stream: net::TcpStream) -> Result { - Self::from_std(stream) - } -} - -// ===== impl Read / Write ===== - -impl AsyncRead for TcpStream { - unsafe fn prepare_uninitialized_buffer(&self, _: &mut [MaybeUninit]) -> bool { - false - } - - fn poll_read( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { - self.poll_read_priv(cx, buf) - } -} - -impl AsyncWrite for TcpStream { - fn poll_write( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - self.poll_write_priv(cx, buf) - } - - fn poll_write_buf( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut B, - ) -> Poll> { - self.poll_write_buf_priv(cx, buf) - } - - #[inline] - fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - // tcp flush is a no-op - Poll::Ready(Ok(())) - } - - fn poll_shutdown(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - self.shutdown(std::net::Shutdown::Write)?; - Poll::Ready(Ok(())) - } -} - -impl fmt::Debug for TcpStream { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.io.get_ref().fmt(f) - } -} - -#[cfg(unix)] -mod sys { - use super::TcpStream; - use std::os::unix::prelude::*; - - impl AsRawFd for TcpStream { - fn as_raw_fd(&self) -> RawFd { - self.io.get_ref().as_raw_fd() - } - } -} - -#[cfg(windows)] -mod sys { - // TODO: let's land these upstream with mio and then we can add them here. - // - // use std::os::windows::prelude::*; - // use super::TcpStream; - // - // impl AsRawHandle for TcpStream { - // fn as_raw_handle(&self) -> RawHandle { - // self.io.get_ref().as_raw_handle() - // } - // } -} diff --git a/third_party/rust/tokio-0.2.25/src/net/udp/mod.rs b/third_party/rust/tokio-0.2.25/src/net/udp/mod.rs deleted file mode 100644 index d43121a1ca06..000000000000 --- a/third_party/rust/tokio-0.2.25/src/net/udp/mod.rs +++ /dev/null @@ -1,7 +0,0 @@ -//! UDP utility types. - -pub(crate) mod socket; -pub(crate) use socket::UdpSocket; - -mod split; -pub use split::{RecvHalf, ReuniteError, SendHalf}; diff --git a/third_party/rust/tokio-0.2.25/src/net/udp/socket.rs b/third_party/rust/tokio-0.2.25/src/net/udp/socket.rs deleted file mode 100644 index 00b00d8d94fd..000000000000 --- a/third_party/rust/tokio-0.2.25/src/net/udp/socket.rs +++ /dev/null @@ -1,454 +0,0 @@ -use crate::future::poll_fn; -use crate::io::PollEvented; -use crate::net::udp::split::{split, RecvHalf, SendHalf}; -use crate::net::ToSocketAddrs; - -use std::convert::TryFrom; -use std::fmt; -use std::io; -use std::net::{self, Ipv4Addr, Ipv6Addr, SocketAddr}; -use std::task::{Context, Poll}; - -cfg_udp! { - /// A UDP socket - pub struct UdpSocket { - io: PollEvented, - } -} - -impl UdpSocket { - /// This function will create a new UDP socket and attempt to bind it to - /// the `addr` provided. - pub async fn bind(addr: A) -> io::Result { - let addrs = addr.to_socket_addrs().await?; - let mut last_err = None; - - for addr in addrs { - match UdpSocket::bind_addr(addr) { - Ok(socket) => return Ok(socket), - Err(e) => last_err = Some(e), - } - } - - Err(last_err.unwrap_or_else(|| { - io::Error::new( - io::ErrorKind::InvalidInput, - "could not resolve to any address", - ) - })) - } - - fn bind_addr(addr: SocketAddr) -> io::Result { - let sys = mio::net::UdpSocket::bind(&addr)?; - UdpSocket::new(sys) - } - - fn new(socket: mio::net::UdpSocket) -> io::Result { - let io = PollEvented::new(socket)?; - Ok(UdpSocket { io }) - } - - /// Creates a new `UdpSocket` from the previously bound socket provided. 
- /// - /// The socket given will be registered with the event loop that `handle` - /// is associated with. This function requires that `socket` has previously - /// been bound to an address to work correctly. - /// - /// This can be used in conjunction with net2's `UdpBuilder` interface to - /// configure a socket before it's handed off, such as setting options like - /// `reuse_address` or binding to multiple addresses. - /// - /// # Panics - /// - /// This function panics if thread-local runtime is not set. - /// - /// The runtime is usually set implicitly when this function is called - /// from a future driven by a tokio runtime, otherwise runtime can be set - /// explicitly with [`Handle::enter`](crate::runtime::Handle::enter) function. - pub fn from_std(socket: net::UdpSocket) -> io::Result { - let io = mio::net::UdpSocket::from_socket(socket)?; - let io = PollEvented::new(io)?; - Ok(UdpSocket { io }) - } - - /// Splits the `UdpSocket` into a receive half and a send half. The two parts - /// can be used to receive and send datagrams concurrently, even from two - /// different tasks. - pub fn split(self) -> (RecvHalf, SendHalf) { - split(self) - } - - /// Returns the local address that this socket is bound to. - pub fn local_addr(&self) -> io::Result { - self.io.get_ref().local_addr() - } - - /// Connects the UDP socket setting the default destination for send() and - /// limiting packets that are read via recv from the address specified in - /// `addr`. - pub async fn connect(&self, addr: A) -> io::Result<()> { - let addrs = addr.to_socket_addrs().await?; - let mut last_err = None; - - for addr in addrs { - match self.io.get_ref().connect(addr) { - Ok(_) => return Ok(()), - Err(e) => last_err = Some(e), - } - } - - Err(last_err.unwrap_or_else(|| { - io::Error::new( - io::ErrorKind::InvalidInput, - "could not resolve to any address", - ) - })) - } - - /// Returns a future that sends data on the socket to the remote address to which it is connected. - /// On success, the future will resolve to the number of bytes written. - /// - /// The [`connect`] method will connect this socket to a remote address. The future - /// will resolve to an error if the socket is not connected. - /// - /// [`connect`]: method@Self::connect - pub async fn send(&mut self, buf: &[u8]) -> io::Result { - poll_fn(|cx| self.poll_send(cx, buf)).await - } - - /// Try to send data on the socket to the remote address to which it is - /// connected. - /// - /// # Returns - /// - /// If successfull, the number of bytes sent is returned. Users - /// should ensure that when the remote cannot receive, the - /// [`ErrorKind::WouldBlock`] is properly handled. - /// - /// [`ErrorKind::WouldBlock`]: std::io::ErrorKind::WouldBlock - pub fn try_send(&self, buf: &[u8]) -> io::Result { - self.io.get_ref().send(buf) - } - - // Poll IO functions that takes `&self` are provided for the split API. - // - // They are not public because (taken from the doc of `PollEvented`): - // - // While `PollEvented` is `Sync` (if the underlying I/O type is `Sync`), the - // caller must ensure that there are at most two tasks that use a - // `PollEvented` instance concurrently. One for reading and one for writing. - // While violating this requirement is "safe" from a Rust memory model point - // of view, it will result in unexpected behavior in the form of lost - // notifications and tasks hanging. 
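The comment block above is the rationale for the `&self` poll functions that back the split halves. As a minimal usage sketch of the split API documented in this file, assuming the tokio 0.2 interface and with illustrative addresses and payloads, `split` consumes the socket, the two halves can be driven independently, and `reunite` recovers the original `UdpSocket`:

```
use tokio::net::UdpSocket;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Bind two loopback sockets; the OS picks the ports.
    let socket = UdpSocket::bind("127.0.0.1:0").await?;
    let mut peer = UdpSocket::bind("127.0.0.1:0").await?;
    let peer_addr = peer.local_addr()?;

    // Connect so that `send`/`recv` (rather than `send_to`/`recv_from`) apply.
    socket.connect(peer_addr).await?;

    // `split` consumes the socket; each half could be moved into its own task.
    let (mut recv_half, mut send_half) = socket.split();

    send_half.send(b"ping").await?;

    let mut buf = [0u8; 4];
    let (n, from) = peer.recv_from(&mut buf).await?;
    assert_eq!(&buf[..n], b"ping");
    peer.send_to(b"pong", from).await?;

    let n = recv_half.recv(&mut buf).await?;
    assert_eq!(&buf[..n], b"pong");

    // Halves originating from the same `split` call can be put back together.
    let _socket = recv_half.reunite(send_half).expect("halves from the same socket");
    Ok(())
}
```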
- #[doc(hidden)] - pub fn poll_send(&self, cx: &mut Context<'_>, buf: &[u8]) -> Poll> { - ready!(self.io.poll_write_ready(cx))?; - - match self.io.get_ref().send(buf) { - Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - self.io.clear_write_ready(cx)?; - Poll::Pending - } - x => Poll::Ready(x), - } - } - - /// Returns a future that receives a single datagram message on the socket from - /// the remote address to which it is connected. On success, the future will resolve - /// to the number of bytes read. - /// - /// The function must be called with valid byte array `buf` of sufficient size to - /// hold the message bytes. If a message is too long to fit in the supplied buffer, - /// excess bytes may be discarded. - /// - /// The [`connect`] method will connect this socket to a remote address. The future - /// will fail if the socket is not connected. - /// - /// [`connect`]: method@Self::connect - pub async fn recv(&mut self, buf: &mut [u8]) -> io::Result { - poll_fn(|cx| self.poll_recv(cx, buf)).await - } - - #[doc(hidden)] - pub fn poll_recv(&self, cx: &mut Context<'_>, buf: &mut [u8]) -> Poll> { - ready!(self.io.poll_read_ready(cx, mio::Ready::readable()))?; - - match self.io.get_ref().recv(buf) { - Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - self.io.clear_read_ready(cx, mio::Ready::readable())?; - Poll::Pending - } - x => Poll::Ready(x), - } - } - - /// Returns a future that sends data on the socket to the given address. - /// On success, the future will resolve to the number of bytes written. - /// - /// The future will resolve to an error if the IP version of the socket does - /// not match that of `target`. - pub async fn send_to(&mut self, buf: &[u8], target: A) -> io::Result { - let mut addrs = target.to_socket_addrs().await?; - - match addrs.next() { - Some(target) => poll_fn(|cx| self.poll_send_to(cx, buf, &target)).await, - None => Err(io::Error::new( - io::ErrorKind::InvalidInput, - "no addresses to send data to", - )), - } - } - - /// Try to send data on the socket to the given address. - /// - /// # Returns - /// - /// If successfull, the future resolves to the number of bytes sent. - /// - /// Users should ensure that when the remote cannot receive, the - /// [`ErrorKind::WouldBlock`] is properly handled. An error can also occur - /// if the IP version of the socket does not match that of `target`. - /// - /// [`ErrorKind::WouldBlock`]: std::io::ErrorKind::WouldBlock - pub fn try_send_to(&self, buf: &[u8], target: SocketAddr) -> io::Result { - self.io.get_ref().send_to(buf, &target) - } - - // TODO: Public or not? - #[doc(hidden)] - pub fn poll_send_to( - &self, - cx: &mut Context<'_>, - buf: &[u8], - target: &SocketAddr, - ) -> Poll> { - ready!(self.io.poll_write_ready(cx))?; - - match self.io.get_ref().send_to(buf, target) { - Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - self.io.clear_write_ready(cx)?; - Poll::Pending - } - x => Poll::Ready(x), - } - } - - /// Returns a future that receives a single datagram on the socket. On success, - /// the future resolves to the number of bytes read and the origin. - /// - /// The function must be called with valid byte array `buf` of sufficient size - /// to hold the message bytes. If a message is too long to fit in the supplied - /// buffer, excess bytes may be discarded. 
- pub async fn recv_from(&mut self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> { - poll_fn(|cx| self.poll_recv_from(cx, buf)).await - } - - #[doc(hidden)] - pub fn poll_recv_from( - &self, - cx: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { - ready!(self.io.poll_read_ready(cx, mio::Ready::readable()))?; - - match self.io.get_ref().recv_from(buf) { - Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - self.io.clear_read_ready(cx, mio::Ready::readable())?; - Poll::Pending - } - x => Poll::Ready(x), - } - } - - /// Gets the value of the `SO_BROADCAST` option for this socket. - /// - /// For more information about this option, see [`set_broadcast`]. - /// - /// [`set_broadcast`]: method@Self::set_broadcast - pub fn broadcast(&self) -> io::Result { - self.io.get_ref().broadcast() - } - - /// Sets the value of the `SO_BROADCAST` option for this socket. - /// - /// When enabled, this socket is allowed to send packets to a broadcast - /// address. - pub fn set_broadcast(&self, on: bool) -> io::Result<()> { - self.io.get_ref().set_broadcast(on) - } - - /// Gets the value of the `IP_MULTICAST_LOOP` option for this socket. - /// - /// For more information about this option, see [`set_multicast_loop_v4`]. - /// - /// [`set_multicast_loop_v4`]: method@Self::set_multicast_loop_v4 - pub fn multicast_loop_v4(&self) -> io::Result { - self.io.get_ref().multicast_loop_v4() - } - - /// Sets the value of the `IP_MULTICAST_LOOP` option for this socket. - /// - /// If enabled, multicast packets will be looped back to the local socket. - /// - /// # Note - /// - /// This may not have any affect on IPv6 sockets. - pub fn set_multicast_loop_v4(&self, on: bool) -> io::Result<()> { - self.io.get_ref().set_multicast_loop_v4(on) - } - - /// Gets the value of the `IP_MULTICAST_TTL` option for this socket. - /// - /// For more information about this option, see [`set_multicast_ttl_v4`]. - /// - /// [`set_multicast_ttl_v4`]: method@Self::set_multicast_ttl_v4 - pub fn multicast_ttl_v4(&self) -> io::Result { - self.io.get_ref().multicast_ttl_v4() - } - - /// Sets the value of the `IP_MULTICAST_TTL` option for this socket. - /// - /// Indicates the time-to-live value of outgoing multicast packets for - /// this socket. The default value is 1 which means that multicast packets - /// don't leave the local network unless explicitly requested. - /// - /// # Note - /// - /// This may not have any affect on IPv6 sockets. - pub fn set_multicast_ttl_v4(&self, ttl: u32) -> io::Result<()> { - self.io.get_ref().set_multicast_ttl_v4(ttl) - } - - /// Gets the value of the `IPV6_MULTICAST_LOOP` option for this socket. - /// - /// For more information about this option, see [`set_multicast_loop_v6`]. - /// - /// [`set_multicast_loop_v6`]: method@Self::set_multicast_loop_v6 - pub fn multicast_loop_v6(&self) -> io::Result { - self.io.get_ref().multicast_loop_v6() - } - - /// Sets the value of the `IPV6_MULTICAST_LOOP` option for this socket. - /// - /// Controls whether this socket sees the multicast packets it sends itself. - /// - /// # Note - /// - /// This may not have any affect on IPv4 sockets. - pub fn set_multicast_loop_v6(&self, on: bool) -> io::Result<()> { - self.io.get_ref().set_multicast_loop_v6(on) - } - - /// Gets the value of the `IP_TTL` option for this socket. - /// - /// For more information about this option, see [`set_ttl`]. - /// - /// [`set_ttl`]: method@Self::set_ttl - pub fn ttl(&self) -> io::Result { - self.io.get_ref().ttl() - } - - /// Sets the value for the `IP_TTL` option on this socket. 
- /// - /// This value sets the time-to-live field that is used in every packet sent - /// from this socket. - pub fn set_ttl(&self, ttl: u32) -> io::Result<()> { - self.io.get_ref().set_ttl(ttl) - } - - /// Executes an operation of the `IP_ADD_MEMBERSHIP` type. - /// - /// This function specifies a new multicast group for this socket to join. - /// The address must be a valid multicast address, and `interface` is the - /// address of the local interface with which the system should join the - /// multicast group. If it's equal to `INADDR_ANY` then an appropriate - /// interface is chosen by the system. - pub fn join_multicast_v4(&self, multiaddr: Ipv4Addr, interface: Ipv4Addr) -> io::Result<()> { - self.io.get_ref().join_multicast_v4(&multiaddr, &interface) - } - - /// Executes an operation of the `IPV6_ADD_MEMBERSHIP` type. - /// - /// This function specifies a new multicast group for this socket to join. - /// The address must be a valid multicast address, and `interface` is the - /// index of the interface to join/leave (or 0 to indicate any interface). - pub fn join_multicast_v6(&self, multiaddr: &Ipv6Addr, interface: u32) -> io::Result<()> { - self.io.get_ref().join_multicast_v6(multiaddr, interface) - } - - /// Executes an operation of the `IP_DROP_MEMBERSHIP` type. - /// - /// For more information about this option, see [`join_multicast_v4`]. - /// - /// [`join_multicast_v4`]: method@Self::join_multicast_v4 - pub fn leave_multicast_v4(&self, multiaddr: Ipv4Addr, interface: Ipv4Addr) -> io::Result<()> { - self.io.get_ref().leave_multicast_v4(&multiaddr, &interface) - } - - /// Executes an operation of the `IPV6_DROP_MEMBERSHIP` type. - /// - /// For more information about this option, see [`join_multicast_v6`]. - /// - /// [`join_multicast_v6`]: method@Self::join_multicast_v6 - pub fn leave_multicast_v6(&self, multiaddr: &Ipv6Addr, interface: u32) -> io::Result<()> { - self.io.get_ref().leave_multicast_v6(multiaddr, interface) - } -} - -impl TryFrom for mio::net::UdpSocket { - type Error = io::Error; - - /// Consumes value, returning the mio I/O object. - /// - /// See [`PollEvented::into_inner`] for more details about - /// resource deregistration that happens during the call. - /// - /// [`PollEvented::into_inner`]: crate::io::PollEvented::into_inner - fn try_from(value: UdpSocket) -> Result { - value.io.into_inner() - } -} - -impl TryFrom for UdpSocket { - type Error = io::Error; - - /// Consumes stream, returning the tokio I/O object. - /// - /// This is equivalent to - /// [`UdpSocket::from_std(stream)`](UdpSocket::from_std). - fn try_from(stream: net::UdpSocket) -> Result { - Self::from_std(stream) - } -} - -impl fmt::Debug for UdpSocket { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.io.get_ref().fmt(f) - } -} - -#[cfg(all(unix))] -mod sys { - use super::UdpSocket; - use std::os::unix::prelude::*; - - impl AsRawFd for UdpSocket { - fn as_raw_fd(&self) -> RawFd { - self.io.get_ref().as_raw_fd() - } - } -} - -#[cfg(windows)] -mod sys { - // TODO: let's land these upstream with mio and then we can add them here. 
- // - // use std::os::windows::prelude::*; - // use super::UdpSocket; - // - // impl AsRawHandle for UdpSocket { - // fn as_raw_handle(&self) -> RawHandle { - // self.io.get_ref().as_raw_handle() - // } - // } -} diff --git a/third_party/rust/tokio-0.2.25/src/net/udp/split.rs b/third_party/rust/tokio-0.2.25/src/net/udp/split.rs deleted file mode 100644 index 8d87f1c7c677..000000000000 --- a/third_party/rust/tokio-0.2.25/src/net/udp/split.rs +++ /dev/null @@ -1,148 +0,0 @@ -//! [`UdpSocket`](crate::net::UdpSocket) split support. -//! -//! The [`split`](method@crate::net::UdpSocket::split) method splits a -//! `UdpSocket` into a receive half and a send half, which can be used to -//! receive and send datagrams concurrently, even from two different tasks. -//! -//! The halves provide access to the underlying socket, implementing -//! `AsRef`. This allows you to call `UdpSocket` methods that takes -//! `&self`, e.g., to get local address, to get and set socket options, to join -//! or leave multicast groups, etc. -//! -//! The halves can be reunited to the original socket with their `reunite` -//! methods. - -use crate::future::poll_fn; -use crate::net::udp::UdpSocket; - -use std::error::Error; -use std::fmt; -use std::io; -use std::net::SocketAddr; -use std::sync::Arc; - -/// The send half after [`split`](super::UdpSocket::split). -/// -/// Use [`send_to`](method@Self::send_to) or [`send`](method@Self::send) to send -/// datagrams. -#[derive(Debug)] -pub struct SendHalf(Arc); - -/// The recv half after [`split`](super::UdpSocket::split). -/// -/// Use [`recv_from`](method@Self::recv_from) or [`recv`](method@Self::recv) to receive -/// datagrams. -#[derive(Debug)] -pub struct RecvHalf(Arc); - -pub(crate) fn split(socket: UdpSocket) -> (RecvHalf, SendHalf) { - let shared = Arc::new(socket); - let send = shared.clone(); - let recv = shared; - (RecvHalf(recv), SendHalf(send)) -} - -/// Error indicating that two halves were not from the same socket, and thus could -/// not be `reunite`d. -#[derive(Debug)] -pub struct ReuniteError(pub SendHalf, pub RecvHalf); - -impl fmt::Display for ReuniteError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!( - f, - "tried to reunite halves that are not from the same socket" - ) - } -} - -impl Error for ReuniteError {} - -fn reunite(s: SendHalf, r: RecvHalf) -> Result { - if Arc::ptr_eq(&s.0, &r.0) { - drop(r); - // Only two instances of the `Arc` are ever created, one for the - // receiver and one for the sender, and those `Arc`s are never exposed - // externally. And so when we drop one here, the other one must be the - // only remaining one. - Ok(Arc::try_unwrap(s.0).expect("udp: try_unwrap failed in reunite")) - } else { - Err(ReuniteError(s, r)) - } -} - -impl RecvHalf { - /// Attempts to put the two "halves" of a `UdpSocket` back together and - /// recover the original socket. Succeeds only if the two "halves" - /// originated from the same call to `UdpSocket::split`. - pub fn reunite(self, other: SendHalf) -> Result { - reunite(other, self) - } - - /// Returns a future that receives a single datagram on the socket. On success, - /// the future resolves to the number of bytes read and the origin. - /// - /// The function must be called with valid byte array `buf` of sufficient size - /// to hold the message bytes. If a message is too long to fit in the supplied - /// buffer, excess bytes may be discarded. 
- pub async fn recv_from(&mut self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> { - poll_fn(|cx| self.0.poll_recv_from(cx, buf)).await - } - - /// Returns a future that receives a single datagram message on the socket from - /// the remote address to which it is connected. On success, the future will resolve - /// to the number of bytes read. - /// - /// The function must be called with valid byte array `buf` of sufficient size to - /// hold the message bytes. If a message is too long to fit in the supplied buffer, - /// excess bytes may be discarded. - /// - /// The [`connect`] method will connect this socket to a remote address. The future - /// will fail if the socket is not connected. - /// - /// [`connect`]: super::UdpSocket::connect - pub async fn recv(&mut self, buf: &mut [u8]) -> io::Result { - poll_fn(|cx| self.0.poll_recv(cx, buf)).await - } -} - -impl SendHalf { - /// Attempts to put the two "halves" of a `UdpSocket` back together and - /// recover the original socket. Succeeds only if the two "halves" - /// originated from the same call to `UdpSocket::split`. - pub fn reunite(self, other: RecvHalf) -> Result { - reunite(self, other) - } - - /// Returns a future that sends data on the socket to the given address. - /// On success, the future will resolve to the number of bytes written. - /// - /// The future will resolve to an error if the IP version of the socket does - /// not match that of `target`. - pub async fn send_to(&mut self, buf: &[u8], target: &SocketAddr) -> io::Result { - poll_fn(|cx| self.0.poll_send_to(cx, buf, target)).await - } - - /// Returns a future that sends data on the socket to the remote address to which it is connected. - /// On success, the future will resolve to the number of bytes written. - /// - /// The [`connect`] method will connect this socket to a remote address. The future - /// will resolve to an error if the socket is not connected. - /// - /// [`connect`]: super::UdpSocket::connect - pub async fn send(&mut self, buf: &[u8]) -> io::Result { - poll_fn(|cx| self.0.poll_send(cx, buf)).await - } -} - -impl AsRef for SendHalf { - fn as_ref(&self) -> &UdpSocket { - &self.0 - } -} - -impl AsRef for RecvHalf { - fn as_ref(&self) -> &UdpSocket { - &self.0 - } -} diff --git a/third_party/rust/tokio-0.2.25/src/net/unix/datagram/mod.rs b/third_party/rust/tokio-0.2.25/src/net/unix/datagram/mod.rs deleted file mode 100644 index f484ae34a349..000000000000 --- a/third_party/rust/tokio-0.2.25/src/net/unix/datagram/mod.rs +++ /dev/null @@ -1,8 +0,0 @@ -//! Unix datagram types. - -pub(crate) mod socket; -pub(crate) mod split; -pub(crate) mod split_owned; - -pub use split::{RecvHalf, SendHalf}; -pub use split_owned::{OwnedRecvHalf, OwnedSendHalf, ReuniteError}; diff --git a/third_party/rust/tokio-0.2.25/src/net/unix/datagram/socket.rs b/third_party/rust/tokio-0.2.25/src/net/unix/datagram/socket.rs deleted file mode 100644 index a332d2afb459..000000000000 --- a/third_party/rust/tokio-0.2.25/src/net/unix/datagram/socket.rs +++ /dev/null @@ -1,350 +0,0 @@ -use crate::future::poll_fn; -use crate::io::PollEvented; -use crate::net::unix::datagram::split::{split, RecvHalf, SendHalf}; -use crate::net::unix::datagram::split_owned::{split_owned, OwnedRecvHalf, OwnedSendHalf}; - -use std::convert::TryFrom; -use std::fmt; -use std::io; -use std::net::Shutdown; -use std::os::unix::io::{AsRawFd, RawFd}; -use std::os::unix::net::{self, SocketAddr}; -use std::path::Path; -use std::task::{Context, Poll}; - -cfg_uds! 
{ - /// An I/O object representing a Unix datagram socket. - pub struct UnixDatagram { - io: PollEvented, - } -} - -impl UnixDatagram { - /// Creates a new `UnixDatagram` bound to the specified path. - pub fn bind
<P>
(path: P) -> io::Result - where - P: AsRef, - { - let socket = mio_uds::UnixDatagram::bind(path)?; - UnixDatagram::new(socket) - } - - /// Creates an unnamed pair of connected sockets. - /// - /// This function will create a pair of interconnected Unix sockets for - /// communicating back and forth between one another. Each socket will - /// be associated with the default event loop's handle. - pub fn pair() -> io::Result<(UnixDatagram, UnixDatagram)> { - let (a, b) = mio_uds::UnixDatagram::pair()?; - let a = UnixDatagram::new(a)?; - let b = UnixDatagram::new(b)?; - - Ok((a, b)) - } - - /// Consumes a `UnixDatagram` in the standard library and returns a - /// nonblocking `UnixDatagram` from this crate. - /// - /// The returned datagram will be associated with the given event loop - /// specified by `handle` and is ready to perform I/O. - /// - /// # Panics - /// - /// This function panics if thread-local runtime is not set. - /// - /// The runtime is usually set implicitly when this function is called - /// from a future driven by a tokio runtime, otherwise runtime can be set - /// explicitly with [`Handle::enter`](crate::runtime::Handle::enter) function. - pub fn from_std(datagram: net::UnixDatagram) -> io::Result { - let socket = mio_uds::UnixDatagram::from_datagram(datagram)?; - let io = PollEvented::new(socket)?; - Ok(UnixDatagram { io }) - } - - fn new(socket: mio_uds::UnixDatagram) -> io::Result { - let io = PollEvented::new(socket)?; - Ok(UnixDatagram { io }) - } - - /// Creates a new `UnixDatagram` which is not bound to any address. - pub fn unbound() -> io::Result { - let socket = mio_uds::UnixDatagram::unbound()?; - UnixDatagram::new(socket) - } - - /// Connects the socket to the specified address. - /// - /// The `send` method may be used to send data to the specified address. - /// `recv` and `recv_from` will only receive data from that address. - pub fn connect>(&self, path: P) -> io::Result<()> { - self.io.get_ref().connect(path) - } - - /// Sends data on the socket to the socket's peer. - pub async fn send(&mut self, buf: &[u8]) -> io::Result { - poll_fn(|cx| self.poll_send_priv(cx, buf)).await - } - - /// Try to send a datagram to the peer without waiting. - /// - /// ``` - /// # #[tokio::main] - /// # async fn main() -> Result<(), Box> { - /// use tokio::net::UnixDatagram; - /// - /// let bytes = b"bytes"; - /// // We use a socket pair so that they are assigned - /// // each other as a peer. - /// let (mut first, mut second) = UnixDatagram::pair()?; - /// - /// let size = first.try_send(bytes)?; - /// assert_eq!(size, bytes.len()); - /// - /// let mut buffer = vec![0u8; 24]; - /// let size = second.try_recv(&mut buffer)?; - /// - /// let dgram = &buffer.as_slice()[..size]; - /// assert_eq!(dgram, bytes); - /// # Ok(()) - /// # } - /// ``` - pub fn try_send(&mut self, buf: &[u8]) -> io::Result { - self.io.get_ref().send(buf) - } - - /// Try to send a datagram to the peer without waiting. - /// - /// ``` - /// # #[tokio::main] - /// # async fn main() -> Result<(), Box> { - /// use { - /// tokio::net::UnixDatagram, - /// tempfile::tempdir, - /// }; - /// - /// let bytes = b"bytes"; - /// // We use a temporary directory so that the socket - /// // files left by the bound sockets will get cleaned up. 
- /// let tmp = tempdir().unwrap(); - /// - /// let server_path = tmp.path().join("server"); - /// let mut server = UnixDatagram::bind(&server_path)?; - /// - /// let client_path = tmp.path().join("client"); - /// let mut client = UnixDatagram::bind(&client_path)?; - /// - /// let size = client.try_send_to(bytes, &server_path)?; - /// assert_eq!(size, bytes.len()); - /// - /// let mut buffer = vec![0u8; 24]; - /// let (size, addr) = server.try_recv_from(&mut buffer)?; - /// - /// let dgram = &buffer.as_slice()[..size]; - /// assert_eq!(dgram, bytes); - /// assert_eq!(addr.as_pathname().unwrap(), &client_path); - /// # Ok(()) - /// # } - /// ``` - pub fn try_send_to
<P>
(&mut self, buf: &[u8], target: P) -> io::Result - where - P: AsRef, - { - self.io.get_ref().send_to(buf, target) - } - - // Poll IO functions that takes `&self` are provided for the split API. - // - // They are not public because (taken from the doc of `PollEvented`): - // - // While `PollEvented` is `Sync` (if the underlying I/O type is `Sync`), the - // caller must ensure that there are at most two tasks that use a - // `PollEvented` instance concurrently. One for reading and one for writing. - // While violating this requirement is "safe" from a Rust memory model point - // of view, it will result in unexpected behavior in the form of lost - // notifications and tasks hanging. - pub(crate) fn poll_send_priv( - &self, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - ready!(self.io.poll_write_ready(cx))?; - - match self.io.get_ref().send(buf) { - Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - self.io.clear_write_ready(cx)?; - Poll::Pending - } - x => Poll::Ready(x), - } - } - - /// Receives data from the socket. - pub async fn recv(&mut self, buf: &mut [u8]) -> io::Result { - poll_fn(|cx| self.poll_recv_priv(cx, buf)).await - } - - /// Try to receive a datagram from the peer without waiting. - pub fn try_recv(&mut self, buf: &mut [u8]) -> io::Result { - self.io.get_ref().recv(buf) - } - - pub(crate) fn poll_recv_priv( - &self, - cx: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { - ready!(self.io.poll_read_ready(cx, mio::Ready::readable()))?; - - match self.io.get_ref().recv(buf) { - Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - self.io.clear_read_ready(cx, mio::Ready::readable())?; - Poll::Pending - } - x => Poll::Ready(x), - } - } - - /// Sends data on the socket to the specified address. - pub async fn send_to
<P>
(&mut self, buf: &[u8], target: P) -> io::Result - where - P: AsRef + Unpin, - { - poll_fn(|cx| self.poll_send_to_priv(cx, buf, target.as_ref())).await - } - - pub(crate) fn poll_send_to_priv( - &self, - cx: &mut Context<'_>, - buf: &[u8], - target: &Path, - ) -> Poll> { - ready!(self.io.poll_write_ready(cx))?; - - match self.io.get_ref().send_to(buf, target) { - Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - self.io.clear_write_ready(cx)?; - Poll::Pending - } - x => Poll::Ready(x), - } - } - - /// Receives data from the socket. - pub async fn recv_from(&mut self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> { - poll_fn(|cx| self.poll_recv_from_priv(cx, buf)).await - } - - /// Try to receive data from the socket without waiting. - pub fn try_recv_from(&mut self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> { - self.io.get_ref().recv_from(buf) - } - - pub(crate) fn poll_recv_from_priv( - &self, - cx: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { - ready!(self.io.poll_read_ready(cx, mio::Ready::readable()))?; - - match self.io.get_ref().recv_from(buf) { - Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - self.io.clear_read_ready(cx, mio::Ready::readable())?; - Poll::Pending - } - x => Poll::Ready(x), - } - } - - /// Returns the local address that this socket is bound to. - pub fn local_addr(&self) -> io::Result { - self.io.get_ref().local_addr() - } - - /// Returns the address of this socket's peer. - /// - /// The `connect` method will connect the socket to a peer. - pub fn peer_addr(&self) -> io::Result { - self.io.get_ref().peer_addr() - } - - /// Returns the value of the `SO_ERROR` option. - pub fn take_error(&self) -> io::Result> { - self.io.get_ref().take_error() - } - - /// Shuts down the read, write, or both halves of this connection. - /// - /// This function will cause all pending and future I/O calls on the - /// specified portions to immediately return with an appropriate value - /// (see the documentation of `Shutdown`). - pub fn shutdown(&self, how: Shutdown) -> io::Result<()> { - self.io.get_ref().shutdown(how) - } - - // These lifetime markers also appear in the generated documentation, and make - // it more clear that this is a *borrowed* split. - #[allow(clippy::needless_lifetimes)] - /// Split a `UnixDatagram` into a receive half and a send half, which can be used - /// to receive and send the datagram concurrently. - /// - /// This method is more efficient than [`into_split`], but the halves cannot - /// be moved into independently spawned tasks. - /// - /// [`into_split`]: fn@crate::net::UnixDatagram::into_split - pub fn split<'a>(&'a mut self) -> (RecvHalf<'a>, SendHalf<'a>) { - split(self) - } - - /// Split a `UnixDatagram` into a receive half and a send half, which can be used - /// to receive and send the datagram concurrently. - /// - /// Unlike [`split`], the owned halves can be moved to separate tasks, - /// however this comes at the cost of a heap allocation. - /// - /// **Note:** Dropping the write half will shut down the write half of the - /// datagram. This is equivalent to calling [`shutdown(Write)`]. - /// - /// [`split`]: fn@crate::net::UnixDatagram::split - /// [`shutdown(Write)`]:fn@crate::net::UnixDatagram::shutdown - pub fn into_split(self) -> (OwnedRecvHalf, OwnedSendHalf) { - split_owned(self) - } -} - -impl TryFrom for mio_uds::UnixDatagram { - type Error = io::Error; - - /// Consumes value, returning the mio I/O object. 
- /// - /// See [`PollEvented::into_inner`] for more details about - /// resource deregistration that happens during the call. - /// - /// [`PollEvented::into_inner`]: crate::io::PollEvented::into_inner - fn try_from(value: UnixDatagram) -> Result { - value.io.into_inner() - } -} - -impl TryFrom for UnixDatagram { - type Error = io::Error; - - /// Consumes stream, returning the tokio I/O object. - /// - /// This is equivalent to - /// [`UnixDatagram::from_std(stream)`](UnixDatagram::from_std). - fn try_from(stream: net::UnixDatagram) -> Result { - Self::from_std(stream) - } -} - -impl fmt::Debug for UnixDatagram { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.io.get_ref().fmt(f) - } -} - -impl AsRawFd for UnixDatagram { - fn as_raw_fd(&self) -> RawFd { - self.io.get_ref().as_raw_fd() - } -} diff --git a/third_party/rust/tokio-0.2.25/src/net/unix/datagram/split.rs b/third_party/rust/tokio-0.2.25/src/net/unix/datagram/split.rs deleted file mode 100644 index e42eeda88447..000000000000 --- a/third_party/rust/tokio-0.2.25/src/net/unix/datagram/split.rs +++ /dev/null @@ -1,68 +0,0 @@ -//! `UnixDatagram` split support. -//! -//! A `UnixDatagram` can be split into a `RecvHalf` and a `SendHalf` with the -//! `UnixDatagram::split` method. - -use crate::future::poll_fn; -use crate::net::UnixDatagram; - -use std::io; -use std::os::unix::net::SocketAddr; -use std::path::Path; - -/// Borrowed receive half of a [`UnixDatagram`], created by [`split`]. -/// -/// [`UnixDatagram`]: UnixDatagram -/// [`split`]: crate::net::UnixDatagram::split() -#[derive(Debug)] -pub struct RecvHalf<'a>(&'a UnixDatagram); - -/// Borrowed send half of a [`UnixDatagram`], created by [`split`]. -/// -/// [`UnixDatagram`]: UnixDatagram -/// [`split`]: crate::net::UnixDatagram::split() -#[derive(Debug)] -pub struct SendHalf<'a>(&'a UnixDatagram); - -pub(crate) fn split(stream: &mut UnixDatagram) -> (RecvHalf<'_>, SendHalf<'_>) { - (RecvHalf(&*stream), SendHalf(&*stream)) -} - -impl RecvHalf<'_> { - /// Receives data from the socket. - pub async fn recv_from(&mut self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> { - poll_fn(|cx| self.0.poll_recv_from_priv(cx, buf)).await - } - - /// Receives data from the socket. - pub async fn recv(&mut self, buf: &mut [u8]) -> io::Result { - poll_fn(|cx| self.0.poll_recv_priv(cx, buf)).await - } -} - -impl SendHalf<'_> { - /// Sends data on the socket to the specified address. - pub async fn send_to
<P>
(&mut self, buf: &[u8], target: P) -> io::Result - where - P: AsRef + Unpin, - { - poll_fn(|cx| self.0.poll_send_to_priv(cx, buf, target.as_ref())).await - } - - /// Sends data on the socket to the socket's peer. - pub async fn send(&mut self, buf: &[u8]) -> io::Result { - poll_fn(|cx| self.0.poll_send_priv(cx, buf)).await - } -} - -impl AsRef for RecvHalf<'_> { - fn as_ref(&self) -> &UnixDatagram { - self.0 - } -} - -impl AsRef for SendHalf<'_> { - fn as_ref(&self) -> &UnixDatagram { - self.0 - } -} diff --git a/third_party/rust/tokio-0.2.25/src/net/unix/datagram/split_owned.rs b/third_party/rust/tokio-0.2.25/src/net/unix/datagram/split_owned.rs deleted file mode 100644 index 699771f30e6f..000000000000 --- a/third_party/rust/tokio-0.2.25/src/net/unix/datagram/split_owned.rs +++ /dev/null @@ -1,148 +0,0 @@ -//! `UnixDatagram` owned split support. -//! -//! A `UnixDatagram` can be split into an `OwnedSendHalf` and a `OwnedRecvHalf` -//! with the `UnixDatagram::into_split` method. - -use crate::future::poll_fn; -use crate::net::UnixDatagram; - -use std::error::Error; -use std::net::Shutdown; -use std::os::unix::net::SocketAddr; -use std::path::Path; -use std::sync::Arc; -use std::{fmt, io}; - -pub(crate) fn split_owned(socket: UnixDatagram) -> (OwnedRecvHalf, OwnedSendHalf) { - let shared = Arc::new(socket); - let send = shared.clone(); - let recv = shared; - ( - OwnedRecvHalf { inner: recv }, - OwnedSendHalf { - inner: send, - shutdown_on_drop: true, - }, - ) -} - -/// Owned send half of a [`UnixDatagram`], created by [`into_split`]. -/// -/// [`UnixDatagram`]: UnixDatagram -/// [`into_split`]: UnixDatagram::into_split() -#[derive(Debug)] -pub struct OwnedSendHalf { - inner: Arc, - shutdown_on_drop: bool, -} - -/// Owned receive half of a [`UnixDatagram`], created by [`into_split`]. -/// -/// [`UnixDatagram`]: UnixDatagram -/// [`into_split`]: UnixDatagram::into_split() -#[derive(Debug)] -pub struct OwnedRecvHalf { - inner: Arc, -} - -/// Error indicating that two halves were not from the same socket, and thus could -/// not be `reunite`d. -#[derive(Debug)] -pub struct ReuniteError(pub OwnedSendHalf, pub OwnedRecvHalf); - -impl fmt::Display for ReuniteError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!( - f, - "tried to reunite halves that are not from the same socket" - ) - } -} - -impl Error for ReuniteError {} - -fn reunite(s: OwnedSendHalf, r: OwnedRecvHalf) -> Result { - if Arc::ptr_eq(&s.inner, &r.inner) { - s.forget(); - // Only two instances of the `Arc` are ever created, one for the - // receiver and one for the sender, and those `Arc`s are never exposed - // externally. And so when we drop one here, the other one must be the - // only remaining one. - Ok(Arc::try_unwrap(r.inner).expect("UnixDatagram: try_unwrap failed in reunite")) - } else { - Err(ReuniteError(s, r)) - } -} - -impl OwnedRecvHalf { - /// Attempts to put the two "halves" of a `UnixDatagram` back together and - /// recover the original socket. Succeeds only if the two "halves" - /// originated from the same call to [`into_split`]. - /// - /// [`into_split`]: UnixDatagram::into_split() - pub fn reunite(self, other: OwnedSendHalf) -> Result { - reunite(other, self) - } - - /// Receives data from the socket. - pub async fn recv_from(&mut self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> { - poll_fn(|cx| self.inner.poll_recv_from_priv(cx, buf)).await - } - - /// Receives data from the socket. 
- pub async fn recv(&mut self, buf: &mut [u8]) -> io::Result { - poll_fn(|cx| self.inner.poll_recv_priv(cx, buf)).await - } -} - -impl OwnedSendHalf { - /// Attempts to put the two "halves" of a `UnixDatagram` back together and - /// recover the original socket. Succeeds only if the two "halves" - /// originated from the same call to [`into_split`]. - /// - /// [`into_split`]: UnixDatagram::into_split() - pub fn reunite(self, other: OwnedRecvHalf) -> Result { - reunite(self, other) - } - - /// Sends data on the socket to the specified address. - pub async fn send_to
<P>
(&mut self, buf: &[u8], target: P) -> io::Result - where - P: AsRef + Unpin, - { - poll_fn(|cx| self.inner.poll_send_to_priv(cx, buf, target.as_ref())).await - } - - /// Sends data on the socket to the socket's peer. - pub async fn send(&mut self, buf: &[u8]) -> io::Result { - poll_fn(|cx| self.inner.poll_send_priv(cx, buf)).await - } - - /// Destroy the send half, but don't close the send half of the stream - /// until the receive half is dropped. If the read half has already been - /// dropped, this closes the stream. - pub fn forget(mut self) { - self.shutdown_on_drop = false; - drop(self); - } -} - -impl Drop for OwnedSendHalf { - fn drop(&mut self) { - if self.shutdown_on_drop { - let _ = self.inner.shutdown(Shutdown::Write); - } - } -} - -impl AsRef for OwnedSendHalf { - fn as_ref(&self) -> &UnixDatagram { - &self.inner - } -} - -impl AsRef for OwnedRecvHalf { - fn as_ref(&self) -> &UnixDatagram { - &self.inner - } -} diff --git a/third_party/rust/tokio-0.2.25/src/net/unix/incoming.rs b/third_party/rust/tokio-0.2.25/src/net/unix/incoming.rs deleted file mode 100644 index af4936043532..000000000000 --- a/third_party/rust/tokio-0.2.25/src/net/unix/incoming.rs +++ /dev/null @@ -1,42 +0,0 @@ -use crate::net::unix::{UnixListener, UnixStream}; - -use std::io; -use std::pin::Pin; -use std::task::{Context, Poll}; - -/// Stream of listeners -#[derive(Debug)] -#[must_use = "streams do nothing unless polled"] -pub struct Incoming<'a> { - inner: &'a mut UnixListener, -} - -impl Incoming<'_> { - pub(crate) fn new(listener: &mut UnixListener) -> Incoming<'_> { - Incoming { inner: listener } - } - - /// Attempts to poll `UnixStream` by polling inner `UnixListener` to accept - /// connection. - /// - /// If `UnixListener` isn't ready yet, `Poll::Pending` is returned and - /// current task will be notified by a waker. Otherwise `Poll::Ready` with - /// `Result` containing `UnixStream` will be returned. - pub fn poll_accept( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll> { - let (socket, _) = ready!(self.inner.poll_accept(cx))?; - Poll::Ready(Ok(socket)) - } -} - -#[cfg(feature = "stream")] -impl crate::stream::Stream for Incoming<'_> { - type Item = io::Result; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let (socket, _) = ready!(self.inner.poll_accept(cx))?; - Poll::Ready(Some(Ok(socket))) - } -} diff --git a/third_party/rust/tokio-0.2.25/src/net/unix/listener.rs b/third_party/rust/tokio-0.2.25/src/net/unix/listener.rs deleted file mode 100644 index 78954fe3854b..000000000000 --- a/third_party/rust/tokio-0.2.25/src/net/unix/listener.rs +++ /dev/null @@ -1,232 +0,0 @@ -use crate::future::poll_fn; -use crate::io::PollEvented; -use crate::net::unix::{Incoming, UnixStream}; - -use mio::Ready; -use std::convert::TryFrom; -use std::fmt; -use std::io; -use std::os::unix::io::{AsRawFd, RawFd}; -use std::os::unix::net::{self, SocketAddr}; -use std::path::Path; -use std::task::{Context, Poll}; - -cfg_uds! { - /// A Unix socket which can accept connections from other Unix sockets. - /// - /// You can accept a new connection by using the [`accept`](`UnixListener::accept`) method. Alternatively `UnixListener` - /// implements the [`Stream`](`crate::stream::Stream`) trait, which allows you to use the listener in places that want a - /// stream. The stream will never return `None` and will also not yield the peer's `SocketAddr` structure. Iterating over - /// it is equivalent to calling accept in a loop. 
- /// - /// # Errors - /// - /// Note that accepting a connection can lead to various errors and not all - /// of them are necessarily fatal ‒ for example having too many open file - /// descriptors or the other side closing the connection while it waits in - /// an accept queue. These would terminate the stream if not handled in any - /// way. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::UnixListener; - /// use tokio::stream::StreamExt; - /// - /// #[tokio::main] - /// async fn main() { - /// let mut listener = UnixListener::bind("/path/to/the/socket").unwrap(); - /// while let Some(stream) = listener.next().await { - /// match stream { - /// Ok(stream) => { - /// println!("new client!"); - /// } - /// Err(e) => { /* connection failed */ } - /// } - /// } - /// } - /// ``` - pub struct UnixListener { - io: PollEvented, - } -} - -impl UnixListener { - /// Creates a new `UnixListener` bound to the specified path. - /// - /// # Panics - /// - /// This function panics if thread-local runtime is not set. - /// - /// The runtime is usually set implicitly when this function is called - /// from a future driven by a tokio runtime, otherwise runtime can be set - /// explicitly with [`Handle::enter`](crate::runtime::Handle::enter) function. - pub fn bind
<P>
(path: P) -> io::Result - where - P: AsRef, - { - let listener = mio_uds::UnixListener::bind(path)?; - let io = PollEvented::new(listener)?; - Ok(UnixListener { io }) - } - - /// Consumes a `UnixListener` in the standard library and returns a - /// nonblocking `UnixListener` from this crate. - /// - /// The returned listener will be associated with the given event loop - /// specified by `handle` and is ready to perform I/O. - /// - /// # Panics - /// - /// This function panics if thread-local runtime is not set. - /// - /// The runtime is usually set implicitly when this function is called - /// from a future driven by a tokio runtime, otherwise runtime can be set - /// explicitly with [`Handle::enter`](crate::runtime::Handle::enter) function. - pub fn from_std(listener: net::UnixListener) -> io::Result { - let listener = mio_uds::UnixListener::from_listener(listener)?; - let io = PollEvented::new(listener)?; - Ok(UnixListener { io }) - } - - /// Returns the local socket address of this listener. - pub fn local_addr(&self) -> io::Result { - self.io.get_ref().local_addr() - } - - /// Returns the value of the `SO_ERROR` option. - pub fn take_error(&self) -> io::Result> { - self.io.get_ref().take_error() - } - - /// Accepts a new incoming connection to this listener. - pub async fn accept(&mut self) -> io::Result<(UnixStream, SocketAddr)> { - poll_fn(|cx| self.poll_accept(cx)).await - } - - /// Polls to accept a new incoming connection to this listener. - /// - /// If there is no connection to accept, `Poll::Pending` is returned and - /// the current task will be notified by a waker. - pub fn poll_accept( - &mut self, - cx: &mut Context<'_>, - ) -> Poll> { - let (io, addr) = ready!(self.poll_accept_std(cx))?; - - let io = mio_uds::UnixStream::from_stream(io)?; - Ok((UnixStream::new(io)?, addr)).into() - } - - fn poll_accept_std( - &mut self, - cx: &mut Context<'_>, - ) -> Poll> { - ready!(self.io.poll_read_ready(cx, Ready::readable()))?; - - match self.io.get_ref().accept_std() { - Ok(None) => { - self.io.clear_read_ready(cx, Ready::readable())?; - Poll::Pending - } - Ok(Some((sock, addr))) => Ok((sock, addr)).into(), - Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => { - self.io.clear_read_ready(cx, Ready::readable())?; - Poll::Pending - } - Err(err) => Err(err).into(), - } - } - - /// Returns a stream over the connections being received on this listener. - /// - /// Note that `UnixListener` also directly implements `Stream`. - /// - /// The returned stream will never return `None` and will also not yield the - /// peer's `SocketAddr` structure. Iterating over it is equivalent to - /// calling accept in a loop. - /// - /// # Errors - /// - /// Note that accepting a connection can lead to various errors and not all - /// of them are necessarily fatal ‒ for example having too many open file - /// descriptors or the other side closing the connection while it waits in - /// an accept queue. These would terminate the stream if not handled in any - /// way. 
- /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::UnixListener; - /// use tokio::stream::StreamExt; - /// - /// #[tokio::main] - /// async fn main() { - /// let mut listener = UnixListener::bind("/path/to/the/socket").unwrap(); - /// let mut incoming = listener.incoming(); - /// - /// while let Some(stream) = incoming.next().await { - /// match stream { - /// Ok(stream) => { - /// println!("new client!"); - /// } - /// Err(e) => { /* connection failed */ } - /// } - /// } - /// } - /// ``` - pub fn incoming(&mut self) -> Incoming<'_> { - Incoming::new(self) - } -} - -#[cfg(feature = "stream")] -impl crate::stream::Stream for UnixListener { - type Item = io::Result; - - fn poll_next( - mut self: std::pin::Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll> { - let (socket, _) = ready!(self.poll_accept(cx))?; - Poll::Ready(Some(Ok(socket))) - } -} - -impl TryFrom for mio_uds::UnixListener { - type Error = io::Error; - - /// Consumes value, returning the mio I/O object. - /// - /// See [`PollEvented::into_inner`] for more details about - /// resource deregistration that happens during the call. - /// - /// [`PollEvented::into_inner`]: crate::io::PollEvented::into_inner - fn try_from(value: UnixListener) -> Result { - value.io.into_inner() - } -} - -impl TryFrom for UnixListener { - type Error = io::Error; - - /// Consumes stream, returning the tokio I/O object. - /// - /// This is equivalent to - /// [`UnixListener::from_std(stream)`](UnixListener::from_std). - fn try_from(stream: net::UnixListener) -> io::Result { - Self::from_std(stream) - } -} - -impl fmt::Debug for UnixListener { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.io.get_ref().fmt(f) - } -} - -impl AsRawFd for UnixListener { - fn as_raw_fd(&self) -> RawFd { - self.io.get_ref().as_raw_fd() - } -} diff --git a/third_party/rust/tokio-0.2.25/src/net/unix/mod.rs b/third_party/rust/tokio-0.2.25/src/net/unix/mod.rs deleted file mode 100644 index b079fe04d7dd..000000000000 --- a/third_party/rust/tokio-0.2.25/src/net/unix/mod.rs +++ /dev/null @@ -1,21 +0,0 @@ -//! Unix domain socket utility types - -pub mod datagram; - -mod incoming; -pub use incoming::Incoming; - -pub(crate) mod listener; -pub(crate) use listener::UnixListener; - -mod split; -pub use split::{ReadHalf, WriteHalf}; - -mod split_owned; -pub use split_owned::{OwnedReadHalf, OwnedWriteHalf, ReuniteError}; - -pub(crate) mod stream; -pub(crate) use stream::UnixStream; - -mod ucred; -pub use ucred::UCred; diff --git a/third_party/rust/tokio-0.2.25/src/net/unix/split.rs b/third_party/rust/tokio-0.2.25/src/net/unix/split.rs deleted file mode 100644 index 4fd85774e9a0..000000000000 --- a/third_party/rust/tokio-0.2.25/src/net/unix/split.rs +++ /dev/null @@ -1,95 +0,0 @@ -//! `UnixStream` split support. -//! -//! A `UnixStream` can be split into a read half and a write half with -//! `UnixStream::split`. The read half implements `AsyncRead` while the write -//! half implements `AsyncWrite`. -//! -//! Compared to the generic split of `AsyncRead + AsyncWrite`, this specialized -//! split has no associated overhead and enforces all invariants at the type -//! level. - -use crate::io::{AsyncRead, AsyncWrite}; -use crate::net::UnixStream; - -use std::io; -use std::mem::MaybeUninit; -use std::net::Shutdown; -use std::pin::Pin; -use std::task::{Context, Poll}; - -/// Borrowed read half of a [`UnixStream`], created by [`split`]. -/// -/// Reading from a `ReadHalf` is usually done using the convenience methods found on the -/// [`AsyncReadExt`] trait. 
Examples import this trait through [the prelude]. -/// -/// [`UnixStream`]: UnixStream -/// [`split`]: UnixStream::split() -/// [`AsyncReadExt`]: trait@crate::io::AsyncReadExt -/// [the prelude]: crate::prelude -#[derive(Debug)] -pub struct ReadHalf<'a>(&'a UnixStream); - -/// Borrowed write half of a [`UnixStream`], created by [`split`]. -/// -/// Note that in the [`AsyncWrite`] implemenation of this type, [`poll_shutdown`] will -/// shut down the UnixStream stream in the write direction. -/// -/// Writing to an `WriteHalf` is usually done using the convenience methods found -/// on the [`AsyncWriteExt`] trait. Examples import this trait through [the prelude]. -/// -/// [`UnixStream`]: UnixStream -/// [`split`]: UnixStream::split() -/// [`AsyncWrite`]: trait@crate::io::AsyncWrite -/// [`poll_shutdown`]: fn@crate::io::AsyncWrite::poll_shutdown -/// [`AsyncWriteExt`]: trait@crate::io::AsyncWriteExt -/// [the prelude]: crate::prelude -#[derive(Debug)] -pub struct WriteHalf<'a>(&'a UnixStream); - -pub(crate) fn split(stream: &mut UnixStream) -> (ReadHalf<'_>, WriteHalf<'_>) { - (ReadHalf(stream), WriteHalf(stream)) -} - -impl AsyncRead for ReadHalf<'_> { - unsafe fn prepare_uninitialized_buffer(&self, _: &mut [MaybeUninit]) -> bool { - false - } - - fn poll_read( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { - self.0.poll_read_priv(cx, buf) - } -} - -impl AsyncWrite for WriteHalf<'_> { - fn poll_write( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - self.0.poll_write_priv(cx, buf) - } - - fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } - - fn poll_shutdown(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - self.0.shutdown(Shutdown::Write).into() - } -} - -impl AsRef for ReadHalf<'_> { - fn as_ref(&self) -> &UnixStream { - self.0 - } -} - -impl AsRef for WriteHalf<'_> { - fn as_ref(&self) -> &UnixStream { - self.0 - } -} diff --git a/third_party/rust/tokio-0.2.25/src/net/unix/split_owned.rs b/third_party/rust/tokio-0.2.25/src/net/unix/split_owned.rs deleted file mode 100644 index eb35304bfa26..000000000000 --- a/third_party/rust/tokio-0.2.25/src/net/unix/split_owned.rs +++ /dev/null @@ -1,187 +0,0 @@ -//! `UnixStream` owned split support. -//! -//! A `UnixStream` can be split into an `OwnedReadHalf` and a `OwnedWriteHalf` -//! with the `UnixStream::into_split` method. `OwnedReadHalf` implements -//! `AsyncRead` while `OwnedWriteHalf` implements `AsyncWrite`. -//! -//! Compared to the generic split of `AsyncRead + AsyncWrite`, this specialized -//! split has no associated overhead and enforces all invariants at the type -//! level. - -use crate::io::{AsyncRead, AsyncWrite}; -use crate::net::UnixStream; - -use std::error::Error; -use std::mem::MaybeUninit; -use std::net::Shutdown; -use std::pin::Pin; -use std::sync::Arc; -use std::task::{Context, Poll}; -use std::{fmt, io}; - -/// Owned read half of a [`UnixStream`], created by [`into_split`]. -/// -/// Reading from an `OwnedReadHalf` is usually done using the convenience methods found -/// on the [`AsyncReadExt`] trait. Examples import this trait through [the prelude]. -/// -/// [`UnixStream`]: crate::net::UnixStream -/// [`into_split`]: crate::net::UnixStream::into_split() -/// [`AsyncReadExt`]: trait@crate::io::AsyncReadExt -/// [the prelude]: crate::prelude -#[derive(Debug)] -pub struct OwnedReadHalf { - inner: Arc, -} - -/// Owned write half of a [`UnixStream`], created by [`into_split`]. 
-/// -/// Note that in the [`AsyncWrite`] implementation of this type, -/// [`poll_shutdown`] will shut down the stream in the write direction. -/// Dropping the write half will also shut down the write half of the stream. -/// -/// Writing to an `OwnedWriteHalf` is usually done using the convenience methods -/// found on the [`AsyncWriteExt`] trait. Examples import this trait through -/// [the prelude]. -/// -/// [`UnixStream`]: crate::net::UnixStream -/// [`into_split`]: crate::net::UnixStream::into_split() -/// [`AsyncWrite`]: trait@crate::io::AsyncWrite -/// [`poll_shutdown`]: fn@crate::io::AsyncWrite::poll_shutdown -/// [`AsyncWriteExt`]: trait@crate::io::AsyncWriteExt -/// [the prelude]: crate::prelude -#[derive(Debug)] -pub struct OwnedWriteHalf { - inner: Arc, - shutdown_on_drop: bool, -} - -pub(crate) fn split_owned(stream: UnixStream) -> (OwnedReadHalf, OwnedWriteHalf) { - let arc = Arc::new(stream); - let read = OwnedReadHalf { - inner: Arc::clone(&arc), - }; - let write = OwnedWriteHalf { - inner: arc, - shutdown_on_drop: true, - }; - (read, write) -} - -pub(crate) fn reunite( - read: OwnedReadHalf, - write: OwnedWriteHalf, -) -> Result { - if Arc::ptr_eq(&read.inner, &write.inner) { - write.forget(); - // This unwrap cannot fail as the api does not allow creating more than two Arcs, - // and we just dropped the other half. - Ok(Arc::try_unwrap(read.inner).expect("UnixStream: try_unwrap failed in reunite")) - } else { - Err(ReuniteError(read, write)) - } -} - -/// Error indicating that two halves were not from the same socket, and thus could -/// not be reunited. -#[derive(Debug)] -pub struct ReuniteError(pub OwnedReadHalf, pub OwnedWriteHalf); - -impl fmt::Display for ReuniteError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!( - f, - "tried to reunite halves that are not from the same socket" - ) - } -} - -impl Error for ReuniteError {} - -impl OwnedReadHalf { - /// Attempts to put the two halves of a `UnixStream` back together and - /// recover the original socket. Succeeds only if the two halves - /// originated from the same call to [`into_split`]. - /// - /// [`into_split`]: crate::net::UnixStream::into_split() - pub fn reunite(self, other: OwnedWriteHalf) -> Result { - reunite(self, other) - } -} - -impl AsyncRead for OwnedReadHalf { - unsafe fn prepare_uninitialized_buffer(&self, _: &mut [MaybeUninit]) -> bool { - false - } - - fn poll_read( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { - self.inner.poll_read_priv(cx, buf) - } -} - -impl OwnedWriteHalf { - /// Attempts to put the two halves of a `UnixStream` back together and - /// recover the original socket. Succeeds only if the two halves - /// originated from the same call to [`into_split`]. - /// - /// [`into_split`]: crate::net::UnixStream::into_split() - pub fn reunite(self, other: OwnedReadHalf) -> Result { - reunite(other, self) - } - - /// Destroy the write half, but don't close the write half of the stream - /// until the read half is dropped. If the read half has already been - /// dropped, this closes the stream. 
- pub fn forget(mut self) { - self.shutdown_on_drop = false; - drop(self); - } -} - -impl Drop for OwnedWriteHalf { - fn drop(&mut self) { - if self.shutdown_on_drop { - let _ = self.inner.shutdown(Shutdown::Write); - } - } -} - -impl AsyncWrite for OwnedWriteHalf { - fn poll_write( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - self.inner.poll_write_priv(cx, buf) - } - - #[inline] - fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - // flush is a no-op - Poll::Ready(Ok(())) - } - - // `poll_shutdown` on a write half shutdowns the stream in the "write" direction. - fn poll_shutdown(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - let res = self.inner.shutdown(Shutdown::Write); - if res.is_ok() { - Pin::into_inner(self).shutdown_on_drop = false; - } - res.into() - } -} - -impl AsRef for OwnedReadHalf { - fn as_ref(&self) -> &UnixStream { - &*self.inner - } -} - -impl AsRef for OwnedWriteHalf { - fn as_ref(&self) -> &UnixStream { - &*self.inner - } -} diff --git a/third_party/rust/tokio-0.2.25/src/net/unix/stream.rs b/third_party/rust/tokio-0.2.25/src/net/unix/stream.rs deleted file mode 100644 index 5fe242d0887c..000000000000 --- a/third_party/rust/tokio-0.2.25/src/net/unix/stream.rs +++ /dev/null @@ -1,257 +0,0 @@ -use crate::future::poll_fn; -use crate::io::{AsyncRead, AsyncWrite, PollEvented}; -use crate::net::unix::split::{split, ReadHalf, WriteHalf}; -use crate::net::unix::split_owned::{split_owned, OwnedReadHalf, OwnedWriteHalf}; -use crate::net::unix::ucred::{self, UCred}; - -use std::convert::TryFrom; -use std::fmt; -use std::io::{self, Read, Write}; -use std::mem::MaybeUninit; -use std::net::Shutdown; -use std::os::unix::io::{AsRawFd, RawFd}; -use std::os::unix::net::{self, SocketAddr}; -use std::path::Path; -use std::pin::Pin; -use std::task::{Context, Poll}; - -cfg_uds! { - /// A structure representing a connected Unix socket. - /// - /// This socket can be connected directly with `UnixStream::connect` or accepted - /// from a listener with `UnixListener::incoming`. Additionally, a pair of - /// anonymous Unix sockets can be created with `UnixStream::pair`. - pub struct UnixStream { - io: PollEvented, - } -} - -impl UnixStream { - /// Connects to the socket named by `path`. - /// - /// This function will create a new Unix socket and connect to the path - /// specified, associating the returned stream with the default event loop's - /// handle. - pub async fn connect

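The borrowed and owned split APIs deleted above differ mainly in ownership: `split` borrows the stream for the lifetime of the halves, while `into_split` moves it into two `Arc`-backed halves that can later be put back together with `reunite`. A minimal usage sketch against the tokio 0.2 API (Unix only; the socket path and the "uds"/"io-util" feature set are assumptions):

```rust
// Sketch only: assumes tokio 0.2 with the "uds" and "io-util" features;
// /tmp/example.sock is a placeholder path.
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::UnixStream;

async fn ping() -> std::io::Result<()> {
    let mut stream = UnixStream::connect("/tmp/example.sock").await?;

    // Borrowed split: the halves live only as long as the borrow of `stream`.
    let (mut rd, mut wr) = stream.split();
    wr.write_all(b"ping").await?;
    let mut buf = [0u8; 4];
    rd.read_exact(&mut buf).await?;

    // Owned split: the halves can move to different tasks and be reunited later.
    let (rd, wr) = stream.into_split();
    let _stream = rd.reunite(wr).expect("halves of the same stream");
    Ok(())
}
```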
-where - P: Park, -{ - /// Scheduler run queue - /// - /// When the scheduler is executed, the queue is removed from `self` and - /// moved into `Context`. - /// - /// This indirection is to allow `BasicScheduler` to be `Send`. - tasks: Option, - - /// Sendable task spawner - spawner: Spawner, - - /// Current tick - tick: u8, - - /// Thread park handle - park: P, -} - -#[derive(Clone)] -pub(crate) struct Spawner { - shared: Arc, -} - -struct Tasks { - /// Collection of all active tasks spawned onto this executor. - owned: LinkedList>>, - - /// Local run queue. - /// - /// Tasks notified from the current thread are pushed into this queue. - queue: VecDeque>>, -} - -/// Scheduler state shared between threads. -struct Shared { - /// Remote run queue - queue: Mutex>>>, - - /// Unpark the blocked thread - unpark: Box, -} - -/// Thread-local context -struct Context { - /// Shared scheduler state - shared: Arc, - - /// Local queue - tasks: RefCell, -} - -/// Initial queue capacity -const INITIAL_CAPACITY: usize = 64; - -/// Max number of tasks to poll per tick. -const MAX_TASKS_PER_TICK: usize = 61; - -/// How often ot check the remote queue first -const REMOTE_FIRST_INTERVAL: u8 = 31; - -// Tracks the current BasicScheduler -scoped_thread_local!(static CURRENT: Context); - -impl
<P> BasicScheduler<P>
-where - P: Park, -{ - pub(crate) fn new(park: P) -> BasicScheduler
<P>
{ - let unpark = Box::new(park.unpark()); - - BasicScheduler { - tasks: Some(Tasks { - owned: LinkedList::new(), - queue: VecDeque::with_capacity(INITIAL_CAPACITY), - }), - spawner: Spawner { - shared: Arc::new(Shared { - queue: Mutex::new(VecDeque::with_capacity(INITIAL_CAPACITY)), - unpark: unpark as Box, - }), - }, - tick: 0, - park, - } - } - - pub(crate) fn spawner(&self) -> &Spawner { - &self.spawner - } - - /// Spawns a future onto the thread pool - pub(crate) fn spawn(&self, future: F) -> JoinHandle - where - F: Future + Send + 'static, - F::Output: Send + 'static, - { - self.spawner.spawn(future) - } - - pub(crate) fn block_on(&mut self, future: F) -> F::Output - where - F: Future, - { - enter(self, |scheduler, context| { - let _enter = runtime::enter(false); - let waker = waker_ref(&scheduler.spawner.shared); - let mut cx = std::task::Context::from_waker(&waker); - - pin!(future); - - 'outer: loop { - if let Ready(v) = crate::coop::budget(|| future.as_mut().poll(&mut cx)) { - return v; - } - - for _ in 0..MAX_TASKS_PER_TICK { - // Get and increment the current tick - let tick = scheduler.tick; - scheduler.tick = scheduler.tick.wrapping_add(1); - - let next = if tick % REMOTE_FIRST_INTERVAL == 0 { - scheduler - .spawner - .pop() - .or_else(|| context.tasks.borrow_mut().queue.pop_front()) - } else { - context - .tasks - .borrow_mut() - .queue - .pop_front() - .or_else(|| scheduler.spawner.pop()) - }; - - match next { - Some(task) => crate::coop::budget(|| task.run()), - None => { - // Park until the thread is signaled - scheduler.park.park().ok().expect("failed to park"); - - // Try polling the `block_on` future next - continue 'outer; - } - } - } - - // Yield to the park, this drives the timer and pulls any pending - // I/O events. - scheduler - .park - .park_timeout(Duration::from_millis(0)) - .ok() - .expect("failed to park"); - } - }) - } -} - -/// Enter the scheduler context. This sets the queue and other necessary -/// scheduler state in the thread-local -fn enter(scheduler: &mut BasicScheduler
<P>
, f: F) -> R -where - F: FnOnce(&mut BasicScheduler
<P>
, &Context) -> R, - P: Park, -{ - // Ensures the run queue is placed back in the `BasicScheduler` instance - // once `block_on` returns. - struct Guard<'a, P: Park> { - context: Option<Context>, - scheduler: &'a mut BasicScheduler
<P>
, - } - - impl Drop for Guard<'_, P> { - fn drop(&mut self) { - let Context { tasks, .. } = self.context.take().expect("context missing"); - self.scheduler.tasks = Some(tasks.into_inner()); - } - } - - // Remove `tasks` from `self` and place it in a `Context`. - let tasks = scheduler.tasks.take().expect("invalid state"); - - let guard = Guard { - context: Some(Context { - shared: scheduler.spawner.shared.clone(), - tasks: RefCell::new(tasks), - }), - scheduler, - }; - - let context = guard.context.as_ref().unwrap(); - let scheduler = &mut *guard.scheduler; - - CURRENT.set(context, || f(scheduler, context)) -} - -impl
<P> Drop for BasicScheduler<P>
-where - P: Park, -{ - fn drop(&mut self) { - enter(self, |scheduler, context| { - // Loop required here to ensure borrow is dropped between iterations - #[allow(clippy::while_let_loop)] - loop { - let task = match context.tasks.borrow_mut().owned.pop_back() { - Some(task) => task, - None => break, - }; - - task.shutdown(); - } - - // Drain local queue - for task in context.tasks.borrow_mut().queue.drain(..) { - task.shutdown(); - } - - // Drain remote queue - for task in scheduler.spawner.shared.queue.lock().unwrap().drain(..) { - task.shutdown(); - } - - assert!(context.tasks.borrow().owned.is_empty()); - }); - } -} - -impl fmt::Debug for BasicScheduler
<P>
{ - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("BasicScheduler").finish() - } -} - -// ===== impl Spawner ===== - -impl Spawner { - /// Spawns a future onto the thread pool - pub(crate) fn spawn(&self, future: F) -> JoinHandle - where - F: Future + Send + 'static, - F::Output: Send + 'static, - { - let (task, handle) = task::joinable(future); - self.shared.schedule(task); - handle - } - - fn pop(&self) -> Option>> { - self.shared.queue.lock().unwrap().pop_front() - } -} - -impl fmt::Debug for Spawner { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("Spawner").finish() - } -} - -// ===== impl Shared ===== - -impl Schedule for Arc { - fn bind(task: Task) -> Arc { - CURRENT.with(|maybe_cx| { - let cx = maybe_cx.expect("scheduler context missing"); - cx.tasks.borrow_mut().owned.push_front(task); - cx.shared.clone() - }) - } - - fn release(&self, task: &Task) -> Option> { - use std::ptr::NonNull; - - CURRENT.with(|maybe_cx| { - let cx = maybe_cx.expect("scheduler context missing"); - - // safety: the task is inserted in the list in `bind`. - unsafe { - let ptr = NonNull::from(task.header()); - cx.tasks.borrow_mut().owned.remove(ptr) - } - }) - } - - fn schedule(&self, task: task::Notified) { - CURRENT.with(|maybe_cx| match maybe_cx { - Some(cx) if Arc::ptr_eq(self, &cx.shared) => { - cx.tasks.borrow_mut().queue.push_back(task); - } - _ => { - self.queue.lock().unwrap().push_back(task); - self.unpark.unpark(); - } - }); - } -} - -impl Wake for Shared { - fn wake(self: Arc) { - Wake::wake_by_ref(&self) - } - - /// Wake by reference - fn wake_by_ref(arc_self: &Arc) { - arc_self.unpark.unpark(); - } -} diff --git a/third_party/rust/tokio-0.2.25/src/runtime/blocking/mod.rs b/third_party/rust/tokio-0.2.25/src/runtime/blocking/mod.rs deleted file mode 100644 index 0b36a75f6558..000000000000 --- a/third_party/rust/tokio-0.2.25/src/runtime/blocking/mod.rs +++ /dev/null @@ -1,43 +0,0 @@ -//! Abstracts out the APIs necessary to `Runtime` for integrating the blocking -//! pool. When the `blocking` feature flag is **not** enabled, these APIs are -//! shells. This isolates the complexity of dealing with conditional -//! compilation. - -cfg_blocking_impl! { - mod pool; - pub(crate) use pool::{spawn_blocking, try_spawn_blocking, BlockingPool, Spawner}; - - mod schedule; - mod shutdown; - pub(crate) mod task; - - use crate::runtime::Builder; - - pub(crate) fn create_blocking_pool(builder: &Builder, thread_cap: usize) -> BlockingPool { - BlockingPool::new(builder, thread_cap) - - } -} - -cfg_not_blocking_impl! { - use crate::runtime::Builder; - use std::time::Duration; - - #[derive(Debug, Clone)] - pub(crate) struct BlockingPool {} - - pub(crate) use BlockingPool as Spawner; - - pub(crate) fn create_blocking_pool(_builder: &Builder, _thread_cap: usize) -> BlockingPool { - BlockingPool {} - } - - impl BlockingPool { - pub(crate) fn spawner(&self) -> &BlockingPool { - self - } - - pub(crate) fn shutdown(&mut self, _duration: Option) { - } - } -} diff --git a/third_party/rust/tokio-0.2.25/src/runtime/blocking/pool.rs b/third_party/rust/tokio-0.2.25/src/runtime/blocking/pool.rs deleted file mode 100644 index c5d464c8571e..000000000000 --- a/third_party/rust/tokio-0.2.25/src/runtime/blocking/pool.rs +++ /dev/null @@ -1,329 +0,0 @@ -//! 
Thread pool for blocking operations - -use crate::loom::sync::{Arc, Condvar, Mutex}; -use crate::loom::thread; -use crate::runtime::blocking::schedule::NoopSchedule; -use crate::runtime::blocking::shutdown; -use crate::runtime::blocking::task::BlockingTask; -use crate::runtime::task::{self, JoinHandle}; -use crate::runtime::{Builder, Callback, Handle}; - -use slab::Slab; - -use std::collections::VecDeque; -use std::fmt; -use std::time::Duration; - -pub(crate) struct BlockingPool { - spawner: Spawner, - shutdown_rx: shutdown::Receiver, -} - -#[derive(Clone)] -pub(crate) struct Spawner { - inner: Arc, -} - -struct Inner { - /// State shared between worker threads - shared: Mutex, - - /// Pool threads wait on this. - condvar: Condvar, - - /// Spawned threads use this name - thread_name: String, - - /// Spawned thread stack size - stack_size: Option, - - /// Call after a thread starts - after_start: Option, - - /// Call before a thread stops - before_stop: Option, - - // Maximum number of threads - thread_cap: usize, -} - -struct Shared { - queue: VecDeque, - num_th: usize, - num_idle: u32, - num_notify: u32, - shutdown: bool, - shutdown_tx: Option, - worker_threads: Slab>, -} - -type Task = task::Notified; - -const KEEP_ALIVE: Duration = Duration::from_secs(10); - -/// Run the provided function on an executor dedicated to blocking operations. -pub(crate) fn spawn_blocking(func: F) -> JoinHandle -where - F: FnOnce() -> R + Send + 'static, -{ - let rt = Handle::current(); - - let (task, handle) = task::joinable(BlockingTask::new(func)); - let _ = rt.blocking_spawner.spawn(task, &rt); - handle -} - -#[allow(dead_code)] -pub(crate) fn try_spawn_blocking(func: F) -> Result<(), ()> -where - F: FnOnce() -> R + Send + 'static, -{ - let rt = Handle::current(); - - let (task, _handle) = task::joinable(BlockingTask::new(func)); - rt.blocking_spawner.spawn(task, &rt) -} - -// ===== impl BlockingPool ===== - -impl BlockingPool { - pub(crate) fn new(builder: &Builder, thread_cap: usize) -> BlockingPool { - let (shutdown_tx, shutdown_rx) = shutdown::channel(); - - BlockingPool { - spawner: Spawner { - inner: Arc::new(Inner { - shared: Mutex::new(Shared { - queue: VecDeque::new(), - num_th: 0, - num_idle: 0, - num_notify: 0, - shutdown: false, - shutdown_tx: Some(shutdown_tx), - worker_threads: Slab::new(), - }), - condvar: Condvar::new(), - thread_name: builder.thread_name.clone(), - stack_size: builder.thread_stack_size, - after_start: builder.after_start.clone(), - before_stop: builder.before_stop.clone(), - thread_cap, - }), - }, - shutdown_rx, - } - } - - pub(crate) fn spawner(&self) -> &Spawner { - &self.spawner - } - - pub(crate) fn shutdown(&mut self, timeout: Option) { - let mut shared = self.spawner.inner.shared.lock().unwrap(); - - // The function can be called multiple times. First, by explicitly - // calling `shutdown` then by the drop handler calling `shutdown`. This - // prevents shutting down twice. 
- if shared.shutdown { - return; - } - - shared.shutdown = true; - shared.shutdown_tx = None; - self.spawner.inner.condvar.notify_all(); - let mut workers = std::mem::replace(&mut shared.worker_threads, Slab::new()); - - drop(shared); - - if self.shutdown_rx.wait(timeout) { - for handle in workers.drain() { - let _ = handle.join(); - } - } - } -} - -impl Drop for BlockingPool { - fn drop(&mut self) { - self.shutdown(None); - } -} - -impl fmt::Debug for BlockingPool { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("BlockingPool").finish() - } -} - -// ===== impl Spawner ===== - -impl Spawner { - pub(crate) fn spawn(&self, task: Task, rt: &Handle) -> Result<(), ()> { - let shutdown_tx = { - let mut shared = self.inner.shared.lock().unwrap(); - - if shared.shutdown { - // Shutdown the task - task.shutdown(); - - // no need to even push this task; it would never get picked up - return Err(()); - } - - shared.queue.push_back(task); - - if shared.num_idle == 0 { - // No threads are able to process the task. - - if shared.num_th == self.inner.thread_cap { - // At max number of threads - None - } else { - shared.num_th += 1; - assert!(shared.shutdown_tx.is_some()); - shared.shutdown_tx.clone() - } - } else { - // Notify an idle worker thread. The notification counter - // is used to count the needed amount of notifications - // exactly. Thread libraries may generate spurious - // wakeups, this counter is used to keep us in a - // consistent state. - shared.num_idle -= 1; - shared.num_notify += 1; - self.inner.condvar.notify_one(); - None - } - }; - - if let Some(shutdown_tx) = shutdown_tx { - let mut shared = self.inner.shared.lock().unwrap(); - let entry = shared.worker_threads.vacant_entry(); - - let handle = self.spawn_thread(shutdown_tx, rt, entry.key()); - - entry.insert(handle); - } - - Ok(()) - } - - fn spawn_thread( - &self, - shutdown_tx: shutdown::Sender, - rt: &Handle, - worker_id: usize, - ) -> thread::JoinHandle<()> { - let mut builder = thread::Builder::new().name(self.inner.thread_name.clone()); - - if let Some(stack_size) = self.inner.stack_size { - builder = builder.stack_size(stack_size); - } - - let rt = rt.clone(); - - builder - .spawn(move || { - // Only the reference should be moved into the closure - let rt = &rt; - rt.enter(move || { - rt.blocking_spawner.inner.run(worker_id); - drop(shutdown_tx); - }) - }) - .unwrap() - } -} - -impl Inner { - fn run(&self, worker_id: usize) { - if let Some(f) = &self.after_start { - f() - } - - let mut shared = self.shared.lock().unwrap(); - - 'main: loop { - // BUSY - while let Some(task) = shared.queue.pop_front() { - drop(shared); - task.run(); - - shared = self.shared.lock().unwrap(); - } - - // IDLE - shared.num_idle += 1; - - while !shared.shutdown { - let lock_result = self.condvar.wait_timeout(shared, KEEP_ALIVE).unwrap(); - - shared = lock_result.0; - let timeout_result = lock_result.1; - - if shared.num_notify != 0 { - // We have received a legitimate wakeup, - // acknowledge it by decrementing the counter - // and transition to the BUSY state. - shared.num_notify -= 1; - break; - } - - // Even if the condvar "timed out", if the pool is entering the - // shutdown phase, we want to perform the cleanup logic. - if !shared.shutdown && timeout_result.timed_out() { - shared.worker_threads.remove(worker_id); - - break 'main; - } - - // Spurious wakeup detected, go back to sleep. 
- } - - if shared.shutdown { - // Drain the queue - while let Some(task) = shared.queue.pop_front() { - drop(shared); - task.shutdown(); - - shared = self.shared.lock().unwrap(); - } - - // Work was produced, and we "took" it (by decrementing num_notify). - // This means that num_idle was decremented once for our wakeup. - // But, since we are exiting, we need to "undo" that, as we'll stay idle. - shared.num_idle += 1; - // NOTE: Technically we should also do num_notify++ and notify again, - // but since we're shutting down anyway, that won't be necessary. - break; - } - } - - // Thread exit - shared.num_th -= 1; - - // num_idle should now be tracked exactly, panic - // with a descriptive message if it is not the - // case. - shared.num_idle = shared - .num_idle - .checked_sub(1) - .expect("num_idle underflowed on thread exit"); - - if shared.shutdown && shared.num_th == 0 { - self.condvar.notify_one(); - } - - drop(shared); - - if let Some(f) = &self.before_stop { - f() - } - } -} - -impl fmt::Debug for Spawner { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("blocking::Spawner").finish() - } -} diff --git a/third_party/rust/tokio-0.2.25/src/runtime/blocking/schedule.rs b/third_party/rust/tokio-0.2.25/src/runtime/blocking/schedule.rs deleted file mode 100644 index 4e044ab29879..000000000000 --- a/third_party/rust/tokio-0.2.25/src/runtime/blocking/schedule.rs +++ /dev/null @@ -1,24 +0,0 @@ -use crate::runtime::task::{self, Task}; - -/// `task::Schedule` implementation that does nothing. This is unique to the -/// blocking scheduler as tasks scheduled are not really futures but blocking -/// operations. -/// -/// We avoid storing the task by forgetting it in `bind` and re-materializing it -/// in `release. -pub(crate) struct NoopSchedule; - -impl task::Schedule for NoopSchedule { - fn bind(_task: Task) -> NoopSchedule { - // Do nothing w/ the task - NoopSchedule - } - - fn release(&self, _task: &Task) -> Option> { - None - } - - fn schedule(&self, _task: task::Notified) { - unreachable!(); - } -} diff --git a/third_party/rust/tokio-0.2.25/src/runtime/blocking/shutdown.rs b/third_party/rust/tokio-0.2.25/src/runtime/blocking/shutdown.rs deleted file mode 100644 index 3b6cc5930dcd..000000000000 --- a/third_party/rust/tokio-0.2.25/src/runtime/blocking/shutdown.rs +++ /dev/null @@ -1,71 +0,0 @@ -//! A shutdown channel. -//! -//! Each worker holds the `Sender` half. When all the `Sender` halves are -//! dropped, the `Receiver` receives a notification. - -use crate::loom::sync::Arc; -use crate::sync::oneshot; - -use std::time::Duration; - -#[derive(Debug, Clone)] -pub(super) struct Sender { - tx: Arc>, -} - -#[derive(Debug)] -pub(super) struct Receiver { - rx: oneshot::Receiver<()>, -} - -pub(super) fn channel() -> (Sender, Receiver) { - let (tx, rx) = oneshot::channel(); - let tx = Sender { tx: Arc::new(tx) }; - let rx = Receiver { rx }; - - (tx, rx) -} - -impl Receiver { - /// Blocks the current thread until all `Sender` handles drop. - /// - /// If `timeout` is `Some`, the thread is blocked for **at most** `timeout` - /// duration. If `timeout` is `None`, then the thread is blocked until the - /// shutdown signal is received. - /// - /// If the timeout has elapsed, it returns `false`, otherwise it returns `true`. 
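The timeout handling described here is what the public `Runtime::shutdown_timeout` builds on: the blocking pool's `shutdown` passes the caller's deadline to this receiver and stops waiting for outstanding blocking work once it elapses. A rough sketch of that public surface (tokio 0.2 API; the durations are illustrative):

```rust
// Sketch: a blocking task that outlives the async work; shutdown_timeout
// stops waiting after one second and the task keeps running detached.
use std::time::Duration;
use tokio::runtime::Runtime;

fn main() {
    let mut rt = Runtime::new().unwrap();
    rt.block_on(async {
        let _ = tokio::task::spawn_blocking(|| std::thread::sleep(Duration::from_secs(60)));
    });
    // Wait at most one second for the blocking pool to drain, then give up.
    rt.shutdown_timeout(Duration::from_secs(1));
}
```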
- pub(crate) fn wait(&mut self, timeout: Option) -> bool { - use crate::runtime::enter::try_enter; - - if timeout == Some(Duration::from_nanos(0)) { - return true; - } - - let mut e = match try_enter(false) { - Some(enter) => enter, - _ => { - if std::thread::panicking() { - // Don't panic in a panic - return false; - } else { - panic!( - "Cannot drop a runtime in a context where blocking is not allowed. \ - This happens when a runtime is dropped from within an asynchronous context." - ); - } - } - }; - - // The oneshot completes with an Err - // - // If blocking fails to wait, this indicates a problem parking the - // current thread (usually, shutting down a runtime stored in a - // thread-local). - if let Some(timeout) = timeout { - e.block_on_timeout(&mut self.rx, timeout).is_ok() - } else { - let _ = e.block_on(&mut self.rx); - true - } - } -} diff --git a/third_party/rust/tokio-0.2.25/src/runtime/blocking/task.rs b/third_party/rust/tokio-0.2.25/src/runtime/blocking/task.rs deleted file mode 100644 index a521af4630c0..000000000000 --- a/third_party/rust/tokio-0.2.25/src/runtime/blocking/task.rs +++ /dev/null @@ -1,43 +0,0 @@ -use std::future::Future; -use std::pin::Pin; -use std::task::{Context, Poll}; - -/// Converts a function to a future that completes on poll -pub(crate) struct BlockingTask { - func: Option, -} - -impl BlockingTask { - /// Initializes a new blocking task from the given function - pub(crate) fn new(func: T) -> BlockingTask { - BlockingTask { func: Some(func) } - } -} - -// The closure `F` is never pinned -impl Unpin for BlockingTask {} - -impl Future for BlockingTask -where - T: FnOnce() -> R, -{ - type Output = R; - - fn poll(mut self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll { - let me = &mut *self; - let func = me - .func - .take() - .expect("[internal exception] blocking task ran twice."); - - // This is a little subtle: - // For convenience, we'd like _every_ call tokio ever makes to Task::poll() to be budgeted - // using coop. However, the way things are currently modeled, even running a blocking task - // currently goes through Task::poll(), and so is subject to budgeting. That isn't really - // what we want; a blocking task may itself want to run tasks (it might be a Worker!), so - // we want it to start without any budgeting. - crate::coop::stop(); - - Poll::Ready(func()) - } -} diff --git a/third_party/rust/tokio-0.2.25/src/runtime/builder.rs b/third_party/rust/tokio-0.2.25/src/runtime/builder.rs deleted file mode 100644 index fad72c7ad941..000000000000 --- a/third_party/rust/tokio-0.2.25/src/runtime/builder.rs +++ /dev/null @@ -1,522 +0,0 @@ -use crate::runtime::handle::Handle; -use crate::runtime::shell::Shell; -use crate::runtime::{blocking, io, time, Callback, Runtime, Spawner}; - -use std::fmt; -#[cfg(not(loom))] -use std::sync::Arc; - -/// Builds Tokio Runtime with custom configuration values. -/// -/// Methods can be chained in order to set the configuration values. The -/// Runtime is constructed by calling [`build`]. -/// -/// New instances of `Builder` are obtained via [`Builder::new`]. -/// -/// See function level documentation for details on the various configuration -/// settings. 
-/// -/// [`build`]: method@Self::build -/// [`Builder::new`]: method@Self::new -/// -/// # Examples -/// -/// ``` -/// use tokio::runtime::Builder; -/// -/// fn main() { -/// // build runtime -/// let runtime = Builder::new() -/// .threaded_scheduler() -/// .core_threads(4) -/// .thread_name("my-custom-name") -/// .thread_stack_size(3 * 1024 * 1024) -/// .build() -/// .unwrap(); -/// -/// // use runtime ... -/// } -/// ``` -pub struct Builder { - /// The task execution model to use. - kind: Kind, - - /// Whether or not to enable the I/O driver - enable_io: bool, - - /// Whether or not to enable the time driver - enable_time: bool, - - /// The number of worker threads, used by Runtime. - /// - /// Only used when not using the current-thread executor. - core_threads: Option, - - /// Cap on thread usage. - max_threads: usize, - - /// Name used for threads spawned by the runtime. - pub(super) thread_name: String, - - /// Stack size used for threads spawned by the runtime. - pub(super) thread_stack_size: Option, - - /// Callback to run after each thread starts. - pub(super) after_start: Option, - - /// To run before each worker thread stops - pub(super) before_stop: Option, -} - -#[derive(Debug, Clone, Copy)] -enum Kind { - Shell, - #[cfg(feature = "rt-core")] - Basic, - #[cfg(feature = "rt-threaded")] - ThreadPool, -} - -impl Builder { - /// Returns a new runtime builder initialized with default configuration - /// values. - /// - /// Configuration methods can be chained on the return value. - pub fn new() -> Builder { - Builder { - // No task execution by default - kind: Kind::Shell, - - // I/O defaults to "off" - enable_io: false, - - // Time defaults to "off" - enable_time: false, - - // Default to lazy auto-detection (one thread per CPU core) - core_threads: None, - - max_threads: 512, - - // Default thread name - thread_name: "tokio-runtime-worker".into(), - - // Do not set a stack size by default - thread_stack_size: None, - - // No worker thread callbacks - after_start: None, - before_stop: None, - } - } - - /// Enables both I/O and time drivers. - /// - /// Doing this is a shorthand for calling `enable_io` and `enable_time` - /// individually. If additional components are added to Tokio in the future, - /// `enable_all` will include these future components. - /// - /// # Examples - /// - /// ``` - /// use tokio::runtime; - /// - /// let rt = runtime::Builder::new() - /// .threaded_scheduler() - /// .enable_all() - /// .build() - /// .unwrap(); - /// ``` - pub fn enable_all(&mut self) -> &mut Self { - #[cfg(feature = "io-driver")] - self.enable_io(); - #[cfg(feature = "time")] - self.enable_time(); - - self - } - - #[deprecated(note = "In future will be replaced by core_threads method")] - /// Sets the maximum number of worker threads for the `Runtime`'s thread pool. - /// - /// This must be a number between 1 and 32,768 though it is advised to keep - /// this value on the smaller side. - /// - /// The default value is the number of cores available to the system. - pub fn num_threads(&mut self, val: usize) -> &mut Self { - self.core_threads = Some(val); - self - } - - /// Sets the core number of worker threads for the `Runtime`'s thread pool. - /// - /// This should be a number between 1 and 32,768 though it is advised to keep - /// this value on the smaller side. - /// - /// The default value is the number of cores available to the system. - /// - /// These threads will be always active and running. 
- /// - /// # Examples - /// - /// ``` - /// use tokio::runtime; - /// - /// let rt = runtime::Builder::new() - /// .threaded_scheduler() - /// .core_threads(4) - /// .build() - /// .unwrap(); - /// ``` - pub fn core_threads(&mut self, val: usize) -> &mut Self { - assert_ne!(val, 0, "Core threads cannot be zero"); - self.core_threads = Some(val); - self - } - - /// Specifies limit for threads, spawned by the Runtime. - /// - /// This is number of threads to be used by Runtime, including `core_threads` - /// Having `max_threads` less than `core_threads` results in invalid configuration - /// when building multi-threaded `Runtime`, which would cause a panic. - /// - /// Similarly to the `core_threads`, this number should be between 1 and 32,768. - /// - /// The default value is 512. - /// - /// When multi-threaded runtime is not used, will act as limit on additional threads. - /// - /// Otherwise as `core_threads` are always active, it limits additional threads (e.g. for - /// blocking annotations) as `max_threads - core_threads`. - pub fn max_threads(&mut self, val: usize) -> &mut Self { - assert_ne!(val, 0, "Thread limit cannot be zero"); - self.max_threads = val; - self - } - - /// Sets name of threads spawned by the `Runtime`'s thread pool. - /// - /// The default name is "tokio-runtime-worker". - /// - /// # Examples - /// - /// ``` - /// # use tokio::runtime; - /// - /// # pub fn main() { - /// let rt = runtime::Builder::new() - /// .thread_name("my-pool") - /// .build(); - /// # } - /// ``` - pub fn thread_name(&mut self, val: impl Into) -> &mut Self { - self.thread_name = val.into(); - self - } - - /// Sets the stack size (in bytes) for worker threads. - /// - /// The actual stack size may be greater than this value if the platform - /// specifies minimal stack size. - /// - /// The default stack size for spawned threads is 2 MiB, though this - /// particular stack size is subject to change in the future. - /// - /// # Examples - /// - /// ``` - /// # use tokio::runtime; - /// - /// # pub fn main() { - /// let rt = runtime::Builder::new() - /// .threaded_scheduler() - /// .thread_stack_size(32 * 1024) - /// .build(); - /// # } - /// ``` - pub fn thread_stack_size(&mut self, val: usize) -> &mut Self { - self.thread_stack_size = Some(val); - self - } - - /// Executes function `f` after each thread is started but before it starts - /// doing work. - /// - /// This is intended for bookkeeping and monitoring use cases. - /// - /// # Examples - /// - /// ``` - /// # use tokio::runtime; - /// - /// # pub fn main() { - /// let runtime = runtime::Builder::new() - /// .threaded_scheduler() - /// .on_thread_start(|| { - /// println!("thread started"); - /// }) - /// .build(); - /// # } - /// ``` - #[cfg(not(loom))] - pub fn on_thread_start(&mut self, f: F) -> &mut Self - where - F: Fn() + Send + Sync + 'static, - { - self.after_start = Some(Arc::new(f)); - self - } - - /// Executes function `f` before each thread stops. - /// - /// This is intended for bookkeeping and monitoring use cases. - /// - /// # Examples - /// - /// ``` - /// # use tokio::runtime; - /// - /// # pub fn main() { - /// let runtime = runtime::Builder::new() - /// .threaded_scheduler() - /// .on_thread_stop(|| { - /// println!("thread stopping"); - /// }) - /// .build(); - /// # } - /// ``` - #[cfg(not(loom))] - pub fn on_thread_stop(&mut self, f: F) -> &mut Self - where - F: Fn() + Send + Sync + 'static, - { - self.before_stop = Some(Arc::new(f)); - self - } - - /// Creates the configured `Runtime`. 
- /// - /// The returned `ThreadPool` instance is ready to spawn tasks. - /// - /// # Examples - /// - /// ``` - /// use tokio::runtime::Builder; - /// - /// let mut rt = Builder::new().build().unwrap(); - /// - /// rt.block_on(async { - /// println!("Hello from the Tokio runtime"); - /// }); - /// ``` - pub fn build(&mut self) -> io::Result { - match self.kind { - Kind::Shell => self.build_shell_runtime(), - #[cfg(feature = "rt-core")] - Kind::Basic => self.build_basic_runtime(), - #[cfg(feature = "rt-threaded")] - Kind::ThreadPool => self.build_threaded_runtime(), - } - } - - fn build_shell_runtime(&mut self) -> io::Result { - use crate::runtime::Kind; - - let clock = time::create_clock(); - - // Create I/O driver - let (io_driver, io_handle) = io::create_driver(self.enable_io)?; - let (driver, time_handle) = time::create_driver(self.enable_time, io_driver, clock.clone()); - - let spawner = Spawner::Shell; - - let blocking_pool = blocking::create_blocking_pool(self, self.max_threads); - let blocking_spawner = blocking_pool.spawner().clone(); - - Ok(Runtime { - kind: Kind::Shell(Shell::new(driver)), - handle: Handle { - spawner, - io_handle, - time_handle, - clock, - blocking_spawner, - }, - blocking_pool, - }) - } -} - -cfg_io_driver! { - impl Builder { - /// Enables the I/O driver. - /// - /// Doing this enables using net, process, signal, and some I/O types on - /// the runtime. - /// - /// # Examples - /// - /// ``` - /// use tokio::runtime; - /// - /// let rt = runtime::Builder::new() - /// .enable_io() - /// .build() - /// .unwrap(); - /// ``` - pub fn enable_io(&mut self) -> &mut Self { - self.enable_io = true; - self - } - } -} - -cfg_time! { - impl Builder { - /// Enables the time driver. - /// - /// Doing this enables using `tokio::time` on the runtime. - /// - /// # Examples - /// - /// ``` - /// use tokio::runtime; - /// - /// let rt = runtime::Builder::new() - /// .enable_time() - /// .build() - /// .unwrap(); - /// ``` - pub fn enable_time(&mut self) -> &mut Self { - self.enable_time = true; - self - } - } -} - -cfg_rt_core! { - impl Builder { - /// Sets runtime to use a simpler scheduler that runs all tasks on the current-thread. - /// - /// The executor and all necessary drivers will all be run on the current - /// thread during [`block_on`] calls. - /// - /// See also [the module level documentation][1], which has a section on scheduler - /// types. - /// - /// [1]: index.html#runtime-configurations - /// [`block_on`]: Runtime::block_on - pub fn basic_scheduler(&mut self) -> &mut Self { - self.kind = Kind::Basic; - self - } - - fn build_basic_runtime(&mut self) -> io::Result { - use crate::runtime::{BasicScheduler, Kind}; - - let clock = time::create_clock(); - - // Create I/O driver - let (io_driver, io_handle) = io::create_driver(self.enable_io)?; - - let (driver, time_handle) = time::create_driver(self.enable_time, io_driver, clock.clone()); - - // And now put a single-threaded scheduler on top of the timer. When - // there are no futures ready to do something, it'll let the timer or - // the reactor to generate some new stimuli for the futures to continue - // in their life. 
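For context, `build_basic_runtime` here and `build_threaded_runtime` further down are reached from user code through `basic_scheduler()` and `threaded_scheduler()` respectively; a sketch of both configurations (tokio 0.2 builder API, thread count is illustrative):

```rust
// Sketch: selecting the current-thread or multi-threaded scheduler.
// Assumes tokio 0.2 with the rt-core and rt-threaded features enabled.
use tokio::runtime::Builder;

fn main() -> std::io::Result<()> {
    // Current-thread scheduler: tasks run inside `block_on` on this thread.
    let mut basic = Builder::new().basic_scheduler().enable_all().build()?;
    basic.block_on(async { println!("basic scheduler") });

    // Work-stealing thread pool with four always-active worker threads.
    let mut threaded = Builder::new()
        .threaded_scheduler()
        .core_threads(4)
        .enable_all()
        .build()?;
    threaded.block_on(async { println!("threaded scheduler") });
    Ok(())
}
```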
- let scheduler = BasicScheduler::new(driver); - let spawner = Spawner::Basic(scheduler.spawner().clone()); - - // Blocking pool - let blocking_pool = blocking::create_blocking_pool(self, self.max_threads); - let blocking_spawner = blocking_pool.spawner().clone(); - - Ok(Runtime { - kind: Kind::Basic(scheduler), - handle: Handle { - spawner, - io_handle, - time_handle, - clock, - blocking_spawner, - }, - blocking_pool, - }) - } - } -} - -cfg_rt_threaded! { - impl Builder { - /// Sets runtime to use a multi-threaded scheduler for executing tasks. - /// - /// See also [the module level documentation][1], which has a section on scheduler - /// types. - /// - /// [1]: index.html#runtime-configurations - pub fn threaded_scheduler(&mut self) -> &mut Self { - self.kind = Kind::ThreadPool; - self - } - - fn build_threaded_runtime(&mut self) -> io::Result { - use crate::loom::sys::num_cpus; - use crate::runtime::{Kind, ThreadPool}; - use crate::runtime::park::Parker; - use std::cmp; - - let core_threads = self.core_threads.unwrap_or_else(|| cmp::min(self.max_threads, num_cpus())); - assert!(core_threads <= self.max_threads, "Core threads number cannot be above max limit"); - - let clock = time::create_clock(); - - let (io_driver, io_handle) = io::create_driver(self.enable_io)?; - let (driver, time_handle) = time::create_driver(self.enable_time, io_driver, clock.clone()); - let (scheduler, launch) = ThreadPool::new(core_threads, Parker::new(driver)); - let spawner = Spawner::ThreadPool(scheduler.spawner().clone()); - - // Create the blocking pool - let blocking_pool = blocking::create_blocking_pool(self, self.max_threads); - let blocking_spawner = blocking_pool.spawner().clone(); - - // Create the runtime handle - let handle = Handle { - spawner, - io_handle, - time_handle, - clock, - blocking_spawner, - }; - - // Spawn the thread pool workers - handle.enter(|| launch.launch()); - - Ok(Runtime { - kind: Kind::ThreadPool(scheduler), - handle, - blocking_pool, - }) - } - } -} - -impl Default for Builder { - fn default() -> Self { - Self::new() - } -} - -impl fmt::Debug for Builder { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("Builder") - .field("kind", &self.kind) - .field("core_threads", &self.core_threads) - .field("max_threads", &self.max_threads) - .field("thread_name", &self.thread_name) - .field("thread_stack_size", &self.thread_stack_size) - .field("after_start", &self.after_start.as_ref().map(|_| "...")) - .field("before_stop", &self.after_start.as_ref().map(|_| "...")) - .finish() - } -} diff --git a/third_party/rust/tokio-0.2.25/src/runtime/context.rs b/third_party/rust/tokio-0.2.25/src/runtime/context.rs deleted file mode 100644 index 1b267f481e22..000000000000 --- a/third_party/rust/tokio-0.2.25/src/runtime/context.rs +++ /dev/null @@ -1,73 +0,0 @@ -//! Thread local runtime context -use crate::runtime::Handle; - -use std::cell::RefCell; - -thread_local! { - static CONTEXT: RefCell> = RefCell::new(None) -} - -pub(crate) fn current() -> Option { - CONTEXT.with(|ctx| ctx.borrow().clone()) -} - -cfg_io_driver! { - pub(crate) fn io_handle() -> crate::runtime::io::Handle { - CONTEXT.with(|ctx| match *ctx.borrow() { - Some(ref ctx) => ctx.io_handle.clone(), - None => Default::default(), - }) - } -} - -cfg_time! { - pub(crate) fn time_handle() -> crate::runtime::time::Handle { - CONTEXT.with(|ctx| match *ctx.borrow() { - Some(ref ctx) => ctx.time_handle.clone(), - None => Default::default(), - }) - } - - cfg_test_util! 
{ - pub(crate) fn clock() -> Option { - CONTEXT.with(|ctx| match *ctx.borrow() { - Some(ref ctx) => Some(ctx.clock.clone()), - None => None, - }) - } - } -} - -cfg_rt_core! { - pub(crate) fn spawn_handle() -> Option { - CONTEXT.with(|ctx| match *ctx.borrow() { - Some(ref ctx) => Some(ctx.spawner.clone()), - None => None, - }) - } -} - -/// Set this [`Handle`] as the current active [`Handle`]. -/// -/// [`Handle`]: Handle -pub(crate) fn enter(new: Handle, f: F) -> R -where - F: FnOnce() -> R, -{ - struct DropGuard(Option); - - impl Drop for DropGuard { - fn drop(&mut self) { - CONTEXT.with(|ctx| { - *ctx.borrow_mut() = self.0.take(); - }); - } - } - - let _guard = CONTEXT.with(|ctx| { - let old = ctx.borrow_mut().replace(new); - DropGuard(old) - }); - - f() -} diff --git a/third_party/rust/tokio-0.2.25/src/runtime/enter.rs b/third_party/rust/tokio-0.2.25/src/runtime/enter.rs deleted file mode 100644 index 56a7c57b6c6d..000000000000 --- a/third_party/rust/tokio-0.2.25/src/runtime/enter.rs +++ /dev/null @@ -1,225 +0,0 @@ -use std::cell::{Cell, RefCell}; -use std::fmt; -use std::marker::PhantomData; - -#[derive(Debug, Clone, Copy)] -pub(crate) enum EnterContext { - Entered { - #[allow(dead_code)] - allow_blocking: bool, - }, - NotEntered, -} - -impl EnterContext { - pub(crate) fn is_entered(self) -> bool { - if let EnterContext::Entered { .. } = self { - true - } else { - false - } - } -} - -thread_local!(static ENTERED: Cell = Cell::new(EnterContext::NotEntered)); - -/// Represents an executor context. -pub(crate) struct Enter { - _p: PhantomData>, -} - -/// Marks the current thread as being within the dynamic extent of an -/// executor. -pub(crate) fn enter(allow_blocking: bool) -> Enter { - if let Some(enter) = try_enter(allow_blocking) { - return enter; - } - - panic!( - "Cannot start a runtime from within a runtime. This happens \ - because a function (like `block_on`) attempted to block the \ - current thread while the thread is being used to drive \ - asynchronous tasks." - ); -} - -/// Tries to enter a runtime context, returns `None` if already in a runtime -/// context. -pub(crate) fn try_enter(allow_blocking: bool) -> Option { - ENTERED.with(|c| { - if c.get().is_entered() { - None - } else { - c.set(EnterContext::Entered { allow_blocking }); - Some(Enter { _p: PhantomData }) - } - }) -} - -// Forces the current "entered" state to be cleared while the closure -// is executed. -// -// # Warning -// -// This is hidden for a reason. Do not use without fully understanding -// executors. Misuing can easily cause your program to deadlock. -#[cfg(all(feature = "rt-threaded", feature = "blocking"))] -pub(crate) fn exit R, R>(f: F) -> R { - // Reset in case the closure panics - struct Reset(EnterContext); - impl Drop for Reset { - fn drop(&mut self) { - ENTERED.with(|c| { - assert!(!c.get().is_entered(), "closure claimed permanent executor"); - c.set(self.0); - }); - } - } - - let was = ENTERED.with(|c| { - let e = c.get(); - assert!(e.is_entered(), "asked to exit when not entered"); - c.set(EnterContext::NotEntered); - e - }); - - let _reset = Reset(was); - // dropping _reset after f() will reset ENTERED - f() -} - -cfg_rt_core! { - cfg_rt_util! { - /// Disallow blocking in the current runtime context until the guard is dropped. 
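The `enter` guard above is what produces the "Cannot start a runtime from within a runtime" panic; a sketch of the pattern it rejects and the usual workaround through `Handle` (tokio 0.2 API):

```rust
// Sketch: blocking on a second runtime from inside a runtime trips the
// `enter` check above; reusing the current runtime's Handle avoids it.
use tokio::runtime::{Handle, Runtime};

fn main() {
    let mut rt = Runtime::new().unwrap();
    rt.block_on(async {
        // Would panic: `Runtime::new().unwrap().block_on(async {})` tries to
        // enter a runtime context that this thread has already entered.

        // Instead, reuse the runtime we are already on via its Handle.
        let handle = Handle::current();
        let _ = handle.spawn(async { println!("spawned via Handle") });
    });
}
```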
- pub(crate) fn disallow_blocking() -> DisallowBlockingGuard { - let reset = ENTERED.with(|c| { - if let EnterContext::Entered { - allow_blocking: true, - } = c.get() - { - c.set(EnterContext::Entered { - allow_blocking: false, - }); - true - } else { - false - } - }); - DisallowBlockingGuard(reset) - } - - pub(crate) struct DisallowBlockingGuard(bool); - impl Drop for DisallowBlockingGuard { - fn drop(&mut self) { - if self.0 { - // XXX: Do we want some kind of assertion here, or is "best effort" okay? - ENTERED.with(|c| { - if let EnterContext::Entered { - allow_blocking: false, - } = c.get() - { - c.set(EnterContext::Entered { - allow_blocking: true, - }); - } - }) - } - } - } - } -} - -cfg_rt_threaded! { - cfg_blocking! { - /// Returns true if in a runtime context. - pub(crate) fn context() -> EnterContext { - ENTERED.with(|c| c.get()) - } - } -} - -cfg_block_on! { - impl Enter { - /// Blocks the thread on the specified future, returning the value with - /// which that future completes. - pub(crate) fn block_on(&mut self, f: F) -> Result - where - F: std::future::Future, - { - use crate::park::{CachedParkThread, Park}; - use std::task::Context; - use std::task::Poll::Ready; - - let mut park = CachedParkThread::new(); - let waker = park.get_unpark()?.into_waker(); - let mut cx = Context::from_waker(&waker); - - pin!(f); - - loop { - if let Ready(v) = crate::coop::budget(|| f.as_mut().poll(&mut cx)) { - return Ok(v); - } - - park.park()?; - } - } - } -} - -cfg_blocking_impl! { - use crate::park::ParkError; - use std::time::Duration; - - impl Enter { - /// Blocks the thread on the specified future for **at most** `timeout` - /// - /// If the future completes before `timeout`, the result is returned. If - /// `timeout` elapses, then `Err` is returned. - pub(crate) fn block_on_timeout(&mut self, f: F, timeout: Duration) -> Result - where - F: std::future::Future, - { - use crate::park::{CachedParkThread, Park}; - use std::task::Context; - use std::task::Poll::Ready; - use std::time::Instant; - - let mut park = CachedParkThread::new(); - let waker = park.get_unpark()?.into_waker(); - let mut cx = Context::from_waker(&waker); - - pin!(f); - let when = Instant::now() + timeout; - - loop { - if let Ready(v) = crate::coop::budget(|| f.as_mut().poll(&mut cx)) { - return Ok(v); - } - - let now = Instant::now(); - - if now >= when { - return Err(()); - } - - park.park_timeout(when - now)?; - } - } - } -} - -impl fmt::Debug for Enter { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Enter").finish() - } -} - -impl Drop for Enter { - fn drop(&mut self) { - ENTERED.with(|c| { - assert!(c.get().is_entered()); - c.set(EnterContext::NotEntered); - }); - } -} diff --git a/third_party/rust/tokio-0.2.25/src/runtime/handle.rs b/third_party/rust/tokio-0.2.25/src/runtime/handle.rs deleted file mode 100644 index 92c08d617b26..000000000000 --- a/third_party/rust/tokio-0.2.25/src/runtime/handle.rs +++ /dev/null @@ -1,371 +0,0 @@ -use crate::runtime::{blocking, context, io, time, Spawner}; -use std::{error, fmt}; - -cfg_blocking! { - use crate::runtime::task; - use crate::runtime::blocking::task::BlockingTask; -} - -cfg_rt_core! { - use crate::task::JoinHandle; - - use std::future::Future; -} - -/// Handle to the runtime. -/// -/// The handle is internally reference-counted and can be freely cloned. A handle can be -/// obtained using the [`Runtime::handle`] method. 
-/// -/// [`Runtime::handle`]: crate::runtime::Runtime::handle() -#[derive(Debug, Clone)] -pub struct Handle { - pub(super) spawner: Spawner, - - /// Handles to the I/O drivers - pub(super) io_handle: io::Handle, - - /// Handles to the time drivers - pub(super) time_handle: time::Handle, - - /// Source of `Instant::now()` - pub(super) clock: time::Clock, - - /// Blocking pool spawner - pub(super) blocking_spawner: blocking::Spawner, -} - -impl Handle { - /// Enter the runtime context. This allows you to construct types that must - /// have an executor available on creation such as [`Delay`] or [`TcpStream`]. - /// It will also allow you to call methods such as [`tokio::spawn`]. - /// - /// This function is also available as [`Runtime::enter`]. - /// - /// [`Delay`]: struct@crate::time::Delay - /// [`TcpStream`]: struct@crate::net::TcpStream - /// [`Runtime::enter`]: fn@crate::runtime::Runtime::enter - /// [`tokio::spawn`]: fn@crate::spawn - /// - /// # Example - /// - /// ``` - /// use tokio::runtime::Runtime; - /// - /// fn function_that_spawns(msg: String) { - /// // Had we not used `handle.enter` below, this would panic. - /// tokio::spawn(async move { - /// println!("{}", msg); - /// }); - /// } - /// - /// fn main() { - /// let rt = Runtime::new().unwrap(); - /// let handle = rt.handle().clone(); - /// - /// let s = "Hello World!".to_string(); - /// - /// // By entering the context, we tie `tokio::spawn` to this executor. - /// handle.enter(|| function_that_spawns(s)); - /// } - /// ``` - pub fn enter(&self, f: F) -> R - where - F: FnOnce() -> R, - { - context::enter(self.clone(), f) - } - - /// Returns a `Handle` view over the currently running `Runtime` - /// - /// # Panic - /// - /// This will panic if called outside the context of a Tokio runtime. That means that you must - /// call this on one of the threads **being run by the runtime**. Calling this from within a - /// thread created by `std::thread::spawn` (for example) will cause a panic. - /// - /// # Examples - /// - /// This can be used to obtain the handle of the surrounding runtime from an async - /// block or function running on that runtime. - /// - /// ``` - /// # use std::thread; - /// # use tokio::runtime::Runtime; - /// # fn dox() { - /// # let rt = Runtime::new().unwrap(); - /// # rt.spawn(async { - /// use tokio::runtime::Handle; - /// - /// // Inside an async block or function. - /// let handle = Handle::current(); - /// handle.spawn(async { - /// println!("now running in the existing Runtime"); - /// }); - /// - /// # let handle = - /// thread::spawn(move || { - /// // Notice that the handle is created outside of this thread and then moved in - /// handle.block_on(async { /* ... */ }) - /// // This next line would cause a panic - /// // let handle2 = Handle::current(); - /// }); - /// # handle.join().unwrap(); - /// # }); - /// # } - /// ``` - pub fn current() -> Self { - context::current().expect("not currently running on a Tokio 0.2.x runtime.") - } - - /// Returns a Handle view over the currently running Runtime - /// - /// Returns an error if no Runtime has been started - /// - /// Contrary to `current`, this never panics - pub fn try_current() -> Result { - context::current().ok_or(TryCurrentError(())) - } -} - -cfg_rt_core! { - impl Handle { - /// Spawns a future onto the Tokio runtime. - /// - /// This spawns the given future onto the runtime's executor, usually a - /// thread pool. The thread pool is then responsible for polling the future - /// until it completes. 
- /// - /// See [module level][mod] documentation for more details. - /// - /// [mod]: index.html - /// - /// # Examples - /// - /// ``` - /// use tokio::runtime::Runtime; - /// - /// # fn dox() { - /// // Create the runtime - /// let rt = Runtime::new().unwrap(); - /// let handle = rt.handle(); - /// - /// // Spawn a future onto the runtime - /// handle.spawn(async { - /// println!("now running on a worker thread"); - /// }); - /// # } - /// ``` - /// - /// # Panics - /// - /// This function will not panic unless task execution is disabled on the - /// executor. This can only happen if the runtime was built using - /// [`Builder`] without picking either [`basic_scheduler`] or - /// [`threaded_scheduler`]. - /// - /// [`Builder`]: struct@crate::runtime::Builder - /// [`threaded_scheduler`]: fn@crate::runtime::Builder::threaded_scheduler - /// [`basic_scheduler`]: fn@crate::runtime::Builder::basic_scheduler - pub fn spawn(&self, future: F) -> JoinHandle - where - F: Future + Send + 'static, - F::Output: Send + 'static, - { - self.spawner.spawn(future) - } - - /// Run a future to completion on the Tokio runtime from a synchronous - /// context. - /// - /// This runs the given future on the runtime, blocking until it is - /// complete, and yielding its resolved result. Any tasks or timers which - /// the future spawns internally will be executed on the runtime. - /// - /// If the provided executor currently has no active core thread, this - /// function might hang until a core thread is added. This is not a - /// concern when using the [threaded scheduler], as it always has active - /// core threads, but if you use the [basic scheduler], some other - /// thread must currently be inside a call to [`Runtime::block_on`]. - /// See also [the module level documentation][1], which has a section on - /// scheduler types. - /// - /// This method may not be called from an asynchronous context. - /// - /// [threaded scheduler]: fn@crate::runtime::Builder::threaded_scheduler - /// [basic scheduler]: fn@crate::runtime::Builder::basic_scheduler - /// [`Runtime::block_on`]: fn@crate::runtime::Runtime::block_on - /// [1]: index.html#runtime-configurations - /// - /// # Panics - /// - /// This function panics if the provided future panics, or if called - /// within an asynchronous execution context. - /// - /// # Examples - /// - /// Using `block_on` with the [threaded scheduler]. - /// - /// ``` - /// use tokio::runtime::Runtime; - /// use std::thread; - /// - /// // Create the runtime. - /// // - /// // If the rt-threaded feature is enabled, this creates a threaded - /// // scheduler by default. - /// let rt = Runtime::new().unwrap(); - /// let handle = rt.handle().clone(); - /// - /// // Use the runtime from another thread. - /// let th = thread::spawn(move || { - /// // Execute the future, blocking the current thread until completion. - /// // - /// // This example uses the threaded scheduler, so no concurrent call to - /// // `rt.block_on` is required. 
- /// handle.block_on(async { - /// println!("hello"); - /// }); - /// }); - /// - /// th.join().unwrap(); - /// ``` - /// - /// Using the [basic scheduler] requires a concurrent call to - /// [`Runtime::block_on`]: - /// - /// [threaded scheduler]: fn@crate::runtime::Builder::threaded_scheduler - /// [basic scheduler]: fn@crate::runtime::Builder::basic_scheduler - /// [`Runtime::block_on`]: fn@crate::runtime::Runtime::block_on - /// - /// ``` - /// use tokio::runtime::Builder; - /// use tokio::sync::oneshot; - /// use std::thread; - /// - /// // Create the runtime. - /// let mut rt = Builder::new() - /// .enable_all() - /// .basic_scheduler() - /// .build() - /// .unwrap(); - /// - /// let handle = rt.handle().clone(); - /// - /// // Signal main thread when task has finished. - /// let (send, recv) = oneshot::channel(); - /// - /// // Use the runtime from another thread. - /// let th = thread::spawn(move || { - /// // Execute the future, blocking the current thread until completion. - /// handle.block_on(async { - /// send.send("done").unwrap(); - /// }); - /// }); - /// - /// // The basic scheduler is used, so the thread above might hang if we - /// // didn't call block_on on the rt too. - /// rt.block_on(async { - /// assert_eq!(recv.await.unwrap(), "done"); - /// }); - /// # th.join().unwrap(); - /// ``` - /// - pub fn block_on(&self, future: F) -> F::Output { - self.enter(|| { - let mut enter = crate::runtime::enter(true); - enter.block_on(future).expect("failed to park thread") - }) - } - } -} - -cfg_blocking! { - impl Handle { - /// Runs the provided closure on a thread where blocking is acceptable. - /// - /// In general, issuing a blocking call or performing a lot of compute in a - /// future without yielding is not okay, as it may prevent the executor from - /// driving other futures forward. This function runs the provided closure - /// on a thread dedicated to blocking operations. See the [CPU-bound tasks - /// and blocking code][blocking] section for more information. - /// - /// Tokio will spawn more blocking threads when they are requested through - /// this function until the upper limit configured on the [`Builder`] is - /// reached. This limit is very large by default, because `spawn_blocking` is - /// often used for various kinds of IO operations that cannot be performed - /// asynchronously. When you run CPU-bound code using `spawn_blocking`, you - /// should keep this large upper limit in mind; to run your CPU-bound - /// computations on only a few threads, you should use a separate thread - /// pool such as [rayon] rather than configuring the number of blocking - /// threads. - /// - /// This function is intended for non-async operations that eventually - /// finish on their own. If you want to spawn an ordinary thread, you should - /// use [`thread::spawn`] instead. - /// - /// Closures spawned using `spawn_blocking` cannot be cancelled. When you - /// shut down the executor, it will wait indefinitely for all blocking - /// operations to finish. You can use [`shutdown_timeout`] to stop waiting - /// for them after a certain timeout. Be aware that this will still not - /// cancel the tasks — they are simply allowed to keep running after the - /// method returns. - /// - /// Note that if you are using the [basic scheduler], this function will - /// still spawn additional threads for blocking operations. The basic - /// scheduler's single thread is only used for asynchronous code. 
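Tying two points above together (blocking closures cannot be cancelled; `shutdown_timeout` bounds the wait), a small sketch against the tokio 0.2.x API, again assuming the `full` feature set:

```
use std::thread;
use std::time::Duration;
use tokio::runtime::Runtime;

fn main() {
    let mut rt = Runtime::new().unwrap();
    let handle = rt.handle().clone();

    rt.block_on(async move {
        // Runs on a dedicated blocking thread; it cannot be cancelled, so we
        // deliberately do not await its JoinHandle here.
        let _blocking = handle.spawn_blocking(|| thread::sleep(Duration::from_secs(10)));
    });

    // Stop waiting for the outstanding blocking closure after 100ms rather
    // than hanging in `Runtime`'s drop.
    rt.shutdown_timeout(Duration::from_millis(100));
}
```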
- /// - /// [`Builder`]: struct@crate::runtime::Builder - /// [blocking]: ../index.html#cpu-bound-tasks-and-blocking-code - /// [rayon]: https://docs.rs/rayon - /// [basic scheduler]: fn@crate::runtime::Builder::basic_scheduler - /// [`thread::spawn`]: fn@std::thread::spawn - /// [`shutdown_timeout`]: fn@crate::runtime::Runtime::shutdown_timeout - /// - /// # Examples - /// - /// ``` - /// use tokio::runtime::Runtime; - /// - /// # async fn docs() -> Result<(), Box>{ - /// // Create the runtime - /// let rt = Runtime::new().unwrap(); - /// let handle = rt.handle(); - /// - /// let res = handle.spawn_blocking(move || { - /// // do some compute-heavy work or call synchronous code - /// "done computing" - /// }).await?; - /// - /// assert_eq!(res, "done computing"); - /// # Ok(()) - /// # } - /// ``` - pub fn spawn_blocking(&self, f: F) -> JoinHandle - where - F: FnOnce() -> R + Send + 'static, - R: Send + 'static, - { - let (task, handle) = task::joinable(BlockingTask::new(f)); - let _ = self.blocking_spawner.spawn(task, self); - handle - } - } -} - -/// Error returned by `try_current` when no Runtime has been started -pub struct TryCurrentError(()); - -impl fmt::Debug for TryCurrentError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("TryCurrentError").finish() - } -} - -impl fmt::Display for TryCurrentError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str("no tokio Runtime has been initialized") - } -} - -impl error::Error for TryCurrentError {} diff --git a/third_party/rust/tokio-0.2.25/src/runtime/io.rs b/third_party/rust/tokio-0.2.25/src/runtime/io.rs deleted file mode 100644 index 6a0953af8516..000000000000 --- a/third_party/rust/tokio-0.2.25/src/runtime/io.rs +++ /dev/null @@ -1,63 +0,0 @@ -//! Abstracts out the APIs necessary to `Runtime` for integrating the I/O -//! driver. When the `time` feature flag is **not** enabled. These APIs are -//! shells. This isolates the complexity of dealing with conditional -//! compilation. - -/// Re-exported for convenience. -pub(crate) use std::io::Result; - -pub(crate) use variant::*; - -#[cfg(feature = "io-driver")] -mod variant { - use crate::io::driver; - use crate::park::{Either, ParkThread}; - - use std::io; - - /// The driver value the runtime passes to the `timer` layer. - /// - /// When the `io-driver` feature is enabled, this is the "real" I/O driver - /// backed by Mio. Without the `io-driver` feature, this is a thread parker - /// backed by a condition variable. - pub(crate) type Driver = Either; - - /// The handle the runtime stores for future use. - /// - /// When the `io-driver` feature is **not** enabled, this is `()`. 
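The two `variant` modules above boil down to one shape: `create_driver(enable)` hands back either a real driver plus a handle, or a fallback parker and no handle. A stand-alone sketch of that shape (none of these types are tokio's; `IoDriver`, `ParkThread`, and the `u32` handle are placeholders):

```
use std::io;

enum Either<A, B> {
    A(A),
    B(B),
}

struct IoDriver;   // placeholder for the Mio-backed I/O driver
struct ParkThread; // placeholder for the condvar-based fallback parker

type Handle = Option<u32>; // placeholder handle; `Some` only when I/O is enabled

fn create_driver(enable: bool) -> io::Result<(Either<IoDriver, ParkThread>, Handle)> {
    if enable {
        Ok((Either::A(IoDriver), Some(1)))
    } else {
        Ok((Either::B(ParkThread), None))
    }
}

fn main() -> io::Result<()> {
    let (_driver, handle) = create_driver(false)?;
    assert!(handle.is_none()); // no I/O driver: nothing for the runtime to store
    let (_driver, handle) = create_driver(true)?;
    assert!(handle.is_some());
    Ok(())
}
```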
- pub(crate) type Handle = Option; - - pub(crate) fn create_driver(enable: bool) -> io::Result<(Driver, Handle)> { - #[cfg(loom)] - assert!(!enable); - - if enable { - let driver = driver::Driver::new()?; - let handle = driver.handle(); - - Ok((Either::A(driver), Some(handle))) - } else { - let driver = ParkThread::new(); - Ok((Either::B(driver), None)) - } - } -} - -#[cfg(not(feature = "io-driver"))] -mod variant { - use crate::park::ParkThread; - - use std::io; - - /// I/O is not enabled, use a condition variable based parker - pub(crate) type Driver = ParkThread; - - /// There is no handle - pub(crate) type Handle = (); - - pub(crate) fn create_driver(_enable: bool) -> io::Result<(Driver, Handle)> { - let driver = ParkThread::new(); - - Ok((driver, ())) - } -} diff --git a/third_party/rust/tokio-0.2.25/src/runtime/mod.rs b/third_party/rust/tokio-0.2.25/src/runtime/mod.rs deleted file mode 100644 index 637f38cabb56..000000000000 --- a/third_party/rust/tokio-0.2.25/src/runtime/mod.rs +++ /dev/null @@ -1,580 +0,0 @@ -//! The Tokio runtime. -//! -//! Unlike other Rust programs, asynchronous applications require -//! runtime support. In particular, the following runtime services are -//! necessary: -//! -//! * An **I/O event loop**, called the driver, which drives I/O resources and -//! dispatches I/O events to tasks that depend on them. -//! * A **scheduler** to execute [tasks] that use these I/O resources. -//! * A **timer** for scheduling work to run after a set period of time. -//! -//! Tokio's [`Runtime`] bundles all of these services as a single type, allowing -//! them to be started, shut down, and configured together. However, most -//! applications won't need to use [`Runtime`] directly. Instead, they can -//! use the [`tokio::main`] attribute macro, which creates a [`Runtime`] under -//! the hood. -//! -//! # Usage -//! -//! Most applications will use the [`tokio::main`] attribute macro. -//! -//! ```no_run -//! use tokio::net::TcpListener; -//! use tokio::prelude::*; -//! -//! #[tokio::main] -//! async fn main() -> Result<(), Box> { -//! let mut listener = TcpListener::bind("127.0.0.1:8080").await?; -//! -//! loop { -//! let (mut socket, _) = listener.accept().await?; -//! -//! tokio::spawn(async move { -//! let mut buf = [0; 1024]; -//! -//! // In a loop, read data from the socket and write the data back. -//! loop { -//! let n = match socket.read(&mut buf).await { -//! // socket closed -//! Ok(n) if n == 0 => return, -//! Ok(n) => n, -//! Err(e) => { -//! println!("failed to read from socket; err = {:?}", e); -//! return; -//! } -//! }; -//! -//! // Write the data back -//! if let Err(e) = socket.write_all(&buf[0..n]).await { -//! println!("failed to write to socket; err = {:?}", e); -//! return; -//! } -//! } -//! }); -//! } -//! } -//! ``` -//! -//! From within the context of the runtime, additional tasks are spawned using -//! the [`tokio::spawn`] function. Futures spawned using this function will be -//! executed on the same thread pool used by the [`Runtime`]. -//! -//! A [`Runtime`] instance can also be used directly. -//! -//! ```no_run -//! use tokio::net::TcpListener; -//! use tokio::prelude::*; -//! use tokio::runtime::Runtime; -//! -//! fn main() -> Result<(), Box> { -//! // Create the runtime -//! let mut rt = Runtime::new()?; -//! -//! // Spawn the root task -//! rt.block_on(async { -//! let mut listener = TcpListener::bind("127.0.0.1:8080").await?; -//! -//! loop { -//! let (mut socket, _) = listener.accept().await?; -//! -//! tokio::spawn(async move { -//! 
let mut buf = [0; 1024]; -//! -//! // In a loop, read data from the socket and write the data back. -//! loop { -//! let n = match socket.read(&mut buf).await { -//! // socket closed -//! Ok(n) if n == 0 => return, -//! Ok(n) => n, -//! Err(e) => { -//! println!("failed to read from socket; err = {:?}", e); -//! return; -//! } -//! }; -//! -//! // Write the data back -//! if let Err(e) = socket.write_all(&buf[0..n]).await { -//! println!("failed to write to socket; err = {:?}", e); -//! return; -//! } -//! } -//! }); -//! } -//! }) -//! } -//! ``` -//! -//! ## Runtime Configurations -//! -//! Tokio provides multiple task scheduling strategies, suitable for different -//! applications. The [runtime builder] or `#[tokio::main]` attribute may be -//! used to select which scheduler to use. -//! -//! #### Basic Scheduler -//! -//! The basic scheduler provides a _single-threaded_ future executor. All tasks -//! will be created and executed on the current thread. The basic scheduler -//! requires the `rt-core` feature flag, and can be selected using the -//! [`Builder::basic_scheduler`] method: -//! ``` -//! use tokio::runtime; -//! -//! # fn main() -> Result<(), Box> { -//! let basic_rt = runtime::Builder::new() -//! .basic_scheduler() -//! .build()?; -//! # Ok(()) } -//! ``` -//! -//! If the `rt-core` feature is enabled and `rt-threaded` is not, -//! [`Runtime::new`] will return a basic scheduler runtime by default. -//! -//! #### Threaded Scheduler -//! -//! The threaded scheduler executes futures on a _thread pool_, using a -//! work-stealing strategy. By default, it will start a worker thread for each -//! CPU core available on the system. This tends to be the ideal configurations -//! for most applications. The threaded scheduler requires the `rt-threaded` feature -//! flag, and can be selected using the [`Builder::threaded_scheduler`] method: -//! ``` -//! use tokio::runtime; -//! -//! # fn main() -> Result<(), Box> { -//! let threaded_rt = runtime::Builder::new() -//! .threaded_scheduler() -//! .build()?; -//! # Ok(()) } -//! ``` -//! -//! If the `rt-threaded` feature flag is enabled, [`Runtime::new`] will return a -//! threaded scheduler runtime by default. -//! -//! Most applications should use the threaded scheduler, except in some niche -//! use-cases, such as when running only a single thread is required. -//! -//! #### Resource drivers -//! -//! When configuring a runtime by hand, no resource drivers are enabled by -//! default. In this case, attempting to use networking types or time types will -//! fail. In order to enable these types, the resource drivers must be enabled. -//! This is done with [`Builder::enable_io`] and [`Builder::enable_time`]. As a -//! shorthand, [`Builder::enable_all`] enables both resource drivers. -//! -//! ## Lifetime of spawned threads -//! -//! The runtime may spawn threads depending on its configuration and usage. The -//! threaded scheduler spawns threads to schedule tasks and calls to -//! `spawn_blocking` spawn threads to run blocking operations. -//! -//! While the `Runtime` is active, threads may shutdown after periods of being -//! idle. Once `Runtime` is dropped, all runtime threads are forcibly shutdown. -//! Any tasks that have not yet completed will be dropped. -//! -//! [tasks]: crate::task -//! [`Runtime`]: Runtime -//! [`tokio::spawn`]: crate::spawn -//! [`tokio::main`]: ../attr.main.html -//! [runtime builder]: crate::runtime::Builder -//! [`Runtime::new`]: crate::runtime::Runtime::new -//! 
[`Builder::basic_scheduler`]: crate::runtime::Builder::basic_scheduler -//! [`Builder::threaded_scheduler`]: crate::runtime::Builder::threaded_scheduler -//! [`Builder::enable_io`]: crate::runtime::Builder::enable_io -//! [`Builder::enable_time`]: crate::runtime::Builder::enable_time -//! [`Builder::enable_all`]: crate::runtime::Builder::enable_all - -// At the top due to macros -#[cfg(test)] -#[macro_use] -mod tests; - -pub(crate) mod context; - -cfg_rt_core! { - mod basic_scheduler; - use basic_scheduler::BasicScheduler; - - pub(crate) mod task; -} - -mod blocking; -use blocking::BlockingPool; - -cfg_blocking_impl! { - #[allow(unused_imports)] - pub(crate) use blocking::{spawn_blocking, try_spawn_blocking}; -} - -mod builder; -pub use self::builder::Builder; - -pub(crate) mod enter; -use self::enter::enter; - -mod handle; -pub use self::handle::{Handle, TryCurrentError}; - -mod io; - -cfg_rt_threaded! { - mod park; - use park::Parker; -} - -mod shell; -use self::shell::Shell; - -mod spawner; -use self::spawner::Spawner; - -mod time; - -cfg_rt_threaded! { - mod queue; - - pub(crate) mod thread_pool; - use self::thread_pool::ThreadPool; -} - -cfg_rt_core! { - use crate::task::JoinHandle; -} - -use std::future::Future; -use std::time::Duration; - -/// The Tokio runtime. -/// -/// The runtime provides an I/O driver, task scheduler, [timer], and blocking -/// pool, necessary for running asynchronous tasks. -/// -/// Instances of `Runtime` can be created using [`new`] or [`Builder`]. However, -/// most users will use the `#[tokio::main]` annotation on their entry point instead. -/// -/// See [module level][mod] documentation for more details. -/// -/// # Shutdown -/// -/// Shutting down the runtime is done by dropping the value. The current thread -/// will block until the shut down operation has completed. -/// -/// * Drain any scheduled work queues. -/// * Drop any futures that have not yet completed. -/// * Drop the reactor. -/// -/// Once the reactor has dropped, any outstanding I/O resources bound to -/// that reactor will no longer function. Calling any method on them will -/// result in an error. -/// -/// [timer]: crate::time -/// [mod]: index.html -/// [`new`]: method@Self::new -/// [`Builder`]: struct@Builder -/// [`tokio::run`]: fn@run -#[derive(Debug)] -pub struct Runtime { - /// Task executor - kind: Kind, - - /// Handle to runtime, also contains driver handles - handle: Handle, - - /// Blocking pool handle, used to signal shutdown - blocking_pool: BlockingPool, -} - -/// The runtime executor is either a thread-pool or a current-thread executor. -#[derive(Debug)] -enum Kind { - /// Not able to execute concurrent tasks. This variant is mostly used to get - /// access to the driver handles. - Shell(Shell), - - /// Execute all tasks on the current-thread. - #[cfg(feature = "rt-core")] - Basic(BasicScheduler), - - /// Execute tasks across multiple threads. - #[cfg(feature = "rt-threaded")] - ThreadPool(ThreadPool), -} - -/// After thread starts / before thread stops -type Callback = std::sync::Arc; - -impl Runtime { - /// Create a new runtime instance with default configuration values. - /// - /// This results in a scheduler, I/O driver, and time driver being - /// initialized. The type of scheduler used depends on what feature flags - /// are enabled: if the `rt-threaded` feature is enabled, the [threaded - /// scheduler] is used, while if only the `rt-core` feature is enabled, the - /// [basic scheduler] is used instead. 
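A small configuration sketch for the "Resource drivers" point above: a hand-built runtime enables no drivers, so timers need `enable_time` (or the `enable_all` shorthand). Written against the tokio 0.2.x Builder API, assuming the `full` feature set; `delay_for` is 0.2's sleep primitive:

```
use std::time::Duration;
use tokio::runtime::Builder;

fn main() {
    let mut rt = Builder::new()
        .basic_scheduler()
        .enable_time() // without a time driver, `delay_for` would panic at runtime
        .build()
        .unwrap();

    rt.block_on(async {
        tokio::time::delay_for(Duration::from_millis(10)).await;
        println!("timer fired");
    });
}
```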
- /// - /// If the threaded scheduler is selected, it will not spawn - /// any worker threads until it needs to, i.e. tasks are scheduled to run. - /// - /// Most applications will not need to call this function directly. Instead, - /// they will use the [`#[tokio::main]` attribute][main]. When more complex - /// configuration is necessary, the [runtime builder] may be used. - /// - /// See [module level][mod] documentation for more details. - /// - /// # Examples - /// - /// Creating a new `Runtime` with default configuration values. - /// - /// ``` - /// use tokio::runtime::Runtime; - /// - /// let rt = Runtime::new() - /// .unwrap(); - /// - /// // Use the runtime... - /// ``` - /// - /// [mod]: index.html - /// [main]: ../attr.main.html - /// [threaded scheduler]: index.html#threaded-scheduler - /// [basic scheduler]: index.html#basic-scheduler - /// [runtime builder]: crate::runtime::Builder - pub fn new() -> io::Result { - #[cfg(feature = "rt-threaded")] - let ret = Builder::new().threaded_scheduler().enable_all().build(); - - #[cfg(all(not(feature = "rt-threaded"), feature = "rt-core"))] - let ret = Builder::new().basic_scheduler().enable_all().build(); - - #[cfg(not(feature = "rt-core"))] - let ret = Builder::new().enable_all().build(); - - ret - } - - /// Spawn a future onto the Tokio runtime. - /// - /// This spawns the given future onto the runtime's executor, usually a - /// thread pool. The thread pool is then responsible for polling the future - /// until it completes. - /// - /// See [module level][mod] documentation for more details. - /// - /// [mod]: index.html - /// - /// # Examples - /// - /// ``` - /// use tokio::runtime::Runtime; - /// - /// # fn dox() { - /// // Create the runtime - /// let rt = Runtime::new().unwrap(); - /// - /// // Spawn a future onto the runtime - /// rt.spawn(async { - /// println!("now running on a worker thread"); - /// }); - /// # } - /// ``` - /// - /// # Panics - /// - /// This function will not panic unless task execution is disabled on the - /// executor. This can only happen if the runtime was built using - /// [`Builder`] without picking either [`basic_scheduler`] or - /// [`threaded_scheduler`]. - /// - /// [`Builder`]: struct@Builder - /// [`threaded_scheduler`]: fn@Builder::threaded_scheduler - /// [`basic_scheduler`]: fn@Builder::basic_scheduler - #[cfg(feature = "rt-core")] - pub fn spawn(&self, future: F) -> JoinHandle - where - F: Future + Send + 'static, - F::Output: Send + 'static, - { - match &self.kind { - Kind::Shell(_) => panic!("task execution disabled"), - #[cfg(feature = "rt-threaded")] - Kind::ThreadPool(exec) => exec.spawn(future), - Kind::Basic(exec) => exec.spawn(future), - } - } - - /// Run a future to completion on the Tokio runtime. This is the runtime's - /// entry point. - /// - /// This runs the given future on the runtime, blocking until it is - /// complete, and yielding its resolved result. Any tasks or timers which - /// the future spawns internally will be executed on the runtime. - /// - /// `&mut` is required as calling `block_on` **may** result in advancing the - /// state of the runtime. The details depend on how the runtime is - /// configured. [`runtime::Handle::block_on`][handle] provides a version - /// that takes `&self`. - /// - /// This method may not be called from an asynchronous context. - /// - /// # Panics - /// - /// This function panics if the provided future panics, or if called within an - /// asynchronous execution context. 
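A short sketch combining `Runtime::spawn` and `block_on` as documented above (tokio 0.2.x, `full` feature set assumed): the spawned task runs on the scheduler, and awaiting its `JoinHandle` from `block_on` retrieves the result:

```
use tokio::runtime::Runtime;

fn main() {
    let mut rt = Runtime::new().unwrap();

    // `spawn` hands the future to the scheduler and returns a JoinHandle.
    let task = rt.spawn(async { 2 + 2 });

    // `block_on` drives a future to completion on the calling thread; the
    // JoinHandle resolves to Result<T, JoinError>.
    let value = rt.block_on(async { task.await.unwrap() });
    assert_eq!(value, 4);
}
```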
- /// - /// # Examples - /// - /// ```no_run - /// use tokio::runtime::Runtime; - /// - /// // Create the runtime - /// let mut rt = Runtime::new().unwrap(); - /// - /// // Execute the future, blocking the current thread until completion - /// rt.block_on(async { - /// println!("hello"); - /// }); - /// ``` - /// - /// [handle]: fn@Handle::block_on - pub fn block_on(&mut self, future: F) -> F::Output { - let kind = &mut self.kind; - - self.handle.enter(|| match kind { - Kind::Shell(exec) => exec.block_on(future), - #[cfg(feature = "rt-core")] - Kind::Basic(exec) => exec.block_on(future), - #[cfg(feature = "rt-threaded")] - Kind::ThreadPool(exec) => exec.block_on(future), - }) - } - - /// Enter the runtime context. This allows you to construct types that must - /// have an executor available on creation such as [`Delay`] or [`TcpStream`]. - /// It will also allow you to call methods such as [`tokio::spawn`]. - /// - /// This function is also available as [`Handle::enter`]. - /// - /// [`Delay`]: struct@crate::time::Delay - /// [`TcpStream`]: struct@crate::net::TcpStream - /// [`Handle::enter`]: fn@crate::runtime::Handle::enter - /// [`tokio::spawn`]: fn@crate::spawn - /// - /// # Example - /// - /// ``` - /// use tokio::runtime::Runtime; - /// - /// fn function_that_spawns(msg: String) { - /// // Had we not used `rt.enter` below, this would panic. - /// tokio::spawn(async move { - /// println!("{}", msg); - /// }); - /// } - /// - /// fn main() { - /// let rt = Runtime::new().unwrap(); - /// - /// let s = "Hello World!".to_string(); - /// - /// // By entering the context, we tie `tokio::spawn` to this executor. - /// rt.enter(|| function_that_spawns(s)); - /// } - /// ``` - pub fn enter(&self, f: F) -> R - where - F: FnOnce() -> R, - { - self.handle.enter(f) - } - - /// Return a handle to the runtime's spawner. - /// - /// The returned handle can be used to spawn tasks that run on this runtime, and can - /// be cloned to allow moving the `Handle` to other threads. - /// - /// # Examples - /// - /// ``` - /// use tokio::runtime::Runtime; - /// - /// let rt = Runtime::new() - /// .unwrap(); - /// - /// let handle = rt.handle(); - /// - /// handle.spawn(async { println!("hello"); }); - /// ``` - pub fn handle(&self) -> &Handle { - &self.handle - } - - /// Shutdown the runtime, waiting for at most `duration` for all spawned - /// task to shutdown. - /// - /// Usually, dropping a `Runtime` handle is sufficient as tasks are able to - /// shutdown in a timely fashion. However, dropping a `Runtime` will wait - /// indefinitely for all tasks to terminate, and there are cases where a long - /// blocking task has been spawned, which can block dropping `Runtime`. - /// - /// In this case, calling `shutdown_timeout` with an explicit wait timeout - /// can work. The `shutdown_timeout` will signal all tasks to shutdown and - /// will wait for at most `duration` for all spawned tasks to terminate. If - /// `timeout` elapses before all tasks are dropped, the function returns and - /// outstanding tasks are potentially leaked. 
- /// - /// # Examples - /// - /// ``` - /// use tokio::runtime::Runtime; - /// use tokio::task; - /// - /// use std::thread; - /// use std::time::Duration; - /// - /// fn main() { - /// let mut runtime = Runtime::new().unwrap(); - /// - /// runtime.block_on(async move { - /// task::spawn_blocking(move || { - /// thread::sleep(Duration::from_secs(10_000)); - /// }); - /// }); - /// - /// runtime.shutdown_timeout(Duration::from_millis(100)); - /// } - /// ``` - pub fn shutdown_timeout(mut self, duration: Duration) { - // Wakeup and shutdown all the worker threads - self.handle.spawner.shutdown(); - self.blocking_pool.shutdown(Some(duration)); - } - - /// Shutdown the runtime, without waiting for any spawned tasks to shutdown. - /// - /// This can be useful if you want to drop a runtime from within another runtime. - /// Normally, dropping a runtime will block indefinitely for spawned blocking tasks - /// to complete, which would normally not be permitted within an asynchronous context. - /// By calling `shutdown_background()`, you can drop the runtime from such a context. - /// - /// Note however, that because we do not wait for any blocking tasks to complete, this - /// may result in a resource leak (in that any blocking tasks are still running until they - /// return. - /// - /// This function is equivalent to calling `shutdown_timeout(Duration::of_nanos(0))`. - /// - /// ``` - /// use tokio::runtime::Runtime; - /// - /// fn main() { - /// let mut runtime = Runtime::new().unwrap(); - /// - /// runtime.block_on(async move { - /// let inner_runtime = Runtime::new().unwrap(); - /// // ... - /// inner_runtime.shutdown_background(); - /// }); - /// } - /// ``` - pub fn shutdown_background(self) { - self.shutdown_timeout(Duration::from_nanos(0)) - } -} diff --git a/third_party/rust/tokio-0.2.25/src/runtime/park.rs b/third_party/rust/tokio-0.2.25/src/runtime/park.rs deleted file mode 100644 index 1dcf65af91b8..000000000000 --- a/third_party/rust/tokio-0.2.25/src/runtime/park.rs +++ /dev/null @@ -1,257 +0,0 @@ -//! Parks the runtime. -//! -//! A combination of the various resource driver park handles. - -use crate::loom::sync::atomic::AtomicUsize; -use crate::loom::sync::{Arc, Condvar, Mutex}; -use crate::loom::thread; -use crate::park::{Park, Unpark}; -use crate::runtime::time; -use crate::util::TryLock; - -use std::sync::atomic::Ordering::SeqCst; -use std::time::Duration; - -pub(crate) struct Parker { - inner: Arc, -} - -pub(crate) struct Unparker { - inner: Arc, -} - -struct Inner { - /// Avoids entering the park if possible - state: AtomicUsize, - - /// Used to coordinate access to the driver / condvar - mutex: Mutex<()>, - - /// Condvar to block on if the driver is unavailable. - condvar: Condvar, - - /// Resource (I/O, time, ...) driver - shared: Arc, -} - -const EMPTY: usize = 0; -const PARKED_CONDVAR: usize = 1; -const PARKED_DRIVER: usize = 2; -const NOTIFIED: usize = 3; - -/// Shared across multiple Parker handles -struct Shared { - /// Shared driver. 
Only one thread at a time can use this - driver: TryLock, - - /// Unpark handle - handle: ::Unpark, -} - -impl Parker { - pub(crate) fn new(driver: time::Driver) -> Parker { - let handle = driver.unpark(); - - Parker { - inner: Arc::new(Inner { - state: AtomicUsize::new(EMPTY), - mutex: Mutex::new(()), - condvar: Condvar::new(), - shared: Arc::new(Shared { - driver: TryLock::new(driver), - handle, - }), - }), - } - } -} - -impl Clone for Parker { - fn clone(&self) -> Parker { - Parker { - inner: Arc::new(Inner { - state: AtomicUsize::new(EMPTY), - mutex: Mutex::new(()), - condvar: Condvar::new(), - shared: self.inner.shared.clone(), - }), - } - } -} - -impl Park for Parker { - type Unpark = Unparker; - type Error = (); - - fn unpark(&self) -> Unparker { - Unparker { - inner: self.inner.clone(), - } - } - - fn park(&mut self) -> Result<(), Self::Error> { - self.inner.park(); - Ok(()) - } - - fn park_timeout(&mut self, duration: Duration) -> Result<(), Self::Error> { - // Only parking with zero is supported... - assert_eq!(duration, Duration::from_millis(0)); - - if let Some(mut driver) = self.inner.shared.driver.try_lock() { - driver.park_timeout(duration).map_err(|_| ()) - } else { - Ok(()) - } - } - - fn shutdown(&mut self) { - self.inner.shutdown(); - } -} - -impl Unpark for Unparker { - fn unpark(&self) { - self.inner.unpark(); - } -} - -impl Inner { - /// Parks the current thread for at most `dur`. - fn park(&self) { - for _ in 0..3 { - // If we were previously notified then we consume this notification and - // return quickly. - if self - .state - .compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst) - .is_ok() - { - return; - } - - thread::yield_now(); - } - - if let Some(mut driver) = self.shared.driver.try_lock() { - self.park_driver(&mut driver); - } else { - self.park_condvar(); - } - } - - fn park_condvar(&self) { - // Otherwise we need to coordinate going to sleep - let mut m = self.mutex.lock().unwrap(); - - match self - .state - .compare_exchange(EMPTY, PARKED_CONDVAR, SeqCst, SeqCst) - { - Ok(_) => {} - Err(NOTIFIED) => { - // We must read here, even though we know it will be `NOTIFIED`. - // This is because `unpark` may have been called again since we read - // `NOTIFIED` in the `compare_exchange` above. We must perform an - // acquire operation that synchronizes with that `unpark` to observe - // any writes it made before the call to unpark. To do that we must - // read from the write it made to `state`. - let old = self.state.swap(EMPTY, SeqCst); - debug_assert_eq!(old, NOTIFIED, "park state changed unexpectedly"); - - return; - } - Err(actual) => panic!("inconsistent park state; actual = {}", actual), - } - - loop { - m = self.condvar.wait(m).unwrap(); - - if self - .state - .compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst) - .is_ok() - { - // got a notification - return; - } - - // spurious wakeup, go back to sleep - } - } - - fn park_driver(&self, driver: &mut time::Driver) { - match self - .state - .compare_exchange(EMPTY, PARKED_DRIVER, SeqCst, SeqCst) - { - Ok(_) => {} - Err(NOTIFIED) => { - // We must read here, even though we know it will be `NOTIFIED`. - // This is because `unpark` may have been called again since we read - // `NOTIFIED` in the `compare_exchange` above. We must perform an - // acquire operation that synchronizes with that `unpark` to observe - // any writes it made before the call to unpark. To do that we must - // read from the write it made to `state`. 
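For readers following the state machine here, a stand-alone, simplified sketch of its condvar path: three states, a swap-based `unpark`, and a wait loop that tolerates spurious wakeups. This is not tokio's code (the spin loop, driver path, and per-clone state are omitted); it only models the EMPTY/PARKED/NOTIFIED transitions discussed above:

```
use std::sync::atomic::{AtomicUsize, Ordering::SeqCst};
use std::sync::{Arc, Condvar, Mutex};

const EMPTY: usize = 0;
const PARKED: usize = 1;
const NOTIFIED: usize = 2;

struct Inner {
    state: AtomicUsize,
    mutex: Mutex<()>,
    condvar: Condvar,
}

#[derive(Clone)]
struct Parker(Arc<Inner>);

impl Parker {
    fn new() -> Self {
        Parker(Arc::new(Inner {
            state: AtomicUsize::new(EMPTY),
            mutex: Mutex::new(()),
            condvar: Condvar::new(),
        }))
    }

    fn park(&self) {
        let inner = &self.0;
        let mut guard = inner.mutex.lock().unwrap();

        // Consume a pending notification, or record that we are about to sleep.
        match inner.state.compare_exchange(EMPTY, PARKED, SeqCst, SeqCst) {
            Ok(_) => {}
            Err(NOTIFIED) => {
                inner.state.store(EMPTY, SeqCst);
                return;
            }
            Err(actual) => panic!("inconsistent park state: {}", actual),
        }

        loop {
            guard = inner.condvar.wait(guard).unwrap();
            // Only a real notification lets us return; anything else is spurious.
            if inner
                .state
                .compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst)
                .is_ok()
            {
                return;
            }
        }
    }

    fn unpark(&self) {
        // Publish the notification first, then wake the sleeper if there is one.
        // Taking the mutex guarantees the parked thread is really in `wait`.
        if self.0.state.swap(NOTIFIED, SeqCst) == PARKED {
            drop(self.0.mutex.lock().unwrap());
            self.0.condvar.notify_one();
        }
    }
}

fn main() {
    let parker = Parker::new();
    let unparker = parker.clone(); // shares the same state in this sketch

    let t = std::thread::spawn(move || {
        std::thread::sleep(std::time::Duration::from_millis(50));
        unparker.unpark();
    });

    parker.park(); // returns once the other thread has called `unpark`
    t.join().unwrap();
}
```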
- let old = self.state.swap(EMPTY, SeqCst); - debug_assert_eq!(old, NOTIFIED, "park state changed unexpectedly"); - - return; - } - Err(actual) => panic!("inconsistent park state; actual = {}", actual), - } - - // TODO: don't unwrap - driver.park().unwrap(); - - match self.state.swap(EMPTY, SeqCst) { - NOTIFIED => {} // got a notification, hurray! - PARKED_DRIVER => {} // no notification, alas - n => panic!("inconsistent park_timeout state: {}", n), - } - } - - fn unpark(&self) { - // To ensure the unparked thread will observe any writes we made before - // this call, we must perform a release operation that `park` can - // synchronize with. To do that we must write `NOTIFIED` even if `state` - // is already `NOTIFIED`. That is why this must be a swap rather than a - // compare-and-swap that returns if it reads `NOTIFIED` on failure. - match self.state.swap(NOTIFIED, SeqCst) { - EMPTY => {} // no one was waiting - NOTIFIED => {} // already unparked - PARKED_CONDVAR => self.unpark_condvar(), - PARKED_DRIVER => self.unpark_driver(), - actual => panic!("inconsistent state in unpark; actual = {}", actual), - } - } - - fn unpark_condvar(&self) { - // There is a period between when the parked thread sets `state` to - // `PARKED` (or last checked `state` in the case of a spurious wake - // up) and when it actually waits on `cvar`. If we were to notify - // during this period it would be ignored and then when the parked - // thread went to sleep it would never wake up. Fortunately, it has - // `lock` locked at this stage so we can acquire `lock` to wait until - // it is ready to receive the notification. - // - // Releasing `lock` before the call to `notify_one` means that when the - // parked thread wakes it doesn't get woken only to have to wait for us - // to release `lock`. - drop(self.mutex.lock().unwrap()); - - self.condvar.notify_one() - } - - fn unpark_driver(&self) { - self.shared.handle.unpark(); - } - - fn shutdown(&self) { - if let Some(mut driver) = self.shared.driver.try_lock() { - driver.shutdown(); - } - - self.condvar.notify_all(); - } -} diff --git a/third_party/rust/tokio-0.2.25/src/runtime/queue.rs b/third_party/rust/tokio-0.2.25/src/runtime/queue.rs deleted file mode 100644 index c654514bbc6b..000000000000 --- a/third_party/rust/tokio-0.2.25/src/runtime/queue.rs +++ /dev/null @@ -1,630 +0,0 @@ -//! Run-queue structures to support a work-stealing scheduler - -use crate::loom::cell::UnsafeCell; -use crate::loom::sync::atomic::{AtomicU16, AtomicU32, AtomicUsize}; -use crate::loom::sync::{Arc, Mutex}; -use crate::runtime::task; - -use std::marker::PhantomData; -use std::mem::MaybeUninit; -use std::ptr::{self, NonNull}; -use std::sync::atomic::Ordering::{AcqRel, Acquire, Release}; - -/// Producer handle. May only be used from a single thread. -pub(super) struct Local { - inner: Arc>, -} - -/// Consumer handle. May be used from many threads. -pub(super) struct Steal(Arc>); - -/// Growable, MPMC queue used to inject new tasks into the scheduler and as an -/// overflow queue when the local, fixed-size, array queue overflows. -pub(super) struct Inject { - /// Pointers to the head and tail of the queue - pointers: Mutex, - - /// Number of pending tasks in the queue. This helps prevent unnecessary - /// locking in the hot path. - len: AtomicUsize, - - _p: PhantomData, -} - -pub(super) struct Inner { - /// Concurrently updated by many threads. - /// - /// Contains two `u16` values. The LSB byte is the "real" head of the queue. 
- /// The `u16` in the MSB is set by a stealer in process of stealing values. - /// It represents the first value being stolen in the batch. `u16` is used - /// in order to distinguish between `head == tail` and `head == tail - - /// capacity`. - /// - /// When both `u16` values are the same, there is no active stealer. - /// - /// Tracking an in-progress stealer prevents a wrapping scenario. - head: AtomicU32, - - /// Only updated by producer thread but read by many threads. - tail: AtomicU16, - - /// Elements - buffer: Box<[UnsafeCell>>]>, -} - -struct Pointers { - /// True if the queue is closed - is_closed: bool, - - /// Linked-list head - head: Option>, - - /// Linked-list tail - tail: Option>, -} - -unsafe impl Send for Inner {} -unsafe impl Sync for Inner {} -unsafe impl Send for Inject {} -unsafe impl Sync for Inject {} - -#[cfg(not(loom))] -const LOCAL_QUEUE_CAPACITY: usize = 256; - -// Shrink the size of the local queue when using loom. This shouldn't impact -// logic, but allows loom to test more edge cases in a reasonable a mount of -// time. -#[cfg(loom)] -const LOCAL_QUEUE_CAPACITY: usize = 4; - -const MASK: usize = LOCAL_QUEUE_CAPACITY - 1; - -/// Create a new local run-queue -pub(super) fn local() -> (Steal, Local) { - let mut buffer = Vec::with_capacity(LOCAL_QUEUE_CAPACITY); - - for _ in 0..LOCAL_QUEUE_CAPACITY { - buffer.push(UnsafeCell::new(MaybeUninit::uninit())); - } - - let inner = Arc::new(Inner { - head: AtomicU32::new(0), - tail: AtomicU16::new(0), - buffer: buffer.into(), - }); - - let local = Local { - inner: inner.clone(), - }; - - let remote = Steal(inner); - - (remote, local) -} - -impl Local { - /// Returns true if the queue has entries that can be stealed. - pub(super) fn is_stealable(&self) -> bool { - !self.inner.is_empty() - } - - /// Pushes a task to the back of the local queue, skipping the LIFO slot. - pub(super) fn push_back(&mut self, mut task: task::Notified, inject: &Inject) { - let tail = loop { - let head = self.inner.head.load(Acquire); - let (steal, real) = unpack(head); - - // safety: this is the **only** thread that updates this cell. - let tail = unsafe { self.inner.tail.unsync_load() }; - - if tail.wrapping_sub(steal) < LOCAL_QUEUE_CAPACITY as u16 { - // There is capacity for the task - break tail; - } else if steal != real { - // Concurrently stealing, this will free up capacity, so - // only push the new task onto the inject queue - inject.push(task); - return; - } else { - // Push the current task and half of the queue into the - // inject queue. - match self.push_overflow(task, real, tail, inject) { - Ok(_) => return, - // Lost the race, try again - Err(v) => { - task = v; - } - } - } - }; - - // Map the position to a slot index. - let idx = tail as usize & MASK; - - self.inner.buffer[idx].with_mut(|ptr| { - // Write the task to the slot - // - // Safety: There is only one producer and the above `if` - // condition ensures we don't touch a cell if there is a - // value, thus no consumer. - unsafe { - ptr::write((*ptr).as_mut_ptr(), task); - } - }); - - // Make the task available. Synchronizes with a load in - // `steal_into2`. - self.inner.tail.store(tail.wrapping_add(1), Release); - } - - /// Moves a batch of tasks into the inject queue. - /// - /// This will temporarily make some of the tasks unavailable to stealers. - /// Once `push_overflow` is done, a notification is sent out, so if other - /// workers "missed" some of the tasks during a steal, they will get - /// another opportunity. 
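The packed head word described above is just two `u16` indices in one `u32`: the stealer's snapshot in the high half, the real head in the low half. A quick, self-contained round-trip check of that encoding (same shape as the `pack`/`unpack` helpers at the bottom of this file):

```
fn unpack(n: u32) -> (u16, u16) {
    let real = (n & u16::MAX as u32) as u16;
    let steal = (n >> 16) as u16;
    (steal, real)
}

fn pack(steal: u16, real: u16) -> u32 {
    (real as u32) | ((steal as u32) << 16)
}

fn main() {
    // No stealer active: both halves hold the same index.
    assert_eq!(unpack(pack(7, 7)), (7, 7));

    // A stealer claimed entries 7..12: `real` advanced while `steal` lags, so
    // producers and `pop` can tell a steal is in progress (steal != real).
    let head = pack(7, 12);
    assert_eq!(unpack(head), (7, 12));
    assert_ne!(unpack(head).0, unpack(head).1);
    println!("ok");
}
```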
- #[inline(never)] - fn push_overflow( - &mut self, - task: task::Notified, - head: u16, - tail: u16, - inject: &Inject, - ) -> Result<(), task::Notified> { - const BATCH_LEN: usize = LOCAL_QUEUE_CAPACITY / 2 + 1; - - let n = (LOCAL_QUEUE_CAPACITY / 2) as u16; - assert_eq!( - tail.wrapping_sub(head) as usize, - LOCAL_QUEUE_CAPACITY, - "queue is not full; tail = {}; head = {}", - tail, - head - ); - - let prev = pack(head, head); - - // Claim a bunch of tasks - // - // We are claiming the tasks **before** reading them out of the buffer. - // This is safe because only the **current** thread is able to push new - // tasks. - // - // There isn't really any need for memory ordering... Relaxed would - // work. This is because all tasks are pushed into the queue from the - // current thread (or memory has been acquired if the local queue handle - // moved). - let actual = self.inner.head.compare_and_swap( - prev, - pack(head.wrapping_add(n), head.wrapping_add(n)), - Release, - ); - - if actual != prev { - // We failed to claim the tasks, losing the race. Return out of - // this function and try the full `push` routine again. The queue - // may not be full anymore. - return Err(task); - } - - // link the tasks - for i in 0..n { - let j = i + 1; - - let i_idx = i.wrapping_add(head) as usize & MASK; - let j_idx = j.wrapping_add(head) as usize & MASK; - - // Get the next pointer - let next = if j == n { - // The last task in the local queue being moved - task.header().into() - } else { - // safety: The above CAS prevents a stealer from accessing these - // tasks and we are the only producer. - self.inner.buffer[j_idx].with(|ptr| unsafe { - let value = (*ptr).as_ptr(); - (*value).header().into() - }) - }; - - // safety: the above CAS prevents a stealer from accessing these - // tasks and we are the only producer. - self.inner.buffer[i_idx].with_mut(|ptr| unsafe { - let ptr = (*ptr).as_ptr(); - (*ptr).header().queue_next.with_mut(|ptr| *ptr = Some(next)); - }); - } - - // safety: the above CAS prevents a stealer from accessing these tasks - // and we are the only producer. - let head = self.inner.buffer[head as usize & MASK] - .with(|ptr| unsafe { ptr::read((*ptr).as_ptr()) }); - - // Push the tasks onto the inject queue - inject.push_batch(head, task, BATCH_LEN); - - Ok(()) - } - - /// Pops a task from the local queue. - pub(super) fn pop(&mut self) -> Option> { - let mut head = self.inner.head.load(Acquire); - - let idx = loop { - let (steal, real) = unpack(head); - - // safety: this is the **only** thread that updates this cell. - let tail = unsafe { self.inner.tail.unsync_load() }; - - if real == tail { - // queue is empty - return None; - } - - let next_real = real.wrapping_add(1); - - // If `steal == real` there are no concurrent stealers. Both `steal` - // and `real` are updated. - let next = if steal == real { - pack(next_real, next_real) - } else { - assert_ne!(steal, next_real); - pack(steal, next_real) - }; - - // Attempt to claim a task. - let res = self - .inner - .head - .compare_exchange(head, next, AcqRel, Acquire); - - match res { - Ok(_) => break real as usize & MASK, - Err(actual) => head = actual, - } - }; - - Some(self.inner.buffer[idx].with(|ptr| unsafe { ptr::read(ptr).assume_init() })) - } -} - -impl Steal { - pub(super) fn is_empty(&self) -> bool { - self.0.is_empty() - } - - /// Steals half the tasks from self and place them into `dst`. 
- pub(super) fn steal_into(&self, dst: &mut Local) -> Option> { - // Safety: the caller is the only thread that mutates `dst.tail` and - // holds a mutable reference. - let dst_tail = unsafe { dst.inner.tail.unsync_load() }; - - // To the caller, `dst` may **look** empty but still have values - // contained in the buffer. If another thread is concurrently stealing - // from `dst` there may not be enough capacity to steal. - let (steal, _) = unpack(dst.inner.head.load(Acquire)); - - if dst_tail.wrapping_sub(steal) > LOCAL_QUEUE_CAPACITY as u16 / 2 { - // we *could* try to steal less here, but for simplicity, we're just - // going to abort. - return None; - } - - // Steal the tasks into `dst`'s buffer. This does not yet expose the - // tasks in `dst`. - let mut n = self.steal_into2(dst, dst_tail); - - if n == 0 { - // No tasks were stolen - return None; - } - - // We are returning a task here - n -= 1; - - let ret_pos = dst_tail.wrapping_add(n); - let ret_idx = ret_pos as usize & MASK; - - // safety: the value was written as part of `steal_into2` and not - // exposed to stealers, so no other thread can access it. - let ret = dst.inner.buffer[ret_idx].with(|ptr| unsafe { ptr::read((*ptr).as_ptr()) }); - - if n == 0 { - // The `dst` queue is empty, but a single task was stolen - return Some(ret); - } - - // Make the stolen items available to consumers - dst.inner.tail.store(dst_tail.wrapping_add(n), Release); - - Some(ret) - } - - // Steal tasks from `self`, placing them into `dst`. Returns the number of - // tasks that were stolen. - fn steal_into2(&self, dst: &mut Local, dst_tail: u16) -> u16 { - let mut prev_packed = self.0.head.load(Acquire); - let mut next_packed; - - let n = loop { - let (src_head_steal, src_head_real) = unpack(prev_packed); - let src_tail = self.0.tail.load(Acquire); - - // If these two do not match, another thread is concurrently - // stealing from the queue. - if src_head_steal != src_head_real { - return 0; - } - - // Number of available tasks to steal - let n = src_tail.wrapping_sub(src_head_real); - let n = n - n / 2; - - if n == 0 { - // No tasks available to steal - return 0; - } - - // Update the real head index to acquire the tasks. - let steal_to = src_head_real.wrapping_add(n); - assert_ne!(src_head_steal, steal_to); - next_packed = pack(src_head_steal, steal_to); - - // Claim all those tasks. This is done by incrementing the "real" - // head but not the steal. By doing this, no other thread is able to - // steal from this queue until the current thread completes. - let res = self - .0 - .head - .compare_exchange(prev_packed, next_packed, AcqRel, Acquire); - - match res { - Ok(_) => break n, - Err(actual) => prev_packed = actual, - } - }; - - assert!(n <= LOCAL_QUEUE_CAPACITY as u16 / 2, "actual = {}", n); - - let (first, _) = unpack(next_packed); - - // Take all the tasks - for i in 0..n { - // Compute the positions - let src_pos = first.wrapping_add(i); - let dst_pos = dst_tail.wrapping_add(i); - - // Map to slots - let src_idx = src_pos as usize & MASK; - let dst_idx = dst_pos as usize & MASK; - - // Read the task - // - // safety: We acquired the task with the atomic exchange above. - let task = self.0.buffer[src_idx].with(|ptr| unsafe { ptr::read((*ptr).as_ptr()) }); - - // Write the task to the new slot - // - // safety: `dst` queue is empty and we are the only producer to - // this queue. 
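As a quick sanity check on the `let n = n - n / 2` computation in `steal_into2` above: it takes half of the available tasks rounded up, so even a queue holding a single task can be stolen from. A tiny stand-alone check:

```
fn steal_count(available: u16) -> u16 {
    // Same arithmetic as `steal_into2`: half of `available`, rounded up.
    available - available / 2
}

fn main() {
    assert_eq!(steal_count(0), 0);
    assert_eq!(steal_count(1), 1); // a single queued task is still stealable
    assert_eq!(steal_count(5), 3);
    assert_eq!(steal_count(8), 4);
    println!("ok");
}
```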
- dst.inner.buffer[dst_idx] - .with_mut(|ptr| unsafe { ptr::write((*ptr).as_mut_ptr(), task) }); - } - - let mut prev_packed = next_packed; - - // Update `src_head_steal` to match `src_head_real` signalling that the - // stealing routine is complete. - loop { - let head = unpack(prev_packed).1; - next_packed = pack(head, head); - - let res = self - .0 - .head - .compare_exchange(prev_packed, next_packed, AcqRel, Acquire); - - match res { - Ok(_) => return n, - Err(actual) => { - let (actual_steal, actual_real) = unpack(actual); - - assert_ne!(actual_steal, actual_real); - - prev_packed = actual; - } - } - } - } -} - -impl Clone for Steal { - fn clone(&self) -> Steal { - Steal(self.0.clone()) - } -} - -impl Drop for Local { - fn drop(&mut self) { - if !std::thread::panicking() { - assert!(self.pop().is_none(), "queue not empty"); - } - } -} - -impl Inner { - fn is_empty(&self) -> bool { - let (_, head) = unpack(self.head.load(Acquire)); - let tail = self.tail.load(Acquire); - - head == tail - } -} - -impl Inject { - pub(super) fn new() -> Inject { - Inject { - pointers: Mutex::new(Pointers { - is_closed: false, - head: None, - tail: None, - }), - len: AtomicUsize::new(0), - _p: PhantomData, - } - } - - pub(super) fn is_empty(&self) -> bool { - self.len() == 0 - } - - /// Close the injection queue, returns `true` if the queue is open when the - /// transition is made. - pub(super) fn close(&self) -> bool { - let mut p = self.pointers.lock().unwrap(); - - if p.is_closed { - return false; - } - - p.is_closed = true; - true - } - - pub(super) fn is_closed(&self) -> bool { - self.pointers.lock().unwrap().is_closed - } - - pub(super) fn len(&self) -> usize { - self.len.load(Acquire) - } - - /// Pushes a value into the queue. - pub(super) fn push(&self, task: task::Notified) { - // Acquire queue lock - let mut p = self.pointers.lock().unwrap(); - - if p.is_closed { - // Drop the mutex to avoid a potential deadlock when - // re-entering. - drop(p); - drop(task); - return; - } - - // safety: only mutated with the lock held - let len = unsafe { self.len.unsync_load() }; - let task = task.into_raw(); - - // The next pointer should already be null - debug_assert!(get_next(task).is_none()); - - if let Some(tail) = p.tail { - set_next(tail, Some(task)); - } else { - p.head = Some(task); - } - - p.tail = Some(task); - - self.len.store(len + 1, Release); - } - - pub(super) fn push_batch( - &self, - batch_head: task::Notified, - batch_tail: task::Notified, - num: usize, - ) { - let batch_head = batch_head.into_raw(); - let batch_tail = batch_tail.into_raw(); - - debug_assert!(get_next(batch_tail).is_none()); - - let mut p = self.pointers.lock().unwrap(); - - if let Some(tail) = p.tail { - set_next(tail, Some(batch_head)); - } else { - p.head = Some(batch_head); - } - - p.tail = Some(batch_tail); - - // Increment the count. - // - // safety: All updates to the len atomic are guarded by the mutex. As - // such, a non-atomic load followed by a store is safe. - let len = unsafe { self.len.unsync_load() }; - - self.len.store(len + num, Release); - } - - pub(super) fn pop(&self) -> Option> { - // Fast path, if len == 0, then there are no values - if self.is_empty() { - return None; - } - - let mut p = self.pointers.lock().unwrap(); - - // It is possible to hit null here if another thread poped the last - // task between us checking `len` and acquiring the lock. - let task = p.head?; - - p.head = get_next(task); - - if p.head.is_none() { - p.tail = None; - } - - set_next(task, None); - - // Decrement the count. 
- // - // safety: All updates to the len atomic are guarded by the mutex. As - // such, a non-atomic load followed by a store is safe. - self.len - .store(unsafe { self.len.unsync_load() } - 1, Release); - - // safety: a `Notified` is pushed into the queue and now it is popped! - Some(unsafe { task::Notified::from_raw(task) }) - } -} - -impl Drop for Inject { - fn drop(&mut self) { - if !std::thread::panicking() { - assert!(self.pop().is_none(), "queue not empty"); - } - } -} - -fn get_next(header: NonNull) -> Option> { - unsafe { header.as_ref().queue_next.with(|ptr| *ptr) } -} - -fn set_next(header: NonNull, val: Option>) { - unsafe { - header.as_ref().queue_next.with_mut(|ptr| *ptr = val); - } -} - -/// Split the head value into the real head and the index a stealer is working -/// on. -fn unpack(n: u32) -> (u16, u16) { - let real = n & u16::max_value() as u32; - let steal = n >> 16; - - (steal as u16, real as u16) -} - -/// Join the two head values -fn pack(steal: u16, real: u16) -> u32 { - (real as u32) | ((steal as u32) << 16) -} - -#[test] -fn test_local_queue_capacity() { - assert!(LOCAL_QUEUE_CAPACITY - 1 <= u8::max_value() as usize); -} diff --git a/third_party/rust/tokio-0.2.25/src/runtime/shell.rs b/third_party/rust/tokio-0.2.25/src/runtime/shell.rs deleted file mode 100644 index a65869d0de26..000000000000 --- a/third_party/rust/tokio-0.2.25/src/runtime/shell.rs +++ /dev/null @@ -1,62 +0,0 @@ -#![allow(clippy::redundant_clone)] - -use crate::park::{Park, Unpark}; -use crate::runtime::enter; -use crate::runtime::time; -use crate::util::{waker_ref, Wake}; - -use std::future::Future; -use std::sync::Arc; -use std::task::Context; -use std::task::Poll::Ready; - -#[derive(Debug)] -pub(super) struct Shell { - driver: time::Driver, - - /// TODO: don't store this - unpark: Arc, -} - -#[derive(Debug)] -struct Handle(::Unpark); - -impl Shell { - pub(super) fn new(driver: time::Driver) -> Shell { - let unpark = Arc::new(Handle(driver.unpark())); - - Shell { driver, unpark } - } - - pub(super) fn block_on(&mut self, f: F) -> F::Output - where - F: Future, - { - let _e = enter(true); - - pin!(f); - - let waker = waker_ref(&self.unpark); - let mut cx = Context::from_waker(&waker); - - loop { - if let Ready(v) = crate::coop::budget(|| f.as_mut().poll(&mut cx)) { - return v; - } - - self.driver.park().unwrap(); - } - } -} - -impl Wake for Handle { - /// Wake by value - fn wake(self: Arc) { - Wake::wake_by_ref(&self); - } - - /// Wake by reference - fn wake_by_ref(arc_self: &Arc) { - arc_self.0.unpark(); - } -} diff --git a/third_party/rust/tokio-0.2.25/src/runtime/spawner.rs b/third_party/rust/tokio-0.2.25/src/runtime/spawner.rs deleted file mode 100644 index c5f2d17cdd35..000000000000 --- a/third_party/rust/tokio-0.2.25/src/runtime/spawner.rs +++ /dev/null @@ -1,48 +0,0 @@ -cfg_rt_core! { - use crate::runtime::basic_scheduler; - use crate::task::JoinHandle; - - use std::future::Future; -} - -cfg_rt_threaded! { - use crate::runtime::thread_pool; -} - -#[derive(Debug, Clone)] -pub(crate) enum Spawner { - Shell, - #[cfg(feature = "rt-core")] - Basic(basic_scheduler::Spawner), - #[cfg(feature = "rt-threaded")] - ThreadPool(thread_pool::Spawner), -} - -impl Spawner { - pub(crate) fn shutdown(&mut self) { - #[cfg(feature = "rt-threaded")] - { - if let Spawner::ThreadPool(spawner) = self { - spawner.shutdown(); - } - } - } -} - -cfg_rt_core! 
{ - impl Spawner { - pub(crate) fn spawn(&self, future: F) -> JoinHandle - where - F: Future + Send + 'static, - F::Output: Send + 'static, - { - match self { - Spawner::Shell => panic!("spawning not enabled for runtime"), - #[cfg(feature = "rt-core")] - Spawner::Basic(spawner) => spawner.spawn(future), - #[cfg(feature = "rt-threaded")] - Spawner::ThreadPool(spawner) => spawner.spawn(future), - } - } - } -} diff --git a/third_party/rust/tokio-0.2.25/src/runtime/task/core.rs b/third_party/rust/tokio-0.2.25/src/runtime/task/core.rs deleted file mode 100644 index f4756c238efb..000000000000 --- a/third_party/rust/tokio-0.2.25/src/runtime/task/core.rs +++ /dev/null @@ -1,289 +0,0 @@ -//! Core task module. -//! -//! # Safety -//! -//! The functions in this module are private to the `task` module. All of them -//! should be considered `unsafe` to use, but are not marked as such since it -//! would be too noisy. -//! -//! Make sure to consult the relevant safety section of each function before -//! use. - -use crate::loom::cell::UnsafeCell; -use crate::runtime::task::raw::{self, Vtable}; -use crate::runtime::task::state::State; -use crate::runtime::task::waker::waker_ref; -use crate::runtime::task::{Notified, Schedule, Task}; -use crate::util::linked_list; - -use std::future::Future; -use std::pin::Pin; -use std::ptr::NonNull; -use std::task::{Context, Poll, Waker}; - -/// The task cell. Contains the components of the task. -/// -/// It is critical for `Header` to be the first field as the task structure will -/// be referenced by both *mut Cell and *mut Header. -#[repr(C)] -pub(super) struct Cell { - /// Hot task state data - pub(super) header: Header, - - /// Either the future or output, depending on the execution stage. - pub(super) core: Core, - - /// Cold data - pub(super) trailer: Trailer, -} - -/// The core of the task. -/// -/// Holds the future or output, depending on the stage of execution. -pub(super) struct Core { - /// Scheduler used to drive this future - pub(super) scheduler: UnsafeCell>, - - /// Either the future or the output - pub(super) stage: UnsafeCell>, -} - -/// Crate public as this is also needed by the pool. -#[repr(C)] -pub(crate) struct Header { - /// Task state - pub(super) state: State, - - pub(crate) owned: UnsafeCell>, - - /// Pointer to next task, used with the injection queue - pub(crate) queue_next: UnsafeCell>>, - - /// Pointer to the next task in the transfer stack - pub(super) stack_next: UnsafeCell>>, - - /// Table of function pointers for executing actions on the task. - pub(super) vtable: &'static Vtable, -} - -unsafe impl Send for Header {} -unsafe impl Sync for Header {} - -/// Cold data is stored after the future. -pub(super) struct Trailer { - /// Consumer task waiting on completion of this task. - pub(super) waker: UnsafeCell>, -} - -/// Either the future or the output. -pub(super) enum Stage { - Running(T), - Finished(super::Result), - Consumed, -} - -impl Cell { - /// Allocates a new task cell, containing the header, trailer, and core - /// structures. - pub(super) fn new(future: T, state: State) -> Box> { - Box::new(Cell { - header: Header { - state, - owned: UnsafeCell::new(linked_list::Pointers::new()), - queue_next: UnsafeCell::new(None), - stack_next: UnsafeCell::new(None), - vtable: raw::vtable::(), - }, - core: Core { - scheduler: UnsafeCell::new(None), - stage: UnsafeCell::new(Stage::Running(future)), - }, - trailer: Trailer { - waker: UnsafeCell::new(None), - }, - }) - } -} - -impl Core { - /// Bind a scheduler to the task. 
- /// - /// This only happens on the first poll and must be preceeded by a call to - /// `is_bound` to determine if binding is appropriate or not. - /// - /// # Safety - /// - /// Binding must not be done concurrently since it will mutate the task - /// core through a shared reference. - pub(super) fn bind_scheduler(&self, task: Task) { - // This function may be called concurrently, but the __first__ time it - // is called, the caller has unique access to this field. All subsequent - // concurrent calls will be via the `Waker`, which will "happens after" - // the first poll. - // - // In other words, it is always safe to read the field and it is safe to - // write to the field when it is `None`. - debug_assert!(!self.is_bound()); - - // Bind the task to the scheduler - let scheduler = S::bind(task); - - // Safety: As `scheduler` is not set, this is the first poll - self.scheduler.with_mut(|ptr| unsafe { - *ptr = Some(scheduler); - }); - } - - /// Returns true if the task is bound to a scheduler. - pub(super) fn is_bound(&self) -> bool { - // Safety: never called concurrently w/ a mutation. - self.scheduler.with(|ptr| unsafe { (*ptr).is_some() }) - } - - /// Poll the future - /// - /// # Safety - /// - /// The caller must ensure it is safe to mutate the `state` field. This - /// requires ensuring mutal exclusion between any concurrent thread that - /// might modify the future or output field. - /// - /// The mutual exclusion is implemented by `Harness` and the `Lifecycle` - /// component of the task state. - /// - /// `self` must also be pinned. This is handled by storing the task on the - /// heap. - pub(super) fn poll(&self, header: &Header) -> Poll { - let res = { - self.stage.with_mut(|ptr| { - // Safety: The caller ensures mutual exclusion to the field. - let future = match unsafe { &mut *ptr } { - Stage::Running(future) => future, - _ => unreachable!("unexpected stage"), - }; - - // Safety: The caller ensures the future is pinned. - let future = unsafe { Pin::new_unchecked(future) }; - - // The waker passed into the `poll` function does not require a ref - // count increment. - let waker_ref = waker_ref::(header); - let mut cx = Context::from_waker(&*waker_ref); - - future.poll(&mut cx) - }) - }; - - if res.is_ready() { - self.drop_future_or_output(); - } - - res - } - - /// Drop the future - /// - /// # Safety - /// - /// The caller must ensure it is safe to mutate the `stage` field. - pub(super) fn drop_future_or_output(&self) { - self.stage.with_mut(|ptr| { - // Safety: The caller ensures mutal exclusion to the field. - unsafe { *ptr = Stage::Consumed }; - }); - } - - /// Store the task output - /// - /// # Safety - /// - /// The caller must ensure it is safe to mutate the `stage` field. - pub(super) fn store_output(&self, output: super::Result) { - self.stage.with_mut(|ptr| { - // Safety: the caller ensures mutual exclusion to the field. - unsafe { *ptr = Stage::Finished(output) }; - }); - } - - /// Take the task output - /// - /// # Safety - /// - /// The caller must ensure it is safe to mutate the `stage` field. - pub(super) fn take_output(&self) -> super::Result { - use std::mem; - - self.stage.with_mut(|ptr| { - // Safety:: the caller ensures mutal exclusion to the field. 
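The `Stage` lifecycle used by `Core` above (future while running, output once finished, `Consumed` after the output is taken) can be modelled in a few lines. This sketch is not tokio's code; it only mirrors the `store_output`/`take_output` shape, including the `mem::replace` step:

```
use std::mem;

enum Stage<T> {
    Running(&'static str), // placeholder for the stored future
    Finished(T),
    Consumed,
}

fn store_output<T>(stage: &mut Stage<T>, output: T) {
    *stage = Stage::Finished(output);
}

fn take_output<T>(stage: &mut Stage<T>) -> T {
    match mem::replace(stage, Stage::Consumed) {
        Stage::Finished(output) => output,
        _ => panic!("unexpected task state"),
    }
}

fn main() {
    let mut stage: Stage<u32> = Stage::Running("pending future");
    store_output(&mut stage, 42); // the "future" completed with 42
    assert_eq!(take_output(&mut stage), 42);
    assert!(matches!(stage, Stage::Consumed));
}
```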
- match mem::replace(unsafe { &mut *ptr }, Stage::Consumed) { - Stage::Finished(output) => output, - _ => panic!("unexpected task state"), - } - }) - } - - /// Schedule the future for execution - pub(super) fn schedule(&self, task: Notified) { - self.scheduler.with(|ptr| { - // Safety: Can only be called after initial `poll`, which is the - // only time the field is mutated. - match unsafe { &*ptr } { - Some(scheduler) => scheduler.schedule(task), - None => panic!("no scheduler set"), - } - }); - } - - /// Schedule the future for execution in the near future, yielding the - /// thread to other tasks. - pub(super) fn yield_now(&self, task: Notified) { - self.scheduler.with(|ptr| { - // Safety: Can only be called after initial `poll`, which is the - // only time the field is mutated. - match unsafe { &*ptr } { - Some(scheduler) => scheduler.yield_now(task), - None => panic!("no scheduler set"), - } - }); - } - - /// Release the task - /// - /// If the `Scheduler` implementation is able to, it returns the `Task` - /// handle immediately. The caller of this function will batch a ref-dec - /// with a state change. - pub(super) fn release(&self, task: Task) -> Option> { - use std::mem::ManuallyDrop; - - let task = ManuallyDrop::new(task); - - self.scheduler.with(|ptr| { - // Safety: Can only be called after initial `poll`, which is the - // only time the field is mutated. - match unsafe { &*ptr } { - Some(scheduler) => scheduler.release(&*task), - // Task was never polled - None => None, - } - }) - } -} - -cfg_rt_threaded! { - impl Header { - pub(crate) fn shutdown(&self) { - use crate::runtime::task::RawTask; - - let task = unsafe { RawTask::from_raw(self.into()) }; - task.shutdown(); - } - } -} - -#[test] -#[cfg(not(loom))] -fn header_lte_cache_line() { - use std::mem::size_of; - - assert!(size_of::
<Header>
() <= 8 * size_of::<*const ()>()); -} diff --git a/third_party/rust/tokio-0.2.25/src/runtime/task/error.rs b/third_party/rust/tokio-0.2.25/src/runtime/task/error.rs deleted file mode 100644 index d5f65a49814b..000000000000 --- a/third_party/rust/tokio-0.2.25/src/runtime/task/error.rs +++ /dev/null @@ -1,163 +0,0 @@ -use std::any::Any; -use std::fmt; -use std::io; -use std::sync::Mutex; - -doc_rt_core! { - /// Task failed to execute to completion. - pub struct JoinError { - repr: Repr, - } -} - -enum Repr { - Cancelled, - Panic(Mutex>), -} - -impl JoinError { - #[doc(hidden)] - #[deprecated] - pub fn cancelled() -> JoinError { - Self::cancelled2() - } - - pub(crate) fn cancelled2() -> JoinError { - JoinError { - repr: Repr::Cancelled, - } - } - - #[doc(hidden)] - #[deprecated] - pub fn panic(err: Box) -> JoinError { - Self::panic2(err) - } - - pub(crate) fn panic2(err: Box) -> JoinError { - JoinError { - repr: Repr::Panic(Mutex::new(err)), - } - } - - /// Returns true if the error was caused by the task being cancelled - pub fn is_cancelled(&self) -> bool { - match &self.repr { - Repr::Cancelled => true, - _ => false, - } - } - - /// Returns true if the error was caused by the task panicking - /// - /// # Examples - /// - /// ``` - /// use std::panic; - /// - /// #[tokio::main] - /// async fn main() { - /// let err = tokio::spawn(async { - /// panic!("boom"); - /// }).await.unwrap_err(); - /// - /// assert!(err.is_panic()); - /// } - /// ``` - pub fn is_panic(&self) -> bool { - match &self.repr { - Repr::Panic(_) => true, - _ => false, - } - } - - /// Consumes the join error, returning the object with which the task panicked. - /// - /// # Panics - /// - /// `into_panic()` panics if the `Error` does not represent the underlying - /// task terminating with a panic. Use `is_panic` to check the error reason - /// or `try_into_panic` for a variant that does not panic. - /// - /// # Examples - /// - /// ```should_panic - /// use std::panic; - /// - /// #[tokio::main] - /// async fn main() { - /// let err = tokio::spawn(async { - /// panic!("boom"); - /// }).await.unwrap_err(); - /// - /// if err.is_panic() { - /// // Resume the panic on the main task - /// panic::resume_unwind(err.into_panic()); - /// } - /// } - /// ``` - pub fn into_panic(self) -> Box { - self.try_into_panic() - .expect("`JoinError` reason is not a panic.") - } - - /// Consumes the join error, returning the object with which the task - /// panicked if the task terminated due to a panic. Otherwise, `self` is - /// returned. 
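// A std-only sketch (hypothetical example, no runtime needed) of what
// `into_panic`/`try_into_panic` hand back: the boxed panic payload
// (`Box<dyn Any + Send>`), which can be inspected or re-thrown with
// `resume_unwind`, as the doc examples above do for a spawned task.
use std::any::Any;
use std::panic;

fn main() {
    let payload: Box<dyn Any + Send> =
        panic::catch_unwind(|| panic!("boom")).unwrap_err();

    // A `panic!("literal")` payload is a &'static str; formatted panics box a String.
    if let Some(msg) = payload.downcast_ref::<&str>() {
        println!("task panicked with: {}", msg);
    } else {
        // Otherwise propagate it, as the JoinError examples do.
        panic::resume_unwind(payload);
    }
}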
- /// - /// # Examples - /// - /// ```should_panic - /// use std::panic; - /// - /// #[tokio::main] - /// async fn main() { - /// let err = tokio::spawn(async { - /// panic!("boom"); - /// }).await.unwrap_err(); - /// - /// if let Ok(reason) = err.try_into_panic() { - /// // Resume the panic on the main task - /// panic::resume_unwind(reason); - /// } - /// } - /// ``` - pub fn try_into_panic(self) -> Result, JoinError> { - match self.repr { - Repr::Panic(p) => Ok(p.into_inner().expect("Extracting panic from mutex")), - _ => Err(self), - } - } -} - -impl fmt::Display for JoinError { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - match &self.repr { - Repr::Cancelled => write!(fmt, "cancelled"), - Repr::Panic(_) => write!(fmt, "panic"), - } - } -} - -impl fmt::Debug for JoinError { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - match &self.repr { - Repr::Cancelled => write!(fmt, "JoinError::Cancelled"), - Repr::Panic(_) => write!(fmt, "JoinError::Panic(...)"), - } - } -} - -impl std::error::Error for JoinError {} - -impl From for io::Error { - fn from(src: JoinError) -> io::Error { - io::Error::new( - io::ErrorKind::Other, - match src.repr { - Repr::Cancelled => "task was cancelled", - Repr::Panic(_) => "task panicked", - }, - ) - } -} diff --git a/third_party/rust/tokio-0.2.25/src/runtime/task/harness.rs b/third_party/rust/tokio-0.2.25/src/runtime/task/harness.rs deleted file mode 100644 index e86b29e699e9..000000000000 --- a/third_party/rust/tokio-0.2.25/src/runtime/task/harness.rs +++ /dev/null @@ -1,371 +0,0 @@ -use crate::runtime::task::core::{Cell, Core, Header, Trailer}; -use crate::runtime::task::state::Snapshot; -use crate::runtime::task::{JoinError, Notified, Schedule, Task}; - -use std::future::Future; -use std::mem; -use std::panic; -use std::ptr::NonNull; -use std::task::{Poll, Waker}; - -/// Typed raw task handle -pub(super) struct Harness { - cell: NonNull>, -} - -impl Harness -where - T: Future, - S: 'static, -{ - pub(super) unsafe fn from_raw(ptr: NonNull
) -> Harness { - Harness { - cell: ptr.cast::>(), - } - } - - fn header(&self) -> &Header { - unsafe { &self.cell.as_ref().header } - } - - fn trailer(&self) -> &Trailer { - unsafe { &self.cell.as_ref().trailer } - } - - fn core(&self) -> &Core { - unsafe { &self.cell.as_ref().core } - } -} - -impl Harness -where - T: Future, - S: Schedule, -{ - /// Polls the inner future. - /// - /// All necessary state checks and transitions are performed. - /// - /// Panics raised while polling the future are handled. - pub(super) fn poll(self) { - // If this is the first time the task is polled, the task will be bound - // to the scheduler, in which case the task ref count must be - // incremented. - let is_not_bound = !self.core().is_bound(); - - // Transition the task to the running state. - // - // A failure to transition here indicates the task has been cancelled - // while in the run queue pending execution. - let snapshot = match self.header().state.transition_to_running(is_not_bound) { - Ok(snapshot) => snapshot, - Err(_) => { - // The task was shutdown while in the run queue. At this point, - // we just hold a ref counted reference. Drop it here. - self.drop_reference(); - return; - } - }; - - if is_not_bound { - // Ensure the task is bound to a scheduler instance. Since this is - // the first time polling the task, a scheduler instance is pulled - // from the local context and assigned to the task. - // - // The scheduler maintains ownership of the task and responds to - // `wake` calls. - // - // The task reference count has been incremented. - // - // Safety: Since we have unique access to the task so that we can - // safely call `bind_scheduler`. - self.core().bind_scheduler(self.to_task()); - } - - // The transition to `Running` done above ensures that a lock on the - // future has been obtained. This also ensures the `*mut T` pointer - // contains the future (as opposed to the output) and is initialized. - - let res = panic::catch_unwind(panic::AssertUnwindSafe(|| { - struct Guard<'a, T: Future, S: Schedule> { - core: &'a Core, - } - - impl Drop for Guard<'_, T, S> { - fn drop(&mut self) { - self.core.drop_future_or_output(); - } - } - - let guard = Guard { core: self.core() }; - - // If the task is cancelled, avoid polling it, instead signalling it - // is complete. - if snapshot.is_cancelled() { - Poll::Ready(Err(JoinError::cancelled2())) - } else { - let res = guard.core.poll(self.header()); - - // prevent the guard from dropping the future - mem::forget(guard); - - res.map(Ok) - } - })); - - match res { - Ok(Poll::Ready(out)) => { - self.complete(out, snapshot.is_join_interested()); - } - Ok(Poll::Pending) => { - match self.header().state.transition_to_idle() { - Ok(snapshot) => { - if snapshot.is_notified() { - // Signal yield - self.core().yield_now(Notified(self.to_task())); - // The ref-count was incremented as part of - // `transition_to_idle`. - self.drop_reference(); - } - } - Err(_) => self.cancel_task(), - } - } - Err(err) => { - self.complete(Err(JoinError::panic2(err)), snapshot.is_join_interested()); - } - } - } - - pub(super) fn dealloc(self) { - // Release the join waker, if there is one. - self.trailer().waker.with_mut(|_| ()); - - // Check causality - self.core().stage.with_mut(|_| {}); - self.core().scheduler.with_mut(|_| {}); - - unsafe { - drop(Box::from_raw(self.cell.as_ptr())); - } - } - - // ===== join handle ===== - - /// Read the task output into `dst`. 
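// A stand-alone sketch (hypothetical names, std only, not the tokio code
// itself) of the guard pattern used in `poll` above: a Drop guard performs the
// cleanup if the polled code panics, and `mem::forget` disarms it on the
// success path.
use std::mem;
use std::panic::{self, AssertUnwindSafe};
use std::sync::atomic::{AtomicBool, Ordering};

static CLEANED_UP: AtomicBool = AtomicBool::new(false);

struct Guard;

impl Drop for Guard {
    fn drop(&mut self) {
        // In the real code this drops the future; here we only record it.
        CLEANED_UP.store(true, Ordering::SeqCst);
    }
}

fn main() {
    let res = panic::catch_unwind(AssertUnwindSafe(|| {
        let guard = Guard;
        if true {
            panic!("poll panicked");
        }
        // Success path: keep the state around instead of dropping it.
        mem::forget(guard);
    }));

    assert!(res.is_err());
    assert!(CLEANED_UP.load(Ordering::SeqCst), "guard ran on panic");
}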
- pub(super) fn try_read_output(self, dst: &mut Poll>, waker: &Waker) { - // Load a snapshot of the current task state - let snapshot = self.header().state.load(); - - debug_assert!(snapshot.is_join_interested()); - - if !snapshot.is_complete() { - // The waker must be stored in the task struct. - let res = if snapshot.has_join_waker() { - // There already is a waker stored in the struct. If it matches - // the provided waker, then there is no further work to do. - // Otherwise, the waker must be swapped. - let will_wake = unsafe { - // Safety: when `JOIN_INTEREST` is set, only `JOIN_HANDLE` - // may mutate the `waker` field. - self.trailer() - .waker - .with(|ptr| (*ptr).as_ref().unwrap().will_wake(waker)) - }; - - if will_wake { - // The task is not complete **and** the waker is up to date, - // there is nothing further that needs to be done. - return; - } - - // Unset the `JOIN_WAKER` to gain mutable access to the `waker` - // field then update the field with the new join worker. - // - // This requires two atomic operations, unsetting the bit and - // then resetting it. If the task transitions to complete - // concurrently to either one of those operations, then setting - // the join waker fails and we proceed to reading the task - // output. - self.header() - .state - .unset_waker() - .and_then(|snapshot| self.set_join_waker(waker.clone(), snapshot)) - } else { - self.set_join_waker(waker.clone(), snapshot) - }; - - match res { - Ok(_) => return, - Err(snapshot) => { - assert!(snapshot.is_complete()); - } - } - } - - *dst = Poll::Ready(self.core().take_output()); - } - - fn set_join_waker(&self, waker: Waker, snapshot: Snapshot) -> Result { - assert!(snapshot.is_join_interested()); - assert!(!snapshot.has_join_waker()); - - // Safety: Only the `JoinHandle` may set the `waker` field. When - // `JOIN_INTEREST` is **not** set, nothing else will touch the field. - unsafe { - self.trailer().waker.with_mut(|ptr| { - *ptr = Some(waker); - }); - } - - // Update the `JoinWaker` state accordingly - let res = self.header().state.set_join_waker(); - - // If the state could not be updated, then clear the join waker - if res.is_err() { - unsafe { - self.trailer().waker.with_mut(|ptr| { - *ptr = None; - }); - } - } - - res - } - - pub(super) fn drop_join_handle_slow(self) { - // Try to unset `JOIN_INTEREST`. This must be done as a first step in - // case the task concurrently completed. - if self.header().state.unset_join_interested().is_err() { - // It is our responsibility to drop the output. This is critical as - // the task output may not be `Send` and as such must remain with - // the scheduler or `JoinHandle`. i.e. if the output remains in the - // task structure until the task is deallocated, it may be dropped - // by a Waker on any arbitrary thread. - self.core().drop_future_or_output(); - } - - // Drop the `JoinHandle` reference, possibly deallocating the task - self.drop_reference(); - } - - // ===== waker behavior ===== - - pub(super) fn wake_by_val(self) { - self.wake_by_ref(); - self.drop_reference(); - } - - pub(super) fn wake_by_ref(&self) { - if self.header().state.transition_to_notified() { - self.core().schedule(Notified(self.to_task())); - } - } - - pub(super) fn drop_reference(self) { - if self.header().state.ref_dec() { - self.dealloc(); - } - } - - /// Forcibly shutdown the task - /// - /// Attempt to transition to `Running` in order to forcibly shutdown the - /// task. If the task is currently running or in a state of completion, then - /// there is nothing further to do. 
When the task completes running, it will - /// notice the `CANCELLED` bit and finalize the task. - pub(super) fn shutdown(self) { - if !self.header().state.transition_to_shutdown() { - // The task is concurrently running. No further work needed. - return; - } - - // By transitioning the lifcycle to `Running`, we have permission to - // drop the future. - self.cancel_task(); - } - - // ====== internal ====== - - fn cancel_task(self) { - // Drop the future from a panic guard. - let res = panic::catch_unwind(panic::AssertUnwindSafe(|| { - self.core().drop_future_or_output(); - })); - - if let Err(err) = res { - // Dropping the future panicked, complete the join - // handle with the panic to avoid dropping the panic - // on the ground. - self.complete(Err(JoinError::panic2(err)), true); - } else { - self.complete(Err(JoinError::cancelled2()), true); - } - } - - fn complete(mut self, output: super::Result, is_join_interested: bool) { - if is_join_interested { - // Store the output. The future has already been dropped - // - // Safety: Mutual exclusion is obtained by having transitioned the task - // state -> Running - self.core().store_output(output); - - // Transition to `Complete`, notifying the `JoinHandle` if necessary. - self.transition_to_complete(); - } - - // The task has completed execution and will no longer be scheduled. - // - // Attempts to batch a ref-dec with the state transition below. - let ref_dec = if self.core().is_bound() { - if let Some(task) = self.core().release(self.to_task()) { - mem::forget(task); - true - } else { - false - } - } else { - false - }; - - // This might deallocate - let snapshot = self - .header() - .state - .transition_to_terminal(!is_join_interested, ref_dec); - - if snapshot.ref_count() == 0 { - self.dealloc() - } - } - - /// Transitions the task's lifecycle to `Complete`. Notifies the - /// `JoinHandle` if it still has interest in the completion. - fn transition_to_complete(&mut self) { - // Transition the task's lifecycle to `Complete` and get a snapshot of - // the task's sate. - let snapshot = self.header().state.transition_to_complete(); - - if !snapshot.is_join_interested() { - // The `JoinHandle` is not interested in the output of this task. It - // is our responsibility to drop the output. - self.core().drop_future_or_output(); - } else if snapshot.has_join_waker() { - // Notify the join handle. The previous transition obtains the - // lock on the waker cell. - self.wake_join(); - } - } - - fn wake_join(&self) { - self.trailer().waker.with(|ptr| match unsafe { &*ptr } { - Some(waker) => waker.wake_by_ref(), - None => panic!("waker missing"), - }); - } - - fn to_task(&self) -> Task { - unsafe { Task::from_raw(self.header().into()) } - } -} diff --git a/third_party/rust/tokio-0.2.25/src/runtime/task/join.rs b/third_party/rust/tokio-0.2.25/src/runtime/task/join.rs deleted file mode 100644 index 3c4aabb2e845..000000000000 --- a/third_party/rust/tokio-0.2.25/src/runtime/task/join.rs +++ /dev/null @@ -1,156 +0,0 @@ -use crate::runtime::task::RawTask; - -use std::fmt; -use std::future::Future; -use std::marker::PhantomData; -use std::pin::Pin; -use std::task::{Context, Poll}; - -doc_rt_core! { - /// An owned permission to join on a task (await its termination). - /// - /// This can be thought of as the equivalent of [`std::thread::JoinHandle`] for - /// a task rather than a thread. 
- /// - /// A `JoinHandle` *detaches* the associated task when it is dropped, which - /// means that there is no longer any handle to the task, and no way to `join` - /// on it. - /// - /// This `struct` is created by the [`task::spawn`] and [`task::spawn_blocking`] - /// functions. - /// - /// # Examples - /// - /// Creation from [`task::spawn`]: - /// - /// ``` - /// use tokio::task; - /// - /// # async fn doc() { - /// let join_handle: task::JoinHandle<_> = task::spawn(async { - /// // some work here - /// }); - /// # } - /// ``` - /// - /// Creation from [`task::spawn_blocking`]: - /// - /// ``` - /// use tokio::task; - /// - /// # async fn doc() { - /// let join_handle: task::JoinHandle<_> = task::spawn_blocking(|| { - /// // some blocking work here - /// }); - /// # } - /// ``` - /// - /// Child being detached and outliving its parent: - /// - /// ```no_run - /// use tokio::task; - /// use tokio::time; - /// use std::time::Duration; - /// - /// # #[tokio::main] async fn main() { - /// let original_task = task::spawn(async { - /// let _detached_task = task::spawn(async { - /// // Here we sleep to make sure that the first task returns before. - /// time::delay_for(Duration::from_millis(10)).await; - /// // This will be called, even though the JoinHandle is dropped. - /// println!("♫ Still alive ♫"); - /// }); - /// }); - /// - /// original_task.await.expect("The task being joined has panicked"); - /// println!("Original task is joined."); - /// - /// // We make sure that the new task has time to run, before the main - /// // task returns. - /// - /// time::delay_for(Duration::from_millis(1000)).await; - /// # } - /// ``` - /// - /// [`task::spawn`]: crate::task::spawn() - /// [`task::spawn_blocking`]: crate::task::spawn_blocking - /// [`std::thread::JoinHandle`]: std::thread::JoinHandle - pub struct JoinHandle { - raw: Option, - _p: PhantomData, - } -} - -unsafe impl Send for JoinHandle {} -unsafe impl Sync for JoinHandle {} - -impl JoinHandle { - pub(super) fn new(raw: RawTask) -> JoinHandle { - JoinHandle { - raw: Some(raw), - _p: PhantomData, - } - } -} - -impl Unpin for JoinHandle {} - -impl Future for JoinHandle { - type Output = super::Result; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let mut ret = Poll::Pending; - - // Keep track of task budget - let coop = ready!(crate::coop::poll_proceed(cx)); - - // Raw should always be set. If it is not, this is due to polling after - // completion - let raw = self - .raw - .as_ref() - .expect("polling after `JoinHandle` already completed"); - - // Try to read the task output. If the task is not yet complete, the - // waker is stored and is notified once the task does complete. - // - // The function must go via the vtable, which requires erasing generic - // types. To do this, the function "return" is placed on the stack - // **before** calling the function and is passed into the function using - // `*mut ()`. - // - // Safety: - // - // The type of `T` must match the task's output type. 
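// A minimal stand-alone sketch (hypothetical names) of the hand-off described
// in the comment above: the caller reserves a typed result slot on its stack
// and passes it through the type-erased interface as a `*mut ()`.
use std::task::Poll;

// The type-erased side only ever sees `*mut ()`.
unsafe fn try_read_output_erased(dst: *mut (), value: u64) {
    let dst = &mut *(dst as *mut Poll<u64>);
    *dst = Poll::Ready(value);
}

fn main() {
    // The caller knows the concrete type and owns the slot.
    let mut ret: Poll<u64> = Poll::Pending;
    unsafe { try_read_output_erased(&mut ret as *mut _ as *mut (), 7) };
    assert_eq!(ret, Poll::Ready(7));
}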
- unsafe { - raw.try_read_output(&mut ret as *mut _ as *mut (), cx.waker()); - } - - if ret.is_ready() { - coop.made_progress(); - } - - ret - } -} - -impl Drop for JoinHandle { - fn drop(&mut self) { - if let Some(raw) = self.raw.take() { - if raw.header().state.drop_join_handle_fast().is_ok() { - return; - } - - raw.drop_join_handle_slow(); - } - } -} - -impl fmt::Debug for JoinHandle -where - T: fmt::Debug, -{ - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("JoinHandle").finish() - } -} diff --git a/third_party/rust/tokio-0.2.25/src/runtime/task/mod.rs b/third_party/rust/tokio-0.2.25/src/runtime/task/mod.rs deleted file mode 100644 index 17b5157e8484..000000000000 --- a/third_party/rust/tokio-0.2.25/src/runtime/task/mod.rs +++ /dev/null @@ -1,220 +0,0 @@ -mod core; -use self::core::Cell; -pub(crate) use self::core::Header; - -mod error; -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 -pub use self::error::JoinError; - -mod harness; -use self::harness::Harness; - -mod join; -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 -pub use self::join::JoinHandle; - -mod raw; -use self::raw::RawTask; - -mod state; -use self::state::State; - -mod waker; - -cfg_rt_threaded! { - mod stack; - pub(crate) use self::stack::TransferStack; -} - -use crate::util::linked_list; - -use std::future::Future; -use std::marker::PhantomData; -use std::ptr::NonNull; -use std::{fmt, mem}; - -/// An owned handle to the task, tracked by ref count -#[repr(transparent)] -pub(crate) struct Task { - raw: RawTask, - _p: PhantomData, -} - -unsafe impl Send for Task {} -unsafe impl Sync for Task {} - -/// A task was notified -#[repr(transparent)] -pub(crate) struct Notified(Task); - -unsafe impl Send for Notified {} -unsafe impl Sync for Notified {} - -/// Task result sent back -pub(crate) type Result = std::result::Result; - -pub(crate) trait Schedule: Sync + Sized + 'static { - /// Bind a task to the executor. - /// - /// Guaranteed to be called from the thread that called `poll` on the task. - /// The returned `Schedule` instance is associated with the task and is used - /// as `&self` in the other methods on this trait. - fn bind(task: Task) -> Self; - - /// The task has completed work and is ready to be released. The scheduler - /// is free to drop it whenever. - /// - /// If the scheduler can immediately release the task, it should return - /// it as part of the function. This enables the task module to batch - /// the ref-dec with other options. - fn release(&self, task: &Task) -> Option>; - - /// Schedule the task - fn schedule(&self, task: Notified); - - /// Schedule the task to run in the near future, yielding the thread to - /// other tasks. - fn yield_now(&self, task: Notified) { - self.schedule(task); - } -} - -/// Create a new task with an associated join handle -pub(crate) fn joinable(task: T) -> (Notified, JoinHandle) -where - T: Future + Send + 'static, - S: Schedule, -{ - let raw = RawTask::new::<_, S>(task); - - let task = Task { - raw, - _p: PhantomData, - }; - - let join = JoinHandle::new(raw); - - (Notified(task), join) -} - -cfg_rt_util! 
{ - /// Create a new `!Send` task with an associated join handle - pub(crate) unsafe fn joinable_local(task: T) -> (Notified, JoinHandle) - where - T: Future + 'static, - S: Schedule, - { - let raw = RawTask::new::<_, S>(task); - - let task = Task { - raw, - _p: PhantomData, - }; - - let join = JoinHandle::new(raw); - - (Notified(task), join) - } -} - -impl Task { - pub(crate) unsafe fn from_raw(ptr: NonNull
) -> Task { - Task { - raw: RawTask::from_raw(ptr), - _p: PhantomData, - } - } - - pub(crate) fn header(&self) -> &Header { - self.raw.header() - } -} - -cfg_rt_threaded! { - impl Notified { - pub(crate) unsafe fn from_raw(ptr: NonNull
) -> Notified { - Notified(Task::from_raw(ptr)) - } - - pub(crate) fn header(&self) -> &Header { - self.0.header() - } - } - - impl Task { - pub(crate) fn into_raw(self) -> NonNull
{ - let ret = self.header().into(); - mem::forget(self); - ret - } - } - - impl Notified { - pub(crate) fn into_raw(self) -> NonNull
{ - self.0.into_raw() - } - } -} - -impl Task { - /// Pre-emptively cancel the task as part of the shutdown process. - pub(crate) fn shutdown(&self) { - self.raw.shutdown(); - } -} - -impl Notified { - /// Run the task - pub(crate) fn run(self) { - self.0.raw.poll(); - mem::forget(self); - } - - /// Pre-emptively cancel the task as part of the shutdown process. - pub(crate) fn shutdown(self) { - self.0.shutdown(); - } -} - -impl Drop for Task { - fn drop(&mut self) { - // Decrement the ref count - if self.header().state.ref_dec() { - // Deallocate if this is the final ref count - self.raw.dealloc(); - } - } -} - -impl fmt::Debug for Task { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(fmt, "Task({:p})", self.header()) - } -} - -impl fmt::Debug for Notified { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(fmt, "task::Notified({:p})", self.0.header()) - } -} - -/// # Safety -/// -/// Tasks are pinned -unsafe impl linked_list::Link for Task { - type Handle = Task; - type Target = Header; - - fn as_raw(handle: &Task) -> NonNull
{ - handle.header().into() - } - - unsafe fn from_raw(ptr: NonNull
) -> Task { - Task::from_raw(ptr) - } - - unsafe fn pointers(target: NonNull
) -> NonNull> { - // Not super great as it avoids some of looms checking... - NonNull::from(target.as_ref().owned.with_mut(|ptr| &mut *ptr)) - } -} diff --git a/third_party/rust/tokio-0.2.25/src/runtime/task/raw.rs b/third_party/rust/tokio-0.2.25/src/runtime/task/raw.rs deleted file mode 100644 index cae56d037da1..000000000000 --- a/third_party/rust/tokio-0.2.25/src/runtime/task/raw.rs +++ /dev/null @@ -1,131 +0,0 @@ -use crate::runtime::task::{Cell, Harness, Header, Schedule, State}; - -use std::future::Future; -use std::ptr::NonNull; -use std::task::{Poll, Waker}; - -/// Raw task handle -pub(super) struct RawTask { - ptr: NonNull
<Header>,
-}
-
-pub(super) struct Vtable {
-    /// Poll the future
-    pub(super) poll: unsafe fn(NonNull<Header>),
-
-    /// Deallocate the memory
-    pub(super) dealloc: unsafe fn(NonNull<Header>),
-
-    /// Read the task output, if complete
-    pub(super) try_read_output: unsafe fn(NonNull<Header>, *mut (), &Waker),
-
-    /// The join handle has been dropped
-    pub(super) drop_join_handle_slow: unsafe fn(NonNull<Header>),
-
-    /// Scheduler is being shutdown
-    pub(super) shutdown: unsafe fn(NonNull<Header>),
-}
-
-/// Get the vtable for the requested `T` and `S` generics.
-pub(super) fn vtable<T: Future, S: Schedule>() -> &'static Vtable {
-    &Vtable {
-        poll: poll::<T, S>,
-        dealloc: dealloc::<T, S>,
-        try_read_output: try_read_output::<T, S>,
-        drop_join_handle_slow: drop_join_handle_slow::<T, S>,
-        shutdown: shutdown::<T, S>,
-    }
-}
-
-impl RawTask {
-    pub(super) fn new<T, S>(task: T) -> RawTask
-    where
-        T: Future,
-        S: Schedule,
-    {
-        let ptr = Box::into_raw(Cell::<_, S>::new(task, State::new()));
-        let ptr = unsafe { NonNull::new_unchecked(ptr as *mut Header) };
-
-        RawTask { ptr }
-    }
-
-    pub(super) unsafe fn from_raw(ptr: NonNull<Header>) -> RawTask {
-        RawTask { ptr }
-    }
-
-    /// Returns a reference to the task's meta structure.
-    ///
-    /// Safe as `Header` is `Sync`.
-    pub(super) fn header(&self) -> &Header {
-        unsafe { self.ptr.as_ref() }
-    }
-
-    /// Safety: mutual exclusion is required to call this function.
-    pub(super) fn poll(self) {
-        let vtable = self.header().vtable;
-        unsafe { (vtable.poll)(self.ptr) }
-    }
-
-    pub(super) fn dealloc(self) {
-        let vtable = self.header().vtable;
-        unsafe {
-            (vtable.dealloc)(self.ptr);
-        }
-    }
-
-    /// Safety: `dst` must be a `*mut Poll<super::Result<T::Output>>` where `T`
-    /// is the future stored by the task.
-    pub(super) unsafe fn try_read_output(self, dst: *mut (), waker: &Waker) {
-        let vtable = self.header().vtable;
-        (vtable.try_read_output)(self.ptr, dst, waker);
-    }
-
-    pub(super) fn drop_join_handle_slow(self) {
-        let vtable = self.header().vtable;
-        unsafe { (vtable.drop_join_handle_slow)(self.ptr) }
-    }
-
-    pub(super) fn shutdown(self) {
-        let vtable = self.header().vtable;
-        unsafe { (vtable.shutdown)(self.ptr) }
-    }
-}
-
-impl Clone for RawTask {
-    fn clone(&self) -> Self {
-        RawTask { ptr: self.ptr }
-    }
-}
-
-impl Copy for RawTask {}
-
-unsafe fn poll<T: Future, S: Schedule>(ptr: NonNull<Header>) {
-    let harness = Harness::<T, S>::from_raw(ptr);
-    harness.poll();
-}
-
-unsafe fn dealloc<T: Future, S: Schedule>(ptr: NonNull<Header>) {
-    let harness = Harness::<T, S>::from_raw(ptr);
-    harness.dealloc();
-}
-
-unsafe fn try_read_output<T: Future, S: Schedule>(
-    ptr: NonNull<Header>,
-    dst: *mut (),
-    waker: &Waker,
-) {
-    let out = &mut *(dst as *mut Poll<super::Result<T::Output>>);
-
-    let harness = Harness::<T, S>::from_raw(ptr);
-    harness.try_read_output(out, waker);
-}
-
-unsafe fn drop_join_handle_slow<T: Future, S: Schedule>(ptr: NonNull<Header>) {
-    let harness = Harness::<T, S>::from_raw(ptr);
-    harness.drop_join_handle_slow()
-}
-
-unsafe fn shutdown<T: Future, S: Schedule>(ptr: NonNull<Header>) {
-    let harness = Harness::<T, S>::from_raw(ptr);
-    harness.shutdown()
-}
diff --git a/third_party/rust/tokio-0.2.25/src/runtime/task/stack.rs b/third_party/rust/tokio-0.2.25/src/runtime/task/stack.rs
deleted file mode 100644
index 9dd8d3f43f98..000000000000
--- a/third_party/rust/tokio-0.2.25/src/runtime/task/stack.rs
+++ /dev/null
@@ -1,83 +0,0 @@
-use crate::loom::sync::atomic::AtomicPtr;
-use crate::runtime::task::{Header, Task};
-
-use std::marker::PhantomData;
-use std::ptr::{self, NonNull};
-use std::sync::atomic::Ordering::{Acquire, Relaxed, Release};
-
-/// Concurrent stack of tasks, used to pass ownership of a task from one worker
-/// to another.
-pub(crate) struct TransferStack<T: 'static> {
-    head: AtomicPtr<Header>
, - _p: PhantomData, -} - -impl TransferStack { - pub(crate) fn new() -> TransferStack { - TransferStack { - head: AtomicPtr::new(ptr::null_mut()), - _p: PhantomData, - } - } - - pub(crate) fn push(&self, task: Task) { - let task = task.into_raw(); - - // We don't care about any memory associated w/ setting the `head` - // field, just the current value. - // - // The compare-exchange creates a release sequence. - let mut curr = self.head.load(Relaxed); - - loop { - unsafe { - task.as_ref() - .stack_next - .with_mut(|ptr| *ptr = NonNull::new(curr)) - }; - - let res = self - .head - .compare_exchange(curr, task.as_ptr() as *mut _, Release, Relaxed); - - match res { - Ok(_) => return, - Err(actual) => { - curr = actual; - } - } - } - } - - pub(crate) fn drain(&self) -> impl Iterator> { - struct Iter(Option>, PhantomData); - - impl Iterator for Iter { - type Item = Task; - - fn next(&mut self) -> Option> { - let task = self.0?; - - // Move the cursor forward - self.0 = unsafe { task.as_ref().stack_next.with(|ptr| *ptr) }; - - // Return the task - unsafe { Some(Task::from_raw(task)) } - } - } - - impl Drop for Iter { - fn drop(&mut self) { - use std::process; - - if self.0.is_some() { - // we have bugs - process::abort(); - } - } - } - - let ptr = self.head.swap(ptr::null_mut(), Acquire); - Iter(NonNull::new(ptr), PhantomData) - } -} diff --git a/third_party/rust/tokio-0.2.25/src/runtime/task/state.rs b/third_party/rust/tokio-0.2.25/src/runtime/task/state.rs deleted file mode 100644 index 21e90430db24..000000000000 --- a/third_party/rust/tokio-0.2.25/src/runtime/task/state.rs +++ /dev/null @@ -1,446 +0,0 @@ -use crate::loom::sync::atomic::AtomicUsize; - -use std::fmt; -use std::sync::atomic::Ordering::{AcqRel, Acquire, Release}; -use std::usize; - -pub(super) struct State { - val: AtomicUsize, -} - -/// Current state value -#[derive(Copy, Clone)] -pub(super) struct Snapshot(usize); - -type UpdateResult = Result; - -/// The task is currently being run. -const RUNNING: usize = 0b0001; - -/// The task is complete. -/// -/// Once this bit is set, it is never unset -const COMPLETE: usize = 0b0010; - -/// Extracts the task's lifecycle value from the state -const LIFECYCLE_MASK: usize = 0b11; - -/// Flag tracking if the task has been pushed into a run queue. -const NOTIFIED: usize = 0b100; - -/// The join handle is still around -const JOIN_INTEREST: usize = 0b1_000; - -/// A join handle waker has been set -const JOIN_WAKER: usize = 0b10_000; - -/// The task has been forcibly cancelled. -const CANCELLED: usize = 0b100_000; - -/// All bits -const STATE_MASK: usize = LIFECYCLE_MASK | NOTIFIED | JOIN_INTEREST | JOIN_WAKER | CANCELLED; - -/// Bits used by the ref count portion of the state. -const REF_COUNT_MASK: usize = !STATE_MASK; - -/// Number of positions to shift the ref count -const REF_COUNT_SHIFT: usize = REF_COUNT_MASK.count_zeros() as usize; - -/// One ref count -const REF_ONE: usize = 1 << REF_COUNT_SHIFT; - -/// State a task is initialized with -/// -/// A task is initialized with two references: one for the scheduler and one for -/// the `JoinHandle`. As the task starts with a `JoinHandle`, `JOIN_INTERST` is -/// set. A new task is immediately pushed into the run queue for execution and -/// starts with the `NOTIFIED` flag set. -const INITIAL_STATE: usize = (REF_ONE * 2) | JOIN_INTEREST | NOTIFIED; - -/// All transitions are performed via RMW operations. This establishes an -/// unambiguous modification order. 
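// The push/drain pair above is a single-consumer Treiber stack. A std-only
// miniature sketch of the same compare-exchange push loop, with a hypothetical
// Node type in place of task headers (not the tokio code itself):
use std::ptr;
use std::sync::atomic::{AtomicPtr, Ordering::{Acquire, Relaxed, Release}};

struct Node {
    value: u32,
    next: *mut Node,
}

struct Stack {
    head: AtomicPtr<Node>,
}

impl Stack {
    fn push(&self, value: u32) {
        let node = Box::into_raw(Box::new(Node { value, next: ptr::null_mut() }));
        let mut curr = self.head.load(Relaxed);
        loop {
            unsafe { (*node).next = curr };
            // Release publishes the node's contents to whoever takes the head.
            match self.head.compare_exchange(curr, node, Release, Relaxed) {
                Ok(_) => return,
                Err(actual) => curr = actual, // lost the race: retry with the new head
            }
        }
    }

    fn take_one(&self) -> Option<u32> {
        // The single consumer detaches the whole list at once, as `drain` does.
        let head = self.head.swap(ptr::null_mut(), Acquire);
        if head.is_null() {
            return None;
        }
        let node = unsafe { Box::from_raw(head) };
        // (A full drain would keep walking `node.next`; one element suffices here.)
        Some(node.value)
    }
}

fn main() {
    let stack = Stack { head: AtomicPtr::new(ptr::null_mut()) };
    stack.push(7);
    assert_eq!(stack.take_one(), Some(7));
    assert_eq!(stack.take_one(), None);
}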
-impl State { - /// Return a task's initial state - pub(super) fn new() -> State { - // A task is initialized with three references: one for the scheduler, - // one for the `JoinHandle`, one for the task handle made available in - // release. As the task starts with a `JoinHandle`, `JOIN_INTERST` is - // set. A new task is immediately pushed into the run queue for - // execution and starts with the `NOTIFIED` flag set. - State { - val: AtomicUsize::new(INITIAL_STATE), - } - } - - /// Loads the current state, establishes `Acquire` ordering. - pub(super) fn load(&self) -> Snapshot { - Snapshot(self.val.load(Acquire)) - } - - /// Attempt to transition the lifecycle to `Running`. - /// - /// If `ref_inc` is set, the reference count is also incremented. - /// - /// The `NOTIFIED` bit is always unset. - pub(super) fn transition_to_running(&self, ref_inc: bool) -> UpdateResult { - self.fetch_update(|curr| { - assert!(curr.is_notified()); - - let mut next = curr; - - if !next.is_idle() { - return None; - } - - if ref_inc { - next.ref_inc(); - } - - next.set_running(); - next.unset_notified(); - Some(next) - }) - } - - /// Transitions the task from `Running` -> `Idle`. - /// - /// Returns `Ok` if the transition to `Idle` is successful, `Err` otherwise. - /// In both cases, a snapshot of the state from **after** the transition is - /// returned. - /// - /// The transition to `Idle` fails if the task has been flagged to be - /// cancelled. - pub(super) fn transition_to_idle(&self) -> UpdateResult { - self.fetch_update(|curr| { - assert!(curr.is_running()); - - if curr.is_cancelled() { - return None; - } - - let mut next = curr; - next.unset_running(); - - if next.is_notified() { - // The caller needs to schedule the task. To do this, it needs a - // waker. The waker requires a ref count. - next.ref_inc(); - } - - Some(next) - }) - } - - /// Transitions the task from `Running` -> `Complete`. - pub(super) fn transition_to_complete(&self) -> Snapshot { - const DELTA: usize = RUNNING | COMPLETE; - - let prev = Snapshot(self.val.fetch_xor(DELTA, AcqRel)); - assert!(prev.is_running()); - assert!(!prev.is_complete()); - - Snapshot(prev.0 ^ DELTA) - } - - /// Transition from `Complete` -> `Terminal`, decrementing the reference - /// count by 1. - /// - /// When `ref_dec` is set, an additional ref count decrement is performed. - /// This is used to batch atomic ops when possible. - pub(super) fn transition_to_terminal(&self, complete: bool, ref_dec: bool) -> Snapshot { - self.fetch_update(|mut snapshot| { - if complete { - snapshot.set_complete(); - } else { - assert!(snapshot.is_complete()); - } - - // Decrement the primary handle - snapshot.ref_dec(); - - if ref_dec { - // Decrement a second time - snapshot.ref_dec(); - } - - Some(snapshot) - }) - .unwrap() - } - - /// Transitions the state to `NOTIFIED`. - /// - /// Returns `true` if the task needs to be submitted to the pool for - /// execution - pub(super) fn transition_to_notified(&self) -> bool { - let prev = Snapshot(self.val.fetch_or(NOTIFIED, AcqRel)); - prev.will_need_queueing() - } - - /// Set the `CANCELLED` bit and attempt to transition to `Running`. - /// - /// Returns `true` if the transition to `Running` succeeded. 
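// A std-only miniature sketch of the bit-packed state word documented above:
// lifecycle flags in the low bits, the reference count in the remaining high
// bits, and every transition done as one RMW. The constants below are
// illustrative, not tokio's exact layout; std's fetch_update plays the role of
// the hand-rolled CAS loop in `State`.
use std::sync::atomic::{AtomicUsize, Ordering::{AcqRel, Acquire}};

const RUNNING: usize = 0b0001;
const NOTIFIED: usize = 0b0100;
const STATE_MASK: usize = 0b0111_1111; // flag area (illustrative width)
const REF_ONE: usize = STATE_MASK + 1; // one reference, counted above the flags
const REF_SHIFT: usize = 7;

fn main() {
    // Two references (scheduler + join handle), already queued for execution.
    let state = AtomicUsize::new(REF_ONE * 2 | NOTIFIED);

    // "transition_to_running" with a ref-count increment, as one atomic update.
    let prev = state
        .fetch_update(AcqRel, Acquire, |curr| {
            if curr & RUNNING != 0 {
                return None; // someone else is already running the task
            }
            Some(((curr | RUNNING) & !NOTIFIED) + REF_ONE)
        })
        .expect("transition failed");

    assert_eq!(prev & RUNNING, 0); // fetch_update returns the previous value
    let now = state.load(Acquire);
    assert_eq!(now & RUNNING, RUNNING);
    assert_eq!(now >> REF_SHIFT, 3); // scheduler + join handle + the new reference
}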
- pub(super) fn transition_to_shutdown(&self) -> bool { - let mut prev = Snapshot(0); - - let _ = self.fetch_update(|mut snapshot| { - prev = snapshot; - - if snapshot.is_idle() { - snapshot.set_running(); - - if snapshot.is_notified() { - // If the task is idle and notified, this indicates the task is - // in the run queue and is considered owned by the scheduler. - // The shutdown operation claims ownership of the task, which - // means we need to assign an additional ref-count to the task - // in the queue. - snapshot.ref_inc(); - } - } - - snapshot.set_cancelled(); - Some(snapshot) - }); - - prev.is_idle() - } - - /// Optimistically tries to swap the state assuming the join handle is - /// __immediately__ dropped on spawn - pub(super) fn drop_join_handle_fast(&self) -> Result<(), ()> { - use std::sync::atomic::Ordering::Relaxed; - - // Relaxed is acceptable as if this function is called and succeeds, - // then nothing has been done w/ the join handle. - // - // The moment the join handle is used (polled), the `JOIN_WAKER` flag is - // set, at which point the CAS will fail. - // - // Given this, there is no risk if this operation is reordered. - self.val - .compare_exchange_weak( - INITIAL_STATE, - (INITIAL_STATE - REF_ONE) & !JOIN_INTEREST, - Release, - Relaxed, - ) - .map(|_| ()) - .map_err(|_| ()) - } - - /// Try to unset the JOIN_INTEREST flag. - /// - /// Returns `Ok` if the operation happens before the task transitions to a - /// completed state, `Err` otherwise. - pub(super) fn unset_join_interested(&self) -> UpdateResult { - self.fetch_update(|curr| { - assert!(curr.is_join_interested()); - - if curr.is_complete() { - return None; - } - - let mut next = curr; - next.unset_join_interested(); - - Some(next) - }) - } - - /// Set the `JOIN_WAKER` bit. - /// - /// Returns `Ok` if the bit is set, `Err` otherwise. This operation fails if - /// the task has completed. - pub(super) fn set_join_waker(&self) -> UpdateResult { - self.fetch_update(|curr| { - assert!(curr.is_join_interested()); - assert!(!curr.has_join_waker()); - - if curr.is_complete() { - return None; - } - - let mut next = curr; - next.set_join_waker(); - - Some(next) - }) - } - - /// Unsets the `JOIN_WAKER` bit. - /// - /// Returns `Ok` has been unset, `Err` otherwise. This operation fails if - /// the task has completed. - pub(super) fn unset_waker(&self) -> UpdateResult { - self.fetch_update(|curr| { - assert!(curr.is_join_interested()); - assert!(curr.has_join_waker()); - - if curr.is_complete() { - return None; - } - - let mut next = curr; - next.unset_join_waker(); - - Some(next) - }) - } - - pub(super) fn ref_inc(&self) { - use std::process; - use std::sync::atomic::Ordering::Relaxed; - - // Using a relaxed ordering is alright here, as knowledge of the - // original reference prevents other threads from erroneously deleting - // the object. - // - // As explained in the [Boost documentation][1], Increasing the - // reference counter can always be done with memory_order_relaxed: New - // references to an object can only be formed from an existing - // reference, and passing an existing reference from one thread to - // another must already provide any required synchronization. - // - // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html) - let prev = self.val.fetch_add(REF_ONE, Relaxed); - - // If the reference count overflowed, abort. - if prev > isize::max_value() as usize { - process::abort(); - } - } - - /// Returns `true` if the task should be released. 
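// The ref-count protocol described in `ref_inc`/`ref_dec` above, reduced to a
// stand-alone std-only sketch: Relaxed increments (a new reference always comes
// from an existing one), an AcqRel decrement so the final owner observes all
// prior writes, and abort on overflow.
use std::process;
use std::sync::atomic::{AtomicUsize, Ordering::{AcqRel, Relaxed}};

struct RefCount(AtomicUsize);

impl RefCount {
    fn new() -> RefCount {
        RefCount(AtomicUsize::new(1))
    }

    fn inc(&self) {
        let prev = self.0.fetch_add(1, Relaxed);
        if prev > isize::MAX as usize {
            // Mirrors the overflow guard above: aborting beats a use-after-free.
            process::abort();
        }
    }

    /// Returns true when the caller just dropped the last reference.
    fn dec(&self) -> bool {
        self.0.fetch_sub(1, AcqRel) == 1
    }
}

fn main() {
    let rc = RefCount::new();
    rc.inc();
    assert!(!rc.dec());
    assert!(rc.dec()); // last reference: this is where the task would deallocate
}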
- pub(super) fn ref_dec(&self) -> bool { - let prev = Snapshot(self.val.fetch_sub(REF_ONE, AcqRel)); - prev.ref_count() == 1 - } - - fn fetch_update(&self, mut f: F) -> Result - where - F: FnMut(Snapshot) -> Option, - { - let mut curr = self.load(); - - loop { - let next = match f(curr) { - Some(next) => next, - None => return Err(curr), - }; - - let res = self.val.compare_exchange(curr.0, next.0, AcqRel, Acquire); - - match res { - Ok(_) => return Ok(next), - Err(actual) => curr = Snapshot(actual), - } - } - } -} - -// ===== impl Snapshot ===== - -impl Snapshot { - /// Returns `true` if the task is in an idle state. - pub(super) fn is_idle(self) -> bool { - self.0 & (RUNNING | COMPLETE) == 0 - } - - /// Returns `true` if the task has been flagged as notified. - pub(super) fn is_notified(self) -> bool { - self.0 & NOTIFIED == NOTIFIED - } - - fn unset_notified(&mut self) { - self.0 &= !NOTIFIED - } - - pub(super) fn is_running(self) -> bool { - self.0 & RUNNING == RUNNING - } - - fn set_running(&mut self) { - self.0 |= RUNNING; - } - - fn unset_running(&mut self) { - self.0 &= !RUNNING; - } - - pub(super) fn is_cancelled(self) -> bool { - self.0 & CANCELLED == CANCELLED - } - - fn set_cancelled(&mut self) { - self.0 |= CANCELLED; - } - - fn set_complete(&mut self) { - self.0 |= COMPLETE; - } - - /// Returns `true` if the task's future has completed execution. - pub(super) fn is_complete(self) -> bool { - self.0 & COMPLETE == COMPLETE - } - - pub(super) fn is_join_interested(self) -> bool { - self.0 & JOIN_INTEREST == JOIN_INTEREST - } - - fn unset_join_interested(&mut self) { - self.0 &= !JOIN_INTEREST - } - - pub(super) fn has_join_waker(self) -> bool { - self.0 & JOIN_WAKER == JOIN_WAKER - } - - fn set_join_waker(&mut self) { - self.0 |= JOIN_WAKER; - } - - fn unset_join_waker(&mut self) { - self.0 &= !JOIN_WAKER - } - - pub(super) fn ref_count(self) -> usize { - (self.0 & REF_COUNT_MASK) >> REF_COUNT_SHIFT - } - - fn ref_inc(&mut self) { - assert!(self.0 <= isize::max_value() as usize); - self.0 += REF_ONE; - } - - pub(super) fn ref_dec(&mut self) { - assert!(self.ref_count() > 0); - self.0 -= REF_ONE - } - - fn will_need_queueing(self) -> bool { - !self.is_notified() && self.is_idle() - } -} - -impl fmt::Debug for State { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - let snapshot = self.load(); - snapshot.fmt(fmt) - } -} - -impl fmt::Debug for Snapshot { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("Snapshot") - .field("is_running", &self.is_running()) - .field("is_complete", &self.is_complete()) - .field("is_notified", &self.is_notified()) - .field("is_cancelled", &self.is_cancelled()) - .field("is_join_interested", &self.is_join_interested()) - .field("has_join_waker", &self.has_join_waker()) - .field("ref_count", &self.ref_count()) - .finish() - } -} diff --git a/third_party/rust/tokio-0.2.25/src/runtime/task/waker.rs b/third_party/rust/tokio-0.2.25/src/runtime/task/waker.rs deleted file mode 100644 index 5c2d478fbbc3..000000000000 --- a/third_party/rust/tokio-0.2.25/src/runtime/task/waker.rs +++ /dev/null @@ -1,101 +0,0 @@ -use crate::runtime::task::harness::Harness; -use crate::runtime::task::{Header, Schedule}; - -use std::future::Future; -use std::marker::PhantomData; -use std::mem::ManuallyDrop; -use std::ops; -use std::ptr::NonNull; -use std::task::{RawWaker, RawWakerVTable, Waker}; - -pub(super) struct WakerRef<'a, S: 'static> { - waker: ManuallyDrop, - _p: PhantomData<(&'a Header, S)>, -} - -/// Returns a `WakerRef` 
which avoids having to pre-emptively increase the -/// refcount if there is no need to do so. -pub(super) fn waker_ref(header: &Header) -> WakerRef<'_, S> -where - T: Future, - S: Schedule, -{ - // `Waker::will_wake` uses the VTABLE pointer as part of the check. This - // means that `will_wake` will always return false when using the current - // task's waker. (discussion at rust-lang/rust#66281). - // - // To fix this, we use a single vtable. Since we pass in a reference at this - // point and not an *owned* waker, we must ensure that `drop` is never - // called on this waker instance. This is done by wrapping it with - // `ManuallyDrop` and then never calling drop. - let waker = unsafe { ManuallyDrop::new(Waker::from_raw(raw_waker::(header))) }; - - WakerRef { - waker, - _p: PhantomData, - } -} - -impl ops::Deref for WakerRef<'_, S> { - type Target = Waker; - - fn deref(&self) -> &Waker { - &self.waker - } -} - -unsafe fn clone_waker(ptr: *const ()) -> RawWaker -where - T: Future, - S: Schedule, -{ - let header = ptr as *const Header; - (*header).state.ref_inc(); - raw_waker::(header) -} - -unsafe fn drop_waker(ptr: *const ()) -where - T: Future, - S: Schedule, -{ - let ptr = NonNull::new_unchecked(ptr as *mut Header); - let harness = Harness::::from_raw(ptr); - harness.drop_reference(); -} - -unsafe fn wake_by_val(ptr: *const ()) -where - T: Future, - S: Schedule, -{ - let ptr = NonNull::new_unchecked(ptr as *mut Header); - let harness = Harness::::from_raw(ptr); - harness.wake_by_val(); -} - -// Wake without consuming the waker -unsafe fn wake_by_ref(ptr: *const ()) -where - T: Future, - S: Schedule, -{ - let ptr = NonNull::new_unchecked(ptr as *mut Header); - let harness = Harness::::from_raw(ptr); - harness.wake_by_ref(); -} - -fn raw_waker(header: *const Header) -> RawWaker -where - T: Future, - S: Schedule, -{ - let ptr = header as *const (); - let vtable = &RawWakerVTable::new( - clone_waker::, - wake_by_val::, - wake_by_ref::, - drop_waker::, - ); - RawWaker::new(ptr, vtable) -} diff --git a/third_party/rust/tokio-0.2.25/src/runtime/tests/loom_blocking.rs b/third_party/rust/tokio-0.2.25/src/runtime/tests/loom_blocking.rs deleted file mode 100644 index db7048e3f969..000000000000 --- a/third_party/rust/tokio-0.2.25/src/runtime/tests/loom_blocking.rs +++ /dev/null @@ -1,31 +0,0 @@ -use crate::runtime::{self, Runtime}; - -use std::sync::Arc; - -#[test] -fn blocking_shutdown() { - loom::model(|| { - let v = Arc::new(()); - - let rt = mk_runtime(1); - rt.enter(|| { - for _ in 0..2 { - let v = v.clone(); - crate::task::spawn_blocking(move || { - assert!(1 < Arc::strong_count(&v)); - }); - } - }); - - drop(rt); - assert_eq!(1, Arc::strong_count(&v)); - }); -} - -fn mk_runtime(num_threads: usize) -> Runtime { - runtime::Builder::new() - .threaded_scheduler() - .core_threads(num_threads) - .build() - .unwrap() -} diff --git a/third_party/rust/tokio-0.2.25/src/runtime/tests/loom_oneshot.rs b/third_party/rust/tokio-0.2.25/src/runtime/tests/loom_oneshot.rs deleted file mode 100644 index c126fe479afc..000000000000 --- a/third_party/rust/tokio-0.2.25/src/runtime/tests/loom_oneshot.rs +++ /dev/null @@ -1,49 +0,0 @@ -use loom::sync::Notify; - -use std::sync::{Arc, Mutex}; - -pub(crate) fn channel() -> (Sender, Receiver) { - let inner = Arc::new(Inner { - notify: Notify::new(), - value: Mutex::new(None), - }); - - let tx = Sender { - inner: inner.clone(), - }; - let rx = Receiver { inner }; - - (tx, rx) -} - -pub(crate) struct Sender { - inner: Arc>, -} - -pub(crate) struct Receiver { - inner: 
Arc>, -} - -struct Inner { - notify: Notify, - value: Mutex>, -} - -impl Sender { - pub(crate) fn send(self, value: T) { - *self.inner.value.lock().unwrap() = Some(value); - self.inner.notify.notify(); - } -} - -impl Receiver { - pub(crate) fn recv(self) -> T { - loop { - if let Some(v) = self.inner.value.lock().unwrap().take() { - return v; - } - - self.inner.notify.wait(); - } - } -} diff --git a/third_party/rust/tokio-0.2.25/src/runtime/tests/loom_pool.rs b/third_party/rust/tokio-0.2.25/src/runtime/tests/loom_pool.rs deleted file mode 100644 index c08658cde871..000000000000 --- a/third_party/rust/tokio-0.2.25/src/runtime/tests/loom_pool.rs +++ /dev/null @@ -1,380 +0,0 @@ -/// Full runtime loom tests. These are heavy tests and take significant time to -/// run on CI. -/// -/// Use `LOOM_MAX_PREEMPTIONS=1` to do a "quick" run as a smoke test. -/// -/// In order to speed up the C -use crate::future::poll_fn; -use crate::runtime::tests::loom_oneshot as oneshot; -use crate::runtime::{self, Runtime}; -use crate::{spawn, task}; -use tokio_test::assert_ok; - -use loom::sync::atomic::{AtomicBool, AtomicUsize}; -use loom::sync::{Arc, Mutex}; - -use pin_project_lite::pin_project; -use std::future::Future; -use std::pin::Pin; -use std::sync::atomic::Ordering::{Relaxed, SeqCst}; -use std::task::{Context, Poll}; - -/// Tests are divided into groups to make the runs faster on CI. -mod group_a { - use super::*; - - #[test] - fn racy_shutdown() { - loom::model(|| { - let pool = mk_pool(1); - - // here's the case we want to exercise: - // - // a worker that still has tasks in its local queue gets sent to the blocking pool (due to - // block_in_place). the blocking pool is shut down, so drops the worker. the worker's - // shutdown method never gets run. - // - // we do this by spawning two tasks on one worker, the first of which does block_in_place, - // and then immediately drop the pool. 
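// The loom oneshot above (Notify plus Mutex<Option<T>>) has a direct std
// analogue built on Condvar; this sketch (hypothetical names, std only) shows
// the shape of the blocking hand-off these tests rely on.
use std::sync::{Arc, Condvar, Mutex};
use std::thread;

struct OneshotInner<T> {
    slot: Mutex<Option<T>>,
    cond: Condvar,
}

fn oneshot<T>() -> (Arc<OneshotInner<T>>, Arc<OneshotInner<T>>) {
    let inner = Arc::new(OneshotInner { slot: Mutex::new(None), cond: Condvar::new() });
    (inner.clone(), inner)
}

fn send<T>(chan: &OneshotInner<T>, value: T) {
    *chan.slot.lock().unwrap() = Some(value);
    chan.cond.notify_one();
}

fn recv<T>(chan: &OneshotInner<T>) -> T {
    let mut slot = chan.slot.lock().unwrap();
    loop {
        if let Some(value) = slot.take() {
            return value;
        }
        // Spurious wakeups are fine because the slot is re-checked each time.
        slot = chan.cond.wait(slot).unwrap();
    }
}

fn main() {
    let (tx, rx) = oneshot();
    thread::spawn(move || send(&tx, "hello"));
    assert_eq!(recv(&rx), "hello");
}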
- - pool.spawn(track(async { - crate::task::block_in_place(|| {}); - })); - pool.spawn(track(async {})); - drop(pool); - }); - } - - #[test] - fn pool_multi_spawn() { - loom::model(|| { - let pool = mk_pool(2); - let c1 = Arc::new(AtomicUsize::new(0)); - - let (tx, rx) = oneshot::channel(); - let tx1 = Arc::new(Mutex::new(Some(tx))); - - // Spawn a task - let c2 = c1.clone(); - let tx2 = tx1.clone(); - pool.spawn(track(async move { - spawn(track(async move { - if 1 == c1.fetch_add(1, Relaxed) { - tx1.lock().unwrap().take().unwrap().send(()); - } - })); - })); - - // Spawn a second task - pool.spawn(track(async move { - spawn(track(async move { - if 1 == c2.fetch_add(1, Relaxed) { - tx2.lock().unwrap().take().unwrap().send(()); - } - })); - })); - - rx.recv(); - }); - } - - fn only_blocking_inner(first_pending: bool) { - loom::model(move || { - let pool = mk_pool(1); - let (block_tx, block_rx) = oneshot::channel(); - - pool.spawn(track(async move { - crate::task::block_in_place(move || { - block_tx.send(()); - }); - if first_pending { - task::yield_now().await - } - })); - - block_rx.recv(); - drop(pool); - }); - } - - #[test] - fn only_blocking_without_pending() { - only_blocking_inner(false) - } - - #[test] - fn only_blocking_with_pending() { - only_blocking_inner(true) - } -} - -mod group_b { - use super::*; - - fn blocking_and_regular_inner(first_pending: bool) { - const NUM: usize = 3; - loom::model(move || { - let pool = mk_pool(1); - let cnt = Arc::new(AtomicUsize::new(0)); - - let (block_tx, block_rx) = oneshot::channel(); - let (done_tx, done_rx) = oneshot::channel(); - let done_tx = Arc::new(Mutex::new(Some(done_tx))); - - pool.spawn(track(async move { - crate::task::block_in_place(move || { - block_tx.send(()); - }); - if first_pending { - task::yield_now().await - } - })); - - for _ in 0..NUM { - let cnt = cnt.clone(); - let done_tx = done_tx.clone(); - - pool.spawn(track(async move { - if NUM == cnt.fetch_add(1, Relaxed) + 1 { - done_tx.lock().unwrap().take().unwrap().send(()); - } - })); - } - - done_rx.recv(); - block_rx.recv(); - - drop(pool); - }); - } - - #[test] - fn blocking_and_regular() { - blocking_and_regular_inner(false); - } - - #[test] - fn blocking_and_regular_with_pending() { - blocking_and_regular_inner(true); - } - - #[test] - fn pool_shutdown() { - loom::model(|| { - let pool = mk_pool(2); - - pool.spawn(track(async move { - gated2(true).await; - })); - - pool.spawn(track(async move { - gated2(false).await; - })); - - drop(pool); - }); - } - - #[test] - fn join_output() { - loom::model(|| { - let mut rt = mk_pool(1); - - rt.block_on(async { - let t = crate::spawn(track(async { "hello" })); - - let out = assert_ok!(t.await); - assert_eq!("hello", out.into_inner()); - }); - }); - } - - #[test] - fn poll_drop_handle_then_drop() { - loom::model(|| { - let mut rt = mk_pool(1); - - rt.block_on(async move { - let mut t = crate::spawn(track(async { "hello" })); - - poll_fn(|cx| { - let _ = Pin::new(&mut t).poll(cx); - Poll::Ready(()) - }) - .await; - }); - }) - } - - #[test] - fn complete_block_on_under_load() { - loom::model(|| { - let mut pool = mk_pool(1); - - pool.block_on(async { - // Trigger a re-schedule - crate::spawn(track(async { - for _ in 0..2 { - task::yield_now().await; - } - })); - - gated2(true).await - }); - }); - } -} - -mod group_c { - use super::*; - - #[test] - fn shutdown_with_notification() { - use crate::sync::oneshot; - - loom::model(|| { - let rt = mk_pool(2); - let (done_tx, done_rx) = oneshot::channel::<()>(); - - rt.spawn(track(async move 
{ - let (tx, rx) = oneshot::channel::<()>(); - - crate::spawn(async move { - crate::task::spawn_blocking(move || { - let _ = tx.send(()); - }); - - let _ = done_rx.await; - }); - - let _ = rx.await; - - let _ = done_tx.send(()); - })); - }); - } -} - -mod group_d { - use super::*; - - #[test] - fn pool_multi_notify() { - loom::model(|| { - let pool = mk_pool(2); - - let c1 = Arc::new(AtomicUsize::new(0)); - - let (done_tx, done_rx) = oneshot::channel(); - let done_tx1 = Arc::new(Mutex::new(Some(done_tx))); - - // Spawn a task - let c2 = c1.clone(); - let done_tx2 = done_tx1.clone(); - pool.spawn(track(async move { - gated().await; - gated().await; - - if 1 == c1.fetch_add(1, Relaxed) { - done_tx1.lock().unwrap().take().unwrap().send(()); - } - })); - - // Spawn a second task - pool.spawn(track(async move { - gated().await; - gated().await; - - if 1 == c2.fetch_add(1, Relaxed) { - done_tx2.lock().unwrap().take().unwrap().send(()); - } - })); - - done_rx.recv(); - }); - } -} - -fn mk_pool(num_threads: usize) -> Runtime { - runtime::Builder::new() - .threaded_scheduler() - .core_threads(num_threads) - .build() - .unwrap() -} - -fn gated() -> impl Future { - gated2(false) -} - -fn gated2(thread: bool) -> impl Future { - use loom::thread; - use std::sync::Arc; - - let gate = Arc::new(AtomicBool::new(false)); - let mut fired = false; - - poll_fn(move |cx| { - if !fired { - let gate = gate.clone(); - let waker = cx.waker().clone(); - - if thread { - thread::spawn(move || { - gate.store(true, SeqCst); - waker.wake_by_ref(); - }); - } else { - spawn(track(async move { - gate.store(true, SeqCst); - waker.wake_by_ref(); - })); - } - - fired = true; - - return Poll::Pending; - } - - if gate.load(SeqCst) { - Poll::Ready("hello world") - } else { - Poll::Pending - } - }) -} - -fn track(f: T) -> Track { - Track { - inner: f, - arc: Arc::new(()), - } -} - -pin_project! { - struct Track { - #[pin] - inner: T, - // Arc is used to hook into loom's leak tracking. 
- arc: Arc<()>, - } -} - -impl Track { - fn into_inner(self) -> T { - self.inner - } -} - -impl Future for Track { - type Output = Track; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let me = self.project(); - - Poll::Ready(Track { - inner: ready!(me.inner.poll(cx)), - arc: me.arc.clone(), - }) - } -} diff --git a/third_party/rust/tokio-0.2.25/src/runtime/tests/loom_queue.rs b/third_party/rust/tokio-0.2.25/src/runtime/tests/loom_queue.rs deleted file mode 100644 index de02610db08c..000000000000 --- a/third_party/rust/tokio-0.2.25/src/runtime/tests/loom_queue.rs +++ /dev/null @@ -1,216 +0,0 @@ -use crate::runtime::queue; -use crate::runtime::task::{self, Schedule, Task}; - -use loom::thread; - -#[test] -fn basic() { - loom::model(|| { - let (steal, mut local) = queue::local(); - let inject = queue::Inject::new(); - - let th = thread::spawn(move || { - let (_, mut local) = queue::local(); - let mut n = 0; - - for _ in 0..3 { - if steal.steal_into(&mut local).is_some() { - n += 1; - } - - while local.pop().is_some() { - n += 1; - } - } - - n - }); - - let mut n = 0; - - for _ in 0..2 { - for _ in 0..2 { - let (task, _) = task::joinable::<_, Runtime>(async {}); - local.push_back(task, &inject); - } - - if local.pop().is_some() { - n += 1; - } - - // Push another task - let (task, _) = task::joinable::<_, Runtime>(async {}); - local.push_back(task, &inject); - - while local.pop().is_some() { - n += 1; - } - } - - while inject.pop().is_some() { - n += 1; - } - - n += th.join().unwrap(); - - assert_eq!(6, n); - }); -} - -#[test] -fn steal_overflow() { - loom::model(|| { - let (steal, mut local) = queue::local(); - let inject = queue::Inject::new(); - - let th = thread::spawn(move || { - let (_, mut local) = queue::local(); - let mut n = 0; - - if steal.steal_into(&mut local).is_some() { - n += 1; - } - - while local.pop().is_some() { - n += 1; - } - - n - }); - - let mut n = 0; - - // push a task, pop a task - let (task, _) = task::joinable::<_, Runtime>(async {}); - local.push_back(task, &inject); - - if local.pop().is_some() { - n += 1; - } - - for _ in 0..6 { - let (task, _) = task::joinable::<_, Runtime>(async {}); - local.push_back(task, &inject); - } - - n += th.join().unwrap(); - - while local.pop().is_some() { - n += 1; - } - - while inject.pop().is_some() { - n += 1; - } - - assert_eq!(7, n); - }); -} - -#[test] -fn multi_stealer() { - const NUM_TASKS: usize = 5; - - fn steal_tasks(steal: queue::Steal) -> usize { - let (_, mut local) = queue::local(); - - if steal.steal_into(&mut local).is_none() { - return 0; - } - - let mut n = 1; - - while local.pop().is_some() { - n += 1; - } - - n - } - - loom::model(|| { - let (steal, mut local) = queue::local(); - let inject = queue::Inject::new(); - - // Push work - for _ in 0..NUM_TASKS { - let (task, _) = task::joinable::<_, Runtime>(async {}); - local.push_back(task, &inject); - } - - let th1 = { - let steal = steal.clone(); - thread::spawn(move || steal_tasks(steal)) - }; - - let th2 = thread::spawn(move || steal_tasks(steal)); - - let mut n = 0; - - while local.pop().is_some() { - n += 1; - } - - while inject.pop().is_some() { - n += 1; - } - - n += th1.join().unwrap(); - n += th2.join().unwrap(); - - assert_eq!(n, NUM_TASKS); - }); -} - -#[test] -fn chained_steal() { - loom::model(|| { - let (s1, mut l1) = queue::local(); - let (s2, mut l2) = queue::local(); - let inject = queue::Inject::new(); - - // Load up some tasks - for _ in 0..4 { - let (task, _) = task::joinable::<_, Runtime>(async {}); - l1.push_back(task, 
&inject); - - let (task, _) = task::joinable::<_, Runtime>(async {}); - l2.push_back(task, &inject); - } - - // Spawn a task to steal from **our** queue - let th = thread::spawn(move || { - let (_, mut local) = queue::local(); - s1.steal_into(&mut local); - - while local.pop().is_some() {} - }); - - // Drain our tasks, then attempt to steal - while l1.pop().is_some() {} - - s2.steal_into(&mut l1); - - th.join().unwrap(); - - while l1.pop().is_some() {} - while l2.pop().is_some() {} - while inject.pop().is_some() {} - }); -} - -struct Runtime; - -impl Schedule for Runtime { - fn bind(task: Task) -> Runtime { - std::mem::forget(task); - Runtime - } - - fn release(&self, _task: &Task) -> Option> { - None - } - - fn schedule(&self, _task: task::Notified) { - unreachable!(); - } -} diff --git a/third_party/rust/tokio-0.2.25/src/runtime/tests/mod.rs b/third_party/rust/tokio-0.2.25/src/runtime/tests/mod.rs deleted file mode 100644 index 123a7e35a370..000000000000 --- a/third_party/rust/tokio-0.2.25/src/runtime/tests/mod.rs +++ /dev/null @@ -1,13 +0,0 @@ -cfg_loom! { - mod loom_blocking; - mod loom_oneshot; - mod loom_pool; - mod loom_queue; -} - -cfg_not_loom! { - mod queue; - - #[cfg(miri)] - mod task; -} diff --git a/third_party/rust/tokio-0.2.25/src/runtime/tests/queue.rs b/third_party/rust/tokio-0.2.25/src/runtime/tests/queue.rs deleted file mode 100644 index d228d5dcc791..000000000000 --- a/third_party/rust/tokio-0.2.25/src/runtime/tests/queue.rs +++ /dev/null @@ -1,202 +0,0 @@ -use crate::runtime::queue; -use crate::runtime::task::{self, Schedule, Task}; - -use std::thread; -use std::time::Duration; - -#[test] -fn fits_256() { - let (_, mut local) = queue::local(); - let inject = queue::Inject::new(); - - for _ in 0..256 { - let (task, _) = task::joinable::<_, Runtime>(async {}); - local.push_back(task, &inject); - } - - assert!(inject.pop().is_none()); - - while local.pop().is_some() {} -} - -#[test] -fn overflow() { - let (_, mut local) = queue::local(); - let inject = queue::Inject::new(); - - for _ in 0..257 { - let (task, _) = task::joinable::<_, Runtime>(async {}); - local.push_back(task, &inject); - } - - let mut n = 0; - - while inject.pop().is_some() { - n += 1; - } - - while local.pop().is_some() { - n += 1; - } - - assert_eq!(n, 257); -} - -#[test] -fn steal_batch() { - let (steal1, mut local1) = queue::local(); - let (_, mut local2) = queue::local(); - let inject = queue::Inject::new(); - - for _ in 0..4 { - let (task, _) = task::joinable::<_, Runtime>(async {}); - local1.push_back(task, &inject); - } - - assert!(steal1.steal_into(&mut local2).is_some()); - - for _ in 0..1 { - assert!(local2.pop().is_some()); - } - - assert!(local2.pop().is_none()); - - for _ in 0..2 { - assert!(local1.pop().is_some()); - } - - assert!(local1.pop().is_none()); -} - -#[test] -fn stress1() { - const NUM_ITER: usize = 1; - const NUM_STEAL: usize = 1_000; - const NUM_LOCAL: usize = 1_000; - const NUM_PUSH: usize = 500; - const NUM_POP: usize = 250; - - for _ in 0..NUM_ITER { - let (steal, mut local) = queue::local(); - let inject = queue::Inject::new(); - - let th = thread::spawn(move || { - let (_, mut local) = queue::local(); - let mut n = 0; - - for _ in 0..NUM_STEAL { - if steal.steal_into(&mut local).is_some() { - n += 1; - } - - while local.pop().is_some() { - n += 1; - } - - thread::yield_now(); - } - - n - }); - - let mut n = 0; - - for _ in 0..NUM_LOCAL { - for _ in 0..NUM_PUSH { - let (task, _) = task::joinable::<_, Runtime>(async {}); - local.push_back(task, &inject); - } - - for _ in 
0..NUM_POP { - if local.pop().is_some() { - n += 1; - } else { - break; - } - } - } - - while inject.pop().is_some() { - n += 1; - } - - n += th.join().unwrap(); - - assert_eq!(n, NUM_LOCAL * NUM_PUSH); - } -} - -#[test] -fn stress2() { - const NUM_ITER: usize = 1; - const NUM_TASKS: usize = 1_000_000; - const NUM_STEAL: usize = 1_000; - - for _ in 0..NUM_ITER { - let (steal, mut local) = queue::local(); - let inject = queue::Inject::new(); - - let th = thread::spawn(move || { - let (_, mut local) = queue::local(); - let mut n = 0; - - for _ in 0..NUM_STEAL { - if steal.steal_into(&mut local).is_some() { - n += 1; - } - - while local.pop().is_some() { - n += 1; - } - - thread::sleep(Duration::from_micros(10)); - } - - n - }); - - let mut num_pop = 0; - - for i in 0..NUM_TASKS { - let (task, _) = task::joinable::<_, Runtime>(async {}); - local.push_back(task, &inject); - - if i % 128 == 0 && local.pop().is_some() { - num_pop += 1; - } - - while inject.pop().is_some() { - num_pop += 1; - } - } - - num_pop += th.join().unwrap(); - - while local.pop().is_some() { - num_pop += 1; - } - - while inject.pop().is_some() { - num_pop += 1; - } - - assert_eq!(num_pop, NUM_TASKS); - } -} - -struct Runtime; - -impl Schedule for Runtime { - fn bind(task: Task) -> Runtime { - std::mem::forget(task); - Runtime - } - - fn release(&self, _task: &Task) -> Option> { - None - } - - fn schedule(&self, _task: task::Notified) { - unreachable!(); - } -} diff --git a/third_party/rust/tokio-0.2.25/src/runtime/tests/task.rs b/third_party/rust/tokio-0.2.25/src/runtime/tests/task.rs deleted file mode 100644 index 82315a04ffa5..000000000000 --- a/third_party/rust/tokio-0.2.25/src/runtime/tests/task.rs +++ /dev/null @@ -1,159 +0,0 @@ -use crate::runtime::task::{self, Schedule, Task}; -use crate::util::linked_list::LinkedList; -use crate::util::TryLock; - -use std::collections::VecDeque; -use std::sync::Arc; - -#[test] -fn create_drop() { - let _ = task::joinable::<_, Runtime>(async { unreachable!() }); -} - -#[test] -fn schedule() { - with(|rt| { - let (task, _) = task::joinable(async { - crate::task::yield_now().await; - }); - - rt.schedule(task); - - assert_eq!(2, rt.tick()); - }) -} - -#[test] -fn shutdown() { - with(|rt| { - let (task, _) = task::joinable(async { - loop { - crate::task::yield_now().await; - } - }); - - rt.schedule(task); - rt.tick_max(1); - - rt.shutdown(); - }) -} - -fn with(f: impl FnOnce(Runtime)) { - struct Reset; - - impl Drop for Reset { - fn drop(&mut self) { - let _rt = CURRENT.try_lock().unwrap().take(); - } - } - - let _reset = Reset; - - let rt = Runtime(Arc::new(Inner { - released: task::TransferStack::new(), - core: TryLock::new(Core { - queue: VecDeque::new(), - tasks: LinkedList::new(), - }), - })); - - *CURRENT.try_lock().unwrap() = Some(rt.clone()); - f(rt) -} - -#[derive(Clone)] -struct Runtime(Arc); - -struct Inner { - released: task::TransferStack, - core: TryLock, -} - -struct Core { - queue: VecDeque>, - tasks: LinkedList>, -} - -static CURRENT: TryLock> = TryLock::new(None); - -impl Runtime { - fn tick(&self) -> usize { - self.tick_max(usize::max_value()) - } - - fn tick_max(&self, max: usize) -> usize { - let mut n = 0; - - while !self.is_empty() && n < max { - let task = self.next_task(); - n += 1; - task.run(); - } - - self.0.maintenance(); - - n - } - - fn is_empty(&self) -> bool { - self.0.core.try_lock().unwrap().queue.is_empty() - } - - fn next_task(&self) -> task::Notified { - self.0.core.try_lock().unwrap().queue.pop_front().unwrap() - } - - fn shutdown(&self) { - let 
mut core = self.0.core.try_lock().unwrap(); - - for task in core.tasks.iter() { - task.shutdown(); - } - - while let Some(task) = core.queue.pop_back() { - task.shutdown(); - } - - drop(core); - - while !self.0.core.try_lock().unwrap().tasks.is_empty() { - self.0.maintenance(); - } - } -} - -impl Inner { - fn maintenance(&self) { - use std::mem::ManuallyDrop; - - for task in self.released.drain() { - let task = ManuallyDrop::new(task); - - // safety: see worker.rs - unsafe { - let ptr = task.header().into(); - self.core.try_lock().unwrap().tasks.remove(ptr); - } - } - } -} - -impl Schedule for Runtime { - fn bind(task: Task) -> Runtime { - let rt = CURRENT.try_lock().unwrap().as_ref().unwrap().clone(); - rt.0.core.try_lock().unwrap().tasks.push_front(task); - rt - } - - fn release(&self, task: &Task) -> Option> { - // safety: copying worker.rs - let task = unsafe { Task::from_raw(task.header().into()) }; - self.0.released.push(task); - None - } - - fn schedule(&self, task: task::Notified) { - self.0.core.try_lock().unwrap().queue.push_back(task); - } -} diff --git a/third_party/rust/tokio-0.2.25/src/runtime/thread_pool/atomic_cell.rs b/third_party/rust/tokio-0.2.25/src/runtime/thread_pool/atomic_cell.rs deleted file mode 100644 index 2bda0fc7387a..000000000000 --- a/third_party/rust/tokio-0.2.25/src/runtime/thread_pool/atomic_cell.rs +++ /dev/null @@ -1,52 +0,0 @@ -use crate::loom::sync::atomic::AtomicPtr; - -use std::ptr; -use std::sync::atomic::Ordering::AcqRel; - -pub(super) struct AtomicCell { - data: AtomicPtr, -} - -unsafe impl Send for AtomicCell {} -unsafe impl Sync for AtomicCell {} - -impl AtomicCell { - pub(super) fn new(data: Option>) -> AtomicCell { - AtomicCell { - data: AtomicPtr::new(to_raw(data)), - } - } - - pub(super) fn swap(&self, val: Option>) -> Option> { - let old = self.data.swap(to_raw(val), AcqRel); - from_raw(old) - } - - #[cfg(feature = "blocking")] - pub(super) fn set(&self, val: Box) { - let _ = self.swap(Some(val)); - } - - pub(super) fn take(&self) -> Option> { - self.swap(None) - } -} - -fn to_raw(data: Option>) -> *mut T { - data.map(Box::into_raw).unwrap_or(ptr::null_mut()) -} - -fn from_raw(val: *mut T) -> Option> { - if val.is_null() { - None - } else { - Some(unsafe { Box::from_raw(val) }) - } -} - -impl Drop for AtomicCell { - fn drop(&mut self) { - // Free any data still held by the cell - let _ = self.take(); - } -} diff --git a/third_party/rust/tokio-0.2.25/src/runtime/thread_pool/idle.rs b/third_party/rust/tokio-0.2.25/src/runtime/thread_pool/idle.rs deleted file mode 100644 index ae87ca4ba1a7..000000000000 --- a/third_party/rust/tokio-0.2.25/src/runtime/thread_pool/idle.rs +++ /dev/null @@ -1,222 +0,0 @@ -//! Coordinates idling workers - -use crate::loom::sync::atomic::AtomicUsize; -use crate::loom::sync::Mutex; - -use std::fmt; -use std::sync::atomic::Ordering::{self, SeqCst}; - -pub(super) struct Idle { - /// Tracks both the number of searching workers and the number of unparked - /// workers. - /// - /// Used as a fast-path to avoid acquiring the lock when needed. - state: AtomicUsize, - - /// Sleeping workers - sleepers: Mutex>, - - /// Total number of workers. 
- num_workers: usize, -} - -const UNPARK_SHIFT: usize = 16; -const UNPARK_MASK: usize = !SEARCH_MASK; -const SEARCH_MASK: usize = (1 << UNPARK_SHIFT) - 1; - -#[derive(Copy, Clone)] -struct State(usize); - -impl Idle { - pub(super) fn new(num_workers: usize) -> Idle { - let init = State::new(num_workers); - - Idle { - state: AtomicUsize::new(init.into()), - sleepers: Mutex::new(Vec::with_capacity(num_workers)), - num_workers, - } - } - - /// If there are no workers actively searching, returns the index of a - /// worker currently sleeping. - pub(super) fn worker_to_notify(&self) -> Option { - // If at least one worker is spinning, work being notified will - // eventully be found. A searching thread will find **some** work and - // notify another worker, eventually leading to our work being found. - // - // For this to happen, this load must happen before the thread - // transitioning `num_searching` to zero. Acquire / Relese does not - // provide sufficient guarantees, so this load is done with `SeqCst` and - // will pair with the `fetch_sub(1)` when transitioning out of - // searching. - if !self.notify_should_wakeup() { - return None; - } - - // Acquire the lock - let mut sleepers = self.sleepers.lock().unwrap(); - - // Check again, now that the lock is acquired - if !self.notify_should_wakeup() { - return None; - } - - // A worker should be woken up, atomically increment the number of - // searching workers as well as the number of unparked workers. - State::unpark_one(&self.state); - - // Get the worker to unpark - let ret = sleepers.pop(); - debug_assert!(ret.is_some()); - - ret - } - - /// Returns `true` if the worker needs to do a final check for submitted - /// work. - pub(super) fn transition_worker_to_parked(&self, worker: usize, is_searching: bool) -> bool { - // Acquire the lock - let mut sleepers = self.sleepers.lock().unwrap(); - - // Decrement the number of unparked threads - let ret = State::dec_num_unparked(&self.state, is_searching); - - // Track the sleeping worker - sleepers.push(worker); - - ret - } - - pub(super) fn transition_worker_to_searching(&self) -> bool { - let state = State::load(&self.state, SeqCst); - if 2 * state.num_searching() >= self.num_workers { - return false; - } - - // It is possible for this routine to allow more than 50% of the workers - // to search. That is OK. Limiting searchers is only an optimization to - // prevent too much contention. - State::inc_num_searching(&self.state, SeqCst); - true - } - - /// A lightweight transition from searching -> running. - /// - /// Returns `true` if this is the final searching worker. The caller - /// **must** notify a new worker. - pub(super) fn transition_worker_from_searching(&self) -> bool { - State::dec_num_searching(&self.state) - } - - /// Unpark a specific worker. This happens if tasks are submitted from - /// within the worker's park routine. - pub(super) fn unpark_worker_by_id(&self, worker_id: usize) { - let mut sleepers = self.sleepers.lock().unwrap(); - - for index in 0..sleepers.len() { - if sleepers[index] == worker_id { - sleepers.swap_remove(index); - - // Update the state accordingly whle the lock is held. 
- State::unpark_one(&self.state); - - return; - } - } - } - - /// Returns `true` if `worker_id` is contained in the sleep set - pub(super) fn is_parked(&self, worker_id: usize) -> bool { - let sleepers = self.sleepers.lock().unwrap(); - sleepers.contains(&worker_id) - } - - fn notify_should_wakeup(&self) -> bool { - let state = State(self.state.fetch_add(0, SeqCst)); - state.num_searching() == 0 && state.num_unparked() < self.num_workers - } -} - -impl State { - fn new(num_workers: usize) -> State { - // All workers start in the unparked state - let ret = State(num_workers << UNPARK_SHIFT); - debug_assert_eq!(num_workers, ret.num_unparked()); - debug_assert_eq!(0, ret.num_searching()); - ret - } - - fn load(cell: &AtomicUsize, ordering: Ordering) -> State { - State(cell.load(ordering)) - } - - fn unpark_one(cell: &AtomicUsize) { - cell.fetch_add(1 | (1 << UNPARK_SHIFT), SeqCst); - } - - fn inc_num_searching(cell: &AtomicUsize, ordering: Ordering) { - cell.fetch_add(1, ordering); - } - - /// Returns `true` if this is the final searching worker - fn dec_num_searching(cell: &AtomicUsize) -> bool { - let state = State(cell.fetch_sub(1, SeqCst)); - state.num_searching() == 1 - } - - /// Track a sleeping worker - /// - /// Returns `true` if this is the final searching worker. - fn dec_num_unparked(cell: &AtomicUsize, is_searching: bool) -> bool { - let mut dec = 1 << UNPARK_SHIFT; - - if is_searching { - dec += 1; - } - - let prev = State(cell.fetch_sub(dec, SeqCst)); - is_searching && prev.num_searching() == 1 - } - - /// Number of workers currently searching - fn num_searching(self) -> usize { - self.0 & SEARCH_MASK - } - - /// Number of workers currently unparked - fn num_unparked(self) -> usize { - (self.0 & UNPARK_MASK) >> UNPARK_SHIFT - } -} - -impl From for State { - fn from(src: usize) -> State { - State(src) - } -} - -impl From for usize { - fn from(src: State) -> usize { - src.0 - } -} - -impl fmt::Debug for State { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("worker::State") - .field("num_unparked", &self.num_unparked()) - .field("num_searching", &self.num_searching()) - .finish() - } -} - -#[test] -fn test_state() { - assert_eq!(0, UNPARK_MASK & SEARCH_MASK); - assert_eq!(0, !(UNPARK_MASK | SEARCH_MASK)); - - let state = State::new(10); - assert_eq!(10, state.num_unparked()); - assert_eq!(0, state.num_searching()); -} diff --git a/third_party/rust/tokio-0.2.25/src/runtime/thread_pool/mod.rs b/third_party/rust/tokio-0.2.25/src/runtime/thread_pool/mod.rs deleted file mode 100644 index d30e8d456ca4..000000000000 --- a/third_party/rust/tokio-0.2.25/src/runtime/thread_pool/mod.rs +++ /dev/null @@ -1,121 +0,0 @@ -//! Threadpool - -mod atomic_cell; -use atomic_cell::AtomicCell; - -mod idle; -use self::idle::Idle; - -mod worker; -pub(crate) use worker::Launch; - -cfg_blocking! { - pub(crate) use worker::block_in_place; -} - -use crate::loom::sync::Arc; -use crate::runtime::task::{self, JoinHandle}; -use crate::runtime::Parker; - -use std::fmt; -use std::future::Future; - -/// Work-stealing based thread pool for executing futures. -pub(crate) struct ThreadPool { - spawner: Spawner, -} - -/// Submit futures to the associated thread pool for execution. -/// -/// A `Spawner` instance is a handle to a single thread pool that allows the owner -/// of the handle to spawn futures onto the thread pool. -/// -/// The `Spawner` handle is *only* used for spawning new futures. It does not -/// impact the lifecycle of the thread pool in any way. 
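The packed `State` above keeps the searching-worker count in the low 16 bits and the unparked-worker count above `UNPARK_SHIFT`, so both numbers can be adjusted with a single atomic operation. A stand-alone sketch of that bookkeeping (illustrative only, not the crate's private `Idle`/`State` types):

```rust
use std::sync::atomic::{AtomicUsize, Ordering::SeqCst};

const UNPARK_SHIFT: usize = 16;
const SEARCH_MASK: usize = (1 << UNPARK_SHIFT) - 1;

fn num_searching(state: usize) -> usize {
    state & SEARCH_MASK
}

fn num_unparked(state: usize) -> usize {
    state >> UNPARK_SHIFT
}

fn main() {
    // Start with 4 unparked workers and nobody searching.
    let state = AtomicUsize::new(4 << UNPARK_SHIFT);

    // "unpark_one": bump both counters in a single fetch_add, mirroring the
    // `1 | (1 << UNPARK_SHIFT)` increment above.
    state.fetch_add(1 | (1 << UNPARK_SHIFT), SeqCst);

    let s = state.load(SeqCst);
    assert_eq!(num_searching(s), 1);
    assert_eq!(num_unparked(s), 5);
}
```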
The thread pool may -/// shutdown while there are outstanding `Spawner` instances. -/// -/// `Spawner` instances are obtained by calling [`ThreadPool::spawner`]. -/// -/// [`ThreadPool::spawner`]: method@ThreadPool::spawner -#[derive(Clone)] -pub(crate) struct Spawner { - shared: Arc, -} - -// ===== impl ThreadPool ===== - -impl ThreadPool { - pub(crate) fn new(size: usize, parker: Parker) -> (ThreadPool, Launch) { - let (shared, launch) = worker::create(size, parker); - let spawner = Spawner { shared }; - let thread_pool = ThreadPool { spawner }; - - (thread_pool, launch) - } - - /// Returns reference to `Spawner`. - /// - /// The `Spawner` handle can be cloned and enables spawning tasks from other - /// threads. - pub(crate) fn spawner(&self) -> &Spawner { - &self.spawner - } - - /// Spawns a task - pub(crate) fn spawn(&self, future: F) -> JoinHandle - where - F: Future + Send + 'static, - F::Output: Send + 'static, - { - self.spawner.spawn(future) - } - - /// Blocks the current thread waiting for the future to complete. - /// - /// The future will execute on the current thread, but all spawned tasks - /// will be executed on the thread pool. - pub(crate) fn block_on(&self, future: F) -> F::Output - where - F: Future, - { - let mut enter = crate::runtime::enter(true); - enter.block_on(future).expect("failed to park thread") - } -} - -impl fmt::Debug for ThreadPool { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("ThreadPool").finish() - } -} - -impl Drop for ThreadPool { - fn drop(&mut self) { - self.spawner.shutdown(); - } -} - -// ==== impl Spawner ===== - -impl Spawner { - /// Spawns a future onto the thread pool - pub(crate) fn spawn(&self, future: F) -> JoinHandle - where - F: Future + Send + 'static, - F::Output: Send + 'static, - { - let (task, handle) = task::joinable(future); - self.shared.schedule(task, false); - handle - } - - pub(crate) fn shutdown(&mut self) { - self.shared.close(); - } -} - -impl fmt::Debug for Spawner { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("Spawner").finish() - } -} diff --git a/third_party/rust/tokio-0.2.25/src/runtime/thread_pool/worker.rs b/third_party/rust/tokio-0.2.25/src/runtime/thread_pool/worker.rs deleted file mode 100644 index ac05285478d7..000000000000 --- a/third_party/rust/tokio-0.2.25/src/runtime/thread_pool/worker.rs +++ /dev/null @@ -1,804 +0,0 @@ -//! A scheduler is initialized with a fixed number of workers. Each worker is -//! driven by a thread. Each worker has a "core" which contains data such as the -//! run queue and other state. When `block_in_place` is called, the worker's -//! "core" is handed off to a new thread allowing the scheduler to continue to -//! make progress while the originating thread blocks. - -use crate::coop; -use crate::loom::rand::seed; -use crate::loom::sync::{Arc, Mutex}; -use crate::park::{Park, Unpark}; -use crate::runtime; -use crate::runtime::park::{Parker, Unparker}; -use crate::runtime::thread_pool::{AtomicCell, Idle}; -use crate::runtime::{queue, task}; -use crate::util::linked_list::LinkedList; -use crate::util::FastRand; - -use std::cell::RefCell; -use std::time::Duration; - -/// A scheduler worker -pub(super) struct Worker { - /// Reference to shared state - shared: Arc, - - /// Index holding this worker's remote state - index: usize, - - /// Used to hand-off a worker's core to another thread. - core: AtomicCell, -} - -/// Core data -struct Core { - /// Used to schedule bookkeeping tasks every so often. 
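The `Spawner` described above is only a spawning handle; it has no influence over the pool's lifecycle. A hedged sketch of the same idea through tokio 0.2's public `Runtime`/`Handle` API (the builder calls mirror the `mk_pool` helper in the loom tests above; the std channel is only there to make the result observable):

```rust
use tokio::runtime;

fn main() {
    let rt = runtime::Builder::new()
        .threaded_scheduler()
        .core_threads(2)
        .build()
        .unwrap();

    let (tx, rx) = std::sync::mpsc::channel();
    let handle = rt.handle().clone();

    std::thread::spawn(move || {
        // Spawning is all the handle is for; dropping it afterwards does not
        // shut the pool down.
        handle.spawn(async move {
            tx.send("spawned from another thread").unwrap();
        });
    })
    .join()
    .unwrap();

    assert_eq!(rx.recv().unwrap(), "spawned from another thread");
    // Shutdown only happens when `rt` itself is dropped.
}
```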
- tick: u8, - - /// When a task is scheduled from a worker, it is stored in this slot. The - /// worker will check this slot for a task **before** checking the run - /// queue. This effectively results in the **last** scheduled task to be run - /// next (LIFO). This is an optimization for message passing patterns and - /// helps to reduce latency. - lifo_slot: Option, - - /// The worker-local run queue. - run_queue: queue::Local>, - - /// True if the worker is currently searching for more work. Searching - /// involves attempting to steal from other workers. - is_searching: bool, - - /// True if the scheduler is being shutdown - is_shutdown: bool, - - /// Tasks owned by the core - tasks: LinkedList, - - /// Parker - /// - /// Stored in an `Option` as the parker is added / removed to make the - /// borrow checker happy. - park: Option, - - /// Fast random number generator. - rand: FastRand, -} - -/// State shared across all workers -pub(super) struct Shared { - /// Per-worker remote state. All other workers have access to this and is - /// how they communicate between each other. - remotes: Box<[Remote]>, - - /// Submit work to the scheduler while **not** currently on a worker thread. - inject: queue::Inject>, - - /// Coordinates idle workers - idle: Idle, - - /// Workers have have observed the shutdown signal - /// - /// The core is **not** placed back in the worker to avoid it from being - /// stolen by a thread that was spawned as part of `block_in_place`. - shutdown_workers: Mutex, Arc)>>, -} - -/// Used to communicate with a worker from other threads. -struct Remote { - /// Steal tasks from this worker. - steal: queue::Steal>, - - /// Transfers tasks to be released. Any worker pushes tasks, only the owning - /// worker pops. - pending_drop: task::TransferStack>, - - /// Unparks the associated worker thread - unpark: Unparker, -} - -/// Thread-local context -struct Context { - /// Worker - worker: Arc, - - /// Core data - core: RefCell>>, -} - -/// Starts the workers -pub(crate) struct Launch(Vec>); - -/// Running a task may consume the core. If the core is still available when -/// running the task completes, it is returned. Otherwise, the worker will need -/// to stop processing. -type RunResult = Result, ()>; - -/// A task handle -type Task = task::Task>; - -/// A notified task handle -type Notified = task::Notified>; - -// Tracks thread-local state -scoped_thread_local!(static CURRENT: Context); - -pub(super) fn create(size: usize, park: Parker) -> (Arc, Launch) { - let mut cores = vec![]; - let mut remotes = vec![]; - - // Create the local queues - for _ in 0..size { - let (steal, run_queue) = queue::local(); - - let park = park.clone(); - let unpark = park.unpark(); - - cores.push(Box::new(Core { - tick: 0, - lifo_slot: None, - run_queue, - is_searching: false, - is_shutdown: false, - tasks: LinkedList::new(), - park: Some(park), - rand: FastRand::new(seed()), - })); - - remotes.push(Remote { - steal, - pending_drop: task::TransferStack::new(), - unpark, - }); - } - - let shared = Arc::new(Shared { - remotes: remotes.into_boxed_slice(), - inject: queue::Inject::new(), - idle: Idle::new(size), - shutdown_workers: Mutex::new(vec![]), - }); - - let mut launch = Launch(vec![]); - - for (index, core) in cores.drain(..).enumerate() { - launch.0.push(Arc::new(Worker { - shared: shared.clone(), - index, - core: AtomicCell::new(Some(core)), - })); - } - - (shared, launch) -} - -cfg_blocking! 
{ - use crate::runtime::enter::EnterContext; - - pub(crate) fn block_in_place(f: F) -> R - where - F: FnOnce() -> R, - { - // Try to steal the worker core back - struct Reset(coop::Budget); - - impl Drop for Reset { - fn drop(&mut self) { - CURRENT.with(|maybe_cx| { - if let Some(cx) = maybe_cx { - let core = cx.worker.core.take(); - let mut cx_core = cx.core.borrow_mut(); - assert!(cx_core.is_none()); - *cx_core = core; - - // Reset the task budget as we are re-entering the - // runtime. - coop::set(self.0); - } - }); - } - } - - let mut had_entered = false; - - CURRENT.with(|maybe_cx| { - match (crate::runtime::enter::context(), maybe_cx.is_some()) { - (EnterContext::Entered { .. }, true) => { - // We are on a thread pool runtime thread, so we just need to set up blocking. - had_entered = true; - } - (EnterContext::Entered { allow_blocking }, false) => { - // We are on an executor, but _not_ on the thread pool. - // That is _only_ okay if we are in a thread pool runtime's block_on method: - if allow_blocking { - had_entered = true; - return; - } else { - // This probably means we are on the basic_scheduler or in a LocalSet, - // where it is _not_ okay to block. - panic!("can call blocking only when running on the multi-threaded runtime"); - } - } - (EnterContext::NotEntered, true) => { - // This is a nested call to block_in_place (we already exited). - // All the necessary setup has already been done. - return; - } - (EnterContext::NotEntered, false) => { - // We are outside of the tokio runtime, so blocking is fine. - // We can also skip all of the thread pool blocking setup steps. - return; - } - } - - let cx = maybe_cx.expect("no .is_some() == false cases above should lead here"); - - // Get the worker core. If none is set, then blocking is fine! - let core = match cx.core.borrow_mut().take() { - Some(core) => core, - None => return, - }; - - // The parker should be set here - assert!(core.park.is_some()); - - // In order to block, the core must be sent to another thread for - // execution. - // - // First, move the core back into the worker's shared core slot. - cx.worker.core.set(core); - - // Next, clone the worker handle and send it to a new thread for - // processing. - // - // Once the blocking task is done executing, we will attempt to - // steal the core back. - let worker = cx.worker.clone(); - runtime::spawn_blocking(move || run(worker)); - }); - - if had_entered { - // Unset the current task's budget. Blocking sections are not - // constrained by task budgets. - let _reset = Reset(coop::stop()); - - crate::runtime::enter::exit(f) - } else { - f() - } - } -} - -/// After how many ticks is the global queue polled. This helps to ensure -/// fairness. -/// -/// The number is fairly arbitrary. I believe this value was copied from golang. -const GLOBAL_POLL_INTERVAL: u8 = 61; - -impl Launch { - pub(crate) fn launch(mut self) { - for worker in self.0.drain(..) { - runtime::spawn_blocking(move || run(worker)); - } - } -} - -fn run(worker: Arc) { - // Acquire a core. If this fails, then another thread is running this - // worker and there is nothing further to do. - let core = match worker.core.take() { - Some(core) => core, - None => return, - }; - - // Set the worker context. - let cx = Context { - worker, - core: RefCell::new(None), - }; - - let _enter = crate::runtime::enter(true); - - CURRENT.set(&cx, || { - // This should always be an error. It only returns a `Result` to support - // using `?` to short circuit. 
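A usage sketch of the hand-off `block_in_place` performs, assuming tokio 0.2 with the threaded scheduler and blocking support enabled; it goes through the public `tokio::task::block_in_place` rather than the private worker internals above:

```rust
use std::time::Duration;
use tokio::runtime;

fn main() {
    let mut rt = runtime::Builder::new()
        .threaded_scheduler()
        .core_threads(2)
        .build()
        .unwrap();

    rt.block_on(async {
        let handle = tokio::spawn(async {
            // The closure may block: this worker's core is handed to another
            // thread, so the rest of the pool keeps making progress.
            tokio::task::block_in_place(|| std::thread::sleep(Duration::from_millis(50)));
            "done"
        });

        assert_eq!(handle.await.unwrap(), "done");
    });
}
```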
- assert!(cx.run(core).is_err()); - }); -} - -impl Context { - fn run(&self, mut core: Box) -> RunResult { - while !core.is_shutdown { - // Increment the tick - core.tick(); - - // Run maintenance, if needed - core = self.maintenance(core); - - // First, check work available to the current worker. - if let Some(task) = core.next_task(&self.worker) { - core = self.run_task(task, core)?; - continue; - } - - // There is no more **local** work to process, try to steal work - // from other workers. - if let Some(task) = core.steal_work(&self.worker) { - core = self.run_task(task, core)?; - } else { - // Wait for work - core = self.park(core); - } - } - - // Signal shutdown - self.worker.shared.shutdown(core, self.worker.clone()); - Err(()) - } - - fn run_task(&self, task: Notified, mut core: Box) -> RunResult { - // Make sure thew orker is not in the **searching** state. This enables - // another idle worker to try to steal work. - core.transition_from_searching(&self.worker); - - // Make the core available to the runtime context - *self.core.borrow_mut() = Some(core); - - // Run the task - coop::budget(|| { - task.run(); - - // As long as there is budget remaining and a task exists in the - // `lifo_slot`, then keep running. - loop { - // Check if we still have the core. If not, the core was stolen - // by another worker. - let mut core = match self.core.borrow_mut().take() { - Some(core) => core, - None => return Err(()), - }; - - // Check for a task in the LIFO slot - let task = match core.lifo_slot.take() { - Some(task) => task, - None => return Ok(core), - }; - - if coop::has_budget_remaining() { - // Run the LIFO task, then loop - *self.core.borrow_mut() = Some(core); - task.run(); - } else { - // Not enough budget left to run the LIFO task, push it to - // the back of the queue and return. - core.run_queue.push_back(task, self.worker.inject()); - return Ok(core); - } - } - }) - } - - fn maintenance(&self, mut core: Box) -> Box { - if core.tick % GLOBAL_POLL_INTERVAL == 0 { - // Call `park` with a 0 timeout. This enables the I/O driver, timer, ... - // to run without actually putting the thread to sleep. - core = self.park_timeout(core, Some(Duration::from_millis(0))); - - // Run regularly scheduled maintenance - core.maintenance(&self.worker); - } - - core - } - - fn park(&self, mut core: Box) -> Box { - core.transition_to_parked(&self.worker); - - while !core.is_shutdown { - core = self.park_timeout(core, None); - - // Run regularly scheduled maintenance - core.maintenance(&self.worker); - - if core.transition_from_parked(&self.worker) { - return core; - } - } - - core - } - - fn park_timeout(&self, mut core: Box, duration: Option) -> Box { - // Take the parker out of core - let mut park = core.park.take().expect("park missing"); - - // Store `core` in context - *self.core.borrow_mut() = Some(core); - - // Park thread - if let Some(timeout) = duration { - park.park_timeout(timeout).expect("park failed"); - } else { - park.park().expect("park failed"); - } - - // Remove `core` from context - core = self.core.borrow_mut().take().expect("core missing"); - - // Place `park` back in `core` - core.park = Some(park); - - // If there are tasks available to steal, notify a worker - if core.run_queue.is_stealable() { - self.worker.shared.notify_parked(); - } - - core - } -} - -impl Core { - /// Increment the tick - fn tick(&mut self) { - self.tick = self.tick.wrapping_add(1); - } - - /// Return the next notified task available to this worker. 
- fn next_task(&mut self, worker: &Worker) -> Option { - if self.tick % GLOBAL_POLL_INTERVAL == 0 { - worker.inject().pop().or_else(|| self.next_local_task()) - } else { - self.next_local_task().or_else(|| worker.inject().pop()) - } - } - - fn next_local_task(&mut self) -> Option { - self.lifo_slot.take().or_else(|| self.run_queue.pop()) - } - - fn steal_work(&mut self, worker: &Worker) -> Option { - if !self.transition_to_searching(worker) { - return None; - } - - let num = worker.shared.remotes.len(); - // Start from a random worker - let start = self.rand.fastrand_n(num as u32) as usize; - - for i in 0..num { - let i = (start + i) % num; - - // Don't steal from ourself! We know we don't have work. - if i == worker.index { - continue; - } - - let target = &worker.shared.remotes[i]; - if let Some(task) = target.steal.steal_into(&mut self.run_queue) { - return Some(task); - } - } - - // Fallback on checking the global queue - worker.shared.inject.pop() - } - - fn transition_to_searching(&mut self, worker: &Worker) -> bool { - if !self.is_searching { - self.is_searching = worker.shared.idle.transition_worker_to_searching(); - } - - self.is_searching - } - - fn transition_from_searching(&mut self, worker: &Worker) { - if !self.is_searching { - return; - } - - self.is_searching = false; - worker.shared.transition_worker_from_searching(); - } - - /// Prepare the worker state for parking - fn transition_to_parked(&mut self, worker: &Worker) { - // When the final worker transitions **out** of searching to parked, it - // must check all the queues one last time in case work materialized - // between the last work scan and transitioning out of searching. - let is_last_searcher = worker - .shared - .idle - .transition_worker_to_parked(worker.index, self.is_searching); - - // The worker is no longer searching. Setting this is the local cache - // only. - self.is_searching = false; - - if is_last_searcher { - worker.shared.notify_if_work_pending(); - } - } - - /// Returns `true` if the transition happened. - fn transition_from_parked(&mut self, worker: &Worker) -> bool { - // If a task is in the lifo slot, then we must unpark regardless of - // being notified - if self.lifo_slot.is_some() { - worker.shared.idle.unpark_worker_by_id(worker.index); - self.is_searching = true; - return true; - } - - if worker.shared.idle.is_parked(worker.index) { - return false; - } - - // When unparked, the worker is in the searching state. - self.is_searching = true; - true - } - - /// Runs maintenance work such as free pending tasks and check the pool's - /// state. - fn maintenance(&mut self, worker: &Worker) { - self.drain_pending_drop(worker); - - if !self.is_shutdown { - // Check if the scheduler has been shutdown - self.is_shutdown = worker.inject().is_closed(); - } - } - - // Shutdown the core - fn shutdown(&mut self, worker: &Worker) { - // Take the core - let mut park = self.park.take().expect("park missing"); - - // Signal to all tasks to shut down. 
- for header in self.tasks.iter() { - header.shutdown(); - } - - loop { - self.drain_pending_drop(worker); - - if self.tasks.is_empty() { - break; - } - - // Wait until signalled - park.park().expect("park failed"); - } - - // Drain the queue - while self.next_local_task().is_some() {} - - park.shutdown(); - } - - fn drain_pending_drop(&mut self, worker: &Worker) { - use std::mem::ManuallyDrop; - - for task in worker.remote().pending_drop.drain() { - let task = ManuallyDrop::new(task); - - // safety: tasks are only pushed into the `pending_drop` stacks that - // are associated with the list they are inserted into. When a task - // is pushed into `pending_drop`, the ref-inc is skipped, so we must - // not ref-dec here. - // - // See `bind` and `release` implementations. - unsafe { - self.tasks.remove(task.header().into()); - } - } - } -} - -impl Worker { - /// Returns a reference to the scheduler's injection queue - fn inject(&self) -> &queue::Inject> { - &self.shared.inject - } - - /// Return a reference to this worker's remote data - fn remote(&self) -> &Remote { - &self.shared.remotes[self.index] - } - - fn eq(&self, other: &Worker) -> bool { - self.shared.ptr_eq(&other.shared) && self.index == other.index - } -} - -impl task::Schedule for Arc { - fn bind(task: Task) -> Arc { - CURRENT.with(|maybe_cx| { - let cx = maybe_cx.expect("scheduler context missing"); - - // Track the task - cx.core - .borrow_mut() - .as_mut() - .expect("scheduler core missing") - .tasks - .push_front(task); - - // Return a clone of the worker - cx.worker.clone() - }) - } - - fn release(&self, task: &Task) -> Option { - use std::ptr::NonNull; - - CURRENT.with(|maybe_cx| { - let cx = maybe_cx.expect("scheduler context missing"); - - if self.eq(&cx.worker) { - let mut maybe_core = cx.core.borrow_mut(); - - if let Some(core) = &mut *maybe_core { - // Directly remove the task - // - // safety: the task is inserted in the list in `bind`. - unsafe { - let ptr = NonNull::from(task.header()); - return core.tasks.remove(ptr); - } - } - } - - // Track the task to be released by the worker that owns it - // - // Safety: We get a new handle without incrementing the ref-count. - // A ref-count is held by the "owned" linked list and it is only - // ever removed from that list as part of the release process: this - // method or popping the task from `pending_drop`. Thus, we can rely - // on the ref-count held by the linked-list to keep the memory - // alive. - // - // When the task is removed from the stack, it is forgotten instead - // of dropped. - let task = unsafe { Task::from_raw(task.header().into()) }; - - self.remote().pending_drop.push(task); - - if cx.core.borrow().is_some() { - return None; - } - - // The worker core has been handed off to another thread. In the - // event that the scheduler is currently shutting down, the thread - // that owns the task may be waiting on the release to complete - // shutdown. - if self.inject().is_closed() { - self.remote().unpark.unpark(); - } - - None - }) - } - - fn schedule(&self, task: Notified) { - self.shared.schedule(task, false); - } - - fn yield_now(&self, task: Notified) { - self.shared.schedule(task, true); - } -} - -impl Shared { - pub(super) fn schedule(&self, task: Notified, is_yield: bool) { - CURRENT.with(|maybe_cx| { - if let Some(cx) = maybe_cx { - // Make sure the task is part of the **current** scheduler. 
- if self.ptr_eq(&cx.worker.shared) { - // And the current thread still holds a core - if let Some(core) = cx.core.borrow_mut().as_mut() { - self.schedule_local(core, task, is_yield); - return; - } - } - } - - // Otherwise, use the inject queue - self.inject.push(task); - self.notify_parked(); - }); - } - - fn schedule_local(&self, core: &mut Core, task: Notified, is_yield: bool) { - // Spawning from the worker thread. If scheduling a "yield" then the - // task must always be pushed to the back of the queue, enabling other - // tasks to be executed. If **not** a yield, then there is more - // flexibility and the task may go to the front of the queue. - let should_notify = if is_yield { - core.run_queue.push_back(task, &self.inject); - true - } else { - // Push to the LIFO slot - let prev = core.lifo_slot.take(); - let ret = prev.is_some(); - - if let Some(prev) = prev { - core.run_queue.push_back(prev, &self.inject); - } - - core.lifo_slot = Some(task); - - ret - }; - - // Only notify if not currently parked. If `park` is `None`, then the - // scheduling is from a resource driver. As notifications often come in - // batches, the notification is delayed until the park is complete. - if should_notify && core.park.is_some() { - self.notify_parked(); - } - } - - pub(super) fn close(&self) { - if self.inject.close() { - self.notify_all(); - } - } - - fn notify_parked(&self) { - if let Some(index) = self.idle.worker_to_notify() { - self.remotes[index].unpark.unpark(); - } - } - - fn notify_all(&self) { - for remote in &self.remotes[..] { - remote.unpark.unpark(); - } - } - - fn notify_if_work_pending(&self) { - for remote in &self.remotes[..] { - if !remote.steal.is_empty() { - self.notify_parked(); - return; - } - } - - if !self.inject.is_empty() { - self.notify_parked(); - } - } - - fn transition_worker_from_searching(&self) { - if self.idle.transition_worker_from_searching() { - // We are the final searching worker. Because work was found, we - // need to notify another worker. - self.notify_parked(); - } - } - - /// Signals that a worker has observed the shutdown signal and has replaced - /// its core back into its handle. - /// - /// If all workers have reached this point, the final cleanup is performed. - fn shutdown(&self, core: Box, worker: Arc) { - let mut workers = self.shutdown_workers.lock().unwrap(); - workers.push((core, worker)); - - if workers.len() != self.remotes.len() { - return; - } - - for (mut core, worker) in workers.drain(..) { - core.shutdown(&worker); - } - - // Drain the injection queue - while self.inject.pop().is_some() {} - } - - fn ptr_eq(&self, other: &Shared) -> bool { - self as *const _ == other as *const _ - } -} diff --git a/third_party/rust/tokio-0.2.25/src/runtime/time.rs b/third_party/rust/tokio-0.2.25/src/runtime/time.rs deleted file mode 100644 index c623d9641a10..000000000000 --- a/third_party/rust/tokio-0.2.25/src/runtime/time.rs +++ /dev/null @@ -1,59 +0,0 @@ -//! Abstracts out the APIs necessary to `Runtime` for integrating the time -//! driver. When the `time` feature flag is **not** enabled. These APIs are -//! shells. This isolates the complexity of dealing with conditional -//! compilation. 
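A minimal stand-alone illustration of the "variant module" pattern this file uses, written against a hypothetical `time` cargo feature: callers always name `time_shim::Driver` and `time_shim::create_driver`, and only this one module knows which variant was compiled in:

```rust
mod time_shim {
    pub use self::variant::*;

    #[cfg(feature = "time")]
    mod variant {
        /// Hypothetical stand-in for the real timer driver.
        pub struct Driver;

        pub fn create_driver() -> Driver {
            Driver
        }
    }

    #[cfg(not(feature = "time"))]
    mod variant {
        /// With the feature off, the "driver" is an empty shell.
        pub type Driver = ();

        pub fn create_driver() -> Driver {}
    }
}

fn main() {
    // The caller never mentions the feature flag.
    let _driver: time_shim::Driver = time_shim::create_driver();
}
```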
- -pub(crate) use variant::*; - -#[cfg(feature = "time")] -mod variant { - use crate::park::Either; - use crate::runtime::io; - use crate::time::{self, driver}; - - pub(crate) type Clock = time::Clock; - pub(crate) type Driver = Either, io::Driver>; - pub(crate) type Handle = Option; - - pub(crate) fn create_clock() -> Clock { - Clock::new() - } - - /// Create a new timer driver / handle pair - pub(crate) fn create_driver( - enable: bool, - io_driver: io::Driver, - clock: Clock, - ) -> (Driver, Handle) { - if enable { - let driver = driver::Driver::new(io_driver, clock); - let handle = driver.handle(); - - (Either::A(driver), Some(handle)) - } else { - (Either::B(io_driver), None) - } - } -} - -#[cfg(not(feature = "time"))] -mod variant { - use crate::runtime::io; - - pub(crate) type Clock = (); - pub(crate) type Driver = io::Driver; - pub(crate) type Handle = (); - - pub(crate) fn create_clock() -> Clock { - () - } - - /// Create a new timer driver / handle pair - pub(crate) fn create_driver( - _enable: bool, - io_driver: io::Driver, - _clock: Clock, - ) -> (Driver, Handle) { - (io_driver, ()) - } -} diff --git a/third_party/rust/tokio-0.2.25/src/signal/ctrl_c.rs b/third_party/rust/tokio-0.2.25/src/signal/ctrl_c.rs deleted file mode 100644 index 1eeeb85aa172..000000000000 --- a/third_party/rust/tokio-0.2.25/src/signal/ctrl_c.rs +++ /dev/null @@ -1,53 +0,0 @@ -#[cfg(unix)] -use super::unix::{self as os_impl}; -#[cfg(windows)] -use super::windows::{self as os_impl}; - -use std::io; - -/// Completes when a "ctrl-c" notification is sent to the process. -/// -/// While signals are handled very differently between Unix and Windows, both -/// platforms support receiving a signal on "ctrl-c". This function provides a -/// portable API for receiving this notification. -/// -/// Once the returned future is polled, a listener is registered. The future -/// will complete on the first received `ctrl-c` **after** the initial call to -/// either `Future::poll` or `.await`. -/// -/// # Caveats -/// -/// On Unix platforms, the first time that a `Signal` instance is registered for a -/// particular signal kind, an OS signal-handler is installed which replaces the -/// default platform behavior when that signal is received, **for the duration of -/// the entire process**. -/// -/// For example, Unix systems will terminate a process by default when it -/// receives a signal generated by "CTRL+C" on the terminal. But, when a -/// `ctrl_c` stream is created to listen for this signal, the time it arrives, -/// it will be translated to a stream event, and the process will continue to -/// execute. **Even if this `Signal` instance is dropped, subsequent SIGINT -/// deliveries will end up captured by Tokio, and the default platform behavior -/// will NOT be reset**. -/// -/// Thus, applications should take care to ensure the expected signal behavior -/// occurs as expected after listening for specific signals. 
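A small sketch of the caveat above, assuming tokio 0.2 with the `signal` and `macros` features: once `ctrl_c()` has installed its handler, later CTRL+C presses no longer terminate the process by default, so the program below only exits when its own loop decides to:

```rust
use tokio::signal;

#[tokio::main]
async fn main() -> std::io::Result<()> {
    for n in 1..=3 {
        signal::ctrl_c().await?;
        // The default "terminate on CTRL+C" behavior is gone for the rest of
        // the process; we keep running until the loop finishes.
        println!("ctrl-c #{} observed, still running", n);
    }

    println!("exiting voluntarily after the third ctrl-c");
    Ok(())
}
```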
-/// -/// # Examples -/// -/// ```rust,no_run -/// use tokio::signal; -/// -/// #[tokio::main] -/// async fn main() { -/// println!("waiting for ctrl-c"); -/// -/// signal::ctrl_c().await.expect("failed to listen for event"); -/// -/// println!("received ctrl-c event"); -/// } -/// ``` -pub async fn ctrl_c() -> io::Result<()> { - os_impl::ctrl_c()?.recv().await; - Ok(()) -} diff --git a/third_party/rust/tokio-0.2.25/src/signal/mod.rs b/third_party/rust/tokio-0.2.25/src/signal/mod.rs deleted file mode 100644 index 6e5e350df575..000000000000 --- a/third_party/rust/tokio-0.2.25/src/signal/mod.rs +++ /dev/null @@ -1,60 +0,0 @@ -//! Asynchronous signal handling for Tokio -//! -//! Note that signal handling is in general a very tricky topic and should be -//! used with great care. This crate attempts to implement 'best practice' for -//! signal handling, but it should be evaluated for your own applications' needs -//! to see if it's suitable. -//! -//! The are some fundamental limitations of this crate documented on the OS -//! specific structures, as well. -//! -//! # Examples -//! -//! Print on "ctrl-c" notification. -//! -//! ```rust,no_run -//! use tokio::signal; -//! -//! #[tokio::main] -//! async fn main() -> Result<(), Box> { -//! signal::ctrl_c().await?; -//! println!("ctrl-c received!"); -//! Ok(()) -//! } -//! ``` -//! -//! Wait for SIGHUP on Unix -//! -//! ```rust,no_run -//! # #[cfg(unix)] { -//! use tokio::signal::unix::{signal, SignalKind}; -//! -//! #[tokio::main] -//! async fn main() -> Result<(), Box> { -//! // An infinite stream of hangup signals. -//! let mut stream = signal(SignalKind::hangup())?; -//! -//! // Print whenever a HUP signal is received -//! loop { -//! stream.recv().await; -//! println!("got signal HUP"); -//! } -//! } -//! # } -//! ``` - -mod ctrl_c; -pub use ctrl_c::ctrl_c; - -mod registry; - -mod os { - #[cfg(unix)] - pub(crate) use super::unix::{OsExtraData, OsStorage}; - - #[cfg(windows)] - pub(crate) use super::windows::{OsExtraData, OsStorage}; -} - -pub mod unix; -pub mod windows; diff --git a/third_party/rust/tokio-0.2.25/src/signal/registry.rs b/third_party/rust/tokio-0.2.25/src/signal/registry.rs deleted file mode 100644 index 50edd2b6c485..000000000000 --- a/third_party/rust/tokio-0.2.25/src/signal/registry.rs +++ /dev/null @@ -1,321 +0,0 @@ -#![allow(clippy::unit_arg)] - -use crate::signal::os::{OsExtraData, OsStorage}; - -use crate::sync::mpsc::Sender; - -use lazy_static::lazy_static; -use std::ops; -use std::pin::Pin; -use std::sync::atomic::{AtomicBool, Ordering}; -use std::sync::Mutex; - -pub(crate) type EventId = usize; - -/// State for a specific event, whether a notification is pending delivery, -/// and what listeners are registered. -#[derive(Default, Debug)] -pub(crate) struct EventInfo { - pending: AtomicBool, - recipients: Mutex>>, -} - -/// An interface for retrieving the `EventInfo` for a particular eventId. -pub(crate) trait Storage { - /// Gets the `EventInfo` for `id` if it exists. - fn event_info(&self, id: EventId) -> Option<&EventInfo>; - - /// Invokes `f` once for each defined `EventInfo` in this storage. - fn for_each<'a, F>(&'a self, f: F) - where - F: FnMut(&'a EventInfo); -} - -impl Storage for Vec { - fn event_info(&self, id: EventId) -> Option<&EventInfo> { - self.get(id) - } - - fn for_each<'a, F>(&'a self, f: F) - where - F: FnMut(&'a EventInfo), - { - self.iter().for_each(f) - } -} - -/// An interface for initializing a type. 
Useful for situations where we cannot -/// inject a configured instance in the constructor of another type. -pub(crate) trait Init { - fn init() -> Self; -} - -/// Manages and distributes event notifications to any registered listeners. -/// -/// Generic over the underlying storage to allow for domain specific -/// optimizations (e.g. eventIds may or may not be contiguous). -#[derive(Debug)] -pub(crate) struct Registry { - storage: S, -} - -impl Registry { - fn new(storage: S) -> Self { - Self { storage } - } -} - -impl Registry { - /// Registers a new listener for `event_id`. - fn register_listener(&self, event_id: EventId, listener: Sender<()>) { - self.storage - .event_info(event_id) - .unwrap_or_else(|| panic!("invalid event_id: {}", event_id)) - .recipients - .lock() - .unwrap() - .push(listener); - } - - /// Marks `event_id` as having been delivered, without broadcasting it to - /// any listeners. - fn record_event(&self, event_id: EventId) { - if let Some(event_info) = self.storage.event_info(event_id) { - event_info.pending.store(true, Ordering::SeqCst) - } - } - - /// Broadcasts all previously recorded events to their respective listeners. - /// - /// Returns `true` if an event was delivered to at least one listener. - fn broadcast(&self) -> bool { - use crate::sync::mpsc::error::TrySendError; - - let mut did_notify = false; - self.storage.for_each(|event_info| { - // Any signal of this kind arrived since we checked last? - if !event_info.pending.swap(false, Ordering::SeqCst) { - return; - } - - let mut recipients = event_info.recipients.lock().unwrap(); - - // Notify all waiters on this signal that the signal has been - // received. If we can't push a message into the queue then we don't - // worry about it as everything is coalesced anyway. If the channel - // has gone away then we can remove that slot. - for i in (0..recipients.len()).rev() { - match recipients[i].try_send(()) { - Ok(()) => did_notify = true, - Err(TrySendError::Closed(..)) => { - recipients.swap_remove(i); - } - - // Channel is full, ignore the error since the - // receiver has already been woken up - Err(_) => {} - } - } - }); - - did_notify - } -} - -pub(crate) struct Globals { - extra: OsExtraData, - registry: Registry, -} - -impl ops::Deref for Globals { - type Target = OsExtraData; - - fn deref(&self) -> &Self::Target { - &self.extra - } -} - -impl Globals { - /// Registers a new listener for `event_id`. - pub(crate) fn register_listener(&self, event_id: EventId, listener: Sender<()>) { - self.registry.register_listener(event_id, listener); - } - - /// Marks `event_id` as having been delivered, without broadcasting it to - /// any listeners. - pub(crate) fn record_event(&self, event_id: EventId) { - self.registry.record_event(event_id); - } - - /// Broadcasts all previously recorded events to their respective listeners. - /// - /// Returns `true` if an event was delivered to at least one listener. - pub(crate) fn broadcast(&self) -> bool { - self.registry.broadcast() - } - - #[cfg(unix)] - pub(crate) fn storage(&self) -> &OsStorage { - &self.registry.storage - } -} - -pub(crate) fn globals() -> Pin<&'static Globals> -where - OsExtraData: 'static + Send + Sync + Init, - OsStorage: 'static + Send + Sync + Init, -{ - lazy_static! 
{ - static ref GLOBALS: Pin> = Box::pin(Globals { - extra: OsExtraData::init(), - registry: Registry::new(OsStorage::init()), - }); - } - - GLOBALS.as_ref() -} - -#[cfg(all(test, not(loom)))] -mod tests { - use super::*; - use crate::runtime::{self, Runtime}; - use crate::sync::{mpsc, oneshot}; - - use futures::future; - - #[test] - fn smoke() { - let mut rt = rt(); - rt.block_on(async move { - let registry = Registry::new(vec![ - EventInfo::default(), - EventInfo::default(), - EventInfo::default(), - ]); - - let (first_tx, first_rx) = mpsc::channel(3); - let (second_tx, second_rx) = mpsc::channel(3); - let (third_tx, third_rx) = mpsc::channel(3); - - registry.register_listener(0, first_tx); - registry.register_listener(1, second_tx); - registry.register_listener(2, third_tx); - - let (fire, wait) = oneshot::channel(); - - crate::spawn(async { - wait.await.expect("wait failed"); - - // Record some events which should get coalesced - registry.record_event(0); - registry.record_event(0); - registry.record_event(1); - registry.record_event(1); - registry.broadcast(); - - // Send subsequent signal - registry.record_event(0); - registry.broadcast(); - - drop(registry); - }); - - let _ = fire.send(()); - let all = future::join3(collect(first_rx), collect(second_rx), collect(third_rx)); - - let (first_results, second_results, third_results) = all.await; - assert_eq!(2, first_results.len()); - assert_eq!(1, second_results.len()); - assert_eq!(0, third_results.len()); - }); - } - - #[test] - #[should_panic = "invalid event_id: 1"] - fn register_panics_on_invalid_input() { - let registry = Registry::new(vec![EventInfo::default()]); - - let (tx, _) = mpsc::channel(1); - registry.register_listener(1, tx); - } - - #[test] - fn record_invalid_event_does_nothing() { - let registry = Registry::new(vec![EventInfo::default()]); - registry.record_event(42); - } - - #[test] - fn broadcast_cleans_up_disconnected_listeners() { - let mut rt = Runtime::new().unwrap(); - - rt.block_on(async { - let registry = Registry::new(vec![EventInfo::default()]); - - let (first_tx, first_rx) = mpsc::channel(1); - let (second_tx, second_rx) = mpsc::channel(1); - let (third_tx, third_rx) = mpsc::channel(1); - - registry.register_listener(0, first_tx); - registry.register_listener(0, second_tx); - registry.register_listener(0, third_tx); - - drop(first_rx); - drop(second_rx); - - let (fire, wait) = oneshot::channel(); - - crate::spawn(async { - wait.await.expect("wait failed"); - - registry.record_event(0); - registry.broadcast(); - - assert_eq!(1, registry.storage[0].recipients.lock().unwrap().len()); - drop(registry); - }); - - let _ = fire.send(()); - let results = collect(third_rx).await; - - assert_eq!(1, results.len()); - }); - } - - #[test] - fn broadcast_returns_if_at_least_one_event_fired() { - let registry = Registry::new(vec![EventInfo::default()]); - - registry.record_event(0); - assert_eq!(false, registry.broadcast()); - - let (first_tx, first_rx) = mpsc::channel(1); - let (second_tx, second_rx) = mpsc::channel(1); - - registry.register_listener(0, first_tx); - registry.register_listener(0, second_tx); - - registry.record_event(0); - assert_eq!(true, registry.broadcast()); - - drop(first_rx); - registry.record_event(0); - assert_eq!(false, registry.broadcast()); - - drop(second_rx); - } - - fn rt() -> Runtime { - runtime::Builder::new().basic_scheduler().build().unwrap() - } - - async fn collect(mut rx: crate::sync::mpsc::Receiver<()>) -> Vec<()> { - let mut ret = vec![]; - - while let Some(v) = rx.recv().await { - 
ret.push(v); - } - - ret - } -} diff --git a/third_party/rust/tokio-0.2.25/src/signal/unix.rs b/third_party/rust/tokio-0.2.25/src/signal/unix.rs deleted file mode 100644 index b46b15c99a66..000000000000 --- a/third_party/rust/tokio-0.2.25/src/signal/unix.rs +++ /dev/null @@ -1,513 +0,0 @@ -//! Unix-specific types for signal handling. -//! -//! This module is only defined on Unix platforms and contains the primary -//! `Signal` type for receiving notifications of signals. - -#![cfg(unix)] - -use crate::io::{AsyncRead, PollEvented}; -use crate::signal::registry::{globals, EventId, EventInfo, Globals, Init, Storage}; -use crate::sync::mpsc::{channel, Receiver}; - -use libc::c_int; -use mio_uds::UnixStream; -use std::io::{self, Error, ErrorKind, Write}; -use std::pin::Pin; -use std::sync::atomic::{AtomicBool, Ordering}; -use std::sync::Once; -use std::task::{Context, Poll}; - -pub(crate) type OsStorage = Vec; - -// Number of different unix signals -// (FreeBSD has 33) -const SIGNUM: usize = 33; - -impl Init for OsStorage { - fn init() -> Self { - (0..SIGNUM).map(|_| SignalInfo::default()).collect() - } -} - -impl Storage for OsStorage { - fn event_info(&self, id: EventId) -> Option<&EventInfo> { - self.get(id).map(|si| &si.event_info) - } - - fn for_each<'a, F>(&'a self, f: F) - where - F: FnMut(&'a EventInfo), - { - self.iter().map(|si| &si.event_info).for_each(f) - } -} - -#[derive(Debug)] -pub(crate) struct OsExtraData { - sender: UnixStream, - receiver: UnixStream, -} - -impl Init for OsExtraData { - fn init() -> Self { - let (receiver, sender) = UnixStream::pair().expect("failed to create UnixStream"); - - Self { sender, receiver } - } -} - -/// Represents the specific kind of signal to listen for. -#[derive(Debug, Clone, Copy)] -pub struct SignalKind(c_int); - -impl SignalKind { - /// Allows for listening to any valid OS signal. - /// - /// For example, this can be used for listening for platform-specific - /// signals. - /// ```rust,no_run - /// # use tokio::signal::unix::SignalKind; - /// # let signum = -1; - /// // let signum = libc::OS_SPECIFIC_SIGNAL; - /// let kind = SignalKind::from_raw(signum); - /// ``` - pub fn from_raw(signum: c_int) -> Self { - Self(signum) - } - - /// Represents the SIGALRM signal. - /// - /// On Unix systems this signal is sent when a real-time timer has expired. - /// By default, the process is terminated by this signal. - pub fn alarm() -> Self { - Self(libc::SIGALRM) - } - - /// Represents the SIGCHLD signal. - /// - /// On Unix systems this signal is sent when the status of a child process - /// has changed. By default, this signal is ignored. - pub fn child() -> Self { - Self(libc::SIGCHLD) - } - - /// Represents the SIGHUP signal. - /// - /// On Unix systems this signal is sent when the terminal is disconnected. - /// By default, the process is terminated by this signal. - pub fn hangup() -> Self { - Self(libc::SIGHUP) - } - - /// Represents the SIGINFO signal. - /// - /// On Unix systems this signal is sent to request a status update from the - /// process. By default, this signal is ignored. - #[cfg(any( - target_os = "dragonfly", - target_os = "freebsd", - target_os = "macos", - target_os = "netbsd", - target_os = "openbsd" - ))] - pub fn info() -> Self { - Self(libc::SIGINFO) - } - - /// Represents the SIGINT signal. - /// - /// On Unix systems this signal is sent to interrupt a program. - /// By default, the process is terminated by this signal. - pub fn interrupt() -> Self { - Self(libc::SIGINT) - } - - /// Represents the SIGIO signal. 
- /// - /// On Unix systems this signal is sent when I/O operations are possible - /// on some file descriptor. By default, this signal is ignored. - pub fn io() -> Self { - Self(libc::SIGIO) - } - - /// Represents the SIGPIPE signal. - /// - /// On Unix systems this signal is sent when the process attempts to write - /// to a pipe which has no reader. By default, the process is terminated by - /// this signal. - pub fn pipe() -> Self { - Self(libc::SIGPIPE) - } - - /// Represents the SIGQUIT signal. - /// - /// On Unix systems this signal is sent to issue a shutdown of the - /// process, after which the OS will dump the process core. - /// By default, the process is terminated by this signal. - pub fn quit() -> Self { - Self(libc::SIGQUIT) - } - - /// Represents the SIGTERM signal. - /// - /// On Unix systems this signal is sent to issue a shutdown of the - /// process. By default, the process is terminated by this signal. - pub fn terminate() -> Self { - Self(libc::SIGTERM) - } - - /// Represents the SIGUSR1 signal. - /// - /// On Unix systems this is a user defined signal. - /// By default, the process is terminated by this signal. - pub fn user_defined1() -> Self { - Self(libc::SIGUSR1) - } - - /// Represents the SIGUSR2 signal. - /// - /// On Unix systems this is a user defined signal. - /// By default, the process is terminated by this signal. - pub fn user_defined2() -> Self { - Self(libc::SIGUSR2) - } - - /// Represents the SIGWINCH signal. - /// - /// On Unix systems this signal is sent when the terminal window is resized. - /// By default, this signal is ignored. - pub fn window_change() -> Self { - Self(libc::SIGWINCH) - } -} - -pub(crate) struct SignalInfo { - event_info: EventInfo, - init: Once, - initialized: AtomicBool, -} - -impl Default for SignalInfo { - fn default() -> SignalInfo { - SignalInfo { - event_info: Default::default(), - init: Once::new(), - initialized: AtomicBool::new(false), - } - } -} - -/// Our global signal handler for all signals registered by this module. -/// -/// The purpose of this signal handler is to primarily: -/// -/// 1. Flag that our specific signal was received (e.g. store an atomic flag) -/// 2. Wake up driver tasks by writing a byte to a pipe -/// -/// Those two operations shoudl both be async-signal safe. -fn action(globals: Pin<&'static Globals>, signal: c_int) { - globals.record_event(signal as EventId); - - // Send a wakeup, ignore any errors (anything reasonably possible is - // full pipe and then it will wake up anyway). - let mut sender = &globals.sender; - drop(sender.write(&[1])); -} - -/// Enables this module to receive signal notifications for the `signal` -/// provided. -/// -/// This will register the signal handler if it hasn't already been registered, -/// returning any error along the way if that fails. 
-fn signal_enable(signal: c_int) -> io::Result<()> { - if signal < 0 || signal_hook_registry::FORBIDDEN.contains(&signal) { - return Err(Error::new( - ErrorKind::Other, - format!("Refusing to register signal {}", signal), - )); - } - - let globals = globals(); - let siginfo = match globals.storage().get(signal as EventId) { - Some(slot) => slot, - None => return Err(io::Error::new(io::ErrorKind::Other, "signal too large")), - }; - let mut registered = Ok(()); - siginfo.init.call_once(|| { - registered = unsafe { - signal_hook_registry::register(signal, move || action(globals, signal)).map(|_| ()) - }; - if registered.is_ok() { - siginfo.initialized.store(true, Ordering::Relaxed); - } - }); - registered?; - // If the call_once failed, it won't be retried on the next attempt to register the signal. In - // such case it is not run, registered is still `Ok(())`, initialized is still `false`. - if siginfo.initialized.load(Ordering::Relaxed) { - Ok(()) - } else { - Err(Error::new( - ErrorKind::Other, - "Failed to register signal handler", - )) - } -} - -#[derive(Debug)] -struct Driver { - wakeup: PollEvented, -} - -impl Driver { - fn poll(&mut self, cx: &mut Context<'_>) -> Poll<()> { - // Drain the data from the pipe and maintain interest in getting more - self.drain(cx); - // Broadcast any signals which were received - globals().broadcast(); - - Poll::Pending - } -} - -impl Driver { - fn new() -> io::Result { - // NB: We give each driver a "fresh" reciever file descriptor to avoid - // the issues described in alexcrichton/tokio-process#42. - // - // In the past we would reuse the actual receiver file descriptor and - // swallow any errors around double registration of the same descriptor. - // I'm not sure if the second (failed) registration simply doesn't end up - // receiving wake up notifications, or there could be some race condition - // when consuming readiness events, but having distinct descriptors for - // distinct PollEvented instances appears to mitigate this. - // - // Unfortunately we cannot just use a single global PollEvented instance - // either, since we can't compare Handles or assume they will always - // point to the exact same reactor. - let stream = globals().receiver.try_clone()?; - let wakeup = PollEvented::new(stream)?; - - Ok(Driver { wakeup }) - } - - /// Drain all data in the global receiver, ensuring we'll get woken up when - /// there is a write on the other end. - /// - /// We do *NOT* use the existence of any read bytes as evidence a signal was - /// received since the `pending` flags would have already been set if that - /// was the case. See - /// [#38](https://github.com/alexcrichton/tokio-signal/issues/38) for more - /// info. - fn drain(&mut self, cx: &mut Context<'_>) { - loop { - match Pin::new(&mut self.wakeup).poll_read(cx, &mut [0; 128]) { - Poll::Ready(Ok(0)) => panic!("EOF on self-pipe"), - Poll::Ready(Ok(_)) => {} - Poll::Ready(Err(e)) => panic!("Bad read on self-pipe: {}", e), - Poll::Pending => break, - } - } - } -} - -/// A stream of events for receiving a particular type of OS signal. -/// -/// In general signal handling on Unix is a pretty tricky topic, and this -/// structure is no exception! There are some important limitations to keep in -/// mind when using `Signal` streams: -/// -/// * Signals handling in Unix already necessitates coalescing signals -/// together sometimes. This `Signal` stream is also no exception here in -/// that it will also coalesce signals. 
That is, even if the signal handler -/// for this process runs multiple times, the `Signal` stream may only return -/// one signal notification. Specifically, before `poll` is called, all -/// signal notifications are coalesced into one item returned from `poll`. -/// Once `poll` has been called, however, a further signal is guaranteed to -/// be yielded as an item. -/// -/// Put another way, any element pulled off the returned stream corresponds to -/// *at least one* signal, but possibly more. -/// -/// * Signal handling in general is relatively inefficient. Although some -/// improvements are possible in this crate, it's recommended to not plan on -/// having millions of signal channels open. -/// -/// If you've got any questions about this feel free to open an issue on the -/// repo! New approaches to alleviate some of these limitations are always -/// appreciated! -/// -/// # Caveats -/// -/// The first time that a `Signal` instance is registered for a particular -/// signal kind, an OS signal-handler is installed which replaces the default -/// platform behavior when that signal is received, **for the duration of the -/// entire process**. -/// -/// For example, Unix systems will terminate a process by default when it -/// receives SIGINT. But, when a `Signal` instance is created to listen for -/// this signal, the next SIGINT that arrives will be translated to a stream -/// event, and the process will continue to execute. **Even if this `Signal` -/// instance is dropped, subsequent SIGINT deliveries will end up captured by -/// Tokio, and the default platform behavior will NOT be reset**. -/// -/// Thus, applications should take care to ensure the expected signal behavior -/// occurs as expected after listening for specific signals. -/// -/// # Examples -/// -/// Wait for SIGHUP -/// -/// ```rust,no_run -/// use tokio::signal::unix::{signal, SignalKind}; -/// -/// #[tokio::main] -/// async fn main() -> Result<(), Box> { -/// // An infinite stream of hangup signals. -/// let mut stream = signal(SignalKind::hangup())?; -/// -/// // Print whenever a HUP signal is received -/// loop { -/// stream.recv().await; -/// println!("got signal HUP"); -/// } -/// } -/// ``` -#[must_use = "streams do nothing unless polled"] -#[derive(Debug)] -pub struct Signal { - driver: Driver, - rx: Receiver<()>, -} - -/// Creates a new stream which will receive notifications when the current -/// process receives the specified signal `kind`. -/// -/// This function will create a new stream which binds to the default reactor. -/// The `Signal` stream is an infinite stream which will receive -/// notifications whenever a signal is received. More documentation can be -/// found on `Signal` itself, but to reiterate: -/// -/// * Signals may be coalesced beyond what the kernel already does. -/// * Once a signal handler is registered with the process the underlying -/// libc signal handler is never unregistered. -/// -/// A `Signal` stream can be created for a particular signal number -/// multiple times. When a signal is received then all the associated -/// channels will receive the signal notification. -/// -/// # Errors -/// -/// * If the lower-level C functions fail for some reason. -/// * If the previous initialization of this specific signal failed. 
-/// * If the signal is one of -/// [`signal_hook::FORBIDDEN`](fn@signal_hook_registry::register#panics) -pub fn signal(kind: SignalKind) -> io::Result { - let signal = kind.0; - - // Turn the signal delivery on once we are ready for it - signal_enable(signal)?; - - // Ensure there's a driver for our associated event loop processing - // signals. - let driver = Driver::new()?; - - // One wakeup in a queue is enough, no need for us to buffer up any - // more. - let (tx, rx) = channel(1); - globals().register_listener(signal as EventId, tx); - - Ok(Signal { driver, rx }) -} - -impl Signal { - /// Receives the next signal notification event. - /// - /// `None` is returned if no more events can be received by this stream. - /// - /// # Examples - /// - /// Wait for SIGHUP - /// - /// ```rust,no_run - /// use tokio::signal::unix::{signal, SignalKind}; - /// - /// #[tokio::main] - /// async fn main() -> Result<(), Box> { - /// // An infinite stream of hangup signals. - /// let mut stream = signal(SignalKind::hangup())?; - /// - /// // Print whenever a HUP signal is received - /// loop { - /// stream.recv().await; - /// println!("got signal HUP"); - /// } - /// } - /// ``` - pub async fn recv(&mut self) -> Option<()> { - use crate::future::poll_fn; - poll_fn(|cx| self.poll_recv(cx)).await - } - - /// Polls to receive the next signal notification event, outside of an - /// `async` context. - /// - /// `None` is returned if no more events can be received by this stream. - /// - /// # Examples - /// - /// Polling from a manually implemented future - /// - /// ```rust,no_run - /// use std::pin::Pin; - /// use std::future::Future; - /// use std::task::{Context, Poll}; - /// use tokio::signal::unix::Signal; - /// - /// struct MyFuture { - /// signal: Signal, - /// } - /// - /// impl Future for MyFuture { - /// type Output = Option<()>; - /// - /// fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - /// println!("polling MyFuture"); - /// self.signal.poll_recv(cx) - /// } - /// } - /// ``` - pub fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll> { - let _ = self.driver.poll(cx); - self.rx.poll_recv(cx) - } -} - -cfg_stream! { - impl crate::stream::Stream for Signal { - type Item = (); - - fn poll_next(mut self: std::pin::Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.poll_recv(cx) - } - } -} - -pub(crate) fn ctrl_c() -> io::Result { - signal(SignalKind::interrupt()) -} - -#[cfg(all(test, not(loom)))] -mod tests { - use super::*; - - #[test] - fn signal_enable_error_on_invalid_input() { - signal_enable(-1).unwrap_err(); - } - - #[test] - fn signal_enable_error_on_forbidden_input() { - signal_enable(signal_hook_registry::FORBIDDEN[0]).unwrap_err(); - } -} diff --git a/third_party/rust/tokio-0.2.25/src/signal/windows.rs b/third_party/rust/tokio-0.2.25/src/signal/windows.rs deleted file mode 100644 index f55e504b00f4..000000000000 --- a/third_party/rust/tokio-0.2.25/src/signal/windows.rs +++ /dev/null @@ -1,297 +0,0 @@ -//! Windows-specific types for signal handling. -//! -//! This module is only defined on Windows and contains the primary `Event` type -//! for receiving notifications of events. These events are listened for via the -//! `SetConsoleCtrlHandler` function which receives events of the type -//! 
`CTRL_C_EVENT` and `CTRL_BREAK_EVENT` - -#![cfg(windows)] - -use crate::signal::registry::{globals, EventId, EventInfo, Init, Storage}; -use crate::sync::mpsc::{channel, Receiver}; - -use std::convert::TryFrom; -use std::io; -use std::sync::Once; -use std::task::{Context, Poll}; -use winapi::shared::minwindef::*; -use winapi::um::consoleapi::SetConsoleCtrlHandler; -use winapi::um::wincon::*; - -#[derive(Debug)] -pub(crate) struct OsStorage { - ctrl_c: EventInfo, - ctrl_break: EventInfo, -} - -impl Init for OsStorage { - fn init() -> Self { - Self { - ctrl_c: EventInfo::default(), - ctrl_break: EventInfo::default(), - } - } -} - -impl Storage for OsStorage { - fn event_info(&self, id: EventId) -> Option<&EventInfo> { - match DWORD::try_from(id) { - Ok(CTRL_C_EVENT) => Some(&self.ctrl_c), - Ok(CTRL_BREAK_EVENT) => Some(&self.ctrl_break), - _ => None, - } - } - - fn for_each<'a, F>(&'a self, mut f: F) - where - F: FnMut(&'a EventInfo), - { - f(&self.ctrl_c); - f(&self.ctrl_break); - } -} - -#[derive(Debug)] -pub(crate) struct OsExtraData {} - -impl Init for OsExtraData { - fn init() -> Self { - Self {} - } -} - -/// Stream of events discovered via `SetConsoleCtrlHandler`. -/// -/// This structure can be used to listen for events of the type `CTRL_C_EVENT` -/// and `CTRL_BREAK_EVENT`. The `Stream` trait is implemented for this struct -/// and will resolve for each notification received by the process. Note that -/// there are few limitations with this as well: -/// -/// * A notification to this process notifies *all* `Event` streams for that -/// event type. -/// * Notifications to an `Event` stream **are coalesced** if they aren't -/// processed quickly enough. This means that if two notifications are -/// received back-to-back, then the stream may only receive one item about the -/// two notifications. -#[must_use = "streams do nothing unless polled"] -#[derive(Debug)] -pub(crate) struct Event { - rx: Receiver<()>, -} - -pub(crate) fn ctrl_c() -> io::Result { - Event::new(CTRL_C_EVENT) -} - -impl Event { - fn new(signum: DWORD) -> io::Result { - global_init()?; - - let (tx, rx) = channel(1); - globals().register_listener(signum as EventId, tx); - - Ok(Event { rx }) - } - - pub(crate) async fn recv(&mut self) -> Option<()> { - use crate::future::poll_fn; - poll_fn(|cx| self.rx.poll_recv(cx)).await - } -} - -fn global_init() -> io::Result<()> { - static INIT: Once = Once::new(); - - let mut init = None; - - INIT.call_once(|| unsafe { - let rc = SetConsoleCtrlHandler(Some(handler), TRUE); - let ret = if rc == 0 { - Err(io::Error::last_os_error()) - } else { - Ok(()) - }; - - init = Some(ret); - }); - - init.unwrap_or_else(|| Ok(())) -} - -unsafe extern "system" fn handler(ty: DWORD) -> BOOL { - let globals = globals(); - globals.record_event(ty as EventId); - - // According to https://docs.microsoft.com/en-us/windows/console/handlerroutine - // the handler routine is always invoked in a new thread, thus we don't - // have the same restrictions as in Unix signal handlers, meaning we can - // go ahead and perform the broadcast here. - if globals.broadcast() { - TRUE - } else { - // No one is listening for this notification any more - // let the OS fire the next (possibly the default) handler. - FALSE - } -} - -/// Represents a stream which receives "ctrl-break" notifications sent to the process -/// via `SetConsoleCtrlHandler`. -/// -/// A notification to this process notifies *all* streams listening for -/// this event. 
Moreover, the notifications **are coalesced** if they aren't processed -/// quickly enough. This means that if two notifications are received back-to-back, -/// then the stream may only receive one item about the two notifications. -#[must_use = "streams do nothing unless polled"] -#[derive(Debug)] -pub struct CtrlBreak { - inner: Event, -} - -impl CtrlBreak { - /// Receives the next signal notification event. - /// - /// `None` is returned if no more events can be received by this stream. - /// - /// # Examples - /// - /// ```rust,no_run - /// use tokio::signal::windows::ctrl_break; - /// - /// #[tokio::main] - /// async fn main() -> Result<(), Box> { - /// // An infinite stream of CTRL-BREAK events. - /// let mut stream = ctrl_break()?; - /// - /// // Print whenever a CTRL-BREAK event is received - /// loop { - /// stream.recv().await; - /// println!("got signal CTRL-BREAK"); - /// } - /// } - /// ``` - pub async fn recv(&mut self) -> Option<()> { - use crate::future::poll_fn; - poll_fn(|cx| self.poll_recv(cx)).await - } - - /// Polls to receive the next signal notification event, outside of an - /// `async` context. - /// - /// `None` is returned if no more events can be received by this stream. - /// - /// # Examples - /// - /// Polling from a manually implemented future - /// - /// ```rust,no_run - /// use std::pin::Pin; - /// use std::future::Future; - /// use std::task::{Context, Poll}; - /// use tokio::signal::windows::CtrlBreak; - /// - /// struct MyFuture { - /// ctrl_break: CtrlBreak, - /// } - /// - /// impl Future for MyFuture { - /// type Output = Option<()>; - /// - /// fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - /// println!("polling MyFuture"); - /// self.ctrl_break.poll_recv(cx) - /// } - /// } - /// ``` - pub fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll> { - self.inner.rx.poll_recv(cx) - } -} - -cfg_stream! { - impl crate::stream::Stream for CtrlBreak { - type Item = (); - - fn poll_next(mut self: std::pin::Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.poll_recv(cx) - } - } -} - -/// Creates a new stream which receives "ctrl-break" notifications sent to the -/// process. -/// -/// # Examples -/// -/// ```rust,no_run -/// use tokio::signal::windows::ctrl_break; -/// -/// #[tokio::main] -/// async fn main() -> Result<(), Box> { -/// // An infinite stream of CTRL-BREAK events. -/// let mut stream = ctrl_break()?; -/// -/// // Print whenever a CTRL-BREAK event is received -/// loop { -/// stream.recv().await; -/// println!("got signal CTRL-BREAK"); -/// } -/// } -/// ``` -pub fn ctrl_break() -> io::Result { - Event::new(CTRL_BREAK_EVENT).map(|inner| CtrlBreak { inner }) -} - -#[cfg(all(test, not(loom)))] -mod tests { - use super::*; - use crate::runtime::Runtime; - use crate::stream::StreamExt; - - use tokio_test::{assert_ok, assert_pending, assert_ready_ok, task}; - - #[test] - fn ctrl_c() { - let rt = rt(); - - rt.enter(|| { - let mut ctrl_c = task::spawn(crate::signal::ctrl_c()); - - assert_pending!(ctrl_c.poll()); - - // Windows doesn't have a good programmatic way of sending events - // like sending signals on Unix, so we'll stub out the actual OS - // integration and test that our handling works. 
- unsafe { - super::handler(CTRL_C_EVENT); - } - - assert_ready_ok!(ctrl_c.poll()); - }); - } - - #[test] - fn ctrl_break() { - let mut rt = rt(); - - rt.block_on(async { - let mut ctrl_break = assert_ok!(super::ctrl_break()); - - // Windows doesn't have a good programmatic way of sending events - // like sending signals on Unix, so we'll stub out the actual OS - // integration and test that our handling works. - unsafe { - super::handler(CTRL_BREAK_EVENT); - } - - ctrl_break.next().await.unwrap(); - }); - } - - fn rt() -> Runtime { - crate::runtime::Builder::new() - .basic_scheduler() - .build() - .unwrap() - } -} diff --git a/third_party/rust/tokio-0.2.25/src/stream/all.rs b/third_party/rust/tokio-0.2.25/src/stream/all.rs deleted file mode 100644 index 615665d27051..000000000000 --- a/third_party/rust/tokio-0.2.25/src/stream/all.rs +++ /dev/null @@ -1,45 +0,0 @@ -use crate::stream::Stream; - -use core::future::Future; -use core::pin::Pin; -use core::task::{Context, Poll}; - -/// Future for the [`all`](super::StreamExt::all) method. -#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct AllFuture<'a, St: ?Sized, F> { - stream: &'a mut St, - f: F, -} - -impl<'a, St: ?Sized, F> AllFuture<'a, St, F> { - pub(super) fn new(stream: &'a mut St, f: F) -> Self { - Self { stream, f } - } -} - -impl Unpin for AllFuture<'_, St, F> {} - -impl Future for AllFuture<'_, St, F> -where - St: ?Sized + Stream + Unpin, - F: FnMut(St::Item) -> bool, -{ - type Output = bool; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let next = futures_core::ready!(Pin::new(&mut self.stream).poll_next(cx)); - - match next { - Some(v) => { - if !(&mut self.f)(v) { - Poll::Ready(false) - } else { - cx.waker().wake_by_ref(); - Poll::Pending - } - } - None => Poll::Ready(true), - } - } -} diff --git a/third_party/rust/tokio-0.2.25/src/stream/any.rs b/third_party/rust/tokio-0.2.25/src/stream/any.rs deleted file mode 100644 index f2ecad5edb13..000000000000 --- a/third_party/rust/tokio-0.2.25/src/stream/any.rs +++ /dev/null @@ -1,45 +0,0 @@ -use crate::stream::Stream; - -use core::future::Future; -use core::pin::Pin; -use core::task::{Context, Poll}; - -/// Future for the [`any`](super::StreamExt::any) method. 
-#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct AnyFuture<'a, St: ?Sized, F> { - stream: &'a mut St, - f: F, -} - -impl<'a, St: ?Sized, F> AnyFuture<'a, St, F> { - pub(super) fn new(stream: &'a mut St, f: F) -> Self { - Self { stream, f } - } -} - -impl Unpin for AnyFuture<'_, St, F> {} - -impl Future for AnyFuture<'_, St, F> -where - St: ?Sized + Stream + Unpin, - F: FnMut(St::Item) -> bool, -{ - type Output = bool; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let next = futures_core::ready!(Pin::new(&mut self.stream).poll_next(cx)); - - match next { - Some(v) => { - if (&mut self.f)(v) { - Poll::Ready(true) - } else { - cx.waker().wake_by_ref(); - Poll::Pending - } - } - None => Poll::Ready(false), - } - } -} diff --git a/third_party/rust/tokio-0.2.25/src/stream/collect.rs b/third_party/rust/tokio-0.2.25/src/stream/collect.rs deleted file mode 100644 index 46494287cd89..000000000000 --- a/third_party/rust/tokio-0.2.25/src/stream/collect.rs +++ /dev/null @@ -1,246 +0,0 @@ -use crate::stream::Stream; - -use bytes::{Buf, BufMut, Bytes, BytesMut}; -use core::future::Future; -use core::mem; -use core::pin::Pin; -use core::task::{Context, Poll}; -use pin_project_lite::pin_project; - -// Do not export this struct until `FromStream` can be unsealed. -pin_project! { - /// Future returned by the [`collect`](super::StreamExt::collect) method. - #[must_use = "streams do nothing unless polled"] - #[derive(Debug)] - pub struct Collect - where - T: Stream, - U: FromStream, - { - #[pin] - stream: T, - collection: U::Collection, - } -} - -/// Convert from a [`Stream`](crate::stream::Stream). -/// -/// This trait is not intended to be used directly. Instead, call -/// [`StreamExt::collect()`](super::StreamExt::collect). -/// -/// # Implementing -/// -/// Currently, this trait may not be implemented by third parties. The trait is -/// sealed in order to make changes in the future. Stabilization is pending -/// enhancements to the Rust language. 
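A combined usage sketch for the `all`, `any`, and `collect` combinators removed here (a minimal example, not part of the patch, assuming a tokio 0.2 build with its `full` feature set so `tokio::stream` and `#[tokio::main]` are available):

use tokio::stream::{self, StreamExt};

#[tokio::main]
async fn main() {
    // `all` short-circuits to false on the first failing item; `any`
    // short-circuits to true on the first match. Both yield back to the
    // scheduler between items instead of looping inside a single poll.
    assert!(stream::iter(vec![2u32, 4, 6]).all(|v| v % 2 == 0).await);
    assert!(stream::iter(vec![1u32, 3, 4]).any(|v| v % 2 == 0).await);

    // `collect` drives the stream to completion through `FromStream`;
    // the Vec implementation pre-allocates from the stream's size hint.
    let nums: Vec<u32> = stream::iter(vec![1u32, 2, 3]).collect().await;
    assert_eq!(nums, vec![1, 2, 3]);

    // Collecting a stream of Results stops at the first Err and returns it.
    let res: Result<Vec<u32>, &str> =
        stream::iter(vec![Ok(1u32), Err("boom"), Ok(3)]).collect().await;
    assert!(res.is_err());
}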
-pub trait FromStream: sealed::FromStreamPriv {} - -impl Collect -where - T: Stream, - U: FromStream, -{ - pub(super) fn new(stream: T) -> Collect { - let (lower, upper) = stream.size_hint(); - let collection = U::initialize(lower, upper); - - Collect { stream, collection } - } -} - -impl Future for Collect -where - T: Stream, - U: FromStream, -{ - type Output = U; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - use Poll::Ready; - - loop { - let mut me = self.as_mut().project(); - - let item = match ready!(me.stream.poll_next(cx)) { - Some(item) => item, - None => { - return Ready(U::finalize(&mut me.collection)); - } - }; - - if !U::extend(&mut me.collection, item) { - return Ready(U::finalize(&mut me.collection)); - } - } - } -} - -// ===== FromStream implementations - -impl FromStream<()> for () {} - -impl sealed::FromStreamPriv<()> for () { - type Collection = (); - - fn initialize(_lower: usize, _upper: Option) {} - - fn extend(_collection: &mut (), _item: ()) -> bool { - true - } - - fn finalize(_collection: &mut ()) {} -} - -impl> FromStream for String {} - -impl> sealed::FromStreamPriv for String { - type Collection = String; - - fn initialize(_lower: usize, _upper: Option) -> String { - String::new() - } - - fn extend(collection: &mut String, item: T) -> bool { - collection.push_str(item.as_ref()); - true - } - - fn finalize(collection: &mut String) -> String { - mem::replace(collection, String::new()) - } -} - -impl FromStream for Vec {} - -impl sealed::FromStreamPriv for Vec { - type Collection = Vec; - - fn initialize(lower: usize, _upper: Option) -> Vec { - Vec::with_capacity(lower) - } - - fn extend(collection: &mut Vec, item: T) -> bool { - collection.push(item); - true - } - - fn finalize(collection: &mut Vec) -> Vec { - mem::replace(collection, vec![]) - } -} - -impl FromStream for Box<[T]> {} - -impl sealed::FromStreamPriv for Box<[T]> { - type Collection = Vec; - - fn initialize(lower: usize, upper: Option) -> Vec { - as sealed::FromStreamPriv>::initialize(lower, upper) - } - - fn extend(collection: &mut Vec, item: T) -> bool { - as sealed::FromStreamPriv>::extend(collection, item) - } - - fn finalize(collection: &mut Vec) -> Box<[T]> { - as sealed::FromStreamPriv>::finalize(collection).into_boxed_slice() - } -} - -impl FromStream> for Result where U: FromStream {} - -impl sealed::FromStreamPriv> for Result -where - U: FromStream, -{ - type Collection = Result; - - fn initialize(lower: usize, upper: Option) -> Result { - Ok(U::initialize(lower, upper)) - } - - fn extend(collection: &mut Self::Collection, item: Result) -> bool { - assert!(collection.is_ok()); - match item { - Ok(item) => { - let collection = collection.as_mut().ok().expect("invalid state"); - U::extend(collection, item) - } - Err(err) => { - *collection = Err(err); - false - } - } - } - - fn finalize(collection: &mut Self::Collection) -> Result { - if let Ok(collection) = collection.as_mut() { - Ok(U::finalize(collection)) - } else { - let res = mem::replace(collection, Ok(U::initialize(0, Some(0)))); - - if let Err(err) = res { - Err(err) - } else { - unreachable!(); - } - } - } -} - -impl FromStream for Bytes {} - -impl sealed::FromStreamPriv for Bytes { - type Collection = BytesMut; - - fn initialize(_lower: usize, _upper: Option) -> BytesMut { - BytesMut::new() - } - - fn extend(collection: &mut BytesMut, item: T) -> bool { - collection.put(item); - true - } - - fn finalize(collection: &mut BytesMut) -> Bytes { - mem::replace(collection, BytesMut::new()).freeze() - } -} - -impl 
FromStream for BytesMut {} - -impl sealed::FromStreamPriv for BytesMut { - type Collection = BytesMut; - - fn initialize(_lower: usize, _upper: Option) -> BytesMut { - BytesMut::new() - } - - fn extend(collection: &mut BytesMut, item: T) -> bool { - collection.put(item); - true - } - - fn finalize(collection: &mut BytesMut) -> BytesMut { - mem::replace(collection, BytesMut::new()) - } -} - -pub(crate) mod sealed { - #[doc(hidden)] - pub trait FromStreamPriv { - /// Intermediate type used during collection process - type Collection; - - /// Initialize the collection - fn initialize(lower: usize, upper: Option) -> Self::Collection; - - /// Extend the collection with the received item - /// - /// Return `true` to continue streaming, `false` complete collection. - fn extend(collection: &mut Self::Collection, item: T) -> bool; - - /// Finalize collection into target type. - fn finalize(collection: &mut Self::Collection) -> Self; - } -} diff --git a/third_party/rust/tokio-0.2.25/src/stream/next.rs b/third_party/rust/tokio-0.2.25/src/stream/next.rs deleted file mode 100644 index 3909c0c23353..000000000000 --- a/third_party/rust/tokio-0.2.25/src/stream/next.rs +++ /dev/null @@ -1,28 +0,0 @@ -use crate::stream::Stream; - -use core::future::Future; -use core::pin::Pin; -use core::task::{Context, Poll}; - -/// Future for the [`next`](super::StreamExt::next) method. -#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct Next<'a, St: ?Sized> { - stream: &'a mut St, -} - -impl Unpin for Next<'_, St> {} - -impl<'a, St: ?Sized> Next<'a, St> { - pub(super) fn new(stream: &'a mut St) -> Self { - Next { stream } - } -} - -impl Future for Next<'_, St> { - type Output = Option; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - Pin::new(&mut self.stream).poll_next(cx) - } -} diff --git a/third_party/rust/tokio-0.2.25/src/stream/timeout.rs b/third_party/rust/tokio-0.2.25/src/stream/timeout.rs deleted file mode 100644 index b8a2024f6a1e..000000000000 --- a/third_party/rust/tokio-0.2.25/src/stream/timeout.rs +++ /dev/null @@ -1,65 +0,0 @@ -use crate::stream::{Fuse, Stream}; -use crate::time::{Delay, Elapsed, Instant}; - -use core::future::Future; -use core::pin::Pin; -use core::task::{Context, Poll}; -use pin_project_lite::pin_project; -use std::time::Duration; - -pin_project! { - /// Stream returned by the [`timeout`](super::StreamExt::timeout) method. 
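A short sketch of `next` and `timeout` in use (again an illustration rather than part of the patch, assuming a tokio 0.2 build with its `full` feature set; the mpsc channel is only a stand-in for a stream that stays pending):

use std::time::Duration;
use tokio::stream::{self, StreamExt};
use tokio::sync::mpsc;

#[tokio::main]
async fn main() {
    // `next` resolves to Some(item) until the stream is exhausted, then None.
    let mut items = stream::iter(vec![1u32, 2]);
    assert_eq!(items.next().await, Some(1));
    assert_eq!(items.next().await, Some(2));
    assert_eq!(items.next().await, None);

    // `timeout` wraps every item in a Result. Keeping `_tx` alive but idle
    // leaves the receiver pending, so the deadline elapses and the stream
    // yields Err(Elapsed) instead of blocking forever.
    let (_tx, rx) = mpsc::channel::<u32>(1);
    let mut slow = rx.timeout(Duration::from_millis(10));
    assert!(slow.next().await.unwrap().is_err());
}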
- #[must_use = "streams do nothing unless polled"] - #[derive(Debug)] - pub struct Timeout { - #[pin] - stream: Fuse, - deadline: Delay, - duration: Duration, - poll_deadline: bool, - } -} - -impl Timeout { - pub(super) fn new(stream: S, duration: Duration) -> Self { - let next = Instant::now() + duration; - let deadline = Delay::new_timeout(next, duration); - - Timeout { - stream: Fuse::new(stream), - deadline, - duration, - poll_deadline: true, - } - } -} - -impl Stream for Timeout { - type Item = Result; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - match self.as_mut().project().stream.poll_next(cx) { - Poll::Ready(v) => { - if v.is_some() { - let next = Instant::now() + self.duration; - self.as_mut().project().deadline.reset(next); - *self.as_mut().project().poll_deadline = true; - } - return Poll::Ready(v.map(Ok)); - } - Poll::Pending => {} - }; - - if self.poll_deadline { - ready!(Pin::new(self.as_mut().project().deadline).poll(cx)); - *self.as_mut().project().poll_deadline = false; - return Poll::Ready(Some(Err(Elapsed::new()))); - } - - Poll::Pending - } - - fn size_hint(&self) -> (usize, Option) { - self.stream.size_hint() - } -} diff --git a/third_party/rust/tokio-0.2.25/src/stream/try_next.rs b/third_party/rust/tokio-0.2.25/src/stream/try_next.rs deleted file mode 100644 index 59e0eb1a4142..000000000000 --- a/third_party/rust/tokio-0.2.25/src/stream/try_next.rs +++ /dev/null @@ -1,30 +0,0 @@ -use crate::stream::{Next, Stream}; - -use core::future::Future; -use core::pin::Pin; -use core::task::{Context, Poll}; - -/// Future for the [`try_next`](super::StreamExt::try_next) method. -#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct TryNext<'a, St: ?Sized> { - inner: Next<'a, St>, -} - -impl Unpin for TryNext<'_, St> {} - -impl<'a, St: ?Sized> TryNext<'a, St> { - pub(super) fn new(stream: &'a mut St) -> Self { - Self { - inner: Next::new(stream), - } - } -} - -impl> + Unpin> Future for TryNext<'_, St> { - type Output = Result, E>; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - Pin::new(&mut self.inner).poll(cx).map(Option::transpose) - } -} diff --git a/third_party/rust/tokio-0.2.25/src/sync/barrier.rs b/third_party/rust/tokio-0.2.25/src/sync/barrier.rs deleted file mode 100644 index 628633493a9b..000000000000 --- a/third_party/rust/tokio-0.2.25/src/sync/barrier.rs +++ /dev/null @@ -1,136 +0,0 @@ -use crate::sync::watch; - -use std::sync::Mutex; - -/// A barrier enables multiple threads to synchronize the beginning of some computation. -/// -/// ``` -/// # #[tokio::main] -/// # async fn main() { -/// use tokio::sync::Barrier; -/// -/// use futures::future::join_all; -/// use std::sync::Arc; -/// -/// let mut handles = Vec::with_capacity(10); -/// let barrier = Arc::new(Barrier::new(10)); -/// for _ in 0..10 { -/// let c = barrier.clone(); -/// // The same messages will be printed together. -/// // You will NOT see any interleaving. 
-/// handles.push(async move { -/// println!("before wait"); -/// let wr = c.wait().await; -/// println!("after wait"); -/// wr -/// }); -/// } -/// // Will not resolve until all "before wait" messages have been printed -/// let wrs = join_all(handles).await; -/// // Exactly one barrier will resolve as the "leader" -/// assert_eq!(wrs.into_iter().filter(|wr| wr.is_leader()).count(), 1); -/// # } -/// ``` -#[derive(Debug)] -pub struct Barrier { - state: Mutex, - wait: watch::Receiver, - n: usize, -} - -#[derive(Debug)] -struct BarrierState { - waker: watch::Sender, - arrived: usize, - generation: usize, -} - -impl Barrier { - /// Creates a new barrier that can block a given number of threads. - /// - /// A barrier will block `n`-1 threads which call [`Barrier::wait`] and then wake up all - /// threads at once when the `n`th thread calls `wait`. - pub fn new(mut n: usize) -> Barrier { - let (waker, wait) = crate::sync::watch::channel(0); - - if n == 0 { - // if n is 0, it's not clear what behavior the user wants. - // in std::sync::Barrier, an n of 0 exhibits the same behavior as n == 1, where every - // .wait() immediately unblocks, so we adopt that here as well. - n = 1; - } - - Barrier { - state: Mutex::new(BarrierState { - waker, - arrived: 0, - generation: 1, - }), - n, - wait, - } - } - - /// Does not resolve until all tasks have rendezvoused here. - /// - /// Barriers are re-usable after all threads have rendezvoused once, and can - /// be used continuously. - /// - /// A single (arbitrary) future will receive a [`BarrierWaitResult`] that returns `true` from - /// [`BarrierWaitResult::is_leader`] when returning from this function, and all other threads - /// will receive a result that will return `false` from `is_leader`. - pub async fn wait(&self) -> BarrierWaitResult { - // NOTE: we are taking a _synchronous_ lock here. - // It is okay to do so because the critical section is fast and never yields, so it cannot - // deadlock even if another future is concurrently holding the lock. - // It is _desireable_ to do so as synchronous Mutexes are, at least in theory, faster than - // the asynchronous counter-parts, so we should use them where possible [citation needed]. - // NOTE: the extra scope here is so that the compiler doesn't think `state` is held across - // a yield point, and thus marks the returned future as !Send. - let generation = { - let mut state = self.state.lock().unwrap(); - let generation = state.generation; - state.arrived += 1; - if state.arrived == self.n { - // we are the leader for this generation - // wake everyone, increment the generation, and return - state - .waker - .broadcast(state.generation) - .expect("there is at least one receiver"); - state.arrived = 0; - state.generation += 1; - return BarrierWaitResult(true); - } - - generation - }; - - // we're going to have to wait for the last of the generation to arrive - let mut wait = self.wait.clone(); - - loop { - // note that the first time through the loop, this _will_ yield a generation - // immediately, since we cloned a receiver that has never seen any values. - if wait.recv().await.expect("sender hasn't been closed") >= generation { - break; - } - } - - BarrierWaitResult(false) - } -} - -/// A `BarrierWaitResult` is returned by `wait` when all threads in the `Barrier` have rendezvoused. -#[derive(Debug, Clone)] -pub struct BarrierWaitResult(bool); - -impl BarrierWaitResult { - /// Returns `true` if this thread from wait is the "leader thread". 
- /// - /// Only one thread will have `true` returned from their result, all other threads will have - /// `false` returned. - pub fn is_leader(&self) -> bool { - self.0 - } -} diff --git a/third_party/rust/tokio-0.2.25/src/sync/batch_semaphore.rs b/third_party/rust/tokio-0.2.25/src/sync/batch_semaphore.rs deleted file mode 100644 index 070cd2010c4a..000000000000 --- a/third_party/rust/tokio-0.2.25/src/sync/batch_semaphore.rs +++ /dev/null @@ -1,561 +0,0 @@ -//! # Implementation Details -//! -//! The semaphore is implemented using an intrusive linked list of waiters. An -//! atomic counter tracks the number of available permits. If the semaphore does -//! not contain the required number of permits, the task attempting to acquire -//! permits places its waker at the end of a queue. When new permits are made -//! available (such as by releasing an initial acquisition), they are assigned -//! to the task at the front of the queue, waking that task if its requested -//! number of permits is met. -//! -//! Because waiters are enqueued at the back of the linked list and dequeued -//! from the front, the semaphore is fair. Tasks trying to acquire large numbers -//! of permits at a time will always be woken eventually, even if many other -//! tasks are acquiring smaller numbers of permits. This means that in a -//! use-case like tokio's read-write lock, writers will not be starved by -//! readers. -use crate::loom::cell::UnsafeCell; -use crate::loom::sync::atomic::AtomicUsize; -use crate::loom::sync::{Mutex, MutexGuard}; -use crate::util::linked_list::{self, LinkedList}; - -use std::future::Future; -use std::marker::PhantomPinned; -use std::pin::Pin; -use std::ptr::NonNull; -use std::sync::atomic::Ordering::*; -use std::task::Poll::*; -use std::task::{Context, Poll, Waker}; -use std::{cmp, fmt}; - -/// An asynchronous counting semaphore which permits waiting on multiple permits at once. -pub(crate) struct Semaphore { - waiters: Mutex, - /// The current number of available permits in the semaphore. - permits: AtomicUsize, -} - -struct Waitlist { - queue: LinkedList, - closed: bool, -} - -/// Error returned by `Semaphore::try_acquire`. -#[derive(Debug)] -pub(crate) enum TryAcquireError { - Closed, - NoPermits, -} -/// Error returned by `Semaphore::acquire`. -#[derive(Debug)] -pub(crate) struct AcquireError(()); - -pub(crate) struct Acquire<'a> { - node: Waiter, - semaphore: &'a Semaphore, - num_permits: u32, - queued: bool, -} - -/// An entry in the wait queue. -struct Waiter { - /// The current state of the waiter. - /// - /// This is either the number of remaining permits required by - /// the waiter, or a flag indicating that the waiter is not yet queued. - state: AtomicUsize, - - /// The waker to notify the task awaiting permits. - /// - /// # Safety - /// - /// This may only be accessed while the wait queue is locked. - waker: UnsafeCell>, - - /// Intrusive linked-list pointers. - /// - /// # Safety - /// - /// This may only be accessed while the wait queue is locked. - /// - /// TODO: Ideally, we would be able to use loom to enforce that - /// this isn't accessed concurrently. However, it is difficult to - /// use a `UnsafeCell` here, since the `Link` trait requires _returning_ - /// references to `Pointers`, and `UnsafeCell` requires that checked access - /// take place inside a closure. We should consider changing `Pointers` to - /// use `UnsafeCell` internally. - pointers: linked_list::Pointers, - - /// Should not be `Unpin`. 
- _p: PhantomPinned, -} - -impl Semaphore { - /// The maximum number of permits which a semaphore can hold. - /// - /// Note that this reserves three bits of flags in the permit counter, but - /// we only actually use one of them. However, the previous semaphore - /// implementation used three bits, so we will continue to reserve them to - /// avoid a breaking change if additional flags need to be aadded in the - /// future. - pub(crate) const MAX_PERMITS: usize = std::usize::MAX >> 3; - const CLOSED: usize = 1; - const PERMIT_SHIFT: usize = 1; - - /// Creates a new semaphore with the initial number of permits - /// - /// Maximum number of permits on 32-bit platforms is `1<<29`. - pub(crate) fn new(permits: usize) -> Self { - assert!( - permits <= Self::MAX_PERMITS, - "a semaphore may not have more than MAX_PERMITS permits ({})", - Self::MAX_PERMITS - ); - Self { - permits: AtomicUsize::new(permits << Self::PERMIT_SHIFT), - waiters: Mutex::new(Waitlist { - queue: LinkedList::new(), - closed: false, - }), - } - } - - /// Returns the current number of available permits - pub(crate) fn available_permits(&self) -> usize { - self.permits.load(Acquire) >> Self::PERMIT_SHIFT - } - - /// Adds `added` new permits to the semaphore. - /// - /// The maximum number of permits is `usize::MAX >> 3`, and this function will panic if the limit is exceeded. - pub(crate) fn release(&self, added: usize) { - if added == 0 { - return; - } - - // Assign permits to the wait queue - self.add_permits_locked(added, self.waiters.lock().unwrap()); - } - - /// Closes the semaphore. This prevents the semaphore from issuing new - /// permits and notifies all pending waiters. - // This will be used once the bounded MPSC is updated to use the new - // semaphore implementation. - #[allow(dead_code)] - pub(crate) fn close(&self) { - let mut waiters = self.waiters.lock().unwrap(); - // If the semaphore's permits counter has enough permits for an - // unqueued waiter to acquire all the permits it needs immediately, - // it won't touch the wait list. Therefore, we have to set a bit on - // the permit counter as well. However, we must do this while - // holding the lock --- otherwise, if we set the bit and then wait - // to acquire the lock we'll enter an inconsistent state where the - // permit counter is closed, but the wait list is not. - self.permits.fetch_or(Self::CLOSED, Release); - waiters.closed = true; - while let Some(mut waiter) = waiters.queue.pop_back() { - let waker = unsafe { waiter.as_mut().waker.with_mut(|waker| (*waker).take()) }; - if let Some(waker) = waker { - waker.wake(); - } - } - } - - pub(crate) fn try_acquire(&self, num_permits: u32) -> Result<(), TryAcquireError> { - assert!( - num_permits as usize <= Self::MAX_PERMITS, - "a semaphore may not have more than MAX_PERMITS permits ({})", - Self::MAX_PERMITS - ); - let num_permits = (num_permits as usize) << Self::PERMIT_SHIFT; - let mut curr = self.permits.load(Acquire); - loop { - // Has the semaphore closed?git - if curr & Self::CLOSED > 0 { - return Err(TryAcquireError::Closed); - } - - // Are there enough permits remaining? 
- if curr < num_permits { - return Err(TryAcquireError::NoPermits); - } - - let next = curr - num_permits; - - match self.permits.compare_exchange(curr, next, AcqRel, Acquire) { - Ok(_) => return Ok(()), - Err(actual) => curr = actual, - } - } - } - - pub(crate) fn acquire(&self, num_permits: u32) -> Acquire<'_> { - Acquire::new(self, num_permits) - } - - /// Release `rem` permits to the semaphore's wait list, starting from the - /// end of the queue. - /// - /// If `rem` exceeds the number of permits needed by the wait list, the - /// remainder are assigned back to the semaphore. - fn add_permits_locked(&self, mut rem: usize, waiters: MutexGuard<'_, Waitlist>) { - let mut wakers: [Option; 8] = Default::default(); - let mut lock = Some(waiters); - let mut is_empty = false; - while rem > 0 { - let mut waiters = lock.take().unwrap_or_else(|| self.waiters.lock().unwrap()); - 'inner: for slot in &mut wakers[..] { - // Was the waiter assigned enough permits to wake it? - match waiters.queue.last() { - Some(waiter) => { - if !waiter.assign_permits(&mut rem) { - break 'inner; - } - } - None => { - is_empty = true; - // If we assigned permits to all the waiters in the queue, and there are - // still permits left over, assign them back to the semaphore. - break 'inner; - } - }; - let mut waiter = waiters.queue.pop_back().unwrap(); - *slot = unsafe { waiter.as_mut().waker.with_mut(|waker| (*waker).take()) }; - } - - if rem > 0 && is_empty { - let permits = rem << Self::PERMIT_SHIFT; - assert!( - permits < Self::MAX_PERMITS, - "cannot add more than MAX_PERMITS permits ({})", - Self::MAX_PERMITS - ); - let prev = self.permits.fetch_add(rem << Self::PERMIT_SHIFT, Release); - assert!( - prev + permits <= Self::MAX_PERMITS, - "number of added permits ({}) would overflow MAX_PERMITS ({})", - rem, - Self::MAX_PERMITS - ); - rem = 0; - } - - drop(waiters); // release the lock - - wakers - .iter_mut() - .filter_map(Option::take) - .for_each(Waker::wake); - } - - assert_eq!(rem, 0); - } - - fn poll_acquire( - &self, - cx: &mut Context<'_>, - num_permits: u32, - node: Pin<&mut Waiter>, - queued: bool, - ) -> Poll> { - let mut acquired = 0; - - let needed = if queued { - node.state.load(Acquire) << Self::PERMIT_SHIFT - } else { - (num_permits as usize) << Self::PERMIT_SHIFT - }; - - let mut lock = None; - // First, try to take the requested number of permits from the - // semaphore. - let mut curr = self.permits.load(Acquire); - let mut waiters = loop { - // Has the semaphore closed? - if curr & Self::CLOSED > 0 { - return Ready(Err(AcquireError::closed())); - } - - let mut remaining = 0; - let total = curr - .checked_add(acquired) - .expect("number of permits must not overflow"); - let (next, acq) = if total >= needed { - let next = curr - (needed - acquired); - (next, needed >> Self::PERMIT_SHIFT) - } else { - remaining = (needed - acquired) - curr; - (0, curr >> Self::PERMIT_SHIFT) - }; - - if remaining > 0 && lock.is_none() { - // No permits were immediately available, so this permit will - // (probably) need to wait. We'll need to acquire a lock on the - // wait queue before continuing. We need to do this _before_ the - // CAS that sets the new value of the semaphore's `permits` - // counter. Otherwise, if we subtract the permits and then - // acquire the lock, we might miss additional permits being - // added while waiting for the lock. 
- lock = Some(self.waiters.lock().unwrap()); - } - - match self.permits.compare_exchange(curr, next, AcqRel, Acquire) { - Ok(_) => { - acquired += acq; - if remaining == 0 { - if !queued { - return Ready(Ok(())); - } else if lock.is_none() { - break self.waiters.lock().unwrap(); - } - } - break lock.expect("lock must be acquired before waiting"); - } - Err(actual) => curr = actual, - } - }; - - if waiters.closed { - return Ready(Err(AcquireError::closed())); - } - - if node.assign_permits(&mut acquired) { - self.add_permits_locked(acquired, waiters); - return Ready(Ok(())); - } - - assert_eq!(acquired, 0); - - // Otherwise, register the waker & enqueue the node. - node.waker.with_mut(|waker| { - // Safety: the wait list is locked, so we may modify the waker. - let waker = unsafe { &mut *waker }; - // Do we need to register the new waker? - if waker - .as_ref() - .map(|waker| !waker.will_wake(cx.waker())) - .unwrap_or(true) - { - *waker = Some(cx.waker().clone()); - } - }); - - // If the waiter is not already in the wait queue, enqueue it. - if !queued { - let node = unsafe { - let node = Pin::into_inner_unchecked(node) as *mut _; - NonNull::new_unchecked(node) - }; - - waiters.queue.push_front(node); - } - - Pending - } -} - -impl fmt::Debug for Semaphore { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("Semaphore") - .field("permits", &self.permits.load(Relaxed)) - .finish() - } -} - -impl Waiter { - fn new(num_permits: u32) -> Self { - Waiter { - waker: UnsafeCell::new(None), - state: AtomicUsize::new(num_permits as usize), - pointers: linked_list::Pointers::new(), - _p: PhantomPinned, - } - } - - /// Assign permits to the waiter. - /// - /// Returns `true` if the waiter should be removed from the queue - fn assign_permits(&self, n: &mut usize) -> bool { - let mut curr = self.state.load(Acquire); - loop { - let assign = cmp::min(curr, *n); - let next = curr - assign; - match self.state.compare_exchange(curr, next, AcqRel, Acquire) { - Ok(_) => { - *n -= assign; - return next == 0; - } - Err(actual) => curr = actual, - } - } - } -} - -impl Future for Acquire<'_> { - type Output = Result<(), AcquireError>; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - // First, ensure the current task has enough budget to proceed. - let coop = ready!(crate::coop::poll_proceed(cx)); - - let (node, semaphore, needed, queued) = self.project(); - - match semaphore.poll_acquire(cx, needed, node, *queued) { - Pending => { - *queued = true; - Pending - } - Ready(r) => { - coop.made_progress(); - r?; - *queued = false; - Ready(Ok(())) - } - } - } -} - -impl<'a> Acquire<'a> { - fn new(semaphore: &'a Semaphore, num_permits: u32) -> Self { - Self { - node: Waiter::new(num_permits), - semaphore, - num_permits, - queued: false, - } - } - - fn project(self: Pin<&mut Self>) -> (Pin<&mut Waiter>, &Semaphore, u32, &mut bool) { - fn is_unpin() {} - unsafe { - // Safety: all fields other than `node` are `Unpin` - - is_unpin::<&Semaphore>(); - is_unpin::<&mut bool>(); - is_unpin::(); - - let this = self.get_unchecked_mut(); - ( - Pin::new_unchecked(&mut this.node), - &this.semaphore, - this.num_permits, - &mut this.queued, - ) - } - } -} - -impl Drop for Acquire<'_> { - fn drop(&mut self) { - // If the future is completed, there is no node in the wait list, so we - // can skip acquiring the lock. - if !self.queued { - return; - } - - // This is where we ensure safety. 
The future is being dropped, - // which means we must ensure that the waiter entry is no longer stored - // in the linked list. - let mut waiters = match self.semaphore.waiters.lock() { - Ok(lock) => lock, - // Removing the node from the linked list is necessary to ensure - // safety. Even if the lock was poisoned, we need to make sure it is - // removed from the linked list before dropping it --- otherwise, - // the list will contain a dangling pointer to this node. - Err(e) => e.into_inner(), - }; - - // remove the entry from the list - let node = NonNull::from(&mut self.node); - // Safety: we have locked the wait list. - unsafe { waiters.queue.remove(node) }; - - let acquired_permits = self.num_permits as usize - self.node.state.load(Acquire); - if acquired_permits > 0 { - self.semaphore.add_permits_locked(acquired_permits, waiters); - } - } -} - -// Safety: the `Acquire` future is not `Sync` automatically because it contains -// a `Waiter`, which, in turn, contains an `UnsafeCell`. However, the -// `UnsafeCell` is only accessed when the future is borrowed mutably (either in -// `poll` or in `drop`). Therefore, it is safe (although not particularly -// _useful_) for the future to be borrowed immutably across threads. -unsafe impl Sync for Acquire<'_> {} - -// ===== impl AcquireError ==== - -impl AcquireError { - fn closed() -> AcquireError { - AcquireError(()) - } -} - -impl fmt::Display for AcquireError { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(fmt, "semaphore closed") - } -} - -impl std::error::Error for AcquireError {} - -// ===== impl TryAcquireError ===== - -impl TryAcquireError { - /// Returns `true` if the error was caused by a closed semaphore. - #[allow(dead_code)] // may be used later! - pub(crate) fn is_closed(&self) -> bool { - match self { - TryAcquireError::Closed => true, - _ => false, - } - } - - /// Returns `true` if the error was caused by calling `try_acquire` on a - /// semaphore with no available permits. - #[allow(dead_code)] // may be used later! - pub(crate) fn is_no_permits(&self) -> bool { - match self { - TryAcquireError::NoPermits => true, - _ => false, - } - } -} - -impl fmt::Display for TryAcquireError { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - TryAcquireError::Closed => write!(fmt, "semaphore closed"), - TryAcquireError::NoPermits => write!(fmt, "no permits available"), - } - } -} - -impl std::error::Error for TryAcquireError {} - -/// # Safety -/// -/// `Waiter` is forced to be !Unpin. -unsafe impl linked_list::Link for Waiter { - // XXX: ideally, we would be able to use `Pin` here, to enforce the - // invariant that list entries may not move while in the list. However, we - // can't do this currently, as using `Pin<&'a mut Waiter>` as the `Handle` - // type would require `Semaphore` to be generic over a lifetime. We can't - // use `Pin<*mut Waiter>`, as raw pointers are `Unpin` regardless of whether - // or not they dereference to an `!Unpin` target. 
- type Handle = NonNull; - type Target = Waiter; - - fn as_raw(handle: &Self::Handle) -> NonNull { - *handle - } - - unsafe fn from_raw(ptr: NonNull) -> NonNull { - ptr - } - - unsafe fn pointers(mut target: NonNull) -> NonNull> { - NonNull::from(&mut target.as_mut().pointers) - } -} diff --git a/third_party/rust/tokio-0.2.25/src/sync/broadcast.rs b/third_party/rust/tokio-0.2.25/src/sync/broadcast.rs deleted file mode 100644 index 8fcb2b4b0fc0..000000000000 --- a/third_party/rust/tokio-0.2.25/src/sync/broadcast.rs +++ /dev/null @@ -1,1237 +0,0 @@ -//! A multi-producer, multi-consumer broadcast queue. Each sent value is seen by -//! all consumers. -//! -//! A [`Sender`] is used to broadcast values to **all** connected [`Receiver`] -//! values. [`Sender`] handles are clone-able, allowing concurrent send and -//! receive actions. [`Sender`] and [`Receiver`] are both `Send` and `Sync` as -//! long as `T` is also `Send` or `Sync` respectively. -//! -//! When a value is sent, **all** [`Receiver`] handles are notified and will -//! receive the value. The value is stored once inside the channel and cloned on -//! demand for each receiver. Once all receivers have received a clone of the -//! value, the value is released from the channel. -//! -//! A channel is created by calling [`channel`], specifying the maximum number -//! of messages the channel can retain at any given time. -//! -//! New [`Receiver`] handles are created by calling [`Sender::subscribe`]. The -//! returned [`Receiver`] will receive values sent **after** the call to -//! `subscribe`. -//! -//! ## Lagging -//! -//! As sent messages must be retained until **all** [`Receiver`] handles receive -//! a clone, broadcast channels are susceptible to the "slow receiver" problem. -//! In this case, all but one receiver are able to receive values at the rate -//! they are sent. Because one receiver is stalled, the channel starts to fill -//! up. -//! -//! This broadcast channel implementation handles this case by setting a hard -//! upper bound on the number of values the channel may retain at any given -//! time. This upper bound is passed to the [`channel`] function as an argument. -//! -//! If a value is sent when the channel is at capacity, the oldest value -//! currently held by the channel is released. This frees up space for the new -//! value. Any receiver that has not yet seen the released value will return -//! [`RecvError::Lagged`] the next time [`recv`] is called. -//! -//! Once [`RecvError::Lagged`] is returned, the lagging receiver's position is -//! updated to the oldest value contained by the channel. The next call to -//! [`recv`] will return this value. -//! -//! This behavior enables a receiver to detect when it has lagged so far behind -//! that data has been dropped. The caller may decide how to respond to this: -//! either by aborting its task or by tolerating lost messages and resuming -//! consumption of the channel. -//! -//! ## Closing -//! -//! When **all** [`Sender`] handles have been dropped, no new values may be -//! sent. At this point, the channel is "closed". Once a receiver has received -//! all values retained by the channel, the next call to [`recv`] will return -//! with [`RecvError::Closed`]. -//! -//! [`Sender`]: crate::sync::broadcast::Sender -//! [`Sender::subscribe`]: crate::sync::broadcast::Sender::subscribe -//! [`Receiver`]: crate::sync::broadcast::Receiver -//! [`channel`]: crate::sync::broadcast::channel -//! [`RecvError::Lagged`]: crate::sync::broadcast::RecvError::Lagged -//! 
[`RecvError::Closed`]: crate::sync::broadcast::RecvError::Closed -//! [`recv`]: crate::sync::broadcast::Receiver::recv -//! -//! # Examples -//! -//! Basic usage -//! -//! ``` -//! use tokio::sync::broadcast; -//! -//! #[tokio::main] -//! async fn main() { -//! let (tx, mut rx1) = broadcast::channel(16); -//! let mut rx2 = tx.subscribe(); -//! -//! tokio::spawn(async move { -//! assert_eq!(rx1.recv().await.unwrap(), 10); -//! assert_eq!(rx1.recv().await.unwrap(), 20); -//! }); -//! -//! tokio::spawn(async move { -//! assert_eq!(rx2.recv().await.unwrap(), 10); -//! assert_eq!(rx2.recv().await.unwrap(), 20); -//! }); -//! -//! tx.send(10).unwrap(); -//! tx.send(20).unwrap(); -//! } -//! ``` -//! -//! Handling lag -//! -//! ``` -//! use tokio::sync::broadcast; -//! -//! #[tokio::main] -//! async fn main() { -//! let (tx, mut rx) = broadcast::channel(2); -//! -//! tx.send(10).unwrap(); -//! tx.send(20).unwrap(); -//! tx.send(30).unwrap(); -//! -//! // The receiver lagged behind -//! assert!(rx.recv().await.is_err()); -//! -//! // At this point, we can abort or continue with lost messages -//! -//! assert_eq!(20, rx.recv().await.unwrap()); -//! assert_eq!(30, rx.recv().await.unwrap()); -//! } - -use crate::loom::cell::UnsafeCell; -use crate::loom::sync::atomic::AtomicUsize; -use crate::loom::sync::{Arc, Mutex, RwLock, RwLockReadGuard}; -use crate::util::linked_list::{self, LinkedList}; - -use std::fmt; -use std::future::Future; -use std::marker::PhantomPinned; -use std::pin::Pin; -use std::ptr::NonNull; -use std::sync::atomic::Ordering::SeqCst; -use std::task::{Context, Poll, Waker}; -use std::usize; - -/// Sending-half of the [`broadcast`] channel. -/// -/// May be used from many threads. Messages can be sent with -/// [`send`][Sender::send]. -/// -/// # Examples -/// -/// ``` -/// use tokio::sync::broadcast; -/// -/// #[tokio::main] -/// async fn main() { -/// let (tx, mut rx1) = broadcast::channel(16); -/// let mut rx2 = tx.subscribe(); -/// -/// tokio::spawn(async move { -/// assert_eq!(rx1.recv().await.unwrap(), 10); -/// assert_eq!(rx1.recv().await.unwrap(), 20); -/// }); -/// -/// tokio::spawn(async move { -/// assert_eq!(rx2.recv().await.unwrap(), 10); -/// assert_eq!(rx2.recv().await.unwrap(), 20); -/// }); -/// -/// tx.send(10).unwrap(); -/// tx.send(20).unwrap(); -/// } -/// ``` -/// -/// [`broadcast`]: crate::sync::broadcast -pub struct Sender { - shared: Arc>, -} - -/// Receiving-half of the [`broadcast`] channel. -/// -/// Must not be used concurrently. Messages may be retrieved using -/// [`recv`][Receiver::recv]. -/// -/// # Examples -/// -/// ``` -/// use tokio::sync::broadcast; -/// -/// #[tokio::main] -/// async fn main() { -/// let (tx, mut rx1) = broadcast::channel(16); -/// let mut rx2 = tx.subscribe(); -/// -/// tokio::spawn(async move { -/// assert_eq!(rx1.recv().await.unwrap(), 10); -/// assert_eq!(rx1.recv().await.unwrap(), 20); -/// }); -/// -/// tokio::spawn(async move { -/// assert_eq!(rx2.recv().await.unwrap(), 10); -/// assert_eq!(rx2.recv().await.unwrap(), 20); -/// }); -/// -/// tx.send(10).unwrap(); -/// tx.send(20).unwrap(); -/// } -/// ``` -/// -/// [`broadcast`]: crate::sync::broadcast -pub struct Receiver { - /// State shared with all receivers and senders. - shared: Arc>, - - /// Next position to read from - next: u64, - - /// Used to support the deprecated `poll_recv` fn - waiter: Option>>>, -} - -/// Error returned by [`Sender::send`][Sender::send]. 
-/// -/// A **send** operation can only fail if there are no active receivers, -/// implying that the message could never be received. The error contains the -/// message being sent as a payload so it can be recovered. -#[derive(Debug)] -pub struct SendError(pub T); - -/// An error returned from the [`recv`] function on a [`Receiver`]. -/// -/// [`recv`]: crate::sync::broadcast::Receiver::recv -/// [`Receiver`]: crate::sync::broadcast::Receiver -#[derive(Debug, PartialEq)] -pub enum RecvError { - /// There are no more active senders implying no further messages will ever - /// be sent. - Closed, - - /// The receiver lagged too far behind. Attempting to receive again will - /// return the oldest message still retained by the channel. - /// - /// Includes the number of skipped messages. - Lagged(u64), -} - -/// An error returned from the [`try_recv`] function on a [`Receiver`]. -/// -/// [`try_recv`]: crate::sync::broadcast::Receiver::try_recv -/// [`Receiver`]: crate::sync::broadcast::Receiver -#[derive(Debug, PartialEq)] -pub enum TryRecvError { - /// The channel is currently empty. There are still active - /// [`Sender`][Sender] handles, so data may yet become available. - Empty, - - /// There are no more active senders implying no further messages will ever - /// be sent. - Closed, - - /// The receiver lagged too far behind and has been forcibly disconnected. - /// Attempting to receive again will return the oldest message still - /// retained by the channel. - /// - /// Includes the number of skipped messages. - Lagged(u64), -} - -/// Data shared between senders and receivers -struct Shared { - /// slots in the channel - buffer: Box<[RwLock>]>, - - /// Mask a position -> index - mask: usize, - - /// Tail of the queue. Includes the rx wait list. - tail: Mutex, - - /// Number of outstanding Sender handles - num_tx: AtomicUsize, -} - -/// Next position to write a value -struct Tail { - /// Next position to write to - pos: u64, - - /// Number of active receivers - rx_cnt: usize, - - /// True if the channel is closed - closed: bool, - - /// Receivers waiting for a value - waiters: LinkedList, -} - -/// Slot in the buffer -struct Slot { - /// Remaining number of receivers that are expected to see this value. - /// - /// When this goes to zero, the value is released. - /// - /// An atomic is used as it is mutated concurrently with the slot read lock - /// acquired. - rem: AtomicUsize, - - /// Uniquely identifies the `send` stored in the slot - pos: u64, - - /// True signals the channel is closed. - closed: bool, - - /// The value being broadcast. - /// - /// The value is set by `send` when the write lock is held. When a reader - /// drops, `rem` is decremented. When it hits zero, the value is dropped. - val: UnsafeCell>, -} - -/// An entry in the wait queue -struct Waiter { - /// True if queued - queued: bool, - - /// Task waiting on the broadcast channel. - waker: Option, - - /// Intrusive linked-list pointers. - pointers: linked_list::Pointers, - - /// Should not be `Unpin`. - _p: PhantomPinned, -} - -struct RecvGuard<'a, T> { - slot: RwLockReadGuard<'a, Slot>, -} - -/// Receive a value future -struct Recv -where - R: AsMut>, -{ - /// Receiver being waited on - receiver: R, - - /// Entry in the waiter `LinkedList` - waiter: UnsafeCell, - - _p: std::marker::PhantomData, -} - -/// `AsMut` is not implemented for `T` (coherence). Explicitly implementing -/// `AsMut` for `Receiver` would be included in the public API of the receiver -/// type. 
Instead, `Borrow` is used internally to bridge the gap. -struct Borrow(T); - -impl AsMut> for Borrow> { - fn as_mut(&mut self) -> &mut Receiver { - &mut self.0 - } -} - -impl<'a, T> AsMut> for Borrow<&'a mut Receiver> { - fn as_mut(&mut self) -> &mut Receiver { - &mut *self.0 - } -} - -unsafe impl> + Send, T: Send> Send for Recv {} -unsafe impl> + Sync, T: Send> Sync for Recv {} - -/// Max number of receivers. Reserve space to lock. -const MAX_RECEIVERS: usize = usize::MAX >> 2; - -/// Create a bounded, multi-producer, multi-consumer channel where each sent -/// value is broadcasted to all active receivers. -/// -/// All data sent on [`Sender`] will become available on every active -/// [`Receiver`] in the same order as it was sent. -/// -/// The `Sender` can be cloned to `send` to the same channel from multiple -/// points in the process or it can be used concurrently from an `Arc`. New -/// `Receiver` handles are created by calling [`Sender::subscribe`]. -/// -/// If all [`Receiver`] handles are dropped, the `send` method will return a -/// [`SendError`]. Similarly, if all [`Sender`] handles are dropped, the [`recv`] -/// method will return a [`RecvError`]. -/// -/// [`Sender`]: crate::sync::broadcast::Sender -/// [`Sender::subscribe`]: crate::sync::broadcast::Sender::subscribe -/// [`Receiver`]: crate::sync::broadcast::Receiver -/// [`recv`]: crate::sync::broadcast::Receiver::recv -/// [`SendError`]: crate::sync::broadcast::SendError -/// [`RecvError`]: crate::sync::broadcast::RecvError -/// -/// # Examples -/// -/// ``` -/// use tokio::sync::broadcast; -/// -/// #[tokio::main] -/// async fn main() { -/// let (tx, mut rx1) = broadcast::channel(16); -/// let mut rx2 = tx.subscribe(); -/// -/// tokio::spawn(async move { -/// assert_eq!(rx1.recv().await.unwrap(), 10); -/// assert_eq!(rx1.recv().await.unwrap(), 20); -/// }); -/// -/// tokio::spawn(async move { -/// assert_eq!(rx2.recv().await.unwrap(), 10); -/// assert_eq!(rx2.recv().await.unwrap(), 20); -/// }); -/// -/// tx.send(10).unwrap(); -/// tx.send(20).unwrap(); -/// } -/// ``` -pub fn channel(mut capacity: usize) -> (Sender, Receiver) { - assert!(capacity > 0, "capacity is empty"); - assert!(capacity <= usize::MAX >> 1, "requested capacity too large"); - - // Round to a power of two - capacity = capacity.next_power_of_two(); - - let mut buffer = Vec::with_capacity(capacity); - - for i in 0..capacity { - buffer.push(RwLock::new(Slot { - rem: AtomicUsize::new(0), - pos: (i as u64).wrapping_sub(capacity as u64), - closed: false, - val: UnsafeCell::new(None), - })); - } - - let shared = Arc::new(Shared { - buffer: buffer.into_boxed_slice(), - mask: capacity - 1, - tail: Mutex::new(Tail { - pos: 0, - rx_cnt: 1, - closed: false, - waiters: LinkedList::new(), - }), - num_tx: AtomicUsize::new(1), - }); - - let rx = Receiver { - shared: shared.clone(), - next: 0, - waiter: None, - }; - - let tx = Sender { shared }; - - (tx, rx) -} - -unsafe impl Send for Sender {} -unsafe impl Sync for Sender {} - -unsafe impl Send for Receiver {} -unsafe impl Sync for Receiver {} - -impl Sender { - /// Attempts to send a value to all active [`Receiver`] handles, returning - /// it back if it could not be sent. - /// - /// A successful send occurs when there is at least one active [`Receiver`] - /// handle. An unsuccessful send would be one where all associated - /// [`Receiver`] handles have already been dropped. - /// - /// # Return - /// - /// On success, the number of subscribed [`Receiver`] handles is returned. 
- /// This does not mean that this number of receivers will see the message as - /// a receiver may drop before receiving the message. - /// - /// # Note - /// - /// A return value of `Ok` **does not** mean that the sent value will be - /// observed by all or any of the active [`Receiver`] handles. [`Receiver`] - /// handles may be dropped before receiving the sent message. - /// - /// A return value of `Err` **does not** mean that future calls to `send` - /// will fail. New [`Receiver`] handles may be created by calling - /// [`subscribe`]. - /// - /// [`Receiver`]: crate::sync::broadcast::Receiver - /// [`subscribe`]: crate::sync::broadcast::Sender::subscribe - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::broadcast; - /// - /// #[tokio::main] - /// async fn main() { - /// let (tx, mut rx1) = broadcast::channel(16); - /// let mut rx2 = tx.subscribe(); - /// - /// tokio::spawn(async move { - /// assert_eq!(rx1.recv().await.unwrap(), 10); - /// assert_eq!(rx1.recv().await.unwrap(), 20); - /// }); - /// - /// tokio::spawn(async move { - /// assert_eq!(rx2.recv().await.unwrap(), 10); - /// assert_eq!(rx2.recv().await.unwrap(), 20); - /// }); - /// - /// tx.send(10).unwrap(); - /// tx.send(20).unwrap(); - /// } - /// ``` - pub fn send(&self, value: T) -> Result> { - self.send2(Some(value)) - .map_err(|SendError(maybe_v)| SendError(maybe_v.unwrap())) - } - - /// Creates a new [`Receiver`] handle that will receive values sent **after** - /// this call to `subscribe`. - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::broadcast; - /// - /// #[tokio::main] - /// async fn main() { - /// let (tx, _rx) = broadcast::channel(16); - /// - /// // Will not be seen - /// tx.send(10).unwrap(); - /// - /// let mut rx = tx.subscribe(); - /// - /// tx.send(20).unwrap(); - /// - /// let value = rx.recv().await.unwrap(); - /// assert_eq!(20, value); - /// } - /// ``` - pub fn subscribe(&self) -> Receiver { - let shared = self.shared.clone(); - - let mut tail = shared.tail.lock().unwrap(); - - if tail.rx_cnt == MAX_RECEIVERS { - panic!("max receivers"); - } - - tail.rx_cnt = tail.rx_cnt.checked_add(1).expect("overflow"); - let next = tail.pos; - - drop(tail); - - Receiver { - shared, - next, - waiter: None, - } - } - - /// Returns the number of active receivers - /// - /// An active receiver is a [`Receiver`] handle returned from [`channel`] or - /// [`subscribe`]. These are the handles that will receive values sent on - /// this [`Sender`]. - /// - /// # Note - /// - /// It is not guaranteed that a sent message will reach this number of - /// receivers. Active receivers may never call [`recv`] again before - /// dropping. 
- /// - /// [`recv`]: crate::sync::broadcast::Receiver::recv - /// [`Receiver`]: crate::sync::broadcast::Receiver - /// [`Sender`]: crate::sync::broadcast::Sender - /// [`subscribe`]: crate::sync::broadcast::Sender::subscribe - /// [`channel`]: crate::sync::broadcast::channel - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::broadcast; - /// - /// #[tokio::main] - /// async fn main() { - /// let (tx, _rx1) = broadcast::channel(16); - /// - /// assert_eq!(1, tx.receiver_count()); - /// - /// let mut _rx2 = tx.subscribe(); - /// - /// assert_eq!(2, tx.receiver_count()); - /// - /// tx.send(10).unwrap(); - /// } - /// ``` - pub fn receiver_count(&self) -> usize { - let tail = self.shared.tail.lock().unwrap(); - tail.rx_cnt - } - - fn send2(&self, value: Option) -> Result>> { - let mut tail = self.shared.tail.lock().unwrap(); - - if tail.rx_cnt == 0 { - return Err(SendError(value)); - } - - // Position to write into - let pos = tail.pos; - let rem = tail.rx_cnt; - let idx = (pos & self.shared.mask as u64) as usize; - - // Update the tail position - tail.pos = tail.pos.wrapping_add(1); - - // Get the slot - let mut slot = self.shared.buffer[idx].write().unwrap(); - - // Track the position - slot.pos = pos; - - // Set remaining receivers - slot.rem.with_mut(|v| *v = rem); - - // Set the closed bit if the value is `None`; otherwise write the value - if value.is_none() { - tail.closed = true; - slot.closed = true; - } else { - slot.val.with_mut(|ptr| unsafe { *ptr = value }); - } - - // Release the slot lock before notifying the receivers. - drop(slot); - - tail.notify_rx(); - - // Release the mutex. This must happen after the slot lock is released, - // otherwise the writer lock bit could be cleared while another thread - // is in the critical section. - drop(tail); - - Ok(rem) - } -} - -impl Tail { - fn notify_rx(&mut self) { - while let Some(mut waiter) = self.waiters.pop_back() { - // Safety: `waiters` lock is still held. - let waiter = unsafe { waiter.as_mut() }; - - assert!(waiter.queued); - waiter.queued = false; - - let waker = waiter.waker.take().unwrap(); - waker.wake(); - } - } -} - -impl Clone for Sender { - fn clone(&self) -> Sender { - let shared = self.shared.clone(); - shared.num_tx.fetch_add(1, SeqCst); - - Sender { shared } - } -} - -impl Drop for Sender { - fn drop(&mut self) { - if 1 == self.shared.num_tx.fetch_sub(1, SeqCst) { - let _ = self.send2(None); - } - } -} - -impl Receiver { - /// Locks the next value if there is one. - fn recv_ref( - &mut self, - waiter: Option<(&UnsafeCell, &Waker)>, - ) -> Result, TryRecvError> { - let idx = (self.next & self.shared.mask as u64) as usize; - - // The slot holding the next value to read - let mut slot = self.shared.buffer[idx].read().unwrap(); - - if slot.pos != self.next { - let next_pos = slot.pos.wrapping_add(self.shared.buffer.len() as u64); - - // The receiver has read all current values in the channel and there - // is no waiter to register - if waiter.is_none() && next_pos == self.next { - return Err(TryRecvError::Empty); - } - - // Release the `slot` lock before attempting to acquire the `tail` - // lock. This is required because `send2` acquires the tail lock - // first followed by the slot lock. Acquiring the locks in reverse - // order here would result in a potential deadlock: `recv_ref` - // acquires the `slot` lock and attempts to acquire the `tail` lock - // while `send2` acquired the `tail` lock and attempts to acquire - // the slot lock. 
- drop(slot); - - let mut tail = self.shared.tail.lock().unwrap(); - - // Acquire slot lock again - slot = self.shared.buffer[idx].read().unwrap(); - - // Make sure the position did not change. This could happen in the - // unlikely event that the buffer is wrapped between dropping the - // read lock and acquiring the tail lock. - if slot.pos != self.next { - let next_pos = slot.pos.wrapping_add(self.shared.buffer.len() as u64); - - if next_pos == self.next { - // Store the waker - if let Some((waiter, waker)) = waiter { - // Safety: called while locked. - unsafe { - // Only queue if not already queued - waiter.with_mut(|ptr| { - // If there is no waker **or** if the currently - // stored waker references a **different** task, - // track the tasks' waker to be notified on - // receipt of a new value. - match (*ptr).waker { - Some(ref w) if w.will_wake(waker) => {} - _ => { - (*ptr).waker = Some(waker.clone()); - } - } - - if !(*ptr).queued { - (*ptr).queued = true; - tail.waiters.push_front(NonNull::new_unchecked(&mut *ptr)); - } - }); - } - } - - return Err(TryRecvError::Empty); - } - - // At this point, the receiver has lagged behind the sender by - // more than the channel capacity. The receiver will attempt to - // catch up by skipping dropped messages and setting the - // internal cursor to the **oldest** message stored by the - // channel. - // - // However, finding the oldest position is a bit more - // complicated than `tail-position - buffer-size`. When - // the channel is closed, the tail position is incremented to - // signal a new `None` message, but `None` is not stored in the - // channel itself (see issue #2425 for why). - // - // To account for this, if the channel is closed, the tail - // position is decremented by `buffer-size + 1`. - let mut adjust = 0; - if tail.closed { - adjust = 1 - } - let next = tail - .pos - .wrapping_sub(self.shared.buffer.len() as u64 + adjust); - - let missed = next.wrapping_sub(self.next); - - drop(tail); - - // The receiver is slow but no values have been missed - if missed == 0 { - self.next = self.next.wrapping_add(1); - - return Ok(RecvGuard { slot }); - } - - self.next = next; - - return Err(TryRecvError::Lagged(missed)); - } - } - - self.next = self.next.wrapping_add(1); - - if slot.closed { - return Err(TryRecvError::Closed); - } - - Ok(RecvGuard { slot }) - } -} - -impl Receiver -where - T: Clone, -{ - /// Attempts to return a pending value on this receiver without awaiting. - /// - /// This is useful for a flavor of "optimistic check" before deciding to - /// await on a receiver. - /// - /// Compared with [`recv`], this function has three failure cases instead of one - /// (one for closed, one for an empty buffer, one for a lagging receiver). - /// - /// `Err(TryRecvError::Closed)` is returned when all `Sender` halves have - /// dropped, indicating that no further values can be sent on the channel. - /// - /// If the [`Receiver`] handle falls behind, once the channel is full, newly - /// sent values will overwrite old values. At this point, a call to [`recv`] - /// will return with `Err(TryRecvError::Lagged)` and the [`Receiver`]'s - /// internal cursor is updated to point to the oldest value still held by - /// the channel. A subsequent call to [`try_recv`] will return this value - /// **unless** it has been since overwritten. If there are no values to - /// receive, `Err(TryRecvError::Empty)` is returned. 
- /// - /// [`recv`]: crate::sync::broadcast::Receiver::recv - /// [`try_recv`]: crate::sync::broadcast::Receiver::try_recv - /// [`Receiver`]: crate::sync::broadcast::Receiver - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::broadcast; - /// - /// #[tokio::main] - /// async fn main() { - /// let (tx, mut rx) = broadcast::channel(16); - /// - /// assert!(rx.try_recv().is_err()); - /// - /// tx.send(10).unwrap(); - /// - /// let value = rx.try_recv().unwrap(); - /// assert_eq!(10, value); - /// } - /// ``` - pub fn try_recv(&mut self) -> Result { - let guard = self.recv_ref(None)?; - guard.clone_value().ok_or(TryRecvError::Closed) - } - - #[doc(hidden)] - #[deprecated(since = "0.2.21", note = "use async fn recv()")] - pub fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll> { - use Poll::{Pending, Ready}; - - // The borrow checker prohibits calling `self.poll_ref` while passing in - // a mutable ref to a field (as it should). To work around this, - // `waiter` is first *removed* from `self` then `poll_recv` is called. - // - // However, for safety, we must ensure that `waiter` is **not** dropped. - // It could be contained in the intrusive linked list. The `Receiver` - // drop implementation handles cleanup. - // - // The guard pattern is used to ensure that, on return, even due to - // panic, the waiter node is replaced on `self`. - - struct Guard<'a, T> { - waiter: Option>>>, - receiver: &'a mut Receiver, - } - - impl<'a, T> Drop for Guard<'a, T> { - fn drop(&mut self) { - self.receiver.waiter = self.waiter.take(); - } - } - - let waiter = self.waiter.take().or_else(|| { - Some(Box::pin(UnsafeCell::new(Waiter { - queued: false, - waker: None, - pointers: linked_list::Pointers::new(), - _p: PhantomPinned, - }))) - }); - - let guard = Guard { - waiter, - receiver: self, - }; - let res = guard - .receiver - .recv_ref(Some((&guard.waiter.as_ref().unwrap(), cx.waker()))); - - match res { - Ok(guard) => Ready(guard.clone_value().ok_or(RecvError::Closed)), - Err(TryRecvError::Closed) => Ready(Err(RecvError::Closed)), - Err(TryRecvError::Lagged(n)) => Ready(Err(RecvError::Lagged(n))), - Err(TryRecvError::Empty) => Pending, - } - } - - /// Receives the next value for this receiver. - /// - /// Each [`Receiver`] handle will receive a clone of all values sent - /// **after** it has subscribed. - /// - /// `Err(RecvError::Closed)` is returned when all `Sender` halves have - /// dropped, indicating that no further values can be sent on the channel. - /// - /// If the [`Receiver`] handle falls behind, once the channel is full, newly - /// sent values will overwrite old values. At this point, a call to [`recv`] - /// will return with `Err(RecvError::Lagged)` and the [`Receiver`]'s - /// internal cursor is updated to point to the oldest value still held by - /// the channel. A subsequent call to [`recv`] will return this value - /// **unless** it has been since overwritten. 
- /// - /// [`Receiver`]: crate::sync::broadcast::Receiver - /// [`recv`]: crate::sync::broadcast::Receiver::recv - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::broadcast; - /// - /// #[tokio::main] - /// async fn main() { - /// let (tx, mut rx1) = broadcast::channel(16); - /// let mut rx2 = tx.subscribe(); - /// - /// tokio::spawn(async move { - /// assert_eq!(rx1.recv().await.unwrap(), 10); - /// assert_eq!(rx1.recv().await.unwrap(), 20); - /// }); - /// - /// tokio::spawn(async move { - /// assert_eq!(rx2.recv().await.unwrap(), 10); - /// assert_eq!(rx2.recv().await.unwrap(), 20); - /// }); - /// - /// tx.send(10).unwrap(); - /// tx.send(20).unwrap(); - /// } - /// ``` - /// - /// Handling lag - /// - /// ``` - /// use tokio::sync::broadcast; - /// - /// #[tokio::main] - /// async fn main() { - /// let (tx, mut rx) = broadcast::channel(2); - /// - /// tx.send(10).unwrap(); - /// tx.send(20).unwrap(); - /// tx.send(30).unwrap(); - /// - /// // The receiver lagged behind - /// assert!(rx.recv().await.is_err()); - /// - /// // At this point, we can abort or continue with lost messages - /// - /// assert_eq!(20, rx.recv().await.unwrap()); - /// assert_eq!(30, rx.recv().await.unwrap()); - /// } - pub async fn recv(&mut self) -> Result { - let fut = Recv::<_, T>::new(Borrow(self)); - fut.await - } -} - -#[cfg(feature = "stream")] -#[doc(hidden)] -impl crate::stream::Stream for Receiver -where - T: Clone, -{ - type Item = Result; - - fn poll_next( - mut self: std::pin::Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll>> { - #[allow(deprecated)] - self.poll_recv(cx).map(|v| match v { - Ok(v) => Some(Ok(v)), - lag @ Err(RecvError::Lagged(_)) => Some(lag), - Err(RecvError::Closed) => None, - }) - } -} - -impl Drop for Receiver { - fn drop(&mut self) { - let mut tail = self.shared.tail.lock().unwrap(); - - if let Some(waiter) = &self.waiter { - // safety: tail lock is held - let queued = waiter.with(|ptr| unsafe { (*ptr).queued }); - - if queued { - // Remove the node - // - // safety: tail lock is held and the wait node is verified to be in - // the list. - unsafe { - waiter.with_mut(|ptr| { - tail.waiters.remove((&mut *ptr).into()); - }); - } - } - } - - tail.rx_cnt -= 1; - let until = tail.pos; - - drop(tail); - - while self.next != until { - match self.recv_ref(None) { - Ok(_) => {} - // The channel is closed - Err(TryRecvError::Closed) => break, - // Ignore lagging, we will catch up - Err(TryRecvError::Lagged(..)) => {} - // Can't be empty - Err(TryRecvError::Empty) => panic!("unexpected empty broadcast channel"), - } - } - } -} - -impl Recv -where - R: AsMut>, -{ - fn new(receiver: R) -> Recv { - Recv { - receiver, - waiter: UnsafeCell::new(Waiter { - queued: false, - waker: None, - pointers: linked_list::Pointers::new(), - _p: PhantomPinned, - }), - _p: std::marker::PhantomData, - } - } - - /// A custom `project` implementation is used in place of `pin-project-lite` - /// as a custom drop implementation is needed. 
- fn project(self: Pin<&mut Self>) -> (&mut Receiver, &UnsafeCell) { - unsafe { - // Safety: Receiver is Unpin - is_unpin::<&mut Receiver>(); - - let me = self.get_unchecked_mut(); - (me.receiver.as_mut(), &me.waiter) - } - } -} - -impl Future for Recv -where - R: AsMut>, - T: Clone, -{ - type Output = Result; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let (receiver, waiter) = self.project(); - - let guard = match receiver.recv_ref(Some((waiter, cx.waker()))) { - Ok(value) => value, - Err(TryRecvError::Empty) => return Poll::Pending, - Err(TryRecvError::Lagged(n)) => return Poll::Ready(Err(RecvError::Lagged(n))), - Err(TryRecvError::Closed) => return Poll::Ready(Err(RecvError::Closed)), - }; - - Poll::Ready(guard.clone_value().ok_or(RecvError::Closed)) - } -} - -cfg_stream! { - use futures_core::Stream; - - impl Receiver { - /// Convert the receiver into a `Stream`. - /// - /// The conversion allows using `Receiver` with APIs that require stream - /// values. - /// - /// # Examples - /// - /// ``` - /// use tokio::stream::StreamExt; - /// use tokio::sync::broadcast; - /// - /// #[tokio::main] - /// async fn main() { - /// let (tx, rx) = broadcast::channel(128); - /// - /// tokio::spawn(async move { - /// for i in 0..10_i32 { - /// tx.send(i).unwrap(); - /// } - /// }); - /// - /// // Streams must be pinned to iterate. - /// tokio::pin! { - /// let stream = rx - /// .into_stream() - /// .filter(Result::is_ok) - /// .map(Result::unwrap) - /// .filter(|v| v % 2 == 0) - /// .map(|v| v + 1); - /// } - /// - /// while let Some(i) = stream.next().await { - /// println!("{}", i); - /// } - /// } - /// ``` - pub fn into_stream(self) -> impl Stream> { - Recv::new(Borrow(self)) - } - } - - impl Stream for Recv - where - R: AsMut>, - T: Clone, - { - type Item = Result; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let (receiver, waiter) = self.project(); - - let guard = match receiver.recv_ref(Some((waiter, cx.waker()))) { - Ok(value) => value, - Err(TryRecvError::Empty) => return Poll::Pending, - Err(TryRecvError::Lagged(n)) => return Poll::Ready(Some(Err(RecvError::Lagged(n)))), - Err(TryRecvError::Closed) => return Poll::Ready(None), - }; - - Poll::Ready(guard.clone_value().map(Ok)) - } - } -} - -impl Drop for Recv -where - R: AsMut>, -{ - fn drop(&mut self) { - // Acquire the tail lock. This is required for safety before accessing - // the waiter node. - let mut tail = self.receiver.as_mut().shared.tail.lock().unwrap(); - - // safety: tail lock is held - let queued = self.waiter.with(|ptr| unsafe { (*ptr).queued }); - - if queued { - // Remove the node - // - // safety: tail lock is held and the wait node is verified to be in - // the list. - unsafe { - self.waiter.with_mut(|ptr| { - tail.waiters.remove((&mut *ptr).into()); - }); - } - } - } -} - -/// # Safety -/// -/// `Waiter` is forced to be !Unpin. 
-unsafe impl linked_list::Link for Waiter { - type Handle = NonNull; - type Target = Waiter; - - fn as_raw(handle: &NonNull) -> NonNull { - *handle - } - - unsafe fn from_raw(ptr: NonNull) -> NonNull { - ptr - } - - unsafe fn pointers(mut target: NonNull) -> NonNull> { - NonNull::from(&mut target.as_mut().pointers) - } -} - -impl fmt::Debug for Sender { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(fmt, "broadcast::Sender") - } -} - -impl fmt::Debug for Receiver { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(fmt, "broadcast::Receiver") - } -} - -impl<'a, T> RecvGuard<'a, T> { - fn clone_value(&self) -> Option - where - T: Clone, - { - self.slot.val.with(|ptr| unsafe { (*ptr).clone() }) - } -} - -impl<'a, T> Drop for RecvGuard<'a, T> { - fn drop(&mut self) { - // Decrement the remaining counter - if 1 == self.slot.rem.fetch_sub(1, SeqCst) { - // Safety: Last receiver, drop the value - self.slot.val.with_mut(|ptr| unsafe { *ptr = None }); - } - } -} - -impl fmt::Display for RecvError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - RecvError::Closed => write!(f, "channel closed"), - RecvError::Lagged(amt) => write!(f, "channel lagged by {}", amt), - } - } -} - -impl std::error::Error for RecvError {} - -impl fmt::Display for TryRecvError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - TryRecvError::Empty => write!(f, "channel empty"), - TryRecvError::Closed => write!(f, "channel closed"), - TryRecvError::Lagged(amt) => write!(f, "channel lagged by {}", amt), - } - } -} - -impl std::error::Error for TryRecvError {} - -fn is_unpin() {} diff --git a/third_party/rust/tokio-0.2.25/src/sync/cancellation_token.rs b/third_party/rust/tokio-0.2.25/src/sync/cancellation_token.rs deleted file mode 100644 index d60d8e0202cf..000000000000 --- a/third_party/rust/tokio-0.2.25/src/sync/cancellation_token.rs +++ /dev/null @@ -1,861 +0,0 @@ -//! An asynchronously awaitable `CancellationToken`. -//! The token allows to signal a cancellation request to one or more tasks. - -use crate::loom::sync::atomic::AtomicUsize; -use crate::loom::sync::Mutex; -use crate::util::intrusive_double_linked_list::{LinkedList, ListNode}; - -use core::future::Future; -use core::pin::Pin; -use core::ptr::NonNull; -use core::sync::atomic::Ordering; -use core::task::{Context, Poll, Waker}; - -/// A token which can be used to signal a cancellation request to one or more -/// tasks. -/// -/// Tasks can call [`CancellationToken::cancelled()`] in order to -/// obtain a Future which will be resolved when cancellation is requested. -/// -/// Cancellation can be requested through the [`CancellationToken::cancel`] method. -/// -/// # Examples -/// -/// ```ignore -/// use tokio::select; -/// use tokio::scope::CancellationToken; -/// -/// #[tokio::main] -/// async fn main() { -/// let token = CancellationToken::new(); -/// let cloned_token = token.clone(); -/// -/// let join_handle = tokio::spawn(async move { -/// // Wait for either cancellation or a very long time -/// select! 
{ -/// _ = cloned_token.cancelled() => { -/// // The token was cancelled -/// 5 -/// } -/// _ = tokio::time::delay_for(std::time::Duration::from_secs(9999)) => { -/// 99 -/// } -/// } -/// }); -/// -/// tokio::spawn(async move { -/// tokio::time::delay_for(std::time::Duration::from_millis(10)).await; -/// token.cancel(); -/// }); -/// -/// assert_eq!(5, join_handle.await.unwrap()); -/// } -/// ``` -pub struct CancellationToken { - inner: NonNull, -} - -// Safety: The CancellationToken is thread-safe and can be moved between threads, -// since all methods are internally synchronized. -unsafe impl Send for CancellationToken {} -unsafe impl Sync for CancellationToken {} - -/// A Future that is resolved once the corresponding [`CancellationToken`] -/// was cancelled -#[must_use = "futures do nothing unless polled"] -pub struct WaitForCancellationFuture<'a> { - /// The CancellationToken that is associated with this WaitForCancellationFuture - cancellation_token: Option<&'a CancellationToken>, - /// Node for waiting at the cancellation_token - wait_node: ListNode, - /// Whether this future was registered at the token yet as a waiter - is_registered: bool, -} - -// Safety: Futures can be sent between threads as long as the underlying -// cancellation_token is thread-safe (Sync), -// which allows to poll/register/unregister from a different thread. -unsafe impl<'a> Send for WaitForCancellationFuture<'a> {} - -// ===== impl CancellationToken ===== - -impl core::fmt::Debug for CancellationToken { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - f.debug_struct("CancellationToken") - .field("is_cancelled", &self.is_cancelled()) - .finish() - } -} - -impl Clone for CancellationToken { - fn clone(&self) -> Self { - // Safety: The state inside a `CancellationToken` is always valid, since - // is reference counted - let inner = self.state(); - - // Tokens are cloned by increasing their refcount - let current_state = inner.snapshot(); - inner.increment_refcount(current_state); - - CancellationToken { inner: self.inner } - } -} - -impl Drop for CancellationToken { - fn drop(&mut self) { - let token_state_pointer = self.inner; - - // Safety: The state inside a `CancellationToken` is always valid, since - // is reference counted - let inner = unsafe { &mut *self.inner.as_ptr() }; - - let mut current_state = inner.snapshot(); - - // We need to safe the parent, since the state might be released by the - // next call - let parent = inner.parent; - - // Drop our own refcount - current_state = inner.decrement_refcount(current_state); - - // If this was the last reference, unregister from the parent - if current_state.refcount == 0 { - if let Some(mut parent) = parent { - // Safety: Since we still retain a reference on the parent, it must be valid. - let parent = unsafe { parent.as_mut() }; - parent.unregister_child(token_state_pointer, current_state); - } - } - } -} - -impl CancellationToken { - /// Creates a new CancellationToken in the non-cancelled state. - pub fn new() -> CancellationToken { - let state = Box::new(CancellationTokenState::new( - None, - StateSnapshot { - cancel_state: CancellationState::NotCancelled, - has_parent_ref: false, - refcount: 1, - }, - )); - - // Safety: We just created the Box. The pointer is guaranteed to be - // not null - CancellationToken { - inner: unsafe { NonNull::new_unchecked(Box::into_raw(state)) }, - } - } - - /// Returns a reference to the utilized `CancellationTokenState`. 
- fn state(&self) -> &CancellationTokenState { - // Safety: The state inside a `CancellationToken` is always valid, since - // is reference counted - unsafe { &*self.inner.as_ptr() } - } - - /// Creates a `CancellationToken` which will get cancelled whenever the - /// current token gets cancelled. - /// - /// If the current token is already cancelled, the child token will get - /// returned in cancelled state. - /// - /// # Examples - /// - /// ```ignore - /// use tokio::select; - /// use tokio::scope::CancellationToken; - /// - /// #[tokio::main] - /// async fn main() { - /// let token = CancellationToken::new(); - /// let child_token = token.child_token(); - /// - /// let join_handle = tokio::spawn(async move { - /// // Wait for either cancellation or a very long time - /// select! { - /// _ = child_token.cancelled() => { - /// // The token was cancelled - /// 5 - /// } - /// _ = tokio::time::delay_for(std::time::Duration::from_secs(9999)) => { - /// 99 - /// } - /// } - /// }); - /// - /// tokio::spawn(async move { - /// tokio::time::delay_for(std::time::Duration::from_millis(10)).await; - /// token.cancel(); - /// }); - /// - /// assert_eq!(5, join_handle.await.unwrap()); - /// } - /// ``` - pub fn child_token(&self) -> CancellationToken { - let inner = self.state(); - - // Increment the refcount of this token. It will be referenced by the - // child, independent of whether the child is immediately cancelled or - // not. - let _current_state = inner.increment_refcount(inner.snapshot()); - - let mut unpacked_child_state = StateSnapshot { - has_parent_ref: true, - refcount: 1, - cancel_state: CancellationState::NotCancelled, - }; - let mut child_token_state = Box::new(CancellationTokenState::new( - Some(self.inner), - unpacked_child_state, - )); - - { - let mut guard = inner.synchronized.lock().unwrap(); - if guard.is_cancelled { - // This task was already cancelled. In this case we should not - // insert the child into the list, since it would never get removed - // from the list. - (*child_token_state.synchronized.lock().unwrap()).is_cancelled = true; - unpacked_child_state.cancel_state = CancellationState::Cancelled; - // Since it's not in the list, the parent doesn't need to retain - // a reference to it. - unpacked_child_state.has_parent_ref = false; - child_token_state - .state - .store(unpacked_child_state.pack(), Ordering::SeqCst); - } else { - if let Some(mut first_child) = guard.first_child { - child_token_state.from_parent.next_peer = Some(first_child); - // Safety: We manipulate other child task inside the Mutex - // and retain a parent reference on it. The child token can't - // get invalidated while the Mutex is held. - unsafe { - first_child.as_mut().from_parent.prev_peer = - Some((&mut *child_token_state).into()) - }; - } - guard.first_child = Some((&mut *child_token_state).into()); - } - }; - - let child_token_ptr = Box::into_raw(child_token_state); - // Safety: We just created the pointer from a `Box` - CancellationToken { - inner: unsafe { NonNull::new_unchecked(child_token_ptr) }, - } - } - - /// Cancel the [`CancellationToken`] and all child tokens which had been - /// derived from it. - /// - /// This will wake up all tasks which are waiting for cancellation. - pub fn cancel(&self) { - self.state().cancel(); - } - - /// Returns `true` if the `CancellationToken` had been cancelled - pub fn is_cancelled(&self) -> bool { - self.state().is_cancelled() - } - - /// Returns a `Future` that gets fulfilled when cancellation is requested. 
- pub fn cancelled(&self) -> WaitForCancellationFuture<'_> { - WaitForCancellationFuture { - cancellation_token: Some(self), - wait_node: ListNode::new(WaitQueueEntry::new()), - is_registered: false, - } - } - - unsafe fn register( - &self, - wait_node: &mut ListNode, - cx: &mut Context<'_>, - ) -> Poll<()> { - self.state().register(wait_node, cx) - } - - fn check_for_cancellation( - &self, - wait_node: &mut ListNode, - cx: &mut Context<'_>, - ) -> Poll<()> { - self.state().check_for_cancellation(wait_node, cx) - } - - fn unregister(&self, wait_node: &mut ListNode) { - self.state().unregister(wait_node) - } -} - -// ===== impl WaitForCancellationFuture ===== - -impl<'a> core::fmt::Debug for WaitForCancellationFuture<'a> { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - f.debug_struct("WaitForCancellationFuture").finish() - } -} - -impl<'a> Future for WaitForCancellationFuture<'a> { - type Output = (); - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> { - // Safety: We do not move anything out of `WaitForCancellationFuture` - let mut_self: &mut WaitForCancellationFuture<'_> = unsafe { Pin::get_unchecked_mut(self) }; - - let cancellation_token = mut_self - .cancellation_token - .expect("polled WaitForCancellationFuture after completion"); - - let poll_res = if !mut_self.is_registered { - // Safety: The `ListNode` is pinned through the Future, - // and we will unregister it in `WaitForCancellationFuture::drop` - // before the Future is dropped and the memory reference is invalidated. - unsafe { cancellation_token.register(&mut mut_self.wait_node, cx) } - } else { - cancellation_token.check_for_cancellation(&mut mut_self.wait_node, cx) - }; - - if let Poll::Ready(()) = poll_res { - // The cancellation_token was signalled - mut_self.cancellation_token = None; - // A signalled Token means the Waker won't be enqueued anymore - mut_self.is_registered = false; - mut_self.wait_node.task = None; - } else { - // This `Future` and its stored `Waker` stay registered at the - // `CancellationToken` - mut_self.is_registered = true; - } - - poll_res - } -} - -impl<'a> Drop for WaitForCancellationFuture<'a> { - fn drop(&mut self) { - // If this WaitForCancellationFuture has been polled and it was added to the - // wait queue at the cancellation_token, it must be removed before dropping. - // Otherwise the cancellation_token would access invalid memory. - if let Some(token) = self.cancellation_token { - if self.is_registered { - token.unregister(&mut self.wait_node); - } - } - } -} - -/// Tracks how the future had interacted with the [`CancellationToken`] -#[derive(Copy, Clone, Debug, PartialEq, Eq)] -enum PollState { - /// The task has never interacted with the [`CancellationToken`]. - New, - /// The task was added to the wait queue at the [`CancellationToken`]. - Waiting, - /// The task has been polled to completion. - Done, -} - -/// Tracks the WaitForCancellationFuture waiting state. -/// Access to this struct is synchronized through the mutex in the CancellationToken. -struct WaitQueueEntry { - /// The task handle of the waiting task - task: Option, - // Current polling state. This state is only updated inside the Mutex of - // the CancellationToken. 
- state: PollState, -} - -impl WaitQueueEntry { - /// Creates a new WaitQueueEntry - fn new() -> WaitQueueEntry { - WaitQueueEntry { - task: None, - state: PollState::New, - } - } -} - -struct SynchronizedState { - waiters: LinkedList, - first_child: Option>, - is_cancelled: bool, -} - -impl SynchronizedState { - fn new() -> Self { - Self { - waiters: LinkedList::new(), - first_child: None, - is_cancelled: false, - } - } -} - -/// Information embedded in child tokens which is synchronized through the Mutex -/// in their parent. -struct SynchronizedThroughParent { - next_peer: Option>, - prev_peer: Option>, -} - -/// Possible states of a `CancellationToken` -#[derive(Debug, Copy, Clone, PartialEq, Eq)] -enum CancellationState { - NotCancelled = 0, - Cancelling = 1, - Cancelled = 2, -} - -impl CancellationState { - fn pack(self) -> usize { - self as usize - } - - fn unpack(value: usize) -> Self { - match value { - 0 => CancellationState::NotCancelled, - 1 => CancellationState::Cancelling, - 2 => CancellationState::Cancelled, - _ => unreachable!("Invalid value"), - } - } -} - -#[derive(Debug, Copy, Clone, PartialEq, Eq)] -struct StateSnapshot { - /// The amount of references to this particular CancellationToken. - /// `CancellationToken` structs hold these references to a `CancellationTokenState`. - /// Also the state is referenced by the state of each child. - refcount: usize, - /// Whether the state is still referenced by it's parent and can therefore - /// not be freed. - has_parent_ref: bool, - /// Whether the token is cancelled - cancel_state: CancellationState, -} - -impl StateSnapshot { - /// Packs the snapshot into a `usize` - fn pack(self) -> usize { - self.refcount << 3 | if self.has_parent_ref { 4 } else { 0 } | self.cancel_state.pack() - } - - /// Unpacks the snapshot from a `usize` - fn unpack(value: usize) -> Self { - let refcount = value >> 3; - let has_parent_ref = value & 4 != 0; - let cancel_state = CancellationState::unpack(value & 0x03); - - StateSnapshot { - refcount, - has_parent_ref, - cancel_state, - } - } - - /// Whether this `CancellationTokenState` is still referenced by any - /// `CancellationToken`. - fn has_refs(&self) -> bool { - self.refcount != 0 || self.has_parent_ref - } -} - -/// The maximum permitted amount of references to a CancellationToken. This -/// is derived from the intent to never use more than 32bit in the `Snapshot`. 
-const MAX_REFS: u32 = (std::u32::MAX - 7) >> 3; - -/// Internal state of the `CancellationToken` pair above -struct CancellationTokenState { - state: AtomicUsize, - parent: Option>, - from_parent: SynchronizedThroughParent, - synchronized: Mutex, -} - -impl CancellationTokenState { - fn new( - parent: Option>, - state: StateSnapshot, - ) -> CancellationTokenState { - CancellationTokenState { - parent, - from_parent: SynchronizedThroughParent { - prev_peer: None, - next_peer: None, - }, - state: AtomicUsize::new(state.pack()), - synchronized: Mutex::new(SynchronizedState::new()), - } - } - - /// Returns a snapshot of the current atomic state of the token - fn snapshot(&self) -> StateSnapshot { - StateSnapshot::unpack(self.state.load(Ordering::SeqCst)) - } - - fn atomic_update_state(&self, mut current_state: StateSnapshot, func: F) -> StateSnapshot - where - F: Fn(StateSnapshot) -> StateSnapshot, - { - let mut current_packed_state = current_state.pack(); - loop { - let next_state = func(current_state); - match self.state.compare_exchange( - current_packed_state, - next_state.pack(), - Ordering::SeqCst, - Ordering::SeqCst, - ) { - Ok(_) => { - return next_state; - } - Err(actual) => { - current_packed_state = actual; - current_state = StateSnapshot::unpack(actual); - } - } - } - } - - fn increment_refcount(&self, current_state: StateSnapshot) -> StateSnapshot { - self.atomic_update_state(current_state, |mut state: StateSnapshot| { - if state.refcount >= MAX_REFS as usize { - eprintln!("[ERROR] Maximum reference count for CancellationToken was exceeded"); - std::process::abort(); - } - state.refcount += 1; - state - }) - } - - fn decrement_refcount(&self, current_state: StateSnapshot) -> StateSnapshot { - let current_state = self.atomic_update_state(current_state, |mut state: StateSnapshot| { - state.refcount -= 1; - state - }); - - // Drop the State if it is not referenced anymore - if !current_state.has_refs() { - // Safety: `CancellationTokenState` is always stored in refcounted - // Boxes - let _ = unsafe { Box::from_raw(self as *const Self as *mut Self) }; - } - - current_state - } - - fn remove_parent_ref(&self, current_state: StateSnapshot) -> StateSnapshot { - let current_state = self.atomic_update_state(current_state, |mut state: StateSnapshot| { - state.has_parent_ref = false; - state - }); - - // Drop the State if it is not referenced anymore - if !current_state.has_refs() { - // Safety: `CancellationTokenState` is always stored in refcounted - // Boxes - let _ = unsafe { Box::from_raw(self as *const Self as *mut Self) }; - } - - current_state - } - - /// Unregisters a child from the parent token. - /// The child tokens state is not exactly known at this point in time. - /// If the parent token is cancelled, the child token gets removed from the - /// parents list, and might therefore already have been freed. If the parent - /// token is not cancelled, the child token is still valid. - fn unregister_child( - &mut self, - mut child_state: NonNull, - current_child_state: StateSnapshot, - ) { - let removed_child = { - // Remove the child toke from the parents linked list - let mut guard = self.synchronized.lock().unwrap(); - if !guard.is_cancelled { - // Safety: Since the token was not cancelled, the child must - // still be in the list and valid. 
- let mut child_state = unsafe { child_state.as_mut() }; - debug_assert!(child_state.snapshot().has_parent_ref); - - if guard.first_child == Some(child_state.into()) { - guard.first_child = child_state.from_parent.next_peer; - } - // Safety: If peers wouldn't be valid anymore, they would try - // to remove themselves from the list. This would require locking - // the Mutex that we currently own. - unsafe { - if let Some(mut prev_peer) = child_state.from_parent.prev_peer { - prev_peer.as_mut().from_parent.next_peer = - child_state.from_parent.next_peer; - } - if let Some(mut next_peer) = child_state.from_parent.next_peer { - next_peer.as_mut().from_parent.prev_peer = - child_state.from_parent.prev_peer; - } - } - child_state.from_parent.prev_peer = None; - child_state.from_parent.next_peer = None; - - // The child is no longer referenced by the parent, since we were able - // to remove its reference from the parents list. - true - } else { - // Do not touch the linked list anymore. If the parent is cancelled - // it will move all childs outside of the Mutex and manipulate - // the pointers there. Manipulating the pointers here too could - // lead to races. Therefore leave them just as as and let the - // parent deal with it. The parent will make sure to retain a - // reference to this state as long as it manipulates the list - // pointers. Therefore the pointers are not dangling. - false - } - }; - - if removed_child { - // If the token removed itself from the parents list, it can reset - // the the parent ref status. If it is isn't able to do so, because the - // parent removed it from the list, there is no need to do this. - // The parent ref acts as as another reference count. Therefore - // removing this reference can free the object. - // Safety: The token was in the list. This means the parent wasn't - // cancelled before, and the token must still be alive. - unsafe { child_state.as_mut().remove_parent_ref(current_child_state) }; - } - - // Decrement the refcount on the parent and free it if necessary - self.decrement_refcount(self.snapshot()); - } - - fn cancel(&self) { - // Move the state of the CancellationToken from `NotCancelled` to `Cancelling` - let mut current_state = self.snapshot(); - - let state_after_cancellation = loop { - if current_state.cancel_state != CancellationState::NotCancelled { - // Another task already initiated the cancellation - return; - } - - let mut next_state = current_state; - next_state.cancel_state = CancellationState::Cancelling; - match self.state.compare_exchange( - current_state.pack(), - next_state.pack(), - Ordering::SeqCst, - Ordering::SeqCst, - ) { - Ok(_) => break next_state, - Err(actual) => current_state = StateSnapshot::unpack(actual), - } - }; - - // This task cancelled the token - - // Take the task list out of the Token - // We do not want to cancel child token inside this lock. If one of the - // child tasks would have additional child tokens, we would recursively - // take locks. - - // Doing this action has an impact if the child token is dropped concurrently: - // It will try to deregister itself from the parent task, but can not find - // itself in the task list anymore. Therefore it needs to assume the parent - // has extracted the list and will process it. It may not modify the list. - // This is OK from a memory safety perspective, since the parent still - // retains a reference to the child task until it finished iterating over - // it. 
- - let mut first_child = { - let mut guard = self.synchronized.lock().unwrap(); - // Save the cancellation also inside the Mutex - // This allows child tokens which want to detach themselves to detect - // that this is no longer required since the parent cleared the list. - guard.is_cancelled = true; - - // Wakeup all waiters - // This happens inside the lock to make cancellation reliable - // If we would access waiters outside of the lock, the pointers - // may no longer be valid. - // Typically this shouldn't be an issue, since waking a task should - // only move it from the blocked into the ready state and not have - // further side effects. - - // Use a reverse iterator, so that the oldest waiter gets - // scheduled first - guard.waiters.reverse_drain(|waiter| { - // We are not allowed to move the `Waker` out of the list node. - // The `Future` relies on the fact that the old `Waker` stays there - // as long as the `Future` has not completed in order to perform - // the `will_wake()` check. - // Therefore `wake_by_ref` is used instead of `wake()` - if let Some(handle) = &mut waiter.task { - handle.wake_by_ref(); - } - // Mark the waiter to have been removed from the list. - waiter.state = PollState::Done; - }); - - guard.first_child.take() - }; - - while let Some(mut child) = first_child { - // Safety: We know this is a valid pointer since it is in our child pointer - // list. It can't have been freed in between, since we retain a a reference - // to each child. - let mut_child = unsafe { child.as_mut() }; - - // Get the next child and clean up list pointers - first_child = mut_child.from_parent.next_peer; - mut_child.from_parent.prev_peer = None; - mut_child.from_parent.next_peer = None; - - // Cancel the child task - mut_child.cancel(); - - // Drop the parent reference. This `CancellationToken` is not interested - // in interacting with the child anymore. - // This is ONLY allowed once we promised not to touch the state anymore - // after this interaction. - mut_child.remove_parent_ref(mut_child.snapshot()); - } - - // The cancellation has completed - // At this point in time tasks which registered a wait node can be sure - // that this wait node already had been dequeued from the list without - // needing to inspect the list. - self.atomic_update_state(state_after_cancellation, |mut state| { - state.cancel_state = CancellationState::Cancelled; - state - }); - } - - /// Returns `true` if the `CancellationToken` had been cancelled - fn is_cancelled(&self) -> bool { - let current_state = self.snapshot(); - current_state.cancel_state != CancellationState::NotCancelled - } - - /// Registers a waiting task at the `CancellationToken`. - /// Safety: This method is only safe as long as the waiting waiting task - /// will properly unregister the wait node before it gets moved. - unsafe fn register( - &self, - wait_node: &mut ListNode, - cx: &mut Context<'_>, - ) -> Poll<()> { - debug_assert_eq!(PollState::New, wait_node.state); - let current_state = self.snapshot(); - - // Perform an optimistic cancellation check before. This is not strictly - // necessary since we also check for cancellation in the Mutex, but - // reduces the necessary work to be performed for tasks which already - // had been cancelled. - if current_state.cancel_state != CancellationState::NotCancelled { - return Poll::Ready(()); - } - - // So far the token is not cancelled. However it could be cancelld before - // we get the chance to store the `Waker`. 
Therfore we need to check - // for cancellation again inside the mutex. - let mut guard = self.synchronized.lock().unwrap(); - if guard.is_cancelled { - // Cancellation was signalled - wait_node.state = PollState::Done; - Poll::Ready(()) - } else { - // Added the task to the wait queue - wait_node.task = Some(cx.waker().clone()); - wait_node.state = PollState::Waiting; - guard.waiters.add_front(wait_node); - Poll::Pending - } - } - - fn check_for_cancellation( - &self, - wait_node: &mut ListNode, - cx: &mut Context<'_>, - ) -> Poll<()> { - debug_assert!( - wait_node.task.is_some(), - "Method can only be called after task had been registered" - ); - - let current_state = self.snapshot(); - - if current_state.cancel_state != CancellationState::NotCancelled { - // If the cancellation had been fully completed we know that our `Waker` - // is no longer registered at the `CancellationToken`. - // Otherwise the cancel call may or may not yet have iterated - // through the waiters list and removed the wait nodes. - // If it hasn't yet, we need to remove it. Otherwise an attempt to - // reuse the `wait_node´ might get freed due to the `WaitForCancellationFuture` - // getting dropped before the cancellation had interacted with it. - if current_state.cancel_state != CancellationState::Cancelled { - self.unregister(wait_node); - } - Poll::Ready(()) - } else { - // Check if we need to swap the `Waker`. This will make the check more - // expensive, since the `Waker` is synchronized through the Mutex. - // If we don't need to perform a `Waker` update, an atomic check for - // cancellation is sufficient. - let need_waker_update = wait_node - .task - .as_ref() - .map(|waker| waker.will_wake(cx.waker())) - .unwrap_or(true); - - if need_waker_update { - let guard = self.synchronized.lock().unwrap(); - if guard.is_cancelled { - // Cancellation was signalled. Since this cancellation signal - // is set inside the Mutex, the old waiter must already have - // been removed from the waiting list - debug_assert_eq!(PollState::Done, wait_node.state); - wait_node.task = None; - Poll::Ready(()) - } else { - // The WaitForCancellationFuture is already in the queue. - // The CancellationToken can't have been cancelled, - // since this would change the is_cancelled flag inside the mutex. - // Therefore we just have to update the Waker. A follow-up - // cancellation will always use the new waker. - wait_node.task = Some(cx.waker().clone()); - Poll::Pending - } - } else { - // Do nothing. If the token gets cancelled, this task will get - // woken again and can fetch the cancellation. - Poll::Pending - } - } - } - - fn unregister(&self, wait_node: &mut ListNode) { - debug_assert!( - wait_node.task.is_some(), - "waiter can not be active without task" - ); - - let mut guard = self.synchronized.lock().unwrap(); - // WaitForCancellationFuture only needs to get removed if it has been added to - // the wait queue of the CancellationToken. - // This has happened in the PollState::Waiting case. - if let PollState::Waiting = wait_node.state { - // Safety: Due to the state, we know that the node must be part - // of the waiter list - if !unsafe { guard.waiters.remove(wait_node) } { - // Panic if the address isn't found. This can only happen if the contract was - // violated, e.g. the WaitQueueEntry got moved after the initial poll. 
- panic!("Future could not be removed from wait queue"); - } - wait_node.state = PollState::Done; - } - wait_node.task = None; - } -} diff --git a/third_party/rust/tokio-0.2.25/src/sync/mod.rs b/third_party/rust/tokio-0.2.25/src/sync/mod.rs deleted file mode 100644 index f93ff7bb8733..000000000000 --- a/third_party/rust/tokio-0.2.25/src/sync/mod.rs +++ /dev/null @@ -1,486 +0,0 @@ -#![cfg_attr(loom, allow(dead_code, unreachable_pub, unused_imports))] - -//! Synchronization primitives for use in asynchronous contexts. -//! -//! Tokio programs tend to be organized as a set of [tasks] where each task -//! operates independently and may be executed on separate physical threads. The -//! synchronization primitives provided in this module permit these independent -//! tasks to communicate together. -//! -//! [tasks]: crate::task -//! -//! # Message passing -//! -//! The most common form of synchronization in a Tokio program is message -//! passing. Two tasks operate independently and send messages to each other to -//! synchronize. Doing so has the advantage of avoiding shared state. -//! -//! Message passing is implemented using channels. A channel supports sending a -//! message from one producer task to one or more consumer tasks. There are a -//! few flavors of channels provided by Tokio. Each channel flavor supports -//! different message passing patterns. When a channel supports multiple -//! producers, many separate tasks may **send** messages. When a channel -//! supports muliple consumers, many different separate tasks may **receive** -//! messages. -//! -//! Tokio provides many different channel flavors as different message passing -//! patterns are best handled with different implementations. -//! -//! ## `oneshot` channel -//! -//! The [`oneshot` channel][oneshot] supports sending a **single** value from a -//! single producer to a single consumer. This channel is usually used to send -//! the result of a computation to a waiter. -//! -//! **Example:** using a [`oneshot` channel][oneshot] to receive the result of a -//! computation. -//! -//! ``` -//! use tokio::sync::oneshot; -//! -//! async fn some_computation() -> String { -//! "represents the result of the computation".to_string() -//! } -//! -//! #[tokio::main] -//! async fn main() { -//! let (tx, rx) = oneshot::channel(); -//! -//! tokio::spawn(async move { -//! let res = some_computation().await; -//! tx.send(res).unwrap(); -//! }); -//! -//! // Do other work while the computation is happening in the background -//! -//! // Wait for the computation result -//! let res = rx.await.unwrap(); -//! } -//! ``` -//! -//! Note, if the task produces a computation result as its final -//! action before terminating, the [`JoinHandle`] can be used to -//! receive that value instead of allocating resources for the -//! `oneshot` channel. Awaiting on [`JoinHandle`] returns `Result`. If -//! the task panics, the `Joinhandle` yields `Err` with the panic -//! cause. -//! -//! **Example:** -//! -//! ``` -//! async fn some_computation() -> String { -//! "the result of the computation".to_string() -//! } -//! -//! #[tokio::main] -//! async fn main() { -//! let join_handle = tokio::spawn(async move { -//! some_computation().await -//! }); -//! -//! // Do other work while the computation is happening in the background -//! -//! // Wait for the computation result -//! let res = join_handle.await.unwrap(); -//! } -//! ``` -//! -//! [oneshot]: oneshot -//! [`JoinHandle`]: crate::task::JoinHandle -//! -//! ## `mpsc` channel -//! -//! 
The [`mpsc` channel][mpsc] supports sending **many** values from **many** -//! producers to a single consumer. This channel is often used to send work to a -//! task or to receive the result of many computations. -//! -//! **Example:** using an mpsc to incrementally stream the results of a series -//! of computations. -//! -//! ``` -//! use tokio::sync::mpsc; -//! -//! async fn some_computation(input: u32) -> String { -//! format!("the result of computation {}", input) -//! } -//! -//! #[tokio::main] -//! async fn main() { -//! let (mut tx, mut rx) = mpsc::channel(100); -//! -//! tokio::spawn(async move { -//! for i in 0..10 { -//! let res = some_computation(i).await; -//! tx.send(res).await.unwrap(); -//! } -//! }); -//! -//! while let Some(res) = rx.recv().await { -//! println!("got = {}", res); -//! } -//! } -//! ``` -//! -//! The argument to `mpsc::channel` is the channel capacity. This is the maximum -//! number of values that can be stored in the channel pending receipt at any -//! given time. Properly setting this value is key in implementing robust -//! programs as the channel capacity plays a critical part in handling back -//! pressure. -//! -//! A common concurrency pattern for resource management is to spawn a task -//! dedicated to managing that resource and using message passing between other -//! tasks to interact with the resource. The resource may be anything that may -//! not be concurrently used. Some examples include a socket and program state. -//! For example, if multiple tasks need to send data over a single socket, spawn -//! a task to manage the socket and use a channel to synchronize. -//! -//! **Example:** sending data from many tasks over a single socket using message -//! passing. -//! -//! ```no_run -//! use tokio::io::{self, AsyncWriteExt}; -//! use tokio::net::TcpStream; -//! use tokio::sync::mpsc; -//! -//! #[tokio::main] -//! async fn main() -> io::Result<()> { -//! let mut socket = TcpStream::connect("www.example.com:1234").await?; -//! let (tx, mut rx) = mpsc::channel(100); -//! -//! for _ in 0..10 { -//! // Each task needs its own `tx` handle. This is done by cloning the -//! // original handle. -//! let mut tx = tx.clone(); -//! -//! tokio::spawn(async move { -//! tx.send(&b"data to write"[..]).await.unwrap(); -//! }); -//! } -//! -//! // The `rx` half of the channel returns `None` once **all** `tx` clones -//! // drop. To ensure `None` is returned, drop the handle owned by the -//! // current task. If this `tx` handle is not dropped, there will always -//! // be a single outstanding `tx` handle. -//! drop(tx); -//! -//! while let Some(res) = rx.recv().await { -//! socket.write_all(res).await?; -//! } -//! -//! Ok(()) -//! } -//! ``` -//! -//! The [`mpsc`][mpsc] and [`oneshot`][oneshot] channels can be combined to -//! provide a request / response type synchronization pattern with a shared -//! resource. A task is spawned to synchronize a resource and waits on commands -//! received on a [`mpsc`][mpsc] channel. Each command includes a -//! [`oneshot`][oneshot] `Sender` on which the result of the command is sent. -//! -//! **Example:** use a task to synchronize a `u64` counter. Each task sends an -//! "fetch and increment" command. The counter value **before** the increment is -//! sent over the provided `oneshot` channel. -//! -//! ``` -//! use tokio::sync::{oneshot, mpsc}; -//! use Command::Increment; -//! -//! enum Command { -//! Increment, -//! // Other commands can be added here -//! } -//! -//! #[tokio::main] -//! async fn main() { -//! 
let (cmd_tx, mut cmd_rx) = mpsc::channel::<(Command, oneshot::Sender)>(100); -//! -//! // Spawn a task to manage the counter -//! tokio::spawn(async move { -//! let mut counter: u64 = 0; -//! -//! while let Some((cmd, response)) = cmd_rx.recv().await { -//! match cmd { -//! Increment => { -//! let prev = counter; -//! counter += 1; -//! response.send(prev).unwrap(); -//! } -//! } -//! } -//! }); -//! -//! let mut join_handles = vec![]; -//! -//! // Spawn tasks that will send the increment command. -//! for _ in 0..10 { -//! let mut cmd_tx = cmd_tx.clone(); -//! -//! join_handles.push(tokio::spawn(async move { -//! let (resp_tx, resp_rx) = oneshot::channel(); -//! -//! cmd_tx.send((Increment, resp_tx)).await.ok().unwrap(); -//! let res = resp_rx.await.unwrap(); -//! -//! println!("previous value = {}", res); -//! })); -//! } -//! -//! // Wait for all tasks to complete -//! for join_handle in join_handles.drain(..) { -//! join_handle.await.unwrap(); -//! } -//! } -//! ``` -//! -//! [mpsc]: mpsc -//! -//! ## `broadcast` channel -//! -//! The [`broadcast` channel] supports sending **many** values from -//! **many** producers to **many** consumers. Each consumer will receive -//! **each** value. This channel can be used to implement "fan out" style -//! patterns common with pub / sub or "chat" systems. -//! -//! This channel tends to be used less often than `oneshot` and `mpsc` but still -//! has its use cases. -//! -//! Basic usage -//! -//! ``` -//! use tokio::sync::broadcast; -//! -//! #[tokio::main] -//! async fn main() { -//! let (tx, mut rx1) = broadcast::channel(16); -//! let mut rx2 = tx.subscribe(); -//! -//! tokio::spawn(async move { -//! assert_eq!(rx1.recv().await.unwrap(), 10); -//! assert_eq!(rx1.recv().await.unwrap(), 20); -//! }); -//! -//! tokio::spawn(async move { -//! assert_eq!(rx2.recv().await.unwrap(), 10); -//! assert_eq!(rx2.recv().await.unwrap(), 20); -//! }); -//! -//! tx.send(10).unwrap(); -//! tx.send(20).unwrap(); -//! } -//! ``` -//! -//! [`broadcast` channel]: crate::sync::broadcast -//! -//! ## `watch` channel -//! -//! The [`watch` channel] supports sending **many** values from a **single** -//! producer to **many** consumers. However, only the **most recent** value is -//! stored in the channel. Consumers are notified when a new value is sent, but -//! there is no guarantee that consumers will see **all** values. -//! -//! The [`watch` channel] is similar to a [`broadcast` channel] with capacity 1. -//! -//! Use cases for the [`watch` channel] include broadcasting configuration -//! changes or signalling program state changes, such as transitioning to -//! shutdown. -//! -//! **Example:** use a [`watch` channel] to notify tasks of configuration -//! changes. In this example, a configuration file is checked periodically. When -//! the file changes, the configuration changes are signalled to consumers. -//! -//! ``` -//! use tokio::sync::watch; -//! use tokio::time::{self, Duration, Instant}; -//! -//! use std::io; -//! -//! #[derive(Debug, Clone, Eq, PartialEq)] -//! struct Config { -//! timeout: Duration, -//! } -//! -//! impl Config { -//! async fn load_from_file() -> io::Result { -//! // file loading and deserialization logic here -//! # Ok(Config { timeout: Duration::from_secs(1) }) -//! } -//! } -//! -//! async fn my_async_operation() { -//! // Do something here -//! } -//! -//! #[tokio::main] -//! async fn main() { -//! // Load initial configuration value -//! let mut config = Config::load_from_file().await.unwrap(); -//! -//! 
// Create the watch channel, initialized with the loaded configuration -//! let (tx, rx) = watch::channel(config.clone()); -//! -//! // Spawn a task to monitor the file. -//! tokio::spawn(async move { -//! loop { -//! // Wait 10 seconds between checks -//! time::delay_for(Duration::from_secs(10)).await; -//! -//! // Load the configuration file -//! let new_config = Config::load_from_file().await.unwrap(); -//! -//! // If the configuration changed, send the new config value -//! // on the watch channel. -//! if new_config != config { -//! tx.broadcast(new_config.clone()).unwrap(); -//! config = new_config; -//! } -//! } -//! }); -//! -//! let mut handles = vec![]; -//! -//! // Spawn tasks that runs the async operation for at most `timeout`. If -//! // the timeout elapses, restart the operation. -//! // -//! // The task simultaneously watches the `Config` for changes. When the -//! // timeout duration changes, the timeout is updated without restarting -//! // the in-flight operation. -//! for _ in 0..5 { -//! // Clone a config watch handle for use in this task -//! let mut rx = rx.clone(); -//! -//! let handle = tokio::spawn(async move { -//! // Start the initial operation and pin the future to the stack. -//! // Pinning to the stack is required to resume the operation -//! // across multiple calls to `select!` -//! let op = my_async_operation(); -//! tokio::pin!(op); -//! -//! // Receive the **initial** configuration value. As this is the -//! // first time the config is received from the watch, it will -//! // always complete immediatedly. -//! let mut conf = rx.recv().await.unwrap(); -//! -//! let mut op_start = Instant::now(); -//! let mut delay = time::delay_until(op_start + conf.timeout); -//! -//! loop { -//! tokio::select! { -//! _ = &mut delay => { -//! // The operation elapsed. Restart it -//! op.set(my_async_operation()); -//! -//! // Track the new start time -//! op_start = Instant::now(); -//! -//! // Restart the timeout -//! delay = time::delay_until(op_start + conf.timeout); -//! } -//! new_conf = rx.recv() => { -//! conf = new_conf.unwrap(); -//! -//! // The configuration has been updated. Update the -//! // `delay` using the new `timeout` value. -//! delay.reset(op_start + conf.timeout); -//! } -//! _ = &mut op => { -//! // The operation completed! -//! return -//! } -//! } -//! } -//! }); -//! -//! handles.push(handle); -//! } -//! -//! for handle in handles.drain(..) { -//! handle.await.unwrap(); -//! } -//! } -//! ``` -//! -//! [`watch` channel]: mod@crate::sync::watch -//! [`broadcast` channel]: mod@crate::sync::broadcast -//! -//! # State synchronization -//! -//! The remaining synchronization primitives focus on synchronizing state. -//! These are asynchronous equivalents to versions provided by `std`. They -//! operate in a similar way as their `std` counterparts parts but will wait -//! asynchronously instead of blocking the thread. -//! -//! * [`Barrier`](Barrier) Ensures multiple tasks will wait for each other to -//! reach a point in the program, before continuing execution all together. -//! -//! * [`Mutex`](Mutex) Mutual Exclusion mechanism, which ensures that at most -//! one thread at a time is able to access some data. -//! -//! * [`Notify`](Notify) Basic task notification. `Notify` supports notifying a -//! receiving task without sending data. In this case, the task wakes up and -//! resumes processing. -//! -//! * [`RwLock`](RwLock) Provides a mutual exclusion mechanism which allows -//! 
multiple readers at the same time, while allowing only one writer at a -//! time. In some cases, this can be more efficient than a mutex. -//! -//! * [`Semaphore`](Semaphore) Limits the amount of concurrency. A semaphore -//! holds a number of permits, which tasks may request in order to enter a -//! critical section. Semaphores are useful for implementing limiting or -//! bounding of any kind. - -cfg_sync! { - mod barrier; - pub use barrier::{Barrier, BarrierWaitResult}; - - pub mod broadcast; - - cfg_unstable! { - mod cancellation_token; - pub use cancellation_token::{CancellationToken, WaitForCancellationFuture}; - } - - pub mod mpsc; - - mod mutex; - pub use mutex::{Mutex, MutexGuard, TryLockError, OwnedMutexGuard}; - - mod notify; - pub use notify::Notify; - - pub mod oneshot; - - pub(crate) mod batch_semaphore; - pub(crate) mod semaphore_ll; - mod semaphore; - pub use semaphore::{Semaphore, SemaphorePermit, OwnedSemaphorePermit}; - - mod rwlock; - pub use rwlock::{RwLock, RwLockReadGuard, RwLockWriteGuard}; - - mod task; - pub(crate) use task::AtomicWaker; - - pub mod watch; -} - -cfg_not_sync! { - cfg_atomic_waker_impl! { - mod task; - pub(crate) use task::AtomicWaker; - } - - #[cfg(any( - feature = "rt-core", - feature = "process", - feature = "signal"))] - pub(crate) mod oneshot; - - cfg_signal! { - pub(crate) mod mpsc; - pub(crate) mod semaphore_ll; - } -} - -/// Unit tests -#[cfg(test)] -mod tests; diff --git a/third_party/rust/tokio-0.2.25/src/sync/mpsc/block.rs b/third_party/rust/tokio-0.2.25/src/sync/mpsc/block.rs deleted file mode 100644 index 7bf161967ba2..000000000000 --- a/third_party/rust/tokio-0.2.25/src/sync/mpsc/block.rs +++ /dev/null @@ -1,387 +0,0 @@ -use crate::loom::{ - cell::UnsafeCell, - sync::atomic::{AtomicPtr, AtomicUsize}, - thread, -}; - -use std::mem::MaybeUninit; -use std::ops; -use std::ptr::{self, NonNull}; -use std::sync::atomic::Ordering::{self, AcqRel, Acquire, Release}; - -/// A block in a linked list. -/// -/// Each block in the list can hold up to `BLOCK_CAP` messages. -pub(crate) struct Block { - /// The start index of this block. - /// - /// Slots in this block have indices in `start_index .. start_index + BLOCK_CAP`. - start_index: usize, - - /// The next block in the linked list. - next: AtomicPtr>, - - /// Bitfield tracking slots that are ready to have their values consumed. - ready_slots: AtomicUsize, - - /// The observed `tail_position` value *after* the block has been passed by - /// `block_tail`. - observed_tail_position: UnsafeCell, - - /// Array containing values pushed into the block. Values are stored in a - /// continuous array in order to improve cache line behavior when reading. - /// The values must be manually dropped. - values: Values, -} - -pub(crate) enum Read { - Value(T), - Closed, -} - -struct Values([UnsafeCell>; BLOCK_CAP]); - -use super::BLOCK_CAP; - -/// Masks an index to get the block identifier -const BLOCK_MASK: usize = !(BLOCK_CAP - 1); - -/// Masks an index to get the value offset in a block. -const SLOT_MASK: usize = BLOCK_CAP - 1; - -/// Flag tracking that a block has gone through the sender's release routine. -/// -/// When this is set, the receiver may consider freeing the block. -const RELEASED: usize = 1 << BLOCK_CAP; - -/// Flag tracking all senders dropped. -/// -/// When this flag is set, the send half of the channel has closed. -const TX_CLOSED: usize = RELEASED << 1; - -/// Mask covering all bits used to track slot readiness. 
-const READY_MASK: usize = RELEASED - 1; - -/// Returns the index of the first slot in the block referenced by `slot_index`. -#[inline(always)] -pub(crate) fn start_index(slot_index: usize) -> usize { - BLOCK_MASK & slot_index -} - -/// Returns the offset into the block referenced by `slot_index`. -#[inline(always)] -pub(crate) fn offset(slot_index: usize) -> usize { - SLOT_MASK & slot_index -} - -impl Block { - pub(crate) fn new(start_index: usize) -> Block { - Block { - // The absolute index in the channel of the first slot in the block. - start_index, - - // Pointer to the next block in the linked list. - next: AtomicPtr::new(ptr::null_mut()), - - ready_slots: AtomicUsize::new(0), - - observed_tail_position: UnsafeCell::new(0), - - // Value storage - values: unsafe { Values::uninitialized() }, - } - } - - /// Returns `true` if the block matches the given index - pub(crate) fn is_at_index(&self, index: usize) -> bool { - debug_assert!(offset(index) == 0); - self.start_index == index - } - - /// Returns the number of blocks between `self` and the block at the - /// specified index. - /// - /// `start_index` must represent a block *after* `self`. - pub(crate) fn distance(&self, other_index: usize) -> usize { - debug_assert!(offset(other_index) == 0); - other_index.wrapping_sub(self.start_index) / BLOCK_CAP - } - - /// Reads the value at the given offset. - /// - /// Returns `None` if the slot is empty. - /// - /// # Safety - /// - /// To maintain safety, the caller must ensure: - /// - /// * No concurrent access to the slot. - pub(crate) unsafe fn read(&self, slot_index: usize) -> Option> { - let offset = offset(slot_index); - - let ready_bits = self.ready_slots.load(Acquire); - - if !is_ready(ready_bits, offset) { - if is_tx_closed(ready_bits) { - return Some(Read::Closed); - } - - return None; - } - - // Get the value - let value = self.values[offset].with(|ptr| ptr::read(ptr)); - - Some(Read::Value(value.assume_init())) - } - - /// Writes a value to the block at the given offset. - /// - /// # Safety - /// - /// To maintain safety, the caller must ensure: - /// - /// * The slot is empty. - /// * No concurrent access to the slot. - pub(crate) unsafe fn write(&self, slot_index: usize, value: T) { - // Get the offset into the block - let slot_offset = offset(slot_index); - - self.values[slot_offset].with_mut(|ptr| { - ptr::write(ptr, MaybeUninit::new(value)); - }); - - // Release the value. After this point, the slot ref may no longer - // be used. It is possible for the receiver to free the memory at - // any point. - self.set_ready(slot_offset); - } - - /// Signal to the receiver that the sender half of the list is closed. - pub(crate) unsafe fn tx_close(&self) { - self.ready_slots.fetch_or(TX_CLOSED, Release); - } - - /// Resets the block to a blank state. This enables reusing blocks in the - /// channel. - /// - /// # Safety - /// - /// To maintain safety, the caller must ensure: - /// - /// * All slots are empty. - /// * The caller holds a unique pointer to the block. - pub(crate) unsafe fn reclaim(&mut self) { - self.start_index = 0; - self.next = AtomicPtr::new(ptr::null_mut()); - self.ready_slots = AtomicUsize::new(0); - } - - /// Releases the block to the rx half for freeing. - /// - /// This function is called by the tx half once it can be guaranteed that no - /// more senders will attempt to access the block. - /// - /// # Safety - /// - /// To maintain safety, the caller must ensure: - /// - /// * The block will no longer be accessed by any sender. 
- pub(crate) unsafe fn tx_release(&self, tail_position: usize) { - // Track the observed tail_position. Any sender targetting a greater - // tail_position is guaranteed to not access this block. - self.observed_tail_position - .with_mut(|ptr| *ptr = tail_position); - - // Set the released bit, signalling to the receiver that it is safe to - // free the block's memory as soon as all slots **prior** to - // `observed_tail_position` have been filled. - self.ready_slots.fetch_or(RELEASED, Release); - } - - /// Mark a slot as ready - fn set_ready(&self, slot: usize) { - let mask = 1 << slot; - self.ready_slots.fetch_or(mask, Release); - } - - /// Returns `true` when all slots have their `ready` bits set. - /// - /// This indicates that the block is in its final state and will no longer - /// be mutated. - /// - /// # Implementation - /// - /// The implementation walks each slot checking the `ready` flag. It might - /// be that it would make more sense to coalesce ready flags as bits in a - /// single atomic cell. However, this could have negative impact on cache - /// behavior as there would be many more mutations to a single slot. - pub(crate) fn is_final(&self) -> bool { - self.ready_slots.load(Acquire) & READY_MASK == READY_MASK - } - - /// Returns the `observed_tail_position` value, if set - pub(crate) fn observed_tail_position(&self) -> Option { - if 0 == RELEASED & self.ready_slots.load(Acquire) { - None - } else { - Some(self.observed_tail_position.with(|ptr| unsafe { *ptr })) - } - } - - /// Loads the next block - pub(crate) fn load_next(&self, ordering: Ordering) -> Option>> { - let ret = NonNull::new(self.next.load(ordering)); - - debug_assert!(unsafe { - ret.map(|block| block.as_ref().start_index == self.start_index.wrapping_add(BLOCK_CAP)) - .unwrap_or(true) - }); - - ret - } - - /// Pushes `block` as the next block in the link. - /// - /// Returns Ok if successful, otherwise, a pointer to the next block in - /// the list is returned. - /// - /// This requires that the next pointer is null. - /// - /// # Ordering - /// - /// This performs a compare-and-swap on `next` using AcqRel ordering. - /// - /// # Safety - /// - /// To maintain safety, the caller must ensure: - /// - /// * `block` is not freed until it has been removed from the list. - pub(crate) unsafe fn try_push( - &self, - block: &mut NonNull>, - ordering: Ordering, - ) -> Result<(), NonNull>> { - block.as_mut().start_index = self.start_index.wrapping_add(BLOCK_CAP); - - let next_ptr = self - .next - .compare_and_swap(ptr::null_mut(), block.as_ptr(), ordering); - - match NonNull::new(next_ptr) { - Some(next_ptr) => Err(next_ptr), - None => Ok(()), - } - } - - /// Grows the `Block` linked list by allocating and appending a new block. - /// - /// The next block in the linked list is returned. This may or may not be - /// the one allocated by the function call. - /// - /// # Implementation - /// - /// It is assumed that `self.next` is null. A new block is allocated with - /// `start_index` set to be the next block. A compare-and-swap is performed - /// with AcqRel memory ordering. If the compare-and-swap is successful, the - /// newly allocated block is released to other threads walking the block - /// linked list. If the compare-and-swap fails, the current thread acquires - /// the next block in the linked list, allowing the current thread to access - /// the slots. - pub(crate) fn grow(&self) -> NonNull> { - // Create the new block. It is assumed that the block will become the - // next one after `&self`. 
If this turns out to not be the case, - // `start_index` is updated accordingly. - let new_block = Box::new(Block::new(self.start_index + BLOCK_CAP)); - - let mut new_block = unsafe { NonNull::new_unchecked(Box::into_raw(new_block)) }; - - // Attempt to store the block. The first compare-and-swap attempt is - // "unrolled" due to minor differences in logic - // - // `AcqRel` is used as the ordering **only** when attempting the - // compare-and-swap on self.next. - // - // If the compare-and-swap fails, then the actual value of the cell is - // returned from this function and accessed by the caller. Given this, - // the memory must be acquired. - // - // `Release` ensures that the newly allocated block is available to - // other threads acquiring the next pointer. - let next = NonNull::new(self.next.compare_and_swap( - ptr::null_mut(), - new_block.as_ptr(), - AcqRel, - )); - - let next = match next { - Some(next) => next, - None => { - // The compare-and-swap succeeded and the newly allocated block - // is successfully pushed. - return new_block; - } - }; - - // There already is a next block in the linked list. The newly allocated - // block could be dropped and the discovered next block returned; - // however, that would be wasteful. Instead, the linked list is walked - // by repeatedly attempting to compare-and-swap the pointer into the - // `next` register until the compare-and-swap succeed. - // - // Care is taken to update new_block's start_index field as appropriate. - - let mut curr = next; - - // TODO: Should this iteration be capped? - loop { - let actual = unsafe { curr.as_ref().try_push(&mut new_block, AcqRel) }; - - curr = match actual { - Ok(_) => { - return next; - } - Err(curr) => curr, - }; - - // When running outside of loom, this calls `spin_loop_hint`. - thread::yield_now(); - } - } -} - -/// Returns `true` if the specificed slot has a value ready to be consumed. -fn is_ready(bits: usize, slot: usize) -> bool { - let mask = 1 << slot; - mask == mask & bits -} - -/// Returns `true` if the closed flag has been set. -fn is_tx_closed(bits: usize) -> bool { - TX_CLOSED == bits & TX_CLOSED -} - -impl Values { - unsafe fn uninitialized() -> Values { - let mut vals = MaybeUninit::uninit(); - - // When fuzzing, `UnsafeCell` needs to be initialized. - if_loom! { - let p = vals.as_mut_ptr() as *mut UnsafeCell>; - for i in 0..BLOCK_CAP { - p.add(i) - .write(UnsafeCell::new(MaybeUninit::uninit())); - } - } - - Values(vals.assume_init()) - } -} - -impl ops::Index for Values { - type Output = UnsafeCell>; - - fn index(&self, index: usize) -> &Self::Output { - self.0.index(index) - } -} diff --git a/third_party/rust/tokio-0.2.25/src/sync/mpsc/bounded.rs b/third_party/rust/tokio-0.2.25/src/sync/mpsc/bounded.rs deleted file mode 100644 index afca8c524dd9..000000000000 --- a/third_party/rust/tokio-0.2.25/src/sync/mpsc/bounded.rs +++ /dev/null @@ -1,479 +0,0 @@ -use crate::sync::mpsc::chan; -use crate::sync::mpsc::error::{ClosedError, SendError, TryRecvError, TrySendError}; -use crate::sync::semaphore_ll as semaphore; - -cfg_time! { - use crate::sync::mpsc::error::SendTimeoutError; - use crate::time::Duration; -} - -use std::fmt; -use std::task::{Context, Poll}; - -/// Send values to the associated `Receiver`. -/// -/// Instances are created by the [`channel`](channel) function. 
-pub struct Sender { - chan: chan::Tx, -} - -impl Clone for Sender { - fn clone(&self) -> Self { - Sender { - chan: self.chan.clone(), - } - } -} - -impl fmt::Debug for Sender { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("Sender") - .field("chan", &self.chan) - .finish() - } -} - -/// Receive values from the associated `Sender`. -/// -/// Instances are created by the [`channel`](channel) function. -pub struct Receiver { - /// The channel receiver - chan: chan::Rx, -} - -impl fmt::Debug for Receiver { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("Receiver") - .field("chan", &self.chan) - .finish() - } -} - -/// Creates a bounded mpsc channel for communicating between asynchronous tasks, -/// returning the sender/receiver halves. -/// -/// All data sent on `Sender` will become available on `Receiver` in the same -/// order as it was sent. -/// -/// The `Sender` can be cloned to `send` to the same channel from multiple code -/// locations. Only one `Receiver` is supported. -/// -/// If the `Receiver` is disconnected while trying to `send`, the `send` method -/// will return a `SendError`. Similarly, if `Sender` is disconnected while -/// trying to `recv`, the `recv` method will return a `RecvError`. -/// -/// # Examples -/// -/// ```rust -/// use tokio::sync::mpsc; -/// -/// #[tokio::main] -/// async fn main() { -/// let (mut tx, mut rx) = mpsc::channel(100); -/// -/// tokio::spawn(async move { -/// for i in 0..10 { -/// if let Err(_) = tx.send(i).await { -/// println!("receiver dropped"); -/// return; -/// } -/// } -/// }); -/// -/// while let Some(i) = rx.recv().await { -/// println!("got = {}", i); -/// } -/// } -/// ``` -pub fn channel(buffer: usize) -> (Sender, Receiver) { - assert!(buffer > 0, "mpsc bounded channel requires buffer > 0"); - let semaphore = (semaphore::Semaphore::new(buffer), buffer); - let (tx, rx) = chan::channel(semaphore); - - let tx = Sender::new(tx); - let rx = Receiver::new(rx); - - (tx, rx) -} - -/// Channel semaphore is a tuple of the semaphore implementation and a `usize` -/// representing the channel bound. -type Semaphore = (semaphore::Semaphore, usize); - -impl Receiver { - pub(crate) fn new(chan: chan::Rx) -> Receiver { - Receiver { chan } - } - - /// Receives the next value for this receiver. - /// - /// `None` is returned when all `Sender` halves have dropped, indicating - /// that no further values can be sent on the channel. 
- /// - /// # Examples - /// - /// ``` - /// use tokio::sync::mpsc; - /// - /// #[tokio::main] - /// async fn main() { - /// let (mut tx, mut rx) = mpsc::channel(100); - /// - /// tokio::spawn(async move { - /// tx.send("hello").await.unwrap(); - /// }); - /// - /// assert_eq!(Some("hello"), rx.recv().await); - /// assert_eq!(None, rx.recv().await); - /// } - /// ``` - /// - /// Values are buffered: - /// - /// ``` - /// use tokio::sync::mpsc; - /// - /// #[tokio::main] - /// async fn main() { - /// let (mut tx, mut rx) = mpsc::channel(100); - /// - /// tx.send("hello").await.unwrap(); - /// tx.send("world").await.unwrap(); - /// - /// assert_eq!(Some("hello"), rx.recv().await); - /// assert_eq!(Some("world"), rx.recv().await); - /// } - /// ``` - pub async fn recv(&mut self) -> Option { - use crate::future::poll_fn; - - poll_fn(|cx| self.poll_recv(cx)).await - } - - #[doc(hidden)] // TODO: document - pub fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll> { - self.chan.recv(cx) - } - - /// Attempts to return a pending value on this receiver without blocking. - /// - /// This method will never block the caller in order to wait for data to - /// become available. Instead, this will always return immediately with - /// a possible option of pending data on the channel. - /// - /// This is useful for a flavor of "optimistic check" before deciding to - /// block on a receiver. - /// - /// Compared with recv, this function has two failure cases instead of - /// one (one for disconnection, one for an empty buffer). - pub fn try_recv(&mut self) -> Result { - self.chan.try_recv() - } - - /// Closes the receiving half of a channel, without dropping it. - /// - /// This prevents any further messages from being sent on the channel while - /// still enabling the receiver to drain messages that are buffered. - pub fn close(&mut self) { - self.chan.close(); - } -} - -impl Unpin for Receiver {} - -cfg_stream! { - impl crate::stream::Stream for Receiver { - type Item = T; - - fn poll_next(mut self: std::pin::Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.poll_recv(cx) - } - } -} - -impl Sender { - pub(crate) fn new(chan: chan::Tx) -> Sender { - Sender { chan } - } - - /// Sends a value, waiting until there is capacity. - /// - /// A successful send occurs when it is determined that the other end of the - /// channel has not hung up already. An unsuccessful send would be one where - /// the corresponding receiver has already been closed. Note that a return - /// value of `Err` means that the data will never be received, but a return - /// value of `Ok` does not mean that the data will be received. It is - /// possible for the corresponding receiver to hang up immediately after - /// this function returns `Ok`. - /// - /// # Errors - /// - /// If the receive half of the channel is closed, either due to [`close`] - /// being called or the [`Receiver`] handle dropping, the function returns - /// an error. The error includes the value passed to `send`. - /// - /// [`close`]: Receiver::close - /// [`Receiver`]: Receiver - /// - /// # Examples - /// - /// In the following example, each call to `send` will block until the - /// previously sent value was received. 
- /// - /// ```rust - /// use tokio::sync::mpsc; - /// - /// #[tokio::main] - /// async fn main() { - /// let (mut tx, mut rx) = mpsc::channel(1); - /// - /// tokio::spawn(async move { - /// for i in 0..10 { - /// if let Err(_) = tx.send(i).await { - /// println!("receiver dropped"); - /// return; - /// } - /// } - /// }); - /// - /// while let Some(i) = rx.recv().await { - /// println!("got = {}", i); - /// } - /// } - /// ``` - pub async fn send(&mut self, value: T) -> Result<(), SendError> { - use crate::future::poll_fn; - - if poll_fn(|cx| self.poll_ready(cx)).await.is_err() { - return Err(SendError(value)); - } - - match self.try_send(value) { - Ok(()) => Ok(()), - Err(TrySendError::Full(_)) => unreachable!(), - Err(TrySendError::Closed(value)) => Err(SendError(value)), - } - } - - /// Attempts to immediately send a message on this `Sender` - /// - /// This method differs from [`send`] by returning immediately if the channel's - /// buffer is full or no receiver is waiting to acquire some data. Compared - /// with [`send`], this function has two failure cases instead of one (one for - /// disconnection, one for a full buffer). - /// - /// This function may be paired with [`poll_ready`] in order to wait for - /// channel capacity before trying to send a value. - /// - /// # Errors - /// - /// If the channel capacity has been reached, i.e., the channel has `n` - /// buffered values where `n` is the argument passed to [`channel`], then an - /// error is returned. - /// - /// If the receive half of the channel is closed, either due to [`close`] - /// being called or the [`Receiver`] handle dropping, the function returns - /// an error. The error includes the value passed to `send`. - /// - /// [`send`]: Sender::send - /// [`poll_ready`]: Sender::poll_ready - /// [`channel`]: channel - /// [`close`]: Receiver::close - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::mpsc; - /// - /// #[tokio::main] - /// async fn main() { - /// // Create a channel with buffer size 1 - /// let (mut tx1, mut rx) = mpsc::channel(1); - /// let mut tx2 = tx1.clone(); - /// - /// tokio::spawn(async move { - /// tx1.send(1).await.unwrap(); - /// tx1.send(2).await.unwrap(); - /// // task waits until the receiver receives a value. - /// }); - /// - /// tokio::spawn(async move { - /// // This will return an error and send - /// // no message if the buffer is full - /// let _ = tx2.try_send(3); - /// }); - /// - /// let mut msg; - /// msg = rx.recv().await.unwrap(); - /// println!("message {} received", msg); - /// - /// msg = rx.recv().await.unwrap(); - /// println!("message {} received", msg); - /// - /// // Third message may have never been sent - /// match rx.recv().await { - /// Some(msg) => println!("message {} received", msg), - /// None => println!("the third message was never sent"), - /// } - /// } - /// ``` - pub fn try_send(&mut self, message: T) -> Result<(), TrySendError> { - self.chan.try_send(message)?; - Ok(()) - } - - /// Sends a value, waiting until there is capacity, but only for a limited time. - /// - /// Shares the same success and error conditions as [`send`], adding one more - /// condition for an unsuccessful send, which is when the provided timeout has - /// elapsed, and there is no capacity available. - /// - /// [`send`]: Sender::send - /// - /// # Errors - /// - /// If the receive half of the channel is closed, either due to [`close`] - /// being called or the [`Receiver`] having been dropped, - /// the function returns an error. The error includes the value passed to `send`. 
- /// - /// [`close`]: Receiver::close - /// [`Receiver`]: Receiver - /// - /// # Examples - /// - /// In the following example, each call to `send_timeout` will block until the - /// previously sent value was received, unless the timeout has elapsed. - /// - /// ```rust - /// use tokio::sync::mpsc; - /// use tokio::time::{delay_for, Duration}; - /// - /// #[tokio::main] - /// async fn main() { - /// let (mut tx, mut rx) = mpsc::channel(1); - /// - /// tokio::spawn(async move { - /// for i in 0..10 { - /// if let Err(e) = tx.send_timeout(i, Duration::from_millis(100)).await { - /// println!("send error: #{:?}", e); - /// return; - /// } - /// } - /// }); - /// - /// while let Some(i) = rx.recv().await { - /// println!("got = {}", i); - /// delay_for(Duration::from_millis(200)).await; - /// } - /// } - /// ``` - #[cfg(feature = "time")] - #[cfg_attr(docsrs, doc(cfg(feature = "time")))] - pub async fn send_timeout( - &mut self, - value: T, - timeout: Duration, - ) -> Result<(), SendTimeoutError> { - use crate::future::poll_fn; - - match crate::time::timeout(timeout, poll_fn(|cx| self.poll_ready(cx))).await { - Err(_) => { - return Err(SendTimeoutError::Timeout(value)); - } - Ok(Err(_)) => { - return Err(SendTimeoutError::Closed(value)); - } - Ok(_) => {} - } - - match self.try_send(value) { - Ok(()) => Ok(()), - Err(TrySendError::Full(_)) => unreachable!(), - Err(TrySendError::Closed(value)) => Err(SendTimeoutError::Closed(value)), - } - } - - /// Returns `Poll::Ready(Ok(()))` when the channel is able to accept another item. - /// - /// If the channel is full, then `Poll::Pending` is returned and the task is notified when a - /// slot becomes available. - /// - /// Once `poll_ready` returns `Poll::Ready(Ok(()))`, a call to `try_send` will succeed unless - /// the channel has since been closed. To provide this guarantee, the channel reserves one slot - /// in the channel for the coming send. This reserved slot is not available to other `Sender` - /// instances, so you need to be careful to not end up with deadlocks by blocking after calling - /// `poll_ready` but before sending an element. - /// - /// If, after `poll_ready` succeeds, you decide you do not wish to send an item after all, you - /// can use [`disarm`](Sender::disarm) to release the reserved slot. - /// - /// Until an item is sent or [`disarm`](Sender::disarm) is called, repeated calls to - /// `poll_ready` will return either `Poll::Ready(Ok(()))` or `Poll::Ready(Err(_))` if channel - /// is closed. - pub fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { - self.chan.poll_ready(cx).map_err(|_| ClosedError::new()) - } - - /// Undo a successful call to `poll_ready`. - /// - /// Once a call to `poll_ready` returns `Poll::Ready(Ok(()))`, it holds up one slot in the - /// channel to make room for the coming send. `disarm` allows you to give up that slot if you - /// decide you do not wish to send an item after all. After calling `disarm`, you must call - /// `poll_ready` until it returns `Poll::Ready(Ok(()))` before attempting to send again. - /// - /// Returns `false` if no slot is reserved for this sender (usually because `poll_ready` was - /// not previously called, or did not succeed). - /// - /// # Motivation - /// - /// Since `poll_ready` takes up one of the finite number of slots in a bounded channel, callers - /// need to send an item shortly after `poll_ready` succeeds. If they do not, idle senders may - /// take up all the slots of the channel, and prevent active senders from getting any requests - /// through. 
Consider this code that forwards from one channel to another: - /// - /// ```rust,ignore - /// loop { - /// ready!(tx.poll_ready(cx))?; - /// if let Some(item) = ready!(rx.poll_recv(cx)) { - /// tx.try_send(item)?; - /// } else { - /// break; - /// } - /// } - /// ``` - /// - /// If many such forwarders exist, and they all forward into a single (cloned) `Sender`, then - /// any number of forwarders may be waiting for `rx.poll_recv` at the same time. While they do, - /// they are effectively each reducing the channel's capacity by 1. If enough of these - /// forwarders are idle, forwarders whose `rx` _do_ have elements will be unable to find a spot - /// for them through `poll_ready`, and the system will deadlock. - /// - /// `disarm` solves this problem by allowing you to give up the reserved slot if you find that - /// you have to block. We can then fix the code above by writing: - /// - /// ```rust,ignore - /// loop { - /// ready!(tx.poll_ready(cx))?; - /// let item = rx.poll_recv(cx); - /// if let Poll::Ready(Ok(_)) = item { - /// // we're going to send the item below, so don't disarm - /// } else { - /// // give up our send slot, we won't need it for a while - /// tx.disarm(); - /// } - /// if let Some(item) = ready!(item) { - /// tx.try_send(item)?; - /// } else { - /// break; - /// } - /// } - /// ``` - pub fn disarm(&mut self) -> bool { - if self.chan.is_ready() { - self.chan.disarm(); - true - } else { - false - } - } -} diff --git a/third_party/rust/tokio-0.2.25/src/sync/mpsc/chan.rs b/third_party/rust/tokio-0.2.25/src/sync/mpsc/chan.rs deleted file mode 100644 index 0a53cda20388..000000000000 --- a/third_party/rust/tokio-0.2.25/src/sync/mpsc/chan.rs +++ /dev/null @@ -1,543 +0,0 @@ -use crate::loom::cell::UnsafeCell; -use crate::loom::future::AtomicWaker; -use crate::loom::sync::atomic::AtomicUsize; -use crate::loom::sync::Arc; -use crate::sync::mpsc::error::{ClosedError, TryRecvError}; -use crate::sync::mpsc::{error, list}; - -use std::fmt; -use std::process; -use std::sync::atomic::Ordering::{AcqRel, Relaxed}; -use std::task::Poll::{Pending, Ready}; -use std::task::{Context, Poll}; - -/// Channel sender -pub(crate) struct Tx { - inner: Arc>, - permit: S::Permit, -} - -impl fmt::Debug for Tx -where - S::Permit: fmt::Debug, - S: fmt::Debug, -{ - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("Tx") - .field("inner", &self.inner) - .field("permit", &self.permit) - .finish() - } -} - -/// Channel receiver -pub(crate) struct Rx { - inner: Arc>, -} - -impl fmt::Debug for Rx -where - S: fmt::Debug, -{ - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("Rx").field("inner", &self.inner).finish() - } -} - -#[derive(Debug, Eq, PartialEq)] -pub(crate) enum TrySendError { - Closed, - Full, -} - -impl From<(T, TrySendError)> for error::SendError { - fn from(src: (T, TrySendError)) -> error::SendError { - match src.1 { - TrySendError::Closed => error::SendError(src.0), - TrySendError::Full => unreachable!(), - } - } -} - -impl From<(T, TrySendError)> for error::TrySendError { - fn from(src: (T, TrySendError)) -> error::TrySendError { - match src.1 { - TrySendError::Closed => error::TrySendError::Closed(src.0), - TrySendError::Full => error::TrySendError::Full(src.0), - } - } -} - -pub(crate) trait Semaphore { - type Permit; - - fn new_permit() -> Self::Permit; - - /// The permit is dropped without a value being sent. In this case, the - /// permit must be returned to the semaphore. 
- /// - /// # Return - /// - /// Returns true if the permit was acquired. - fn drop_permit(&self, permit: &mut Self::Permit) -> bool; - - fn is_idle(&self) -> bool; - - fn add_permit(&self); - - fn poll_acquire( - &self, - cx: &mut Context<'_>, - permit: &mut Self::Permit, - ) -> Poll>; - - fn try_acquire(&self, permit: &mut Self::Permit) -> Result<(), TrySendError>; - - /// A value was sent into the channel and the permit held by `tx` is - /// dropped. In this case, the permit should not immeditely be returned to - /// the semaphore. Instead, the permit is returnred to the semaphore once - /// the sent value is read by the rx handle. - fn forget(&self, permit: &mut Self::Permit); - - fn close(&self); -} - -struct Chan { - /// Handle to the push half of the lock-free list. - tx: list::Tx, - - /// Coordinates access to channel's capacity. - semaphore: S, - - /// Receiver waker. Notified when a value is pushed into the channel. - rx_waker: AtomicWaker, - - /// Tracks the number of outstanding sender handles. - /// - /// When this drops to zero, the send half of the channel is closed. - tx_count: AtomicUsize, - - /// Only accessed by `Rx` handle. - rx_fields: UnsafeCell>, -} - -impl fmt::Debug for Chan -where - S: fmt::Debug, -{ - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("Chan") - .field("tx", &self.tx) - .field("semaphore", &self.semaphore) - .field("rx_waker", &self.rx_waker) - .field("tx_count", &self.tx_count) - .field("rx_fields", &"...") - .finish() - } -} - -/// Fields only accessed by `Rx` handle. -struct RxFields { - /// Channel receiver. This field is only accessed by the `Receiver` type. - list: list::Rx, - - /// `true` if `Rx::close` is called. - rx_closed: bool, -} - -impl fmt::Debug for RxFields { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("RxFields") - .field("list", &self.list) - .field("rx_closed", &self.rx_closed) - .finish() - } -} - -unsafe impl Send for Chan {} -unsafe impl Sync for Chan {} - -pub(crate) fn channel(semaphore: S) -> (Tx, Rx) -where - S: Semaphore, -{ - let (tx, rx) = list::channel(); - - let chan = Arc::new(Chan { - tx, - semaphore, - rx_waker: AtomicWaker::new(), - tx_count: AtomicUsize::new(1), - rx_fields: UnsafeCell::new(RxFields { - list: rx, - rx_closed: false, - }), - }); - - (Tx::new(chan.clone()), Rx::new(chan)) -} - -// ===== impl Tx ===== - -impl Tx -where - S: Semaphore, -{ - fn new(chan: Arc>) -> Tx { - Tx { - inner: chan, - permit: S::new_permit(), - } - } - - pub(crate) fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { - self.inner.semaphore.poll_acquire(cx, &mut self.permit) - } - - pub(crate) fn disarm(&mut self) { - // TODO: should this error if not acquired? - self.inner.semaphore.drop_permit(&mut self.permit); - } - - /// Send a message and notify the receiver. - pub(crate) fn try_send(&mut self, value: T) -> Result<(), (T, TrySendError)> { - self.inner.try_send(value, &mut self.permit) - } -} - -impl Tx { - pub(crate) fn is_ready(&self) -> bool { - self.permit.is_acquired() - } -} - -impl Tx { - pub(crate) fn send_unbounded(&self, value: T) -> Result<(), (T, TrySendError)> { - self.inner.try_send(value, &mut ()) - } -} - -impl Clone for Tx -where - S: Semaphore, -{ - fn clone(&self) -> Tx { - // Using a Relaxed ordering here is sufficient as the caller holds a - // strong ref to `self`, preventing a concurrent decrement to zero. 
- self.inner.tx_count.fetch_add(1, Relaxed); - - Tx { - inner: self.inner.clone(), - permit: S::new_permit(), - } - } -} - -impl Drop for Tx -where - S: Semaphore, -{ - fn drop(&mut self) { - let notify = self.inner.semaphore.drop_permit(&mut self.permit); - - if notify && self.inner.semaphore.is_idle() { - self.inner.rx_waker.wake(); - } - - if self.inner.tx_count.fetch_sub(1, AcqRel) != 1 { - return; - } - - // Close the list, which sends a `Close` message - self.inner.tx.close(); - - // Notify the receiver - self.inner.rx_waker.wake(); - } -} - -// ===== impl Rx ===== - -impl Rx -where - S: Semaphore, -{ - fn new(chan: Arc>) -> Rx { - Rx { inner: chan } - } - - pub(crate) fn close(&mut self) { - self.inner.rx_fields.with_mut(|rx_fields_ptr| { - let rx_fields = unsafe { &mut *rx_fields_ptr }; - - if rx_fields.rx_closed { - return; - } - - rx_fields.rx_closed = true; - }); - - self.inner.semaphore.close(); - } - - /// Receive the next value - pub(crate) fn recv(&mut self, cx: &mut Context<'_>) -> Poll> { - use super::block::Read::*; - - // Keep track of task budget - let coop = ready!(crate::coop::poll_proceed(cx)); - - self.inner.rx_fields.with_mut(|rx_fields_ptr| { - let rx_fields = unsafe { &mut *rx_fields_ptr }; - - macro_rules! try_recv { - () => { - match rx_fields.list.pop(&self.inner.tx) { - Some(Value(value)) => { - self.inner.semaphore.add_permit(); - coop.made_progress(); - return Ready(Some(value)); - } - Some(Closed) => { - // TODO: This check may not be required as it most - // likely can only return `true` at this point. A - // channel is closed when all tx handles are - // dropped. Dropping a tx handle releases memory, - // which ensures that if dropping the tx handle is - // visible, then all messages sent are also visible. - assert!(self.inner.semaphore.is_idle()); - coop.made_progress(); - return Ready(None); - } - None => {} // fall through - } - }; - } - - try_recv!(); - - self.inner.rx_waker.register_by_ref(cx.waker()); - - // It is possible that a value was pushed between attempting to read - // and registering the task, so we have to check the channel a - // second time here. 
- try_recv!(); - - if rx_fields.rx_closed && self.inner.semaphore.is_idle() { - coop.made_progress(); - Ready(None) - } else { - Pending - } - }) - } - - /// Receives the next value without blocking - pub(crate) fn try_recv(&mut self) -> Result { - use super::block::Read::*; - self.inner.rx_fields.with_mut(|rx_fields_ptr| { - let rx_fields = unsafe { &mut *rx_fields_ptr }; - match rx_fields.list.pop(&self.inner.tx) { - Some(Value(value)) => { - self.inner.semaphore.add_permit(); - Ok(value) - } - Some(Closed) => Err(TryRecvError::Closed), - None => Err(TryRecvError::Empty), - } - }) - } -} - -impl Drop for Rx -where - S: Semaphore, -{ - fn drop(&mut self) { - use super::block::Read::Value; - - self.close(); - - self.inner.rx_fields.with_mut(|rx_fields_ptr| { - let rx_fields = unsafe { &mut *rx_fields_ptr }; - - while let Some(Value(_)) = rx_fields.list.pop(&self.inner.tx) { - self.inner.semaphore.add_permit(); - } - }) - } -} - -// ===== impl Chan ===== - -impl Chan -where - S: Semaphore, -{ - fn try_send(&self, value: T, permit: &mut S::Permit) -> Result<(), (T, TrySendError)> { - if let Err(e) = self.semaphore.try_acquire(permit) { - return Err((value, e)); - } - - // Push the value - self.tx.push(value); - - // Notify the rx task - self.rx_waker.wake(); - - // Release the permit - self.semaphore.forget(permit); - - Ok(()) - } -} - -impl Drop for Chan { - fn drop(&mut self) { - use super::block::Read::Value; - - // Safety: the only owner of the rx fields is Chan, and eing - // inside its own Drop means we're the last ones to touch it. - self.rx_fields.with_mut(|rx_fields_ptr| { - let rx_fields = unsafe { &mut *rx_fields_ptr }; - - while let Some(Value(_)) = rx_fields.list.pop(&self.tx) {} - unsafe { rx_fields.list.free_blocks() }; - }); - } -} - -use crate::sync::semaphore_ll::TryAcquireError; - -impl From for TrySendError { - fn from(src: TryAcquireError) -> TrySendError { - if src.is_closed() { - TrySendError::Closed - } else if src.is_no_permits() { - TrySendError::Full - } else { - unreachable!(); - } - } -} - -// ===== impl Semaphore for (::Semaphore, capacity) ===== - -use crate::sync::semaphore_ll::Permit; - -impl Semaphore for (crate::sync::semaphore_ll::Semaphore, usize) { - type Permit = Permit; - - fn new_permit() -> Permit { - Permit::new() - } - - fn drop_permit(&self, permit: &mut Permit) -> bool { - let ret = permit.is_acquired(); - permit.release(1, &self.0); - ret - } - - fn add_permit(&self) { - self.0.add_permits(1) - } - - fn is_idle(&self) -> bool { - self.0.available_permits() == self.1 - } - - fn poll_acquire( - &self, - cx: &mut Context<'_>, - permit: &mut Permit, - ) -> Poll> { - // Keep track of task budget - let coop = ready!(crate::coop::poll_proceed(cx)); - - permit - .poll_acquire(cx, 1, &self.0) - .map_err(|_| ClosedError::new()) - .map(move |r| { - coop.made_progress(); - r - }) - } - - fn try_acquire(&self, permit: &mut Permit) -> Result<(), TrySendError> { - permit.try_acquire(1, &self.0)?; - Ok(()) - } - - fn forget(&self, permit: &mut Self::Permit) { - permit.forget(1); - } - - fn close(&self) { - self.0.close(); - } -} - -// ===== impl Semaphore for AtomicUsize ===== - -use std::sync::atomic::Ordering::{Acquire, Release}; -use std::usize; - -impl Semaphore for AtomicUsize { - type Permit = (); - - fn new_permit() {} - - fn drop_permit(&self, _permit: &mut ()) -> bool { - false - } - - fn add_permit(&self) { - let prev = self.fetch_sub(2, Release); - - if prev >> 1 == 0 { - // Something went wrong - process::abort(); - } - } - - fn is_idle(&self) -> 
bool { - self.load(Acquire) >> 1 == 0 - } - - fn poll_acquire( - &self, - _cx: &mut Context<'_>, - permit: &mut (), - ) -> Poll> { - Ready(self.try_acquire(permit).map_err(|_| ClosedError::new())) - } - - fn try_acquire(&self, _permit: &mut ()) -> Result<(), TrySendError> { - let mut curr = self.load(Acquire); - - loop { - if curr & 1 == 1 { - return Err(TrySendError::Closed); - } - - if curr == usize::MAX ^ 1 { - // Overflowed the ref count. There is no safe way to recover, so - // abort the process. In practice, this should never happen. - process::abort() - } - - match self.compare_exchange(curr, curr + 2, AcqRel, Acquire) { - Ok(_) => return Ok(()), - Err(actual) => { - curr = actual; - } - } - } - } - - fn forget(&self, _permit: &mut ()) {} - - fn close(&self) { - self.fetch_or(1, Release); - } -} diff --git a/third_party/rust/tokio-0.2.25/src/sync/mpsc/error.rs b/third_party/rust/tokio-0.2.25/src/sync/mpsc/error.rs deleted file mode 100644 index 72c42aa53e73..000000000000 --- a/third_party/rust/tokio-0.2.25/src/sync/mpsc/error.rs +++ /dev/null @@ -1,146 +0,0 @@ -//! Channel error types - -use std::error::Error; -use std::fmt; - -/// Error returned by the `Sender`. -#[derive(Debug)] -pub struct SendError(pub T); - -impl fmt::Display for SendError { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(fmt, "channel closed") - } -} - -impl std::error::Error for SendError {} - -// ===== TrySendError ===== - -/// This enumeration is the list of the possible error outcomes for the -/// [try_send](super::Sender::try_send) method. -#[derive(Debug)] -pub enum TrySendError { - /// The data could not be sent on the channel because the channel is - /// currently full and sending would require blocking. - Full(T), - - /// The receive half of the channel was explicitly closed or has been - /// dropped. - Closed(T), -} - -impl Error for TrySendError {} - -impl fmt::Display for TrySendError { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - write!( - fmt, - "{}", - match self { - TrySendError::Full(..) => "no available capacity", - TrySendError::Closed(..) => "channel closed", - } - ) - } -} - -impl From> for TrySendError { - fn from(src: SendError) -> TrySendError { - TrySendError::Closed(src.0) - } -} - -// ===== RecvError ===== - -/// Error returned by `Receiver`. -#[derive(Debug)] -pub struct RecvError(()); - -impl fmt::Display for RecvError { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(fmt, "channel closed") - } -} - -impl Error for RecvError {} - -// ===== TryRecvError ===== - -/// This enumeration is the list of the possible reasons that try_recv -/// could not return data when called. -#[derive(Debug, PartialEq)] -pub enum TryRecvError { - /// This channel is currently empty, but the Sender(s) have not yet - /// disconnected, so data may yet become available. - Empty, - /// The channel's sending half has been closed, and there will - /// never be any more data received on it. - Closed, -} - -impl fmt::Display for TryRecvError { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - write!( - fmt, - "{}", - match self { - TryRecvError::Empty => "channel empty", - TryRecvError::Closed => "channel closed", - } - ) - } -} - -impl Error for TryRecvError {} - -// ===== ClosedError ===== - -/// Error returned by [`Sender::poll_ready`](super::Sender::poll_ready). 
-#[derive(Debug)] -pub struct ClosedError(()); - -impl ClosedError { - pub(crate) fn new() -> ClosedError { - ClosedError(()) - } -} - -impl fmt::Display for ClosedError { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(fmt, "channel closed") - } -} - -impl Error for ClosedError {} - -cfg_time! { - // ===== SendTimeoutError ===== - - #[derive(Debug)] - /// Error returned by [`Sender::send_timeout`](super::Sender::send_timeout)]. - pub enum SendTimeoutError { - /// The data could not be sent on the channel because the channel is - /// full, and the timeout to send has elapsed. - Timeout(T), - - /// The receive half of the channel was explicitly closed or has been - /// dropped. - Closed(T), - } - - impl Error for SendTimeoutError {} - - impl fmt::Display for SendTimeoutError { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - write!( - fmt, - "{}", - match self { - SendTimeoutError::Timeout(..) => "timed out waiting on send operation", - SendTimeoutError::Closed(..) => "channel closed", - } - ) - } - } -} diff --git a/third_party/rust/tokio-0.2.25/src/sync/mpsc/list.rs b/third_party/rust/tokio-0.2.25/src/sync/mpsc/list.rs deleted file mode 100644 index 53f82a25ef9f..000000000000 --- a/third_party/rust/tokio-0.2.25/src/sync/mpsc/list.rs +++ /dev/null @@ -1,341 +0,0 @@ -//! A concurrent, lock-free, FIFO list. - -use crate::loom::{ - sync::atomic::{AtomicPtr, AtomicUsize}, - thread, -}; -use crate::sync::mpsc::block::{self, Block}; - -use std::fmt; -use std::ptr::NonNull; -use std::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed, Release}; - -/// List queue transmit handle -pub(crate) struct Tx { - /// Tail in the `Block` mpmc list. - block_tail: AtomicPtr>, - - /// Position to push the next message. This reference a block and offset - /// into the block. - tail_position: AtomicUsize, -} - -/// List queue receive handle -pub(crate) struct Rx { - /// Pointer to the block being processed - head: NonNull>, - - /// Next slot index to process - index: usize, - - /// Pointer to the next block pending release - free_head: NonNull>, -} - -pub(crate) fn channel() -> (Tx, Rx) { - // Create the initial block shared between the tx and rx halves. - let initial_block = Box::new(Block::new(0)); - let initial_block_ptr = Box::into_raw(initial_block); - - let tx = Tx { - block_tail: AtomicPtr::new(initial_block_ptr), - tail_position: AtomicUsize::new(0), - }; - - let head = NonNull::new(initial_block_ptr).unwrap(); - - let rx = Rx { - head, - index: 0, - free_head: head, - }; - - (tx, rx) -} - -impl Tx { - /// Pushes a value into the list. - pub(crate) fn push(&self, value: T) { - // First, claim a slot for the value. `Acquire` is used here to - // synchronize with the `fetch_add` in `reclaim_blocks`. - let slot_index = self.tail_position.fetch_add(1, Acquire); - - // Load the current block and write the value - let block = self.find_block(slot_index); - - unsafe { - // Write the value to the block - block.as_ref().write(slot_index, value); - } - } - - /// Closes the send half of the list - /// - /// Similar process as pushing a value, but instead of writing the value & - /// setting the ready flag, the TX_CLOSED flag is set on the block. - pub(crate) fn close(&self) { - // First, claim a slot for the value. This is the last slot that will be - // claimed. 
- let slot_index = self.tail_position.fetch_add(1, Acquire); - - let block = self.find_block(slot_index); - - unsafe { block.as_ref().tx_close() } - } - - fn find_block(&self, slot_index: usize) -> NonNull> { - // The start index of the block that contains `index`. - let start_index = block::start_index(slot_index); - - // The index offset into the block - let offset = block::offset(slot_index); - - // Load the current head of the block - let mut block_ptr = self.block_tail.load(Acquire); - - let block = unsafe { &*block_ptr }; - - // Calculate the distance between the tail ptr and the target block - let distance = block.distance(start_index); - - // Decide if this call to `find_block` should attempt to update the - // `block_tail` pointer. - // - // Updating `block_tail` is not always performed in order to reduce - // contention. - // - // When set, as the routine walks the linked list, it attempts to update - // `block_tail`. If the update cannot be performed, `try_updating_tail` - // is unset. - let mut try_updating_tail = distance > offset; - - // Walk the linked list of blocks until the block with `start_index` is - // found. - loop { - let block = unsafe { &(*block_ptr) }; - - if block.is_at_index(start_index) { - return unsafe { NonNull::new_unchecked(block_ptr) }; - } - - let next_block = block - .load_next(Acquire) - // There is no allocated next block, grow the linked list. - .unwrap_or_else(|| block.grow()); - - // If the block is **not** final, then the tail pointer cannot be - // advanced any more. - try_updating_tail &= block.is_final(); - - if try_updating_tail { - // Advancing `block_tail` must happen when walking the linked - // list. `block_tail` may not advance passed any blocks that are - // not "final". At the point a block is finalized, it is unknown - // if there are any prior blocks that are unfinalized, which - // makes it impossible to advance `block_tail`. - // - // While walking the linked list, `block_tail` can be advanced - // as long as finalized blocks are traversed. - // - // Release ordering is used to ensure that any subsequent reads - // are able to see the memory pointed to by `block_tail`. - // - // Acquire is not needed as any "actual" value is not accessed. - // At this point, the linked list is walked to acquire blocks. - let actual = - self.block_tail - .compare_and_swap(block_ptr, next_block.as_ptr(), Release); - - if actual == block_ptr { - // Synchronize with any senders - let tail_position = self.tail_position.fetch_add(0, Release); - - unsafe { - block.tx_release(tail_position); - } - } else { - // A concurrent sender is also working on advancing - // `block_tail` and this thread is falling behind. - // - // Stop trying to advance the tail pointer - try_updating_tail = false; - } - } - - block_ptr = next_block.as_ptr(); - - thread::yield_now(); - } - } - - pub(crate) unsafe fn reclaim_block(&self, mut block: NonNull>) { - // The block has been removed from the linked list and ownership - // is reclaimed. - // - // Before dropping the block, see if it can be reused by - // inserting it back at the end of the linked list. 
- // - // First, reset the data - block.as_mut().reclaim(); - - let mut reused = false; - - // Attempt to insert the block at the end - // - // Walk at most three times - // - let curr_ptr = self.block_tail.load(Acquire); - - // The pointer can never be null - debug_assert!(!curr_ptr.is_null()); - - let mut curr = NonNull::new_unchecked(curr_ptr); - - // TODO: Unify this logic with Block::grow - for _ in 0..3 { - match curr.as_ref().try_push(&mut block, AcqRel) { - Ok(_) => { - reused = true; - break; - } - Err(next) => { - curr = next; - } - } - } - - if !reused { - let _ = Box::from_raw(block.as_ptr()); - } - } -} - -impl fmt::Debug for Tx { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("Tx") - .field("block_tail", &self.block_tail.load(Relaxed)) - .field("tail_position", &self.tail_position.load(Relaxed)) - .finish() - } -} - -impl Rx { - /// Pops the next value off the queue - pub(crate) fn pop(&mut self, tx: &Tx) -> Option> { - // Advance `head`, if needed - if !self.try_advancing_head() { - return None; - } - - self.reclaim_blocks(tx); - - unsafe { - let block = self.head.as_ref(); - - let ret = block.read(self.index); - - if let Some(block::Read::Value(..)) = ret { - self.index = self.index.wrapping_add(1); - } - - ret - } - } - - /// Tries advancing the block pointer to the block referenced by `self.index`. - /// - /// Returns `true` if successful, `false` if there is no next block to load. - fn try_advancing_head(&mut self) -> bool { - let block_index = block::start_index(self.index); - - loop { - let next_block = { - let block = unsafe { self.head.as_ref() }; - - if block.is_at_index(block_index) { - return true; - } - - block.load_next(Acquire) - }; - - let next_block = match next_block { - Some(next_block) => next_block, - None => { - return false; - } - }; - - self.head = next_block; - - thread::yield_now(); - } - } - - fn reclaim_blocks(&mut self, tx: &Tx) { - while self.free_head != self.head { - unsafe { - // Get a handle to the block that will be freed and update - // `free_head` to point to the next block. - let block = self.free_head; - - let observed_tail_position = block.as_ref().observed_tail_position(); - - let required_index = match observed_tail_position { - Some(i) => i, - None => return, - }; - - if required_index > self.index { - return; - } - - // We may read the next pointer with `Relaxed` ordering as it is - // guaranteed that the `reclaim_blocks` routine trails the `recv` - // routine. Any memory accessed by `reclaim_blocks` has already - // been acquired by `recv`. - let next_block = block.as_ref().load_next(Relaxed); - - // Update the free list head - self.free_head = next_block.unwrap(); - - // Push the emptied block onto the back of the queue, making it - // available to senders. - tx.reclaim_block(block); - } - - thread::yield_now(); - } - } - - /// Effectively `Drop` all the blocks. Should only be called once, when - /// the list is dropping. - pub(super) unsafe fn free_blocks(&mut self) { - debug_assert_ne!(self.free_head, NonNull::dangling()); - - let mut cur = Some(self.free_head); - - #[cfg(debug_assertions)] - { - // to trigger the debug assert above so as to catch that we - // don't call `free_blocks` more than once. 
- self.free_head = NonNull::dangling(); - self.head = NonNull::dangling(); - } - - while let Some(block) = cur { - cur = block.as_ref().load_next(Relaxed); - drop(Box::from_raw(block.as_ptr())); - } - } -} - -impl fmt::Debug for Rx { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("Rx") - .field("head", &self.head) - .field("index", &self.index) - .field("free_head", &self.free_head) - .finish() - } -} diff --git a/third_party/rust/tokio-0.2.25/src/sync/mpsc/mod.rs b/third_party/rust/tokio-0.2.25/src/sync/mpsc/mod.rs deleted file mode 100644 index c489c9f99ff7..000000000000 --- a/third_party/rust/tokio-0.2.25/src/sync/mpsc/mod.rs +++ /dev/null @@ -1,92 +0,0 @@ -#![cfg_attr(not(feature = "sync"), allow(dead_code, unreachable_pub))] - -//! A multi-producer, single-consumer queue for sending values across -//! asynchronous tasks. -//! -//! Similar to `std`, channel creation provides [`Receiver`] and [`Sender`] -//! handles. [`Receiver`] implements `Stream` and allows a task to read values -//! out of the channel. If there is no message to read, the current task will be -//! notified when a new value is sent. If the channel is at capacity, the send -//! is rejected and the task will be notified when additional capacity is -//! available. In other words, the channel provides backpressure. -//! -//! This module provides two variants of the channel: bounded and unbounded. The -//! bounded variant has a limit on the number of messages that the channel can -//! store, and if this limit is reached, trying to send another message will -//! wait until a message is received from the channel. An unbounded channel has -//! an infinite capacity, so the `send` method never does any kind of sleeping. -//! This makes the [`UnboundedSender`] usable from both synchronous and -//! asynchronous code. -//! -//! # Disconnection -//! -//! When all [`Sender`] handles have been dropped, it is no longer -//! possible to send values into the channel. This is considered the termination -//! event of the stream. As such, `Receiver::poll` returns `Ok(Ready(None))`. -//! -//! If the [`Receiver`] handle is dropped, then messages can no longer -//! be read out of the channel. In this case, all further attempts to send will -//! result in an error. -//! -//! # Clean Shutdown -//! -//! When the [`Receiver`] is dropped, it is possible for unprocessed messages to -//! remain in the channel. Instead, it is usually desirable to perform a "clean" -//! shutdown. To do this, the receiver first calls `close`, which will prevent -//! any further messages to be sent into the channel. Then, the receiver -//! consumes the channel to completion, at which point the receiver can be -//! dropped. -//! -//! # Communicating between sync and async code -//! -//! When you want to communicate between synchronous and asynchronous code, there -//! are two situations to consider: -//! -//! **Bounded channel**: If you need a bounded channel, you should use a bounded -//! Tokio `mpsc` channel for both directions of communication. To call the async -//! [`send`][bounded-send] or [`recv`][bounded-recv] methods in sync code, you -//! will need to use [`Handle::block_on`], which allow you to execute an async -//! method in synchronous code. This is necessary because a bounded channel may -//! need to wait for additional capacity to become available. -//! -//! **Unbounded channel**: You should use the kind of channel that matches where -//! the receiver is. So for sending a message _from async to sync_, you should -//! 
use [the standard library unbounded channel][std-unbounded] or -//! [crossbeam][crossbeam-unbounded]. Similarly, for sending a message _from sync -//! to async_, you should use an unbounded Tokio `mpsc` channel. -//! -//! [`Sender`]: crate::sync::mpsc::Sender -//! [`Receiver`]: crate::sync::mpsc::Receiver -//! [bounded-send]: crate::sync::mpsc::Sender::send() -//! [bounded-recv]: crate::sync::mpsc::Receiver::recv() -//! [`UnboundedSender`]: crate::sync::mpsc::UnboundedSender -//! [`Handle::block_on`]: crate::runtime::Handle::block_on() -//! [std-unbounded]: std::sync::mpsc::channel -//! [crossbeam-unbounded]: https://docs.rs/crossbeam/*/crossbeam/channel/fn.unbounded.html - -pub(super) mod block; - -mod bounded; -pub use self::bounded::{channel, Receiver, Sender}; - -mod chan; - -pub(super) mod list; - -mod unbounded; -pub use self::unbounded::{unbounded_channel, UnboundedReceiver, UnboundedSender}; - -pub mod error; - -/// The number of values a block can contain. -/// -/// This value must be a power of 2. It also must be smaller than the number of -/// bits in `usize`. -#[cfg(all(target_pointer_width = "64", not(loom)))] -const BLOCK_CAP: usize = 32; - -#[cfg(all(not(target_pointer_width = "64"), not(loom)))] -const BLOCK_CAP: usize = 16; - -#[cfg(loom)] -const BLOCK_CAP: usize = 2; diff --git a/third_party/rust/tokio-0.2.25/src/sync/mpsc/unbounded.rs b/third_party/rust/tokio-0.2.25/src/sync/mpsc/unbounded.rs deleted file mode 100644 index 1b2288ab08ca..000000000000 --- a/third_party/rust/tokio-0.2.25/src/sync/mpsc/unbounded.rs +++ /dev/null @@ -1,180 +0,0 @@ -use crate::loom::sync::atomic::AtomicUsize; -use crate::sync::mpsc::chan; -use crate::sync::mpsc::error::{SendError, TryRecvError}; - -use std::fmt; -use std::task::{Context, Poll}; - -/// Send values to the associated `UnboundedReceiver`. -/// -/// Instances are created by the -/// [`unbounded_channel`](unbounded_channel) function. -pub struct UnboundedSender { - chan: chan::Tx, -} - -impl Clone for UnboundedSender { - fn clone(&self) -> Self { - UnboundedSender { - chan: self.chan.clone(), - } - } -} - -impl fmt::Debug for UnboundedSender { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("UnboundedSender") - .field("chan", &self.chan) - .finish() - } -} - -/// Receive values from the associated `UnboundedSender`. -/// -/// Instances are created by the -/// [`unbounded_channel`](unbounded_channel) function. -pub struct UnboundedReceiver { - /// The channel receiver - chan: chan::Rx, -} - -impl fmt::Debug for UnboundedReceiver { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("UnboundedReceiver") - .field("chan", &self.chan) - .finish() - } -} - -/// Creates an unbounded mpsc channel for communicating between asynchronous -/// tasks. -/// -/// A `send` on this channel will always succeed as long as the receive half has -/// not been closed. If the receiver falls behind, messages will be arbitrarily -/// buffered. -/// -/// **Note** that the amount of available system memory is an implicit bound to -/// the channel. Using an `unbounded` channel has the ability of causing the -/// process to run out of memory. In this case, the process will be aborted. 
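For reference, a minimal sketch of the "clean shutdown" pattern described in the mpsc module docs above, assuming the tokio 0.2 bounded channel API that this patch removes (with the `sync`, `macros`, and `rt-threaded` features enabled); this is an editorial illustration, not part of the vendored sources:

    use tokio::sync::mpsc;

    #[tokio::main]
    async fn main() {
        let (mut tx, mut rx) = mpsc::channel::<u32>(8);

        // These sends complete immediately because the channel has spare capacity;
        // a full channel would apply backpressure and make `send` wait.
        tx.send(1).await.unwrap();
        tx.send(2).await.unwrap();

        // Stop accepting new messages, then drain whatever is already buffered.
        rx.close();
        assert!(tx.send(3).await.is_err());

        while let Some(value) = rx.recv().await {
            println!("drained {}", value);
        }
    }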
-pub fn unbounded_channel() -> (UnboundedSender, UnboundedReceiver) { - let (tx, rx) = chan::channel(AtomicUsize::new(0)); - - let tx = UnboundedSender::new(tx); - let rx = UnboundedReceiver::new(rx); - - (tx, rx) -} - -/// No capacity -type Semaphore = AtomicUsize; - -impl UnboundedReceiver { - pub(crate) fn new(chan: chan::Rx) -> UnboundedReceiver { - UnboundedReceiver { chan } - } - - #[doc(hidden)] // TODO: doc - pub fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll> { - self.chan.recv(cx) - } - - /// Receives the next value for this receiver. - /// - /// `None` is returned when all `Sender` halves have dropped, indicating - /// that no further values can be sent on the channel. - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::mpsc; - /// - /// #[tokio::main] - /// async fn main() { - /// let (tx, mut rx) = mpsc::unbounded_channel(); - /// - /// tokio::spawn(async move { - /// tx.send("hello").unwrap(); - /// }); - /// - /// assert_eq!(Some("hello"), rx.recv().await); - /// assert_eq!(None, rx.recv().await); - /// } - /// ``` - /// - /// Values are buffered: - /// - /// ``` - /// use tokio::sync::mpsc; - /// - /// #[tokio::main] - /// async fn main() { - /// let (tx, mut rx) = mpsc::unbounded_channel(); - /// - /// tx.send("hello").unwrap(); - /// tx.send("world").unwrap(); - /// - /// assert_eq!(Some("hello"), rx.recv().await); - /// assert_eq!(Some("world"), rx.recv().await); - /// } - /// ``` - pub async fn recv(&mut self) -> Option { - use crate::future::poll_fn; - - poll_fn(|cx| self.poll_recv(cx)).await - } - - /// Attempts to return a pending value on this receiver without blocking. - /// - /// This method will never block the caller in order to wait for data to - /// become available. Instead, this will always return immediately with - /// a possible option of pending data on the channel. - /// - /// This is useful for a flavor of "optimistic check" before deciding to - /// block on a receiver. - /// - /// Compared with recv, this function has two failure cases instead of - /// one (one for disconnection, one for an empty buffer). - pub fn try_recv(&mut self) -> Result { - self.chan.try_recv() - } - - /// Closes the receiving half of a channel, without dropping it. - /// - /// This prevents any further messages from being sent on the channel while - /// still enabling the receiver to drain messages that are buffered. - pub fn close(&mut self) { - self.chan.close(); - } -} - -#[cfg(feature = "stream")] -impl crate::stream::Stream for UnboundedReceiver { - type Item = T; - - fn poll_next(mut self: std::pin::Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.poll_recv(cx) - } -} - -impl UnboundedSender { - pub(crate) fn new(chan: chan::Tx) -> UnboundedSender { - UnboundedSender { chan } - } - - /// Attempts to send a message on this `UnboundedSender` without blocking. - /// - /// This method is not marked async because sending a message to an unbounded channel - /// never requires any form of waiting. Because of this, the `send` method can be - /// used in both synchronous and asynchronous code without problems. - /// - /// If the receive half of the channel is closed, either due to [`close`] - /// being called or the [`UnboundedReceiver`] having been dropped, this - /// function returns an error. The error includes the value passed to `send`. 
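The module docs above recommend an unbounded Tokio channel for sending from synchronous into asynchronous code; a minimal sketch under the same tokio 0.2 assumptions, illustration only:

    use tokio::sync::mpsc;

    #[tokio::main]
    async fn main() {
        let (tx, mut rx) = mpsc::unbounded_channel::<String>();

        // `UnboundedSender::send` never waits, so it can be called from a plain
        // OS thread without entering the runtime.
        std::thread::spawn(move || {
            tx.send("from a sync thread".to_string()).unwrap();
        });

        assert_eq!(rx.recv().await.as_deref(), Some("from a sync thread"));
    }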
- /// - /// [`close`]: UnboundedReceiver::close - /// [`UnboundedReceiver`]: UnboundedReceiver - pub fn send(&self, message: T) -> Result<(), SendError> { - self.chan.send_unbounded(message)?; - Ok(()) - } -} diff --git a/third_party/rust/tokio-0.2.25/src/sync/mutex.rs b/third_party/rust/tokio-0.2.25/src/sync/mutex.rs deleted file mode 100644 index df348457fec9..000000000000 --- a/third_party/rust/tokio-0.2.25/src/sync/mutex.rs +++ /dev/null @@ -1,453 +0,0 @@ -use crate::sync::batch_semaphore as semaphore; - -use std::cell::UnsafeCell; -use std::error::Error; -use std::fmt; -use std::ops::{Deref, DerefMut}; -use std::sync::Arc; - -/// An asynchronous `Mutex`-like type. -/// -/// This type acts similarly to an asynchronous [`std::sync::Mutex`], with one -/// major difference: [`lock`] does not block and the lock guard can be held -/// across await points. -/// -/// # Which kind of mutex should you use? -/// -/// Contrary to popular belief, it is ok and often preferred to use the ordinary -/// [`Mutex`][std] from the standard library in asynchronous code. This section -/// will help you decide on which kind of mutex you should use. -/// -/// The primary use case of the async mutex is to provide shared mutable access -/// to IO resources such as a database connection. If the data stored behind the -/// mutex is just data, it is often better to use a blocking mutex such as the -/// one in the standard library or [`parking_lot`]. This is because the feature -/// that the async mutex offers over the blocking mutex is that it is possible -/// to keep the mutex locked across an `.await` point, which is rarely necessary -/// for data. -/// -/// A common pattern is to wrap the `Arc>` in a struct that provides -/// non-async methods for performing operations on the data within, and only -/// lock the mutex inside these methods. The [mini-redis] example provides an -/// illustration of this pattern. -/// -/// Additionally, when you _do_ want shared access to an IO resource, it is -/// often better to spawn a task to manage the IO resource, and to use message -/// passing to communicate with that task. -/// -/// [std]: std::sync::Mutex -/// [`parking_lot`]: https://docs.rs/parking_lot -/// [mini-redis]: https://github.com/tokio-rs/mini-redis/blob/master/src/db.rs -/// -/// # Examples: -/// -/// ```rust,no_run -/// use tokio::sync::Mutex; -/// use std::sync::Arc; -/// -/// #[tokio::main] -/// async fn main() { -/// let data1 = Arc::new(Mutex::new(0)); -/// let data2 = Arc::clone(&data1); -/// -/// tokio::spawn(async move { -/// let mut lock = data2.lock().await; -/// *lock += 1; -/// }); -/// -/// let mut lock = data1.lock().await; -/// *lock += 1; -/// } -/// ``` -/// -/// -/// ```rust,no_run -/// use tokio::sync::Mutex; -/// use std::sync::Arc; -/// -/// #[tokio::main] -/// async fn main() { -/// let count = Arc::new(Mutex::new(0)); -/// -/// for _ in 0..5 { -/// let my_count = Arc::clone(&count); -/// tokio::spawn(async move { -/// for _ in 0..10 { -/// let mut lock = my_count.lock().await; -/// *lock += 1; -/// println!("{}", lock); -/// } -/// }); -/// } -/// -/// loop { -/// if *count.lock().await >= 50 { -/// break; -/// } -/// } -/// println!("Count hit 50."); -/// } -/// ``` -/// There are a few things of note here to pay attention to in this example. -/// 1. The mutex is wrapped in an [`Arc`] to allow it to be shared across -/// threads. -/// 2. Each spawned task obtains a lock and releases it on every iteration. -/// 3. 
Mutation of the data protected by the Mutex is done by de-referencing -/// the obtained lock as seen on lines 12 and 19. -/// -/// Tokio's Mutex works in a simple FIFO (first in, first out) style where all -/// calls to [`lock`] complete in the order they were performed. In that way the -/// Mutex is "fair" and predictable in how it distributes the locks to inner -/// data. This is why the output of the program above is an in-order count to -/// 50. Locks are released and reacquired after every iteration, so basically, -/// each thread goes to the back of the line after it increments the value once. -/// Finally, since there is only a single valid lock at any given time, there is -/// no possibility of a race condition when mutating the inner value. -/// -/// Note that in contrast to [`std::sync::Mutex`], this implementation does not -/// poison the mutex when a thread holding the [`MutexGuard`] panics. In such a -/// case, the mutex will be unlocked. If the panic is caught, this might leave -/// the data protected by the mutex in an inconsistent state. -/// -/// [`Mutex`]: struct@Mutex -/// [`MutexGuard`]: struct@MutexGuard -/// [`Arc`]: struct@std::sync::Arc -/// [`std::sync::Mutex`]: struct@std::sync::Mutex -/// [`Send`]: trait@std::marker::Send -/// [`lock`]: method@Mutex::lock -pub struct Mutex { - s: semaphore::Semaphore, - c: UnsafeCell, -} - -/// A handle to a held `Mutex`. -/// -/// As long as you have this guard, you have exclusive access to the underlying -/// `T`. The guard internally borrows the `Mutex`, so the mutex will not be -/// dropped while a guard exists. -/// -/// The lock is automatically released whenever the guard is dropped, at which -/// point `lock` will succeed yet again. -pub struct MutexGuard<'a, T: ?Sized> { - lock: &'a Mutex, -} - -/// An owned handle to a held `Mutex`. -/// -/// This guard is only available from a `Mutex` that is wrapped in an [`Arc`]. It -/// is identical to `MutexGuard`, except that rather than borrowing the `Mutex`, -/// it clones the `Arc`, incrementing the reference count. This means that -/// unlike `MutexGuard`, it will have the `'static` lifetime. -/// -/// As long as you have this guard, you have exclusive access to the underlying -/// `T`. The guard internally keeps a reference-couned pointer to the original -/// `Mutex`, so even if the lock goes away, the guard remains valid. -/// -/// The lock is automatically released whenever the guard is dropped, at which -/// point `lock` will succeed yet again. -/// -/// [`Arc`]: std::sync::Arc -pub struct OwnedMutexGuard { - lock: Arc>, -} - -// As long as T: Send, it's fine to send and share Mutex between threads. -// If T was not Send, sending and sharing a Mutex would be bad, since you can -// access T through Mutex. -unsafe impl Send for Mutex where T: ?Sized + Send {} -unsafe impl Sync for Mutex where T: ?Sized + Send {} -unsafe impl Sync for MutexGuard<'_, T> where T: ?Sized + Send + Sync {} -unsafe impl Sync for OwnedMutexGuard where T: ?Sized + Send + Sync {} - -/// Error returned from the [`Mutex::try_lock`] function. -/// -/// A `try_lock` operation can only fail if the mutex is already locked. 
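The mutex docs above describe wrapping an `Arc<Mutex<T>>` in a struct that exposes non-async methods when the mutex only guards plain data; a minimal sketch of that pattern using the standard library mutex, as those docs suggest (the `SharedCounter` type is hypothetical, for illustration only):

    use std::sync::{Arc, Mutex};

    // The std mutex stays an implementation detail; callers never hold the
    // guard across an .await point because the methods are not async.
    #[derive(Clone, Default)]
    struct SharedCounter {
        inner: Arc<Mutex<u64>>,
    }

    impl SharedCounter {
        fn increment(&self) -> u64 {
            let mut guard = self.inner.lock().unwrap();
            *guard += 1;
            *guard
        }
    }

    #[tokio::main]
    async fn main() {
        let counter = SharedCounter::default();

        let tasks: Vec<_> = (0..4)
            .map(|_| {
                let counter = counter.clone();
                tokio::spawn(async move { counter.increment() })
            })
            .collect();

        for task in tasks {
            task.await.unwrap();
        }

        assert_eq!(counter.increment(), 5);
    }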
-/// -/// [`Mutex::try_lock`]: Mutex::try_lock -#[derive(Debug)] -pub struct TryLockError(()); - -impl fmt::Display for TryLockError { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(fmt, "operation would block") - } -} - -impl Error for TryLockError {} - -#[test] -#[cfg(not(loom))] -fn bounds() { - fn check_send() {} - fn check_unpin() {} - // This has to take a value, since the async fn's return type is unnameable. - fn check_send_sync_val(_t: T) {} - fn check_send_sync() {} - fn check_static() {} - fn check_static_val(_t: T) {} - - check_send::>(); - check_send::>(); - check_unpin::>(); - check_send_sync::>(); - check_static::>(); - - let mutex = Mutex::new(1); - check_send_sync_val(mutex.lock()); - let arc_mutex = Arc::new(Mutex::new(1)); - check_send_sync_val(arc_mutex.clone().lock_owned()); - check_static_val(arc_mutex.lock_owned()); -} - -impl Mutex { - /// Creates a new lock in an unlocked state ready for use. - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::Mutex; - /// - /// let lock = Mutex::new(5); - /// ``` - pub fn new(t: T) -> Self - where - T: Sized, - { - Self { - c: UnsafeCell::new(t), - s: semaphore::Semaphore::new(1), - } - } - - /// Locks this mutex, causing the current task - /// to yield until the lock has been acquired. - /// When the lock has been acquired, function returns a [`MutexGuard`]. - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::Mutex; - /// - /// #[tokio::main] - /// async fn main() { - /// let mutex = Mutex::new(1); - /// - /// let mut n = mutex.lock().await; - /// *n = 2; - /// } - /// ``` - pub async fn lock(&self) -> MutexGuard<'_, T> { - self.acquire().await; - MutexGuard { lock: self } - } - - /// Locks this mutex, causing the current task to yield until the lock has - /// been acquired. When the lock has been acquired, this returns an - /// [`OwnedMutexGuard`]. - /// - /// This method is identical to [`Mutex::lock`], except that the returned - /// guard references the `Mutex` with an [`Arc`] rather than by borrowing - /// it. Therefore, the `Mutex` must be wrapped in an `Arc` to call this - /// method, and the guard will live for the `'static` lifetime, as it keeps - /// the `Mutex` alive by holding an `Arc`. - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::Mutex; - /// use std::sync::Arc; - /// - /// #[tokio::main] - /// async fn main() { - /// let mutex = Arc::new(Mutex::new(1)); - /// - /// let mut n = mutex.clone().lock_owned().await; - /// *n = 2; - /// } - /// ``` - /// - /// [`Arc`]: std::sync::Arc - pub async fn lock_owned(self: Arc) -> OwnedMutexGuard { - self.acquire().await; - OwnedMutexGuard { lock: self } - } - - async fn acquire(&self) { - self.s.acquire(1).await.unwrap_or_else(|_| { - // The semaphore was closed. but, we never explicitly close it, and - // we own it exclusively, which means that this can never happen. - unreachable!() - }); - } - - /// Attempts to acquire the lock, and returns [`TryLockError`] if the - /// lock is currently held somewhere else. 
- /// - /// [`TryLockError`]: TryLockError - /// # Examples - /// - /// ``` - /// use tokio::sync::Mutex; - /// # async fn dox() -> Result<(), tokio::sync::TryLockError> { - /// - /// let mutex = Mutex::new(1); - /// - /// let n = mutex.try_lock()?; - /// assert_eq!(*n, 1); - /// # Ok(()) - /// # } - /// ``` - pub fn try_lock(&self) -> Result, TryLockError> { - match self.s.try_acquire(1) { - Ok(_) => Ok(MutexGuard { lock: self }), - Err(_) => Err(TryLockError(())), - } - } - - /// Attempts to acquire the lock, and returns [`TryLockError`] if the lock - /// is currently held somewhere else. - /// - /// This method is identical to [`Mutex::try_lock`], except that the - /// returned guard references the `Mutex` with an [`Arc`] rather than by - /// borrowing it. Therefore, the `Mutex` must be wrapped in an `Arc` to call - /// this method, and the guard will live for the `'static` lifetime, as it - /// keeps the `Mutex` alive by holding an `Arc`. - /// - /// [`TryLockError`]: TryLockError - /// [`Arc`]: std::sync::Arc - /// # Examples - /// - /// ``` - /// use tokio::sync::Mutex; - /// use std::sync::Arc; - /// # async fn dox() -> Result<(), tokio::sync::TryLockError> { - /// - /// let mutex = Arc::new(Mutex::new(1)); - /// - /// let n = mutex.clone().try_lock_owned()?; - /// assert_eq!(*n, 1); - /// # Ok(()) - /// # } - pub fn try_lock_owned(self: Arc) -> Result, TryLockError> { - match self.s.try_acquire(1) { - Ok(_) => Ok(OwnedMutexGuard { lock: self }), - Err(_) => Err(TryLockError(())), - } - } - - /// Consumes the mutex, returning the underlying data. - /// # Examples - /// - /// ``` - /// use tokio::sync::Mutex; - /// - /// #[tokio::main] - /// async fn main() { - /// let mutex = Mutex::new(1); - /// - /// let n = mutex.into_inner(); - /// assert_eq!(n, 1); - /// } - /// ``` - pub fn into_inner(self) -> T - where - T: Sized, - { - self.c.into_inner() - } -} - -impl From for Mutex { - fn from(s: T) -> Self { - Self::new(s) - } -} - -impl Default for Mutex -where - T: Default, -{ - fn default() -> Self { - Self::new(T::default()) - } -} - -impl std::fmt::Debug for Mutex -where - T: std::fmt::Debug, -{ - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let mut d = f.debug_struct("Mutex"); - match self.try_lock() { - Ok(inner) => d.field("data", &*inner), - Err(_) => d.field("data", &format_args!("")), - }; - d.finish() - } -} - -// === impl MutexGuard === - -impl Drop for MutexGuard<'_, T> { - fn drop(&mut self) { - self.lock.s.release(1) - } -} - -impl Deref for MutexGuard<'_, T> { - type Target = T; - fn deref(&self) -> &Self::Target { - unsafe { &*self.lock.c.get() } - } -} - -impl DerefMut for MutexGuard<'_, T> { - fn deref_mut(&mut self) -> &mut Self::Target { - unsafe { &mut *self.lock.c.get() } - } -} - -impl fmt::Debug for MutexGuard<'_, T> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Debug::fmt(&**self, f) - } -} - -impl fmt::Display for MutexGuard<'_, T> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Display::fmt(&**self, f) - } -} - -// === impl OwnedMutexGuard === - -impl Drop for OwnedMutexGuard { - fn drop(&mut self) { - self.lock.s.release(1) - } -} - -impl Deref for OwnedMutexGuard { - type Target = T; - fn deref(&self) -> &Self::Target { - unsafe { &*self.lock.c.get() } - } -} - -impl DerefMut for OwnedMutexGuard { - fn deref_mut(&mut self) -> &mut Self::Target { - unsafe { &mut *self.lock.c.get() } - } -} - -impl fmt::Debug for OwnedMutexGuard { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result 
{ - fmt::Debug::fmt(&**self, f) - } -} - -impl fmt::Display for OwnedMutexGuard { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Display::fmt(&**self, f) - } -} diff --git a/third_party/rust/tokio-0.2.25/src/sync/notify.rs b/third_party/rust/tokio-0.2.25/src/sync/notify.rs deleted file mode 100644 index 5cb41e89eae8..000000000000 --- a/third_party/rust/tokio-0.2.25/src/sync/notify.rs +++ /dev/null @@ -1,556 +0,0 @@ -use crate::loom::sync::atomic::AtomicU8; -use crate::loom::sync::Mutex; -use crate::util::linked_list::{self, LinkedList}; - -use std::cell::UnsafeCell; -use std::future::Future; -use std::marker::PhantomPinned; -use std::pin::Pin; -use std::ptr::NonNull; -use std::sync::atomic::Ordering::SeqCst; -use std::task::{Context, Poll, Waker}; - -/// Notify a single task to wake up. -/// -/// `Notify` provides a basic mechanism to notify a single task of an event. -/// `Notify` itself does not carry any data. Instead, it is to be used to signal -/// another task to perform an operation. -/// -/// `Notify` can be thought of as a [`Semaphore`] starting with 0 permits. -/// [`notified().await`] waits for a permit to become available, and [`notify()`] -/// sets a permit **if there currently are no available permits**. -/// -/// The synchronization details of `Notify` are similar to -/// [`thread::park`][park] and [`Thread::unpark`][unpark] from std. A [`Notify`] -/// value contains a single permit. [`notified().await`] waits for the permit to -/// be made available, consumes the permit, and resumes. [`notify()`] sets the -/// permit, waking a pending task if there is one. -/// -/// If `notify()` is called **before** `notfied().await`, then the next call to -/// `notified().await` will complete immediately, consuming the permit. Any -/// subsequent calls to `notified().await` will wait for a new permit. -/// -/// If `notify()` is called **multiple** times before `notified().await`, only a -/// **single** permit is stored. The next call to `notified().await` will -/// complete immediately, but the one after will wait for a new permit. -/// -/// # Examples -/// -/// Basic usage. -/// -/// ``` -/// use tokio::sync::Notify; -/// use std::sync::Arc; -/// -/// #[tokio::main] -/// async fn main() { -/// let notify = Arc::new(Notify::new()); -/// let notify2 = notify.clone(); -/// -/// tokio::spawn(async move { -/// notify2.notified().await; -/// println!("received notification"); -/// }); -/// -/// println!("sending notification"); -/// notify.notify(); -/// } -/// ``` -/// -/// Unbound mpsc channel. 
-/// -/// ``` -/// use tokio::sync::Notify; -/// -/// use std::collections::VecDeque; -/// use std::sync::Mutex; -/// -/// struct Channel { -/// values: Mutex>, -/// notify: Notify, -/// } -/// -/// impl Channel { -/// pub fn send(&self, value: T) { -/// self.values.lock().unwrap() -/// .push_back(value); -/// -/// // Notify the consumer a value is available -/// self.notify.notify(); -/// } -/// -/// pub async fn recv(&self) -> T { -/// loop { -/// // Drain values -/// if let Some(value) = self.values.lock().unwrap().pop_front() { -/// return value; -/// } -/// -/// // Wait for values to be available -/// self.notify.notified().await; -/// } -/// } -/// } -/// ``` -/// -/// [park]: std::thread::park -/// [unpark]: std::thread::Thread::unpark -/// [`notified().await`]: Notify::notified() -/// [`notify()`]: Notify::notify() -/// [`Semaphore`]: crate::sync::Semaphore -#[derive(Debug)] -pub struct Notify { - state: AtomicU8, - waiters: Mutex>, -} - -#[derive(Debug)] -struct Waiter { - /// Intrusive linked-list pointers - pointers: linked_list::Pointers, - - /// Waiting task's waker - waker: Option, - - /// `true` if the notification has been assigned to this waiter. - notified: bool, - - /// Should not be `Unpin`. - _p: PhantomPinned, -} - -/// Future returned from `notified()` -#[derive(Debug)] -struct Notified<'a> { - /// The `Notify` being received on. - notify: &'a Notify, - - /// The current state of the receiving process. - state: State, - - /// Entry in the waiter `LinkedList`. - waiter: UnsafeCell, -} - -unsafe impl<'a> Send for Notified<'a> {} -unsafe impl<'a> Sync for Notified<'a> {} - -#[derive(Debug)] -enum State { - Init, - Waiting, - Done, -} - -/// Initial "idle" state -const EMPTY: u8 = 0; - -/// One or more threads are currently waiting to be notified. -const WAITING: u8 = 1; - -/// Pending notification -const NOTIFIED: u8 = 2; - -impl Notify { - /// Create a new `Notify`, initialized without a permit. - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::Notify; - /// - /// let notify = Notify::new(); - /// ``` - pub fn new() -> Notify { - Notify { - state: AtomicU8::new(0), - waiters: Mutex::new(LinkedList::new()), - } - } - - /// Wait for a notification. - /// - /// Each `Notify` value holds a single permit. If a permit is available from - /// an earlier call to [`notify()`], then `notified().await` will complete - /// immediately, consuming that permit. Otherwise, `notified().await` waits - /// for a permit to be made available by the next call to `notify()`. - /// - /// [`notify()`]: Notify::notify - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::Notify; - /// use std::sync::Arc; - /// - /// #[tokio::main] - /// async fn main() { - /// let notify = Arc::new(Notify::new()); - /// let notify2 = notify.clone(); - /// - /// tokio::spawn(async move { - /// notify2.notified().await; - /// println!("received notification"); - /// }); - /// - /// println!("sending notification"); - /// notify.notify(); - /// } - /// ``` - pub async fn notified(&self) { - Notified { - notify: self, - state: State::Init, - waiter: UnsafeCell::new(Waiter { - pointers: linked_list::Pointers::new(), - waker: None, - notified: false, - _p: PhantomPinned, - }), - } - .await - } - - /// Notifies a waiting task - /// - /// If a task is currently waiting, that task is notified. Otherwise, a - /// permit is stored in this `Notify` value and the **next** call to - /// [`notified().await`] will complete immediately consuming the permit made - /// available by this call to `notify()`. 
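The permit-storage behaviour described above (at most one permit is stored, and only the next waiter consumes it) can be observed directly; a minimal sketch assuming the tokio 0.2 `Notify` API removed here, with the `time` feature enabled for the timeout:

    use std::time::Duration;
    use tokio::sync::Notify;
    use tokio::time::timeout;

    #[tokio::main]
    async fn main() {
        let notify = Notify::new();

        // Two notifications before anyone waits still store only a single permit.
        notify.notify();
        notify.notify();

        // The stored permit is consumed immediately.
        notify.notified().await;

        // No permit is left, so a second wait would block; bound it with a timeout.
        assert!(timeout(Duration::from_millis(10), notify.notified())
            .await
            .is_err());
    }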
- /// - /// At most one permit may be stored by `Notify`. Many sequential calls to - /// `notify` will result in a single permit being stored. The next call to - /// `notified().await` will complete immediately, but the one after that - /// will wait. - /// - /// [`notified().await`]: Notify::notified() - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::Notify; - /// use std::sync::Arc; - /// - /// #[tokio::main] - /// async fn main() { - /// let notify = Arc::new(Notify::new()); - /// let notify2 = notify.clone(); - /// - /// tokio::spawn(async move { - /// notify2.notified().await; - /// println!("received notification"); - /// }); - /// - /// println!("sending notification"); - /// notify.notify(); - /// } - /// ``` - pub fn notify(&self) { - // Load the current state - let mut curr = self.state.load(SeqCst); - - // If the state is `EMPTY`, transition to `NOTIFIED` and return. - while let EMPTY | NOTIFIED = curr { - // The compare-exchange from `NOTIFIED` -> `NOTIFIED` is intended. A - // happens-before synchronization must happen between this atomic - // operation and a task calling `notified().await`. - let res = self.state.compare_exchange(curr, NOTIFIED, SeqCst, SeqCst); - - match res { - // No waiters, no further work to do - Ok(_) => return, - Err(actual) => { - curr = actual; - } - } - } - - // There are waiters, the lock must be acquired to notify. - let mut waiters = self.waiters.lock().unwrap(); - - // The state must be reloaded while the lock is held. The state may only - // transition out of WAITING while the lock is held. - curr = self.state.load(SeqCst); - - if let Some(waker) = notify_locked(&mut waiters, &self.state, curr) { - drop(waiters); - waker.wake(); - } - } -} - -impl Default for Notify { - fn default() -> Notify { - Notify::new() - } -} - -fn notify_locked(waiters: &mut LinkedList, state: &AtomicU8, curr: u8) -> Option { - loop { - match curr { - EMPTY | NOTIFIED => { - let res = state.compare_exchange(curr, NOTIFIED, SeqCst, SeqCst); - - match res { - Ok(_) => return None, - Err(actual) => { - assert!(actual == EMPTY || actual == NOTIFIED); - state.store(NOTIFIED, SeqCst); - return None; - } - } - } - WAITING => { - // At this point, it is guaranteed that the state will not - // concurrently change as holding the lock is required to - // transition **out** of `WAITING`. - // - // Get a pending waiter - let mut waiter = waiters.pop_back().unwrap(); - - // Safety: `waiters` lock is still held. - let waiter = unsafe { waiter.as_mut() }; - - assert!(!waiter.notified); - - waiter.notified = true; - let waker = waiter.waker.take(); - - if waiters.is_empty() { - // As this the **final** waiter in the list, the state - // must be transitioned to `EMPTY`. As transitioning - // **from** `WAITING` requires the lock to be held, a - // `store` is sufficient. - state.store(EMPTY, SeqCst); - } - - return waker; - } - _ => unreachable!(), - } - } -} - -// ===== impl Notified ===== - -impl Notified<'_> { - /// A custom `project` implementation is used in place of `pin-project-lite` - /// as a custom drop implementation is needed. - fn project(self: Pin<&mut Self>) -> (&Notify, &mut State, &UnsafeCell) { - unsafe { - // Safety: both `notify` and `state` are `Unpin`. 
- - is_unpin::<&Notify>(); - is_unpin::(); - - let me = self.get_unchecked_mut(); - (&me.notify, &mut me.state, &me.waiter) - } - } -} - -impl Future for Notified<'_> { - type Output = (); - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> { - use State::*; - - let (notify, state, waiter) = self.project(); - - loop { - match *state { - Init => { - // Optimistically try acquiring a pending notification - let res = notify - .state - .compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst); - - if res.is_ok() { - // Acquired the notification - *state = Done; - return Poll::Ready(()); - } - - // Acquire the lock and attempt to transition to the waiting - // state. - let mut waiters = notify.waiters.lock().unwrap(); - - // Reload the state with the lock held - let mut curr = notify.state.load(SeqCst); - - // Transition the state to WAITING. - loop { - match curr { - EMPTY => { - // Transition to WAITING - let res = notify - .state - .compare_exchange(EMPTY, WAITING, SeqCst, SeqCst); - - if let Err(actual) = res { - assert_eq!(actual, NOTIFIED); - curr = actual; - } else { - break; - } - } - WAITING => break, - NOTIFIED => { - // Try consuming the notification - let res = notify - .state - .compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst); - - match res { - Ok(_) => { - // Acquired the notification - *state = Done; - return Poll::Ready(()); - } - Err(actual) => { - assert_eq!(actual, EMPTY); - curr = actual; - } - } - } - _ => unreachable!(), - } - } - - // Safety: called while locked. - unsafe { - (*waiter.get()).waker = Some(cx.waker().clone()); - } - - // Insert the waiter into the linked list - // - // safety: pointers from `UnsafeCell` are never null. - waiters.push_front(unsafe { NonNull::new_unchecked(waiter.get()) }); - - *state = Waiting; - } - Waiting => { - // Currently in the "Waiting" state, implying the caller has - // a waiter stored in the waiter list (guarded by - // `notify.waiters`). In order to access the waker fields, - // we must hold the lock. - - let waiters = notify.waiters.lock().unwrap(); - - // Safety: called while locked - let w = unsafe { &mut *waiter.get() }; - - if w.notified { - // Our waker has been notified. Reset the fields and - // remove it from the list. - w.waker = None; - w.notified = false; - - *state = Done; - } else { - // Update the waker, if necessary. - if !w.waker.as_ref().unwrap().will_wake(cx.waker()) { - w.waker = Some(cx.waker().clone()); - } - - return Poll::Pending; - } - - // Explicit drop of the lock to indicate the scope that the - // lock is held. Because holding the lock is required to - // ensure safe access to fields not held within the lock, it - // is helpful to visualize the scope of the critical - // section. - drop(waiters); - } - Done => { - return Poll::Ready(()); - } - } - } - } -} - -impl Drop for Notified<'_> { - fn drop(&mut self) { - use State::*; - - // Safety: The type only transitions to a "Waiting" state when pinned. - let (notify, state, waiter) = unsafe { Pin::new_unchecked(self).project() }; - - // This is where we ensure safety. The `Notified` value is being - // dropped, which means we must ensure that the waiter entry is no - // longer stored in the linked list. - if let Waiting = *state { - let mut notify_state = WAITING; - let mut waiters = notify.waiters.lock().unwrap(); - - // `Notify.state` may be in any of the three states (Empty, Waiting, - // Notified). It doesn't actually matter what the atomic is set to - // at this point. 
We hold the lock and will ensure the atomic is in - // the correct state once th elock is dropped. - // - // Because the atomic state is not checked, at first glance, it may - // seem like this routine does not handle the case where the - // receiver is notified but has not yet observed the notification. - // If this happens, no matter how many notifications happen between - // this receiver being notified and the receive future dropping, all - // we need to do is ensure that one notification is returned back to - // the `Notify`. This is done by calling `notify_locked` if `self` - // has the `notified` flag set. - - // remove the entry from the list - // - // safety: the waiter is only added to `waiters` by virtue of it - // being the only `LinkedList` available to the type. - unsafe { waiters.remove(NonNull::new_unchecked(waiter.get())) }; - - if waiters.is_empty() { - notify_state = EMPTY; - // If the state *should* be `NOTIFIED`, the call to - // `notify_locked` below will end up doing the - // `store(NOTIFIED)`. If a concurrent receiver races and - // observes the incorrect `EMPTY` state, it will then obtain the - // lock and block until `notify.state` is in the correct final - // state. - notify.state.store(EMPTY, SeqCst); - } - - // See if the node was notified but not received. In this case, the - // notification must be sent to another waiter. - // - // Safety: with the entry removed from the linked list, there can be - // no concurrent access to the entry - let notified = unsafe { (*waiter.get()).notified }; - - if notified { - if let Some(waker) = notify_locked(&mut waiters, ¬ify.state, notify_state) { - drop(waiters); - waker.wake(); - } - } - } - } -} - -/// # Safety -/// -/// `Waiter` is forced to be !Unpin. -unsafe impl linked_list::Link for Waiter { - type Handle = NonNull; - type Target = Waiter; - - fn as_raw(handle: &NonNull) -> NonNull { - *handle - } - - unsafe fn from_raw(ptr: NonNull) -> NonNull { - ptr - } - - unsafe fn pointers(mut target: NonNull) -> NonNull> { - NonNull::from(&mut target.as_mut().pointers) - } -} - -fn is_unpin() {} diff --git a/third_party/rust/tokio-0.2.25/src/sync/oneshot.rs b/third_party/rust/tokio-0.2.25/src/sync/oneshot.rs deleted file mode 100644 index 17767e7f7f88..000000000000 --- a/third_party/rust/tokio-0.2.25/src/sync/oneshot.rs +++ /dev/null @@ -1,795 +0,0 @@ -#![cfg_attr(not(feature = "sync"), allow(dead_code, unreachable_pub))] - -//! A channel for sending a single message between asynchronous tasks. - -use crate::loom::cell::UnsafeCell; -use crate::loom::sync::atomic::AtomicUsize; -use crate::loom::sync::Arc; - -use std::fmt; -use std::future::Future; -use std::mem::MaybeUninit; -use std::pin::Pin; -use std::sync::atomic::Ordering::{self, AcqRel, Acquire}; -use std::task::Poll::{Pending, Ready}; -use std::task::{Context, Poll, Waker}; - -/// Sends a value to the associated `Receiver`. -/// -/// Instances are created by the [`channel`](fn@channel) function. -#[derive(Debug)] -pub struct Sender { - inner: Option>>, -} - -/// Receive a value from the associated `Sender`. -/// -/// Instances are created by the [`channel`](fn@channel) function. -#[derive(Debug)] -pub struct Receiver { - inner: Option>>, -} - -pub mod error { - //! Oneshot error types - - use std::fmt; - - /// Error returned by the `Future` implementation for `Receiver`. - #[derive(Debug, Eq, PartialEq)] - pub struct RecvError(pub(super) ()); - - /// Error returned by the `try_recv` function on `Receiver`. 
- #[derive(Debug, Eq, PartialEq)] - pub enum TryRecvError { - /// The send half of the channel has not yet sent a value. - Empty, - - /// The send half of the channel was dropped without sending a value. - Closed, - } - - // ===== impl RecvError ===== - - impl fmt::Display for RecvError { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(fmt, "channel closed") - } - } - - impl std::error::Error for RecvError {} - - // ===== impl TryRecvError ===== - - impl fmt::Display for TryRecvError { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - TryRecvError::Empty => write!(fmt, "channel empty"), - TryRecvError::Closed => write!(fmt, "channel closed"), - } - } - } - - impl std::error::Error for TryRecvError {} -} - -use self::error::*; - -struct Inner { - /// Manages the state of the inner cell - state: AtomicUsize, - - /// The value. This is set by `Sender` and read by `Receiver`. The state of - /// the cell is tracked by `state`. - value: UnsafeCell>, - - /// The task to notify when the receiver drops without consuming the value. - tx_task: UnsafeCell>, - - /// The task to notify when the value is sent. - rx_task: UnsafeCell>, -} - -#[derive(Clone, Copy)] -struct State(usize); - -/// Create a new one-shot channel for sending single values across asynchronous -/// tasks. -/// -/// The function returns separate "send" and "receive" handles. The `Sender` -/// handle is used by the producer to send the value. The `Receiver` handle is -/// used by the consumer to receive the value. -/// -/// Each handle can be used on separate tasks. -/// -/// # Examples -/// -/// ``` -/// use tokio::sync::oneshot; -/// -/// #[tokio::main] -/// async fn main() { -/// let (tx, rx) = oneshot::channel(); -/// -/// tokio::spawn(async move { -/// if let Err(_) = tx.send(3) { -/// println!("the receiver dropped"); -/// } -/// }); -/// -/// match rx.await { -/// Ok(v) => println!("got = {:?}", v), -/// Err(_) => println!("the sender dropped"), -/// } -/// } -/// ``` -pub fn channel() -> (Sender, Receiver) { - #[allow(deprecated)] - let inner = Arc::new(Inner { - state: AtomicUsize::new(State::new().as_usize()), - value: UnsafeCell::new(None), - tx_task: UnsafeCell::new(MaybeUninit::uninit()), - rx_task: UnsafeCell::new(MaybeUninit::uninit()), - }); - - let tx = Sender { - inner: Some(inner.clone()), - }; - let rx = Receiver { inner: Some(inner) }; - - (tx, rx) -} - -impl Sender { - /// Attempts to send a value on this channel, returning it back if it could - /// not be sent. - /// - /// This method consumes `self` as only one value may ever be sent on a oneshot - /// channel. It is not marked async because sending a message to an oneshot - /// channel never requires any form of waiting. Because of this, the `send` - /// method can be used in both synchronous and asynchronous code without - /// problems. - /// - /// A successful send occurs when it is determined that the other end of the - /// channel has not hung up already. An unsuccessful send would be one where - /// the corresponding receiver has already been deallocated. Note that a - /// return value of `Err` means that the data will never be received, but - /// a return value of `Ok` does *not* mean that the data will be received. - /// It is possible for the corresponding receiver to hang up immediately - /// after this function returns `Ok`. 
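As the `send` docs above note, a failed oneshot send hands the value back to the caller; a minimal sketch under the same tokio 0.2 assumptions, illustration only:

    use tokio::sync::oneshot;

    #[tokio::main]
    async fn main() {
        let (tx, rx) = oneshot::channel::<&str>();

        // Dropping the receiver guarantees that any later send fails and
        // returns the value to the sender.
        drop(rx);

        match tx.send("unwanted") {
            Ok(()) => unreachable!(),
            Err(value) => assert_eq!(value, "unwanted"),
        }
    }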
- /// - /// # Examples - /// - /// Send a value to another task - /// - /// ``` - /// use tokio::sync::oneshot; - /// - /// #[tokio::main] - /// async fn main() { - /// let (tx, rx) = oneshot::channel(); - /// - /// tokio::spawn(async move { - /// if let Err(_) = tx.send(3) { - /// println!("the receiver dropped"); - /// } - /// }); - /// - /// match rx.await { - /// Ok(v) => println!("got = {:?}", v), - /// Err(_) => println!("the sender dropped"), - /// } - /// } - /// ``` - pub fn send(mut self, t: T) -> Result<(), T> { - let inner = self.inner.take().unwrap(); - - inner.value.with_mut(|ptr| unsafe { - *ptr = Some(t); - }); - - if !inner.complete() { - return Err(inner - .value - .with_mut(|ptr| unsafe { (*ptr).take() }.unwrap())); - } - - Ok(()) - } - - #[doc(hidden)] // TODO: remove - pub fn poll_closed(&mut self, cx: &mut Context<'_>) -> Poll<()> { - // Keep track of task budget - let coop = ready!(crate::coop::poll_proceed(cx)); - - let inner = self.inner.as_ref().unwrap(); - - let mut state = State::load(&inner.state, Acquire); - - if state.is_closed() { - coop.made_progress(); - return Poll::Ready(()); - } - - if state.is_tx_task_set() { - let will_notify = unsafe { inner.with_tx_task(|w| w.will_wake(cx.waker())) }; - - if !will_notify { - state = State::unset_tx_task(&inner.state); - - if state.is_closed() { - // Set the flag again so that the waker is released in drop - State::set_tx_task(&inner.state); - coop.made_progress(); - return Ready(()); - } else { - unsafe { inner.drop_tx_task() }; - } - } - } - - if !state.is_tx_task_set() { - // Attempt to set the task - unsafe { - inner.set_tx_task(cx); - } - - // Update the state - state = State::set_tx_task(&inner.state); - - if state.is_closed() { - coop.made_progress(); - return Ready(()); - } - } - - Pending - } - - /// Waits for the associated [`Receiver`] handle to close. - /// - /// A [`Receiver`] is closed by either calling [`close`] explicitly or the - /// [`Receiver`] value is dropped. - /// - /// This function is useful when paired with `select!` to abort a - /// computation when the receiver is no longer interested in the result. - /// - /// # Return - /// - /// Returns a `Future` which must be awaited on. - /// - /// [`Receiver`]: Receiver - /// [`close`]: Receiver::close - /// - /// # Examples - /// - /// Basic usage - /// - /// ``` - /// use tokio::sync::oneshot; - /// - /// #[tokio::main] - /// async fn main() { - /// let (mut tx, rx) = oneshot::channel::<()>(); - /// - /// tokio::spawn(async move { - /// drop(rx); - /// }); - /// - /// tx.closed().await; - /// println!("the receiver dropped"); - /// } - /// ``` - /// - /// Paired with select - /// - /// ``` - /// use tokio::sync::oneshot; - /// use tokio::time::{self, Duration}; - /// - /// use futures::{select, FutureExt}; - /// - /// async fn compute() -> String { - /// // Complex computation returning a `String` - /// # "hello".to_string() - /// } - /// - /// #[tokio::main] - /// async fn main() { - /// let (mut tx, rx) = oneshot::channel(); - /// - /// tokio::spawn(async move { - /// select! 
{ - /// _ = tx.closed().fuse() => { - /// // The receiver dropped, no need to do any further work - /// } - /// value = compute().fuse() => { - /// tx.send(value).unwrap() - /// } - /// } - /// }); - /// - /// // Wait for up to 10 seconds - /// let _ = time::timeout(Duration::from_secs(10), rx).await; - /// } - /// ``` - pub async fn closed(&mut self) { - use crate::future::poll_fn; - - poll_fn(|cx| self.poll_closed(cx)).await - } - - /// Returns `true` if the associated [`Receiver`] handle has been dropped. - /// - /// A [`Receiver`] is closed by either calling [`close`] explicitly or the - /// [`Receiver`] value is dropped. - /// - /// If `true` is returned, a call to `send` will always result in an error. - /// - /// [`Receiver`]: Receiver - /// [`close`]: Receiver::close - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::oneshot; - /// - /// #[tokio::main] - /// async fn main() { - /// let (tx, rx) = oneshot::channel(); - /// - /// assert!(!tx.is_closed()); - /// - /// drop(rx); - /// - /// assert!(tx.is_closed()); - /// assert!(tx.send("never received").is_err()); - /// } - /// ``` - pub fn is_closed(&self) -> bool { - let inner = self.inner.as_ref().unwrap(); - - let state = State::load(&inner.state, Acquire); - state.is_closed() - } -} - -impl Drop for Sender { - fn drop(&mut self) { - if let Some(inner) = self.inner.as_ref() { - inner.complete(); - } - } -} - -impl Receiver { - /// Prevents the associated [`Sender`] handle from sending a value. - /// - /// Any `send` operation which happens after calling `close` is guaranteed - /// to fail. After calling `close`, [`try_recv`] should be called to - /// receive a value if one was sent **before** the call to `close` - /// completed. - /// - /// This function is useful to perform a graceful shutdown and ensure that a - /// value will not be sent into the channel and never received. - /// - /// [`Sender`]: Sender - /// [`try_recv`]: Receiver::try_recv - /// - /// # Examples - /// - /// Prevent a value from being sent - /// - /// ``` - /// use tokio::sync::oneshot; - /// use tokio::sync::oneshot::error::TryRecvError; - /// - /// #[tokio::main] - /// async fn main() { - /// let (tx, mut rx) = oneshot::channel(); - /// - /// assert!(!tx.is_closed()); - /// - /// rx.close(); - /// - /// assert!(tx.is_closed()); - /// assert!(tx.send("never received").is_err()); - /// - /// match rx.try_recv() { - /// Err(TryRecvError::Closed) => {} - /// _ => unreachable!(), - /// } - /// } - /// ``` - /// - /// Receive a value sent **before** calling `close` - /// - /// ``` - /// use tokio::sync::oneshot; - /// - /// #[tokio::main] - /// async fn main() { - /// let (tx, mut rx) = oneshot::channel(); - /// - /// assert!(tx.send("will receive").is_ok()); - /// - /// rx.close(); - /// - /// let msg = rx.try_recv().unwrap(); - /// assert_eq!(msg, "will receive"); - /// } - /// ``` - pub fn close(&mut self) { - let inner = self.inner.as_ref().unwrap(); - inner.close(); - } - - /// Attempts to receive a value. - /// - /// If a pending value exists in the channel, it is returned. If no value - /// has been sent, the current task **will not** be registered for - /// future notification. - /// - /// This function is useful to call from outside the context of an - /// asynchronous task. - /// - /// # Return - /// - /// - `Ok(T)` if a value is pending in the channel. - /// - `Err(TryRecvError::Empty)` if no value has been sent yet. - /// - `Err(TryRecvError::Closed)` if the sender has dropped without sending - /// a value. 
- /// - /// # Examples - /// - /// `try_recv` before a value is sent, then after. - /// - /// ``` - /// use tokio::sync::oneshot; - /// use tokio::sync::oneshot::error::TryRecvError; - /// - /// #[tokio::main] - /// async fn main() { - /// let (tx, mut rx) = oneshot::channel(); - /// - /// match rx.try_recv() { - /// // The channel is currently empty - /// Err(TryRecvError::Empty) => {} - /// _ => unreachable!(), - /// } - /// - /// // Send a value - /// tx.send("hello").unwrap(); - /// - /// match rx.try_recv() { - /// Ok(value) => assert_eq!(value, "hello"), - /// _ => unreachable!(), - /// } - /// } - /// ``` - /// - /// `try_recv` when the sender dropped before sending a value - /// - /// ``` - /// use tokio::sync::oneshot; - /// use tokio::sync::oneshot::error::TryRecvError; - /// - /// #[tokio::main] - /// async fn main() { - /// let (tx, mut rx) = oneshot::channel::<()>(); - /// - /// drop(tx); - /// - /// match rx.try_recv() { - /// // The channel will never receive a value. - /// Err(TryRecvError::Closed) => {} - /// _ => unreachable!(), - /// } - /// } - /// ``` - pub fn try_recv(&mut self) -> Result { - let result = if let Some(inner) = self.inner.as_ref() { - let state = State::load(&inner.state, Acquire); - - if state.is_complete() { - match unsafe { inner.consume_value() } { - Some(value) => Ok(value), - None => Err(TryRecvError::Closed), - } - } else if state.is_closed() { - Err(TryRecvError::Closed) - } else { - // Not ready, this does not clear `inner` - return Err(TryRecvError::Empty); - } - } else { - panic!("called after complete"); - }; - - self.inner = None; - result - } -} - -impl Drop for Receiver { - fn drop(&mut self) { - if let Some(inner) = self.inner.as_ref() { - inner.close(); - } - } -} - -impl Future for Receiver { - type Output = Result; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - // If `inner` is `None`, then `poll()` has already completed. - let ret = if let Some(inner) = self.as_ref().get_ref().inner.as_ref() { - ready!(inner.poll_recv(cx))? - } else { - panic!("called after complete"); - }; - - self.inner = None; - Ready(Ok(ret)) - } -} - -impl Inner { - fn complete(&self) -> bool { - let prev = State::set_complete(&self.state); - - if prev.is_closed() { - return false; - } - - if prev.is_rx_task_set() { - // TODO: Consume waker? 
- unsafe { - self.with_rx_task(Waker::wake_by_ref); - } - } - - true - } - - fn poll_recv(&self, cx: &mut Context<'_>) -> Poll> { - // Keep track of task budget - let coop = ready!(crate::coop::poll_proceed(cx)); - - // Load the state - let mut state = State::load(&self.state, Acquire); - - if state.is_complete() { - coop.made_progress(); - match unsafe { self.consume_value() } { - Some(value) => Ready(Ok(value)), - None => Ready(Err(RecvError(()))), - } - } else if state.is_closed() { - coop.made_progress(); - Ready(Err(RecvError(()))) - } else { - if state.is_rx_task_set() { - let will_notify = unsafe { self.with_rx_task(|w| w.will_wake(cx.waker())) }; - - // Check if the task is still the same - if !will_notify { - // Unset the task - state = State::unset_rx_task(&self.state); - if state.is_complete() { - // Set the flag again so that the waker is released in drop - State::set_rx_task(&self.state); - - coop.made_progress(); - return match unsafe { self.consume_value() } { - Some(value) => Ready(Ok(value)), - None => Ready(Err(RecvError(()))), - }; - } else { - unsafe { self.drop_rx_task() }; - } - } - } - - if !state.is_rx_task_set() { - // Attempt to set the task - unsafe { - self.set_rx_task(cx); - } - - // Update the state - state = State::set_rx_task(&self.state); - - if state.is_complete() { - coop.made_progress(); - match unsafe { self.consume_value() } { - Some(value) => Ready(Ok(value)), - None => Ready(Err(RecvError(()))), - } - } else { - Pending - } - } else { - Pending - } - } - } - - /// Called by `Receiver` to indicate that the value will never be received. - fn close(&self) { - let prev = State::set_closed(&self.state); - - if prev.is_tx_task_set() && !prev.is_complete() { - unsafe { - self.with_tx_task(Waker::wake_by_ref); - } - } - } - - /// Consumes the value. This function does not check `state`. 
- unsafe fn consume_value(&self) -> Option { - self.value.with_mut(|ptr| (*ptr).take()) - } - - unsafe fn with_rx_task(&self, f: F) -> R - where - F: FnOnce(&Waker) -> R, - { - self.rx_task.with(|ptr| { - let waker: *const Waker = (&*ptr).as_ptr(); - f(&*waker) - }) - } - - unsafe fn with_tx_task(&self, f: F) -> R - where - F: FnOnce(&Waker) -> R, - { - self.tx_task.with(|ptr| { - let waker: *const Waker = (&*ptr).as_ptr(); - f(&*waker) - }) - } - - unsafe fn drop_rx_task(&self) { - self.rx_task.with_mut(|ptr| { - let ptr: *mut Waker = (&mut *ptr).as_mut_ptr(); - ptr.drop_in_place(); - }); - } - - unsafe fn drop_tx_task(&self) { - self.tx_task.with_mut(|ptr| { - let ptr: *mut Waker = (&mut *ptr).as_mut_ptr(); - ptr.drop_in_place(); - }); - } - - unsafe fn set_rx_task(&self, cx: &mut Context<'_>) { - self.rx_task.with_mut(|ptr| { - let ptr: *mut Waker = (&mut *ptr).as_mut_ptr(); - ptr.write(cx.waker().clone()); - }); - } - - unsafe fn set_tx_task(&self, cx: &mut Context<'_>) { - self.tx_task.with_mut(|ptr| { - let ptr: *mut Waker = (&mut *ptr).as_mut_ptr(); - ptr.write(cx.waker().clone()); - }); - } -} - -unsafe impl Send for Inner {} -unsafe impl Sync for Inner {} - -impl Drop for Inner { - fn drop(&mut self) { - let state = State(self.state.with_mut(|v| *v)); - - if state.is_rx_task_set() { - unsafe { - self.drop_rx_task(); - } - } - - if state.is_tx_task_set() { - unsafe { - self.drop_tx_task(); - } - } - } -} - -impl fmt::Debug for Inner { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - use std::sync::atomic::Ordering::Relaxed; - - fmt.debug_struct("Inner") - .field("state", &State::load(&self.state, Relaxed)) - .finish() - } -} - -const RX_TASK_SET: usize = 0b00001; -const VALUE_SENT: usize = 0b00010; -const CLOSED: usize = 0b00100; -const TX_TASK_SET: usize = 0b01000; - -impl State { - fn new() -> State { - State(0) - } - - fn is_complete(self) -> bool { - self.0 & VALUE_SENT == VALUE_SENT - } - - fn set_complete(cell: &AtomicUsize) -> State { - // TODO: This could be `Release`, followed by an `Acquire` fence *if* - // the `RX_TASK_SET` flag is set. However, `loom` does not support - // fences yet. - let val = cell.fetch_or(VALUE_SENT, AcqRel); - State(val) - } - - fn is_rx_task_set(self) -> bool { - self.0 & RX_TASK_SET == RX_TASK_SET - } - - fn set_rx_task(cell: &AtomicUsize) -> State { - let val = cell.fetch_or(RX_TASK_SET, AcqRel); - State(val | RX_TASK_SET) - } - - fn unset_rx_task(cell: &AtomicUsize) -> State { - let val = cell.fetch_and(!RX_TASK_SET, AcqRel); - State(val & !RX_TASK_SET) - } - - fn is_closed(self) -> bool { - self.0 & CLOSED == CLOSED - } - - fn set_closed(cell: &AtomicUsize) -> State { - // Acquire because we want all later writes (attempting to poll) to be - // ordered after this. 
- let val = cell.fetch_or(CLOSED, Acquire); - State(val) - } - - fn set_tx_task(cell: &AtomicUsize) -> State { - let val = cell.fetch_or(TX_TASK_SET, AcqRel); - State(val | TX_TASK_SET) - } - - fn unset_tx_task(cell: &AtomicUsize) -> State { - let val = cell.fetch_and(!TX_TASK_SET, AcqRel); - State(val & !TX_TASK_SET) - } - - fn is_tx_task_set(self) -> bool { - self.0 & TX_TASK_SET == TX_TASK_SET - } - - fn as_usize(self) -> usize { - self.0 - } - - fn load(cell: &AtomicUsize, order: Ordering) -> State { - let val = cell.load(order); - State(val) - } -} - -impl fmt::Debug for State { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("State") - .field("is_complete", &self.is_complete()) - .field("is_closed", &self.is_closed()) - .field("is_rx_task_set", &self.is_rx_task_set()) - .field("is_tx_task_set", &self.is_tx_task_set()) - .finish() - } -} diff --git a/third_party/rust/tokio-0.2.25/src/sync/rwlock.rs b/third_party/rust/tokio-0.2.25/src/sync/rwlock.rs deleted file mode 100644 index 1bb579319d07..000000000000 --- a/third_party/rust/tokio-0.2.25/src/sync/rwlock.rs +++ /dev/null @@ -1,559 +0,0 @@ -use crate::sync::batch_semaphore::Semaphore; -use std::cell::UnsafeCell; -use std::fmt; -use std::marker; -use std::mem; -use std::ops; - -#[cfg(not(loom))] -const MAX_READS: usize = 32; - -#[cfg(loom)] -const MAX_READS: usize = 10; - -/// An asynchronous reader-writer lock -/// -/// This type of lock allows a number of readers or at most one writer at any -/// point in time. The write portion of this lock typically allows modification -/// of the underlying data (exclusive access) and the read portion of this lock -/// typically allows for read-only access (shared access). -/// -/// In comparison, a [`Mutex`] does not distinguish between readers or writers -/// that acquire the lock, therefore causing any tasks waiting for the lock to -/// become available to yield. An `RwLock` will allow any number of readers to -/// acquire the lock as long as a writer is not holding the lock. -/// -/// The priority policy of Tokio's read-write lock is _fair_ (or -/// [_write-preferring_]), in order to ensure that readers cannot starve -/// writers. Fairness is ensured using a first-in, first-out queue for the tasks -/// awaiting the lock; if a task that wishes to acquire the write lock is at the -/// head of the queue, read locks will not be given out until the write lock has -/// been released. This is in contrast to the Rust standard library's -/// `std::sync::RwLock`, where the priority policy is dependent on the -/// operating system's implementation. -/// -/// The type parameter `T` represents the data that this lock protects. It is -/// required that `T` satisfies [`Send`] to be shared across threads. The RAII guards -/// returned from the locking methods implement [`Deref`](trait@std::ops::Deref) -/// (and [`DerefMut`](trait@std::ops::DerefMut) -/// for the `write` methods) to allow access to the content of the lock. 
-/// -/// # Examples -/// -/// ``` -/// use tokio::sync::RwLock; -/// -/// #[tokio::main] -/// async fn main() { -/// let lock = RwLock::new(5); -/// -/// // many reader locks can be held at once -/// { -/// let r1 = lock.read().await; -/// let r2 = lock.read().await; -/// assert_eq!(*r1, 5); -/// assert_eq!(*r2, 5); -/// } // read locks are dropped at this point -/// -/// // only one write lock may be held, however -/// { -/// let mut w = lock.write().await; -/// *w += 1; -/// assert_eq!(*w, 6); -/// } // write lock is dropped here -/// } -/// ``` -/// -/// [`Mutex`]: struct@super::Mutex -/// [`RwLock`]: struct@RwLock -/// [`RwLockReadGuard`]: struct@RwLockReadGuard -/// [`RwLockWriteGuard`]: struct@RwLockWriteGuard -/// [`Send`]: trait@std::marker::Send -/// [_write-preferring_]: https://en.wikipedia.org/wiki/Readers%E2%80%93writer_lock#Priority_policies -#[derive(Debug)] -pub struct RwLock { - //semaphore to coordinate read and write access to T - s: Semaphore, - - //inner data T - c: UnsafeCell, -} - -/// RAII structure used to release the shared read access of a lock when -/// dropped. -/// -/// This structure is created by the [`read`] method on -/// [`RwLock`]. -/// -/// [`read`]: method@RwLock::read -/// [`RwLock`]: struct@RwLock -pub struct RwLockReadGuard<'a, T: ?Sized> { - s: &'a Semaphore, - data: *const T, - marker: marker::PhantomData<&'a T>, -} - -impl<'a, T> RwLockReadGuard<'a, T> { - /// Make a new `RwLockReadGuard` for a component of the locked data. - /// - /// This operation cannot fail as the `RwLockReadGuard` passed in already - /// locked the data. - /// - /// This is an associated function that needs to be - /// used as `RwLockReadGuard::map(...)`. A method would interfere with - /// methods of the same name on the contents of the locked data. - /// - /// This is an asynchronous version of [`RwLockReadGuard::map`] from the - /// [`parking_lot` crate]. - /// - /// [`RwLockReadGuard::map`]: https://docs.rs/lock_api/latest/lock_api/struct.RwLockReadGuard.html#method.map - /// [`parking_lot` crate]: https://crates.io/crates/parking_lot - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::{RwLock, RwLockReadGuard}; - /// - /// #[derive(Debug, Clone, Copy, PartialEq, Eq)] - /// struct Foo(u32); - /// - /// # #[tokio::main] - /// # async fn main() { - /// let lock = RwLock::new(Foo(1)); - /// - /// let guard = lock.read().await; - /// let guard = RwLockReadGuard::map(guard, |f| &f.0); - /// - /// assert_eq!(1, *guard); - /// # } - /// ``` - #[inline] - pub fn map(this: Self, f: F) -> RwLockReadGuard<'a, U> - where - F: FnOnce(&T) -> &U, - { - let data = f(&*this) as *const U; - let s = this.s; - // NB: Forget to avoid drop impl from being called. - mem::forget(this); - RwLockReadGuard { - s, - data, - marker: marker::PhantomData, - } - } - - /// Attempts to make a new [`RwLockReadGuard`] for a component of the - /// locked data. The original guard is returned if the closure returns - /// `None`. - /// - /// This operation cannot fail as the `RwLockReadGuard` passed in already - /// locked the data. - /// - /// This is an associated function that needs to be used as - /// `RwLockReadGuard::try_map(..)`. A method would interfere with methods of the - /// same name on the contents of the locked data. - /// - /// This is an asynchronous version of [`RwLockReadGuard::try_map`] from the - /// [`parking_lot` crate]. 
- /// - /// [`RwLockReadGuard::try_map`]: https://docs.rs/lock_api/latest/lock_api/struct.RwLockReadGuard.html#method.try_map - /// [`parking_lot` crate]: https://crates.io/crates/parking_lot - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::{RwLock, RwLockReadGuard}; - /// - /// #[derive(Debug, Clone, Copy, PartialEq, Eq)] - /// struct Foo(u32); - /// - /// # #[tokio::main] - /// # async fn main() { - /// let lock = RwLock::new(Foo(1)); - /// - /// let guard = lock.read().await; - /// let guard = RwLockReadGuard::try_map(guard, |f| Some(&f.0)).expect("should not fail"); - /// - /// assert_eq!(1, *guard); - /// # } - /// ``` - #[inline] - pub fn try_map(this: Self, f: F) -> Result, Self> - where - F: FnOnce(&T) -> Option<&U>, - { - let data = match f(&*this) { - Some(data) => data as *const U, - None => return Err(this), - }; - let s = this.s; - // NB: Forget to avoid drop impl from being called. - mem::forget(this); - Ok(RwLockReadGuard { - s, - data, - marker: marker::PhantomData, - }) - } -} - -impl<'a, T: ?Sized> fmt::Debug for RwLockReadGuard<'a, T> -where - T: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Debug::fmt(&**self, f) - } -} - -impl<'a, T: ?Sized> fmt::Display for RwLockReadGuard<'a, T> -where - T: fmt::Display, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Display::fmt(&**self, f) - } -} - -impl<'a, T: ?Sized> Drop for RwLockReadGuard<'a, T> { - fn drop(&mut self) { - self.s.release(1); - } -} - -/// RAII structure used to release the exclusive write access of a lock when -/// dropped. -/// -/// This structure is created by the [`write`] and method -/// on [`RwLock`]. -/// -/// [`write`]: method@RwLock::write -/// [`RwLock`]: struct@RwLock -pub struct RwLockWriteGuard<'a, T: ?Sized> { - s: &'a Semaphore, - data: *mut T, - marker: marker::PhantomData<&'a mut T>, -} - -impl<'a, T: ?Sized> RwLockWriteGuard<'a, T> { - /// Make a new `RwLockWriteGuard` for a component of the locked data. - /// - /// This operation cannot fail as the `RwLockWriteGuard` passed in already - /// locked the data. - /// - /// This is an associated function that needs to be used as - /// `RwLockWriteGuard::map(..)`. A method would interfere with methods of - /// the same name on the contents of the locked data. - /// - /// This is an asynchronous version of [`RwLockWriteGuard::map`] from the - /// [`parking_lot` crate]. - /// - /// [`RwLockWriteGuard::map`]: https://docs.rs/lock_api/latest/lock_api/struct.RwLockWriteGuard.html#method.map - /// [`parking_lot` crate]: https://crates.io/crates/parking_lot - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::{RwLock, RwLockWriteGuard}; - /// - /// #[derive(Debug, Clone, Copy, PartialEq, Eq)] - /// struct Foo(u32); - /// - /// # #[tokio::main] - /// # async fn main() { - /// let lock = RwLock::new(Foo(1)); - /// - /// { - /// let mut mapped = RwLockWriteGuard::map(lock.write().await, |f| &mut f.0); - /// *mapped = 2; - /// } - /// - /// assert_eq!(Foo(2), *lock.read().await); - /// # } - /// ``` - #[inline] - pub fn map(mut this: Self, f: F) -> RwLockWriteGuard<'a, U> - where - F: FnOnce(&mut T) -> &mut U, - { - let data = f(&mut *this) as *mut U; - let s = this.s; - // NB: Forget to avoid drop impl from being called. - mem::forget(this); - RwLockWriteGuard { - s, - data, - marker: marker::PhantomData, - } - } - - /// Attempts to make a new [`RwLockWriteGuard`] for a component of - /// the locked data. The original guard is returned if the closure returns - /// `None`. 
- /// - /// This operation cannot fail as the `RwLockWriteGuard` passed in already - /// locked the data. - /// - /// This is an associated function that needs to be - /// used as `RwLockWriteGuard::try_map(...)`. A method would interfere with - /// methods of the same name on the contents of the locked data. - /// - /// This is an asynchronous version of [`RwLockWriteGuard::try_map`] from - /// the [`parking_lot` crate]. - /// - /// [`RwLockWriteGuard::try_map`]: https://docs.rs/lock_api/latest/lock_api/struct.RwLockWriteGuard.html#method.try_map - /// [`parking_lot` crate]: https://crates.io/crates/parking_lot - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::{RwLock, RwLockWriteGuard}; - /// - /// #[derive(Debug, Clone, Copy, PartialEq, Eq)] - /// struct Foo(u32); - /// - /// # #[tokio::main] - /// # async fn main() { - /// let lock = RwLock::new(Foo(1)); - /// - /// { - /// let guard = lock.write().await; - /// let mut guard = RwLockWriteGuard::try_map(guard, |f| Some(&mut f.0)).expect("should not fail"); - /// *guard = 2; - /// } - /// - /// assert_eq!(Foo(2), *lock.read().await); - /// # } - /// ``` - #[inline] - pub fn try_map(mut this: Self, f: F) -> Result, Self> - where - F: FnOnce(&mut T) -> Option<&mut U>, - { - let data = match f(&mut *this) { - Some(data) => data as *mut U, - None => return Err(this), - }; - let s = this.s; - // NB: Forget to avoid drop impl from being called. - mem::forget(this); - Ok(RwLockWriteGuard { - s, - data, - marker: marker::PhantomData, - }) - } -} - -impl<'a, T: ?Sized> fmt::Debug for RwLockWriteGuard<'a, T> -where - T: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Debug::fmt(&**self, f) - } -} - -impl<'a, T: ?Sized> fmt::Display for RwLockWriteGuard<'a, T> -where - T: fmt::Display, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Display::fmt(&**self, f) - } -} - -impl<'a, T: ?Sized> Drop for RwLockWriteGuard<'a, T> { - fn drop(&mut self) { - self.s.release(MAX_READS); - } -} - -#[test] -#[cfg(not(loom))] -fn bounds() { - fn check_send() {} - fn check_sync() {} - fn check_unpin() {} - // This has to take a value, since the async fn's return type is unnameable. - fn check_send_sync_val(_t: T) {} - - check_send::>(); - check_sync::>(); - check_unpin::>(); - - check_send::>(); - check_sync::>(); - check_unpin::>(); - - check_send::>(); - check_sync::>(); - check_unpin::>(); - - let rwlock = RwLock::new(0); - check_send_sync_val(rwlock.read()); - check_send_sync_val(rwlock.write()); -} - -// As long as T: Send + Sync, it's fine to send and share RwLock between threads. -// If T were not Send, sending and sharing a RwLock would be bad, since you can access T through -// RwLock. -unsafe impl Send for RwLock where T: ?Sized + Send {} -unsafe impl Sync for RwLock where T: ?Sized + Send + Sync {} -// NB: These impls need to be explicit since we're storing a raw pointer. -// Safety: Stores a raw pointer to `T`, so if `T` is `Sync`, the lock guard over -// `T` is `Send`. -unsafe impl Send for RwLockReadGuard<'_, T> where T: ?Sized + Sync {} -unsafe impl Sync for RwLockReadGuard<'_, T> where T: ?Sized + Send + Sync {} -unsafe impl Sync for RwLockWriteGuard<'_, T> where T: ?Sized + Send + Sync {} -// Safety: Stores a raw pointer to `T`, so if `T` is `Sync`, the lock guard over -// `T` is `Send` - but since this is also provides mutable access, we need to -// make sure that `T` is `Send` since its value can be sent across thread -// boundaries. 
-unsafe impl Send for RwLockWriteGuard<'_, T> where T: ?Sized + Send + Sync {} - -impl RwLock { - /// Creates a new instance of an `RwLock` which is unlocked. - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::RwLock; - /// - /// let lock = RwLock::new(5); - /// ``` - pub fn new(value: T) -> RwLock - where - T: Sized, - { - RwLock { - c: UnsafeCell::new(value), - s: Semaphore::new(MAX_READS), - } - } - - /// Locks this rwlock with shared read access, causing the current task - /// to yield until the lock has been acquired. - /// - /// The calling task will yield until there are no more writers which - /// hold the lock. There may be other readers currently inside the lock when - /// this method returns. - /// - /// # Examples - /// - /// ``` - /// use std::sync::Arc; - /// use tokio::sync::RwLock; - /// - /// #[tokio::main] - /// async fn main() { - /// let lock = Arc::new(RwLock::new(1)); - /// let c_lock = lock.clone(); - /// - /// let n = lock.read().await; - /// assert_eq!(*n, 1); - /// - /// tokio::spawn(async move { - /// // While main has an active read lock, we acquire one too. - /// let r = c_lock.read().await; - /// assert_eq!(*r, 1); - /// }).await.expect("The spawned task has paniced"); - /// - /// // Drop the guard after the spawned task finishes. - /// drop(n); - ///} - /// ``` - pub async fn read(&self) -> RwLockReadGuard<'_, T> { - self.s.acquire(1).await.unwrap_or_else(|_| { - // The semaphore was closed. but, we never explicitly close it, and we have a - // handle to it through the Arc, which means that this can never happen. - unreachable!() - }); - RwLockReadGuard { - s: &self.s, - data: self.c.get(), - marker: marker::PhantomData, - } - } - - /// Locks this rwlock with exclusive write access, causing the current task - /// to yield until the lock has been acquired. - /// - /// This function will not return while other writers or other readers - /// currently have access to the lock. - /// - /// Returns an RAII guard which will drop the write access of this rwlock - /// when dropped. - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::RwLock; - /// - /// #[tokio::main] - /// async fn main() { - /// let lock = RwLock::new(1); - /// - /// let mut n = lock.write().await; - /// *n = 2; - ///} - /// ``` - pub async fn write(&self) -> RwLockWriteGuard<'_, T> { - self.s.acquire(MAX_READS as u32).await.unwrap_or_else(|_| { - // The semaphore was closed. but, we never explicitly close it, and we have a - // handle to it through the Arc, which means that this can never happen. - unreachable!() - }); - RwLockWriteGuard { - s: &self.s, - data: self.c.get(), - marker: marker::PhantomData, - } - } - - /// Consumes the lock, returning the underlying data. 
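The `read`/`write` implementations above encode the whole lock in a counting semaphore: a reader holds one permit, a writer holds all `MAX_READS` of them. A rough sketch of that encoding against tokio's public `Semaphore`, assuming a tokio 1.x-style API with `acquire_many`; the constant and function here are invented for the illustration.

```
use tokio::sync::Semaphore;

const MAX_READS: u32 = 32;

async fn demo() {
    let sem = Semaphore::new(MAX_READS as usize);

    // A reader takes a single permit, so up to MAX_READS readers can coexist.
    let reader = sem.acquire().await.unwrap();
    drop(reader);

    // A writer takes every permit, so it waits for all readers to finish and
    // keeps new readers out until it is dropped.
    let writer = sem.acquire_many(MAX_READS).await.unwrap();
    drop(writer);
}
```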
- pub fn into_inner(self) -> T - where - T: Sized, - { - self.c.into_inner() - } -} - -impl ops::Deref for RwLockReadGuard<'_, T> { - type Target = T; - - fn deref(&self) -> &T { - unsafe { &*self.data } - } -} - -impl ops::Deref for RwLockWriteGuard<'_, T> { - type Target = T; - - fn deref(&self) -> &T { - unsafe { &*self.data } - } -} - -impl ops::DerefMut for RwLockWriteGuard<'_, T> { - fn deref_mut(&mut self) -> &mut T { - unsafe { &mut *self.data } - } -} - -impl From for RwLock { - fn from(s: T) -> Self { - Self::new(s) - } -} - -impl Default for RwLock -where - T: Default, -{ - fn default() -> Self { - Self::new(T::default()) - } -} diff --git a/third_party/rust/tokio-0.2.25/src/sync/semaphore.rs b/third_party/rust/tokio-0.2.25/src/sync/semaphore.rs deleted file mode 100644 index 2489d34aaaf8..000000000000 --- a/third_party/rust/tokio-0.2.25/src/sync/semaphore.rs +++ /dev/null @@ -1,166 +0,0 @@ -use super::batch_semaphore as ll; // low level implementation -use std::sync::Arc; - -/// Counting semaphore performing asynchronous permit aquisition. -/// -/// A semaphore maintains a set of permits. Permits are used to synchronize -/// access to a shared resource. A semaphore differs from a mutex in that it -/// can allow more than one concurrent caller to access the shared resource at a -/// time. -/// -/// When `acquire` is called and the semaphore has remaining permits, the -/// function immediately returns a permit. However, if no remaining permits are -/// available, `acquire` (asynchronously) waits until an outstanding permit is -/// dropped. At this point, the freed permit is assigned to the caller. -#[derive(Debug)] -pub struct Semaphore { - /// The low level semaphore - ll_sem: ll::Semaphore, -} - -/// A permit from the semaphore. -/// -/// This type is created by the [`acquire`] method. -/// -/// [`acquire`]: crate::sync::Semaphore::acquire() -#[must_use] -#[derive(Debug)] -pub struct SemaphorePermit<'a> { - sem: &'a Semaphore, - permits: u16, -} - -/// An owned permit from the semaphore. -/// -/// This type is created by the [`acquire_owned`] method. -/// -/// [`acquire_owned`]: crate::sync::Semaphore::acquire_owned() -#[must_use] -#[derive(Debug)] -pub struct OwnedSemaphorePermit { - sem: Arc, - permits: u16, -} - -/// Error returned from the [`Semaphore::try_acquire`] function. -/// -/// A `try_acquire` operation can only fail if the semaphore has no available -/// permits. -/// -/// [`Semaphore::try_acquire`]: Semaphore::try_acquire -#[derive(Debug)] -pub struct TryAcquireError(()); - -#[test] -#[cfg(not(loom))] -fn bounds() { - fn check_unpin() {} - // This has to take a value, since the async fn's return type is unnameable. - fn check_send_sync_val(_t: T) {} - fn check_send_sync() {} - check_unpin::(); - check_unpin::>(); - check_send_sync::(); - - let semaphore = Semaphore::new(0); - check_send_sync_val(semaphore.acquire()); -} - -impl Semaphore { - /// Creates a new semaphore with the initial number of permits. - pub fn new(permits: usize) -> Self { - Self { - ll_sem: ll::Semaphore::new(permits), - } - } - - /// Returns the current number of available permits. - pub fn available_permits(&self) -> usize { - self.ll_sem.available_permits() - } - - /// Adds `n` new permits to the semaphore. - /// - /// The maximum number of permits is `usize::MAX >> 3`, and this function will panic if the limit is exceeded. - pub fn add_permits(&self, n: usize) { - self.ll_sem.release(n); - } - - /// Acquires permit from the semaphore. 
- pub async fn acquire(&self) -> SemaphorePermit<'_> { - self.ll_sem.acquire(1).await.unwrap(); - SemaphorePermit { - sem: &self, - permits: 1, - } - } - - /// Tries to acquire a permit from the semaphore. - pub fn try_acquire(&self) -> Result, TryAcquireError> { - match self.ll_sem.try_acquire(1) { - Ok(_) => Ok(SemaphorePermit { - sem: self, - permits: 1, - }), - Err(_) => Err(TryAcquireError(())), - } - } - - /// Acquires permit from the semaphore. - /// - /// The semaphore must be wrapped in an [`Arc`] to call this method. - /// - /// [`Arc`]: std::sync::Arc - pub async fn acquire_owned(self: Arc) -> OwnedSemaphorePermit { - self.ll_sem.acquire(1).await.unwrap(); - OwnedSemaphorePermit { - sem: self.clone(), - permits: 1, - } - } - - /// Tries to acquire a permit from the semaphore. - /// - /// The semaphore must be wrapped in an [`Arc`] to call this method. - /// - /// [`Arc`]: std::sync::Arc - pub fn try_acquire_owned(self: Arc) -> Result { - match self.ll_sem.try_acquire(1) { - Ok(_) => Ok(OwnedSemaphorePermit { - sem: self.clone(), - permits: 1, - }), - Err(_) => Err(TryAcquireError(())), - } - } -} - -impl<'a> SemaphorePermit<'a> { - /// Forgets the permit **without** releasing it back to the semaphore. - /// This can be used to reduce the amount of permits available from a - /// semaphore. - pub fn forget(mut self) { - self.permits = 0; - } -} - -impl OwnedSemaphorePermit { - /// Forgets the permit **without** releasing it back to the semaphore. - /// This can be used to reduce the amount of permits available from a - /// semaphore. - pub fn forget(mut self) { - self.permits = 0; - } -} - -impl<'a> Drop for SemaphorePermit<'_> { - fn drop(&mut self) { - self.sem.add_permits(self.permits as usize); - } -} - -impl Drop for OwnedSemaphorePermit { - fn drop(&mut self) { - self.sem.add_permits(self.permits as usize); - } -} diff --git a/third_party/rust/tokio-0.2.25/src/sync/semaphore_ll.rs b/third_party/rust/tokio-0.2.25/src/sync/semaphore_ll.rs deleted file mode 100644 index 1312f56ea3e2..000000000000 --- a/third_party/rust/tokio-0.2.25/src/sync/semaphore_ll.rs +++ /dev/null @@ -1,1225 +0,0 @@ -#![cfg_attr(not(feature = "sync"), allow(dead_code, unreachable_pub))] - -//! Thread-safe, asynchronous counting semaphore. -//! -//! A `Semaphore` instance holds a set of permits. Permits are used to -//! synchronize access to a shared resource. -//! -//! Before accessing the shared resource, callers acquire a permit from the -//! semaphore. Once the permit is acquired, the caller then enters the critical -//! section. If no permits are available, then acquiring the semaphore returns -//! `Pending`. The task is woken once a permit becomes available. - -use crate::loom::cell::UnsafeCell; -use crate::loom::future::AtomicWaker; -use crate::loom::sync::atomic::{AtomicPtr, AtomicUsize}; -use crate::loom::thread; - -use std::cmp; -use std::fmt; -use std::ptr::{self, NonNull}; -use std::sync::atomic::Ordering::{self, AcqRel, Acquire, Relaxed, Release}; -use std::task::Poll::{Pending, Ready}; -use std::task::{Context, Poll}; -use std::usize; - -/// Futures-aware semaphore. -pub(crate) struct Semaphore { - /// Tracks both the waiter queue tail pointer and the number of remaining - /// permits. - state: AtomicUsize, - - /// waiter queue head pointer. - head: UnsafeCell>, - - /// Coordinates access to the queue head. - rx_lock: AtomicUsize, - - /// Stub waiter node used as part of the MPSC channel algorithm. - stub: Box, -} - -/// A semaphore permit -/// -/// Tracks the lifecycle of a semaphore permit. 
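One non-obvious use of the `forget` methods above is permanently shrinking a semaphore's capacity: a forgotten permit sets its count to zero, so the `Drop` impl has nothing to hand back via `add_permits`. A small sketch against the 0.2 API being removed here; the helper function is illustrative only.

```
use tokio::sync::Semaphore;

async fn shrink_by_one(sem: &Semaphore) {
    // `acquire` hands out a permit; `forget` zeroes its count so Drop
    // releases nothing back to the pool, reducing capacity by one.
    let permit = sem.acquire().await;
    permit.forget();
}
```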
-/// -/// An instance of `Permit` is intended to be used with a **single** instance of -/// `Semaphore`. Using a single instance of `Permit` with multiple semaphore -/// instances will result in unexpected behavior. -/// -/// `Permit` does **not** release the permit back to the semaphore on drop. It -/// is the user's responsibility to ensure that `Permit::release` is called -/// before dropping the permit. -#[derive(Debug)] -pub(crate) struct Permit { - waiter: Option>, - state: PermitState, -} - -/// Error returned by `Permit::poll_acquire`. -#[derive(Debug)] -pub(crate) struct AcquireError(()); - -/// Error returned by `Permit::try_acquire`. -#[derive(Debug)] -pub(crate) enum TryAcquireError { - Closed, - NoPermits, -} - -/// Node used to notify the semaphore waiter when permit is available. -#[derive(Debug)] -struct Waiter { - /// Stores waiter state. - /// - /// See `WaiterState` for more details. - state: AtomicUsize, - - /// Task to wake when a permit is made available. - waker: AtomicWaker, - - /// Next pointer in the queue of waiting senders. - next: AtomicPtr, -} - -/// Semaphore state -/// -/// The 2 low bits track the modes. -/// -/// - Closed -/// - Full -/// -/// When not full, the rest of the `usize` tracks the total number of messages -/// in the channel. When full, the rest of the `usize` is a pointer to the tail -/// of the "waiting senders" queue. -#[derive(Copy, Clone)] -struct SemState(usize); - -/// Permit state -#[derive(Debug, Copy, Clone)] -enum PermitState { - /// Currently waiting for permits to be made available and assigned to the - /// waiter. - Waiting(u16), - - /// The number of acquired permits - Acquired(u16), -} - -/// State for an individual waker node -#[derive(Debug, Copy, Clone)] -struct WaiterState(usize); - -/// Waiter node is in the semaphore queue -const QUEUED: usize = 0b001; - -/// Semaphore has been closed, no more permits will be issued. -const CLOSED: usize = 0b10; - -/// The permit that owns the `Waiter` dropped. -const DROPPED: usize = 0b100; - -/// Represents "one requested permit" in the waiter state -const PERMIT_ONE: usize = 0b1000; - -/// Masks the waiter state to only contain bits tracking number of requested -/// permits. -const PERMIT_MASK: usize = usize::MAX - (PERMIT_ONE - 1); - -/// How much to shift a permit count to pack it into the waker state -const PERMIT_SHIFT: u32 = PERMIT_ONE.trailing_zeros(); - -/// Flag differentiating between available permits and waiter pointers. -/// -/// If we assume pointers are properly aligned, then the least significant bit -/// will always be zero. So, we use that bit to track if the value represents a -/// number. -const NUM_FLAG: usize = 0b01; - -/// Signal the semaphore is closed -const CLOSED_FLAG: usize = 0b10; - -/// Maximum number of permits a semaphore can manage -const MAX_PERMITS: usize = usize::MAX >> NUM_SHIFT; - -/// When representing "numbers", the state has to be shifted this much (to get -/// rid of the flag bit). -const NUM_SHIFT: usize = 2; - -// ===== impl Semaphore ===== - -impl Semaphore { - /// Creates a new semaphore with the initial number of permits - /// - /// # Panics - /// - /// Panics if `permits` is zero. 
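The `SemState` layout described above packs either a permit count or a waiter pointer into a single `usize`, using the low bit as the discriminant (aligned pointers always have it clear). A standalone sketch of that packing follows; all names and constants are local to the example.

```
// Sketch: "pointer or counter in one usize", discriminated by the low bit.
struct Node {
    _next: *mut Node,
}

const NUM_FLAG: usize = 0b01; // low bit set => the usize is a permit count
const NUM_SHIFT: usize = 2;   // counts live above the two flag bits

enum Unpacked {
    Permits(usize),
    Waiter(*const Node),
}

fn pack_permits(permits: usize) -> usize {
    (permits << NUM_SHIFT) | NUM_FLAG
}

fn pack_waiter(node: *const Node) -> usize {
    // Heap allocations are aligned, so the pointer's low bits are zero and
    // cannot be confused with NUM_FLAG.
    debug_assert_eq!(node as usize & NUM_FLAG, 0);
    node as usize
}

fn unpack(state: usize) -> Unpacked {
    if state & NUM_FLAG == NUM_FLAG {
        Unpacked::Permits(state >> NUM_SHIFT)
    } else {
        Unpacked::Waiter(state as *const Node)
    }
}
```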
- pub(crate) fn new(permits: usize) -> Semaphore { - let stub = Box::new(Waiter::new()); - let ptr = NonNull::from(&*stub); - - // Allocations are aligned - debug_assert!(ptr.as_ptr() as usize & NUM_FLAG == 0); - - let state = SemState::new(permits, &stub); - - Semaphore { - state: AtomicUsize::new(state.to_usize()), - head: UnsafeCell::new(ptr), - rx_lock: AtomicUsize::new(0), - stub, - } - } - - /// Returns the current number of available permits - pub(crate) fn available_permits(&self) -> usize { - let curr = SemState(self.state.load(Acquire)); - curr.available_permits() - } - - /// Tries to acquire the requested number of permits, registering the waiter - /// if not enough permits are available. - fn poll_acquire( - &self, - cx: &mut Context<'_>, - num_permits: u16, - permit: &mut Permit, - ) -> Poll> { - self.poll_acquire2(num_permits, || { - let waiter = permit.waiter.get_or_insert_with(|| Box::new(Waiter::new())); - - waiter.waker.register_by_ref(cx.waker()); - - Some(NonNull::from(&**waiter)) - }) - } - - fn try_acquire(&self, num_permits: u16) -> Result<(), TryAcquireError> { - match self.poll_acquire2(num_permits, || None) { - Poll::Ready(res) => res.map_err(to_try_acquire), - Poll::Pending => Err(TryAcquireError::NoPermits), - } - } - - /// Polls for a permit - /// - /// Tries to acquire available permits first. If unable to acquire a - /// sufficient number of permits, the caller's waiter is pushed onto the - /// semaphore's wait queue. - fn poll_acquire2( - &self, - num_permits: u16, - mut get_waiter: F, - ) -> Poll> - where - F: FnMut() -> Option>, - { - let num_permits = num_permits as usize; - - // Load the current state - let mut curr = SemState(self.state.load(Acquire)); - - // Saves a ref to the waiter node - let mut maybe_waiter: Option> = None; - - /// Used in branches where we attempt to push the waiter into the wait - /// queue but fail due to permits becoming available or the wait queue - /// transitioning to "closed". In this case, the waiter must be - /// transitioned back to the "idle" state. - macro_rules! revert_to_idle { - () => { - if let Some(waiter) = maybe_waiter { - unsafe { waiter.as_ref() }.revert_to_idle(); - } - }; - } - - loop { - let mut next = curr; - - if curr.is_closed() { - revert_to_idle!(); - return Ready(Err(AcquireError::closed())); - } - - let acquired = next.acquire_permits(num_permits, &self.stub); - - if !acquired { - // There are not enough available permits to satisfy the - // request. The permit transitions to a waiting state. - debug_assert!(curr.waiter().is_some() || curr.available_permits() < num_permits); - - if let Some(waiter) = maybe_waiter.as_ref() { - // Safety: the caller owns the waiter. - let w = unsafe { waiter.as_ref() }; - w.set_permits_to_acquire(num_permits - curr.available_permits()); - } else { - // Get the waiter for the permit. - if let Some(waiter) = get_waiter() { - // Safety: the caller owns the waiter. - let w = unsafe { waiter.as_ref() }; - - // If there are any currently available permits, the - // waiter acquires those immediately and waits for the - // remaining permits to become available. - if !w.to_queued(num_permits - curr.available_permits()) { - // The node is alrady queued, there is no further work - // to do. - return Pending; - } - - maybe_waiter = Some(waiter); - } else { - // No waiter, this indicates the caller does not wish to - // "wait", so there is nothing left to do. 
- return Pending; - } - } - - next.set_waiter(maybe_waiter.unwrap()); - } - - debug_assert_ne!(curr.0, 0); - debug_assert_ne!(next.0, 0); - - match self.state.compare_exchange(curr.0, next.0, AcqRel, Acquire) { - Ok(_) => { - if acquired { - // Successfully acquire permits **without** queuing the - // waiter node. The waiter node is not currently in the - // queue. - revert_to_idle!(); - return Ready(Ok(())); - } else { - // The node is pushed into the queue, the final step is - // to set the node's "next" pointer to return the wait - // queue into a consistent state. - - let prev_waiter = - curr.waiter().unwrap_or_else(|| NonNull::from(&*self.stub)); - - let waiter = maybe_waiter.unwrap(); - - // Link the nodes. - // - // Safety: the mpsc algorithm guarantees the old tail of - // the queue is not removed from the queue during the - // push process. - unsafe { - prev_waiter.as_ref().store_next(waiter); - } - - return Pending; - } - } - Err(actual) => { - curr = SemState(actual); - } - } - } - } - - /// Closes the semaphore. This prevents the semaphore from issuing new - /// permits and notifies all pending waiters. - pub(crate) fn close(&self) { - // Acquire the `rx_lock`, setting the "closed" flag on the lock. - let prev = self.rx_lock.fetch_or(1, AcqRel); - - if prev != 0 { - // Another thread has the lock and will be responsible for notifying - // pending waiters. - return; - } - - self.add_permits_locked(0, true); - } - /// Adds `n` new permits to the semaphore. - /// - /// The maximum number of permits is `usize::MAX >> 3`, and this function will panic if the limit is exceeded. - pub(crate) fn add_permits(&self, n: usize) { - if n == 0 { - return; - } - - // TODO: Handle overflow. A panic is not sufficient, the process must - // abort. - let prev = self.rx_lock.fetch_add(n << 1, AcqRel); - - if prev != 0 { - // Another thread has the lock and will be responsible for notifying - // pending waiters. - return; - } - - self.add_permits_locked(n, false); - } - - fn add_permits_locked(&self, mut rem: usize, mut closed: bool) { - while rem > 0 || closed { - if closed { - SemState::fetch_set_closed(&self.state, AcqRel); - } - - // Release the permits and notify - self.add_permits_locked2(rem, closed); - - let n = rem << 1; - - let actual = if closed { - let actual = self.rx_lock.fetch_sub(n | 1, AcqRel); - closed = false; - actual - } else { - let actual = self.rx_lock.fetch_sub(n, AcqRel); - closed = actual & 1 == 1; - actual - }; - - rem = (actual >> 1) - rem; - } - } - - /// Releases a specific amount of permits to the semaphore - /// - /// This function is called by `add_permits` after the add lock has been - /// acquired. - fn add_permits_locked2(&self, mut n: usize, closed: bool) { - // If closing the semaphore, we want to drain the entire queue. The - // number of permits being assigned doesn't matter. - if closed { - n = usize::MAX; - } - - 'outer: while n > 0 { - unsafe { - let mut head = self.head.with(|head| *head); - let mut next_ptr = head.as_ref().next.load(Acquire); - - let stub = self.stub(); - - if head == stub { - // The stub node indicates an empty queue. Any remaining - // permits get assigned back to the semaphore. - let next = match NonNull::new(next_ptr) { - Some(next) => next, - None => { - // This loop is not part of the standard intrusive mpsc - // channel algorithm. This is where we atomically pop - // the last task and add `n` to the remaining capacity. 
- // - // This modification to the pop algorithm works because, - // at this point, we have not done any work (only done - // reading). We have a *pretty* good idea that there is - // no concurrent pusher. - // - // The capacity is then atomically added by doing an - // AcqRel CAS on `state`. The `state` cell is the - // linchpin of the algorithm. - // - // By successfully CASing `head` w/ AcqRel, we ensure - // that, if any thread was racing and entered a push, we - // see that and abort pop, retrying as it is - // "inconsistent". - let mut curr = SemState::load(&self.state, Acquire); - - loop { - if curr.has_waiter(&self.stub) { - // A waiter is being added concurrently. - // This is the MPSC queue's "inconsistent" - // state and we must loop and try again. - thread::yield_now(); - continue 'outer; - } - - // If closing, nothing more to do. - if closed { - debug_assert!(curr.is_closed(), "state = {:?}", curr); - return; - } - - let mut next = curr; - next.release_permits(n, &self.stub); - - match self.state.compare_exchange(curr.0, next.0, AcqRel, Acquire) { - Ok(_) => return, - Err(actual) => { - curr = SemState(actual); - } - } - } - } - }; - - self.head.with_mut(|head| *head = next); - head = next; - next_ptr = next.as_ref().next.load(Acquire); - } - - // `head` points to a waiter assign permits to the waiter. If - // all requested permits are satisfied, then we can continue, - // otherwise the node stays in the wait queue. - if !head.as_ref().assign_permits(&mut n, closed) { - assert_eq!(n, 0); - return; - } - - if let Some(next) = NonNull::new(next_ptr) { - self.head.with_mut(|head| *head = next); - - self.remove_queued(head, closed); - continue 'outer; - } - - let state = SemState::load(&self.state, Acquire); - - // This must always be a pointer as the wait list is not empty. - let tail = state.waiter().unwrap(); - - if tail != head { - // Inconsistent - thread::yield_now(); - continue 'outer; - } - - self.push_stub(closed); - - next_ptr = head.as_ref().next.load(Acquire); - - if let Some(next) = NonNull::new(next_ptr) { - self.head.with_mut(|head| *head = next); - - self.remove_queued(head, closed); - continue 'outer; - } - - // Inconsistent state, loop - thread::yield_now(); - } - } - } - - /// The wait node has had all of its permits assigned and has been removed - /// from the wait queue. - /// - /// Attempt to remove the QUEUED bit from the node. If additional permits - /// are concurrently requested, the node must be pushed back into the wait - /// queued. - fn remove_queued(&self, waiter: NonNull, closed: bool) { - let mut curr = WaiterState(unsafe { waiter.as_ref() }.state.load(Acquire)); - - loop { - if curr.is_dropped() { - // The Permit dropped, it is on us to release the memory - let _ = unsafe { Box::from_raw(waiter.as_ptr()) }; - return; - } - - // The node is removed from the queue. We attempt to unset the - // queued bit, but concurrently the waiter has requested more - // permits. When the waiter requested more permits, it saw the - // queued bit set so took no further action. This requires us to - // push the node back into the queue. - if curr.permits_to_acquire() > 0 { - // More permits are requested. 
The waiter must be re-queued - unsafe { - self.push_waiter(waiter, closed); - } - return; - } - - let mut next = curr; - next.unset_queued(); - - let w = unsafe { waiter.as_ref() }; - - match w.state.compare_exchange(curr.0, next.0, AcqRel, Acquire) { - Ok(_) => return, - Err(actual) => { - curr = WaiterState(actual); - } - } - } - } - - unsafe fn push_stub(&self, closed: bool) { - self.push_waiter(self.stub(), closed); - } - - unsafe fn push_waiter(&self, waiter: NonNull, closed: bool) { - // Set the next pointer. This does not require an atomic operation as - // this node is not accessible. The write will be flushed with the next - // operation - waiter.as_ref().next.store(ptr::null_mut(), Relaxed); - - // Update the tail to point to the new node. We need to see the previous - // node in order to update the next pointer as well as release `task` - // to any other threads calling `push`. - let next = SemState::new_ptr(waiter, closed); - let prev = SemState(self.state.swap(next.0, AcqRel)); - - debug_assert_eq!(closed, prev.is_closed()); - - // This function is only called when there are pending tasks. Because of - // this, the state must *always* be in pointer mode. - let prev = prev.waiter().unwrap(); - - // No cycles plz - debug_assert_ne!(prev, waiter); - - // Release `task` to the consume end. - prev.as_ref().next.store(waiter.as_ptr(), Release); - } - - fn stub(&self) -> NonNull { - unsafe { NonNull::new_unchecked(&*self.stub as *const _ as *mut _) } - } -} - -impl Drop for Semaphore { - fn drop(&mut self) { - self.close(); - } -} - -impl fmt::Debug for Semaphore { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("Semaphore") - .field("state", &SemState::load(&self.state, Relaxed)) - .field("head", &self.head.with(|ptr| ptr)) - .field("rx_lock", &self.rx_lock.load(Relaxed)) - .field("stub", &self.stub) - .finish() - } -} - -unsafe impl Send for Semaphore {} -unsafe impl Sync for Semaphore {} - -// ===== impl Permit ===== - -impl Permit { - /// Creates a new `Permit`. - /// - /// The permit begins in the "unacquired" state. - pub(crate) fn new() -> Permit { - use PermitState::Acquired; - - Permit { - waiter: None, - state: Acquired(0), - } - } - - /// Returns `true` if the permit has been acquired - #[allow(dead_code)] // may be used later - pub(crate) fn is_acquired(&self) -> bool { - match self.state { - PermitState::Acquired(num) if num > 0 => true, - _ => false, - } - } - - /// Tries to acquire the permit. If no permits are available, the current task - /// is notified once a new permit becomes available. - pub(crate) fn poll_acquire( - &mut self, - cx: &mut Context<'_>, - num_permits: u16, - semaphore: &Semaphore, - ) -> Poll> { - use std::cmp::Ordering::*; - use PermitState::*; - - match self.state { - Waiting(requested) => { - // There must be a waiter - let waiter = self.waiter.as_ref().unwrap(); - - match requested.cmp(&num_permits) { - Less => { - let delta = num_permits - requested; - - // Request additional permits. If the waiter has been - // dequeued, it must be re-queued. - if !waiter.try_inc_permits_to_acquire(delta as usize) { - let waiter = NonNull::from(&**waiter); - - // Ignore the result. 
The check for - // `permits_to_acquire()` will converge the state as - // needed - let _ = semaphore.poll_acquire2(delta, || Some(waiter))?; - } - - self.state = Waiting(num_permits); - } - Greater => { - let delta = requested - num_permits; - let to_release = waiter.try_dec_permits_to_acquire(delta as usize); - - semaphore.add_permits(to_release); - self.state = Waiting(num_permits); - } - Equal => {} - } - - if waiter.permits_to_acquire()? == 0 { - self.state = Acquired(requested); - return Ready(Ok(())); - } - - waiter.waker.register_by_ref(cx.waker()); - - if waiter.permits_to_acquire()? == 0 { - self.state = Acquired(requested); - return Ready(Ok(())); - } - - Pending - } - Acquired(acquired) => { - if acquired >= num_permits { - Ready(Ok(())) - } else { - match semaphore.poll_acquire(cx, num_permits - acquired, self)? { - Ready(()) => { - self.state = Acquired(num_permits); - Ready(Ok(())) - } - Pending => { - self.state = Waiting(num_permits); - Pending - } - } - } - } - } - } - - /// Tries to acquire the permit. - pub(crate) fn try_acquire( - &mut self, - num_permits: u16, - semaphore: &Semaphore, - ) -> Result<(), TryAcquireError> { - use PermitState::*; - - match self.state { - Waiting(requested) => { - // There must be a waiter - let waiter = self.waiter.as_ref().unwrap(); - - if requested > num_permits { - let delta = requested - num_permits; - let to_release = waiter.try_dec_permits_to_acquire(delta as usize); - - semaphore.add_permits(to_release); - self.state = Waiting(num_permits); - } - - let res = waiter.permits_to_acquire().map_err(to_try_acquire)?; - - if res == 0 { - if requested < num_permits { - // Try to acquire the additional permits - semaphore.try_acquire(num_permits - requested)?; - } - - self.state = Acquired(num_permits); - Ok(()) - } else { - Err(TryAcquireError::NoPermits) - } - } - Acquired(acquired) => { - if acquired < num_permits { - semaphore.try_acquire(num_permits - acquired)?; - self.state = Acquired(num_permits); - } - - Ok(()) - } - } - } - - /// Releases a permit back to the semaphore - pub(crate) fn release(&mut self, n: u16, semaphore: &Semaphore) { - let n = self.forget(n); - semaphore.add_permits(n as usize); - } - - /// Forgets the permit **without** releasing it back to the semaphore. - /// - /// After calling `forget`, `poll_acquire` is able to acquire new permit - /// from the semaphore. - /// - /// Repeatedly calling `forget` without associated calls to `add_permit` - /// will result in the semaphore losing all permits. - /// - /// Will forget **at most** the number of acquired permits. This number is - /// returned. - pub(crate) fn forget(&mut self, n: u16) -> u16 { - use PermitState::*; - - match self.state { - Waiting(requested) => { - let n = cmp::min(n, requested); - - // Decrement - let acquired = self - .waiter - .as_ref() - .unwrap() - .try_dec_permits_to_acquire(n as usize) as u16; - - if n == requested { - self.state = Acquired(0); - } else if acquired == requested - n { - self.state = Waiting(acquired); - } else { - self.state = Waiting(requested - n); - } - - acquired - } - Acquired(acquired) => { - let n = cmp::min(n, acquired); - self.state = Acquired(acquired - n); - n - } - } - } -} - -impl Default for Permit { - fn default() -> Self { - Self::new() - } -} - -impl Drop for Permit { - fn drop(&mut self) { - if let Some(waiter) = self.waiter.take() { - // Set the dropped flag - let state = WaiterState(waiter.state.fetch_or(DROPPED, AcqRel)); - - if state.is_queued() { - // The waiter is stored in the queue. 
The semaphore will drop it - std::mem::forget(waiter); - } - } - } -} - -// ===== impl AcquireError ==== - -impl AcquireError { - fn closed() -> AcquireError { - AcquireError(()) - } -} - -fn to_try_acquire(_: AcquireError) -> TryAcquireError { - TryAcquireError::Closed -} - -impl fmt::Display for AcquireError { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(fmt, "semaphore closed") - } -} - -impl std::error::Error for AcquireError {} - -// ===== impl TryAcquireError ===== - -impl TryAcquireError { - /// Returns `true` if the error was caused by a closed semaphore. - pub(crate) fn is_closed(&self) -> bool { - match self { - TryAcquireError::Closed => true, - _ => false, - } - } - - /// Returns `true` if the error was caused by calling `try_acquire` on a - /// semaphore with no available permits. - pub(crate) fn is_no_permits(&self) -> bool { - match self { - TryAcquireError::NoPermits => true, - _ => false, - } - } -} - -impl fmt::Display for TryAcquireError { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - TryAcquireError::Closed => write!(fmt, "semaphore closed"), - TryAcquireError::NoPermits => write!(fmt, "no permits available"), - } - } -} - -impl std::error::Error for TryAcquireError {} - -// ===== impl Waiter ===== - -impl Waiter { - fn new() -> Waiter { - Waiter { - state: AtomicUsize::new(0), - waker: AtomicWaker::new(), - next: AtomicPtr::new(ptr::null_mut()), - } - } - - fn permits_to_acquire(&self) -> Result { - let state = WaiterState(self.state.load(Acquire)); - - if state.is_closed() { - Err(AcquireError(())) - } else { - Ok(state.permits_to_acquire()) - } - } - - /// Only increments the number of permits *if* the waiter is currently - /// queued. - /// - /// # Returns - /// - /// `true` if the number of permits to acquire has been incremented. `false` - /// otherwise. On `false`, the caller should use `Semaphore::poll_acquire`. - fn try_inc_permits_to_acquire(&self, n: usize) -> bool { - let mut curr = WaiterState(self.state.load(Acquire)); - - loop { - if !curr.is_queued() { - assert_eq!(0, curr.permits_to_acquire()); - return false; - } - - let mut next = curr; - next.set_permits_to_acquire(n + curr.permits_to_acquire()); - - match self.state.compare_exchange(curr.0, next.0, AcqRel, Acquire) { - Ok(_) => return true, - Err(actual) => curr = WaiterState(actual), - } - } - } - - /// Try to decrement the number of permits to acquire. This returns the - /// actual number of permits that were decremented. The delta between `n` - /// and the return has been assigned to the permit and the caller must - /// assign these back to the semaphore. - fn try_dec_permits_to_acquire(&self, n: usize) -> usize { - let mut curr = WaiterState(self.state.load(Acquire)); - - loop { - if curr.is_closed() { - return 0; - } - - if !curr.is_queued() { - assert_eq!(0, curr.permits_to_acquire()); - } - - let delta = cmp::min(n, curr.permits_to_acquire()); - let rem = curr.permits_to_acquire() - delta; - - let mut next = curr; - next.set_permits_to_acquire(rem); - - match self.state.compare_exchange(curr.0, next.0, AcqRel, Acquire) { - Ok(_) => return n - delta, - Err(actual) => curr = WaiterState(actual), - } - } - } - - /// Store the number of remaining permits needed to satisfy the waiter and - /// transition to the "QUEUED" state. - /// - /// # Returns - /// - /// `true` if the `QUEUED` bit was set as part of the transition. 
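The waiter-state helpers above all follow the same compare-exchange retry shape: load, compute the next value, CAS, and on failure restart from the value actually observed. A minimal standalone version of that loop on a plain counter, with names invented for the example:

```
use std::sync::atomic::{AtomicUsize, Ordering::{AcqRel, Acquire}};

fn saturating_add(cell: &AtomicUsize, n: usize, max: usize) -> usize {
    let mut curr = cell.load(Acquire);
    loop {
        let next = curr.saturating_add(n).min(max);
        // Publish only if nobody raced us; otherwise retry from the freshly
        // observed value, exactly like the waiter-state loops above.
        match cell.compare_exchange(curr, next, AcqRel, Acquire) {
            Ok(_) => return next,
            Err(actual) => curr = actual,
        }
    }
}
```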
- fn to_queued(&self, num_permits: usize) -> bool { - let mut curr = WaiterState(self.state.load(Acquire)); - - // The waiter should **not** be waiting for any permits. - debug_assert_eq!(curr.permits_to_acquire(), 0); - - loop { - let mut next = curr; - next.set_permits_to_acquire(num_permits); - next.set_queued(); - - match self.state.compare_exchange(curr.0, next.0, AcqRel, Acquire) { - Ok(_) => { - if curr.is_queued() { - return false; - } else { - // Make sure the next pointer is null - self.next.store(ptr::null_mut(), Relaxed); - return true; - } - } - Err(actual) => curr = WaiterState(actual), - } - } - } - - /// Set the number of permits to acquire. - /// - /// This function is only called when the waiter is being inserted into the - /// wait queue. Because of this, there are no concurrent threads that can - /// modify the state and using `store` is safe. - fn set_permits_to_acquire(&self, num_permits: usize) { - debug_assert!(WaiterState(self.state.load(Acquire)).is_queued()); - - let mut state = WaiterState(QUEUED); - state.set_permits_to_acquire(num_permits); - - self.state.store(state.0, Release); - } - - /// Assign permits to the waiter. - /// - /// Returns `true` if the waiter should be removed from the queue - fn assign_permits(&self, n: &mut usize, closed: bool) -> bool { - let mut curr = WaiterState(self.state.load(Acquire)); - - loop { - let mut next = curr; - - // Number of permits to assign to this waiter - let assign = cmp::min(curr.permits_to_acquire(), *n); - - // Assign the permits - next.set_permits_to_acquire(curr.permits_to_acquire() - assign); - - if closed { - next.set_closed(); - } - - match self.state.compare_exchange(curr.0, next.0, AcqRel, Acquire) { - Ok(_) => { - // Update `n` - *n -= assign; - - if next.permits_to_acquire() == 0 { - if curr.permits_to_acquire() > 0 { - self.waker.wake(); - } - - return true; - } else { - return false; - } - } - Err(actual) => curr = WaiterState(actual), - } - } - } - - fn revert_to_idle(&self) { - // An idle node is not waiting on any permits - self.state.store(0, Relaxed); - } - - fn store_next(&self, next: NonNull) { - self.next.store(next.as_ptr(), Release); - } -} - -// ===== impl SemState ===== - -impl SemState { - /// Returns a new default `State` value. - fn new(permits: usize, stub: &Waiter) -> SemState { - assert!(permits <= MAX_PERMITS); - - if permits > 0 { - SemState((permits << NUM_SHIFT) | NUM_FLAG) - } else { - SemState(stub as *const _ as usize) - } - } - - /// Returns a `State` tracking `ptr` as the tail of the queue. - fn new_ptr(tail: NonNull, closed: bool) -> SemState { - let mut val = tail.as_ptr() as usize; - - if closed { - val |= CLOSED_FLAG; - } - - SemState(val) - } - - /// Returns the amount of remaining capacity - fn available_permits(self) -> usize { - if !self.has_available_permits() { - return 0; - } - - self.0 >> NUM_SHIFT - } - - /// Returns `true` if the state has permits that can be claimed by a waiter. - fn has_available_permits(self) -> bool { - self.0 & NUM_FLAG == NUM_FLAG - } - - fn has_waiter(self, stub: &Waiter) -> bool { - !self.has_available_permits() && !self.is_stub(stub) - } - - /// Tries to atomically acquire specified number of permits. - /// - /// # Return - /// - /// Returns `true` if the specified number of permits were acquired, `false` - /// otherwise. Returning false does not mean that there are no more - /// available permits. 
- fn acquire_permits(&mut self, num: usize, stub: &Waiter) -> bool { - debug_assert!(num > 0); - - if self.available_permits() < num { - return false; - } - - debug_assert!(self.waiter().is_none()); - - self.0 -= num << NUM_SHIFT; - - if self.0 == NUM_FLAG { - // Set the state to the stub pointer. - self.0 = stub as *const _ as usize; - } - - true - } - - /// Releases permits - /// - /// Returns `true` if the permits were accepted. - fn release_permits(&mut self, permits: usize, stub: &Waiter) { - debug_assert!(permits > 0); - - if self.is_stub(stub) { - self.0 = (permits << NUM_SHIFT) | NUM_FLAG | (self.0 & CLOSED_FLAG); - return; - } - - debug_assert!(self.has_available_permits()); - - self.0 += permits << NUM_SHIFT; - } - - fn is_waiter(self) -> bool { - self.0 & NUM_FLAG == 0 - } - - /// Returns the waiter, if one is set. - fn waiter(self) -> Option> { - if self.is_waiter() { - let waiter = NonNull::new(self.as_ptr()).expect("null pointer stored"); - - Some(waiter) - } else { - None - } - } - - /// Assumes `self` represents a pointer - fn as_ptr(self) -> *mut Waiter { - (self.0 & !CLOSED_FLAG) as *mut Waiter - } - - /// Sets to a pointer to a waiter. - /// - /// This can only be done from the full state. - fn set_waiter(&mut self, waiter: NonNull) { - let waiter = waiter.as_ptr() as usize; - debug_assert!(!self.is_closed()); - - self.0 = waiter; - } - - fn is_stub(self, stub: &Waiter) -> bool { - self.as_ptr() as usize == stub as *const _ as usize - } - - /// Loads the state from an AtomicUsize. - fn load(cell: &AtomicUsize, ordering: Ordering) -> SemState { - let value = cell.load(ordering); - SemState(value) - } - - fn fetch_set_closed(cell: &AtomicUsize, ordering: Ordering) -> SemState { - let value = cell.fetch_or(CLOSED_FLAG, ordering); - SemState(value) - } - - fn is_closed(self) -> bool { - self.0 & CLOSED_FLAG == CLOSED_FLAG - } - - /// Converts the state into a `usize` representation. - fn to_usize(self) -> usize { - self.0 - } -} - -impl fmt::Debug for SemState { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - let mut fmt = fmt.debug_struct("SemState"); - - if self.is_waiter() { - fmt.field("state", &""); - } else { - fmt.field("permits", &self.available_permits()); - } - - fmt.finish() - } -} - -// ===== impl WaiterState ===== - -impl WaiterState { - fn permits_to_acquire(self) -> usize { - self.0 >> PERMIT_SHIFT - } - - fn set_permits_to_acquire(&mut self, val: usize) { - self.0 = (val << PERMIT_SHIFT) | (self.0 & !PERMIT_MASK) - } - - fn is_queued(self) -> bool { - self.0 & QUEUED == QUEUED - } - - fn set_queued(&mut self) { - self.0 |= QUEUED; - } - - fn is_closed(self) -> bool { - self.0 & CLOSED == CLOSED - } - - fn set_closed(&mut self) { - self.0 |= CLOSED; - } - - fn unset_queued(&mut self) { - assert!(self.is_queued()); - self.0 -= QUEUED; - } - - fn is_dropped(self) -> bool { - self.0 & DROPPED == DROPPED - } -} diff --git a/third_party/rust/tokio-0.2.25/src/sync/task/atomic_waker.rs b/third_party/rust/tokio-0.2.25/src/sync/task/atomic_waker.rs deleted file mode 100644 index 73b1745f1a2d..000000000000 --- a/third_party/rust/tokio-0.2.25/src/sync/task/atomic_waker.rs +++ /dev/null @@ -1,318 +0,0 @@ -#![cfg_attr(any(loom, not(feature = "sync")), allow(dead_code, unreachable_pub))] - -use crate::loom::cell::UnsafeCell; -use crate::loom::sync::atomic::{self, AtomicUsize}; - -use std::fmt; -use std::sync::atomic::Ordering::{AcqRel, Acquire, Release}; -use std::task::Waker; - -/// A synchronization primitive for task waking. 
-/// -/// `AtomicWaker` will coordinate concurrent wakes with the consumer -/// potentially "waking" the underlying task. This is useful in scenarios -/// where a computation completes in another thread and wants to wake the -/// consumer, but the consumer is in the process of being migrated to a new -/// logical task. -/// -/// Consumers should call `register` before checking the result of a computation -/// and producers should call `wake` after producing the computation (this -/// differs from the usual `thread::park` pattern). It is also permitted for -/// `wake` to be called **before** `register`. This results in a no-op. -/// -/// A single `AtomicWaker` may be reused for any number of calls to `register` or -/// `wake`. -pub(crate) struct AtomicWaker { - state: AtomicUsize, - waker: UnsafeCell>, -} - -// `AtomicWaker` is a multi-consumer, single-producer transfer cell. The cell -// stores a `Waker` value produced by calls to `register` and many threads can -// race to take the waker by calling `wake. -// -// If a new `Waker` instance is produced by calling `register` before an existing -// one is consumed, then the existing one is overwritten. -// -// While `AtomicWaker` is single-producer, the implementation ensures memory -// safety. In the event of concurrent calls to `register`, there will be a -// single winner whose waker will get stored in the cell. The losers will not -// have their tasks woken. As such, callers should ensure to add synchronization -// to calls to `register`. -// -// The implementation uses a single `AtomicUsize` value to coordinate access to -// the `Waker` cell. There are two bits that are operated on independently. These -// are represented by `REGISTERING` and `WAKING`. -// -// The `REGISTERING` bit is set when a producer enters the critical section. The -// `WAKING` bit is set when a consumer enters the critical section. Neither -// bit being set is represented by `WAITING`. -// -// A thread obtains an exclusive lock on the waker cell by transitioning the -// state from `WAITING` to `REGISTERING` or `WAKING`, depending on the -// operation the thread wishes to perform. When this transition is made, it is -// guaranteed that no other thread will access the waker cell. -// -// # Registering -// -// On a call to `register`, an attempt to transition the state from WAITING to -// REGISTERING is made. On success, the caller obtains a lock on the waker cell. -// -// If the lock is obtained, then the thread sets the waker cell to the waker -// provided as an argument. Then it attempts to transition the state back from -// `REGISTERING` -> `WAITING`. -// -// If this transition is successful, then the registering process is complete -// and the next call to `wake` will observe the waker. -// -// If the transition fails, then there was a concurrent call to `wake` that -// was unable to access the waker cell (due to the registering thread holding the -// lock). To handle this, the registering thread removes the waker it just set -// from the cell and calls `wake` on it. This call to wake represents the -// attempt to wake by the other thread (that set the `WAKING` bit). The -// state is then transitioned from `REGISTERING | WAKING` back to `WAITING`. -// This transition must succeed because, at this point, the state cannot be -// transitioned by another thread. -// -// # Waking -// -// On a call to `wake`, an attempt to transition the state from `WAITING` to -// `WAKING` is made. On success, the caller obtains a lock on the waker cell. 
-// -// If the lock is obtained, then the thread takes ownership of the current value -// in the waker cell, and calls `wake` on it. The state is then transitioned -// back to `WAITING`. This transition must succeed as, at this point, the state -// cannot be transitioned by another thread. -// -// If the thread is unable to obtain the lock, the `WAKING` bit is still. -// This is because it has either been set by the current thread but the previous -// value included the `REGISTERING` bit **or** a concurrent thread is in the -// `WAKING` critical section. Either way, no action must be taken. -// -// If the current thread is the only concurrent call to `wake` and another -// thread is in the `register` critical section, when the other thread **exits** -// the `register` critical section, it will observe the `WAKING` bit and -// handle the waker itself. -// -// If another thread is in the `waker` critical section, then it will handle -// waking the caller task. -// -// # A potential race (is safely handled). -// -// Imagine the following situation: -// -// * Thread A obtains the `wake` lock and wakes a task. -// -// * Before thread A releases the `wake` lock, the woken task is scheduled. -// -// * Thread B attempts to wake the task. In theory this should result in the -// task being woken, but it cannot because thread A still holds the wake -// lock. -// -// This case is handled by requiring users of `AtomicWaker` to call `register` -// **before** attempting to observe the application state change that resulted -// in the task being woken. The wakers also change the application state -// before calling wake. -// -// Because of this, the task will do one of two things. -// -// 1) Observe the application state change that Thread B is waking on. In -// this case, it is OK for Thread B's wake to be lost. -// -// 2) Call register before attempting to observe the application state. Since -// Thread A still holds the `wake` lock, the call to `register` will result -// in the task waking itself and get scheduled again. - -/// Idle state -const WAITING: usize = 0; - -/// A new waker value is being registered with the `AtomicWaker` cell. -const REGISTERING: usize = 0b01; - -/// The task currently registered with the `AtomicWaker` cell is being woken. -const WAKING: usize = 0b10; - -impl AtomicWaker { - /// Create an `AtomicWaker` - pub(crate) fn new() -> AtomicWaker { - AtomicWaker { - state: AtomicUsize::new(WAITING), - waker: UnsafeCell::new(None), - } - } - - /// Registers the current waker to be notified on calls to `wake`. - /// - /// This is the same as calling `register_task` with `task::current()`. - #[cfg(feature = "io-driver")] - pub(crate) fn register(&self, waker: Waker) { - self.do_register(waker); - } - - /// Registers the provided waker to be notified on calls to `wake`. - /// - /// The new waker will take place of any previous wakers that were registered - /// by previous calls to `register`. Any calls to `wake` that happen after - /// a call to `register` (as defined by the memory ordering rules), will - /// wake the `register` caller's task. - /// - /// It is safe to call `register` with multiple other threads concurrently - /// calling `wake`. This will result in the `register` caller's current - /// task being woken once. - /// - /// This function is safe to call concurrently, but this is generally a bad - /// idea. Concurrent calls to `register` will attempt to register different - /// tasks to be woken. 
One of the callers will win and have its task set, - /// but there is no guarantee as to which caller will succeed. - pub(crate) fn register_by_ref(&self, waker: &Waker) { - self.do_register(waker); - } - - fn do_register(&self, waker: W) - where - W: WakerRef, - { - match self.state.compare_and_swap(WAITING, REGISTERING, Acquire) { - WAITING => { - unsafe { - // Locked acquired, update the waker cell - self.waker.with_mut(|t| *t = Some(waker.into_waker())); - - // Release the lock. If the state transitioned to include - // the `WAKING` bit, this means that a wake has been - // called concurrently, so we have to remove the waker and - // wake it.` - // - // Start by assuming that the state is `REGISTERING` as this - // is what we jut set it to. - let res = self - .state - .compare_exchange(REGISTERING, WAITING, AcqRel, Acquire); - - match res { - Ok(_) => {} - Err(actual) => { - // This branch can only be reached if a - // concurrent thread called `wake`. In this - // case, `actual` **must** be `REGISTERING | - // `WAKING`. - debug_assert_eq!(actual, REGISTERING | WAKING); - - // Take the waker to wake once the atomic operation has - // completed. - let waker = self.waker.with_mut(|t| (*t).take()).unwrap(); - - // Just swap, because no one could change state - // while state == `Registering | `Waking` - self.state.swap(WAITING, AcqRel); - - // The atomic swap was complete, now - // wake the waker and return. - waker.wake(); - } - } - } - } - WAKING => { - // Currently in the process of waking the task, i.e., - // `wake` is currently being called on the old waker. - // So, we call wake on the new waker. - waker.wake(); - - // This is equivalent to a spin lock, so use a spin hint. - atomic::spin_loop_hint(); - } - state => { - // In this case, a concurrent thread is holding the - // "registering" lock. This probably indicates a bug in the - // caller's code as racing to call `register` doesn't make much - // sense. - // - // We just want to maintain memory safety. It is ok to drop the - // call to `register`. - debug_assert!(state == REGISTERING || state == REGISTERING | WAKING); - } - } - } - - /// Wakes the task that last called `register`. - /// - /// If `register` has not been called yet, then this does nothing. - pub(crate) fn wake(&self) { - if let Some(waker) = self.take_waker() { - waker.wake(); - } - } - - /// Attempts to take the `Waker` value out of the `AtomicWaker` with the - /// intention that the caller will wake the task later. - pub(crate) fn take_waker(&self) -> Option { - // AcqRel ordering is used in order to acquire the value of the `waker` - // cell as well as to establish a `release` ordering with whatever - // memory the `AtomicWaker` is associated with. - match self.state.fetch_or(WAKING, AcqRel) { - WAITING => { - // The waking lock has been acquired. - let waker = unsafe { self.waker.with_mut(|t| (*t).take()) }; - - // Release the lock - self.state.fetch_and(!WAKING, Release); - - waker - } - state => { - // There is a concurrent thread currently updating the - // associated waker. - // - // Nothing more to do as the `WAKING` bit has been set. It - // doesn't matter if there are concurrent registering threads or - // not. 
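The protocol spelled out in the comments above (consumers register a waker before observing state, producers update state before calling `wake`) can be made concrete with a small sketch. It uses the public `futures::task::AtomicWaker` as a stand-in for the internal tokio type being removed here; the `Flag` type and function names are illustrative only, not part of this patch.

```
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::task::Poll;

use futures::future::poll_fn;
use futures::task::AtomicWaker;

// Illustrative stand-in for some shared application state.
struct Flag {
    ready: AtomicBool,
    waker: AtomicWaker,
}

// Consumer side: register *before* checking the state, the ordering the
// comments above rely on to make the wake/register race safe.
async fn wait_ready(flag: Arc<Flag>) {
    poll_fn(|cx| {
        flag.waker.register(cx.waker());
        if flag.ready.load(Ordering::SeqCst) {
            Poll::Ready(())
        } else {
            Poll::Pending
        }
    })
    .await
}

// Producer side: update the state first, then wake.
fn set_ready(flag: &Flag) {
    flag.ready.store(true, Ordering::SeqCst);
    flag.waker.wake();
}
```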
- // - debug_assert!( - state == REGISTERING || state == REGISTERING | WAKING || state == WAKING - ); - None - } - } - } -} - -impl Default for AtomicWaker { - fn default() -> Self { - AtomicWaker::new() - } -} - -impl fmt::Debug for AtomicWaker { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(fmt, "AtomicWaker") - } -} - -unsafe impl Send for AtomicWaker {} -unsafe impl Sync for AtomicWaker {} - -trait WakerRef { - fn wake(self); - fn into_waker(self) -> Waker; -} - -impl WakerRef for Waker { - fn wake(self) { - self.wake() - } - - fn into_waker(self) -> Waker { - self - } -} - -impl WakerRef for &Waker { - fn wake(self) { - self.wake_by_ref() - } - - fn into_waker(self) -> Waker { - self.clone() - } -} diff --git a/third_party/rust/tokio-0.2.25/src/sync/task/mod.rs b/third_party/rust/tokio-0.2.25/src/sync/task/mod.rs deleted file mode 100644 index a6bc6ed06ea8..000000000000 --- a/third_party/rust/tokio-0.2.25/src/sync/task/mod.rs +++ /dev/null @@ -1,4 +0,0 @@ -//! Thread-safe task notification primitives. - -mod atomic_waker; -pub(crate) use self::atomic_waker::AtomicWaker; diff --git a/third_party/rust/tokio-0.2.25/src/sync/tests/atomic_waker.rs b/third_party/rust/tokio-0.2.25/src/sync/tests/atomic_waker.rs deleted file mode 100644 index c832d62e9ae6..000000000000 --- a/third_party/rust/tokio-0.2.25/src/sync/tests/atomic_waker.rs +++ /dev/null @@ -1,34 +0,0 @@ -use crate::sync::AtomicWaker; -use tokio_test::task; - -use std::task::Waker; - -trait AssertSend: Send {} -trait AssertSync: Send {} - -impl AssertSend for AtomicWaker {} -impl AssertSync for AtomicWaker {} - -impl AssertSend for Waker {} -impl AssertSync for Waker {} - -#[test] -fn basic_usage() { - let mut waker = task::spawn(AtomicWaker::new()); - - waker.enter(|cx, waker| waker.register_by_ref(cx.waker())); - waker.wake(); - - assert!(waker.is_woken()); -} - -#[test] -fn wake_without_register() { - let mut waker = task::spawn(AtomicWaker::new()); - waker.wake(); - - // Registering should not result in a notification - waker.enter(|cx, waker| waker.register_by_ref(cx.waker())); - - assert!(!waker.is_woken()); -} diff --git a/third_party/rust/tokio-0.2.25/src/sync/tests/loom_atomic_waker.rs b/third_party/rust/tokio-0.2.25/src/sync/tests/loom_atomic_waker.rs deleted file mode 100644 index c148bcbe1177..000000000000 --- a/third_party/rust/tokio-0.2.25/src/sync/tests/loom_atomic_waker.rs +++ /dev/null @@ -1,45 +0,0 @@ -use crate::sync::task::AtomicWaker; - -use futures::future::poll_fn; -use loom::future::block_on; -use loom::sync::atomic::AtomicUsize; -use loom::thread; -use std::sync::atomic::Ordering::Relaxed; -use std::sync::Arc; -use std::task::Poll::{Pending, Ready}; - -struct Chan { - num: AtomicUsize, - task: AtomicWaker, -} - -#[test] -fn basic_notification() { - const NUM_NOTIFY: usize = 2; - - loom::model(|| { - let chan = Arc::new(Chan { - num: AtomicUsize::new(0), - task: AtomicWaker::new(), - }); - - for _ in 0..NUM_NOTIFY { - let chan = chan.clone(); - - thread::spawn(move || { - chan.num.fetch_add(1, Relaxed); - chan.task.wake(); - }); - } - - block_on(poll_fn(move |cx| { - chan.task.register_by_ref(cx.waker()); - - if NUM_NOTIFY == chan.num.load(Relaxed) { - return Ready(()); - } - - Pending - })); - }); -} diff --git a/third_party/rust/tokio-0.2.25/src/sync/tests/loom_broadcast.rs b/third_party/rust/tokio-0.2.25/src/sync/tests/loom_broadcast.rs deleted file mode 100644 index da12fb9ff0c7..000000000000 --- a/third_party/rust/tokio-0.2.25/src/sync/tests/loom_broadcast.rs +++ /dev/null @@ 
-1,180 +0,0 @@ -use crate::sync::broadcast; -use crate::sync::broadcast::RecvError::{Closed, Lagged}; - -use loom::future::block_on; -use loom::sync::Arc; -use loom::thread; -use tokio_test::{assert_err, assert_ok}; - -#[test] -fn broadcast_send() { - loom::model(|| { - let (tx1, mut rx) = broadcast::channel(2); - let tx1 = Arc::new(tx1); - let tx2 = tx1.clone(); - - let th1 = thread::spawn(move || { - block_on(async { - assert_ok!(tx1.send("one")); - assert_ok!(tx1.send("two")); - assert_ok!(tx1.send("three")); - }); - }); - - let th2 = thread::spawn(move || { - block_on(async { - assert_ok!(tx2.send("eins")); - assert_ok!(tx2.send("zwei")); - assert_ok!(tx2.send("drei")); - }); - }); - - block_on(async { - let mut num = 0; - loop { - match rx.recv().await { - Ok(_) => num += 1, - Err(Closed) => break, - Err(Lagged(n)) => num += n as usize, - } - } - assert_eq!(num, 6); - }); - - assert_ok!(th1.join()); - assert_ok!(th2.join()); - }); -} - -// An `Arc` is used as the value in order to detect memory leaks. -#[test] -fn broadcast_two() { - loom::model(|| { - let (tx, mut rx1) = broadcast::channel::>(16); - let mut rx2 = tx.subscribe(); - - let th1 = thread::spawn(move || { - block_on(async { - let v = assert_ok!(rx1.recv().await); - assert_eq!(*v, "hello"); - - let v = assert_ok!(rx1.recv().await); - assert_eq!(*v, "world"); - - match assert_err!(rx1.recv().await) { - Closed => {} - _ => panic!(), - } - }); - }); - - let th2 = thread::spawn(move || { - block_on(async { - let v = assert_ok!(rx2.recv().await); - assert_eq!(*v, "hello"); - - let v = assert_ok!(rx2.recv().await); - assert_eq!(*v, "world"); - - match assert_err!(rx2.recv().await) { - Closed => {} - _ => panic!(), - } - }); - }); - - assert_ok!(tx.send(Arc::new("hello"))); - assert_ok!(tx.send(Arc::new("world"))); - drop(tx); - - assert_ok!(th1.join()); - assert_ok!(th2.join()); - }); -} - -#[test] -fn broadcast_wrap() { - loom::model(|| { - let (tx, mut rx1) = broadcast::channel(2); - let mut rx2 = tx.subscribe(); - - let th1 = thread::spawn(move || { - block_on(async { - let mut num = 0; - - loop { - match rx1.recv().await { - Ok(_) => num += 1, - Err(Closed) => break, - Err(Lagged(n)) => num += n as usize, - } - } - - assert_eq!(num, 3); - }); - }); - - let th2 = thread::spawn(move || { - block_on(async { - let mut num = 0; - - loop { - match rx2.recv().await { - Ok(_) => num += 1, - Err(Closed) => break, - Err(Lagged(n)) => num += n as usize, - } - } - - assert_eq!(num, 3); - }); - }); - - assert_ok!(tx.send("one")); - assert_ok!(tx.send("two")); - assert_ok!(tx.send("three")); - - drop(tx); - - assert_ok!(th1.join()); - assert_ok!(th2.join()); - }); -} - -#[test] -fn drop_rx() { - loom::model(|| { - let (tx, mut rx1) = broadcast::channel(16); - let rx2 = tx.subscribe(); - - let th1 = thread::spawn(move || { - block_on(async { - let v = assert_ok!(rx1.recv().await); - assert_eq!(v, "one"); - - let v = assert_ok!(rx1.recv().await); - assert_eq!(v, "two"); - - let v = assert_ok!(rx1.recv().await); - assert_eq!(v, "three"); - - match assert_err!(rx1.recv().await) { - Closed => {} - _ => panic!(), - } - }); - }); - - let th2 = thread::spawn(move || { - drop(rx2); - }); - - assert_ok!(tx.send("one")); - assert_ok!(tx.send("two")); - assert_ok!(tx.send("three")); - drop(tx); - - assert_ok!(th1.join()); - assert_ok!(th2.join()); - }); -} diff --git a/third_party/rust/tokio-0.2.25/src/sync/tests/loom_list.rs b/third_party/rust/tokio-0.2.25/src/sync/tests/loom_list.rs deleted file mode 100644 index 4067f865ce48..000000000000 --- 
a/third_party/rust/tokio-0.2.25/src/sync/tests/loom_list.rs +++ /dev/null @@ -1,48 +0,0 @@ -use crate::sync::mpsc::list; - -use loom::thread; -use std::sync::Arc; - -#[test] -fn smoke() { - use crate::sync::mpsc::block::Read::*; - - const NUM_TX: usize = 2; - const NUM_MSG: usize = 2; - - loom::model(|| { - let (tx, mut rx) = list::channel(); - let tx = Arc::new(tx); - - for th in 0..NUM_TX { - let tx = tx.clone(); - - thread::spawn(move || { - for i in 0..NUM_MSG { - tx.push((th, i)); - } - }); - } - - let mut next = vec![0; NUM_TX]; - - loop { - match rx.pop(&tx) { - Some(Value((th, v))) => { - assert_eq!(v, next[th]); - next[th] += 1; - - if next.iter().all(|&i| i == NUM_MSG) { - break; - } - } - Some(Closed) => { - panic!(); - } - None => { - thread::yield_now(); - } - } - } - }); -} diff --git a/third_party/rust/tokio-0.2.25/src/sync/tests/loom_mpsc.rs b/third_party/rust/tokio-0.2.25/src/sync/tests/loom_mpsc.rs deleted file mode 100644 index 6a1a6abedda9..000000000000 --- a/third_party/rust/tokio-0.2.25/src/sync/tests/loom_mpsc.rs +++ /dev/null @@ -1,77 +0,0 @@ -use crate::sync::mpsc; - -use futures::future::poll_fn; -use loom::future::block_on; -use loom::thread; - -#[test] -fn closing_tx() { - loom::model(|| { - let (mut tx, mut rx) = mpsc::channel(16); - - thread::spawn(move || { - tx.try_send(()).unwrap(); - drop(tx); - }); - - let v = block_on(poll_fn(|cx| rx.poll_recv(cx))); - assert!(v.is_some()); - - let v = block_on(poll_fn(|cx| rx.poll_recv(cx))); - assert!(v.is_none()); - }); -} - -#[test] -fn closing_unbounded_tx() { - loom::model(|| { - let (tx, mut rx) = mpsc::unbounded_channel(); - - thread::spawn(move || { - tx.send(()).unwrap(); - drop(tx); - }); - - let v = block_on(poll_fn(|cx| rx.poll_recv(cx))); - assert!(v.is_some()); - - let v = block_on(poll_fn(|cx| rx.poll_recv(cx))); - assert!(v.is_none()); - }); -} - -#[test] -fn dropping_tx() { - loom::model(|| { - let (tx, mut rx) = mpsc::channel::<()>(16); - - for _ in 0..2 { - let tx = tx.clone(); - thread::spawn(move || { - drop(tx); - }); - } - drop(tx); - - let v = block_on(poll_fn(|cx| rx.poll_recv(cx))); - assert!(v.is_none()); - }); -} - -#[test] -fn dropping_unbounded_tx() { - loom::model(|| { - let (tx, mut rx) = mpsc::unbounded_channel::<()>(); - - for _ in 0..2 { - let tx = tx.clone(); - thread::spawn(move || { - drop(tx); - }); - } - drop(tx); - - let v = block_on(poll_fn(|cx| rx.poll_recv(cx))); - assert!(v.is_none()); - }); -} diff --git a/third_party/rust/tokio-0.2.25/src/sync/tests/loom_notify.rs b/third_party/rust/tokio-0.2.25/src/sync/tests/loom_notify.rs deleted file mode 100644 index 60981d4669a1..000000000000 --- a/third_party/rust/tokio-0.2.25/src/sync/tests/loom_notify.rs +++ /dev/null @@ -1,90 +0,0 @@ -use crate::sync::Notify; - -use loom::future::block_on; -use loom::sync::Arc; -use loom::thread; - -#[test] -fn notify_one() { - loom::model(|| { - let tx = Arc::new(Notify::new()); - let rx = tx.clone(); - - let th = thread::spawn(move || { - block_on(async { - rx.notified().await; - }); - }); - - tx.notify(); - th.join().unwrap(); - }); -} - -#[test] -fn notify_multi() { - loom::model(|| { - let notify = Arc::new(Notify::new()); - - let mut ths = vec![]; - - for _ in 0..2 { - let notify = notify.clone(); - - ths.push(thread::spawn(move || { - block_on(async { - notify.notified().await; - notify.notify(); - }) - })); - } - - notify.notify(); - - for th in ths.drain(..) 
{ - th.join().unwrap(); - } - - block_on(async { - notify.notified().await; - }); - }); -} - -#[test] -fn notify_drop() { - use crate::future::poll_fn; - use std::future::Future; - use std::task::Poll; - - loom::model(|| { - let notify = Arc::new(Notify::new()); - let rx1 = notify.clone(); - let rx2 = notify.clone(); - - let th1 = thread::spawn(move || { - let mut recv = Box::pin(rx1.notified()); - - block_on(poll_fn(|cx| { - if recv.as_mut().poll(cx).is_ready() { - rx1.notify(); - } - Poll::Ready(()) - })); - }); - - let th2 = thread::spawn(move || { - block_on(async { - rx2.notified().await; - // Trigger second notification - rx2.notify(); - rx2.notified().await; - }); - }); - - notify.notify(); - - th1.join().unwrap(); - th2.join().unwrap(); - }); -} diff --git a/third_party/rust/tokio-0.2.25/src/sync/tests/loom_oneshot.rs b/third_party/rust/tokio-0.2.25/src/sync/tests/loom_oneshot.rs deleted file mode 100644 index dfa7459da7f5..000000000000 --- a/third_party/rust/tokio-0.2.25/src/sync/tests/loom_oneshot.rs +++ /dev/null @@ -1,109 +0,0 @@ -use crate::sync::oneshot; - -use futures::future::poll_fn; -use loom::future::block_on; -use loom::thread; -use std::task::Poll::{Pending, Ready}; - -#[test] -fn smoke() { - loom::model(|| { - let (tx, rx) = oneshot::channel(); - - thread::spawn(move || { - tx.send(1).unwrap(); - }); - - let value = block_on(rx).unwrap(); - assert_eq!(1, value); - }); -} - -#[test] -fn changing_rx_task() { - loom::model(|| { - let (tx, mut rx) = oneshot::channel(); - - thread::spawn(move || { - tx.send(1).unwrap(); - }); - - let rx = thread::spawn(move || { - let ready = block_on(poll_fn(|cx| match Pin::new(&mut rx).poll(cx) { - Ready(Ok(value)) => { - assert_eq!(1, value); - Ready(true) - } - Ready(Err(_)) => unimplemented!(), - Pending => Ready(false), - })); - - if ready { - None - } else { - Some(rx) - } - }) - .join() - .unwrap(); - - if let Some(rx) = rx { - // Previous task parked, use a new task... - let value = block_on(rx).unwrap(); - assert_eq!(1, value); - } - }); -} - -// TODO: Move this into `oneshot` proper. - -use std::future::Future; -use std::pin::Pin; -use std::task::{Context, Poll}; - -struct OnClose<'a> { - tx: &'a mut oneshot::Sender, -} - -impl<'a> OnClose<'a> { - fn new(tx: &'a mut oneshot::Sender) -> Self { - OnClose { tx } - } -} - -impl Future for OnClose<'_> { - type Output = bool; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let res = self.get_mut().tx.poll_closed(cx); - Ready(res.is_ready()) - } -} - -#[test] -fn changing_tx_task() { - loom::model(|| { - let (mut tx, rx) = oneshot::channel::(); - - thread::spawn(move || { - drop(rx); - }); - - let tx = thread::spawn(move || { - let t1 = block_on(OnClose::new(&mut tx)); - - if t1 { - None - } else { - Some(tx) - } - }) - .join() - .unwrap(); - - if let Some(mut tx) = tx { - // Previous task parked, use a new task... 
- block_on(OnClose::new(&mut tx)); - } - }); -} diff --git a/third_party/rust/tokio-0.2.25/src/sync/tests/loom_rwlock.rs b/third_party/rust/tokio-0.2.25/src/sync/tests/loom_rwlock.rs deleted file mode 100644 index 48d06e1d5f6d..000000000000 --- a/third_party/rust/tokio-0.2.25/src/sync/tests/loom_rwlock.rs +++ /dev/null @@ -1,78 +0,0 @@ -use crate::sync::rwlock::*; - -use loom::future::block_on; -use loom::thread; -use std::sync::Arc; - -#[test] -fn concurrent_write() { - let mut b = loom::model::Builder::new(); - - b.check(|| { - let rwlock = Arc::new(RwLock::::new(0)); - - let rwclone = rwlock.clone(); - let t1 = thread::spawn(move || { - block_on(async { - let mut guard = rwclone.write().await; - *guard += 5; - }); - }); - - let rwclone = rwlock.clone(); - let t2 = thread::spawn(move || { - block_on(async { - let mut guard = rwclone.write().await; - *guard += 5; - }); - }); - - t1.join().expect("thread 1 write should not panic"); - t2.join().expect("thread 2 write should not panic"); - //when all threads have finished the value on the lock should be 10 - let guard = block_on(rwlock.read()); - assert_eq!(10, *guard); - }); -} - -#[test] -fn concurrent_read_write() { - let mut b = loom::model::Builder::new(); - - b.check(|| { - let rwlock = Arc::new(RwLock::::new(0)); - - let rwclone = rwlock.clone(); - let t1 = thread::spawn(move || { - block_on(async { - let mut guard = rwclone.write().await; - *guard += 5; - }); - }); - - let rwclone = rwlock.clone(); - let t2 = thread::spawn(move || { - block_on(async { - let mut guard = rwclone.write().await; - *guard += 5; - }); - }); - - let rwclone = rwlock.clone(); - let t3 = thread::spawn(move || { - block_on(async { - let guard = rwclone.read().await; - //at this state the value on the lock may either be 0, 5, or 10 - assert!(*guard == 0 || *guard == 5 || *guard == 10); - }); - }); - - t1.join().expect("thread 1 write should not panic"); - t2.join().expect("thread 2 write should not panic"); - t3.join().expect("thread 3 read should not panic"); - - let guard = block_on(rwlock.read()); - //when all threads have finished the value on the lock should be 10 - assert_eq!(10, *guard); - }); -} diff --git a/third_party/rust/tokio-0.2.25/src/sync/tests/loom_semaphore_batch.rs b/third_party/rust/tokio-0.2.25/src/sync/tests/loom_semaphore_batch.rs deleted file mode 100644 index 76a1bc00626e..000000000000 --- a/third_party/rust/tokio-0.2.25/src/sync/tests/loom_semaphore_batch.rs +++ /dev/null @@ -1,215 +0,0 @@ -use crate::sync::batch_semaphore::*; - -use futures::future::poll_fn; -use loom::future::block_on; -use loom::sync::atomic::AtomicUsize; -use loom::thread; -use std::future::Future; -use std::pin::Pin; -use std::sync::atomic::Ordering::SeqCst; -use std::sync::Arc; -use std::task::Poll::Ready; -use std::task::{Context, Poll}; - -#[test] -fn basic_usage() { - const NUM: usize = 2; - - struct Shared { - semaphore: Semaphore, - active: AtomicUsize, - } - - async fn actor(shared: Arc) { - shared.semaphore.acquire(1).await.unwrap(); - let actual = shared.active.fetch_add(1, SeqCst); - assert!(actual <= NUM - 1); - - let actual = shared.active.fetch_sub(1, SeqCst); - assert!(actual <= NUM); - shared.semaphore.release(1); - } - - loom::model(|| { - let shared = Arc::new(Shared { - semaphore: Semaphore::new(NUM), - active: AtomicUsize::new(0), - }); - - for _ in 0..NUM { - let shared = shared.clone(); - - thread::spawn(move || { - block_on(actor(shared)); - }); - } - - block_on(actor(shared)); - }); -} - -#[test] -fn release() { - loom::model(|| { - let 
semaphore = Arc::new(Semaphore::new(1)); - - { - let semaphore = semaphore.clone(); - thread::spawn(move || { - block_on(semaphore.acquire(1)).unwrap(); - semaphore.release(1); - }); - } - - block_on(semaphore.acquire(1)).unwrap(); - - semaphore.release(1); - }); -} - -#[test] -fn basic_closing() { - const NUM: usize = 2; - - loom::model(|| { - let semaphore = Arc::new(Semaphore::new(1)); - - for _ in 0..NUM { - let semaphore = semaphore.clone(); - - thread::spawn(move || { - for _ in 0..2 { - block_on(semaphore.acquire(1)).map_err(|_| ())?; - - semaphore.release(1); - } - - Ok::<(), ()>(()) - }); - } - - semaphore.close(); - }); -} - -#[test] -fn concurrent_close() { - const NUM: usize = 3; - - loom::model(|| { - let semaphore = Arc::new(Semaphore::new(1)); - - for _ in 0..NUM { - let semaphore = semaphore.clone(); - - thread::spawn(move || { - block_on(semaphore.acquire(1)).map_err(|_| ())?; - semaphore.release(1); - semaphore.close(); - - Ok::<(), ()>(()) - }); - } - }); -} - -#[test] -fn concurrent_cancel() { - async fn poll_and_cancel(semaphore: Arc) { - let mut acquire1 = Some(semaphore.acquire(1)); - let mut acquire2 = Some(semaphore.acquire(1)); - poll_fn(|cx| { - // poll the acquire future once, and then immediately throw - // it away. this simulates a situation where a future is - // polled and then cancelled, such as by a timeout. - if let Some(acquire) = acquire1.take() { - pin!(acquire); - let _ = acquire.poll(cx); - } - if let Some(acquire) = acquire2.take() { - pin!(acquire); - let _ = acquire.poll(cx); - } - Poll::Ready(()) - }) - .await - } - - loom::model(|| { - let semaphore = Arc::new(Semaphore::new(0)); - let t1 = { - let semaphore = semaphore.clone(); - thread::spawn(move || block_on(poll_and_cancel(semaphore))) - }; - let t2 = { - let semaphore = semaphore.clone(); - thread::spawn(move || block_on(poll_and_cancel(semaphore))) - }; - let t3 = { - let semaphore = semaphore.clone(); - thread::spawn(move || block_on(poll_and_cancel(semaphore))) - }; - - t1.join().unwrap(); - semaphore.release(10); - t2.join().unwrap(); - t3.join().unwrap(); - }); -} - -#[test] -fn batch() { - let mut b = loom::model::Builder::new(); - b.preemption_bound = Some(1); - - b.check(|| { - let semaphore = Arc::new(Semaphore::new(10)); - let active = Arc::new(AtomicUsize::new(0)); - let mut ths = vec![]; - - for _ in 0..2 { - let semaphore = semaphore.clone(); - let active = active.clone(); - - ths.push(thread::spawn(move || { - for n in &[4, 10, 8] { - block_on(semaphore.acquire(*n)).unwrap(); - - active.fetch_add(*n as usize, SeqCst); - - let num_active = active.load(SeqCst); - assert!(num_active <= 10); - - thread::yield_now(); - - active.fetch_sub(*n as usize, SeqCst); - - semaphore.release(*n as usize); - } - })); - } - - for th in ths.into_iter() { - th.join().unwrap(); - } - - assert_eq!(10, semaphore.available_permits()); - }); -} - -#[test] -fn release_during_acquire() { - loom::model(|| { - let semaphore = Arc::new(Semaphore::new(10)); - semaphore - .try_acquire(8) - .expect("try_acquire should succeed; semaphore uncontended"); - let semaphore2 = semaphore.clone(); - let thread = thread::spawn(move || block_on(semaphore2.acquire(4)).unwrap()); - - semaphore.release(8); - thread.join().unwrap(); - semaphore.release(4); - assert_eq!(10, semaphore.available_permits()); - }) -} diff --git a/third_party/rust/tokio-0.2.25/src/sync/tests/loom_semaphore_ll.rs b/third_party/rust/tokio-0.2.25/src/sync/tests/loom_semaphore_ll.rs deleted file mode 100644 index b5e5efba82c2..000000000000 --- 
a/third_party/rust/tokio-0.2.25/src/sync/tests/loom_semaphore_ll.rs +++ /dev/null @@ -1,192 +0,0 @@ -use crate::sync::semaphore_ll::*; - -use futures::future::poll_fn; -use loom::future::block_on; -use loom::thread; -use std::future::Future; -use std::pin::Pin; -use std::sync::atomic::AtomicUsize; -use std::sync::atomic::Ordering::SeqCst; -use std::sync::Arc; -use std::task::Poll::Ready; -use std::task::{Context, Poll}; - -#[test] -fn basic_usage() { - const NUM: usize = 2; - - struct Actor { - waiter: Permit, - shared: Arc, - } - - struct Shared { - semaphore: Semaphore, - active: AtomicUsize, - } - - impl Future for Actor { - type Output = (); - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> { - let me = &mut *self; - - ready!(me.waiter.poll_acquire(cx, 1, &me.shared.semaphore)).unwrap(); - - let actual = me.shared.active.fetch_add(1, SeqCst); - assert!(actual <= NUM - 1); - - let actual = me.shared.active.fetch_sub(1, SeqCst); - assert!(actual <= NUM); - - me.waiter.release(1, &me.shared.semaphore); - - Ready(()) - } - } - - loom::model(|| { - let shared = Arc::new(Shared { - semaphore: Semaphore::new(NUM), - active: AtomicUsize::new(0), - }); - - for _ in 0..NUM { - let shared = shared.clone(); - - thread::spawn(move || { - block_on(Actor { - waiter: Permit::new(), - shared, - }); - }); - } - - block_on(Actor { - waiter: Permit::new(), - shared, - }); - }); -} - -#[test] -fn release() { - loom::model(|| { - let semaphore = Arc::new(Semaphore::new(1)); - - { - let semaphore = semaphore.clone(); - thread::spawn(move || { - let mut permit = Permit::new(); - - block_on(poll_fn(|cx| permit.poll_acquire(cx, 1, &semaphore))).unwrap(); - - permit.release(1, &semaphore); - }); - } - - let mut permit = Permit::new(); - - block_on(poll_fn(|cx| permit.poll_acquire(cx, 1, &semaphore))).unwrap(); - - permit.release(1, &semaphore); - }); -} - -#[test] -fn basic_closing() { - const NUM: usize = 2; - - loom::model(|| { - let semaphore = Arc::new(Semaphore::new(1)); - - for _ in 0..NUM { - let semaphore = semaphore.clone(); - - thread::spawn(move || { - let mut permit = Permit::new(); - - for _ in 0..2 { - block_on(poll_fn(|cx| { - permit.poll_acquire(cx, 1, &semaphore).map_err(|_| ()) - }))?; - - permit.release(1, &semaphore); - } - - Ok::<(), ()>(()) - }); - } - - semaphore.close(); - }); -} - -#[test] -fn concurrent_close() { - const NUM: usize = 3; - - loom::model(|| { - let semaphore = Arc::new(Semaphore::new(1)); - - for _ in 0..NUM { - let semaphore = semaphore.clone(); - - thread::spawn(move || { - let mut permit = Permit::new(); - - block_on(poll_fn(|cx| { - permit.poll_acquire(cx, 1, &semaphore).map_err(|_| ()) - }))?; - - permit.release(1, &semaphore); - - semaphore.close(); - - Ok::<(), ()>(()) - }); - } - }); -} - -#[test] -fn batch() { - let mut b = loom::model::Builder::new(); - b.preemption_bound = Some(1); - - b.check(|| { - let semaphore = Arc::new(Semaphore::new(10)); - let active = Arc::new(AtomicUsize::new(0)); - let mut ths = vec![]; - - for _ in 0..2 { - let semaphore = semaphore.clone(); - let active = active.clone(); - - ths.push(thread::spawn(move || { - let mut permit = Permit::new(); - - for n in &[4, 10, 8] { - block_on(poll_fn(|cx| permit.poll_acquire(cx, *n, &semaphore))).unwrap(); - - active.fetch_add(*n as usize, SeqCst); - - let num_active = active.load(SeqCst); - assert!(num_active <= 10); - - thread::yield_now(); - - active.fetch_sub(*n as usize, SeqCst); - - permit.release(*n, &semaphore); - } - })); - } - - for th in ths.into_iter() { - 
th.join().unwrap(); - } - - assert_eq!(10, semaphore.available_permits()); - }); -} diff --git a/third_party/rust/tokio-0.2.25/src/sync/tests/mod.rs b/third_party/rust/tokio-0.2.25/src/sync/tests/mod.rs deleted file mode 100644 index 6ba8c1f9b6a5..000000000000 --- a/third_party/rust/tokio-0.2.25/src/sync/tests/mod.rs +++ /dev/null @@ -1,18 +0,0 @@ -cfg_not_loom! { - mod atomic_waker; - mod semaphore_ll; - mod semaphore_batch; -} - -cfg_loom! { - mod loom_atomic_waker; - mod loom_broadcast; - #[cfg(tokio_unstable)] - mod loom_cancellation_token; - mod loom_list; - mod loom_mpsc; - mod loom_notify; - mod loom_oneshot; - mod loom_semaphore_batch; - mod loom_semaphore_ll; -} diff --git a/third_party/rust/tokio-0.2.25/src/sync/tests/semaphore_batch.rs b/third_party/rust/tokio-0.2.25/src/sync/tests/semaphore_batch.rs deleted file mode 100644 index 9342cd1cb3c9..000000000000 --- a/third_party/rust/tokio-0.2.25/src/sync/tests/semaphore_batch.rs +++ /dev/null @@ -1,250 +0,0 @@ -use crate::sync::batch_semaphore::Semaphore; -use tokio_test::*; - -#[test] -fn poll_acquire_one_available() { - let s = Semaphore::new(100); - assert_eq!(s.available_permits(), 100); - - // Polling for a permit succeeds immediately - assert_ready_ok!(task::spawn(s.acquire(1)).poll()); - assert_eq!(s.available_permits(), 99); -} - -#[test] -fn poll_acquire_many_available() { - let s = Semaphore::new(100); - assert_eq!(s.available_permits(), 100); - - // Polling for a permit succeeds immediately - assert_ready_ok!(task::spawn(s.acquire(5)).poll()); - assert_eq!(s.available_permits(), 95); - - assert_ready_ok!(task::spawn(s.acquire(5)).poll()); - assert_eq!(s.available_permits(), 90); -} - -#[test] -fn try_acquire_one_available() { - let s = Semaphore::new(100); - assert_eq!(s.available_permits(), 100); - - assert_ok!(s.try_acquire(1)); - assert_eq!(s.available_permits(), 99); - - assert_ok!(s.try_acquire(1)); - assert_eq!(s.available_permits(), 98); -} - -#[test] -fn try_acquire_many_available() { - let s = Semaphore::new(100); - assert_eq!(s.available_permits(), 100); - - assert_ok!(s.try_acquire(5)); - assert_eq!(s.available_permits(), 95); - - assert_ok!(s.try_acquire(5)); - assert_eq!(s.available_permits(), 90); -} - -#[test] -fn poll_acquire_one_unavailable() { - let s = Semaphore::new(1); - - // Acquire the first permit - assert_ready_ok!(task::spawn(s.acquire(1)).poll()); - assert_eq!(s.available_permits(), 0); - - let mut acquire_2 = task::spawn(s.acquire(1)); - // Try to acquire the second permit - assert_pending!(acquire_2.poll()); - assert_eq!(s.available_permits(), 0); - - s.release(1); - - assert_eq!(s.available_permits(), 0); - assert!(acquire_2.is_woken()); - assert_ready_ok!(acquire_2.poll()); - assert_eq!(s.available_permits(), 0); - - s.release(1); - assert_eq!(s.available_permits(), 1); -} - -#[test] -fn poll_acquire_many_unavailable() { - let s = Semaphore::new(5); - - // Acquire the first permit - assert_ready_ok!(task::spawn(s.acquire(1)).poll()); - assert_eq!(s.available_permits(), 4); - - // Try to acquire the second permit - let mut acquire_2 = task::spawn(s.acquire(5)); - assert_pending!(acquire_2.poll()); - assert_eq!(s.available_permits(), 0); - - // Try to acquire the third permit - let mut acquire_3 = task::spawn(s.acquire(3)); - assert_pending!(acquire_3.poll()); - assert_eq!(s.available_permits(), 0); - - s.release(1); - - assert_eq!(s.available_permits(), 0); - assert!(acquire_2.is_woken()); - assert_ready_ok!(acquire_2.poll()); - - assert!(!acquire_3.is_woken()); - 
assert_eq!(s.available_permits(), 0); - - s.release(1); - assert!(!acquire_3.is_woken()); - assert_eq!(s.available_permits(), 0); - - s.release(2); - assert!(acquire_3.is_woken()); - - assert_ready_ok!(acquire_3.poll()); -} - -#[test] -fn try_acquire_one_unavailable() { - let s = Semaphore::new(1); - - // Acquire the first permit - assert_ok!(s.try_acquire(1)); - assert_eq!(s.available_permits(), 0); - - assert_err!(s.try_acquire(1)); - - s.release(1); - - assert_eq!(s.available_permits(), 1); - assert_ok!(s.try_acquire(1)); - - s.release(1); - assert_eq!(s.available_permits(), 1); -} - -#[test] -fn try_acquire_many_unavailable() { - let s = Semaphore::new(5); - - // Acquire the first permit - assert_ok!(s.try_acquire(1)); - assert_eq!(s.available_permits(), 4); - - assert_err!(s.try_acquire(5)); - - s.release(1); - assert_eq!(s.available_permits(), 5); - - assert_ok!(s.try_acquire(5)); - - s.release(1); - assert_eq!(s.available_permits(), 1); - - s.release(1); - assert_eq!(s.available_permits(), 2); -} - -#[test] -fn poll_acquire_one_zero_permits() { - let s = Semaphore::new(0); - assert_eq!(s.available_permits(), 0); - - // Try to acquire the permit - let mut acquire = task::spawn(s.acquire(1)); - assert_pending!(acquire.poll()); - - s.release(1); - - assert!(acquire.is_woken()); - assert_ready_ok!(acquire.poll()); -} - -#[test] -#[should_panic] -fn validates_max_permits() { - use std::usize; - Semaphore::new((usize::MAX >> 2) + 1); -} - -#[test] -fn close_semaphore_prevents_acquire() { - let s = Semaphore::new(5); - s.close(); - - assert_eq!(5, s.available_permits()); - - assert_ready_err!(task::spawn(s.acquire(1)).poll()); - assert_eq!(5, s.available_permits()); - - assert_ready_err!(task::spawn(s.acquire(1)).poll()); - assert_eq!(5, s.available_permits()); -} - -#[test] -fn close_semaphore_notifies_permit1() { - let s = Semaphore::new(0); - let mut acquire = task::spawn(s.acquire(1)); - - assert_pending!(acquire.poll()); - - s.close(); - - assert!(acquire.is_woken()); - assert_ready_err!(acquire.poll()); -} - -#[test] -fn close_semaphore_notifies_permit2() { - let s = Semaphore::new(2); - - // Acquire a couple of permits - assert_ready_ok!(task::spawn(s.acquire(1)).poll()); - assert_ready_ok!(task::spawn(s.acquire(1)).poll()); - - let mut acquire3 = task::spawn(s.acquire(1)); - let mut acquire4 = task::spawn(s.acquire(1)); - assert_pending!(acquire3.poll()); - assert_pending!(acquire4.poll()); - - s.close(); - - assert!(acquire3.is_woken()); - assert!(acquire4.is_woken()); - - assert_ready_err!(acquire3.poll()); - assert_ready_err!(acquire4.poll()); - - assert_eq!(0, s.available_permits()); - - s.release(1); - - assert_eq!(1, s.available_permits()); - - assert_ready_err!(task::spawn(s.acquire(1)).poll()); - - s.release(1); - - assert_eq!(2, s.available_permits()); -} - -#[test] -fn cancel_acquire_releases_permits() { - let s = Semaphore::new(10); - s.try_acquire(4).expect("uncontended try_acquire succeeds"); - assert_eq!(6, s.available_permits()); - - let mut acquire = task::spawn(s.acquire(8)); - assert_pending!(acquire.poll()); - - assert_eq!(0, s.available_permits()); - drop(acquire); - - assert_eq!(6, s.available_permits()); - assert_ok!(s.try_acquire(6)); -} diff --git a/third_party/rust/tokio-0.2.25/src/sync/tests/semaphore_ll.rs b/third_party/rust/tokio-0.2.25/src/sync/tests/semaphore_ll.rs deleted file mode 100644 index bfb075780bb8..000000000000 --- a/third_party/rust/tokio-0.2.25/src/sync/tests/semaphore_ll.rs +++ /dev/null @@ -1,470 +0,0 @@ -use 
crate::sync::semaphore_ll::{Permit, Semaphore}; -use tokio_test::*; - -#[test] -fn poll_acquire_one_available() { - let s = Semaphore::new(100); - assert_eq!(s.available_permits(), 100); - - // Polling for a permit succeeds immediately - let mut permit = task::spawn(Permit::new()); - assert!(!permit.is_acquired()); - - assert_ready_ok!(permit.enter(|cx, mut p| p.poll_acquire(cx, 1, &s))); - assert_eq!(s.available_permits(), 99); - assert!(permit.is_acquired()); - - // Polling again on the same waiter does not claim a new permit - assert_ready_ok!(permit.enter(|cx, mut p| p.poll_acquire(cx, 1, &s))); - assert_eq!(s.available_permits(), 99); - assert!(permit.is_acquired()); -} - -#[test] -fn poll_acquire_many_available() { - let s = Semaphore::new(100); - assert_eq!(s.available_permits(), 100); - - // Polling for a permit succeeds immediately - let mut permit = task::spawn(Permit::new()); - assert!(!permit.is_acquired()); - - assert_ready_ok!(permit.enter(|cx, mut p| p.poll_acquire(cx, 5, &s))); - assert_eq!(s.available_permits(), 95); - assert!(permit.is_acquired()); - - // Polling again on the same waiter does not claim a new permit - assert_ready_ok!(permit.enter(|cx, mut p| p.poll_acquire(cx, 1, &s))); - assert_eq!(s.available_permits(), 95); - assert!(permit.is_acquired()); - - assert_ready_ok!(permit.enter(|cx, mut p| p.poll_acquire(cx, 5, &s))); - assert_eq!(s.available_permits(), 95); - assert!(permit.is_acquired()); - - // Polling for a larger number of permits acquires more - assert_ready_ok!(permit.enter(|cx, mut p| p.poll_acquire(cx, 8, &s))); - assert_eq!(s.available_permits(), 92); - assert!(permit.is_acquired()); -} - -#[test] -fn try_acquire_one_available() { - let s = Semaphore::new(100); - assert_eq!(s.available_permits(), 100); - - // Polling for a permit succeeds immediately - let mut permit = Permit::new(); - assert!(!permit.is_acquired()); - - assert_ok!(permit.try_acquire(1, &s)); - assert_eq!(s.available_permits(), 99); - assert!(permit.is_acquired()); - - // Polling again on the same waiter does not claim a new permit - assert_ok!(permit.try_acquire(1, &s)); - assert_eq!(s.available_permits(), 99); - assert!(permit.is_acquired()); -} - -#[test] -fn try_acquire_many_available() { - let s = Semaphore::new(100); - assert_eq!(s.available_permits(), 100); - - // Polling for a permit succeeds immediately - let mut permit = Permit::new(); - assert!(!permit.is_acquired()); - - assert_ok!(permit.try_acquire(5, &s)); - assert_eq!(s.available_permits(), 95); - assert!(permit.is_acquired()); - - // Polling again on the same waiter does not claim a new permit - assert_ok!(permit.try_acquire(5, &s)); - assert_eq!(s.available_permits(), 95); - assert!(permit.is_acquired()); -} - -#[test] -fn poll_acquire_one_unavailable() { - let s = Semaphore::new(1); - - let mut permit_1 = task::spawn(Permit::new()); - let mut permit_2 = task::spawn(Permit::new()); - - // Acquire the first permit - assert_ready_ok!(permit_1.enter(|cx, mut p| p.poll_acquire(cx, 1, &s))); - assert_eq!(s.available_permits(), 0); - - permit_2.enter(|cx, mut p| { - // Try to acquire the second permit - assert_pending!(p.poll_acquire(cx, 1, &s)); - }); - - permit_1.release(1, &s); - - assert_eq!(s.available_permits(), 0); - assert!(permit_2.is_woken()); - assert_ready_ok!(permit_2.enter(|cx, mut p| p.poll_acquire(cx, 1, &s))); - - permit_2.release(1, &s); - assert_eq!(s.available_permits(), 1); -} - -#[test] -fn forget_acquired() { - let s = Semaphore::new(1); - - // Polling for a permit succeeds immediately - let mut 
permit = task::spawn(Permit::new()); - - assert_ready_ok!(permit.enter(|cx, mut p| p.poll_acquire(cx, 1, &s))); - - assert_eq!(s.available_permits(), 0); - - permit.forget(1); - assert_eq!(s.available_permits(), 0); -} - -#[test] -fn forget_waiting() { - let s = Semaphore::new(0); - - // Polling for a permit succeeds immediately - let mut permit = task::spawn(Permit::new()); - - assert_pending!(permit.enter(|cx, mut p| p.poll_acquire(cx, 1, &s))); - - assert_eq!(s.available_permits(), 0); - - permit.forget(1); - - s.add_permits(1); - - assert!(!permit.is_woken()); - assert_eq!(s.available_permits(), 1); -} - -#[test] -fn poll_acquire_many_unavailable() { - let s = Semaphore::new(5); - - let mut permit_1 = task::spawn(Permit::new()); - let mut permit_2 = task::spawn(Permit::new()); - let mut permit_3 = task::spawn(Permit::new()); - - // Acquire the first permit - assert_ready_ok!(permit_1.enter(|cx, mut p| p.poll_acquire(cx, 1, &s))); - assert_eq!(s.available_permits(), 4); - - permit_2.enter(|cx, mut p| { - // Try to acquire the second permit - assert_pending!(p.poll_acquire(cx, 5, &s)); - }); - - assert_eq!(s.available_permits(), 0); - - permit_3.enter(|cx, mut p| { - // Try to acquire the third permit - assert_pending!(p.poll_acquire(cx, 3, &s)); - }); - - permit_1.release(1, &s); - - assert_eq!(s.available_permits(), 0); - assert!(permit_2.is_woken()); - assert_ready_ok!(permit_2.enter(|cx, mut p| p.poll_acquire(cx, 5, &s))); - - assert!(!permit_3.is_woken()); - assert_eq!(s.available_permits(), 0); - - permit_2.release(1, &s); - assert!(!permit_3.is_woken()); - assert_eq!(s.available_permits(), 0); - - permit_2.release(2, &s); - assert!(permit_3.is_woken()); - - assert_ready_ok!(permit_3.enter(|cx, mut p| p.poll_acquire(cx, 3, &s))); -} - -#[test] -fn try_acquire_one_unavailable() { - let s = Semaphore::new(1); - - let mut permit_1 = Permit::new(); - let mut permit_2 = Permit::new(); - - // Acquire the first permit - assert_ok!(permit_1.try_acquire(1, &s)); - assert_eq!(s.available_permits(), 0); - - assert_err!(permit_2.try_acquire(1, &s)); - - permit_1.release(1, &s); - - assert_eq!(s.available_permits(), 1); - assert_ok!(permit_2.try_acquire(1, &s)); - - permit_2.release(1, &s); - assert_eq!(s.available_permits(), 1); -} - -#[test] -fn try_acquire_many_unavailable() { - let s = Semaphore::new(5); - - let mut permit_1 = Permit::new(); - let mut permit_2 = Permit::new(); - - // Acquire the first permit - assert_ok!(permit_1.try_acquire(1, &s)); - assert_eq!(s.available_permits(), 4); - - assert_err!(permit_2.try_acquire(5, &s)); - - permit_1.release(1, &s); - assert_eq!(s.available_permits(), 5); - - assert_ok!(permit_2.try_acquire(5, &s)); - - permit_2.release(1, &s); - assert_eq!(s.available_permits(), 1); - - permit_2.release(1, &s); - assert_eq!(s.available_permits(), 2); -} - -#[test] -fn poll_acquire_one_zero_permits() { - let s = Semaphore::new(0); - assert_eq!(s.available_permits(), 0); - - let mut permit = task::spawn(Permit::new()); - - // Try to acquire the permit - permit.enter(|cx, mut p| { - assert_pending!(p.poll_acquire(cx, 1, &s)); - }); - - s.add_permits(1); - - assert!(permit.is_woken()); - assert_ready_ok!(permit.enter(|cx, mut p| p.poll_acquire(cx, 1, &s))); -} - -#[test] -#[should_panic] -fn validates_max_permits() { - use std::usize; - Semaphore::new((usize::MAX >> 2) + 1); -} - -#[test] -fn close_semaphore_prevents_acquire() { - let s = Semaphore::new(5); - s.close(); - - assert_eq!(5, s.available_permits()); - - let mut permit_1 = task::spawn(Permit::new()); - 
let mut permit_2 = task::spawn(Permit::new()); - - assert_ready_err!(permit_1.enter(|cx, mut p| p.poll_acquire(cx, 1, &s))); - assert_eq!(5, s.available_permits()); - - assert_ready_err!(permit_2.enter(|cx, mut p| p.poll_acquire(cx, 2, &s))); - assert_eq!(5, s.available_permits()); -} - -#[test] -fn close_semaphore_notifies_permit1() { - let s = Semaphore::new(0); - let mut permit = task::spawn(Permit::new()); - - assert_pending!(permit.enter(|cx, mut p| p.poll_acquire(cx, 1, &s))); - - s.close(); - - assert!(permit.is_woken()); - assert_ready_err!(permit.enter(|cx, mut p| p.poll_acquire(cx, 1, &s))); -} - -#[test] -fn close_semaphore_notifies_permit2() { - let s = Semaphore::new(2); - - let mut permit1 = task::spawn(Permit::new()); - let mut permit2 = task::spawn(Permit::new()); - let mut permit3 = task::spawn(Permit::new()); - let mut permit4 = task::spawn(Permit::new()); - - // Acquire a couple of permits - assert_ready_ok!(permit1.enter(|cx, mut p| p.poll_acquire(cx, 1, &s))); - assert_ready_ok!(permit2.enter(|cx, mut p| p.poll_acquire(cx, 1, &s))); - - assert_pending!(permit3.enter(|cx, mut p| p.poll_acquire(cx, 1, &s))); - assert_pending!(permit4.enter(|cx, mut p| p.poll_acquire(cx, 1, &s))); - - s.close(); - - assert!(permit3.is_woken()); - assert!(permit4.is_woken()); - - assert_ready_err!(permit3.enter(|cx, mut p| p.poll_acquire(cx, 1, &s))); - assert_ready_err!(permit4.enter(|cx, mut p| p.poll_acquire(cx, 1, &s))); - - assert_eq!(0, s.available_permits()); - - permit1.release(1, &s); - - assert_eq!(1, s.available_permits()); - - assert_ready_err!(permit1.enter(|cx, mut p| p.poll_acquire(cx, 1, &s))); - - permit2.release(1, &s); - - assert_eq!(2, s.available_permits()); -} - -#[test] -fn poll_acquire_additional_permits_while_waiting_before_assigned() { - let s = Semaphore::new(1); - - let mut permit = task::spawn(Permit::new()); - - assert_pending!(permit.enter(|cx, mut p| p.poll_acquire(cx, 2, &s))); - assert_pending!(permit.enter(|cx, mut p| p.poll_acquire(cx, 3, &s))); - - s.add_permits(1); - assert!(!permit.is_woken()); - - s.add_permits(1); - assert!(permit.is_woken()); - - assert_ready_ok!(permit.enter(|cx, mut p| p.poll_acquire(cx, 3, &s))); -} - -#[test] -fn try_acquire_additional_permits_while_waiting_before_assigned() { - let s = Semaphore::new(1); - - let mut permit = task::spawn(Permit::new()); - - assert_pending!(permit.enter(|cx, mut p| p.poll_acquire(cx, 2, &s))); - - assert_err!(permit.enter(|_, mut p| p.try_acquire(3, &s))); - - s.add_permits(1); - assert!(permit.is_woken()); - - assert_ok!(permit.enter(|_, mut p| p.try_acquire(2, &s))); -} - -#[test] -fn poll_acquire_additional_permits_while_waiting_after_assigned_success() { - let s = Semaphore::new(1); - - let mut permit = task::spawn(Permit::new()); - - assert_pending!(permit.enter(|cx, mut p| p.poll_acquire(cx, 2, &s))); - - s.add_permits(2); - - assert!(permit.is_woken()); - assert_ready_ok!(permit.enter(|cx, mut p| p.poll_acquire(cx, 3, &s))); -} - -#[test] -fn poll_acquire_additional_permits_while_waiting_after_assigned_requeue() { - let s = Semaphore::new(1); - - let mut permit = task::spawn(Permit::new()); - - assert_pending!(permit.enter(|cx, mut p| p.poll_acquire(cx, 2, &s))); - - s.add_permits(2); - - assert!(permit.is_woken()); - assert_pending!(permit.enter(|cx, mut p| p.poll_acquire(cx, 4, &s))); - - s.add_permits(1); - - assert!(permit.is_woken()); - assert_ready_ok!(permit.enter(|cx, mut p| p.poll_acquire(cx, 4, &s))); -} - -#[test] -fn poll_acquire_fewer_permits_while_waiting() { - let s = 
Semaphore::new(1); - - let mut permit = task::spawn(Permit::new()); - - assert_pending!(permit.enter(|cx, mut p| p.poll_acquire(cx, 2, &s))); - assert_eq!(s.available_permits(), 0); - - assert_ready_ok!(permit.enter(|cx, mut p| p.poll_acquire(cx, 1, &s))); - assert_eq!(s.available_permits(), 0); -} - -#[test] -fn poll_acquire_fewer_permits_after_assigned() { - let s = Semaphore::new(1); - - let mut permit1 = task::spawn(Permit::new()); - let mut permit2 = task::spawn(Permit::new()); - - assert_pending!(permit1.enter(|cx, mut p| p.poll_acquire(cx, 5, &s))); - assert_eq!(s.available_permits(), 0); - - assert_pending!(permit2.enter(|cx, mut p| p.poll_acquire(cx, 1, &s))); - - s.add_permits(4); - assert!(permit1.is_woken()); - assert!(!permit2.is_woken()); - - assert_ready_ok!(permit1.enter(|cx, mut p| p.poll_acquire(cx, 3, &s))); - - assert!(permit2.is_woken()); - assert_eq!(s.available_permits(), 1); - - assert_ready_ok!(permit2.enter(|cx, mut p| p.poll_acquire(cx, 1, &s))); -} - -#[test] -fn forget_partial_1() { - let s = Semaphore::new(0); - - let mut permit = task::spawn(Permit::new()); - - assert_pending!(permit.enter(|cx, mut p| p.poll_acquire(cx, 2, &s))); - s.add_permits(1); - - assert_eq!(0, s.available_permits()); - - permit.release(1, &s); - - assert_ready_ok!(permit.enter(|cx, mut p| p.poll_acquire(cx, 1, &s))); - - assert_eq!(s.available_permits(), 0); -} - -#[test] -fn forget_partial_2() { - let s = Semaphore::new(0); - - let mut permit = task::spawn(Permit::new()); - - assert_pending!(permit.enter(|cx, mut p| p.poll_acquire(cx, 2, &s))); - s.add_permits(1); - - assert_eq!(0, s.available_permits()); - - permit.release(1, &s); - - s.add_permits(1); - - assert_ready_ok!(permit.enter(|cx, mut p| p.poll_acquire(cx, 2, &s))); - assert_eq!(s.available_permits(), 0); -} diff --git a/third_party/rust/tokio-0.2.25/src/sync/watch.rs b/third_party/rust/tokio-0.2.25/src/sync/watch.rs deleted file mode 100644 index 13033d9e7261..000000000000 --- a/third_party/rust/tokio-0.2.25/src/sync/watch.rs +++ /dev/null @@ -1,431 +0,0 @@ -//! A single-producer, multi-consumer channel that only retains the *last* sent -//! value. -//! -//! This channel is useful for watching for changes to a value from multiple -//! points in the code base, for example, changes to configuration values. -//! -//! # Usage -//! -//! [`channel`] returns a [`Sender`] / [`Receiver`] pair. These are -//! the producer and sender halves of the channel. The channel is -//! created with an initial value. [`Receiver::recv`] will always -//! be ready upon creation and will yield either this initial value or -//! the latest value that has been sent by `Sender`. -//! -//! Calls to [`Receiver::recv`] will always yield the latest value. -//! -//! # Examples -//! -//! ``` -//! use tokio::sync::watch; -//! -//! # async fn dox() -> Result<(), Box> { -//! let (tx, mut rx) = watch::channel("hello"); -//! -//! tokio::spawn(async move { -//! while let Some(value) = rx.recv().await { -//! println!("received = {:?}", value); -//! } -//! }); -//! -//! tx.broadcast("world")?; -//! # Ok(()) -//! # } -//! ``` -//! -//! # Closing -//! -//! [`Sender::closed`] allows the producer to detect when all [`Receiver`] -//! handles have been dropped. This indicates that there is no further interest -//! in the values being produced and work can be stopped. -//! -//! # Thread safety -//! -//! Both [`Sender`] and [`Receiver`] are thread safe. They can be moved to other -//! threads and can be used in a concurrent environment. Clones of [`Receiver`] -//! 
handles may be moved to separate threads and also used concurrently. -//! -//! [`Sender`]: crate::sync::watch::Sender -//! [`Receiver`]: crate::sync::watch::Receiver -//! [`Receiver::recv`]: crate::sync::watch::Receiver::recv -//! [`channel`]: crate::sync::watch::channel -//! [`Sender::closed`]: crate::sync::watch::Sender::closed - -use crate::future::poll_fn; -use crate::sync::task::AtomicWaker; - -use fnv::FnvHashSet; -use std::ops; -use std::sync::atomic::AtomicUsize; -use std::sync::atomic::Ordering::{Relaxed, SeqCst}; -use std::sync::{Arc, Mutex, RwLock, RwLockReadGuard, Weak}; -use std::task::Poll::{Pending, Ready}; -use std::task::{Context, Poll}; - -/// Receives values from the associated [`Sender`](struct@Sender). -/// -/// Instances are created by the [`channel`](fn@channel) function. -#[derive(Debug)] -pub struct Receiver { - /// Pointer to the shared state - shared: Arc>, - - /// Pointer to the watcher's internal state - inner: Watcher, -} - -/// Sends values to the associated [`Receiver`](struct@Receiver). -/// -/// Instances are created by the [`channel`](fn@channel) function. -#[derive(Debug)] -pub struct Sender { - shared: Weak>, -} - -/// Returns a reference to the inner value -/// -/// Outstanding borrows hold a read lock on the inner value. This means that -/// long lived borrows could cause the produce half to block. It is recommended -/// to keep the borrow as short lived as possible. -#[derive(Debug)] -pub struct Ref<'a, T> { - inner: RwLockReadGuard<'a, T>, -} - -pub mod error { - //! Watch error types - - use std::fmt; - - /// Error produced when sending a value fails. - #[derive(Debug)] - pub struct SendError { - pub(crate) inner: T, - } - - // ===== impl SendError ===== - - impl fmt::Display for SendError { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(fmt, "channel closed") - } - } - - impl std::error::Error for SendError {} -} - -#[derive(Debug)] -struct Shared { - /// The most recent value - value: RwLock, - - /// The current version - /// - /// The lowest bit represents a "closed" state. The rest of the bits - /// represent the current version. - version: AtomicUsize, - - /// All watchers - watchers: Mutex, - - /// Task to notify when all watchers drop - cancel: AtomicWaker, -} - -type Watchers = FnvHashSet; - -/// The watcher's ID is based on the Arc's pointer. -#[derive(Clone, Debug)] -struct Watcher(Arc); - -#[derive(Debug)] -struct WatchInner { - /// Last observed version - version: AtomicUsize, - waker: AtomicWaker, -} - -const CLOSED: usize = 1; - -/// Creates a new watch channel, returning the "send" and "receive" handles. -/// -/// All values sent by [`Sender`] will become visible to the [`Receiver`] handles. -/// Only the last value sent is made available to the [`Receiver`] half. All -/// intermediate values are dropped. 
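The `Watchers` set above deduplicates receivers by the identity of their inner `Arc` ("The watcher's ID is based on the Arc's pointer"). A minimal sketch of that pointer-identity pattern, with illustrative type names that are not part of this patch:

```
use std::collections::HashSet;
use std::hash::{Hash, Hasher};
use std::sync::Arc;

// Illustrative stand-in for the per-receiver state.
struct Inner;

#[derive(Clone)]
struct Handle(Arc<Inner>);

impl PartialEq for Handle {
    fn eq(&self, other: &Self) -> bool {
        // Two handles name the same watcher only if they share the Arc.
        Arc::ptr_eq(&self.0, &other.0)
    }
}

impl Eq for Handle {}

impl Hash for Handle {
    fn hash<H: Hasher>(&self, state: &mut H) {
        // Hash the allocation address, consistent with the equality above.
        (&*self.0 as *const Inner).hash(state)
    }
}

fn main() {
    let a = Handle(Arc::new(Inner));
    let b = a.clone();
    let c = Handle(Arc::new(Inner));

    let mut set = HashSet::new();
    set.insert(a);
    assert!(set.contains(&b));  // clone of the same Arc: same watcher
    assert!(!set.contains(&c)); // distinct Arc: distinct watcher
}
```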
-/// -/// # Examples -/// -/// ``` -/// use tokio::sync::watch; -/// -/// # async fn dox() -> Result<(), Box> { -/// let (tx, mut rx) = watch::channel("hello"); -/// -/// tokio::spawn(async move { -/// while let Some(value) = rx.recv().await { -/// println!("received = {:?}", value); -/// } -/// }); -/// -/// tx.broadcast("world")?; -/// # Ok(()) -/// # } -/// ``` -/// -/// [`Sender`]: struct@Sender -/// [`Receiver`]: struct@Receiver -pub fn channel(init: T) -> (Sender, Receiver) { - const VERSION_0: usize = 0; - const VERSION_1: usize = 2; - - // We don't start knowing VERSION_1 - let inner = Watcher::new_version(VERSION_0); - - // Insert the watcher - let mut watchers = FnvHashSet::with_capacity_and_hasher(0, Default::default()); - watchers.insert(inner.clone()); - - let shared = Arc::new(Shared { - value: RwLock::new(init), - version: AtomicUsize::new(VERSION_1), - watchers: Mutex::new(watchers), - cancel: AtomicWaker::new(), - }); - - let tx = Sender { - shared: Arc::downgrade(&shared), - }; - - let rx = Receiver { shared, inner }; - - (tx, rx) -} - -impl Receiver { - /// Returns a reference to the most recently sent value - /// - /// Outstanding borrows hold a read lock. This means that long lived borrows - /// could cause the send half to block. It is recommended to keep the borrow - /// as short lived as possible. - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::watch; - /// - /// let (_, rx) = watch::channel("hello"); - /// assert_eq!(*rx.borrow(), "hello"); - /// ``` - pub fn borrow(&self) -> Ref<'_, T> { - let inner = self.shared.value.read().unwrap(); - Ref { inner } - } - - // TODO: document - #[doc(hidden)] - pub fn poll_recv_ref<'a>(&'a mut self, cx: &mut Context<'_>) -> Poll>> { - // Make sure the task is up to date - self.inner.waker.register_by_ref(cx.waker()); - - let state = self.shared.version.load(SeqCst); - let version = state & !CLOSED; - - if self.inner.version.swap(version, Relaxed) != version { - let inner = self.shared.value.read().unwrap(); - - return Ready(Some(Ref { inner })); - } - - if CLOSED == state & CLOSED { - // The `Store` handle has been dropped. - return Ready(None); - } - - Pending - } -} - -impl Receiver { - /// Attempts to clone the latest value sent via the channel. - /// - /// If this is the first time the function is called on a `Receiver` - /// instance, then the function completes immediately with the **current** - /// value held by the channel. On the next call, the function waits until - /// a new value is sent in the channel. - /// - /// `None` is returned if the `Sender` half is dropped. - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::watch; - /// - /// #[tokio::main] - /// async fn main() { - /// let (tx, mut rx) = watch::channel("hello"); - /// - /// let v = rx.recv().await.unwrap(); - /// assert_eq!(v, "hello"); - /// - /// tokio::spawn(async move { - /// tx.broadcast("goodbye").unwrap(); - /// }); - /// - /// // Waits for the new task to spawn and send the value. 
- /// let v = rx.recv().await.unwrap(); - /// assert_eq!(v, "goodbye"); - /// - /// let v = rx.recv().await; - /// assert!(v.is_none()); - /// } - /// ``` - pub async fn recv(&mut self) -> Option { - poll_fn(|cx| { - let v_ref = ready!(self.poll_recv_ref(cx)); - Poll::Ready(v_ref.map(|v_ref| (*v_ref).clone())) - }) - .await - } -} - -#[cfg(feature = "stream")] -impl crate::stream::Stream for Receiver { - type Item = T; - - fn poll_next(mut self: std::pin::Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let v_ref = ready!(self.poll_recv_ref(cx)); - - Poll::Ready(v_ref.map(|v_ref| (*v_ref).clone())) - } -} - -impl Clone for Receiver { - fn clone(&self) -> Self { - let ver = self.inner.version.load(Relaxed); - let inner = Watcher::new_version(ver); - let shared = self.shared.clone(); - - shared.watchers.lock().unwrap().insert(inner.clone()); - - Receiver { shared, inner } - } -} - -impl Drop for Receiver { - fn drop(&mut self) { - self.shared.watchers.lock().unwrap().remove(&self.inner); - } -} - -impl Sender { - /// Broadcasts a new value via the channel, notifying all receivers. - pub fn broadcast(&self, value: T) -> Result<(), error::SendError> { - let shared = match self.shared.upgrade() { - Some(shared) => shared, - // All `Watch` handles have been canceled - None => return Err(error::SendError { inner: value }), - }; - - // Replace the value - { - let mut lock = shared.value.write().unwrap(); - *lock = value; - } - - // Update the version. 2 is used so that the CLOSED bit is not set. - shared.version.fetch_add(2, SeqCst); - - // Notify all watchers - notify_all(&*shared); - - Ok(()) - } - - /// Completes when all receivers have dropped. - /// - /// This allows the producer to get notified when interest in the produced - /// values is canceled and immediately stop doing work. 
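A minimal sketch of how a producer might use `closed` to stop working early, assuming the tokio 0.2.x API defined just below (note that `closed` takes `&mut self` in this version):

```rust
use tokio::sync::watch;

#[tokio::main]
async fn main() {
    let (mut tx, rx) = watch::channel("config-v1");

    // Simulate the last consumer going away.
    drop(rx);

    // `closed` resolves once every `Receiver` handle has been dropped,
    // letting the producer stop doing work nobody will observe.
    tx.closed().await;
    println!("all receivers dropped; stopping the producer");
}
```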
- pub async fn closed(&mut self) { - poll_fn(|cx| self.poll_close(cx)).await - } - - fn poll_close(&mut self, cx: &mut Context<'_>) -> Poll<()> { - match self.shared.upgrade() { - Some(shared) => { - shared.cancel.register_by_ref(cx.waker()); - Pending - } - None => Ready(()), - } - } -} - -/// Notifies all watchers of a change -fn notify_all(shared: &Shared) { - let watchers = shared.watchers.lock().unwrap(); - - for watcher in watchers.iter() { - // Notify the task - watcher.waker.wake(); - } -} - -impl Drop for Sender { - fn drop(&mut self) { - if let Some(shared) = self.shared.upgrade() { - shared.version.fetch_or(CLOSED, SeqCst); - notify_all(&*shared); - } - } -} - -// ===== impl Ref ===== - -impl ops::Deref for Ref<'_, T> { - type Target = T; - - fn deref(&self) -> &T { - self.inner.deref() - } -} - -// ===== impl Shared ===== - -impl Drop for Shared { - fn drop(&mut self) { - self.cancel.wake(); - } -} - -// ===== impl Watcher ===== - -impl Watcher { - fn new_version(version: usize) -> Self { - Watcher(Arc::new(WatchInner { - version: AtomicUsize::new(version), - waker: AtomicWaker::new(), - })) - } -} - -impl std::cmp::PartialEq for Watcher { - fn eq(&self, other: &Watcher) -> bool { - Arc::ptr_eq(&self.0, &other.0) - } -} - -impl std::cmp::Eq for Watcher {} - -impl std::hash::Hash for Watcher { - fn hash(&self, state: &mut H) { - (&*self.0 as *const WatchInner).hash(state) - } -} - -impl std::ops::Deref for Watcher { - type Target = WatchInner; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} diff --git a/third_party/rust/tokio-0.2.25/src/task/blocking.rs b/third_party/rust/tokio-0.2.25/src/task/blocking.rs deleted file mode 100644 index ed60f4c4734a..000000000000 --- a/third_party/rust/tokio-0.2.25/src/task/blocking.rs +++ /dev/null @@ -1,132 +0,0 @@ -use crate::task::JoinHandle; - -cfg_rt_threaded! { - /// Runs the provided blocking function on the current thread without - /// blocking the executor. - /// - /// In general, issuing a blocking call or performing a lot of compute in a - /// future without yielding is not okay, as it may prevent the executor from - /// driving other futures forward. This function runs the closure on the - /// current thread by having the thread temporarily cease from being a core - /// thread, and turns it into a blocking thread. See the [CPU-bound tasks - /// and blocking code][blocking] section for more information. - /// - /// Although this function avoids starving other independently spawned - /// tasks, any other code running concurrently in the same task will be - /// suspended during the call to `block_in_place`. This can happen e.g. when - /// using the [`join!`] macro. To avoid this issue, use [`spawn_blocking`] - /// instead. - /// - /// Note that this function can only be used on the [threaded scheduler]. - /// - /// Code running behind `block_in_place` cannot be cancelled. When you shut - /// down the executor, it will wait indefinitely for all blocking operations - /// to finish. You can use [`shutdown_timeout`] to stop waiting for them - /// after a certain timeout. Be aware that this will still not cancel the - /// tasks — they are simply allowed to keep running after the method - /// returns. 
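A minimal sketch of `block_in_place`, assuming the threaded scheduler (the tokio 0.2 default for `#[tokio::main]`) and the `blocking`/`rt-threaded` feature flags:

```rust
use tokio::task;

#[tokio::main]
async fn main() {
    // The closure runs on the current worker thread, which is temporarily
    // demoted to a blocking thread so the other workers keep driving tasks.
    let sum = task::block_in_place(|| (0u64..1_000_000).sum::<u64>());
    assert_eq!(sum, 499_999_500_000);
}
```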
- /// - /// [blocking]: ../index.html#cpu-bound-tasks-and-blocking-code - /// [threaded scheduler]: fn@crate::runtime::Builder::threaded_scheduler - /// [`spawn_blocking`]: fn@crate::task::spawn_blocking - /// [`join!`]: macro@join - /// [`thread::spawn`]: fn@std::thread::spawn - /// [`shutdown_timeout`]: fn@crate::runtime::Runtime::shutdown_timeout - /// - /// # Examples - /// - /// ``` - /// use tokio::task; - /// - /// # async fn docs() { - /// task::block_in_place(move || { - /// // do some compute-heavy work or call synchronous code - /// }); - /// # } - /// ``` - #[cfg_attr(docsrs, doc(cfg(feature = "blocking")))] - pub fn block_in_place(f: F) -> R - where - F: FnOnce() -> R, - { - crate::runtime::thread_pool::block_in_place(f) - } -} - -cfg_blocking! { - /// Runs the provided closure on a thread where blocking is acceptable. - /// - /// In general, issuing a blocking call or performing a lot of compute in a - /// future without yielding is not okay, as it may prevent the executor from - /// driving other futures forward. This function runs the provided closure - /// on a thread dedicated to blocking operations. See the [CPU-bound tasks - /// and blocking code][blocking] section for more information. - /// - /// Tokio will spawn more blocking threads when they are requested through - /// this function until the upper limit configured on the [`Builder`] is - /// reached. This limit is very large by default, because `spawn_blocking` is - /// often used for various kinds of IO operations that cannot be performed - /// asynchronously. When you run CPU-bound code using `spawn_blocking`, you - /// should keep this large upper limit in mind; to run your CPU-bound - /// computations on only a few threads, you should use a separate thread - /// pool such as [rayon] rather than configuring the number of blocking - /// threads. - /// - /// This function is intended for non-async operations that eventually - /// finish on their own. If you want to spawn an ordinary thread, you should - /// use [`thread::spawn`] instead. - /// - /// Closures spawned using `spawn_blocking` cannot be cancelled. When you - /// shut down the executor, it will wait indefinitely for all blocking - /// operations to finish. You can use [`shutdown_timeout`] to stop waiting - /// for them after a certain timeout. Be aware that this will still not - /// cancel the tasks — they are simply allowed to keep running after the - /// method returns. - /// - /// Note that if you are using the [basic scheduler], this function will - /// still spawn additional threads for blocking operations. The basic - /// scheduler's single thread is only used for asynchronous code. 
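A minimal sketch of moving real blocking I/O onto the dedicated blocking pool, assuming tokio 0.2.x with the `blocking` feature enabled; the file path is only a placeholder:

```rust
use tokio::task;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // The blocking filesystem call runs off the async worker threads; the
    // first `?` handles the JoinError, the second the I/O error.
    let contents =
        task::spawn_blocking(|| std::fs::read_to_string("Cargo.toml")).await??;

    println!("read {} bytes", contents.len());
    Ok(())
}
```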
- /// - /// [`Builder`]: struct@crate::runtime::Builder - /// [blocking]: ../index.html#cpu-bound-tasks-and-blocking-code - /// [rayon]: https://docs.rs/rayon - /// [basic scheduler]: fn@crate::runtime::Builder::basic_scheduler - /// [`thread::spawn`]: fn@std::thread::spawn - /// [`shutdown_timeout`]: fn@crate::runtime::Runtime::shutdown_timeout - /// - /// # Examples - /// - /// ``` - /// use tokio::task; - /// - /// # async fn docs() -> Result<(), Box>{ - /// let res = task::spawn_blocking(move || { - /// // do some compute-heavy work or call synchronous code - /// "done computing" - /// }).await?; - /// - /// assert_eq!(res, "done computing"); - /// # Ok(()) - /// # } - /// ``` - pub fn spawn_blocking(f: F) -> JoinHandle - where - F: FnOnce() -> R + Send + 'static, - R: Send + 'static, - { - #[cfg(feature = "tracing")] - let f = { - let span = tracing::trace_span!( - target: "tokio::task", - "task", - kind = %"blocking", - function = %std::any::type_name::(), - ); - move || { - let _g = span.enter(); - f() - } - }; - crate::runtime::spawn_blocking(f) - } -} diff --git a/third_party/rust/tokio-0.2.25/src/task/local.rs b/third_party/rust/tokio-0.2.25/src/task/local.rs deleted file mode 100644 index 3c409edfb906..000000000000 --- a/third_party/rust/tokio-0.2.25/src/task/local.rs +++ /dev/null @@ -1,589 +0,0 @@ -//! Runs `!Send` futures on the current thread. -use crate::runtime::task::{self, JoinHandle, Task}; -use crate::sync::AtomicWaker; -use crate::util::linked_list::LinkedList; - -use std::cell::{Cell, RefCell}; -use std::collections::VecDeque; -use std::fmt; -use std::future::Future; -use std::marker::PhantomData; -use std::pin::Pin; -use std::sync::{Arc, Mutex}; -use std::task::Poll; - -use pin_project_lite::pin_project; - -cfg_rt_util! { - /// A set of tasks which are executed on the same thread. - /// - /// In some cases, it is necessary to run one or more futures that do not - /// implement [`Send`] and thus are unsafe to send between threads. In these - /// cases, a [local task set] may be used to schedule one or more `!Send` - /// futures to run together on the same thread. - /// - /// For example, the following code will not compile: - /// - /// ```rust,compile_fail - /// use std::rc::Rc; - /// - /// #[tokio::main] - /// async fn main() { - /// // `Rc` does not implement `Send`, and thus may not be sent between - /// // threads safely. - /// let unsend_data = Rc::new("my unsend data..."); - /// - /// let unsend_data = unsend_data.clone(); - /// // Because the `async` block here moves `unsend_data`, the future is `!Send`. - /// // Since `tokio::spawn` requires the spawned future to implement `Send`, this - /// // will not compile. - /// tokio::spawn(async move { - /// println!("{}", unsend_data); - /// // ... - /// }).await.unwrap(); - /// } - /// ``` - /// In order to spawn `!Send` futures, we can use a local task set to - /// schedule them on the thread calling [`Runtime::block_on`]. When running - /// inside of the local task set, we can use [`task::spawn_local`], which can - /// spawn `!Send` futures. For example: - /// - /// ```rust - /// use std::rc::Rc; - /// use tokio::task; - /// - /// #[tokio::main] - /// async fn main() { - /// let unsend_data = Rc::new("my unsend data..."); - /// - /// // Construct a local task set that can run `!Send` futures. - /// let local = task::LocalSet::new(); - /// - /// // Run the local task set. 
- /// local.run_until(async move { - /// let unsend_data = unsend_data.clone(); - /// // `spawn_local` ensures that the future is spawned on the local - /// // task set. - /// task::spawn_local(async move { - /// println!("{}", unsend_data); - /// // ... - /// }).await.unwrap(); - /// }).await; - /// } - /// ``` - /// - /// ## Awaiting a `LocalSet` - /// - /// Additionally, a `LocalSet` itself implements `Future`, completing when - /// *all* tasks spawned on the `LocalSet` complete. This can be used to run - /// several futures on a `LocalSet` and drive the whole set until they - /// complete. For example, - /// - /// ```rust - /// use tokio::{task, time}; - /// use std::rc::Rc; - /// - /// #[tokio::main] - /// async fn main() { - /// let unsend_data = Rc::new("world"); - /// let local = task::LocalSet::new(); - /// - /// let unsend_data2 = unsend_data.clone(); - /// local.spawn_local(async move { - /// // ... - /// println!("hello {}", unsend_data2) - /// }); - /// - /// local.spawn_local(async move { - /// time::delay_for(time::Duration::from_millis(100)).await; - /// println!("goodbye {}", unsend_data) - /// }); - /// - /// // ... - /// - /// local.await; - /// } - /// ``` - /// - /// [`Send`]: trait@std::marker::Send - /// [local task set]: struct@LocalSet - /// [`Runtime::block_on`]: method@crate::runtime::Runtime::block_on - /// [`task::spawn_local`]: fn@spawn_local - pub struct LocalSet { - /// Current scheduler tick - tick: Cell, - - /// State available from thread-local - context: Context, - - /// This type should not be Send. - _not_send: PhantomData<*const ()>, - } -} - -/// State available from the thread-local -struct Context { - /// Owned task set and local run queue - tasks: RefCell, - - /// State shared between threads. - shared: Arc, -} - -struct Tasks { - /// Collection of all active tasks spawned onto this executor. - owned: LinkedList>>, - - /// Local run queue sender and receiver. - queue: VecDeque>>, -} - -/// LocalSet state shared between threads. -struct Shared { - /// Remote run queue sender - queue: Mutex>>>, - - /// Wake the `LocalSet` task - waker: AtomicWaker, -} - -pin_project! { - #[derive(Debug)] - struct RunUntil<'a, F> { - local_set: &'a LocalSet, - #[pin] - future: F, - } -} - -scoped_thread_local!(static CURRENT: Context); - -cfg_rt_util! { - /// Spawns a `!Send` future on the local task set. - /// - /// The spawned future will be run on the same thread that called `spawn_local.` - /// This may only be called from the context of a local task set. - /// - /// # Panics - /// - /// - This function panics if called outside of a local task set. - /// - /// # Examples - /// - /// ```rust - /// use std::rc::Rc; - /// use tokio::task; - /// - /// #[tokio::main] - /// async fn main() { - /// let unsend_data = Rc::new("my unsend data..."); - /// - /// let local = task::LocalSet::new(); - /// - /// // Run the local task set. - /// local.run_until(async move { - /// let unsend_data = unsend_data.clone(); - /// task::spawn_local(async move { - /// println!("{}", unsend_data); - /// // ... - /// }).await.unwrap(); - /// }).await; - /// } - /// ``` - pub fn spawn_local(future: F) -> JoinHandle - where - F: Future + 'static, - F::Output: 'static, - { - let future = crate::util::trace::task(future, "local"); - CURRENT.with(|maybe_cx| { - let cx = maybe_cx - .expect("`spawn_local` called from outside of a `task::LocalSet`"); - - // Safety: Tasks are only polled and dropped from the thread that - // spawns them. 
- let (task, handle) = unsafe { task::joinable_local(future) }; - cx.tasks.borrow_mut().queue.push_back(task); - handle - }) - } -} - -/// Initial queue capacity -const INITIAL_CAPACITY: usize = 64; - -/// Max number of tasks to poll per tick. -const MAX_TASKS_PER_TICK: usize = 61; - -/// How often it check the remote queue first -const REMOTE_FIRST_INTERVAL: u8 = 31; - -impl LocalSet { - /// Returns a new local task set. - pub fn new() -> LocalSet { - LocalSet { - tick: Cell::new(0), - context: Context { - tasks: RefCell::new(Tasks { - owned: LinkedList::new(), - queue: VecDeque::with_capacity(INITIAL_CAPACITY), - }), - shared: Arc::new(Shared { - queue: Mutex::new(VecDeque::with_capacity(INITIAL_CAPACITY)), - waker: AtomicWaker::new(), - }), - }, - _not_send: PhantomData, - } - } - - /// Spawns a `!Send` task onto the local task set. - /// - /// This task is guaranteed to be run on the current thread. - /// - /// Unlike the free function [`spawn_local`], this method may be used to - /// spawn local tasks when the task set is _not_ running. For example: - /// ```rust - /// use tokio::task; - /// - /// #[tokio::main] - /// async fn main() { - /// let local = task::LocalSet::new(); - /// - /// // Spawn a future on the local set. This future will be run when - /// // we call `run_until` to drive the task set. - /// local.spawn_local(async { - /// // ... - /// }); - /// - /// // Run the local task set. - /// local.run_until(async move { - /// // ... - /// }).await; - /// - /// // When `run` finishes, we can spawn _more_ futures, which will - /// // run in subsequent calls to `run_until`. - /// local.spawn_local(async { - /// // ... - /// }); - /// - /// local.run_until(async move { - /// // ... - /// }).await; - /// } - /// ``` - /// [`spawn_local`]: fn@spawn_local - pub fn spawn_local(&self, future: F) -> JoinHandle - where - F: Future + 'static, - F::Output: 'static, - { - let future = crate::util::trace::task(future, "local"); - let (task, handle) = unsafe { task::joinable_local(future) }; - self.context.tasks.borrow_mut().queue.push_back(task); - handle - } - - /// Runs a future to completion on the provided runtime, driving any local - /// futures spawned on this task set on the current thread. - /// - /// This runs the given future on the runtime, blocking until it is - /// complete, and yielding its resolved result. Any tasks or timers which - /// the future spawns internally will be executed on the runtime. The future - /// may also call [`spawn_local`] to spawn_local additional local futures on the - /// current thread. - /// - /// This method should not be called from an asynchronous context. - /// - /// # Panics - /// - /// This function panics if the executor is at capacity, if the provided - /// future panics, or if called within an asynchronous execution context. - /// - /// # Notes - /// - /// Since this function internally calls [`Runtime::block_on`], and drives - /// futures in the local task set inside that call to `block_on`, the local - /// futures may not use [in-place blocking]. If a blocking call needs to be - /// issued from a local task, the [`spawn_blocking`] API may be used instead. - /// - /// For example, this will panic: - /// ```should_panic - /// use tokio::runtime::Runtime; - /// use tokio::task; - /// - /// let mut rt = Runtime::new().unwrap(); - /// let local = task::LocalSet::new(); - /// local.block_on(&mut rt, async { - /// let join = task::spawn_local(async { - /// let blocking_result = task::block_in_place(|| { - /// // ... - /// }); - /// // ... 
- /// }); - /// join.await.unwrap(); - /// }) - /// ``` - /// This, however, will not panic: - /// ``` - /// use tokio::runtime::Runtime; - /// use tokio::task; - /// - /// let mut rt = Runtime::new().unwrap(); - /// let local = task::LocalSet::new(); - /// local.block_on(&mut rt, async { - /// let join = task::spawn_local(async { - /// let blocking_result = task::spawn_blocking(|| { - /// // ... - /// }).await; - /// // ... - /// }); - /// join.await.unwrap(); - /// }) - /// ``` - /// - /// [`spawn_local`]: fn@spawn_local - /// [`Runtime::block_on`]: method@crate::runtime::Runtime::block_on - /// [in-place blocking]: fn@crate::task::block_in_place - /// [`spawn_blocking`]: fn@crate::task::spawn_blocking - pub fn block_on(&self, rt: &mut crate::runtime::Runtime, future: F) -> F::Output - where - F: Future, - { - rt.block_on(self.run_until(future)) - } - - /// Run a future to completion on the local set, returning its output. - /// - /// This returns a future that runs the given future with a local set, - /// allowing it to call [`spawn_local`] to spawn additional `!Send` futures. - /// Any local futures spawned on the local set will be driven in the - /// background until the future passed to `run_until` completes. When the future - /// passed to `run` finishes, any local futures which have not completed - /// will remain on the local set, and will be driven on subsequent calls to - /// `run_until` or when [awaiting the local set] itself. - /// - /// # Examples - /// - /// ```rust - /// use tokio::task; - /// - /// #[tokio::main] - /// async fn main() { - /// task::LocalSet::new().run_until(async { - /// task::spawn_local(async move { - /// // ... - /// }).await.unwrap(); - /// // ... - /// }).await; - /// } - /// ``` - /// - /// [`spawn_local`]: fn@spawn_local - /// [awaiting the local set]: #awaiting-a-localset - pub async fn run_until(&self, future: F) -> F::Output - where - F: Future, - { - let run_until = RunUntil { - future, - local_set: self, - }; - run_until.await - } - - /// Tick the scheduler, returning whether the local future needs to be - /// notified again. - fn tick(&self) -> bool { - for _ in 0..MAX_TASKS_PER_TICK { - match self.next_task() { - // Run the task - // - // Safety: As spawned tasks are `!Send`, `run_unchecked` must be - // used. We are responsible for maintaining the invariant that - // `run_unchecked` is only called on threads that spawned the - // task initially. Because `LocalSet` itself is `!Send`, and - // `spawn_local` spawns into the `LocalSet` on the current - // thread, the invariant is maintained. - Some(task) => crate::coop::budget(|| task.run()), - // We have fully drained the queue of notified tasks, so the - // local future doesn't need to be notified again — it can wait - // until something else wakes a task in the local set. 
- None => return false, - } - } - - true - } - - fn next_task(&self) -> Option>> { - let tick = self.tick.get(); - self.tick.set(tick.wrapping_add(1)); - - if tick % REMOTE_FIRST_INTERVAL == 0 { - self.context - .shared - .queue - .lock() - .unwrap() - .pop_front() - .or_else(|| self.context.tasks.borrow_mut().queue.pop_front()) - } else { - self.context - .tasks - .borrow_mut() - .queue - .pop_front() - .or_else(|| self.context.shared.queue.lock().unwrap().pop_front()) - } - } - - fn with(&self, f: impl FnOnce() -> T) -> T { - CURRENT.set(&self.context, f) - } -} - -impl fmt::Debug for LocalSet { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("LocalSet").finish() - } -} - -impl Future for LocalSet { - type Output = (); - - fn poll(self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll { - // Register the waker before starting to work - self.context.shared.waker.register_by_ref(cx.waker()); - - if self.with(|| self.tick()) { - // If `tick` returns true, we need to notify the local future again: - // there are still tasks remaining in the run queue. - cx.waker().wake_by_ref(); - Poll::Pending - } else if self.context.tasks.borrow().owned.is_empty() { - // If the scheduler has no remaining futures, we're done! - Poll::Ready(()) - } else { - // There are still futures in the local set, but we've polled all the - // futures in the run queue. Therefore, we can just return Pending - // since the remaining futures will be woken from somewhere else. - Poll::Pending - } - } -} - -impl Default for LocalSet { - fn default() -> LocalSet { - LocalSet::new() - } -} - -impl Drop for LocalSet { - fn drop(&mut self) { - self.with(|| { - // Loop required here to ensure borrow is dropped between iterations - #[allow(clippy::while_let_loop)] - loop { - let task = match self.context.tasks.borrow_mut().owned.pop_back() { - Some(task) => task, - None => break, - }; - - // Safety: same as `run_unchecked`. - task.shutdown(); - } - - for task in self.context.tasks.borrow_mut().queue.drain(..) { - task.shutdown(); - } - - for task in self.context.shared.queue.lock().unwrap().drain(..) { - task.shutdown(); - } - - assert!(self.context.tasks.borrow().owned.is_empty()); - }); - } -} - -// === impl LocalFuture === - -impl Future for RunUntil<'_, T> { - type Output = T::Output; - - fn poll(self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll { - let me = self.project(); - - me.local_set.with(|| { - me.local_set - .context - .shared - .waker - .register_by_ref(cx.waker()); - - let _no_blocking = crate::runtime::enter::disallow_blocking(); - let f = me.future; - - if let Poll::Ready(output) = crate::coop::budget(|| f.poll(cx)) { - return Poll::Ready(output); - } - - if me.local_set.tick() { - // If `tick` returns `true`, we need to notify the local future again: - // there are still tasks remaining in the run queue. - cx.waker().wake_by_ref(); - } - - Poll::Pending - }) - } -} - -impl Shared { - /// Schedule the provided task on the scheduler. 
- fn schedule(&self, task: task::Notified>) { - CURRENT.with(|maybe_cx| match maybe_cx { - Some(cx) if cx.shared.ptr_eq(self) => { - cx.tasks.borrow_mut().queue.push_back(task); - } - _ => { - self.queue.lock().unwrap().push_back(task); - self.waker.wake(); - } - }); - } - - fn ptr_eq(&self, other: &Shared) -> bool { - self as *const _ == other as *const _ - } -} - -impl task::Schedule for Arc { - fn bind(task: Task) -> Arc { - CURRENT.with(|maybe_cx| { - let cx = maybe_cx.expect("scheduler context missing"); - cx.tasks.borrow_mut().owned.push_front(task); - cx.shared.clone() - }) - } - - fn release(&self, task: &Task) -> Option> { - use std::ptr::NonNull; - - CURRENT.with(|maybe_cx| { - let cx = maybe_cx.expect("scheduler context missing"); - - assert!(cx.shared.ptr_eq(self)); - - let ptr = NonNull::from(task.header()); - // safety: task must be contained by list. It is inserted into the - // list in `bind`. - unsafe { cx.tasks.borrow_mut().owned.remove(ptr) } - }) - } - - fn schedule(&self, task: task::Notified) { - Shared::schedule(self, task); - } -} diff --git a/third_party/rust/tokio-0.2.25/src/task/mod.rs b/third_party/rust/tokio-0.2.25/src/task/mod.rs deleted file mode 100644 index 5c89393a5e2f..000000000000 --- a/third_party/rust/tokio-0.2.25/src/task/mod.rs +++ /dev/null @@ -1,242 +0,0 @@ -//! Asynchronous green-threads. -//! -//! ## What are Tasks? -//! -//! A _task_ is a light weight, non-blocking unit of execution. A task is similar -//! to an OS thread, but rather than being managed by the OS scheduler, they are -//! managed by the [Tokio runtime][rt]. Another name for this general pattern is -//! [green threads]. If you are familiar with [Go's goroutines], [Kotlin's -//! coroutines], or [Erlang's processes], you can think of Tokio's tasks as -//! something similar. -//! -//! Key points about tasks include: -//! -//! * Tasks are **light weight**. Because tasks are scheduled by the Tokio -//! runtime rather than the operating system, creating new tasks or switching -//! between tasks does not require a context switch and has fairly low -//! overhead. Creating, running, and destroying large numbers of tasks is -//! quite cheap, especially compared to OS threads. -//! -//! * Tasks are scheduled **cooperatively**. Most operating systems implement -//! _preemptive multitasking_. This is a scheduling technique where the -//! operating system allows each thread to run for a period of time, and then -//! _preempts_ it, temporarily pausing that thread and switching to another. -//! Tasks, on the other hand, implement _cooperative multitasking_. In -//! cooperative multitasking, a task is allowed to run until it _yields_, -//! indicating to the Tokio runtime's scheduler that it cannot currently -//! continue executing. When a task yields, the Tokio runtime switches to -//! executing the next task. -//! -//! * Tasks are **non-blocking**. Typically, when an OS thread performs I/O or -//! must synchronize with another thread, it _blocks_, allowing the OS to -//! schedule another thread. When a task cannot continue executing, it must -//! yield instead, allowing the Tokio runtime to schedule another task. Tasks -//! should generally not perform system calls or other operations that could -//! block a thread, as this would prevent other tasks running on the same -//! thread from executing as well. Instead, this module provides APIs for -//! running blocking operations in an asynchronous context. -//! -//! [rt]: crate::runtime -//! 
[green threads]: https://en.wikipedia.org/wiki/Green_threads -//! [Go's goroutines]: https://tour.golang.org/concurrency/1 -//! [Kotlin's coroutines]: https://kotlinlang.org/docs/reference/coroutines-overview.html -//! [Erlang's processes]: http://erlang.org/doc/getting_started/conc_prog.html#processes -//! -//! ## Working with Tasks -//! -//! This module provides the following APIs for working with tasks: -//! -//! ### Spawning -//! -//! Perhaps the most important function in this module is [`task::spawn`]. This -//! function can be thought of as an async equivalent to the standard library's -//! [`thread::spawn`][`std::thread::spawn`]. It takes an `async` block or other -//! [future], and creates a new task to run that work concurrently: -//! -//! ``` -//! use tokio::task; -//! -//! # async fn doc() { -//! task::spawn(async { -//! // perform some work here... -//! }); -//! # } -//! ``` -//! -//! Like [`std::thread::spawn`], `task::spawn` returns a [`JoinHandle`] struct. -//! A `JoinHandle` is itself a future which may be used to await the output of -//! the spawned task. For example: -//! -//! ``` -//! use tokio::task; -//! -//! # #[tokio::main] async fn main() -> Result<(), Box> { -//! let join = task::spawn(async { -//! // ... -//! "hello world!" -//! }); -//! -//! // ... -//! -//! // Await the result of the spawned task. -//! let result = join.await?; -//! assert_eq!(result, "hello world!"); -//! # Ok(()) -//! # } -//! ``` -//! -//! Again, like `std::thread`'s [`JoinHandle` type][thread_join], if the spawned -//! task panics, awaiting its `JoinHandle` will return a [`JoinError`]`. For -//! example: -//! -//! ``` -//! use tokio::task; -//! -//! # #[tokio::main] async fn main() { -//! let join = task::spawn(async { -//! panic!("something bad happened!") -//! }); -//! -//! // The returned result indicates that the task failed. -//! assert!(join.await.is_err()); -//! # } -//! ``` -//! -//! `spawn`, `JoinHandle`, and `JoinError` are present when the "rt-core" -//! feature flag is enabled. -//! -//! [`task::spawn`]: crate::task::spawn() -//! [future]: std::future::Future -//! [`std::thread::spawn`]: std::thread::spawn -//! [`JoinHandle`]: crate::task::JoinHandle -//! [thread_join]: std::thread::JoinHandle -//! [`JoinError`]: crate::task::JoinError -//! -//! ### Blocking and Yielding -//! -//! As we discussed above, code running in asynchronous tasks should not perform -//! operations that can block. A blocking operation performed in a task running -//! on a thread that is also running other tasks would block the entire thread, -//! preventing other tasks from running. -//! -//! Instead, Tokio provides two APIs for running blocking operations in an -//! asynchronous context: [`task::spawn_blocking`] and [`task::block_in_place`]. -//! -//! #### spawn_blocking -//! -//! The `task::spawn_blocking` function is similar to the `task::spawn` function -//! discussed in the previous section, but rather than spawning an -//! _non-blocking_ future on the Tokio runtime, it instead spawns a -//! _blocking_ function on a dedicated thread pool for blocking tasks. For -//! example: -//! -//! ``` -//! use tokio::task; -//! -//! # async fn docs() { -//! task::spawn_blocking(|| { -//! // do some compute-heavy work or call synchronous code -//! }); -//! # } -//! ``` -//! -//! Just like `task::spawn`, `task::spawn_blocking` returns a `JoinHandle` -//! which we can use to await the result of the blocking operation: -//! -//! ```rust -//! # use tokio::task; -//! # async fn docs() -> Result<(), Box>{ -//! 
let join = task::spawn_blocking(|| { -//! // do some compute-heavy work or call synchronous code -//! "blocking completed" -//! }); -//! -//! let result = join.await?; -//! assert_eq!(result, "blocking completed"); -//! # Ok(()) -//! # } -//! ``` -//! -//! #### block_in_place -//! -//! When using the [threaded runtime][rt-threaded], the [`task::block_in_place`] -//! function is also available. Like `task::spawn_blocking`, this function -//! allows running a blocking operation from an asynchronous context. Unlike -//! `spawn_blocking`, however, `block_in_place` works by transitioning the -//! _current_ worker thread to a blocking thread, moving other tasks running on -//! that thread to another worker thread. This can improve performance by avoiding -//! context switches. -//! -//! For example: -//! -//! ``` -//! use tokio::task; -//! -//! # async fn docs() { -//! let result = task::block_in_place(|| { -//! // do some compute-heavy work or call synchronous code -//! "blocking completed" -//! }); -//! -//! assert_eq!(result, "blocking completed"); -//! # } -//! ``` -//! -//! #### yield_now -//! -//! In addition, this module provides a [`task::yield_now`] async function -//! that is analogous to the standard library's [`thread::yield_now`]. Calling -//! and `await`ing this function will cause the current task to yield to the -//! Tokio runtime's scheduler, allowing other tasks to be -//! scheduled. Eventually, the yielding task will be polled again, allowing it -//! to execute. For example: -//! -//! ```rust -//! use tokio::task; -//! -//! # #[tokio::main] async fn main() { -//! async { -//! task::spawn(async { -//! // ... -//! println!("spawned task done!") -//! }); -//! -//! // Yield, allowing the newly-spawned task to execute first. -//! task::yield_now().await; -//! println!("main task done!"); -//! } -//! # .await; -//! # } -//! ``` -//! -//! [`task::spawn_blocking`]: crate::task::spawn_blocking -//! [`task::block_in_place`]: crate::task::block_in_place -//! [rt-threaded]: ../runtime/index.html#threaded-scheduler -//! [`task::yield_now`]: crate::task::yield_now() -//! [`thread::yield_now`]: std::thread::yield_now -cfg_blocking! { - mod blocking; - pub use blocking::spawn_blocking; - - cfg_rt_threaded! { - pub use blocking::block_in_place; - } -} - -cfg_rt_core! { - pub use crate::runtime::task::{JoinError, JoinHandle}; - - mod spawn; - pub use spawn::spawn; - - mod yield_now; - pub use yield_now::yield_now; -} - -cfg_rt_util! { - mod local; - pub use local::{spawn_local, LocalSet}; - - mod task_local; - pub use task_local::LocalKey; -} diff --git a/third_party/rust/tokio-0.2.25/src/task/spawn.rs b/third_party/rust/tokio-0.2.25/src/task/spawn.rs deleted file mode 100644 index 014a01952bef..000000000000 --- a/third_party/rust/tokio-0.2.25/src/task/spawn.rs +++ /dev/null @@ -1,135 +0,0 @@ -use crate::runtime; -use crate::task::JoinHandle; - -use std::future::Future; - -doc_rt_core! { - /// Spawns a new asynchronous task, returning a - /// [`JoinHandle`](super::JoinHandle) for it. - /// - /// Spawning a task enables the task to execute concurrently to other tasks. The - /// spawned task may execute on the current thread, or it may be sent to a - /// different thread to be executed. The specifics depend on the current - /// [`Runtime`](crate::runtime::Runtime) configuration. - /// - /// There is no guarantee that a spawned task will execute to completion. - /// When a runtime is shutdown, all outstanding tasks are dropped, - /// regardless of the lifecycle of that task. 
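A minimal sketch of spawning several tasks and joining their handles, assuming the tokio 0.2.x `rt-core` API described here:

```rust
use tokio::task;

#[tokio::main]
async fn main() {
    // Spawn a few tasks up front and keep their handles.
    let handles: Vec<_> = (0..4u64)
        .map(|i| task::spawn(async move { i * 2 }))
        .collect();

    // Each `JoinHandle` yields the task's output (or a `JoinError` if the
    // task panicked or was cancelled).
    let mut total = 0;
    for handle in handles {
        total += handle.await.unwrap();
    }
    assert_eq!(total, 0 + 2 + 4 + 6);
}
```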
- /// - /// This function must be called from the context of a Tokio runtime. Tasks running on - /// the Tokio runtime are always inside its context, but you can also enter the context - /// using the [`Handle::enter`](crate::runtime::Handle::enter()) method. - /// - /// # Examples - /// - /// In this example, a server is started and `spawn` is used to start a new task - /// that processes each received connection. - /// - /// ```no_run - /// use tokio::net::{TcpListener, TcpStream}; - /// - /// use std::io; - /// - /// async fn process(socket: TcpStream) { - /// // ... - /// # drop(socket); - /// } - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut listener = TcpListener::bind("127.0.0.1:8080").await?; - /// - /// loop { - /// let (socket, _) = listener.accept().await?; - /// - /// tokio::spawn(async move { - /// // Process each socket concurrently. - /// process(socket).await - /// }); - /// } - /// } - /// ``` - /// - /// # Panics - /// - /// Panics if called from **outside** of the Tokio runtime. - /// - /// # Using `!Send` values from a task - /// - /// The task supplied to `spawn` must implement `Send`. However, it is - /// possible to **use** `!Send` values from the task as long as they only - /// exist between calls to `.await`. - /// - /// For example, this will work: - /// - /// ``` - /// use tokio::task; - /// - /// use std::rc::Rc; - /// - /// fn use_rc(rc: Rc<()>) { - /// // Do stuff w/ rc - /// # drop(rc); - /// } - /// - /// #[tokio::main] - /// async fn main() { - /// tokio::spawn(async { - /// // Force the `Rc` to stay in a scope with no `.await` - /// { - /// let rc = Rc::new(()); - /// use_rc(rc.clone()); - /// } - /// - /// task::yield_now().await; - /// }).await.unwrap(); - /// } - /// ``` - /// - /// This will **not** work: - /// - /// ```compile_fail - /// use tokio::task; - /// - /// use std::rc::Rc; - /// - /// fn use_rc(rc: Rc<()>) { - /// // Do stuff w/ rc - /// # drop(rc); - /// } - /// - /// #[tokio::main] - /// async fn main() { - /// tokio::spawn(async { - /// let rc = Rc::new(()); - /// - /// task::yield_now().await; - /// - /// use_rc(rc.clone()); - /// }).await.unwrap(); - /// } - /// ``` - /// - /// Holding on to a `!Send` value across calls to `.await` will result in - /// an unfriendly compile error message similar to: - /// - /// ```text - /// `[... some type ...]` cannot be sent between threads safely - /// ``` - /// - /// or: - /// - /// ```text - /// error[E0391]: cycle detected when processing `main` - /// ``` - pub fn spawn(task: T) -> JoinHandle - where - T: Future + Send + 'static, - T::Output: Send + 'static, - { - let spawn_handle = runtime::context::spawn_handle() - .expect("must be called from the context of a Tokio 0.2.x runtime configured with either `basic_scheduler` or `threaded_scheduler`"); - let task = crate::util::trace::task(task, "task"); - spawn_handle.spawn(task) - } -} diff --git a/third_party/rust/tokio-0.2.25/src/task/task_local.rs b/third_party/rust/tokio-0.2.25/src/task/task_local.rs deleted file mode 100644 index d161f2014fca..000000000000 --- a/third_party/rust/tokio-0.2.25/src/task/task_local.rs +++ /dev/null @@ -1,242 +0,0 @@ -use pin_project_lite::pin_project; -use std::cell::RefCell; -use std::error::Error; -use std::future::Future; -use std::pin::Pin; -use std::task::{Context, Poll}; -use std::{fmt, thread}; - -/// Declares a new task-local key of type [`tokio::task::LocalKey`]. 
-/// -/// # Syntax -/// -/// The macro wraps any number of static declarations and makes them local to the current task. -/// Publicity and attributes for each static is preserved. For example: -/// -/// # Examples -/// -/// ``` -/// # use tokio::task_local; -/// task_local! { -/// pub static ONE: u32; -/// -/// #[allow(unused)] -/// static TWO: f32; -/// } -/// # fn main() {} -/// ``` -/// -/// See [LocalKey documentation][`tokio::task::LocalKey`] for more -/// information. -/// -/// [`tokio::task::LocalKey`]: struct@crate::task::LocalKey -#[macro_export] -#[cfg_attr(docsrs, doc(cfg(all(feature = "rt-util", feature = "rt-core"))))] -macro_rules! task_local { - // empty (base case for the recursion) - () => {}; - - ($(#[$attr:meta])* $vis:vis static $name:ident: $t:ty; $($rest:tt)*) => { - $crate::__task_local_inner!($(#[$attr])* $vis $name, $t); - $crate::task_local!($($rest)*); - }; - - ($(#[$attr:meta])* $vis:vis static $name:ident: $t:ty) => { - $crate::__task_local_inner!($(#[$attr])* $vis $name, $t); - } -} - -#[doc(hidden)] -#[macro_export] -macro_rules! __task_local_inner { - ($(#[$attr:meta])* $vis:vis $name:ident, $t:ty) => { - $vis static $name: $crate::task::LocalKey<$t> = { - std::thread_local! { - static __KEY: std::cell::RefCell> = std::cell::RefCell::new(None); - } - - $crate::task::LocalKey { inner: __KEY } - }; - }; -} - -/// A key for task-local data. -/// -/// This type is generated by the `task_local!` macro. -/// -/// Unlike [`std::thread::LocalKey`], `tokio::task::LocalKey` will -/// _not_ lazily initialize the value on first access. Instead, the -/// value is first initialized when the future containing -/// the task-local is first polled by a futures executor, like Tokio. -/// -/// # Examples -/// -/// ``` -/// # async fn dox() { -/// tokio::task_local! { -/// static NUMBER: u32; -/// } -/// -/// NUMBER.scope(1, async move { -/// assert_eq!(NUMBER.get(), 1); -/// }).await; -/// -/// NUMBER.scope(2, async move { -/// assert_eq!(NUMBER.get(), 2); -/// -/// NUMBER.scope(3, async move { -/// assert_eq!(NUMBER.get(), 3); -/// }).await; -/// }).await; -/// # } -/// ``` -/// [`std::thread::LocalKey`]: struct@std::thread::LocalKey -#[cfg_attr(docsrs, doc(cfg(all(feature = "rt-util", feature = "rt-core"))))] -pub struct LocalKey { - #[doc(hidden)] - pub inner: thread::LocalKey>>, -} - -impl LocalKey { - /// Sets a value `T` as the task-local value for the future `F`. - /// - /// On completion of `scope`, the task-local will be dropped. - /// - /// ### Examples - /// - /// ``` - /// # async fn dox() { - /// tokio::task_local! { - /// static NUMBER: u32; - /// } - /// - /// NUMBER.scope(1, async move { - /// println!("task local value: {}", NUMBER.get()); - /// }).await; - /// # } - /// ``` - pub async fn scope(&'static self, value: T, f: F) -> F::Output - where - F: Future, - { - TaskLocalFuture { - local: &self, - slot: Some(value), - future: f, - } - .await - } - - /// Accesses the current task-local and runs the provided closure. - /// - /// # Panics - /// - /// This function will panic if not called within the context - /// of a future containing a task-local with the corresponding key. - pub fn with(&'static self, f: F) -> R - where - F: FnOnce(&T) -> R, - { - self.try_with(f).expect( - "cannot access a Task Local Storage value \ - without setting it via `LocalKey::set`", - ) - } - - /// Accesses the current task-local and runs the provided closure. - /// - /// If the task-local with the accociated key is not present, this - /// method will return an `AccessError`. 
For a panicking variant, - /// see `with`. - pub fn try_with(&'static self, f: F) -> Result - where - F: FnOnce(&T) -> R, - { - self.inner.with(|v| { - if let Some(val) = v.borrow().as_ref() { - Ok(f(val)) - } else { - Err(AccessError { _private: () }) - } - }) - } -} - -impl LocalKey { - /// Returns a copy of the task-local value - /// if the task-local value implements `Copy`. - pub fn get(&'static self) -> T { - self.with(|v| *v) - } -} - -impl fmt::Debug for LocalKey { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.pad("LocalKey { .. }") - } -} - -pin_project! { - struct TaskLocalFuture { - local: &'static LocalKey, - slot: Option, - #[pin] - future: F, - } -} - -impl Future for TaskLocalFuture { - type Output = F::Output; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - struct Guard<'a, T: 'static> { - local: &'static LocalKey, - slot: &'a mut Option, - prev: Option, - } - - impl Drop for Guard<'_, T> { - fn drop(&mut self) { - let value = self.local.inner.with(|c| c.replace(self.prev.take())); - *self.slot = value; - } - } - - let mut project = self.project(); - let val = project.slot.take(); - - let prev = project.local.inner.with(|c| c.replace(val)); - - let _guard = Guard { - prev, - slot: &mut project.slot, - local: *project.local, - }; - - project.future.poll(cx) - } -} - -// Required to make `pin_project` happy. -trait StaticLifetime: 'static {} -impl StaticLifetime for T {} - -/// An error returned by [`LocalKey::try_with`](method@LocalKey::try_with). -#[derive(Clone, Copy, Eq, PartialEq)] -pub struct AccessError { - _private: (), -} - -impl fmt::Debug for AccessError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("AccessError").finish() - } -} - -impl fmt::Display for AccessError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Display::fmt("task-local value not set", f) - } -} - -impl Error for AccessError {} diff --git a/third_party/rust/tokio-0.2.25/src/task/yield_now.rs b/third_party/rust/tokio-0.2.25/src/task/yield_now.rs deleted file mode 100644 index e0e20841c963..000000000000 --- a/third_party/rust/tokio-0.2.25/src/task/yield_now.rs +++ /dev/null @@ -1,38 +0,0 @@ -use std::future::Future; -use std::pin::Pin; -use std::task::{Context, Poll}; - -doc_rt_core! { - /// Yields execution back to the Tokio runtime. - /// - /// A task yields by awaiting on `yield_now()`, and may resume when that - /// future completes (with no output.) The current task will be re-added as - /// a pending task at the _back_ of the pending queue. Any other pending - /// tasks will be scheduled. No other waking is required for the task to - /// continue. - /// - /// See also the usage example in the [task module](index.html#yield_now). - #[must_use = "yield_now does nothing unless polled/`await`-ed"] - pub async fn yield_now() { - /// Yield implementation - struct YieldNow { - yielded: bool, - } - - impl Future for YieldNow { - type Output = (); - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> { - if self.yielded { - return Poll::Ready(()); - } - - self.yielded = true; - cx.waker().wake_by_ref(); - Poll::Pending - } - } - - YieldNow { yielded: false }.await - } -} diff --git a/third_party/rust/tokio-0.2.25/src/time/clock.rs b/third_party/rust/tokio-0.2.25/src/time/clock.rs deleted file mode 100644 index bd67d7a31d75..000000000000 --- a/third_party/rust/tokio-0.2.25/src/time/clock.rs +++ /dev/null @@ -1,165 +0,0 @@ -//! Source of time abstraction. -//! -//! 
By default, `std::time::Instant::now()` is used. However, when the -//! `test-util` feature flag is enabled, the values returned for `now()` are -//! configurable. - -cfg_not_test_util! { - use crate::time::{Duration, Instant}; - - #[derive(Debug, Clone)] - pub(crate) struct Clock {} - - pub(crate) fn now() -> Instant { - Instant::from_std(std::time::Instant::now()) - } - - impl Clock { - pub(crate) fn new() -> Clock { - Clock {} - } - - pub(crate) fn now(&self) -> Instant { - now() - } - - pub(crate) fn is_paused(&self) -> bool { - false - } - - pub(crate) fn advance(&self, _dur: Duration) { - unreachable!(); - } - } -} - -cfg_test_util! { - use crate::time::{Duration, Instant}; - use std::sync::{Arc, Mutex}; - use crate::runtime::context; - - /// A handle to a source of time. - #[derive(Debug, Clone)] - pub(crate) struct Clock { - inner: Arc>, - } - - #[derive(Debug)] - struct Inner { - /// Instant to use as the clock's base instant. - base: std::time::Instant, - - /// Instant at which the clock was last unfrozen - unfrozen: Option, - } - - /// Pause time - /// - /// The current value of `Instant::now()` is saved and all subsequent calls - /// to `Instant::now()` until the timer wheel is checked again will return the saved value. - /// Once the timer wheel is checked, time will immediately advance to the next registered - /// `Delay`. This is useful for running tests that depend on time. - /// - /// # Panics - /// - /// Panics if time is already frozen or if called from outside of the Tokio - /// runtime. - pub fn pause() { - let clock = context::clock().expect("time cannot be frozen from outside the Tokio runtime"); - clock.pause(); - } - - /// Resume time - /// - /// Clears the saved `Instant::now()` value. Subsequent calls to - /// `Instant::now()` will return the value returned by the system call. - /// - /// # Panics - /// - /// Panics if time is not frozen or if called from outside of the Tokio - /// runtime. - pub fn resume() { - let clock = context::clock().expect("time cannot be frozen from outside the Tokio runtime"); - let mut inner = clock.inner.lock().unwrap(); - - if inner.unfrozen.is_some() { - panic!("time is not frozen"); - } - - inner.unfrozen = Some(std::time::Instant::now()); - } - - /// Advance time - /// - /// Increments the saved `Instant::now()` value by `duration`. Subsequent - /// calls to `Instant::now()` will return the result of the increment. - /// - /// # Panics - /// - /// Panics if time is not frozen or if called from outside of the Tokio - /// runtime. - pub async fn advance(duration: Duration) { - let clock = context::clock().expect("time cannot be frozen from outside the Tokio runtime"); - clock.advance(duration); - crate::task::yield_now().await; - } - - /// Return the current instant, factoring in frozen time. - pub(crate) fn now() -> Instant { - if let Some(clock) = context::clock() { - clock.now() - } else { - Instant::from_std(std::time::Instant::now()) - } - } - - impl Clock { - /// Return a new `Clock` instance that uses the current execution context's - /// source of time. 
- pub(crate) fn new() -> Clock { - let now = std::time::Instant::now(); - - Clock { - inner: Arc::new(Mutex::new(Inner { - base: now, - unfrozen: Some(now), - })), - } - } - - pub(crate) fn pause(&self) { - let mut inner = self.inner.lock().unwrap(); - - let elapsed = inner.unfrozen.as_ref().expect("time is already frozen").elapsed(); - inner.base += elapsed; - inner.unfrozen = None; - } - - pub(crate) fn is_paused(&self) -> bool { - let inner = self.inner.lock().unwrap(); - inner.unfrozen.is_none() - } - - pub(crate) fn advance(&self, duration: Duration) { - let mut inner = self.inner.lock().unwrap(); - - if inner.unfrozen.is_some() { - panic!("time is not frozen"); - } - - inner.base += duration; - } - - pub(crate) fn now(&self) -> Instant { - let inner = self.inner.lock().unwrap(); - - let mut ret = inner.base; - - if let Some(unfrozen) = inner.unfrozen { - ret += unfrozen.elapsed(); - } - - Instant::from_std(ret) - } - } -} diff --git a/third_party/rust/tokio-0.2.25/src/time/delay.rs b/third_party/rust/tokio-0.2.25/src/time/delay.rs deleted file mode 100644 index 744c7e16aead..000000000000 --- a/third_party/rust/tokio-0.2.25/src/time/delay.rs +++ /dev/null @@ -1,118 +0,0 @@ -use crate::time::driver::Registration; -use crate::time::{Duration, Instant}; - -use std::future::Future; -use std::pin::Pin; -use std::task::{self, Poll}; - -/// Waits until `deadline` is reached. -/// -/// No work is performed while awaiting on the delay to complete. The delay -/// operates at millisecond granularity and should not be used for tasks that -/// require high-resolution timers. -/// -/// # Cancellation -/// -/// Canceling a delay is done by dropping the returned future. No additional -/// cleanup work is required. -pub fn delay_until(deadline: Instant) -> Delay { - let registration = Registration::new(deadline, Duration::from_millis(0)); - Delay { registration } -} - -/// Waits until `duration` has elapsed. -/// -/// Equivalent to `delay_until(Instant::now() + duration)`. An asynchronous -/// analog to `std::thread::sleep`. -/// -/// No work is performed while awaiting on the delay to complete. The delay -/// operates at millisecond granularity and should not be used for tasks that -/// require high-resolution timers. -/// -/// To run something regularly on a schedule, see [`interval`]. -/// -/// # Cancellation -/// -/// Canceling a delay is done by dropping the returned future. No additional -/// cleanup work is required. -/// -/// # Examples -/// -/// Wait 100ms and print "100 ms have elapsed". -/// -/// ``` -/// use tokio::time::{delay_for, Duration}; -/// -/// #[tokio::main] -/// async fn main() { -/// delay_for(Duration::from_millis(100)).await; -/// println!("100 ms have elapsed"); -/// } -/// ``` -/// -/// [`interval`]: crate::time::interval() -#[cfg_attr(docsrs, doc(alias = "sleep"))] -pub fn delay_for(duration: Duration) -> Delay { - delay_until(Instant::now() + duration) -} - -/// Future returned by [`delay_until`](delay_until) and -/// [`delay_for`](delay_for). -#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct Delay { - /// The link between the `Delay` instance and the timer that drives it. - /// - /// This also stores the `deadline` value. - registration: Registration, -} - -impl Delay { - pub(crate) fn new_timeout(deadline: Instant, duration: Duration) -> Delay { - let registration = Registration::new(deadline, duration); - Delay { registration } - } - - /// Returns the instant at which the future will complete. 
- pub fn deadline(&self) -> Instant { - self.registration.deadline() - } - - /// Returns `true` if the `Delay` has elapsed - /// - /// A `Delay` is elapsed when the requested duration has elapsed. - pub fn is_elapsed(&self) -> bool { - self.registration.is_elapsed() - } - - /// Resets the `Delay` instance to a new deadline. - /// - /// Calling this function allows changing the instant at which the `Delay` - /// future completes without having to create new associated state. - /// - /// This function can be called both before and after the future has - /// completed. - pub fn reset(&mut self, deadline: Instant) { - self.registration.reset(deadline); - } -} - -impl Future for Delay { - type Output = (); - - fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { - // `poll_elapsed` can return an error in two cases: - // - // - AtCapacity: this is a pathlogical case where far too many - // delays have been scheduled. - // - Shutdown: No timer has been setup, which is a mis-use error. - // - // Both cases are extremely rare, and pretty accurately fit into - // "logic errors", so we just panic in this case. A user couldn't - // really do much better if we passed the error onwards. - match ready!(self.registration.poll_elapsed(cx)) { - Ok(()) => Poll::Ready(()), - Err(e) => panic!("timer error: {}", e), - } - } -} diff --git a/third_party/rust/tokio-0.2.25/src/time/driver/atomic_stack.rs b/third_party/rust/tokio-0.2.25/src/time/driver/atomic_stack.rs deleted file mode 100644 index c1972a76c93e..000000000000 --- a/third_party/rust/tokio-0.2.25/src/time/driver/atomic_stack.rs +++ /dev/null @@ -1,124 +0,0 @@ -use crate::time::driver::Entry; -use crate::time::Error; - -use std::ptr; -use std::sync::atomic::AtomicPtr; -use std::sync::atomic::Ordering::SeqCst; -use std::sync::Arc; - -/// A stack of `Entry` nodes -#[derive(Debug)] -pub(crate) struct AtomicStack { - /// Stack head - head: AtomicPtr, -} - -/// Entries that were removed from the stack -#[derive(Debug)] -pub(crate) struct AtomicStackEntries { - ptr: *mut Entry, -} - -/// Used to indicate that the timer has shutdown. -const SHUTDOWN: *mut Entry = 1 as *mut _; - -impl AtomicStack { - pub(crate) fn new() -> AtomicStack { - AtomicStack { - head: AtomicPtr::new(ptr::null_mut()), - } - } - - /// Pushes an entry onto the stack. - /// - /// Returns `true` if the entry was pushed, `false` if the entry is already - /// on the stack, `Err` if the timer is shutdown. - pub(crate) fn push(&self, entry: &Arc) -> Result { - // First, set the queued bit on the entry - let queued = entry.queued.fetch_or(true, SeqCst); - - if queued { - // Already queued, nothing more to do - return Ok(false); - } - - let ptr = Arc::into_raw(entry.clone()) as *mut _; - - let mut curr = self.head.load(SeqCst); - - loop { - if curr == SHUTDOWN { - // Don't leak the entry node - let _ = unsafe { Arc::from_raw(ptr) }; - - return Err(Error::shutdown()); - } - - // Update the `next` pointer. This is safe because setting the queued - // bit is a "lock" on this field. - unsafe { - *(entry.next_atomic.get()) = curr; - } - - let actual = self.head.compare_and_swap(curr, ptr, SeqCst); - - if actual == curr { - break; - } - - curr = actual; - } - - Ok(true) - } - - /// Takes all entries from the stack - pub(crate) fn take(&self) -> AtomicStackEntries { - let ptr = self.head.swap(ptr::null_mut(), SeqCst); - AtomicStackEntries { ptr } - } - - /// Drains all remaining nodes in the stack and prevent any new nodes from - /// being pushed onto the stack. 
- pub(crate) fn shutdown(&self) { - // Shutdown the processing queue - let ptr = self.head.swap(SHUTDOWN, SeqCst); - - // Let the drop fn of `AtomicStackEntries` handle draining the stack - drop(AtomicStackEntries { ptr }); - } -} - -// ===== impl AtomicStackEntries ===== - -impl Iterator for AtomicStackEntries { - type Item = Arc; - - fn next(&mut self) -> Option { - if self.ptr.is_null() || self.ptr == SHUTDOWN { - return None; - } - - // Convert the pointer to an `Arc` - let entry = unsafe { Arc::from_raw(self.ptr) }; - - // Update `self.ptr` to point to the next element of the stack - self.ptr = unsafe { *entry.next_atomic.get() }; - - // Unset the queued flag - let res = entry.queued.fetch_and(false, SeqCst); - debug_assert!(res); - - // Return the entry - Some(entry) - } -} - -impl Drop for AtomicStackEntries { - fn drop(&mut self) { - for entry in self { - // Flag the entry as errored - entry.error(Error::shutdown()); - } - } -} diff --git a/third_party/rust/tokio-0.2.25/src/time/driver/entry.rs b/third_party/rust/tokio-0.2.25/src/time/driver/entry.rs deleted file mode 100644 index 974465c19be0..000000000000 --- a/third_party/rust/tokio-0.2.25/src/time/driver/entry.rs +++ /dev/null @@ -1,358 +0,0 @@ -use crate::loom::sync::atomic::AtomicU64; -use crate::sync::AtomicWaker; -use crate::time::driver::{Handle, Inner}; -use crate::time::{Duration, Error, Instant}; - -use std::cell::UnsafeCell; -use std::ptr; -use std::sync::atomic::Ordering::SeqCst; -use std::sync::atomic::{AtomicBool, AtomicU8}; -use std::sync::{Arc, Weak}; -use std::task::{self, Poll}; -use std::u64; - -/// Internal state shared between a `Delay` instance and the timer. -/// -/// This struct is used as a node in two intrusive data structures: -/// -/// * An atomic stack used to signal to the timer thread that the entry state -/// has changed. The timer thread will observe the entry on this stack and -/// perform any actions as necessary. -/// -/// * A doubly linked list used **only** by the timer thread. Each slot in the -/// timer wheel is a head pointer to the list of entries that must be -/// processed during that timer tick. -#[derive(Debug)] -pub(crate) struct Entry { - /// Only accessed from `Registration`. - time: CachePadded>, - - /// Timer internals. Using a weak pointer allows the timer to shutdown - /// without all `Delay` instances having completed. - /// - /// When empty, it means that the entry has not yet been linked with a - /// timer instance. - inner: Weak, - - /// Tracks the entry state. This value contains the following information: - /// - /// * The deadline at which the entry must be "fired". - /// * A flag indicating if the entry has already been fired. - /// * Whether or not the entry transitioned to the error state. - /// - /// When an `Entry` is created, `state` is initialized to the instant at - /// which the entry must be fired. When a timer is reset to a different - /// instant, this value is changed. - state: AtomicU64, - - /// Stores the actual error. If `state` indicates that an error occurred, - /// this is guaranteed to be a non-zero value representing the first error - /// that occurred. Otherwise its value is undefined. - error: AtomicU8, - - /// Task to notify once the deadline is reached. - waker: AtomicWaker, - - /// True when the entry is queued in the "process" stack. This value - /// is set before pushing the value and unset after popping the value. - /// - /// TODO: This could possibly be rolled up into `state`. 
- pub(super) queued: AtomicBool, - - /// Next entry in the "process" linked list. - /// - /// Access to this field is coordinated by the `queued` flag. - /// - /// Represents a strong Arc ref. - pub(super) next_atomic: UnsafeCell<*mut Entry>, - - /// When the entry expires, relative to the `start` of the timer - /// (Inner::start). This is only used by the timer. - /// - /// A `Delay` instance can be reset to a different deadline by the thread - /// that owns the `Delay` instance. In this case, the timer thread will not - /// immediately know that this has happened. The timer thread must know the - /// last deadline that it saw as it uses this value to locate the entry in - /// its wheel. - /// - /// Once the timer thread observes that the instant has changed, it updates - /// the wheel and sets this value. The idea is that this value eventually - /// converges to the value of `state` as the timer thread makes updates. - when: UnsafeCell>, - - /// Next entry in the State's linked list. - /// - /// This is only accessed by the timer - pub(super) next_stack: UnsafeCell>>, - - /// Previous entry in the State's linked list. - /// - /// This is only accessed by the timer and is used to unlink a canceled - /// entry. - /// - /// This is a weak reference. - pub(super) prev_stack: UnsafeCell<*const Entry>, -} - -/// Stores the info for `Delay`. -#[derive(Debug)] -pub(crate) struct Time { - pub(crate) deadline: Instant, - pub(crate) duration: Duration, -} - -/// Flag indicating a timer entry has elapsed -const ELAPSED: u64 = 1 << 63; - -/// Flag indicating a timer entry has reached an error state -const ERROR: u64 = u64::MAX; - -// ===== impl Entry ===== - -impl Entry { - pub(crate) fn new(handle: &Handle, deadline: Instant, duration: Duration) -> Arc { - let inner = handle.inner().unwrap(); - let entry: Entry; - - // Increment the number of active timeouts - if let Err(err) = inner.increment() { - entry = Entry::new2(deadline, duration, Weak::new(), ERROR); - entry.error(err); - } else { - let when = inner.normalize_deadline(deadline); - let state = if when <= inner.elapsed() { - ELAPSED - } else { - when - }; - entry = Entry::new2(deadline, duration, Arc::downgrade(&inner), state); - } - - let entry = Arc::new(entry); - if let Err(err) = inner.queue(&entry) { - entry.error(err); - } - - entry - } - - /// Only called by `Registration` - pub(crate) fn time_ref(&self) -> &Time { - unsafe { &*self.time.0.get() } - } - - /// Only called by `Registration` - #[allow(clippy::mut_from_ref)] // https://github.com/rust-lang/rust-clippy/issues/4281 - pub(crate) unsafe fn time_mut(&self) -> &mut Time { - &mut *self.time.0.get() - } - - /// The current entry state as known by the timer. This is not the value of - /// `state`, but lets the timer know how to converge its state to `state`. 
- pub(crate) fn when_internal(&self) -> Option { - unsafe { *self.when.get() } - } - - pub(crate) fn set_when_internal(&self, when: Option) { - unsafe { - *self.when.get() = when; - } - } - - /// Called by `Timer` to load the current value of `state` for processing - pub(crate) fn load_state(&self) -> Option { - let state = self.state.load(SeqCst); - - if is_elapsed(state) { - None - } else { - Some(state) - } - } - - pub(crate) fn is_elapsed(&self) -> bool { - let state = self.state.load(SeqCst); - is_elapsed(state) - } - - pub(crate) fn fire(&self, when: u64) { - let mut curr = self.state.load(SeqCst); - - loop { - if is_elapsed(curr) || curr > when { - return; - } - - let next = ELAPSED | curr; - let actual = self.state.compare_and_swap(curr, next, SeqCst); - - if curr == actual { - break; - } - - curr = actual; - } - - self.waker.wake(); - } - - pub(crate) fn error(&self, error: Error) { - // Record the precise nature of the error, if there isn't already an - // error present. If we don't actually transition to the error state - // below, that's fine, as the error details we set here will be ignored. - self.error.compare_and_swap(0, error.as_u8(), SeqCst); - - // Only transition to the error state if not currently elapsed - let mut curr = self.state.load(SeqCst); - - loop { - if is_elapsed(curr) { - return; - } - - let next = ERROR; - - let actual = self.state.compare_and_swap(curr, next, SeqCst); - - if curr == actual { - break; - } - - curr = actual; - } - - self.waker.wake(); - } - - pub(crate) fn cancel(entry: &Arc) { - let state = entry.state.fetch_or(ELAPSED, SeqCst); - - if is_elapsed(state) { - // Nothing more to do - return; - } - - // If registered with a timer instance, try to upgrade the Arc. - let inner = match entry.upgrade_inner() { - Some(inner) => inner, - None => return, - }; - - let _ = inner.queue(entry); - } - - pub(crate) fn poll_elapsed(&self, cx: &mut task::Context<'_>) -> Poll> { - let mut curr = self.state.load(SeqCst); - - if is_elapsed(curr) { - return Poll::Ready(if curr == ERROR { - Err(Error::from_u8(self.error.load(SeqCst))) - } else { - Ok(()) - }); - } - - self.waker.register_by_ref(cx.waker()); - - curr = self.state.load(SeqCst); - - if is_elapsed(curr) { - return Poll::Ready(if curr == ERROR { - Err(Error::from_u8(self.error.load(SeqCst))) - } else { - Ok(()) - }); - } - - Poll::Pending - } - - /// Only called by `Registration` - pub(crate) fn reset(entry: &mut Arc) { - let inner = match entry.upgrade_inner() { - Some(inner) => inner, - None => return, - }; - - let deadline = entry.time_ref().deadline; - let when = inner.normalize_deadline(deadline); - let elapsed = inner.elapsed(); - - let next = if when <= elapsed { ELAPSED } else { when }; - - let mut curr = entry.state.load(SeqCst); - - loop { - // In these two cases, there is no work to do when resetting the - // timer. If the `Entry` is in an error state, then it cannot be - // used anymore. If resetting the entry to the current value, then - // the reset is a noop. - if curr == ERROR || curr == when { - return; - } - - let actual = entry.state.compare_and_swap(curr, next, SeqCst); - - if curr == actual { - break; - } - - curr = actual; - } - - // If the state has transitioned to 'elapsed' then wake the task as - // this entry is ready to be polled. 
- if !is_elapsed(curr) && is_elapsed(next) { - entry.waker.wake(); - } - - // The driver tracks all non-elapsed entries; notify the driver that it - // should update its state for this entry unless the entry had already - // elapsed and remains elapsed. - if !is_elapsed(curr) || !is_elapsed(next) { - let _ = inner.queue(entry); - } - } - - fn new2(deadline: Instant, duration: Duration, inner: Weak, state: u64) -> Self { - Self { - time: CachePadded(UnsafeCell::new(Time { deadline, duration })), - inner, - waker: AtomicWaker::new(), - state: AtomicU64::new(state), - queued: AtomicBool::new(false), - error: AtomicU8::new(0), - next_atomic: UnsafeCell::new(ptr::null_mut()), - when: UnsafeCell::new(None), - next_stack: UnsafeCell::new(None), - prev_stack: UnsafeCell::new(ptr::null_mut()), - } - } - - fn upgrade_inner(&self) -> Option> { - self.inner.upgrade() - } -} - -fn is_elapsed(state: u64) -> bool { - state & ELAPSED == ELAPSED -} - -impl Drop for Entry { - fn drop(&mut self) { - let inner = match self.upgrade_inner() { - Some(inner) => inner, - None => return, - }; - - inner.decrement(); - } -} - -unsafe impl Send for Entry {} -unsafe impl Sync for Entry {} - -#[cfg_attr(target_arch = "x86_64", repr(align(128)))] -#[cfg_attr(not(target_arch = "x86_64"), repr(align(64)))] -#[derive(Debug)] -struct CachePadded(T); diff --git a/third_party/rust/tokio-0.2.25/src/time/driver/handle.rs b/third_party/rust/tokio-0.2.25/src/time/driver/handle.rs deleted file mode 100644 index 4526304cda4b..000000000000 --- a/third_party/rust/tokio-0.2.25/src/time/driver/handle.rs +++ /dev/null @@ -1,39 +0,0 @@ -use crate::runtime::context; -use crate::time::driver::Inner; -use std::fmt; -use std::sync::{Arc, Weak}; - -/// Handle to time driver instance. -#[derive(Clone)] -pub(crate) struct Handle { - inner: Weak, -} - -impl Handle { - /// Creates a new timer `Handle` from a shared `Inner` timer state. - pub(crate) fn new(inner: Weak) -> Self { - Handle { inner } - } - - /// Tries to get a handle to the current timer. - /// - /// # Panics - /// - /// This function panics if there is no current timer set. - pub(crate) fn current() -> Self { - context::time_handle().expect( - "there is no timer running, must be called from the context of a Tokio 0.2.x runtime", - ) - } - - /// Tries to return a strong ref to the inner - pub(crate) fn inner(&self) -> Option> { - self.inner.upgrade() - } -} - -impl fmt::Debug for Handle { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "Handle") - } -} diff --git a/third_party/rust/tokio-0.2.25/src/time/driver/mod.rs b/third_party/rust/tokio-0.2.25/src/time/driver/mod.rs deleted file mode 100644 index bb6c28b34de7..000000000000 --- a/third_party/rust/tokio-0.2.25/src/time/driver/mod.rs +++ /dev/null @@ -1,415 +0,0 @@ -//! Time driver - -mod atomic_stack; -use self::atomic_stack::AtomicStack; - -mod entry; -pub(super) use self::entry::Entry; - -mod handle; -pub(crate) use self::handle::Handle; - -mod registration; -pub(crate) use self::registration::Registration; - -mod stack; -use self::stack::Stack; - -use crate::loom::sync::atomic::{AtomicU64, AtomicUsize}; -use crate::park::{Park, Unpark}; -use crate::time::{wheel, Error}; -use crate::time::{Clock, Duration, Instant}; - -use std::sync::atomic::Ordering::{Acquire, Relaxed, Release, SeqCst}; - -use std::sync::Arc; -use std::usize; -use std::{cmp, fmt}; - -/// Time implementation that drives [`Delay`][delay], [`Interval`][interval], and [`Timeout`][timeout]. 
-/// -/// A `Driver` instance tracks the state necessary for managing time and -/// notifying the [`Delay`][delay] instances once their deadlines are reached. -/// -/// It is expected that a single instance manages many individual [`Delay`][delay] -/// instances. The `Driver` implementation is thread-safe and, as such, is able -/// to handle callers from across threads. -/// -/// After creating the `Driver` instance, the caller must repeatedly call `park` -/// or `park_timeout`. The time driver will perform no work unless `park` or -/// `park_timeout` is called repeatedly. -/// -/// The driver has a resolution of one millisecond. Any unit of time that falls -/// between milliseconds are rounded up to the next millisecond. -/// -/// When an instance is dropped, any outstanding [`Delay`][delay] instance that has not -/// elapsed will be notified with an error. At this point, calling `poll` on the -/// [`Delay`][delay] instance will result in panic. -/// -/// # Implementation -/// -/// The time driver is based on the [paper by Varghese and Lauck][paper]. -/// -/// A hashed timing wheel is a vector of slots, where each slot handles a time -/// slice. As time progresses, the timer walks over the slot for the current -/// instant, and processes each entry for that slot. When the timer reaches the -/// end of the wheel, it starts again at the beginning. -/// -/// The implementation maintains six wheels arranged in a set of levels. As the -/// levels go up, the slots of the associated wheel represent larger intervals -/// of time. At each level, the wheel has 64 slots. Each slot covers a range of -/// time equal to the wheel at the lower level. At level zero, each slot -/// represents one millisecond of time. -/// -/// The wheels are: -/// -/// * Level 0: 64 x 1 millisecond slots. -/// * Level 1: 64 x 64 millisecond slots. -/// * Level 2: 64 x ~4 second slots. -/// * Level 3: 64 x ~4 minute slots. -/// * Level 4: 64 x ~4 hour slots. -/// * Level 5: 64 x ~12 day slots. -/// -/// When the timer processes entries at level zero, it will notify all the -/// `Delay` instances as their deadlines have been reached. For all higher -/// levels, all entries will be redistributed across the wheel at the next level -/// down. Eventually, as time progresses, entries will [`Delay`][delay] instances will -/// either be canceled (dropped) or their associated entries will reach level -/// zero and be notified. -/// -/// [paper]: http://www.cs.columbia.edu/~nahum/w6998/papers/ton97-timing-wheels.pdf -/// [delay]: crate::time::Delay -/// [timeout]: crate::time::Timeout -/// [interval]: crate::time::Interval -#[derive(Debug)] -pub(crate) struct Driver { - /// Shared state - inner: Arc, - - /// Timer wheel - wheel: wheel::Wheel, - - /// Thread parker. The `Driver` park implementation delegates to this. - park: T, - - /// Source of "now" instances - clock: Clock, - - /// True if the driver is being shutdown - is_shutdown: bool, -} - -/// Timer state shared between `Driver`, `Handle`, and `Registration`. -pub(crate) struct Inner { - /// The instant at which the timer started running. - start: Instant, - - /// The last published timer `elapsed` value. - elapsed: AtomicU64, - - /// Number of active timeouts - num: AtomicUsize, - - /// Head of the "process" linked list. - process: AtomicStack, - - /// Unparks the timer thread. - unpark: Box, -} - -/// Maximum number of timeouts the system can handle concurrently. 
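// Illustrative sketch only: the slot arithmetic described in the `Driver` doc
// comment above (six levels, 64 slots per level, with a level-`n` slot spanning
// 64^n milliseconds). The constants are taken from that prose, not from the
// wheel implementation itself, and this standalone program is not part of the
// vendored crate.
fn main() {
    for level in 0u32..6 {
        let slot_ms = 64u64.pow(level); // width of one slot at this level, in ms
        let level_ms = slot_ms * 64;    // total span covered by the whole level
        println!("level {level}: 64 slots x {slot_ms} ms = {level_ms} ms");
    }
    // Spot-check the documented approximations: level 2 spans ~4.4 minutes
    // (one level-3 slot), and a single level-5 slot is ~12.4 days.
    assert_eq!(64u64.pow(2) * 64, 262_144);
    assert_eq!(64u64.pow(5), 1_073_741_824);
}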
-const MAX_TIMEOUTS: usize = usize::MAX >> 1; - -// ===== impl Driver ===== - -impl Driver -where - T: Park, -{ - /// Creates a new `Driver` instance that uses `park` to block the current - /// thread and `clock` to get the current `Instant`. - /// - /// Specifying the source of time is useful when testing. - pub(crate) fn new(park: T, clock: Clock) -> Driver { - let unpark = Box::new(park.unpark()); - - Driver { - inner: Arc::new(Inner::new(clock.now(), unpark)), - wheel: wheel::Wheel::new(), - park, - clock, - is_shutdown: false, - } - } - - /// Returns a handle to the timer. - /// - /// The `Handle` is how `Delay` instances are created. The `Delay` instances - /// can either be created directly or the `Handle` instance can be passed to - /// `with_default`, setting the timer as the default timer for the execution - /// context. - pub(crate) fn handle(&self) -> Handle { - Handle::new(Arc::downgrade(&self.inner)) - } - - /// Converts an `Expiration` to an `Instant`. - fn expiration_instant(&self, when: u64) -> Instant { - self.inner.start + Duration::from_millis(when) - } - - /// Runs timer related logic - fn process(&mut self) { - let now = crate::time::ms( - self.clock.now() - self.inner.start, - crate::time::Round::Down, - ); - let mut poll = wheel::Poll::new(now); - - while let Some(entry) = self.wheel.poll(&mut poll, &mut ()) { - let when = entry.when_internal().expect("invalid internal entry state"); - - // Fire the entry - entry.fire(when); - - // Track that the entry has been fired - entry.set_when_internal(None); - } - - // Update the elapsed cache - self.inner.elapsed.store(self.wheel.elapsed(), SeqCst); - } - - /// Processes the entry queue - /// - /// This handles adding and canceling timeouts. - fn process_queue(&mut self) { - for entry in self.inner.process.take() { - match (entry.when_internal(), entry.load_state()) { - (None, None) => { - // Nothing to do - } - (Some(_), None) => { - // Remove the entry - self.clear_entry(&entry); - } - (None, Some(when)) => { - // Queue the entry - self.add_entry(entry, when); - } - (Some(_), Some(next)) => { - self.clear_entry(&entry); - self.add_entry(entry, next); - } - } - } - } - - fn clear_entry(&mut self, entry: &Arc) { - self.wheel.remove(entry, &mut ()); - entry.set_when_internal(None); - } - - /// Fires the entry if it needs to, otherwise queue it to be processed later. - /// - /// Returns `None` if the entry was fired. - fn add_entry(&mut self, entry: Arc, when: u64) { - use crate::time::wheel::InsertError; - - entry.set_when_internal(Some(when)); - - match self.wheel.insert(when, entry, &mut ()) { - Ok(_) => {} - Err((entry, InsertError::Elapsed)) => { - // The entry's deadline has elapsed, so fire it and update the - // internal state accordingly. - entry.set_when_internal(None); - entry.fire(when); - } - Err((entry, InsertError::Invalid)) => { - // The entry's deadline is invalid, so error it and update the - // internal state accordingly. 
- entry.set_when_internal(None); - entry.error(Error::invalid()); - } - } - } -} - -impl Park for Driver -where - T: Park, -{ - type Unpark = T::Unpark; - type Error = T::Error; - - fn unpark(&self) -> Self::Unpark { - self.park.unpark() - } - - fn park(&mut self) -> Result<(), Self::Error> { - self.process_queue(); - - match self.wheel.poll_at() { - Some(when) => { - let now = self.clock.now(); - let deadline = self.expiration_instant(when); - - if deadline > now { - let dur = deadline - now; - - if self.clock.is_paused() { - self.park.park_timeout(Duration::from_secs(0))?; - self.clock.advance(dur); - } else { - self.park.park_timeout(dur)?; - } - } else { - self.park.park_timeout(Duration::from_secs(0))?; - } - } - None => { - self.park.park()?; - } - } - - self.process(); - - Ok(()) - } - - fn park_timeout(&mut self, duration: Duration) -> Result<(), Self::Error> { - self.process_queue(); - - match self.wheel.poll_at() { - Some(when) => { - let now = self.clock.now(); - let deadline = self.expiration_instant(when); - - if deadline > now { - let duration = cmp::min(deadline - now, duration); - - if self.clock.is_paused() { - self.park.park_timeout(Duration::from_secs(0))?; - self.clock.advance(duration); - } else { - self.park.park_timeout(duration)?; - } - } else { - self.park.park_timeout(Duration::from_secs(0))?; - } - } - None => { - self.park.park_timeout(duration)?; - } - } - - self.process(); - - Ok(()) - } - - fn shutdown(&mut self) { - if self.is_shutdown { - return; - } - - use std::u64; - - // Shutdown the stack of entries to process, preventing any new entries - // from being pushed. - self.inner.process.shutdown(); - - // Clear the wheel, using u64::MAX allows us to drain everything - let mut poll = wheel::Poll::new(u64::MAX); - - while let Some(entry) = self.wheel.poll(&mut poll, &mut ()) { - entry.error(Error::shutdown()); - } - - self.park.shutdown(); - - self.is_shutdown = true; - } -} - -impl Drop for Driver -where - T: Park, -{ - fn drop(&mut self) { - self.shutdown(); - } -} - -// ===== impl Inner ===== - -impl Inner { - fn new(start: Instant, unpark: Box) -> Inner { - Inner { - num: AtomicUsize::new(0), - elapsed: AtomicU64::new(0), - process: AtomicStack::new(), - start, - unpark, - } - } - - fn elapsed(&self) -> u64 { - self.elapsed.load(SeqCst) - } - - #[cfg(all(test, loom))] - fn num(&self, ordering: std::sync::atomic::Ordering) -> usize { - self.num.load(ordering) - } - - /// Increments the number of active timeouts - fn increment(&self) -> Result<(), Error> { - let mut curr = self.num.load(Relaxed); - loop { - if curr == MAX_TIMEOUTS { - return Err(Error::at_capacity()); - } - - match self - .num - .compare_exchange_weak(curr, curr + 1, Release, Relaxed) - { - Ok(_) => return Ok(()), - Err(next) => curr = next, - } - } - } - - /// Decrements the number of active timeouts - fn decrement(&self) { - let prev = self.num.fetch_sub(1, Acquire); - debug_assert!(prev <= MAX_TIMEOUTS); - } - - fn queue(&self, entry: &Arc) -> Result<(), Error> { - if self.process.push(entry)? 
{ - // The timer is notified so that it can process the timeout - self.unpark.unpark(); - } - - Ok(()) - } - - fn normalize_deadline(&self, deadline: Instant) -> u64 { - if deadline < self.start { - return 0; - } - - crate::time::ms(deadline - self.start, crate::time::Round::Up) - } -} - -impl fmt::Debug for Inner { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("Inner").finish() - } -} - -#[cfg(all(test, loom))] -mod tests; diff --git a/third_party/rust/tokio-0.2.25/src/time/driver/registration.rs b/third_party/rust/tokio-0.2.25/src/time/driver/registration.rs deleted file mode 100644 index 3a0b34501b0b..000000000000 --- a/third_party/rust/tokio-0.2.25/src/time/driver/registration.rs +++ /dev/null @@ -1,56 +0,0 @@ -use crate::time::driver::{Entry, Handle}; -use crate::time::{Duration, Error, Instant}; - -use std::sync::Arc; -use std::task::{self, Poll}; - -/// Registration with a timer. -/// -/// The association between a `Delay` instance and a timer is done lazily in -/// `poll` -#[derive(Debug)] -pub(crate) struct Registration { - entry: Arc, -} - -impl Registration { - pub(crate) fn new(deadline: Instant, duration: Duration) -> Registration { - let handle = Handle::current(); - - Registration { - entry: Entry::new(&handle, deadline, duration), - } - } - - pub(crate) fn deadline(&self) -> Instant { - self.entry.time_ref().deadline - } - - pub(crate) fn reset(&mut self, deadline: Instant) { - unsafe { - self.entry.time_mut().deadline = deadline; - } - - Entry::reset(&mut self.entry); - } - - pub(crate) fn is_elapsed(&self) -> bool { - self.entry.is_elapsed() - } - - pub(crate) fn poll_elapsed(&self, cx: &mut task::Context<'_>) -> Poll> { - // Keep track of task budget - let coop = ready!(crate::coop::poll_proceed(cx)); - - self.entry.poll_elapsed(cx).map(move |r| { - coop.made_progress(); - r - }) - } -} - -impl Drop for Registration { - fn drop(&mut self) { - Entry::cancel(&self.entry); - } -} diff --git a/third_party/rust/tokio-0.2.25/src/time/driver/stack.rs b/third_party/rust/tokio-0.2.25/src/time/driver/stack.rs deleted file mode 100644 index 3e2924f26537..000000000000 --- a/third_party/rust/tokio-0.2.25/src/time/driver/stack.rs +++ /dev/null @@ -1,121 +0,0 @@ -use crate::time::driver::Entry; -use crate::time::wheel; - -use std::ptr; -use std::sync::Arc; - -/// A doubly linked stack -#[derive(Debug)] -pub(crate) struct Stack { - head: Option>, -} - -impl Default for Stack { - fn default() -> Stack { - Stack { head: None } - } -} - -impl wheel::Stack for Stack { - type Owned = Arc; - type Borrowed = Entry; - type Store = (); - - fn is_empty(&self) -> bool { - self.head.is_none() - } - - fn push(&mut self, entry: Self::Owned, _: &mut Self::Store) { - // Get a pointer to the entry to for the prev link - let ptr: *const Entry = &*entry as *const _; - - // Remove the old head entry - let old = self.head.take(); - - unsafe { - // Ensure the entry is not already in a stack. 
- debug_assert!((*entry.next_stack.get()).is_none()); - debug_assert!((*entry.prev_stack.get()).is_null()); - - if let Some(ref entry) = old.as_ref() { - debug_assert!({ - // The head is not already set to the entry - ptr != &***entry as *const _ - }); - - // Set the previous link on the old head - *entry.prev_stack.get() = ptr; - } - - // Set this entry's next pointer - *entry.next_stack.get() = old; - } - - // Update the head pointer - self.head = Some(entry); - } - - /// Pops an item from the stack - fn pop(&mut self, _: &mut ()) -> Option> { - let entry = self.head.take(); - - unsafe { - if let Some(entry) = entry.as_ref() { - self.head = (*entry.next_stack.get()).take(); - - if let Some(entry) = self.head.as_ref() { - *entry.prev_stack.get() = ptr::null(); - } - - *entry.prev_stack.get() = ptr::null(); - } - } - - entry - } - - fn remove(&mut self, entry: &Entry, _: &mut ()) { - unsafe { - // Ensure that the entry is in fact contained by the stack - debug_assert!({ - // This walks the full linked list even if an entry is found. - let mut next = self.head.as_ref(); - let mut contains = false; - - while let Some(n) = next { - if entry as *const _ == &**n as *const _ { - debug_assert!(!contains); - contains = true; - } - - next = (*n.next_stack.get()).as_ref(); - } - - contains - }); - - // Unlink `entry` from the next node - let next = (*entry.next_stack.get()).take(); - - if let Some(next) = next.as_ref() { - (*next.prev_stack.get()) = *entry.prev_stack.get(); - } - - // Unlink `entry` from the prev node - - if let Some(prev) = (*entry.prev_stack.get()).as_ref() { - *prev.next_stack.get() = next; - } else { - // It is the head - self.head = next; - } - - // Unset the prev pointer - *entry.prev_stack.get() = ptr::null(); - } - } - - fn when(item: &Entry, _: &()) -> u64 { - item.when_internal().expect("invalid internal state") - } -} diff --git a/third_party/rust/tokio-0.2.25/src/time/driver/tests/mod.rs b/third_party/rust/tokio-0.2.25/src/time/driver/tests/mod.rs deleted file mode 100644 index 88ff5525dab8..000000000000 --- a/third_party/rust/tokio-0.2.25/src/time/driver/tests/mod.rs +++ /dev/null @@ -1,55 +0,0 @@ -use crate::park::Unpark; -use crate::time::driver::Inner; -use crate::time::Instant; - -use loom::thread; - -use std::sync::atomic::Ordering; -use std::sync::Arc; - -struct MockUnpark; - -impl Unpark for MockUnpark { - fn unpark(&self) {} -} - -#[test] -fn balanced_incr_and_decr() { - const OPS: usize = 5; - - fn incr(inner: Arc) { - for _ in 0..OPS { - inner.increment().expect("increment should not have failed"); - thread::yield_now(); - } - } - - fn decr(inner: Arc) { - let mut ops_performed = 0; - while ops_performed < OPS { - if inner.num(Ordering::Relaxed) > 0 { - ops_performed += 1; - inner.decrement(); - } - thread::yield_now(); - } - } - - loom::model(|| { - let unpark = Box::new(MockUnpark); - let instant = Instant::now(); - - let inner = Arc::new(Inner::new(instant, unpark)); - - let incr_inner = inner.clone(); - let decr_inner = inner.clone(); - - let incr_hndle = thread::spawn(move || incr(incr_inner)); - let decr_hndle = thread::spawn(move || decr(decr_inner)); - - incr_hndle.join().expect("should never fail"); - decr_hndle.join().expect("should never fail"); - - assert_eq!(inner.num(Ordering::SeqCst), 0); - }) -} diff --git a/third_party/rust/tokio-0.2.25/src/time/error.rs b/third_party/rust/tokio-0.2.25/src/time/error.rs deleted file mode 100644 index 2f93d67115b5..000000000000 --- a/third_party/rust/tokio-0.2.25/src/time/error.rs +++ /dev/null @@ -1,101 +0,0 
@@ -use self::Kind::*; -use std::error; -use std::fmt; - -/// Errors encountered by the timer implementation. -/// -/// Currently, there are two different errors that can occur: -/// -/// * `shutdown` occurs when a timer operation is attempted, but the timer -/// instance has been dropped. In this case, the operation will never be able -/// to complete and the `shutdown` error is returned. This is a permanent -/// error, i.e., once this error is observed, timer operations will never -/// succeed in the future. -/// -/// * `at_capacity` occurs when a timer operation is attempted, but the timer -/// instance is currently handling its maximum number of outstanding delays. -/// In this case, the operation is not able to be performed at the current -/// moment, and `at_capacity` is returned. This is a transient error, i.e., at -/// some point in the future, if the operation is attempted again, it might -/// succeed. Callers that observe this error should attempt to [shed load]. One -/// way to do this would be dropping the future that issued the timer operation. -/// -/// [shed load]: https://en.wikipedia.org/wiki/Load_Shedding -#[derive(Debug)] -pub struct Error(Kind); - -#[derive(Debug, Clone, Copy)] -#[repr(u8)] -enum Kind { - Shutdown = 1, - AtCapacity = 2, - Invalid = 3, -} - -impl Error { - /// Creates an error representing a shutdown timer. - pub fn shutdown() -> Error { - Error(Shutdown) - } - - /// Returns `true` if the error was caused by the timer being shutdown. - pub fn is_shutdown(&self) -> bool { - match self.0 { - Kind::Shutdown => true, - _ => false, - } - } - - /// Creates an error representing a timer at capacity. - pub fn at_capacity() -> Error { - Error(AtCapacity) - } - - /// Returns `true` if the error was caused by the timer being at capacity. - pub fn is_at_capacity(&self) -> bool { - match self.0 { - Kind::AtCapacity => true, - _ => false, - } - } - - /// Create an error representing a misconfigured timer. - pub fn invalid() -> Error { - Error(Invalid) - } - - /// Returns `true` if the error was caused by the timer being misconfigured. - pub fn is_invalid(&self) -> bool { - match self.0 { - Kind::Invalid => true, - _ => false, - } - } - - pub(crate) fn as_u8(&self) -> u8 { - self.0 as u8 - } - - pub(crate) fn from_u8(n: u8) -> Self { - Error(match n { - 1 => Shutdown, - 2 => AtCapacity, - 3 => Invalid, - _ => panic!("u8 does not correspond to any time error variant"), - }) - } -} - -impl error::Error for Error {} - -impl fmt::Display for Error { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - use self::Kind::*; - let descr = match self.0 { - Shutdown => "the timer is shutdown, must be called from the context of Tokio runtime", - AtCapacity => "timer is at capacity and cannot create a new entry", - Invalid => "timer duration exceeds maximum duration", - }; - write!(fmt, "{}", descr) - } -} diff --git a/third_party/rust/tokio-0.2.25/src/time/instant.rs b/third_party/rust/tokio-0.2.25/src/time/instant.rs deleted file mode 100644 index f2cb4bc97db3..000000000000 --- a/third_party/rust/tokio-0.2.25/src/time/instant.rs +++ /dev/null @@ -1,199 +0,0 @@ -#![allow(clippy::trivially_copy_pass_by_ref)] - -use std::fmt; -use std::ops; -use std::time::Duration; - -/// A measurement of the system clock, useful for talking to -/// external entities like the file system or other processes. -#[derive(Clone, Copy, Eq, PartialEq, PartialOrd, Ord, Hash)] -pub struct Instant { - std: std::time::Instant, -} - -impl Instant { - /// Returns an instant corresponding to "now". 
- /// - /// # Examples - /// - /// ``` - /// use tokio::time::Instant; - /// - /// let now = Instant::now(); - /// ``` - pub fn now() -> Instant { - variant::now() - } - - /// Create a `tokio::time::Instant` from a `std::time::Instant`. - pub fn from_std(std: std::time::Instant) -> Instant { - Instant { std } - } - - /// Convert the value into a `std::time::Instant`. - pub fn into_std(self) -> std::time::Instant { - self.std - } - - /// Returns the amount of time elapsed from another instant to this one. - /// - /// # Panics - /// - /// This function will panic if `earlier` is later than `self`. - pub fn duration_since(&self, earlier: Instant) -> Duration { - self.std.duration_since(earlier.std) - } - - /// Returns the amount of time elapsed from another instant to this one, or - /// None if that instant is later than this one. - /// - /// # Examples - /// - /// ``` - /// use tokio::time::{Duration, Instant, delay_for}; - /// - /// #[tokio::main] - /// async fn main() { - /// let now = Instant::now(); - /// delay_for(Duration::new(1, 0)).await; - /// let new_now = Instant::now(); - /// println!("{:?}", new_now.checked_duration_since(now)); - /// println!("{:?}", now.checked_duration_since(new_now)); // None - /// } - /// ``` - pub fn checked_duration_since(&self, earlier: Instant) -> Option { - self.std.checked_duration_since(earlier.std) - } - - /// Returns the amount of time elapsed from another instant to this one, or - /// zero duration if that instant is earlier than this one. - /// - /// # Examples - /// - /// ``` - /// use tokio::time::{Duration, Instant, delay_for}; - /// - /// #[tokio::main] - /// async fn main() { - /// let now = Instant::now(); - /// delay_for(Duration::new(1, 0)).await; - /// let new_now = Instant::now(); - /// println!("{:?}", new_now.saturating_duration_since(now)); - /// println!("{:?}", now.saturating_duration_since(new_now)); // 0ns - /// } - /// ``` - pub fn saturating_duration_since(&self, earlier: Instant) -> Duration { - self.std.saturating_duration_since(earlier.std) - } - - /// Returns the amount of time elapsed since this instant was created. - /// - /// # Panics - /// - /// This function may panic if the current time is earlier than this - /// instant, which is something that can happen if an `Instant` is - /// produced synthetically. - /// - /// # Examples - /// - /// ``` - /// use tokio::time::{Duration, Instant, delay_for}; - /// - /// #[tokio::main] - /// async fn main() { - /// let instant = Instant::now(); - /// let three_secs = Duration::from_secs(3); - /// delay_for(three_secs).await; - /// assert!(instant.elapsed() >= three_secs); - /// } - /// ``` - pub fn elapsed(&self) -> Duration { - Instant::now() - *self - } - - /// Returns `Some(t)` where `t` is the time `self + duration` if `t` can be - /// represented as `Instant` (which means it's inside the bounds of the - /// underlying data structure), `None` otherwise. - pub fn checked_add(&self, duration: Duration) -> Option { - self.std.checked_add(duration).map(Instant::from_std) - } - - /// Returns `Some(t)` where `t` is the time `self - duration` if `t` can be - /// represented as `Instant` (which means it's inside the bounds of the - /// underlying data structure), `None` otherwise. 
- pub fn checked_sub(&self, duration: Duration) -> Option { - self.std.checked_sub(duration).map(Instant::from_std) - } -} - -impl From for Instant { - fn from(time: std::time::Instant) -> Instant { - Instant::from_std(time) - } -} - -impl From for std::time::Instant { - fn from(time: Instant) -> std::time::Instant { - time.into_std() - } -} - -impl ops::Add for Instant { - type Output = Instant; - - fn add(self, other: Duration) -> Instant { - Instant::from_std(self.std + other) - } -} - -impl ops::AddAssign for Instant { - fn add_assign(&mut self, rhs: Duration) { - *self = *self + rhs; - } -} - -impl ops::Sub for Instant { - type Output = Duration; - - fn sub(self, rhs: Instant) -> Duration { - self.std - rhs.std - } -} - -impl ops::Sub for Instant { - type Output = Instant; - - fn sub(self, rhs: Duration) -> Instant { - Instant::from_std(self.std - rhs) - } -} - -impl ops::SubAssign for Instant { - fn sub_assign(&mut self, rhs: Duration) { - *self = *self - rhs; - } -} - -impl fmt::Debug for Instant { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - self.std.fmt(fmt) - } -} - -#[cfg(not(feature = "test-util"))] -mod variant { - use super::Instant; - - pub(super) fn now() -> Instant { - Instant::from_std(std::time::Instant::now()) - } -} - -#[cfg(feature = "test-util")] -mod variant { - use super::Instant; - - pub(super) fn now() -> Instant { - crate::time::clock::now() - } -} diff --git a/third_party/rust/tokio-0.2.25/src/time/interval.rs b/third_party/rust/tokio-0.2.25/src/time/interval.rs deleted file mode 100644 index 6a97f59b5aa6..000000000000 --- a/third_party/rust/tokio-0.2.25/src/time/interval.rs +++ /dev/null @@ -1,175 +0,0 @@ -use crate::future::poll_fn; -use crate::time::{delay_until, Delay, Duration, Instant}; - -use std::future::Future; -use std::pin::Pin; -use std::task::{Context, Poll}; - -/// Creates new `Interval` that yields with interval of `duration`. The first -/// tick completes immediately. -/// -/// An interval will tick indefinitely. At any time, the `Interval` value can be -/// dropped. This cancels the interval. -/// -/// This function is equivalent to `interval_at(Instant::now(), period)`. -/// -/// # Panics -/// -/// This function panics if `period` is zero. -/// -/// # Examples -/// -/// ``` -/// use tokio::time::{self, Duration}; -/// -/// #[tokio::main] -/// async fn main() { -/// let mut interval = time::interval(Duration::from_millis(10)); -/// -/// interval.tick().await; -/// interval.tick().await; -/// interval.tick().await; -/// -/// // approximately 20ms have elapsed. -/// } -/// ``` -/// -/// A simple example using `interval` to execute a task every two seconds. -/// -/// The difference between `interval` and [`delay_for`] is that an `interval` -/// measures the time since the last tick, which means that `.tick().await` -/// may wait for a shorter time than the duration specified for the interval -/// if some time has passed between calls to `.tick().await`. -/// -/// If the tick in the example below was replaced with [`delay_for`], the task -/// would only be executed once every three seconds, and not every two -/// seconds. 
-/// -/// ``` -/// use tokio::time; -/// -/// async fn task_that_takes_a_second() { -/// println!("hello"); -/// time::delay_for(time::Duration::from_secs(1)).await -/// } -/// -/// #[tokio::main] -/// async fn main() { -/// let mut interval = time::interval(time::Duration::from_secs(2)); -/// for _i in 0..5 { -/// interval.tick().await; -/// task_that_takes_a_second().await; -/// } -/// } -/// ``` -/// -/// [`delay_for`]: crate::time::delay_for() -pub fn interval(period: Duration) -> Interval { - assert!(period > Duration::new(0, 0), "`period` must be non-zero."); - - interval_at(Instant::now(), period) -} - -/// Creates new `Interval` that yields with interval of `period` with the -/// first tick completing at `start`. -/// -/// An interval will tick indefinitely. At any time, the `Interval` value can be -/// dropped. This cancels the interval. -/// -/// # Panics -/// -/// This function panics if `period` is zero. -/// -/// # Examples -/// -/// ``` -/// use tokio::time::{interval_at, Duration, Instant}; -/// -/// #[tokio::main] -/// async fn main() { -/// let start = Instant::now() + Duration::from_millis(50); -/// let mut interval = interval_at(start, Duration::from_millis(10)); -/// -/// interval.tick().await; -/// interval.tick().await; -/// interval.tick().await; -/// -/// // approximately 70ms have elapsed. -/// } -/// ``` -pub fn interval_at(start: Instant, period: Duration) -> Interval { - assert!(period > Duration::new(0, 0), "`period` must be non-zero."); - - Interval { - delay: delay_until(start), - period, - } -} - -/// Stream returned by [`interval`](interval) and [`interval_at`](interval_at). -/// -/// This type only implements the [`Stream`] trait if the "stream" feature is -/// enabled. -/// -/// [`Stream`]: trait@crate::stream::Stream -#[derive(Debug)] -pub struct Interval { - /// Future that completes the next time the `Interval` yields a value. - delay: Delay, - - /// The duration between values yielded by `Interval`. - period: Duration, -} - -impl Interval { - #[doc(hidden)] // TODO: document - pub fn poll_tick(&mut self, cx: &mut Context<'_>) -> Poll { - // Wait for the delay to be done - ready!(Pin::new(&mut self.delay).poll(cx)); - - // Get the `now` by looking at the `delay` deadline - let now = self.delay.deadline(); - - // The next interval value is `duration` after the one that just - // yielded. - let next = now + self.period; - self.delay.reset(next); - - // Return the current instant - Poll::Ready(now) - } - - /// Completes when the next instant in the interval has been reached. - /// - /// # Examples - /// - /// ``` - /// use tokio::time; - /// - /// use std::time::Duration; - /// - /// #[tokio::main] - /// async fn main() { - /// let mut interval = time::interval(Duration::from_millis(10)); - /// - /// interval.tick().await; - /// interval.tick().await; - /// interval.tick().await; - /// - /// // approximately 20ms have elapsed. 
- /// } - /// ``` - #[allow(clippy::should_implement_trait)] // TODO: rename (tokio-rs/tokio#1261) - pub async fn tick(&mut self) -> Instant { - poll_fn(|cx| self.poll_tick(cx)).await - } -} - -#[cfg(feature = "stream")] -impl crate::stream::Stream for Interval { - type Item = Instant; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Poll::Ready(Some(ready!(self.poll_tick(cx)))) - } -} diff --git a/third_party/rust/tokio-0.2.25/src/time/mod.rs b/third_party/rust/tokio-0.2.25/src/time/mod.rs deleted file mode 100644 index c532b2c175f6..000000000000 --- a/third_party/rust/tokio-0.2.25/src/time/mod.rs +++ /dev/null @@ -1,162 +0,0 @@ -//! Utilities for tracking time. -//! -//! This module provides a number of types for executing code after a set period -//! of time. -//! -//! * `Delay` is a future that does no work and completes at a specific `Instant` -//! in time. -//! -//! * `Interval` is a stream yielding a value at a fixed period. It is -//! initialized with a `Duration` and repeatedly yields each time the duration -//! elapses. -//! -//! * `Timeout`: Wraps a future or stream, setting an upper bound to the amount -//! of time it is allowed to execute. If the future or stream does not -//! complete in time, then it is canceled and an error is returned. -//! -//! * `DelayQueue`: A queue where items are returned once the requested delay -//! has expired. -//! -//! These types are sufficient for handling a large number of scenarios -//! involving time. -//! -//! These types must be used from within the context of the `Runtime`. -//! -//! # Examples -//! -//! Wait 100ms and print "100 ms have elapsed" -//! -//! ``` -//! use tokio::time::delay_for; -//! -//! use std::time::Duration; -//! -//! -//! #[tokio::main] -//! async fn main() { -//! delay_for(Duration::from_millis(100)).await; -//! println!("100 ms have elapsed"); -//! } -//! ``` -//! -//! Require that an operation takes no more than 300ms. Note that this uses the -//! `timeout` function on the `FutureExt` trait. This trait is included in the -//! prelude. -//! -//! ``` -//! use tokio::time::{timeout, Duration}; -//! -//! async fn long_future() { -//! // do work here -//! } -//! -//! # async fn dox() { -//! let res = timeout(Duration::from_secs(1), long_future()).await; -//! -//! if res.is_err() { -//! println!("operation timed out"); -//! } -//! # } -//! ``` -//! -//! A simple example using [`interval`] to execute a task every two seconds. -//! -//! The difference between [`interval`] and [`delay_for`] is that an -//! [`interval`] measures the time since the last tick, which means that -//! `.tick().await` may wait for a shorter time than the duration specified -//! for the interval if some time has passed between calls to `.tick().await`. -//! -//! If the tick in the example below was replaced with [`delay_for`], the task -//! would only be executed once every three seconds, and not every two -//! seconds. -//! -//! ``` -//! use tokio::time; -//! -//! async fn task_that_takes_a_second() { -//! println!("hello"); -//! time::delay_for(time::Duration::from_secs(1)).await -//! } -//! -//! #[tokio::main] -//! async fn main() { -//! let mut interval = time::interval(time::Duration::from_secs(2)); -//! for _i in 0..5 { -//! interval.tick().await; -//! task_that_takes_a_second().await; -//! } -//! } -//! ``` -//! -//! [`delay_for`]: crate::time::delay_for() -//! 
[`interval`]: crate::time::interval() - -mod clock; -pub(crate) use self::clock::Clock; -#[cfg(feature = "test-util")] -pub use clock::{advance, pause, resume}; - -pub mod delay_queue; -#[doc(inline)] -pub use delay_queue::DelayQueue; - -mod delay; -pub use delay::{delay_for, delay_until, Delay}; - -pub(crate) mod driver; - -mod error; -pub use error::Error; - -mod instant; -pub use self::instant::Instant; - -mod interval; -pub use interval::{interval, interval_at, Interval}; - -mod timeout; -#[doc(inline)] -pub use timeout::{timeout, timeout_at, Elapsed, Timeout}; - -cfg_stream! { - mod throttle; - pub use throttle::{throttle, Throttle}; -} - -mod wheel; - -#[cfg(test)] -#[cfg(not(loom))] -mod tests; - -// Re-export for convenience -pub use std::time::Duration; - -// ===== Internal utils ===== - -enum Round { - Up, - Down, -} - -/// Convert a `Duration` to milliseconds, rounding up and saturating at -/// `u64::MAX`. -/// -/// The saturating is fine because `u64::MAX` milliseconds are still many -/// million years. -#[inline] -fn ms(duration: Duration, round: Round) -> u64 { - const NANOS_PER_MILLI: u32 = 1_000_000; - const MILLIS_PER_SEC: u64 = 1_000; - - // Round up. - let millis = match round { - Round::Up => (duration.subsec_nanos() + NANOS_PER_MILLI - 1) / NANOS_PER_MILLI, - Round::Down => duration.subsec_millis(), - }; - - duration - .as_secs() - .saturating_mul(MILLIS_PER_SEC) - .saturating_add(u64::from(millis)) -} diff --git a/third_party/rust/tokio-0.2.25/src/time/tests/mod.rs b/third_party/rust/tokio-0.2.25/src/time/tests/mod.rs deleted file mode 100644 index 4710d470f783..000000000000 --- a/third_party/rust/tokio-0.2.25/src/time/tests/mod.rs +++ /dev/null @@ -1,22 +0,0 @@ -mod test_delay; - -use crate::time::{self, Instant}; -use std::time::Duration; - -fn assert_send() {} -fn assert_sync() {} - -#[test] -fn registration_is_send_and_sync() { - use crate::time::driver::Registration; - - assert_send::(); - assert_sync::(); -} - -#[test] -#[should_panic] -fn delay_is_eager() { - let when = Instant::now() + Duration::from_millis(100); - let _ = time::delay_until(when); -} diff --git a/third_party/rust/tokio-0.2.25/src/time/tests/test_delay.rs b/third_party/rust/tokio-0.2.25/src/time/tests/test_delay.rs deleted file mode 100644 index b732e4584e88..000000000000 --- a/third_party/rust/tokio-0.2.25/src/time/tests/test_delay.rs +++ /dev/null @@ -1,449 +0,0 @@ -use crate::park::{Park, Unpark}; -use crate::time::driver::{Driver, Entry, Handle}; -use crate::time::Clock; -use crate::time::{Duration, Instant}; - -use tokio_test::task; -use tokio_test::{assert_ok, assert_pending, assert_ready_ok}; - -use std::sync::Arc; - -macro_rules! poll { - ($e:expr) => { - $e.enter(|cx, e| e.poll_elapsed(cx)) - }; -} - -#[test] -fn frozen_utility_returns_correct_advanced_duration() { - let clock = Clock::new(); - clock.pause(); - let start = clock.now(); - - clock.advance(ms(10)); - assert_eq!(clock.now() - start, ms(10)); -} - -#[test] -fn immediate_delay() { - let (mut driver, clock, handle) = setup(); - let start = clock.now(); - - let when = clock.now(); - let mut e = task::spawn(delay_until(&handle, when)); - - assert_ready_ok!(poll!(e)); - - assert_ok!(driver.park_timeout(Duration::from_millis(1000))); - - // The time has not advanced. The `turn` completed immediately. 
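// Illustrative sketch only: the same millisecond rounding rule as the removed
// `ms` helper above (round the sub-millisecond remainder up, saturate at
// u64::MAX), reimplemented here as a standalone function so the behaviour can
// be checked in isolation; it is not code from the vendored crate.
use std::time::Duration;

fn to_ms_round_up(d: Duration) -> u64 {
    const NANOS_PER_MILLI: u32 = 1_000_000;
    // Round the fractional part up to the next whole millisecond.
    let millis = (d.subsec_nanos() + NANOS_PER_MILLI - 1) / NANOS_PER_MILLI;
    d.as_secs()
        .saturating_mul(1_000)
        .saturating_add(u64::from(millis))
}

fn main() {
    // One nanosecond past a second still counts as a full extra millisecond.
    assert_eq!(to_ms_round_up(Duration::new(1, 1)), 1_001);
    // An exact millisecond boundary is not rounded any further.
    assert_eq!(to_ms_round_up(Duration::from_millis(5)), 5);
    // Very large durations saturate instead of overflowing.
    assert_eq!(to_ms_round_up(Duration::from_secs(u64::MAX)), u64::MAX);
}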
- assert_eq!(clock.now() - start, ms(1000)); -} - -#[test] -fn delayed_delay_level_0() { - let (mut driver, clock, handle) = setup(); - let start = clock.now(); - - for &i in &[1, 10, 60] { - // Create a `Delay` that elapses in the future - let mut e = task::spawn(delay_until(&handle, start + ms(i))); - - // The delay has not elapsed. - assert_pending!(poll!(e)); - - assert_ok!(driver.park()); - assert_eq!(clock.now() - start, ms(i)); - - assert_ready_ok!(poll!(e)); - } -} - -#[test] -fn sub_ms_delayed_delay() { - let (mut driver, clock, handle) = setup(); - - for _ in 0..5 { - let deadline = clock.now() + ms(1) + Duration::new(0, 1); - - let mut e = task::spawn(delay_until(&handle, deadline)); - - assert_pending!(poll!(e)); - - assert_ok!(driver.park()); - assert_ready_ok!(poll!(e)); - - assert!(clock.now() >= deadline); - - clock.advance(Duration::new(0, 1)); - } -} - -#[test] -fn delayed_delay_wrapping_level_0() { - let (mut driver, clock, handle) = setup(); - let start = clock.now(); - - assert_ok!(driver.park_timeout(ms(5))); - assert_eq!(clock.now() - start, ms(5)); - - let mut e = task::spawn(delay_until(&handle, clock.now() + ms(60))); - - assert_pending!(poll!(e)); - - assert_ok!(driver.park()); - assert_eq!(clock.now() - start, ms(64)); - assert_pending!(poll!(e)); - - assert_ok!(driver.park()); - assert_eq!(clock.now() - start, ms(65)); - - assert_ready_ok!(poll!(e)); -} - -#[test] -fn timer_wrapping_with_higher_levels() { - let (mut driver, clock, handle) = setup(); - let start = clock.now(); - - // Set delay to hit level 1 - let mut e1 = task::spawn(delay_until(&handle, clock.now() + ms(64))); - assert_pending!(poll!(e1)); - - // Turn a bit - assert_ok!(driver.park_timeout(ms(5))); - - // Set timeout such that it will hit level 0, but wrap - let mut e2 = task::spawn(delay_until(&handle, clock.now() + ms(60))); - assert_pending!(poll!(e2)); - - // This should result in s1 firing - assert_ok!(driver.park()); - assert_eq!(clock.now() - start, ms(64)); - - assert_ready_ok!(poll!(e1)); - assert_pending!(poll!(e2)); - - assert_ok!(driver.park()); - assert_eq!(clock.now() - start, ms(65)); - - assert_ready_ok!(poll!(e1)); -} - -#[test] -fn delay_with_deadline_in_past() { - let (mut driver, clock, handle) = setup(); - let start = clock.now(); - - // Create `Delay` that elapsed immediately. - let mut e = task::spawn(delay_until(&handle, clock.now() - ms(100))); - - // Even though the delay expires in the past, it is not ready yet - // because the timer must observe it. - assert_ready_ok!(poll!(e)); - - // Turn the timer, it runs for the elapsed time - assert_ok!(driver.park_timeout(ms(1000))); - - // The time has not advanced. The `turn` completed immediately. - assert_eq!(clock.now() - start, ms(1000)); -} - -#[test] -fn delayed_delay_level_1() { - let (mut driver, clock, handle) = setup(); - let start = clock.now(); - - // Create a `Delay` that elapses in the future - let mut e = task::spawn(delay_until(&handle, clock.now() + ms(234))); - - // The delay has not elapsed. - assert_pending!(poll!(e)); - - // Turn the timer, this will wake up to cascade the timer down. - assert_ok!(driver.park_timeout(ms(1000))); - assert_eq!(clock.now() - start, ms(192)); - - // The delay has not elapsed. - assert_pending!(poll!(e)); - - // Turn the timer again - assert_ok!(driver.park_timeout(ms(1000))); - assert_eq!(clock.now() - start, ms(234)); - - // The delay has elapsed. 
- assert_ready_ok!(poll!(e)); - - let (mut driver, clock, handle) = setup(); - let start = clock.now(); - - // Create a `Delay` that elapses in the future - let mut e = task::spawn(delay_until(&handle, clock.now() + ms(234))); - - // The delay has not elapsed. - assert_pending!(poll!(e)); - - // Turn the timer with a smaller timeout than the cascade. - assert_ok!(driver.park_timeout(ms(100))); - assert_eq!(clock.now() - start, ms(100)); - - assert_pending!(poll!(e)); - - // Turn the timer, this will wake up to cascade the timer down. - assert_ok!(driver.park_timeout(ms(1000))); - assert_eq!(clock.now() - start, ms(192)); - - // The delay has not elapsed. - assert_pending!(poll!(e)); - - // Turn the timer again - assert_ok!(driver.park_timeout(ms(1000))); - assert_eq!(clock.now() - start, ms(234)); - - // The delay has elapsed. - assert_ready_ok!(poll!(e)); -} - -#[test] -fn concurrently_set_two_timers_second_one_shorter() { - let (mut driver, clock, handle) = setup(); - let start = clock.now(); - - let mut e1 = task::spawn(delay_until(&handle, clock.now() + ms(500))); - let mut e2 = task::spawn(delay_until(&handle, clock.now() + ms(200))); - - // The delay has not elapsed - assert_pending!(poll!(e1)); - assert_pending!(poll!(e2)); - - // Delay until a cascade - assert_ok!(driver.park()); - assert_eq!(clock.now() - start, ms(192)); - - // Delay until the second timer. - assert_ok!(driver.park()); - assert_eq!(clock.now() - start, ms(200)); - - // The shorter delay fires - assert_ready_ok!(poll!(e2)); - assert_pending!(poll!(e1)); - - assert_ok!(driver.park()); - assert_eq!(clock.now() - start, ms(448)); - - assert_pending!(poll!(e1)); - - // Turn again, this time the time will advance to the second delay - assert_ok!(driver.park()); - assert_eq!(clock.now() - start, ms(500)); - - assert_ready_ok!(poll!(e1)); -} - -#[test] -fn short_delay() { - let (mut driver, clock, handle) = setup(); - let start = clock.now(); - - // Create a `Delay` that elapses in the future - let mut e = task::spawn(delay_until(&handle, clock.now() + ms(1))); - - // The delay has not elapsed. - assert_pending!(poll!(e)); - - // Turn the timer, but not enough time will go by. - assert_ok!(driver.park()); - - // The delay has elapsed. - assert_ready_ok!(poll!(e)); - - // The time has advanced to the point of the delay elapsing. - assert_eq!(clock.now() - start, ms(1)); -} - -#[test] -fn sorta_long_delay_until() { - const MIN_5: u64 = 5 * 60 * 1000; - - let (mut driver, clock, handle) = setup(); - let start = clock.now(); - - // Create a `Delay` that elapses in the future - let mut e = task::spawn(delay_until(&handle, clock.now() + ms(MIN_5))); - - // The delay has not elapsed. - assert_pending!(poll!(e)); - - let cascades = &[262_144, 262_144 + 9 * 4096, 262_144 + 9 * 4096 + 15 * 64]; - - for &elapsed in cascades { - assert_ok!(driver.park()); - assert_eq!(clock.now() - start, ms(elapsed)); - - assert_pending!(poll!(e)); - } - - assert_ok!(driver.park()); - assert_eq!(clock.now() - start, ms(MIN_5)); - - // The delay has elapsed. - assert_ready_ok!(poll!(e)); -} - -#[test] -fn very_long_delay() { - const MO_5: u64 = 5 * 30 * 24 * 60 * 60 * 1000; - - let (mut driver, clock, handle) = setup(); - let start = clock.now(); - - // Create a `Delay` that elapses in the future - let mut e = task::spawn(delay_until(&handle, clock.now() + ms(MO_5))); - - // The delay has not elapsed. 
- assert_pending!(poll!(e)); - - let cascades = &[ - 12_884_901_888, - 12_952_010_752, - 12_959_875_072, - 12_959_997_952, - ]; - - for &elapsed in cascades { - assert_ok!(driver.park()); - assert_eq!(clock.now() - start, ms(elapsed)); - - assert_pending!(poll!(e)); - } - - // Turn the timer, but not enough time will go by. - assert_ok!(driver.park()); - - // The time has advanced to the point of the delay elapsing. - assert_eq!(clock.now() - start, ms(MO_5)); - - // The delay has elapsed. - assert_ready_ok!(poll!(e)); -} - -#[test] -fn unpark_is_delayed() { - // A special park that will take much longer than the requested duration - struct MockPark(Clock); - - struct MockUnpark; - - impl Park for MockPark { - type Unpark = MockUnpark; - type Error = (); - - fn unpark(&self) -> Self::Unpark { - MockUnpark - } - - fn park(&mut self) -> Result<(), Self::Error> { - panic!("parking forever"); - } - - fn park_timeout(&mut self, duration: Duration) -> Result<(), Self::Error> { - assert_eq!(duration, ms(0)); - self.0.advance(ms(436)); - Ok(()) - } - - fn shutdown(&mut self) {} - } - - impl Unpark for MockUnpark { - fn unpark(&self) {} - } - - let clock = Clock::new(); - clock.pause(); - let start = clock.now(); - let mut driver = Driver::new(MockPark(clock.clone()), clock.clone()); - let handle = driver.handle(); - - let mut e1 = task::spawn(delay_until(&handle, clock.now() + ms(100))); - let mut e2 = task::spawn(delay_until(&handle, clock.now() + ms(101))); - let mut e3 = task::spawn(delay_until(&handle, clock.now() + ms(200))); - - assert_pending!(poll!(e1)); - assert_pending!(poll!(e2)); - assert_pending!(poll!(e3)); - - assert_ok!(driver.park()); - - assert_eq!(clock.now() - start, ms(500)); - - assert_ready_ok!(poll!(e1)); - assert_ready_ok!(poll!(e2)); - assert_ready_ok!(poll!(e3)); -} - -#[test] -fn set_timeout_at_deadline_greater_than_max_timer() { - const YR_1: u64 = 365 * 24 * 60 * 60 * 1000; - const YR_5: u64 = 5 * YR_1; - - let (mut driver, clock, handle) = setup(); - let start = clock.now(); - - for _ in 0..5 { - assert_ok!(driver.park_timeout(ms(YR_1))); - } - - let mut e = task::spawn(delay_until(&handle, clock.now() + ms(1))); - assert_pending!(poll!(e)); - - assert_ok!(driver.park_timeout(ms(1000))); - assert_eq!(clock.now() - start, ms(YR_5) + ms(1)); - - assert_ready_ok!(poll!(e)); -} - -fn setup() -> (Driver, Clock, Handle) { - let clock = Clock::new(); - clock.pause(); - let driver = Driver::new(MockPark(clock.clone()), clock.clone()); - let handle = driver.handle(); - - (driver, clock, handle) -} - -fn delay_until(handle: &Handle, when: Instant) -> Arc { - Entry::new(&handle, when, ms(0)) -} - -struct MockPark(Clock); - -struct MockUnpark; - -impl Park for MockPark { - type Unpark = MockUnpark; - type Error = (); - - fn unpark(&self) -> Self::Unpark { - MockUnpark - } - - fn park(&mut self) -> Result<(), Self::Error> { - panic!("parking forever"); - } - - fn park_timeout(&mut self, duration: Duration) -> Result<(), Self::Error> { - self.0.advance(duration); - Ok(()) - } - - fn shutdown(&mut self) {} -} - -impl Unpark for MockUnpark { - fn unpark(&self) {} -} - -fn ms(n: u64) -> Duration { - Duration::from_millis(n) -} diff --git a/third_party/rust/tokio-0.2.25/src/time/timeout.rs b/third_party/rust/tokio-0.2.25/src/time/timeout.rs deleted file mode 100644 index efc3dc5c069f..000000000000 --- a/third_party/rust/tokio-0.2.25/src/time/timeout.rs +++ /dev/null @@ -1,184 +0,0 @@ -//! Allows a future to execute for a maximum amount of time. -//! -//! 
See [`Timeout`] documentation for more details. -//! -//! [`Timeout`]: struct@Timeout - -use crate::time::{delay_until, Delay, Duration, Instant}; - -use pin_project_lite::pin_project; -use std::fmt; -use std::future::Future; -use std::pin::Pin; -use std::task::{self, Poll}; - -/// Require a `Future` to complete before the specified duration has elapsed. -/// -/// If the future completes before the duration has elapsed, then the completed -/// value is returned. Otherwise, an error is returned and the future is -/// canceled. -/// -/// # Cancelation -/// -/// Cancelling a timeout is done by dropping the future. No additional cleanup -/// or other work is required. -/// -/// The original future may be obtained by calling [`Timeout::into_inner`]. This -/// consumes the `Timeout`. -/// -/// # Examples -/// -/// Create a new `Timeout` set to expire in 10 milliseconds. -/// -/// ```rust -/// use tokio::time::timeout; -/// use tokio::sync::oneshot; -/// -/// use std::time::Duration; -/// -/// # async fn dox() { -/// let (tx, rx) = oneshot::channel(); -/// # tx.send(()).unwrap(); -/// -/// // Wrap the future with a `Timeout` set to expire in 10 milliseconds. -/// if let Err(_) = timeout(Duration::from_millis(10), rx).await { -/// println!("did not receive value within 10 ms"); -/// } -/// # } -/// ``` -pub fn timeout(duration: Duration, future: T) -> Timeout -where - T: Future, -{ - let delay = Delay::new_timeout(Instant::now() + duration, duration); - Timeout::new_with_delay(future, delay) -} - -/// Require a `Future` to complete before the specified instant in time. -/// -/// If the future completes before the instant is reached, then the completed -/// value is returned. Otherwise, an error is returned. -/// -/// # Cancelation -/// -/// Cancelling a timeout is done by dropping the future. No additional cleanup -/// or other work is required. -/// -/// The original future may be obtained by calling [`Timeout::into_inner`]. This -/// consumes the `Timeout`. -/// -/// # Examples -/// -/// Create a new `Timeout` set to expire in 10 milliseconds. -/// -/// ```rust -/// use tokio::time::{Instant, timeout_at}; -/// use tokio::sync::oneshot; -/// -/// use std::time::Duration; -/// -/// # async fn dox() { -/// let (tx, rx) = oneshot::channel(); -/// # tx.send(()).unwrap(); -/// -/// // Wrap the future with a `Timeout` set to expire 10 milliseconds into the -/// // future. -/// if let Err(_) = timeout_at(Instant::now() + Duration::from_millis(10), rx).await { -/// println!("did not receive value within 10 ms"); -/// } -/// # } -/// ``` -pub fn timeout_at(deadline: Instant, future: T) -> Timeout -where - T: Future, -{ - let delay = delay_until(deadline); - - Timeout { - value: future, - delay, - } -} - -pin_project! { - /// Future returned by [`timeout`](timeout) and [`timeout_at`](timeout_at). - #[must_use = "futures do nothing unless you `.await` or poll them"] - #[derive(Debug)] - pub struct Timeout { - #[pin] - value: T, - #[pin] - delay: Delay, - } -} - -/// Error returned by `Timeout`. -#[derive(Debug, PartialEq)] -pub struct Elapsed(()); - -impl Elapsed { - // Used on StreamExt::timeout - #[allow(unused)] - pub(crate) fn new() -> Self { - Elapsed(()) - } -} - -impl Timeout { - pub(crate) fn new_with_delay(value: T, delay: Delay) -> Timeout { - Timeout { value, delay } - } - - /// Gets a reference to the underlying value in this timeout. - pub fn get_ref(&self) -> &T { - &self.value - } - - /// Gets a mutable reference to the underlying value in this timeout. 
- pub fn get_mut(&mut self) -> &mut T { - &mut self.value - } - - /// Consumes this timeout, returning the underlying value. - pub fn into_inner(self) -> T { - self.value - } -} - -impl Future for Timeout -where - T: Future, -{ - type Output = Result; - - fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { - let me = self.project(); - - // First, try polling the future - if let Poll::Ready(v) = me.value.poll(cx) { - return Poll::Ready(Ok(v)); - } - - // Now check the timer - match me.delay.poll(cx) { - Poll::Ready(()) => Poll::Ready(Err(Elapsed(()))), - Poll::Pending => Poll::Pending, - } - } -} - -// ===== impl Elapsed ===== - -impl fmt::Display for Elapsed { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - "deadline has elapsed".fmt(fmt) - } -} - -impl std::error::Error for Elapsed {} - -impl From for std::io::Error { - fn from(_err: Elapsed) -> std::io::Error { - std::io::ErrorKind::TimedOut.into() - } -} diff --git a/third_party/rust/tokio-0.2.25/src/util/bit.rs b/third_party/rust/tokio-0.2.25/src/util/bit.rs deleted file mode 100644 index e61ac2165a3b..000000000000 --- a/third_party/rust/tokio-0.2.25/src/util/bit.rs +++ /dev/null @@ -1,85 +0,0 @@ -use std::fmt; - -#[derive(Clone, Copy)] -pub(crate) struct Pack { - mask: usize, - shift: u32, -} - -impl Pack { - /// Value is packed in the `width` most-significant bits. - pub(crate) const fn most_significant(width: u32) -> Pack { - let mask = mask_for(width).reverse_bits(); - - Pack { - mask, - shift: mask.trailing_zeros(), - } - } - - /// Value is packed in the `width` least-significant bits. - pub(crate) const fn least_significant(width: u32) -> Pack { - let mask = mask_for(width); - - Pack { mask, shift: 0 } - } - - /// Value is packed in the `width` more-significant bits. - pub(crate) const fn then(&self, width: u32) -> Pack { - let shift = pointer_width() - self.mask.leading_zeros(); - let mask = mask_for(width) << shift; - - Pack { mask, shift } - } - - /// Mask used to unpack value - #[cfg(all(test, loom))] - pub(crate) const fn mask(&self) -> usize { - self.mask - } - - /// Width, in bits, dedicated to storing the value. - pub(crate) const fn width(&self) -> u32 { - pointer_width() - (self.mask >> self.shift).leading_zeros() - } - - /// Max representable value - pub(crate) const fn max_value(&self) -> usize { - (1 << self.width()) - 1 - } - - pub(crate) fn pack(&self, value: usize, base: usize) -> usize { - assert!(value <= self.max_value()); - (base & !self.mask) | (value << self.shift) - } - - pub(crate) fn unpack(&self, src: usize) -> usize { - unpack(src, self.mask, self.shift) - } -} - -impl fmt::Debug for Pack { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - write!( - fmt, - "Pack {{ mask: {:b}, shift: {} }}", - self.mask, self.shift - ) - } -} - -/// Returns the width of a pointer in bits -pub(crate) const fn pointer_width() -> u32 { - std::mem::size_of::() as u32 * 8 -} - -/// Returns a `usize` with the right-most `n` bits set. 
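The `Pack` helper above stores a value in a fixed bit range of a `usize` by combining a mask with a shift. As a rough standalone illustration of that round trip — the width, shift, and values below are made up for the example and nothing here is taken from the crate:

```rust
fn main() {
    // A 3-bit field placed 4 bits from the least-significant end
    // (width and position chosen arbitrarily for the example).
    let width = 3u32;
    let shift = 4u32;
    let mask: usize = ((1usize << width) - 1) << shift;

    // pack: clear the field in `base`, then OR in the shifted value.
    let base: usize = 0b1010_0000_1111;
    let value: usize = 0b101;
    let packed = (base & !mask) | (value << shift);

    // unpack: mask the field out and shift it back down.
    let unpacked = (packed & mask) >> shift;
    assert_eq!(unpacked, value);
    println!("mask={:b} packed={:b} unpacked={:b}", mask, packed, unpacked);
}
```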
-pub(crate) const fn mask_for(n: u32) -> usize { - let shift = 1usize.wrapping_shl(n - 1); - shift | (shift - 1) -} - -/// Unpack a value using a mask & shift -pub(crate) const fn unpack(src: usize, mask: usize, shift: u32) -> usize { - (src & mask) >> shift -} diff --git a/third_party/rust/tokio-0.2.25/src/util/intrusive_double_linked_list.rs b/third_party/rust/tokio-0.2.25/src/util/intrusive_double_linked_list.rs deleted file mode 100644 index 083fa31d3ec6..000000000000 --- a/third_party/rust/tokio-0.2.25/src/util/intrusive_double_linked_list.rs +++ /dev/null @@ -1,788 +0,0 @@ -//! An intrusive double linked list of data - -#![allow(dead_code, unreachable_pub)] - -use core::{ - marker::PhantomPinned, - ops::{Deref, DerefMut}, - ptr::NonNull, -}; - -/// A node which carries data of type `T` and is stored in an intrusive list -#[derive(Debug)] -pub struct ListNode { - /// The previous node in the list. `None` if there is no previous node. - prev: Option>>, - /// The next node in the list. `None` if there is no previous node. - next: Option>>, - /// The data which is associated to this list item - data: T, - /// Prevents `ListNode`s from being `Unpin`. They may never be moved, since - /// the list semantics require addresses to be stable. - _pin: PhantomPinned, -} - -impl ListNode { - /// Creates a new node with the associated data - pub fn new(data: T) -> ListNode { - Self { - prev: None, - next: None, - data, - _pin: PhantomPinned, - } - } -} - -impl Deref for ListNode { - type Target = T; - - fn deref(&self) -> &T { - &self.data - } -} - -impl DerefMut for ListNode { - fn deref_mut(&mut self) -> &mut T { - &mut self.data - } -} - -/// An intrusive linked list of nodes, where each node carries associated data -/// of type `T`. -#[derive(Debug)] -pub struct LinkedList { - head: Option>>, - tail: Option>>, -} - -impl LinkedList { - /// Creates an empty linked list - pub fn new() -> Self { - LinkedList:: { - head: None, - tail: None, - } - } - - /// Adds a node at the front of the linked list. - /// Safety: This function is only safe as long as `node` is guaranteed to - /// get removed from the list before it gets moved or dropped. - /// In addition to this `node` may not be added to another other list before - /// it is removed from the current one. - pub unsafe fn add_front(&mut self, node: &mut ListNode) { - node.next = self.head; - node.prev = None; - if let Some(mut head) = self.head { - head.as_mut().prev = Some(node.into()) - }; - self.head = Some(node.into()); - if self.tail.is_none() { - self.tail = Some(node.into()); - } - } - - /// Inserts a node into the list in a way that the list keeps being sorted. - /// Safety: This function is only safe as long as `node` is guaranteed to - /// get removed from the list before it gets moved or dropped. - /// In addition to this `node` may not be added to another other list before - /// it is removed from the current one. 
- pub unsafe fn add_sorted(&mut self, node: &mut ListNode) - where - T: PartialOrd, - { - if self.head.is_none() { - // First node in the list - self.head = Some(node.into()); - self.tail = Some(node.into()); - return; - } - - let mut prev: Option>> = None; - let mut current = self.head; - - while let Some(mut current_node) = current { - if node.data < current_node.as_ref().data { - // Need to insert before the current node - current_node.as_mut().prev = Some(node.into()); - match prev { - Some(mut prev) => { - prev.as_mut().next = Some(node.into()); - } - None => { - // We are inserting at the beginning of the list - self.head = Some(node.into()); - } - } - node.next = current; - node.prev = prev; - return; - } - prev = current; - current = current_node.as_ref().next; - } - - // We looped through the whole list and the nodes data is bigger or equal - // than everything we found up to now. - // Insert at the end. Since we checked before that the list isn't empty, - // tail always has a value. - node.prev = self.tail; - node.next = None; - self.tail.as_mut().unwrap().as_mut().next = Some(node.into()); - self.tail = Some(node.into()); - } - - /// Returns the first node in the linked list without removing it from the list - /// The function is only safe as long as valid pointers are stored inside - /// the linked list. - /// The returned pointer is only guaranteed to be valid as long as the list - /// is not mutated - pub fn peek_first(&self) -> Option<&mut ListNode> { - // Safety: When the node was inserted it was promised that it is alive - // until it gets removed from the list. - // The returned node has a pointer which constrains it to the lifetime - // of the list. This is ok, since the Node is supposed to outlive - // its insertion in the list. - unsafe { - self.head - .map(|mut node| &mut *(node.as_mut() as *mut ListNode)) - } - } - - /// Returns the last node in the linked list without removing it from the list - /// The function is only safe as long as valid pointers are stored inside - /// the linked list. - /// The returned pointer is only guaranteed to be valid as long as the list - /// is not mutated - pub fn peek_last(&self) -> Option<&mut ListNode> { - // Safety: When the node was inserted it was promised that it is alive - // until it gets removed from the list. - // The returned node has a pointer which constrains it to the lifetime - // of the list. This is ok, since the Node is supposed to outlive - // its insertion in the list. 
- unsafe { - self.tail - .map(|mut node| &mut *(node.as_mut() as *mut ListNode)) - } - } - - /// Removes the first node from the linked list - pub fn remove_first(&mut self) -> Option<&mut ListNode> { - #![allow(clippy::debug_assert_with_mut_call)] - - // Safety: When the node was inserted it was promised that it is alive - // until it gets removed from the list - unsafe { - let mut head = self.head?; - self.head = head.as_mut().next; - - let first_ref = head.as_mut(); - match first_ref.next { - None => { - // This was the only node in the list - debug_assert_eq!(Some(first_ref.into()), self.tail); - self.tail = None; - } - Some(mut next) => { - next.as_mut().prev = None; - } - } - - first_ref.prev = None; - first_ref.next = None; - Some(&mut *(first_ref as *mut ListNode)) - } - } - - /// Removes the last node from the linked list and returns it - pub fn remove_last(&mut self) -> Option<&mut ListNode> { - #![allow(clippy::debug_assert_with_mut_call)] - - // Safety: When the node was inserted it was promised that it is alive - // until it gets removed from the list - unsafe { - let mut tail = self.tail?; - self.tail = tail.as_mut().prev; - - let last_ref = tail.as_mut(); - match last_ref.prev { - None => { - // This was the last node in the list - debug_assert_eq!(Some(last_ref.into()), self.head); - self.head = None; - } - Some(mut prev) => { - prev.as_mut().next = None; - } - } - - last_ref.prev = None; - last_ref.next = None; - Some(&mut *(last_ref as *mut ListNode)) - } - } - - /// Returns whether the linked list doesn not contain any node - pub fn is_empty(&self) -> bool { - if self.head.is_some() { - return false; - } - - debug_assert!(self.tail.is_none()); - true - } - - /// Removes the given `node` from the linked list. - /// Returns whether the `node` was removed. - /// It is also only safe if it is known that the `node` is either part of this - /// list, or of no list at all. If `node` is part of another list, the - /// behavior is undefined. - pub unsafe fn remove(&mut self, node: &mut ListNode) -> bool { - #![allow(clippy::debug_assert_with_mut_call)] - - match node.prev { - None => { - // This might be the first node in the list. If it is not, the - // node is not in the list at all. Since our precondition is that - // the node must either be in this list or in no list, we check that - // the node is really in no list. - if self.head != Some(node.into()) { - debug_assert!(node.next.is_none()); - return false; - } - self.head = node.next; - } - Some(mut prev) => { - debug_assert_eq!(prev.as_ref().next, Some(node.into())); - prev.as_mut().next = node.next; - } - } - - match node.next { - None => { - // This must be the last node in our list. Otherwise the list - // is inconsistent. - debug_assert_eq!(self.tail, Some(node.into())); - self.tail = node.prev; - } - Some(mut next) => { - debug_assert_eq!(next.as_mut().prev, Some(node.into())); - next.as_mut().prev = node.prev; - } - } - - node.next = None; - node.prev = None; - - true - } - - /// Drains the list iby calling a callback on each list node - /// - /// The method does not return an iterator since stopping or deferring - /// draining the list is not permitted. If the method would push nodes to - /// an iterator we could not guarantee that the nodes do not get utilized - /// after having been removed from the list anymore. 
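The drain API described above deliberately takes a callback instead of returning an iterator, so the list keeps control of the traversal and every node is unlinked before the caller ever sees it. A safe, much-simplified analogue of that shape, using a `Vec` stand-in (all names below are illustrative):

```rust
// Draining via a callback: the collection owns the loop, so it can guarantee
// every element is visited, unlike a returned iterator that could be dropped
// half-way through.
fn drain_all<T, F: FnMut(T)>(items: &mut Vec<T>, mut on_each: F) {
    while let Some(item) = items.pop() {
        on_each(item);
    }
}

fn main() {
    let mut v = vec![1, 2, 3];
    let mut seen = Vec::new();
    drain_all(&mut v, |x| seen.push(x));
    assert!(v.is_empty());
    assert_eq!(seen, vec![3, 2, 1]);
}
```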
- pub fn drain(&mut self, mut func: F) - where - F: FnMut(&mut ListNode), - { - let mut current = self.head; - self.head = None; - self.tail = None; - - while let Some(mut node) = current { - // Safety: The nodes have not been removed from the list yet and must - // therefore contain valid data. The nodes can also not be added to - // the list again during iteration, since the list is mutably borrowed. - unsafe { - let node_ref = node.as_mut(); - current = node_ref.next; - - node_ref.next = None; - node_ref.prev = None; - - // Note: We do not reset the pointers from the next element in the - // list to the current one since we will iterate over the whole - // list anyway, and therefore clean up all pointers. - - func(node_ref); - } - } - } - - /// Drains the list in reverse order by calling a callback on each list node - /// - /// The method does not return an iterator since stopping or deferring - /// draining the list is not permitted. If the method would push nodes to - /// an iterator we could not guarantee that the nodes do not get utilized - /// after having been removed from the list anymore. - pub fn reverse_drain(&mut self, mut func: F) - where - F: FnMut(&mut ListNode), - { - let mut current = self.tail; - self.head = None; - self.tail = None; - - while let Some(mut node) = current { - // Safety: The nodes have not been removed from the list yet and must - // therefore contain valid data. The nodes can also not be added to - // the list again during iteration, since the list is mutably borrowed. - unsafe { - let node_ref = node.as_mut(); - current = node_ref.prev; - - node_ref.next = None; - node_ref.prev = None; - - // Note: We do not reset the pointers from the next element in the - // list to the current one since we will iterate over the whole - // list anyway, and therefore clean up all pointers. 
- - func(node_ref); - } - } - } -} - -#[cfg(all(test, feature = "std"))] // Tests make use of Vec at the moment -mod tests { - use super::*; - - fn collect_list(mut list: LinkedList) -> Vec { - let mut result = Vec::new(); - list.drain(|node| { - result.push(**node); - }); - result - } - - fn collect_reverse_list(mut list: LinkedList) -> Vec { - let mut result = Vec::new(); - list.reverse_drain(|node| { - result.push(**node); - }); - result - } - - unsafe fn add_nodes(list: &mut LinkedList, nodes: &mut [&mut ListNode]) { - for node in nodes.iter_mut() { - list.add_front(node); - } - } - - unsafe fn assert_clean(node: &mut ListNode) { - assert!(node.next.is_none()); - assert!(node.prev.is_none()); - } - - #[test] - fn insert_and_iterate() { - unsafe { - let mut a = ListNode::new(5); - let mut b = ListNode::new(7); - let mut c = ListNode::new(31); - - let mut setup = |list: &mut LinkedList| { - assert_eq!(true, list.is_empty()); - list.add_front(&mut c); - assert_eq!(31, **list.peek_first().unwrap()); - assert_eq!(false, list.is_empty()); - list.add_front(&mut b); - assert_eq!(7, **list.peek_first().unwrap()); - list.add_front(&mut a); - assert_eq!(5, **list.peek_first().unwrap()); - }; - - let mut list = LinkedList::new(); - setup(&mut list); - let items: Vec = collect_list(list); - assert_eq!([5, 7, 31].to_vec(), items); - - let mut list = LinkedList::new(); - setup(&mut list); - let items: Vec = collect_reverse_list(list); - assert_eq!([31, 7, 5].to_vec(), items); - } - } - - #[test] - fn add_sorted() { - unsafe { - let mut a = ListNode::new(5); - let mut b = ListNode::new(7); - let mut c = ListNode::new(31); - let mut d = ListNode::new(99); - - let mut list = LinkedList::new(); - list.add_sorted(&mut a); - let items: Vec = collect_list(list); - assert_eq!([5].to_vec(), items); - - let mut list = LinkedList::new(); - list.add_sorted(&mut a); - let items: Vec = collect_reverse_list(list); - assert_eq!([5].to_vec(), items); - - let mut list = LinkedList::new(); - add_nodes(&mut list, &mut [&mut d, &mut c, &mut b]); - list.add_sorted(&mut a); - let items: Vec = collect_list(list); - assert_eq!([5, 7, 31, 99].to_vec(), items); - - let mut list = LinkedList::new(); - add_nodes(&mut list, &mut [&mut d, &mut c, &mut b]); - list.add_sorted(&mut a); - let items: Vec = collect_reverse_list(list); - assert_eq!([99, 31, 7, 5].to_vec(), items); - - let mut list = LinkedList::new(); - add_nodes(&mut list, &mut [&mut d, &mut c, &mut a]); - list.add_sorted(&mut b); - let items: Vec = collect_list(list); - assert_eq!([5, 7, 31, 99].to_vec(), items); - - let mut list = LinkedList::new(); - add_nodes(&mut list, &mut [&mut d, &mut c, &mut a]); - list.add_sorted(&mut b); - let items: Vec = collect_reverse_list(list); - assert_eq!([99, 31, 7, 5].to_vec(), items); - - let mut list = LinkedList::new(); - add_nodes(&mut list, &mut [&mut d, &mut b, &mut a]); - list.add_sorted(&mut c); - let items: Vec = collect_list(list); - assert_eq!([5, 7, 31, 99].to_vec(), items); - - let mut list = LinkedList::new(); - add_nodes(&mut list, &mut [&mut d, &mut b, &mut a]); - list.add_sorted(&mut c); - let items: Vec = collect_reverse_list(list); - assert_eq!([99, 31, 7, 5].to_vec(), items); - - let mut list = LinkedList::new(); - add_nodes(&mut list, &mut [&mut c, &mut b, &mut a]); - list.add_sorted(&mut d); - let items: Vec = collect_list(list); - assert_eq!([5, 7, 31, 99].to_vec(), items); - - let mut list = LinkedList::new(); - add_nodes(&mut list, &mut [&mut c, &mut b, &mut a]); - list.add_sorted(&mut d); - let items: Vec = 
collect_reverse_list(list); - assert_eq!([99, 31, 7, 5].to_vec(), items); - } - } - - #[test] - fn drain_and_collect() { - unsafe { - let mut a = ListNode::new(5); - let mut b = ListNode::new(7); - let mut c = ListNode::new(31); - - let mut list = LinkedList::new(); - add_nodes(&mut list, &mut [&mut c, &mut b, &mut a]); - - let taken_items: Vec = collect_list(list); - assert_eq!([5, 7, 31].to_vec(), taken_items); - } - } - - #[test] - fn peek_last() { - unsafe { - let mut a = ListNode::new(5); - let mut b = ListNode::new(7); - let mut c = ListNode::new(31); - - let mut list = LinkedList::new(); - add_nodes(&mut list, &mut [&mut c, &mut b, &mut a]); - - let last = list.peek_last(); - assert_eq!(31, **last.unwrap()); - list.remove_last(); - - let last = list.peek_last(); - assert_eq!(7, **last.unwrap()); - list.remove_last(); - - let last = list.peek_last(); - assert_eq!(5, **last.unwrap()); - list.remove_last(); - - let last = list.peek_last(); - assert!(last.is_none()); - } - } - - #[test] - fn remove_first() { - unsafe { - // We iterate forward and backwards through the manipulated lists - // to make sure pointers in both directions are still ok. - let mut a = ListNode::new(5); - let mut b = ListNode::new(7); - let mut c = ListNode::new(31); - - let mut list = LinkedList::new(); - add_nodes(&mut list, &mut [&mut c, &mut b, &mut a]); - let removed = list.remove_first().unwrap(); - assert_clean(removed); - assert!(!list.is_empty()); - let items: Vec = collect_list(list); - assert_eq!([7, 31].to_vec(), items); - - let mut list = LinkedList::new(); - add_nodes(&mut list, &mut [&mut c, &mut b, &mut a]); - let removed = list.remove_first().unwrap(); - assert_clean(removed); - assert!(!list.is_empty()); - let items: Vec = collect_reverse_list(list); - assert_eq!([31, 7].to_vec(), items); - - let mut list = LinkedList::new(); - add_nodes(&mut list, &mut [&mut b, &mut a]); - let removed = list.remove_first().unwrap(); - assert_clean(removed); - assert!(!list.is_empty()); - let items: Vec = collect_list(list); - assert_eq!([7].to_vec(), items); - - let mut list = LinkedList::new(); - add_nodes(&mut list, &mut [&mut b, &mut a]); - let removed = list.remove_first().unwrap(); - assert_clean(removed); - assert!(!list.is_empty()); - let items: Vec = collect_reverse_list(list); - assert_eq!([7].to_vec(), items); - - let mut list = LinkedList::new(); - add_nodes(&mut list, &mut [&mut a]); - let removed = list.remove_first().unwrap(); - assert_clean(removed); - assert!(list.is_empty()); - let items: Vec = collect_list(list); - assert!(items.is_empty()); - - let mut list = LinkedList::new(); - add_nodes(&mut list, &mut [&mut a]); - let removed = list.remove_first().unwrap(); - assert_clean(removed); - assert!(list.is_empty()); - let items: Vec = collect_reverse_list(list); - assert!(items.is_empty()); - } - } - - #[test] - fn remove_last() { - unsafe { - // We iterate forward and backwards through the manipulated lists - // to make sure pointers in both directions are still ok. 
- let mut a = ListNode::new(5); - let mut b = ListNode::new(7); - let mut c = ListNode::new(31); - - let mut list = LinkedList::new(); - add_nodes(&mut list, &mut [&mut c, &mut b, &mut a]); - let removed = list.remove_last().unwrap(); - assert_clean(removed); - assert!(!list.is_empty()); - let items: Vec = collect_list(list); - assert_eq!([5, 7].to_vec(), items); - - let mut list = LinkedList::new(); - add_nodes(&mut list, &mut [&mut c, &mut b, &mut a]); - let removed = list.remove_last().unwrap(); - assert_clean(removed); - assert!(!list.is_empty()); - let items: Vec = collect_reverse_list(list); - assert_eq!([7, 5].to_vec(), items); - - let mut list = LinkedList::new(); - add_nodes(&mut list, &mut [&mut b, &mut a]); - let removed = list.remove_last().unwrap(); - assert_clean(removed); - assert!(!list.is_empty()); - let items: Vec = collect_list(list); - assert_eq!([5].to_vec(), items); - - let mut list = LinkedList::new(); - add_nodes(&mut list, &mut [&mut b, &mut a]); - let removed = list.remove_last().unwrap(); - assert_clean(removed); - assert!(!list.is_empty()); - let items: Vec = collect_reverse_list(list); - assert_eq!([5].to_vec(), items); - - let mut list = LinkedList::new(); - add_nodes(&mut list, &mut [&mut a]); - let removed = list.remove_last().unwrap(); - assert_clean(removed); - assert!(list.is_empty()); - let items: Vec = collect_list(list); - assert!(items.is_empty()); - - let mut list = LinkedList::new(); - add_nodes(&mut list, &mut [&mut a]); - let removed = list.remove_last().unwrap(); - assert_clean(removed); - assert!(list.is_empty()); - let items: Vec = collect_reverse_list(list); - assert!(items.is_empty()); - } - } - - #[test] - fn remove_by_address() { - unsafe { - let mut a = ListNode::new(5); - let mut b = ListNode::new(7); - let mut c = ListNode::new(31); - - { - // Remove first - let mut list = LinkedList::new(); - add_nodes(&mut list, &mut [&mut c, &mut b, &mut a]); - assert_eq!(true, list.remove(&mut a)); - assert_clean((&mut a).into()); - // a should be no longer there and can't be removed twice - assert_eq!(false, list.remove(&mut a)); - assert_eq!(Some((&mut b).into()), list.head); - assert_eq!(Some((&mut c).into()), b.next); - assert_eq!(Some((&mut b).into()), c.prev); - let items: Vec = collect_list(list); - assert_eq!([7, 31].to_vec(), items); - - let mut list = LinkedList::new(); - add_nodes(&mut list, &mut [&mut c, &mut b, &mut a]); - assert_eq!(true, list.remove(&mut a)); - assert_clean((&mut a).into()); - // a should be no longer there and can't be removed twice - assert_eq!(false, list.remove(&mut a)); - assert_eq!(Some((&mut c).into()), b.next); - assert_eq!(Some((&mut b).into()), c.prev); - let items: Vec = collect_reverse_list(list); - assert_eq!([31, 7].to_vec(), items); - } - - { - // Remove middle - let mut list = LinkedList::new(); - add_nodes(&mut list, &mut [&mut c, &mut b, &mut a]); - assert_eq!(true, list.remove(&mut b)); - assert_clean((&mut b).into()); - assert_eq!(Some((&mut c).into()), a.next); - assert_eq!(Some((&mut a).into()), c.prev); - let items: Vec = collect_list(list); - assert_eq!([5, 31].to_vec(), items); - - let mut list = LinkedList::new(); - add_nodes(&mut list, &mut [&mut c, &mut b, &mut a]); - assert_eq!(true, list.remove(&mut b)); - assert_clean((&mut b).into()); - assert_eq!(Some((&mut c).into()), a.next); - assert_eq!(Some((&mut a).into()), c.prev); - let items: Vec = collect_reverse_list(list); - assert_eq!([31, 5].to_vec(), items); - } - - { - // Remove last - let mut list = LinkedList::new(); - add_nodes(&mut 
list, &mut [&mut c, &mut b, &mut a]); - assert_eq!(true, list.remove(&mut c)); - assert_clean((&mut c).into()); - assert!(b.next.is_none()); - assert_eq!(Some((&mut b).into()), list.tail); - let items: Vec = collect_list(list); - assert_eq!([5, 7].to_vec(), items); - - let mut list = LinkedList::new(); - add_nodes(&mut list, &mut [&mut c, &mut b, &mut a]); - assert_eq!(true, list.remove(&mut c)); - assert_clean((&mut c).into()); - assert!(b.next.is_none()); - assert_eq!(Some((&mut b).into()), list.tail); - let items: Vec = collect_reverse_list(list); - assert_eq!([7, 5].to_vec(), items); - } - - { - // Remove first of two - let mut list = LinkedList::new(); - add_nodes(&mut list, &mut [&mut b, &mut a]); - assert_eq!(true, list.remove(&mut a)); - assert_clean((&mut a).into()); - // a should be no longer there and can't be removed twice - assert_eq!(false, list.remove(&mut a)); - assert_eq!(Some((&mut b).into()), list.head); - assert_eq!(Some((&mut b).into()), list.tail); - assert!(b.next.is_none()); - assert!(b.prev.is_none()); - let items: Vec = collect_list(list); - assert_eq!([7].to_vec(), items); - - let mut list = LinkedList::new(); - add_nodes(&mut list, &mut [&mut b, &mut a]); - assert_eq!(true, list.remove(&mut a)); - assert_clean((&mut a).into()); - // a should be no longer there and can't be removed twice - assert_eq!(false, list.remove(&mut a)); - assert_eq!(Some((&mut b).into()), list.head); - assert_eq!(Some((&mut b).into()), list.tail); - assert!(b.next.is_none()); - assert!(b.prev.is_none()); - let items: Vec = collect_reverse_list(list); - assert_eq!([7].to_vec(), items); - } - - { - // Remove last of two - let mut list = LinkedList::new(); - add_nodes(&mut list, &mut [&mut b, &mut a]); - assert_eq!(true, list.remove(&mut b)); - assert_clean((&mut b).into()); - assert_eq!(Some((&mut a).into()), list.head); - assert_eq!(Some((&mut a).into()), list.tail); - assert!(a.next.is_none()); - assert!(a.prev.is_none()); - let items: Vec = collect_list(list); - assert_eq!([5].to_vec(), items); - - let mut list = LinkedList::new(); - add_nodes(&mut list, &mut [&mut b, &mut a]); - assert_eq!(true, list.remove(&mut b)); - assert_clean((&mut b).into()); - assert_eq!(Some((&mut a).into()), list.head); - assert_eq!(Some((&mut a).into()), list.tail); - assert!(a.next.is_none()); - assert!(a.prev.is_none()); - let items: Vec = collect_reverse_list(list); - assert_eq!([5].to_vec(), items); - } - - { - // Remove last item - let mut list = LinkedList::new(); - add_nodes(&mut list, &mut [&mut a]); - assert_eq!(true, list.remove(&mut a)); - assert_clean((&mut a).into()); - assert!(list.head.is_none()); - assert!(list.tail.is_none()); - let items: Vec = collect_list(list); - assert!(items.is_empty()); - } - - { - // Remove missing - let mut list = LinkedList::new(); - list.add_front(&mut b); - list.add_front(&mut a); - assert_eq!(false, list.remove(&mut c)); - } - } - } -} diff --git a/third_party/rust/tokio-0.2.25/src/util/linked_list.rs b/third_party/rust/tokio-0.2.25/src/util/linked_list.rs deleted file mode 100644 index aa3ce771887d..000000000000 --- a/third_party/rust/tokio-0.2.25/src/util/linked_list.rs +++ /dev/null @@ -1,585 +0,0 @@ -//! An intrusive double linked list of data -//! -//! The data structure supports tracking pinned nodes. Most of the data -//! structure's APIs are `unsafe` as they require the caller to ensure the -//! specified node is actually contained by the list. - -use core::fmt; -use core::mem::ManuallyDrop; -use core::ptr::NonNull; - -/// An intrusive linked list. 
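Both linked lists in this file set are intrusive: the prev/next links are embedded in the entries themselves rather than in nodes owned by the list, which is why the entries must keep stable addresses (pinning) for as long as they are linked. A safe, index-based sketch of that layout, with arena indices standing in for the stable pointers (every name below is illustrative, not from the crate):

```rust
// Each entry carries its own links; the "list" is just a head index.
struct Entry {
    value: i32,
    prev: Option<usize>, // links live inside the entry itself
    next: Option<usize>,
}

struct List {
    arena: Vec<Entry>, // stands in for stable node addresses
    head: Option<usize>,
}

impl List {
    fn push_front(&mut self, value: i32) -> usize {
        let idx = self.arena.len();
        self.arena.push(Entry { value, prev: None, next: self.head });
        if let Some(old_head) = self.head {
            self.arena[old_head].prev = Some(idx);
        }
        self.head = Some(idx);
        idx
    }

    fn values(&self) -> Vec<i32> {
        let mut out = Vec::new();
        let mut cur = self.head;
        while let Some(i) = cur {
            out.push(self.arena[i].value);
            cur = self.arena[i].next;
        }
        out
    }
}

fn main() {
    let mut list = List { arena: Vec::new(), head: None };
    list.push_front(31);
    list.push_front(7);
    list.push_front(5);
    assert_eq!(list.values(), vec![5, 7, 31]);
}
```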
-/// -/// Currently, the list is not emptied on drop. It is the caller's -/// responsibility to ensure the list is empty before dropping it. -pub(crate) struct LinkedList { - /// Linked list head - head: Option>, - - /// Linked list tail - tail: Option>, -} - -unsafe impl Send for LinkedList where T::Target: Send {} -unsafe impl Sync for LinkedList where T::Target: Sync {} - -/// Defines how a type is tracked within a linked list. -/// -/// In order to support storing a single type within multiple lists, accessing -/// the list pointers is decoupled from the entry type. -/// -/// # Safety -/// -/// Implementations must guarantee that `Target` types are pinned in memory. In -/// other words, when a node is inserted, the value will not be moved as long as -/// it is stored in the list. -pub(crate) unsafe trait Link { - /// Handle to the list entry. - /// - /// This is usually a pointer-ish type. - type Handle; - - /// Node type - type Target; - - /// Convert the handle to a raw pointer without consuming the handle - fn as_raw(handle: &Self::Handle) -> NonNull; - - /// Convert the raw pointer to a handle - unsafe fn from_raw(ptr: NonNull) -> Self::Handle; - - /// Return the pointers for a node - unsafe fn pointers(target: NonNull) -> NonNull>; -} - -/// Previous / next pointers -pub(crate) struct Pointers { - /// The previous node in the list. null if there is no previous node. - prev: Option>, - - /// The next node in the list. null if there is no previous node. - next: Option>, -} - -unsafe impl Send for Pointers {} -unsafe impl Sync for Pointers {} - -// ===== impl LinkedList ===== - -impl LinkedList { - /// Creates an empty linked list - pub(crate) fn new() -> LinkedList { - LinkedList { - head: None, - tail: None, - } - } - - /// Adds an element first in the list. - pub(crate) fn push_front(&mut self, val: T::Handle) { - // The value should not be dropped, it is being inserted into the list - let val = ManuallyDrop::new(val); - let ptr = T::as_raw(&*val); - assert_ne!(self.head, Some(ptr)); - unsafe { - T::pointers(ptr).as_mut().next = self.head; - T::pointers(ptr).as_mut().prev = None; - - if let Some(head) = self.head { - T::pointers(head).as_mut().prev = Some(ptr); - } - - self.head = Some(ptr); - - if self.tail.is_none() { - self.tail = Some(ptr); - } - } - } - - /// Removes the last element from a list and returns it, or None if it is - /// empty. - pub(crate) fn pop_back(&mut self) -> Option { - unsafe { - let last = self.tail?; - self.tail = T::pointers(last).as_ref().prev; - - if let Some(prev) = T::pointers(last).as_ref().prev { - T::pointers(prev).as_mut().next = None; - } else { - self.head = None - } - - T::pointers(last).as_mut().prev = None; - T::pointers(last).as_mut().next = None; - - Some(T::from_raw(last)) - } - } - - /// Returns whether the linked list doesn not contain any node - pub(crate) fn is_empty(&self) -> bool { - if self.head.is_some() { - return false; - } - - assert!(self.tail.is_none()); - true - } - - /// Removes the specified node from the list - /// - /// # Safety - /// - /// The caller **must** ensure that `node` is currently contained by - /// `self` or not contained by any other list. 
- pub(crate) unsafe fn remove(&mut self, node: NonNull) -> Option { - if let Some(prev) = T::pointers(node).as_ref().prev { - debug_assert_eq!(T::pointers(prev).as_ref().next, Some(node)); - T::pointers(prev).as_mut().next = T::pointers(node).as_ref().next; - } else { - if self.head != Some(node) { - return None; - } - - self.head = T::pointers(node).as_ref().next; - } - - if let Some(next) = T::pointers(node).as_ref().next { - debug_assert_eq!(T::pointers(next).as_ref().prev, Some(node)); - T::pointers(next).as_mut().prev = T::pointers(node).as_ref().prev; - } else { - // This might be the last item in the list - if self.tail != Some(node) { - return None; - } - - self.tail = T::pointers(node).as_ref().prev; - } - - T::pointers(node).as_mut().next = None; - T::pointers(node).as_mut().prev = None; - - Some(T::from_raw(node)) - } -} - -impl fmt::Debug for LinkedList { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("LinkedList") - .field("head", &self.head) - .field("tail", &self.tail) - .finish() - } -} - -cfg_sync! { - impl LinkedList { - pub(crate) fn last(&self) -> Option<&T::Target> { - let tail = self.tail.as_ref()?; - unsafe { - Some(&*tail.as_ptr()) - } - } - } -} - -// ===== impl Iter ===== - -cfg_rt_threaded! { - pub(crate) struct Iter<'a, T: Link> { - curr: Option>, - _p: core::marker::PhantomData<&'a T>, - } - - impl LinkedList { - pub(crate) fn iter(&self) -> Iter<'_, T> { - Iter { - curr: self.head, - _p: core::marker::PhantomData, - } - } - } - - impl<'a, T: Link> Iterator for Iter<'a, T> { - type Item = &'a T::Target; - - fn next(&mut self) -> Option<&'a T::Target> { - let curr = self.curr?; - // safety: the pointer references data contained by the list - self.curr = unsafe { T::pointers(curr).as_ref() }.next; - - // safety: the value is still owned by the linked list. - Some(unsafe { &*curr.as_ptr() }) - } - } -} - -// ===== impl Pointers ===== - -impl Pointers { - /// Create a new set of empty pointers - pub(crate) fn new() -> Pointers { - Pointers { - prev: None, - next: None, - } - } -} - -impl fmt::Debug for Pointers { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Pointers") - .field("prev", &self.prev) - .field("next", &self.next) - .finish() - } -} - -#[cfg(test)] -#[cfg(not(loom))] -mod tests { - use super::*; - - use std::pin::Pin; - - #[derive(Debug)] - struct Entry { - pointers: Pointers, - val: i32, - } - - unsafe impl<'a> Link for &'a Entry { - type Handle = Pin<&'a Entry>; - type Target = Entry; - - fn as_raw(handle: &Pin<&'_ Entry>) -> NonNull { - NonNull::from(handle.get_ref()) - } - - unsafe fn from_raw(ptr: NonNull) -> Pin<&'a Entry> { - Pin::new(&*ptr.as_ptr()) - } - - unsafe fn pointers(mut target: NonNull) -> NonNull> { - NonNull::from(&mut target.as_mut().pointers) - } - } - - fn entry(val: i32) -> Pin> { - Box::pin(Entry { - pointers: Pointers::new(), - val, - }) - } - - fn ptr(r: &Pin>) -> NonNull { - r.as_ref().get_ref().into() - } - - fn collect_list(list: &mut LinkedList<&'_ Entry>) -> Vec { - let mut ret = vec![]; - - while let Some(entry) = list.pop_back() { - ret.push(entry.val); - } - - ret - } - - fn push_all<'a>(list: &mut LinkedList<&'a Entry>, entries: &[Pin<&'a Entry>]) { - for entry in entries.iter() { - list.push_front(*entry); - } - } - - macro_rules! assert_clean { - ($e:ident) => {{ - assert!($e.pointers.next.is_none()); - assert!($e.pointers.prev.is_none()); - }}; - } - - macro_rules! 
assert_ptr_eq { - ($a:expr, $b:expr) => {{ - // Deal with mapping a Pin<&mut T> -> Option> - assert_eq!(Some($a.as_ref().get_ref().into()), $b) - }}; - } - - #[test] - fn push_and_drain() { - let a = entry(5); - let b = entry(7); - let c = entry(31); - - let mut list = LinkedList::new(); - assert!(list.is_empty()); - - list.push_front(a.as_ref()); - assert!(!list.is_empty()); - list.push_front(b.as_ref()); - list.push_front(c.as_ref()); - - let items: Vec = collect_list(&mut list); - assert_eq!([5, 7, 31].to_vec(), items); - - assert!(list.is_empty()); - } - - #[test] - fn push_pop_push_pop() { - let a = entry(5); - let b = entry(7); - - let mut list = LinkedList::<&Entry>::new(); - - list.push_front(a.as_ref()); - - let entry = list.pop_back().unwrap(); - assert_eq!(5, entry.val); - assert!(list.is_empty()); - - list.push_front(b.as_ref()); - - let entry = list.pop_back().unwrap(); - assert_eq!(7, entry.val); - - assert!(list.is_empty()); - assert!(list.pop_back().is_none()); - } - - #[test] - fn remove_by_address() { - let a = entry(5); - let b = entry(7); - let c = entry(31); - - unsafe { - // Remove first - let mut list = LinkedList::new(); - - push_all(&mut list, &[c.as_ref(), b.as_ref(), a.as_ref()]); - assert!(list.remove(ptr(&a)).is_some()); - assert_clean!(a); - // `a` should be no longer there and can't be removed twice - assert!(list.remove(ptr(&a)).is_none()); - assert!(!list.is_empty()); - - assert!(list.remove(ptr(&b)).is_some()); - assert_clean!(b); - // `b` should be no longer there and can't be removed twice - assert!(list.remove(ptr(&b)).is_none()); - assert!(!list.is_empty()); - - assert!(list.remove(ptr(&c)).is_some()); - assert_clean!(c); - // `b` should be no longer there and can't be removed twice - assert!(list.remove(ptr(&c)).is_none()); - assert!(list.is_empty()); - } - - unsafe { - // Remove middle - let mut list = LinkedList::new(); - - push_all(&mut list, &[c.as_ref(), b.as_ref(), a.as_ref()]); - - assert!(list.remove(ptr(&a)).is_some()); - assert_clean!(a); - - assert_ptr_eq!(b, list.head); - assert_ptr_eq!(c, b.pointers.next); - assert_ptr_eq!(b, c.pointers.prev); - - let items = collect_list(&mut list); - assert_eq!([31, 7].to_vec(), items); - } - - unsafe { - // Remove middle - let mut list = LinkedList::new(); - - push_all(&mut list, &[c.as_ref(), b.as_ref(), a.as_ref()]); - - assert!(list.remove(ptr(&b)).is_some()); - assert_clean!(b); - - assert_ptr_eq!(c, a.pointers.next); - assert_ptr_eq!(a, c.pointers.prev); - - let items = collect_list(&mut list); - assert_eq!([31, 5].to_vec(), items); - } - - unsafe { - // Remove last - // Remove middle - let mut list = LinkedList::new(); - - push_all(&mut list, &[c.as_ref(), b.as_ref(), a.as_ref()]); - - assert!(list.remove(ptr(&c)).is_some()); - assert_clean!(c); - - assert!(b.pointers.next.is_none()); - assert_ptr_eq!(b, list.tail); - - let items = collect_list(&mut list); - assert_eq!([7, 5].to_vec(), items); - } - - unsafe { - // Remove first of two - let mut list = LinkedList::new(); - - push_all(&mut list, &[b.as_ref(), a.as_ref()]); - - assert!(list.remove(ptr(&a)).is_some()); - - assert_clean!(a); - - // a should be no longer there and can't be removed twice - assert!(list.remove(ptr(&a)).is_none()); - - assert_ptr_eq!(b, list.head); - assert_ptr_eq!(b, list.tail); - - assert!(b.pointers.next.is_none()); - assert!(b.pointers.prev.is_none()); - - let items = collect_list(&mut list); - assert_eq!([7].to_vec(), items); - } - - unsafe { - // Remove last of two - let mut list = LinkedList::new(); - - 
push_all(&mut list, &[b.as_ref(), a.as_ref()]); - - assert!(list.remove(ptr(&b)).is_some()); - - assert_clean!(b); - - assert_ptr_eq!(a, list.head); - assert_ptr_eq!(a, list.tail); - - assert!(a.pointers.next.is_none()); - assert!(a.pointers.prev.is_none()); - - let items = collect_list(&mut list); - assert_eq!([5].to_vec(), items); - } - - unsafe { - // Remove last item - let mut list = LinkedList::new(); - - push_all(&mut list, &[a.as_ref()]); - - assert!(list.remove(ptr(&a)).is_some()); - assert_clean!(a); - - assert!(list.head.is_none()); - assert!(list.tail.is_none()); - let items = collect_list(&mut list); - assert!(items.is_empty()); - } - - unsafe { - // Remove missing - let mut list = LinkedList::<&Entry>::new(); - - list.push_front(b.as_ref()); - list.push_front(a.as_ref()); - - assert!(list.remove(ptr(&c)).is_none()); - } - } - - #[test] - fn iter() { - let a = entry(5); - let b = entry(7); - - let mut list = LinkedList::<&Entry>::new(); - - assert_eq!(0, list.iter().count()); - - list.push_front(a.as_ref()); - list.push_front(b.as_ref()); - - let mut i = list.iter(); - assert_eq!(7, i.next().unwrap().val); - assert_eq!(5, i.next().unwrap().val); - assert!(i.next().is_none()); - } - - proptest::proptest! { - #[test] - fn fuzz_linked_list(ops: Vec) { - run_fuzz(ops); - } - } - - fn run_fuzz(ops: Vec) { - use std::collections::VecDeque; - - #[derive(Debug)] - enum Op { - Push, - Pop, - Remove(usize), - } - - let ops = ops - .iter() - .map(|i| match i % 3 { - 0 => Op::Push, - 1 => Op::Pop, - 2 => Op::Remove(i / 3), - _ => unreachable!(), - }) - .collect::>(); - - let mut ll = LinkedList::<&Entry>::new(); - let mut reference = VecDeque::new(); - - let entries: Vec<_> = (0..ops.len()).map(|i| entry(i as i32)).collect(); - - for (i, op) in ops.iter().enumerate() { - match op { - Op::Push => { - reference.push_front(i as i32); - assert_eq!(entries[i].val, i as i32); - - ll.push_front(entries[i].as_ref()); - } - Op::Pop => { - if reference.is_empty() { - assert!(ll.is_empty()); - continue; - } - - let v = reference.pop_back(); - assert_eq!(v, ll.pop_back().map(|v| v.val)); - } - Op::Remove(n) => { - if reference.is_empty() { - assert!(ll.is_empty()); - continue; - } - - let idx = n % reference.len(); - let expect = reference.remove(idx).unwrap(); - - unsafe { - let entry = ll.remove(ptr(&entries[expect as usize])).unwrap(); - assert_eq!(expect, entry.val); - } - } - } - } - } -} diff --git a/third_party/rust/tokio-0.2.25/src/util/mod.rs b/third_party/rust/tokio-0.2.25/src/util/mod.rs deleted file mode 100644 index 6dda08ca4112..000000000000 --- a/third_party/rust/tokio-0.2.25/src/util/mod.rs +++ /dev/null @@ -1,28 +0,0 @@ -cfg_io_driver! { - pub(crate) mod bit; - pub(crate) mod slab; -} - -#[cfg(any(feature = "sync", feature = "rt-core"))] -pub(crate) mod linked_list; - -#[cfg(any(feature = "rt-threaded", feature = "macros", feature = "stream"))] -mod rand; - -mod wake; -pub(crate) use wake::{waker_ref, Wake}; - -cfg_rt_threaded! 
{ - pub(crate) use rand::FastRand; - - mod try_lock; - pub(crate) use try_lock::TryLock; -} - -pub(crate) mod trace; - -#[cfg(any(feature = "macros", feature = "stream"))] -#[cfg_attr(not(feature = "macros"), allow(unreachable_pub))] -pub use rand::thread_rng_n; - -pub(crate) mod intrusive_double_linked_list; diff --git a/third_party/rust/tokio-0.2.25/src/util/pad.rs b/third_party/rust/tokio-0.2.25/src/util/pad.rs deleted file mode 100644 index bf0913ca8537..000000000000 --- a/third_party/rust/tokio-0.2.25/src/util/pad.rs +++ /dev/null @@ -1,52 +0,0 @@ -use core::fmt; -use core::ops::{Deref, DerefMut}; - -#[derive(Clone, Copy, Default, Hash, PartialEq, Eq)] -// Starting from Intel's Sandy Bridge, spatial prefetcher is now pulling pairs of 64-byte cache -// lines at a time, so we have to align to 128 bytes rather than 64. -// -// Sources: -// - https://www.intel.com/content/dam/www/public/us/en/documents/manuals/64-ia-32-architectures-optimization-manual.pdf -// - https://github.com/facebook/folly/blob/1b5288e6eea6df074758f877c849b6e73bbb9fbb/folly/lang/Align.h#L107 -#[cfg_attr(target_arch = "x86_64", repr(align(128)))] -#[cfg_attr(not(target_arch = "x86_64"), repr(align(64)))] -pub(crate) struct CachePadded { - value: T, -} - -unsafe impl Send for CachePadded {} -unsafe impl Sync for CachePadded {} - -impl CachePadded { - pub(crate) fn new(t: T) -> CachePadded { - CachePadded:: { value: t } - } -} - -impl Deref for CachePadded { - type Target = T; - - fn deref(&self) -> &T { - &self.value - } -} - -impl DerefMut for CachePadded { - fn deref_mut(&mut self) -> &mut T { - &mut self.value - } -} - -impl fmt::Debug for CachePadded { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("CachePadded") - .field("value", &self.value) - .finish() - } -} - -impl From for CachePadded { - fn from(t: T) -> Self { - CachePadded::new(t) - } -} diff --git a/third_party/rust/tokio-0.2.25/src/util/rand.rs b/third_party/rust/tokio-0.2.25/src/util/rand.rs deleted file mode 100644 index 4b72b4b11000..000000000000 --- a/third_party/rust/tokio-0.2.25/src/util/rand.rs +++ /dev/null @@ -1,64 +0,0 @@ -use std::cell::Cell; - -/// Fast random number generate -/// -/// Implement xorshift64+: 2 32-bit xorshift sequences added together. -/// Shift triplet [17,7,16] was calculated as indicated in Marsaglia's -/// Xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf -/// This generator passes the SmallCrush suite, part of TestU01 framework: -/// http://simul.iro.umontreal.ca/testu01/tu01.html -#[derive(Debug)] -pub(crate) struct FastRand { - one: Cell, - two: Cell, -} - -impl FastRand { - /// Initialize a new, thread-local, fast random number generator. - pub(crate) fn new(seed: u64) -> FastRand { - let one = (seed >> 32) as u32; - let mut two = seed as u32; - - if two == 0 { - // This value cannot be zero - two = 1; - } - - FastRand { - one: Cell::new(one), - two: Cell::new(two), - } - } - - pub(crate) fn fastrand_n(&self, n: u32) -> u32 { - // This is similar to fastrand() % n, but faster. 
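The comment above refers to Lemire's multiply-then-shift reduction: multiply the 32-bit random value by `n` as 64-bit integers and keep the high half, which lands in `[0, n)` without a division or modulo. A standalone sketch of just that step (the `reduce` name is made up for the example):

```rust
fn reduce(x: u32, n: u32) -> u32 {
    // Keep the high 32 bits of the 64-bit product; the result is always < n.
    ((x as u64).wrapping_mul(n as u64) >> 32) as u32
}

fn main() {
    for &x in &[0u32, 1, u32::MAX / 2, u32::MAX] {
        let r = reduce(x, 10);
        assert!(r < 10);
        println!("reduce({}, 10) = {}", x, r);
    }
}
```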
- // See https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/ - let mul = (self.fastrand() as u64).wrapping_mul(n as u64); - (mul >> 32) as u32 - } - - fn fastrand(&self) -> u32 { - let mut s1 = self.one.get(); - let s0 = self.two.get(); - - s1 ^= s1 << 17; - s1 = s1 ^ s0 ^ s1 >> 7 ^ s0 >> 16; - - self.one.set(s0); - self.two.set(s1); - - s0.wrapping_add(s1) - } -} - -// Used by the select macro and `StreamMap` -#[cfg(any(feature = "macros", feature = "stream"))] -#[doc(hidden)] -#[cfg_attr(not(feature = "macros"), allow(unreachable_pub))] -pub fn thread_rng_n(n: u32) -> u32 { - thread_local! { - static THREAD_RNG: FastRand = FastRand::new(crate::loom::rand::seed()); - } - - THREAD_RNG.with(|rng| rng.fastrand_n(n)) -} diff --git a/third_party/rust/tokio-0.2.25/src/util/slab/addr.rs b/third_party/rust/tokio-0.2.25/src/util/slab/addr.rs deleted file mode 100644 index c14e32e90951..000000000000 --- a/third_party/rust/tokio-0.2.25/src/util/slab/addr.rs +++ /dev/null @@ -1,154 +0,0 @@ -//! Tracks the location of an entry in a slab. -//! -//! # Index packing -//! -//! A slab index consists of multiple indices packed into a single `usize` value -//! that correspond to different parts of the slab. -//! -//! The least significant `MAX_PAGES + INITIAL_PAGE_SIZE.trailing_zeros() + 1` -//! bits store the address within a shard, starting at 0 for the first slot on -//! the first page. To index a slot within a shard, we first find the index of -//! the page that the address falls on, and then the offset of the slot within -//! that page. -//! -//! Since every page is twice as large as the previous page, and all page sizes -//! are powers of two, we can determine the page index that contains a given -//! address by shifting the address down by the smallest page size and looking -//! at how many twos places necessary to represent that number, telling us what -//! power of two page size it fits inside of. We can determine the number of -//! twos places by counting the number of leading zeros (unused twos places) in -//! the number's binary representation, and subtracting that count from the -//! total number of bits in a word. -//! -//! Once we know what page contains an address, we can subtract the size of all -//! previous pages from the address to determine the offset within the page. -//! -//! After the page address, the next `MAX_THREADS.trailing_zeros() + 1` least -//! significant bits are the thread ID. These are used to index the array of -//! shards to find which shard a slot belongs to. If an entry is being removed -//! and the thread ID of its index matches that of the current thread, we can -//! use the `remove_local` fast path; otherwise, we have to use the synchronized -//! `remove_remote` path. -//! -//! Finally, a generation value is packed into the index. The `RESERVED_BITS` -//! most significant bits are left unused, and the remaining bits between the -//! last bit of the thread ID and the first reserved bit are used to store the -//! generation. The generation is used as part of an atomic read-modify-write -//! loop every time a `ScheduledIo`'s readiness is modified, or when the -//! resource is removed, to guard against the ABA problem. -//! -//! Visualized: -//! -//! ```text -//! ┌──────────┬───────────────┬──────────────────┬──────────────────────────┐ -//! │ reserved │ generation │ thread ID │ address │ -//! └▲─────────┴▲──────────────┴▲─────────────────┴▲────────────────────────▲┘ -//! │ │ │ │ │ -//! bits(usize) │ bits(MAX_THREADS) │ 0 -//! │ │ -//! 
bits(usize) - RESERVED MAX_PAGES + bits(INITIAL_PAGE_SIZE) -//! ``` - -use crate::util::bit; -use crate::util::slab::{Generation, INITIAL_PAGE_SIZE, MAX_PAGES, MAX_THREADS}; - -use std::usize; - -/// References the location at which an entry is stored in a slab. -#[derive(Debug, Copy, Clone, Eq, PartialEq)] -pub(crate) struct Address(usize); - -const PAGE_INDEX_SHIFT: u32 = INITIAL_PAGE_SIZE.trailing_zeros() + 1; - -/// Address in the shard -const SLOT: bit::Pack = bit::Pack::least_significant(MAX_PAGES as u32 + PAGE_INDEX_SHIFT); - -/// Masks the thread identifier -const THREAD: bit::Pack = SLOT.then(MAX_THREADS.trailing_zeros() + 1); - -/// Masks the generation -const GENERATION: bit::Pack = THREAD - .then(bit::pointer_width().wrapping_sub(RESERVED.width() + THREAD.width() + SLOT.width())); - -// Chosen arbitrarily -const RESERVED: bit::Pack = bit::Pack::most_significant(5); - -impl Address { - /// Represents no entry, picked to avoid collision with Mio's internals. - /// This value should not be passed to mio. - pub(crate) const NULL: usize = usize::MAX >> 1; - - /// Re-exported by `Generation`. - pub(super) const GENERATION_WIDTH: u32 = GENERATION.width(); - - pub(super) fn new(shard_index: usize, generation: Generation) -> Address { - let mut repr = 0; - - repr = SLOT.pack(shard_index, repr); - repr = GENERATION.pack(generation.to_usize(), repr); - - Address(repr) - } - - /// Convert from a `usize` representation. - pub(crate) fn from_usize(src: usize) -> Address { - assert_ne!(src, Self::NULL); - - Address(src) - } - - /// Convert to a `usize` representation - pub(crate) fn to_usize(self) -> usize { - self.0 - } - - pub(crate) fn generation(self) -> Generation { - Generation::new(GENERATION.unpack(self.0)) - } - - /// Returns the page index - pub(super) fn page(self) -> usize { - // Since every page is twice as large as the previous page, and all page - // sizes are powers of two, we can determine the page index that - // contains a given address by shifting the address down by the smallest - // page size and looking at how many twos places necessary to represent - // that number, telling us what power of two page size it fits inside - // of. We can determine the number of twos places by counting the number - // of leading zeros (unused twos places) in the number's binary - // representation, and subtracting that count from the total number of - // bits in a word. - let slot_shifted = (self.slot() + INITIAL_PAGE_SIZE) >> PAGE_INDEX_SHIFT; - (bit::pointer_width() - slot_shifted.leading_zeros()) as usize - } - - /// Returns the slot index - pub(super) fn slot(self) -> usize { - SLOT.unpack(self.0) - } -} - -#[cfg(test)] -cfg_not_loom! { - use proptest::proptest; - - #[test] - fn test_pack_format() { - assert_eq!(5, RESERVED.width()); - assert_eq!(0b11111, RESERVED.max_value()); - } - - proptest! 
{ - #[test] - fn address_roundtrips( - slot in 0usize..SLOT.max_value(), - generation in 0usize..Generation::MAX, - ) { - let address = Address::new(slot, Generation::new(generation)); - // Round trip - let address = Address::from_usize(address.to_usize()); - - assert_eq!(address.slot(), slot); - assert_eq!(address.generation().to_usize(), generation); - } - } -} diff --git a/third_party/rust/tokio-0.2.25/src/util/slab/entry.rs b/third_party/rust/tokio-0.2.25/src/util/slab/entry.rs deleted file mode 100644 index 2e0b10b0fdf9..000000000000 --- a/third_party/rust/tokio-0.2.25/src/util/slab/entry.rs +++ /dev/null @@ -1,7 +0,0 @@ -use crate::util::slab::Generation; - -pub(crate) trait Entry: Default { - fn generation(&self) -> Generation; - - fn reset(&self, generation: Generation) -> bool; -} diff --git a/third_party/rust/tokio-0.2.25/src/util/slab/generation.rs b/third_party/rust/tokio-0.2.25/src/util/slab/generation.rs deleted file mode 100644 index 4b16b2caf655..000000000000 --- a/third_party/rust/tokio-0.2.25/src/util/slab/generation.rs +++ /dev/null @@ -1,32 +0,0 @@ -use crate::util::bit; -use crate::util::slab::Address; - -/// An mutation identifier for a slot in the slab. The generation helps prevent -/// accessing an entry with an outdated token. -#[derive(Copy, Clone, Debug, PartialEq, Eq, Ord, PartialOrd)] -pub(crate) struct Generation(usize); - -impl Generation { - pub(crate) const WIDTH: u32 = Address::GENERATION_WIDTH; - - pub(super) const MAX: usize = bit::mask_for(Address::GENERATION_WIDTH); - - /// Create a new generation - /// - /// # Panics - /// - /// Panics if `value` is greater than max generation. - pub(crate) fn new(value: usize) -> Generation { - assert!(value <= Self::MAX); - Generation(value) - } - - /// Returns the next generation value - pub(crate) fn next(self) -> Generation { - Generation((self.0 + 1) & Self::MAX) - } - - pub(crate) fn to_usize(self) -> usize { - self.0 - } -} diff --git a/third_party/rust/tokio-0.2.25/src/util/slab/mod.rs b/third_party/rust/tokio-0.2.25/src/util/slab/mod.rs deleted file mode 100644 index 5082970507e1..000000000000 --- a/third_party/rust/tokio-0.2.25/src/util/slab/mod.rs +++ /dev/null @@ -1,107 +0,0 @@ -//! A lock-free concurrent slab. - -mod addr; -pub(crate) use addr::Address; - -mod entry; -pub(crate) use entry::Entry; - -mod generation; -pub(crate) use generation::Generation; - -mod page; - -mod shard; -use shard::Shard; - -mod slot; -use slot::Slot; - -mod stack; -use stack::TransferStack; - -#[cfg(all(loom, test))] -mod tests; - -use crate::loom::sync::Mutex; -use crate::util::bit; - -use std::fmt; - -#[cfg(target_pointer_width = "64")] -const MAX_THREADS: usize = 4096; - -#[cfg(target_pointer_width = "32")] -const MAX_THREADS: usize = 2048; - -/// Max number of pages per slab -const MAX_PAGES: usize = bit::pointer_width() as usize / 4; - -cfg_not_loom! { - /// Size of first page - const INITIAL_PAGE_SIZE: usize = 32; -} - -cfg_loom! { - const INITIAL_PAGE_SIZE: usize = 2; -} - -/// A sharded slab. -pub(crate) struct Slab { - // Signal shard for now. Eventually there will be more. - shard: Shard, - local: Mutex<()>, -} - -unsafe impl Send for Slab {} -unsafe impl Sync for Slab {} - -impl Slab { - /// Returns a new slab with the default configuration parameters. - pub(crate) fn new() -> Slab { - Slab { - shard: Shard::new(), - local: Mutex::new(()), - } - } - - /// allocs a value into the slab, returning a key that can be used to - /// access it. 
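The `Generation` stored with each slot, and packed into every `Address`, is what lets a reused slot reject keys that were handed out before it was freed. A minimal, safe sketch of that generation check using a toy keyed store — not the lock-free slab; every name below is illustrative:

```rust
#[derive(Clone, Copy)]
struct Key {
    slot: usize,
    generation: u64,
}

struct Slot {
    generation: u64,
    value: Option<&'static str>,
}

struct MiniSlab {
    slots: Vec<Slot>,
}

impl MiniSlab {
    fn insert(&mut self, slot: usize, value: &'static str) -> Key {
        let s = &mut self.slots[slot];
        s.generation += 1; // bump the generation every time the slot is (re)used
        s.value = Some(value);
        Key { slot, generation: s.generation }
    }

    fn get(&self, key: Key) -> Option<&'static str> {
        let s = &self.slots[key.slot];
        // A stale key carries an old generation and is rejected here.
        if s.generation == key.generation { s.value } else { None }
    }

    fn remove(&mut self, key: Key) {
        let s = &mut self.slots[key.slot];
        if s.generation == key.generation {
            s.value = None;
        }
    }
}

fn main() {
    let mut slab = MiniSlab { slots: vec![Slot { generation: 0, value: None }] };
    let old = slab.insert(0, "first");
    slab.remove(old);
    let reused = slab.insert(0, "second"); // slot 0 reused under a new generation
    assert_eq!(slab.get(old), None);       // the outdated key no longer resolves
    assert_eq!(slab.get(reused), Some("second"));
}
```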
-    ///
-    /// If this function returns `None`, then the shard for the current thread
-    /// is full and no items can be added until some are removed, or the maximum
-    /// number of shards has been reached.
-    pub(crate) fn alloc(&self) -> Option<Address> {
{ - // we must lock the slab to alloc an item. - let _local = self.local.lock().unwrap(); - self.shard.alloc() - } - - /// Removes the value associated with the given key from the slab. - pub(crate) fn remove(&self, idx: Address) { - // try to lock the slab so that we can use `remove_local`. - let lock = self.local.try_lock(); - - // if we were able to lock the slab, we are "local" and can use the fast - // path; otherwise, we will use `remove_remote`. - if lock.is_ok() { - self.shard.remove_local(idx) - } else { - self.shard.remove_remote(idx) - } - } - - /// Return a reference to the value associated with the given key. - /// - /// If the slab does not contain a value for the given key, `None` is - /// returned instead. - pub(crate) fn get(&self, token: Address) -> Option<&T> { - self.shard.get(token) - } -} - -impl fmt::Debug for Slab { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Slab").field("shard", &self.shard).finish() - } -} diff --git a/third_party/rust/tokio-0.2.25/src/util/slab/page.rs b/third_party/rust/tokio-0.2.25/src/util/slab/page.rs deleted file mode 100644 index 0000e934dead..000000000000 --- a/third_party/rust/tokio-0.2.25/src/util/slab/page.rs +++ /dev/null @@ -1,187 +0,0 @@ -use crate::loom::cell::UnsafeCell; -use crate::util::slab::{Address, Entry, Slot, TransferStack, INITIAL_PAGE_SIZE}; - -use std::fmt; - -/// Data accessed only by the thread that owns the shard. -pub(crate) struct Local { - head: UnsafeCell, -} - -/// Data accessed by any thread. -pub(crate) struct Shared { - remote: TransferStack, - size: usize, - prev_sz: usize, - slab: UnsafeCell]>>>, -} - -/// Returns the size of the page at index `n` -pub(super) fn size(n: usize) -> usize { - INITIAL_PAGE_SIZE << n -} - -impl Local { - pub(crate) fn new() -> Self { - Self { - head: UnsafeCell::new(0), - } - } - - fn head(&self) -> usize { - self.head.with(|head| unsafe { *head }) - } - - fn set_head(&self, new_head: usize) { - self.head.with_mut(|head| unsafe { - *head = new_head; - }) - } -} - -impl Shared { - pub(crate) fn new(size: usize, prev_sz: usize) -> Shared { - Self { - prev_sz, - size, - remote: TransferStack::new(), - slab: UnsafeCell::new(None), - } - } - - /// Allocates storage for this page if it does not allready exist. - /// - /// This requires unique access to the page (e.g. it is called from the - /// thread that owns the page, or, in the case of `SingleShard`, while the - /// lock is held). In order to indicate this, a reference to the page's - /// `Local` data is taken by this function; the `Local` argument is not - /// actually used, but requiring it ensures that this is only called when - /// local access is held. - #[cold] - fn alloc_page(&self, _: &Local) { - debug_assert!(self.slab.with(|s| unsafe { (*s).is_none() })); - - let mut slab = Vec::with_capacity(self.size); - slab.extend((1..self.size).map(Slot::new)); - slab.push(Slot::new(Address::NULL)); - - self.slab.with_mut(|s| { - // this mut access is safe — it only occurs to initially - // allocate the page, which only happens on this thread; if the - // page has not yet been allocated, other threads will not try - // to access it yet. - unsafe { - *s = Some(slab.into_boxed_slice()); - } - }); - } - - pub(crate) fn alloc(&self, local: &Local) -> Option
{ - let head = local.head(); - - // are there any items on the local free list? (fast path) - let head = if head < self.size { - head - } else { - // if the local free list is empty, pop all the items on the remote - // free list onto the local free list. - self.remote.pop_all()? - }; - - // if the head is still null, both the local and remote free lists are - // empty --- we can't fit any more items on this page. - if head == Address::NULL { - return None; - } - - // do we need to allocate storage for this page? - let page_needs_alloc = self.slab.with(|s| unsafe { (*s).is_none() }); - if page_needs_alloc { - self.alloc_page(local); - } - - let gen = self.slab.with(|slab| { - let slab = unsafe { &*(slab) } - .as_ref() - .expect("page must have been allocated to alloc!"); - - let slot = &slab[head]; - - local.set_head(slot.next()); - slot.generation() - }); - - let index = head + self.prev_sz; - - Some(Address::new(index, gen)) - } - - pub(crate) fn get(&self, addr: Address) -> Option<&T> { - let page_offset = addr.slot() - self.prev_sz; - - self.slab - .with(|slab| unsafe { &*slab }.as_ref()?.get(page_offset)) - .map(|slot| slot.get()) - } - - pub(crate) fn remove_local(&self, local: &Local, addr: Address) { - let offset = addr.slot() - self.prev_sz; - - self.slab.with(|slab| { - let slab = unsafe { &*slab }.as_ref(); - - let slot = if let Some(slot) = slab.and_then(|slab| slab.get(offset)) { - slot - } else { - return; - }; - - if slot.reset(addr.generation()) { - slot.set_next(local.head()); - local.set_head(offset); - } - }) - } - - pub(crate) fn remove_remote(&self, addr: Address) { - let offset = addr.slot() - self.prev_sz; - - self.slab.with(|slab| { - let slab = unsafe { &*slab }.as_ref(); - - let slot = if let Some(slot) = slab.and_then(|slab| slab.get(offset)) { - slot - } else { - return; - }; - - if !slot.reset(addr.generation()) { - return; - } - - self.remote.push(offset, |next| slot.set_next(next)); - }) - } -} - -impl fmt::Debug for Local { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.head.with(|head| { - let head = unsafe { *head }; - f.debug_struct("Local") - .field("head", &format_args!("{:#0x}", head)) - .finish() - }) - } -} - -impl fmt::Debug for Shared { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Shared") - .field("remote", &self.remote) - .field("prev_sz", &self.prev_sz) - .field("size", &self.size) - // .field("slab", &self.slab) - .finish() - } -} diff --git a/third_party/rust/tokio-0.2.25/src/util/slab/shard.rs b/third_party/rust/tokio-0.2.25/src/util/slab/shard.rs deleted file mode 100644 index eaca6f656a23..000000000000 --- a/third_party/rust/tokio-0.2.25/src/util/slab/shard.rs +++ /dev/null @@ -1,105 +0,0 @@ -use crate::util::slab::{page, Address, Entry, MAX_PAGES}; - -use std::fmt; - -// ┌─────────────┐ ┌────────┐ -// │ page 1 │ │ │ -// ├─────────────┤ ┌───▶│ next──┼─┐ -// │ page 2 │ │ ├────────┤ │ -// │ │ │ │XXXXXXXX│ │ -// │ local_free──┼─┘ ├────────┤ │ -// │ global_free─┼─┐ │ │◀┘ -// ├─────────────┤ └───▶│ next──┼─┐ -// │ page 3 │ ├────────┤ │ -// └─────────────┘ │XXXXXXXX│ │ -// ... ├────────┤ │ -// ┌─────────────┐ │XXXXXXXX│ │ -// │ page n │ ├────────┤ │ -// └─────────────┘ │ │◀┘ -// │ next──┼───▶ -// ├────────┤ -// │XXXXXXXX│ -// └────────┘ -// ... -pub(super) struct Shard { - /// The local free list for each page. 
- /// - /// These are only ever accessed from this shard's thread, so they are - /// stored separately from the shared state for the page that can be - /// accessed concurrently, to minimize false sharing. - local: Box<[page::Local]>, - /// The shared state for each page in this shard. - /// - /// This consists of the page's metadata (size, previous size), remote free - /// list, and a pointer to the actual array backing that page. - shared: Box<[page::Shared]>, -} - -impl Shard { - pub(super) fn new() -> Shard { - let mut total_sz = 0; - let shared = (0..MAX_PAGES) - .map(|page_num| { - let sz = page::size(page_num); - let prev_sz = total_sz; - total_sz += sz; - page::Shared::new(sz, prev_sz) - }) - .collect(); - - let local = (0..MAX_PAGES).map(|_| page::Local::new()).collect(); - - Shard { local, shared } - } - - pub(super) fn alloc(&self) -> Option
{ - // Can we fit the value into an existing page? - for (page_idx, page) in self.shared.iter().enumerate() { - let local = self.local(page_idx); - - if let Some(page_offset) = page.alloc(local) { - return Some(page_offset); - } - } - - None - } - - pub(super) fn get(&self, addr: Address) -> Option<&T> { - let page_idx = addr.page(); - - if page_idx > self.shared.len() { - return None; - } - - self.shared[page_idx].get(addr) - } - - /// Remove an item on the shard's local thread. - pub(super) fn remove_local(&self, addr: Address) { - let page_idx = addr.page(); - - if let Some(page) = self.shared.get(page_idx) { - page.remove_local(self.local(page_idx), addr); - } - } - - /// Remove an item, while on a different thread from the shard's local thread. - pub(super) fn remove_remote(&self, addr: Address) { - if let Some(page) = self.shared.get(addr.page()) { - page.remove_remote(addr); - } - } - - fn local(&self, i: usize) -> &page::Local { - &self.local[i] - } -} - -impl fmt::Debug for Shard { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Shard") - .field("shared", &self.shared) - .finish() - } -} diff --git a/third_party/rust/tokio-0.2.25/src/util/slab/slot.rs b/third_party/rust/tokio-0.2.25/src/util/slab/slot.rs deleted file mode 100644 index 0608b261899e..000000000000 --- a/third_party/rust/tokio-0.2.25/src/util/slab/slot.rs +++ /dev/null @@ -1,42 +0,0 @@ -use crate::loom::cell::UnsafeCell; -use crate::util::slab::{Entry, Generation}; - -/// Stores an entry in the slab. -pub(super) struct Slot { - next: UnsafeCell, - entry: T, -} - -impl Slot { - /// Initialize a new `Slot` linked to `next`. - /// - /// The entry is initialized to a default value. - pub(super) fn new(next: usize) -> Slot { - Slot { - next: UnsafeCell::new(next), - entry: T::default(), - } - } - - pub(super) fn get(&self) -> &T { - &self.entry - } - - pub(super) fn generation(&self) -> Generation { - self.entry.generation() - } - - pub(super) fn reset(&self, generation: Generation) -> bool { - self.entry.reset(generation) - } - - pub(super) fn next(&self) -> usize { - self.next.with(|next| unsafe { *next }) - } - - pub(super) fn set_next(&self, next: usize) { - self.next.with_mut(|n| unsafe { - (*n) = next; - }) - } -} diff --git a/third_party/rust/tokio-0.2.25/src/util/slab/stack.rs b/third_party/rust/tokio-0.2.25/src/util/slab/stack.rs deleted file mode 100644 index 0ae0d71006bc..000000000000 --- a/third_party/rust/tokio-0.2.25/src/util/slab/stack.rs +++ /dev/null @@ -1,58 +0,0 @@ -use crate::loom::sync::atomic::AtomicUsize; -use crate::util::slab::Address; - -use std::fmt; -use std::sync::atomic::Ordering; -use std::usize; - -pub(super) struct TransferStack { - head: AtomicUsize, -} - -impl TransferStack { - pub(super) fn new() -> Self { - Self { - head: AtomicUsize::new(Address::NULL), - } - } - - pub(super) fn pop_all(&self) -> Option { - let val = self.head.swap(Address::NULL, Ordering::Acquire); - - if val == Address::NULL { - None - } else { - Some(val) - } - } - - pub(super) fn push(&self, value: usize, before: impl Fn(usize)) { - let mut next = self.head.load(Ordering::Relaxed); - - loop { - before(next); - - match self - .head - .compare_exchange(next, value, Ordering::AcqRel, Ordering::Acquire) - { - // lost the race! 
- Err(actual) => next = actual, - Ok(_) => return, - } - } - } -} - -impl fmt::Debug for TransferStack { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - // Loom likes to dump all its internal state in `fmt::Debug` impls, so - // we override this to just print the current value in tests. - f.debug_struct("TransferStack") - .field( - "head", - &format_args!("{:#x}", self.head.load(Ordering::Relaxed)), - ) - .finish() - } -} diff --git a/third_party/rust/tokio-0.2.25/src/util/slab/tests/loom_slab.rs b/third_party/rust/tokio-0.2.25/src/util/slab/tests/loom_slab.rs deleted file mode 100644 index 48e94f003410..000000000000 --- a/third_party/rust/tokio-0.2.25/src/util/slab/tests/loom_slab.rs +++ /dev/null @@ -1,327 +0,0 @@ -use crate::io::driver::ScheduledIo; -use crate::util::slab::{Address, Slab}; - -use loom::sync::{Arc, Condvar, Mutex}; -use loom::thread; - -#[test] -fn local_remove() { - loom::model(|| { - let slab = Arc::new(Slab::new()); - - let s = slab.clone(); - let t1 = thread::spawn(move || { - let idx = store_val(&s, 1); - assert_eq!(get_val(&s, idx), Some(1)); - s.remove(idx); - assert_eq!(get_val(&s, idx), None); - let idx = store_val(&s, 2); - assert_eq!(get_val(&s, idx), Some(2)); - s.remove(idx); - assert_eq!(get_val(&s, idx), None); - }); - - let s = slab.clone(); - let t2 = thread::spawn(move || { - let idx = store_val(&s, 3); - assert_eq!(get_val(&s, idx), Some(3)); - s.remove(idx); - assert_eq!(get_val(&s, idx), None); - let idx = store_val(&s, 4); - s.remove(idx); - assert_eq!(get_val(&s, idx), None); - }); - - let s = slab; - let idx1 = store_val(&s, 5); - assert_eq!(get_val(&s, idx1), Some(5)); - let idx2 = store_val(&s, 6); - assert_eq!(get_val(&s, idx2), Some(6)); - s.remove(idx1); - assert_eq!(get_val(&s, idx1), None); - assert_eq!(get_val(&s, idx2), Some(6)); - s.remove(idx2); - assert_eq!(get_val(&s, idx2), None); - - t1.join().expect("thread 1 should not panic"); - t2.join().expect("thread 2 should not panic"); - }); -} - -#[test] -fn remove_remote() { - loom::model(|| { - let slab = Arc::new(Slab::new()); - - let idx1 = store_val(&slab, 1); - assert_eq!(get_val(&slab, idx1), Some(1)); - - let idx2 = store_val(&slab, 2); - assert_eq!(get_val(&slab, idx2), Some(2)); - - let idx3 = store_val(&slab, 3); - assert_eq!(get_val(&slab, idx3), Some(3)); - - let s = slab.clone(); - let t1 = thread::spawn(move || { - assert_eq!(get_val(&s, idx2), Some(2)); - s.remove(idx2); - assert_eq!(get_val(&s, idx2), None); - }); - - let s = slab.clone(); - let t2 = thread::spawn(move || { - assert_eq!(get_val(&s, idx3), Some(3)); - s.remove(idx3); - assert_eq!(get_val(&s, idx3), None); - }); - - t1.join().expect("thread 1 should not panic"); - t2.join().expect("thread 2 should not panic"); - - assert_eq!(get_val(&slab, idx1), Some(1)); - assert_eq!(get_val(&slab, idx2), None); - assert_eq!(get_val(&slab, idx3), None); - }); -} - -#[test] -fn remove_remote_and_reuse() { - loom::model(|| { - let slab = Arc::new(Slab::new()); - - let idx1 = store_val(&slab, 1); - let idx2 = store_val(&slab, 2); - - assert_eq!(get_val(&slab, idx1), Some(1)); - assert_eq!(get_val(&slab, idx2), Some(2)); - - let s = slab.clone(); - let t1 = thread::spawn(move || { - s.remove(idx1); - let value = get_val(&s, idx1); - - // We may or may not see the new value yet, depending on when - // this occurs, but we must either see the new value or `None`; - // the old value has been removed! 
- assert!(value == None || value == Some(3)); - }); - - let idx3 = store_when_free(&slab, 3); - t1.join().expect("thread 1 should not panic"); - - assert_eq!(get_val(&slab, idx3), Some(3)); - assert_eq!(get_val(&slab, idx2), Some(2)); - }); -} - -#[test] -fn concurrent_alloc_remove() { - loom::model(|| { - let slab = Arc::new(Slab::new()); - let pair = Arc::new((Mutex::new(None), Condvar::new())); - - let slab2 = slab.clone(); - let pair2 = pair.clone(); - let remover = thread::spawn(move || { - let (lock, cvar) = &*pair2; - for _ in 0..2 { - let mut next = lock.lock().unwrap(); - while next.is_none() { - next = cvar.wait(next).unwrap(); - } - let key = next.take().unwrap(); - slab2.remove(key); - assert_eq!(get_val(&slab2, key), None); - cvar.notify_one(); - } - }); - - let (lock, cvar) = &*pair; - for i in 0..2 { - let key = store_val(&slab, i); - - let mut next = lock.lock().unwrap(); - *next = Some(key); - cvar.notify_one(); - - // Wait for the item to be removed. - while next.is_some() { - next = cvar.wait(next).unwrap(); - } - - assert_eq!(get_val(&slab, key), None); - } - - remover.join().unwrap(); - }) -} - -#[test] -fn concurrent_remove_remote_and_reuse() { - loom::model(|| { - let slab = Arc::new(Slab::new()); - - let idx1 = store_val(&slab, 1); - let idx2 = store_val(&slab, 2); - - assert_eq!(get_val(&slab, idx1), Some(1)); - assert_eq!(get_val(&slab, idx2), Some(2)); - - let s = slab.clone(); - let s2 = slab.clone(); - let t1 = thread::spawn(move || { - s.remove(idx1); - }); - - let t2 = thread::spawn(move || { - s2.remove(idx2); - }); - - let idx3 = store_when_free(&slab, 3); - t1.join().expect("thread 1 should not panic"); - t2.join().expect("thread 1 should not panic"); - - assert!(get_val(&slab, idx1).is_none()); - assert!(get_val(&slab, idx2).is_none()); - assert_eq!(get_val(&slab, idx3), Some(3)); - }); -} - -#[test] -fn alloc_remove_get() { - loom::model(|| { - let slab = Arc::new(Slab::new()); - let pair = Arc::new((Mutex::new(None), Condvar::new())); - - let slab2 = slab.clone(); - let pair2 = pair.clone(); - let t1 = thread::spawn(move || { - let slab = slab2; - let (lock, cvar) = &*pair2; - // allocate one entry just so that we have to use the final one for - // all future allocations. - let _key0 = store_val(&slab, 0); - let key = store_val(&slab, 1); - - let mut next = lock.lock().unwrap(); - *next = Some(key); - cvar.notify_one(); - // remove the second entry - slab.remove(key); - // store a new readiness at the same location (since the slab - // already has an entry in slot 0) - store_val(&slab, 2); - }); - - let (lock, cvar) = &*pair; - // wait for the second entry to be stored... - let mut next = lock.lock().unwrap(); - while next.is_none() { - next = cvar.wait(next).unwrap(); - } - let key = next.unwrap(); - - // our generation will be stale when the second store occurs at that - // index, we must not see the value of that store. - let val = get_val(&slab, key); - assert_ne!(val, Some(2), "generation must have advanced!"); - - t1.join().unwrap(); - }) -} - -#[test] -fn alloc_remove_set() { - loom::model(|| { - let slab = Arc::new(Slab::new()); - let pair = Arc::new((Mutex::new(None), Condvar::new())); - - let slab2 = slab.clone(); - let pair2 = pair.clone(); - let t1 = thread::spawn(move || { - let slab = slab2; - let (lock, cvar) = &*pair2; - // allocate one entry just so that we have to use the final one for - // all future allocations. 
- let _key0 = store_val(&slab, 0); - let key = store_val(&slab, 1); - - let mut next = lock.lock().unwrap(); - *next = Some(key); - cvar.notify_one(); - - slab.remove(key); - // remove the old entry and insert a new one, with a new generation. - let key2 = slab.alloc().expect("store key 2"); - // after the remove, we must not see the value written with the - // stale index. - assert_eq!( - get_val(&slab, key), - None, - "stale set must no longer be visible" - ); - assert_eq!(get_val(&slab, key2), Some(0)); - key2 - }); - - let (lock, cvar) = &*pair; - - // wait for the second entry to be stored. the index we get from the - // other thread may become stale after a write. - let mut next = lock.lock().unwrap(); - while next.is_none() { - next = cvar.wait(next).unwrap(); - } - let key = next.unwrap(); - - // try to write to the index with our generation - slab.get(key).map(|val| val.set_readiness(key, |_| 2)); - - let key2 = t1.join().unwrap(); - // after the remove, we must not see the value written with the - // stale index either. - assert_eq!( - get_val(&slab, key), - None, - "stale set must no longer be visible" - ); - assert_eq!(get_val(&slab, key2), Some(0)); - }); -} - -fn get_val(slab: &Arc>, address: Address) -> Option { - slab.get(address).and_then(|s| s.get_readiness(address)) -} - -fn store_val(slab: &Arc>, readiness: usize) -> Address { - let key = slab.alloc().expect("allocate slot"); - - if let Some(slot) = slab.get(key) { - slot.set_readiness(key, |_| readiness) - .expect("generation should still be valid!"); - } else { - panic!("slab did not contain a value for {:?}", key); - } - - key -} - -fn store_when_free(slab: &Arc>, readiness: usize) -> Address { - let key = loop { - if let Some(key) = slab.alloc() { - break key; - } - - thread::yield_now(); - }; - - if let Some(slot) = slab.get(key) { - slot.set_readiness(key, |_| readiness) - .expect("generation should still be valid!"); - } else { - panic!("slab did not contain a value for {:?}", key); - } - - key -} diff --git a/third_party/rust/tokio-0.2.25/src/util/slab/tests/loom_stack.rs b/third_party/rust/tokio-0.2.25/src/util/slab/tests/loom_stack.rs deleted file mode 100644 index 47ad46d3a1c4..000000000000 --- a/third_party/rust/tokio-0.2.25/src/util/slab/tests/loom_stack.rs +++ /dev/null @@ -1,88 +0,0 @@ -use crate::util::slab::TransferStack; - -use loom::cell::UnsafeCell; -use loom::sync::Arc; -use loom::thread; - -#[test] -fn transfer_stack() { - loom::model(|| { - let causalities = [UnsafeCell::new(None), UnsafeCell::new(None)]; - let shared = Arc::new((causalities, TransferStack::new())); - let shared1 = shared.clone(); - let shared2 = shared.clone(); - - // Spawn two threads that both try to push to the stack. - let t1 = thread::spawn(move || { - let (causalities, stack) = &*shared1; - stack.push(0, |prev| { - causalities[0].with_mut(|c| unsafe { - *c = Some(prev); - }); - }); - }); - - let t2 = thread::spawn(move || { - let (causalities, stack) = &*shared2; - stack.push(1, |prev| { - causalities[1].with_mut(|c| unsafe { - *c = Some(prev); - }); - }); - }); - - let (causalities, stack) = &*shared; - - // Try to pop from the stack... - let mut idx = stack.pop_all(); - while idx == None { - idx = stack.pop_all(); - thread::yield_now(); - } - let idx = idx.unwrap(); - - let saw_both = causalities[idx].with(|val| { - let val = unsafe { *val }; - assert!( - val.is_some(), - "UnsafeCell write must happen-before index is pushed to the stack!", - ); - // were there two entries in the stack? 
if so, check that - // both saw a write. - if let Some(c) = causalities.get(val.unwrap()) { - c.with(|val| { - let val = unsafe { *val }; - assert!( - val.is_some(), - "UnsafeCell write must happen-before index is pushed to the stack!", - ); - }); - true - } else { - false - } - }); - - // We only saw one push. Ensure that the other push happens too. - if !saw_both { - // Try to pop from the stack... - let mut idx = stack.pop_all(); - while idx == None { - idx = stack.pop_all(); - thread::yield_now(); - } - let idx = idx.unwrap(); - - causalities[idx].with(|val| { - let val = unsafe { *val }; - assert!( - val.is_some(), - "UnsafeCell write must happen-before index is pushed to the stack!", - ); - }); - } - - t1.join().unwrap(); - t2.join().unwrap(); - }); -} diff --git a/third_party/rust/tokio-0.2.25/src/util/slab/tests/mod.rs b/third_party/rust/tokio-0.2.25/src/util/slab/tests/mod.rs deleted file mode 100644 index 7f7935446670..000000000000 --- a/third_party/rust/tokio-0.2.25/src/util/slab/tests/mod.rs +++ /dev/null @@ -1,2 +0,0 @@ -mod loom_slab; -mod loom_stack; diff --git a/third_party/rust/tokio-0.2.25/src/util/trace.rs b/third_party/rust/tokio-0.2.25/src/util/trace.rs deleted file mode 100644 index d8c6120d97c2..000000000000 --- a/third_party/rust/tokio-0.2.25/src/util/trace.rs +++ /dev/null @@ -1,57 +0,0 @@ -cfg_trace! { - cfg_rt_core! { - use std::future::Future; - use std::pin::Pin; - use std::task::{Context, Poll}; - use pin_project_lite::pin_project; - - use tracing::Span; - - pin_project! { - /// A future that has been instrumented with a `tracing` span. - #[derive(Debug, Clone)] - pub(crate) struct Instrumented { - #[pin] - inner: T, - span: Span, - } - } - - impl Future for Instrumented { - type Output = T::Output; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let this = self.project(); - let _enter = this.span.enter(); - this.inner.poll(cx) - } - } - - impl Instrumented { - pub(crate) fn new(inner: T, span: Span) -> Self { - Self { inner, span } - } - } - - #[inline] - pub(crate) fn task(task: F, kind: &'static str) -> Instrumented { - let span = tracing::trace_span!( - target: "tokio::task", - "task", - %kind, - future = %std::any::type_name::(), - ); - Instrumented::new(task, span) - } - } -} - -cfg_not_trace! { - cfg_rt_core! { - #[inline] - pub(crate) fn task(task: F, _: &'static str) -> F { - // nop - task - } - } -} diff --git a/third_party/rust/tokio-0.2.25/src/util/try_lock.rs b/third_party/rust/tokio-0.2.25/src/util/try_lock.rs deleted file mode 100644 index 8b0edb4a8736..000000000000 --- a/third_party/rust/tokio-0.2.25/src/util/try_lock.rs +++ /dev/null @@ -1,80 +0,0 @@ -use crate::loom::sync::atomic::AtomicBool; - -use std::cell::UnsafeCell; -use std::marker::PhantomData; -use std::ops::{Deref, DerefMut}; -use std::sync::atomic::Ordering::SeqCst; - -pub(crate) struct TryLock { - locked: AtomicBool, - data: UnsafeCell, -} - -pub(crate) struct LockGuard<'a, T> { - lock: &'a TryLock, - _p: PhantomData>, -} - -unsafe impl Send for TryLock {} -unsafe impl Sync for TryLock {} - -unsafe impl Sync for LockGuard<'_, T> {} - -macro_rules! 
new { - ($data:ident) => { - TryLock { - locked: AtomicBool::new(false), - data: UnsafeCell::new($data), - } - }; -} - -impl TryLock { - #[cfg(not(loom))] - /// Create a new `TryLock` - pub(crate) const fn new(data: T) -> TryLock { - new!(data) - } - - #[cfg(loom)] - /// Create a new `TryLock` - pub(crate) fn new(data: T) -> TryLock { - new!(data) - } - - /// Attempt to acquire lock - pub(crate) fn try_lock(&self) -> Option> { - if self - .locked - .compare_exchange(false, true, SeqCst, SeqCst) - .is_err() - { - return None; - } - - Some(LockGuard { - lock: self, - _p: PhantomData, - }) - } -} - -impl Deref for LockGuard<'_, T> { - type Target = T; - - fn deref(&self) -> &T { - unsafe { &*self.lock.data.get() } - } -} - -impl DerefMut for LockGuard<'_, T> { - fn deref_mut(&mut self) -> &mut T { - unsafe { &mut *self.lock.data.get() } - } -} - -impl Drop for LockGuard<'_, T> { - fn drop(&mut self) { - self.lock.locked.store(false, SeqCst); - } -} diff --git a/third_party/rust/tokio-0.2.25/src/util/wake.rs b/third_party/rust/tokio-0.2.25/src/util/wake.rs deleted file mode 100644 index e49f1e895dc9..000000000000 --- a/third_party/rust/tokio-0.2.25/src/util/wake.rs +++ /dev/null @@ -1,83 +0,0 @@ -use std::marker::PhantomData; -use std::mem::ManuallyDrop; -use std::ops::Deref; -use std::sync::Arc; -use std::task::{RawWaker, RawWakerVTable, Waker}; - -/// Simplfied waking interface based on Arcs -pub(crate) trait Wake: Send + Sync { - /// Wake by value - fn wake(self: Arc); - - /// Wake by reference - fn wake_by_ref(arc_self: &Arc); -} - -/// A `Waker` that is only valid for a given lifetime. -#[derive(Debug)] -pub(crate) struct WakerRef<'a> { - waker: ManuallyDrop, - _p: PhantomData<&'a ()>, -} - -impl Deref for WakerRef<'_> { - type Target = Waker; - - fn deref(&self) -> &Waker { - &self.waker - } -} - -/// Creates a reference to a `Waker` from a reference to `Arc`. 
-pub(crate) fn waker_ref(wake: &Arc) -> WakerRef<'_> { - let ptr = &**wake as *const _ as *const (); - - let waker = unsafe { Waker::from_raw(RawWaker::new(ptr, waker_vtable::())) }; - - WakerRef { - waker: ManuallyDrop::new(waker), - _p: PhantomData, - } -} - -fn waker_vtable() -> &'static RawWakerVTable { - &RawWakerVTable::new( - clone_arc_raw::, - wake_arc_raw::, - wake_by_ref_arc_raw::, - drop_arc_raw::, - ) -} - -unsafe fn inc_ref_count(data: *const ()) { - // Retain Arc, but don't touch refcount by wrapping in ManuallyDrop - let arc = ManuallyDrop::new(Arc::::from_raw(data as *const T)); - - // Now increase refcount, but don't drop new refcount either - let arc_clone: ManuallyDrop<_> = arc.clone(); - - // Drop explicitly to avoid clippy warnings - drop(arc); - drop(arc_clone); -} - -unsafe fn clone_arc_raw(data: *const ()) -> RawWaker { - inc_ref_count::(data); - RawWaker::new(data, waker_vtable::()) -} - -unsafe fn wake_arc_raw(data: *const ()) { - let arc: Arc = Arc::from_raw(data as *const T); - Wake::wake(arc); -} - -// used by `waker_ref` -unsafe fn wake_by_ref_arc_raw(data: *const ()) { - // Retain Arc, but don't touch refcount by wrapping in ManuallyDrop - let arc = ManuallyDrop::new(Arc::::from_raw(data as *const T)); - Wake::wake_by_ref(&arc); -} - -unsafe fn drop_arc_raw(data: *const ()) { - drop(Arc::::from_raw(data as *const T)) -} diff --git a/third_party/rust/tokio-0.2.25/tests/_require_full.rs b/third_party/rust/tokio-0.2.25/tests/_require_full.rs deleted file mode 100644 index 98455bedefe6..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/_require_full.rs +++ /dev/null @@ -1,2 +0,0 @@ -#![cfg(not(feature = "full"))] -compile_error!("run main Tokio tests with `--features full`"); diff --git a/third_party/rust/tokio-0.2.25/tests/async_send_sync.rs b/third_party/rust/tokio-0.2.25/tests/async_send_sync.rs deleted file mode 100644 index afe053b1010c..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/async_send_sync.rs +++ /dev/null @@ -1,264 +0,0 @@ -#![warn(rust_2018_idioms)] -#![cfg(feature = "full")] - -use std::cell::Cell; -use std::io::Cursor; -use std::net::SocketAddr; -use std::rc::Rc; -use tokio::net::TcpStream; -use tokio::time::{Duration, Instant}; - -#[allow(dead_code)] -type BoxFutureSync = std::pin::Pin + Send + Sync>>; -#[allow(dead_code)] -type BoxFutureSend = std::pin::Pin + Send>>; -#[allow(dead_code)] -type BoxFuture = std::pin::Pin>>; - -#[allow(dead_code)] -fn require_send(_t: &T) {} -#[allow(dead_code)] -fn require_sync(_t: &T) {} - -#[allow(dead_code)] -struct Invalid; - -trait AmbiguousIfSend { - fn some_item(&self) {} -} -impl AmbiguousIfSend<()> for T {} -impl AmbiguousIfSend for T {} - -trait AmbiguousIfSync { - fn some_item(&self) {} -} -impl AmbiguousIfSync<()> for T {} -impl AmbiguousIfSync for T {} - -macro_rules! into_todo { - ($typ:ty) => {{ - let x: $typ = todo!(); - x - }}; -} -macro_rules! 
assert_value { - ($type:ty: Send & Sync) => { - #[allow(unreachable_code)] - #[allow(unused_variables)] - const _: fn() = || { - let f: $type = todo!(); - require_send(&f); - require_sync(&f); - }; - }; - ($type:ty: !Send & Sync) => { - #[allow(unreachable_code)] - #[allow(unused_variables)] - const _: fn() = || { - let f: $type = todo!(); - AmbiguousIfSend::some_item(&f); - require_sync(&f); - }; - }; - ($type:ty: Send & !Sync) => { - #[allow(unreachable_code)] - #[allow(unused_variables)] - const _: fn() = || { - let f: $type = todo!(); - require_send(&f); - AmbiguousIfSync::some_item(&f); - }; - }; - ($type:ty: !Send & !Sync) => { - #[allow(unreachable_code)] - #[allow(unused_variables)] - const _: fn() = || { - let f: $type = todo!(); - AmbiguousIfSend::some_item(&f); - AmbiguousIfSync::some_item(&f); - }; - }; -} -macro_rules! async_assert_fn { - ($($f:ident $(< $($generic:ty),* > )? )::+($($arg:ty),*): Send & Sync) => { - #[allow(unreachable_code)] - #[allow(unused_variables)] - const _: fn() = || { - let f = $($f $(::<$($generic),*>)? )::+( $( into_todo!($arg) ),* ); - require_send(&f); - require_sync(&f); - }; - }; - ($($f:ident $(< $($generic:ty),* > )? )::+($($arg:ty),*): Send & !Sync) => { - #[allow(unreachable_code)] - #[allow(unused_variables)] - const _: fn() = || { - let f = $($f $(::<$($generic),*>)? )::+( $( into_todo!($arg) ),* ); - require_send(&f); - AmbiguousIfSync::some_item(&f); - }; - }; - ($($f:ident $(< $($generic:ty),* > )? )::+($($arg:ty),*): !Send & Sync) => { - #[allow(unreachable_code)] - #[allow(unused_variables)] - const _: fn() = || { - let f = $($f $(::<$($generic),*>)? )::+( $( into_todo!($arg) ),* ); - AmbiguousIfSend::some_item(&f); - require_sync(&f); - }; - }; - ($($f:ident $(< $($generic:ty),* > )? )::+($($arg:ty),*): !Send & !Sync) => { - #[allow(unreachable_code)] - #[allow(unused_variables)] - const _: fn() = || { - let f = $($f $(::<$($generic),*>)? 
)::+( $( into_todo!($arg) ),* ); - AmbiguousIfSend::some_item(&f); - AmbiguousIfSync::some_item(&f); - }; - }; -} - -async_assert_fn!(tokio::io::copy(&mut TcpStream, &mut TcpStream): Send & Sync); -async_assert_fn!(tokio::io::empty(): Send & Sync); -async_assert_fn!(tokio::io::repeat(u8): Send & Sync); -async_assert_fn!(tokio::io::sink(): Send & Sync); -async_assert_fn!(tokio::io::split(TcpStream): Send & Sync); -async_assert_fn!(tokio::io::stderr(): Send & Sync); -async_assert_fn!(tokio::io::stdin(): Send & Sync); -async_assert_fn!(tokio::io::stdout(): Send & Sync); -async_assert_fn!(tokio::io::Split>>::next_segment(_): Send & Sync); - -async_assert_fn!(tokio::fs::canonicalize(&str): Send & Sync); -async_assert_fn!(tokio::fs::copy(&str, &str): Send & Sync); -async_assert_fn!(tokio::fs::create_dir(&str): Send & Sync); -async_assert_fn!(tokio::fs::create_dir_all(&str): Send & Sync); -async_assert_fn!(tokio::fs::hard_link(&str, &str): Send & Sync); -async_assert_fn!(tokio::fs::metadata(&str): Send & Sync); -async_assert_fn!(tokio::fs::read(&str): Send & Sync); -async_assert_fn!(tokio::fs::read_dir(&str): Send & Sync); -async_assert_fn!(tokio::fs::read_link(&str): Send & Sync); -async_assert_fn!(tokio::fs::read_to_string(&str): Send & Sync); -async_assert_fn!(tokio::fs::remove_dir(&str): Send & Sync); -async_assert_fn!(tokio::fs::remove_dir_all(&str): Send & Sync); -async_assert_fn!(tokio::fs::remove_file(&str): Send & Sync); -async_assert_fn!(tokio::fs::rename(&str, &str): Send & Sync); -async_assert_fn!(tokio::fs::set_permissions(&str, std::fs::Permissions): Send & Sync); -async_assert_fn!(tokio::fs::symlink_metadata(&str): Send & Sync); -async_assert_fn!(tokio::fs::write(&str, Vec): Send & Sync); -async_assert_fn!(tokio::fs::ReadDir::next_entry(_): Send & Sync); -async_assert_fn!(tokio::fs::OpenOptions::open(_, &str): Send & Sync); -async_assert_fn!(tokio::fs::DirEntry::metadata(_): Send & Sync); -async_assert_fn!(tokio::fs::DirEntry::file_type(_): Send & Sync); - -async_assert_fn!(tokio::fs::File::open(&str): Send & Sync); -async_assert_fn!(tokio::fs::File::create(&str): Send & Sync); -async_assert_fn!(tokio::fs::File::seek(_, std::io::SeekFrom): Send & Sync); -async_assert_fn!(tokio::fs::File::sync_all(_): Send & Sync); -async_assert_fn!(tokio::fs::File::sync_data(_): Send & Sync); -async_assert_fn!(tokio::fs::File::set_len(_, u64): Send & Sync); -async_assert_fn!(tokio::fs::File::metadata(_): Send & Sync); -async_assert_fn!(tokio::fs::File::try_clone(_): Send & Sync); -async_assert_fn!(tokio::fs::File::into_std(_): Send & Sync); -async_assert_fn!(tokio::fs::File::set_permissions(_, std::fs::Permissions): Send & Sync); - -async_assert_fn!(tokio::net::lookup_host(SocketAddr): Send & Sync); -async_assert_fn!(tokio::net::TcpListener::bind(SocketAddr): Send & Sync); -async_assert_fn!(tokio::net::TcpListener::accept(_): Send & Sync); -async_assert_fn!(tokio::net::TcpStream::connect(SocketAddr): Send & Sync); -async_assert_fn!(tokio::net::TcpStream::peek(_, &mut [u8]): Send & Sync); -async_assert_fn!(tokio::net::tcp::ReadHalf::peek(_, &mut [u8]): Send & Sync); -async_assert_fn!(tokio::net::UdpSocket::bind(SocketAddr): Send & Sync); -async_assert_fn!(tokio::net::UdpSocket::connect(_, SocketAddr): Send & Sync); -async_assert_fn!(tokio::net::UdpSocket::send(_, &[u8]): Send & Sync); -async_assert_fn!(tokio::net::UdpSocket::recv(_, &mut [u8]): Send & Sync); -async_assert_fn!(tokio::net::UdpSocket::send_to(_, &[u8], SocketAddr): Send & Sync); -async_assert_fn!(tokio::net::UdpSocket::recv_from(_, 
&mut [u8]): Send & Sync); -async_assert_fn!(tokio::net::udp::RecvHalf::recv(_, &mut [u8]): Send & Sync); -async_assert_fn!(tokio::net::udp::RecvHalf::recv_from(_, &mut [u8]): Send & Sync); -async_assert_fn!(tokio::net::udp::SendHalf::send(_, &[u8]): Send & Sync); -async_assert_fn!(tokio::net::udp::SendHalf::send_to(_, &[u8], &SocketAddr): Send & Sync); - -#[cfg(unix)] -mod unix_datagram { - use super::*; - async_assert_fn!(tokio::net::UnixListener::bind(&str): Send & Sync); - async_assert_fn!(tokio::net::UnixListener::accept(_): Send & Sync); - async_assert_fn!(tokio::net::UnixDatagram::send(_, &[u8]): Send & Sync); - async_assert_fn!(tokio::net::UnixDatagram::recv(_, &mut [u8]): Send & Sync); - async_assert_fn!(tokio::net::UnixDatagram::send_to(_, &[u8], &str): Send & Sync); - async_assert_fn!(tokio::net::UnixDatagram::recv_from(_, &mut [u8]): Send & Sync); - async_assert_fn!(tokio::net::UnixStream::connect(&str): Send & Sync); -} - -async_assert_fn!(tokio::process::Child::wait_with_output(_): Send & Sync); -async_assert_fn!(tokio::signal::ctrl_c(): Send & Sync); -#[cfg(unix)] -async_assert_fn!(tokio::signal::unix::Signal::recv(_): Send & Sync); - -async_assert_fn!(tokio::stream::empty>(): Send & Sync); -async_assert_fn!(tokio::stream::pending>(): Send & Sync); -async_assert_fn!(tokio::stream::iter(std::vec::IntoIter): Send & Sync); - -async_assert_fn!(tokio::sync::Barrier::wait(_): Send & Sync); -async_assert_fn!(tokio::sync::Mutex::lock(_): Send & Sync); -async_assert_fn!(tokio::sync::Mutex>::lock(_): Send & Sync); -async_assert_fn!(tokio::sync::Mutex>::lock(_): !Send & !Sync); -async_assert_fn!(tokio::sync::Mutex::lock_owned(_): Send & Sync); -async_assert_fn!(tokio::sync::Mutex>::lock_owned(_): Send & Sync); -async_assert_fn!(tokio::sync::Mutex>::lock_owned(_): !Send & !Sync); -async_assert_fn!(tokio::sync::Notify::notified(_): Send & !Sync); -async_assert_fn!(tokio::sync::RwLock::read(_): Send & Sync); -async_assert_fn!(tokio::sync::RwLock::write(_): Send & Sync); -async_assert_fn!(tokio::sync::RwLock>::read(_): !Send & !Sync); -async_assert_fn!(tokio::sync::RwLock>::write(_): !Send & !Sync); -async_assert_fn!(tokio::sync::RwLock>::read(_): !Send & !Sync); -async_assert_fn!(tokio::sync::RwLock>::write(_): !Send & !Sync); -async_assert_fn!(tokio::sync::Semaphore::acquire(_): Send & Sync); - -async_assert_fn!(tokio::sync::broadcast::Receiver::recv(_): Send & Sync); -async_assert_fn!(tokio::sync::broadcast::Receiver>::recv(_): Send & Sync); -async_assert_fn!(tokio::sync::broadcast::Receiver>::recv(_): !Send & !Sync); - -async_assert_fn!(tokio::sync::mpsc::Receiver::recv(_): Send & Sync); -async_assert_fn!(tokio::sync::mpsc::Receiver>::recv(_): Send & Sync); -async_assert_fn!(tokio::sync::mpsc::Receiver>::recv(_): !Send & !Sync); -async_assert_fn!(tokio::sync::mpsc::Sender::send(_, u8): Send & Sync); -async_assert_fn!(tokio::sync::mpsc::Sender>::send(_, Cell): Send & !Sync); -async_assert_fn!(tokio::sync::mpsc::Sender>::send(_, Rc): !Send & !Sync); - -async_assert_fn!(tokio::sync::mpsc::UnboundedReceiver::recv(_): Send & Sync); -async_assert_fn!(tokio::sync::mpsc::UnboundedReceiver>::recv(_): Send & Sync); -async_assert_fn!(tokio::sync::mpsc::UnboundedReceiver>::recv(_): !Send & !Sync); - -async_assert_fn!(tokio::sync::watch::Receiver::recv(_): Send & Sync); -async_assert_fn!(tokio::sync::watch::Receiver>::recv(_): !Send & !Sync); -async_assert_fn!(tokio::sync::watch::Receiver>::recv(_): !Send & !Sync); -async_assert_fn!(tokio::sync::watch::Sender::closed(_): Send & Sync); 
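The async_send_sync.rs assertions above rest on a small trait-ambiguity trick: a positive `Send`/`Sync` check is just a bound on a helper function, while a negative check is a call that only type-checks when the auto trait is absent, because otherwise two blanket impls would both apply. The standalone sketch below distills that mechanism under the same names the vendored test uses (`require_send`, `require_sync`, `AmbiguousIfSend`, `Invalid`); the `main` body is illustrative only and is not part of the deleted test.

use std::rc::Rc;

// Positive assertions: these compile only if `T` implements the auto trait.
fn require_send<T: Send>(_t: &T) {}
fn require_sync<T: Sync>(_t: &T) {}

// Marker type used solely to create a second, competing blanket impl.
#[allow(dead_code)]
struct Invalid;

trait AmbiguousIfSend<A> {
    fn some_item(&self) {}
}

// This impl applies to every type...
impl<T> AmbiguousIfSend<()> for T {}
// ...and this one applies only to `Send` types. A call to
// `AmbiguousIfSend::some_item` therefore resolves only for `!Send` types;
// for `Send` types both impls match and the compiler rejects the call as
// ambiguous, which serves as the "negative" assertion.
impl<T: Send> AmbiguousIfSend<Invalid> for T {}

fn main() {
    // u32 and &str are Send + Sync, so the positive assertions compile.
    require_send(&1u32);
    require_sync(&"hello");

    // Rc<u32> is !Send: only the blanket `AmbiguousIfSend<()>` impl applies,
    // so this call is unambiguous and compiles.
    let not_send = Rc::new(1u32);
    AmbiguousIfSend::some_item(&not_send);

    // Uncommenting the next line would fail to compile, because u32 is Send
    // and both blanket impls apply:
    // AmbiguousIfSend::some_item(&1u32);
}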
-async_assert_fn!(tokio::sync::watch::Sender>::closed(_): !Send & !Sync); -async_assert_fn!(tokio::sync::watch::Sender>::closed(_): !Send & !Sync); - -async_assert_fn!(tokio::task::LocalKey::scope(_, u32, BoxFutureSync<()>): Send & Sync); -async_assert_fn!(tokio::task::LocalKey::scope(_, u32, BoxFutureSend<()>): Send & !Sync); -async_assert_fn!(tokio::task::LocalKey::scope(_, u32, BoxFuture<()>): !Send & !Sync); -async_assert_fn!(tokio::task::LocalKey>::scope(_, Cell, BoxFutureSync<()>): Send & !Sync); -async_assert_fn!(tokio::task::LocalKey>::scope(_, Cell, BoxFutureSend<()>): Send & !Sync); -async_assert_fn!(tokio::task::LocalKey>::scope(_, Cell, BoxFuture<()>): !Send & !Sync); -async_assert_fn!(tokio::task::LocalKey>::scope(_, Rc, BoxFutureSync<()>): !Send & !Sync); -async_assert_fn!(tokio::task::LocalKey>::scope(_, Rc, BoxFutureSend<()>): !Send & !Sync); -async_assert_fn!(tokio::task::LocalKey>::scope(_, Rc, BoxFuture<()>): !Send & !Sync); -async_assert_fn!(tokio::task::LocalSet::run_until(_, BoxFutureSync<()>): !Send & !Sync); -assert_value!(tokio::task::LocalSet: !Send & !Sync); - -async_assert_fn!(tokio::time::advance(Duration): Send & Sync); -async_assert_fn!(tokio::time::delay_for(Duration): Send & Sync); -async_assert_fn!(tokio::time::delay_until(Instant): Send & Sync); -async_assert_fn!(tokio::time::timeout(Duration, BoxFutureSync<()>): Send & Sync); -async_assert_fn!(tokio::time::timeout(Duration, BoxFutureSend<()>): Send & !Sync); -async_assert_fn!(tokio::time::timeout(Duration, BoxFuture<()>): !Send & !Sync); -async_assert_fn!(tokio::time::timeout_at(Instant, BoxFutureSync<()>): Send & Sync); -async_assert_fn!(tokio::time::timeout_at(Instant, BoxFutureSend<()>): Send & !Sync); -async_assert_fn!(tokio::time::timeout_at(Instant, BoxFuture<()>): !Send & !Sync); -async_assert_fn!(tokio::time::Interval::tick(_): Send & Sync); - -#[cfg(tokio_unstable)] -assert_value!(tokio::sync::CancellationToken: Send & Sync); diff --git a/third_party/rust/tokio-0.2.25/tests/buffered.rs b/third_party/rust/tokio-0.2.25/tests/buffered.rs deleted file mode 100644 index 595f855a0f73..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/buffered.rs +++ /dev/null @@ -1,51 +0,0 @@ -#![warn(rust_2018_idioms)] -#![cfg(feature = "full")] - -use tokio::net::TcpListener; -use tokio::prelude::*; -use tokio_test::assert_ok; - -use std::io::prelude::*; -use std::net::TcpStream; -use std::thread; - -#[tokio::test] -async fn echo_server() { - const N: usize = 1024; - - let mut srv = assert_ok!(TcpListener::bind("127.0.0.1:0").await); - let addr = assert_ok!(srv.local_addr()); - - let msg = "foo bar baz"; - - let t = thread::spawn(move || { - let mut s = assert_ok!(TcpStream::connect(&addr)); - - let t2 = thread::spawn(move || { - let mut s = assert_ok!(TcpStream::connect(&addr)); - let mut b = vec![0; msg.len() * N]; - assert_ok!(s.read_exact(&mut b)); - b - }); - - let mut expected = Vec::::new(); - for _i in 0..N { - expected.extend(msg.as_bytes()); - let res = assert_ok!(s.write(msg.as_bytes())); - assert_eq!(res, msg.len()); - } - - (expected, t2) - }); - - let (mut a, _) = assert_ok!(srv.accept().await); - let (mut b, _) = assert_ok!(srv.accept().await); - - let n = assert_ok!(io::copy(&mut a, &mut b).await); - - let (expected, t2) = t.join().unwrap(); - let actual = t2.join().unwrap(); - - assert!(expected == actual); - assert_eq!(n, msg.len() as u64 * 1024); -} diff --git a/third_party/rust/tokio-0.2.25/tests/fs.rs b/third_party/rust/tokio-0.2.25/tests/fs.rs deleted file mode 100644 index 
13c44c08d6ab..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/fs.rs +++ /dev/null @@ -1,20 +0,0 @@ -#![warn(rust_2018_idioms)] -#![cfg(feature = "full")] - -use tokio::fs; -use tokio_test::assert_ok; - -#[tokio::test] -async fn path_read_write() { - let temp = tempdir(); - let dir = temp.path(); - - assert_ok!(fs::write(dir.join("bar"), b"bytes").await); - let out = assert_ok!(fs::read(dir.join("bar")).await); - - assert_eq!(out, b"bytes"); -} - -fn tempdir() -> tempfile::TempDir { - tempfile::tempdir().unwrap() -} diff --git a/third_party/rust/tokio-0.2.25/tests/fs_copy.rs b/third_party/rust/tokio-0.2.25/tests/fs_copy.rs deleted file mode 100644 index 8d1632013ea7..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/fs_copy.rs +++ /dev/null @@ -1,39 +0,0 @@ -#![warn(rust_2018_idioms)] -#![cfg(feature = "full")] - -use tempfile::tempdir; -use tokio::fs; - -#[tokio::test] -async fn copy() { - let dir = tempdir().unwrap(); - - let source_path = dir.path().join("foo.txt"); - let dest_path = dir.path().join("bar.txt"); - - fs::write(&source_path, b"Hello File!").await.unwrap(); - fs::copy(&source_path, &dest_path).await.unwrap(); - - let from = fs::read(&source_path).await.unwrap(); - let to = fs::read(&dest_path).await.unwrap(); - - assert_eq!(from, to); -} - -#[tokio::test] -async fn copy_permissions() { - let dir = tempdir().unwrap(); - let from_path = dir.path().join("foo.txt"); - let to_path = dir.path().join("bar.txt"); - - let from = tokio::fs::File::create(&from_path).await.unwrap(); - let mut from_perms = from.metadata().await.unwrap().permissions(); - from_perms.set_readonly(true); - from.set_permissions(from_perms.clone()).await.unwrap(); - - tokio::fs::copy(from_path, &to_path).await.unwrap(); - - let to_perms = tokio::fs::metadata(to_path).await.unwrap().permissions(); - - assert_eq!(from_perms, to_perms); -} diff --git a/third_party/rust/tokio-0.2.25/tests/fs_dir.rs b/third_party/rust/tokio-0.2.25/tests/fs_dir.rs deleted file mode 100644 index 6355ef05fcb8..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/fs_dir.rs +++ /dev/null @@ -1,119 +0,0 @@ -#![warn(rust_2018_idioms)] -#![cfg(feature = "full")] - -use tokio::fs; -use tokio_test::{assert_err, assert_ok}; - -use std::sync::{Arc, Mutex}; -use tempfile::tempdir; - -#[tokio::test] -async fn create_dir() { - let base_dir = tempdir().unwrap(); - let new_dir = base_dir.path().join("foo"); - let new_dir_2 = new_dir.clone(); - - assert_ok!(fs::create_dir(new_dir).await); - - assert!(new_dir_2.is_dir()); -} - -#[tokio::test] -async fn create_all() { - let base_dir = tempdir().unwrap(); - let new_dir = base_dir.path().join("foo").join("bar"); - let new_dir_2 = new_dir.clone(); - - assert_ok!(fs::create_dir_all(new_dir).await); - assert!(new_dir_2.is_dir()); -} - -#[tokio::test] -async fn build_dir() { - let base_dir = tempdir().unwrap(); - let new_dir = base_dir.path().join("foo").join("bar"); - let new_dir_2 = new_dir.clone(); - - assert_ok!(fs::DirBuilder::new().recursive(true).create(new_dir).await); - - assert!(new_dir_2.is_dir()); - assert_err!( - fs::DirBuilder::new() - .recursive(false) - .create(new_dir_2) - .await - ); -} - -#[tokio::test] -async fn remove() { - let base_dir = tempdir().unwrap(); - let new_dir = base_dir.path().join("foo"); - let new_dir_2 = new_dir.clone(); - - std::fs::create_dir(new_dir.clone()).unwrap(); - - assert_ok!(fs::remove_dir(new_dir).await); - assert!(!new_dir_2.exists()); -} - -#[tokio::test] -async fn read_inherent() { - let base_dir = tempdir().unwrap(); - - let p = 
base_dir.path(); - std::fs::create_dir(p.join("aa")).unwrap(); - std::fs::create_dir(p.join("bb")).unwrap(); - std::fs::create_dir(p.join("cc")).unwrap(); - - let files = Arc::new(Mutex::new(Vec::new())); - - let f = files.clone(); - let p = p.to_path_buf(); - - let mut entries = fs::read_dir(p).await.unwrap(); - - while let Some(e) = assert_ok!(entries.next_entry().await) { - let s = e.file_name().to_str().unwrap().to_string(); - f.lock().unwrap().push(s); - } - - let mut files = files.lock().unwrap(); - files.sort(); // because the order is not guaranteed - assert_eq!( - *files, - vec!["aa".to_string(), "bb".to_string(), "cc".to_string()] - ); -} - -#[tokio::test] -async fn read_stream() { - use tokio::stream::StreamExt; - - let base_dir = tempdir().unwrap(); - - let p = base_dir.path(); - std::fs::create_dir(p.join("aa")).unwrap(); - std::fs::create_dir(p.join("bb")).unwrap(); - std::fs::create_dir(p.join("cc")).unwrap(); - - let files = Arc::new(Mutex::new(Vec::new())); - - let f = files.clone(); - let p = p.to_path_buf(); - - let mut entries = fs::read_dir(p).await.unwrap(); - - while let Some(res) = entries.next().await { - let e = assert_ok!(res); - let s = e.file_name().to_str().unwrap().to_string(); - f.lock().unwrap().push(s); - } - - let mut files = files.lock().unwrap(); - files.sort(); // because the order is not guaranteed - assert_eq!( - *files, - vec!["aa".to_string(), "bb".to_string(), "cc".to_string()] - ); -} diff --git a/third_party/rust/tokio-0.2.25/tests/fs_file.rs b/third_party/rust/tokio-0.2.25/tests/fs_file.rs deleted file mode 100644 index eee9a5b5c596..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/fs_file.rs +++ /dev/null @@ -1,87 +0,0 @@ -#![warn(rust_2018_idioms)] -#![cfg(feature = "full")] - -use tokio::fs::File; -use tokio::prelude::*; -use tokio_test::task; - -use std::io::prelude::*; -use tempfile::NamedTempFile; - -const HELLO: &[u8] = b"hello world..."; - -#[tokio::test] -async fn basic_read() { - let mut tempfile = tempfile(); - tempfile.write_all(HELLO).unwrap(); - - let mut file = File::open(tempfile.path()).await.unwrap(); - - let mut buf = [0; 1024]; - let n = file.read(&mut buf).await.unwrap(); - - assert_eq!(n, HELLO.len()); - assert_eq!(&buf[..n], HELLO); -} - -#[tokio::test] -async fn basic_write() { - let tempfile = tempfile(); - - let mut file = File::create(tempfile.path()).await.unwrap(); - - file.write_all(HELLO).await.unwrap(); - file.flush().await.unwrap(); - - let file = std::fs::read(tempfile.path()).unwrap(); - assert_eq!(file, HELLO); -} - -#[tokio::test] -async fn coop() { - let mut tempfile = tempfile(); - tempfile.write_all(HELLO).unwrap(); - - let mut task = task::spawn(async { - let mut file = File::open(tempfile.path()).await.unwrap(); - - let mut buf = [0; 1024]; - - loop { - file.read(&mut buf).await.unwrap(); - file.seek(std::io::SeekFrom::Start(0)).await.unwrap(); - } - }); - - for _ in 0..1_000 { - if task.poll().is_pending() { - return; - } - } - - panic!("did not yield"); -} - -fn tempfile() -> NamedTempFile { - NamedTempFile::new().unwrap() -} - -#[tokio::test] -#[cfg(unix)] -async fn unix_fd() { - use std::os::unix::io::AsRawFd; - let tempfile = tempfile(); - - let file = File::create(tempfile.path()).await.unwrap(); - assert!(file.as_raw_fd() as u64 > 0); -} - -#[tokio::test] -#[cfg(windows)] -async fn windows_handle() { - use std::os::windows::io::AsRawHandle; - let tempfile = tempfile(); - - let file = File::create(tempfile.path()).await.unwrap(); - assert!(file.as_raw_handle() as u64 > 0); -} diff --git 
a/third_party/rust/tokio-0.2.25/tests/fs_file_mocked.rs b/third_party/rust/tokio-0.2.25/tests/fs_file_mocked.rs deleted file mode 100644 index 2e7e8b7cf481..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/fs_file_mocked.rs +++ /dev/null @@ -1,777 +0,0 @@ -#![warn(rust_2018_idioms)] -#![cfg(feature = "full")] - -macro_rules! ready { - ($e:expr $(,)?) => { - match $e { - std::task::Poll::Ready(t) => t, - std::task::Poll::Pending => return std::task::Poll::Pending, - } - }; -} - -#[macro_export] -macro_rules! cfg_fs { - ($($item:item)*) => { $($item)* } -} - -#[macro_export] -macro_rules! cfg_io_std { - ($($item:item)*) => { $($item)* } -} - -use futures::future; - -// Load source -#[allow(warnings)] -#[path = "../src/fs/file.rs"] -mod file; -use file::File; - -#[allow(warnings)] -#[path = "../src/io/blocking.rs"] -mod blocking; - -// Load mocked types -mod support { - pub(crate) mod mock_file; - pub(crate) mod mock_pool; -} -pub(crate) use support::mock_pool as pool; - -// Place them where the source expects them -pub(crate) mod io { - pub(crate) use tokio::io::*; - - pub(crate) use crate::blocking; - - pub(crate) mod sys { - pub(crate) use crate::support::mock_pool::{run, Blocking}; - } -} -pub(crate) mod fs { - pub(crate) mod sys { - pub(crate) use crate::support::mock_file::File; - pub(crate) use crate::support::mock_pool::{run, Blocking}; - } - - pub(crate) use crate::support::mock_pool::asyncify; -} -use fs::sys; - -use tokio::prelude::*; -use tokio_test::{assert_pending, assert_ready, assert_ready_err, assert_ready_ok, task}; - -use std::io::SeekFrom; - -const HELLO: &[u8] = b"hello world..."; -const FOO: &[u8] = b"foo bar baz..."; - -#[test] -fn open_read() { - let (mock, file) = sys::File::mock(); - mock.read(HELLO); - - let mut file = File::from_std(file); - - let mut buf = [0; 1024]; - let mut t = task::spawn(file.read(&mut buf)); - - assert_eq!(0, pool::len()); - assert_pending!(t.poll()); - - assert_eq!(1, mock.remaining()); - assert_eq!(1, pool::len()); - - pool::run_one(); - - assert_eq!(0, mock.remaining()); - assert!(t.is_woken()); - - let n = assert_ready_ok!(t.poll()); - assert_eq!(n, HELLO.len()); - assert_eq!(&buf[..n], HELLO); -} - -#[test] -fn read_twice_before_dispatch() { - let (mock, file) = sys::File::mock(); - mock.read(HELLO); - - let mut file = File::from_std(file); - - let mut buf = [0; 1024]; - let mut t = task::spawn(file.read(&mut buf)); - - assert_pending!(t.poll()); - assert_pending!(t.poll()); - - assert_eq!(pool::len(), 1); - pool::run_one(); - - assert!(t.is_woken()); - - let n = assert_ready_ok!(t.poll()); - assert_eq!(&buf[..n], HELLO); -} - -#[test] -fn read_with_smaller_buf() { - let (mock, file) = sys::File::mock(); - mock.read(HELLO); - - let mut file = File::from_std(file); - - { - let mut buf = [0; 32]; - let mut t = task::spawn(file.read(&mut buf)); - assert_pending!(t.poll()); - } - - pool::run_one(); - - { - let mut buf = [0; 4]; - let mut t = task::spawn(file.read(&mut buf)); - let n = assert_ready_ok!(t.poll()); - assert_eq!(n, 4); - assert_eq!(&buf[..], &HELLO[..n]); - } - - // Calling again immediately succeeds with the rest of the buffer - let mut buf = [0; 32]; - let mut t = task::spawn(file.read(&mut buf)); - let n = assert_ready_ok!(t.poll()); - assert_eq!(n, 10); - assert_eq!(&buf[..n], &HELLO[4..]); - - assert_eq!(0, pool::len()); -} - -#[test] -fn read_with_bigger_buf() { - let (mock, file) = sys::File::mock(); - mock.read(&HELLO[..4]).read(&HELLO[4..]); - - let mut file = File::from_std(file); - - { - let mut buf = [0; 4]; - 
let mut t = task::spawn(file.read(&mut buf)); - assert_pending!(t.poll()); - } - - pool::run_one(); - - { - let mut buf = [0; 32]; - let mut t = task::spawn(file.read(&mut buf)); - let n = assert_ready_ok!(t.poll()); - assert_eq!(n, 4); - assert_eq!(&buf[..n], &HELLO[..n]); - } - - // Calling again immediately succeeds with the rest of the buffer - let mut buf = [0; 32]; - let mut t = task::spawn(file.read(&mut buf)); - - assert_pending!(t.poll()); - - assert_eq!(1, pool::len()); - pool::run_one(); - - assert!(t.is_woken()); - - let n = assert_ready_ok!(t.poll()); - assert_eq!(n, 10); - assert_eq!(&buf[..n], &HELLO[4..]); - - assert_eq!(0, pool::len()); -} - -#[test] -fn read_err_then_read_success() { - let (mock, file) = sys::File::mock(); - mock.read_err().read(&HELLO); - - let mut file = File::from_std(file); - - { - let mut buf = [0; 32]; - let mut t = task::spawn(file.read(&mut buf)); - assert_pending!(t.poll()); - - pool::run_one(); - - assert_ready_err!(t.poll()); - } - - { - let mut buf = [0; 32]; - let mut t = task::spawn(file.read(&mut buf)); - assert_pending!(t.poll()); - - pool::run_one(); - - let n = assert_ready_ok!(t.poll()); - - assert_eq!(n, HELLO.len()); - assert_eq!(&buf[..n], HELLO); - } -} - -#[test] -fn open_write() { - let (mock, file) = sys::File::mock(); - mock.write(HELLO); - - let mut file = File::from_std(file); - - let mut t = task::spawn(file.write(HELLO)); - - assert_eq!(0, pool::len()); - assert_ready_ok!(t.poll()); - - assert_eq!(1, mock.remaining()); - assert_eq!(1, pool::len()); - - pool::run_one(); - - assert_eq!(0, mock.remaining()); - assert!(!t.is_woken()); - - let mut t = task::spawn(file.flush()); - assert_ready_ok!(t.poll()); -} - -#[test] -fn flush_while_idle() { - let (_mock, file) = sys::File::mock(); - - let mut file = File::from_std(file); - - let mut t = task::spawn(file.flush()); - assert_ready_ok!(t.poll()); -} - -#[test] -fn read_with_buffer_larger_than_max() { - // Chunks - let chunk_a = 16 * 1024; - let chunk_b = chunk_a * 2; - let chunk_c = chunk_a * 3; - let chunk_d = chunk_a * 4; - - assert_eq!(chunk_d / 1024, 64); - - let mut data = vec![]; - for i in 0..(chunk_d - 1) { - data.push((i % 151) as u8); - } - - let (mock, file) = sys::File::mock(); - mock.read(&data[0..chunk_a]) - .read(&data[chunk_a..chunk_b]) - .read(&data[chunk_b..chunk_c]) - .read(&data[chunk_c..]); - - let mut file = File::from_std(file); - - let mut actual = vec![0; chunk_d]; - let mut pos = 0; - - while pos < data.len() { - let mut t = task::spawn(file.read(&mut actual[pos..])); - - assert_pending!(t.poll()); - pool::run_one(); - assert!(t.is_woken()); - - let n = assert_ready_ok!(t.poll()); - assert!(n <= chunk_a); - - pos += n; - } - - assert_eq!(mock.remaining(), 0); - assert_eq!(data, &actual[..data.len()]); -} - -#[test] -fn write_with_buffer_larger_than_max() { - // Chunks - let chunk_a = 16 * 1024; - let chunk_b = chunk_a * 2; - let chunk_c = chunk_a * 3; - let chunk_d = chunk_a * 4; - - assert_eq!(chunk_d / 1024, 64); - - let mut data = vec![]; - for i in 0..(chunk_d - 1) { - data.push((i % 151) as u8); - } - - let (mock, file) = sys::File::mock(); - mock.write(&data[0..chunk_a]) - .write(&data[chunk_a..chunk_b]) - .write(&data[chunk_b..chunk_c]) - .write(&data[chunk_c..]); - - let mut file = File::from_std(file); - - let mut rem = &data[..]; - - let mut first = true; - - while !rem.is_empty() { - let mut task = task::spawn(file.write(rem)); - - if !first { - assert_pending!(task.poll()); - pool::run_one(); - assert!(task.is_woken()); - } - - first = 
false; - - let n = assert_ready_ok!(task.poll()); - - rem = &rem[n..]; - } - - pool::run_one(); - - assert_eq!(mock.remaining(), 0); -} - -#[test] -fn write_twice_before_dispatch() { - let (mock, file) = sys::File::mock(); - mock.write(HELLO).write(FOO); - - let mut file = File::from_std(file); - - let mut t = task::spawn(file.write(HELLO)); - assert_ready_ok!(t.poll()); - - let mut t = task::spawn(file.write(FOO)); - assert_pending!(t.poll()); - - assert_eq!(pool::len(), 1); - pool::run_one(); - - assert!(t.is_woken()); - - assert_ready_ok!(t.poll()); - - let mut t = task::spawn(file.flush()); - assert_pending!(t.poll()); - - assert_eq!(pool::len(), 1); - pool::run_one(); - - assert!(t.is_woken()); - assert_ready_ok!(t.poll()); -} - -#[test] -fn incomplete_read_followed_by_write() { - let (mock, file) = sys::File::mock(); - mock.read(HELLO) - .seek_current_ok(-(HELLO.len() as i64), 0) - .write(FOO); - - let mut file = File::from_std(file); - - let mut buf = [0; 32]; - - let mut t = task::spawn(file.read(&mut buf)); - assert_pending!(t.poll()); - - pool::run_one(); - - let mut t = task::spawn(file.write(FOO)); - assert_ready_ok!(t.poll()); - - assert_eq!(pool::len(), 1); - pool::run_one(); - - let mut t = task::spawn(file.flush()); - assert_ready_ok!(t.poll()); -} - -#[test] -fn incomplete_partial_read_followed_by_write() { - let (mock, file) = sys::File::mock(); - mock.read(HELLO).seek_current_ok(-10, 0).write(FOO); - - let mut file = File::from_std(file); - - let mut buf = [0; 32]; - let mut t = task::spawn(file.read(&mut buf)); - assert_pending!(t.poll()); - - pool::run_one(); - - let mut buf = [0; 4]; - let mut t = task::spawn(file.read(&mut buf)); - assert_ready_ok!(t.poll()); - - let mut t = task::spawn(file.write(FOO)); - assert_ready_ok!(t.poll()); - - assert_eq!(pool::len(), 1); - pool::run_one(); - - let mut t = task::spawn(file.flush()); - assert_ready_ok!(t.poll()); -} - -#[test] -fn incomplete_read_followed_by_flush() { - let (mock, file) = sys::File::mock(); - mock.read(HELLO) - .seek_current_ok(-(HELLO.len() as i64), 0) - .write(FOO); - - let mut file = File::from_std(file); - - let mut buf = [0; 32]; - - let mut t = task::spawn(file.read(&mut buf)); - assert_pending!(t.poll()); - - pool::run_one(); - - let mut t = task::spawn(file.flush()); - assert_ready_ok!(t.poll()); - - let mut t = task::spawn(file.write(FOO)); - assert_ready_ok!(t.poll()); - - pool::run_one(); -} - -#[test] -fn incomplete_flush_followed_by_write() { - let (mock, file) = sys::File::mock(); - mock.write(HELLO).write(FOO); - - let mut file = File::from_std(file); - - let mut t = task::spawn(file.write(HELLO)); - let n = assert_ready_ok!(t.poll()); - assert_eq!(n, HELLO.len()); - - let mut t = task::spawn(file.flush()); - assert_pending!(t.poll()); - - // TODO: Move under write - pool::run_one(); - - let mut t = task::spawn(file.write(FOO)); - assert_ready_ok!(t.poll()); - - pool::run_one(); - - let mut t = task::spawn(file.flush()); - assert_ready_ok!(t.poll()); -} - -#[test] -fn read_err() { - let (mock, file) = sys::File::mock(); - mock.read_err(); - - let mut file = File::from_std(file); - - let mut buf = [0; 1024]; - let mut t = task::spawn(file.read(&mut buf)); - - assert_pending!(t.poll()); - - pool::run_one(); - assert!(t.is_woken()); - - assert_ready_err!(t.poll()); -} - -#[test] -fn write_write_err() { - let (mock, file) = sys::File::mock(); - mock.write_err(); - - let mut file = File::from_std(file); - - let mut t = task::spawn(file.write(HELLO)); - assert_ready_ok!(t.poll()); - - 
pool::run_one(); - - let mut t = task::spawn(file.write(FOO)); - assert_ready_err!(t.poll()); -} - -#[test] -fn write_read_write_err() { - let (mock, file) = sys::File::mock(); - mock.write_err().read(HELLO); - - let mut file = File::from_std(file); - - let mut t = task::spawn(file.write(HELLO)); - assert_ready_ok!(t.poll()); - - pool::run_one(); - - let mut buf = [0; 1024]; - let mut t = task::spawn(file.read(&mut buf)); - - assert_pending!(t.poll()); - - pool::run_one(); - - let mut t = task::spawn(file.write(FOO)); - assert_ready_err!(t.poll()); -} - -#[test] -fn write_read_flush_err() { - let (mock, file) = sys::File::mock(); - mock.write_err().read(HELLO); - - let mut file = File::from_std(file); - - let mut t = task::spawn(file.write(HELLO)); - assert_ready_ok!(t.poll()); - - pool::run_one(); - - let mut buf = [0; 1024]; - let mut t = task::spawn(file.read(&mut buf)); - - assert_pending!(t.poll()); - - pool::run_one(); - - let mut t = task::spawn(file.flush()); - assert_ready_err!(t.poll()); -} - -#[test] -fn write_seek_write_err() { - let (mock, file) = sys::File::mock(); - mock.write_err().seek_start_ok(0); - - let mut file = File::from_std(file); - - let mut t = task::spawn(file.write(HELLO)); - assert_ready_ok!(t.poll()); - - pool::run_one(); - - { - let mut t = task::spawn(file.seek(SeekFrom::Start(0))); - assert_pending!(t.poll()); - } - - pool::run_one(); - - let mut t = task::spawn(file.write(FOO)); - assert_ready_err!(t.poll()); -} - -#[test] -fn write_seek_flush_err() { - let (mock, file) = sys::File::mock(); - mock.write_err().seek_start_ok(0); - - let mut file = File::from_std(file); - - let mut t = task::spawn(file.write(HELLO)); - assert_ready_ok!(t.poll()); - - pool::run_one(); - - { - let mut t = task::spawn(file.seek(SeekFrom::Start(0))); - assert_pending!(t.poll()); - } - - pool::run_one(); - - let mut t = task::spawn(file.flush()); - assert_ready_err!(t.poll()); -} - -#[test] -fn sync_all_ordered_after_write() { - let (mock, file) = sys::File::mock(); - mock.write(HELLO).sync_all(); - - let mut file = File::from_std(file); - let mut t = task::spawn(file.write(HELLO)); - assert_ready_ok!(t.poll()); - - let mut t = task::spawn(file.sync_all()); - assert_pending!(t.poll()); - - assert_eq!(1, pool::len()); - pool::run_one(); - - assert!(t.is_woken()); - assert_pending!(t.poll()); - - assert_eq!(1, pool::len()); - pool::run_one(); - - assert!(t.is_woken()); - assert_ready_ok!(t.poll()); -} - -#[test] -fn sync_all_err_ordered_after_write() { - let (mock, file) = sys::File::mock(); - mock.write(HELLO).sync_all_err(); - - let mut file = File::from_std(file); - let mut t = task::spawn(file.write(HELLO)); - assert_ready_ok!(t.poll()); - - let mut t = task::spawn(file.sync_all()); - assert_pending!(t.poll()); - - assert_eq!(1, pool::len()); - pool::run_one(); - - assert!(t.is_woken()); - assert_pending!(t.poll()); - - assert_eq!(1, pool::len()); - pool::run_one(); - - assert!(t.is_woken()); - assert_ready_err!(t.poll()); -} - -#[test] -fn sync_data_ordered_after_write() { - let (mock, file) = sys::File::mock(); - mock.write(HELLO).sync_data(); - - let mut file = File::from_std(file); - let mut t = task::spawn(file.write(HELLO)); - assert_ready_ok!(t.poll()); - - let mut t = task::spawn(file.sync_data()); - assert_pending!(t.poll()); - - assert_eq!(1, pool::len()); - pool::run_one(); - - assert!(t.is_woken()); - assert_pending!(t.poll()); - - assert_eq!(1, pool::len()); - pool::run_one(); - - assert!(t.is_woken()); - assert_ready_ok!(t.poll()); -} - -#[test] -fn 
sync_data_err_ordered_after_write() { - let (mock, file) = sys::File::mock(); - mock.write(HELLO).sync_data_err(); - - let mut file = File::from_std(file); - let mut t = task::spawn(file.write(HELLO)); - assert_ready_ok!(t.poll()); - - let mut t = task::spawn(file.sync_data()); - assert_pending!(t.poll()); - - assert_eq!(1, pool::len()); - pool::run_one(); - - assert!(t.is_woken()); - assert_pending!(t.poll()); - - assert_eq!(1, pool::len()); - pool::run_one(); - - assert!(t.is_woken()); - assert_ready_err!(t.poll()); -} - -#[test] -fn open_set_len_ok() { - let (mock, file) = sys::File::mock(); - mock.set_len(123); - - let mut file = File::from_std(file); - let mut t = task::spawn(file.set_len(123)); - - assert_pending!(t.poll()); - assert_eq!(1, mock.remaining()); - - pool::run_one(); - assert_eq!(0, mock.remaining()); - - assert!(t.is_woken()); - assert_ready_ok!(t.poll()); -} - -#[test] -fn open_set_len_err() { - let (mock, file) = sys::File::mock(); - mock.set_len_err(123); - - let mut file = File::from_std(file); - let mut t = task::spawn(file.set_len(123)); - - assert_pending!(t.poll()); - assert_eq!(1, mock.remaining()); - - pool::run_one(); - assert_eq!(0, mock.remaining()); - - assert!(t.is_woken()); - assert_ready_err!(t.poll()); -} - -#[test] -fn partial_read_set_len_ok() { - let (mock, file) = sys::File::mock(); - mock.read(HELLO) - .seek_current_ok(-14, 0) - .set_len(123) - .read(FOO); - - let mut buf = [0; 32]; - let mut file = File::from_std(file); - - { - let mut t = task::spawn(file.read(&mut buf)); - assert_pending!(t.poll()); - } - - pool::run_one(); - - { - let mut t = task::spawn(file.set_len(123)); - - assert_pending!(t.poll()); - pool::run_one(); - assert_ready_ok!(t.poll()); - } - - let mut t = task::spawn(file.read(&mut buf)); - assert_pending!(t.poll()); - pool::run_one(); - let n = assert_ready_ok!(t.poll()); - - assert_eq!(n, FOO.len()); - assert_eq!(&buf[..n], FOO); -} diff --git a/third_party/rust/tokio-0.2.25/tests/fs_link.rs b/third_party/rust/tokio-0.2.25/tests/fs_link.rs deleted file mode 100644 index cbbe27efe424..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/fs_link.rs +++ /dev/null @@ -1,70 +0,0 @@ -#![warn(rust_2018_idioms)] -#![cfg(feature = "full")] - -use tokio::fs; - -use std::io::prelude::*; -use std::io::BufReader; -use tempfile::tempdir; - -#[tokio::test] -async fn test_hard_link() { - let dir = tempdir().unwrap(); - let src = dir.path().join("src.txt"); - let dst = dir.path().join("dst.txt"); - - { - let mut file = std::fs::File::create(&src).unwrap(); - file.write_all(b"hello").unwrap(); - } - - let dst_2 = dst.clone(); - - assert!(fs::hard_link(src, dst_2.clone()).await.is_ok()); - - let mut content = String::new(); - - { - let file = std::fs::File::open(dst).unwrap(); - let mut reader = BufReader::new(file); - reader.read_to_string(&mut content).unwrap(); - } - - assert!(content == "hello"); -} - -#[cfg(unix)] -#[tokio::test] -async fn test_symlink() { - let dir = tempdir().unwrap(); - let src = dir.path().join("src.txt"); - let dst = dir.path().join("dst.txt"); - - { - let mut file = std::fs::File::create(&src).unwrap(); - file.write_all(b"hello").unwrap(); - } - - let src_2 = src.clone(); - let dst_2 = dst.clone(); - - assert!(fs::os::unix::symlink(src_2.clone(), dst_2.clone()) - .await - .is_ok()); - - let mut content = String::new(); - - { - let file = std::fs::File::open(dst.clone()).unwrap(); - let mut reader = BufReader::new(file); - reader.read_to_string(&mut content).unwrap(); - } - - assert!(content == "hello"); - - let 
read = fs::read_link(dst.clone()).await.unwrap(); - assert!(read == src); - - let symlink_meta = fs::symlink_metadata(dst.clone()).await.unwrap(); - assert!(symlink_meta.file_type().is_symlink()); -} diff --git a/third_party/rust/tokio-0.2.25/tests/io_async_read.rs b/third_party/rust/tokio-0.2.25/tests/io_async_read.rs deleted file mode 100644 index 20440bbde35c..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/io_async_read.rs +++ /dev/null @@ -1,148 +0,0 @@ -#![allow(clippy::transmute_ptr_to_ptr)] -#![warn(rust_2018_idioms)] -#![cfg(feature = "full")] - -use tokio::io::AsyncRead; -use tokio_test::task; -use tokio_test::{assert_ready_err, assert_ready_ok}; - -use bytes::{BufMut, BytesMut}; -use std::io; -use std::mem::MaybeUninit; -use std::pin::Pin; -use std::task::{Context, Poll}; - -#[test] -fn assert_obj_safe() { - fn _assert() {} - _assert::>(); -} - -#[test] -fn read_buf_success() { - struct Rd; - - impl AsyncRead for Rd { - fn poll_read( - self: Pin<&mut Self>, - _cx: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { - buf[0..11].copy_from_slice(b"hello world"); - Poll::Ready(Ok(11)) - } - } - - let mut buf = BytesMut::with_capacity(65); - - task::spawn(Rd).enter(|cx, rd| { - let n = assert_ready_ok!(rd.poll_read_buf(cx, &mut buf)); - - assert_eq!(11, n); - assert_eq!(buf[..], b"hello world"[..]); - }); -} - -#[test] -fn read_buf_error() { - struct Rd; - - impl AsyncRead for Rd { - fn poll_read( - self: Pin<&mut Self>, - _cx: &mut Context<'_>, - _buf: &mut [u8], - ) -> Poll> { - let err = io::ErrorKind::Other.into(); - Poll::Ready(Err(err)) - } - } - - let mut buf = BytesMut::with_capacity(65); - - task::spawn(Rd).enter(|cx, rd| { - let err = assert_ready_err!(rd.poll_read_buf(cx, &mut buf)); - assert_eq!(err.kind(), io::ErrorKind::Other); - }); -} - -#[test] -fn read_buf_no_capacity() { - struct Rd; - - impl AsyncRead for Rd { - fn poll_read( - self: Pin<&mut Self>, - _cx: &mut Context<'_>, - _buf: &mut [u8], - ) -> Poll> { - unimplemented!(); - } - } - - let mut buf = [0u8; 0]; - - task::spawn(Rd).enter(|cx, rd| { - let n = assert_ready_ok!(rd.poll_read_buf(cx, &mut &mut buf[..])); - assert_eq!(0, n); - }); -} - -#[test] -fn read_buf_no_uninitialized() { - struct Rd; - - impl AsyncRead for Rd { - fn poll_read( - self: Pin<&mut Self>, - _cx: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { - for b in buf { - assert_eq!(0, *b); - } - - Poll::Ready(Ok(0)) - } - } - - let mut buf = BytesMut::with_capacity(64); - - task::spawn(Rd).enter(|cx, rd| { - let n = assert_ready_ok!(rd.poll_read_buf(cx, &mut buf)); - assert_eq!(0, n); - }); -} - -#[test] -fn read_buf_uninitialized_ok() { - struct Rd; - - impl AsyncRead for Rd { - unsafe fn prepare_uninitialized_buffer(&self, _: &mut [MaybeUninit]) -> bool { - false - } - - fn poll_read( - self: Pin<&mut Self>, - _cx: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { - assert_eq!(buf[0..11], b"hello world"[..]); - Poll::Ready(Ok(0)) - } - } - - // Can't create BytesMut w/ zero capacity, so fill it up - let mut buf = BytesMut::with_capacity(64); - - unsafe { - let b: &mut [u8] = std::mem::transmute(buf.bytes_mut()); - b[0..11].copy_from_slice(b"hello world"); - } - - task::spawn(Rd).enter(|cx, rd| { - let n = assert_ready_ok!(rd.poll_read_buf(cx, &mut buf)); - assert_eq!(0, n); - }); -} diff --git a/third_party/rust/tokio-0.2.25/tests/io_chain.rs b/third_party/rust/tokio-0.2.25/tests/io_chain.rs deleted file mode 100644 index e2d59411a112..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/io_chain.rs +++ /dev/null @@ -1,16 +0,0 @@ 
-#![warn(rust_2018_idioms)] -#![cfg(feature = "full")] - -use tokio::io::AsyncReadExt; -use tokio_test::assert_ok; - -#[tokio::test] -async fn chain() { - let mut buf = Vec::new(); - let rd1: &[u8] = b"hello "; - let rd2: &[u8] = b"world"; - - let mut rd = rd1.chain(rd2); - assert_ok!(rd.read_to_end(&mut buf).await); - assert_eq!(buf, b"hello world"); -} diff --git a/third_party/rust/tokio-0.2.25/tests/io_copy.rs b/third_party/rust/tokio-0.2.25/tests/io_copy.rs deleted file mode 100644 index c1c6df4eb34b..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/io_copy.rs +++ /dev/null @@ -1,36 +0,0 @@ -#![warn(rust_2018_idioms)] -#![cfg(feature = "full")] - -use tokio::io::{self, AsyncRead}; -use tokio_test::assert_ok; - -use std::pin::Pin; -use std::task::{Context, Poll}; - -#[tokio::test] -async fn copy() { - struct Rd(bool); - - impl AsyncRead for Rd { - fn poll_read( - mut self: Pin<&mut Self>, - _cx: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { - if self.0 { - buf[0..11].copy_from_slice(b"hello world"); - self.0 = false; - Poll::Ready(Ok(11)) - } else { - Poll::Ready(Ok(0)) - } - } - } - - let mut rd = Rd(true); - let mut wr = Vec::new(); - - let n = assert_ok!(io::copy(&mut rd, &mut wr).await); - assert_eq!(n, 11); - assert_eq!(wr, b"hello world"); -} diff --git a/third_party/rust/tokio-0.2.25/tests/io_driver.rs b/third_party/rust/tokio-0.2.25/tests/io_driver.rs deleted file mode 100644 index b85abd8c2acd..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/io_driver.rs +++ /dev/null @@ -1,88 +0,0 @@ -#![warn(rust_2018_idioms)] -#![cfg(feature = "full")] - -use tokio::net::TcpListener; -use tokio::runtime; -use tokio_test::{assert_ok, assert_pending}; - -use futures::task::{waker_ref, ArcWake}; -use std::future::Future; -use std::net::TcpStream; -use std::pin::Pin; -use std::sync::{mpsc, Arc, Mutex}; -use std::task::Context; - -struct Task { - future: Mutex>>, -} - -impl ArcWake for Task { - fn wake_by_ref(_: &Arc) { - // Do nothing... - } -} - -impl Task { - fn new(future: T) -> Task { - Task { - future: Mutex::new(Box::pin(future)), - } - } -} - -#[test] -fn test_drop_on_notify() { - // When the reactor receives a kernel notification, it notifies the - // task that holds the associated socket. If this notification results in - // the task being dropped, the socket will also be dropped. - // - // Previously, there was a deadlock scenario where the reactor, while - // notifying, held a lock and the task being dropped attempted to acquire - // that same lock in order to clean up state. - // - // To simulate this case, we create a fake executor that does nothing when - // the task is notified. This simulates an executor in the process of - // shutting down. Then, when the task handle is dropped, the task itself is - // dropped. 
- - let mut rt = runtime::Builder::new() - .basic_scheduler() - .enable_all() - .build() - .unwrap(); - - let (addr_tx, addr_rx) = mpsc::channel(); - - // Define a task that just drains the listener - let task = Arc::new(Task::new(async move { - // Create a listener - let mut listener = assert_ok!(TcpListener::bind("127.0.0.1:0").await); - - // Send the address - let addr = listener.local_addr().unwrap(); - addr_tx.send(addr).unwrap(); - - loop { - let _ = listener.accept().await; - } - })); - - { - rt.enter(|| { - let waker = waker_ref(&task); - let mut cx = Context::from_waker(&waker); - assert_pending!(task.future.lock().unwrap().as_mut().poll(&mut cx)); - }); - } - - // Get the address - let addr = addr_rx.recv().unwrap(); - - drop(task); - - // Establish a connection to the acceptor - let _s = TcpStream::connect(&addr).unwrap(); - - // Force the reactor to turn - rt.block_on(async {}); -} diff --git a/third_party/rust/tokio-0.2.25/tests/io_driver_drop.rs b/third_party/rust/tokio-0.2.25/tests/io_driver_drop.rs deleted file mode 100644 index 0a5ce62513b6..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/io_driver_drop.rs +++ /dev/null @@ -1,53 +0,0 @@ -#![warn(rust_2018_idioms)] -#![cfg(feature = "full")] - -use tokio::net::TcpListener; -use tokio::runtime; -use tokio_test::{assert_err, assert_pending, assert_ready, task}; - -#[test] -fn tcp_doesnt_block() { - let rt = rt(); - - let mut listener = rt.enter(|| { - let listener = std::net::TcpListener::bind("127.0.0.1:0").unwrap(); - TcpListener::from_std(listener).unwrap() - }); - - drop(rt); - - let mut task = task::spawn(async move { - assert_err!(listener.accept().await); - }); - - assert_ready!(task.poll()); -} - -#[test] -fn drop_wakes() { - let rt = rt(); - - let mut listener = rt.enter(|| { - let listener = std::net::TcpListener::bind("127.0.0.1:0").unwrap(); - TcpListener::from_std(listener).unwrap() - }); - - let mut task = task::spawn(async move { - assert_err!(listener.accept().await); - }); - - assert_pending!(task.poll()); - - drop(rt); - - assert!(task.is_woken()); - assert_ready!(task.poll()); -} - -fn rt() -> runtime::Runtime { - runtime::Builder::new() - .basic_scheduler() - .enable_all() - .build() - .unwrap() -} diff --git a/third_party/rust/tokio-0.2.25/tests/io_lines.rs b/third_party/rust/tokio-0.2.25/tests/io_lines.rs deleted file mode 100644 index 2f6b3393b998..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/io_lines.rs +++ /dev/null @@ -1,35 +0,0 @@ -#![warn(rust_2018_idioms)] -#![cfg(feature = "full")] - -use tokio::io::AsyncBufReadExt; -use tokio_test::assert_ok; - -#[tokio::test] -async fn lines_inherent() { - let rd: &[u8] = b"hello\r\nworld\n\n"; - let mut st = rd.lines(); - - let b = assert_ok!(st.next_line().await).unwrap(); - assert_eq!(b, "hello"); - let b = assert_ok!(st.next_line().await).unwrap(); - assert_eq!(b, "world"); - let b = assert_ok!(st.next_line().await).unwrap(); - assert_eq!(b, ""); - assert!(assert_ok!(st.next_line().await).is_none()); -} - -#[tokio::test] -async fn lines_stream() { - use tokio::stream::StreamExt; - - let rd: &[u8] = b"hello\r\nworld\n\n"; - let mut st = rd.lines(); - - let b = assert_ok!(st.next().await.unwrap()); - assert_eq!(b, "hello"); - let b = assert_ok!(st.next().await.unwrap()); - assert_eq!(b, "world"); - let b = assert_ok!(st.next().await.unwrap()); - assert_eq!(b, ""); - assert!(st.next().await.is_none()); -} diff --git a/third_party/rust/tokio-0.2.25/tests/io_mem_stream.rs b/third_party/rust/tokio-0.2.25/tests/io_mem_stream.rs deleted file mode 
100644 index 3335214cb9dc..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/io_mem_stream.rs +++ /dev/null @@ -1,83 +0,0 @@ -#![warn(rust_2018_idioms)] -#![cfg(feature = "full")] - -use tokio::io::{duplex, AsyncReadExt, AsyncWriteExt}; - -#[tokio::test] -async fn ping_pong() { - let (mut a, mut b) = duplex(32); - - let mut buf = [0u8; 4]; - - a.write_all(b"ping").await.unwrap(); - b.read_exact(&mut buf).await.unwrap(); - assert_eq!(&buf, b"ping"); - - b.write_all(b"pong").await.unwrap(); - a.read_exact(&mut buf).await.unwrap(); - assert_eq!(&buf, b"pong"); -} - -#[tokio::test] -async fn across_tasks() { - let (mut a, mut b) = duplex(32); - - let t1 = tokio::spawn(async move { - a.write_all(b"ping").await.unwrap(); - let mut buf = [0u8; 4]; - a.read_exact(&mut buf).await.unwrap(); - assert_eq!(&buf, b"pong"); - }); - - let t2 = tokio::spawn(async move { - let mut buf = [0u8; 4]; - b.read_exact(&mut buf).await.unwrap(); - assert_eq!(&buf, b"ping"); - b.write_all(b"pong").await.unwrap(); - }); - - t1.await.unwrap(); - t2.await.unwrap(); -} - -#[tokio::test] -async fn disconnect() { - let (mut a, mut b) = duplex(32); - - let t1 = tokio::spawn(async move { - a.write_all(b"ping").await.unwrap(); - // and dropped - }); - - let t2 = tokio::spawn(async move { - let mut buf = [0u8; 32]; - let n = b.read(&mut buf).await.unwrap(); - assert_eq!(&buf[..n], b"ping"); - - let n = b.read(&mut buf).await.unwrap(); - assert_eq!(n, 0); - }); - - t1.await.unwrap(); - t2.await.unwrap(); -} - -#[tokio::test] -async fn max_write_size() { - let (mut a, mut b) = duplex(32); - - let t1 = tokio::spawn(async move { - let n = a.write(&[0u8; 64]).await.unwrap(); - assert_eq!(n, 32); - let n = a.write(&[0u8; 64]).await.unwrap(); - assert_eq!(n, 4); - }); - - let t2 = tokio::spawn(async move { - let mut buf = [0u8; 4]; - b.read_exact(&mut buf).await.unwrap(); - }); - - t1.await.unwrap(); - t2.await.unwrap(); -} diff --git a/third_party/rust/tokio-0.2.25/tests/io_read.rs b/third_party/rust/tokio-0.2.25/tests/io_read.rs deleted file mode 100644 index 4791c9a66181..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/io_read.rs +++ /dev/null @@ -1,60 +0,0 @@ -#![warn(rust_2018_idioms)] -#![cfg(feature = "full")] - -use tokio::io::{AsyncRead, AsyncReadExt}; -use tokio_test::assert_ok; - -use std::io; -use std::pin::Pin; -use std::task::{Context, Poll}; - -#[tokio::test] -async fn read() { - #[derive(Default)] - struct Rd { - poll_cnt: usize, - } - - impl AsyncRead for Rd { - fn poll_read( - mut self: Pin<&mut Self>, - _cx: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { - assert_eq!(0, self.poll_cnt); - self.poll_cnt += 1; - - buf[0..11].copy_from_slice(b"hello world"); - Poll::Ready(Ok(11)) - } - } - - let mut buf = Box::new([0; 11]); - let mut rd = Rd::default(); - - let n = assert_ok!(rd.read(&mut buf[..]).await); - assert_eq!(n, 11); - assert_eq!(buf[..], b"hello world"[..]); -} - -struct BadAsyncRead; - -impl AsyncRead for BadAsyncRead { - fn poll_read( - self: Pin<&mut Self>, - _cx: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { - for b in &mut *buf { - *b = b'a'; - } - Poll::Ready(Ok(buf.len() * 2)) - } -} - -#[tokio::test] -#[should_panic] -async fn read_buf_bad_async_read() { - let mut buf = Vec::with_capacity(10); - BadAsyncRead.read_buf(&mut buf).await.unwrap(); -} diff --git a/third_party/rust/tokio-0.2.25/tests/io_read_exact.rs b/third_party/rust/tokio-0.2.25/tests/io_read_exact.rs deleted file mode 100644 index d0e659bd339c..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/io_read_exact.rs 
+++ /dev/null @@ -1,15 +0,0 @@ -#![warn(rust_2018_idioms)] -#![cfg(feature = "full")] - -use tokio::io::AsyncReadExt; -use tokio_test::assert_ok; - -#[tokio::test] -async fn read_exact() { - let mut buf = Box::new([0; 8]); - let mut rd: &[u8] = b"hello world"; - - let n = assert_ok!(rd.read_exact(&mut buf[..]).await); - assert_eq!(n, 8); - assert_eq!(buf[..], b"hello wo"[..]); -} diff --git a/third_party/rust/tokio-0.2.25/tests/io_read_line.rs b/third_party/rust/tokio-0.2.25/tests/io_read_line.rs deleted file mode 100644 index 15841c9b49d2..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/io_read_line.rs +++ /dev/null @@ -1,107 +0,0 @@ -#![warn(rust_2018_idioms)] -#![cfg(feature = "full")] - -use std::io::ErrorKind; -use tokio::io::{AsyncBufReadExt, BufReader, Error}; -use tokio_test::{assert_ok, io::Builder}; - -use std::io::Cursor; - -#[tokio::test] -async fn read_line() { - let mut buf = String::new(); - let mut rd = Cursor::new(b"hello\nworld\n\n"); - - let n = assert_ok!(rd.read_line(&mut buf).await); - assert_eq!(n, 6); - assert_eq!(buf, "hello\n"); - buf.clear(); - let n = assert_ok!(rd.read_line(&mut buf).await); - assert_eq!(n, 6); - assert_eq!(buf, "world\n"); - buf.clear(); - let n = assert_ok!(rd.read_line(&mut buf).await); - assert_eq!(n, 1); - assert_eq!(buf, "\n"); - buf.clear(); - let n = assert_ok!(rd.read_line(&mut buf).await); - assert_eq!(n, 0); - assert_eq!(buf, ""); -} - -#[tokio::test] -async fn read_line_not_all_ready() { - let mock = Builder::new() - .read(b"Hello Wor") - .read(b"ld\nFizzBuz") - .read(b"z\n1\n2") - .build(); - - let mut read = BufReader::new(mock); - - let mut line = "We say ".to_string(); - let bytes = read.read_line(&mut line).await.unwrap(); - assert_eq!(bytes, "Hello World\n".len()); - assert_eq!(line.as_str(), "We say Hello World\n"); - - line = "I solve ".to_string(); - let bytes = read.read_line(&mut line).await.unwrap(); - assert_eq!(bytes, "FizzBuzz\n".len()); - assert_eq!(line.as_str(), "I solve FizzBuzz\n"); - - line.clear(); - let bytes = read.read_line(&mut line).await.unwrap(); - assert_eq!(bytes, 2); - assert_eq!(line.as_str(), "1\n"); - - line.clear(); - let bytes = read.read_line(&mut line).await.unwrap(); - assert_eq!(bytes, 1); - assert_eq!(line.as_str(), "2"); -} - -#[tokio::test] -async fn read_line_invalid_utf8() { - let mock = Builder::new().read(b"Hello Wor\xffld.\n").build(); - - let mut read = BufReader::new(mock); - - let mut line = "Foo".to_string(); - let err = read.read_line(&mut line).await.expect_err("Should fail"); - assert_eq!(err.kind(), ErrorKind::InvalidData); - assert_eq!(err.to_string(), "stream did not contain valid UTF-8"); - assert_eq!(line.as_str(), "Foo"); -} - -#[tokio::test] -async fn read_line_fail() { - let mock = Builder::new() - .read(b"Hello Wor") - .read_error(Error::new(ErrorKind::Other, "The world has no end")) - .build(); - - let mut read = BufReader::new(mock); - - let mut line = "Foo".to_string(); - let err = read.read_line(&mut line).await.expect_err("Should fail"); - assert_eq!(err.kind(), ErrorKind::Other); - assert_eq!(err.to_string(), "The world has no end"); - assert_eq!(line.as_str(), "FooHello Wor"); -} - -#[tokio::test] -async fn read_line_fail_and_utf8_fail() { - let mock = Builder::new() - .read(b"Hello Wor") - .read(b"\xff\xff\xff") - .read_error(Error::new(ErrorKind::Other, "The world has no end")) - .build(); - - let mut read = BufReader::new(mock); - - let mut line = "Foo".to_string(); - let err = read.read_line(&mut line).await.expect_err("Should fail"); - 
assert_eq!(err.kind(), ErrorKind::Other); - assert_eq!(err.to_string(), "The world has no end"); - assert_eq!(line.as_str(), "Foo"); -} diff --git a/third_party/rust/tokio-0.2.25/tests/io_read_to_end.rs b/third_party/rust/tokio-0.2.25/tests/io_read_to_end.rs deleted file mode 100644 index ee636ba59636..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/io_read_to_end.rs +++ /dev/null @@ -1,15 +0,0 @@ -#![warn(rust_2018_idioms)] -#![cfg(feature = "full")] - -use tokio::io::AsyncReadExt; -use tokio_test::assert_ok; - -#[tokio::test] -async fn read_to_end() { - let mut buf = vec![]; - let mut rd: &[u8] = b"hello world"; - - let n = assert_ok!(rd.read_to_end(&mut buf).await); - assert_eq!(n, 11); - assert_eq!(buf[..], b"hello world"[..]); -} diff --git a/third_party/rust/tokio-0.2.25/tests/io_read_to_string.rs b/third_party/rust/tokio-0.2.25/tests/io_read_to_string.rs deleted file mode 100644 index f30c26caa88f..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/io_read_to_string.rs +++ /dev/null @@ -1,63 +0,0 @@ -#![warn(rust_2018_idioms)] -#![cfg(feature = "full")] - -use std::io; -use tokio::io::AsyncReadExt; -use tokio_test::assert_ok; -use tokio_test::io::Builder; - -#[tokio::test] -async fn read_to_string() { - let mut buf = String::new(); - let mut rd: &[u8] = b"hello world"; - - let n = assert_ok!(rd.read_to_string(&mut buf).await); - assert_eq!(n, 11); - assert_eq!(buf[..], "hello world"[..]); -} - -#[tokio::test] -async fn to_string_does_not_truncate_on_utf8_error() { - let data = vec![0xff, 0xff, 0xff]; - - let mut s = "abc".to_string(); - - match AsyncReadExt::read_to_string(&mut data.as_slice(), &mut s).await { - Ok(len) => panic!("Should fail: {} bytes.", len), - Err(err) if err.to_string() == "stream did not contain valid UTF-8" => {} - Err(err) => panic!("Fail: {}.", err), - } - - assert_eq!(s, "abc"); -} - -#[tokio::test] -async fn to_string_does_not_truncate_on_io_error() { - let mut mock = Builder::new() - .read(b"def") - .read_error(io::Error::new(io::ErrorKind::Other, "whoops")) - .build(); - let mut s = "abc".to_string(); - - match AsyncReadExt::read_to_string(&mut mock, &mut s).await { - Ok(len) => panic!("Should fail: {} bytes.", len), - Err(err) if err.to_string() == "whoops" => {} - Err(err) => panic!("Fail: {}.", err), - } - - assert_eq!(s, "abc"); -} - -#[tokio::test] -async fn to_string_appends() { - let data = b"def".to_vec(); - - let mut s = "abc".to_string(); - - let len = AsyncReadExt::read_to_string(&mut data.as_slice(), &mut s) - .await - .unwrap(); - - assert_eq!(len, 3); - assert_eq!(s, "abcdef"); -} diff --git a/third_party/rust/tokio-0.2.25/tests/io_read_until.rs b/third_party/rust/tokio-0.2.25/tests/io_read_until.rs deleted file mode 100644 index 61800a0d9c19..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/io_read_until.rs +++ /dev/null @@ -1,74 +0,0 @@ -#![warn(rust_2018_idioms)] -#![cfg(feature = "full")] - -use std::io::ErrorKind; -use tokio::io::{AsyncBufReadExt, BufReader, Error}; -use tokio_test::{assert_ok, io::Builder}; - -#[tokio::test] -async fn read_until() { - let mut buf = vec![]; - let mut rd: &[u8] = b"hello world"; - - let n = assert_ok!(rd.read_until(b' ', &mut buf).await); - assert_eq!(n, 6); - assert_eq!(buf, b"hello "); - buf.clear(); - let n = assert_ok!(rd.read_until(b' ', &mut buf).await); - assert_eq!(n, 5); - assert_eq!(buf, b"world"); - buf.clear(); - let n = assert_ok!(rd.read_until(b' ', &mut buf).await); - assert_eq!(n, 0); - assert_eq!(buf, []); -} - -#[tokio::test] -async fn read_until_not_all_ready() { - 
let mock = Builder::new() - .read(b"Hello Wor") - .read(b"ld#Fizz\xffBuz") - .read(b"z#1#2") - .build(); - - let mut read = BufReader::new(mock); - - let mut chunk = b"We say ".to_vec(); - let bytes = read.read_until(b'#', &mut chunk).await.unwrap(); - assert_eq!(bytes, b"Hello World#".len()); - assert_eq!(chunk, b"We say Hello World#"); - - chunk = b"I solve ".to_vec(); - let bytes = read.read_until(b'#', &mut chunk).await.unwrap(); - assert_eq!(bytes, b"Fizz\xffBuzz\n".len()); - assert_eq!(chunk, b"I solve Fizz\xffBuzz#"); - - chunk.clear(); - let bytes = read.read_until(b'#', &mut chunk).await.unwrap(); - assert_eq!(bytes, 2); - assert_eq!(chunk, b"1#"); - - chunk.clear(); - let bytes = read.read_until(b'#', &mut chunk).await.unwrap(); - assert_eq!(bytes, 1); - assert_eq!(chunk, b"2"); -} - -#[tokio::test] -async fn read_until_fail() { - let mock = Builder::new() - .read(b"Hello \xffWor") - .read_error(Error::new(ErrorKind::Other, "The world has no end")) - .build(); - - let mut read = BufReader::new(mock); - - let mut chunk = b"Foo".to_vec(); - let err = read - .read_until(b'#', &mut chunk) - .await - .expect_err("Should fail"); - assert_eq!(err.kind(), ErrorKind::Other); - assert_eq!(err.to_string(), "The world has no end"); - assert_eq!(chunk, b"FooHello \xffWor"); -} diff --git a/third_party/rust/tokio-0.2.25/tests/io_split.rs b/third_party/rust/tokio-0.2.25/tests/io_split.rs deleted file mode 100644 index e54bf248521c..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/io_split.rs +++ /dev/null @@ -1,78 +0,0 @@ -#![warn(rust_2018_idioms)] -#![cfg(feature = "full")] - -use tokio::io::{split, AsyncRead, AsyncWrite, ReadHalf, WriteHalf}; - -use std::io; -use std::pin::Pin; -use std::task::{Context, Poll}; - -struct RW; - -impl AsyncRead for RW { - fn poll_read( - self: Pin<&mut Self>, - _cx: &mut Context<'_>, - _buf: &mut [u8], - ) -> Poll> { - Poll::Ready(Ok(1)) - } -} - -impl AsyncWrite for RW { - fn poll_write( - self: Pin<&mut Self>, - _cx: &mut Context<'_>, - _buf: &[u8], - ) -> Poll> { - Poll::Ready(Ok(1)) - } - - fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } - - fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } -} - -#[test] -fn is_send_and_sync() { - fn assert_bound() {} - - assert_bound::>(); - assert_bound::>(); -} - -#[test] -fn split_stream_id() { - let (r1, w1) = split(RW); - let (r2, w2) = split(RW); - assert_eq!(r1.is_pair_of(&w1), true); - assert_eq!(r1.is_pair_of(&w2), false); - assert_eq!(r2.is_pair_of(&w2), true); - assert_eq!(r2.is_pair_of(&w1), false); -} - -#[test] -fn unsplit_ok() { - let (r, w) = split(RW); - r.unsplit(w); -} - -#[test] -#[should_panic] -fn unsplit_err1() { - let (r, _) = split(RW); - let (_, w) = split(RW); - r.unsplit(w); -} - -#[test] -#[should_panic] -fn unsplit_err2() { - let (_, w) = split(RW); - let (r, _) = split(RW); - r.unsplit(w); -} diff --git a/third_party/rust/tokio-0.2.25/tests/io_take.rs b/third_party/rust/tokio-0.2.25/tests/io_take.rs deleted file mode 100644 index 683606f72721..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/io_take.rs +++ /dev/null @@ -1,16 +0,0 @@ -#![warn(rust_2018_idioms)] -#![cfg(feature = "full")] - -use tokio::io::AsyncReadExt; -use tokio_test::assert_ok; - -#[tokio::test] -async fn take() { - let mut buf = [0; 6]; - let rd: &[u8] = b"hello world"; - - let mut rd = rd.take(4); - let n = assert_ok!(rd.read(&mut buf).await); - assert_eq!(n, 4); - assert_eq!(&buf, &b"hell\0\0"[..]); -} diff --git 
a/third_party/rust/tokio-0.2.25/tests/io_write.rs b/third_party/rust/tokio-0.2.25/tests/io_write.rs deleted file mode 100644 index 96cebc3313be..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/io_write.rs +++ /dev/null @@ -1,58 +0,0 @@ -#![warn(rust_2018_idioms)] -#![cfg(feature = "full")] - -use tokio::io::{AsyncWrite, AsyncWriteExt}; -use tokio_test::assert_ok; - -use bytes::BytesMut; -use std::io; -use std::pin::Pin; -use std::task::{Context, Poll}; - -#[tokio::test] -async fn write() { - struct Wr { - buf: BytesMut, - cnt: usize, - } - - impl AsyncWrite for Wr { - fn poll_write( - mut self: Pin<&mut Self>, - _cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - assert_eq!(self.cnt, 0); - self.buf.extend(&buf[0..4]); - Ok(4).into() - } - - fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { - Ok(()).into() - } - - fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { - Ok(()).into() - } - } - - let mut wr = Wr { - buf: BytesMut::with_capacity(64), - cnt: 0, - }; - - let n = assert_ok!(wr.write(b"hello world").await); - assert_eq!(n, 4); - assert_eq!(wr.buf, b"hell"[..]); -} - -#[tokio::test] -async fn write_cursor() { - use std::io::Cursor; - - let mut wr = Cursor::new(Vec::new()); - - let n = assert_ok!(wr.write(b"hello world").await); - assert_eq!(n, 11); - assert_eq!(wr.get_ref().as_slice(), &b"hello world"[..]); -} diff --git a/third_party/rust/tokio-0.2.25/tests/io_write_all.rs b/third_party/rust/tokio-0.2.25/tests/io_write_all.rs deleted file mode 100644 index 7ca02228a3c0..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/io_write_all.rs +++ /dev/null @@ -1,51 +0,0 @@ -#![warn(rust_2018_idioms)] -#![cfg(feature = "full")] - -use tokio::io::{AsyncWrite, AsyncWriteExt}; -use tokio_test::assert_ok; - -use bytes::BytesMut; -use std::cmp; -use std::io; -use std::pin::Pin; -use std::task::{Context, Poll}; - -#[tokio::test] -async fn write_all() { - struct Wr { - buf: BytesMut, - cnt: usize, - } - - impl AsyncWrite for Wr { - fn poll_write( - mut self: Pin<&mut Self>, - _cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - let n = cmp::min(4, buf.len()); - let buf = &buf[0..n]; - - self.cnt += 1; - self.buf.extend(buf); - Ok(buf.len()).into() - } - - fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { - Ok(()).into() - } - - fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { - Ok(()).into() - } - } - - let mut wr = Wr { - buf: BytesMut::with_capacity(64), - cnt: 0, - }; - - assert_ok!(wr.write_all(b"hello world").await); - assert_eq!(wr.buf, b"hello world"[..]); - assert_eq!(wr.cnt, 3); -} diff --git a/third_party/rust/tokio-0.2.25/tests/io_write_int.rs b/third_party/rust/tokio-0.2.25/tests/io_write_int.rs deleted file mode 100644 index 48a583d8c3f3..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/io_write_int.rs +++ /dev/null @@ -1,37 +0,0 @@ -#![warn(rust_2018_idioms)] -#![cfg(feature = "full")] - -use tokio::io::{AsyncWrite, AsyncWriteExt}; - -use std::io; -use std::pin::Pin; -use std::task::{Context, Poll}; - -#[tokio::test] -async fn write_int_should_err_if_write_count_0() { - struct Wr {} - - impl AsyncWrite for Wr { - fn poll_write( - self: Pin<&mut Self>, - _cx: &mut Context<'_>, - _buf: &[u8], - ) -> Poll> { - Ok(0).into() - } - - fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { - Ok(()).into() - } - - fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { - Ok(()).into() - } - } - - let mut wr = Wr {}; - - // should be ok just to test these 2, 
other cases actually expanded by same macro. - assert!(wr.write_i8(0).await.is_err()); - assert!(wr.write_i32(12).await.is_err()); -} diff --git a/third_party/rust/tokio-0.2.25/tests/macros_join.rs b/third_party/rust/tokio-0.2.25/tests/macros_join.rs deleted file mode 100644 index 169e898f97de..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/macros_join.rs +++ /dev/null @@ -1,72 +0,0 @@ -#![allow(clippy::blacklisted_name)] -use tokio::sync::oneshot; -use tokio_test::{assert_pending, assert_ready, task}; - -#[tokio::test] -async fn sync_one_lit_expr_comma() { - let foo = tokio::join!(async { 1 },); - - assert_eq!(foo, (1,)); -} - -#[tokio::test] -async fn sync_one_lit_expr_no_comma() { - let foo = tokio::join!(async { 1 }); - - assert_eq!(foo, (1,)); -} - -#[tokio::test] -async fn sync_two_lit_expr_comma() { - let foo = tokio::join!(async { 1 }, async { 2 },); - - assert_eq!(foo, (1, 2)); -} - -#[tokio::test] -async fn sync_two_lit_expr_no_comma() { - let foo = tokio::join!(async { 1 }, async { 2 }); - - assert_eq!(foo, (1, 2)); -} - -#[tokio::test] -async fn two_await() { - let (tx1, rx1) = oneshot::channel::<&str>(); - let (tx2, rx2) = oneshot::channel::(); - - let mut join = task::spawn(async { - tokio::join!(async { rx1.await.unwrap() }, async { rx2.await.unwrap() }) - }); - - assert_pending!(join.poll()); - - tx2.send(123).unwrap(); - assert!(join.is_woken()); - assert_pending!(join.poll()); - - tx1.send("hello").unwrap(); - assert!(join.is_woken()); - let res = assert_ready!(join.poll()); - - assert_eq!(("hello", 123), res); -} - -#[test] -fn join_size() { - use futures::future; - use std::mem; - - let fut = async { - let ready = future::ready(0i32); - tokio::join!(ready) - }; - assert_eq!(mem::size_of_val(&fut), 16); - - let fut = async { - let ready1 = future::ready(0i32); - let ready2 = future::ready(0i32); - tokio::join!(ready1, ready2) - }; - assert_eq!(mem::size_of_val(&fut), 28); -} diff --git a/third_party/rust/tokio-0.2.25/tests/macros_pin.rs b/third_party/rust/tokio-0.2.25/tests/macros_pin.rs deleted file mode 100644 index da6e0be6ed2d..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/macros_pin.rs +++ /dev/null @@ -1,13 +0,0 @@ -async fn one() {} -async fn two() {} - -#[tokio::test] -async fn multi_pin() { - tokio::pin! { - let f1 = one(); - let f2 = two(); - } - - (&mut f1).await; - (&mut f2).await; -} diff --git a/third_party/rust/tokio-0.2.25/tests/macros_select.rs b/third_party/rust/tokio-0.2.25/tests/macros_select.rs deleted file mode 100644 index f71fd5f0794d..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/macros_select.rs +++ /dev/null @@ -1,465 +0,0 @@ -#![allow(clippy::blacklisted_name, clippy::stable_sort_primitive)] - -use tokio::sync::{mpsc, oneshot}; -use tokio::task; -use tokio_test::{assert_ok, assert_pending, assert_ready}; - -use futures::future::poll_fn; -use std::task::Poll::Ready; - -#[tokio::test] -async fn sync_one_lit_expr_comma() { - let foo = tokio::select! { - foo = async { 1 } => foo, - }; - - assert_eq!(foo, 1); -} - -#[tokio::test] -async fn nested_one() { - let foo = tokio::select! { - foo = async { 1 } => tokio::select! { - bar = async { foo } => bar, - }, - }; - - assert_eq!(foo, 1); -} - -#[tokio::test] -async fn sync_one_lit_expr_no_comma() { - let foo = tokio::select! { - foo = async { 1 } => foo - }; - - assert_eq!(foo, 1); -} - -#[tokio::test] -async fn sync_one_lit_expr_block() { - let foo = tokio::select! 
{ - foo = async { 1 } => { foo } - }; - - assert_eq!(foo, 1); -} - -#[tokio::test] -async fn sync_one_await() { - let foo = tokio::select! { - foo = one() => foo, - }; - - assert_eq!(foo, 1); -} - -#[tokio::test] -async fn sync_one_ident() { - let one = one(); - - let foo = tokio::select! { - foo = one => foo, - }; - - assert_eq!(foo, 1); -} - -#[tokio::test] -async fn sync_two() { - use std::cell::Cell; - - let cnt = Cell::new(0); - - let res = tokio::select! { - foo = async { - cnt.set(cnt.get() + 1); - 1 - } => foo, - bar = async { - cnt.set(cnt.get() + 1); - 2 - } => bar, - }; - - assert_eq!(1, cnt.get()); - assert!(res == 1 || res == 2); -} - -#[tokio::test] -async fn drop_in_fut() { - let s = "hello".to_string(); - - let res = tokio::select! { - foo = async { - let v = one().await; - drop(s); - v - } => foo - }; - - assert_eq!(res, 1); -} - -#[tokio::test] -async fn one_ready() { - let (tx1, rx1) = oneshot::channel::(); - let (_tx2, rx2) = oneshot::channel::(); - - tx1.send(1).unwrap(); - - let v = tokio::select! { - res = rx1 => { - assert_ok!(res) - }, - _ = rx2 => unreachable!(), - }; - - assert_eq!(1, v); -} - -#[tokio::test] -async fn select_streams() { - let (tx1, mut rx1) = mpsc::unbounded_channel::(); - let (tx2, mut rx2) = mpsc::unbounded_channel::(); - - tokio::spawn(async move { - assert_ok!(tx2.send(1)); - task::yield_now().await; - - assert_ok!(tx1.send(2)); - task::yield_now().await; - - assert_ok!(tx2.send(3)); - task::yield_now().await; - - drop((tx1, tx2)); - }); - - let mut rem = true; - let mut msgs = vec![]; - - while rem { - tokio::select! { - Some(x) = rx1.recv() => { - msgs.push(x); - } - Some(y) = rx2.recv() => { - msgs.push(y); - } - else => { - rem = false; - } - } - } - - msgs.sort(); - assert_eq!(&msgs[..], &[1, 2, 3]); -} - -#[tokio::test] -async fn move_uncompleted_futures() { - let (tx1, mut rx1) = oneshot::channel::(); - let (tx2, mut rx2) = oneshot::channel::(); - - tx1.send(1).unwrap(); - tx2.send(2).unwrap(); - - let ran; - - tokio::select! { - res = &mut rx1 => { - assert_eq!(1, assert_ok!(res)); - assert_eq!(2, assert_ok!(rx2.await)); - ran = true; - }, - res = &mut rx2 => { - assert_eq!(2, assert_ok!(res)); - assert_eq!(1, assert_ok!(rx1.await)); - ran = true; - }, - } - - assert!(ran); -} - -#[tokio::test] -async fn nested() { - let res = tokio::select! { - x = async { 1 } => { - tokio::select! { - y = async { 2 } => x + y, - } - } - }; - - assert_eq!(res, 3); -} - -#[tokio::test] -async fn struct_size() { - use futures::future; - use std::mem; - - let fut = async { - let ready = future::ready(0i32); - - tokio::select! { - _ = ready => {}, - } - }; - - assert!(mem::size_of_val(&fut) <= 32); - - let fut = async { - let ready1 = future::ready(0i32); - let ready2 = future::ready(0i32); - - tokio::select! { - _ = ready1 => {}, - _ = ready2 => {}, - } - }; - - assert!(mem::size_of_val(&fut) <= 40); - - let fut = async { - let ready1 = future::ready(0i32); - let ready2 = future::ready(0i32); - let ready3 = future::ready(0i32); - - tokio::select! { - _ = ready1 => {}, - _ = ready2 => {}, - _ = ready3 => {}, - } - }; - - assert!(mem::size_of_val(&fut) <= 48); -} - -#[tokio::test] -async fn mutable_borrowing_future_with_same_borrow_in_block() { - let mut value = 234; - - tokio::select! { - _ = require_mutable(&mut value) => { }, - _ = async_noop() => { - value += 5; - }, - } - - assert!(value >= 234); -} - -#[tokio::test] -async fn mutable_borrowing_future_with_same_borrow_in_block_and_else() { - let mut value = 234; - - tokio::select! 
{ - _ = require_mutable(&mut value) => { }, - _ = async_noop() => { - value += 5; - }, - else => { - value += 27; - }, - } - - assert!(value >= 234); -} - -#[tokio::test] -async fn future_panics_after_poll() { - use tokio_test::task; - - let (tx, rx) = oneshot::channel(); - - let mut polled = false; - - let f = poll_fn(|_| { - assert!(!polled); - polled = true; - Ready(None::<()>) - }); - - let mut f = task::spawn(async { - tokio::select! { - Some(_) = f => unreachable!(), - ret = rx => ret.unwrap(), - } - }); - - assert_pending!(f.poll()); - assert_pending!(f.poll()); - - assert_ok!(tx.send(1)); - - let res = assert_ready!(f.poll()); - assert_eq!(1, res); -} - -#[tokio::test] -async fn disable_with_if() { - use tokio_test::task; - - let f = poll_fn(|_| panic!()); - let (tx, rx) = oneshot::channel(); - - let mut f = task::spawn(async { - tokio::select! { - _ = f, if false => unreachable!(), - _ = rx => (), - } - }); - - assert_pending!(f.poll()); - - assert_ok!(tx.send(())); - assert!(f.is_woken()); - - assert_ready!(f.poll()); -} - -#[tokio::test] -async fn join_with_select() { - use tokio_test::task; - - let (tx1, mut rx1) = oneshot::channel(); - let (tx2, mut rx2) = oneshot::channel(); - - let mut f = task::spawn(async { - let mut a = None; - let mut b = None; - - while a.is_none() || b.is_none() { - tokio::select! { - v1 = &mut rx1, if a.is_none() => a = Some(assert_ok!(v1)), - v2 = &mut rx2, if b.is_none() => b = Some(assert_ok!(v2)) - } - } - - (a.unwrap(), b.unwrap()) - }); - - assert_pending!(f.poll()); - - assert_ok!(tx1.send(123)); - assert!(f.is_woken()); - assert_pending!(f.poll()); - - assert_ok!(tx2.send(456)); - assert!(f.is_woken()); - let (a, b) = assert_ready!(f.poll()); - - assert_eq!(a, 123); - assert_eq!(b, 456); -} - -#[tokio::test] -async fn use_future_in_if_condition() { - use tokio::time::{self, Duration}; - - let mut delay = time::delay_for(Duration::from_millis(50)); - - tokio::select! { - _ = &mut delay, if !delay.is_elapsed() => { - } - _ = async { 1 } => { - } - } -} - -#[tokio::test] -async fn many_branches() { - let num = tokio::select! 
{ - x = async { 1 } => x, - x = async { 1 } => x, - x = async { 1 } => x, - x = async { 1 } => x, - x = async { 1 } => x, - x = async { 1 } => x, - x = async { 1 } => x, - x = async { 1 } => x, - x = async { 1 } => x, - x = async { 1 } => x, - x = async { 1 } => x, - x = async { 1 } => x, - x = async { 1 } => x, - x = async { 1 } => x, - x = async { 1 } => x, - x = async { 1 } => x, - x = async { 1 } => x, - x = async { 1 } => x, - x = async { 1 } => x, - x = async { 1 } => x, - x = async { 1 } => x, - x = async { 1 } => x, - x = async { 1 } => x, - x = async { 1 } => x, - x = async { 1 } => x, - x = async { 1 } => x, - x = async { 1 } => x, - x = async { 1 } => x, - x = async { 1 } => x, - x = async { 1 } => x, - x = async { 1 } => x, - x = async { 1 } => x, - x = async { 1 } => x, - x = async { 1 } => x, - x = async { 1 } => x, - x = async { 1 } => x, - x = async { 1 } => x, - x = async { 1 } => x, - x = async { 1 } => x, - x = async { 1 } => x, - x = async { 1 } => x, - x = async { 1 } => x, - x = async { 1 } => x, - x = async { 1 } => x, - x = async { 1 } => x, - x = async { 1 } => x, - x = async { 1 } => x, - x = async { 1 } => x, - x = async { 1 } => x, - x = async { 1 } => x, - x = async { 1 } => x, - x = async { 1 } => x, - x = async { 1 } => x, - x = async { 1 } => x, - x = async { 1 } => x, - x = async { 1 } => x, - x = async { 1 } => x, - x = async { 1 } => x, - x = async { 1 } => x, - x = async { 1 } => x, - x = async { 1 } => x, - x = async { 1 } => x, - x = async { 1 } => x, - }; - - assert_eq!(1, num); -} - -#[tokio::test] -async fn never_branch_no_warnings() { - let t = tokio::select! { - _ = async_never() => 0, - one_async_ready = one() => one_async_ready, - }; - assert_eq!(t, 1); -} - -async fn one() -> usize { - 1 -} - -async fn require_mutable(_: &mut i32) {} -async fn async_noop() {} - -async fn async_never() -> ! 
{ - use tokio::time::Duration; - loop { - tokio::time::delay_for(Duration::from_millis(10)).await; - } -} diff --git a/third_party/rust/tokio-0.2.25/tests/macros_test.rs b/third_party/rust/tokio-0.2.25/tests/macros_test.rs deleted file mode 100644 index 8e68b8a4417c..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/macros_test.rs +++ /dev/null @@ -1,19 +0,0 @@ -use tokio::test; - -#[test] -async fn test_macro_can_be_used_via_use() { - tokio::spawn(async { - assert_eq!(1 + 1, 2); - }) - .await - .unwrap(); -} - -#[tokio::test] -async fn test_macro_is_resilient_to_shadowing() { - tokio::spawn(async { - assert_eq!(1 + 1, 2); - }) - .await - .unwrap(); -} diff --git a/third_party/rust/tokio-0.2.25/tests/macros_try_join.rs b/third_party/rust/tokio-0.2.25/tests/macros_try_join.rs deleted file mode 100644 index a9251532664c..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/macros_try_join.rs +++ /dev/null @@ -1,101 +0,0 @@ -#![allow(clippy::blacklisted_name)] -use tokio::sync::oneshot; -use tokio_test::{assert_pending, assert_ready, task}; - -#[tokio::test] -async fn sync_one_lit_expr_comma() { - let foo = tokio::try_join!(async { ok(1) },); - - assert_eq!(foo, Ok((1,))); -} - -#[tokio::test] -async fn sync_one_lit_expr_no_comma() { - let foo = tokio::try_join!(async { ok(1) }); - - assert_eq!(foo, Ok((1,))); -} - -#[tokio::test] -async fn sync_two_lit_expr_comma() { - let foo = tokio::try_join!(async { ok(1) }, async { ok(2) },); - - assert_eq!(foo, Ok((1, 2))); -} - -#[tokio::test] -async fn sync_two_lit_expr_no_comma() { - let foo = tokio::try_join!(async { ok(1) }, async { ok(2) }); - - assert_eq!(foo, Ok((1, 2))); -} - -#[tokio::test] -async fn two_await() { - let (tx1, rx1) = oneshot::channel::<&str>(); - let (tx2, rx2) = oneshot::channel::(); - - let mut join = - task::spawn(async { tokio::try_join!(async { rx1.await }, async { rx2.await }) }); - - assert_pending!(join.poll()); - - tx2.send(123).unwrap(); - assert!(join.is_woken()); - assert_pending!(join.poll()); - - tx1.send("hello").unwrap(); - assert!(join.is_woken()); - let res: Result<(&str, u32), _> = assert_ready!(join.poll()); - - assert_eq!(Ok(("hello", 123)), res); -} - -#[tokio::test] -async fn err_abort_early() { - let (tx1, rx1) = oneshot::channel::<&str>(); - let (tx2, rx2) = oneshot::channel::(); - let (_tx3, rx3) = oneshot::channel::(); - - let mut join = task::spawn(async { - tokio::try_join!(async { rx1.await }, async { rx2.await }, async { - rx3.await - }) - }); - - assert_pending!(join.poll()); - - tx2.send(123).unwrap(); - assert!(join.is_woken()); - assert_pending!(join.poll()); - - drop(tx1); - assert!(join.is_woken()); - - let res = assert_ready!(join.poll()); - - assert!(res.is_err()); -} - -#[test] -fn join_size() { - use futures::future; - use std::mem; - - let fut = async { - let ready = future::ready(ok(0i32)); - tokio::try_join!(ready) - }; - assert_eq!(mem::size_of_val(&fut), 16); - - let fut = async { - let ready1 = future::ready(ok(0i32)); - let ready2 = future::ready(ok(0i32)); - tokio::try_join!(ready1, ready2) - }; - assert_eq!(mem::size_of_val(&fut), 28); -} - -fn ok(val: T) -> Result { - Ok(val) -} diff --git a/third_party/rust/tokio-0.2.25/tests/net_bind_resource.rs b/third_party/rust/tokio-0.2.25/tests/net_bind_resource.rs deleted file mode 100644 index d4a0b8dab08f..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/net_bind_resource.rs +++ /dev/null @@ -1,14 +0,0 @@ -#![warn(rust_2018_idioms)] -#![cfg(feature = "full")] - -use tokio::net::TcpListener; - -use std::convert::TryFrom; 
-use std::net; - -#[test] -#[should_panic] -fn no_runtime_panics_binding_net_tcp_listener() { - let listener = net::TcpListener::bind("127.0.0.1:0").expect("failed to bind listener"); - let _ = TcpListener::try_from(listener); -} diff --git a/third_party/rust/tokio-0.2.25/tests/net_lookup_host.rs b/third_party/rust/tokio-0.2.25/tests/net_lookup_host.rs deleted file mode 100644 index 4d06402988a2..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/net_lookup_host.rs +++ /dev/null @@ -1,36 +0,0 @@ -use tokio::net; -use tokio_test::assert_ok; - -use std::io; -use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; - -#[tokio::test] -async fn lookup_socket_addr() { - let addr: SocketAddr = "127.0.0.1:8000".parse().unwrap(); - - let actual = assert_ok!(net::lookup_host(addr).await).collect::>(); - assert_eq!(vec![addr], actual); -} - -#[tokio::test] -async fn lookup_str_socket_addr() { - let addr: SocketAddr = "127.0.0.1:8000".parse().unwrap(); - - let actual = assert_ok!(net::lookup_host("127.0.0.1:8000").await).collect::>(); - assert_eq!(vec![addr], actual); -} - -#[tokio::test] -async fn resolve_dns() -> io::Result<()> { - let mut hosts = net::lookup_host("localhost:3000").await?; - let host = hosts.next().unwrap(); - - let expected = if host.is_ipv4() { - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 3000) - } else { - SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), 3000) - }; - assert_eq!(host, expected); - - Ok(()) -} diff --git a/third_party/rust/tokio-0.2.25/tests/no_rt.rs b/third_party/rust/tokio-0.2.25/tests/no_rt.rs deleted file mode 100644 index 962eed7952d6..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/no_rt.rs +++ /dev/null @@ -1,27 +0,0 @@ -use tokio::net::TcpStream; -use tokio::sync::oneshot; -use tokio::time::{timeout, Duration}; - -use futures::executor::block_on; - -use std::net::TcpListener; - -#[test] -#[should_panic(expected = "no timer running")] -fn panics_when_no_timer() { - block_on(timeout_value()); -} - -#[test] -#[should_panic(expected = "no reactor running")] -fn panics_when_no_reactor() { - let srv = TcpListener::bind("127.0.0.1:0").unwrap(); - let addr = srv.local_addr().unwrap(); - block_on(TcpStream::connect(&addr)).unwrap(); -} - -async fn timeout_value() { - let (_tx, rx) = oneshot::channel::<()>(); - let dur = Duration::from_millis(20); - let _ = timeout(dur, rx).await; -} diff --git a/third_party/rust/tokio-0.2.25/tests/process_issue_2174.rs b/third_party/rust/tokio-0.2.25/tests/process_issue_2174.rs deleted file mode 100644 index b5a63ceee832..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/process_issue_2174.rs +++ /dev/null @@ -1,46 +0,0 @@ -#![cfg(feature = "process")] -#![warn(rust_2018_idioms)] -// This test reveals a difference in behavior of kqueue on FreeBSD. When the -// reader disconnects, there does not seem to be an `EVFILT_WRITE` filter that -// is returned. -// -// It is expected that `EVFILT_WRITE` would be returned with either the -// `EV_EOF` or `EV_ERROR` flag set. If either flag is set a write would be -// attempted, but that does not seem to occur. 
-#![cfg(all(unix, not(target_os = "freebsd")))] - -use std::process::Stdio; -use std::time::Duration; -use tokio::prelude::*; -use tokio::process::Command; -use tokio::time; -use tokio_test::assert_err; - -#[tokio::test] -async fn issue_2174() { - let mut child = Command::new("sleep") - .arg("2") - .stdin(Stdio::piped()) - .stdout(Stdio::null()) - .spawn() - .unwrap(); - let mut input = child.stdin.take().unwrap(); - - // Writes will buffer up to 65_636. This *should* loop at least 8 times - // and then register interest. - let handle = tokio::spawn(async move { - let data = [0u8; 8192]; - loop { - input.write_all(&data).await.unwrap(); - } - }); - - // Sleep enough time so that the child process's stdin's buffer fills. - time::delay_for(Duration::from_secs(1)).await; - - // Kill the child process. - child.kill().unwrap(); - let _ = child.await; - - assert_err!(handle.await); -} diff --git a/third_party/rust/tokio-0.2.25/tests/process_issue_42.rs b/third_party/rust/tokio-0.2.25/tests/process_issue_42.rs deleted file mode 100644 index aa70af3b56e0..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/process_issue_42.rs +++ /dev/null @@ -1,36 +0,0 @@ -#![warn(rust_2018_idioms)] -#![cfg(feature = "full")] -#![cfg(unix)] - -use futures::future::join_all; -use std::process::Stdio; -use tokio::process::Command; -use tokio::task; - -#[tokio::test] -async fn issue_42() { - // We spawn a many batches of processes which should exit at roughly the - // same time (modulo OS scheduling delays), to make sure that consuming - // a readiness event for one process doesn't inadvertently starve another. - // We then do this many times (in parallel) in an effort to stress test the - // implementation to ensure there are no race conditions. - // See alexcrichton/tokio-process#42 for background - let join_handles = (0..10usize).map(|_| { - task::spawn(async { - let processes = (0..10usize).map(|i| { - Command::new("echo") - .arg(format!("I am spawned process #{}", i)) - .stdin(Stdio::null()) - .stdout(Stdio::null()) - .stderr(Stdio::null()) - .kill_on_drop(true) - .spawn() - .unwrap() - }); - - join_all(processes).await; - }) - }); - - join_all(join_handles).await; -} diff --git a/third_party/rust/tokio-0.2.25/tests/process_kill_on_drop.rs b/third_party/rust/tokio-0.2.25/tests/process_kill_on_drop.rs deleted file mode 100644 index f376c15475d1..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/process_kill_on_drop.rs +++ /dev/null @@ -1,42 +0,0 @@ -#![cfg(all(unix, feature = "process"))] -#![warn(rust_2018_idioms)] - -use std::process::Stdio; -use std::time::Duration; -use tokio::io::AsyncReadExt; -use tokio::process::Command; -use tokio::time::delay_for; -use tokio_test::assert_ok; - -#[tokio::test] -async fn kill_on_drop() { - let mut cmd = Command::new("sh"); - cmd.args(&[ - "-c", - " - # Fork another child that won't get killed - sh -c 'sleep 1; echo child ran' & - disown -a - - # Await our death - sleep 5 - echo hello from beyond the grave - ", - ]); - - let mut child = cmd - .kill_on_drop(true) - .stdout(Stdio::piped()) - .spawn() - .unwrap(); - - delay_for(Duration::from_secs(2)).await; - - let mut out = child.stdout.take().unwrap(); - drop(child); - - let mut msg = String::new(); - assert_ok!(out.read_to_string(&mut msg).await); - - assert_eq!("child ran\n", msg); -} diff --git a/third_party/rust/tokio-0.2.25/tests/process_smoke.rs b/third_party/rust/tokio-0.2.25/tests/process_smoke.rs deleted file mode 100644 index d16d1d72c1b3..000000000000 --- 
a/third_party/rust/tokio-0.2.25/tests/process_smoke.rs +++ /dev/null @@ -1,29 +0,0 @@ -#![warn(rust_2018_idioms)] -#![cfg(feature = "full")] - -use tokio::process::Command; -use tokio_test::assert_ok; - -#[tokio::test] -async fn simple() { - let mut cmd; - - if cfg!(windows) { - cmd = Command::new("cmd"); - cmd.arg("/c"); - } else { - cmd = Command::new("sh"); - cmd.arg("-c"); - } - - let mut child = cmd.arg("exit 2").spawn().unwrap(); - - let id = child.id(); - assert!(id > 0); - - let status = assert_ok!((&mut child).await); - assert_eq!(status.code(), Some(2)); - - assert_eq!(child.id(), id); - drop(child.kill()); -} diff --git a/third_party/rust/tokio-0.2.25/tests/rt_basic.rs b/third_party/rust/tokio-0.2.25/tests/rt_basic.rs deleted file mode 100644 index 0885992d7d22..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/rt_basic.rs +++ /dev/null @@ -1,137 +0,0 @@ -#![warn(rust_2018_idioms)] -#![cfg(feature = "full")] - -use tokio::runtime::Runtime; -use tokio::sync::{mpsc, oneshot}; -use tokio_test::{assert_err, assert_ok}; - -use std::thread; -use std::time::Duration; - -#[test] -fn spawned_task_does_not_progress_without_block_on() { - let (tx, mut rx) = oneshot::channel(); - - let mut rt = rt(); - - rt.spawn(async move { - assert_ok!(tx.send("hello")); - }); - - thread::sleep(Duration::from_millis(50)); - - assert_err!(rx.try_recv()); - - let out = rt.block_on(async { assert_ok!(rx.await) }); - - assert_eq!(out, "hello"); -} - -#[test] -fn no_extra_poll() { - use pin_project_lite::pin_project; - use std::pin::Pin; - use std::sync::{ - atomic::{AtomicUsize, Ordering::SeqCst}, - Arc, - }; - use std::task::{Context, Poll}; - use tokio::stream::{Stream, StreamExt}; - - pin_project! { - struct TrackPolls { - npolls: Arc, - #[pin] - s: S, - } - } - - impl Stream for TrackPolls - where - S: Stream, - { - type Item = S::Item; - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let this = self.project(); - this.npolls.fetch_add(1, SeqCst); - this.s.poll_next(cx) - } - } - - let (tx, rx) = mpsc::unbounded_channel(); - let mut rx = TrackPolls { - npolls: Arc::new(AtomicUsize::new(0)), - s: rx, - }; - let npolls = Arc::clone(&rx.npolls); - - let mut rt = rt(); - - rt.spawn(async move { while rx.next().await.is_some() {} }); - rt.block_on(async { - tokio::task::yield_now().await; - }); - - // should have been polled exactly once: the initial poll - assert_eq!(npolls.load(SeqCst), 1); - - tx.send(()).unwrap(); - rt.block_on(async { - tokio::task::yield_now().await; - }); - - // should have been polled twice more: once to yield Some(), then once to yield Pending - assert_eq!(npolls.load(SeqCst), 1 + 2); - - drop(tx); - rt.block_on(async { - tokio::task::yield_now().await; - }); - - // should have been polled once more: to yield None - assert_eq!(npolls.load(SeqCst), 1 + 2 + 1); -} - -#[test] -fn acquire_mutex_in_drop() { - use futures::future::pending; - use tokio::task; - - let (tx1, rx1) = oneshot::channel(); - let (tx2, rx2) = oneshot::channel(); - - let mut rt = rt(); - - rt.spawn(async move { - let _ = rx2.await; - unreachable!(); - }); - - rt.spawn(async move { - let _ = rx1.await; - tx2.send(()).unwrap(); - unreachable!(); - }); - - // Spawn a task that will never notify - rt.spawn(async move { - pending::<()>().await; - tx1.send(()).unwrap(); - }); - - // Tick the loop - rt.block_on(async { - task::yield_now().await; - }); - - // Drop the rt - drop(rt); -} - -fn rt() -> Runtime { - tokio::runtime::Builder::new() - .basic_scheduler() - .enable_all() - .build() - 
.unwrap() -} diff --git a/third_party/rust/tokio-0.2.25/tests/rt_common.rs b/third_party/rust/tokio-0.2.25/tests/rt_common.rs deleted file mode 100644 index c3a609894011..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/rt_common.rs +++ /dev/null @@ -1,1067 +0,0 @@ -#![allow(clippy::needless_range_loop, clippy::stable_sort_primitive)] -#![warn(rust_2018_idioms)] -#![cfg(feature = "full")] - -// Tests to run on both current-thread & thread-pool runtime variants. - -macro_rules! rt_test { - ($($t:tt)*) => { - mod basic_scheduler { - $($t)* - - fn rt() -> Runtime { - tokio::runtime::Builder::new() - .basic_scheduler() - .enable_all() - .build() - .unwrap() - } - } - - mod threaded_scheduler_4_threads { - $($t)* - - fn rt() -> Runtime { - tokio::runtime::Builder::new() - .threaded_scheduler() - .core_threads(4) - .enable_all() - .build() - .unwrap() - } - } - - mod threaded_scheduler_1_thread { - $($t)* - - fn rt() -> Runtime { - tokio::runtime::Builder::new() - .threaded_scheduler() - .core_threads(1) - .enable_all() - .build() - .unwrap() - } - } - } -} - -#[test] -fn send_sync_bound() { - use tokio::runtime::Runtime; - fn is_send() {} - - is_send::(); -} - -rt_test! { - use tokio::net::{TcpListener, TcpStream, UdpSocket}; - use tokio::prelude::*; - use tokio::runtime::Runtime; - use tokio::sync::oneshot; - use tokio::{task, time}; - use tokio_test::{assert_err, assert_ok}; - - use futures::future::poll_fn; - use std::future::Future; - use std::pin::Pin; - use std::sync::{mpsc, Arc}; - use std::task::{Context, Poll}; - use std::thread; - use std::time::{Duration, Instant}; - - #[test] - fn block_on_sync() { - let mut rt = rt(); - - let mut win = false; - rt.block_on(async { - win = true; - }); - - assert!(win); - } - - #[test] - fn block_on_handle_sync() { - let rt = rt(); - - let mut win = false; - rt.handle().block_on(async { - win = true; - }); - - assert!(win); - } - - #[test] - fn block_on_async() { - let mut rt = rt(); - - let out = rt.block_on(async { - let (tx, rx) = oneshot::channel(); - - thread::spawn(move || { - thread::sleep(Duration::from_millis(50)); - tx.send("ZOMG").unwrap(); - }); - - assert_ok!(rx.await) - }); - - assert_eq!(out, "ZOMG"); - } - - #[test] - fn block_on_handle_async() { - let rt = rt(); - - let out = rt.handle().block_on(async { - let (tx, rx) = oneshot::channel(); - - thread::spawn(move || { - thread::sleep(Duration::from_millis(50)); - tx.send("ZOMG").unwrap(); - }); - - assert_ok!(rx.await) - }); - - assert_eq!(out, "ZOMG"); - } - - #[test] - fn spawn_one_bg() { - let mut rt = rt(); - - let out = rt.block_on(async { - let (tx, rx) = oneshot::channel(); - - tokio::spawn(async move { - tx.send("ZOMG").unwrap(); - }); - - assert_ok!(rx.await) - }); - - assert_eq!(out, "ZOMG"); - } - - #[test] - fn spawn_one_join() { - let mut rt = rt(); - - let out = rt.block_on(async { - let (tx, rx) = oneshot::channel(); - - let handle = tokio::spawn(async move { - tx.send("ZOMG").unwrap(); - "DONE" - }); - - let msg = assert_ok!(rx.await); - - let out = assert_ok!(handle.await); - assert_eq!(out, "DONE"); - - msg - }); - - assert_eq!(out, "ZOMG"); - } - - #[test] - fn spawn_two() { - let mut rt = rt(); - - let out = rt.block_on(async { - let (tx1, rx1) = oneshot::channel(); - let (tx2, rx2) = oneshot::channel(); - - tokio::spawn(async move { - assert_ok!(tx1.send("ZOMG")); - }); - - tokio::spawn(async move { - let msg = assert_ok!(rx1.await); - assert_ok!(tx2.send(msg)); - }); - - assert_ok!(rx2.await) - }); - - assert_eq!(out, "ZOMG"); - } - - #[test] - fn 
spawn_many_from_block_on() { - use tokio::sync::mpsc; - - const ITER: usize = 200; - - let mut rt = rt(); - - let out = rt.block_on(async { - let (done_tx, mut done_rx) = mpsc::unbounded_channel(); - - let mut txs = (0..ITER) - .map(|i| { - let (tx, rx) = oneshot::channel(); - let done_tx = done_tx.clone(); - - tokio::spawn(async move { - let msg = assert_ok!(rx.await); - assert_eq!(i, msg); - assert_ok!(done_tx.send(msg)); - }); - - tx - }) - .collect::>(); - - drop(done_tx); - - thread::spawn(move || { - for (i, tx) in txs.drain(..).enumerate() { - assert_ok!(tx.send(i)); - } - }); - - let mut out = vec![]; - while let Some(i) = done_rx.recv().await { - out.push(i); - } - - out.sort(); - out - }); - - assert_eq!(ITER, out.len()); - - for i in 0..ITER { - assert_eq!(i, out[i]); - } - } - - #[test] - fn spawn_many_from_task() { - use tokio::sync::mpsc; - - const ITER: usize = 500; - - let mut rt = rt(); - - let out = rt.block_on(async { - tokio::spawn(async move { - let (done_tx, mut done_rx) = mpsc::unbounded_channel(); - - /* - for _ in 0..100 { - tokio::spawn(async move { }); - } - - tokio::task::yield_now().await; - */ - - let mut txs = (0..ITER) - .map(|i| { - let (tx, rx) = oneshot::channel(); - let done_tx = done_tx.clone(); - - tokio::spawn(async move { - let msg = assert_ok!(rx.await); - assert_eq!(i, msg); - assert_ok!(done_tx.send(msg)); - }); - - tx - }) - .collect::>(); - - drop(done_tx); - - thread::spawn(move || { - for (i, tx) in txs.drain(..).enumerate() { - assert_ok!(tx.send(i)); - } - }); - - let mut out = vec![]; - while let Some(i) = done_rx.recv().await { - out.push(i); - } - - out.sort(); - out - }).await.unwrap() - }); - - assert_eq!(ITER, out.len()); - - for i in 0..ITER { - assert_eq!(i, out[i]); - } - } - - #[test] - fn spawn_await_chain() { - let mut rt = rt(); - - let out = rt.block_on(async { - assert_ok!(tokio::spawn(async { - assert_ok!(tokio::spawn(async { - "hello" - }).await) - }).await) - }); - - assert_eq!(out, "hello"); - } - - #[test] - fn outstanding_tasks_dropped() { - let mut rt = rt(); - - let cnt = Arc::new(()); - - rt.block_on(async { - let cnt = cnt.clone(); - - tokio::spawn(poll_fn(move |_| { - assert_eq!(2, Arc::strong_count(&cnt)); - Poll::<()>::Pending - })); - }); - - assert_eq!(2, Arc::strong_count(&cnt)); - - drop(rt); - - assert_eq!(1, Arc::strong_count(&cnt)); - } - - #[test] - #[should_panic] - fn nested_rt() { - let mut rt1 = rt(); - let mut rt2 = rt(); - - rt1.block_on(async { rt2.block_on(async { "hello" }) }); - } - - #[test] - fn create_rt_in_block_on() { - let mut rt1 = rt(); - let mut rt2 = rt1.block_on(async { rt() }); - let out = rt2.block_on(async { "ZOMG" }); - - assert_eq!(out, "ZOMG"); - } - - #[test] - fn complete_block_on_under_load() { - let mut rt = rt(); - - rt.block_on(async { - let (tx, rx) = oneshot::channel(); - - // Spin hard - tokio::spawn(async { - loop { - yield_once().await; - } - }); - - thread::spawn(move || { - thread::sleep(Duration::from_millis(50)); - assert_ok!(tx.send(())); - }); - - assert_ok!(rx.await); - }); - } - - #[test] - fn complete_task_under_load() { - let mut rt = rt(); - - rt.block_on(async { - let (tx1, rx1) = oneshot::channel(); - let (tx2, rx2) = oneshot::channel(); - - // Spin hard - tokio::spawn(async { - loop { - yield_once().await; - } - }); - - thread::spawn(move || { - thread::sleep(Duration::from_millis(50)); - assert_ok!(tx1.send(())); - }); - - tokio::spawn(async move { - assert_ok!(rx1.await); - assert_ok!(tx2.send(())); - }); - - assert_ok!(rx2.await); - }); - } - - #[test] 
- fn spawn_from_other_thread_idle() { - let mut rt = rt(); - let handle = rt.handle().clone(); - - let (tx, rx) = oneshot::channel(); - - thread::spawn(move || { - thread::sleep(Duration::from_millis(50)); - - handle.spawn(async move { - assert_ok!(tx.send(())); - }); - }); - - rt.block_on(async move { - assert_ok!(rx.await); - }); - } - - #[test] - fn spawn_from_other_thread_under_load() { - let mut rt = rt(); - let handle = rt.handle().clone(); - - let (tx, rx) = oneshot::channel(); - - thread::spawn(move || { - handle.spawn(async move { - assert_ok!(tx.send(())); - }); - }); - - rt.block_on(async move { - // Spin hard - tokio::spawn(async { - loop { - yield_once().await; - } - }); - - assert_ok!(rx.await); - }); - } - - #[test] - fn delay_at_root() { - let mut rt = rt(); - - let now = Instant::now(); - let dur = Duration::from_millis(50); - - rt.block_on(async move { - time::delay_for(dur).await; - }); - - assert!(now.elapsed() >= dur); - } - - #[test] - fn delay_in_spawn() { - let mut rt = rt(); - - let now = Instant::now(); - let dur = Duration::from_millis(50); - - rt.block_on(async move { - let (tx, rx) = oneshot::channel(); - - tokio::spawn(async move { - time::delay_for(dur).await; - assert_ok!(tx.send(())); - }); - - assert_ok!(rx.await); - }); - - assert!(now.elapsed() >= dur); - } - - #[test] - fn block_on_socket() { - let mut rt = rt(); - - rt.block_on(async move { - let (tx, rx) = oneshot::channel(); - - let mut listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); - let addr = listener.local_addr().unwrap(); - - tokio::spawn(async move { - let _ = listener.accept().await; - tx.send(()).unwrap(); - }); - - TcpStream::connect(&addr).await.unwrap(); - rx.await.unwrap(); - }); - } - - #[test] - fn spawn_from_blocking() { - let mut rt = rt(); - - let out = rt.block_on(async move { - let inner = assert_ok!(tokio::task::spawn_blocking(|| { - tokio::spawn(async move { "hello" }) - }).await); - - assert_ok!(inner.await) - }); - - assert_eq!(out, "hello") - } - - #[test] - fn spawn_blocking_from_blocking() { - let mut rt = rt(); - - let out = rt.block_on(async move { - let inner = assert_ok!(tokio::task::spawn_blocking(|| { - tokio::task::spawn_blocking(|| "hello") - }).await); - - assert_ok!(inner.await) - }); - - assert_eq!(out, "hello") - } - - #[test] - fn delay_from_blocking() { - let mut rt = rt(); - - rt.block_on(async move { - assert_ok!(tokio::task::spawn_blocking(|| { - let now = std::time::Instant::now(); - let dur = Duration::from_millis(1); - - // use the futures' block_on fn to make sure we aren't setting - // any Tokio context - futures::executor::block_on(async { - tokio::time::delay_for(dur).await; - }); - - assert!(now.elapsed() >= dur); - }).await); - }); - } - - #[test] - fn socket_from_blocking() { - let mut rt = rt(); - - rt.block_on(async move { - let mut listener = assert_ok!(TcpListener::bind("127.0.0.1:0").await); - let addr = assert_ok!(listener.local_addr()); - - let peer = tokio::task::spawn_blocking(move || { - // use the futures' block_on fn to make sure we aren't setting - // any Tokio context - futures::executor::block_on(async { - assert_ok!(TcpStream::connect(addr).await); - }); - }); - - // Wait for the client to connect - let _ = assert_ok!(listener.accept().await); - - assert_ok!(peer.await); - }); - } - - #[test] - fn spawn_blocking_after_shutdown() { - let rt = rt(); - let handle = rt.handle().clone(); - - // Shutdown - drop(rt); - - handle.enter(|| { - let res = task::spawn_blocking(|| unreachable!()); - - // Avoid using a tokio runtime - 
let out = futures::executor::block_on(res); - assert!(out.is_err()); - }); - } - - #[test] - // IOCP requires setting the "max thread" concurrency value. The sane, - // default, is to set this to the number of cores. Threads that poll I/O - // become associated with the IOCP handle. Once those threads sleep for any - // reason (mutex), they yield their ownership. - // - // This test hits an edge case on windows where more threads than cores are - // created, none of those threads ever yield due to being at capacity, so - // IOCP gets "starved". - // - // For now, this is a very edge case that is probably not a real production - // concern. There also isn't a great/obvious solution to take. For now, the - // test is disabled. - #[cfg(not(windows))] - fn io_driver_called_when_under_load() { - let mut rt = rt(); - - // Create a lot of constant load. The scheduler will always be busy. - for _ in 0..100 { - rt.spawn(async { - loop { - tokio::task::yield_now().await; - } - }); - } - - // Do some I/O work - rt.block_on(async { - let mut listener = assert_ok!(TcpListener::bind("127.0.0.1:0").await); - let addr = assert_ok!(listener.local_addr()); - - let srv = tokio::spawn(async move { - let (mut stream, _) = assert_ok!(listener.accept().await); - assert_ok!(stream.write_all(b"hello world").await); - }); - - let cli = tokio::spawn(async move { - let mut stream = assert_ok!(TcpStream::connect(addr).await); - let mut dst = vec![0; 11]; - - assert_ok!(stream.read_exact(&mut dst).await); - assert_eq!(dst, b"hello world"); - }); - - assert_ok!(srv.await); - assert_ok!(cli.await); - }); - } - - #[test] - fn client_server_block_on() { - let mut rt = rt(); - let (tx, rx) = mpsc::channel(); - - rt.block_on(async move { client_server(tx).await }); - - assert_ok!(rx.try_recv()); - assert_err!(rx.try_recv()); - } - - #[test] - fn panic_in_task() { - let mut rt = rt(); - let (tx, rx) = oneshot::channel(); - - struct Boom(Option>); - - impl Future for Boom { - type Output = (); - - fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<()> { - panic!(); - } - } - - impl Drop for Boom { - fn drop(&mut self) { - assert!(std::thread::panicking()); - self.0.take().unwrap().send(()).unwrap(); - } - } - - rt.spawn(Boom(Some(tx))); - assert_ok!(rt.block_on(rx)); - } - - #[test] - #[should_panic] - fn panic_in_block_on() { - let mut rt = rt(); - rt.block_on(async { panic!() }); - } - - async fn yield_once() { - let mut yielded = false; - poll_fn(|cx| { - if yielded { - Poll::Ready(()) - } else { - yielded = true; - cx.waker().wake_by_ref(); - Poll::Pending - } - }) - .await - } - - #[test] - fn enter_and_spawn() { - let mut rt = rt(); - let handle = rt.enter(|| { - tokio::spawn(async {}) - }); - - assert_ok!(rt.block_on(handle)); - } - - #[test] - fn eagerly_drops_futures_on_shutdown() { - use std::sync::mpsc; - - struct Never { - drop_tx: mpsc::Sender<()>, - } - - impl Future for Never { - type Output = (); - - fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<()> { - Poll::Pending - } - } - - impl Drop for Never { - fn drop(&mut self) { - self.drop_tx.send(()).unwrap(); - } - } - - let mut rt = rt(); - - let (drop_tx, drop_rx) = mpsc::channel(); - let (run_tx, run_rx) = oneshot::channel(); - - rt.block_on(async move { - tokio::spawn(async move { - assert_ok!(run_tx.send(())); - - Never { drop_tx }.await - }); - - assert_ok!(run_rx.await); - }); - - drop(rt); - - assert_ok!(drop_rx.recv()); - } - - #[test] - fn wake_while_rt_is_dropping() { - use tokio::task; - - struct OnDrop(F); - - impl Drop for 
OnDrop { - fn drop(&mut self) { - (self.0)() - } - } - - let (tx1, rx1) = oneshot::channel(); - let (tx2, rx2) = oneshot::channel(); - let (tx3, rx3) = oneshot::channel(); - - let mut rt = rt(); - - let h1 = rt.handle().clone(); - - rt.handle().spawn(async move { - // Ensure a waker gets stored in oneshot 1. - let _ = rx1.await; - tx3.send(()).unwrap(); - }); - - rt.handle().spawn(async move { - // When this task is dropped, we'll be "closing remotes". - // We spawn a new task that owns the `tx1`, to move its Drop - // out of here. - // - // Importantly, the oneshot 1 has a waker already stored, so - // the eventual drop here will try to re-schedule again. - let mut opt_tx1 = Some(tx1); - let _d = OnDrop(move || { - let tx1 = opt_tx1.take().unwrap(); - h1.spawn(async move { - tx1.send(()).unwrap(); - }); - }); - let _ = rx2.await; - }); - - rt.handle().spawn(async move { - let _ = rx3.await; - // We'll never get here, but once task 3 drops, this will - // force task 2 to re-schedule since it's waiting on oneshot 2. - tx2.send(()).unwrap(); - }); - - // Tick the loop - rt.block_on(async { - task::yield_now().await; - }); - - // Drop the rt - drop(rt); - } - - #[test] - fn io_notify_while_shutting_down() { - use std::net::Ipv6Addr; - - for _ in 1..10 { - let mut runtime = rt(); - - runtime.block_on(async { - let socket = UdpSocket::bind((Ipv6Addr::LOCALHOST, 0)).await.unwrap(); - let addr = socket.local_addr().unwrap(); - let (mut recv_half, mut send_half) = socket.split(); - - tokio::spawn(async move { - let mut buf = [0]; - loop { - recv_half.recv_from(&mut buf).await.unwrap(); - std::thread::sleep(Duration::from_millis(2)); - } - }); - - tokio::spawn(async move { - let buf = [0]; - loop { - send_half.send_to(&buf, &addr).await.unwrap(); - tokio::time::delay_for(Duration::from_millis(1)).await; - } - }); - - tokio::time::delay_for(Duration::from_millis(5)).await; - }); - } - } - - #[test] - fn shutdown_timeout() { - let (tx, rx) = oneshot::channel(); - let mut runtime = rt(); - - runtime.block_on(async move { - task::spawn_blocking(move || { - tx.send(()).unwrap(); - thread::sleep(Duration::from_secs(10_000)); - }); - - rx.await.unwrap(); - }); - - runtime.shutdown_timeout(Duration::from_millis(100)); - } - - #[test] - fn shutdown_wakeup_time() { - let mut runtime = rt(); - - runtime.block_on(async move { - tokio::time::delay_for(std::time::Duration::from_millis(100)).await; - }); - - runtime.shutdown_timeout(Duration::from_secs(10_000)); - } - - // This test is currently ignored on Windows because of a - // rust-lang issue in thread local storage destructors. 
- // See https://github.com/rust-lang/rust/issues/74875 - #[test] - #[cfg(not(windows))] - fn runtime_in_thread_local() { - use std::cell::RefCell; - use std::thread; - - thread_local!( - static R: RefCell> = RefCell::new(None); - ); - - thread::spawn(|| { - R.with(|cell| { - *cell.borrow_mut() = Some(rt()); - }); - - let _rt = rt(); - }).join().unwrap(); - } - - async fn client_server(tx: mpsc::Sender<()>) { - let mut server = assert_ok!(TcpListener::bind("127.0.0.1:0").await); - - // Get the assigned address - let addr = assert_ok!(server.local_addr()); - - // Spawn the server - tokio::spawn(async move { - // Accept a socket - let (mut socket, _) = server.accept().await.unwrap(); - - // Write some data - socket.write_all(b"hello").await.unwrap(); - }); - - let mut client = TcpStream::connect(&addr).await.unwrap(); - - let mut buf = vec![]; - client.read_to_end(&mut buf).await.unwrap(); - - assert_eq!(buf, b"hello"); - tx.send(()).unwrap(); - } - - #[test] - fn local_set_block_on_socket() { - let mut rt = rt(); - let local = task::LocalSet::new(); - - local.block_on(&mut rt, async move { - let (tx, rx) = oneshot::channel(); - - let mut listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); - let addr = listener.local_addr().unwrap(); - - task::spawn_local(async move { - let _ = listener.accept().await; - tx.send(()).unwrap(); - }); - - TcpStream::connect(&addr).await.unwrap(); - rx.await.unwrap(); - }); - } - - #[test] - fn local_set_client_server_block_on() { - let mut rt = rt(); - let (tx, rx) = mpsc::channel(); - - let local = task::LocalSet::new(); - - local.block_on(&mut rt, async move { client_server_local(tx).await }); - - assert_ok!(rx.try_recv()); - assert_err!(rx.try_recv()); - } - - async fn client_server_local(tx: mpsc::Sender<()>) { - let mut server = assert_ok!(TcpListener::bind("127.0.0.1:0").await); - - // Get the assigned address - let addr = assert_ok!(server.local_addr()); - - // Spawn the server - task::spawn_local(async move { - // Accept a socket - let (mut socket, _) = server.accept().await.unwrap(); - - // Write some data - socket.write_all(b"hello").await.unwrap(); - }); - - let mut client = TcpStream::connect(&addr).await.unwrap(); - - let mut buf = vec![]; - client.read_to_end(&mut buf).await.unwrap(); - - assert_eq!(buf, b"hello"); - tx.send(()).unwrap(); - } - - #[test] - fn coop() { - use std::task::Poll::Ready; - - let mut rt = rt(); - - rt.block_on(async { - // Create a bunch of tasks - let mut tasks = (0..1_000).map(|_| { - tokio::spawn(async { }) - }).collect::>(); - - // Hope that all the tasks complete... - time::delay_for(Duration::from_millis(100)).await; - - poll_fn(|cx| { - // At least one task should not be ready - for task in &mut tasks { - if Pin::new(task).poll(cx).is_pending() { - return Ready(()); - } - } - - panic!("did not yield"); - }).await; - }); - } - - // Tests that the "next task" scheduler optimization is not able to starve - // other tasks. - #[test] - fn ping_pong_saturation() { - use tokio::sync::mpsc; - - const NUM: usize = 100; - - let mut rt = rt(); - - rt.block_on(async { - let (spawned_tx, mut spawned_rx) = mpsc::unbounded_channel(); - - // Spawn a bunch of tasks that ping ping between each other to - // saturate the runtime. 
- for _ in 0..NUM { - let (tx1, mut rx1) = mpsc::unbounded_channel(); - let (tx2, mut rx2) = mpsc::unbounded_channel(); - let spawned_tx = spawned_tx.clone(); - - task::spawn(async move { - spawned_tx.send(()).unwrap(); - - tx1.send(()).unwrap(); - - loop { - rx2.recv().await.unwrap(); - tx1.send(()).unwrap(); - } - }); - - task::spawn(async move { - loop { - rx1.recv().await.unwrap(); - tx2.send(()).unwrap(); - } - }); - } - - for _ in 0..NUM { - spawned_rx.recv().await.unwrap(); - } - - // spawn another task and wait for it to complete - let handle = task::spawn(async { - for _ in 0..5 { - // Yielding forces it back into the local queue. - task::yield_now().await; - } - }); - handle.await.unwrap(); - }); - } -} diff --git a/third_party/rust/tokio-0.2.25/tests/rt_threaded.rs b/third_party/rust/tokio-0.2.25/tests/rt_threaded.rs deleted file mode 100644 index b5ec96dec351..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/rt_threaded.rs +++ /dev/null @@ -1,396 +0,0 @@ -#![warn(rust_2018_idioms)] -#![cfg(feature = "full")] - -use tokio::io::{AsyncReadExt, AsyncWriteExt}; -use tokio::net::{TcpListener, TcpStream}; -use tokio::runtime::{self, Runtime}; -use tokio::sync::oneshot; -use tokio_test::{assert_err, assert_ok}; - -use futures::future::poll_fn; -use std::future::Future; -use std::pin::Pin; -use std::sync::atomic::AtomicUsize; -use std::sync::atomic::Ordering::Relaxed; -use std::sync::{mpsc, Arc}; -use std::task::{Context, Poll}; - -#[test] -fn single_thread() { - // No panic when starting a runtime w/ a single thread - let _ = runtime::Builder::new() - .threaded_scheduler() - .enable_all() - .core_threads(1) - .build(); -} - -#[test] -fn many_oneshot_futures() { - // used for notifying the main thread - const NUM: usize = 1_000; - - for _ in 0..5 { - let (tx, rx) = mpsc::channel(); - - let rt = rt(); - let cnt = Arc::new(AtomicUsize::new(0)); - - for _ in 0..NUM { - let cnt = cnt.clone(); - let tx = tx.clone(); - - rt.spawn(async move { - let num = cnt.fetch_add(1, Relaxed) + 1; - - if num == NUM { - tx.send(()).unwrap(); - } - }); - } - - rx.recv().unwrap(); - - // Wait for the pool to shutdown - drop(rt); - } -} -#[test] -fn many_multishot_futures() { - use tokio::sync::mpsc; - - const CHAIN: usize = 200; - const CYCLES: usize = 5; - const TRACKS: usize = 50; - - for _ in 0..50 { - let mut rt = rt(); - let mut start_txs = Vec::with_capacity(TRACKS); - let mut final_rxs = Vec::with_capacity(TRACKS); - - for _ in 0..TRACKS { - let (start_tx, mut chain_rx) = mpsc::channel(10); - - for _ in 0..CHAIN { - let (mut next_tx, next_rx) = mpsc::channel(10); - - // Forward all the messages - rt.spawn(async move { - while let Some(v) = chain_rx.recv().await { - next_tx.send(v).await.unwrap(); - } - }); - - chain_rx = next_rx; - } - - // This final task cycles if needed - let (mut final_tx, final_rx) = mpsc::channel(10); - let mut cycle_tx = start_tx.clone(); - let mut rem = CYCLES; - - rt.spawn(async move { - for _ in 0..CYCLES { - let msg = chain_rx.recv().await.unwrap(); - - rem -= 1; - - if rem == 0 { - final_tx.send(msg).await.unwrap(); - } else { - cycle_tx.send(msg).await.unwrap(); - } - } - }); - - start_txs.push(start_tx); - final_rxs.push(final_rx); - } - - { - rt.block_on(async move { - for mut start_tx in start_txs { - start_tx.send("ping").await.unwrap(); - } - - for mut final_rx in final_rxs { - final_rx.recv().await.unwrap(); - } - }); - } - } -} - -#[test] -fn spawn_shutdown() { - let mut rt = rt(); - let (tx, rx) = mpsc::channel(); - - rt.block_on(async { - 
tokio::spawn(client_server(tx.clone())); - }); - - // Use spawner - rt.spawn(client_server(tx)); - - assert_ok!(rx.recv()); - assert_ok!(rx.recv()); - - drop(rt); - assert_err!(rx.try_recv()); -} - -async fn client_server(tx: mpsc::Sender<()>) { - let mut server = assert_ok!(TcpListener::bind("127.0.0.1:0").await); - - // Get the assigned address - let addr = assert_ok!(server.local_addr()); - - // Spawn the server - tokio::spawn(async move { - // Accept a socket - let (mut socket, _) = server.accept().await.unwrap(); - - // Write some data - socket.write_all(b"hello").await.unwrap(); - }); - - let mut client = TcpStream::connect(&addr).await.unwrap(); - - let mut buf = vec![]; - client.read_to_end(&mut buf).await.unwrap(); - - assert_eq!(buf, b"hello"); - tx.send(()).unwrap(); -} - -#[test] -fn drop_threadpool_drops_futures() { - for _ in 0..1_000 { - let num_inc = Arc::new(AtomicUsize::new(0)); - let num_dec = Arc::new(AtomicUsize::new(0)); - let num_drop = Arc::new(AtomicUsize::new(0)); - - struct Never(Arc); - - impl Future for Never { - type Output = (); - - fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<()> { - Poll::Pending - } - } - - impl Drop for Never { - fn drop(&mut self) { - self.0.fetch_add(1, Relaxed); - } - } - - let a = num_inc.clone(); - let b = num_dec.clone(); - - let rt = runtime::Builder::new() - .threaded_scheduler() - .enable_all() - .on_thread_start(move || { - a.fetch_add(1, Relaxed); - }) - .on_thread_stop(move || { - b.fetch_add(1, Relaxed); - }) - .build() - .unwrap(); - - rt.spawn(Never(num_drop.clone())); - - // Wait for the pool to shutdown - drop(rt); - - // Assert that only a single thread was spawned. - let a = num_inc.load(Relaxed); - assert!(a >= 1); - - // Assert that all threads shutdown - let b = num_dec.load(Relaxed); - assert_eq!(a, b); - - // Assert that the future was dropped - let c = num_drop.load(Relaxed); - assert_eq!(c, 1); - } -} - -#[test] -fn start_stop_callbacks_called() { - use std::sync::atomic::{AtomicUsize, Ordering}; - - let after_start = Arc::new(AtomicUsize::new(0)); - let before_stop = Arc::new(AtomicUsize::new(0)); - - let after_inner = after_start.clone(); - let before_inner = before_stop.clone(); - let mut rt = tokio::runtime::Builder::new() - .threaded_scheduler() - .enable_all() - .on_thread_start(move || { - after_inner.clone().fetch_add(1, Ordering::Relaxed); - }) - .on_thread_stop(move || { - before_inner.clone().fetch_add(1, Ordering::Relaxed); - }) - .build() - .unwrap(); - - let (tx, rx) = oneshot::channel(); - - rt.spawn(async move { - assert_ok!(tx.send(())); - }); - - assert_ok!(rt.block_on(rx)); - - drop(rt); - - assert!(after_start.load(Ordering::Relaxed) > 0); - assert!(before_stop.load(Ordering::Relaxed) > 0); -} - -#[test] -fn blocking() { - // used for notifying the main thread - const NUM: usize = 1_000; - - for _ in 0..10 { - let (tx, rx) = mpsc::channel(); - - let rt = rt(); - let cnt = Arc::new(AtomicUsize::new(0)); - - // there are four workers in the pool - // so, if we run 4 blocking tasks, we know that handoff must have happened - let block = Arc::new(std::sync::Barrier::new(5)); - for _ in 0..4 { - let block = block.clone(); - rt.spawn(async move { - tokio::task::block_in_place(move || { - block.wait(); - block.wait(); - }) - }); - } - block.wait(); - - for _ in 0..NUM { - let cnt = cnt.clone(); - let tx = tx.clone(); - - rt.spawn(async move { - let num = cnt.fetch_add(1, Relaxed) + 1; - - if num == NUM { - tx.send(()).unwrap(); - } - }); - } - - rx.recv().unwrap(); - - // Wait for the 
pool to shutdown - block.wait(); - } -} - -#[test] -fn multi_threadpool() { - use tokio::sync::oneshot; - - let rt1 = rt(); - let rt2 = rt(); - - let (tx, rx) = oneshot::channel(); - let (done_tx, done_rx) = mpsc::channel(); - - rt2.spawn(async move { - rx.await.unwrap(); - done_tx.send(()).unwrap(); - }); - - rt1.spawn(async move { - tx.send(()).unwrap(); - }); - - done_rx.recv().unwrap(); -} - -// When `block_in_place` returns, it attempts to reclaim the yielded runtime -// worker. In this case, the remainder of the task is on the runtime worker and -// must take part in the cooperative task budgeting system. -// -// The test ensures that, when this happens, attempting to consume from a -// channel yields occasionally even if there are values ready to receive. -#[test] -fn coop_and_block_in_place() { - use tokio::sync::mpsc; - - let mut rt = tokio::runtime::Builder::new() - .threaded_scheduler() - // Setting max threads to 1 prevents another thread from claiming the - // runtime worker yielded as part of `block_in_place` and guarantees the - // same thread will reclaim the worker at the end of the - // `block_in_place` call. - .max_threads(1) - .build() - .unwrap(); - - rt.block_on(async move { - let (mut tx, mut rx) = mpsc::channel(1024); - - // Fill the channel - for _ in 0..1024 { - tx.send(()).await.unwrap(); - } - - drop(tx); - - tokio::spawn(async move { - // Block in place without doing anything - tokio::task::block_in_place(|| {}); - - // Receive all the values, this should trigger a `Pending` as the - // coop limit will be reached. - poll_fn(|cx| { - while let Poll::Ready(v) = { - tokio::pin! { - let fut = rx.recv(); - } - - Pin::new(&mut fut).poll(cx) - } { - if v.is_none() { - panic!("did not yield"); - } - } - - Poll::Ready(()) - }) - .await - }) - .await - .unwrap(); - }); -} - -// Testing this does not panic -#[test] -fn max_threads() { - let _rt = tokio::runtime::Builder::new() - .threaded_scheduler() - .max_threads(1) - .build() - .unwrap(); -} - -fn rt() -> Runtime { - Runtime::new().unwrap() -} diff --git a/third_party/rust/tokio-0.2.25/tests/signal_ctrl_c.rs b/third_party/rust/tokio-0.2.25/tests/signal_ctrl_c.rs deleted file mode 100644 index 4b057ee7e140..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/signal_ctrl_c.rs +++ /dev/null @@ -1,30 +0,0 @@ -#![warn(rust_2018_idioms)] -#![cfg(feature = "full")] -#![cfg(unix)] - -mod support { - pub mod signal; -} -use support::signal::send_signal; - -use tokio::signal; -use tokio::sync::oneshot; -use tokio_test::assert_ok; - -#[tokio::test] -async fn ctrl_c() { - let ctrl_c = signal::ctrl_c(); - - let (fire, wait) = oneshot::channel(); - - // NB: simulate a signal coming in by exercising our signal handler - // to avoid complications with sending SIGINT to the test process - tokio::spawn(async { - wait.await.expect("wait failed"); - send_signal(libc::SIGINT); - }); - - let _ = fire.send(()); - - assert_ok!(ctrl_c.await); -} diff --git a/third_party/rust/tokio-0.2.25/tests/signal_drop_recv.rs b/third_party/rust/tokio-0.2.25/tests/signal_drop_recv.rs deleted file mode 100644 index b0d9213e6185..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/signal_drop_recv.rs +++ /dev/null @@ -1,22 +0,0 @@ -#![warn(rust_2018_idioms)] -#![cfg(feature = "full")] -#![cfg(unix)] - -mod support { - pub mod signal; -} -use support::signal::send_signal; - -use tokio::signal::unix::{signal, SignalKind}; - -#[tokio::test] -async fn drop_then_get_a_signal() { - let kind = SignalKind::user_defined1(); - let sig = 
signal(kind).expect("failed to create first signal"); - drop(sig); - - send_signal(libc::SIGUSR1); - let mut sig = signal(kind).expect("failed to create second signal"); - - let _ = sig.recv().await; -} diff --git a/third_party/rust/tokio-0.2.25/tests/signal_drop_rt.rs b/third_party/rust/tokio-0.2.25/tests/signal_drop_rt.rs deleted file mode 100644 index aeedd96e4e67..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/signal_drop_rt.rs +++ /dev/null @@ -1,45 +0,0 @@ -#![warn(rust_2018_idioms)] -#![cfg(feature = "full")] -#![cfg(unix)] - -mod support { - pub mod signal; -} -use support::signal::send_signal; - -use tokio::runtime::Runtime; -use tokio::signal::unix::{signal, SignalKind}; - -#[test] -fn dropping_loops_does_not_cause_starvation() { - let kind = SignalKind::user_defined1(); - - let mut first_rt = rt(); - let mut first_signal = - first_rt.block_on(async { signal(kind).expect("failed to register first signal") }); - - let mut second_rt = rt(); - let mut second_signal = - second_rt.block_on(async { signal(kind).expect("failed to register second signal") }); - - send_signal(libc::SIGUSR1); - - first_rt - .block_on(first_signal.recv()) - .expect("failed to await first signal"); - - drop(first_rt); - drop(first_signal); - - send_signal(libc::SIGUSR1); - - second_rt.block_on(second_signal.recv()); -} - -fn rt() -> Runtime { - tokio::runtime::Builder::new() - .basic_scheduler() - .enable_all() - .build() - .unwrap() -} diff --git a/third_party/rust/tokio-0.2.25/tests/signal_drop_signal.rs b/third_party/rust/tokio-0.2.25/tests/signal_drop_signal.rs deleted file mode 100644 index 92ac4050d575..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/signal_drop_signal.rs +++ /dev/null @@ -1,26 +0,0 @@ -#![warn(rust_2018_idioms)] -#![cfg(feature = "full")] -#![cfg(unix)] - -mod support { - pub mod signal; -} -use support::signal::send_signal; - -use tokio::signal::unix::{signal, SignalKind}; - -#[tokio::test] -async fn dropping_signal_does_not_deregister_any_other_instances() { - let kind = SignalKind::user_defined1(); - - // Signals should not starve based on ordering - let first_duplicate_signal = signal(kind).expect("failed to register first duplicate signal"); - let mut sig = signal(kind).expect("failed to register signal"); - let second_duplicate_signal = signal(kind).expect("failed to register second duplicate signal"); - - drop(first_duplicate_signal); - drop(second_duplicate_signal); - - send_signal(libc::SIGUSR1); - let _ = sig.recv().await; -} diff --git a/third_party/rust/tokio-0.2.25/tests/signal_multi_rt.rs b/third_party/rust/tokio-0.2.25/tests/signal_multi_rt.rs deleted file mode 100644 index 9d78469578c0..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/signal_multi_rt.rs +++ /dev/null @@ -1,55 +0,0 @@ -#![warn(rust_2018_idioms)] -#![cfg(feature = "full")] -#![cfg(unix)] - -mod support { - pub mod signal; -} -use support::signal::send_signal; - -use tokio::runtime::Runtime; -use tokio::signal::unix::{signal, SignalKind}; - -use std::sync::mpsc::channel; -use std::thread; - -#[test] -fn multi_loop() { - // An "ordinary" (non-future) channel - let (sender, receiver) = channel(); - // Run multiple times, to make sure there are no race conditions - for _ in 0..10 { - // Run multiple event loops, each one in its own thread - let threads: Vec<_> = (0..4) - .map(|_| { - let sender = sender.clone(); - thread::spawn(move || { - let mut rt = rt(); - let _ = rt.block_on(async { - let mut signal = signal(SignalKind::hangup()).unwrap(); - sender.send(()).unwrap(); - 
signal.recv().await - }); - }) - }) - .collect(); - // Wait for them to declare they're ready - for &_ in threads.iter() { - receiver.recv().unwrap(); - } - // Send a signal - send_signal(libc::SIGHUP); - // Make sure the threads terminated correctly - for t in threads { - t.join().unwrap(); - } - } -} - -fn rt() -> Runtime { - tokio::runtime::Builder::new() - .basic_scheduler() - .enable_all() - .build() - .unwrap() -} diff --git a/third_party/rust/tokio-0.2.25/tests/signal_no_rt.rs b/third_party/rust/tokio-0.2.25/tests/signal_no_rt.rs deleted file mode 100644 index b0f32b2d10f5..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/signal_no_rt.rs +++ /dev/null @@ -1,11 +0,0 @@ -#![warn(rust_2018_idioms)] -#![cfg(feature = "full")] -#![cfg(unix)] - -use tokio::signal::unix::{signal, SignalKind}; - -#[test] -#[should_panic] -fn no_runtime_panics_creating_signals() { - let _ = signal(SignalKind::hangup()); -} diff --git a/third_party/rust/tokio-0.2.25/tests/signal_notify_both.rs b/third_party/rust/tokio-0.2.25/tests/signal_notify_both.rs deleted file mode 100644 index 3481f808b36d..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/signal_notify_both.rs +++ /dev/null @@ -1,23 +0,0 @@ -#![warn(rust_2018_idioms)] -#![cfg(feature = "full")] -#![cfg(unix)] - -mod support { - pub mod signal; -} -use support::signal::send_signal; - -use tokio::signal::unix::{signal, SignalKind}; - -#[tokio::test] -async fn notify_both() { - let kind = SignalKind::user_defined2(); - - let mut signal1 = signal(kind).expect("failed to create signal1"); - let mut signal2 = signal(kind).expect("failed to create signal2"); - - send_signal(libc::SIGUSR2); - - signal1.recv().await; - signal2.recv().await; -} diff --git a/third_party/rust/tokio-0.2.25/tests/signal_twice.rs b/third_party/rust/tokio-0.2.25/tests/signal_twice.rs deleted file mode 100644 index 8f33d22a82dd..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/signal_twice.rs +++ /dev/null @@ -1,22 +0,0 @@ -#![warn(rust_2018_idioms)] -#![cfg(feature = "full")] -#![cfg(unix)] - -mod support { - pub mod signal; -} -use support::signal::send_signal; - -use tokio::signal::unix::{signal, SignalKind}; - -#[tokio::test] -async fn twice() { - let kind = SignalKind::user_defined1(); - let mut sig = signal(kind).expect("failed to get signal"); - - for _ in 0..2 { - send_signal(libc::SIGUSR1); - - assert!(sig.recv().await.is_some()); - } -} diff --git a/third_party/rust/tokio-0.2.25/tests/signal_usr1.rs b/third_party/rust/tokio-0.2.25/tests/signal_usr1.rs deleted file mode 100644 index d74c7d31ab59..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/signal_usr1.rs +++ /dev/null @@ -1,23 +0,0 @@ -#![warn(rust_2018_idioms)] -#![cfg(feature = "full")] -#![cfg(unix)] - -mod support { - pub mod signal; -} -use support::signal::send_signal; - -use tokio::signal::unix::{signal, SignalKind}; -use tokio_test::assert_ok; - -#[tokio::test] -async fn signal_usr1() { - let mut signal = assert_ok!( - signal(SignalKind::user_defined1()), - "failed to create signal" - ); - - send_signal(libc::SIGUSR1); - - signal.recv().await; -} diff --git a/third_party/rust/tokio-0.2.25/tests/support/mock_file.rs b/third_party/rust/tokio-0.2.25/tests/support/mock_file.rs deleted file mode 100644 index 9895f835e6b6..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/support/mock_file.rs +++ /dev/null @@ -1,281 +0,0 @@ -#![allow(clippy::unnecessary_operation)] - -use std::collections::VecDeque; -use std::fmt; -use std::fs::{Metadata, Permissions}; -use std::io; -use std::io::prelude::*; 
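
The signal tests removed above all build their single-threaded runtime through tokio 0.2's Builder::new().basic_scheduler(). As a point of reference only, here is a minimal sketch of the same construction against the tokio 1.x builder API; this assumes the tokio line that the updated warp/hyper dependencies pull in, and none of it is code carried by this patch:

    use tokio::runtime::{Builder, Runtime};

    // tokio 1.x replaces the basic_scheduler()/threaded_scheduler() switches
    // with two dedicated constructors on Builder.
    fn current_thread_rt() -> Runtime {
        Builder::new_current_thread()
            .enable_all() // I/O and time drivers, as the removed tests enabled
            .build()
            .unwrap()
    }

    fn multi_thread_rt(workers: usize) -> Runtime {
        Builder::new_multi_thread()
            .worker_threads(workers) // successor to core_threads(n)
            .enable_all()
            .build()
            .unwrap()
    }
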
-use std::io::SeekFrom; -use std::path::PathBuf; -use std::sync::{Arc, Mutex}; - -pub struct File { - shared: Arc>, -} - -pub struct Handle { - shared: Arc>, -} - -struct Shared { - calls: VecDeque, -} - -#[derive(Debug)] -enum Call { - Read(io::Result>), - Write(io::Result>), - Seek(SeekFrom, io::Result), - SyncAll(io::Result<()>), - SyncData(io::Result<()>), - SetLen(u64, io::Result<()>), -} - -impl Handle { - pub fn read(&self, data: &[u8]) -> &Self { - let mut s = self.shared.lock().unwrap(); - s.calls.push_back(Call::Read(Ok(data.to_owned()))); - self - } - - pub fn read_err(&self) -> &Self { - let mut s = self.shared.lock().unwrap(); - s.calls - .push_back(Call::Read(Err(io::ErrorKind::Other.into()))); - self - } - - pub fn write(&self, data: &[u8]) -> &Self { - let mut s = self.shared.lock().unwrap(); - s.calls.push_back(Call::Write(Ok(data.to_owned()))); - self - } - - pub fn write_err(&self) -> &Self { - let mut s = self.shared.lock().unwrap(); - s.calls - .push_back(Call::Write(Err(io::ErrorKind::Other.into()))); - self - } - - pub fn seek_start_ok(&self, offset: u64) -> &Self { - let mut s = self.shared.lock().unwrap(); - s.calls - .push_back(Call::Seek(SeekFrom::Start(offset), Ok(offset))); - self - } - - pub fn seek_current_ok(&self, offset: i64, ret: u64) -> &Self { - let mut s = self.shared.lock().unwrap(); - s.calls - .push_back(Call::Seek(SeekFrom::Current(offset), Ok(ret))); - self - } - - pub fn sync_all(&self) -> &Self { - let mut s = self.shared.lock().unwrap(); - s.calls.push_back(Call::SyncAll(Ok(()))); - self - } - - pub fn sync_all_err(&self) -> &Self { - let mut s = self.shared.lock().unwrap(); - s.calls - .push_back(Call::SyncAll(Err(io::ErrorKind::Other.into()))); - self - } - - pub fn sync_data(&self) -> &Self { - let mut s = self.shared.lock().unwrap(); - s.calls.push_back(Call::SyncData(Ok(()))); - self - } - - pub fn sync_data_err(&self) -> &Self { - let mut s = self.shared.lock().unwrap(); - s.calls - .push_back(Call::SyncData(Err(io::ErrorKind::Other.into()))); - self - } - - pub fn set_len(&self, size: u64) -> &Self { - let mut s = self.shared.lock().unwrap(); - s.calls.push_back(Call::SetLen(size, Ok(()))); - self - } - - pub fn set_len_err(&self, size: u64) -> &Self { - let mut s = self.shared.lock().unwrap(); - s.calls - .push_back(Call::SetLen(size, Err(io::ErrorKind::Other.into()))); - self - } - - pub fn remaining(&self) -> usize { - let s = self.shared.lock().unwrap(); - s.calls.len() - } -} - -impl Drop for Handle { - fn drop(&mut self) { - if !std::thread::panicking() { - let s = self.shared.lock().unwrap(); - assert_eq!(0, s.calls.len()); - } - } -} - -impl File { - pub fn open(_: PathBuf) -> io::Result { - unimplemented!(); - } - - pub fn create(_: PathBuf) -> io::Result { - unimplemented!(); - } - - pub fn mock() -> (Handle, File) { - let shared = Arc::new(Mutex::new(Shared { - calls: VecDeque::new(), - })); - - let handle = Handle { - shared: shared.clone(), - }; - let file = File { shared }; - - (handle, file) - } - - pub fn sync_all(&self) -> io::Result<()> { - use self::Call::*; - - let mut s = self.shared.lock().unwrap(); - - match s.calls.pop_front() { - Some(SyncAll(ret)) => ret, - Some(op) => panic!("expected next call to be {:?}; was sync_all", op), - None => panic!("did not expect call"), - } - } - - pub fn sync_data(&self) -> io::Result<()> { - use self::Call::*; - - let mut s = self.shared.lock().unwrap(); - - match s.calls.pop_front() { - Some(SyncData(ret)) => ret, - Some(op) => panic!("expected next call to be {:?}; was 
sync_all", op), - None => panic!("did not expect call"), - } - } - - pub fn set_len(&self, size: u64) -> io::Result<()> { - use self::Call::*; - - let mut s = self.shared.lock().unwrap(); - - match s.calls.pop_front() { - Some(SetLen(arg, ret)) => { - assert_eq!(arg, size); - ret - } - Some(op) => panic!("expected next call to be {:?}; was sync_all", op), - None => panic!("did not expect call"), - } - } - - pub fn metadata(&self) -> io::Result { - unimplemented!(); - } - - pub fn set_permissions(&self, _perm: Permissions) -> io::Result<()> { - unimplemented!(); - } - - pub fn try_clone(&self) -> io::Result { - unimplemented!(); - } -} - -impl Read for &'_ File { - fn read(&mut self, dst: &mut [u8]) -> io::Result { - use self::Call::*; - - let mut s = self.shared.lock().unwrap(); - - match s.calls.pop_front() { - Some(Read(Ok(data))) => { - assert!(dst.len() >= data.len()); - assert!(dst.len() <= 16 * 1024, "actual = {}", dst.len()); // max buffer - - &mut dst[..data.len()].copy_from_slice(&data); - Ok(data.len()) - } - Some(Read(Err(e))) => Err(e), - Some(op) => panic!("expected next call to be {:?}; was a read", op), - None => panic!("did not expect call"), - } - } -} - -impl Write for &'_ File { - fn write(&mut self, src: &[u8]) -> io::Result { - use self::Call::*; - - let mut s = self.shared.lock().unwrap(); - - match s.calls.pop_front() { - Some(Write(Ok(data))) => { - assert_eq!(src, &data[..]); - Ok(src.len()) - } - Some(Write(Err(e))) => Err(e), - Some(op) => panic!("expected next call to be {:?}; was write", op), - None => panic!("did not expect call"), - } - } - - fn flush(&mut self) -> io::Result<()> { - Ok(()) - } -} - -impl Seek for &'_ File { - fn seek(&mut self, pos: SeekFrom) -> io::Result { - use self::Call::*; - - let mut s = self.shared.lock().unwrap(); - - match s.calls.pop_front() { - Some(Seek(expect, res)) => { - assert_eq!(expect, pos); - res - } - Some(op) => panic!("expected call {:?}; was `seek`", op), - None => panic!("did not expect call; was `seek`"), - } - } -} - -impl fmt::Debug for File { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("mock::File").finish() - } -} - -#[cfg(unix)] -impl std::os::unix::io::AsRawFd for File { - fn as_raw_fd(&self) -> std::os::unix::io::RawFd { - unimplemented!(); - } -} - -#[cfg(windows)] -impl std::os::windows::io::AsRawHandle for File { - fn as_raw_handle(&self) -> std::os::windows::io::RawHandle { - unimplemented!(); - } -} diff --git a/third_party/rust/tokio-0.2.25/tests/support/mock_pool.rs b/third_party/rust/tokio-0.2.25/tests/support/mock_pool.rs deleted file mode 100644 index e1fdb426417a..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/support/mock_pool.rs +++ /dev/null @@ -1,66 +0,0 @@ -use tokio::sync::oneshot; - -use std::cell::RefCell; -use std::collections::VecDeque; -use std::future::Future; -use std::io; -use std::pin::Pin; -use std::task::{Context, Poll}; - -thread_local! 
{ - static QUEUE: RefCell>> = RefCell::new(VecDeque::new()) -} - -#[derive(Debug)] -pub(crate) struct Blocking { - rx: oneshot::Receiver, -} - -pub(crate) fn run(f: F) -> Blocking -where - F: FnOnce() -> R + Send + 'static, - R: Send + 'static, -{ - let (tx, rx) = oneshot::channel(); - let task = Box::new(move || { - let _ = tx.send(f()); - }); - - QUEUE.with(|cell| cell.borrow_mut().push_back(task)); - - Blocking { rx } -} - -impl Future for Blocking { - type Output = Result; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - use std::task::Poll::*; - - match Pin::new(&mut self.rx).poll(cx) { - Ready(Ok(v)) => Ready(Ok(v)), - Ready(Err(e)) => panic!("error = {:?}", e), - Pending => Pending, - } - } -} - -pub(crate) async fn asyncify(f: F) -> io::Result -where - F: FnOnce() -> io::Result + Send + 'static, - T: Send + 'static, -{ - run(f).await? -} - -pub(crate) fn len() -> usize { - QUEUE.with(|cell| cell.borrow().len()) -} - -pub(crate) fn run_one() { - let task = QUEUE - .with(|cell| cell.borrow_mut().pop_front()) - .expect("expected task to run, but none ready"); - - task(); -} diff --git a/third_party/rust/tokio-0.2.25/tests/support/signal.rs b/third_party/rust/tokio-0.2.25/tests/support/signal.rs deleted file mode 100644 index ea06058764d1..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/support/signal.rs +++ /dev/null @@ -1,7 +0,0 @@ -pub fn send_signal(signal: libc::c_int) { - use libc::{getpid, kill}; - - unsafe { - assert_eq!(kill(getpid(), signal), 0); - } -} diff --git a/third_party/rust/tokio-0.2.25/tests/sync_barrier.rs b/third_party/rust/tokio-0.2.25/tests/sync_barrier.rs deleted file mode 100644 index f280fe8600b4..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/sync_barrier.rs +++ /dev/null @@ -1,96 +0,0 @@ -#![allow(clippy::unnecessary_operation)] -#![warn(rust_2018_idioms)] -#![cfg(feature = "full")] - -use tokio::sync::Barrier; - -use tokio_test::task::spawn; -use tokio_test::{assert_pending, assert_ready}; - -struct IsSend(T); -#[test] -fn barrier_future_is_send() { - let b = Barrier::new(0); - IsSend(b.wait()); -} - -#[test] -fn zero_does_not_block() { - let b = Barrier::new(0); - - { - let mut w = spawn(b.wait()); - let wr = assert_ready!(w.poll()); - assert!(wr.is_leader()); - } - { - let mut w = spawn(b.wait()); - let wr = assert_ready!(w.poll()); - assert!(wr.is_leader()); - } -} - -#[test] -fn single() { - let b = Barrier::new(1); - - { - let mut w = spawn(b.wait()); - let wr = assert_ready!(w.poll()); - assert!(wr.is_leader()); - } - { - let mut w = spawn(b.wait()); - let wr = assert_ready!(w.poll()); - assert!(wr.is_leader()); - } - { - let mut w = spawn(b.wait()); - let wr = assert_ready!(w.poll()); - assert!(wr.is_leader()); - } -} - -#[test] -fn tango() { - let b = Barrier::new(2); - - let mut w1 = spawn(b.wait()); - assert_pending!(w1.poll()); - - let mut w2 = spawn(b.wait()); - let wr2 = assert_ready!(w2.poll()); - let wr1 = assert_ready!(w1.poll()); - - assert!(wr1.is_leader() || wr2.is_leader()); - assert!(!(wr1.is_leader() && wr2.is_leader())); -} - -#[test] -fn lots() { - let b = Barrier::new(100); - - for _ in 0..10 { - let mut wait = Vec::new(); - for _ in 0..99 { - let mut w = spawn(b.wait()); - assert_pending!(w.poll()); - wait.push(w); - } - for w in &mut wait { - assert_pending!(w.poll()); - } - - // pass the barrier - let mut w = spawn(b.wait()); - let mut found_leader = assert_ready!(w.poll()).is_leader(); - for mut w in wait { - let wr = assert_ready!(w.poll()); - if wr.is_leader() { - assert!(!found_leader); 
- found_leader = true; - } - } - assert!(found_leader); - } -} diff --git a/third_party/rust/tokio-0.2.25/tests/sync_broadcast.rs b/third_party/rust/tokio-0.2.25/tests/sync_broadcast.rs deleted file mode 100644 index 6f89c7a024cd..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/sync_broadcast.rs +++ /dev/null @@ -1,532 +0,0 @@ -#![allow(clippy::cognitive_complexity, clippy::match_like_matches_macro)] -#![warn(rust_2018_idioms)] -#![cfg(feature = "sync")] - -use tokio::sync::broadcast; -use tokio_test::task; -use tokio_test::{ - assert_err, assert_ok, assert_pending, assert_ready, assert_ready_err, assert_ready_ok, -}; - -use std::sync::Arc; - -macro_rules! assert_recv { - ($e:expr) => { - match $e.try_recv() { - Ok(value) => value, - Err(e) => panic!("expected recv; got = {:?}", e), - } - }; -} - -macro_rules! assert_empty { - ($e:expr) => { - match $e.try_recv() { - Ok(value) => panic!("expected empty; got = {:?}", value), - Err(broadcast::TryRecvError::Empty) => {} - Err(e) => panic!("expected empty; got = {:?}", e), - } - }; -} - -macro_rules! assert_lagged { - ($e:expr, $n:expr) => { - match assert_err!($e) { - broadcast::TryRecvError::Lagged(n) => { - assert_eq!(n, $n); - } - _ => panic!("did not lag"), - } - }; -} - -macro_rules! assert_closed { - ($e:expr) => { - match assert_err!($e) { - broadcast::TryRecvError::Closed => {} - _ => panic!("did not lag"), - } - }; -} - -trait AssertSend: Send + Sync {} -impl AssertSend for broadcast::Sender {} -impl AssertSend for broadcast::Receiver {} - -#[test] -fn send_try_recv_bounded() { - let (tx, mut rx) = broadcast::channel(16); - - assert_empty!(rx); - - let n = assert_ok!(tx.send("hello")); - assert_eq!(n, 1); - - let val = assert_recv!(rx); - assert_eq!(val, "hello"); - - assert_empty!(rx); -} - -#[test] -fn send_two_recv() { - let (tx, mut rx1) = broadcast::channel(16); - let mut rx2 = tx.subscribe(); - - assert_empty!(rx1); - assert_empty!(rx2); - - let n = assert_ok!(tx.send("hello")); - assert_eq!(n, 2); - - let val = assert_recv!(rx1); - assert_eq!(val, "hello"); - - let val = assert_recv!(rx2); - assert_eq!(val, "hello"); - - assert_empty!(rx1); - assert_empty!(rx2); -} - -#[tokio::test] -async fn send_recv_into_stream_ready() { - use tokio::stream::StreamExt; - - let (tx, rx) = broadcast::channel::(8); - tokio::pin! { - let rx = rx.into_stream(); - } - - assert_ok!(tx.send(1)); - assert_ok!(tx.send(2)); - - assert_eq!(Some(Ok(1)), rx.next().await); - assert_eq!(Some(Ok(2)), rx.next().await); - - drop(tx); - - assert_eq!(None, rx.next().await); -} - -#[tokio::test] -async fn send_recv_into_stream_pending() { - use tokio::stream::StreamExt; - - let (tx, rx) = broadcast::channel::(8); - - tokio::pin! 
{ - let rx = rx.into_stream(); - } - - let mut recv = task::spawn(rx.next()); - assert_pending!(recv.poll()); - - assert_ok!(tx.send(1)); - - assert!(recv.is_woken()); - let val = assert_ready!(recv.poll()); - assert_eq!(val, Some(Ok(1))); -} - -#[test] -fn send_recv_bounded() { - let (tx, mut rx) = broadcast::channel(16); - - let mut recv = task::spawn(rx.recv()); - - assert_pending!(recv.poll()); - - assert_ok!(tx.send("hello")); - - assert!(recv.is_woken()); - let val = assert_ready_ok!(recv.poll()); - assert_eq!(val, "hello"); -} - -#[test] -fn send_two_recv_bounded() { - let (tx, mut rx1) = broadcast::channel(16); - let mut rx2 = tx.subscribe(); - - let mut recv1 = task::spawn(rx1.recv()); - let mut recv2 = task::spawn(rx2.recv()); - - assert_pending!(recv1.poll()); - assert_pending!(recv2.poll()); - - assert_ok!(tx.send("hello")); - - assert!(recv1.is_woken()); - assert!(recv2.is_woken()); - - let val1 = assert_ready_ok!(recv1.poll()); - let val2 = assert_ready_ok!(recv2.poll()); - assert_eq!(val1, "hello"); - assert_eq!(val2, "hello"); - - drop((recv1, recv2)); - - let mut recv1 = task::spawn(rx1.recv()); - let mut recv2 = task::spawn(rx2.recv()); - - assert_pending!(recv1.poll()); - - assert_ok!(tx.send("world")); - - assert!(recv1.is_woken()); - assert!(!recv2.is_woken()); - - let val1 = assert_ready_ok!(recv1.poll()); - let val2 = assert_ready_ok!(recv2.poll()); - assert_eq!(val1, "world"); - assert_eq!(val2, "world"); -} - -#[test] -fn change_tasks() { - let (tx, mut rx) = broadcast::channel(1); - - let mut recv = Box::pin(rx.recv()); - - let mut task1 = task::spawn(&mut recv); - assert_pending!(task1.poll()); - - let mut task2 = task::spawn(&mut recv); - assert_pending!(task2.poll()); - - tx.send("hello").unwrap(); - - assert!(task2.is_woken()); -} - -#[test] -fn send_slow_rx() { - let (tx, mut rx1) = broadcast::channel(16); - let mut rx2 = tx.subscribe(); - - { - let mut recv2 = task::spawn(rx2.recv()); - - { - let mut recv1 = task::spawn(rx1.recv()); - - assert_pending!(recv1.poll()); - assert_pending!(recv2.poll()); - - assert_ok!(tx.send("one")); - - assert!(recv1.is_woken()); - assert!(recv2.is_woken()); - - assert_ok!(tx.send("two")); - - let val = assert_ready_ok!(recv1.poll()); - assert_eq!(val, "one"); - } - - let val = assert_ready_ok!(task::spawn(rx1.recv()).poll()); - assert_eq!(val, "two"); - - let mut recv1 = task::spawn(rx1.recv()); - - assert_pending!(recv1.poll()); - - assert_ok!(tx.send("three")); - - assert!(recv1.is_woken()); - - let val = assert_ready_ok!(recv1.poll()); - assert_eq!(val, "three"); - - let val = assert_ready_ok!(recv2.poll()); - assert_eq!(val, "one"); - } - - let val = assert_recv!(rx2); - assert_eq!(val, "two"); - - let val = assert_recv!(rx2); - assert_eq!(val, "three"); -} - -#[test] -fn drop_rx_while_values_remain() { - let (tx, mut rx1) = broadcast::channel(16); - let mut rx2 = tx.subscribe(); - - assert_ok!(tx.send("one")); - assert_ok!(tx.send("two")); - - assert_recv!(rx1); - assert_recv!(rx2); - - drop(rx2); - drop(rx1); -} - -#[test] -fn lagging_rx() { - let (tx, mut rx1) = broadcast::channel(2); - let mut rx2 = tx.subscribe(); - - assert_ok!(tx.send("one")); - assert_ok!(tx.send("two")); - - assert_eq!("one", assert_recv!(rx1)); - - assert_ok!(tx.send("three")); - - // Lagged too far - let x = dbg!(rx2.try_recv()); - assert_lagged!(x, 1); - - // Calling again gets the next value - assert_eq!("two", assert_recv!(rx2)); - - assert_eq!("two", assert_recv!(rx1)); - assert_eq!("three", assert_recv!(rx1)); - - 
assert_ok!(tx.send("four")); - assert_ok!(tx.send("five")); - - assert_lagged!(rx2.try_recv(), 1); - - assert_ok!(tx.send("six")); - - assert_lagged!(rx2.try_recv(), 1); -} - -#[test] -fn send_no_rx() { - let (tx, _) = broadcast::channel(16); - - assert_err!(tx.send("hello")); - - let mut rx = tx.subscribe(); - - assert_ok!(tx.send("world")); - - let val = assert_recv!(rx); - assert_eq!("world", val); -} - -#[test] -#[should_panic] -fn zero_capacity() { - broadcast::channel::<()>(0); -} - -#[test] -#[should_panic] -fn capacity_too_big() { - use std::usize; - - broadcast::channel::<()>(1 + (usize::MAX >> 1)); -} - -#[test] -fn panic_in_clone() { - use std::panic::{self, AssertUnwindSafe}; - - #[derive(Eq, PartialEq, Debug)] - struct MyVal(usize); - - impl Clone for MyVal { - fn clone(&self) -> MyVal { - assert_ne!(0, self.0); - MyVal(self.0) - } - } - - let (tx, mut rx) = broadcast::channel(16); - - assert_ok!(tx.send(MyVal(0))); - assert_ok!(tx.send(MyVal(1))); - - let res = panic::catch_unwind(AssertUnwindSafe(|| { - let _ = rx.try_recv(); - })); - - assert_err!(res); - - let val = assert_recv!(rx); - assert_eq!(val, MyVal(1)); -} - -#[test] -fn dropping_tx_notifies_rx() { - let (tx, mut rx1) = broadcast::channel::<()>(16); - let mut rx2 = tx.subscribe(); - - let tx2 = tx.clone(); - - let mut recv1 = task::spawn(rx1.recv()); - let mut recv2 = task::spawn(rx2.recv()); - - assert_pending!(recv1.poll()); - assert_pending!(recv2.poll()); - - drop(tx); - - assert_pending!(recv1.poll()); - assert_pending!(recv2.poll()); - - drop(tx2); - - assert!(recv1.is_woken()); - assert!(recv2.is_woken()); - - let err = assert_ready_err!(recv1.poll()); - assert!(is_closed(err)); - - let err = assert_ready_err!(recv2.poll()); - assert!(is_closed(err)); -} - -#[test] -fn unconsumed_messages_are_dropped() { - let (tx, rx) = broadcast::channel(16); - - let msg = Arc::new(()); - - assert_ok!(tx.send(msg.clone())); - - assert_eq!(2, Arc::strong_count(&msg)); - - drop(rx); - - assert_eq!(1, Arc::strong_count(&msg)); -} - -#[test] -fn single_capacity_recvs() { - let (tx, mut rx) = broadcast::channel(1); - - assert_ok!(tx.send(1)); - - assert_eq!(assert_recv!(rx), 1); - assert_empty!(rx); -} - -#[test] -fn single_capacity_recvs_after_drop_1() { - let (tx, mut rx) = broadcast::channel(1); - - assert_ok!(tx.send(1)); - drop(tx); - - assert_eq!(assert_recv!(rx), 1); - assert_closed!(rx.try_recv()); -} - -#[test] -fn single_capacity_recvs_after_drop_2() { - let (tx, mut rx) = broadcast::channel(1); - - assert_ok!(tx.send(1)); - assert_ok!(tx.send(2)); - drop(tx); - - assert_lagged!(rx.try_recv(), 1); - assert_eq!(assert_recv!(rx), 2); - assert_closed!(rx.try_recv()); -} - -#[test] -fn dropping_sender_does_not_overwrite() { - let (tx, mut rx) = broadcast::channel(2); - - assert_ok!(tx.send(1)); - assert_ok!(tx.send(2)); - drop(tx); - - assert_eq!(assert_recv!(rx), 1); - assert_eq!(assert_recv!(rx), 2); - assert_closed!(rx.try_recv()); -} - -#[test] -fn lagging_receiver_recovers_after_wrap_closed_1() { - let (tx, mut rx) = broadcast::channel(2); - - assert_ok!(tx.send(1)); - assert_ok!(tx.send(2)); - assert_ok!(tx.send(3)); - drop(tx); - - assert_lagged!(rx.try_recv(), 1); - assert_eq!(assert_recv!(rx), 2); - assert_eq!(assert_recv!(rx), 3); - assert_closed!(rx.try_recv()); -} - -#[test] -fn lagging_receiver_recovers_after_wrap_closed_2() { - let (tx, mut rx) = broadcast::channel(2); - - assert_ok!(tx.send(1)); - assert_ok!(tx.send(2)); - assert_ok!(tx.send(3)); - assert_ok!(tx.send(4)); - drop(tx); - - 
assert_lagged!(rx.try_recv(), 2); - assert_eq!(assert_recv!(rx), 3); - assert_eq!(assert_recv!(rx), 4); - assert_closed!(rx.try_recv()); -} - -#[test] -fn lagging_receiver_recovers_after_wrap_open() { - let (tx, mut rx) = broadcast::channel(2); - - assert_ok!(tx.send(1)); - assert_ok!(tx.send(2)); - assert_ok!(tx.send(3)); - - assert_lagged!(rx.try_recv(), 1); - assert_eq!(assert_recv!(rx), 2); - assert_eq!(assert_recv!(rx), 3); - assert_empty!(rx); -} - -#[tokio::test] -async fn send_recv_stream_ready_deprecated() { - use tokio::stream::StreamExt; - - let (tx, mut rx) = broadcast::channel::(8); - - assert_ok!(tx.send(1)); - assert_ok!(tx.send(2)); - - assert_eq!(Some(Ok(1)), rx.next().await); - assert_eq!(Some(Ok(2)), rx.next().await); - - drop(tx); - - assert_eq!(None, rx.next().await); -} - -#[tokio::test] -async fn send_recv_stream_pending_deprecated() { - use tokio::stream::StreamExt; - - let (tx, mut rx) = broadcast::channel::(8); - - let mut recv = task::spawn(rx.next()); - assert_pending!(recv.poll()); - - assert_ok!(tx.send(1)); - - assert!(recv.is_woken()); - let val = assert_ready!(recv.poll()); - assert_eq!(val, Some(Ok(1))); -} - -fn is_closed(err: broadcast::RecvError) -> bool { - match err { - broadcast::RecvError::Closed => true, - _ => false, - } -} diff --git a/third_party/rust/tokio-0.2.25/tests/sync_cancellation_token.rs b/third_party/rust/tokio-0.2.25/tests/sync_cancellation_token.rs deleted file mode 100644 index de543c94b1f8..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/sync_cancellation_token.rs +++ /dev/null @@ -1,220 +0,0 @@ -#![cfg(tokio_unstable)] - -use tokio::pin; -use tokio::sync::CancellationToken; - -use core::future::Future; -use core::task::{Context, Poll}; -use futures_test::task::new_count_waker; - -#[test] -fn cancel_token() { - let (waker, wake_counter) = new_count_waker(); - let token = CancellationToken::new(); - assert_eq!(false, token.is_cancelled()); - - let wait_fut = token.cancelled(); - pin!(wait_fut); - - assert_eq!( - Poll::Pending, - wait_fut.as_mut().poll(&mut Context::from_waker(&waker)) - ); - assert_eq!(wake_counter, 0); - - let wait_fut_2 = token.cancelled(); - pin!(wait_fut_2); - - token.cancel(); - assert_eq!(wake_counter, 1); - assert_eq!(true, token.is_cancelled()); - - assert_eq!( - Poll::Ready(()), - wait_fut.as_mut().poll(&mut Context::from_waker(&waker)) - ); - assert_eq!( - Poll::Ready(()), - wait_fut_2.as_mut().poll(&mut Context::from_waker(&waker)) - ); -} - -#[test] -fn cancel_child_token_through_parent() { - let (waker, wake_counter) = new_count_waker(); - let token = CancellationToken::new(); - - let child_token = token.child_token(); - assert!(!child_token.is_cancelled()); - - let child_fut = child_token.cancelled(); - pin!(child_fut); - let parent_fut = token.cancelled(); - pin!(parent_fut); - - assert_eq!( - Poll::Pending, - child_fut.as_mut().poll(&mut Context::from_waker(&waker)) - ); - assert_eq!( - Poll::Pending, - parent_fut.as_mut().poll(&mut Context::from_waker(&waker)) - ); - assert_eq!(wake_counter, 0); - - token.cancel(); - assert_eq!(wake_counter, 2); - assert_eq!(true, token.is_cancelled()); - assert_eq!(true, child_token.is_cancelled()); - - assert_eq!( - Poll::Ready(()), - child_fut.as_mut().poll(&mut Context::from_waker(&waker)) - ); - assert_eq!( - Poll::Ready(()), - parent_fut.as_mut().poll(&mut Context::from_waker(&waker)) - ); -} - -#[test] -fn cancel_child_token_without_parent() { - let (waker, wake_counter) = new_count_waker(); - let token = CancellationToken::new(); - - let child_token_1 
= token.child_token(); - - let child_fut = child_token_1.cancelled(); - pin!(child_fut); - let parent_fut = token.cancelled(); - pin!(parent_fut); - - assert_eq!( - Poll::Pending, - child_fut.as_mut().poll(&mut Context::from_waker(&waker)) - ); - assert_eq!( - Poll::Pending, - parent_fut.as_mut().poll(&mut Context::from_waker(&waker)) - ); - assert_eq!(wake_counter, 0); - - child_token_1.cancel(); - assert_eq!(wake_counter, 1); - assert_eq!(false, token.is_cancelled()); - assert_eq!(true, child_token_1.is_cancelled()); - - assert_eq!( - Poll::Ready(()), - child_fut.as_mut().poll(&mut Context::from_waker(&waker)) - ); - assert_eq!( - Poll::Pending, - parent_fut.as_mut().poll(&mut Context::from_waker(&waker)) - ); - - let child_token_2 = token.child_token(); - let child_fut_2 = child_token_2.cancelled(); - pin!(child_fut_2); - - assert_eq!( - Poll::Pending, - child_fut_2.as_mut().poll(&mut Context::from_waker(&waker)) - ); - assert_eq!( - Poll::Pending, - parent_fut.as_mut().poll(&mut Context::from_waker(&waker)) - ); - - token.cancel(); - assert_eq!(wake_counter, 3); - assert_eq!(true, token.is_cancelled()); - assert_eq!(true, child_token_2.is_cancelled()); - - assert_eq!( - Poll::Ready(()), - child_fut_2.as_mut().poll(&mut Context::from_waker(&waker)) - ); - assert_eq!( - Poll::Ready(()), - parent_fut.as_mut().poll(&mut Context::from_waker(&waker)) - ); -} - -#[test] -fn create_child_token_after_parent_was_cancelled() { - for drop_child_first in [true, false].iter().cloned() { - let (waker, wake_counter) = new_count_waker(); - let token = CancellationToken::new(); - token.cancel(); - - let child_token = token.child_token(); - assert!(child_token.is_cancelled()); - - { - let child_fut = child_token.cancelled(); - pin!(child_fut); - let parent_fut = token.cancelled(); - pin!(parent_fut); - - assert_eq!( - Poll::Ready(()), - child_fut.as_mut().poll(&mut Context::from_waker(&waker)) - ); - assert_eq!( - Poll::Ready(()), - parent_fut.as_mut().poll(&mut Context::from_waker(&waker)) - ); - assert_eq!(wake_counter, 0); - - drop(child_fut); - drop(parent_fut); - } - - if drop_child_first { - drop(child_token); - drop(token); - } else { - drop(token); - drop(child_token); - } - } -} - -#[test] -fn drop_multiple_child_tokens() { - for drop_first_child_first in &[true, false] { - let token = CancellationToken::new(); - let mut child_tokens = [None, None, None]; - for i in 0..child_tokens.len() { - child_tokens[i] = Some(token.child_token()); - } - - assert!(!token.is_cancelled()); - assert!(!child_tokens[0].as_ref().unwrap().is_cancelled()); - - for i in 0..child_tokens.len() { - if *drop_first_child_first { - child_tokens[i] = None; - } else { - child_tokens[child_tokens.len() - 1 - i] = None; - } - assert!(!token.is_cancelled()); - } - - drop(token); - } -} - -#[test] -fn drop_parent_before_child_tokens() { - let token = CancellationToken::new(); - let child1 = token.child_token(); - let child2 = token.child_token(); - - drop(token); - assert!(!child1.is_cancelled()); - - drop(child1); - drop(child2); -} diff --git a/third_party/rust/tokio-0.2.25/tests/sync_errors.rs b/third_party/rust/tokio-0.2.25/tests/sync_errors.rs deleted file mode 100644 index 66e8f0c098e4..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/sync_errors.rs +++ /dev/null @@ -1,27 +0,0 @@ -#![warn(rust_2018_idioms)] -#![cfg(feature = "full")] - -fn is_error() {} - -#[test] -fn mpsc_error_bound() { - use tokio::sync::mpsc::error; - - is_error::>(); - is_error::>(); -} - -#[test] -fn oneshot_error_bound() { - use 
tokio::sync::oneshot::error; - - is_error::(); - is_error::(); -} - -#[test] -fn watch_error_bound() { - use tokio::sync::watch::error; - - is_error::>(); -} diff --git a/third_party/rust/tokio-0.2.25/tests/sync_mpsc.rs b/third_party/rust/tokio-0.2.25/tests/sync_mpsc.rs deleted file mode 100644 index c6c8620170fb..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/sync_mpsc.rs +++ /dev/null @@ -1,538 +0,0 @@ -#![allow(clippy::redundant_clone)] -#![warn(rust_2018_idioms)] -#![cfg(feature = "full")] - -use tokio::sync::mpsc; -use tokio::sync::mpsc::error::{TryRecvError, TrySendError}; -use tokio_test::task; -use tokio_test::{ - assert_err, assert_ok, assert_pending, assert_ready, assert_ready_err, assert_ready_ok, -}; - -use std::sync::Arc; - -trait AssertSend: Send {} -impl AssertSend for mpsc::Sender {} -impl AssertSend for mpsc::Receiver {} - -#[test] -fn send_recv_with_buffer() { - let (tx, rx) = mpsc::channel::(16); - let mut tx = task::spawn(tx); - let mut rx = task::spawn(rx); - - // Using poll_ready / try_send - assert_ready_ok!(tx.enter(|cx, mut tx| tx.poll_ready(cx))); - tx.try_send(1).unwrap(); - - // Without poll_ready - tx.try_send(2).unwrap(); - - drop(tx); - - let val = assert_ready!(rx.enter(|cx, mut rx| rx.poll_recv(cx))); - assert_eq!(val, Some(1)); - - let val = assert_ready!(rx.enter(|cx, mut rx| rx.poll_recv(cx))); - assert_eq!(val, Some(2)); - - let val = assert_ready!(rx.enter(|cx, mut rx| rx.poll_recv(cx))); - assert!(val.is_none()); -} - -#[test] -fn disarm() { - let (tx, rx) = mpsc::channel::(2); - let mut tx1 = task::spawn(tx.clone()); - let mut tx2 = task::spawn(tx.clone()); - let mut tx3 = task::spawn(tx.clone()); - let mut tx4 = task::spawn(tx); - let mut rx = task::spawn(rx); - - // We should be able to `poll_ready` two handles without problem - assert_ready_ok!(tx1.enter(|cx, mut tx| tx.poll_ready(cx))); - assert_ready_ok!(tx2.enter(|cx, mut tx| tx.poll_ready(cx))); - - // But a third should not be ready - assert_pending!(tx3.enter(|cx, mut tx| tx.poll_ready(cx))); - - // Using one of the reserved slots should allow a new handle to become ready - tx1.try_send(1).unwrap(); - // We also need to receive for the slot to be free - let _ = assert_ready!(rx.enter(|cx, mut rx| rx.poll_recv(cx))).unwrap(); - // Now there's a free slot! 
- assert_ready_ok!(tx3.enter(|cx, mut tx| tx.poll_ready(cx))); - assert_pending!(tx4.enter(|cx, mut tx| tx.poll_ready(cx))); - - // Dropping a ready handle should also open up a slot - drop(tx2); - assert_ready_ok!(tx4.enter(|cx, mut tx| tx.poll_ready(cx))); - assert_pending!(tx1.enter(|cx, mut tx| tx.poll_ready(cx))); - - // Explicitly disarming a handle should also open a slot - assert!(tx3.disarm()); - assert_ready_ok!(tx1.enter(|cx, mut tx| tx.poll_ready(cx))); - - // Disarming a non-armed sender does not free up a slot - assert!(!tx3.disarm()); - assert_pending!(tx3.enter(|cx, mut tx| tx.poll_ready(cx))); -} - -#[tokio::test] -async fn send_recv_stream_with_buffer() { - use tokio::stream::StreamExt; - - let (mut tx, mut rx) = mpsc::channel::(16); - - tokio::spawn(async move { - assert_ok!(tx.send(1).await); - assert_ok!(tx.send(2).await); - }); - - assert_eq!(Some(1), rx.next().await); - assert_eq!(Some(2), rx.next().await); - assert_eq!(None, rx.next().await); -} - -#[tokio::test] -async fn async_send_recv_with_buffer() { - let (mut tx, mut rx) = mpsc::channel(16); - - tokio::spawn(async move { - assert_ok!(tx.send(1).await); - assert_ok!(tx.send(2).await); - }); - - assert_eq!(Some(1), rx.recv().await); - assert_eq!(Some(2), rx.recv().await); - assert_eq!(None, rx.recv().await); -} - -#[test] -fn start_send_past_cap() { - let mut t1 = task::spawn(()); - let mut t2 = task::spawn(()); - let mut t3 = task::spawn(()); - - let (mut tx1, mut rx) = mpsc::channel(1); - let mut tx2 = tx1.clone(); - - assert_ok!(tx1.try_send(())); - - t1.enter(|cx, _| { - assert_pending!(tx1.poll_ready(cx)); - }); - - t2.enter(|cx, _| { - assert_pending!(tx2.poll_ready(cx)); - }); - - drop(tx1); - - let val = t3.enter(|cx, _| assert_ready!(rx.poll_recv(cx))); - assert!(val.is_some()); - - assert!(t2.is_woken()); - assert!(!t1.is_woken()); - - drop(tx2); - - let val = t3.enter(|cx, _| assert_ready!(rx.poll_recv(cx))); - assert!(val.is_none()); -} - -#[test] -#[should_panic] -fn buffer_gteq_one() { - mpsc::channel::(0); -} - -#[test] -fn send_recv_unbounded() { - let mut t1 = task::spawn(()); - - let (tx, mut rx) = mpsc::unbounded_channel::(); - - // Using `try_send` - assert_ok!(tx.send(1)); - assert_ok!(tx.send(2)); - - let val = assert_ready!(t1.enter(|cx, _| rx.poll_recv(cx))); - assert_eq!(val, Some(1)); - - let val = assert_ready!(t1.enter(|cx, _| rx.poll_recv(cx))); - assert_eq!(val, Some(2)); - - drop(tx); - - let val = assert_ready!(t1.enter(|cx, _| rx.poll_recv(cx))); - assert!(val.is_none()); -} - -#[tokio::test] -async fn async_send_recv_unbounded() { - let (tx, mut rx) = mpsc::unbounded_channel(); - - tokio::spawn(async move { - assert_ok!(tx.send(1)); - assert_ok!(tx.send(2)); - }); - - assert_eq!(Some(1), rx.recv().await); - assert_eq!(Some(2), rx.recv().await); - assert_eq!(None, rx.recv().await); -} - -#[tokio::test] -async fn send_recv_stream_unbounded() { - use tokio::stream::StreamExt; - - let (tx, mut rx) = mpsc::unbounded_channel::(); - - tokio::spawn(async move { - assert_ok!(tx.send(1)); - assert_ok!(tx.send(2)); - }); - - assert_eq!(Some(1), rx.next().await); - assert_eq!(Some(2), rx.next().await); - assert_eq!(None, rx.next().await); -} - -#[test] -fn no_t_bounds_buffer() { - struct NoImpls; - - let mut t1 = task::spawn(()); - let (tx, mut rx) = mpsc::channel(100); - - // sender should be Debug even though T isn't Debug - println!("{:?}", tx); - // same with Receiver - println!("{:?}", rx); - // and sender should be Clone even though T isn't Clone - 
assert!(tx.clone().try_send(NoImpls).is_ok()); - - let val = assert_ready!(t1.enter(|cx, _| rx.poll_recv(cx))); - assert!(val.is_some()); -} - -#[test] -fn no_t_bounds_unbounded() { - struct NoImpls; - - let mut t1 = task::spawn(()); - let (tx, mut rx) = mpsc::unbounded_channel(); - - // sender should be Debug even though T isn't Debug - println!("{:?}", tx); - // same with Receiver - println!("{:?}", rx); - // and sender should be Clone even though T isn't Clone - assert!(tx.clone().send(NoImpls).is_ok()); - - let val = assert_ready!(t1.enter(|cx, _| rx.poll_recv(cx))); - assert!(val.is_some()); -} - -#[test] -fn send_recv_buffer_limited() { - let mut t1 = task::spawn(()); - let mut t2 = task::spawn(()); - - let (mut tx, mut rx) = mpsc::channel::(1); - - // Run on a task context - t1.enter(|cx, _| { - assert_ready_ok!(tx.poll_ready(cx)); - - // Send first message - assert_ok!(tx.try_send(1)); - - // Not ready - assert_pending!(tx.poll_ready(cx)); - - // Send second message - assert_err!(tx.try_send(1337)); - }); - - t2.enter(|cx, _| { - // Take the value - let val = assert_ready!(rx.poll_recv(cx)); - assert_eq!(Some(1), val); - }); - - assert!(t1.is_woken()); - - t1.enter(|cx, _| { - assert_ready_ok!(tx.poll_ready(cx)); - - assert_ok!(tx.try_send(2)); - - // Not ready - assert_pending!(tx.poll_ready(cx)); - }); - - t2.enter(|cx, _| { - // Take the value - let val = assert_ready!(rx.poll_recv(cx)); - assert_eq!(Some(2), val); - }); - - t1.enter(|cx, _| { - assert_ready_ok!(tx.poll_ready(cx)); - }); -} - -#[test] -fn recv_close_gets_none_idle() { - let mut t1 = task::spawn(()); - - let (mut tx, mut rx) = mpsc::channel::(10); - - rx.close(); - - t1.enter(|cx, _| { - let val = assert_ready!(rx.poll_recv(cx)); - assert!(val.is_none()); - assert_ready_err!(tx.poll_ready(cx)); - }); -} - -#[test] -fn recv_close_gets_none_reserved() { - let mut t1 = task::spawn(()); - let mut t2 = task::spawn(()); - let mut t3 = task::spawn(()); - - let (mut tx1, mut rx) = mpsc::channel::(1); - let mut tx2 = tx1.clone(); - - assert_ready_ok!(t1.enter(|cx, _| tx1.poll_ready(cx))); - - t2.enter(|cx, _| { - assert_pending!(tx2.poll_ready(cx)); - }); - - rx.close(); - - assert!(t2.is_woken()); - - t2.enter(|cx, _| { - assert_ready_err!(tx2.poll_ready(cx)); - }); - - t3.enter(|cx, _| assert_pending!(rx.poll_recv(cx))); - - assert!(!t1.is_woken()); - assert!(!t2.is_woken()); - - assert_ok!(tx1.try_send(123)); - - assert!(t3.is_woken()); - - t3.enter(|cx, _| { - let v = assert_ready!(rx.poll_recv(cx)); - assert_eq!(v, Some(123)); - - let v = assert_ready!(rx.poll_recv(cx)); - assert!(v.is_none()); - }); -} - -#[test] -fn tx_close_gets_none() { - let mut t1 = task::spawn(()); - - let (_, mut rx) = mpsc::channel::(10); - - // Run on a task context - t1.enter(|cx, _| { - let v = assert_ready!(rx.poll_recv(cx)); - assert!(v.is_none()); - }); -} - -#[test] -fn try_send_fail() { - let mut t1 = task::spawn(()); - - let (mut tx, mut rx) = mpsc::channel(1); - - tx.try_send("hello").unwrap(); - - // This should fail - match assert_err!(tx.try_send("fail")) { - TrySendError::Full(..) 
=> {} - _ => panic!(), - } - - let val = assert_ready!(t1.enter(|cx, _| rx.poll_recv(cx))); - assert_eq!(val, Some("hello")); - - assert_ok!(tx.try_send("goodbye")); - drop(tx); - - let val = assert_ready!(t1.enter(|cx, _| rx.poll_recv(cx))); - assert_eq!(val, Some("goodbye")); - - let val = assert_ready!(t1.enter(|cx, _| rx.poll_recv(cx))); - assert!(val.is_none()); -} - -#[test] -fn drop_tx_with_permit_releases_permit() { - let mut t1 = task::spawn(()); - let mut t2 = task::spawn(()); - - // poll_ready reserves capacity, ensure that the capacity is released if tx - // is dropped w/o sending a value. - let (mut tx1, _rx) = mpsc::channel::(1); - let mut tx2 = tx1.clone(); - - assert_ready_ok!(t1.enter(|cx, _| tx1.poll_ready(cx))); - - t2.enter(|cx, _| { - assert_pending!(tx2.poll_ready(cx)); - }); - - drop(tx1); - - assert!(t2.is_woken()); - - assert_ready_ok!(t2.enter(|cx, _| tx2.poll_ready(cx))); -} - -#[test] -fn dropping_rx_closes_channel() { - let mut t1 = task::spawn(()); - - let (mut tx, rx) = mpsc::channel(100); - - let msg = Arc::new(()); - assert_ok!(tx.try_send(msg.clone())); - - drop(rx); - assert_ready_err!(t1.enter(|cx, _| tx.poll_ready(cx))); - - assert_eq!(1, Arc::strong_count(&msg)); -} - -#[test] -fn dropping_rx_closes_channel_for_try() { - let (mut tx, rx) = mpsc::channel(100); - - let msg = Arc::new(()); - tx.try_send(msg.clone()).unwrap(); - - drop(rx); - - { - let err = assert_err!(tx.try_send(msg.clone())); - match err { - TrySendError::Closed(..) => {} - _ => panic!(), - } - } - - assert_eq!(1, Arc::strong_count(&msg)); -} - -#[test] -fn unconsumed_messages_are_dropped() { - let msg = Arc::new(()); - - let (mut tx, rx) = mpsc::channel(100); - - tx.try_send(msg.clone()).unwrap(); - - assert_eq!(2, Arc::strong_count(&msg)); - - drop((tx, rx)); - - assert_eq!(1, Arc::strong_count(&msg)); -} - -#[test] -fn try_recv() { - let (mut tx, mut rx) = mpsc::channel(1); - match rx.try_recv() { - Err(TryRecvError::Empty) => {} - _ => panic!(), - } - tx.try_send(42).unwrap(); - match rx.try_recv() { - Ok(42) => {} - _ => panic!(), - } - drop(tx); - match rx.try_recv() { - Err(TryRecvError::Closed) => {} - _ => panic!(), - } -} - -#[test] -fn try_recv_unbounded() { - let (tx, mut rx) = mpsc::unbounded_channel(); - match rx.try_recv() { - Err(TryRecvError::Empty) => {} - _ => panic!(), - } - tx.send(42).unwrap(); - match rx.try_recv() { - Ok(42) => {} - _ => panic!(), - } - drop(tx); - match rx.try_recv() { - Err(TryRecvError::Closed) => {} - _ => panic!(), - } -} - -#[test] -fn ready_close_cancel_bounded() { - use futures::future::poll_fn; - - let (mut tx, mut rx) = mpsc::channel::<()>(100); - let _tx2 = tx.clone(); - - { - let mut ready = task::spawn(async { poll_fn(|cx| tx.poll_ready(cx)).await }); - assert_ready_ok!(ready.poll()); - } - - rx.close(); - - let mut recv = task::spawn(async { rx.recv().await }); - assert_pending!(recv.poll()); - - drop(tx); - - assert!(recv.is_woken()); -} - -#[tokio::test] -async fn permit_available_not_acquired_close() { - use futures::future::poll_fn; - - let (mut tx1, mut rx) = mpsc::channel::<()>(1); - let mut tx2 = tx1.clone(); - - { - let mut ready = task::spawn(poll_fn(|cx| tx1.poll_ready(cx))); - assert_ready_ok!(ready.poll()); - } - - let mut ready = task::spawn(poll_fn(|cx| tx2.poll_ready(cx))); - assert_pending!(ready.poll()); - - rx.close(); - - drop(tx1); - assert!(ready.is_woken()); - - drop(tx2); - assert!(rx.recv().await.is_none()); -} diff --git a/third_party/rust/tokio-0.2.25/tests/sync_mutex.rs 
b/third_party/rust/tokio-0.2.25/tests/sync_mutex.rs deleted file mode 100644 index 96194b31d9d4..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/sync_mutex.rs +++ /dev/null @@ -1,163 +0,0 @@ -#![warn(rust_2018_idioms)] -#![cfg(feature = "full")] - -use tokio::sync::Mutex; -use tokio::time::{interval, timeout}; -use tokio_test::task::spawn; -use tokio_test::{assert_pending, assert_ready}; - -use std::sync::Arc; -use std::time::Duration; - -#[test] -fn straight_execution() { - let l = Mutex::new(100); - - { - let mut t = spawn(l.lock()); - let mut g = assert_ready!(t.poll()); - assert_eq!(&*g, &100); - *g = 99; - } - { - let mut t = spawn(l.lock()); - let mut g = assert_ready!(t.poll()); - assert_eq!(&*g, &99); - *g = 98; - } - { - let mut t = spawn(l.lock()); - let g = assert_ready!(t.poll()); - assert_eq!(&*g, &98); - } -} - -#[test] -fn readiness() { - let l1 = Arc::new(Mutex::new(100)); - let l2 = Arc::clone(&l1); - let mut t1 = spawn(l1.lock()); - let mut t2 = spawn(l2.lock()); - - let g = assert_ready!(t1.poll()); - - // We can't now acquire the lease since it's already held in g - assert_pending!(t2.poll()); - - // But once g unlocks, we can acquire it - drop(g); - assert!(t2.is_woken()); - assert_ready!(t2.poll()); -} - -/* -#[test] -#[ignore] -fn lock() { - let mut lock = Mutex::new(false); - - let mut lock2 = lock.clone(); - std::thread::spawn(move || { - let l = lock2.lock(); - pin_mut!(l); - - let mut task = MockTask::new(); - let mut g = assert_ready!(task.poll(&mut l)); - std::thread::sleep(std::time::Duration::from_millis(500)); - *g = true; - drop(g); - }); - - std::thread::sleep(std::time::Duration::from_millis(50)); - let mut task = MockTask::new(); - let l = lock.lock(); - pin_mut!(l); - - assert_pending!(task.poll(&mut l)); - - std::thread::sleep(std::time::Duration::from_millis(500)); - assert!(task.is_woken()); - let result = assert_ready!(task.poll(&mut l)); - assert!(*result); -} -*/ - -#[tokio::test] -/// Ensure a mutex is unlocked if a future holding the lock -/// is aborted prematurely. -async fn aborted_future_1() { - let m1: Arc> = Arc::new(Mutex::new(0)); - { - let m2 = m1.clone(); - // Try to lock mutex in a future that is aborted prematurely - timeout(Duration::from_millis(1u64), async move { - let mut iv = interval(Duration::from_millis(1000)); - m2.lock().await; - iv.tick().await; - iv.tick().await; - }) - .await - .unwrap_err(); - } - // This should succeed as there is no lock left for the mutex. - timeout(Duration::from_millis(1u64), async move { - m1.lock().await; - }) - .await - .expect("Mutex is locked"); -} - -#[tokio::test] -/// This test is similar to `aborted_future_1` but this time the -/// aborted future is waiting for the lock. -async fn aborted_future_2() { - let m1: Arc> = Arc::new(Mutex::new(0)); - { - // Lock mutex - let _lock = m1.lock().await; - { - let m2 = m1.clone(); - // Try to lock mutex in a future that is aborted prematurely - timeout(Duration::from_millis(1u64), async move { - m2.lock().await; - }) - .await - .unwrap_err(); - } - } - // This should succeed as there is no lock left for the mutex. 
- timeout(Duration::from_millis(1u64), async move { - m1.lock().await; - }) - .await - .expect("Mutex is locked"); -} - -#[test] -fn try_lock() { - let m: Mutex = Mutex::new(0); - { - let g1 = m.try_lock(); - assert_eq!(g1.is_ok(), true); - let g2 = m.try_lock(); - assert_eq!(g2.is_ok(), false); - } - let g3 = m.try_lock(); - assert_eq!(g3.is_ok(), true); -} - -#[tokio::test] -async fn debug_format() { - let s = "debug"; - let m = Mutex::new(s.to_string()); - assert_eq!(format!("{:?}", s), format!("{:?}", m.lock().await)); -} - -#[tokio::test] -async fn mutex_debug() { - let s = "data"; - let m = Mutex::new(s.to_string()); - assert_eq!(format!("{:?}", m), r#"Mutex { data: "data" }"#); - let _guard = m.lock().await; - assert_eq!(format!("{:?}", m), r#"Mutex { data: }"#) -} diff --git a/third_party/rust/tokio-0.2.25/tests/sync_mutex_owned.rs b/third_party/rust/tokio-0.2.25/tests/sync_mutex_owned.rs deleted file mode 100644 index 394a6708bd27..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/sync_mutex_owned.rs +++ /dev/null @@ -1,121 +0,0 @@ -#![warn(rust_2018_idioms)] -#![cfg(feature = "full")] - -use tokio::sync::Mutex; -use tokio::time::{interval, timeout}; -use tokio_test::task::spawn; -use tokio_test::{assert_pending, assert_ready}; - -use std::sync::Arc; -use std::time::Duration; - -#[test] -fn straight_execution() { - let l = Arc::new(Mutex::new(100)); - - { - let mut t = spawn(l.clone().lock_owned()); - let mut g = assert_ready!(t.poll()); - assert_eq!(&*g, &100); - *g = 99; - } - { - let mut t = spawn(l.clone().lock_owned()); - let mut g = assert_ready!(t.poll()); - assert_eq!(&*g, &99); - *g = 98; - } - { - let mut t = spawn(l.lock_owned()); - let g = assert_ready!(t.poll()); - assert_eq!(&*g, &98); - } -} - -#[test] -fn readiness() { - let l = Arc::new(Mutex::new(100)); - let mut t1 = spawn(l.clone().lock_owned()); - let mut t2 = spawn(l.lock_owned()); - - let g = assert_ready!(t1.poll()); - - // We can't now acquire the lease since it's already held in g - assert_pending!(t2.poll()); - - // But once g unlocks, we can acquire it - drop(g); - assert!(t2.is_woken()); - assert_ready!(t2.poll()); -} - -#[tokio::test] -/// Ensure a mutex is unlocked if a future holding the lock -/// is aborted prematurely. -async fn aborted_future_1() { - let m1: Arc> = Arc::new(Mutex::new(0)); - { - let m2 = m1.clone(); - // Try to lock mutex in a future that is aborted prematurely - timeout(Duration::from_millis(1u64), async move { - let mut iv = interval(Duration::from_millis(1000)); - m2.lock_owned().await; - iv.tick().await; - iv.tick().await; - }) - .await - .unwrap_err(); - } - // This should succeed as there is no lock left for the mutex. - timeout(Duration::from_millis(1u64), async move { - m1.lock_owned().await; - }) - .await - .expect("Mutex is locked"); -} - -#[tokio::test] -/// This test is similar to `aborted_future_1` but this time the -/// aborted future is waiting for the lock. -async fn aborted_future_2() { - let m1: Arc> = Arc::new(Mutex::new(0)); - { - // Lock mutex - let _lock = m1.clone().lock_owned().await; - { - let m2 = m1.clone(); - // Try to lock mutex in a future that is aborted prematurely - timeout(Duration::from_millis(1u64), async move { - m2.lock_owned().await; - }) - .await - .unwrap_err(); - } - } - // This should succeed as there is no lock left for the mutex. 
- timeout(Duration::from_millis(1u64), async move { - m1.lock_owned().await; - }) - .await - .expect("Mutex is locked"); -} - -#[test] -fn try_lock_owned() { - let m: Arc> = Arc::new(Mutex::new(0)); - { - let g1 = m.clone().try_lock_owned(); - assert_eq!(g1.is_ok(), true); - let g2 = m.clone().try_lock_owned(); - assert_eq!(g2.is_ok(), false); - } - let g3 = m.try_lock_owned(); - assert_eq!(g3.is_ok(), true); -} - -#[tokio::test] -async fn debug_format() { - let s = "debug"; - let m = Arc::new(Mutex::new(s.to_string())); - assert_eq!(format!("{:?}", s), format!("{:?}", m.lock_owned().await)); -} diff --git a/third_party/rust/tokio-0.2.25/tests/sync_notify.rs b/third_party/rust/tokio-0.2.25/tests/sync_notify.rs deleted file mode 100644 index be39ce32dfdd..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/sync_notify.rs +++ /dev/null @@ -1,102 +0,0 @@ -#![warn(rust_2018_idioms)] -#![cfg(feature = "full")] - -use tokio::sync::Notify; -use tokio_test::task::spawn; -use tokio_test::*; - -trait AssertSend: Send + Sync {} -impl AssertSend for Notify {} - -#[test] -fn notify_notified_one() { - let notify = Notify::new(); - let mut notified = spawn(async { notify.notified().await }); - - notify.notify(); - assert_ready!(notified.poll()); -} - -#[test] -fn notified_one_notify() { - let notify = Notify::new(); - let mut notified = spawn(async { notify.notified().await }); - - assert_pending!(notified.poll()); - - notify.notify(); - assert!(notified.is_woken()); - assert_ready!(notified.poll()); -} - -#[test] -fn notified_multi_notify() { - let notify = Notify::new(); - let mut notified1 = spawn(async { notify.notified().await }); - let mut notified2 = spawn(async { notify.notified().await }); - - assert_pending!(notified1.poll()); - assert_pending!(notified2.poll()); - - notify.notify(); - assert!(notified1.is_woken()); - assert!(!notified2.is_woken()); - - assert_ready!(notified1.poll()); - assert_pending!(notified2.poll()); -} - -#[test] -fn notify_notified_multi() { - let notify = Notify::new(); - - notify.notify(); - - let mut notified1 = spawn(async { notify.notified().await }); - let mut notified2 = spawn(async { notify.notified().await }); - - assert_ready!(notified1.poll()); - assert_pending!(notified2.poll()); - - notify.notify(); - - assert!(notified2.is_woken()); - assert_ready!(notified2.poll()); -} - -#[test] -fn notified_drop_notified_notify() { - let notify = Notify::new(); - let mut notified1 = spawn(async { notify.notified().await }); - let mut notified2 = spawn(async { notify.notified().await }); - - assert_pending!(notified1.poll()); - - drop(notified1); - - assert_pending!(notified2.poll()); - - notify.notify(); - assert!(notified2.is_woken()); - assert_ready!(notified2.poll()); -} - -#[test] -fn notified_multi_notify_drop_one() { - let notify = Notify::new(); - let mut notified1 = spawn(async { notify.notified().await }); - let mut notified2 = spawn(async { notify.notified().await }); - - assert_pending!(notified1.poll()); - assert_pending!(notified2.poll()); - - notify.notify(); - - assert!(notified1.is_woken()); - assert!(!notified2.is_woken()); - - drop(notified1); - - assert!(notified2.is_woken()); - assert_ready!(notified2.poll()); -} diff --git a/third_party/rust/tokio-0.2.25/tests/sync_oneshot.rs b/third_party/rust/tokio-0.2.25/tests/sync_oneshot.rs deleted file mode 100644 index 13e526d48e8a..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/sync_oneshot.rs +++ /dev/null @@ -1,234 +0,0 @@ -#![warn(rust_2018_idioms)] -#![cfg(feature = "full")] - -use 
tokio::sync::oneshot; -use tokio_test::*; - -use std::future::Future; -use std::pin::Pin; - -trait AssertSend: Send {} -impl AssertSend for oneshot::Sender {} -impl AssertSend for oneshot::Receiver {} - -#[test] -fn send_recv() { - let (tx, rx) = oneshot::channel(); - let mut rx = task::spawn(rx); - - assert_pending!(rx.poll()); - - assert_ok!(tx.send(1)); - - assert!(rx.is_woken()); - - let val = assert_ready_ok!(rx.poll()); - assert_eq!(val, 1); -} - -#[tokio::test] -async fn async_send_recv() { - let (tx, rx) = oneshot::channel(); - - assert_ok!(tx.send(1)); - assert_eq!(1, assert_ok!(rx.await)); -} - -#[test] -fn close_tx() { - let (tx, rx) = oneshot::channel::(); - let mut rx = task::spawn(rx); - - assert_pending!(rx.poll()); - - drop(tx); - - assert!(rx.is_woken()); - assert_ready_err!(rx.poll()); -} - -#[test] -fn close_rx() { - // First, without checking poll_closed() - // - let (tx, _) = oneshot::channel(); - - assert_err!(tx.send(1)); - - // Second, via poll_closed(); - - let (tx, rx) = oneshot::channel(); - let mut tx = task::spawn(tx); - - assert_pending!(tx.enter(|cx, mut tx| tx.poll_closed(cx))); - - drop(rx); - - assert!(tx.is_woken()); - assert!(tx.is_closed()); - assert_ready!(tx.enter(|cx, mut tx| tx.poll_closed(cx))); - - assert_err!(tx.into_inner().send(1)); -} - -#[tokio::test] -async fn async_rx_closed() { - let (mut tx, rx) = oneshot::channel::<()>(); - - tokio::spawn(async move { - drop(rx); - }); - - tx.closed().await; -} - -#[test] -fn explicit_close_poll() { - // First, with message sent - let (tx, rx) = oneshot::channel(); - let mut rx = task::spawn(rx); - - assert_ok!(tx.send(1)); - - rx.close(); - - let value = assert_ready_ok!(rx.poll()); - assert_eq!(value, 1); - - // Second, without the message sent - let (tx, rx) = oneshot::channel::(); - let mut tx = task::spawn(tx); - let mut rx = task::spawn(rx); - - assert_pending!(tx.enter(|cx, mut tx| tx.poll_closed(cx))); - - rx.close(); - - assert!(tx.is_woken()); - assert!(tx.is_closed()); - assert_ready!(tx.enter(|cx, mut tx| tx.poll_closed(cx))); - - assert_err!(tx.into_inner().send(1)); - assert_ready_err!(rx.poll()); - - // Again, but without sending the value this time - let (tx, rx) = oneshot::channel::(); - let mut tx = task::spawn(tx); - let mut rx = task::spawn(rx); - - assert_pending!(tx.enter(|cx, mut tx| tx.poll_closed(cx))); - - rx.close(); - - assert!(tx.is_woken()); - assert!(tx.is_closed()); - assert_ready!(tx.enter(|cx, mut tx| tx.poll_closed(cx))); - - assert_ready_err!(rx.poll()); -} - -#[test] -fn explicit_close_try_recv() { - // First, with message sent - let (tx, mut rx) = oneshot::channel(); - - assert_ok!(tx.send(1)); - - rx.close(); - - let val = assert_ok!(rx.try_recv()); - assert_eq!(1, val); - - // Second, without the message sent - let (tx, mut rx) = oneshot::channel::(); - let mut tx = task::spawn(tx); - - assert_pending!(tx.enter(|cx, mut tx| tx.poll_closed(cx))); - - rx.close(); - - assert!(tx.is_woken()); - assert!(tx.is_closed()); - assert_ready!(tx.enter(|cx, mut tx| tx.poll_closed(cx))); - - assert_err!(rx.try_recv()); -} - -#[test] -#[should_panic] -fn close_try_recv_poll() { - let (_tx, rx) = oneshot::channel::(); - let mut rx = task::spawn(rx); - - rx.close(); - - assert_err!(rx.try_recv()); - - let _ = rx.poll(); -} - -#[test] -fn drops_tasks() { - let (mut tx, mut rx) = oneshot::channel::(); - let mut tx_task = task::spawn(()); - let mut rx_task = task::spawn(()); - - assert_pending!(tx_task.enter(|cx, _| tx.poll_closed(cx))); - assert_pending!(rx_task.enter(|cx, _| 
Pin::new(&mut rx).poll(cx))); - - drop(tx); - drop(rx); - - assert_eq!(1, tx_task.waker_ref_count()); - assert_eq!(1, rx_task.waker_ref_count()); -} - -#[test] -fn receiver_changes_task() { - let (tx, mut rx) = oneshot::channel(); - - let mut task1 = task::spawn(()); - let mut task2 = task::spawn(()); - - assert_pending!(task1.enter(|cx, _| Pin::new(&mut rx).poll(cx))); - - assert_eq!(2, task1.waker_ref_count()); - assert_eq!(1, task2.waker_ref_count()); - - assert_pending!(task2.enter(|cx, _| Pin::new(&mut rx).poll(cx))); - - assert_eq!(1, task1.waker_ref_count()); - assert_eq!(2, task2.waker_ref_count()); - - assert_ok!(tx.send(1)); - - assert!(!task1.is_woken()); - assert!(task2.is_woken()); - - assert_ready_ok!(task2.enter(|cx, _| Pin::new(&mut rx).poll(cx))); -} - -#[test] -fn sender_changes_task() { - let (mut tx, rx) = oneshot::channel::(); - - let mut task1 = task::spawn(()); - let mut task2 = task::spawn(()); - - assert_pending!(task1.enter(|cx, _| tx.poll_closed(cx))); - - assert_eq!(2, task1.waker_ref_count()); - assert_eq!(1, task2.waker_ref_count()); - - assert_pending!(task2.enter(|cx, _| tx.poll_closed(cx))); - - assert_eq!(1, task1.waker_ref_count()); - assert_eq!(2, task2.waker_ref_count()); - - drop(rx); - - assert!(!task1.is_woken()); - assert!(task2.is_woken()); - - assert_ready!(task2.enter(|cx, _| tx.poll_closed(cx))); -} diff --git a/third_party/rust/tokio-0.2.25/tests/sync_rwlock.rs b/third_party/rust/tokio-0.2.25/tests/sync_rwlock.rs deleted file mode 100644 index 87010b658e07..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/sync_rwlock.rs +++ /dev/null @@ -1,237 +0,0 @@ -#![warn(rust_2018_idioms)] - -use std::sync::Arc; -use std::task::Poll; - -use futures::future::FutureExt; -use futures::stream; -use futures::stream::StreamExt; - -use tokio::sync::{Barrier, RwLock}; -use tokio_test::task::spawn; -use tokio_test::{assert_pending, assert_ready}; - -#[test] -fn into_inner() { - let rwlock = RwLock::new(42); - assert_eq!(rwlock.into_inner(), 42); -} - -// multiple reads should be Ready -#[test] -fn read_shared() { - let rwlock = RwLock::new(100); - - let mut t1 = spawn(rwlock.read()); - let _g1 = assert_ready!(t1.poll()); - let mut t2 = spawn(rwlock.read()); - assert_ready!(t2.poll()); -} - -// When there is an active shared owner, exclusive access should not be possible -#[test] -fn write_shared_pending() { - let rwlock = RwLock::new(100); - let mut t1 = spawn(rwlock.read()); - - let _g1 = assert_ready!(t1.poll()); - let mut t2 = spawn(rwlock.write()); - assert_pending!(t2.poll()); -} - -// When there is an active exclusive owner, subsequent exclusive access should not be possible -#[test] -fn read_exclusive_pending() { - let rwlock = RwLock::new(100); - let mut t1 = spawn(rwlock.write()); - - let _g1 = assert_ready!(t1.poll()); - let mut t2 = spawn(rwlock.read()); - assert_pending!(t2.poll()); -} - -// If the max shared access is reached and subsquent shared access is pending -// should be made available when one of the shared acesses is dropped -#[test] -fn exhaust_reading() { - let rwlock = RwLock::new(100); - let mut reads = Vec::new(); - loop { - let mut t = spawn(rwlock.read()); - match t.poll() { - Poll::Ready(guard) => reads.push(guard), - Poll::Pending => break, - } - } - - let mut t1 = spawn(rwlock.read()); - assert_pending!(t1.poll()); - let g2 = reads.pop().unwrap(); - drop(g2); - assert!(t1.is_woken()); - assert_ready!(t1.poll()); -} - -// When there is an active exclusive owner, subsequent exclusive access should not be possible -#[test] -fn 
write_exclusive_pending() { - let rwlock = RwLock::new(100); - let mut t1 = spawn(rwlock.write()); - - let _g1 = assert_ready!(t1.poll()); - let mut t2 = spawn(rwlock.write()); - assert_pending!(t2.poll()); -} - -// When there is an active shared owner, exclusive access should be possible after shared is dropped -#[test] -fn write_shared_drop() { - let rwlock = RwLock::new(100); - let mut t1 = spawn(rwlock.read()); - - let g1 = assert_ready!(t1.poll()); - let mut t2 = spawn(rwlock.write()); - assert_pending!(t2.poll()); - drop(g1); - assert!(t2.is_woken()); - assert_ready!(t2.poll()); -} - -// when there is an active shared owner, and exclusive access is triggered, -// subsequent shared access should not be possible as write gathers all the available semaphore permits -#[test] -fn write_read_shared_pending() { - let rwlock = RwLock::new(100); - let mut t1 = spawn(rwlock.read()); - let _g1 = assert_ready!(t1.poll()); - - let mut t2 = spawn(rwlock.read()); - assert_ready!(t2.poll()); - - let mut t3 = spawn(rwlock.write()); - assert_pending!(t3.poll()); - - let mut t4 = spawn(rwlock.read()); - assert_pending!(t4.poll()); -} - -// when there is an active shared owner, and exclusive access is triggered, -// reading should be possible after pending exclusive access is dropped -#[test] -fn write_read_shared_drop_pending() { - let rwlock = RwLock::new(100); - let mut t1 = spawn(rwlock.read()); - let _g1 = assert_ready!(t1.poll()); - - let mut t2 = spawn(rwlock.write()); - assert_pending!(t2.poll()); - - let mut t3 = spawn(rwlock.read()); - assert_pending!(t3.poll()); - drop(t2); - - assert!(t3.is_woken()); - assert_ready!(t3.poll()); -} - -// Acquire an RwLock nonexclusively by a single task -#[tokio::test] -async fn read_uncontested() { - let rwlock = RwLock::new(100); - let result = *rwlock.read().await; - - assert_eq!(result, 100); -} - -// Acquire an uncontested RwLock in exclusive mode -#[tokio::test] -async fn write_uncontested() { - let rwlock = RwLock::new(100); - let mut result = rwlock.write().await; - *result += 50; - assert_eq!(*result, 150); -} - -// RwLocks should be acquired in the order that their Futures are waited upon. 
-#[tokio::test] -async fn write_order() { - let rwlock = RwLock::>::new(vec![]); - let fut2 = rwlock.write().map(|mut guard| guard.push(2)); - let fut1 = rwlock.write().map(|mut guard| guard.push(1)); - fut1.await; - fut2.await; - - let g = rwlock.read().await; - assert_eq!(*g, vec![1, 2]); -} - -// A single RwLock is contested by tasks in multiple threads -#[tokio::test(threaded_scheduler)] -async fn multithreaded() { - let barrier = Arc::new(Barrier::new(5)); - let rwlock = Arc::new(RwLock::::new(0)); - let rwclone1 = rwlock.clone(); - let rwclone2 = rwlock.clone(); - let rwclone3 = rwlock.clone(); - let rwclone4 = rwlock.clone(); - - let b1 = barrier.clone(); - tokio::spawn(async move { - stream::iter(0..1000) - .for_each(move |_| { - let rwlock = rwclone1.clone(); - async move { - let mut guard = rwlock.write().await; - *guard += 2; - } - }) - .await; - b1.wait().await; - }); - - let b2 = barrier.clone(); - tokio::spawn(async move { - stream::iter(0..1000) - .for_each(move |_| { - let rwlock = rwclone2.clone(); - async move { - let mut guard = rwlock.write().await; - *guard += 3; - } - }) - .await; - b2.wait().await; - }); - - let b3 = barrier.clone(); - tokio::spawn(async move { - stream::iter(0..1000) - .for_each(move |_| { - let rwlock = rwclone3.clone(); - async move { - let mut guard = rwlock.write().await; - *guard += 5; - } - }) - .await; - b3.wait().await; - }); - - let b4 = barrier.clone(); - tokio::spawn(async move { - stream::iter(0..1000) - .for_each(move |_| { - let rwlock = rwclone4.clone(); - async move { - let mut guard = rwlock.write().await; - *guard += 7; - } - }) - .await; - b4.wait().await; - }); - - barrier.wait().await; - let g = rwlock.read().await; - assert_eq!(*g, 17_000); -} diff --git a/third_party/rust/tokio-0.2.25/tests/sync_semaphore.rs b/third_party/rust/tokio-0.2.25/tests/sync_semaphore.rs deleted file mode 100644 index 1cb0c749db3d..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/sync_semaphore.rs +++ /dev/null @@ -1,81 +0,0 @@ -#![cfg(feature = "full")] - -use std::sync::Arc; -use tokio::sync::Semaphore; - -#[test] -fn no_permits() { - // this should not panic - Semaphore::new(0); -} - -#[test] -fn try_acquire() { - let sem = Semaphore::new(1); - { - let p1 = sem.try_acquire(); - assert!(p1.is_ok()); - let p2 = sem.try_acquire(); - assert!(p2.is_err()); - } - let p3 = sem.try_acquire(); - assert!(p3.is_ok()); -} - -#[tokio::test] -async fn acquire() { - let sem = Arc::new(Semaphore::new(1)); - let p1 = sem.try_acquire().unwrap(); - let sem_clone = sem.clone(); - let j = tokio::spawn(async move { - let _p2 = sem_clone.acquire().await; - }); - drop(p1); - j.await.unwrap(); -} - -#[tokio::test] -async fn add_permits() { - let sem = Arc::new(Semaphore::new(0)); - let sem_clone = sem.clone(); - let j = tokio::spawn(async move { - let _p2 = sem_clone.acquire().await; - }); - sem.add_permits(1); - j.await.unwrap(); -} - -#[test] -fn forget() { - let sem = Arc::new(Semaphore::new(1)); - { - let p = sem.try_acquire().unwrap(); - assert_eq!(sem.available_permits(), 0); - p.forget(); - assert_eq!(sem.available_permits(), 0); - } - assert_eq!(sem.available_permits(), 0); - assert!(sem.try_acquire().is_err()); -} - -#[tokio::test] -async fn stresstest() { - let sem = Arc::new(Semaphore::new(5)); - let mut join_handles = Vec::new(); - for _ in 0..1000 { - let sem_clone = sem.clone(); - join_handles.push(tokio::spawn(async move { - let _p = sem_clone.acquire().await; - })); - } - for j in join_handles { - j.await.unwrap(); - } - // there should be exactly 5 
semaphores available now - let _p1 = sem.try_acquire().unwrap(); - let _p2 = sem.try_acquire().unwrap(); - let _p3 = sem.try_acquire().unwrap(); - let _p4 = sem.try_acquire().unwrap(); - let _p5 = sem.try_acquire().unwrap(); - assert!(sem.try_acquire().is_err()); -} diff --git a/third_party/rust/tokio-0.2.25/tests/sync_semaphore_owned.rs b/third_party/rust/tokio-0.2.25/tests/sync_semaphore_owned.rs deleted file mode 100644 index 8ed6209f3b90..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/sync_semaphore_owned.rs +++ /dev/null @@ -1,75 +0,0 @@ -#![cfg(feature = "full")] - -use std::sync::Arc; -use tokio::sync::Semaphore; - -#[test] -fn try_acquire() { - let sem = Arc::new(Semaphore::new(1)); - { - let p1 = sem.clone().try_acquire_owned(); - assert!(p1.is_ok()); - let p2 = sem.clone().try_acquire_owned(); - assert!(p2.is_err()); - } - let p3 = sem.try_acquire_owned(); - assert!(p3.is_ok()); -} - -#[tokio::test] -async fn acquire() { - let sem = Arc::new(Semaphore::new(1)); - let p1 = sem.clone().try_acquire_owned().unwrap(); - let sem_clone = sem.clone(); - let j = tokio::spawn(async move { - let _p2 = sem_clone.acquire_owned().await; - }); - drop(p1); - j.await.unwrap(); -} - -#[tokio::test] -async fn add_permits() { - let sem = Arc::new(Semaphore::new(0)); - let sem_clone = sem.clone(); - let j = tokio::spawn(async move { - let _p2 = sem_clone.acquire_owned().await; - }); - sem.add_permits(1); - j.await.unwrap(); -} - -#[test] -fn forget() { - let sem = Arc::new(Semaphore::new(1)); - { - let p = sem.clone().try_acquire_owned().unwrap(); - assert_eq!(sem.available_permits(), 0); - p.forget(); - assert_eq!(sem.available_permits(), 0); - } - assert_eq!(sem.available_permits(), 0); - assert!(sem.try_acquire_owned().is_err()); -} - -#[tokio::test] -async fn stresstest() { - let sem = Arc::new(Semaphore::new(5)); - let mut join_handles = Vec::new(); - for _ in 0..1000 { - let sem_clone = sem.clone(); - join_handles.push(tokio::spawn(async move { - let _p = sem_clone.acquire_owned().await; - })); - } - for j in join_handles { - j.await.unwrap(); - } - // there should be exactly 5 semaphores available now - let _p1 = sem.clone().try_acquire_owned().unwrap(); - let _p2 = sem.clone().try_acquire_owned().unwrap(); - let _p3 = sem.clone().try_acquire_owned().unwrap(); - let _p4 = sem.clone().try_acquire_owned().unwrap(); - let _p5 = sem.clone().try_acquire_owned().unwrap(); - assert!(sem.try_acquire_owned().is_err()); -} diff --git a/third_party/rust/tokio-0.2.25/tests/sync_watch.rs b/third_party/rust/tokio-0.2.25/tests/sync_watch.rs deleted file mode 100644 index 2bc5bb2a85a8..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/sync_watch.rs +++ /dev/null @@ -1,231 +0,0 @@ -#![allow(clippy::cognitive_complexity)] -#![warn(rust_2018_idioms)] -#![cfg(feature = "full")] - -use tokio::sync::watch; -use tokio_test::task::spawn; -use tokio_test::{assert_pending, assert_ready}; - -#[test] -fn single_rx_recv() { - let (tx, mut rx) = watch::channel("one"); - - { - let mut t = spawn(rx.recv()); - let v = assert_ready!(t.poll()).unwrap(); - assert_eq!(v, "one"); - } - - { - let mut t = spawn(rx.recv()); - - assert_pending!(t.poll()); - - tx.broadcast("two").unwrap(); - - assert!(t.is_woken()); - - let v = assert_ready!(t.poll()).unwrap(); - assert_eq!(v, "two"); - } - - { - let mut t = spawn(rx.recv()); - - assert_pending!(t.poll()); - - drop(tx); - - let res = assert_ready!(t.poll()); - assert!(res.is_none()); - } -} - -#[test] -fn multi_rx() { - let (tx, mut rx1) = watch::channel("one"); - let mut 
rx2 = rx1.clone(); - - { - let mut t1 = spawn(rx1.recv()); - let mut t2 = spawn(rx2.recv()); - - let res = assert_ready!(t1.poll()); - assert_eq!(res.unwrap(), "one"); - - let res = assert_ready!(t2.poll()); - assert_eq!(res.unwrap(), "one"); - } - - let mut t2 = spawn(rx2.recv()); - - { - let mut t1 = spawn(rx1.recv()); - - assert_pending!(t1.poll()); - assert_pending!(t2.poll()); - - tx.broadcast("two").unwrap(); - - assert!(t1.is_woken()); - assert!(t2.is_woken()); - - let res = assert_ready!(t1.poll()); - assert_eq!(res.unwrap(), "two"); - } - - { - let mut t1 = spawn(rx1.recv()); - - assert_pending!(t1.poll()); - - tx.broadcast("three").unwrap(); - - assert!(t1.is_woken()); - assert!(t2.is_woken()); - - let res = assert_ready!(t1.poll()); - assert_eq!(res.unwrap(), "three"); - - let res = assert_ready!(t2.poll()); - assert_eq!(res.unwrap(), "three"); - } - - drop(t2); - - { - let mut t1 = spawn(rx1.recv()); - let mut t2 = spawn(rx2.recv()); - - assert_pending!(t1.poll()); - assert_pending!(t2.poll()); - - tx.broadcast("four").unwrap(); - - let res = assert_ready!(t1.poll()); - assert_eq!(res.unwrap(), "four"); - drop(t1); - - let mut t1 = spawn(rx1.recv()); - assert_pending!(t1.poll()); - - drop(tx); - - assert!(t1.is_woken()); - let res = assert_ready!(t1.poll()); - assert!(res.is_none()); - - let res = assert_ready!(t2.poll()); - assert_eq!(res.unwrap(), "four"); - - drop(t2); - let mut t2 = spawn(rx2.recv()); - let res = assert_ready!(t2.poll()); - assert!(res.is_none()); - } -} - -#[test] -fn rx_observes_final_value() { - // Initial value - - let (tx, mut rx) = watch::channel("one"); - drop(tx); - - { - let mut t1 = spawn(rx.recv()); - let res = assert_ready!(t1.poll()); - assert_eq!(res.unwrap(), "one"); - } - - { - let mut t1 = spawn(rx.recv()); - let res = assert_ready!(t1.poll()); - assert!(res.is_none()); - } - - // Sending a value - - let (tx, mut rx) = watch::channel("one"); - - tx.broadcast("two").unwrap(); - - { - let mut t1 = spawn(rx.recv()); - let res = assert_ready!(t1.poll()); - assert_eq!(res.unwrap(), "two"); - } - - { - let mut t1 = spawn(rx.recv()); - assert_pending!(t1.poll()); - - tx.broadcast("three").unwrap(); - drop(tx); - - assert!(t1.is_woken()); - - let res = assert_ready!(t1.poll()); - assert_eq!(res.unwrap(), "three"); - } - - { - let mut t1 = spawn(rx.recv()); - let res = assert_ready!(t1.poll()); - assert!(res.is_none()); - } -} - -#[test] -fn poll_close() { - let (mut tx, rx) = watch::channel("one"); - - { - let mut t = spawn(tx.closed()); - assert_pending!(t.poll()); - - drop(rx); - - assert!(t.is_woken()); - assert_ready!(t.poll()); - } - - assert!(tx.broadcast("two").is_err()); -} - -#[test] -fn stream_impl() { - use tokio::stream::StreamExt; - - let (tx, mut rx) = watch::channel("one"); - - { - let mut t = spawn(rx.next()); - let v = assert_ready!(t.poll()).unwrap(); - assert_eq!(v, "one"); - } - - { - let mut t = spawn(rx.next()); - - assert_pending!(t.poll()); - - tx.broadcast("two").unwrap(); - - assert!(t.is_woken()); - - let v = assert_ready!(t.poll()).unwrap(); - assert_eq!(v, "two"); - } - - { - let mut t = spawn(rx.next()); - - assert_pending!(t.poll()); - - drop(tx); - - let res = assert_ready!(t.poll()); - assert!(res.is_none()); - } -} diff --git a/third_party/rust/tokio-0.2.25/tests/task_blocking.rs b/third_party/rust/tokio-0.2.25/tests/task_blocking.rs deleted file mode 100644 index 4ca1596e052a..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/task_blocking.rs +++ /dev/null @@ -1,245 +0,0 @@ -#![warn(rust_2018_idioms)] 
-#![cfg(feature = "full")] - -use tokio::{runtime, task}; -use tokio_test::assert_ok; - -use std::thread; -use std::time::Duration; - -#[tokio::test] -async fn basic_blocking() { - // Run a few times - for _ in 0..100 { - let out = assert_ok!( - tokio::spawn(async { - assert_ok!( - task::spawn_blocking(|| { - thread::sleep(Duration::from_millis(5)); - "hello" - }) - .await - ) - }) - .await - ); - - assert_eq!(out, "hello"); - } -} - -#[tokio::test(threaded_scheduler)] -async fn block_in_blocking() { - // Run a few times - for _ in 0..100 { - let out = assert_ok!( - tokio::spawn(async { - assert_ok!( - task::spawn_blocking(|| { - task::block_in_place(|| { - thread::sleep(Duration::from_millis(5)); - }); - "hello" - }) - .await - ) - }) - .await - ); - - assert_eq!(out, "hello"); - } -} - -#[tokio::test(threaded_scheduler)] -async fn block_in_block() { - // Run a few times - for _ in 0..100 { - let out = assert_ok!( - tokio::spawn(async { - task::block_in_place(|| { - task::block_in_place(|| { - thread::sleep(Duration::from_millis(5)); - }); - "hello" - }) - }) - .await - ); - - assert_eq!(out, "hello"); - } -} - -#[tokio::test(basic_scheduler)] -#[should_panic] -async fn no_block_in_basic_scheduler() { - task::block_in_place(|| {}); -} - -#[test] -fn yes_block_in_threaded_block_on() { - let mut rt = runtime::Builder::new() - .threaded_scheduler() - .build() - .unwrap(); - rt.block_on(async { - task::block_in_place(|| {}); - }); -} - -#[test] -#[should_panic] -fn no_block_in_basic_block_on() { - let mut rt = runtime::Builder::new().basic_scheduler().build().unwrap(); - rt.block_on(async { - task::block_in_place(|| {}); - }); -} - -#[test] -fn can_enter_basic_rt_from_within_block_in_place() { - let mut outer = tokio::runtime::Builder::new() - .threaded_scheduler() - .build() - .unwrap(); - - outer.block_on(async { - tokio::task::block_in_place(|| { - let mut inner = tokio::runtime::Builder::new() - .basic_scheduler() - .build() - .unwrap(); - - inner.block_on(async {}) - }) - }); -} - -#[test] -fn useful_panic_message_when_dropping_rt_in_rt() { - use std::panic::{catch_unwind, AssertUnwindSafe}; - - let mut outer = tokio::runtime::Builder::new() - .threaded_scheduler() - .build() - .unwrap(); - - let result = catch_unwind(AssertUnwindSafe(|| { - outer.block_on(async { - let _ = tokio::runtime::Builder::new() - .basic_scheduler() - .build() - .unwrap(); - }); - })); - - assert!(result.is_err()); - let err = result.unwrap_err(); - let err: &'static str = err.downcast_ref::<&'static str>().unwrap(); - - assert!( - err.find("Cannot drop a runtime").is_some(), - "Wrong panic message: {:?}", - err - ); -} - -#[test] -fn can_shutdown_with_zero_timeout_in_runtime() { - let mut outer = tokio::runtime::Builder::new() - .threaded_scheduler() - .build() - .unwrap(); - - outer.block_on(async { - let rt = tokio::runtime::Builder::new() - .basic_scheduler() - .build() - .unwrap(); - rt.shutdown_timeout(Duration::from_nanos(0)); - }); -} - -#[test] -fn can_shutdown_now_in_runtime() { - let mut outer = tokio::runtime::Builder::new() - .threaded_scheduler() - .build() - .unwrap(); - - outer.block_on(async { - let rt = tokio::runtime::Builder::new() - .basic_scheduler() - .build() - .unwrap(); - rt.shutdown_background(); - }); -} - -#[test] -fn coop_disabled_in_block_in_place() { - let mut outer = tokio::runtime::Builder::new() - .threaded_scheduler() - .enable_time() - .build() - .unwrap(); - - let (tx, rx) = tokio::sync::mpsc::unbounded_channel(); - for i in 0..200 { - tx.send(i).unwrap(); - } - drop(tx); - 
- outer.block_on(async move { - let jh = tokio::spawn(async move { - tokio::task::block_in_place(move || { - futures::executor::block_on(async move { - use tokio::stream::StreamExt; - assert_eq!(rx.fold(0, |n, _| n + 1).await, 200); - }) - }) - }); - - tokio::time::timeout(Duration::from_secs(1), jh) - .await - .expect("timed out (probably hanging)") - .unwrap() - }); -} - -#[test] -fn coop_disabled_in_block_in_place_in_block_on() { - let (done_tx, done_rx) = std::sync::mpsc::channel(); - let done = done_tx.clone(); - thread::spawn(move || { - let mut outer = tokio::runtime::Builder::new() - .threaded_scheduler() - .build() - .unwrap(); - - let (tx, rx) = tokio::sync::mpsc::unbounded_channel(); - for i in 0..200 { - tx.send(i).unwrap(); - } - drop(tx); - - outer.block_on(async move { - tokio::task::block_in_place(move || { - futures::executor::block_on(async move { - use tokio::stream::StreamExt; - assert_eq!(rx.fold(0, |n, _| n + 1).await, 200); - }) - }) - }); - - let _ = done.send(Ok(())); - }); - - thread::spawn(move || { - thread::sleep(Duration::from_secs(1)); - let _ = done_tx.send(Err("timed out (probably hanging)")); - }); - - done_rx.recv().unwrap().unwrap(); -} diff --git a/third_party/rust/tokio-0.2.25/tests/task_local.rs b/third_party/rust/tokio-0.2.25/tests/task_local.rs deleted file mode 100644 index 7f508997f237..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/task_local.rs +++ /dev/null @@ -1,31 +0,0 @@ -tokio::task_local! { - static REQ_ID: u32; - pub static FOO: bool; -} - -#[tokio::test(threaded_scheduler)] -async fn local() { - let j1 = tokio::spawn(REQ_ID.scope(1, async move { - assert_eq!(REQ_ID.get(), 1); - assert_eq!(REQ_ID.get(), 1); - })); - - let j2 = tokio::spawn(REQ_ID.scope(2, async move { - REQ_ID.with(|v| { - assert_eq!(REQ_ID.get(), 2); - assert_eq!(*v, 2); - }); - - tokio::time::delay_for(std::time::Duration::from_millis(10)).await; - - assert_eq!(REQ_ID.get(), 2); - })); - - let j3 = tokio::spawn(FOO.scope(true, async move { - assert!(FOO.get()); - })); - - j1.await.unwrap(); - j2.await.unwrap(); - j3.await.unwrap(); -} diff --git a/third_party/rust/tokio-0.2.25/tests/task_local_set.rs b/third_party/rust/tokio-0.2.25/tests/task_local_set.rs deleted file mode 100644 index bf80b8ee5f5d..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/task_local_set.rs +++ /dev/null @@ -1,499 +0,0 @@ -#![warn(rust_2018_idioms)] -#![cfg(feature = "full")] - -use tokio::runtime::{self, Runtime}; -use tokio::sync::{mpsc, oneshot}; -use tokio::task::{self, LocalSet}; -use tokio::time; - -use std::cell::Cell; -use std::sync::atomic::Ordering::{self, SeqCst}; -use std::sync::atomic::{AtomicBool, AtomicUsize}; -use std::time::Duration; - -#[tokio::test(basic_scheduler)] -async fn local_basic_scheduler() { - LocalSet::new() - .run_until(async { - task::spawn_local(async {}).await.unwrap(); - }) - .await; -} - -#[tokio::test(threaded_scheduler)] -async fn local_threadpool() { - thread_local! { - static ON_RT_THREAD: Cell = Cell::new(false); - } - - ON_RT_THREAD.with(|cell| cell.set(true)); - - LocalSet::new() - .run_until(async { - assert!(ON_RT_THREAD.with(|cell| cell.get())); - task::spawn_local(async { - assert!(ON_RT_THREAD.with(|cell| cell.get())); - }) - .await - .unwrap(); - }) - .await; -} - -#[tokio::test(threaded_scheduler)] -async fn localset_future_threadpool() { - thread_local! 
{ - static ON_LOCAL_THREAD: Cell = Cell::new(false); - } - - ON_LOCAL_THREAD.with(|cell| cell.set(true)); - - let local = LocalSet::new(); - local.spawn_local(async move { - assert!(ON_LOCAL_THREAD.with(|cell| cell.get())); - }); - local.await; -} - -#[tokio::test(threaded_scheduler)] -async fn localset_future_timers() { - static RAN1: AtomicBool = AtomicBool::new(false); - static RAN2: AtomicBool = AtomicBool::new(false); - - let local = LocalSet::new(); - local.spawn_local(async move { - time::delay_for(Duration::from_millis(10)).await; - RAN1.store(true, Ordering::SeqCst); - }); - local.spawn_local(async move { - time::delay_for(Duration::from_millis(20)).await; - RAN2.store(true, Ordering::SeqCst); - }); - local.await; - assert!(RAN1.load(Ordering::SeqCst)); - assert!(RAN2.load(Ordering::SeqCst)); -} - -#[tokio::test] -async fn localset_future_drives_all_local_futs() { - static RAN1: AtomicBool = AtomicBool::new(false); - static RAN2: AtomicBool = AtomicBool::new(false); - static RAN3: AtomicBool = AtomicBool::new(false); - - let local = LocalSet::new(); - local.spawn_local(async move { - task::spawn_local(async { - task::yield_now().await; - RAN3.store(true, Ordering::SeqCst); - }); - task::yield_now().await; - RAN1.store(true, Ordering::SeqCst); - }); - local.spawn_local(async move { - task::yield_now().await; - RAN2.store(true, Ordering::SeqCst); - }); - local.await; - assert!(RAN1.load(Ordering::SeqCst)); - assert!(RAN2.load(Ordering::SeqCst)); - assert!(RAN3.load(Ordering::SeqCst)); -} - -#[tokio::test(threaded_scheduler)] -async fn local_threadpool_timer() { - // This test ensures that runtime services like the timer are properly - // set for the local task set. - thread_local! { - static ON_RT_THREAD: Cell = Cell::new(false); - } - - ON_RT_THREAD.with(|cell| cell.set(true)); - - LocalSet::new() - .run_until(async { - assert!(ON_RT_THREAD.with(|cell| cell.get())); - let join = task::spawn_local(async move { - assert!(ON_RT_THREAD.with(|cell| cell.get())); - time::delay_for(Duration::from_millis(10)).await; - assert!(ON_RT_THREAD.with(|cell| cell.get())); - }); - join.await.unwrap(); - }) - .await; -} - -#[test] -// This will panic, since the thread that calls `block_on` cannot use -// in-place blocking inside of `block_on`. -#[should_panic] -fn local_threadpool_blocking_in_place() { - thread_local! { - static ON_RT_THREAD: Cell = Cell::new(false); - } - - ON_RT_THREAD.with(|cell| cell.set(true)); - - let mut rt = runtime::Builder::new() - .threaded_scheduler() - .enable_all() - .build() - .unwrap(); - LocalSet::new().block_on(&mut rt, async { - assert!(ON_RT_THREAD.with(|cell| cell.get())); - let join = task::spawn_local(async move { - assert!(ON_RT_THREAD.with(|cell| cell.get())); - task::block_in_place(|| {}); - assert!(ON_RT_THREAD.with(|cell| cell.get())); - }); - join.await.unwrap(); - }); -} - -#[tokio::test(threaded_scheduler)] -async fn local_threadpool_blocking_run() { - thread_local! 
{ - static ON_RT_THREAD: Cell = Cell::new(false); - } - - ON_RT_THREAD.with(|cell| cell.set(true)); - - LocalSet::new() - .run_until(async { - assert!(ON_RT_THREAD.with(|cell| cell.get())); - let join = task::spawn_local(async move { - assert!(ON_RT_THREAD.with(|cell| cell.get())); - task::spawn_blocking(|| { - assert!( - !ON_RT_THREAD.with(|cell| cell.get()), - "blocking must not run on the local task set's thread" - ); - }) - .await - .unwrap(); - assert!(ON_RT_THREAD.with(|cell| cell.get())); - }); - join.await.unwrap(); - }) - .await; -} - -#[tokio::test(threaded_scheduler)] -async fn all_spawns_are_local() { - use futures::future; - thread_local! { - static ON_RT_THREAD: Cell = Cell::new(false); - } - - ON_RT_THREAD.with(|cell| cell.set(true)); - - LocalSet::new() - .run_until(async { - assert!(ON_RT_THREAD.with(|cell| cell.get())); - let handles = (0..128) - .map(|_| { - task::spawn_local(async { - assert!(ON_RT_THREAD.with(|cell| cell.get())); - }) - }) - .collect::>(); - for joined in future::join_all(handles).await { - joined.unwrap(); - } - }) - .await; -} - -#[tokio::test(threaded_scheduler)] -async fn nested_spawn_is_local() { - thread_local! { - static ON_RT_THREAD: Cell = Cell::new(false); - } - - ON_RT_THREAD.with(|cell| cell.set(true)); - - LocalSet::new() - .run_until(async { - assert!(ON_RT_THREAD.with(|cell| cell.get())); - task::spawn_local(async { - assert!(ON_RT_THREAD.with(|cell| cell.get())); - task::spawn_local(async { - assert!(ON_RT_THREAD.with(|cell| cell.get())); - task::spawn_local(async { - assert!(ON_RT_THREAD.with(|cell| cell.get())); - task::spawn_local(async { - assert!(ON_RT_THREAD.with(|cell| cell.get())); - }) - .await - .unwrap(); - }) - .await - .unwrap(); - }) - .await - .unwrap(); - }) - .await - .unwrap(); - }) - .await; -} - -#[test] -fn join_local_future_elsewhere() { - thread_local! { - static ON_RT_THREAD: Cell = Cell::new(false); - } - - ON_RT_THREAD.with(|cell| cell.set(true)); - - let mut rt = runtime::Builder::new() - .threaded_scheduler() - .build() - .unwrap(); - let local = LocalSet::new(); - local.block_on(&mut rt, async move { - let (tx, rx) = oneshot::channel(); - let join = task::spawn_local(async move { - println!("hello world running..."); - assert!( - ON_RT_THREAD.with(|cell| cell.get()), - "local task must run on local thread, no matter where it is awaited" - ); - rx.await.unwrap(); - - println!("hello world task done"); - "hello world" - }); - let join2 = task::spawn(async move { - assert!( - !ON_RT_THREAD.with(|cell| cell.get()), - "spawned task should be on a worker" - ); - - tx.send(()).expect("task shouldn't have ended yet"); - println!("waking up hello world..."); - - join.await.expect("task should complete successfully"); - - println!("hello world task joined"); - }); - join2.await.unwrap() - }); -} - -#[test] -fn drop_cancels_tasks() { - use std::rc::Rc; - - // This test reproduces issue #1842 - let mut rt = rt(); - let rc1 = Rc::new(()); - let rc2 = rc1.clone(); - - let (started_tx, started_rx) = oneshot::channel(); - - let local = LocalSet::new(); - local.spawn_local(async move { - // Move this in - let _rc2 = rc2; - - started_tx.send(()).unwrap(); - loop { - time::delay_for(Duration::from_secs(3600)).await; - } - }); - - local.block_on(&mut rt, async { - started_rx.await.unwrap(); - }); - drop(local); - drop(rt); - - assert_eq!(1, Rc::strong_count(&rc1)); -} - -/// Runs a test function in a separate thread, and panics if the test does not -/// complete within the specified timeout, or if the test function panics. 
-/// -/// This is intended for running tests whose failure mode is a hang or infinite -/// loop that cannot be detected otherwise. -fn with_timeout(timeout: Duration, f: impl FnOnce() + Send + 'static) { - use std::sync::mpsc::RecvTimeoutError; - - let (done_tx, done_rx) = std::sync::mpsc::channel(); - let thread = std::thread::spawn(move || { - f(); - - // Send a message on the channel so that the test thread can - // determine if we have entered an infinite loop: - done_tx.send(()).unwrap(); - }); - - // Since the failure mode of this test is an infinite loop, rather than - // something we can easily make assertions about, we'll run it in a - // thread. When the test thread finishes, it will send a message on a - // channel to this thread. We'll wait for that message with a fairly - // generous timeout, and if we don't recieve it, we assume the test - // thread has hung. - // - // Note that it should definitely complete in under a minute, but just - // in case CI is slow, we'll give it a long timeout. - match done_rx.recv_timeout(timeout) { - Err(RecvTimeoutError::Timeout) => panic!( - "test did not complete within {:?} seconds, \ - we have (probably) entered an infinite loop!", - timeout, - ), - // Did the test thread panic? We'll find out for sure when we `join` - // with it. - Err(RecvTimeoutError::Disconnected) => { - println!("done_rx dropped, did the test thread panic?"); - } - // Test completed successfully! - Ok(()) => {} - } - - thread.join().expect("test thread should not panic!") -} - -#[test] -fn drop_cancels_remote_tasks() { - // This test reproduces issue #1885. - with_timeout(Duration::from_secs(60), || { - let (tx, mut rx) = mpsc::channel::<()>(1024); - - let mut rt = rt(); - - let local = LocalSet::new(); - local.spawn_local(async move { while rx.recv().await.is_some() {} }); - local.block_on(&mut rt, async { - time::delay_for(Duration::from_millis(1)).await; - }); - - drop(tx); - - // This enters an infinite loop if the remote notified tasks are not - // properly cancelled. - drop(local); - }); -} - -#[test] -fn local_tasks_wake_join_all() { - // This test reproduces issue #2460. - with_timeout(Duration::from_secs(60), || { - use futures::future::join_all; - use tokio::task::LocalSet; - - let mut rt = rt(); - let set = LocalSet::new(); - let mut handles = Vec::new(); - - for _ in 1..=128 { - handles.push(set.spawn_local(async move { - tokio::task::spawn_local(async move {}).await.unwrap(); - })); - } - - rt.block_on(set.run_until(join_all(handles))); - }); -} - -#[tokio::test] -async fn local_tasks_are_polled_after_tick() { - // Reproduces issues #1899 and #1900 - - static RX1: AtomicUsize = AtomicUsize::new(0); - static RX2: AtomicUsize = AtomicUsize::new(0); - static EXPECTED: usize = 500; - - let (tx, mut rx) = mpsc::unbounded_channel(); - - let local = LocalSet::new(); - - local - .run_until(async { - let task2 = task::spawn(async move { - // Wait a bit - time::delay_for(Duration::from_millis(100)).await; - - let mut oneshots = Vec::with_capacity(EXPECTED); - - // Send values - for _ in 0..EXPECTED { - let (oneshot_tx, oneshot_rx) = oneshot::channel(); - oneshots.push(oneshot_tx); - tx.send(oneshot_rx).unwrap(); - } - - time::delay_for(Duration::from_millis(100)).await; - - for tx in oneshots.drain(..) 
{ - tx.send(()).unwrap(); - } - - time::delay_for(Duration::from_millis(300)).await; - let rx1 = RX1.load(SeqCst); - let rx2 = RX2.load(SeqCst); - println!("EXPECT = {}; RX1 = {}; RX2 = {}", EXPECTED, rx1, rx2); - assert_eq!(EXPECTED, rx1); - assert_eq!(EXPECTED, rx2); - }); - - while let Some(oneshot) = rx.recv().await { - RX1.fetch_add(1, SeqCst); - - task::spawn_local(async move { - oneshot.await.unwrap(); - RX2.fetch_add(1, SeqCst); - }); - } - - task2.await.unwrap(); - }) - .await; -} - -#[tokio::test] -async fn acquire_mutex_in_drop() { - use futures::future::pending; - - let (tx1, rx1) = oneshot::channel(); - let (tx2, rx2) = oneshot::channel(); - let local = LocalSet::new(); - - local.spawn_local(async move { - let _ = rx2.await; - unreachable!(); - }); - - local.spawn_local(async move { - let _ = rx1.await; - tx2.send(()).unwrap(); - unreachable!(); - }); - - // Spawn a task that will never notify - local.spawn_local(async move { - pending::<()>().await; - tx1.send(()).unwrap(); - }); - - // Tick the loop - local - .run_until(async { - task::yield_now().await; - }) - .await; - - // Drop the LocalSet - drop(local); -} - -fn rt() -> Runtime { - tokio::runtime::Builder::new() - .basic_scheduler() - .enable_all() - .build() - .unwrap() -} diff --git a/third_party/rust/tokio-0.2.25/tests/tcp_accept.rs b/third_party/rust/tokio-0.2.25/tests/tcp_accept.rs deleted file mode 100644 index 9f5b441468d0..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/tcp_accept.rs +++ /dev/null @@ -1,101 +0,0 @@ -#![warn(rust_2018_idioms)] -#![cfg(feature = "full")] - -use tokio::net::{TcpListener, TcpStream}; -use tokio::sync::{mpsc, oneshot}; -use tokio_test::assert_ok; - -use std::net::{IpAddr, SocketAddr}; - -macro_rules! test_accept { - ($(($ident:ident, $target:expr),)*) => { - $( - #[tokio::test] - async fn $ident() { - let mut listener = assert_ok!(TcpListener::bind($target).await); - let addr = listener.local_addr().unwrap(); - - let (tx, rx) = oneshot::channel(); - - tokio::spawn(async move { - let (socket, _) = assert_ok!(listener.accept().await); - assert_ok!(tx.send(socket)); - }); - - let cli = assert_ok!(TcpStream::connect(&addr).await); - let srv = assert_ok!(rx.await); - - assert_eq!(cli.local_addr().unwrap(), srv.peer_addr().unwrap()); - } - )* - } -} - -test_accept! { - (ip_str, "127.0.0.1:0"), - (host_str, "localhost:0"), - (socket_addr, "127.0.0.1:0".parse::().unwrap()), - (str_port_tuple, ("127.0.0.1", 0)), - (ip_port_tuple, ("127.0.0.1".parse::().unwrap(), 0)), -} - -use pin_project_lite::pin_project; -use std::pin::Pin; -use std::sync::{ - atomic::{AtomicUsize, Ordering::SeqCst}, - Arc, -}; -use std::task::{Context, Poll}; -use tokio::stream::{Stream, StreamExt}; - -pin_project! 
{ - struct TrackPolls<S> { - npolls: Arc<AtomicUsize>, - #[pin] - s: S, - } -} - -impl<S> Stream for TrackPolls<S> -where - S: Stream, -{ - type Item = S::Item; - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> { - let this = self.project(); - this.npolls.fetch_add(1, SeqCst); - this.s.poll_next(cx) - } -} - -#[tokio::test] -async fn no_extra_poll() { - let mut listener = assert_ok!(TcpListener::bind("127.0.0.1:0").await); - let addr = listener.local_addr().unwrap(); - - let (tx, rx) = oneshot::channel(); - let (accepted_tx, mut accepted_rx) = mpsc::unbounded_channel(); - - tokio::spawn(async move { - let mut incoming = TrackPolls { - npolls: Arc::new(AtomicUsize::new(0)), - s: listener.incoming(), - }; - assert_ok!(tx.send(Arc::clone(&incoming.npolls))); - while incoming.next().await.is_some() { - accepted_tx.send(()).unwrap(); - } - }); - - let npolls = assert_ok!(rx.await); - tokio::task::yield_now().await; - - // should have been polled exactly once: the initial poll - assert_eq!(npolls.load(SeqCst), 1); - - let _ = assert_ok!(TcpStream::connect(&addr).await); - accepted_rx.next().await.unwrap(); - - // should have been polled twice more: once to yield Some(), then once to yield Pending - assert_eq!(npolls.load(SeqCst), 1 + 2); -} diff --git a/third_party/rust/tokio-0.2.25/tests/tcp_connect.rs b/third_party/rust/tokio-0.2.25/tests/tcp_connect.rs deleted file mode 100644 index de1cead829e2..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/tcp_connect.rs +++ /dev/null @@ -1,229 +0,0 @@ -#![warn(rust_2018_idioms)] -#![cfg(feature = "full")] - -use tokio::net::{TcpListener, TcpStream}; -use tokio::sync::oneshot; -use tokio_test::assert_ok; - -use futures::join; - -#[tokio::test] -async fn connect_v4() { - let mut srv = assert_ok!(TcpListener::bind("127.0.0.1:0").await); - let addr = assert_ok!(srv.local_addr()); - assert!(addr.is_ipv4()); - - let (tx, rx) = oneshot::channel(); - - tokio::spawn(async move { - let (socket, addr) = assert_ok!(srv.accept().await); - assert_eq!(addr, assert_ok!(socket.peer_addr())); - assert_ok!(tx.send(socket)); - }); - - let mine = assert_ok!(TcpStream::connect(&addr).await); - let theirs = assert_ok!(rx.await); - - assert_eq!( - assert_ok!(mine.local_addr()), - assert_ok!(theirs.peer_addr()) - ); - assert_eq!( - assert_ok!(theirs.local_addr()), - assert_ok!(mine.peer_addr()) - ); -} - -#[tokio::test] -async fn connect_v6() { - let mut srv = assert_ok!(TcpListener::bind("[::1]:0").await); - let addr = assert_ok!(srv.local_addr()); - assert!(addr.is_ipv6()); - - let (tx, rx) = oneshot::channel(); - - tokio::spawn(async move { - let (socket, addr) = assert_ok!(srv.accept().await); - assert_eq!(addr, assert_ok!(socket.peer_addr())); - assert_ok!(tx.send(socket)); - }); - - let mine = assert_ok!(TcpStream::connect(&addr).await); - let theirs = assert_ok!(rx.await); - - assert_eq!( - assert_ok!(mine.local_addr()), - assert_ok!(theirs.peer_addr()) - ); - assert_eq!( - assert_ok!(theirs.local_addr()), - assert_ok!(mine.peer_addr()) - ); -} - -#[tokio::test] -async fn connect_addr_ip_string() { - let mut srv = assert_ok!(TcpListener::bind("127.0.0.1:0").await); - let addr = assert_ok!(srv.local_addr()); - let addr = format!("127.0.0.1:{}", addr.port()); - - let server = async { - assert_ok!(srv.accept().await); - }; - - let client = async { - assert_ok!(TcpStream::connect(addr).await); - }; - - join!(server, client); -} - -#[tokio::test] -async fn connect_addr_ip_str_slice() { - let mut srv = assert_ok!(TcpListener::bind("127.0.0.1:0").await); - let addr = 
assert_ok!(srv.local_addr()); - let addr = format!("127.0.0.1:{}", addr.port()); - - let server = async { - assert_ok!(srv.accept().await); - }; - - let client = async { - assert_ok!(TcpStream::connect(&addr[..]).await); - }; - - join!(server, client); -} - -#[tokio::test] -async fn connect_addr_host_string() { - let mut srv = assert_ok!(TcpListener::bind("127.0.0.1:0").await); - let addr = assert_ok!(srv.local_addr()); - let addr = format!("localhost:{}", addr.port()); - - let server = async { - assert_ok!(srv.accept().await); - }; - - let client = async { - assert_ok!(TcpStream::connect(addr).await); - }; - - join!(server, client); -} - -#[tokio::test] -async fn connect_addr_ip_port_tuple() { - let mut srv = assert_ok!(TcpListener::bind("127.0.0.1:0").await); - let addr = assert_ok!(srv.local_addr()); - let addr = (addr.ip(), addr.port()); - - let server = async { - assert_ok!(srv.accept().await); - }; - - let client = async { - assert_ok!(TcpStream::connect(&addr).await); - }; - - join!(server, client); -} - -#[tokio::test] -async fn connect_addr_ip_str_port_tuple() { - let mut srv = assert_ok!(TcpListener::bind("127.0.0.1:0").await); - let addr = assert_ok!(srv.local_addr()); - let addr = ("127.0.0.1", addr.port()); - - let server = async { - assert_ok!(srv.accept().await); - }; - - let client = async { - assert_ok!(TcpStream::connect(&addr).await); - }; - - join!(server, client); -} - -#[tokio::test] -async fn connect_addr_host_str_port_tuple() { - let mut srv = assert_ok!(TcpListener::bind("127.0.0.1:0").await); - let addr = assert_ok!(srv.local_addr()); - let addr = ("localhost", addr.port()); - - let server = async { - assert_ok!(srv.accept().await); - }; - - let client = async { - assert_ok!(TcpStream::connect(&addr).await); - }; - - join!(server, client); -} - -/* - * TODO: bring this back once TCP exposes HUP again - * -#[cfg(target_os = "linux")] -mod linux { - use tokio::net::{TcpListener, TcpStream}; - use tokio::prelude::*; - use tokio_test::assert_ok; - - use mio::unix::UnixReady; - - use futures_util::future::poll_fn; - use std::io::Write; - use std::time::Duration; - use std::{net, thread}; - - #[tokio::test] - fn poll_hup() { - let addr = assert_ok!("127.0.0.1:0".parse()); - let mut srv = assert_ok!(TcpListener::bind(&addr)); - let addr = assert_ok!(srv.local_addr()); - - tokio::spawn(async move { - let (mut client, _) = assert_ok!(srv.accept().await); - assert_ok!(client.set_linger(Some(Duration::from_millis(0)))); - assert_ok!(client.write_all(b"hello world").await); - - // TODO: Drop? - }); - - /* - let t = thread::spawn(move || { - let mut client = assert_ok!(srv.accept()).0; - client.set_linger(Some(Duration::from_millis(0))).unwrap(); - client.write(b"hello world").unwrap(); - thread::sleep(Duration::from_millis(200)); - }); - */ - - let mut stream = assert_ok!(TcpStream::connect(&addr).await); - - // Poll for HUP before reading. 
- future::poll_fn(|| stream.poll_read_ready(UnixReady::hup().into())) - .wait() - .unwrap(); - - // Same for write half - future::poll_fn(|| stream.poll_write_ready()) - .wait() - .unwrap(); - - let mut buf = vec![0; 11]; - - // Read the data - future::poll_fn(|| stream.poll_read(&mut buf)) - .wait() - .unwrap(); - - assert_eq!(b"hello world", &buf[..]); - - t.join().unwrap(); - } -} -*/ diff --git a/third_party/rust/tokio-0.2.25/tests/tcp_echo.rs b/third_party/rust/tokio-0.2.25/tests/tcp_echo.rs deleted file mode 100644 index 1feba63ee739..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/tcp_echo.rs +++ /dev/null @@ -1,42 +0,0 @@ -#![warn(rust_2018_idioms)] -#![cfg(feature = "full")] - -use tokio::net::{TcpListener, TcpStream}; -use tokio::prelude::*; -use tokio::sync::oneshot; -use tokio_test::assert_ok; - -#[tokio::test] -async fn echo_server() { - const ITER: usize = 1024; - - let (tx, rx) = oneshot::channel(); - - let mut srv = assert_ok!(TcpListener::bind("127.0.0.1:0").await); - let addr = assert_ok!(srv.local_addr()); - - let msg = "foo bar baz"; - tokio::spawn(async move { - let mut stream = assert_ok!(TcpStream::connect(&addr).await); - - for _ in 0..ITER { - // write - assert_ok!(stream.write_all(msg.as_bytes()).await); - - // read - let mut buf = [0; 11]; - assert_ok!(stream.read_exact(&mut buf).await); - assert_eq!(&buf[..], msg.as_bytes()); - } - - assert_ok!(tx.send(())); - }); - - let (mut stream, _) = assert_ok!(srv.accept().await); - let (mut rd, mut wr) = stream.split(); - - let n = assert_ok!(io::copy(&mut rd, &mut wr).await); - assert_eq!(n, (ITER * msg.len()) as u64); - - assert_ok!(rx.await); -} diff --git a/third_party/rust/tokio-0.2.25/tests/tcp_into_split.rs b/third_party/rust/tokio-0.2.25/tests/tcp_into_split.rs deleted file mode 100644 index 86ed461923d3..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/tcp_into_split.rs +++ /dev/null @@ -1,131 +0,0 @@ -#![warn(rust_2018_idioms)] -#![cfg(feature = "full")] - -use std::io::{Error, ErrorKind, Result}; -use std::io::{Read, Write}; -use std::{net, thread}; - -use tokio::io::{AsyncReadExt, AsyncWriteExt}; -use tokio::net::{TcpListener, TcpStream}; -use tokio::try_join; - -#[tokio::test] -async fn split() -> Result<()> { - const MSG: &[u8] = b"split"; - - let mut listener = TcpListener::bind("127.0.0.1:0").await?; - let addr = listener.local_addr()?; - - let (stream1, (mut stream2, _)) = try_join! { - TcpStream::connect(&addr), - listener.accept(), - }?; - let (mut read_half, mut write_half) = stream1.into_split(); - - let ((), (), ()) = try_join! 
{ - async { - let len = stream2.write(MSG).await?; - assert_eq!(len, MSG.len()); - - let mut read_buf = vec![0u8; 32]; - let read_len = stream2.read(&mut read_buf).await?; - assert_eq!(&read_buf[..read_len], MSG); - Result::Ok(()) - }, - async { - let len = write_half.write(MSG).await?; - assert_eq!(len, MSG.len()); - Ok(()) - }, - async { - let mut read_buf = vec![0u8; 32]; - let peek_len1 = read_half.peek(&mut read_buf[..]).await?; - let peek_len2 = read_half.peek(&mut read_buf[..]).await?; - assert_eq!(peek_len1, peek_len2); - - let read_len = read_half.read(&mut read_buf[..]).await?; - assert_eq!(peek_len1, read_len); - assert_eq!(&read_buf[..read_len], MSG); - Ok(()) - }, - }?; - - Ok(()) -} - -#[tokio::test] -async fn reunite() -> Result<()> { - let listener = net::TcpListener::bind("127.0.0.1:0")?; - let addr = listener.local_addr()?; - - let handle = thread::spawn(move || { - drop(listener.accept().unwrap()); - drop(listener.accept().unwrap()); - }); - - let stream1 = TcpStream::connect(&addr).await?; - let (read1, write1) = stream1.into_split(); - - let stream2 = TcpStream::connect(&addr).await?; - let (_, write2) = stream2.into_split(); - - let read1 = match read1.reunite(write2) { - Ok(_) => panic!("Reunite should not succeed"), - Err(err) => err.0, - }; - - read1.reunite(write1).expect("Reunite should succeed"); - - handle.join().unwrap(); - Ok(()) -} - -/// Test that dropping the write half actually closes the stream. -#[tokio::test] -async fn drop_write() -> Result<()> { - const MSG: &[u8] = b"split"; - - let listener = net::TcpListener::bind("127.0.0.1:0")?; - let addr = listener.local_addr()?; - - let handle = thread::spawn(move || { - let (mut stream, _) = listener.accept().unwrap(); - stream.write_all(MSG).unwrap(); - - let mut read_buf = [0u8; 32]; - let res = match stream.read(&mut read_buf) { - Ok(0) => Ok(()), - Ok(len) => Err(Error::new( - ErrorKind::Other, - format!("Unexpected read: {} bytes.", len), - )), - Err(err) => Err(err), - }; - - drop(stream); - - res - }); - - let stream = TcpStream::connect(&addr).await?; - let (mut read_half, write_half) = stream.into_split(); - - let mut read_buf = [0u8; 32]; - let read_len = read_half.read(&mut read_buf[..]).await?; - assert_eq!(&read_buf[..read_len], MSG); - - // drop it while the read is in progress - std::thread::spawn(move || { - thread::sleep(std::time::Duration::from_millis(50)); - drop(write_half); - }); - - match read_half.read(&mut read_buf[..]).await { - Ok(0) => {} - Ok(len) => panic!("Unexpected read: {} bytes.", len), - Err(err) => panic!("Unexpected error: {}.", err), - } - - handle.join().unwrap().unwrap(); - Ok(()) -} diff --git a/third_party/rust/tokio-0.2.25/tests/tcp_peek.rs b/third_party/rust/tokio-0.2.25/tests/tcp_peek.rs deleted file mode 100644 index aecc0ac19cd2..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/tcp_peek.rs +++ /dev/null @@ -1,29 +0,0 @@ -#![warn(rust_2018_idioms)] -#![cfg(feature = "full")] - -use tokio::io::AsyncReadExt; -use tokio::net::TcpStream; - -use tokio_test::assert_ok; - -use std::thread; -use std::{convert::TryInto, io::Write, net}; - -#[tokio::test] -async fn peek() { - let listener = net::TcpListener::bind("127.0.0.1:0").unwrap(); - let addr = listener.local_addr().unwrap(); - let t = thread::spawn(move || assert_ok!(listener.accept()).0); - - let left = net::TcpStream::connect(&addr).unwrap(); - let mut right = t.join().unwrap(); - let _ = right.write(&[1, 2, 3, 4]).unwrap(); - - let mut left: TcpStream = left.try_into().unwrap(); - let mut buf = [0u8; 16]; 
- let n = assert_ok!(left.peek(&mut buf).await); - assert_eq!([1, 2, 3, 4], buf[..n]); - - let n = assert_ok!(left.read(&mut buf).await); - assert_eq!([1, 2, 3, 4], buf[..n]); -} diff --git a/third_party/rust/tokio-0.2.25/tests/tcp_shutdown.rs b/third_party/rust/tokio-0.2.25/tests/tcp_shutdown.rs deleted file mode 100644 index bd43e143b8da..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/tcp_shutdown.rs +++ /dev/null @@ -1,29 +0,0 @@ -#![warn(rust_2018_idioms)] -#![cfg(feature = "full")] - -use tokio::io::{self, AsyncWriteExt}; -use tokio::net::{TcpListener, TcpStream}; -use tokio::prelude::*; -use tokio_test::assert_ok; - -#[tokio::test] -async fn shutdown() { - let mut srv = assert_ok!(TcpListener::bind("127.0.0.1:0").await); - let addr = assert_ok!(srv.local_addr()); - - tokio::spawn(async move { - let mut stream = assert_ok!(TcpStream::connect(&addr).await); - - assert_ok!(AsyncWriteExt::shutdown(&mut stream).await); - - let mut buf = [0; 1]; - let n = assert_ok!(stream.read(&mut buf).await); - assert_eq!(n, 0); - }); - - let (mut stream, _) = assert_ok!(srv.accept().await); - let (mut rd, mut wr) = stream.split(); - - let n = assert_ok!(io::copy(&mut rd, &mut wr).await); - assert_eq!(n, 0); -} diff --git a/third_party/rust/tokio-0.2.25/tests/tcp_split.rs b/third_party/rust/tokio-0.2.25/tests/tcp_split.rs deleted file mode 100644 index 7171dac4635b..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/tcp_split.rs +++ /dev/null @@ -1,42 +0,0 @@ -#![warn(rust_2018_idioms)] -#![cfg(feature = "full")] - -use std::io::Result; -use std::io::{Read, Write}; -use std::{net, thread}; - -use tokio::io::{AsyncReadExt, AsyncWriteExt}; -use tokio::net::TcpStream; - -#[tokio::test] -async fn split() -> Result<()> { - const MSG: &[u8] = b"split"; - - let listener = net::TcpListener::bind("127.0.0.1:0")?; - let addr = listener.local_addr()?; - - let handle = thread::spawn(move || { - let (mut stream, _) = listener.accept().unwrap(); - stream.write_all(MSG).unwrap(); - - let mut read_buf = [0u8; 32]; - let read_len = stream.read(&mut read_buf).unwrap(); - assert_eq!(&read_buf[..read_len], MSG); - }); - - let mut stream = TcpStream::connect(&addr).await?; - let (mut read_half, mut write_half) = stream.split(); - - let mut read_buf = [0u8; 32]; - let peek_len1 = read_half.peek(&mut read_buf[..]).await?; - let peek_len2 = read_half.peek(&mut read_buf[..]).await?; - assert_eq!(peek_len1, peek_len2); - - let read_len = read_half.read(&mut read_buf[..]).await?; - assert_eq!(peek_len1, read_len); - assert_eq!(&read_buf[..read_len], MSG); - - write_half.write(MSG).await?; - handle.join().unwrap(); - Ok(()) -} diff --git a/third_party/rust/tokio-0.2.25/tests/test_clock.rs b/third_party/rust/tokio-0.2.25/tests/test_clock.rs deleted file mode 100644 index 891636fdb28d..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/test_clock.rs +++ /dev/null @@ -1,50 +0,0 @@ -#![warn(rust_2018_idioms)] -#![cfg(feature = "full")] - -use tokio::time::{self, Duration, Instant}; - -#[tokio::test] -async fn resume_lets_time_move_forward_instead_of_resetting_it() { - let start = Instant::now(); - time::pause(); - time::advance(Duration::from_secs(10)).await; - let advanced_by_ten_secs = Instant::now(); - assert!(advanced_by_ten_secs - start > Duration::from_secs(10)); - assert!(advanced_by_ten_secs - start < Duration::from_secs(11)); - time::resume(); - assert!(advanced_by_ten_secs < Instant::now()); - assert!(Instant::now() - advanced_by_ten_secs < Duration::from_secs(1)); -} - -#[tokio::test] -async fn 
can_pause_after_resume() { - let start = Instant::now(); - time::pause(); - time::advance(Duration::from_secs(10)).await; - time::resume(); - time::pause(); - time::advance(Duration::from_secs(10)).await; - assert!(Instant::now() - start > Duration::from_secs(20)); - assert!(Instant::now() - start < Duration::from_secs(21)); -} - -#[tokio::test] -#[should_panic] -async fn freezing_time_while_frozen_panics() { - time::pause(); - time::pause(); -} - -#[tokio::test] -#[should_panic] -async fn advancing_time_when_time_is_not_frozen_panics() { - time::advance(Duration::from_secs(1)).await; -} - -#[tokio::test] -#[should_panic] -async fn resuming_time_when_not_frozen_panics() { - time::pause(); - time::resume(); - time::resume(); -} diff --git a/third_party/rust/tokio-0.2.25/tests/time_delay.rs b/third_party/rust/tokio-0.2.25/tests/time_delay.rs deleted file mode 100644 index e4804ec6740f..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/time_delay.rs +++ /dev/null @@ -1,196 +0,0 @@ -#![warn(rust_2018_idioms)] -#![cfg(feature = "full")] - -use tokio::time::{self, Duration, Instant}; -use tokio_test::{assert_pending, assert_ready, task}; - -macro_rules! assert_elapsed { - ($now:expr, $ms:expr) => {{ - let elapsed = $now.elapsed(); - let lower = ms($ms); - - // Handles ms rounding - assert!( - elapsed >= lower && elapsed <= lower + ms(1), - "actual = {:?}, expected = {:?}", - elapsed, - lower - ); - }}; -} - -#[tokio::test] -async fn immediate_delay() { - time::pause(); - - let now = Instant::now(); - - // Ready! - time::delay_until(now).await; - assert_elapsed!(now, 0); -} - -#[tokio::test] -async fn delayed_delay_level_0() { - time::pause(); - - for &i in &[1, 10, 60] { - let now = Instant::now(); - - time::delay_until(now + ms(i)).await; - - assert_elapsed!(now, i); - } -} - -#[tokio::test] -async fn sub_ms_delayed_delay() { - time::pause(); - - for _ in 0..5 { - let now = Instant::now(); - let deadline = now + ms(1) + Duration::new(0, 1); - - time::delay_until(deadline).await; - - assert_elapsed!(now, 1); - } -} - -#[tokio::test] -async fn delayed_delay_wrapping_level_0() { - time::pause(); - - time::delay_for(ms(5)).await; - - let now = Instant::now(); - time::delay_until(now + ms(60)).await; - - assert_elapsed!(now, 60); -} - -#[tokio::test] -async fn reset_future_delay_before_fire() { - time::pause(); - - let now = Instant::now(); - - let mut delay = task::spawn(time::delay_until(now + ms(100))); - assert_pending!(delay.poll()); - - let mut delay = delay.into_inner(); - - delay.reset(Instant::now() + ms(200)); - delay.await; - - assert_elapsed!(now, 200); -} - -#[tokio::test] -async fn reset_past_delay_before_turn() { - time::pause(); - - let now = Instant::now(); - - let mut delay = task::spawn(time::delay_until(now + ms(100))); - assert_pending!(delay.poll()); - - let mut delay = delay.into_inner(); - - delay.reset(now + ms(80)); - delay.await; - - assert_elapsed!(now, 80); -} - -#[tokio::test] -async fn reset_past_delay_before_fire() { - time::pause(); - - let now = Instant::now(); - - let mut delay = task::spawn(time::delay_until(now + ms(100))); - assert_pending!(delay.poll()); - - let mut delay = delay.into_inner(); - - time::delay_for(ms(10)).await; - - delay.reset(now + ms(80)); - delay.await; - - assert_elapsed!(now, 80); -} - -#[tokio::test] -async fn reset_future_delay_after_fire() { - time::pause(); - - let now = Instant::now(); - let mut delay = time::delay_until(now + ms(100)); - - (&mut delay).await; - assert_elapsed!(now, 100); - - delay.reset(now + ms(110)); - 
delay.await; - assert_elapsed!(now, 110); -} - -#[tokio::test] -async fn reset_delay_to_past() { - time::pause(); - - let now = Instant::now(); - - let mut delay = task::spawn(time::delay_until(now + ms(100))); - assert_pending!(delay.poll()); - - time::delay_for(ms(50)).await; - - assert!(!delay.is_woken()); - - delay.reset(now + ms(40)); - - assert!(delay.is_woken()); - - assert_ready!(delay.poll()); -} - -#[test] -#[should_panic] -fn creating_delay_outside_of_context() { - let now = Instant::now(); - - // This creates a delay outside of the context of a mock timer. This tests - // that it will panic. - let _fut = time::delay_until(now + ms(500)); -} - -#[should_panic] -#[tokio::test] -async fn greater_than_max() { - const YR_5: u64 = 5 * 365 * 24 * 60 * 60 * 1000; - - time::delay_until(Instant::now() + ms(YR_5)).await; -} - -const NUM_LEVELS: usize = 6; -const MAX_DURATION: u64 = (1 << (6 * NUM_LEVELS)) - 1; - -#[should_panic] -#[tokio::test] -async fn exactly_max() { - // TODO: this should not panic but `time::ms()` is acting up - time::delay_for(ms(MAX_DURATION)).await; -} - -#[tokio::test] -async fn no_out_of_bounds_close_to_max() { - time::pause(); - time::delay_for(ms(MAX_DURATION - 1)).await; -} - -fn ms(n: u64) -> Duration { - Duration::from_millis(n) -} diff --git a/third_party/rust/tokio-0.2.25/tests/time_interval.rs b/third_party/rust/tokio-0.2.25/tests/time_interval.rs deleted file mode 100644 index 1123681f492f..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/time_interval.rs +++ /dev/null @@ -1,66 +0,0 @@ -#![warn(rust_2018_idioms)] -#![cfg(feature = "full")] - -use tokio::time::{self, Duration, Instant}; -use tokio_test::{assert_pending, assert_ready_eq, task}; - -use std::task::Poll; - -#[tokio::test] -#[should_panic] -async fn interval_zero_duration() { - let _ = time::interval_at(Instant::now(), ms(0)); -} - -#[tokio::test] -async fn usage() { - time::pause(); - - let start = Instant::now(); - - // TODO: Skip this - time::advance(ms(1)).await; - - let mut i = task::spawn(time::interval_at(start, ms(300))); - - assert_ready_eq!(poll_next(&mut i), start); - assert_pending!(poll_next(&mut i)); - - time::advance(ms(100)).await; - assert_pending!(poll_next(&mut i)); - - time::advance(ms(200)).await; - assert_ready_eq!(poll_next(&mut i), start + ms(300)); - assert_pending!(poll_next(&mut i)); - - time::advance(ms(400)).await; - assert_ready_eq!(poll_next(&mut i), start + ms(600)); - assert_pending!(poll_next(&mut i)); - - time::advance(ms(500)).await; - assert_ready_eq!(poll_next(&mut i), start + ms(900)); - assert_ready_eq!(poll_next(&mut i), start + ms(1200)); - assert_pending!(poll_next(&mut i)); -} - -#[tokio::test] -async fn usage_stream() { - use tokio::stream::StreamExt; - - let start = Instant::now(); - let mut interval = time::interval(ms(10)); - - for _ in 0..3 { - interval.next().await.unwrap(); - } - - assert!(start.elapsed() > ms(20)); -} - -fn poll_next(interval: &mut task::Spawn) -> Poll { - interval.enter(|cx, mut interval| interval.poll_tick(cx)) -} - -fn ms(n: u64) -> Duration { - Duration::from_millis(n) -} diff --git a/third_party/rust/tokio-0.2.25/tests/time_rt.rs b/third_party/rust/tokio-0.2.25/tests/time_rt.rs deleted file mode 100644 index b739f1b2f68c..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/time_rt.rs +++ /dev/null @@ -1,93 +0,0 @@ -#![warn(rust_2018_idioms)] -#![cfg(feature = "full")] - -use tokio::time::*; - -use std::sync::mpsc; - -#[test] -fn timer_with_threaded_runtime() { - use tokio::runtime::Runtime; - - let rt = 
Runtime::new().unwrap(); - let (tx, rx) = mpsc::channel(); - - rt.spawn(async move { - let when = Instant::now() + Duration::from_millis(100); - - delay_until(when).await; - assert!(Instant::now() >= when); - - tx.send(()).unwrap(); - }); - - rx.recv().unwrap(); -} - -#[test] -fn timer_with_basic_scheduler() { - use tokio::runtime::Builder; - - let mut rt = Builder::new() - .basic_scheduler() - .enable_all() - .build() - .unwrap(); - let (tx, rx) = mpsc::channel(); - - rt.block_on(async move { - let when = Instant::now() + Duration::from_millis(100); - - delay_until(when).await; - assert!(Instant::now() >= when); - - tx.send(()).unwrap(); - }); - - rx.recv().unwrap(); -} - -#[tokio::test] -async fn starving() { - use std::future::Future; - use std::pin::Pin; - use std::task::{Context, Poll}; - - struct Starve<T: Future<Output = ()> + Unpin>(T, u64); - - impl<T: Future<Output = ()> + Unpin> Future for Starve<T> { - type Output = u64; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { - if Pin::new(&mut self.0).poll(cx).is_ready() { - return Poll::Ready(self.1); - } - - self.1 += 1; - - cx.waker().wake_by_ref(); - - Poll::Pending - } - } - - let when = Instant::now() + Duration::from_millis(20); - let starve = Starve(delay_until(when), 0); - - starve.await; - assert!(Instant::now() >= when); -} - -#[tokio::test] -async fn timeout_value() { - use tokio::sync::oneshot; - - let (_tx, rx) = oneshot::channel::<()>(); - - let now = Instant::now(); - let dur = Duration::from_millis(20); - - let res = timeout(dur, rx).await; - assert!(res.is_err()); - assert!(Instant::now() >= now + dur); -} diff --git a/third_party/rust/tokio-0.2.25/tests/time_timeout.rs b/third_party/rust/tokio-0.2.25/tests/time_timeout.rs deleted file mode 100644 index 4efcd8ca82fe..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/time_timeout.rs +++ /dev/null @@ -1,110 +0,0 @@ -#![warn(rust_2018_idioms)] -#![cfg(feature = "full")] - -use tokio::sync::oneshot; -use tokio::time::{self, timeout, timeout_at, Instant}; -use tokio_test::*; - -use futures::future::pending; -use std::time::Duration; - -#[tokio::test] -async fn simultaneous_deadline_future_completion() { - // Create a future that is immediately ready - let mut fut = task::spawn(timeout_at(Instant::now(), async {})); - - // Ready! - assert_ready_ok!(fut.poll()); -} - -#[tokio::test] -async fn completed_future_past_deadline() { - // Wrap it with a deadline - let mut fut = task::spawn(timeout_at(Instant::now() - ms(1000), async {})); - - // Ready! - assert_ready_ok!(fut.poll()); -} - -#[tokio::test] -async fn future_and_deadline_in_future() { - time::pause(); - - // Not yet complete - let (tx, rx) = oneshot::channel(); - - // Wrap it with a deadline - let mut fut = task::spawn(timeout_at(Instant::now() + ms(100), rx)); - - assert_pending!(fut.poll()); - - // Turn the timer, it runs for the elapsed time - time::advance(ms(90)).await; - - assert_pending!(fut.poll()); - - // Complete the future - tx.send(()).unwrap(); - assert!(fut.is_woken()); - - assert_ready_ok!(fut.poll()).unwrap(); -} - -#[tokio::test] -async fn future_and_timeout_in_future() { - time::pause(); - - // Not yet complete - let (tx, rx) = oneshot::channel(); - - // Wrap it with a deadline - let mut fut = task::spawn(timeout(ms(100), rx)); - - // Ready! 
- assert_pending!(fut.poll()); - - // Turn the timer, it runs for the elapsed time - time::advance(ms(90)).await; - - assert_pending!(fut.poll()); - - // Complete the future - tx.send(()).unwrap(); - - assert_ready_ok!(fut.poll()).unwrap(); -} - -#[tokio::test] -async fn deadline_now_elapses() { - use futures::future::pending; - - time::pause(); - - // Wrap it with a deadline - let mut fut = task::spawn(timeout_at(Instant::now(), pending::<()>())); - - // Factor in jitter - // TODO: don't require this - time::advance(ms(1)).await; - - assert_ready_err!(fut.poll()); -} - -#[tokio::test] -async fn deadline_future_elapses() { - time::pause(); - - // Wrap it with a deadline - let mut fut = task::spawn(timeout_at(Instant::now() + ms(300), pending::<()>())); - - assert_pending!(fut.poll()); - - time::advance(ms(301)).await; - - assert!(fut.is_woken()); - assert_ready_err!(fut.poll()); -} - -fn ms(n: u64) -> Duration { - Duration::from_millis(n) -} diff --git a/third_party/rust/tokio-0.2.25/tests/udp.rs b/third_party/rust/tokio-0.2.25/tests/udp.rs deleted file mode 100644 index 62a2234fa7c1..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/udp.rs +++ /dev/null @@ -1,120 +0,0 @@ -#![warn(rust_2018_idioms)] -#![cfg(feature = "full")] - -use tokio::net::UdpSocket; - -const MSG: &[u8] = b"hello"; -const MSG_LEN: usize = MSG.len(); - -#[tokio::test] -async fn send_recv() -> std::io::Result<()> { - let mut sender = UdpSocket::bind("127.0.0.1:0").await?; - let mut receiver = UdpSocket::bind("127.0.0.1:0").await?; - - sender.connect(receiver.local_addr()?).await?; - receiver.connect(sender.local_addr()?).await?; - - sender.send(MSG).await?; - - let mut recv_buf = [0u8; 32]; - let len = receiver.recv(&mut recv_buf[..]).await?; - - assert_eq!(&recv_buf[..len], MSG); - Ok(()) -} - -#[tokio::test] -async fn send_to_recv_from() -> std::io::Result<()> { - let mut sender = UdpSocket::bind("127.0.0.1:0").await?; - let mut receiver = UdpSocket::bind("127.0.0.1:0").await?; - - let receiver_addr = receiver.local_addr()?; - sender.send_to(MSG, &receiver_addr).await?; - - let mut recv_buf = [0u8; 32]; - let (len, addr) = receiver.recv_from(&mut recv_buf[..]).await?; - - assert_eq!(&recv_buf[..len], MSG); - assert_eq!(addr, sender.local_addr()?); - Ok(()) -} - -#[tokio::test] -async fn split() -> std::io::Result<()> { - let socket = UdpSocket::bind("127.0.0.1:0").await?; - let (mut r, mut s) = socket.split(); - - let addr = s.as_ref().local_addr()?; - tokio::spawn(async move { - s.send_to(MSG, &addr).await.unwrap(); - }); - let mut recv_buf = [0u8; 32]; - let (len, _) = r.recv_from(&mut recv_buf[..]).await?; - assert_eq!(&recv_buf[..len], MSG); - Ok(()) -} - -#[tokio::test] -async fn reunite() -> std::io::Result<()> { - let socket = UdpSocket::bind("127.0.0.1:0").await?; - let (s, r) = socket.split(); - assert!(s.reunite(r).is_ok()); - Ok(()) -} - -#[tokio::test] -async fn reunite_error() -> std::io::Result<()> { - let socket = UdpSocket::bind("127.0.0.1:0").await?; - let socket1 = UdpSocket::bind("127.0.0.1:0").await?; - let (s, _) = socket.split(); - let (_, r1) = socket1.split(); - assert!(s.reunite(r1).is_err()); - Ok(()) -} - -// # Note -// -// This test is purposely written such that each time `sender` sends data on -// the socket, `receiver` awaits the data. On Unix, it would be okay waiting -// until the end of the test to receive all the data. On Windows, this would -// **not** be okay because it's resources are completion based (via IOCP). 
-// If data is sent and not yet received, attempting to send more data will -// result in `ErrorKind::WouldBlock` until the first operation completes. -#[tokio::test] -async fn try_send_spawn() { - const MSG2: &[u8] = b"world!"; - const MSG2_LEN: usize = MSG2.len(); - - let sender = UdpSocket::bind("127.0.0.1:0").await.unwrap(); - let mut receiver = UdpSocket::bind("127.0.0.1:0").await.unwrap(); - - receiver - .connect(sender.local_addr().unwrap()) - .await - .unwrap(); - - let sent = &sender - .try_send_to(MSG, receiver.local_addr().unwrap()) - .unwrap(); - assert_eq!(sent, &MSG_LEN); - let mut buf = [0u8; 32]; - let mut received = receiver.recv(&mut buf[..]).await.unwrap(); - - sender - .connect(receiver.local_addr().unwrap()) - .await - .unwrap(); - let sent = &sender.try_send(MSG2).unwrap(); - assert_eq!(sent, &MSG2_LEN); - received += receiver.recv(&mut buf[..]).await.unwrap(); - - std::thread::spawn(move || { - let sent = &sender.try_send(MSG).unwrap(); - assert_eq!(sent, &MSG_LEN); - }) - .join() - .unwrap(); - received += receiver.recv(&mut buf[..]).await.unwrap(); - - assert_eq!(received, MSG_LEN * 2 + MSG2_LEN); -} diff --git a/third_party/rust/tokio-0.2.25/tests/uds_cred.rs b/third_party/rust/tokio-0.2.25/tests/uds_cred.rs deleted file mode 100644 index c02b2aee4a7a..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/uds_cred.rs +++ /dev/null @@ -1,30 +0,0 @@ -#![warn(rust_2018_idioms)] -#![cfg(feature = "full")] -#![cfg(all(unix, not(target_os = "dragonfly")))] - -use tokio::net::UnixStream; - -use libc::getegid; -use libc::geteuid; - -#[tokio::test] -#[cfg_attr( - target_os = "freebsd", - ignore = "Requires FreeBSD 12.0 or later. https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=176419" -)] -#[cfg_attr( - target_os = "netbsd", - ignore = "NetBSD does not support getpeereid() for sockets created by socketpair()" -)] -async fn test_socket_pair() { - let (a, b) = UnixStream::pair().unwrap(); - let cred_a = a.peer_cred().unwrap(); - let cred_b = b.peer_cred().unwrap(); - assert_eq!(cred_a, cred_b); - - let uid = unsafe { geteuid() }; - let gid = unsafe { getegid() }; - - assert_eq!(cred_a.uid, uid); - assert_eq!(cred_a.gid, gid); -} diff --git a/third_party/rust/tokio-0.2.25/tests/uds_datagram.rs b/third_party/rust/tokio-0.2.25/tests/uds_datagram.rs deleted file mode 100644 index d3c3535e7f4a..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/uds_datagram.rs +++ /dev/null @@ -1,133 +0,0 @@ -#![warn(rust_2018_idioms)] -#![cfg(feature = "full")] -#![cfg(unix)] - -use tokio::net::UnixDatagram; -use tokio::try_join; - -use std::io; - -async fn echo_server(mut socket: UnixDatagram) -> io::Result<()> { - let mut recv_buf = vec![0u8; 1024]; - loop { - let (len, peer_addr) = socket.recv_from(&mut recv_buf[..]).await?; - if let Some(path) = peer_addr.as_pathname() { - socket.send_to(&recv_buf[..len], path).await?; - } - } -} - -#[tokio::test] -async fn echo() -> io::Result<()> { - let dir = tempfile::tempdir().unwrap(); - let server_path = dir.path().join("server.sock"); - let client_path = dir.path().join("client.sock"); - - let server_socket = UnixDatagram::bind(server_path.clone())?; - - tokio::spawn(async move { - if let Err(e) = echo_server(server_socket).await { - eprintln!("Error in echo server: {}", e); - } - }); - - { - let mut socket = UnixDatagram::bind(&client_path).unwrap(); - socket.connect(server_path)?; - socket.send(b"ECHO").await?; - let mut recv_buf = [0u8; 16]; - let len = socket.recv(&mut recv_buf[..]).await?; - assert_eq!(&recv_buf[..len], b"ECHO"); - } - - 
Ok(()) -} - -// Even though we use sync non-blocking io we still need a reactor. -#[tokio::test] -async fn try_send_recv_never_block() -> io::Result<()> { - let mut recv_buf = [0u8; 16]; - let payload = b"PAYLOAD"; - let mut count = 0; - - let (mut dgram1, mut dgram2) = UnixDatagram::pair()?; - - // Send until we hit the OS `net.unix.max_dgram_qlen`. - loop { - match dgram1.try_send(payload) { - Err(err) => match err.kind() { - io::ErrorKind::WouldBlock | io::ErrorKind::Other => break, - _ => unreachable!("unexpected error {:?}", err), - }, - Ok(len) => { - assert_eq!(len, payload.len()); - } - } - count += 1; - } - - // Read every dgram we sent. - while count > 0 { - let len = dgram2.try_recv(&mut recv_buf[..])?; - assert_eq!(len, payload.len()); - assert_eq!(payload, &recv_buf[..len]); - count -= 1; - } - - let err = dgram2.try_recv(&mut recv_buf[..]).unwrap_err(); - match err.kind() { - io::ErrorKind::WouldBlock => (), - _ => unreachable!("unexpected error {:?}", err), - } - - Ok(()) -} - -#[tokio::test] -async fn split() -> std::io::Result<()> { - let dir = tempfile::tempdir().unwrap(); - let path = dir.path().join("split.sock"); - let socket = UnixDatagram::bind(path.clone())?; - let (mut r, mut s) = socket.into_split(); - - let msg = b"hello"; - let ((), ()) = try_join! { - async { - s.send_to(msg, path).await?; - io::Result::Ok(()) - }, - async { - let mut recv_buf = [0u8; 32]; - let (len, _) = r.recv_from(&mut recv_buf[..]).await?; - assert_eq!(&recv_buf[..len], msg); - Ok(()) - }, - }?; - - Ok(()) -} - -#[tokio::test] -async fn reunite() -> std::io::Result<()> { - let dir = tempfile::tempdir().unwrap(); - let path = dir.path().join("reunite.sock"); - let socket = UnixDatagram::bind(path)?; - let (s, r) = socket.into_split(); - assert!(s.reunite(r).is_ok()); - Ok(()) -} - -#[tokio::test] -async fn reunite_error() -> std::io::Result<()> { - let dir = tempfile::tempdir().unwrap(); - let path = dir.path().join("reunit.sock"); - let dir = tempfile::tempdir().unwrap(); - let path1 = dir.path().join("reunit.sock"); - let socket = UnixDatagram::bind(path)?; - let socket1 = UnixDatagram::bind(path1)?; - - let (s, _) = socket.into_split(); - let (_, r1) = socket1.into_split(); - assert!(s.reunite(r1).is_err()); - Ok(()) -} diff --git a/third_party/rust/tokio-0.2.25/tests/uds_split.rs b/third_party/rust/tokio-0.2.25/tests/uds_split.rs deleted file mode 100644 index 76ff4613cd21..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/uds_split.rs +++ /dev/null @@ -1,43 +0,0 @@ -#![warn(rust_2018_idioms)] -#![cfg(feature = "full")] -#![cfg(unix)] - -use tokio::net::UnixStream; -use tokio::prelude::*; - -/// Checks that `UnixStream` can be split into a read half and a write half using -/// `UnixStream::split` and `UnixStream::split_mut`. -/// -/// Verifies that the implementation of `AsyncWrite::poll_shutdown` shutdowns the stream for -/// writing by reading to the end of stream on the other side of the connection. 
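For orientation only: the shutdown-then-EOF behaviour described in the doc comment above can be sketched against the tokio 1.x API that this dependency update moves toward. The snippet below is an illustrative sketch, not part of the vendored test; it assumes a Unix target and a tokio build with the `full` feature set.

```rust
// Illustrative sketch (not from this patch): shutting down the write
// direction of one end of a UnixStream pair is observed as EOF by the peer.
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::UnixStream;

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let (mut a, mut b) = UnixStream::pair()?;

    // Write a payload, then shut down `a` for writing.
    a.write_all(b"ping").await?;
    a.shutdown().await?;

    // The peer reads the payload and then sees end-of-stream,
    // so read_to_end returns instead of waiting forever.
    let mut buf = Vec::new();
    b.read_to_end(&mut buf).await?;
    assert_eq!(buf, b"ping");
    Ok(())
}
```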
-#[tokio::test] -async fn split() -> std::io::Result<()> { - let (mut a, mut b) = UnixStream::pair()?; - - let (mut a_read, mut a_write) = a.split(); - let (mut b_read, mut b_write) = b.split(); - - let (a_response, b_response) = futures::future::try_join( - send_recv_all(&mut a_read, &mut a_write, b"A"), - send_recv_all(&mut b_read, &mut b_write, b"B"), - ) - .await?; - - assert_eq!(a_response, b"B"); - assert_eq!(b_response, b"A"); - - Ok(()) -} - -async fn send_recv_all( - read: &mut (dyn AsyncRead + Unpin), - write: &mut (dyn AsyncWrite + Unpin), - input: &[u8], -) -> std::io::Result> { - write.write_all(input).await?; - write.shutdown().await?; - - let mut output = Vec::new(); - read.read_to_end(&mut output).await?; - Ok(output) -} diff --git a/third_party/rust/tokio-0.2.25/tests/uds_stream.rs b/third_party/rust/tokio-0.2.25/tests/uds_stream.rs deleted file mode 100644 index 29f118a2d480..000000000000 --- a/third_party/rust/tokio-0.2.25/tests/uds_stream.rs +++ /dev/null @@ -1,58 +0,0 @@ -#![cfg(feature = "full")] -#![warn(rust_2018_idioms)] -#![cfg(unix)] - -use tokio::io::{AsyncReadExt, AsyncWriteExt}; -use tokio::net::{UnixListener, UnixStream}; - -use futures::future::try_join; - -#[tokio::test] -async fn accept_read_write() -> std::io::Result<()> { - let dir = tempfile::Builder::new() - .prefix("tokio-uds-tests") - .tempdir() - .unwrap(); - let sock_path = dir.path().join("connect.sock"); - - let mut listener = UnixListener::bind(&sock_path)?; - - let accept = listener.accept(); - let connect = UnixStream::connect(&sock_path); - let ((mut server, _), mut client) = try_join(accept, connect).await?; - - // Write to the client. TODO: Switch to write_all. - let write_len = client.write(b"hello").await?; - assert_eq!(write_len, 5); - drop(client); - // Read from the server. TODO: Switch to read_to_end. - let mut buf = [0u8; 5]; - server.read_exact(&mut buf).await?; - assert_eq!(&buf, b"hello"); - let len = server.read(&mut buf).await?; - assert_eq!(len, 0); - Ok(()) -} - -#[tokio::test] -async fn shutdown() -> std::io::Result<()> { - let dir = tempfile::Builder::new() - .prefix("tokio-uds-tests") - .tempdir() - .unwrap(); - let sock_path = dir.path().join("connect.sock"); - - let mut listener = UnixListener::bind(&sock_path)?; - - let accept = listener.accept(); - let connect = UnixStream::connect(&sock_path); - let ((mut server, _), mut client) = try_join(accept, connect).await?; - - // Shut down the client - AsyncWriteExt::shutdown(&mut client).await?; - // Read from the server should return 0 to indicate the channel has been closed. 
- let mut buf = [0u8; 1]; - let n = server.read(&mut buf).await?; - assert_eq!(n, 0); - Ok(()) -} diff --git a/third_party/rust/tokio-stream/.cargo-checksum.json b/third_party/rust/tokio-stream/.cargo-checksum.json new file mode 100644 index 000000000000..47f91c4528e0 --- /dev/null +++ b/third_party/rust/tokio-stream/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"CHANGELOG.md":"3e8a8c9f24bc9b003ca0b7a2f82ec65cb52b43071d35a8fa119cff567ab70754","Cargo.toml":"648be8805c864fc36e1f808d9a85670c5d3261f1278458d5aa1571ef690d6c97","LICENSE":"d4ecf952201c14fd13203c2aa64dee75d81271e3d7e3ca0e8aa0e59b9aeeb9c6","src/empty.rs":"292148fe9fe821b7a40200b87f3be63e54e881fa031fd1705ce7c2e264457f6b","src/iter.rs":"b2db4e9075cc5254aed52d7b9a93d56adb0e4a717fe5819372314b5fb1999ff4","src/lib.rs":"f9e3316124cf66629be4ded3c0b7c7dfe99190fea582a5f72b1081a8aa56d3ea","src/macros.rs":"8073292109c2f923906966cdeddc6efb56c356a3427deeeabe8205624850b6c4","src/once.rs":"d3bc03c25c20de945355302b6fb8c116e6dd54d532e5c1fdb4aa848736bb6b41","src/pending.rs":"daeac87ee04243da5ab4996d783a116f5bc9028c6a7cadc66ebf05155fece26a","src/stream_ext.rs":"b5a0c92deb0f461db32e3a11576752f5cf65e3d4efe9935eeeb6991365c49e15","src/stream_ext/all.rs":"de7b9800ee6c6a3927d7efc3519e56a509b77351fa4c5d7bc3d4b87137088a46","src/stream_ext/any.rs":"4a0e55b517b2fa36f1366d4e98a42bb25b5f861488e6a1a93cf4936df7a0161d","src/stream_ext/chain.rs":"a697b878f50a566c62e6bcf7f2d75c332bec39aef042f8a797a6c307d6057193","src/stream_ext/collect.rs":"6b1e764a05b77832cc2c92b41ab998f342ee0b03542413b77e53ebec67dbccf7","src/stream_ext/filter.rs":"1972b359807eed2958954221a0fde46822c15caed1cc2dcbd51d364dc91143ec","src/stream_ext/filter_map.rs":"08236304228bf1747cc378ed36bc3e5ca9c98cfe7b2bfeebed01d6301d9dbc8a","src/stream_ext/fold.rs":"5bdece90730309d44b5092a9114afa5399a5f52d1c3c5895396b971ec30a8504","src/stream_ext/fuse.rs":"b62ea9d293db4373b7a97c78cfd070a7ab691e31d597a7005c5e274d913be625","src/stream_ext/map.rs":"ce3f1a17665d9524c599aa8aa94109e4077941b75d5836495def653db5f64f33","src/stream_ext/merge.rs":"4cc0edcb30b9cc0752709c444c17bf84255b9695d5989ec50159eade5d3ab942","src/stream_ext/next.rs":"d3ded67add46f226bc52c9b5a8cefd90ee188191c05fb9f34c749ef4c163690a","src/stream_ext/skip.rs":"88cea5a1c314db260abf48cd4ac8e4e4aea23e46bec4cdb82413803c03962686","src/stream_ext/skip_while.rs":"bc571cc68406ab2bb51ef83359274fa5cb0356ca60f24fe5a17a01bc04c38632","src/stream_ext/take.rs":"c7dd38ca72d4162481869e9cd8226594fe3deefd21dee4d0befa1818337bbc3a","src/stream_ext/take_while.rs":"3a3122116a8743f66199a71f733f7c3bfce77d084a752c73b4cc54a3d26365d8","src/stream_ext/throttle.rs":"b14bd343e61b370f5cec828ad7093aecf777d5e7035b5ee52263e98e184b34a7","src/stream_ext/timeout.rs":"6b637972001e95f97afb8c29eefa4b3e1db43d0ba6ada782b14dff450ac55536","src/stream_ext/try_next.rs":"98370f3775a92d2c0afe887b3dc8d65de439adef31795cefe2ee6c2b601069b5","src/stream_map.rs":"643617af99537b5355a0d63b5ae52185239c1876b792164349c21afed3d2ed2f","src/wrappers.rs":"c779121d9fc729c9e9e6275de506ce41e9405cd82627589b54e6e28d911676d1","src/wrappers/broadcast.rs":"d14082124401f43a8dd70954af08fddf65e64eefd9dfdef3c49cdc1095113ba4","src/wrappers/interval.rs":"f9abd6ef4abb0bd61064eac2141c2f65b31351dd14b8403625d4471bb7d86040","src/wrappers/lines.rs":"75bd75aa5d22b9eba49c35c7d4dde7e6c5546be0246523a6fa75bd81a5dd2d6c","src/wrappers/mpsc_bounded.rs":"a56b727831246075689124256f9da3c311d362fd39b5f5350dfd123f45ab600e","src/wrappers/mpsc_unbounded.rs":"92d595d15ed1d21710a6c15aee95a7889c3437534cdfcda08325f26c19c6a621","src/wrappers/read_dir.rs":"e2556f
80104b424331e045e28417abcc09b9724444c812244516d5213f367aa8","src/wrappers/signal_unix.rs":"44862e50828bb22c7bdfe11fa39e46589e67e27f36de11a8be0a813c8296a4ca","src/wrappers/signal_windows.rs":"9facd73b3692cfd2e7d439be8576351498dbe1789e54eb1d14f71ccdb28da198","src/wrappers/split.rs":"c0db118b774697d1a84a02c53f2ef1e1e597399aa8afda6a4c9008408aa946d1","src/wrappers/tcp_listener.rs":"0a420994f716d62ea5536904adb5ab5f30fd322359651fcc86ae7368e745d98a","src/wrappers/unix_listener.rs":"e206aa40af1c5df68475f045d3cfce7b3f9922db8628b0530aa71ced991e0261","src/wrappers/watch.rs":"5db7512423e721f721f9b694831a94b422e01cf50ebbc33fe86af1a91205799b","tests/async_send_sync.rs":"1fbbf36e28058ce249f5c1d95a9b387f8b2beed6914d0f51f212e9498076ba14","tests/stream_chain.rs":"79415d868cac8737dc4da095cf8a7aaf11e628f0daa26fe9322c692db0a3e118","tests/stream_collect.rs":"672597e3a00df5283702ab1c7b98979bb2db397472f639e677d4e83c67ef1050","tests/stream_empty.rs":"4a79c50ccf8c95d3f41fc8eb02720318f3bdd84372530a826b6edfd4b94b953b","tests/stream_fuse.rs":"8975326b01eb0eabf017be3cbd683388970a60f6d9d0d6e7fdc9ae657b2f2ac7","tests/stream_iter.rs":"664fed872dba90449acd992c9840e45cda110273836b700f1f11bf1210d6b0a4","tests/stream_merge.rs":"e7ba9ac03fecd05061aa48ca17c50472ec358693e5d8e07e478e3c5d676af0bd","tests/stream_once.rs":"4ece414df7a39dfe09336cfb2802ea85e41cf49d42f23e35bd3646907be05b0a","tests/stream_pending.rs":"c43d970af93c79ddf11657ff067bcb3a221e533109a03afe1cccd1bb4b3a1ffe","tests/stream_stream_map.rs":"b2440d6dbef5665c5eb76c76f61ae8d5be3d5c1d87ad9f144c398b6db4f3d5c5","tests/stream_timeout.rs":"4b09967171a3f427de9fc435c93dc37fb43dc289c877716e06744baa98279faa","tests/support/mpsc.rs":"2ac4d35619b5f418fa4c7ae4f5120137e1c8541937204d027f63088410a44ca7","tests/time_throttle.rs":"1c2add848fc5dc1d95781c19e14630f8a13ffc66c089a2607de5c9896208c2bc","tests/watch.rs":"2ff8ca63f090ec1f0d1a58cf0932dc7d2a0c0a7326bc6ec5d0e38fd7892dd4e7"},"package":"50145484efff8818b5ccd256697f36863f587da82cf8b409c53adf1e840798e3"} \ No newline at end of file diff --git a/third_party/rust/tokio-stream/CHANGELOG.md b/third_party/rust/tokio-stream/CHANGELOG.md new file mode 100644 index 000000000000..4ef469ef0a40 --- /dev/null +++ b/third_party/rust/tokio-stream/CHANGELOG.md @@ -0,0 +1,89 @@ +# 0.1.8 (October 29, 2021) + +- stream: add `From>` impl for receiver streams ([#4080]) +- stream: impl `FromIterator` for `StreamMap` ([#4052]) +- signal: make windows docs for signal module show up on unix builds ([#3770]) + +[#3770]: https://github.com/tokio-rs/tokio/pull/3770 +[#4052]: https://github.com/tokio-rs/tokio/pull/4052 +[#4080]: https://github.com/tokio-rs/tokio/pull/4080 + +# 0.1.7 (July 7, 2021) + +### Fixed + +- sync: fix watch wrapper ([#3914]) +- time: fix `Timeout::size_hint` ([#3902]) + +[#3902]: https://github.com/tokio-rs/tokio/pull/3902 +[#3914]: https://github.com/tokio-rs/tokio/pull/3914 + +# 0.1.6 (May 14, 2021) + +### Added + +- stream: implement `Error` and `Display` for `BroadcastStreamRecvError` ([#3745]) + +### Fixed + +- stream: avoid yielding in `AllFuture` and `AnyFuture` ([#3625]) + +[#3745]: https://github.com/tokio-rs/tokio/pull/3745 +[#3625]: https://github.com/tokio-rs/tokio/pull/3625 + +# 0.1.5 (March 20, 2021) + +### Fixed + +- stream: documentation note for throttle `Unpin` ([#3600]) + +[#3600]: https://github.com/tokio-rs/tokio/pull/3600 + +# 0.1.4 (March 9, 2021) + +Added + +- signal: add `Signal` wrapper ([#3510]) + +Fixed + +- stream: remove duplicate `doc_cfg` declaration ([#3561]) +- sync: yield initial value in `WatchStream` 
([#3576]) + +[#3510]: https://github.com/tokio-rs/tokio/pull/3510 +[#3561]: https://github.com/tokio-rs/tokio/pull/3561 +[#3576]: https://github.com/tokio-rs/tokio/pull/3576 + +# 0.1.3 (February 5, 2021) + +Added + + - sync: add wrapper for broadcast and watch ([#3384], [#3504]) + +[#3384]: https://github.com/tokio-rs/tokio/pull/3384 +[#3504]: https://github.com/tokio-rs/tokio/pull/3504 + +# 0.1.2 (January 12, 2021) + +Fixed + + - docs: fix some wrappers missing in documentation ([#3378]) + +[#3378]: https://github.com/tokio-rs/tokio/pull/3378 + +# 0.1.1 (January 4, 2021) + +Added + + - add `Stream` wrappers ([#3343]) + +Fixed + + - move `async-stream` to `dev-dependencies` ([#3366]) + +[#3366]: https://github.com/tokio-rs/tokio/pull/3366 +[#3343]: https://github.com/tokio-rs/tokio/pull/3343 + +# 0.1.0 (December 23, 2020) + + - Initial release diff --git a/third_party/rust/tokio-stream/Cargo.toml b/third_party/rust/tokio-stream/Cargo.toml new file mode 100644 index 000000000000..699d94a8355e --- /dev/null +++ b/third_party/rust/tokio-stream/Cargo.toml @@ -0,0 +1,61 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2018" +name = "tokio-stream" +version = "0.1.8" +authors = ["Tokio Contributors "] +description = "Utilities to work with `Stream` and `tokio`.\n" +homepage = "https://tokio.rs" +documentation = "https://docs.rs/tokio-stream/0.1.8/tokio_stream" +categories = ["asynchronous"] +license = "MIT" +repository = "https://github.com/tokio-rs/tokio" +[package.metadata.docs.rs] +all-features = true +rustc-args = ["--cfg", "docsrs"] +rustdoc-args = ["--cfg", "docsrs"] +[dependencies.futures-core] +version = "0.3.0" + +[dependencies.pin-project-lite] +version = "0.2.0" + +[dependencies.tokio] +version = "1.8.0" +features = ["sync"] + +[dependencies.tokio-util] +version = "0.6.3" +optional = true +[dev-dependencies.async-stream] +version = "0.3" + +[dev-dependencies.futures] +version = "0.3" +default-features = false + +[dev-dependencies.proptest] +version = "1" + +[dev-dependencies.tokio] +version = "1.2.0" +features = ["full", "test-util"] + +[features] +default = ["time"] +fs = ["tokio/fs"] +io-util = ["tokio/io-util"] +net = ["tokio/net"] +signal = ["tokio/signal"] +sync = ["tokio/sync", "tokio-util"] +time = ["tokio/time"] diff --git a/third_party/rust/bytes-0.5.6/LICENSE b/third_party/rust/tokio-stream/LICENSE similarity index 96% rename from third_party/rust/bytes-0.5.6/LICENSE rename to third_party/rust/tokio-stream/LICENSE index 58fb29a12384..ffa38bb61cc0 100644 --- a/third_party/rust/bytes-0.5.6/LICENSE +++ b/third_party/rust/tokio-stream/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2018 Carl Lerche +Copyright (c) 2021 Tokio Contributors Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated diff --git a/third_party/rust/tokio-0.2.25/src/stream/empty.rs b/third_party/rust/tokio-stream/src/empty.rs similarity index 93% rename from third_party/rust/tokio-0.2.25/src/stream/empty.rs rename to third_party/rust/tokio-stream/src/empty.rs index 2f56ac6cad35..965dcf5da7a5 100644 --- 
a/third_party/rust/tokio-0.2.25/src/stream/empty.rs +++ b/third_party/rust/tokio-stream/src/empty.rs @@ -1,4 +1,4 @@ -use crate::stream::Stream; +use crate::Stream; use core::marker::PhantomData; use core::pin::Pin; @@ -24,7 +24,7 @@ unsafe impl Sync for Empty {} /// Basic usage: /// /// ``` -/// use tokio::stream::{self, StreamExt}; +/// use tokio_stream::{self as stream, StreamExt}; /// /// #[tokio::main] /// async fn main() { diff --git a/third_party/rust/tokio-0.2.25/src/stream/iter.rs b/third_party/rust/tokio-stream/src/iter.rs similarity index 74% rename from third_party/rust/tokio-0.2.25/src/stream/iter.rs rename to third_party/rust/tokio-stream/src/iter.rs index bc0388a14425..128be616fc13 100644 --- a/third_party/rust/tokio-0.2.25/src/stream/iter.rs +++ b/third_party/rust/tokio-stream/src/iter.rs @@ -1,4 +1,4 @@ -use crate::stream::Stream; +use crate::Stream; use core::pin::Pin; use core::task::{Context, Poll}; @@ -8,6 +8,7 @@ use core::task::{Context, Poll}; #[must_use = "streams do nothing unless polled"] pub struct Iter { iter: I, + yield_amt: usize, } impl Unpin for Iter {} @@ -20,7 +21,7 @@ impl Unpin for Iter {} /// /// ``` /// # async fn dox() { -/// use tokio::stream::{self, StreamExt}; +/// use tokio_stream::{self as stream, StreamExt}; /// /// let mut stream = stream::iter(vec![17, 19]); /// @@ -35,6 +36,7 @@ where { Iter { iter: i.into_iter(), + yield_amt: 0, } } @@ -45,9 +47,18 @@ where type Item = I::Item; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let coop = ready!(crate::coop::poll_proceed(cx)); - coop.made_progress(); - Poll::Ready(self.iter.next()) + // TODO: add coop back + if self.yield_amt >= 32 { + self.yield_amt = 0; + + cx.waker().wake_by_ref(); + + Poll::Pending + } else { + self.yield_amt += 1; + + Poll::Ready(self.iter.next()) + } } fn size_hint(&self) -> (usize, Option) { diff --git a/third_party/rust/tokio-stream/src/lib.rs b/third_party/rust/tokio-stream/src/lib.rs new file mode 100644 index 000000000000..b7f232fdadcc --- /dev/null +++ b/third_party/rust/tokio-stream/src/lib.rs @@ -0,0 +1,98 @@ +#![allow( + clippy::cognitive_complexity, + clippy::large_enum_variant, + clippy::needless_doctest_main +)] +#![warn( + missing_debug_implementations, + missing_docs, + rust_2018_idioms, + unreachable_pub +)] +#![cfg_attr(docsrs, feature(doc_cfg))] +#![cfg_attr(docsrs, deny(rustdoc::broken_intra_doc_links))] +#![doc(test( + no_crate_inject, + attr(deny(warnings, rust_2018_idioms), allow(dead_code, unused_variables)) +))] + +//! Stream utilities for Tokio. +//! +//! A `Stream` is an asynchronous sequence of values. It can be thought of as +//! an asynchronous version of the standard library's `Iterator` trait. +//! +//! This crate provides helpers to work with them. For examples of usage and a more in-depth +//! description of streams you can also refer to the [streams +//! tutorial](https://tokio.rs/tokio/tutorial/streams) on the tokio website. +//! +//! # Iterating over a Stream +//! +//! Due to similarities with the standard library's `Iterator` trait, some new +//! users may assume that they can use `for in` syntax to iterate over a +//! `Stream`, but this is unfortunately not possible. Instead, you can use a +//! `while let` loop as follows: +//! +//! ```rust +//! use tokio_stream::{self as stream, StreamExt}; +//! +//! #[tokio::main] +//! async fn main() { +//! let mut stream = stream::iter(vec![0, 1, 2]); +//! +//! while let Some(value) = stream.next().await { +//! println!("Got {}", value); +//! } +//! } +//! ``` +//! +//! 
# Returning a Stream from a function +//! +//! A common way to stream values from a function is to pass in the sender +//! half of a channel and use the receiver as the stream. This requires awaiting +//! both futures to ensure progress is made. Another alternative is the +//! [async-stream] crate, which contains macros that provide a `yield` keyword +//! and allow you to return an `impl Stream`. +//! +//! [async-stream]: https://docs.rs/async-stream +//! +//! # Conversion to and from AsyncRead/AsyncWrite +//! +//! It is often desirable to convert a `Stream` into an [`AsyncRead`], +//! especially when dealing with plaintext formats streamed over the network. +//! The opposite conversion from an [`AsyncRead`] into a `Stream` is also +//! another commonly required feature. To enable these conversions, +//! [`tokio-util`] provides the [`StreamReader`] and [`ReaderStream`] +//! types when the io feature is enabled. +//! +//! [`tokio-util`]: https://docs.rs/tokio-util/0.4/tokio_util/codec/index.html +//! [`tokio::io`]: https://docs.rs/tokio/1.0/tokio/io/index.html +//! [`AsyncRead`]: https://docs.rs/tokio/1.0/tokio/io/trait.AsyncRead.html +//! [`AsyncWrite`]: https://docs.rs/tokio/1.0/tokio/io/trait.AsyncWrite.html +//! [`ReaderStream`]: https://docs.rs/tokio-util/0.4/tokio_util/io/struct.ReaderStream.html +//! [`StreamReader`]: https://docs.rs/tokio-util/0.4/tokio_util/io/struct.StreamReader.html + +#[macro_use] +mod macros; + +pub mod wrappers; + +mod stream_ext; +pub use stream_ext::{collect::FromStream, StreamExt}; + +mod empty; +pub use empty::{empty, Empty}; + +mod iter; +pub use iter::{iter, Iter}; + +mod once; +pub use once::{once, Once}; + +mod pending; +pub use pending::{pending, Pending}; + +mod stream_map; +pub use stream_map::StreamMap; + +#[doc(no_inline)] +pub use futures_core::Stream; diff --git a/third_party/rust/tokio-stream/src/macros.rs b/third_party/rust/tokio-stream/src/macros.rs new file mode 100644 index 000000000000..1e3b61bac729 --- /dev/null +++ b/third_party/rust/tokio-stream/src/macros.rs @@ -0,0 +1,68 @@ +macro_rules! cfg_fs { + ($($item:item)*) => { + $( + #[cfg(feature = "fs")] + #[cfg_attr(docsrs, doc(cfg(feature = "fs")))] + $item + )* + } +} + +macro_rules! cfg_io_util { + ($($item:item)*) => { + $( + #[cfg(feature = "io-util")] + #[cfg_attr(docsrs, doc(cfg(feature = "io-util")))] + $item + )* + } +} + +macro_rules! cfg_net { + ($($item:item)*) => { + $( + #[cfg(feature = "net")] + #[cfg_attr(docsrs, doc(cfg(feature = "net")))] + $item + )* + } +} + +macro_rules! cfg_time { + ($($item:item)*) => { + $( + #[cfg(feature = "time")] + #[cfg_attr(docsrs, doc(cfg(feature = "time")))] + $item + )* + } +} + +macro_rules! cfg_sync { + ($($item:item)*) => { + $( + #[cfg(feature = "sync")] + #[cfg_attr(docsrs, doc(cfg(feature = "sync")))] + $item + )* + } +} + +macro_rules! cfg_signal { + ($($item:item)*) => { + $( + #[cfg(feature = "signal")] + #[cfg_attr(docsrs, doc(cfg(feature = "signal")))] + $item + )* + } +} + +macro_rules! ready { + ($e:expr $(,)?) 
=> { + match $e { + std::task::Poll::Ready(t) => t, + std::task::Poll::Pending => return std::task::Poll::Pending, + } + }; +} diff --git a/third_party/rust/tokio-0.2.25/src/stream/once.rs b/third_party/rust/tokio-stream/src/once.rs similarity index 88% rename from third_party/rust/tokio-0.2.25/src/stream/once.rs rename to third_party/rust/tokio-stream/src/once.rs index 7fe204cc127f..04b4c052b848 100644 --- a/third_party/rust/tokio-0.2.25/src/stream/once.rs +++ b/third_party/rust/tokio-stream/src/once.rs @@ -1,4 +1,4 @@ -use crate::stream::{self, Iter, Stream}; +use crate::{Iter, Stream}; use core::option; use core::pin::Pin; @@ -20,7 +20,7 @@ impl Unpin for Once {} /// # Examples /// /// ``` -/// use tokio::stream::{self, StreamExt}; +/// use tokio_stream::{self as stream, StreamExt}; /// /// #[tokio::main] /// async fn main() { @@ -35,7 +35,7 @@ impl Unpin for Once {} /// ``` pub fn once(value: T) -> Once { Once { - iter: stream::iter(Some(value).into_iter()), + iter: crate::iter(Some(value).into_iter()), } } diff --git a/third_party/rust/tokio-0.2.25/src/stream/pending.rs b/third_party/rust/tokio-stream/src/pending.rs similarity index 89% rename from third_party/rust/tokio-0.2.25/src/stream/pending.rs rename to third_party/rust/tokio-stream/src/pending.rs index 21224c385969..b50fd3335417 100644 --- a/third_party/rust/tokio-0.2.25/src/stream/pending.rs +++ b/third_party/rust/tokio-stream/src/pending.rs @@ -1,4 +1,4 @@ -use crate::stream::Stream; +use crate::Stream; use core::marker::PhantomData; use core::pin::Pin; @@ -16,7 +16,7 @@ unsafe impl Sync for Pending {} /// Creates a stream that is never ready /// /// The returned stream is never ready. Attempting to call -/// [`next()`](crate::stream::StreamExt::next) will never complete. Use +/// [`next()`](crate::StreamExt::next) will never complete. Use /// [`stream::empty()`](super::empty()) to obtain a stream that is is /// immediately empty but returns no values. /// @@ -25,7 +25,7 @@ unsafe impl Sync for Pending {} /// Basic usage: /// /// ```no_run -/// use tokio::stream::{self, StreamExt}; +/// use tokio_stream::{self as stream, StreamExt}; /// /// #[tokio::main] /// async fn main() { diff --git a/third_party/rust/tokio-0.2.25/src/stream/mod.rs b/third_party/rust/tokio-stream/src/stream_ext.rs similarity index 82% rename from third_party/rust/tokio-0.2.25/src/stream/mod.rs rename to third_party/rust/tokio-stream/src/stream_ext.rs index 7b061efeb69f..1157c9ee3535 100644 --- a/third_party/rust/tokio-0.2.25/src/stream/mod.rs +++ b/third_party/rust/tokio-stream/src/stream_ext.rs @@ -1,8 +1,4 @@ -//! Stream utilities for Tokio. -//! -//! A `Stream` is an asynchronous sequence of values. It can be thought of as an asynchronous version of the standard library's `Iterator` trait. -//! -//! This module provides helpers to work with them. 
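The `ready!` macro vendored in `src/macros.rs` above is the usual shorthand for unwrapping `Poll::Ready` and early-returning `Poll::Pending`. A minimal sketch of the pattern it replaces when hand-writing `poll_next`, using a hypothetical `Double` adapter (not part of the crate); it assumes futures-core plus tokio-stream and a tokio runtime for the driver:

```rust
use std::pin::Pin;
use std::task::{Context, Poll};

use futures_core::Stream;

// Hypothetical adapter that doubles every item of an inner `u64` stream.
struct Double<S> {
    inner: S,
}

impl<S> Stream for Double<S>
where
    S: Stream<Item = u64> + Unpin,
{
    type Item = u64;

    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<u64>> {
        // `Double<S>` is `Unpin` whenever `S` is, so `get_mut` is available here.
        let this = self.get_mut();
        // This match is exactly what `ready!(...)` expands to:
        let item = match Pin::new(&mut this.inner).poll_next(cx) {
            Poll::Ready(item) => item,             // unwrap the ready value
            Poll::Pending => return Poll::Pending, // propagate "not ready yet"
        };
        Poll::Ready(item.map(|n| n * 2))
    }
}

#[tokio::main]
async fn main() {
    use tokio_stream::StreamExt;

    let mut doubled = Double {
        inner: tokio_stream::iter(vec![1u64, 2, 3]),
    };
    assert_eq!(doubled.next().await, Some(2));
    assert_eq!(doubled.next().await, Some(4));
}
```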
+use futures_core::Stream; mod all; use all::AllFuture; @@ -13,12 +9,8 @@ use any::AnyFuture; mod chain; use chain::Chain; -mod collect; -use collect::Collect; -pub use collect::FromStream; - -mod empty; -pub use empty::{empty, Empty}; +pub(crate) mod collect; +use collect::{Collect, FromStream}; mod filter; use filter::Filter; @@ -32,9 +24,6 @@ use fold::FoldFuture; mod fuse; use fuse::Fuse; -mod iter; -pub use iter::{iter, Iter}; - mod map; use map::Map; @@ -44,15 +33,6 @@ use merge::Merge; mod next; use next::Next; -mod once; -pub use once::{once, Once}; - -mod pending; -pub use pending::{pending, Pending}; - -mod stream_map; -pub use stream_map::StreamMap; - mod skip; use skip::Skip; @@ -71,13 +51,44 @@ use take_while::TakeWhile; cfg_time! { mod timeout; use timeout::Timeout; - use std::time::Duration; + use tokio::time::Duration; + mod throttle; + use throttle::{throttle, Throttle}; } -pub use futures_core::Stream; - -/// An extension trait for `Stream`s that provides a variety of convenient -/// combinator functions. +/// An extension trait for the [`Stream`] trait that provides a variety of +/// convenient combinator functions. +/// +/// Be aware that the `Stream` trait in Tokio is a re-export of the trait found +/// in the [futures] crate, however both Tokio and futures provide separate +/// `StreamExt` utility traits, and some utilities are only available on one of +/// these traits. Click [here][futures-StreamExt] to see the other `StreamExt` +/// trait in the futures crate. +/// +/// If you need utilities from both `StreamExt` traits, you should prefer to +/// import one of them, and use the other through the fully qualified call +/// syntax. For example: +/// ``` +/// // import one of the traits: +/// use futures::stream::StreamExt; +/// # #[tokio::main(flavor = "current_thread")] +/// # async fn main() { +/// +/// let a = tokio_stream::iter(vec![1, 3, 5]); +/// let b = tokio_stream::iter(vec![2, 4, 6]); +/// +/// // use the fully qualified call syntax for the other trait: +/// let merged = tokio_stream::StreamExt::merge(a, b); +/// +/// // use normal call notation for futures::stream::StreamExt::collect +/// let output: Vec<_> = merged.collect().await; +/// assert_eq!(output, vec![1, 2, 3, 4, 5, 6]); +/// # } +/// ``` +/// +/// [`Stream`]: crate::Stream +/// [futures]: https://docs.rs/futures +/// [futures-StreamExt]: https://docs.rs/futures/0.3/futures/stream/trait.StreamExt.html pub trait StreamExt: Stream { /// Consumes and returns the next value in the stream or `None` if the /// stream is finished. @@ -100,7 +111,7 @@ pub trait StreamExt: Stream { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use tokio::stream::{self, StreamExt}; + /// use tokio_stream::{self as stream, StreamExt}; /// /// let mut stream = stream::iter(1..=3); /// @@ -136,7 +147,7 @@ pub trait StreamExt: Stream { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use tokio::stream::{self, StreamExt}; + /// use tokio_stream::{self as stream, StreamExt}; /// /// let mut stream = stream::iter(vec![Ok(1), Ok(2), Err("nope")]); /// @@ -168,7 +179,7 @@ pub trait StreamExt: Stream { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use tokio::stream::{self, StreamExt}; + /// use tokio_stream::{self as stream, StreamExt}; /// /// let stream = stream::iter(1..=3); /// let mut stream = stream.map(|x| x + 3); @@ -201,25 +212,39 @@ pub trait StreamExt: Stream { /// /// For merging multiple streams, consider using [`StreamMap`] instead. 
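The `map` and `try_next` doctests in this hunk each show one combinator in isolation; a small sketch combining them, assuming tokio 1.x with the `rt` and `macros` features for `#[tokio::main]`:

```rust
use tokio_stream::{self as stream, StreamExt};

#[tokio::main]
async fn main() {
    // `map` turns each item into a `Result`, which pairs naturally with `try_next`.
    let mut stream = stream::iter(vec!["1", "2", "nope"]).map(|s| s.parse::<i32>());

    assert_eq!(stream.try_next().await, Ok(Some(1)));
    assert_eq!(stream.try_next().await, Ok(Some(2)));
    assert!(stream.try_next().await.is_err());
}
```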
/// - /// [`StreamMap`]: crate::stream::StreamMap + /// [`StreamMap`]: crate::StreamMap /// /// # Examples /// /// ``` - /// use tokio::stream::StreamExt; + /// use tokio_stream::{StreamExt, Stream}; /// use tokio::sync::mpsc; /// use tokio::time; /// /// use std::time::Duration; + /// use std::pin::Pin; /// /// # /* /// #[tokio::main] /// # */ - /// # #[tokio::main(basic_scheduler)] + /// # #[tokio::main(flavor = "current_thread")] /// async fn main() { /// # time::pause(); - /// let (mut tx1, rx1) = mpsc::channel(10); - /// let (mut tx2, rx2) = mpsc::channel(10); + /// let (tx1, mut rx1) = mpsc::channel::(10); + /// let (tx2, mut rx2) = mpsc::channel::(10); + /// + /// // Convert the channels to a `Stream`. + /// let rx1 = Box::pin(async_stream::stream! { + /// while let Some(item) = rx1.recv().await { + /// yield item; + /// } + /// }) as Pin + Send>>; + /// + /// let rx2 = Box::pin(async_stream::stream! { + /// while let Some(item) = rx2.recv().await { + /// yield item; + /// } + /// }) as Pin + Send>>; /// /// let mut rx = rx1.merge(rx2); /// @@ -229,18 +254,18 @@ pub trait StreamExt: Stream { /// tx1.send(2).await.unwrap(); /// /// // Let the other task send values - /// time::delay_for(Duration::from_millis(20)).await; + /// time::sleep(Duration::from_millis(20)).await; /// /// tx1.send(4).await.unwrap(); /// }); /// /// tokio::spawn(async move { /// // Wait for the first task to send values - /// time::delay_for(Duration::from_millis(5)).await; + /// time::sleep(Duration::from_millis(5)).await; /// /// tx2.send(3).await.unwrap(); /// - /// time::delay_for(Duration::from_millis(25)).await; + /// time::sleep(Duration::from_millis(25)).await; /// /// // Send the final value /// tx2.send(5).await.unwrap(); @@ -282,7 +307,7 @@ pub trait StreamExt: Stream { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use tokio::stream::{self, StreamExt}; + /// use tokio_stream::{self as stream, StreamExt}; /// /// let stream = stream::iter(1..=8); /// let mut evens = stream.filter(|x| x % 2 == 0); @@ -318,7 +343,7 @@ pub trait StreamExt: Stream { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use tokio::stream::{self, StreamExt}; + /// use tokio_stream::{self as stream, StreamExt}; /// /// let stream = stream::iter(1..=8); /// let mut evens = stream.filter_map(|x| { @@ -350,7 +375,7 @@ pub trait StreamExt: Stream { /// # Examples /// /// ``` - /// use tokio::stream::{Stream, StreamExt}; + /// use tokio_stream::{Stream, StreamExt}; /// /// use std::pin::Pin; /// use std::task::{Context, Poll}; @@ -415,7 +440,7 @@ pub trait StreamExt: Stream { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use tokio::stream::{self, StreamExt}; + /// use tokio_stream::{self as stream, StreamExt}; /// /// let mut stream = stream::iter(1..=10).take(3); /// @@ -444,7 +469,7 @@ pub trait StreamExt: Stream { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use tokio::stream::{self, StreamExt}; + /// use tokio_stream::{self as stream, StreamExt}; /// /// let mut stream = stream::iter(1..=10).take_while(|x| *x <= 3); /// @@ -470,7 +495,7 @@ pub trait StreamExt: Stream { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use tokio::stream::{self, StreamExt}; + /// use tokio_stream::{self as stream, StreamExt}; /// /// let mut stream = stream::iter(1..=10).skip(7); /// @@ -490,7 +515,7 @@ pub trait StreamExt: Stream { /// Skip elements from the underlying stream while the provided predicate /// resolves to `true`. 
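The rewritten `merge` doctest earlier in this hunk converts each `mpsc::Receiver` into a `Stream` by hand with `async-stream`; the `ReceiverStream` wrapper vendored later in this same patch (`src/wrappers/mpsc_bounded.rs`) does that conversion directly. A minimal equivalent sketch, assuming tokio 1.x with the `sync`, `rt`, and `macros` features:

```rust
use tokio::sync::mpsc;
use tokio_stream::wrappers::ReceiverStream;
use tokio_stream::StreamExt;

#[tokio::main]
async fn main() {
    let (tx1, rx1) = mpsc::channel::<u32>(10);
    let (tx2, rx2) = mpsc::channel::<u32>(10);

    // Each receiver becomes a `Stream` without any hand-written glue.
    let mut merged = ReceiverStream::new(rx1).merge(ReceiverStream::new(rx2));

    tx1.send(1).await.unwrap();
    tx2.send(2).await.unwrap();
    drop((tx1, tx2)); // close both channels so the merged stream ends

    let mut seen = Vec::new();
    while let Some(v) = merged.next().await {
        seen.push(v);
    }
    seen.sort_unstable();
    assert_eq!(seen, vec![1, 2]);
}
```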
/// - /// This function, like [`Iterator::skip_while`], will ignore elemets from the + /// This function, like [`Iterator::skip_while`], will ignore elements from the /// stream until the predicate `f` resolves to `false`. Once one element /// returns false, the rest of the elements will be yielded. /// @@ -501,7 +526,7 @@ pub trait StreamExt: Stream { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use tokio::stream::{self, StreamExt}; + /// use tokio_stream::{self as stream, StreamExt}; /// let mut stream = stream::iter(vec![1,2,3,4,1]).skip_while(|x| *x < 3); /// /// assert_eq!(Some(3), stream.next().await); @@ -520,6 +545,12 @@ pub trait StreamExt: Stream { /// Tests if every element of the stream matches a predicate. /// + /// Equivalent to: + /// + /// ```ignore + /// async fn all(&mut self, f: F) -> bool; + /// ``` + /// /// `all()` takes a closure that returns `true` or `false`. It applies /// this closure to each element of the stream, and if they all return /// `true`, then so does `all`. If any of them return `false`, it @@ -538,7 +569,7 @@ pub trait StreamExt: Stream { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use tokio::stream::{self, StreamExt}; + /// use tokio_stream::{self as stream, StreamExt}; /// /// let a = [1, 2, 3]; /// @@ -553,7 +584,7 @@ pub trait StreamExt: Stream { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use tokio::stream::{self, StreamExt}; + /// use tokio_stream::{self as stream, StreamExt}; /// /// let a = [1, 2, 3]; /// @@ -575,6 +606,12 @@ pub trait StreamExt: Stream { /// Tests if any element of the stream matches a predicate. /// + /// Equivalent to: + /// + /// ```ignore + /// async fn any(&mut self, f: F) -> bool; + /// ``` + /// /// `any()` takes a closure that returns `true` or `false`. It applies /// this closure to each element of the stream, and if any of them return /// `true`, then so does `any()`. If they all return `false`, it @@ -591,7 +628,7 @@ pub trait StreamExt: Stream { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use tokio::stream::{self, StreamExt}; + /// use tokio_stream::{self as stream, StreamExt}; /// /// let a = [1, 2, 3]; /// @@ -606,7 +643,7 @@ pub trait StreamExt: Stream { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use tokio::stream::{self, StreamExt}; + /// use tokio_stream::{self as stream, StreamExt}; /// /// let a = [1, 2, 3]; /// @@ -635,7 +672,7 @@ pub trait StreamExt: Stream { /// # Examples /// /// ``` - /// use tokio::stream::{self, StreamExt}; + /// use tokio_stream::{self as stream, StreamExt}; /// /// #[tokio::main] /// async fn main() { @@ -664,12 +701,18 @@ pub trait StreamExt: Stream { /// A combinator that applies a function to every element in a stream /// producing a single, final value. /// + /// Equivalent to: + /// + /// ```ignore + /// async fn fold(self, init: B, f: F) -> B; + /// ``` + /// /// # Examples /// Basic usage: /// ``` /// # #[tokio::main] /// # async fn main() { - /// use tokio::stream::{self, *}; + /// use tokio_stream::{self as stream, *}; /// /// let s = stream::iter(vec![1u8, 2, 3]); /// let sum = s.fold(0, |acc, x| acc + x).await; @@ -687,10 +730,18 @@ pub trait StreamExt: Stream { /// Drain stream pushing all emitted values into a collection. /// + /// Equivalent to: + /// + /// ```ignore + /// async fn collect(self) -> T; + /// ``` + /// /// `collect` streams all values, awaiting as needed. Values are pushed into /// a collection. 
A number of different target collection types are /// supported, including [`Vec`](std::vec::Vec), - /// [`String`](std::string::String), and [`Bytes`](bytes::Bytes). + /// [`String`](std::string::String), and [`Bytes`]. + /// + /// [`Bytes`]: https://docs.rs/bytes/0.6.0/bytes/struct.Bytes.html /// /// # `Result` /// @@ -709,7 +760,7 @@ pub trait StreamExt: Stream { /// Basic usage: /// /// ``` - /// use tokio::stream::{self, StreamExt}; + /// use tokio_stream::{self as stream, StreamExt}; /// /// #[tokio::main] /// async fn main() { @@ -726,7 +777,7 @@ pub trait StreamExt: Stream { /// Collecting a stream of `Result` values /// /// ``` - /// use tokio::stream::{self, StreamExt}; + /// use tokio_stream::{self as stream, StreamExt}; /// /// #[tokio::main] /// async fn main() { @@ -782,11 +833,12 @@ pub trait StreamExt: Stream { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use tokio::stream::{self, StreamExt}; + /// use tokio_stream::{self as stream, StreamExt}; /// use std::time::Duration; /// # let int_stream = stream::iter(1..=3); /// - /// let mut int_stream = int_stream.timeout(Duration::from_secs(1)); + /// let int_stream = int_stream.timeout(Duration::from_secs(1)); + /// tokio::pin!(int_stream); /// /// // When no items time out, we get the 3 elements in succession: /// assert_eq!(int_stream.try_next().await, Ok(Some(1))); @@ -819,6 +871,34 @@ pub trait StreamExt: Stream { { Timeout::new(self, duration) } + + /// Slows down a stream by enforcing a delay between items. + /// + /// # Example + /// + /// Create a throttled stream. + /// ```rust,no_run + /// use std::time::Duration; + /// use tokio_stream::StreamExt; + /// + /// # async fn dox() { + /// let item_stream = futures::stream::repeat("one").throttle(Duration::from_secs(2)); + /// tokio::pin!(item_stream); + /// + /// loop { + /// // The string will be produced at most every 2 seconds + /// println!("{:?}", item_stream.next().await); + /// } + /// # } + /// ``` + #[cfg(all(feature = "time"))] + #[cfg_attr(docsrs, doc(cfg(feature = "time")))] + fn throttle(self, duration: Duration) -> Throttle + where + Self: Sized, + { + throttle(duration, self) + } } impl StreamExt for St where St: Stream {} diff --git a/third_party/rust/tokio-stream/src/stream_ext/all.rs b/third_party/rust/tokio-stream/src/stream_ext/all.rs new file mode 100644 index 000000000000..b4dbc1e97c3b --- /dev/null +++ b/third_party/rust/tokio-stream/src/stream_ext/all.rs @@ -0,0 +1,58 @@ +use crate::Stream; + +use core::future::Future; +use core::marker::PhantomPinned; +use core::pin::Pin; +use core::task::{Context, Poll}; +use pin_project_lite::pin_project; + +pin_project! { + /// Future for the [`all`](super::StreamExt::all) method. + #[derive(Debug)] + #[must_use = "futures do nothing unless you `.await` or poll them"] + pub struct AllFuture<'a, St: ?Sized, F> { + stream: &'a mut St, + f: F, + // Make this future `!Unpin` for compatibility with async trait methods. + #[pin] + _pin: PhantomPinned, + } +} + +impl<'a, St: ?Sized, F> AllFuture<'a, St, F> { + pub(super) fn new(stream: &'a mut St, f: F) -> Self { + Self { + stream, + f, + _pin: PhantomPinned, + } + } +} + +impl Future for AllFuture<'_, St, F> +where + St: ?Sized + Stream + Unpin, + F: FnMut(St::Item) -> bool, +{ + type Output = bool; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let me = self.project(); + let mut stream = Pin::new(me.stream); + + // Take a maximum of 32 items from the stream before yielding. 
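+        // (This fixed budget stands in for the coop budget the old
+        // `tokio::stream` module used: without it, a long stream could keep
+        // this task on the executor for a long time. The `wake_by_ref` call
+        // after the loop ensures the task is rescheduled promptly before
+        // `Pending` is returned.)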
+ for _ in 0..32 { + match futures_core::ready!(stream.as_mut().poll_next(cx)) { + Some(v) => { + if !(me.f)(v) { + return Poll::Ready(false); + } + } + None => return Poll::Ready(true), + } + } + + cx.waker().wake_by_ref(); + Poll::Pending + } +} diff --git a/third_party/rust/tokio-stream/src/stream_ext/any.rs b/third_party/rust/tokio-stream/src/stream_ext/any.rs new file mode 100644 index 000000000000..31394f249b80 --- /dev/null +++ b/third_party/rust/tokio-stream/src/stream_ext/any.rs @@ -0,0 +1,58 @@ +use crate::Stream; + +use core::future::Future; +use core::marker::PhantomPinned; +use core::pin::Pin; +use core::task::{Context, Poll}; +use pin_project_lite::pin_project; + +pin_project! { + /// Future for the [`any`](super::StreamExt::any) method. + #[derive(Debug)] + #[must_use = "futures do nothing unless you `.await` or poll them"] + pub struct AnyFuture<'a, St: ?Sized, F> { + stream: &'a mut St, + f: F, + // Make this future `!Unpin` for compatibility with async trait methods. + #[pin] + _pin: PhantomPinned, + } +} + +impl<'a, St: ?Sized, F> AnyFuture<'a, St, F> { + pub(super) fn new(stream: &'a mut St, f: F) -> Self { + Self { + stream, + f, + _pin: PhantomPinned, + } + } +} + +impl Future for AnyFuture<'_, St, F> +where + St: ?Sized + Stream + Unpin, + F: FnMut(St::Item) -> bool, +{ + type Output = bool; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let me = self.project(); + let mut stream = Pin::new(me.stream); + + // Take a maximum of 32 items from the stream before yielding. + for _ in 0..32 { + match futures_core::ready!(stream.as_mut().poll_next(cx)) { + Some(v) => { + if (me.f)(v) { + return Poll::Ready(true); + } + } + None => return Poll::Ready(false), + } + } + + cx.waker().wake_by_ref(); + Poll::Pending + } +} diff --git a/third_party/rust/tokio-0.2.25/src/stream/chain.rs b/third_party/rust/tokio-stream/src/stream_ext/chain.rs similarity index 95% rename from third_party/rust/tokio-0.2.25/src/stream/chain.rs rename to third_party/rust/tokio-stream/src/stream_ext/chain.rs index 6124c91e44fb..bd64f33ce4e4 100644 --- a/third_party/rust/tokio-0.2.25/src/stream/chain.rs +++ b/third_party/rust/tokio-stream/src/stream_ext/chain.rs @@ -1,4 +1,5 @@ -use crate::stream::{Fuse, Stream}; +use crate::stream_ext::Fuse; +use crate::Stream; use core::pin::Pin; use core::task::{Context, Poll}; diff --git a/third_party/rust/tokio-stream/src/stream_ext/collect.rs b/third_party/rust/tokio-stream/src/stream_ext/collect.rs new file mode 100644 index 000000000000..a33a6d6692a3 --- /dev/null +++ b/third_party/rust/tokio-stream/src/stream_ext/collect.rs @@ -0,0 +1,233 @@ +use crate::Stream; + +use core::future::Future; +use core::marker::PhantomPinned; +use core::mem; +use core::pin::Pin; +use core::task::{Context, Poll}; +use pin_project_lite::pin_project; + +// Do not export this struct until `FromStream` can be unsealed. +pin_project! { + /// Future returned by the [`collect`](super::StreamExt::collect) method. + #[must_use = "futures do nothing unless you `.await` or poll them"] + #[derive(Debug)] + pub struct Collect + where + T: Stream, + U: FromStream, + { + #[pin] + stream: T, + collection: U::InternalCollection, + // Make this future `!Unpin` for compatibility with async trait methods. + #[pin] + _pin: PhantomPinned, + } +} + +/// Convert from a [`Stream`](crate::Stream). +/// +/// This trait is not intended to be used directly. Instead, call +/// [`StreamExt::collect()`](super::StreamExt::collect). 
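`FromStream` is sealed behind the private `FromStreamPriv` supertrait (plus an `Internal` token argument) so the crate can change the collection machinery without breaking downstream code. A stripped-down sketch of the general sealed-trait pattern it relies on, with hypothetical names not taken from the crate:

```rust
mod sealed {
    // Only this crate can name `Sealed`, so only this crate can implement it.
    pub trait Sealed {}
}

/// Public trait that downstream code can use but not implement.
pub trait Collectable: sealed::Sealed {
    fn describe(&self) -> &'static str;
}

impl sealed::Sealed for Vec<u8> {}

impl Collectable for Vec<u8> {
    fn describe(&self) -> &'static str {
        "a byte vector"
    }
}
```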
+/// +/// # Implementing +/// +/// Currently, this trait may not be implemented by third parties. The trait is +/// sealed in order to make changes in the future. Stabilization is pending +/// enhancements to the Rust language. +pub trait FromStream: sealed::FromStreamPriv {} + +impl Collect +where + T: Stream, + U: FromStream, +{ + pub(super) fn new(stream: T) -> Collect { + let (lower, upper) = stream.size_hint(); + let collection = U::initialize(sealed::Internal, lower, upper); + + Collect { + stream, + collection, + _pin: PhantomPinned, + } + } +} + +impl Future for Collect +where + T: Stream, + U: FromStream, +{ + type Output = U; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + use Poll::Ready; + + loop { + let mut me = self.as_mut().project(); + + let item = match ready!(me.stream.poll_next(cx)) { + Some(item) => item, + None => { + return Ready(U::finalize(sealed::Internal, &mut me.collection)); + } + }; + + if !U::extend(sealed::Internal, &mut me.collection, item) { + return Ready(U::finalize(sealed::Internal, &mut me.collection)); + } + } + } +} + +// ===== FromStream implementations + +impl FromStream<()> for () {} + +impl sealed::FromStreamPriv<()> for () { + type InternalCollection = (); + + fn initialize(_: sealed::Internal, _lower: usize, _upper: Option) {} + + fn extend(_: sealed::Internal, _collection: &mut (), _item: ()) -> bool { + true + } + + fn finalize(_: sealed::Internal, _collection: &mut ()) {} +} + +impl> FromStream for String {} + +impl> sealed::FromStreamPriv for String { + type InternalCollection = String; + + fn initialize(_: sealed::Internal, _lower: usize, _upper: Option) -> String { + String::new() + } + + fn extend(_: sealed::Internal, collection: &mut String, item: T) -> bool { + collection.push_str(item.as_ref()); + true + } + + fn finalize(_: sealed::Internal, collection: &mut String) -> String { + mem::take(collection) + } +} + +impl FromStream for Vec {} + +impl sealed::FromStreamPriv for Vec { + type InternalCollection = Vec; + + fn initialize(_: sealed::Internal, lower: usize, _upper: Option) -> Vec { + Vec::with_capacity(lower) + } + + fn extend(_: sealed::Internal, collection: &mut Vec, item: T) -> bool { + collection.push(item); + true + } + + fn finalize(_: sealed::Internal, collection: &mut Vec) -> Vec { + mem::take(collection) + } +} + +impl FromStream for Box<[T]> {} + +impl sealed::FromStreamPriv for Box<[T]> { + type InternalCollection = Vec; + + fn initialize(_: sealed::Internal, lower: usize, upper: Option) -> Vec { + as sealed::FromStreamPriv>::initialize(sealed::Internal, lower, upper) + } + + fn extend(_: sealed::Internal, collection: &mut Vec, item: T) -> bool { + as sealed::FromStreamPriv>::extend(sealed::Internal, collection, item) + } + + fn finalize(_: sealed::Internal, collection: &mut Vec) -> Box<[T]> { + as sealed::FromStreamPriv>::finalize(sealed::Internal, collection) + .into_boxed_slice() + } +} + +impl FromStream> for Result where U: FromStream {} + +impl sealed::FromStreamPriv> for Result +where + U: FromStream, +{ + type InternalCollection = Result; + + fn initialize( + _: sealed::Internal, + lower: usize, + upper: Option, + ) -> Result { + Ok(U::initialize(sealed::Internal, lower, upper)) + } + + fn extend( + _: sealed::Internal, + collection: &mut Self::InternalCollection, + item: Result, + ) -> bool { + assert!(collection.is_ok()); + match item { + Ok(item) => { + let collection = collection.as_mut().ok().expect("invalid state"); + U::extend(sealed::Internal, collection, item) + } + Err(err) => 
{ + *collection = Err(err); + false + } + } + } + + fn finalize(_: sealed::Internal, collection: &mut Self::InternalCollection) -> Result { + if let Ok(collection) = collection.as_mut() { + Ok(U::finalize(sealed::Internal, collection)) + } else { + let res = mem::replace(collection, Ok(U::initialize(sealed::Internal, 0, Some(0)))); + + if let Err(err) = res { + Err(err) + } else { + unreachable!(); + } + } + } +} + +pub(crate) mod sealed { + #[doc(hidden)] + pub trait FromStreamPriv { + /// Intermediate type used during collection process + /// + /// The name of this type is internal and cannot be relied upon. + type InternalCollection; + + /// Initialize the collection + fn initialize( + internal: Internal, + lower: usize, + upper: Option, + ) -> Self::InternalCollection; + + /// Extend the collection with the received item + /// + /// Return `true` to continue streaming, `false` complete collection. + fn extend(internal: Internal, collection: &mut Self::InternalCollection, item: T) -> bool; + + /// Finalize collection into target type. + fn finalize(internal: Internal, collection: &mut Self::InternalCollection) -> Self; + } + + #[allow(missing_debug_implementations)] + pub struct Internal; +} diff --git a/third_party/rust/tokio-0.2.25/src/stream/filter.rs b/third_party/rust/tokio-stream/src/stream_ext/filter.rs similarity index 98% rename from third_party/rust/tokio-0.2.25/src/stream/filter.rs rename to third_party/rust/tokio-stream/src/stream_ext/filter.rs index 799630b23469..f3dd8716b48e 100644 --- a/third_party/rust/tokio-0.2.25/src/stream/filter.rs +++ b/third_party/rust/tokio-stream/src/stream_ext/filter.rs @@ -1,4 +1,4 @@ -use crate::stream::Stream; +use crate::Stream; use core::fmt; use core::pin::Pin; diff --git a/third_party/rust/tokio-0.2.25/src/stream/filter_map.rs b/third_party/rust/tokio-stream/src/stream_ext/filter_map.rs similarity index 98% rename from third_party/rust/tokio-0.2.25/src/stream/filter_map.rs rename to third_party/rust/tokio-stream/src/stream_ext/filter_map.rs index 8dc05a546037..fe604a6f4b52 100644 --- a/third_party/rust/tokio-0.2.25/src/stream/filter_map.rs +++ b/third_party/rust/tokio-stream/src/stream_ext/filter_map.rs @@ -1,4 +1,4 @@ -use crate::stream::Stream; +use crate::Stream; use core::fmt; use core::pin::Pin; diff --git a/third_party/rust/tokio-0.2.25/src/stream/fold.rs b/third_party/rust/tokio-stream/src/stream_ext/fold.rs similarity index 80% rename from third_party/rust/tokio-0.2.25/src/stream/fold.rs rename to third_party/rust/tokio-stream/src/stream_ext/fold.rs index 7b9fead3dbb4..e2e97d8f3753 100644 --- a/third_party/rust/tokio-0.2.25/src/stream/fold.rs +++ b/third_party/rust/tokio-stream/src/stream_ext/fold.rs @@ -1,6 +1,7 @@ -use crate::stream::Stream; +use crate::Stream; use core::future::Future; +use core::marker::PhantomPinned; use core::pin::Pin; use core::task::{Context, Poll}; use pin_project_lite::pin_project; @@ -8,11 +9,15 @@ use pin_project_lite::pin_project; pin_project! { /// Future returned by the [`fold`](super::StreamExt::fold) method. #[derive(Debug)] + #[must_use = "futures do nothing unless you `.await` or poll them"] pub struct FoldFuture { #[pin] stream: St, acc: Option, f: F, + // Make this future `!Unpin` for compatibility with async trait methods. 
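+        // (`#[pin]` on a `PhantomPinned` field suppresses the auto `Unpin`
+        // impl that `pin_project!` would otherwise generate, matching the
+        // `!Unpin` future an equivalent `async fn` would return.)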
+ #[pin] + _pin: PhantomPinned, } } @@ -22,6 +27,7 @@ impl FoldFuture { stream, acc: Some(init), f, + _pin: PhantomPinned, } } } diff --git a/third_party/rust/tokio-0.2.25/src/stream/fuse.rs b/third_party/rust/tokio-stream/src/stream_ext/fuse.rs similarity index 97% rename from third_party/rust/tokio-0.2.25/src/stream/fuse.rs rename to third_party/rust/tokio-stream/src/stream_ext/fuse.rs index 6c9e02d6643f..2500641d95d1 100644 --- a/third_party/rust/tokio-0.2.25/src/stream/fuse.rs +++ b/third_party/rust/tokio-stream/src/stream_ext/fuse.rs @@ -1,4 +1,4 @@ -use crate::stream::Stream; +use crate::Stream; use pin_project_lite::pin_project; use std::pin::Pin; diff --git a/third_party/rust/tokio-0.2.25/src/stream/map.rs b/third_party/rust/tokio-stream/src/stream_ext/map.rs similarity index 97% rename from third_party/rust/tokio-0.2.25/src/stream/map.rs rename to third_party/rust/tokio-stream/src/stream_ext/map.rs index dfac5a2c9425..e6b47cd25820 100644 --- a/third_party/rust/tokio-0.2.25/src/stream/map.rs +++ b/third_party/rust/tokio-stream/src/stream_ext/map.rs @@ -1,4 +1,4 @@ -use crate::stream::Stream; +use crate::Stream; use core::fmt; use core::pin::Pin; diff --git a/third_party/rust/tokio-0.2.25/src/stream/merge.rs b/third_party/rust/tokio-stream/src/stream_ext/merge.rs similarity index 97% rename from third_party/rust/tokio-0.2.25/src/stream/merge.rs rename to third_party/rust/tokio-stream/src/stream_ext/merge.rs index 50ba518ce39c..9d5123c85a39 100644 --- a/third_party/rust/tokio-0.2.25/src/stream/merge.rs +++ b/third_party/rust/tokio-stream/src/stream_ext/merge.rs @@ -1,4 +1,5 @@ -use crate::stream::{Fuse, Stream}; +use crate::stream_ext::Fuse; +use crate::Stream; use core::pin::Pin; use core::task::{Context, Poll}; diff --git a/third_party/rust/tokio-stream/src/stream_ext/next.rs b/third_party/rust/tokio-stream/src/stream_ext/next.rs new file mode 100644 index 000000000000..175490c488a3 --- /dev/null +++ b/third_party/rust/tokio-stream/src/stream_ext/next.rs @@ -0,0 +1,37 @@ +use crate::Stream; + +use core::future::Future; +use core::marker::PhantomPinned; +use core::pin::Pin; +use core::task::{Context, Poll}; +use pin_project_lite::pin_project; + +pin_project! { + /// Future for the [`next`](super::StreamExt::next) method. + #[derive(Debug)] + #[must_use = "futures do nothing unless you `.await` or poll them"] + pub struct Next<'a, St: ?Sized> { + stream: &'a mut St, + // Make this future `!Unpin` for compatibility with async trait methods. 
+ #[pin] + _pin: PhantomPinned, + } +} + +impl<'a, St: ?Sized> Next<'a, St> { + pub(super) fn new(stream: &'a mut St) -> Self { + Next { + stream, + _pin: PhantomPinned, + } + } +} + +impl Future for Next<'_, St> { + type Output = Option; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let me = self.project(); + Pin::new(me.stream).poll_next(cx) + } +} diff --git a/third_party/rust/tokio-0.2.25/src/stream/skip.rs b/third_party/rust/tokio-stream/src/stream_ext/skip.rs similarity index 98% rename from third_party/rust/tokio-0.2.25/src/stream/skip.rs rename to third_party/rust/tokio-stream/src/stream_ext/skip.rs index 39540cc984ce..80a0a0aff0dc 100644 --- a/third_party/rust/tokio-0.2.25/src/stream/skip.rs +++ b/third_party/rust/tokio-stream/src/stream_ext/skip.rs @@ -1,4 +1,4 @@ -use crate::stream::Stream; +use crate::Stream; use core::fmt; use core::pin::Pin; diff --git a/third_party/rust/tokio-0.2.25/src/stream/skip_while.rs b/third_party/rust/tokio-stream/src/stream_ext/skip_while.rs similarity index 98% rename from third_party/rust/tokio-0.2.25/src/stream/skip_while.rs rename to third_party/rust/tokio-stream/src/stream_ext/skip_while.rs index 4e0500701a3e..985a92666e0c 100644 --- a/third_party/rust/tokio-0.2.25/src/stream/skip_while.rs +++ b/third_party/rust/tokio-stream/src/stream_ext/skip_while.rs @@ -1,4 +1,4 @@ -use crate::stream::Stream; +use crate::Stream; use core::fmt; use core::pin::Pin; diff --git a/third_party/rust/tokio-0.2.25/src/stream/take.rs b/third_party/rust/tokio-stream/src/stream_ext/take.rs similarity index 98% rename from third_party/rust/tokio-0.2.25/src/stream/take.rs rename to third_party/rust/tokio-stream/src/stream_ext/take.rs index a92430b77c86..c75648f60658 100644 --- a/third_party/rust/tokio-0.2.25/src/stream/take.rs +++ b/third_party/rust/tokio-stream/src/stream_ext/take.rs @@ -1,4 +1,4 @@ -use crate::stream::Stream; +use crate::Stream; use core::cmp; use core::fmt; diff --git a/third_party/rust/tokio-0.2.25/src/stream/take_while.rs b/third_party/rust/tokio-stream/src/stream_ext/take_while.rs similarity index 98% rename from third_party/rust/tokio-0.2.25/src/stream/take_while.rs rename to third_party/rust/tokio-stream/src/stream_ext/take_while.rs index cf1e16061315..5ce4dd98a98a 100644 --- a/third_party/rust/tokio-0.2.25/src/stream/take_while.rs +++ b/third_party/rust/tokio-stream/src/stream_ext/take_while.rs @@ -1,4 +1,4 @@ -use crate::stream::Stream; +use crate::Stream; use core::fmt; use core::pin::Pin; diff --git a/third_party/rust/tokio-0.2.25/src/time/throttle.rs b/third_party/rust/tokio-stream/src/stream_ext/throttle.rs similarity index 50% rename from third_party/rust/tokio-0.2.25/src/time/throttle.rs rename to third_party/rust/tokio-stream/src/stream_ext/throttle.rs index d53a6f762111..f36c66a5d39a 100644 --- a/third_party/rust/tokio-0.2.25/src/time/throttle.rs +++ b/third_party/rust/tokio-stream/src/stream_ext/throttle.rs @@ -1,7 +1,7 @@ //! Slow down a stream by enforcing a delay between items. -use crate::stream::Stream; -use crate::time::{Delay, Duration, Instant}; +use crate::Stream; +use tokio::time::{Duration, Instant, Sleep}; use std::future::Future; use std::marker::Unpin; @@ -10,38 +10,12 @@ use std::task::{self, Poll}; use pin_project_lite::pin_project; -/// Slows down a stream by enforcing a delay between items. -/// They will be produced not more often than the specified interval. -/// -/// # Example -/// -/// Create a throttled stream. 
-/// ```rust,no_run -/// use std::time::Duration; -/// use tokio::stream::StreamExt; -/// use tokio::time::throttle; -/// -/// # async fn dox() { -/// let mut item_stream = throttle(Duration::from_secs(2), futures::stream::repeat("one")); -/// -/// loop { -/// // The string will be produced at most every 2 seconds -/// println!("{:?}", item_stream.next().await); -/// } -/// # } -/// ``` -pub fn throttle(duration: Duration, stream: T) -> Throttle +pub(super) fn throttle(duration: Duration, stream: T) -> Throttle where T: Stream, { - let delay = if duration == Duration::from_millis(0) { - None - } else { - Some(Delay::new_timeout(Instant::now() + duration, duration)) - }; - Throttle { - delay, + delay: tokio::time::sleep_until(Instant::now() + duration), duration, has_delayed: true, stream, @@ -49,12 +23,13 @@ where } pin_project! { - /// Stream for the [`throttle`](throttle) function. + /// Stream for the [`throttle`](throttle) function. This object is `!Unpin`. If you need it to + /// implement `Unpin` you can pin your throttle like this: `Box::pin(your_throttle)`. #[derive(Debug)] #[must_use = "streams do nothing unless polled"] pub struct Throttle { - // `None` when duration is zero. - delay: Option, + #[pin] + delay: Sleep, duration: Duration, // Set to true when `delay` has returned ready, but `stream` hasn't. @@ -95,23 +70,29 @@ impl Throttle { impl Stream for Throttle { type Item = T::Item; - fn poll_next(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll> { - if !self.has_delayed && self.delay.is_some() { - ready!(Pin::new(self.as_mut().project().delay.as_mut().unwrap()).poll(cx)); - *self.as_mut().project().has_delayed = true; + fn poll_next(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll> { + let mut me = self.project(); + let dur = *me.duration; + + if !*me.has_delayed && !is_zero(dur) { + ready!(me.delay.as_mut().poll(cx)); + *me.has_delayed = true; } - let value = ready!(self.as_mut().project().stream.poll_next(cx)); + let value = ready!(me.stream.poll_next(cx)); if value.is_some() { - let dur = self.duration; - if let Some(ref mut delay) = self.as_mut().project().delay { - delay.reset(Instant::now() + dur); + if !is_zero(dur) { + me.delay.reset(Instant::now() + dur); } - *self.as_mut().project().has_delayed = false; + *me.has_delayed = false; } Poll::Ready(value) } } + +fn is_zero(dur: Duration) -> bool { + dur == Duration::from_millis(0) +} diff --git a/third_party/rust/tokio-stream/src/stream_ext/timeout.rs b/third_party/rust/tokio-stream/src/stream_ext/timeout.rs new file mode 100644 index 000000000000..98d7cd5c06df --- /dev/null +++ b/third_party/rust/tokio-stream/src/stream_ext/timeout.rs @@ -0,0 +1,107 @@ +use crate::stream_ext::Fuse; +use crate::Stream; +use tokio::time::{Instant, Sleep}; + +use core::future::Future; +use core::pin::Pin; +use core::task::{Context, Poll}; +use pin_project_lite::pin_project; +use std::fmt; +use std::time::Duration; + +pin_project! { + /// Stream returned by the [`timeout`](super::StreamExt::timeout) method. + #[must_use = "streams do nothing unless polled"] + #[derive(Debug)] + pub struct Timeout { + #[pin] + stream: Fuse, + #[pin] + deadline: Sleep, + duration: Duration, + poll_deadline: bool, + } +} + +/// Error returned by `Timeout`. 
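+///
+/// Yielded by the `Timeout` stream whenever the wrapped stream does not
+/// produce an item within the configured duration. It can be converted into
+/// a `std::io::Error` with kind `TimedOut` (see the `From` impl below).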
+#[derive(Debug, PartialEq)] +pub struct Elapsed(()); + +impl Timeout { + pub(super) fn new(stream: S, duration: Duration) -> Self { + let next = Instant::now() + duration; + let deadline = tokio::time::sleep_until(next); + + Timeout { + stream: Fuse::new(stream), + deadline, + duration, + poll_deadline: true, + } + } +} + +impl Stream for Timeout { + type Item = Result; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let me = self.project(); + + match me.stream.poll_next(cx) { + Poll::Ready(v) => { + if v.is_some() { + let next = Instant::now() + *me.duration; + me.deadline.reset(next); + *me.poll_deadline = true; + } + return Poll::Ready(v.map(Ok)); + } + Poll::Pending => {} + }; + + if *me.poll_deadline { + ready!(me.deadline.poll(cx)); + *me.poll_deadline = false; + return Poll::Ready(Some(Err(Elapsed::new()))); + } + + Poll::Pending + } + + fn size_hint(&self) -> (usize, Option) { + let (lower, upper) = self.stream.size_hint(); + + // The timeout stream may insert an error before and after each message + // from the underlying stream, but no more than one error between each + // message. Hence the upper bound is computed as 2x+1. + + // Using a helper function to enable use of question mark operator. + fn twice_plus_one(value: Option) -> Option { + value?.checked_mul(2)?.checked_add(1) + } + + (lower, twice_plus_one(upper)) + } +} + +// ===== impl Elapsed ===== + +impl Elapsed { + pub(crate) fn new() -> Self { + Elapsed(()) + } +} + +impl fmt::Display for Elapsed { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + "deadline has elapsed".fmt(fmt) + } +} + +impl std::error::Error for Elapsed {} + +impl From for std::io::Error { + fn from(_err: Elapsed) -> std::io::Error { + std::io::ErrorKind::TimedOut.into() + } +} diff --git a/third_party/rust/tokio-stream/src/stream_ext/try_next.rs b/third_party/rust/tokio-stream/src/stream_ext/try_next.rs new file mode 100644 index 000000000000..af27d87d8e93 --- /dev/null +++ b/third_party/rust/tokio-stream/src/stream_ext/try_next.rs @@ -0,0 +1,39 @@ +use crate::stream_ext::Next; +use crate::Stream; + +use core::future::Future; +use core::marker::PhantomPinned; +use core::pin::Pin; +use core::task::{Context, Poll}; +use pin_project_lite::pin_project; + +pin_project! { + /// Future for the [`try_next`](super::StreamExt::try_next) method. + #[derive(Debug)] + #[must_use = "futures do nothing unless you `.await` or poll them"] + pub struct TryNext<'a, St: ?Sized> { + #[pin] + inner: Next<'a, St>, + // Make this future `!Unpin` for compatibility with async trait methods. 
+ #[pin] + _pin: PhantomPinned, + } +} + +impl<'a, St: ?Sized> TryNext<'a, St> { + pub(super) fn new(stream: &'a mut St) -> Self { + Self { + inner: Next::new(stream), + _pin: PhantomPinned, + } + } +} + +impl> + Unpin> Future for TryNext<'_, St> { + type Output = Result, E>; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let me = self.project(); + me.inner.poll(cx).map(Option::transpose) + } +} diff --git a/third_party/rust/tokio-0.2.25/src/stream/stream_map.rs b/third_party/rust/tokio-stream/src/stream_map.rs similarity index 67% rename from third_party/rust/tokio-0.2.25/src/stream/stream_map.rs rename to third_party/rust/tokio-stream/src/stream_map.rs index 2f60ea4ddafc..80a521ee17ae 100644 --- a/third_party/rust/tokio-0.2.25/src/stream/stream_map.rs +++ b/third_party/rust/tokio-stream/src/stream_map.rs @@ -1,4 +1,4 @@ -use crate::stream::Stream; +use crate::Stream; use std::borrow::Borrow; use std::hash::Hash; @@ -42,9 +42,9 @@ use std::task::{Context, Poll}; /// to be merged, it may be advisable to use tasks sending values on a shared /// [`mpsc`] channel. /// -/// [`StreamExt::merge`]: crate::stream::StreamExt::merge -/// [`mpsc`]: crate::sync::mpsc -/// [`pin!`]: macro@pin +/// [`StreamExt::merge`]: crate::StreamExt::merge +/// [`mpsc`]: https://docs.rs/tokio/1.0/tokio/sync/mpsc/index.html +/// [`pin!`]: https://docs.rs/tokio/1.0/tokio/macro.pin.html /// [`Box::pin`]: std::boxed::Box::pin /// /// # Examples @@ -52,13 +52,27 @@ use std::task::{Context, Poll}; /// Merging two streams, then remove them after receiving the first value /// /// ``` -/// use tokio::stream::{StreamExt, StreamMap}; +/// use tokio_stream::{StreamExt, StreamMap, Stream}; /// use tokio::sync::mpsc; +/// use std::pin::Pin; /// /// #[tokio::main] /// async fn main() { -/// let (mut tx1, rx1) = mpsc::channel(10); -/// let (mut tx2, rx2) = mpsc::channel(10); +/// let (tx1, mut rx1) = mpsc::channel::(10); +/// let (tx2, mut rx2) = mpsc::channel::(10); +/// +/// // Convert the channels to a `Stream`. +/// let rx1 = Box::pin(async_stream::stream! { +/// while let Some(item) = rx1.recv().await { +/// yield item; +/// } +/// }) as Pin + Send>>; +/// +/// let rx2 = Box::pin(async_stream::stream! { +/// while let Some(item) = rx2.recv().await { +/// yield item; +/// } +/// }) as Pin + Send>>; /// /// tokio::spawn(async move { /// tx1.send(1).await.unwrap(); @@ -103,7 +117,7 @@ use std::task::{Context, Poll}; /// sent to the client over a socket. /// /// ```no_run -/// use tokio::stream::{Stream, StreamExt, StreamMap}; +/// use tokio_stream::{Stream, StreamExt, StreamMap}; /// /// enum Command { /// Join(String), @@ -112,13 +126,13 @@ use std::task::{Context, Poll}; /// /// fn commands() -> impl Stream { /// // Streams in user commands by parsing `stdin`. -/// # tokio::stream::pending() +/// # tokio_stream::pending() /// } /// /// // Join a channel, returns a stream of messages received on the channel. /// fn join(channel: &str) -> impl Stream + Unpin { /// // left as an exercise to the reader -/// # tokio::stream::pending() +/// # tokio_stream::pending() /// } /// /// #[tokio::main] @@ -156,13 +170,59 @@ use std::task::{Context, Poll}; /// } /// } /// ``` -#[derive(Debug, Default)] +#[derive(Debug)] pub struct StreamMap { /// Streams stored in the map entries: Vec<(K, V)>, } impl StreamMap { + /// An iterator visiting all key-value pairs in arbitrary order. + /// + /// The iterator element type is &'a (K, V). 
+ /// + /// # Examples + /// + /// ``` + /// use tokio_stream::{StreamMap, pending}; + /// + /// let mut map = StreamMap::new(); + /// + /// map.insert("a", pending::()); + /// map.insert("b", pending()); + /// map.insert("c", pending()); + /// + /// for (key, stream) in map.iter() { + /// println!("({}, {:?})", key, stream); + /// } + /// ``` + pub fn iter(&self) -> impl Iterator { + self.entries.iter() + } + + /// An iterator visiting all key-value pairs mutably in arbitrary order. + /// + /// The iterator element type is &'a mut (K, V). + /// + /// # Examples + /// + /// ``` + /// use tokio_stream::{StreamMap, pending}; + /// + /// let mut map = StreamMap::new(); + /// + /// map.insert("a", pending::()); + /// map.insert("b", pending()); + /// map.insert("c", pending()); + /// + /// for (key, stream) in map.iter_mut() { + /// println!("({}, {:?})", key, stream); + /// } + /// ``` + pub fn iter_mut(&mut self) -> impl Iterator { + self.entries.iter_mut() + } + /// Creates an empty `StreamMap`. /// /// The stream map is initially created with a capacity of `0`, so it will @@ -171,7 +231,7 @@ impl StreamMap { /// # Examples /// /// ``` - /// use tokio::stream::{StreamMap, Pending}; + /// use tokio_stream::{StreamMap, Pending}; /// /// let map: StreamMap<&str, Pending<()>> = StreamMap::new(); /// ``` @@ -187,7 +247,7 @@ impl StreamMap { /// # Examples /// /// ``` - /// use tokio::stream::{StreamMap, Pending}; + /// use tokio_stream::{StreamMap, Pending}; /// /// let map: StreamMap<&str, Pending<()>> = StreamMap::with_capacity(10); /// ``` @@ -204,7 +264,7 @@ impl StreamMap { /// # Examples /// /// ``` - /// use tokio::stream::{StreamMap, pending}; + /// use tokio_stream::{StreamMap, pending}; /// /// let mut map = StreamMap::new(); /// @@ -217,7 +277,7 @@ impl StreamMap { /// } /// ``` pub fn keys(&self) -> impl Iterator { - self.entries.iter().map(|(k, _)| k) + self.iter().map(|(k, _)| k) } /// An iterator visiting all values in arbitrary order. @@ -227,7 +287,7 @@ impl StreamMap { /// # Examples /// /// ``` - /// use tokio::stream::{StreamMap, pending}; + /// use tokio_stream::{StreamMap, pending}; /// /// let mut map = StreamMap::new(); /// @@ -240,7 +300,7 @@ impl StreamMap { /// } /// ``` pub fn values(&self) -> impl Iterator { - self.entries.iter().map(|(_, v)| v) + self.iter().map(|(_, v)| v) } /// An iterator visiting all values mutably in arbitrary order. @@ -250,7 +310,7 @@ impl StreamMap { /// # Examples /// /// ``` - /// use tokio::stream::{StreamMap, pending}; + /// use tokio_stream::{StreamMap, pending}; /// /// let mut map = StreamMap::new(); /// @@ -263,7 +323,7 @@ impl StreamMap { /// } /// ``` pub fn values_mut(&mut self) -> impl Iterator { - self.entries.iter_mut().map(|(_, v)| v) + self.iter_mut().map(|(_, v)| v) } /// Returns the number of streams the map can hold without reallocating. 
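A small sketch of building a `StreamMap` from an iterator of key/stream pairs, relying on the `FromIterator` impl this patch adds further down; the keys used here are arbitrary:

```rust
use tokio_stream::{pending, StreamMap};

fn main() {
    // Collect key/stream pairs straight into a `StreamMap`.
    let map: StreamMap<&str, _> = vec![
        ("first", pending::<i32>()),
        ("second", pending::<i32>()),
    ]
    .into_iter()
    .collect();

    assert_eq!(map.len(), 2);
    assert!(!map.is_empty());
}
```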
@@ -274,7 +334,7 @@ impl StreamMap { /// # Examples /// /// ``` - /// use tokio::stream::{StreamMap, Pending}; + /// use tokio_stream::{StreamMap, Pending}; /// /// let map: StreamMap> = StreamMap::with_capacity(100); /// assert!(map.capacity() >= 100); @@ -288,7 +348,7 @@ impl StreamMap { /// # Examples /// /// ``` - /// use tokio::stream::{StreamMap, pending}; + /// use tokio_stream::{StreamMap, pending}; /// /// let mut a = StreamMap::new(); /// assert_eq!(a.len(), 0); @@ -304,11 +364,11 @@ impl StreamMap { /// # Examples /// /// ``` - /// use std::collections::HashMap; + /// use tokio_stream::{StreamMap, pending}; /// - /// let mut a = HashMap::new(); + /// let mut a = StreamMap::new(); /// assert!(a.is_empty()); - /// a.insert(1, "a"); + /// a.insert(1, pending::()); /// assert!(!a.is_empty()); /// ``` pub fn is_empty(&self) -> bool { @@ -321,7 +381,7 @@ impl StreamMap { /// # Examples /// /// ``` - /// use tokio::stream::{StreamMap, pending}; + /// use tokio_stream::{StreamMap, pending}; /// /// let mut a = StreamMap::new(); /// a.insert(1, pending::()); @@ -342,7 +402,7 @@ impl StreamMap { /// # Examples /// /// ``` - /// use tokio::stream::{StreamMap, pending}; + /// use tokio_stream::{StreamMap, pending}; /// /// let mut map = StreamMap::new(); /// @@ -370,7 +430,7 @@ impl StreamMap { /// # Examples /// /// ``` - /// use tokio::stream::{StreamMap, pending}; + /// use tokio_stream::{StreamMap, pending}; /// /// let mut map = StreamMap::new(); /// map.insert(1, pending::()); @@ -399,7 +459,7 @@ impl StreamMap { /// # Examples /// /// ``` - /// use tokio::stream::{StreamMap, pending}; + /// use tokio_stream::{StreamMap, pending}; /// /// let mut map = StreamMap::new(); /// map.insert(1, pending::()); @@ -430,7 +490,7 @@ where fn poll_next_entry(&mut self, cx: &mut Context<'_>) -> Poll> { use Poll::*; - let start = crate::util::thread_rng_n(self.entries.len() as u32) as usize; + let start = self::rand::thread_rng_n(self.entries.len() as u32) as usize; let mut idx = start; for _ in 0..self.entries.len() { @@ -467,6 +527,12 @@ where } } +impl Default for StreamMap { + fn default() -> Self { + Self::new() + } +} + impl Stream for StreamMap where K: Clone + Unpin, @@ -501,3 +567,115 @@ where ret } } + +impl std::iter::FromIterator<(K, V)> for StreamMap +where + K: Hash + Eq, +{ + fn from_iter>(iter: T) -> Self { + let iterator = iter.into_iter(); + let (lower_bound, _) = iterator.size_hint(); + let mut stream_map = Self::with_capacity(lower_bound); + + for (key, value) in iterator { + stream_map.insert(key, value); + } + + stream_map + } +} + +mod rand { + use std::cell::Cell; + + mod loom { + #[cfg(not(loom))] + pub(crate) mod rand { + use std::collections::hash_map::RandomState; + use std::hash::{BuildHasher, Hash, Hasher}; + use std::sync::atomic::AtomicU32; + use std::sync::atomic::Ordering::Relaxed; + + static COUNTER: AtomicU32 = AtomicU32::new(1); + + pub(crate) fn seed() -> u64 { + let rand_state = RandomState::new(); + + let mut hasher = rand_state.build_hasher(); + + // Hash some unique-ish data to generate some new state + COUNTER.fetch_add(1, Relaxed).hash(&mut hasher); + + // Get the seed + hasher.finish() + } + } + + #[cfg(loom)] + pub(crate) mod rand { + pub(crate) fn seed() -> u64 { + 1 + } + } + } + + /// Fast random number generate + /// + /// Implement xorshift64+: 2 32-bit xorshift sequences added together. 
+ /// Shift triplet `[17,7,16]` was calculated as indicated in Marsaglia's + /// Xorshift paper: + /// This generator passes the SmallCrush suite, part of TestU01 framework: + /// + #[derive(Debug)] + pub(crate) struct FastRand { + one: Cell, + two: Cell, + } + + impl FastRand { + /// Initialize a new, thread-local, fast random number generator. + pub(crate) fn new(seed: u64) -> FastRand { + let one = (seed >> 32) as u32; + let mut two = seed as u32; + + if two == 0 { + // This value cannot be zero + two = 1; + } + + FastRand { + one: Cell::new(one), + two: Cell::new(two), + } + } + + pub(crate) fn fastrand_n(&self, n: u32) -> u32 { + // This is similar to fastrand() % n, but faster. + // See https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/ + let mul = (self.fastrand() as u64).wrapping_mul(n as u64); + (mul >> 32) as u32 + } + + fn fastrand(&self) -> u32 { + let mut s1 = self.one.get(); + let s0 = self.two.get(); + + s1 ^= s1 << 17; + s1 = s1 ^ s0 ^ s1 >> 7 ^ s0 >> 16; + + self.one.set(s0); + self.two.set(s1); + + s0.wrapping_add(s1) + } + } + + // Used by `StreamMap` + pub(crate) fn thread_rng_n(n: u32) -> u32 { + thread_local! { + static THREAD_RNG: FastRand = FastRand::new(loom::rand::seed()); + } + + THREAD_RNG.with(|rng| rng.fastrand_n(n)) + } +} diff --git a/third_party/rust/tokio-stream/src/wrappers.rs b/third_party/rust/tokio-stream/src/wrappers.rs new file mode 100644 index 000000000000..62cabe4f7d00 --- /dev/null +++ b/third_party/rust/tokio-stream/src/wrappers.rs @@ -0,0 +1,62 @@ +//! Wrappers for Tokio types that implement `Stream`. + +/// Error types for the wrappers. +pub mod errors { + cfg_sync! { + pub use crate::wrappers::broadcast::BroadcastStreamRecvError; + } +} + +mod mpsc_bounded; +pub use mpsc_bounded::ReceiverStream; + +mod mpsc_unbounded; +pub use mpsc_unbounded::UnboundedReceiverStream; + +cfg_sync! { + mod broadcast; + pub use broadcast::BroadcastStream; + + mod watch; + pub use watch::WatchStream; +} + +cfg_signal! { + #[cfg(unix)] + mod signal_unix; + #[cfg(unix)] + pub use signal_unix::SignalStream; + + #[cfg(any(windows, docsrs))] + mod signal_windows; + #[cfg(any(windows, docsrs))] + pub use signal_windows::{CtrlCStream, CtrlBreakStream}; +} + +cfg_time! { + mod interval; + pub use interval::IntervalStream; +} + +cfg_net! { + mod tcp_listener; + pub use tcp_listener::TcpListenerStream; + + #[cfg(unix)] + mod unix_listener; + #[cfg(unix)] + pub use unix_listener::UnixListenerStream; +} + +cfg_io_util! { + mod split; + pub use split::SplitStream; + + mod lines; + pub use lines::LinesStream; +} + +cfg_fs! { + mod read_dir; + pub use read_dir::ReadDirStream; +} diff --git a/third_party/rust/tokio-stream/src/wrappers/broadcast.rs b/third_party/rust/tokio-stream/src/wrappers/broadcast.rs new file mode 100644 index 000000000000..c8346a68ec76 --- /dev/null +++ b/third_party/rust/tokio-stream/src/wrappers/broadcast.rs @@ -0,0 +1,79 @@ +use std::pin::Pin; +use tokio::sync::broadcast::error::RecvError; +use tokio::sync::broadcast::Receiver; + +use futures_core::Stream; +use tokio_util::sync::ReusableBoxFuture; + +use std::fmt; +use std::task::{Context, Poll}; + +/// A wrapper around [`tokio::sync::broadcast::Receiver`] that implements [`Stream`]. 
+/// +/// [`tokio::sync::broadcast::Receiver`]: struct@tokio::sync::broadcast::Receiver +/// [`Stream`]: trait@crate::Stream +#[cfg_attr(docsrs, doc(cfg(feature = "sync")))] +pub struct BroadcastStream { + inner: ReusableBoxFuture<(Result, Receiver)>, +} + +/// An error returned from the inner stream of a [`BroadcastStream`]. +#[derive(Debug, PartialEq)] +pub enum BroadcastStreamRecvError { + /// The receiver lagged too far behind. Attempting to receive again will + /// return the oldest message still retained by the channel. + /// + /// Includes the number of skipped messages. + Lagged(u64), +} + +impl fmt::Display for BroadcastStreamRecvError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + BroadcastStreamRecvError::Lagged(amt) => write!(f, "channel lagged by {}", amt), + } + } +} + +impl std::error::Error for BroadcastStreamRecvError {} + +async fn make_future(mut rx: Receiver) -> (Result, Receiver) { + let result = rx.recv().await; + (result, rx) +} + +impl BroadcastStream { + /// Create a new `BroadcastStream`. + pub fn new(rx: Receiver) -> Self { + Self { + inner: ReusableBoxFuture::new(make_future(rx)), + } + } +} + +impl Stream for BroadcastStream { + type Item = Result; + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let (result, rx) = ready!(self.inner.poll(cx)); + self.inner.set(make_future(rx)); + match result { + Ok(item) => Poll::Ready(Some(Ok(item))), + Err(RecvError::Closed) => Poll::Ready(None), + Err(RecvError::Lagged(n)) => { + Poll::Ready(Some(Err(BroadcastStreamRecvError::Lagged(n)))) + } + } + } +} + +impl fmt::Debug for BroadcastStream { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("BroadcastStream").finish() + } +} + +impl From> for BroadcastStream { + fn from(recv: Receiver) -> Self { + Self::new(recv) + } +} diff --git a/third_party/rust/tokio-stream/src/wrappers/interval.rs b/third_party/rust/tokio-stream/src/wrappers/interval.rs new file mode 100644 index 000000000000..2bf0194bd0fa --- /dev/null +++ b/third_party/rust/tokio-stream/src/wrappers/interval.rs @@ -0,0 +1,50 @@ +use crate::Stream; +use std::pin::Pin; +use std::task::{Context, Poll}; +use tokio::time::{Instant, Interval}; + +/// A wrapper around [`Interval`] that implements [`Stream`]. +/// +/// [`Interval`]: struct@tokio::time::Interval +/// [`Stream`]: trait@crate::Stream +#[derive(Debug)] +#[cfg_attr(docsrs, doc(cfg(feature = "time")))] +pub struct IntervalStream { + inner: Interval, +} + +impl IntervalStream { + /// Create a new `IntervalStream`. + pub fn new(interval: Interval) -> Self { + Self { inner: interval } + } + + /// Get back the inner `Interval`. 
+    pub fn into_inner(self) -> Interval {
+        self.inner
+    }
+}
+
+impl Stream for IntervalStream {
+    type Item = Instant;
+
+    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+        self.inner.poll_tick(cx).map(Some)
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        (std::usize::MAX, None)
+    }
+}
+
+impl AsRef<Interval> for IntervalStream {
+    fn as_ref(&self) -> &Interval {
+        &self.inner
+    }
+}
+
+impl AsMut<Interval> for IntervalStream {
+    fn as_mut(&mut self) -> &mut Interval {
+        &mut self.inner
+    }
+}
diff --git a/third_party/rust/tokio-stream/src/wrappers/lines.rs b/third_party/rust/tokio-stream/src/wrappers/lines.rs
new file mode 100644
index 000000000000..ad3c25349f63
--- /dev/null
+++ b/third_party/rust/tokio-stream/src/wrappers/lines.rs
@@ -0,0 +1,60 @@
+use crate::Stream;
+use pin_project_lite::pin_project;
+use std::io;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+use tokio::io::{AsyncBufRead, Lines};
+
+pin_project! {
+    /// A wrapper around [`tokio::io::Lines`] that implements [`Stream`].
+    ///
+    /// [`tokio::io::Lines`]: struct@tokio::io::Lines
+    /// [`Stream`]: trait@crate::Stream
+    #[derive(Debug)]
+    #[cfg_attr(docsrs, doc(cfg(feature = "io-util")))]
+    pub struct LinesStream<R> {
+        #[pin]
+        inner: Lines<R>,
+    }
+}
+
+impl<R> LinesStream<R> {
+    /// Create a new `LinesStream`.
+    pub fn new(lines: Lines<R>) -> Self {
+        Self { inner: lines }
+    }
+
+    /// Get back the inner `Lines`.
+    pub fn into_inner(self) -> Lines<R> {
+        self.inner
+    }
+
+    /// Obtain a pinned reference to the inner `Lines`.
+    #[allow(clippy::wrong_self_convention)] // https://github.com/rust-lang/rust-clippy/issues/4546
+    pub fn as_pin_mut(self: Pin<&mut Self>) -> Pin<&mut Lines<R>> {
+        self.project().inner
+    }
+}
+
+impl<R: AsyncBufRead> Stream for LinesStream<R> {
+    type Item = io::Result<String>;
+
+    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+        self.project()
+            .inner
+            .poll_next_line(cx)
+            .map(Result::transpose)
+    }
+}
+
+impl<R> AsRef<Lines<R>> for LinesStream<R> {
+    fn as_ref(&self) -> &Lines<R> {
+        &self.inner
+    }
+}
+
+impl<R> AsMut<Lines<R>> for LinesStream<R> {
+    fn as_mut(&mut self) -> &mut Lines<R> {
+        &mut self.inner
+    }
+}
diff --git a/third_party/rust/tokio-stream/src/wrappers/mpsc_bounded.rs b/third_party/rust/tokio-stream/src/wrappers/mpsc_bounded.rs
new file mode 100644
index 000000000000..b5362680ee98
--- /dev/null
+++ b/third_party/rust/tokio-stream/src/wrappers/mpsc_bounded.rs
@@ -0,0 +1,65 @@
+use crate::Stream;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+use tokio::sync::mpsc::Receiver;
+
+/// A wrapper around [`tokio::sync::mpsc::Receiver`] that implements [`Stream`].
+///
+/// [`tokio::sync::mpsc::Receiver`]: struct@tokio::sync::mpsc::Receiver
+/// [`Stream`]: trait@crate::Stream
+#[derive(Debug)]
+pub struct ReceiverStream<T> {
+    inner: Receiver<T>,
+}
+
+impl<T> ReceiverStream<T> {
+    /// Create a new `ReceiverStream`.
+    pub fn new(recv: Receiver<T>) -> Self {
+        Self { inner: recv }
+    }
+
+    /// Get back the inner `Receiver`.
+    pub fn into_inner(self) -> Receiver<T> {
+        self.inner
+    }
+
+    /// Closes the receiving half of a channel without dropping it.
+    ///
+    /// This prevents any further messages from being sent on the channel while
+    /// still enabling the receiver to drain messages that are buffered. Any
+    /// outstanding [`Permit`] values will still be able to send messages.
+    ///
+    /// To guarantee no messages are dropped, after calling `close()`, you must
+    /// receive all items from the stream until `None` is returned.
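As a point of reference for reviewers, a minimal sketch of how `ReceiverStream` is typically consumed: it assumes a Tokio runtime (the `macros` and `rt` features) plus the vendored `tokio-stream`, and is illustrative rather than part of the vendored file.

use tokio::sync::mpsc;
use tokio_stream::{wrappers::ReceiverStream, StreamExt};

#[tokio::main]
async fn main() {
    let (tx, rx) = mpsc::channel::<u32>(4);
    tx.send(1).await.unwrap();
    tx.send(2).await.unwrap();
    drop(tx); // close the channel so the stream can terminate

    // `ReceiverStream` turns the mpsc receiver into a `Stream`;
    // items buffered before the close are still yielded before `None`.
    let mut stream = ReceiverStream::new(rx);
    while let Some(v) = stream.next().await {
        println!("got {}", v);
    }
}

Dropping the sender (or calling `close()` and then draining) is what lets the `while let` loop observe `None` and exit.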
+ /// + /// [`Permit`]: struct@tokio::sync::mpsc::Permit + pub fn close(&mut self) { + self.inner.close() + } +} + +impl Stream for ReceiverStream { + type Item = T; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_recv(cx) + } +} + +impl AsRef> for ReceiverStream { + fn as_ref(&self) -> &Receiver { + &self.inner + } +} + +impl AsMut> for ReceiverStream { + fn as_mut(&mut self) -> &mut Receiver { + &mut self.inner + } +} + +impl From> for ReceiverStream { + fn from(recv: Receiver) -> Self { + Self::new(recv) + } +} diff --git a/third_party/rust/tokio-stream/src/wrappers/mpsc_unbounded.rs b/third_party/rust/tokio-stream/src/wrappers/mpsc_unbounded.rs new file mode 100644 index 000000000000..54597b7f6fe9 --- /dev/null +++ b/third_party/rust/tokio-stream/src/wrappers/mpsc_unbounded.rs @@ -0,0 +1,59 @@ +use crate::Stream; +use std::pin::Pin; +use std::task::{Context, Poll}; +use tokio::sync::mpsc::UnboundedReceiver; + +/// A wrapper around [`tokio::sync::mpsc::UnboundedReceiver`] that implements [`Stream`]. +/// +/// [`tokio::sync::mpsc::UnboundedReceiver`]: struct@tokio::sync::mpsc::UnboundedReceiver +/// [`Stream`]: trait@crate::Stream +#[derive(Debug)] +pub struct UnboundedReceiverStream { + inner: UnboundedReceiver, +} + +impl UnboundedReceiverStream { + /// Create a new `UnboundedReceiverStream`. + pub fn new(recv: UnboundedReceiver) -> Self { + Self { inner: recv } + } + + /// Get back the inner `UnboundedReceiver`. + pub fn into_inner(self) -> UnboundedReceiver { + self.inner + } + + /// Closes the receiving half of a channel without dropping it. + /// + /// This prevents any further messages from being sent on the channel while + /// still enabling the receiver to drain messages that are buffered. + pub fn close(&mut self) { + self.inner.close() + } +} + +impl Stream for UnboundedReceiverStream { + type Item = T; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_recv(cx) + } +} + +impl AsRef> for UnboundedReceiverStream { + fn as_ref(&self) -> &UnboundedReceiver { + &self.inner + } +} + +impl AsMut> for UnboundedReceiverStream { + fn as_mut(&mut self) -> &mut UnboundedReceiver { + &mut self.inner + } +} + +impl From> for UnboundedReceiverStream { + fn from(recv: UnboundedReceiver) -> Self { + Self::new(recv) + } +} diff --git a/third_party/rust/tokio-stream/src/wrappers/read_dir.rs b/third_party/rust/tokio-stream/src/wrappers/read_dir.rs new file mode 100644 index 000000000000..b5cf54f79e19 --- /dev/null +++ b/third_party/rust/tokio-stream/src/wrappers/read_dir.rs @@ -0,0 +1,47 @@ +use crate::Stream; +use std::io; +use std::pin::Pin; +use std::task::{Context, Poll}; +use tokio::fs::{DirEntry, ReadDir}; + +/// A wrapper around [`tokio::fs::ReadDir`] that implements [`Stream`]. +/// +/// [`tokio::fs::ReadDir`]: struct@tokio::fs::ReadDir +/// [`Stream`]: trait@crate::Stream +#[derive(Debug)] +#[cfg_attr(docsrs, doc(cfg(feature = "fs")))] +pub struct ReadDirStream { + inner: ReadDir, +} + +impl ReadDirStream { + /// Create a new `ReadDirStream`. + pub fn new(read_dir: ReadDir) -> Self { + Self { inner: read_dir } + } + + /// Get back the inner `ReadDir`. 
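Similarly, a rough sketch of driving `ReadDirStream` (assuming Tokio's `fs` feature and `tokio-stream` built with its `fs` feature; again illustrative, not vendored code):

use tokio::fs;
use tokio_stream::{wrappers::ReadDirStream, StreamExt};

#[tokio::main]
async fn main() -> std::io::Result<()> {
    // `fs::read_dir` yields a `ReadDir`; the wrapper exposes it as a
    // `Stream` of `io::Result<DirEntry>`.
    let mut entries = ReadDirStream::new(fs::read_dir(".").await?);
    while let Some(entry) = entries.next().await {
        println!("{}", entry?.path().display());
    }
    Ok(())
}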
+ pub fn into_inner(self) -> ReadDir { + self.inner + } +} + +impl Stream for ReadDirStream { + type Item = io::Result; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_next_entry(cx).map(Result::transpose) + } +} + +impl AsRef for ReadDirStream { + fn as_ref(&self) -> &ReadDir { + &self.inner + } +} + +impl AsMut for ReadDirStream { + fn as_mut(&mut self) -> &mut ReadDir { + &mut self.inner + } +} diff --git a/third_party/rust/tokio-stream/src/wrappers/signal_unix.rs b/third_party/rust/tokio-stream/src/wrappers/signal_unix.rs new file mode 100644 index 000000000000..2f74e7d1527f --- /dev/null +++ b/third_party/rust/tokio-stream/src/wrappers/signal_unix.rs @@ -0,0 +1,46 @@ +use crate::Stream; +use std::pin::Pin; +use std::task::{Context, Poll}; +use tokio::signal::unix::Signal; + +/// A wrapper around [`Signal`] that implements [`Stream`]. +/// +/// [`Signal`]: struct@tokio::signal::unix::Signal +/// [`Stream`]: trait@crate::Stream +#[derive(Debug)] +#[cfg_attr(docsrs, doc(cfg(all(unix, feature = "signal"))))] +pub struct SignalStream { + inner: Signal, +} + +impl SignalStream { + /// Create a new `SignalStream`. + pub fn new(interval: Signal) -> Self { + Self { inner: interval } + } + + /// Get back the inner `Signal`. + pub fn into_inner(self) -> Signal { + self.inner + } +} + +impl Stream for SignalStream { + type Item = (); + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_recv(cx) + } +} + +impl AsRef for SignalStream { + fn as_ref(&self) -> &Signal { + &self.inner + } +} + +impl AsMut for SignalStream { + fn as_mut(&mut self) -> &mut Signal { + &mut self.inner + } +} diff --git a/third_party/rust/tokio-stream/src/wrappers/signal_windows.rs b/third_party/rust/tokio-stream/src/wrappers/signal_windows.rs new file mode 100644 index 000000000000..4631fbad8dc2 --- /dev/null +++ b/third_party/rust/tokio-stream/src/wrappers/signal_windows.rs @@ -0,0 +1,88 @@ +use crate::Stream; +use std::pin::Pin; +use std::task::{Context, Poll}; +use tokio::signal::windows::{CtrlBreak, CtrlC}; + +/// A wrapper around [`CtrlC`] that implements [`Stream`]. +/// +/// [`CtrlC`]: struct@tokio::signal::windows::CtrlC +/// [`Stream`]: trait@crate::Stream +#[derive(Debug)] +#[cfg_attr(docsrs, doc(cfg(all(windows, feature = "signal"))))] +pub struct CtrlCStream { + inner: CtrlC, +} + +impl CtrlCStream { + /// Create a new `CtrlCStream`. + pub fn new(interval: CtrlC) -> Self { + Self { inner: interval } + } + + /// Get back the inner `CtrlC`. + pub fn into_inner(self) -> CtrlC { + self.inner + } +} + +impl Stream for CtrlCStream { + type Item = (); + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_recv(cx) + } +} + +impl AsRef for CtrlCStream { + fn as_ref(&self) -> &CtrlC { + &self.inner + } +} + +impl AsMut for CtrlCStream { + fn as_mut(&mut self) -> &mut CtrlC { + &mut self.inner + } +} + +/// A wrapper around [`CtrlBreak`] that implements [`Stream`]. +/// +/// [`CtrlBreak`]: struct@tokio::signal::windows::CtrlBreak +/// [`Stream`]: trait@crate::Stream +#[derive(Debug)] +#[cfg_attr(docsrs, doc(cfg(all(windows, feature = "signal"))))] +pub struct CtrlBreakStream { + inner: CtrlBreak, +} + +impl CtrlBreakStream { + /// Create a new `CtrlBreakStream`. + pub fn new(interval: CtrlBreak) -> Self { + Self { inner: interval } + } + + /// Get back the inner `CtrlBreak`. 
+ pub fn into_inner(self) -> CtrlBreak { + self.inner + } +} + +impl Stream for CtrlBreakStream { + type Item = (); + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_recv(cx) + } +} + +impl AsRef for CtrlBreakStream { + fn as_ref(&self) -> &CtrlBreak { + &self.inner + } +} + +impl AsMut for CtrlBreakStream { + fn as_mut(&mut self) -> &mut CtrlBreak { + &mut self.inner + } +} diff --git a/third_party/rust/tokio-stream/src/wrappers/split.rs b/third_party/rust/tokio-stream/src/wrappers/split.rs new file mode 100644 index 000000000000..5a6bb2d408c5 --- /dev/null +++ b/third_party/rust/tokio-stream/src/wrappers/split.rs @@ -0,0 +1,60 @@ +use crate::Stream; +use pin_project_lite::pin_project; +use std::io; +use std::pin::Pin; +use std::task::{Context, Poll}; +use tokio::io::{AsyncBufRead, Split}; + +pin_project! { + /// A wrapper around [`tokio::io::Split`] that implements [`Stream`]. + /// + /// [`tokio::io::Split`]: struct@tokio::io::Split + /// [`Stream`]: trait@crate::Stream + #[derive(Debug)] + #[cfg_attr(docsrs, doc(cfg(feature = "io-util")))] + pub struct SplitStream { + #[pin] + inner: Split, + } +} + +impl SplitStream { + /// Create a new `SplitStream`. + pub fn new(split: Split) -> Self { + Self { inner: split } + } + + /// Get back the inner `Split`. + pub fn into_inner(self) -> Split { + self.inner + } + + /// Obtain a pinned reference to the inner `Split`. + #[allow(clippy::wrong_self_convention)] // https://github.com/rust-lang/rust-clippy/issues/4546 + pub fn as_pin_mut(self: Pin<&mut Self>) -> Pin<&mut Split> { + self.project().inner + } +} + +impl Stream for SplitStream { + type Item = io::Result>; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + self.project() + .inner + .poll_next_segment(cx) + .map(Result::transpose) + } +} + +impl AsRef> for SplitStream { + fn as_ref(&self) -> &Split { + &self.inner + } +} + +impl AsMut> for SplitStream { + fn as_mut(&mut self) -> &mut Split { + &mut self.inner + } +} diff --git a/third_party/rust/tokio-stream/src/wrappers/tcp_listener.rs b/third_party/rust/tokio-stream/src/wrappers/tcp_listener.rs new file mode 100644 index 000000000000..ce7cb1635071 --- /dev/null +++ b/third_party/rust/tokio-stream/src/wrappers/tcp_listener.rs @@ -0,0 +1,54 @@ +use crate::Stream; +use std::io; +use std::pin::Pin; +use std::task::{Context, Poll}; +use tokio::net::{TcpListener, TcpStream}; + +/// A wrapper around [`TcpListener`] that implements [`Stream`]. +/// +/// [`TcpListener`]: struct@tokio::net::TcpListener +/// [`Stream`]: trait@crate::Stream +#[derive(Debug)] +#[cfg_attr(docsrs, doc(cfg(feature = "net")))] +pub struct TcpListenerStream { + inner: TcpListener, +} + +impl TcpListenerStream { + /// Create a new `TcpListenerStream`. + pub fn new(listener: TcpListener) -> Self { + Self { inner: listener } + } + + /// Get back the inner `TcpListener`. 
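And a minimal accept-loop sketch for `TcpListenerStream` (assuming Tokio's `net` feature and `tokio-stream` with its `net` feature; illustrative only):

use tokio::net::TcpListener;
use tokio_stream::{wrappers::TcpListenerStream, StreamExt};

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let listener = TcpListener::bind("127.0.0.1:0").await?;
    // Each stream item is an `io::Result<TcpStream>` from `poll_accept`.
    let mut incoming = TcpListenerStream::new(listener);
    while let Some(conn) = incoming.next().await {
        let socket = conn?;
        println!("accepted connection from {}", socket.peer_addr()?);
    }
    Ok(())
}

The stream never ends on its own, so a real server would typically spawn a task per accepted connection inside the loop.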
+ pub fn into_inner(self) -> TcpListener { + self.inner + } +} + +impl Stream for TcpListenerStream { + type Item = io::Result; + + fn poll_next( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll>> { + match self.inner.poll_accept(cx) { + Poll::Ready(Ok((stream, _))) => Poll::Ready(Some(Ok(stream))), + Poll::Ready(Err(err)) => Poll::Ready(Some(Err(err))), + Poll::Pending => Poll::Pending, + } + } +} + +impl AsRef for TcpListenerStream { + fn as_ref(&self) -> &TcpListener { + &self.inner + } +} + +impl AsMut for TcpListenerStream { + fn as_mut(&mut self) -> &mut TcpListener { + &mut self.inner + } +} diff --git a/third_party/rust/tokio-stream/src/wrappers/unix_listener.rs b/third_party/rust/tokio-stream/src/wrappers/unix_listener.rs new file mode 100644 index 000000000000..0beba588c202 --- /dev/null +++ b/third_party/rust/tokio-stream/src/wrappers/unix_listener.rs @@ -0,0 +1,54 @@ +use crate::Stream; +use std::io; +use std::pin::Pin; +use std::task::{Context, Poll}; +use tokio::net::{UnixListener, UnixStream}; + +/// A wrapper around [`UnixListener`] that implements [`Stream`]. +/// +/// [`UnixListener`]: struct@tokio::net::UnixListener +/// [`Stream`]: trait@crate::Stream +#[derive(Debug)] +#[cfg_attr(docsrs, doc(cfg(all(unix, feature = "net"))))] +pub struct UnixListenerStream { + inner: UnixListener, +} + +impl UnixListenerStream { + /// Create a new `UnixListenerStream`. + pub fn new(listener: UnixListener) -> Self { + Self { inner: listener } + } + + /// Get back the inner `UnixListener`. + pub fn into_inner(self) -> UnixListener { + self.inner + } +} + +impl Stream for UnixListenerStream { + type Item = io::Result; + + fn poll_next( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll>> { + match self.inner.poll_accept(cx) { + Poll::Ready(Ok((stream, _))) => Poll::Ready(Some(Ok(stream))), + Poll::Ready(Err(err)) => Poll::Ready(Some(Err(err))), + Poll::Pending => Poll::Pending, + } + } +} + +impl AsRef for UnixListenerStream { + fn as_ref(&self) -> &UnixListener { + &self.inner + } +} + +impl AsMut for UnixListenerStream { + fn as_mut(&mut self) -> &mut UnixListener { + &mut self.inner + } +} diff --git a/third_party/rust/tokio-stream/src/wrappers/watch.rs b/third_party/rust/tokio-stream/src/wrappers/watch.rs new file mode 100644 index 000000000000..bd3a18a58314 --- /dev/null +++ b/third_party/rust/tokio-stream/src/wrappers/watch.rs @@ -0,0 +1,102 @@ +use std::pin::Pin; +use tokio::sync::watch::Receiver; + +use futures_core::Stream; +use tokio_util::sync::ReusableBoxFuture; + +use std::fmt; +use std::task::{Context, Poll}; +use tokio::sync::watch::error::RecvError; + +/// A wrapper around [`tokio::sync::watch::Receiver`] that implements [`Stream`]. +/// +/// This stream will always start by yielding the current value when the WatchStream is polled, +/// regardless of whether it was the initial value or sent afterwards. 
+/// +/// # Examples +/// +/// ``` +/// # #[tokio::main] +/// # async fn main() { +/// use tokio_stream::{StreamExt, wrappers::WatchStream}; +/// use tokio::sync::watch; +/// +/// let (tx, rx) = watch::channel("hello"); +/// let mut rx = WatchStream::new(rx); +/// +/// assert_eq!(rx.next().await, Some("hello")); +/// +/// tx.send("goodbye").unwrap(); +/// assert_eq!(rx.next().await, Some("goodbye")); +/// # } +/// ``` +/// +/// ``` +/// # #[tokio::main] +/// # async fn main() { +/// use tokio_stream::{StreamExt, wrappers::WatchStream}; +/// use tokio::sync::watch; +/// +/// let (tx, rx) = watch::channel("hello"); +/// let mut rx = WatchStream::new(rx); +/// +/// tx.send("goodbye").unwrap(); +/// assert_eq!(rx.next().await, Some("goodbye")); +/// # } +/// ``` +/// +/// [`tokio::sync::watch::Receiver`]: struct@tokio::sync::watch::Receiver +/// [`Stream`]: trait@crate::Stream +#[cfg_attr(docsrs, doc(cfg(feature = "sync")))] +pub struct WatchStream { + inner: ReusableBoxFuture<(Result<(), RecvError>, Receiver)>, +} + +async fn make_future( + mut rx: Receiver, +) -> (Result<(), RecvError>, Receiver) { + let result = rx.changed().await; + (result, rx) +} + +impl WatchStream { + /// Create a new `WatchStream`. + pub fn new(rx: Receiver) -> Self { + Self { + inner: ReusableBoxFuture::new(async move { (Ok(()), rx) }), + } + } +} + +impl Stream for WatchStream { + type Item = T; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let (result, mut rx) = ready!(self.inner.poll(cx)); + match result { + Ok(_) => { + let received = (*rx.borrow_and_update()).clone(); + self.inner.set(make_future(rx)); + Poll::Ready(Some(received)) + } + Err(_) => { + self.inner.set(make_future(rx)); + Poll::Ready(None) + } + } + } +} + +impl Unpin for WatchStream {} + +impl fmt::Debug for WatchStream { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("WatchStream").finish() + } +} + +impl From> for WatchStream { + fn from(recv: Receiver) -> Self { + Self::new(recv) + } +} diff --git a/third_party/rust/tokio-stream/tests/async_send_sync.rs b/third_party/rust/tokio-stream/tests/async_send_sync.rs new file mode 100644 index 000000000000..f1c8b4efe252 --- /dev/null +++ b/third_party/rust/tokio-stream/tests/async_send_sync.rs @@ -0,0 +1,107 @@ +#![allow(clippy::diverging_sub_expression)] + +use std::rc::Rc; + +#[allow(dead_code)] +type BoxStream = std::pin::Pin>>; + +#[allow(dead_code)] +fn require_send(_t: &T) {} +#[allow(dead_code)] +fn require_sync(_t: &T) {} +#[allow(dead_code)] +fn require_unpin(_t: &T) {} + +#[allow(dead_code)] +struct Invalid; + +trait AmbiguousIfSend { + fn some_item(&self) {} +} +impl AmbiguousIfSend<()> for T {} +impl AmbiguousIfSend for T {} + +trait AmbiguousIfSync { + fn some_item(&self) {} +} +impl AmbiguousIfSync<()> for T {} +impl AmbiguousIfSync for T {} + +trait AmbiguousIfUnpin { + fn some_item(&self) {} +} +impl AmbiguousIfUnpin<()> for T {} +impl AmbiguousIfUnpin for T {} + +macro_rules! into_todo { + ($typ:ty) => {{ + let x: $typ = todo!(); + x + }}; +} + +macro_rules! async_assert_fn { + ($($f:ident $(< $($generic:ty),* > )? )::+($($arg:ty),*): Send & Sync) => { + #[allow(unreachable_code)] + #[allow(unused_variables)] + const _: fn() = || { + let f = $($f $(::<$($generic),*>)? )::+( $( into_todo!($arg) ),* ); + require_send(&f); + require_sync(&f); + }; + }; + ($($f:ident $(< $($generic:ty),* > )? 
)::+($($arg:ty),*): Send & !Sync) => { + #[allow(unreachable_code)] + #[allow(unused_variables)] + const _: fn() = || { + let f = $($f $(::<$($generic),*>)? )::+( $( into_todo!($arg) ),* ); + require_send(&f); + AmbiguousIfSync::some_item(&f); + }; + }; + ($($f:ident $(< $($generic:ty),* > )? )::+($($arg:ty),*): !Send & Sync) => { + #[allow(unreachable_code)] + #[allow(unused_variables)] + const _: fn() = || { + let f = $($f $(::<$($generic),*>)? )::+( $( into_todo!($arg) ),* ); + AmbiguousIfSend::some_item(&f); + require_sync(&f); + }; + }; + ($($f:ident $(< $($generic:ty),* > )? )::+($($arg:ty),*): !Send & !Sync) => { + #[allow(unreachable_code)] + #[allow(unused_variables)] + const _: fn() = || { + let f = $($f $(::<$($generic),*>)? )::+( $( into_todo!($arg) ),* ); + AmbiguousIfSend::some_item(&f); + AmbiguousIfSync::some_item(&f); + }; + }; + ($($f:ident $(< $($generic:ty),* > )? )::+($($arg:ty),*): !Unpin) => { + #[allow(unreachable_code)] + #[allow(unused_variables)] + const _: fn() = || { + let f = $($f $(::<$($generic),*>)? )::+( $( into_todo!($arg) ),* ); + AmbiguousIfUnpin::some_item(&f); + }; + }; + ($($f:ident $(< $($generic:ty),* > )? )::+($($arg:ty),*): Unpin) => { + #[allow(unreachable_code)] + #[allow(unused_variables)] + const _: fn() = || { + let f = $($f $(::<$($generic),*>)? )::+( $( into_todo!($arg) ),* ); + require_unpin(&f); + }; + }; +} + +async_assert_fn!(tokio_stream::empty>(): Send & Sync); +async_assert_fn!(tokio_stream::pending>(): Send & Sync); +async_assert_fn!(tokio_stream::iter(std::vec::IntoIter): Send & Sync); + +async_assert_fn!(tokio_stream::StreamExt::next(&mut BoxStream<()>): !Unpin); +async_assert_fn!(tokio_stream::StreamExt::try_next(&mut BoxStream>): !Unpin); +async_assert_fn!(tokio_stream::StreamExt::all(&mut BoxStream<()>, fn(())->bool): !Unpin); +async_assert_fn!(tokio_stream::StreamExt::any(&mut BoxStream<()>, fn(())->bool): !Unpin); +async_assert_fn!(tokio_stream::StreamExt::fold(&mut BoxStream<()>, (), fn((), ())->()): !Unpin); +async_assert_fn!(tokio_stream::StreamExt::collect>(&mut BoxStream<()>): !Unpin); diff --git a/third_party/rust/tokio-0.2.25/tests/stream_chain.rs b/third_party/rust/tokio-stream/tests/stream_chain.rs similarity index 85% rename from third_party/rust/tokio-0.2.25/tests/stream_chain.rs rename to third_party/rust/tokio-stream/tests/stream_chain.rs index 98461a8ccb36..f3b7edb16a6f 100644 --- a/third_party/rust/tokio-0.2.25/tests/stream_chain.rs +++ b/third_party/rust/tokio-stream/tests/stream_chain.rs @@ -1,7 +1,12 @@ -use tokio::stream::{self, Stream, StreamExt}; -use tokio::sync::mpsc; +use tokio_stream::{self as stream, Stream, StreamExt}; use tokio_test::{assert_pending, assert_ready, task}; +mod support { + pub(crate) mod mpsc; +} + +use support::mpsc; + #[tokio::test] async fn basic_usage() { let one = stream::iter(vec![1, 2, 3]); @@ -36,8 +41,8 @@ async fn basic_usage() { #[tokio::test] async fn pending_first() { - let (tx1, rx1) = mpsc::unbounded_channel(); - let (tx2, rx2) = mpsc::unbounded_channel(); + let (tx1, rx1) = mpsc::unbounded_channel_stream(); + let (tx2, rx2) = mpsc::unbounded_channel_stream(); let mut stream = task::spawn(rx1.chain(rx2)); assert_eq!(stream.size_hint(), (0, None)); @@ -74,7 +79,7 @@ async fn pending_first() { fn size_overflow() { struct Monster; - impl tokio::stream::Stream for Monster { + impl tokio_stream::Stream for Monster { type Item = (); fn poll_next( self: std::pin::Pin<&mut Self>, @@ -84,12 +89,12 @@ fn size_overflow() { } fn size_hint(&self) -> (usize, Option) { - 
(usize::max_value(), Some(usize::max_value())) + (usize::MAX, Some(usize::MAX)) } } let m1 = Monster; let m2 = Monster; let m = m1.chain(m2); - assert_eq!(m.size_hint(), (usize::max_value(), None)); + assert_eq!(m.size_hint(), (usize::MAX, None)); } diff --git a/third_party/rust/tokio-0.2.25/tests/stream_collect.rs b/third_party/rust/tokio-stream/tests/stream_collect.rs similarity index 73% rename from third_party/rust/tokio-0.2.25/tests/stream_collect.rs rename to third_party/rust/tokio-stream/tests/stream_collect.rs index 70051e7f6732..07659a1fc3df 100644 --- a/third_party/rust/tokio-0.2.25/tests/stream_collect.rs +++ b/third_party/rust/tokio-stream/tests/stream_collect.rs @@ -1,8 +1,11 @@ -use tokio::stream::{self, StreamExt}; -use tokio::sync::mpsc; +use tokio_stream::{self as stream, StreamExt}; use tokio_test::{assert_pending, assert_ready, assert_ready_err, assert_ready_ok, task}; -use bytes::{Bytes, BytesMut}; +mod support { + pub(crate) mod mpsc; +} + +use support::mpsc; #[allow(clippy::let_unit_value)] #[tokio::test] @@ -25,18 +28,6 @@ async fn empty_box_slice() { assert!(coll.is_empty()); } -#[tokio::test] -async fn empty_bytes() { - let coll: Bytes = stream::empty::<&[u8]>().collect().await; - assert!(coll.is_empty()); -} - -#[tokio::test] -async fn empty_bytes_mut() { - let coll: BytesMut = stream::empty::<&[u8]>().collect().await; - assert!(coll.is_empty()); -} - #[tokio::test] async fn empty_string() { let coll: String = stream::empty::<&str>().collect().await; @@ -51,7 +42,7 @@ async fn empty_result() { #[tokio::test] async fn collect_vec_items() { - let (tx, rx) = mpsc::unbounded_channel(); + let (tx, rx) = mpsc::unbounded_channel_stream(); let mut fut = task::spawn(rx.collect::>()); assert_pending!(fut.poll()); @@ -72,7 +63,8 @@ async fn collect_vec_items() { #[tokio::test] async fn collect_string_items() { - let (tx, rx) = mpsc::unbounded_channel(); + let (tx, rx) = mpsc::unbounded_channel_stream(); + let mut fut = task::spawn(rx.collect::()); assert_pending!(fut.poll()); @@ -93,7 +85,8 @@ async fn collect_string_items() { #[tokio::test] async fn collect_str_items() { - let (tx, rx) = mpsc::unbounded_channel(); + let (tx, rx) = mpsc::unbounded_channel_stream(); + let mut fut = task::spawn(rx.collect::()); assert_pending!(fut.poll()); @@ -112,30 +105,10 @@ async fn collect_str_items() { assert_eq!("hello world", coll); } -#[tokio::test] -async fn collect_bytes() { - let (tx, rx) = mpsc::unbounded_channel(); - let mut fut = task::spawn(rx.collect::()); - - assert_pending!(fut.poll()); - - tx.send(&b"hello "[..]).unwrap(); - assert!(fut.is_woken()); - assert_pending!(fut.poll()); - - tx.send(&b"world"[..]).unwrap(); - assert!(fut.is_woken()); - assert_pending!(fut.poll()); - - drop(tx); - assert!(fut.is_woken()); - let coll = assert_ready!(fut.poll()); - assert_eq!(&b"hello world"[..], coll); -} - #[tokio::test] async fn collect_results_ok() { - let (tx, rx) = mpsc::unbounded_channel(); + let (tx, rx) = mpsc::unbounded_channel_stream(); + let mut fut = task::spawn(rx.collect::>()); assert_pending!(fut.poll()); @@ -156,7 +129,8 @@ async fn collect_results_ok() { #[tokio::test] async fn collect_results_err() { - let (tx, rx) = mpsc::unbounded_channel(); + let (tx, rx) = mpsc::unbounded_channel_stream(); + let mut fut = task::spawn(rx.collect::>()); assert_pending!(fut.poll()); diff --git a/third_party/rust/tokio-0.2.25/tests/stream_empty.rs b/third_party/rust/tokio-stream/tests/stream_empty.rs similarity index 79% rename from 
third_party/rust/tokio-0.2.25/tests/stream_empty.rs rename to third_party/rust/tokio-stream/tests/stream_empty.rs index f278076d1ae5..c06f5c41c01f 100644 --- a/third_party/rust/tokio-0.2.25/tests/stream_empty.rs +++ b/third_party/rust/tokio-stream/tests/stream_empty.rs @@ -1,4 +1,4 @@ -use tokio::stream::{self, Stream, StreamExt}; +use tokio_stream::{self as stream, Stream, StreamExt}; #[tokio::test] async fn basic_usage() { diff --git a/third_party/rust/tokio-0.2.25/tests/stream_fuse.rs b/third_party/rust/tokio-stream/tests/stream_fuse.rs similarity index 96% rename from third_party/rust/tokio-0.2.25/tests/stream_fuse.rs rename to third_party/rust/tokio-stream/tests/stream_fuse.rs index 9d7d969f8ba6..9b6cf054cfdc 100644 --- a/third_party/rust/tokio-0.2.25/tests/stream_fuse.rs +++ b/third_party/rust/tokio-stream/tests/stream_fuse.rs @@ -1,4 +1,4 @@ -use tokio::stream::{Stream, StreamExt}; +use tokio_stream::{Stream, StreamExt}; use std::pin::Pin; use std::task::{Context, Poll}; diff --git a/third_party/rust/tokio-0.2.25/tests/stream_iter.rs b/third_party/rust/tokio-stream/tests/stream_iter.rs similarity index 91% rename from third_party/rust/tokio-0.2.25/tests/stream_iter.rs rename to third_party/rust/tokio-stream/tests/stream_iter.rs index 45148a7a8b23..8b9ee3ce5b71 100644 --- a/third_party/rust/tokio-0.2.25/tests/stream_iter.rs +++ b/third_party/rust/tokio-stream/tests/stream_iter.rs @@ -1,4 +1,4 @@ -use tokio::stream; +use tokio_stream as stream; use tokio_test::task; use std::iter; diff --git a/third_party/rust/tokio-0.2.25/tests/stream_merge.rs b/third_party/rust/tokio-stream/tests/stream_merge.rs similarity index 81% rename from third_party/rust/tokio-0.2.25/tests/stream_merge.rs rename to third_party/rust/tokio-stream/tests/stream_merge.rs index 45ecdcb6625a..f603bccf8874 100644 --- a/third_party/rust/tokio-0.2.25/tests/stream_merge.rs +++ b/third_party/rust/tokio-stream/tests/stream_merge.rs @@ -1,8 +1,13 @@ -use tokio::stream::{self, Stream, StreamExt}; -use tokio::sync::mpsc; +use tokio_stream::{self as stream, Stream, StreamExt}; use tokio_test::task; use tokio_test::{assert_pending, assert_ready}; +mod support { + pub(crate) mod mpsc; +} + +use support::mpsc; + #[tokio::test] async fn merge_sync_streams() { let mut s = stream::iter(vec![0, 2, 4, 6]).merge(stream::iter(vec![1, 3, 5])); @@ -18,8 +23,8 @@ async fn merge_sync_streams() { #[tokio::test] async fn merge_async_streams() { - let (tx1, rx1) = mpsc::unbounded_channel(); - let (tx2, rx2) = mpsc::unbounded_channel(); + let (tx1, rx1) = mpsc::unbounded_channel_stream(); + let (tx2, rx2) = mpsc::unbounded_channel_stream(); let mut rx = task::spawn(rx1.merge(rx2)); @@ -57,7 +62,7 @@ async fn merge_async_streams() { fn size_overflow() { struct Monster; - impl tokio::stream::Stream for Monster { + impl tokio_stream::Stream for Monster { type Item = (); fn poll_next( self: std::pin::Pin<&mut Self>, @@ -67,12 +72,12 @@ fn size_overflow() { } fn size_hint(&self) -> (usize, Option) { - (usize::max_value(), Some(usize::max_value())) + (usize::MAX, Some(usize::MAX)) } } let m1 = Monster; let m2 = Monster; let m = m1.merge(m2); - assert_eq!(m.size_hint(), (usize::max_value(), None)); + assert_eq!(m.size_hint(), (usize::MAX, None)); } diff --git a/third_party/rust/tokio-0.2.25/tests/stream_once.rs b/third_party/rust/tokio-stream/tests/stream_once.rs similarity index 82% rename from third_party/rust/tokio-0.2.25/tests/stream_once.rs rename to third_party/rust/tokio-stream/tests/stream_once.rs index bb4635ac9efe..f32bad3a1208 100644 --- 
a/third_party/rust/tokio-0.2.25/tests/stream_once.rs +++ b/third_party/rust/tokio-stream/tests/stream_once.rs @@ -1,4 +1,4 @@ -use tokio::stream::{self, Stream, StreamExt}; +use tokio_stream::{self as stream, Stream, StreamExt}; #[tokio::test] async fn basic_usage() { diff --git a/third_party/rust/tokio-0.2.25/tests/stream_pending.rs b/third_party/rust/tokio-stream/tests/stream_pending.rs similarity index 85% rename from third_party/rust/tokio-0.2.25/tests/stream_pending.rs rename to third_party/rust/tokio-stream/tests/stream_pending.rs index f4d3080de822..87b5d03bda25 100644 --- a/third_party/rust/tokio-0.2.25/tests/stream_pending.rs +++ b/third_party/rust/tokio-stream/tests/stream_pending.rs @@ -1,4 +1,4 @@ -use tokio::stream::{self, Stream, StreamExt}; +use tokio_stream::{self as stream, Stream, StreamExt}; use tokio_test::{assert_pending, task}; #[tokio::test] diff --git a/third_party/rust/tokio-0.2.25/tests/stream_stream_map.rs b/third_party/rust/tokio-stream/tests/stream_stream_map.rs similarity index 90% rename from third_party/rust/tokio-0.2.25/tests/stream_stream_map.rs rename to third_party/rust/tokio-stream/tests/stream_stream_map.rs index ccfae00d2a14..53f3d86c7689 100644 --- a/third_party/rust/tokio-0.2.25/tests/stream_stream_map.rs +++ b/third_party/rust/tokio-stream/tests/stream_stream_map.rs @@ -1,9 +1,12 @@ -#![allow(clippy::stable_sort_primitive)] - -use tokio::stream::{self, pending, Stream, StreamExt, StreamMap}; -use tokio::sync::mpsc; +use tokio_stream::{self as stream, pending, Stream, StreamExt, StreamMap}; use tokio_test::{assert_ok, assert_pending, assert_ready, task}; +mod support { + pub(crate) mod mpsc; +} + +use support::mpsc; + use std::pin::Pin; macro_rules! assert_ready_some { @@ -40,7 +43,8 @@ async fn empty() { #[tokio::test] async fn single_entry() { let mut map = task::spawn(StreamMap::new()); - let (tx, rx) = mpsc::unbounded_channel(); + let (tx, rx) = mpsc::unbounded_channel_stream(); + let rx = Box::pin(rx); assert_ready_none!(map.poll_next()); @@ -78,8 +82,11 @@ async fn single_entry() { #[tokio::test] async fn multiple_entries() { let mut map = task::spawn(StreamMap::new()); - let (tx1, rx1) = mpsc::unbounded_channel(); - let (tx2, rx2) = mpsc::unbounded_channel(); + let (tx1, rx1) = mpsc::unbounded_channel_stream(); + let (tx2, rx2) = mpsc::unbounded_channel_stream(); + + let rx1 = Box::pin(rx1); + let rx2 = Box::pin(rx2); map.insert("foo", rx1); map.insert("bar", rx2); @@ -117,7 +124,7 @@ async fn multiple_entries() { assert_pending!(map.poll_next()); - v.sort(); + v.sort_unstable(); assert_eq!(v[0].0, "bar"); assert_eq!(v[0].1, 4); assert_eq!(v[1].0, "foo"); @@ -134,7 +141,9 @@ async fn multiple_entries() { #[tokio::test] async fn insert_remove() { let mut map = task::spawn(StreamMap::new()); - let (tx, rx) = mpsc::unbounded_channel(); + let (tx, rx) = mpsc::unbounded_channel_stream(); + + let rx = Box::pin(rx); assert_ready_none!(map.poll_next()); @@ -162,8 +171,11 @@ async fn insert_remove() { #[tokio::test] async fn replace() { let mut map = task::spawn(StreamMap::new()); - let (tx1, rx1) = mpsc::unbounded_channel(); - let (tx2, rx2) = mpsc::unbounded_channel(); + let (tx1, rx1) = mpsc::unbounded_channel_stream(); + let (tx2, rx2) = mpsc::unbounded_channel_stream(); + + let rx1 = Box::pin(rx1); + let rx2 = Box::pin(rx2); assert!(map.insert("foo", rx1).is_none()); @@ -215,8 +227,7 @@ fn new_capacity_zero() { let map = StreamMap::<&str, stream::Pending<()>>::new(); assert_eq!(0, map.capacity()); - let mut keys = map.keys(); - 
assert!(keys.next().is_none()); + assert!(map.keys().next().is_none()); } #[test] @@ -224,8 +235,7 @@ fn with_capacity() { let map = StreamMap::<&str, stream::Pending<()>>::with_capacity(10); assert!(10 <= map.capacity()); - let mut keys = map.keys(); - assert!(keys.next().is_none()); + assert!(map.keys().next().is_none()); } #[test] @@ -237,7 +247,7 @@ fn iter_keys() { map.insert("c", pending()); let mut keys = map.keys().collect::>(); - keys.sort(); + keys.sort_unstable(); assert_eq!(&keys[..], &[&"a", &"b", &"c"]); } @@ -252,7 +262,7 @@ fn iter_values() { let mut size_hints = map.values().map(|s| s.size_hint().0).collect::>(); - size_hints.sort(); + size_hints.sort_unstable(); assert_eq!(&size_hints[..], &[1, 2, 3]); } @@ -270,7 +280,7 @@ fn iter_values_mut() { .map(|s: &mut _| s.size_hint().0) .collect::>(); - size_hints.sort(); + size_hints.sort_unstable(); assert_eq!(&size_hints[..], &[1, 2, 3]); } diff --git a/third_party/rust/tokio-0.2.25/tests/stream_timeout.rs b/third_party/rust/tokio-stream/tests/stream_timeout.rs similarity index 88% rename from third_party/rust/tokio-0.2.25/tests/stream_timeout.rs rename to third_party/rust/tokio-stream/tests/stream_timeout.rs index f65c835196c2..5697ace69236 100644 --- a/third_party/rust/tokio-0.2.25/tests/stream_timeout.rs +++ b/third_party/rust/tokio-stream/tests/stream_timeout.rs @@ -1,14 +1,14 @@ #![cfg(feature = "full")] -use tokio::stream::{self, StreamExt}; -use tokio::time::{self, delay_for, Duration}; +use tokio::time::{self, sleep, Duration}; +use tokio_stream::{self, StreamExt}; use tokio_test::*; use futures::StreamExt as _; -async fn maybe_delay(idx: i32) -> i32 { +async fn maybe_sleep(idx: i32) -> i32 { if idx % 2 == 0 { - delay_for(ms(200)).await; + sleep(ms(200)).await; } idx } @@ -26,7 +26,7 @@ async fn basic_usage() { // // [Ok(1), Err(Elapsed), Ok(2), Ok(3), Err(Elapsed), Ok(4)] - let stream = stream::iter(1..=4).then(maybe_delay).timeout(ms(100)); + let stream = stream::iter(1..=4).then(maybe_sleep).timeout(ms(100)); let mut stream = task::spawn(stream); // First item completes immediately @@ -68,7 +68,7 @@ async fn basic_usage() { async fn return_elapsed_errors_only_once() { time::pause(); - let stream = stream::iter(1..=3).then(maybe_delay).timeout(ms(50)); + let stream = stream::iter(1..=3).then(maybe_sleep).timeout(ms(50)); let mut stream = task::spawn(stream); // First item completes immediately @@ -78,7 +78,7 @@ async fn return_elapsed_errors_only_once() { // error is returned. assert_pending!(stream.poll_next()); // - time::advance(ms(50)).await; + time::advance(ms(51)).await; let v = assert_ready!(stream.poll_next()); assert!(v.unwrap().is_err()); // timeout! @@ -97,7 +97,7 @@ async fn return_elapsed_errors_only_once() { #[tokio::test] async fn no_timeouts() { let stream = stream::iter(vec![1, 3, 5]) - .then(maybe_delay) + .then(maybe_sleep) .timeout(ms(100)); let mut stream = task::spawn(stream); diff --git a/third_party/rust/tokio-stream/tests/support/mpsc.rs b/third_party/rust/tokio-stream/tests/support/mpsc.rs new file mode 100644 index 000000000000..09dbe04215eb --- /dev/null +++ b/third_party/rust/tokio-stream/tests/support/mpsc.rs @@ -0,0 +1,15 @@ +use async_stream::stream; +use tokio::sync::mpsc::{self, UnboundedSender}; +use tokio_stream::Stream; + +pub fn unbounded_channel_stream() -> (UnboundedSender, impl Stream) { + let (tx, mut rx) = mpsc::unbounded_channel(); + + let stream = stream! 
{ + while let Some(item) = rx.recv().await { + yield item; + } + }; + + (tx, stream) +} diff --git a/third_party/rust/tokio-0.2.25/tests/time_throttle.rs b/third_party/rust/tokio-stream/tests/time_throttle.rs similarity index 74% rename from third_party/rust/tokio-0.2.25/tests/time_throttle.rs rename to third_party/rust/tokio-stream/tests/time_throttle.rs index 7102d17343e6..42a643bfa5d6 100644 --- a/third_party/rust/tokio-0.2.25/tests/time_throttle.rs +++ b/third_party/rust/tokio-stream/tests/time_throttle.rs @@ -1,7 +1,8 @@ #![warn(rust_2018_idioms)] #![cfg(feature = "full")] -use tokio::time::{self, throttle}; +use tokio::time; +use tokio_stream::StreamExt; use tokio_test::*; use std::time::Duration; @@ -10,10 +11,7 @@ use std::time::Duration; async fn usage() { time::pause(); - let mut stream = task::spawn(throttle( - Duration::from_millis(100), - futures::stream::repeat(()), - )); + let mut stream = task::spawn(futures::stream::repeat(()).throttle(Duration::from_millis(100))); assert_ready!(stream.poll_next()); assert_pending!(stream.poll_next()); diff --git a/third_party/rust/tokio-stream/tests/watch.rs b/third_party/rust/tokio-stream/tests/watch.rs new file mode 100644 index 000000000000..a56254edefd3 --- /dev/null +++ b/third_party/rust/tokio-stream/tests/watch.rs @@ -0,0 +1,29 @@ +#![cfg(feature = "sync")] + +use tokio::sync::watch; +use tokio_stream::wrappers::WatchStream; +use tokio_stream::StreamExt; + +#[tokio::test] +async fn message_not_twice() { + let (tx, rx) = watch::channel("hello"); + + let mut counter = 0; + let mut stream = WatchStream::new(rx).map(move |payload| { + println!("{}", payload); + if payload == "goodbye" { + counter += 1; + } + if counter >= 2 { + panic!("too many goodbyes"); + } + }); + + let task = tokio::spawn(async move { while stream.next().await.is_some() {} }); + + // Send goodbye just once + tx.send("goodbye").unwrap(); + + drop(tx); + task.await.unwrap(); +} diff --git a/third_party/rust/tokio-util/.cargo-checksum.json b/third_party/rust/tokio-util/.cargo-checksum.json index 81d5abd4a6e8..d6d315e3726e 100644 --- a/third_party/rust/tokio-util/.cargo-checksum.json +++ b/third_party/rust/tokio-util/.cargo-checksum.json @@ -1 +1 @@ 
-{"files":{"CHANGELOG.md":"d39499c8607262a6112cf883e057780d74435427094588428b33e06d6de49976","Cargo.toml":"7dc2883683e9235e2a8663b2229f29496a5730e22c465be4a277a31e5b0cfbc4","LICENSE":"898b1ae9821e98daf8964c8d6c7f61641f5f5aa78ad500020771c0939ee0dea1","README.md":"e895cbba8345655607ebd830f0101e66d2e6e9287ad3b9ddf697cca230738053","src/cfg.rs":"841d4ffefda4dcfc58bb446a2e5f1979624197144d9b5270a0d87d83bba339f0","src/codec/bytes_codec.rs":"8f72654774efa35ad05f9a6cb442968e29da917610e9829e03b07c6be929c241","src/codec/decoder.rs":"d3dda76d433634afe069096fb7441c4b2a7e7e397d6b539405e57c0091509c51","src/codec/encoder.rs":"e4544af47cdde075d1238ddee9555037912089bf25ce51cb4dd6f5d4f76ecf70","src/codec/framed.rs":"b2658ec109350bede71c921f677837a02e15a6bbe454912377346ca6c26c065d","src/codec/framed_read.rs":"bb8b22d0161a8c12cf2912293d2e7ebef46924f200e99860a4c7419a3cb164b7","src/codec/framed_write.rs":"43710103c96166b47c9f92646e2b0f60ad082aa932d08fd888f39daff78091b0","src/codec/length_delimited.rs":"d7e89abede31908c88b6abaa1cb5f22d0666ce7ba9613a35aed22b0a029e9874","src/codec/lines_codec.rs":"3e5b8bff27094d0084f76b477db8dd3e358c3b5491248c93ac2bfe6ec3d77671","src/codec/mod.rs":"774f4fe3dd1426b2cec676068919e9919225bd483c1a4ec6b2fe5a279698c9be","src/compat.rs":"0ec982a329d043228ac737fede8a6760584a90788b9eef89f6a6532122f1bb05","src/lib.rs":"cc5dd262c58c5f93d7e68a18bfaa25c410510d2a0f6f9f7f3a53cd8fbc320293","src/udp/frame.rs":"2245be9d4199df8731ba699fc011408c1ff3638b89fd23df5c0a80292026b435","src/udp/mod.rs":"699abcf4e12d8180a9e3f2b986edee6e2989ca995ec07c8be1d62d2c263c4a1e","tests/codecs.rs":"6b3ed6b6324a6603644d616bfb8c0719ccc8c3adc30dd31064dd83b0e2385a69","tests/framed.rs":"825c69bf7d5786181e49207a22855aabbb9d61369e9edc11be1dcf1763ec8a8c","tests/framed_read.rs":"426294cf1673156e7faaf58cb3b84a65f57d99ebf90852c3d0423a2e8d635505","tests/framed_write.rs":"fd84338701bcf4b54414c46112910d0193d2db73f794c28f663b4460fe2a3a0b","tests/length_delimited.rs":"3bad5a560b7bf081ebc03b34b190db58035b30138ba2f7c5cc64ae8fd2507700","tests/udp.rs":"d903ab35440ee6e8a10b31cb84937c9454a9ad3c582cb332e909788a15c00c0e"},"package":"be8242891f2b6cbef26a2d7e8605133c2c554cd35b3e4948ea892d6d68436499"} \ No newline at end of file 
+{"files":{"CHANGELOG.md":"06d030881733c323d8c9cc92a0b8241947aaeaa979e2ea7ec1a42c91394c83c5","Cargo.toml":"1c8557b55277e84909c7e0ed2fcadb1a3c8590ea342ec1ed605aa4b9e7d0ca3e","LICENSE":"697fc7385b1b0593f77d00db6e3ae8c146c2ccef505c4f09327bbedf952bfe35","README.md":"91c8da557ba5fbfb5a9b6e58d5efd5fd1700dd836509cf017628155c249e192c","src/cfg.rs":"800248e35ac58cbff4327959990e83783cf0e6dd82fec4ccf3fd55038a92115c","src/codec/any_delimiter_codec.rs":"66a4c3aee5328ef8a8be20a58d6ce388bda2394bc70e4800cf52e95760a22e09","src/codec/bytes_codec.rs":"e8f14a93415768f5a8736cbcc0a1684742e3be6df1a6b60cb95bd146544eee74","src/codec/decoder.rs":"c3f6c5197f80412684c15f905fd5172e0ffe3b8bad9589e8bfa16fe5f1a92f81","src/codec/encoder.rs":"e4544af47cdde075d1238ddee9555037912089bf25ce51cb4dd6f5d4f76ecf70","src/codec/framed.rs":"3137b9f2480429d00ee6fb2b567da78f41bb0dcba775e14956c0d93409cbbe38","src/codec/framed_impl.rs":"1bdbbebd724d2734c887cb1c90c78061ea212e9d2c0a9e91c390f81381395a03","src/codec/framed_read.rs":"367cc5517513c8fe256d8175b0497402f8b18dbf596aa19e6aa0c714772f0619","src/codec/framed_write.rs":"ca6a714e94c9778a7f01624299d2a58fa310dfe486c19e056d5d12c62d6be4fb","src/codec/length_delimited.rs":"4d2a62dfa2c9cf1a7242c5b04283cb3e88817844fee78c491e5aa76e5bf279da","src/codec/lines_codec.rs":"912302c500ea224e9002936e50505a5a29911e13d8c615b74954553f22d59826","src/codec/mod.rs":"95a11e3018bb850ff5934ef48cbc5056bdf2758d130cfa357ad406f0531946ae","src/compat.rs":"719b3a4ee8534647ae72df2d1a7b4937c60a9ee41e018fa7305dc6d5b3b41ed6","src/context.rs":"45a23756c6ce6b834da0f1817f556cc5bdd16412ddfc1dc9082da8a56819741a","src/either.rs":"25e022d51a44490e175b525d4493dd9e6fa51bd03aa27b763be9509eb7c4c0ee","src/io/mod.rs":"e2bf2cc05d6b57fa3cafcf95f5eb73996edd090fc012a99a1c4ad915276b80c2","src/io/read_buf.rs":"7043c2fbec74e548395eb2f12073c41c1b30e2f2c283b30eddfb5a16125387d0","src/io/reader_stream.rs":"98d0819ef38f94d56d7085a82d29fd83bde92a9178bebfe73c4533d0022b3d94","src/io/stream_reader.rs":"f36f95178b61f8498929dfc53416558037667d913d1765082e77a0b45460ea77","src/io/sync_bridge.rs":"06eed8295906e1a746e071433c185424d96f75e988a0c4dcb70e3efc7cc50513","src/lib.rs":"769afc23670c71d441233a5c0dffe12799518fcfffbdf2073ba798d0e3e4a104","src/loom.rs":"9028ba504687ad5ec98f3c69387bc2424ec23324b1eed3df5082a9cf23c6502e","src/net/mod.rs":"a75736c1d71408b4f5fb0f0bdcb535cc113e430a2479e01e5fca733ef3fcb15b","src/net/unix/mod.rs":"d667dbb53d7003a15a4705ca0654b35be7165b56ac0d631a23e90d18027a1a90","src/sync/cancellation_token.rs":"8bc59e142a2b3576ccdfb248957c627b28cd0de5d2aa20fccf74d1cdd163fe13","src/sync/cancellation_token/guard.rs":"6582ba676c4126b4fa292221f94ddcb5fd6d3e0e2a794819fa424a8f661bff8a","src/sync/cancellation_token/tree_node.rs":"4b46e5c3247387abdc421dc58c0bcc31166ef873a4847933fa354cb78eac58cc","src/sync/mod.rs":"385fbc1c98c330644adb76a3333b6a5c1f644a00ab5735d84293bd2ef878c18e","src/sync/mpsc.rs":"b0c6af8395ae5779c31ea08a307aa37c4138953af1e69d6b6f94efa485eb1da1","src/sync/poll_semaphore.rs":"817b520a5bb3b84bff6008a06ef0f6d5d256574a3ca9bf89d01a609d669790c3","src/sync/reusable_box.rs":"9b486884a036e9af3683945523714ce93db5d309454fd3ff198ccc357c8ef0c4","src/sync/tests/loom_cancellation_token.rs":"6393c5a12f09abef9300be4b66bb039bf02a63a04d6175fb7cfe68464784bdbd","src/sync/tests/mod.rs":"01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b","src/task/mod.rs":"f61140aaff3e34b7005b0e88846e9af4440ec2e37045c0323d93e7d954f747b4","src/task/spawn_pinned.rs":"87519f35a28142decdbbe8d7598da60250359de3622e99eac91f6d5879609119","src/time/delay_queue.rs":"ab
93368c84aaa5ae3d52e1a3939a0aa70e2d5ce9406f4e888666fddb7e119c7d","src/time/mod.rs":"a76126419f30539dcceb72477251de79b38fe38278ef08b0743f1f716294fd9d","src/time/wheel/level.rs":"75f6b29212e0a58aa3196c78688cc9e23272d8eae3fc829d8b381ab49de2372b","src/time/wheel/mod.rs":"4d97f3d7130553adabb1866831962e2665fabc323b3781693bfc803387ef25a4","src/time/wheel/stack.rs":"648d3c071e9754a820343c53de0cbe9c07e47276bb04155e67873c276ed13a61","src/udp/frame.rs":"5afa77955b497c0e2812705f8cd9517b5439847c1381d2e3939eab28c489a578","src/udp/mod.rs":"869302c0c15eb41f7079ef12ce03fa7c889a83152282ab0b7faf73d1c092ed4e","tests/_require_full.rs":"f8dedb6ad88884209b074ff6f5faa4979528b32653b45ab8047d2ebb28e19122","tests/codecs.rs":"493df228f9dce98de69e0afa7be491d6fe4588e7a381a7910c28f248d2d038a6","tests/context.rs":"917f80db694b54db07e6d1660aa5210272efda71cc0203f062dfecd81a8289a7","tests/framed.rs":"4e808fbc8d601138ff787b3603a377c23b3f42b4a7b882f9a1eb8cf1234c89e3","tests/framed_read.rs":"df41071388645518cc6b0700b75dd678219214b04de42605a0122f007f4ed281","tests/framed_stream.rs":"c3118fc5db62f225ad6d97f8b32ac03812b3b68cdab7a94d189f4a4d9337f13f","tests/framed_write.rs":"1b311ae6d79616e41f20b6213f8585a9c65830578eddea2a012ba72a3a359611","tests/io_reader_stream.rs":"1c9f79782c5574c5e489e86205bdb63b332fc0e8560fc4c2d602dfc2d2741a5e","tests/io_stream_reader.rs":"d86e225eafbd196be3124147b1275bed384459e7ec3e8cb21775906253f75086","tests/io_sync_bridge.rs":"7852a934bbe497822423c8a75c4c0f3cd651f2e3d11fc7c35a01f723752b7e6c","tests/length_delimited.rs":"6bb4714c29b8b76ccdaddb59b1ea51f73499e8e223e8b08a62107a9190af4ba5","tests/mpsc.rs":"4f4c4edaaa295cb61d05900408a463345b790aec9b228260ffab41d90fecc4b5","tests/poll_semaphore.rs":"a04ffcf40cd0b65d8809ad4e881b579c20b3ef7de49d8c4094fe455fc3d1887d","tests/reusable_box.rs":"f12e98533443fd6c53ea586185c7e349a95c595bfd00930d764510592a5274cd","tests/spawn_pinned.rs":"f92e8a700c71074a29649036d17034b810da3c181b3afef8c33de04152fe2a12","tests/sync_cancellation_token.rs":"71c3f431384fc4313213f30893d44ec38582f712c855a5c9cd385d01f3e21c2c","tests/time_delay_queue.rs":"b522aff22601513cbdfe848802a8959ac3f27d78823fa430c3a9e6c6560024dd","tests/udp.rs":"c2f8d90eeae9d3b7f107c12f3723d54ba591ec9e879893e195ec13ecfcb4db27"},"package":"f988a1a1adc2fb21f9c12aa96441da33a1728193ae0b95d2be22dbd17fcb4e5c"} \ No newline at end of file diff --git a/third_party/rust/tokio-util/CHANGELOG.md b/third_party/rust/tokio-util/CHANGELOG.md index 9847064a76e1..d200cb380f0f 100644 --- a/third_party/rust/tokio-util/CHANGELOG.md +++ b/third_party/rust/tokio-util/CHANGELOG.md @@ -1,26 +1,247 @@ +# 0.7.2 (May 14, 2022) + +This release contains a rewrite of `CancellationToken` that fixes a memory leak. 
([#4652]) + +[#4652]: https://github.com/tokio-rs/tokio/pull/4652 + +# 0.7.1 (February 21, 2022) + +### Added + +- codec: add `length_field_type` to `LengthDelimitedCodec` builder ([#4508]) +- io: add `StreamReader::into_inner_with_chunk()` ([#4559]) + +### Changed + +- switch from log to tracing ([#4539]) + +### Fixed + +- sync: fix waker update condition in `CancellationToken` ([#4497]) +- bumped tokio dependency to 1.6 to satisfy minimum requirements ([#4490]) + +[#4490]: https://github.com/tokio-rs/tokio/pull/4490 +[#4497]: https://github.com/tokio-rs/tokio/pull/4497 +[#4508]: https://github.com/tokio-rs/tokio/pull/4508 +[#4539]: https://github.com/tokio-rs/tokio/pull/4539 +[#4559]: https://github.com/tokio-rs/tokio/pull/4559 + +# 0.7.0 (February 9, 2022) + +### Added + +- task: add `spawn_pinned` ([#3370]) +- time: add `shrink_to_fit` and `compact` methods to `DelayQueue` ([#4170]) +- codec: improve `Builder::max_frame_length` docs ([#4352]) +- codec: add mutable reference getters for codecs to pinned `Framed` ([#4372]) +- net: add generic trait to combine `UnixListener` and `TcpListener` ([#4385]) +- codec: implement `Framed::map_codec` ([#4427]) +- codec: implement `Encoder` for `BytesCodec` ([#4465]) + +### Changed + +- sync: add lifetime parameter to `ReusableBoxFuture` ([#3762]) +- sync: refactored `PollSender` to fix a subtly broken `Sink` implementation ([#4214]) +- time: remove error case from the infallible `DelayQueue::poll_elapsed` ([#4241]) + +[#3370]: https://github.com/tokio-rs/tokio/pull/3370 +[#4170]: https://github.com/tokio-rs/tokio/pull/4170 +[#4352]: https://github.com/tokio-rs/tokio/pull/4352 +[#4372]: https://github.com/tokio-rs/tokio/pull/4372 +[#4385]: https://github.com/tokio-rs/tokio/pull/4385 +[#4427]: https://github.com/tokio-rs/tokio/pull/4427 +[#4465]: https://github.com/tokio-rs/tokio/pull/4465 +[#3762]: https://github.com/tokio-rs/tokio/pull/3762 +[#4214]: https://github.com/tokio-rs/tokio/pull/4214 +[#4241]: https://github.com/tokio-rs/tokio/pull/4241 + +# 0.6.10 (May 14, 2021) + +This is a backport for the memory leak in `CancellationToken` that was originally fixed in 0.7.2. 
([#4652]) + +[#4652]: https://github.com/tokio-rs/tokio/pull/4652 + +# 0.6.9 (October 29, 2021) + +### Added + +- codec: implement `Clone` for `LengthDelimitedCodec` ([#4089]) +- io: add `SyncIoBridge` ([#4146]) + +### Fixed + +- time: update deadline on removal in `DelayQueue` ([#4178]) +- codec: Update stream impl for Framed to return None after Err ([#4166]) + +[#4089]: https://github.com/tokio-rs/tokio/pull/4089 +[#4146]: https://github.com/tokio-rs/tokio/pull/4146 +[#4166]: https://github.com/tokio-rs/tokio/pull/4166 +[#4178]: https://github.com/tokio-rs/tokio/pull/4178 + +# 0.6.8 (September 3, 2021) + +### Added + +- sync: add drop guard for `CancellationToken` ([#3839]) +- compact: added `AsyncSeek` compat ([#4078]) +- time: expose `Key` used in `DelayQueue`'s `Expired` ([#4081]) +- io: add `with_capacity` to `ReaderStream` ([#4086]) + +### Fixed + +- codec: remove unnecessary `doc(cfg(...))` ([#3989]) + +[#3839]: https://github.com/tokio-rs/tokio/pull/3839 +[#4078]: https://github.com/tokio-rs/tokio/pull/4078 +[#4081]: https://github.com/tokio-rs/tokio/pull/4081 +[#4086]: https://github.com/tokio-rs/tokio/pull/4086 +[#3989]: https://github.com/tokio-rs/tokio/pull/3989 + +# 0.6.7 (May 14, 2021) + +### Added + +- udp: make `UdpFramed` take `Borrow` ([#3451]) +- compat: implement `AsRawFd`/`AsRawHandle` for `Compat` ([#3765]) + +[#3451]: https://github.com/tokio-rs/tokio/pull/3451 +[#3765]: https://github.com/tokio-rs/tokio/pull/3765 + +# 0.6.6 (April 12, 2021) + +### Added + +- util: makes `Framed` and `FramedStream` resumable after eof ([#3272]) +- util: add `PollSemaphore::{add_permits, available_permits}` ([#3683]) + +### Fixed + +- chore: avoid allocation if `PollSemaphore` is unused ([#3634]) + +[#3272]: https://github.com/tokio-rs/tokio/pull/3272 +[#3634]: https://github.com/tokio-rs/tokio/pull/3634 +[#3683]: https://github.com/tokio-rs/tokio/pull/3683 + +# 0.6.5 (March 20, 2021) + +### Fixed + +- util: annotate time module as requiring `time` feature ([#3606]) + +[#3606]: https://github.com/tokio-rs/tokio/pull/3606 + +# 0.6.4 (March 9, 2021) + +### Added + +- codec: `AnyDelimiter` codec ([#3406]) +- sync: add pollable `mpsc::Sender` ([#3490]) + +### Fixed + +- codec: `LinesCodec` should only return `MaxLineLengthExceeded` once per line ([#3556]) +- sync: fuse PollSemaphore ([#3578]) + +[#3406]: https://github.com/tokio-rs/tokio/pull/3406 +[#3490]: https://github.com/tokio-rs/tokio/pull/3490 +[#3556]: https://github.com/tokio-rs/tokio/pull/3556 +[#3578]: https://github.com/tokio-rs/tokio/pull/3578 + +# 0.6.3 (January 31, 2021) + +### Added + +- sync: add `ReusableBoxFuture` utility ([#3464]) + +### Changed + +- sync: use `ReusableBoxFuture` for `PollSemaphore` ([#3463]) +- deps: remove `async-stream` dependency ([#3463]) +- deps: remove `tokio-stream` dependency ([#3487]) + +# 0.6.2 (January 21, 2021) + +### Added + +- sync: add pollable `Semaphore` ([#3444]) + +### Fixed + +- time: fix panics on updating `DelayQueue` entries ([#3270]) + +# 0.6.1 (January 12, 2021) + +### Added + +- codec: `get_ref()`, `get_mut()`, `get_pin_mut()` and `into_inner()` for + `Framed`, `FramedRead`, `FramedWrite` and `StreamReader` ([#3364]). +- codec: `write_buffer()` and `write_buffer_mut()` for `Framed` and + `FramedWrite` ([#3387]). + +# 0.6.0 (December 23, 2020) + +### Changed +- depend on `tokio` 1.0. + +### Added +- rt: add constructors to `TokioContext` (#3221). + +# 0.5.1 (December 3, 2020) + +### Added +- io: `poll_read_buf` util fn (#2972). 
+- io: `poll_write_buf` util fn with vectored write support (#3156). + +# 0.5.0 (October 30, 2020) + +### Changed +- io: update `bytes` to 0.6 (#3071). + +# 0.4.0 (October 15, 2020) + +### Added +- sync: `CancellationToken` for coordinating task cancellation (#2747). +- rt: `TokioContext` sets the Tokio runtime for the duration of a future (#2791) +- io: `StreamReader`/`ReaderStream` map between `AsyncRead` values and `Stream` + of bytes (#2788). +- time: `DelayQueue` to manage many delays (#2897). + # 0.3.1 (March 18, 2020) ### Fixed - Adjust minimum-supported Tokio version to v0.2.5 to account for an internal - dependency on features in that version of Tokio. (#2326) + dependency on features in that version of Tokio. ([#2326]) # 0.3.0 (March 4, 2020) ### Changed - **Breaking Change**: Change `Encoder` trait to take a generic `Item` parameter, which allows - codec writers to pass references into `Framed` and `FramedWrite` types. (#1746) + codec writers to pass references into `Framed` and `FramedWrite` types. ([#1746]) ### Added -- Add futures-io/tokio::io compatibility layer. (#2117) -- Add `Framed::with_capacity`. (#2215) +- Add futures-io/tokio::io compatibility layer. ([#2117]) +- Add `Framed::with_capacity`. ([#2215]) ### Fixed -- Use advance over split_to when data is not needed. (#2198) +- Use advance over split_to when data is not needed. ([#2198]) # 0.2.0 (November 26, 2019) - Initial release + +[#3487]: https://github.com/tokio-rs/tokio/pull/3487 +[#3464]: https://github.com/tokio-rs/tokio/pull/3464 +[#3463]: https://github.com/tokio-rs/tokio/pull/3463 +[#3444]: https://github.com/tokio-rs/tokio/pull/3444 +[#3387]: https://github.com/tokio-rs/tokio/pull/3387 +[#3364]: https://github.com/tokio-rs/tokio/pull/3364 +[#3270]: https://github.com/tokio-rs/tokio/pull/3270 +[#2326]: https://github.com/tokio-rs/tokio/pull/2326 +[#2215]: https://github.com/tokio-rs/tokio/pull/2215 +[#2198]: https://github.com/tokio-rs/tokio/pull/2198 +[#2117]: https://github.com/tokio-rs/tokio/pull/2117 +[#1746]: https://github.com/tokio-rs/tokio/pull/1746 diff --git a/third_party/rust/tokio-util/Cargo.toml b/third_party/rust/tokio-util/Cargo.toml index 73289f1c9cd5..001b0b78b19c 100644 --- a/third_party/rust/tokio-util/Cargo.toml +++ b/third_party/rust/tokio-util/Cargo.toml @@ -3,29 +3,35 @@ # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies +# to registry (e.g., crates.io) dependencies. # -# If you believe there's an error in this file please file an -# issue against the rust-lang/cargo repository. If you're -# editing this file be aware that the upstream Cargo.toml -# will likely look very different (and much more reasonable) +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. [package] edition = "2018" +rust-version = "1.49" name = "tokio-util" -version = "0.3.1" +version = "0.7.2" authors = ["Tokio Contributors "] -description = "Additional utilities for working with Tokio.\n" +description = """ +Additional utilities for working with Tokio. 
+""" homepage = "https://tokio.rs" -documentation = "https://docs.rs/tokio-util/0.3.1/tokio_util" categories = ["asynchronous"] license = "MIT" repository = "https://github.com/tokio-rs/tokio" + [package.metadata.docs.rs] all-features = true -rustdoc-args = ["--cfg", "docsrs"] +rustdoc-args = [ + "--cfg", + "docsrs", +] + [dependencies.bytes] -version = "0.5.0" +version = "1.0.0" [dependencies.futures-core] version = "0.3.0" @@ -37,27 +43,70 @@ optional = true [dependencies.futures-sink] version = "0.3.0" -[dependencies.log] -version = "0.4" +[dependencies.futures-util] +version = "0.3.0" +optional = true [dependencies.pin-project-lite] -version = "0.1.4" +version = "0.2.0" + +[dependencies.slab] +version = "0.4.4" +optional = true [dependencies.tokio] -version = "0.2.5" +version = "1.7.0" +features = ["sync"] + +[dependencies.tracing] +version = "0.1.25" +optional = true + +[dev-dependencies.async-stream] +version = "0.3.0" + [dev-dependencies.futures] version = "0.3.0" +[dev-dependencies.futures-test] +version = "0.3.5" + [dev-dependencies.tokio] -version = "0.2.0" +version = "1.0.0" features = ["full"] +[dev-dependencies.tokio-stream] +version = "0.1" + [dev-dependencies.tokio-test] -version = "0.2.0" +version = "0.4.0" [features] -codec = ["tokio/stream"] +__docs_rs = ["futures-util"] +codec = ["tracing"] compat = ["futures-io"] default = [] -full = ["codec", "udp", "compat"] -udp = ["tokio/udp"] +full = [ + "codec", + "compat", + "io-util", + "time", + "net", + "rt", +] +io = [] +io-util = [ + "io", + "tokio/rt", + "tokio/io-util", +] +net = ["tokio/net"] +rt = [ + "tokio/rt", + "tokio/sync", + "futures-util", +] +time = [ + "tokio/time", + "slab", +] diff --git a/third_party/rust/tokio-util/LICENSE b/third_party/rust/tokio-util/LICENSE index cdb28b4b56a4..8af5baf01ea6 100644 --- a/third_party/rust/tokio-util/LICENSE +++ b/third_party/rust/tokio-util/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2019 Tokio Contributors +Copyright (c) 2022 Tokio Contributors Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated diff --git a/third_party/rust/tokio-util/README.md b/third_party/rust/tokio-util/README.md index 11b2b1841a7e..0d74f36d9a35 100644 --- a/third_party/rust/tokio-util/README.md +++ b/third_party/rust/tokio-util/README.md @@ -1,6 +1,6 @@ # tokio-util -Utilities for encoding and decoding frames. +Utilities for working with Tokio. ## License diff --git a/third_party/rust/tokio-util/src/cfg.rs b/third_party/rust/tokio-util/src/cfg.rs index 27e8c66a4333..4035255aff05 100644 --- a/third_party/rust/tokio-util/src/cfg.rs +++ b/third_party/rust/tokio-util/src/cfg.rs @@ -18,11 +18,53 @@ macro_rules! cfg_compat { } } -macro_rules! cfg_udp { +macro_rules! cfg_net { ($($item:item)*) => { $( - #[cfg(all(feature = "udp", feature = "codec"))] - #[cfg_attr(docsrs, doc(cfg(all(feature = "udp", feature = "codec"))))] + #[cfg(all(feature = "net", feature = "codec"))] + #[cfg_attr(docsrs, doc(cfg(all(feature = "net", feature = "codec"))))] + $item + )* + } +} + +macro_rules! cfg_io { + ($($item:item)*) => { + $( + #[cfg(feature = "io")] + #[cfg_attr(docsrs, doc(cfg(feature = "io")))] + $item + )* + } +} + +cfg_io! { + macro_rules! cfg_io_util { + ($($item:item)*) => { + $( + #[cfg(feature = "io-util")] + #[cfg_attr(docsrs, doc(cfg(feature = "io-util")))] + $item + )* + } + } +} + +macro_rules! cfg_rt { + ($($item:item)*) => { + $( + #[cfg(feature = "rt")] + #[cfg_attr(docsrs, doc(cfg(feature = "rt")))] + $item + )* + } +} + +macro_rules! 
cfg_time { + ($($item:item)*) => { + $( + #[cfg(feature = "time")] + #[cfg_attr(docsrs, doc(cfg(feature = "time")))] $item )* } diff --git a/third_party/rust/tokio-util/src/codec/any_delimiter_codec.rs b/third_party/rust/tokio-util/src/codec/any_delimiter_codec.rs new file mode 100644 index 000000000000..3dbfd456b0aa --- /dev/null +++ b/third_party/rust/tokio-util/src/codec/any_delimiter_codec.rs @@ -0,0 +1,263 @@ +use crate::codec::decoder::Decoder; +use crate::codec::encoder::Encoder; + +use bytes::{Buf, BufMut, Bytes, BytesMut}; +use std::{cmp, fmt, io, str, usize}; + +const DEFAULT_SEEK_DELIMITERS: &[u8] = b",;\n\r"; +const DEFAULT_SEQUENCE_WRITER: &[u8] = b","; +/// A simple [`Decoder`] and [`Encoder`] implementation that splits up data into chunks based on any character in the given delimiter string. +/// +/// [`Decoder`]: crate::codec::Decoder +/// [`Encoder`]: crate::codec::Encoder +/// +/// # Example +/// Decode string of bytes containing various different delimiters. +/// +/// [`BytesMut`]: bytes::BytesMut +/// [`Error`]: std::io::Error +/// +/// ``` +/// use tokio_util::codec::{AnyDelimiterCodec, Decoder}; +/// use bytes::{BufMut, BytesMut}; +/// +/// # +/// # #[tokio::main(flavor = "current_thread")] +/// # async fn main() -> Result<(), std::io::Error> { +/// let mut codec = AnyDelimiterCodec::new(b",;\r\n".to_vec(),b";".to_vec()); +/// let buf = &mut BytesMut::new(); +/// buf.reserve(200); +/// buf.put_slice(b"chunk 1,chunk 2;chunk 3\n\r"); +/// assert_eq!("chunk 1", codec.decode(buf).unwrap().unwrap()); +/// assert_eq!("chunk 2", codec.decode(buf).unwrap().unwrap()); +/// assert_eq!("chunk 3", codec.decode(buf).unwrap().unwrap()); +/// assert_eq!("", codec.decode(buf).unwrap().unwrap()); +/// assert_eq!(None, codec.decode(buf).unwrap()); +/// # Ok(()) +/// # } +/// ``` +/// +#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)] +pub struct AnyDelimiterCodec { + // Stored index of the next index to examine for the delimiter character. + // This is used to optimize searching. + // For example, if `decode` was called with `abc` and the delimiter is '{}', it would hold `3`, + // because that is the next index to examine. + // The next time `decode` is called with `abcde}`, the method will + // only look at `de}` before returning. + next_index: usize, + + /// The maximum length for a given chunk. If `usize::MAX`, chunks will be + /// read until a delimiter character is reached. + max_length: usize, + + /// Are we currently discarding the remainder of a chunk which was over + /// the length limit? + is_discarding: bool, + + /// The bytes that are using for search during decode + seek_delimiters: Vec, + + /// The bytes that are using for encoding + sequence_writer: Vec, +} + +impl AnyDelimiterCodec { + /// Returns a `AnyDelimiterCodec` for splitting up data into chunks. + /// + /// # Note + /// + /// The returned `AnyDelimiterCodec` will not have an upper bound on the length + /// of a buffered chunk. See the documentation for [`new_with_max_length`] + /// for information on why this could be a potential security risk. + /// + /// [`new_with_max_length`]: crate::codec::AnyDelimiterCodec::new_with_max_length() + pub fn new(seek_delimiters: Vec, sequence_writer: Vec) -> AnyDelimiterCodec { + AnyDelimiterCodec { + next_index: 0, + max_length: usize::MAX, + is_discarding: false, + seek_delimiters, + sequence_writer, + } + } + + /// Returns a `AnyDelimiterCodec` with a maximum chunk length limit. 
+ /// + /// If this is set, calls to `AnyDelimiterCodec::decode` will return a + /// [`AnyDelimiterCodecError`] when a chunk exceeds the length limit. Subsequent calls + /// will discard up to `limit` bytes from that chunk until a delimiter + /// character is reached, returning `None` until the delimiter over the limit + /// has been fully discarded. After that point, calls to `decode` will + /// function as normal. + /// + /// # Note + /// + /// Setting a length limit is highly recommended for any `AnyDelimiterCodec` which + /// will be exposed to untrusted input. Otherwise, the size of the buffer + /// that holds the chunk currently being read is unbounded. An attacker could + /// exploit this unbounded buffer by sending an unbounded amount of input + /// without any delimiter characters, causing unbounded memory consumption. + /// + /// [`AnyDelimiterCodecError`]: crate::codec::AnyDelimiterCodecError + pub fn new_with_max_length( + seek_delimiters: Vec, + sequence_writer: Vec, + max_length: usize, + ) -> Self { + AnyDelimiterCodec { + max_length, + ..AnyDelimiterCodec::new(seek_delimiters, sequence_writer) + } + } + + /// Returns the maximum chunk length when decoding. + /// + /// ``` + /// use std::usize; + /// use tokio_util::codec::AnyDelimiterCodec; + /// + /// let codec = AnyDelimiterCodec::new(b",;\n".to_vec(), b";".to_vec()); + /// assert_eq!(codec.max_length(), usize::MAX); + /// ``` + /// ``` + /// use tokio_util::codec::AnyDelimiterCodec; + /// + /// let codec = AnyDelimiterCodec::new_with_max_length(b",;\n".to_vec(), b";".to_vec(), 256); + /// assert_eq!(codec.max_length(), 256); + /// ``` + pub fn max_length(&self) -> usize { + self.max_length + } +} + +impl Decoder for AnyDelimiterCodec { + type Item = Bytes; + type Error = AnyDelimiterCodecError; + + fn decode(&mut self, buf: &mut BytesMut) -> Result, AnyDelimiterCodecError> { + loop { + // Determine how far into the buffer we'll search for a delimiter. If + // there's no max_length set, we'll read to the end of the buffer. + let read_to = cmp::min(self.max_length.saturating_add(1), buf.len()); + + let new_chunk_offset = buf[self.next_index..read_to].iter().position(|b| { + self.seek_delimiters + .iter() + .any(|delimiter| *b == *delimiter) + }); + + match (self.is_discarding, new_chunk_offset) { + (true, Some(offset)) => { + // If we found a new chunk, discard up to that offset and + // then stop discarding. On the next iteration, we'll try + // to read a chunk normally. + buf.advance(offset + self.next_index + 1); + self.is_discarding = false; + self.next_index = 0; + } + (true, None) => { + // Otherwise, we didn't find a new chunk, so we'll discard + // everything we read. On the next iteration, we'll continue + // discarding up to max_len bytes unless we find a new chunk. + buf.advance(read_to); + self.next_index = 0; + if buf.is_empty() { + return Ok(None); + } + } + (false, Some(offset)) => { + // Found a chunk! + let new_chunk_index = offset + self.next_index; + self.next_index = 0; + let mut chunk = buf.split_to(new_chunk_index + 1); + chunk.truncate(chunk.len() - 1); + let chunk = chunk.freeze(); + return Ok(Some(chunk)); + } + (false, None) if buf.len() > self.max_length => { + // Reached the maximum length without finding a + // new chunk, return an error and start discarding on the + // next call. 
+ self.is_discarding = true; + return Err(AnyDelimiterCodecError::MaxChunkLengthExceeded); + } + (false, None) => { + // We didn't find a chunk or reach the length limit, so the next + // call will resume searching at the current offset. + self.next_index = read_to; + return Ok(None); + } + } + } + } + + fn decode_eof(&mut self, buf: &mut BytesMut) -> Result, AnyDelimiterCodecError> { + Ok(match self.decode(buf)? { + Some(frame) => Some(frame), + None => { + // return remaining data, if any + if buf.is_empty() { + None + } else { + let chunk = buf.split_to(buf.len()); + self.next_index = 0; + Some(chunk.freeze()) + } + } + }) + } +} + +impl Encoder for AnyDelimiterCodec +where + T: AsRef, +{ + type Error = AnyDelimiterCodecError; + + fn encode(&mut self, chunk: T, buf: &mut BytesMut) -> Result<(), AnyDelimiterCodecError> { + let chunk = chunk.as_ref(); + buf.reserve(chunk.len() + 1); + buf.put(chunk.as_bytes()); + buf.put(self.sequence_writer.as_ref()); + + Ok(()) + } +} + +impl Default for AnyDelimiterCodec { + fn default() -> Self { + Self::new( + DEFAULT_SEEK_DELIMITERS.to_vec(), + DEFAULT_SEQUENCE_WRITER.to_vec(), + ) + } +} + +/// An error occurred while encoding or decoding a chunk. +#[derive(Debug)] +pub enum AnyDelimiterCodecError { + /// The maximum chunk length was exceeded. + MaxChunkLengthExceeded, + /// An IO error occurred. + Io(io::Error), +} + +impl fmt::Display for AnyDelimiterCodecError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + AnyDelimiterCodecError::MaxChunkLengthExceeded => { + write!(f, "max chunk length exceeded") + } + AnyDelimiterCodecError::Io(e) => write!(f, "{}", e), + } + } +} + +impl From for AnyDelimiterCodecError { + fn from(e: io::Error) -> AnyDelimiterCodecError { + AnyDelimiterCodecError::Io(e) + } +} + +impl std::error::Error for AnyDelimiterCodecError {} diff --git a/third_party/rust/tokio-util/src/codec/bytes_codec.rs b/third_party/rust/tokio-util/src/codec/bytes_codec.rs index a5e73749ef1a..ceab228b9433 100644 --- a/third_party/rust/tokio-util/src/codec/bytes_codec.rs +++ b/third_party/rust/tokio-util/src/codec/bytes_codec.rs @@ -33,7 +33,7 @@ use std::io; /// # } /// # } /// # -/// # #[tokio::main(core_threads = 1)] +/// # #[tokio::main(flavor = "current_thread")] /// # async fn main() -> Result<(), std::io::Error> { /// let my_async_read = File::open("filename.txt").await?; /// let my_stream_of_bytes = FramedRead::new(my_async_read, BytesCodec::new()); @@ -74,3 +74,13 @@ impl Encoder for BytesCodec { Ok(()) } } + +impl Encoder for BytesCodec { + type Error = io::Error; + + fn encode(&mut self, data: BytesMut, buf: &mut BytesMut) -> Result<(), io::Error> { + buf.reserve(data.len()); + buf.put(data); + Ok(()) + } +} diff --git a/third_party/rust/tokio-util/src/codec/decoder.rs b/third_party/rust/tokio-util/src/codec/decoder.rs index 84d27fbf14eb..c5927783d153 100644 --- a/third_party/rust/tokio-util/src/codec/decoder.rs +++ b/third_party/rust/tokio-util/src/codec/decoder.rs @@ -16,6 +16,20 @@ use std::io; /// implementing stateful streaming parsers. In many cases, though, this type /// will simply be a unit struct (e.g. `struct HttpDecoder`). /// +/// For some underlying data-sources, namely files and FIFOs, +/// it's possible to temporarily read 0 bytes by reaching EOF. +/// +/// In these cases `decode_eof` will be called until it signals +/// fullfillment of all closing frames by returning `Ok(None)`. 
+/// After that, repeated attempts to read from the [`Framed`] or [`FramedRead`] +/// will not invoke `decode` or `decode_eof` again, until data can be read +/// during a retry. +/// +/// It is up to the Decoder to keep track of a restart after an EOF, +/// and to decide how to handle such an event by, for example, +/// allowing frames to cross EOF boundaries, re-emitting opening frames, or +/// resetting the entire internal state. +/// /// [`Framed`]: crate::codec::Framed /// [`FramedRead`]: crate::codec::FramedRead pub trait Decoder { @@ -115,13 +129,18 @@ pub trait Decoder { /// This method defaults to calling `decode` and returns an error if /// `Ok(None)` is returned while there is unconsumed data in `buf`. /// Typically this doesn't need to be implemented unless the framing - /// protocol differs near the end of the stream. + /// protocol differs near the end of the stream, or if you need to construct + /// frames _across_ eof boundaries on sources that can be resumed. /// /// Note that the `buf` argument may be empty. If a previous call to /// `decode_eof` consumed all the bytes in the buffer, `decode_eof` will be /// called again until it returns `None`, indicating that there are no more /// frames to yield. This behavior enables returning finalization frames /// that may not be based on inbound data. + /// + /// Once `None` has been returned, `decode_eof` won't be called again until + /// an attempt to resume the stream has been made, where the underlying stream + /// actually returned more data. fn decode_eof(&mut self, buf: &mut BytesMut) -> Result, Self::Error> { match self.decode(buf)? { Some(frame) => Ok(Some(frame)), @@ -153,7 +172,7 @@ pub trait Decoder { /// calling `split` on the [`Framed`] returned by this method, which will /// break them into separate objects, allowing them to interact more easily. /// - /// [`Stream`]: tokio::stream::Stream + /// [`Stream`]: futures_core::Stream /// [`Sink`]: futures_sink::Sink /// [`Framed`]: crate::codec::Framed fn framed(self, io: T) -> Framed diff --git a/third_party/rust/tokio-util/src/codec/framed.rs b/third_party/rust/tokio-util/src/codec/framed.rs index d2e7659eda2b..d89b8b6dc340 100644 --- a/third_party/rust/tokio-util/src/codec/framed.rs +++ b/third_party/rust/tokio-util/src/codec/framed.rs @@ -1,19 +1,15 @@ use crate::codec::decoder::Decoder; use crate::codec::encoder::Encoder; -use crate::codec::framed_read::{framed_read2, framed_read2_with_buffer, FramedRead2}; -use crate::codec::framed_write::{framed_write2, framed_write2_with_buffer, FramedWrite2}; +use crate::codec::framed_impl::{FramedImpl, RWFrames, ReadFrame, WriteFrame}; -use tokio::{ - io::{AsyncBufRead, AsyncRead, AsyncWrite}, - stream::Stream, -}; +use futures_core::Stream; +use tokio::io::{AsyncRead, AsyncWrite}; use bytes::BytesMut; use futures_sink::Sink; use pin_project_lite::pin_project; use std::fmt; -use std::io::{self, BufRead, Read, Write}; -use std::mem::MaybeUninit; +use std::io; use std::pin::Pin; use std::task::{Context, Poll}; @@ -24,43 +20,13 @@ pin_project! { /// You can create a `Framed` instance by using the [`Decoder::framed`] adapter, or /// by using the `new` function seen below. /// - /// [`Stream`]: tokio::stream::Stream + /// [`Stream`]: futures_core::Stream /// [`Sink`]: futures_sink::Sink /// [`AsyncRead`]: tokio::io::AsyncRead /// [`Decoder::framed`]: crate::codec::Decoder::framed() pub struct Framed { #[pin] - inner: FramedRead2>>, - } -} - -pin_project! 
{ - pub(crate) struct Fuse { - #[pin] - pub(crate) io: T, - pub(crate) codec: U, - } -} - -/// Abstracts over `FramedRead2` being either `FramedRead2>>` or -/// `FramedRead2>` and lets the io and codec parts be extracted in either case. -pub(crate) trait ProjectFuse { - type Io; - type Codec; - - fn project(self: Pin<&mut Self>) -> Fuse, &mut Self::Codec>; -} - -impl ProjectFuse for Fuse { - type Io = T; - type Codec = U; - - fn project(self: Pin<&mut Self>) -> Fuse, &mut Self::Codec> { - let self_ = self.project(); - Fuse { - io: self_.io, - codec: self_.codec, - } + inner: FramedImpl } } @@ -86,14 +52,23 @@ where /// calling [`split`] on the `Framed` returned by this method, which will /// break them into separate objects, allowing them to interact more easily. /// - /// [`Stream`]: tokio::stream::Stream + /// Note that, for some byte sources, the stream can be resumed after an EOF + /// by reading from it, even after it has returned `None`. Repeated attempts + /// to do so, without new data available, continue to return `None` without + /// creating more (closing) frames. + /// + /// [`Stream`]: futures_core::Stream /// [`Sink`]: futures_sink::Sink /// [`Decode`]: crate::codec::Decoder /// [`Encoder`]: crate::codec::Encoder /// [`split`]: https://docs.rs/futures/0.3/futures/stream/trait.StreamExt.html#method.split pub fn new(inner: T, codec: U) -> Framed { Framed { - inner: framed_read2(framed_write2(Fuse { io: inner, codec })), + inner: FramedImpl { + inner, + codec, + state: Default::default(), + }, } } @@ -116,17 +91,26 @@ where /// calling [`split`] on the `Framed` returned by this method, which will /// break them into separate objects, allowing them to interact more easily. /// - /// [`Stream`]: tokio::stream::Stream + /// [`Stream`]: futures_core::Stream /// [`Sink`]: futures_sink::Sink /// [`Decode`]: crate::codec::Decoder /// [`Encoder`]: crate::codec::Encoder /// [`split`]: https://docs.rs/futures/0.3/futures/stream/trait.StreamExt.html#method.split pub fn with_capacity(inner: T, codec: U, capacity: usize) -> Framed { Framed { - inner: framed_read2_with_buffer( - framed_write2(Fuse { io: inner, codec }), - BytesMut::with_capacity(capacity), - ), + inner: FramedImpl { + inner, + codec, + state: RWFrames { + read: ReadFrame { + eof: false, + is_readable: false, + buffer: BytesMut::with_capacity(capacity), + has_errored: false, + }, + write: WriteFrame::default(), + }, + }, } } } @@ -153,7 +137,7 @@ impl Framed { /// calling [`split`] on the `Framed` returned by this method, which will /// break them into separate objects, allowing them to interact more easily. /// - /// [`Stream`]: tokio::stream::Stream + /// [`Stream`]: futures_core::Stream /// [`Sink`]: futures_sink::Sink /// [`Decoder`]: crate::codec::Decoder /// [`Encoder`]: crate::codec::Encoder @@ -161,16 +145,14 @@ impl Framed { /// [`split`]: https://docs.rs/futures/0.3/futures/stream/trait.StreamExt.html#method.split pub fn from_parts(parts: FramedParts) -> Framed { Framed { - inner: framed_read2_with_buffer( - framed_write2_with_buffer( - Fuse { - io: parts.io, - codec: parts.codec, - }, - parts.write_buf, - ), - parts.read_buf, - ), + inner: FramedImpl { + inner: parts.io, + codec: parts.codec, + state: RWFrames { + read: parts.read_buf.into(), + write: parts.write_buf.into(), + }, + }, } } @@ -181,7 +163,7 @@ impl Framed { /// of data coming in as it may corrupt the stream of frames otherwise /// being worked with. 
pub fn get_ref(&self) -> &T { - &self.inner.get_ref().get_ref().io + &self.inner.inner } /// Returns a mutable reference to the underlying I/O stream wrapped by @@ -191,7 +173,17 @@ impl Framed { /// of data coming in as it may corrupt the stream of frames otherwise /// being worked with. pub fn get_mut(&mut self) -> &mut T { - &mut self.inner.get_mut().get_mut().io + &mut self.inner.inner + } + + /// Returns a pinned mutable reference to the underlying I/O stream wrapped by + /// `Framed`. + /// + /// Note that care should be taken to not tamper with the underlying stream + /// of data coming in as it may corrupt the stream of frames otherwise + /// being worked with. + pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut T> { + self.project().inner.project().inner } /// Returns a reference to the underlying codec wrapped by @@ -200,7 +192,7 @@ impl Framed { /// Note that care should be taken to not tamper with the underlying codec /// as it may corrupt the stream of frames otherwise being worked with. pub fn codec(&self) -> &U { - &self.inner.get_ref().get_ref().codec + &self.inner.codec } /// Returns a mutable reference to the underlying codec wrapped by @@ -209,12 +201,56 @@ impl Framed { /// Note that care should be taken to not tamper with the underlying codec /// as it may corrupt the stream of frames otherwise being worked with. pub fn codec_mut(&mut self) -> &mut U { - &mut self.inner.get_mut().get_mut().codec + &mut self.inner.codec + } + + /// Maps the codec `U` to `C`, preserving the read and write buffers + /// wrapped by `Framed`. + /// + /// Note that care should be taken to not tamper with the underlying codec + /// as it may corrupt the stream of frames otherwise being worked with. + pub fn map_codec(self, map: F) -> Framed + where + F: FnOnce(U) -> C, + { + // This could be potentially simplified once rust-lang/rust#86555 hits stable + let parts = self.into_parts(); + Framed::from_parts(FramedParts { + io: parts.io, + codec: map(parts.codec), + read_buf: parts.read_buf, + write_buf: parts.write_buf, + _priv: (), + }) + } + + /// Returns a mutable reference to the underlying codec wrapped by + /// `Framed`. + /// + /// Note that care should be taken to not tamper with the underlying codec + /// as it may corrupt the stream of frames otherwise being worked with. + pub fn codec_pin_mut(self: Pin<&mut Self>) -> &mut U { + self.project().inner.project().codec } /// Returns a reference to the read buffer. pub fn read_buffer(&self) -> &BytesMut { - self.inner.buffer() + &self.inner.state.read.buffer + } + + /// Returns a mutable reference to the read buffer. + pub fn read_buffer_mut(&mut self) -> &mut BytesMut { + &mut self.inner.state.read.buffer + } + + /// Returns a reference to the write buffer. + pub fn write_buffer(&self) -> &BytesMut { + &self.inner.state.write.buffer + } + + /// Returns a mutable reference to the write buffer. + pub fn write_buffer_mut(&mut self) -> &mut BytesMut { + &mut self.inner.state.write.buffer } /// Consumes the `Framed`, returning its underlying I/O stream. @@ -223,7 +259,7 @@ impl Framed { /// of data coming in as it may corrupt the stream of frames otherwise /// being worked with. pub fn into_inner(self) -> T { - self.inner.into_inner().into_inner().io + self.inner.inner } /// Consumes the `Framed`, returning its underlying I/O stream, the buffer @@ -233,19 +269,17 @@ impl Framed { /// of data coming in as it may corrupt the stream of frames otherwise /// being worked with. 
pub fn into_parts(self) -> FramedParts { - let (inner, read_buf) = self.inner.into_parts(); - let (inner, write_buf) = inner.into_parts(); - FramedParts { - io: inner.io, - codec: inner.codec, - read_buf, - write_buf, + io: self.inner.inner, + codec: self.inner.codec, + read_buf: self.inner.state.read.buffer, + write_buf: self.inner.state.write.buffer, _priv: (), } } } +// This impl just defers to the underlying FramedImpl impl Stream for Framed where T: AsyncRead, @@ -258,6 +292,7 @@ where } } +// This impl just defers to the underlying FramedImpl impl Sink for Framed where T: AsyncWrite, @@ -267,19 +302,19 @@ where type Error = U::Error; fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.project().inner.get_pin_mut().poll_ready(cx) + self.project().inner.poll_ready(cx) } fn start_send(self: Pin<&mut Self>, item: I) -> Result<(), Self::Error> { - self.project().inner.get_pin_mut().start_send(item) + self.project().inner.start_send(item) } fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.project().inner.get_pin_mut().poll_flush(cx) + self.project().inner.poll_flush(cx) } fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.project().inner.get_pin_mut().poll_close(cx) + self.project().inner.poll_close(cx) } } @@ -290,109 +325,19 @@ where { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Framed") - .field("io", &self.inner.get_ref().get_ref().io) - .field("codec", &self.inner.get_ref().get_ref().codec) + .field("io", self.get_ref()) + .field("codec", self.codec()) .finish() } } -// ===== impl Fuse ===== - -impl Read for Fuse { - fn read(&mut self, dst: &mut [u8]) -> io::Result { - self.io.read(dst) - } -} - -impl BufRead for Fuse { - fn fill_buf(&mut self) -> io::Result<&[u8]> { - self.io.fill_buf() - } - - fn consume(&mut self, amt: usize) { - self.io.consume(amt) - } -} - -impl AsyncRead for Fuse { - unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [MaybeUninit]) -> bool { - self.io.prepare_uninitialized_buffer(buf) - } - - fn poll_read( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { - self.project().io.poll_read(cx, buf) - } -} - -impl AsyncBufRead for Fuse { - fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.project().io.poll_fill_buf(cx) - } - - fn consume(self: Pin<&mut Self>, amt: usize) { - self.project().io.consume(amt) - } -} - -impl Write for Fuse { - fn write(&mut self, src: &[u8]) -> io::Result { - self.io.write(src) - } - - fn flush(&mut self) -> io::Result<()> { - self.io.flush() - } -} - -impl AsyncWrite for Fuse { - fn poll_write( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - self.project().io.poll_write(cx, buf) - } - - fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.project().io.poll_flush(cx) - } - - fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.project().io.poll_shutdown(cx) - } -} - -impl Decoder for Fuse { - type Item = U::Item; - type Error = U::Error; - - fn decode(&mut self, buffer: &mut BytesMut) -> Result, Self::Error> { - self.codec.decode(buffer) - } - - fn decode_eof(&mut self, buffer: &mut BytesMut) -> Result, Self::Error> { - self.codec.decode_eof(buffer) - } -} - -impl> Encoder for Fuse { - type Error = U::Error; - - fn encode(&mut self, item: I, dst: &mut BytesMut) -> Result<(), Self::Error> { - self.codec.encode(item, dst) - } -} - /// `FramedParts` contains an export of the data of a Framed 
transport. /// It can be used to construct a new [`Framed`] with a different codec. /// It contains all current buffers and the inner transport. /// /// [`Framed`]: crate::codec::Framed #[derive(Debug)] +#[allow(clippy::manual_non_exhaustive)] pub struct FramedParts { /// The inner transport used to read bytes to and write bytes to pub io: T, diff --git a/third_party/rust/tokio-util/src/codec/framed_impl.rs b/third_party/rust/tokio-util/src/codec/framed_impl.rs new file mode 100644 index 000000000000..ce1a6db87390 --- /dev/null +++ b/third_party/rust/tokio-util/src/codec/framed_impl.rs @@ -0,0 +1,308 @@ +use crate::codec::decoder::Decoder; +use crate::codec::encoder::Encoder; + +use futures_core::Stream; +use tokio::io::{AsyncRead, AsyncWrite}; + +use bytes::BytesMut; +use futures_core::ready; +use futures_sink::Sink; +use pin_project_lite::pin_project; +use std::borrow::{Borrow, BorrowMut}; +use std::io; +use std::pin::Pin; +use std::task::{Context, Poll}; +use tracing::trace; + +pin_project! { + #[derive(Debug)] + pub(crate) struct FramedImpl { + #[pin] + pub(crate) inner: T, + pub(crate) state: State, + pub(crate) codec: U, + } +} + +const INITIAL_CAPACITY: usize = 8 * 1024; +const BACKPRESSURE_BOUNDARY: usize = INITIAL_CAPACITY; + +#[derive(Debug)] +pub(crate) struct ReadFrame { + pub(crate) eof: bool, + pub(crate) is_readable: bool, + pub(crate) buffer: BytesMut, + pub(crate) has_errored: bool, +} + +pub(crate) struct WriteFrame { + pub(crate) buffer: BytesMut, +} + +#[derive(Default)] +pub(crate) struct RWFrames { + pub(crate) read: ReadFrame, + pub(crate) write: WriteFrame, +} + +impl Default for ReadFrame { + fn default() -> Self { + Self { + eof: false, + is_readable: false, + buffer: BytesMut::with_capacity(INITIAL_CAPACITY), + has_errored: false, + } + } +} + +impl Default for WriteFrame { + fn default() -> Self { + Self { + buffer: BytesMut::with_capacity(INITIAL_CAPACITY), + } + } +} + +impl From for ReadFrame { + fn from(mut buffer: BytesMut) -> Self { + let size = buffer.capacity(); + if size < INITIAL_CAPACITY { + buffer.reserve(INITIAL_CAPACITY - size); + } + + Self { + buffer, + is_readable: size > 0, + eof: false, + has_errored: false, + } + } +} + +impl From for WriteFrame { + fn from(mut buffer: BytesMut) -> Self { + let size = buffer.capacity(); + if size < INITIAL_CAPACITY { + buffer.reserve(INITIAL_CAPACITY - size); + } + + Self { buffer } + } +} + +impl Borrow for RWFrames { + fn borrow(&self) -> &ReadFrame { + &self.read + } +} +impl BorrowMut for RWFrames { + fn borrow_mut(&mut self) -> &mut ReadFrame { + &mut self.read + } +} +impl Borrow for RWFrames { + fn borrow(&self) -> &WriteFrame { + &self.write + } +} +impl BorrowMut for RWFrames { + fn borrow_mut(&mut self) -> &mut WriteFrame { + &mut self.write + } +} +impl Stream for FramedImpl +where + T: AsyncRead, + U: Decoder, + R: BorrowMut, +{ + type Item = Result; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + use crate::util::poll_read_buf; + + let mut pinned = self.project(); + let state: &mut ReadFrame = pinned.state.borrow_mut(); + // The following loops implements a state machine with each state corresponding + // to a combination of the `is_readable` and `eof` flags. States persist across + // loop entries and most state transitions occur with a return. + // + // The initial state is `reading`. 
+ // + // | state | eof | is_readable | has_errored | + // |---------|-------|-------------|-------------| + // | reading | false | false | false | + // | framing | false | true | false | + // | pausing | true | true | false | + // | paused | true | false | false | + // | errored | | | true | + // `decode_eof` returns Err + // ┌────────────────────────────────────────────────────────┐ + // `decode_eof` returns │ │ + // `Ok(Some)` │ │ + // ┌─────┐ │ `decode_eof` returns After returning │ + // Read 0 bytes ├─────▼──┴┐ `Ok(None)` ┌────────┐ ◄───┐ `None` ┌───▼─────┐ + // ┌────────────────►│ Pausing ├───────────────────────►│ Paused ├─┐ └───────────┤ Errored │ + // │ └─────────┘ └─┬──▲───┘ │ └───▲───▲─┘ + // Pending read │ │ │ │ │ │ + // ┌──────┐ │ `decode` returns `Some` │ └─────┘ │ │ + // │ │ │ ┌──────┐ │ Pending │ │ + // │ ┌────▼──┴─┐ Read n>0 bytes ┌┴──────▼─┐ read n>0 bytes │ read │ │ + // └─┤ Reading ├───────────────►│ Framing │◄────────────────────────┘ │ │ + // └──┬─▲────┘ └─────┬──┬┘ │ │ + // │ │ │ │ `decode` returns Err │ │ + // │ └───decode` returns `None`──┘ └───────────────────────────────────────────────────────┘ │ + // │ read returns Err │ + // └────────────────────────────────────────────────────────────────────────────────────────────┘ + loop { + // Return `None` if we have encountered an error from the underlying decoder + // See: https://github.com/tokio-rs/tokio/issues/3976 + if state.has_errored { + // preparing has_errored -> paused + trace!("Returning None and setting paused"); + state.is_readable = false; + state.has_errored = false; + return Poll::Ready(None); + } + + // Repeatedly call `decode` or `decode_eof` while the buffer is "readable", + // i.e. it _might_ contain data consumable as a frame or closing frame. + // Both signal that there is no such data by returning `None`. + // + // If `decode` couldn't read a frame and the upstream source has returned eof, + // `decode_eof` will attempt to decode the remaining bytes as closing frames. + // + // If the underlying AsyncRead is resumable, we may continue after an EOF, + // but must finish emitting all of it's associated `decode_eof` frames. + // Furthermore, we don't want to emit any `decode_eof` frames on retried + // reads after an EOF unless we've actually read more data. + if state.is_readable { + // pausing or framing + if state.eof { + // pausing + let frame = pinned.codec.decode_eof(&mut state.buffer).map_err(|err| { + trace!("Got an error, going to errored state"); + state.has_errored = true; + err + })?; + if frame.is_none() { + state.is_readable = false; // prepare pausing -> paused + } + // implicit pausing -> pausing or pausing -> paused + return Poll::Ready(frame.map(Ok)); + } + + // framing + trace!("attempting to decode a frame"); + + if let Some(frame) = pinned.codec.decode(&mut state.buffer).map_err(|op| { + trace!("Got an error, going to errored state"); + state.has_errored = true; + op + })? { + trace!("frame decoded from buffer"); + // implicit framing -> framing + return Poll::Ready(Some(Ok(frame))); + } + + // framing -> reading + state.is_readable = false; + } + // reading or paused + // If we can't build a frame yet, try to read more data and try again. + // Make sure we've got room for at least one byte to read to ensure + // that we don't get a spurious 0 that looks like EOF. 
+ state.buffer.reserve(1); + let bytect = match poll_read_buf(pinned.inner.as_mut(), cx, &mut state.buffer).map_err( + |err| { + trace!("Got an error, going to errored state"); + state.has_errored = true; + err + }, + )? { + Poll::Ready(ct) => ct, + // implicit reading -> reading or implicit paused -> paused + Poll::Pending => return Poll::Pending, + }; + if bytect == 0 { + if state.eof { + // We're already at an EOF, and since we've reached this path + // we're also not readable. This implies that we've already finished + // our `decode_eof` handling, so we can simply return `None`. + // implicit paused -> paused + return Poll::Ready(None); + } + // prepare reading -> paused + state.eof = true; + } else { + // prepare paused -> framing or noop reading -> framing + state.eof = false; + } + + // paused -> framing or reading -> framing or reading -> pausing + state.is_readable = true; + } + } +} + +impl Sink for FramedImpl +where + T: AsyncWrite, + U: Encoder, + U::Error: From, + W: BorrowMut, +{ + type Error = U::Error; + + fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + if self.state.borrow().buffer.len() >= BACKPRESSURE_BOUNDARY { + self.as_mut().poll_flush(cx) + } else { + Poll::Ready(Ok(())) + } + } + + fn start_send(self: Pin<&mut Self>, item: I) -> Result<(), Self::Error> { + let pinned = self.project(); + pinned + .codec + .encode(item, &mut pinned.state.borrow_mut().buffer)?; + Ok(()) + } + + fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + use crate::util::poll_write_buf; + trace!("flushing framed transport"); + let mut pinned = self.project(); + + while !pinned.state.borrow_mut().buffer.is_empty() { + let WriteFrame { buffer } = pinned.state.borrow_mut(); + trace!(remaining = buffer.len(), "writing;"); + + let n = ready!(poll_write_buf(pinned.inner.as_mut(), cx, buffer))?; + + if n == 0 { + return Poll::Ready(Err(io::Error::new( + io::ErrorKind::WriteZero, + "failed to \ + write frame to transport", + ) + .into())); + } + } + + // Try flushing the underlying IO + ready!(pinned.inner.poll_flush(cx))?; + + trace!("framed transport flushed"); + Poll::Ready(Ok(())) + } + + fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + ready!(self.as_mut().poll_flush(cx))?; + ready!(self.project().inner.poll_shutdown(cx))?; + + Poll::Ready(Ok(())) + } +} diff --git a/third_party/rust/tokio-util/src/codec/framed_read.rs b/third_party/rust/tokio-util/src/codec/framed_read.rs index e7798c327ee6..184c567b498f 100644 --- a/third_party/rust/tokio-util/src/codec/framed_read.rs +++ b/third_party/rust/tokio-util/src/codec/framed_read.rs @@ -1,11 +1,11 @@ -use crate::codec::framed::{Fuse, ProjectFuse}; +use crate::codec::framed_impl::{FramedImpl, ReadFrame}; use crate::codec::Decoder; -use tokio::{io::AsyncRead, stream::Stream}; +use futures_core::Stream; +use tokio::io::AsyncRead; use bytes::BytesMut; use futures_sink::Sink; -use log::trace; use pin_project_lite::pin_project; use std::fmt; use std::pin::Pin; @@ -14,26 +14,14 @@ use std::task::{Context, Poll}; pin_project! { /// A [`Stream`] of messages decoded from an [`AsyncRead`]. /// - /// [`Stream`]: tokio::stream::Stream + /// [`Stream`]: futures_core::Stream /// [`AsyncRead`]: tokio::io::AsyncRead pub struct FramedRead { #[pin] - inner: FramedRead2>, + inner: FramedImpl, } } -pin_project! 
{ - pub(crate) struct FramedRead2 { - #[pin] - inner: T, - eof: bool, - is_readable: bool, - buffer: BytesMut, - } -} - -const INITIAL_CAPACITY: usize = 8 * 1024; - // ===== impl FramedRead ===== impl FramedRead @@ -44,10 +32,11 @@ where /// Creates a new `FramedRead` with the given `decoder`. pub fn new(inner: T, decoder: D) -> FramedRead { FramedRead { - inner: framed_read2(Fuse { - io: inner, + inner: FramedImpl { + inner, codec: decoder, - }), + state: Default::default(), + }, } } @@ -55,13 +44,16 @@ where /// initial size. pub fn with_capacity(inner: T, decoder: D, capacity: usize) -> FramedRead { FramedRead { - inner: framed_read2_with_buffer( - Fuse { - io: inner, - codec: decoder, + inner: FramedImpl { + inner, + codec: decoder, + state: ReadFrame { + eof: false, + is_readable: false, + buffer: BytesMut::with_capacity(capacity), + has_errored: false, }, - BytesMut::with_capacity(capacity), - ), + }, } } } @@ -74,7 +66,7 @@ impl FramedRead { /// of data coming in as it may corrupt the stream of frames otherwise /// being worked with. pub fn get_ref(&self) -> &T { - &self.inner.inner.io + &self.inner.inner } /// Returns a mutable reference to the underlying I/O stream wrapped by @@ -84,7 +76,17 @@ impl FramedRead { /// of data coming in as it may corrupt the stream of frames otherwise /// being worked with. pub fn get_mut(&mut self) -> &mut T { - &mut self.inner.inner.io + &mut self.inner.inner + } + + /// Returns a pinned mutable reference to the underlying I/O stream wrapped by + /// `FramedRead`. + /// + /// Note that care should be taken to not tamper with the underlying stream + /// of data coming in as it may corrupt the stream of frames otherwise + /// being worked with. + pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut T> { + self.project().inner.project().inner } /// Consumes the `FramedRead`, returning its underlying I/O stream. @@ -93,25 +95,57 @@ impl FramedRead { /// of data coming in as it may corrupt the stream of frames otherwise /// being worked with. pub fn into_inner(self) -> T { - self.inner.inner.io + self.inner.inner } /// Returns a reference to the underlying decoder. pub fn decoder(&self) -> &D { - &self.inner.inner.codec + &self.inner.codec } /// Returns a mutable reference to the underlying decoder. pub fn decoder_mut(&mut self) -> &mut D { - &mut self.inner.inner.codec + &mut self.inner.codec + } + + /// Maps the decoder `D` to `C`, preserving the read buffer + /// wrapped by `Framed`. + pub fn map_decoder(self, map: F) -> FramedRead + where + F: FnOnce(D) -> C, + { + // This could be potentially simplified once rust-lang/rust#86555 hits stable + let FramedImpl { + inner, + state, + codec, + } = self.inner; + FramedRead { + inner: FramedImpl { + inner, + state, + codec: map(codec), + }, + } + } + + /// Returns a mutable reference to the underlying decoder. + pub fn decoder_pin_mut(self: Pin<&mut Self>) -> &mut D { + self.project().inner.project().codec } /// Returns a reference to the read buffer. pub fn read_buffer(&self) -> &BytesMut { - &self.inner.buffer + &self.inner.state.buffer + } + + /// Returns a mutable reference to the read buffer. 
+ pub fn read_buffer_mut(&mut self) -> &mut BytesMut { + &mut self.inner.state.buffer } } +// This impl just defers to the underlying FramedImpl impl Stream for FramedRead where T: AsyncRead, @@ -132,43 +166,19 @@ where type Error = T::Error; fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.project() - .inner - .project() - .inner - .project() - .io - .poll_ready(cx) + self.project().inner.project().inner.poll_ready(cx) } fn start_send(self: Pin<&mut Self>, item: I) -> Result<(), Self::Error> { - self.project() - .inner - .project() - .inner - .project() - .io - .start_send(item) + self.project().inner.project().inner.start_send(item) } fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.project() - .inner - .project() - .inner - .project() - .io - .poll_flush(cx) + self.project().inner.project().inner.poll_flush(cx) } fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.project() - .inner - .project() - .inner - .project() - .io - .poll_close(cx) + self.project().inner.project().inner.poll_close(cx) } } @@ -179,126 +189,11 @@ where { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("FramedRead") - .field("inner", &self.inner.inner.io) - .field("decoder", &self.inner.inner.codec) - .field("eof", &self.inner.eof) - .field("is_readable", &self.inner.is_readable) - .field("buffer", &self.inner.buffer) + .field("inner", &self.get_ref()) + .field("decoder", &self.decoder()) + .field("eof", &self.inner.state.eof) + .field("is_readable", &self.inner.state.is_readable) + .field("buffer", &self.read_buffer()) .finish() } } - -// ===== impl FramedRead2 ===== - -pub(crate) fn framed_read2(inner: T) -> FramedRead2 { - FramedRead2 { - inner, - eof: false, - is_readable: false, - buffer: BytesMut::with_capacity(INITIAL_CAPACITY), - } -} - -pub(crate) fn framed_read2_with_buffer(inner: T, mut buf: BytesMut) -> FramedRead2 { - if buf.capacity() < INITIAL_CAPACITY { - let bytes_to_reserve = INITIAL_CAPACITY - buf.capacity(); - buf.reserve(bytes_to_reserve); - } - FramedRead2 { - inner, - eof: false, - is_readable: !buf.is_empty(), - buffer: buf, - } -} - -impl FramedRead2 { - pub(crate) fn get_ref(&self) -> &T { - &self.inner - } - - pub(crate) fn into_inner(self) -> T { - self.inner - } - - pub(crate) fn into_parts(self) -> (T, BytesMut) { - (self.inner, self.buffer) - } - - pub(crate) fn get_mut(&mut self) -> &mut T { - &mut self.inner - } - - pub(crate) fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut T> { - self.project().inner - } - - pub(crate) fn buffer(&self) -> &BytesMut { - &self.buffer - } -} - -impl Stream for FramedRead2 -where - T: ProjectFuse + AsyncRead, - T::Codec: Decoder, -{ - type Item = Result<::Item, ::Error>; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut pinned = self.project(); - loop { - // Repeatedly call `decode` or `decode_eof` as long as it is - // "readable". Readable is defined as not having returned `None`. If - // the upstream has returned EOF, and the decoder is no longer - // readable, it can be assumed that the decoder will never become - // readable again, at which point the stream is terminated. - if *pinned.is_readable { - if *pinned.eof { - let frame = pinned - .inner - .as_mut() - .project() - .codec - .decode_eof(&mut pinned.buffer)?; - return Poll::Ready(frame.map(Ok)); - } - - trace!("attempting to decode a frame"); - - if let Some(frame) = pinned - .inner - .as_mut() - .project() - .codec - .decode(&mut pinned.buffer)? 
- { - trace!("frame decoded from buffer"); - return Poll::Ready(Some(Ok(frame))); - } - - *pinned.is_readable = false; - } - - assert!(!*pinned.eof); - - // Otherwise, try to read more data and try again. Make sure we've - // got room for at least one byte to read to ensure that we don't - // get a spurious 0 that looks like EOF - pinned.buffer.reserve(1); - let bytect = match pinned - .inner - .as_mut() - .poll_read_buf(cx, &mut pinned.buffer)? - { - Poll::Ready(ct) => ct, - Poll::Pending => return Poll::Pending, - }; - if bytect == 0 { - *pinned.eof = true; - } - - *pinned.is_readable = true; - } - } -} diff --git a/third_party/rust/tokio-util/src/codec/framed_write.rs b/third_party/rust/tokio-util/src/codec/framed_write.rs index c0049b2d04a4..aa4cec982013 100644 --- a/third_party/rust/tokio-util/src/codec/framed_write.rs +++ b/third_party/rust/tokio-util/src/codec/framed_write.rs @@ -1,20 +1,14 @@ -use crate::codec::decoder::Decoder; use crate::codec::encoder::Encoder; -use crate::codec::framed::{Fuse, ProjectFuse}; +use crate::codec::framed_impl::{FramedImpl, WriteFrame}; -use tokio::{ - io::{AsyncBufRead, AsyncRead, AsyncWrite}, - stream::Stream, -}; +use futures_core::Stream; +use tokio::io::AsyncWrite; -use bytes::{Buf, BytesMut}; -use futures_core::ready; +use bytes::BytesMut; use futures_sink::Sink; -use log::trace; use pin_project_lite::pin_project; use std::fmt; -use std::io::{self, BufRead, Read}; -use std::mem::MaybeUninit; +use std::io; use std::pin::Pin; use std::task::{Context, Poll}; @@ -24,21 +18,10 @@ pin_project! { /// [`Sink`]: futures_sink::Sink pub struct FramedWrite { #[pin] - inner: FramedWrite2>, + inner: FramedImpl, } } -pin_project! { - pub(crate) struct FramedWrite2 { - #[pin] - inner: T, - buffer: BytesMut, - } -} - -const INITIAL_CAPACITY: usize = 8 * 1024; -const BACKPRESSURE_BOUNDARY: usize = INITIAL_CAPACITY; - impl FramedWrite where T: AsyncWrite, @@ -46,10 +29,11 @@ where /// Creates a new `FramedWrite` with the given `encoder`. pub fn new(inner: T, encoder: E) -> FramedWrite { FramedWrite { - inner: framed_write2(Fuse { - io: inner, + inner: FramedImpl { + inner, codec: encoder, - }), + state: WriteFrame::default(), + }, } } } @@ -62,7 +46,7 @@ impl FramedWrite { /// of data coming in as it may corrupt the stream of frames otherwise /// being worked with. pub fn get_ref(&self) -> &T { - &self.inner.inner.io + &self.inner.inner } /// Returns a mutable reference to the underlying I/O stream wrapped by @@ -72,7 +56,17 @@ impl FramedWrite { /// of data coming in as it may corrupt the stream of frames otherwise /// being worked with. pub fn get_mut(&mut self) -> &mut T { - &mut self.inner.inner.io + &mut self.inner.inner + } + + /// Returns a pinned mutable reference to the underlying I/O stream wrapped by + /// `FramedWrite`. + /// + /// Note that care should be taken to not tamper with the underlying stream + /// of data coming in as it may corrupt the stream of frames otherwise + /// being worked with. + pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut T> { + self.project().inner.project().inner } /// Consumes the `FramedWrite`, returning its underlying I/O stream. @@ -81,21 +75,57 @@ impl FramedWrite { /// of data coming in as it may corrupt the stream of frames otherwise /// being worked with. pub fn into_inner(self) -> T { - self.inner.inner.io + self.inner.inner } - /// Returns a reference to the underlying decoder. + /// Returns a reference to the underlying encoder. 
pub fn encoder(&self) -> &E { - &self.inner.inner.codec + &self.inner.codec } - /// Returns a mutable reference to the underlying decoder. + /// Returns a mutable reference to the underlying encoder. pub fn encoder_mut(&mut self) -> &mut E { - &mut self.inner.inner.codec + &mut self.inner.codec + } + + /// Maps the encoder `E` to `C`, preserving the write buffer + /// wrapped by `Framed`. + pub fn map_encoder(self, map: F) -> FramedWrite + where + F: FnOnce(E) -> C, + { + // This could be potentially simplified once rust-lang/rust#86555 hits stable + let FramedImpl { + inner, + state, + codec, + } = self.inner; + FramedWrite { + inner: FramedImpl { + inner, + state, + codec: map(codec), + }, + } + } + + /// Returns a mutable reference to the underlying encoder. + pub fn encoder_pin_mut(self: Pin<&mut Self>) -> &mut E { + self.project().inner.project().codec + } + + /// Returns a reference to the write buffer. + pub fn write_buffer(&self) -> &BytesMut { + &self.inner.state.buffer + } + + /// Returns a mutable reference to the write buffer. + pub fn write_buffer_mut(&mut self) -> &mut BytesMut { + &mut self.inner.state.buffer } } -// This impl just defers to the underlying FramedWrite2 +// This impl just defers to the underlying FramedImpl impl Sink for FramedWrite where T: AsyncWrite, @@ -121,6 +151,7 @@ where } } +// This impl just defers to the underlying T: Stream impl Stream for FramedWrite where T: Stream, @@ -128,13 +159,7 @@ where type Item = T::Item; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.project() - .inner - .project() - .inner - .project() - .io - .poll_next(cx) + self.project().inner.project().inner.poll_next(cx) } } @@ -145,180 +170,9 @@ where { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("FramedWrite") - .field("inner", &self.inner.get_ref().io) - .field("encoder", &self.inner.get_ref().codec) - .field("buffer", &self.inner.buffer) + .field("inner", &self.get_ref()) + .field("encoder", &self.encoder()) + .field("buffer", &self.inner.state.buffer) .finish() } } - -// ===== impl FramedWrite2 ===== - -pub(crate) fn framed_write2(inner: T) -> FramedWrite2 { - FramedWrite2 { - inner, - buffer: BytesMut::with_capacity(INITIAL_CAPACITY), - } -} - -pub(crate) fn framed_write2_with_buffer(inner: T, mut buf: BytesMut) -> FramedWrite2 { - if buf.capacity() < INITIAL_CAPACITY { - let bytes_to_reserve = INITIAL_CAPACITY - buf.capacity(); - buf.reserve(bytes_to_reserve); - } - FramedWrite2 { inner, buffer: buf } -} - -impl FramedWrite2 { - pub(crate) fn get_ref(&self) -> &T { - &self.inner - } - - pub(crate) fn into_inner(self) -> T { - self.inner - } - - pub(crate) fn into_parts(self) -> (T, BytesMut) { - (self.inner, self.buffer) - } - - pub(crate) fn get_mut(&mut self) -> &mut T { - &mut self.inner - } -} - -impl Sink for FramedWrite2 -where - T: ProjectFuse + AsyncWrite, - T::Codec: Encoder, -{ - type Error = >::Error; - - fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - // If the buffer is already over 8KiB, then attempt to flush it. If after flushing it's - // *still* over 8KiB, then apply backpressure (reject the send). 
- if self.buffer.len() >= BACKPRESSURE_BOUNDARY { - match self.as_mut().poll_flush(cx) { - Poll::Pending => return Poll::Pending, - Poll::Ready(Err(e)) => return Poll::Ready(Err(e)), - Poll::Ready(Ok(())) => (), - }; - - if self.buffer.len() >= BACKPRESSURE_BOUNDARY { - return Poll::Pending; - } - } - Poll::Ready(Ok(())) - } - - fn start_send(self: Pin<&mut Self>, item: I) -> Result<(), Self::Error> { - let mut pinned = self.project(); - pinned - .inner - .project() - .codec - .encode(item, &mut pinned.buffer)?; - Ok(()) - } - - fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - trace!("flushing framed transport"); - let mut pinned = self.project(); - - while !pinned.buffer.is_empty() { - trace!("writing; remaining={}", pinned.buffer.len()); - - let buf = &pinned.buffer; - let n = ready!(pinned.inner.as_mut().poll_write(cx, &buf))?; - - if n == 0 { - return Poll::Ready(Err(io::Error::new( - io::ErrorKind::WriteZero, - "failed to \ - write frame to transport", - ) - .into())); - } - - pinned.buffer.advance(n); - } - - // Try flushing the underlying IO - ready!(pinned.inner.poll_flush(cx))?; - - trace!("framed transport flushed"); - Poll::Ready(Ok(())) - } - - fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - ready!(self.as_mut().poll_flush(cx))?; - ready!(self.project().inner.poll_shutdown(cx))?; - - Poll::Ready(Ok(())) - } -} - -impl Decoder for FramedWrite2 { - type Item = T::Item; - type Error = T::Error; - - fn decode(&mut self, src: &mut BytesMut) -> Result, T::Error> { - self.inner.decode(src) - } - - fn decode_eof(&mut self, src: &mut BytesMut) -> Result, T::Error> { - self.inner.decode_eof(src) - } -} - -impl Read for FramedWrite2 { - fn read(&mut self, dst: &mut [u8]) -> io::Result { - self.inner.read(dst) - } -} - -impl BufRead for FramedWrite2 { - fn fill_buf(&mut self) -> io::Result<&[u8]> { - self.inner.fill_buf() - } - - fn consume(&mut self, amt: usize) { - self.inner.consume(amt) - } -} - -impl AsyncRead for FramedWrite2 { - unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [MaybeUninit]) -> bool { - self.inner.prepare_uninitialized_buffer(buf) - } - - fn poll_read( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { - self.project().inner.poll_read(cx, buf) - } -} - -impl AsyncBufRead for FramedWrite2 { - fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.project().inner.poll_fill_buf(cx) - } - - fn consume(self: Pin<&mut Self>, amt: usize) { - self.project().inner.consume(amt) - } -} - -impl ProjectFuse for FramedWrite2 -where - T: ProjectFuse, -{ - type Io = T::Io; - type Codec = T::Codec; - - fn project(self: Pin<&mut Self>) -> Fuse, &mut Self::Codec> { - self.project().inner.project() - } -} diff --git a/third_party/rust/tokio-util/src/codec/length_delimited.rs b/third_party/rust/tokio-util/src/codec/length_delimited.rs index 5e98d4a70312..93d2f180d0f9 100644 --- a/third_party/rust/tokio-util/src/codec/length_delimited.rs +++ b/third_party/rust/tokio-util/src/codec/length_delimited.rs @@ -39,7 +39,7 @@ //! Specifically, given the following: //! //! ``` -//! use tokio::prelude::*; +//! use tokio::io::{AsyncRead, AsyncWrite}; //! use tokio_util::codec::{Framed, LengthDelimitedCodec}; //! //! use futures::SinkExt; @@ -84,7 +84,7 @@ //! # fn bind_read(io: T) { //! LengthDelimitedCodec::builder() //! .length_field_offset(0) // default value -//! .length_field_length(2) +//! .length_field_type::() //! .length_adjustment(0) // default value //! 
.num_skip(0) // Do not strip frame header //! .new_read(io); @@ -118,7 +118,7 @@ //! # fn bind_read(io: T) { //! LengthDelimitedCodec::builder() //! .length_field_offset(0) // default value -//! .length_field_length(2) +//! .length_field_type::() //! .length_adjustment(0) // default value //! // `num_skip` is not needed, the default is to skip //! .new_read(io); @@ -150,7 +150,7 @@ //! # fn bind_read(io: T) { //! LengthDelimitedCodec::builder() //! .length_field_offset(0) // default value -//! .length_field_length(2) +//! .length_field_type::() //! .length_adjustment(-2) // size of head //! .num_skip(0) //! .new_read(io); @@ -228,7 +228,7 @@ //! # fn bind_read(io: T) { //! LengthDelimitedCodec::builder() //! .length_field_offset(1) // length of hdr1 -//! .length_field_length(2) +//! .length_field_type::() //! .length_adjustment(1) // length of hdr2 //! .num_skip(3) // length of hdr1 + LEN //! .new_read(io); @@ -274,7 +274,7 @@ //! # fn bind_read(io: T) { //! LengthDelimitedCodec::builder() //! .length_field_offset(1) // length of hdr1 -//! .length_field_length(2) +//! .length_field_type::() //! .length_adjustment(-3) // length of hdr1 + LEN, negative //! .num_skip(3) //! .new_read(io); @@ -302,6 +302,37 @@ //! anywhere because it already is factored into the total frame length that //! is read from the byte stream. //! +//! ## Example 7 +//! +//! The following will parse a 3 byte length field at offset 0 in a 4 byte +//! frame head, excluding the 4th byte from the yielded `BytesMut`. +//! +//! ``` +//! # use tokio::io::AsyncRead; +//! # use tokio_util::codec::LengthDelimitedCodec; +//! # fn bind_read(io: T) { +//! LengthDelimitedCodec::builder() +//! .length_field_offset(0) // default value +//! .length_field_length(3) +//! .length_adjustment(0) // default value +//! .num_skip(4) // skip the first 4 bytes +//! .new_read(io); +//! # } +//! # pub fn main() {} +//! ``` +//! +//! The following frame will be decoded as such: +//! +//! ```text +//! INPUT DECODED +//! +------- len ------+--- Payload ---+ +--- Payload ---+ +//! | \x00\x00\x0B\xFF | Hello world | => | Hello world | +//! +------------------+---------------+ +---------------+ +//! ``` +//! +//! A simple example where there are unused bytes between the length field +//! and the payload. +//! //! # Encoding //! //! [`FramedWrite`] adapts an [`AsyncWrite`] into a `Sink` of [`BytesMut`], @@ -319,7 +350,7 @@ //! # fn write_frame(io: T) { //! # let _ = //! LengthDelimitedCodec::builder() -//! .length_field_length(2) +//! .length_field_type::() //! .new_write(io); //! # } //! # pub fn main() {} @@ -333,13 +364,13 @@ //! +------------+--------------+ //! ``` //! -//! [`LengthDelimitedCodec::new()`]: struct.LengthDelimitedCodec.html#method.new -//! [`FramedRead`]: struct.FramedRead.html -//! [`FramedWrite`]: struct.FramedWrite.html -//! [`AsyncRead`]: ../../trait.AsyncRead.html -//! [`AsyncWrite`]: ../../trait.AsyncWrite.html -//! [`Encoder`]: ../trait.Encoder.html -//! [`BytesMut`]: https://docs.rs/bytes/0.4/bytes/struct.BytesMut.html +//! [`LengthDelimitedCodec::new()`]: method@LengthDelimitedCodec::new +//! [`FramedRead`]: struct@FramedRead +//! [`FramedWrite`]: struct@FramedWrite +//! [`AsyncRead`]: trait@tokio::io::AsyncRead +//! [`AsyncWrite`]: trait@tokio::io::AsyncWrite +//! [`Encoder`]: trait@Encoder +//! 
[`BytesMut`]: bytes::BytesMut use crate::codec::{Decoder, Encoder, Framed, FramedRead, FramedWrite}; @@ -348,7 +379,7 @@ use tokio::io::{AsyncRead, AsyncWrite}; use bytes::{Buf, BufMut, Bytes, BytesMut}; use std::error::Error as StdError; use std::io::{self, Cursor}; -use std::{cmp, fmt}; +use std::{cmp, fmt, mem}; /// Configure length delimited `LengthDelimitedCodec`s. /// @@ -390,7 +421,7 @@ pub struct LengthDelimitedCodecError { /// See [module level] documentation for more detail. /// /// [module level]: index.html -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct LengthDelimitedCodec { // Configuration values builder: Builder, @@ -455,7 +486,7 @@ impl LengthDelimitedCodec { // Skip the required bytes src.advance(self.builder.length_field_offset); - // match endianess + // match endianness let n = if self.builder.length_field_is_big_endian { src.get_uint(field_len) } else { @@ -504,14 +535,14 @@ impl LengthDelimitedCodec { Ok(Some(n)) } - fn decode_data(&self, n: usize, src: &mut BytesMut) -> io::Result> { + fn decode_data(&self, n: usize, src: &mut BytesMut) -> Option { // At this point, the buffer has already had the required capacity // reserved. All there is to do is read. if src.len() < n { - return Ok(None); + return None; } - Ok(Some(src.split_to(n))) + Some(src.split_to(n)) } } @@ -531,7 +562,7 @@ impl Decoder for LengthDelimitedCodec { DecodeState::Data(n) => n, }; - match self.decode_data(n, src)? { + match self.decode_data(n, src) { Some(data) => { // Update the decode state self.state = DecodeState::Head; @@ -598,6 +629,24 @@ impl Default for LengthDelimitedCodec { // ===== impl Builder ===== +mod builder { + /// Types that can be used with `Builder::length_field_type`. + pub trait LengthFieldType {} + + impl LengthFieldType for u8 {} + impl LengthFieldType for u16 {} + impl LengthFieldType for u32 {} + impl LengthFieldType for u64 {} + + #[cfg(any( + target_pointer_width = "8", + target_pointer_width = "16", + target_pointer_width = "32", + target_pointer_width = "64", + ))] + impl LengthFieldType for usize {} +} + impl Builder { /// Creates a new length delimited codec builder with default configuration /// values. @@ -611,7 +660,7 @@ impl Builder { /// # fn bind_read(io: T) { /// LengthDelimitedCodec::builder() /// .length_field_offset(0) - /// .length_field_length(2) + /// .length_field_type::() /// .length_adjustment(0) /// .num_skip(0) /// .new_read(io); @@ -715,7 +764,7 @@ impl Builder { } } - /// Sets the max frame length + /// Sets the max frame length in bytes /// /// This configuration option applies to both encoding and decoding. The /// default value is 8MB. @@ -736,7 +785,7 @@ impl Builder { /// /// # fn bind_read(io: T) { /// LengthDelimitedCodec::builder() - /// .max_frame_length(8 * 1024) + /// .max_frame_length(8 * 1024 * 1024) /// .new_read(io); /// # } /// # pub fn main() {} @@ -746,6 +795,42 @@ impl Builder { self } + /// Sets the unsigned integer type used to represent the length field. + /// + /// The default type is [`u32`]. The max type is [`u64`] (or [`usize`] on + /// 64-bit targets). 
+ /// + /// # Examples + /// + /// ``` + /// # use tokio::io::AsyncRead; + /// use tokio_util::codec::LengthDelimitedCodec; + /// + /// # fn bind_read(io: T) { + /// LengthDelimitedCodec::builder() + /// .length_field_type::() + /// .new_read(io); + /// # } + /// # pub fn main() {} + /// ``` + /// + /// Unlike [`Builder::length_field_length`], this does not fail at runtime + /// and instead produces a compile error: + /// + /// ```compile_fail + /// # use tokio::io::AsyncRead; + /// # use tokio_util::codec::LengthDelimitedCodec; + /// # fn bind_read(io: T) { + /// LengthDelimitedCodec::builder() + /// .length_field_type::() + /// .new_read(io); + /// # } + /// # pub fn main() {} + /// ``` + pub fn length_field_type(&mut self) -> &mut Self { + self.length_field_length(mem::size_of::()) + } + /// Sets the number of bytes used to represent the length field /// /// The default value is `4`. The max value is `8`. @@ -847,7 +932,7 @@ impl Builder { /// # pub fn main() { /// LengthDelimitedCodec::builder() /// .length_field_offset(0) - /// .length_field_length(2) + /// .length_field_type::() /// .length_adjustment(0) /// .num_skip(0) /// .new_codec(); @@ -871,7 +956,7 @@ impl Builder { /// # fn bind_read(io: T) { /// LengthDelimitedCodec::builder() /// .length_field_offset(0) - /// .length_field_length(2) + /// .length_field_type::() /// .length_adjustment(0) /// .num_skip(0) /// .new_read(io); @@ -894,7 +979,7 @@ impl Builder { /// # use tokio_util::codec::LengthDelimitedCodec; /// # fn write_frame(io: T) { /// LengthDelimitedCodec::builder() - /// .length_field_length(2) + /// .length_field_type::() /// .new_write(io); /// # } /// # pub fn main() {} @@ -916,7 +1001,7 @@ impl Builder { /// # fn write_frame(io: T) { /// # let _ = /// LengthDelimitedCodec::builder() - /// .length_field_length(2) + /// .length_field_type::() /// .new_framed(io); /// # } /// # pub fn main() {} diff --git a/third_party/rust/tokio-util/src/codec/lines_codec.rs b/third_party/rust/tokio-util/src/codec/lines_codec.rs index e1816d8c4e87..7a0a8f04541c 100644 --- a/third_party/rust/tokio-util/src/codec/lines_codec.rs +++ b/third_party/rust/tokio-util/src/codec/lines_codec.rs @@ -133,7 +133,7 @@ impl Decoder for LinesCodec { buf.advance(read_to); self.next_index = 0; if buf.is_empty() { - return Err(LinesCodecError::MaxLineLengthExceeded); + return Ok(None); } } (false, Some(offset)) => { @@ -203,12 +203,12 @@ impl Default for LinesCodec { } } -/// An error occured while encoding or decoding a line. +/// An error occurred while encoding or decoding a line. #[derive(Debug)] pub enum LinesCodecError { /// The maximum line length was exceeded. MaxLineLengthExceeded, - /// An IO error occured. + /// An IO error occurred. Io(io::Error), } diff --git a/third_party/rust/tokio-util/src/codec/mod.rs b/third_party/rust/tokio-util/src/codec/mod.rs index ec76a6419f01..2295176bdcea 100644 --- a/third_party/rust/tokio-util/src/codec/mod.rs +++ b/third_party/rust/tokio-util/src/codec/mod.rs @@ -1,13 +1,262 @@ -//! Utilities for encoding and decoding frames. +//! Adaptors from AsyncRead/AsyncWrite to Stream/Sink //! -//! Contains adapters to go from streams of bytes, [`AsyncRead`] and +//! Raw I/O objects work with byte sequences, but higher-level code usually +//! wants to batch these into meaningful chunks, called "frames". +//! +//! This module contains adapters to go from streams of bytes, [`AsyncRead`] and //! [`AsyncWrite`], to framed streams implementing [`Sink`] and [`Stream`]. //! Framed streams are also known as transports. //! 
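Before the per-trait sections that follow, a small end-to-end sketch of the adaptors described above: wrapping a `TcpStream` in `Framed` with `LinesCodec` so the socket can be driven as a `Stream` plus `Sink`. This assumes the `futures` crate's `StreamExt`/`SinkExt` extension traits are available alongside tokio-util.

```rust
use futures::{SinkExt, StreamExt}; // assumed: futures crate for Stream/Sink adapters
use tokio::net::TcpStream;
use tokio_util::codec::{Framed, LinesCodec, LinesCodecError};

// Echo a single line back to the peer, treating the socket as a framed transport.
async fn echo_one_line(stream: TcpStream) -> Result<(), LinesCodecError> {
    let mut framed = Framed::new(stream, LinesCodec::new());
    if let Some(line) = framed.next().await {
        // `next()` yields decoded frames; `send()` runs them back through the encoder.
        framed.send(line?).await?;
    }
    Ok(())
}
```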
+//! # The Decoder trait +//! +//! A [`Decoder`] is used together with [`FramedRead`] or [`Framed`] to turn an +//! [`AsyncRead`] into a [`Stream`]. The job of the decoder trait is to specify +//! how sequences of bytes are turned into a sequence of frames, and to +//! determine where the boundaries between frames are. The job of the +//! `FramedRead` is to repeatedly switch between reading more data from the IO +//! resource, and asking the decoder whether we have received enough data to +//! decode another frame of data. +//! +//! The main method on the `Decoder` trait is the [`decode`] method. This method +//! takes as argument the data that has been read so far, and when it is called, +//! it will be in one of the following situations: +//! +//! 1. The buffer contains less than a full frame. +//! 2. The buffer contains exactly a full frame. +//! 3. The buffer contains more than a full frame. +//! +//! In the first situation, the decoder should return `Ok(None)`. +//! +//! In the second situation, the decoder should clear the provided buffer and +//! return `Ok(Some(the_decoded_frame))`. +//! +//! In the third situation, the decoder should use a method such as [`split_to`] +//! or [`advance`] to modify the buffer such that the frame is removed from the +//! buffer, but any data in the buffer after that frame should still remain in +//! the buffer. The decoder should also return `Ok(Some(the_decoded_frame))` in +//! this case. +//! +//! Finally the decoder may return an error if the data is invalid in some way. +//! The decoder should _not_ return an error just because it has yet to receive +//! a full frame. +//! +//! It is guaranteed that, from one call to `decode` to another, the provided +//! buffer will contain the exact same data as before, except that if more data +//! has arrived through the IO resource, that data will have been appended to +//! the buffer. This means that reading frames from a `FramedRead` is +//! essentially equivalent to the following loop: +//! +//! ```no_run +//! use tokio::io::AsyncReadExt; +//! # // This uses async_stream to create an example that compiles. +//! # fn foo() -> impl futures_core::Stream> { async_stream::try_stream! { +//! # use tokio_util::codec::Decoder; +//! # let mut decoder = tokio_util::codec::BytesCodec::new(); +//! # let io_resource = &mut &[0u8, 1, 2, 3][..]; +//! +//! let mut buf = bytes::BytesMut::new(); +//! loop { +//! // The read_buf call will append to buf rather than overwrite existing data. +//! let len = io_resource.read_buf(&mut buf).await?; +//! +//! if len == 0 { +//! while let Some(frame) = decoder.decode_eof(&mut buf)? { +//! yield frame; +//! } +//! break; +//! } +//! +//! while let Some(frame) = decoder.decode(&mut buf)? { +//! yield frame; +//! } +//! } +//! # }} +//! ``` +//! The example above uses `yield` whenever the `Stream` produces an item. +//! +//! ## Example decoder +//! +//! As an example, consider a protocol that can be used to send strings where +//! each frame is a four byte integer that contains the length of the frame, +//! followed by that many bytes of string data. The decoder fails with an error +//! if the string data is not valid utf-8 or too long. +//! +//! Such a decoder can be written like this: +//! ``` +//! use tokio_util::codec::Decoder; +//! use bytes::{BytesMut, Buf}; +//! +//! struct MyStringDecoder {} +//! +//! const MAX: usize = 8 * 1024 * 1024; +//! +//! impl Decoder for MyStringDecoder { +//! type Item = String; +//! type Error = std::io::Error; +//! +//! fn decode( +//! 
&mut self, +//! src: &mut BytesMut +//! ) -> Result, Self::Error> { +//! if src.len() < 4 { +//! // Not enough data to read length marker. +//! return Ok(None); +//! } +//! +//! // Read length marker. +//! let mut length_bytes = [0u8; 4]; +//! length_bytes.copy_from_slice(&src[..4]); +//! let length = u32::from_le_bytes(length_bytes) as usize; +//! +//! // Check that the length is not too large to avoid a denial of +//! // service attack where the server runs out of memory. +//! if length > MAX { +//! return Err(std::io::Error::new( +//! std::io::ErrorKind::InvalidData, +//! format!("Frame of length {} is too large.", length) +//! )); +//! } +//! +//! if src.len() < 4 + length { +//! // The full string has not yet arrived. +//! // +//! // We reserve more space in the buffer. This is not strictly +//! // necessary, but is a good idea performance-wise. +//! src.reserve(4 + length - src.len()); +//! +//! // We inform the Framed that we need more bytes to form the next +//! // frame. +//! return Ok(None); +//! } +//! +//! // Use advance to modify src such that it no longer contains +//! // this frame. +//! let data = src[4..4 + length].to_vec(); +//! src.advance(4 + length); +//! +//! // Convert the data to a string, or fail if it is not valid utf-8. +//! match String::from_utf8(data) { +//! Ok(string) => Ok(Some(string)), +//! Err(utf8_error) => { +//! Err(std::io::Error::new( +//! std::io::ErrorKind::InvalidData, +//! utf8_error.utf8_error(), +//! )) +//! }, +//! } +//! } +//! } +//! ``` +//! +//! # The Encoder trait +//! +//! An [`Encoder`] is used together with [`FramedWrite`] or [`Framed`] to turn +//! an [`AsyncWrite`] into a [`Sink`]. The job of the encoder trait is to +//! specify how frames are turned into a sequences of bytes. The job of the +//! `FramedWrite` is to take the resulting sequence of bytes and write it to the +//! IO resource. +//! +//! The main method on the `Encoder` trait is the [`encode`] method. This method +//! takes an item that is being written, and a buffer to write the item to. The +//! buffer may already contain data, and in this case, the encoder should append +//! the new frame the to buffer rather than overwrite the existing data. +//! +//! It is guaranteed that, from one call to `encode` to another, the provided +//! buffer will contain the exact same data as before, except that some of the +//! data may have been removed from the front of the buffer. Writing to a +//! `FramedWrite` is essentially equivalent to the following loop: +//! +//! ```no_run +//! use tokio::io::AsyncWriteExt; +//! use bytes::Buf; // for advance +//! # use tokio_util::codec::Encoder; +//! # async fn next_frame() -> bytes::Bytes { bytes::Bytes::new() } +//! # async fn no_more_frames() { } +//! # #[tokio::main] async fn main() -> std::io::Result<()> { +//! # let mut io_resource = tokio::io::sink(); +//! # let mut encoder = tokio_util::codec::BytesCodec::new(); +//! +//! const MAX: usize = 8192; +//! +//! let mut buf = bytes::BytesMut::new(); +//! loop { +//! tokio::select! { +//! num_written = io_resource.write(&buf), if !buf.is_empty() => { +//! buf.advance(num_written?); +//! }, +//! frame = next_frame(), if buf.len() < MAX => { +//! encoder.encode(frame, &mut buf)?; +//! }, +//! _ = no_more_frames() => { +//! io_resource.write_all(&buf).await?; +//! io_resource.shutdown().await?; +//! return Ok(()); +//! }, +//! } +//! } +//! # } +//! ``` +//! Here the `next_frame` method corresponds to any frames you write to the +//! `FramedWrite`. 
The `no_more_frames` method corresponds to closing the +//! `FramedWrite` with [`SinkExt::close`]. +//! +//! ## Example encoder +//! +//! As an example, consider a protocol that can be used to send strings where +//! each frame is a four byte integer that contains the length of the frame, +//! followed by that many bytes of string data. The encoder will fail if the +//! string is too long. +//! +//! Such an encoder can be written like this: +//! ``` +//! use tokio_util::codec::Encoder; +//! use bytes::BytesMut; +//! +//! struct MyStringEncoder {} +//! +//! const MAX: usize = 8 * 1024 * 1024; +//! +//! impl Encoder for MyStringEncoder { +//! type Error = std::io::Error; +//! +//! fn encode(&mut self, item: String, dst: &mut BytesMut) -> Result<(), Self::Error> { +//! // Don't send a string if it is longer than the other end will +//! // accept. +//! if item.len() > MAX { +//! return Err(std::io::Error::new( +//! std::io::ErrorKind::InvalidData, +//! format!("Frame of length {} is too large.", item.len()) +//! )); +//! } +//! +//! // Convert the length into a byte array. +//! // The cast to u32 cannot overflow due to the length check above. +//! let len_slice = u32::to_le_bytes(item.len() as u32); +//! +//! // Reserve space in the buffer. +//! dst.reserve(4 + item.len()); +//! +//! // Write the length and string to the buffer. +//! dst.extend_from_slice(&len_slice); +//! dst.extend_from_slice(item.as_bytes()); +//! Ok(()) +//! } +//! } +//! ``` +//! //! [`AsyncRead`]: tokio::io::AsyncRead //! [`AsyncWrite`]: tokio::io::AsyncWrite -//! [`Stream`]: tokio::stream::Stream +//! [`Stream`]: futures_core::Stream //! [`Sink`]: futures_sink::Sink +//! [`SinkExt::close`]: https://docs.rs/futures/0.3/futures/sink/trait.SinkExt.html#method.close +//! [`FramedRead`]: struct@crate::codec::FramedRead +//! [`FramedWrite`]: struct@crate::codec::FramedWrite +//! [`Framed`]: struct@crate::codec::Framed +//! [`Decoder`]: trait@crate::codec::Decoder +//! [`decode`]: fn@crate::codec::Decoder::decode +//! [`encode`]: fn@crate::codec::Encoder::encode +//! [`split_to`]: fn@bytes::BytesMut::split_to +//! [`advance`]: fn@bytes::Buf::advance mod bytes_codec; pub use self::bytes_codec::BytesCodec; @@ -18,6 +267,10 @@ pub use self::decoder::Decoder; mod encoder; pub use self::encoder::Encoder; +mod framed_impl; +#[allow(unused_imports)] +pub(crate) use self::framed_impl::{FramedImpl, RWFrames, ReadFrame, WriteFrame}; + mod framed; pub use self::framed::{Framed, FramedParts}; @@ -32,3 +285,6 @@ pub use self::length_delimited::{LengthDelimitedCodec, LengthDelimitedCodecError mod lines_codec; pub use self::lines_codec::{LinesCodec, LinesCodecError}; + +mod any_delimiter_codec; +pub use self::any_delimiter_codec::{AnyDelimiterCodec, AnyDelimiterCodecError}; diff --git a/third_party/rust/tokio-util/src/compat.rs b/third_party/rust/tokio-util/src/compat.rs index 769e30c2bb99..6a8802d96995 100644 --- a/third_party/rust/tokio-util/src/compat.rs +++ b/third_party/rust/tokio-util/src/compat.rs @@ -1,5 +1,6 @@ //! Compatibility between the `tokio::io` and `futures-io` versions of the //! `AsyncRead` and `AsyncWrite` traits. +use futures_core::ready; use pin_project_lite::pin_project; use std::io; use std::pin::Pin; @@ -12,6 +13,7 @@ pin_project! { pub struct Compat { #[pin] inner: T, + seek_pos: Option, } } @@ -19,7 +21,7 @@ pin_project! { /// `futures_io::AsyncRead` to implement `tokio::io::AsyncRead`. 
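The compat hunks that follow convert between the futures-io and Tokio I/O traits. A hedged sketch of the futures-to-Tokio direction, assuming the `futures-io` crate is in scope for the source type:

```rust
use tokio::io::AsyncReadExt;
use tokio_util::compat::FuturesAsyncReadCompatExt;

// Read four bytes from any futures-io reader through tokio's AsyncReadExt.
async fn read_four<R>(reader: R) -> std::io::Result<[u8; 4]>
where
    R: futures_io::AsyncRead + Unpin,
{
    // `.compat()` wraps the reader so it implements tokio::io::AsyncRead.
    let mut reader = reader.compat();
    let mut buf = [0u8; 4];
    reader.read_exact(&mut buf).await?;
    Ok(buf)
}
```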
pub trait FuturesAsyncReadCompatExt: futures_io::AsyncRead { /// Wraps `self` with a compatibility layer that implements - /// `tokio_io::AsyncWrite`. + /// `tokio_io::AsyncRead`. fn compat(self) -> Compat where Self: Sized, @@ -47,7 +49,7 @@ impl FuturesAsyncWriteCompatExt for T {} /// Extension trait that allows converting a type implementing /// `tokio::io::AsyncRead` to implement `futures_io::AsyncRead`. -pub trait Tokio02AsyncReadCompatExt: tokio::io::AsyncRead { +pub trait TokioAsyncReadCompatExt: tokio::io::AsyncRead { /// Wraps `self` with a compatibility layer that implements /// `futures_io::AsyncRead`. fn compat(self) -> Compat @@ -58,11 +60,11 @@ pub trait Tokio02AsyncReadCompatExt: tokio::io::AsyncRead { } } -impl Tokio02AsyncReadCompatExt for T {} +impl TokioAsyncReadCompatExt for T {} /// Extension trait that allows converting a type implementing /// `tokio::io::AsyncWrite` to implement `futures_io::AsyncWrite`. -pub trait Tokio02AsyncWriteCompatExt: tokio::io::AsyncWrite { +pub trait TokioAsyncWriteCompatExt: tokio::io::AsyncWrite { /// Wraps `self` with a compatibility layer that implements /// `futures_io::AsyncWrite`. fn compat_write(self) -> Compat @@ -73,13 +75,16 @@ pub trait Tokio02AsyncWriteCompatExt: tokio::io::AsyncWrite { } } -impl Tokio02AsyncWriteCompatExt for T {} +impl TokioAsyncWriteCompatExt for T {} // === impl Compat === impl Compat { fn new(inner: T) -> Self { - Self { inner } + Self { + inner, + seek_pos: None, + } } /// Get a reference to the `Future`, `Stream`, `AsyncRead`, or `AsyncWrite` object @@ -107,9 +112,18 @@ where fn poll_read( self: Pin<&mut Self>, cx: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { - futures_io::AsyncRead::poll_read(self.project().inner, cx, buf) + buf: &mut tokio::io::ReadBuf<'_>, + ) -> Poll> { + // We can't trust the inner type to not peak at the bytes, + // so we must defensively initialize the buffer. + let slice = buf.initialize_unfilled(); + let n = ready!(futures_io::AsyncRead::poll_read( + self.project().inner, + cx, + slice + ))?; + buf.advance(n); + Poll::Ready(Ok(())) } } @@ -120,9 +134,15 @@ where fn poll_read( self: Pin<&mut Self>, cx: &mut Context<'_>, - buf: &mut [u8], + slice: &mut [u8], ) -> Poll> { - tokio::io::AsyncRead::poll_read(self.project().inner, cx, buf) + let mut buf = tokio::io::ReadBuf::new(slice); + ready!(tokio::io::AsyncRead::poll_read( + self.project().inner, + cx, + &mut buf + ))?; + Poll::Ready(Ok(buf.filled().len())) } } @@ -199,3 +219,56 @@ where tokio::io::AsyncWrite::poll_shutdown(self.project().inner, cx) } } + +impl futures_io::AsyncSeek for Compat { + fn poll_seek( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + pos: io::SeekFrom, + ) -> Poll> { + if self.seek_pos != Some(pos) { + self.as_mut().project().inner.start_seek(pos)?; + *self.as_mut().project().seek_pos = Some(pos); + } + let res = ready!(self.as_mut().project().inner.poll_complete(cx)); + *self.as_mut().project().seek_pos = None; + Poll::Ready(res.map(|p| p as u64)) + } +} + +impl tokio::io::AsyncSeek for Compat { + fn start_seek(mut self: Pin<&mut Self>, pos: io::SeekFrom) -> io::Result<()> { + *self.as_mut().project().seek_pos = Some(pos); + Ok(()) + } + + fn poll_complete(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let pos = match self.seek_pos { + None => { + // tokio 1.x AsyncSeek recommends calling poll_complete before start_seek. + // We don't have to guarantee that the value returned by + // poll_complete called without start_seek is correct, + // so we'll return 0. 
+ return Poll::Ready(Ok(0)); + } + Some(pos) => pos, + }; + let res = ready!(self.as_mut().project().inner.poll_seek(cx, pos)); + *self.as_mut().project().seek_pos = None; + Poll::Ready(res.map(|p| p as u64)) + } +} + +#[cfg(unix)] +impl std::os::unix::io::AsRawFd for Compat { + fn as_raw_fd(&self) -> std::os::unix::io::RawFd { + self.inner.as_raw_fd() + } +} + +#[cfg(windows)] +impl std::os::windows::io::AsRawHandle for Compat { + fn as_raw_handle(&self) -> std::os::windows::io::RawHandle { + self.inner.as_raw_handle() + } +} diff --git a/third_party/rust/tokio-util/src/context.rs b/third_party/rust/tokio-util/src/context.rs new file mode 100644 index 000000000000..a7a5e0294900 --- /dev/null +++ b/third_party/rust/tokio-util/src/context.rs @@ -0,0 +1,190 @@ +//! Tokio context aware futures utilities. +//! +//! This module includes utilities around integrating tokio with other runtimes +//! by allowing the context to be attached to futures. This allows spawning +//! futures on other executors while still using tokio to drive them. This +//! can be useful if you need to use a tokio based library in an executor/runtime +//! that does not provide a tokio context. + +use pin_project_lite::pin_project; +use std::{ + future::Future, + pin::Pin, + task::{Context, Poll}, +}; +use tokio::runtime::{Handle, Runtime}; + +pin_project! { + /// `TokioContext` allows running futures that must be inside Tokio's + /// context on a non-Tokio runtime. + /// + /// It contains a [`Handle`] to the runtime. A handle to the runtime can be + /// obtain by calling the [`Runtime::handle()`] method. + /// + /// Note that the `TokioContext` wrapper only works if the `Runtime` it is + /// connected to has not yet been destroyed. You must keep the `Runtime` + /// alive until the future has finished executing. + /// + /// **Warning:** If `TokioContext` is used together with a [current thread] + /// runtime, that runtime must be inside a call to `block_on` for the + /// wrapped future to work. For this reason, it is recommended to use a + /// [multi thread] runtime, even if you configure it to only spawn one + /// worker thread. + /// + /// # Examples + /// + /// This example creates two runtimes, but only [enables time] on one of + /// them. It then uses the context of the runtime with the timer enabled to + /// execute a [`sleep`] future on the runtime with timing disabled. + /// ``` + /// use tokio::time::{sleep, Duration}; + /// use tokio_util::context::RuntimeExt; + /// + /// // This runtime has timers enabled. + /// let rt = tokio::runtime::Builder::new_multi_thread() + /// .enable_all() + /// .build() + /// .unwrap(); + /// + /// // This runtime has timers disabled. + /// let rt2 = tokio::runtime::Builder::new_multi_thread() + /// .build() + /// .unwrap(); + /// + /// // Wrap the sleep future in the context of rt. + /// let fut = rt.wrap(async { sleep(Duration::from_millis(2)).await }); + /// + /// // Execute the future on rt2. 
+ /// rt2.block_on(fut); + /// ``` + /// + /// [`Handle`]: struct@tokio::runtime::Handle + /// [`Runtime::handle()`]: fn@tokio::runtime::Runtime::handle + /// [`RuntimeExt`]: trait@crate::context::RuntimeExt + /// [`new_static`]: fn@Self::new_static + /// [`sleep`]: fn@tokio::time::sleep + /// [current thread]: fn@tokio::runtime::Builder::new_current_thread + /// [enables time]: fn@tokio::runtime::Builder::enable_time + /// [multi thread]: fn@tokio::runtime::Builder::new_multi_thread + pub struct TokioContext { + #[pin] + inner: F, + handle: Handle, + } +} + +impl TokioContext { + /// Associate the provided future with the context of the runtime behind + /// the provided `Handle`. + /// + /// This constructor uses a `'static` lifetime to opt-out of checking that + /// the runtime still exists. + /// + /// # Examples + /// + /// This is the same as the example above, but uses the `new` constructor + /// rather than [`RuntimeExt::wrap`]. + /// + /// [`RuntimeExt::wrap`]: fn@RuntimeExt::wrap + /// + /// ``` + /// use tokio::time::{sleep, Duration}; + /// use tokio_util::context::TokioContext; + /// + /// // This runtime has timers enabled. + /// let rt = tokio::runtime::Builder::new_multi_thread() + /// .enable_all() + /// .build() + /// .unwrap(); + /// + /// // This runtime has timers disabled. + /// let rt2 = tokio::runtime::Builder::new_multi_thread() + /// .build() + /// .unwrap(); + /// + /// let fut = TokioContext::new( + /// async { sleep(Duration::from_millis(2)).await }, + /// rt.handle().clone(), + /// ); + /// + /// // Execute the future on rt2. + /// rt2.block_on(fut); + /// ``` + pub fn new(future: F, handle: Handle) -> TokioContext { + TokioContext { + inner: future, + handle, + } + } + + /// Obtain a reference to the handle inside this `TokioContext`. + pub fn handle(&self) -> &Handle { + &self.handle + } + + /// Remove the association between the Tokio runtime and the wrapped future. + pub fn into_inner(self) -> F { + self.inner + } +} + +impl Future for TokioContext { + type Output = F::Output; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let me = self.project(); + let handle = me.handle; + let fut = me.inner; + + let _enter = handle.enter(); + fut.poll(cx) + } +} + +/// Extension trait that simplifies bundling a `Handle` with a `Future`. +pub trait RuntimeExt { + /// Create a [`TokioContext`] that wraps the provided future and runs it in + /// this runtime's context. + /// + /// # Examples + /// + /// This example creates two runtimes, but only [enables time] on one of + /// them. It then uses the context of the runtime with the timer enabled to + /// execute a [`sleep`] future on the runtime with timing disabled. + /// + /// ``` + /// use tokio::time::{sleep, Duration}; + /// use tokio_util::context::RuntimeExt; + /// + /// // This runtime has timers enabled. + /// let rt = tokio::runtime::Builder::new_multi_thread() + /// .enable_all() + /// .build() + /// .unwrap(); + /// + /// // This runtime has timers disabled. + /// let rt2 = tokio::runtime::Builder::new_multi_thread() + /// .build() + /// .unwrap(); + /// + /// // Wrap the sleep future in the context of rt. + /// let fut = rt.wrap(async { sleep(Duration::from_millis(2)).await }); + /// + /// // Execute the future on rt2. 
+ /// rt2.block_on(fut); + /// ``` + /// + /// [`TokioContext`]: struct@crate::context::TokioContext + /// [`sleep`]: fn@tokio::time::sleep + /// [enables time]: fn@tokio::runtime::Builder::enable_time + fn wrap(&self, fut: F) -> TokioContext; +} + +impl RuntimeExt for Runtime { + fn wrap(&self, fut: F) -> TokioContext { + TokioContext { + inner: fut, + handle: self.handle().clone(), + } + } +} diff --git a/third_party/rust/tokio-util/src/either.rs b/third_party/rust/tokio-util/src/either.rs new file mode 100644 index 000000000000..9225e53ca686 --- /dev/null +++ b/third_party/rust/tokio-util/src/either.rs @@ -0,0 +1,188 @@ +//! Module defining an Either type. +use std::{ + future::Future, + io::SeekFrom, + pin::Pin, + task::{Context, Poll}, +}; +use tokio::io::{AsyncBufRead, AsyncRead, AsyncSeek, AsyncWrite, ReadBuf, Result}; + +/// Combines two different futures, streams, or sinks having the same associated types into a single type. +/// +/// This type implements common asynchronous traits such as [`Future`] and those in Tokio. +/// +/// [`Future`]: std::future::Future +/// +/// # Example +/// +/// The following code will not work: +/// +/// ```compile_fail +/// # fn some_condition() -> bool { true } +/// # async fn some_async_function() -> u32 { 10 } +/// # async fn other_async_function() -> u32 { 20 } +/// #[tokio::main] +/// async fn main() { +/// let result = if some_condition() { +/// some_async_function() +/// } else { +/// other_async_function() // <- Will print: "`if` and `else` have incompatible types" +/// }; +/// +/// println!("Result is {}", result.await); +/// } +/// ``` +/// +// This is because although the output types for both futures is the same, the exact future +// types are different, but the compiler must be able to choose a single type for the +// `result` variable. +/// +/// When the output type is the same, we can wrap each future in `Either` to avoid the +/// issue: +/// +/// ``` +/// use tokio_util::either::Either; +/// # fn some_condition() -> bool { true } +/// # async fn some_async_function() -> u32 { 10 } +/// # async fn other_async_function() -> u32 { 20 } +/// +/// #[tokio::main] +/// async fn main() { +/// let result = if some_condition() { +/// Either::Left(some_async_function()) +/// } else { +/// Either::Right(other_async_function()) +/// }; +/// +/// let value = result.await; +/// println!("Result is {}", value); +/// # assert_eq!(value, 10); +/// } +/// ``` +#[allow(missing_docs)] // Doc-comments for variants in this particular case don't make much sense. +#[derive(Debug, Clone)] +pub enum Either { + Left(L), + Right(R), +} + +/// A small helper macro which reduces amount of boilerplate in the actual trait method implementation. +/// It takes an invocation of method as an argument (e.g. `self.poll(cx)`), and redirects it to either +/// enum variant held in `self`. +macro_rules! 
delegate_call { + ($self:ident.$method:ident($($args:ident),+)) => { + unsafe { + match $self.get_unchecked_mut() { + Self::Left(l) => Pin::new_unchecked(l).$method($($args),+), + Self::Right(r) => Pin::new_unchecked(r).$method($($args),+), + } + } + } +} + +impl Future for Either +where + L: Future, + R: Future, +{ + type Output = O; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + delegate_call!(self.poll(cx)) + } +} + +impl AsyncRead for Either +where + L: AsyncRead, + R: AsyncRead, +{ + fn poll_read( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut ReadBuf<'_>, + ) -> Poll> { + delegate_call!(self.poll_read(cx, buf)) + } +} + +impl AsyncBufRead for Either +where + L: AsyncBufRead, + R: AsyncBufRead, +{ + fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + delegate_call!(self.poll_fill_buf(cx)) + } + + fn consume(self: Pin<&mut Self>, amt: usize) { + delegate_call!(self.consume(amt)) + } +} + +impl AsyncSeek for Either +where + L: AsyncSeek, + R: AsyncSeek, +{ + fn start_seek(self: Pin<&mut Self>, position: SeekFrom) -> Result<()> { + delegate_call!(self.start_seek(position)) + } + + fn poll_complete(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + delegate_call!(self.poll_complete(cx)) + } +} + +impl AsyncWrite for Either +where + L: AsyncWrite, + R: AsyncWrite, +{ + fn poll_write(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8]) -> Poll> { + delegate_call!(self.poll_write(cx, buf)) + } + + fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + delegate_call!(self.poll_flush(cx)) + } + + fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + delegate_call!(self.poll_shutdown(cx)) + } +} + +impl futures_core::stream::Stream for Either +where + L: futures_core::stream::Stream, + R: futures_core::stream::Stream, +{ + type Item = L::Item; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + delegate_call!(self.poll_next(cx)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use tokio::io::{repeat, AsyncReadExt, Repeat}; + use tokio_stream::{once, Once, StreamExt}; + + #[tokio::test] + async fn either_is_stream() { + let mut either: Either, Once> = Either::Left(once(1)); + + assert_eq!(Some(1u32), either.next().await); + } + + #[tokio::test] + async fn either_is_async_read() { + let mut buffer = [0; 3]; + let mut either: Either = Either::Right(repeat(0b101)); + + either.read_exact(&mut buffer).await.unwrap(); + assert_eq!(buffer, [0b101, 0b101, 0b101]); + } +} diff --git a/third_party/rust/tokio-util/src/io/mod.rs b/third_party/rust/tokio-util/src/io/mod.rs new file mode 100644 index 000000000000..eb48a21fb982 --- /dev/null +++ b/third_party/rust/tokio-util/src/io/mod.rs @@ -0,0 +1,24 @@ +//! Helpers for IO related tasks. +//! +//! The stream types are often used in combination with hyper or reqwest, as they +//! allow converting between a hyper [`Body`] and [`AsyncRead`]. +//! +//! The [`SyncIoBridge`] type converts from the world of async I/O +//! to synchronous I/O; this may often come up when using synchronous APIs +//! inside [`tokio::task::spawn_blocking`]. +//! +//! [`Body`]: https://docs.rs/hyper/0.13/hyper/struct.Body.html +//! [`AsyncRead`]: tokio::io::AsyncRead + +mod read_buf; +mod reader_stream; +mod stream_reader; +cfg_io_util! 
{ + mod sync_bridge; + pub use self::sync_bridge::SyncIoBridge; +} + +pub use self::read_buf::read_buf; +pub use self::reader_stream::ReaderStream; +pub use self::stream_reader::StreamReader; +pub use crate::util::{poll_read_buf, poll_write_buf}; diff --git a/third_party/rust/tokio-util/src/io/read_buf.rs b/third_party/rust/tokio-util/src/io/read_buf.rs new file mode 100644 index 000000000000..d7938a3bc165 --- /dev/null +++ b/third_party/rust/tokio-util/src/io/read_buf.rs @@ -0,0 +1,65 @@ +use bytes::BufMut; +use std::future::Future; +use std::io; +use std::pin::Pin; +use std::task::{Context, Poll}; +use tokio::io::AsyncRead; + +/// Read data from an `AsyncRead` into an implementer of the [`BufMut`] trait. +/// +/// [`BufMut`]: bytes::BufMut +/// +/// # Example +/// +/// ``` +/// use bytes::{Bytes, BytesMut}; +/// use tokio_stream as stream; +/// use tokio::io::Result; +/// use tokio_util::io::{StreamReader, read_buf}; +/// # #[tokio::main] +/// # async fn main() -> std::io::Result<()> { +/// +/// // Create a reader from an iterator. This particular reader will always be +/// // ready. +/// let mut read = StreamReader::new(stream::iter(vec![Result::Ok(Bytes::from_static(&[0, 1, 2, 3]))])); +/// +/// let mut buf = BytesMut::new(); +/// let mut reads = 0; +/// +/// loop { +/// reads += 1; +/// let n = read_buf(&mut read, &mut buf).await?; +/// +/// if n == 0 { +/// break; +/// } +/// } +/// +/// // one or more reads might be necessary. +/// assert!(reads >= 1); +/// assert_eq!(&buf[..], &[0, 1, 2, 3]); +/// # Ok(()) +/// # } +/// ``` +pub async fn read_buf(read: &mut R, buf: &mut B) -> io::Result +where + R: AsyncRead + Unpin, + B: BufMut, +{ + return ReadBufFn(read, buf).await; + + struct ReadBufFn<'a, R, B>(&'a mut R, &'a mut B); + + impl<'a, R, B> Future for ReadBufFn<'a, R, B> + where + R: AsyncRead + Unpin, + B: BufMut, + { + type Output = io::Result; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let this = &mut *self; + crate::util::poll_read_buf(Pin::new(this.0), cx, this.1) + } + } +} diff --git a/third_party/rust/tokio-util/src/io/reader_stream.rs b/third_party/rust/tokio-util/src/io/reader_stream.rs new file mode 100644 index 000000000000..866c11408d5d --- /dev/null +++ b/third_party/rust/tokio-util/src/io/reader_stream.rs @@ -0,0 +1,118 @@ +use bytes::{Bytes, BytesMut}; +use futures_core::stream::Stream; +use pin_project_lite::pin_project; +use std::pin::Pin; +use std::task::{Context, Poll}; +use tokio::io::AsyncRead; + +const DEFAULT_CAPACITY: usize = 4096; + +pin_project! { + /// Convert an [`AsyncRead`] into a [`Stream`] of byte chunks. + /// + /// This stream is fused. It performs the inverse operation of + /// [`StreamReader`]. + /// + /// # Example + /// + /// ``` + /// # #[tokio::main] + /// # async fn main() -> std::io::Result<()> { + /// use tokio_stream::StreamExt; + /// use tokio_util::io::ReaderStream; + /// + /// // Create a stream of data. + /// let data = b"hello, world!"; + /// let mut stream = ReaderStream::new(&data[..]); + /// + /// // Read all of the chunks into a vector. + /// let mut stream_contents = Vec::new(); + /// while let Some(chunk) = stream.next().await { + /// stream_contents.extend_from_slice(&chunk?); + /// } + /// + /// // Once the chunks are concatenated, we should have the + /// // original data. 
+ /// assert_eq!(stream_contents, data); + /// # Ok(()) + /// # } + /// ``` + /// + /// [`AsyncRead`]: tokio::io::AsyncRead + /// [`StreamReader`]: crate::io::StreamReader + /// [`Stream`]: futures_core::Stream + #[derive(Debug)] + pub struct ReaderStream { + // Reader itself. + // + // This value is `None` if the stream has terminated. + #[pin] + reader: Option, + // Working buffer, used to optimize allocations. + buf: BytesMut, + capacity: usize, + } +} + +impl ReaderStream { + /// Convert an [`AsyncRead`] into a [`Stream`] with item type + /// `Result`. + /// + /// [`AsyncRead`]: tokio::io::AsyncRead + /// [`Stream`]: futures_core::Stream + pub fn new(reader: R) -> Self { + ReaderStream { + reader: Some(reader), + buf: BytesMut::new(), + capacity: DEFAULT_CAPACITY, + } + } + + /// Convert an [`AsyncRead`] into a [`Stream`] with item type + /// `Result`, + /// with a specific read buffer initial capacity. + /// + /// [`AsyncRead`]: tokio::io::AsyncRead + /// [`Stream`]: futures_core::Stream + pub fn with_capacity(reader: R, capacity: usize) -> Self { + ReaderStream { + reader: Some(reader), + buf: BytesMut::with_capacity(capacity), + capacity, + } + } +} + +impl Stream for ReaderStream { + type Item = std::io::Result; + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + use crate::util::poll_read_buf; + + let mut this = self.as_mut().project(); + + let reader = match this.reader.as_pin_mut() { + Some(r) => r, + None => return Poll::Ready(None), + }; + + if this.buf.capacity() == 0 { + this.buf.reserve(*this.capacity); + } + + match poll_read_buf(reader, cx, &mut this.buf) { + Poll::Pending => Poll::Pending, + Poll::Ready(Err(err)) => { + self.project().reader.set(None); + Poll::Ready(Some(Err(err))) + } + Poll::Ready(Ok(0)) => { + self.project().reader.set(None); + Poll::Ready(None) + } + Poll::Ready(Ok(_)) => { + let chunk = this.buf.split(); + Poll::Ready(Some(Ok(chunk.freeze()))) + } + } + } +} diff --git a/third_party/rust/tokio-util/src/io/stream_reader.rs b/third_party/rust/tokio-util/src/io/stream_reader.rs new file mode 100644 index 000000000000..05ae88655739 --- /dev/null +++ b/third_party/rust/tokio-util/src/io/stream_reader.rs @@ -0,0 +1,203 @@ +use bytes::Buf; +use futures_core::stream::Stream; +use pin_project_lite::pin_project; +use std::io; +use std::pin::Pin; +use std::task::{Context, Poll}; +use tokio::io::{AsyncBufRead, AsyncRead, ReadBuf}; + +pin_project! { + /// Convert a [`Stream`] of byte chunks into an [`AsyncRead`]. + /// + /// This type performs the inverse operation of [`ReaderStream`]. + /// + /// # Example + /// + /// ``` + /// use bytes::Bytes; + /// use tokio::io::{AsyncReadExt, Result}; + /// use tokio_util::io::StreamReader; + /// # #[tokio::main] + /// # async fn main() -> std::io::Result<()> { + /// + /// // Create a stream from an iterator. + /// let stream = tokio_stream::iter(vec![ + /// Result::Ok(Bytes::from_static(&[0, 1, 2, 3])), + /// Result::Ok(Bytes::from_static(&[4, 5, 6, 7])), + /// Result::Ok(Bytes::from_static(&[8, 9, 10, 11])), + /// ]); + /// + /// // Convert it to an AsyncRead. + /// let mut read = StreamReader::new(stream); + /// + /// // Read five bytes from the stream. + /// let mut buf = [0; 5]; + /// read.read_exact(&mut buf).await?; + /// assert_eq!(buf, [0, 1, 2, 3, 4]); + /// + /// // Read the rest of the current chunk. + /// assert_eq!(read.read(&mut buf).await?, 3); + /// assert_eq!(&buf[..3], [5, 6, 7]); + /// + /// // Read the next chunk. 
+ /// assert_eq!(read.read(&mut buf).await?, 4); + /// assert_eq!(&buf[..4], [8, 9, 10, 11]); + /// + /// // We have now reached the end. + /// assert_eq!(read.read(&mut buf).await?, 0); + /// + /// # Ok(()) + /// # } + /// ``` + /// + /// [`AsyncRead`]: tokio::io::AsyncRead + /// [`Stream`]: futures_core::Stream + /// [`ReaderStream`]: crate::io::ReaderStream + #[derive(Debug)] + pub struct StreamReader { + #[pin] + inner: S, + chunk: Option, + } +} + +impl StreamReader +where + S: Stream>, + B: Buf, + E: Into, +{ + /// Convert a stream of byte chunks into an [`AsyncRead`](tokio::io::AsyncRead). + /// + /// The item should be a [`Result`] with the ok variant being something that + /// implements the [`Buf`] trait (e.g. `Vec` or `Bytes`). The error + /// should be convertible into an [io error]. + /// + /// [`Result`]: std::result::Result + /// [`Buf`]: bytes::Buf + /// [io error]: std::io::Error + pub fn new(stream: S) -> Self { + Self { + inner: stream, + chunk: None, + } + } + + /// Do we have a chunk and is it non-empty? + fn has_chunk(&self) -> bool { + if let Some(ref chunk) = self.chunk { + chunk.remaining() > 0 + } else { + false + } + } + + /// Consumes this `StreamReader`, returning a Tuple consisting + /// of the underlying stream and an Option of the interal buffer, + /// which is Some in case the buffer contains elements. + pub fn into_inner_with_chunk(self) -> (S, Option) { + if self.has_chunk() { + (self.inner, self.chunk) + } else { + (self.inner, None) + } + } +} + +impl StreamReader { + /// Gets a reference to the underlying stream. + /// + /// It is inadvisable to directly read from the underlying stream. + pub fn get_ref(&self) -> &S { + &self.inner + } + + /// Gets a mutable reference to the underlying stream. + /// + /// It is inadvisable to directly read from the underlying stream. + pub fn get_mut(&mut self) -> &mut S { + &mut self.inner + } + + /// Gets a pinned mutable reference to the underlying stream. + /// + /// It is inadvisable to directly read from the underlying stream. + pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut S> { + self.project().inner + } + + /// Consumes this `BufWriter`, returning the underlying stream. + /// + /// Note that any leftover data in the internal buffer is lost. + /// If you additionally want access to the internal buffer use + /// [`into_inner_with_chunk`]. + /// + /// [`into_inner_with_chunk`]: crate::io::StreamReader::into_inner_with_chunk + pub fn into_inner(self) -> S { + self.inner + } +} + +impl AsyncRead for StreamReader +where + S: Stream>, + B: Buf, + E: Into, +{ + fn poll_read( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut ReadBuf<'_>, + ) -> Poll> { + if buf.remaining() == 0 { + return Poll::Ready(Ok(())); + } + + let inner_buf = match self.as_mut().poll_fill_buf(cx) { + Poll::Ready(Ok(buf)) => buf, + Poll::Ready(Err(err)) => return Poll::Ready(Err(err)), + Poll::Pending => return Poll::Pending, + }; + let len = std::cmp::min(inner_buf.len(), buf.remaining()); + buf.put_slice(&inner_buf[..len]); + + self.consume(len); + Poll::Ready(Ok(())) + } +} + +impl AsyncBufRead for StreamReader +where + S: Stream>, + B: Buf, + E: Into, +{ + fn poll_fill_buf(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + loop { + if self.as_mut().has_chunk() { + // This unwrap is very sad, but it can't be avoided. 
+ let buf = self.project().chunk.as_ref().unwrap().chunk(); + return Poll::Ready(Ok(buf)); + } else { + match self.as_mut().project().inner.poll_next(cx) { + Poll::Ready(Some(Ok(chunk))) => { + // Go around the loop in case the chunk is empty. + *self.as_mut().project().chunk = Some(chunk); + } + Poll::Ready(Some(Err(err))) => return Poll::Ready(Err(err.into())), + Poll::Ready(None) => return Poll::Ready(Ok(&[])), + Poll::Pending => return Poll::Pending, + } + } + } + } + fn consume(self: Pin<&mut Self>, amt: usize) { + if amt > 0 { + self.project() + .chunk + .as_mut() + .expect("No chunk present") + .advance(amt); + } + } +} diff --git a/third_party/rust/tokio-util/src/io/sync_bridge.rs b/third_party/rust/tokio-util/src/io/sync_bridge.rs new file mode 100644 index 000000000000..9be9446a7de5 --- /dev/null +++ b/third_party/rust/tokio-util/src/io/sync_bridge.rs @@ -0,0 +1,103 @@ +use std::io::{Read, Write}; +use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt}; + +/// Use a [`tokio::io::AsyncRead`] synchronously as a [`std::io::Read`] or +/// a [`tokio::io::AsyncWrite`] as a [`std::io::Write`]. +#[derive(Debug)] +pub struct SyncIoBridge { + src: T, + rt: tokio::runtime::Handle, +} + +impl Read for SyncIoBridge { + fn read(&mut self, buf: &mut [u8]) -> std::io::Result { + let src = &mut self.src; + self.rt.block_on(AsyncReadExt::read(src, buf)) + } + + fn read_to_end(&mut self, buf: &mut Vec) -> std::io::Result { + let src = &mut self.src; + self.rt.block_on(src.read_to_end(buf)) + } + + fn read_to_string(&mut self, buf: &mut String) -> std::io::Result { + let src = &mut self.src; + self.rt.block_on(src.read_to_string(buf)) + } + + fn read_exact(&mut self, buf: &mut [u8]) -> std::io::Result<()> { + let src = &mut self.src; + // The AsyncRead trait returns the count, synchronous doesn't. + let _n = self.rt.block_on(src.read_exact(buf))?; + Ok(()) + } +} + +impl Write for SyncIoBridge { + fn write(&mut self, buf: &[u8]) -> std::io::Result { + let src = &mut self.src; + self.rt.block_on(src.write(buf)) + } + + fn flush(&mut self) -> std::io::Result<()> { + let src = &mut self.src; + self.rt.block_on(src.flush()) + } + + fn write_all(&mut self, buf: &[u8]) -> std::io::Result<()> { + let src = &mut self.src; + self.rt.block_on(src.write_all(buf)) + } + + fn write_vectored(&mut self, bufs: &[std::io::IoSlice<'_>]) -> std::io::Result { + let src = &mut self.src; + self.rt.block_on(src.write_vectored(bufs)) + } +} + +// Because https://doc.rust-lang.org/std/io/trait.Write.html#method.is_write_vectored is at the time +// of this writing still unstable, we expose this as part of a standalone method. +impl SyncIoBridge { + /// Determines if the underlying [`tokio::io::AsyncWrite`] target supports efficient vectored writes. + /// + /// See [`tokio::io::AsyncWrite::is_write_vectored`]. + pub fn is_write_vectored(&self) -> bool { + self.src.is_write_vectored() + } +} + +impl SyncIoBridge { + /// Use a [`tokio::io::AsyncRead`] synchronously as a [`std::io::Read`] or + /// a [`tokio::io::AsyncWrite`] as a [`std::io::Write`]. + /// + /// When this struct is created, it captures a handle to the current thread's runtime with [`tokio::runtime::Handle::current`]. + /// It is hence OK to move this struct into a separate thread outside the runtime, as created + /// by e.g. [`tokio::task::spawn_blocking`]. + /// + /// Stated even more strongly: to make use of this bridge, you *must* move + /// it into a separate thread outside the runtime. 
The synchronous I/O will use the + /// underlying handle to block on the backing asynchronous source, via + /// [`tokio::runtime::Handle::block_on`]. As noted in the documentation for that + /// function, an attempt to `block_on` from an asynchronous execution context + /// will panic. + /// + /// # Wrapping `!Unpin` types + /// + /// Use e.g. `SyncIoBridge::new(Box::pin(src))`. + /// + /// # Panic + /// + /// This will panic if called outside the context of a Tokio runtime. + pub fn new(src: T) -> Self { + Self::new_with_handle(src, tokio::runtime::Handle::current()) + } + + /// Use a [`tokio::io::AsyncRead`] synchronously as a [`std::io::Read`] or + /// a [`tokio::io::AsyncWrite`] as a [`std::io::Write`]. + /// + /// This is the same as [`SyncIoBridge::new`], but allows passing an arbitrary handle and hence may + /// be initially invoked outside of an asynchronous context. + pub fn new_with_handle(src: T, rt: tokio::runtime::Handle) -> Self { + Self { src, rt } + } +} diff --git a/third_party/rust/tokio-util/src/lib.rs b/third_party/rust/tokio-util/src/lib.rs index 260c9485c266..fd14a8ac9475 100644 --- a/third_party/rust/tokio-util/src/lib.rs +++ b/third_party/rust/tokio-util/src/lib.rs @@ -1,4 +1,3 @@ -#![doc(html_root_url = "https://docs.rs/tokio-util/0.3.1")] #![allow(clippy::needless_doctest_main)] #![warn( missing_debug_implementations, @@ -6,7 +5,6 @@ rust_2018_idioms, unreachable_pub )] -#![deny(intra_doc_link_resolution_failure)] #![doc(test( no_crate_inject, attr(deny(warnings, rust_2018_idioms), allow(dead_code, unused_variables)) @@ -24,14 +22,180 @@ #[macro_use] mod cfg; +mod loom; + cfg_codec! { pub mod codec; } -cfg_udp! { +cfg_net! { pub mod udp; + pub mod net; } cfg_compat! { pub mod compat; } + +cfg_io! { + pub mod io; +} + +cfg_rt! { + pub mod context; + pub mod task; +} + +cfg_time! { + pub mod time; +} + +pub mod sync; + +pub mod either; + +#[cfg(any(feature = "io", feature = "codec"))] +mod util { + use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; + + use bytes::{Buf, BufMut}; + use futures_core::ready; + use std::io::{self, IoSlice}; + use std::mem::MaybeUninit; + use std::pin::Pin; + use std::task::{Context, Poll}; + + /// Try to read data from an `AsyncRead` into an implementer of the [`BufMut`] trait. + /// + /// [`BufMut`]: bytes::Buf + /// + /// # Example + /// + /// ``` + /// use bytes::{Bytes, BytesMut}; + /// use tokio_stream as stream; + /// use tokio::io::Result; + /// use tokio_util::io::{StreamReader, poll_read_buf}; + /// use futures::future::poll_fn; + /// use std::pin::Pin; + /// # #[tokio::main] + /// # async fn main() -> std::io::Result<()> { + /// + /// // Create a reader from an iterator. This particular reader will always be + /// // ready. + /// let mut read = StreamReader::new(stream::iter(vec![Result::Ok(Bytes::from_static(&[0, 1, 2, 3]))])); + /// + /// let mut buf = BytesMut::new(); + /// let mut reads = 0; + /// + /// loop { + /// reads += 1; + /// let n = poll_fn(|cx| poll_read_buf(Pin::new(&mut read), cx, &mut buf)).await?; + /// + /// if n == 0 { + /// break; + /// } + /// } + /// + /// // one or more reads might be necessary. 
+ /// assert!(reads >= 1); + /// assert_eq!(&buf[..], &[0, 1, 2, 3]); + /// # Ok(()) + /// # } + /// ``` + #[cfg_attr(not(feature = "io"), allow(unreachable_pub))] + pub fn poll_read_buf( + io: Pin<&mut T>, + cx: &mut Context<'_>, + buf: &mut B, + ) -> Poll> { + if !buf.has_remaining_mut() { + return Poll::Ready(Ok(0)); + } + + let n = { + let dst = buf.chunk_mut(); + let dst = unsafe { &mut *(dst as *mut _ as *mut [MaybeUninit]) }; + let mut buf = ReadBuf::uninit(dst); + let ptr = buf.filled().as_ptr(); + ready!(io.poll_read(cx, &mut buf)?); + + // Ensure the pointer does not change from under us + assert_eq!(ptr, buf.filled().as_ptr()); + buf.filled().len() + }; + + // Safety: This is guaranteed to be the number of initialized (and read) + // bytes due to the invariants provided by `ReadBuf::filled`. + unsafe { + buf.advance_mut(n); + } + + Poll::Ready(Ok(n)) + } + + /// Try to write data from an implementer of the [`Buf`] trait to an + /// [`AsyncWrite`], advancing the buffer's internal cursor. + /// + /// This function will use [vectored writes] when the [`AsyncWrite`] supports + /// vectored writes. + /// + /// # Examples + /// + /// [`File`] implements [`AsyncWrite`] and [`Cursor<&[u8]>`] implements + /// [`Buf`]: + /// + /// ```no_run + /// use tokio_util::io::poll_write_buf; + /// use tokio::io; + /// use tokio::fs::File; + /// + /// use bytes::Buf; + /// use std::io::Cursor; + /// use std::pin::Pin; + /// use futures::future::poll_fn; + /// + /// #[tokio::main] + /// async fn main() -> io::Result<()> { + /// let mut file = File::create("foo.txt").await?; + /// let mut buf = Cursor::new(b"data to write"); + /// + /// // Loop until the entire contents of the buffer are written to + /// // the file. + /// while buf.has_remaining() { + /// poll_fn(|cx| poll_write_buf(Pin::new(&mut file), cx, &mut buf)).await?; + /// } + /// + /// Ok(()) + /// } + /// ``` + /// + /// [`Buf`]: bytes::Buf + /// [`AsyncWrite`]: tokio::io::AsyncWrite + /// [`File`]: tokio::fs::File + /// [vectored writes]: tokio::io::AsyncWrite::poll_write_vectored + #[cfg_attr(not(feature = "io"), allow(unreachable_pub))] + pub fn poll_write_buf( + io: Pin<&mut T>, + cx: &mut Context<'_>, + buf: &mut B, + ) -> Poll> { + const MAX_BUFS: usize = 64; + + if !buf.has_remaining() { + return Poll::Ready(Ok(0)); + } + + let n = if io.is_write_vectored() { + let mut slices = [IoSlice::new(&[]); MAX_BUFS]; + let cnt = buf.chunks_vectored(&mut slices); + ready!(io.poll_write_vectored(cx, &slices[..cnt]))? + } else { + ready!(io.poll_write(cx, buf.chunk()))? + }; + + buf.advance(n); + + Poll::Ready(Ok(n)) + } +} diff --git a/third_party/rust/tokio-util/src/loom.rs b/third_party/rust/tokio-util/src/loom.rs new file mode 100644 index 000000000000..dd03feaba1ac --- /dev/null +++ b/third_party/rust/tokio-util/src/loom.rs @@ -0,0 +1 @@ +pub(crate) use std::sync; diff --git a/third_party/rust/tokio-util/src/net/mod.rs b/third_party/rust/tokio-util/src/net/mod.rs new file mode 100644 index 000000000000..4817e10d0f3a --- /dev/null +++ b/third_party/rust/tokio-util/src/net/mod.rs @@ -0,0 +1,97 @@ +//! TCP/UDP/Unix helpers for tokio. + +use crate::either::Either; +use std::future::Future; +use std::io::Result; +use std::pin::Pin; +use std::task::{Context, Poll}; + +#[cfg(unix)] +pub mod unix; + +/// A trait for a listener: `TcpListener` and `UnixListener`. +pub trait Listener { + /// The stream's type of this listener. + type Io: tokio::io::AsyncRead + tokio::io::AsyncWrite; + /// The socket address type of this listener. 
+ type Addr; + + /// Polls to accept a new incoming connection to this listener. + fn poll_accept(&mut self, cx: &mut Context<'_>) -> Poll>; + + /// Accepts a new incoming connection from this listener. + fn accept(&mut self) -> ListenerAcceptFut<'_, Self> + where + Self: Sized, + { + ListenerAcceptFut { listener: self } + } + + /// Returns the local address that this listener is bound to. + fn local_addr(&self) -> Result; +} + +impl Listener for tokio::net::TcpListener { + type Io = tokio::net::TcpStream; + type Addr = std::net::SocketAddr; + + fn poll_accept(&mut self, cx: &mut Context<'_>) -> Poll> { + Self::poll_accept(self, cx) + } + + fn local_addr(&self) -> Result { + self.local_addr().map(Into::into) + } +} + +/// Future for accepting a new connection from a listener. +#[derive(Debug)] +#[must_use = "futures do nothing unless you `.await` or poll them"] +pub struct ListenerAcceptFut<'a, L> { + listener: &'a mut L, +} + +impl<'a, L> Future for ListenerAcceptFut<'a, L> +where + L: Listener, +{ + type Output = Result<(L::Io, L::Addr)>; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + self.listener.poll_accept(cx) + } +} + +impl Either +where + L: Listener, + R: Listener, +{ + /// Accepts a new incoming connection from this listener. + pub async fn accept(&mut self) -> Result> { + match self { + Either::Left(listener) => { + let (stream, addr) = listener.accept().await?; + Ok(Either::Left((stream, addr))) + } + Either::Right(listener) => { + let (stream, addr) = listener.accept().await?; + Ok(Either::Right((stream, addr))) + } + } + } + + /// Returns the local address that this listener is bound to. + pub fn local_addr(&self) -> Result> { + match self { + Either::Left(listener) => { + let addr = listener.local_addr()?; + Ok(Either::Left(addr)) + } + Either::Right(listener) => { + let addr = listener.local_addr()?; + Ok(Either::Right(addr)) + } + } + } +} diff --git a/third_party/rust/tokio-util/src/net/unix/mod.rs b/third_party/rust/tokio-util/src/net/unix/mod.rs new file mode 100644 index 000000000000..0b522c90a34a --- /dev/null +++ b/third_party/rust/tokio-util/src/net/unix/mod.rs @@ -0,0 +1,18 @@ +//! Unix domain socket helpers. + +use super::Listener; +use std::io::Result; +use std::task::{Context, Poll}; + +impl Listener for tokio::net::UnixListener { + type Io = tokio::net::UnixStream; + type Addr = tokio::net::unix::SocketAddr; + + fn poll_accept(&mut self, cx: &mut Context<'_>) -> Poll> { + Self::poll_accept(self, cx) + } + + fn local_addr(&self) -> Result { + self.local_addr().map(Into::into) + } +} diff --git a/third_party/rust/tokio-util/src/sync/cancellation_token.rs b/third_party/rust/tokio-util/src/sync/cancellation_token.rs new file mode 100644 index 000000000000..2a6ef392bd40 --- /dev/null +++ b/third_party/rust/tokio-util/src/sync/cancellation_token.rs @@ -0,0 +1,224 @@ +//! An asynchronously awaitable `CancellationToken`. +//! The token allows to signal a cancellation request to one or more tasks. +pub(crate) mod guard; +mod tree_node; + +use crate::loom::sync::Arc; +use core::future::Future; +use core::pin::Pin; +use core::task::{Context, Poll}; + +use guard::DropGuard; +use pin_project_lite::pin_project; + +/// A token which can be used to signal a cancellation request to one or more +/// tasks. +/// +/// Tasks can call [`CancellationToken::cancelled()`] in order to +/// obtain a Future which will be resolved when cancellation is requested. +/// +/// Cancellation can be requested through the [`CancellationToken::cancel`] method. 
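The `Listener` trait above abstracts over `TcpListener` and `UnixListener` (and an `Either` of the two). A minimal sketch of an accept helper written against it:

```rust
use tokio_util::net::Listener;

// Accept one connection from any listener type that implements `Listener`.
async fn accept_one<L: Listener>(listener: &mut L) -> std::io::Result<(L::Io, L::Addr)> {
    // `accept()` is the future-returning convenience wrapper over `poll_accept`.
    listener.accept().await
}
```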
+/// +/// # Examples +/// +/// ```no_run +/// use tokio::select; +/// use tokio_util::sync::CancellationToken; +/// +/// #[tokio::main] +/// async fn main() { +/// let token = CancellationToken::new(); +/// let cloned_token = token.clone(); +/// +/// let join_handle = tokio::spawn(async move { +/// // Wait for either cancellation or a very long time +/// select! { +/// _ = cloned_token.cancelled() => { +/// // The token was cancelled +/// 5 +/// } +/// _ = tokio::time::sleep(std::time::Duration::from_secs(9999)) => { +/// 99 +/// } +/// } +/// }); +/// +/// tokio::spawn(async move { +/// tokio::time::sleep(std::time::Duration::from_millis(10)).await; +/// token.cancel(); +/// }); +/// +/// assert_eq!(5, join_handle.await.unwrap()); +/// } +/// ``` +pub struct CancellationToken { + inner: Arc, +} + +pin_project! { + /// A Future that is resolved once the corresponding [`CancellationToken`] + /// is cancelled. + #[must_use = "futures do nothing unless polled"] + pub struct WaitForCancellationFuture<'a> { + cancellation_token: &'a CancellationToken, + #[pin] + future: tokio::sync::futures::Notified<'a>, + } +} + +// ===== impl CancellationToken ===== + +impl core::fmt::Debug for CancellationToken { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_struct("CancellationToken") + .field("is_cancelled", &self.is_cancelled()) + .finish() + } +} + +impl Clone for CancellationToken { + fn clone(&self) -> Self { + tree_node::increase_handle_refcount(&self.inner); + CancellationToken { + inner: self.inner.clone(), + } + } +} + +impl Drop for CancellationToken { + fn drop(&mut self) { + tree_node::decrease_handle_refcount(&self.inner); + } +} + +impl Default for CancellationToken { + fn default() -> CancellationToken { + CancellationToken::new() + } +} + +impl CancellationToken { + /// Creates a new CancellationToken in the non-cancelled state. + pub fn new() -> CancellationToken { + CancellationToken { + inner: Arc::new(tree_node::TreeNode::new()), + } + } + + /// Creates a `CancellationToken` which will get cancelled whenever the + /// current token gets cancelled. + /// + /// If the current token is already cancelled, the child token will get + /// returned in cancelled state. + /// + /// # Examples + /// + /// ```no_run + /// use tokio::select; + /// use tokio_util::sync::CancellationToken; + /// + /// #[tokio::main] + /// async fn main() { + /// let token = CancellationToken::new(); + /// let child_token = token.child_token(); + /// + /// let join_handle = tokio::spawn(async move { + /// // Wait for either cancellation or a very long time + /// select! { + /// _ = child_token.cancelled() => { + /// // The token was cancelled + /// 5 + /// } + /// _ = tokio::time::sleep(std::time::Duration::from_secs(9999)) => { + /// 99 + /// } + /// } + /// }); + /// + /// tokio::spawn(async move { + /// tokio::time::sleep(std::time::Duration::from_millis(10)).await; + /// token.cancel(); + /// }); + /// + /// assert_eq!(5, join_handle.await.unwrap()); + /// } + /// ``` + pub fn child_token(&self) -> CancellationToken { + CancellationToken { + inner: tree_node::child_node(&self.inner), + } + } + + /// Cancel the [`CancellationToken`] and all child tokens which had been + /// derived from it. + /// + /// This will wake up all tasks which are waiting for cancellation. + /// + /// Be aware that cancellation is not an atomic operation. 
It is possible + /// for another thread running in parallel with a call to `cancel` to first + /// receive `true` from `is_cancelled` on one child node, and then receive + /// `false` from `is_cancelled` on another child node. However, once the + /// call to `cancel` returns, all child nodes have been fully cancelled. + pub fn cancel(&self) { + tree_node::cancel(&self.inner); + } + + /// Returns `true` if the `CancellationToken` is cancelled. + pub fn is_cancelled(&self) -> bool { + tree_node::is_cancelled(&self.inner) + } + + /// Returns a `Future` that gets fulfilled when cancellation is requested. + /// + /// The future will complete immediately if the token is already cancelled + /// when this method is called. + /// + /// # Cancel safety + /// + /// This method is cancel safe. + pub fn cancelled(&self) -> WaitForCancellationFuture<'_> { + WaitForCancellationFuture { + cancellation_token: self, + future: self.inner.notified(), + } + } + + /// Creates a `DropGuard` for this token. + /// + /// Returned guard will cancel this token (and all its children) on drop + /// unless disarmed. + pub fn drop_guard(self) -> DropGuard { + DropGuard { inner: Some(self) } + } +} + +// ===== impl WaitForCancellationFuture ===== + +impl<'a> core::fmt::Debug for WaitForCancellationFuture<'a> { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_struct("WaitForCancellationFuture").finish() + } +} + +impl<'a> Future for WaitForCancellationFuture<'a> { + type Output = (); + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> { + let mut this = self.project(); + loop { + if this.cancellation_token.is_cancelled() { + return Poll::Ready(()); + } + + // No wakeups can be lost here because there is always a call to + // `is_cancelled` between the creation of the future and the call to + // `poll`, and the code that sets the cancelled flag does so before + // waking the `Notified`. + if this.future.as_mut().poll(cx).is_pending() { + return Poll::Pending; + } + + this.future.set(this.cancellation_token.inner.notified()); + } + } +} diff --git a/third_party/rust/tokio-util/src/sync/cancellation_token/guard.rs b/third_party/rust/tokio-util/src/sync/cancellation_token/guard.rs new file mode 100644 index 000000000000..54ed7ea2ed75 --- /dev/null +++ b/third_party/rust/tokio-util/src/sync/cancellation_token/guard.rs @@ -0,0 +1,27 @@ +use crate::sync::CancellationToken; + +/// A wrapper for cancellation token which automatically cancels +/// it on drop. It is created using `drop_guard` method on the `CancellationToken`. +#[derive(Debug)] +pub struct DropGuard { + pub(super) inner: Option, +} + +impl DropGuard { + /// Returns stored cancellation token and removes this drop guard instance + /// (i.e. it will no longer cancel token). Other guards for this token + /// are not affected. + pub fn disarm(mut self) -> CancellationToken { + self.inner + .take() + .expect("`inner` can be only None in a destructor") + } +} + +impl Drop for DropGuard { + fn drop(&mut self) { + if let Some(inner) = &self.inner { + inner.cancel(); + } + } +} diff --git a/third_party/rust/tokio-util/src/sync/cancellation_token/tree_node.rs b/third_party/rust/tokio-util/src/sync/cancellation_token/tree_node.rs new file mode 100644 index 000000000000..b6cd698e23d5 --- /dev/null +++ b/third_party/rust/tokio-util/src/sync/cancellation_token/tree_node.rs @@ -0,0 +1,373 @@ +//! This mod provides the logic for the inner tree structure of the CancellationToken. +//! +//! 
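A small usage sketch (not part of the vendored code) of the `drop_guard` API documented above, relying only on the public `CancellationToken`/`DropGuard` methods shown in this file.

```rust
use tokio_util::sync::CancellationToken;

fn main() {
    let token = CancellationToken::new();

    {
        // The guard holds a clone of the token and cancels it when dropped,
        // unless `disarm()` is called first.
        let _guard = token.clone().drop_guard();
    } // `_guard` dropped here, which cancels `token` and its children.

    assert!(token.is_cancelled());

    // Disarming returns the token and prevents the cancel-on-drop.
    let other = CancellationToken::new();
    let recovered = other.clone().drop_guard().disarm();
    assert!(!recovered.is_cancelled());
}
```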
CancellationTokens are only light handles with references to TreeNode. +//! All the logic is actually implemented in the TreeNode. +//! +//! A TreeNode is part of the cancellation tree and may have one parent and an arbitrary number of +//! children. +//! +//! A TreeNode can receive the request to perform a cancellation through a CancellationToken. +//! This cancellation request will cancel the node and all of its descendants. +//! +//! As soon as a node cannot get cancelled any more (because it was already cancelled or it has no +//! more CancellationTokens pointing to it any more), it gets removed from the tree, to keep the +//! tree as small as possible. +//! +//! # Invariants +//! +//! Those invariants shall be true at any time. +//! +//! 1. A node that has no parents and no handles can no longer be cancelled. +//! This is important during both cancellation and refcounting. +//! +//! 2. If node B *is* or *was* a child of node A, then node B was created *after* node A. +//! This is important for deadlock safety, as it is used for lock order. +//! Node B can only become the child of node A in two ways: +//! - being created with `child_node()`, in which case it is trivially true that +//! node A already existed when node B was created +//! - being moved A->C->B to A->B because node C was removed in `decrease_handle_refcount()` +//! or `cancel()`. In this case the invariant still holds, as B was younger than C, and C +//! was younger than A, therefore B is also younger than A. +//! +//! 3. If two nodes are both unlocked and node A is the parent of node B, then node B is a child of +//! node A. It is important to always restore that invariant before dropping the lock of a node. +//! +//! # Deadlock safety +//! +//! We always lock in the order of creation time. We can prove this through invariant #2. +//! Specifically, through invariant #2, we know that we always have to lock a parent +//! before its child. +//! +use crate::loom::sync::{Arc, Mutex, MutexGuard}; + +/// A node of the cancellation tree structure +/// +/// The actual data it holds is wrapped inside a mutex for synchronization. +pub(crate) struct TreeNode { + inner: Mutex, + waker: tokio::sync::Notify, +} +impl TreeNode { + pub(crate) fn new() -> Self { + Self { + inner: Mutex::new(Inner { + parent: None, + parent_idx: 0, + children: vec![], + is_cancelled: false, + num_handles: 1, + }), + waker: tokio::sync::Notify::new(), + } + } + + pub(crate) fn notified(&self) -> tokio::sync::futures::Notified<'_> { + self.waker.notified() + } +} + +/// The data contained inside a TreeNode. +/// +/// This struct exists so that the data of the node can be wrapped +/// in a Mutex. +struct Inner { + parent: Option>, + parent_idx: usize, + children: Vec>, + is_cancelled: bool, + num_handles: usize, +} + +/// Returns whether or not the node is cancelled +pub(crate) fn is_cancelled(node: &Arc) -> bool { + node.inner.lock().unwrap().is_cancelled +} + +/// Creates a child node +pub(crate) fn child_node(parent: &Arc) -> Arc { + let mut locked_parent = parent.inner.lock().unwrap(); + + // Do not register as child if we are already cancelled. + // Cancelled trees can never be uncancelled and therefore + // need no connection to parents or children any more. 
+ if locked_parent.is_cancelled { + return Arc::new(TreeNode { + inner: Mutex::new(Inner { + parent: None, + parent_idx: 0, + children: vec![], + is_cancelled: true, + num_handles: 1, + }), + waker: tokio::sync::Notify::new(), + }); + } + + let child = Arc::new(TreeNode { + inner: Mutex::new(Inner { + parent: Some(parent.clone()), + parent_idx: locked_parent.children.len(), + children: vec![], + is_cancelled: false, + num_handles: 1, + }), + waker: tokio::sync::Notify::new(), + }); + + locked_parent.children.push(child.clone()); + + child +} + +/// Disconnects the given parent from all of its children. +/// +/// Takes a reference to [Inner] to make sure the parent is already locked. +fn disconnect_children(node: &mut Inner) { + for child in std::mem::take(&mut node.children) { + let mut locked_child = child.inner.lock().unwrap(); + locked_child.parent_idx = 0; + locked_child.parent = None; + } +} + +/// Figures out the parent of the node and locks the node and its parent atomically. +/// +/// The basic principle of preventing deadlocks in the tree is +/// that we always lock the parent first, and then the child. +/// For more info look at *deadlock safety* and *invariant #2*. +/// +/// Sadly, it's impossible to figure out the parent of a node without +/// locking it. To then achieve locking order consistency, the node +/// has to be unlocked before the parent gets locked. +/// This leaves a small window where we already assume that we know the parent, +/// but neither the parent nor the node is locked. Therefore, the parent could change. +/// +/// To prevent that this problem leaks into the rest of the code, it is abstracted +/// in this function. +/// +/// The locked child and optionally its locked parent, if a parent exists, get passed +/// to the `func` argument via (node, None) or (node, Some(parent)). +fn with_locked_node_and_parent(node: &Arc, func: F) -> Ret +where + F: FnOnce(MutexGuard<'_, Inner>, Option>) -> Ret, +{ + let mut potential_parent = { + let locked_node = node.inner.lock().unwrap(); + match locked_node.parent.clone() { + Some(parent) => parent, + // If we locked the node and its parent is `None`, we are in a valid state + // and can return. + None => return func(locked_node, None), + } + }; + + loop { + // Deadlock safety: + // + // Due to invariant #2, we know that we have to lock the parent first, and then the child. + // This is true even if the potential_parent is no longer the current parent or even its + // sibling, as the invariant still holds. + let locked_parent = potential_parent.inner.lock().unwrap(); + let locked_node = node.inner.lock().unwrap(); + + let actual_parent = match locked_node.parent.clone() { + Some(parent) => parent, + // If we locked the node and its parent is `None`, we are in a valid state + // and can return. + None => { + // Was the wrong parent, so unlock it before calling `func` + drop(locked_parent); + return func(locked_node, None); + } + }; + + // Loop until we managed to lock both the node and its parent + if Arc::ptr_eq(&actual_parent, &potential_parent) { + return func(locked_node, Some(locked_parent)); + } + + // Drop locked_parent before reassigning to potential_parent, + // as potential_parent is borrowed in it + drop(locked_node); + drop(locked_parent); + + potential_parent = actual_parent; + } +} + +/// Moves all children from `node` to `parent`. +/// +/// `parent` MUST have been a parent of the node when they both got locked, +/// otherwise there is a potential for a deadlock as invariant #2 would be violated. 
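The parent-locking loop above is subtle, so here is a standalone sketch of the same "lock the candidate parent, then the child, then re-check and retry" pattern. `Node` and `Inner` below are stand-in types for illustration only, not the crate's `TreeNode`.

```rust
use std::sync::{Arc, Mutex, MutexGuard};

// Stand-in node type; the crate's `TreeNode` carries more state.
struct Node {
    inner: Mutex<Inner>,
}

struct Inner {
    parent: Option<Arc<Node>>,
}

// Lock `node` and, if it has one, its parent, in parent-before-child order.
fn with_locked_node_and_parent<R>(
    node: &Arc<Node>,
    f: impl FnOnce(MutexGuard<'_, Inner>, Option<MutexGuard<'_, Inner>>) -> R,
) -> R {
    // Peek at the parent while only the node itself is locked.
    let mut candidate = {
        let node_guard = node.inner.lock().unwrap();
        match node_guard.parent.clone() {
            Some(parent) => parent,
            None => return f(node_guard, None),
        }
    };

    loop {
        // Lock the candidate parent first, then the node ...
        let parent_guard = candidate.inner.lock().unwrap();
        let node_guard = node.inner.lock().unwrap();

        // ... and re-check that it is still the actual parent.
        match node_guard.parent.clone() {
            Some(actual) if Arc::ptr_eq(&actual, &candidate) => {
                return f(node_guard, Some(parent_guard));
            }
            Some(actual) => {
                // The parent changed while nothing was locked: retry with it.
                drop(node_guard);
                drop(parent_guard);
                candidate = actual;
            }
            None => {
                drop(parent_guard);
                return f(node_guard, None);
            }
        }
    }
}

fn main() {
    let parent = Arc::new(Node { inner: Mutex::new(Inner { parent: None }) });
    let child = Arc::new(Node {
        inner: Mutex::new(Inner { parent: Some(parent.clone()) }),
    });

    let had_parent = with_locked_node_and_parent(&child, |_node, parent| parent.is_some());
    assert!(had_parent);
}
```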
+/// +/// To aquire the locks for node and parent, use [with_locked_node_and_parent]. +fn move_children_to_parent(node: &mut Inner, parent: &mut Inner) { + // Pre-allocate in the parent, for performance + parent.children.reserve(node.children.len()); + + for child in std::mem::take(&mut node.children) { + { + let mut child_locked = child.inner.lock().unwrap(); + child_locked.parent = node.parent.clone(); + child_locked.parent_idx = parent.children.len(); + } + parent.children.push(child); + } +} + +/// Removes a child from the parent. +/// +/// `parent` MUST be the parent of `node`. +/// To aquire the locks for node and parent, use [with_locked_node_and_parent]. +fn remove_child(parent: &mut Inner, mut node: MutexGuard<'_, Inner>) { + // Query the position from where to remove a node + let pos = node.parent_idx; + node.parent = None; + node.parent_idx = 0; + + // Unlock node, so that only one child at a time is locked. + // Otherwise we would violate the lock order (see 'deadlock safety') as we + // don't know the creation order of the child nodes + drop(node); + + // If `node` is the last element in the list, we don't need any swapping + if parent.children.len() == pos + 1 { + parent.children.pop().unwrap(); + } else { + // If `node` is not the last element in the list, we need to + // replace it with the last element + let replacement_child = parent.children.pop().unwrap(); + replacement_child.inner.lock().unwrap().parent_idx = pos; + parent.children[pos] = replacement_child; + } + + let len = parent.children.len(); + if 4 * len <= parent.children.capacity() { + // equal to: + // parent.children.shrink_to(2 * len); + // but shrink_to was not yet stabilized in our minimal compatible version + let old_children = std::mem::replace(&mut parent.children, Vec::with_capacity(2 * len)); + parent.children.extend(old_children); + } +} + +/// Increases the reference count of handles. +pub(crate) fn increase_handle_refcount(node: &Arc) { + let mut locked_node = node.inner.lock().unwrap(); + + // Once no handles are left over, the node gets detached from the tree. + // There should never be a new handle once all handles are dropped. + assert!(locked_node.num_handles > 0); + + locked_node.num_handles += 1; +} + +/// Decreases the reference count of handles. +/// +/// Once no handle is left, we can remove the node from the +/// tree and connect its parent directly to its children. +pub(crate) fn decrease_handle_refcount(node: &Arc) { + let num_handles = { + let mut locked_node = node.inner.lock().unwrap(); + locked_node.num_handles -= 1; + locked_node.num_handles + }; + + if num_handles == 0 { + with_locked_node_and_parent(node, |mut node, parent| { + // Remove the node from the tree + match parent { + Some(mut parent) => { + // As we want to remove ourselves from the tree, + // we have to move the children to the parent, so that + // they still receive the cancellation event without us. + // Moving them does not violate invariant #1. + move_children_to_parent(&mut node, &mut parent); + + // Remove the node from the parent + remove_child(&mut parent, node); + } + None => { + // Due to invariant #1, we can assume that our + // children can no longer be cancelled through us. + // (as we now have neither a parent nor handles) + // Therefore we can disconnect them. + disconnect_children(&mut node); + } + } + }); + } +} + +/// Cancels a node and its children. 
+pub(crate) fn cancel(node: &Arc) { + let mut locked_node = node.inner.lock().unwrap(); + + if locked_node.is_cancelled { + return; + } + + // One by one, adopt grandchildren and then cancel and detach the child + while let Some(child) = locked_node.children.pop() { + // This can't deadlock because the mutex we are already + // holding is the parent of child. + let mut locked_child = child.inner.lock().unwrap(); + + // Detach the child from node + // No need to modify node.children, as the child already got removed with `.pop` + locked_child.parent = None; + locked_child.parent_idx = 0; + + // If child is already cancelled, detaching is enough + if locked_child.is_cancelled { + continue; + } + + // Cancel or adopt grandchildren + while let Some(grandchild) = locked_child.children.pop() { + // This can't deadlock because the two mutexes we are already + // holding is the parent and grandparent of grandchild. + let mut locked_grandchild = grandchild.inner.lock().unwrap(); + + // Detach the grandchild + locked_grandchild.parent = None; + locked_grandchild.parent_idx = 0; + + // If grandchild is already cancelled, detaching is enough + if locked_grandchild.is_cancelled { + continue; + } + + // For performance reasons, only adopt grandchildren that have children. + // Otherwise, just cancel them right away, no need for another iteration. + if locked_grandchild.children.is_empty() { + // Cancel the grandchild + locked_grandchild.is_cancelled = true; + locked_grandchild.children = Vec::new(); + drop(locked_grandchild); + grandchild.waker.notify_waiters(); + } else { + // Otherwise, adopt grandchild + locked_grandchild.parent = Some(node.clone()); + locked_grandchild.parent_idx = locked_node.children.len(); + drop(locked_grandchild); + locked_node.children.push(grandchild); + } + } + + // Cancel the child + locked_child.is_cancelled = true; + locked_child.children = Vec::new(); + drop(locked_child); + child.waker.notify_waiters(); + + // Now the child is cancelled and detached and all its children are adopted. + // Just continue until all (including adopted) children are cancelled and detached. + } + + // Cancel the node itself. + locked_node.is_cancelled = true; + locked_node.children = Vec::new(); + drop(locked_node); + node.waker.notify_waiters(); +} diff --git a/third_party/rust/tokio-util/src/sync/mod.rs b/third_party/rust/tokio-util/src/sync/mod.rs new file mode 100644 index 000000000000..de392f0bb126 --- /dev/null +++ b/third_party/rust/tokio-util/src/sync/mod.rs @@ -0,0 +1,13 @@ +//! Synchronization primitives + +mod cancellation_token; +pub use cancellation_token::{guard::DropGuard, CancellationToken, WaitForCancellationFuture}; + +mod mpsc; +pub use mpsc::{PollSendError, PollSender}; + +mod poll_semaphore; +pub use poll_semaphore::PollSemaphore; + +mod reusable_box; +pub use reusable_box::ReusableBoxFuture; diff --git a/third_party/rust/tokio-util/src/sync/mpsc.rs b/third_party/rust/tokio-util/src/sync/mpsc.rs new file mode 100644 index 000000000000..34a47c189112 --- /dev/null +++ b/third_party/rust/tokio-util/src/sync/mpsc.rs @@ -0,0 +1,283 @@ +use futures_sink::Sink; +use std::pin::Pin; +use std::task::{Context, Poll}; +use std::{fmt, mem}; +use tokio::sync::mpsc::OwnedPermit; +use tokio::sync::mpsc::Sender; + +use super::ReusableBoxFuture; + +/// Error returned by the `PollSender` when the channel is closed. +#[derive(Debug)] +pub struct PollSendError(Option); + +impl PollSendError { + /// Consumes the stored value, if any. 
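The tree walk above is observable through the public token API. A minimal sketch, assuming only the `CancellationToken` methods already documented in this patch (`child_token`, `cancel`, `is_cancelled`):

```rust
use tokio_util::sync::CancellationToken;

fn main() {
    let parent = CancellationToken::new();
    let child = parent.child_token();
    let grandchild = child.child_token();

    // Cancelling the root cancels every token derived from it, directly or
    // indirectly, which is what the grandchild-adoption loop implements.
    parent.cancel();

    assert!(parent.is_cancelled());
    assert!(child.is_cancelled());
    assert!(grandchild.is_cancelled());

    // A child derived from an already-cancelled token starts out cancelled.
    assert!(parent.child_token().is_cancelled());
}
```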
+ /// + /// If this error was encountered when calling `start_send`/`send_item`, this will be the item + /// that the caller attempted to send. Otherwise, it will be `None`. + pub fn into_inner(self) -> Option { + self.0 + } +} + +impl fmt::Display for PollSendError { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(fmt, "channel closed") + } +} + +impl std::error::Error for PollSendError {} + +#[derive(Debug)] +enum State { + Idle(Sender), + Acquiring, + ReadyToSend(OwnedPermit), + Closed, +} + +/// A wrapper around [`mpsc::Sender`] that can be polled. +/// +/// [`mpsc::Sender`]: tokio::sync::mpsc::Sender +#[derive(Debug)] +pub struct PollSender { + sender: Option>, + state: State, + acquire: ReusableBoxFuture<'static, Result, PollSendError>>, +} + +// Creates a future for acquiring a permit from the underlying channel. This is used to ensure +// there's capacity for a send to complete. +// +// By reusing the same async fn for both `Some` and `None`, we make sure every future passed to +// ReusableBoxFuture has the same underlying type, and hence the same size and alignment. +async fn make_acquire_future( + data: Option>, +) -> Result, PollSendError> { + match data { + Some(sender) => sender + .reserve_owned() + .await + .map_err(|_| PollSendError(None)), + None => unreachable!("this future should not be pollable in this state"), + } +} + +impl PollSender { + /// Creates a new `PollSender`. + pub fn new(sender: Sender) -> Self { + Self { + sender: Some(sender.clone()), + state: State::Idle(sender), + acquire: ReusableBoxFuture::new(make_acquire_future(None)), + } + } + + fn take_state(&mut self) -> State { + mem::replace(&mut self.state, State::Closed) + } + + /// Attempts to prepare the sender to receive a value. + /// + /// This method must be called and return `Poll::Ready(Ok(()))` prior to each call to + /// `send_item`. + /// + /// This method returns `Poll::Ready` once the underlying channel is ready to receive a value, + /// by reserving a slot in the channel for the item to be sent. If this method returns + /// `Poll::Pending`, the current task is registered to be notified (via + /// `cx.waker().wake_by_ref()`) when `poll_reserve` should be called again. + /// + /// # Errors + /// + /// If the channel is closed, an error will be returned. This is a permanent state. + pub fn poll_reserve(&mut self, cx: &mut Context<'_>) -> Poll>> { + loop { + let (result, next_state) = match self.take_state() { + State::Idle(sender) => { + // Start trying to acquire a permit to reserve a slot for our send, and + // immediately loop back around to poll it the first time. + self.acquire.set(make_acquire_future(Some(sender))); + (None, State::Acquiring) + } + State::Acquiring => match self.acquire.poll(cx) { + // Channel has capacity. + Poll::Ready(Ok(permit)) => { + (Some(Poll::Ready(Ok(()))), State::ReadyToSend(permit)) + } + // Channel is closed. + Poll::Ready(Err(e)) => (Some(Poll::Ready(Err(e))), State::Closed), + // Channel doesn't have capacity yet, so we need to wait. + Poll::Pending => (Some(Poll::Pending), State::Acquiring), + }, + // We're closed, either by choice or because the underlying sender was closed. + s @ State::Closed => (Some(Poll::Ready(Err(PollSendError(None)))), s), + // We're already ready to send an item. + s @ State::ReadyToSend(_) => (Some(Poll::Ready(Ok(()))), s), + }; + + self.state = next_state; + if let Some(result) = result { + return result; + } + } + } + + /// Sends an item to the channel. 
+ /// + /// Before calling `send_item`, `poll_reserve` must be called with a successful return + /// value of `Poll::Ready(Ok(()))`. + /// + /// # Errors + /// + /// If the channel is closed, an error will be returned. This is a permanent state. + /// + /// # Panics + /// + /// If `poll_reserve` was not successfully called prior to calling `send_item`, then this method + /// will panic. + pub fn send_item(&mut self, value: T) -> Result<(), PollSendError> { + let (result, next_state) = match self.take_state() { + State::Idle(_) | State::Acquiring => { + panic!("`send_item` called without first calling `poll_reserve`") + } + // We have a permit to send our item, so go ahead, which gets us our sender back. + State::ReadyToSend(permit) => (Ok(()), State::Idle(permit.send(value))), + // We're closed, either by choice or because the underlying sender was closed. + State::Closed => (Err(PollSendError(Some(value))), State::Closed), + }; + + // Handle deferred closing if `close` was called between `poll_reserve` and `send_item`. + self.state = if self.sender.is_some() { + next_state + } else { + State::Closed + }; + result + } + + /// Checks whether this sender is been closed. + /// + /// The underlying channel that this sender was wrapping may still be open. + pub fn is_closed(&self) -> bool { + matches!(self.state, State::Closed) || self.sender.is_none() + } + + /// Gets a reference to the `Sender` of the underlying channel. + /// + /// If `PollSender` has been closed, `None` is returned. The underlying channel that this sender + /// was wrapping may still be open. + pub fn get_ref(&self) -> Option<&Sender> { + self.sender.as_ref() + } + + /// Closes this sender. + /// + /// No more messages will be able to be sent from this sender, but the underlying channel will + /// remain open until all senders have dropped, or until the [`Receiver`] closes the channel. + /// + /// If a slot was previously reserved by calling `poll_reserve`, then a final call can be made + /// to `send_item` in order to consume the reserved slot. After that, no further sends will be + /// possible. If you do not intend to send another item, you can release the reserved slot back + /// to the underlying sender by calling [`abort_send`]. + /// + /// [`abort_send`]: crate::sync::PollSender::abort_send + /// [`Receiver`]: tokio::sync::mpsc::Receiver + pub fn close(&mut self) { + // Mark ourselves officially closed by dropping our main sender. + self.sender = None; + + // If we're already idle, closed, or we haven't yet reserved a slot, we can quickly + // transition to the closed state. Otherwise, leave the existing permit in place for the + // caller if they want to complete the send. + match self.state { + State::Idle(_) => self.state = State::Closed, + State::Acquiring => { + self.acquire.set(make_acquire_future(None)); + self.state = State::Closed; + } + _ => {} + } + } + + /// Aborts the current in-progress send, if any. + /// + /// Returns `true` if a send was aborted. If the sender was closed prior to calling + /// `abort_send`, then the sender will remain in the closed state, otherwise the sender will be + /// ready to attempt another send. + pub fn abort_send(&mut self) -> bool { + // We may have been closed in the meantime, after a call to `poll_reserve` already + // succeeded. We'll check if `self.sender` is `None` to see if we should transition to the + // closed state when we actually abort a send, rather than resetting ourselves back to idle. 
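A short sketch of the `poll_reserve`/`send_item` protocol described above, assuming `std::future::poll_fn` (stable since Rust 1.64; any other `poll_fn` helper works the same way) and a tokio runtime with the `sync` feature.

```rust
use std::future::poll_fn;
use tokio_util::sync::PollSender;

#[tokio::main]
async fn main() {
    let (tx, mut rx) = tokio::sync::mpsc::channel::<u32>(1);
    let mut sender = PollSender::new(tx);

    // A slot must be reserved before each `send_item` call.
    poll_fn(|cx| sender.poll_reserve(cx)).await.unwrap();
    sender.send_item(42).unwrap();
    assert_eq!(rx.recv().await, Some(42));

    // Closing the wrapper only stops further sends from it; the underlying
    // channel stays open for other `Sender` clones.
    sender.close();
    assert!(sender.is_closed());
}
```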
+ + let (result, next_state) = match self.take_state() { + // We're currently trying to reserve a slot to send into. + State::Acquiring => { + // Replacing the future drops the in-flight one. + self.acquire.set(make_acquire_future(None)); + + // If we haven't closed yet, we have to clone our stored sender since we have no way + // to get it back from the acquire future we just dropped. + let state = match self.sender.clone() { + Some(sender) => State::Idle(sender), + None => State::Closed, + }; + (true, state) + } + // We got the permit. If we haven't closed yet, get the sender back. + State::ReadyToSend(permit) => { + let state = if self.sender.is_some() { + State::Idle(permit.release()) + } else { + State::Closed + }; + (true, state) + } + s => (false, s), + }; + + self.state = next_state; + result + } +} + +impl Clone for PollSender { + /// Clones this `PollSender`. + /// + /// The resulting `PollSender` will have an initial state identical to calling `PollSender::new`. + fn clone(&self) -> PollSender { + let (sender, state) = match self.sender.clone() { + Some(sender) => (Some(sender.clone()), State::Idle(sender)), + None => (None, State::Closed), + }; + + Self { + sender, + state, + // We don't use `make_acquire_future` here because our relaxed bounds on `T` are not + // compatible with the transitive bounds required by `Sender`. + acquire: ReusableBoxFuture::new(async { unreachable!() }), + } + } +} + +impl Sink for PollSender { + type Error = PollSendError; + + fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + Pin::into_inner(self).poll_reserve(cx) + } + + fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn start_send(self: Pin<&mut Self>, item: T) -> Result<(), Self::Error> { + Pin::into_inner(self).send_item(item) + } + + fn poll_close(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { + Pin::into_inner(self).close(); + Poll::Ready(Ok(())) + } +} diff --git a/third_party/rust/tokio-util/src/sync/poll_semaphore.rs b/third_party/rust/tokio-util/src/sync/poll_semaphore.rs new file mode 100644 index 000000000000..d0b1dedc2739 --- /dev/null +++ b/third_party/rust/tokio-util/src/sync/poll_semaphore.rs @@ -0,0 +1,136 @@ +use futures_core::{ready, Stream}; +use std::fmt; +use std::pin::Pin; +use std::sync::Arc; +use std::task::{Context, Poll}; +use tokio::sync::{AcquireError, OwnedSemaphorePermit, Semaphore, TryAcquireError}; + +use super::ReusableBoxFuture; + +/// A wrapper around [`Semaphore`] that provides a `poll_acquire` method. +/// +/// [`Semaphore`]: tokio::sync::Semaphore +pub struct PollSemaphore { + semaphore: Arc, + permit_fut: Option>>, +} + +impl PollSemaphore { + /// Create a new `PollSemaphore`. + pub fn new(semaphore: Arc) -> Self { + Self { + semaphore, + permit_fut: None, + } + } + + /// Closes the semaphore. + pub fn close(&self) { + self.semaphore.close() + } + + /// Obtain a clone of the inner semaphore. + pub fn clone_inner(&self) -> Arc { + self.semaphore.clone() + } + + /// Get back the inner semaphore. + pub fn into_inner(self) -> Arc { + self.semaphore + } + + /// Poll to acquire a permit from the semaphore. + /// + /// This can return the following values: + /// + /// - `Poll::Pending` if a permit is not currently available. + /// - `Poll::Ready(Some(permit))` if a permit was acquired. + /// - `Poll::Ready(None)` if the semaphore has been closed. 
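Because `PollSender` also implements `Sink` (the impl appears just below), it can be driven with the usual sink combinators; a hedged sketch, assuming the `futures` crate is available for `SinkExt`:

```rust
use futures::SinkExt;
use tokio_util::sync::PollSender;

#[tokio::main]
async fn main() {
    let (tx, mut rx) = tokio::sync::mpsc::channel::<String>(4);
    let mut sink = PollSender::new(tx);

    // `SinkExt::send` drives `poll_ready`/`start_send`/`poll_flush` for us.
    sink.send("hello".to_string()).await.unwrap();
    sink.send("world".to_string()).await.unwrap();

    assert_eq!(rx.recv().await.as_deref(), Some("hello"));
    assert_eq!(rx.recv().await.as_deref(), Some("world"));
}
```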
+ /// + /// When this method returns `Poll::Pending`, the current task is scheduled + /// to receive a wakeup when a permit becomes available, or when the + /// semaphore is closed. Note that on multiple calls to `poll_acquire`, only + /// the `Waker` from the `Context` passed to the most recent call is + /// scheduled to receive a wakeup. + pub fn poll_acquire(&mut self, cx: &mut Context<'_>) -> Poll> { + let permit_future = match self.permit_fut.as_mut() { + Some(fut) => fut, + None => { + // avoid allocations completely if we can grab a permit immediately + match Arc::clone(&self.semaphore).try_acquire_owned() { + Ok(permit) => return Poll::Ready(Some(permit)), + Err(TryAcquireError::Closed) => return Poll::Ready(None), + Err(TryAcquireError::NoPermits) => {} + } + + let next_fut = Arc::clone(&self.semaphore).acquire_owned(); + self.permit_fut + .get_or_insert(ReusableBoxFuture::new(next_fut)) + } + }; + + let result = ready!(permit_future.poll(cx)); + + let next_fut = Arc::clone(&self.semaphore).acquire_owned(); + permit_future.set(next_fut); + + match result { + Ok(permit) => Poll::Ready(Some(permit)), + Err(_closed) => { + self.permit_fut = None; + Poll::Ready(None) + } + } + } + + /// Returns the current number of available permits. + /// + /// This is equivalent to the [`Semaphore::available_permits`] method on the + /// `tokio::sync::Semaphore` type. + /// + /// [`Semaphore::available_permits`]: tokio::sync::Semaphore::available_permits + pub fn available_permits(&self) -> usize { + self.semaphore.available_permits() + } + + /// Adds `n` new permits to the semaphore. + /// + /// The maximum number of permits is `usize::MAX >> 3`, and this function + /// will panic if the limit is exceeded. + /// + /// This is equivalent to the [`Semaphore::add_permits`] method on the + /// `tokio::sync::Semaphore` type. + /// + /// [`Semaphore::add_permits`]: tokio::sync::Semaphore::add_permits + pub fn add_permits(&self, n: usize) { + self.semaphore.add_permits(n); + } +} + +impl Stream for PollSemaphore { + type Item = OwnedSemaphorePermit; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + Pin::into_inner(self).poll_acquire(cx) + } +} + +impl Clone for PollSemaphore { + fn clone(&self) -> PollSemaphore { + PollSemaphore::new(self.clone_inner()) + } +} + +impl fmt::Debug for PollSemaphore { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("PollSemaphore") + .field("semaphore", &self.semaphore) + .finish() + } +} + +impl AsRef for PollSemaphore { + fn as_ref(&self) -> &Semaphore { + &*self.semaphore + } +} diff --git a/third_party/rust/tokio-util/src/sync/reusable_box.rs b/third_party/rust/tokio-util/src/sync/reusable_box.rs new file mode 100644 index 000000000000..3204207db79b --- /dev/null +++ b/third_party/rust/tokio-util/src/sync/reusable_box.rs @@ -0,0 +1,148 @@ +use std::alloc::Layout; +use std::future::Future; +use std::panic::AssertUnwindSafe; +use std::pin::Pin; +use std::ptr::{self, NonNull}; +use std::task::{Context, Poll}; +use std::{fmt, panic}; + +/// A reusable `Pin + Send + 'a>>`. +/// +/// This type lets you replace the future stored in the box without +/// reallocating when the size and alignment permits this. +pub struct ReusableBoxFuture<'a, T> { + boxed: NonNull + Send + 'a>, +} + +impl<'a, T> ReusableBoxFuture<'a, T> { + /// Create a new `ReusableBoxFuture` containing the provided future. 
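A usage sketch for `PollSemaphore`, assuming the `futures` crate for `StreamExt`; it relies only on the `Stream` impl, `available_permits`, and `tokio::sync::Semaphore::close`, all of which appear in the vendored code above.

```rust
use std::sync::Arc;

use futures::StreamExt;
use tokio::sync::Semaphore;
use tokio_util::sync::PollSemaphore;

#[tokio::main]
async fn main() {
    let semaphore = Arc::new(Semaphore::new(2));
    let mut permits = PollSemaphore::new(semaphore.clone());

    // Each stream item is an `OwnedSemaphorePermit`; dropping it hands the
    // permit back to the semaphore.
    let permit = permits.next().await.unwrap();
    assert_eq!(permits.available_permits(), 1);
    drop(permit);
    assert_eq!(permits.available_permits(), 2);

    // Closing the semaphore ends the stream.
    semaphore.close();
    assert!(permits.next().await.is_none());
}
```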
+ pub fn new(future: F) -> Self + where + F: Future + Send + 'a, + { + let boxed: Box + Send + 'a> = Box::new(future); + + let boxed = NonNull::from(Box::leak(boxed)); + + Self { boxed } + } + + /// Replace the future currently stored in this box. + /// + /// This reallocates if and only if the layout of the provided future is + /// different from the layout of the currently stored future. + pub fn set(&mut self, future: F) + where + F: Future + Send + 'a, + { + if let Err(future) = self.try_set(future) { + *self = Self::new(future); + } + } + + /// Replace the future currently stored in this box. + /// + /// This function never reallocates, but returns an error if the provided + /// future has a different size or alignment from the currently stored + /// future. + pub fn try_set(&mut self, future: F) -> Result<(), F> + where + F: Future + Send + 'a, + { + // SAFETY: The pointer is not dangling. + let self_layout = { + let dyn_future: &(dyn Future + Send) = unsafe { self.boxed.as_ref() }; + Layout::for_value(dyn_future) + }; + + if Layout::new::() == self_layout { + // SAFETY: We just checked that the layout of F is correct. + unsafe { + self.set_same_layout(future); + } + + Ok(()) + } else { + Err(future) + } + } + + /// Set the current future. + /// + /// # Safety + /// + /// This function requires that the layout of the provided future is the + /// same as `self.layout`. + unsafe fn set_same_layout(&mut self, future: F) + where + F: Future + Send + 'a, + { + // Drop the existing future, catching any panics. + let result = panic::catch_unwind(AssertUnwindSafe(|| { + ptr::drop_in_place(self.boxed.as_ptr()); + })); + + // Overwrite the future behind the pointer. This is safe because the + // allocation was allocated with the same size and alignment as the type F. + let self_ptr: *mut F = self.boxed.as_ptr() as *mut F; + ptr::write(self_ptr, future); + + // Update the vtable of self.boxed. The pointer is not null because we + // just got it from self.boxed, which is not null. + self.boxed = NonNull::new_unchecked(self_ptr); + + // If the old future's destructor panicked, resume unwinding. + match result { + Ok(()) => {} + Err(payload) => { + panic::resume_unwind(payload); + } + } + } + + /// Get a pinned reference to the underlying future. + pub fn get_pin(&mut self) -> Pin<&mut (dyn Future + Send)> { + // SAFETY: The user of this box cannot move the box, and we do not move it + // either. + unsafe { Pin::new_unchecked(self.boxed.as_mut()) } + } + + /// Poll the future stored inside this box. + pub fn poll(&mut self, cx: &mut Context<'_>) -> Poll { + self.get_pin().poll(cx) + } +} + +impl Future for ReusableBoxFuture<'_, T> { + type Output = T; + + /// Poll the future stored inside this box. + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + Pin::into_inner(self).get_pin().poll(cx) + } +} + +// The future stored inside ReusableBoxFuture<'_, T> must be Send. +unsafe impl Send for ReusableBoxFuture<'_, T> {} + +// The only method called on self.boxed is poll, which takes &mut self, so this +// struct being Sync does not permit any invalid access to the Future, even if +// the future is not Sync. +unsafe impl Sync for ReusableBoxFuture<'_, T> {} + +// Just like a Pin> is always Unpin, so is this type. 
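A small sketch of the `ReusableBoxFuture` API above (`new`, `set`, `try_set`); the layout comparison in `try_set` is what decides whether the last future below is rejected. `ReusableBoxFuture` is `Unpin`, so `(&mut fut).await` is enough to poll it.

```rust
use tokio_util::sync::ReusableBoxFuture;

#[tokio::main]
async fn main() {
    // One allocation up front ...
    let mut fut = ReusableBoxFuture::new(async { 1u32 });
    assert_eq!((&mut fut).await, 1);

    // ... reused by `set`, because this future has the same layout.
    fut.set(async { 2u32 });
    assert_eq!((&mut fut).await, 2);

    // `try_set` refuses (rather than reallocating) when the layout differs;
    // this future captures a 64-byte array, so it cannot fit in place.
    let big = [0u8; 64];
    assert!(fut.try_set(async move { big.len() as u32 }).is_err());
}
```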
+impl Unpin for ReusableBoxFuture<'_, T> {} + +impl Drop for ReusableBoxFuture<'_, T> { + fn drop(&mut self) { + unsafe { + drop(Box::from_raw(self.boxed.as_ptr())); + } + } +} + +impl fmt::Debug for ReusableBoxFuture<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("ReusableBoxFuture").finish() + } +} diff --git a/third_party/rust/tokio-0.2.25/src/sync/tests/loom_cancellation_token.rs b/third_party/rust/tokio-util/src/sync/tests/loom_cancellation_token.rs similarity index 100% rename from third_party/rust/tokio-0.2.25/src/sync/tests/loom_cancellation_token.rs rename to third_party/rust/tokio-util/src/sync/tests/loom_cancellation_token.rs diff --git a/third_party/rust/tokio-util/src/sync/tests/mod.rs b/third_party/rust/tokio-util/src/sync/tests/mod.rs new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/third_party/rust/tokio-util/src/sync/tests/mod.rs @@ -0,0 +1 @@ + diff --git a/third_party/rust/tokio-util/src/task/mod.rs b/third_party/rust/tokio-util/src/task/mod.rs new file mode 100644 index 000000000000..5aa33df2dc0f --- /dev/null +++ b/third_party/rust/tokio-util/src/task/mod.rs @@ -0,0 +1,4 @@ +//! Extra utilities for spawning tasks + +mod spawn_pinned; +pub use spawn_pinned::LocalPoolHandle; diff --git a/third_party/rust/tokio-util/src/task/spawn_pinned.rs b/third_party/rust/tokio-util/src/task/spawn_pinned.rs new file mode 100644 index 000000000000..6f553e9d07ae --- /dev/null +++ b/third_party/rust/tokio-util/src/task/spawn_pinned.rs @@ -0,0 +1,307 @@ +use futures_util::future::{AbortHandle, Abortable}; +use std::fmt; +use std::fmt::{Debug, Formatter}; +use std::future::Future; +use std::sync::atomic::{AtomicUsize, Ordering}; +use std::sync::Arc; +use tokio::runtime::Builder; +use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender}; +use tokio::sync::oneshot; +use tokio::task::{spawn_local, JoinHandle, LocalSet}; + +/// A handle to a local pool, used for spawning `!Send` tasks. +#[derive(Clone)] +pub struct LocalPoolHandle { + pool: Arc, +} + +impl LocalPoolHandle { + /// Create a new pool of threads to handle `!Send` tasks. Spawn tasks onto this + /// pool via [`LocalPoolHandle::spawn_pinned`]. + /// + /// # Panics + /// Panics if the pool size is less than one. + pub fn new(pool_size: usize) -> LocalPoolHandle { + assert!(pool_size > 0); + + let workers = (0..pool_size) + .map(|_| LocalWorkerHandle::new_worker()) + .collect(); + + let pool = Arc::new(LocalPool { workers }); + + LocalPoolHandle { pool } + } + + /// Spawn a task onto a worker thread and pin it there so it can't be moved + /// off of the thread. Note that the future is not [`Send`], but the + /// [`FnOnce`] which creates it is. 
+ /// + /// # Examples + /// ``` + /// use std::rc::Rc; + /// use tokio_util::task::LocalPoolHandle; + /// + /// #[tokio::main] + /// async fn main() { + /// // Create the local pool + /// let pool = LocalPoolHandle::new(1); + /// + /// // Spawn a !Send future onto the pool and await it + /// let output = pool + /// .spawn_pinned(|| { + /// // Rc is !Send + !Sync + /// let local_data = Rc::new("test"); + /// + /// // This future holds an Rc, so it is !Send + /// async move { local_data.to_string() } + /// }) + /// .await + /// .unwrap(); + /// + /// assert_eq!(output, "test"); + /// } + /// ``` + pub fn spawn_pinned(&self, create_task: F) -> JoinHandle + where + F: FnOnce() -> Fut, + F: Send + 'static, + Fut: Future + 'static, + Fut::Output: Send + 'static, + { + self.pool.spawn_pinned(create_task) + } +} + +impl Debug for LocalPoolHandle { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.write_str("LocalPoolHandle") + } +} + +struct LocalPool { + workers: Vec, +} + +impl LocalPool { + /// Spawn a `?Send` future onto a worker + fn spawn_pinned(&self, create_task: F) -> JoinHandle + where + F: FnOnce() -> Fut, + F: Send + 'static, + Fut: Future + 'static, + Fut::Output: Send + 'static, + { + let (sender, receiver) = oneshot::channel(); + + let (worker, job_guard) = self.find_and_incr_least_burdened_worker(); + let worker_spawner = worker.spawner.clone(); + + // Spawn a future onto the worker's runtime so we can immediately return + // a join handle. + worker.runtime_handle.spawn(async move { + // Move the job guard into the task + let _job_guard = job_guard; + + // Propagate aborts via Abortable/AbortHandle + let (abort_handle, abort_registration) = AbortHandle::new_pair(); + let _abort_guard = AbortGuard(abort_handle); + + // Inside the future we can't run spawn_local yet because we're not + // in the context of a LocalSet. We need to send create_task to the + // LocalSet task for spawning. + let spawn_task = Box::new(move || { + // Once we're in the LocalSet context we can call spawn_local + let join_handle = + spawn_local( + async move { Abortable::new(create_task(), abort_registration).await }, + ); + + // Send the join handle back to the spawner. If sending fails, + // we assume the parent task was canceled, so cancel this task + // as well. + if let Err(join_handle) = sender.send(join_handle) { + join_handle.abort() + } + }); + + // Send the callback to the LocalSet task + if let Err(e) = worker_spawner.send(spawn_task) { + // Propagate the error as a panic in the join handle. + panic!("Failed to send job to worker: {}", e); + } + + // Wait for the task's join handle + let join_handle = match receiver.await { + Ok(handle) => handle, + Err(e) => { + // We sent the task successfully, but failed to get its + // join handle... We assume something happened to the worker + // and the task was not spawned. Propagate the error as a + // panic in the join handle. + panic!("Worker failed to send join handle: {}", e); + } + }; + + // Wait for the task to complete + let join_result = join_handle.await; + + match join_result { + Ok(Ok(output)) => output, + Ok(Err(_)) => { + // Pinned task was aborted. But that only happens if this + // task is aborted. So this is an impossible branch. 
+ unreachable!( + "Reaching this branch means this task was previously \ + aborted but it continued running anyways" + ) + } + Err(e) => { + if e.is_panic() { + std::panic::resume_unwind(e.into_panic()); + } else if e.is_cancelled() { + // No one else should have the join handle, so this is + // unexpected. Forward this error as a panic in the join + // handle. + panic!("spawn_pinned task was canceled: {}", e); + } else { + // Something unknown happened (not a panic or + // cancellation). Forward this error as a panic in the + // join handle. + panic!("spawn_pinned task failed: {}", e); + } + } + } + }) + } + + /// Find the worker with the least number of tasks, increment its task + /// count, and return its handle. Make sure to actually spawn a task on + /// the worker so the task count is kept consistent with load. + /// + /// A job count guard is also returned to ensure the task count gets + /// decremented when the job is done. + fn find_and_incr_least_burdened_worker(&self) -> (&LocalWorkerHandle, JobCountGuard) { + loop { + let (worker, task_count) = self + .workers + .iter() + .map(|worker| (worker, worker.task_count.load(Ordering::SeqCst))) + .min_by_key(|&(_, count)| count) + .expect("There must be more than one worker"); + + // Make sure the task count hasn't changed since when we choose this + // worker. Otherwise, restart the search. + if worker + .task_count + .compare_exchange( + task_count, + task_count + 1, + Ordering::SeqCst, + Ordering::Relaxed, + ) + .is_ok() + { + return (worker, JobCountGuard(Arc::clone(&worker.task_count))); + } + } + } +} + +/// Automatically decrements a worker's job count when a job finishes (when +/// this gets dropped). +struct JobCountGuard(Arc); + +impl Drop for JobCountGuard { + fn drop(&mut self) { + // Decrement the job count + let previous_value = self.0.fetch_sub(1, Ordering::SeqCst); + debug_assert!(previous_value >= 1); + } +} + +/// Calls abort on the handle when dropped. +struct AbortGuard(AbortHandle); + +impl Drop for AbortGuard { + fn drop(&mut self) { + self.0.abort(); + } +} + +type PinnedFutureSpawner = Box; + +struct LocalWorkerHandle { + runtime_handle: tokio::runtime::Handle, + spawner: UnboundedSender, + task_count: Arc, +} + +impl LocalWorkerHandle { + /// Create a new worker for executing pinned tasks + fn new_worker() -> LocalWorkerHandle { + let (sender, receiver) = unbounded_channel(); + let runtime = Builder::new_current_thread() + .enable_all() + .build() + .expect("Failed to start a pinned worker thread runtime"); + let runtime_handle = runtime.handle().clone(); + let task_count = Arc::new(AtomicUsize::new(0)); + let task_count_clone = Arc::clone(&task_count); + + std::thread::spawn(|| Self::run(runtime, receiver, task_count_clone)); + + LocalWorkerHandle { + runtime_handle, + spawner: sender, + task_count, + } + } + + fn run( + runtime: tokio::runtime::Runtime, + mut task_receiver: UnboundedReceiver, + task_count: Arc, + ) { + let local_set = LocalSet::new(); + local_set.block_on(&runtime, async { + while let Some(spawn_task) = task_receiver.recv().await { + // Calls spawn_local(future) + (spawn_task)(); + } + }); + + // If there are any tasks on the runtime associated with a LocalSet task + // that has already completed, but whose output has not yet been + // reported, let that task complete. + // + // Since the task_count is decremented when the runtime task exits, + // reading that counter lets us know if any such tasks completed during + // the call to `block_on`. 
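The least-burdened-worker search above pairs a linear scan with a `compare_exchange` confirmation so that two racing spawns do not both claim the same worker. A standalone sketch of that pattern on plain atomics (not the crate's code):

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

// Pick the index with the smallest count and bump it, retrying if another
// thread changed that count between the scan and the claim.
fn claim_least_loaded(counts: &[AtomicUsize]) -> usize {
    loop {
        let (idx, observed) = counts
            .iter()
            .map(|c| c.load(Ordering::SeqCst))
            .enumerate()
            .min_by_key(|&(_, count)| count)
            .expect("at least one worker");

        if counts[idx]
            .compare_exchange(observed, observed + 1, Ordering::SeqCst, Ordering::Relaxed)
            .is_ok()
        {
            return idx;
        }
    }
}

fn main() {
    let counts = [AtomicUsize::new(0), AtomicUsize::new(2), AtomicUsize::new(1)];
    assert_eq!(claim_least_loaded(&counts), 0);
    assert_eq!(counts[0].load(Ordering::SeqCst), 1);
}
```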
+ // + // Tasks on the LocalSet can't complete during this loop since they're + // stored on the LocalSet and we aren't accessing it. + let mut previous_task_count = task_count.load(Ordering::SeqCst); + loop { + // This call will also run tasks spawned on the runtime. + runtime.block_on(tokio::task::yield_now()); + let new_task_count = task_count.load(Ordering::SeqCst); + if new_task_count == previous_task_count { + break; + } else { + previous_task_count = new_task_count; + } + } + + // It's now no longer possible for a task on the runtime to be + // associated with a LocalSet task that has completed. Drop both the + // LocalSet and runtime to let tasks on the runtime be cancelled if and + // only if they are still on the LocalSet. + // + // Drop the LocalSet task first so that anyone awaiting the runtime + // JoinHandle will see the cancelled error after the LocalSet task + // destructor has completed. + drop(local_set); + drop(runtime); + } +} diff --git a/third_party/rust/tokio-0.2.25/src/time/delay_queue.rs b/third_party/rust/tokio-util/src/time/delay_queue.rs similarity index 51% rename from third_party/rust/tokio-0.2.25/src/time/delay_queue.rs rename to third_party/rust/tokio-util/src/time/delay_queue.rs index b02153b9e0fa..a0c5e5c5b068 100644 --- a/third_party/rust/tokio-0.2.25/src/time/delay_queue.rs +++ b/third_party/rust/tokio-util/src/time/delay_queue.rs @@ -5,14 +5,21 @@ //! [`DelayQueue`]: struct@DelayQueue use crate::time::wheel::{self, Wheel}; -use crate::time::{delay_until, Delay, Duration, Error, Instant}; +use futures_core::ready; +use tokio::time::{sleep_until, Duration, Instant, Sleep}; + +use core::ops::{Index, IndexMut}; use slab::Slab; use std::cmp; +use std::collections::HashMap; +use std::convert::From; +use std::fmt; +use std::fmt::Debug; use std::future::Future; use std::marker::PhantomData; use std::pin::Pin; -use std::task::{self, Poll}; +use std::task::{self, Poll, Waker}; /// A queue of delayed elements. /// @@ -28,30 +35,30 @@ use std::task::{self, Poll}; /// /// Once delays have been configured, the `DelayQueue` is used via its /// [`Stream`] implementation. [`poll_expired`] is called. If an entry has reached its -/// deadline, it is returned. If not, `Poll::Pending` indicating that the +/// deadline, it is returned. If not, `Poll::Pending` is returned indicating that the /// current task will be notified once the deadline has been reached. /// /// # `Stream` implementation /// /// Items are retrieved from the queue via [`DelayQueue::poll_expired`]. If no delays have -/// expired, no items are returned. In this case, `NotReady` is returned and the +/// expired, no items are returned. In this case, `Poll::Pending` is returned and the /// current task is registered to be notified once the next item's delay has /// expired. /// /// If no items are in the queue, i.e. `is_empty()` returns `true`, then `poll` -/// returns `Ready(None)`. This indicates that the stream has reached an end. +/// returns `Poll::Ready(None)`. This indicates that the stream has reached an end. /// However, if a new item is inserted *after*, `poll` will once again start -/// returning items or `NotReady. +/// returning items or `Poll::Pending`. /// /// Items are returned ordered by their expirations. Items that are configured /// to expire first will be returned first. There are no ordering guarantees -/// for items configured to expire the same instant. Also note that delays are +/// for items configured to expire at the same instant. 
Also note that delays are /// rounded to the closest millisecond. /// /// # Implementation /// -/// The [`DelayQueue`] is backed by a separate instance of the same timer wheel used internally by -/// Tokio's standalone timer utilities such as [`delay_for`]. Because of this, it offers the same +/// The [`DelayQueue`] is backed by a separate instance of a timer wheel similar to that used internally +/// by Tokio's standalone timer utilities such as [`sleep`]. Because of this, it offers the same /// performance and scalability benefits. /// /// State associated with each entry is stored in a [`slab`]. This amortizes the cost of allocation, @@ -65,7 +72,7 @@ use std::task::{self, Poll}; /// Using `DelayQueue` to manage cache entries. /// /// ```rust,no_run -/// use tokio::time::{delay_queue, DelayQueue, Error}; +/// use tokio_util::time::{DelayQueue, delay_queue}; /// /// use futures::ready; /// use std::collections::HashMap; @@ -100,13 +107,12 @@ use std::task::{self, Poll}; /// } /// } /// -/// fn poll_purge(&mut self, cx: &mut Context<'_>) -> Poll> { -/// while let Some(res) = ready!(self.expirations.poll_expired(cx)) { -/// let entry = res?; +/// fn poll_purge(&mut self, cx: &mut Context<'_>) -> Poll<()> { +/// while let Some(entry) = ready!(self.expirations.poll_expired(cx)) { /// self.entries.remove(entry.get_ref()); /// } /// -/// Poll::Ready(Ok(())) +/// Poll::Ready(()) /// } /// } /// ``` @@ -118,14 +124,14 @@ use std::task::{self, Poll}; /// [`poll_expired`]: method@Self::poll_expired /// [`Stream::poll_expired`]: method@Self::poll_expired /// [`DelayQueue`]: struct@DelayQueue -/// [`delay_for`]: fn@super::delay_for +/// [`sleep`]: fn@tokio::time::sleep /// [`slab`]: slab /// [`capacity`]: method@Self::capacity /// [`reserve`]: method@Self::reserve #[derive(Debug)] pub struct DelayQueue { /// Stores data associated with entries - slab: Slab>, + slab: SlabStorage, /// Lookup structure tracking all delays in the queue wheel: Wheel>, @@ -135,16 +141,231 @@ pub struct DelayQueue { expired: Stack, /// Delay expiring when the *first* item in the queue expires - delay: Option, + delay: Option>>, /// Wheel polling state - poll: wheel::Poll, + wheel_now: u64, /// Instant at which the timer starts start: Instant, + + /// Waker that is invoked when we potentially need to reset the timer. + /// Because we lazily create the timer when the first entry is created, we + /// need to awaken any poller that polled us before that point. + waker: Option, } -/// An entry in `DelayQueue` that has expired and removed. +#[derive(Default)] +struct SlabStorage { + inner: Slab>, + + // A `compact` call requires a re-mapping of the `Key`s that were changed + // during the `compact` call of the `slab`. Since the keys that were given out + // cannot be changed retroactively we need to keep track of these re-mappings. + // The keys of `key_map` correspond to the old keys that were given out and + // the values to the `Key`s that were re-mapped by the `compact` call. + key_map: HashMap, + + // Index used to create new keys to hand out. + next_key_index: usize, + + // Whether `compact` has been called, necessary in order to decide whether + // to include keys in `key_map`. 
+ compact_called: bool, +} + +impl SlabStorage { + pub(crate) fn with_capacity(capacity: usize) -> SlabStorage { + SlabStorage { + inner: Slab::with_capacity(capacity), + key_map: HashMap::new(), + next_key_index: 0, + compact_called: false, + } + } + + // Inserts data into the inner slab and re-maps keys if necessary + pub(crate) fn insert(&mut self, val: Data) -> Key { + let mut key = KeyInternal::new(self.inner.insert(val)); + let key_contained = self.key_map.contains_key(&key.into()); + + if key_contained { + // It's possible that a `compact` call creates capacitiy in `self.inner` in + // such a way that a `self.inner.insert` call creates a `key` which was + // previously given out during an `insert` call prior to the `compact` call. + // If `key` is contained in `self.key_map`, we have encountered this exact situation, + // We need to create a new key `key_to_give_out` and include the relation + // `key_to_give_out` -> `key` in `self.key_map`. + let key_to_give_out = self.create_new_key(); + assert!(!self.key_map.contains_key(&key_to_give_out.into())); + self.key_map.insert(key_to_give_out.into(), key); + key = key_to_give_out; + } else if self.compact_called { + // Include an identity mapping in `self.key_map` in order to allow us to + // panic if a key that was handed out is removed more than once. + self.key_map.insert(key.into(), key); + } + + key.into() + } + + // Re-map the key in case compact was previously called. + // Note: Since we include identity mappings in key_map after compact was called, + // we have information about all keys that were handed out. In the case in which + // compact was called and we try to remove a Key that was previously removed + // we can detect invalid keys if no key is found in `key_map`. This is necessary + // in order to prevent situations in which a previously removed key + // corresponds to a re-mapped key internally and which would then be incorrectly + // removed from the slab. + // + // Example to illuminate this problem: + // + // Let's assume our `key_map` is {1 -> 2, 2 -> 1} and we call remove(1). If we + // were to remove 1 again, we would not find it inside `key_map` anymore. + // If we were to imply from this that no re-mapping was necessary, we would + // incorrectly remove 1 from `self.slab.inner`, which corresponds to the + // handed-out key 2. + pub(crate) fn remove(&mut self, key: &Key) -> Data { + let remapped_key = if self.compact_called { + match self.key_map.remove(key) { + Some(key_internal) => key_internal, + None => panic!("invalid key"), + } + } else { + (*key).into() + }; + + self.inner.remove(remapped_key.index) + } + + pub(crate) fn shrink_to_fit(&mut self) { + self.inner.shrink_to_fit(); + self.key_map.shrink_to_fit(); + } + + pub(crate) fn compact(&mut self) { + if !self.compact_called { + for (key, _) in self.inner.iter() { + self.key_map.insert(Key::new(key), KeyInternal::new(key)); + } + } + + let mut remapping = HashMap::new(); + self.inner.compact(|_, from, to| { + remapping.insert(from, to); + true + }); + + // At this point `key_map` contains a mapping for every element. + for internal_key in self.key_map.values_mut() { + if let Some(new_internal_key) = remapping.get(&internal_key.index) { + *internal_key = KeyInternal::new(*new_internal_key); + } + } + + if self.key_map.capacity() > 2 * self.key_map.len() { + self.key_map.shrink_to_fit(); + } + + self.compact_called = true; + } + + // Tries to re-map a `Key` that was given out to the user to its + // corresponding internal key. 
+ fn remap_key(&self, key: &Key) -> Option { + let key_map = &self.key_map; + if self.compact_called { + key_map.get(&*key).copied() + } else { + Some((*key).into()) + } + } + + fn create_new_key(&mut self) -> KeyInternal { + while self.key_map.contains_key(&Key::new(self.next_key_index)) { + self.next_key_index = self.next_key_index.wrapping_add(1); + } + + KeyInternal::new(self.next_key_index) + } + + pub(crate) fn len(&self) -> usize { + self.inner.len() + } + + pub(crate) fn capacity(&self) -> usize { + self.inner.capacity() + } + + pub(crate) fn clear(&mut self) { + self.inner.clear(); + self.key_map.clear(); + self.compact_called = false; + } + + pub(crate) fn reserve(&mut self, additional: usize) { + self.inner.reserve(additional); + + if self.compact_called { + self.key_map.reserve(additional); + } + } + + pub(crate) fn is_empty(&self) -> bool { + self.inner.is_empty() + } + + pub(crate) fn contains(&self, key: &Key) -> bool { + let remapped_key = self.remap_key(key); + + match remapped_key { + Some(internal_key) => self.inner.contains(internal_key.index), + None => false, + } + } +} + +impl fmt::Debug for SlabStorage +where + T: fmt::Debug, +{ + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + if fmt.alternate() { + fmt.debug_map().entries(self.inner.iter()).finish() + } else { + fmt.debug_struct("Slab") + .field("len", &self.len()) + .field("cap", &self.capacity()) + .finish() + } + } +} + +impl Index for SlabStorage { + type Output = Data; + + fn index(&self, key: Key) -> &Self::Output { + let remapped_key = self.remap_key(&key); + + match remapped_key { + Some(internal_key) => &self.inner[internal_key.index], + None => panic!("Invalid index {}", key.index), + } + } +} + +impl IndexMut for SlabStorage { + fn index_mut(&mut self, key: Key) -> &mut Data { + let remapped_key = self.remap_key(&key); + + match remapped_key { + Some(internal_key) => &mut self.inner[internal_key.index], + None => panic!("Invalid index {}", key.index), + } + } +} + +/// An entry in `DelayQueue` that has expired and been removed. /// /// Values are returned by [`DelayQueue::poll_expired`]. /// @@ -168,15 +389,23 @@ pub struct Expired { /// /// [`DelayQueue`]: struct@DelayQueue /// [`DelayQueue::insert`]: method@DelayQueue::insert -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub struct Key { index: usize, } +// Whereas `Key` is given out to users that use `DelayQueue`, internally we use +// `KeyInternal` as the key type in order to make the logic of mapping between keys +// as a result of `compact` calls clearer. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +struct KeyInternal { + index: usize, +} + #[derive(Debug)] struct Stack { /// Head of the stack - head: Option, + head: Option, _p: PhantomData T>, } @@ -193,24 +422,24 @@ struct Data { expired: bool, /// Next entry in the stack - next: Option, + next: Option, /// Previous entry in the stack - prev: Option, + prev: Option, } /// Maximum number of entries the queue can handle const MAX_ENTRIES: usize = (1 << 30) - 1; impl DelayQueue { - /// Creates a new, empty, `DelayQueue` + /// Creates a new, empty, `DelayQueue`. /// /// The queue will not allocate storage until items are inserted into it. 
/// /// # Examples /// /// ```rust - /// # use tokio::time::DelayQueue; + /// # use tokio_util::time::DelayQueue; /// let delay_queue: DelayQueue = DelayQueue::new(); /// ``` pub fn new() -> DelayQueue { @@ -226,30 +455,31 @@ impl DelayQueue { /// # Examples /// /// ```rust - /// # use tokio::time::DelayQueue; + /// # use tokio_util::time::DelayQueue; /// # use std::time::Duration; /// /// # #[tokio::main] /// # async fn main() { - /// let mut delay_queue = DelayQueue::with_capacity(10); + /// let mut delay_queue = DelayQueue::with_capacity(10); /// - /// // These insertions are done without further allocation - /// for i in 0..10 { - /// delay_queue.insert(i, Duration::from_secs(i)); - /// } + /// // These insertions are done without further allocation + /// for i in 0..10 { + /// delay_queue.insert(i, Duration::from_secs(i)); + /// } /// - /// // This will make the queue allocate additional storage - /// delay_queue.insert(11, Duration::from_secs(11)); + /// // This will make the queue allocate additional storage + /// delay_queue.insert(11, Duration::from_secs(11)); /// # } /// ``` pub fn with_capacity(capacity: usize) -> DelayQueue { DelayQueue { wheel: Wheel::new(), - slab: Slab::with_capacity(capacity), + slab: SlabStorage::with_capacity(capacity), expired: Stack::default(), delay: None, - poll: wheel::Poll::new(0), + wheel_now: 0, start: Instant::now(), + waker: None, } } @@ -263,8 +493,8 @@ impl DelayQueue { /// `value` will be returned from [`poll_expired`]. If `when` has already been /// reached, then `value` is immediately made available to poll. /// - /// The return value represents the insertion and is used at an argument to - /// [`remove`] and [`reset`]. Note that [`Key`] is token and is reused once + /// The return value represents the insertion and is used as an argument to + /// [`remove`] and [`reset`]. Note that [`Key`] is a token and is reused once /// `value` is removed from the queue either by calling [`poll_expired`] after /// `when` is reached or by calling [`remove`]. At this point, the caller /// must take care to not use the returned [`Key`] again as it may reference @@ -281,17 +511,18 @@ impl DelayQueue { /// Basic usage /// /// ```rust - /// use tokio::time::{DelayQueue, Duration, Instant}; + /// use tokio::time::{Duration, Instant}; + /// use tokio_util::time::DelayQueue; /// /// # #[tokio::main] /// # async fn main() { - /// let mut delay_queue = DelayQueue::new(); - /// let key = delay_queue.insert_at( - /// "foo", Instant::now() + Duration::from_secs(5)); + /// let mut delay_queue = DelayQueue::new(); + /// let key = delay_queue.insert_at( + /// "foo", Instant::now() + Duration::from_secs(5)); /// - /// // Remove the entry - /// let item = delay_queue.remove(&key); - /// assert_eq!(*item.get_ref(), "foo"); + /// // Remove the entry + /// let item = delay_queue.remove(&key); + /// assert_eq!(*item.get_ref(), "foo"); /// # } /// ``` /// @@ -326,37 +557,45 @@ impl DelayQueue { }; if should_set_delay { + if let Some(waker) = self.waker.take() { + waker.wake(); + } + let delay_time = self.start + Duration::from_millis(when); if let Some(ref mut delay) = &mut self.delay { - delay.reset(delay_time); + delay.as_mut().reset(delay_time); } else { - self.delay = Some(delay_until(delay_time)); + self.delay = Some(Box::pin(sleep_until(delay_time))); } } - Key::new(key) + key } /// Attempts to pull out the next value of the delay queue, registering the /// current task for wakeup if the value is not yet available, and returning - /// None if the queue is exhausted. 
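As a quick orientation for the API whose docs are being rewritten above, here is a minimal end-to-end use of the queue. It is a sketch, not vendored code, and assumes tokio 1.x with the `macros`/`rt` features plus `std::future::poll_fn` (Rust 1.64+).

```rust
use std::future::poll_fn;
use std::time::Duration;
use tokio_util::time::DelayQueue;

#[tokio::main]
async fn main() {
    let mut queue = DelayQueue::new();
    let key = queue.insert("job", Duration::from_millis(10));

    // poll_expired yields each entry once its delay has elapsed.
    let expired = poll_fn(|cx| queue.poll_expired(cx)).await.unwrap();
    assert_eq!(expired.key(), key);
    assert_eq!(expired.into_inner(), "job");
}
```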
- pub fn poll_expired( - &mut self, - cx: &mut task::Context<'_>, - ) -> Poll, Error>>> { - let item = ready!(self.poll_idx(cx)); - Poll::Ready(item.map(|result| { - result.map(|idx| { - let data = self.slab.remove(idx); - debug_assert!(data.next.is_none()); - debug_assert!(data.prev.is_none()); + /// `None` if the queue is exhausted. + pub fn poll_expired(&mut self, cx: &mut task::Context<'_>) -> Poll>> { + if !self + .waker + .as_ref() + .map(|w| w.will_wake(cx.waker())) + .unwrap_or(false) + { + self.waker = Some(cx.waker().clone()); + } - Expired { - key: Key::new(idx), - data: data.inner, - deadline: self.start + Duration::from_millis(data.when), - } - }) + let item = ready!(self.poll_idx(cx)); + Poll::Ready(item.map(|key| { + let data = self.slab.remove(&key); + debug_assert!(data.next.is_none()); + debug_assert!(data.prev.is_none()); + + Expired { + key, + data: data.inner, + deadline: self.start + Duration::from_millis(data.when), + } })) } @@ -366,40 +605,42 @@ impl DelayQueue { /// This function is identical to `insert_at`, but takes a `Duration` /// instead of an `Instant`. /// - /// `value` is stored in the queue until `when` is reached. At which point, - /// `value` will be returned from [`poll_expired`]. If `when` has already been - /// reached, then `value` is immediately made available to poll. + /// `value` is stored in the queue until `timeout` duration has + /// elapsed after `insert` was called. At that point, `value` will + /// be returned from [`poll_expired`]. If `timeout` is a `Duration` of + /// zero, then `value` is immediately made available to poll. /// - /// The return value represents the insertion and is used at an argument to - /// [`remove`] and [`reset`]. Note that [`Key`] is token and is reused once - /// `value` is removed from the queue either by calling [`poll_expired`] after - /// `when` is reached or by calling [`remove`]. At this point, the caller - /// must take care to not use the returned [`Key`] again as it may reference - /// a different item in the queue. + /// The return value represents the insertion and is used as an + /// argument to [`remove`] and [`reset`]. Note that [`Key`] is a + /// token and is reused once `value` is removed from the queue + /// either by calling [`poll_expired`] after `timeout` has elapsed + /// or by calling [`remove`]. At this point, the caller must not + /// use the returned [`Key`] again as it may reference a different + /// item in the queue. /// /// See [type] level documentation for more details. /// /// # Panics /// - /// This function panics if `timeout` is greater than the maximum supported - /// duration. + /// This function panics if `timeout` is greater than the maximum + /// duration supported by the timer in the current `Runtime`. 
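The `will_wake` check in the new `poll_expired` is a general pattern: remember the most recent waker so a later state change (here, an insertion with an earlier deadline) can wake the pending task. Below is a standalone sketch of that pattern; `Wakeable` is a made-up type and `futures::task::noop_waker` is used purely to drive the demonstration.

```rust
use std::task::{Context, Poll, Waker};

// Hypothetical type, only to demonstrate the store-the-waker pattern.
struct Wakeable {
    ready: bool,
    waker: Option<Waker>,
}

impl Wakeable {
    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<()> {
        if self.ready {
            return Poll::Ready(());
        }
        // Store the caller's waker unless the one we already hold would wake it.
        if !self.waker.as_ref().map(|w| w.will_wake(cx.waker())).unwrap_or(false) {
            self.waker = Some(cx.waker().clone());
        }
        Poll::Pending
    }

    fn make_ready(&mut self) {
        self.ready = true;
        if let Some(waker) = self.waker.take() {
            waker.wake();
        }
    }
}

fn main() {
    let waker = futures::task::noop_waker();
    let mut cx = Context::from_waker(&waker);
    let mut w = Wakeable { ready: false, waker: None };
    assert!(w.poll_ready(&mut cx).is_pending());
    w.make_ready(); // would wake the stored task; here the waker is a no-op
    assert!(w.poll_ready(&mut cx).is_ready());
}
```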
/// /// # Examples /// /// Basic usage /// /// ```rust - /// use tokio::time::DelayQueue; + /// use tokio_util::time::DelayQueue; /// use std::time::Duration; /// /// # #[tokio::main] /// # async fn main() { - /// let mut delay_queue = DelayQueue::new(); - /// let key = delay_queue.insert("foo", Duration::from_secs(5)); + /// let mut delay_queue = DelayQueue::new(); + /// let key = delay_queue.insert("foo", Duration::from_secs(5)); /// - /// // Remove the entry - /// let item = delay_queue.remove(&key); - /// assert_eq!(*item.get_ref(), "foo"); + /// // Remove the entry + /// let item = delay_queue.remove(&key); + /// assert_eq!(*item.get_ref(), "foo"); /// # } /// ``` /// @@ -412,7 +653,7 @@ impl DelayQueue { self.insert_at(value, Instant::now() + timeout) } - fn insert_idx(&mut self, when: u64, key: usize) { + fn insert_idx(&mut self, when: u64, key: Key) { use self::wheel::{InsertError, Stack}; // Register the deadline with the timer wheel @@ -427,19 +668,20 @@ impl DelayQueue { } } - /// Removes the key fom the expired queue or the timer wheel - /// depending on its expiration status + /// Removes the key from the expired queue or the timer wheel + /// depending on its expiration status. /// /// # Panics - /// Panics if the key is not contained in the expired queue or the wheel + /// + /// Panics if the key is not contained in the expired queue or the wheel. fn remove_key(&mut self, key: &Key) { use crate::time::wheel::Stack; // Special case the `expired` queue - if self.slab[key.index].expired { - self.expired.remove(&key.index, &mut self.slab); + if self.slab[*key].expired { + self.expired.remove(key, &mut self.slab); } else { - self.wheel.remove(&key.index, &mut self.slab); + self.wheel.remove(key, &mut self.slab); } } @@ -458,22 +700,33 @@ impl DelayQueue { /// Basic usage /// /// ```rust - /// use tokio::time::DelayQueue; + /// use tokio_util::time::DelayQueue; /// use std::time::Duration; /// /// # #[tokio::main] /// # async fn main() { - /// let mut delay_queue = DelayQueue::new(); - /// let key = delay_queue.insert("foo", Duration::from_secs(5)); + /// let mut delay_queue = DelayQueue::new(); + /// let key = delay_queue.insert("foo", Duration::from_secs(5)); /// - /// // Remove the entry - /// let item = delay_queue.remove(&key); - /// assert_eq!(*item.get_ref(), "foo"); + /// // Remove the entry + /// let item = delay_queue.remove(&key); + /// assert_eq!(*item.get_ref(), "foo"); /// # } /// ``` pub fn remove(&mut self, key: &Key) -> Expired { + let prev_deadline = self.next_deadline(); + self.remove_key(key); - let data = self.slab.remove(key.index); + let data = self.slab.remove(key); + + let next_deadline = self.next_deadline(); + if prev_deadline != next_deadline { + match (next_deadline, &mut self.delay) { + (None, _) => self.delay = None, + (Some(deadline), Some(delay)) => delay.as_mut().reset(deadline), + (Some(deadline), None) => self.delay = Some(Box::pin(sleep_until(deadline))), + } + } Expired { key: Key::new(key.index), @@ -501,18 +754,19 @@ impl DelayQueue { /// Basic usage /// /// ```rust - /// use tokio::time::{DelayQueue, Duration, Instant}; + /// use tokio::time::{Duration, Instant}; + /// use tokio_util::time::DelayQueue; /// /// # #[tokio::main] /// # async fn main() { - /// let mut delay_queue = DelayQueue::new(); - /// let key = delay_queue.insert("foo", Duration::from_secs(5)); + /// let mut delay_queue = DelayQueue::new(); + /// let key = delay_queue.insert("foo", Duration::from_secs(5)); /// - /// // "foo" is scheduled to be returned in 5 seconds + /// 
// "foo" is scheduled to be returned in 5 seconds /// - /// delay_queue.reset_at(&key, Instant::now() + Duration::from_secs(10)); + /// delay_queue.reset_at(&key, Instant::now() + Duration::from_secs(10)); /// - /// // "foo"is now scheduled to be returned in 10 seconds + /// // "foo" is now scheduled to be returned in 10 seconds /// # } /// ``` pub fn reset_at(&mut self, key: &Key, when: Instant) { @@ -521,16 +775,63 @@ impl DelayQueue { // Normalize the deadline. Values cannot be set to expire in the past. let when = self.normalize_deadline(when); - self.slab[key.index].when = when; - self.insert_idx(when, key.index); + self.slab[*key].when = when; + self.slab[*key].expired = false; + + self.insert_idx(when, *key); let next_deadline = self.next_deadline(); if let (Some(ref mut delay), Some(deadline)) = (&mut self.delay, next_deadline) { - delay.reset(deadline); + // This should awaken us if necessary (ie, if already expired) + delay.as_mut().reset(deadline); } } - /// Returns the next time poll as determined by the wheel + /// Shrink the capacity of the slab, which `DelayQueue` uses internally for storage allocation. + /// This function is not guaranteed to, and in most cases, won't decrease the capacity of the slab + /// to the number of elements still contained in it, because elements cannot be moved to a different + /// index. To decrease the capacity to the size of the slab use [`compact`]. + /// + /// This function can take O(n) time even when the capacity cannot be reduced or the allocation is + /// shrunk in place. Repeated calls run in O(1) though. + /// + /// [`compact`]: method@Self::compact + pub fn shrink_to_fit(&mut self) { + self.slab.shrink_to_fit(); + } + + /// Shrink the capacity of the slab, which `DelayQueue` uses internally for storage allocation, + /// to the number of elements that are contained in it. + /// + /// This methods runs in O(n). 
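The `shrink_to_fit`/`compact` distinction documented above mirrors the underlying `slab` crate: `shrink_to_fit` never moves entries, so capacity is bounded below by the highest occupied index, whereas `compact` relocates entries and reports every move. The sketch below exercises `slab` 0.4 directly (a dependency-version assumption, not vendored code).

```rust
use slab::Slab;

fn main() {
    let mut slab: Slab<&str> = Slab::with_capacity(8);
    let _a = slab.insert("a"); // index 0
    let b = slab.insert("b");  // index 1
    let _c = slab.insert("c"); // index 2
    slab.remove(b);

    // Entries never move, so capacity stays at least as large as the highest
    // occupied index ("c" at 2) plus one.
    slab.shrink_to_fit();
    assert!(slab.capacity() >= 3);

    // `compact` fills the hole and reports every move, which is exactly what
    // SlabStorage's key_map has to absorb on behalf of handed-out keys.
    slab.compact(|_value, from, to| {
        println!("moved internal index {from} -> {to}");
        true // keep the entry
    });
    assert_eq!(slab.len(), 2);
}
```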
+ /// + /// # Examples + /// + /// Basic usage + /// + /// ```rust + /// use tokio_util::time::DelayQueue; + /// use std::time::Duration; + /// + /// # #[tokio::main] + /// # async fn main() { + /// let mut delay_queue = DelayQueue::with_capacity(10); + /// + /// let key1 = delay_queue.insert(5, Duration::from_secs(5)); + /// let key2 = delay_queue.insert(10, Duration::from_secs(10)); + /// let key3 = delay_queue.insert(15, Duration::from_secs(15)); + /// + /// delay_queue.remove(&key2); + /// + /// delay_queue.compact(); + /// assert_eq!(delay_queue.capacity(), 2); + /// # } + /// ``` + pub fn compact(&mut self) { + self.slab.compact(); + } + + /// Returns the next time to poll as determined by the wheel fn next_deadline(&mut self) -> Option { self.wheel .poll_at() @@ -557,19 +858,19 @@ impl DelayQueue { /// Basic usage /// /// ```rust - /// use tokio::time::DelayQueue; + /// use tokio_util::time::DelayQueue; /// use std::time::Duration; /// /// # #[tokio::main] /// # async fn main() { - /// let mut delay_queue = DelayQueue::new(); - /// let key = delay_queue.insert("foo", Duration::from_secs(5)); + /// let mut delay_queue = DelayQueue::new(); + /// let key = delay_queue.insert("foo", Duration::from_secs(5)); /// - /// // "foo" is scheduled to be returned in 5 seconds + /// // "foo" is scheduled to be returned in 5 seconds /// - /// delay_queue.reset(&key, Duration::from_secs(10)); + /// delay_queue.reset(&key, Duration::from_secs(10)); /// - /// // "foo"is now scheduled to be returned in 10 seconds + /// // "foo"is now scheduled to be returned in 10 seconds /// # } /// ``` pub fn reset(&mut self, key: &Key, timeout: Duration) { @@ -587,20 +888,20 @@ impl DelayQueue { /// # Examples /// /// ```rust - /// use tokio::time::DelayQueue; + /// use tokio_util::time::DelayQueue; /// use std::time::Duration; /// /// # #[tokio::main] /// # async fn main() { - /// let mut delay_queue = DelayQueue::new(); + /// let mut delay_queue = DelayQueue::new(); /// - /// delay_queue.insert("foo", Duration::from_secs(5)); + /// delay_queue.insert("foo", Duration::from_secs(5)); /// - /// assert!(!delay_queue.is_empty()); + /// assert!(!delay_queue.is_empty()); /// - /// delay_queue.clear(); + /// delay_queue.clear(); /// - /// assert!(delay_queue.is_empty()); + /// assert!(delay_queue.is_empty()); /// # } /// ``` pub fn clear(&mut self) { @@ -615,7 +916,7 @@ impl DelayQueue { /// # Examples /// /// ```rust - /// use tokio::time::DelayQueue; + /// use tokio_util::time::DelayQueue; /// /// let delay_queue: DelayQueue = DelayQueue::with_capacity(10); /// assert_eq!(delay_queue.capacity(), 10); @@ -629,15 +930,15 @@ impl DelayQueue { /// # Examples /// /// ```rust - /// use tokio::time::DelayQueue; + /// use tokio_util::time::DelayQueue; /// use std::time::Duration; /// /// # #[tokio::main] /// # async fn main() { - /// let mut delay_queue: DelayQueue = DelayQueue::with_capacity(10); - /// assert_eq!(delay_queue.len(), 0); - /// delay_queue.insert(3, Duration::from_secs(5)); - /// assert_eq!(delay_queue.len(), 1); + /// let mut delay_queue: DelayQueue = DelayQueue::with_capacity(10); + /// assert_eq!(delay_queue.len(), 0); + /// delay_queue.insert(3, Duration::from_secs(5)); + /// assert_eq!(delay_queue.len(), 1); /// # } /// ``` pub fn len(&self) -> usize { @@ -664,17 +965,17 @@ impl DelayQueue { /// # Examples /// /// ``` - /// use tokio::time::DelayQueue; + /// use tokio_util::time::DelayQueue; /// use std::time::Duration; /// /// # #[tokio::main] /// # async fn main() { - /// let mut delay_queue = 
DelayQueue::new(); + /// let mut delay_queue = DelayQueue::new(); /// - /// delay_queue.insert("hello", Duration::from_secs(10)); - /// delay_queue.reserve(10); + /// delay_queue.insert("hello", Duration::from_secs(10)); + /// delay_queue.reserve(10); /// - /// assert!(delay_queue.capacity() >= 11); + /// assert!(delay_queue.capacity() >= 11); /// # } /// ``` pub fn reserve(&mut self, additional: usize) { @@ -684,21 +985,21 @@ impl DelayQueue { /// Returns `true` if there are no items in the queue. /// /// Note that this function returns `false` even if all items have not yet - /// expired and a call to `poll` will return `NotReady`. + /// expired and a call to `poll` will return `Poll::Pending`. /// /// # Examples /// /// ``` - /// use tokio::time::DelayQueue; + /// use tokio_util::time::DelayQueue; /// use std::time::Duration; /// /// # #[tokio::main] /// # async fn main() { - /// let mut delay_queue = DelayQueue::new(); - /// assert!(delay_queue.is_empty()); + /// let mut delay_queue = DelayQueue::new(); + /// assert!(delay_queue.is_empty()); /// - /// delay_queue.insert("hello", Duration::from_secs(5)); - /// assert!(!delay_queue.is_empty()); + /// delay_queue.insert("hello", Duration::from_secs(5)); + /// assert!(!delay_queue.is_empty()); /// # } /// ``` pub fn is_empty(&self) -> bool { @@ -709,13 +1010,13 @@ impl DelayQueue { /// should be returned. /// /// A slot should be returned when the associated deadline has been reached. - fn poll_idx(&mut self, cx: &mut task::Context<'_>) -> Poll>> { + fn poll_idx(&mut self, cx: &mut task::Context<'_>) -> Poll> { use self::wheel::Stack; let expired = self.expired.pop(&mut self.slab); if expired.is_some() { - return Poll::Ready(expired.map(Ok)); + return Poll::Ready(expired); } loop { @@ -726,16 +1027,16 @@ impl DelayQueue { let now = crate::time::ms(delay.deadline() - self.start, crate::time::Round::Down); - self.poll = wheel::Poll::new(now); + self.wheel_now = now; } // We poll the wheel to get the next value out before finding the next deadline. - let wheel_idx = self.wheel.poll(&mut self.poll, &mut self.slab); + let wheel_idx = self.wheel.poll(self.wheel_now, &mut self.slab); - self.delay = self.next_deadline().map(delay_until); + self.delay = self.next_deadline().map(|when| Box::pin(sleep_until(when))); if let Some(idx) = wheel_idx { - return Poll::Ready(Some(Ok(idx))); + return Poll::Ready(Some(idx)); } if self.delay.is_none() { @@ -764,11 +1065,10 @@ impl Default for DelayQueue { } } -#[cfg(feature = "stream")] impl futures_core::Stream for DelayQueue { // DelayQueue seems much more specific, where a user may care that it // has reached capacity, so return those errors instead of panicking. 
- type Item = Result, Error>; + type Item = Expired; fn poll_next(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll> { DelayQueue::poll_expired(self.get_mut(), cx) @@ -776,9 +1076,9 @@ impl futures_core::Stream for DelayQueue { } impl wheel::Stack for Stack { - type Owned = usize; - type Borrowed = usize; - type Store = Slab>; + type Owned = Key; + type Borrowed = Key; + type Store = SlabStorage; fn is_empty(&self) -> bool { self.head.is_none() @@ -797,28 +1097,29 @@ impl wheel::Stack for Stack { } store[item].next = old; - self.head = Some(item) + self.head = Some(item); } fn pop(&mut self, store: &mut Self::Store) -> Option { - if let Some(idx) = self.head { - self.head = store[idx].next; + if let Some(key) = self.head { + self.head = store[key].next; if let Some(idx) = self.head { store[idx].prev = None; } - store[idx].next = None; - debug_assert!(store[idx].prev.is_none()); + store[key].next = None; + debug_assert!(store[key].prev.is_none()); - Some(idx) + Some(key) } else { None } } fn remove(&mut self, item: &Self::Borrowed, store: &mut Self::Store) { - assert!(store.contains(*item)); + let key = *item; + assert!(store.contains(item)); // Ensure that the entry is in fact contained by the stack debug_assert!({ @@ -827,29 +1128,31 @@ impl wheel::Stack for Stack { let mut contains = false; while let Some(idx) = next { + let data = &store[idx]; + if idx == *item { debug_assert!(!contains); contains = true; } - next = store[idx].next; + next = data.next; } contains }); - if let Some(next) = store[*item].next { - store[next].prev = store[*item].prev; + if let Some(next) = store[key].next { + store[next].prev = store[key].prev; } - if let Some(prev) = store[*item].prev { - store[prev].next = store[*item].next; + if let Some(prev) = store[key].prev { + store[prev].next = store[key].next; } else { - self.head = store[*item].next; + self.head = store[key].next; } - store[*item].next = None; - store[*item].prev = None; + store[key].next = None; + store[key].prev = None; } fn when(item: &Self::Borrowed, store: &Self::Store) -> u64 { @@ -872,6 +1175,24 @@ impl Key { } } +impl KeyInternal { + pub(crate) fn new(index: usize) -> KeyInternal { + KeyInternal { index } + } +} + +impl From for KeyInternal { + fn from(item: Key) -> Self { + KeyInternal::new(item.index) + } +} + +impl From for Key { + fn from(item: KeyInternal) -> Self { + Key::new(item.index) + } +} + impl Expired { /// Returns a reference to the inner value. pub fn get_ref(&self) -> &T { @@ -892,4 +1213,9 @@ impl Expired { pub fn deadline(&self) -> Instant { self.deadline } + + /// Returns the key that the expiration is indexed by. + pub fn key(&self) -> Key { + self.key + } } diff --git a/third_party/rust/tokio-util/src/time/mod.rs b/third_party/rust/tokio-util/src/time/mod.rs new file mode 100644 index 000000000000..2d3400836045 --- /dev/null +++ b/third_party/rust/tokio-util/src/time/mod.rs @@ -0,0 +1,47 @@ +//! Additional utilities for tracking time. +//! +//! This module provides additional utilities for executing code after a set period +//! of time. Currently there is only one: +//! +//! * `DelayQueue`: A queue where items are returned once the requested delay +//! has expired. +//! +//! This type must be used from within the context of the `Runtime`. + +use std::time::Duration; + +mod wheel; + +pub mod delay_queue; + +#[doc(inline)] +pub use delay_queue::DelayQueue; + +// ===== Internal utils ===== + +enum Round { + Up, + Down, +} + +/// Convert a `Duration` to milliseconds, rounding up and saturating at +/// `u64::MAX`. 
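Since the `#[cfg(feature = "stream")]` gate is gone and the stream item is now a plain `Expired` value rather than a `Result`, the queue can be driven with ordinary `Stream` combinators. A usage sketch (not vendored code), assuming the `futures` crate for `StreamExt`:

```rust
use std::time::Duration;

use futures::StreamExt; // drives the futures_core::Stream impl above
use tokio_util::time::DelayQueue;

#[tokio::main]
async fn main() {
    let mut queue = DelayQueue::new();
    queue.insert("later", Duration::from_millis(20));
    queue.insert("sooner", Duration::from_millis(5));

    // Entries arrive in deadline order; the stream ends once the queue is empty.
    while let Some(expired) = queue.next().await {
        println!("expired: {}", expired.into_inner());
    }
}
```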
+/// +/// The saturating is fine because `u64::MAX` milliseconds are still many +/// million years. +#[inline] +fn ms(duration: Duration, round: Round) -> u64 { + const NANOS_PER_MILLI: u32 = 1_000_000; + const MILLIS_PER_SEC: u64 = 1_000; + + // Round up. + let millis = match round { + Round::Up => (duration.subsec_nanos() + NANOS_PER_MILLI - 1) / NANOS_PER_MILLI, + Round::Down => duration.subsec_millis(), + }; + + duration + .as_secs() + .saturating_mul(MILLIS_PER_SEC) + .saturating_add(u64::from(millis)) +} diff --git a/third_party/rust/tokio-0.2.25/src/time/wheel/level.rs b/third_party/rust/tokio-util/src/time/wheel/level.rs similarity index 99% rename from third_party/rust/tokio-0.2.25/src/time/wheel/level.rs rename to third_party/rust/tokio-util/src/time/wheel/level.rs index 49f9bfb9cf04..8ea30af30fd6 100644 --- a/third_party/rust/tokio-0.2.25/src/time/wheel/level.rs +++ b/third_party/rust/tokio-util/src/time/wheel/level.rs @@ -46,7 +46,7 @@ impl Level { () => { T::default() }; - }; + } Level { level, @@ -233,14 +233,13 @@ fn slot_for(duration: u64, level: usize) -> usize { ((duration >> (level * 6)) % LEVEL_MULT as u64) as usize } -/* #[cfg(all(test, not(loom)))] mod test { use super::*; #[test] fn test_slot_for() { - for pos in 1..64 { + for pos in 0..64 { assert_eq!(pos as usize, slot_for(pos, 0)); } @@ -252,4 +251,3 @@ mod test { } } } -*/ diff --git a/third_party/rust/tokio-0.2.25/src/time/wheel/mod.rs b/third_party/rust/tokio-util/src/time/wheel/mod.rs similarity index 84% rename from third_party/rust/tokio-0.2.25/src/time/wheel/mod.rs rename to third_party/rust/tokio-util/src/time/wheel/mod.rs index a2ef27fc6c51..4191e401df41 100644 --- a/third_party/rust/tokio-0.2.25/src/time/wheel/mod.rs +++ b/third_party/rust/tokio-util/src/time/wheel/mod.rs @@ -6,6 +6,7 @@ mod stack; pub(crate) use self::stack::Stack; use std::borrow::Borrow; +use std::fmt::Debug; use std::usize; /// Timing wheel implementation. @@ -51,13 +52,6 @@ pub(crate) enum InsertError { Invalid, } -/// Poll expirations from the wheel -#[derive(Debug, Default)] -pub(crate) struct Poll { - now: u64, - expiration: Option, -} - impl Wheel where T: Stack, @@ -123,9 +117,17 @@ where Ok(()) } - /// Remove `item` from thee timing wheel. + /// Remove `item` from the timing wheel. pub(crate) fn remove(&mut self, item: &T::Borrowed, store: &mut T::Store) { let when = T::when(item, store); + + assert!( + self.elapsed <= when, + "elapsed={}; when={}", + self.elapsed, + when + ); + let level = self.level_for(when); self.levels[level].remove_entry(when, item, store); @@ -136,19 +138,18 @@ where self.next_expiration().map(|expiration| expiration.deadline) } - pub(crate) fn poll(&mut self, poll: &mut Poll, store: &mut T::Store) -> Option { + /// Advances the timer up to the instant represented by `now`. 
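A worked check of the rounding behaviour in `ms` above, reimplemented standalone so the two `Round` modes can be compared on the same `Duration` (illustrative only):

```rust
use std::time::Duration;

fn ms_round_up(d: Duration) -> u64 {
    const NANOS_PER_MILLI: u32 = 1_000_000;
    let millis = (d.subsec_nanos() + NANOS_PER_MILLI - 1) / NANOS_PER_MILLI;
    d.as_secs().saturating_mul(1_000).saturating_add(u64::from(millis))
}

fn ms_round_down(d: Duration) -> u64 {
    d.as_secs()
        .saturating_mul(1_000)
        .saturating_add(u64::from(d.subsec_millis()))
}

fn main() {
    let d = Duration::new(1, 1_500_000); // 1s + 1.5ms
    assert_eq!(ms_round_up(d), 1_002);   // the extra 0.5ms rounds up
    assert_eq!(ms_round_down(d), 1_001); // ...and truncates going down
    println!("up = {}, down = {}", ms_round_up(d), ms_round_down(d));
}
```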
+ pub(crate) fn poll(&mut self, now: u64, store: &mut T::Store) -> Option { loop { - if poll.expiration.is_none() { - poll.expiration = self.next_expiration().and_then(|expiration| { - if expiration.deadline > poll.now { - None - } else { - Some(expiration) - } - }); - } + let expiration = self.next_expiration().and_then(|expiration| { + if expiration.deadline > now { + None + } else { + Some(expiration) + } + }); - match poll.expiration { + match expiration { Some(ref expiration) => { if let Some(item) = self.poll_expiration(expiration, store) { return Some(item); @@ -157,12 +158,14 @@ where self.set_elapsed(expiration.deadline); } None => { - self.set_elapsed(poll.now); + // in this case the poll did not indicate an expiration + // _and_ we were not able to find a next expiration in + // the current list of timers. advance to the poll's + // current time and do nothing else. + self.set_elapsed(now); return None; } } - - poll.expiration = None; } } @@ -197,6 +200,10 @@ where res } + /// iteratively find entries that are between the wheel's current + /// time and the expiration time. for each in that population either + /// return it for notification (in the case of the last level) or tier + /// it down to the next level (in all other cases). pub(crate) fn poll_expiration( &mut self, expiration: &Expiration, @@ -242,31 +249,24 @@ where } fn level_for(elapsed: u64, when: u64) -> usize { - let masked = elapsed ^ when; + const SLOT_MASK: u64 = (1 << 6) - 1; - assert!(masked != 0, "elapsed={}; when={}", elapsed, when); + // Mask in the trailing bits ignored by the level calculation in order to cap + // the possible leading zeros + let masked = elapsed ^ when | SLOT_MASK; let leading_zeros = masked.leading_zeros() as usize; let significant = 63 - leading_zeros; significant / 6 } -impl Poll { - pub(crate) fn new(now: u64) -> Poll { - Poll { - now, - expiration: None, - } - } -} - #[cfg(all(test, not(loom)))] mod test { use super::*; #[test] fn test_level_for() { - for pos in 1..64 { + for pos in 0..64 { assert_eq!( 0, level_for(0, pos), diff --git a/third_party/rust/tokio-0.2.25/src/time/wheel/stack.rs b/third_party/rust/tokio-util/src/time/wheel/stack.rs similarity index 91% rename from third_party/rust/tokio-0.2.25/src/time/wheel/stack.rs rename to third_party/rust/tokio-util/src/time/wheel/stack.rs index 6e55c38ccda7..c87adcafda8a 100644 --- a/third_party/rust/tokio-0.2.25/src/time/wheel/stack.rs +++ b/third_party/rust/tokio-util/src/time/wheel/stack.rs @@ -1,4 +1,6 @@ use std::borrow::Borrow; +use std::cmp::Eq; +use std::hash::Hash; /// Abstracts the stack operations needed to track timeouts. 
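The new `SLOT_MASK` trick replaces the old `masked != 0` assertion: OR-ing in the low six bits caps the number of leading zeros, so `elapsed == when` now falls out as level 0 instead of being a forbidden input. The function below lifts the hunk's expression into a standalone, runnable form with a few spot checks.

```rust
fn level_for(elapsed: u64, when: u64) -> usize {
    const SLOT_MASK: u64 = (1 << 6) - 1;

    // Same expression as the wheel code; parentheses added only for clarity.
    let masked = (elapsed ^ when) | SLOT_MASK;
    let leading_zeros = masked.leading_zeros() as usize;
    let significant = 63 - leading_zeros;
    significant / 6
}

fn main() {
    assert_eq!(level_for(0, 0), 0);       // equal inputs no longer need a special case
    assert_eq!(level_for(0, 63), 0);      // still inside the 64-slot first level
    assert_eq!(level_for(0, 64), 1);      // 2^6 crosses into the second level
    assert_eq!(level_for(0, 64 * 64), 2); // 2^12 crosses into the third
    println!("level_for checks passed");
}
```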
pub(crate) trait Stack: Default { @@ -6,7 +8,7 @@ pub(crate) trait Stack: Default { type Owned: Borrow; /// Borrowed item - type Borrowed; + type Borrowed: Eq + Hash; /// Item storage, this allows a slab to be used instead of just the heap type Store; diff --git a/third_party/rust/tokio-util/src/udp/frame.rs b/third_party/rust/tokio-util/src/udp/frame.rs index 5b098bd49b2c..d900fd7691e7 100644 --- a/third_party/rust/tokio-util/src/udp/frame.rs +++ b/third_party/rust/tokio-util/src/udp/frame.rs @@ -1,16 +1,20 @@ use crate::codec::{Decoder, Encoder}; -use tokio::{net::UdpSocket, stream::Stream}; +use futures_core::Stream; +use tokio::{io::ReadBuf, net::UdpSocket}; use bytes::{BufMut, BytesMut}; use futures_core::ready; use futures_sink::Sink; -use std::io; -use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4}; use std::pin::Pin; use std::task::{Context, Poll}; +use std::{ + borrow::Borrow, + net::{Ipv4Addr, SocketAddr, SocketAddrV4}, +}; +use std::{io, mem::MaybeUninit}; -/// A unified `Stream` and `Sink` interface to an underlying `UdpSocket`, using +/// A unified [`Stream`] and [`Sink`] interface to an underlying `UdpSocket`, using /// the `Encoder` and `Decoder` traits to encode and decode frames. /// /// Raw UDP sockets work with datagrams, but higher-level code usually wants to @@ -19,26 +23,40 @@ use std::task::{Context, Poll}; /// handle encoding and decoding of messages frames. Note that the incoming and /// outgoing frame types may be distinct. /// -/// This function returns a *single* object that is both `Stream` and `Sink`; +/// This function returns a *single* object that is both [`Stream`] and [`Sink`]; /// grouping this into a single object is often useful for layering things which /// require both read and write access to the underlying object. /// /// If you want to work more directly with the streams and sink, consider -/// calling `split` on the `UdpFramed` returned by this method, which will break +/// calling [`split`] on the `UdpFramed` returned by this method, which will break /// them into separate objects, allowing them to interact more easily. +/// +/// [`Stream`]: futures_core::Stream +/// [`Sink`]: futures_sink::Sink +/// [`split`]: https://docs.rs/futures/0.3/futures/stream/trait.StreamExt.html#method.split #[must_use = "sinks do nothing unless polled"] -#[cfg_attr(docsrs, doc(all(feature = "codec", feature = "udp")))] #[derive(Debug)] -pub struct UdpFramed { - socket: UdpSocket, +pub struct UdpFramed { + socket: T, codec: C, rd: BytesMut, wr: BytesMut, out_addr: SocketAddr, flushed: bool, + is_readable: bool, + current_addr: Option, } -impl Stream for UdpFramed { +const INITIAL_RD_CAPACITY: usize = 64 * 1024; +const INITIAL_WR_CAPACITY: usize = 8 * 1024; + +impl Unpin for UdpFramed {} + +impl Stream for UdpFramed +where + T: Borrow, + C: Decoder, +{ type Item = Result<(C::Item, SocketAddr), C::Error>; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { @@ -46,31 +64,48 @@ impl Stream for UdpFramed { pin.rd.reserve(INITIAL_RD_CAPACITY); - let (_n, addr) = unsafe { - // Read into the buffer without having to initialize the memory. - // - // safety: we know tokio::net::UdpSocket never reads from the memory - // during a recv - let res = { - let bytes = &mut *(pin.rd.bytes_mut() as *mut _ as *mut [u8]); - ready!(Pin::new(&mut pin.socket).poll_recv_from(cx, bytes)) + loop { + // Are there still bytes left in the read buffer to decode? + if pin.is_readable { + if let Some(frame) = pin.codec.decode_eof(&mut pin.rd)? 
{ + let current_addr = pin + .current_addr + .expect("will always be set before this line is called"); + + return Poll::Ready(Some(Ok((frame, current_addr)))); + } + + // if this line has been reached then decode has returned `None`. + pin.is_readable = false; + pin.rd.clear(); + } + + // We're out of data. Try and fetch more data to decode + let addr = unsafe { + // Convert `&mut [MaybeUnit]` to `&mut [u8]` because we will be + // writing to it via `poll_recv_from` and therefore initializing the memory. + let buf = &mut *(pin.rd.chunk_mut() as *mut _ as *mut [MaybeUninit]); + let mut read = ReadBuf::uninit(buf); + let ptr = read.filled().as_ptr(); + let res = ready!(pin.socket.borrow().poll_recv_from(cx, &mut read)); + + assert_eq!(ptr, read.filled().as_ptr()); + let addr = res?; + pin.rd.advance_mut(read.filled().len()); + addr }; - let (n, addr) = res?; - pin.rd.advance_mut(n); - (n, addr) - }; - - let frame_res = pin.codec.decode(&mut pin.rd); - pin.rd.clear(); - let frame = frame_res?; - let result = frame.map(|frame| Ok((frame, addr))); // frame -> (frame, addr) - - Poll::Ready(result) + pin.current_addr = Some(addr); + pin.is_readable = true; + } } } -impl + Unpin> Sink<(I, SocketAddr)> for UdpFramed { +impl Sink<(I, SocketAddr)> for UdpFramed +where + T: Borrow, + C: Encoder, +{ type Error = C::Error; fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { @@ -102,13 +137,13 @@ impl + Unpin> Sink<(I, SocketAddr)> for UdpFramed { } let Self { - ref mut socket, + ref socket, ref mut out_addr, ref mut wr, .. } = *self; - let n = ready!(socket.poll_send_to(cx, &wr, &out_addr))?; + let n = ready!(socket.borrow().poll_send_to(cx, wr, *out_addr))?; let wrote_all = n == self.wr.len(); self.wr.clear(); @@ -133,21 +168,23 @@ impl + Unpin> Sink<(I, SocketAddr)> for UdpFramed { } } -const INITIAL_RD_CAPACITY: usize = 64 * 1024; -const INITIAL_WR_CAPACITY: usize = 8 * 1024; - -impl UdpFramed { +impl UdpFramed +where + T: Borrow, +{ /// Create a new `UdpFramed` backed by the given socket and codec. /// /// See struct level documentation for more details. - pub fn new(socket: UdpSocket, codec: C) -> UdpFramed { - UdpFramed { + pub fn new(socket: T, codec: C) -> UdpFramed { + Self { socket, codec, out_addr: SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(0, 0, 0, 0), 0)), rd: BytesMut::with_capacity(INITIAL_RD_CAPACITY), wr: BytesMut::with_capacity(INITIAL_WR_CAPACITY), flushed: true, + is_readable: false, + current_addr: None, } } @@ -158,24 +195,51 @@ impl UdpFramed { /// Care should be taken to not tamper with the underlying stream of data /// coming in as it may corrupt the stream of frames otherwise being worked /// with. - pub fn get_ref(&self) -> &UdpSocket { + pub fn get_ref(&self) -> &T { &self.socket } - /// Returns a mutable reference to the underlying I/O stream wrapped by - /// `Framed`. + /// Returns a mutable reference to the underlying I/O stream wrapped by `Framed`. /// /// # Note /// /// Care should be taken to not tamper with the underlying stream of data /// coming in as it may corrupt the stream of frames otherwise being worked /// with. - pub fn get_mut(&mut self) -> &mut UdpSocket { + pub fn get_mut(&mut self) -> &mut T { &mut self.socket } + /// Returns a reference to the underlying codec wrapped by + /// `Framed`. + /// + /// Note that care should be taken to not tamper with the underlying codec + /// as it may corrupt the stream of frames otherwise being worked with. 
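Because `UdpFramed` is now generic over `T: Borrow<UdpSocket>`, one socket can back separate read and write framers, for example through an `Arc`. A usage sketch (not vendored code), assuming tokio 1.x with the `net`, `rt` and `macros` features, tokio-util's codec/UDP features, and the `futures` and `bytes` crates:

```rust
use std::sync::Arc;

use bytes::Bytes;
use futures::{SinkExt, StreamExt};
use tokio::net::UdpSocket;
use tokio_util::codec::BytesCodec;
use tokio_util::udp::UdpFramed;

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let socket = Arc::new(UdpSocket::bind("127.0.0.1:0").await?);
    let addr = socket.local_addr()?;

    // Two framers over the same socket: one used as a Sink, one as a Stream.
    let mut sink = UdpFramed::new(Arc::clone(&socket), BytesCodec::new());
    let mut stream = UdpFramed::new(socket, BytesCodec::new());

    // Send a datagram to ourselves and decode it back out of the stream.
    sink.send((Bytes::from_static(b"ping"), addr)).await?;
    let (frame, from) = stream.next().await.unwrap()?;
    assert_eq!(&frame[..], b"ping");
    assert_eq!(from, addr);
    Ok(())
}
```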
+ pub fn codec(&self) -> &C { + &self.codec + } + + /// Returns a mutable reference to the underlying codec wrapped by + /// `UdpFramed`. + /// + /// Note that care should be taken to not tamper with the underlying codec + /// as it may corrupt the stream of frames otherwise being worked with. + pub fn codec_mut(&mut self) -> &mut C { + &mut self.codec + } + + /// Returns a reference to the read buffer. + pub fn read_buffer(&self) -> &BytesMut { + &self.rd + } + + /// Returns a mutable reference to the read buffer. + pub fn read_buffer_mut(&mut self) -> &mut BytesMut { + &mut self.rd + } + /// Consumes the `Framed`, returning its underlying I/O stream. - pub fn into_inner(self) -> UdpSocket { + pub fn into_inner(self) -> T { self.socket } } diff --git a/third_party/rust/tokio-util/src/udp/mod.rs b/third_party/rust/tokio-util/src/udp/mod.rs index 7c4bb2b3cb50..f88ea030aa35 100644 --- a/third_party/rust/tokio-util/src/udp/mod.rs +++ b/third_party/rust/tokio-util/src/udp/mod.rs @@ -1,4 +1,4 @@ //! UDP framing mod frame; -pub use self::frame::UdpFramed; +pub use frame::UdpFramed; diff --git a/third_party/rust/tokio-util/tests/_require_full.rs b/third_party/rust/tokio-util/tests/_require_full.rs new file mode 100644 index 000000000000..045934d17599 --- /dev/null +++ b/third_party/rust/tokio-util/tests/_require_full.rs @@ -0,0 +1,2 @@ +#![cfg(not(feature = "full"))] +compile_error!("run tokio-util tests with `--features full`"); diff --git a/third_party/rust/tokio-util/tests/codecs.rs b/third_party/rust/tokio-util/tests/codecs.rs index a22effbeb308..f9a780140a23 100644 --- a/third_party/rust/tokio-util/tests/codecs.rs +++ b/third_party/rust/tokio-util/tests/codecs.rs @@ -1,6 +1,6 @@ #![warn(rust_2018_idioms)] -use tokio_util::codec::{BytesCodec, Decoder, Encoder, LinesCodec}; +use tokio_util::codec::{AnyDelimiterCodec, BytesCodec, Decoder, Encoder, LinesCodec}; use bytes::{BufMut, Bytes, BytesMut}; @@ -38,6 +38,9 @@ fn bytes_encoder() { codec .encode(Bytes::from_static(&[0; INITIAL_CAPACITY + 1]), &mut buf) .unwrap(); + codec + .encode(BytesMut::from(&b"hello"[..]), &mut buf) + .unwrap(); } #[test] @@ -201,7 +204,26 @@ fn lines_decoder_discard_repeat() { buf.put_slice(b"aa"); assert!(codec.decode(buf).is_err()); buf.put_slice(b"a"); + assert_eq!(None, codec.decode(buf).unwrap()); +} + +// Regression test for [subsequent calls to LinesCodec decode does not return the desired results bug](https://github.com/tokio-rs/tokio/issues/3555) +#[test] +fn lines_decoder_max_length_underrun_twice() { + const MAX_LENGTH: usize = 11; + + let mut codec = LinesCodec::new_with_max_length(MAX_LENGTH); + let buf = &mut BytesMut::new(); + + buf.reserve(200); + buf.put_slice(b"line "); + assert_eq!(None, codec.decode(buf).unwrap()); + buf.put_slice(b"too very l"); assert!(codec.decode(buf).is_err()); + buf.put_slice(b"aaaaaaaaaaaaaaaaaaaaaaa"); + assert_eq!(None, codec.decode(buf).unwrap()); + buf.put_slice(b"ong\nshort\n"); + assert_eq!("short", codec.decode(buf).unwrap().unwrap()); } #[test] @@ -215,3 +237,228 @@ fn lines_encoder() { codec.encode("line 2", &mut buf).unwrap(); assert_eq!("line 1\nline 2\n", buf); } + +#[test] +fn any_delimiters_decoder_any_character() { + let mut codec = AnyDelimiterCodec::new(b",;\n\r".to_vec(), b",".to_vec()); + let buf = &mut BytesMut::new(); + buf.reserve(200); + buf.put_slice(b"chunk 1,chunk 2;chunk 3\n\r"); + assert_eq!("chunk 1", codec.decode(buf).unwrap().unwrap()); + assert_eq!("chunk 2", codec.decode(buf).unwrap().unwrap()); + assert_eq!("chunk 3", 
codec.decode(buf).unwrap().unwrap()); + assert_eq!("", codec.decode(buf).unwrap().unwrap()); + assert_eq!(None, codec.decode(buf).unwrap()); + assert_eq!(None, codec.decode_eof(buf).unwrap()); + buf.put_slice(b"k"); + assert_eq!(None, codec.decode(buf).unwrap()); + assert_eq!("k", codec.decode_eof(buf).unwrap().unwrap()); + assert_eq!(None, codec.decode(buf).unwrap()); + assert_eq!(None, codec.decode_eof(buf).unwrap()); +} + +#[test] +fn any_delimiters_decoder_max_length() { + const MAX_LENGTH: usize = 7; + + let mut codec = + AnyDelimiterCodec::new_with_max_length(b",;\n\r".to_vec(), b",".to_vec(), MAX_LENGTH); + let buf = &mut BytesMut::new(); + + buf.reserve(200); + buf.put_slice(b"chunk 1 is too long\nchunk 2\nchunk 3\r\nchunk 4\n\r\n"); + + assert!(codec.decode(buf).is_err()); + + let chunk = codec.decode(buf).unwrap().unwrap(); + assert!( + chunk.len() <= MAX_LENGTH, + "{:?}.len() <= {:?}", + chunk, + MAX_LENGTH + ); + assert_eq!("chunk 2", chunk); + + let chunk = codec.decode(buf).unwrap().unwrap(); + assert!( + chunk.len() <= MAX_LENGTH, + "{:?}.len() <= {:?}", + chunk, + MAX_LENGTH + ); + assert_eq!("chunk 3", chunk); + + // \r\n cause empty chunk + let chunk = codec.decode(buf).unwrap().unwrap(); + assert!( + chunk.len() <= MAX_LENGTH, + "{:?}.len() <= {:?}", + chunk, + MAX_LENGTH + ); + assert_eq!("", chunk); + + let chunk = codec.decode(buf).unwrap().unwrap(); + assert!( + chunk.len() <= MAX_LENGTH, + "{:?}.len() <= {:?}", + chunk, + MAX_LENGTH + ); + assert_eq!("chunk 4", chunk); + + let chunk = codec.decode(buf).unwrap().unwrap(); + assert!( + chunk.len() <= MAX_LENGTH, + "{:?}.len() <= {:?}", + chunk, + MAX_LENGTH + ); + assert_eq!("", chunk); + + let chunk = codec.decode(buf).unwrap().unwrap(); + assert!( + chunk.len() <= MAX_LENGTH, + "{:?}.len() <= {:?}", + chunk, + MAX_LENGTH + ); + assert_eq!("", chunk); + + assert_eq!(None, codec.decode(buf).unwrap()); + assert_eq!(None, codec.decode_eof(buf).unwrap()); + buf.put_slice(b"k"); + assert_eq!(None, codec.decode(buf).unwrap()); + + let chunk = codec.decode_eof(buf).unwrap().unwrap(); + assert!( + chunk.len() <= MAX_LENGTH, + "{:?}.len() <= {:?}", + chunk, + MAX_LENGTH + ); + assert_eq!("k", chunk); + + assert_eq!(None, codec.decode(buf).unwrap()); + assert_eq!(None, codec.decode_eof(buf).unwrap()); + + // Delimiter that's one character too long. This could cause an out of bounds + // error if we peek at the next characters using slice indexing. 
+ buf.put_slice(b"aaabbbcc"); + assert!(codec.decode(buf).is_err()); +} + +#[test] +fn any_delimiter_decoder_max_length_underrun() { + const MAX_LENGTH: usize = 7; + + let mut codec = + AnyDelimiterCodec::new_with_max_length(b",;\n\r".to_vec(), b",".to_vec(), MAX_LENGTH); + let buf = &mut BytesMut::new(); + + buf.reserve(200); + buf.put_slice(b"chunk "); + assert_eq!(None, codec.decode(buf).unwrap()); + buf.put_slice(b"too l"); + assert!(codec.decode(buf).is_err()); + buf.put_slice(b"ong\n"); + assert_eq!(None, codec.decode(buf).unwrap()); + + buf.put_slice(b"chunk 2"); + assert_eq!(None, codec.decode(buf).unwrap()); + buf.put_slice(b","); + assert_eq!("chunk 2", codec.decode(buf).unwrap().unwrap()); +} + +#[test] +fn any_delimiter_decoder_max_length_underrun_twice() { + const MAX_LENGTH: usize = 11; + + let mut codec = + AnyDelimiterCodec::new_with_max_length(b",;\n\r".to_vec(), b",".to_vec(), MAX_LENGTH); + let buf = &mut BytesMut::new(); + + buf.reserve(200); + buf.put_slice(b"chunk "); + assert_eq!(None, codec.decode(buf).unwrap()); + buf.put_slice(b"too very l"); + assert!(codec.decode(buf).is_err()); + buf.put_slice(b"aaaaaaaaaaaaaaaaaaaaaaa"); + assert_eq!(None, codec.decode(buf).unwrap()); + buf.put_slice(b"ong\nshort\n"); + assert_eq!("short", codec.decode(buf).unwrap().unwrap()); +} +#[test] +fn any_delimiter_decoder_max_length_bursts() { + const MAX_LENGTH: usize = 11; + + let mut codec = + AnyDelimiterCodec::new_with_max_length(b",;\n\r".to_vec(), b",".to_vec(), MAX_LENGTH); + let buf = &mut BytesMut::new(); + + buf.reserve(200); + buf.put_slice(b"chunk "); + assert_eq!(None, codec.decode(buf).unwrap()); + buf.put_slice(b"too l"); + assert_eq!(None, codec.decode(buf).unwrap()); + buf.put_slice(b"ong\n"); + assert!(codec.decode(buf).is_err()); +} + +#[test] +fn any_delimiter_decoder_max_length_big_burst() { + const MAX_LENGTH: usize = 11; + + let mut codec = + AnyDelimiterCodec::new_with_max_length(b",;\n\r".to_vec(), b",".to_vec(), MAX_LENGTH); + let buf = &mut BytesMut::new(); + + buf.reserve(200); + buf.put_slice(b"chunk "); + assert_eq!(None, codec.decode(buf).unwrap()); + buf.put_slice(b"too long!\n"); + assert!(codec.decode(buf).is_err()); +} + +#[test] +fn any_delimiter_decoder_max_length_delimiter_between_decodes() { + const MAX_LENGTH: usize = 5; + + let mut codec = + AnyDelimiterCodec::new_with_max_length(b",;\n\r".to_vec(), b",".to_vec(), MAX_LENGTH); + let buf = &mut BytesMut::new(); + + buf.reserve(200); + buf.put_slice(b"hello"); + assert_eq!(None, codec.decode(buf).unwrap()); + + buf.put_slice(b",world"); + assert_eq!("hello", codec.decode(buf).unwrap().unwrap()); +} + +#[test] +fn any_delimiter_decoder_discard_repeat() { + const MAX_LENGTH: usize = 1; + + let mut codec = + AnyDelimiterCodec::new_with_max_length(b",;\n\r".to_vec(), b",".to_vec(), MAX_LENGTH); + let buf = &mut BytesMut::new(); + + buf.reserve(200); + buf.put_slice(b"aa"); + assert!(codec.decode(buf).is_err()); + buf.put_slice(b"a"); + assert_eq!(None, codec.decode(buf).unwrap()); +} + +#[test] +fn any_delimiter_encoder() { + let mut codec = AnyDelimiterCodec::new(b",".to_vec(), b";--;".to_vec()); + let mut buf = BytesMut::new(); + + codec.encode("chunk 1", &mut buf).unwrap(); + assert_eq!("chunk 1;--;", buf); + + codec.encode("chunk 2", &mut buf).unwrap(); + assert_eq!("chunk 1;--;chunk 2;--;", buf); +} diff --git a/third_party/rust/tokio-util/tests/context.rs b/third_party/rust/tokio-util/tests/context.rs new file mode 100644 index 000000000000..7510f36fd175 --- /dev/null +++ 
b/third_party/rust/tokio-util/tests/context.rs @@ -0,0 +1,24 @@ +#![cfg(feature = "rt")] +#![warn(rust_2018_idioms)] + +use tokio::runtime::Builder; +use tokio::time::*; +use tokio_util::context::RuntimeExt; + +#[test] +fn tokio_context_with_another_runtime() { + let rt1 = Builder::new_multi_thread() + .worker_threads(1) + // no timer! + .build() + .unwrap(); + let rt2 = Builder::new_multi_thread() + .worker_threads(1) + .enable_all() + .build() + .unwrap(); + + // Without the `HandleExt.wrap()` there would be a panic because there is + // no timer running, since it would be referencing runtime r1. + let _ = rt1.block_on(rt2.wrap(async move { sleep(Duration::from_millis(2)).await })); +} diff --git a/third_party/rust/tokio-util/tests/framed.rs b/third_party/rust/tokio-util/tests/framed.rs index d7ee3ef51fb9..ec8cdf00d09f 100644 --- a/third_party/rust/tokio-util/tests/framed.rs +++ b/third_party/rust/tokio-util/tests/framed.rs @@ -1,6 +1,6 @@ #![warn(rust_2018_idioms)] -use tokio::{prelude::*, stream::StreamExt}; +use tokio_stream::StreamExt; use tokio_test::assert_ok; use tokio_util::codec::{Decoder, Encoder, Framed, FramedParts}; @@ -12,7 +12,10 @@ use std::task::{Context, Poll}; const INITIAL_CAPACITY: usize = 8 * 1024; /// Encode and decode u32 values. -struct U32Codec; +#[derive(Default)] +struct U32Codec { + read_bytes: usize, +} impl Decoder for U32Codec { type Item = u32; @@ -24,6 +27,7 @@ impl Decoder for U32Codec { } let n = buf.split_to(4).get_u32(); + self.read_bytes += 4; Ok(Some(n)) } } @@ -39,6 +43,38 @@ impl Encoder for U32Codec { } } +/// Encode and decode u64 values. +#[derive(Default)] +struct U64Codec { + read_bytes: usize, +} + +impl Decoder for U64Codec { + type Item = u64; + type Error = io::Error; + + fn decode(&mut self, buf: &mut BytesMut) -> io::Result> { + if buf.len() < 8 { + return Ok(None); + } + + let n = buf.split_to(8).get_u64(); + self.read_bytes += 8; + Ok(Some(n)) + } +} + +impl Encoder for U64Codec { + type Error = io::Error; + + fn encode(&mut self, item: u64, dst: &mut BytesMut) -> io::Result<()> { + // Reserve space + dst.reserve(8); + dst.put_u64(item); + Ok(()) + } +} + /// This value should never be used struct DontReadIntoThis; @@ -51,30 +87,51 @@ impl Read for DontReadIntoThis { } } -impl AsyncRead for DontReadIntoThis { +impl tokio::io::AsyncRead for DontReadIntoThis { fn poll_read( self: Pin<&mut Self>, _cx: &mut Context<'_>, - _buf: &mut [u8], - ) -> Poll> { + _buf: &mut tokio::io::ReadBuf<'_>, + ) -> Poll> { unreachable!() } } #[tokio::test] async fn can_read_from_existing_buf() { - let mut parts = FramedParts::new(DontReadIntoThis, U32Codec); + let mut parts = FramedParts::new(DontReadIntoThis, U32Codec::default()); parts.read_buf = BytesMut::from(&[0, 0, 0, 42][..]); let mut framed = Framed::from_parts(parts); let num = assert_ok!(framed.next().await.unwrap()); assert_eq!(num, 42); + assert_eq!(framed.codec().read_bytes, 4); +} + +#[tokio::test] +async fn can_read_from_existing_buf_after_codec_changed() { + let mut parts = FramedParts::new(DontReadIntoThis, U32Codec::default()); + parts.read_buf = BytesMut::from(&[0, 0, 0, 42, 0, 0, 0, 0, 0, 0, 0, 84][..]); + + let mut framed = Framed::from_parts(parts); + let num = assert_ok!(framed.next().await.unwrap()); + + assert_eq!(num, 42); + assert_eq!(framed.codec().read_bytes, 4); + + let mut framed = framed.map_codec(|codec| U64Codec { + read_bytes: codec.read_bytes, + }); + let num = assert_ok!(framed.next().await.unwrap()); + + assert_eq!(num, 84); + assert_eq!(framed.codec().read_bytes, 12); 
} #[test] fn external_buf_grows_to_init() { - let mut parts = FramedParts::new(DontReadIntoThis, U32Codec); + let mut parts = FramedParts::new(DontReadIntoThis, U32Codec::default()); parts.read_buf = BytesMut::from(&[0, 0, 0, 42][..]); let framed = Framed::from_parts(parts); @@ -85,7 +142,7 @@ fn external_buf_grows_to_init() { #[test] fn external_buf_does_not_shrink() { - let mut parts = FramedParts::new(DontReadIntoThis, U32Codec); + let mut parts = FramedParts::new(DontReadIntoThis, U32Codec::default()); parts.read_buf = BytesMut::from(&vec![0; INITIAL_CAPACITY * 2][..]); let framed = Framed::from_parts(parts); diff --git a/third_party/rust/tokio-util/tests/framed_read.rs b/third_party/rust/tokio-util/tests/framed_read.rs index 27bb298a7faf..2a9e27e22f53 100644 --- a/third_party/rust/tokio-util/tests/framed_read.rs +++ b/third_party/rust/tokio-util/tests/framed_read.rs @@ -1,6 +1,6 @@ #![warn(rust_2018_idioms)] -use tokio::io::AsyncRead; +use tokio::io::{AsyncRead, ReadBuf}; use tokio_test::assert_ready; use tokio_test::task; use tokio_util::codec::{Decoder, FramedRead}; @@ -50,6 +50,22 @@ impl Decoder for U32Decoder { } } +struct U64Decoder; + +impl Decoder for U64Decoder { + type Item = u64; + type Error = io::Error; + + fn decode(&mut self, buf: &mut BytesMut) -> io::Result> { + if buf.len() < 8 { + return Ok(None); + } + + let n = buf.split_to(8).get_u64(); + Ok(Some(n)) + } +} + #[test] fn read_multi_frame_in_packet() { let mut task = task::spawn(()); @@ -84,6 +100,24 @@ fn read_multi_frame_across_packets() { }); } +#[test] +fn read_multi_frame_in_packet_after_codec_changed() { + let mut task = task::spawn(()); + let mock = mock! { + Ok(b"\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08".to_vec()), + }; + let mut framed = FramedRead::new(mock, U32Decoder); + + task.enter(|cx, _| { + assert_read!(pin!(framed).poll_next(cx), 0x04); + + let mut framed = framed.map_decoder(|_| U64Decoder); + assert_read!(pin!(framed).poll_next(cx), 0x08); + + assert!(assert_ready!(pin!(framed).poll_next(cx)).is_none()); + }); +} + #[test] fn read_not_ready() { let mut task = task::spawn(()); @@ -185,8 +219,8 @@ fn read_partial_would_block_then_err() { #[test] fn huge_size() { let mut task = task::spawn(()); - let data = [0; 32 * 1024]; - let mut framed = FramedRead::new(Slice(&data[..]), BigDecoder); + let data = &[0; 32 * 1024][..]; + let mut framed = FramedRead::new(data, BigDecoder); task.enter(|cx, _| { assert_read!(pin!(framed).poll_next(cx), 0); @@ -212,7 +246,7 @@ fn huge_size() { #[test] fn data_remaining_is_error() { let mut task = task::spawn(()); - let slice = Slice(&[0; 5]); + let slice = &[0; 5][..]; let mut framed = FramedRead::new(slice, U32Decoder); task.enter(|cx, _| { @@ -254,6 +288,29 @@ fn multi_frames_on_eof() { }); } +#[test] +fn read_eof_then_resume() { + let mut task = task::spawn(()); + let mock = mock! 
{ + Ok(b"\x00\x00\x00\x01".to_vec()), + Ok(b"".to_vec()), + Ok(b"\x00\x00\x00\x02".to_vec()), + Ok(b"".to_vec()), + Ok(b"\x00\x00\x00\x03".to_vec()), + }; + let mut framed = FramedRead::new(mock, U32Decoder); + + task.enter(|cx, _| { + assert_read!(pin!(framed).poll_next(cx), 1); + assert!(assert_ready!(pin!(framed).poll_next(cx)).is_none()); + assert_read!(pin!(framed).poll_next(cx), 2); + assert!(assert_ready!(pin!(framed).poll_next(cx)).is_none()); + assert_read!(pin!(framed).poll_next(cx), 3); + assert!(assert_ready!(pin!(framed).poll_next(cx)).is_none()); + assert!(assert_ready!(pin!(framed).poll_next(cx)).is_none()); + }); +} + // ===== Mock ====== struct Mock { @@ -264,32 +321,19 @@ impl AsyncRead for Mock { fn poll_read( mut self: Pin<&mut Self>, _cx: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { + buf: &mut ReadBuf<'_>, + ) -> Poll> { use io::ErrorKind::WouldBlock; match self.calls.pop_front() { Some(Ok(data)) => { - debug_assert!(buf.len() >= data.len()); - buf[..data.len()].copy_from_slice(&data[..]); - Ready(Ok(data.len())) + debug_assert!(buf.remaining() >= data.len()); + buf.put_slice(&data); + Ready(Ok(())) } Some(Err(ref e)) if e.kind() == WouldBlock => Pending, Some(Err(e)) => Ready(Err(e)), - None => Ready(Ok(0)), + None => Ready(Ok(())), } } } - -// TODO this newtype is necessary because `&[u8]` does not currently implement `AsyncRead` -struct Slice<'a>(&'a [u8]); - -impl AsyncRead for Slice<'_> { - fn poll_read( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { - Pin::new(&mut self.0).poll_read(cx, buf) - } -} diff --git a/third_party/rust/tokio-util/tests/framed_stream.rs b/third_party/rust/tokio-util/tests/framed_stream.rs new file mode 100644 index 000000000000..76d8af7b7d6e --- /dev/null +++ b/third_party/rust/tokio-util/tests/framed_stream.rs @@ -0,0 +1,38 @@ +use futures_core::stream::Stream; +use std::{io, pin::Pin}; +use tokio_test::{assert_ready, io::Builder, task}; +use tokio_util::codec::{BytesCodec, FramedRead}; + +macro_rules! pin { + ($id:ident) => { + Pin::new(&mut $id) + }; +} + +macro_rules! 
assert_read { + ($e:expr, $n:expr) => {{ + let val = assert_ready!($e); + assert_eq!(val.unwrap().unwrap(), $n); + }}; +} + +#[tokio::test] +async fn return_none_after_error() { + let mut io = FramedRead::new( + Builder::new() + .read(b"abcdef") + .read_error(io::Error::new(io::ErrorKind::Other, "Resource errored out")) + .read(b"more data") + .build(), + BytesCodec::new(), + ); + + let mut task = task::spawn(()); + + task.enter(|cx, _| { + assert_read!(pin!(io).poll_next(cx), b"abcdef".to_vec()); + assert!(assert_ready!(pin!(io).poll_next(cx)).unwrap().is_err()); + assert!(assert_ready!(pin!(io).poll_next(cx)).is_none()); + assert_read!(pin!(io).poll_next(cx), b"more data".to_vec()); + }) +} diff --git a/third_party/rust/tokio-util/tests/framed_write.rs b/third_party/rust/tokio-util/tests/framed_write.rs index 9ac6c1d11d48..259d9b0c9f3b 100644 --- a/third_party/rust/tokio-util/tests/framed_write.rs +++ b/third_party/rust/tokio-util/tests/framed_write.rs @@ -39,6 +39,19 @@ impl Encoder for U32Encoder { } } +struct U64Encoder; + +impl Encoder for U64Encoder { + type Error = io::Error; + + fn encode(&mut self, item: u64, dst: &mut BytesMut) -> io::Result<()> { + // Reserve space + dst.reserve(8); + dst.put_u64(item); + Ok(()) + } +} + #[test] fn write_multi_frame_in_packet() { let mut task = task::spawn(()); @@ -65,6 +78,32 @@ fn write_multi_frame_in_packet() { }); } +#[test] +fn write_multi_frame_after_codec_changed() { + let mut task = task::spawn(()); + let mock = mock! { + Ok(b"\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08".to_vec()), + }; + let mut framed = FramedWrite::new(mock, U32Encoder); + + task.enter(|cx, _| { + assert!(assert_ready!(pin!(framed).poll_ready(cx)).is_ok()); + assert!(pin!(framed).start_send(0x04).is_ok()); + + let mut framed = framed.map_encoder(|_| U64Encoder); + assert!(assert_ready!(pin!(framed).poll_ready(cx)).is_ok()); + assert!(pin!(framed).start_send(0x08).is_ok()); + + // Nothing written yet + assert_eq!(1, framed.get_ref().calls.len()); + + // Flush the writes + assert!(assert_ready!(pin!(framed).poll_flush(cx)).is_ok()); + + assert_eq!(0, framed.get_ref().calls.len()); + }); +} + #[test] fn write_hits_backpressure() { const ITER: usize = 2 * 1024; diff --git a/third_party/rust/tokio-0.2.25/tests/io_reader_stream.rs b/third_party/rust/tokio-util/tests/io_reader_stream.rs similarity index 74% rename from third_party/rust/tokio-0.2.25/tests/io_reader_stream.rs rename to third_party/rust/tokio-util/tests/io_reader_stream.rs index 6546a0ef4daf..e30cd85164cc 100644 --- a/third_party/rust/tokio-0.2.25/tests/io_reader_stream.rs +++ b/third_party/rust/tokio-util/tests/io_reader_stream.rs @@ -1,10 +1,9 @@ #![warn(rust_2018_idioms)] -#![cfg(feature = "full")] use std::pin::Pin; use std::task::{Context, Poll}; -use tokio::io::AsyncRead; -use tokio::stream::StreamExt; +use tokio::io::{AsyncRead, ReadBuf}; +use tokio_stream::StreamExt; /// produces at most `remaining` zeros, that returns error. /// each time it reads at most 31 byte. 
@@ -16,18 +15,19 @@ impl AsyncRead for Reader { fn poll_read( self: Pin<&mut Self>, _cx: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { + buf: &mut ReadBuf<'_>, + ) -> Poll> { let this = Pin::into_inner(self); - assert_ne!(buf.len(), 0); + assert_ne!(buf.remaining(), 0); if this.remaining > 0 { - let n = std::cmp::min(this.remaining, buf.len()); + let n = std::cmp::min(this.remaining, buf.remaining()); let n = std::cmp::min(n, 31); - for x in &mut buf[..n] { + for x in &mut buf.initialize_unfilled_to(n)[..n] { *x = 0; } + buf.advance(n); this.remaining -= n; - Poll::Ready(Ok(n)) + Poll::Ready(Ok(())) } else { Poll::Ready(Err(std::io::Error::from_raw_os_error(22))) } @@ -37,11 +37,12 @@ impl AsyncRead for Reader { #[tokio::test] async fn correct_behavior_on_errors() { let reader = Reader { remaining: 8000 }; - let mut stream = tokio::io::reader_stream(reader); + let mut stream = tokio_util::io::ReaderStream::new(reader); let mut zeros_received = 0; let mut had_error = false; loop { let item = stream.next().await.unwrap(); + println!("{:?}", item); match item { Ok(bytes) => { let bytes = &*bytes; diff --git a/third_party/rust/tokio-0.2.25/tests/stream_reader.rs b/third_party/rust/tokio-util/tests/io_stream_reader.rs similarity index 80% rename from third_party/rust/tokio-0.2.25/tests/stream_reader.rs rename to third_party/rust/tokio-util/tests/io_stream_reader.rs index 8370df4dac7d..59759941c51e 100644 --- a/third_party/rust/tokio-0.2.25/tests/stream_reader.rs +++ b/third_party/rust/tokio-util/tests/io_stream_reader.rs @@ -1,14 +1,14 @@ #![warn(rust_2018_idioms)] -#![cfg(feature = "full")] use bytes::Bytes; -use tokio::io::{stream_reader, AsyncReadExt}; -use tokio::stream::iter; +use tokio::io::AsyncReadExt; +use tokio_stream::iter; +use tokio_util::io::StreamReader; #[tokio::test] async fn test_stream_reader() -> std::io::Result<()> { let stream = iter(vec![ - Ok(Bytes::from_static(&[])), + std::io::Result::Ok(Bytes::from_static(&[])), Ok(Bytes::from_static(&[0, 1, 2, 3])), Ok(Bytes::from_static(&[])), Ok(Bytes::from_static(&[4, 5, 6, 7])), @@ -17,7 +17,7 @@ async fn test_stream_reader() -> std::io::Result<()> { Ok(Bytes::from_static(&[])), ]); - let mut read = stream_reader(stream); + let mut read = StreamReader::new(stream); let mut buf = [0; 5]; read.read_exact(&mut buf).await?; diff --git a/third_party/rust/tokio-util/tests/io_sync_bridge.rs b/third_party/rust/tokio-util/tests/io_sync_bridge.rs new file mode 100644 index 000000000000..0d420857b50c --- /dev/null +++ b/third_party/rust/tokio-util/tests/io_sync_bridge.rs @@ -0,0 +1,43 @@ +#![cfg(feature = "io-util")] + +use std::error::Error; +use std::io::{Cursor, Read, Result as IoResult}; +use tokio::io::AsyncRead; +use tokio_util::io::SyncIoBridge; + +async fn test_reader_len( + r: impl AsyncRead + Unpin + Send + 'static, + expected_len: usize, +) -> IoResult<()> { + let mut r = SyncIoBridge::new(r); + let res = tokio::task::spawn_blocking(move || { + let mut buf = Vec::new(); + r.read_to_end(&mut buf)?; + Ok::<_, std::io::Error>(buf) + }) + .await?; + assert_eq!(res?.len(), expected_len); + Ok(()) +} + +#[tokio::test] +async fn test_async_read_to_sync() -> Result<(), Box> { + test_reader_len(tokio::io::empty(), 0).await?; + let buf = b"hello world"; + test_reader_len(Cursor::new(buf), buf.len()).await?; + Ok(()) +} + +#[tokio::test] +async fn test_async_write_to_sync() -> Result<(), Box> { + let mut dest = Vec::new(); + let src = b"hello world"; + let dest = tokio::task::spawn_blocking(move || -> Result<_, String> { + let mut w = 
SyncIoBridge::new(Cursor::new(&mut dest)); + std::io::copy(&mut Cursor::new(src), &mut w).map_err(|e| e.to_string())?; + Ok(dest) + }) + .await??; + assert_eq!(dest.as_slice(), src); + Ok(()) +} diff --git a/third_party/rust/tokio-util/tests/length_delimited.rs b/third_party/rust/tokio-util/tests/length_delimited.rs index 6c5199167be2..126e41b5cd37 100644 --- a/third_party/rust/tokio-util/tests/length_delimited.rs +++ b/third_party/rust/tokio-util/tests/length_delimited.rs @@ -1,6 +1,6 @@ #![warn(rust_2018_idioms)] -use tokio::io::{AsyncRead, AsyncWrite}; +use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; use tokio_test::task; use tokio_test::{ assert_err, assert_ok, assert_pending, assert_ready, assert_ready_err, assert_ready_ok, @@ -372,6 +372,25 @@ fn read_single_multi_frame_one_packet_skip_none_adjusted() { assert_done!(io); } +#[test] +fn read_single_frame_length_adjusted() { + let mut d: Vec<u8> = vec![]; + d.extend_from_slice(b"\x00\x00\x0b\x0cHello world"); + + let io = length_delimited::Builder::new() + .length_field_offset(0) + .length_field_length(3) + .length_adjustment(0) + .num_skip(4) + .new_read(mock! { data(&d), }); + pin_mut!(io); + + assert_next_eq!(io, b"Hello world"); + assert_done!(io); +} + #[test] fn read_single_multi_frame_one_packet_length_includes_head() { let mut d: Vec<u8> = vec![]; @@ -688,18 +707,18 @@ impl AsyncRead for Mock { fn poll_read( mut self: Pin<&mut Self>, _cx: &mut Context<'_>, - dst: &mut [u8], - ) -> Poll<io::Result<usize>> { + dst: &mut ReadBuf<'_>, + ) -> Poll<io::Result<()>> { match self.calls.pop_front() { Some(Ready(Ok(Op::Data(data)))) => { - debug_assert!(dst.len() >= data.len()); - dst[..data.len()].copy_from_slice(&data[..]); - Ready(Ok(data.len())) + debug_assert!(dst.remaining() >= data.len()); + dst.put_slice(&data); + Ready(Ok(())) } Some(Ready(Ok(_))) => panic!(), Some(Ready(Err(e))) => Ready(Err(e)), Some(Pending) => Pending, - None => Ready(Ok(0)), + None => Ready(Ok(())), } } } diff --git a/third_party/rust/tokio-util/tests/mpsc.rs b/third_party/rust/tokio-util/tests/mpsc.rs new file mode 100644 index 000000000000..a3c164d3ecad --- /dev/null +++ b/third_party/rust/tokio-util/tests/mpsc.rs @@ -0,0 +1,239 @@ +use futures::future::poll_fn; +use tokio::sync::mpsc::channel; +use tokio_test::task::spawn; +use tokio_test::{assert_pending, assert_ready, assert_ready_err, assert_ready_ok}; +use tokio_util::sync::PollSender; + +#[tokio::test] +async fn simple() { + let (send, mut recv) = channel(3); + let mut send = PollSender::new(send); + + for i in 1..=3i32 { + let mut reserve = spawn(poll_fn(|cx| send.poll_reserve(cx))); + assert_ready_ok!(reserve.poll()); + send.send_item(i).unwrap(); + } + + let mut reserve = spawn(poll_fn(|cx| send.poll_reserve(cx))); + assert_pending!(reserve.poll()); + + assert_eq!(recv.recv().await.unwrap(), 1); + assert!(reserve.is_woken()); + assert_ready_ok!(reserve.poll()); + + drop(recv); + + send.send_item(42).unwrap(); +} + +#[tokio::test] +async fn repeated_poll_reserve() { + let (send, mut recv) = channel::<i32>(1); + let mut send = PollSender::new(send); + + let mut reserve = spawn(poll_fn(|cx| send.poll_reserve(cx))); + assert_ready_ok!(reserve.poll()); + assert_ready_ok!(reserve.poll()); + send.send_item(1).unwrap(); + + assert_eq!(recv.recv().await.unwrap(), 1); +} + +#[tokio::test] +async fn abort_send() { + let (send, mut recv) = channel(3); + let mut send = PollSender::new(send); + let send2 = send.get_ref().cloned().unwrap(); + + for i in 1..=3i32 { + let mut reserve = spawn(poll_fn(|cx| send.poll_reserve(cx))); +
assert_ready_ok!(reserve.poll()); + send.send_item(i).unwrap(); + } + + let mut reserve = spawn(poll_fn(|cx| send.poll_reserve(cx))); + assert_pending!(reserve.poll()); + assert_eq!(recv.recv().await.unwrap(), 1); + assert!(reserve.is_woken()); + assert_ready_ok!(reserve.poll()); + + let mut send2_send = spawn(send2.send(5)); + assert_pending!(send2_send.poll()); + assert!(send.abort_send()); + assert!(send2_send.is_woken()); + assert_ready_ok!(send2_send.poll()); + + assert_eq!(recv.recv().await.unwrap(), 2); + assert_eq!(recv.recv().await.unwrap(), 3); + assert_eq!(recv.recv().await.unwrap(), 5); +} + +#[tokio::test] +async fn close_sender_last() { + let (send, mut recv) = channel::<i32>(3); + let mut send = PollSender::new(send); + + let mut recv_task = spawn(recv.recv()); + assert_pending!(recv_task.poll()); + + send.close(); + + assert!(recv_task.is_woken()); + assert!(assert_ready!(recv_task.poll()).is_none()); +} + +#[tokio::test] +async fn close_sender_not_last() { + let (send, mut recv) = channel::<i32>(3); + let mut send = PollSender::new(send); + let send2 = send.get_ref().cloned().unwrap(); + + let mut recv_task = spawn(recv.recv()); + assert_pending!(recv_task.poll()); + + send.close(); + + assert!(!recv_task.is_woken()); + assert_pending!(recv_task.poll()); + + drop(send2); + + assert!(recv_task.is_woken()); + assert!(assert_ready!(recv_task.poll()).is_none()); +} + +#[tokio::test] +async fn close_sender_before_reserve() { + let (send, mut recv) = channel::<i32>(3); + let mut send = PollSender::new(send); + + let mut recv_task = spawn(recv.recv()); + assert_pending!(recv_task.poll()); + + send.close(); + + assert!(recv_task.is_woken()); + assert!(assert_ready!(recv_task.poll()).is_none()); + + let mut reserve = spawn(poll_fn(|cx| send.poll_reserve(cx))); + assert_ready_err!(reserve.poll()); +} + +#[tokio::test] +async fn close_sender_after_pending_reserve() { + let (send, mut recv) = channel::<i32>(1); + let mut send = PollSender::new(send); + + let mut recv_task = spawn(recv.recv()); + assert_pending!(recv_task.poll()); + + let mut reserve = spawn(poll_fn(|cx| send.poll_reserve(cx))); + assert_ready_ok!(reserve.poll()); + send.send_item(1).unwrap(); + + assert!(recv_task.is_woken()); + + let mut reserve = spawn(poll_fn(|cx| send.poll_reserve(cx))); + assert_pending!(reserve.poll()); + drop(reserve); + + send.close(); + + assert!(send.is_closed()); + let mut reserve = spawn(poll_fn(|cx| send.poll_reserve(cx))); + assert_ready_err!(reserve.poll()); +} + +#[tokio::test] +async fn close_sender_after_successful_reserve() { + let (send, mut recv) = channel::<i32>(3); + let mut send = PollSender::new(send); + + let mut recv_task = spawn(recv.recv()); + assert_pending!(recv_task.poll()); + + let mut reserve = spawn(poll_fn(|cx| send.poll_reserve(cx))); + assert_ready_ok!(reserve.poll()); + drop(reserve); + + send.close(); + assert!(send.is_closed()); + assert!(!recv_task.is_woken()); + assert_pending!(recv_task.poll()); + + let mut reserve = spawn(poll_fn(|cx| send.poll_reserve(cx))); + assert_ready_ok!(reserve.poll()); +} + +#[tokio::test] +async fn abort_send_after_pending_reserve() { + let (send, mut recv) = channel::<i32>(1); + let mut send = PollSender::new(send); + + let mut recv_task = spawn(recv.recv()); + assert_pending!(recv_task.poll()); + + let mut reserve = spawn(poll_fn(|cx| send.poll_reserve(cx))); + assert_ready_ok!(reserve.poll()); + send.send_item(1).unwrap(); + + assert_eq!(send.get_ref().unwrap().capacity(), 0); + assert!(!send.abort_send()); + + let mut reserve = spawn(poll_fn(|cx|
send.poll_reserve(cx))); + assert_pending!(reserve.poll()); + + assert!(send.abort_send()); + assert_eq!(send.get_ref().unwrap().capacity(), 0); +} + +#[tokio::test] +async fn abort_send_after_successful_reserve() { + let (send, mut recv) = channel::<i32>(1); + let mut send = PollSender::new(send); + + let mut recv_task = spawn(recv.recv()); + assert_pending!(recv_task.poll()); + + assert_eq!(send.get_ref().unwrap().capacity(), 1); + let mut reserve = spawn(poll_fn(|cx| send.poll_reserve(cx))); + assert_ready_ok!(reserve.poll()); + assert_eq!(send.get_ref().unwrap().capacity(), 0); + + assert!(send.abort_send()); + assert_eq!(send.get_ref().unwrap().capacity(), 1); +} + +#[tokio::test] +async fn closed_when_receiver_drops() { + let (send, _) = channel::<i32>(1); + let mut send = PollSender::new(send); + + let mut reserve = spawn(poll_fn(|cx| send.poll_reserve(cx))); + assert_ready_err!(reserve.poll()); +} + +#[should_panic] +#[test] +fn start_send_panics_when_idle() { + let (send, _) = channel::<i32>(3); + let mut send = PollSender::new(send); + + send.send_item(1).unwrap(); +} + +#[should_panic] +#[test] +fn start_send_panics_when_acquiring() { + let (send, _) = channel::<i32>(1); + let mut send = PollSender::new(send); + + let mut reserve = spawn(poll_fn(|cx| send.poll_reserve(cx))); + assert_ready_ok!(reserve.poll()); + send.send_item(1).unwrap(); + + let mut reserve = spawn(poll_fn(|cx| send.poll_reserve(cx))); + assert_pending!(reserve.poll()); + send.send_item(2).unwrap(); +} diff --git a/third_party/rust/tokio-util/tests/poll_semaphore.rs b/third_party/rust/tokio-util/tests/poll_semaphore.rs new file mode 100644 index 000000000000..50f36dd803ba --- /dev/null +++ b/third_party/rust/tokio-util/tests/poll_semaphore.rs @@ -0,0 +1,36 @@ +use std::future::Future; +use std::sync::Arc; +use std::task::Poll; +use tokio::sync::{OwnedSemaphorePermit, Semaphore}; +use tokio_util::sync::PollSemaphore; + +type SemRet = Option<OwnedSemaphorePermit>; + +fn semaphore_poll( + sem: &mut PollSemaphore, +) -> tokio_test::task::Spawn<impl Future<Output = SemRet> + '_> { + let fut = futures::future::poll_fn(move |cx| sem.poll_acquire(cx)); + tokio_test::task::spawn(fut) +} + +#[tokio::test] +async fn it_works() { + let sem = Arc::new(Semaphore::new(1)); + let mut poll_sem = PollSemaphore::new(sem.clone()); + + let permit = sem.acquire().await.unwrap(); + let mut poll = semaphore_poll(&mut poll_sem); + assert!(poll.poll().is_pending()); + drop(permit); + + assert!(matches!(poll.poll(), Poll::Ready(Some(_)))); + drop(poll); + + sem.close(); + + assert!(semaphore_poll(&mut poll_sem).await.is_none()); + + // Check that it is fused. + assert!(semaphore_poll(&mut poll_sem).await.is_none()); + assert!(semaphore_poll(&mut poll_sem).await.is_none()); +} diff --git a/third_party/rust/tokio-util/tests/reusable_box.rs b/third_party/rust/tokio-util/tests/reusable_box.rs new file mode 100644 index 000000000000..c8f6da02ae2d --- /dev/null +++ b/third_party/rust/tokio-util/tests/reusable_box.rs @@ -0,0 +1,72 @@ +use futures::future::FutureExt; +use std::alloc::Layout; +use std::future::Future; +use std::pin::Pin; +use std::task::{Context, Poll}; +use tokio_util::sync::ReusableBoxFuture; + +#[test] +fn test_different_futures() { + let fut = async move { 10 }; + // Not zero sized!
+ assert_eq!(Layout::for_value(&fut).size(), 1); + + let mut b = ReusableBoxFuture::new(fut); + + assert_eq!(b.get_pin().now_or_never(), Some(10)); + + b.try_set(async move { 20 }) + .unwrap_or_else(|_| panic!("incorrect size")); + + assert_eq!(b.get_pin().now_or_never(), Some(20)); + + b.try_set(async move { 30 }) + .unwrap_or_else(|_| panic!("incorrect size")); + + assert_eq!(b.get_pin().now_or_never(), Some(30)); +} + +#[test] +fn test_different_sizes() { + let fut1 = async move { 10 }; + let val = [0u32; 1000]; + let fut2 = async move { val[0] }; + let fut3 = ZeroSizedFuture {}; + + assert_eq!(Layout::for_value(&fut1).size(), 1); + assert_eq!(Layout::for_value(&fut2).size(), 4004); + assert_eq!(Layout::for_value(&fut3).size(), 0); + + let mut b = ReusableBoxFuture::new(fut1); + assert_eq!(b.get_pin().now_or_never(), Some(10)); + b.set(fut2); + assert_eq!(b.get_pin().now_or_never(), Some(0)); + b.set(fut3); + assert_eq!(b.get_pin().now_or_never(), Some(5)); +} + +struct ZeroSizedFuture {} +impl Future for ZeroSizedFuture { + type Output = u32; + fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<u32> { + Poll::Ready(5) + } +} + +#[test] +fn test_zero_sized() { + let fut = ZeroSizedFuture {}; + // Zero sized! + assert_eq!(Layout::for_value(&fut).size(), 0); + + let mut b = ReusableBoxFuture::new(fut); + + assert_eq!(b.get_pin().now_or_never(), Some(5)); + assert_eq!(b.get_pin().now_or_never(), Some(5)); + + b.try_set(ZeroSizedFuture {}) + .unwrap_or_else(|_| panic!("incorrect size")); + + assert_eq!(b.get_pin().now_or_never(), Some(5)); + assert_eq!(b.get_pin().now_or_never(), Some(5)); +} diff --git a/third_party/rust/tokio-util/tests/spawn_pinned.rs b/third_party/rust/tokio-util/tests/spawn_pinned.rs new file mode 100644 index 000000000000..409b8dadab5f --- /dev/null +++ b/third_party/rust/tokio-util/tests/spawn_pinned.rs @@ -0,0 +1,193 @@ +#![warn(rust_2018_idioms)] + +use std::rc::Rc; +use std::sync::Arc; +use tokio_util::task; + +/// Simple test of running a !Send future via spawn_pinned +#[tokio::test] +async fn can_spawn_not_send_future() { + let pool = task::LocalPoolHandle::new(1); + + let output = pool + .spawn_pinned(|| { + // Rc is !Send + !Sync + let local_data = Rc::new("test"); + + // This future holds an Rc, so it is !Send + async move { local_data.to_string() } + }) + .await + .unwrap(); + + assert_eq!(output, "test"); +} + +/// Dropping the join handle still lets the task execute +#[test] +fn can_drop_future_and_still_get_output() { + let pool = task::LocalPoolHandle::new(1); + let (sender, receiver) = std::sync::mpsc::channel(); + + let _ = pool.spawn_pinned(move || { + // Rc is !Send + !Sync + let local_data = Rc::new("test"); + + // This future holds an Rc, so it is !Send + async move { + let _ = sender.send(local_data.to_string()); + } + }); + + assert_eq!(receiver.recv(), Ok("test".to_string())); +} + +#[test] +#[should_panic(expected = "assertion failed: pool_size > 0")] +fn cannot_create_zero_sized_pool() { + let _pool = task::LocalPoolHandle::new(0); +} + +/// We should be able to spawn multiple futures onto the pool at the same time.
+#[tokio::test] +async fn can_spawn_multiple_futures() { + let pool = task::LocalPoolHandle::new(2); + + let join_handle1 = pool.spawn_pinned(|| { + let local_data = Rc::new("test1"); + async move { local_data.to_string() } + }); + let join_handle2 = pool.spawn_pinned(|| { + let local_data = Rc::new("test2"); + async move { local_data.to_string() } + }); + + assert_eq!(join_handle1.await.unwrap(), "test1"); + assert_eq!(join_handle2.await.unwrap(), "test2"); +} + +/// A panic in the spawned task causes the join handle to return an error. +/// But, you can continue to spawn tasks. +#[tokio::test] +async fn task_panic_propagates() { + let pool = task::LocalPoolHandle::new(1); + + let join_handle = pool.spawn_pinned(|| async { + panic!("Test panic"); + }); + + let result = join_handle.await; + assert!(result.is_err()); + let error = result.unwrap_err(); + assert!(error.is_panic()); + let panic_str: &str = *error.into_panic().downcast().unwrap(); + assert_eq!(panic_str, "Test panic"); + + // Trying again with a "safe" task still works + let join_handle = pool.spawn_pinned(|| async { "test" }); + let result = join_handle.await; + assert!(result.is_ok()); + assert_eq!(result.unwrap(), "test"); +} + +/// A panic during task creation causes the join handle to return an error. +/// But, you can continue to spawn tasks. +#[tokio::test] +async fn callback_panic_does_not_kill_worker() { + let pool = task::LocalPoolHandle::new(1); + + let join_handle = pool.spawn_pinned(|| { + panic!("Test panic"); + #[allow(unreachable_code)] + async {} + }); + + let result = join_handle.await; + assert!(result.is_err()); + let error = result.unwrap_err(); + assert!(error.is_panic()); + let panic_str: &str = *error.into_panic().downcast().unwrap(); + assert_eq!(panic_str, "Test panic"); + + // Trying again with a "safe" callback works + let join_handle = pool.spawn_pinned(|| async { "test" }); + let result = join_handle.await; + assert!(result.is_ok()); + assert_eq!(result.unwrap(), "test"); +} + +/// Canceling the task via the returned join handle cancels the spawned task +/// (which has a different, internal join handle). +#[tokio::test] +async fn task_cancellation_propagates() { + let pool = task::LocalPoolHandle::new(1); + let notify_dropped = Arc::new(()); + let weak_notify_dropped = Arc::downgrade(&notify_dropped); + + let (start_sender, start_receiver) = tokio::sync::oneshot::channel(); + let (drop_sender, drop_receiver) = tokio::sync::oneshot::channel::<()>(); + let join_handle = pool.spawn_pinned(|| async move { + let _drop_sender = drop_sender; + // Move the Arc into the task + let _notify_dropped = notify_dropped; + let _ = start_sender.send(()); + + // Keep the task running until it gets aborted + futures::future::pending::<()>().await; + }); + + // Wait for the task to start + let _ = start_receiver.await; + + join_handle.abort(); + + // Wait for the inner task to abort, dropping the sender. + // The top level join handle aborts quicker than the inner task (the abort + // needs to propagate and get processed on the worker thread), so we can't + // just await the top level join handle. + let _ = drop_receiver.await; + + // Check that the Arc has been dropped. This verifies that the inner task + // was canceled as well. + assert!(weak_notify_dropped.upgrade().is_none()); +} + +/// Tasks should be given to the least burdened worker. When spawning two tasks +/// on a pool with two empty workers the tasks should be spawned on separate +/// workers.
+#[tokio::test] +async fn tasks_are_balanced() { + let pool = task::LocalPoolHandle::new(2); + + // Spawn a task so one thread has a task count of 1 + let (start_sender1, start_receiver1) = tokio::sync::oneshot::channel(); + let (end_sender1, end_receiver1) = tokio::sync::oneshot::channel(); + let join_handle1 = pool.spawn_pinned(|| async move { + let _ = start_sender1.send(()); + let _ = end_receiver1.await; + std::thread::current().id() + }); + + // Wait for the first task to start up + let _ = start_receiver1.await; + + // This task should be spawned on the other thread + let (start_sender2, start_receiver2) = tokio::sync::oneshot::channel(); + let join_handle2 = pool.spawn_pinned(|| async move { + let _ = start_sender2.send(()); + std::thread::current().id() + }); + + // Wait for the second task to start up + let _ = start_receiver2.await; + + // Allow the first task to end + let _ = end_sender1.send(()); + + let thread_id1 = join_handle1.await.unwrap(); + let thread_id2 = join_handle2.await.unwrap(); + + // Since the first task was active when the second task spawned, they should + // be on separate workers/threads. + assert_ne!(thread_id1, thread_id2); +} diff --git a/third_party/rust/tokio-util/tests/sync_cancellation_token.rs b/third_party/rust/tokio-util/tests/sync_cancellation_token.rs new file mode 100644 index 000000000000..28ba284b6c2a --- /dev/null +++ b/third_party/rust/tokio-util/tests/sync_cancellation_token.rs @@ -0,0 +1,400 @@ +#![warn(rust_2018_idioms)] + +use tokio::pin; +use tokio_util::sync::{CancellationToken, WaitForCancellationFuture}; + +use core::future::Future; +use core::task::{Context, Poll}; +use futures_test::task::new_count_waker; + +#[test] +fn cancel_token() { + let (waker, wake_counter) = new_count_waker(); + let token = CancellationToken::new(); + assert!(!token.is_cancelled()); + + let wait_fut = token.cancelled(); + pin!(wait_fut); + + assert_eq!( + Poll::Pending, + wait_fut.as_mut().poll(&mut Context::from_waker(&waker)) + ); + assert_eq!(wake_counter, 0); + + let wait_fut_2 = token.cancelled(); + pin!(wait_fut_2); + + token.cancel(); + assert_eq!(wake_counter, 1); + assert!(token.is_cancelled()); + + assert_eq!( + Poll::Ready(()), + wait_fut.as_mut().poll(&mut Context::from_waker(&waker)) + ); + assert_eq!( + Poll::Ready(()), + wait_fut_2.as_mut().poll(&mut Context::from_waker(&waker)) + ); +} + +#[test] +fn cancel_child_token_through_parent() { + let (waker, wake_counter) = new_count_waker(); + let token = CancellationToken::new(); + + let child_token = token.child_token(); + assert!(!child_token.is_cancelled()); + + let child_fut = child_token.cancelled(); + pin!(child_fut); + let parent_fut = token.cancelled(); + pin!(parent_fut); + + assert_eq!( + Poll::Pending, + child_fut.as_mut().poll(&mut Context::from_waker(&waker)) + ); + assert_eq!( + Poll::Pending, + parent_fut.as_mut().poll(&mut Context::from_waker(&waker)) + ); + assert_eq!(wake_counter, 0); + + token.cancel(); + assert_eq!(wake_counter, 2); + assert!(token.is_cancelled()); + assert!(child_token.is_cancelled()); + + assert_eq!( + Poll::Ready(()), + child_fut.as_mut().poll(&mut Context::from_waker(&waker)) + ); + assert_eq!( + Poll::Ready(()), + parent_fut.as_mut().poll(&mut Context::from_waker(&waker)) + ); +} + +#[test] +fn cancel_grandchild_token_through_parent_if_child_was_dropped() { + let (waker, wake_counter) = new_count_waker(); + let token = CancellationToken::new(); + + let intermediate_token = token.child_token(); + let child_token = intermediate_token.child_token(); + 
drop(intermediate_token); + assert!(!child_token.is_cancelled()); + + let child_fut = child_token.cancelled(); + pin!(child_fut); + let parent_fut = token.cancelled(); + pin!(parent_fut); + + assert_eq!( + Poll::Pending, + child_fut.as_mut().poll(&mut Context::from_waker(&waker)) + ); + assert_eq!( + Poll::Pending, + parent_fut.as_mut().poll(&mut Context::from_waker(&waker)) + ); + assert_eq!(wake_counter, 0); + + token.cancel(); + assert_eq!(wake_counter, 2); + assert!(token.is_cancelled()); + assert!(child_token.is_cancelled()); + + assert_eq!( + Poll::Ready(()), + child_fut.as_mut().poll(&mut Context::from_waker(&waker)) + ); + assert_eq!( + Poll::Ready(()), + parent_fut.as_mut().poll(&mut Context::from_waker(&waker)) + ); +} + +#[test] +fn cancel_child_token_without_parent() { + let (waker, wake_counter) = new_count_waker(); + let token = CancellationToken::new(); + + let child_token_1 = token.child_token(); + + let child_fut = child_token_1.cancelled(); + pin!(child_fut); + let parent_fut = token.cancelled(); + pin!(parent_fut); + + assert_eq!( + Poll::Pending, + child_fut.as_mut().poll(&mut Context::from_waker(&waker)) + ); + assert_eq!( + Poll::Pending, + parent_fut.as_mut().poll(&mut Context::from_waker(&waker)) + ); + assert_eq!(wake_counter, 0); + + child_token_1.cancel(); + assert_eq!(wake_counter, 1); + assert!(!token.is_cancelled()); + assert!(child_token_1.is_cancelled()); + + assert_eq!( + Poll::Ready(()), + child_fut.as_mut().poll(&mut Context::from_waker(&waker)) + ); + assert_eq!( + Poll::Pending, + parent_fut.as_mut().poll(&mut Context::from_waker(&waker)) + ); + + let child_token_2 = token.child_token(); + let child_fut_2 = child_token_2.cancelled(); + pin!(child_fut_2); + + assert_eq!( + Poll::Pending, + child_fut_2.as_mut().poll(&mut Context::from_waker(&waker)) + ); + assert_eq!( + Poll::Pending, + parent_fut.as_mut().poll(&mut Context::from_waker(&waker)) + ); + + token.cancel(); + assert_eq!(wake_counter, 3); + assert!(token.is_cancelled()); + assert!(child_token_2.is_cancelled()); + + assert_eq!( + Poll::Ready(()), + child_fut_2.as_mut().poll(&mut Context::from_waker(&waker)) + ); + assert_eq!( + Poll::Ready(()), + parent_fut.as_mut().poll(&mut Context::from_waker(&waker)) + ); +} + +#[test] +fn create_child_token_after_parent_was_cancelled() { + for drop_child_first in [true, false].iter().cloned() { + let (waker, wake_counter) = new_count_waker(); + let token = CancellationToken::new(); + token.cancel(); + + let child_token = token.child_token(); + assert!(child_token.is_cancelled()); + + { + let child_fut = child_token.cancelled(); + pin!(child_fut); + let parent_fut = token.cancelled(); + pin!(parent_fut); + + assert_eq!( + Poll::Ready(()), + child_fut.as_mut().poll(&mut Context::from_waker(&waker)) + ); + assert_eq!( + Poll::Ready(()), + parent_fut.as_mut().poll(&mut Context::from_waker(&waker)) + ); + assert_eq!(wake_counter, 0); + + drop(child_fut); + drop(parent_fut); + } + + if drop_child_first { + drop(child_token); + drop(token); + } else { + drop(token); + drop(child_token); + } + } +} + +#[test] +fn drop_multiple_child_tokens() { + for drop_first_child_first in &[true, false] { + let token = CancellationToken::new(); + let mut child_tokens = [None, None, None]; + for child in &mut child_tokens { + *child = Some(token.child_token()); + } + + assert!(!token.is_cancelled()); + assert!(!child_tokens[0].as_ref().unwrap().is_cancelled()); + + for i in 0..child_tokens.len() { + if *drop_first_child_first { + child_tokens[i] = None; + } else { + 
child_tokens[child_tokens.len() - 1 - i] = None; + } + assert!(!token.is_cancelled()); + } + + drop(token); + } +} + +#[test] +fn cancel_only_all_descendants() { + // ARRANGE + let (waker, wake_counter) = new_count_waker(); + + let parent_token = CancellationToken::new(); + let token = parent_token.child_token(); + let sibling_token = parent_token.child_token(); + let child1_token = token.child_token(); + let child2_token = token.child_token(); + let grandchild_token = child1_token.child_token(); + let grandchild2_token = child1_token.child_token(); + let grandgrandchild_token = grandchild_token.child_token(); + + assert!(!parent_token.is_cancelled()); + assert!(!token.is_cancelled()); + assert!(!sibling_token.is_cancelled()); + assert!(!child1_token.is_cancelled()); + assert!(!child2_token.is_cancelled()); + assert!(!grandchild_token.is_cancelled()); + assert!(!grandchild2_token.is_cancelled()); + assert!(!grandgrandchild_token.is_cancelled()); + + let parent_fut = parent_token.cancelled(); + let fut = token.cancelled(); + let sibling_fut = sibling_token.cancelled(); + let child1_fut = child1_token.cancelled(); + let child2_fut = child2_token.cancelled(); + let grandchild_fut = grandchild_token.cancelled(); + let grandchild2_fut = grandchild2_token.cancelled(); + let grandgrandchild_fut = grandgrandchild_token.cancelled(); + + pin!(parent_fut); + pin!(fut); + pin!(sibling_fut); + pin!(child1_fut); + pin!(child2_fut); + pin!(grandchild_fut); + pin!(grandchild2_fut); + pin!(grandgrandchild_fut); + + assert_eq!( + Poll::Pending, + parent_fut.as_mut().poll(&mut Context::from_waker(&waker)) + ); + assert_eq!( + Poll::Pending, + fut.as_mut().poll(&mut Context::from_waker(&waker)) + ); + assert_eq!( + Poll::Pending, + sibling_fut.as_mut().poll(&mut Context::from_waker(&waker)) + ); + assert_eq!( + Poll::Pending, + child1_fut.as_mut().poll(&mut Context::from_waker(&waker)) + ); + assert_eq!( + Poll::Pending, + child2_fut.as_mut().poll(&mut Context::from_waker(&waker)) + ); + assert_eq!( + Poll::Pending, + grandchild_fut + .as_mut() + .poll(&mut Context::from_waker(&waker)) + ); + assert_eq!( + Poll::Pending, + grandchild2_fut + .as_mut() + .poll(&mut Context::from_waker(&waker)) + ); + assert_eq!( + Poll::Pending, + grandgrandchild_fut + .as_mut() + .poll(&mut Context::from_waker(&waker)) + ); + assert_eq!(wake_counter, 0); + + // ACT + token.cancel(); + + // ASSERT + assert_eq!(wake_counter, 6); + assert!(!parent_token.is_cancelled()); + assert!(token.is_cancelled()); + assert!(!sibling_token.is_cancelled()); + assert!(child1_token.is_cancelled()); + assert!(child2_token.is_cancelled()); + assert!(grandchild_token.is_cancelled()); + assert!(grandchild2_token.is_cancelled()); + assert!(grandgrandchild_token.is_cancelled()); + + assert_eq!( + Poll::Ready(()), + fut.as_mut().poll(&mut Context::from_waker(&waker)) + ); + assert_eq!( + Poll::Ready(()), + child1_fut.as_mut().poll(&mut Context::from_waker(&waker)) + ); + assert_eq!( + Poll::Ready(()), + child2_fut.as_mut().poll(&mut Context::from_waker(&waker)) + ); + assert_eq!( + Poll::Ready(()), + grandchild_fut + .as_mut() + .poll(&mut Context::from_waker(&waker)) + ); + assert_eq!( + Poll::Ready(()), + grandchild2_fut + .as_mut() + .poll(&mut Context::from_waker(&waker)) + ); + assert_eq!( + Poll::Ready(()), + grandgrandchild_fut + .as_mut() + .poll(&mut Context::from_waker(&waker)) + ); + assert_eq!(wake_counter, 6); +} + +#[test] +fn drop_parent_before_child_tokens() { + let token = CancellationToken::new(); + let child1 = token.child_token(); + 
let child2 = token.child_token(); + + drop(token); + assert!(!child1.is_cancelled()); + + drop(child1); + drop(child2); +} + +#[test] +fn derives_send_sync() { + fn assert_send<T: Send>() {} + fn assert_sync<T: Sync>() {} + + assert_send::<CancellationToken>(); + assert_sync::<CancellationToken>(); + + assert_send::<WaitForCancellationFuture<'static>>(); + assert_sync::<WaitForCancellationFuture<'static>>(); +} diff --git a/third_party/rust/tokio-0.2.25/tests/time_delay_queue.rs b/third_party/rust/tokio-util/tests/time_delay_queue.rs similarity index 53% rename from third_party/rust/tokio-0.2.25/tests/time_delay_queue.rs rename to third_party/rust/tokio-util/tests/time_delay_queue.rs index 3fd82eb32378..cb163adf3a6c 100644 --- a/third_party/rust/tokio-0.2.25/tests/time_delay_queue.rs +++ b/third_party/rust/tokio-util/tests/time_delay_queue.rs @@ -1,9 +1,10 @@ -#![allow(clippy::blacklisted_name, clippy::stable_sort_primitive)] +#![allow(clippy::blacklisted_name)] #![warn(rust_2018_idioms)] #![cfg(feature = "full")] -use tokio::time::{self, delay_for, DelayQueue, Duration, Instant}; -use tokio_test::{assert_ok, assert_pending, assert_ready, task}; +use tokio::time::{self, sleep, sleep_until, Duration, Instant}; +use tokio_test::{assert_pending, assert_ready, task}; +use tokio_util::time::DelayQueue; macro_rules! poll { ($queue:ident) => { @@ -11,12 +12,12 @@ macro_rules! poll { }; } -macro_rules! assert_ready_ok { +macro_rules! assert_ready_some { ($e:expr) => {{ - assert_ok!(match assert_ready!($e) { + match assert_ready!($e) { Some(v) => v, None => panic!("None"), - }) + } }}; } @@ -28,9 +29,9 @@ async fn single_immediate_delay() { let _key = queue.insert_at("foo", Instant::now()); // Advance time by 1ms to handle the rounding - delay_for(ms(1)).await; + sleep(ms(1)).await; - assert_ready_ok!(poll!(queue)); + assert_ready_some!(poll!(queue)); let entry = assert_ready!(poll!(queue)); assert!(entry.is_none()) @@ -46,19 +47,19 @@ async fn multi_immediate_delays() { let _k = queue.insert_at("2", Instant::now()); let _k = queue.insert_at("3", Instant::now()); - delay_for(ms(1)).await; + sleep(ms(1)).await; let mut res = vec![]; while res.len() < 3 { - let entry = assert_ready_ok!(poll!(queue)); + let entry = assert_ready_some!(poll!(queue)); res.push(entry.into_inner()); } let entry = assert_ready!(poll!(queue)); assert!(entry.is_none()); - res.sort(); + res.sort_unstable(); assert_eq!("1", res[0]); assert_eq!("2", res[1]); @@ -74,15 +75,15 @@ async fn single_short_delay() { assert_pending!(poll!(queue)); - delay_for(ms(1)).await; + sleep(ms(1)).await; assert!(!queue.is_woken()); - delay_for(ms(5)).await; + sleep(ms(5)).await; assert!(queue.is_woken()); - let entry = assert_ready_ok!(poll!(queue)); + let entry = assert_ready_some!(poll!(queue)); assert_eq!(*entry.get_ref(), "foo"); let entry = assert_ready!(poll!(queue)); @@ -106,9 +107,11 @@ async fn multi_delay_at_start() { assert_pending!(poll!(queue)); assert!(!queue.is_woken()); + let start = Instant::now(); for elapsed in 0..1200 { - delay_for(ms(1)).await; + println!("elapsed: {:?}", elapsed); let elapsed = elapsed + 1; + tokio::time::sleep_until(start + ms(elapsed)).await; if delays.contains(&elapsed) { assert!(queue.is_woken()); @@ -116,25 +119,33 @@ async fn multi_delay_at_start() { assert_pending!(poll!(queue)); } else if queue.is_woken() { let cascade = &[192, 960]; - assert!(cascade.contains(&elapsed), "elapsed={}", elapsed); + assert!( + cascade.contains(&elapsed), + "elapsed={} dt={:?}", + elapsed, + Instant::now() - start + ); assert_pending!(poll!(queue)); } } + println!("finished multi_delay_start"); } #[tokio::test] async fn
insert_in_past_fires_immediately() { + println!("running insert_in_past_fires_immediately"); time::pause(); let mut queue = task::spawn(DelayQueue::new()); let now = Instant::now(); - delay_for(ms(10)).await; + sleep(ms(10)).await; queue.insert_at("foo", now); assert_ready!(poll!(queue)); + println!("finished insert_in_past_fires_immediately"); } #[tokio::test] @@ -150,7 +161,7 @@ async fn remove_entry() { let entry = queue.remove(&key); assert_eq!(entry.into_inner(), "foo"); - delay_for(ms(10)).await; + sleep(ms(10)).await; let entry = assert_ready!(poll!(queue)); assert!(entry.is_none()); @@ -166,23 +177,23 @@ async fn reset_entry() { let key = queue.insert_at("foo", now + ms(5)); assert_pending!(poll!(queue)); - delay_for(ms(1)).await; + sleep(ms(1)).await; queue.reset_at(&key, now + ms(10)); assert_pending!(poll!(queue)); - delay_for(ms(7)).await; + sleep(ms(7)).await; assert!(!queue.is_woken()); assert_pending!(poll!(queue)); - delay_for(ms(3)).await; + sleep(ms(3)).await; assert!(queue.is_woken()); - let entry = assert_ready_ok!(poll!(queue)); + let entry = assert_ready_some!(poll!(queue)); assert_eq!(*entry.get_ref(), "foo"); let entry = assert_ready!(poll!(queue)); @@ -197,16 +208,16 @@ async fn reset_much_later() { let mut queue = task::spawn(DelayQueue::new()); let now = Instant::now(); - delay_for(ms(1)).await; + sleep(ms(1)).await; let key = queue.insert_at("foo", now + ms(200)); assert_pending!(poll!(queue)); - delay_for(ms(3)).await; + sleep(ms(3)).await; - queue.reset_at(&key, now + ms(5)); + queue.reset_at(&key, now + ms(10)); - delay_for(ms(20)).await; + sleep(ms(20)).await; assert!(queue.is_woken()); } @@ -219,25 +230,54 @@ async fn reset_twice() { let mut queue = task::spawn(DelayQueue::new()); let now = Instant::now(); - delay_for(ms(1)).await; + sleep(ms(1)).await; let key = queue.insert_at("foo", now + ms(200)); assert_pending!(poll!(queue)); - delay_for(ms(3)).await; + sleep(ms(3)).await; queue.reset_at(&key, now + ms(50)); - delay_for(ms(20)).await; + sleep(ms(20)).await; queue.reset_at(&key, now + ms(40)); - delay_for(ms(20)).await; + sleep(ms(20)).await; assert!(queue.is_woken()); } +/// Regression test: Given an entry inserted with a deadline in the past, so +/// that it is placed directly on the expired queue, reset the entry to a +/// deadline in the future. Validate that this leaves the entry and queue in an +/// internally consistent state by running an additional reset on the entry +/// before polling it to completion. 
+#[tokio::test] +async fn repeatedly_reset_entry_inserted_as_expired() { + time::pause(); + let mut queue = task::spawn(DelayQueue::new()); + let now = Instant::now(); + + let key = queue.insert_at("foo", now - ms(100)); + + queue.reset_at(&key, now + ms(100)); + queue.reset_at(&key, now + ms(50)); + + assert_pending!(poll!(queue)); + + time::sleep_until(now + ms(60)).await; + + assert!(queue.is_woken()); + + let entry = assert_ready_some!(poll!(queue)).into_inner(); + assert_eq!(entry, "foo"); + + let entry = assert_ready!(poll!(queue)); + assert!(entry.is_none()); +} + #[tokio::test] async fn remove_expired_item() { time::pause(); @@ -246,7 +286,7 @@ async fn remove_expired_item() { let now = Instant::now(); - delay_for(ms(10)).await; + sleep(ms(10)).await; let key = queue.insert_at("foo", now); @@ -254,6 +294,38 @@ async fn remove_expired_item() { assert_eq!(entry.into_inner(), "foo"); } +/// Regression test: it should be possible to remove entries which fall in the +/// 0th slot of the internal timer wheel — that is, entries whose expiration +/// (a) falls at the beginning of one of the wheel's hierarchical levels and (b) +/// is equal to the wheel's current elapsed time. +#[tokio::test] +async fn remove_at_timer_wheel_threshold() { + time::pause(); + + let mut queue = task::spawn(DelayQueue::new()); + + let now = Instant::now(); + + let key1 = queue.insert_at("foo", now + ms(64)); + let key2 = queue.insert_at("bar", now + ms(64)); + + sleep(ms(80)).await; + + let entry = assert_ready_some!(poll!(queue)).into_inner(); + + match entry { + "foo" => { + let entry = queue.remove(&key2).into_inner(); + assert_eq!(entry, "bar"); + } + "bar" => { + let entry = queue.remove(&key1).into_inner(); + assert_eq!(entry, "foo"); + } + other => panic!("other: {:?}", other), + } +} + #[tokio::test] async fn expires_before_last_insert() { time::pause(); @@ -272,11 +344,11 @@ async fn expires_before_last_insert() { assert_pending!(poll!(queue)); - delay_for(ms(600)).await; + sleep(ms(600)).await; assert!(queue.is_woken()); - let entry = assert_ready_ok!(poll!(queue)).into_inner(); + let entry = assert_ready_some!(poll!(queue)).into_inner(); assert_eq!(entry, "bar"); } @@ -297,20 +369,20 @@ async fn multi_reset() { queue.reset_at(&two, now + ms(350)); queue.reset_at(&one, now + ms(400)); - delay_for(ms(310)).await; + sleep(ms(310)).await; assert_pending!(poll!(queue)); - delay_for(ms(50)).await; + sleep(ms(50)).await; - let entry = assert_ready_ok!(poll!(queue)); + let entry = assert_ready_some!(poll!(queue)); assert_eq!(*entry.get_ref(), "two"); assert_pending!(poll!(queue)); - delay_for(ms(50)).await; + sleep(ms(50)).await; - let entry = assert_ready_ok!(poll!(queue)); + let entry = assert_ready_some!(poll!(queue)); assert_eq!(*entry.get_ref(), "one"); let entry = assert_ready!(poll!(queue)); @@ -332,11 +404,11 @@ async fn expire_first_key_when_reset_to_expire_earlier() { queue.reset_at(&one, now + ms(100)); - delay_for(ms(100)).await; + sleep(ms(100)).await; assert!(queue.is_woken()); - let entry = assert_ready_ok!(poll!(queue)).into_inner(); + let entry = assert_ready_some!(poll!(queue)).into_inner(); assert_eq!(entry, "one"); } @@ -355,11 +427,11 @@ async fn expire_second_key_when_reset_to_expire_earlier() { queue.reset_at(&two, now + ms(100)); - delay_for(ms(100)).await; + sleep(ms(100)).await; assert!(queue.is_woken()); - let entry = assert_ready_ok!(poll!(queue)).into_inner(); + let entry = assert_ready_some!(poll!(queue)).into_inner(); assert_eq!(entry, "two"); } @@ -377,11 +449,11 @@ async fn 
reset_first_expiring_item_to_expire_later() { assert_pending!(poll!(queue)); queue.reset_at(&one, now + ms(300)); - delay_for(ms(250)).await; + sleep(ms(250)).await; assert!(queue.is_woken()); - let entry = assert_ready_ok!(poll!(queue)).into_inner(); + let entry = assert_ready_some!(poll!(queue)).into_inner(); assert_eq!(entry, "two"); } @@ -399,15 +471,15 @@ async fn insert_before_first_after_poll() { let _two = queue.insert_at("two", now + ms(100)); - delay_for(ms(99)).await; + sleep(ms(99)).await; - assert!(!queue.is_woken()); + assert_pending!(poll!(queue)); - delay_for(ms(1)).await; + sleep(ms(1)).await; assert!(queue.is_woken()); - let entry = assert_ready_ok!(poll!(queue)).into_inner(); + let entry = assert_ready_some!(poll!(queue)).into_inner(); assert_eq!(entry, "two"); } @@ -425,19 +497,19 @@ async fn insert_after_ready_poll() { assert_pending!(poll!(queue)); - delay_for(ms(100)).await; + sleep(ms(100)).await; assert!(queue.is_woken()); let mut res = vec![]; while res.len() < 3 { - let entry = assert_ready_ok!(poll!(queue)); + let entry = assert_ready_some!(poll!(queue)); res.push(entry.into_inner()); queue.insert_at("foo", now + ms(500)); } - res.sort(); + res.sort_unstable(); assert_eq!("1", res[0]); assert_eq!("2", res[1]); @@ -456,7 +528,7 @@ async fn reset_later_after_slot_starts() { assert_pending!(poll!(queue)); - delay_for(ms(80)).await; + sleep_until(now + Duration::from_millis(80)).await; assert!(!queue.is_woken()); @@ -471,13 +543,13 @@ async fn reset_later_after_slot_starts() { assert_pending!(poll!(queue)); - delay_for(ms(39)).await; + sleep_until(now + Duration::from_millis(119)).await; assert!(!queue.is_woken()); - delay_for(ms(1)).await; + sleep(ms(1)).await; assert!(queue.is_woken()); - let entry = assert_ready_ok!(poll!(queue)).into_inner(); + let entry = assert_ready_some!(poll!(queue)).into_inner(); assert_eq!(entry, "foo"); } @@ -494,9 +566,9 @@ async fn reset_inserted_expired() { assert_eq!(1, queue.len()); - delay_for(ms(200)).await; + sleep(ms(200)).await; - let entry = assert_ready_ok!(poll!(queue)).into_inner(); + let entry = assert_ready_some!(poll!(queue)).into_inner(); assert_eq!(entry, "foo"); assert_eq!(queue.len(), 0); @@ -514,7 +586,7 @@ async fn reset_earlier_after_slot_starts() { assert_pending!(poll!(queue)); - delay_for(ms(80)).await; + sleep_until(now + Duration::from_millis(80)).await; assert!(!queue.is_woken()); @@ -529,13 +601,13 @@ async fn reset_earlier_after_slot_starts() { assert_pending!(poll!(queue)); - delay_for(ms(39)).await; + sleep_until(now + Duration::from_millis(119)).await; assert!(!queue.is_woken()); - delay_for(ms(1)).await; + sleep(ms(1)).await; assert!(queue.is_woken()); - let entry = assert_ready_ok!(poll!(queue)).into_inner(); + let entry = assert_ready_some!(poll!(queue)).into_inner(); assert_eq!(entry, "foo"); } @@ -551,17 +623,196 @@ async fn insert_in_past_after_poll_fires_immediately() { assert_pending!(poll!(queue)); - delay_for(ms(80)).await; + sleep(ms(80)).await; assert!(!queue.is_woken()); queue.insert_at("bar", now + ms(40)); assert!(queue.is_woken()); - let entry = assert_ready_ok!(poll!(queue)).into_inner(); + let entry = assert_ready_some!(poll!(queue)).into_inner(); assert_eq!(entry, "bar"); } +#[tokio::test] +async fn delay_queue_poll_expired_when_empty() { + let mut delay_queue = task::spawn(DelayQueue::new()); + let key = delay_queue.insert(0, std::time::Duration::from_secs(10)); + assert_pending!(poll!(delay_queue)); + + delay_queue.remove(&key); + 
assert!(assert_ready!(poll!(delay_queue)).is_none()); +} + +#[tokio::test(start_paused = true)] +async fn compact_expire_empty() { + let mut queue = task::spawn(DelayQueue::new()); + + let now = Instant::now(); + + queue.insert_at("foo1", now + ms(10)); + queue.insert_at("foo2", now + ms(10)); + + sleep(ms(10)).await; + + let mut res = vec![]; + while res.len() < 2 { + let entry = assert_ready_some!(poll!(queue)); + res.push(entry.into_inner()); + } + + queue.compact(); + + assert_eq!(queue.len(), 0); + assert_eq!(queue.capacity(), 0); +} + +#[tokio::test(start_paused = true)] +async fn compact_remove_empty() { + let mut queue = task::spawn(DelayQueue::new()); + + let now = Instant::now(); + + let key1 = queue.insert_at("foo1", now + ms(10)); + let key2 = queue.insert_at("foo2", now + ms(10)); + + queue.remove(&key1); + queue.remove(&key2); + + queue.compact(); + + assert_eq!(queue.len(), 0); + assert_eq!(queue.capacity(), 0); +} + +#[tokio::test(start_paused = true)] +// Trigger a re-mapping of keys in the slab due to a `compact` call and +// test removal of re-mapped keys +async fn compact_remove_remapped_keys() { + let mut queue = task::spawn(DelayQueue::new()); + + let now = Instant::now(); + + queue.insert_at("foo1", now + ms(10)); + queue.insert_at("foo2", now + ms(10)); + + // should be assigned indices 3 and 4 + let key3 = queue.insert_at("foo3", now + ms(20)); + let key4 = queue.insert_at("foo4", now + ms(20)); + + sleep(ms(10)).await; + + let mut res = vec![]; + while res.len() < 2 { + let entry = assert_ready_some!(poll!(queue)); + res.push(entry.into_inner()); + } + + // items corresponding to `foo3` and `foo4` will be assigned + // new indices here + queue.compact(); + + queue.insert_at("foo5", now + ms(10)); + + // test removal of re-mapped keys + let expired3 = queue.remove(&key3); + let expired4 = queue.remove(&key4); + + assert_eq!(expired3.into_inner(), "foo3"); + assert_eq!(expired4.into_inner(), "foo4"); + + queue.compact(); + assert_eq!(queue.len(), 1); + assert_eq!(queue.capacity(), 1); +} + +#[tokio::test(start_paused = true)] +async fn compact_change_deadline() { + let mut queue = task::spawn(DelayQueue::new()); + + let mut now = Instant::now(); + + queue.insert_at("foo1", now + ms(10)); + queue.insert_at("foo2", now + ms(10)); + + // should be assigned indices 3 and 4 + queue.insert_at("foo3", now + ms(20)); + let key4 = queue.insert_at("foo4", now + ms(20)); + + sleep(ms(10)).await; + + let mut res = vec![]; + while res.len() < 2 { + let entry = assert_ready_some!(poll!(queue)); + res.push(entry.into_inner()); + } + + // items corresponding to `foo3` and `foo4` should be assigned + // new indices + queue.compact(); + + now = Instant::now(); + + queue.insert_at("foo5", now + ms(10)); + let key6 = queue.insert_at("foo6", now + ms(10)); + + queue.reset_at(&key4, now + ms(20)); + queue.reset_at(&key6, now + ms(20)); + + // foo3 and foo5 will expire + sleep(ms(10)).await; + + while res.len() < 4 { + let entry = assert_ready_some!(poll!(queue)); + res.push(entry.into_inner()); + } + + sleep(ms(10)).await; + + while res.len() < 6 { + let entry = assert_ready_some!(poll!(queue)); + res.push(entry.into_inner()); + } + + let entry = assert_ready!(poll!(queue)); + assert!(entry.is_none()); +} + +#[tokio::test(start_paused = true)] +async fn remove_after_compact() { + let now = Instant::now(); + let mut queue = DelayQueue::new(); + + let foo_key = queue.insert_at("foo", now + ms(10)); + queue.insert_at("bar", now + ms(20)); + queue.remove(&foo_key); + queue.compact(); + + let 
panic = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { + queue.remove(&foo_key); + })); + assert!(panic.is_err()); +} + +#[tokio::test(start_paused = true)] +async fn remove_after_compact_poll() { + let now = Instant::now(); + let mut queue = task::spawn(DelayQueue::new()); + + let foo_key = queue.insert_at("foo", now + ms(10)); + queue.insert_at("bar", now + ms(20)); + + sleep(ms(10)).await; + assert_eq!(assert_ready_some!(poll!(queue)).key(), foo_key); + + queue.compact(); + + let panic = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { + queue.remove(&foo_key); + })); + assert!(panic.is_err()); +} + fn ms(n: u64) -> Duration { Duration::from_millis(n) } diff --git a/third_party/rust/tokio-util/tests/udp.rs b/third_party/rust/tokio-util/tests/udp.rs index 0ba0574281c0..b9436a30aa69 100644 --- a/third_party/rust/tokio-util/tests/udp.rs +++ b/third_party/rust/tokio-util/tests/udp.rs @@ -1,5 +1,8 @@ -use tokio::{net::UdpSocket, stream::StreamExt}; -use tokio_util::codec::{Decoder, Encoder}; +#![warn(rust_2018_idioms)] + +use tokio::net::UdpSocket; +use tokio_stream::StreamExt; +use tokio_util::codec::{Decoder, Encoder, LinesCodec}; use tokio_util::udp::UdpFramed; use bytes::{BufMut, BytesMut}; @@ -7,10 +10,11 @@ use futures::future::try_join; use futures::future::FutureExt; use futures::sink::SinkExt; use std::io; +use std::sync::Arc; #[cfg_attr(any(target_os = "macos", target_os = "ios"), allow(unused_assignments))] #[tokio::test] -async fn send_framed() -> std::io::Result<()> { +async fn send_framed_byte_codec() -> std::io::Result<()> { let mut a_soc = UdpSocket::bind("127.0.0.1:0").await?; let mut b_soc = UdpSocket::bind("127.0.0.1:0").await?; @@ -77,3 +81,52 @@ impl Encoder<&[u8]> for ByteCodec { Ok(()) } } + +#[tokio::test] +async fn send_framed_lines_codec() -> std::io::Result<()> { + let a_soc = UdpSocket::bind("127.0.0.1:0").await?; + let b_soc = UdpSocket::bind("127.0.0.1:0").await?; + + let a_addr = a_soc.local_addr()?; + let b_addr = b_soc.local_addr()?; + + let mut a = UdpFramed::new(a_soc, ByteCodec); + let mut b = UdpFramed::new(b_soc, LinesCodec::new()); + + let msg = b"1\r\n2\r\n3\r\n".to_vec(); + a.send((&msg, b_addr)).await?; + + assert_eq!(b.next().await.unwrap().unwrap(), ("1".to_string(), a_addr)); + assert_eq!(b.next().await.unwrap().unwrap(), ("2".to_string(), a_addr)); + assert_eq!(b.next().await.unwrap().unwrap(), ("3".to_string(), a_addr)); + + Ok(()) +} + +#[tokio::test] +async fn framed_half() -> std::io::Result<()> { + let a_soc = Arc::new(UdpSocket::bind("127.0.0.1:0").await?); + let b_soc = a_soc.clone(); + + let a_addr = a_soc.local_addr()?; + let b_addr = b_soc.local_addr()?; + + let mut a = UdpFramed::new(a_soc, ByteCodec); + let mut b = UdpFramed::new(b_soc, LinesCodec::new()); + + let msg = b"1\r\n2\r\n3\r\n".to_vec(); + a.send((&msg, b_addr)).await?; + + let msg = b"4\r\n5\r\n6\r\n".to_vec(); + a.send((&msg, b_addr)).await?; + + assert_eq!(b.next().await.unwrap().unwrap(), ("1".to_string(), a_addr)); + assert_eq!(b.next().await.unwrap().unwrap(), ("2".to_string(), a_addr)); + assert_eq!(b.next().await.unwrap().unwrap(), ("3".to_string(), a_addr)); + + assert_eq!(b.next().await.unwrap().unwrap(), ("4".to_string(), a_addr)); + assert_eq!(b.next().await.unwrap().unwrap(), ("5".to_string(), a_addr)); + assert_eq!(b.next().await.unwrap().unwrap(), ("6".to_string(), a_addr)); + + Ok(()) +} diff --git a/third_party/rust/tracing-attributes/.cargo-checksum.json b/third_party/rust/tracing-attributes/.cargo-checksum.json new file mode 100644 
index 000000000000..8695cd39ea9a --- /dev/null +++ b/third_party/rust/tracing-attributes/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"CHANGELOG.md":"a41cefad0d01fc385c212e5160b4b11bc7a87e3be3f211af17396bb4a7865abb","Cargo.toml":"253511713d185e130c9c15f037445c17776427c3588621d7972abb0c2db5b866","LICENSE":"898b1ae9821e98daf8964c8d6c7f61641f5f5aa78ad500020771c0939ee0dea1","README.md":"a0e6d8ad2f6071cd53539bfe6d6f011c1dd07a31872be014f43b7a36860b3c7e","src/attr.rs":"4d26dad70c765fff3d4677ebe8e71b63599d67ceba5c48dbcb204d247b5394ce","src/expand.rs":"0e30aec264f96e2388787aa346b94f004cb7cfa6d7187e256b335ce9a449606b","src/lib.rs":"8f6382c2c4622cd2f5478943ca228ad2cf335dcb35f16a900cb8960b8bffa78c","tests/async_fn.rs":"74126e6027bc9cb6e5e3d5d0e3b2debd5c808c737d25b90303b8e3ca878a3667","tests/destructuring.rs":"26b9800678bad09e06512a113a54556e2fac3ecb15a18dcccefe105fb8911c26","tests/err.rs":"bed4968c19f833f67464e2270bd9b05bf21fffefa1056770c634f161cb5fa5f1","tests/fields.rs":"e18f157a80bd3cee68d9dd96106b060652d4d94dabdd99e721c54851745b23f9","tests/follows_from.rs":"5bc856923e87b34e0558959149118238fe668ac621f1748cc444c21c90a86647","tests/instrument.rs":"9118eb6971d19a6b8d301bb4512b0fda909404a21e14edbef6b01db094cd8aaf","tests/levels.rs":"80ffb684163a4d28c69c40e31a82609ac02daf922086bab8247bca125aec3c69","tests/names.rs":"5afd6c4d526588bcea3141c130a45a21872956495b6868a01b44ddff57749827","tests/parents.rs":"673d3f81eed6ba433f685ec53fd007c5dd957b97d32499d7ea1537e1f289cb2e","tests/ret.rs":"bfd71022dbd9f9149152d13d99278e8adb0be8ae2fe8208611576d3a76359e08","tests/targets.rs":"95ce1ce1e2d29794062c5b3429d91c1bfaba5813251d5d8440c12cb2db6e11bf"},"package":"cc6b8ad3567499f98a1db7a752b07a7c8c7c7c34c332ec00effb2b0027974b7c"} \ No newline at end of file diff --git a/third_party/rust/tracing-attributes/CHANGELOG.md b/third_party/rust/tracing-attributes/CHANGELOG.md new file mode 100644 index 000000000000..a52baa7d2bc1 --- /dev/null +++ b/third_party/rust/tracing-attributes/CHANGELOG.md @@ -0,0 +1,283 @@ +# 0.1.21 (April 26, 2022) + +This release adds support for setting explicit parent and follows-from spans +in the `#[instrument]` attribute. + +### Added + +- `#[instrument(follows_from = ...)]` argument for setting one or more + follows-from span ([#2093]) +- `#[instrument(parent = ...)]` argument for overriding the generated span's + parent ([#2091]) + +### Fixed + +- Extra braces around `async` blocks in expanded code (causes a Clippy warning) + ([#2090]) +- Broken documentation links ([#2068], [#2077]) + +Thanks to @jarrodldavis, @ben0x539, and new contributor @jswrenn for +contributing to this release! + + +[#2093]: https://github.com/tokio-rs/tracing/pull/2093 +[#2091]: https://github.com/tokio-rs/tracing/pull/2091 +[#2090]: https://github.com/tokio-rs/tracing/pull/2090 +[#2077]: https://github.com/tokio-rs/tracing/pull/2077 +[#2068]: https://github.com/tokio-rs/tracing/pull/2068 + +# 0.1.20 (March 8, 2022) + +### Fixed + +- Compilation failure with `--minimal-versions` due to a too-permissive `syn` + dependency ([#1960]) + +### Changed + +- Bumped minimum supported Rust version (MSRV) to 1.49.0 ([#1913]) + +Thanks to new contributor @udoprog for contributing to this release! + +[#1960]: https://github.com/tokio-rs/tracing/pull/1960 +[#1913]: https://github.com/tokio-rs/tracing/pull/1913 + +# 0.1.19 (February 3, 2022) + +This release introduces a new `#[instrument(ret)]` argument to emit an event +with the return value of an instrumented function. 
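For readers unfamiliar with the attribute, here is a minimal sketch of how `ret` is used (illustrative only; `parse_port` is a hypothetical function, not part of this patch, and the attribute is reachable either from `tracing_attributes` or via its re-export in the main `tracing` crate):

```rust
use tracing::instrument;

// With `ret`, the expanded code emits an event carrying the function's
// return value (recorded via `Debug`) whenever `parse_port` returns.
#[instrument(ret)]
fn parse_port(input: &str) -> Option<u16> {
    input.parse().ok()
}
```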
+ +### Added + +- `#[instrument(ret)]` to record the return value of a function ([#1716]) +- added `err(Debug)` argument to cause `#[instrument(err)]` to record errors + with `Debug` rather than `Display ([#1631]) + +### Fixed + +- incorrect code generation for functions returning async blocks ([#1866]) +- incorrect diagnostics when using `rust-analyzer` ([#1634]) + +Thanks to @Swatinem, @hkmatsumoto, @cynecx, and @ciuncan for contributing to +this release! + +[#1716]: https://github.com/tokio-rs/tracing/pull/1716 +[#1631]: https://github.com/tokio-rs/tracing/pull/1631 +[#1634]: https://github.com/tokio-rs/tracing/pull/1634 +[#1866]: https://github.com/tokio-rs/tracing/pull/1866 + +# 0.1.18 (October 5, 2021) + +This release fixes issues introduced in v0.1.17. + +### Fixed + +- fixed mismatched types compiler error that may occur when using + `#[instrument]` on an `async fn` that returns an `impl Trait` value that + includes a closure ([#1616]) +- fixed false positives for `clippy::suspicious_else_formatting` warnings due to + rust-lang/rust-clippy#7760 and rust-lang/rust-clippy#6249 ([#1617]) +- fixed `clippy::let_unit_value` lints when using `#[instrument]` ([#1614]) + +[#1617]: https://github.com/tokio-rs/tracing/pull/1617 +[#1616]: https://github.com/tokio-rs/tracing/pull/1616 +[#1614]: https://github.com/tokio-rs/tracing/pull/1614 + +# 0.1.17 (YANKED) (October 1, 2021) + +This release significantly improves performance when `#[instrument]`-generated +spans are below the maximum enabled level. + +### Added + +- improve performance when skipping `#[instrument]`-generated spans below the + max level ([#1600], [#1605]) + +Thanks to @oli-obk for contributing to this release! + +[#1600]: https://github.com/tokio-rs/tracing/pull/1600 +[#1605]: https://github.com/tokio-rs/tracing/pull/1605 + +# 0.1.16 (September 13, 2021) + +This release adds a new `#[instrument(skip_all)]` option to skip recording *all* +arguments to an instrumented function as fields. Additionally, it adds support +for recording arguments that are `tracing` primitive types as typed values, +rather than as `fmt::Debug`. + +### Added + +- add `skip_all` option to `#[instrument]` ([#1548]) +- record primitive types as primitive values rather than as `fmt::Debug` + ([#1378]) +- added support for `f64`s as typed values ([#1522]) + +Thanks to @Folyd and @jsgf for contributing to this release! + +[#1548]: https://github.com/tokio-rs/tracing/pull/1548 +[#1378]: https://github.com/tokio-rs/tracing/pull/1378 +[#1522]: https://github.com/tokio-rs/tracing/pull/1524 + +# 0.1.15 (March 12, 2021) + +### Fixed + +- `#[instrument]` on functions returning `Box::pin`ned futures incorrectly + skipping function bodies prior to returning a future ([#1297]) + +Thanks to @nightmared for contributing to this release! + +[#1297]: https://github.com/tokio-rs/tracing/pull/1297 + +# 0.1.14 (March 10, 2021) + +### Fixed + +- Compatibility between `#[instrument]` and `async-trait` v0.1.43 and newer + ([#1228]) + +Thanks to @nightmared for lots of hard work on this fix! 
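The `async-trait` compatibility work above concerns instrumenting methods inside `#[async_trait]` impls. A hedged sketch of that pattern (illustrative only; `Store` and `MemStore` are made-up types, and `skip(self)` is used because the receiver has no `Debug` impl):

```rust
use async_trait::async_trait;
use tracing::instrument;

#[async_trait]
trait Store {
    async fn get(&self, key: u32) -> Option<String>;
}

struct MemStore;

#[async_trait]
impl Store for MemStore {
    // `#[instrument]` on an `async-trait` method; `self` is skipped so the
    // span does not try to record a field for a type without `Debug`.
    #[instrument(skip(self))]
    async fn get(&self, key: u32) -> Option<String> {
        Some(format!("value-{}", key))
    }
}
```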
+ +[#1228]: https://github.com/tokio-rs/tracing/pull/1228 + +# 0.1.13 (February 17, 2021) + +### Fixed + +- Compiler error when using `#[instrument(err)]` on functions which return `impl + Trait` ([#1236]) + +[#1236]: https://github.com/tokio-rs/tracing/pull/1236 + +# 0.1.12 (February 4, 2021) + +### Fixed + +- Compiler error when using `#[instrument(err)]` on functions with mutable + parameters ([#1167]) +- Missing function visibility modifier when using `#[instrument]` with + `async-trait` ([#977]) +- Multiple documentation fixes and improvements ([#965], [#981], [#1215]) + +### Changed + +- `tracing-futures` dependency is no longer required when using `#[instrument]` + on async functions ([#808]) + +Thanks to @nagisa, @Txuritan, @TaKO8Ki, and @okready for contributing to this +release! + +[#1167]: https://github.com/tokio-rs/tracing/pull/1167 +[#977]: https://github.com/tokio-rs/tracing/pull/977 +[#965]: https://github.com/tokio-rs/tracing/pull/965 +[#981]: https://github.com/tokio-rs/tracing/pull/981 +[#1215]: https://github.com/tokio-rs/tracing/pull/1215 +[#808]: https://github.com/tokio-rs/tracing/pull/808 + +# 0.1.11 (August 18, 2020) + +### Fixed + +- Corrected wrong minimum supported Rust version note in docs (#941) +- Removed unused `syn` features (#928) + +Thanks to new contributor @jhpratt for contributing to this release! + +# 0.1.10 (August 10, 2020) + +### Added + +- Support for using `self` in field expressions when instrumenting `async-trait` + functions (#875) +- Several documentation improvements (#832, #897, #911, #913) + +Thanks to @anton-dutov and @nightmared for contributing to this release! + +# 0.1.9 (July 8, 2020) + +### Added + +- Support for arbitrary expressions as fields in `#[instrument]` (#672) + +### Changed + +- `#[instrument]` now emits a compiler warning when ignoring unrecognized + input (#672, #786) + +# 0.1.8 (May 13, 2020) + +### Added + +- Support for using `#[instrument]` on methods that are part of [`async-trait`] + trait implementations (#711) +- Optional `#[instrument(err)]` argument to automatically emit an event if an + instrumented function returns `Err` (#637) + +Thanks to @ilana and @nightmared for contributing to this release! + +[`async-trait`]: https://crates.io/crates/async-trait + +# 0.1.7 (February 26, 2020) + +### Added + +- Support for adding arbitrary literal fields to spans generated by + `#[instrument]` (#569) +- `#[instrument]` now emits a helpful compiler error when attempting to skip a + function parameter (#600) + +Thanks to @Kobzol for contributing to this release! 
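As a rough illustration of the literal-field and `skip` arguments described above (the type, field names, and function here are hypothetical, not taken from this patch):

```rust
use tracing::instrument;

struct Connection;

// Literal fields attach extra context to the generated span; `skip(conn)`
// omits the connection handle, which has no `Debug` implementation.
#[instrument(fields(component = "db", retries = 3), skip(conn))]
fn run_query(conn: &mut Connection, query: &str) -> usize {
    let _ = conn;
    query.len()
}
```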
+ +# 0.1.6 (December 20, 2019) + +### Added + +- Updated documentation (#468) + +# 0.1.5 (October 22, 2019) + +### Added + +- Support for destructuring in arguments to `#[instrument]`ed functions (#397) +- Generated field for `self` parameters when `#[instrument]`ing methods (#397) + +# 0.1.4 (September 26, 2019) + +### Added + +- Optional `skip` argument to `#[instrument]` for excluding function parameters + from generated spans (#359) + +# 0.1.3 (September 12, 2019) + +### Fixed + +- Fixed `#[instrument]`ed async functions not compiling on `nightly-2019-09-11` + or newer (#342) + +# 0.1.2 (August 19, 2019) + +### Changed + +- Updated `syn` and `quote` dependencies to 1.0 (#292) +- Removed direct dependency on `proc-macro2` to avoid potential version + conflicts (#296) + +### Fixed + +- Outdated idioms in examples (#271, #273) + +# 0.1.1 (August 9, 2019) + +### Changed + +- Using the `#[instrument]` attribute on `async fn`s no longer requires a + feature flag (#258) + +### Fixed + +- The `#[instrument]` macro now works on generic functions (#262) + +# 0.1.0 (August 8, 2019) + +- Initial release diff --git a/third_party/rust/tracing-attributes/Cargo.toml b/third_party/rust/tracing-attributes/Cargo.toml new file mode 100644 index 000000000000..8e34f6c2ec0a --- /dev/null +++ b/third_party/rust/tracing-attributes/Cargo.toml @@ -0,0 +1,81 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2018" +rust-version = "1.49.0" +name = "tracing-attributes" +version = "0.1.21" +authors = [ + "Tokio Contributors ", + "Eliza Weisman ", + "David Barsky ", +] +description = """ +Procedural macro attributes for automatically instrumenting functions. 
+""" +homepage = "https://tokio.rs" +readme = "README.md" +keywords = [ + "logging", + "tracing", + "macro", + "instrument", + "log", +] +categories = [ + "development-tools::debugging", + "development-tools::profiling", + "asynchronous", +] +license = "MIT" +repository = "https://github.com/tokio-rs/tracing" + +[lib] +proc-macro = true + +[dependencies.proc-macro2] +version = "1" + +[dependencies.quote] +version = "1" + +[dependencies.syn] +version = "1.0.43" +features = [ + "full", + "parsing", + "printing", + "visit", + "visit-mut", + "clone-impls", + "extra-traits", + "proc-macro", +] +default-features = false + +[dev-dependencies.async-trait] +version = "0.1.44" + +[dev-dependencies.tokio-test] +version = "0.2.0" + +[dev-dependencies.tracing] +version = "0.1" + +[dev-dependencies.tracing-core] +version = "0.1" + +[features] +async-await = [] + +[badges.maintenance] +status = "experimental" diff --git a/third_party/rust/tokio-0.2.25/LICENSE b/third_party/rust/tracing-attributes/LICENSE similarity index 100% rename from third_party/rust/tokio-0.2.25/LICENSE rename to third_party/rust/tracing-attributes/LICENSE diff --git a/third_party/rust/tracing-attributes/README.md b/third_party/rust/tracing-attributes/README.md new file mode 100644 index 000000000000..7d8009ac3c59 --- /dev/null +++ b/third_party/rust/tracing-attributes/README.md @@ -0,0 +1,91 @@ +![Tracing — Structured, application-level diagnostics][splash] + +[splash]: https://raw.githubusercontent.com/tokio-rs/tracing/master/assets/splash.svg + +# tracing-attributes + +Macro attributes for application-level tracing. + +[![Crates.io][crates-badge]][crates-url] +[![Documentation][docs-badge]][docs-url] +[![Documentation (master)][docs-master-badge]][docs-master-url] +[![MIT licensed][mit-badge]][mit-url] +[![Build Status][actions-badge]][actions-url] +[![Discord chat][discord-badge]][discord-url] + +[Documentation][docs-url] | [Chat][discord-url] + +[crates-badge]: https://img.shields.io/crates/v/tracing-attributes.svg +[crates-url]: https://crates.io/crates/tracing-attributes +[docs-badge]: https://docs.rs/tracing-attributes/badge.svg +[docs-url]: https://docs.rs/tracing-attributes/0.1.21 +[docs-master-badge]: https://img.shields.io/badge/docs-master-blue +[docs-master-url]: https://tracing-rs.netlify.com/tracing_attributes +[mit-badge]: https://img.shields.io/badge/license-MIT-blue.svg +[mit-url]: LICENSE +[actions-badge]: https://github.com/tokio-rs/tracing/workflows/CI/badge.svg +[actions-url]:https://github.com/tokio-rs/tracing/actions?query=workflow%3ACI +[discord-badge]: https://img.shields.io/discord/500028886025895936?logo=discord&label=discord&logoColor=white +[discord-url]: https://discord.gg/EeF3cQw + +## Overview + +[`tracing`] is a framework for instrumenting Rust programs to collect +structured, event-based diagnostic information. This crate provides the +`#[instrument]` attribute for automatically instrumenting functions using +`tracing`. + +Note that this macro is also re-exported by the main `tracing` crate. + +*Compiler support: [requires `rustc` 1.49+][msrv]* + +[msrv]: #supported-rust-versions + +## Usage + +First, add this to your `Cargo.toml`: + +```toml +[dependencies] +tracing-attributes = "0.1.21" +``` + + +This crate provides the `#[instrument]` attribute for instrumenting a function +with a `tracing` [span]. For example: + +```rust +use tracing_attributes::instrument; + +#[instrument] +pub fn my_function(my_arg: usize) { + // ... 
+} +``` + +[`tracing`]: https://crates.io/crates/tracing +[span]: https://docs.rs/tracing/latest/tracing/span/index.html + +## Supported Rust Versions + +Tracing is built against the latest stable release. The minimum supported +version is 1.49. The current Tracing version is not guaranteed to build on Rust +versions earlier than the minimum supported version. + +Tracing follows the same compiler support policies as the rest of the Tokio +project. The current stable Rust compiler and the three most recent minor +versions before it will always be supported. For example, if the current stable +compiler version is 1.45, the minimum supported version will not be increased +past 1.42, three minor versions prior. Increasing the minimum supported compiler +version is not considered a semver breaking change as long as doing so complies +with this policy. + +## License + +This project is licensed under the [MIT license](LICENSE). + +### Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in Tokio by you, shall be licensed as MIT, without any additional +terms or conditions. diff --git a/third_party/rust/tracing-attributes/src/attr.rs b/third_party/rust/tracing-attributes/src/attr.rs new file mode 100644 index 000000000000..ff875e179788 --- /dev/null +++ b/third_party/rust/tracing-attributes/src/attr.rs @@ -0,0 +1,413 @@ +use std::collections::HashSet; +use syn::{punctuated::Punctuated, Expr, Ident, LitInt, LitStr, Path, Token}; + +use proc_macro2::TokenStream; +use quote::{quote, quote_spanned, ToTokens}; +use syn::ext::IdentExt as _; +use syn::parse::{Parse, ParseStream}; + +#[derive(Clone, Default, Debug)] +pub(crate) struct InstrumentArgs { + level: Option, + pub(crate) name: Option, + target: Option, + pub(crate) parent: Option, + pub(crate) follows_from: Option, + pub(crate) skips: HashSet, + pub(crate) skip_all: bool, + pub(crate) fields: Option, + pub(crate) err_mode: Option, + pub(crate) ret_mode: Option, + /// Errors describing any unrecognized parse inputs that we skipped. + parse_warnings: Vec, +} + +impl InstrumentArgs { + pub(crate) fn level(&self) -> impl ToTokens { + fn is_level(lit: &LitInt, expected: u64) -> bool { + match lit.base10_parse::() { + Ok(value) => value == expected, + Err(_) => false, + } + } + + match &self.level { + Some(Level::Str(ref lit)) if lit.value().eq_ignore_ascii_case("trace") => { + quote!(tracing::Level::TRACE) + } + Some(Level::Str(ref lit)) if lit.value().eq_ignore_ascii_case("debug") => { + quote!(tracing::Level::DEBUG) + } + Some(Level::Str(ref lit)) if lit.value().eq_ignore_ascii_case("info") => { + quote!(tracing::Level::INFO) + } + Some(Level::Str(ref lit)) if lit.value().eq_ignore_ascii_case("warn") => { + quote!(tracing::Level::WARN) + } + Some(Level::Str(ref lit)) if lit.value().eq_ignore_ascii_case("error") => { + quote!(tracing::Level::ERROR) + } + Some(Level::Int(ref lit)) if is_level(lit, 1) => quote!(tracing::Level::TRACE), + Some(Level::Int(ref lit)) if is_level(lit, 2) => quote!(tracing::Level::DEBUG), + Some(Level::Int(ref lit)) if is_level(lit, 3) => quote!(tracing::Level::INFO), + Some(Level::Int(ref lit)) if is_level(lit, 4) => quote!(tracing::Level::WARN), + Some(Level::Int(ref lit)) if is_level(lit, 5) => quote!(tracing::Level::ERROR), + Some(Level::Path(ref pat)) => quote!(#pat), + Some(_) => quote! 
{ + compile_error!( + "unknown verbosity level, expected one of \"trace\", \ + \"debug\", \"info\", \"warn\", or \"error\", or a number 1-5" + ) + }, + None => quote!(tracing::Level::INFO), + } + } + + pub(crate) fn target(&self) -> impl ToTokens { + if let Some(ref target) = self.target { + quote!(#target) + } else { + quote!(module_path!()) + } + } + + /// Generate "deprecation" warnings for any unrecognized attribute inputs + /// that we skipped. + /// + /// For backwards compatibility, we need to emit compiler warnings rather + /// than errors for unrecognized inputs. Generating a fake deprecation is + /// the only way to do this on stable Rust right now. + pub(crate) fn warnings(&self) -> impl ToTokens { + let warnings = self.parse_warnings.iter().map(|err| { + let msg = format!("found unrecognized input, {}", err); + let msg = LitStr::new(&msg, err.span()); + // TODO(eliza): This is a bit of a hack, but it's just about the + // only way to emit warnings from a proc macro on stable Rust. + // Eventually, when the `proc_macro::Diagnostic` API stabilizes, we + // should definitely use that instead. + quote_spanned! {err.span()=> + #[warn(deprecated)] + { + #[deprecated(since = "not actually deprecated", note = #msg)] + const TRACING_INSTRUMENT_WARNING: () = (); + let _ = TRACING_INSTRUMENT_WARNING; + } + } + }); + quote! { + { #(#warnings)* } + } + } +} + +impl Parse for InstrumentArgs { + fn parse(input: ParseStream<'_>) -> syn::Result { + let mut args = Self::default(); + while !input.is_empty() { + let lookahead = input.lookahead1(); + if lookahead.peek(kw::name) { + if args.name.is_some() { + return Err(input.error("expected only a single `name` argument")); + } + let name = input.parse::>()?.value; + args.name = Some(name); + } else if lookahead.peek(LitStr) { + // XXX: apparently we support names as either named args with an + // sign, _or_ as unnamed string literals. That's weird, but + // changing it is apparently breaking. 
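+                // That is, both `#[instrument(name = "my_span")]` and
+                // `#[instrument("my_span")]` end up setting `args.name` here.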
+ if args.name.is_some() { + return Err(input.error("expected only a single `name` argument")); + } + args.name = Some(input.parse()?); + } else if lookahead.peek(kw::target) { + if args.target.is_some() { + return Err(input.error("expected only a single `target` argument")); + } + let target = input.parse::>()?.value; + args.target = Some(target); + } else if lookahead.peek(kw::parent) { + if args.target.is_some() { + return Err(input.error("expected only a single `parent` argument")); + } + let parent = input.parse::>()?; + args.parent = Some(parent.value); + } else if lookahead.peek(kw::follows_from) { + if args.target.is_some() { + return Err(input.error("expected only a single `follows_from` argument")); + } + let follows_from = input.parse::>()?; + args.follows_from = Some(follows_from.value); + } else if lookahead.peek(kw::level) { + if args.level.is_some() { + return Err(input.error("expected only a single `level` argument")); + } + args.level = Some(input.parse()?); + } else if lookahead.peek(kw::skip) { + if !args.skips.is_empty() { + return Err(input.error("expected only a single `skip` argument")); + } + if args.skip_all { + return Err(input.error("expected either `skip` or `skip_all` argument")); + } + let Skips(skips) = input.parse()?; + args.skips = skips; + } else if lookahead.peek(kw::skip_all) { + if args.skip_all { + return Err(input.error("expected only a single `skip_all` argument")); + } + if !args.skips.is_empty() { + return Err(input.error("expected either `skip` or `skip_all` argument")); + } + let _ = input.parse::()?; + args.skip_all = true; + } else if lookahead.peek(kw::fields) { + if args.fields.is_some() { + return Err(input.error("expected only a single `fields` argument")); + } + args.fields = Some(input.parse()?); + } else if lookahead.peek(kw::err) { + let _ = input.parse::(); + let mode = FormatMode::parse(input)?; + args.err_mode = Some(mode); + } else if lookahead.peek(kw::ret) { + let _ = input.parse::()?; + let mode = FormatMode::parse(input)?; + args.ret_mode = Some(mode); + } else if lookahead.peek(Token![,]) { + let _ = input.parse::()?; + } else { + // We found a token that we didn't expect! + // We want to emit warnings for these, rather than errors, so + // we'll add it to the list of unrecognized inputs we've seen so + // far and keep going. + args.parse_warnings.push(lookahead.error()); + // Parse the unrecognized token tree to advance the parse + // stream, and throw it away so we can keep parsing. 
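+                // The saved errors are surfaced later as deprecation-style
+                // warnings by `InstrumentArgs::warnings()` instead of aborting
+                // macro expansion.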
+ let _ = input.parse::(); + } + } + Ok(args) + } +} + +struct StrArg { + value: LitStr, + _p: std::marker::PhantomData, +} + +impl Parse for StrArg { + fn parse(input: ParseStream<'_>) -> syn::Result { + let _ = input.parse::()?; + let _ = input.parse::()?; + let value = input.parse()?; + Ok(Self { + value, + _p: std::marker::PhantomData, + }) + } +} + +struct ExprArg { + value: Expr, + _p: std::marker::PhantomData, +} + +impl Parse for ExprArg { + fn parse(input: ParseStream<'_>) -> syn::Result { + let _ = input.parse::()?; + let _ = input.parse::()?; + let value = input.parse()?; + Ok(Self { + value, + _p: std::marker::PhantomData, + }) + } +} + +struct Skips(HashSet); + +impl Parse for Skips { + fn parse(input: ParseStream<'_>) -> syn::Result { + let _ = input.parse::(); + let content; + let _ = syn::parenthesized!(content in input); + let names: Punctuated = content.parse_terminated(Ident::parse_any)?; + let mut skips = HashSet::new(); + for name in names { + if skips.contains(&name) { + return Err(syn::Error::new( + name.span(), + "tried to skip the same field twice", + )); + } else { + skips.insert(name); + } + } + Ok(Self(skips)) + } +} + +#[derive(Clone, Debug, Hash, PartialEq, Eq)] +pub(crate) enum FormatMode { + Default, + Display, + Debug, +} + +impl Default for FormatMode { + fn default() -> Self { + FormatMode::Default + } +} + +impl Parse for FormatMode { + fn parse(input: ParseStream<'_>) -> syn::Result { + if !input.peek(syn::token::Paren) { + return Ok(FormatMode::default()); + } + let content; + let _ = syn::parenthesized!(content in input); + let maybe_mode: Option = content.parse()?; + maybe_mode.map_or(Ok(FormatMode::default()), |ident| { + match ident.to_string().as_str() { + "Debug" => Ok(FormatMode::Debug), + "Display" => Ok(FormatMode::Display), + _ => Err(syn::Error::new( + ident.span(), + "unknown error mode, must be Debug or Display", + )), + } + }) + } +} + +#[derive(Clone, Debug)] +pub(crate) struct Fields(pub(crate) Punctuated); + +#[derive(Clone, Debug)] +pub(crate) struct Field { + pub(crate) name: Punctuated, + pub(crate) value: Option, + pub(crate) kind: FieldKind, +} + +#[derive(Clone, Debug, Eq, PartialEq)] +pub(crate) enum FieldKind { + Debug, + Display, + Value, +} + +impl Parse for Fields { + fn parse(input: ParseStream<'_>) -> syn::Result { + let _ = input.parse::(); + let content; + let _ = syn::parenthesized!(content in input); + let fields: Punctuated<_, Token![,]> = content.parse_terminated(Field::parse)?; + Ok(Self(fields)) + } +} + +impl ToTokens for Fields { + fn to_tokens(&self, tokens: &mut TokenStream) { + self.0.to_tokens(tokens) + } +} + +impl Parse for Field { + fn parse(input: ParseStream<'_>) -> syn::Result { + let mut kind = FieldKind::Value; + if input.peek(Token![%]) { + input.parse::()?; + kind = FieldKind::Display; + } else if input.peek(Token![?]) { + input.parse::()?; + kind = FieldKind::Debug; + }; + let name = Punctuated::parse_separated_nonempty_with(input, Ident::parse_any)?; + let value = if input.peek(Token![=]) { + input.parse::()?; + if input.peek(Token![%]) { + input.parse::()?; + kind = FieldKind::Display; + } else if input.peek(Token![?]) { + input.parse::()?; + kind = FieldKind::Debug; + }; + Some(input.parse()?) + } else { + None + }; + Ok(Self { name, value, kind }) + } +} + +impl ToTokens for Field { + fn to_tokens(&self, tokens: &mut TokenStream) { + if let Some(ref value) = self.value { + let name = &self.name; + let kind = &self.kind; + tokens.extend(quote! 
{ + #name = #kind#value + }) + } else if self.kind == FieldKind::Value { + // XXX(eliza): I don't like that fields without values produce + // empty fields rather than local variable shorthand...but, + // we've released a version where field names without values in + // `instrument` produce empty field values, so changing it now + // is a breaking change. agh. + let name = &self.name; + tokens.extend(quote!(#name = tracing::field::Empty)) + } else { + self.kind.to_tokens(tokens); + self.name.to_tokens(tokens); + } + } +} + +impl ToTokens for FieldKind { + fn to_tokens(&self, tokens: &mut TokenStream) { + match self { + FieldKind::Debug => tokens.extend(quote! { ? }), + FieldKind::Display => tokens.extend(quote! { % }), + _ => {} + } + } +} + +#[derive(Clone, Debug)] +enum Level { + Str(LitStr), + Int(LitInt), + Path(Path), +} + +impl Parse for Level { + fn parse(input: ParseStream<'_>) -> syn::Result { + let _ = input.parse::()?; + let _ = input.parse::()?; + let lookahead = input.lookahead1(); + if lookahead.peek(LitStr) { + Ok(Self::Str(input.parse()?)) + } else if lookahead.peek(LitInt) { + Ok(Self::Int(input.parse()?)) + } else if lookahead.peek(Ident) { + Ok(Self::Path(input.parse()?)) + } else { + Err(lookahead.error()) + } + } +} + +mod kw { + syn::custom_keyword!(fields); + syn::custom_keyword!(skip); + syn::custom_keyword!(skip_all); + syn::custom_keyword!(level); + syn::custom_keyword!(target); + syn::custom_keyword!(parent); + syn::custom_keyword!(follows_from); + syn::custom_keyword!(name); + syn::custom_keyword!(err); + syn::custom_keyword!(ret); +} diff --git a/third_party/rust/tracing-attributes/src/expand.rs b/third_party/rust/tracing-attributes/src/expand.rs new file mode 100644 index 000000000000..b563d4bbfe31 --- /dev/null +++ b/third_party/rust/tracing-attributes/src/expand.rs @@ -0,0 +1,756 @@ +use std::iter; + +use proc_macro2::TokenStream; +use quote::{quote, quote_spanned, ToTokens}; +use syn::{ + punctuated::Punctuated, spanned::Spanned, Block, Expr, ExprAsync, ExprCall, FieldPat, FnArg, + Ident, Item, ItemFn, Pat, PatIdent, PatReference, PatStruct, PatTuple, PatTupleStruct, PatType, + Path, Signature, Stmt, Token, TypePath, +}; + +use crate::{ + attr::{Field, Fields, FormatMode, InstrumentArgs}, + MaybeItemFnRef, +}; + +/// Given an existing function, generate an instrumented version of that function +pub(crate) fn gen_function<'a, B: ToTokens + 'a>( + input: MaybeItemFnRef<'a, B>, + args: InstrumentArgs, + instrumented_function_name: &str, + self_type: Option<&syn::TypePath>, +) -> proc_macro2::TokenStream { + // these are needed ahead of time, as ItemFn contains the function body _and_ + // isn't representable inside a quote!/quote_spanned! macro + // (Syn's ToTokens isn't implemented for ItemFn) + let MaybeItemFnRef { + attrs, + vis, + sig, + block, + } = input; + + let Signature { + output: return_type, + inputs: params, + unsafety, + asyncness, + constness, + abi, + ident, + generics: + syn::Generics { + params: gen_params, + where_clause, + .. + }, + .. 
+ } = sig; + + let warnings = args.warnings(); + + let body = gen_block( + block, + params, + asyncness.is_some(), + args, + instrumented_function_name, + self_type, + ); + + quote!( + #(#attrs) * + #vis #constness #unsafety #asyncness #abi fn #ident<#gen_params>(#params) #return_type + #where_clause + { + #warnings + #body + } + ) +} + +/// Instrument a block +fn gen_block( + block: &B, + params: &Punctuated, + async_context: bool, + mut args: InstrumentArgs, + instrumented_function_name: &str, + self_type: Option<&syn::TypePath>, +) -> proc_macro2::TokenStream { + // generate the span's name + let span_name = args + // did the user override the span's name? + .name + .as_ref() + .map(|name| quote!(#name)) + .unwrap_or_else(|| quote!(#instrumented_function_name)); + + let level = args.level(); + + let follows_from = args.follows_from.iter(); + let follows_from = quote! { + #(for cause in #follows_from { + __tracing_attr_span.follows_from(cause); + })* + }; + + // generate this inside a closure, so we can return early on errors. + let span = (|| { + // Pull out the arguments-to-be-skipped first, so we can filter results + // below. + let param_names: Vec<(Ident, (Ident, RecordType))> = params + .clone() + .into_iter() + .flat_map(|param| match param { + FnArg::Typed(PatType { pat, ty, .. }) => { + param_names(*pat, RecordType::parse_from_ty(&*ty)) + } + FnArg::Receiver(_) => Box::new(iter::once(( + Ident::new("self", param.span()), + RecordType::Debug, + ))), + }) + // Little dance with new (user-exposed) names and old (internal) + // names of identifiers. That way, we could do the following + // even though async_trait (<=0.1.43) rewrites "self" as "_self": + // ``` + // #[async_trait] + // impl Foo for FooImpl { + // #[instrument(skip(self))] + // async fn foo(&self, v: usize) {} + // } + // ``` + .map(|(x, record_type)| { + // if we are inside a function generated by async-trait <=0.1.43, we need to + // take care to rewrite "_self" as "self" for 'user convenience' + if self_type.is_some() && x == "_self" { + (Ident::new("self", x.span()), (x, record_type)) + } else { + (x.clone(), (x, record_type)) + } + }) + .collect(); + + for skip in &args.skips { + if !param_names.iter().map(|(user, _)| user).any(|y| y == skip) { + return quote_spanned! {skip.span()=> + compile_error!("attempting to skip non-existent parameter") + }; + } + } + + let target = args.target(); + + let parent = args.parent.iter(); + + // filter out skipped fields + let quoted_fields: Vec<_> = param_names + .iter() + .filter(|(param, _)| { + if args.skip_all || args.skips.contains(param) { + return false; + } + + // If any parameters have the same name as a custom field, skip + // and allow them to be formatted by the custom field. + if let Some(ref fields) = args.fields { + fields.0.iter().all(|Field { ref name, .. 
}| { + let first = name.first(); + first != name.last() || !first.iter().any(|name| name == ¶m) + }) + } else { + true + } + }) + .map(|(user_name, (real_name, record_type))| match record_type { + RecordType::Value => quote!(#user_name = #real_name), + RecordType::Debug => quote!(#user_name = tracing::field::debug(&#real_name)), + }) + .collect(); + + // replace every use of a variable with its original name + if let Some(Fields(ref mut fields)) = args.fields { + let mut replacer = IdentAndTypesRenamer { + idents: param_names.into_iter().map(|(a, (b, _))| (a, b)).collect(), + types: Vec::new(), + }; + + // when async-trait <=0.1.43 is in use, replace instances + // of the "Self" type inside the fields values + if let Some(self_type) = self_type { + replacer.types.push(("Self", self_type.clone())); + } + + for e in fields.iter_mut().filter_map(|f| f.value.as_mut()) { + syn::visit_mut::visit_expr_mut(&mut replacer, e); + } + } + + let custom_fields = &args.fields; + + quote!(tracing::span!( + target: #target, + #(parent: #parent,)* + #level, + #span_name, + #(#quoted_fields,)* + #custom_fields + + )) + })(); + + let err_event = match args.err_mode { + Some(FormatMode::Default) | Some(FormatMode::Display) => { + Some(quote!(tracing::error!(error = %e))) + } + Some(FormatMode::Debug) => Some(quote!(tracing::error!(error = ?e))), + _ => None, + }; + + let ret_event = match args.ret_mode { + Some(FormatMode::Display) => Some(quote!(tracing::event!(#level, return = %x))), + Some(FormatMode::Default) | Some(FormatMode::Debug) => { + Some(quote!(tracing::event!(#level, return = ?x))) + } + _ => None, + }; + + // Generate the instrumented function body. + // If the function is an `async fn`, this will wrap it in an async block, + // which is `instrument`ed using `tracing-futures`. Otherwise, this will + // enter the span and then perform the rest of the body. + // If `err` is in args, instrument any resulting `Err`s. + // If `ret` is in args, instrument any resulting `Ok`s when the function + // returns `Result`s, otherwise instrument any resulting values. + if async_context { + let mk_fut = match (err_event, ret_event) { + (Some(err_event), Some(ret_event)) => quote_spanned!(block.span()=> + async move { + match async move #block.await { + #[allow(clippy::unit_arg)] + Ok(x) => { + #ret_event; + Ok(x) + }, + Err(e) => { + #err_event; + Err(e) + } + } + } + ), + (Some(err_event), None) => quote_spanned!(block.span()=> + async move { + match async move #block.await { + #[allow(clippy::unit_arg)] + Ok(x) => Ok(x), + Err(e) => { + #err_event; + Err(e) + } + } + } + ), + (None, Some(ret_event)) => quote_spanned!(block.span()=> + async move { + let x = async move #block.await; + #ret_event; + x + } + ), + (None, None) => quote_spanned!(block.span()=> + async move #block + ), + }; + + return quote!( + let __tracing_attr_span = #span; + let __tracing_instrument_future = #mk_fut; + if !__tracing_attr_span.is_disabled() { + #follows_from + tracing::Instrument::instrument( + __tracing_instrument_future, + __tracing_attr_span + ) + .await + } else { + __tracing_instrument_future.await + } + ); + } + + let span = quote!( + // These variables are left uninitialized and initialized only + // if the tracing level is statically enabled at this point. + // While the tracing level is also checked at span creation + // time, that will still create a dummy span, and a dummy guard + // and drop the dummy guard later. 
By lazily initializing these + // variables, Rust will generate a drop flag for them and thus + // only drop the guard if it was created. This creates code that + // is very straightforward for LLVM to optimize out if the tracing + // level is statically disabled, while not causing any performance + // regression in case the level is enabled. + let __tracing_attr_span; + let __tracing_attr_guard; + if tracing::level_enabled!(#level) { + __tracing_attr_span = #span; + #follows_from + __tracing_attr_guard = __tracing_attr_span.enter(); + } + ); + + match (err_event, ret_event) { + (Some(err_event), Some(ret_event)) => quote_spanned! {block.span()=> + #span + #[allow(clippy::redundant_closure_call)] + match (move || #block)() { + #[allow(clippy::unit_arg)] + Ok(x) => { + #ret_event; + Ok(x) + }, + Err(e) => { + #err_event; + Err(e) + } + } + }, + (Some(err_event), None) => quote_spanned!(block.span()=> + #span + #[allow(clippy::redundant_closure_call)] + match (move || #block)() { + #[allow(clippy::unit_arg)] + Ok(x) => Ok(x), + Err(e) => { + #err_event; + Err(e) + } + } + ), + (None, Some(ret_event)) => quote_spanned!(block.span()=> + #span + #[allow(clippy::redundant_closure_call)] + let x = (move || #block)(); + #ret_event; + x + ), + (None, None) => quote_spanned!(block.span() => + // Because `quote` produces a stream of tokens _without_ whitespace, the + // `if` and the block will appear directly next to each other. This + // generates a clippy lint about suspicious `if/else` formatting. + // Therefore, suppress the lint inside the generated code... + #[allow(clippy::suspicious_else_formatting)] + { + #span + // ...but turn the lint back on inside the function body. + #[warn(clippy::suspicious_else_formatting)] + #block + } + ), + } +} + +/// Indicates whether a field should be recorded as `Value` or `Debug`. +enum RecordType { + /// The field should be recorded using its `Value` implementation. + Value, + /// The field should be recorded using `tracing::field::debug()`. + Debug, +} + +impl RecordType { + /// Array of primitive types which should be recorded as [RecordType::Value]. + const TYPES_FOR_VALUE: &'static [&'static str] = &[ + "bool", + "str", + "u8", + "i8", + "u16", + "i16", + "u32", + "i32", + "u64", + "i64", + "f32", + "f64", + "usize", + "isize", + "NonZeroU8", + "NonZeroI8", + "NonZeroU16", + "NonZeroI16", + "NonZeroU32", + "NonZeroI32", + "NonZeroU64", + "NonZeroI64", + "NonZeroUsize", + "NonZeroIsize", + "Wrapping", + ]; + + /// Parse `RecordType` from [syn::Type] by looking up + /// the [RecordType::TYPES_FOR_VALUE] array. + fn parse_from_ty(ty: &syn::Type) -> Self { + match ty { + syn::Type::Path(syn::TypePath { path, .. }) + if path + .segments + .iter() + .last() + .map(|path_segment| { + let ident = path_segment.ident.to_string(); + Self::TYPES_FOR_VALUE.iter().any(|&t| t == ident) + }) + .unwrap_or(false) => + { + RecordType::Value + } + syn::Type::Reference(syn::TypeReference { elem, .. }) => { + RecordType::parse_from_ty(&*elem) + } + _ => RecordType::Debug, + } + } +} + +fn param_names(pat: Pat, record_type: RecordType) -> Box> { + match pat { + Pat::Ident(PatIdent { ident, .. }) => Box::new(iter::once((ident, record_type))), + Pat::Reference(PatReference { pat, .. }) => param_names(*pat, record_type), + // We can't get the concrete type of fields in the struct/tuple + // patterns by using `syn`. e.g. `fn foo(Foo { x, y }: Foo) {}`. + // Therefore, the struct/tuple patterns in the arguments will just + // always be recorded as `RecordType::Debug`. 
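+        // (The `record_type` argument is deliberately ignored in these arms;
+        // every binding they introduce is forced to `RecordType::Debug`.)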
+ Pat::Struct(PatStruct { fields, .. }) => Box::new( + fields + .into_iter() + .flat_map(|FieldPat { pat, .. }| param_names(*pat, RecordType::Debug)), + ), + Pat::Tuple(PatTuple { elems, .. }) => Box::new( + elems + .into_iter() + .flat_map(|p| param_names(p, RecordType::Debug)), + ), + Pat::TupleStruct(PatTupleStruct { + pat: PatTuple { elems, .. }, + .. + }) => Box::new( + elems + .into_iter() + .flat_map(|p| param_names(p, RecordType::Debug)), + ), + + // The above *should* cover all cases of irrefutable patterns, + // but we purposefully don't do any funny business here + // (such as panicking) because that would obscure rustc's + // much more informative error message. + _ => Box::new(iter::empty()), + } +} + +/// The specific async code pattern that was detected +enum AsyncKind<'a> { + /// Immediately-invoked async fn, as generated by `async-trait <= 0.1.43`: + /// `async fn foo<...>(...) {...}; Box::pin(foo<...>(...))` + Function(&'a ItemFn), + /// A function returning an async (move) block, optionally `Box::pin`-ed, + /// as generated by `async-trait >= 0.1.44`: + /// `Box::pin(async move { ... })` + Async { + async_expr: &'a ExprAsync, + pinned_box: bool, + }, +} + +pub(crate) struct AsyncInfo<'block> { + // statement that must be patched + source_stmt: &'block Stmt, + kind: AsyncKind<'block>, + self_type: Option, + input: &'block ItemFn, +} + +impl<'block> AsyncInfo<'block> { + /// Get the AST of the inner function we need to hook, if it looks like a + /// manual future implementation. + /// + /// When we are given a function that returns a (pinned) future containing the + /// user logic, it is that (pinned) future that needs to be instrumented. + /// Were we to instrument its parent, we would only collect information + /// regarding the allocation of that future, and not its own span of execution. + /// + /// We inspect the block of the function to find if it matches any of the + /// following patterns: + /// + /// - Immediately-invoked async fn, as generated by `async-trait <= 0.1.43`: + /// `async fn foo<...>(...) {...}; Box::pin(foo<...>(...))` + /// + /// - A function returning an async (move) block, optionally `Box::pin`-ed, + /// as generated by `async-trait >= 0.1.44`: + /// `Box::pin(async move { ... })` + /// + /// We the return the statement that must be instrumented, along with some + /// other information. + /// 'gen_body' will then be able to use that information to instrument the + /// proper function/future. + /// + /// (this follows the approach suggested in + /// https://github.com/dtolnay/async-trait/issues/45#issuecomment-571245673) + pub(crate) fn from_fn(input: &'block ItemFn) -> Option { + // are we in an async context? If yes, this isn't a manual async-like pattern + if input.sig.asyncness.is_some() { + return None; + } + + let block = &input.block; + + // list of async functions declared inside the block + let inside_funs = block.stmts.iter().filter_map(|stmt| { + if let Stmt::Item(Item::Fn(fun)) = &stmt { + // If the function is async, this is a candidate + if fun.sig.asyncness.is_some() { + return Some((stmt, fun)); + } + } + None + }); + + // last expression of the block: it determines the return value of the + // block, this is quite likely a `Box::pin` statement or an async block + let (last_expr_stmt, last_expr) = block.stmts.iter().rev().find_map(|stmt| { + if let Stmt::Expr(expr) = stmt { + Some((stmt, expr)) + } else { + None + } + })?; + + // is the last expression an async block? 
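+        // If so, it is instrumented directly (`AsyncKind::Async` with
+        // `pinned_box: false`), with no `Box::pin` wrapper to look through.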
+ if let Expr::Async(async_expr) = last_expr { + return Some(AsyncInfo { + source_stmt: last_expr_stmt, + kind: AsyncKind::Async { + async_expr, + pinned_box: false, + }, + self_type: None, + input, + }); + } + + // is the last expression a function call? + let (outside_func, outside_args) = match last_expr { + Expr::Call(ExprCall { func, args, .. }) => (func, args), + _ => return None, + }; + + // is it a call to `Box::pin()`? + let path = match outside_func.as_ref() { + Expr::Path(path) => &path.path, + _ => return None, + }; + if !path_to_string(path).ends_with("Box::pin") { + return None; + } + + // Does the call take an argument? If it doesn't, + // it's not gonna compile anyway, but that's no reason + // to (try to) perform an out of bounds access + if outside_args.is_empty() { + return None; + } + + // Is the argument to Box::pin an async block that + // captures its arguments? + if let Expr::Async(async_expr) = &outside_args[0] { + return Some(AsyncInfo { + source_stmt: last_expr_stmt, + kind: AsyncKind::Async { + async_expr, + pinned_box: true, + }, + self_type: None, + input, + }); + } + + // Is the argument to Box::pin a function call itself? + let func = match &outside_args[0] { + Expr::Call(ExprCall { func, .. }) => func, + _ => return None, + }; + + // "stringify" the path of the function called + let func_name = match **func { + Expr::Path(ref func_path) => path_to_string(&func_path.path), + _ => return None, + }; + + // Was that function defined inside of the current block? + // If so, retrieve the statement where it was declared and the function itself + let (stmt_func_declaration, func) = inside_funs + .into_iter() + .find(|(_, fun)| fun.sig.ident == func_name)?; + + // If "_self" is present as an argument, we store its type to be able to rewrite "Self" (the + // parameter type) with the type of "_self" + let mut self_type = None; + for arg in &func.sig.inputs { + if let FnArg::Typed(ty) = arg { + if let Pat::Ident(PatIdent { ref ident, .. }) = *ty.pat { + if ident == "_self" { + let mut ty = *ty.ty.clone(); + // extract the inner type if the argument is "&self" or "&mut self" + if let syn::Type::Reference(syn::TypeReference { elem, .. }) = ty { + ty = *elem; + } + + if let syn::Type::Path(tp) = ty { + self_type = Some(tp); + break; + } + } + } + } + } + + Some(AsyncInfo { + source_stmt: stmt_func_declaration, + kind: AsyncKind::Function(func), + self_type, + input, + }) + } + + pub(crate) fn gen_async( + self, + args: InstrumentArgs, + instrumented_function_name: &str, + ) -> proc_macro::TokenStream { + // let's rewrite some statements! + let mut out_stmts: Vec = self + .input + .block + .stmts + .iter() + .map(|stmt| stmt.to_token_stream()) + .collect(); + + if let Some((iter, _stmt)) = self + .input + .block + .stmts + .iter() + .enumerate() + .find(|(_iter, stmt)| *stmt == self.source_stmt) + { + // instrument the future by rewriting the corresponding statement + out_stmts[iter] = match self.kind { + // `Box::pin(immediately_invoked_async_fn())` + AsyncKind::Function(fun) => gen_function( + fun.into(), + args, + instrumented_function_name, + self.self_type.as_ref(), + ), + // `async move { ... }`, optionally pinned + AsyncKind::Async { + async_expr, + pinned_box, + } => { + let instrumented_block = gen_block( + &async_expr.block, + &self.input.sig.inputs, + true, + args, + instrumented_function_name, + None, + ); + let async_attrs = &async_expr.attrs; + if pinned_box { + quote! { + Box::pin(#(#async_attrs) * async move { #instrumented_block }) + } + } else { + quote! 
{ + #(#async_attrs) * async move { #instrumented_block } + } + } + } + }; + } + + let vis = &self.input.vis; + let sig = &self.input.sig; + let attrs = &self.input.attrs; + quote!( + #(#attrs) * + #vis #sig { + #(#out_stmts) * + } + ) + .into() + } +} + +// Return a path as a String +fn path_to_string(path: &Path) -> String { + use std::fmt::Write; + // some heuristic to prevent too many allocations + let mut res = String::with_capacity(path.segments.len() * 5); + for i in 0..path.segments.len() { + write!(&mut res, "{}", path.segments[i].ident) + .expect("writing to a String should never fail"); + if i < path.segments.len() - 1 { + res.push_str("::"); + } + } + res +} + +/// A visitor struct to replace idents and types in some piece +/// of code (e.g. the "self" and "Self" tokens in user-supplied +/// fields expressions when the function is generated by an old +/// version of async-trait). +struct IdentAndTypesRenamer<'a> { + types: Vec<(&'a str, TypePath)>, + idents: Vec<(Ident, Ident)>, +} + +impl<'a> syn::visit_mut::VisitMut for IdentAndTypesRenamer<'a> { + // we deliberately compare strings because we want to ignore the spans + // If we apply clippy's lint, the behavior changes + #[allow(clippy::cmp_owned)] + fn visit_ident_mut(&mut self, id: &mut Ident) { + for (old_ident, new_ident) in &self.idents { + if id.to_string() == old_ident.to_string() { + *id = new_ident.clone(); + } + } + } + + fn visit_type_mut(&mut self, ty: &mut syn::Type) { + for (type_name, new_type) in &self.types { + if let syn::Type::Path(TypePath { path, .. }) = ty { + if path_to_string(path) == *type_name { + *ty = syn::Type::Path(new_type.clone()); + } + } + } + } +} + +// A visitor struct that replace an async block by its patched version +struct AsyncTraitBlockReplacer<'a> { + block: &'a Block, + patched_block: Block, +} + +impl<'a> syn::visit_mut::VisitMut for AsyncTraitBlockReplacer<'a> { + fn visit_block_mut(&mut self, i: &mut Block) { + if i == self.block { + *i = self.patched_block.clone(); + } + } +} diff --git a/third_party/rust/tracing-attributes/src/lib.rs b/third_party/rust/tracing-attributes/src/lib.rs new file mode 100644 index 000000000000..a1d2d09d21e9 --- /dev/null +++ b/third_party/rust/tracing-attributes/src/lib.rs @@ -0,0 +1,656 @@ +//! A procedural macro attribute for instrumenting functions with [`tracing`]. +//! +//! [`tracing`] is a framework for instrumenting Rust programs to collect +//! structured, event-based diagnostic information. This crate provides the +//! [`#[instrument]`][instrument] procedural macro attribute. +//! +//! Note that this macro is also re-exported by the main `tracing` crate. +//! +//! *Compiler support: [requires `rustc` 1.49+][msrv]* +//! +//! [msrv]: #supported-rust-versions +//! +//! ## Usage +//! +//! First, add this to your `Cargo.toml`: +//! +//! ```toml +//! [dependencies] +//! tracing-attributes = "0.1.21" +//! ``` +//! +//! The [`#[instrument]`][instrument] attribute can now be added to a function +//! to automatically create and enter `tracing` [span] when that function is +//! called. For example: +//! +//! ``` +//! use tracing_attributes::instrument; +//! +//! #[instrument] +//! pub fn my_function(my_arg: usize) { +//! // ... +//! } +//! +//! # fn main() {} +//! ``` +//! +//! [`tracing`]: https://crates.io/crates/tracing +//! [span]: https://docs.rs/tracing/latest/tracing/span/index.html +//! [instrument]: macro@self::instrument +//! +//! ## Supported Rust Versions +//! +//! Tracing is built against the latest stable release. 
The minimum supported +//! version is 1.49. The current Tracing version is not guaranteed to build on +//! Rust versions earlier than the minimum supported version. +//! +//! Tracing follows the same compiler support policies as the rest of the Tokio +//! project. The current stable Rust compiler and the three most recent minor +//! versions before it will always be supported. For example, if the current +//! stable compiler version is 1.45, the minimum supported version will not be +//! increased past 1.42, three minor versions prior. Increasing the minimum +//! supported compiler version is not considered a semver breaking change as +//! long as doing so complies with this policy. +//! +#![doc(html_root_url = "https://docs.rs/tracing-attributes/0.1.21")] +#![doc( + html_logo_url = "https://raw.githubusercontent.com/tokio-rs/tracing/master/assets/logo-type.png", + issue_tracker_base_url = "https://github.com/tokio-rs/tracing/issues/" +)] +#![cfg_attr(docsrs, deny(rustdoc::broken_intra_doc_links))] +#![warn( + missing_debug_implementations, + missing_docs, + rust_2018_idioms, + unreachable_pub, + bad_style, + const_err, + dead_code, + improper_ctypes, + non_shorthand_field_patterns, + no_mangle_generic_items, + overflowing_literals, + path_statements, + patterns_in_fns_without_body, + private_in_public, + unconditional_recursion, + unused_allocation, + unused_comparisons, + unused_parens, + while_true +)] +// TODO: once `tracing` bumps its MSRV to 1.42, remove this allow. +#![allow(unused)] +extern crate proc_macro; + +use proc_macro2::TokenStream; +use quote::ToTokens; +use syn::parse::{Parse, ParseStream}; +use syn::{Attribute, Block, ItemFn, Signature, Visibility}; + +mod attr; +mod expand; +/// Instruments a function to create and enter a `tracing` [span] every time +/// the function is called. +/// +/// Unless overriden, a span with `info` level will be generated. +/// The generated span's name will be the name of the function. +/// By default, all arguments to the function are included as fields on the +/// span. Arguments that are `tracing` [primitive types] implementing the +/// [`Value` trait] will be recorded as fields of that type. Types which do +/// not implement `Value` will be recorded using [`std::fmt::Debug`]. +/// +/// [primitive types]: https://docs.rs/tracing/latest/tracing/field/trait.Value.html#foreign-impls +/// [`Value` trait]: https://docs.rs/tracing/latest/tracing/field/trait.Value.html. +/// +/// # Overriding Span Attributes +/// +/// To change the [name] of the generated span, add a `name` argument to the +/// `#[instrument]` macro, followed by an equals sign and a string literal. For +/// example: +/// +/// ``` +/// # use tracing_attributes::instrument; +/// +/// // The generated span's name will be "my_span" rather than "my_function". +/// #[instrument(name = "my_span")] +/// pub fn my_function() { +/// // ... do something incredibly interesting and important ... +/// } +/// ``` +/// +/// To override the [target] of the generated span, add a `target` argument to +/// the `#[instrument]` macro, followed by an equals sign and a string literal +/// for the new target. The [module path] is still recorded separately. For +/// example: +/// +/// ``` +/// pub mod my_module { +/// # use tracing_attributes::instrument; +/// // The generated span's target will be "my_crate::some_special_target", +/// // rather than "my_crate::my_module". +/// #[instrument(target = "my_crate::some_special_target")] +/// pub fn my_function() { +/// // ... 
all kinds of neat code in here ... +/// } +/// } +/// ``` +/// +/// Finally, to override the [level] of the generated span, add a `level` +/// argument, followed by an equals sign and a string literal with the name of +/// the desired level. Level names are not case sensitive. For example: +/// +/// ``` +/// # use tracing_attributes::instrument; +/// // The span's level will be TRACE rather than INFO. +/// #[instrument(level = "trace")] +/// pub fn my_function() { +/// // ... I have written a truly marvelous implementation of this function, +/// // which this example is too narrow to contain ... +/// } +/// ``` +/// +/// # Skipping Fields +/// +/// To skip recording one or more arguments to a function or method, pass +/// the argument's name inside the `skip()` argument on the `#[instrument]` +/// macro. This can be used when an argument to an instrumented function does +/// not implement [`fmt::Debug`], or to exclude an argument with a verbose or +/// costly `Debug` implementation. Note that: +/// +/// - multiple argument names can be passed to `skip`. +/// - arguments passed to `skip` do _not_ need to implement `fmt::Debug`. +/// +/// You can also use `skip_all` to skip all arguments. +/// +/// ## Examples +/// +/// ``` +/// # use tracing_attributes::instrument; +/// # use std::collections::HashMap; +/// // This type doesn't implement `fmt::Debug`! +/// struct NonDebug; +/// +/// // `arg` will be recorded, while `non_debug` will not. +/// #[instrument(skip(non_debug))] +/// fn my_function(arg: usize, non_debug: NonDebug) { +/// // ... +/// } +/// +/// // These arguments are huge +/// #[instrument(skip_all)] +/// fn my_big_data_function(large: Vec, also_large: HashMap) { +/// // ... +/// } +/// ``` +/// +/// Skipping the `self` parameter: +/// +/// ``` +/// # use tracing_attributes::instrument; +/// #[derive(Debug)] +/// struct MyType { +/// data: Vec, // Suppose this buffer is often quite long... +/// } +/// +/// impl MyType { +/// // Suppose we don't want to print an entire kilobyte of `data` +/// // every time this is called... +/// #[instrument(skip(self))] +/// pub fn my_method(&mut self, an_interesting_argument: usize) { +/// // ... do something (hopefully, using all that `data`!) +/// } +/// } +/// ``` +/// +/// # Adding Fields +/// +/// Additional fields (key-value pairs with arbitrary data) may be added to the +/// generated span using the `fields` argument on the `#[instrument]` macro. Any +/// Rust expression can be used as a field value in this manner. These +/// expressions will be evaluated at the beginning of the function's body, so +/// arguments to the function may be used in these expressions. Field names may +/// also be specified *without* values. Doing so will result in an [empty field] +/// whose value may be recorded later within the function body. +/// +/// This supports the same [field syntax] as the `span!` and `event!` macros. +/// +/// Note that overlap between the names of fields and (non-skipped) arguments +/// will result in a compile error. +/// +/// ## Examples +/// +/// Adding a new field based on the value of an argument: +/// +/// ``` +/// # use tracing_attributes::instrument; +/// +/// // This will record a field named "i" with the value of `i` *and* a field +/// // named "next" with the value of `i` + 1. +/// #[instrument(fields(next = i + 1))] +/// pub fn my_function(i: usize) { +/// // ... 
+/// } +/// ``` +/// +/// Recording specific properties of a struct as their own fields: +/// +/// ``` +/// # mod http { +/// # pub struct Error; +/// # pub struct Response { pub(super) _b: std::marker::PhantomData } +/// # pub struct Request { _b: B } +/// # impl std::fmt::Debug for Request { +/// # fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { +/// # f.pad("request") +/// # } +/// # } +/// # impl Request { +/// # pub fn uri(&self) -> &str { "fake" } +/// # pub fn method(&self) -> &str { "GET" } +/// # } +/// # } +/// # use tracing_attributes::instrument; +/// +/// // This will record the request's URI and HTTP method as their own separate +/// // fields. +/// #[instrument(fields(http.uri = req.uri(), http.method = req.method()))] +/// pub fn handle_request(req: http::Request) -> http::Response { +/// // ... handle the request ... +/// # http::Response { _b: std::marker::PhantomData } +/// } +/// ``` +/// +/// This can be used in conjunction with `skip` or `skip_all` to record only +/// some fields of a struct: +/// ``` +/// # use tracing_attributes::instrument; +/// // Remember the struct with the very large `data` field from the earlier +/// // example? Now it also has a `name`, which we might want to include in +/// // our span. +/// #[derive(Debug)] +/// struct MyType { +/// name: &'static str, +/// data: Vec, +/// } +/// +/// impl MyType { +/// // This will skip the `data` field, but will include `self.name`, +/// // formatted using `fmt::Display`. +/// #[instrument(skip(self), fields(self.name = %self.name))] +/// pub fn my_method(&mut self, an_interesting_argument: usize) { +/// // ... do something (hopefully, using all that `data`!) +/// } +/// } +/// ``` +/// +/// Adding an empty field to be recorded later: +/// +/// ``` +/// # use tracing_attributes::instrument; +/// +/// // This function does a very interesting and important mathematical calculation. +/// // Suppose we want to record both the inputs to the calculation *and* its result... +/// #[instrument(fields(result))] +/// pub fn do_calculation(input_1: usize, input_2: usize) -> usize { +/// // Rerform the calculation. +/// let result = input_1 + input_2; +/// +/// // Record the result as part of the current span. +/// tracing::Span::current().record("result", &result); +/// +/// // Now, the result will also be included on this event! +/// tracing::info!("calculation complete!"); +/// +/// // ... etc ... +/// # 0 +/// } +/// ``` +/// +/// # Examples +/// +/// Instrumenting a function: +/// +/// ``` +/// # use tracing_attributes::instrument; +/// #[instrument] +/// pub fn my_function(my_arg: usize) { +/// // This event will be recorded inside a span named `my_function` with the +/// // field `my_arg`. +/// tracing::info!("inside my_function!"); +/// // ... +/// } +/// ``` +/// Setting the level for the generated span: +/// ``` +/// # use tracing_attributes::instrument; +/// #[instrument(level = "debug")] +/// pub fn my_function() { +/// // ... +/// } +/// ``` +/// Overriding the generated span's name: +/// ``` +/// # use tracing_attributes::instrument; +/// #[instrument(name = "my_name")] +/// pub fn my_function() { +/// // ... +/// } +/// ``` +/// Overriding the generated span's target: +/// ``` +/// # use tracing_attributes::instrument; +/// #[instrument(target = "my_target")] +/// pub fn my_function() { +/// // ... +/// } +/// ``` +/// Overriding the generated span's parent: +/// ``` +/// # use tracing_attributes::instrument; +/// #[instrument(parent = None)] +/// pub fn my_function() { +/// // ... 
+/// } +/// ``` +/// ``` +/// # use tracing_attributes::instrument; +/// // A struct which owns a span handle. +/// struct MyStruct +/// { +/// span: tracing::Span +/// } +/// +/// impl MyStruct +/// { +/// // Use the struct's `span` field as the parent span +/// #[instrument(parent = &self.span, skip(self))] +/// fn my_method(&self) {} +/// } +/// ``` +/// Specifying [`follows_from`] relationships: +/// ``` +/// # use tracing_attributes::instrument; +/// #[instrument(follows_from = causes)] +/// pub fn my_function(causes: &[tracing::Id]) { +/// // ... +/// } +/// ``` +/// Any expression of type `impl IntoIterator>>` +/// may be provided to `follows_from`; e.g.: +/// ``` +/// # use tracing_attributes::instrument; +/// #[instrument(follows_from = [cause])] +/// pub fn my_function(cause: &tracing::span::EnteredSpan) { +/// // ... +/// } +/// ``` +/// +/// +/// To skip recording an argument, pass the argument's name to the `skip`: +/// +/// ``` +/// # use tracing_attributes::instrument; +/// struct NonDebug; +/// +/// #[instrument(skip(non_debug))] +/// fn my_function(arg: usize, non_debug: NonDebug) { +/// // ... +/// } +/// ``` +/// +/// To add an additional context to the span, pass key-value pairs to `fields`: +/// +/// ``` +/// # use tracing_attributes::instrument; +/// #[instrument(fields(foo="bar", id=1, show=true))] +/// fn my_function(arg: usize) { +/// // ... +/// } +/// ``` +/// +/// Adding the `ret` argument to `#[instrument]` will emit an event with the function's +/// return value when the function returns: +/// +/// ``` +/// # use tracing_attributes::instrument; +/// #[instrument(ret)] +/// fn my_function() -> i32 { +/// 42 +/// } +/// ``` +/// The return value event will have the same level as the span generated by `#[instrument]`. +/// By default, this will be `TRACE`, but if the level is overridden, the event will be at the same +/// level. +/// +/// **Note**: if the function returns a `Result`, `ret` will record returned values if and +/// only if the function returns [`Result::Ok`]. +/// +/// By default, returned values will be recorded using their [`std::fmt::Debug`] implementations. +/// If a returned value implements [`std::fmt::Display`], it can be recorded using its `Display` +/// implementation instead, by writing `ret(Display)`: +/// +/// ``` +/// # use tracing_attributes::instrument; +/// #[instrument(ret(Display))] +/// fn my_function() -> i32 { +/// 42 +/// } +/// ``` +/// +/// If the function returns a `Result` and `E` implements `std::fmt::Display`, you can add +/// `err` or `err(Display)` to emit error events when the function returns `Err`: +/// +/// ``` +/// # use tracing_attributes::instrument; +/// #[instrument(err)] +/// fn my_function(arg: usize) -> Result<(), std::io::Error> { +/// Ok(()) +/// } +/// ``` +/// +/// By default, error values will be recorded using their `std::fmt::Display` implementations. 
+/// If an error implements `std::fmt::Debug`, it can be recorded using its `Debug` implementation +/// instead, by writing `err(Debug)`: +/// +/// ``` +/// # use tracing_attributes::instrument; +/// #[instrument(err(Debug))] +/// fn my_function(arg: usize) -> Result<(), std::io::Error> { +/// Ok(()) +/// } +/// ``` +/// +/// The `ret` and `err` arguments can be combined in order to record an event if a +/// function returns [`Result::Ok`] or [`Result::Err`]: +/// +/// ``` +/// # use tracing_attributes::instrument; +/// #[instrument(err, ret)] +/// fn my_function(arg: usize) -> Result<(), std::io::Error> { +/// Ok(()) +/// } +/// ``` +/// +/// `async fn`s may also be instrumented: +/// +/// ``` +/// # use tracing_attributes::instrument; +/// #[instrument] +/// pub async fn my_function() -> Result<(), ()> { +/// // ... +/// # Ok(()) +/// } +/// ``` +/// +/// It also works with [async-trait](https://crates.io/crates/async-trait) +/// (a crate that allows defining async functions in traits, +/// something not currently possible in Rust), +/// and hopefully most libraries that exhibit similar behaviors: +/// +/// ``` +/// # use tracing::instrument; +/// use async_trait::async_trait; +/// +/// #[async_trait] +/// pub trait Foo { +/// async fn foo(&self, arg: usize); +/// } +/// +/// #[derive(Debug)] +/// struct FooImpl(usize); +/// +/// #[async_trait] +/// impl Foo for FooImpl { +/// #[instrument(fields(value = self.0, tmp = std::any::type_name::()))] +/// async fn foo(&self, arg: usize) {} +/// } +/// ``` +/// +/// Note than on `async-trait` <= 0.1.43, references to the `Self` +/// type inside the `fields` argument were only allowed when the instrumented +/// function is a method (i.e., the function receives `self` as an argument). +/// For example, this *used to not work* because the instrument function +/// didn't receive `self`: +/// ``` +/// # use tracing::instrument; +/// use async_trait::async_trait; +/// +/// #[async_trait] +/// pub trait Bar { +/// async fn bar(); +/// } +/// +/// #[derive(Debug)] +/// struct BarImpl(usize); +/// +/// #[async_trait] +/// impl Bar for BarImpl { +/// #[instrument(fields(tmp = std::any::type_name::()))] +/// async fn bar() {} +/// } +/// ``` +/// Instead, you should manually rewrite any `Self` types as the type for +/// which you implement the trait: `#[instrument(fields(tmp = std::any::type_name::()))]` +/// (or maybe you can just bump `async-trait`). 
+/// +/// [span]: https://docs.rs/tracing/latest/tracing/span/index.html +/// [name]: https://docs.rs/tracing/latest/tracing/struct.Metadata.html#method.name +/// [target]: https://docs.rs/tracing/latest/tracing/struct.Metadata.html#method.target +/// [level]: https://docs.rs/tracing/latest/tracing/struct.Level.html +/// [module path]: https://docs.rs/tracing/latest/tracing/struct.Metadata.html#method.module_path +/// [`INFO`]: https://docs.rs/tracing/latest/tracing/struct.Level.html#associatedconstant.INFO +/// [empty field]: https://docs.rs/tracing/latest/tracing/field/struct.Empty.html +/// [field syntax]: https://docs.rs/tracing/latest/tracing/#recording-fields +/// [`follows_from`]: https://docs.rs/tracing/latest/tracing/struct.Span.html#method.follows_from +/// [`tracing`]: https://github.com/tokio-rs/tracing +/// [`fmt::Debug`]: std::fmt::Debug +#[proc_macro_attribute] +pub fn instrument( + args: proc_macro::TokenStream, + item: proc_macro::TokenStream, +) -> proc_macro::TokenStream { + let args = syn::parse_macro_input!(args as attr::InstrumentArgs); + // Cloning a `TokenStream` is cheap since it's reference counted internally. + instrument_precise(args.clone(), item.clone()) + .unwrap_or_else(|_err| instrument_speculative(args, item)) +} + +/// Instrument the function, without parsing the function body (instead using the raw tokens). +fn instrument_speculative( + args: attr::InstrumentArgs, + item: proc_macro::TokenStream, +) -> proc_macro::TokenStream { + let input = syn::parse_macro_input!(item as MaybeItemFn); + let instrumented_function_name = input.sig.ident.to_string(); + expand::gen_function( + input.as_ref(), + args, + instrumented_function_name.as_str(), + None, + ) + .into() +} + +/// Instrument the function, by fully parsing the function body, +/// which allows us to rewrite some statements related to async-like patterns. +fn instrument_precise( + args: attr::InstrumentArgs, + item: proc_macro::TokenStream, +) -> Result { + let input = syn::parse::(item)?; + let instrumented_function_name = input.sig.ident.to_string(); + + // check for async_trait-like patterns in the block, and instrument + // the future instead of the wrapper + if let Some(async_like) = expand::AsyncInfo::from_fn(&input) { + return Ok(async_like.gen_async(args, instrumented_function_name.as_str())); + } + + Ok(expand::gen_function( + (&input).into(), + args, + instrumented_function_name.as_str(), + None, + ) + .into()) +} + +/// This is a more flexible/imprecise `ItemFn` type, +/// which's block is just a `TokenStream` (it may contain invalid code). +#[derive(Debug, Clone)] +struct MaybeItemFn { + attrs: Vec, + vis: Visibility, + sig: Signature, + block: TokenStream, +} + +impl MaybeItemFn { + fn as_ref(&self) -> MaybeItemFnRef<'_, TokenStream> { + MaybeItemFnRef { + attrs: &self.attrs, + vis: &self.vis, + sig: &self.sig, + block: &self.block, + } + } +} + +/// This parses a `TokenStream` into a `MaybeItemFn` +/// (just like `ItemFn`, but skips parsing the body). +impl Parse for MaybeItemFn { + fn parse(input: ParseStream<'_>) -> syn::Result { + let attrs = input.call(syn::Attribute::parse_outer)?; + let vis: Visibility = input.parse()?; + let sig: Signature = input.parse()?; + let block: TokenStream = input.parse()?; + Ok(Self { + attrs, + vis, + sig, + block, + }) + } +} + +/// A generic reference type for `MaybeItemFn`, +/// that takes a generic block type `B` that implements `ToTokens` (eg. `TokenStream`, `Block`). 
+#[derive(Debug, Clone)] +struct MaybeItemFnRef<'a, B: ToTokens> { + attrs: &'a Vec<syn::Attribute>, + vis: &'a Visibility, + sig: &'a Signature, + block: &'a B, +} + +impl<'a> From<&'a ItemFn> for MaybeItemFnRef<'a, Box<syn::Block>> { + fn from(val: &'a ItemFn) -> Self { + MaybeItemFnRef { + attrs: &val.attrs, + vis: &val.vis, + sig: &val.sig, + block: &val.block, + } + } +} diff --git a/third_party/rust/tracing-attributes/tests/async_fn.rs b/third_party/rust/tracing-attributes/tests/async_fn.rs new file mode 100644 index 000000000000..7e27fb5ce237 --- /dev/null +++ b/third_party/rust/tracing-attributes/tests/async_fn.rs @@ -0,0 +1,449 @@ +use tracing_mock::*; + +use std::convert::Infallible; +use std::{future::Future, pin::Pin, sync::Arc}; +use tracing::subscriber::with_default; +use tracing_attributes::instrument; + +#[instrument] +async fn test_async_fn(polls: usize) -> Result<(), ()> { + let future = PollN::new_ok(polls); + tracing::trace!(awaiting = true); + future.await +} + +// Reproduces a compile error when returning an `impl Trait` from an +// instrumented async fn (see https://github.com/tokio-rs/tracing/issues/1615) +#[instrument] +async fn test_ret_impl_trait(n: i32) -> Result<impl Iterator<Item = i32>, ()> { + let n = n; + Ok((0..10).filter(move |x| *x < n)) +} + +// Reproduces a compile error when returning an `impl Trait` from an +// instrumented async fn (see https://github.com/tokio-rs/tracing/issues/1615) +#[instrument(err)] +async fn test_ret_impl_trait_err(n: i32) -> Result<impl Iterator<Item = i32>, &'static str> { + Ok((0..10).filter(move |x| *x < n)) +} + +#[instrument] +async fn test_async_fn_empty() {} + +// Reproduces https://github.com/tokio-rs/tracing/issues/1613 +#[instrument] +// LOAD-BEARING `#[rustfmt::skip]`! This is necessary to reproduce the bug; +// with the rustfmt-generated formatting, the lint will not be triggered!
+#[rustfmt::skip] +#[deny(clippy::suspicious_else_formatting)] +async fn repro_1613(var: bool) { + println!( + "{}", + if var { "true" } else { "false" } + ); +} + +// Reproduces https://github.com/tokio-rs/tracing/issues/1613 +// and https://github.com/rust-lang/rust-clippy/issues/7760 +#[instrument] +#[deny(clippy::suspicious_else_formatting)] +async fn repro_1613_2() { + // hello world + // else +} + +// Reproduces https://github.com/tokio-rs/tracing/issues/1831 +#[instrument] +#[deny(unused_braces)] +fn repro_1831() -> Pin<Box<dyn Future<Output = ()>>> { + Box::pin(async move {}) +} + +// This replicates the pattern used to implement async trait methods on nightly using the +// `type_alias_impl_trait` feature +#[instrument(ret, err)] +#[deny(unused_braces)] +#[allow(clippy::manual_async_fn)] +fn repro_1831_2() -> impl Future<Output = Result<(), Infallible>> { + async { Ok(()) } +} + +#[test] +fn async_fn_only_enters_for_polls() { + let (subscriber, handle) = subscriber::mock() + .new_span(span::mock().named("test_async_fn")) + .enter(span::mock().named("test_async_fn")) + .event(event::mock().with_fields(field::mock("awaiting").with_value(&true))) + .exit(span::mock().named("test_async_fn")) + .enter(span::mock().named("test_async_fn")) + .exit(span::mock().named("test_async_fn")) + .drop_span(span::mock().named("test_async_fn")) + .done() + .run_with_handle(); + with_default(subscriber, || { + block_on_future(async { test_async_fn(2).await }).unwrap(); + }); + handle.assert_finished(); +} + +#[test] +fn async_fn_nested() { + #[instrument] + async fn test_async_fns_nested() { + test_async_fns_nested_other().await + } + + #[instrument] + async fn test_async_fns_nested_other() { + tracing::trace!(nested = true); + } + + let span = span::mock().named("test_async_fns_nested"); + let span2 = span::mock().named("test_async_fns_nested_other"); + let (subscriber, handle) = subscriber::mock() + .new_span(span.clone()) + .enter(span.clone()) + .new_span(span2.clone()) + .enter(span2.clone()) + .event(event::mock().with_fields(field::mock("nested").with_value(&true))) + .exit(span2.clone()) + .drop_span(span2) + .exit(span.clone()) + .drop_span(span) + .done() + .run_with_handle(); + + with_default(subscriber, || { + block_on_future(async { test_async_fns_nested().await }); + }); + + handle.assert_finished(); +} + +#[test] +fn async_fn_with_async_trait() { + use async_trait::async_trait; + + // test the correctness of the metadata obtained by #[instrument] + // (function name, function parameters) when async-trait is used + #[async_trait] + pub trait TestA { + async fn foo(&mut self, v: usize); + } + + // test nesting of async fns with async-trait + #[async_trait] + pub trait TestB { + async fn bar(&self); + } + + // test skip(self) with async-await + #[async_trait] + pub trait TestC { + async fn baz(&self); + } + + #[derive(Debug)] + struct TestImpl(usize); + + #[async_trait] + impl TestA for TestImpl { + #[instrument] + async fn foo(&mut self, v: usize) { + self.baz().await; + self.0 = v; + self.bar().await + } + } + + #[async_trait] + impl TestB for TestImpl { + #[instrument] + async fn bar(&self) { + tracing::trace!(val = self.0); + } + } + + #[async_trait] + impl TestC for TestImpl { + #[instrument(skip(self))] + async fn baz(&self) { + tracing::trace!(val = self.0); + } + } + + let span = span::mock().named("foo"); + let span2 = span::mock().named("bar"); + let span3 = span::mock().named("baz"); + let (subscriber, handle) = subscriber::mock() + .new_span( + span.clone() + .with_field(field::mock("self")) + .with_field(field::mock("v")), + ) +
.enter(span.clone()) + .new_span(span3.clone()) + .enter(span3.clone()) + .event(event::mock().with_fields(field::mock("val").with_value(&2u64))) + .exit(span3.clone()) + .drop_span(span3) + .new_span(span2.clone().with_field(field::mock("self"))) + .enter(span2.clone()) + .event(event::mock().with_fields(field::mock("val").with_value(&5u64))) + .exit(span2.clone()) + .drop_span(span2) + .exit(span.clone()) + .drop_span(span) + .done() + .run_with_handle(); + + with_default(subscriber, || { + let mut test = TestImpl(2); + block_on_future(async { test.foo(5).await }); + }); + + handle.assert_finished(); +} + +#[test] +fn async_fn_with_async_trait_and_fields_expressions() { + use async_trait::async_trait; + + #[async_trait] + pub trait Test { + async fn call(&mut self, v: usize); + } + + #[derive(Clone, Debug)] + struct TestImpl; + + impl TestImpl { + fn foo(&self) -> usize { + 42 + } + } + + #[async_trait] + impl Test for TestImpl { + // check that self is correctly handled, even when using async_trait + #[instrument(fields(val=self.foo(), val2=Self::clone(self).foo(), test=%_v+5))] + async fn call(&mut self, _v: usize) {} + } + + let span = span::mock().named("call"); + let (subscriber, handle) = subscriber::mock() + .new_span( + span.clone().with_field( + field::mock("_v") + .with_value(&5usize) + .and(field::mock("test").with_value(&tracing::field::debug(10))) + .and(field::mock("val").with_value(&42u64)) + .and(field::mock("val2").with_value(&42u64)), + ), + ) + .enter(span.clone()) + .exit(span.clone()) + .drop_span(span) + .done() + .run_with_handle(); + + with_default(subscriber, || { + block_on_future(async { TestImpl.call(5).await }); + }); + + handle.assert_finished(); +} + +#[test] +fn async_fn_with_async_trait_and_fields_expressions_with_generic_parameter() { + use async_trait::async_trait; + + #[async_trait] + pub trait Test { + async fn call(); + async fn call_with_self(&self); + async fn call_with_mut_self(&mut self); + } + + #[derive(Clone, Debug)] + struct TestImpl; + + // we also test sync functions that return futures, as they should be handled just like + // async-trait (>= 0.1.44) functions + impl TestImpl { + #[instrument(fields(Self=std::any::type_name::()))] + fn sync_fun(&self) -> Pin + Send + '_>> { + let val = self.clone(); + Box::pin(async move { + let _ = val; + }) + } + } + + #[async_trait] + impl Test for TestImpl { + // instrumenting this is currently not possible, see https://github.com/tokio-rs/tracing/issues/864#issuecomment-667508801 + //#[instrument(fields(Self=std::any::type_name::()))] + async fn call() {} + + #[instrument(fields(Self=std::any::type_name::()))] + async fn call_with_self(&self) { + self.sync_fun().await; + } + + #[instrument(fields(Self=std::any::type_name::()))] + async fn call_with_mut_self(&mut self) {} + } + + //let span = span::mock().named("call"); + let span2 = span::mock().named("call_with_self"); + let span3 = span::mock().named("call_with_mut_self"); + let span4 = span::mock().named("sync_fun"); + let (subscriber, handle) = subscriber::mock() + /*.new_span(span.clone() + .with_field( + field::mock("Self").with_value(&"TestImpler"))) + .enter(span.clone()) + .exit(span.clone()) + .drop_span(span)*/ + .new_span( + span2 + .clone() + .with_field(field::mock("Self").with_value(&std::any::type_name::())), + ) + .enter(span2.clone()) + .new_span( + span4 + .clone() + .with_field(field::mock("Self").with_value(&std::any::type_name::())), + ) + .enter(span4.clone()) + .exit(span4) + .exit(span2.clone()) + .drop_span(span2) + 
.new_span( + span3 + .clone() + .with_field(field::mock("Self").with_value(&std::any::type_name::())), + ) + .enter(span3.clone()) + .exit(span3.clone()) + .drop_span(span3) + .done() + .run_with_handle(); + + with_default(subscriber, || { + block_on_future(async { + TestImpl::call().await; + TestImpl.call_with_self().await; + TestImpl.call_with_mut_self().await + }); + }); + + handle.assert_finished(); +} + +#[test] +fn out_of_scope_fields() { + // Reproduces tokio-rs/tracing#1296 + + struct Thing { + metrics: Arc<()>, + } + + impl Thing { + #[instrument(skip(self, _req), fields(app_id))] + fn call(&mut self, _req: ()) -> Pin> + Send + Sync>> { + // ... + let metrics = self.metrics.clone(); + // ... + Box::pin(async move { + // ... + metrics // cannot find value `metrics` in this scope + }) + } + } + + let span = span::mock().named("call"); + let (subscriber, handle) = subscriber::mock() + .new_span(span.clone()) + .enter(span.clone()) + .exit(span.clone()) + .drop_span(span) + .done() + .run_with_handle(); + + with_default(subscriber, || { + block_on_future(async { + let mut my_thing = Thing { + metrics: Arc::new(()), + }; + my_thing.call(()).await; + }); + }); + + handle.assert_finished(); +} + +#[test] +fn manual_impl_future() { + #[allow(clippy::manual_async_fn)] + #[instrument] + fn manual_impl_future() -> impl Future { + async { + tracing::trace!(poll = true); + } + } + + let span = span::mock().named("manual_impl_future"); + let poll_event = || event::mock().with_fields(field::mock("poll").with_value(&true)); + + let (subscriber, handle) = subscriber::mock() + // await manual_impl_future + .new_span(span.clone()) + .enter(span.clone()) + .event(poll_event()) + .exit(span.clone()) + .drop_span(span) + .done() + .run_with_handle(); + + with_default(subscriber, || { + block_on_future(async { + manual_impl_future().await; + }); + }); + + handle.assert_finished(); +} + +#[test] +fn manual_box_pin() { + #[instrument] + fn manual_box_pin() -> Pin>> { + Box::pin(async { + tracing::trace!(poll = true); + }) + } + + let span = span::mock().named("manual_box_pin"); + let poll_event = || event::mock().with_fields(field::mock("poll").with_value(&true)); + + let (subscriber, handle) = subscriber::mock() + // await manual_box_pin + .new_span(span.clone()) + .enter(span.clone()) + .event(poll_event()) + .exit(span.clone()) + .drop_span(span) + .done() + .run_with_handle(); + + with_default(subscriber, || { + block_on_future(async { + manual_box_pin().await; + }); + }); + + handle.assert_finished(); +} diff --git a/third_party/rust/tracing-attributes/tests/destructuring.rs b/third_party/rust/tracing-attributes/tests/destructuring.rs new file mode 100644 index 000000000000..09cf1ad53442 --- /dev/null +++ b/third_party/rust/tracing-attributes/tests/destructuring.rs @@ -0,0 +1,213 @@ +use tracing::subscriber::with_default; +use tracing_attributes::instrument; +use tracing_mock::*; + +#[test] +fn destructure_tuples() { + #[instrument] + fn my_fn((arg1, arg2): (usize, usize)) {} + + let span = span::mock().named("my_fn"); + + let (subscriber, handle) = subscriber::mock() + .new_span( + span.clone().with_field( + field::mock("arg1") + .with_value(&format_args!("1")) + .and(field::mock("arg2").with_value(&format_args!("2"))) + .only(), + ), + ) + .enter(span.clone()) + .exit(span.clone()) + .drop_span(span) + .done() + .run_with_handle(); + + with_default(subscriber, || { + my_fn((1, 2)); + }); + + handle.assert_finished(); +} + +#[test] +fn destructure_nested_tuples() { + #[instrument] + fn my_fn(((arg1, 
arg2), (arg3, arg4)): ((usize, usize), (usize, usize))) {} + + let span = span::mock().named("my_fn"); + + let (subscriber, handle) = subscriber::mock() + .new_span( + span.clone().with_field( + field::mock("arg1") + .with_value(&format_args!("1")) + .and(field::mock("arg2").with_value(&format_args!("2"))) + .and(field::mock("arg3").with_value(&format_args!("3"))) + .and(field::mock("arg4").with_value(&format_args!("4"))) + .only(), + ), + ) + .enter(span.clone()) + .exit(span.clone()) + .drop_span(span) + .done() + .run_with_handle(); + + with_default(subscriber, || { + my_fn(((1, 2), (3, 4))); + }); + + handle.assert_finished(); +} + +#[test] +fn destructure_refs() { + #[instrument] + fn my_fn(&arg1: &usize) {} + + let span = span::mock().named("my_fn"); + + let (subscriber, handle) = subscriber::mock() + .new_span( + span.clone() + .with_field(field::mock("arg1").with_value(&1usize).only()), + ) + .enter(span.clone()) + .exit(span.clone()) + .drop_span(span) + .done() + .run_with_handle(); + + with_default(subscriber, || { + my_fn(&1); + }); + + handle.assert_finished(); +} + +#[test] +fn destructure_tuple_structs() { + struct Foo(usize, usize); + + #[instrument] + fn my_fn(Foo(arg1, arg2): Foo) {} + + let span = span::mock().named("my_fn"); + + let (subscriber, handle) = subscriber::mock() + .new_span( + span.clone().with_field( + field::mock("arg1") + .with_value(&format_args!("1")) + .and(field::mock("arg2").with_value(&format_args!("2"))) + .only(), + ), + ) + .enter(span.clone()) + .exit(span.clone()) + .drop_span(span) + .done() + .run_with_handle(); + + with_default(subscriber, || { + my_fn(Foo(1, 2)); + }); + + handle.assert_finished(); +} + +#[test] +fn destructure_structs() { + struct Foo { + bar: usize, + baz: usize, + } + + #[instrument] + fn my_fn( + Foo { + bar: arg1, + baz: arg2, + }: Foo, + ) { + let _ = (arg1, arg2); + } + + let span = span::mock().named("my_fn"); + + let (subscriber, handle) = subscriber::mock() + .new_span( + span.clone().with_field( + field::mock("arg1") + .with_value(&format_args!("1")) + .and(field::mock("arg2").with_value(&format_args!("2"))) + .only(), + ), + ) + .enter(span.clone()) + .exit(span.clone()) + .drop_span(span) + .done() + .run_with_handle(); + + with_default(subscriber, || { + my_fn(Foo { bar: 1, baz: 2 }); + }); + + handle.assert_finished(); +} + +#[test] +fn destructure_everything() { + struct Foo { + bar: Bar, + baz: (usize, usize), + qux: NoDebug, + } + struct Bar((usize, usize)); + struct NoDebug; + + #[instrument] + fn my_fn( + &Foo { + bar: Bar((arg1, arg2)), + baz: (arg3, arg4), + .. 
+ }: &Foo, + ) { + let _ = (arg1, arg2, arg3, arg4); + } + + let span = span::mock().named("my_fn"); + + let (subscriber, handle) = subscriber::mock() + .new_span( + span.clone().with_field( + field::mock("arg1") + .with_value(&format_args!("1")) + .and(field::mock("arg2").with_value(&format_args!("2"))) + .and(field::mock("arg3").with_value(&format_args!("3"))) + .and(field::mock("arg4").with_value(&format_args!("4"))) + .only(), + ), + ) + .enter(span.clone()) + .exit(span.clone()) + .drop_span(span) + .done() + .run_with_handle(); + + with_default(subscriber, || { + let foo = Foo { + bar: Bar((1, 2)), + baz: (3, 4), + qux: NoDebug, + }; + let _ = foo.qux; // to eliminate unused field warning + my_fn(&foo); + }); + + handle.assert_finished(); +} diff --git a/third_party/rust/tracing-attributes/tests/err.rs b/third_party/rust/tracing-attributes/tests/err.rs new file mode 100644 index 000000000000..f2706f3084cd --- /dev/null +++ b/third_party/rust/tracing-attributes/tests/err.rs @@ -0,0 +1,200 @@ +use tracing::subscriber::with_default; +use tracing::Level; +use tracing_attributes::instrument; +use tracing_mock::*; + +use std::convert::TryFrom; +use std::num::TryFromIntError; + +#[instrument(err)] +fn err() -> Result { + u8::try_from(1234) +} + +#[instrument(err)] +fn err_suspicious_else() -> Result { + {} + u8::try_from(1234) +} + +#[test] +fn test() { + let span = span::mock().named("err"); + let (subscriber, handle) = subscriber::mock() + .new_span(span.clone()) + .enter(span.clone()) + .event(event::mock().at_level(Level::ERROR)) + .exit(span.clone()) + .drop_span(span) + .done() + .run_with_handle(); + with_default(subscriber, || err().ok()); + handle.assert_finished(); +} + +#[instrument(err)] +async fn err_async(polls: usize) -> Result { + let future = PollN::new_ok(polls); + tracing::trace!(awaiting = true); + future.await.ok(); + u8::try_from(1234) +} + +#[test] +fn test_async() { + let span = span::mock().named("err_async"); + let (subscriber, handle) = subscriber::mock() + .new_span(span.clone()) + .enter(span.clone()) + .event( + event::mock() + .with_fields(field::mock("awaiting").with_value(&true)) + .at_level(Level::TRACE), + ) + .exit(span.clone()) + .enter(span.clone()) + .event(event::mock().at_level(Level::ERROR)) + .exit(span.clone()) + .drop_span(span) + .done() + .run_with_handle(); + with_default(subscriber, || { + block_on_future(async { err_async(2).await }).ok(); + }); + handle.assert_finished(); +} + +#[instrument(err)] +fn err_mut(out: &mut u8) -> Result<(), TryFromIntError> { + *out = u8::try_from(1234)?; + Ok(()) +} + +#[test] +fn test_mut() { + let span = span::mock().named("err_mut"); + let (subscriber, handle) = subscriber::mock() + .new_span(span.clone()) + .enter(span.clone()) + .event(event::mock().at_level(Level::ERROR)) + .exit(span.clone()) + .drop_span(span) + .done() + .run_with_handle(); + with_default(subscriber, || err_mut(&mut 0).ok()); + handle.assert_finished(); +} + +#[instrument(err)] +async fn err_mut_async(polls: usize, out: &mut u8) -> Result<(), TryFromIntError> { + let future = PollN::new_ok(polls); + tracing::trace!(awaiting = true); + future.await.ok(); + *out = u8::try_from(1234)?; + Ok(()) +} + +#[test] +fn test_mut_async() { + let span = span::mock().named("err_mut_async"); + let (subscriber, handle) = subscriber::mock() + .new_span(span.clone()) + .enter(span.clone()) + .event( + event::mock() + .with_fields(field::mock("awaiting").with_value(&true)) + .at_level(Level::TRACE), + ) + .exit(span.clone()) + .enter(span.clone()) + 
.event(event::mock().at_level(Level::ERROR)) + .exit(span.clone()) + .drop_span(span) + .done() + .run_with_handle(); + with_default(subscriber, || { + block_on_future(async { err_mut_async(2, &mut 0).await }).ok(); + }); + handle.assert_finished(); +} + +#[test] +fn impl_trait_return_type() { + // Reproduces https://github.com/tokio-rs/tracing/issues/1227 + + #[instrument(err)] + fn returns_impl_trait(x: usize) -> Result, String> { + Ok(0..x) + } + + let span = span::mock().named("returns_impl_trait"); + + let (subscriber, handle) = subscriber::mock() + .new_span( + span.clone() + .with_field(field::mock("x").with_value(&10usize).only()), + ) + .enter(span.clone()) + .exit(span.clone()) + .drop_span(span) + .done() + .run_with_handle(); + + with_default(subscriber, || { + for _ in returns_impl_trait(10).unwrap() { + // nop + } + }); + + handle.assert_finished(); +} + +#[instrument(err(Debug))] +fn err_dbg() -> Result { + u8::try_from(1234) +} + +#[test] +fn test_err_dbg() { + let span = span::mock().named("err_dbg"); + let (subscriber, handle) = subscriber::mock() + .new_span(span.clone()) + .enter(span.clone()) + .event( + event::mock().at_level(Level::ERROR).with_fields( + field::mock("error") + // use the actual error value that will be emitted, so + // that this test doesn't break if the standard library + // changes the `fmt::Debug` output from the error type + // in the future. + .with_value(&tracing::field::debug(u8::try_from(1234).unwrap_err())), + ), + ) + .exit(span.clone()) + .drop_span(span) + .done() + .run_with_handle(); + with_default(subscriber, || err_dbg().ok()); + handle.assert_finished(); +} + +#[test] +fn test_err_display_default() { + let span = span::mock().named("err"); + let (subscriber, handle) = subscriber::mock() + .new_span(span.clone()) + .enter(span.clone()) + .event( + event::mock().at_level(Level::ERROR).with_fields( + field::mock("error") + // by default, errors will be emitted with their display values + .with_value(&tracing::field::display(u8::try_from(1234).unwrap_err())), + ), + ) + .exit(span.clone()) + .drop_span(span) + .done() + .run_with_handle(); + with_default(subscriber, || err().ok()); + handle.assert_finished(); +} diff --git a/third_party/rust/tracing-attributes/tests/fields.rs b/third_party/rust/tracing-attributes/tests/fields.rs new file mode 100644 index 000000000000..a31c1c2a846c --- /dev/null +++ b/third_party/rust/tracing-attributes/tests/fields.rs @@ -0,0 +1,147 @@ +use tracing::subscriber::with_default; +use tracing_attributes::instrument; +use tracing_mock::field::mock; +use tracing_mock::span::NewSpan; +use tracing_mock::*; + +#[instrument(fields(foo = "bar", dsa = true, num = 1))] +fn fn_no_param() {} + +#[instrument(fields(foo = "bar"))] +fn fn_param(param: u32) {} + +#[instrument(fields(foo = "bar", empty))] +fn fn_empty_field() {} + +#[instrument(fields(len = s.len()))] +fn fn_expr_field(s: &str) {} + +#[instrument(fields(s.len = s.len(), s.is_empty = s.is_empty()))] +fn fn_two_expr_fields(s: &str) { + let _ = s; +} + +#[instrument(fields(%s, s.len = s.len()))] +fn fn_clashy_expr_field(s: &str) { + let _ = s; +} + +#[instrument(fields(s = "s"))] +fn fn_clashy_expr_field2(s: &str) { + let _ = s; +} + +#[derive(Debug)] +struct HasField { + my_field: &'static str, +} + +impl HasField { + #[instrument(fields(my_field = self.my_field), skip(self))] + fn self_expr_field(&self) {} +} + +#[test] +fn fields() { + let span = span::mock().with_field( + mock("foo") + .with_value(&"bar") + .and(mock("dsa").with_value(&true)) + 
.and(mock("num").with_value(&1)) + .only(), + ); + run_test(span, || { + fn_no_param(); + }); +} + +#[test] +fn expr_field() { + let span = span::mock().with_field( + mock("s") + .with_value(&"hello world") + .and(mock("len").with_value(&"hello world".len())) + .only(), + ); + run_test(span, || { + fn_expr_field("hello world"); + }); +} + +#[test] +fn two_expr_fields() { + let span = span::mock().with_field( + mock("s") + .with_value(&"hello world") + .and(mock("s.len").with_value(&"hello world".len())) + .and(mock("s.is_empty").with_value(&false)) + .only(), + ); + run_test(span, || { + fn_two_expr_fields("hello world"); + }); +} + +#[test] +fn clashy_expr_field() { + let span = span::mock().with_field( + // Overriding the `s` field should record `s` as a `Display` value, + // rather than as a `Debug` value. + mock("s") + .with_value(&tracing::field::display("hello world")) + .and(mock("s.len").with_value(&"hello world".len())) + .only(), + ); + run_test(span, || { + fn_clashy_expr_field("hello world"); + }); + + let span = span::mock().with_field(mock("s").with_value(&"s").only()); + run_test(span, || { + fn_clashy_expr_field2("hello world"); + }); +} + +#[test] +fn self_expr_field() { + let span = span::mock().with_field(mock("my_field").with_value(&"hello world").only()); + run_test(span, || { + let has_field = HasField { + my_field: "hello world", + }; + has_field.self_expr_field(); + }); +} + +#[test] +fn parameters_with_fields() { + let span = span::mock().with_field( + mock("foo") + .with_value(&"bar") + .and(mock("param").with_value(&1u32)) + .only(), + ); + run_test(span, || { + fn_param(1); + }); +} + +#[test] +fn empty_field() { + let span = span::mock().with_field(mock("foo").with_value(&"bar").only()); + run_test(span, || { + fn_empty_field(); + }); +} + +fn run_test T, T>(span: NewSpan, fun: F) { + let (subscriber, handle) = subscriber::mock() + .new_span(span) + .enter(span::mock()) + .exit(span::mock()) + .done() + .run_with_handle(); + + with_default(subscriber, fun); + handle.assert_finished(); +} diff --git a/third_party/rust/tracing-attributes/tests/follows_from.rs b/third_party/rust/tracing-attributes/tests/follows_from.rs new file mode 100644 index 000000000000..da0eec6357ea --- /dev/null +++ b/third_party/rust/tracing-attributes/tests/follows_from.rs @@ -0,0 +1,99 @@ +use tracing::{subscriber::with_default, Id, Level, Span}; +use tracing_attributes::instrument; +use tracing_mock::*; + +#[instrument(follows_from = causes, skip(causes))] +fn with_follows_from_sync(causes: impl IntoIterator>>) {} + +#[instrument(follows_from = causes, skip(causes))] +async fn with_follows_from_async(causes: impl IntoIterator>>) {} + +#[instrument(follows_from = [&Span::current()])] +fn follows_from_current() {} + +#[test] +fn follows_from_sync_test() { + let cause_a = span::mock().named("cause_a"); + let cause_b = span::mock().named("cause_b"); + let cause_c = span::mock().named("cause_c"); + let consequence = span::mock().named("with_follows_from_sync"); + + let (subscriber, handle) = subscriber::mock() + .new_span(cause_a.clone()) + .new_span(cause_b.clone()) + .new_span(cause_c.clone()) + .new_span(consequence.clone()) + .follows_from(consequence.clone(), cause_a) + .follows_from(consequence.clone(), cause_b) + .follows_from(consequence.clone(), cause_c) + .enter(consequence.clone()) + .exit(consequence) + .done() + .run_with_handle(); + + with_default(subscriber, || { + let cause_a = tracing::span!(Level::TRACE, "cause_a"); + let cause_b = tracing::span!(Level::TRACE, "cause_b"); + 
let cause_c = tracing::span!(Level::TRACE, "cause_c"); + + with_follows_from_sync(&[cause_a, cause_b, cause_c]) + }); + + handle.assert_finished(); +} + +#[test] +fn follows_from_async_test() { + let cause_a = span::mock().named("cause_a"); + let cause_b = span::mock().named("cause_b"); + let cause_c = span::mock().named("cause_c"); + let consequence = span::mock().named("with_follows_from_async"); + + let (subscriber, handle) = subscriber::mock() + .new_span(cause_a.clone()) + .new_span(cause_b.clone()) + .new_span(cause_c.clone()) + .new_span(consequence.clone()) + .follows_from(consequence.clone(), cause_a) + .follows_from(consequence.clone(), cause_b) + .follows_from(consequence.clone(), cause_c) + .enter(consequence.clone()) + .exit(consequence) + .done() + .run_with_handle(); + + with_default(subscriber, || { + block_on_future(async { + let cause_a = tracing::span!(Level::TRACE, "cause_a"); + let cause_b = tracing::span!(Level::TRACE, "cause_b"); + let cause_c = tracing::span!(Level::TRACE, "cause_c"); + + with_follows_from_async(&[cause_a, cause_b, cause_c]).await + }) + }); + + handle.assert_finished(); +} + +#[test] +fn follows_from_current_test() { + let cause = span::mock().named("cause"); + let consequence = span::mock().named("follows_from_current"); + + let (subscriber, handle) = subscriber::mock() + .new_span(cause.clone()) + .enter(cause.clone()) + .new_span(consequence.clone()) + .follows_from(consequence.clone(), cause.clone()) + .enter(consequence.clone()) + .exit(consequence) + .exit(cause) + .done() + .run_with_handle(); + + with_default(subscriber, || { + tracing::span!(Level::TRACE, "cause").in_scope(follows_from_current) + }); + + handle.assert_finished(); +} diff --git a/third_party/rust/tracing-attributes/tests/instrument.rs b/third_party/rust/tracing-attributes/tests/instrument.rs new file mode 100644 index 000000000000..2b2fee71e7f0 --- /dev/null +++ b/third_party/rust/tracing-attributes/tests/instrument.rs @@ -0,0 +1,243 @@ +use tracing::subscriber::with_default; +use tracing::Level; +use tracing_attributes::instrument; +use tracing_mock::*; + +#[test] +fn override_everything() { + #[instrument(target = "my_target", level = "debug")] + fn my_fn() {} + + #[instrument(level = "debug", target = "my_target")] + fn my_other_fn() {} + + let span = span::mock() + .named("my_fn") + .at_level(Level::DEBUG) + .with_target("my_target"); + let span2 = span::mock() + .named("my_other_fn") + .at_level(Level::DEBUG) + .with_target("my_target"); + let (subscriber, handle) = subscriber::mock() + .new_span(span.clone()) + .enter(span.clone()) + .exit(span.clone()) + .drop_span(span) + .new_span(span2.clone()) + .enter(span2.clone()) + .exit(span2.clone()) + .drop_span(span2) + .done() + .run_with_handle(); + + with_default(subscriber, || { + my_fn(); + my_other_fn(); + }); + + handle.assert_finished(); +} + +#[test] +fn fields() { + #[instrument(target = "my_target", level = "debug")] + fn my_fn(arg1: usize, arg2: bool) {} + + let span = span::mock() + .named("my_fn") + .at_level(Level::DEBUG) + .with_target("my_target"); + + let span2 = span::mock() + .named("my_fn") + .at_level(Level::DEBUG) + .with_target("my_target"); + let (subscriber, handle) = subscriber::mock() + .new_span( + span.clone().with_field( + field::mock("arg1") + .with_value(&2usize) + .and(field::mock("arg2").with_value(&false)) + .only(), + ), + ) + .enter(span.clone()) + .exit(span.clone()) + .drop_span(span) + .new_span( + span2.clone().with_field( + field::mock("arg1") + .with_value(&3usize) + 
.and(field::mock("arg2").with_value(&true)) + .only(), + ), + ) + .enter(span2.clone()) + .exit(span2.clone()) + .drop_span(span2) + .done() + .run_with_handle(); + + with_default(subscriber, || { + my_fn(2, false); + my_fn(3, true); + }); + + handle.assert_finished(); +} + +#[test] +fn skip() { + struct UnDebug(pub u32); + + #[instrument(target = "my_target", level = "debug", skip(_arg2, _arg3))] + fn my_fn(arg1: usize, _arg2: UnDebug, _arg3: UnDebug) {} + + #[instrument(target = "my_target", level = "debug", skip_all)] + fn my_fn2(_arg1: usize, _arg2: UnDebug, _arg3: UnDebug) {} + + let span = span::mock() + .named("my_fn") + .at_level(Level::DEBUG) + .with_target("my_target"); + + let span2 = span::mock() + .named("my_fn") + .at_level(Level::DEBUG) + .with_target("my_target"); + + let span3 = span::mock() + .named("my_fn2") + .at_level(Level::DEBUG) + .with_target("my_target"); + + let (subscriber, handle) = subscriber::mock() + .new_span( + span.clone() + .with_field(field::mock("arg1").with_value(&2usize).only()), + ) + .enter(span.clone()) + .exit(span.clone()) + .drop_span(span) + .new_span( + span2 + .clone() + .with_field(field::mock("arg1").with_value(&3usize).only()), + ) + .enter(span2.clone()) + .exit(span2.clone()) + .drop_span(span2) + .new_span(span3.clone()) + .enter(span3.clone()) + .exit(span3.clone()) + .drop_span(span3) + .done() + .run_with_handle(); + + with_default(subscriber, || { + my_fn(2, UnDebug(0), UnDebug(1)); + my_fn(3, UnDebug(0), UnDebug(1)); + my_fn2(2, UnDebug(0), UnDebug(1)); + }); + + handle.assert_finished(); +} + +#[test] +fn generics() { + #[derive(Debug)] + struct Foo; + + #[instrument] + fn my_fn(arg1: S, arg2: T) + where + S: std::fmt::Debug, + { + } + + let span = span::mock().named("my_fn"); + + let (subscriber, handle) = subscriber::mock() + .new_span( + span.clone().with_field( + field::mock("arg1") + .with_value(&format_args!("Foo")) + .and(field::mock("arg2").with_value(&format_args!("false"))), + ), + ) + .enter(span.clone()) + .exit(span.clone()) + .drop_span(span) + .done() + .run_with_handle(); + + with_default(subscriber, || { + my_fn(Foo, false); + }); + + handle.assert_finished(); +} + +#[test] +fn methods() { + #[derive(Debug)] + struct Foo; + + impl Foo { + #[instrument] + fn my_fn(&self, arg1: usize) {} + } + + let span = span::mock().named("my_fn"); + + let (subscriber, handle) = subscriber::mock() + .new_span( + span.clone().with_field( + field::mock("self") + .with_value(&format_args!("Foo")) + .and(field::mock("arg1").with_value(&42usize)), + ), + ) + .enter(span.clone()) + .exit(span.clone()) + .drop_span(span) + .done() + .run_with_handle(); + + with_default(subscriber, || { + let foo = Foo; + foo.my_fn(42); + }); + + handle.assert_finished(); +} + +#[test] +fn impl_trait_return_type() { + #[instrument] + fn returns_impl_trait(x: usize) -> impl Iterator { + 0..x + } + + let span = span::mock().named("returns_impl_trait"); + + let (subscriber, handle) = subscriber::mock() + .new_span( + span.clone() + .with_field(field::mock("x").with_value(&10usize).only()), + ) + .enter(span.clone()) + .exit(span.clone()) + .drop_span(span) + .done() + .run_with_handle(); + + with_default(subscriber, || { + for _ in returns_impl_trait(10) { + // nop + } + }); + + handle.assert_finished(); +} diff --git a/third_party/rust/tracing-attributes/tests/levels.rs b/third_party/rust/tracing-attributes/tests/levels.rs new file mode 100644 index 000000000000..b074ea4f28a3 --- /dev/null +++ b/third_party/rust/tracing-attributes/tests/levels.rs @@ -0,0 
+1,96 @@ +use tracing::subscriber::with_default; +use tracing::Level; +use tracing_attributes::instrument; +use tracing_mock::*; + +#[test] +fn named_levels() { + #[instrument(level = "trace")] + fn trace() {} + + #[instrument(level = "Debug")] + fn debug() {} + + #[instrument(level = "INFO")] + fn info() {} + + #[instrument(level = "WARn")] + fn warn() {} + + #[instrument(level = "eRrOr")] + fn error() {} + let (subscriber, handle) = subscriber::mock() + .new_span(span::mock().named("trace").at_level(Level::TRACE)) + .enter(span::mock().named("trace").at_level(Level::TRACE)) + .exit(span::mock().named("trace").at_level(Level::TRACE)) + .new_span(span::mock().named("debug").at_level(Level::DEBUG)) + .enter(span::mock().named("debug").at_level(Level::DEBUG)) + .exit(span::mock().named("debug").at_level(Level::DEBUG)) + .new_span(span::mock().named("info").at_level(Level::INFO)) + .enter(span::mock().named("info").at_level(Level::INFO)) + .exit(span::mock().named("info").at_level(Level::INFO)) + .new_span(span::mock().named("warn").at_level(Level::WARN)) + .enter(span::mock().named("warn").at_level(Level::WARN)) + .exit(span::mock().named("warn").at_level(Level::WARN)) + .new_span(span::mock().named("error").at_level(Level::ERROR)) + .enter(span::mock().named("error").at_level(Level::ERROR)) + .exit(span::mock().named("error").at_level(Level::ERROR)) + .done() + .run_with_handle(); + + with_default(subscriber, || { + trace(); + debug(); + info(); + warn(); + error(); + }); + + handle.assert_finished(); +} + +#[test] +fn numeric_levels() { + #[instrument(level = 1)] + fn trace() {} + + #[instrument(level = 2)] + fn debug() {} + + #[instrument(level = 3)] + fn info() {} + + #[instrument(level = 4)] + fn warn() {} + + #[instrument(level = 5)] + fn error() {} + let (subscriber, handle) = subscriber::mock() + .new_span(span::mock().named("trace").at_level(Level::TRACE)) + .enter(span::mock().named("trace").at_level(Level::TRACE)) + .exit(span::mock().named("trace").at_level(Level::TRACE)) + .new_span(span::mock().named("debug").at_level(Level::DEBUG)) + .enter(span::mock().named("debug").at_level(Level::DEBUG)) + .exit(span::mock().named("debug").at_level(Level::DEBUG)) + .new_span(span::mock().named("info").at_level(Level::INFO)) + .enter(span::mock().named("info").at_level(Level::INFO)) + .exit(span::mock().named("info").at_level(Level::INFO)) + .new_span(span::mock().named("warn").at_level(Level::WARN)) + .enter(span::mock().named("warn").at_level(Level::WARN)) + .exit(span::mock().named("warn").at_level(Level::WARN)) + .new_span(span::mock().named("error").at_level(Level::ERROR)) + .enter(span::mock().named("error").at_level(Level::ERROR)) + .exit(span::mock().named("error").at_level(Level::ERROR)) + .done() + .run_with_handle(); + + with_default(subscriber, || { + trace(); + debug(); + info(); + warn(); + error(); + }); + + handle.assert_finished(); +} diff --git a/third_party/rust/tracing-attributes/tests/names.rs b/third_party/rust/tracing-attributes/tests/names.rs new file mode 100644 index 000000000000..d97dece9a16e --- /dev/null +++ b/third_party/rust/tracing-attributes/tests/names.rs @@ -0,0 +1,63 @@ +use tracing::subscriber::with_default; +use tracing_attributes::instrument; +use tracing_mock::*; + +#[instrument] +fn default_name() {} + +#[instrument(name = "my_name")] +fn custom_name() {} + +// XXX: it's weird that we support both of these forms, but apparently we +// managed to release a version that accepts both syntax, so now we have to +// support it! yay! 
+#[instrument("my_other_name")] +fn custom_name_no_equals() {} + +#[test] +fn default_name_test() { + let (subscriber, handle) = subscriber::mock() + .new_span(span::mock().named("default_name")) + .enter(span::mock().named("default_name")) + .exit(span::mock().named("default_name")) + .done() + .run_with_handle(); + + with_default(subscriber, || { + default_name(); + }); + + handle.assert_finished(); +} + +#[test] +fn custom_name_test() { + let (subscriber, handle) = subscriber::mock() + .new_span(span::mock().named("my_name")) + .enter(span::mock().named("my_name")) + .exit(span::mock().named("my_name")) + .done() + .run_with_handle(); + + with_default(subscriber, || { + custom_name(); + }); + + handle.assert_finished(); +} + +#[test] +fn custom_name_no_equals_test() { + let (subscriber, handle) = subscriber::mock() + .new_span(span::mock().named("my_other_name")) + .enter(span::mock().named("my_other_name")) + .exit(span::mock().named("my_other_name")) + .done() + .run_with_handle(); + + with_default(subscriber, || { + custom_name_no_equals(); + }); + + handle.assert_finished(); +} diff --git a/third_party/rust/tracing-attributes/tests/parents.rs b/third_party/rust/tracing-attributes/tests/parents.rs new file mode 100644 index 000000000000..7069b98ea5d7 --- /dev/null +++ b/third_party/rust/tracing-attributes/tests/parents.rs @@ -0,0 +1,102 @@ +use tracing::{subscriber::with_default, Id, Level}; +use tracing_attributes::instrument; +use tracing_mock::*; + +#[instrument] +fn with_default_parent() {} + +#[instrument(parent = parent_span, skip(parent_span))] +fn with_explicit_parent
<P>
(parent_span: P) +where + P: Into>, +{ +} + +#[test] +fn default_parent_test() { + let contextual_parent = span::mock().named("contextual_parent"); + let child = span::mock().named("with_default_parent"); + + let (subscriber, handle) = subscriber::mock() + .new_span( + contextual_parent + .clone() + .with_contextual_parent(None) + .with_explicit_parent(None), + ) + .new_span( + child + .clone() + .with_contextual_parent(Some("contextual_parent")) + .with_explicit_parent(None), + ) + .enter(child.clone()) + .exit(child.clone()) + .enter(contextual_parent.clone()) + .new_span( + child + .clone() + .with_contextual_parent(Some("contextual_parent")) + .with_explicit_parent(None), + ) + .enter(child.clone()) + .exit(child) + .exit(contextual_parent) + .done() + .run_with_handle(); + + with_default(subscriber, || { + let contextual_parent = tracing::span!(Level::TRACE, "contextual_parent"); + + with_default_parent(); + + contextual_parent.in_scope(|| { + with_default_parent(); + }); + }); + + handle.assert_finished(); +} + +#[test] +fn explicit_parent_test() { + let contextual_parent = span::mock().named("contextual_parent"); + let explicit_parent = span::mock().named("explicit_parent"); + let child = span::mock().named("with_explicit_parent"); + + let (subscriber, handle) = subscriber::mock() + .new_span( + contextual_parent + .clone() + .with_contextual_parent(None) + .with_explicit_parent(None), + ) + .new_span( + explicit_parent + .with_contextual_parent(None) + .with_explicit_parent(None), + ) + .enter(contextual_parent.clone()) + .new_span( + child + .clone() + .with_contextual_parent(Some("contextual_parent")) + .with_explicit_parent(Some("explicit_parent")), + ) + .enter(child.clone()) + .exit(child) + .exit(contextual_parent) + .done() + .run_with_handle(); + + with_default(subscriber, || { + let contextual_parent = tracing::span!(Level::INFO, "contextual_parent"); + let explicit_parent = tracing::span!(Level::INFO, "explicit_parent"); + + contextual_parent.in_scope(|| { + with_explicit_parent(&explicit_parent); + }); + }); + + handle.assert_finished(); +} diff --git a/third_party/rust/tracing-attributes/tests/ret.rs b/third_party/rust/tracing-attributes/tests/ret.rs new file mode 100644 index 000000000000..01879dfd2d06 --- /dev/null +++ b/third_party/rust/tracing-attributes/tests/ret.rs @@ -0,0 +1,221 @@ +use std::convert::TryFrom; +use std::num::TryFromIntError; +use tracing_mock::*; + +use tracing::{subscriber::with_default, Level}; +use tracing_attributes::instrument; + +#[instrument(ret)] +fn ret() -> i32 { + 42 +} + +#[test] +fn test() { + let span = span::mock().named("ret"); + let (subscriber, handle) = subscriber::mock() + .new_span(span.clone()) + .enter(span.clone()) + .event( + event::mock() + .with_fields(field::mock("return").with_value(&tracing::field::debug(42))) + .at_level(Level::INFO), + ) + .exit(span.clone()) + .drop_span(span) + .done() + .run_with_handle(); + + with_default(subscriber, ret); + handle.assert_finished(); +} + +#[instrument(level = "warn", ret)] +fn ret_warn() -> i32 { + 42 +} + +#[test] +fn test_warn() { + let span = span::mock().named("ret_warn"); + let (subscriber, handle) = subscriber::mock() + .new_span(span.clone()) + .enter(span.clone()) + .event( + event::mock() + .with_fields(field::mock("return").with_value(&tracing::field::debug(42))) + .at_level(Level::WARN), + ) + .exit(span.clone()) + .drop_span(span) + .done() + .run_with_handle(); + + with_default(subscriber, ret_warn); + handle.assert_finished(); +} + +#[instrument(ret)] +fn 
ret_mut(a: &mut i32) -> i32 { + *a *= 2; + tracing::info!(?a); + *a +} + +#[test] +fn test_mut() { + let span = span::mock().named("ret_mut"); + let (subscriber, handle) = subscriber::mock() + .new_span(span.clone()) + .enter(span.clone()) + .event( + event::mock() + .with_fields(field::mock("a").with_value(&tracing::field::display(2))) + .at_level(Level::INFO), + ) + .event( + event::mock() + .with_fields(field::mock("return").with_value(&tracing::field::debug(2))) + .at_level(Level::INFO), + ) + .exit(span.clone()) + .drop_span(span) + .done() + .run_with_handle(); + + with_default(subscriber, || ret_mut(&mut 1)); + handle.assert_finished(); +} + +#[instrument(ret)] +async fn ret_async() -> i32 { + 42 +} + +#[test] +fn test_async() { + let span = span::mock().named("ret_async"); + let (subscriber, handle) = subscriber::mock() + .new_span(span.clone()) + .enter(span.clone()) + .event( + event::mock() + .with_fields(field::mock("return").with_value(&tracing::field::debug(42))) + .at_level(Level::INFO), + ) + .exit(span.clone()) + .drop_span(span) + .done() + .run_with_handle(); + + with_default(subscriber, || block_on_future(async { ret_async().await })); + handle.assert_finished(); +} + +#[instrument(ret)] +fn ret_impl_type() -> impl Copy { + 42 +} + +#[test] +fn test_impl_type() { + let span = span::mock().named("ret_impl_type"); + let (subscriber, handle) = subscriber::mock() + .new_span(span.clone()) + .enter(span.clone()) + .event( + event::mock() + .with_fields(field::mock("return").with_value(&tracing::field::debug(42))) + .at_level(Level::INFO), + ) + .exit(span.clone()) + .drop_span(span) + .done() + .run_with_handle(); + + with_default(subscriber, ret_impl_type); + handle.assert_finished(); +} + +#[instrument(ret(Display))] +fn ret_display() -> i32 { + 42 +} + +#[test] +fn test_dbg() { + let span = span::mock().named("ret_display"); + let (subscriber, handle) = subscriber::mock() + .new_span(span.clone()) + .enter(span.clone()) + .event( + event::mock() + .with_fields(field::mock("return").with_value(&tracing::field::display(42))) + .at_level(Level::INFO), + ) + .exit(span.clone()) + .drop_span(span) + .done() + .run_with_handle(); + + with_default(subscriber, ret_display); + handle.assert_finished(); +} + +#[instrument(err, ret)] +fn ret_and_err() -> Result { + u8::try_from(1234) +} + +#[test] +fn test_ret_and_err() { + let span = span::mock().named("ret_and_err"); + let (subscriber, handle) = subscriber::mock() + .new_span(span.clone()) + .enter(span.clone()) + .event( + event::mock() + .with_fields( + field::mock("error") + .with_value(&tracing::field::display(u8::try_from(1234).unwrap_err())) + .only(), + ) + .at_level(Level::ERROR), + ) + .exit(span.clone()) + .drop_span(span) + .done() + .run_with_handle(); + + with_default(subscriber, || ret_and_err().ok()); + handle.assert_finished(); +} + +#[instrument(err, ret)] +fn ret_and_ok() -> Result { + u8::try_from(123) +} + +#[test] +fn test_ret_and_ok() { + let span = span::mock().named("ret_and_ok"); + let (subscriber, handle) = subscriber::mock() + .new_span(span.clone()) + .enter(span.clone()) + .event( + event::mock() + .with_fields( + field::mock("return") + .with_value(&tracing::field::debug(u8::try_from(123).unwrap())) + .only(), + ) + .at_level(Level::INFO), + ) + .exit(span.clone()) + .drop_span(span) + .done() + .run_with_handle(); + + with_default(subscriber, || ret_and_ok().ok()); + handle.assert_finished(); +} diff --git a/third_party/rust/tracing-attributes/tests/targets.rs 
b/third_party/rust/tracing-attributes/tests/targets.rs new file mode 100644 index 000000000000..363f628f315e --- /dev/null +++ b/third_party/rust/tracing-attributes/tests/targets.rs @@ -0,0 +1,97 @@ +use tracing::subscriber::with_default; +use tracing_attributes::instrument; +use tracing_mock::*; + +#[instrument] +fn default_target() {} + +#[instrument(target = "my_target")] +fn custom_target() {} + +mod my_mod { + use tracing_attributes::instrument; + + pub const MODULE_PATH: &str = module_path!(); + + #[instrument] + pub fn default_target() {} + + #[instrument(target = "my_other_target")] + pub fn custom_target() {} +} + +#[test] +fn default_targets() { + let (subscriber, handle) = subscriber::mock() + .new_span( + span::mock() + .named("default_target") + .with_target(module_path!()), + ) + .enter( + span::mock() + .named("default_target") + .with_target(module_path!()), + ) + .exit( + span::mock() + .named("default_target") + .with_target(module_path!()), + ) + .new_span( + span::mock() + .named("default_target") + .with_target(my_mod::MODULE_PATH), + ) + .enter( + span::mock() + .named("default_target") + .with_target(my_mod::MODULE_PATH), + ) + .exit( + span::mock() + .named("default_target") + .with_target(my_mod::MODULE_PATH), + ) + .done() + .run_with_handle(); + + with_default(subscriber, || { + default_target(); + my_mod::default_target(); + }); + + handle.assert_finished(); +} + +#[test] +fn custom_targets() { + let (subscriber, handle) = subscriber::mock() + .new_span(span::mock().named("custom_target").with_target("my_target")) + .enter(span::mock().named("custom_target").with_target("my_target")) + .exit(span::mock().named("custom_target").with_target("my_target")) + .new_span( + span::mock() + .named("custom_target") + .with_target("my_other_target"), + ) + .enter( + span::mock() + .named("custom_target") + .with_target("my_other_target"), + ) + .exit( + span::mock() + .named("custom_target") + .with_target("my_other_target"), + ) + .done() + .run_with_handle(); + + with_default(subscriber, || { + custom_target(); + my_mod::custom_target(); + }); + + handle.assert_finished(); +} diff --git a/third_party/rust/tracing-core/.cargo-checksum.json b/third_party/rust/tracing-core/.cargo-checksum.json new file mode 100644 index 000000000000..7a0658dbbfaf --- /dev/null +++ b/third_party/rust/tracing-core/.cargo-checksum.json @@ -0,0 +1 @@ 
+{"files":{"CHANGELOG.md":"de0381af7fa8b6205fb806084961e4bbeee7d06a0e541fb00df8030e430e285b","Cargo.toml":"5cd31945f806bdc3c1e744bf8970e4d024397f75168fc745b282a3ebcd10c95f","LICENSE":"898b1ae9821e98daf8964c8d6c7f61641f5f5aa78ad500020771c0939ee0dea1","README.md":"214340927fdcbbcfbb2ad338588aae0d39fc00e7701058ee7b41d9feffcd5f92","src/callsite.rs":"a0b7d6611d683a7aaa0658188066a6e14d0c1837613c13a863c1bfc511cb1850","src/dispatcher.rs":"89c3020f722370a967d1cdf7d34ff723ddf5126e85c5c13e04186d5933e7c463","src/event.rs":"f2673bf5d266972e567e521c9cd92fb33f28b0c7e010937e3bc2bf9eb483087f","src/field.rs":"16a8f5d6d577686bc95d5da2aee4610140aeb0bfa802d8b08fba6166551ecc0a","src/lazy_static/LICENSE":"1e2391052f82d7b0111f512680dcbecf01b06842f5295833e7bd435be2b09a9b","src/lazy_static/core_lazy.rs":"b4c1aaec440177f0de3148fd7b5b60f052890267035966069e9d6a167a72af6a","src/lazy_static/mod.rs":"31abed65708c02b36ec4c72d0788b3fc18c684274ba943dc80fc9f2347f5fb2f","src/lib.rs":"b9b139f20303bb3bc650bc59cc0909ac27edcb7ffed8cdd6e657986d49da9187","src/metadata.rs":"13d8313f653d91eb6945b8bfa482f1182c69edc4876fb30046a8fa6380f4734e","src/parent.rs":"5d5ad733343280a64a1feb6a008e186c39305ec554f14279012b8d7915821471","src/span.rs":"ab52f8fd62209636487cade93b7f827d29965cb962c1eaacd82249a650e19c19","src/spin/LICENSE":"58545fed1565e42d687aecec6897d35c6d37ccb71479a137c0deb2203e125c79","src/spin/mod.rs":"c458ce5e875acb7fbfb279f23254f4924d7c6d6fee419b740800d2e8087d1524","src/spin/mutex.rs":"4d30ff2b59b18fd7909f016e1abdf9aa0c04aa11d047a46e98cffe1319e32dad","src/spin/once.rs":"3781fd4eae0db04d80c03a039906c99b1e01d1583b29ac0144e6fbbd5a0fef0b","src/stdlib.rs":"491c9e37321798c86124c0690f7c5f29054bc444cc51406c36b6fdb106392303","src/subscriber.rs":"4d7d4e3f6536f8db3cab88b582571b618f6098b07795588ca774a4829f691d77","tests/common/mod.rs":"0bbb217baa17df0f96cc1ff57dfa74ccc5a959e7f66b15bb7d25d5f43358a278","tests/dispatch.rs":"d3f000fab43734a854c82a7783142910c5e79f806cbd3f8ec5eded598c59ddb1","tests/global_dispatch.rs":"cdc05d77e448ee8b50bfb930abafa3f19b4c6f922b7bebc7797fa1dbdaa1d398","tests/macros.rs":"b1603d888b349c8d103794deceec3b1ae4538b8d3eba805f3f561899e8ad0dd2"},"package":"f54c8ca710e81886d498c2fd3331b56c93aa248d49de2222ad2742247c60072f"} \ No newline at end of file diff --git a/third_party/rust/tracing-core/CHANGELOG.md b/third_party/rust/tracing-core/CHANGELOG.md new file mode 100644 index 000000000000..1bb9aab529c3 --- /dev/null +++ b/third_party/rust/tracing-core/CHANGELOG.md @@ -0,0 +1,397 @@ +# 0.1.26 (April 14, 2022) + +This release adds a `Value` implementation for `Box` to allow +recording boxed values more conveniently. In particular, this should improve +the ergonomics of the implementations for `dyn std::error::Error` trait objects, +including those added in [v0.1.25]. + +### Added + +- `Value` implementation for `Box where T: Value` ([#2071]) + +### Fixed + +- Broken documentation links ([#2068]) + +Thanks to new contributor @ben0x539 for contributing to this release! + + +[v0.1.25]: https://github.com/tokio-rs/tracing/releases/tag/tracing-core-0.1.25 +[#2071]: https://github.com/tokio-rs/tracing/pull/2071 +[#2068]: https://github.com/tokio-rs/tracing/pull/2068 + +# 0.1.25 (April 12, 2022) + +This release adds additional `Value` implementations for `std::error::Error` +trait objects with auto trait bounds (`Send` and `Sync`), as Rust will not +auto-coerce trait objects. Additionally, it fixes a bug when setting scoped +dispatchers that was introduced in the previous release ([v0.1.24]). 
+ +### Added + +- `Value` implementations for `dyn Error + Send + 'static`, `dyn Error + Send + + Sync + 'static`, `dyn Error + Sync + 'static` ([#2066]) + +### Fixed + +- Failure to use the global default dispatcher if a thread has set a scoped + default prior to setting the global default, and unset the scoped default + after setting the global default ([#2065]) + +Thanks to @lilyball for contributing to this release! + +[v0.1.24]: https://github.com/tokio-rs/tracing/releases/tag/tracing-core-0.1.24 +[#2066]: https://github.com/tokio-rs/tracing/pull/2066 +[#2065]: https://github.com/tokio-rs/tracing/pull/2065 + +# 0.1.24 (April 1, 2022) + +This release fixes a bug where setting `NoSubscriber` as the local default would +not disable the global default subscriber locally. + +### Fixed + +- Setting `NoSubscriber` as the local default now correctly disables the global + default subscriber ([#2001]) +- Fixed compilation warnings with the "std" feature disabled ([#2022]) + +### Changed + +- Removed unnecessary use of `write!` and `format_args!` macros ([#1988]) + +[#1988]: https://github.com/tokio-rs/tracing/pull/1988 +[#2001]: https://github.com/tokio-rs/tracing/pull/2001 +[#2022]: https://github.com/tokio-rs/tracing/pull/2022 + +# 0.1.23 (March 8, 2022) + +### Changed + +- Removed `#[inline]` attributes from some `Dispatch` methods whose + callers are now inlined ([#1974]) +- Bumped minimum supported Rust version (MSRV) to Rust 1.49.0 ([#1913]) + +[#1913]: https://github.com/tokio-rs/tracing/pull/1913 +[#1974]: https://github.com/tokio-rs/tracing/pull/1974 + +# 0.1.22 (February 3, 2022) + +This release adds *experimental* support for recording structured field values +using the [`valuable`] crate. See [this blog post][post] for details on +`valuable`. + +Note that `valuable` support currently requires `--cfg tracing_unstable`. See +the documentation for details. + +### Added + +- **field**: Experimental support for recording field values using the + [`valuable`] crate ([#1608], [#1888], [#1887]) +- **field**: Added `ValueSet::record` method ([#1823]) +- **subscriber**: `Default` impl for `NoSubscriber` ([#1785]) +- **metadata**: New `Kind::HINT` to support the `enabled!` macro in `tracing` + ([#1883], [#1891]) +### Fixed + +- Fixed a number of documentation issues ([#1665], [#1692], [#1737]) + +Thanks to @xd009642, @Skepfyr, @guswynn, @Folyd, and @mbergkvist for +contributing to this release! + +[`valuable`]: https://crates.io/crates/valuable +[post]: https://tokio.rs/blog/2021-05-valuable +[#1608]: https://github.com/tokio-rs/tracing/pull/1608 +[#1888]: https://github.com/tokio-rs/tracing/pull/1888 +[#1887]: https://github.com/tokio-rs/tracing/pull/1887 +[#1823]: https://github.com/tokio-rs/tracing/pull/1823 +[#1785]: https://github.com/tokio-rs/tracing/pull/1785 +[#1883]: https://github.com/tokio-rs/tracing/pull/1883 +[#1891]: https://github.com/tokio-rs/tracing/pull/1891 +[#1665]: https://github.com/tokio-rs/tracing/pull/1665 +[#1692]: https://github.com/tokio-rs/tracing/pull/1692 +[#1737]: https://github.com/tokio-rs/tracing/pull/1737 + +# 0.1.21 (October 1, 2021) + +This release adds support for recording `Option where T: Value` as typed +`tracing` field values. 
+ +### Added + +- **field**: `Value` impl for `Option where T: Value` ([#1585]) + +### Fixed + +- Fixed deprecation warnings when building with `default-features` disabled + ([#1603], [#1606]) +- Documentation fixes and improvements ([#1595], [#1601]) + +Thanks to @brianburgers, @DCjanus, and @matklad for contributing to this +release! + +[#1585]: https://github.com/tokio-rs/tracing/pull/1585 +[#1595]: https://github.com/tokio-rs/tracing/pull/1595 +[#1601]: https://github.com/tokio-rs/tracing/pull/1601 +[#1603]: https://github.com/tokio-rs/tracing/pull/1603 +[#1606]: https://github.com/tokio-rs/tracing/pull/1606 + +# 0.1.20 (September 12, 2021) + +This release adds support for `f64` as one of the `tracing-core` +primitive field values, allowing floating-point values to be recorded as +typed values rather than with `fmt::Debug`. Additionally, it adds +`NoSubscriber`, a `Subscriber` implementation that does nothing. + +### Added + +- **subscriber**: `NoSubscriber`, a no-op `Subscriber` implementation + ([#1549]) +- **field**: Added `Visit::record_f64` and support for recording + floating-point values ([#1507]) + +Thanks to new contributors @jsgf and @maxburke for contributing to this +release! + +[#1549]: https://github.com/tokio-rs/tracing/pull/1549 +[#1507]: https://github.com/tokio-rs/tracing/pull/1507 + +# 0.1.19 (August 17, 2021) +### Added + +- `Level::as_str` ([#1413]) +- `Hash` implementation for `Level` and `LevelFilter` ([#1456]) +- `Value` implementation for `&mut T where T: Value` ([#1385]) +- Multiple documentation fixes and improvements ([#1435], [#1446]) + +Thanks to @Folyd, @teozkr, and @dvdplm for contributing to this release! + +[#1413]: https://github.com/tokio-rs/tracing/pull/1413 +[#1456]: https://github.com/tokio-rs/tracing/pull/1456 +[#1385]: https://github.com/tokio-rs/tracing/pull/1385 +[#1435]: https://github.com/tokio-rs/tracing/pull/1435 +[#1446]: https://github.com/tokio-rs/tracing/pull/1446 + +# 0.1.18 (April 30, 2021) + +### Added + +- `Subscriber` impl for `Box` ([#1358]) +- `Subscriber` impl for `Arc` ([#1374]) +- Symmetric `From` impls for existing `Into` impls on `Current` and `Option` + ([#1335]) +- `Attributes::fields` accessor that returns the set of fields defined on a + span's `Attributes` ([#1331]) + + +Thanks to @Folyd for contributing to this release! + +[#1358]: https://github.com/tokio-rs/tracing/pull/1358 +[#1374]: https://github.com/tokio-rs/tracing/pull/1374 +[#1335]: https://github.com/tokio-rs/tracing/pull/1335 +[#1331]: https://github.com/tokio-rs/tracing/pull/1331 + +# 0.1.17 (September 28, 2020) + +### Fixed + +- Incorrect inlining of `Event::dispatch` and `Event::child_of`, which could + result in `dispatcher::get_default` being inlined at the callsite ([#994]) + +### Added + +- `Copy` implementations for `Level` and `LevelFilter` ([#992]) + +Thanks to new contributors @jyn514 and @TaKO8Ki for contributing to this +release! + +[#994]: https://github.com/tokio-rs/tracing/pull/994 +[#992]: https://github.com/tokio-rs/tracing/pull/992 + +# 0.1.16 (September 8, 2020) + +### Fixed + +- Added a conversion from `Option` to `LevelFilter`. This resolves a + previously unreported regression where `Option` was no longer + a valid LevelFilter. 
([#966](https://github.com/tokio-rs/tracing/pull/966)) + +# 0.1.15 (August 22, 2020) + +### Fixed + +- When combining `Interest` from multiple subscribers, if the interests differ, + the current subscriber is now always asked if a callsite should be enabled + (#927) + +## Added + +- Internal API changes to support optimizations in the `tracing` crate (#943) +- **docs**: Multiple fixes and improvements (#913, #941) + +# 0.1.14 (August 10, 2020) + +### Fixed + +- Incorrect calculation of global max level filter which could result in fast + filtering paths not being taken (#908) + +# 0.1.13 (August 4, 2020) + +### Fixed + +- Missing `fmt::Display` impl for `field::DisplayValue` causing a compilation + failure when the "log" feature is enabled (#887) + +Thanks to @d-e-s-o for contributing to this release! + +# 0.1.12 (July 31, 2020) + +### Added + +- `LevelFilter` type and `LevelFilter::current()` for returning the highest level + that any subscriber will enable (#853) +- `Subscriber::max_level_hint` optional trait method, for setting the value + returned by `LevelFilter::current()` (#853) + +### Fixed + +- **docs**: Removed outdated reference to a Tokio API that no longer exists + (#857) + +Thanks to new contributor @dignati for contributing to this release! + +# 0.1.11 (June 8, 2020) + +### Changed + +- Replaced use of `inner_local_macros` with `$crate::` (#729) + +### Added + +- `must_use` warning to guards returned by `dispatcher::set_default` (#686) +- `fmt::Debug` impl to `dyn Value`s (#696) +- Functions to convert between `span::Id` and `NonZeroU64` (#770) +- More obvious warnings in documentation (#769) + +### Fixed + +- Compiler error when `tracing-core/std` feature is enabled but `tracing/std` is + not (#760) +- Clippy warning on vtable address comparison in `callsite::Identifier` (#749) +- Documentation formatting issues (#715, #771) + +Thanks to @bkchr, @majecty, @taiki-e, @nagisa, and @nvzqz for contributing to +this release! + +# 0.1.10 (January 24, 2020) + +### Added + +- `field::Empty` type for declaring empty fields whose values will be recorded + later (#548) +- `field::Value` implementations for `Wrapping` and `NonZero*` numbers (#538) + +### Fixed + +- Broken and unresolvable links in RustDoc (#595) + +Thanks to @oli-cosmian for contributing to this release! + +# 0.1.9 (January 10, 2020) + +### Added + +- API docs now show what feature flags are required to enable each item (#523) + +### Fixed + +- A panic when the current default subscriber subscriber calls + `dispatcher::with_default` as it is being dropped (#522) +- Incorrect documentation for `Subscriber::drop_span` (#524) + +# 0.1.8 (December 20, 2019) + +### Added + +- `Default` impl for `Dispatch` (#411) + +### Fixed + +- Removed duplicate `lazy_static` dependencies (#424) +- Fixed no-std dependencies being enabled even when `std` feature flag is set + (#424) +- Broken link to `Metadata` in `Event` docs (#461) + +# 0.1.7 (October 18, 2019) + +### Added + +- Added `dispatcher::set_default` API which returns a drop guard (#388) + +### Fixed + +- Added missing `Value` impl for `u8` (#392) +- Broken links in docs. 
+ +# 0.1.6 (September 12, 2019) + +### Added + +- Internal APIs to support performance optimizations (#326) + +### Fixed + +- Clarified wording in `field::display` documentation (#340) + +# 0.1.5 (August 16, 2019) + +### Added + +- `std::error::Error` as a new primitive `Value` type (#277) +- `Event::new` and `Event::new_child_of` to manually construct `Event`s (#281) + +# 0.1.4 (August 9, 2019) + +### Added + +- Support for `no-std` + `liballoc` (#256) + +### Fixed + +- Broken links in RustDoc (#259) + +# 0.1.3 (August 8, 2019) + +### Added + +- `std::fmt::Display` implementation for `Level` (#194) +- `std::str::FromStr` implementation for `Level` (#195) + +# 0.1.2 (July 10, 2019) + +### Deprecated + +- `Subscriber::drop_span` in favor of new `Subscriber::try_close` (#168) + +### Added + +- `Into>`, `Into>`, and + `Into>>` impls for `span::Current` (#170) +- `Subscriber::try_close` method (#153) +- Improved documentation for `dispatcher` (#171) + +# 0.1.1 (July 6, 2019) + +### Added + +- `Subscriber::current_span` API to return the current span (#148). +- `span::Current` type, representing the `Subscriber`'s view of the current + span (#148). + +### Fixed + +- Typos and broken links in documentation (#123, #124, #128, #154) + +# 0.1.0 (June 27, 2019) + +- Initial release diff --git a/third_party/rust/tracing-core/Cargo.toml b/third_party/rust/tracing-core/Cargo.toml new file mode 100644 index 000000000000..94c738728cf3 --- /dev/null +++ b/third_party/rust/tracing-core/Cargo.toml @@ -0,0 +1,41 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. 
+ +[package] +edition = "2018" +rust-version = "1.49.0" +name = "tracing-core" +version = "0.1.26" +authors = ["Tokio Contributors "] +description = "Core primitives for application-level tracing.\n" +homepage = "https://tokio.rs" +readme = "README.md" +keywords = ["logging", "tracing", "profiling"] +categories = ["development-tools::debugging", "development-tools::profiling", "asynchronous"] +license = "MIT" +repository = "https://github.com/tokio-rs/tracing" +[package.metadata.docs.rs] +all-features = true +rustc-args = ["--cfg", "tracing_unstable"] +rustdoc-args = ["--cfg", "docsrs", "--cfg", "tracing_unstable"] +[dependencies.lazy_static] +version = "1.0.2" +optional = true + +[features] +default = ["std", "valuable/std"] +std = ["lazy_static"] +[target."cfg(tracing_unstable)".dependencies.valuable] +version = "0.1.0" +optional = true +default_features = false +[badges.maintenance] +status = "actively-developed" diff --git a/third_party/rust/tracing-core/LICENSE b/third_party/rust/tracing-core/LICENSE new file mode 100644 index 000000000000..cdb28b4b56a4 --- /dev/null +++ b/third_party/rust/tracing-core/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2019 Tokio Contributors + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/third_party/rust/tracing-core/README.md b/third_party/rust/tracing-core/README.md new file mode 100644 index 000000000000..d841b15a0b62 --- /dev/null +++ b/third_party/rust/tracing-core/README.md @@ -0,0 +1,121 @@ +![Tracing — Structured, application-level diagnostics][splash] + +[splash]: https://raw.githubusercontent.com/tokio-rs/tracing/master/assets/splash.svg + +# tracing-core + +Core primitives for application-level tracing. 
+ +[![Crates.io][crates-badge]][crates-url] +[![Documentation][docs-badge]][docs-url] +[![Documentation (master)][docs-master-badge]][docs-master-url] +[![MIT licensed][mit-badge]][mit-url] +[![Build Status][actions-badge]][actions-url] +[![Discord chat][discord-badge]][discord-url] + +[Documentation][docs-url] | [Chat][discord-url] + +[crates-badge]: https://img.shields.io/crates/v/tracing-core.svg +[crates-url]: https://crates.io/crates/tracing-core/0.1.26 +[docs-badge]: https://docs.rs/tracing-core/badge.svg +[docs-url]: https://docs.rs/tracing-core/0.1.26 +[docs-master-badge]: https://img.shields.io/badge/docs-master-blue +[docs-master-url]: https://tracing-rs.netlify.com/tracing_core +[mit-badge]: https://img.shields.io/badge/license-MIT-blue.svg +[mit-url]: LICENSE +[actions-badge]: https://github.com/tokio-rs/tracing/workflows/CI/badge.svg +[actions-url]:https://github.com/tokio-rs/tracing/actions?query=workflow%3ACI +[discord-badge]: https://img.shields.io/discord/500028886025895936?logo=discord&label=discord&logoColor=white +[discord-url]: https://discord.gg/EeF3cQw + +## Overview + +[`tracing`] is a framework for instrumenting Rust programs to collect +structured, event-based diagnostic information. This crate defines the core +primitives of `tracing`. + +The crate provides: + +* [`span::Id`] identifies a span within the execution of a program. + +* [`Event`] represents a single event within a trace. + +* [`Subscriber`], the trait implemented to collect trace data. + +* [`Metadata`] and [`Callsite`] provide information describing spans and + events. + +* [`Field`], [`FieldSet`], [`Value`], and [`ValueSet`] represent the + structured data attached to spans and events. + +* [`Dispatch`] allows spans and events to be dispatched to `Subscriber`s. + +In addition, it defines the global callsite registry and per-thread current +dispatcher which other components of the tracing system rely on. + +*Compiler support: [requires `rustc` 1.49+][msrv]* + +[msrv]: #supported-rust-versions + +## Usage + +Application authors will typically not use this crate directly. Instead, they +will use the [`tracing`] crate, which provides a much more fully-featured +API. However, this crate's API will change very infrequently, so it may be used +when dependencies must be very stable. + +`Subscriber` implementations may depend on `tracing-core` rather than `tracing`, +as the additional APIs provided by `tracing` are primarily useful for +instrumenting libraries and applications, and are generally not necessary for +`Subscriber` implementations. + +### Crate Feature Flags + +The following crate feature flags are available: + +* `std`: Depend on the Rust standard library (enabled by default). + + `no_std` users may disable this feature with `default-features = false`: + + ```toml + [dependencies] + tracing-core = { version = "0.1.26", default-features = false } + ``` + + **Note**:`tracing-core`'s `no_std` support requires `liballoc`. 
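+
+As a quick sketch of the kind of code that depends on `tracing-core` directly,
+a minimal `Subscriber` implementation that discards all trace data might look
+like the following (the `NoopSubscriber` name and its behavior are illustrative
+only, not part of this crate):
+
+```rust
+use tracing_core::{
+    span::{Attributes, Id, Record},
+    Event, Metadata, Subscriber,
+};
+
+pub struct NoopSubscriber;
+
+impl Subscriber for NoopSubscriber {
+    // Report no interest in any callsite.
+    fn enabled(&self, _metadata: &Metadata<'_>) -> bool {
+        false
+    }
+
+    // Span IDs must be non-zero; a constant is fine for a subscriber that
+    // never stores spans.
+    fn new_span(&self, _attrs: &Attributes<'_>) -> Id {
+        Id::from_u64(1)
+    }
+
+    fn record(&self, _span: &Id, _values: &Record<'_>) {}
+    fn record_follows_from(&self, _span: &Id, _follows: &Id) {}
+    fn event(&self, _event: &Event<'_>) {}
+    fn enter(&self, _span: &Id) {}
+    fn exit(&self, _span: &Id) {}
+}
+```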
+ +[`tracing`]: ../tracing +[`span::Id`]: https://docs.rs/tracing-core/0.1.26/tracing_core/span/struct.Id.html +[`Event`]: https://docs.rs/tracing-core/0.1.26/tracing_core/event/struct.Event.html +[`Subscriber`]: https://docs.rs/tracing-core/0.1.26/tracing_core/subscriber/trait.Subscriber.html +[`Metadata`]: https://docs.rs/tracing-core/0.1.26/tracing_core/metadata/struct.Metadata.html +[`Callsite`]: https://docs.rs/tracing-core/0.1.26/tracing_core/callsite/trait.Callsite.html +[`Field`]: https://docs.rs/tracing-core/0.1.26/tracing_core/field/struct.Field.html +[`FieldSet`]: https://docs.rs/tracing-core/0.1.26/tracing_core/field/struct.FieldSet.html +[`Value`]: https://docs.rs/tracing-core/0.1.26/tracing_core/field/trait.Value.html +[`ValueSet`]: https://docs.rs/tracing-core/0.1.26/tracing_core/field/struct.ValueSet.html +[`Dispatch`]: https://docs.rs/tracing-core/0.1.26/tracing_core/dispatcher/struct.Dispatch.html + +## Supported Rust Versions + +Tracing is built against the latest stable release. The minimum supported +version is 1.49. The current Tracing version is not guaranteed to build on Rust +versions earlier than the minimum supported version. + +Tracing follows the same compiler support policies as the rest of the Tokio +project. The current stable Rust compiler and the three most recent minor +versions before it will always be supported. For example, if the current stable +compiler version is 1.45, the minimum supported version will not be increased +past 1.42, three minor versions prior. Increasing the minimum supported compiler +version is not considered a semver breaking change as long as doing so complies +with this policy. + +## License + +This project is licensed under the [MIT license](LICENSE). + +### Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in Tokio by you, shall be licensed as MIT, without any additional +terms or conditions. diff --git a/third_party/rust/tracing-core/src/callsite.rs b/third_party/rust/tracing-core/src/callsite.rs new file mode 100644 index 000000000000..38ff14a2f08d --- /dev/null +++ b/third_party/rust/tracing-core/src/callsite.rs @@ -0,0 +1,173 @@ +//! Callsites represent the source locations from which spans or events +//! originate. +use crate::stdlib::{ + fmt, + hash::{Hash, Hasher}, + sync::Mutex, + vec::Vec, +}; +use crate::{ + dispatcher::{self, Dispatch}, + metadata::{LevelFilter, Metadata}, + subscriber::Interest, +}; + +crate::lazy_static! { + static ref REGISTRY: Mutex = Mutex::new(Registry { + callsites: Vec::new(), + dispatchers: Vec::new(), + }); +} + +struct Registry { + callsites: Vec<&'static dyn Callsite>, + dispatchers: Vec, +} + +impl Registry { + fn rebuild_callsite_interest(&self, callsite: &'static dyn Callsite) { + let meta = callsite.metadata(); + + // Iterate over the subscribers in the registry, and — if they are + // active — register the callsite with them. + let mut interests = self + .dispatchers + .iter() + .filter_map(|registrar| registrar.try_register(meta)); + + // Use the first subscriber's `Interest` as the base value. + let interest = if let Some(interest) = interests.next() { + // Combine all remaining `Interest`s. + interests.fold(interest, Interest::and) + } else { + // If nobody was interested in this thing, just return `never`. 
+ Interest::never() + }; + + callsite.set_interest(interest) + } + + fn rebuild_interest(&mut self) { + let mut max_level = LevelFilter::OFF; + self.dispatchers.retain(|registrar| { + if let Some(dispatch) = registrar.upgrade() { + // If the subscriber did not provide a max level hint, assume + // that it may enable every level. + let level_hint = dispatch.max_level_hint().unwrap_or(LevelFilter::TRACE); + if level_hint > max_level { + max_level = level_hint; + } + true + } else { + false + } + }); + + self.callsites.iter().for_each(|&callsite| { + self.rebuild_callsite_interest(callsite); + }); + LevelFilter::set_max(max_level); + } +} + +/// Trait implemented by callsites. +/// +/// These functions are only intended to be called by the callsite registry, which +/// correctly handles determining the common interest between all subscribers. +pub trait Callsite: Sync { + /// Sets the [`Interest`] for this callsite. + /// + /// [`Interest`]: super::subscriber::Interest + fn set_interest(&self, interest: Interest); + + /// Returns the [metadata] associated with the callsite. + /// + /// [metadata]: super::metadata::Metadata + fn metadata(&self) -> &Metadata<'_>; +} + +/// Uniquely identifies a [`Callsite`] +/// +/// Two `Identifier`s are equal if they both refer to the same callsite. +/// +/// [`Callsite`]: super::callsite::Callsite +#[derive(Clone)] +pub struct Identifier( + /// **Warning**: The fields on this type are currently `pub` because it must + /// be able to be constructed statically by macros. However, when `const + /// fn`s are available on stable Rust, this will no longer be necessary. + /// Thus, these fields are *not* considered stable public API, and they may + /// change warning. Do not rely on any fields on `Identifier`. When + /// constructing new `Identifier`s, use the `identify_callsite!` macro + /// instead. + #[doc(hidden)] + pub &'static dyn Callsite, +); + +/// Clear and reregister interest on every [`Callsite`] +/// +/// This function is intended for runtime reconfiguration of filters on traces +/// when the filter recalculation is much less frequent than trace events are. +/// The alternative is to have the [`Subscriber`] that supports runtime +/// reconfiguration of filters always return [`Interest::sometimes()`] so that +/// [`enabled`] is evaluated for every event. +/// +/// This function will also re-compute the global maximum level as determined by +/// the [`max_level_hint`] method. If a [`Subscriber`] +/// implementation changes the value returned by its `max_level_hint` +/// implementation at runtime, then it **must** call this function after that +/// value changes, in order for the change to be reflected. +/// +/// [`max_level_hint`]: super::subscriber::Subscriber::max_level_hint +/// [`Callsite`]: super::callsite::Callsite +/// [`enabled`]: super::subscriber::Subscriber#tymethod.enabled +/// [`Interest::sometimes()`]: super::subscriber::Interest::sometimes +/// [`Subscriber`]: super::subscriber::Subscriber +pub fn rebuild_interest_cache() { + let mut registry = REGISTRY.lock().unwrap(); + registry.rebuild_interest(); +} + +/// Register a new `Callsite` with the global registry. +/// +/// This should be called once per callsite after the callsite has been +/// constructed. 
+pub fn register(callsite: &'static dyn Callsite) { + let mut registry = REGISTRY.lock().unwrap(); + registry.rebuild_callsite_interest(callsite); + registry.callsites.push(callsite); +} + +pub(crate) fn register_dispatch(dispatch: &Dispatch) { + let mut registry = REGISTRY.lock().unwrap(); + registry.dispatchers.push(dispatch.registrar()); + registry.rebuild_interest(); +} + +// ===== impl Identifier ===== + +impl PartialEq for Identifier { + fn eq(&self, other: &Identifier) -> bool { + core::ptr::eq( + self.0 as *const _ as *const (), + other.0 as *const _ as *const (), + ) + } +} + +impl Eq for Identifier {} + +impl fmt::Debug for Identifier { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "Identifier({:p})", self.0) + } +} + +impl Hash for Identifier { + fn hash(&self, state: &mut H) + where + H: Hasher, + { + (self.0 as *const dyn Callsite).hash(state) + } +} diff --git a/third_party/rust/tracing-core/src/dispatcher.rs b/third_party/rust/tracing-core/src/dispatcher.rs new file mode 100644 index 000000000000..1d0c22f9a222 --- /dev/null +++ b/third_party/rust/tracing-core/src/dispatcher.rs @@ -0,0 +1,905 @@ +//! Dispatches trace events to [`Subscriber`]s. +//! +//! The _dispatcher_ is the component of the tracing system which is responsible +//! for forwarding trace data from the instrumentation points that generate it +//! to the subscriber that collects it. +//! +//! # Using the Trace Dispatcher +//! +//! Every thread in a program using `tracing` has a _default subscriber_. When +//! events occur, or spans are created, they are dispatched to the thread's +//! current subscriber. +//! +//! ## Setting the Default Subscriber +//! +//! By default, the current subscriber is an empty implementation that does +//! nothing. To use a subscriber implementation, it must be set as the default. +//! There are two methods for doing so: [`with_default`] and +//! [`set_global_default`]. `with_default` sets the default subscriber for the +//! duration of a scope, while `set_global_default` sets a default subscriber +//! for the entire process. +//! +//! To use either of these functions, we must first wrap our subscriber in a +//! [`Dispatch`], a cloneable, type-erased reference to a subscriber. For +//! example: +//! ```rust +//! # pub struct FooSubscriber; +//! # use tracing_core::{ +//! # dispatcher, Event, Metadata, +//! # span::{Attributes, Id, Record} +//! # }; +//! # impl tracing_core::Subscriber for FooSubscriber { +//! # fn new_span(&self, _: &Attributes) -> Id { Id::from_u64(0) } +//! # fn record(&self, _: &Id, _: &Record) {} +//! # fn event(&self, _: &Event) {} +//! # fn record_follows_from(&self, _: &Id, _: &Id) {} +//! # fn enabled(&self, _: &Metadata) -> bool { false } +//! # fn enter(&self, _: &Id) {} +//! # fn exit(&self, _: &Id) {} +//! # } +//! # impl FooSubscriber { fn new() -> Self { FooSubscriber } } +//! use dispatcher::Dispatch; +//! +//! let my_subscriber = FooSubscriber::new(); +//! let my_dispatch = Dispatch::new(my_subscriber); +//! ``` +//! Then, we can use [`with_default`] to set our `Dispatch` as the default for +//! the duration of a block: +//! ```rust +//! # pub struct FooSubscriber; +//! # use tracing_core::{ +//! # dispatcher, Event, Metadata, +//! # span::{Attributes, Id, Record} +//! # }; +//! # impl tracing_core::Subscriber for FooSubscriber { +//! # fn new_span(&self, _: &Attributes) -> Id { Id::from_u64(0) } +//! # fn record(&self, _: &Id, _: &Record) {} +//! # fn event(&self, _: &Event) {} +//! 
# fn record_follows_from(&self, _: &Id, _: &Id) {} +//! # fn enabled(&self, _: &Metadata) -> bool { false } +//! # fn enter(&self, _: &Id) {} +//! # fn exit(&self, _: &Id) {} +//! # } +//! # impl FooSubscriber { fn new() -> Self { FooSubscriber } } +//! # let my_subscriber = FooSubscriber::new(); +//! # let my_dispatch = dispatcher::Dispatch::new(my_subscriber); +//! // no default subscriber +//! +//! # #[cfg(feature = "std")] +//! dispatcher::with_default(&my_dispatch, || { +//! // my_subscriber is the default +//! }); +//! +//! // no default subscriber again +//! ``` +//! It's important to note that `with_default` will not propagate the current +//! thread's default subscriber to any threads spawned within the `with_default` +//! block. To propagate the default subscriber to new threads, either use +//! `with_default` from the new thread, or use `set_global_default`. +//! +//! As an alternative to `with_default`, we can use [`set_global_default`] to +//! set a `Dispatch` as the default for all threads, for the lifetime of the +//! program. For example: +//! ```rust +//! # pub struct FooSubscriber; +//! # use tracing_core::{ +//! # dispatcher, Event, Metadata, +//! # span::{Attributes, Id, Record} +//! # }; +//! # impl tracing_core::Subscriber for FooSubscriber { +//! # fn new_span(&self, _: &Attributes) -> Id { Id::from_u64(0) } +//! # fn record(&self, _: &Id, _: &Record) {} +//! # fn event(&self, _: &Event) {} +//! # fn record_follows_from(&self, _: &Id, _: &Id) {} +//! # fn enabled(&self, _: &Metadata) -> bool { false } +//! # fn enter(&self, _: &Id) {} +//! # fn exit(&self, _: &Id) {} +//! # } +//! # impl FooSubscriber { fn new() -> Self { FooSubscriber } } +//! # let my_subscriber = FooSubscriber::new(); +//! # let my_dispatch = dispatcher::Dispatch::new(my_subscriber); +//! // no default subscriber +//! +//! dispatcher::set_global_default(my_dispatch) +//! // `set_global_default` will return an error if the global default +//! // subscriber has already been set. +//! .expect("global default was already set!"); +//! +//! // `my_subscriber` is now the default +//! ``` +//! +//!
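+//!
+//! For example, one way to carry the current default over to a manually
+//! spawned thread (a sketch; the closure contents are illustrative) is to
+//! clone the current `Dispatch` and re-apply it with `with_default` on the
+//! new thread:
+//!
+//! ```rust,ignore
+//! // `Dispatch::default()` returns a clone of the current default dispatcher.
+//! let dispatch = dispatcher::Dispatch::default();
+//! std::thread::spawn(move || {
+//!     dispatcher::with_default(&dispatch, || {
+//!         // spans and events created here reach the same subscriber as the
+//!         // thread that spawned this one
+//!     });
+//! });
+//! ```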

+//! +//! ## Accessing the Default Subscriber +//! +//! A thread's current default subscriber can be accessed using the +//! [`get_default`] function, which executes a closure with a reference to the +//! currently default `Dispatch`. This is used primarily by `tracing` +//! instrumentation. +//! +//! [`Subscriber`]: Subscriber +//! [`with_default`]: with_default +//! [`set_global_default`]: set_global_default +//! [`get_default`]: get_default +//! [`Dispatch`]: Dispatch +use crate::{ + callsite, span, + subscriber::{self, NoSubscriber, Subscriber}, + Event, LevelFilter, Metadata, +}; + +use crate::stdlib::{ + any::Any, + fmt, + sync::{ + atomic::{AtomicBool, AtomicUsize, Ordering}, + Arc, Weak, + }, +}; + +#[cfg(feature = "std")] +use crate::stdlib::{ + cell::{Cell, RefCell, RefMut}, + error, +}; + +/// `Dispatch` trace data to a [`Subscriber`]. +/// +/// [`Subscriber`]: Subscriber +#[derive(Clone)] +pub struct Dispatch { + subscriber: Arc, +} + +#[cfg(feature = "std")] +thread_local! { + static CURRENT_STATE: State = State { + default: RefCell::new(None), + can_enter: Cell::new(true), + }; +} + +static EXISTS: AtomicBool = AtomicBool::new(false); +static GLOBAL_INIT: AtomicUsize = AtomicUsize::new(UNINITIALIZED); + +const UNINITIALIZED: usize = 0; +const INITIALIZING: usize = 1; +const INITIALIZED: usize = 2; + +static mut GLOBAL_DISPATCH: Option = None; + +/// The dispatch state of a thread. +#[cfg(feature = "std")] +struct State { + /// This thread's current default dispatcher. + default: RefCell>, + /// Whether or not we can currently begin dispatching a trace event. + /// + /// This is set to `false` when functions such as `enter`, `exit`, `event`, + /// and `new_span` are called on this thread's default dispatcher, to + /// prevent further trace events triggered inside those functions from + /// creating an infinite recursion. When we finish handling a dispatch, this + /// is set back to `true`. + can_enter: Cell, +} + +/// While this guard is active, additional calls to subscriber functions on +/// the default dispatcher will not be able to access the dispatch context. +/// Dropping the guard will allow the dispatch context to be re-entered. +#[cfg(feature = "std")] +struct Entered<'a>(&'a State); + +/// A guard that resets the current default dispatcher to the prior +/// default dispatcher when dropped. +#[cfg(feature = "std")] +#[cfg_attr(docsrs, doc(cfg(feature = "std")))] +#[derive(Debug)] +pub struct DefaultGuard(Option); + +/// Sets this dispatch as the default for the duration of a closure. +/// +/// The default dispatcher is used when creating a new [span] or +/// [`Event`]. +/// +///
+///     Note: This function requires the Rust standard library.
+///     no_std users should use 
+///     set_global_default instead.
+/// 
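+///
+/// A sketch of the closure-based pattern (`FooSubscriber` stands in for any
+/// `Subscriber` implementation, as in the module-level examples):
+///
+/// ```rust,ignore
+/// dispatcher::with_default(&Dispatch::new(FooSubscriber::new()), || {
+///     // spans and events created here are dispatched to `FooSubscriber`
+/// });
+/// // the previous default dispatcher is restored once the closure returns
+/// ```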
+/// +/// [span]: super::span +/// [`Subscriber`]: super::subscriber::Subscriber +/// [`Event`]: super::event::Event +/// [`set_global_default`]: super::set_global_default +#[cfg(feature = "std")] +#[cfg_attr(docsrs, doc(cfg(feature = "std")))] +pub fn with_default<T>(dispatcher: &Dispatch, f: impl FnOnce() -> T) -> T { + // When this guard is dropped, the default dispatcher will be reset to the + // prior default. Using this (rather than simply resetting after calling + // `f`) ensures that we always reset to the prior dispatcher even if `f` + // panics. + let _guard = set_default(dispatcher); + f() +} + +/// Sets the dispatch as the default dispatch for the duration of the lifetime +/// of the returned DefaultGuard +/// +///
+///     Note: the thread-local scoped dispatcher
+///     (with_default) requires the
+///     Rust standard library. no_std users should use
+///     set_global_default
+///     instead.
+/// 
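+///
+/// A sketch of the guard-based pattern (`FooSubscriber` stands in for any
+/// `Subscriber` implementation, as in the module-level examples):
+///
+/// ```rust,ignore
+/// let guard = dispatcher::set_default(&Dispatch::new(FooSubscriber::new()));
+/// // `FooSubscriber` receives this thread's spans and events while `guard`
+/// // is alive...
+/// drop(guard);
+/// // ...and the previous default dispatcher is restored here.
+/// ```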
+/// +/// [`set_global_default`]: super::set_global_default +#[cfg(feature = "std")] +#[cfg_attr(docsrs, doc(cfg(feature = "std")))] +#[must_use = "Dropping the guard unregisters the dispatcher."] +pub fn set_default(dispatcher: &Dispatch) -> DefaultGuard { + // When this guard is dropped, the default dispatcher will be reset to the + // prior default. Using this ensures that we always reset to the prior + // dispatcher even if the thread calling this function panics. + State::set_default(dispatcher.clone()) +} + +/// Sets this dispatch as the global default for the duration of the entire program. +/// Will be used as a fallback if no thread-local dispatch has been set in a thread +/// (using `with_default`.) +/// +/// Can only be set once; subsequent attempts to set the global default will fail. +/// Returns `Err` if the global default has already been set. +/// +///
+///     Warning: In general, libraries should not call
+///     set_global_default()! Doing so will cause conflicts when
+///     executables that depend on the library try to set the default later.
+/// 
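+///
+/// A sketch of typical use near the top of an executable's `main` (again
+/// borrowing the `FooSubscriber` stand-in from the module-level examples):
+///
+/// ```rust,ignore
+/// fn main() {
+///     dispatcher::set_global_default(Dispatch::new(FooSubscriber::new()))
+///         .expect("the global default subscriber has already been set");
+///     // Every thread that has not set a thread-local default now falls
+///     // back to `FooSubscriber`.
+/// }
+/// ```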
+/// +/// [span]: super::span +/// [`Subscriber`]: super::subscriber::Subscriber +/// [`Event`]: super::event::Event +pub fn set_global_default(dispatcher: Dispatch) -> Result<(), SetGlobalDefaultError> { + // if `compare_exchange` returns Result::Ok(_), then `new` has been set and + // `current`—now the prior value—has been returned in the `Ok()` branch. + if GLOBAL_INIT + .compare_exchange( + UNINITIALIZED, + INITIALIZING, + Ordering::SeqCst, + Ordering::SeqCst, + ) + .is_ok() + { + unsafe { + GLOBAL_DISPATCH = Some(dispatcher); + } + GLOBAL_INIT.store(INITIALIZED, Ordering::SeqCst); + EXISTS.store(true, Ordering::Release); + Ok(()) + } else { + Err(SetGlobalDefaultError { _no_construct: () }) + } +} + +/// Returns true if a `tracing` dispatcher has ever been set. +/// +/// This may be used to completely elide trace points if tracing is not in use +/// at all or has yet to be initialized. +#[doc(hidden)] +#[inline(always)] +pub fn has_been_set() -> bool { + EXISTS.load(Ordering::Relaxed) +} + +/// Returned if setting the global dispatcher fails. +#[derive(Debug)] +pub struct SetGlobalDefaultError { + _no_construct: (), +} + +impl fmt::Display for SetGlobalDefaultError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.pad("a global default trace dispatcher has already been set") + } +} + +#[cfg(feature = "std")] +#[cfg_attr(docsrs, doc(cfg(feature = "std")))] +impl error::Error for SetGlobalDefaultError {} + +/// Executes a closure with a reference to this thread's current [dispatcher]. +/// +/// Note that calls to `get_default` should not be nested; if this function is +/// called while inside of another `get_default`, that closure will be provided +/// with `Dispatch::none` rather than the previously set dispatcher. +/// +/// [dispatcher]: super::dispatcher::Dispatch +#[cfg(feature = "std")] +pub fn get_default(mut f: F) -> T +where + F: FnMut(&Dispatch) -> T, +{ + CURRENT_STATE + .try_with(|state| { + if let Some(entered) = state.enter() { + return f(&*entered.current()); + } + + f(&Dispatch::none()) + }) + .unwrap_or_else(|_| f(&Dispatch::none())) +} + +/// Executes a closure with a reference to this thread's current [dispatcher]. +/// +/// Note that calls to `get_default` should not be nested; if this function is +/// called while inside of another `get_default`, that closure will be provided +/// with `Dispatch::none` rather than the previously set dispatcher. +/// +/// [dispatcher]: super::dispatcher::Dispatch +#[cfg(feature = "std")] +#[doc(hidden)] +#[inline(never)] +pub fn get_current(f: impl FnOnce(&Dispatch) -> T) -> Option { + CURRENT_STATE + .try_with(|state| { + let entered = state.enter()?; + Some(f(&*entered.current())) + }) + .ok()? +} + +/// Executes a closure with a reference to the current [dispatcher]. +/// +/// [dispatcher]: super::dispatcher::Dispatch +#[cfg(not(feature = "std"))] +#[doc(hidden)] +pub fn get_current(f: impl FnOnce(&Dispatch) -> T) -> Option { + let dispatch = get_global()?; + Some(f(&dispatch)) +} + +/// Executes a closure with a reference to the current [dispatcher]. 
+/// +/// [dispatcher]: super::dispatcher::Dispatch +#[cfg(not(feature = "std"))] +pub fn get_default(mut f: F) -> T +where + F: FnMut(&Dispatch) -> T, +{ + if let Some(d) = get_global() { + f(d) + } else { + f(&Dispatch::none()) + } +} + +fn get_global() -> Option<&'static Dispatch> { + if GLOBAL_INIT.load(Ordering::SeqCst) != INITIALIZED { + return None; + } + unsafe { + // This is safe given the invariant that setting the global dispatcher + // also sets `GLOBAL_INIT` to `INITIALIZED`. + Some(GLOBAL_DISPATCH.as_ref().expect( + "invariant violated: GLOBAL_DISPATCH must be initialized before GLOBAL_INIT is set", + )) + } +} + +pub(crate) struct Registrar(Weak); + +impl Dispatch { + /// Returns a new `Dispatch` that discards events and spans. + #[inline] + pub fn none() -> Self { + Dispatch { + subscriber: Arc::new(NoSubscriber::default()), + } + } + + /// Returns a `Dispatch` that forwards to the given [`Subscriber`]. + /// + /// [`Subscriber`]: super::subscriber::Subscriber + pub fn new(subscriber: S) -> Self + where + S: Subscriber + Send + Sync + 'static, + { + let me = Dispatch { + subscriber: Arc::new(subscriber), + }; + callsite::register_dispatch(&me); + me + } + + pub(crate) fn registrar(&self) -> Registrar { + Registrar(Arc::downgrade(&self.subscriber)) + } + + /// Registers a new callsite with this subscriber, returning whether or not + /// the subscriber is interested in being notified about the callsite. + /// + /// This calls the [`register_callsite`] function on the [`Subscriber`] + /// that this `Dispatch` forwards to. + /// + /// [`Subscriber`]: super::subscriber::Subscriber + /// [`register_callsite`]: super::subscriber::Subscriber::register_callsite + #[inline] + pub fn register_callsite(&self, metadata: &'static Metadata<'static>) -> subscriber::Interest { + self.subscriber.register_callsite(metadata) + } + + /// Returns the highest [verbosity level][level] that this [`Subscriber`] will + /// enable, or `None`, if the subscriber does not implement level-based + /// filtering or chooses not to implement this method. + /// + /// This calls the [`max_level_hint`] function on the [`Subscriber`] + /// that this `Dispatch` forwards to. + /// + /// [level]: super::Level + /// [`Subscriber`]: super::subscriber::Subscriber + /// [`register_callsite`]: super::subscriber::Subscriber::max_level_hint + // TODO(eliza): consider making this a public API? + #[inline] + pub(crate) fn max_level_hint(&self) -> Option { + self.subscriber.max_level_hint() + } + + /// Record the construction of a new span, returning a new [ID] for the + /// span being constructed. + /// + /// This calls the [`new_span`] function on the [`Subscriber`] that this + /// `Dispatch` forwards to. + /// + /// [ID]: super::span::Id + /// [`Subscriber`]: super::subscriber::Subscriber + /// [`new_span`]: super::subscriber::Subscriber::new_span + #[inline] + pub fn new_span(&self, span: &span::Attributes<'_>) -> span::Id { + self.subscriber.new_span(span) + } + + /// Record a set of values on a span. + /// + /// This calls the [`record`] function on the [`Subscriber`] that this + /// `Dispatch` forwards to. + /// + /// [`Subscriber`]: super::subscriber::Subscriber + /// [`record`]: super::subscriber::Subscriber::record + #[inline] + pub fn record(&self, span: &span::Id, values: &span::Record<'_>) { + self.subscriber.record(span, values) + } + + /// Adds an indication that `span` follows from the span with the id + /// `follows`. 
+ /// + /// This calls the [`record_follows_from`] function on the [`Subscriber`] + /// that this `Dispatch` forwards to. + /// + /// [`Subscriber`]: super::subscriber::Subscriber + /// [`record_follows_from`]: super::subscriber::Subscriber::record_follows_from + #[inline] + pub fn record_follows_from(&self, span: &span::Id, follows: &span::Id) { + self.subscriber.record_follows_from(span, follows) + } + + /// Returns true if a span with the specified [metadata] would be + /// recorded. + /// + /// This calls the [`enabled`] function on the [`Subscriber`] that this + /// `Dispatch` forwards to. + /// + /// [metadata]: super::metadata::Metadata + /// [`Subscriber`]: super::subscriber::Subscriber + /// [`enabled`]: super::subscriber::Subscriber::enabled + #[inline] + pub fn enabled(&self, metadata: &Metadata<'_>) -> bool { + self.subscriber.enabled(metadata) + } + + /// Records that an [`Event`] has occurred. + /// + /// This calls the [`event`] function on the [`Subscriber`] that this + /// `Dispatch` forwards to. + /// + /// [`Event`]: super::event::Event + /// [`Subscriber`]: super::subscriber::Subscriber + /// [`event`]: super::subscriber::Subscriber::event + #[inline] + pub fn event(&self, event: &Event<'_>) { + self.subscriber.event(event) + } + + /// Records that a span has been can_enter. + /// + /// This calls the [`enter`] function on the [`Subscriber`] that this + /// `Dispatch` forwards to. + /// + /// [`Subscriber`]: super::subscriber::Subscriber + /// [`enter`]: super::subscriber::Subscriber::enter + pub fn enter(&self, span: &span::Id) { + self.subscriber.enter(span); + } + + /// Records that a span has been exited. + /// + /// This calls the [`exit`] function on the [`Subscriber`] that this + /// `Dispatch` forwards to. + /// + /// [`Subscriber`]: super::subscriber::Subscriber + /// [`exit`]: super::subscriber::Subscriber::exit + pub fn exit(&self, span: &span::Id) { + self.subscriber.exit(span); + } + + /// Notifies the subscriber that a [span ID] has been cloned. + /// + /// This function must only be called with span IDs that were returned by + /// this `Dispatch`'s [`new_span`] function. The `tracing` crate upholds + /// this guarantee and any other libraries implementing instrumentation APIs + /// must as well. + /// + /// This calls the [`clone_span`] function on the `Subscriber` that this + /// `Dispatch` forwards to. + /// + /// [span ID]: super::span::Id + /// [`Subscriber`]: super::subscriber::Subscriber + /// [`clone_span`]: super::subscriber::Subscriber::clone_span + /// [`new_span`]: super::subscriber::Subscriber::new_span + #[inline] + pub fn clone_span(&self, id: &span::Id) -> span::Id { + self.subscriber.clone_span(id) + } + + /// Notifies the subscriber that a [span ID] has been dropped. + /// + /// This function must only be called with span IDs that were returned by + /// this `Dispatch`'s [`new_span`] function. The `tracing` crate upholds + /// this guarantee and any other libraries implementing instrumentation APIs + /// must as well. + /// + /// This calls the [`drop_span`] function on the [`Subscriber`] that this + /// `Dispatch` forwards to. + /// + ///
+    ///     Deprecated: The 
+    ///     try_close method is functionally identical, but returns
+    ///     true if the span is now closed. It should be used
+    ///     instead of this method.
+    /// 
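+    ///
+    /// A sketch of migrating a caller from `drop_span` to `try_close`:
+    ///
+    /// ```rust,ignore
+    /// // instead of `dispatch.drop_span(id);`
+    /// if dispatch.try_close(id) {
+    ///     // no more IDs refer to the span; it is now closed
+    /// }
+    /// ```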
+ /// + /// [span ID]: super::span::Id + /// [`Subscriber`]: super::subscriber::Subscriber + /// [`drop_span`]: super::subscriber::Subscriber::drop_span + /// [`new_span`]: super::subscriber::Subscriber::new_span + /// [`try_close`]: #method.try_close + #[inline] + #[deprecated(since = "0.1.2", note = "use `Dispatch::try_close` instead")] + pub fn drop_span(&self, id: span::Id) { + #[allow(deprecated)] + self.subscriber.drop_span(id); + } + + /// Notifies the subscriber that a [span ID] has been dropped, and returns + /// `true` if there are now 0 IDs referring to that span. + /// + /// This function must only be called with span IDs that were returned by + /// this `Dispatch`'s [`new_span`] function. The `tracing` crate upholds + /// this guarantee and any other libraries implementing instrumentation APIs + /// must as well. + /// + /// This calls the [`try_close`] function on the [`Subscriber`] that this + /// `Dispatch` forwards to. + /// + /// [span ID]: super::span::Id + /// [`Subscriber`]: super::subscriber::Subscriber + /// [`try_close`]: super::subscriber::Subscriber::try_close + /// [`new_span`]: super::subscriber::Subscriber::new_span + pub fn try_close(&self, id: span::Id) -> bool { + self.subscriber.try_close(id) + } + + /// Returns a type representing this subscriber's view of the current span. + /// + /// This calls the [`current`] function on the `Subscriber` that this + /// `Dispatch` forwards to. + /// + /// [`current`]: super::subscriber::Subscriber::current_span + #[inline] + pub fn current_span(&self) -> span::Current { + self.subscriber.current_span() + } + + /// Returns `true` if this `Dispatch` forwards to a `Subscriber` of type + /// `T`. + #[inline] + pub fn is(&self) -> bool { + ::is::(&*self.subscriber) + } + + /// Returns some reference to the `Subscriber` this `Dispatch` forwards to + /// if it is of type `T`, or `None` if it isn't. + #[inline] + pub fn downcast_ref(&self) -> Option<&T> { + ::downcast_ref(&*self.subscriber) + } +} + +impl Default for Dispatch { + /// Returns the current default dispatcher + fn default() -> Self { + get_default(|default| default.clone()) + } +} + +impl fmt::Debug for Dispatch { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_tuple("Dispatch") + .field(&format_args!("{:p}", self.subscriber)) + .finish() + } +} + +impl From for Dispatch +where + S: Subscriber + Send + Sync + 'static, +{ + #[inline] + fn from(subscriber: S) -> Self { + Dispatch::new(subscriber) + } +} + +impl Registrar { + pub(crate) fn try_register( + &self, + metadata: &'static Metadata<'static>, + ) -> Option { + self.0.upgrade().map(|s| s.register_callsite(metadata)) + } + + pub(crate) fn upgrade(&self) -> Option { + self.0.upgrade().map(|subscriber| Dispatch { subscriber }) + } +} + +// ===== impl State ===== + +#[cfg(feature = "std")] +impl State { + /// Replaces the current default dispatcher on this thread with the provided + /// dispatcher.Any + /// + /// Dropping the returned `ResetGuard` will reset the default dispatcher to + /// the previous value. 
+ #[inline] + fn set_default(new_dispatch: Dispatch) -> DefaultGuard { + let prior = CURRENT_STATE + .try_with(|state| { + state.can_enter.set(true); + state.default.replace(Some(new_dispatch)) + }) + .ok() + .flatten(); + EXISTS.store(true, Ordering::Release); + DefaultGuard(prior) + } + + #[inline] + fn enter(&self) -> Option> { + if self.can_enter.replace(false) { + Some(Entered(self)) + } else { + None + } + } +} + +// ===== impl Entered ===== + +#[cfg(feature = "std")] +impl<'a> Entered<'a> { + #[inline] + fn current(&self) -> RefMut<'a, Dispatch> { + let default = self.0.default.borrow_mut(); + RefMut::map(default, |default| { + default.get_or_insert_with(|| get_global().cloned().unwrap_or_else(Dispatch::none)) + }) + } +} + +#[cfg(feature = "std")] +impl<'a> Drop for Entered<'a> { + #[inline] + fn drop(&mut self) { + self.0.can_enter.set(true); + } +} + +// ===== impl DefaultGuard ===== + +#[cfg(feature = "std")] +impl Drop for DefaultGuard { + #[inline] + fn drop(&mut self) { + // Replace the dispatcher and then drop the old one outside + // of the thread-local context. Dropping the dispatch may + // lead to the drop of a subscriber which, in the process, + // could then also attempt to access the same thread local + // state -- causing a clash. + let prev = CURRENT_STATE.try_with(|state| state.default.replace(self.0.take())); + drop(prev) + } +} + +#[cfg(test)] +mod test { + use super::*; + #[cfg(feature = "std")] + use crate::stdlib::sync::atomic::{AtomicUsize, Ordering}; + use crate::{ + callsite::Callsite, + metadata::{Kind, Level, Metadata}, + subscriber::Interest, + }; + + #[test] + fn dispatch_is() { + let dispatcher = Dispatch::new(NoSubscriber::default()); + assert!(dispatcher.is::()); + } + + #[test] + fn dispatch_downcasts() { + let dispatcher = Dispatch::new(NoSubscriber::default()); + assert!(dispatcher.downcast_ref::().is_some()); + } + + struct TestCallsite; + static TEST_CALLSITE: TestCallsite = TestCallsite; + static TEST_META: Metadata<'static> = metadata! { + name: "test", + target: module_path!(), + level: Level::DEBUG, + fields: &[], + callsite: &TEST_CALLSITE, + kind: Kind::EVENT + }; + + impl Callsite for TestCallsite { + fn set_interest(&self, _: Interest) {} + fn metadata(&self) -> &Metadata<'_> { + &TEST_META + } + } + + #[test] + #[cfg(feature = "std")] + fn events_dont_infinite_loop() { + // This test ensures that an event triggered within a subscriber + // won't cause an infinite loop of events. + struct TestSubscriber; + impl Subscriber for TestSubscriber { + fn enabled(&self, _: &Metadata<'_>) -> bool { + true + } + + fn new_span(&self, _: &span::Attributes<'_>) -> span::Id { + span::Id::from_u64(0xAAAA) + } + + fn record(&self, _: &span::Id, _: &span::Record<'_>) {} + + fn record_follows_from(&self, _: &span::Id, _: &span::Id) {} + + fn event(&self, _: &Event<'_>) { + static EVENTS: AtomicUsize = AtomicUsize::new(0); + assert_eq!( + EVENTS.fetch_add(1, Ordering::Relaxed), + 0, + "event method called twice!" + ); + Event::dispatch(&TEST_META, &TEST_META.fields().value_set(&[])) + } + + fn enter(&self, _: &span::Id) {} + + fn exit(&self, _: &span::Id) {} + } + + with_default(&Dispatch::new(TestSubscriber), || { + Event::dispatch(&TEST_META, &TEST_META.fields().value_set(&[])) + }) + } + + #[test] + #[cfg(feature = "std")] + fn spans_dont_infinite_loop() { + // This test ensures that a span created within a subscriber + // won't cause an infinite loop of new spans. 
+ + fn mk_span() { + get_default(|current| { + current.new_span(&span::Attributes::new( + &TEST_META, + &TEST_META.fields().value_set(&[]), + )) + }); + } + + struct TestSubscriber; + impl Subscriber for TestSubscriber { + fn enabled(&self, _: &Metadata<'_>) -> bool { + true + } + + fn new_span(&self, _: &span::Attributes<'_>) -> span::Id { + static NEW_SPANS: AtomicUsize = AtomicUsize::new(0); + assert_eq!( + NEW_SPANS.fetch_add(1, Ordering::Relaxed), + 0, + "new_span method called twice!" + ); + mk_span(); + span::Id::from_u64(0xAAAA) + } + + fn record(&self, _: &span::Id, _: &span::Record<'_>) {} + + fn record_follows_from(&self, _: &span::Id, _: &span::Id) {} + + fn event(&self, _: &Event<'_>) {} + + fn enter(&self, _: &span::Id) {} + + fn exit(&self, _: &span::Id) {} + } + + with_default(&Dispatch::new(TestSubscriber), mk_span) + } + + #[test] + fn default_no_subscriber() { + let default_dispatcher = Dispatch::default(); + assert!(default_dispatcher.is::()); + } + + #[cfg(feature = "std")] + #[test] + fn default_dispatch() { + struct TestSubscriber; + impl Subscriber for TestSubscriber { + fn enabled(&self, _: &Metadata<'_>) -> bool { + true + } + + fn new_span(&self, _: &span::Attributes<'_>) -> span::Id { + span::Id::from_u64(0xAAAA) + } + + fn record(&self, _: &span::Id, _: &span::Record<'_>) {} + + fn record_follows_from(&self, _: &span::Id, _: &span::Id) {} + + fn event(&self, _: &Event<'_>) {} + + fn enter(&self, _: &span::Id) {} + + fn exit(&self, _: &span::Id) {} + } + let guard = set_default(&Dispatch::new(TestSubscriber)); + let default_dispatcher = Dispatch::default(); + assert!(default_dispatcher.is::()); + + drop(guard); + let default_dispatcher = Dispatch::default(); + assert!(default_dispatcher.is::()); + } +} diff --git a/third_party/rust/tracing-core/src/event.rs b/third_party/rust/tracing-core/src/event.rs new file mode 100644 index 000000000000..6e254376299a --- /dev/null +++ b/third_party/rust/tracing-core/src/event.rs @@ -0,0 +1,128 @@ +//! Events represent single points in time during the execution of a program. +use crate::parent::Parent; +use crate::span::Id; +use crate::{field, Metadata}; + +/// `Event`s represent single points in time where something occurred during the +/// execution of a program. +/// +/// An `Event` can be compared to a log record in unstructured logging, but with +/// two key differences: +/// - `Event`s exist _within the context of a [span]_. Unlike log lines, they +/// may be located within the trace tree, allowing visibility into the +/// _temporal_ context in which the event occurred, as well as the source +/// code location. +/// - Like spans, `Event`s have structured key-value data known as _[fields]_, +/// which may include textual message. In general, a majority of the data +/// associated with an event should be in the event's fields rather than in +/// the textual message, as the fields are more structured. +/// +/// [span]: super::span +/// [fields]: super::field +#[derive(Debug)] +pub struct Event<'a> { + fields: &'a field::ValueSet<'a>, + metadata: &'static Metadata<'static>, + parent: Parent, +} + +impl<'a> Event<'a> { + /// Constructs a new `Event` with the specified metadata and set of values, + /// and observes it with the current subscriber. 
+ pub fn dispatch(metadata: &'static Metadata<'static>, fields: &'a field::ValueSet<'_>) { + let event = Event::new(metadata, fields); + crate::dispatcher::get_default(|current| { + current.event(&event); + }); + } + + /// Returns a new `Event` in the current span, with the specified metadata + /// and set of values. + #[inline] + pub fn new(metadata: &'static Metadata<'static>, fields: &'a field::ValueSet<'a>) -> Self { + Event { + fields, + metadata, + parent: Parent::Current, + } + } + + /// Returns a new `Event` as a child of the specified span, with the + /// provided metadata and set of values. + #[inline] + pub fn new_child_of( + parent: impl Into>, + metadata: &'static Metadata<'static>, + fields: &'a field::ValueSet<'a>, + ) -> Self { + let parent = match parent.into() { + Some(p) => Parent::Explicit(p), + None => Parent::Root, + }; + Event { + fields, + metadata, + parent, + } + } + + /// Constructs a new `Event` with the specified metadata and set of values, + /// and observes it with the current subscriber and an explicit parent. + pub fn child_of( + parent: impl Into>, + metadata: &'static Metadata<'static>, + fields: &'a field::ValueSet<'_>, + ) { + let event = Self::new_child_of(parent, metadata, fields); + crate::dispatcher::get_default(|current| { + current.event(&event); + }); + } + + /// Visits all the fields on this `Event` with the specified [visitor]. + /// + /// [visitor]: super::field::Visit + #[inline] + pub fn record(&self, visitor: &mut dyn field::Visit) { + self.fields.record(visitor); + } + + /// Returns an iterator over the set of values on this `Event`. + pub fn fields(&self) -> field::Iter { + self.fields.field_set().iter() + } + + /// Returns [metadata] describing this `Event`. + /// + /// [metadata]: super::Metadata + pub fn metadata(&self) -> &'static Metadata<'static> { + self.metadata + } + + /// Returns true if the new event should be a root. + pub fn is_root(&self) -> bool { + matches!(self.parent, Parent::Root) + } + + /// Returns true if the new event's parent should be determined based on the + /// current context. + /// + /// If this is true and the current thread is currently inside a span, then + /// that span should be the new event's parent. Otherwise, if the current + /// thread is _not_ inside a span, then the new event will be the root of its + /// own trace tree. + pub fn is_contextual(&self) -> bool { + matches!(self.parent, Parent::Current) + } + + /// Returns the new event's explicitly-specified parent, if there is one. + /// + /// Otherwise (if the new event is a root or is a child of the current span), + /// returns `None`. + pub fn parent(&self) -> Option<&Id> { + match self.parent { + Parent::Explicit(ref p) => Some(p), + _ => None, + } + } +} diff --git a/third_party/rust/tracing-core/src/field.rs b/third_party/rust/tracing-core/src/field.rs new file mode 100644 index 000000000000..e40d3827737a --- /dev/null +++ b/third_party/rust/tracing-core/src/field.rs @@ -0,0 +1,1190 @@ +//! `Span` and `Event` key-value data. +//! +//! Spans and events may be annotated with key-value data, referred to as known +//! as _fields_. These fields consist of a mapping from a key (corresponding to +//! a `&str` but represented internally as an array index) to a [`Value`]. +//! +//! # `Value`s and `Subscriber`s +//! +//! `Subscriber`s consume `Value`s as fields attached to [span]s or [`Event`]s. +//! The set of field keys on a given span or is defined on its [`Metadata`]. +//! 
When a span is created, it provides [`Attributes`] to the `Subscriber`'s +//! [`new_span`] method, containing any fields whose values were provided when +//! the span was created; and may call the `Subscriber`'s [`record`] method +//! with additional [`Record`]s if values are added for more of its fields. +//! Similarly, the [`Event`] type passed to the subscriber's [`event`] method +//! will contain any fields attached to each event. +//! +//! `tracing` represents values as either one of a set of Rust primitives +//! (`i64`, `u64`, `f64`, `bool`, and `&str`) or using a `fmt::Display` or +//! `fmt::Debug` implementation. `Subscriber`s are provided these primitive +//! value types as `dyn Value` trait objects. +//! +//! These trait objects can be formatted using `fmt::Debug`, but may also be +//! recorded as typed data by calling the [`Value::record`] method on these +//! trait objects with a _visitor_ implementing the [`Visit`] trait. This trait +//! represents the behavior used to record values of various types. For example, +//! an implementation of `Visit` might record integers by incrementing counters +//! for their field names rather than printing them. +//! +//! +//! # Using `valuable` +//! +//! `tracing`'s [`Value`] trait is intentionally minimalist: it supports only a small +//! number of Rust primitives as typed values, and only permits recording +//! user-defined types with their [`fmt::Debug`] or [`fmt::Display`] +//! implementations. However, there are some cases where it may be useful to record +//! nested values (such as arrays, `Vec`s, or `HashMap`s containing values), or +//! user-defined `struct` and `enum` types without having to format them as +//! unstructured text. +//! +//! To address `Value`'s limitations, `tracing` offers experimental support for +//! the [`valuable`] crate, which provides object-safe inspection of structured +//! values. User-defined types can implement the [`valuable::Valuable`] trait, +//! and be recorded as a `tracing` field by calling their [`as_value`] method. +//! If the [`Subscriber`] also supports the `valuable` crate, it can +//! then visit those types fields as structured values using `valuable`. +//! +//!
+//!     Note: valuable support is an
+//!     unstable feature. See
+//!     the documentation on unstable features for details on how to enable it.
+//! 
+//! +//! For example: +//! ```ignore +//! // Derive `Valuable` for our types: +//! use valuable::Valuable; +//! +//! #[derive(Clone, Debug, Valuable)] +//! struct User { +//! name: String, +//! age: u32, +//! address: Address, +//! } +//! +//! #[derive(Clone, Debug, Valuable)] +//! struct Address { +//! country: String, +//! city: String, +//! street: String, +//! } +//! +//! let user = User { +//! name: "Arwen Undomiel".to_string(), +//! age: 3000, +//! address: Address { +//! country: "Middle Earth".to_string(), +//! city: "Rivendell".to_string(), +//! street: "leafy lane".to_string(), +//! }, +//! }; +//! +//! // Recording `user` as a `valuable::Value` will allow the `tracing` subscriber +//! // to traverse its fields as a nested, typed structure: +//! tracing::info!(current_user = user.as_value()); +//! ``` +//! +//! Alternatively, the [`valuable()`] function may be used to convert a type +//! implementing [`Valuable`] into a `tracing` field value. +//! +//! When the `valuable` feature is enabled, the [`Visit`] trait will include an +//! optional [`record_value`] method. `Visit` implementations that wish to +//! record `valuable` values can implement this method with custom behavior. +//! If a visitor does not implement `record_value`, the [`valuable::Value`] will +//! be forwarded to the visitor's [`record_debug`] method. +//! +//! [`valuable`]: https://crates.io/crates/valuable +//! [`as_value`]: valuable::Valuable::as_value +//! [`Subscriber`]: crate::Subscriber +//! [`record_value`]: Visit::record_value +//! [`record_debug`]: Visit::record_debug +//! +//! [`Value`]: Value +//! [span]: super::span +//! [`Event`]: super::event::Event +//! [`Metadata`]: super::metadata::Metadata +//! [`Attributes`]: super::span::Attributes +//! [`Record`]: super::span::Record +//! [`new_span`]: super::subscriber::Subscriber::new_span +//! [`record`]: super::subscriber::Subscriber::record +//! [`event`]: super::subscriber::Subscriber::event +//! [`Value::record`]: Value::record +//! [`Visit`]: Visit +use crate::callsite; +use crate::stdlib::{ + borrow::Borrow, + fmt, + hash::{Hash, Hasher}, + num, + ops::Range, +}; + +use self::private::ValidLen; + +/// An opaque key allowing _O_(1) access to a field in a `Span`'s key-value +/// data. +/// +/// As keys are defined by the _metadata_ of a span, rather than by an +/// individual instance of a span, a key may be used to access the same field +/// across all instances of a given span with the same metadata. Thus, when a +/// subscriber observes a new span, it need only access a field by name _once_, +/// and use the key for that name for all other accesses. +#[derive(Debug)] +pub struct Field { + i: usize, + fields: FieldSet, +} + +/// An empty field. +/// +/// This can be used to indicate that the value of a field is not currently +/// present but will be recorded later. +/// +/// When a field's value is `Empty`. it will not be recorded. +#[derive(Debug, Eq, PartialEq)] +pub struct Empty; + +/// Describes the fields present on a span. +pub struct FieldSet { + /// The names of each field on the described span. + names: &'static [&'static str], + /// The callsite where the described span originates. + callsite: callsite::Identifier, +} + +/// A set of fields and values for a span. +pub struct ValueSet<'a> { + values: &'a [(&'a Field, Option<&'a (dyn Value + 'a)>)], + fields: &'a FieldSet, +} + +/// An iterator over a set of fields. +#[derive(Debug)] +pub struct Iter { + idxs: Range, + fields: FieldSet, +} + +/// Visits typed values. 
+/// +/// An instance of `Visit` ("a visitor") represents the logic necessary to +/// record field values of various types. When an implementor of [`Value`] is +/// [recorded], it calls the appropriate method on the provided visitor to +/// indicate the type that value should be recorded as. +/// +/// When a [`Subscriber`] implementation [records an `Event`] or a +/// [set of `Value`s added to a `Span`], it can pass an `&mut Visit` to the +/// `record` method on the provided [`ValueSet`] or [`Event`]. This visitor +/// will then be used to record all the field-value pairs present on that +/// `Event` or `ValueSet`. +/// +/// # Examples +/// +/// A simple visitor that writes to a string might be implemented like so: +/// ``` +/// # extern crate tracing_core as tracing; +/// use std::fmt::{self, Write}; +/// use tracing::field::{Value, Visit, Field}; +/// pub struct StringVisitor<'a> { +/// string: &'a mut String, +/// } +/// +/// impl<'a> Visit for StringVisitor<'a> { +/// fn record_debug(&mut self, field: &Field, value: &dyn fmt::Debug) { +/// write!(self.string, "{} = {:?}; ", field.name(), value).unwrap(); +/// } +/// } +/// ``` +/// This visitor will format each recorded value using `fmt::Debug`, and +/// append the field name and formatted value to the provided string, +/// regardless of the type of the recorded value. When all the values have +/// been recorded, the `StringVisitor` may be dropped, allowing the string +/// to be printed or stored in some other data structure. +/// +/// The `Visit` trait provides default implementations for `record_i64`, +/// `record_u64`, `record_bool`, `record_str`, and `record_error`, which simply +/// forward the recorded value to `record_debug`. Thus, `record_debug` is the +/// only method which a `Visit` implementation *must* implement. However, +/// visitors may override the default implementations of these functions in +/// order to implement type-specific behavior. +/// +/// Additionally, when a visitor receives a value of a type it does not care +/// about, it is free to ignore those values completely. For example, a +/// visitor which only records numeric data might look like this: +/// +/// ``` +/// # extern crate tracing_core as tracing; +/// # use std::fmt::{self, Write}; +/// # use tracing::field::{Value, Visit, Field}; +/// pub struct SumVisitor { +/// sum: i64, +/// } +/// +/// impl Visit for SumVisitor { +/// fn record_i64(&mut self, _field: &Field, value: i64) { +/// self.sum += value; +/// } +/// +/// fn record_u64(&mut self, _field: &Field, value: u64) { +/// self.sum += value as i64; +/// } +/// +/// fn record_debug(&mut self, _field: &Field, _value: &fmt::Debug) { +/// // Do nothing +/// } +/// } +/// ``` +/// +/// This visitor (which is probably not particularly useful) keeps a running +/// sum of all the numeric values it records, and ignores all other values. A +/// more practical example of recording typed values is presented in +/// `examples/counters.rs`, which demonstrates a very simple metrics system +/// implemented using `tracing`. +/// +///
+/// **Note**: The `record_error` trait method is only
+/// available when the Rust standard library is present, as it requires the
+/// `std::error::Error` trait.
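+///
+/// As a hedged illustration of that `std`-gated method (the `ErrorCollector`
+/// type below is hypothetical and not part of this crate), a visitor might
+/// collect recorded errors while ignoring all other values:
+///
+/// ```
+/// # extern crate tracing_core as tracing;
+/// use std::fmt;
+/// use tracing::field::{Field, Visit};
+///
+/// pub struct ErrorCollector {
+///     errors: Vec<String>,
+/// }
+///
+/// impl Visit for ErrorCollector {
+///     // `record_error` is only present on `Visit` when the "std" feature
+///     // (enabled by default) is active.
+///     fn record_error(&mut self, field: &Field, value: &(dyn std::error::Error + 'static)) {
+///         self.errors.push(format!("{} = {}", field.name(), value));
+///     }
+///
+///     fn record_debug(&mut self, _field: &Field, _value: &dyn fmt::Debug) {
+///         // Ignore values that are not errors.
+///     }
+/// }
+/// ```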
+/// +/// [`Value`]: Value +/// [recorded]: Value::record +/// [`Subscriber`]: super::subscriber::Subscriber +/// [records an `Event`]: super::subscriber::Subscriber::event +/// [set of `Value`s added to a `Span`]: super::subscriber::Subscriber::record +/// [`Event`]: super::event::Event +/// [`ValueSet`]: ValueSet +pub trait Visit { + /// Visits an arbitrary type implementing the [`valuable`] crate's `Valuable` trait. + /// + /// [`valuable`]: https://docs.rs/valuable + #[cfg(all(tracing_unstable, feature = "valuable"))] + #[cfg_attr(docsrs, doc(cfg(all(tracing_unstable, feature = "valuable"))))] + fn record_value(&mut self, field: &Field, value: valuable::Value<'_>) { + self.record_debug(field, &value) + } + + /// Visit a double-precision floating point value. + fn record_f64(&mut self, field: &Field, value: f64) { + self.record_debug(field, &value) + } + + /// Visit a signed 64-bit integer value. + fn record_i64(&mut self, field: &Field, value: i64) { + self.record_debug(field, &value) + } + + /// Visit an unsigned 64-bit integer value. + fn record_u64(&mut self, field: &Field, value: u64) { + self.record_debug(field, &value) + } + + /// Visit a boolean value. + fn record_bool(&mut self, field: &Field, value: bool) { + self.record_debug(field, &value) + } + + /// Visit a string value. + fn record_str(&mut self, field: &Field, value: &str) { + self.record_debug(field, &value) + } + + /// Records a type implementing `Error`. + /// + ///
+    /// **Note**: This is only enabled when the Rust standard library is
+    /// present.
+ #[cfg(feature = "std")] + #[cfg_attr(docsrs, doc(cfg(feature = "std")))] + fn record_error(&mut self, field: &Field, value: &(dyn std::error::Error + 'static)) { + self.record_debug(field, &DisplayValue(value)) + } + + /// Visit a value implementing `fmt::Debug`. + fn record_debug(&mut self, field: &Field, value: &dyn fmt::Debug); +} + +/// A field value of an erased type. +/// +/// Implementors of `Value` may call the appropriate typed recording methods on +/// the [visitor] passed to their `record` method in order to indicate how +/// their data should be recorded. +/// +/// [visitor]: Visit +pub trait Value: crate::sealed::Sealed { + /// Visits this value with the given `Visitor`. + fn record(&self, key: &Field, visitor: &mut dyn Visit); +} + +/// A `Value` which serializes using `fmt::Display`. +/// +/// Uses `record_debug` in the `Value` implementation to +/// avoid an unnecessary evaluation. +#[derive(Clone)] +pub struct DisplayValue(T); + +/// A `Value` which serializes as a string using `fmt::Debug`. +#[derive(Clone)] +pub struct DebugValue(T); + +/// Wraps a type implementing `fmt::Display` as a `Value` that can be +/// recorded using its `Display` implementation. +pub fn display(t: T) -> DisplayValue +where + T: fmt::Display, +{ + DisplayValue(t) +} + +/// Wraps a type implementing `fmt::Debug` as a `Value` that can be +/// recorded using its `Debug` implementation. +pub fn debug(t: T) -> DebugValue +where + T: fmt::Debug, +{ + DebugValue(t) +} + +/// Wraps a type implementing [`Valuable`] as a `Value` that +/// can be recorded using its `Valuable` implementation. +/// +/// [`Valuable`]: https://docs.rs/valuable/latest/valuable/trait.Valuable.html +#[cfg(all(tracing_unstable, feature = "valuable"))] +#[cfg_attr(docsrs, doc(cfg(all(tracing_unstable, feature = "valuable"))))] +pub fn valuable(t: &T) -> valuable::Value<'_> +where + T: valuable::Valuable, +{ + t.as_value() +} + +// ===== impl Visit ===== + +impl<'a, 'b> Visit for fmt::DebugStruct<'a, 'b> { + fn record_debug(&mut self, field: &Field, value: &dyn fmt::Debug) { + self.field(field.name(), value); + } +} + +impl<'a, 'b> Visit for fmt::DebugMap<'a, 'b> { + fn record_debug(&mut self, field: &Field, value: &dyn fmt::Debug) { + self.entry(&format_args!("{}", field), value); + } +} + +impl Visit for F +where + F: FnMut(&Field, &dyn fmt::Debug), +{ + fn record_debug(&mut self, field: &Field, value: &dyn fmt::Debug) { + (self)(field, value) + } +} + +// ===== impl Value ===== + +macro_rules! impl_values { + ( $( $record:ident( $( $whatever:tt)+ ) ),+ ) => { + $( + impl_value!{ $record( $( $whatever )+ ) } + )+ + } +} + +macro_rules! ty_to_nonzero { + (u8) => { + NonZeroU8 + }; + (u16) => { + NonZeroU16 + }; + (u32) => { + NonZeroU32 + }; + (u64) => { + NonZeroU64 + }; + (u128) => { + NonZeroU128 + }; + (usize) => { + NonZeroUsize + }; + (i8) => { + NonZeroI8 + }; + (i16) => { + NonZeroI16 + }; + (i32) => { + NonZeroI32 + }; + (i64) => { + NonZeroI64 + }; + (i128) => { + NonZeroI128 + }; + (isize) => { + NonZeroIsize + }; +} + +macro_rules! 
impl_one_value { + (f32, $op:expr, $record:ident) => { + impl_one_value!(normal, f32, $op, $record); + }; + (f64, $op:expr, $record:ident) => { + impl_one_value!(normal, f64, $op, $record); + }; + (bool, $op:expr, $record:ident) => { + impl_one_value!(normal, bool, $op, $record); + }; + ($value_ty:tt, $op:expr, $record:ident) => { + impl_one_value!(normal, $value_ty, $op, $record); + impl_one_value!(nonzero, $value_ty, $op, $record); + }; + (normal, $value_ty:tt, $op:expr, $record:ident) => { + impl $crate::sealed::Sealed for $value_ty {} + impl $crate::field::Value for $value_ty { + fn record(&self, key: &$crate::field::Field, visitor: &mut dyn $crate::field::Visit) { + visitor.$record(key, $op(*self)) + } + } + }; + (nonzero, $value_ty:tt, $op:expr, $record:ident) => { + // This `use num::*;` is reported as unused because it gets emitted + // for every single invocation of this macro, so there are multiple `use`s. + // All but the first are useless indeed. + // We need this import because we can't write a path where one part is + // the `ty_to_nonzero!($value_ty)` invocation. + #[allow(clippy::useless_attribute, unused)] + use num::*; + impl $crate::sealed::Sealed for ty_to_nonzero!($value_ty) {} + impl $crate::field::Value for ty_to_nonzero!($value_ty) { + fn record(&self, key: &$crate::field::Field, visitor: &mut dyn $crate::field::Visit) { + visitor.$record(key, $op(self.get())) + } + } + }; +} + +macro_rules! impl_value { + ( $record:ident( $( $value_ty:tt ),+ ) ) => { + $( + impl_one_value!($value_ty, |this: $value_ty| this, $record); + )+ + }; + ( $record:ident( $( $value_ty:tt ),+ as $as_ty:ty) ) => { + $( + impl_one_value!($value_ty, |this: $value_ty| this as $as_ty, $record); + )+ + }; +} + +// ===== impl Value ===== + +impl_values! { + record_u64(u64), + record_u64(usize, u32, u16, u8 as u64), + record_i64(i64), + record_i64(isize, i32, i16, i8 as i64), + record_bool(bool), + record_f64(f64, f32 as f64) +} + +impl crate::sealed::Sealed for Wrapping {} +impl crate::field::Value for Wrapping { + fn record(&self, key: &crate::field::Field, visitor: &mut dyn crate::field::Visit) { + self.0.record(key, visitor) + } +} + +impl crate::sealed::Sealed for str {} + +impl Value for str { + fn record(&self, key: &Field, visitor: &mut dyn Visit) { + visitor.record_str(key, self) + } +} + +#[cfg(feature = "std")] +impl crate::sealed::Sealed for dyn std::error::Error + 'static {} + +#[cfg(feature = "std")] +#[cfg_attr(docsrs, doc(cfg(feature = "std")))] +impl Value for dyn std::error::Error + 'static { + fn record(&self, key: &Field, visitor: &mut dyn Visit) { + visitor.record_error(key, self) + } +} + +#[cfg(feature = "std")] +impl crate::sealed::Sealed for dyn std::error::Error + Send + 'static {} + +#[cfg(feature = "std")] +#[cfg_attr(docsrs, doc(cfg(feature = "std")))] +impl Value for dyn std::error::Error + Send + 'static { + fn record(&self, key: &Field, visitor: &mut dyn Visit) { + (self as &dyn std::error::Error).record(key, visitor) + } +} + +#[cfg(feature = "std")] +impl crate::sealed::Sealed for dyn std::error::Error + Sync + 'static {} + +#[cfg(feature = "std")] +#[cfg_attr(docsrs, doc(cfg(feature = "std")))] +impl Value for dyn std::error::Error + Sync + 'static { + fn record(&self, key: &Field, visitor: &mut dyn Visit) { + (self as &dyn std::error::Error).record(key, visitor) + } +} + +#[cfg(feature = "std")] +impl crate::sealed::Sealed for dyn std::error::Error + Send + Sync + 'static {} + +#[cfg(feature = "std")] +#[cfg_attr(docsrs, doc(cfg(feature = "std")))] +impl Value for 
dyn std::error::Error + Send + Sync + 'static { + fn record(&self, key: &Field, visitor: &mut dyn Visit) { + (self as &dyn std::error::Error).record(key, visitor) + } +} + +impl<'a, T: ?Sized> crate::sealed::Sealed for &'a T where T: Value + crate::sealed::Sealed + 'a {} + +impl<'a, T: ?Sized> Value for &'a T +where + T: Value + 'a, +{ + fn record(&self, key: &Field, visitor: &mut dyn Visit) { + (*self).record(key, visitor) + } +} + +impl<'a, T: ?Sized> crate::sealed::Sealed for &'a mut T where T: Value + crate::sealed::Sealed + 'a {} + +impl<'a, T: ?Sized> Value for &'a mut T +where + T: Value + 'a, +{ + fn record(&self, key: &Field, visitor: &mut dyn Visit) { + // Don't use `(*self).record(key, visitor)`, otherwise would + // cause stack overflow due to `unconditional_recursion`. + T::record(self, key, visitor) + } +} + +impl<'a> crate::sealed::Sealed for fmt::Arguments<'a> {} + +impl<'a> Value for fmt::Arguments<'a> { + fn record(&self, key: &Field, visitor: &mut dyn Visit) { + visitor.record_debug(key, self) + } +} + +impl crate::sealed::Sealed for crate::stdlib::boxed::Box where T: Value {} + +impl Value for crate::stdlib::boxed::Box +where + T: Value, +{ + #[inline] + fn record(&self, key: &Field, visitor: &mut dyn Visit) { + self.as_ref().record(key, visitor) + } +} + +impl fmt::Debug for dyn Value { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + // We are only going to be recording the field value, so we don't + // actually care about the field name here. + struct NullCallsite; + static NULL_CALLSITE: NullCallsite = NullCallsite; + impl crate::callsite::Callsite for NullCallsite { + fn set_interest(&self, _: crate::subscriber::Interest) { + unreachable!("you somehow managed to register the null callsite?") + } + + fn metadata(&self) -> &crate::Metadata<'_> { + unreachable!("you somehow managed to access the null callsite?") + } + } + + static FIELD: Field = Field { + i: 0, + fields: FieldSet::new(&[], crate::identify_callsite!(&NULL_CALLSITE)), + }; + + let mut res = Ok(()); + self.record(&FIELD, &mut |_: &Field, val: &dyn fmt::Debug| { + res = write!(f, "{:?}", val); + }); + res + } +} + +impl fmt::Display for dyn Value { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Debug::fmt(self, f) + } +} + +// ===== impl DisplayValue ===== + +impl crate::sealed::Sealed for DisplayValue {} + +impl Value for DisplayValue +where + T: fmt::Display, +{ + fn record(&self, key: &Field, visitor: &mut dyn Visit) { + visitor.record_debug(key, self) + } +} + +impl fmt::Debug for DisplayValue { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(self, f) + } +} + +impl fmt::Display for DisplayValue { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.0.fmt(f) + } +} + +// ===== impl DebugValue ===== + +impl crate::sealed::Sealed for DebugValue {} + +impl Value for DebugValue +where + T: fmt::Debug, +{ + fn record(&self, key: &Field, visitor: &mut dyn Visit) { + visitor.record_debug(key, &self.0) + } +} + +impl fmt::Debug for DebugValue { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.0.fmt(f) + } +} + +// ===== impl ValuableValue ===== + +#[cfg(all(tracing_unstable, feature = "valuable"))] +impl crate::sealed::Sealed for valuable::Value<'_> {} + +#[cfg(all(tracing_unstable, feature = "valuable"))] +#[cfg_attr(docsrs, doc(cfg(all(tracing_unstable, feature = "valuable"))))] +impl Value for valuable::Value<'_> { + fn record(&self, key: &Field, visitor: &mut dyn Visit) { + visitor.record_value(key, 
*self) + } +} + +#[cfg(all(tracing_unstable, feature = "valuable"))] +impl crate::sealed::Sealed for &'_ dyn valuable::Valuable {} + +#[cfg(all(tracing_unstable, feature = "valuable"))] +#[cfg_attr(docsrs, doc(cfg(all(tracing_unstable, feature = "valuable"))))] +impl Value for &'_ dyn valuable::Valuable { + fn record(&self, key: &Field, visitor: &mut dyn Visit) { + visitor.record_value(key, self.as_value()) + } +} + +impl crate::sealed::Sealed for Empty {} +impl Value for Empty { + #[inline] + fn record(&self, _: &Field, _: &mut dyn Visit) {} +} + +impl crate::sealed::Sealed for Option {} + +impl Value for Option { + fn record(&self, key: &Field, visitor: &mut dyn Visit) { + if let Some(v) = &self { + v.record(key, visitor) + } + } +} + +// ===== impl Field ===== + +impl Field { + /// Returns an [`Identifier`] that uniquely identifies the [`Callsite`] + /// which defines this field. + /// + /// [`Identifier`]: super::callsite::Identifier + /// [`Callsite`]: super::callsite::Callsite + #[inline] + pub fn callsite(&self) -> callsite::Identifier { + self.fields.callsite() + } + + /// Returns a string representing the name of the field. + pub fn name(&self) -> &'static str { + self.fields.names[self.i] + } +} + +impl fmt::Display for Field { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.pad(self.name()) + } +} + +impl AsRef for Field { + fn as_ref(&self) -> &str { + self.name() + } +} + +impl PartialEq for Field { + fn eq(&self, other: &Self) -> bool { + self.callsite() == other.callsite() && self.i == other.i + } +} + +impl Eq for Field {} + +impl Hash for Field { + fn hash(&self, state: &mut H) + where + H: Hasher, + { + self.callsite().hash(state); + self.i.hash(state); + } +} + +impl Clone for Field { + fn clone(&self) -> Self { + Field { + i: self.i, + fields: FieldSet { + names: self.fields.names, + callsite: self.fields.callsite(), + }, + } + } +} + +// ===== impl FieldSet ===== + +impl FieldSet { + /// Constructs a new `FieldSet` with the given array of field names and callsite. + pub const fn new(names: &'static [&'static str], callsite: callsite::Identifier) -> Self { + Self { names, callsite } + } + + /// Returns an [`Identifier`] that uniquely identifies the [`Callsite`] + /// which defines this set of fields.. + /// + /// [`Identifier`]: super::callsite::Identifier + /// [`Callsite`]: super::callsite::Callsite + pub(crate) fn callsite(&self) -> callsite::Identifier { + callsite::Identifier(self.callsite.0) + } + + /// Returns the [`Field`] named `name`, or `None` if no such field exists. + /// + /// [`Field`]: super::Field + pub fn field(&self, name: &Q) -> Option + where + Q: Borrow, + { + let name = &name.borrow(); + self.names.iter().position(|f| f == name).map(|i| Field { + i, + fields: FieldSet { + names: self.names, + callsite: self.callsite(), + }, + }) + } + + /// Returns `true` if `self` contains the given `field`. + /// + ///
+    /// **Note**: If `field` shares a name with a field
+    /// in this `FieldSet`, but was created by a `FieldSet`
+    /// with a different callsite, this `FieldSet` does *not*
+    /// contain it. This is so that if two separate span callsites define a
+    /// field named "foo", the `Field`s corresponding to "foo" for each of
+    /// those callsites are not equivalent.
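+    ///
+    /// As a hedged sketch of that behavior, assume `span_meta_a` and
+    /// `span_meta_b` are `&'static Metadata` values for two different span
+    /// callsites that both define a field named "foo":
+    ///
+    /// ```ignore
+    /// let foo_a = span_meta_a.fields().field("foo").unwrap();
+    /// let fields_b = span_meta_b.fields();
+    ///
+    /// // `foo_a` came from the first callsite's `FieldSet`, so the second
+    /// // callsite's `FieldSet` does not contain it, even though it also
+    /// // defines a field named "foo".
+    /// assert!(!fields_b.contains(&foo_a));
+    /// assert!(fields_b.contains(&fields_b.field("foo").unwrap()));
+    /// ```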
+ pub fn contains(&self, field: &Field) -> bool { + field.callsite() == self.callsite() && field.i <= self.len() + } + + /// Returns an iterator over the `Field`s in this `FieldSet`. + pub fn iter(&self) -> Iter { + let idxs = 0..self.len(); + Iter { + idxs, + fields: FieldSet { + names: self.names, + callsite: self.callsite(), + }, + } + } + + /// Returns a new `ValueSet` with entries for this `FieldSet`'s values. + /// + /// Note that a `ValueSet` may not be constructed with arrays of over 32 + /// elements. + #[doc(hidden)] + pub fn value_set<'v, V>(&'v self, values: &'v V) -> ValueSet<'v> + where + V: ValidLen<'v>, + { + ValueSet { + fields: self, + values: values.borrow(), + } + } + + /// Returns the number of fields in this `FieldSet`. + #[inline] + pub fn len(&self) -> usize { + self.names.len() + } + + /// Returns whether or not this `FieldSet` has fields. + #[inline] + pub fn is_empty(&self) -> bool { + self.names.is_empty() + } +} + +impl<'a> IntoIterator for &'a FieldSet { + type IntoIter = Iter; + type Item = Field; + #[inline] + fn into_iter(self) -> Self::IntoIter { + self.iter() + } +} + +impl fmt::Debug for FieldSet { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("FieldSet") + .field("names", &self.names) + .field("callsite", &self.callsite) + .finish() + } +} + +impl fmt::Display for FieldSet { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_set() + .entries(self.names.iter().map(display)) + .finish() + } +} + +// ===== impl Iter ===== + +impl Iterator for Iter { + type Item = Field; + fn next(&mut self) -> Option { + let i = self.idxs.next()?; + Some(Field { + i, + fields: FieldSet { + names: self.fields.names, + callsite: self.fields.callsite(), + }, + }) + } +} + +// ===== impl ValueSet ===== + +impl<'a> ValueSet<'a> { + /// Returns an [`Identifier`] that uniquely identifies the [`Callsite`] + /// defining the fields this `ValueSet` refers to. + /// + /// [`Identifier`]: super::callsite::Identifier + /// [`Callsite`]: super::callsite::Callsite + #[inline] + pub fn callsite(&self) -> callsite::Identifier { + self.fields.callsite() + } + + /// Visits all the fields in this `ValueSet` with the provided [visitor]. + /// + /// [visitor]: Visit + pub fn record(&self, visitor: &mut dyn Visit) { + let my_callsite = self.callsite(); + for (field, value) in self.values { + if field.callsite() != my_callsite { + continue; + } + if let Some(value) = value { + value.record(field, visitor); + } + } + } + + /// Returns `true` if this `ValueSet` contains a value for the given `Field`. + pub(crate) fn contains(&self, field: &Field) -> bool { + field.callsite() == self.callsite() + && self + .values + .iter() + .any(|(key, val)| *key == field && val.is_some()) + } + + /// Returns true if this `ValueSet` contains _no_ values. 
+ pub(crate) fn is_empty(&self) -> bool { + let my_callsite = self.callsite(); + self.values + .iter() + .all(|(key, val)| val.is_none() || key.callsite() != my_callsite) + } + + pub(crate) fn field_set(&self) -> &FieldSet { + self.fields + } +} + +impl<'a> fmt::Debug for ValueSet<'a> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.values + .iter() + .fold(&mut f.debug_struct("ValueSet"), |dbg, (key, v)| { + if let Some(val) = v { + val.record(key, dbg); + } + dbg + }) + .field("callsite", &self.callsite()) + .finish() + } +} + +impl<'a> fmt::Display for ValueSet<'a> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.values + .iter() + .fold(&mut f.debug_map(), |dbg, (key, v)| { + if let Some(val) = v { + val.record(key, dbg); + } + dbg + }) + .finish() + } +} + +// ===== impl ValidLen ===== + +mod private { + use super::*; + + /// Marker trait implemented by arrays which are of valid length to + /// construct a `ValueSet`. + /// + /// `ValueSet`s may only be constructed from arrays containing 32 or fewer + /// elements, to ensure the array is small enough to always be allocated on the + /// stack. This trait is only implemented by arrays of an appropriate length, + /// ensuring that the correct size arrays are used at compile-time. + pub trait ValidLen<'a>: Borrow<[(&'a Field, Option<&'a (dyn Value + 'a)>)]> {} +} + +macro_rules! impl_valid_len { + ( $( $len:tt ),+ ) => { + $( + impl<'a> private::ValidLen<'a> for + [(&'a Field, Option<&'a (dyn Value + 'a)>); $len] {} + )+ + } +} + +impl_valid_len! { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32 +} + +#[cfg(test)] +mod test { + use super::*; + use crate::metadata::{Kind, Level, Metadata}; + use crate::stdlib::{borrow::ToOwned, string::String}; + + struct TestCallsite1; + static TEST_CALLSITE_1: TestCallsite1 = TestCallsite1; + static TEST_META_1: Metadata<'static> = metadata! { + name: "field_test1", + target: module_path!(), + level: Level::INFO, + fields: &["foo", "bar", "baz"], + callsite: &TEST_CALLSITE_1, + kind: Kind::SPAN, + }; + + impl crate::callsite::Callsite for TestCallsite1 { + fn set_interest(&self, _: crate::subscriber::Interest) { + unimplemented!() + } + + fn metadata(&self) -> &Metadata<'_> { + &TEST_META_1 + } + } + + struct TestCallsite2; + static TEST_CALLSITE_2: TestCallsite2 = TestCallsite2; + static TEST_META_2: Metadata<'static> = metadata! 
{ + name: "field_test2", + target: module_path!(), + level: Level::INFO, + fields: &["foo", "bar", "baz"], + callsite: &TEST_CALLSITE_2, + kind: Kind::SPAN, + }; + + impl crate::callsite::Callsite for TestCallsite2 { + fn set_interest(&self, _: crate::subscriber::Interest) { + unimplemented!() + } + + fn metadata(&self) -> &Metadata<'_> { + &TEST_META_2 + } + } + + #[test] + fn value_set_with_no_values_is_empty() { + let fields = TEST_META_1.fields(); + let values = &[ + (&fields.field("foo").unwrap(), None), + (&fields.field("bar").unwrap(), None), + (&fields.field("baz").unwrap(), None), + ]; + let valueset = fields.value_set(values); + assert!(valueset.is_empty()); + } + + #[test] + fn empty_value_set_is_empty() { + let fields = TEST_META_1.fields(); + let valueset = fields.value_set(&[]); + assert!(valueset.is_empty()); + } + + #[test] + fn value_sets_with_fields_from_other_callsites_are_empty() { + let fields = TEST_META_1.fields(); + let values = &[ + (&fields.field("foo").unwrap(), Some(&1 as &dyn Value)), + (&fields.field("bar").unwrap(), Some(&2 as &dyn Value)), + (&fields.field("baz").unwrap(), Some(&3 as &dyn Value)), + ]; + let valueset = TEST_META_2.fields().value_set(values); + assert!(valueset.is_empty()) + } + + #[test] + fn sparse_value_sets_are_not_empty() { + let fields = TEST_META_1.fields(); + let values = &[ + (&fields.field("foo").unwrap(), None), + (&fields.field("bar").unwrap(), Some(&57 as &dyn Value)), + (&fields.field("baz").unwrap(), None), + ]; + let valueset = fields.value_set(values); + assert!(!valueset.is_empty()); + } + + #[test] + fn fields_from_other_callsets_are_skipped() { + let fields = TEST_META_1.fields(); + let values = &[ + (&fields.field("foo").unwrap(), None), + ( + &TEST_META_2.fields().field("bar").unwrap(), + Some(&57 as &dyn Value), + ), + (&fields.field("baz").unwrap(), None), + ]; + + struct MyVisitor; + impl Visit for MyVisitor { + fn record_debug(&mut self, field: &Field, _: &dyn (crate::stdlib::fmt::Debug)) { + assert_eq!(field.callsite(), TEST_META_1.callsite()) + } + } + let valueset = fields.value_set(values); + valueset.record(&mut MyVisitor); + } + + #[test] + fn empty_fields_are_skipped() { + let fields = TEST_META_1.fields(); + let values = &[ + (&fields.field("foo").unwrap(), Some(&Empty as &dyn Value)), + (&fields.field("bar").unwrap(), Some(&57 as &dyn Value)), + (&fields.field("baz").unwrap(), Some(&Empty as &dyn Value)), + ]; + + struct MyVisitor; + impl Visit for MyVisitor { + fn record_debug(&mut self, field: &Field, _: &dyn (crate::stdlib::fmt::Debug)) { + assert_eq!(field.name(), "bar") + } + } + let valueset = fields.value_set(values); + valueset.record(&mut MyVisitor); + } + + #[test] + fn record_debug_fn() { + let fields = TEST_META_1.fields(); + let values = &[ + (&fields.field("foo").unwrap(), Some(&1 as &dyn Value)), + (&fields.field("bar").unwrap(), Some(&2 as &dyn Value)), + (&fields.field("baz").unwrap(), Some(&3 as &dyn Value)), + ]; + let valueset = fields.value_set(values); + let mut result = String::new(); + valueset.record(&mut |_: &Field, value: &dyn fmt::Debug| { + use crate::stdlib::fmt::Write; + write!(&mut result, "{:?}", value).unwrap(); + }); + assert_eq!(result, "123".to_owned()); + } + + #[test] + #[cfg(feature = "std")] + fn record_error() { + let fields = TEST_META_1.fields(); + let err: Box = + std::io::Error::new(std::io::ErrorKind::Other, "lol").into(); + let values = &[ + (&fields.field("foo").unwrap(), Some(&err as &dyn Value)), + (&fields.field("bar").unwrap(), Some(&Empty as &dyn Value)), 
+ (&fields.field("baz").unwrap(), Some(&Empty as &dyn Value)), + ]; + let valueset = fields.value_set(values); + let mut result = String::new(); + valueset.record(&mut |_: &Field, value: &dyn fmt::Debug| { + use core::fmt::Write; + write!(&mut result, "{:?}", value).unwrap(); + }); + assert_eq!(result, format!("{}", err)); + } +} diff --git a/third_party/rust/tracing-core/src/lazy_static/LICENSE b/third_party/rust/tracing-core/src/lazy_static/LICENSE new file mode 100644 index 000000000000..28e478827c75 --- /dev/null +++ b/third_party/rust/tracing-core/src/lazy_static/LICENSE @@ -0,0 +1,26 @@ + +Copyright (c) 2010 The Rust Project Developers + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/third_party/rust/tracing-core/src/lazy_static/core_lazy.rs b/third_party/rust/tracing-core/src/lazy_static/core_lazy.rs new file mode 100644 index 000000000000..c61d36202d01 --- /dev/null +++ b/third_party/rust/tracing-core/src/lazy_static/core_lazy.rs @@ -0,0 +1,30 @@ +// Copyright 2016 lazy-static.rs Developers +// +// Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be +// copied, modified, or distributed except according to those terms. + +use crate::spin::Once; + +pub(crate) struct Lazy(Once); + +impl Lazy { + pub(crate) const INIT: Self = Lazy(Once::INIT); + + #[inline(always)] + pub(crate) fn get(&'static self, builder: F) -> &T + where + F: FnOnce() -> T, + { + self.0.call_once(builder) + } +} + +#[macro_export] +#[doc(hidden)] +macro_rules! __lazy_static_create { + ($NAME:ident, $T:ty) => { + static $NAME: $crate::lazy_static::lazy::Lazy<$T> = $crate::lazy_static::lazy::Lazy::INIT; + }; +} diff --git a/third_party/rust/tracing-core/src/lazy_static/mod.rs b/third_party/rust/tracing-core/src/lazy_static/mod.rs new file mode 100644 index 000000000000..78f0ae722bb8 --- /dev/null +++ b/third_party/rust/tracing-core/src/lazy_static/mod.rs @@ -0,0 +1,89 @@ +// Copyright 2016 lazy-static.rs Developers +// +// Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be +// copied, modified, or distributed except according to those terms. + +/*! +A macro for declaring lazily evaluated statics. +Using this macro, it is possible to have `static`s that require code to be +executed at runtime in order to be initialized. +This includes anything requiring heap allocations, like vectors or hash maps, +as well as anything that requires function calls to be computed. 
+*/ + +#[path = "core_lazy.rs"] +pub(crate) mod lazy; + +#[doc(hidden)] +pub(crate) use core::ops::Deref as __Deref; + +#[macro_export] +#[doc(hidden)] +macro_rules! __lazy_static_internal { + // optional visibility restrictions are wrapped in `()` to allow for + // explicitly passing otherwise implicit information about private items + ($(#[$attr:meta])* ($($vis:tt)*) static ref $N:ident : $T:ty = $e:expr; $($t:tt)*) => { + $crate::__lazy_static_internal!(@MAKE TY, $(#[$attr])*, ($($vis)*), $N); + $crate::__lazy_static_internal!(@TAIL, $N : $T = $e); + $crate::lazy_static!($($t)*); + }; + (@TAIL, $N:ident : $T:ty = $e:expr) => { + impl $crate::lazy_static::__Deref for $N { + type Target = $T; + fn deref(&self) -> &$T { + #[inline(always)] + fn __static_ref_initialize() -> $T { $e } + + #[inline(always)] + fn __stability() -> &'static $T { + $crate::__lazy_static_create!(LAZY, $T); + LAZY.get(__static_ref_initialize) + } + __stability() + } + } + impl $crate::lazy_static::LazyStatic for $N { + fn initialize(lazy: &Self) { + let _ = &**lazy; + } + } + }; + // `vis` is wrapped in `()` to prevent parsing ambiguity + (@MAKE TY, $(#[$attr:meta])*, ($($vis:tt)*), $N:ident) => { + #[allow(missing_copy_implementations)] + #[allow(non_camel_case_types)] + #[allow(dead_code)] + $(#[$attr])* + $($vis)* struct $N {__private_field: ()} + #[doc(hidden)] + $($vis)* static $N: $N = $N {__private_field: ()}; + }; + () => () +} + +#[macro_export] +#[doc(hidden)] +macro_rules! lazy_static { + ($(#[$attr:meta])* static ref $N:ident : $T:ty = $e:expr; $($t:tt)*) => { + // use `()` to explicitly forward the information about private items + $crate::__lazy_static_internal!($(#[$attr])* () static ref $N : $T = $e; $($t)*); + }; + ($(#[$attr:meta])* pub static ref $N:ident : $T:ty = $e:expr; $($t:tt)*) => { + $crate::__lazy_static_internal!($(#[$attr])* (pub) static ref $N : $T = $e; $($t)*); + }; + ($(#[$attr:meta])* pub ($($vis:tt)+) static ref $N:ident : $T:ty = $e:expr; $($t:tt)*) => { + $crate::__lazy_static_internal!($(#[$attr])* (pub ($($vis)+)) static ref $N : $T = $e; $($t)*); + }; + () => () +} + +/// Support trait for enabling a few common operation on lazy static values. +/// +/// This is implemented by each defined lazy static, and +/// used by the free functions in this crate. +pub(crate) trait LazyStatic { + #[doc(hidden)] + fn initialize(lazy: &Self); +} diff --git a/third_party/rust/tracing-core/src/lib.rs b/third_party/rust/tracing-core/src/lib.rs new file mode 100644 index 000000000000..7424a6cb3fb1 --- /dev/null +++ b/third_party/rust/tracing-core/src/lib.rs @@ -0,0 +1,302 @@ +//! Core primitives for `tracing`. +//! +//! [`tracing`] is a framework for instrumenting Rust programs to collect +//! structured, event-based diagnostic information. This crate defines the core +//! primitives of `tracing`. +//! +//! This crate provides: +//! +//! * [`span::Id`] identifies a span within the execution of a program. +//! +//! * [`Event`] represents a single event within a trace. +//! +//! * [`Subscriber`], the trait implemented to collect trace data. +//! +//! * [`Metadata`] and [`Callsite`] provide information describing spans and +//! `Event`s. +//! +//! * [`Field`], [`FieldSet`], [`Value`], and [`ValueSet`] represent the +//! structured data attached to a span. +//! +//! * [`Dispatch`] allows spans and events to be dispatched to `Subscriber`s. +//! +//! In addition, it defines the global callsite registry and per-thread current +//! dispatcher which other components of the tracing system rely on. +//! 
+//! *Compiler support: [requires `rustc` 1.49+][msrv]* +//! +//! [msrv]: #supported-rust-versions +//! +//! ## Usage +//! +//! Application authors will typically not use this crate directly. Instead, +//! they will use the [`tracing`] crate, which provides a much more +//! fully-featured API. However, this crate's API will change very infrequently, +//! so it may be used when dependencies must be very stable. +//! +//! `Subscriber` implementations may depend on `tracing-core` rather than +//! `tracing`, as the additional APIs provided by `tracing` are primarily useful +//! for instrumenting libraries and applications, and are generally not +//! necessary for `Subscriber` implementations. +//! +//! The [`tokio-rs/tracing`] repository contains less stable crates designed to +//! be used with the `tracing` ecosystem. It includes a collection of +//! `Subscriber` implementations, as well as utility and adapter crates. +//! +//! ## Crate Feature Flags +//! +//! The following crate [feature flags] are available: +//! +//! * `std`: Depend on the Rust standard library (enabled by default). +//! +//! `no_std` users may disable this feature with `default-features = false`: +//! +//! ```toml +//! [dependencies] +//! tracing-core = { version = "0.1.22", default-features = false } +//! ``` +//! +//! **Note**:`tracing-core`'s `no_std` support requires `liballoc`. +//! +//! ### Unstable Features +//! +//! These feature flags enable **unstable** features. The public API may break in 0.1.x +//! releases. To enable these features, the `--cfg tracing_unstable` must be passed to +//! `rustc` when compiling. +//! +//! The following unstable feature flags are currently available: +//! +//! * `valuable`: Enables support for recording [field values] using the +//! [`valuable`] crate. +//! +//! #### Enabling Unstable Features +//! +//! The easiest way to set the `tracing_unstable` cfg is to use the `RUSTFLAGS` +//! env variable when running `cargo` commands: +//! +//! ```shell +//! RUSTFLAGS="--cfg tracing_unstable" cargo build +//! ``` +//! Alternatively, the following can be added to the `.cargo/config` file in a +//! project to automatically enable the cfg flag for that project: +//! +//! ```toml +//! [build] +//! rustflags = ["--cfg", "tracing_unstable"] +//! ``` +//! +//! [feature flags]: https://doc.rust-lang.org/cargo/reference/manifest.html#the-features-section +//! [field values]: crate::field +//! [`valuable`]: https://crates.io/crates/valuable +//! +//! ## Supported Rust Versions +//! +//! Tracing is built against the latest stable release. The minimum supported +//! version is 1.49. The current Tracing version is not guaranteed to build on +//! Rust versions earlier than the minimum supported version. +//! +//! Tracing follows the same compiler support policies as the rest of the Tokio +//! project. The current stable Rust compiler and the three most recent minor +//! versions before it will always be supported. For example, if the current +//! stable compiler version is 1.45, the minimum supported version will not be +//! increased past 1.42, three minor versions prior. Increasing the minimum +//! supported compiler version is not considered a semver breaking change as +//! long as doing so complies with this policy. +//! +//! +//! [`span::Id`]: span::Id +//! [`Event`]: event::Event +//! [`Subscriber`]: subscriber::Subscriber +//! [`Metadata`]: metadata::Metadata +//! [`Callsite`]: callsite::Callsite +//! [`Field`]: field::Field +//! [`FieldSet`]: field::FieldSet +//! [`Value`]: field::Value +//! 
[`ValueSet`]: field::ValueSet +//! [`Dispatch`]: dispatcher::Dispatch +//! [`tokio-rs/tracing`]: https://github.com/tokio-rs/tracing +//! [`tracing`]: https://crates.io/crates/tracing +#![doc(html_root_url = "https://docs.rs/tracing-core/0.1.22")] +#![doc( + html_logo_url = "https://raw.githubusercontent.com/tokio-rs/tracing/master/assets/logo-type.png", + issue_tracker_base_url = "https://github.com/tokio-rs/tracing/issues/" +)] +#![cfg_attr(not(feature = "std"), no_std)] +#![cfg_attr(docsrs, feature(doc_cfg), deny(rustdoc::broken_intra_doc_links))] +#![warn( + missing_debug_implementations, + missing_docs, + rust_2018_idioms, + unreachable_pub, + bad_style, + const_err, + dead_code, + improper_ctypes, + non_shorthand_field_patterns, + no_mangle_generic_items, + overflowing_literals, + path_statements, + patterns_in_fns_without_body, + private_in_public, + unconditional_recursion, + unused, + unused_allocation, + unused_comparisons, + unused_parens, + while_true +)] +#[cfg(not(feature = "std"))] +extern crate alloc; + +/// Statically constructs an [`Identifier`] for the provided [`Callsite`]. +/// +/// This may be used in contexts such as static initializers. +/// +/// For example: +/// ```rust +/// use tracing_core::{callsite, identify_callsite}; +/// # use tracing_core::{Metadata, subscriber::Interest}; +/// # fn main() { +/// pub struct MyCallsite { +/// // ... +/// } +/// impl callsite::Callsite for MyCallsite { +/// # fn set_interest(&self, _: Interest) { unimplemented!() } +/// # fn metadata(&self) -> &Metadata { unimplemented!() } +/// // ... +/// } +/// +/// static CALLSITE: MyCallsite = MyCallsite { +/// // ... +/// }; +/// +/// static CALLSITE_ID: callsite::Identifier = identify_callsite!(&CALLSITE); +/// # } +/// ``` +/// +/// [`Identifier`]: callsite::Identifier +/// [`Callsite`]: callsite::Callsite +#[macro_export] +macro_rules! identify_callsite { + ($callsite:expr) => { + $crate::callsite::Identifier($callsite) + }; +} + +/// Statically constructs new span [metadata]. +/// +/// /// For example: +/// ```rust +/// # use tracing_core::{callsite::Callsite, subscriber::Interest}; +/// use tracing_core::metadata; +/// use tracing_core::metadata::{Kind, Level, Metadata}; +/// # fn main() { +/// # pub struct MyCallsite { } +/// # impl Callsite for MyCallsite { +/// # fn set_interest(&self, _: Interest) { unimplemented!() } +/// # fn metadata(&self) -> &Metadata { unimplemented!() } +/// # } +/// # +/// static FOO_CALLSITE: MyCallsite = MyCallsite { +/// // ... +/// }; +/// +/// static FOO_METADATA: Metadata = metadata!{ +/// name: "foo", +/// target: module_path!(), +/// level: Level::DEBUG, +/// fields: &["bar", "baz"], +/// callsite: &FOO_CALLSITE, +/// kind: Kind::SPAN, +/// }; +/// # } +/// ``` +/// +/// [metadata]: metadata::Metadata +/// [`Metadata::new`]: metadata::Metadata::new +#[macro_export] +macro_rules! metadata { + ( + name: $name:expr, + target: $target:expr, + level: $level:expr, + fields: $fields:expr, + callsite: $callsite:expr, + kind: $kind:expr + ) => { + $crate::metadata! 
{ + name: $name, + target: $target, + level: $level, + fields: $fields, + callsite: $callsite, + kind: $kind, + } + }; + ( + name: $name:expr, + target: $target:expr, + level: $level:expr, + fields: $fields:expr, + callsite: $callsite:expr, + kind: $kind:expr, + ) => { + $crate::metadata::Metadata::new( + $name, + $target, + $level, + Some(file!()), + Some(line!()), + Some(module_path!()), + $crate::field::FieldSet::new($fields, $crate::identify_callsite!($callsite)), + $kind, + ) + }; +} + +// when `std` is enabled, use the `lazy_static` crate from crates.io +#[cfg(feature = "std")] +pub(crate) use lazy_static::lazy_static; + +// Facade module: `no_std` uses spinlocks, `std` uses the mutexes in the standard library +#[cfg(not(feature = "std"))] +#[macro_use] +mod lazy_static; + +// Trimmed-down vendored version of spin 0.5.2 (0387621) +// Dependency of no_std lazy_static, not required in a std build +#[cfg(not(feature = "std"))] +pub(crate) mod spin; + +#[cfg(not(feature = "std"))] +#[doc(hidden)] +pub type Once = self::spin::Once<()>; + +#[cfg(feature = "std")] +pub use stdlib::sync::Once; + +pub mod callsite; +pub mod dispatcher; +pub mod event; +pub mod field; +pub mod metadata; +mod parent; +pub mod span; +pub(crate) mod stdlib; +pub mod subscriber; + +#[doc(inline)] +pub use self::{ + callsite::Callsite, + dispatcher::Dispatch, + event::Event, + field::Field, + metadata::{Level, LevelFilter, Metadata}, + subscriber::Subscriber, +}; + +pub use self::{metadata::Kind, subscriber::Interest}; + +mod sealed { + pub trait Sealed {} +} diff --git a/third_party/rust/tracing-core/src/metadata.rs b/third_party/rust/tracing-core/src/metadata.rs new file mode 100644 index 000000000000..b6046e4d9a06 --- /dev/null +++ b/third_party/rust/tracing-core/src/metadata.rs @@ -0,0 +1,1062 @@ +//! Metadata describing trace data. +use super::{callsite, field}; +use crate::stdlib::{ + cmp, fmt, + str::FromStr, + sync::atomic::{AtomicUsize, Ordering}, +}; + +/// Metadata describing a [span] or [event]. +/// +/// All spans and events have the following metadata: +/// - A [name], represented as a static string. +/// - A [target], a string that categorizes part of the system where the span +/// or event occurred. The `tracing` macros default to using the module +/// path where the span or event originated as the target, but it may be +/// overridden. +/// - A [verbosity level]. This determines how verbose a given span or event +/// is, and allows enabling or disabling more verbose diagnostics +/// situationally. See the documentation for the [`Level`] type for details. +/// - The names of the [fields] defined by the span or event. +/// - Whether the metadata corresponds to a span or event. +/// +/// In addition, the following optional metadata describing the source code +/// location where the span or event originated _may_ be provided: +/// - The [file name] +/// - The [line number] +/// - The [module path] +/// +/// Metadata is used by [`Subscriber`]s when filtering spans and events, and it +/// may also be used as part of their data payload. +/// +/// When created by the `event!` or `span!` macro, the metadata describing a +/// particular event or span is constructed statically and exists as a single +/// static instance. Thus, the overhead of creating the metadata is +/// _significantly_ lower than that of creating the actual span. Therefore, +/// filtering is based on metadata, rather than on the constructed span. +/// +///
+/// **Note**: Although instances of `Metadata`
+/// cannot be compared directly, they provide a method
+/// [`id`], returning
+/// an opaque [callsite identifier] which uniquely identifies the callsite
+/// where the metadata originated. This can be used to determine if two
+/// `Metadata` correspond to the same callsite.
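+///
+/// As a minimal sketch of such a comparison, assuming `meta_a` and `meta_b`
+/// are `&Metadata<'_>` references observed by a subscriber, the `callsite`
+/// accessor defined on `Metadata` returns that identifier:
+///
+/// ```ignore
+/// if meta_a.callsite() == meta_b.callsite() {
+///     // Both metadata originate from the same `span!`/`event!` callsite.
+/// }
+/// ```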
+/// +/// [span]: super::span +/// [event]: super::event +/// [name]: #method.name +/// [target]: #method.target +/// [fields]: #method.fields +/// [verbosity level]: #method.level +/// [file name]: #method.file +/// [line number]: #method.line +/// [module path]: #method.module +/// [`Subscriber`]: super::subscriber::Subscriber +/// [`id`]: Metadata::id +/// [callsite identifier]: super::callsite::Identifier +pub struct Metadata<'a> { + /// The name of the span described by this metadata. + name: &'static str, + + /// The part of the system that the span that this metadata describes + /// occurred in. + target: &'a str, + + /// The level of verbosity of the described span. + level: Level, + + /// The name of the Rust module where the span occurred, or `None` if this + /// could not be determined. + module_path: Option<&'a str>, + + /// The name of the source code file where the span occurred, or `None` if + /// this could not be determined. + file: Option<&'a str>, + + /// The line number in the source code file where the span occurred, or + /// `None` if this could not be determined. + line: Option, + + /// The names of the key-value fields attached to the described span or + /// event. + fields: field::FieldSet, + + /// The kind of the callsite. + kind: Kind, +} + +/// Indicates whether the callsite is a span or event. +#[derive(Clone, Eq, PartialEq)] +pub struct Kind(u8); + +/// Describes the level of verbosity of a span or event. +/// +/// # Comparing Levels +/// +/// `Level` implements the [`PartialOrd`] and [`Ord`] traits, allowing two +/// `Level`s to be compared to determine which is considered more or less +/// verbose. Levels which are more verbose are considered "greater than" levels +/// which are less verbose, with [`Level::ERROR`] considered the lowest, and +/// [`Level::TRACE`] considered the highest. +/// +/// For example: +/// ``` +/// use tracing_core::Level; +/// +/// assert!(Level::TRACE > Level::DEBUG); +/// assert!(Level::ERROR < Level::WARN); +/// assert!(Level::INFO <= Level::DEBUG); +/// assert_eq!(Level::TRACE, Level::TRACE); +/// ``` +/// +/// # Filtering +/// +/// `Level`s are typically used to implement filtering that determines which +/// spans and events are enabled. Depending on the use case, more or less +/// verbose diagnostics may be desired. For example, when running in +/// development, [`DEBUG`]-level traces may be enabled by default. When running in +/// production, only [`INFO`]-level and lower traces might be enabled. Libraries +/// may include very verbose diagnostics at the [`DEBUG`] and/or [`TRACE`] levels. +/// Applications using those libraries typically chose to ignore those traces. However, when +/// debugging an issue involving said libraries, it may be useful to temporarily +/// enable the more verbose traces. +/// +/// The [`LevelFilter`] type is provided to enable filtering traces by +/// verbosity. `Level`s can be compared against [`LevelFilter`]s, and +/// [`LevelFilter`] has a variant for each `Level`, which compares analogously +/// to that level. In addition, [`LevelFilter`] adds a [`LevelFilter::OFF`] +/// variant, which is considered "less verbose" than every other `Level`. This is +/// intended to allow filters to completely disable tracing in a particular context. 
+/// +/// For example: +/// ``` +/// use tracing_core::{Level, LevelFilter}; +/// +/// assert!(LevelFilter::OFF < Level::TRACE); +/// assert!(LevelFilter::TRACE > Level::DEBUG); +/// assert!(LevelFilter::ERROR < Level::WARN); +/// assert!(LevelFilter::INFO <= Level::DEBUG); +/// assert!(LevelFilter::INFO >= Level::INFO); +/// ``` +/// +/// ## Examples +/// +/// Below is a simple example of how a [`Subscriber`] could implement filtering through +/// a [`LevelFilter`]. When a span or event is recorded, the [`Subscriber::enabled`] method +/// compares the span or event's `Level` against the configured [`LevelFilter`]. +/// The optional [`Subscriber::max_level_hint`] method can also be implemented to allow spans +/// and events above a maximum verbosity level to be skipped more efficiently, +/// often improving performance in short-lived programs. +/// +/// ``` +/// use tracing_core::{span, Event, Level, LevelFilter, Subscriber, Metadata}; +/// # use tracing_core::span::{Id, Record, Current}; +/// +/// #[derive(Debug)] +/// pub struct MySubscriber { +/// /// The most verbose level that this subscriber will enable. +/// max_level: LevelFilter, +/// +/// // ... +/// } +/// +/// impl MySubscriber { +/// /// Returns a new `MySubscriber` which will record spans and events up to +/// /// `max_level`. +/// pub fn with_max_level(max_level: LevelFilter) -> Self { +/// Self { +/// max_level, +/// // ... +/// } +/// } +/// } +/// impl Subscriber for MySubscriber { +/// fn enabled(&self, meta: &Metadata<'_>) -> bool { +/// // A span or event is enabled if it is at or below the configured +/// // maximum level. +/// meta.level() <= &self.max_level +/// } +/// +/// // This optional method returns the most verbose level that this +/// // subscriber will enable. Although implementing this method is not +/// // *required*, it permits additional optimizations when it is provided, +/// // allowing spans and events above the max level to be skipped +/// // more efficiently. +/// fn max_level_hint(&self) -> Option { +/// Some(self.max_level) +/// } +/// +/// // Implement the rest of the subscriber... +/// fn new_span(&self, span: &span::Attributes<'_>) -> span::Id { +/// // ... +/// # drop(span); Id::from_u64(1) +/// } + +/// fn event(&self, event: &Event<'_>) { +/// // ... +/// # drop(event); +/// } +/// +/// // ... +/// # fn enter(&self, _: &Id) {} +/// # fn exit(&self, _: &Id) {} +/// # fn record(&self, _: &Id, _: &Record<'_>) {} +/// # fn record_follows_from(&self, _: &Id, _: &Id) {} +/// } +/// ``` +/// +/// It is worth noting that the `tracing-subscriber` crate provides [additional +/// APIs][envfilter] for performing more sophisticated filtering, such as +/// enabling different levels based on which module or crate a span or event is +/// recorded in. +/// +/// [`DEBUG`]: Level::DEBUG +/// [`INFO`]: Level::INFO +/// [`TRACE`]: Level::TRACE +/// [`Subscriber::enabled`]: crate::subscriber::Subscriber::enabled +/// [`Subscriber::max_level_hint`]: crate::subscriber::Subscriber::max_level_hint +/// [`Subscriber`]: crate::subscriber::Subscriber +/// [envfilter]: https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] +pub struct Level(LevelInner); + +/// A filter comparable to a verbosity [`Level`]. +/// +/// If a [`Level`] is considered less than a `LevelFilter`, it should be +/// considered enabled; if greater than or equal to the `LevelFilter`, +/// that level is disabled. See [`LevelFilter::current`] for more +/// details. 
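+///
+/// As a small, hedged sketch of that comparison, where the `max_level`
+/// binding stands in for a subscriber's configured filter:
+///
+/// ```
+/// use tracing_core::{Level, LevelFilter};
+///
+/// let max_level = LevelFilter::INFO;
+///
+/// // `WARN` is less verbose than the filter, so it would be enabled...
+/// assert!(Level::WARN <= max_level);
+/// // ...while the more verbose `TRACE` level would be disabled.
+/// assert!(Level::TRACE > max_level);
+/// ```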
+/// +/// Note that this is essentially identical to the `Level` type, but with the +/// addition of an [`OFF`] level that completely disables all trace +/// instrumentation. +/// +/// See the documentation for the [`Level`] type to see how `Level`s +/// and `LevelFilter`s interact. +/// +/// [`OFF`]: LevelFilter::OFF +#[repr(transparent)] +#[derive(Copy, Clone, Eq, PartialEq, Hash)] +pub struct LevelFilter(Option); + +/// Indicates that a string could not be parsed to a valid level. +#[derive(Clone, Debug)] +pub struct ParseLevelFilterError(()); + +static MAX_LEVEL: AtomicUsize = AtomicUsize::new(LevelFilter::OFF_USIZE); + +// ===== impl Metadata ===== + +impl<'a> Metadata<'a> { + /// Construct new metadata for a span or event, with a name, target, level, field + /// names, and optional source code location. + pub const fn new( + name: &'static str, + target: &'a str, + level: Level, + file: Option<&'a str>, + line: Option, + module_path: Option<&'a str>, + fields: field::FieldSet, + kind: Kind, + ) -> Self { + Metadata { + name, + target, + level, + module_path, + file, + line, + fields, + kind, + } + } + + /// Returns the names of the fields on the described span or event. + pub fn fields(&self) -> &field::FieldSet { + &self.fields + } + + /// Returns the level of verbosity of the described span or event. + pub fn level(&self) -> &Level { + &self.level + } + + /// Returns the name of the span. + pub fn name(&self) -> &'static str { + self.name + } + + /// Returns a string describing the part of the system where the span or + /// event that this metadata describes occurred. + /// + /// Typically, this is the module path, but alternate targets may be set + /// when spans or events are constructed. + pub fn target(&self) -> &'a str { + self.target + } + + /// Returns the path to the Rust module where the span occurred, or + /// `None` if the module path is unknown. + pub fn module_path(&self) -> Option<&'a str> { + self.module_path + } + + /// Returns the name of the source code file where the span + /// occurred, or `None` if the file is unknown + pub fn file(&self) -> Option<&'a str> { + self.file + } + + /// Returns the line number in the source code file where the span + /// occurred, or `None` if the line number is unknown. + pub fn line(&self) -> Option { + self.line + } + + /// Returns an opaque `Identifier` that uniquely identifies the callsite + /// this `Metadata` originated from. + #[inline] + pub fn callsite(&self) -> callsite::Identifier { + self.fields.callsite() + } + + /// Returns true if the callsite kind is `Event`. + pub fn is_event(&self) -> bool { + self.kind.is_event() + } + + /// Return true if the callsite kind is `Span`. + pub fn is_span(&self) -> bool { + self.kind.is_span() + } +} + +impl<'a> fmt::Debug for Metadata<'a> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let mut meta = f.debug_struct("Metadata"); + meta.field("name", &self.name) + .field("target", &self.target) + .field("level", &self.level); + + if let Some(path) = self.module_path() { + meta.field("module_path", &path); + } + + match (self.file(), self.line()) { + (Some(file), Some(line)) => { + meta.field("location", &format_args!("{}:{}", file, line)); + } + (Some(file), None) => { + meta.field("file", &format_args!("{}", file)); + } + + // Note: a line num with no file is a kind of weird case that _probably_ never occurs... 
+ (None, Some(line)) => { + meta.field("line", &line); + } + (None, None) => {} + }; + + meta.field("fields", &format_args!("{}", self.fields)) + .field("callsite", &self.callsite()) + .field("kind", &self.kind) + .finish() + } +} + +impl Kind { + const EVENT_BIT: u8 = 1 << 0; + const SPAN_BIT: u8 = 1 << 1; + const HINT_BIT: u8 = 1 << 2; + + /// `Event` callsite + pub const EVENT: Kind = Kind(Self::EVENT_BIT); + + /// `Span` callsite + pub const SPAN: Kind = Kind(Self::SPAN_BIT); + + /// `enabled!` callsite. [`Subscriber`][`crate::subscriber::Subscriber`]s can assume + /// this `Kind` means they will never recieve a + /// full event with this [`Metadata`]. + pub const HINT: Kind = Kind(Self::HINT_BIT); + + /// Return true if the callsite kind is `Span` + pub fn is_span(&self) -> bool { + self.0 & Self::SPAN_BIT == Self::SPAN_BIT + } + + /// Return true if the callsite kind is `Event` + pub fn is_event(&self) -> bool { + self.0 & Self::EVENT_BIT == Self::EVENT_BIT + } + + /// Return true if the callsite kind is `Hint` + pub fn is_hint(&self) -> bool { + self.0 & Self::HINT_BIT == Self::HINT_BIT + } + + /// Sets that this `Kind` is a [hint](Self::HINT). + /// + /// This can be called on [`SPAN`](Self::SPAN) and [`EVENT`](Self::EVENT) + /// kinds to construct a hint callsite that also counts as a span or event. + pub const fn hint(self) -> Self { + Self(self.0 | Self::HINT_BIT) + } +} + +impl fmt::Debug for Kind { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("Kind(")?; + let mut has_bits = false; + let mut write_bit = |name: &str| { + if has_bits { + f.write_str(" | ")?; + } + f.write_str(name)?; + has_bits = true; + Ok(()) + }; + + if self.is_event() { + write_bit("EVENT")?; + } + + if self.is_span() { + write_bit("SPAN")?; + } + + if self.is_hint() { + write_bit("HINT")?; + } + + // if none of the expected bits were set, something is messed up, so + // just print the bits for debugging purposes + if !has_bits { + write!(f, "{:#b}", self.0)?; + } + + f.write_str(")") + } +} + +// ===== impl Level ===== + +impl Level { + /// The "error" level. + /// + /// Designates very serious errors. + pub const ERROR: Level = Level(LevelInner::Error); + /// The "warn" level. + /// + /// Designates hazardous situations. + pub const WARN: Level = Level(LevelInner::Warn); + /// The "info" level. + /// + /// Designates useful information. + pub const INFO: Level = Level(LevelInner::Info); + /// The "debug" level. + /// + /// Designates lower priority information. + pub const DEBUG: Level = Level(LevelInner::Debug); + /// The "trace" level. + /// + /// Designates very low priority, often extremely verbose, information. + pub const TRACE: Level = Level(LevelInner::Trace); + + /// Returns the string representation of the `Level`. + /// + /// This returns the same string as the `fmt::Display` implementation. 
+ pub fn as_str(&self) -> &'static str { + match *self { + Level::TRACE => "TRACE", + Level::DEBUG => "DEBUG", + Level::INFO => "INFO", + Level::WARN => "WARN", + Level::ERROR => "ERROR", + } + } +} + +impl fmt::Display for Level { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match *self { + Level::TRACE => f.pad("TRACE"), + Level::DEBUG => f.pad("DEBUG"), + Level::INFO => f.pad("INFO"), + Level::WARN => f.pad("WARN"), + Level::ERROR => f.pad("ERROR"), + } + } +} + +#[cfg(feature = "std")] +#[cfg_attr(docsrs, doc(cfg(feature = "std")))] +impl crate::stdlib::error::Error for ParseLevelError {} + +impl FromStr for Level { + type Err = ParseLevelError; + fn from_str(s: &str) -> Result { + s.parse::() + .map_err(|_| ParseLevelError { _p: () }) + .and_then(|num| match num { + 1 => Ok(Level::ERROR), + 2 => Ok(Level::WARN), + 3 => Ok(Level::INFO), + 4 => Ok(Level::DEBUG), + 5 => Ok(Level::TRACE), + _ => Err(ParseLevelError { _p: () }), + }) + .or_else(|_| match s { + s if s.eq_ignore_ascii_case("error") => Ok(Level::ERROR), + s if s.eq_ignore_ascii_case("warn") => Ok(Level::WARN), + s if s.eq_ignore_ascii_case("info") => Ok(Level::INFO), + s if s.eq_ignore_ascii_case("debug") => Ok(Level::DEBUG), + s if s.eq_ignore_ascii_case("trace") => Ok(Level::TRACE), + _ => Err(ParseLevelError { _p: () }), + }) + } +} + +#[repr(usize)] +#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)] +enum LevelInner { + /// The "trace" level. + /// + /// Designates very low priority, often extremely verbose, information. + Trace = 0, + /// The "debug" level. + /// + /// Designates lower priority information. + Debug = 1, + /// The "info" level. + /// + /// Designates useful information. + Info = 2, + /// The "warn" level. + /// + /// Designates hazardous situations. + Warn = 3, + /// The "error" level. + /// + /// Designates very serious errors. + Error = 4, +} + +// === impl LevelFilter === + +impl From for LevelFilter { + #[inline] + fn from(level: Level) -> Self { + Self::from_level(level) + } +} + +impl From> for LevelFilter { + #[inline] + fn from(level: Option) -> Self { + Self(level) + } +} + +impl From for Option { + #[inline] + fn from(filter: LevelFilter) -> Self { + filter.into_level() + } +} + +impl LevelFilter { + /// The "off" level. + /// + /// Designates that trace instrumentation should be completely disabled. + pub const OFF: LevelFilter = LevelFilter(None); + /// The "error" level. + /// + /// Designates very serious errors. + pub const ERROR: LevelFilter = LevelFilter::from_level(Level::ERROR); + /// The "warn" level. + /// + /// Designates hazardous situations. + pub const WARN: LevelFilter = LevelFilter::from_level(Level::WARN); + /// The "info" level. + /// + /// Designates useful information. + pub const INFO: LevelFilter = LevelFilter::from_level(Level::INFO); + /// The "debug" level. + /// + /// Designates lower priority information. + pub const DEBUG: LevelFilter = LevelFilter::from_level(Level::DEBUG); + /// The "trace" level. + /// + /// Designates very low priority, often extremely verbose, information. + pub const TRACE: LevelFilter = LevelFilter(Some(Level::TRACE)); + + /// Returns a `LevelFilter` that enables spans and events with verbosity up + /// to and including `level`. + pub const fn from_level(level: Level) -> Self { + Self(Some(level)) + } + + /// Returns the most verbose [`Level`] that this filter accepts, or `None` + /// if it is [`OFF`]. 
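+    ///
+    /// For instance (an illustrative sketch added in editing):
+    ///
+    /// ```rust,ignore
+    /// use tracing_core::{Level, LevelFilter};
+    ///
+    /// assert_eq!(LevelFilter::INFO.into_level(), Some(Level::INFO));
+    /// assert_eq!(LevelFilter::OFF.into_level(), None);
+    /// ```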
+ /// + /// [`Level`]: super::Level + /// [`OFF`]: #associatedconstant.OFF + pub const fn into_level(self) -> Option { + self.0 + } + + // These consts are necessary because `as` casts are not allowed as + // match patterns. + const ERROR_USIZE: usize = LevelInner::Error as usize; + const WARN_USIZE: usize = LevelInner::Warn as usize; + const INFO_USIZE: usize = LevelInner::Info as usize; + const DEBUG_USIZE: usize = LevelInner::Debug as usize; + const TRACE_USIZE: usize = LevelInner::Trace as usize; + // Using the value of the last variant + 1 ensures that we match the value + // for `Option::None` as selected by the niche optimization for + // `LevelFilter`. If this is the case, converting a `usize` value into a + // `LevelFilter` (in `LevelFilter::current`) will be an identity conversion, + // rather than generating a lookup table. + const OFF_USIZE: usize = LevelInner::Error as usize + 1; + + /// Returns a `LevelFilter` that matches the most verbose [`Level`] that any + /// currently active [`Subscriber`] will enable. + /// + /// User code should treat this as a *hint*. If a given span or event has a + /// level *higher* than the returned `LevelFilter`, it will not be enabled. + /// However, if the level is less than or equal to this value, the span or + /// event is *not* guaranteed to be enabled; the subscriber will still + /// filter each callsite individually. + /// + /// Therefore, comparing a given span or event's level to the returned + /// `LevelFilter` **can** be used for determining if something is + /// *disabled*, but **should not** be used for determining if something is + /// *enabled*. + /// + /// [`Level`]: super::Level + /// [`Subscriber`]: super::Subscriber + #[inline(always)] + pub fn current() -> Self { + match MAX_LEVEL.load(Ordering::Relaxed) { + Self::ERROR_USIZE => Self::ERROR, + Self::WARN_USIZE => Self::WARN, + Self::INFO_USIZE => Self::INFO, + Self::DEBUG_USIZE => Self::DEBUG, + Self::TRACE_USIZE => Self::TRACE, + Self::OFF_USIZE => Self::OFF, + #[cfg(debug_assertions)] + unknown => unreachable!( + "/!\\ `LevelFilter` representation seems to have changed! /!\\ \n\ + This is a bug (and it's pretty bad). Please contact the `tracing` \ + maintainers. Thank you and I'm sorry.\n \ + The offending repr was: {:?}", + unknown, + ), + #[cfg(not(debug_assertions))] + _ => unsafe { + // Using `unreachable_unchecked` here (rather than + // `unreachable!()`) is necessary to ensure that rustc generates + // an identity conversion from integer -> discriminant, rather + // than generating a lookup table. We want to ensure this + // function is a single `mov` instruction (on x86) if at all + // possible, because it is called *every* time a span/event + // callsite is hit; and it is (potentially) the only code in the + // hottest path for skipping a majority of callsites when level + // filtering is in use. + // + // safety: This branch is only truly unreachable if we guarantee + // that no values other than the possible enum discriminants + // will *ever* be present. The `AtomicUsize` is initialized to + // the `OFF` value. It is only set by the `set_max` function, + // which takes a `LevelFilter` as a parameter. This restricts + // the inputs to `set_max` to the set of valid discriminants. + // Therefore, **as long as `MAX_VALUE` is only ever set by + // `set_max`**, this is safe. 
+ crate::stdlib::hint::unreachable_unchecked() + }, + } + } + + pub(crate) fn set_max(LevelFilter(level): LevelFilter) { + let val = match level { + Some(Level(level)) => level as usize, + None => Self::OFF_USIZE, + }; + + // using an AcqRel swap ensures an ordered relationship of writes to the + // max level. + MAX_LEVEL.swap(val, Ordering::AcqRel); + } +} + +impl fmt::Display for LevelFilter { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match *self { + LevelFilter::OFF => f.pad("off"), + LevelFilter::ERROR => f.pad("error"), + LevelFilter::WARN => f.pad("warn"), + LevelFilter::INFO => f.pad("info"), + LevelFilter::DEBUG => f.pad("debug"), + LevelFilter::TRACE => f.pad("trace"), + } + } +} + +impl fmt::Debug for LevelFilter { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match *self { + LevelFilter::OFF => f.pad("LevelFilter::OFF"), + LevelFilter::ERROR => f.pad("LevelFilter::ERROR"), + LevelFilter::WARN => f.pad("LevelFilter::WARN"), + LevelFilter::INFO => f.pad("LevelFilter::INFO"), + LevelFilter::DEBUG => f.pad("LevelFilter::DEBUG"), + LevelFilter::TRACE => f.pad("LevelFilter::TRACE"), + } + } +} + +impl FromStr for LevelFilter { + type Err = ParseLevelFilterError; + fn from_str(from: &str) -> Result { + from.parse::() + .ok() + .and_then(|num| match num { + 0 => Some(LevelFilter::OFF), + 1 => Some(LevelFilter::ERROR), + 2 => Some(LevelFilter::WARN), + 3 => Some(LevelFilter::INFO), + 4 => Some(LevelFilter::DEBUG), + 5 => Some(LevelFilter::TRACE), + _ => None, + }) + .or_else(|| match from { + "" => Some(LevelFilter::ERROR), + s if s.eq_ignore_ascii_case("error") => Some(LevelFilter::ERROR), + s if s.eq_ignore_ascii_case("warn") => Some(LevelFilter::WARN), + s if s.eq_ignore_ascii_case("info") => Some(LevelFilter::INFO), + s if s.eq_ignore_ascii_case("debug") => Some(LevelFilter::DEBUG), + s if s.eq_ignore_ascii_case("trace") => Some(LevelFilter::TRACE), + s if s.eq_ignore_ascii_case("off") => Some(LevelFilter::OFF), + _ => None, + }) + .ok_or(ParseLevelFilterError(())) + } +} + +/// Returned if parsing a `Level` fails. +#[derive(Debug)] +pub struct ParseLevelError { + _p: (), +} + +impl fmt::Display for ParseLevelError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.pad( + "error parsing level: expected one of \"error\", \"warn\", \ + \"info\", \"debug\", \"trace\", or a number 1-5", + ) + } +} + +impl fmt::Display for ParseLevelFilterError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.pad( + "error parsing level filter: expected one of \"off\", \"error\", \ + \"warn\", \"info\", \"debug\", \"trace\", or a number 0-5", + ) + } +} + +#[cfg(feature = "std")] +impl std::error::Error for ParseLevelFilterError {} + +// ==== Level and LevelFilter comparisons ==== + +// /!\ BIG, IMPORTANT WARNING /!\ +// Do NOT mess with these implementations! They are hand-written for a reason! +// +// Since comparing `Level`s and `LevelFilter`s happens in a *very* hot path +// (potentially, every time a span or event macro is hit, regardless of whether +// or not is enabled), we *need* to ensure that these comparisons are as fast as +// possible. Therefore, we have some requirements: +// +// 1. We want to do our best to ensure that rustc will generate integer-integer +// comparisons wherever possible. +// +// The derived `Ord`/`PartialOrd` impls for `LevelFilter` will not do this, +// because `LevelFilter`s are represented by `Option`, rather than as +// a separate `#[repr(usize)]` enum. 
This was (unfortunately) necessary for +// backwards-compatibility reasons, as the `tracing` crate's original +// version of `LevelFilter` defined `const fn` conversions between `Level`s +// and `LevelFilter`, so we're stuck with the `Option` repr. +// Therefore, we need hand-written `PartialOrd` impls that cast both sides of +// the comparison to `usize`s, to force the compiler to generate integer +// compares. +// +// 2. The hottest `Level`/`LevelFilter` comparison, the one that happens every +// time a callsite is hit, occurs *within the `tracing` crate's macros*. +// This means that the comparison is happening *inside* a crate that +// *depends* on `tracing-core`, not in `tracing-core` itself. The compiler +// will only inline function calls across crate boundaries if the called +// function is annotated with an `#[inline]` attribute, and we *definitely* +// want the comparison functions to be inlined: as previously mentioned, they +// should compile down to a single integer comparison on release builds, and +// it seems really sad to push an entire stack frame to call a function +// consisting of one `cmp` instruction! +// +// Therefore, we need to ensure that all the comparison methods have +// `#[inline]` or `#[inline(always)]` attributes. It's not sufficient to just +// add the attribute to `partial_cmp` in a manual implementation of the +// trait, since it's the comparison operators (`lt`, `le`, `gt`, and `ge`) +// that will actually be *used*, and the default implementation of *those* +// methods, which calls `partial_cmp`, does not have an inline annotation. +// +// 3. We need the comparisons to be inverted. The discriminants for the +// `LevelInner` enum are assigned in "backwards" order, with `TRACE` having +// the *lowest* value. However, we want `TRACE` to compare greater-than all +// other levels. +// +// Why are the numeric values inverted? In order to ensure that `LevelFilter` +// (which, as previously mentioned, *has* to be internally represented by an +// `Option`) compiles down to a single integer value. This is +// necessary for storing the global max in an `AtomicUsize`, and for ensuring +// that we use fast integer-integer comparisons, as mentioned previously. In +// order to ensure this, we exploit the niche optimization. The niche +// optimization for `Option<{enum with a numeric repr}>` will choose +// `(HIGHEST_DISCRIMINANT_VALUE + 1)` as the representation for `None`. +// Therefore, the integer representation of `LevelFilter::OFF` (which is +// `None`) will be the number 5. `OFF` must compare higher than every other +// level in order for it to filter as expected. Since we want to use a single +// `cmp` instruction, we can't special-case the integer value of `OFF` to +// compare higher, as that will generate more code. Instead, we need it to be +// on one end of the enum, with `ERROR` on the opposite end, so we assign the +// value 0 to `ERROR`. +// +// This *does* mean that when parsing `LevelFilter`s or `Level`s from +// `String`s, the integer values are inverted, but that doesn't happen in a +// hot path. +// +// Note that we manually invert the comparisons by swapping the left-hand and +// right-hand side. Using `Ordering::reverse` generates significantly worse +// code (per Matt Godbolt's Compiler Explorer). +// +// Anyway, that's a brief history of why this code is the way it is. Don't +// change it unless you know what you're doing. 
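+
+// Editorial sketch (not part of the upstream source): with the hand-written,
+// inverted comparisons below, "greater" means "more verbose", and
+// `level <= filter` is the cheap "might be enabled" check referred to in the
+// `LevelFilter::current()` documentation above:
+//
+// ```rust,ignore
+// use tracing_core::{Level, LevelFilter};
+//
+// assert!(Level::TRACE > Level::ERROR);          // TRACE is the most verbose level
+// assert!(Level::INFO <= LevelFilter::INFO);     // may be enabled under an INFO filter
+// assert!(!(Level::DEBUG <= LevelFilter::INFO)); // definitely disabled under an INFO filter
+// assert!(!(Level::ERROR <= LevelFilter::OFF));  // OFF disables everything
+// ```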
+ +impl PartialEq for Level { + #[inline(always)] + fn eq(&self, other: &LevelFilter) -> bool { + self.0 as usize == filter_as_usize(&other.0) + } +} + +impl PartialOrd for Level { + #[inline(always)] + fn partial_cmp(&self, other: &Level) -> Option { + Some(self.cmp(other)) + } + + #[inline(always)] + fn lt(&self, other: &Level) -> bool { + (other.0 as usize) < (self.0 as usize) + } + + #[inline(always)] + fn le(&self, other: &Level) -> bool { + (other.0 as usize) <= (self.0 as usize) + } + + #[inline(always)] + fn gt(&self, other: &Level) -> bool { + (other.0 as usize) > (self.0 as usize) + } + + #[inline(always)] + fn ge(&self, other: &Level) -> bool { + (other.0 as usize) >= (self.0 as usize) + } +} + +impl Ord for Level { + #[inline(always)] + fn cmp(&self, other: &Self) -> cmp::Ordering { + (other.0 as usize).cmp(&(self.0 as usize)) + } +} + +impl PartialOrd for Level { + #[inline(always)] + fn partial_cmp(&self, other: &LevelFilter) -> Option { + Some(filter_as_usize(&other.0).cmp(&(self.0 as usize))) + } + + #[inline(always)] + fn lt(&self, other: &LevelFilter) -> bool { + filter_as_usize(&other.0) < (self.0 as usize) + } + + #[inline(always)] + fn le(&self, other: &LevelFilter) -> bool { + filter_as_usize(&other.0) <= (self.0 as usize) + } + + #[inline(always)] + fn gt(&self, other: &LevelFilter) -> bool { + filter_as_usize(&other.0) > (self.0 as usize) + } + + #[inline(always)] + fn ge(&self, other: &LevelFilter) -> bool { + filter_as_usize(&other.0) >= (self.0 as usize) + } +} + +#[inline(always)] +fn filter_as_usize(x: &Option) -> usize { + match x { + Some(Level(f)) => *f as usize, + None => LevelFilter::OFF_USIZE, + } +} + +impl PartialEq for LevelFilter { + #[inline(always)] + fn eq(&self, other: &Level) -> bool { + filter_as_usize(&self.0) == other.0 as usize + } +} + +impl PartialOrd for LevelFilter { + #[inline(always)] + fn partial_cmp(&self, other: &LevelFilter) -> Option { + Some(self.cmp(other)) + } + + #[inline(always)] + fn lt(&self, other: &LevelFilter) -> bool { + filter_as_usize(&other.0) < filter_as_usize(&self.0) + } + + #[inline(always)] + fn le(&self, other: &LevelFilter) -> bool { + filter_as_usize(&other.0) <= filter_as_usize(&self.0) + } + + #[inline(always)] + fn gt(&self, other: &LevelFilter) -> bool { + filter_as_usize(&other.0) > filter_as_usize(&self.0) + } + + #[inline(always)] + fn ge(&self, other: &LevelFilter) -> bool { + filter_as_usize(&other.0) >= filter_as_usize(&self.0) + } +} + +impl Ord for LevelFilter { + #[inline(always)] + fn cmp(&self, other: &Self) -> cmp::Ordering { + filter_as_usize(&other.0).cmp(&filter_as_usize(&self.0)) + } +} + +impl PartialOrd for LevelFilter { + #[inline(always)] + fn partial_cmp(&self, other: &Level) -> Option { + Some((other.0 as usize).cmp(&filter_as_usize(&self.0))) + } + + #[inline(always)] + fn lt(&self, other: &Level) -> bool { + (other.0 as usize) < filter_as_usize(&self.0) + } + + #[inline(always)] + fn le(&self, other: &Level) -> bool { + (other.0 as usize) <= filter_as_usize(&self.0) + } + + #[inline(always)] + fn gt(&self, other: &Level) -> bool { + (other.0 as usize) > filter_as_usize(&self.0) + } + + #[inline(always)] + fn ge(&self, other: &Level) -> bool { + (other.0 as usize) >= filter_as_usize(&self.0) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::stdlib::mem; + + #[test] + fn level_from_str() { + assert_eq!("error".parse::().unwrap(), Level::ERROR); + assert_eq!("4".parse::().unwrap(), Level::DEBUG); + assert!("0".parse::().is_err()) + } + + #[test] + fn 
filter_level_conversion() { + let mapping = [ + (LevelFilter::OFF, None), + (LevelFilter::ERROR, Some(Level::ERROR)), + (LevelFilter::WARN, Some(Level::WARN)), + (LevelFilter::INFO, Some(Level::INFO)), + (LevelFilter::DEBUG, Some(Level::DEBUG)), + (LevelFilter::TRACE, Some(Level::TRACE)), + ]; + for (filter, level) in mapping.iter() { + assert_eq!(filter.into_level(), *level); + match level { + Some(level) => { + let actual: LevelFilter = (*level).into(); + assert_eq!(actual, *filter); + } + None => { + let actual: LevelFilter = None.into(); + assert_eq!(actual, *filter); + } + } + } + } + + #[test] + fn level_filter_is_usize_sized() { + assert_eq!( + mem::size_of::(), + mem::size_of::(), + "`LevelFilter` is no longer `usize`-sized! global MAX_LEVEL may now be invalid!" + ) + } + + #[test] + fn level_filter_reprs() { + let mapping = [ + (LevelFilter::OFF, LevelInner::Error as usize + 1), + (LevelFilter::ERROR, LevelInner::Error as usize), + (LevelFilter::WARN, LevelInner::Warn as usize), + (LevelFilter::INFO, LevelInner::Info as usize), + (LevelFilter::DEBUG, LevelInner::Debug as usize), + (LevelFilter::TRACE, LevelInner::Trace as usize), + ]; + for &(filter, expected) in &mapping { + let repr = unsafe { + // safety: The entire purpose of this test is to assert that the + // actual repr matches what we expect it to be --- we're testing + // that *other* unsafe code is sound using the transmuted value. + // We're not going to do anything with it that might be unsound. + mem::transmute::(filter) + }; + assert_eq!(expected, repr, "repr changed for {:?}", filter) + } + } +} diff --git a/third_party/rust/tracing-core/src/parent.rs b/third_party/rust/tracing-core/src/parent.rs new file mode 100644 index 000000000000..cb34b376ccab --- /dev/null +++ b/third_party/rust/tracing-core/src/parent.rs @@ -0,0 +1,11 @@ +use crate::span::Id; + +#[derive(Debug)] +pub(crate) enum Parent { + /// The new span will be a root span. + Root, + /// The new span will be rooted in the current span. + Current, + /// The new span has an explicitly-specified parent. + Explicit(Id), +} diff --git a/third_party/rust/tracing-core/src/span.rs b/third_party/rust/tracing-core/src/span.rs new file mode 100644 index 000000000000..c600af4d99d9 --- /dev/null +++ b/third_party/rust/tracing-core/src/span.rs @@ -0,0 +1,334 @@ +//! Spans represent periods of time in the execution of a program. +use crate::field::FieldSet; +use crate::parent::Parent; +use crate::stdlib::num::NonZeroU64; +use crate::{field, Metadata}; + +/// Identifies a span within the context of a subscriber. +/// +/// They are generated by [`Subscriber`]s for each span as it is created, by +/// the [`new_span`] trait method. See the documentation for that method for +/// more information on span ID generation. +/// +/// [`Subscriber`]: super::subscriber::Subscriber +/// [`new_span`]: super::subscriber::Subscriber::new_span +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +pub struct Id(NonZeroU64); + +/// Attributes provided to a `Subscriber` describing a new span when it is +/// created. +#[derive(Debug)] +pub struct Attributes<'a> { + metadata: &'static Metadata<'static>, + values: &'a field::ValueSet<'a>, + parent: Parent, +} + +/// A set of fields recorded by a span. +#[derive(Debug)] +pub struct Record<'a> { + values: &'a field::ValueSet<'a>, +} + +/// Indicates what [the `Subscriber` considers] the "current" span. 
+/// +/// As subscribers may not track a notion of a current span, this has three +/// possible states: +/// - "unknown", indicating that the subscriber does not track a current span, +/// - "none", indicating that the current context is known to not be in a span, +/// - "some", with the current span's [`Id`] and [`Metadata`]. +/// +/// [the `Subscriber` considers]: super::subscriber::Subscriber::current_span +/// [`Id`]: Id +/// [`Metadata`]: super::metadata::Metadata +#[derive(Debug)] +pub struct Current { + inner: CurrentInner, +} + +#[derive(Debug)] +enum CurrentInner { + Current { + id: Id, + metadata: &'static Metadata<'static>, + }, + None, + Unknown, +} + +// ===== impl Span ===== + +impl Id { + /// Constructs a new span ID from the given `u64`. + /// + ///
+    ///     **Note**: Span IDs must be greater than zero.
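+    ///
+    /// A small illustrative sketch (added in editing):
+    ///
+    /// ```rust,ignore
+    /// use tracing_core::span::Id;
+    ///
+    /// let id = Id::from_u64(42);
+    /// assert_eq!(id.into_u64(), 42);
+    /// ```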
+ /// + /// # Panics + /// - If the provided `u64` is 0. + pub fn from_u64(u: u64) -> Self { + Id(NonZeroU64::new(u).expect("span IDs must be > 0")) + } + + /// Constructs a new span ID from the given `NonZeroU64`. + /// + /// Unlike [`Id::from_u64`](#method.from_u64), this will never panic. + #[inline] + pub const fn from_non_zero_u64(id: NonZeroU64) -> Self { + Id(id) + } + + // Allow `into` by-ref since we don't want to impl Copy for Id + #[allow(clippy::wrong_self_convention)] + /// Returns the span's ID as a `u64`. + pub fn into_u64(&self) -> u64 { + self.0.get() + } + + // Allow `into` by-ref since we don't want to impl Copy for Id + #[allow(clippy::wrong_self_convention)] + /// Returns the span's ID as a `NonZeroU64`. + #[inline] + pub const fn into_non_zero_u64(&self) -> NonZeroU64 { + self.0 + } +} + +impl<'a> From<&'a Id> for Option { + fn from(id: &'a Id) -> Self { + Some(id.clone()) + } +} + +// ===== impl Attributes ===== + +impl<'a> Attributes<'a> { + /// Returns `Attributes` describing a new child span of the current span, + /// with the provided metadata and values. + pub fn new(metadata: &'static Metadata<'static>, values: &'a field::ValueSet<'a>) -> Self { + Attributes { + metadata, + values, + parent: Parent::Current, + } + } + + /// Returns `Attributes` describing a new span at the root of its own trace + /// tree, with the provided metadata and values. + pub fn new_root(metadata: &'static Metadata<'static>, values: &'a field::ValueSet<'a>) -> Self { + Attributes { + metadata, + values, + parent: Parent::Root, + } + } + + /// Returns `Attributes` describing a new child span of the specified + /// parent span, with the provided metadata and values. + pub fn child_of( + parent: Id, + metadata: &'static Metadata<'static>, + values: &'a field::ValueSet<'a>, + ) -> Self { + Attributes { + metadata, + values, + parent: Parent::Explicit(parent), + } + } + + /// Returns a reference to the new span's metadata. + pub fn metadata(&self) -> &'static Metadata<'static> { + self.metadata + } + + /// Returns a reference to a `ValueSet` containing any values the new span + /// was created with. + pub fn values(&self) -> &field::ValueSet<'a> { + self.values + } + + /// Returns true if the new span should be a root. + pub fn is_root(&self) -> bool { + matches!(self.parent, Parent::Root) + } + + /// Returns true if the new span's parent should be determined based on the + /// current context. + /// + /// If this is true and the current thread is currently inside a span, then + /// that span should be the new span's parent. Otherwise, if the current + /// thread is _not_ inside a span, then the new span will be the root of its + /// own trace tree. + pub fn is_contextual(&self) -> bool { + matches!(self.parent, Parent::Current) + } + + /// Returns the new span's explicitly-specified parent, if there is one. + /// + /// Otherwise (if the new span is a root or is a child of the current span), + /// returns `None`. + pub fn parent(&self) -> Option<&Id> { + match self.parent { + Parent::Explicit(ref p) => Some(p), + _ => None, + } + } + + /// Records all the fields in this set of `Attributes` with the provided + /// [Visitor]. + /// + /// [visitor]: super::field::Visit + pub fn record(&self, visitor: &mut dyn field::Visit) { + self.values.record(visitor) + } + + /// Returns `true` if this set of `Attributes` contains a value for the + /// given `Field`. 
+ pub fn contains(&self, field: &field::Field) -> bool { + self.values.contains(field) + } + + /// Returns true if this set of `Attributes` contains _no_ values. + pub fn is_empty(&self) -> bool { + self.values.is_empty() + } + + /// Returns the set of all [fields] defined by this span's [`Metadata`]. + /// + /// Note that the [`FieldSet`] returned by this method includes *all* the + /// fields declared by this span, not just those with values that are recorded + /// as part of this set of `Attributes`. Other fields with values not present in + /// this `Attributes`' value set may [record] values later. + /// + /// [fields]: crate::field + /// [record]: Attributes::record() + /// [`Metadata`]: crate::metadata::Metadata + /// [`FieldSet`]: crate::field::FieldSet + pub fn fields(&self) -> &FieldSet { + self.values.field_set() + } +} + +// ===== impl Record ===== + +impl<'a> Record<'a> { + /// Constructs a new `Record` from a `ValueSet`. + pub fn new(values: &'a field::ValueSet<'a>) -> Self { + Self { values } + } + + /// Records all the fields in this `Record` with the provided [Visitor]. + /// + /// [visitor]: super::field::Visit + pub fn record(&self, visitor: &mut dyn field::Visit) { + self.values.record(visitor) + } + + /// Returns `true` if this `Record` contains a value for the given `Field`. + pub fn contains(&self, field: &field::Field) -> bool { + self.values.contains(field) + } + + /// Returns true if this `Record` contains _no_ values. + pub fn is_empty(&self) -> bool { + self.values.is_empty() + } +} + +// ===== impl Current ===== + +impl Current { + /// Constructs a new `Current` that indicates the current context is a span + /// with the given `metadata` and `metadata`. + pub fn new(id: Id, metadata: &'static Metadata<'static>) -> Self { + Self { + inner: CurrentInner::Current { id, metadata }, + } + } + + /// Constructs a new `Current` that indicates the current context is *not* + /// in a span. + pub fn none() -> Self { + Self { + inner: CurrentInner::None, + } + } + + /// Constructs a new `Current` that indicates the `Subscriber` does not + /// track a current span. + pub(crate) fn unknown() -> Self { + Self { + inner: CurrentInner::Unknown, + } + } + + /// Returns `true` if the `Subscriber` that constructed this `Current` tracks a + /// current span. + /// + /// If this returns `true` and [`id`], [`metadata`], or [`into_inner`] + /// return `None`, that indicates that we are currently known to *not* be + /// inside a span. If this returns `false`, those methods will also return + /// `None`, but in this case, that is because the subscriber does not keep + /// track of the currently-entered span. + /// + /// [`id`]: #method.id + /// [`metadata`]: #method.metadata + /// [`into_inner`]: #method.into_inner + pub fn is_known(&self) -> bool { + !matches!(self.inner, CurrentInner::Unknown) + } + + /// Consumes `self` and returns the span `Id` and `Metadata` of the current + /// span, if one exists and is known. + pub fn into_inner(self) -> Option<(Id, &'static Metadata<'static>)> { + match self.inner { + CurrentInner::Current { id, metadata } => Some((id, metadata)), + _ => None, + } + } + + /// Borrows the `Id` of the current span, if one exists and is known. + pub fn id(&self) -> Option<&Id> { + match self.inner { + CurrentInner::Current { ref id, .. } => Some(id), + _ => None, + } + } + + /// Borrows the `Metadata` of the current span, if one exists and is known. 
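+    ///
+    /// For example (an illustrative sketch added in editing): a value built
+    /// with `Current::none()` is *known* to be outside of any span, so this
+    /// method returns `None`:
+    ///
+    /// ```rust,ignore
+    /// use tracing_core::span::Current;
+    ///
+    /// let current = Current::none();
+    /// assert!(current.is_known());
+    /// assert!(current.id().is_none());
+    /// assert!(current.metadata().is_none());
+    /// ```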
+ pub fn metadata(&self) -> Option<&'static Metadata<'static>> { + match self.inner { + CurrentInner::Current { metadata, .. } => Some(metadata), + _ => None, + } + } +} + +impl<'a> From<&'a Current> for Option<&'a Id> { + fn from(cur: &'a Current) -> Self { + cur.id() + } +} + +impl<'a> From<&'a Current> for Option { + fn from(cur: &'a Current) -> Self { + cur.id().cloned() + } +} + +impl From for Option { + fn from(cur: Current) -> Self { + match cur.inner { + CurrentInner::Current { id, .. } => Some(id), + _ => None, + } + } +} + +impl<'a> From<&'a Current> for Option<&'static Metadata<'static>> { + fn from(cur: &'a Current) -> Self { + cur.metadata() + } +} diff --git a/third_party/rust/urlencoding/LICENSE b/third_party/rust/tracing-core/src/spin/LICENSE similarity index 86% rename from third_party/rust/urlencoding/LICENSE rename to third_party/rust/tracing-core/src/spin/LICENSE index fb9900014e8c..84d5f4d7aff4 100644 --- a/third_party/rust/urlencoding/LICENSE +++ b/third_party/rust/tracing-core/src/spin/LICENSE @@ -1,4 +1,6 @@ -Copyright (c) 2016 Bertram Truong +The MIT License (MIT) + +Copyright (c) 2014 Mathijs van de Nes Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -7,13 +9,13 @@ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/third_party/rust/tracing-core/src/spin/mod.rs b/third_party/rust/tracing-core/src/spin/mod.rs new file mode 100644 index 000000000000..148b192b34d6 --- /dev/null +++ b/third_party/rust/tracing-core/src/spin/mod.rs @@ -0,0 +1,7 @@ +//! Synchronization primitives based on spinning + +pub(crate) use mutex::*; +pub(crate) use once::Once; + +mod mutex; +mod once; diff --git a/third_party/rust/tracing-core/src/spin/mutex.rs b/third_party/rust/tracing-core/src/spin/mutex.rs new file mode 100644 index 000000000000..c261a619108a --- /dev/null +++ b/third_party/rust/tracing-core/src/spin/mutex.rs @@ -0,0 +1,118 @@ +use core::cell::UnsafeCell; +use core::default::Default; +use core::fmt; +use core::hint; +use core::marker::Sync; +use core::ops::{Deref, DerefMut, Drop}; +use core::option::Option::{self, None, Some}; +use core::sync::atomic::{AtomicBool, Ordering}; + +/// This type provides MUTual EXclusion based on spinning. +pub(crate) struct Mutex { + lock: AtomicBool, + data: UnsafeCell, +} + +/// A guard to which the protected data can be accessed +/// +/// When the guard falls out of scope it will release the lock. 
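+///
+/// Illustrative sketch (added in editing; these types are `pub(crate)`, so
+/// this only applies inside `tracing-core` itself):
+///
+/// ```rust,ignore
+/// let m = Mutex::new(0u32);
+/// {
+///     let mut guard = m.lock();
+///     *guard += 1;
+/// } // the lock is released here, when the guard is dropped
+/// assert_eq!(*m.lock(), 1);
+/// ```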
+#[derive(Debug)] +pub(crate) struct MutexGuard<'a, T: ?Sized> { + lock: &'a AtomicBool, + data: &'a mut T, +} + +// Same unsafe impls as `std::sync::Mutex` +unsafe impl Sync for Mutex {} +unsafe impl Send for Mutex {} + +impl Mutex { + /// Creates a new spinlock wrapping the supplied data. + pub(crate) const fn new(user_data: T) -> Mutex { + Mutex { + lock: AtomicBool::new(false), + data: UnsafeCell::new(user_data), + } + } +} + +impl Mutex { + fn obtain_lock(&self) { + while self + .lock + .compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed) + .is_err() + { + // Wait until the lock looks unlocked before retrying + while self.lock.load(Ordering::Relaxed) { + hint::spin_loop(); + } + } + } + + /// Locks the spinlock and returns a guard. + /// + /// The returned value may be dereferenced for data access + /// and the lock will be dropped when the guard falls out of scope. + pub(crate) fn lock(&self) -> MutexGuard<'_, T> { + self.obtain_lock(); + MutexGuard { + lock: &self.lock, + data: unsafe { &mut *self.data.get() }, + } + } + + /// Tries to lock the mutex. If it is already locked, it will return None. Otherwise it returns + /// a guard within Some. + pub(crate) fn try_lock(&self) -> Option> { + if self + .lock + .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed) + .is_ok() + { + Some(MutexGuard { + lock: &self.lock, + data: unsafe { &mut *self.data.get() }, + }) + } else { + None + } + } +} + +impl fmt::Debug for Mutex { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self.try_lock() { + Some(guard) => write!(f, "Mutex {{ data: ") + .and_then(|()| (&*guard).fmt(f)) + .and_then(|()| write!(f, "}}")), + None => write!(f, "Mutex {{ }}"), + } + } +} + +impl Default for Mutex { + fn default() -> Mutex { + Mutex::new(Default::default()) + } +} + +impl<'a, T: ?Sized> Deref for MutexGuard<'a, T> { + type Target = T; + fn deref<'b>(&'b self) -> &'b T { + &*self.data + } +} + +impl<'a, T: ?Sized> DerefMut for MutexGuard<'a, T> { + fn deref_mut<'b>(&'b mut self) -> &'b mut T { + &mut *self.data + } +} + +impl<'a, T: ?Sized> Drop for MutexGuard<'a, T> { + /// The dropping of the MutexGuard will release the lock it was created from. + fn drop(&mut self) { + self.lock.store(false, Ordering::Release); + } +} diff --git a/third_party/rust/tracing-core/src/spin/once.rs b/third_party/rust/tracing-core/src/spin/once.rs new file mode 100644 index 000000000000..27c99e56eeb4 --- /dev/null +++ b/third_party/rust/tracing-core/src/spin/once.rs @@ -0,0 +1,158 @@ +use core::cell::UnsafeCell; +use core::fmt; +use core::hint::spin_loop; +use core::sync::atomic::{AtomicUsize, Ordering}; + +/// A synchronization primitive which can be used to run a one-time global +/// initialization. Unlike its std equivalent, this is generalized so that the +/// closure returns a value and it is stored. Once therefore acts something like +/// a future, too. +pub struct Once { + state: AtomicUsize, + data: UnsafeCell>, // TODO remove option and use mem::uninitialized +} + +impl fmt::Debug for Once { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self.r#try() { + Some(s) => write!(f, "Once {{ data: ") + .and_then(|()| s.fmt(f)) + .and_then(|()| write!(f, "}}")), + None => write!(f, "Once {{ }}"), + } + } +} + +// Same unsafe impls as `std::sync::RwLock`, because this also allows for +// concurrent reads. 
+unsafe impl Sync for Once {} +unsafe impl Send for Once {} + +// Four states that a Once can be in, encoded into the lower bits of `state` in +// the Once structure. +const INCOMPLETE: usize = 0x0; +const RUNNING: usize = 0x1; +const COMPLETE: usize = 0x2; +const PANICKED: usize = 0x3; + +use core::hint::unreachable_unchecked as unreachable; + +impl Once { + /// Initialization constant of `Once`. + pub const INIT: Self = Once { + state: AtomicUsize::new(INCOMPLETE), + data: UnsafeCell::new(None), + }; + + /// Creates a new `Once` value. + pub const fn new() -> Once { + Self::INIT + } + + fn force_get<'a>(&'a self) -> &'a T { + match unsafe { &*self.data.get() }.as_ref() { + None => unsafe { unreachable() }, + Some(p) => p, + } + } + + /// Performs an initialization routine once and only once. The given closure + /// will be executed if this is the first time `call_once` has been called, + /// and otherwise the routine will *not* be invoked. + /// + /// This method will block the calling thread if another initialization + /// routine is currently running. + /// + /// When this function returns, it is guaranteed that some initialization + /// has run and completed (it may not be the closure specified). The + /// returned pointer will point to the result from the closure that was + /// run. + pub fn call_once<'a, F>(&'a self, builder: F) -> &'a T + where + F: FnOnce() -> T, + { + let mut status = self.state.load(Ordering::SeqCst); + + if status == INCOMPLETE { + status = match self.state.compare_exchange( + INCOMPLETE, + RUNNING, + Ordering::SeqCst, + Ordering::SeqCst, + ) { + Ok(status) => { + debug_assert_eq!( + status, INCOMPLETE, + "if compare_exchange succeeded, previous status must be incomplete", + ); + // We init + // We use a guard (Finish) to catch panics caused by builder + let mut finish = Finish { + state: &self.state, + panicked: true, + }; + unsafe { *self.data.get() = Some(builder()) }; + finish.panicked = false; + + self.state.store(COMPLETE, Ordering::SeqCst); + + // This next line is strictly an optimization + return self.force_get(); + } + Err(status) => status, + } + } + + loop { + match status { + INCOMPLETE => unreachable!(), + RUNNING => { + // We spin + spin_loop(); + status = self.state.load(Ordering::SeqCst) + } + PANICKED => panic!("Once has panicked"), + COMPLETE => return self.force_get(), + _ => unsafe { unreachable() }, + } + } + } + + /// Returns a pointer iff the `Once` was previously initialized + pub fn r#try<'a>(&'a self) -> Option<&'a T> { + match self.state.load(Ordering::SeqCst) { + COMPLETE => Some(self.force_get()), + _ => None, + } + } + + /// Like try, but will spin if the `Once` is in the process of being + /// initialized + pub fn wait<'a>(&'a self) -> Option<&'a T> { + loop { + match self.state.load(Ordering::SeqCst) { + INCOMPLETE => return None, + + RUNNING => { + spin_loop() // We spin + } + COMPLETE => return Some(self.force_get()), + PANICKED => panic!("Once has panicked"), + _ => unsafe { unreachable() }, + } + } + } +} + +struct Finish<'a> { + state: &'a AtomicUsize, + panicked: bool, +} + +impl<'a> Drop for Finish<'a> { + fn drop(&mut self) { + if self.panicked { + self.state.store(PANICKED, Ordering::SeqCst); + } + } +} diff --git a/third_party/rust/tracing-core/src/stdlib.rs b/third_party/rust/tracing-core/src/stdlib.rs new file mode 100644 index 000000000000..4a1c17c2b82b --- /dev/null +++ b/third_party/rust/tracing-core/src/stdlib.rs @@ -0,0 +1,78 @@ +//! 
Re-exports either the Rust `std` library or `core` and `alloc` when `std` is +//! disabled. +//! +//! `crate::stdlib::...` should be used rather than `std::` when adding code that +//! will be available with the standard library disabled. +//! +//! Note that this module is called `stdlib` rather than `std`, as Rust 1.34.0 +//! does not permit redefining the name `stdlib` (although this works on the +//! latest stable Rust). +#[cfg(feature = "std")] +pub(crate) use std::*; + +#[cfg(not(feature = "std"))] +pub(crate) use self::no_std::*; + +#[cfg(not(feature = "std"))] +mod no_std { + // We pre-emptively export everything from libcore/liballoc, (even modules + // we aren't using currently) to make adding new code easier. Therefore, + // some of these imports will be unused. + #![allow(unused_imports)] + + pub(crate) use core::{ + any, array, ascii, cell, char, clone, cmp, convert, default, f32, f64, ffi, future, hash, + hint, i128, i16, i8, isize, iter, marker, mem, num, ops, option, pin, ptr, result, task, + time, u128, u16, u32, u8, usize, + }; + + pub(crate) use alloc::{boxed, collections, rc, string, vec}; + + pub(crate) mod borrow { + pub(crate) use alloc::borrow::*; + pub(crate) use core::borrow::*; + } + + pub(crate) mod fmt { + pub(crate) use alloc::fmt::*; + pub(crate) use core::fmt::*; + } + + pub(crate) mod slice { + pub(crate) use alloc::slice::*; + pub(crate) use core::slice::*; + } + + pub(crate) mod str { + pub(crate) use alloc::str::*; + pub(crate) use core::str::*; + } + + pub(crate) mod sync { + pub(crate) use crate::spin::MutexGuard; + pub(crate) use alloc::sync::*; + pub(crate) use core::sync::*; + + /// This wraps `spin::Mutex` to return a `Result`, so that it can be + /// used with code written against `std::sync::Mutex`. + /// + /// Since `spin::Mutex` doesn't support poisoning, the `Result` returned + /// by `lock` will always be `Ok`. + #[derive(Debug, Default)] + pub(crate) struct Mutex { + inner: crate::spin::Mutex, + } + + impl Mutex { + pub(crate) fn new(data: T) -> Self { + Self { + inner: crate::spin::Mutex::new(data), + } + } + + pub(crate) fn lock(&self) -> Result, ()> { + Ok(self.inner.lock()) + } + } + } +} diff --git a/third_party/rust/tracing-core/src/subscriber.rs b/third_party/rust/tracing-core/src/subscriber.rs new file mode 100644 index 000000000000..d138fcb5b4bc --- /dev/null +++ b/third_party/rust/tracing-core/src/subscriber.rs @@ -0,0 +1,750 @@ +//! Subscribers collect and record trace data. +use crate::{span, Event, LevelFilter, Metadata}; + +use crate::stdlib::{ + any::{Any, TypeId}, + boxed::Box, + sync::Arc, +}; + +/// Trait representing the functions required to collect trace data. +/// +/// Crates that provide implementations of methods for collecting or recording +/// trace data should implement the `Subscriber` interface. This trait is +/// intended to represent fundamental primitives for collecting trace events and +/// spans — other libraries may offer utility functions and types to make +/// subscriber implementations more modular or improve the ergonomics of writing +/// subscribers. +/// +/// A subscriber is responsible for the following: +/// - Registering new spans as they are created, and providing them with span +/// IDs. Implicitly, this means the subscriber may determine the strategy for +/// determining span equality. +/// - Recording the attachment of field values and follows-from annotations to +/// spans. +/// - Filtering spans and events, and determining when those filters must be +/// invalidated. 
+/// - Observing spans as they are entered, exited, and closed, and events as +/// they occur. +/// +/// When a span is entered or exited, the subscriber is provided only with the +/// [ID] with which it tagged that span when it was created. This means +/// that it is up to the subscriber to determine whether and how span _data_ — +/// the fields and metadata describing the span — should be stored. The +/// [`new_span`] function is called when a new span is created, and at that +/// point, the subscriber _may_ choose to store the associated data if it will +/// be referenced again. However, if the data has already been recorded and will +/// not be needed by the implementations of `enter` and `exit`, the subscriber +/// may freely discard that data without allocating space to store it. +/// +/// ## Overriding default impls +/// +/// Some trait methods on `Subscriber` have default implementations, either in +/// order to reduce the surface area of implementing `Subscriber`, or for +/// backward-compatibility reasons. However, many subscribers will likely want +/// to override these default implementations. +/// +/// The following methods are likely of interest: +/// +/// - [`register_callsite`] is called once for each callsite from which a span +/// event may originate, and returns an [`Interest`] value describing whether or +/// not the subscriber wishes to see events or spans from that callsite. By +/// default, it calls [`enabled`], and returns `Interest::always()` if +/// `enabled` returns true, or `Interest::never()` if enabled returns false. +/// However, if the subscriber's interest can change dynamically at runtime, +/// it may want to override this function to return `Interest::sometimes()`. +/// Additionally, subscribers which wish to perform a behaviour once for each +/// callsite, such as allocating storage for data related to that callsite, +/// can perform it in `register_callsite`. +/// - [`clone_span`] is called every time a span ID is cloned, and [`try_close`] +/// is called when a span ID is dropped. By default, these functions do +/// nothing. However, they can be used to implement reference counting for +/// spans, allowing subscribers to free storage for span data and to determine +/// when a span has _closed_ permanently (rather than being exited). +/// Subscribers which store per-span data or which need to track span closures +/// should override these functions together. +/// +/// [ID]: super::span::Id +/// [`new_span`]: Subscriber::new_span +/// [`register_callsite`]: Subscriber::register_callsite +/// [`Interest`]: Interest +/// [`enabled`]: Subscriber::enabled +/// [`clone_span`]: Subscriber::clone_span +/// [`try_close`]: Subscriber::try_close +pub trait Subscriber: 'static { + // === Span registry methods ============================================== + + /// Registers a new callsite with this subscriber, returning whether or not + /// the subscriber is interested in being notified about the callsite. + /// + /// By default, this function assumes that the subscriber's [filter] + /// represents an unchanging view of its interest in the callsite. However, + /// if this is not the case, subscribers may override this function to + /// indicate different interests, or to implement behaviour that should run + /// once for every callsite. + /// + /// This function is guaranteed to be called at least once per callsite on + /// every active subscriber. 
The subscriber may store the keys to fields it + /// cares about in order to reduce the cost of accessing fields by name, + /// preallocate storage for that callsite, or perform any other actions it + /// wishes to perform once for each callsite. + /// + /// The subscriber should then return an [`Interest`], indicating + /// whether it is interested in being notified about that callsite in the + /// future. This may be `Always` indicating that the subscriber always + /// wishes to be notified about the callsite, and its filter need not be + /// re-evaluated; `Sometimes`, indicating that the subscriber may sometimes + /// care about the callsite but not always (such as when sampling), or + /// `Never`, indicating that the subscriber never wishes to be notified about + /// that callsite. If all active subscribers return `Never`, a callsite will + /// never be enabled unless a new subscriber expresses interest in it. + /// + /// `Subscriber`s which require their filters to be run every time an event + /// occurs or a span is entered/exited should return `Interest::sometimes`. + /// If a subscriber returns `Interest::sometimes`, then its [`enabled`] method + /// will be called every time an event or span is created from that callsite. + /// + /// For example, suppose a sampling subscriber is implemented by + /// incrementing a counter every time `enabled` is called and only returning + /// `true` when the counter is divisible by a specified sampling rate. If + /// that subscriber returns `Interest::always` from `register_callsite`, then + /// the filter will not be re-evaluated once it has been applied to a given + /// set of metadata. Thus, the counter will not be incremented, and the span + /// or event that corresponds to the metadata will never be `enabled`. + /// + /// `Subscriber`s that need to change their filters occasionally should call + /// [`rebuild_interest_cache`] to re-evaluate `register_callsite` for all + /// callsites. + /// + /// Similarly, if a `Subscriber` has a filtering strategy that can be + /// changed dynamically at runtime, it would need to re-evaluate that filter + /// if the cached results have changed. + /// + /// A subscriber which manages fanout to multiple other subscribers + /// should proxy this decision to all of its child subscribers, + /// returning `Interest::never` only if _all_ such children return + /// `Interest::never`. If the set of subscribers to which spans are + /// broadcast may change dynamically, the subscriber should also never + /// return `Interest::Never`, as a new subscriber may be added that _is_ + /// interested. + /// + /// # Notes + /// This function may be called again when a new subscriber is created or + /// when the registry is invalidated. + /// + /// If a subscriber returns `Interest::never` for a particular callsite, it + /// _may_ still see spans and events originating from that callsite, if + /// another subscriber expressed interest in it. + /// + /// [filter]: #method.enabled + /// [metadata]: super::metadata::Metadata + /// [`Interest`]: Interest + /// [`enabled`]: #method.enabled + /// [`rebuild_interest_cache`]: super::callsite::rebuild_interest_cache + fn register_callsite(&self, metadata: &'static Metadata<'static>) -> Interest { + if self.enabled(metadata) { + Interest::always() + } else { + Interest::never() + } + } + + /// Returns true if a span or event with the specified [metadata] would be + /// recorded. 
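+    ///
+    /// A minimal sketch (added in editing) of a purely level-based filter,
+    /// shown out of context of its surrounding `impl Subscriber` block:
+    ///
+    /// ```rust,ignore
+    /// fn enabled(&self, metadata: &Metadata<'_>) -> bool {
+    ///     // Enable ERROR, WARN, and INFO; skip DEBUG and TRACE.
+    ///     *metadata.level() <= Level::INFO
+    /// }
+    /// ```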
+ /// + /// By default, it is assumed that this filter needs only be evaluated once + /// for each callsite, so it is called by [`register_callsite`] when each + /// callsite is registered. The result is used to determine if the subscriber + /// is always [interested] or never interested in that callsite. This is intended + /// primarily as an optimization, so that expensive filters (such as those + /// involving string search, et cetera) need not be re-evaluated. + /// + /// However, if the subscriber's interest in a particular span or event may + /// change, or depends on contexts only determined dynamically at runtime, + /// then the `register_callsite` method should be overridden to return + /// [`Interest::sometimes`]. In that case, this function will be called every + /// time that span or event occurs. + /// + /// [metadata]: super::metadata::Metadata + /// [interested]: Interest + /// [`Interest::sometimes`]: Interest::sometimes + /// [`register_callsite`]: #method.register_callsite + fn enabled(&self, metadata: &Metadata<'_>) -> bool; + + /// Returns the highest [verbosity level][level] that this `Subscriber` will + /// enable, or `None`, if the subscriber does not implement level-based + /// filtering or chooses not to implement this method. + /// + /// If this method returns a [`Level`][level], it will be used as a hint to + /// determine the most verbose level that will be enabled. This will allow + /// spans and events which are more verbose than that level to be skipped + /// more efficiently. Subscribers which perform filtering are strongly + /// encouraged to provide an implementation of this method. + /// + /// If the maximum level the subscriber will enable can change over the + /// course of its lifetime, it is free to return a different value from + /// multiple invocations of this method. However, note that changes in the + /// maximum level will **only** be reflected after the callsite [`Interest`] + /// cache is rebuilt, by calling the [`callsite::rebuild_interest_cache`][rebuild] + /// function. Therefore, if the subscriber will change the value returned by + /// this method, it is responsible for ensuring that + /// [`rebuild_interest_cache`][rebuild] is called after the value of the max + /// level changes. + /// + /// [level]: super::Level + /// [`Interest`]: Interest + /// [rebuild]: super::callsite::rebuild_interest_cache + fn max_level_hint(&self) -> Option { + None + } + + /// Visit the construction of a new span, returning a new [span ID] for the + /// span being constructed. + /// + /// The provided [`Attributes`] contains any field values that were provided + /// when the span was created. The subscriber may pass a [visitor] to the + /// `Attributes`' [`record` method] to record these values. + /// + /// IDs are used to uniquely identify spans and events within the context of a + /// subscriber, so span equality will be based on the returned ID. Thus, if + /// the subscriber wishes for all spans with the same metadata to be + /// considered equal, it should return the same ID every time it is given a + /// particular set of metadata. Similarly, if it wishes for two separate + /// instances of a span with the same metadata to *not* be equal, it should + /// return a distinct ID every time this function is called, regardless of + /// the metadata. + /// + /// Note that the subscriber is free to assign span IDs based on whatever + /// scheme it sees fit. 
Any guarantees about uniqueness, ordering, or ID + /// reuse are left up to the subscriber implementation to determine. + /// + /// [span ID]: super::span::Id + /// [`Attributes`]: super::span::Attributes + /// [visitor]: super::field::Visit + /// [`record` method]: super::span::Attributes::record + fn new_span(&self, span: &span::Attributes<'_>) -> span::Id; + + // === Notification methods =============================================== + + /// Record a set of values on a span. + /// + /// This method will be invoked when value is recorded on a span. + /// Recording multiple values for the same field is possible, + /// but the actual behaviour is defined by the subscriber implementation. + /// + /// Keep in mind that a span might not provide a value + /// for each field it declares. + /// + /// The subscriber is expected to provide a [visitor] to the `Record`'s + /// [`record` method] in order to record the added values. + /// + /// # Example + /// "foo = 3" will be recorded when [`record`] is called on the + /// `Attributes` passed to `new_span`. + /// Since values are not provided for the `bar` and `baz` fields, + /// the span's `Metadata` will indicate that it _has_ those fields, + /// but values for them won't be recorded at this time. + /// + /// ```rust,ignore + /// # use tracing::span; + /// + /// let mut span = span!("my_span", foo = 3, bar, baz); + /// + /// // `Subscriber::record` will be called with a `Record` + /// // containing "bar = false" + /// span.record("bar", &false); + /// + /// // `Subscriber::record` will be called with a `Record` + /// // containing "baz = "a string"" + /// span.record("baz", &"a string"); + /// ``` + /// + /// [visitor]: super::field::Visit + /// [`record`]: super::span::Attributes::record + /// [`record` method]: super::span::Record::record + fn record(&self, span: &span::Id, values: &span::Record<'_>); + + /// Adds an indication that `span` follows from the span with the id + /// `follows`. + /// + /// This relationship differs somewhat from the parent-child relationship: a + /// span may have any number of prior spans, rather than a single one; and + /// spans are not considered to be executing _inside_ of the spans they + /// follow from. This means that a span may close even if subsequent spans + /// that follow from it are still open, and time spent inside of a + /// subsequent span should not be included in the time its precedents were + /// executing. This is used to model causal relationships such as when a + /// single future spawns several related background tasks, et cetera. + /// + /// If the subscriber has spans corresponding to the given IDs, it should + /// record this relationship in whatever way it deems necessary. Otherwise, + /// if one or both of the given span IDs do not correspond to spans that the + /// subscriber knows about, or if a cyclical relationship would be created + /// (i.e., some span _a_ which proceeds some other span _b_ may not also + /// follow from _b_), it may silently do nothing. + fn record_follows_from(&self, span: &span::Id, follows: &span::Id); + + /// Records that an [`Event`] has occurred. + /// + /// This method will be invoked when an Event is constructed by + /// the `Event`'s [`dispatch` method]. For example, this happens internally + /// when an event macro from `tracing` is called. + /// + /// The key difference between this method and `record` is that `record` is + /// called when a value is recorded for a field defined by a span, + /// while `event` is called when a new event occurs. 
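+    ///
+    /// A minimal sketch (added in editing) of an `event` implementation that
+    /// only inspects the event's metadata:
+    ///
+    /// ```rust,ignore
+    /// fn event(&self, event: &Event<'_>) {
+    ///     let meta = event.metadata();
+    ///     println!("[{}] event in target `{}`", meta.level(), meta.target());
+    /// }
+    /// ```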
+ /// + /// The provided `Event` struct contains any field values attached to the + /// event. The subscriber may pass a [visitor] to the `Event`'s + /// [`record` method] to record these values. + /// + /// [`Event`]: super::event::Event + /// [visitor]: super::field::Visit + /// [`record` method]: super::event::Event::record + /// [`dispatch` method]: super::event::Event::dispatch + fn event(&self, event: &Event<'_>); + + /// Records that a span has been entered. + /// + /// When entering a span, this method is called to notify the subscriber + /// that the span has been entered. The subscriber is provided with the + /// [span ID] of the entered span, and should update any internal state + /// tracking the current span accordingly. + /// + /// [span ID]: super::span::Id + fn enter(&self, span: &span::Id); + + /// Records that a span has been exited. + /// + /// When exiting a span, this method is called to notify the subscriber + /// that the span has been exited. The subscriber is provided with the + /// [span ID] of the exited span, and should update any internal state + /// tracking the current span accordingly. + /// + /// Exiting a span does not imply that the span will not be re-entered. + /// + /// [span ID]: super::span::Id + fn exit(&self, span: &span::Id); + + /// Notifies the subscriber that a [span ID] has been cloned. + /// + /// This function is guaranteed to only be called with span IDs that were + /// returned by this subscriber's `new_span` function. + /// + /// Note that the default implementation of this function this is just the + /// identity function, passing through the identifier. However, it can be + /// used in conjunction with [`try_close`] to track the number of handles + /// capable of `enter`ing a span. When all the handles have been dropped + /// (i.e., `try_close` has been called one more time than `clone_span` for a + /// given ID), the subscriber may assume that the span will not be entered + /// again. It is then free to deallocate storage for data associated with + /// that span, write data from that span to IO, and so on. + /// + /// For more unsafe situations, however, if `id` is itself a pointer of some + /// kind this can be used as a hook to "clone" the pointer, depending on + /// what that means for the specified pointer. + /// + /// [span ID]: super::span::Id + /// [`try_close`]: Subscriber::try_close + fn clone_span(&self, id: &span::Id) -> span::Id { + id.clone() + } + + /// **This method is deprecated.** + /// + /// Using `drop_span` may result in subscribers composed using + /// `tracing-subscriber` crate's `Layer` trait from observing close events. + /// Use [`try_close`] instead. + /// + /// The default implementation of this function does nothing. + /// + /// [`try_close`]: Subscriber::try_close + #[deprecated(since = "0.1.2", note = "use `Subscriber::try_close` instead")] + fn drop_span(&self, _id: span::Id) {} + + /// Notifies the subscriber that a [span ID] has been dropped, and returns + /// `true` if there are now 0 IDs that refer to that span. + /// + /// Higher-level libraries providing functionality for composing multiple + /// subscriber implementations may use this return value to notify any + /// "layered" subscribers that this subscriber considers the span closed. + /// + /// The default implementation of this method calls the subscriber's + /// [`drop_span`] method and returns `false`. 
This means that, unless the + /// subscriber overrides the default implementation, close notifications + /// will never be sent to any layered subscribers. In general, if the + /// subscriber tracks reference counts, this method should be implemented, + /// rather than `drop_span`. + /// + /// This function is guaranteed to only be called with span IDs that were + /// returned by this subscriber's `new_span` function. + /// + /// It's guaranteed that if this function has been called once more than the + /// number of times `clone_span` was called with the same `id`, then no more + /// handles that can enter the span with that `id` exist. This means that it + /// can be used in conjunction with [`clone_span`] to track the number of + /// handles capable of `enter`ing a span. When all the handles have been + /// dropped (i.e., `try_close` has been called one more time than + /// `clone_span` for a given ID), the subscriber may assume that the span + /// will not be entered again, and should return `true`. It is then free to + /// deallocate storage for data associated with that span, write data from + /// that span to IO, and so on. + /// + /// **Note**: since this function is called when spans are dropped, + /// implementations should ensure that they are unwind-safe. Panicking from + /// inside of a `try_close` function may cause a double panic, if the span + /// was dropped due to a thread unwinding. + /// + /// [span ID]: super::span::Id + /// [`clone_span`]: Subscriber::clone_span + /// [`drop_span`]: Subscriber::drop_span + fn try_close(&self, id: span::Id) -> bool { + #[allow(deprecated)] + self.drop_span(id); + false + } + + /// Returns a type representing this subscriber's view of the current span. + /// + /// If subscribers track a current span, they should override this function + /// to return [`Current::new`] if the thread from which this method is + /// called is inside a span, or [`Current::none`] if the thread is not + /// inside a span. + /// + /// By default, this returns a value indicating that the subscriber + /// does **not** track what span is current. If the subscriber does not + /// implement a current span, it should not override this method. + /// + /// [`Current::new`]: super::span::Current#tymethod.new + /// [`Current::none`]: super::span::Current#tymethod.none + fn current_span(&self) -> span::Current { + span::Current::unknown() + } + + // === Downcasting methods ================================================ + + /// If `self` is the same type as the provided `TypeId`, returns an untyped + /// `*const` pointer to that type. Otherwise, returns `None`. + /// + /// If you wish to downcast a `Subscriber`, it is strongly advised to use + /// the safe API provided by [`downcast_ref`] instead. + /// + /// This API is required for `downcast_raw` to be a trait method; a method + /// signature like [`downcast_ref`] (with a generic type parameter) is not + /// object-safe, and thus cannot be a trait method for `Subscriber`. This + /// means that if we only exposed `downcast_ref`, `Subscriber` + /// implementations could not override the downcasting behavior + /// + /// This method may be overridden by "fan out" or "chained" subscriber + /// implementations which consist of multiple composed types. Such + /// subscribers might allow `downcast_raw` by returning references to those + /// component if they contain components with the given `TypeId`. 
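Before the safety notes that follow, a brief sketch (illustrative only, not part of this patch) of how the safe downcasting helpers layered on top of `downcast_raw` are typically used:

```rust
use tracing_core::subscriber::{NoSubscriber, Subscriber};

fn main() {
    // Type-erase a subscriber, then recover its concrete type through the
    // safe `is`/`downcast_ref` helpers, which are built on `downcast_raw`.
    let subscriber: Box<dyn Subscriber> = Box::new(NoSubscriber::default());
    assert!(subscriber.is::<NoSubscriber>());
    if let Some(no_op) = subscriber.downcast_ref::<NoSubscriber>() {
        // `no_op` is a plain `&NoSubscriber` reference.
        let _ = no_op;
    }
}
```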
+ /// + /// # Safety + /// + /// The [`downcast_ref`] method expects that the pointer returned by + /// `downcast_raw` is non-null and points to a valid instance of the type + /// with the provided `TypeId`. Failure to ensure this will result in + /// undefined behaviour, so implementing `downcast_raw` is unsafe. + /// + /// [`downcast_ref`]: #method.downcast_ref + unsafe fn downcast_raw(&self, id: TypeId) -> Option<*const ()> { + if id == TypeId::of::() { + Some(self as *const Self as *const ()) + } else { + None + } + } +} + +impl dyn Subscriber { + /// Returns `true` if this `Subscriber` is the same type as `T`. + pub fn is(&self) -> bool { + self.downcast_ref::().is_some() + } + + /// Returns some reference to this `Subscriber` value if it is of type `T`, + /// or `None` if it isn't. + pub fn downcast_ref(&self) -> Option<&T> { + unsafe { + let raw = self.downcast_raw(TypeId::of::())?; + if raw.is_null() { + None + } else { + Some(&*(raw as *const _)) + } + } + } +} + +/// Indicates a [`Subscriber`]'s interest in a particular callsite. +/// +/// `Subscriber`s return an `Interest` from their [`register_callsite`] methods +/// in order to determine whether that span should be enabled or disabled. +/// +/// [`Subscriber`]: super::Subscriber +/// [`register_callsite`]: super::Subscriber::register_callsite +#[derive(Clone, Debug)] +pub struct Interest(InterestKind); + +#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd)] +enum InterestKind { + Never = 0, + Sometimes = 1, + Always = 2, +} + +impl Interest { + /// Returns an `Interest` indicating that the subscriber is never interested + /// in being notified about a callsite. + /// + /// If all active subscribers are `never()` interested in a callsite, it will + /// be completely disabled unless a new subscriber becomes active. + #[inline] + pub fn never() -> Self { + Interest(InterestKind::Never) + } + + /// Returns an `Interest` indicating the subscriber is sometimes interested + /// in being notified about a callsite. + /// + /// If all active subscribers are `sometimes` or `never` interested in a + /// callsite, the currently active subscriber will be asked to filter that + /// callsite every time it creates a span. This will be the case until a new + /// subscriber expresses that it is `always` interested in the callsite. + #[inline] + pub fn sometimes() -> Self { + Interest(InterestKind::Sometimes) + } + + /// Returns an `Interest` indicating the subscriber is always interested in + /// being notified about a callsite. + /// + /// If any subscriber expresses that it is `always()` interested in a given + /// callsite, then the callsite will always be enabled. + #[inline] + pub fn always() -> Self { + Interest(InterestKind::Always) + } + + /// Returns `true` if the subscriber is never interested in being notified + /// about this callsite. + #[inline] + pub fn is_never(&self) -> bool { + matches!(self.0, InterestKind::Never) + } + + /// Returns `true` if the subscriber is sometimes interested in being notified + /// about this callsite. + #[inline] + pub fn is_sometimes(&self) -> bool { + matches!(self.0, InterestKind::Sometimes) + } + + /// Returns `true` if the subscriber is always interested in being notified + /// about this callsite. + #[inline] + pub fn is_always(&self) -> bool { + matches!(self.0, InterestKind::Always) + } + + /// Returns the common interest between these two Interests. + /// + /// If both interests are the same, this propagates that interest. 
+ /// Otherwise, if they differ, the result must always be + /// `Interest::sometimes` --- if the two subscribers differ in opinion, we + /// will have to ask the current subscriber what it thinks, no matter what. + pub(crate) fn and(self, rhs: Interest) -> Self { + if self.0 == rhs.0 { + self + } else { + Interest::sometimes() + } + } +} + +/// A no-op [`Subscriber`]. +/// +/// [`NoSubscriber`] implements the [`Subscriber`] trait by never being enabled, +/// never being interested in any callsite, and dropping all spans and events. +#[derive(Copy, Clone, Debug, Default)] +pub struct NoSubscriber(()); + +impl Subscriber for NoSubscriber { + #[inline] + fn register_callsite(&self, _: &'static Metadata<'static>) -> Interest { + Interest::never() + } + + fn new_span(&self, _: &span::Attributes<'_>) -> span::Id { + span::Id::from_u64(0xDEAD) + } + + fn event(&self, _event: &Event<'_>) {} + + fn record(&self, _span: &span::Id, _values: &span::Record<'_>) {} + + fn record_follows_from(&self, _span: &span::Id, _follows: &span::Id) {} + + #[inline] + fn enabled(&self, _metadata: &Metadata<'_>) -> bool { + false + } + + fn enter(&self, _span: &span::Id) {} + fn exit(&self, _span: &span::Id) {} +} + +impl Subscriber for Box { + #[inline] + fn register_callsite(&self, metadata: &'static Metadata<'static>) -> Interest { + self.as_ref().register_callsite(metadata) + } + + #[inline] + fn enabled(&self, metadata: &Metadata<'_>) -> bool { + self.as_ref().enabled(metadata) + } + + #[inline] + fn max_level_hint(&self) -> Option { + self.as_ref().max_level_hint() + } + + #[inline] + fn new_span(&self, span: &span::Attributes<'_>) -> span::Id { + self.as_ref().new_span(span) + } + + #[inline] + fn record(&self, span: &span::Id, values: &span::Record<'_>) { + self.as_ref().record(span, values) + } + + #[inline] + fn record_follows_from(&self, span: &span::Id, follows: &span::Id) { + self.as_ref().record_follows_from(span, follows) + } + + #[inline] + fn event(&self, event: &Event<'_>) { + self.as_ref().event(event) + } + + #[inline] + fn enter(&self, span: &span::Id) { + self.as_ref().enter(span) + } + + #[inline] + fn exit(&self, span: &span::Id) { + self.as_ref().exit(span) + } + + #[inline] + fn clone_span(&self, id: &span::Id) -> span::Id { + self.as_ref().clone_span(id) + } + + #[inline] + fn try_close(&self, id: span::Id) -> bool { + self.as_ref().try_close(id) + } + + #[inline] + #[allow(deprecated)] + fn drop_span(&self, id: span::Id) { + self.as_ref().try_close(id); + } + + #[inline] + fn current_span(&self) -> span::Current { + self.as_ref().current_span() + } + + #[inline] + unsafe fn downcast_raw(&self, id: TypeId) -> Option<*const ()> { + if id == TypeId::of::() { + return Some(self as *const Self as *const _); + } + + self.as_ref().downcast_raw(id) + } +} + +impl Subscriber for Arc { + #[inline] + fn register_callsite(&self, metadata: &'static Metadata<'static>) -> Interest { + self.as_ref().register_callsite(metadata) + } + + #[inline] + fn enabled(&self, metadata: &Metadata<'_>) -> bool { + self.as_ref().enabled(metadata) + } + + #[inline] + fn max_level_hint(&self) -> Option { + self.as_ref().max_level_hint() + } + + #[inline] + fn new_span(&self, span: &span::Attributes<'_>) -> span::Id { + self.as_ref().new_span(span) + } + + #[inline] + fn record(&self, span: &span::Id, values: &span::Record<'_>) { + self.as_ref().record(span, values) + } + + #[inline] + fn record_follows_from(&self, span: &span::Id, follows: &span::Id) { + self.as_ref().record_follows_from(span, follows) + } + + #[inline] 
+    fn event(&self, event: &Event<'_>) {
+        self.as_ref().event(event)
+    }
+
+    #[inline]
+    fn enter(&self, span: &span::Id) {
+        self.as_ref().enter(span)
+    }
+
+    #[inline]
+    fn exit(&self, span: &span::Id) {
+        self.as_ref().exit(span)
+    }
+
+    #[inline]
+    fn clone_span(&self, id: &span::Id) -> span::Id {
+        self.as_ref().clone_span(id)
+    }
+
+    #[inline]
+    fn try_close(&self, id: span::Id) -> bool {
+        self.as_ref().try_close(id)
+    }
+
+    #[inline]
+    #[allow(deprecated)]
+    fn drop_span(&self, id: span::Id) {
+        self.as_ref().try_close(id);
+    }
+
+    #[inline]
+    fn current_span(&self) -> span::Current {
+        self.as_ref().current_span()
+    }
+
+    #[inline]
+    unsafe fn downcast_raw(&self, id: TypeId) -> Option<*const ()> {
+        if id == TypeId::of::<Self>() {
+            return Some(self as *const Self as *const _);
+        }
+
+        self.as_ref().downcast_raw(id)
+    }
+}
diff --git a/third_party/rust/tracing-core/tests/common/mod.rs b/third_party/rust/tracing-core/tests/common/mod.rs
new file mode 100644
index 000000000000..3420f0b899fa
--- /dev/null
+++ b/third_party/rust/tracing-core/tests/common/mod.rs
@@ -0,0 +1,30 @@
+use tracing_core::{metadata::Metadata, span, subscriber::Subscriber, Event};
+
+pub struct TestSubscriberA;
+impl Subscriber for TestSubscriberA {
+    fn enabled(&self, _: &Metadata<'_>) -> bool {
+        true
+    }
+    fn new_span(&self, _: &span::Attributes<'_>) -> span::Id {
+        span::Id::from_u64(1)
+    }
+    fn record(&self, _: &span::Id, _: &span::Record<'_>) {}
+    fn record_follows_from(&self, _: &span::Id, _: &span::Id) {}
+    fn event(&self, _: &Event<'_>) {}
+    fn enter(&self, _: &span::Id) {}
+    fn exit(&self, _: &span::Id) {}
+}
+pub struct TestSubscriberB;
+impl Subscriber for TestSubscriberB {
+    fn enabled(&self, _: &Metadata<'_>) -> bool {
+        true
+    }
+    fn new_span(&self, _: &span::Attributes<'_>) -> span::Id {
+        span::Id::from_u64(1)
+    }
+    fn record(&self, _: &span::Id, _: &span::Record<'_>) {}
+    fn record_follows_from(&self, _: &span::Id, _: &span::Id) {}
+    fn event(&self, _: &Event<'_>) {}
+    fn enter(&self, _: &span::Id) {}
+    fn exit(&self, _: &span::Id) {}
+}
diff --git a/third_party/rust/tracing-core/tests/dispatch.rs b/third_party/rust/tracing-core/tests/dispatch.rs
new file mode 100644
index 000000000000..3820692a8625
--- /dev/null
+++ b/third_party/rust/tracing-core/tests/dispatch.rs
@@ -0,0 +1,56 @@
+#![cfg(feature = "std")]
+mod common;
+
+use common::*;
+use tracing_core::dispatcher::*;
+
+#[test]
+fn set_default_dispatch() {
+    set_global_default(Dispatch::new(TestSubscriberA)).expect("global dispatch set failed");
+    get_default(|current| {
+        assert!(
+            current.is::<TestSubscriberA>(),
+            "global dispatch get failed"
+        )
+    });
+
+    let guard = set_default(&Dispatch::new(TestSubscriberB));
+    get_default(|current| assert!(current.is::<TestSubscriberB>(), "set_default get failed"));
+
+    // Drop the guard, setting the dispatch back to the global dispatch
+    drop(guard);
+
+    get_default(|current| {
+        assert!(
+            current.is::<TestSubscriberA>(),
+            "global dispatch get failed"
+        )
+    });
+}
+
+#[test]
+fn nested_set_default() {
+    let _guard = set_default(&Dispatch::new(TestSubscriberA));
+    get_default(|current| {
+        assert!(
+            current.is::<TestSubscriberA>(),
+            "set_default for outer subscriber failed"
+        )
+    });
+
+    let inner_guard = set_default(&Dispatch::new(TestSubscriberB));
+    get_default(|current| {
+        assert!(
+            current.is::<TestSubscriberB>(),
+            "set_default inner subscriber failed"
+        )
+    });
+
+    drop(inner_guard);
+    get_default(|current| {
+        assert!(
+            current.is::<TestSubscriberA>(),
+            "set_default outer subscriber failed"
+        )
+    });
+}
diff --git a/third_party/rust/tracing-core/tests/global_dispatch.rs
b/third_party/rust/tracing-core/tests/global_dispatch.rs
new file mode 100644
index 000000000000..d430ac61820a
--- /dev/null
+++ b/third_party/rust/tracing-core/tests/global_dispatch.rs
@@ -0,0 +1,34 @@
+mod common;
+
+use common::*;
+use tracing_core::dispatcher::*;
+#[test]
+fn global_dispatch() {
+    set_global_default(Dispatch::new(TestSubscriberA)).expect("global dispatch set failed");
+    get_default(|current| {
+        assert!(
+            current.is::<TestSubscriberA>(),
+            "global dispatch get failed"
+        )
+    });
+
+    #[cfg(feature = "std")]
+    with_default(&Dispatch::new(TestSubscriberB), || {
+        get_default(|current| {
+            assert!(
+                current.is::<TestSubscriberB>(),
+                "thread-local override of global dispatch failed"
+            )
+        });
+    });
+
+    get_default(|current| {
+        assert!(
+            current.is::<TestSubscriberA>(),
+            "reset to global override failed"
+        )
+    });
+
+    set_global_default(Dispatch::new(TestSubscriberA))
+        .expect_err("double global dispatch set succeeded");
+}
diff --git a/third_party/rust/tracing-core/tests/macros.rs b/third_party/rust/tracing-core/tests/macros.rs
new file mode 100644
index 000000000000..ee9007eeebd4
--- /dev/null
+++ b/third_party/rust/tracing-core/tests/macros.rs
@@ -0,0 +1,48 @@
+use tracing_core::{
+    callsite::Callsite,
+    metadata,
+    metadata::{Kind, Level, Metadata},
+    subscriber::Interest,
+};
+
+#[test]
+fn metadata_macro_api() {
+    // This test should catch any inadvertent breaking changes
+    // caused by changes to the macro.
+    struct TestCallsite;
+
+    impl Callsite for TestCallsite {
+        fn set_interest(&self, _: Interest) {
+            unimplemented!("test")
+        }
+        fn metadata(&self) -> &Metadata<'_> {
+            unimplemented!("test")
+        }
+    }
+
+    static CALLSITE: TestCallsite = TestCallsite;
+    let _metadata = metadata! {
+        name: "test_metadata",
+        target: "test_target",
+        level: Level::DEBUG,
+        fields: &["foo", "bar", "baz"],
+        callsite: &CALLSITE,
+        kind: Kind::SPAN,
+    };
+    let _metadata = metadata! {
+        name: "test_metadata",
+        target: "test_target",
+        level: Level::TRACE,
+        fields: &[],
+        callsite: &CALLSITE,
+        kind: Kind::EVENT,
+    };
+    let _metadata = metadata!
{ + name: "test_metadata", + target: "test_target", + level: Level::INFO, + fields: &[], + callsite: &CALLSITE, + kind: Kind::EVENT + }; +} diff --git a/third_party/rust/tracing/.cargo-checksum.json b/third_party/rust/tracing/.cargo-checksum.json new file mode 100644 index 000000000000..80ea1c93f6fd --- /dev/null +++ b/third_party/rust/tracing/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"CHANGELOG.md":"1d82f1ac1540d9606314c56d1701d10fff72688b0bf458398846fa837a6b8209","Cargo.toml":"c924c5dd3ad8b58b4d65ec40f10a44b5352f89145c419d82dab9c647b5d363f3","LICENSE":"898b1ae9821e98daf8964c8d6c7f61641f5f5aa78ad500020771c0939ee0dea1","README.md":"39a30e68fd06cd90cdf232fb87cba82168797c9861739e9529fee68e72c26868","benches/global_subscriber.rs":"271213baed0e02054e506c1ec9c47b58696c78aaa46f0969a147f4c369f80e3d","benches/no_subscriber.rs":"495c9a91fb972ec61ced31ef8e19d2cca02ec8ffae4e98e3316e55f6a0074578","benches/subscriber.rs":"c609be119ed6e4d3fb79df77f15aa14effbd3e2f77c627a49229a50091d3ee6a","src/dispatcher.rs":"5732b1f228328cd41e77b04a27faf3b6054a8ed5cd5034a0dad8e1e694ca3889","src/field.rs":"55c7a2798b9ad0269e7c738c3f15a5d0281bf34ac3a6196a3f0b15801e5278bd","src/instrument.rs":"1fe4de5c13b5ba048e9872d78d1fa4e85655f9f2ed10f79b72b5da881c9b8b45","src/level_filters.rs":"baae8e797897bae9cdd9ec64b8e9a3d71156e9c03261be17b5b18acba034e154","src/lib.rs":"c794108f5f37b5dc1609498afad5ebbb25ad5f80bf9e362c032a63d327429937","src/macros.rs":"1bf35f17cbb50fb92f60e5e60190faf5eeba03c328f754631fff6df183509491","src/span.rs":"e7c16999e8702bf1ff82aaa6803c81ac5b77ab96b754cac6e686acfc6adc14f9","src/stdlib.rs":"248514a9bae6106e436358aee44c92abf8e7f79022895c4a25136ddef211d198","src/subscriber.rs":"ae879c373be7ee4935f7b02a345f92ccbeb7879d61c5d37e3cc1277b3d51ddb2","tests/enabled.rs":"1333339aace87ea9d701f2f76a1985820cc513a75013a7ed89669f7a2c635479","tests/event.rs":"c4ec3ac338475e9e61675551eb99df1d8a7cbefb05a0d60203994f5c1df7c784","tests/filter_caching_is_lexically_scoped.rs":"5487a37db5fbdf3d57020ab1f01185d928c45d967d99d723ffc434540459d8dc","tests/filters_are_not_reevaluated_for_the_same_span.rs":"251abbc000dddd298448958a1f0e5be71da527ac6c1a368d57837c83a5467329","tests/filters_are_reevaluated_for_different_call_sites.rs":"e0fdd8e930c043674702831b4d96f331e63aba824576bbac50b3f53bb0241cc7","tests/filters_dont_leak.rs":"d594266818a3461886da33bfcc76937d89a433ed6980226fc428706b216c093c","tests/future_send.rs":"3e9c9193219d12e342c18bbedb2f6ec940334202feb3cffba91601d6001b8575","tests/macro_imports.rs":"d5de857162185d4a2384f3cb644bfcf76c7f5c1a3b5f72bfa0d2620ac6e3873c","tests/macros.rs":"fa83397181d73d2cae09c16d9647a63d1e3bad0f2dbc5b3280f69f3d0180c488","tests/macros_incompatible_concat.rs":"5f3bcbb65e4ae39db1cfc2def62fc913c20bab0fb769c8f731504e2615585ee5","tests/macros_redefined_core.rs":"a6eac60522f71fe6c9a040b8b869d596f7eb9e907f5b49f4be4413a40c387676","tests/max_level_hint.rs":"9b366591d947ca0202fa0bdf797e1bb14534d3c896cf8b9674660cd2807c32ef","tests/multiple_max_level_hints.rs":"4d9ef0de9cccc787da8f5e3f6c233ac9db42a2a99cfe5e39997e1f4aa9df0c00","tests/no_subscriber.rs":"2f8f2ada5089d8e2e503394dfe8206598a11895907c53bf940b892f1e6afdd2f","tests/scoped_clobbers_default.rs":"806480a74c15e4d68bb7576050662b1e53ee765fd583d003f8b349f17ea63a4b","tests/span.rs":"f84ead5b1dad9b91e5cec9d8378ab932a942936374ba928fb381e67fab52cda0","tests/subscriber.rs":"1617c098f4fa6abed174fe062111444c7b67fa0f377d2b342176998e572480e3"},"package":"5d0ecdcb44a79f0fe9844f0c4f33a342cbcbb5117de8001e6ba0dc2351327d09"} \ No newline at end of file diff --git 
a/third_party/rust/tracing/CHANGELOG.md b/third_party/rust/tracing/CHANGELOG.md new file mode 100644 index 000000000000..8720d98ad5af --- /dev/null +++ b/third_party/rust/tracing/CHANGELOG.md @@ -0,0 +1,708 @@ +# 0.1.34 (April 14, 2022) + +This release includes bug fixes for the "log" support feature and for the use of +both scoped and global default dispatchers in the same program. + +### Fixed + +- Failure to use the global default dispatcher when a thread sets a local + default dispatcher before the global default is set ([#2065]) +- **log**: Compilation errors due to `async` block/fn futures becoming `!Send` + when the "log" feature flag is enabled ([#2073]) +- Broken links in documentation ([#2068]) + +Thanks to @ben0x539 for contributing to this release! + +[#2065]: https://github.com/tokio-rs/tracing/pull/2065 +[#2073]: https://github.com/tokio-rs/tracing/pull/2073 +[#2068]: https://github.com/tokio-rs/tracing/pull/2068 + +# 0.1.33 (April 9, 2022) + +This release adds new `span_enabled!` and `event_enabled!` variants of the +`enabled!` macro, for testing whether a subscriber would specifically enable a +span or an event. + +### Added + +- `span_enabled!` and `event_enabled!` macros ([#1900]) +- Several documentation improvements ([#2010], [#2012]) + +### Fixed + +- Compilation warning when compiling for <=32-bit targets (including `wasm32`) + ([#2060]) + +Thanks to @guswynn, @arifd, @hrxi, @CAD97, and @name1e5s for contributing to +this release! + +[#1900]: https://github.com/tokio-rs/tracing/pull/1900 +[#2010]: https://github.com/tokio-rs/tracing/pull/2010 +[#2012]: https://github.com/tokio-rs/tracing/pull/2012 +[#2060]: https://github.com/tokio-rs/tracing/pull/2060 + +# 0.1.32 (March 8th, 2022) + +This release reduces the overhead of creating and dropping disabled +spans significantly, which should improve performance when no `tracing` +subscriber is in use or when spans are disabled by a filter. + +### Fixed + +- **attributes**: Compilation failure with `--minimal-versions` due to a + too-permissive `syn` dependency ([#1960]) + +### Changed + +- Reduced `Drop` overhead for disabled spans ([#1974]) +- `tracing-attributes`: updated to [0.1.20][attributes-0.1.20] + +[#1974]: https://github.com/tokio-rs/tracing/pull/1974 +[#1960]: https://github.com/tokio-rs/tracing/pull/1960 +[attributes-0.1.20]: https://github.com/tokio-rs/tracing/releases/tag/tracing-attributes-0.1.20 + +# 0.1.31 (February 17th, 2022) + +This release increases the minimum supported Rust version (MSRV) to 1.49.0. In +addition, it fixes some relatively rare macro bugs. + +### Added + +- Added `tracing-forest` to the list of related crates ([#1935]) + +### Changed + +- Updated minimum supported Rust version (MSRV) to 1.49.0 ([#1913]) + +### Fixed + +- Fixed the `warn!` macro incorrectly generating an event with the `TRACE` level + ([#1930]) +- Fixed macro hygiene issues when used in a crate that defines its own `concat!` + macro, for real this time ([#1918]) + +Thanks to @QnnOkabayashi, @nicolaasg, and @teohhanhui for contributing to this +release! + +[#1935]: https://github.com/tokio-rs/tracing/pull/1935 +[#1913]: https://github.com/tokio-rs/tracing/pull/1913 +[#1930]: https://github.com/tokio-rs/tracing/pull/1930 +[#1918]: https://github.com/tokio-rs/tracing/pull/1918 + +# 0.1.30 (February 3rd, 2022) + +This release adds *experimental* support for recording structured field +values using the [`valuable`] crate. See [this blog post][post] for +details on `valuable`. 
+ +Note that `valuable` support currently requires `--cfg tracing_unstable`. See +the documentation for details. + +This release also adds a new `enabled!` macro for testing if a span or event +would be enabled. + +### Added + +- **field**: Experimental support for recording field values using the + [`valuable`] crate ([#1608], [#1888], [#1887]) +- `enabled!` macro for testing if a span or event is enabled ([#1882]) + +### Changed + +- `tracing-core`: updated to [0.1.22][core-0.1.22] +- `tracing-attributes`: updated to [0.1.19][attributes-0.1.19] + +### Fixed + +- **log**: Fixed "use of moved value" compiler error when the "log" feature is + enabled ([#1823]) +- Fixed macro hygiene issues when used in a crate that defines its own `concat!` + macro ([#1842]) +- A very large number of documentation fixes and improvements. + +Thanks to @@Vlad-Scherbina, @Skepfyr, @Swatinem, @guswynn, @teohhanhui, +@xd009642, @tobz, @d-e-s-o@0b01, and @nickelc for contributing to this release! + +[`valuable`]: https://crates.io/crates/valuable +[post]: https://tokio.rs/blog/2021-05-valuable +[core-0.1.22]: https://github.com/tokio-rs/tracing/releases/tag/tracing-core-0.1.22 +[attributes-0.1.19]: https://github.com/tokio-rs/tracing/releases/tag/tracing-attributes-0.1.19 +[#1608]: https://github.com/tokio-rs/tracing/pull/1608 +[#1888]: https://github.com/tokio-rs/tracing/pull/1888 +[#1887]: https://github.com/tokio-rs/tracing/pull/1887 +[#1882]: https://github.com/tokio-rs/tracing/pull/1882 +[#1823]: https://github.com/tokio-rs/tracing/pull/1823 +[#1842]: https://github.com/tokio-rs/tracing/pull/1842 + +# 0.1.29 (October 5th, 2021) + +This release adds support for recording `Option where T: Value` as typed +`tracing` field values. It also includes significant performance improvements +for functions annotated with the `#[instrument]` attribute when the generated +span is disabled. + +### Changed + +- `tracing-core`: updated to v0.1.21 +- `tracing-attributes`: updated to v0.1.18 + +### Added + +- **field**: `Value` impl for `Option where T: Value` ([#1585]) +- **attributes**: - improved performance when skipping `#[instrument]`-generated + spans below the max level ([#1600], [#1605], [#1614], [#1616], [#1617]) + +### Fixed + +- **instrument**: added missing `Future` implementation for `WithSubscriber`, + making the `WithDispatch` extension trait actually useable ([#1602]) +- Documentation fixes and improvements ([#1595], [#1601], [#1597]) + +Thanks to @brianburgers, @mattiast, @DCjanus, @oli-obk, and @matklad for +contributing to this release! + +[#1585]: https://github.com/tokio-rs/tracing/pull/1585 +[#1595]: https://github.com/tokio-rs/tracing/pull/1596 +[#1597]: https://github.com/tokio-rs/tracing/pull/1597 +[#1600]: https://github.com/tokio-rs/tracing/pull/1600 +[#1601]: https://github.com/tokio-rs/tracing/pull/1601 +[#1602]: https://github.com/tokio-rs/tracing/pull/1602 +[#1605]: https://github.com/tokio-rs/tracing/pull/1605 +[#1614]: https://github.com/tokio-rs/tracing/pull/1614 +[#1616]: https://github.com/tokio-rs/tracing/pull/1616 +[#1617]: https://github.com/tokio-rs/tracing/pull/1617 + +# 0.1.28 (September 17th, 2021) + +This release fixes an issue where the RustDoc documentation was rendered +incorrectly. It doesn't include any actual code changes, and is very boring and +can be ignored. + +### Fixed + +- **docs**: Incorrect documentation rendering due to unclosed `
` tag + ([#1572]) + +[#1572]: https://github.com/tokio-rs/tracing/pull/1572 + +# 0.1.27 (September 13, 2021) + +This release adds a new [`Span::or_current`] method to aid in efficiently +propagating span contexts to spawned threads or tasks. Additionally, it updates +the [`tracing-core`] version to [0.1.20] and the [`tracing-attributes`] version to +[0.1.16], ensuring that a number of new features in those crates are present. + +### Fixed + +- **instrument**: Added missing `WithSubscriber` implementations for futures and + other types ([#1424]) + +### Added + +- `Span::or_current` method, to help with efficient span context propagation + ([#1538]) +- **attributes**: add `skip_all` option to `#[instrument]` ([#1548]) +- **attributes**: record primitive types as primitive values rather than as + `fmt::Debug` ([#1378]) +- **core**: `NoSubscriber`, a no-op `Subscriber` implementation + ([#1549]) +- **core**: Added `Visit::record_f64` and support for recording floating-point + values ([#1507], [#1522]) +- A large number of documentation improvements and fixes ([#1369], [#1398], + [#1435], [#1442], [#1524], [#1556]) + +Thanks to new contributors @dzvon and @mbergkvist, as well as @teozkr, +@maxburke, @LukeMathWalker, and @jsgf, for contributing to this +release! + +[`Span::or_current`]: https://docs.rs/tracing/0.1.27/tracing/struct.Span.html#method.or_current +[`tracing-core`]: https://crates.io/crates/tracing-core +[`tracing-attributes`]: https://crates.io/crates/tracing-attributes +[`tracing-core`]: https://crates.io/crates/tracing-core +[0.1.20]: https://github.com/tokio-rs/tracing/releases/tag/tracing-core-0.1.20 +[0.1.16]: https://github.com/tokio-rs/tracing/releases/tag/tracing-attributes-0.1.16 +[#1424]: https://github.com/tokio-rs/tracing/pull/1424 +[#1538]: https://github.com/tokio-rs/tracing/pull/1538 +[#1548]: https://github.com/tokio-rs/tracing/pull/1548 +[#1378]: https://github.com/tokio-rs/tracing/pull/1378 +[#1507]: https://github.com/tokio-rs/tracing/pull/1507 +[#1522]: https://github.com/tokio-rs/tracing/pull/1522 +[#1369]: https://github.com/tokio-rs/tracing/pull/1369 +[#1398]: https://github.com/tokio-rs/tracing/pull/1398 +[#1435]: https://github.com/tokio-rs/tracing/pull/1435 +[#1442]: https://github.com/tokio-rs/tracing/pull/1442 +[#1524]: https://github.com/tokio-rs/tracing/pull/1524 +[#1556]: https://github.com/tokio-rs/tracing/pull/1556 + +# 0.1.26 (April 30, 2021) + +### Fixed + +- **attributes**: Compatibility between `#[instrument]` and `async-trait` + v0.1.43 and newer ([#1228]) +- Several documentation fixes ([#1305], [#1344]) +### Added + +- `Subscriber` impl for `Box` ([#1358]) +- `Subscriber` impl for `Arc` ([#1374]) +- Symmetric `From` impls for existing `Into` impls on `span::Current`, `Span`, + and `Option` ([#1335], [#1338]) +- `From` implementation for `Option`, allowing `EnteredSpan` to + be used in a `span!` macro's `parent:` field ([#1325]) +- `Attributes::fields` accessor that returns the set of fields defined on a + span's `Attributes` ([#1331]) + + +Thanks to @Folyd, @nightmared, and new contributors @rmsc and @Fishrock123 for +contributing to this release! 
+ +[#1227]: https://github.com/tokio-rs/tracing/pull/1228 +[#1305]: https://github.com/tokio-rs/tracing/pull/1305 +[#1325]: https://github.com/tokio-rs/tracing/pull/1325 +[#1338]: https://github.com/tokio-rs/tracing/pull/1338 +[#1344]: https://github.com/tokio-rs/tracing/pull/1344 +[#1358]: https://github.com/tokio-rs/tracing/pull/1358 +[#1374]: https://github.com/tokio-rs/tracing/pull/1374 +[#1335]: https://github.com/tokio-rs/tracing/pull/1335 +[#1331]: https://github.com/tokio-rs/tracing/pull/1331 + +# 0.1.25 (February 23, 2021) + +### Added + +- `Span::entered` method for entering a span and moving it into a guard by value + rather than borrowing it ([#1252]) + +Thanks to @matklad for contributing to this release! + +[#1252]: https://github.com/tokio-rs/tracing/pull/1252 + +# 0.1.24 (February 17, 2021) + +### Fixed + +- **attributes**: Compiler error when using `#[instrument(err)]` on functions + which return `impl Trait` ([#1236]) +- Fixed broken match arms in event macros ([#1239]) +- Documentation improvements ([#1232]) + +Thanks to @bkchr and @lfranke for contributing to this release! + +[#1236]: https://github.com/tokio-rs/tracing/pull/1236 +[#1239]: https://github.com/tokio-rs/tracing/pull/1239 +[#1232]: https://github.com/tokio-rs/tracing/pull/1232 + +# 0.1.23 (February 4, 2021) + +### Fixed + +- **attributes**: Compiler error when using `#[instrument(err)]` on functions + with mutable parameters ([#1167]) +- **attributes**: Missing function visibility modifier when using + `#[instrument]` with `async-trait` ([#977]) +- **attributes** Removed unused `syn` features ([#928]) +- **log**: Fixed an issue where the `tracing` macros would generate code for + events whose levels are disabled statically by the `log` crate's + `static_max_level_XXX` features ([#1175]) +- Fixed deprecations and clippy lints ([#1195]) +- Several documentation fixes and improvements ([#941], [#965], [#981], [#1146], + [#1215]) + +### Changed + +- **attributes**: `tracing-futures` dependency is no longer required when using + `#[instrument]` on async functions ([#808]) +- **attributes**: Updated `tracing-attributes` minimum dependency to v0.1.12 + ([#1222]) + +Thanks to @nagisa, @Txuritan, @TaKO8Ki, @okready, and @krojew for contributing +to this release! 
+ +[#1167]: https://github.com/tokio-rs/tracing/pull/1167 +[#977]: https://github.com/tokio-rs/tracing/pull/977 +[#965]: https://github.com/tokio-rs/tracing/pull/965 +[#981]: https://github.com/tokio-rs/tracing/pull/981 +[#1215]: https://github.com/tokio-rs/tracing/pull/1215 +[#808]: https://github.com/tokio-rs/tracing/pull/808 +[#941]: https://github.com/tokio-rs/tracing/pull/941 +[#1146]: https://github.com/tokio-rs/tracing/pull/1146 +[#1175]: https://github.com/tokio-rs/tracing/pull/1175 +[#1195]: https://github.com/tokio-rs/tracing/pull/1195 +[#1222]: https://github.com/tokio-rs/tracing/pull/1222 + +# 0.1.22 (November 23, 2020) + +### Changed + +- Updated `pin-project-lite` dependency to 0.2 ([#1108]) + +[#1108]: https://github.com/tokio-rs/tracing/pull/1108 + +# 0.1.21 (September 28, 2020) + +### Fixed + +- Incorrect inlining of `Span::new`, `Span::new_root`, and `Span::new_child_of`, + which could result in `dispatcher::get_default` being inlined at the callsite + ([#994]) +- Regression where using a struct field as a span or event field when other + fields on that struct are borrowed mutably would fail to compile ([#987]) + +### Changed + +- Updated `tracing-core` to 0.1.17 ([#992]) + +### Added + +- `Instrument` trait and `Instrumented` type for attaching a `Span` to a + `Future` ([#808]) +- `Copy` implementations for `Level` and `LevelFilter` ([#992]) +- Multiple documentation fixes and improvements ([#964], [#980], [#981]) + +Thanks to @nagisa, and new contributors @SecurityInsanity, @froydnj, @jyn514 and +@TaKO8Ki for contributing to this release! + +[#994]: https://github.com/tokio-rs/tracing/pull/994 +[#992]: https://github.com/tokio-rs/tracing/pull/992 +[#987]: https://github.com/tokio-rs/tracing/pull/987 +[#980]: https://github.com/tokio-rs/tracing/pull/980 +[#981]: https://github.com/tokio-rs/tracing/pull/981 +[#964]: https://github.com/tokio-rs/tracing/pull/964 +[#808]: https://github.com/tokio-rs/tracing/pull/808 + +# 0.1.20 (August 24, 2020) + +### Changed + +- Significantly reduced assembly generated by macro invocations (#943) +- Updated `tracing-core` to 0.1.15 (#943) + +### Added + +- Documented minimum supported Rust version policy (#941) + +# 0.1.19 (August 10, 2020) + +### Fixed + +- Updated `tracing-core` to fix incorrect calculation of the global max level + filter (#908) + +### Added + +- **attributes**: Support for using `self` in field expressions when + instrumenting `async-trait` functions (#875) +- Several documentation improvements (#832, #881, #896, #897, #911, #913) + +Thanks to @anton-dutov, @nightmared, @mystor, and @toshokan for contributing to +this release! + +# 0.1.18 (July 31, 2020) + +### Fixed + +- Fixed a bug where `LevelFilter::OFF` (and thus also the `static_max_level_off` + feature flag) would enable *all* traces, rather than *none* (#853) +- **log**: Fixed `tracing` macros and `Span`s not checking `log::max_level` + before emitting `log` records (#870) + +### Changed + +- **macros**: Macros now check the global max level (`LevelFilter::current`) + before the per-callsite cache when determining if a span or event is enabled. 
+ This significantly improves performance in some use cases (#853) +- **macros**: Simplified the code generated by macro expansion significantly, + which may improve compile times and/or `rustc` optimizatation of surrounding + code (#869, #869) +- **macros**: Macros now check the static max level before checking any runtime + filtering, improving performance when a span or event is disabled by a + `static_max_level_XXX` feature flag (#868) +- `LevelFilter` is now a re-export of the `tracing_core::LevelFilter` type, it + can now be used interchangably with the versions in `tracing-core` and + `tracing-subscriber` (#853) +- Significant performance improvements when comparing `LevelFilter`s and + `Level`s (#853) +- Updated the minimum `tracing-core` dependency to 0.1.12 (#853) + +### Added + +- **macros**: Quoted string literals may now be used as field names, to allow + fields whose names are not valid Rust identifiers (#790) +- **docs**: Several documentation improvements (#850, #857, #841) +- `LevelFilter::current()` function, which returns the highest level that any + subscriber will enable (#853) +- `Subscriber::max_level_hint` optional trait method, for setting the value + returned by `LevelFilter::current()` (#853) + +Thanks to new contributors @cuviper, @ethanboxx, @ben0x539, @dignati, +@colelawrence, and @rbtcollins for helping out with this release! + +# 0.1.17 (July 22, 2020) + +### Changed + +- **log**: Moved verbose span enter/exit log records to "tracing::span::active" + target, allowing them to be filtered separately (#833) +- **log**: All span lifecycle log records without fields now have the `Trace` + log filter, to guard against `log` users enabling them by default with blanket + level filtering (#833) + +### Fixed + +- **log**/**macros**: Fixed missing implicit imports of the + `tracing::field::debug` and `tracing::field::display` functions inside the + macros when the "log" feature is enabled (#835) + +# 0.1.16 (July 8, 2020) + +### Added + +- **attributes**: Support for arbitrary expressions as fields in `#[instrument]` (#672) +- **attributes**: `#[instrument]` now emits a compiler warning when ignoring unrecognized + input (#672, #786) +- Improved documentation on using `tracing` in async code (#769) + +### Changed + +- Updated `tracing-core` dependency to 0.1.11 + +### Fixed + +- **macros**: Excessive monomorphization in macros, which could lead to + longer compilation times (#787) +- **log**: Compiler warnings in macros when `log` or `log-always` features + are enabled (#753) +- Compiler error when `tracing-core/std` feature is enabled but `tracing/std` is + not (#760) + +Thanks to @nagisa for contributing to this release! + +# 0.1.15 (June 2, 2020) + +### Changed + +- **macros**: Replaced use of legacy `local_inner_macros` with `$crate::` (#740) + +### Added + +- Docs fixes and improvements (#742, #731, #730) + +Thanks to @bnjjj, @blaenk, and @LukeMathWalker for contributing to this release! 
+ +# 0.1.14 (May 14, 2020) + +### Added + +- **log**: When using the [`log`] compatibility feature alongside a `tracing` + `Subscriber`, log records for spans now include span IDs (#613) +- **attributes**: Support for using `#[instrument]` on methods that are part of + [`async-trait`] trait implementations (#711) +- **attributes**: Optional `#[instrument(err)]` argument to automatically emit + an event if an instrumented function returns `Err` (#637) +- Added `#[must_use]` attribute to the guard returned by + `subscriber::set_default` (#685) + +### Changed + +- **log**: Made [`log`] records emitted by spans much less noisy when span IDs are + not available (#613) + +### Fixed + +- Several typos in the documentation (#656, #710, #715) + +Thanks to @FintanH, @shepmaster, @inanna-malick, @zekisharif, @bkchr, @majecty, +@ilana and @nightmared for contributing to this release! + +[`async-trait`]: https://crates.io/crates/async-trait +[`log`]: https://crates.io/crates/log + +# 0.1.13 (February 26, 2019) + +### Added + +- **field**: `field::Empty` type for declaring empty fields whose values will be + recorded later (#548) +- **field**: `field::Value` implementations for `Wrapping` and `NonZero*` + numbers (#538) +- **attributes**: Support for adding arbitrary literal fields to spans generated + by `#[instrument]` (#569) +- **attributes**: `#[instrument]` now emits a helpful compiler error when + attempting to skip a function parameter (#600) + +### Changed + +- **attributes**: The `#[instrument]` attribute was placed under an on-by-default + feature flag "attributes" (#603) + +### Fixed + +- Broken and unresolvable links in RustDoc (#595) + +Thanks to @oli-cosmian and @Kobzol for contributing to this release! + +# 0.1.12 (January 11, 2019) + +### Added + +- `Span::with_subscriber` method to access the subscriber that tracks a `Span` + (#503) +- API documentation now shows which features are required by feature-flagged + items (#523) +- Improved README examples (#496) +- Documentation links to related crates (#507) + +# 0.1.11 (December 20, 2019) + +### Added + +- `Span::is_none` method (#475) +- `LevelFilter::into_level` method (#470) +- `LevelFilter::from_level` function and `From` impl (#471) +- Documented minimum supported Rust version (#482) + +### Fixed + +- Incorrect parameter type to `Span::follows_from` that made it impossible to + call (#467) +- Missing whitespace in `log` records generated when enabling the `log` feature + flag (#484) +- Typos and missing links in documentation (#405, #423, #439) + +# 0.1.10 (October 23, 2019) + +### Added + +- Support for destructuring in arguments to `#[instrument]`ed functions (#397) +- Generated field for `self` parameters when `#[instrument]`ing methods (#397) +- Optional `skip` argument to `#[instrument]` for excluding function parameters + from generated spans (#359) +- Added `dispatcher::set_default` and `subscriber::set_default` APIs, which + return a drop guard (#388) + +### Fixed + +- Some minor documentation errors (#356, #370) + +# 0.1.9 (September 13, 2019) + +### Fixed + +- Fixed `#[instrument]`ed async functions not compiling on `nightly-2019-09-11` + or newer (#342) + +### Changed + +- Significantly reduced performance impact of skipped spans and events when a + `Subscriber` is not in use (#326) +- The `log` feature will now only cause `tracing` spans and events to emit log + records when a `Subscriber` is not in use (#346) + +### Added + +- Added support for overriding the name of the span generated by `#[instrument]` + (#330) +- 
`log-always` feature flag to emit log records even when a `Subscriber` is set + (#346) + +# 0.1.8 (September 3, 2019) + +### Changed + +- Reorganized and improved API documentation (#317) + +### Removed + +- Dev-dependencies on `ansi_term` and `humantime` crates, which were used only + for examples (#316) + +# 0.1.7 (August 30, 2019) + +### Changed + +- New (curly-brace free) event message syntax to place the message in the first + field rather than the last (#309) + +### Fixed + +- Fixed a regression causing macro stack exhaustion when the `log` feature flag + is enabled (#304) + +# 0.1.6 (August 20, 2019) + +### Added + +- `std::error::Error` as a new primitive type (#277) +- Support for mixing key-value fields and `format_args` messages without curly + braces as delimiters (#288) + +### Changed + +- `tracing-core` dependency to 0.1.5 (#294) +- `tracing-attributes` dependency to 0.1.2 (#297) + +# 0.1.5 (August 9, 2019) + +### Added + +- Support for `no-std` + `liballoc` (#263) + +### Changed + +- Using the `#[instrument]` attribute on `async fn`s no longer requires a + feature flag (#258) + +### Fixed + +- The `#[instrument]` macro now works on generic functions (#262) + +# 0.1.4 (August 8, 2019) + +### Added + +- `#[instrument]` attribute for automatically adding spans to functions (#253) + +# 0.1.3 (July 11, 2019) + +### Added + +- Log messages when a subscriber indicates that a span has closed, when the + `log` feature flag is enabled (#180). + +### Changed + +- `tracing-core` minimum dependency version to 0.1.2 (#174). + +### Fixed + +- Fixed an issue where event macro invocations with a single field, using local + variable shorthand, would recur infinitely (#166). +- Fixed uses of deprecated `tracing-core` APIs (#174). + +# 0.1.2 (July 6, 2019) + +### Added + +- `Span::none()` constructor, which does not require metadata and + returns a completely empty span (#147). +- `Span::current()` function, returning the current span if it is + known to the subscriber (#148). + +### Fixed + +- Broken macro imports when used prefixed with `tracing::` (#152). + +# 0.1.1 (July 3, 2019) + +### Changed + +- `cfg_if` dependency to 0.1.9. + +### Fixed + +- Compilation errors when the `log` feature is enabled (#131). +- Unclear wording and typos in documentation (#124, #128, #142). + +# 0.1.0 (June 27, 2019) + +- Initial release diff --git a/third_party/rust/tracing/Cargo.toml b/third_party/rust/tracing/Cargo.toml new file mode 100644 index 000000000000..44df820f926e --- /dev/null +++ b/third_party/rust/tracing/Cargo.toml @@ -0,0 +1,114 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2018" +rust-version = "1.49.0" +name = "tracing" +version = "0.1.34" +authors = [ + "Eliza Weisman ", + "Tokio Contributors ", +] +description = """ +Application-level tracing for Rust. 
+""" +homepage = "https://tokio.rs" +readme = "README.md" +keywords = [ + "logging", + "tracing", + "metrics", + "async", +] +categories = [ + "development-tools::debugging", + "development-tools::profiling", + "asynchronous", + "no-std", +] +license = "MIT" +repository = "https://github.com/tokio-rs/tracing" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = [ + "--cfg", + "docsrs", + "--cfg", + "tracing_unstable", +] +rustc-args = [ + "--cfg", + "tracing_unstable", +] + +[[bench]] +name = "subscriber" +harness = false + +[[bench]] +name = "no_subscriber" +harness = false + +[dependencies.cfg-if] +version = "1.0.0" + +[dependencies.log] +version = "0.4" +optional = true + +[dependencies.pin-project-lite] +version = "0.2" + +[dependencies.tracing-attributes] +version = "0.1.20" +optional = true + +[dependencies.tracing-core] +version = "0.1.22" +default-features = false + +[dev-dependencies.criterion] +version = "0.3" +default_features = false + +[dev-dependencies.log] +version = "0.4" + +[features] +async-await = [] +attributes = ["tracing-attributes"] +default = [ + "std", + "attributes", +] +log-always = ["log"] +max_level_debug = [] +max_level_error = [] +max_level_info = [] +max_level_off = [] +max_level_trace = [] +max_level_warn = [] +release_max_level_debug = [] +release_max_level_error = [] +release_max_level_info = [] +release_max_level_off = [] +release_max_level_trace = [] +release_max_level_warn = [] +std = ["tracing-core/std"] +valuable = ["tracing-core/valuable"] + +[target."cfg(target_arch = \"wasm32\")".dev-dependencies.wasm-bindgen-test] +version = "^0.3" + +[badges.maintenance] +status = "actively-developed" diff --git a/third_party/rust/tracing/LICENSE b/third_party/rust/tracing/LICENSE new file mode 100644 index 000000000000..cdb28b4b56a4 --- /dev/null +++ b/third_party/rust/tracing/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2019 Tokio Contributors + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/third_party/rust/tracing/README.md b/third_party/rust/tracing/README.md new file mode 100644 index 000000000000..6f1c07cc1f2e --- /dev/null +++ b/third_party/rust/tracing/README.md @@ -0,0 +1,463 @@ +![Tracing — Structured, application-level diagnostics][splash] + +[splash]: https://raw.githubusercontent.com/tokio-rs/tracing/master/assets/splash.svg + +# tracing + +Application-level tracing for Rust. 
+ +[![Crates.io][crates-badge]][crates-url] +[![Documentation][docs-badge]][docs-url] +[![Documentation (master)][docs-master-badge]][docs-master-url] +[![MIT licensed][mit-badge]][mit-url] +[![Build Status][actions-badge]][actions-url] +[![Discord chat][discord-badge]][discord-url] + +[Documentation][docs-url] | [Chat][discord-url] + +[crates-badge]: https://img.shields.io/crates/v/tracing.svg +[crates-url]: https://crates.io/crates/tracing/0.1.34 +[docs-badge]: https://docs.rs/tracing/badge.svg +[docs-url]: https://docs.rs/tracing/0.1.34 +[docs-master-badge]: https://img.shields.io/badge/docs-master-blue +[docs-master-url]: https://tracing-rs.netlify.com/tracing +[mit-badge]: https://img.shields.io/badge/license-MIT-blue.svg +[mit-url]: LICENSE +[actions-badge]: https://github.com/tokio-rs/tracing/workflows/CI/badge.svg +[actions-url]:https://github.com/tokio-rs/tracing/actions?query=workflow%3ACI +[discord-badge]: https://img.shields.io/discord/500028886025895936?logo=discord&label=discord&logoColor=white +[discord-url]: https://discord.gg/EeF3cQw + +## Overview + +`tracing` is a framework for instrumenting Rust programs to collect +structured, event-based diagnostic information. + +In asynchronous systems like Tokio, interpreting traditional log messages can +often be quite challenging. Since individual tasks are multiplexed on the same +thread, associated events and log lines are intermixed making it difficult to +trace the logic flow. `tracing` expands upon logging-style diagnostics by +allowing libraries and applications to record structured events with additional +information about *temporality* and *causality* — unlike a log message, a span +in `tracing` has a beginning and end time, may be entered and exited by the +flow of execution, and may exist within a nested tree of similar spans. In +addition, `tracing` spans are *structured*, with the ability to record typed +data as well as textual messages. + +The `tracing` crate provides the APIs necessary for instrumenting libraries +and applications to emit trace data. + +*Compiler support: [requires `rustc` 1.49+][msrv]* + +[msrv]: #supported-rust-versions + +## Usage + +(The examples below are borrowed from the `log` crate's yak-shaving +[example](https://docs.rs/log/0.4.10/log/index.html#examples), modified to +idiomatic `tracing`.) + +### In Applications + +In order to record trace events, executables have to use a `Subscriber` +implementation compatible with `tracing`. A `Subscriber` implements a way of +collecting trace data, such as by logging it to standard output. [`tracing_subscriber`](https://docs.rs/tracing-subscriber/)'s +[`fmt` module](https://docs.rs/tracing-subscriber/0.3/tracing_subscriber/fmt/index.html) provides reasonable defaults. +Additionally, `tracing-subscriber` is able to consume messages emitted by `log`-instrumented libraries and modules. + +The simplest way to use a subscriber is to call the `set_global_default` function. + +```rust +use tracing::{info, Level}; +use tracing_subscriber::FmtSubscriber; + +fn main() { + // a builder for `FmtSubscriber`. + let subscriber = FmtSubscriber::builder() + // all spans/events with a level higher than TRACE (e.g, debug, info, warn, etc.) + // will be written to stdout. + .with_max_level(Level::TRACE) + // completes the builder. + .finish(); + + tracing::subscriber::set_global_default(subscriber) + .expect("setting default subscriber failed"); + + let number_of_yaks = 3; + // this creates a new event, outside of any spans. 
+ info!(number_of_yaks, "preparing to shave yaks"); + + let number_shaved = yak_shave::shave_all(number_of_yaks); + info!( + all_yaks_shaved = number_shaved == number_of_yaks, + "yak shaving completed." + ); +} +``` + +```toml +[dependencies] +tracing = "0.1" +tracing-subscriber = "0.2.0" +``` + +This subscriber will be used as the default in all threads for the remainder of the duration +of the program, similar to how loggers work in the `log` crate. + +In addition, you can locally override the default subscriber. For example: + +```rust +use tracing::{info, Level}; +use tracing_subscriber::FmtSubscriber; + +fn main() { + let subscriber = tracing_subscriber::FmtSubscriber::builder() + // all spans/events with a level higher than TRACE (e.g, debug, info, warn, etc.) + // will be written to stdout. + .with_max_level(Level::TRACE) + // builds the subscriber. + .finish(); + + tracing::subscriber::with_default(subscriber, || { + info!("This will be logged to stdout"); + }); + info!("This will _not_ be logged to stdout"); +} +``` + +This approach allows trace data to be collected by multiple subscribers +within different contexts in the program. Note that the override only applies to the +currently executing thread; other threads will not see the change from with_default. + +Any trace events generated outside the context of a subscriber will not be collected. + +Once a subscriber has been set, instrumentation points may be added to the +executable using the `tracing` crate's macros. + +### In Libraries + +Libraries should only rely on the `tracing` crate and use the provided macros +and types to collect whatever information might be useful to downstream consumers. + +```rust +use std::{error::Error, io}; +use tracing::{debug, error, info, span, warn, Level}; + +// the `#[tracing::instrument]` attribute creates and enters a span +// every time the instrumented function is called. The span is named after the +// the function or method. Paramaters passed to the function are recorded as fields. +#[tracing::instrument] +pub fn shave(yak: usize) -> Result<(), Box> { + // this creates an event at the DEBUG level with two fields: + // - `excitement`, with the key "excitement" and the value "yay!" + // - `message`, with the key "message" and the value "hello! I'm gonna shave a yak." + // + // unlike other fields, `message`'s shorthand initialization is just the string itself. + debug!(excitement = "yay!", "hello! I'm gonna shave a yak."); + if yak == 3 { + warn!("could not locate yak!"); + // note that this is intended to demonstrate `tracing`'s features, not idiomatic + // error handling! in a library or application, you should consider returning + // a dedicated `YakError`. libraries like snafu or thiserror make this easy. + return Err(io::Error::new(io::ErrorKind::Other, "shaving yak failed!").into()); + } else { + debug!("yak shaved successfully"); + } + Ok(()) +} + +pub fn shave_all(yaks: usize) -> usize { + // Constructs a new span named "shaving_yaks" at the TRACE level, + // and a field whose key is "yaks". This is equivalent to writing: + // + // let span = span!(Level::TRACE, "shaving_yaks", yaks = yaks); + // + // local variables (`yaks`) can be used as field values + // without an assignment, similar to struct initializers. 
+ let _span_ = span!(Level::TRACE, "shaving_yaks", yaks).entered(); + + info!("shaving yaks"); + + let mut yaks_shaved = 0; + for yak in 1..=yaks { + let res = shave(yak); + debug!(yak, shaved = res.is_ok()); + + if let Err(ref error) = res { + // Like spans, events can also use the field initialization shorthand. + // In this instance, `yak` is the field being initalized. + error!(yak, error = error.as_ref(), "failed to shave yak!"); + } else { + yaks_shaved += 1; + } + debug!(yaks_shaved); + } + + yaks_shaved +} +``` + +```toml +[dependencies] +tracing = "0.1" +``` + +Note: Libraries should *NOT* call `set_global_default()`, as this will cause +conflicts when executables try to set the default later. + +### In Asynchronous Code + +If you are instrumenting code that make use of +[`std::future::Future`](https://doc.rust-lang.org/stable/std/future/trait.Future.html) +or async/await, avoid using the `Span::enter` method. The following example +_will not_ work: + +```rust +async { + let _s = span.enter(); + // ... +} +``` +```rust +async { + let _s = tracing::span!(...).entered(); + // ... +} +``` + +The span guard `_s` will not exit until the future generated by the `async` block is complete. +Since futures and spans can be entered and exited _multiple_ times without them completing, +the span remains entered for as long as the future exists, rather than being entered only when +it is polled, leading to very confusing and incorrect output. +For more details, see [the documentation on closing spans](https://tracing.rs/tracing/span/index.html#closing-spans). + +There are two ways to instrument asynchronous code. The first is through the +[`Future::instrument`](https://docs.rs/tracing/latest/tracing/trait.Instrument.html#method.instrument) combinator: + +```rust +use tracing::Instrument; + +let my_future = async { + // ... +}; + +my_future + .instrument(tracing::info_span!("my_future")) + .await +``` + +`Future::instrument` attaches a span to the future, ensuring that the span's lifetime +is as long as the future's. + +The second, and preferred, option is through the +[`#[instrument]`](https://docs.rs/tracing/0.1.34/tracing/attr.instrument.html) +attribute: + +```rust +use tracing::{info, instrument}; +use tokio::{io::AsyncWriteExt, net::TcpStream}; +use std::io; + +#[instrument] +async fn write(stream: &mut TcpStream) -> io::Result { + let result = stream.write(b"hello world\n").await; + info!("wrote to stream; success={:?}", result.is_ok()); + result +} +``` + +Under the hood, the `#[instrument]` macro performs the same explicit span +attachment that `Future::instrument` does. + +### Concepts + +This crate provides macros for creating `Span`s and `Event`s, which represent +periods of time and momentary events within the execution of a program, +respectively. + +As a rule of thumb, _spans_ should be used to represent discrete units of work +(e.g., a given request's lifetime in a server) or periods of time spent in a +given context (e.g., time spent interacting with an instance of an external +system, such as a database). In contrast, _events_ should be used to represent +points in time within a span — a request returned with a given status code, +_n_ new items were taken from a queue, and so on. + +`Span`s are constructed using the `span!` macro, and then _entered_ +to indicate that some code takes place within the context of that `Span`: + +```rust +use tracing::{span, Level}; + +// Construct a new span named "my span". 
+let mut span = span!(Level::INFO, "my span"); +span.in_scope(|| { + // Any trace events in this closure or code called by it will occur within + // the span. +}); +// Dropping the span will close it, indicating that it has ended. +``` + +The [`#[instrument]`](https://docs.rs/tracing/0.1.34/tracing/attr.instrument.html) attribute macro +can reduce some of this boilerplate: + +```rust +use tracing::{instrument}; + +#[instrument] +pub fn my_function(my_arg: usize) { + // This event will be recorded inside a span named `my_function` with the + // field `my_arg`. + tracing::info!("inside my_function!"); + // ... +} +``` + +The `Event` type represent an event that occurs instantaneously, and is +essentially a `Span` that cannot be entered. They are created using the `event!` +macro: + +```rust +use tracing::{event, Level}; + +event!(Level::INFO, "something has happened!"); +``` + +Users of the [`log`] crate should note that `tracing` exposes a set of macros for +creating `Event`s (`trace!`, `debug!`, `info!`, `warn!`, and `error!`) which may +be invoked with the same syntax as the similarly-named macros from the `log` +crate. Often, the process of converting a project to use `tracing` can begin +with a simple drop-in replacement. + +## Supported Rust Versions + +Tracing is built against the latest stable release. The minimum supported +version is 1.42. The current Tracing version is not guaranteed to build on Rust +versions earlier than the minimum supported version. + +Tracing follows the same compiler support policies as the rest of the Tokio +project. The current stable Rust compiler and the three most recent minor +versions before it will always be supported. For example, if the current stable +compiler version is 1.45, the minimum supported version will not be increased +past 1.42, three minor versions prior. Increasing the minimum supported compiler +version is not considered a semver breaking change as long as doing so complies +with this policy. + +## Ecosystem + +### Related Crates + +In addition to `tracing` and `tracing-core`, the [`tokio-rs/tracing`] repository +contains several additional crates designed to be used with the `tracing` ecosystem. +This includes a collection of `Subscriber` implementations, as well as utility +and adapter crates to assist in writing `Subscriber`s and instrumenting +applications. + +In particular, the following crates are likely to be of interest: + +- [`tracing-futures`] provides a compatibility layer with the `futures` + crate, allowing spans to be attached to `Future`s, `Stream`s, and `Executor`s. +- [`tracing-subscriber`] provides `Subscriber` implementations and + utilities for working with `Subscriber`s. This includes a [`FmtSubscriber`] + `FmtSubscriber` for logging formatted trace data to stdout, with similar + filtering and formatting to the [`env_logger`] crate. +- [`tracing-log`] provides a compatibility layer with the [`log`] crate, + allowing log messages to be recorded as `tracing` `Event`s within the + trace tree. This is useful when a project using `tracing` have + dependencies which use `log`. Note that if you're using + `tracing-subscriber`'s `FmtSubscriber`, you don't need to depend on + `tracing-log` directly. + +Additionally, there are also several third-party crates which are not +maintained by the `tokio` project. These include: + +- [`tracing-timing`] implements inter-event timing metrics on top of `tracing`. + It provides a subscriber that records the time elapsed between pairs of + `tracing` events and generates histograms. 
+- [`tracing-opentelemetry`] provides a subscriber for emitting traces to + [OpenTelemetry]-compatible distributed tracing systems. +- [`tracing-honeycomb`] Provides a layer that reports traces spanning multiple machines to [honeycomb.io]. Backed by [`tracing-distributed`]. +- [`tracing-distributed`] Provides a generic implementation of a layer that reports traces spanning multiple machines to some backend. +- [`tracing-actix`] provides `tracing` integration for the `actix` actor + framework. +- [`tracing-gelf`] implements a subscriber for exporting traces in Greylog + GELF format. +- [`tracing-coz`] provides integration with the [coz] causal profiler + (Linux-only). +- [`test-log`] takes care of initializing `tracing` for tests, based on + environment variables with an `env_logger` compatible syntax. +- [`tracing-unwrap`] provides convenience methods to report failed unwraps on `Result` or `Option` types to a `Subscriber`. +- [`diesel-tracing`] provides integration with [`diesel`] database connections. +- [`tracing-tracy`] provides a way to collect [Tracy] profiles in instrumented + applications. +- [`tracing-elastic-apm`] provides a layer for reporting traces to [Elastic APM]. +- [`tracing-etw`] provides a layer for emitting Windows [ETW] events. +- [`tracing-fluent-assertions`] provides a fluent assertions-style testing + framework for validating the behavior of `tracing` spans. +- [`sentry-tracing`] provides a layer for reporting events and traces to [Sentry]. +- [`tracing-loki`] provides a layer for shipping logs to [Grafana Loki]. + +If you're the maintainer of a `tracing` ecosystem crate not listed above, +please let us know! We'd love to add your project to the list! + +[`tracing-timing`]: https://crates.io/crates/tracing-timing +[`tracing-opentelemetry`]: https://crates.io/crates/tracing-opentelemetry +[OpenTelemetry]: https://opentelemetry.io/ +[`tracing-honeycomb`]: https://crates.io/crates/tracing-honeycomb +[`tracing-distributed`]: https://crates.io/crates/tracing-distributed +[honeycomb.io]: https://www.honeycomb.io/ +[`tracing-actix`]: https://crates.io/crates/tracing-actix +[`tracing-gelf`]: https://crates.io/crates/tracing-gelf +[`tracing-coz`]: https://crates.io/crates/tracing-coz +[coz]: https://github.com/plasma-umass/coz +[`test-log`]: https://crates.io/crates/test-log +[`tracing-unwrap`]: https://docs.rs/tracing-unwrap +[`diesel`]: https://crates.io/crates/diesel +[`diesel-tracing`]: https://crates.io/crates/diesel-tracing +[`tracing-tracy`]: https://crates.io/crates/tracing-tracy +[Tracy]: https://github.com/wolfpld/tracy +[`tracing-elastic-apm`]: https://crates.io/crates/tracing-elastic-apm +[Elastic APM]: https://www.elastic.co/apm +[`tracing-etw`]: https://github.com/microsoft/tracing-etw +[ETW]: https://docs.microsoft.com/en-us/windows/win32/etw/about-event-tracing +[`tracing-fluent-assertions`]: https://crates.io/crates/tracing-fluent-assertions +[`sentry-tracing`]: https://crates.io/crates/sentry-tracing +[Sentry]: https://sentry.io/welcome/ +[`tracing-loki`]: https://crates.io/crates/tracing-loki +[Grafana Loki]: https://grafana.com/oss/loki/ + +**Note:** that some of the ecosystem crates are currently unreleased and +undergoing active development. They may be less stable than `tracing` and +`tracing-core`. 
+ +[`log`]: https://docs.rs/log/0.4.6/log/ +[`tokio-rs/tracing`]: https://github.com/tokio-rs/tracing +[`tracing-futures`]: https://github.com/tokio-rs/tracing/tree/master/tracing-futures +[`tracing-subscriber`]: https://github.com/tokio-rs/tracing/tree/master/tracing-subscriber +[`tracing-log`]: https://github.com/tokio-rs/tracing/tree/master/tracing-log +[`env_logger`]: https://crates.io/crates/env_logger +[`FmtSubscriber`]: https://docs.rs/tracing-subscriber/latest/tracing_subscriber/fmt/struct.Subscriber.html +[`examples`]: https://github.com/tokio-rs/tracing/tree/master/examples + +## Supported Rust Versions + +Tracing is built against the latest stable release. The minimum supported +version is 1.49. The current Tracing version is not guaranteed to build on Rust +versions earlier than the minimum supported version. + +Tracing follows the same compiler support policies as the rest of the Tokio +project. The current stable Rust compiler and the three most recent minor +versions before it will always be supported. For example, if the current stable +compiler version is 1.45, the minimum supported version will not be increased +past 1.42, three minor versions prior. Increasing the minimum supported compiler +version is not considered a semver breaking change as long as doing so complies +with this policy. + +## License + +This project is licensed under the [MIT license](LICENSE). + +### Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in Tokio by you, shall be licensed as MIT, without any additional +terms or conditions. diff --git a/third_party/rust/tracing/benches/global_subscriber.rs b/third_party/rust/tracing/benches/global_subscriber.rs new file mode 100644 index 000000000000..83519610a866 --- /dev/null +++ b/third_party/rust/tracing/benches/global_subscriber.rs @@ -0,0 +1,136 @@ +use std::fmt::Write; + +use criterion::{black_box, criterion_group, criterion_main, Criterion}; +use tracing::Level; + +use tracing::{span, Event, Id, Metadata}; + +/// A subscriber that is enabled but otherwise does nothing. 
+struct EnabledSubscriber; + +impl tracing::Subscriber for EnabledSubscriber { + fn new_span(&self, span: &span::Attributes<'_>) -> Id { + let _ = span; + Id::from_u64(0xDEAD_FACE) + } + + fn event(&self, event: &Event<'_>) { + let _ = event; + } + + fn record(&self, span: &Id, values: &span::Record<'_>) { + let _ = (span, values); + } + + fn record_follows_from(&self, span: &Id, follows: &Id) { + let _ = (span, follows); + } + + fn enabled(&self, metadata: &Metadata<'_>) -> bool { + let _ = metadata; + true + } + + fn enter(&self, span: &Id) { + let _ = span; + } + + fn exit(&self, span: &Id) { + let _ = span; + } +} + +const NOP_LOGGER: NopLogger = NopLogger; + +struct NopLogger; + +impl log::Log for NopLogger { + fn enabled(&self, _metadata: &log::Metadata) -> bool { + true + } + + fn log(&self, record: &log::Record) { + if self.enabled(record.metadata()) { + let mut this = self; + let _ = write!(this, "{}", record.args()); + } + } + + fn flush(&self) {} +} + +impl Write for &NopLogger { + fn write_str(&mut self, s: &str) -> std::fmt::Result { + black_box(s); + Ok(()) + } +} + +const N_SPANS: usize = 100; + +fn criterion_benchmark(c: &mut Criterion) { + let mut c = c.benchmark_group("global/subscriber"); + let _ = tracing::subscriber::set_global_default(EnabledSubscriber); + let _ = log::set_logger(&NOP_LOGGER); + log::set_max_level(log::LevelFilter::Trace); + c.bench_function("span_no_fields", |b| b.iter(|| span!(Level::TRACE, "span"))); + + c.bench_function("event", |b| { + b.iter(|| { + tracing::event!(Level::TRACE, "hello"); + }) + }); + + c.bench_function("enter_span", |b| { + let span = span!(Level::TRACE, "span"); + #[allow(clippy::unit_arg)] + b.iter(|| black_box(span.in_scope(|| {}))) + }); + + c.bench_function("span_repeatedly", |b| { + #[inline] + fn mk_span(i: u64) -> tracing::Span { + span!(Level::TRACE, "span", i = i) + } + + let n = black_box(N_SPANS); + b.iter(|| (0..n).fold(mk_span(0), |_, i| mk_span(i as u64))) + }); + + c.bench_function("span_with_fields", |b| { + b.iter(|| { + span!( + Level::TRACE, + "span", + foo = "foo", + bar = "bar", + baz = 3, + quuux = tracing::field::debug(0.99) + ) + }); + }); +} + +fn bench_dispatch(c: &mut Criterion) { + let mut group = c.benchmark_group("global/dispatch"); + let _ = tracing::subscriber::set_global_default(EnabledSubscriber); + let _ = log::set_logger(&NOP_LOGGER); + log::set_max_level(log::LevelFilter::Trace); + group.bench_function("get_ref", |b| { + b.iter(|| { + tracing::dispatcher::get_default(|current| { + black_box(¤t); + }) + }) + }); + group.bench_function("get_clone", |b| { + b.iter(|| { + let current = tracing::dispatcher::get_default(|current| current.clone()); + black_box(current); + }) + }); + group.finish(); +} + +criterion_group!(benches, criterion_benchmark, bench_dispatch); +criterion_main!(benches); diff --git a/third_party/rust/tracing/benches/no_subscriber.rs b/third_party/rust/tracing/benches/no_subscriber.rs new file mode 100644 index 000000000000..e0f82b56a085 --- /dev/null +++ b/third_party/rust/tracing/benches/no_subscriber.rs @@ -0,0 +1,101 @@ +use criterion::{black_box, criterion_group, criterion_main, Criterion}; +use tracing::Level; + +struct FakeEmptySpan { + inner: Option<(usize, std::sync::Arc<()>)>, + meta: Option<&'static ()>, +} + +impl FakeEmptySpan { + fn new() -> Self { + Self { + inner: None, + meta: None, + } + } +} + +impl Drop for FakeEmptySpan { + fn drop(&mut self) { + black_box(&self.inner); + black_box(&self.meta); + } +} + +fn bench_no_subscriber(c: &mut Criterion) { + use 
std::sync::atomic::{AtomicUsize, Ordering}; + + let mut group = c.benchmark_group("no_subscriber"); + + group.bench_function("span", |b| { + b.iter(|| { + let span = tracing::span!(Level::TRACE, "span"); + black_box(&span); + }) + }); + group.bench_function("span_enter", |b| { + b.iter(|| { + let span = tracing::span!(Level::TRACE, "span"); + let _e = span.enter(); + }) + }); + group.bench_function("empty_span", |b| { + b.iter(|| { + let span = tracing::span::Span::none(); + black_box(&span); + }); + }); + group.bench_function("empty_struct", |b| { + b.iter(|| { + let span = FakeEmptySpan::new(); + black_box(&span); + }) + }); + group.bench_function("event", |b| { + b.iter(|| { + tracing::event!(Level::TRACE, "hello"); + }) + }); + group.bench_function("relaxed_load", |b| { + let foo = AtomicUsize::new(1); + b.iter(|| black_box(foo.load(Ordering::Relaxed))); + }); + group.bench_function("acquire_load", |b| { + let foo = AtomicUsize::new(1); + b.iter(|| black_box(foo.load(Ordering::Acquire))) + }); + group.bench_function("log", |b| { + b.iter(|| { + log::log!(log::Level::Info, "log"); + }) + }); + group.finish(); +} + +fn bench_fields(c: &mut Criterion) { + let mut group = c.benchmark_group("no_subscriber_field"); + group.bench_function("span", |b| { + b.iter(|| { + black_box(tracing::span!( + Level::TRACE, + "span", + foo = tracing::field::display(format!("bar {:?}", 2)) + )); + }) + }); + group.bench_function("event", |b| { + b.iter(|| { + tracing::event!( + Level::TRACE, + foo = tracing::field::display(format!("bar {:?}", 2)) + ); + }) + }); + group.bench_function("log", |b| { + b.iter(|| log::log!(log::Level::Trace, "{}", format!("bar {:?}", 2))) + }); + group.finish(); +} + +criterion_group!(benches, bench_no_subscriber, bench_fields); +criterion_main!(benches); diff --git a/third_party/rust/tracing/benches/subscriber.rs b/third_party/rust/tracing/benches/subscriber.rs new file mode 100644 index 000000000000..c6418010f4de --- /dev/null +++ b/third_party/rust/tracing/benches/subscriber.rs @@ -0,0 +1,189 @@ +use criterion::{black_box, criterion_group, criterion_main, Criterion}; +use tracing::Level; + +use std::{ + fmt, + sync::{Mutex, MutexGuard}, +}; +use tracing::{field, span, Event, Id, Metadata}; + +/// A subscriber that is enabled but otherwise does nothing. +struct EnabledSubscriber; + +impl tracing::Subscriber for EnabledSubscriber { + fn new_span(&self, span: &span::Attributes<'_>) -> Id { + let _ = span; + Id::from_u64(0xDEAD_FACE) + } + + fn event(&self, event: &Event<'_>) { + let _ = event; + } + + fn record(&self, span: &Id, values: &span::Record<'_>) { + let _ = (span, values); + } + + fn record_follows_from(&self, span: &Id, follows: &Id) { + let _ = (span, follows); + } + + fn enabled(&self, metadata: &Metadata<'_>) -> bool { + let _ = metadata; + true + } + + fn enter(&self, span: &Id) { + let _ = span; + } + + fn exit(&self, span: &Id) { + let _ = span; + } +} + +/// Simulates a subscriber that records span data. 
+struct VisitingSubscriber(Mutex); + +struct Visitor<'a>(MutexGuard<'a, String>); + +impl<'a> field::Visit for Visitor<'a> { + fn record_debug(&mut self, _field: &field::Field, value: &dyn fmt::Debug) { + use std::fmt::Write; + let _ = write!(&mut *self.0, "{:?}", value); + } +} + +impl tracing::Subscriber for VisitingSubscriber { + fn new_span(&self, span: &span::Attributes<'_>) -> Id { + let mut visitor = Visitor(self.0.lock().unwrap()); + span.record(&mut visitor); + Id::from_u64(0xDEAD_FACE) + } + + fn record(&self, _span: &Id, values: &span::Record<'_>) { + let mut visitor = Visitor(self.0.lock().unwrap()); + values.record(&mut visitor); + } + + fn event(&self, event: &Event<'_>) { + let mut visitor = Visitor(self.0.lock().unwrap()); + event.record(&mut visitor); + } + + fn record_follows_from(&self, span: &Id, follows: &Id) { + let _ = (span, follows); + } + + fn enabled(&self, metadata: &Metadata<'_>) -> bool { + let _ = metadata; + true + } + + fn enter(&self, span: &Id) { + let _ = span; + } + + fn exit(&self, span: &Id) { + let _ = span; + } +} + +const N_SPANS: usize = 100; + +fn criterion_benchmark(c: &mut Criterion) { + c.bench_function("span_no_fields", |b| { + tracing::subscriber::with_default(EnabledSubscriber, || { + b.iter(|| span!(Level::TRACE, "span")) + }); + }); + + c.bench_function("enter_span", |b| { + tracing::subscriber::with_default(EnabledSubscriber, || { + let span = span!(Level::TRACE, "span"); + #[allow(clippy::unit_arg)] + b.iter(|| black_box(span.in_scope(|| {}))) + }); + }); + + c.bench_function("span_repeatedly", |b| { + #[inline] + fn mk_span(i: u64) -> tracing::Span { + span!(Level::TRACE, "span", i = i) + } + + let n = black_box(N_SPANS); + tracing::subscriber::with_default(EnabledSubscriber, || { + b.iter(|| (0..n).fold(mk_span(0), |_, i| mk_span(i as u64))) + }); + }); + + c.bench_function("span_with_fields", |b| { + tracing::subscriber::with_default(EnabledSubscriber, || { + b.iter(|| { + span!( + Level::TRACE, + "span", + foo = "foo", + bar = "bar", + baz = 3, + quuux = tracing::field::debug(0.99) + ) + }) + }); + }); + + c.bench_function("span_with_fields_record", |b| { + let subscriber = VisitingSubscriber(Mutex::new(String::from(""))); + tracing::subscriber::with_default(subscriber, || { + b.iter(|| { + span!( + Level::TRACE, + "span", + foo = "foo", + bar = "bar", + baz = 3, + quuux = tracing::field::debug(0.99) + ) + }) + }); + }); +} + +fn bench_dispatch(c: &mut Criterion) { + let mut group = c.benchmark_group("dispatch"); + group.bench_function("no_dispatch_get_ref", |b| { + b.iter(|| { + tracing::dispatcher::get_default(|current| { + black_box(¤t); + }) + }) + }); + group.bench_function("no_dispatch_get_clone", |b| { + b.iter(|| { + let current = tracing::dispatcher::get_default(|current| current.clone()); + black_box(current); + }) + }); + group.bench_function("get_ref", |b| { + tracing::subscriber::with_default(EnabledSubscriber, || { + b.iter(|| { + tracing::dispatcher::get_default(|current| { + black_box(¤t); + }) + }) + }) + }); + group.bench_function("get_clone", |b| { + tracing::subscriber::with_default(EnabledSubscriber, || { + b.iter(|| { + let current = tracing::dispatcher::get_default(|current| current.clone()); + black_box(current); + }) + }) + }); + group.finish(); +} + +criterion_group!(benches, criterion_benchmark, bench_dispatch); +criterion_main!(benches); diff --git a/third_party/rust/tracing/src/dispatcher.rs b/third_party/rust/tracing/src/dispatcher.rs new file mode 100644 index 000000000000..568a0314d68b --- /dev/null 
+++ b/third_party/rust/tracing/src/dispatcher.rs @@ -0,0 +1,149 @@ +//! Dispatches trace events to [`Subscriber`]s. +//! +//! The _dispatcher_ is the component of the tracing system which is responsible +//! for forwarding trace data from the instrumentation points that generate it +//! to the subscriber that collects it. +//! +//! # Using the Trace Dispatcher +//! +//! Every thread in a program using `tracing` has a _default subscriber_. When +//! events occur, or spans are created, they are dispatched to the thread's +//! current subscriber. +//! +//! ## Setting the Default Subscriber +//! +//! By default, the current subscriber is an empty implementation that does +//! nothing. To use a subscriber implementation, it must be set as the default. +//! There are two methods for doing so: [`with_default`] and +//! [`set_global_default`]. `with_default` sets the default subscriber for the +//! duration of a scope, while `set_global_default` sets a default subscriber +//! for the entire process. +//! +//! To use either of these functions, we must first wrap our subscriber in a +//! [`Dispatch`], a cloneable, type-erased reference to a subscriber. For +//! example: +//! ```rust +//! # pub struct FooSubscriber; +//! # use tracing_core::{ +//! # dispatcher, Event, Metadata, +//! # span::{Attributes, Id, Record} +//! # }; +//! # impl tracing_core::Subscriber for FooSubscriber { +//! # fn new_span(&self, _: &Attributes) -> Id { Id::from_u64(0) } +//! # fn record(&self, _: &Id, _: &Record) {} +//! # fn event(&self, _: &Event) {} +//! # fn record_follows_from(&self, _: &Id, _: &Id) {} +//! # fn enabled(&self, _: &Metadata) -> bool { false } +//! # fn enter(&self, _: &Id) {} +//! # fn exit(&self, _: &Id) {} +//! # } +//! # impl FooSubscriber { fn new() -> Self { FooSubscriber } } +//! use dispatcher::Dispatch; +//! +//! let my_subscriber = FooSubscriber::new(); +//! let my_dispatch = Dispatch::new(my_subscriber); +//! ``` +//! Then, we can use [`with_default`] to set our `Dispatch` as the default for +//! the duration of a block: +//! ```rust +//! # pub struct FooSubscriber; +//! # use tracing_core::{ +//! # dispatcher, Event, Metadata, +//! # span::{Attributes, Id, Record} +//! # }; +//! # impl tracing_core::Subscriber for FooSubscriber { +//! # fn new_span(&self, _: &Attributes) -> Id { Id::from_u64(0) } +//! # fn record(&self, _: &Id, _: &Record) {} +//! # fn event(&self, _: &Event) {} +//! # fn record_follows_from(&self, _: &Id, _: &Id) {} +//! # fn enabled(&self, _: &Metadata) -> bool { false } +//! # fn enter(&self, _: &Id) {} +//! # fn exit(&self, _: &Id) {} +//! # } +//! # impl FooSubscriber { fn new() -> Self { FooSubscriber } } +//! # let my_subscriber = FooSubscriber::new(); +//! # let my_dispatch = dispatcher::Dispatch::new(my_subscriber); +//! // no default subscriber +//! +//! # #[cfg(feature = "std")] +//! dispatcher::with_default(&my_dispatch, || { +//! // my_subscriber is the default +//! }); +//! +//! // no default subscriber again +//! ``` +//! It's important to note that `with_default` will not propagate the current +//! thread's default subscriber to any threads spawned within the `with_default` +//! block. To propagate the default subscriber to new threads, either use +//! `with_default` from the new thread, or use `set_global_default`. +//! +//! As an alternative to `with_default`, we can use [`set_global_default`] to +//! set a `Dispatch` as the default for all threads, for the lifetime of the +//! program. For example: +//! ```rust +//! # pub struct FooSubscriber; +//! 
# use tracing_core::{ +//! # dispatcher, Event, Metadata, +//! # span::{Attributes, Id, Record} +//! # }; +//! # impl tracing_core::Subscriber for FooSubscriber { +//! # fn new_span(&self, _: &Attributes) -> Id { Id::from_u64(0) } +//! # fn record(&self, _: &Id, _: &Record) {} +//! # fn event(&self, _: &Event) {} +//! # fn record_follows_from(&self, _: &Id, _: &Id) {} +//! # fn enabled(&self, _: &Metadata) -> bool { false } +//! # fn enter(&self, _: &Id) {} +//! # fn exit(&self, _: &Id) {} +//! # } +//! # impl FooSubscriber { fn new() -> Self { FooSubscriber } } +//! # let my_subscriber = FooSubscriber::new(); +//! # let my_dispatch = dispatcher::Dispatch::new(my_subscriber); +//! // no default subscriber +//! +//! dispatcher::set_global_default(my_dispatch) +//! // `set_global_default` will return an error if the global default +//! // subscriber has already been set. +//! .expect("global default was already set!"); +//! +//! // `my_subscriber` is now the default +//! ``` +//! +//!
+//! **Note**: The thread-local scoped dispatcher (`with_default`)
+//! requires the Rust standard library. `no_std` users should
+//! use [`set_global_default`] instead.
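+//!
+//! When the `std` feature is enabled, a scoped default can also be set
+//! without a closure by using [`set_default`], which returns a
+//! [`DefaultGuard`] that restores the previous default when it is dropped.
+//! A minimal sketch, using the no-op `NoSubscriber` as a stand-in subscriber:
+//!
+//! ```rust
+//! # #[cfg(feature = "std")]
+//! # {
+//! use tracing_core::dispatcher::{self, Dispatch};
+//! use tracing_core::subscriber::NoSubscriber;
+//!
+//! let my_dispatch = Dispatch::new(NoSubscriber::default());
+//!
+//! // `set_default` installs `my_dispatch` as this thread's default subscriber
+//! // and returns a guard...
+//! let guard = dispatcher::set_default(&my_dispatch);
+//! // ...so `my_dispatch` is the default subscriber here.
+//! drop(guard);
+//! // Dropping the guard restores the previously set default subscriber.
+//! # }
+//! ```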
+//! +//! ## Accessing the Default Subscriber +//! +//! A thread's current default subscriber can be accessed using the +//! [`get_default`] function, which executes a closure with a reference to the +//! currently default `Dispatch`. This is used primarily by `tracing` +//! instrumentation. +//! +//! [`Subscriber`]: crate::Subscriber +//! [`with_default`]: with_default +//! [`set_global_default`]: set_global_default +//! [`get_default`]: get_default +//! [`Dispatch`]: Dispatch +#[cfg(feature = "std")] +#[cfg_attr(docsrs, doc(cfg(feature = "std")))] +pub use tracing_core::dispatcher::set_default; +#[cfg(feature = "std")] +#[cfg_attr(docsrs, doc(cfg(feature = "std")))] +pub use tracing_core::dispatcher::with_default; +#[cfg(feature = "std")] +#[cfg_attr(docsrs, doc(cfg(feature = "std")))] +pub use tracing_core::dispatcher::DefaultGuard; +pub use tracing_core::dispatcher::{ + get_default, set_global_default, Dispatch, SetGlobalDefaultError, +}; + +/// Private API for internal use by tracing's macros. +/// +/// This function is *not* considered part of `tracing`'s public API, and has no +/// stability guarantees. If you use it, and it breaks or disappears entirely, +/// don't say we didn;'t warn you. +#[doc(hidden)] +pub use tracing_core::dispatcher::has_been_set; diff --git a/third_party/rust/tracing/src/field.rs b/third_party/rust/tracing/src/field.rs new file mode 100644 index 000000000000..b3f9fbdfcab6 --- /dev/null +++ b/third_party/rust/tracing/src/field.rs @@ -0,0 +1,170 @@ +//! `Span` and `Event` key-value data. +//! +//! Spans and events may be annotated with key-value data, referred to as known +//! as _fields_. These fields consist of a mapping from a key (corresponding to +//! a `&str` but represented internally as an array index) to a [`Value`]. +//! +//! # `Value`s and `Subscriber`s +//! +//! `Subscriber`s consume `Value`s as fields attached to [span]s or [`Event`]s. +//! The set of field keys on a given span or is defined on its [`Metadata`]. +//! When a span is created, it provides [`Attributes`] to the `Subscriber`'s +//! [`new_span`] method, containing any fields whose values were provided when +//! the span was created; and may call the `Subscriber`'s [`record`] method +//! with additional [`Record`]s if values are added for more of its fields. +//! Similarly, the [`Event`] type passed to the subscriber's [`event`] method +//! will contain any fields attached to each event. +//! +//! `tracing` represents values as either one of a set of Rust primitives +//! (`i64`, `u64`, `f64`, `bool`, and `&str`) or using a `fmt::Display` or +//! `fmt::Debug` implementation. `Subscriber`s are provided these primitive +//! value types as `dyn Value` trait objects. +//! +//! These trait objects can be formatted using `fmt::Debug`, but may also be +//! recorded as typed data by calling the [`Value::record`] method on these +//! trait objects with a _visitor_ implementing the [`Visit`] trait. This trait +//! represents the behavior used to record values of various types. For example, +//! an implementation of `Visit` might record integers by incrementing counters +//! for their field names rather than printing them. +//! +//! +//! # Using `valuable` +//! +//! `tracing`'s [`Value`] trait is intentionally minimalist: it supports only a small +//! number of Rust primitives as typed values, and only permits recording +//! user-defined types with their [`fmt::Debug`] or [`fmt::Display`] +//! implementations. However, there are some cases where it may be useful to record +//! 
nested values (such as arrays, `Vec`s, or `HashMap`s containing values), or
+//! user-defined `struct` and `enum` types without having to format them as
+//! unstructured text.
+//!
+//! To address `Value`'s limitations, `tracing` offers experimental support for
+//! the [`valuable`] crate, which provides object-safe inspection of structured
+//! values. User-defined types can implement the [`valuable::Valuable`] trait,
+//! and be recorded as a `tracing` field by calling their [`as_value`] method.
+//! If the [`Subscriber`] also supports the `valuable` crate, it can
+//! then visit those types' fields as structured values using `valuable`.
+//!
+//! **Note**: `valuable` support is an unstable feature. See the documentation
+//! on unstable features for details on how to enable it.
+//! +//! For example: +//! ```ignore +//! // Derive `Valuable` for our types: +//! use valuable::Valuable; +//! +//! #[derive(Clone, Debug, Valuable)] +//! struct User { +//! name: String, +//! age: u32, +//! address: Address, +//! } +//! +//! #[derive(Clone, Debug, Valuable)] +//! struct Address { +//! country: String, +//! city: String, +//! street: String, +//! } +//! +//! let user = User { +//! name: "Arwen Undomiel".to_string(), +//! age: 3000, +//! address: Address { +//! country: "Middle Earth".to_string(), +//! city: "Rivendell".to_string(), +//! street: "leafy lane".to_string(), +//! }, +//! }; +//! +//! // Recording `user` as a `valuable::Value` will allow the `tracing` subscriber +//! // to traverse its fields as a nested, typed structure: +//! tracing::info!(current_user = user.as_value()); +//! ``` +//! +//! Alternatively, the [`valuable()`] function may be used to convert a type +//! implementing [`Valuable`] into a `tracing` field value. +//! +//! When the `valuable` feature is enabled, the [`Visit`] trait will include an +//! optional [`record_value`] method. `Visit` implementations that wish to +//! record `valuable` values can implement this method with custom behavior. +//! If a visitor does not implement `record_value`, the [`valuable::Value`] will +//! be forwarded to the visitor's [`record_debug`] method. +//! +//! [`fmt::Debug`]: std::fmt::Debug +//! [`fmt::Display`]: std::fmt::Debug +//! [`valuable`]: https://crates.io/crates/valuable +//! [`valuable::Valuable`]: https://docs.rs/valuable/latest/valuable/trait.Valuable.html +//! [`as_value`]: https://docs.rs/valuable/latest/valuable/trait.Valuable.html#tymethod.as_value +//! [`valuable::Value`]: https://docs.rs/valuable/latest/valuable/enum.Value.html +//! [`Subscriber`]: crate::Subscriber +//! [`record_value`]: Visit::record_value +//! [`record_debug`]: Visit::record_debug +//! [span]: mod@crate::span +//! [`Event`]: crate::event::Event +//! [`Metadata`]: crate::Metadata +//! [`Attributes`]: crate::span::Attributes +//! [`Record`]: crate::span::Record +//! [`new_span`]: crate::Subscriber::new_span +//! [`record`]: crate::Subscriber::record +//! [`event`]: crate::Subscriber::event +pub use tracing_core::field::*; + +use crate::Metadata; + +/// Trait implemented to allow a type to be used as a field key. +/// +///
+/// **Note**: Although this is implemented for both the `Field` type *and*
+/// any type that can be borrowed as an `&str`, only `Field` allows *O*(1)
+/// access. Indexing a field with a string results in an iterative search that
+/// performs string comparisons. Thus, once the key for a field is known, it
+/// should be reused whenever possible.
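+///
+/// As a rough illustration, both a string and a resolved `Field` key can be
+/// passed to `Span::record` (the latter avoids repeating the lookup):
+///
+/// ```
+/// use tracing::{field, trace_span};
+///
+/// let span = trace_span!("my_span", parting = field::Empty);
+///
+/// // Recording by name performs a string lookup of the field key...
+/// span.record("parting", &"goodbye world!");
+///
+/// // ...while resolving the `Field` key once allows it to be reused directly.
+/// if let Some(parting) = span.metadata().and_then(|meta| meta.fields().field("parting")) {
+///     span.record(&parting, &"goodbye again!");
+/// }
+/// ```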
+pub trait AsField: crate::sealed::Sealed { + /// Attempts to convert `&self` into a `Field` with the specified `metadata`. + /// + /// If `metadata` defines this field, then the field is returned. Otherwise, + /// this returns `None`. + fn as_field(&self, metadata: &Metadata<'_>) -> Option; +} + +// ===== impl AsField ===== + +impl AsField for Field { + #[inline] + fn as_field(&self, metadata: &Metadata<'_>) -> Option { + if self.callsite() == metadata.callsite() { + Some(self.clone()) + } else { + None + } + } +} + +impl<'a> AsField for &'a Field { + #[inline] + fn as_field(&self, metadata: &Metadata<'_>) -> Option { + if self.callsite() == metadata.callsite() { + Some((*self).clone()) + } else { + None + } + } +} + +impl AsField for str { + #[inline] + fn as_field(&self, metadata: &Metadata<'_>) -> Option { + metadata.fields().field(&self) + } +} + +impl crate::sealed::Sealed for Field {} +impl<'a> crate::sealed::Sealed for &'a Field {} +impl crate::sealed::Sealed for str {} diff --git a/third_party/rust/tracing/src/instrument.rs b/third_party/rust/tracing/src/instrument.rs new file mode 100644 index 000000000000..46e5f579cd79 --- /dev/null +++ b/third_party/rust/tracing/src/instrument.rs @@ -0,0 +1,370 @@ +use crate::stdlib::pin::Pin; +use crate::stdlib::task::{Context, Poll}; +use crate::stdlib::{future::Future, marker::Sized}; +use crate::{ + dispatcher::{self, Dispatch}, + span::Span, +}; +use pin_project_lite::pin_project; + +/// Attaches spans to a [`std::future::Future`]. +/// +/// Extension trait allowing futures to be +/// instrumented with a `tracing` [span]. +/// +/// [span]: super::Span +pub trait Instrument: Sized { + /// Instruments this type with the provided [`Span`], returning an + /// `Instrumented` wrapper. + /// + /// The attached [`Span`] will be [entered] every time the instrumented + /// [`Future`] is polled. + /// + /// # Examples + /// + /// Instrumenting a future: + /// + /// ```rust + /// use tracing::Instrument; + /// + /// # async fn doc() { + /// let my_future = async { + /// // ... + /// }; + /// + /// my_future + /// .instrument(tracing::info_span!("my_future")) + /// .await + /// # } + /// ``` + /// + /// The [`Span::or_current`] combinator can be used in combination with + /// `instrument` to ensure that the [current span] is attached to the + /// future if the span passed to `instrument` is [disabled]: + /// + /// ``` + /// use tracing::Instrument; + /// # mod tokio { + /// # pub(super) fn spawn(_: impl std::future::Future) {} + /// # } + /// + /// let my_future = async { + /// // ... + /// }; + /// + /// let outer_span = tracing::info_span!("outer").entered(); + /// + /// // If the "my_future" span is enabled, then the spawned task will + /// // be within both "my_future" *and* "outer", since "outer" is + /// // "my_future"'s parent. However, if "my_future" is disabled, + /// // the spawned task will *not* be in any span. + /// tokio::spawn( + /// my_future + /// .instrument(tracing::debug_span!("my_future")) + /// ); + /// + /// // Using `Span::or_current` ensures the spawned task is instrumented + /// // with the current span, if the new span passed to `instrument` is + /// // not enabled. 
This means that if the "my_future" span is disabled, + /// // the spawned task will still be instrumented with the "outer" span: + /// # let my_future = async {}; + /// tokio::spawn( + /// my_future + /// .instrument(tracing::debug_span!("my_future").or_current()) + /// ); + /// ``` + /// + /// [entered]: super::Span::enter() + /// [`Span::or_current`]: super::Span::or_current() + /// [current span]: super::Span::current() + /// [disabled]: super::Span::is_disabled() + /// [`Future`]: std::future::Future + fn instrument(self, span: Span) -> Instrumented { + Instrumented { inner: self, span } + } + + /// Instruments this type with the [current] [`Span`], returning an + /// `Instrumented` wrapper. + /// + /// The attached [`Span`] will be [entered] every time the instrumented + /// [`Future`] is polled. + /// + /// This can be used to propagate the current span when spawning a new future. + /// + /// # Examples + /// + /// ```rust + /// use tracing::Instrument; + /// + /// # mod tokio { + /// # pub(super) fn spawn(_: impl std::future::Future) {} + /// # } + /// # async fn doc() { + /// let span = tracing::info_span!("my_span"); + /// let _enter = span.enter(); + /// + /// // ... + /// + /// let future = async { + /// tracing::debug!("this event will occur inside `my_span`"); + /// // ... + /// }; + /// tokio::spawn(future.in_current_span()); + /// # } + /// ``` + /// + /// [current]: super::Span::current() + /// [entered]: super::Span::enter() + /// [`Span`]: crate::Span + /// [`Future`]: std::future::Future + #[inline] + fn in_current_span(self) -> Instrumented { + self.instrument(Span::current()) + } +} + +/// Extension trait allowing futures to be instrumented with +/// a `tracing` [`Subscriber`](crate::Subscriber). +#[cfg_attr(docsrs, doc(cfg(feature = "std")))] +pub trait WithSubscriber: Sized { + /// Attaches the provided [`Subscriber`] to this type, returning a + /// [`WithDispatch`] wrapper. + /// + /// The attached [`Subscriber`] will be set as the [default] when the returned + /// [`Future`] is polled. + /// + /// # Examples + /// + /// ``` + /// # use tracing::subscriber::NoSubscriber as MySubscriber; + /// # use tracing::subscriber::NoSubscriber as MyOtherSubscriber; + /// # async fn docs() { + /// use tracing::instrument::WithSubscriber; + /// + /// // Set the default `Subscriber` + /// let _default = tracing::subscriber::set_default(MySubscriber::default()); + /// + /// tracing::info!("this event will be recorded by the default `Subscriber`"); + /// + /// // Create a different `Subscriber` and attach it to a future. + /// let other_subscriber = MyOtherSubscriber::default(); + /// let future = async { + /// tracing::info!("this event will be recorded by the other `Subscriber`"); + /// // ... + /// }; + /// + /// future + /// // Attach the other `Subscriber` to the future before awaiting it + /// .with_subscriber(other_subscriber) + /// .await; + /// + /// // Once the future has completed, we return to the default `Subscriber`. + /// tracing::info!("this event will be recorded by the default `Subscriber`"); + /// # } + /// ``` + /// + /// [`Subscriber`]: super::Subscriber + /// [default]: crate::dispatcher#setting-the-default-subscriber + /// [`Future`]: std::future::Future + fn with_subscriber(self, subscriber: S) -> WithDispatch + where + S: Into, + { + WithDispatch { + inner: self, + dispatcher: subscriber.into(), + } + } + + /// Attaches the current [default] [`Subscriber`] to this type, returning a + /// [`WithDispatch`] wrapper. 
+ /// + /// The attached `Subscriber` will be set as the [default] when the returned + /// [`Future`] is polled. + /// + /// This can be used to propagate the current dispatcher context when + /// spawning a new future that may run on a different thread. + /// + /// # Examples + /// + /// ``` + /// # mod tokio { + /// # pub(super) fn spawn(_: impl std::future::Future) {} + /// # } + /// # use tracing::subscriber::NoSubscriber as MySubscriber; + /// # async fn docs() { + /// use tracing::instrument::WithSubscriber; + /// + /// // Using `set_default` (rather than `set_global_default`) sets the + /// // default `Subscriber` for *this* thread only. + /// let _default = tracing::subscriber::set_default(MySubscriber::default()); + /// + /// let future = async { + /// // ... + /// }; + /// + /// // If a multi-threaded async runtime is in use, this spawned task may + /// // run on a different thread, in a different default `Subscriber`'s context. + /// tokio::spawn(future); + /// + /// // However, calling `with_current_subscriber` on the future before + /// // spawning it, ensures that the current thread's default `Subscriber` is + /// // propagated to the spawned task, regardless of where it executes: + /// # let future = async { }; + /// tokio::spawn(future.with_current_subscriber()); + /// # } + /// ``` + /// [`Subscriber`]: super::Subscriber + /// [default]: crate::dispatcher#setting-the-default-subscriber + /// [`Future`]: std::future::Future + #[inline] + fn with_current_subscriber(self) -> WithDispatch { + WithDispatch { + inner: self, + dispatcher: crate::dispatcher::get_default(|default| default.clone()), + } + } +} + +pin_project! { + /// A [`Future`] that has been instrumented with a `tracing` [`Subscriber`]. + /// + /// This type is returned by the [`WithSubscriber`] extension trait. See that + /// trait's documentation for details. + /// + /// [`Future`]: std::future::Future + /// [`Subscriber`]: crate::Subscriber + #[derive(Clone, Debug)] + #[must_use = "futures do nothing unless you `.await` or poll them"] + #[cfg_attr(docsrs, doc(cfg(feature = "std")))] + pub struct WithDispatch { + #[pin] + inner: T, + dispatcher: Dispatch, + } +} + +pin_project! { + /// A [`Future`] that has been instrumented with a `tracing` [`Span`]. + /// + /// This type is returned by the [`Instrument`] extension trait. See that + /// trait's documentation for details. + /// + /// [`Future`]: std::future::Future + /// [`Span`]: crate::Span + #[derive(Debug, Clone)] + #[must_use = "futures do nothing unless you `.await` or poll them"] + pub struct Instrumented { + #[pin] + inner: T, + span: Span, + } +} + +// === impl Instrumented === + +impl Future for Instrumented { + type Output = T::Output; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let this = self.project(); + let _enter = this.span.enter(); + this.inner.poll(cx) + } +} + +impl Instrument for T {} + +impl Instrumented { + /// Borrows the `Span` that this type is instrumented by. + pub fn span(&self) -> &Span { + &self.span + } + + /// Mutably borrows the `Span` that this type is instrumented by. + pub fn span_mut(&mut self) -> &mut Span { + &mut self.span + } + + /// Borrows the wrapped type. + pub fn inner(&self) -> &T { + &self.inner + } + + /// Mutably borrows the wrapped type. + pub fn inner_mut(&mut self) -> &mut T { + &mut self.inner + } + + /// Get a pinned reference to the wrapped type. 
+ pub fn inner_pin_ref(self: Pin<&Self>) -> Pin<&T> { + self.project_ref().inner + } + + /// Get a pinned mutable reference to the wrapped type. + pub fn inner_pin_mut(self: Pin<&mut Self>) -> Pin<&mut T> { + self.project().inner + } + + /// Consumes the `Instrumented`, returning the wrapped type. + /// + /// Note that this drops the span. + pub fn into_inner(self) -> T { + self.inner + } +} + +// === impl WithDispatch === + +#[cfg(feature = "std")] +#[cfg_attr(docsrs, doc(cfg(feature = "std")))] +impl Future for WithDispatch { + type Output = T::Output; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let this = self.project(); + let dispatcher = this.dispatcher; + let future = this.inner; + let _default = dispatcher::set_default(dispatcher); + future.poll(cx) + } +} + +#[cfg_attr(docsrs, doc(cfg(feature = "std")))] +impl WithSubscriber for T {} + +#[cfg(feature = "std")] +#[cfg_attr(docsrs, doc(cfg(feature = "std")))] +impl WithDispatch { + /// Borrows the [`Dispatch`] that is entered when this type is polled. + pub fn dispatcher(&self) -> &Dispatch { + &self.dispatcher + } + + /// Borrows the wrapped type. + pub fn inner(&self) -> &T { + &self.inner + } + + /// Mutably borrows the wrapped type. + pub fn inner_mut(&mut self) -> &mut T { + &mut self.inner + } + + /// Get a pinned reference to the wrapped type. + pub fn inner_pin_ref(self: Pin<&Self>) -> Pin<&T> { + self.project_ref().inner + } + + /// Get a pinned mutable reference to the wrapped type. + pub fn inner_pin_mut(self: Pin<&mut Self>) -> Pin<&mut T> { + self.project().inner + } + + /// Consumes the `Instrumented`, returning the wrapped type. + /// + /// Note that this drops the span. + pub fn into_inner(self) -> T { + self.inner + } +} diff --git a/third_party/rust/tracing/src/level_filters.rs b/third_party/rust/tracing/src/level_filters.rs new file mode 100644 index 000000000000..44f5e5f57a64 --- /dev/null +++ b/third_party/rust/tracing/src/level_filters.rs @@ -0,0 +1,94 @@ +//! Trace verbosity level filtering. +//! +//! # Compile time filters +//! +//! Trace verbosity levels can be statically disabled at compile time via Cargo +//! features, similar to the [`log` crate]. Trace instrumentation at disabled +//! levels will be skipped and will not even be present in the resulting binary +//! unless the verbosity level is specified dynamically. This level is +//! configured separately for release and debug builds. The features are: +//! +//! * `max_level_off` +//! * `max_level_error` +//! * `max_level_warn` +//! * `max_level_info` +//! * `max_level_debug` +//! * `max_level_trace` +//! * `release_max_level_off` +//! * `release_max_level_error` +//! * `release_max_level_warn` +//! * `release_max_level_info` +//! * `release_max_level_debug` +//! * `release_max_level_trace` +//! +//! These features control the value of the `STATIC_MAX_LEVEL` constant. The +//! instrumentation macros macros check this value before recording an event or +//! constructing a span. By default, no levels are disabled. +//! +//! For example, a crate can disable trace level instrumentation in debug builds +//! and trace, debug, and info level instrumentation in release builds with the +//! following configuration: +//! +//! ```toml +//! [dependencies] +//! tracing = { version = "0.1", features = ["max_level_debug", "release_max_level_warn"] } +//! ``` +//! ## Notes +//! +//! Please note that `tracing`'s static max level features do *not* control the +//! [`log`] records that may be emitted when [`tracing`'s "log" feature flag][f] is +//! 
enabled. This is to allow `tracing` to be disabled entirely at compile time +//! while still emitting `log` records --- such as when a library using +//! `tracing` is used by an application using `log` that doesn't want to +//! generate any `tracing`-related code, but does want to collect `log` records. +//! +//! This means that if the "log" feature is in use, some code may be generated +//! for `log` records emitted by disabled `tracing` events. If this is not +//! desirable, `log` records may be disabled separately using [`log`'s static +//! max level features][`log` crate]. +//! +//! [`log`]: https://docs.rs/log/ +//! [`log` crate]: https://docs.rs/log/latest/log/#compile-time-filters +//! [f]: https://docs.rs/tracing/latest/tracing/#emitting-log-records +pub use tracing_core::{metadata::ParseLevelFilterError, LevelFilter}; + +/// The statically configured maximum trace level. +/// +/// See the [module-level documentation] for information on how to configure +/// this. +/// +/// This value is checked by the `event!` and `span!` macros. Code that +/// manually constructs events or spans via the `Event::record` function or +/// `Span` constructors should compare the level against this value to +/// determine if those spans or events are enabled. +/// +/// [module-level documentation]: super#compile-time-filters +pub const STATIC_MAX_LEVEL: LevelFilter = MAX_LEVEL; + +cfg_if::cfg_if! { + if #[cfg(all(not(debug_assertions), feature = "release_max_level_off"))] { + const MAX_LEVEL: LevelFilter = LevelFilter::OFF; + } else if #[cfg(all(not(debug_assertions), feature = "release_max_level_error"))] { + const MAX_LEVEL: LevelFilter = LevelFilter::ERROR; + } else if #[cfg(all(not(debug_assertions), feature = "release_max_level_warn"))] { + const MAX_LEVEL: LevelFilter = LevelFilter::WARN; + } else if #[cfg(all(not(debug_assertions), feature = "release_max_level_info"))] { + const MAX_LEVEL: LevelFilter = LevelFilter::INFO; + } else if #[cfg(all(not(debug_assertions), feature = "release_max_level_debug"))] { + const MAX_LEVEL: LevelFilter = LevelFilter::DEBUG; + } else if #[cfg(all(not(debug_assertions), feature = "release_max_level_trace"))] { + const MAX_LEVEL: LevelFilter = LevelFilter::TRACE; + } else if #[cfg(feature = "max_level_off")] { + const MAX_LEVEL: LevelFilter = LevelFilter::OFF; + } else if #[cfg(feature = "max_level_error")] { + const MAX_LEVEL: LevelFilter = LevelFilter::ERROR; + } else if #[cfg(feature = "max_level_warn")] { + const MAX_LEVEL: LevelFilter = LevelFilter::WARN; + } else if #[cfg(feature = "max_level_info")] { + const MAX_LEVEL: LevelFilter = LevelFilter::INFO; + } else if #[cfg(feature = "max_level_debug")] { + const MAX_LEVEL: LevelFilter = LevelFilter::DEBUG; + } else { + const MAX_LEVEL: LevelFilter = LevelFilter::TRACE; + } +} diff --git a/third_party/rust/tracing/src/lib.rs b/third_party/rust/tracing/src/lib.rs new file mode 100644 index 000000000000..e3424e040cce --- /dev/null +++ b/third_party/rust/tracing/src/lib.rs @@ -0,0 +1,1183 @@ +//! A scoped, structured logging and diagnostics system. +//! +//! # Overview +//! +//! `tracing` is a framework for instrumenting Rust programs to collect +//! structured, event-based diagnostic information. +//! +//! In asynchronous systems like Tokio, interpreting traditional log messages can +//! often be quite challenging. Since individual tasks are multiplexed on the same +//! thread, associated events and log lines are intermixed making it difficult to +//! trace the logic flow. 
`tracing` expands upon logging-style diagnostics by +//! allowing libraries and applications to record structured events with additional +//! information about *temporality* and *causality* — unlike a log message, a span +//! in `tracing` has a beginning and end time, may be entered and exited by the +//! flow of execution, and may exist within a nested tree of similar spans. In +//! addition, `tracing` spans are *structured*, with the ability to record typed +//! data as well as textual messages. +//! +//! The `tracing` crate provides the APIs necessary for instrumenting libraries +//! and applications to emit trace data. +//! +//! *Compiler support: [requires `rustc` 1.49+][msrv]* +//! +//! [msrv]: #supported-rust-versions +//! # Core Concepts +//! +//! The core of `tracing`'s API is composed of _spans_, _events_ and +//! _subscribers_. We'll cover these in turn. +//! +//! ## Spans +//! +//! To record the flow of execution through a program, `tracing` introduces the +//! concept of [spans]. Unlike a log line that represents a _moment in +//! time_, a span represents a _period of time_ with a beginning and an end. When a +//! program begins executing in a context or performing a unit of work, it +//! _enters_ that context's span, and when it stops executing in that context, +//! it _exits_ the span. The span in which a thread is currently executing is +//! referred to as that thread's _current_ span. +//! +//! For example: +//! ``` +//! use tracing::{span, Level}; +//! # fn main() { +//! let span = span!(Level::TRACE, "my_span"); +//! // `enter` returns a RAII guard which, when dropped, exits the span. this +//! // indicates that we are in the span for the current lexical scope. +//! let _enter = span.enter(); +//! // perform some work in the context of `my_span`... +//! # } +//!``` +//! +//! The [`span` module][span]'s documentation provides further details on how to +//! use spans. +//! +//!
+//! **Warning**: In asynchronous code that uses async/await syntax,
+//! `Span::enter` may produce incorrect traces if the returned drop
+//! guard is held across an await point. See
+//! [the method documentation][Span#in-asynchronous-code] for details.
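+//!
+//! In such code, a minimal sketch of the safer pattern is to attach the span
+//! to the future itself with the [`Instrument`] combinator, rather than
+//! holding the guard across an `.await` point:
+//!
+//! ```
+//! use tracing::Instrument;
+//!
+//! async fn my_async_function() {
+//!     let span = tracing::info_span!("my_async_function");
+//!     async move {
+//!         // This event is recorded inside `my_async_function`'s span, and the
+//!         // span is only entered while this future is actually being polled.
+//!         tracing::debug!("doing some work");
+//!     }
+//!     .instrument(span)
+//!     .await
+//! }
+//! ```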
+//! +//! ## Events +//! +//! An [`Event`] represents a _moment_ in time. It signifies something that +//! happened while a trace was being recorded. `Event`s are comparable to the log +//! records emitted by unstructured logging code, but unlike a typical log line, +//! an `Event` may occur within the context of a span. +//! +//! For example: +//! ``` +//! use tracing::{event, span, Level}; +//! +//! # fn main() { +//! // records an event outside of any span context: +//! event!(Level::INFO, "something happened"); +//! +//! let span = span!(Level::INFO, "my_span"); +//! let _guard = span.enter(); +//! +//! // records an event within "my_span". +//! event!(Level::DEBUG, "something happened inside my_span"); +//! # } +//!``` +//! +//! In general, events should be used to represent points in time _within_ a +//! span — a request returned with a given status code, _n_ new items were +//! taken from a queue, and so on. +//! +//! The [`Event` struct][`Event`] documentation provides further details on using +//! events. +//! +//! ## Subscribers +//! +//! As `Span`s and `Event`s occur, they are recorded or aggregated by +//! implementations of the [`Subscriber`] trait. `Subscriber`s are notified +//! when an `Event` takes place and when a `Span` is entered or exited. These +//! notifications are represented by the following `Subscriber` trait methods: +//! +//! + [`event`][Subscriber::event], called when an `Event` takes place, +//! + [`enter`], called when execution enters a `Span`, +//! + [`exit`], called when execution exits a `Span` +//! +//! In addition, subscribers may implement the [`enabled`] function to _filter_ +//! the notifications they receive based on [metadata] describing each `Span` +//! or `Event`. If a call to `Subscriber::enabled` returns `false` for a given +//! set of metadata, that `Subscriber` will *not* be notified about the +//! corresponding `Span` or `Event`. For performance reasons, if no currently +//! active subscribers express interest in a given set of metadata by returning +//! `true`, then the corresponding `Span` or `Event` will never be constructed. +//! +//! # Usage +//! +//! First, add this to your `Cargo.toml`: +//! +//! ```toml +//! [dependencies] +//! tracing = "0.1" +//! ``` +//! +//! *Compiler support: [requires `rustc` 1.42+][msrv]* +//! +//! ## Recording Spans and Events +//! +//! Spans and events are recorded using macros. +//! +//! ### Spans +//! +//! The [`span!`] macro expands to a [`Span` struct][`Span`] which is used to +//! record a span. The [`Span::enter`] method on that struct records that the +//! span has been entered, and returns a [RAII] guard object, which will exit +//! the span when dropped. +//! +//! For example: +//! +//! ```rust +//! use tracing::{span, Level}; +//! # fn main() { +//! // Construct a new span named "my span" with trace log level. +//! let span = span!(Level::TRACE, "my span"); +//! +//! // Enter the span, returning a guard object. +//! let _enter = span.enter(); +//! +//! // Any trace events that occur before the guard is dropped will occur +//! // within the span. +//! +//! // Dropping the guard will exit the span. +//! # } +//! ``` +//! +//! The [`#[instrument]`][instrument] attribute provides an easy way to +//! add `tracing` spans to functions. A function annotated with `#[instrument]` +//! will create and enter a span with that function's name every time the +//! function is called, with arguments to that function will be recorded as +//! fields using `fmt::Debug`. +//! +//! For example: +//! ```ignore +//! 
# // this doctest is ignored because we don't have a way to say +//! # // that it should only be run with cfg(feature = "attributes") +//! use tracing::{Level, event, instrument}; +//! +//! #[instrument] +//! pub fn my_function(my_arg: usize) { +//! // This event will be recorded inside a span named `my_function` with the +//! // field `my_arg`. +//! event!(Level::INFO, "inside my_function!"); +//! // ... +//! } +//! # fn main() {} +//! ``` +//! +//! For functions which don't have built-in tracing support and can't have +//! the `#[instrument]` attribute applied (such as from an external crate), +//! the [`Span` struct][`Span`] has a [`in_scope()` method][`in_scope`] +//! which can be used to easily wrap synchonous code in a span. +//! +//! For example: +//! ```rust +//! use tracing::info_span; +//! +//! # fn doc() -> Result<(), ()> { +//! # mod serde_json { +//! # pub(crate) fn from_slice(buf: &[u8]) -> Result<(), ()> { Ok(()) } +//! # } +//! # let buf: [u8; 0] = []; +//! let json = info_span!("json.parse").in_scope(|| serde_json::from_slice(&buf))?; +//! # let _ = json; // suppress unused variable warning +//! # Ok(()) +//! # } +//! ``` +//! +//! You can find more examples showing how to use this crate [here][examples]. +//! +//! [RAII]: https://github.com/rust-unofficial/patterns/blob/master/patterns/behavioural/RAII.md +//! [examples]: https://github.com/tokio-rs/tracing/tree/master/examples +//! +//! ### Events +//! +//! [`Event`]s are recorded using the [`event!`] macro: +//! +//! ```rust +//! # fn main() { +//! use tracing::{event, Level}; +//! event!(Level::INFO, "something has happened!"); +//! # } +//! ``` +//! +//! ## Using the Macros +//! +//! The [`span!`] and [`event!`] macros as well as the `#[instrument]` attribute +//! use fairly similar syntax, with some exceptions. +//! +//! ### Configuring Attributes +//! +//! Both macros require a [`Level`] specifying the verbosity of the span or +//! event. Optionally, the [target] and [parent span] may be overridden. If the +//! target and parent span are not overridden, they will default to the +//! module path where the macro was invoked and the current span (as determined +//! by the subscriber), respectively. +//! +//! For example: +//! +//! ``` +//! # use tracing::{span, event, Level}; +//! # fn main() { +//! span!(target: "app_spans", Level::TRACE, "my span"); +//! event!(target: "app_events", Level::INFO, "something has happened!"); +//! # } +//! ``` +//! ``` +//! # use tracing::{span, event, Level}; +//! # fn main() { +//! let span = span!(Level::TRACE, "my span"); +//! event!(parent: &span, Level::INFO, "something has happened!"); +//! # } +//! ``` +//! +//! The span macros also take a string literal after the level, to set the name +//! of the span. +//! +//! ### Recording Fields +//! +//! Structured fields on spans and events are specified using the syntax +//! `field_name = field_value`. Fields are separated by commas. +//! +//! ``` +//! # use tracing::{event, Level}; +//! # fn main() { +//! // records an event with two fields: +//! // - "answer", with the value 42 +//! // - "question", with the value "life, the universe and everything" +//! event!(Level::INFO, answer = 42, question = "life, the universe, and everything"); +//! # } +//! ``` +//! +//! As shorthand, local variables may be used as field values without an +//! assignment, similar to [struct initializers]. For example: +//! +//! ``` +//! # use tracing::{span, Level}; +//! # fn main() { +//! let user = "ferris"; +//! +//! 
span!(Level::TRACE, "login", user); +//! // is equivalent to: +//! span!(Level::TRACE, "login", user = user); +//! # } +//!``` +//! +//! Field names can include dots, but should not be terminated by them: +//! ``` +//! # use tracing::{span, Level}; +//! # fn main() { +//! let user = "ferris"; +//! let email = "ferris@rust-lang.org"; +//! span!(Level::TRACE, "login", user, user.email = email); +//! # } +//!``` +//! +//! Since field names can include dots, fields on local structs can be used +//! using the local variable shorthand: +//! ``` +//! # use tracing::{span, Level}; +//! # fn main() { +//! # struct User { +//! # name: &'static str, +//! # email: &'static str, +//! # } +//! let user = User { +//! name: "ferris", +//! email: "ferris@rust-lang.org", +//! }; +//! // the span will have the fields `user.name = "ferris"` and +//! // `user.email = "ferris@rust-lang.org"`. +//! span!(Level::TRACE, "login", user.name, user.email); +//! # } +//!``` +//! +//! Fields with names that are not Rust identifiers, or with names that are Rust reserved words, +//! may be created using quoted string literals. However, this may not be used with the local +//! variable shorthand. +//! ``` +//! # use tracing::{span, Level}; +//! # fn main() { +//! // records an event with fields whose names are not Rust identifiers +//! // - "guid:x-request-id", containing a `:`, with the value "abcdef" +//! // - "type", which is a reserved word, with the value "request" +//! span!(Level::TRACE, "api", "guid:x-request-id" = "abcdef", "type" = "request"); +//! # } +//!``` +//! +//! The `?` sigil is shorthand that specifies a field should be recorded using +//! its [`fmt::Debug`] implementation: +//! ``` +//! # use tracing::{event, Level}; +//! # fn main() { +//! #[derive(Debug)] +//! struct MyStruct { +//! field: &'static str, +//! } +//! +//! let my_struct = MyStruct { +//! field: "Hello world!" +//! }; +//! +//! // `my_struct` will be recorded using its `fmt::Debug` implementation. +//! event!(Level::TRACE, greeting = ?my_struct); +//! // is equivalent to: +//! event!(Level::TRACE, greeting = tracing::field::debug(&my_struct)); +//! # } +//! ``` +//! +//! The `%` sigil operates similarly, but indicates that the value should be +//! recorded using its [`fmt::Display`] implementation: +//! ``` +//! # use tracing::{event, Level}; +//! # fn main() { +//! # #[derive(Debug)] +//! # struct MyStruct { +//! # field: &'static str, +//! # } +//! # +//! # let my_struct = MyStruct { +//! # field: "Hello world!" +//! # }; +//! // `my_struct.field` will be recorded using its `fmt::Display` implementation. +//! event!(Level::TRACE, greeting = %my_struct.field); +//! // is equivalent to: +//! event!(Level::TRACE, greeting = tracing::field::display(&my_struct.field)); +//! # } +//! ``` +//! +//! The `%` and `?` sigils may also be used with local variable shorthand: +//! +//! ``` +//! # use tracing::{event, Level}; +//! # fn main() { +//! # #[derive(Debug)] +//! # struct MyStruct { +//! # field: &'static str, +//! # } +//! # +//! # let my_struct = MyStruct { +//! # field: "Hello world!" +//! # }; +//! // `my_struct.field` will be recorded using its `fmt::Display` implementation. +//! event!(Level::TRACE, %my_struct.field); +//! # } +//! ``` +//! +//! Additionally, a span may declare fields with the special value [`Empty`], +//! which indicates that that the value for that field does not currently exist +//! but may be recorded later. For example: +//! +//! ``` +//! use tracing::{trace_span, field}; +//! +//! 
// Create a span with two fields: `greeting`, with the value "hello world", and +//! // `parting`, without a value. +//! let span = trace_span!("my_span", greeting = "hello world", parting = field::Empty); +//! +//! // ... +//! +//! // Now, record a value for parting as well. +//! span.record("parting", &"goodbye world!"); +//! ``` +//! +//! Note that a span may have up to 32 fields. The following will not compile: +//! +//! ```rust,compile_fail +//! # use tracing::Level; +//! # fn main() { +//! let bad_span = span!( +//! Level::TRACE, +//! "too many fields!", +//! a = 1, b = 2, c = 3, d = 4, e = 5, f = 6, g = 7, h = 8, i = 9, +//! j = 10, k = 11, l = 12, m = 13, n = 14, o = 15, p = 16, q = 17, +//! r = 18, s = 19, t = 20, u = 21, v = 22, w = 23, x = 24, y = 25, +//! z = 26, aa = 27, bb = 28, cc = 29, dd = 30, ee = 31, ff = 32, gg = 33 +//! ); +//! # } +//! ``` +//! +//! Finally, events may also include human-readable messages, in the form of a +//! [format string][fmt] and (optional) arguments, **after** the event's +//! key-value fields. If a format string and arguments are provided, +//! they will implicitly create a new field named `message` whose value is the +//! provided set of format arguments. +//! +//! For example: +//! +//! ``` +//! # use tracing::{event, Level}; +//! # fn main() { +//! let question = "the ultimate question of life, the universe, and everything"; +//! let answer = 42; +//! // records an event with the following fields: +//! // - `question.answer` with the value 42, +//! // - `question.tricky` with the value `true`, +//! // - "message", with the value "the answer to the ultimate question of life, the +//! // universe, and everything is 42." +//! event!( +//! Level::DEBUG, +//! question.answer = answer, +//! question.tricky = true, +//! "the answer to {} is {}.", question, answer +//! ); +//! # } +//! ``` +//! +//! Specifying a formatted message in this manner does not allocate by default. +//! +//! [struct initializers]: https://doc.rust-lang.org/book/ch05-01-defining-structs.html#using-the-field-init-shorthand-when-variables-and-fields-have-the-same-name +//! [target]: Metadata::target +//! [parent span]: span::Attributes::parent +//! [determined contextually]: span::Attributes::is_contextual +//! [`fmt::Debug`]: https://doc.rust-lang.org/std/fmt/trait.Debug.html +//! [`fmt::Display`]: https://doc.rust-lang.org/std/fmt/trait.Display.html +//! [fmt]: https://doc.rust-lang.org/std/fmt/#usage +//! [`Empty`]: field::Empty +//! +//! ### Shorthand Macros +//! +//! `tracing` also offers a number of macros with preset verbosity levels. +//! The [`trace!`], [`debug!`], [`info!`], [`warn!`], and [`error!`] behave +//! similarly to the [`event!`] macro, but with the [`Level`] argument already +//! specified, while the corresponding [`trace_span!`], [`debug_span!`], +//! [`info_span!`], [`warn_span!`], and [`error_span!`] macros are the same, +//! but for the [`span!`] macro. +//! +//! These are intended both as a shorthand, and for compatibility with the [`log`] +//! crate (see the next section). +//! +//! [`span!`]: span! +//! [`event!`]: event! +//! [`trace!`]: trace! +//! [`debug!`]: debug! +//! [`info!`]: info! +//! [`warn!`]: warn! +//! [`error!`]: error! +//! [`trace_span!`]: trace_span! +//! [`debug_span!`]: debug_span! +//! [`info_span!`]: info_span! +//! [`warn_span!`]: warn_span! +//! [`error_span!`]: error_span! +//! [`Level`]: Level +//! +//! ### For `log` Users +//! +//! Users of the [`log`] crate should note that `tracing` exposes a set of +//! 
macros for creating `Event`s (`trace!`, `debug!`, `info!`, `warn!`, and
+//! `error!`) which may be invoked with the same syntax as the similarly-named
+//! macros from the `log` crate. Often, the process of converting a project to
+//! use `tracing` can begin with a simple drop-in replacement.
+//!
+//! Let's consider the `log` crate's yak-shaving example:
+//!
+//! ```rust,ignore
+//! use std::{error::Error, io};
+//! use tracing::{debug, error, info, span, warn, Level};
+//!
+//! // the `#[tracing::instrument]` attribute creates and enters a span
+//! // every time the instrumented function is called. The span is named after
+//! // the function or method. Parameters passed to the function are recorded as fields.
+//! #[tracing::instrument]
+//! pub fn shave(yak: usize) -> Result<(), Box<dyn Error + 'static>> {
+//!     // this creates an event at the DEBUG level with two fields:
+//!     // - `excitement`, with the key "excitement" and the value "yay!"
+//!     // - `message`, with the key "message" and the value "hello! I'm gonna shave a yak."
+//!     //
+//!     // unlike other fields, `message`'s shorthand initialization is just the string itself.
+//!     debug!(excitement = "yay!", "hello! I'm gonna shave a yak.");
+//!     if yak == 3 {
+//!         warn!("could not locate yak!");
+//!         // note that this is intended to demonstrate `tracing`'s features, not idiomatic
+//!         // error handling! in a library or application, you should consider returning
+//!         // a dedicated `YakError`. libraries like snafu or thiserror make this easy.
+//!         return Err(io::Error::new(io::ErrorKind::Other, "shaving yak failed!").into());
+//!     } else {
+//!         debug!("yak shaved successfully");
+//!     }
+//!     Ok(())
+//! }
+//!
+//! pub fn shave_all(yaks: usize) -> usize {
+//!     // Constructs a new span named "shaving_yaks" at the TRACE level,
+//!     // and a field whose key is "yaks". This is equivalent to writing:
+//!     //
+//!     // let span = span!(Level::TRACE, "shaving_yaks", yaks = yaks);
+//!     //
+//!     // local variables (`yaks`) can be used as field values
+//!     // without an assignment, similar to struct initializers.
+//!     let _span = span!(Level::TRACE, "shaving_yaks", yaks).entered();
+//!
+//!     info!("shaving yaks");
+//!
+//!     let mut yaks_shaved = 0;
+//!     for yak in 1..=yaks {
+//!         let res = shave(yak);
+//!         debug!(yak, shaved = res.is_ok());
+//!
+//!         if let Err(ref error) = res {
+//!             // Like spans, events can also use the field initialization shorthand.
+//!             // In this instance, `yak` is the field being initialized.
+//!             error!(yak, error = error.as_ref(), "failed to shave yak!");
+//!         } else {
+//!             yaks_shaved += 1;
+//!         }
+//!         debug!(yaks_shaved);
+//!     }
+//!
+//!     yaks_shaved
+//! }
+//! ```
+//!
+//! ## In libraries
+//!
+//! Libraries should link only to the `tracing` crate, and use the provided
+//! macros to record whatever information will be useful to downstream
+//! consumers.
+//!
+//! ## In executables
+//!
+//! In order to record trace events, executables have to use a `Subscriber`
+//! implementation compatible with `tracing`. A `Subscriber` implements a
+//! way of collecting trace data, such as by logging it to standard output.
+//!
+//! This library does not contain any `Subscriber` implementations; these are
+//! provided by [other crates](#related-crates).
+//!
+//! The simplest way to use a subscriber is to call the [`set_global_default`]
+//! function:
+//!
+//! ```
+//! extern crate tracing;
+//! # pub struct FooSubscriber;
+//! # use tracing::{span::{Id, Attributes, Record}, Metadata};
+//!
# impl tracing::Subscriber for FooSubscriber { +//! # fn new_span(&self, _: &Attributes) -> Id { Id::from_u64(0) } +//! # fn record(&self, _: &Id, _: &Record) {} +//! # fn event(&self, _: &tracing::Event) {} +//! # fn record_follows_from(&self, _: &Id, _: &Id) {} +//! # fn enabled(&self, _: &Metadata) -> bool { false } +//! # fn enter(&self, _: &Id) {} +//! # fn exit(&self, _: &Id) {} +//! # } +//! # impl FooSubscriber { +//! # fn new() -> Self { FooSubscriber } +//! # } +//! # fn main() { +//! +//! let my_subscriber = FooSubscriber::new(); +//! tracing::subscriber::set_global_default(my_subscriber) +//! .expect("setting tracing default failed"); +//! # } +//! ``` +//! +//!
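+//! `set_global_default` can only succeed once for the lifetime of the program;
+//! later calls return an error instead of replacing the installed subscriber.
+//! A minimal sketch of that behavior, reusing the illustrative `FooSubscriber`
+//! type from the example above:
+//!
+//! ```
+//! # pub struct FooSubscriber;
+//! # use tracing::{span::{Id, Attributes, Record}, Metadata};
+//! # impl tracing::Subscriber for FooSubscriber {
+//! #   fn new_span(&self, _: &Attributes) -> Id { Id::from_u64(0) }
+//! #   fn record(&self, _: &Id, _: &Record) {}
+//! #   fn event(&self, _: &tracing::Event) {}
+//! #   fn record_follows_from(&self, _: &Id, _: &Id) {}
+//! #   fn enabled(&self, _: &Metadata) -> bool { false }
+//! #   fn enter(&self, _: &Id) {}
+//! #   fn exit(&self, _: &Id) {}
+//! # }
+//! # impl FooSubscriber { fn new() -> Self { FooSubscriber } }
+//! # fn main() {
+//! // The first call installs the default subscriber...
+//! assert!(tracing::subscriber::set_global_default(FooSubscriber::new()).is_ok());
+//! // ...and any later call fails, leaving the original default in place.
+//! assert!(tracing::subscriber::set_global_default(FooSubscriber::new()).is_err());
+//! # }
+//! ```
+//!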
+//! **Warning**: In general, libraries should *not* call
+//! `set_global_default()`! Doing so will cause conflicts when
+//! executables that depend on the library try to set the default later.
+//! 
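+//! A sketch of that division of responsibilities, with the two halves living
+//! in different crates (`my_library` and the `init_subscriber` helper are
+//! illustrative names, not real APIs):
+//!
+//! ```rust,ignore
+//! // In the library crate: only emit spans and events.
+//! pub fn connect(addr: &str) {
+//!     let _span = tracing::info_span!("connect", addr).entered();
+//!     tracing::info!("establishing connection");
+//!     // ...
+//! }
+//!
+//! // In the executable: install a subscriber exactly once, at startup.
+//! fn main() {
+//!     let subscriber = init_subscriber(); // e.g. build a `FmtSubscriber` here
+//!     tracing::subscriber::set_global_default(subscriber)
+//!         .expect("setting tracing default failed");
+//!     my_library::connect("127.0.0.1:8080");
+//! }
+//! ```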
+//! +//! This subscriber will be used as the default in all threads for the +//! remainder of the duration of the program, similar to setting the logger +//! in the `log` crate. +//! +//! In addition, the default subscriber can be set through using the +//! [`with_default`] function. This follows the `tokio` pattern of using +//! closures to represent executing code in a context that is exited at the end +//! of the closure. For example: +//! +//! ```rust +//! # pub struct FooSubscriber; +//! # use tracing::{span::{Id, Attributes, Record}, Metadata}; +//! # impl tracing::Subscriber for FooSubscriber { +//! # fn new_span(&self, _: &Attributes) -> Id { Id::from_u64(0) } +//! # fn record(&self, _: &Id, _: &Record) {} +//! # fn event(&self, _: &tracing::Event) {} +//! # fn record_follows_from(&self, _: &Id, _: &Id) {} +//! # fn enabled(&self, _: &Metadata) -> bool { false } +//! # fn enter(&self, _: &Id) {} +//! # fn exit(&self, _: &Id) {} +//! # } +//! # impl FooSubscriber { +//! # fn new() -> Self { FooSubscriber } +//! # } +//! # fn main() { +//! +//! let my_subscriber = FooSubscriber::new(); +//! # #[cfg(feature = "std")] +//! tracing::subscriber::with_default(my_subscriber, || { +//! // Any trace events generated in this closure or by functions it calls +//! // will be collected by `my_subscriber`. +//! }) +//! # } +//! ``` +//! +//! This approach allows trace data to be collected by multiple subscribers +//! within different contexts in the program. Note that the override only applies to the +//! currently executing thread; other threads will not see the change from with_default. +//! +//! Any trace events generated outside the context of a subscriber will not be collected. +//! +//! Once a subscriber has been set, instrumentation points may be added to the +//! executable using the `tracing` crate's macros. +//! +//! ## `log` Compatibility +//! +//! The [`log`] crate provides a simple, lightweight logging facade for Rust. +//! While `tracing` builds upon `log`'s foundation with richer structured +//! diagnostic data, `log`'s simplicity and ubiquity make it the "lowest common +//! denominator" for text-based logging in Rust — a vast majority of Rust +//! libraries and applications either emit or consume `log` records. Therefore, +//! `tracing` provides multiple forms of interoperability with `log`: `tracing` +//! instrumentation can emit `log` records, and a compatibility layer enables +//! `tracing` [`Subscriber`]s to consume `log` records as `tracing` [`Event`]s. +//! +//! ### Emitting `log` Records +//! +//! This crate provides two feature flags, "log" and "log-always", which will +//! cause [spans] and [events] to emit `log` records. When the "log" feature is +//! enabled, if no `tracing` `Subscriber` is active, invoking an event macro or +//! creating a span with fields will emit a `log` record. This is intended +//! primarily for use in libraries which wish to emit diagnostics that can be +//! consumed by applications using `tracing` *or* `log`, without paying the +//! additional overhead of emitting both forms of diagnostics when `tracing` is +//! in use. +//! +//! Enabling the "log-always" feature will cause `log` records to be emitted +//! even if a `tracing` `Subscriber` _is_ set. This is intended to be used in +//! applications where a `log` `Logger` is being used to record a textual log, +//! and `tracing` is used only to record other forms of diagnostics (such as +//! metrics, profiling, or distributed tracing data). Unlike the "log" feature, +//! 
libraries generally should **not** enable the "log-always" feature, as doing
+//! so will prevent applications from being able to opt out of the `log` records.
+//!
+//! See [here][flags] for more details on this crate's feature flags.
+//!
+//! The generated `log` records' messages will be a string representation of the
+//! span or event's fields, and all additional information recorded by `log`
+//! (target, verbosity level, module path, file, and line number) will also be
+//! populated. Additionally, `log` records are generated when spans are
+//! entered, exited, and closed. Since these additional span lifecycle logs have
+//! the potential to be very verbose, and don't include additional fields, they
+//! will always be emitted at the `Trace` level, rather than inheriting the
+//! level of the span that generated them. Furthermore, they are categorized
+//! under a separate `log` target, "tracing::span" (and its sub-target,
+//! "tracing::span::active", for the logs on entering and exiting a span), which
+//! may be enabled or disabled separately from other `log` records emitted by
+//! `tracing`.
+//!
+//! ### Consuming `log` Records
+//!
+//! The [`tracing-log`] crate provides a compatibility layer which
+//! allows a `tracing` [`Subscriber`] to consume `log` records as though they
+//! were `tracing` [events]. This allows applications using `tracing` to record
+//! the logs emitted by dependencies using `log` as events within the context of
+//! the application's trace tree. See [that crate's documentation][log-tracer]
+//! for details.
+//!
+//! [log-tracer]: https://docs.rs/tracing-log/latest/tracing_log/#convert-log-records-to-tracing-events
+//!
+//! ## Related Crates
+//!
+//! In addition to `tracing` and `tracing-core`, the [`tokio-rs/tracing`] repository
+//! contains several additional crates designed to be used with the `tracing` ecosystem.
+//! This includes a collection of `Subscriber` implementations, as well as utility
+//! and adapter crates to assist in writing `Subscriber`s and instrumenting
+//! applications.
+//!
+//! In particular, the following crates are likely to be of interest:
+//!
+//! - [`tracing-futures`] provides a compatibility layer with the `futures`
+//!   crate, allowing spans to be attached to `Future`s, `Stream`s, and `Executor`s.
+//! - [`tracing-subscriber`] provides `Subscriber` implementations and
+//!   utilities for working with `Subscriber`s. This includes a [`FmtSubscriber`]
+//!   for logging formatted trace data to stdout, with similar
+//!   filtering and formatting to the [`env_logger`] crate.
+//! - [`tracing-log`] provides a compatibility layer with the [`log`] crate,
+//!   allowing log messages to be recorded as `tracing` `Event`s within the
+//!   trace tree. This is useful when a project using `tracing` has
+//!   dependencies which use `log`. Note that if you're using
+//!   `tracing-subscriber`'s `FmtSubscriber`, you don't need to depend on
+//!   `tracing-log` directly.
+//! - [`tracing-appender`] provides utilities for outputting tracing data,
+//!   including a file appender and non-blocking writer.
+//!
+//! Additionally, there are several third-party crates which are not
+//! maintained by the `tokio` project. These include:
+//!
+//! - [`tracing-timing`] implements inter-event timing metrics on top of `tracing`.
+//!   It provides a subscriber that records the time elapsed between pairs of
+//!   `tracing` events and generates histograms.
+//!
- [`tracing-opentelemetry`] provides a subscriber for emitting traces to +//! [OpenTelemetry]-compatible distributed tracing systems. +//! - [`tracing-honeycomb`] Provides a layer that reports traces spanning multiple machines to [honeycomb.io]. Backed by [`tracing-distributed`]. +//! - [`tracing-distributed`] Provides a generic implementation of a layer that reports traces spanning multiple machines to some backend. +//! - [`tracing-actix-web`] provides `tracing` integration for the `actix-web` web framework. +//! - [`tracing-actix`] provides `tracing` integration for the `actix` actor +//! framework. +//! - [`tracing-gelf`] implements a subscriber for exporting traces in Greylog +//! GELF format. +//! - [`tracing-coz`] provides integration with the [coz] causal profiler +//! (Linux-only). +//! - [`tracing-bunyan-formatter`] provides a layer implementation that reports events and spans +//! in [bunyan] format, enriched with timing information. +//! - [`tracing-wasm`] provides a `Subscriber`/`Layer` implementation that reports +//! events and spans via browser `console.log` and [User Timing API (`window.performance`)]. +//! - [`tide-tracing`] provides a [tide] middleware to trace all incoming requests and responses. +//! - [`test-log`] takes care of initializing `tracing` for tests, based on +//! environment variables with an `env_logger` compatible syntax. +//! - [`tracing-unwrap`] provides convenience methods to report failed unwraps +//! on `Result` or `Option` types to a `Subscriber`. +//! - [`diesel-tracing`] provides integration with [`diesel`] database connections. +//! - [`tracing-tracy`] provides a way to collect [Tracy] profiles in instrumented +//! applications. +//! - [`tracing-elastic-apm`] provides a layer for reporting traces to [Elastic APM]. +//! - [`tracing-etw`] provides a layer for emitting Windows [ETW] events. +//! - [`tracing-fluent-assertions`] provides a fluent assertions-style testing +//! framework for validating the behavior of `tracing` spans. +//! - [`sentry-tracing`] provides a layer for reporting events and traces to [Sentry]. +//! - [`tracing-forest`] provides a subscriber that preserves contextual coherence by +//! grouping together logs from the same spans during writing. +//! - [`tracing-loki`] provides a layer for shipping logs to [Grafana Loki]. +//! +//! If you're the maintainer of a `tracing` ecosystem crate not listed above, +//! please let us know! We'd love to add your project to the list! +//! +//! [`tracing-opentelemetry`]: https://crates.io/crates/tracing-opentelemetry +//! [OpenTelemetry]: https://opentelemetry.io/ +//! [`tracing-honeycomb`]: https://crates.io/crates/tracing-honeycomb +//! [`tracing-distributed`]: https://crates.io/crates/tracing-distributed +//! [honeycomb.io]: https://www.honeycomb.io/ +//! [`tracing-actix-web`]: https://crates.io/crates/tracing-actix-web +//! [`tracing-actix`]: https://crates.io/crates/tracing-actix +//! [`tracing-gelf`]: https://crates.io/crates/tracing-gelf +//! [`tracing-coz`]: https://crates.io/crates/tracing-coz +//! [coz]: https://github.com/plasma-umass/coz +//! [`tracing-bunyan-formatter`]: https://crates.io/crates/tracing-bunyan-formatter +//! [bunyan]: https://github.com/trentm/node-bunyan +//! [`tracing-wasm`]: https://docs.rs/tracing-wasm +//! [User Timing API (`window.performance`)]: https://developer.mozilla.org/en-US/docs/Web/API/User_Timing_API +//! [`tide-tracing`]: https://crates.io/crates/tide-tracing +//! [tide]: https://crates.io/crates/tide +//! 
[`test-log`]: https://crates.io/crates/test-log +//! [`tracing-unwrap`]: https://docs.rs/tracing-unwrap +//! [`diesel`]: https://crates.io/crates/diesel +//! [`diesel-tracing`]: https://crates.io/crates/diesel-tracing +//! [`tracing-tracy`]: https://crates.io/crates/tracing-tracy +//! [Tracy]: https://github.com/wolfpld/tracy +//! [`tracing-elastic-apm`]: https://crates.io/crates/tracing-elastic-apm +//! [Elastic APM]: https://www.elastic.co/apm +//! [`tracing-etw`]: https://github.com/microsoft/tracing-etw +//! [ETW]: https://docs.microsoft.com/en-us/windows/win32/etw/about-event-tracing +//! [`tracing-fluent-assertions`]: https://crates.io/crates/tracing-fluent-assertions +//! [`sentry-tracing`]: https://crates.io/crates/sentry-tracing +//! [Sentry]: https://sentry.io/welcome/ +//! [`tracing-forest`]: https://crates.io/crates/tracing-forest +//! [`tracing-loki`]: https://crates.io/crates/tracing-loki +//! [Grafana Loki]: https://grafana.com/oss/loki/ +//! +//!
+//! **Note**: Some of these ecosystem crates are currently
+//! unreleased and/or in earlier stages of development. They may be less stable
+//! than `tracing` and `tracing-core`.
+//! 
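+//! For instance, wiring up the [`FmtSubscriber`] mentioned above usually looks
+//! something like the sketch below (this uses `tracing-subscriber`'s builder
+//! API, which lives outside this crate and may differ between versions):
+//!
+//! ```rust,ignore
+//! use tracing::Level;
+//! use tracing_subscriber::FmtSubscriber;
+//!
+//! fn main() {
+//!     // Log formatted trace data to stdout, keeping events at INFO and above.
+//!     let subscriber = FmtSubscriber::builder()
+//!         .with_max_level(Level::INFO)
+//!         .finish();
+//!
+//!     tracing::subscriber::set_global_default(subscriber)
+//!         .expect("setting default subscriber failed");
+//!
+//!     tracing::info!("this event will be printed to stdout");
+//! }
+//! ```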
+//! +//! ## Crate Feature Flags +//! +//! The following crate [feature flags] are available: +//! +//! * A set of features controlling the [static verbosity level]. +//! * `log`: causes trace instrumentation points to emit [`log`] records as well +//! as trace events, if a default `tracing` subscriber has not been set. This +//! is intended for use in libraries whose users may be using either `tracing` +//! or `log`. +//! * `log-always`: Emit `log` records from all `tracing` spans and events, even +//! if a `tracing` subscriber has been set. This should be set only by +//! applications which intend to collect traces and logs separately; if an +//! adapter is used to convert `log` records into `tracing` events, this will +//! cause duplicate events to occur. +//! * `attributes`: Includes support for the `#[instrument]` attribute. +//! This is on by default, but does bring in the `syn` crate as a dependency, +//! which may add to the compile time of crates that do not already use it. +//! * `std`: Depend on the Rust standard library (enabled by default). +//! +//! `no_std` users may disable this feature with `default-features = false`: +//! +//! ```toml +//! [dependencies] +//! tracing = { version = "0.1.34", default-features = false } +//! ``` +//! +//!
+//! **Note**: `tracing`'s `no_std` support
+//! requires `liballoc`.
+//! 
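+//! As a concrete example of the flags above, a library that wants its
+//! diagnostics to remain visible to `log`-based consumers might enable the
+//! `log` feature (the version number here is illustrative):
+//!
+//! ```toml
+//! [dependencies]
+//! tracing = { version = "0.1", features = ["log"] }
+//! ```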
+//! +//! ### Unstable Features +//! +//! These feature flags enable **unstable** features. The public API may break in 0.1.x +//! releases. To enable these features, the `--cfg tracing_unstable` must be passed to +//! `rustc` when compiling. +//! +//! The following unstable feature flags are currently available: +//! +//! * `valuable`: Enables support for recording [field values] using the +//! [`valuable`] crate. +//! +//! #### Enabling Unstable Features +//! +//! The easiest way to set the `tracing_unstable` cfg is to use the `RUSTFLAGS` +//! env variable when running `cargo` commands: +//! +//! ```shell +//! RUSTFLAGS="--cfg tracing_unstable" cargo build +//! ``` +//! Alternatively, the following can be added to the `.cargo/config` file in a +//! project to automatically enable the cfg flag for that project: +//! +//! ```toml +//! [build] +//! rustflags = ["--cfg", "tracing_unstable"] +//! ``` +//! +//! [feature flags]: https://doc.rust-lang.org/cargo/reference/manifest.html#the-features-section +//! [field values]: crate::field +//! [`valuable`]: https://crates.io/crates/valuable +//! +//! ## Supported Rust Versions +//! +//! Tracing is built against the latest stable release. The minimum supported +//! version is 1.49. The current Tracing version is not guaranteed to build on +//! Rust versions earlier than the minimum supported version. +//! +//! Tracing follows the same compiler support policies as the rest of the Tokio +//! project. The current stable Rust compiler and the three most recent minor +//! versions before it will always be supported. For example, if the current +//! stable compiler version is 1.45, the minimum supported version will not be +//! increased past 1.42, three minor versions prior. Increasing the minimum +//! supported compiler version is not considered a semver breaking change as +//! long as doing so complies with this policy. +//! +//! [`log`]: https://docs.rs/log/0.4.6/log/ +//! [span]: mod@span +//! [spans]: mod@span +//! [`Span`]: span::Span +//! [`in_scope`]: span::Span::in_scope +//! [event]: Event +//! [events]: Event +//! [`Subscriber`]: subscriber::Subscriber +//! [Subscriber::event]: subscriber::Subscriber::event +//! [`enter`]: subscriber::Subscriber::enter +//! [`exit`]: subscriber::Subscriber::exit +//! [`enabled`]: subscriber::Subscriber::enabled +//! [metadata]: Metadata +//! [`field::display`]: field::display +//! [`field::debug`]: field::debug +//! [`set_global_default`]: subscriber::set_global_default +//! [`with_default`]: subscriber::with_default +//! [`tokio-rs/tracing`]: https://github.com/tokio-rs/tracing +//! [`tracing-futures`]: https://crates.io/crates/tracing-futures +//! [`tracing-subscriber`]: https://crates.io/crates/tracing-subscriber +//! [`tracing-log`]: https://crates.io/crates/tracing-log +//! [`tracing-timing`]: https://crates.io/crates/tracing-timing +//! [`tracing-appender`]: https://crates.io/crates/tracing-appender +//! [`env_logger`]: https://crates.io/crates/env_logger +//! [`FmtSubscriber`]: https://docs.rs/tracing-subscriber/latest/tracing_subscriber/fmt/struct.Subscriber.html +//! [static verbosity level]: level_filters#compile-time-filters +//! [instrument]: https://docs.rs/tracing-attributes/latest/tracing_attributes/attr.instrument.html +//! 
[flags]: #crate-feature-flags +#![cfg_attr(not(feature = "std"), no_std)] +#![cfg_attr(docsrs, feature(doc_cfg), deny(rustdoc::broken_intra_doc_links))] +#![doc(html_root_url = "https://docs.rs/tracing/0.1.34")] +#![doc( + html_logo_url = "https://raw.githubusercontent.com/tokio-rs/tracing/master/assets/logo-type.png", + issue_tracker_base_url = "https://github.com/tokio-rs/tracing/issues/" +)] +#![warn( + missing_debug_implementations, + missing_docs, + rust_2018_idioms, + unreachable_pub, + bad_style, + const_err, + dead_code, + improper_ctypes, + non_shorthand_field_patterns, + no_mangle_generic_items, + overflowing_literals, + path_statements, + patterns_in_fns_without_body, + private_in_public, + unconditional_recursion, + unused, + unused_allocation, + unused_comparisons, + unused_parens, + while_true +)] + +#[cfg(not(feature = "std"))] +extern crate alloc; + +// Somehow this `use` statement is necessary for us to re-export the `core` +// macros on Rust 1.26.0. I'm not sure how this makes it work, but it does. +#[allow(unused_imports)] +#[doc(hidden)] +use tracing_core::*; + +#[doc(inline)] +pub use self::instrument::Instrument; +pub use self::{dispatcher::Dispatch, event::Event, field::Value, subscriber::Subscriber}; + +#[doc(hidden)] +pub use self::span::Id; + +#[doc(hidden)] +pub use tracing_core::{ + callsite::{self, Callsite}, + metadata, +}; +pub use tracing_core::{event, Level, Metadata}; + +#[doc(inline)] +pub use self::span::Span; +#[cfg(feature = "attributes")] +#[cfg_attr(docsrs, doc(cfg(feature = "attributes")))] +#[doc(inline)] +pub use tracing_attributes::instrument; + +#[macro_use] +mod macros; + +pub mod dispatcher; +pub mod field; +/// Attach a span to a `std::future::Future`. +pub mod instrument; +pub mod level_filters; +pub mod span; +pub(crate) mod stdlib; +pub mod subscriber; + +#[doc(hidden)] +pub mod __macro_support { + pub use crate::callsite::Callsite; + use crate::stdlib::{ + fmt, + sync::atomic::{AtomicUsize, Ordering}, + }; + use crate::{subscriber::Interest, Metadata}; + pub use core::concat; + use tracing_core::Once; + + /// Callsite implementation used by macro-generated code. + /// + /// /!\ WARNING: This is *not* a stable API! /!\ + /// This type, and all code contained in the `__macro_support` module, is + /// a *private* API of `tracing`. It is exposed publicly because it is used + /// by the `tracing` macros, but it is not part of the stable versioned API. + /// Breaking changes to this module may occur in small-numbered versions + /// without warning. + pub struct MacroCallsite { + interest: AtomicUsize, + meta: &'static Metadata<'static>, + registration: Once, + } + + impl MacroCallsite { + /// Returns a new `MacroCallsite` with the specified `Metadata`. + /// + /// /!\ WARNING: This is *not* a stable API! /!\ + /// This method, and all code contained in the `__macro_support` module, is + /// a *private* API of `tracing`. It is exposed publicly because it is used + /// by the `tracing` macros, but it is not part of the stable versioned API. + /// Breaking changes to this module may occur in small-numbered versions + /// without warning. + pub const fn new(meta: &'static Metadata<'static>) -> Self { + Self { + interest: AtomicUsize::new(0xDEAD), + meta, + registration: Once::new(), + } + } + + /// Registers this callsite with the global callsite registry. + /// + /// If the callsite is already registered, this does nothing. + /// + /// /!\ WARNING: This is *not* a stable API! 
/!\ + /// This method, and all code contained in the `__macro_support` module, is + /// a *private* API of `tracing`. It is exposed publicly because it is used + /// by the `tracing` macros, but it is not part of the stable versioned API. + /// Breaking changes to this module may occur in small-numbered versions + /// without warning. + #[inline(never)] + // This only happens once (or if the cached interest value was corrupted). + #[cold] + pub fn register(&'static self) -> Interest { + self.registration + .call_once(|| crate::callsite::register(self)); + match self.interest.load(Ordering::Relaxed) { + 0 => Interest::never(), + 2 => Interest::always(), + _ => Interest::sometimes(), + } + } + + /// Returns the callsite's cached Interest, or registers it for the + /// first time if it has not yet been registered. + /// + /// /!\ WARNING: This is *not* a stable API! /!\ + /// This method, and all code contained in the `__macro_support` module, is + /// a *private* API of `tracing`. It is exposed publicly because it is used + /// by the `tracing` macros, but it is not part of the stable versioned API. + /// Breaking changes to this module may occur in small-numbered versions + /// without warning. + #[inline] + pub fn interest(&'static self) -> Interest { + match self.interest.load(Ordering::Relaxed) { + 0 => Interest::never(), + 1 => Interest::sometimes(), + 2 => Interest::always(), + _ => self.register(), + } + } + + pub fn is_enabled(&self, interest: Interest) -> bool { + interest.is_always() + || crate::dispatcher::get_default(|default| default.enabled(self.meta)) + } + + #[inline] + #[cfg(feature = "log")] + pub fn disabled_span(&self) -> crate::Span { + crate::Span::new_disabled(self.meta) + } + + #[inline] + #[cfg(not(feature = "log"))] + pub fn disabled_span(&self) -> crate::Span { + crate::Span::none() + } + + #[cfg(feature = "log")] + pub fn log( + &self, + logger: &'static dyn log::Log, + log_meta: log::Metadata<'_>, + values: &tracing_core::field::ValueSet<'_>, + ) { + let meta = self.metadata(); + logger.log( + &crate::log::Record::builder() + .file(meta.file()) + .module_path(meta.module_path()) + .line(meta.line()) + .metadata(log_meta) + .args(format_args!( + "{}", + crate::log::LogValueSet { + values, + is_first: true + } + )) + .build(), + ); + } + } + + impl Callsite for MacroCallsite { + fn set_interest(&self, interest: Interest) { + let interest = match () { + _ if interest.is_never() => 0, + _ if interest.is_always() => 2, + _ => 1, + }; + self.interest.store(interest, Ordering::SeqCst); + } + + #[inline(always)] + fn metadata(&self) -> &Metadata<'static> { + self.meta + } + } + + impl fmt::Debug for MacroCallsite { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("MacroCallsite") + .field("interest", &self.interest) + .field("meta", &self.meta) + .field("registration", &self.registration) + .finish() + } + } +} + +#[cfg(feature = "log")] +#[doc(hidden)] +pub mod log { + use core::fmt; + pub use log::*; + use tracing_core::field::{Field, ValueSet, Visit}; + + /// Utility to format [`ValueSet`]s for logging. 
+ pub(crate) struct LogValueSet<'a> { + pub(crate) values: &'a ValueSet<'a>, + pub(crate) is_first: bool, + } + + impl<'a> fmt::Display for LogValueSet<'a> { + #[inline] + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + struct LogVisitor<'a, 'b> { + f: &'a mut fmt::Formatter<'b>, + is_first: bool, + result: fmt::Result, + } + + impl Visit for LogVisitor<'_, '_> { + fn record_debug(&mut self, field: &Field, value: &dyn fmt::Debug) { + let res = if self.is_first { + self.is_first = false; + if field.name() == "message" { + write!(self.f, "{:?}", value) + } else { + write!(self.f, "{}={:?}", field.name(), value) + } + } else { + write!(self.f, " {}={:?}", field.name(), value) + }; + if let Err(err) = res { + self.result = self.result.and(Err(err)); + } + } + + fn record_str(&mut self, field: &Field, value: &str) { + if field.name() == "message" { + self.record_debug(field, &format_args!("{}", value)) + } else { + self.record_debug(field, &value) + } + } + } + + let mut visit = LogVisitor { + f, + is_first: self.is_first, + result: Ok(()), + }; + self.values.record(&mut visit); + visit.result + } + } +} + +mod sealed { + pub trait Sealed {} +} diff --git a/third_party/rust/tracing/src/macros.rs b/third_party/rust/tracing/src/macros.rs new file mode 100644 index 000000000000..825c0d86893d --- /dev/null +++ b/third_party/rust/tracing/src/macros.rs @@ -0,0 +1,2500 @@ +/// Constructs a new span. +/// +/// See [the top-level documentation][lib] for details on the syntax accepted by +/// this macro. +/// +/// [lib]: crate#using-the-macros +/// +/// # Examples +/// +/// Creating a new span: +/// ``` +/// # use tracing::{span, Level}; +/// # fn main() { +/// let span = span!(Level::TRACE, "my span"); +/// let _enter = span.enter(); +/// // do work inside the span... +/// # } +/// ``` +#[macro_export] +macro_rules! span { + (target: $target:expr, parent: $parent:expr, $lvl:expr, $name:expr) => { + $crate::span!(target: $target, parent: $parent, $lvl, $name,) + }; + (target: $target:expr, parent: $parent:expr, $lvl:expr, $name:expr, $($fields:tt)*) => { + { + use $crate::__macro_support::Callsite as _; + static CALLSITE: $crate::__macro_support::MacroCallsite = $crate::callsite2! { + name: $name, + kind: $crate::metadata::Kind::SPAN, + target: $target, + level: $lvl, + fields: $($fields)* + }; + let mut interest = $crate::subscriber::Interest::never(); + if $crate::level_enabled!($lvl) + && { interest = CALLSITE.interest(); !interest.is_never() } + && CALLSITE.is_enabled(interest) + { + let meta = CALLSITE.metadata(); + // span with explicit parent + $crate::Span::child_of( + $parent, + meta, + &$crate::valueset!(meta.fields(), $($fields)*), + ) + } else { + let span = CALLSITE.disabled_span(); + $crate::if_log_enabled! { $lvl, { + span.record_all(&$crate::valueset!(CALLSITE.metadata().fields(), $($fields)*)); + }}; + span + } + } + }; + (target: $target:expr, $lvl:expr, $name:expr, $($fields:tt)*) => { + { + use $crate::__macro_support::Callsite as _; + static CALLSITE: $crate::__macro_support::MacroCallsite = $crate::callsite2! 
{ + name: $name, + kind: $crate::metadata::Kind::SPAN, + target: $target, + level: $lvl, + fields: $($fields)* + }; + let mut interest = $crate::subscriber::Interest::never(); + if $crate::level_enabled!($lvl) + && { interest = CALLSITE.interest(); !interest.is_never() } + && CALLSITE.is_enabled(interest) + { + let meta = CALLSITE.metadata(); + // span with contextual parent + $crate::Span::new( + meta, + &$crate::valueset!(meta.fields(), $($fields)*), + ) + } else { + let span = CALLSITE.disabled_span(); + $crate::if_log_enabled! { $lvl, { + span.record_all(&$crate::valueset!(CALLSITE.metadata().fields(), $($fields)*)); + }}; + span + } + } + }; + (target: $target:expr, parent: $parent:expr, $lvl:expr, $name:expr) => { + $crate::span!(target: $target, parent: $parent, $lvl, $name,) + }; + (parent: $parent:expr, $lvl:expr, $name:expr, $($fields:tt)*) => { + $crate::span!( + target: module_path!(), + parent: $parent, + $lvl, + $name, + $($fields)* + ) + }; + (parent: $parent:expr, $lvl:expr, $name:expr) => { + $crate::span!( + target: module_path!(), + parent: $parent, + $lvl, + $name, + ) + }; + (target: $target:expr, $lvl:expr, $name:expr, $($fields:tt)*) => { + $crate::span!( + target: $target, + $lvl, + $name, + $($fields)* + ) + }; + (target: $target:expr, $lvl:expr, $name:expr) => { + $crate::span!(target: $target, $lvl, $name,) + }; + ($lvl:expr, $name:expr, $($fields:tt)*) => { + $crate::span!( + target: module_path!(), + $lvl, + $name, + $($fields)* + ) + }; + ($lvl:expr, $name:expr) => { + $crate::span!( + target: module_path!(), + $lvl, + $name, + ) + }; +} + +/// Constructs a span at the trace level. +/// +/// [Fields] and [attributes] are set using the same syntax as the [`span!`] +/// macro. +/// +/// See [the top-level documentation][lib] for details on the syntax accepted by +/// this macro. +/// +/// [lib]: crate#using-the-macros +/// [attributes]: crate#configuring-attributes +/// [Fields]: crate#recording-fields +/// [`span!`]: crate::span! +/// +/// # Examples +/// +/// ```rust +/// # use tracing::{trace_span, span, Level}; +/// # fn main() { +/// trace_span!("my_span"); +/// // is equivalent to: +/// span!(Level::TRACE, "my_span"); +/// # } +/// ``` +/// +/// ```rust +/// # use tracing::{trace_span, span, Level}; +/// # fn main() { +/// let span = trace_span!("my span"); +/// span.in_scope(|| { +/// // do work inside the span... +/// }); +/// # } +/// ``` +#[macro_export] +macro_rules! trace_span { + (target: $target:expr, parent: $parent:expr, $name:expr, $($field:tt)*) => { + $crate::span!( + target: $target, + parent: $parent, + $crate::Level::TRACE, + $name, + $($field)* + ) + }; + (target: $target:expr, parent: $parent:expr, $name:expr) => { + $crate::trace_span!(target: $target, parent: $parent, $name,) + }; + (parent: $parent:expr, $name:expr, $($field:tt)*) => { + $crate::span!( + target: module_path!(), + parent: $parent, + $crate::Level::TRACE, + $name, + $($field)* + ) + }; + (parent: $parent:expr, $name:expr) => { + $crate::trace_span!(parent: $parent, $name,) + }; + (target: $target:expr, $name:expr, $($field:tt)*) => { + $crate::span!( + target: $target, + $crate::Level::TRACE, + $name, + $($field)* + ) + }; + (target: $target:expr, $name:expr) => { + $crate::trace_span!(target: $target, $name,) + }; + ($name:expr, $($field:tt)*) => { + $crate::span!( + target: module_path!(), + $crate::Level::TRACE, + $name, + $($field)* + ) + }; + ($name:expr) => { $crate::trace_span!($name,) }; +} + +/// Constructs a span at the debug level. 
+/// +/// [Fields] and [attributes] are set using the same syntax as the [`span!`] +/// macro. +/// +/// See [the top-level documentation][lib] for details on the syntax accepted by +/// this macro. +/// +/// [lib]: crate#using-the-macros +/// [attributes]: crate#configuring-attributes +/// [Fields]: crate#recording-fields +/// [`span!`]: crate::span! +/// +/// # Examples +/// +/// ```rust +/// # use tracing::{debug_span, span, Level}; +/// # fn main() { +/// debug_span!("my_span"); +/// // is equivalent to: +/// span!(Level::DEBUG, "my_span"); +/// # } +/// ``` +/// +/// ```rust +/// # use tracing::debug_span; +/// # fn main() { +/// let span = debug_span!("my span"); +/// span.in_scope(|| { +/// // do work inside the span... +/// }); +/// # } +/// ``` +#[macro_export] +macro_rules! debug_span { + (target: $target:expr, parent: $parent:expr, $name:expr, $($field:tt)*) => { + $crate::span!( + target: $target, + parent: $parent, + $crate::Level::DEBUG, + $name, + $($field)* + ) + }; + (target: $target:expr, parent: $parent:expr, $name:expr) => { + $crate::debug_span!(target: $target, parent: $parent, $name,) + }; + (parent: $parent:expr, $name:expr, $($field:tt)*) => { + $crate::span!( + target: module_path!(), + parent: $parent, + $crate::Level::DEBUG, + $name, + $($field)* + ) + }; + (parent: $parent:expr, $name:expr) => { + $crate::debug_span!(parent: $parent, $name,) + }; + (target: $target:expr, $name:expr, $($field:tt)*) => { + $crate::span!( + target: $target, + $crate::Level::DEBUG, + $name, + $($field)* + ) + }; + (target: $target:expr, $name:expr) => { + $crate::debug_span!(target: $target, $name,) + }; + ($name:expr, $($field:tt)*) => { + $crate::span!( + target: module_path!(), + $crate::Level::DEBUG, + $name, + $($field)* + ) + }; + ($name:expr) => {$crate::debug_span!($name,)}; +} + +/// Constructs a span at the info level. +/// +/// [Fields] and [attributes] are set using the same syntax as the [`span!`] +/// macro. +/// +/// See [the top-level documentation][lib] for details on the syntax accepted by +/// this macro. +/// +/// [lib]: crate#using-the-macros +/// [attributes]: crate#configuring-attributes +/// [Fields]: crate#recording-fields +/// [`span!`]: crate::span! +/// +/// # Examples +/// +/// ```rust +/// # use tracing::{span, info_span, Level}; +/// # fn main() { +/// info_span!("my_span"); +/// // is equivalent to: +/// span!(Level::INFO, "my_span"); +/// # } +/// ``` +/// +/// ```rust +/// # use tracing::info_span; +/// # fn main() { +/// let span = info_span!("my span"); +/// span.in_scope(|| { +/// // do work inside the span... +/// }); +/// # } +/// ``` +#[macro_export] +macro_rules! 
info_span { + (target: $target:expr, parent: $parent:expr, $name:expr, $($field:tt)*) => { + $crate::span!( + target: $target, + parent: $parent, + $crate::Level::INFO, + $name, + $($field)* + ) + }; + (target: $target:expr, parent: $parent:expr, $name:expr) => { + $crate::info_span!(target: $target, parent: $parent, $name,) + }; + (parent: $parent:expr, $name:expr, $($field:tt)*) => { + $crate::span!( + target: module_path!(), + parent: $parent, + $crate::Level::INFO, + $name, + $($field)* + ) + }; + (parent: $parent:expr, $name:expr) => { + $crate::info_span!(parent: $parent, $name,) + }; + (target: $target:expr, $name:expr, $($field:tt)*) => { + $crate::span!( + target: $target, + $crate::Level::INFO, + $name, + $($field)* + ) + }; + (target: $target:expr, $name:expr) => { + $crate::info_span!(target: $target, $name,) + }; + ($name:expr, $($field:tt)*) => { + $crate::span!( + target: module_path!(), + $crate::Level::INFO, + $name, + $($field)* + ) + }; + ($name:expr) => {$crate::info_span!($name,)}; +} + +/// Constructs a span at the warn level. +/// +/// [Fields] and [attributes] are set using the same syntax as the [`span!`] +/// macro. +/// +/// See [the top-level documentation][lib] for details on the syntax accepted by +/// this macro. +/// +/// [lib]: crate#using-the-macros +/// [attributes]: crate#configuring-attributes +/// [Fields]: crate#recording-fields +/// [`span!`]: crate::span! +/// +/// # Examples +/// +/// ```rust +/// # use tracing::{warn_span, span, Level}; +/// # fn main() { +/// warn_span!("my_span"); +/// // is equivalent to: +/// span!(Level::WARN, "my_span"); +/// # } +/// ``` +/// +/// ```rust +/// use tracing::warn_span; +/// # fn main() { +/// let span = warn_span!("my span"); +/// span.in_scope(|| { +/// // do work inside the span... +/// }); +/// # } +/// ``` +#[macro_export] +macro_rules! warn_span { + (target: $target:expr, parent: $parent:expr, $name:expr, $($field:tt)*) => { + $crate::span!( + target: $target, + parent: $parent, + $crate::Level::WARN, + $name, + $($field)* + ) + }; + (target: $target:expr, parent: $parent:expr, $name:expr) => { + $crate::warn_span!(target: $target, parent: $parent, $name,) + }; + (parent: $parent:expr, $name:expr, $($field:tt)*) => { + $crate::span!( + target: module_path!(), + parent: $parent, + $crate::Level::WARN, + $name, + $($field)* + ) + }; + (parent: $parent:expr, $name:expr) => { + $crate::warn_span!(parent: $parent, $name,) + }; + (target: $target:expr, $name:expr, $($field:tt)*) => { + $crate::span!( + target: $target, + $crate::Level::WARN, + $name, + $($field)* + ) + }; + (target: $target:expr, $name:expr) => { + $crate::warn_span!(target: $target, $name,) + }; + ($name:expr, $($field:tt)*) => { + $crate::span!( + target: module_path!(), + $crate::Level::WARN, + $name, + $($field)* + ) + }; + ($name:expr) => {$crate::warn_span!($name,)}; +} +/// Constructs a span at the error level. +/// +/// [Fields] and [attributes] are set using the same syntax as the [`span!`] +/// macro. +/// +/// See [the top-level documentation][lib] for details on the syntax accepted by +/// this macro. +/// +/// [lib]: crate#using-the-macros +/// [attributes]: crate#configuring-attributes +/// [Fields]: crate#recording-fields +/// [`span!`]: crate::span! 
+/// +/// # Examples +/// +/// ```rust +/// # use tracing::{span, error_span, Level}; +/// # fn main() { +/// error_span!("my_span"); +/// // is equivalent to: +/// span!(Level::ERROR, "my_span"); +/// # } +/// ``` +/// +/// ```rust +/// # use tracing::error_span; +/// # fn main() { +/// let span = error_span!("my span"); +/// span.in_scope(|| { +/// // do work inside the span... +/// }); +/// # } +/// ``` +#[macro_export] +macro_rules! error_span { + (target: $target:expr, parent: $parent:expr, $name:expr, $($field:tt)*) => { + $crate::span!( + target: $target, + parent: $parent, + $crate::Level::ERROR, + $name, + $($field)* + ) + }; + (target: $target:expr, parent: $parent:expr, $name:expr) => { + $crate::error_span!(target: $target, parent: $parent, $name,) + }; + (parent: $parent:expr, $name:expr, $($field:tt)*) => { + $crate::span!( + target: module_path!(), + parent: $parent, + $crate::Level::ERROR, + $name, + $($field)* + ) + }; + (parent: $parent:expr, $name:expr) => { + $crate::error_span!(parent: $parent, $name,) + }; + (target: $target:expr, $name:expr, $($field:tt)*) => { + $crate::span!( + target: $target, + $crate::Level::ERROR, + $name, + $($field)* + ) + }; + (target: $target:expr, $name:expr) => { + $crate::error_span!(target: $target, $name,) + }; + ($name:expr, $($field:tt)*) => { + $crate::span!( + target: module_path!(), + $crate::Level::ERROR, + $name, + $($field)* + ) + }; + ($name:expr) => {$crate::error_span!($name,)}; +} + +/// Constructs a new `Event`. +/// +/// The event macro is invoked with a `Level` and up to 32 key-value fields. +/// Optionally, a format string and arguments may follow the fields; this will +/// be used to construct an implicit field named "message". +/// +/// See [the top-level documentation][lib] for details on the syntax accepted by +/// this macro. +/// +/// [lib]: crate#using-the-macros +/// +/// # Examples +/// +/// ```rust +/// use tracing::{event, Level}; +/// +/// # fn main() { +/// let data = (42, "forty-two"); +/// let private_data = "private"; +/// let error = "a bad error"; +/// +/// event!(Level::ERROR, %error, "Received error"); +/// event!( +/// target: "app_events", +/// Level::WARN, +/// private_data, +/// ?data, +/// "App warning: {}", +/// error +/// ); +/// event!(Level::INFO, the_answer = data.0); +/// # } +/// ``` +/// +// /// Note that *unlike `span!`*, `event!` requires a value for all fields. As +// /// events are recorded immediately when the macro is invoked, there is no +// /// opportunity for fields to be recorded later. A trailing comma on the final +// /// field is valid. +// /// +// /// For example, the following does not compile: +// /// ```rust,compile_fail +// /// # use tracing::{Level, event}; +// /// # fn main() { +// /// event!(Level::INFO, foo = 5, bad_field, bar = "hello") +// /// #} +// /// ``` +#[macro_export] +macro_rules! event { + (target: $target:expr, parent: $parent:expr, $lvl:expr, { $($fields:tt)* } )=> ({ + use $crate::__macro_support::Callsite as _; + static CALLSITE: $crate::__macro_support::MacroCallsite = $crate::callsite2! 
{ + name: $crate::__macro_support::concat!( + "event ", + file!(), + ":", + line!() + ), + kind: $crate::metadata::Kind::EVENT, + target: $target, + level: $lvl, + fields: $($fields)* + }; + + let enabled = $crate::level_enabled!($lvl) && { + let interest = CALLSITE.interest(); + !interest.is_never() && CALLSITE.is_enabled(interest) + }; + if enabled { + (|value_set: $crate::field::ValueSet| { + $crate::__tracing_log!( + $lvl, + CALLSITE, + &value_set + ); + let meta = CALLSITE.metadata(); + // event with explicit parent + $crate::Event::child_of( + $parent, + meta, + &value_set + ); + })($crate::valueset!(CALLSITE.metadata().fields(), $($fields)*)); + } else { + $crate::__tracing_log!( + $lvl, + CALLSITE, + &$crate::valueset!(CALLSITE.metadata().fields(), $($fields)*) + ); + } + }); + + (target: $target:expr, parent: $parent:expr, $lvl:expr, { $($fields:tt)* }, $($arg:tt)+ ) => ( + $crate::event!( + target: $target, + parent: $parent, + $lvl, + { message = format_args!($($arg)+), $($fields)* } + ) + ); + (target: $target:expr, parent: $parent:expr, $lvl:expr, $($k:ident).+ = $($fields:tt)* ) => ( + $crate::event!(target: $target, parent: $parent, $lvl, { $($k).+ = $($fields)* }) + ); + (target: $target:expr, parent: $parent:expr, $lvl:expr, $($arg:tt)+) => ( + $crate::event!(target: $target, parent: $parent, $lvl, { $($arg)+ }) + ); + (target: $target:expr, $lvl:expr, { $($fields:tt)* } )=> ({ + use $crate::__macro_support::Callsite as _; + static CALLSITE: $crate::__macro_support::MacroCallsite = $crate::callsite2! { + name: $crate::__macro_support::concat!( + "event ", + file!(), + ":", + line!() + ), + kind: $crate::metadata::Kind::EVENT, + target: $target, + level: $lvl, + fields: $($fields)* + }; + let enabled = $crate::level_enabled!($lvl) && { + let interest = CALLSITE.interest(); + !interest.is_never() && CALLSITE.is_enabled(interest) + }; + if enabled { + (|value_set: $crate::field::ValueSet| { + let meta = CALLSITE.metadata(); + // event with contextual parent + $crate::Event::dispatch( + meta, + &value_set + ); + $crate::__tracing_log!( + $lvl, + CALLSITE, + &value_set + ); + })($crate::valueset!(CALLSITE.metadata().fields(), $($fields)*)); + } else { + $crate::__tracing_log!( + $lvl, + CALLSITE, + &$crate::valueset!(CALLSITE.metadata().fields(), $($fields)*) + ); + } + }); + (target: $target:expr, $lvl:expr, { $($fields:tt)* }, $($arg:tt)+ ) => ( + $crate::event!( + target: $target, + $lvl, + { message = format_args!($($arg)+), $($fields)* } + ) + ); + (target: $target:expr, $lvl:expr, $($k:ident).+ = $($fields:tt)* ) => ( + $crate::event!(target: $target, $lvl, { $($k).+ = $($fields)* }) + ); + (target: $target:expr, $lvl:expr, $($arg:tt)+ ) => ( + $crate::event!(target: $target, $lvl, { $($arg)+ }) + ); + (parent: $parent:expr, $lvl:expr, { $($fields:tt)* }, $($arg:tt)+ ) => ( + $crate::event!( + target: module_path!(), + parent: $parent, + $lvl, + { message = format_args!($($arg)+), $($fields)* } + ) + ); + (parent: $parent:expr, $lvl:expr, $($k:ident).+ = $($field:tt)*) => ( + $crate::event!( + target: module_path!(), + parent: $parent, + $lvl, + { $($k).+ = $($field)*} + ) + ); + (parent: $parent:expr, $lvl:expr, ?$($k:ident).+ = $($field:tt)*) => ( + $crate::event!( + target: module_path!(), + parent: $parent, + $lvl, + { ?$($k).+ = $($field)*} + ) + ); + (parent: $parent:expr, $lvl:expr, %$($k:ident).+ = $($field:tt)*) => ( + $crate::event!( + target: module_path!(), + parent: $parent, + $lvl, + { %$($k).+ = $($field)*} + ) + ); + (parent: $parent:expr, $lvl:expr, 
$($k:ident).+, $($field:tt)*) => ( + $crate::event!( + target: module_path!(), + parent: $parent, + $lvl, + { $($k).+, $($field)*} + ) + ); + (parent: $parent:expr, $lvl:expr, %$($k:ident).+, $($field:tt)*) => ( + $crate::event!( + target: module_path!(), + parent: $parent, + $lvl, + { %$($k).+, $($field)*} + ) + ); + (parent: $parent:expr, $lvl:expr, ?$($k:ident).+, $($field:tt)*) => ( + $crate::event!( + target: module_path!(), + parent: $parent, + $lvl, + { ?$($k).+, $($field)*} + ) + ); + (parent: $parent:expr, $lvl:expr, $($arg:tt)+ ) => ( + $crate::event!(target: module_path!(), parent: $parent, $lvl, { $($arg)+ }) + ); + ( $lvl:expr, { $($fields:tt)* }, $($arg:tt)+ ) => ( + $crate::event!( + target: module_path!(), + $lvl, + { message = format_args!($($arg)+), $($fields)* } + ) + ); + ( $lvl:expr, { $($fields:tt)* }, $($arg:tt)+ ) => ( + $crate::event!( + target: module_path!(), + $lvl, + { message = format_args!($($arg)+), $($fields)* } + ) + ); + ($lvl:expr, $($k:ident).+ = $($field:tt)*) => ( + $crate::event!( + target: module_path!(), + $lvl, + { $($k).+ = $($field)*} + ) + ); + ($lvl:expr, $($k:ident).+, $($field:tt)*) => ( + $crate::event!( + target: module_path!(), + $lvl, + { $($k).+, $($field)*} + ) + ); + ($lvl:expr, ?$($k:ident).+, $($field:tt)*) => ( + $crate::event!( + target: module_path!(), + $lvl, + { ?$($k).+, $($field)*} + ) + ); + ($lvl:expr, %$($k:ident).+, $($field:tt)*) => ( + $crate::event!( + target: module_path!(), + $lvl, + { %$($k).+, $($field)*} + ) + ); + ($lvl:expr, ?$($k:ident).+) => ( + $crate::event!($lvl, ?$($k).+,) + ); + ($lvl:expr, %$($k:ident).+) => ( + $crate::event!($lvl, %$($k).+,) + ); + ($lvl:expr, $($k:ident).+) => ( + $crate::event!($lvl, $($k).+,) + ); + ( $lvl:expr, $($arg:tt)+ ) => ( + $crate::event!(target: module_path!(), $lvl, { $($arg)+ }) + ); +} + +/// Tests whether an event with the specified level and target would be enabled. +/// +/// This is similar to [`enabled!`], but queries the current subscriber specifically for +/// an event, whereas [`enabled!`] queries for an event _or_ span. +/// +/// See the documentation for [`enabled!]` for more details on using this macro. +/// See also [`span_enabled!`]. +/// +/// # Examples +/// +/// ```rust +/// # use tracing::{event_enabled, Level}; +/// if event_enabled!(target: "my_crate", Level::DEBUG) { +/// // some expensive work... +/// } +/// // simpler +/// if event_enabled!(Level::DEBUG) { +/// // some expensive work... +/// } +/// // with fields +/// if event_enabled!(Level::DEBUG, foo_field) { +/// // some expensive work... +/// } +/// ``` +/// +#[macro_export] +macro_rules! event_enabled { + ($($rest:tt)*)=> ( + $crate::enabled!(kind: $crate::metadata::Kind::EVENT, $($rest)*) + ) +} + +/// Tests whether a span with the specified level and target would be enabled. +/// +/// This is similar to [`enabled!`], but queries the current subscriber specifically for +/// an event, whereas [`enabled!`] queries for an event _or_ span. +/// +/// See the documentation for [`enabled!]` for more details on using this macro. +/// See also [`span_enabled!`]. +/// +/// # Examples +/// +/// ```rust +/// # use tracing::{span_enabled, Level}; +/// if span_enabled!(target: "my_crate", Level::DEBUG) { +/// // some expensive work... +/// } +/// // simpler +/// if span_enabled!(Level::DEBUG) { +/// // some expensive work... +/// } +/// // with fields +/// if span_enabled!(Level::DEBUG, foo_field) { +/// // some expensive work... +/// } +/// ``` +/// +#[macro_export] +macro_rules! 
span_enabled { + ($($rest:tt)*)=> ( + $crate::enabled!(kind: $crate::metadata::Kind::SPAN, $($rest)*) + ) +} + +/// Checks whether a span or event is [enabled] based on the provided [metadata]. +/// +/// [enabled]: crate::Subscriber::enabled +/// [metadata]: crate::Metadata +/// +/// This macro is a specialized tool: it is intended to be used prior +/// to an expensive computation required *just* for that event, but +/// *cannot* be done as part of an argument to that event, such as +/// when multiple events are emitted (e.g., iterating over a collection +/// and emitting an event for each item). +/// +/// # Usage +/// +/// [Subscribers] can make filtering decisions based all the data included in a +/// span or event's [`Metadata`]. This means that it is possible for `enabled!` +/// to return a _false positive_ (indicating that something would be enabled +/// when it actually would not be) or a _false negative_ (indicating that +/// something would be disabled when it would actually be enabled). +/// +/// [Subscribers]: crate::subscriber::Subscriber +/// [`Metadata`]: crate::metadata::Metadata +/// +/// This occurs when a subscriber is using a _more specific_ filter than the +/// metadata provided to the `enabled!` macro. Some situations that can result +/// in false positives or false negatives include: +/// +/// - If a subscriber is using a filter which may enable a span or event based +/// on field names, but `enabled!` is invoked without listing field names, +/// `enabled!` may return a false negative if a specific field name would +/// cause the subscriber to enable something that would otherwise be disabled. +/// - If a subscriber is using a filter which enables or disables specific events by +/// file path and line number, a particular event may be enabled/disabled +/// even if an `enabled!` invocation with the same level, target, and fields +/// indicated otherwise. +/// - The subscriber can choose to enable _only_ spans or _only_ events, which `enabled` +/// will not reflect. +/// +/// `enabled!()` requires a [level](crate::Level) argument, an optional `target:` +/// argument, and an optional set of field names. If the fields are not provided, +/// they are considered to be unknown. `enabled!` attempts to match the +/// syntax of `event!()` as closely as possible, which can be seen in the +/// examples below. +/// +/// # Examples +/// +/// If the current subscriber is interested in recording `DEBUG`-level spans and +/// events in the current file and module path, this will evaluate to true: +/// ```rust +/// use tracing::{enabled, Level}; +/// +/// if enabled!(Level::DEBUG) { +/// // some expensive work... +/// } +/// ``` +/// +/// If the current subscriber is interested in recording spans and events +/// in the current file and module path, with the target "my_crate", and at the +/// level `DEBUG`, this will evaluate to true: +/// ```rust +/// # use tracing::{enabled, Level}; +/// if enabled!(target: "my_crate", Level::DEBUG) { +/// // some expensive work... +/// } +/// ``` +/// +/// If the current subscriber is interested in recording spans and events +/// in the current file and module path, with the target "my_crate", at +/// the level `DEBUG`, and with a field named "hello", this will evaluate +/// to true: +/// +/// ```rust +/// # use tracing::{enabled, Level}; +/// if enabled!(target: "my_crate", Level::DEBUG, hello) { +/// // some expensive work... 
+/// } +/// ``` +/// +/// # Alternatives +/// +/// `enabled!` queries subscribers with [`Metadata`] where +/// [`is_event`] and [`is_span`] both return `false`. Alternatively, +/// use [`event_enabled!`] or [`span_enabled!`] to ensure one of these +/// returns true. +/// +/// +/// [`Metadata`]: crate::Metadata +/// [`is_event`]: crate::Metadata::is_event +/// [`is_span`]: crate::Metadata::is_span +/// +#[macro_export] +macro_rules! enabled { + (kind: $kind:expr, target: $target:expr, $lvl:expr, { $($fields:tt)* } )=> ({ + if $crate::level_enabled!($lvl) { + use $crate::__macro_support::Callsite as _; + static CALLSITE: $crate::__macro_support::MacroCallsite = $crate::callsite2! { + name: $crate::__macro_support::concat!( + "enabled ", + file!(), + ":", + line!() + ), + kind: $kind.hint(), + target: $target, + level: $lvl, + fields: $($fields)* + }; + let interest = CALLSITE.interest(); + if !interest.is_never() && CALLSITE.is_enabled(interest) { + let meta = CALLSITE.metadata(); + $crate::dispatcher::get_default(|current| current.enabled(meta)) + } else { + false + } + } else { + false + } + }); + // Just target and level + (kind: $kind:expr, target: $target:expr, $lvl:expr ) => ( + $crate::enabled!(kind: $kind, target: $target, $lvl, { }) + ); + (target: $target:expr, $lvl:expr ) => ( + $crate::enabled!(kind: $crate::metadata::Kind::HINT, target: $target, $lvl, { }) + ); + + // These four cases handle fields with no values + (kind: $kind:expr, target: $target:expr, $lvl:expr, $($field:tt)*) => ( + $crate::enabled!( + kind: $kind, + target: $target, + $lvl, + { $($field)*} + ) + ); + (target: $target:expr, $lvl:expr, $($field:tt)*) => ( + $crate::enabled!( + kind: $crate::metadata::Kind::HINT, + target: $target, + $lvl, + { $($field)*} + ) + ); + + // Level and field case + (kind: $kind:expr, $lvl:expr, $($field:tt)*) => ( + $crate::enabled!( + kind: $kind, + target: module_path!(), + $lvl, + { $($field)*} + ) + ); + + // Simplest `enabled!` case + (kind: $kind:expr, $lvl:expr) => ( + $crate::enabled!(kind: $kind, target: module_path!(), $lvl, { }) + ); + ($lvl:expr) => ( + $crate::enabled!(kind: $crate::metadata::Kind::HINT, target: module_path!(), $lvl, { }) + ); + + // Fallthrough from above + ($lvl:expr, $($field:tt)*) => ( + $crate::enabled!( + kind: $crate::metadata::Kind::HINT, + target: module_path!(), + $lvl, + { $($field)*} + ) + ); +} + +/// Constructs an event at the trace level. +/// +/// This functions similarly to the [`event!`] macro. See [the top-level +/// documentation][lib] for details on the syntax accepted by +/// this macro. +/// +/// [`event!`]: crate::event! +/// [lib]: crate#using-the-macros +/// +/// # Examples +/// +/// ```rust +/// use tracing::trace; +/// # #[derive(Debug, Copy, Clone)] struct Position { x: f32, y: f32 } +/// # impl Position { +/// # const ORIGIN: Self = Self { x: 0.0, y: 0.0 }; +/// # fn dist(&self, other: Position) -> f32 { +/// # let x = (other.x - self.x).exp2(); let y = (self.y - other.y).exp2(); +/// # (x + y).sqrt() +/// # } +/// # } +/// # fn main() { +/// let pos = Position { x: 3.234, y: -1.223 }; +/// let origin_dist = pos.dist(Position::ORIGIN); +/// +/// trace!(position = ?pos, ?origin_dist); +/// trace!( +/// target: "app_events", +/// position = ?pos, +/// "x is {} and y is {}", +/// if pos.x >= 0.0 { "positive" } else { "negative" }, +/// if pos.y >= 0.0 { "positive" } else { "negative" } +/// ); +/// # } +/// ``` +#[macro_export] +macro_rules! 
trace { + (target: $target:expr, parent: $parent:expr, { $($field:tt)* }, $($arg:tt)* ) => ( + $crate::event!(target: $target, parent: $parent, $crate::Level::TRACE, { $($field)* }, $($arg)*) + ); + (target: $target:expr, parent: $parent:expr, $($k:ident).+ $($field:tt)+ ) => ( + $crate::event!(target: $target, parent: $parent, $crate::Level::TRACE, { $($k).+ $($field)+ }) + ); + (target: $target:expr, parent: $parent:expr, ?$($k:ident).+ $($field:tt)+ ) => ( + $crate::event!(target: $target, parent: $parent, $crate::Level::TRACE, { $($k).+ $($field)+ }) + ); + (target: $target:expr, parent: $parent:expr, %$($k:ident).+ $($field:tt)+ ) => ( + $crate::event!(target: $target, parent: $parent, $crate::Level::TRACE, { $($k).+ $($field)+ }) + ); + (target: $target:expr, parent: $parent:expr, $($arg:tt)+ ) => ( + $crate::event!(target: $target, parent: $parent, $crate::Level::TRACE, {}, $($arg)+) + ); + (parent: $parent:expr, { $($field:tt)+ }, $($arg:tt)+ ) => ( + $crate::event!( + target: module_path!(), + parent: $parent, + $crate::Level::TRACE, + { $($field)+ }, + $($arg)+ + ) + ); + (parent: $parent:expr, $($k:ident).+ = $($field:tt)*) => ( + $crate::event!( + target: module_path!(), + parent: $parent, + $crate::Level::TRACE, + { $($k).+ = $($field)*} + ) + ); + (parent: $parent:expr, ?$($k:ident).+ = $($field:tt)*) => ( + $crate::event!( + target: module_path!(), + parent: $parent, + $crate::Level::TRACE, + { ?$($k).+ = $($field)*} + ) + ); + (parent: $parent:expr, %$($k:ident).+ = $($field:tt)*) => ( + $crate::event!( + target: module_path!(), + parent: $parent, + $crate::Level::TRACE, + { %$($k).+ = $($field)*} + ) + ); + (parent: $parent:expr, $($k:ident).+, $($field:tt)*) => ( + $crate::event!( + target: module_path!(), + parent: $parent, + $crate::Level::TRACE, + { $($k).+, $($field)*} + ) + ); + (parent: $parent:expr, ?$($k:ident).+, $($field:tt)*) => ( + $crate::event!( + target: module_path!(), + parent: $parent, + $crate::Level::TRACE, + { ?$($k).+, $($field)*} + ) + ); + (parent: $parent:expr, %$($k:ident).+, $($field:tt)*) => ( + $crate::event!( + target: module_path!(), + parent: $parent, + $crate::Level::TRACE, + { %$($k).+, $($field)*} + ) + ); + (parent: $parent:expr, $($arg:tt)+) => ( + $crate::event!( + target: module_path!(), + parent: $parent, + $crate::Level::TRACE, + {}, + $($arg)+ + ) + ); + (target: $target:expr, { $($field:tt)* }, $($arg:tt)* ) => ( + $crate::event!(target: $target, $crate::Level::TRACE, { $($field)* }, $($arg)*) + ); + (target: $target:expr, $($k:ident).+ $($field:tt)* ) => ( + $crate::event!(target: $target, $crate::Level::TRACE, { $($k).+ $($field)* }) + ); + (target: $target:expr, ?$($k:ident).+ $($field:tt)* ) => ( + $crate::event!(target: $target, $crate::Level::TRACE, { ?$($k).+ $($field)* }) + ); + (target: $target:expr, %$($k:ident).+ $($field:tt)* ) => ( + $crate::event!(target: $target, $crate::Level::TRACE, { %$($k).+ $($field)* }) + ); + (target: $target:expr, $($arg:tt)+ ) => ( + $crate::event!(target: $target, $crate::Level::TRACE, {}, $($arg)+) + ); + ({ $($field:tt)+ }, $($arg:tt)+ ) => ( + $crate::event!( + target: module_path!(), + $crate::Level::TRACE, + { $($field)+ }, + $($arg)+ + ) + ); + ($($k:ident).+ = $($field:tt)*) => ( + $crate::event!( + target: module_path!(), + $crate::Level::TRACE, + { $($k).+ = $($field)*} + ) + ); + ($($k:ident).+, $($field:tt)*) => ( + $crate::event!( + target: module_path!(), + $crate::Level::TRACE, + { $($k).+, $($field)*} + ) + ); + (?$($k:ident).+, $($field:tt)*) => ( + $crate::event!( + 
target: module_path!(), + $crate::Level::TRACE, + { ?$($k).+, $($field)*} + ) + ); + (%$($k:ident).+, $($field:tt)*) => ( + $crate::event!( + target: module_path!(), + $crate::Level::TRACE, + { %$($k).+, $($field)*} + ) + ); + (?$($k:ident).+) => ( + $crate::event!( + target: module_path!(), + $crate::Level::TRACE, + { ?$($k).+ } + ) + ); + (%$($k:ident).+) => ( + $crate::event!( + target: module_path!(), + $crate::Level::TRACE, + { %$($k).+ } + ) + ); + ($($k:ident).+) => ( + $crate::event!( + target: module_path!(), + $crate::Level::TRACE, + { $($k).+ } + ) + ); + ($($arg:tt)+) => ( + $crate::event!( + target: module_path!(), + $crate::Level::TRACE, + {}, + $($arg)+ + ) + ); +} + +/// Constructs an event at the debug level. +/// +/// This functions similarly to the [`event!`] macro. See [the top-level +/// documentation][lib] for details on the syntax accepted by +/// this macro. +/// +/// [`event!`]: crate::event! +/// [lib]: crate#using-the-macros +/// +/// # Examples +/// +/// ```rust +/// use tracing::debug; +/// # fn main() { +/// # #[derive(Debug)] struct Position { x: f32, y: f32 } +/// +/// let pos = Position { x: 3.234, y: -1.223 }; +/// +/// debug!(?pos.x, ?pos.y); +/// debug!(target: "app_events", position = ?pos, "New position"); +/// # } +/// ``` +#[macro_export] +macro_rules! debug { + (target: $target:expr, parent: $parent:expr, { $($field:tt)* }, $($arg:tt)* ) => ( + $crate::event!(target: $target, parent: $parent, $crate::Level::DEBUG, { $($field)* }, $($arg)*) + ); + (target: $target:expr, parent: $parent:expr, $($k:ident).+ $($field:tt)+ ) => ( + $crate::event!(target: $target, parent: $parent, $crate::Level::DEBUG, { $($k).+ $($field)+ }) + ); + (target: $target:expr, parent: $parent:expr, ?$($k:ident).+ $($field:tt)+ ) => ( + $crate::event!(target: $target, parent: $parent, $crate::Level::DEBUG, { $($k).+ $($field)+ }) + ); + (target: $target:expr, parent: $parent:expr, %$($k:ident).+ $($field:tt)+ ) => ( + $crate::event!(target: $target, parent: $parent, $crate::Level::DEBUG, { $($k).+ $($field)+ }) + ); + (target: $target:expr, parent: $parent:expr, $($arg:tt)+ ) => ( + $crate::event!(target: $target, parent: $parent, $crate::Level::DEBUG, {}, $($arg)+) + ); + (parent: $parent:expr, { $($field:tt)+ }, $($arg:tt)+ ) => ( + $crate::event!( + target: module_path!(), + parent: $parent, + $crate::Level::DEBUG, + { $($field)+ }, + $($arg)+ + ) + ); + (parent: $parent:expr, $($k:ident).+ = $($field:tt)*) => ( + $crate::event!( + target: module_path!(), + parent: $parent, + $crate::Level::DEBUG, + { $($k).+ = $($field)*} + ) + ); + (parent: $parent:expr, ?$($k:ident).+ = $($field:tt)*) => ( + $crate::event!( + target: module_path!(), + parent: $parent, + $crate::Level::DEBUG, + { ?$($k).+ = $($field)*} + ) + ); + (parent: $parent:expr, %$($k:ident).+ = $($field:tt)*) => ( + $crate::event!( + target: module_path!(), + parent: $parent, + $crate::Level::DEBUG, + { %$($k).+ = $($field)*} + ) + ); + (parent: $parent:expr, $($k:ident).+, $($field:tt)*) => ( + $crate::event!( + target: module_path!(), + parent: $parent, + $crate::Level::DEBUG, + { $($k).+, $($field)*} + ) + ); + (parent: $parent:expr, ?$($k:ident).+, $($field:tt)*) => ( + $crate::event!( + target: module_path!(), + parent: $parent, + $crate::Level::DEBUG, + { ?$($k).+, $($field)*} + ) + ); + (parent: $parent:expr, %$($k:ident).+, $($field:tt)*) => ( + $crate::event!( + target: module_path!(), + parent: $parent, + $crate::Level::DEBUG, + { %$($k).+, $($field)*} + ) + ); + (parent: $parent:expr, $($arg:tt)+) => ( + 
$crate::event!( + target: module_path!(), + parent: $parent, + $crate::Level::DEBUG, + {}, + $($arg)+ + ) + ); + (target: $target:expr, { $($field:tt)* }, $($arg:tt)* ) => ( + $crate::event!(target: $target, $crate::Level::DEBUG, { $($field)* }, $($arg)*) + ); + (target: $target:expr, $($k:ident).+ $($field:tt)* ) => ( + $crate::event!(target: $target, $crate::Level::DEBUG, { $($k).+ $($field)* }) + ); + (target: $target:expr, ?$($k:ident).+ $($field:tt)* ) => ( + $crate::event!(target: $target, $crate::Level::DEBUG, { ?$($k).+ $($field)* }) + ); + (target: $target:expr, %$($k:ident).+ $($field:tt)* ) => ( + $crate::event!(target: $target, $crate::Level::DEBUG, { %$($k).+ $($field)* }) + ); + (target: $target:expr, $($arg:tt)+ ) => ( + $crate::event!(target: $target, $crate::Level::DEBUG, {}, $($arg)+) + ); + ({ $($field:tt)+ }, $($arg:tt)+ ) => ( + $crate::event!( + target: module_path!(), + $crate::Level::DEBUG, + { $($field)+ }, + $($arg)+ + ) + ); + ($($k:ident).+ = $($field:tt)*) => ( + $crate::event!( + target: module_path!(), + $crate::Level::DEBUG, + { $($k).+ = $($field)*} + ) + ); + (?$($k:ident).+ = $($field:tt)*) => ( + $crate::event!( + target: module_path!(), + $crate::Level::DEBUG, + { ?$($k).+ = $($field)*} + ) + ); + (%$($k:ident).+ = $($field:tt)*) => ( + $crate::event!( + target: module_path!(), + $crate::Level::DEBUG, + { %$($k).+ = $($field)*} + ) + ); + ($($k:ident).+, $($field:tt)*) => ( + $crate::event!( + target: module_path!(), + $crate::Level::DEBUG, + { $($k).+, $($field)*} + ) + ); + (?$($k:ident).+, $($field:tt)*) => ( + $crate::event!( + target: module_path!(), + $crate::Level::DEBUG, + { ?$($k).+, $($field)*} + ) + ); + (%$($k:ident).+, $($field:tt)*) => ( + $crate::event!( + target: module_path!(), + $crate::Level::DEBUG, + { %$($k).+, $($field)*} + ) + ); + (?$($k:ident).+) => ( + $crate::event!( + target: module_path!(), + $crate::Level::DEBUG, + { ?$($k).+ } + ) + ); + (%$($k:ident).+) => ( + $crate::event!( + target: module_path!(), + $crate::Level::DEBUG, + { %$($k).+ } + ) + ); + ($($k:ident).+) => ( + $crate::event!( + target: module_path!(), + $crate::Level::DEBUG, + { $($k).+ } + ) + ); + ($($arg:tt)+) => ( + $crate::event!( + target: module_path!(), + $crate::Level::DEBUG, + {}, + $($arg)+ + ) + ); +} + +/// Constructs an event at the info level. +/// +/// This functions similarly to the [`event!`] macro. See [the top-level +/// documentation][lib] for details on the syntax accepted by +/// this macro. +/// +/// [`event!`]: crate::event! +/// [lib]: crate#using-the-macros +/// +/// # Examples +/// +/// ```rust +/// use tracing::info; +/// # // this is so the test will still work in no-std mode +/// # #[derive(Debug)] +/// # pub struct Ipv4Addr; +/// # impl Ipv4Addr { fn new(o1: u8, o2: u8, o3: u8, o4: u8) -> Self { Self } } +/// # fn main() { +/// # struct Connection { port: u32, speed: f32 } +/// use tracing::field; +/// +/// let addr = Ipv4Addr::new(127, 0, 0, 1); +/// let conn = Connection { port: 40, speed: 3.20 }; +/// +/// info!(conn.port, "connected to {:?}", addr); +/// info!( +/// target: "connection_events", +/// ip = ?addr, +/// conn.port, +/// ?conn.speed, +/// ); +/// # } +/// ``` +#[macro_export] +macro_rules! 
info { + (target: $target:expr, parent: $parent:expr, { $($field:tt)* }, $($arg:tt)* ) => ( + $crate::event!(target: $target, parent: $parent, $crate::Level::INFO, { $($field)* }, $($arg)*) + ); + (target: $target:expr, parent: $parent:expr, $($k:ident).+ $($field:tt)+ ) => ( + $crate::event!(target: $target, parent: $parent, $crate::Level::INFO, { $($k).+ $($field)+ }) + ); + (target: $target:expr, parent: $parent:expr, ?$($k:ident).+ $($field:tt)+ ) => ( + $crate::event!(target: $target, parent: $parent, $crate::Level::INFO, { $($k).+ $($field)+ }) + ); + (target: $target:expr, parent: $parent:expr, %$($k:ident).+ $($field:tt)+ ) => ( + $crate::event!(target: $target, parent: $parent, $crate::Level::INFO, { $($k).+ $($field)+ }) + ); + (target: $target:expr, parent: $parent:expr, $($arg:tt)+ ) => ( + $crate::event!(target: $target, parent: $parent, $crate::Level::INFO, {}, $($arg)+) + ); + (parent: $parent:expr, { $($field:tt)+ }, $($arg:tt)+ ) => ( + $crate::event!( + target: module_path!(), + parent: $parent, + $crate::Level::INFO, + { $($field)+ }, + $($arg)+ + ) + ); + (parent: $parent:expr, $($k:ident).+ = $($field:tt)*) => ( + $crate::event!( + target: module_path!(), + parent: $parent, + $crate::Level::INFO, + { $($k).+ = $($field)*} + ) + ); + (parent: $parent:expr, ?$($k:ident).+ = $($field:tt)*) => ( + $crate::event!( + target: module_path!(), + parent: $parent, + $crate::Level::INFO, + { ?$($k).+ = $($field)*} + ) + ); + (parent: $parent:expr, %$($k:ident).+ = $($field:tt)*) => ( + $crate::event!( + target: module_path!(), + parent: $parent, + $crate::Level::INFO, + { %$($k).+ = $($field)*} + ) + ); + (parent: $parent:expr, $($k:ident).+, $($field:tt)*) => ( + $crate::event!( + target: module_path!(), + parent: $parent, + $crate::Level::INFO, + { $($k).+, $($field)*} + ) + ); + (parent: $parent:expr, ?$($k:ident).+, $($field:tt)*) => ( + $crate::event!( + target: module_path!(), + parent: $parent, + $crate::Level::INFO, + { ?$($k).+, $($field)*} + ) + ); + (parent: $parent:expr, %$($k:ident).+, $($field:tt)*) => ( + $crate::event!( + target: module_path!(), + parent: $parent, + $crate::Level::INFO, + { %$($k).+, $($field)*} + ) + ); + (parent: $parent:expr, $($arg:tt)+) => ( + $crate::event!( + target: module_path!(), + parent: $parent, + $crate::Level::INFO, + {}, + $($arg)+ + ) + ); + (target: $target:expr, { $($field:tt)* }, $($arg:tt)* ) => ( + $crate::event!(target: $target, $crate::Level::INFO, { $($field)* }, $($arg)*) + ); + (target: $target:expr, $($k:ident).+ $($field:tt)* ) => ( + $crate::event!(target: $target, $crate::Level::INFO, { $($k).+ $($field)* }) + ); + (target: $target:expr, ?$($k:ident).+ $($field:tt)* ) => ( + $crate::event!(target: $target, $crate::Level::INFO, { ?$($k).+ $($field)* }) + ); + (target: $target:expr, %$($k:ident).+ $($field:tt)* ) => ( + $crate::event!(target: $target, $crate::Level::INFO, { $($k).+ $($field)* }) + ); + (target: $target:expr, $($arg:tt)+ ) => ( + $crate::event!(target: $target, $crate::Level::INFO, {}, $($arg)+) + ); + ({ $($field:tt)+ }, $($arg:tt)+ ) => ( + $crate::event!( + target: module_path!(), + $crate::Level::INFO, + { $($field)+ }, + $($arg)+ + ) + ); + ($($k:ident).+ = $($field:tt)*) => ( + $crate::event!( + target: module_path!(), + $crate::Level::INFO, + { $($k).+ = $($field)*} + ) + ); + (?$($k:ident).+ = $($field:tt)*) => ( + $crate::event!( + target: module_path!(), + $crate::Level::INFO, + { ?$($k).+ = $($field)*} + ) + ); + (%$($k:ident).+ = $($field:tt)*) => ( + $crate::event!( + target: module_path!(), 
+ $crate::Level::INFO, + { %$($k).+ = $($field)*} + ) + ); + ($($k:ident).+, $($field:tt)*) => ( + $crate::event!( + target: module_path!(), + $crate::Level::INFO, + { $($k).+, $($field)*} + ) + ); + (?$($k:ident).+, $($field:tt)*) => ( + $crate::event!( + target: module_path!(), + $crate::Level::INFO, + { ?$($k).+, $($field)*} + ) + ); + (%$($k:ident).+, $($field:tt)*) => ( + $crate::event!( + target: module_path!(), + $crate::Level::INFO, + { %$($k).+, $($field)*} + ) + ); + (?$($k:ident).+) => ( + $crate::event!( + target: module_path!(), + $crate::Level::INFO, + { ?$($k).+ } + ) + ); + (%$($k:ident).+) => ( + $crate::event!( + target: module_path!(), + $crate::Level::INFO, + { %$($k).+ } + ) + ); + ($($k:ident).+) => ( + $crate::event!( + target: module_path!(), + $crate::Level::INFO, + { $($k).+ } + ) + ); + ($($arg:tt)+) => ( + $crate::event!( + target: module_path!(), + $crate::Level::INFO, + {}, + $($arg)+ + ) + ); +} + +/// Constructs an event at the warn level. +/// +/// This functions similarly to the [`event!`] macro. See [the top-level +/// documentation][lib] for details on the syntax accepted by +/// this macro. +/// +/// [`event!`]: crate::event! +/// [lib]: crate#using-the-macros +/// +/// # Examples +/// +/// ```rust +/// use tracing::warn; +/// # fn main() { +/// +/// let warn_description = "Invalid Input"; +/// let input = &[0x27, 0x45]; +/// +/// warn!(?input, warning = warn_description); +/// warn!( +/// target: "input_events", +/// warning = warn_description, +/// "Received warning for input: {:?}", input, +/// ); +/// # } +/// ``` +#[macro_export] +macro_rules! warn { + (target: $target:expr, parent: $parent:expr, { $($field:tt)* }, $($arg:tt)* ) => ( + $crate::event!(target: $target, parent: $parent, $crate::Level::WARN, { $($field)* }, $($arg)*) + ); + (target: $target:expr, parent: $parent:expr, $($k:ident).+ $($field:tt)+ ) => ( + $crate::event!(target: $target, parent: $parent, $crate::Level::WARN, { $($k).+ $($field)+ }) + ); + (target: $target:expr, parent: $parent:expr, ?$($k:ident).+ $($field:tt)+ ) => ( + $crate::event!(target: $target, parent: $parent, $crate::Level::WARN, { $($k).+ $($field)+ }) + ); + (target: $target:expr, parent: $parent:expr, %$($k:ident).+ $($field:tt)+ ) => ( + $crate::event!(target: $target, parent: $parent, $crate::Level::WARN, { $($k).+ $($field)+ }) + ); + (target: $target:expr, parent: $parent:expr, $($arg:tt)+ ) => ( + $crate::event!(target: $target, parent: $parent, $crate::Level::WARN, {}, $($arg)+) + ); + (parent: $parent:expr, { $($field:tt)+ }, $($arg:tt)+ ) => ( + $crate::event!( + target: module_path!(), + parent: $parent, + $crate::Level::WARN, + { $($field)+ }, + $($arg)+ + ) + ); + (parent: $parent:expr, $($k:ident).+ = $($field:tt)*) => ( + $crate::event!( + target: module_path!(), + parent: $parent, + $crate::Level::WARN, + { $($k).+ = $($field)*} + ) + ); + (parent: $parent:expr, ?$($k:ident).+ = $($field:tt)*) => ( + $crate::event!( + target: module_path!(), + parent: $parent, + $crate::Level::WARN, + { ?$($k).+ = $($field)*} + ) + ); + (parent: $parent:expr, %$($k:ident).+ = $($field:tt)*) => ( + $crate::event!( + target: module_path!(), + parent: $parent, + $crate::Level::WARN, + { %$($k).+ = $($field)*} + ) + ); + (parent: $parent:expr, $($k:ident).+, $($field:tt)*) => ( + $crate::event!( + target: module_path!(), + parent: $parent, + $crate::Level::WARN, + { $($k).+, $($field)*} + ) + ); + (parent: $parent:expr, ?$($k:ident).+, $($field:tt)*) => ( + $crate::event!( + target: module_path!(), + parent: 
$parent, + $crate::Level::WARN, + { ?$($k).+, $($field)*} + ) + ); + (parent: $parent:expr, %$($k:ident).+, $($field:tt)*) => ( + $crate::event!( + target: module_path!(), + parent: $parent, + $crate::Level::WARN, + { %$($k).+, $($field)*} + ) + ); + (parent: $parent:expr, $($arg:tt)+) => ( + $crate::event!( + target: module_path!(), + parent: $parent, + $crate::Level::WARN, + {}, + $($arg)+ + ) + ); + (target: $target:expr, { $($field:tt)* }, $($arg:tt)* ) => ( + $crate::event!(target: $target, $crate::Level::WARN, { $($field)* }, $($arg)*) + ); + (target: $target:expr, $($k:ident).+ $($field:tt)* ) => ( + $crate::event!(target: $target, $crate::Level::WARN, { $($k).+ $($field)* }) + ); + (target: $target:expr, ?$($k:ident).+ $($field:tt)* ) => ( + $crate::event!(target: $target, $crate::Level::WARN, { ?$($k).+ $($field)* }) + ); + (target: $target:expr, %$($k:ident).+ $($field:tt)* ) => ( + $crate::event!(target: $target, $crate::Level::WARN, { %$($k).+ $($field)* }) + ); + (target: $target:expr, $($arg:tt)+ ) => ( + $crate::event!(target: $target, $crate::Level::WARN, {}, $($arg)+) + ); + ({ $($field:tt)+ }, $($arg:tt)+ ) => ( + $crate::event!( + target: module_path!(), + $crate::Level::WARN, + { $($field)+ }, + $($arg)+ + ) + ); + ($($k:ident).+ = $($field:tt)*) => ( + $crate::event!( + target: module_path!(), + $crate::Level::WARN, + { $($k).+ = $($field)*} + ) + ); + (?$($k:ident).+ = $($field:tt)*) => ( + $crate::event!( + target: module_path!(), + $crate::Level::WARN, + { ?$($k).+ = $($field)*} + ) + ); + (%$($k:ident).+ = $($field:tt)*) => ( + $crate::event!( + target: module_path!(), + $crate::Level::WARN, + { %$($k).+ = $($field)*} + ) + ); + ($($k:ident).+, $($field:tt)*) => ( + $crate::event!( + target: module_path!(), + $crate::Level::WARN, + { $($k).+, $($field)*} + ) + ); + (?$($k:ident).+, $($field:tt)*) => ( + $crate::event!( + target: module_path!(), + $crate::Level::WARN, + { ?$($k).+, $($field)*} + ) + ); + (%$($k:ident).+, $($field:tt)*) => ( + $crate::event!( + target: module_path!(), + $crate::Level::WARN, + { %$($k).+, $($field)*} + ) + ); + (?$($k:ident).+) => ( + $crate::event!( + target: module_path!(), + $crate::Level::WARN, + { ?$($k).+ } + ) + ); + (%$($k:ident).+) => ( + $crate::event!( + target: module_path!(), + $crate::Level::WARN, + { %$($k).+ } + ) + ); + ($($k:ident).+) => ( + $crate::event!( + target: module_path!(), + $crate::Level::WARN, + { $($k).+ } + ) + ); + ($($arg:tt)+) => ( + $crate::event!( + target: module_path!(), + $crate::Level::WARN, + {}, + $($arg)+ + ) + ); +} + +/// Constructs an event at the error level. +/// +/// This functions similarly to the [`event!`] macro. See [the top-level +/// documentation][lib] for details on the syntax accepted by +/// this macro. +/// +/// [`event!`]: crate::event! +/// [lib]: crate#using-the-macros +/// +/// # Examples +/// +/// ```rust +/// use tracing::error; +/// # fn main() { +/// +/// let (err_info, port) = ("No connection", 22); +/// +/// error!(port, error = %err_info); +/// error!(target: "app_events", "App Error: {}", err_info); +/// error!({ info = err_info }, "error on port: {}", port); +/// # } +/// ``` +#[macro_export] +macro_rules! 
error { + (target: $target:expr, parent: $parent:expr, { $($field:tt)* }, $($arg:tt)* ) => ( + $crate::event!(target: $target, parent: $parent, $crate::Level::ERROR, { $($field)* }, $($arg)*) + ); + (target: $target:expr, parent: $parent:expr, $($k:ident).+ $($field:tt)+ ) => ( + $crate::event!(target: $target, parent: $parent, $crate::Level::ERROR, { $($k).+ $($field)+ }) + ); + (target: $target:expr, parent: $parent:expr, ?$($k:ident).+ $($field:tt)+ ) => ( + $crate::event!(target: $target, parent: $parent, $crate::Level::ERROR, { $($k).+ $($field)+ }) + ); + (target: $target:expr, parent: $parent:expr, %$($k:ident).+ $($field:tt)+ ) => ( + $crate::event!(target: $target, parent: $parent, $crate::Level::ERROR, { $($k).+ $($field)+ }) + ); + (target: $target:expr, parent: $parent:expr, $($arg:tt)+ ) => ( + $crate::event!(target: $target, parent: $parent, $crate::Level::ERROR, {}, $($arg)+) + ); + (parent: $parent:expr, { $($field:tt)+ }, $($arg:tt)+ ) => ( + $crate::event!( + target: module_path!(), + parent: $parent, + $crate::Level::ERROR, + { $($field)+ }, + $($arg)+ + ) + ); + (parent: $parent:expr, $($k:ident).+ = $($field:tt)*) => ( + $crate::event!( + target: module_path!(), + parent: $parent, + $crate::Level::ERROR, + { $($k).+ = $($field)*} + ) + ); + (parent: $parent:expr, ?$($k:ident).+ = $($field:tt)*) => ( + $crate::event!( + target: module_path!(), + parent: $parent, + $crate::Level::ERROR, + { ?$($k).+ = $($field)*} + ) + ); + (parent: $parent:expr, %$($k:ident).+ = $($field:tt)*) => ( + $crate::event!( + target: module_path!(), + parent: $parent, + $crate::Level::ERROR, + { %$($k).+ = $($field)*} + ) + ); + (parent: $parent:expr, $($k:ident).+, $($field:tt)*) => ( + $crate::event!( + target: module_path!(), + parent: $parent, + $crate::Level::ERROR, + { $($k).+, $($field)*} + ) + ); + (parent: $parent:expr, ?$($k:ident).+, $($field:tt)*) => ( + $crate::event!( + target: module_path!(), + parent: $parent, + $crate::Level::ERROR, + { ?$($k).+, $($field)*} + ) + ); + (parent: $parent:expr, %$($k:ident).+, $($field:tt)*) => ( + $crate::event!( + target: module_path!(), + parent: $parent, + $crate::Level::ERROR, + { %$($k).+, $($field)*} + ) + ); + (parent: $parent:expr, $($arg:tt)+) => ( + $crate::event!( + target: module_path!(), + parent: $parent, + $crate::Level::ERROR, + {}, + $($arg)+ + ) + ); + (target: $target:expr, { $($field:tt)* }, $($arg:tt)* ) => ( + $crate::event!(target: $target, $crate::Level::ERROR, { $($field)* }, $($arg)*) + ); + (target: $target:expr, $($k:ident).+ $($field:tt)* ) => ( + $crate::event!(target: $target, $crate::Level::ERROR, { $($k).+ $($field)* }) + ); + (target: $target:expr, ?$($k:ident).+ $($field:tt)* ) => ( + $crate::event!(target: $target, $crate::Level::ERROR, { ?$($k).+ $($field)* }) + ); + (target: $target:expr, %$($k:ident).+ $($field:tt)* ) => ( + $crate::event!(target: $target, $crate::Level::ERROR, { %$($k).+ $($field)* }) + ); + (target: $target:expr, $($arg:tt)+ ) => ( + $crate::event!(target: $target, $crate::Level::ERROR, {}, $($arg)+) + ); + ({ $($field:tt)+ }, $($arg:tt)+ ) => ( + $crate::event!( + target: module_path!(), + $crate::Level::ERROR, + { $($field)+ }, + $($arg)+ + ) + ); + ($($k:ident).+ = $($field:tt)*) => ( + $crate::event!( + target: module_path!(), + $crate::Level::ERROR, + { $($k).+ = $($field)*} + ) + ); + (?$($k:ident).+ = $($field:tt)*) => ( + $crate::event!( + target: module_path!(), + $crate::Level::ERROR, + { ?$($k).+ = $($field)*} + ) + ); + (%$($k:ident).+ = $($field:tt)*) => ( + $crate::event!( + 
target: module_path!(), + $crate::Level::ERROR, + { %$($k).+ = $($field)*} + ) + ); + ($($k:ident).+, $($field:tt)*) => ( + $crate::event!( + target: module_path!(), + $crate::Level::ERROR, + { $($k).+, $($field)*} + ) + ); + (?$($k:ident).+, $($field:tt)*) => ( + $crate::event!( + target: module_path!(), + $crate::Level::ERROR, + { ?$($k).+, $($field)*} + ) + ); + (%$($k:ident).+, $($field:tt)*) => ( + $crate::event!( + target: module_path!(), + $crate::Level::ERROR, + { %$($k).+, $($field)*} + ) + ); + (?$($k:ident).+) => ( + $crate::event!( + target: module_path!(), + $crate::Level::ERROR, + { ?$($k).+ } + ) + ); + (%$($k:ident).+) => ( + $crate::event!( + target: module_path!(), + $crate::Level::ERROR, + { %$($k).+ } + ) + ); + ($($k:ident).+) => ( + $crate::event!( + target: module_path!(), + $crate::Level::ERROR, + { $($k).+ } + ) + ); + ($($arg:tt)+) => ( + $crate::event!( + target: module_path!(), + $crate::Level::ERROR, + {}, + $($arg)+ + ) + ); +} + +/// Constructs a new static callsite for a span or event. +#[doc(hidden)] +#[macro_export] +macro_rules! callsite { + (name: $name:expr, kind: $kind:expr, fields: $($fields:tt)*) => {{ + $crate::callsite! { + name: $name, + kind: $kind, + target: module_path!(), + level: $crate::Level::TRACE, + fields: $($fields)* + } + }}; + ( + name: $name:expr, + kind: $kind:expr, + level: $lvl:expr, + fields: $($fields:tt)* + ) => {{ + $crate::callsite! { + name: $name, + kind: $kind, + target: module_path!(), + level: $lvl, + fields: $($fields)* + } + }}; + ( + name: $name:expr, + kind: $kind:expr, + target: $target:expr, + level: $lvl:expr, + fields: $($fields:tt)* + ) => {{ + use $crate::__macro_support::MacroCallsite; + static META: $crate::Metadata<'static> = { + $crate::metadata! { + name: $name, + target: $target, + level: $lvl, + fields: $crate::fieldset!( $($fields)* ), + callsite: &CALLSITE, + kind: $kind, + } + }; + static CALLSITE: MacroCallsite = MacroCallsite::new(&META); + CALLSITE.register(); + &CALLSITE + }}; +} + +/// Constructs a new static callsite for a span or event. +#[doc(hidden)] +#[macro_export] +macro_rules! callsite2 { + (name: $name:expr, kind: $kind:expr, fields: $($fields:tt)*) => {{ + $crate::callsite2! { + name: $name, + kind: $kind, + target: module_path!(), + level: $crate::Level::TRACE, + fields: $($fields)* + } + }}; + ( + name: $name:expr, + kind: $kind:expr, + level: $lvl:expr, + fields: $($fields:tt)* + ) => {{ + $crate::callsite2! { + name: $name, + kind: $kind, + target: module_path!(), + level: $lvl, + fields: $($fields)* + } + }}; + ( + name: $name:expr, + kind: $kind:expr, + target: $target:expr, + level: $lvl:expr, + fields: $($fields:tt)* + ) => {{ + use $crate::__macro_support::MacroCallsite; + static META: $crate::Metadata<'static> = { + $crate::metadata! { + name: $name, + target: $target, + level: $lvl, + fields: $crate::fieldset!( $($fields)* ), + callsite: &CALLSITE, + kind: $kind, + } + }; + MacroCallsite::new(&META) + }}; +} + +#[macro_export] +// TODO: determine if this ought to be public API?` +#[doc(hidden)] +macro_rules! level_enabled { + ($lvl:expr) => { + $lvl <= $crate::level_filters::STATIC_MAX_LEVEL + && $lvl <= $crate::level_filters::LevelFilter::current() + }; +} + +#[doc(hidden)] +#[macro_export] +macro_rules! valueset { + + // === base case === + (@ { $(,)* $($val:expr),* $(,)* }, $next:expr $(,)*) => { + &[ $($val),* ] + }; + + // === recursive case (more tts) === + + // TODO(#1138): determine a new syntax for uninitialized span fields, and + // re-enable this. 
+ // (@{ $(,)* $($out:expr),* }, $next:expr, $($k:ident).+ = _, $($rest:tt)*) => { + // $crate::valueset!(@ { $($out),*, (&$next, None) }, $next, $($rest)*) + // }; + (@ { $(,)* $($out:expr),* }, $next:expr, $($k:ident).+ = ?$val:expr, $($rest:tt)*) => { + $crate::valueset!( + @ { $($out),*, (&$next, Some(&debug(&$val) as &Value)) }, + $next, + $($rest)* + ) + }; + (@ { $(,)* $($out:expr),* }, $next:expr, $($k:ident).+ = %$val:expr, $($rest:tt)*) => { + $crate::valueset!( + @ { $($out),*, (&$next, Some(&display(&$val) as &Value)) }, + $next, + $($rest)* + ) + }; + (@ { $(,)* $($out:expr),* }, $next:expr, $($k:ident).+ = $val:expr, $($rest:tt)*) => { + $crate::valueset!( + @ { $($out),*, (&$next, Some(&$val as &Value)) }, + $next, + $($rest)* + ) + }; + (@ { $(,)* $($out:expr),* }, $next:expr, $($k:ident).+, $($rest:tt)*) => { + $crate::valueset!( + @ { $($out),*, (&$next, Some(&$($k).+ as &Value)) }, + $next, + $($rest)* + ) + }; + (@ { $(,)* $($out:expr),* }, $next:expr, ?$($k:ident).+, $($rest:tt)*) => { + $crate::valueset!( + @ { $($out),*, (&$next, Some(&debug(&$($k).+) as &Value)) }, + $next, + $($rest)* + ) + }; + (@ { $(,)* $($out:expr),* }, $next:expr, %$($k:ident).+, $($rest:tt)*) => { + $crate::valueset!( + @ { $($out),*, (&$next, Some(&display(&$($k).+) as &Value)) }, + $next, + $($rest)* + ) + }; + (@ { $(,)* $($out:expr),* }, $next:expr, $($k:ident).+ = ?$val:expr) => { + $crate::valueset!( + @ { $($out),*, (&$next, Some(&debug(&$val) as &Value)) }, + $next, + ) + }; + (@ { $(,)* $($out:expr),* }, $next:expr, $($k:ident).+ = %$val:expr) => { + $crate::valueset!( + @ { $($out),*, (&$next, Some(&display(&$val) as &Value)) }, + $next, + ) + }; + (@ { $(,)* $($out:expr),* }, $next:expr, $($k:ident).+ = $val:expr) => { + $crate::valueset!( + @ { $($out),*, (&$next, Some(&$val as &Value)) }, + $next, + ) + }; + (@ { $(,)* $($out:expr),* }, $next:expr, $($k:ident).+) => { + $crate::valueset!( + @ { $($out),*, (&$next, Some(&$($k).+ as &Value)) }, + $next, + ) + }; + (@ { $(,)* $($out:expr),* }, $next:expr, ?$($k:ident).+) => { + $crate::valueset!( + @ { $($out),*, (&$next, Some(&debug(&$($k).+) as &Value)) }, + $next, + ) + }; + (@ { $(,)* $($out:expr),* }, $next:expr, %$($k:ident).+) => { + $crate::valueset!( + @ { $($out),*, (&$next, Some(&display(&$($k).+) as &Value)) }, + $next, + ) + }; + + // Handle literal names + (@ { $(,)* $($out:expr),* }, $next:expr, $k:literal = ?$val:expr, $($rest:tt)*) => { + $crate::valueset!( + @ { $($out),*, (&$next, Some(&debug(&$val) as &Value)) }, + $next, + $($rest)* + ) + }; + (@ { $(,)* $($out:expr),* }, $next:expr, $k:literal = %$val:expr, $($rest:tt)*) => { + $crate::valueset!( + @ { $($out),*, (&$next, Some(&display(&$val) as &Value)) }, + $next, + $($rest)* + ) + }; + (@ { $(,)* $($out:expr),* }, $next:expr, $k:literal = $val:expr, $($rest:tt)*) => { + $crate::valueset!( + @ { $($out),*, (&$next, Some(&$val as &Value)) }, + $next, + $($rest)* + ) + }; + (@ { $(,)* $($out:expr),* }, $next:expr, $k:literal = ?$val:expr) => { + $crate::valueset!( + @ { $($out),*, (&$next, Some(&debug(&$val) as &Value)) }, + $next, + ) + }; + (@ { $(,)* $($out:expr),* }, $next:expr, $k:literal = %$val:expr) => { + $crate::valueset!( + @ { $($out),*, (&$next, Some(&display(&$val) as &Value)) }, + $next, + ) + }; + (@ { $(,)* $($out:expr),* }, $next:expr, $k:literal = $val:expr) => { + $crate::valueset!( + @ { $($out),*, (&$next, Some(&$val as &Value)) }, + $next, + ) + }; + + // Remainder is unparseable, but exists --- must be format args! 
+ (@ { $(,)* $($out:expr),* }, $next:expr, $($rest:tt)+) => { + $crate::valueset!(@ { (&$next, Some(&format_args!($($rest)+) as &Value)), $($out),* }, $next, ) + }; + + // === entry === + ($fields:expr, $($kvs:tt)+) => { + { + #[allow(unused_imports)] + use $crate::field::{debug, display, Value}; + let mut iter = $fields.iter(); + $fields.value_set($crate::valueset!( + @ { }, + iter.next().expect("FieldSet corrupted (this is a bug)"), + $($kvs)+ + )) + } + }; + ($fields:expr,) => { + { + $fields.value_set(&[]) + } + }; +} + +#[doc(hidden)] +#[macro_export] +macro_rules! fieldset { + // == base case == + (@ { $(,)* $($out:expr),* $(,)* } $(,)*) => { + &[ $($out),* ] + }; + + // == recursive cases (more tts) == + (@ { $(,)* $($out:expr),* } $($k:ident).+ = ?$val:expr, $($rest:tt)*) => { + $crate::fieldset!(@ { $($out),*, $crate::__tracing_stringify!($($k).+) } $($rest)*) + }; + (@ { $(,)* $($out:expr),* } $($k:ident).+ = %$val:expr, $($rest:tt)*) => { + $crate::fieldset!(@ { $($out),*, $crate::__tracing_stringify!($($k).+) } $($rest)*) + }; + (@ { $(,)* $($out:expr),* } $($k:ident).+ = $val:expr, $($rest:tt)*) => { + $crate::fieldset!(@ { $($out),*, $crate::__tracing_stringify!($($k).+) } $($rest)*) + }; + // TODO(#1138): determine a new syntax for uninitialized span fields, and + // re-enable this. + // (@ { $($out:expr),* } $($k:ident).+ = _, $($rest:tt)*) => { + // $crate::fieldset!(@ { $($out),*, $crate::__tracing_stringify!($($k).+) } $($rest)*) + // }; + (@ { $(,)* $($out:expr),* } ?$($k:ident).+, $($rest:tt)*) => { + $crate::fieldset!(@ { $($out),*, $crate::__tracing_stringify!($($k).+) } $($rest)*) + }; + (@ { $(,)* $($out:expr),* } %$($k:ident).+, $($rest:tt)*) => { + $crate::fieldset!(@ { $($out),*, $crate::__tracing_stringify!($($k).+) } $($rest)*) + }; + (@ { $(,)* $($out:expr),* } $($k:ident).+, $($rest:tt)*) => { + $crate::fieldset!(@ { $($out),*, $crate::__tracing_stringify!($($k).+) } $($rest)*) + }; + + // Handle literal names + (@ { $(,)* $($out:expr),* } $k:literal = ?$val:expr, $($rest:tt)*) => { + $crate::fieldset!(@ { $($out),*, $k } $($rest)*) + }; + (@ { $(,)* $($out:expr),* } $k:literal = %$val:expr, $($rest:tt)*) => { + $crate::fieldset!(@ { $($out),*, $k } $($rest)*) + }; + (@ { $(,)* $($out:expr),* } $k:literal = $val:expr, $($rest:tt)*) => { + $crate::fieldset!(@ { $($out),*, $k } $($rest)*) + }; + + // Remainder is unparseable, but exists --- must be format args! + (@ { $(,)* $($out:expr),* } $($rest:tt)+) => { + $crate::fieldset!(@ { "message", $($out),*, }) + }; + + // == entry == + ($($args:tt)*) => { + $crate::fieldset!(@ { } $($args)*,) + }; + +} + +#[cfg(feature = "log")] +#[doc(hidden)] +#[macro_export] +macro_rules! level_to_log { + ($level:expr) => { + match $level { + $crate::Level::ERROR => $crate::log::Level::Error, + $crate::Level::WARN => $crate::log::Level::Warn, + $crate::Level::INFO => $crate::log::Level::Info, + $crate::Level::DEBUG => $crate::log::Level::Debug, + _ => $crate::log::Level::Trace, + } + }; +} + +#[doc(hidden)] +#[macro_export] +macro_rules! __tracing_stringify { + ($s:expr) => { + stringify!($s) + }; +} + +#[cfg(not(feature = "log"))] +#[doc(hidden)] +#[macro_export] +macro_rules! __tracing_log { + ($level:expr, $callsite:expr, $value_set:expr) => {}; +} + +#[cfg(feature = "log")] +#[doc(hidden)] +#[macro_export] +macro_rules! __tracing_log { + ($level:expr, $callsite:expr, $value_set:expr) => { + $crate::if_log_enabled! 
{ $level, { + use $crate::log; + let level = $crate::level_to_log!($level); + if level <= log::max_level() { + let log_meta = log::Metadata::builder() + .level(level) + .target(CALLSITE.metadata().target()) + .build(); + let logger = log::logger(); + if logger.enabled(&log_meta) { + $callsite.log(logger, log_meta, $value_set) + } + } + }} + }; +} + +#[cfg(not(feature = "log"))] +#[doc(hidden)] +#[macro_export] +macro_rules! if_log_enabled { + ($lvl:expr, $e:expr;) => { + $crate::if_log_enabled! { $lvl, $e } + }; + ($lvl:expr, $if_log:block) => { + $crate::if_log_enabled! { $lvl, $if_log else {} } + }; + ($lvl:expr, $if_log:block else $else_block:block) => { + $else_block + }; +} + +#[cfg(all(feature = "log", not(feature = "log-always")))] +#[doc(hidden)] +#[macro_export] +macro_rules! if_log_enabled { + ($lvl:expr, $e:expr;) => { + $crate::if_log_enabled! { $lvl, $e } + }; + ($lvl:expr, $if_log:block) => { + $crate::if_log_enabled! { $lvl, $if_log else {} } + }; + ($lvl:expr, $if_log:block else $else_block:block) => { + if $crate::level_to_log!($lvl) <= $crate::log::STATIC_MAX_LEVEL { + if !$crate::dispatcher::has_been_set() { + $if_log + } else { + $else_block + } + } else { + $else_block + } + }; +} + +#[cfg(all(feature = "log", feature = "log-always"))] +#[doc(hidden)] +#[macro_export] +macro_rules! if_log_enabled { + ($lvl:expr, $e:expr;) => { + $crate::if_log_enabled! { $lvl, $e } + }; + ($lvl:expr, $if_log:block) => { + $crate::if_log_enabled! { $lvl, $if_log else {} } + }; + ($lvl:expr, $if_log:block else $else_block:block) => { + if $crate::level_to_log!($lvl) <= $crate::log::STATIC_MAX_LEVEL { + #[allow(unused_braces)] + $if_log + } else { + $else_block + } + }; +} diff --git a/third_party/rust/tracing/src/span.rs b/third_party/rust/tracing/src/span.rs new file mode 100644 index 000000000000..7278e6d17c17 --- /dev/null +++ b/third_party/rust/tracing/src/span.rs @@ -0,0 +1,1617 @@ +//! Spans represent periods of time in which a program was executing in a +//! particular context. +//! +//! A span consists of [fields], user-defined key-value pairs of arbitrary data +//! that describe the context the span represents, and a set of fixed attributes +//! that describe all `tracing` spans and events. Attributes describing spans +//! include: +//! +//! - An [`Id`] assigned by the subscriber that uniquely identifies it in relation +//! to other spans. +//! - The span's [parent] in the trace tree. +//! - [Metadata] that describes static characteristics of all spans +//! originating from that callsite, such as its name, source code location, +//! [verbosity level], and the names of its fields. +//! +//! # Creating Spans +//! +//! Spans are created using the [`span!`] macro. This macro is invoked with the +//! following arguments, in order: +//! +//! - The [`target`] and/or [`parent`][parent] attributes, if the user wishes to +//! override their default values. +//! - The span's [verbosity level] +//! - A string literal providing the span's name. +//! - Finally, between zero and 32 arbitrary key/value fields. +//! +//! [`target`]: super::Metadata::target +//! +//! For example: +//! ```rust +//! use tracing::{span, Level}; +//! +//! /// Construct a new span at the `INFO` level named "my_span", with a single +//! /// field named answer , with the value `42`. +//! let my_span = span!(Level::INFO, "my_span", answer = 42); +//! ``` +//! +//! The documentation for the [`span!`] macro provides additional examples of +//! the various options that exist when creating spans. +//! +//! 
The [`trace_span!`], [`debug_span!`], [`info_span!`], [`warn_span!`], and +//! [`error_span!`] exist as shorthand for constructing spans at various +//! verbosity levels. +//! +//! ## Recording Span Creation +//! +//! The [`Attributes`] type contains data associated with a span, and is +//! provided to the [`Subscriber`] when a new span is created. It contains +//! the span's metadata, the ID of [the span's parent][parent] if one was +//! explicitly set, and any fields whose values were recorded when the span was +//! constructed. The subscriber, which is responsible for recording `tracing` +//! data, can then store or record these values. +//! +//! # The Span Lifecycle +//! +//! ## Entering a Span +//! +//! A thread of execution is said to _enter_ a span when it begins executing, +//! and _exit_ the span when it switches to another context. Spans may be +//! entered through the [`enter`], [`entered`], and [`in_scope`] methods. +//! +//! The [`enter`] method enters a span, returning a [guard] that exits the span +//! when dropped +//! ``` +//! # use tracing::{span, Level}; +//! let my_var: u64 = 5; +//! let my_span = span!(Level::TRACE, "my_span", my_var); +//! +//! // `my_span` exists but has not been entered. +//! +//! // Enter `my_span`... +//! let _enter = my_span.enter(); +//! +//! // Perform some work inside of the context of `my_span`... +//! // Dropping the `_enter` guard will exit the span. +//!``` +//! +//!
+//!     **Warning**: In asynchronous code that uses async/await syntax,
+//!     `Span::enter` may produce incorrect traces if the returned drop
+//!     guard is held across an await point. See the method documentation
+//!     for details.
+//!
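To make the warning above concrete, here is a minimal sketch (not part of the vendored file) of the usual alternative: attaching the span to a future with the crate's `Instrument::instrument` adapter, so the span is entered on each poll instead of a guard being held across an `.await`. The `fetch` and `handler` functions are hypothetical.

```rust
use tracing::{info_span, Instrument};

// Hypothetical async work, shown only to illustrate the pattern.
async fn fetch() -> u64 {
    42
}

async fn handler() -> u64 {
    // Rather than holding the guard returned by `enter()` across `.await`,
    // instrument the future: the span is entered each time the future is
    // polled and exited when the poll returns, keeping traces correct.
    fetch().instrument(info_span!("fetch")).await
}
```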
+//! +//! The [`entered`] method is analogous to [`enter`], but moves the span into +//! the returned guard, rather than borrowing it. This allows creating and +//! entering a span in a single expression: +//! +//! ``` +//! # use tracing::{span, Level}; +//! // Create a span and enter it, returning a guard: +//! let span = span!(Level::INFO, "my_span").entered(); +//! +//! // We are now inside the span! Like `enter()`, the guard returned by +//! // `entered()` will exit the span when it is dropped... +//! +//! // ...but, it can also be exited explicitly, returning the `Span` +//! // struct: +//! let span = span.exit(); +//! ``` +//! +//! Finally, [`in_scope`] takes a closure or function pointer and executes it +//! inside the span: +//! +//! ``` +//! # use tracing::{span, Level}; +//! let my_var: u64 = 5; +//! let my_span = span!(Level::TRACE, "my_span", my_var = &my_var); +//! +//! my_span.in_scope(|| { +//! // perform some work in the context of `my_span`... +//! }); +//! +//! // Perform some work outside of the context of `my_span`... +//! +//! my_span.in_scope(|| { +//! // Perform some more work in the context of `my_span`. +//! }); +//! ``` +//! +//!
+//!     **Note**: Since entering a span takes `&self`, and `Span`s are
+//!     `Clone`, `Send`, and `Sync`, it is entirely valid for multiple
+//!     threads to enter the same span concurrently.
+//!
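A small sketch of what the note above permits (assuming standard-library threads; the span name and thread count are arbitrary): one `Span` handle is cloned and entered from several worker threads at once.

```rust
use std::thread;
use tracing::info_span;

fn main() {
    let span = info_span!("shared_work");

    let handles: Vec<_> = (0..2)
        .map(|i| {
            // `Span` is `Clone + Send + Sync`, so each worker thread can own
            // a handle and enter the same span concurrently.
            let span = span.clone();
            thread::spawn(move || {
                let _guard = span.enter();
                // ...work attributed to `shared_work` on worker `i`...
                let _ = i;
            })
        })
        .collect();

    for handle in handles {
        handle.join().expect("worker thread panicked");
    }
}
```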
+//! +//! ## Span Relationships +//! +//! Spans form a tree structure — unless it is a root span, all spans have a +//! _parent_, and may have one or more _children_. When a new span is created, +//! the current span becomes the new span's parent. The total execution time of +//! a span consists of the time spent in that span and in the entire subtree +//! represented by its children. Thus, a parent span always lasts for at least +//! as long as the longest-executing span in its subtree. +//! +//! ``` +//! # use tracing::{Level, span}; +//! // this span is considered the "root" of a new trace tree: +//! span!(Level::INFO, "root").in_scope(|| { +//! // since we are now inside "root", this span is considered a child +//! // of "root": +//! span!(Level::DEBUG, "outer_child").in_scope(|| { +//! // this span is a child of "outer_child", which is in turn a +//! // child of "root": +//! span!(Level::TRACE, "inner_child").in_scope(|| { +//! // and so on... +//! }); +//! }); +//! // another span created here would also be a child of "root". +//! }); +//!``` +//! +//! In addition, the parent of a span may be explicitly specified in +//! the `span!` macro. For example: +//! +//! ```rust +//! # use tracing::{Level, span}; +//! // Create, but do not enter, a span called "foo". +//! let foo = span!(Level::INFO, "foo"); +//! +//! // Create and enter a span called "bar". +//! let bar = span!(Level::INFO, "bar"); +//! let _enter = bar.enter(); +//! +//! // Although we have currently entered "bar", "baz"'s parent span +//! // will be "foo". +//! let baz = span!(parent: &foo, Level::INFO, "baz"); +//! ``` +//! +//! A child span should typically be considered _part_ of its parent. For +//! example, if a subscriber is recording the length of time spent in various +//! spans, it should generally include the time spent in a span's children as +//! part of that span's duration. +//! +//! In addition to having zero or one parent, a span may also _follow from_ any +//! number of other spans. This indicates a causal relationship between the span +//! and the spans that it follows from, but a follower is *not* typically +//! considered part of the duration of the span it follows. Unlike the parent, a +//! span may record that it follows from another span after it is created, using +//! the [`follows_from`] method. +//! +//! As an example, consider a listener task in a server. As the listener accepts +//! incoming connections, it spawns new tasks that handle those connections. We +//! might want to have a span representing the listener, and instrument each +//! spawned handler task with its own span. We would want our instrumentation to +//! record that the handler tasks were spawned as a result of the listener task. +//! However, we might not consider the handler tasks to be _part_ of the time +//! spent in the listener task, so we would not consider those spans children of +//! the listener span. Instead, we would record that the handler tasks follow +//! from the listener, recording the causal relationship but treating the spans +//! as separate durations. +//! +//! ## Closing Spans +//! +//! Execution may enter and exit a span multiple times before that span is +//! _closed_. Consider, for example, a future which has an associated +//! span and enters that span every time it is polled: +//! ```rust +//! # use std::future::Future; +//! # use std::task::{Context, Poll}; +//! # use std::pin::Pin; +//! struct MyFuture { +//! // data +//! span: tracing::Span, +//! } +//! +//! impl Future for MyFuture { +//! 
type Output = (); +//! +//! fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll { +//! let _enter = self.span.enter(); +//! // Do actual future work... +//! # Poll::Ready(()) +//! } +//! } +//! ``` +//! +//! If this future was spawned on an executor, it might yield one or more times +//! before `poll` returns [`Poll::Ready`]. If the future were to yield, then +//! the executor would move on to poll the next future, which may _also_ enter +//! an associated span or series of spans. Therefore, it is valid for a span to +//! be entered repeatedly before it completes. Only the time when that span or +//! one of its children was the current span is considered to be time spent in +//! that span. A span which is not executing and has not yet been closed is said +//! to be _idle_. +//! +//! Because spans may be entered and exited multiple times before they close, +//! [`Subscriber`]s have separate trait methods which are called to notify them +//! of span exits and when span handles are dropped. When execution exits a +//! span, [`exit`] will always be called with that span's ID to notify the +//! subscriber that the span has been exited. When span handles are dropped, the +//! [`drop_span`] method is called with that span's ID. The subscriber may use +//! this to determine whether or not the span will be entered again. +//! +//! If there is only a single handle with the capacity to exit a span, dropping +//! that handle "closes" the span, since the capacity to enter it no longer +//! exists. For example: +//! ``` +//! # use tracing::{Level, span}; +//! { +//! span!(Level::TRACE, "my_span").in_scope(|| { +//! // perform some work in the context of `my_span`... +//! }); // --> Subscriber::exit(my_span) +//! +//! // The handle to `my_span` only lives inside of this block; when it is +//! // dropped, the subscriber will be informed via `drop_span`. +//! +//! } // --> Subscriber::drop_span(my_span) +//! ``` +//! +//! However, if multiple handles exist, the span can still be re-entered even if +//! one or more is dropped. For determining when _all_ handles to a span have +//! been dropped, `Subscriber`s have a [`clone_span`] method, which is called +//! every time a span handle is cloned. Combined with `drop_span`, this may be +//! used to track the number of handles to a given span — if `drop_span` has +//! been called one more time than the number of calls to `clone_span` for a +//! given ID, then no more handles to the span with that ID exist. The +//! subscriber may then treat it as closed. +//! +//! # When to use spans +//! +//! As a rule of thumb, spans should be used to represent discrete units of work +//! (e.g., a given request's lifetime in a server) or periods of time spent in a +//! given context (e.g., time spent interacting with an instance of an external +//! system, such as a database). +//! +//! Which scopes in a program correspond to new spans depend somewhat on user +//! intent. For example, consider the case of a loop in a program. Should we +//! construct one span and perform the entire loop inside of that span, like: +//! +//! ```rust +//! # use tracing::{Level, span}; +//! # let n = 1; +//! let span = span!(Level::TRACE, "my_loop"); +//! let _enter = span.enter(); +//! for i in 0..n { +//! # let _ = i; +//! // ... +//! } +//! ``` +//! Or, should we create a new span for each iteration of the loop, as in: +//! ```rust +//! # use tracing::{Level, span}; +//! # let n = 1u64; +//! for i in 0..n { +//! let span = span!(Level::TRACE, "my_loop", iteration = i); +//! 
let _enter = span.enter(); +//! // ... +//! } +//! ``` +//! +//! Depending on the circumstances, we might want to do either, or both. For +//! example, if we want to know how long was spent in the loop overall, we would +//! create a single span around the entire loop; whereas if we wanted to know how +//! much time was spent in each individual iteration, we would enter a new span +//! on every iteration. +//! +//! [fields]: super::field +//! [Metadata]: super::Metadata +//! [verbosity level]: super::Level +//! [`Poll::Ready`]: std::task::Poll::Ready +//! [`span!`]: super::span! +//! [`trace_span!`]: super::trace_span! +//! [`debug_span!`]: super::debug_span! +//! [`info_span!`]: super::info_span! +//! [`warn_span!`]: super::warn_span! +//! [`error_span!`]: super::error_span! +//! [`clone_span`]: super::subscriber::Subscriber::clone_span() +//! [`drop_span`]: super::subscriber::Subscriber::drop_span() +//! [`exit`]: super::subscriber::Subscriber::exit +//! [`Subscriber`]: super::subscriber::Subscriber +//! [`enter`]: Span::enter() +//! [`entered`]: Span::entered() +//! [`in_scope`]: Span::in_scope() +//! [`follows_from`]: Span::follows_from() +//! [guard]: Entered +//! [parent]: #span-relationships +pub use tracing_core::span::{Attributes, Id, Record}; + +use crate::stdlib::{ + cmp, fmt, + hash::{Hash, Hasher}, + marker::PhantomData, + mem, + ops::Deref, +}; +use crate::{ + dispatcher::{self, Dispatch}, + field, Metadata, +}; + +/// Trait implemented by types which have a span `Id`. +pub trait AsId: crate::sealed::Sealed { + /// Returns the `Id` of the span that `self` corresponds to, or `None` if + /// this corresponds to a disabled span. + fn as_id(&self) -> Option<&Id>; +} + +/// A handle representing a span, with the capability to enter the span if it +/// exists. +/// +/// If the span was rejected by the current `Subscriber`'s filter, entering the +/// span will silently do nothing. Thus, the handle can be used in the same +/// manner regardless of whether or not the trace is currently being collected. +#[derive(Clone)] +pub struct Span { + /// A handle used to enter the span when it is not executing. + /// + /// If this is `None`, then the span has either closed or was never enabled. + inner: Option, + /// Metadata describing the span. + /// + /// This might be `Some` even if `inner` is `None`, in the case that the + /// span is disabled but the metadata is needed for `log` support. + meta: Option<&'static Metadata<'static>>, +} + +/// A handle representing the capacity to enter a span which is known to exist. +/// +/// Unlike `Span`, this type is only constructed for spans which _have_ been +/// enabled by the current filter. This type is primarily used for implementing +/// span handles; users should typically not need to interact with it directly. +#[derive(Debug)] +pub(crate) struct Inner { + /// The span's ID, as provided by `subscriber`. + id: Id, + + /// The subscriber that will receive events relating to this span. + /// + /// This should be the same subscriber that provided this span with its + /// `id`. + subscriber: Dispatch, +} + +/// A guard representing a span which has been entered and is currently +/// executing. +/// +/// When the guard is dropped, the span will be exited. +/// +/// This is returned by the [`Span::enter`] function. 
+/// +/// [`Span::enter`]: super::Span::enter +#[derive(Debug)] +#[must_use = "once a span has been entered, it should be exited"] +pub struct Entered<'a> { + span: &'a Span, +} + +/// An owned version of [`Entered`], a guard representing a span which has been +/// entered and is currently executing. +/// +/// When the guard is dropped, the span will be exited. +/// +/// This is returned by the [`Span::entered`] function. +/// +/// [`Span::entered`]: super::Span::entered() +#[derive(Debug)] +#[must_use = "once a span has been entered, it should be exited"] +pub struct EnteredSpan { + span: Span, + + /// ```compile_fail + /// use tracing::span::*; + /// trait AssertSend: Send {} + /// + /// impl AssertSend for EnteredSpan {} + /// ``` + _not_send: PhantomNotSend, +} + +/// `log` target for all span lifecycle (creation/enter/exit/close) records. +#[cfg(feature = "log")] +const LIFECYCLE_LOG_TARGET: &str = "tracing::span"; +/// `log` target for span activity (enter/exit) records. +#[cfg(feature = "log")] +const ACTIVITY_LOG_TARGET: &str = "tracing::span::active"; + +// ===== impl Span ===== + +impl Span { + /// Constructs a new `Span` with the given [metadata] and set of + /// [field values]. + /// + /// The new span will be constructed by the currently-active [`Subscriber`], + /// with the current span as its parent (if one exists). + /// + /// After the span is constructed, [field values] and/or [`follows_from`] + /// annotations may be added to it. + /// + /// [metadata]: super::Metadata + /// [`Subscriber`]: super::subscriber::Subscriber + /// [field values]: super::field::ValueSet + /// [`follows_from`]: super::Span::follows_from + pub fn new(meta: &'static Metadata<'static>, values: &field::ValueSet<'_>) -> Span { + dispatcher::get_default(|dispatch| Self::new_with(meta, values, dispatch)) + } + + #[inline] + #[doc(hidden)] + pub fn new_with( + meta: &'static Metadata<'static>, + values: &field::ValueSet<'_>, + dispatch: &Dispatch, + ) -> Span { + let new_span = Attributes::new(meta, values); + Self::make_with(meta, new_span, dispatch) + } + + /// Constructs a new `Span` as the root of its own trace tree, with the + /// given [metadata] and set of [field values]. + /// + /// After the span is constructed, [field values] and/or [`follows_from`] + /// annotations may be added to it. + /// + /// [metadata]: super::Metadata + /// [field values]: super::field::ValueSet + /// [`follows_from`]: super::Span::follows_from + pub fn new_root(meta: &'static Metadata<'static>, values: &field::ValueSet<'_>) -> Span { + dispatcher::get_default(|dispatch| Self::new_root_with(meta, values, dispatch)) + } + + #[inline] + #[doc(hidden)] + pub fn new_root_with( + meta: &'static Metadata<'static>, + values: &field::ValueSet<'_>, + dispatch: &Dispatch, + ) -> Span { + let new_span = Attributes::new_root(meta, values); + Self::make_with(meta, new_span, dispatch) + } + + /// Constructs a new `Span` as child of the given parent span, with the + /// given [metadata] and set of [field values]. + /// + /// After the span is constructed, [field values] and/or [`follows_from`] + /// annotations may be added to it. 
+ /// + /// [metadata]: super::Metadata + /// [field values]: super::field::ValueSet + /// [`follows_from`]: super::Span::follows_from + pub fn child_of( + parent: impl Into>, + meta: &'static Metadata<'static>, + values: &field::ValueSet<'_>, + ) -> Span { + let mut parent = parent.into(); + dispatcher::get_default(move |dispatch| { + Self::child_of_with(Option::take(&mut parent), meta, values, dispatch) + }) + } + + #[inline] + #[doc(hidden)] + pub fn child_of_with( + parent: impl Into>, + meta: &'static Metadata<'static>, + values: &field::ValueSet<'_>, + dispatch: &Dispatch, + ) -> Span { + let new_span = match parent.into() { + Some(parent) => Attributes::child_of(parent, meta, values), + None => Attributes::new_root(meta, values), + }; + Self::make_with(meta, new_span, dispatch) + } + + /// Constructs a new disabled span with the given `Metadata`. + /// + /// This should be used when a span is constructed from a known callsite, + /// but the subscriber indicates that it is disabled. + /// + /// Entering, exiting, and recording values on this span will not notify the + /// `Subscriber` but _may_ record log messages if the `log` feature flag is + /// enabled. + #[inline(always)] + pub fn new_disabled(meta: &'static Metadata<'static>) -> Span { + Self { + inner: None, + meta: Some(meta), + } + } + + /// Constructs a new span that is *completely disabled*. + /// + /// This can be used rather than `Option` to represent cases where a + /// span is not present. + /// + /// Entering, exiting, and recording values on this span will do nothing. + #[inline(always)] + pub const fn none() -> Span { + Self { + inner: None, + meta: None, + } + } + + /// Returns a handle to the span [considered by the `Subscriber`] to be the + /// current span. + /// + /// If the subscriber indicates that it does not track the current span, or + /// that the thread from which this function is called is not currently + /// inside a span, the returned span will be disabled. + /// + /// [considered by the `Subscriber`]: + /// super::subscriber::Subscriber::current_span + pub fn current() -> Span { + dispatcher::get_default(|dispatch| { + if let Some((id, meta)) = dispatch.current_span().into_inner() { + let id = dispatch.clone_span(&id); + Self { + inner: Some(Inner::new(id, dispatch)), + meta: Some(meta), + } + } else { + Self::none() + } + }) + } + + fn make_with( + meta: &'static Metadata<'static>, + new_span: Attributes<'_>, + dispatch: &Dispatch, + ) -> Span { + let attrs = &new_span; + let id = dispatch.new_span(attrs); + let inner = Some(Inner::new(id, dispatch)); + + let span = Self { + inner, + meta: Some(meta), + }; + + if_log_enabled! { *meta.level(), { + let target = if attrs.is_empty() { + LIFECYCLE_LOG_TARGET + } else { + meta.target() + }; + let values = attrs.values(); + span.log( + target, + level_to_log!(*meta.level()), + format_args!("++ {};{}", meta.name(), crate::log::LogValueSet { values, is_first: false }), + ); + }} + + span + } + + /// Enters this span, returning a guard that will exit the span when dropped. + /// + /// If this span is enabled by the current subscriber, then this function will + /// call [`Subscriber::enter`] with the span's [`Id`], and dropping the guard + /// will call [`Subscriber::exit`]. If the span is disabled, this does + /// nothing. + /// + /// # In Asynchronous Code + /// + /// **Warning**: in asynchronous code that uses [async/await syntax][syntax], + /// `Span::enter` should be used very carefully or avoided entirely. 
Holding + /// the drop guard returned by `Span::enter` across `.await` points will + /// result in incorrect traces. For example, + /// + /// ``` + /// # use tracing::info_span; + /// # async fn some_other_async_function() {} + /// async fn my_async_function() { + /// let span = info_span!("my_async_function"); + /// + /// // WARNING: This span will remain entered until this + /// // guard is dropped... + /// let _enter = span.enter(); + /// // ...but the `await` keyword may yield, causing the + /// // runtime to switch to another task, while remaining in + /// // this span! + /// some_other_async_function().await + /// + /// // ... + /// } + /// ``` + /// + /// The drop guard returned by `Span::enter` exits the span when it is + /// dropped. When an async function or async block yields at an `.await` + /// point, the current scope is _exited_, but values in that scope are + /// **not** dropped (because the async block will eventually resume + /// execution from that await point). This means that _another_ task will + /// begin executing while _remaining_ in the entered span. This results in + /// an incorrect trace. + /// + /// Instead of using `Span::enter` in asynchronous code, prefer the + /// following: + /// + /// * To enter a span for a synchronous section of code within an async + /// block or function, prefer [`Span::in_scope`]. Since `in_scope` takes a + /// synchronous closure and exits the span when the closure returns, the + /// span will always be exited before the next await point. For example: + /// ``` + /// # use tracing::info_span; + /// # async fn some_other_async_function(_: ()) {} + /// async fn my_async_function() { + /// let span = info_span!("my_async_function"); + /// + /// let some_value = span.in_scope(|| { + /// // run some synchronous code inside the span... + /// }); + /// + /// // This is okay! The span has already been exited before we reach + /// // the await point. + /// some_other_async_function(some_value).await; + /// + /// // ... + /// } + /// ``` + /// * For instrumenting asynchronous code, `tracing` provides the + /// [`Future::instrument` combinator][instrument] for + /// attaching a span to a future (async function or block). This will + /// enter the span _every_ time the future is polled, and exit it whenever + /// the future yields. + /// + /// `Instrument` can be used with an async block inside an async function: + /// ```ignore + /// # use tracing::info_span; + /// use tracing::Instrument; + /// + /// # async fn some_other_async_function() {} + /// async fn my_async_function() { + /// let span = info_span!("my_async_function"); + /// async move { + /// // This is correct! If we yield here, the span will be exited, + /// // and re-entered when we resume. + /// some_other_async_function().await; + /// + /// //more asynchronous code inside the span... + /// + /// } + /// // instrument the async block with the span... + /// .instrument(span) + /// // ...and await it. + /// .await + /// } + /// ``` + /// + /// It can also be used to instrument calls to async functions at the + /// callsite: + /// ```ignore + /// # use tracing::debug_span; + /// use tracing::Instrument; + /// + /// # async fn some_other_async_function() {} + /// async fn my_async_function() { + /// let some_value = some_other_async_function() + /// .instrument(debug_span!("some_other_async_function")) + /// .await; + /// + /// // ... 
+ /// } + /// ``` + /// + /// * The [`#[instrument]` attribute macro][attr] can automatically generate + /// correct code when used on an async function: + /// + /// ```ignore + /// # async fn some_other_async_function() {} + /// #[tracing::instrument(level = "info")] + /// async fn my_async_function() { + /// + /// // This is correct! If we yield here, the span will be exited, + /// // and re-entered when we resume. + /// some_other_async_function().await; + /// + /// // ... + /// + /// } + /// ``` + /// + /// [syntax]: https://rust-lang.github.io/async-book/01_getting_started/04_async_await_primer.html + /// [`Span::in_scope`]: #method.in_scope + /// [instrument]: https://docs.rs/tracing/latest/tracing/trait.Instrument.html + /// [attr]: macro@crate::instrument + /// + /// # Examples + /// + /// ``` + /// # use tracing::{span, Level}; + /// let span = span!(Level::INFO, "my_span"); + /// let guard = span.enter(); + /// + /// // code here is within the span + /// + /// drop(guard); + /// + /// // code here is no longer within the span + /// + /// ``` + /// + /// Guards need not be explicitly dropped: + /// + /// ``` + /// # use tracing::trace_span; + /// fn my_function() -> String { + /// // enter a span for the duration of this function. + /// let span = trace_span!("my_function"); + /// let _enter = span.enter(); + /// + /// // anything happening in functions we call is still inside the span... + /// my_other_function(); + /// + /// // returning from the function drops the guard, exiting the span. + /// return "Hello world".to_owned(); + /// } + /// + /// fn my_other_function() { + /// // ... + /// } + /// ``` + /// + /// Sub-scopes may be created to limit the duration for which the span is + /// entered: + /// + /// ``` + /// # use tracing::{info, info_span}; + /// let span = info_span!("my_great_span"); + /// + /// { + /// let _enter = span.enter(); + /// + /// // this event occurs inside the span. + /// info!("i'm in the span!"); + /// + /// // exiting the scope drops the guard, exiting the span. + /// } + /// + /// // this event is not inside the span. + /// info!("i'm outside the span!") + /// ``` + /// + /// [`Subscriber::enter`]: super::subscriber::Subscriber::enter() + /// [`Subscriber::exit`]: super::subscriber::Subscriber::exit() + /// [`Id`]: super::Id + #[inline(always)] + pub fn enter(&self) -> Entered<'_> { + self.do_enter(); + Entered { span: self } + } + + /// Enters this span, consuming it and returning a [guard][`EnteredSpan`] + /// that will exit the span when dropped. + /// + ///
+    /// **Warning**: In asynchronous code that uses async/await syntax,
+    /// `Span::entered` may produce incorrect traces if the returned drop
+    /// guard is held across an await point. See the [`Span::enter`]
+    /// documentation for details.
+ /// + /// + /// If this span is enabled by the current subscriber, then this function will + /// call [`Subscriber::enter`] with the span's [`Id`], and dropping the guard + /// will call [`Subscriber::exit`]. If the span is disabled, this does + /// nothing. + /// + /// This is similar to the [`Span::enter`] method, except that it moves the + /// span by value into the returned guard, rather than borrowing it. + /// Therefore, this method can be used to create and enter a span in a + /// single expression, without requiring a `let`-binding. For example: + /// + /// ``` + /// # use tracing::info_span; + /// let _span = info_span!("something_interesting").entered(); + /// ``` + /// rather than: + /// ``` + /// # use tracing::info_span; + /// let span = info_span!("something_interesting"); + /// let _e = span.enter(); + /// ``` + /// + /// Furthermore, `entered` may be used when the span must be stored in some + /// other struct or be passed to a function while remaining entered. + /// + ///
+    /// **Note**: The returned [`EnteredSpan`] guard does not implement `Send`.
+    /// Dropping the guard will exit *this* span, and if the guard is sent
+    /// to another thread and dropped there, that thread may never have entered
+    /// this span. Thus, `EnteredSpan`s should not be sent between threads.
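+    ///
+    /// A minimal sketch of one way to handle this (illustrative only): exit
+    /// the guard first, then send the underlying [`Span`], which *is* `Send`,
+    /// and re-enter it on the other thread.
+    ///
+    /// ```
+    /// # use tracing::info_span;
+    /// let entered = info_span!("my_span").entered();
+    /// // ... work on the current thread inside the span ...
+    ///
+    /// // Exit the guard, recovering the plain `Span`, before crossing threads.
+    /// let span = entered.exit();
+    /// let handle = std::thread::spawn(move || {
+    ///     // Re-enter the span on the new thread.
+    ///     let _entered = span.entered();
+    ///     // ... more work inside the span ...
+    /// });
+    /// handle.join().unwrap();
+    /// ```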
+ /// + /// [syntax]: https://rust-lang.github.io/async-book/01_getting_started/04_async_await_primer.html + /// + /// # Examples + /// + /// The returned guard can be [explicitly exited][EnteredSpan::exit], + /// returning the un-entered span: + /// + /// ``` + /// # use tracing::{Level, span}; + /// let span = span!(Level::INFO, "doing_something").entered(); + /// + /// // code here is within the span + /// + /// // explicitly exit the span, returning it + /// let span = span.exit(); + /// + /// // code here is no longer within the span + /// + /// // enter the span again + /// let span = span.entered(); + /// + /// // now we are inside the span once again + /// ``` + /// + /// Guards need not be explicitly dropped: + /// + /// ``` + /// # use tracing::trace_span; + /// fn my_function() -> String { + /// // enter a span for the duration of this function. + /// let span = trace_span!("my_function").entered(); + /// + /// // anything happening in functions we call is still inside the span... + /// my_other_function(); + /// + /// // returning from the function drops the guard, exiting the span. + /// return "Hello world".to_owned(); + /// } + /// + /// fn my_other_function() { + /// // ... + /// } + /// ``` + /// + /// Since the [`EnteredSpan`] guard can dereference to the [`Span`] itself, + /// the span may still be accessed while entered. For example: + /// + /// ```rust + /// # use tracing::info_span; + /// use tracing::field; + /// + /// // create the span with an empty field, and enter it. + /// let span = info_span!("my_span", some_field = field::Empty).entered(); + /// + /// // we can still record a value for the field while the span is entered. + /// span.record("some_field", &"hello world!"); + /// ``` + /// + + /// [`Subscriber::enter`]: super::subscriber::Subscriber::enter() + /// [`Subscriber::exit`]: super::subscriber::Subscriber::exit() + /// [`Id`]: super::Id + #[inline(always)] + pub fn entered(self) -> EnteredSpan { + self.do_enter(); + EnteredSpan { + span: self, + _not_send: PhantomNotSend, + } + } + + /// Returns this span, if it was [enabled] by the current [`Subscriber`], or + /// the [current span] (whose lexical distance may be further than expected), + /// if this span [is disabled]. + /// + /// This method can be useful when propagating spans to spawned threads or + /// [async tasks]. Consider the following: + /// + /// ``` + /// let _parent_span = tracing::info_span!("parent").entered(); + /// + /// // ... + /// + /// let child_span = tracing::debug_span!("child"); + /// + /// std::thread::spawn(move || { + /// let _entered = child_span.entered(); + /// + /// tracing::info!("spawned a thread!"); + /// + /// // ... + /// }); + /// ``` + /// + /// If the current [`Subscriber`] enables the [`DEBUG`] level, then both + /// the "parent" and "child" spans will be enabled. Thus, when the "spawaned + /// a thread!" event occurs, it will be inside of the "child" span. Because + /// "parent" is the parent of "child", the event will _also_ be inside of + /// "parent". + /// + /// However, if the [`Subscriber`] only enables the [`INFO`] level, the "child" + /// span will be disabled. When the thread is spawned, the + /// `child_span.entered()` call will do nothing, since "child" is not + /// enabled. In this case, the "spawned a thread!" event occurs outside of + /// *any* span, since the "child" span was responsible for propagating its + /// parent to the spawned thread. 
+ /// + /// If this is not the desired behavior, `Span::or_current` can be used to + /// ensure that the "parent" span is propagated in both cases, either as a + /// parent of "child" _or_ directly. For example: + /// + /// ``` + /// let _parent_span = tracing::info_span!("parent").entered(); + /// + /// // ... + /// + /// // If DEBUG is enabled, then "child" will be enabled, and `or_current` + /// // returns "child". Otherwise, if DEBUG is not enabled, "child" will be + /// // disabled, and `or_current` returns "parent". + /// let child_span = tracing::debug_span!("child").or_current(); + /// + /// std::thread::spawn(move || { + /// let _entered = child_span.entered(); + /// + /// tracing::info!("spawned a thread!"); + /// + /// // ... + /// }); + /// ``` + /// + /// When spawning [asynchronous tasks][async tasks], `Span::or_current` can + /// be used similarly, in combination with [`instrument`]: + /// + /// ``` + /// use tracing::Instrument; + /// # // lol + /// # mod tokio { + /// # pub(super) fn spawn(_: impl std::future::Future) {} + /// # } + /// + /// let _parent_span = tracing::info_span!("parent").entered(); + /// + /// // ... + /// + /// let child_span = tracing::debug_span!("child"); + /// + /// tokio::spawn( + /// async { + /// tracing::info!("spawned a task!"); + /// + /// // ... + /// + /// }.instrument(child_span.or_current()) + /// ); + /// ``` + /// + /// In general, `or_current` should be preferred over nesting an + /// [`instrument`] call inside of an [`in_current_span`] call, as using + /// `or_current` will be more efficient. + /// + /// ``` + /// use tracing::Instrument; + /// # // lol + /// # mod tokio { + /// # pub(super) fn spawn(_: impl std::future::Future) {} + /// # } + /// async fn my_async_fn() { + /// // ... + /// } + /// + /// let _parent_span = tracing::info_span!("parent").entered(); + /// + /// // Do this: + /// tokio::spawn( + /// my_async_fn().instrument(tracing::debug_span!("child").or_current()) + /// ); + /// + /// // ...rather than this: + /// tokio::spawn( + /// my_async_fn() + /// .instrument(tracing::debug_span!("child")) + /// .in_current_span() + /// ); + /// ``` + /// + /// [enabled]: crate::Subscriber::enabled + /// [`Subscriber`]: crate::Subscriber + /// [current span]: Span::current + /// [is disabled]: Span::is_disabled + /// [`INFO`]: crate::Level::INFO + /// [`DEBUG`]: crate::Level::DEBUG + /// [async tasks]: std::task + /// [`instrument`]: crate::instrument::Instrument::instrument + /// [`in_current_span`]: crate::instrument::Instrument::in_current_span + pub fn or_current(self) -> Self { + if self.is_disabled() { + return Self::current(); + } + self + } + + #[inline(always)] + fn do_enter(&self) { + if let Some(inner) = self.inner.as_ref() { + inner.subscriber.enter(&inner.id); + } + + if_log_enabled! { crate::Level::TRACE, { + if let Some(_meta) = self.meta { + self.log(ACTIVITY_LOG_TARGET, log::Level::Trace, format_args!("-> {};", _meta.name())); + } + }} + } + + // Called from [`Entered`] and [`EnteredSpan`] drops. + // + // Running this behaviour on drop rather than with an explicit function + // call means that spans may still be exited when unwinding. + #[inline(always)] + fn do_exit(&self) { + if let Some(inner) = self.inner.as_ref() { + inner.subscriber.exit(&inner.id); + } + + if_log_enabled! { crate::Level::TRACE, { + if let Some(_meta) = self.meta { + self.log(ACTIVITY_LOG_TARGET, log::Level::Trace, format_args!("<- {};", _meta.name())); + } + }} + } + + /// Executes the given function in the context of this span. 
+ /// + /// If this span is enabled, then this function enters the span, invokes `f` + /// and then exits the span. If the span is disabled, `f` will still be + /// invoked, but in the context of the currently-executing span (if there is + /// one). + /// + /// Returns the result of evaluating `f`. + /// + /// # Examples + /// + /// ``` + /// # use tracing::{trace, span, Level}; + /// let my_span = span!(Level::TRACE, "my_span"); + /// + /// my_span.in_scope(|| { + /// // this event occurs within the span. + /// trace!("i'm in the span!"); + /// }); + /// + /// // this event occurs outside the span. + /// trace!("i'm not in the span!"); + /// ``` + /// + /// Calling a function and returning the result: + /// ``` + /// # use tracing::{info_span, Level}; + /// fn hello_world() -> String { + /// "Hello world!".to_owned() + /// } + /// + /// let span = info_span!("hello_world"); + /// // the span will be entered for the duration of the call to + /// // `hello_world`. + /// let a_string = span.in_scope(hello_world); + /// + pub fn in_scope T, T>(&self, f: F) -> T { + let _enter = self.enter(); + f() + } + + /// Returns a [`Field`][super::field::Field] for the field with the + /// given `name`, if one exists, + pub fn field(&self, field: &Q) -> Option + where + Q: field::AsField, + { + self.metadata().and_then(|meta| field.as_field(meta)) + } + + /// Returns true if this `Span` has a field for the given + /// [`Field`][super::field::Field] or field name. + #[inline] + pub fn has_field(&self, field: &Q) -> bool + where + Q: field::AsField, + { + self.field(field).is_some() + } + + /// Records that the field described by `field` has the value `value`. + /// + /// This may be used with [`field::Empty`] to declare fields whose values + /// are not known when the span is created, and record them later: + /// ``` + /// use tracing::{trace_span, field}; + /// + /// // Create a span with two fields: `greeting`, with the value "hello world", and + /// // `parting`, without a value. + /// let span = trace_span!("my_span", greeting = "hello world", parting = field::Empty); + /// + /// // ... + /// + /// // Now, record a value for parting as well. + /// // (note that the field name is passed as a string slice) + /// span.record("parting", &"goodbye world!"); + /// ``` + /// However, it may also be used to record a _new_ value for a field whose + /// value was already recorded: + /// ``` + /// use tracing::info_span; + /// # fn do_something() -> Result<(), ()> { Err(()) } + /// + /// // Initially, let's assume that our attempt to do something is going okay... + /// let span = info_span!("doing_something", is_okay = true); + /// let _e = span.enter(); + /// + /// match do_something() { + /// Ok(something) => { + /// // ... + /// } + /// Err(_) => { + /// // Things are no longer okay! + /// span.record("is_okay", &false); + /// } + /// } + /// ``` + /// + ///
+    /// **Note**: The fields associated with a span are part of its
+    /// [`Metadata`]. The [`Metadata`] describing a particular span is
+    /// constructed statically when the span is created and cannot be extended
+    /// later to add new fields. Therefore, you cannot record a value for a
+    /// field that was not specified when the span was created:
+ /// + /// ``` + /// use tracing::{trace_span, field}; + /// + /// // Create a span with two fields: `greeting`, with the value "hello world", and + /// // `parting`, without a value. + /// let span = trace_span!("my_span", greeting = "hello world", parting = field::Empty); + /// + /// // ... + /// + /// // Now, you try to record a value for a new field, `new_field`, which was not + /// // declared as `Empty` or populated when you created `span`. + /// // You won't get any error, but the assignment will have no effect! + /// span.record("new_field", &"interesting_value_you_really_need"); + /// + /// // Instead, all fields that may be recorded after span creation should be declared up front, + /// // using field::Empty when a value is not known, as we did for `parting`. + /// // This `record` call will indeed replace field::Empty with "you will be remembered". + /// span.record("parting", &"you will be remembered"); + /// ``` + /// + /// [`field::Empty`]: super::field::Empty + /// [`Metadata`]: super::Metadata + pub fn record(&self, field: &Q, value: &V) -> &Self + where + Q: field::AsField, + V: field::Value, + { + if let Some(meta) = self.meta { + if let Some(field) = field.as_field(meta) { + self.record_all( + &meta + .fields() + .value_set(&[(&field, Some(value as &dyn field::Value))]), + ); + } + } + + self + } + + /// Records all the fields in the provided `ValueSet`. + pub fn record_all(&self, values: &field::ValueSet<'_>) -> &Self { + let record = Record::new(values); + if let Some(ref inner) = self.inner { + inner.record(&record); + } + + if let Some(_meta) = self.meta { + if_log_enabled! { *_meta.level(), { + let target = if record.is_empty() { + LIFECYCLE_LOG_TARGET + } else { + _meta.target() + }; + self.log( + target, + level_to_log!(*_meta.level()), + format_args!("{};{}", _meta.name(), crate::log::LogValueSet { values, is_first: false }), + ); + }} + } + + self + } + + /// Returns `true` if this span was disabled by the subscriber and does not + /// exist. + /// + /// See also [`is_none`]. + /// + /// [`is_none`]: #method.is_none + #[inline] + pub fn is_disabled(&self) -> bool { + self.inner.is_none() + } + + /// Returns `true` if this span was constructed by [`Span::none`] and is + /// empty. + /// + /// If `is_none` returns `true` for a given span, then [`is_disabled`] will + /// also return `true`. However, when a span is disabled by the subscriber + /// rather than constructed by `Span::none`, this method will return + /// `false`, while `is_disabled` will return `true`. + /// + /// [`Span::none`]: #method.none + /// [`is_disabled`]: #method.is_disabled + #[inline] + pub fn is_none(&self) -> bool { + self.is_disabled() && self.meta.is_none() + } + + /// Indicates that the span with the given ID has an indirect causal + /// relationship with this span. + /// + /// This relationship differs somewhat from the parent-child relationship: a + /// span may have any number of prior spans, rather than a single one; and + /// spans are not considered to be executing _inside_ of the spans they + /// follow from. This means that a span may close even if subsequent spans + /// that follow from it are still open, and time spent inside of a + /// subsequent span should not be included in the time its precedents were + /// executing. This is used to model causal relationships such as when a + /// single future spawns several related background tasks, et cetera. 
+ /// + /// If this span is disabled, or the resulting follows-from relationship + /// would be invalid, this function will do nothing. + /// + /// # Examples + /// + /// Setting a `follows_from` relationship with a `Span`: + /// ``` + /// # use tracing::{span, Id, Level, Span}; + /// let span1 = span!(Level::INFO, "span_1"); + /// let span2 = span!(Level::DEBUG, "span_2"); + /// span2.follows_from(span1); + /// ``` + /// + /// Setting a `follows_from` relationship with the current span: + /// ``` + /// # use tracing::{span, Id, Level, Span}; + /// let span = span!(Level::INFO, "hello!"); + /// span.follows_from(Span::current()); + /// ``` + /// + /// Setting a `follows_from` relationship with a `Span` reference: + /// ``` + /// # use tracing::{span, Id, Level, Span}; + /// let span = span!(Level::INFO, "hello!"); + /// let curr = Span::current(); + /// span.follows_from(&curr); + /// ``` + /// + /// Setting a `follows_from` relationship with an `Id`: + /// ``` + /// # use tracing::{span, Id, Level, Span}; + /// let span = span!(Level::INFO, "hello!"); + /// let id = span.id(); + /// span.follows_from(id); + /// ``` + pub fn follows_from(&self, from: impl Into>) -> &Self { + if let Some(ref inner) = self.inner { + if let Some(from) = from.into() { + inner.follows_from(&from); + } + } + self + } + + /// Returns this span's `Id`, if it is enabled. + pub fn id(&self) -> Option { + self.inner.as_ref().map(Inner::id) + } + + /// Returns this span's `Metadata`, if it is enabled. + pub fn metadata(&self) -> Option<&'static Metadata<'static>> { + self.meta + } + + #[cfg(feature = "log")] + #[inline] + fn log(&self, target: &str, level: log::Level, message: fmt::Arguments<'_>) { + if let Some(meta) = self.meta { + if level_to_log!(*meta.level()) <= log::max_level() { + let logger = log::logger(); + let log_meta = log::Metadata::builder().level(level).target(target).build(); + if logger.enabled(&log_meta) { + if let Some(ref inner) = self.inner { + logger.log( + &log::Record::builder() + .metadata(log_meta) + .module_path(meta.module_path()) + .file(meta.file()) + .line(meta.line()) + .args(format_args!("{} span={}", message, inner.id.into_u64())) + .build(), + ); + } else { + logger.log( + &log::Record::builder() + .metadata(log_meta) + .module_path(meta.module_path()) + .file(meta.file()) + .line(meta.line()) + .args(message) + .build(), + ); + } + } + } + } + } + + /// Invokes a function with a reference to this span's ID and subscriber. + /// + /// if this span is enabled, the provided function is called, and the result is returned. + /// If the span is disabled, the function is not called, and this method returns `None` + /// instead. 
+ pub fn with_subscriber(&self, f: impl FnOnce((&Id, &Dispatch)) -> T) -> Option { + self.inner + .as_ref() + .map(|inner| f((&inner.id, &inner.subscriber))) + } +} + +impl cmp::PartialEq for Span { + fn eq(&self, other: &Self) -> bool { + match (&self.meta, &other.meta) { + (Some(this), Some(that)) => { + this.callsite() == that.callsite() && self.inner == other.inner + } + _ => false, + } + } +} + +impl Hash for Span { + fn hash(&self, hasher: &mut H) { + self.inner.hash(hasher); + } +} + +impl fmt::Debug for Span { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let mut span = f.debug_struct("Span"); + if let Some(meta) = self.meta { + span.field("name", &meta.name()) + .field("level", &meta.level()) + .field("target", &meta.target()); + + if let Some(ref inner) = self.inner { + span.field("id", &inner.id()); + } else { + span.field("disabled", &true); + } + + if let Some(ref path) = meta.module_path() { + span.field("module_path", &path); + } + + if let Some(ref line) = meta.line() { + span.field("line", &line); + } + + if let Some(ref file) = meta.file() { + span.field("file", &file); + } + } else { + span.field("none", &true); + } + + span.finish() + } +} + +impl<'a> From<&'a Span> for Option<&'a Id> { + fn from(span: &'a Span) -> Self { + span.inner.as_ref().map(|inner| &inner.id) + } +} + +impl<'a> From<&'a Span> for Option { + fn from(span: &'a Span) -> Self { + span.inner.as_ref().map(Inner::id) + } +} + +impl From for Option { + fn from(span: Span) -> Self { + span.inner.as_ref().map(Inner::id) + } +} + +impl<'a> From<&'a EnteredSpan> for Option<&'a Id> { + fn from(span: &'a EnteredSpan) -> Self { + span.inner.as_ref().map(|inner| &inner.id) + } +} + +impl<'a> From<&'a EnteredSpan> for Option { + fn from(span: &'a EnteredSpan) -> Self { + span.inner.as_ref().map(Inner::id) + } +} + +impl Drop for Span { + #[inline(always)] + fn drop(&mut self) { + if let Some(Inner { + ref id, + ref subscriber, + }) = self.inner + { + subscriber.try_close(id.clone()); + } + + if_log_enabled! { crate::Level::TRACE, { + if let Some(meta) = self.meta { + self.log( + LIFECYCLE_LOG_TARGET, + log::Level::Trace, + format_args!("-- {};", meta.name()), + ); + } + }} + } +} + +// ===== impl Inner ===== + +impl Inner { + /// Indicates that the span with the given ID has an indirect causal + /// relationship with this span. + /// + /// This relationship differs somewhat from the parent-child relationship: a + /// span may have any number of prior spans, rather than a single one; and + /// spans are not considered to be executing _inside_ of the spans they + /// follow from. This means that a span may close even if subsequent spans + /// that follow from it are still open, and time spent inside of a + /// subsequent span should not be included in the time its precedents were + /// executing. This is used to model causal relationships such as when a + /// single future spawns several related background tasks, et cetera. + /// + /// If this span is disabled, this function will do nothing. Otherwise, it + /// returns `Ok(())` if the other span was added as a precedent of this + /// span, or an error if this was not possible. + fn follows_from(&self, from: &Id) { + self.subscriber.record_follows_from(&self.id, from) + } + + /// Returns the span's ID. 
+ fn id(&self) -> Id { + self.id.clone() + } + + fn record(&self, values: &Record<'_>) { + self.subscriber.record(&self.id, values) + } + + fn new(id: Id, subscriber: &Dispatch) -> Self { + Inner { + id, + subscriber: subscriber.clone(), + } + } +} + +impl cmp::PartialEq for Inner { + fn eq(&self, other: &Self) -> bool { + self.id == other.id + } +} + +impl Hash for Inner { + fn hash(&self, state: &mut H) { + self.id.hash(state); + } +} + +impl Clone for Inner { + fn clone(&self) -> Self { + Inner { + id: self.subscriber.clone_span(&self.id), + subscriber: self.subscriber.clone(), + } + } +} + +// ===== impl Entered ===== + +impl EnteredSpan { + /// Returns this span's `Id`, if it is enabled. + pub fn id(&self) -> Option { + self.inner.as_ref().map(Inner::id) + } + + /// Exits this span, returning the underlying [`Span`]. + #[inline] + pub fn exit(mut self) -> Span { + // One does not simply move out of a struct with `Drop`. + let span = mem::replace(&mut self.span, Span::none()); + span.do_exit(); + span + } +} + +impl Deref for EnteredSpan { + type Target = Span; + + #[inline] + fn deref(&self) -> &Span { + &self.span + } +} + +impl<'a> Drop for Entered<'a> { + #[inline(always)] + fn drop(&mut self) { + self.span.do_exit() + } +} + +impl Drop for EnteredSpan { + #[inline(always)] + fn drop(&mut self) { + self.span.do_exit() + } +} + +/// Technically, `EnteredSpan` _can_ implement both `Send` *and* +/// `Sync` safely. It doesn't, because it has a `PhantomNotSend` field, +/// specifically added in order to make it `!Send`. +/// +/// Sending an `EnteredSpan` guard between threads cannot cause memory unsafety. +/// However, it *would* result in incorrect behavior, so we add a +/// `PhantomNotSend` to prevent it from being sent between threads. This is +/// because it must be *dropped* on the same thread that it was created; +/// otherwise, the span will never be exited on the thread where it was entered, +/// and it will attempt to exit the span on a thread that may never have entered +/// it. However, we still want them to be `Sync` so that a struct holding an +/// `Entered` guard can be `Sync`. +/// +/// Thus, this is totally safe. +#[derive(Debug)] +struct PhantomNotSend { + ghost: PhantomData<*mut ()>, +} + +#[allow(non_upper_case_globals)] +const PhantomNotSend: PhantomNotSend = PhantomNotSend { ghost: PhantomData }; + +/// # Safety +/// +/// Trivially safe, as `PhantomNotSend` doesn't have any API. +unsafe impl Sync for PhantomNotSend {} + +#[cfg(test)] +mod test { + use super::*; + + trait AssertSend: Send {} + impl AssertSend for Span {} + + trait AssertSync: Sync {} + impl AssertSync for Span {} + impl AssertSync for Entered<'_> {} + impl AssertSync for EnteredSpan {} +} diff --git a/third_party/rust/tracing/src/stdlib.rs b/third_party/rust/tracing/src/stdlib.rs new file mode 100644 index 000000000000..12b54084d432 --- /dev/null +++ b/third_party/rust/tracing/src/stdlib.rs @@ -0,0 +1,55 @@ +//! Re-exports either the Rust `std` library or `core` and `alloc` when `std` is +//! disabled. +//! +//! `crate::stdlib::...` should be used rather than `std::` when adding code that +//! will be available with the standard library disabled. +//! +//! Note that this module is called `stdlib` rather than `std`, as Rust 1.34.0 +//! does not permit redefining the name `stdlib` (although this works on the +//! latest stable Rust). 
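+//!
+//! For example, other modules in this crate import through this facade rather
+//! than naming `std` directly (an illustrative sketch mirroring the imports at
+//! the top of `span.rs`):
+//!
+//! ```ignore
+//! use crate::stdlib::{
+//!     fmt,
+//!     hash::{Hash, Hasher},
+//! };
+//! ```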
+#[cfg(feature = "std")] +pub(crate) use std::*; + +#[cfg(not(feature = "std"))] +pub(crate) use self::no_std::*; + +#[cfg(not(feature = "std"))] +mod no_std { + // We pre-emptively export everything from libcore/liballoc, (even modules + // we aren't using currently) to make adding new code easier. Therefore, + // some of these imports will be unused. + #![allow(unused_imports)] + + pub(crate) use core::{ + any, array, ascii, cell, char, clone, cmp, convert, default, f32, f64, ffi, future, hash, + hint, i128, i16, i8, isize, iter, marker, mem, num, ops, option, pin, ptr, result, task, + time, u128, u16, u32, u8, usize, + }; + + pub(crate) use alloc::{boxed, collections, rc, string, vec}; + + pub(crate) mod borrow { + pub(crate) use alloc::borrow::*; + pub(crate) use core::borrow::*; + } + + pub(crate) mod fmt { + pub(crate) use alloc::fmt::*; + pub(crate) use core::fmt::*; + } + + pub(crate) mod slice { + pub(crate) use alloc::slice::*; + pub(crate) use core::slice::*; + } + + pub(crate) mod str { + pub(crate) use alloc::str::*; + pub(crate) use core::str::*; + } + + pub(crate) mod sync { + pub(crate) use alloc::sync::*; + pub(crate) use core::sync::*; + } +} diff --git a/third_party/rust/tracing/src/subscriber.rs b/third_party/rust/tracing/src/subscriber.rs new file mode 100644 index 000000000000..343dc5914c37 --- /dev/null +++ b/third_party/rust/tracing/src/subscriber.rs @@ -0,0 +1,68 @@ +//! Collects and records trace data. +pub use tracing_core::subscriber::*; + +#[cfg(feature = "std")] +#[cfg_attr(docsrs, doc(cfg(feature = "std")))] +pub use tracing_core::dispatcher::DefaultGuard; + +/// Sets this subscriber as the default for the duration of a closure. +/// +/// The default subscriber is used when creating a new [`Span`] or +/// [`Event`], _if no span is currently executing_. If a span is currently +/// executing, new spans or events are dispatched to the subscriber that +/// tagged that span, instead. +/// +/// [`Span`]: super::span::Span +/// [`Subscriber`]: super::subscriber::Subscriber +/// [`Event`]: super::event::Event +#[cfg(feature = "std")] +#[cfg_attr(docsrs, doc(cfg(feature = "std")))] +pub fn with_default(subscriber: S, f: impl FnOnce() -> T) -> T +where + S: Subscriber + Send + Sync + 'static, +{ + crate::dispatcher::with_default(&crate::Dispatch::new(subscriber), f) +} + +/// Sets this subscriber as the global default for the duration of the entire program. +/// Will be used as a fallback if no thread-local subscriber has been set in a thread (using `with_default`.) +/// +/// Can only be set once; subsequent attempts to set the global default will fail. +/// Returns whether the initialization was successful. +/// +/// Note: Libraries should *NOT* call `set_global_default()`! That will cause conflicts when +/// executables try to set them later. +/// +/// [span]: super::span +/// [`Subscriber`]: super::subscriber::Subscriber +/// [`Event`]: super::event::Event +pub fn set_global_default(subscriber: S) -> Result<(), SetGlobalDefaultError> +where + S: Subscriber + Send + Sync + 'static, +{ + crate::dispatcher::set_global_default(crate::Dispatch::new(subscriber)) +} + +/// Sets the subscriber as the default for the duration of the lifetime of the +/// returned [`DefaultGuard`] +/// +/// The default subscriber is used when creating a new [`Span`] or +/// [`Event`], _if no span is currently executing_. If a span is currently +/// executing, new spans or events are dispatched to the subscriber that +/// tagged that span, instead. 
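+///
+/// A brief sketch of typical usage (shown with the no-op `NoSubscriber` from
+/// `tracing-core` as a stand-in; any `Subscriber + Send + Sync + 'static`
+/// value works):
+///
+/// ```rust
+/// use tracing::subscriber::{self, NoSubscriber};
+///
+/// let my_subscriber = NoSubscriber::default();
+/// let guard = subscriber::set_default(my_subscriber);
+///
+/// // While `guard` is alive, events on this thread go to `my_subscriber`.
+/// tracing::info!("dispatched to `my_subscriber`");
+///
+/// // Dropping the guard restores the previous default subscriber.
+/// drop(guard);
+/// ```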
+/// +/// [`Span`]: super::span::Span +/// [`Subscriber`]: super::subscriber::Subscriber +/// [`Event`]: super::event::Event +/// [`DefaultGuard`]: super::dispatcher::DefaultGuard +#[cfg(feature = "std")] +#[cfg_attr(docsrs, doc(cfg(feature = "std")))] +#[must_use = "Dropping the guard unregisters the subscriber."] +pub fn set_default(subscriber: S) -> DefaultGuard +where + S: Subscriber + Send + Sync + 'static, +{ + crate::dispatcher::set_default(&crate::Dispatch::new(subscriber)) +} + +pub use tracing_core::dispatcher::SetGlobalDefaultError; diff --git a/third_party/rust/tracing/tests/enabled.rs b/third_party/rust/tracing/tests/enabled.rs new file mode 100644 index 000000000000..ea1c69804d8b --- /dev/null +++ b/third_party/rust/tracing/tests/enabled.rs @@ -0,0 +1,54 @@ +#![cfg(feature = "std")] +use tracing::Level; +use tracing_mock::*; + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn level_and_target() { + let subscriber = subscriber::mock() + .with_filter(|meta| { + if meta.target() == "debug_module" { + meta.level() <= &Level::DEBUG + } else { + meta.level() <= &Level::INFO + } + }) + .done() + .run(); + + let _guard = tracing::subscriber::set_default(subscriber); + + assert!(tracing::enabled!(target: "debug_module", Level::DEBUG)); + assert!(tracing::enabled!(Level::ERROR)); + assert!(!tracing::enabled!(Level::DEBUG)); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn span_and_event() { + let subscriber = subscriber::mock() + .with_filter(|meta| { + if meta.target() == "debug_module" { + meta.level() <= &Level::DEBUG + } else if meta.is_span() { + meta.level() <= &Level::TRACE + } else if meta.is_event() { + meta.level() <= &Level::DEBUG + } else { + meta.level() <= &Level::INFO + } + }) + .done() + .run(); + + let _guard = tracing::subscriber::set_default(subscriber); + + // Ensure that the `_event` and `_span` alternatives work corretly + assert!(!tracing::event_enabled!(Level::TRACE)); + assert!(tracing::event_enabled!(Level::DEBUG)); + assert!(tracing::span_enabled!(Level::TRACE)); + + // target variants + assert!(tracing::span_enabled!(target: "debug_module", Level::DEBUG)); + assert!(tracing::event_enabled!(target: "debug_module", Level::DEBUG)); +} diff --git a/third_party/rust/tracing/tests/event.rs b/third_party/rust/tracing/tests/event.rs new file mode 100644 index 000000000000..ffb63c816bd8 --- /dev/null +++ b/third_party/rust/tracing/tests/event.rs @@ -0,0 +1,476 @@ +// These tests require the thread-local scoped dispatcher, which only works when +// we have a standard library. The behaviour being tested should be the same +// with the standard lib disabled. +// +// The alternative would be for each of these tests to be defined in a separate +// file, which is :( +#![cfg(feature = "std")] + +use tracing::{ + debug, error, + field::{debug, display}, + info, + subscriber::with_default, + trace, warn, Level, +}; +use tracing_mock::*; + +macro_rules! 
event_without_message { + ($name:ident: $e:expr) => { + #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] + #[test] + fn $name() { + let (subscriber, handle) = subscriber::mock() + .event( + event::mock().with_fields( + field::mock("answer") + .with_value(&42) + .and( + field::mock("to_question") + .with_value(&"life, the universe, and everything"), + ) + .only(), + ), + ) + .done() + .run_with_handle(); + + with_default(subscriber, || { + info!( + answer = $e, + to_question = "life, the universe, and everything" + ); + }); + + handle.assert_finished(); + } + }; +} + +event_without_message! {event_without_message: 42} +event_without_message! {wrapping_event_without_message: std::num::Wrapping(42)} +event_without_message! {nonzeroi32_event_without_message: std::num::NonZeroI32::new(42).unwrap()} +// needs API breakage +//event_without_message!{nonzerou128_event_without_message: std::num::NonZeroU128::new(42).unwrap()} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn event_with_message() { + let (subscriber, handle) = subscriber::mock() + .event(event::msg(format_args!( + "hello from my event! yak shaved = {:?}", + true + ))) + .done() + .run_with_handle(); + + with_default(subscriber, || { + debug!("hello from my event! yak shaved = {:?}", true); + }); + + handle.assert_finished(); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn message_without_delims() { + let (subscriber, handle) = subscriber::mock() + .event( + event::mock().with_fields( + field::mock("answer") + .with_value(&42) + .and(field::mock("question").with_value(&"life, the universe, and everything")) + .and(field::msg(format_args!( + "hello from my event! tricky? {:?}!", + true + ))) + .only(), + ), + ) + .done() + .run_with_handle(); + + with_default(subscriber, || { + let question = "life, the universe, and everything"; + debug!(answer = 42, question, "hello from {where}! tricky? 
{:?}!", true, where = "my event"); + }); + + handle.assert_finished(); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn string_message_without_delims() { + let (subscriber, handle) = subscriber::mock() + .event( + event::mock().with_fields( + field::mock("answer") + .with_value(&42) + .and(field::mock("question").with_value(&"life, the universe, and everything")) + .and(field::msg(format_args!("hello from my event"))) + .only(), + ), + ) + .done() + .run_with_handle(); + + with_default(subscriber, || { + let question = "life, the universe, and everything"; + debug!(answer = 42, question, "hello from my event"); + }); + + handle.assert_finished(); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn one_with_everything() { + let (subscriber, handle) = subscriber::mock() + .event( + event::mock() + .with_fields( + field::mock("message") + .with_value(&tracing::field::debug(format_args!( + "{:#x} make me one with{what:.>20}", + 4_277_009_102u64, + what = "everything" + ))) + .and(field::mock("foo").with_value(&666)) + .and(field::mock("bar").with_value(&false)) + .and(field::mock("like_a_butterfly").with_value(&42.0)) + .only(), + ) + .at_level(Level::ERROR) + .with_target("whatever"), + ) + .done() + .run_with_handle(); + + with_default(subscriber, || { + tracing::event!( + target: "whatever", + Level::ERROR, + { foo = 666, bar = false, like_a_butterfly = 42.0 }, + "{:#x} make me one with{what:.>20}", 4_277_009_102u64, what = "everything" + ); + }); + + handle.assert_finished(); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn moved_field() { + let (subscriber, handle) = subscriber::mock() + .event( + event::mock().with_fields( + field::mock("foo") + .with_value(&display("hello from my event")) + .only(), + ), + ) + .done() + .run_with_handle(); + with_default(subscriber, || { + let from = "my event"; + tracing::event!(Level::INFO, foo = display(format!("hello from {}", from))) + }); + + handle.assert_finished(); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn dotted_field_name() { + let (subscriber, handle) = subscriber::mock() + .event( + event::mock().with_fields( + field::mock("foo.bar") + .with_value(&true) + .and(field::mock("foo.baz").with_value(&false)) + .only(), + ), + ) + .done() + .run_with_handle(); + with_default(subscriber, || { + tracing::event!(Level::INFO, foo.bar = true, foo.baz = false); + }); + + handle.assert_finished(); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn borrowed_field() { + let (subscriber, handle) = subscriber::mock() + .event( + event::mock().with_fields( + field::mock("foo") + .with_value(&display("hello from my event")) + .only(), + ), + ) + .done() + .run_with_handle(); + with_default(subscriber, || { + let from = "my event"; + let mut message = format!("hello from {}", from); + tracing::event!(Level::INFO, foo = display(&message)); + message.push_str(", which happened!"); + }); + + handle.assert_finished(); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +// If emitting log instrumentation, this gets moved anyway, breaking the test. 
+#[cfg(not(feature = "log"))] +fn move_field_out_of_struct() { + use tracing::field::debug; + + #[derive(Debug)] + struct Position { + x: f32, + y: f32, + } + + let pos = Position { + x: 3.234, + y: -1.223, + }; + let (subscriber, handle) = subscriber::mock() + .event( + event::mock().with_fields( + field::mock("x") + .with_value(&debug(3.234)) + .and(field::mock("y").with_value(&debug(-1.223))) + .only(), + ), + ) + .event(event::mock().with_fields(field::mock("position").with_value(&debug(&pos)))) + .done() + .run_with_handle(); + + with_default(subscriber, || { + let pos = Position { + x: 3.234, + y: -1.223, + }; + debug!(x = debug(pos.x), y = debug(pos.y)); + debug!(target: "app_events", { position = debug(pos) }, "New position"); + }); + handle.assert_finished(); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn display_shorthand() { + let (subscriber, handle) = subscriber::mock() + .event( + event::mock().with_fields( + field::mock("my_field") + .with_value(&display("hello world")) + .only(), + ), + ) + .done() + .run_with_handle(); + with_default(subscriber, || { + tracing::event!(Level::TRACE, my_field = %"hello world"); + }); + + handle.assert_finished(); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn debug_shorthand() { + let (subscriber, handle) = subscriber::mock() + .event( + event::mock().with_fields( + field::mock("my_field") + .with_value(&debug("hello world")) + .only(), + ), + ) + .done() + .run_with_handle(); + with_default(subscriber, || { + tracing::event!(Level::TRACE, my_field = ?"hello world"); + }); + + handle.assert_finished(); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn both_shorthands() { + let (subscriber, handle) = subscriber::mock() + .event( + event::mock().with_fields( + field::mock("display_field") + .with_value(&display("hello world")) + .and(field::mock("debug_field").with_value(&debug("hello world"))) + .only(), + ), + ) + .done() + .run_with_handle(); + with_default(subscriber, || { + tracing::event!(Level::TRACE, display_field = %"hello world", debug_field = ?"hello world"); + }); + + handle.assert_finished(); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn explicit_child() { + let (subscriber, handle) = subscriber::mock() + .new_span(span::mock().named("foo")) + .event(event::mock().with_explicit_parent(Some("foo"))) + .done() + .run_with_handle(); + + with_default(subscriber, || { + let foo = tracing::span!(Level::TRACE, "foo"); + tracing::event!(parent: foo.id(), Level::TRACE, "bar"); + }); + + handle.assert_finished(); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn explicit_child_at_levels() { + let (subscriber, handle) = subscriber::mock() + .new_span(span::mock().named("foo")) + .event(event::mock().with_explicit_parent(Some("foo"))) + .event(event::mock().with_explicit_parent(Some("foo"))) + .event(event::mock().with_explicit_parent(Some("foo"))) + .event(event::mock().with_explicit_parent(Some("foo"))) + .event(event::mock().with_explicit_parent(Some("foo"))) + .done() + .run_with_handle(); + + with_default(subscriber, || { + let foo = tracing::span!(Level::TRACE, "foo"); + trace!(parent: foo.id(), "a"); + debug!(parent: foo.id(), "b"); + info!(parent: foo.id(), "c"); + warn!(parent: foo.id(), "d"); + error!(parent: foo.id(), "e"); + }); + + handle.assert_finished(); +} + +#[cfg_attr(target_arch = "wasm32", 
wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn option_values() { + let (subscriber, handle) = subscriber::mock() + .event( + event::mock().with_fields( + field::mock("some_str") + .with_value(&"yes") + .and(field::mock("some_bool").with_value(&true)) + .and(field::mock("some_u64").with_value(&42_u64)) + .only(), + ), + ) + .done() + .run_with_handle(); + + with_default(subscriber, || { + let some_str = Some("yes"); + let none_str: Option<&'static str> = None; + let some_bool = Some(true); + let none_bool: Option = None; + let some_u64 = Some(42_u64); + let none_u64: Option = None; + trace!( + some_str = some_str, + none_str = none_str, + some_bool = some_bool, + none_bool = none_bool, + some_u64 = some_u64, + none_u64 = none_u64 + ); + }); + + handle.assert_finished(); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn option_ref_values() { + let (subscriber, handle) = subscriber::mock() + .event( + event::mock().with_fields( + field::mock("some_str") + .with_value(&"yes") + .and(field::mock("some_bool").with_value(&true)) + .and(field::mock("some_u64").with_value(&42_u64)) + .only(), + ), + ) + .done() + .run_with_handle(); + + with_default(subscriber, || { + let some_str = &Some("yes"); + let none_str: &Option<&'static str> = &None; + let some_bool = &Some(true); + let none_bool: &Option = &None; + let some_u64 = &Some(42_u64); + let none_u64: &Option = &None; + trace!( + some_str = some_str, + none_str = none_str, + some_bool = some_bool, + none_bool = none_bool, + some_u64 = some_u64, + none_u64 = none_u64 + ); + }); + + handle.assert_finished(); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn option_ref_mut_values() { + let (subscriber, handle) = subscriber::mock() + .event( + event::mock().with_fields( + field::mock("some_str") + .with_value(&"yes") + .and(field::mock("some_bool").with_value(&true)) + .and(field::mock("some_u64").with_value(&42_u64)) + .only(), + ), + ) + .done() + .run_with_handle(); + + with_default(subscriber, || { + let some_str = &mut Some("yes"); + let none_str: &mut Option<&'static str> = &mut None; + let some_bool = &mut Some(true); + let none_bool: &mut Option = &mut None; + let some_u64 = &mut Some(42_u64); + let none_u64: &mut Option = &mut None; + trace!( + some_str = some_str, + none_str = none_str, + some_bool = some_bool, + none_bool = none_bool, + some_u64 = some_u64, + none_u64 = none_u64 + ); + }); + + handle.assert_finished(); +} diff --git a/third_party/rust/tracing/tests/filter_caching_is_lexically_scoped.rs b/third_party/rust/tracing/tests/filter_caching_is_lexically_scoped.rs new file mode 100644 index 000000000000..e291103d7515 --- /dev/null +++ b/third_party/rust/tracing/tests/filter_caching_is_lexically_scoped.rs @@ -0,0 +1,65 @@ +// Tests that depend on a count of the number of times their filter is evaluated +// can't exist in the same file with other tests that add subscribers to the +// registry. The registry was changed so that each time a new dispatcher is +// added all filters are re-evaluated. 
The tests being run only in separate +// threads with shared global state lets them interfere with each other + +#[cfg(not(feature = "std"))] +extern crate std; + +use tracing::{span, Level}; +use tracing_mock::*; + +use std::sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, +}; + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn filter_caching_is_lexically_scoped() { + pub fn my_great_function() -> bool { + span!(Level::TRACE, "emily").in_scope(|| true) + } + + pub fn my_other_function() -> bool { + span!(Level::TRACE, "frank").in_scope(|| true) + } + + let count = Arc::new(AtomicUsize::new(0)); + let count2 = count.clone(); + + let subscriber = subscriber::mock() + .with_filter(move |meta| match meta.name() { + "emily" | "frank" => { + count2.fetch_add(1, Ordering::Relaxed); + true + } + _ => false, + }) + .run(); + + // Since this test is in its own file anyway, we can do this. Thus, this + // test will work even with no-std. + tracing::subscriber::set_global_default(subscriber).unwrap(); + + // Call the function once. The filter should be re-evaluated. + assert!(my_great_function()); + assert_eq!(count.load(Ordering::Relaxed), 1); + + // Call the function again. The cached result should be used. + assert!(my_great_function()); + assert_eq!(count.load(Ordering::Relaxed), 1); + + assert!(my_other_function()); + assert_eq!(count.load(Ordering::Relaxed), 2); + + assert!(my_great_function()); + assert_eq!(count.load(Ordering::Relaxed), 2); + + assert!(my_other_function()); + assert_eq!(count.load(Ordering::Relaxed), 2); + + assert!(my_great_function()); + assert_eq!(count.load(Ordering::Relaxed), 2); +} diff --git a/third_party/rust/tracing/tests/filters_are_not_reevaluated_for_the_same_span.rs b/third_party/rust/tracing/tests/filters_are_not_reevaluated_for_the_same_span.rs new file mode 100644 index 000000000000..e9b2529b8f6f --- /dev/null +++ b/third_party/rust/tracing/tests/filters_are_not_reevaluated_for_the_same_span.rs @@ -0,0 +1,70 @@ +// Tests that depend on a count of the number of times their filter is evaluated +// cant exist in the same file with other tests that add subscribers to the +// registry. The registry was changed so that each time a new dispatcher is +// added all filters are re-evaluated. The tests being run only in separate +// threads with shared global state lets them interfere with each other +#[cfg(not(feature = "std"))] +extern crate std; + +use tracing::{span, Level}; +use tracing_mock::*; + +use std::sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, +}; + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn filters_are_not_reevaluated_for_the_same_span() { + // Asserts that the `span!` macro caches the result of calling + // `Subscriber::enabled` for each span. + let alice_count = Arc::new(AtomicUsize::new(0)); + let bob_count = Arc::new(AtomicUsize::new(0)); + let alice_count2 = alice_count.clone(); + let bob_count2 = bob_count.clone(); + + let (subscriber, handle) = subscriber::mock() + .with_filter(move |meta| match meta.name() { + "alice" => { + alice_count2.fetch_add(1, Ordering::Relaxed); + false + } + "bob" => { + bob_count2.fetch_add(1, Ordering::Relaxed); + true + } + _ => false, + }) + .run_with_handle(); + + // Since this test is in its own file anyway, we can do this. Thus, this + // test will work even with no-std. + tracing::subscriber::set_global_default(subscriber).unwrap(); + + // Enter "alice" and then "bob". The dispatcher expects to see "bob" but + // not "alice." 
+ let alice = span!(Level::TRACE, "alice"); + let bob = alice.in_scope(|| { + let bob = span!(Level::TRACE, "bob"); + bob.in_scope(|| ()); + bob + }); + + // The filter should have seen each span a single time. + assert_eq!(alice_count.load(Ordering::Relaxed), 1); + assert_eq!(bob_count.load(Ordering::Relaxed), 1); + + alice.in_scope(|| bob.in_scope(|| {})); + + // The subscriber should see "bob" again, but the filter should not have + // been called. + assert_eq!(alice_count.load(Ordering::Relaxed), 1); + assert_eq!(bob_count.load(Ordering::Relaxed), 1); + + bob.in_scope(|| {}); + assert_eq!(alice_count.load(Ordering::Relaxed), 1); + assert_eq!(bob_count.load(Ordering::Relaxed), 1); + + handle.assert_finished(); +} diff --git a/third_party/rust/tracing/tests/filters_are_reevaluated_for_different_call_sites.rs b/third_party/rust/tracing/tests/filters_are_reevaluated_for_different_call_sites.rs new file mode 100644 index 000000000000..265d4a88656a --- /dev/null +++ b/third_party/rust/tracing/tests/filters_are_reevaluated_for_different_call_sites.rs @@ -0,0 +1,80 @@ +// Tests that depend on a count of the number of times their filter is evaluated +// cant exist in the same file with other tests that add subscribers to the +// registry. The registry was changed so that each time a new dispatcher is +// added all filters are re-evaluated. The tests being run only in separate +// threads with shared global state lets them interfere with each other +#[cfg(not(feature = "std"))] +extern crate std; + +use tracing::{span, Level}; +use tracing_mock::*; + +use std::sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, +}; + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn filters_are_reevaluated_for_different_call_sites() { + // Asserts that the `span!` macro caches the result of calling + // `Subscriber::enabled` for each span. + let charlie_count = Arc::new(AtomicUsize::new(0)); + let dave_count = Arc::new(AtomicUsize::new(0)); + let charlie_count2 = charlie_count.clone(); + let dave_count2 = dave_count.clone(); + + let subscriber = subscriber::mock() + .with_filter(move |meta| { + println!("Filter: {:?}", meta.name()); + match meta.name() { + "charlie" => { + charlie_count2.fetch_add(1, Ordering::Relaxed); + false + } + "dave" => { + dave_count2.fetch_add(1, Ordering::Relaxed); + true + } + _ => false, + } + }) + .run(); + + // Since this test is in its own file anyway, we can do this. Thus, this + // test will work even with no-std. + tracing::subscriber::set_global_default(subscriber).unwrap(); + + // Enter "charlie" and then "dave". The dispatcher expects to see "dave" but + // not "charlie." + let charlie = span!(Level::TRACE, "charlie"); + let dave = charlie.in_scope(|| { + let dave = span!(Level::TRACE, "dave"); + dave.in_scope(|| {}); + dave + }); + + // The filter should have seen each span a single time. + assert_eq!(charlie_count.load(Ordering::Relaxed), 1); + assert_eq!(dave_count.load(Ordering::Relaxed), 1); + + charlie.in_scope(|| dave.in_scope(|| {})); + + // The subscriber should see "dave" again, but the filter should not have + // been called. + assert_eq!(charlie_count.load(Ordering::Relaxed), 1); + assert_eq!(dave_count.load(Ordering::Relaxed), 1); + + // A different span with the same name has a different call site, so it + // should cause the filter to be reapplied. 
+    let charlie2 = span!(Level::TRACE, "charlie");
+    charlie.in_scope(|| {});
+    assert_eq!(charlie_count.load(Ordering::Relaxed), 2);
+    assert_eq!(dave_count.load(Ordering::Relaxed), 1);
+
+    // But, the filter should not be re-evaluated for the new "charlie" span
+    // when it is re-entered.
+    charlie2.in_scope(|| span!(Level::TRACE, "dave").in_scope(|| {}));
+    assert_eq!(charlie_count.load(Ordering::Relaxed), 2);
+    assert_eq!(dave_count.load(Ordering::Relaxed), 2);
+}
diff --git a/third_party/rust/tracing/tests/filters_dont_leak.rs b/third_party/rust/tracing/tests/filters_dont_leak.rs
new file mode 100644
index 000000000000..2ef1c9c701d5
--- /dev/null
+++ b/third_party/rust/tracing/tests/filters_dont_leak.rs
@@ -0,0 +1,81 @@
+#![cfg(feature = "std")]
+
+use tracing_mock::*;
+
+#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
+#[test]
+fn spans_dont_leak() {
+    fn do_span() {
+        let span = tracing::debug_span!("alice");
+        let _e = span.enter();
+    }
+
+    let (subscriber, handle) = subscriber::mock()
+        .named("spans/subscriber1")
+        .with_filter(|_| false)
+        .done()
+        .run_with_handle();
+
+    let _guard = tracing::subscriber::set_default(subscriber);
+
+    do_span();
+
+    let alice = span::mock().named("alice");
+    let (subscriber2, handle2) = subscriber::mock()
+        .named("spans/subscriber2")
+        .with_filter(|_| true)
+        .new_span(alice.clone())
+        .enter(alice.clone())
+        .exit(alice.clone())
+        .drop_span(alice)
+        .done()
+        .run_with_handle();
+
+    tracing::subscriber::with_default(subscriber2, || {
+        println!("--- subscriber 2 is default ---");
+        do_span()
+    });
+
+    println!("--- subscriber 1 is default ---");
+    do_span();
+
+    handle.assert_finished();
+    handle2.assert_finished();
+}
+
+#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
+#[test]
+fn events_dont_leak() {
+    fn do_event() {
+        tracing::debug!("alice");
+    }
+
+    let (subscriber, handle) = subscriber::mock()
+        .named("events/subscriber1")
+        .with_filter(|_| false)
+        .done()
+        .run_with_handle();
+
+    let _guard = tracing::subscriber::set_default(subscriber);
+
+    do_event();
+
+    let (subscriber2, handle2) = subscriber::mock()
+        .named("events/subscriber2")
+        .with_filter(|_| true)
+        .event(event::mock())
+        .done()
+        .run_with_handle();
+
+    tracing::subscriber::with_default(subscriber2, || {
+        println!("--- subscriber 2 is default ---");
+        do_event()
+    });
+
+    println!("--- subscriber 1 is default ---");
+
+    do_event();
+
+    handle.assert_finished();
+    handle2.assert_finished();
+}
diff --git a/third_party/rust/tracing/tests/future_send.rs b/third_party/rust/tracing/tests/future_send.rs
new file mode 100644
index 000000000000..5e5f9f18bcbc
--- /dev/null
+++ b/third_party/rust/tracing/tests/future_send.rs
@@ -0,0 +1,22 @@
+// These tests reproduce the following issues:
+// - https://github.com/tokio-rs/tracing/issues/1487
+// - https://github.com/tokio-rs/tracing/issues/1793
+
+use core::future::{self, Future};
+#[test]
+fn async_fn_is_send() {
+    async fn some_async_fn() {
+        tracing::info!("{}", future::ready("test").await);
+    }
+
+    assert_send(some_async_fn())
+}
+
+#[test]
+fn async_block_is_send() {
+    assert_send(async {
+        tracing::info!("{}", future::ready("test").await);
+    })
+}
+
+fn assert_send<F: Future + Send>(_f: F) {}
diff --git a/third_party/rust/tracing/tests/macro_imports.rs b/third_party/rust/tracing/tests/macro_imports.rs
new file mode 100644
index 000000000000..2d0a9d6528f4
--- /dev/null
+++ b/third_party/rust/tracing/tests/macro_imports.rs
@@ -0,0 +1,23 @@
+use tracing::Level;
+
+#[cfg_attr(target_arch =
"wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn prefixed_span_macros() { + tracing::span!(Level::DEBUG, "foo"); + tracing::trace_span!("foo"); + tracing::debug_span!("foo"); + tracing::info_span!("foo"); + tracing::warn_span!("foo"); + tracing::error_span!("foo"); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn prefixed_event_macros() { + tracing::event!(Level::DEBUG, "foo"); + tracing::trace!("foo"); + tracing::debug!("foo"); + tracing::info!("foo"); + tracing::warn!("foo"); + tracing::error!("foo"); +} diff --git a/third_party/rust/tracing/tests/macros.rs b/third_party/rust/tracing/tests/macros.rs new file mode 100644 index 000000000000..a9679a3e94b1 --- /dev/null +++ b/third_party/rust/tracing/tests/macros.rs @@ -0,0 +1,963 @@ +#![deny(warnings)] +use tracing::{ + callsite, debug, debug_span, enabled, error, error_span, event, event_enabled, info, info_span, + span, span_enabled, trace, trace_span, warn, warn_span, Level, +}; + +// Tests that macros work across various invocation syntax. +// +// These are quite repetitive, and _could_ be generated by a macro. However, +// they're compile-time tests, so I want to get line numbers etc out of +// failures, and producing them with a macro would muddy the waters a bit. + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn span() { + span!(target: "foo_events", Level::DEBUG, "foo", bar.baz = ?2, quux = %3, quuux = 4); + span!(target: "foo_events", Level::DEBUG, "foo", bar.baz = 2, quux = 3); + span!(target: "foo_events", Level::DEBUG, "foo", bar.baz = 2, quux = 4,); + span!(target: "foo_events", Level::DEBUG, "foo"); + span!(target: "foo_events", Level::DEBUG, "bar",); + span!(Level::DEBUG, "foo", bar.baz = 2, quux = 3); + span!(Level::DEBUG, "foo", bar.baz = 2, quux = 4,); + span!(Level::DEBUG, "foo", bar.baz = 2, quux = 3); + span!(Level::DEBUG, "foo", bar.baz = 2, quux = 4,); + span!(Level::DEBUG, "foo", bar.baz = ?2); + span!(Level::DEBUG, "foo", bar.baz = %2); + span!(Level::DEBUG, "foo"); + span!(Level::DEBUG, "bar",); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn trace_span() { + trace_span!(target: "foo_events", "foo", bar.baz = ?2, quux = %3, quuux = 4); + trace_span!(target: "foo_events", "foo", bar.baz = 2, quux = 3); + trace_span!(target: "foo_events", "foo", bar.baz = 2, quux = 4,); + trace_span!(target: "foo_events", "foo"); + trace_span!(target: "foo_events", "bar",); + trace_span!("foo", bar.baz = 2, quux = 3); + trace_span!("foo", bar.baz = 2, quux = 4,); + trace_span!("foo", bar.baz = ?2); + trace_span!("foo", bar.baz = %2); + trace_span!("bar"); + trace_span!("bar",); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn debug_span() { + debug_span!(target: "foo_events", "foo", bar.baz = ?2, quux = %3, quuux = 4); + debug_span!(target: "foo_events", "foo", bar.baz = 2, quux = 3); + debug_span!(target: "foo_events", "foo", bar.baz = 2, quux = 4,); + debug_span!(target: "foo_events", "foo"); + debug_span!(target: "foo_events", "bar",); + debug_span!("foo", bar.baz = 2, quux = 3); + debug_span!("foo", bar.baz = 2, quux = 4,); + debug_span!("foo", bar.baz = ?2); + debug_span!("foo", bar.baz = %2); + debug_span!("bar"); + debug_span!("bar",); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn info_span() { + info_span!(target: "foo_events", "foo", bar.baz = ?2, quux = %3, quuux = 4); + info_span!(target: 
"foo_events", "foo", bar.baz = 2, quux = 3); + info_span!(target: "foo_events", "foo", bar.baz = 2, quux = 4,); + info_span!(target: "foo_events", "foo"); + info_span!(target: "foo_events", "bar",); + info_span!("foo", bar.baz = 2, quux = 3); + info_span!("foo", bar.baz = 2, quux = 4,); + info_span!("foo", bar.baz = ?2); + info_span!("foo", bar.baz = %2); + info_span!("bar"); + info_span!("bar",); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn warn_span() { + warn_span!(target: "foo_events", "foo", bar.baz = ?2, quux = %3, quuux = 4); + warn_span!(target: "foo_events", "foo", bar.baz = 2, quux = 3); + warn_span!(target: "foo_events", "foo", bar.baz = 2, quux = 4,); + warn_span!(target: "foo_events", "foo"); + warn_span!(target: "foo_events", "bar",); + warn_span!("foo", bar.baz = 2, quux = 3); + warn_span!("foo", bar.baz = 2, quux = 4,); + warn_span!("foo", bar.baz = ?2); + warn_span!("foo", bar.baz = %2); + warn_span!("bar"); + warn_span!("bar",); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn error_span() { + error_span!(target: "foo_events", "foo", bar.baz = ?2, quux = %3, quuux = 4); + error_span!(target: "foo_events", "foo", bar.baz = 2, quux = 3); + error_span!(target: "foo_events", "foo", bar.baz = 2, quux = 4,); + error_span!(target: "foo_events", "foo"); + error_span!(target: "foo_events", "bar",); + error_span!("foo", bar.baz = 2, quux = 3); + error_span!("foo", bar.baz = 2, quux = 4,); + error_span!("foo", bar.baz = ?2); + error_span!("foo", bar.baz = %2); + error_span!("bar"); + error_span!("bar",); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn span_root() { + span!(target: "foo_events", parent: None, Level::TRACE, "foo", bar.baz = 2, quux = 3); + span!(target: "foo_events", parent: None, Level::TRACE, "foo", bar.baz = 2, quux = 3); + span!(target: "foo_events", parent: None, Level::TRACE, "foo", bar.baz = 2, quux = 4,); + span!(target: "foo_events", parent: None, Level::TRACE, "foo"); + span!(target: "foo_events", parent: None, Level::TRACE, "bar",); + span!(parent: None, Level::DEBUG, "foo", bar.baz = 2, quux = 3); + span!(parent: None, Level::DEBUG, "foo", bar.baz = 2, quux = 4,); + span!(parent: None, Level::DEBUG, "foo"); + span!(parent: None, Level::DEBUG, "bar",); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn trace_span_root() { + trace_span!(target: "foo_events", parent: None, "foo", bar.baz = 2, quux = 3); + trace_span!(target: "foo_events", parent: None, "foo", bar.baz = 2, quux = 4,); + trace_span!(target: "foo_events", parent: None, "foo"); + trace_span!(target: "foo_events", parent: None, "bar",); + trace_span!(parent: None, "foo", bar.baz = 2, quux = 3); + trace_span!(parent: None, "foo", bar.baz = 2, quux = 4,); + trace_span!(parent: None, "foo"); + trace_span!(parent: None, "bar",); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn debug_span_root() { + debug_span!(target: "foo_events", parent: None, "foo", bar.baz = 2, quux = 3); + debug_span!(target: "foo_events", parent: None, "foo", bar.baz = 2, quux = 4,); + debug_span!(target: "foo_events", parent: None, "foo"); + debug_span!(target: "foo_events", parent: None, "bar",); + debug_span!(parent: None, "foo", bar.baz = 2, quux = 3); + debug_span!(parent: None, "foo", bar.baz = 2, quux = 4,); + debug_span!(parent: None, "foo"); + debug_span!(parent: None, "bar",); +} + 
+#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn info_span_root() { + info_span!(target: "foo_events", parent: None, "foo", bar.baz = 2, quux = 3); + info_span!(target: "foo_events", parent: None, "foo", bar.baz = 2, quux = 4,); + info_span!(target: "foo_events", parent: None, "foo"); + info_span!(target: "foo_events", parent: None, "bar",); + info_span!(parent: None, "foo", bar.baz = 2, quux = 3); + info_span!(parent: None, "foo", bar.baz = 2, quux = 4,); + info_span!(parent: None, "foo"); + info_span!(parent: None, "bar",); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn warn_span_root() { + warn_span!(target: "foo_events", parent: None, "foo", bar.baz = 2, quux = 3); + warn_span!(target: "foo_events", parent: None, "foo", bar.baz = 2, quux = 4,); + warn_span!(target: "foo_events", parent: None, "foo"); + warn_span!(target: "foo_events", parent: None, "bar",); + warn_span!(parent: None, "foo", bar.baz = 2, quux = 3); + warn_span!(parent: None, "foo", bar.baz = 2, quux = 4,); + warn_span!(parent: None, "foo"); + warn_span!(parent: None, "bar",); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn error_span_root() { + error_span!(target: "foo_events", parent: None, "foo", bar.baz = 2, quux = 3); + error_span!(target: "foo_events", parent: None, "foo", bar.baz = 2, quux = 4,); + error_span!(target: "foo_events", parent: None, "foo"); + error_span!(target: "foo_events", parent: None, "bar",); + error_span!(parent: None, "foo", bar.baz = 2, quux = 3); + error_span!(parent: None, "foo", bar.baz = 2, quux = 4,); + error_span!(parent: None, "foo"); + error_span!(parent: None, "bar",); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn span_with_parent() { + let p = span!(Level::TRACE, "im_a_parent!"); + span!(target: "foo_events", parent: &p, Level::TRACE, "foo", bar.baz = 2, quux = 3); + span!(target: "foo_events", parent: &p, Level::TRACE, "foo", bar.baz = 2, quux = 4,); + span!(target: "foo_events", parent: &p, Level::TRACE, "foo"); + span!(target: "foo_events", parent: &p, Level::TRACE, "bar",); + span!(parent: &p, Level::DEBUG, "foo", bar.baz = 2, quux = 3); + span!(parent: &p, Level::DEBUG, "foo", bar.baz = 2, quux = 4,); + span!(parent: &p, Level::DEBUG, "foo"); + span!(parent: &p, Level::DEBUG, "bar",); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn trace_span_with_parent() { + let p = span!(Level::TRACE, "im_a_parent!"); + trace_span!(target: "foo_events", parent: &p, "foo", bar.baz = 2, quux = 3); + trace_span!(target: "foo_events", parent: &p, "foo", bar.baz = 2, quux = 4,); + trace_span!(target: "foo_events", parent: &p, "foo"); + trace_span!(target: "foo_events", parent: &p, "bar",); + + trace_span!(parent: &p, "foo", bar.baz = 2, quux = 3); + trace_span!(parent: &p, "foo", bar.baz = 2, quux = 4,); + + trace_span!(parent: &p, "foo"); + trace_span!(parent: &p, "bar",); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn debug_span_with_parent() { + let p = span!(Level::TRACE, "im_a_parent!"); + debug_span!(target: "foo_events", parent: &p, "foo", bar.baz = 2, quux = 3); + debug_span!(target: "foo_events", parent: &p, "foo", bar.baz = 2, quux = 4,); + debug_span!(target: "foo_events", parent: &p, "foo"); + debug_span!(target: "foo_events", parent: &p, "bar",); + + debug_span!(parent: &p, "foo", bar.baz = 2, quux = 3); + 
debug_span!(parent: &p, "foo", bar.baz = 2, quux = 4,); + + debug_span!(parent: &p, "foo"); + debug_span!(parent: &p, "bar",); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn info_span_with_parent() { + let p = span!(Level::TRACE, "im_a_parent!"); + info_span!(target: "foo_events", parent: &p, "foo", bar.baz = 2, quux = 3); + info_span!(target: "foo_events", parent: &p, "foo", bar.baz = 2, quux = 4,); + info_span!(target: "foo_events", parent: &p, "foo"); + info_span!(target: "foo_events", parent: &p, "bar",); + + info_span!(parent: &p, "foo", bar.baz = 2, quux = 3); + info_span!(parent: &p, "foo", bar.baz = 2, quux = 4,); + + info_span!(parent: &p, "foo"); + info_span!(parent: &p, "bar",); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn warn_span_with_parent() { + let p = span!(Level::TRACE, "im_a_parent!"); + warn_span!(target: "foo_events", parent: &p, "foo", bar.baz = 2, quux = 3); + warn_span!(target: "foo_events", parent: &p, "foo", bar.baz = 2, quux = 4,); + warn_span!(target: "foo_events", parent: &p, "foo"); + warn_span!(target: "foo_events", parent: &p, "bar",); + + warn_span!(parent: &p, "foo", bar.baz = 2, quux = 3); + warn_span!(parent: &p, "foo", bar.baz = 2, quux = 4,); + + warn_span!(parent: &p, "foo"); + warn_span!(parent: &p, "bar",); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn error_span_with_parent() { + let p = span!(Level::TRACE, "im_a_parent!"); + error_span!(target: "foo_events", parent: &p, "foo", bar.baz = 2, quux = 3); + error_span!(target: "foo_events", parent: &p, "foo", bar.baz = 2, quux = 4,); + error_span!(target: "foo_events", parent: &p, "foo"); + error_span!(target: "foo_events", parent: &p, "bar",); + + error_span!(parent: &p, "foo", bar.baz = 2, quux = 3); + error_span!(parent: &p, "foo", bar.baz = 2, quux = 4,); + + error_span!(parent: &p, "foo"); + error_span!(parent: &p, "bar",); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn span_with_non_rust_symbol() { + span!(Level::TRACE, "non-rust", "guid:x-request-id" = ?"abcdef", "more {}", 42); + span!(Level::TRACE, "non-rust", "guid:x-request-id" = %"abcdef", "more {}", 51); + span!( + Level::TRACE, + "non-rust", + "guid:x-request-id" = "abcdef", + "more {}", + 60 + ); + span!(Level::TRACE, "non-rust", "guid:x-request-id" = ?"abcdef"); + span!(Level::TRACE, "non-rust", "guid:x-request-id" = %"abcdef"); + span!(Level::TRACE, "non-rust", "guid:x-request-id" = "abcdef"); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn event() { + event!(Level::DEBUG, foo = ?3, bar.baz = %2, quux = false); + event!(Level::DEBUG, foo = 3, bar.baz = 2, quux = false); + event!(Level::DEBUG, foo = 3, bar.baz = 3,); + event!(Level::DEBUG, "foo"); + event!(Level::DEBUG, "foo: {}", 3); + event!(Level::INFO, foo = ?3, bar.baz = %2, quux = false, "hello world {:?}", 42); + event!( + Level::INFO, + foo = 3, + bar.baz = 2, + quux = false, + "hello world {:?}", + 42 + ); + event!(Level::INFO, foo = 3, bar.baz = 3, "hello world {:?}", 42,); + event!(Level::DEBUG, { foo = 3, bar.baz = 80 }, "quux"); + event!(Level::DEBUG, { foo = 2, bar.baz = 79 }, "quux {:?}", true); + event!(Level::DEBUG, { foo = 2, bar.baz = 79 }, "quux {:?}, {quux}", true, quux = false); + event!(Level::DEBUG, { foo = ?2, bar.baz = %78 }, "quux"); + event!(target: "foo_events", Level::DEBUG, foo = 3, bar.baz = 2, quux = false); + event!(target: 
"foo_events", Level::DEBUG, foo = 3, bar.baz = 3,); + event!(target: "foo_events", Level::DEBUG, "foo"); + event!(target: "foo_events", Level::DEBUG, "foo: {}", 3); + event!(target: "foo_events", Level::DEBUG, { foo = 3, bar.baz = 80 }, "quux"); + event!(target: "foo_events", Level::DEBUG, { foo = 2, bar.baz = 79 }, "quux {:?}", true); + event!(target: "foo_events", Level::DEBUG, { foo = 2, bar.baz = 79 }, "quux {:?}, {quux}", true, quux = false); + event!(target: "foo_events", Level::DEBUG, { foo = 2, bar.baz = 78, }, "quux"); + let foo = 1; + event!(Level::DEBUG, ?foo); + event!(Level::DEBUG, %foo); + event!(Level::DEBUG, foo); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn enabled() { + enabled!(Level::DEBUG, foo, bar.baz, quux,); + enabled!(Level::DEBUG, message); + enabled!(Level::INFO, foo, bar.baz, quux, message,); + enabled!(Level::INFO, foo, bar., message,); + enabled!(Level::DEBUG, foo); + + enabled!(Level::DEBUG); + enabled!(target: "rando", Level::DEBUG); + enabled!(target: "rando", Level::DEBUG, field); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn span_enabled() { + span_enabled!(Level::DEBUG, foo, bar.baz, quux,); + span_enabled!(Level::DEBUG, message); + span_enabled!(Level::INFO, foo, bar.baz, quux, message,); + span_enabled!(Level::INFO, foo, bar., message,); + span_enabled!(Level::DEBUG, foo); + + span_enabled!(Level::DEBUG); + span_enabled!(target: "rando", Level::DEBUG); + span_enabled!(target: "rando", Level::DEBUG, field); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn event_enabled() { + event_enabled!(Level::DEBUG, foo, bar.baz, quux,); + event_enabled!(Level::DEBUG, message); + event_enabled!(Level::INFO, foo, bar.baz, quux, message,); + event_enabled!(Level::INFO, foo, bar., message,); + event_enabled!(Level::DEBUG, foo); + + event_enabled!(Level::DEBUG); + event_enabled!(target: "rando", Level::DEBUG); + event_enabled!(target: "rando", Level::DEBUG, field); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn locals_with_message() { + let data = (42, "forty-two"); + let private_data = "private"; + let error = "a bad error"; + event!(Level::ERROR, %error, "Received error"); + event!( + target: "app_events", + Level::WARN, + private_data, + ?data, + "App warning: {}", + error + ); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn locals_no_message() { + let data = (42, "forty-two"); + let private_data = "private"; + let error = "a bad error"; + event!( + target: "app_events", + Level::WARN, + private_data, + ?data, + ); + event!( + target: "app_events", + Level::WARN, + private_data, + ?data, + error, + ); + event!( + target: "app_events", + Level::WARN, + private_data, + ?data, + error + ); + event!(Level::WARN, private_data, ?data, error,); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn trace() { + trace!(foo = ?3, bar.baz = %2, quux = false); + trace!(foo = 3, bar.baz = 2, quux = false); + trace!(foo = 3, bar.baz = 3,); + trace!("foo"); + trace!("foo: {}", 3); + trace!(foo = ?3, bar.baz = %2, quux = false, "hello world {:?}", 42); + trace!(foo = 3, bar.baz = 2, quux = false, "hello world {:?}", 42); + trace!(foo = 3, bar.baz = 3, "hello world {:?}", 42,); + trace!({ foo = 3, bar.baz = 80 }, "quux"); + trace!({ foo = 2, bar.baz = 79 }, "quux {:?}", true); + trace!({ foo = 2, bar.baz = 79 }, "quux 
{:?}, {quux}", true, quux = false); + trace!({ foo = 2, bar.baz = 78 }, "quux"); + trace!({ foo = ?2, bar.baz = %78 }, "quux"); + trace!(target: "foo_events", foo = 3, bar.baz = 2, quux = false); + trace!(target: "foo_events", foo = 3, bar.baz = 3,); + trace!(target: "foo_events", "foo"); + trace!(target: "foo_events", "foo: {}", 3); + trace!(target: "foo_events", { foo = 3, bar.baz = 80 }, "quux"); + trace!(target: "foo_events", { foo = 2, bar.baz = 79 }, "quux {:?}", true); + trace!(target: "foo_events", { foo = 2, bar.baz = 79 }, "quux {:?}, {quux}", true, quux = false); + trace!(target: "foo_events", { foo = 2, bar.baz = 78, }, "quux"); + let foo = 1; + trace!(?foo); + trace!(%foo); + trace!(foo); + trace!(target: "foo_events", ?foo); + trace!(target: "foo_events", %foo); + trace!(target: "foo_events", foo); + trace!(target: "foo_events", ?foo, true, "message"); + trace!(target: "foo_events", %foo, true, "message"); + trace!(target: "foo_events", foo, true, "message"); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn debug() { + debug!(foo = ?3, bar.baz = %2, quux = false); + debug!(foo = 3, bar.baz = 2, quux = false); + debug!(foo = 3, bar.baz = 3,); + debug!("foo"); + debug!("foo: {}", 3); + debug!(foo = ?3, bar.baz = %2, quux = false, "hello world {:?}", 42); + debug!(foo = 3, bar.baz = 2, quux = false, "hello world {:?}", 42); + debug!(foo = 3, bar.baz = 3, "hello world {:?}", 42,); + debug!({ foo = 3, bar.baz = 80 }, "quux"); + debug!({ foo = 2, bar.baz = 79 }, "quux {:?}", true); + debug!({ foo = 2, bar.baz = 79 }, "quux {:?}, {quux}", true, quux = false); + debug!({ foo = 2, bar.baz = 78 }, "quux"); + debug!({ foo = ?2, bar.baz = %78 }, "quux"); + debug!(target: "foo_events", foo = 3, bar.baz = 2, quux = false); + debug!(target: "foo_events", foo = 3, bar.baz = 3,); + debug!(target: "foo_events", "foo"); + debug!(target: "foo_events", "foo: {}", 3); + debug!(target: "foo_events", { foo = 3, bar.baz = 80 }, "quux"); + debug!(target: "foo_events", { foo = 2, bar.baz = 79 }, "quux {:?}", true); + debug!(target: "foo_events", { foo = 2, bar.baz = 79 }, "quux {:?}, {quux}", true, quux = false); + debug!(target: "foo_events", { foo = 2, bar.baz = 78, }, "quux"); + let foo = 1; + debug!(?foo); + debug!(%foo); + debug!(foo); + debug!(target: "foo_events", ?foo); + debug!(target: "foo_events", %foo); + debug!(target: "foo_events", foo); + debug!(target: "foo_events", ?foo, true, "message"); + debug!(target: "foo_events", %foo, true, "message"); + debug!(target: "foo_events", foo, true, "message"); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn info() { + info!(foo = ?3, bar.baz = %2, quux = false); + info!(foo = 3, bar.baz = 2, quux = false); + info!(foo = 3, bar.baz = 3,); + info!("foo"); + info!("foo: {}", 3); + info!(foo = ?3, bar.baz = %2, quux = false, "hello world {:?}", 42); + info!(foo = 3, bar.baz = 2, quux = false, "hello world {:?}", 42); + info!(foo = 3, bar.baz = 3, "hello world {:?}", 42,); + info!({ foo = 3, bar.baz = 80 }, "quux"); + info!({ foo = 2, bar.baz = 79 }, "quux {:?}", true); + info!({ foo = 2, bar.baz = 79 }, "quux {:?}, {quux}", true, quux = false); + info!({ foo = 2, bar.baz = 78 }, "quux"); + info!({ foo = ?2, bar.baz = %78 }, "quux"); + info!(target: "foo_events", foo = 3, bar.baz = 2, quux = false); + info!(target: "foo_events", foo = 3, bar.baz = 3,); + info!(target: "foo_events", "foo"); + info!(target: "foo_events", "foo: {}", 3); + info!(target: "foo_events", { 
foo = 3, bar.baz = 80 }, "quux"); + info!(target: "foo_events", { foo = 2, bar.baz = 79 }, "quux {:?}", true); + info!(target: "foo_events", { foo = 2, bar.baz = 79 }, "quux {:?}, {quux}", true, quux = false); + info!(target: "foo_events", { foo = 2, bar.baz = 78, }, "quux"); + let foo = 1; + info!(?foo); + info!(%foo); + info!(foo); + info!(target: "foo_events", ?foo); + info!(target: "foo_events", %foo); + info!(target: "foo_events", foo); + info!(target: "foo_events", ?foo, true, "message"); + info!(target: "foo_events", %foo, true, "message"); + info!(target: "foo_events", foo, true, "message"); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn warn() { + warn!(foo = ?3, bar.baz = %2, quux = false); + warn!(foo = 3, bar.baz = 2, quux = false); + warn!(foo = 3, bar.baz = 3,); + warn!("foo"); + warn!("foo: {}", 3); + warn!(foo = ?3, bar.baz = %2, quux = false, "hello world {:?}", 42); + warn!(foo = 3, bar.baz = 2, quux = false, "hello world {:?}", 42); + warn!(foo = 3, bar.baz = 3, "hello world {:?}", 42,); + warn!({ foo = 3, bar.baz = 80 }, "quux"); + warn!({ foo = 2, bar.baz = 79 }, "quux {:?}", true); + warn!({ foo = 2, bar.baz = 79 }, "quux {:?}, {quux}", true, quux = false); + warn!({ foo = 2, bar.baz = 78 }, "quux"); + warn!({ foo = ?2, bar.baz = %78 }, "quux"); + warn!(target: "foo_events", foo = 3, bar.baz = 2, quux = false); + warn!(target: "foo_events", foo = 3, bar.baz = 3,); + warn!(target: "foo_events", "foo"); + warn!(target: "foo_events", "foo: {}", 3); + warn!(target: "foo_events", { foo = 3, bar.baz = 80 }, "quux"); + warn!(target: "foo_events", { foo = 2, bar.baz = 79 }, "quux {:?}", true); + warn!(target: "foo_events", { foo = 2, bar.baz = 79 }, "quux {:?}, {quux}", true, quux = false); + warn!(target: "foo_events", { foo = 2, bar.baz = 78, }, "quux"); + let foo = 1; + warn!(?foo); + warn!(%foo); + warn!(foo); + warn!(target: "foo_events", ?foo); + warn!(target: "foo_events", %foo); + warn!(target: "foo_events", foo); + warn!(target: "foo_events", ?foo, true, "message"); + warn!(target: "foo_events", %foo, true, "message"); + warn!(target: "foo_events", foo, true, "message"); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn error() { + error!(foo = ?3, bar.baz = %2, quux = false); + error!(foo = 3, bar.baz = 2, quux = false); + error!(foo = 3, bar.baz = 3,); + error!("foo"); + error!("foo: {}", 3); + error!(foo = ?3, bar.baz = %2, quux = false, "hello world {:?}", 42); + error!(foo = 3, bar.baz = 2, quux = false, "hello world {:?}", 42); + error!(foo = 3, bar.baz = 3, "hello world {:?}", 42,); + error!({ foo = 3, bar.baz = 80 }, "quux"); + error!({ foo = 2, bar.baz = 79 }, "quux {:?}", true); + error!({ foo = 2, bar.baz = 79 }, "quux {:?}, {quux}", true, quux = false); + error!({ foo = 2, bar.baz = 78, }, "quux"); + error!({ foo = ?2, bar.baz = %78 }, "quux"); + error!(target: "foo_events", foo = 3, bar.baz = 2, quux = false); + error!(target: "foo_events", foo = 3, bar.baz = 3,); + error!(target: "foo_events", "foo"); + error!(target: "foo_events", "foo: {}", 3); + error!(target: "foo_events", { foo = 3, bar.baz = 80 }, "quux"); + error!(target: "foo_events", { foo = 2, bar.baz = 79 }, "quux {:?}", true); + error!(target: "foo_events", { foo = 2, bar.baz = 79 }, "quux {:?}, {quux}", true, quux = false); + error!(target: "foo_events", { foo = 2, bar.baz = 78, }, "quux"); + let foo = 1; + error!(?foo); + error!(%foo); + error!(foo); + error!(target: "foo_events", ?foo); + 
error!(target: "foo_events", %foo); + error!(target: "foo_events", foo); + error!(target: "foo_events", ?foo, true, "message"); + error!(target: "foo_events", %foo, true, "message"); + error!(target: "foo_events", foo, true, "message"); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn event_root() { + event!(parent: None, Level::DEBUG, foo = ?3, bar.baz = %2, quux = false); + event!( + parent: None, + Level::DEBUG, + foo = 3, + bar.baz = 2, + quux = false + ); + event!(parent: None, Level::DEBUG, foo = 3, bar.baz = 3,); + event!(parent: None, Level::DEBUG, "foo"); + event!(parent: None, Level::DEBUG, "foo: {}", 3); + event!(parent: None, Level::DEBUG, { foo = 3, bar.baz = 80 }, "quux"); + event!(parent: None, Level::DEBUG, { foo = 2, bar.baz = 79 }, "quux {:?}", true); + event!(parent: None, Level::DEBUG, { foo = 2, bar.baz = 79 }, "quux {:?}, {quux}", true, quux = false); + event!(parent: None, Level::DEBUG, { foo = ?2, bar.baz = %78 }, "quux"); + event!(target: "foo_events", parent: None, Level::DEBUG, foo = 3, bar.baz = 2, quux = false); + event!(target: "foo_events", parent: None, Level::DEBUG, foo = 3, bar.baz = 3,); + event!(target: "foo_events", parent: None, Level::DEBUG, "foo"); + event!(target: "foo_events", parent: None, Level::DEBUG, "foo: {}", 3); + event!(target: "foo_events", parent: None, Level::DEBUG, { foo = 3, bar.baz = 80 }, "quux"); + event!(target: "foo_events", parent: None, Level::DEBUG, { foo = 2, bar.baz = 79 }, "quux {:?}", true); + event!(target: "foo_events", parent: None, Level::DEBUG, { foo = 2, bar.baz = 79 }, "quux {:?}, {quux}", true, quux = false); + event!(target: "foo_events", parent: None, Level::DEBUG, { foo = 2, bar.baz = 78, }, "quux"); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn trace_root() { + trace!(parent: None, foo = ?3, bar.baz = %2, quux = false); + trace!(parent: None, foo = 3, bar.baz = 2, quux = false); + trace!(parent: None, foo = 3, bar.baz = 3,); + trace!(parent: None, "foo"); + trace!(parent: None, "foo: {}", 3); + trace!(parent: None, { foo = 3, bar.baz = 80 }, "quux"); + trace!(parent: None, { foo = 2, bar.baz = 79 }, "quux {:?}", true); + trace!(parent: None, { foo = 2, bar.baz = 79 }, "quux {:?}, {quux}", true, quux = false); + trace!(parent: None, { foo = 2, bar.baz = 78 }, "quux"); + trace!(parent: None, { foo = ?2, bar.baz = %78 }, "quux"); + trace!(target: "foo_events", parent: None, foo = 3, bar.baz = 2, quux = false); + trace!(target: "foo_events", parent: None, foo = 3, bar.baz = 3,); + trace!(target: "foo_events", parent: None, "foo"); + trace!(target: "foo_events", parent: None, "foo: {}", 3); + trace!(target: "foo_events", parent: None, { foo = 3, bar.baz = 80 }, "quux"); + trace!(target: "foo_events", parent: None, { foo = 2, bar.baz = 79 }, "quux {:?}", true); + trace!(target: "foo_events", parent: None, { foo = 2, bar.baz = 79 }, "quux {:?}, {quux}", true, quux = false); + trace!(target: "foo_events", parent: None, { foo = 2, bar.baz = 78, }, "quux"); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn debug_root() { + debug!(parent: None, foo = ?3, bar.baz = %2, quux = false); + debug!(parent: None, foo = 3, bar.baz = 2, quux = false); + debug!(parent: None, foo = 3, bar.baz = 3,); + debug!(parent: None, "foo"); + debug!(parent: None, "foo: {}", 3); + debug!(parent: None, { foo = 3, bar.baz = 80 }, "quux"); + debug!(parent: None, { foo = 2, bar.baz = 79 }, "quux {:?}", true); + 
debug!(parent: None, { foo = 2, bar.baz = 79 }, "quux {:?}, {quux}", true, quux = false); + debug!(parent: None, { foo = 2, bar.baz = 78 }, "quux"); + debug!(parent: None, { foo = ?2, bar.baz = %78 }, "quux"); + debug!(target: "foo_events", parent: None, foo = 3, bar.baz = 2, quux = false); + debug!(target: "foo_events", parent: None, foo = 3, bar.baz = 3,); + debug!(target: "foo_events", parent: None, "foo"); + debug!(target: "foo_events", parent: None, "foo: {}", 3); + debug!(target: "foo_events", parent: None, { foo = 3, bar.baz = 80 }, "quux"); + debug!(target: "foo_events", parent: None, { foo = 2, bar.baz = 79 }, "quux {:?}", true); + debug!(target: "foo_events", parent: None, { foo = 2, bar.baz = 79 }, "quux {:?}, {quux}", true, quux = false); + debug!(target: "foo_events", parent: None, { foo = 2, bar.baz = 78, }, "quux"); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn info_root() { + info!(parent: None, foo = ?3, bar.baz = %2, quux = false); + info!(parent: None, foo = 3, bar.baz = 2, quux = false); + info!(parent: None, foo = 3, bar.baz = 3,); + info!(parent: None, "foo"); + info!(parent: None, "foo: {}", 3); + info!(parent: None, { foo = 3, bar.baz = 80 }, "quux"); + info!(parent: None, { foo = 2, bar.baz = 79 }, "quux {:?}", true); + info!(parent: None, { foo = 2, bar.baz = 79 }, "quux {:?}, {quux}", true, quux = false); + info!(parent: None, { foo = 2, bar.baz = 78 }, "quux"); + info!(parent: None, { foo = ?2, bar.baz = %78 }, "quux"); + info!(target: "foo_events", parent: None, foo = 3, bar.baz = 2, quux = false); + info!(target: "foo_events", parent: None, foo = 3, bar.baz = 3,); + info!(target: "foo_events", parent: None, "foo"); + info!(target: "foo_events", parent: None, "foo: {}", 3); + info!(target: "foo_events", parent: None, { foo = 3, bar.baz = 80 }, "quux"); + info!(target: "foo_events", parent: None, { foo = 2, bar.baz = 79 }, "quux {:?}", true); + info!(target: "foo_events", parent: None, { foo = 2, bar.baz = 79 }, "quux {:?}, {quux}", true, quux = false); + info!(target: "foo_events", parent: None, { foo = 2, bar.baz = 78, }, "quux"); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn warn_root() { + warn!(parent: None, foo = ?3, bar.baz = %2, quux = false); + warn!(parent: None, foo = 3, bar.baz = 2, quux = false); + warn!(parent: None, foo = 3, bar.baz = 3,); + warn!(parent: None, "foo"); + warn!(parent: None, "foo: {}", 3); + warn!(parent: None, { foo = 3, bar.baz = 80 }, "quux"); + warn!(parent: None, { foo = 2, bar.baz = 79 }, "quux {:?}", true); + warn!(parent: None, { foo = 2, bar.baz = 79 }, "quux {:?}, {quux}", true, quux = false); + warn!(parent: None, { foo = 2, bar.baz = 78 }, "quux"); + warn!(parent: None, { foo = ?2, bar.baz = %78 }, "quux"); + warn!(target: "foo_events", parent: None, foo = 3, bar.baz = 2, quux = false); + warn!(target: "foo_events", parent: None, foo = 3, bar.baz = 3,); + warn!(target: "foo_events", parent: None, "foo"); + warn!(target: "foo_events", parent: None, "foo: {}", 3); + warn!(target: "foo_events", parent: None, { foo = 3, bar.baz = 80 }, "quux"); + warn!(target: "foo_events", parent: None, { foo = 2, bar.baz = 79 }, "quux {:?}", true); + warn!(target: "foo_events", parent: None, { foo = 2, bar.baz = 79 }, "quux {:?}, {quux}", true, quux = false); + warn!(target: "foo_events", parent: None, { foo = 2, bar.baz = 78, }, "quux"); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn error_root() { + 
error!(parent: None, foo = ?3, bar.baz = %2, quux = false); + error!(parent: None, foo = 3, bar.baz = 2, quux = false); + error!(parent: None, foo = 3, bar.baz = 3,); + error!(parent: None, "foo"); + error!(parent: None, "foo: {}", 3); + error!(parent: None, { foo = 3, bar.baz = 80 }, "quux"); + error!(parent: None, { foo = 2, bar.baz = 79 }, "quux {:?}", true); + error!(parent: None, { foo = 2, bar.baz = 79 }, "quux {:?}, {quux}", true, quux = false); + error!(parent: None, { foo = 2, bar.baz = 78 }, "quux"); + error!(parent: None, { foo = ?2, bar.baz = %78 }, "quux"); + error!(target: "foo_events", parent: None, foo = 3, bar.baz = 2, quux = false); + error!(target: "foo_events", parent: None, foo = 3, bar.baz = 3,); + error!(target: "foo_events", parent: None, "foo"); + error!(target: "foo_events", parent: None, "foo: {}", 3); + error!(target: "foo_events", parent: None, { foo = 3, bar.baz = 80 }, "quux"); + error!(target: "foo_events", parent: None, { foo = 2, bar.baz = 79 }, "quux {:?}", true); + error!(target: "foo_events", parent: None, { foo = 2, bar.baz = 79 }, "quux {:?}, {quux}", true, quux = false); + error!(target: "foo_events", parent: None, { foo = 2, bar.baz = 78, }, "quux"); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn event_with_parent() { + let p = span!(Level::TRACE, "im_a_parent!"); + event!(parent: &p, Level::DEBUG, foo = ?3, bar.baz = %2, quux = false); + event!(parent: &p, Level::DEBUG, foo = 3, bar.baz = 2, quux = false); + event!(parent: &p, Level::DEBUG, foo = 3, bar.baz = 3,); + event!(parent: &p, Level::DEBUG, "foo"); + event!(parent: &p, Level::DEBUG, "foo: {}", 3); + event!(parent: &p, Level::DEBUG, { foo = 3, bar.baz = 80 }, "quux"); + event!(parent: &p, Level::DEBUG, { foo = 2, bar.baz = 79 }, "quux {:?}", true); + event!(parent: &p, Level::DEBUG, { foo = 2, bar.baz = 79 }, "quux {:?}, {quux}", true, quux = false); + event!(parent: &p, Level::DEBUG, { foo = ?2, bar.baz = %78 }, "quux"); + event!(target: "foo_events", parent: &p, Level::DEBUG, foo = 3, bar.baz = 2, quux = false); + event!(target: "foo_events", parent: &p, Level::DEBUG, foo = 3, bar.baz = 3,); + event!(target: "foo_events", parent: &p, Level::DEBUG, "foo"); + event!(target: "foo_events", parent: &p, Level::DEBUG, "foo: {}", 3); + event!(target: "foo_events", parent: &p, Level::DEBUG, { foo = 3, bar.baz = 80 }, "quux"); + event!(target: "foo_events", parent: &p, Level::DEBUG, { foo = 2, bar.baz = 79 }, "quux {:?}", true); + event!(target: "foo_events", parent: &p, Level::DEBUG, { foo = 2, bar.baz = 79 }, "quux {:?}, {quux}", true, quux = false); + event!(target: "foo_events", parent: &p, Level::DEBUG, { foo = 2, bar.baz = 78, }, "quux"); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn trace_with_parent() { + let p = span!(Level::TRACE, "im_a_parent!"); + trace!(parent: &p, foo = ?3, bar.baz = %2, quux = false); + trace!(parent: &p, foo = 3, bar.baz = 2, quux = false); + trace!(parent: &p, foo = 3, bar.baz = 3,); + trace!(parent: &p, "foo"); + trace!(parent: &p, "foo: {}", 3); + trace!(parent: &p, { foo = 3, bar.baz = 80 }, "quux"); + trace!(parent: &p, { foo = 2, bar.baz = 79 }, "quux {:?}", true); + trace!(parent: &p, { foo = 2, bar.baz = 79 }, "quux {:?}, {quux}", true, quux = false); + trace!(parent: &p, { foo = 2, bar.baz = 78 }, "quux"); + trace!(parent: &p, { foo = ?2, bar.baz = %78 }, "quux"); + trace!(target: "foo_events", parent: &p, foo = 3, bar.baz = 2, quux = false); + trace!(target: 
"foo_events", parent: &p, foo = 3, bar.baz = 3,); + trace!(target: "foo_events", parent: &p, "foo"); + trace!(target: "foo_events", parent: &p, "foo: {}", 3); + trace!(target: "foo_events", parent: &p, { foo = 3, bar.baz = 80 }, "quux"); + trace!(target: "foo_events", parent: &p, { foo = 2, bar.baz = 79 }, "quux {:?}", true); + trace!(target: "foo_events", parent: &p, { foo = 2, bar.baz = 79 }, "quux {:?}, {quux}", true, quux = false); + trace!(target: "foo_events", parent: &p, { foo = 2, bar.baz = 78, }, "quux"); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn debug_with_parent() { + let p = span!(Level::TRACE, "im_a_parent!"); + debug!(parent: &p, foo = ?3, bar.baz = %2, quux = false); + debug!(parent: &p, foo = 3, bar.baz = 2, quux = false); + debug!(parent: &p, foo = 3, bar.baz = 3,); + debug!(parent: &p, "foo"); + debug!(parent: &p, "foo: {}", 3); + debug!(parent: &p, { foo = 3, bar.baz = 80 }, "quux"); + debug!(parent: &p, { foo = 2, bar.baz = 79 }, "quux {:?}", true); + debug!(parent: &p, { foo = 2, bar.baz = 79 }, "quux {:?}, {quux}", true, quux = false); + debug!(parent: &p, { foo = 2, bar.baz = 78 }, "quux"); + debug!(parent: &p, { foo = ?2, bar.baz = %78 }, "quux"); + debug!(target: "foo_events", parent: &p, foo = 3, bar.baz = 2, quux = false); + debug!(target: "foo_events", parent: &p, foo = 3, bar.baz = 3,); + debug!(target: "foo_events", parent: &p, "foo"); + debug!(target: "foo_events", parent: &p, "foo: {}", 3); + debug!(target: "foo_events", parent: &p, { foo = 3, bar.baz = 80 }, "quux"); + debug!(target: "foo_events", parent: &p, { foo = 2, bar.baz = 79 }, "quux {:?}", true); + debug!(target: "foo_events", parent: &p, { foo = 2, bar.baz = 79 }, "quux {:?}, {quux}", true, quux = false); + debug!(target: "foo_events", parent: &p, { foo = 2, bar.baz = 78, }, "quux"); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn info_with_parent() { + let p = span!(Level::TRACE, "im_a_parent!"); + info!(parent: &p, foo = ?3, bar.baz = %2, quux = false); + info!(parent: &p, foo = 3, bar.baz = 2, quux = false); + info!(parent: &p, foo = 3, bar.baz = 3,); + info!(parent: &p, "foo"); + info!(parent: &p, "foo: {}", 3); + info!(parent: &p, { foo = 3, bar.baz = 80 }, "quux"); + info!(parent: &p, { foo = 2, bar.baz = 79 }, "quux {:?}", true); + info!(parent: &p, { foo = 2, bar.baz = 79 }, "quux {:?}, {quux}", true, quux = false); + info!(parent: &p, { foo = 2, bar.baz = 78 }, "quux"); + info!(parent: &p, { foo = ?2, bar.baz = %78 }, "quux"); + info!(target: "foo_events", parent: &p, foo = 3, bar.baz = 2, quux = false); + info!(target: "foo_events", parent: &p, foo = 3, bar.baz = 3,); + info!(target: "foo_events", parent: &p, "foo"); + info!(target: "foo_events", parent: &p, "foo: {}", 3); + info!(target: "foo_events", parent: &p, { foo = 3, bar.baz = 80 }, "quux"); + info!(target: "foo_events", parent: &p, { foo = 2, bar.baz = 79 }, "quux {:?}", true); + info!(target: "foo_events", parent: &p, { foo = 2, bar.baz = 79 }, "quux {:?}, {quux}", true, quux = false); + info!(target: "foo_events", parent: &p, { foo = 2, bar.baz = 78, }, "quux"); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn warn_with_parent() { + let p = span!(Level::TRACE, "im_a_parent!"); + warn!(parent: &p, foo = ?3, bar.baz = %2, quux = false); + warn!(parent: &p, foo = 3, bar.baz = 2, quux = false); + warn!(parent: &p, foo = 3, bar.baz = 3,); + warn!(parent: &p, "foo"); + warn!(parent: &p, "foo: 
{}", 3); + warn!(parent: &p, { foo = 3, bar.baz = 80 }, "quux"); + warn!(parent: &p, { foo = 2, bar.baz = 79 }, "quux {:?}", true); + warn!(parent: &p, { foo = 2, bar.baz = 79 }, "quux {:?}, {quux}", true, quux = false); + warn!(parent: &p, { foo = 2, bar.baz = 78 }, "quux"); + warn!(parent: &p, { foo = ?2, bar.baz = %78 }, "quux"); + warn!(target: "foo_events", parent: &p, foo = 3, bar.baz = 2, quux = false); + warn!(target: "foo_events", parent: &p, foo = 3, bar.baz = 3,); + warn!(target: "foo_events", parent: &p, "foo"); + warn!(target: "foo_events", parent: &p, "foo: {}", 3); + warn!(target: "foo_events", parent: &p, { foo = 3, bar.baz = 80 }, "quux"); + warn!(target: "foo_events", parent: &p, { foo = 2, bar.baz = 79 }, "quux {:?}", true); + warn!(target: "foo_events", parent: &p, { foo = 2, bar.baz = 79 }, "quux {:?}, {quux}", true, quux = false); + warn!(target: "foo_events", parent: &p, { foo = 2, bar.baz = 78, }, "quux"); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn error_with_parent() { + let p = span!(Level::TRACE, "im_a_parent!"); + error!(parent: &p, foo = ?3, bar.baz = %2, quux = false); + error!(parent: &p, foo = 3, bar.baz = 2, quux = false); + error!(parent: &p, foo = 3, bar.baz = 3,); + error!(parent: &p, "foo"); + error!(parent: &p, "foo: {}", 3); + error!(parent: &p, { foo = 3, bar.baz = 80 }, "quux"); + error!(parent: &p, { foo = 2, bar.baz = 79 }, "quux {:?}", true); + error!(parent: &p, { foo = 2, bar.baz = 79 }, "quux {:?}, {quux}", true, quux = false); + error!(parent: &p, { foo = 2, bar.baz = 78 }, "quux"); + error!(parent: &p, { foo = ?2, bar.baz = %78 }, "quux"); + error!(target: "foo_events", parent: &p, foo = 3, bar.baz = 2, quux = false); + error!(target: "foo_events", parent: &p, foo = 3, bar.baz = 3,); + error!(target: "foo_events", parent: &p, "foo"); + error!(target: "foo_events", parent: &p, "foo: {}", 3); + error!(target: "foo_events", parent: &p, { foo = 3, bar.baz = 80 }, "quux"); + error!(target: "foo_events", parent: &p, { foo = 2, bar.baz = 79 }, "quux {:?}", true); + error!(target: "foo_events", parent: &p, { foo = 2, bar.baz = 79 }, "quux {:?}, {quux}", true, quux = false); + error!(target: "foo_events", parent: &p, { foo = 2, bar.baz = 78, }, "quux"); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn field_shorthand_only() { + #[derive(Debug)] + struct Position { + x: f32, + y: f32, + } + let pos = Position { + x: 3.234, + y: -1.223, + }; + + trace!(?pos.x, ?pos.y); + debug!(?pos.x, ?pos.y); + info!(?pos.x, ?pos.y); + warn!(?pos.x, ?pos.y); + error!(?pos.x, ?pos.y); + event!(Level::TRACE, ?pos.x, ?pos.y); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn borrow_val_events() { + // Reproduces https://github.com/tokio-rs/tracing/issues/954 + let mut foo = (String::new(), String::new()); + let zero = &mut foo.0; + trace!(one = ?foo.1); + debug!(one = ?foo.1); + info!(one = ?foo.1); + warn!(one = ?foo.1); + error!(one = ?foo.1); + zero.push_str("hello world"); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn borrow_val_spans() { + // Reproduces https://github.com/tokio-rs/tracing/issues/954 + let mut foo = (String::new(), String::new()); + let zero = &mut foo.0; + let _span = trace_span!("span", one = ?foo.1); + let _span = debug_span!("span", one = ?foo.1); + let _span = info_span!("span", one = ?foo.1); + let _span = warn_span!("span", one = ?foo.1); + let _span = 
error_span!("span", one = ?foo.1); + zero.push_str("hello world"); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn callsite_macro_api() { + // This test should catch any inadvertent breaking changes + // caused by changes to the macro. + let _callsite = callsite! { + name: "test callsite", + kind: tracing::metadata::Kind::EVENT, + target: "test target", + level: tracing::Level::TRACE, + fields: foo, bar, + }; + let _callsite = callsite! { + name: "test callsite", + kind: tracing::metadata::Kind::SPAN, + level: tracing::Level::TRACE, + fields: foo, + }; + let _callsite = callsite! { + name: "test callsite", + kind: tracing::metadata::Kind::SPAN, + fields: foo, + }; +} diff --git a/third_party/rust/tracing/tests/macros_incompatible_concat.rs b/third_party/rust/tracing/tests/macros_incompatible_concat.rs new file mode 100644 index 000000000000..bda6b964fa5c --- /dev/null +++ b/third_party/rust/tracing/tests/macros_incompatible_concat.rs @@ -0,0 +1,24 @@ +use tracing::{enabled, event, span, Level}; + +#[macro_export] +macro_rules! concat { + () => {}; +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn span() { + span!(Level::DEBUG, "foo"); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn event() { + event!(Level::DEBUG, "foo"); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn enabled() { + enabled!(Level::DEBUG); +} diff --git a/third_party/rust/tracing/tests/macros_redefined_core.rs b/third_party/rust/tracing/tests/macros_redefined_core.rs new file mode 100644 index 000000000000..d830dcdb0254 --- /dev/null +++ b/third_party/rust/tracing/tests/macros_redefined_core.rs @@ -0,0 +1,18 @@ +extern crate self as core; + +use tracing::{enabled, event, span, Level}; + +#[test] +fn span() { + span!(Level::DEBUG, "foo"); +} + +#[test] +fn event() { + event!(Level::DEBUG, "foo"); +} + +#[test] +fn enabled() { + enabled!(Level::DEBUG); +} diff --git a/third_party/rust/tracing/tests/max_level_hint.rs b/third_party/rust/tracing/tests/max_level_hint.rs new file mode 100644 index 000000000000..63d3af635705 --- /dev/null +++ b/third_party/rust/tracing/tests/max_level_hint.rs @@ -0,0 +1,37 @@ +use tracing::Level; +use tracing_mock::*; + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn max_level_hints() { + // This test asserts that when a subscriber provides us with the global + // maximum level that it will enable (by implementing the + // `Subscriber::max_level_hint` method), we will never call + // `Subscriber::enabled` for events above that maximum level. + // + // In this case, we test that by making the `enabled` method assert that no + // `Metadata` for spans or events at the `TRACE` or `DEBUG` levels. 
+ let (subscriber, handle) = subscriber::mock() + .with_max_level_hint(Level::INFO) + .with_filter(|meta| { + assert!( + dbg!(meta).level() <= &Level::INFO, + "a TRACE or DEBUG event was dynamically filtered: " + ); + true + }) + .event(event::mock().at_level(Level::INFO)) + .event(event::mock().at_level(Level::WARN)) + .event(event::mock().at_level(Level::ERROR)) + .done() + .run_with_handle(); + + tracing::subscriber::set_global_default(subscriber).unwrap(); + + tracing::info!("doing a thing that you might care about"); + tracing::debug!("charging turboencabulator with interocitor"); + tracing::warn!("extremely serious warning, pay attention"); + tracing::trace!("interocitor charge level is 10%"); + tracing::error!("everything is on fire"); + handle.assert_finished(); +} diff --git a/third_party/rust/tracing/tests/multiple_max_level_hints.rs b/third_party/rust/tracing/tests/multiple_max_level_hints.rs new file mode 100644 index 000000000000..dd50a193b539 --- /dev/null +++ b/third_party/rust/tracing/tests/multiple_max_level_hints.rs @@ -0,0 +1,69 @@ +#![cfg(feature = "std")] + +use tracing::Level; +use tracing_mock::*; + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn multiple_max_level_hints() { + // This test ensures that when multiple subscribers are active, their max + // level hints are handled correctly. The global max level should be the + // maximum of the level filters returned by the two `Subscriber`'s + // `max_level_hint` method. + // + // In this test, we create a subscriber whose max level is `INFO`, and + // another whose max level is `DEBUG`. We then add an assertion to both of + // those subscribers' `enabled` method that no metadata for `TRACE` spans or + // events are filtered, since they are disabled by the global max filter. 
+ + fn do_events() { + tracing::info!("doing a thing that you might care about"); + tracing::debug!("charging turboencabulator with interocitor"); + tracing::warn!("extremely serious warning, pay attention"); + tracing::trace!("interocitor charge level is 10%"); + tracing::error!("everything is on fire"); + } + + let (subscriber1, handle1) = subscriber::mock() + .named("subscriber1") + .with_max_level_hint(Level::INFO) + .with_filter(|meta| { + let level = dbg!(meta.level()); + assert!( + level <= &Level::DEBUG, + "a TRACE event was dynamically filtered by subscriber1" + ); + level <= &Level::INFO + }) + .event(event::mock().at_level(Level::INFO)) + .event(event::mock().at_level(Level::WARN)) + .event(event::mock().at_level(Level::ERROR)) + .done() + .run_with_handle(); + let (subscriber2, handle2) = subscriber::mock() + .named("subscriber2") + .with_max_level_hint(Level::DEBUG) + .with_filter(|meta| { + let level = dbg!(meta.level()); + assert!( + level <= &Level::DEBUG, + "a TRACE event was dynamically filtered by subscriber2" + ); + level <= &Level::DEBUG + }) + .event(event::mock().at_level(Level::INFO)) + .event(event::mock().at_level(Level::DEBUG)) + .event(event::mock().at_level(Level::WARN)) + .event(event::mock().at_level(Level::ERROR)) + .done() + .run_with_handle(); + + let dispatch1 = tracing::Dispatch::new(subscriber1); + + tracing::dispatcher::with_default(&dispatch1, do_events); + handle1.assert_finished(); + + let dispatch2 = tracing::Dispatch::new(subscriber2); + tracing::dispatcher::with_default(&dispatch2, do_events); + handle2.assert_finished(); +} diff --git a/third_party/rust/tracing/tests/no_subscriber.rs b/third_party/rust/tracing/tests/no_subscriber.rs new file mode 100644 index 000000000000..5f927c1dee85 --- /dev/null +++ b/third_party/rust/tracing/tests/no_subscriber.rs @@ -0,0 +1,15 @@ +#![cfg(feature = "std")] + +use tracing::subscriber::{self, NoSubscriber}; + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn no_subscriber_disables_global() { + // Reproduces https://github.com/tokio-rs/tracing/issues/1999 + let (subscriber, handle) = tracing_mock::subscriber::mock().done().run_with_handle(); + subscriber::set_global_default(subscriber).expect("setting global default must succeed"); + subscriber::with_default(NoSubscriber::default(), || { + tracing::info!("this should not be recorded"); + }); + handle.assert_finished(); +} diff --git a/third_party/rust/tracing/tests/scoped_clobbers_default.rs b/third_party/rust/tracing/tests/scoped_clobbers_default.rs new file mode 100644 index 000000000000..362d34a82cb4 --- /dev/null +++ b/third_party/rust/tracing/tests/scoped_clobbers_default.rs @@ -0,0 +1,35 @@ +#![cfg(feature = "std")] +use tracing_mock::*; + +#[test] +fn scoped_clobbers_global() { + // Reproduces https://github.com/tokio-rs/tracing/issues/2050 + + let (scoped, scoped_handle) = subscriber::mock() + .event(event::msg("before global")) + .event(event::msg("before drop")) + .done() + .run_with_handle(); + + let (global, global_handle) = subscriber::mock() + .event(event::msg("after drop")) + .done() + .run_with_handle(); + + // Set a scoped default subscriber, returning a guard. + let guard = tracing::subscriber::set_default(scoped); + tracing::info!("before global"); + + // Now, set the global default. + tracing::subscriber::set_global_default(global) + .expect("global default should not already be set"); + // This event should still be collected by the scoped default. 
+ tracing::info!("before drop"); + + // Drop the guard. Now, the global default subscriber should be used. + drop(guard); + tracing::info!("after drop"); + + scoped_handle.assert_finished(); + global_handle.assert_finished(); +} diff --git a/third_party/rust/tracing/tests/span.rs b/third_party/rust/tracing/tests/span.rs new file mode 100644 index 000000000000..4ed6500235bf --- /dev/null +++ b/third_party/rust/tracing/tests/span.rs @@ -0,0 +1,825 @@ +// These tests require the thread-local scoped dispatcher, which only works when +// we have a standard library. The behaviour being tested should be the same +// with the standard lib disabled. +#![cfg(feature = "std")] + +use std::thread; + +use tracing::{ + field::{debug, display}, + subscriber::with_default, + Level, Span, +}; +use tracing_mock::*; + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn handles_to_the_same_span_are_equal() { + // Create a mock subscriber that will return `true` on calls to + // `Subscriber::enabled`, so that the spans will be constructed. We + // won't enter any spans in this test, so the subscriber won't actually + // expect to see any spans. + with_default(subscriber::mock().run(), || { + let foo1 = tracing::span!(Level::TRACE, "foo"); + let foo2 = foo1.clone(); + // Two handles that point to the same span are equal. + assert_eq!(foo1, foo2); + }); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn handles_to_different_spans_are_not_equal() { + with_default(subscriber::mock().run(), || { + // Even though these spans have the same name and fields, they will have + // differing metadata, since they were created on different lines. + let foo1 = tracing::span!(Level::TRACE, "foo", bar = 1u64, baz = false); + let foo2 = tracing::span!(Level::TRACE, "foo", bar = 1u64, baz = false); + + assert_ne!(foo1, foo2); + }); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn handles_to_different_spans_with_the_same_metadata_are_not_equal() { + // Every time time this function is called, it will return a _new + // instance_ of a span with the same metadata, name, and fields. + fn make_span() -> Span { + tracing::span!(Level::TRACE, "foo", bar = 1u64, baz = false) + } + + with_default(subscriber::mock().run(), || { + let foo1 = make_span(); + let foo2 = make_span(); + + assert_ne!(foo1, foo2); + // assert_ne!(foo1.data(), foo2.data()); + }); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn spans_always_go_to_the_subscriber_that_tagged_them() { + let subscriber1 = subscriber::mock() + .enter(span::mock().named("foo")) + .exit(span::mock().named("foo")) + .enter(span::mock().named("foo")) + .exit(span::mock().named("foo")) + .drop_span(span::mock().named("foo")) + .done() + .run(); + let subscriber2 = subscriber::mock().run(); + + let foo = with_default(subscriber1, || { + let foo = tracing::span!(Level::TRACE, "foo"); + foo.in_scope(|| {}); + foo + }); + // Even though we enter subscriber 2's context, the subscriber that + // tagged the span should see the enter/exit. + with_default(subscriber2, move || foo.in_scope(|| {})); +} + +// This gets exempt from testing in wasm because of: `thread::spawn` which is +// not yet possible to do in WASM. 
There is work going on see: +// +// +// But for now since it's not possible we don't need to test for it :) +#[test] +fn spans_always_go_to_the_subscriber_that_tagged_them_even_across_threads() { + let subscriber1 = subscriber::mock() + .enter(span::mock().named("foo")) + .exit(span::mock().named("foo")) + .enter(span::mock().named("foo")) + .exit(span::mock().named("foo")) + .drop_span(span::mock().named("foo")) + .done() + .run(); + let foo = with_default(subscriber1, || { + let foo = tracing::span!(Level::TRACE, "foo"); + foo.in_scope(|| {}); + foo + }); + + // Even though we enter subscriber 2's context, the subscriber that + // tagged the span should see the enter/exit. + thread::spawn(move || { + with_default(subscriber::mock().run(), || { + foo.in_scope(|| {}); + }) + }) + .join() + .unwrap(); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn dropping_a_span_calls_drop_span() { + let (subscriber, handle) = subscriber::mock() + .enter(span::mock().named("foo")) + .exit(span::mock().named("foo")) + .drop_span(span::mock().named("foo")) + .done() + .run_with_handle(); + with_default(subscriber, || { + let span = tracing::span!(Level::TRACE, "foo"); + span.in_scope(|| {}); + drop(span); + }); + + handle.assert_finished(); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn span_closes_after_event() { + let (subscriber, handle) = subscriber::mock() + .enter(span::mock().named("foo")) + .event(event::mock()) + .exit(span::mock().named("foo")) + .drop_span(span::mock().named("foo")) + .done() + .run_with_handle(); + with_default(subscriber, || { + tracing::span!(Level::TRACE, "foo").in_scope(|| { + tracing::event!(Level::DEBUG, {}, "my tracing::event!"); + }); + }); + + handle.assert_finished(); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn new_span_after_event() { + let (subscriber, handle) = subscriber::mock() + .enter(span::mock().named("foo")) + .event(event::mock()) + .exit(span::mock().named("foo")) + .drop_span(span::mock().named("foo")) + .enter(span::mock().named("bar")) + .exit(span::mock().named("bar")) + .drop_span(span::mock().named("bar")) + .done() + .run_with_handle(); + with_default(subscriber, || { + tracing::span!(Level::TRACE, "foo").in_scope(|| { + tracing::event!(Level::DEBUG, {}, "my tracing::event!"); + }); + tracing::span!(Level::TRACE, "bar").in_scope(|| {}); + }); + + handle.assert_finished(); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn event_outside_of_span() { + let (subscriber, handle) = subscriber::mock() + .event(event::mock()) + .enter(span::mock().named("foo")) + .exit(span::mock().named("foo")) + .drop_span(span::mock().named("foo")) + .done() + .run_with_handle(); + with_default(subscriber, || { + tracing::debug!("my tracing::event!"); + tracing::span!(Level::TRACE, "foo").in_scope(|| {}); + }); + + handle.assert_finished(); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn cloning_a_span_calls_clone_span() { + let (subscriber, handle) = subscriber::mock() + .clone_span(span::mock().named("foo")) + .run_with_handle(); + with_default(subscriber, || { + let span = tracing::span!(Level::TRACE, "foo"); + // Allow the "redundant" `.clone` since it is used to call into the `.clone_span` hook. 
+ #[allow(clippy::redundant_clone)] + let _span2 = span.clone(); + }); + + handle.assert_finished(); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn drop_span_when_exiting_dispatchers_context() { + let (subscriber, handle) = subscriber::mock() + .clone_span(span::mock().named("foo")) + .drop_span(span::mock().named("foo")) + .drop_span(span::mock().named("foo")) + .run_with_handle(); + with_default(subscriber, || { + let span = tracing::span!(Level::TRACE, "foo"); + let _span2 = span.clone(); + drop(span); + }); + + handle.assert_finished(); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn clone_and_drop_span_always_go_to_the_subscriber_that_tagged_the_span() { + let (subscriber1, handle1) = subscriber::mock() + .enter(span::mock().named("foo")) + .exit(span::mock().named("foo")) + .clone_span(span::mock().named("foo")) + .enter(span::mock().named("foo")) + .exit(span::mock().named("foo")) + .drop_span(span::mock().named("foo")) + .drop_span(span::mock().named("foo")) + .run_with_handle(); + let subscriber2 = subscriber::mock().done().run(); + + let foo = with_default(subscriber1, || { + let foo = tracing::span!(Level::TRACE, "foo"); + foo.in_scope(|| {}); + foo + }); + // Even though we enter subscriber 2's context, the subscriber that + // tagged the span should see the enter/exit. + with_default(subscriber2, move || { + let foo2 = foo.clone(); + foo.in_scope(|| {}); + drop(foo); + drop(foo2); + }); + + handle1.assert_finished(); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn span_closes_when_exited() { + let (subscriber, handle) = subscriber::mock() + .enter(span::mock().named("foo")) + .exit(span::mock().named("foo")) + .drop_span(span::mock().named("foo")) + .done() + .run_with_handle(); + with_default(subscriber, || { + let foo = tracing::span!(Level::TRACE, "foo"); + + foo.in_scope(|| {}); + + drop(foo); + }); + + handle.assert_finished(); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn enter() { + let (subscriber, handle) = subscriber::mock() + .enter(span::mock().named("foo")) + .event(event::mock()) + .exit(span::mock().named("foo")) + .drop_span(span::mock().named("foo")) + .done() + .run_with_handle(); + with_default(subscriber, || { + let foo = tracing::span!(Level::TRACE, "foo"); + let _enter = foo.enter(); + tracing::debug!("dropping guard..."); + }); + + handle.assert_finished(); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn entered() { + let (subscriber, handle) = subscriber::mock() + .enter(span::mock().named("foo")) + .event(event::mock()) + .exit(span::mock().named("foo")) + .drop_span(span::mock().named("foo")) + .done() + .run_with_handle(); + with_default(subscriber, || { + let _span = tracing::span!(Level::TRACE, "foo").entered(); + tracing::debug!("dropping guard..."); + }); + + handle.assert_finished(); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn entered_api() { + let (subscriber, handle) = subscriber::mock() + .enter(span::mock().named("foo")) + .event(event::mock()) + .exit(span::mock().named("foo")) + .drop_span(span::mock().named("foo")) + .done() + .run_with_handle(); + with_default(subscriber, || { + let span = tracing::span!(Level::TRACE, "foo").entered(); + let _derefs_to_span = span.id(); + tracing::debug!("exiting span..."); + let _: Span = span.exit(); + }); + + 
handle.assert_finished(); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn moved_field() { + let (subscriber, handle) = subscriber::mock() + .new_span( + span::mock().named("foo").with_field( + field::mock("bar") + .with_value(&display("hello from my span")) + .only(), + ), + ) + .enter(span::mock().named("foo")) + .exit(span::mock().named("foo")) + .drop_span(span::mock().named("foo")) + .done() + .run_with_handle(); + with_default(subscriber, || { + let from = "my span"; + let span = tracing::span!( + Level::TRACE, + "foo", + bar = display(format!("hello from {}", from)) + ); + span.in_scope(|| {}); + }); + + handle.assert_finished(); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn dotted_field_name() { + let (subscriber, handle) = subscriber::mock() + .new_span( + span::mock() + .named("foo") + .with_field(field::mock("fields.bar").with_value(&true).only()), + ) + .done() + .run_with_handle(); + with_default(subscriber, || { + tracing::span!(Level::TRACE, "foo", fields.bar = true); + }); + + handle.assert_finished(); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn borrowed_field() { + let (subscriber, handle) = subscriber::mock() + .new_span( + span::mock().named("foo").with_field( + field::mock("bar") + .with_value(&display("hello from my span")) + .only(), + ), + ) + .enter(span::mock().named("foo")) + .exit(span::mock().named("foo")) + .drop_span(span::mock().named("foo")) + .done() + .run_with_handle(); + + with_default(subscriber, || { + let from = "my span"; + let mut message = format!("hello from {}", from); + let span = tracing::span!(Level::TRACE, "foo", bar = display(&message)); + span.in_scope(|| { + message.insert_str(10, " inside"); + }); + }); + + handle.assert_finished(); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +// If emitting log instrumentation, this gets moved anyway, breaking the test. +#[cfg(not(feature = "log"))] +fn move_field_out_of_struct() { + use tracing::field::debug; + + #[derive(Debug)] + struct Position { + x: f32, + y: f32, + } + + let pos = Position { + x: 3.234, + y: -1.223, + }; + let (subscriber, handle) = subscriber::mock() + .new_span( + span::mock().named("foo").with_field( + field::mock("x") + .with_value(&debug(3.234)) + .and(field::mock("y").with_value(&debug(-1.223))) + .only(), + ), + ) + .new_span( + span::mock() + .named("bar") + .with_field(field::mock("position").with_value(&debug(&pos)).only()), + ) + .run_with_handle(); + + with_default(subscriber, || { + let pos = Position { + x: 3.234, + y: -1.223, + }; + let foo = tracing::span!(Level::TRACE, "foo", x = debug(pos.x), y = debug(pos.y)); + let bar = tracing::span!(Level::TRACE, "bar", position = debug(pos)); + foo.in_scope(|| {}); + bar.in_scope(|| {}); + }); + + handle.assert_finished(); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn float_values() { + let (subscriber, handle) = subscriber::mock() + .new_span( + span::mock().named("foo").with_field( + field::mock("x") + .with_value(&3.234) + .and(field::mock("y").with_value(&-1.223)) + .only(), + ), + ) + .run_with_handle(); + + with_default(subscriber, || { + let foo = tracing::span!(Level::TRACE, "foo", x = 3.234, y = -1.223); + foo.in_scope(|| {}); + }); + + handle.assert_finished(); +} + +// TODO(#1138): determine a new syntax for uninitialized span fields, and +// re-enable these. 
+/* +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn add_field_after_new_span() { + let (subscriber, handle) = subscriber::mock() + .new_span( + span::mock() + .named("foo") + .with_field(field::mock("bar").with_value(&5) + .and(field::mock("baz").with_value).only()), + ) + .record( + span::mock().named("foo"), + field::mock("baz").with_value(&true).only(), + ) + .enter(span::mock().named("foo")) + .exit(span::mock().named("foo")) + .drop_span(span::mock().named("foo")) + .done() + .run_with_handle(); + + with_default(subscriber, || { + let span = tracing::span!(Level::TRACE, "foo", bar = 5, baz = false); + span.record("baz", &true); + span.in_scope(|| {}) + }); + + handle.assert_finished(); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn add_fields_only_after_new_span() { + let (subscriber, handle) = subscriber::mock() + .new_span(span::mock().named("foo")) + .record( + span::mock().named("foo"), + field::mock("bar").with_value(&5).only(), + ) + .record( + span::mock().named("foo"), + field::mock("baz").with_value(&true).only(), + ) + .enter(span::mock().named("foo")) + .exit(span::mock().named("foo")) + .drop_span(span::mock().named("foo")) + .done() + .run_with_handle(); + + with_default(subscriber, || { + let span = tracing::span!(Level::TRACE, "foo", bar = _, baz = _); + span.record("bar", &5); + span.record("baz", &true); + span.in_scope(|| {}) + }); + + handle.assert_finished(); +} +*/ + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn record_new_value_for_field() { + let (subscriber, handle) = subscriber::mock() + .new_span( + span::mock().named("foo").with_field( + field::mock("bar") + .with_value(&5) + .and(field::mock("baz").with_value(&false)) + .only(), + ), + ) + .record( + span::mock().named("foo"), + field::mock("baz").with_value(&true).only(), + ) + .enter(span::mock().named("foo")) + .exit(span::mock().named("foo")) + .drop_span(span::mock().named("foo")) + .done() + .run_with_handle(); + + with_default(subscriber, || { + let span = tracing::span!(Level::TRACE, "foo", bar = 5, baz = false); + span.record("baz", &true); + span.in_scope(|| {}) + }); + + handle.assert_finished(); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn record_new_values_for_fields() { + let (subscriber, handle) = subscriber::mock() + .new_span( + span::mock().named("foo").with_field( + field::mock("bar") + .with_value(&4) + .and(field::mock("baz").with_value(&false)) + .only(), + ), + ) + .record( + span::mock().named("foo"), + field::mock("bar").with_value(&5).only(), + ) + .record( + span::mock().named("foo"), + field::mock("baz").with_value(&true).only(), + ) + .enter(span::mock().named("foo")) + .exit(span::mock().named("foo")) + .drop_span(span::mock().named("foo")) + .done() + .run_with_handle(); + + with_default(subscriber, || { + let span = tracing::span!(Level::TRACE, "foo", bar = 4, baz = false); + span.record("bar", &5); + span.record("baz", &true); + span.in_scope(|| {}) + }); + + handle.assert_finished(); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn new_span_with_target_and_log_level() { + let (subscriber, handle) = subscriber::mock() + .new_span( + span::mock() + .named("foo") + .with_target("app_span") + .at_level(Level::DEBUG), + ) + .done() + .run_with_handle(); + + with_default(subscriber, || { + tracing::span!(target: "app_span", Level::DEBUG, "foo"); + }); + + 
handle.assert_finished(); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn explicit_root_span_is_root() { + let (subscriber, handle) = subscriber::mock() + .new_span(span::mock().named("foo").with_explicit_parent(None)) + .done() + .run_with_handle(); + + with_default(subscriber, || { + tracing::span!(parent: None, Level::TRACE, "foo"); + }); + + handle.assert_finished(); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn explicit_root_span_is_root_regardless_of_ctx() { + let (subscriber, handle) = subscriber::mock() + .new_span(span::mock().named("foo")) + .enter(span::mock().named("foo")) + .new_span(span::mock().named("bar").with_explicit_parent(None)) + .exit(span::mock().named("foo")) + .done() + .run_with_handle(); + + with_default(subscriber, || { + tracing::span!(Level::TRACE, "foo").in_scope(|| { + tracing::span!(parent: None, Level::TRACE, "bar"); + }) + }); + + handle.assert_finished(); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn explicit_child() { + let (subscriber, handle) = subscriber::mock() + .new_span(span::mock().named("foo")) + .new_span(span::mock().named("bar").with_explicit_parent(Some("foo"))) + .done() + .run_with_handle(); + + with_default(subscriber, || { + let foo = tracing::span!(Level::TRACE, "foo"); + tracing::span!(parent: foo.id(), Level::TRACE, "bar"); + }); + + handle.assert_finished(); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn explicit_child_at_levels() { + let (subscriber, handle) = subscriber::mock() + .new_span(span::mock().named("foo")) + .new_span(span::mock().named("a").with_explicit_parent(Some("foo"))) + .new_span(span::mock().named("b").with_explicit_parent(Some("foo"))) + .new_span(span::mock().named("c").with_explicit_parent(Some("foo"))) + .new_span(span::mock().named("d").with_explicit_parent(Some("foo"))) + .new_span(span::mock().named("e").with_explicit_parent(Some("foo"))) + .done() + .run_with_handle(); + + with_default(subscriber, || { + let foo = tracing::span!(Level::TRACE, "foo"); + tracing::trace_span!(parent: foo.id(), "a"); + tracing::debug_span!(parent: foo.id(), "b"); + tracing::info_span!(parent: foo.id(), "c"); + tracing::warn_span!(parent: foo.id(), "d"); + tracing::error_span!(parent: foo.id(), "e"); + }); + + handle.assert_finished(); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn explicit_child_regardless_of_ctx() { + let (subscriber, handle) = subscriber::mock() + .new_span(span::mock().named("foo")) + .new_span(span::mock().named("bar")) + .enter(span::mock().named("bar")) + .new_span(span::mock().named("baz").with_explicit_parent(Some("foo"))) + .exit(span::mock().named("bar")) + .done() + .run_with_handle(); + + with_default(subscriber, || { + let foo = tracing::span!(Level::TRACE, "foo"); + tracing::span!(Level::TRACE, "bar") + .in_scope(|| tracing::span!(parent: foo.id(), Level::TRACE, "baz")) + }); + + handle.assert_finished(); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn contextual_root() { + let (subscriber, handle) = subscriber::mock() + .new_span(span::mock().named("foo").with_contextual_parent(None)) + .done() + .run_with_handle(); + + with_default(subscriber, || { + tracing::span!(Level::TRACE, "foo"); + }); + + handle.assert_finished(); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn 
contextual_child() { + let (subscriber, handle) = subscriber::mock() + .new_span(span::mock().named("foo")) + .enter(span::mock().named("foo")) + .new_span( + span::mock() + .named("bar") + .with_contextual_parent(Some("foo")), + ) + .exit(span::mock().named("foo")) + .done() + .run_with_handle(); + + with_default(subscriber, || { + tracing::span!(Level::TRACE, "foo").in_scope(|| { + tracing::span!(Level::TRACE, "bar"); + }) + }); + + handle.assert_finished(); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn display_shorthand() { + let (subscriber, handle) = subscriber::mock() + .new_span( + span::mock().named("my_span").with_field( + field::mock("my_field") + .with_value(&display("hello world")) + .only(), + ), + ) + .done() + .run_with_handle(); + with_default(subscriber, || { + tracing::span!(Level::TRACE, "my_span", my_field = %"hello world"); + }); + + handle.assert_finished(); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn debug_shorthand() { + let (subscriber, handle) = subscriber::mock() + .new_span( + span::mock().named("my_span").with_field( + field::mock("my_field") + .with_value(&debug("hello world")) + .only(), + ), + ) + .done() + .run_with_handle(); + with_default(subscriber, || { + tracing::span!(Level::TRACE, "my_span", my_field = ?"hello world"); + }); + + handle.assert_finished(); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn both_shorthands() { + let (subscriber, handle) = subscriber::mock() + .new_span( + span::mock().named("my_span").with_field( + field::mock("display_field") + .with_value(&display("hello world")) + .and(field::mock("debug_field").with_value(&debug("hello world"))) + .only(), + ), + ) + .done() + .run_with_handle(); + with_default(subscriber, || { + tracing::span!(Level::TRACE, "my_span", display_field = %"hello world", debug_field = ?"hello world"); + }); + + handle.assert_finished(); +} diff --git a/third_party/rust/tracing/tests/subscriber.rs b/third_party/rust/tracing/tests/subscriber.rs new file mode 100644 index 000000000000..15557c107f99 --- /dev/null +++ b/third_party/rust/tracing/tests/subscriber.rs @@ -0,0 +1,130 @@ +// These tests require the thread-local scoped dispatcher, which only works when +// we have a standard library. The behaviour being tested should be the same +// with the standard lib disabled. +// +// The alternative would be for each of these tests to be defined in a separate +// file, which is :( +#![cfg(feature = "std")] +use tracing::{ + field::display, + span::{Attributes, Id, Record}, + subscriber::{with_default, Interest, Subscriber}, + Event, Level, Metadata, +}; + +use tracing_mock::*; + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn event_macros_dont_infinite_loop() { + // This test ensures that an event macro within a subscriber + // won't cause an infinite loop of events. + struct TestSubscriber; + impl Subscriber for TestSubscriber { + fn register_callsite(&self, _: &Metadata<'_>) -> Interest { + // Always return sometimes so that `enabled` will be called + // (which can loop). 
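+            // (`Interest::always()` would skip the per-event `enabled` check,
+            // and `Interest::never()` would disable the callsite entirely, so
+            // neither would exercise the recursion this test guards against.)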
+ Interest::sometimes() + } + + fn enabled(&self, meta: &Metadata<'_>) -> bool { + assert!(meta.fields().iter().any(|f| f.name() == "foo")); + tracing::event!(Level::TRACE, bar = false); + true + } + + fn new_span(&self, _: &Attributes<'_>) -> Id { + Id::from_u64(0xAAAA) + } + + fn record(&self, _: &Id, _: &Record<'_>) {} + + fn record_follows_from(&self, _: &Id, _: &Id) {} + + fn event(&self, event: &Event<'_>) { + assert!(event.metadata().fields().iter().any(|f| f.name() == "foo")); + tracing::event!(Level::TRACE, baz = false); + } + + fn enter(&self, _: &Id) {} + + fn exit(&self, _: &Id) {} + } + + with_default(TestSubscriber, || { + tracing::event!(Level::TRACE, foo = false); + }) +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn boxed_subscriber() { + let (subscriber, handle) = subscriber::mock() + .new_span( + span::mock().named("foo").with_field( + field::mock("bar") + .with_value(&display("hello from my span")) + .only(), + ), + ) + .enter(span::mock().named("foo")) + .exit(span::mock().named("foo")) + .drop_span(span::mock().named("foo")) + .done() + .run_with_handle(); + let subscriber: Box = Box::new(subscriber); + + with_default(subscriber, || { + let from = "my span"; + let span = tracing::span!( + Level::TRACE, + "foo", + bar = format_args!("hello from {}", from) + ); + span.in_scope(|| {}); + }); + + handle.assert_finished(); +} + +#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] +#[test] +fn arced_subscriber() { + use std::sync::Arc; + + let (subscriber, handle) = subscriber::mock() + .new_span( + span::mock().named("foo").with_field( + field::mock("bar") + .with_value(&display("hello from my span")) + .only(), + ), + ) + .enter(span::mock().named("foo")) + .exit(span::mock().named("foo")) + .drop_span(span::mock().named("foo")) + .event( + event::mock() + .with_fields(field::mock("message").with_value(&display("hello from my event"))), + ) + .done() + .run_with_handle(); + let subscriber: Arc = Arc::new(subscriber); + + // Test using a clone of the `Arc`ed subscriber + with_default(subscriber.clone(), || { + let from = "my span"; + let span = tracing::span!( + Level::TRACE, + "foo", + bar = format_args!("hello from {}", from) + ); + span.in_scope(|| {}); + }); + + with_default(subscriber, || { + tracing::info!("hello from my event"); + }); + + handle.assert_finished(); +} diff --git a/third_party/rust/urlencoding/.cargo-checksum.json b/third_party/rust/urlencoding/.cargo-checksum.json deleted file mode 100644 index 05ce65becc26..000000000000 --- a/third_party/rust/urlencoding/.cargo-checksum.json +++ /dev/null @@ -1 +0,0 @@ -{"files":{"Cargo.toml":"77e56c6def989a0342bf69140ae3b363c82e9a8d39fbdbe73001cae8c8f7cc4e","LICENSE":"5fe71cd9f72a6009711249fff7cb5d4d075cf9288a5eb52e125d5f8ad9ed8ce7","README.md":"b3ef57e4bb13adf74bf3e37b16a91909ae4a80686746311472eeb4feac63d1a2","benches/bench.rs":"20a525b93dbe07cec14c29ded08af5df5b73655d5a244d42271a4547a59f90ab","src/dec.rs":"c58cda03a625d0c9de5b95f2dcffd774a4bf74ddbe950ced7929b6ff460c8f44","src/enc.rs":"ba6c9541f8bcf079a1ba73b963819277f343522ba4e9e8e747f8f027c740b51f","src/lib.rs":"c0ce6f8841d48ad8bd9cd8c82f743bfbd9a01c55c13c87247cbf3db96883bcf2"},"package":"5a1f0175e03a0973cf4afd476bef05c26e228520400eb1fd473ad417b1c00ffb"} \ No newline at end of file diff --git a/third_party/rust/urlencoding/README.md b/third_party/rust/urlencoding/README.md deleted file mode 100644 index 8ef087b2f2cb..000000000000 --- a/third_party/rust/urlencoding/README.md +++ /dev/null @@ -1,37 
+0,0 @@ -# urlencoding - -[![Latest Version](https://img.shields.io/crates/v/urlencoding.svg)](https://lib.rs/crates/urlencoding) - -A tiny Rust library for doing URL percentage encoding and decoding. It percent-encodes everything except alphanumerics and `-`, `_`, `.`, `~`. - -When decoding `+` is not treated as a space. Error recovery from incomplete percent-escapes follows the [WHATWG URL standard](https://url.spec.whatwg.org/). - -## Usage - -To encode a string, do the following: - -```rust -use urlencoding::encode; - -fn main() { - let encoded = encode("This string will be URL encoded."); - println!("{}", encoded); - // This%20string%20will%20be%20URL%20encoded. -} -``` - -To decode a string, it's only slightly different: - -```rust -use urlencoding::decode; - -fn main() { - let decoded = decode("%F0%9F%91%BE%20Exterminate%21"); - println!("{}", decoded.unwrap()); - // 👾 Exterminate! -} -``` - -## License - -This project is licensed under the MIT license, Copyright (c) 2017 Bertram Truong. For more information see the `LICENSE` file. diff --git a/third_party/rust/urlencoding/benches/bench.rs b/third_party/rust/urlencoding/benches/bench.rs deleted file mode 100644 index b6dc18c4b17e..000000000000 --- a/third_party/rust/urlencoding/benches/bench.rs +++ /dev/null @@ -1,65 +0,0 @@ -#![feature(test)] -extern crate test; - -use urlencoding::*; -use test::Bencher; - -#[bench] -fn bench_enc_nop_short(b: &mut Bencher) { - b.iter(|| { - encode("hello") - }) -} -#[bench] -fn bench_enc_nop_long(b: &mut Bencher) { - b.iter(|| { - encode("Lorem-ipsum-dolor-sit-amet-consectetur-adipisicing-elit-sed-do-eiusmod-tempor-incididunt-ut-labore-et-dolore-magna-aliqua.Ut-enim-ad-minim-veniam-quis-nostrud\ - -exercitation-ullamco-laboris-nisi-ut-aliquip-ex-ea-commodo-consequat.Duis-aute-irure-dolor-in-reprehenderit-in-voluptate-velit-esse-cillum-dolore-eu-fugiat-nulla\ - -pariatur.Excepteur-sint-occaecat-cupidatat-non-proident-sunt-in-culpa-qui-officia-deserunt-mollit-anim-id-est-laborum.") - }) -} - -#[bench] -fn bench_dec_nop_short(b: &mut Bencher) { - b.iter(|| { - decode("hello") - }) -} -#[bench] -fn bench_dec_nop_long(b: &mut Bencher) { - b.iter(|| { - decode("Lorem-ipsum-dolor-sit-amet-consectetur-adipisicing-elit-sed-do-eiusmod-tempor-incididunt-ut-labore-et-dolore-magna-aliqua.Ut-enim-ad-minim-veniam-quis-nostrud\ - -exercitation-ullamco-laboris-nisi-ut-aliquip-ex-ea-commodo-consequat.Duis-aute-irure-dolor-in-reprehenderit-in-voluptate-velit-esse-cillum-dolore-eu-fugiat-nulla\ - -pariatur.Excepteur-sint-occaecat-cupidatat-non-proident-sunt-in-culpa-qui-officia-deserunt-mollit-anim-id-est-laborum.") - }) -} - -#[bench] -fn bench_enc_chg_short(b: &mut Bencher) { - b.iter(|| { - encode("he!!o") - }) -} -#[bench] -fn bench_enc_chg_long(b: &mut Bencher) { - b.iter(|| { - encode("Lorem ipsum dolor sit amet consectetur adipisicing elit sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.Ut enim ad minim veniam quis nostrud\ - exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla\ - pariatur. 
Excepteur sint occaecat cupidatat non proident sunt in culpa qui officia deserunt mollit anim id est laborum.") - }) -} - -#[bench] -fn bench_dec_chg_short(b: &mut Bencher) { - b.iter(|| { - decode("he%26%26o") - }) -} -#[bench] -fn bench_dec_chg_long(b: &mut Bencher) { - b.iter(|| { - decode("Lorem%20ipsum%20dolor%20sit%20amet%20consectetur%20adipisicing%20elit%20sed%20do%20eiusmod%20tempor%20incididunt%20ut%20labore%20et%20dolore%20magna%20aliqua.Ut%20enim%20ad%20minim%20veniam%20quis%20nostrud\ - %20exercitation%20ullamco%20laboris%20nisi%20ut%20aliquip%20ex%20ea%20commodo%20consequat.Duis%20aute%20irure%20dolor%20in%20reprehenderit%20in%20voluptate%20velit%20esse%20cillum%20dolore%20eu%20fugiat%20nulla\ - %20pariatur.Excepteur%20sint%20occaecat%20cupidatat%20non%20proident%20sunt%20in%20culpa%20qui%20officia%20deserunt%20mollit%20anim%20id%20est%20laborum.") - }) -} diff --git a/third_party/rust/urlencoding/src/dec.rs b/third_party/rust/urlencoding/src/dec.rs deleted file mode 100644 index 2c4f1971c48a..000000000000 --- a/third_party/rust/urlencoding/src/dec.rs +++ /dev/null @@ -1,100 +0,0 @@ -use std::borrow::Cow; -use std::error::Error; -use std::fmt::{self, Display}; -use std::string::FromUtf8Error; - -#[inline] -pub(crate) fn from_hex_digit(digit: u8) -> Option { - match digit { - b'0'..=b'9' => Some(digit - b'0'), - b'A'..=b'F' => Some(digit - b'A' + 10), - b'a'..=b'f' => Some(digit - b'a' + 10), - _ => None, - } -} - -/// Decode percent-encoded string assuming UTF-8 encoding. -/// -/// Unencoded `+` is preserved literally, and _not_ changed to a space. -#[inline] -pub fn decode(urlencoded: &str) -> Result { - let data = urlencoded.as_bytes(); - String::from_utf8(decode_binary(data).into_owned()) - .map_err(|error| FromUrlEncodingError::Utf8CharacterError {error}) -} - -/// Decode percent-encoded string as binary data, in any encoding. -/// -/// Unencoded `+` is preserved literally, and _not_ changed to a space. -pub fn decode_binary(mut data: &[u8]) -> Cow<[u8]> { - let mut out: Vec = Vec::with_capacity(data.len()); - loop { - let mut parts = data.splitn(2, |&c| c == b'%'); - // first the decoded non-% part - out.extend_from_slice(parts.next().unwrap()); - // then decode one %xx - match parts.next() { - None => { - if out.is_empty() { - // avoids utf-8 check - return data.into(); - } - break; - }, - Some(rest) => match rest.get(0..2) { - Some(&[first, second]) => match from_hex_digit(first) { - Some(first_val) => match from_hex_digit(second) { - Some(second_val) => { - out.push((first_val << 4) | second_val); - data = &rest[2..]; - }, - None => { - out.extend_from_slice(&[b'%', first]); - data = &rest[1..]; - }, - }, - None => { - out.push(b'%'); - data = rest; - }, - }, - _ => { - // too short - out.push(b'%'); - out.extend_from_slice(rest); - break; - }, - }, - }; - } - Cow::Owned(out) -} - -/// Error when decoding invalid UTF-8 -#[derive(Debug)] -pub enum FromUrlEncodingError { - /// Not used. 
Exists for backwards-compatibility only - UriCharacterError { character: char, index: usize }, - /// Percent-encoded string contained bytes that can't be expressed in UTF-8 - Utf8CharacterError { error: FromUtf8Error }, -} - -impl Error for FromUrlEncodingError { - fn source(&self) -> Option<&(dyn Error + 'static)> { - match self { - FromUrlEncodingError::UriCharacterError {character: _, index: _} => None, - FromUrlEncodingError::Utf8CharacterError {error} => Some(error) - } - } -} - -impl Display for FromUrlEncodingError { - fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { - match self { - FromUrlEncodingError::UriCharacterError {character, index} => - write!(f, "invalid URI char [{}] at [{}]", character, index), - FromUrlEncodingError::Utf8CharacterError {error} => - write!(f, "invalid utf8 char: {}", error) - } - } -} diff --git a/third_party/rust/urlencoding/src/enc.rs b/third_party/rust/urlencoding/src/enc.rs deleted file mode 100644 index 26a01d7951c9..000000000000 --- a/third_party/rust/urlencoding/src/enc.rs +++ /dev/null @@ -1,137 +0,0 @@ -use std::str; -use std::fmt; -use std::borrow::Cow; -use std::io; - -/// Wrapper type that implements `Display`. Encodes on the fly, without allocating. -/// Percent-encodes every byte except alphanumerics and `-`, `_`, `.`, `~`. Assumes UTF-8 encoding. -/// -/// ```rust -/// use urlencoding::Encoded; -/// format!("{}", Encoded("hello!")); -/// ``` -#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash, Ord, PartialOrd)] -#[repr(transparent)] -pub struct Encoded(pub Str); - -impl> Encoded { - /// Long way of writing `Encoded(data)` - /// - /// Takes any string-like type or a slice of bytes, either owned or borrowed. - #[inline(always)] - pub fn new(string: Str) -> Self { - Self(string) - } - - #[inline(always)] - pub fn to_str(&self) -> Cow { - encode_binary(self.0.as_ref()) - } - - /// Perform urlencoding to a string - #[inline] - #[allow(clippy::inherent_to_string_shadow_display)] - pub fn to_string(&self) -> String { - self.to_str().into_owned() - } - - /// Perform urlencoding into a writer - #[inline] - pub fn write(&self, writer: &mut W) -> io::Result<()> { - encode_into(self.0.as_ref(), false, |s| writer.write_all(s.as_bytes()))?; - Ok(()) - } - - /// Perform urlencoding into a string - #[inline] - pub fn append_to(&self, string: &mut String) { - append_string(&self.0.as_ref(), string, false); - } -} - -impl> fmt::Display for Encoded { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - encode_into(self.0.as_ref(), false, |s| f.write_str(s))?; - Ok(()) - } -} - -/// Percent-encodes every byte except alphanumerics and `-`, `_`, `.`, `~`. Assumes UTF-8 encoding. -#[inline] -pub fn encode(data: &str) -> String { - encode_binary(data.as_bytes()).into_owned() -} - -/// Percent-encodes every byte except alphanumerics and `-`, `_`, `.`, `~`. 
-#[inline] -pub fn encode_binary(data: &[u8]) -> Cow { - // add maybe extra capacity, but try not to exceed allocator's bucket size - let mut escaped = String::with_capacity(data.len() | 15); - let unmodified = append_string(data, &mut escaped, true); - if unmodified { - return Cow::Borrowed(unsafe { - // encode_into has checked it's ASCII - str::from_utf8_unchecked(data) - }); - } - Cow::Owned(escaped) -} - -fn append_string(data: &[u8], escaped: &mut String, may_skip: bool) -> bool { - encode_into(data, may_skip, |s| Ok::<_, std::convert::Infallible>(escaped.push_str(s))).unwrap() -} - -fn encode_into(mut data: &[u8], may_skip_write: bool, mut push_str: impl FnMut(&str) -> Result<(), E>) -> Result { - let mut pushed = false; - loop { - // Fast path to skip over safe chars at the beginning of the remaining string - let ascii_len = data.iter() - .take_while(|&&c| matches!(c, b'0'..=b'9' | b'A'..=b'Z' | b'a'..=b'z' | b'-' | b'.' | b'_' | b'~')).count(); - - let (safe, rest) = if ascii_len >= data.len() { - if !pushed && may_skip_write { - return Ok(true); - } - (data, &[][..]) // redundatnt to optimize out a panic in split_at - } else { - data.split_at(ascii_len) - }; - pushed = true; - if !safe.is_empty() { - push_str(unsafe { str::from_utf8_unchecked(safe) })?; - } - if rest.is_empty() { - break; - } - - match rest.split_first() { - Some((byte, rest)) => { - let enc = &[b'%', to_hex_digit(byte >> 4), to_hex_digit(byte & 15)]; - push_str(unsafe { str::from_utf8_unchecked(enc) })?; - data = rest; - } - None => break, - }; - } - Ok(false) -} - -#[inline] -fn to_hex_digit(digit: u8) -> u8 { - match digit { - 0..=9 => b'0' + digit, - 10..=255 => b'A' - 10 + digit, - } -} - -#[test] -fn lazy_writer() { - let mut s = "he".to_string(); - Encoded("llo").append_to(&mut s); - assert_eq!("hello", s); - - assert_eq!("hello", Encoded("hello").to_string()); - assert_eq!("hello", format!("{}", Encoded("hello"))); - assert_eq!("hello", Encoded("hello").to_str()); - assert!(matches!(Encoded("hello").to_str(), Cow::Borrowed(_))); -} diff --git a/third_party/rust/urlencoding/src/lib.rs b/third_party/rust/urlencoding/src/lib.rs deleted file mode 100644 index 05d53a79bc88..000000000000 --- a/third_party/rust/urlencoding/src/lib.rs +++ /dev/null @@ -1,82 +0,0 @@ -mod enc; -pub use enc::encode; -pub use enc::encode_binary; -pub use enc::Encoded; - -mod dec; -pub use dec::decode; -pub use dec::decode_binary; -pub use dec::FromUrlEncodingError; - -#[cfg(test)] -mod tests { - use super::encode; - use super::decode; - use crate::dec::from_hex_digit; - - #[test] - fn it_encodes_successfully() { - let expected = "this%20that"; - assert_eq!(expected, encode("this that")); - } - - #[test] - fn it_encodes_successfully_emoji() { - let emoji_string = "👾 Exterminate!"; - let expected = "%F0%9F%91%BE%20Exterminate%21"; - assert_eq!(expected, encode(emoji_string)); - } - - #[test] - fn it_decodes_successfully() { - let expected = String::from("this that"); - let encoded = "this%20that"; - assert_eq!(expected, decode(encoded).unwrap()); - } - - #[test] - fn it_decodes_successfully_emoji() { - let expected = String::from("👾 Exterminate!"); - let encoded = "%F0%9F%91%BE%20Exterminate%21"; - assert_eq!(expected, decode(encoded).unwrap()); - } - - #[test] - fn it_decodes_unsuccessfully_emoji() { - let bad_encoded_string = "👾 Exterminate!"; - - assert_eq!(bad_encoded_string, decode(bad_encoded_string).unwrap()); - } - - - #[test] - fn misc() { - assert_eq!(3, from_hex_digit(b'3').unwrap()); - assert_eq!(10, 
from_hex_digit(b'a').unwrap()); - assert_eq!(15, from_hex_digit(b'F').unwrap()); - assert_eq!(None, from_hex_digit(b'G')); - assert_eq!(None, from_hex_digit(9)); - - assert_eq!("pureascii", encode("pureascii")); - assert_eq!("pureascii", decode("pureascii").unwrap()); - assert_eq!("", encode("")); - assert_eq!("", decode("").unwrap()); - assert_eq!("%26a%25b%21c.d%3Fe", encode("&a%b!c.d?e")); - assert_eq!("%00", encode("\0")); - assert_eq!("%00x", encode("\0x")); - assert_eq!("x%00", encode("x\0")); - assert_eq!("x%00x", encode("x\0x")); - assert_eq!("aa%00%00bb", encode("aa\0\0bb")); - assert_eq!("\0", decode("\0").unwrap()); - assert!(decode("%F0%0F%91%BE%20Hello%21").is_err()); - assert_eq!("this that", decode("this%20that").unwrap()); - assert_eq!("this that%", decode("this%20that%").unwrap()); - assert_eq!("this that%2", decode("this%20that%2").unwrap()); - assert_eq!("this that%%", decode("this%20that%%").unwrap()); - assert_eq!("this that%2%", decode("this%20that%2%").unwrap()); - assert_eq!("this%2that", decode("this%2that").unwrap()); - assert_eq!("this%%2that", decode("this%%2that").unwrap()); - assert_eq!("this%2x&that", decode("this%2x%26that").unwrap()); - // assert_eq!("this%2&that", decode("this%2%26that").unwrap()); - } -} diff --git a/third_party/rust/warp/.cargo-checksum.json b/third_party/rust/warp/.cargo-checksum.json index 8ca12fae6c2f..55fed83adfd4 100644 --- a/third_party/rust/warp/.cargo-checksum.json +++ b/third_party/rust/warp/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"CHANGELOG.md":"e0595a54a962cecf3659d910bfa3a3bd556914703d4ed17cc55f346b7a567a9b","Cargo.lock":"8af4c9eb0295c9a9dcc3e7032b425a6153a3437bb5471122377ba3d4476e0b3c","Cargo.toml":"1472d768262d415bb7c1eed803806a06247b95388ef86a04d4709f73da2d5cce","LICENSE":"f9e46cb84d28dce0b6c08e84496cde893896a2816f4ffaac23e6552b7daf20f0","README.md":"46eb8f4ad84b013b7de89f639d71e1a2a2369c2b0ad313e3a985acaff77e0d0a","examples/README.md":"1330dfe1aae7e4237eb8abaa25da8920ed560daefca27450e6d4f3d700377537","examples/autoreload.rs":"9f416c2d9d377c1cb61b37580f6088bbc213769f11045a5443b792ab41f68c10","examples/body.rs":"57eb99b3a59c33474c5d5c7c22964bea66c663f525c755924ed75eca8fba4aa1","examples/compression.rs":"3bc90f12200fed9374cdd7f23f2adae770d8a283740e3441731571dbb5e2217f","examples/dir.rs":"3e1bd9abb6e317bd64433dd3440636187ec258dc8a72f5e56ee0926269256f95","examples/dir/another.html":"f58abf6f1a7b233b58b537b981b10be2dc64be3ac66f139230bed1f80f9f5277","examples/dir/index.html":"52f5b947733c9d2b7900fa8e613c123455bdbf7a25aca8e30aba56f75a837e65","examples/dyn_reply.rs":"2b85c26246322e34ba1ddce317f83e3a0fdaff295c7730f97e6c961475e93ebd","examples/file.rs":"a155c1fbeedaecf8e926a97689b2ad45f711d20f87e4c29ff84a4b1dacaeedd5","examples/futures.rs":"e80901b15731d4555d7d4290f5779e7361497900661431288ac23c28925269d0","examples/handlebars_template.rs":"f0acb19640e2a2894e08740ff5d3e12a9bf0b93d88a9fb313ef2b1749d59969b","examples/headers.rs":"651cd6425219d1d09cfb4f1114fefc2dff4435070b90f7c1d1696c95c96913b5","examples/hello.rs":"9ca9f197e2365c21729bbe74fe07590c615aaf87f07aece51b5640c97a453196","examples/rejections.rs":"ec09f48dd164427eb02cad28f9688321a24dd7c323ddcb918a58bef371f98529","examples/returning.rs":"e19e8df9ec34d7eeb60f6053e5b1f51043159dccb7f5e7148b69597eff2061ea","examples/routing.rs":"0511a80ad1874d9fda302479fb50a861d5a546da19e1f06a3befff4819fe56e1","examples/sse.rs":"4adae8335a9d3757bf4b9b4477358df299c339e958ae427ab853dc2cadbe155c","examples/sse_chat.rs":"31b33b517f72f3f34232ee39781e23ae6d4a97ac90a289a8a1eb3f91d7da0841","examples/tls.
rs":"450d3e7e7da94d29a9c8e48dc0775f27931886eafd02e137cbfb3e7924edc698","examples/tls/cert.pem":"6b974e5654edca3664916613c9f932a64990cff3f604df5369275c35f64d36a2","examples/tls/key.rsa":"ffe615f53e98682bc470ae6fa964ce34a383b4509ae7bb9a7ee2ece1b1bdc7e3","examples/todos.rs":"0bc7575d7acc942635f1b7df75751ff9d2dc1f6cc73fd632036f773ce859b8be","examples/unix_socket.rs":"89128846ae31a7feef00d092edfd989dffba4647caa78f43f750510a5a5ff9bf","examples/websockets.rs":"a3677b311813dc68e7d1c1ad91779b348942cedd47db4b13a4158cdf54ce4411","examples/websockets_chat.rs":"50b3f6cc0036a83d67361889179f6f8948d2f6a80aa1ab2972d5e89f6d275941","src/error.rs":"221d8d1180ca31570416ef9593483a27fd343029fc16eaed842131671c16a754","src/filter/and.rs":"aed85d45ac92ad4e80c767340bfed436a106e80e82f6230074abfa578c60c93f","src/filter/and_then.rs":"56146227d0cb2c7d519b056a4dcb4dbb8d7245ec9fd3b0e789fa940dadf35b98","src/filter/boxed.rs":"160536a43c411110d2420a9215d6e55bd4d94323570e013ba4ba168103a8c716","src/filter/map.rs":"df0d0a6012eaef124bb7f1b21ec7a4507c582438c51c88cec8759538c61fcd86","src/filter/map_err.rs":"1bdec8f691482a531a1b85c9eaf96fddc86f076c14f5f8540fea95cadfac92eb","src/filter/mod.rs":"fe82a2ab2dd39616b2841a275df28ce9d9e5c4e7ef46c357e7d8da5ab2ca1d6e","src/filter/or.rs":"642f1a936c3af5429bb6f15aedc505906622dd7cf2a6782cee72962cf7045db6","src/filter/or_else.rs":"8d736ff444af09846e797f0523ffbc904e8ae4466e1be16f28ab5eacac0286b8","src/filter/recover.rs":"6d65299154a7305057b70e01addce04f5afa8e563c7f655de6f310bfaeaa20af","src/filter/service.rs":"78583f2f028025dabda0636d6a59a095faad9a3d85e32fe0defe0a5d9b8b4a99","src/filter/unify.rs":"8f56fa9fe85dd68ed84b8070705f50d3f53de74e6f0dfec4fa74e1458c2a31e0","src/filter/untuple_one.rs":"696d475fd1e3f2c50b0be519196462ff7648953d5aed8cae057bc81790479f3d","src/filter/wrap.rs":"44d02002e06f9661fd5d2fa56eccd11129dcf68f7a1795f2e0e94c7219c5e873","src/filters/addr.rs":"be634906b34e26c35dc4a62b382fc34aa4a363778e77a085a0dc6be2f7efac99","src/filters/any.rs":"2c993a9866cd5e545f6dcddf27d3a5394a3484eff6c4a839af4e50f90b43b95f","src/filters/body.rs":"f1ccea8b6c4ca3db27b1cdcc85ded2d5ba8482d952738efb64b293f9dadecde6","src/filters/compression.rs":"e30fff6ff0da8195be2092bad0ded94a3fd1291346f16729038e5bcf4a8ea534","src/filters/cookie.rs":"f785fca4d0e26af4c0807dbe440f42f7cbba4f09c87e760f1911c3f5cf83b927","src/filters/cors.rs":"14c9567cd2412d3215aaa9f342737e484e50913978e5eebc3cf96f86d719e815","src/filters/ext.rs":"0dfe1218f06c723663a649c2a0e36fe32be02191e1ef7aaeecdceede967520ff","src/filters/fs.rs":"0328510f8bd5d9b8cc044564a1e2c40c35237191d1e0cce377854b7194566f9f","src/filters/header.rs":"131aeddf93315a8181a514c79a9091855f9732a1a508e88c4da8e9db23684054","src/filters/log.rs":"704c52d151bfe147c00c640967be1b4b7c944a43cc5190328ff7302151e9c9c8","src/filters/method.rs":"b075fbb566b9a8daa62a26da55fff9aa70beb94d3a1492b5edac0c007e9b8a79","src/filters/mod.rs":"d547fa654eecd390f6c87c76863f8748f357d387552a34230b4f648c940dcc82","src/filters/multipart.rs":"a08f8a1621524ca027d10fc72d12310133245222864fcf94cea6aa9aaf127b7d","src/filters/path.rs":"1d3fdfaa81bf02c9e17f7ea58a67c8c725050000ef688778781d9e5c17be6236","src/filters/query.rs":"d98b03128dfce822844d750870b8c1cb75a4edc8947313182031c6139db8f973","src/filters/reply.rs":"6102b63e142dfe2c55bdd37f1b3e9d25852a1ed7f1ec2ec4fec91f478cf672a4","src/filters/sse.rs":"bce025464b7eacd2ec4dd45351ff722581479dd950d4225cab3727f79120a2af","src/filters/ws.rs":"adfada3888f1a39ead210f7066c25a62052c2b2bda8ca59e75b5449391356a54","src/generic.rs":"66be231b0cc92c41379f81d23343fd971941e3588f833f537ae5c97e4
45828e0","src/lib.rs":"6f2d14f5fe77ff2d6b097df8adfac32f32b796cee8e60aac7034ea289683a637","src/redirect.rs":"190ab1011c2771fc405a32047399cfce90b1864baaaa08d4df4f047dd9814c7b","src/reject.rs":"78da91ec2aa93c2293aced66ba921a160f0cc65da7e13fdc3b7394ea10365088","src/reply.rs":"771009494bbf2c16b7ccb09a754e4617ed71ee8168d8566fd8d49bd9ebac14f7","src/route.rs":"717c035d6e6ea98bc111524e7ed97fb3041110b9db76f4954b802be224c14cc1","src/server.rs":"17fedf40babb5ba394d151dc5c6a532068c0e026ceb2cb209ac4afdc8ad3319e","src/service.rs":"4564ec95e98a2314f73df24582ca8f6ec96cc8eda90fb5a5d1d83a9d5c841b86","src/test.rs":"58ba46b52b95b63c166da9dbcaa84aac04f31a13b2e8d621f0bb7ac97ba0f8c8","src/tls.rs":"00e432087fd1fb1c94dcb01841d97c6f3e4ca54037eb483912e46acfcd3a629f","src/transport.rs":"338211e665e46ffd32e161292dd0dd89d9a5d2f814a0641273c710be1b3e8fdf","tests/addr.rs":"2946596c8c5eb71dbb7339492d1805d12b5f9941b9d855c3dc6bd17597687296","tests/body.rs":"371d58394da468aab9f2412e73b44de615a517c3fc94986d25d6a246ab461e6d","tests/cookie.rs":"3fddd2d109da20ceb9d6c389facda7bbf4d066d9a32f639c4dad9679590dcb93","tests/cors.rs":"07798bf7f30f73f19fae2b9cf7b9f94228d490bdc52767dc3413863f635c6af6","tests/ext.rs":"93d6527288f71ee20b63f6a47f616f055735373b0f203f0863f27c2b65fd8991","tests/filter.rs":"76c05031f1e0d6271ff2ac7eb596b4c97ffd0fe93d4d49af4783cc4d862dae25","tests/fs.rs":"d7ca82ec2f074e67844d7013960cf49ddeb201fe8a0b023e6d53028702809bbc","tests/header.rs":"78d7b4fd80025694cde65010cf04e4cc23ad4ac91fff3cb527542c278e6cfe4b","tests/method.rs":"6ae1f188b06b07822bbd97f671886259ee8e7008313449ec04604c8f396cf59b","tests/multipart.rs":"62cb1ded7cc1805c925162b9cc8598a7ec85e9d7ae36df5cac367110b4e951ac","tests/path.rs":"84603bc90e4016ca1fa51d485622f5bf968a6eabc8cd46ea5ab95e19fd807104","tests/query.rs":"993133adc0e47eea4defea60dbca5365ad54a7f49069a1b423b9642d9fda6e3b","tests/redirect.rs":"25e0c18fcc7bf1c0393708e0efc10f4508093d1b355371a66b9a59e2eb51ed13","tests/reply_with.rs":"35fdfe9653ffab0776fe9fb65e231f9ea647c9f391b17794010adbcbd5009e65","tests/ws.rs":"2b9192216d9612e0a9ea0c868b0973194e4ada83d2034f6028e876a9c3f75e09"},"package":"0e95175b7a927258ecbb816bdada3cc469cb68593e7940b96a60f4af366a9970"} \ No newline at end of file 
+{"files":{"CHANGELOG.md":"f03ee2b1757d7776fd66437d362799191d40e3c0ca186f73c2195ba033b56902","Cargo.lock":"8465b6a6d0614ba6a66b8b9ec583a067392dacae1eecfc8b128d5e0865a68166","Cargo.toml":"52e93557696dab888fcb1fd422f9ee1d9183495232bcf446acf85c5391217ed1","LICENSE":"f9e46cb84d28dce0b6c08e84496cde893896a2816f4ffaac23e6552b7daf20f0","README.md":"d0cb27a8b2be3f852a5d1e0dca6b114a204936ad4ba76c7ec7424f4b0c88a1c3","examples/README.md":"aa8613c11e1528037a4b2517c8261eaee531f2d0a109bd9784a3f2633e0147b8","examples/autoreload.rs":"c4ca42f4e44917e69da0ee27553d8ba159c0bc50b9930d30caa07a3bb945e721","examples/body.rs":"57eb99b3a59c33474c5d5c7c22964bea66c663f525c755924ed75eca8fba4aa1","examples/compression.rs":"3bc90f12200fed9374cdd7f23f2adae770d8a283740e3441731571dbb5e2217f","examples/custom_methods.rs":"40cb0d2cf0ec43f55bd61863ec552d526ca7d94195624d4d19e789412a85b17c","examples/dir.rs":"3e1bd9abb6e317bd64433dd3440636187ec258dc8a72f5e56ee0926269256f95","examples/dir/another.html":"f58abf6f1a7b233b58b537b981b10be2dc64be3ac66f139230bed1f80f9f5277","examples/dir/index.html":"52f5b947733c9d2b7900fa8e613c123455bdbf7a25aca8e30aba56f75a837e65","examples/dyn_reply.rs":"2b85c26246322e34ba1ddce317f83e3a0fdaff295c7730f97e6c961475e93ebd","examples/file.rs":"a155c1fbeedaecf8e926a97689b2ad45f711d20f87e4c29ff84a4b1dacaeedd5","examples/futures.rs":"d5f8a58de075663f20686590837d6dea4355df754e9c1425caaf1c5547b77f9d","examples/handlebars_template.rs":"cf13236fbe9ad294902c0da0c21eda58f9e498b9423ab2a34d54052102a859ba","examples/headers.rs":"651cd6425219d1d09cfb4f1114fefc2dff4435070b90f7c1d1696c95c96913b5","examples/hello.rs":"9ca9f197e2365c21729bbe74fe07590c615aaf87f07aece51b5640c97a453196","examples/query_string.rs":"be67abd54d67e8a16406957360b33349f4880cd45953b879e63fa6fe2a440515","examples/rejections.rs":"a6b811a70378b245afa9179d5829553e1ae2a0ec3fe6ec7288058a1aa789e1c1","examples/returning.rs":"e19e8df9ec34d7eeb60f6053e5b1f51043159dccb7f5e7148b69597eff2061ea","examples/routing.rs":"7663edfff0bf04d4314453570aa7427dd0a5ffcfb3f4a116852afb6c2d13af76","examples/sse.rs":"d1bebebd1403d601d23231cf9520fdf565aabde8352698675966877d27911952","examples/sse_chat.rs":"9455fc09f7808cc9581d194347186c0447090df9a793f3780c75912ba84a42b6","examples/tls.rs":"4c704782caa31e6b147b0fe969935ab9ea256fad52e74093fb77ef9c4f32902f","examples/tls/cert.pem":"6b974e5654edca3664916613c9f932a64990cff3f604df5369275c35f64d36a2","examples/tls/key.rsa":"ffe615f53e98682bc470ae6fa964ce34a383b4509ae7bb9a7ee2ece1b1bdc7e3","examples/todos.rs":"5d208cfb13cafd482c238fdec54666714717f8ded7d3c248038f166beee6a264","examples/tracing.rs":"2d47c96e4ac041dfd6c9b6d2a17fc9af1bd72b69c405d184ec07085491c6ac0e","examples/unix_socket.rs":"9bebb31119df075001dcdd51a2be6ad5c1d94741b5814d0016e3ed7229154f14","examples/websockets.rs":"d7eeb5dd7b936248d7fb3fd56dc4d51aa8a07d7403a82bd11b4a69d7c31d3672","examples/websockets_chat.rs":"42c281d3c189416ab54d34253e2d93bcb3f19c0d93d30feb9b372c5f9433a1dd","examples/wrapping.rs":"c0ad14ddbdd531023df48a96ddcf680eecb741a941c816cca6c6796de9ff146f","src/error.rs":"a3b8f7ba1bb3353c0894c9bb63edd38525a120067135775b650e5bfc36046e03","src/filter/and.rs":"519cef248f5e7166bb62e0bdb3ef5194997b8a9b3fa8ded057748639e0da5de5","src/filter/and_then.rs":"f3903d6473483355d010b2eeafc965d2998bdaaa0a47c090dc7681710ec2e4fb","src/filter/boxed.rs":"9f41f214da36b673ba6234cfccba597acafbb40a02181304f9f379dc7d7643b1","src/filter/map.rs":"7b239aaf5dc129b380530a338d091bb68ad336cfcb74d89a2cbe1ea2560ab155","src/filter/map_err.rs":"37b4a0c61de36a531e6b1fef588ce87b17d2d312ace010fb4ecd2406d4
936231","src/filter/mod.rs":"56bb18280f7ec89a824a5214458c2e6c5f90573c6327f096aa239bf49e9e7496","src/filter/or.rs":"0bc2d2c6dad60fc0f60609cd884db08e3a468de5d08f068b87221def3e353142","src/filter/or_else.rs":"d1c18b3d09c8d71f6dbbf1f70dba26ec623222a5797bcbb2bef49239f2447b01","src/filter/recover.rs":"9ba28ad03861b57eee7c6ecb53ad57dfc8c9f1bf321fcc5c7f0a9c3bd07c04bb","src/filter/service.rs":"8f2ba4e414655e4800f85daa090ba0f4758943ede23ab0ab09c068608d4fe14b","src/filter/then.rs":"d5c568c47cba20f8683908a194f9451ff4265f4c76225452decb155d522e8b14","src/filter/unify.rs":"41616e079082f959444c05325176f5d761850b023a73601ff259d6a897247b9a","src/filter/untuple_one.rs":"c88f292545646ad7bd11b493b24ad1e51a926bfbe45998281e735fa133dbe6aa","src/filter/wrap.rs":"a5783e8451db2818da5a7b010390d5d77865590fcbb3a2a6ad0a1f5f4c158fae","src/filters/addr.rs":"52d3336a046620e645feb2e15cb743418889a3a7d5d5337ac5cd806dd0ade4ed","src/filters/any.rs":"ad57b333abad579c3822338bd44035f44902e5a9bbf167fe44f057cffb723f07","src/filters/body.rs":"565758830fa5accc63825f32d80e87e70d2b7a0f7eb3ad57863b97c4565fde9e","src/filters/compression.rs":"2e6428e546791f332be8fcb42b6a4c7df2c389f4d002848a206a0f0ee2ed5cd1","src/filters/cookie.rs":"09082d991cf6a7fcf372dd83611234d1250711f1ff2e5d6392b7241820a6fc95","src/filters/cors.rs":"7fd739d8dea6a3e7f0b2c8640907ec4a01142f972933065eb41244f1638ac6b7","src/filters/ext.rs":"76dd310d51d29fa2531a21a947bfdb7fda744b9a71fd8c8104e428f1f6b8f735","src/filters/fs.rs":"4fff994f71b9f0a957a19d89b613ec573add8d82377a5613859322ed005836a4","src/filters/header.rs":"b02e37f47ffa86a53097127759a16fdbc0e09c5d02f961a15f5972a59a96fae9","src/filters/host.rs":"5cb5e207e934173b3299a30f427c19ae112dbd7613a770ceeda51ba0f38d845a","src/filters/log.rs":"29e0e243e0084b7d612b05369eb48482e957a207d8c651629bf217d05f747beb","src/filters/method.rs":"0a64e1c1c14ce82511cd99a489ff13eebaaacce04394a5eb282e5c43ff5a6e92","src/filters/mod.rs":"bd6d0af8fec10cf1cbf120a1dd4b271a129bfb09d4071526e865cb9e2d619a8b","src/filters/multipart.rs":"386b3c1ce184e702d47a480bb374fbf5eb48fb8942d2e80ece17e6e4083cf453","src/filters/path.rs":"d087f6f2f7cb869721a66ab16a5d52d69a0312a28ba4b349775a3f810b6a7e48","src/filters/query.rs":"7985b040b6f38252355d2d369b384d66bb4b3978012c47f2105d0b62e5221d3b","src/filters/reply.rs":"6102b63e142dfe2c55bdd37f1b3e9d25852a1ed7f1ec2ec4fec91f478cf672a4","src/filters/sse.rs":"7ce9362323a2d7a481fc72f429c5a45e433505645d29aa7672a21a21727173cf","src/filters/trace.rs":"f4b439b66a3dc3c9c7b07c03a56dc6bc83961c86df7b9b4cc4bcaa660c2e8e62","src/filters/ws.rs":"36c73d38a993a72403687039d5e41971fc0b49aa3ddb0a645dfa49c46ba94375","src/generic.rs":"a7afd6804059c16c4397028c85120bf1b29ce86bbfa134e3670291764a3ef257","src/lib.rs":"de974d87717c4f4bdeda154cede6d158c03ca976aa26142a9466a70661ae69eb","src/redirect.rs":"ab104a9c48e08ffeb93bff6c5e81174a378f6ee98b2dadd61e038641da5cf976","src/reject.rs":"547904fae45048ca05c8ce0203c81f8ea6115605c240dddefcb9c837cceb2140","src/reply.rs":"b45d7740903a47254d23ba23e1c72d1ed578515737a292a61ed7b03fba6976c0","src/route.rs":"1a246b76d481ac0184511d342ca0dae0754384221ef37445bcee28ee7cd40b06","src/server.rs":"f06cab83c7fe242a095df41f932c7f2916e44434085d7d0dd3b1a3d8b136a3fc","src/service.rs":"4564ec95e98a2314f73df24582ca8f6ec96cc8eda90fb5a5d1d83a9d5c841b86","src/test.rs":"aa7694d627290a3db9ce94dea2c7f53515e9ee6db5de109fff2ff29f8b9c726e","src/tls.rs":"a7bf3bbc3a12b377cd94936586253a0850f16965b6bbcd7f8b86b985f5eb056e","src/transport.rs":"6940d28bd3b1720e6e86676d46ace2855c78cd302ef167bad955f5367b022a67","tests/addr.rs":"2946596c8c5eb71dbb7339
492d1805d12b5f9941b9d855c3dc6bd17597687296","tests/body.rs":"595e25b2aabbdd6619da44a4b8cbb5c4f580c56450e7c614b8ac197bcbe32a29","tests/cookie.rs":"1bb5cdacddca5dc7028796d06bd1b7bf599bc81759a904215b61beda735bf2d1","tests/cors.rs":"9ee68d8212fdbd171bfa2636466db425cf7d7de32b6afdf387b2ba21569aca4f","tests/ext.rs":"93d6527288f71ee20b63f6a47f616f055735373b0f203f0863f27c2b65fd8991","tests/filter.rs":"76c05031f1e0d6271ff2ac7eb596b4c97ffd0fe93d4d49af4783cc4d862dae25","tests/fs.rs":"2656bdcfe76f90e30e044c6c464028e96d475464bee8acb2d5c46a3b76ac03fe","tests/header.rs":"78d7b4fd80025694cde65010cf04e4cc23ad4ac91fff3cb527542c278e6cfe4b","tests/host.rs":"712f883c133041ead3cec66379e85279547189ed9ac366796732cabf486114a4","tests/method.rs":"6ae1f188b06b07822bbd97f671886259ee8e7008313449ec04604c8f396cf59b","tests/multipart.rs":"03e366749f4dae82ea864d3da795d940163ba7c2b2214a1db36f66b3f4b9d655","tests/path.rs":"db1884d961d83a624f7c63c8de09d9196f10a21a11bb43005e2015aee68d7ccf","tests/query.rs":"993133adc0e47eea4defea60dbca5365ad54a7f49069a1b423b9642d9fda6e3b","tests/redirect.rs":"a292860964a3ad2edca4f43d1b462226a4ca4537a71aae31520857dcb662b33d","tests/reply_with.rs":"35fdfe9653ffab0776fe9fb65e231f9ea647c9f391b17794010adbcbd5009e65","tests/tracing.rs":"f5223781843d97f78e598ae580234567691afca38952b07cac161597749f6332","tests/ws.rs":"b130f4a58198641c0853269f1e21e1b1ba7acba10b114161d499881740621f8a"},"package":"3cef4e1e9114a4b7f1ac799f16ce71c14de5778500c5450ec6b7b920c55b587e"} \ No newline at end of file diff --git a/third_party/rust/warp/CHANGELOG.md b/third_party/rust/warp/CHANGELOG.md index 862f72559427..fd297c61d5d4 100644 --- a/third_party/rust/warp/CHANGELOG.md +++ b/third_party/rust/warp/CHANGELOG.md @@ -1,3 +1,58 @@ +### v0.3.2 (November 9, 2021) + +- **Features**: + - Add `Filter::then()`, which is like `Filter::map()` in that it's infallible, but is async like `Filter::and_then()`. + - Add `redirect::found()` reply helper that returns `302 Found`. + - Add `compression-brotli` and `compression-gzip` cargo features to enable only the compression you need. + - Allow `HEAD` requests to be served to `fs::dir()` filters. + - Allow `path!()` with no arguments. +- **Fixes**: + - Update private dependencies Tungstenite and Multipart. + - Replaces uses of `futures` with `futures-util`, which is a smaller dependency. + + +### v0.3.1 (March 24, 2021) + +- **Features**: + - Add `pong` constructor to websocket messages. + - Add `redirect::see_other` and `redirect::permanent` helpers. +- **Fixes**: + - Fix `fs` filters sometimes having an off-by-one error with range requests. + - Fix CORS to allow spaces when checking `Access-Control-Request-Headers`. + +## v0.3.0 (January 19, 2021) + +- **Features**: + - Add TLS client authentication support. + - Add TLS OCSP stapling support. + - Add `From` for `Rejection`. + - Add `close_frame` accessor to `ws::Message`. +- **Changes**: + - Update to Tokio v1. + - Update to Bytes v1. + - Update to hyper v0.14. + - Rework `sse` filter to be more like `ws`, with a single `Event` type and builder. + - Change `cookie` filter to extract a generic `FromStr` value. + + +### v0.2.5 (August 31, 2020) + +- **Features**: + - Add `wrap_fn`, which can be used to create a `Wrap` from a closure. These in turn are used with `Filter::with()`. + - Add `warp::host` filters to deal with `Host`/`:authority` headers. + - Relax some lifetime bounds on `Server`. +- **Fixes**: + - Fix panic when URI doesn't have a slash (for example, `CONNECT foo.bar`). 
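The v0.3.2 entry above introduces `Filter::then()`, which is infallible like `map()` but async like `and_then()`. A minimal sketch of how it might be used (an editor's illustration, not code from this patch; assumes warp 0.3.2 plus a tokio runtime with the `macros` feature, and the route/address below are arbitrary):

```rust
use warp::Filter;

#[tokio::main]
async fn main() {
    // GET /hello/<name> handled by an async, infallible closure via `then()`.
    // Unlike `and_then()`, the closure returns a plain value, not a Result.
    let hello = warp::path!("hello" / String)
        .then(|name: String| async move { format!("Hello, {}!", name) });

    warp::serve(hello).run(([127, 0, 0, 1], 3030)).await;
}
```

If the handler can fail and needs to reject the request, `and_then()` returning a `Result<_, warp::Rejection>` remains the appropriate combinator.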
+ +### v0.2.4 (July 20, 2020) + +- **Features**: + - Add `tracing` internals in place of `log` (log is still emitted for backwards compatibility). + - Add `warp::trace` module set of filters to customize `tracing` dianostics. + - Add `path` method to `warp::fs::File` reply. + - Add `source` implementation for `BodyDeserializeError`. + - Make `warp::ws::MissingConnectionUpgrade` rejection public. + ### v0.2.3 (May 19, 2020) - **Features**: diff --git a/third_party/rust/warp/Cargo.lock b/third_party/rust/warp/Cargo.lock index cd7186d22f80..bab6ace6165f 100644 --- a/third_party/rust/warp/Cargo.lock +++ b/third_party/rust/warp/Cargo.lock @@ -1,25 +1,27 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. +version = 3 + [[package]] -name = "adler32" -version = "1.0.4" +name = "adler" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d2e7343e7fc9de883d1b0341e0b13970f764c14101234857d2ddafa1cb1cac2" +checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" [[package]] name = "aho-corasick" -version = "0.7.10" +version = "0.7.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8716408b8bc624ed7f65d223ddb9ac2d044c0547b6fa4b0d554f3a9540496ada" +checksum = "1e37cfd5e7657ada45f742d6e99ca5788580b5c529dc78faf11ece6dc702656f" dependencies = [ "memchr", ] [[package]] name = "alloc-no-stdlib" -version = "2.0.1" +version = "2.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5192ec435945d87bc2f70992b4d818154b5feede43c09fb7592146374eac90a6" +checksum = "35ef4730490ad1c4eae5c4325b2a95f521d023e5c885853ff7aca0a6a1631db3" [[package]] name = "alloc-stdlib" @@ -31,17 +33,26 @@ dependencies = [ ] [[package]] -name = "async-compression" -version = "0.3.4" +name = "ansi_term" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae84766bab9f774e32979583ba56d6af8c701288c6dc99144819d5d2ee0b170f" +checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" +dependencies = [ + "winapi", +] + +[[package]] +name = "async-compression" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5443ccbb270374a2b1055fc72da40e1f237809cd6bb0e97e66d264cd138473a6" dependencies = [ "brotli", - "bytes", "flate2", "futures-core", "memchr", "pin-project-lite", + "tokio", ] [[package]] @@ -52,47 +63,26 @@ checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" dependencies = [ "hermit-abi", "libc", - "winapi 0.3.8", + "winapi", ] [[package]] name = "autocfg" -version = "0.1.7" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d49d90015b3c36167a20fe2810c5cd875ad504b39cff3d4eae7977e6b7c1cb2" - -[[package]] -name = "autocfg" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d" +checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" [[package]] name = "base64" -version = "0.10.1" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b25d992356d2eb0ed82172f5248873db5560c4721f564b13cb5193bda5e668e" -dependencies = [ - "byteorder", -] - -[[package]] -name = "base64" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b41b7ea54a0c9d92199de89e20e58d49f02f8e699814ef3fdf266f6f748d15c7" - 
-[[package]] -name = "base64" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53d1ccbaf7d9ec9537465a97bf19edc1a4e158ecb49fc16178202238c569cc42" +checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" [[package]] name = "bitflags" -version = "1.2.1" +version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "block-buffer" @@ -103,7 +93,16 @@ dependencies = [ "block-padding", "byte-tools", "byteorder", - "generic-array", + "generic-array 0.12.4", +] + +[[package]] +name = "block-buffer" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" +dependencies = [ + "generic-array 0.14.4", ] [[package]] @@ -117,9 +116,9 @@ dependencies = [ [[package]] name = "brotli" -version = "3.3.0" +version = "3.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f29919120f08613aadcd4383764e00526fc9f18b6c0895814faeed0dd78613e" +checksum = "71cb90ade945043d3d53597b2fc359bb063db8ade2bcffe7997351d0756e9d50" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", @@ -128,9 +127,9 @@ dependencies = [ [[package]] name = "brotli-decompressor" -version = "2.3.0" +version = "2.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a9f2b517b96b19d8f91c1ff5b1cf498e688850b32eae5d58e02d15c4d4fdc0c" +checksum = "59ad2d4653bf5ca36ae797b1f4bb4dbddb60ce49ca4aed8a2ce4829f60425b80" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", @@ -148,9 +147,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.3.0" +version = "3.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5356f1d23ee24a1f785a56d1d1a5f0fd5b0f6a0c0fb2412ce11da71649ab78f6" +checksum = "8f1e260c3a9040a7c19a12468758f4c16f31a81a1fe087482be9570ec864bb6c" [[package]] name = "byte-tools" @@ -160,42 +159,54 @@ checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" [[package]] name = "byteorder" -version = "1.3.4" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de" +checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" -version = "0.5.4" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "130aac562c0dd69c56b3b1cc8ffd2e17be31d0b6c25b61c96b76231aa23e39e1" +checksum = "c4872d67bab6358e59559027aa3b9157c53d9358c51423c17554809a8858e0f8" [[package]] name = "cc" -version = "1.0.53" +version = "1.0.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "404b1fe4f65288577753b17e3b36a04596ee784493ec249bf81c7f2d2acd751c" +checksum = "79c2681d6594606957bbb8631c4b90a7fcaaa72cdb714743a437b156d6a7eedd" [[package]] name = "cfg-if" -version = "0.1.10" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] -name = "cloudabi" -version = "0.0.3" +name = "chrono" +version = "0.4.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f" +checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73" dependencies = [ - "bitflags", + "libc", + "num-integer", + "num-traits", + "winapi", +] + +[[package]] +name = "cpufeatures" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95059428f66df56b63431fdb4e1947ed2190586af5c5a8a8b71122bdf5a7f469" +dependencies = [ + "libc", ] [[package]] name = "crc32fast" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba125de2af0df55319f41944744ad91c71113bf74a4646efff39afe1f6842db1" +checksum = "81156fece84ab6a9f2afdb109ce3ae577e42b1228441eded99bd77f627953b1a" dependencies = [ "cfg-if", ] @@ -206,14 +217,17 @@ version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5" dependencies = [ - "generic-array", + "generic-array 0.12.4", ] [[package]] -name = "dtoa" -version = "0.4.5" +name = "digest" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4358a9e11b9a09cf52383b451b49a169e8d797b68aa02301ff586d70d9661ea3" +checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" +dependencies = [ + "generic-array 0.14.4", +] [[package]] name = "env_logger" @@ -223,7 +237,7 @@ checksum = "44533bbbb3bb3c1fa17d9f2e4e38bbbaf8396ba82193c4cb1b6445d711445d36" dependencies = [ "atty", "humantime", - "log 0.4.8", + "log", "regex", "termcolor", ] @@ -236,9 +250,9 @@ checksum = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed" [[package]] name = "flate2" -version = "1.0.14" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cfff41391129e0a856d6d822600b8d71179d46879e310417eb9c762eb178b42" +checksum = "1e6988e897c1c9c485f43b47a529cef42fde0547f9d8d41a7062518f1d8fc53f" dependencies = [ "cfg-if", "crc32fast", @@ -253,47 +267,20 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] -name = "fuchsia-cprng" -version = "0.1.1" +name = "form_urlencoded" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba" - -[[package]] -name = "fuchsia-zircon" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e9763c69ebaae630ba35f74888db465e49e259ba1bc0eda7d06f4a067615d82" +checksum = "5fc25a87fa4fd2094bffb06925852034d90a17f0d1e05197d4956d3555752191" dependencies = [ - "bitflags", - "fuchsia-zircon-sys", -] - -[[package]] -name = "fuchsia-zircon-sys" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" - -[[package]] -name = "futures" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e05b85ec287aac0dc34db7d4a569323df697f9c55b99b15d6b4ef8cde49f613" -dependencies = [ - "futures-channel", - "futures-core", - "futures-executor", - "futures-io", - "futures-sink", - "futures-task", - "futures-util", + "matches", + "percent-encoding", ] [[package]] name = "futures-channel" -version = "0.3.5" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f366ad74c28cca6ba456d95e6422883cfb4b252a83bed929c83abfdbbf2967d5" +checksum = "5da6ba8c3bb3c165d3c7319fc1cc8304facf1fb8db99c5de877183c08a273888" dependencies = [ "futures-core", "futures-sink", @@ -301,88 +288,61 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.5" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59f5fff90fd5d971f936ad674802482ba441b6f09ba5e15fd8b39145582ca399" - -[[package]] -name = "futures-executor" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10d6bb888be1153d3abeb9006b11b02cf5e9b209fda28693c31ae1e4e012e314" -dependencies = [ - "futures-core", - "futures-task", - "futures-util", -] - -[[package]] -name = "futures-io" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de27142b013a8e869c14957e6d2edeef89e97c289e69d042ee3a49acd8b51789" - -[[package]] -name = "futures-macro" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0b5a30a4328ab5473878237c447333c093297bded83a4983d10f4deea240d39" -dependencies = [ - "proc-macro-hack", - "proc-macro2", - "quote", - "syn", -] +checksum = "88d1c26957f23603395cd326b0ffe64124b818f4449552f960d815cfba83a53d" [[package]] name = "futures-sink" -version = "0.3.5" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f2032893cb734c7a05d85ce0cc8b8c4075278e93b24b66f9de99d6eb0fa8acc" +checksum = "36ea153c13024fe480590b3e3d4cad89a0cfacecc24577b68f86c6ced9c2bc11" [[package]] name = "futures-task" -version = "0.3.5" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdb66b5f09e22019b1ab0830f7785bcea8e7a42148683f99214f73f8ec21a626" -dependencies = [ - "once_cell", -] +checksum = "1d3d00f4eddb73e498a54394f228cd55853bdf059259e8e7bc6e69d408892e99" [[package]] name = "futures-util" -version = "0.3.5" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8764574ff08b701a084482c3c7031349104b07ac897393010494beaa18ce32c6" +checksum = "36568465210a3a6ee45e1f165136d68671471a501e632e9a98d96872222b5481" dependencies = [ - "futures-channel", + "autocfg", "futures-core", - "futures-io", - "futures-macro", "futures-sink", "futures-task", - "memchr", - "pin-project", + "pin-project-lite", "pin-utils", - "proc-macro-hack", - "proc-macro-nested", "slab", ] [[package]] name = "generic-array" -version = "0.12.3" +version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c68f0274ae0e023facc3c97b2e00f076be70e254bc851d972503b328db79b2ec" +checksum = "ffdf9f34f1447443d37393cc6c2b8313aebddcd96906caf34e54c68d8e57d7bd" dependencies = [ "typenum", ] [[package]] -name = "getrandom" -version = "0.1.14" +name = "generic-array" +version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7abc8dd8451921606d809ba32e95b6111925cd2906060d2dcc29c070220503eb" +checksum = "501466ecc8a30d1d3b7fc9229b122b2ce8ed6e9d9223f1138d4babb253e51817" +dependencies = [ + "typenum", + "version_check", +] + +[[package]] +name = "getrandom" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753" dependencies = [ "cfg-if", "libc", @@ -391,9 +351,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.2.5" +version = "0.3.7" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "79b7246d7e4b979c03fa093da39cfb3617a96bbeee6310af63991668d7e843ff" +checksum = "7fd819562fcebdac5afc5c113c3ec36f902840b70fd4fc458799c8ce4607ae55" dependencies = [ "bytes", "fnv", @@ -402,40 +362,46 @@ dependencies = [ "futures-util", "http", "indexmap", - "log 0.4.8", "slab", "tokio", "tokio-util", + "tracing", ] [[package]] name = "handlebars" -version = "3.0.1" +version = "4.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba758d094d31274eb49d15da6f326b96bf3185239a6359bf684f3d5321148900" +checksum = "66b09e2322d20d14bc2572401ce7c1d60b4748580a76c230ed9c1f8938f0c833" dependencies = [ - "log 0.4.8", + "log", "pest", "pest_derive", - "quick-error", + "quick-error 2.0.1", "serde", "serde_json", ] [[package]] -name = "headers" -version = "0.3.2" +name = "hashbrown" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed18eb2459bf1a09ad2d6b1547840c3e5e62882fa09b9a6a20b1de8e3228848f" +checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" + +[[package]] +name = "headers" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4c4eb0471fcb85846d8b0690695ef354f9afb11cb03cac2e1d7c9253351afb0" dependencies = [ - "base64 0.12.1", + "base64", "bitflags", "bytes", "headers-core", "http", - "mime 0.3.16", - "sha-1", - "time", + "httpdate", + "mime", + "sha-1 0.9.8", ] [[package]] @@ -449,18 +415,18 @@ dependencies = [ [[package]] name = "hermit-abi" -version = "0.1.13" +version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91780f809e750b0a89f5544be56617ff6b1227ee485bcb06ebe10cdf89bd3b71" +checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" dependencies = [ "libc", ] [[package]] name = "http" -version = "0.2.1" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28d569972648b2c512421b5f2a405ad6ac9666547189d0c5477a3f200f3e02f9" +checksum = "1323096b05d41827dadeaee54c9981958c0f94e670bc94ed80037d1a7b8b186b" dependencies = [ "bytes", "fnv", @@ -469,19 +435,26 @@ dependencies = [ [[package]] name = "http-body" -version = "0.3.1" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13d5ff830006f7646652e057693569bfe0d51760c0085a071769d142a205111b" +checksum = "1ff4f84919677303da5f147645dbea6b1881f368d03ac84e1dc09031ebd7b2c6" dependencies = [ "bytes", "http", + "pin-project-lite", ] [[package]] name = "httparse" -version = "1.3.4" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd179ae861f0c2e53da70d892f5f3029f9594be0c41dc5269cd371691b1dc2f9" +checksum = "acd94fdbe1d4ff688b67b04eee2e17bd50995534a61539e45adfefb45e5e5503" + +[[package]] +name = "httpdate" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6456b8a6c8f33fee7d958fcd1b60d55b11940a79e63ae87013e6d22e26034440" [[package]] name = "humantime" @@ -489,14 +462,14 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df004cfca50ef23c36850aaaa59ad52cc70d0e90243c3c7737a4dd32dc7a3c4f" dependencies = [ - "quick-error", + "quick-error 1.2.3", ] [[package]] name = "hyper" -version = "0.13.5" +version = "0.14.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96816e1d921eca64d208a85aab4f7798455a8e34229ee5a88c935bdee1b78b14" +checksum 
= "2b91bb1f221b6ea1f1e4371216b70f40748774c2fb5971b450c07773fb92d26b" dependencies = [ "bytes", "futures-channel", @@ -506,21 +479,21 @@ dependencies = [ "http", "http-body", "httparse", + "httpdate", "itoa", - "log 0.4.8", - "net2", - "pin-project", - "time", + "pin-project-lite", + "socket2", "tokio", "tower-service", + "tracing", "want", ] [[package]] name = "idna" -version = "0.2.0" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02e2673c30ee86b5b96a9cb52ad15718aa1f966f5ab9ad54a8b95d5ca33120a9" +checksum = "418a0a6fab821475f634efe3ccc45c013f742efe03d853e8d3355d5cb850ecf8" dependencies = [ "matches", "unicode-bidi", @@ -529,56 +502,29 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.3.2" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "076f042c5b7b98f31d205f1249267e12a6518c1481e9dae9764af19b707d2292" +checksum = "bc633605454125dec4b66843673f01c7df2b89479b32e0ed634e43a91cff62a5" dependencies = [ - "autocfg 1.0.0", -] - -[[package]] -name = "input_buffer" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19a8a95243d5a0398cae618ec29477c6e3cb631152be5c19481f80bc71559754" -dependencies = [ - "bytes", -] - -[[package]] -name = "iovec" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2b3ea6ff95e175473f8ffe6a7eb7c00d054240321b84c57051175fe3c1e075e" -dependencies = [ - "libc", + "autocfg", + "hashbrown", ] [[package]] name = "itoa" -version = "0.4.5" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8b7a7c0c47db5545ed3fef7468ee7bb5b74691498139e4b3f6a20685dc6dd8e" +checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4" [[package]] name = "js-sys" -version = "0.3.39" +version = "0.3.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa5a448de267e7358beaf4a5d849518fe9a0c13fce7afd44b06e68550e5562a7" +checksum = "7cc9ffccd38c451a86bf13657df244e9c3f37493cce8e5e21e940963777acc84" dependencies = [ "wasm-bindgen", ] -[[package]] -name = "kernel32-sys" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" -dependencies = [ - "winapi 0.2.8", - "winapi-build", -] - [[package]] name = "lazy_static" version = "1.4.0" @@ -587,35 +533,26 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.70" +version = "0.2.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3baa92041a6fec78c687fa0cc2b3fae8884f743d672cf551bed1d6dac6988d0f" +checksum = "a60553f9a9e039a333b4e9b20573b9e9b9c0bb3a11e201ccc48ef4283456d673" [[package]] name = "listenfd" -version = "0.3.3" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "492158e732f2e2de81c592f0a2427e57e12cd3d59877378fe7af624b6bbe0ca1" +checksum = "809e514e2cb8a9624701346ea3e694c1766d76778e343e537d873c1c366e79a7" dependencies = [ "libc", "uuid", - "winapi 0.3.8", + "winapi", ] [[package]] name = "log" -version = "0.3.9" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e19e8d5c34a3e0e2223db8e060f9e8264aeeb5c5fc64a4ee9965c062211c024b" -dependencies = [ - "log 0.4.8", -] - -[[package]] -name = "log" -version = "0.4.8" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "14b6052be84e6b71ab17edffc2eeabf5c2c3ae1fdb464aae35ac50c67a44e1f7" +checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710" dependencies = [ "cfg-if", ] @@ -627,25 +564,25 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3e2e65a1a2e43cfcb47a895c4c8b10d1f4a61097f9f254f183aee60cad9c651d" [[package]] -name = "matches" -version = "0.1.8" +name = "matchers" +version = "0.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08" +checksum = "f099785f7595cc4b4553a174ce30dd7589ef93391ff414dbb67f62392b9e0ce1" +dependencies = [ + "regex-automata", +] + +[[package]] +name = "matches" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a3e378b66a060d48947b590737b30a1be76706c8dd7b8ba0f2fe3989c68a853f" [[package]] name = "memchr" -version = "2.3.3" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3728d817d99e5ac407411fa471ff9800a778d88a24685968b36824eaf4bee400" - -[[package]] -name = "mime" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba626b8a6de5da682e1caa06bdb42a335aee5a84db8e5046a3e8ab17ba0a3ae0" -dependencies = [ - "log 0.3.9", -] +checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a" [[package]] name = "mime" @@ -653,102 +590,109 @@ version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" -[[package]] -name = "mime_guess" -version = "1.8.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "216929a5ee4dd316b1702eedf5e74548c123d370f47841ceaac38ca154690ca3" -dependencies = [ - "mime 0.2.6", - "phf", - "phf_codegen", - "unicase 1.4.2", -] - [[package]] name = "mime_guess" version = "2.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2684d4c2e97d99848d30b324b00c8fcc7e5c897b7cbb5819b09e7c90e8baf212" dependencies = [ - "mime 0.3.16", - "unicase 2.6.0", + "mime", + "unicase", ] [[package]] name = "miniz_oxide" -version = "0.3.6" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa679ff6578b1cddee93d7e82e263b94a575e0bfced07284eb0c037c1d2416a5" +checksum = "a92518e98c078586bc6c934028adcca4c92a53d6a958196de835170a01d84e4b" dependencies = [ - "adler32", + "adler", + "autocfg", ] [[package]] name = "mio" -version = "0.6.22" +version = "0.7.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fce347092656428bc8eaf6201042cb551b8d67855af7374542a92a0fbfcac430" +checksum = "8067b404fe97c70829f082dec8bcf4f71225d7eaea1d8645349cb76fa06205cc" dependencies = [ - "cfg-if", - "fuchsia-zircon", - "fuchsia-zircon-sys", - "iovec", - "kernel32-sys", "libc", - "log 0.4.8", + "log", "miow", - "net2", - "slab", - "winapi 0.2.8", + "ntapi", + "winapi", ] [[package]] name = "miow" -version = "0.2.1" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c1f2f3b1cf331de6896aabf6e9d55dca90356cc9960cca7eaaf408a355ae919" +checksum = "b9f1c5b025cda876f66ef43a113f91ebc9f4ccef34843000e0adf6ebbab84e21" dependencies = [ - "kernel32-sys", - "net2", - "winapi 0.2.8", - "ws2_32-sys", + "winapi", ] [[package]] name = "multipart" -version = "0.16.1" +version = "0.18.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "136eed74cadb9edd2651ffba732b19a450316b680e4f48d6c79e905799e19d01" +checksum = "00dec633863867f29cb39df64a397cdf4a6354708ddd7759f70c7fb51c5f9182" dependencies = [ "buf_redux", "httparse", - "log 0.4.8", - "mime 0.2.6", - "mime_guess 1.8.8", - "quick-error", - "rand 0.6.5", + "log", + "mime", + "mime_guess", + "quick-error 1.2.3", + "rand", "safemem", "tempfile", "twoway", ] [[package]] -name = "net2" -version = "0.2.34" +name = "ntapi" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ba7c918ac76704fb42afcbbb43891e72731f3dcca3bef2a19786297baf14af7" +checksum = "3f6bb902e437b6d86e03cce10a7e2af662292c5dfef23b65899ea3ac9354ad44" dependencies = [ - "cfg-if", + "winapi", +] + +[[package]] +name = "num-integer" +version = "0.1.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2cc698a63b549a70bc047073d2949cce27cd1c7b0a4a862d08a8031bc2801db" +dependencies = [ + "autocfg", + "num-traits", +] + +[[package]] +name = "num-traits" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290" +dependencies = [ + "autocfg", +] + +[[package]] +name = "num_cpus" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3" +dependencies = [ + "hermit-abi", "libc", - "winapi 0.3.8", ] [[package]] name = "once_cell" -version = "1.4.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b631f7e854af39a1739f401cf34a8a013dfe09eac4fa4dba91e9768bd28168d" +checksum = "692fcb63b64b1758029e0a96ee63e049ce8c5948587f2f7208df04625e5f6b56" [[package]] name = "opaque-debug" @@ -756,6 +700,12 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c" +[[package]] +name = "opaque-debug" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" + [[package]] name = "percent-encoding" version = "2.1.0" @@ -802,62 +752,23 @@ checksum = "54be6e404f5317079812fc8f9f5279de376d8856929e21c184ecf6bbd692a11d" dependencies = [ "maplit", "pest", - "sha-1", -] - -[[package]] -name = "phf" -version = "0.7.24" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3da44b85f8e8dfaec21adae67f95d93244b2ecf6ad2a692320598dcc8e6dd18" -dependencies = [ - "phf_shared", -] - -[[package]] -name = "phf_codegen" -version = "0.7.24" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b03e85129e324ad4166b06b2c7491ae27fe3ec353af72e72cd1654c7225d517e" -dependencies = [ - "phf_generator", - "phf_shared", -] - -[[package]] -name = "phf_generator" -version = "0.7.24" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09364cc93c159b8b06b1f4dd8a4398984503483891b0c26b867cf431fb132662" -dependencies = [ - "phf_shared", - "rand 0.6.5", -] - -[[package]] -name = "phf_shared" -version = "0.7.24" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "234f71a15de2288bcb7e3b6515828d22af7ec8598ee6d24c3b526fa0a80b67a0" -dependencies = [ - "siphasher", - "unicase 1.4.2", + "sha-1 0.8.2", ] [[package]] name = "pin-project" -version = "0.4.17" +version = "1.0.8" source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edc93aeee735e60ecb40cf740eb319ff23eab1c5748abfdb5c180e4ce49f7791" +checksum = "576bc800220cc65dac09e99e97b08b358cfab6e17078de8dc5fee223bd2d0c08" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "0.4.17" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e58db2081ba5b4c93bd6be09c40fd36cb9193a8336c384f3b40012e531aa7e40" +checksum = "6e8fe8163d14ce7f0cdac2e040116f22eac817edabff0be91e8aff7e9accf389" dependencies = [ "proc-macro2", "quote", @@ -866,9 +777,9 @@ dependencies = [ [[package]] name = "pin-project-lite" -version = "0.1.5" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7505eeebd78492e0f6108f7171c4948dbb120ee8119d9d77d0afa5469bef67f" +checksum = "8d31d11c69a6b52a174b42bdc0c30e5e11670f90788b2c471c31c1d17d449443" [[package]] name = "pin-utils" @@ -878,9 +789,9 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "ppv-lite86" -version = "0.2.8" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "237a5ed80e274dbc66f86bd59c1e25edc039660be53194b5fe0a482e0f2612ea" +checksum = "ed0cfbc8191465bed66e1718596ee0b0b35d5ee1f41c5df2189d0fe8bde535ba" [[package]] name = "pretty_env_logger" @@ -889,26 +800,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "926d36b9553851b8b0005f1275891b392ee4d2d833852c417ed025477350fb9d" dependencies = [ "env_logger", - "log 0.4.8", + "log", ] -[[package]] -name = "proc-macro-hack" -version = "0.5.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d659fe7c6d27f25e9d80a1a094c223f5246f6a6596453e09d7229bf42750b63" - -[[package]] -name = "proc-macro-nested" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e946095f9d3ed29ec38de908c22f95d9ac008e424c7bcae54c75a79c527c694" - [[package]] name = "proc-macro2" -version = "1.0.13" +version = "1.0.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53f5ffe53a6b28e37c9c1ce74893477864d64f74778a93a4beb43c8fa167f639" +checksum = "ba508cc11742c0dc5c1659771673afbab7a0efab23aa17e854cbab0837ed0b43" dependencies = [ "unicode-xid", ] @@ -920,208 +819,109 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] -name = "quote" -version = "1.0.6" +name = "quick-error" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54a21852a652ad6f610c9510194f398ff6f8692e334fd1145fed931f7fbe44ea" +checksum = "a993555f31e5a609f617c12db6250dedcac1b0a85076912c436e6fc9b2c8e6a3" + +[[package]] +name = "quote" +version = "1.0.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38bc8cc6a5f2e3655e0899c1b848643b2562f853f114bfec7be120678e3ace05" dependencies = [ "proc-macro2", ] [[package]] name = "rand" -version = "0.6.5" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d71dacdc3c88c1fde3885a3be3fbab9f35724e6ce99467f7d9c5026132184ca" +checksum = "2e7573632e6454cf6b99d7aac4ccca54be06da05aca2ef7423d22d27d4d4bcd8" dependencies = [ - "autocfg 0.1.7", "libc", - "rand_chacha 0.1.1", - "rand_core 0.4.2", - "rand_hc 0.1.0", - "rand_isaac", - "rand_jitter", - "rand_os", - "rand_pcg", - "rand_xorshift", - 
"winapi 0.3.8", -] - -[[package]] -name = "rand" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" -dependencies = [ - "getrandom", - "libc", - "rand_chacha 0.2.2", - "rand_core 0.5.1", - "rand_hc 0.2.0", + "rand_chacha", + "rand_core", + "rand_hc", ] [[package]] name = "rand_chacha" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "556d3a1ca6600bfcbab7c7c91ccb085ac7fbbcd70e008a98742e7847f4f7bcef" -dependencies = [ - "autocfg 0.1.7", - "rand_core 0.3.1", -] - -[[package]] -name = "rand_chacha" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" -dependencies = [ - "ppv-lite86", - "rand_core 0.5.1", -] - -[[package]] -name = "rand_core" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ - "rand_core 0.4.2", + "ppv-lite86", + "rand_core", ] [[package]] name = "rand_core" -version = "0.4.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc" - -[[package]] -name = "rand_core" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" +checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" dependencies = [ "getrandom", ] [[package]] name = "rand_hc" -version = "0.1.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b40677c7be09ae76218dc623efbf7b18e34bced3f38883af07bb75630a21bc4" +checksum = "d51e9f596de227fda2ea6c84607f5558e196eeaf43c986b724ba4fb8fdf497e7" dependencies = [ - "rand_core 0.3.1", -] - -[[package]] -name = "rand_hc" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" -dependencies = [ - "rand_core 0.5.1", -] - -[[package]] -name = "rand_isaac" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ded997c9d5f13925be2a6fd7e66bf1872597f759fd9dd93513dd7e92e5a5ee08" -dependencies = [ - "rand_core 0.3.1", -] - -[[package]] -name = "rand_jitter" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1166d5c91dc97b88d1decc3285bb0a99ed84b05cfd0bc2341bdf2d43fc41e39b" -dependencies = [ - "libc", - "rand_core 0.4.2", - "winapi 0.3.8", -] - -[[package]] -name = "rand_os" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b75f676a1e053fc562eafbb47838d67c84801e38fc1ba459e8f180deabd5071" -dependencies = [ - "cloudabi", - "fuchsia-cprng", - "libc", - "rand_core 0.4.2", - "rdrand", - "winapi 0.3.8", -] - -[[package]] -name = "rand_pcg" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abf9b09b01790cfe0364f52bf32995ea3c39f4d2dd011eac241d2914146d0b44" -dependencies = [ - "autocfg 0.1.7", - "rand_core 0.4.2", -] - -[[package]] -name = "rand_xorshift" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"cbf7e9e623549b0e21f6e97cf8ecf247c1a8fd2e8a992ae265314300b2455d5c" -dependencies = [ - "rand_core 0.3.1", -] - -[[package]] -name = "rdrand" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2" -dependencies = [ - "rand_core 0.3.1", + "rand_core", ] [[package]] name = "redox_syscall" -version = "0.1.56" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2439c63f3f6139d1b57529d16bc3b8bb855230c8efcc5d3a896c8bea7c3b1e84" +checksum = "8383f39639269cde97d255a32bdb68c047337295414940c68bdd30c2e13203ff" +dependencies = [ + "bitflags", +] [[package]] name = "regex" -version = "1.3.7" +version = "1.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6020f034922e3194c711b82a627453881bc4682166cabb07134a10c26ba7692" +checksum = "d07a8629359eb56f1e2fb1652bb04212c072a87ba68546a04065d525673ac461" dependencies = [ "aho-corasick", "memchr", "regex-syntax", - "thread_local", +] + +[[package]] +name = "regex-automata" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" +dependencies = [ + "regex-syntax", ] [[package]] name = "regex-syntax" -version = "0.6.17" +version = "0.6.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fe5bd57d1d7414c6b5ed48563a2c855d995ff777729dcd91c369ec7fea395ae" +checksum = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b" [[package]] name = "remove_dir_all" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a83fa3702a688b9359eccba92d153ac33fd2e8462f9e0e3fdf155239ea7792e" +checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" dependencies = [ - "winapi 0.3.8", + "winapi", ] [[package]] name = "ring" -version = "0.16.13" +version = "0.16.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "703516ae74571f24b465b4a1431e81e2ad51336cb0ded733a55a1aa3eccac196" +checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" dependencies = [ "cc", "libc", @@ -1129,17 +929,17 @@ dependencies = [ "spin", "untrusted", "web-sys", - "winapi 0.3.8", + "winapi", ] [[package]] name = "rustls" -version = "0.16.0" +version = "0.19.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b25a18b1bf7387f0145e7f8324e700805aade3842dd3db2e74e4cdeb4677c09e" +checksum = "35edb675feee39aec9c99fa5ff985081995a06d594114ae14cbe797ad7b7a6d7" dependencies = [ - "base64 0.10.1", - "log 0.4.8", + "base64", + "log", "ring", "sct", "webpki", @@ -1147,9 +947,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.4" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed3d612bc64430efeb3f7ee6ef26d590dce0c43249217bddc62112540c7941e1" +checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e" [[package]] name = "safemem" @@ -1165,9 +965,9 @@ checksum = "ea6a9290e3c9cf0f18145ef7ffa62d68ee0bf5fcd651017e586dc7fd5da448c2" [[package]] name = "sct" -version = "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3042af939fca8c3453b7af0f1c66e533a15a86169e39de2657310ade8f98d3c" +checksum = "b362b83898e0e69f38515b82ee15aa80636befe47c3b6d3d89a911e78fc228ce" dependencies = [ "ring", "untrusted", @@ -1175,15 
+975,15 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.110" +version = "1.0.130" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99e7b308464d16b56eba9964e4972a3eee817760ab60d88c3f86e1fecb08204c" +checksum = "f12d06de37cf59146fbdecab66aa99f9fe4f78722e3607577a5375d66bd0c913" [[package]] name = "serde_derive" -version = "1.0.110" +version = "1.0.130" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "818fbf6bfa9a42d3bfcaca148547aa00c7b915bec71d1757aa2d44ca68771984" +checksum = "d7bc1a1ab1961464eae040d96713baa5a724a8152c1222492465b54322ec508b" dependencies = [ "proc-macro2", "quote", @@ -1192,9 +992,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.53" +version = "1.0.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "993948e75b189211a9b31a7528f950c6adc21f9720b6438ff80a7fa2f864cea2" +checksum = "0f690853975602e1bfe1ccbf50504d67174e3bcf340f23b5ea9992e0587a52d8" dependencies = [ "itoa", "ryu", @@ -1203,14 +1003,14 @@ dependencies = [ [[package]] name = "serde_urlencoded" -version = "0.6.1" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ec5d77e2d4c73717816afac02670d5c4f534ea95ed430442cad02e7a6e32c97" +checksum = "edfa57a7f8d9c1d260a549e7224100f6c43d43f9103e06dd8b4095a9b2b43ce9" dependencies = [ - "dtoa", + "form_urlencoded", "itoa", + "ryu", "serde", - "url", ] [[package]] @@ -1219,29 +1019,55 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f7d94d0bede923b3cea61f3f1ff57ff8cdfd77b400fb8f9998949e0cf04163df" dependencies = [ - "block-buffer", - "digest", + "block-buffer 0.7.3", + "digest 0.8.1", "fake-simd", - "opaque-debug", + "opaque-debug 0.2.3", ] [[package]] -name = "siphasher" -version = "0.2.3" +name = "sha-1" +version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b8de496cf83d4ed58b6be86c3a275b8602f6ffe98d3024a869e124147a9a3ac" +checksum = "99cd6713db3cf16b6c84e06321e049a9b9f699826e16096d23bbcc44d15d51a6" +dependencies = [ + "block-buffer 0.9.0", + "cfg-if", + "cpufeatures", + "digest 0.9.0", + "opaque-debug 0.3.0", +] + +[[package]] +name = "sharded-slab" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "900fba806f70c630b0a382d0d825e17a0f19fcd059a2ade1ff237bcddf446b31" +dependencies = [ + "lazy_static", +] [[package]] name = "slab" -version = "0.4.2" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" +checksum = "9def91fd1e018fe007022791f865d0ccc9b3a0d5001e01aabb8b40e46000afb5" [[package]] name = "smallvec" -version = "1.4.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7cb5678e1615754284ec264d9bb5b4c27d2018577fd90ac0ceb578591ed5ee4" +checksum = "1ecab6c735a6bb4139c0caafd0cc3635748bbb3acf4550e8138122099251f309" + +[[package]] +name = "socket2" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5dc90fe6c7be1a323296982db1836d1ea9e47b6839496dde9a541bc496df3516" +dependencies = [ + "libc", + "winapi", +] [[package]] name = "spin" @@ -1251,9 +1077,9 @@ checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" [[package]] name = "syn" -version = "1.0.22" +version = "1.0.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1425de3c33b0941002740a420b1a906a350b88d08b82b2c8a01035a3f9447bac" +checksum = "f2afee18b8beb5a596ecb4a2dce128c719b4ba399d34126b9e4396e3f9860966" dependencies = [ "proc-macro2", "quote", @@ -1262,69 +1088,93 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.1.0" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9" +checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22" dependencies = [ "cfg-if", "libc", - "rand 0.7.3", + "rand", "redox_syscall", "remove_dir_all", - "winapi 0.3.8", + "winapi", ] [[package]] name = "termcolor" -version = "1.1.0" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb6bfa289a4d7c5766392812c0a1f4c1ba45afa1ad47803c11e1f407d846d75f" +checksum = "2dfed899f0eb03f32ee8c6a0aabdb8a7949659e3466561fc0adf54e26d88c5f4" dependencies = [ "winapi-util", ] [[package]] -name = "thread_local" -version = "1.0.1" +name = "thiserror" +version = "1.0.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d40c6d1b69745a6ec6fb1ca717914848da4b44ae29d9b3080cbee91d72a69b14" +checksum = "854babe52e4df1653706b98fcfc05843010039b406875930a70e4d9644e5c417" dependencies = [ - "lazy_static", + "thiserror-impl", ] [[package]] -name = "time" -version = "0.1.43" +name = "thiserror-impl" +version = "1.0.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438" +checksum = "aa32fd3f627f367fe16f893e2597ae3c05020f8bba2666a4e6ea73d377e5714b" dependencies = [ - "libc", - "winapi 0.3.8", + "proc-macro2", + "quote", + "syn", ] +[[package]] +name = "thread_local" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8018d24e04c95ac8790716a5987d0fec4f8b27249ffa0f7d33f1369bdfb88cbd" +dependencies = [ + "once_cell", +] + +[[package]] +name = "tinyvec" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f83b2a3d4d9091d0abd7eba4dc2710b1718583bd4d8992e2190720ea38f391f7" +dependencies = [ + "tinyvec_macros", +] + +[[package]] +name = "tinyvec_macros" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" + [[package]] name = "tokio" -version = "0.2.21" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d099fa27b9702bed751524694adbe393e18b36b204da91eb1cbbbbb4a5ee2d58" +checksum = "588b2d10a336da58d877567cd8fb8a14b463e2104910f8132cd054b4b96e29ee" dependencies = [ + "autocfg", "bytes", - "fnv", - "futures-core", - "iovec", - "lazy_static", + "libc", "memchr", "mio", + "num_cpus", "pin-project-lite", - "slab", "tokio-macros", + "winapi", ] [[package]] name = "tokio-macros" -version = "0.2.5" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0c3acc6aa564495a0f2e1d59fab677cd7f81a19994cfc7f3ad0e64301560389" +checksum = "114383b041aa6212c579467afa0075fbbdd0718de036100bc0ba7961d8cb9095" dependencies = [ "proc-macro2", "quote", @@ -1333,24 +1183,34 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.12.3" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3068d891551949b37681724d6b73666787cc63fa8e255c812a41d2513aff9775" +checksum = 
"bc6844de72e57df1980054b38be3a9f4702aba4858be64dd700181a8a6d0e1b6" dependencies = [ - "futures-core", "rustls", "tokio", "webpki", ] [[package]] -name = "tokio-tungstenite" -version = "0.10.1" +name = "tokio-stream" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8b8fe88007ebc363512449868d7da4389c9400072a3f666f212c7280082882a" +checksum = "50145484efff8818b5ccd256697f36863f587da82cf8b409c53adf1e840798e3" dependencies = [ - "futures", - "log 0.4.8", + "futures-core", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "tokio-tungstenite" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "511de3f85caf1c98983545490c3d09685fa8eb634e57eec22bb4db271f46cbd8" +dependencies = [ + "futures-util", + "log", "pin-project", "tokio", "tungstenite", @@ -1358,45 +1218,109 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.3.1" +version = "0.6.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be8242891f2b6cbef26a2d7e8605133c2c554cd35b3e4948ea892d6d68436499" +checksum = "9e99e1983e5d376cd8eb4b66604d2e99e79f5bd988c3055891dcd8c9e2604cc0" dependencies = [ "bytes", "futures-core", "futures-sink", - "log 0.4.8", + "log", "pin-project-lite", "tokio", ] [[package]] name = "tower-service" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e987b6bf443f4b5b3b6f38704195592cca41c5bb7aedd3c3693c7081f8289860" +checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" + +[[package]] +name = "tracing" +version = "0.1.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "375a639232caf30edfc78e8d89b2d4c375515393e7af7e16f01cd96917fb2105" +dependencies = [ + "cfg-if", + "log", + "pin-project-lite", + "tracing-core", +] + +[[package]] +name = "tracing-core" +version = "0.1.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f4ed65637b8390770814083d20756f87bfa2c21bf2f110babdc5438351746e4" +dependencies = [ + "lazy_static", +] + +[[package]] +name = "tracing-log" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a6923477a48e41c1951f1999ef8bb5a3023eb723ceadafe78ffb65dc366761e3" +dependencies = [ + "lazy_static", + "log", + "tracing-core", +] + +[[package]] +name = "tracing-serde" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb65ea441fbb84f9f6748fd496cf7f63ec9af5bca94dd86456978d055e8eb28b" +dependencies = [ + "serde", + "tracing-core", +] + +[[package]] +name = "tracing-subscriber" +version = "0.2.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e0d2eaa99c3c2e41547cfa109e910a68ea03823cccad4a0525dcbc9b01e8c71" +dependencies = [ + "ansi_term", + "chrono", + "lazy_static", + "matchers", + "regex", + "serde", + "serde_json", + "sharded-slab", + "smallvec", + "thread_local", + "tracing", + "tracing-core", + "tracing-log", + "tracing-serde", +] [[package]] name = "try-lock" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e604eb7b43c06650e854be16a2a03155743d3752dd1c943f6829e26b7a36e382" +checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" [[package]] name = "tungstenite" -version = "0.10.1" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"cfea31758bf674f990918962e8e5f07071a3161bd7c4138ed23e416e1ac4264e" +checksum = "a0b2d8558abd2e276b0a8df5c05a2ec762609344191e5fd23e292c910e9165b5" dependencies = [ - "base64 0.11.0", + "base64", "byteorder", "bytes", "http", "httparse", - "input_buffer", - "log 0.4.8", - "rand 0.7.3", - "sha-1", + "log", + "rand", + "sha-1 0.9.8", + "thiserror", "url", "utf-8", ] @@ -1412,9 +1336,9 @@ dependencies = [ [[package]] name = "typenum" -version = "1.12.0" +version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "373c8a200f9e67a0c95e62a4f52fbf80c23b4381c05a17845531982fa99e6b33" +checksum = "b63708a265f51345575b27fe43f9500ad611579e764c79edbc2037b1121959ec" [[package]] name = "ucd-trie" @@ -1422,47 +1346,35 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56dee185309b50d1f11bfedef0fe6d036842e3fb77413abef29f8f8d1c5d4c1c" -[[package]] -name = "unicase" -version = "1.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f4765f83163b74f957c797ad9253caf97f103fb064d3999aea9568d09fc8a33" -dependencies = [ - "version_check 0.1.5", -] - [[package]] name = "unicase" version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" dependencies = [ - "version_check 0.9.1", + "version_check", ] [[package]] name = "unicode-bidi" -version = "0.3.4" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49f2bd0c6468a8230e1db229cff8029217cf623c767ea5d60bfbd42729ea54d5" -dependencies = [ - "matches", -] +checksum = "1a01404663e3db436ed2746d9fefef640d868edae3cceb81c3b8d5732fda678f" [[package]] name = "unicode-normalization" -version = "0.1.12" +version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5479532badd04e128284890390c1e876ef7a993d0570b3597ae43dfa1d59afa4" +checksum = "d54590932941a9e9266f0832deed84ebe1bf2e4c9e4a3554d393d18f5e854bf9" dependencies = [ - "smallvec", + "tinyvec", ] [[package]] name = "unicode-xid" -version = "0.2.0" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "826e7639553986605ec5979c7dd957c7895e93eabed50ab2ffa7f6128a75097c" +checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3" [[package]] name = "untrusted" @@ -1472,47 +1384,33 @@ checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" [[package]] name = "url" -version = "2.1.1" +version = "2.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "829d4a8476c35c9bf0bbce5a3b23f4106f79728039b726d292bb93bc106787cb" +checksum = "a507c383b2d33b5fc35d1861e77e6b383d158b2da5e14fe51b83dfedf6fd578c" dependencies = [ + "form_urlencoded", "idna", "matches", "percent-encoding", ] -[[package]] -name = "urlencoding" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3df3561629a8bb4c57e5a2e4c43348d9e29c7c29d9b1c4c1f47166deca8f37ed" - [[package]] name = "utf-8" -version = "0.7.5" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05e42f7c18b8f902290b009cde6d651262f956c98bc51bca4cd1d511c9cd85c7" +checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" [[package]] name = "uuid" -version = "0.6.5" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e1436e58182935dcd9ce0add9ea0b558e8a87befe01c1a301e6020aeb0876363" -dependencies = [ - "cfg-if", -] +checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" [[package]] name = "version_check" -version = "0.1.5" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "914b1a6776c4c929a602fafd8bc742e06365d4bcbe48c30f9cca5824f70dc9dd" - -[[package]] -name = "version_check" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "078775d0255232fb988e6fccf26ddc9d1ac274299aaedcedce21c6f72cc533ce" +checksum = "5fecdca9a5291cc2b8dcf7dc02453fee791a280f3743cb0905f8822ae463b3fe" [[package]] name = "want" @@ -1520,26 +1418,28 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0" dependencies = [ - "log 0.4.8", + "log", "try-lock", ] [[package]] name = "warp" -version = "0.2.3" +version = "0.3.2" dependencies = [ "async-compression", "bytes", - "futures", + "futures-channel", + "futures-util", "handlebars", "headers", "http", "hyper", "listenfd", - "log 0.4.8", - "mime 0.3.16", - "mime_guess 2.0.3", + "log", + "mime", + "mime_guess", "multipart", + "percent-encoding", "pin-project", "pretty_env_logger", "scoped-tls", @@ -1549,22 +1449,26 @@ dependencies = [ "serde_urlencoded", "tokio", "tokio-rustls", + "tokio-stream", "tokio-tungstenite", + "tokio-util", "tower-service", - "urlencoding", + "tracing", + "tracing-log", + "tracing-subscriber", ] [[package]] name = "wasi" -version = "0.9.0+wasi-snapshot-preview1" +version = "0.10.2+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" +checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" [[package]] name = "wasm-bindgen" -version = "0.2.62" +version = "0.2.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3c7d40d09cdbf0f4895ae58cf57d92e1e57a9dd8ed2e8390514b54a47cc5551" +checksum = "632f73e236b219150ea279196e54e610f5dbafa5d61786303d4da54f84e47fce" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -1572,13 +1476,13 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.62" +version = "0.2.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3972e137ebf830900db522d6c8fd74d1900dcfc733462e9a12e942b00b4ac94" +checksum = "a317bf8f9fba2476b4b2c85ef4c4af8ff39c3c7f0cdfeed4f82c34a880aa837b" dependencies = [ "bumpalo", "lazy_static", - "log 0.4.8", + "log", "proc-macro2", "quote", "syn", @@ -1587,9 +1491,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.62" +version = "0.2.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cd85aa2c579e8892442954685f0d801f9129de24fa2136b2c6a539c76b65776" +checksum = "d56146e7c495528bf6587663bea13a8eb588d39b36b679d83972e1a2dbbdacf9" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -1597,9 +1501,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.62" +version = "0.2.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8eb197bd3a47553334907ffd2f16507b4f4f01bbec3ac921a7719e0decdfe72a" +checksum = "7803e0eea25835f8abdc585cd3021b3deb11543c6fe226dcd30b228857c5c5ab" dependencies = [ "proc-macro2", "quote", @@ -1610,15 +1514,15 @@ dependencies = [ [[package]] name = 
"wasm-bindgen-shared" -version = "0.2.62" +version = "0.2.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a91c2916119c17a8e316507afaaa2dd94b47646048014bbdf6bef098c1bb58ad" +checksum = "0237232789cf037d5480773fe568aac745bfe2afbc11a863e97901780a6b47cc" [[package]] name = "web-sys" -version = "0.3.39" +version = "0.3.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bc359e5dd3b46cb9687a051d50a2fdd228e4ba7cf6fcf861a5365c3d671a642" +checksum = "38eb105f1c59d9eaa6b5cdc92b859d85b926e82cb2e0945cd0c9259faa6fe9fb" dependencies = [ "js-sys", "wasm-bindgen", @@ -1626,9 +1530,9 @@ dependencies = [ [[package]] name = "webpki" -version = "0.21.2" +version = "0.21.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1f50e1972865d6b1adb54167d1c8ed48606004c2c9d0ea5f1eeb34d95e863ef" +checksum = "b8e38c0608262c46d4a56202ebabdeb094cef7e560ca7a226c6bf055188aa4ea" dependencies = [ "ring", "untrusted", @@ -1636,26 +1540,14 @@ dependencies = [ [[package]] name = "winapi" -version = "0.2.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a" - -[[package]] -name = "winapi" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8093091eeb260906a183e6ae1abdba2ef5ef2257a21801128899c3fc699229c6" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" dependencies = [ "winapi-i686-pc-windows-gnu", "winapi-x86_64-pc-windows-gnu", ] -[[package]] -name = "winapi-build" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc" - [[package]] name = "winapi-i686-pc-windows-gnu" version = "0.4.0" @@ -1668,7 +1560,7 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" dependencies = [ - "winapi 0.3.8", + "winapi", ] [[package]] @@ -1676,13 +1568,3 @@ name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" - -[[package]] -name = "ws2_32-sys" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e" -dependencies = [ - "winapi 0.2.8", - "winapi-build", -] diff --git a/third_party/rust/warp/Cargo.toml b/third_party/rust/warp/Cargo.toml index 18b1b13f6b96..822f246fefbc 100644 --- a/third_party/rust/warp/Cargo.toml +++ b/third_party/rust/warp/Cargo.toml @@ -3,17 +3,16 @@ # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies +# to registry (e.g., crates.io) dependencies. # -# If you believe there's an error in this file please file an -# issue against the rust-lang/cargo repository. If you're -# editing this file be aware that the upstream Cargo.toml -# will likely look very different (and much more reasonable) +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. 
[package] edition = "2018" name = "warp" -version = "0.2.3" +version = "0.3.2" authors = ["Sean McArthur "] autoexamples = true autotests = true @@ -25,7 +24,7 @@ categories = ["web-programming::http-server"] license = "MIT" repository = "https://github.com/seanmonstar/warp" [package.metadata.docs.rs] -features = ["tls"] +all-features = true [profile.bench] codegen-units = 1 incremental = false @@ -40,7 +39,6 @@ required-features = ["compression"] [[example]] name = "unix_socket" -required-features = ["tokio/uds"] [[example]] name = "websockets" @@ -50,6 +48,9 @@ required-features = ["websocket"] name = "websockets_chat" required-features = ["websocket"] +[[example]] +name = "query_string" + [[test]] name = "multipart" required-features = ["multipart"] @@ -58,16 +59,20 @@ required-features = ["multipart"] name = "ws" required-features = ["websocket"] [dependencies.async-compression] -version = "0.3.1" -features = ["brotli", "deflate", "gzip", "stream"] +version = "0.3.7" +features = ["tokio"] optional = true [dependencies.bytes] -version = "0.5" +version = "1.0" -[dependencies.futures] +[dependencies.futures-channel] +version = "0.3.17" +features = ["sink"] + +[dependencies.futures-util] version = "0.3" -features = ["alloc"] +features = ["sink"] default-features = false [dependencies.headers] @@ -77,8 +82,8 @@ version = "0.3" version = "0.2" [dependencies.hyper] -version = "0.13" -features = ["stream"] +version = "0.14" +features = ["stream", "server", "http1", "http2", "tcp", "client"] [dependencies.log] version = "0.4" @@ -90,13 +95,16 @@ version = "0.3" version = "2.0.0" [dependencies.multipart] -version = "0.16" +version = "0.18" features = ["server"] optional = true default-features = false +[dependencies.percent-encoding] +version = "2.1" + [dependencies.pin-project] -version = "0.4.5" +version = "1.0" [dependencies.scoped-tls] version = "1.0" @@ -108,28 +116,36 @@ version = "1.0" version = "1.0" [dependencies.serde_urlencoded] -version = "0.6" +version = "0.7" [dependencies.tokio] -version = "0.2" -features = ["fs", "stream", "sync", "time"] +version = "1.0" +features = ["fs", "sync", "time"] [dependencies.tokio-rustls] -version = "0.12.2" +version = "0.22" optional = true +[dependencies.tokio-stream] +version = "0.1.1" + [dependencies.tokio-tungstenite] -version = "0.10" +version = "0.15" optional = true -default-features = false + +[dependencies.tokio-util] +version = "0.6" +features = ["io"] [dependencies.tower-service] version = "0.3" -[dependencies.urlencoding] -version = "1.0.0" +[dependencies.tracing] +version = "0.1.21" +features = ["log", "std"] +default-features = false [dev-dependencies.handlebars] -version = "3.0.0" +version = "4.0" [dev-dependencies.listenfd] version = "0.3" @@ -141,11 +157,23 @@ version = "0.4" version = "1.0" [dev-dependencies.tokio] -version = "0.2" -features = ["macros"] +version = "1.0" +features = ["macros", "rt-multi-thread"] + +[dev-dependencies.tokio-stream] +version = "0.1.1" +features = ["net"] + +[dev-dependencies.tracing-log] +version = "0.1" + +[dev-dependencies.tracing-subscriber] +version = "0.2.7" [features] -compression = ["async-compression"] +compression = ["compression-brotli", "compression-gzip"] +compression-brotli = ["async-compression/brotli"] +compression-gzip = ["async-compression/deflate", "async-compression/gzip"] default = ["multipart", "websocket"] tls = ["tokio-rustls"] websocket = ["tokio-tungstenite"] diff --git a/third_party/rust/warp/README.md b/third_party/rust/warp/README.md index 0d4b9196d657..0f6559407747 100644 
--- a/third_party/rust/warp/README.md +++ b/third_party/rust/warp/README.md @@ -1,9 +1,10 @@ # warp -[![GHA Build Status](https://github.com/seanmonstar/warp/workflows/CI/badge.svg)](https://github.com/seanmonstar/warp/actions?query=workflow%3ACI) -[![MIT licensed](https://img.shields.io/badge/license-MIT-blue.svg)](./LICENSE) [![crates.io](https://img.shields.io/crates/v/warp.svg)](https://crates.io/crates/warp) [![Released API docs](https://docs.rs/warp/badge.svg)](https://docs.rs/warp) +[![MIT licensed](https://img.shields.io/badge/license-MIT-blue.svg)](./LICENSE) +[![GHA Build Status](https://github.com/seanmonstar/warp/workflows/CI/badge.svg)](https://github.com/seanmonstar/warp/actions?query=workflow%3ACI) +[![Discord chat][discord-badge]][discord-url] A super-easy, composable, web server framework for warp speeds. @@ -35,8 +36,8 @@ Since it builds on top of [hyper](https://hyper.rs), you automatically get: Add warp and Tokio to your dependencies: ```toml -tokio = { version = "0.2", features = ["macros"] } -warp = "0.2" +tokio = { version = "1", features = ["full"] } +warp = "0.3" ``` And then get started in your `main.rs`: @@ -57,3 +58,6 @@ async fn main() { ``` For more information you can check the [docs](https://docs.rs/warp) or the [examples](https://github.com/seanmonstar/warp/tree/master/examples). + +[discord-badge]: https://img.shields.io/discord/500028886025895936.svg?logo=discord +[discord-url]: https://discord.gg/RFsPjyt diff --git a/third_party/rust/warp/examples/README.md b/third_party/rust/warp/examples/README.md index 3f5ecb95ebb7..1b3bb8a2ae7f 100644 --- a/third_party/rust/warp/examples/README.md +++ b/third_party/rust/warp/examples/README.md @@ -4,6 +4,25 @@ Welcome to the examples! These show off `warp`'s functionality and explain how t ## Getting Started +To get started, run `examples/hello.rs` with: + +```bash +> cargo run --example hello +``` + +This will start a simple "hello world" service running on your localhost port 3030. + +Open another terminal and run: + +```bash +> curl http://localhost:3030/hi +Hello, World!% +``` + +Congratulations, you have just run your first warp service! + +You can run other examples with `cargo run --example [example name]`: + - [`hello.rs`](./hello.rs) - Just a basic "Hello World" API - [`routing.rs`](./routing.rs) - Builds up a more complex set of routes and shows how to combine filters - [`body.rs`](./body.rs) - What's a good API without parsing data from the request body? @@ -39,3 +58,11 @@ Hooray! `warp` also includes built-in support for WebSockets ### Autoreloading - [`autoreload.rs`](./autoreload.rs) - Change some code and watch the server reload automatically! + +### Debugging + +- [`tracing.rs`](./tracing.rs) - Warp has built-in support for rich diagnostics with [`tracing`](https://docs.rs/tracing)! + +## Custom HTTP Methods + +- [`custom_methods.rs`](./custom_methods.rs) - It is also possible to use Warp with custom HTTP methods. 
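For context alongside the getting-started steps in the README hunks above, a complete `main.rs` that serves the `/hi` route exercised by the `curl` command could look like the sketch below. This is illustrative only (it mirrors warp's hello example rather than reproducing the patched file) and assumes the updated `warp = "0.3"` / `tokio = "1"` dependency block:

```rust
use warp::Filter;

#[tokio::main]
async fn main() {
    // GET /hi => 200 OK with body "Hello, World!"
    let hi = warp::path("hi").map(|| "Hello, World!");

    warp::serve(hi).run(([127, 0, 0, 1], 3030)).await;
}
```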
diff --git a/third_party/rust/warp/examples/autoreload.rs b/third_party/rust/warp/examples/autoreload.rs index ed00ae266b5c..a21d9b1369c5 100644 --- a/third_party/rust/warp/examples/autoreload.rs +++ b/third_party/rust/warp/examples/autoreload.rs @@ -4,7 +4,6 @@ use listenfd::ListenFd; use std::convert::Infallible; use warp::Filter; -extern crate listenfd; /// You'll need to install `systemfd` and `cargo-watch`: /// ``` /// cargo install systemfd cargo-watch diff --git a/third_party/rust/warp/examples/custom_methods.rs b/third_party/rust/warp/examples/custom_methods.rs new file mode 100644 index 000000000000..2e266041325f --- /dev/null +++ b/third_party/rust/warp/examples/custom_methods.rs @@ -0,0 +1,61 @@ +#![deny(warnings)] +use std::net::SocketAddr; + +use warp::hyper::StatusCode; +use warp::{hyper::Method, reject, Filter, Rejection, Reply}; + +#[derive(Debug)] +struct MethodError; +impl reject::Reject for MethodError {} + +const FOO_METHOD: &'static str = "FOO"; +const BAR_METHOD: &'static str = "BAR"; + +fn method(name: &'static str) -> impl Filter + Clone { + warp::method() + .and_then(move |m: Method| async move { + if m == name { + Ok(()) + } else { + Err(reject::custom(MethodError)) + } + }) + .untuple_one() +} + +pub async fn handle_not_found(reject: Rejection) -> Result { + if reject.is_not_found() { + Ok(StatusCode::NOT_FOUND) + } else { + Err(reject) + } +} + +pub async fn handle_custom(reject: Rejection) -> Result { + if reject.find::().is_some() { + Ok(StatusCode::METHOD_NOT_ALLOWED) + } else { + Err(reject) + } +} + +#[tokio::main] +async fn main() -> Result<(), Box> { + let address: SocketAddr = "[::]:3030".parse()?; + + let foo_route = method(FOO_METHOD) + .and(warp::path!("foo")) + .map(|| "Success") + .recover(handle_not_found); + + let bar_route = method(BAR_METHOD) + .and(warp::path!("bar")) + .map(|| "Success") + .recover(handle_not_found); + + warp::serve(foo_route.or(bar_route).recover(handle_custom)) + .run(address) + .await; + + Ok(()) +} diff --git a/third_party/rust/warp/examples/futures.rs b/third_party/rust/warp/examples/futures.rs index 01342809347d..43bf2f6efa72 100644 --- a/third_party/rust/warp/examples/futures.rs +++ b/third_party/rust/warp/examples/futures.rs @@ -16,7 +16,7 @@ async fn main() { } async fn sleepy(Seconds(seconds): Seconds) -> Result { - tokio::time::delay_for(Duration::from_secs(seconds)).await; + tokio::time::sleep(Duration::from_secs(seconds)).await; Ok(format!("I waited {} seconds!", seconds)) } diff --git a/third_party/rust/warp/examples/handlebars_template.rs b/third_party/rust/warp/examples/handlebars_template.rs index 3740a32c34ca..78e040539ee1 100644 --- a/third_party/rust/warp/examples/handlebars_template.rs +++ b/third_party/rust/warp/examples/handlebars_template.rs @@ -11,7 +11,7 @@ struct WithTemplate { value: T, } -fn render(template: WithTemplate, hbs: Arc) -> impl warp::Reply +fn render(template: WithTemplate, hbs: Arc>) -> impl warp::Reply where T: Serialize, { diff --git a/third_party/rust/warp/examples/query_string.rs b/third_party/rust/warp/examples/query_string.rs new file mode 100644 index 000000000000..869468eb0dcc --- /dev/null +++ b/third_party/rust/warp/examples/query_string.rs @@ -0,0 +1,59 @@ +use serde_derive::{Deserialize, Serialize}; +use std::collections::HashMap; +use warp::{ + http::{Response, StatusCode}, + Filter, +}; + +#[derive(Deserialize, Serialize)] +struct MyObject { + key1: String, + key2: u32, +} + +#[tokio::main] +async fn main() { + pretty_env_logger::init(); + + // get /example1?key=value + 
// demonstrates an optional parameter. + let example1 = warp::get() + .and(warp::path("example1")) + .and(warp::query::>()) + .map(|p: HashMap| match p.get("key") { + Some(key) => Response::builder().body(format!("key = {}", key)), + None => Response::builder().body(String::from("No \"key\" param in query.")), + }); + + // get /example2?key1=value&key2=42 + // uses the query string to populate a custom object + let example2 = warp::get() + .and(warp::path("example2")) + .and(warp::query::()) + .map(|p: MyObject| { + Response::builder().body(format!("key1 = {}, key2 = {}", p.key1, p.key2)) + }); + + let opt_query = warp::query::() + .map(Some) + .or_else(|_| async { Ok::<(Option,), std::convert::Infallible>((None,)) }); + + // get /example3?key1=value&key2=42 + // builds on example2 but adds custom error handling + let example3 = + warp::get() + .and(warp::path("example3")) + .and(opt_query) + .map(|p: Option| match p { + Some(obj) => { + Response::builder().body(format!("key1 = {}, key2 = {}", obj.key1, obj.key2)) + } + None => Response::builder() + .status(StatusCode::BAD_REQUEST) + .body(String::from("Failed to decode query param.")), + }); + + warp::serve(example1.or(example2).or(example3)) + .run(([127, 0, 0, 1], 3030)) + .await +} diff --git a/third_party/rust/warp/examples/rejections.rs b/third_party/rust/warp/examples/rejections.rs index 286b7772b6fb..721e69ecaac3 100644 --- a/third_party/rust/warp/examples/rejections.rs +++ b/third_party/rust/warp/examples/rejections.rs @@ -1,9 +1,10 @@ #![deny(warnings)] use std::convert::Infallible; +use std::error::Error; use std::num::NonZeroU16; -use serde_derive::Serialize; +use serde_derive::{Deserialize, Serialize}; use warp::http::StatusCode; use warp::{reject, Filter, Rejection, Reply}; @@ -11,7 +12,9 @@ use warp::{reject, Filter, Rejection, Reply}; /// the request, but a different filter *could* process it. 
#[tokio::main] async fn main() { - let math = warp::path!("math" / u16) + let math = warp::path!("math" / u16); + let div_with_header = math + .and(warp::get()) .and(div_by()) .map(|num: u16, denom: NonZeroU16| { warp::reply::json(&Math { @@ -20,7 +23,17 @@ async fn main() { }) }); - let routes = warp::get().and(math).recover(handle_rejection); + let div_with_body = + math.and(warp::post()) + .and(warp::body::json()) + .map(|num: u16, body: DenomRequest| { + warp::reply::json(&Math { + op: format!("{} / {}", num, body.denom), + output: num / body.denom.get(), + }) + }); + + let routes = div_with_header.or(div_with_body).recover(handle_rejection); warp::serve(routes).run(([127, 0, 0, 1], 3030)).await; } @@ -36,6 +49,11 @@ fn div_by() -> impl Filter + Copy { }) } +#[derive(Deserialize)] +struct DenomRequest { + pub denom: NonZeroU16, +} + #[derive(Debug)] struct DivideByZero; @@ -69,6 +87,20 @@ async fn handle_rejection(err: Rejection) -> Result { } else if let Some(DivideByZero) = err.find() { code = StatusCode::BAD_REQUEST; message = "DIVIDE_BY_ZERO"; + } else if let Some(e) = err.find::() { + // This error happens if the body could not be deserialized correctly + // We can use the cause to analyze the error and customize the error message + message = match e.source() { + Some(cause) => { + if cause.to_string().contains("denom") { + "FIELD_ERROR: denom" + } else { + "BAD_REQUEST" + } + } + None => "BAD_REQUEST", + }; + code = StatusCode::BAD_REQUEST; } else if let Some(_) = err.find::() { // We can handle a specific error, here METHOD_NOT_ALLOWED, // and render it however we want diff --git a/third_party/rust/warp/examples/routing.rs b/third_party/rust/warp/examples/routing.rs index 6d1caa5bfee2..b2ad8c278d4c 100644 --- a/third_party/rust/warp/examples/routing.rs +++ b/third_party/rust/warp/examples/routing.rs @@ -9,6 +9,9 @@ async fn main() { // We'll start simple, and gradually show how you combine these powers // into super powers! + // GET / + let hello_world = warp::path::end().map(|| "Hello, World at root!"); + // GET /hi let hi = warp::path("hi").map(|| "Hello, World!"); @@ -73,16 +76,25 @@ async fn main() { times.map(|output| format!("(This route has moved to /math/:u16/times/:u16) {}", output)); // It turns out, using `or` is how you combine everything together into - // a single API. (We also actually haven't been enforcing the that the + // a single API. (We also actually haven't been enforcing that the // method is GET, so we'll do that too!) // + // GET / // GET /hi // GET /hello/from/warp // GET /bye/:string // GET /math/sum/:u32/:u32 // GET /math/:u16/times/:u16 - let routes = warp::get().and(hi.or(hello_from_warp).or(bye).or(math).or(sum).or(times)); + let routes = warp::get().and( + hello_world + .or(hi) + .or(hello_from_warp) + .or(bye) + .or(math) + .or(sum) + .or(times), + ); // Note that composing filters for many routes may increase compile times (because it uses a lot of generics). 
// If you wish to use dynamic dispatch instead and speed up compile times while diff --git a/third_party/rust/warp/examples/sse.rs b/third_party/rust/warp/examples/sse.rs index 9c883a7c9ad5..bce1fb6b1ae1 100644 --- a/third_party/rust/warp/examples/sse.rs +++ b/third_party/rust/warp/examples/sse.rs @@ -1,12 +1,13 @@ -use futures::StreamExt; +use futures_util::StreamExt; use std::convert::Infallible; use std::time::Duration; use tokio::time::interval; -use warp::{sse::ServerSentEvent, Filter}; +use tokio_stream::wrappers::IntervalStream; +use warp::{sse::Event, Filter}; // create server-sent event -fn sse_counter(counter: u64) -> Result { - Ok(warp::sse::data(counter)) +fn sse_counter(counter: u64) -> Result { + Ok(warp::sse::Event::default().data(counter.to_string())) } #[tokio::main] @@ -16,7 +17,9 @@ async fn main() { let routes = warp::path("ticks").and(warp::get()).map(|| { let mut counter: u64 = 0; // create server event source - let event_stream = interval(Duration::from_secs(1)).map(move |_| { + let interval = interval(Duration::from_secs(1)); + let stream = IntervalStream::new(interval); + let event_stream = stream.map(move |_| { counter += 1; sse_counter(counter) }); diff --git a/third_party/rust/warp/examples/sse_chat.rs b/third_party/rust/warp/examples/sse_chat.rs index 693a87f552c1..6e064b18242a 100644 --- a/third_party/rust/warp/examples/sse_chat.rs +++ b/third_party/rust/warp/examples/sse_chat.rs @@ -1,11 +1,12 @@ -use futures::{Stream, StreamExt}; +use futures_util::{Stream, StreamExt}; use std::collections::HashMap; use std::sync::{ atomic::{AtomicUsize, Ordering}, Arc, Mutex, }; use tokio::sync::mpsc; -use warp::{sse::ServerSentEvent, Filter}; +use tokio_stream::wrappers::UnboundedReceiverStream; +use warp::{sse::Event, Filter}; #[tokio::main] async fn main() { @@ -74,10 +75,7 @@ impl warp::reject::Reject for NotUtf8 {} /// - Value is a sender of `Message` type Users = Arc>>>; -fn user_connected( - users: Users, -) -> impl Stream> + Send + 'static -{ +fn user_connected(users: Users) -> impl Stream> + Send + 'static { // Use a counter to assign a new unique ID for this user. let my_id = NEXT_USER_ID.fetch_add(1, Ordering::Relaxed); @@ -86,6 +84,7 @@ fn user_connected( // Use an unbounded channel to handle buffering and flushing of messages // to the event source... let (tx, rx) = mpsc::unbounded_channel(); + let rx = UnboundedReceiverStream::new(rx); tx.send(Message::UserId(my_id)) // rx is right above, so this cannot fail @@ -96,8 +95,8 @@ fn user_connected( // Convert messages into Server-Sent Events and return resulting stream. rx.map(|msg| match msg { - Message::UserId(my_id) => Ok((warp::sse::event("user"), warp::sse::data(my_id)).into_a()), - Message::Reply(reply) => Ok(warp::sse::data(reply).into_b()), + Message::UserId(my_id) => Ok(Event::default().event("user").data(my_id.to_string())), + Message::Reply(reply) => Ok(Event::default().data(reply)), }) } diff --git a/third_party/rust/warp/examples/tls.rs b/third_party/rust/warp/examples/tls.rs index 900026384287..7d28e03a3a9e 100644 --- a/third_party/rust/warp/examples/tls.rs +++ b/third_party/rust/warp/examples/tls.rs @@ -2,6 +2,7 @@ // Don't copy this `cfg`, it's only needed because this file is within // the warp repository. +// Instead, specify the "tls" feature in your warp dependency declaration. 
#[cfg(feature = "tls")] #[tokio::main] async fn main() { diff --git a/third_party/rust/warp/examples/todos.rs b/third_party/rust/warp/examples/todos.rs index d32db3a24510..904d604e8f45 100644 --- a/third_party/rust/warp/examples/todos.rs +++ b/third_party/rust/warp/examples/todos.rs @@ -241,11 +241,7 @@ mod tests { let resp = request() .method("POST") .path("/todos") - .json(&Todo { - id: 1, - text: "test 1".into(), - completed: false, - }) + .json(&todo1()) .reply(&api) .await; diff --git a/third_party/rust/warp/examples/tracing.rs b/third_party/rust/warp/examples/tracing.rs new file mode 100644 index 000000000000..103f747a9368 --- /dev/null +++ b/third_party/rust/warp/examples/tracing.rs @@ -0,0 +1,59 @@ +//! [`tracing`] is a framework for instrumenting Rust programs to +//! collect scoped, structured, and async-aware diagnostics. This example +//! demonstrates how the `warp::trace` module can be used to instrument `warp` +//! applications with `tracing`. +//! +//! [`tracing`]: https://crates.io/crates/tracing +#![deny(warnings)] +use tracing_subscriber::fmt::format::FmtSpan; +use warp::Filter; + +#[tokio::main] +async fn main() { + // Filter traces based on the RUST_LOG env var, or, if it's not set, + // default to show the output of the example. + let filter = std::env::var("RUST_LOG").unwrap_or_else(|_| "tracing=info,warp=debug".to_owned()); + + // Configure the default `tracing` subscriber. + // The `fmt` subscriber from the `tracing-subscriber` crate logs `tracing` + // events to stdout. Other subscribers are available for integrating with + // distributed tracing systems such as OpenTelemetry. + tracing_subscriber::fmt() + // Use the filter we built above to determine which traces to record. + .with_env_filter(filter) + // Record an event when each span closes. This can be used to time our + // routes' durations! + .with_span_events(FmtSpan::CLOSE) + .init(); + + let hello = warp::path("hello") + .and(warp::get()) + // When the `hello` route is called, emit a `tracing` event. + .map(|| { + tracing::info!("saying hello..."); + "Hello, World!" + }) + // Wrap the route in a `tracing` span to add the route's name as context + // to any events that occur inside it. + .with(warp::trace::named("hello")); + + let goodbye = warp::path("goodbye") + .and(warp::get()) + .map(|| { + tracing::info!("saying goodbye..."); + "So long and thanks for all the fish!" + }) + // We can also provide our own custom `tracing` spans to wrap a route. + .with(warp::trace(|info| { + // Construct our own custom span for this route. + tracing::info_span!("goodbye", req.path = ?info.path()) + })); + + let routes = hello + .or(goodbye) + // Wrap all the routes with a filter that creates a `tracing` span for + // each request we receive, including data about the request. 
+ .with(warp::trace::request()); + + warp::serve(routes).run(([127, 0, 0, 1], 3030)).await; +} diff --git a/third_party/rust/warp/examples/unix_socket.rs b/third_party/rust/warp/examples/unix_socket.rs index 951a28782e5a..521aeead215b 100644 --- a/third_party/rust/warp/examples/unix_socket.rs +++ b/third_party/rust/warp/examples/unix_socket.rs @@ -1,14 +1,22 @@ #![deny(warnings)] -use tokio::net::UnixListener; - +#[cfg(unix)] #[tokio::main] async fn main() { + use tokio::net::UnixListener; + use tokio_stream::wrappers::UnixListenerStream; + pretty_env_logger::init(); - let mut listener = UnixListener::bind("/tmp/warp.sock").unwrap(); - let incoming = listener.incoming(); + let listener = UnixListener::bind("/tmp/warp.sock").unwrap(); + let incoming = UnixListenerStream::new(listener); warp::serve(warp::fs::dir("examples/dir")) .run_incoming(incoming) .await; } + +#[cfg(not(unix))] +#[tokio::main] +async fn main() { + panic!("Must run under Unix-like platform!"); +} diff --git a/third_party/rust/warp/examples/websockets.rs b/third_party/rust/warp/examples/websockets.rs index 6387041f06e5..b0de205743b7 100644 --- a/third_party/rust/warp/examples/websockets.rs +++ b/third_party/rust/warp/examples/websockets.rs @@ -1,6 +1,6 @@ #![deny(warnings)] -use futures::{FutureExt, StreamExt}; +use futures_util::{FutureExt, StreamExt}; use warp::Filter; #[tokio::main] diff --git a/third_party/rust/warp/examples/websockets_chat.rs b/third_party/rust/warp/examples/websockets_chat.rs index 7f4e1d551334..21e2286f6f9f 100644 --- a/third_party/rust/warp/examples/websockets_chat.rs +++ b/third_party/rust/warp/examples/websockets_chat.rs @@ -5,8 +5,9 @@ use std::sync::{ Arc, }; -use futures::{FutureExt, StreamExt}; -use tokio::sync::{mpsc, Mutex}; +use futures_util::{SinkExt, StreamExt, TryFutureExt}; +use tokio::sync::{mpsc, RwLock}; +use tokio_stream::wrappers::UnboundedReceiverStream; use warp::ws::{Message, WebSocket}; use warp::Filter; @@ -17,7 +18,7 @@ static NEXT_USER_ID: AtomicUsize = AtomicUsize::new(1); /// /// - Key is their id /// - Value is a sender of `warp::ws::Message` -type Users = Arc>>>>; +type Users = Arc>>>; #[tokio::main] async fn main() { @@ -25,7 +26,7 @@ async fn main() { // Keep track of all connected users, key is usize, value // is a websocket sender. - let users = Arc::new(Mutex::new(HashMap::new())); + let users = Users::default(); // Turn our "state" into a new Filter... let users = warp::any().map(move || users.clone()); @@ -54,26 +55,30 @@ async fn user_connected(ws: WebSocket, users: Users) { eprintln!("new chat user: {}", my_id); // Split the socket into a sender and receive of messages. - let (user_ws_tx, mut user_ws_rx) = ws.split(); + let (mut user_ws_tx, mut user_ws_rx) = ws.split(); // Use an unbounded channel to handle buffering and flushing of messages // to the websocket... let (tx, rx) = mpsc::unbounded_channel(); - tokio::task::spawn(rx.forward(user_ws_tx).map(|result| { - if let Err(e) = result { - eprintln!("websocket send error: {}", e); + let mut rx = UnboundedReceiverStream::new(rx); + + tokio::task::spawn(async move { + while let Some(message) = rx.next().await { + user_ws_tx + .send(message) + .unwrap_or_else(|e| { + eprintln!("websocket send error: {}", e); + }) + .await; } - })); + }); // Save the sender in our list of connected users. - users.lock().await.insert(my_id, tx); + users.write().await.insert(my_id, tx); // Return a `Future` that is basically a state machine managing // this specific user's connection. 
- // Make an extra clone to give to our disconnection handler... - let users2 = users.clone(); - // Every time the user sends a message, broadcast it to // all other users... while let Some(result) = user_ws_rx.next().await { @@ -89,7 +94,7 @@ async fn user_connected(ws: WebSocket, users: Users) { // user_ws_rx stream will keep processing as long as the user stays // connected. Once they disconnect, then... - user_disconnected(my_id, &users2).await; + user_disconnected(my_id, &users).await; } async fn user_message(my_id: usize, msg: Message, users: &Users) { @@ -103,12 +108,9 @@ async fn user_message(my_id: usize, msg: Message, users: &Users) { let new_msg = format!(": {}", my_id, msg); // New message from this user, send it to everyone else (except same uid)... - // - // We use `retain` instead of a for loop so that we can reap any user that - // appears to have disconnected. - for (&uid, tx) in users.lock().await.iter_mut() { + for (&uid, tx) in users.read().await.iter() { if my_id != uid { - if let Err(_disconnected) = tx.send(Ok(Message::text(new_msg.clone()))) { + if let Err(_disconnected) = tx.send(Message::text(new_msg.clone())) { // The tx is disconnected, our `user_disconnected` code // should be happening in another task, nothing more to // do here. @@ -121,42 +123,47 @@ async fn user_disconnected(my_id: usize, users: &Users) { eprintln!("good bye user: {}", my_id); // Stream closed up, so remove from the user list - users.lock().await.remove(&my_id); + users.write().await.remove(&my_id); } -static INDEX_HTML: &str = r#" - - +static INDEX_HTML: &str = r#" + Warp Chat -

[INDEX_HTML markup not preserved here; the visible changes rename the chat heading from "warp chat" to "Warp chat" and add a "Connecting..." placeholder to the chat area.]
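The sse_chat and websockets_chat changes above repeat one tokio 0.2 → 1.0 pattern: channel receivers (and listeners such as `UnixListener`) no longer implement `Stream` themselves, so they are wrapped in the adapters from `tokio-stream` before `StreamExt` combinators are applied. A minimal standalone sketch of that pattern, not taken from the patch (the `u32` payload is purely illustrative):

```rust
use tokio::sync::mpsc;
use tokio_stream::{wrappers::UnboundedReceiverStream, StreamExt};

#[tokio::main]
async fn main() {
    // tokio 1.x receivers are not streams; the wrapper restores `Stream`.
    let (tx, rx) = mpsc::unbounded_channel::<u32>();
    let mut rx = UnboundedReceiverStream::new(rx);

    tx.send(1).expect("receiver is alive");
    tx.send(2).expect("receiver is alive");
    drop(tx); // closing the sender ends the stream

    while let Some(n) = rx.next().await {
        println!("received {}", n);
    }
}
```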

+///     Note: This function requires the Rust standard library.
+///     no_std users should use set_global_default instead.
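The doc-comment fragment above appears to come from the documentation of tracing's default-dispatcher API, which warp 0.3 now pulls in. A hedged sketch of the two installation paths it contrasts, using the `tracing::subscriber` convenience functions and `tracing-subscriber`'s `FmtSubscriber` (names taken from those crates, not from this patch):

```rust
use tracing::subscriber;
use tracing_subscriber::FmtSubscriber;

fn main() {
    // Scoped default: active only while the guard is alive; relies on
    // thread-local storage, hence the standard-library requirement.
    let guard = subscriber::set_default(FmtSubscriber::new());
    tracing::info!("recorded by the scoped default");
    drop(guard);

    // Process-wide default: the no_std-friendly path, settable only once.
    subscriber::set_global_default(FmtSubscriber::new())
        .expect("failed to install global default");
    tracing::info!("recorded by the global default");
}
```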

(path: P) -> io::Result - where - P: AsRef, - { - let stream = mio_uds::UnixStream::connect(path)?; - let stream = UnixStream::new(stream)?; - - poll_fn(|cx| stream.io.poll_write_ready(cx)).await?; - Ok(stream) - } - - /// Consumes a `UnixStream` in the standard library and returns a - /// nonblocking `UnixStream` from this crate. - /// - /// The returned stream will be associated with the given event loop - /// specified by `handle` and is ready to perform I/O. - /// - /// # Panics - /// - /// This function panics if thread-local runtime is not set. - /// - /// The runtime is usually set implicitly when this function is called - /// from a future driven by a tokio runtime, otherwise runtime can be set - /// explicitly with [`Handle::enter`](crate::runtime::Handle::enter) function. - pub fn from_std(stream: net::UnixStream) -> io::Result { - let stream = mio_uds::UnixStream::from_stream(stream)?; - let io = PollEvented::new(stream)?; - - Ok(UnixStream { io }) - } - - /// Creates an unnamed pair of connected sockets. - /// - /// This function will create a pair of interconnected Unix sockets for - /// communicating back and forth between one another. Each socket will - /// be associated with the default event loop's handle. - pub fn pair() -> io::Result<(UnixStream, UnixStream)> { - let (a, b) = mio_uds::UnixStream::pair()?; - let a = UnixStream::new(a)?; - let b = UnixStream::new(b)?; - - Ok((a, b)) - } - - pub(crate) fn new(stream: mio_uds::UnixStream) -> io::Result { - let io = PollEvented::new(stream)?; - Ok(UnixStream { io }) - } - - /// Returns the socket address of the local half of this connection. - pub fn local_addr(&self) -> io::Result { - self.io.get_ref().local_addr() - } - - /// Returns the socket address of the remote half of this connection. - pub fn peer_addr(&self) -> io::Result { - self.io.get_ref().peer_addr() - } - - /// Returns effective credentials of the process which called `connect` or `pair`. - pub fn peer_cred(&self) -> io::Result { - ucred::get_peer_cred(self) - } - - /// Returns the value of the `SO_ERROR` option. - pub fn take_error(&self) -> io::Result> { - self.io.get_ref().take_error() - } - - /// Shuts down the read, write, or both halves of this connection. - /// - /// This function will cause all pending and future I/O calls on the - /// specified portions to immediately return with an appropriate value - /// (see the documentation of `Shutdown`). - pub fn shutdown(&self, how: Shutdown) -> io::Result<()> { - self.io.get_ref().shutdown(how) - } - - // These lifetime markers also appear in the generated documentation, and make - // it more clear that this is a *borrowed* split. - #[allow(clippy::needless_lifetimes)] - /// Split a `UnixStream` into a read half and a write half, which can be used - /// to read and write the stream concurrently. - /// - /// This method is more efficient than [`into_split`], but the halves cannot be - /// moved into independently spawned tasks. - /// - /// [`into_split`]: Self::into_split() - pub fn split<'a>(&'a mut self) -> (ReadHalf<'a>, WriteHalf<'a>) { - split(self) - } - - /// Splits a `UnixStream` into a read half and a write half, which can be used - /// to read and write the stream concurrently. - /// - /// Unlike [`split`], the owned halves can be moved to separate tasks, however - /// this comes at the cost of a heap allocation. - /// - /// **Note:** Dropping the write half will shut down the write half of the - /// stream. This is equivalent to calling [`shutdown(Write)`] on the `UnixStream`. 
- /// - /// [`split`]: Self::split() - /// [`shutdown(Write)`]: fn@Self::shutdown - pub fn into_split(self) -> (OwnedReadHalf, OwnedWriteHalf) { - split_owned(self) - } -} - -impl TryFrom for mio_uds::UnixStream { - type Error = io::Error; - - /// Consumes value, returning the mio I/O object. - /// - /// See [`PollEvented::into_inner`] for more details about - /// resource deregistration that happens during the call. - /// - /// [`PollEvented::into_inner`]: crate::io::PollEvented::into_inner - fn try_from(value: UnixStream) -> Result { - value.io.into_inner() - } -} - -impl TryFrom for UnixStream { - type Error = io::Error; - - /// Consumes stream, returning the tokio I/O object. - /// - /// This is equivalent to - /// [`UnixStream::from_std(stream)`](UnixStream::from_std). - fn try_from(stream: net::UnixStream) -> io::Result { - Self::from_std(stream) - } -} - -impl AsyncRead for UnixStream { - unsafe fn prepare_uninitialized_buffer(&self, _: &mut [MaybeUninit]) -> bool { - false - } - - fn poll_read( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { - self.poll_read_priv(cx, buf) - } -} - -impl AsyncWrite for UnixStream { - fn poll_write( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - self.poll_write_priv(cx, buf) - } - - fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } - - fn poll_shutdown(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - self.shutdown(std::net::Shutdown::Write)?; - Poll::Ready(Ok(())) - } -} - -impl UnixStream { - // == Poll IO functions that takes `&self` == - // - // They are not public because (taken from the doc of `PollEvented`): - // - // While `PollEvented` is `Sync` (if the underlying I/O type is `Sync`), the - // caller must ensure that there are at most two tasks that use a - // `PollEvented` instance concurrently. One for reading and one for writing. - // While violating this requirement is "safe" from a Rust memory model point - // of view, it will result in unexpected behavior in the form of lost - // notifications and tasks hanging. 
- - pub(crate) fn poll_read_priv( - &self, - cx: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { - ready!(self.io.poll_read_ready(cx, mio::Ready::readable()))?; - - match self.io.get_ref().read(buf) { - Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - self.io.clear_read_ready(cx, mio::Ready::readable())?; - Poll::Pending - } - x => Poll::Ready(x), - } - } - - pub(crate) fn poll_write_priv( - &self, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - ready!(self.io.poll_write_ready(cx))?; - - match self.io.get_ref().write(buf) { - Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - self.io.clear_write_ready(cx)?; - Poll::Pending - } - x => Poll::Ready(x), - } - } -} - -impl fmt::Debug for UnixStream { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.io.get_ref().fmt(f) - } -} - -impl AsRawFd for UnixStream { - fn as_raw_fd(&self) -> RawFd { - self.io.get_ref().as_raw_fd() - } -} diff --git a/third_party/rust/tokio-0.2.25/src/net/unix/ucred.rs b/third_party/rust/tokio-0.2.25/src/net/unix/ucred.rs deleted file mode 100644 index 466aedc21fe5..000000000000 --- a/third_party/rust/tokio-0.2.25/src/net/unix/ucred.rs +++ /dev/null @@ -1,151 +0,0 @@ -use libc::{gid_t, uid_t}; - -/// Credentials of a process -#[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)] -pub struct UCred { - /// UID (user ID) of the process - pub uid: uid_t, - /// GID (group ID) of the process - pub gid: gid_t, -} - -#[cfg(any(target_os = "linux", target_os = "android"))] -pub(crate) use self::impl_linux::get_peer_cred; - -#[cfg(any( - target_os = "dragonfly", - target_os = "macos", - target_os = "ios", - target_os = "freebsd", - target_os = "netbsd", - target_os = "openbsd" -))] -pub(crate) use self::impl_macos::get_peer_cred; - -#[cfg(any(target_os = "solaris", target_os = "illumos"))] -pub(crate) use self::impl_solaris::get_peer_cred; - -#[cfg(any(target_os = "linux", target_os = "android"))] -pub(crate) mod impl_linux { - use crate::net::unix::UnixStream; - - use libc::{c_void, getsockopt, socklen_t, SOL_SOCKET, SO_PEERCRED}; - use std::{io, mem}; - - use libc::ucred; - - pub(crate) fn get_peer_cred(sock: &UnixStream) -> io::Result { - use std::os::unix::io::AsRawFd; - - unsafe { - let raw_fd = sock.as_raw_fd(); - - let mut ucred = ucred { - pid: 0, - uid: 0, - gid: 0, - }; - - let ucred_size = mem::size_of::(); - - // These paranoid checks should be optimized-out - assert!(mem::size_of::() <= mem::size_of::()); - assert!(ucred_size <= u32::max_value() as usize); - - let mut ucred_size = ucred_size as socklen_t; - - let ret = getsockopt( - raw_fd, - SOL_SOCKET, - SO_PEERCRED, - &mut ucred as *mut ucred as *mut c_void, - &mut ucred_size, - ); - if ret == 0 && ucred_size as usize == mem::size_of::() { - Ok(super::UCred { - uid: ucred.uid, - gid: ucred.gid, - }) - } else { - Err(io::Error::last_os_error()) - } - } - } -} - -#[cfg(any( - target_os = "dragonfly", - target_os = "macos", - target_os = "ios", - target_os = "freebsd", - target_os = "netbsd", - target_os = "openbsd" -))] -pub(crate) mod impl_macos { - use crate::net::unix::UnixStream; - - use libc::getpeereid; - use std::io; - use std::mem::MaybeUninit; - use std::os::unix::io::AsRawFd; - - pub(crate) fn get_peer_cred(sock: &UnixStream) -> io::Result { - unsafe { - let raw_fd = sock.as_raw_fd(); - - let mut uid = MaybeUninit::uninit(); - let mut gid = MaybeUninit::uninit(); - - let ret = getpeereid(raw_fd, uid.as_mut_ptr(), gid.as_mut_ptr()); - - if ret == 0 { - Ok(super::UCred { - uid: uid.assume_init(), - gid: 
gid.assume_init(), - }) - } else { - Err(io::Error::last_os_error()) - } - } - } -} - -#[cfg(any(target_os = "solaris", target_os = "illumos"))] -pub(crate) mod impl_solaris { - use crate::net::unix::UnixStream; - use std::io; - use std::os::unix::io::AsRawFd; - use std::ptr; - - #[allow(non_camel_case_types)] - enum ucred_t {} - - extern "C" { - fn ucred_free(cred: *mut ucred_t); - fn ucred_geteuid(cred: *const ucred_t) -> super::uid_t; - fn ucred_getegid(cred: *const ucred_t) -> super::gid_t; - - fn getpeerucred(fd: std::os::raw::c_int, cred: *mut *mut ucred_t) -> std::os::raw::c_int; - } - - pub(crate) fn get_peer_cred(sock: &UnixStream) -> io::Result { - unsafe { - let raw_fd = sock.as_raw_fd(); - - let mut cred = ptr::null_mut::<*mut ucred_t>() as *mut ucred_t; - - let ret = getpeerucred(raw_fd, &mut cred); - - if ret == 0 { - let uid = ucred_geteuid(cred); - let gid = ucred_getegid(cred); - - ucred_free(cred); - - Ok(super::UCred { uid, gid }) - } else { - Err(io::Error::last_os_error()) - } - } - } -} diff --git a/third_party/rust/tokio-0.2.25/src/park/either.rs b/third_party/rust/tokio-0.2.25/src/park/either.rs deleted file mode 100644 index c66d1213125b..000000000000 --- a/third_party/rust/tokio-0.2.25/src/park/either.rs +++ /dev/null @@ -1,72 +0,0 @@ -use crate::park::{Park, Unpark}; - -use std::fmt; -use std::time::Duration; - -pub(crate) enum Either { - A(A), - B(B), -} - -impl Park for Either -where - A: Park, - B: Park, -{ - type Unpark = Either; - type Error = Either; - - fn unpark(&self) -> Self::Unpark { - match self { - Either::A(a) => Either::A(a.unpark()), - Either::B(b) => Either::B(b.unpark()), - } - } - - fn park(&mut self) -> Result<(), Self::Error> { - match self { - Either::A(a) => a.park().map_err(Either::A), - Either::B(b) => b.park().map_err(Either::B), - } - } - - fn park_timeout(&mut self, duration: Duration) -> Result<(), Self::Error> { - match self { - Either::A(a) => a.park_timeout(duration).map_err(Either::A), - Either::B(b) => b.park_timeout(duration).map_err(Either::B), - } - } - - fn shutdown(&mut self) { - match self { - Either::A(a) => a.shutdown(), - Either::B(b) => b.shutdown(), - } - } -} - -impl Unpark for Either -where - A: Unpark, - B: Unpark, -{ - fn unpark(&self) { - match self { - Either::A(a) => a.unpark(), - Either::B(b) => b.unpark(), - } - } -} - -impl fmt::Debug for Either -where - A: fmt::Debug, - B: fmt::Debug, -{ - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - Either::A(a) => a.fmt(fmt), - Either::B(b) => b.fmt(fmt), - } - } -} diff --git a/third_party/rust/tokio-0.2.25/src/park/mod.rs b/third_party/rust/tokio-0.2.25/src/park/mod.rs deleted file mode 100644 index 2cfef8c2dd88..000000000000 --- a/third_party/rust/tokio-0.2.25/src/park/mod.rs +++ /dev/null @@ -1,121 +0,0 @@ -//! Abstraction over blocking and unblocking the current thread. -//! -//! Provides an abstraction over blocking the current thread. This is similar to -//! the park / unpark constructs provided by `std` but made generic. This allows -//! embedding custom functionality to perform when the thread is blocked. -//! -//! A blocked `Park` instance is unblocked by calling `unpark` on its -//! `Unpark` handle. -//! -//! The `ParkThread` struct implements `Park` using `thread::park` to put the -//! thread to sleep. The Tokio reactor also implements park, but uses -//! `mio::Poll` to block the thread instead. -//! -//! The `Park` trait is composable. A timer implementation might decorate a -//! 
`Park` implementation by checking if any timeouts have elapsed after the -//! inner `Park` implementation unblocks. -//! -//! # Model -//! -//! Conceptually, each `Park` instance has an associated token, which is -//! initially not present: -//! -//! * The `park` method blocks the current thread unless or until the token is -//! available, at which point it atomically consumes the token. -//! * The `unpark` method atomically makes the token available if it wasn't -//! already. -//! -//! Some things to note: -//! -//! * If `unpark` is called before `park`, the next call to `park` will -//! **not** block the thread. -//! * **Spurious** wakeups are permitted, i.e., the `park` method may unblock -//! even if `unpark` was not called. -//! * `park_timeout` does the same as `park` but allows specifying a maximum -//! time to block the thread for. - -cfg_resource_drivers! { - mod either; - pub(crate) use self::either::Either; -} - -mod thread; -pub(crate) use self::thread::ParkThread; - -cfg_block_on! { - pub(crate) use self::thread::{CachedParkThread, ParkError}; -} - -use std::sync::Arc; -use std::time::Duration; - -/// Block the current thread. -pub(crate) trait Park { - /// Unpark handle type for the `Park` implementation. - type Unpark: Unpark; - - /// Error returned by `park` - type Error; - - /// Gets a new `Unpark` handle associated with this `Park` instance. - fn unpark(&self) -> Self::Unpark; - - /// Blocks the current thread unless or until the token is available. - /// - /// A call to `park` does not guarantee that the thread will remain blocked - /// forever, and callers should be prepared for this possibility. This - /// function may wakeup spuriously for any reason. - /// - /// # Panics - /// - /// This function **should** not panic, but ultimately, panics are left as - /// an implementation detail. Refer to the documentation for the specific - /// `Park` implementation - fn park(&mut self) -> Result<(), Self::Error>; - - /// Parks the current thread for at most `duration`. - /// - /// This function is the same as `park` but allows specifying a maximum time - /// to block the thread for. - /// - /// Same as `park`, there is no guarantee that the thread will remain - /// blocked for any amount of time. Spurious wakeups are permitted for any - /// reason. - /// - /// # Panics - /// - /// This function **should** not panic, but ultimately, panics are left as - /// an implementation detail. Refer to the documentation for the specific - /// `Park` implementation - fn park_timeout(&mut self, duration: Duration) -> Result<(), Self::Error>; - - /// Release all resources holded by the parker for proper leak-free shutdown - fn shutdown(&mut self); -} - -/// Unblock a thread blocked by the associated `Park` instance. -pub(crate) trait Unpark: Sync + Send + 'static { - /// Unblocks a thread that is blocked by the associated `Park` handle. - /// - /// Calling `unpark` atomically makes available the unpark token, if it is - /// not already available. - /// - /// # Panics - /// - /// This function **should** not panic, but ultimately, panics are left as - /// an implementation detail. 
Refer to the documentation for the specific - /// `Unpark` implementation - fn unpark(&self); -} - -impl Unpark for Box { - fn unpark(&self) { - (**self).unpark() - } -} - -impl Unpark for Arc { - fn unpark(&self) { - (**self).unpark() - } -} diff --git a/third_party/rust/tokio-0.2.25/src/park/thread.rs b/third_party/rust/tokio-0.2.25/src/park/thread.rs deleted file mode 100644 index 44174d3519fb..000000000000 --- a/third_party/rust/tokio-0.2.25/src/park/thread.rs +++ /dev/null @@ -1,329 +0,0 @@ -use crate::loom::sync::atomic::AtomicUsize; -use crate::loom::sync::{Arc, Condvar, Mutex}; -use crate::park::{Park, Unpark}; - -use std::sync::atomic::Ordering::SeqCst; -use std::time::Duration; - -#[derive(Debug)] -pub(crate) struct ParkThread { - inner: Arc, -} - -pub(crate) type ParkError = (); - -/// Unblocks a thread that was blocked by `ParkThread`. -#[derive(Clone, Debug)] -pub(crate) struct UnparkThread { - inner: Arc, -} - -#[derive(Debug)] -struct Inner { - state: AtomicUsize, - mutex: Mutex<()>, - condvar: Condvar, -} - -const EMPTY: usize = 0; -const PARKED: usize = 1; -const NOTIFIED: usize = 2; - -thread_local! { - static CURRENT_PARKER: ParkThread = ParkThread::new(); -} - -// ==== impl ParkThread ==== - -impl ParkThread { - pub(crate) fn new() -> Self { - Self { - inner: Arc::new(Inner { - state: AtomicUsize::new(EMPTY), - mutex: Mutex::new(()), - condvar: Condvar::new(), - }), - } - } -} - -impl Park for ParkThread { - type Unpark = UnparkThread; - type Error = ParkError; - - fn unpark(&self) -> Self::Unpark { - let inner = self.inner.clone(); - UnparkThread { inner } - } - - fn park(&mut self) -> Result<(), Self::Error> { - self.inner.park(); - Ok(()) - } - - fn park_timeout(&mut self, duration: Duration) -> Result<(), Self::Error> { - self.inner.park_timeout(duration); - Ok(()) - } - - fn shutdown(&mut self) { - self.inner.shutdown(); - } -} - -// ==== impl Inner ==== - -impl Inner { - /// Park the current thread for at most `dur`. - fn park(&self) { - // If we were previously notified then we consume this notification and - // return quickly. - if self - .state - .compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst) - .is_ok() - { - return; - } - - // Otherwise we need to coordinate going to sleep - let mut m = self.mutex.lock().unwrap(); - - match self.state.compare_exchange(EMPTY, PARKED, SeqCst, SeqCst) { - Ok(_) => {} - Err(NOTIFIED) => { - // We must read here, even though we know it will be `NOTIFIED`. - // This is because `unpark` may have been called again since we read - // `NOTIFIED` in the `compare_exchange` above. We must perform an - // acquire operation that synchronizes with that `unpark` to observe - // any writes it made before the call to unpark. To do that we must - // read from the write it made to `state`. - let old = self.state.swap(EMPTY, SeqCst); - debug_assert_eq!(old, NOTIFIED, "park state changed unexpectedly"); - - return; - } - Err(actual) => panic!("inconsistent park state; actual = {}", actual), - } - - loop { - m = self.condvar.wait(m).unwrap(); - - if self - .state - .compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst) - .is_ok() - { - // got a notification - return; - } - - // spurious wakeup, go back to sleep - } - } - - fn park_timeout(&self, dur: Duration) { - // Like `park` above we have a fast path for an already-notified thread, - // and afterwards we start coordinating for a sleep. Return quickly. 
- if self - .state - .compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst) - .is_ok() - { - return; - } - - if dur == Duration::from_millis(0) { - return; - } - - let m = self.mutex.lock().unwrap(); - - match self.state.compare_exchange(EMPTY, PARKED, SeqCst, SeqCst) { - Ok(_) => {} - Err(NOTIFIED) => { - // We must read again here, see `park`. - let old = self.state.swap(EMPTY, SeqCst); - debug_assert_eq!(old, NOTIFIED, "park state changed unexpectedly"); - - return; - } - Err(actual) => panic!("inconsistent park_timeout state; actual = {}", actual), - } - - // Wait with a timeout, and if we spuriously wake up or otherwise wake up - // from a notification, we just want to unconditionally set the state back to - // empty, either consuming a notification or un-flagging ourselves as - // parked. - let (_m, _result) = self.condvar.wait_timeout(m, dur).unwrap(); - - match self.state.swap(EMPTY, SeqCst) { - NOTIFIED => {} // got a notification, hurray! - PARKED => {} // no notification, alas - n => panic!("inconsistent park_timeout state: {}", n), - } - } - - fn unpark(&self) { - // To ensure the unparked thread will observe any writes we made before - // this call, we must perform a release operation that `park` can - // synchronize with. To do that we must write `NOTIFIED` even if `state` - // is already `NOTIFIED`. That is why this must be a swap rather than a - // compare-and-swap that returns if it reads `NOTIFIED` on failure. - match self.state.swap(NOTIFIED, SeqCst) { - EMPTY => return, // no one was waiting - NOTIFIED => return, // already unparked - PARKED => {} // gotta go wake someone up - _ => panic!("inconsistent state in unpark"), - } - - // There is a period between when the parked thread sets `state` to - // `PARKED` (or last checked `state` in the case of a spurious wake - // up) and when it actually waits on `cvar`. If we were to notify - // during this period it would be ignored and then when the parked - // thread went to sleep it would never wake up. Fortunately, it has - // `lock` locked at this stage so we can acquire `lock` to wait until - // it is ready to receive the notification. - // - // Releasing `lock` before the call to `notify_one` means that when the - // parked thread wakes it doesn't get woken only to have to wait for us - // to release `lock`. - drop(self.mutex.lock().unwrap()); - - self.condvar.notify_one() - } - - fn shutdown(&self) { - self.condvar.notify_all(); - } -} - -impl Default for ParkThread { - fn default() -> Self { - Self::new() - } -} - -// ===== impl UnparkThread ===== - -impl Unpark for UnparkThread { - fn unpark(&self) { - self.inner.unpark(); - } -} - -cfg_block_on! { - use std::marker::PhantomData; - use std::rc::Rc; - - use std::mem; - use std::task::{RawWaker, RawWakerVTable, Waker}; - - /// Blocks the current thread using a condition variable. - #[derive(Debug)] - pub(crate) struct CachedParkThread { - _anchor: PhantomData>, - } - - impl CachedParkThread { - /// Create a new `ParkThread` handle for the current thread. - /// - /// This type cannot be moved to other threads, so it should be created on - /// the thread that the caller intends to park. - pub(crate) fn new() -> CachedParkThread { - CachedParkThread { - _anchor: PhantomData, - } - } - - pub(crate) fn get_unpark(&self) -> Result { - self.with_current(|park_thread| park_thread.unpark()) - } - - /// Get a reference to the `ParkThread` handle for this thread. 
- fn with_current(&self, f: F) -> Result - where - F: FnOnce(&ParkThread) -> R, - { - CURRENT_PARKER.try_with(|inner| f(inner)) - .map_err(|_| ()) - } - } - - impl Park for CachedParkThread { - type Unpark = UnparkThread; - type Error = ParkError; - - fn unpark(&self) -> Self::Unpark { - self.get_unpark().unwrap() - } - - fn park(&mut self) -> Result<(), Self::Error> { - self.with_current(|park_thread| park_thread.inner.park())?; - Ok(()) - } - - fn park_timeout(&mut self, duration: Duration) -> Result<(), Self::Error> { - self.with_current(|park_thread| park_thread.inner.park_timeout(duration))?; - Ok(()) - } - - fn shutdown(&mut self) { - let _ = self.with_current(|park_thread| park_thread.inner.shutdown()); - } - } - - - impl UnparkThread { - pub(crate) fn into_waker(self) -> Waker { - unsafe { - let raw = unparker_to_raw_waker(self.inner); - Waker::from_raw(raw) - } - } - } - - impl Inner { - #[allow(clippy::wrong_self_convention)] - fn into_raw(this: Arc) -> *const () { - Arc::into_raw(this) as *const () - } - - unsafe fn from_raw(ptr: *const ()) -> Arc { - Arc::from_raw(ptr as *const Inner) - } - } - - unsafe fn unparker_to_raw_waker(unparker: Arc) -> RawWaker { - RawWaker::new( - Inner::into_raw(unparker), - &RawWakerVTable::new(clone, wake, wake_by_ref, drop_waker), - ) - } - - unsafe fn clone(raw: *const ()) -> RawWaker { - let unparker = Inner::from_raw(raw); - - // Increment the ref count - mem::forget(unparker.clone()); - - unparker_to_raw_waker(unparker) - } - - unsafe fn drop_waker(raw: *const ()) { - let _ = Inner::from_raw(raw); - } - - unsafe fn wake(raw: *const ()) { - let unparker = Inner::from_raw(raw); - unparker.unpark(); - } - - unsafe fn wake_by_ref(raw: *const ()) { - let unparker = Inner::from_raw(raw); - unparker.unpark(); - - // We don't actually own a reference to the unparker - mem::forget(unparker); - } -} diff --git a/third_party/rust/tokio-0.2.25/src/prelude.rs b/third_party/rust/tokio-0.2.25/src/prelude.rs deleted file mode 100644 index 1909f9da6aed..000000000000 --- a/third_party/rust/tokio-0.2.25/src/prelude.rs +++ /dev/null @@ -1,21 +0,0 @@ -#![cfg(not(loom))] - -//! A "prelude" for users of the `tokio` crate. -//! -//! This prelude is similar to the standard library's prelude in that you'll -//! almost always want to import its entire contents, but unlike the standard -//! library's prelude you'll have to do so manually: -//! -//! ``` -//! # #![allow(warnings)] -//! use tokio::prelude::*; -//! ``` -//! -//! The prelude may grow over time as additional items see ubiquitous use. - -pub use crate::io::{self, AsyncBufRead, AsyncRead, AsyncWrite}; - -cfg_io_util! { - #[doc(no_inline)] - pub use crate::io::{AsyncBufReadExt as _, AsyncReadExt as _, AsyncSeekExt as _, AsyncWriteExt as _}; -} diff --git a/third_party/rust/tokio-0.2.25/src/process/kill.rs b/third_party/rust/tokio-0.2.25/src/process/kill.rs deleted file mode 100644 index a1f165228191..000000000000 --- a/third_party/rust/tokio-0.2.25/src/process/kill.rs +++ /dev/null @@ -1,13 +0,0 @@ -use std::io; - -/// An interface for killing a running process. -pub(crate) trait Kill { - /// Forcefully kills the process. 
- fn kill(&mut self) -> io::Result<()>; -} - -impl Kill for &mut T { - fn kill(&mut self) -> io::Result<()> { - (**self).kill() - } -} diff --git a/third_party/rust/tokio-0.2.25/src/process/mod.rs b/third_party/rust/tokio-0.2.25/src/process/mod.rs deleted file mode 100644 index 4a070023b066..000000000000 --- a/third_party/rust/tokio-0.2.25/src/process/mod.rs +++ /dev/null @@ -1,1125 +0,0 @@ -//! An implementation of asynchronous process management for Tokio. -//! -//! This module provides a [`Command`] struct that imitates the interface of the -//! [`std::process::Command`] type in the standard library, but provides asynchronous versions of -//! functions that create processes. These functions (`spawn`, `status`, `output` and their -//! variants) return "future aware" types that interoperate with Tokio. The asynchronous process -//! support is provided through signal handling on Unix and system APIs on Windows. -//! -//! [`std::process::Command`]: std::process::Command -//! -//! # Examples -//! -//! Here's an example program which will spawn `echo hello world` and then wait -//! for it complete. -//! -//! ```no_run -//! use tokio::process::Command; -//! -//! #[tokio::main] -//! async fn main() -> Result<(), Box> { -//! // The usage is the same as with the standard library's `Command` type, however the value -//! // returned from `spawn` is a `Result` containing a `Future`. -//! let child = Command::new("echo").arg("hello").arg("world") -//! .spawn(); -//! -//! // Make sure our child succeeded in spawning and process the result -//! let future = child.expect("failed to spawn"); -//! -//! // Await until the future (and the command) completes -//! let status = future.await?; -//! println!("the command exited with: {}", status); -//! Ok(()) -//! } -//! ``` -//! -//! Next, let's take a look at an example where we not only spawn `echo hello -//! world` but we also capture its output. -//! -//! ```no_run -//! use tokio::process::Command; -//! -//! #[tokio::main] -//! async fn main() -> Result<(), Box> { -//! // Like above, but use `output` which returns a future instead of -//! // immediately returning the `Child`. -//! let output = Command::new("echo").arg("hello").arg("world") -//! .output(); -//! -//! let output = output.await?; -//! -//! assert!(output.status.success()); -//! assert_eq!(output.stdout, b"hello world\n"); -//! Ok(()) -//! } -//! ``` -//! -//! We can also read input line by line. -//! -//! ```no_run -//! use tokio::io::{BufReader, AsyncBufReadExt}; -//! use tokio::process::Command; -//! -//! use std::process::Stdio; -//! -//! #[tokio::main] -//! async fn main() -> Result<(), Box> { -//! let mut cmd = Command::new("cat"); -//! -//! // Specify that we want the command's standard output piped back to us. -//! // By default, standard input/output/error will be inherited from the -//! // current process (for example, this means that standard input will -//! // come from the keyboard and standard output/error will go directly to -//! // the terminal if this process is invoked from the command line). -//! cmd.stdout(Stdio::piped()); -//! -//! let mut child = cmd.spawn() -//! .expect("failed to spawn command"); -//! -//! let stdout = child.stdout.take() -//! .expect("child did not have a handle to stdout"); -//! -//! let mut reader = BufReader::new(stdout).lines(); -//! -//! // Ensure the child process is spawned in the runtime so it can -//! // make progress on its own while we await for any output. -//! tokio::spawn(async { -//! let status = child.await -//! 
.expect("child process encountered an error"); -//! -//! println!("child status was: {}", status); -//! }); -//! -//! while let Some(line) = reader.next_line().await? { -//! println!("Line: {}", line); -//! } -//! -//! Ok(()) -//! } -//! ``` -//! -//! # Caveats -//! -//! Similar to the behavior to the standard library, and unlike the futures -//! paradigm of dropping-implies-cancellation, a spawned process will, by -//! default, continue to execute even after the `Child` handle has been dropped. -//! -//! The `Command::kill_on_drop` method can be used to modify this behavior -//! and kill the child process if the `Child` wrapper is dropped before it -//! has exited. -//! -//! [`Command`]: crate::process::Command - -#[path = "unix/mod.rs"] -#[cfg(unix)] -mod imp; - -#[path = "windows.rs"] -#[cfg(windows)] -mod imp; - -mod kill; - -use crate::io::{AsyncRead, AsyncWrite}; -use crate::process::kill::Kill; - -use std::ffi::OsStr; -use std::future::Future; -use std::io; -#[cfg(unix)] -use std::os::unix::process::CommandExt; -#[cfg(windows)] -use std::os::windows::process::CommandExt; -use std::path::Path; -use std::pin::Pin; -use std::process::{Command as StdCommand, ExitStatus, Output, Stdio}; -use std::task::Context; -use std::task::Poll; - -/// This structure mimics the API of [`std::process::Command`] found in the standard library, but -/// replaces functions that create a process with an asynchronous variant. The main provided -/// asynchronous functions are [spawn](Command::spawn), [status](Command::status), and -/// [output](Command::output). -/// -/// `Command` uses asynchronous versions of some `std` types (for example [`Child`]). -/// -/// [`std::process::Command`]: std::process::Command -/// [`Child`]: struct@Child -#[derive(Debug)] -pub struct Command { - std: StdCommand, - kill_on_drop: bool, -} - -pub(crate) struct SpawnedChild { - child: imp::Child, - stdin: Option, - stdout: Option, - stderr: Option, -} - -impl Command { - /// Constructs a new `Command` for launching the program at - /// path `program`, with the following default configuration: - /// - /// * No arguments to the program - /// * Inherit the current process's environment - /// * Inherit the current process's working directory - /// * Inherit stdin/stdout/stderr for `spawn` or `status`, but create pipes for `output` - /// - /// Builder methods are provided to change these defaults and - /// otherwise configure the process. - /// - /// If `program` is not an absolute path, the `PATH` will be searched in - /// an OS-defined way. - /// - /// The search path to be used may be controlled by setting the - /// `PATH` environment variable on the Command, - /// but this has some implementation limitations on Windows - /// (see issue [rust-lang/rust#37519]). - /// - /// # Examples - /// - /// Basic usage: - /// - /// ```no_run - /// use tokio::process::Command; - /// let command = Command::new("sh"); - /// ``` - /// - /// [rust-lang/rust#37519]: https://github.com/rust-lang/rust/issues/37519 - pub fn new>(program: S) -> Command { - Self::from(StdCommand::new(program)) - } - - /// Adds an argument to pass to the program. - /// - /// Only one argument can be passed per use. So instead of: - /// - /// ```no_run - /// tokio::process::Command::new("sh") - /// .arg("-C /path/to/repo"); - /// ``` - /// - /// usage would be: - /// - /// ```no_run - /// tokio::process::Command::new("sh") - /// .arg("-C") - /// .arg("/path/to/repo"); - /// ``` - /// - /// To pass multiple arguments see [`args`]. 
- /// - /// [`args`]: method@Self::args - /// - /// # Examples - /// - /// Basic usage: - /// - /// ```no_run - /// use tokio::process::Command; - /// - /// let command = Command::new("ls") - /// .arg("-l") - /// .arg("-a"); - /// ``` - pub fn arg>(&mut self, arg: S) -> &mut Command { - self.std.arg(arg); - self - } - - /// Adds multiple arguments to pass to the program. - /// - /// To pass a single argument see [`arg`]. - /// - /// [`arg`]: method@Self::arg - /// - /// # Examples - /// - /// Basic usage: - /// - /// ```no_run - /// use tokio::process::Command; - /// - /// let command = Command::new("ls") - /// .args(&["-l", "-a"]); - /// ``` - pub fn args(&mut self, args: I) -> &mut Command - where - I: IntoIterator, - S: AsRef, - { - self.std.args(args); - self - } - - /// Inserts or updates an environment variable mapping. - /// - /// Note that environment variable names are case-insensitive (but case-preserving) on Windows, - /// and case-sensitive on all other platforms. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ```no_run - /// use tokio::process::Command; - /// - /// let command = Command::new("ls") - /// .env("PATH", "/bin"); - /// ``` - pub fn env(&mut self, key: K, val: V) -> &mut Command - where - K: AsRef, - V: AsRef, - { - self.std.env(key, val); - self - } - - /// Adds or updates multiple environment variable mappings. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ```no_run - /// use tokio::process::Command; - /// use std::process::{Stdio}; - /// use std::env; - /// use std::collections::HashMap; - /// - /// let filtered_env : HashMap = - /// env::vars().filter(|&(ref k, _)| - /// k == "TERM" || k == "TZ" || k == "LANG" || k == "PATH" - /// ).collect(); - /// - /// let command = Command::new("printenv") - /// .stdin(Stdio::null()) - /// .stdout(Stdio::inherit()) - /// .env_clear() - /// .envs(&filtered_env); - /// ``` - pub fn envs(&mut self, vars: I) -> &mut Command - where - I: IntoIterator, - K: AsRef, - V: AsRef, - { - self.std.envs(vars); - self - } - - /// Removes an environment variable mapping. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ```no_run - /// use tokio::process::Command; - /// - /// let command = Command::new("ls") - /// .env_remove("PATH"); - /// ``` - pub fn env_remove>(&mut self, key: K) -> &mut Command { - self.std.env_remove(key); - self - } - - /// Clears the entire environment map for the child process. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ```no_run - /// use tokio::process::Command; - /// - /// let command = Command::new("ls") - /// .env_clear(); - /// ``` - pub fn env_clear(&mut self) -> &mut Command { - self.std.env_clear(); - self - } - - /// Sets the working directory for the child process. - /// - /// # Platform-specific behavior - /// - /// If the program path is relative (e.g., `"./script.sh"`), it's ambiguous - /// whether it should be interpreted relative to the parent's working - /// directory or relative to `current_dir`. The behavior in this case is - /// platform specific and unstable, and it's recommended to use - /// [`canonicalize`] to get an absolute program path instead. 
- /// - /// [`canonicalize`]: crate::fs::canonicalize() - /// - /// # Examples - /// - /// Basic usage: - /// - /// ```no_run - /// use tokio::process::Command; - /// - /// let command = Command::new("ls") - /// .current_dir("/bin"); - /// ``` - pub fn current_dir>(&mut self, dir: P) -> &mut Command { - self.std.current_dir(dir); - self - } - - /// Sets configuration for the child process's standard input (stdin) handle. - /// - /// Defaults to [`inherit`] when used with `spawn` or `status`, and - /// defaults to [`piped`] when used with `output`. - /// - /// [`inherit`]: std::process::Stdio::inherit - /// [`piped`]: std::process::Stdio::piped - /// - /// # Examples - /// - /// Basic usage: - /// - /// ```no_run - /// use std::process::{Stdio}; - /// use tokio::process::Command; - /// - /// let command = Command::new("ls") - /// .stdin(Stdio::null()); - /// ``` - pub fn stdin>(&mut self, cfg: T) -> &mut Command { - self.std.stdin(cfg); - self - } - - /// Sets configuration for the child process's standard output (stdout) handle. - /// - /// Defaults to [`inherit`] when used with `spawn` or `status`, and - /// defaults to [`piped`] when used with `output`. - /// - /// [`inherit`]: std::process::Stdio::inherit - /// [`piped`]: std::process::Stdio::piped - /// - /// # Examples - /// - /// Basic usage: - /// - /// ```no_run - /// use tokio::process::Command;; - /// use std::process::Stdio; - /// - /// let command = Command::new("ls") - /// .stdout(Stdio::null()); - /// ``` - pub fn stdout>(&mut self, cfg: T) -> &mut Command { - self.std.stdout(cfg); - self - } - - /// Sets configuration for the child process's standard error (stderr) handle. - /// - /// Defaults to [`inherit`] when used with `spawn` or `status`, and - /// defaults to [`piped`] when used with `output`. - /// - /// [`inherit`]: std::process::Stdio::inherit - /// [`piped`]: std::process::Stdio::piped - /// - /// # Examples - /// - /// Basic usage: - /// - /// ```no_run - /// use tokio::process::Command;; - /// use std::process::{Stdio}; - /// - /// let command = Command::new("ls") - /// .stderr(Stdio::null()); - /// ``` - pub fn stderr>(&mut self, cfg: T) -> &mut Command { - self.std.stderr(cfg); - self - } - - /// Controls whether a `kill` operation should be invoked on a spawned child - /// process when its corresponding `Child` handle is dropped. - /// - /// By default, this value is assumed to be `false`, meaning the next spawned - /// process will not be killed on drop, similar to the behavior of the standard - /// library. - pub fn kill_on_drop(&mut self, kill_on_drop: bool) -> &mut Command { - self.kill_on_drop = kill_on_drop; - self - } - - /// Sets the [process creation flags][1] to be passed to `CreateProcess`. - /// - /// These will always be ORed with `CREATE_UNICODE_ENVIRONMENT`. - /// - /// [1]: https://msdn.microsoft.com/en-us/library/windows/desktop/ms684863(v=vs.85).aspx - #[cfg(windows)] - pub fn creation_flags(&mut self, flags: u32) -> &mut Command { - self.std.creation_flags(flags); - self - } - - /// Sets the child process's user ID. This translates to a - /// `setuid` call in the child process. Failure in the `setuid` - /// call will cause the spawn to fail. - #[cfg(unix)] - pub fn uid(&mut self, id: u32) -> &mut Command { - self.std.uid(id); - self - } - - /// Similar to `uid` but sets the group ID of the child process. This has - /// the same semantics as the `uid` field. 
- #[cfg(unix)] - pub fn gid(&mut self, id: u32) -> &mut Command { - self.std.gid(id); - self - } - - /// Schedules a closure to be run just before the `exec` function is - /// invoked. - /// - /// The closure is allowed to return an I/O error whose OS error code will - /// be communicated back to the parent and returned as an error from when - /// the spawn was requested. - /// - /// Multiple closures can be registered and they will be called in order of - /// their registration. If a closure returns `Err` then no further closures - /// will be called and the spawn operation will immediately return with a - /// failure. - /// - /// # Safety - /// - /// This closure will be run in the context of the child process after a - /// `fork`. This primarily means that any modifications made to memory on - /// behalf of this closure will **not** be visible to the parent process. - /// This is often a very constrained environment where normal operations - /// like `malloc` or acquiring a mutex are not guaranteed to work (due to - /// other threads perhaps still running when the `fork` was run). - /// - /// This also means that all resources such as file descriptors and - /// memory-mapped regions got duplicated. It is your responsibility to make - /// sure that the closure does not violate library invariants by making - /// invalid use of these duplicates. - /// - /// When this closure is run, aspects such as the stdio file descriptors and - /// working directory have successfully been changed, so output to these - /// locations may not appear where intended. - #[cfg(unix)] - pub unsafe fn pre_exec(&mut self, f: F) -> &mut Command - where - F: FnMut() -> io::Result<()> + Send + Sync + 'static, - { - self.std.pre_exec(f); - self - } - - /// Executes the command as a child process, returning a handle to it. - /// - /// By default, stdin, stdout and stderr are inherited from the parent. - /// - /// This method will spawn the child process synchronously and return a - /// handle to a future-aware child process. The `Child` returned implements - /// `Future` itself to acquire the `ExitStatus` of the child, and otherwise - /// the `Child` has methods to acquire handles to the stdin, stdout, and - /// stderr streams. - /// - /// All I/O this child does will be associated with the current default - /// event loop. - /// - /// # Caveats - /// - /// Similar to the behavior to the standard library, and unlike the futures - /// paradigm of dropping-implies-cancellation, the spawned process will, by - /// default, continue to execute even after the `Child` handle has been dropped. - /// - /// The `Command::kill_on_drop` method can be used to modify this behavior - /// and kill the child process if the `Child` wrapper is dropped before it - /// has exited. 
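The caveat spelled out above (a spawned process outlives a dropped `Child` unless `kill_on_drop` is set) is easy to miss, so here is a minimal sketch of how the opt-in cleanup composes with the tokio 0.2 `Command` API documented in this file; the helper name `run_with_cleanup` and the `sleep 5` command are illustrative only:

```rust
use tokio::process::Command;

// Because kill_on_drop(true) is set, dropping the returned Child (for
// example, if the enclosing future is cancelled) kills the process
// instead of leaving it running, unlike the default behavior.
async fn run_with_cleanup() -> std::io::Result<()> {
    let child = Command::new("sleep")
        .arg("5")
        .kill_on_drop(true)
        .spawn()?;

    // In tokio 0.2 the Child itself is a future that yields the ExitStatus.
    let status = child.await?;
    println!("exited with: {}", status);
    Ok(())
}
```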
- /// - /// # Examples - /// - /// Basic usage: - /// - /// ```no_run - /// use tokio::process::Command; - /// - /// async fn run_ls() -> std::process::ExitStatus { - /// Command::new("ls") - /// .spawn() - /// .expect("ls command failed to start") - /// .await - /// .expect("ls command failed to run") - /// } - /// ``` - pub fn spawn(&mut self) -> io::Result { - imp::spawn_child(&mut self.std).map(|spawned_child| Child { - child: ChildDropGuard { - inner: spawned_child.child, - kill_on_drop: self.kill_on_drop, - }, - stdin: spawned_child.stdin.map(|inner| ChildStdin { inner }), - stdout: spawned_child.stdout.map(|inner| ChildStdout { inner }), - stderr: spawned_child.stderr.map(|inner| ChildStderr { inner }), - }) - } - - /// Executes the command as a child process, waiting for it to finish and - /// collecting its exit status. - /// - /// By default, stdin, stdout and stderr are inherited from the parent. - /// If any input/output handles are set to a pipe then they will be immediately - /// closed after the child is spawned. - /// - /// All I/O this child does will be associated with the current default - /// event loop. - /// - /// If this future is dropped before the future resolves, then - /// the child will be killed, if it was spawned. - /// - /// # Errors - /// - /// This future will return an error if the child process cannot be spawned - /// or if there is an error while awaiting its status. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ```no_run - /// use tokio::process::Command; - /// - /// async fn run_ls() -> std::process::ExitStatus { - /// Command::new("ls") - /// .status() - /// .await - /// .expect("ls command failed to run") - /// } - pub fn status(&mut self) -> impl Future> { - let child = self.spawn(); - - async { - let mut child = child?; - - // Ensure we close any stdio handles so we can't deadlock - // waiting on the child which may be waiting to read/write - // to a pipe we're holding. - child.stdin.take(); - child.stdout.take(); - child.stderr.take(); - - child.await - } - } - - /// Executes the command as a child process, waiting for it to finish and - /// collecting all of its output. - /// - /// > **Note**: this method, unlike the standard library, will - /// > unconditionally configure the stdout/stderr handles to be pipes, even - /// > if they have been previously configured. If this is not desired then - /// > the `spawn` method should be used in combination with the - /// > `wait_with_output` method on child. - /// - /// This method will return a future representing the collection of the - /// child process's stdout/stderr. It will resolve to - /// the `Output` type in the standard library, containing `stdout` and - /// `stderr` as `Vec` along with an `ExitStatus` representing how the - /// process exited. - /// - /// All I/O this child does will be associated with the current default - /// event loop. - /// - /// If this future is dropped before the future resolves, then - /// the child will be killed, if it was spawned. 
- /// - /// # Examples - /// - /// Basic usage: - /// - /// ```no_run - /// use tokio::process::Command; - /// - /// async fn run_ls() { - /// let output: std::process::Output = Command::new("ls") - /// .output() - /// .await - /// .expect("ls command failed to run"); - /// println!("stderr of ls: {:?}", output.stderr); - /// } - pub fn output(&mut self) -> impl Future> { - self.std.stdout(Stdio::piped()); - self.std.stderr(Stdio::piped()); - - let child = self.spawn(); - - async { child?.wait_with_output().await } - } -} - -impl From for Command { - fn from(std: StdCommand) -> Command { - Command { - std, - kill_on_drop: false, - } - } -} - -/// A drop guard which can ensure the child process is killed on drop if specified. -#[derive(Debug)] -struct ChildDropGuard { - inner: T, - kill_on_drop: bool, -} - -impl Kill for ChildDropGuard { - fn kill(&mut self) -> io::Result<()> { - let ret = self.inner.kill(); - - if ret.is_ok() { - self.kill_on_drop = false; - } - - ret - } -} - -impl Drop for ChildDropGuard { - fn drop(&mut self) { - if self.kill_on_drop { - drop(self.kill()); - } - } -} - -impl Future for ChildDropGuard -where - F: Future> + Kill + Unpin, -{ - type Output = Result; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - // Keep track of task budget - let coop = ready!(crate::coop::poll_proceed(cx)); - - let ret = Pin::new(&mut self.inner).poll(cx); - - if let Poll::Ready(Ok(_)) = ret { - // Avoid the overhead of trying to kill a reaped process - self.kill_on_drop = false; - } - - if ret.is_ready() { - coop.made_progress(); - } - - ret - } -} - -/// Representation of a child process spawned onto an event loop. -/// -/// This type is also a future which will yield the `ExitStatus` of the -/// underlying child process. A `Child` here also provides access to information -/// like the OS-assigned identifier and the stdio streams. -/// -/// # Caveats -/// Similar to the behavior to the standard library, and unlike the futures -/// paradigm of dropping-implies-cancellation, a spawned process will, by -/// default, continue to execute even after the `Child` handle has been dropped. -/// -/// The `Command::kill_on_drop` method can be used to modify this behavior -/// and kill the child process if the `Child` wrapper is dropped before it -/// has exited. -#[must_use = "futures do nothing unless polled"] -#[derive(Debug)] -pub struct Child { - child: ChildDropGuard, - - /// The handle for writing to the child's standard input (stdin), if it has - /// been captured. - pub stdin: Option, - - /// The handle for reading from the child's standard output (stdout), if it - /// has been captured. - pub stdout: Option, - - /// The handle for reading from the child's standard error (stderr), if it - /// has been captured. - pub stderr: Option, -} - -impl Child { - /// Returns the OS-assigned process identifier associated with this child. - pub fn id(&self) -> u32 { - self.child.inner.id() - } - - /// Forces the child to exit. - /// - /// This is equivalent to sending a SIGKILL on unix platforms. - /// - /// If the child has to be killed remotely, it is possible to do it using - /// a combination of the select! macro and a oneshot channel. In the following - /// example, the child will run until completion unless a message is sent on - /// the oneshot channel. If that happens, the child is killed immediately - /// using the `.kill()` method. 
- /// - /// ```no_run - /// use tokio::process::Command; - /// use tokio::sync::oneshot::channel; - /// - /// #[tokio::main] - /// async fn main() { - /// let (send, recv) = channel::<()>(); - /// let mut child = Command::new("sleep").arg("1").spawn().unwrap(); - /// tokio::spawn(async move { send.send(()) }); - /// tokio::select! { - /// _ = &mut child => {} - /// _ = recv => { - /// &mut child.kill(); - /// // NB: await the child here to avoid a zombie process on Unix platforms - /// child.await.unwrap(); - /// } - /// } - /// } - - pub fn kill(&mut self) -> io::Result<()> { - self.child.kill() - } - - #[doc(hidden)] - #[deprecated(note = "please use `child.stdin` instead")] - pub fn stdin(&mut self) -> &mut Option { - &mut self.stdin - } - - #[doc(hidden)] - #[deprecated(note = "please use `child.stdout` instead")] - pub fn stdout(&mut self) -> &mut Option { - &mut self.stdout - } - - #[doc(hidden)] - #[deprecated(note = "please use `child.stderr` instead")] - pub fn stderr(&mut self) -> &mut Option { - &mut self.stderr - } - - /// Returns a future that will resolve to an `Output`, containing the exit - /// status, stdout, and stderr of the child process. - /// - /// The returned future will simultaneously waits for the child to exit and - /// collect all remaining output on the stdout/stderr handles, returning an - /// `Output` instance. - /// - /// The stdin handle to the child process, if any, will be closed before - /// waiting. This helps avoid deadlock: it ensures that the child does not - /// block waiting for input from the parent, while the parent waits for the - /// child to exit. - /// - /// By default, stdin, stdout and stderr are inherited from the parent. In - /// order to capture the output into this `Output` it is necessary to create - /// new pipes between parent and child. Use `stdout(Stdio::piped())` or - /// `stderr(Stdio::piped())`, respectively, when creating a `Command`. - pub async fn wait_with_output(mut self) -> io::Result { - use crate::future::try_join3; - - async fn read_to_end(io: Option) -> io::Result> { - let mut vec = Vec::new(); - if let Some(mut io) = io { - crate::io::util::read_to_end(&mut io, &mut vec).await?; - } - Ok(vec) - } - - drop(self.stdin.take()); - let stdout_fut = read_to_end(self.stdout.take()); - let stderr_fut = read_to_end(self.stderr.take()); - - let (status, stdout, stderr) = try_join3(self, stdout_fut, stderr_fut).await?; - - Ok(Output { - status, - stdout, - stderr, - }) - } -} - -impl Future for Child { - type Output = io::Result; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - Pin::new(&mut self.child).poll(cx) - } -} - -/// The standard input stream for spawned children. -/// -/// This type implements the `AsyncWrite` trait to pass data to the stdin handle of -/// handle of a child process asynchronously. -#[derive(Debug)] -pub struct ChildStdin { - inner: imp::ChildStdin, -} - -/// The standard output stream for spawned children. -/// -/// This type implements the `AsyncRead` trait to read data from the stdout -/// handle of a child process asynchronously. -#[derive(Debug)] -pub struct ChildStdout { - inner: imp::ChildStdout, -} - -/// The standard error stream for spawned children. -/// -/// This type implements the `AsyncRead` trait to read data from the stderr -/// handle of a child process asynchronously. 
-#[derive(Debug)] -pub struct ChildStderr { - inner: imp::ChildStderr, -} - -impl AsyncWrite for ChildStdin { - fn poll_write( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - Pin::new(&mut self.inner).poll_write(cx, buf) - } - - fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Pin::new(&mut self.inner).poll_flush(cx) - } - - fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Pin::new(&mut self.inner).poll_shutdown(cx) - } -} - -impl AsyncRead for ChildStdout { - unsafe fn prepare_uninitialized_buffer(&self, _buf: &mut [std::mem::MaybeUninit]) -> bool { - // https://github.com/rust-lang/rust/blob/09c817eeb29e764cfc12d0a8d94841e3ffe34023/src/libstd/process.rs#L314 - false - } - - fn poll_read( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { - Pin::new(&mut self.inner).poll_read(cx, buf) - } -} - -impl AsyncRead for ChildStderr { - unsafe fn prepare_uninitialized_buffer(&self, _buf: &mut [std::mem::MaybeUninit]) -> bool { - // https://github.com/rust-lang/rust/blob/09c817eeb29e764cfc12d0a8d94841e3ffe34023/src/libstd/process.rs#L375 - false - } - - fn poll_read( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { - Pin::new(&mut self.inner).poll_read(cx, buf) - } -} - -#[cfg(unix)] -mod sys { - use std::os::unix::io::{AsRawFd, RawFd}; - - use super::{ChildStderr, ChildStdin, ChildStdout}; - - impl AsRawFd for ChildStdin { - fn as_raw_fd(&self) -> RawFd { - self.inner.get_ref().as_raw_fd() - } - } - - impl AsRawFd for ChildStdout { - fn as_raw_fd(&self) -> RawFd { - self.inner.get_ref().as_raw_fd() - } - } - - impl AsRawFd for ChildStderr { - fn as_raw_fd(&self) -> RawFd { - self.inner.get_ref().as_raw_fd() - } - } -} - -#[cfg(windows)] -mod sys { - use std::os::windows::io::{AsRawHandle, RawHandle}; - - use super::{ChildStderr, ChildStdin, ChildStdout}; - - impl AsRawHandle for ChildStdin { - fn as_raw_handle(&self) -> RawHandle { - self.inner.get_ref().as_raw_handle() - } - } - - impl AsRawHandle for ChildStdout { - fn as_raw_handle(&self) -> RawHandle { - self.inner.get_ref().as_raw_handle() - } - } - - impl AsRawHandle for ChildStderr { - fn as_raw_handle(&self) -> RawHandle { - self.inner.get_ref().as_raw_handle() - } - } -} - -#[cfg(all(test, not(loom)))] -mod test { - use super::kill::Kill; - use super::ChildDropGuard; - - use futures::future::FutureExt; - use std::future::Future; - use std::io; - use std::pin::Pin; - use std::task::{Context, Poll}; - - struct Mock { - num_kills: usize, - num_polls: usize, - poll_result: Poll>, - } - - impl Mock { - fn new() -> Self { - Self::with_result(Poll::Pending) - } - - fn with_result(result: Poll>) -> Self { - Self { - num_kills: 0, - num_polls: 0, - poll_result: result, - } - } - } - - impl Kill for Mock { - fn kill(&mut self) -> io::Result<()> { - self.num_kills += 1; - Ok(()) - } - } - - impl Future for Mock { - type Output = Result<(), ()>; - - fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll { - let inner = Pin::get_mut(self); - inner.num_polls += 1; - inner.poll_result - } - } - - #[test] - fn kills_on_drop_if_specified() { - let mut mock = Mock::new(); - - { - let guard = ChildDropGuard { - inner: &mut mock, - kill_on_drop: true, - }; - drop(guard); - } - - assert_eq!(1, mock.num_kills); - assert_eq!(0, mock.num_polls); - } - - #[test] - fn no_kill_on_drop_by_default() { - let mut mock = Mock::new(); - - { - let guard = ChildDropGuard { - inner: &mut mock, - kill_on_drop: 
false, - }; - drop(guard); - } - - assert_eq!(0, mock.num_kills); - assert_eq!(0, mock.num_polls); - } - - #[test] - fn no_kill_if_already_killed() { - let mut mock = Mock::new(); - - { - let mut guard = ChildDropGuard { - inner: &mut mock, - kill_on_drop: true, - }; - let _ = guard.kill(); - drop(guard); - } - - assert_eq!(1, mock.num_kills); - assert_eq!(0, mock.num_polls); - } - - #[test] - fn no_kill_if_reaped() { - let mut mock_pending = Mock::with_result(Poll::Pending); - let mut mock_reaped = Mock::with_result(Poll::Ready(Ok(()))); - let mut mock_err = Mock::with_result(Poll::Ready(Err(()))); - - let waker = futures::task::noop_waker(); - let mut context = Context::from_waker(&waker); - { - let mut guard = ChildDropGuard { - inner: &mut mock_pending, - kill_on_drop: true, - }; - let _ = guard.poll_unpin(&mut context); - - let mut guard = ChildDropGuard { - inner: &mut mock_reaped, - kill_on_drop: true, - }; - let _ = guard.poll_unpin(&mut context); - - let mut guard = ChildDropGuard { - inner: &mut mock_err, - kill_on_drop: true, - }; - let _ = guard.poll_unpin(&mut context); - } - - assert_eq!(1, mock_pending.num_kills); - assert_eq!(1, mock_pending.num_polls); - - assert_eq!(0, mock_reaped.num_kills); - assert_eq!(1, mock_reaped.num_polls); - - assert_eq!(1, mock_err.num_kills); - assert_eq!(1, mock_err.num_polls); - } -} diff --git a/third_party/rust/tokio-0.2.25/src/process/unix/mod.rs b/third_party/rust/tokio-0.2.25/src/process/unix/mod.rs deleted file mode 100644 index c25d98974ae7..000000000000 --- a/third_party/rust/tokio-0.2.25/src/process/unix/mod.rs +++ /dev/null @@ -1,227 +0,0 @@ -//! Unix handling of child processes -//! -//! Right now the only "fancy" thing about this is how we implement the -//! `Future` implementation on `Child` to get the exit status. Unix offers -//! no way to register a child with epoll, and the only real way to get a -//! notification when a process exits is the SIGCHLD signal. -//! -//! Signal handling in general is *super* hairy and complicated, and it's even -//! more complicated here with the fact that signals are coalesced, so we may -//! not get a SIGCHLD-per-child. -//! -//! Our best approximation here is to check *all spawned processes* for all -//! SIGCHLD signals received. To do that we create a `Signal`, implemented in -//! the `tokio-net` crate, which is a stream over signals being received. -//! -//! Later when we poll the process's exit status we simply check to see if a -//! SIGCHLD has happened since we last checked, and while that returns "yes" we -//! keep trying. -//! -//! Note that this means that this isn't really scalable, but then again -//! processes in general aren't scalable (e.g. millions) so it shouldn't be that -//! bad in theory... 
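To make the strategy described in the module comment above concrete: SIGCHLD delivery is coalesced, so a single wakeup may stand for several exits, and every tracked child has to be re-checked with `try_wait`. Below is a simplified, `std`-only sketch of that sweep (the real code drives it from tokio's `Signal` stream, as the rest of this file shows); the function name is illustrative:

```rust
use std::process::{Child, ExitStatus};

// Illustrative only: run once per (coalesced) SIGCHLD wakeup. The signal
// does not say which child exited, so every tracked child is polled.
fn check_all_children(children: &mut Vec<Child>) -> Vec<ExitStatus> {
    let mut finished = Vec::new();
    let mut still_running = Vec::new();
    for mut child in children.drain(..) {
        match child.try_wait() {
            Ok(Some(status)) => finished.push(status), // exited and reaped
            Ok(None) | Err(_) => still_running.push(child), // check again on the next wakeup
        }
    }
    *children = still_running;
    finished
}
```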
- -mod orphan; -use orphan::{OrphanQueue, OrphanQueueImpl, Wait}; - -mod reap; -use reap::Reaper; - -use crate::io::PollEvented; -use crate::process::kill::Kill; -use crate::process::SpawnedChild; -use crate::signal::unix::{signal, Signal, SignalKind}; - -use mio::event::Evented; -use mio::unix::{EventedFd, UnixReady}; -use mio::{Poll as MioPoll, PollOpt, Ready, Token}; -use std::fmt; -use std::future::Future; -use std::io; -use std::os::unix::io::{AsRawFd, RawFd}; -use std::pin::Pin; -use std::process::ExitStatus; -use std::task::Context; -use std::task::Poll; - -impl Wait for std::process::Child { - fn id(&self) -> u32 { - self.id() - } - - fn try_wait(&mut self) -> io::Result> { - self.try_wait() - } -} - -impl Kill for std::process::Child { - fn kill(&mut self) -> io::Result<()> { - self.kill() - } -} - -lazy_static::lazy_static! { - static ref ORPHAN_QUEUE: OrphanQueueImpl = OrphanQueueImpl::new(); -} - -struct GlobalOrphanQueue; - -impl fmt::Debug for GlobalOrphanQueue { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - ORPHAN_QUEUE.fmt(fmt) - } -} - -impl OrphanQueue for GlobalOrphanQueue { - fn push_orphan(&self, orphan: std::process::Child) { - ORPHAN_QUEUE.push_orphan(orphan) - } - - fn reap_orphans(&self) { - ORPHAN_QUEUE.reap_orphans() - } -} - -#[must_use = "futures do nothing unless polled"] -pub(crate) struct Child { - inner: Reaper, -} - -impl fmt::Debug for Child { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("Child") - .field("pid", &self.inner.id()) - .finish() - } -} - -pub(crate) fn spawn_child(cmd: &mut std::process::Command) -> io::Result { - let mut child = cmd.spawn()?; - let stdin = stdio(child.stdin.take())?; - let stdout = stdio(child.stdout.take())?; - let stderr = stdio(child.stderr.take())?; - - let signal = signal(SignalKind::child())?; - - Ok(SpawnedChild { - child: Child { - inner: Reaper::new(child, GlobalOrphanQueue, signal), - }, - stdin, - stdout, - stderr, - }) -} - -impl Child { - pub(crate) fn id(&self) -> u32 { - self.inner.id() - } -} - -impl Kill for Child { - fn kill(&mut self) -> io::Result<()> { - self.inner.kill() - } -} - -impl Future for Child { - type Output = io::Result; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - Pin::new(&mut self.inner).poll(cx) - } -} - -#[derive(Debug)] -pub(crate) struct Fd { - inner: T, -} - -impl io::Read for Fd -where - T: io::Read, -{ - fn read(&mut self, bytes: &mut [u8]) -> io::Result { - self.inner.read(bytes) - } -} - -impl io::Write for Fd -where - T: io::Write, -{ - fn write(&mut self, bytes: &[u8]) -> io::Result { - self.inner.write(bytes) - } - - fn flush(&mut self) -> io::Result<()> { - self.inner.flush() - } -} - -impl AsRawFd for Fd -where - T: AsRawFd, -{ - fn as_raw_fd(&self) -> RawFd { - self.inner.as_raw_fd() - } -} - -impl Evented for Fd -where - T: AsRawFd, -{ - fn register( - &self, - poll: &MioPoll, - token: Token, - interest: Ready, - opts: PollOpt, - ) -> io::Result<()> { - EventedFd(&self.as_raw_fd()).register(poll, token, interest | UnixReady::hup(), opts) - } - - fn reregister( - &self, - poll: &MioPoll, - token: Token, - interest: Ready, - opts: PollOpt, - ) -> io::Result<()> { - EventedFd(&self.as_raw_fd()).reregister(poll, token, interest | UnixReady::hup(), opts) - } - - fn deregister(&self, poll: &MioPoll) -> io::Result<()> { - EventedFd(&self.as_raw_fd()).deregister(poll) - } -} - -pub(crate) type ChildStdin = PollEvented>; -pub(crate) type ChildStdout = PollEvented>; -pub(crate) type ChildStderr = 
PollEvented>; - -fn stdio(option: Option) -> io::Result>>> -where - T: AsRawFd, -{ - let io = match option { - Some(io) => io, - None => return Ok(None), - }; - - // Set the fd to nonblocking before we pass it to the event loop - unsafe { - let fd = io.as_raw_fd(); - let r = libc::fcntl(fd, libc::F_GETFL); - if r == -1 { - return Err(io::Error::last_os_error()); - } - let r = libc::fcntl(fd, libc::F_SETFL, r | libc::O_NONBLOCK); - if r == -1 { - return Err(io::Error::last_os_error()); - } - } - Ok(Some(PollEvented::new(Fd { inner: io })?)) -} diff --git a/third_party/rust/tokio-0.2.25/src/process/unix/orphan.rs b/third_party/rust/tokio-0.2.25/src/process/unix/orphan.rs deleted file mode 100644 index 6c449a909368..000000000000 --- a/third_party/rust/tokio-0.2.25/src/process/unix/orphan.rs +++ /dev/null @@ -1,191 +0,0 @@ -use std::io; -use std::process::ExitStatus; -use std::sync::Mutex; - -/// An interface for waiting on a process to exit. -pub(crate) trait Wait { - /// Get the identifier for this process or diagnostics. - fn id(&self) -> u32; - /// Try waiting for a process to exit in a non-blocking manner. - fn try_wait(&mut self) -> io::Result>; -} - -impl Wait for &mut T { - fn id(&self) -> u32 { - (**self).id() - } - - fn try_wait(&mut self) -> io::Result> { - (**self).try_wait() - } -} - -/// An interface for queueing up an orphaned process so that it can be reaped. -pub(crate) trait OrphanQueue { - /// Adds an orphan to the queue. - fn push_orphan(&self, orphan: T); - /// Attempts to reap every process in the queue, ignoring any errors and - /// enqueueing any orphans which have not yet exited. - fn reap_orphans(&self); -} - -impl> OrphanQueue for &O { - fn push_orphan(&self, orphan: T) { - (**self).push_orphan(orphan); - } - - fn reap_orphans(&self) { - (**self).reap_orphans() - } -} - -/// An implementation of `OrphanQueue`. -#[derive(Debug)] -pub(crate) struct OrphanQueueImpl { - queue: Mutex>, -} - -impl OrphanQueueImpl { - pub(crate) fn new() -> Self { - Self { - queue: Mutex::new(Vec::new()), - } - } - - #[cfg(test)] - fn len(&self) -> usize { - self.queue.lock().unwrap().len() - } -} - -impl OrphanQueue for OrphanQueueImpl { - fn push_orphan(&self, orphan: T) { - self.queue.lock().unwrap().push(orphan) - } - - fn reap_orphans(&self) { - let mut queue = self.queue.lock().unwrap(); - let queue = &mut *queue; - - let mut i = 0; - while i < queue.len() { - match queue[i].try_wait() { - Ok(Some(_)) => {} - Err(_) => { - // TODO: bubble up error some how. Is this an internal bug? - // Shoudl we panic? Is it OK for this to be silently - // dropped? 
- } - // Still not done yet - Ok(None) => { - i += 1; - continue; - } - } - - queue.remove(i); - } - } -} - -#[cfg(all(test, not(loom)))] -mod test { - use super::Wait; - use super::{OrphanQueue, OrphanQueueImpl}; - use std::cell::Cell; - use std::io; - use std::os::unix::process::ExitStatusExt; - use std::process::ExitStatus; - use std::rc::Rc; - - struct MockWait { - total_waits: Rc>, - num_wait_until_status: usize, - return_err: bool, - } - - impl MockWait { - fn new(num_wait_until_status: usize) -> Self { - Self { - total_waits: Rc::new(Cell::new(0)), - num_wait_until_status, - return_err: false, - } - } - - fn with_err() -> Self { - Self { - total_waits: Rc::new(Cell::new(0)), - num_wait_until_status: 0, - return_err: true, - } - } - } - - impl Wait for MockWait { - fn id(&self) -> u32 { - 42 - } - - fn try_wait(&mut self) -> io::Result> { - let waits = self.total_waits.get(); - - let ret = if self.num_wait_until_status == waits { - if self.return_err { - Ok(Some(ExitStatus::from_raw(0))) - } else { - Err(io::Error::new(io::ErrorKind::Other, "mock err")) - } - } else { - Ok(None) - }; - - self.total_waits.set(waits + 1); - ret - } - } - - #[test] - fn drain_attempts_a_single_reap_of_all_queued_orphans() { - let first_orphan = MockWait::new(0); - let second_orphan = MockWait::new(1); - let third_orphan = MockWait::new(2); - let fourth_orphan = MockWait::with_err(); - - let first_waits = first_orphan.total_waits.clone(); - let second_waits = second_orphan.total_waits.clone(); - let third_waits = third_orphan.total_waits.clone(); - let fourth_waits = fourth_orphan.total_waits.clone(); - - let orphanage = OrphanQueueImpl::new(); - orphanage.push_orphan(first_orphan); - orphanage.push_orphan(third_orphan); - orphanage.push_orphan(second_orphan); - orphanage.push_orphan(fourth_orphan); - - assert_eq!(orphanage.len(), 4); - - orphanage.reap_orphans(); - assert_eq!(orphanage.len(), 2); - assert_eq!(first_waits.get(), 1); - assert_eq!(second_waits.get(), 1); - assert_eq!(third_waits.get(), 1); - assert_eq!(fourth_waits.get(), 1); - - orphanage.reap_orphans(); - assert_eq!(orphanage.len(), 1); - assert_eq!(first_waits.get(), 1); - assert_eq!(second_waits.get(), 2); - assert_eq!(third_waits.get(), 2); - assert_eq!(fourth_waits.get(), 1); - - orphanage.reap_orphans(); - assert_eq!(orphanage.len(), 0); - assert_eq!(first_waits.get(), 1); - assert_eq!(second_waits.get(), 2); - assert_eq!(third_waits.get(), 3); - assert_eq!(fourth_waits.get(), 1); - - orphanage.reap_orphans(); // Safe to reap when empty - } -} diff --git a/third_party/rust/tokio-0.2.25/src/process/unix/reap.rs b/third_party/rust/tokio-0.2.25/src/process/unix/reap.rs deleted file mode 100644 index 8963805afe32..000000000000 --- a/third_party/rust/tokio-0.2.25/src/process/unix/reap.rs +++ /dev/null @@ -1,342 +0,0 @@ -use crate::process::imp::orphan::{OrphanQueue, Wait}; -use crate::process::kill::Kill; -use crate::signal::unix::Signal; - -use std::future::Future; -use std::io; -use std::ops::Deref; -use std::pin::Pin; -use std::process::ExitStatus; -use std::task::Context; -use std::task::Poll; - -/// Orchestrates between registering interest for receiving signals when a -/// child process has exited, and attempting to poll for process completion. 
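Before the `Reaper` itself, it may help to see the orphan-queue contract from `orphan.rs` above in miniature. This is a simplified restatement of `OrphanQueueImpl`'s reap loop using plain `std` types; the struct name is illustrative and, as in the original, reap errors are silently dropped:

```rust
use std::process::Child;
use std::sync::Mutex;

// Children whose owners were dropped before they exited are parked here
// and opportunistically reaped later so they do not linger as zombies.
struct Orphans {
    queue: Mutex<Vec<Child>>,
}

impl Orphans {
    fn push_orphan(&self, child: Child) {
        self.queue.lock().unwrap().push(child);
    }

    fn reap_orphans(&self) {
        let mut queue = self.queue.lock().unwrap();
        let mut i = 0;
        while i < queue.len() {
            match queue[i].try_wait() {
                // Exited (or errored): drop the handle so no zombie remains.
                Ok(Some(_)) | Err(_) => {
                    queue.remove(i);
                }
                // Still running: keep it queued and move on.
                Ok(None) => i += 1,
            }
        }
    }
}
```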
-#[derive(Debug)] -pub(crate) struct Reaper<W, Q, S> -where - W: Wait + Unpin, - Q: OrphanQueue<W>, -{ - inner: Option<W>, - orphan_queue: Q, - signal: S, -} - -// Work around removal of `futures_core` dependency -pub(crate) trait Stream: Unpin { - fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll<Option<()>>; -} - -impl Stream for Signal { - fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll<Option<()>> { - Signal::poll_recv(self, cx) - } -} - -impl<W, Q, S> Deref for Reaper<W, Q, S> -where - W: Wait + Unpin, - Q: OrphanQueue<W>, -{ - type Target = W; - - fn deref(&self) -> &Self::Target { - self.inner() - } -} - -impl<W, Q, S> Reaper<W, Q, S> -where - W: Wait + Unpin, - Q: OrphanQueue<W>, -{ - pub(crate) fn new(inner: W, orphan_queue: Q, signal: S) -> Self { - Self { - inner: Some(inner), - orphan_queue, - signal, - } - } - - fn inner(&self) -> &W { - self.inner.as_ref().expect("inner has gone away") - } - - fn inner_mut(&mut self) -> &mut W { - self.inner.as_mut().expect("inner has gone away") - } -} - -impl<W, Q, S> Future for Reaper<W, Q, S> -where - W: Wait + Unpin, - Q: OrphanQueue<W> + Unpin, - S: Stream, -{ - type Output = io::Result<ExitStatus>; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { - loop { - // If the child hasn't exited yet, then it's our responsibility to - // ensure the current task gets notified when it might be able to - // make progress. - // - // As described in `spawn` above, we just indicate that we can - // next make progress once a SIGCHLD is received. - // - // However, we will register for a notification on the next signal - // BEFORE we poll the child. Otherwise it is possible that the child - // can exit and the signal can arrive after we last polled the child, - // but before we've registered for a notification on the next signal - // (this can cause a deadlock if there are no more spawned children - // which can generate a different signal for us). A side effect of - // pre-registering for signal notifications is that when the child - // exits, we will have already registered for an additional - // notification we don't need to consume. If another signal arrives, - // this future's task will be notified/woken up again. Since the - // futures model allows for spurious wake ups this extra wakeup - // should not cause significant issues with parent futures. - let registered_interest = self.signal.poll_recv(cx).is_pending(); - - self.orphan_queue.reap_orphans(); - if let Some(status) = self.inner_mut().try_wait()? { - return Poll::Ready(Ok(status)); - } - - // If our attempt to poll for the next signal was not ready, then - // we've arranged for our task to get notified and we can bail out. - if registered_interest { - return Poll::Pending; - } else { - // Otherwise, if the signal stream delivered a signal to us, we - // won't get notified at the next signal, so we'll loop and try - // again.
- continue; - } - } - } -} - -impl<W, Q, S> Kill for Reaper<W, Q, S> -where - W: Kill + Wait + Unpin, - Q: OrphanQueue<W>, -{ - fn kill(&mut self) -> io::Result<()> { - self.inner_mut().kill() - } -} - -impl<W, Q, S> Drop for Reaper<W, Q, S> -where - W: Wait + Unpin, - Q: OrphanQueue<W>, -{ - fn drop(&mut self) { - if let Ok(Some(_)) = self.inner_mut().try_wait() { - return; - } - - let orphan = self.inner.take().unwrap(); - self.orphan_queue.push_orphan(orphan); - } -} - -#[cfg(all(test, not(loom)))] -mod test { - use super::*; - - use futures::future::FutureExt; - use std::cell::{Cell, RefCell}; - use std::os::unix::process::ExitStatusExt; - use std::process::ExitStatus; - use std::task::Context; - use std::task::Poll; - - #[derive(Debug)] - struct MockWait { - total_kills: usize, - total_waits: usize, - num_wait_until_status: usize, - status: ExitStatus, - } - - impl MockWait { - fn new(status: ExitStatus, num_wait_until_status: usize) -> Self { - Self { - total_kills: 0, - total_waits: 0, - num_wait_until_status, - status, - } - } - } - - impl Wait for MockWait { - fn id(&self) -> u32 { - 0 - } - - fn try_wait(&mut self) -> io::Result<Option<ExitStatus>> { - let ret = if self.num_wait_until_status == self.total_waits { - Some(self.status) - } else { - None - }; - - self.total_waits += 1; - Ok(ret) - } - } - - impl Kill for MockWait { - fn kill(&mut self) -> io::Result<()> { - self.total_kills += 1; - Ok(()) - } - } - - struct MockStream { - total_polls: usize, - values: Vec<Option<()>>, - } - - impl MockStream { - fn new(values: Vec<Option<()>>) -> Self { - Self { - total_polls: 0, - values, - } - } - } - - impl Stream for MockStream { - fn poll_recv(&mut self, _cx: &mut Context<'_>) -> Poll<Option<()>> { - self.total_polls += 1; - match self.values.remove(0) { - Some(()) => Poll::Ready(Some(())), - None => Poll::Pending, - } - } - } - - struct MockQueue<W> { - all_enqueued: RefCell<Vec<W>>, - total_reaps: Cell<usize>, - } - - impl<W> MockQueue<W> { - fn new() -> Self { - Self { - all_enqueued: RefCell::new(Vec::new()), - total_reaps: Cell::new(0), - } - } - } - - impl<W> OrphanQueue<W> for MockQueue<W> { - fn push_orphan(&self, orphan: W) { - self.all_enqueued.borrow_mut().push(orphan); - } - - fn reap_orphans(&self) { - self.total_reaps.set(self.total_reaps.get() + 1); - } - } - - #[test] - fn reaper() { - let exit = ExitStatus::from_raw(0); - let mock = MockWait::new(exit, 3); - let mut grim = Reaper::new( - mock, - MockQueue::new(), - MockStream::new(vec![None, Some(()), None, None, None]), - ); - - let waker = futures::task::noop_waker(); - let mut context = Context::from_waker(&waker); - - // Not yet exited, interest registered - assert!(grim.poll_unpin(&mut context).is_pending()); - assert_eq!(1, grim.signal.total_polls); - assert_eq!(1, grim.total_waits); - assert_eq!(1, grim.orphan_queue.total_reaps.get()); - assert!(grim.orphan_queue.all_enqueued.borrow().is_empty()); - - // Not yet exited, couldn't register interest the first time - // but managed to register interest the second time around - assert!(grim.poll_unpin(&mut context).is_pending()); - assert_eq!(3, grim.signal.total_polls); - assert_eq!(3, grim.total_waits); - assert_eq!(3, grim.orphan_queue.total_reaps.get()); - assert!(grim.orphan_queue.all_enqueued.borrow().is_empty()); - - // Exited - if let Poll::Ready(r) = grim.poll_unpin(&mut context) { - assert!(r.is_ok()); - let exit_code = r.unwrap(); - assert_eq!(exit_code, exit); - } else { - unreachable!(); - } - assert_eq!(4, grim.signal.total_polls); - assert_eq!(4, grim.total_waits); - assert_eq!(4, grim.orphan_queue.total_reaps.get()); -
assert!(grim.orphan_queue.all_enqueued.borrow().is_empty()); - } - - #[test] - fn kill() { - let exit = ExitStatus::from_raw(0); - let mut grim = Reaper::new( - MockWait::new(exit, 0), - MockQueue::new(), - MockStream::new(vec![None]), - ); - - grim.kill().unwrap(); - assert_eq!(1, grim.total_kills); - assert_eq!(0, grim.orphan_queue.total_reaps.get()); - assert!(grim.orphan_queue.all_enqueued.borrow().is_empty()); - } - - #[test] - fn drop_reaps_if_possible() { - let exit = ExitStatus::from_raw(0); - let mut mock = MockWait::new(exit, 0); - - { - let queue = MockQueue::new(); - - let grim = Reaper::new(&mut mock, &queue, MockStream::new(vec![])); - - drop(grim); - - assert_eq!(0, queue.total_reaps.get()); - assert!(queue.all_enqueued.borrow().is_empty()); - } - - assert_eq!(1, mock.total_waits); - assert_eq!(0, mock.total_kills); - } - - #[test] - fn drop_enqueues_orphan_if_wait_fails() { - let exit = ExitStatus::from_raw(0); - let mut mock = MockWait::new(exit, 2); - - { - let queue = MockQueue::<&mut MockWait>::new(); - let grim = Reaper::new(&mut mock, &queue, MockStream::new(vec![])); - drop(grim); - - assert_eq!(0, queue.total_reaps.get()); - assert_eq!(1, queue.all_enqueued.borrow().len()); - } - - assert_eq!(1, mock.total_waits); - assert_eq!(0, mock.total_kills); - } -} diff --git a/third_party/rust/tokio-0.2.25/src/process/windows.rs b/third_party/rust/tokio-0.2.25/src/process/windows.rs deleted file mode 100644 index cbe2fa7596f5..000000000000 --- a/third_party/rust/tokio-0.2.25/src/process/windows.rs +++ /dev/null @@ -1,191 +0,0 @@ -//! Windows asynchronous process handling. -//! -//! Like with Unix we don't actually have a way of registering a process with an -//! IOCP object. As a result we similarly need another mechanism for getting a -//! signal when a process has exited. For now this is implemented with the -//! `RegisterWaitForSingleObject` function in the kernel32.dll. -//! -//! This strategy is the same that libuv takes and essentially just queues up a -//! wait for the process in a kernel32-specific thread pool. Once the object is -//! notified (e.g. the process exits) then we have a callback that basically -//! just completes a `Oneshot`. -//! -//! The `poll_exit` implementation will attempt to wait for the process in a -//! nonblocking fashion, but failing that it'll fire off a -//! `RegisterWaitForSingleObject` and then wait on the other end of the oneshot -//! from then on out. 
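The Windows strategy above boils down to: a process handle cannot be registered with IOCP, so park a wait somewhere that is allowed to block and complete a oneshot when the process exits. Here is a rough, portable analogue of that shape using a dedicated thread and a channel instead of `RegisterWaitForSingleObject` and the kernel32 wait pool; the function name is illustrative:

```rust
use std::process::{Child, ExitStatus};
use std::sync::mpsc;
use std::thread;

// Illustrative stand-in for the RegisterWaitForSingleObject callback:
// block on the child off to the side and hand the exit status back over
// a channel that the non-blocking side can poll.
fn wait_in_background(mut child: Child) -> mpsc::Receiver<std::io::Result<ExitStatus>> {
    let (tx, rx) = mpsc::channel();
    thread::spawn(move || {
        // wait() blocks, which is exactly why the real code pushes this
        // off the async runtime and onto a kernel-managed wait.
        let _ = tx.send(child.wait());
    });
    rx
}
```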
- -use crate::io::PollEvented; -use crate::process::kill::Kill; -use crate::process::SpawnedChild; -use crate::sync::oneshot; - -use mio_named_pipes::NamedPipe; -use std::fmt; -use std::future::Future; -use std::io; -use std::os::windows::prelude::*; -use std::os::windows::process::ExitStatusExt; -use std::pin::Pin; -use std::process::{Child as StdChild, Command as StdCommand, ExitStatus}; -use std::ptr; -use std::task::Context; -use std::task::Poll; -use winapi::shared::minwindef::FALSE; -use winapi::shared::winerror::WAIT_TIMEOUT; -use winapi::um::handleapi::INVALID_HANDLE_VALUE; -use winapi::um::processthreadsapi::GetExitCodeProcess; -use winapi::um::synchapi::WaitForSingleObject; -use winapi::um::threadpoollegacyapiset::UnregisterWaitEx; -use winapi::um::winbase::{RegisterWaitForSingleObject, INFINITE, WAIT_OBJECT_0}; -use winapi::um::winnt::{BOOLEAN, HANDLE, PVOID, WT_EXECUTEINWAITTHREAD, WT_EXECUTEONLYONCE}; - -#[must_use = "futures do nothing unless polled"] -pub(crate) struct Child { - child: StdChild, - waiting: Option, -} - -impl fmt::Debug for Child { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("Child") - .field("pid", &self.id()) - .field("child", &self.child) - .field("waiting", &"..") - .finish() - } -} - -struct Waiting { - rx: oneshot::Receiver<()>, - wait_object: HANDLE, - tx: *mut Option>, -} - -unsafe impl Sync for Waiting {} -unsafe impl Send for Waiting {} - -pub(crate) fn spawn_child(cmd: &mut StdCommand) -> io::Result { - let mut child = cmd.spawn()?; - let stdin = stdio(child.stdin.take()); - let stdout = stdio(child.stdout.take()); - let stderr = stdio(child.stderr.take()); - - Ok(SpawnedChild { - child: Child { - child, - waiting: None, - }, - stdin, - stdout, - stderr, - }) -} - -impl Child { - pub(crate) fn id(&self) -> u32 { - self.child.id() - } -} - -impl Kill for Child { - fn kill(&mut self) -> io::Result<()> { - self.child.kill() - } -} - -impl Future for Child { - type Output = io::Result; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let inner = Pin::get_mut(self); - loop { - if let Some(ref mut w) = inner.waiting { - match Pin::new(&mut w.rx).poll(cx) { - Poll::Ready(Ok(())) => {} - Poll::Ready(Err(_)) => panic!("should not be canceled"), - Poll::Pending => return Poll::Pending, - } - let status = try_wait(&inner.child)?.expect("not ready yet"); - return Poll::Ready(Ok(status)); - } - - if let Some(e) = try_wait(&inner.child)? 
{ - return Poll::Ready(Ok(e)); - } - let (tx, rx) = oneshot::channel(); - let ptr = Box::into_raw(Box::new(Some(tx))); - let mut wait_object = ptr::null_mut(); - let rc = unsafe { - RegisterWaitForSingleObject( - &mut wait_object, - inner.child.as_raw_handle(), - Some(callback), - ptr as *mut _, - INFINITE, - WT_EXECUTEINWAITTHREAD | WT_EXECUTEONLYONCE, - ) - }; - if rc == 0 { - let err = io::Error::last_os_error(); - drop(unsafe { Box::from_raw(ptr) }); - return Poll::Ready(Err(err)); - } - inner.waiting = Some(Waiting { - rx, - wait_object, - tx: ptr, - }); - } - } -} - -impl Drop for Waiting { - fn drop(&mut self) { - unsafe { - let rc = UnregisterWaitEx(self.wait_object, INVALID_HANDLE_VALUE); - if rc == 0 { - panic!("failed to unregister: {}", io::Error::last_os_error()); - } - drop(Box::from_raw(self.tx)); - } - } -} - -unsafe extern "system" fn callback(ptr: PVOID, _timer_fired: BOOLEAN) { - let complete = &mut *(ptr as *mut Option>); - let _ = complete.take().unwrap().send(()); -} - -pub(crate) fn try_wait(child: &StdChild) -> io::Result> { - unsafe { - match WaitForSingleObject(child.as_raw_handle(), 0) { - WAIT_OBJECT_0 => {} - WAIT_TIMEOUT => return Ok(None), - _ => return Err(io::Error::last_os_error()), - } - let mut status = 0; - let rc = GetExitCodeProcess(child.as_raw_handle(), &mut status); - if rc == FALSE { - Err(io::Error::last_os_error()) - } else { - Ok(Some(ExitStatus::from_raw(status))) - } - } -} - -pub(crate) type ChildStdin = PollEvented; -pub(crate) type ChildStdout = PollEvented; -pub(crate) type ChildStderr = PollEvented; - -fn stdio(option: Option) -> Option> -where - T: IntoRawHandle, -{ - let io = match option { - Some(io) => io, - None => return None, - }; - let pipe = unsafe { NamedPipe::from_raw_handle(io.into_raw_handle()) }; - PollEvented::new(pipe).ok() -} diff --git a/third_party/rust/tokio-0.2.25/src/runtime/basic_scheduler.rs b/third_party/rust/tokio-0.2.25/src/runtime/basic_scheduler.rs deleted file mode 100644 index 7e1c257cc861..000000000000 --- a/third_party/rust/tokio-0.2.25/src/runtime/basic_scheduler.rs +++ /dev/null @@ -1,326 +0,0 @@ -use crate::park::{Park, Unpark}; -use crate::runtime; -use crate::runtime::task::{self, JoinHandle, Schedule, Task}; -use crate::util::linked_list::LinkedList; -use crate::util::{waker_ref, Wake}; - -use std::cell::RefCell; -use std::collections::VecDeque; -use std::fmt; -use std::future::Future; -use std::sync::{Arc, Mutex}; -use std::task::Poll::Ready; -use std::time::Duration; - -/// Executes tasks on the current thread -pub(crate) struct BasicScheduler