Bug 1785002 - Update bytes to 1.2.1. r=emilio,supply-chain-reviewers

Differential Revision: https://phabricator.services.mozilla.com/D154716
Mike Hommey 2022-08-16 20:30:28 +00:00
Parent 0b1695d170
Commit c09807cfa5
20 changed files with 849 additions and 181 deletions

Cargo.lock (generated, 28 changes)

@ -325,7 +325,7 @@ dependencies = [
"audio_thread_priority",
"bincode",
"byteorder",
"bytes 1.1.0",
"bytes 1.2.1",
"cc",
"cubeb",
"error-chain",
@ -604,9 +604,9 @@ dependencies = [
[[package]]
name = "bytes"
version = "1.1.0"
version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c4872d67bab6358e59559027aa3b9157c53d9358c51423c17554809a8858e0f8"
checksum = "ec8a7b6a70fde80372154c65702f00a0f56f3e1c36abbc6c440484be248856db"
[[package]]
name = "cache-padded"
@ -2402,7 +2402,7 @@ version = "0.3.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "37a82c6d637fc9515a4694bbf1cb2457b79d81ce52b3108bdeea58b07dd34a57"
dependencies = [
"bytes 1.1.0",
"bytes 1.2.1",
"fnv",
"futures-core",
"futures-sink",
@ -2441,7 +2441,7 @@ checksum = "4cff78e5788be1e0ab65b04d306b2ed5092c815ec97ec70f4ebd5aee158aa55d"
dependencies = [
"base64",
"bitflags",
"bytes 1.1.0",
"bytes 1.2.1",
"headers-core",
"http",
"httpdate",
@ -2485,7 +2485,7 @@ version = "0.2.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "75f43d41e26995c17e71ee126451dd3941010b0514a81a9d11f3b341debc2399"
dependencies = [
"bytes 1.1.0",
"bytes 1.2.1",
"fnv",
"itoa",
]
@ -2496,7 +2496,7 @@ version = "0.4.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1"
dependencies = [
"bytes 1.1.0",
"bytes 1.2.1",
"http",
"pin-project-lite",
]
@ -2552,7 +2552,7 @@ version = "0.14.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "42dc3c131584288d375f2d07f822b0cb012d8c6fb899a5b9fdb3cb7eb9b6004f"
dependencies = [
"bytes 1.1.0",
"bytes 1.2.1",
"futures-channel",
"futures-core",
"futures-util",
@ -4214,7 +4214,7 @@ version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "de5e2533f59d08fcf364fd374ebda0692a70bd6d7e66ef97f306f45c6c5d8020"
dependencies = [
"bytes 1.1.0",
"bytes 1.2.1",
"prost-derive",
]
@ -5268,7 +5268,7 @@ version = "1.17.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2af73ac49756f3f7c01172e34a23e5d0216f6c32333757c2c61feb2bbff5a5ee"
dependencies = [
"bytes 1.1.0",
"bytes 1.2.1",
"libc",
"memchr",
"mio 0.8.0",
@ -5388,7 +5388,7 @@ version = "0.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f988a1a1adc2fb21f9c12aa96441da33a1728193ae0b95d2be22dbd17fcb4e5c"
dependencies = [
"bytes 1.1.0",
"bytes 1.2.1",
"futures-core",
"futures-sink",
"pin-project-lite",
@ -5611,7 +5611,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bc1de33ad46ce00bc9a31cea44e80ef69175d3a23007335216fe3996880a310d"
dependencies = [
"anyhow",
"bytes 1.1.0",
"bytes 1.2.1",
"camino",
"cargo_metadata",
"lazy_static",
@ -5830,7 +5830,7 @@ version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3cef4e1e9114a4b7f1ac799f16ce71c14de5778500c5450ec6b7b920c55b587e"
dependencies = [
"bytes 1.1.0",
"bytes 1.2.1",
"futures-channel",
"futures-util",
"headers",
@ -5922,7 +5922,7 @@ name = "webdriver"
version = "0.46.0"
dependencies = [
"base64",
"bytes 1.1.0",
"bytes 1.2.1",
"cookie",
"http",
"log",

supply-chain/audits.toml

@ -107,6 +107,11 @@ who = "Jeff Muizelaar <jmuizelaar@mozilla.com>"
criteria = "safe-to-deploy"
version = "0.1.2"
[[audits.bytes]]
who = "Mike Hommey <mh+mozilla@glandium.org>"
criteria = "safe-to-deploy"
delta = "1.1.0 -> 1.2.1"
[[audits.clap_lex]]
who = "Mike Hommey <mh+mozilla@glandium.org>"
criteria = "safe-to-deploy"

third_party/rust/bytes/.cargo-checksum.json (vendored, 2 changes)

@ -1 +1 @@
{"files":{"CHANGELOG.md":"3ae0ceffbd69f54380bb44fdaf82dd674015471875a8da55686718afc3e58bdd","Cargo.toml":"5e1c5c02693e7afe119c1f82caec24ad41f51ce3b6899393ebeb1769ff50f1ab","LICENSE":"45f522cacecb1023856e46df79ca625dfc550c94910078bd8aec6e02880b3d42","README.md":"b691d6e144eb133c181e869dc2f6a6eedbf76c4f832a9ecfbed5b9c560160c7f","benches/buf.rs":"f76240b8c8872185d831382216eb536b3e05b23913c815cd36edd5d903fbeaf7","benches/bytes.rs":"dc5289a9ce82be35e71ed5853ab33aa108a30460e481135f6058fe4d2f7dc15e","benches/bytes_mut.rs":"1326fe6224b26826228e02b4133151e756f38152c2d9cfe66adf83af76c3ec98","ci/miri.sh":"1f27dc786a0f1e930c1c8429b1d60d2e107ff6998ec8efd4674c78a5d0594dd7","ci/test-stable.sh":"57dd709bc25a20103ee85e24965566900817b2e603f067fb1251a5c03e4b1d93","ci/tsan.sh":"466b86b19225dd26c756cf2252cb1973f87a145642c99364b462ed7ceb55c7dd","src/buf/buf_impl.rs":"bdd9d5bc3318185ef1bea8d7c6a9dd3712ec297e0045fd84024f188c0ad96ac0","src/buf/buf_mut.rs":"d4387228d687414d0ad3eb2bd1c2f0fc84be8ec7d8746b95075f186b467293d4","src/buf/chain.rs":"d31989886d8ca01a9e3b42d6756391f5bdf8c102f83fa6dac51dd86312a91c14","src/buf/iter.rs":"49e9990a2303252ef7c66c2cc24459097dbbf4900c978453982ef513467bbf67","src/buf/limit.rs":"e005ba140b70f68654877c96b981a220477e415ff5c92438c1b0cb9bc866d872","src/buf/mod.rs":"19ff6fb7e19cba3884bc3f1a50ef20117dbc807f6d146ed355f42344a74fdf44","src/buf/reader.rs":"856c1e7129a1eceaa3c8f9ed4da8c3b5e1cc267eeffa99fa8f7c56c5ca7834d1","src/buf/take.rs":"a897e79bf579391227816973b2aa1f1d63614bd48bc029d9371f61607dcfa23f","src/buf/uninit_slice.rs":"0532041bf0128311eb6a2edbc4b720be30395882744dbc437874753fd8f249b4","src/buf/vec_deque.rs":"8d552c26ac6ce28a471f74c388e4749432e86b1d8f5a9759b9fc32a2549d395f","src/buf/writer.rs":"c92b5f8b9b42e2e784de474c987fe4ac50af4b5c51ac9548d19a54e8ac9ff521","src/bytes.rs":"f8d26a3de35977225abb4a416846f713f3ab2dc1215119bdac6b43ce4ef3fa0e","src/bytes_mut.rs":"6dab0856996c1bf07fd8786cf876a6c8c27df001ae78d23ba2d220d6d3ef9360","src/fmt/debug.rs":"19ebe7e5516e40ab712995f3ec2e0ba78ddfa905cce117e6d01e8eb330f3970a","src/fmt/hex.rs":"13755ec6f1b79923e1f1a05c51b179a38c03c40bb8ed2db0210e8901812e61e7","src/fmt/mod.rs":"176da4e359da99b8e5cf16e480cb7b978f574876827f1b9bb9c08da4d74ac0f5","src/lib.rs":"d8be90ade0cf78a30d73493086c109049d8ff442d69589a07f16480578eb4b17","src/loom.rs":"5dc97a5afce14875a66e44cbf0afa67e084c8b6b8c560bc14e7a70ef73aee96e","src/serde.rs":"3ecd7e828cd4c2b7db93c807cb1548fad209e674df493edf7cda69a7b04d405d","tests/test_buf.rs":"a04fb90644fcf0444092c49a4ca848bb0fd8b2ffeeebcb705eeea2de58560859","tests/test_buf_mut.rs":"5643866cd7b0967fb36053a1da73a23b26ffaa2746c05dca91e82df91aee7f81","tests/test_bytes.rs":"2349daa82fd079037ba4059273a8339fadf2a1d59ac2ce58e83269de6f133a0f","tests/test_bytes_odd_alloc.rs":"9a02cc9b1f09e2353554d9a33f6630250e6b5cf04faa00de3b9fecf247e65edb","tests/test_bytes_vec_alloc.rs":"2b686b6ab44f924e69d8270a4f256eb3626a3b4db8c1919b74bc422c10124899","tests/test_chain.rs":"69661c21b7257bf9c52792cb66d16f4dd5b62131381b8e6dbee1fb177433aec9","tests/test_debug.rs":"13299107172809e8cbbd823964ac9450cd0d6b6de79f2e6a2e0f44b9225a0593","tests/test_iter.rs":"c1f46823df26a90139645fd8728a03138edd95b2849dfec830452a80ddd9726d","tests/test_reader.rs":"bf83669d4e0960dad6aa47b46a9a454814fab626eb83572aba914c3d71618f43","tests/test_serde.rs":"2691f891796ba259de0ecf926de05c514f4912cc5fcd3e6a1591efbcd23ed4d0","tests/test_take.rs":"db01bf6855097f318336e90d12c0725a92cee426d330e477a6bd1d32dac34a27"},"package":"c4872d67bab6358e59559027aa3b9157c53d9358c51423c17554809a8858e0f8"}
{"files":{"CHANGELOG.md":"f6e9c0188bb3dea02b2db80c9a3b9bef263de53fa51303b7e3b5297e14cb2b4a","Cargo.toml":"6549e17368b94b0c93696b62a151587b8ef8879d282c4784cdc85cc1862ee242","LICENSE":"45f522cacecb1023856e46df79ca625dfc550c94910078bd8aec6e02880b3d42","README.md":"b691d6e144eb133c181e869dc2f6a6eedbf76c4f832a9ecfbed5b9c560160c7f","benches/buf.rs":"72e6b6120b52d568da068f17c66a793d65602e400c595778581b63092e41d8dc","benches/bytes.rs":"f8cc255be7e8afedf6ade95cd529d105c537c5ec51110d46d470a26b497afa05","benches/bytes_mut.rs":"1326fe6224b26826228e02b4133151e756f38152c2d9cfe66adf83af76c3ec98","ci/miri.sh":"1ee54575b55a0e495e52ca1a934beed674bc8f375f03c4cfc3e81d221ec4fe98","ci/test-stable.sh":"57dd709bc25a20103ee85e24965566900817b2e603f067fb1251a5c03e4b1d93","ci/tsan.sh":"466b86b19225dd26c756cf2252cb1973f87a145642c99364b462ed7ceb55c7dd","clippy.toml":"8522f448dfa3b33ac334ce47d233ebb6b58e8ae115e45107a64fc1b4510fe560","src/buf/buf_impl.rs":"bdd9d5bc3318185ef1bea8d7c6a9dd3712ec297e0045fd84024f188c0ad96ac0","src/buf/buf_mut.rs":"58d9a82d982b2fc4c5421beafd31a9d0ea1ae500fd4a8b469ffc22e5bb21822a","src/buf/chain.rs":"46ec16a7cc370374218c2621ad738df77d95b25216099900ad9195a08a234375","src/buf/iter.rs":"49e9990a2303252ef7c66c2cc24459097dbbf4900c978453982ef513467bbf67","src/buf/limit.rs":"e005ba140b70f68654877c96b981a220477e415ff5c92438c1b0cb9bc866d872","src/buf/mod.rs":"19ff6fb7e19cba3884bc3f1a50ef20117dbc807f6d146ed355f42344a74fdf44","src/buf/reader.rs":"856c1e7129a1eceaa3c8f9ed4da8c3b5e1cc267eeffa99fa8f7c56c5ca7834d1","src/buf/take.rs":"a897e79bf579391227816973b2aa1f1d63614bd48bc029d9371f61607dcfa23f","src/buf/uninit_slice.rs":"b3bc013336235de246ddcb87f2961c556e800851d2199f4326a7b466c566b4a0","src/buf/vec_deque.rs":"8d552c26ac6ce28a471f74c388e4749432e86b1d8f5a9759b9fc32a2549d395f","src/buf/writer.rs":"c92b5f8b9b42e2e784de474c987fe4ac50af4b5c51ac9548d19a54e8ac9ff521","src/bytes.rs":"37b2df8bc94560d6a6ba2fbfb702358686d5b016cd7d48c80f51fa2ad03c5393","src/bytes_mut.rs":"29854cf9fbedd12ca66cf76bf4b1ca4424620ad495c75243a51a77efda926c73","src/fmt/debug.rs":"97b23cfa1d2701fa187005421302eeb260e635cd4f9a9e02b044ff89fcc8b8ad","src/fmt/hex.rs":"13755ec6f1b79923e1f1a05c51b179a38c03c40bb8ed2db0210e8901812e61e7","src/fmt/mod.rs":"176da4e359da99b8e5cf16e480cb7b978f574876827f1b9bb9c08da4d74ac0f5","src/lib.rs":"d8be90ade0cf78a30d73493086c109049d8ff442d69589a07f16480578eb4b17","src/loom.rs":"eb3f577d8cce39a84155c241c4dc308f024631f02085833f7fe9f0ea817bcea9","src/serde.rs":"3ecd7e828cd4c2b7db93c807cb1548fad209e674df493edf7cda69a7b04d405d","tests/test_buf.rs":"a04fb90644fcf0444092c49a4ca848bb0fd8b2ffeeebcb705eeea2de58560859","tests/test_buf_mut.rs":"5643866cd7b0967fb36053a1da73a23b26ffaa2746c05dca91e82df91aee7f81","tests/test_bytes.rs":"fe5beb749d3d48ec0c57d2ecf0ca56edc5a08cdc07a7842d863ee64d78d0ce69","tests/test_bytes_odd_alloc.rs":"aeb7a86bf8b31f67b6f453399f3649e0d3878247debc1325d98e66201b1da15f","tests/test_bytes_vec_alloc.rs":"dd7e3c3a71abcfdcad7e3b2f52a6bd106ad6ea0d4bc634372e81dae097233cf0","tests/test_chain.rs":"e9f094539bb42b3135f50033c44122a6b44cf0f953e51e8b488f43243f1e7f10","tests/test_debug.rs":"13299107172809e8cbbd823964ac9450cd0d6b6de79f2e6a2e0f44b9225a0593","tests/test_iter.rs":"c1f46823df26a90139645fd8728a03138edd95b2849dfec830452a80ddd9726d","tests/test_reader.rs":"bf83669d4e0960dad6aa47b46a9a454814fab626eb83572aba914c3d71618f43","tests/test_serde.rs":"2691f891796ba259de0ecf926de05c514f4912cc5fcd3e6a1591efbcd23ed4d0","tests/test_take.rs":"db01bf6855097f318336e90d12c0725a92cee426d330e477a6bd1d32dac34a27"},"package":"ec8a
7b6a70fde80372154c65702f00a0f56f3e1c36abbc6c440484be248856db"}

third_party/rust/bytes/CHANGELOG.md (vendored, 33 changes)

@ -1,3 +1,36 @@
# 1.2.1 (July 30, 2022)
### Fixed
- Fix unbounded memory growth when using `reserve` (#560)
# 1.2.0 (July 19, 2022)
### Added
- Add `BytesMut::zeroed` (#517)
- Implement `Extend<Bytes>` for `BytesMut` (#527)
- Add conversion from `BytesMut` to `Vec<u8>` (#543, #554)
- Add conversion from `Bytes` to `Vec<u8>` (#547)
- Add `UninitSlice::as_uninit_slice_mut()` (#548)
- Add const to `Bytes::{len,is_empty}` (#514)
### Changed
- Reuse vector in `BytesMut::reserve` (#539, #544)
### Fixed
- Make miri happy (#515, #523, #542, #545, #553)
- Make tsan happy (#541)
- Fix `remaining_mut()` on chain (#488)
- Fix amortized asymptotics of `BytesMut` (#555)
### Documented
- Redraw layout diagram with box drawing characters (#539)
- Clarify `BytesMut::unsplit` docs (#535)
# 1.1.0 (August 25, 2021)
### Added
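
The headline additions in 1.2 are easiest to see in code. A minimal sketch (assuming only `bytes = "1.2.1"` as a dependency) exercising `BytesMut::zeroed`, `Extend<Bytes>`, and the new `Vec<u8>` conversions:

```rust
use bytes::{Bytes, BytesMut};

fn main() {
    // New in 1.2.0: zero-initialized construction (#517).
    let zeros = BytesMut::zeroed(8);
    assert!(zeros.iter().all(|&b| b == 0));

    // New in 1.2.0: Extend<Bytes> for BytesMut (#527).
    let mut buf = BytesMut::new();
    buf.extend([Bytes::from_static(b"hello, "), Bytes::from_static(b"world")]);
    assert_eq!(&buf[..], b"hello, world");

    // New in 1.2.0: conversions back to Vec<u8> (#543, #547).
    let v1: Vec<u8> = buf.into();
    let v2: Vec<u8> = Bytes::from_static(b"bytes").into();
    assert_eq!(v1.len(), 12);
    assert_eq!(v2, b"bytes");
}
```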

third_party/rust/bytes/Cargo.toml (vendored, 36 changes)

@ -3,36 +3,52 @@
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you believe there's an error in this file please file an
# issue against the rust-lang/cargo repository. If you're
# editing this file be aware that the upstream Cargo.toml
# will likely look very different (and much more reasonable)
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.
[package]
edition = "2018"
name = "bytes"
version = "1.1.0"
authors = ["Carl Lerche <me@carllerche.com>", "Sean McArthur <sean@seanmonstar.com>"]
version = "1.2.1"
authors = [
"Carl Lerche <me@carllerche.com>",
"Sean McArthur <sean@seanmonstar.com>",
]
description = "Types and traits for working with bytes"
readme = "README.md"
keywords = ["buffers", "zero-copy", "io"]
categories = ["network-programming", "data-structures"]
keywords = [
"buffers",
"zero-copy",
"io",
]
categories = [
"network-programming",
"data-structures",
]
license = "MIT"
repository = "https://github.com/tokio-rs/bytes"
[package.metadata.docs.rs]
rustdoc-args = ["--cfg", "docsrs"]
rustdoc-args = [
"--cfg",
"docsrs",
]
[dependencies.serde]
version = "1.0.60"
features = ["alloc"]
optional = true
default-features = false
[dev-dependencies.serde_test]
version = "1.0"
[features]
default = ["std"]
std = []
[target."cfg(loom)".dev-dependencies.loom]
version = "0.5"

third_party/rust/bytes/benches/buf.rs (vendored, 2 changes)

@ -46,7 +46,7 @@ impl TestBuf {
}
impl Buf for TestBuf {
fn remaining(&self) -> usize {
return self.buf.len() - self.pos;
self.buf.len() - self.pos
}
fn advance(&mut self, cnt: usize) {
self.pos += cnt;

third_party/rust/bytes/benches/bytes.rs (vendored, 1 change)

@ -88,6 +88,7 @@ fn from_long_slice(b: &mut Bencher) {
#[bench]
fn slice_empty(b: &mut Bencher) {
b.iter(|| {
// `clone` is to convert to ARC
let b = Bytes::from(vec![17; 1024]).clone();
for i in 0..1000 {
test::black_box(b.slice(i % 100..i % 100));

third_party/rust/bytes/ci/miri.sh (vendored, 10 changes)

@ -1,11 +1,11 @@
#!/bin/bash
set -e
MIRI_NIGHTLY=nightly-$(curl -s https://rust-lang.github.io/rustup-components-history/x86_64-unknown-linux-gnu/miri)
echo "Installing latest nightly with Miri: $MIRI_NIGHTLY"
rustup set profile minimal
rustup default "$MIRI_NIGHTLY"
rustup component add miri
rustup toolchain install nightly --component miri
rustup override set nightly
cargo miri setup
export MIRIFLAGS="-Zmiri-strict-provenance"
cargo miri test
cargo miri test --target mips64-unknown-linux-gnuabi64

third_party/rust/bytes/clippy.toml (vendored, new file, 1 change)

@ -0,0 +1 @@
msrv = "1.39"

third_party/rust/bytes/src/buf/buf_mut.rs (vendored, 4 changes)

@ -56,6 +56,10 @@ pub unsafe trait BufMut {
/// Implementations of `remaining_mut` should ensure that the return value
/// does not change unless a call is made to `advance_mut` or any other
/// function that is documented to change the `BufMut`'s current position.
///
/// # Note
///
/// `remaining_mut` may return a value smaller than the actual available space.
fn remaining_mut(&self) -> usize;
/// Advance the internal cursor of the BufMut

third_party/rust/bytes/src/buf/chain.rs (vendored, 3 changes)

@ -198,8 +198,7 @@ where
fn remaining_mut(&self) -> usize {
self.a
.remaining_mut()
.checked_add(self.b.remaining_mut())
.unwrap()
.saturating_add(self.b.remaining_mut())
}
fn chunk_mut(&mut self) -> &mut UninitSlice {
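
Combined with the `remaining_mut` note above, the upshot is that a chain whose writers' capacities sum past `usize::MAX` now reports a saturated value instead of panicking (the `chain_overflow_remaining_mut` test near the end of this patch covers the same scenario). A minimal sketch, assuming a 64-bit target where each empty `Vec<u8>` reports `isize::MAX` writable bytes:

```rust
use bytes::BufMut;

fn main() {
    // Each empty Vec<u8> reports isize::MAX - len() bytes remaining, so the
    // sum of three overflows usize. With saturating_add the chain reports
    // usize::MAX; the old checked_add(..).unwrap() panicked here.
    let chained = Vec::<u8>::new().chain_mut(Vec::new()).chain_mut(Vec::new());
    assert_eq!(chained.remaining_mut(), usize::MAX);
}
```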

third_party/rust/bytes/src/buf/uninit_slice.rs (vendored)

@ -124,6 +124,32 @@ impl UninitSlice {
self.0.as_mut_ptr() as *mut _
}
/// Return a `&mut [MaybeUninit<u8>]` to this slice's buffer.
///
/// # Safety
///
/// The caller **must not** read from the referenced memory and **must not** write
/// **uninitialized** bytes to the slice either. This is because the `BufMut` implementation
/// that created the `UninitSlice` knows which parts are initialized. Writing uninitialized
/// bytes to the slice may cause the `BufMut` to read those bytes and trigger undefined
/// behavior.
///
/// # Examples
///
/// ```
/// use bytes::BufMut;
///
/// let mut data = [0, 1, 2];
/// let mut slice = &mut data[..];
/// unsafe {
/// let uninit_slice = BufMut::chunk_mut(&mut slice).as_uninit_slice_mut();
/// };
/// ```
#[inline]
pub unsafe fn as_uninit_slice_mut<'a>(&'a mut self) -> &'a mut [MaybeUninit<u8>] {
&mut *(self as *mut _ as *mut [MaybeUninit<u8>])
}
/// Returns the number of bytes in the slice.
///
/// # Examples

third_party/rust/bytes/src/bytes.rs (vendored, 218 changes)

@ -2,12 +2,18 @@ use core::iter::FromIterator;
use core::ops::{Deref, RangeBounds};
use core::{cmp, fmt, hash, mem, ptr, slice, usize};
use alloc::{borrow::Borrow, boxed::Box, string::String, vec::Vec};
use alloc::{
alloc::{dealloc, Layout},
borrow::Borrow,
boxed::Box,
string::String,
vec::Vec,
};
use crate::buf::IntoIter;
#[allow(unused)]
use crate::loom::sync::atomic::AtomicMut;
use crate::loom::sync::atomic::{self, AtomicPtr, AtomicUsize, Ordering};
use crate::loom::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
use crate::Buf;
/// A cheaply cloneable and sliceable chunk of contiguous memory.
@ -55,7 +61,7 @@ use crate::Buf;
/// # Sharing
///
/// `Bytes` contains a vtable, which allows implementations of `Bytes` to define
/// how sharing/cloneing is implemented in detail.
/// how sharing/cloning is implemented in detail.
/// When `Bytes::clone()` is called, `Bytes` will call the vtable function for
/// cloning the backing storage in order to share it behind between multiple
/// `Bytes` instances.
@ -78,18 +84,18 @@ use crate::Buf;
///
/// ```text
///
/// Arc ptrs +---------+
/// ________________________ / | Bytes 2 |
/// / +---------+
/// / +-----------+ | |
/// |_________/ | Bytes 1 | | |
/// | +-----------+ | |
/// Arc ptrs ┌─────────┐
/// ________________________ / │ Bytes 2 │
/// / └─────────┘
/// / ┌───────────┐ | |
/// |_________/ │ Bytes 1 │ | |
/// | └───────────┘ | |
/// | | | ___/ data | tail
/// | data | tail |/ |
/// v v v v
/// +-----+---------------------------------+-----+
/// | Arc | | | | |
/// +-----+---------------------------------+-----+
/// ┌─────┬─────┬───────────┬───────────────┬─────┐
/// │ Arc │ │ │ │ │
/// └─────┴─────┴───────────┴───────────────┴─────┘
/// ```
pub struct Bytes {
ptr: *const u8,
@ -103,6 +109,10 @@ pub(crate) struct Vtable {
/// fn(data, ptr, len)
pub clone: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> Bytes,
/// fn(data, ptr, len)
///
/// takes `Bytes` to value
pub to_vec: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> Vec<u8>,
/// fn(data, ptr, len)
pub drop: unsafe fn(&mut AtomicPtr<()>, *const u8, usize),
}
@ -179,7 +189,7 @@ impl Bytes {
/// assert_eq!(b.len(), 5);
/// ```
#[inline]
pub fn len(&self) -> usize {
pub const fn len(&self) -> usize {
self.len
}
@ -194,7 +204,7 @@ impl Bytes {
/// assert!(b.is_empty());
/// ```
#[inline]
pub fn is_empty(&self) -> bool {
pub const fn is_empty(&self) -> bool {
self.len == 0
}
@ -262,7 +272,7 @@ impl Bytes {
let mut ret = self.clone();
ret.len = end - begin;
ret.ptr = unsafe { ret.ptr.offset(begin as isize) };
ret.ptr = unsafe { ret.ptr.add(begin) };
ret
}
@ -308,15 +318,15 @@ impl Bytes {
assert!(
sub_p >= bytes_p,
"subset pointer ({:p}) is smaller than self pointer ({:p})",
sub_p as *const u8,
bytes_p as *const u8,
subset.as_ptr(),
self.as_ptr(),
);
assert!(
sub_p + sub_len <= bytes_p + bytes_len,
"subset is out of bounds: self = ({:p}, {}), subset = ({:p}, {})",
bytes_p as *const u8,
self.as_ptr(),
bytes_len,
sub_p as *const u8,
subset.as_ptr(),
sub_len,
);
@ -501,7 +511,7 @@ impl Bytes {
// should already be asserted, but debug assert for tests
debug_assert!(self.len >= by, "internal: inc_start out of bounds");
self.len -= by;
self.ptr = self.ptr.offset(by as isize);
self.ptr = self.ptr.add(by);
}
}
@ -604,7 +614,7 @@ impl<'a> IntoIterator for &'a Bytes {
type IntoIter = core::slice::Iter<'a, u8>;
fn into_iter(self) -> Self::IntoIter {
self.as_slice().into_iter()
self.as_slice().iter()
}
}
@ -686,7 +696,7 @@ impl PartialOrd<Bytes> for str {
impl PartialEq<Vec<u8>> for Bytes {
fn eq(&self, other: &Vec<u8>) -> bool {
*self == &other[..]
*self == other[..]
}
}
@ -710,7 +720,7 @@ impl PartialOrd<Bytes> for Vec<u8> {
impl PartialEq<String> for Bytes {
fn eq(&self, other: &String) -> bool {
*self == &other[..]
*self == other[..]
}
}
@ -815,18 +825,18 @@ impl From<Box<[u8]>> for Bytes {
let ptr = Box::into_raw(slice) as *mut u8;
if ptr as usize & 0x1 == 0 {
let data = ptr as usize | KIND_VEC;
let data = ptr_map(ptr, |addr| addr | KIND_VEC);
Bytes {
ptr,
len,
data: AtomicPtr::new(data as *mut _),
data: AtomicPtr::new(data.cast()),
vtable: &PROMOTABLE_EVEN_VTABLE,
}
} else {
Bytes {
ptr,
len,
data: AtomicPtr::new(ptr as *mut _),
data: AtomicPtr::new(ptr.cast()),
vtable: &PROMOTABLE_ODD_VTABLE,
}
}
@ -839,6 +849,13 @@ impl From<String> for Bytes {
}
}
impl From<Bytes> for Vec<u8> {
fn from(bytes: Bytes) -> Vec<u8> {
let bytes = mem::ManuallyDrop::new(bytes);
unsafe { (bytes.vtable.to_vec)(&bytes.data, bytes.ptr, bytes.len) }
}
}
// ===== impl Vtable =====
impl fmt::Debug for Vtable {
@ -854,6 +871,7 @@ impl fmt::Debug for Vtable {
const STATIC_VTABLE: Vtable = Vtable {
clone: static_clone,
to_vec: static_to_vec,
drop: static_drop,
};
@ -862,6 +880,11 @@ unsafe fn static_clone(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
Bytes::from_static(slice)
}
unsafe fn static_to_vec(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
let slice = slice::from_raw_parts(ptr, len);
slice.to_vec()
}
unsafe fn static_drop(_: &mut AtomicPtr<()>, _: *const u8, _: usize) {
// nothing to drop for &'static [u8]
}
@ -870,11 +893,13 @@ unsafe fn static_drop(_: &mut AtomicPtr<()>, _: *const u8, _: usize) {
static PROMOTABLE_EVEN_VTABLE: Vtable = Vtable {
clone: promotable_even_clone,
to_vec: promotable_even_to_vec,
drop: promotable_even_drop,
};
static PROMOTABLE_ODD_VTABLE: Vtable = Vtable {
clone: promotable_odd_clone,
to_vec: promotable_odd_to_vec,
drop: promotable_odd_drop,
};
@ -883,25 +908,57 @@ unsafe fn promotable_even_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize
let kind = shared as usize & KIND_MASK;
if kind == KIND_ARC {
shallow_clone_arc(shared as _, ptr, len)
shallow_clone_arc(shared.cast(), ptr, len)
} else {
debug_assert_eq!(kind, KIND_VEC);
let buf = (shared as usize & !KIND_MASK) as *mut u8;
let buf = ptr_map(shared.cast(), |addr| addr & !KIND_MASK);
shallow_clone_vec(data, shared, buf, ptr, len)
}
}
unsafe fn promotable_to_vec(
data: &AtomicPtr<()>,
ptr: *const u8,
len: usize,
f: fn(*mut ()) -> *mut u8,
) -> Vec<u8> {
let shared = data.load(Ordering::Acquire);
let kind = shared as usize & KIND_MASK;
if kind == KIND_ARC {
shared_to_vec_impl(shared.cast(), ptr, len)
} else {
// If Bytes holds a Vec, then the offset must be 0.
debug_assert_eq!(kind, KIND_VEC);
let buf = f(shared);
let cap = (ptr as usize - buf as usize) + len;
// Copy back buffer
ptr::copy(ptr, buf, len);
Vec::from_raw_parts(buf, len, cap)
}
}
unsafe fn promotable_even_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
promotable_to_vec(data, ptr, len, |shared| {
ptr_map(shared.cast(), |addr| addr & !KIND_MASK)
})
}
unsafe fn promotable_even_drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) {
data.with_mut(|shared| {
let shared = *shared;
let kind = shared as usize & KIND_MASK;
if kind == KIND_ARC {
release_shared(shared as *mut Shared);
release_shared(shared.cast());
} else {
debug_assert_eq!(kind, KIND_VEC);
let buf = (shared as usize & !KIND_MASK) as *mut u8;
drop(rebuild_boxed_slice(buf, ptr, len));
let buf = ptr_map(shared.cast(), |addr| addr & !KIND_MASK);
free_boxed_slice(buf, ptr, len);
}
});
}
@ -914,38 +971,49 @@ unsafe fn promotable_odd_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize)
shallow_clone_arc(shared as _, ptr, len)
} else {
debug_assert_eq!(kind, KIND_VEC);
shallow_clone_vec(data, shared, shared as *mut u8, ptr, len)
shallow_clone_vec(data, shared, shared.cast(), ptr, len)
}
}
unsafe fn promotable_odd_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
promotable_to_vec(data, ptr, len, |shared| shared.cast())
}
unsafe fn promotable_odd_drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) {
data.with_mut(|shared| {
let shared = *shared;
let kind = shared as usize & KIND_MASK;
if kind == KIND_ARC {
release_shared(shared as *mut Shared);
release_shared(shared.cast());
} else {
debug_assert_eq!(kind, KIND_VEC);
drop(rebuild_boxed_slice(shared as *mut u8, ptr, len));
free_boxed_slice(shared.cast(), ptr, len);
}
});
}
unsafe fn rebuild_boxed_slice(buf: *mut u8, offset: *const u8, len: usize) -> Box<[u8]> {
unsafe fn free_boxed_slice(buf: *mut u8, offset: *const u8, len: usize) {
let cap = (offset as usize - buf as usize) + len;
Box::from_raw(slice::from_raw_parts_mut(buf, cap))
dealloc(buf, Layout::from_size_align(cap, 1).unwrap())
}
// ===== impl SharedVtable =====
struct Shared {
// holds vec for drop, but otherwise doesnt access it
_vec: Vec<u8>,
// Holds arguments to dealloc upon Drop, but otherwise doesn't use them
buf: *mut u8,
cap: usize,
ref_cnt: AtomicUsize,
}
impl Drop for Shared {
fn drop(&mut self) {
unsafe { dealloc(self.buf, Layout::from_size_align(self.cap, 1).unwrap()) }
}
}
// Assert that the alignment of `Shared` is divisible by 2.
// This is a necessary invariant since we depend on allocating `Shared` a
// shared object to implicitly carry the `KIND_ARC` flag in its pointer.
@ -954,6 +1022,7 @@ const _: [(); 0 - mem::align_of::<Shared>() % 2] = []; // Assert that the alignm
static SHARED_VTABLE: Vtable = Vtable {
clone: shared_clone,
to_vec: shared_to_vec,
drop: shared_drop,
};
@ -966,9 +1035,42 @@ unsafe fn shared_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Byte
shallow_clone_arc(shared as _, ptr, len)
}
unsafe fn shared_to_vec_impl(shared: *mut Shared, ptr: *const u8, len: usize) -> Vec<u8> {
// Check that the ref_cnt is 1 (unique).
//
// If it is unique, then it is set to 0 with AcqRel fence for the same
// reason in release_shared.
//
// Otherwise, we take the other branch and call release_shared.
if (*shared)
.ref_cnt
.compare_exchange(1, 0, Ordering::AcqRel, Ordering::Relaxed)
.is_ok()
{
let buf = (*shared).buf;
let cap = (*shared).cap;
// Deallocate Shared
drop(Box::from_raw(shared as *mut mem::ManuallyDrop<Shared>));
// Copy back buffer
ptr::copy(ptr, buf, len);
Vec::from_raw_parts(buf, len, cap)
} else {
let v = slice::from_raw_parts(ptr, len).to_vec();
release_shared(shared);
v
}
}
unsafe fn shared_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
shared_to_vec_impl(data.load(Ordering::Relaxed).cast(), ptr, len)
}
unsafe fn shared_drop(data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) {
data.with_mut(|shared| {
release_shared(*shared as *mut Shared);
release_shared(shared.cast());
});
}
@ -1006,9 +1108,9 @@ unsafe fn shallow_clone_vec(
// updated and since the buffer hasn't been promoted to an
// `Arc`, those three fields still are the components of the
// vector.
let vec = rebuild_boxed_slice(buf, offset, len).into_vec();
let shared = Box::new(Shared {
_vec: vec,
buf,
cap: (offset as usize - buf as usize) + len,
// Initialize refcount to 2. One for this reference, and one
// for the new clone that will be returned from
// `shallow_clone`.
@ -1082,10 +1184,40 @@ unsafe fn release_shared(ptr: *mut Shared) {
// > "acquire" operation before deleting the object.
//
// [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
atomic::fence(Ordering::Acquire);
//
// Thread sanitizer does not support atomic fences. Use an atomic load
// instead.
(*ptr).ref_cnt.load(Ordering::Acquire);
// Drop the data
Box::from_raw(ptr);
drop(Box::from_raw(ptr));
}
// Ideally we would always use this version of `ptr_map` since it is strict
// provenance compatible, but it results in worse codegen. We will however still
// use it on miri because it gives better diagnostics for people who test bytes
// code with miri.
//
// See https://github.com/tokio-rs/bytes/pull/545 for more info.
#[cfg(miri)]
fn ptr_map<F>(ptr: *mut u8, f: F) -> *mut u8
where
F: FnOnce(usize) -> usize,
{
let old_addr = ptr as usize;
let new_addr = f(old_addr);
let diff = new_addr.wrapping_sub(old_addr);
ptr.wrapping_add(diff)
}
#[cfg(not(miri))]
fn ptr_map<F>(ptr: *mut u8, f: F) -> *mut u8
where
F: FnOnce(usize) -> usize,
{
let old_addr = ptr as usize;
let new_addr = f(old_addr);
new_addr as *mut u8
}
// compile-fails
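
The `to_vec` vtable entry added throughout this file is what backs the new `impl From<Bytes> for Vec<u8>`: a uniquely owned `Bytes` can hand its allocation back without copying, while a shared one copies its view out. A minimal sketch of the observable API (whether the allocation is actually reused is an internal detail, not a guarantee):

```rust
use bytes::Bytes;

fn main() {
    let b1 = Bytes::from(vec![7u8; 64]);
    let b2 = b1.clone();

    // b1 is shared here (ref_cnt == 2), so the conversion copies the view.
    let v1: Vec<u8> = b1.into();

    // b2 is now the only handle (ref_cnt == 1), so the conversion may
    // reclaim the original buffer instead of copying.
    let v2: Vec<u8> = b2.into();

    assert_eq!(v1, v2);
}
```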

third_party/rust/bytes/src/bytes_mut.rs (vendored, 280 changes)

@ -8,6 +8,7 @@ use alloc::{
borrow::{Borrow, BorrowMut},
boxed::Box,
string::String,
vec,
vec::Vec,
};
@ -15,7 +16,7 @@ use crate::buf::{IntoIter, UninitSlice};
use crate::bytes::Vtable;
#[allow(unused)]
use crate::loom::sync::atomic::AtomicMut;
use crate::loom::sync::atomic::{self, AtomicPtr, AtomicUsize, Ordering};
use crate::loom::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
use crate::{Buf, BufMut, Bytes};
/// A unique reference to a contiguous slice of memory.
@ -252,12 +253,28 @@ impl BytesMut {
let ptr = self.ptr.as_ptr();
let len = self.len;
let data = AtomicPtr::new(self.data as _);
let data = AtomicPtr::new(self.data.cast());
mem::forget(self);
unsafe { Bytes::with_vtable(ptr, len, data, &SHARED_VTABLE) }
}
}
/// Creates a new `BytesMut`, which is initialized with zero.
///
/// # Examples
///
/// ```
/// use bytes::BytesMut;
///
/// let zeros = BytesMut::zeroed(42);
///
/// assert_eq!(zeros.len(), 42);
/// zeros.into_iter().for_each(|x| assert_eq!(x, 0));
/// ```
pub fn zeroed(len: usize) -> BytesMut {
BytesMut::from_vec(vec![0; len])
}
/// Splits the bytes into two at the given index.
///
/// Afterwards `self` contains elements `[0, at)`, and the returned
@ -494,11 +511,20 @@ impl BytesMut {
/// reallocations. A call to `reserve` may result in an allocation.
///
/// Before allocating new buffer space, the function will attempt to reclaim
/// space in the existing buffer. If the current handle references a small
/// view in the original buffer and all other handles have been dropped,
/// and the requested capacity is less than or equal to the existing
/// buffer's capacity, then the current view will be copied to the front of
/// the buffer and the handle will take ownership of the full buffer.
/// space in the existing buffer. If the current handle references a view
/// into a larger original buffer, and all other handles referencing part
/// of the same original buffer have been dropped, then the current view
/// can be copied/shifted to the front of the buffer and the handle can take
/// ownership of the full buffer, provided that the full buffer is large
/// enough to fit the requested additional capacity.
///
/// This optimization will only happen if shifting the data from the current
/// view to the front of the buffer is not too expensive in terms of the
/// (amortized) time required. The precise condition is subject to change;
/// as of now, the length of the data being shifted needs to be at least as
/// large as the distance that it's shifted by. If the current view is empty
/// and the original buffer is large enough to fit the requested additional
/// capacity, then reallocations will never happen.
///
/// # Examples
///
@ -562,17 +588,34 @@ impl BytesMut {
// space.
//
// Otherwise, since backed by a vector, use `Vec::reserve`
//
// We need to make sure that this optimization does not kill the
// amortized runtimes of BytesMut's operations.
unsafe {
let (off, prev) = self.get_vec_pos();
// Only reuse space if we can satisfy the requested additional space.
if self.capacity() - self.len() + off >= additional {
// There's space - reuse it
//
// Also check if the value of `off` suggests that enough bytes
// have been read to account for the overhead of shifting all
// the data (in an amortized analysis).
// Hence the condition `off >= self.len()`.
//
// This condition also already implies that the buffer is going
// to be (at least) half-empty in the end; so we do not break
// the (amortized) runtime with future resizes of the underlying
// `Vec`.
//
// [For more details check issue #524, and PR #525.]
if self.capacity() - self.len() + off >= additional && off >= self.len() {
// There's enough space, and it's not too much overhead:
// reuse the space!
//
// Just move the pointer back to the start after copying
// data back.
let base_ptr = self.ptr.as_ptr().offset(-(off as isize));
ptr::copy(self.ptr.as_ptr(), base_ptr, self.len);
// Since `off >= self.len()`, the two regions don't overlap.
ptr::copy_nonoverlapping(self.ptr.as_ptr(), base_ptr, self.len);
self.ptr = vptr(base_ptr);
self.set_vec_pos(0, prev);
@ -580,13 +623,14 @@ impl BytesMut {
// can gain capacity back.
self.cap += off;
} else {
// No space - allocate more
// Not enough space, or reusing might be too much overhead:
// allocate more space!
let mut v =
ManuallyDrop::new(rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off));
v.reserve(additional);
// Update the info
self.ptr = vptr(v.as_mut_ptr().offset(off as isize));
self.ptr = vptr(v.as_mut_ptr().add(off));
self.len = v.len() - off;
self.cap = v.capacity() - off;
}
@ -596,7 +640,7 @@ impl BytesMut {
}
debug_assert_eq!(kind, KIND_ARC);
let shared: *mut Shared = self.data as _;
let shared: *mut Shared = self.data;
// Reserving involves abandoning the currently shared buffer and
// allocating a new vector with the requested capacity.
@ -619,29 +663,56 @@ impl BytesMut {
// sure that the vector has enough capacity.
let v = &mut (*shared).vec;
if v.capacity() >= new_cap {
// The capacity is sufficient, reclaim the buffer
let ptr = v.as_mut_ptr();
let v_capacity = v.capacity();
let ptr = v.as_mut_ptr();
ptr::copy(self.ptr.as_ptr(), ptr, len);
let offset = offset_from(self.ptr.as_ptr(), ptr);
// Compare the condition in the `kind == KIND_VEC` case above
// for more details.
if v_capacity >= new_cap + offset {
self.cap = new_cap;
// no copy is necessary
} else if v_capacity >= new_cap && offset >= len {
// The capacity is sufficient, and copying is not too much
// overhead: reclaim the buffer!
// `offset >= len` means: no overlap
ptr::copy_nonoverlapping(self.ptr.as_ptr(), ptr, len);
self.ptr = vptr(ptr);
self.cap = v.capacity();
} else {
// calculate offset
let off = (self.ptr.as_ptr() as usize) - (v.as_ptr() as usize);
return;
// new_cap is calculated in terms of `BytesMut`, not the underlying
// `Vec`, so it does not take the offset into account.
//
// Thus we have to manually add it here.
new_cap = new_cap.checked_add(off).expect("overflow");
// The vector capacity is not sufficient. The reserve request is
// asking for more than the initial buffer capacity. Allocate more
// than requested if `new_cap` is not much bigger than the current
// capacity.
//
// There are some situations, using `reserve_exact` that the
// buffer capacity could be below `original_capacity`, so do a
// check.
let double = v.capacity().checked_shl(1).unwrap_or(new_cap);
new_cap = cmp::max(double, new_cap);
// No space - allocate more
v.reserve(new_cap - v.len());
// Update the info
self.ptr = vptr(v.as_mut_ptr().add(off));
self.cap = v.capacity() - off;
}
// The vector capacity is not sufficient. The reserve request is
// asking for more than the initial buffer capacity. Allocate more
// than requested if `new_cap` is not much bigger than the current
// capacity.
//
// There are some situations, using `reserve_exact` that the
// buffer capacity could be below `original_capacity`, so do a
// check.
let double = v.capacity().checked_shl(1).unwrap_or(new_cap);
new_cap = cmp::max(cmp::max(double, new_cap), original_capacity);
return;
} else {
new_cap = cmp::max(new_cap, original_capacity);
}
@ -659,7 +730,7 @@ impl BytesMut {
// Update self
let data = (original_capacity_repr << ORIGINAL_CAPACITY_OFFSET) | KIND_VEC;
self.data = data as _;
self.data = invalid_ptr(data);
self.ptr = vptr(v.as_mut_ptr());
self.len = v.len();
self.cap = v.capacity();
@ -690,7 +761,7 @@ impl BytesMut {
// Reserved above
debug_assert!(dst.len() >= cnt);
ptr::copy_nonoverlapping(extend.as_ptr(), dst.as_mut_ptr() as *mut u8, cnt);
ptr::copy_nonoverlapping(extend.as_ptr(), dst.as_mut_ptr(), cnt);
}
unsafe {
@ -700,10 +771,11 @@ impl BytesMut {
/// Absorbs a `BytesMut` that was previously split off.
///
/// If the two `BytesMut` objects were previously contiguous, i.e., if
/// `other` was created by calling `split_off` on this `BytesMut`, then
/// this is an `O(1)` operation that just decreases a reference
/// count and sets a few indices. Otherwise this method degenerates to
/// If the two `BytesMut` objects were previously contiguous and not mutated
/// in a way that causes re-allocation i.e., if `other` was created by
/// calling `split_off` on this `BytesMut`, then this is an `O(1)` operation
/// that just decreases a reference count and sets a few indices.
/// Otherwise this method degenerates to
/// `self.extend_from_slice(other.as_ref())`.
///
/// # Examples
@ -754,7 +826,7 @@ impl BytesMut {
ptr,
len,
cap,
data: data as *mut _,
data: invalid_ptr(data),
}
}
@ -801,7 +873,7 @@ impl BytesMut {
// Updating the start of the view is setting `ptr` to point to the
// new start and updating the `len` field to reflect the new length
// of the view.
self.ptr = vptr(self.ptr.as_ptr().offset(start as isize));
self.ptr = vptr(self.ptr.as_ptr().add(start));
if self.len >= start {
self.len -= start;
@ -825,7 +897,7 @@ impl BytesMut {
return Ok(());
}
let ptr = unsafe { self.ptr.as_ptr().offset(self.len as isize) };
let ptr = unsafe { self.ptr.as_ptr().add(self.len) };
if ptr == other.ptr.as_ptr()
&& self.kind() == KIND_ARC
&& other.kind() == KIND_ARC
@ -875,7 +947,7 @@ impl BytesMut {
// always succeed.
debug_assert_eq!(shared as usize & KIND_MASK, KIND_ARC);
self.data = shared as _;
self.data = shared;
}
/// Makes an exact shallow clone of `self`.
@ -908,13 +980,13 @@ impl BytesMut {
debug_assert_eq!(self.kind(), KIND_VEC);
debug_assert!(pos <= MAX_VEC_POS);
self.data = ((pos << VEC_POS_OFFSET) | (prev & NOT_VEC_POS_MASK)) as *mut _;
self.data = invalid_ptr((pos << VEC_POS_OFFSET) | (prev & NOT_VEC_POS_MASK));
}
#[inline]
fn uninit_slice(&mut self) -> &mut UninitSlice {
unsafe {
let ptr = self.ptr.as_ptr().offset(self.len as isize);
let ptr = self.ptr.as_ptr().add(self.len);
let len = self.cap - self.len;
UninitSlice::from_raw_parts_mut(ptr, len)
@ -934,7 +1006,7 @@ impl Drop for BytesMut {
let _ = rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off);
}
} else if kind == KIND_ARC {
unsafe { release_shared(self.data as _) };
unsafe { release_shared(self.data) };
}
}
}
@ -1161,7 +1233,7 @@ impl<'a> IntoIterator for &'a BytesMut {
type IntoIter = core::slice::Iter<'a, u8>;
fn into_iter(self) -> Self::IntoIter {
self.as_ref().into_iter()
self.as_ref().iter()
}
}
@ -1190,7 +1262,18 @@ impl<'a> Extend<&'a u8> for BytesMut {
where
T: IntoIterator<Item = &'a u8>,
{
self.extend(iter.into_iter().map(|b| *b))
self.extend(iter.into_iter().copied())
}
}
impl Extend<Bytes> for BytesMut {
fn extend<T>(&mut self, iter: T)
where
T: IntoIterator<Item = Bytes>,
{
for bytes in iter {
self.extend_from_slice(&bytes)
}
}
}
@ -1202,7 +1285,7 @@ impl FromIterator<u8> for BytesMut {
impl<'a> FromIterator<&'a u8> for BytesMut {
fn from_iter<T: IntoIterator<Item = &'a u8>>(into_iter: T) -> Self {
BytesMut::from_iter(into_iter.into_iter().map(|b| *b))
BytesMut::from_iter(into_iter.into_iter().copied())
}
}
@ -1243,10 +1326,13 @@ unsafe fn release_shared(ptr: *mut Shared) {
// > "acquire" operation before deleting the object.
//
// [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
atomic::fence(Ordering::Acquire);
//
// Thread sanitizer does not support atomic fences. Use an atomic load
// instead.
(*ptr).ref_count.load(Ordering::Acquire);
// Drop the data
Box::from_raw(ptr);
drop(Box::from_raw(ptr));
}
impl Shared {
@ -1392,7 +1478,7 @@ impl PartialOrd<BytesMut> for str {
impl PartialEq<Vec<u8>> for BytesMut {
fn eq(&self, other: &Vec<u8>) -> bool {
*self == &other[..]
*self == other[..]
}
}
@ -1416,7 +1502,7 @@ impl PartialOrd<BytesMut> for Vec<u8> {
impl PartialEq<String> for BytesMut {
fn eq(&self, other: &String) -> bool {
*self == &other[..]
*self == other[..]
}
}
@ -1482,13 +1568,51 @@ impl PartialOrd<BytesMut> for &str {
impl PartialEq<BytesMut> for Bytes {
fn eq(&self, other: &BytesMut) -> bool {
&other[..] == &self[..]
other[..] == self[..]
}
}
impl PartialEq<Bytes> for BytesMut {
fn eq(&self, other: &Bytes) -> bool {
&other[..] == &self[..]
other[..] == self[..]
}
}
impl From<BytesMut> for Vec<u8> {
fn from(mut bytes: BytesMut) -> Self {
let kind = bytes.kind();
let mut vec = if kind == KIND_VEC {
unsafe {
let (off, _) = bytes.get_vec_pos();
rebuild_vec(bytes.ptr.as_ptr(), bytes.len, bytes.cap, off)
}
} else if kind == KIND_ARC {
let shared = bytes.data as *mut Shared;
if unsafe { (*shared).is_unique() } {
let vec = mem::replace(unsafe { &mut (*shared).vec }, Vec::new());
unsafe { release_shared(shared) };
vec
} else {
return bytes.deref().to_vec();
}
} else {
return bytes.deref().to_vec();
};
let len = bytes.len;
unsafe {
ptr::copy(bytes.ptr.as_ptr(), vec.as_mut_ptr(), len);
vec.set_len(len);
}
mem::forget(bytes);
vec
}
}
@ -1501,6 +1625,35 @@ fn vptr(ptr: *mut u8) -> NonNull<u8> {
}
}
/// Returns a dangling pointer with the given address. This is used to store
/// integer data in pointer fields.
///
/// It is equivalent to `addr as *mut T`, but this fails on miri when strict
/// provenance checking is enabled.
#[inline]
fn invalid_ptr<T>(addr: usize) -> *mut T {
let ptr = core::ptr::null_mut::<u8>().wrapping_add(addr);
debug_assert_eq!(ptr as usize, addr);
ptr.cast::<T>()
}
/// Precondition: dst >= original
///
/// The following line is equivalent to:
///
/// ```rust,ignore
/// self.ptr.as_ptr().offset_from(ptr) as usize;
/// ```
///
/// But because our minimum supported Rust version is 1.39 and `offset_from`
/// was only stabilized in 1.47, we cannot use it.
#[inline]
fn offset_from(dst: *mut u8, original: *mut u8) -> usize {
debug_assert!(dst >= original);
dst as usize - original as usize
}
unsafe fn rebuild_vec(ptr: *mut u8, mut len: usize, mut cap: usize, off: usize) -> Vec<u8> {
let ptr = ptr.offset(-(off as isize));
len += off;
@ -1513,6 +1666,7 @@ unsafe fn rebuild_vec(ptr: *mut u8, mut len: usize, mut cap: usize, off: usize)
static SHARED_VTABLE: Vtable = Vtable {
clone: shared_v_clone,
to_vec: shared_v_to_vec,
drop: shared_v_drop,
};
@ -1520,10 +1674,32 @@ unsafe fn shared_v_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> By
let shared = data.load(Ordering::Relaxed) as *mut Shared;
increment_shared(shared);
let data = AtomicPtr::new(shared as _);
let data = AtomicPtr::new(shared as *mut ());
Bytes::with_vtable(ptr, len, data, &SHARED_VTABLE)
}
unsafe fn shared_v_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
let shared: *mut Shared = data.load(Ordering::Relaxed).cast();
if (*shared).is_unique() {
let shared = &mut *shared;
// Drop shared
let mut vec = mem::replace(&mut shared.vec, Vec::new());
release_shared(shared);
// Copy back buffer
ptr::copy(ptr, vec.as_mut_ptr(), len);
vec.set_len(len);
vec
} else {
let v = slice::from_raw_parts(ptr, len).to_vec();
release_shared(shared);
v
}
}
unsafe fn shared_v_drop(data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) {
data.with_mut(|shared| {
release_shared(*shared as *mut Shared);
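
The reworked `reserve` documentation above is observable from safe code: once `off >= len` and the original allocation can satisfy the request, `reserve` shifts the view back to the front of the buffer instead of reallocating. A minimal sketch under exactly those documented conditions:

```rust
use bytes::{Buf, BufMut, BytesMut};

fn main() {
    let mut buf = BytesMut::with_capacity(64);
    buf.put_slice(&[1u8; 48]);

    // Consume 40 bytes from the front: the view is now 8 bytes at offset 40,
    // with 16 bytes of capacity left after it.
    buf.advance(40);

    // off (40) >= len (8) and the original 64-byte allocation fits
    // len + additional, so reserve copies the 8 bytes back to the start
    // and regains the whole buffer instead of allocating a new one.
    buf.reserve(32);
    assert!(buf.capacity() >= 48);
}
```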

third_party/rust/bytes/src/fmt/debug.rs (vendored, 6 changes)

@ -25,7 +25,7 @@ impl Debug for BytesRef<'_> {
} else if b == b'\0' {
write!(f, "\\0")?;
// ASCII printable
} else if b >= 0x20 && b < 0x7f {
} else if (0x20..0x7f).contains(&b) {
write!(f, "{}", b as char)?;
} else {
write!(f, "\\x{:02x}", b)?;
@ -38,12 +38,12 @@ impl Debug for BytesRef<'_> {
impl Debug for Bytes {
fn fmt(&self, f: &mut Formatter<'_>) -> Result {
Debug::fmt(&BytesRef(&self.as_ref()), f)
Debug::fmt(&BytesRef(self.as_ref()), f)
}
}
impl Debug for BytesMut {
fn fmt(&self, f: &mut Formatter<'_>) -> Result {
Debug::fmt(&BytesRef(&self.as_ref()), f)
Debug::fmt(&BytesRef(self.as_ref()), f)
}
}

third_party/rust/bytes/src/loom.rs (vendored, 4 changes)

@ -1,7 +1,7 @@
#[cfg(not(all(test, loom)))]
pub(crate) mod sync {
pub(crate) mod atomic {
pub(crate) use core::sync::atomic::{fence, AtomicPtr, AtomicUsize, Ordering};
pub(crate) use core::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
pub(crate) trait AtomicMut<T> {
fn with_mut<F, R>(&mut self, f: F) -> R
@ -23,7 +23,7 @@ pub(crate) mod sync {
#[cfg(all(test, loom))]
pub(crate) mod sync {
pub(crate) mod atomic {
pub(crate) use loom::sync::atomic::{fence, AtomicPtr, AtomicUsize, Ordering};
pub(crate) use loom::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
pub(crate) trait AtomicMut<T> {}
}

third_party/rust/bytes/tests/test_bytes.rs (vendored, 175 changes)

@ -4,8 +4,8 @@ use bytes::{Buf, BufMut, Bytes, BytesMut};
use std::usize;
const LONG: &'static [u8] = b"mary had a little lamb, little lamb, little lamb";
const SHORT: &'static [u8] = b"hello world";
const LONG: &[u8] = b"mary had a little lamb, little lamb, little lamb";
const SHORT: &[u8] = b"hello world";
fn is_sync<T: Sync>() {}
fn is_send<T: Send>() {}
@ -411,8 +411,8 @@ fn freeze_after_split_off() {
fn fns_defined_for_bytes_mut() {
let mut bytes = BytesMut::from(&b"hello world"[..]);
bytes.as_ptr();
bytes.as_mut_ptr();
let _ = bytes.as_ptr();
let _ = bytes.as_mut_ptr();
// Iterator
let v: Vec<u8> = bytes.as_ref().iter().cloned().collect();
@ -443,7 +443,7 @@ fn reserve_growth() {
let _ = bytes.split();
bytes.reserve(65);
assert_eq!(bytes.capacity(), 128);
assert_eq!(bytes.capacity(), 117);
}
#[test]
@ -515,6 +515,34 @@ fn reserve_in_arc_unique_doubles() {
assert_eq!(2000, bytes.capacity());
}
#[test]
fn reserve_in_arc_unique_does_not_overallocate_after_split() {
let mut bytes = BytesMut::from(LONG);
let orig_capacity = bytes.capacity();
drop(bytes.split_off(LONG.len() / 2));
// now bytes is Arc and refcount == 1
let new_capacity = bytes.capacity();
bytes.reserve(orig_capacity - new_capacity);
assert_eq!(bytes.capacity(), orig_capacity);
}
#[test]
fn reserve_in_arc_unique_does_not_overallocate_after_multiple_splits() {
let mut bytes = BytesMut::from(LONG);
let orig_capacity = bytes.capacity();
for _ in 0..10 {
drop(bytes.split_off(LONG.len() / 2));
// now bytes is Arc and refcount == 1
let new_capacity = bytes.capacity();
bytes.reserve(orig_capacity - new_capacity);
}
assert_eq!(bytes.capacity(), orig_capacity);
}
#[test]
fn reserve_in_arc_nonunique_does_not_overallocate() {
let mut bytes = BytesMut::with_capacity(1000);
@ -527,6 +555,25 @@ fn reserve_in_arc_nonunique_does_not_overallocate() {
assert_eq!(2001, bytes.capacity());
}
/// This function tests `BytesMut::reserve_inner`, where `BytesMut` holds
/// a unique reference to the shared vector and decides to reuse it
/// by reallocating the `Vec`.
#[test]
fn reserve_shared_reuse() {
let mut bytes = BytesMut::with_capacity(1000);
bytes.put_slice(b"Hello, World!");
drop(bytes.split());
bytes.put_slice(b"!123ex123,sadchELLO,_wORLD!");
// Use split_off so that v.capacity() - self.cap != off
drop(bytes.split_off(9));
assert_eq!(&*bytes, b"!123ex123");
bytes.reserve(2000);
assert_eq!(&*bytes, b"!123ex123");
assert_eq!(bytes.capacity(), 2009);
}
#[test]
fn extend_mut() {
let mut bytes = BytesMut::with_capacity(0);
@ -544,6 +591,13 @@ fn extend_from_slice_mut() {
}
}
#[test]
fn extend_mut_from_bytes() {
let mut bytes = BytesMut::with_capacity(0);
bytes.extend([Bytes::from(LONG)]);
assert_eq!(*bytes, LONG[..]);
}
#[test]
fn extend_mut_without_size_hint() {
let mut bytes = BytesMut::with_capacity(0);
@ -874,7 +928,7 @@ fn from_iter_no_size_hint() {
fn test_slice_ref(bytes: &Bytes, start: usize, end: usize, expected: &[u8]) {
let slice = &(bytes.as_ref()[start..end]);
let sub = bytes.slice_ref(&slice);
let sub = bytes.slice_ref(slice);
assert_eq!(&sub[..], expected);
}
@ -894,7 +948,7 @@ fn slice_ref_empty() {
let bytes = Bytes::from(&b""[..]);
let slice = &(bytes.as_ref()[0..0]);
let sub = bytes.slice_ref(&slice);
let sub = bytes.slice_ref(slice);
assert_eq!(&sub[..], b"");
}
@ -1002,3 +1056,110 @@ fn box_slice_empty() {
let b = Bytes::from(empty);
assert!(b.is_empty());
}
#[test]
fn bytes_into_vec() {
// Test kind == KIND_VEC
let content = b"helloworld";
let mut bytes = BytesMut::new();
bytes.put_slice(content);
let vec: Vec<u8> = bytes.into();
assert_eq!(&vec, content);
// Test kind == KIND_ARC, shared.is_unique() == True
let mut bytes = BytesMut::new();
bytes.put_slice(b"abcdewe23");
bytes.put_slice(content);
// Overwrite the bytes to make sure only one reference to the underlying
// Vec exists.
bytes = bytes.split_off(9);
let vec: Vec<u8> = bytes.into();
assert_eq!(&vec, content);
// Test kind == KIND_ARC, shared.is_unique() == False
let prefix = b"abcdewe23";
let mut bytes = BytesMut::new();
bytes.put_slice(prefix);
bytes.put_slice(content);
let vec: Vec<u8> = bytes.split_off(prefix.len()).into();
assert_eq!(&vec, content);
let vec: Vec<u8> = bytes.into();
assert_eq!(&vec, prefix);
}
#[test]
fn test_bytes_into_vec() {
// Test STATIC_VTABLE.to_vec
let bs = b"1b23exfcz3r";
let vec: Vec<u8> = Bytes::from_static(bs).into();
assert_eq!(&*vec, bs);
// Test bytes_mut.SHARED_VTABLE.to_vec impl
eprintln!("1");
let mut bytes_mut: BytesMut = bs[..].into();
// Set kind to KIND_ARC so that after freeze, Bytes will use bytes_mut.SHARED_VTABLE
eprintln!("2");
drop(bytes_mut.split_off(bs.len()));
eprintln!("3");
let b1 = bytes_mut.freeze();
eprintln!("4");
let b2 = b1.clone();
eprintln!("{:#?}", (&*b1).as_ptr());
// shared.is_unique() = False
eprintln!("5");
assert_eq!(&*Vec::from(b2), bs);
// shared.is_unique() = True
eprintln!("6");
assert_eq!(&*Vec::from(b1), bs);
// Test bytes_mut.SHARED_VTABLE.to_vec impl where offset != 0
let mut bytes_mut1: BytesMut = bs[..].into();
let bytes_mut2 = bytes_mut1.split_off(9);
let b1 = bytes_mut1.freeze();
let b2 = bytes_mut2.freeze();
assert_eq!(Vec::from(b2), bs[9..]);
assert_eq!(Vec::from(b1), bs[..9]);
}
#[test]
fn test_bytes_into_vec_promotable_even() {
let vec = vec![33u8; 1024];
// Test cases where kind == KIND_VEC
let b1 = Bytes::from(vec.clone());
assert_eq!(Vec::from(b1), vec);
// Test cases where kind == KIND_ARC, ref_cnt == 1
let b1 = Bytes::from(vec.clone());
drop(b1.clone());
assert_eq!(Vec::from(b1), vec);
// Test cases where kind == KIND_ARC, ref_cnt == 2
let b1 = Bytes::from(vec.clone());
let b2 = b1.clone();
assert_eq!(Vec::from(b1), vec);
// Test cases where vtable = SHARED_VTABLE, kind == KIND_ARC, ref_cnt == 1
assert_eq!(Vec::from(b2), vec);
// Test cases where offset != 0
let mut b1 = Bytes::from(vec.clone());
let b2 = b1.split_off(20);
assert_eq!(Vec::from(b2), vec[20..]);
assert_eq!(Vec::from(b1), vec[..20]);
}

third_party/rust/bytes/tests/test_bytes_odd_alloc.rs (vendored)

@ -24,8 +24,7 @@ unsafe impl GlobalAlloc for Odd {
};
let ptr = System.alloc(new_layout);
if !ptr.is_null() {
let ptr = ptr.offset(1);
ptr
ptr.offset(1)
} else {
ptr
}
@ -67,3 +66,32 @@ fn test_bytes_clone_drop() {
let b1 = Bytes::from(vec);
let _b2 = b1.clone();
}
#[test]
fn test_bytes_into_vec() {
let vec = vec![33u8; 1024];
// Test cases where kind == KIND_VEC
let b1 = Bytes::from(vec.clone());
assert_eq!(Vec::from(b1), vec);
// Test cases where kind == KIND_ARC, ref_cnt == 1
let b1 = Bytes::from(vec.clone());
drop(b1.clone());
assert_eq!(Vec::from(b1), vec);
// Test cases where kind == KIND_ARC, ref_cnt == 2
let b1 = Bytes::from(vec.clone());
let b2 = b1.clone();
assert_eq!(Vec::from(b1), vec);
// Test cases where vtable = SHARED_VTABLE, kind == KIND_ARC, ref_cnt == 1
assert_eq!(Vec::from(b2), vec);
// Test cases where offset != 0
let mut b1 = Bytes::from(vec.clone());
let b2 = b1.split_off(20);
assert_eq!(Vec::from(b2), vec[20..]);
assert_eq!(Vec::from(b1), vec[..20]);
}

third_party/rust/bytes/tests/test_bytes_vec_alloc.rs (vendored)

@ -1,61 +1,87 @@
use std::alloc::{GlobalAlloc, Layout, System};
use std::{mem, ptr};
use std::ptr::null_mut;
use std::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
use bytes::{Buf, Bytes};
#[global_allocator]
static LEDGER: Ledger = Ledger;
static LEDGER: Ledger = Ledger::new();
struct Ledger;
const LEDGER_LENGTH: usize = 2048;
const USIZE_SIZE: usize = mem::size_of::<usize>();
struct Ledger {
alloc_table: [(AtomicPtr<u8>, AtomicUsize); LEDGER_LENGTH],
}
unsafe impl GlobalAlloc for Ledger {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
if layout.align() == 1 && layout.size() > 0 {
// Allocate extra space to stash a record of
// how much space there was.
let orig_size = layout.size();
let size = orig_size + USIZE_SIZE;
let new_layout = match Layout::from_size_align(size, 1) {
Ok(layout) => layout,
Err(_err) => return ptr::null_mut(),
};
let ptr = System.alloc(new_layout);
if !ptr.is_null() {
(ptr as *mut usize).write(orig_size);
let ptr = ptr.offset(USIZE_SIZE as isize);
ptr
} else {
ptr
impl Ledger {
const fn new() -> Self {
const ELEM: (AtomicPtr<u8>, AtomicUsize) =
(AtomicPtr::new(null_mut()), AtomicUsize::new(0));
let alloc_table = [ELEM; LEDGER_LENGTH];
Self { alloc_table }
}
/// Iterate over our table until we find an open entry, then insert into said entry
fn insert(&self, ptr: *mut u8, size: usize) {
for (entry_ptr, entry_size) in self.alloc_table.iter() {
// SeqCst is good enough here, we don't care about perf, I just want to be correct!
if entry_ptr
.compare_exchange(null_mut(), ptr, Ordering::SeqCst, Ordering::SeqCst)
.is_ok()
{
entry_size.store(size, Ordering::SeqCst);
break;
}
} else {
System.alloc(layout)
}
}
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
if layout.align() == 1 && layout.size() > 0 {
let off_ptr = (ptr as *mut usize).offset(-1);
let orig_size = off_ptr.read();
if orig_size != layout.size() {
panic!(
"bad dealloc: alloc size was {}, dealloc size is {}",
orig_size,
layout.size()
);
fn remove(&self, ptr: *mut u8) -> usize {
for (entry_ptr, entry_size) in self.alloc_table.iter() {
// set the value to be something that will never try to be deallocated, so that we
// don't have any chance of a race condition
//
// don't worry, LEDGER_LENGTH is really long to compensate for us not reclaiming space
if entry_ptr
.compare_exchange(
ptr,
invalid_ptr(usize::MAX),
Ordering::SeqCst,
Ordering::SeqCst,
)
.is_ok()
{
return entry_size.load(Ordering::SeqCst);
}
}
let new_layout = match Layout::from_size_align(layout.size() + USIZE_SIZE, 1) {
Ok(layout) => layout,
Err(_err) => std::process::abort(),
};
System.dealloc(off_ptr as *mut u8, new_layout);
panic!("Couldn't find a matching entry for {:x?}", ptr);
}
}
unsafe impl GlobalAlloc for Ledger {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
let size = layout.size();
let ptr = System.alloc(layout);
self.insert(ptr, size);
ptr
}
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
let orig_size = self.remove(ptr);
if orig_size != layout.size() {
panic!(
"bad dealloc: alloc size was {}, dealloc size is {}",
orig_size,
layout.size()
);
} else {
System.dealloc(ptr, layout);
}
}
}
#[test]
fn test_bytes_advance() {
let mut bytes = Bytes::from(vec![10, 20, 30]);
@ -77,3 +103,41 @@ fn test_bytes_truncate_and_advance() {
bytes.advance(1);
drop(bytes);
}
/// Returns a dangling pointer with the given address. This is used to store
/// integer data in pointer fields.
#[inline]
fn invalid_ptr<T>(addr: usize) -> *mut T {
let ptr = std::ptr::null_mut::<u8>().wrapping_add(addr);
debug_assert_eq!(ptr as usize, addr);
ptr.cast::<T>()
}
#[test]
fn test_bytes_into_vec() {
let vec = vec![33u8; 1024];
// Test cases where kind == KIND_VEC
let b1 = Bytes::from(vec.clone());
assert_eq!(Vec::from(b1), vec);
// Test cases where kind == KIND_ARC, ref_cnt == 1
let b1 = Bytes::from(vec.clone());
drop(b1.clone());
assert_eq!(Vec::from(b1), vec);
// Test cases where kind == KIND_ARC, ref_cnt == 2
let b1 = Bytes::from(vec.clone());
let b2 = b1.clone();
assert_eq!(Vec::from(b1), vec);
// Test cases where vtable = SHARED_VTABLE, kind == KIND_ARC, ref_cnt == 1
assert_eq!(Vec::from(b2), vec);
// Test cases where offset != 0
let mut b1 = Bytes::from(vec.clone());
let b2 = b1.split_off(20);
assert_eq!(Vec::from(b2), vec[20..]);
assert_eq!(Vec::from(b1), vec[..20]);
}

third_party/rust/bytes/tests/test_chain.rs (vendored, 22 changes)

@ -133,6 +133,28 @@ fn vectored_read() {
}
}
#[test]
fn chain_growing_buffer() {
let mut buff = [' ' as u8; 10];
let mut vec = b"wassup".to_vec();
let mut chained = (&mut buff[..]).chain_mut(&mut vec).chain_mut(Vec::new()); // Required for potential overflow because remaining_mut for Vec is isize::MAX - vec.len(), but for chain_mut is usize::MAX
chained.put_slice(b"hey there123123");
assert_eq!(&buff, b"hey there1");
assert_eq!(&vec, b"wassup23123");
}
#[test]
fn chain_overflow_remaining_mut() {
let mut chained = Vec::<u8>::new().chain_mut(Vec::new()).chain_mut(Vec::new());
assert_eq!(chained.remaining_mut(), usize::MAX);
chained.put_slice(&[0; 256]);
assert_eq!(chained.remaining_mut(), usize::MAX);
}
#[test]
fn chain_get_bytes() {
let mut ab = Bytes::copy_from_slice(b"ab");