Bug 1878375 - Synchronize vendored Rust libraries with mozilla-central. r=aleca

mozilla-central: adf3c6ac684d94e7ecdf9b1d9645c49ee3c71a41
comm-central: 2563fd2163627867ccb3e3d9242ced2d3725238a

Differential Revision: https://phabricator.services.mozilla.com/D223428

--HG--
extra : moz-landing-system : lando
This commit is contained in:
Thunderbird Updatebot 2024-09-24 21:55:15 +00:00
Родитель 4ae51d0ce6
Коммит ebd581961d
294 изменённых файлов: 98748 добавлений и 3533 удалений

4
rust/Cargo.lock сгенерированный
Просмотреть файл

@ -148,9 +148,9 @@ dependencies = [
[[package]]
name = "arrayvec"
version = "0.7.2"
version = "0.7.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6"
checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50"
dependencies = [
"serde",
]

Просмотреть файл

@ -129,6 +129,9 @@ features = [
"Win32_Networking",
"Win32_Networking_WinSock",
"Win32_Security",
"Win32_Security_Cryptography",
"Win32_Security_Cryptography_Catalog",
"Win32_Security_WinTrust",
"Win32_Storage_FileSystem",
"Win32_System_Com",
"Win32_System_Diagnostics_Debug",

Просмотреть файл

@ -1 +1 @@
{"mc_workspace_toml": "ec71f03c202853aa5edcf44379703bc2af9c56562d81842e709041425d1acda1f9b672cb91b54170a15078f39b524afa3ba173a2df9dced966fd443bdce3eb4a", "mc_gkrust_toml": "be94c26c2408eeb2b2d12002288381817e4420414af5af78e6c0b2816036d5fe185c3246c6ef435916f547a9010e7c6aeff0be36ecc97e93b70ae9928c343509", "mc_hack_toml": "d76ce395d170433ad163ba7bd0d83a02fdc6c831287315a099d070348e2eb00f8f08da1d9a5fe135925865457a635954c216229ef0abd178a03f0922ebbc890e", "mc_cargo_lock": "6f048bd97c6fdec74401cd559f1ba86d8ba12e22d7d3b288e48cfe582cd521f5d97c6f8cb338a32381736e2d47965ec255eb2ada85e43fa3e718c6dcf7f9f7e5"}
{"mc_workspace_toml": "a652d02c7d42316fc0f7055bff72a67f3c54300ad0a73d549ae69f0e034d131652c6a48ca7293d968e2322d0260cd4e846d43ba2f40c638f4df1e9ba32bd25f6", "mc_gkrust_toml": "be94c26c2408eeb2b2d12002288381817e4420414af5af78e6c0b2816036d5fe185c3246c6ef435916f547a9010e7c6aeff0be36ecc97e93b70ae9928c343509", "mc_hack_toml": "e4426f914e6a77b257568f4a6f0a500dbe8d23159d76e3c82cd0b2b88d7448ea406293e2190543c08040273c5c51e70b76a2096169a8882cd19b62e0fc96f174", "mc_cargo_lock": "ee4cd01c11f2675cb2664d85ccc2f6b953704c041ef39003226bce46e9f2f68934c0c3c577e85cd2e296b1ae16d63a009cf888eb065ec4a8270c2d81e77393c8"}

Просмотреть файл

@ -16,8 +16,8 @@ harness = false
[dependencies]
mozilla-central-workspace-hack = { version = "0.1", features = ['gkrust'], optional = true }
gkrust-shared = { version = "0.1.0", path = "../../../toolkit/library/rust/shared" }
sys_tray = { version = "0.1.0", path = "../sys_tray" }
ews_xpcom = { version = "0.1.0", path = "../ews_xpcom" }
sys_tray = { version = "0.1.0", path = "../sys_tray" }
aa-stroke = { git = "https://github.com/FirefoxGraphics/aa-stroke", rev = "d94278ed9c7020f50232689a26d1277eb0eb74d2" }
app_services_logger = { path = "../../../services/common/app_services_logger" }
audio_thread_priority = { version = "0.32" }

Просмотреть файл

@ -1 +1 @@
{"files":{"CHANGELOG.md":"f684ba712e1546b4cc7de9637f484598cd3fa49b7e7b32c2d98562a8f78ce98c","Cargo.toml":"94a588809d4be252f0146b9e193abc1b22d8afcce0265af19f12905a3db37998","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"0245ee104228a100ce5fceecf43e25faae450494d9173f43fd94c27d69fdac13","README.md":"2264c34c62ea4c617d72047b00749b4786dfb9dff2fac24e0320170ee0cd19c8","benches/arraystring.rs":"fad1cecef71c290375befc77c75a868988b8d74135e8f8732bc5b58c85a8ab46","benches/extend.rs":"c38ecedbc88217a7e9fe1a73f916b168a96e48010a7ccd3dba5c3f8dea030d5d","ci/miri.sh":"6bad1d135e1bdd67a6b91c870a7cf5ee09a85f9515633592a6abfbba95fdaf52","src/array_string.rs":"4f0c2dab882e6df7d10a0b043220587626e64ff94dd53a80949667ed861490de","src/arrayvec.rs":"61fba79217f564e54761c25651c06ec3f6d23b9c6af4bfd621992ef2bb95a74b","src/arrayvec_impl.rs":"a5e3391dc350041651f0ba3816c863ff7f552ff553e4a88f801481dfad7e7613","src/char.rs":"1de50e1d6045af2b3496426492315ba774986f9bc8301ffa391de861a08cc9cb","src/errors.rs":"7fa2ff2350f811d52a210a7346c526d6715cacefd38a46e2d3b57ab7dc62b1ab","src/lib.rs":"29a4123616c0912ccae5d931d45f0ccc3746647da1ba077c34538824910dd0ca","src/utils.rs":"d1cdc508dfca385e63f1f57bc8b53ed4a7f515e4ac1ebaa97b1d543fc8369432","tests/serde.rs":"117eb2961b5954d13c577edf60bbb07cb7481685cc9d6c49760a981d71465849","tests/tests.rs":"f8a18ff5deadb167832964ca0fff4f280129dd4a1de024e9cc76ffb7efe1c12c"},"package":"8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6"}
{"files":{"CHANGELOG.md":"b6cd865fb2685cf241e61f216686c87fd78e7d4b8ba4faf03857c8a2b920eee6","Cargo.toml":"ed90cef00235d8e2b9a3027e11baed38b66a2cf6003bccd805c056c1ec70c722","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"4da95ec4ecb65b738d470b7d762894ad9c97da93e6cbfb18b570fc2c96f4b871","README.md":"2264c34c62ea4c617d72047b00749b4786dfb9dff2fac24e0320170ee0cd19c8","benches/arraystring.rs":"fad1cecef71c290375befc77c75a868988b8d74135e8f8732bc5b58c85a8ab46","benches/extend.rs":"c38ecedbc88217a7e9fe1a73f916b168a96e48010a7ccd3dba5c3f8dea030d5d","src/array_string.rs":"9840c5b95af997584374467dfc365e4ab747175db8e0574be4f148454514fc8f","src/arrayvec.rs":"42923c20659346e48fd551529cb58b8e646c404637818ba466455665cb037dc4","src/arrayvec_impl.rs":"e2642ae566c83ef37ad9aec6af7e3c50af310ba304553f38b2a787666b507580","src/char.rs":"1de50e1d6045af2b3496426492315ba774986f9bc8301ffa391de861a08cc9cb","src/errors.rs":"7fa2ff2350f811d52a210a7346c526d6715cacefd38a46e2d3b57ab7dc62b1ab","src/lib.rs":"8919a7e0c20890b1f094996147a1486d20578579aef03692315cd509e1745222","src/utils.rs":"d1cdc508dfca385e63f1f57bc8b53ed4a7f515e4ac1ebaa97b1d543fc8369432","tests/borsh.rs":"4ea4d21cc311d68d8f234cd77699a88158af26cbc3a69ae1f25c0052663f861d","tests/serde.rs":"117eb2961b5954d13c577edf60bbb07cb7481685cc9d6c49760a981d71465849","tests/tests.rs":"19a9bce4b55506be9ffb7584f47dbfb1d59c66dbfaab55b7a28d827cc0411e78"},"package":"7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50"}

31
third_party/rust/arrayvec/CHANGELOG.md поставляемый
Просмотреть файл

@ -1,6 +1,33 @@
Recent Changes (arrayvec)
=========================
## 0.7.6
- Fix no-std build [#274](https://github.com/bluss/arrayvec/pull/274)
## 0.7.5
- Add `as_ptr` and `as_mut_ptr` to `ArrayString` [@YuhanLiin](https://github.com/YuhanLiin) [#260](https://github.com/bluss/arrayvec/pull/260)
- Add borsh serialization support by @honzasp and @Fuuzetsu [#259](https://github.com/bluss/arrayvec/pull/259)
- Move length field before data in ArrayVec and ArrayString by @JakkuSakura [#255](https://github.com/bluss/arrayvec/pull/255)
- Fix miri error for ZST case in extend by @bluss
- implement AsRef<Path> for ArrayString by [@Zoybean](https://github.com/Zoybean) [#218](https://github.com/bluss/arrayvec/pull/218)
- Fix typos in changelog by [@striezel](https://github.com/striezel) [#241](https://github.com/bluss/arrayvec/pull/241)
- Add `as_slice`, `as_mut_slice` methods to `IntoIter` by [@clarfonthey](https://github.com/clarfonthey) [#224](https://github.com/bluss/arrayvec/pull/224)
## 0.7.4
- Add feature zeroize to support the `Zeroize` trait by @elichai
## 0.7.3
- Use track_caller on multiple methods like push and similar, for capacity
overflows by @kornelski
- impl BorrowMut for ArrayString by @msrd0
- Fix stacked borrows violations by @clubby789
- Update Miri CI by @RalfJung
## 0.7.2
- Add `.as_mut_str()` to `ArrayString` by @clarfonthey
@ -106,7 +133,7 @@ Recent Changes (arrayvec)
users outside the crate.
- Add `FromStr` impl for `ArrayString` by @despawnerer
- Add method `try_extend_from_slice` to `ArrayVec`, which is always
effecient by @Thomasdezeeuw.
efficient by @Thomasdezeeuw.
- Add method `remaining_capacity` by @Thomasdezeeuw
- Improve performance of the `extend` method.
- The index type of zero capacity vectors is now itself zero size, by
@ -157,7 +184,7 @@ Recent Changes (arrayvec)
- Fix future compat warning about raw pointer casts
- Use `drop_in_place` when dropping the arrayvec by-value iterator
- Decrease mininum Rust version (see docs) by @jeehoonkang
- Decrease minimum Rust version (see docs) by @jeehoonkang
- 0.3.25

70
third_party/rust/arrayvec/Cargo.toml поставляемый
Просмотреть файл

@ -11,38 +11,90 @@
[package]
edition = "2018"
rust-version = "1.51"
name = "arrayvec"
version = "0.7.2"
version = "0.7.6"
authors = ["bluss"]
build = false
autobins = false
autoexamples = false
autotests = false
autobenches = false
description = "A vector with fixed capacity, backed by an array (it can be stored on the stack too). Implements fixed capacity ArrayVec and ArrayString."
documentation = "https://docs.rs/arrayvec/"
keywords = ["stack", "vector", "array", "data-structure", "no_std"]
categories = ["data-structures", "no-std"]
readme = "README.md"
keywords = [
"stack",
"vector",
"array",
"data-structure",
"no_std",
]
categories = [
"data-structures",
"no-std",
]
license = "MIT OR Apache-2.0"
repository = "https://github.com/bluss/arrayvec"
[package.metadata.docs.rs]
features = ["serde"]
features = [
"borsh",
"serde",
"zeroize",
]
[package.metadata.release]
no-dev-version = true
tag-name = "{{version}}"
[profile.bench]
debug = true
debug = 2
[profile.release]
debug = true
debug = 2
[[bench]]
name = "extend"
harness = false
[lib]
name = "arrayvec"
path = "src/lib.rs"
[[test]]
name = "borsh"
path = "tests/borsh.rs"
[[test]]
name = "serde"
path = "tests/serde.rs"
[[test]]
name = "tests"
path = "tests/tests.rs"
[[bench]]
name = "arraystring"
path = "benches/arraystring.rs"
harness = false
[[bench]]
name = "extend"
path = "benches/extend.rs"
harness = false
[dependencies.borsh]
version = "1.2.0"
optional = true
default-features = false
[dependencies.serde]
version = "1.0"
optional = true
default-features = false
[dependencies.zeroize]
version = "1.4"
optional = true
default-features = false
[dev-dependencies.bencher]
version = "0.1.4"

2
third_party/rust/arrayvec/LICENSE-MIT поставляемый
Просмотреть файл

@ -1,4 +1,4 @@
Copyright (c) Ulrik Sverdrup "bluss" 2015-2017
Copyright (c) Ulrik Sverdrup "bluss" 2015-2023
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated

15
third_party/rust/arrayvec/ci/miri.sh поставляемый
Просмотреть файл

@ -1,15 +0,0 @@
#!/bin/sh
set -ex
export CARGO_NET_RETRY=5
export CARGO_NET_TIMEOUT=10
MIRI_NIGHTLY=nightly-$(curl -s https://rust-lang.github.io/rustup-components-history/x86_64-unknown-linux-gnu/miri)
echo "Installing latest nightly with Miri: $MIRI_NIGHTLY"
rustup default "$MIRI_NIGHTLY"
rustup component add miri
cargo miri setup
cargo miri test

90
third_party/rust/arrayvec/src/array_string.rs поставляемый
Просмотреть файл

@ -1,10 +1,12 @@
use std::borrow::Borrow;
use std::borrow::{Borrow, BorrowMut};
use std::cmp;
use std::convert::TryFrom;
use std::fmt;
use std::hash::{Hash, Hasher};
use std::mem::MaybeUninit;
use std::ops::{Deref, DerefMut};
#[cfg(feature="std")]
use std::path::Path;
use std::ptr;
use std::slice;
use std::str;
@ -31,10 +33,11 @@ use serde::{Serialize, Deserialize, Serializer, Deserializer};
/// The string is a contiguous value that you can store directly on the stack
/// if needed.
#[derive(Copy)]
#[repr(C)]
pub struct ArrayString<const CAP: usize> {
// the `len` first elements of the array are initialized
xs: [MaybeUninit<u8>; CAP],
len: LenUint,
xs: [MaybeUninit<u8>; CAP],
}
impl<const CAP: usize> Default for ArrayString<CAP>
@ -201,6 +204,7 @@ impl<const CAP: usize> ArrayString<CAP>
///
/// assert_eq!(&string[..], "ab");
/// ```
#[track_caller]
pub fn push(&mut self, c: char) {
self.try_push(c).unwrap();
}
@ -252,6 +256,7 @@ impl<const CAP: usize> ArrayString<CAP>
///
/// assert_eq!(&string[..], "ad");
/// ```
#[track_caller]
pub fn push_str(&mut self, s: &str) {
self.try_push_str(s).unwrap()
}
@ -371,10 +376,12 @@ impl<const CAP: usize> ArrayString<CAP>
let next = idx + ch.len_utf8();
let len = self.len();
let ptr = self.as_mut_ptr();
unsafe {
ptr::copy(self.as_ptr().add(next),
self.as_mut_ptr().add(idx),
len - next);
ptr::copy(
ptr.add(next),
ptr.add(idx),
len - next);
self.set_len(len - (next - idx));
}
ch
@ -410,11 +417,13 @@ impl<const CAP: usize> ArrayString<CAP>
self
}
fn as_ptr(&self) -> *const u8 {
/// Return a raw pointer to the string's buffer.
pub fn as_ptr(&self) -> *const u8 {
self.xs.as_ptr() as *const u8
}
fn as_mut_ptr(&mut self) -> *mut u8 {
/// Return a raw mutable pointer to the string's buffer.
pub fn as_mut_ptr(&mut self) -> *mut u8 {
self.xs.as_mut_ptr() as *mut u8
}
}
@ -479,6 +488,11 @@ impl<const CAP: usize> Borrow<str> for ArrayString<CAP>
fn borrow(&self) -> &str { self }
}
impl<const CAP: usize> BorrowMut<str> for ArrayString<CAP>
{
fn borrow_mut(&mut self) -> &mut str { self }
}
impl<const CAP: usize> AsRef<str> for ArrayString<CAP>
{
fn as_ref(&self) -> &str { self }
@ -489,6 +503,13 @@ impl<const CAP: usize> fmt::Debug for ArrayString<CAP>
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { (**self).fmt(f) }
}
#[cfg(feature="std")]
impl<const CAP: usize> AsRef<Path> for ArrayString<CAP> {
fn as_ref(&self) -> &Path {
self.as_str().as_ref()
}
}
impl<const CAP: usize> fmt::Display for ArrayString<CAP>
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { (**self).fmt(f) }
@ -616,6 +637,37 @@ impl<'de, const CAP: usize> Deserialize<'de> for ArrayString<CAP>
}
}
#[cfg(feature = "borsh")]
/// Requires crate feature `"borsh"`
impl<const CAP: usize> borsh::BorshSerialize for ArrayString<CAP> {
fn serialize<W: borsh::io::Write>(&self, writer: &mut W) -> borsh::io::Result<()> {
<str as borsh::BorshSerialize>::serialize(&*self, writer)
}
}
#[cfg(feature = "borsh")]
/// Requires crate feature `"borsh"`
impl<const CAP: usize> borsh::BorshDeserialize for ArrayString<CAP> {
fn deserialize_reader<R: borsh::io::Read>(reader: &mut R) -> borsh::io::Result<Self> {
let len = <u32 as borsh::BorshDeserialize>::deserialize_reader(reader)? as usize;
if len > CAP {
return Err(borsh::io::Error::new(
borsh::io::ErrorKind::InvalidData,
format!("Expected a string no more than {} bytes long", CAP),
))
}
let mut buf = [0u8; CAP];
let buf = &mut buf[..len];
reader.read_exact(buf)?;
let s = str::from_utf8(&buf).map_err(|err| {
borsh::io::Error::new(borsh::io::ErrorKind::InvalidData, err.to_string())
})?;
Ok(Self::from(s).unwrap())
}
}
impl<'a, const CAP: usize> TryFrom<&'a str> for ArrayString<CAP>
{
type Error = CapacityError<&'a str>;
@ -638,3 +690,27 @@ impl<'a, const CAP: usize> TryFrom<fmt::Arguments<'a>> for ArrayString<CAP>
Ok(v)
}
}
#[cfg(feature = "zeroize")]
/// "Best efforts" zeroing of the `ArrayString`'s buffer when the `zeroize` feature is enabled.
///
/// The length is set to 0, and the buffer is dropped and zeroized.
/// Cannot ensure that previous moves of the `ArrayString` did not leave values on the stack.
///
/// ```
/// use arrayvec::ArrayString;
/// use zeroize::Zeroize;
/// let mut string = ArrayString::<6>::from("foobar").unwrap();
/// string.zeroize();
/// assert_eq!(string.len(), 0);
/// unsafe { string.set_len(string.capacity()) };
/// assert_eq!(&*string, "\0\0\0\0\0\0");
/// ```
impl<const CAP: usize> zeroize::Zeroize for ArrayString<CAP> {
fn zeroize(&mut self) {
// There are no elements to drop
self.clear();
// Zeroize the backing array.
self.xs.zeroize();
}
}

95
third_party/rust/arrayvec/src/arrayvec.rs поставляемый
Просмотреть файл

@ -39,10 +39,11 @@ use crate::utils::MakeMaybeUninit;
///
/// It offers a simple API but also dereferences to a slice, so that the full slice API is
/// available. The ArrayVec can be converted into a by value iterator.
#[repr(C)]
pub struct ArrayVec<T, const CAP: usize> {
len: LenUint,
// the `len` first elements of the array are initialized
xs: [MaybeUninit<T>; CAP],
len: LenUint,
}
impl<T, const CAP: usize> Drop for ArrayVec<T, CAP> {
@ -77,6 +78,8 @@ impl<T, const CAP: usize> ArrayVec<T, CAP> {
/// assert_eq!(&array[..], &[1, 2]);
/// assert_eq!(array.capacity(), 16);
/// ```
#[inline]
#[track_caller]
pub fn new() -> ArrayVec<T, CAP> {
assert_capacity_limit!(CAP);
unsafe {
@ -172,6 +175,7 @@ impl<T, const CAP: usize> ArrayVec<T, CAP> {
///
/// assert_eq!(&array[..], &[1, 2]);
/// ```
#[track_caller]
pub fn push(&mut self, element: T) {
ArrayVecImpl::push(self, element)
}
@ -277,6 +281,7 @@ impl<T, const CAP: usize> ArrayVec<T, CAP> {
/// assert_eq!(&array[..], &["y", "x"]);
///
/// ```
#[track_caller]
pub fn insert(&mut self, index: usize, element: T) {
self.try_insert(index, element).unwrap()
}
@ -507,7 +512,7 @@ impl<T, const CAP: usize> ArrayVec<T, CAP> {
}
if DELETED {
unsafe {
let hole_slot = g.v.as_mut_ptr().add(g.processed_len - g.deleted_cnt);
let hole_slot = cur.sub(g.deleted_cnt);
ptr::copy_nonoverlapping(cur, hole_slot, 1);
}
}
@ -748,6 +753,7 @@ impl<T, const CAP: usize> DerefMut for ArrayVec<T, CAP> {
/// assert_eq!(array.capacity(), 3);
/// ```
impl<T, const CAP: usize> From<[T; CAP]> for ArrayVec<T, CAP> {
#[track_caller]
fn from(array: [T; CAP]) -> Self {
let array = ManuallyDrop::new(array);
let mut vec = <ArrayVec<T, CAP>>::new();
@ -843,11 +849,48 @@ impl<T, const CAP: usize> IntoIterator for ArrayVec<T, CAP> {
}
#[cfg(feature = "zeroize")]
/// "Best efforts" zeroing of the `ArrayVec`'s buffer when the `zeroize` feature is enabled.
///
/// The length is set to 0, and the buffer is dropped and zeroized.
/// Cannot ensure that previous moves of the `ArrayVec` did not leave values on the stack.
///
/// ```
/// use arrayvec::ArrayVec;
/// use zeroize::Zeroize;
/// let mut array = ArrayVec::from([1, 2, 3]);
/// array.zeroize();
/// assert_eq!(array.len(), 0);
/// let data = unsafe { core::slice::from_raw_parts(array.as_ptr(), array.capacity()) };
/// assert_eq!(data, [0, 0, 0]);
/// ```
impl<Z: zeroize::Zeroize, const CAP: usize> zeroize::Zeroize for ArrayVec<Z, CAP> {
fn zeroize(&mut self) {
// Zeroize all the contained elements.
self.iter_mut().zeroize();
// Drop all the elements and set the length to 0.
self.clear();
// Zeroize the backing array.
self.xs.zeroize();
}
}
/// By-value iterator for `ArrayVec`.
pub struct IntoIter<T, const CAP: usize> {
index: usize,
v: ArrayVec<T, CAP>,
}
impl<T, const CAP: usize> IntoIter<T, CAP> {
/// Returns the remaining items of this iterator as a slice.
pub fn as_slice(&self) -> &[T] {
&self.v[self.index..]
}
/// Returns the remaining items of this iterator as a mutable slice.
pub fn as_mut_slice(&mut self) -> &mut [T] {
&mut self.v[self.index..]
}
}
impl<T, const CAP: usize> Iterator for IntoIter<T, CAP> {
type Item = T;
@ -978,9 +1021,8 @@ impl<'a, T: 'a, const CAP: usize> Drop for Drain<'a, T, CAP> {
// memmove back untouched tail, update to new length
let start = source_vec.len();
let tail = self.tail_start;
let src = source_vec.as_ptr().add(tail);
let dst = source_vec.as_mut_ptr().add(start);
ptr::copy(src, dst, self.tail_len);
let ptr = source_vec.as_mut_ptr();
ptr::copy(ptr.add(tail), ptr.add(start), self.tail_len);
source_vec.set_len(start + self.tail_len);
}
}
@ -1012,6 +1054,7 @@ impl<T, const CAP: usize> Extend<T> for ArrayVec<T, CAP> {
/// Extend the `ArrayVec` with an iterator.
///
/// ***Panics*** if extending the vector exceeds its capacity.
#[track_caller]
fn extend<I: IntoIterator<Item=T>>(&mut self, iter: I) {
unsafe {
self.extend_from_iter::<_, true>(iter)
@ -1021,6 +1064,7 @@ impl<T, const CAP: usize> Extend<T> for ArrayVec<T, CAP> {
#[inline(never)]
#[cold]
#[track_caller]
fn extend_panic() {
panic!("ArrayVec: capacity exceeded in extend/from_iter");
}
@ -1032,6 +1076,7 @@ impl<T, const CAP: usize> ArrayVec<T, CAP> {
///
/// Unsafe because if CHECK is false, the length of the input is not checked.
/// The caller must ensure the length of the input fits in the capacity.
#[track_caller]
pub(crate) unsafe fn extend_from_iter<I, const CHECK: bool>(&mut self, iterable: I)
where I: IntoIterator<Item = T>
{
@ -1055,7 +1100,9 @@ impl<T, const CAP: usize> ArrayVec<T, CAP> {
if let Some(elt) = iter.next() {
if ptr == end_ptr && CHECK { extend_panic(); }
debug_assert_ne!(ptr, end_ptr);
ptr.write(elt);
if mem::size_of::<T>() != 0 {
ptr.write(elt);
}
ptr = raw_ptr_add(ptr, 1);
guard.data += 1;
} else {
@ -1082,7 +1129,7 @@ impl<T, const CAP: usize> ArrayVec<T, CAP> {
unsafe fn raw_ptr_add<T>(ptr: *mut T, offset: usize) -> *mut T {
if mem::size_of::<T>() == 0 {
// Special case for ZST
(ptr as usize).wrapping_add(offset) as _
ptr.cast::<u8>().wrapping_add(offset).cast::<T>()
} else {
ptr.add(offset)
}
@ -1265,3 +1312,37 @@ impl<'de, T: Deserialize<'de>, const CAP: usize> Deserialize<'de> for ArrayVec<T
deserializer.deserialize_seq(ArrayVecVisitor::<T, CAP>(PhantomData))
}
}
#[cfg(feature = "borsh")]
/// Requires crate feature `"borsh"`
impl<T, const CAP: usize> borsh::BorshSerialize for ArrayVec<T, CAP>
where
T: borsh::BorshSerialize,
{
fn serialize<W: borsh::io::Write>(&self, writer: &mut W) -> borsh::io::Result<()> {
<[T] as borsh::BorshSerialize>::serialize(self.as_slice(), writer)
}
}
#[cfg(feature = "borsh")]
/// Requires crate feature `"borsh"`
impl<T, const CAP: usize> borsh::BorshDeserialize for ArrayVec<T, CAP>
where
T: borsh::BorshDeserialize,
{
fn deserialize_reader<R: borsh::io::Read>(reader: &mut R) -> borsh::io::Result<Self> {
let mut values = Self::new();
let len = <u32 as borsh::BorshDeserialize>::deserialize_reader(reader)?;
for _ in 0..len {
let elem = <T as borsh::BorshDeserialize>::deserialize_reader(reader)?;
if let Err(_) = values.try_push(elem) {
return Err(borsh::io::Error::new(
borsh::io::ErrorKind::InvalidData,
format!("Expected an array with no more than {} items", CAP),
));
}
}
Ok(values)
}
}

Просмотреть файл

@ -35,6 +35,7 @@ pub(crate) trait ArrayVecImpl {
/// Return a raw mutable pointer to the vector's buffer.
fn as_mut_ptr(&mut self) -> *mut Self::Item;
#[track_caller]
fn push(&mut self, element: Self::Item) {
self.try_push(element).unwrap()
}

4
third_party/rust/arrayvec/src/lib.rs поставляемый
Просмотреть файл

@ -11,6 +11,10 @@
//! - Optional
//! - Enable serialization for ArrayVec and ArrayString using serde 1.x
//!
//! - `zeroize`
//! - Optional
//! - Implement `Zeroize` for ArrayVec and ArrayString
//!
//! ## Rust Version
//!
//! This version of arrayvec requires Rust 1.51 or later.

73
third_party/rust/arrayvec/tests/borsh.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,73 @@
#![cfg(feature = "borsh")]
use std::fmt;
extern crate arrayvec;
extern crate borsh;
fn assert_ser<T: borsh::BorshSerialize>(v: &T, expected_bytes: &[u8]) {
let mut actual_bytes = Vec::new();
v.serialize(&mut actual_bytes).unwrap();
assert_eq!(actual_bytes, expected_bytes);
}
fn assert_roundtrip<T: borsh::BorshSerialize + borsh::BorshDeserialize + PartialEq + fmt::Debug>(v: &T) {
let mut bytes = Vec::new();
v.serialize(&mut bytes).unwrap();
let v_de = T::try_from_slice(&bytes).unwrap();
assert_eq!(*v, v_de);
}
mod array_vec {
use arrayvec::ArrayVec;
use super::{assert_ser, assert_roundtrip};
#[test]
fn test_empty() {
let vec = ArrayVec::<u32, 0>::new();
assert_ser(&vec, b"\0\0\0\0");
assert_roundtrip(&vec);
}
#[test]
fn test_full() {
let mut vec = ArrayVec::<u32, 3>::new();
vec.push(0xdeadbeef);
vec.push(0x123);
vec.push(0x456);
assert_ser(&vec, b"\x03\0\0\0\xef\xbe\xad\xde\x23\x01\0\0\x56\x04\0\0");
assert_roundtrip(&vec);
}
#[test]
fn test_with_free_capacity() {
let mut vec = ArrayVec::<u32, 3>::new();
vec.push(0xdeadbeef);
assert_ser(&vec, b"\x01\0\0\0\xef\xbe\xad\xde");
assert_roundtrip(&vec);
}
}
mod array_string {
use arrayvec::ArrayString;
use super::{assert_ser, assert_roundtrip};
#[test]
fn test_empty() {
let string = ArrayString::<0>::new();
assert_ser(&string, b"\0\0\0\0");
assert_roundtrip(&string);
}
#[test]
fn test_full() {
let string = ArrayString::from_byte_string(b"hello world").unwrap();
assert_ser(&string, b"\x0b\0\0\0hello world");
assert_roundtrip(&string);
}
#[test]
fn test_with_free_capacity() {
let string = ArrayString::<16>::from("hello world").unwrap();
assert_ser(&string, b"\x0b\0\0\0hello world");
assert_roundtrip(&string);
}
}

16
third_party/rust/arrayvec/tests/tests.rs поставляемый
Просмотреть файл

@ -695,20 +695,6 @@ fn test_default() {
assert_eq!(v.len(), 0);
}
#[cfg(feature="array-sizes-33-128")]
#[test]
fn test_sizes_33_128() {
ArrayVec::from([0u8; 52]);
ArrayVec::from([0u8; 127]);
}
#[cfg(feature="array-sizes-129-255")]
#[test]
fn test_sizes_129_255() {
ArrayVec::from([0u8; 237]);
ArrayVec::from([0u8; 255]);
}
#[test]
fn test_extend_zst() {
let mut range = 0..10;
@ -790,4 +776,4 @@ fn test_arraystring_zero_filled_has_some_sanity_checks() {
let string = ArrayString::<4>::zero_filled();
assert_eq!(string.as_str(), "\0\0\0\0");
assert_eq!(string.len(), 4);
}
}

1
third_party/rust/breakpad-symbols/.cargo-checksum.json поставляемый Normal file
Просмотреть файл

@ -0,0 +1 @@
{"files":{"Cargo.toml":"2dfb12c51f860b95f13b937e550dc7579bfe122854861717b8aed2c25fe51fe3","LICENSE":"06de63df29199a394442b57a28e886059ddc940973e10646877a0793fd53e2c9","README.md":"b0b97fcaf1d9eb5a3f3ca1fc0b0b1f593f7a116465ddcb8158541a40ff98660a","src/http.rs":"025a542391b2464fb6bdc769786b7c3d7ab697d932ee198360bc926e5e2b5cb6","src/lib.rs":"dd9a6cf9a140e5132db87e072550afa2418e2bb75cc0c652d929047e69850f6f","src/sym_file/mod.rs":"bb1c42d9b8823eabca753a7eff11533fdf403bcb0e0c91b298fdf07bcfde023e","src/sym_file/parser.rs":"6fbfd6805e8ef2cdadfd6c171d6ad40647a481760e7296f0ac093cb767fdf8dc","src/sym_file/types.rs":"c23a928bf092cbc9302316777ea00e416706bda6879ce7866a118ba18dbb718c","src/sym_file/walker.rs":"05f31914eb04186cdb292d68eb2f5bc5f2be9112e853867e49cc26eee1518a0a"},"package":"6aeaa2a7f839cbb61c2f59ad6e51cc3fd2c24aa2103cb24e6be143bcc114aa24"}

101
third_party/rust/breakpad-symbols/Cargo.toml поставляемый Normal file
Просмотреть файл

@ -0,0 +1,101 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.
[package]
edition = "2018"
name = "breakpad-symbols"
version = "0.22.1"
authors = ["Ted Mielczarek <ted@mielczarek.org>"]
build = false
exclude = ["testdata/*"]
autobins = false
autoexamples = false
autotests = false
autobenches = false
description = "A library for working with Google Breakpad's text-format symbol files."
homepage = "https://github.com/rust-minidump/rust-minidump"
readme = "README.md"
license = "MIT"
repository = "https://github.com/rust-minidump/rust-minidump"
resolver = "2"
[lib]
name = "breakpad_symbols"
path = "src/lib.rs"
[dependencies.async-trait]
version = "0.1.52"
[dependencies.cab]
version = "0.5.0"
optional = true
[dependencies.cachemap2]
version = "0.3.0"
[dependencies.circular]
version = "0.3.0"
[dependencies.debugid]
version = "0.8.0"
[dependencies.futures-util]
version = "0.3"
[dependencies.minidump-common]
version = "0.22.1"
[dependencies.nom]
version = "7"
[dependencies.range-map]
version = "0.2"
[dependencies.reqwest]
version = "0.11.6"
features = [
"gzip",
"rustls-tls",
]
optional = true
default-features = false
[dependencies.tempfile]
version = "3.3.0"
optional = true
[dependencies.thiserror]
version = "1.0.37"
[dependencies.tracing]
version = "0.1.34"
features = ["log"]
[dev-dependencies.tempfile]
version = "3.3.0"
[dev-dependencies.tokio]
version = "1.12.0"
features = ["full"]
[features]
fuzz = []
http = [
"reqwest",
"tempfile",
]
mozilla_cab_symbols = [
"http",
"cab",
]
[badges.travis-ci]
repository = "rust-minidump/rust-minidump"

21
third_party/rust/breakpad-symbols/LICENSE поставляемый Normal file
Просмотреть файл

@ -0,0 +1,21 @@
MIT License
Copyright (c) 2015-2023 rust-minidump contributors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

13
third_party/rust/breakpad-symbols/README.md поставляемый Normal file
Просмотреть файл

@ -0,0 +1,13 @@
[![crates.io](https://img.shields.io/crates/v/breakpad-symbols.svg)](https://crates.io/crates/breakpad-symbols) [![](https://docs.rs/breakpad-symbols/badge.svg)](https://docs.rs/breakpad-symbols)
Fetching, parsing, and evaluation of Breakpad's [text format .sym files](https://chromium.googlesource.com/breakpad/breakpad/+/master/docs/symbol_files.md).
Fetches breakpad symbol files from disk or [a server that conforms to the Tecken protocol](https://tecken.readthedocs.io/en/latest/download.html), and provides an on-disk temp symbol file cache.
Permissively parses breakpad symbol files to smooth over the unfortunately-very-common situation of corrupt debuginfo. Will generally try to recover the parse by discarding corrupt lines or arbitrarily picking one value when conflicts are found.
Provides an API for resolving functions and source line info by address from symbol files.
Provides an API for evaluating breakpad CFI (and WIN) expressions.
This is primarily designed for use by [minidump-processor](https://crates.io/crates/minidump-processor).

542
third_party/rust/breakpad-symbols/src/http.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,542 @@
//! Contains HTTP symbol retrieval specific functionality
use crate::*;
use reqwest::{redirect, Client, Url};
use std::io::{self, Write};
use std::path::Path;
use std::str::FromStr;
use std::time::Duration;
use tempfile::NamedTempFile;
use tracing::{debug, trace, warn};
/// A key that uniquely identifies a File associated with a module
type FileKey = (ModuleKey, FileKind);
/// An implementation of `SymbolSupplier` that loads Breakpad text-format
/// symbols from HTTP URLs.
///
/// See [`crate::breakpad_sym_lookup`] for details on how paths are searched.
pub struct HttpSymbolSupplier {
/// File paths that are known to be in the cache
#[allow(clippy::type_complexity)]
cached_file_paths: CacheMap<FileKey, CachedAsyncResult<(PathBuf, Option<Url>), FileError>>,
/// HTTP Client to use for fetching symbols.
client: Client,
/// URLs to search for symbols.
urls: Vec<Url>,
/// A `SimpleSymbolSupplier` to use for local symbol paths.
local: SimpleSymbolSupplier,
/// A path at which to cache downloaded symbols.
///
/// We recommend using a subdirectory of `std::env::temp_dir()`, as this
/// will be your OS's intended location for tempory files. This should
/// give you free garbage collection of the cache while still allowing it
/// to function between runs.
cache: PathBuf,
/// A path to a temporary location where downloaded symbols can be written
/// before being atomically swapped into the cache.
///
/// We recommend using `std::env::temp_dir()`, as this will be your OS's
/// intended location for temporary files.
tmp: PathBuf,
}
impl HttpSymbolSupplier {
    /// Create a new `HttpSymbolSupplier`.
    ///
    /// Symbols will be searched for in each of `local_paths` and `cache` first,
    /// then via HTTP at each of `urls`. If a symbol file is found via HTTP it
    /// will be saved under `cache`.
    pub fn new(
        urls: Vec<String>,
        cache: PathBuf,
        tmp: PathBuf,
        mut local_paths: Vec<PathBuf>,
        timeout: Duration,
    ) -> HttpSymbolSupplier {
        // NOTE(review): `build()` can only fail on client-configuration
        // problems (e.g. TLS backend init); that is treated as fatal here.
        let client = Client::builder().timeout(timeout).build().unwrap();
        // Normalize every server URL to end with '/' so `Url::join` treats it
        // as a directory; entries that fail to parse are silently dropped.
        let urls = urls
            .into_iter()
            .filter_map(|mut u| {
                if !u.ends_with('/') {
                    u.push('/');
                }
                Url::parse(&u).ok()
            })
            .collect();
        // The cache directory doubles as a local symbol path, so previously
        // downloaded files are found without touching the network.
        local_paths.push(cache.clone());
        let local = SimpleSymbolSupplier::new(local_paths);
        let cached_file_paths = Default::default();
        HttpSymbolSupplier {
            client,
            cached_file_paths,
            urls,
            local,
            cache,
            tmp,
        }
    }

    /// Locate a file of kind `file_kind` for `module`: first via the local
    /// paths/cache, then by downloading from each symbol server in turn.
    /// Results (success or failure) are memoized in `cached_file_paths`.
    ///
    /// Returns the file's path and, when it was downloaded, the source URL.
    #[tracing::instrument(level = "trace", skip(self, module), fields(module = crate::basename(&module.code_file())))]
    pub async fn locate_file_internal(
        &self,
        module: &(dyn Module + Sync),
        file_kind: FileKind,
    ) -> Result<(PathBuf, Option<Url>), FileError> {
        self.cached_file_paths
            .cache_default(file_key(module, file_kind))
            .get(|| async {
                // First look for the file in the cache
                if let Ok(path) = self.local.locate_file(module, file_kind).await {
                    return Ok((path, None));
                }

                // Then try to download the file
                // FIXME: if we try to parallelize this with `join` then if we have multiple hits
                // we'll end up downloading all of them at once and having them race to write into
                // the cache... is that ok? Maybe? Since only one will ever win the swap, and it's
                // unlikely to get multiple hits... this might actually be ok!
                if let Some(lookup) = lookup(module, file_kind) {
                    for url in &self.urls {
                        let fetch =
                            fetch_lookup(&self.client, url, &lookup, &self.cache, &self.tmp).await;

                        if let Ok((path, url)) = fetch {
                            return Ok((path, url));
                        }
                    }

                    // If we're allowed to look for mozilla's special CAB paths, do that
                    if cfg!(feature = "mozilla_cab_symbols") {
                        for url in &self.urls {
                            let fetch = fetch_cab_lookup(
                                &self.client,
                                url,
                                &lookup,
                                &self.cache,
                                &self.tmp,
                            )
                            .await;

                            if let Ok((path, url)) = fetch {
                                return Ok((path, url));
                            }
                        }
                    }
                }
                Err(FileError::NotFound)
            })
            .await
            .as_ref()
            .clone()
    }
}
/// Build the in-memory cache key that identifies `file_kind` for `module`.
fn file_key(module: &(dyn Module + Sync), file_kind: FileKind) -> FileKey {
    let module_id = module_key(module);
    (module_id, file_kind)
}
/// Open a temp file (under `tmp_path`) that can later be atomically moved to
/// `final_path` in the cache.
///
/// Use tempfile to save things to our cache to ensure proper atomicity of
/// writes. We may want multiple instances of rust-minidump to be sharing a
/// cache, and we don't want one instance to see another instance's partially
/// written results. tempfile is designed explicitly for this purpose, and
/// will handle all the platform-specific details and do its best to cleanup
/// if things crash.
fn create_cache_file(tmp_path: &Path, final_path: &Path) -> io::Result<NamedTempFile> {
    // The target directory inside the cache must exist before we can ever
    // persist into it, so create it up front.
    let parent_dir = match final_path.parent() {
        Some(dir) => dir,
        None => {
            return Err(io::Error::new(
                io::ErrorKind::Other,
                format!("Bad cache path: {final_path:?}"),
            ))
        }
    };
    fs::create_dir_all(parent_dir)?;

    NamedTempFile::new_in(tmp_path)
}
/// Finalize a staged cache file and atomically move it into the cache.
///
/// Extra metadata is appended as "INFO" lines before the swap, because that
/// is an established format whose contents parsers ignore by default. The
/// `INFO URL` line lets us report the url a symbol file was retrieved from
/// even when it is later served from the on-disk cache.
fn commit_cache_file(mut temp: NamedTempFile, final_path: &Path, url: &Url) -> io::Result<()> {
    let metadata_line = format!("INFO URL {url}\n");
    temp.write_all(metadata_line.as_bytes())?;

    // TODO: don't do this
    if final_path.exists() {
        fs::remove_file(final_path)?;
    }

    // If another process already wrote this entry, prefer their value to
    // avoid needless file system churn.
    temp.persist_noclobber(final_path)?;

    Ok(())
}
/// Perform a code_file/code_identifier lookup for a specific symbol server.
async fn individual_lookup_debug_info_by_code_info(
    base_url: &Url,
    lookup_path: &str,
) -> Option<DebugInfoResult> {
    let url = base_url.join(lookup_path).ok()?;

    debug!("Trying code file / code identifier lookup: {}", url);

    // This should not follow redirects--we want the next url if there is one
    let no_redirects_client = Client::builder()
        .redirect(redirect::Policy::none())
        .build()
        .ok()?;

    let response = no_redirects_client.get(url.clone()).send().await;
    if let Ok(res) = response {
        let res_status = res.status();
        if res_status == reqwest::StatusCode::FOUND
            || res_status == reqwest::StatusCode::MOVED_PERMANENTLY
        {
            let location_header = res.headers().get("Location")?;
            let mut new_url = location_header.to_str().ok()?;
            // Strip a leading '/' so absolute and relative locations look the
            // same to the rsplit below.
            if new_url.starts_with('/') {
                new_url = new_url.strip_prefix('/').unwrap_or(new_url);
            }
            // new_url looks like some/path/stuff/xul.pdb/somedebugid/xul.sym and we want the debug
            // file and debug id portions which are at fixed indexes from the end
            let mut parts = new_url.rsplit('/');
            // rsplit yields [<file>.sym, <debug id>, <debug file>, ...];
            // nth(1) skips the ".sym" element and yields the debug id...
            let debug_identifier_part = parts.nth(1)?;
            let debug_identifier = DebugId::from_str(debug_identifier_part).ok()?;
            // ...leaving the iterator positioned so next() is the debug file.
            let debug_file_part = parts.next()?;
            let debug_file = String::from(debug_file_part);

            debug!("Found debug info {} {}", debug_file, debug_identifier);
            return Some(DebugInfoResult {
                debug_file,
                debug_identifier,
            });
        }
    }

    None
}
/// Given a list of symbol urls and a module with a code_file and code_identifier,
/// this tries to request a symbol file using the code file and code identifier.
///
/// `<code file>/<code identifier>/<code file>.sym`
///
/// If the symbol server returns an HTTP 302 redirect, the Location header will
/// have the correct download API url with the debug file and debug identifier.
///
/// This is supported by tecken
///
/// This returns a DebugInfoResult with the new debug file and debug identifier
/// or None.
async fn lookup_debug_info_by_code_info(
    // `&[Url]` instead of `&Vec<Url>` (clippy::ptr_arg); callers passing
    // `&Vec<Url>` still work via deref coercion.
    symbol_urls: &[Url],
    module: &(dyn Module + Sync),
) -> Option<DebugInfoResult> {
    // Without code info there is no lookup path to try.
    let lookup_path = code_info_breakpad_sym_lookup(module)?;

    // Ask each server in order; the first one that answers wins.
    for base_url in symbol_urls {
        if let Some(result) =
            individual_lookup_debug_info_by_code_info(base_url, &lookup_path).await
        {
            return Some(result);
        }
    }

    debug!(
        "No debug file / debug id found with lookup path {}.",
        lookup_path
    );

    None
}
/// Fetch a symbol file from the URL made by combining `base_url` and `rel_path` using `client`,
/// save the file contents under `cache` + `rel_path` and also return them.
async fn fetch_symbol_file(
    client: &Client,
    base_url: &Url,
    module: &(dyn Module + Sync),
    cache: &Path,
    tmp: &Path,
) -> Result<SymbolFile, SymbolError> {
    trace!("HttpSymbolSupplier trying symbol server {}", base_url);
    // This function is a bit of a complicated mess because we want to write
    // the input to our symbol cache, but we're a streaming parser. So we
    // use the bare SymbolFile::parse to get access to the contents of
    // the input stream as it's downloaded+parsed to write it to disk.
    //
    // Note that caching is strictly "optional" because it's more important
    // to parse the symbols. So if at any point the caching i/o fails, we just
    // give up on caching but let the parse+download continue.

    // First try to GET the file from a server
    let sym_lookup = breakpad_sym_lookup(module).ok_or(SymbolError::MissingDebugFileOrId)?;
    let mut url = base_url
        .join(&sym_lookup.server_rel)
        .map_err(|_| SymbolError::NotFound)?;
    // NOTE(review): the code_file/code_id query pairs appear to be extra
    // hints for the server; the path itself carries the lookup — confirm
    // against the symbol server's API docs.
    let code_id = module.code_identifier().unwrap_or_default();
    url.query_pairs_mut()
        .append_pair("code_file", crate::basename(&module.code_file()))
        .append_pair("code_id", code_id.as_str());
    debug!("Trying {}", url);
    let res = client
        .get(url.clone())
        .send()
        .await
        .and_then(|res| res.error_for_status())
        .map_err(|_| SymbolError::NotFound)?;

    // Now try to create the temp cache file (not yet in the cache)
    let final_cache_path = cache.join(sym_lookup.cache_rel);
    let mut temp = create_cache_file(tmp, &final_cache_path)
        .map_err(|e| {
            warn!("Failed to save symbol file in local disk cache: {}", e);
        })
        .ok();

    // Now stream parse the file as it downloads.
    let mut symbol_file = SymbolFile::parse_async(res, |data| {
        // While we're downloading+parsing, save this data to the disk cache too
        if let Some(file) = temp.as_mut() {
            if let Err(e) = file.write_all(data) {
                // Give up on caching this.
                warn!("Failed to save symbol file in local disk cache: {}", e);
                temp = None;
            }
        }
    })
    .await?;

    // Make note of what URL this symbol file was downloaded from.
    symbol_file.url = Some(url.to_string());

    // Try to finish the cache file and atomically swap it into the cache.
    if let Some(temp) = temp {
        let _ = commit_cache_file(temp, &final_cache_path, &url).map_err(|e| {
            warn!("Failed to save symbol file in local disk cache: {}", e);
        });
    }

    Ok(symbol_file)
}
/// Like [`fetch_symbol_file`], but downloads the file opaquely based on the
/// given lookup instead of stream-parsing it.
///
/// On success, returns the cache path of the downloaded file and the URL it
/// was fetched from.
async fn fetch_lookup(
    client: &Client,
    base_url: &Url,
    lookup: &FileLookup,
    cache: &Path,
    tmp: &Path,
) -> Result<(PathBuf, Option<Url>), SymbolError> {
    // GET the file from the server; any failure is reported as "not found".
    let url = base_url
        .join(&lookup.server_rel)
        .map_err(|_| SymbolError::NotFound)?;
    debug!("Trying {}", url);
    let mut response = client
        .get(url.clone())
        .send()
        .await
        .and_then(|r| r.error_for_status())
        .map_err(|_| SymbolError::NotFound)?;

    // Stage the download in a temp file so the cache only ever sees complete
    // files.
    let final_cache_path = cache.join(&lookup.cache_rel);
    let mut staged = create_cache_file(tmp, &final_cache_path)?;
    loop {
        let chunk = response
            .chunk()
            .await
            .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?;
        match chunk {
            Some(bytes) => staged.write_all(&bytes[..])?,
            None => break,
        }
    }

    // Atomically move the complete file into the cache.
    staged
        .persist_noclobber(&final_cache_path)
        .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?;
    trace!("symbols: fetched native binary: {}", lookup.cache_rel);

    Ok((final_cache_path, Some(url)))
}
#[cfg(feature = "mozilla_cab_symbols")]
async fn fetch_cab_lookup(
client: &Client,
base_url: &Url,
lookup: &FileLookup,
cache: &Path,
tmp: &Path,
) -> Result<(PathBuf, Option<Url>), FileError> {
let cab_lookup = moz_lookup(lookup.clone());
// First try to GET the file from a server
let url = base_url
.join(&cab_lookup.server_rel)
.map_err(|_| FileError::NotFound)?;
debug!("Trying {}", url);
let res = client
.get(url.clone())
.send()
.await
.and_then(|res| res.error_for_status())
.map_err(|_| FileError::NotFound)?;
let cab_bytes = res.bytes().await.map_err(|_| FileError::NotFound)?;
let final_cache_path =
unpack_cabinet_file(&cab_bytes, lookup, cache, tmp).map_err(|_| FileError::NotFound)?;
trace!("symbols: fetched native binary: {}", lookup.cache_rel);
Ok((final_cache_path, Some(url)))
}
/// Stub used when the `mozilla_cab_symbols` feature is disabled: CAB lookup
/// always reports the file as missing so callers fall through cleanly.
#[cfg(not(feature = "mozilla_cab_symbols"))]
async fn fetch_cab_lookup(
    _client: &Client,
    _base_url: &Url,
    _lookup: &FileLookup,
    _cache: &Path,
    _tmp: &Path,
) -> Result<(PathBuf, Option<Url>), FileError> {
    Err(FileError::NotFound)
}
#[cfg(feature = "mozilla_cab_symbols")]
pub fn unpack_cabinet_file(
buf: &[u8],
lookup: &FileLookup,
cache: &Path,
tmp: &Path,
) -> Result<PathBuf, std::io::Error> {
trace!("symbols: unpacking CAB file: {}", lookup.cache_rel);
// try to find a file in a cabinet archive and unpack it to the destination
use cab::Cabinet;
use std::io::Cursor;
fn get_cabinet_file(
cab: &Cabinet<Cursor<&[u8]>>,
file_name: &str,
) -> Result<String, std::io::Error> {
for folder in cab.folder_entries() {
for file in folder.file_entries() {
let cab_file_name = file.name();
if cab_file_name.ends_with(file_name) {
return Ok(cab_file_name.to_string());
}
}
}
Err(std::io::Error::from(std::io::ErrorKind::NotFound))
}
let final_cache_path = cache.join(&lookup.cache_rel);
let cursor = Cursor::new(buf);
let mut cab = Cabinet::new(cursor)?;
let file_name = final_cache_path.file_name().unwrap().to_string_lossy();
let cab_file = get_cabinet_file(&cab, &file_name)?;
let mut reader = cab.read_file(&cab_file)?;
// Now try to create the temp cache file (not yet in the cache)
let mut temp = create_cache_file(tmp, &final_cache_path)?;
std::io::copy(&mut reader, &mut temp)?;
// And swap it into the cache
temp.persist_noclobber(&final_cache_path)
.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?;
Ok(final_cache_path)
}
/// Try to lookup native binaries in the cache and by querying the symbol server
#[async_trait]
impl SymbolSupplier for HttpSymbolSupplier {
    #[tracing::instrument(name = "symbols", level = "trace", skip_all, fields(file = crate::basename(&module.code_file())))]
    async fn locate_symbols(
        &self,
        module: &(dyn Module + Sync),
    ) -> Result<LocateSymbolsResult, SymbolError> {
        // If we don't have a debug_file or debug_identifier, then try to get it
        // from a symbol server.
        let mut debug_file = module.debug_file().map(|name| name.into_owned());
        let mut debug_id = module.debug_identifier();
        let missing_debug_info = debug_file.is_none() || debug_id.is_none();
        let extra_debug_info;

        if missing_debug_info {
            debug!("Missing debug file or debug identifier--trying lookup with code info");
            extra_debug_info = lookup_debug_info_by_code_info(&self.urls, module).await;
            if let Some(debug_info_result) = &extra_debug_info {
                debug_file = Some(debug_info_result.debug_file.clone());
                debug_id = Some(debug_info_result.debug_identifier);
            }
        } else {
            extra_debug_info = None;
        }

        // Build a minimal module for lookups with the debug file and debug
        // identifier we need to use
        let lookup_module = SimpleModule::from_basic_info(
            debug_file,
            debug_id,
            Some(module.code_file().into_owned()),
            module.code_identifier(),
        );

        // First: try local paths for sym files
        let local_result = self.local.locate_symbols(&lookup_module).await;

        if !matches!(local_result, Err(SymbolError::NotFound)) {
            // Everything but NotFound prevents cascading
            return local_result.map(|r| LocateSymbolsResult {
                symbols: r.symbols,
                extra_debug_info: r.extra_debug_info.or(extra_debug_info),
            });
        }
        trace!("HttpSymbolSupplier search (SimpleSymbolSupplier found nothing)");

        // Second: try to directly download sym files
        for url in &self.urls {
            // First, try to get a breakpad .sym file from the symbol server
            let sym =
                fetch_symbol_file(&self.client, url, &lookup_module, &self.cache, &self.tmp).await;
            match sym {
                Ok(symbols) => {
                    trace!("HttpSymbolSupplier parsed file!");
                    return Ok(LocateSymbolsResult {
                        symbols,
                        extra_debug_info,
                    });
                }
                Err(e) => {
                    // Keep trying the remaining servers on any failure.
                    trace!("HttpSymbolSupplier failed: {}", e);
                }
            }
        }

        // If we get this far, we have failed to find anything
        Err(SymbolError::NotFound)
    }

    /// Locate `file_kind` for `module`, discarding the source URL that the
    /// internal lookup also reports.
    async fn locate_file(
        &self,
        module: &(dyn Module + Sync),
        file_kind: FileKind,
    ) -> Result<PathBuf, FileError> {
        self.locate_file_internal(module, file_kind)
            .await
            .map(|(path, _url)| path)
    }
}

1238
third_party/rust/breakpad-symbols/src/lib.rs поставляемый Normal file

Разница между файлами не показана из-за своего большого размера Загрузить разницу

628
third_party/rust/breakpad-symbols/src/sym_file/mod.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,628 @@
// Copyright 2015 Ted Mielczarek. See the COPYRIGHT
// file at the top-level directory of this distribution.
use crate::{FrameSymbolizer, FrameWalker, Module, SymbolError};
pub use crate::sym_file::types::*;
pub use parser::SymbolParser;
use std::fs::File;
use std::io::Read;
use std::ops::Deref;
use std::path::Path;
use tracing::trace;
mod parser;
mod types;
pub mod walker;
// # Sync VS Async
//
// There is both a sync and an async entry-point to the parser.
// The two impls should be essentially identical, except for how they
// read bytes from the input reader into our circular buffer.
//
//
// # Streaming
//
// This parser streams the input to avoid the need to materialize all of
// it into memory at once (symbol files can be a gigabyte!). As a result,
// we need to iteratively parse.
//
// We do this by repeatedly filling up a buffer with input and asking the
// parser to parse it. The parser will return how much of the input it
// consumed, which we can use to clear space in our buffer and to tell
// if it successfully consumed the whole input when the Reader runs dry.
//
//
// # Handling EOF / Capacity
//
// Having a fix-sized buffer has one fatal issue: if one atomic step
// of the parser needs more than this amount of data, then we won't
// be able to parse it.
//
// This can result in `buf` filling up and `buf.space()` becoming an
// empty slice. This in turn will make the reader yield 0 bytes, and
// we'll treat it like EOF and fail the parse. When this happens, we
// try to double the buffer's size and request more bytes. If we get
// more, hooray! If we don't, then it's a "real" EOF.
//
// The "atom" of our parser is a line, so we need our buffer to be able
// to fit any line. However we actually only have roughly
// *half* this value as our limit, as circular::Buffer will only
// `shift` the buffer's contents if over half of its capacity has been
// drained by `consume` -- and `space()` only grows when a `shift` happens.
//
// I have in fact seen 8kb function names from Rust (thanks generic combinators!)
// and 82kb function names from C++ (thanks 'auto' returns!), so we
// need a buffer size that can grow to at least 200KB. This is a *very* large
// amount to backshift repeatedly, so to keep this under control, we start
// with only a 10KB buffer, which is generous but tolerable.
//
// We should still have *SOME* limit on this to avoid nasty death spirals,
// so let's go with 2MB (MAX_BUFFER_CAPACITY), letting you have a horrifying 1MB symbol.
//
// But just *dying* when we hit this point is terrible, so lets have an
// extra layer of robustness: if we ever hit the limit, enter "panic recovery"
// and just start discarding bytes until we hit a newline. Then resume normal
// parsing. The net effect of this is that we just treat this one line as
// corrupt (because statistically it won't even be needed!).
// Allows for at least 80KB symbol names, at most 160KB symbol names (fuzzy because of circular).
static MAX_BUFFER_CAPACITY: usize = 1024 * 160;
static INITIAL_BUFFER_CAPACITY: usize = 1024 * 10;
impl SymbolFile {
    /// Parse a SymbolFile from the given Reader.
    ///
    /// Every time a chunk of the input is parsed, that chunk will
    /// be passed to `callback` to allow you to do something else
    /// with the data as it's streamed in (e.g. you can save the
    /// input to a cache).
    ///
    /// The reader is wrapped in a buffer reader so you shouldn't
    /// buffer the input yourself.
    pub fn parse<R: Read>(
        mut input_reader: R,
        mut callback: impl FnMut(&[u8]),
    ) -> Result<SymbolFile, SymbolError> {
        let mut buf = circular::Buffer::with_capacity(INITIAL_BUFFER_CAPACITY);
        let mut parser = SymbolParser::new();

        // Parser state that must survive across loop iterations (buffer refills);
        // see the module-level commentary for the overall design.
        let mut fully_consumed = false;
        let mut tried_to_grow = false;
        let mut in_panic_recovery = false;
        let mut just_finished_recovering = false;
        let mut total_consumed = 0u64;

        loop {
            if in_panic_recovery {
                // PANIC RECOVERY MODE! DISCARD BYTES UNTIL NEWLINE.
                let input = buf.data();
                if let Some(new_line_idx) = input.iter().position(|&byte| byte == b'\n') {
                    // Hooray, we found a new line! Consume up to and including that, and resume.
                    let amount = new_line_idx + 1;
                    callback(&input[..amount]);
                    buf.consume(amount);
                    total_consumed += amount as u64;

                    // Back to normal!
                    in_panic_recovery = false;
                    fully_consumed = false;
                    just_finished_recovering = true;
                    parser.lines += 1;
                    trace!("RECOVERY: complete!");
                } else {
                    // No newline, discard everything
                    let amount = input.len();
                    callback(&input[..amount]);
                    buf.consume(amount);
                    total_consumed += amount as u64;

                    // If the next read returns 0 bytes, then that's a proper EOF!
                    fully_consumed = true;
                }
            }

            // Read the data in, and tell the circular buffer about the new data
            let size = input_reader.read(buf.space())?;
            buf.fill(size);

            if size == 0 {
                // If the reader returned no more bytes, this can be either mean
                // EOF or the buffer is out of capacity. There are a lot of cases
                // to consider, so let's go through them one at a time...
                if just_finished_recovering && !buf.data().is_empty() {
                    // We just finished PANIC RECOVERY, but there's still bytes in
                    // the buffer. Assume that is parseable and resume normal parsing
                    // (do nothing, fallthrough to normal path).
                } else if fully_consumed {
                    // Success! The last iteration cleared the buffer and we still got
                    // no more bytes, so that's a proper EOF with a complete parse!
                    return Ok(parser.finish());
                } else if !tried_to_grow {
                    // We still have some stuff in the buffer, assume this is because
                    // the buffer is full, and try to make it BIGGER and ask for more again.
                    let new_cap = buf.capacity().saturating_mul(2);
                    if new_cap > MAX_BUFFER_CAPACITY {
                        // TIME TO PANIC!!! This line is catastrophically big, just start
                        // discarding bytes until we hit a newline.
                        trace!("RECOVERY: discarding enormous line {}", parser.lines);
                        in_panic_recovery = true;
                        continue;
                    }
                    trace!("parser out of space? trying more ({}KB)", new_cap / 1024);
                    buf.grow(new_cap);
                    tried_to_grow = true;
                    continue;
                } else if total_consumed == 0 {
                    // We grew the buffer and still got no more bytes, so it's a proper EOF.
                    // But actually, we never consumed any bytes, so this is an empty file?
                    // Give a better error message for that.
                    return Err(SymbolError::ParseError(
                        "empty SymbolFile (probably something wrong with your debuginfo tooling?)",
                        0,
                    ));
                } else {
                    // Ok give up, this input is just impossible.
                    return Err(SymbolError::ParseError(
                        "unexpected EOF during parsing of SymbolFile (or a line was too long?)",
                        parser.lines,
                    ));
                }
            } else {
                tried_to_grow = false;
            }

            if in_panic_recovery {
                // Don't run the normal parser while we're still recovering!
                continue;
            }
            just_finished_recovering = false;

            // Ask the parser to parse more of the input
            let input = buf.data();
            let consumed = parser.parse_more(input)?;
            total_consumed += consumed as u64;

            // Give the other consumer of this Reader a chance to use this data.
            callback(&input[..consumed]);

            // Remember for the next iteration if all the input was consumed.
            fully_consumed = input.len() == consumed;
            buf.consume(consumed);
        }
    }
/// `parse` but async
#[cfg(feature = "http")]
pub async fn parse_async(
mut response: reqwest::Response,
mut callback: impl FnMut(&[u8]),
) -> Result<SymbolFile, SymbolError> {
let mut chunk;
let mut slice = &[][..];
let mut input_reader = &mut slice;
let mut buf = circular::Buffer::with_capacity(INITIAL_BUFFER_CAPACITY);
let mut parser = SymbolParser::new();
let mut fully_consumed = false;
let mut tried_to_grow = false;
let mut in_panic_recovery = false;
let mut just_finished_recovering = false;
let mut total_consumed = 0u64;
loop {
if in_panic_recovery {
// PANIC RECOVERY MODE! DISCARD BYTES UNTIL NEWLINE.
let input = buf.data();
if let Some(new_line_idx) = input.iter().position(|&byte| byte == b'\n') {
// Hooray, we found a new line! Consume up to and including that, and resume.
let amount = new_line_idx + 1;
callback(&input[..amount]);
buf.consume(amount);
total_consumed += amount as u64;
// Back to normal!
in_panic_recovery = false;
fully_consumed = false;
just_finished_recovering = true;
parser.lines += 1;
trace!("PANIC RECOVERY: complete!");
} else {
// No newline, discard everything
let amount = input.len();
callback(&input[..amount]);
buf.consume(amount);
total_consumed += amount as u64;
// If the next read returns 0 bytes, then that's a proper EOF!
fully_consumed = true;
}
}
// Little rube-goldberg machine to stream the contents:
// * get a chunk (Bytes) from the Response
// * get its underlying slice
// * then get a mutable reference to that slice
// * then Read that mutable reference in our circular buffer
// * when the slice runs out, get the next chunk and repeat
if input_reader.is_empty() {
chunk = response
.chunk()
.await
.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?
.unwrap_or_default();
slice = &chunk[..];
input_reader = &mut slice;
}
// Read the data in, and tell the circular buffer about the new data
let size = input_reader.read(buf.space())?;
buf.fill(size);
if size == 0 {
// If the reader returned no more bytes, this can be either mean
// EOF or the buffer is out of capacity. There are a lot of cases
// to consider, so let's go through them one at a time...
if just_finished_recovering && !buf.data().is_empty() {
// We just finished PANIC RECOVERY, but there's still bytes in
// the buffer. Assume that is parseable and resume normal parsing
// (do nothing, fallthrough to normal path).
} else if fully_consumed {
// Success! The last iteration cleared the buffer and we still got
// no more bytes, so that's a proper EOF with a complete parse!
return Ok(parser.finish());
} else if !tried_to_grow {
// We still have some stuff in the buffer, assume this is because
// the buffer is full, and try to make it BIGGER and ask for more again.
let new_cap = buf.capacity().saturating_mul(2);
if new_cap > MAX_BUFFER_CAPACITY {
// TIME TO PANIC!!! This line is catastrophically big, just start
// discarding bytes until we hit a newline.
trace!("RECOVERY: discarding enormous line {}", parser.lines);
in_panic_recovery = true;
continue;
}
trace!("parser out of space? trying more ({}KB)", new_cap / 1024);
buf.grow(new_cap);
tried_to_grow = true;
continue;
} else if total_consumed == 0 {
// We grew the buffer and still got no more bytes, so it's a proper EOF.
// But actually, we never consumed any bytes, so this is an empty file?
// Give a better error message for that.
return Err(SymbolError::ParseError(
"empty SymbolFile (probably something wrong with your debuginfo tooling?)",
0,
));
} else {
// Ok give up, this input is just impossible.
return Err(SymbolError::ParseError(
"unexpected EOF during parsing of SymbolFile (or a line was too long?)",
parser.lines,
));
}
} else {
tried_to_grow = false;
}
if in_panic_recovery {
// Don't run the normal parser while we're still recovering!
continue;
}
just_finished_recovering = false;
// Ask the parser to parse more of the input
let input = buf.data();
let consumed = parser.parse_more(input)?;
total_consumed += consumed as u64;
// Give the other consumer of this Reader a chance to use this data.
callback(&input[..consumed]);
// Remember for the next iteration if all the input was consumed.
fully_consumed = input.len() == consumed;
buf.consume(consumed);
}
}
    /// Parse a SymbolFile from bytes.
    pub fn from_bytes(bytes: &[u8]) -> Result<SymbolFile, SymbolError> {
        Self::parse(bytes, |_| ())
    }
    /// Parse a SymbolFile from a file.
    ///
    /// # Errors
    ///
    /// Fails with an I/O error if the file cannot be opened, or a parse
    /// error if its contents are not a valid symbol file.
    pub fn from_file(path: &Path) -> Result<SymbolFile, SymbolError> {
        let file = File::open(path)?;
        Self::parse(file, |_| ())
    }
    /// Fill in as much source information for `frame` as possible.
    pub fn fill_symbol(&self, module: &dyn Module, frame: &mut dyn FrameSymbolizer) {
        // Look for a FUNC covering the address first.
        if frame.get_instruction() < module.base_address() {
            return;
        }
        // All lookups below use the module-relative address.
        let addr = frame.get_instruction() - module.base_address();
        if let Some(func) = self.functions.get(addr) {
            // TODO: although FUNC records have a parameter size, it appears that
            // they aren't to be trusted? The STACK WIN records are more reliable
            // when available. This is important precisely because these values
            // are used to unwind subsequent STACK WIN frames (because certain
            // calling conventions have the caller push the callee's arguments,
            // which affects the stack's size!).
            //
            // Need to spend more time thinking about if this is the right approach
            let parameter_size = if let Some(info) = self.win_stack_framedata_info.get(addr) {
                info.parameter_size
            } else if let Some(info) = self.win_stack_fpo_info.get(addr) {
                info.parameter_size
            } else {
                func.parameter_size
            };

            frame.set_function(
                &func.name,
                func.address + module.base_address(),
                parameter_size,
            );
            // See if there's source line and inline info as well.
            //
            // In the following, we transform data between two different representations of inline calls.
            // The input shape has function names associated with the location of the call to that function.
            // The output shape has function names associated with a location *inside* that function.
            //
            // Input:
            //
            // (
            //     outer_name,
            //     inline_calls: [ // Each location is the line of the *call* to the function
            //         (inline_call_location[0], inline_name[0]),
            //         (inline_call_location[1], inline_name[1]),
            //         (inline_call_location[2], inline_name[2]),
            //     ]
            //     innermost_location,
            // )
            //
            // Output:
            //
            // ( // Each location is the line *inside* the function
            //     (outer_name, inline_call_location[0]),
            //     inlines: [
            //         (inline_name[0], inline_call_location[1]),
            //         (inline_name[1], inline_call_location[2]),
            //         (inline_name[2], innermost_location),
            //     ]
            // )
            if let Some((file_id, line, address, next_inline_origin)) =
                func.get_outermost_sourceloc(addr)
            {
                if let Some(file) = self.files.get(&file_id) {
                    frame.set_source_file(file, line, address + module.base_address());
                }

                if let Some(mut inline_origin) = next_inline_origin {
                    // There is an inline call at the address.
                    // Enumerate all inlines at the address one by one by looking up
                    // successively deeper call depths.
                    // The call to `get_outermost_source_location` above looked up depth 0, so here
                    // we start at depth 1.
                    for depth in 1.. {
                        match func.get_inlinee_at_depth(depth, addr) {
                            Some((call_file_id, call_line, _address, next_inline_origin)) => {
                                // We found another inline frame.
                                let call_file = self.files.get(&call_file_id).map(Deref::deref);
                                if let Some(name) = self.inline_origins.get(&inline_origin) {
                                    frame.add_inline_frame(name, call_file, Some(call_line));
                                }

                                inline_origin = next_inline_origin;
                            }
                            None => break,
                        }
                    }
                    // We've run out of inline calls but we still have to output the final frame.
                    let (file, line) = match func.get_innermost_sourceloc(addr) {
                        Some((file_id, line, _)) => (
                            self.files.get(&file_id).map(Deref::deref),
                            // Line 0 means "no line information"; report None.
                            if line != 0 { Some(line) } else { None },
                        ),
                        None => (None, None),
                    };
                    if let Some(name) = self.inline_origins.get(&inline_origin) {
                        frame.add_inline_frame(name, file, line);
                    }
                }
            }
        } else if let Some(public) = self.find_nearest_public(addr) {
            // We couldn't find a valid FUNC record, but we could find a PUBLIC record.
            // Unfortunately, PUBLIC records don't have end-points, so this could be
            // a random PUBLIC record from the start of the module that isn't at all
            // applicable. To try limit this problem, we can use the nearest FUNC
            // record that comes *before* the address we're trying to find a symbol for.
            //
            // It is reasonable to assume a PUBLIC record cannot extend *past* a FUNC,
            // so if the PUBLIC has a smaller base address than the nearest previous FUNC
            // to our target address, the PUBLIC must actually end before that FUNC and
            // therefore not actually apply to the target address.
            //
            // We get the nearest previous FUNC by getting the raw slice of ranges
            // and binary searching for our base address. Rust's builtin binary search
            // will fail to find the value since it uses strict equality *but* the Err
            // will helpfully contain the index in the slice where our value "should"
            // be inserted to preserve the sort. The element before this index is
            // therefore the nearest previous value!
            //
            // Case analysis for this -1 because binary search is an off-by-one minefield:
            //
            // * if the address we were looking for came *before* every FUNC, binary_search
            //   would yield "0" because that's where it should go to preserve the sort.
            //   The checked_sub will then fail and make us just assume the PUBLIC is reasonable,
            //   which is correct.
            //
            // * if we get 1, this saying we actually want element 0, so again -1 is
            //   correct. (This generalizes to all other "reasonable" values, but 1 is easiest
            //   to think about given the previous case's analysis.)
            //
            // * if the address we were looking for came *after* every FUNC, binary search
            //   would yield "slice.len()", and the nearest FUNC is indeed at `len-1`, so
            //   again correct.
            let funcs_slice = self.functions.ranges_values().as_slice();
            let prev_func = funcs_slice
                .binary_search_by_key(&addr, |(range, _)| range.start)
                .err()
                .and_then(|idx| idx.checked_sub(1))
                .and_then(|idx| funcs_slice.get(idx));

            if let Some(prev_func) = prev_func {
                if public.address <= prev_func.1.address {
                    // This PUBLIC is truncated by a FUNC before it gets to `addr`,
                    // so we shouldn't use it.
                    return;
                }
            }

            // Settle for a PUBLIC.
            frame.set_function(
                &public.name,
                public.address + module.base_address(),
                public.parameter_size,
            );
        }
    }
/// Unwind one stack frame for `walker` using this file's unwind tables.
///
/// Returns `None` when the instruction pointer lies below the module's base
/// address, when no unwind info covers it, or when evaluation fails.
pub fn walk_frame(&self, module: &dyn Module, walker: &mut dyn FrameWalker) -> Option<()> {
    let instruction = walker.get_instruction();
    let base = module.base_address();
    // Module-relative address; bails out if the instruction is below `base`.
    let addr = instruction.checked_sub(base)?;
    // Preferentially use framedata over fpo, because if both are present,
    // the former tends to be more precise (breakpad heuristic).
    let win_result = match (
        self.win_stack_framedata_info.get(addr),
        self.win_stack_fpo_info.get(addr),
    ) {
        (Some(info), _) => walker::walk_with_stack_win_framedata(info, walker),
        (None, Some(info)) => walker::walk_with_stack_win_fpo(info, walker),
        (None, None) => None,
    };
    if win_result.is_some() {
        return win_result;
    }
    // STACK WIN didn't apply; fall back to STACK CFI.
    let info = self.cfi_stack_info.get(addr)?;
    // Only apply add_rules at or below the target address: take the leading
    // run of rules whose address doesn't exceed `addr`.
    let applicable = info
        .add_rules
        .iter()
        .take_while(|rule| rule.address <= addr)
        .count();
    walker::walk_with_stack_cfi(&info.init, &info.add_rules[..applicable], walker)
}
/// Find the nearest `PublicSymbol` whose address is less than or equal to `addr`.
///
/// Walks `publics` from the back; the first entry at or below `addr` is the
/// nearest one.
pub fn find_nearest_public(&self, addr: u64) -> Option<&PublicSymbol> {
    for public in self.publics.iter().rev() {
        if public.address <= addr {
            return Some(public);
        }
    }
    None
}
}
#[cfg(test)]
mod test {
    use super::*;
    use std::ffi::OsStr;

    /// Parse the on-disk test symbol file at `rel_path` and spot-check the
    /// parsed contents: file/symbol counts, PUBLIC lookup, FUNC ranges, and
    /// STACK WIN data.
    fn test_symbolfile_from_file(rel_path: &str) {
        // Allow running from either the workspace root ("rust-minidump")
        // or from inside the breakpad-symbols crate directory.
        let mut path = std::env::current_dir().unwrap();
        if path.file_name() == Some(OsStr::new("rust-minidump")) {
            path.push("breakpad-symbols");
        }
        path.push(rel_path);
        let sym = SymbolFile::from_file(&path).unwrap();
        assert_eq!(sym.files.len(), 6661);
        assert_eq!(sym.publics.len(), 5);
        // PUBLIC lookup resolves to the nearest symbol at or below the address.
        assert_eq!(sym.find_nearest_public(0x9b07).unwrap().name, "_NLG_Return");
        assert_eq!(
            sym.find_nearest_public(0x142e7).unwrap().name,
            "_NLG_Return"
        );
        assert_eq!(
            sym.find_nearest_public(0x23b06).unwrap().name,
            "__from_strstr_to_strchr"
        );
        assert_eq!(
            sym.find_nearest_public(0xFFFFFFFF).unwrap().name,
            "__from_strstr_to_strchr"
        );
        assert_eq!(sym.functions.ranges_values().count(), 1065);
        // FUNC range boundaries: 0x1012 is the last covered address.
        assert_eq!(sym.functions.get(0x1000).unwrap().name, "vswprintf");
        assert_eq!(sym.functions.get(0x1012).unwrap().name, "vswprintf");
        assert!(sym.functions.get(0x1013).is_none());
        // There are 1556 `STACK WIN 4` lines in the symbol file, but only 856
        // that don't overlap. However they all overlap in ways that we have
        // to handle in the wild.
        assert_eq!(sym.win_stack_framedata_info.ranges_values().count(), 1556);
        assert_eq!(sym.win_stack_fpo_info.ranges_values().count(), 259);
        assert_eq!(
            sym.win_stack_framedata_info.get(0x41b0).unwrap().address,
            0x41b0
        );
    }
    #[test]
    fn test_symbolfile_from_lf_file() {
        test_symbolfile_from_file(
            "testdata/symbols/test_app.pdb/5A9832E5287241C1838ED98914E9B7FF1/test_app.sym",
        );
    }
    #[test]
    fn test_symbolfile_from_crlf_file() {
        test_symbolfile_from_file(
            "testdata/symbols/test_app.pdb/6A9832E5287241C1838ED98914E9B7FF1/test_app.sym",
        );
    }
    /// Parse a symbol file from in-memory bytes and spot-check the result.
    fn test_symbolfile_from_bytes(symbolfile_bytes: &[u8]) {
        let sym = SymbolFile::from_bytes(symbolfile_bytes).unwrap();
        assert_eq!(sym.files.len(), 1);
        assert_eq!(sym.publics.len(), 1);
        assert_eq!(sym.functions.ranges_values().count(), 1);
        assert_eq!(sym.functions.get(0x1000).unwrap().name, "another func");
        assert_eq!(
            sym.functions
                .get(0x1000)
                .unwrap()
                .lines
                .ranges_values()
                .count(),
            1
        );
        // test fallback
        assert_eq!(sym.functions.get(0x1001).unwrap().name, "another func");
    }
    #[test]
    fn test_symbolfile_from_bytes_with_lf() {
        test_symbolfile_from_bytes(
            b"MODULE Linux x86 ffff0000 bar
FILE 53 bar.c
PUBLIC 1234 10 some public
FUNC 1000 30 10 another func
1000 30 7 53
",
        );
    }
    #[test]
    fn test_symbolfile_from_bytes_with_crlf() {
        test_symbolfile_from_bytes(
            b"MODULE Linux x86 ffff0000 bar
FILE 53 bar.c
PUBLIC 1234 10 some public
FUNC 1000 30 10 another func
1000 30 7 53
",
        );
    }
}

1653
third_party/rust/breakpad-symbols/src/sym_file/parser.rs поставляемый Normal file

Разница между файлами не показана из-за своего большого размера Загрузить разницу

315
third_party/rust/breakpad-symbols/src/sym_file/types.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,315 @@
// Copyright 2015 Ted Mielczarek. See the COPYRIGHT
// file at the top-level directory of this distribution.
use range_map::{Range, RangeMap};
use std::collections::HashMap;
/// A publicly visible linker symbol.
#[derive(Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct PublicSymbol {
    /// The symbol's address relative to the module's load address.
    ///
    /// This field is declared first so that the derived Ord implementation sorts
    /// by address first. We take advantage of the sort order during address lookup.
    pub address: u64,
    /// The name of the symbol.
    pub name: String,
    /// The size of parameters passed to the function.
    pub parameter_size: u32,
}
/// A mapping from machine code bytes to source line and file.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct SourceLine {
    /// The start address relative to the module's load address.
    pub address: u64,
    /// The size of this range of instructions in bytes.
    pub size: u32,
    /// The source file name that generated this machine code.
    ///
    /// This is an index into `SymbolFile::files`.
    pub file: u32,
    /// The line number in `file` that generated this machine code.
    pub line: u32,
}
/// A single range which is covered by an inlined function call.
///
/// Field order matters here as well: the derived `Ord` sorts by
/// `(depth, address, ...)`, which `Function::get_inlinee_at_depth` relies on
/// when binary-searching `Function::inlinees`.
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct Inlinee {
    /// The depth of the inline call.
    pub depth: u32,
    /// The start address relative to the module's load address.
    pub address: u64,
    /// The size of this range of instructions in bytes.
    pub size: u32,
    /// The source file which contains the function call.
    ///
    /// This is an index into `SymbolFile::files`.
    pub call_file: u32,
    /// The line number in `call_file` for the function call.
    pub call_line: u32,
    /// The function name, as an index into `SymbolFile::inline_origins`.
    pub origin_id: u32,
}
/// A source-language function.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Function {
    /// The function's start address relative to the module's load address.
    pub address: u64,
    /// The size of the function in bytes.
    pub size: u32,
    /// The size of parameters passed to the function.
    pub parameter_size: u32,
    /// The name of the function as declared in the source.
    pub name: String,
    /// Source line information for this function.
    pub lines: RangeMap<u64, SourceLine>,
    /// Inlinee information for this function, sorted by (depth, address).
    ///
    /// Essentially this can be considered as "one vec per depth", just with
    /// all those vecs concatenated into one.
    ///
    /// Inlinees form a nested structure, you can think of them like a flame graph.
    pub inlinees: Vec<Inlinee>,
}
impl Function {
    /// The inclusive address range covered by this function, or `None` if the
    /// function is empty (size 0) or `address + size` would overflow `u64`.
    pub fn memory_range(&self) -> Option<Range<u64>> {
        if self.size == 0 {
            return None;
        }
        Some(Range::new(
            self.address,
            self.address.checked_add(self.size as u64)? - 1,
        ))
    }
    /// Returns `(file_id, line, address, inline_origin)` of the line or inline record that
    /// covers the given address at the outermost level (i.e. not inside any
    /// inlined calls).
    pub fn get_outermost_sourceloc(&self, addr: u64) -> Option<(u32, u32, u64, Option<u32>)> {
        // A depth-0 inlinee covering this address means the outermost
        // location is an inlined call site.
        if let Some((call_file, call_line, address, origin)) = self.get_inlinee_at_depth(0, addr) {
            return Some((call_file, call_line, address, Some(origin)));
        }
        // Otherwise we return the line record covering this address.
        let line = self.lines.get(addr)?;
        Some((line.file, line.line, line.address, None))
    }
    /// Returns `(file_id, line, address)` of the line record that covers the
    /// given address. Line records describe locations at the deepest level of
    /// inlining at that address.
    ///
    /// For example, if we have an "inline call stack" A -> B -> C at this
    /// address, i.e. both the call to B and the call to C have been inlined all
    /// the way into A (A being the "outer function"), then this method reports
    /// locations in C.
    pub fn get_innermost_sourceloc(&self, addr: u64) -> Option<(u32, u32, u64)> {
        let line = self.lines.get(addr)?;
        Some((line.file, line.line, line.address))
    }
    /// Returns `(call_file_id, call_line, address, inline_origin)` of the
    /// inlinee record that covers the given address at the given depth.
    ///
    /// We start at depth zero. For example, if we have an "inline call stack"
    /// A -> B -> C at an address, i.e. both the call to B and the call to C have
    /// been inlined all the way into A (A being the "outer function"), then the
    /// call A -> B is at level zero, and the call B -> C is at level one.
    pub fn get_inlinee_at_depth(&self, depth: u32, addr: u64) -> Option<(u32, u32, u64, u32)> {
        // `inlinees` is sorted by (depth, address) (see the field docs), so a
        // binary search yields the candidate record.
        let inlinee = match self
            .inlinees
            .binary_search_by_key(&(depth, addr), |inlinee| (inlinee.depth, inlinee.address))
        {
            // Exact match
            Ok(index) => &self.inlinees[index],
            // No match, insertion index is zero => before first element
            Err(0) => return None,
            // No exact match, insertion index points after inlinee whose (depth, addr) is < what we're looking for
            // => subtract 1 to get candidate
            Err(index) => &self.inlinees[index - 1],
        };
        // The candidate may belong to a shallower depth; reject it.
        if inlinee.depth != depth {
            return None;
        }
        // The end address is exclusive: the candidate only applies if `addr`
        // falls strictly before it.
        let end_address = inlinee.address.checked_add(inlinee.size as u64)?;
        if addr < end_address {
            Some((
                inlinee.call_file,
                inlinee.call_line,
                inlinee.address,
                inlinee.origin_id,
            ))
        } else {
            None
        }
    }
}
/// Extra metadata that can be safely ignored, but may contain useful facts.
#[derive(Clone, Debug, Eq, Ord, PartialEq, PartialOrd)]
pub enum Info {
    /// The URL this file was downloaded from. This is added to symbol files
    /// by HttpSymbolSupplier when it stores them in its cache, so that we
    /// can populate that info even on a cache hit.
    Url(String),
    /// An info line we either don't know about or don't care about.
    Unknown,
}
/// DWARF CFI rules for recovering registers at a specific address.
#[derive(Clone, Debug, Eq, Ord, PartialEq, PartialOrd)]
pub struct CfiRules {
    /// The address in question.
    pub address: u64,
    /// Postfix expressions to evaluate to recover register values.
    pub rules: String,
}
/// Information used for unwinding stack frames using DWARF CFI.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct StackInfoCfi {
    /// The initial rules for this address range.
    pub init: CfiRules,
    /// The size of this entire address range.
    pub size: u32,
    /// Additional rules to use at specified addresses.
    ///
    /// NOTE(review): `SymbolFile::walk_frame` scans these with an early-exit
    /// on `address > addr`, so they are assumed sorted by address — confirm
    /// the parser guarantees this ordering.
    pub add_rules: Vec<CfiRules>,
}
impl StackInfoCfi {
    /// The inclusive address range covered by this CFI entry, or `None` when
    /// the entry is empty (size 0) or its end would overflow `u64`.
    pub fn memory_range(&self) -> Option<Range<u64>> {
        if self.size == 0 {
            return None;
        }
        let start = self.init.address;
        let end = start.checked_add(u64::from(self.size))? - 1;
        Some(Range::new(start, end))
    }
}
/// Specific details about whether the frame uses a base pointer or has a program string to
/// evaluate.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum WinFrameType {
    /// This frame uses FPO-style data.
    Fpo(StackInfoWin),
    /// This frame uses new-style frame data, has a program string.
    FrameData(StackInfoWin),
    /// Some other type of frame.
    Unhandled,
}
/// Either a postfix program string or a flag for base-pointer allocation;
/// which one applies distinguishes framedata entries from FPO entries.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum WinStackThing {
    ProgramString(String),
    AllocatesBasePointer(bool),
}
/// Information used for unwinding stack frames using Windows frame info.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct StackInfoWin {
    /// The address in question.
    pub address: u64,
    /// The size of the address range covered.
    pub size: u32,
    /// The size of the function's prologue.
    pub prologue_size: u32,
    /// The size of the function's epilogue.
    pub epilogue_size: u32,
    /// The size of arguments passed to this function.
    pub parameter_size: u32,
    /// The number of bytes in the stack frame for callee-saves registers.
    pub saved_register_size: u32,
    /// The number of bytes in the stack frame for local variables.
    pub local_size: u32,
    /// The maximum number of bytes pushed onto the stack by this frame.
    pub max_stack_size: u32,
    /// A program string or boolean regarding a base pointer.
    pub program_string_or_base_pointer: WinStackThing,
}
impl StackInfoWin {
    /// The inclusive address range covered by this entry, or `None` when the
    /// entry is empty (size 0) or its end would overflow `u64`.
    pub fn memory_range(&self) -> Option<Range<u64>> {
        if self.size == 0 {
            return None;
        }
        let end = self.address.checked_add(u64::from(self.size))? - 1;
        Some(Range::new(self.address, end))
    }
}
/// A parsed .sym file containing debug symbols.
#[derive(Debug, PartialEq, Eq)]
pub struct SymbolFile {
    pub module_id: String,
    pub debug_file: String,
    /// The set of source files involved in compilation.
    pub files: HashMap<u32, String>,
    /// Publicly visible symbols.
    pub publics: Vec<PublicSymbol>,
    /// Functions.
    pub functions: RangeMap<u64, Function>,
    /// Function names for inlined functions.
    pub inline_origins: HashMap<u32, String>,
    /// DWARF CFI unwind information.
    pub cfi_stack_info: RangeMap<u64, StackInfoCfi>,
    /// Windows unwind information (frame data).
    pub win_stack_framedata_info: RangeMap<u64, StackInfoWin>,
    /// Windows unwind information (FPO data).
    pub win_stack_fpo_info: RangeMap<u64, StackInfoWin>,
    // Statistics which are strictly best-effort. Generally this
    // means we might undercount in situations where we forgot to
    // log an event.
    /// If the symbol file was loaded from a URL, this is the url
    pub url: Option<String>,
    /// The number of times the parser found that the symbol file was
    /// strictly ambiguous but simple heuristics repaired it. (e.g.
    /// two STACK WIN entries overlapped, but the second was a suffix of
    /// the first, so we just truncated the first.)
    ///
    /// Ideally dump_syms would never output this kind of thing, but it's
    /// tolerable.
    pub ambiguities_repaired: u64,
    /// The number of times the parser found that the symbol file was
    /// ambiguous and just randomly picked one of the options to make
    /// progress.
    ///
    /// e.g. two STACK WIN entries with identical ranges but
    /// different values, so one was discarded arbitrarily.
    pub ambiguities_discarded: u64,
    /// The number of times the parser found that a section of the file
    /// (generally a line) was corrupt and discarded it.
    ///
    /// e.g. a STACK WIN entry where the `type` and `has_program` fields
    /// have inconsistent values.
    pub corruptions_discarded: u64,
    /// The number of times the cfi evaluator failed out in a way that
    /// implies the cfi entry is fundamentally corrupt.
    ///
    /// This isn't detected during parsing for two reasons:
    ///
    /// * We don't parse cfi program strings until we are requested to
    ///   execute them (there's ~millions of program strings which will
    ///   never need to be parsed, so eagerly parsing them would be
    ///   horribly expensive and pointless for anything but debug stats.)
    ///
    /// * A program string may technically parse but still be impossible
    ///   to fully evaluate. For instance, it might try to pop values from
    ///   its internal stack when there are none left.
    ///
    /// This number may be inflated if a corrupt cfi entry occurs in multiple
    /// frames, as each attempted eval will be counted.
    ///
    /// This number does not include cfi evaluations that failed in ways that
    /// may be a result of incorrect input memory/registers (e.g. failing
    /// to evaluate a "dereference pointer" instruction because the pointer
    /// was not mapped memory). In these situations the cfi entry *may*
    /// still be correct.
    pub cfi_eval_corruptions: u64,
}

1836
third_party/rust/breakpad-symbols/src/sym_file/walker.rs поставляемый Normal file

Разница между файлами не показана из-за своего большого размера Загрузить разницу

1
third_party/rust/cachemap2/.cargo-checksum.json поставляемый Normal file
Просмотреть файл

@ -0,0 +1 @@
{"files":{"Cargo.toml":"04431cabfb5ac68f7bfc57cee9777a69d22c8375081a87a6957daf7d70c4bff7","ChangeLog.md":"7f2bf552c898421a93086be57074bbc3063ac25576eb7e56dc95035b97e31131","LICENSE":"b5b44ae2ab9e1ef50d9aeba9686c5cf3ec666b402420cb3abf98caf996755d6e","README.md":"6ebb8d035c049bac3ae17ece73378a75df7542b0177d04470199ecce178383be","src/dashmap_impl.rs":"8708bc2e0b3803ebb0a19ba3025c7d465d5a4dc7f1ee4a4fbc7411871c21b19d","src/hashmap_impl.rs":"3571e8c921a77a9b97edf4987000d8e5d7f2548208bc5b59d09ecf375ae101a9","src/lib.rs":"20b4db6ca813533c07717a25d19436b1c4a7d430691b8f5a6568f005909335f5"},"package":"68ccbd3153aa153b2f5eff557537ffce81e4dd6c50ae0eddc41dc8d0c388436f"}

39
third_party/rust/cachemap2/Cargo.toml поставляемый Normal file
Просмотреть файл

@ -0,0 +1,39 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.
[package]
edition = "2021"
name = "cachemap2"
version = "0.3.0"
authors = ["Alex Franchuk <alex.franchuk@gmail.com>"]
description = "A concurrent insert-only hashmap for caching values"
homepage = "https://github.com/afranchuk/cachemap2"
readme = "README.md"
keywords = [
"sync",
"data-structure",
"cache",
"hash-map",
]
categories = [
"concurrency",
"data-structures",
"caching",
]
license = "MIT"
[dependencies.abi_stable]
version = ">=0.9"
optional = true
[dependencies.dashmap]
version = "5.1"
optional = true

10
third_party/rust/cachemap2/ChangeLog.md поставляемый Normal file
Просмотреть файл

@ -0,0 +1,10 @@
# cachemap2
## 0.3.0 -- 2024-01-19
* Add support for custom hashers (#1), thanks @zohnannor.
### Breaking changes
* The minimum version of dashmap is upped to 5.1.
## 0.2.0 -- 2023-03-21
* Initial forked release.

21
third_party/rust/cachemap2/LICENSE поставляемый Normal file
Просмотреть файл

@ -0,0 +1,21 @@
MIT License
Copyright (c) 2021 hclarke
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

36
third_party/rust/cachemap2/README.md поставляемый Normal file
Просмотреть файл

@ -0,0 +1,36 @@
# CacheMap
CacheMap is a data structure for concurrently caching values.
The `cache` function will look up a value in the map, or generate and store a new one using the
provided function.
This is an updated and maintained fork of [hclarke/cachemap](https://github.com/hclarke/cachemap).
## Example
```
use cachemap2::CacheMap;
let m = CacheMap::new();
let fst = m.cache("key", || 5u32);
let snd = m.cache("key", || 7u32);
assert_eq!(*fst, *snd);
assert_eq!(*fst, 5u32);
```
## Features
- Can cache values concurrently (using `&CacheMap<K,V>` rather than `&mut CacheMap<K,V>`).
- Returned references use the map's lifetime, so clients can avoid smart pointers.
- Clients can optionally enable the `dashmap` feature, which uses `dashmap` internally and allows:
- getting `Arc<V>` pointers, in case values need to outlive the map, and
- adding `Arc<V>` directly, allowing unsized values, and re-using `Arc<V>`s from elsewhere.
- Clients can optionally enable the `abi_stable` feature which will derive `abi_stable::StableAbi`
on the type.
## AntiFeatures
- There is no cache invalidation: the only way to remove things from a CacheMap is to drop it.

248
third_party/rust/cachemap2/src/dashmap_impl.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,248 @@
use dashmap::DashMap;
use std::ops::Deref;
use std::sync::Arc;
use std::{collections::hash_map::RandomState, hash::Hash};
use std::{hash::BuildHasher, marker::PhantomData};
/// An insert-only map for caching the result of functions
pub struct CacheMap<K: Hash + Eq, V: ?Sized, S = RandomState> {
    // Values are stored as `Arc<V>` so callers can hold them past the map's
    // lifetime via `ArcRef::to_arc`.
    inner: DashMap<K, Arc<V>, S>,
}
/// A handle that can be converted to a &T or an Arc<T>
pub struct ArcRef<'a, T: ?Sized> {
    // this pointer never gets dereferenced, but it has to be T, so that Ref is the right size for wide pointers
    #[allow(dead_code)]
    fake_ptr: *const T,
    phantom: PhantomData<&'a T>,
}
impl<'a, T: ?Sized> Clone for ArcRef<'a, T> {
    fn clone(&self) -> Self {
        *self
    }
}
impl<'a, T: ?Sized> Copy for ArcRef<'a, T> {}
impl<T: ?Sized> Deref for ArcRef<'_, T> {
    type Target = Arc<T>;
    fn deref(&self) -> &Self::Target {
        // SAFETY(review): an `ArcRef` is only created in `cache_arc` by
        // reinterpreting a `&Arc<V>` stored in the map, so it carries the same
        // pointer bits as that `Arc`. This transmute assumes `ArcRef<T>` and
        // `Arc<T>` are layout-compatible (each a single, possibly wide,
        // pointer), and that the map entry stays alive for `'a` — the map is
        // insert-only, so entries are never removed. Confirm layout claim.
        unsafe { std::mem::transmute(self) }
    }
}
impl<'a, T: ?Sized> ArcRef<'a, T> {
    /// Converts the ArcRef into an Arc<T>
    pub fn to_arc(self) -> Arc<T> {
        self.deref().clone()
    }
    /// Converts the ArcRef into a &T
    pub fn as_ref(self) -> &'a T {
        let ptr = &**self as *const T;
        // SAFETY(review): extends the borrow to `'a`; assumed sound because the
        // map is insert-only, so the backing `Arc` (and its pointee) lives at
        // least as long as the map borrow `'a`.
        unsafe { &*ptr }
    }
}
impl<K: Hash + Eq, V: ?Sized, S: BuildHasher + Default + Clone> Default for CacheMap<K, V, S> {
    /// Creates an empty map using the hasher's default state.
    fn default() -> Self {
        CacheMap {
            inner: Default::default(),
        }
    }
}
impl<K: Hash + Eq, V, S: BuildHasher + Default + Clone> std::iter::FromIterator<(K, V)>
    for CacheMap<K, V, S>
{
    /// Builds a map from key/value pairs, wrapping each value in an `Arc`.
    fn from_iter<T>(iter: T) -> Self
    where
        T: IntoIterator<Item = (K, V)>,
    {
        CacheMap {
            inner: iter.into_iter().map(|(k, v)| (k, Arc::new(v))).collect(),
        }
    }
}
/// Owning iterator over `(K, Arc<V>)` pairs, in unspecified order.
pub struct IntoIter<K, V, S>(dashmap::iter::OwningIter<K, Arc<V>, S>);
impl<K: Eq + Hash, V, S: BuildHasher + Clone> Iterator for IntoIter<K, V, S> {
    type Item = (K, Arc<V>);
    fn next(&mut self) -> Option<Self::Item> {
        self.0.next()
    }
}
impl<K: Hash + Eq, V, S: BuildHasher + Clone> IntoIterator for CacheMap<K, V, S> {
    type Item = (K, Arc<V>);
    type IntoIter = IntoIter<K, V, S>;
    fn into_iter(self) -> Self::IntoIter {
        IntoIter(self.inner.into_iter())
    }
}
impl<K: Hash + Eq, V, S: BuildHasher + Clone> CacheMap<K, V, S> {
    /// Fetch the value associated with the key, or run the provided function to insert one.
    ///
    /// Delegates to [`CacheMap::cache_arc`], wrapping the produced value in an
    /// `Arc`. If the key is already present, `f` is not run.
    ///
    /// # Example
    ///
    /// ```
    /// use cachemap2::CacheMap;
    ///
    /// let m = CacheMap::new();
    ///
    /// let fst = m.cache("key", || 5u32).as_ref();
    /// let snd = m.cache("key", || 7u32).as_ref();
    ///
    /// assert_eq!(*fst, *snd);
    /// assert_eq!(*fst, 5u32);
    /// ```
    pub fn cache<F: FnOnce() -> V>(&self, key: K, f: F) -> ArcRef<'_, V> {
        self.cache_arc(key, || Arc::new(f()))
    }
    /// Fetch the value associated with the key, or insert a default value.
    pub fn cache_default(&self, key: K) -> ArcRef<'_, V>
    where
        V: Default,
    {
        self.cache(key, || Default::default())
    }
    /// Return whether the map contains the given key.
    pub fn contains_key<Q: ?Sized>(&self, key: &Q) -> bool
    where
        K: std::borrow::Borrow<Q>,
        Q: Hash + Eq,
    {
        self.inner.contains_key(key)
    }
}
impl<K: Hash + Eq, V: ?Sized> CacheMap<K, V, RandomState> {
    /// Creates a new CacheMap
    pub fn new() -> Self {
        CacheMap {
            inner: DashMap::new(),
        }
    }
}
impl<K: Hash + Eq, V: ?Sized, S: BuildHasher + Clone> CacheMap<K, V, S> {
    /// Creates a new CacheMap with the provided hasher
    pub fn with_hasher(hash_builder: S) -> Self {
        Self {
            inner: DashMap::with_hasher(hash_builder),
        }
    }
    /// Fetch the value associated with the key, or run the provided function to insert one.
    /// With this version, the function returns an Arc<V>, which allows caching unsized types.
    ///
    /// # Example
    ///
    /// ```
    /// use cachemap2::CacheMap;
    ///
    /// let m: CacheMap<_, [usize]> = CacheMap::new();
    ///
    /// let a = m.cache_arc("a", || {
    ///     let a = &[1,2,3][..];
    ///     a.into()
    /// }).as_ref();
    ///
    /// let b = m.cache_arc("b", || {
    ///     let b = &[9,9][..];
    ///     b.into()
    /// }).as_ref();
    ///
    /// assert_eq!(a, &[1,2,3]);
    /// assert_eq!(b, &[9,9]);
    /// ```
    pub fn cache_arc<F: FnOnce() -> Arc<V>>(&self, key: K, f: F) -> ArcRef<'_, V> {
        let val = self.inner.entry(key).or_insert_with(f);
        let arc: &Arc<V> = &*val;
        // SAFETY(review): reinterprets `&Arc<V>` as `&ArcRef<V>` and copies the
        // `ArcRef` out before the entry guard (`val`) is dropped. Assumes the
        // two types are layout-compatible single pointers, and that the copied
        // pointer stays valid because entries are never removed from the
        // insert-only map. See the matching transmute in `Deref for ArcRef`.
        let arc_ref: &ArcRef<'_, V> = unsafe { std::mem::transmute(arc) };
        *arc_ref
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    /// Basic insert + read-back through an `ArcRef`.
    #[test]
    fn single_insert() {
        let m = CacheMap::new();
        let a = m.cache("key", || 21u32).as_ref();
        assert_eq!(21, *a);
    }
    #[test]
    fn contains_key() {
        let m = CacheMap::new();
        m.cache("key", || 21u32);
        assert!(m.contains_key("key"));
        assert!(!m.contains_key("other"));
    }
    /// The second closure for an existing key must not win: the cached value does.
    #[test]
    fn double_insert() {
        let m = CacheMap::new();
        let a = m.cache("key", || 5u32).as_ref();
        let b = m.cache("key", || 7u32).as_ref();
        assert_eq!(*a, *b);
        assert_eq!(5, *a);
    }
    /// Distinct keys are cached independently; re-caching returns the originals.
    #[test]
    fn insert_two() {
        let m = CacheMap::new();
        let a = m.cache("a", || 5u32).as_ref();
        let b = m.cache("b", || 7u32).as_ref();
        assert_eq!(5, *a);
        assert_eq!(7, *b);
        let c = m.cache("a", || 9u32).as_ref();
        let d = m.cache("b", || 11u32).as_ref();
        assert_eq!(*a, *c);
        assert_eq!(*b, *d);
        assert_eq!(5, *a);
        assert_eq!(7, *b);
    }
    /// `to_arc` must keep the value alive after the map is dropped. With the
    /// map gone the refcount is 1, so `Arc::make_mut` mutates in place and
    /// `Foo::drop` observes the updated value (33).
    #[test]
    fn use_after_drop() {
        #[derive(Clone)]
        struct Foo(usize);
        impl Drop for Foo {
            fn drop(&mut self) {
                assert_eq!(33, self.0);
            }
        }
        {
            let mut arc = {
                let m = CacheMap::new();
                let a = m.cache("key", || Foo(99)).to_arc();
                assert_eq!(99, (*a).0);
                a
            };
            Arc::make_mut(&mut arc).0 = 33;
        }
        assert!(true);
    }
}

276
third_party/rust/cachemap2/src/hashmap_impl.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,276 @@
use std::collections::hash_map::RandomState;
use std::hash::{BuildHasher, Hash};
#[cfg(not(feature = "abi_stable"))]
mod basic_impl {
    //! Backend aliases built on `std` types, used when the `abi_stable`
    //! feature is disabled.
    pub type BoxImpl<T> = Box<T>;
    pub type HashMapImpl<K, V, S> = std::collections::HashMap<K, V, S>;
    pub type MutexImpl<T> = std::sync::Mutex<T>;
    pub type MutexGuardImpl<'a, T> = std::sync::MutexGuard<'a, T>;
    pub type IterImpl<'a, K, V> = std::collections::hash_map::Iter<'a, K, V>;
    pub type IntoIterImpl<K, V> = std::collections::hash_map::IntoIter<K, V>;
    /// Unwraps a box into its contents.
    pub fn box_into_inner_impl<T>(b: BoxImpl<T>) -> T {
        *b
    }
    /// Locks the mutex; panics if it was poisoned by a panicking thread.
    pub fn mutex_lock_impl<'a, T>(m: &'a MutexImpl<T>) -> MutexGuardImpl<'a, T> {
        m.lock().unwrap()
    }
    /// Consumes the mutex and returns its contents; panics if poisoned.
    pub fn mutex_into_inner_impl<T>(m: MutexImpl<T>) -> T {
        m.into_inner().unwrap()
    }
}
#[cfg(not(feature = "abi_stable"))]
use basic_impl::*;
#[cfg(feature = "abi_stable")]
mod abi_stable_impl {
    //! Backend aliases built on `abi_stable` types (`RBox`/`RHashMap`/`RMutex`),
    //! used when the `abi_stable` feature is enabled.
    use abi_stable::{
        external_types::RMutex,
        std_types::{RBox, RHashMap},
    };
    pub type BoxImpl<T> = RBox<T>;
    pub type HashMapImpl<K, V, S> = RHashMap<K, V, S>;
    pub type MutexImpl<T> = RMutex<T>;
    pub type MutexGuardImpl<'a, T> =
        abi_stable::external_types::parking_lot::mutex::RMutexGuard<'a, T>;
    pub type IterImpl<'a, K, V> = abi_stable::std_types::map::Iter<'a, K, V>;
    pub type IntoIterImpl<K, V> = abi_stable::std_types::map::IntoIter<K, V>;
    /// Unwraps an `RBox` into its contents.
    pub fn box_into_inner_impl<T>(b: BoxImpl<T>) -> T {
        RBox::into_inner(b)
    }
    /// Locks the mutex. `RMutex` (parking_lot-based) has no poisoning, so no
    /// unwrap is needed here, unlike `basic_impl`.
    pub fn mutex_lock_impl<'a, T>(m: &'a MutexImpl<T>) -> MutexGuardImpl<'a, T> {
        m.lock()
    }
    /// Consumes the mutex and returns its contents.
    pub fn mutex_into_inner_impl<T>(m: MutexImpl<T>) -> T {
        m.into_inner()
    }
}
#[cfg(feature = "abi_stable")]
use abi_stable_impl::*;
/// An insert-only map for caching the result of functions
// With the `abi_stable` feature the type is `#[repr(C)]` and derives
// `StableAbi`, so it can cross FFI boundaries between dynamically loaded
// libraries.
#[cfg_attr(feature = "abi_stable", derive(abi_stable::StableAbi))]
#[cfg_attr(feature = "abi_stable", repr(C))]
pub struct CacheMap<K, V, S = RandomState> {
    // Values are boxed so the `&V` references handed out by `cache` remain
    // valid even when the underlying table rehashes (the box's heap address
    // is stable); see the safety comment in `cache`.
    inner: MutexImpl<HashMapImpl<K, BoxImpl<V>, S>>,
}
impl<K: Eq + Hash, V, S: BuildHasher + Default> Default for CacheMap<K, V, S> {
    /// Creates an empty map using the hasher's default state.
    fn default() -> Self {
        CacheMap {
            inner: MutexImpl::new(Default::default()),
        }
    }
}
impl<K: Eq + Hash, V, S: BuildHasher + Default> std::iter::FromIterator<(K, V)>
    for CacheMap<K, V, S>
{
    /// Builds a map from key/value pairs, boxing each value.
    fn from_iter<T>(iter: T) -> Self
    where
        T: IntoIterator<Item = (K, V)>,
    {
        CacheMap {
            inner: MutexImpl::new(
                iter.into_iter()
                    .map(|(k, v)| (k, BoxImpl::new(v)))
                    .collect(),
            ),
        }
    }
}
/// Owning iterator over `(K, V)` pairs (values unboxed), in unspecified order.
pub struct IntoIter<K, V>(IntoIterImpl<K, BoxImpl<V>>);
impl<K, V> Iterator for IntoIter<K, V> {
    type Item = (K, V);
    fn next(&mut self) -> Option<Self::Item> {
        self.0.next().map(|t| (t.0, box_into_inner_impl(t.1)))
    }
}
impl<K, V, S> IntoIterator for CacheMap<K, V, S> {
    type Item = (K, V);
    type IntoIter = IntoIter<K, V>;
    fn into_iter(self) -> Self::IntoIter {
        IntoIter(mutex_into_inner_impl(self.inner).into_iter())
    }
}
/// Borrowing iterator over `(&K, &V)` pairs.
///
/// Holds the map's mutex guard, so the map stays locked for as long as this
/// iterator is alive.
pub struct Iter<'a, K, V, S> {
    iter: IterImpl<'a, K, BoxImpl<V>>,
    _guard: MutexGuardImpl<'a, HashMapImpl<K, BoxImpl<V>, S>>,
}
impl<'a, K, V, S> Iterator for Iter<'a, K, V, S> {
    type Item = (&'a K, &'a V);
    fn next(&mut self) -> Option<Self::Item> {
        self.iter.next().map(|t| (t.0, t.1.as_ref()))
    }
}
impl<'a, K, V, S> IntoIterator for &'a CacheMap<K, V, S> {
    type Item = (&'a K, &'a V);
    type IntoIter = Iter<'a, K, V, S>;
    fn into_iter(self) -> Self::IntoIter {
        let guard = mutex_lock_impl(&self.inner);
        // SAFETY(review): this identity transmute erases the iterator's borrow
        // of the local `guard` so both can be stored together in `Iter` (a
        // self-referential pair). The guard lives alongside the iterator,
        // keeping the map locked — and therefore unmodified — while the
        // iterator's internal references are in use.
        let iter = unsafe {
            std::mem::transmute::<IterImpl<K, BoxImpl<V>>, IterImpl<K, BoxImpl<V>>>(guard.iter())
        };
        Iter {
            iter,
            _guard: guard,
        }
    }
}
impl<K: Eq + Hash, V, S: BuildHasher> CacheMap<K, V, S> {
    /// Fetch the value associated with the key, or run the provided function to insert one.
    ///
    /// The lock is released before returning; the reference stays valid
    /// because it points at the *boxed* value, whose heap address is stable
    /// even if the table rehashes, and entries are never removed.
    ///
    /// # Example
    ///
    /// ```
    /// use cachemap2::CacheMap;
    ///
    /// let m = CacheMap::new();
    ///
    /// let fst = m.cache("key", || 5u32);
    /// let snd = m.cache("key", || 7u32);
    ///
    /// assert_eq!(*fst, *snd);
    /// assert_eq!(*fst, 5u32);
    /// ```
    pub fn cache<F: FnOnce() -> V>(&self, key: K, f: F) -> &V {
        let v = std::ptr::NonNull::from(
            mutex_lock_impl(&self.inner)
                .entry(key)
                .or_insert_with(|| BoxImpl::new(f()))
                .as_ref(),
        );
        // Safety: We only support adding entries to the hashmap, and as long as a reference is
        // maintained the value will be present.
        unsafe { v.as_ref() }
    }
    /// Fetch the value associated with the key, or insert a default value.
    pub fn cache_default(&self, key: K) -> &V
    where
        V: Default,
    {
        self.cache(key, || Default::default())
    }
    /// Return whether the map contains the given key.
    pub fn contains_key<Q: ?Sized>(&self, key: &Q) -> bool
    where
        K: std::borrow::Borrow<Q>,
        Q: Hash + Eq,
    {
        mutex_lock_impl(&self.inner).contains_key(key)
    }
    /// Return an iterator over the map.
    ///
    /// This iterator will lock the underlying map until it is dropped.
    pub fn iter(&self) -> Iter<K, V, S> {
        self.into_iter()
    }
}
impl<K: Eq + Hash, V> CacheMap<K, V, RandomState> {
    /// Creates a new CacheMap
    pub fn new() -> Self {
        Default::default()
    }
}
impl<K: Eq + Hash, V, S: BuildHasher + Default> CacheMap<K, V, S> {
    /// Creates a new CacheMap with the provided hasher
    // NOTE(review): the `Default` bound on `S` is not used by this body
    // (the hasher is supplied by the caller) — presumably kept for parity
    // with the other impls; confirm before relaxing.
    pub fn with_hasher(hash_builder: S) -> Self {
        Self {
            inner: MutexImpl::new(HashMapImpl::with_hasher(hash_builder)),
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    /// Basic insert + read-back through the returned `&V`.
    #[test]
    fn single_insert() {
        let m = CacheMap::new();
        let a = m.cache("key", || 21u32);
        assert_eq!(21, *a);
    }
    #[test]
    fn contains_key() {
        let m = CacheMap::new();
        m.cache("key", || 21u32);
        assert!(m.contains_key("key"));
        assert!(!m.contains_key("other"));
    }
    /// The second closure for an existing key must not win: the cached value does.
    #[test]
    fn double_insert() {
        let m = CacheMap::new();
        let a = m.cache("key", || 5u32);
        let b = m.cache("key", || 7u32);
        assert_eq!(*a, *b);
        assert_eq!(5, *a);
    }
    /// Distinct keys are cached independently; re-caching returns the originals.
    #[test]
    fn insert_two() {
        let m = CacheMap::new();
        let a = m.cache("a", || 5u32);
        let b = m.cache("b", || 7u32);
        assert_eq!(5, *a);
        assert_eq!(7, *b);
        let c = m.cache("a", || 9u32);
        let d = m.cache("b", || 11u32);
        assert_eq!(*a, *c);
        assert_eq!(*b, *d);
        assert_eq!(5, *a);
        assert_eq!(7, *b);
    }
    /// `&CacheMap` iteration yields every cached pair exactly once.
    #[test]
    fn iter() {
        use std::collections::HashMap;
        use std::iter::FromIterator;
        let m = CacheMap::new();
        m.cache("a", || 5u32);
        m.cache("b", || 7u32);
        let mut expected = HashMap::<&'static str, u32>::from_iter([("a", 5u32), ("b", 7u32)]);
        for (k, v) in &m {
            assert!(expected.remove(k).expect("unexpected key") == *v);
        }
        assert!(expected.is_empty());
    }
}

14
third_party/rust/cachemap2/src/lib.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,14 @@
// The `dashmap` feature selects the lock-free implementation backed by
// `dashmap::DashMap`, which exposes `Arc`-based handles (`ArcRef`).
#[cfg(feature = "dashmap")]
mod dashmap_impl;
#[cfg(feature = "dashmap")]
pub use dashmap_impl::*;
// The two backends are mutually exclusive: `abi_stable` support only exists
// for the mutex-guarded hashmap implementation.
#[cfg(all(feature = "dashmap", feature = "abi_stable"))]
compile_error!("abi_stable and dashmap features cannot be used together");
// Default backend: a `Mutex`-protected `HashMap` of boxed values.
#[cfg(not(feature = "dashmap"))]
mod hashmap_impl;
#[cfg(not(feature = "dashmap"))]
pub use hashmap_impl::*;

1
third_party/rust/circular/.cargo-checksum.json поставляемый Normal file
Просмотреть файл

@ -0,0 +1 @@
{"files":{"Cargo.toml":"8b979e7165afbae65030082527ef11775cbf311e8406018260f60289695f2164","LICENSE":"cef6497a64266f9c392ee9134aceb82739dadd176422fead980102109bf46d10","README.md":"718abbc1f45007f3c8b54e922abfc3c6e15c2d9eb70163cd1233ee17d4035343","src/lib.rs":"44c514d6556c7a1b130a7fd30246d3d02c4fe3305c2bb222a45c3f4970a5bca4"},"package":"b0fc239e0f6cb375d2402d48afb92f76f5404fd1df208a41930ec81eda078bea"}

23
third_party/rust/circular/Cargo.toml поставляемый Normal file
Просмотреть файл

@ -0,0 +1,23 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies
#
# If you believe there's an error in this file please file an
# issue against the rust-lang/cargo repository. If you're
# editing this file be aware that the upstream Cargo.toml
# will likely look very different (and much more reasonable)
[package]
name = "circular"
version = "0.3.0"
authors = ["Geoffroy Couprie <geo.couprie@gmail.com>"]
include = ["LICENSE", "README.md", ".gitignore", ".travis.yml", "Cargo.toml", "src/*.rs"]
description = "A stream abstraction designed for use with nom"
readme = "README.md"
license = "MIT"
repository = "https://github.com/sozu-proxy/circular"
[dependencies]

20
third_party/rust/circular/LICENSE поставляемый Normal file
Просмотреть файл

@ -0,0 +1,20 @@
Copyright (c) 2017 Geoffroy Couprie
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

5
third_party/rust/circular/README.md поставляемый Normal file
Просмотреть файл

@ -0,0 +1,5 @@
# Circular
Circular is a stream abstraction designed for use with nom. It can expose the
available data, a mutable slice of the available space, and it separates
reading data from actually consuming it from the buffer.

415
third_party/rust/circular/src/lib.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,415 @@
//! Circular, a stream abstraction designed for use with nom
//!
//! Circular provides a `Buffer` type that wraps a `Vec<u8>` with a position
//! and end. Compared to a stream abstraction that would use `std::io::Read`,
//! it separates the reading and consuming phases. `Read` is designed to write
//! the data in a mutable slice and consume it from the stream as it does that.
//!
//! When used in streaming mode, nom will try to parse a slice, then tell you
//! how much it consumed. So you don't know how much data was actually used
//! until the parser returns. `Circular::Buffer` exposes a `data()` method
//! that gives an immutable slice of all the currently readable data,
//! and a `consume()` method to advance the position in the stream.
//! The `space()` and `fill()` methods are the write counterparts to those methods.
//!
//! ```
//! extern crate circular;
//!
//! use circular::Buffer;
//! use std::io::Write;
//!
//! fn main() {
//!
//! // allocate a new Buffer
//! let mut b = Buffer::with_capacity(10);
//! assert_eq!(b.available_data(), 0);
//! assert_eq!(b.available_space(), 10);
//!
//! let res = b.write(&b"abcd"[..]);
//! assert_eq!(res.ok(), Some(4));
//! assert_eq!(b.available_data(), 4);
//! assert_eq!(b.available_space(), 6);
//!
//! //the 4 bytes we wrote are immediately available and usable for parsing
//! assert_eq!(b.data(), &b"abcd"[..]);
//!
//! // this will advance the position from 0 to 2. it does not modify the underlying Vec
//! b.consume(2);
//! assert_eq!(b.available_data(), 2);
//! assert_eq!(b.available_space(), 6);
//! assert_eq!(b.data(), &b"cd"[..]);
//!
//! // shift moves the available data at the beginning of the buffer.
//! // the position is now 0
//! b.shift();
//! assert_eq!(b.available_data(), 2);
//! assert_eq!(b.available_space(), 8);
//! assert_eq!(b.data(), &b"cd"[..]);
//! }
//! ```
use std::{cmp, ptr};
use std::io::{self,Write,Read};
use std::iter::repeat;
/// the Buffer contains the underlying memory and data positions
///
/// In all cases, `0 ≤ position ≤ end ≤ capacity` should be true
#[derive(Debug,PartialEq,Clone)]
pub struct Buffer {
    /// the Vec containing the data
    /// (kept zero-filled to `capacity` bytes by `with_capacity()`/`grow()`)
    memory: Vec<u8>,
    /// the current capacity of the Buffer
    /// (mirrors `memory.len()`; see `with_capacity()` and `grow()`)
    capacity: usize,
    /// the current beginning of the available data
    position: usize,
    /// the current end of the available data
    /// and beginning of the available space
    end: usize
}
impl Buffer {
    /// allocates a new buffer of maximum size `capacity`
    ///
    /// The backing vector is zero-filled up front so that `space()` can
    /// immediately hand out an initialized mutable slice.
    pub fn with_capacity(capacity: usize) -> Buffer {
        let mut v = Vec::with_capacity(capacity);
        // zero-fill so that `memory.len() == capacity`
        v.extend(repeat(0).take(capacity));
        Buffer {
            memory: v,
            capacity: capacity,
            position: 0,
            end: 0
        }
    }

    /// allocates a new buffer containing the slice `data`
    ///
    /// the buffer starts full, its available data size is exactly `data.len()`
    pub fn from_slice(data: &[u8]) -> Buffer {
        Buffer {
            memory: Vec::from(data),
            capacity: data.len(),
            position: 0,
            end: data.len()
        }
    }

    /// increases the size of the buffer
    ///
    /// this does nothing (and returns `false`) if the buffer is already
    /// large enough; the buffer is never shrunk
    pub fn grow(&mut self, new_size: usize) -> bool {
        if self.capacity >= new_size {
            return false;
        }
        // new bytes are zeroed, preserving `memory.len() == capacity`
        self.memory.resize(new_size, 0);
        self.capacity = new_size;
        true
    }

    /// returns how much data can be read from the buffer
    pub fn available_data(&self) -> usize {
        self.end - self.position
    }

    /// returns how much free space is available to write to
    pub fn available_space(&self) -> usize {
        self.capacity - self.end
    }

    /// returns the underlying vector's size
    pub fn capacity(&self) -> usize {
        self.capacity
    }

    /// returns true if there is no more data to read
    pub fn empty(&self) -> bool {
        self.position == self.end
    }

    /// advances the position tracker
    ///
    /// if the position gets past the buffer's half,
    /// this will call `shift()` to move the remaining data
    /// to the beginning of the buffer
    ///
    /// returns the number of bytes actually consumed, clamped to
    /// `available_data()`
    pub fn consume(&mut self, count: usize) -> usize {
        let cnt = cmp::min(count, self.available_data());
        self.position += cnt;
        if self.position > self.capacity / 2 {
            //trace!("consume shift: pos {}, end {}", self.position, self.end);
            self.shift();
        }
        cnt
    }

    /// advances the position tracker
    ///
    /// This method is similar to `consume()` but will not move data
    /// to the beginning of the buffer
    pub fn consume_noshift(&mut self, count: usize) -> usize {
        let cnt = cmp::min(count, self.available_data());
        self.position += cnt;
        cnt
    }

    /// after having written data to the buffer, use this function
    /// to indicate how many bytes were written
    ///
    /// if there is not enough available space, this function can call
    /// `shift()` to move the remaining data to the beginning of the
    /// buffer
    pub fn fill(&mut self, count: usize) -> usize {
        let cnt = cmp::min(count, self.available_space());
        self.end += cnt;
        // NOTE(review): `available_data()` already includes `cnt` at this
        // point, so this threshold counts the new bytes twice — confirm
        // whether that is intended upstream.
        if self.available_space() < self.available_data() + cnt {
            //trace!("fill shift: pos {}, end {}", self.position, self.end);
            self.shift();
        }
        cnt
    }

    /// Get the current position
    ///
    /// # Examples
    /// ```
    /// use circular::Buffer;
    /// use std::io::{Read,Write};
    ///
    /// let mut output = [0;5];
    ///
    /// let mut b = Buffer::with_capacity(10);
    ///
    /// let res = b.write(&b"abcdefgh"[..]);
    ///
    /// b.read(&mut output);
    ///
    /// // Position must be 5
    /// assert_eq!(b.position(), 5);
    /// assert_eq!(b.available_data(), 3);
    /// ```
    pub fn position(&self) -> usize {
        self.position
    }

    /// moves the position and end trackers to the beginning
    /// this function does not modify the data
    pub fn reset(&mut self) {
        self.position = 0;
        self.end = 0;
    }

    /// returns a slice with all the available data
    pub fn data(&self) -> &[u8] {
        &self.memory[self.position..self.end]
    }

    /// returns a mutable slice with all the available space to
    /// write to
    pub fn space(&mut self) -> &mut[u8] {
        &mut self.memory[self.end..self.capacity]
    }

    /// moves the data at the beginning of the buffer
    ///
    /// if the position was more than 0, it is now 0
    pub fn shift(&mut self) {
        if self.position > 0 {
            unsafe {
                let length = self.end - self.position;
                // SAFETY: source and destination both lie inside `memory`
                // (0 ≤ position ≤ end ≤ memory.len()), and `ptr::copy` is a
                // memmove, so potentially overlapping ranges are handled.
                ptr::copy( (&self.memory[self.position..self.end]).as_ptr(), (&mut self.memory[..length]).as_mut_ptr(), length);
                self.position = 0;
                self.end = length;
            }
        }
    }

    //FIXME: this should probably be rewritten, and tested extensively
    /// removes `length` bytes at offset `start` (relative to the current
    /// position), moving the following data down
    ///
    /// returns `None` without modifying anything when the range reaches or
    /// passes the end of the available data — note the `>=`: a range ending
    /// exactly on the last available byte is rejected too (see the `delete`
    /// test). Otherwise returns the new `available_data()`.
    #[doc(hidden)]
    pub fn delete_slice(&mut self, start: usize, length: usize) -> Option<usize> {
        if start + length >= self.available_data() {
            return None
        }
        unsafe {
            let begin = self.position + start;
            let next_end = self.end - length;
            // SAFETY: the check above guarantees `begin + length < end`, so
            // both ranges are in bounds; `ptr::copy` handles overlap.
            ptr::copy(
                (&self.memory[begin+length..self.end]).as_ptr(),
                (&mut self.memory[begin..next_end]).as_mut_ptr(),
                self.end - (begin+length)
            );
            self.end = next_end;
        }
        Some(self.available_data())
    }

    //FIXME: this should probably be rewritten, and tested extensively
    /// overwrites the range `[start, start+length)` (relative to the current
    /// position) with `data`, growing or shrinking the available data as
    /// needed; returns the new `available_data()`, or `None` when the range
    /// or the replacement would not fit
    ///
    /// NOTE(review): the tail-moving `ptr::copy` in each branch indexes the
    /// source/destination with raw `start`-relative offsets while the data
    /// copy uses `begin = position + start` — these disagree whenever
    /// `position > 0`; confirm upstream (the tests only exercise
    /// `position == 0`).
    #[doc(hidden)]
    pub fn replace_slice(&mut self, data: &[u8], start: usize, length: usize) -> Option<usize> {
        let data_len = data.len();
        if start + length > self.available_data() ||
            self.position + start + data_len > self.capacity {
            return None
        }
        unsafe {
            let begin = self.position + start;
            let slice_end = begin + data_len;
            // we reduced the data size
            if data_len < length {
                ptr::copy(data.as_ptr(), (&mut self.memory[begin..slice_end]).as_mut_ptr(), data_len);
                ptr::copy((&self.memory[start+length..self.end]).as_ptr(), (&mut self.memory[slice_end..]).as_mut_ptr(), self.end - (start + length));
                self.end = self.end - (length - data_len);
                // we put more data in the buffer
            } else {
                ptr::copy((&self.memory[start+length..self.end]).as_ptr(), (&mut self.memory[start+data_len..]).as_mut_ptr(), self.end - (start + length));
                ptr::copy(data.as_ptr(), (&mut self.memory[begin..slice_end]).as_mut_ptr(), data_len);
                self.end = self.end + data_len - length;
            }
        }
        Some(self.available_data())
    }

    //FIXME: this should probably be rewritten, and tested extensively
    /// inserts `data` at offset `start` (relative to the current position),
    /// moving the existing tail up; returns the new `available_data()`, or
    /// `None` when the insertion would not fit
    ///
    /// NOTE(review): as in `replace_slice`, the tail move uses raw `start`
    /// offsets while the data copy uses `begin = position + start` — looks
    /// inconsistent whenever `position > 0`; confirm upstream.
    #[doc(hidden)]
    pub fn insert_slice(&mut self, data: &[u8], start: usize) -> Option<usize> {
        let data_len = data.len();
        if start > self.available_data() ||
            self.position + self.end + data_len > self.capacity {
            return None
        }
        unsafe {
            let begin = self.position + start;
            let slice_end = begin + data_len;
            ptr::copy((&self.memory[start..self.end]).as_ptr(), (&mut self.memory[start+data_len..]).as_mut_ptr(), self.end - start);
            ptr::copy(data.as_ptr(), (&mut self.memory[begin..slice_end]).as_mut_ptr(), data_len);
            self.end = self.end + data_len;
        }
        Some(self.available_data())
    }
}
impl Write for Buffer {
    /// Writes into the free space and commits the written byte count via
    /// `fill()`; returns how many bytes were accepted.
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        let written = self.space().write(buf)?;
        self.fill(written);
        Ok(written)
    }

    /// Nothing is buffered outside of `memory`, so flushing is a no-op.
    fn flush(&mut self) -> io::Result<()> {
        Ok(())
    }
}
impl Read for Buffer {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
let len = cmp::min(self.available_data(), buf.len());
unsafe {
ptr::copy((&self.memory[self.position..self.position+len]).as_ptr(), buf.as_mut_ptr(), len);
self.position += len;
}
Ok(len)
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::io::Write;

    /// Exercises write/consume/shift bookkeeping; the final oversized write
    /// is truncated to the remaining free space.
    #[test]
    fn fill_and_consume() {
        let mut b = Buffer::with_capacity(10);
        assert_eq!(b.available_data(), 0);
        assert_eq!(b.available_space(), 10);
        let res = b.write(&b"abcd"[..]);
        assert_eq!(res.ok(), Some(4));
        assert_eq!(b.available_data(), 4);
        assert_eq!(b.available_space(), 6);
        assert_eq!(b.data(), &b"abcd"[..]);
        // consume(2) only moves the position; space is unchanged until shift
        b.consume(2);
        assert_eq!(b.available_data(), 2);
        assert_eq!(b.available_space(), 6);
        assert_eq!(b.data(), &b"cd"[..]);
        b.shift();
        assert_eq!(b.available_data(), 2);
        assert_eq!(b.available_space(), 8);
        assert_eq!(b.data(), &b"cd"[..]);
        // only 8 of the 12 bytes fit
        assert_eq!(b.write(&b"efghijklmnop"[..]).ok(), Some(8));
        assert_eq!(b.available_data(), 10);
        assert_eq!(b.available_space(), 0);
        assert_eq!(b.data(), &b"cdefghijkl"[..]);
        // position is already 0, so shifting a full buffer is a no-op
        b.shift();
        assert_eq!(b.available_data(), 10);
        assert_eq!(b.available_space(), 0);
        assert_eq!(b.data(), &b"cdefghijkl"[..]);
    }

    /// Deletes a middle range; ranges reaching the end of the available
    /// data are rejected (`delete_slice` uses `>=`, so even (4, 2) with 5
    /// bytes available returns None).
    #[test]
    fn delete() {
        let mut b = Buffer::with_capacity(10);
        let _ = b.write(&b"abcdefgh"[..]);
        assert_eq!(b.available_data(), 8);
        assert_eq!(b.available_space(), 2);
        assert_eq!(b.delete_slice(2, 3), Some(5));
        assert_eq!(b.available_data(), 5);
        assert_eq!(b.available_space(), 5);
        assert_eq!(b.data(), &b"abfgh"[..]);
        assert_eq!(b.delete_slice(5, 2), None);
        assert_eq!(b.delete_slice(4, 2), None);
    }

    /// Replaces ranges with same-size, larger and smaller slices, checking
    /// the resulting data and the rejection of out-of-range replacements.
    #[test]
    fn replace() {
        let mut b = Buffer::with_capacity(10);
        let _ = b.write(&b"abcdefgh"[..]);
        assert_eq!(b.available_data(), 8);
        assert_eq!(b.available_space(), 2);
        assert_eq!(b.replace_slice(&b"ABC"[..], 2, 3), Some(8));
        assert_eq!(b.available_data(), 8);
        assert_eq!(b.available_space(), 2);
        assert_eq!(b.data(), &b"abABCfgh"[..]);
        assert_eq!(b.replace_slice(&b"XYZ"[..], 8, 3), None);
        assert_eq!(b.replace_slice(&b"XYZ"[..], 6, 3), None);
        // shrinking replacement: 4 bytes become 3
        assert_eq!(b.replace_slice(&b"XYZ"[..], 2, 4), Some(7));
        assert_eq!(b.available_data(), 7);
        assert_eq!(b.available_space(), 3);
        assert_eq!(b.data(), &b"abXYZgh"[..]);
        // growing replacement: 2 bytes become 3
        assert_eq!(b.replace_slice(&b"123"[..], 2, 2), Some(8));
        assert_eq!(b.available_data(), 8);
        assert_eq!(b.available_space(), 2);
        assert_eq!(b.data(), &b"ab123Zgh"[..]);
    }

    /// `Read::read` advances the position without shifting.
    #[test]
    fn set_position() {
        let mut output = [0;5];
        let mut b = Buffer::with_capacity(10);
        let _ = b.write(&b"abcdefgh"[..]);
        let _ = b.read(&mut output);
        assert_eq!(b.available_data(), 3);
        println!("{:?}", b.position());
    }

    /// `consume_noshift` moves the position past half capacity without
    /// triggering the shift that `consume` would perform.
    #[test]
    fn consume_without_shift() {
        let mut b = Buffer::with_capacity(10);
        let _ = b.write(&b"abcdefgh"[..]);
        b.consume_noshift(6);
        assert_eq!(b.position(), 6);
    }
}

1
third_party/rust/framehop/.cargo-checksum.json поставляемый Normal file
Просмотреть файл

@ -0,0 +1 @@
{"files":{"Cargo.toml":"6b9b3906662b434710edcd87739806a1b4d1312794f969b50e50705025c9d611","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"9ec2734f45b0d65192b9fee2307e05176b805a19efa994a553dcc5b2d3219a1e","Readme.md":"f1c7d3ed5b9ec8dbc9f87d742b213573d0667ca4b4d148cf1e72681115093e67","src/aarch64/arch.rs":"894d1d66ba487363cdf5f2dd66c20ff6560e3906e7c6ecd80e5fc11a682fa5d9","src/aarch64/cache.rs":"88bfc7ee6d38bd0a0fb5532f29fbcec88fd6a222db0e1a3398e0168d7c05d82c","src/aarch64/dwarf.rs":"800bb304e8d746fb1857c2da5486f278242103ea7eac45b1cdc135b1eb3b92f1","src/aarch64/instruction_analysis/epilogue.rs":"fe45d3fbb92dc7224526fb1e613019ebf36a9f2aa3f8fb52217d20744fbda3ae","src/aarch64/instruction_analysis/mod.rs":"6298b1c5b96a5ac8b1ca39a9764b1b71af8ca4a60b6a435e7404e0b02e700f6a","src/aarch64/instruction_analysis/prologue.rs":"065172ee6e2cb868c76dad0d704f0da15397e94424cb0c5522e4bffcae1b0f19","src/aarch64/macho.rs":"ec88fb0c02707d3d96a41f22bb2f698909af26b41ac9cca6b0244e837e240504","src/aarch64/mod.rs":"a94c4c0b1d3e08bce5b0baf9a6ba1b59f42da2809ce970b8a9050b9c3c46e00a","src/aarch64/pe.rs":"6800dfee18cb8eb96d8802c4a175cfca511d9503a7b6c09d0ce7e84c28d8a1a8","src/aarch64/unwind_rule.rs":"1119387590f16f4582672095f6c9462a94e3d4eaf448baa19c432c5e57fa055d","src/aarch64/unwinder.rs":"1dd24b21a49cf1b2fdcb5fada2afb54b2df269d3560be1e1f0063604593f26f1","src/aarch64/unwindregs.rs":"19e5fd82d62eac135c9075e75c0b031f3037a4b670060b3bc6746ef6d71685f8","src/add_signed.rs":"8c52b1d7c7dfc5cbdd477ff9dcce2e888f663a19e8ef6b89c209c06f7a532203","src/arch.rs":"f7dff12cdc2cf91986a5cb3c8d492f608264bd789841a0cfab1c7042233f0488","src/cache.rs":"90569eba164d72c3d20a0465d05a92bc35ceba38c21b944ced3c759f61be3268","src/code_address.rs":"1e2bd03a5813c0100171c7020dc05d8457e2670c7ef58c0c4e3627bf1d82f1b1","src/display_utils.rs":"2f874fd4778e7e304365d8b65d60dc4a5a8fa5ee2715740dc64e334989a1276d","src/dwarf.rs":"79689d0d16a5ccdb5a6c90d690602d1b9bb0100543c2922b47a4c5715004c581",
"src/error.rs":"bbcaa2ede65b465bff515e19c50f4a8b76c4fcb481297a50427fd21689121294","src/instruction_analysis.rs":"1023577c008a71805338cd45b8582774dd8c69c7bb349990992733297761743e","src/lib.rs":"f57770c147c5de29b4a3600675b459ce26925ad8c5be19ab0c9545883a7a9320","src/macho.rs":"472cd64d0ef4c4d7b91f3d19307875f61db67de52273fef186da9ede89016982","src/pe.rs":"d50f13dd153d124c3b76df7930295e2310381e7490d45563382775422a858bfe","src/rule_cache.rs":"d764fe5e9202314b77e536a7ebe7cb4d574494eeaeb76d2e7a13ff6b0770cf3b","src/unwind_result.rs":"ec6898d9e66b455978880884199d5570fd220c7d3d1101c6b854b9a2b6cea88d","src/unwind_rule.rs":"3335e0d2af34961ba4eff2d89db6bdde5950909f352539e96852c42b3ca16139","src/unwinder.rs":"1ccd6b02770ed54f8837615cd0da02be75e92da9db304e17a14b6cf8f36dd3e0","src/x86_64/arch.rs":"12ea62c70058eac1c2aa698594cc83fafc5d8ec7205596c4b6f6ff325bd1ed8d","src/x86_64/cache.rs":"57eecbc7a0eea21269ba87e80efd985b13d420b2546722ae1b7c73e2e1731169","src/x86_64/dwarf.rs":"6643cc16ac524c325c02ae3a980dd95da38f660328d7b75c1081454b85e24925","src/x86_64/instruction_analysis/epilogue.rs":"21b98f794ec11d501497904b352017d678ea57a2a1f1617a625b1044de1c79c5","src/x86_64/instruction_analysis/mod.rs":"df9089f73861574607dab07fda68b8c5bf1ff426401840a6c35503bda9996143","src/x86_64/instruction_analysis/prologue.rs":"57f2a9376a70ca708c0d9c85bd324edff8062f73102aa57a9c6319627d8189ad","src/x86_64/macho.rs":"1b8eb6622d36115ac664c54d2a8768cbadd17bdcf252e368cf5ea8a35339d5b9","src/x86_64/mod.rs":"160ad03cce68b6263028fa9eaf460a89fee57795a81adac8bed9c7d4fdf0ebad","src/x86_64/pe.rs":"25d850fc896e635831c90c1e4073900b32b02fff88858aa579a986aa4052d54e","src/x86_64/register_ordering.rs":"e4e01b5506eaf1d448874e10930035d4a67b8077803e2ceee305d9c5aa88cd2f","src/x86_64/unwind_rule.rs":"f5be036172ac469cbc8248047411cbd43931a1def52cc9fcacce54210abc9824","src/x86_64/unwinder.rs":"2d7228655cc427266e31f1405f44b9e81bb119b9eb0f4abb9a29b39697db2b44","src/x86_64/unwindregs.rs":"63b358fe31b613d456982360ff659927d540b502f9c1a3145c4b
a66beb4afdfc"},"package":"0fd28d2036d4fd99e3629487baca659e5af1c5d554e320168613be79028610fc"}

89
third_party/rust/framehop/Cargo.toml поставляемый Normal file
Просмотреть файл

@ -0,0 +1,89 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.
[package]
edition = "2021"
name = "framehop"
version = "0.12.1"
authors = ["Markus Stange <mstange.moz@gmail.com>"]
exclude = [
"/.github",
"/.vscode",
"/tests",
"/fixtures",
"/big-fixtures",
]
description = "Stack frame unwinding support for various formats"
documentation = "https://docs.rs/framehop/"
readme = "Readme.md"
keywords = [
"unwind",
"stackwalk",
"profiling",
"debug",
]
categories = ["development-tools::debugging"]
license = "MIT/Apache-2.0"
repository = "https://github.com/mstange/framehop/"
[profile.release]
debug = 2
[dependencies.arrayvec]
version = "0.7.4"
default-features = false
[dependencies.cfg-if]
version = "1.0.0"
[dependencies.fallible-iterator]
version = "0.3.0"
[dependencies.gimli]
version = "0.30"
features = ["read"]
default-features = false
[dependencies.macho-unwind-info]
version = "0.4.0"
optional = true
[dependencies.object]
version = "0.36"
features = ["read_core"]
optional = true
default-features = false
[dependencies.pe-unwind-info]
version = "0.2.1"
optional = true
[dev-dependencies.flate2]
version = "1.0.28"
[dev-dependencies.itertools]
version = "0.13"
[dev-dependencies.object]
version = "0.36"
[features]
default = [
"std",
"macho",
"pe",
]
macho = ["macho-unwind-info"]
pe = ["pe-unwind-info"]
std = [
"arrayvec/std",
"gimli/std",
]

201
third_party/rust/framehop/LICENSE-APACHE поставляемый Normal file
Просмотреть файл

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

25
third_party/rust/framehop/LICENSE-MIT поставляемый Normal file
Просмотреть файл

@ -0,0 +1,25 @@
Copyright (c) 2018 Markus Stange <mstange@themasta.com>
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

157
third_party/rust/framehop/Readme.md поставляемый Normal file
Просмотреть файл

@ -0,0 +1,157 @@
[![crates.io page](https://img.shields.io/crates/v/framehop.svg)](https://crates.io/crates/framehop)
[![docs.rs page](https://docs.rs/framehop/badge.svg)](https://docs.rs/framehop/)
# framehop
Framehop is a stack frame unwinder written in 100% Rust. It produces high quality stacks at high speed, on multiple platforms and architectures, without an expensive pre-processing step for unwind information. This makes it suitable for sampling profilers.
It currently supports unwinding x86_64 and aarch64, with unwind information formats commonly used on Windows, macOS, Linux and Android.
You give framehop register values, stack memory and unwind data, and framehop produces a list of return addresses.
Framehop can be used in the following scenarios:
- Live unwinding of a remote process. This is how [`samply`](https://github.com/mstange/samply/) uses it.
- Offline unwinding from saved registers and stack bytes, even on a different machine, a different OS, or a different CPU architecture.
- Live unwinding inside the same process. This is currently unproven, but should work as long as you can do heap allocation before sampling, in order to allocate a cache and to update the list of modules. The actual unwinding does not require any heap allocation and should work even inside a signal handler, as long as you use `MustNotAllocateDuringUnwind`.
As a user of framehop, your responsibilities are the following:
- You need to enumerate the modules (libraries) that are loaded in the sampled process ahead of time, or ideally maintain a live list which is updated whenever modules are loaded / unloaded.
- You need to provide address ranges and unwind section data for those modules.
- When sampling, you provide the register values and a callback to read arbitrary stack memory without segfaulting.
- On aarch64, picking the right bitmask to strip pointer authentication bits from return addresses is up to you.
- You will need to do symbol resolution yourself, if you want function names. Framehop only produces addresses, it does not do any symbolication.
In turn, framehop solves the following problems:
- It parses a number of different unwind information formats. At the moment, it supports the following:
- Apple's Compact Unwinding Format, in `__unwind_info` (macOS)
- DWARF CFI in `.eh_frame` (using `.eh_frame_hdr` as an index, if available)
- DWARF CFI in `.debug_frame`
- PE unwind info in `.pdata`, `.rdata` and `.xdata` (for Windows x86_64)
- It supports correct unwinding even when the program is interrupted inside a function prologue or epilogue. On macOS, it has to analyze assembly instructions in order to do this.
- On x86_64 and aarch64, it falls back to frame pointer unwinding if it cannot find unwind information for an address.
- It caches the unwind rule for each address in a fixed-size cache, so that repeated unwinding from the same address is even faster.
- It generates binary search indexes for unwind information formats which don't have them. Specifically, for `.debug_frame` and for `.eh_frame` without `.eh_frame_hdr`.
- It does a reasonable job of detecting the end of the stack, so that you can differentiate between properly terminated stacks and prematurely truncated stacks.
Framehop is not suitable for debuggers or to implement exception handling. Debuggers usually need to recover all register values for every frame whereas framehop only cares about return addresses. And exception handling needs the ability to call destructors, which is also a non-goal for framehop.
## Speed
Framehop is so fast that stack walking is a miniscule part of sampling in both scenarios where I've tried it.
In [this samply example](https://share.firefox.dev/3s6mQKl) of profiling a single-threaded Rust application, walking the stack takes a quarter of the time it takes to query macOS for the thread's register values. In [another samply example](https://share.firefox.dev/3ksWaPt) of profiling a Firefox build without frame pointers, the dwarf unwinding takes 4x as long as the querying of the register values, but is still overall cheaper than the cost of thread_suspend + thread_get_state + thread_resume.
In [this example of processing a `perf.data` file](https://share.firefox.dev/3vSQOTb), the bottleneck is reading the bytes from disk, rather than stackwalking. [With a warm file cache](https://share.firefox.dev/3Kt6sK1), the cost of stack walking is still comparable to the cost of copying the bytes from the file cache, and most of the stack walking time is spent reading return addresses from the stack bytes.
Framehop achieves this speed in the following ways:
1. It only recovers registers which are needed for computing return addresses. On x86_64 that's `rip`, `rsp` and `rbp`, and on aarch64 that's `lr`, `sp` and `fp`. All other registers are not needed - in theory they could be used as inputs to DWARF CFI expressions, but in practice they are not.
2. It uses zero-copy parsing wherever possible. For example, the bytes in `__unwind_info` are only accessed during unwinding, and the binary search happens right inside the original `__unwind_info` memory. For DWARF unwinding, framehop uses the excellent [`gimli` crate](https://github.com/gimli-rs/gimli/), which was written with performance in mind.
3. It uses binary search to find the correct unwind rule in all supported unwind information formats. For formats without a built-in index, it creates an index when the module is added.
4. It caches unwind rules based on address. In practice, the 509-slot cache achieves a hit rate of around 80% on complicated code like Firefox (with the cache being shared across all Firefox processes). When profiling simpler applications, the hit rate is likely much higher.
Furthermore, adding a module is fast too because framehop only does minimal up-front parsing and processing - really, the only thing it does is to create the index of FDE offsets for `.eh_frame` / `.debug_frame`.
## Current State and Roadmap
Framehop is still a work in progress. Its API is subject to change. The API churn probably won't quieten down at least until we have one or two 32 bit architectures implemented.
That said, framehop works remarkably well on the supported platforms, and is definitely worth a try if you can stomach the frequent API breakages. Please file issues if you run into any trouble or have suggestions.
Eventually I'd like to use framehop as a replacement for Lul in the Gecko profiler (Firefox's built-in profiler). For that we'll also want to add x86 support (for 32 bit Windows and Linux) and EHABI / EXIDX support (for 32 bit ARM Android).
## Example
```rust
use framehop::aarch64::{CacheAarch64, UnwindRegsAarch64, UnwinderAarch64};
use framehop::{ExplicitModuleSectionInfo, FrameAddress, Module};
let mut cache = CacheAarch64::<_>::new();
let mut unwinder = UnwinderAarch64::new();
let module = Module::new(
"mybinary".to_string(),
0x1003fc000..0x100634000,
0x1003fc000,
ExplicitModuleSectionInfo {
base_svma: 0x100000000,
text_svma: Some(0x100000b64..0x1001d2d18),
text: Some(vec![/* __text */]),
stubs_svma: Some(0x1001d2d18..0x1001d309c),
stub_helper_svma: Some(0x1001d309c..0x1001d3438),
got_svma: Some(0x100238000..0x100238010),
unwind_info: Some(vec![/* __unwind_info */]),
eh_frame_svma: Some(0x100237f80..0x100237ffc),
eh_frame: Some(vec![/* __eh_frame */]),
text_segment_svma: Some(0x1003fc000..0x100634000),
text_segment: Some(vec![/* __TEXT */]),
..Default::default()
},
);
unwinder.add_module(module);
let pc = 0x1003fc000 + 0x1292c0;
let lr = 0x1003fc000 + 0xe4830;
let sp = 0x10;
let fp = 0x20;
let stack = [
1, 2, 3, 4, 0x40, 0x1003fc000 + 0x100dc4,
5, 6, 0x70, 0x1003fc000 + 0x12ca28,
7, 8, 9, 10, 0x0, 0x0,
];
let mut read_stack = |addr| stack.get((addr / 8) as usize).cloned().ok_or(());
use framehop::Unwinder;
let mut iter = unwinder.iter_frames(
pc,
UnwindRegsAarch64::new(lr, sp, fp),
&mut cache,
&mut read_stack,
);
let mut frames = Vec::new();
while let Ok(Some(frame)) = iter.next() {
frames.push(frame);
}
assert_eq!(
frames,
vec![
FrameAddress::from_instruction_pointer(0x1003fc000 + 0x1292c0),
FrameAddress::from_return_address(0x1003fc000 + 0x100dc4).unwrap(),
FrameAddress::from_return_address(0x1003fc000 + 0x12ca28).unwrap()
]
);
```
## Recommended Reading and Tools
Here's a list of articles I found useful during development:
- [Reliable and Fast DWARF-Based Stack Unwinding](https://hal.inria.fr/hal-02297690/document), also available [as a presentation](https://deepspec.org/events/dsw18/zappa-nardelli-deepspec18.pdf). This is **the** unwinding reference document. If you want to read just one thing, read this. This article explains the background super clearly, and is very approachable. It shows how assembly and unwind information correspond to each other and has lots of examples that are easy to understand.
- [How fast can CFI/EXIDX-based stack unwinding be?](https://blog.mozilla.org/jseward/2013/08/29/how-fast-can-cfiexidx-based-stack-unwinding-be/), by Julian Seward
- [Unwinding a Stack by Hand with Frame Pointers and ORC](https://blogs.oracle.com/linux/post/unwinding-stack-frame-pointers-and-orc), by Stephen Brennan
- [Aarch64 DWARF register names](https://github.com/ARM-software/abi-aa/blob/main/aadwarf64/aadwarf64.rst#dwarf-register-names)
I used these tools very frequently:
- [Hopper Disassembler](https://www.hopperapp.com/), to look at assembly code.
- `llvm-dwarfdump --eh-frame mylib.so` to display DWARF unwind information.
- `llvm-objdump --section-headers mylib.so` to display section information.
- `unwindinfodump mylib.dylib` to display compact unwind information. (Install using `cargo install --examples macho-unwind-info`, see [macho-unwind-info](https://github.com/mstange/macho-unwind-info/blob/main/examples/unwindinfodump.rs).)
## License
Licensed under either of
* Apache License, Version 2.0 ([`LICENSE-APACHE`](./LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
* MIT license ([`LICENSE-MIT`](./LICENSE-MIT) or http://opensource.org/licenses/MIT)
at your option.
Unless you explicitly state otherwise, any contribution intentionally submitted
for inclusion in the work by you, as defined in the Apache-2.0 license, shall be
dual licensed as above, without any additional terms or conditions.

10
third_party/rust/framehop/src/aarch64/arch.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,10 @@
use super::unwind_rule::UnwindRuleAarch64;
use super::unwindregs::UnwindRegsAarch64;
use crate::arch::Arch;
/// The Aarch64 CPU architecture.
///
/// Zero-sized marker type; it ties together the aarch64-specific unwind
/// rule and register representations via the [`Arch`] trait so that the
/// generic unwinder machinery can be instantiated for this architecture.
pub struct ArchAarch64;

impl Arch for ArchAarch64 {
    // Compact, cacheable per-address unwind rule for aarch64.
    type UnwindRule = UnwindRuleAarch64;
    // Register set used during unwinding (lr, sp, fp — see UnwindRegsAarch64).
    type UnwindRegs = UnwindRegsAarch64;
}

32
third_party/rust/framehop/src/aarch64/cache.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,32 @@
use super::unwind_rule::*;
use crate::cache::*;
/// The unwinder cache type for [`UnwinderAarch64`](super::UnwinderAarch64).
///
/// Thin newtype wrapper around the generic [`Cache`], fixing the unwind
/// rule type to [`UnwindRuleAarch64`]. The allocation policy `P` controls
/// whether the cache may allocate while unwinding (defaults to
/// `MayAllocateDuringUnwind`).
pub struct CacheAarch64<P: AllocationPolicy = MayAllocateDuringUnwind>(
    pub Cache<UnwindRuleAarch64, P>,
);

impl CacheAarch64<MayAllocateDuringUnwind> {
    /// Create a new cache.
    pub fn new() -> Self {
        Self(Cache::new())
    }
}

impl<P: AllocationPolicy> CacheAarch64<P> {
    /// Create a new cache.
    ///
    /// Unlike [`CacheAarch64::new`], this constructor is available for any
    /// allocation policy `P`, not just the default one.
    pub fn new_in() -> Self {
        Self(Cache::new())
    }

    /// Returns a snapshot of the cache usage statistics.
    pub fn stats(&self) -> CacheStats {
        self.0.rule_cache.stats()
    }
}

impl<P: AllocationPolicy> Default for CacheAarch64<P> {
    fn default() -> Self {
        Self::new_in()
    }
}

195
third_party/rust/framehop/src/aarch64/dwarf.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,195 @@
use gimli::{
AArch64, CfaRule, Encoding, EvaluationStorage, Reader, ReaderOffset, Register, RegisterRule,
UnwindContextStorage, UnwindSection, UnwindTableRow,
};
use super::{arch::ArchAarch64, unwind_rule::UnwindRuleAarch64, unwindregs::UnwindRegsAarch64};
use crate::unwind_result::UnwindResult;
use crate::dwarf::{
eval_cfa_rule, eval_register_rule, ConversionError, DwarfUnwindRegs, DwarfUnwinderError,
DwarfUnwinding,
};
/// Exposes the three registers framehop tracks on aarch64 — sp, fp (x29)
/// and lr (x30) — to the generic DWARF rule-evaluation code. Every other
/// register is reported as unavailable (`None`).
impl DwarfUnwindRegs for UnwindRegsAarch64 {
    fn get(&self, register: Register) -> Option<u64> {
        let value = match register {
            AArch64::X30 => self.lr(),
            AArch64::X29 => self.fp(),
            AArch64::SP => self.sp(),
            _ => return None,
        };
        Some(value)
    }
}
impl DwarfUnwinding for ArchAarch64 {
    /// Unwind one frame using the DWARF CFI row `unwind_info`.
    ///
    /// Fast path: try to translate the row into a compact, cacheable
    /// [`UnwindRuleAarch64`] and return it as `UnwindResult::ExecRule`.
    /// Slow path: evaluate the CFA and the fp/lr register rules directly
    /// against the current registers and stack memory, update `regs` in
    /// place, and return the recovered return address as
    /// `UnwindResult::Uncacheable`.
    ///
    /// `is_first_frame` relaxes error handling: for the topmost frame,
    /// failed register-rule evaluation falls back to the current register
    /// values instead of erroring out, and the CFA-must-move-up sanity
    /// check is skipped.
    fn unwind_frame<F, R, UCS, ES>(
        section: &impl UnwindSection<R>,
        unwind_info: &UnwindTableRow<R::Offset, UCS>,
        encoding: Encoding,
        regs: &mut Self::UnwindRegs,
        is_first_frame: bool,
        read_stack: &mut F,
    ) -> Result<UnwindResult<Self::UnwindRule>, DwarfUnwinderError>
    where
        F: FnMut(u64) -> Result<u64, ()>,
        R: Reader,
        UCS: UnwindContextStorage<R::Offset>,
        ES: EvaluationStorage<R>,
    {
        let cfa_rule = unwind_info.cfa();
        let fp_rule = unwind_info.register(AArch64::X29);
        let lr_rule = unwind_info.register(AArch64::X30);
        match translate_into_unwind_rule(cfa_rule, &fp_rule, &lr_rule) {
            Ok(unwind_rule) => return Ok(UnwindResult::ExecRule(unwind_rule)),
            Err(_err) => {
                // Could not translate into a cacheable unwind rule. Fall back to the generic path.
                // eprintln!("Unwind rule translation failed: {:?}", err);
            }
        }
        let cfa = eval_cfa_rule::<R, _, ES>(section, cfa_rule, encoding, regs)
            .ok_or(DwarfUnwinderError::CouldNotRecoverCfa)?;
        let lr = regs.lr();
        let fp = regs.fp();
        let sp = regs.sp();
        let (fp, lr) = if !is_first_frame {
            // Sanity check: unwinding must move the stack pointer strictly
            // upwards, otherwise we could loop forever on broken CFI.
            if cfa <= sp {
                return Err(DwarfUnwinderError::StackPointerMovedBackwards);
            }
            let fp = eval_register_rule::<R, F, _, ES>(
                section, fp_rule, cfa, encoding, fp, regs, read_stack,
            )
            .ok_or(DwarfUnwinderError::CouldNotRecoverFramePointer)?;
            let lr = eval_register_rule::<R, F, _, ES>(
                section, lr_rule, cfa, encoding, lr, regs, read_stack,
            )
            .ok_or(DwarfUnwinderError::CouldNotRecoverReturnAddress)?;
            (fp, lr)
        } else {
            // For the first frame, be more lenient when encountering errors.
            // TODO: Find evidence of what this gives us. I think on macOS the prologue often has Unknown register rules
            // and we only encounter prologues for the first frame.
            let fp = eval_register_rule::<R, F, _, ES>(
                section, fp_rule, cfa, encoding, fp, regs, read_stack,
            )
            .unwrap_or(fp);
            let lr = eval_register_rule::<R, F, _, ES>(
                section, lr_rule, cfa, encoding, lr, regs, read_stack,
            )
            .unwrap_or(lr);
            (fp, lr)
        };
        // Commit the caller frame's registers: the CFA becomes the new sp.
        regs.set_fp(fp);
        regs.set_sp(cfa);
        regs.set_lr(lr);
        Ok(UnwindResult::Uncacheable(lr))
    }

    /// Rule to apply for addresses not covered by any FDE: do nothing for
    /// the first frame, otherwise fall back to frame-pointer unwinding.
    fn rule_if_uncovered_by_fde() -> Self::UnwindRule {
        UnwindRuleAarch64::NoOpIfFirstFrameOtherwiseFp
    }
}
fn register_rule_to_cfa_offset<RO: ReaderOffset>(
rule: &RegisterRule<RO>,
) -> Result<Option<i64>, ConversionError> {
match *rule {
RegisterRule::Undefined | RegisterRule::SameValue => Ok(None),
RegisterRule::Offset(offset) => Ok(Some(offset)),
_ => Err(ConversionError::RegisterNotStoredRelativeToCfa),
}
}
/// Try to express a DWARF CFI row as a compact, cacheable
/// [`UnwindRuleAarch64`].
///
/// Only the common shapes are handled: a CFA of `sp + constant` or
/// `x29 + constant`, with fp/lr either untouched or stored at constant
/// offsets from the CFA. Everything else (expressions, other base
/// registers, offsets that overflow the compact encodings) returns a
/// [`ConversionError`], and the caller falls back to full DWARF evaluation.
fn translate_into_unwind_rule<RO: ReaderOffset>(
    cfa_rule: &CfaRule<RO>,
    fp_rule: &RegisterRule<RO>,
    lr_rule: &RegisterRule<RO>,
) -> Result<UnwindRuleAarch64, ConversionError> {
    match cfa_rule {
        CfaRule::RegisterAndOffset { register, offset } => match *register {
            AArch64::SP => {
                // sp-based CFA. The compact rules store the sp adjustment in
                // units of 16 bytes (aarch64 keeps sp 16-byte aligned).
                let sp_offset_by_16 =
                    u16::try_from(offset / 16).map_err(|_| ConversionError::SpOffsetDoesNotFit)?;
                let lr_cfa_offset = register_rule_to_cfa_offset(lr_rule)?;
                let fp_cfa_offset = register_rule_to_cfa_offset(fp_rule)?;
                match (lr_cfa_offset, fp_cfa_offset) {
                    // Restoring fp without lr has no compact representation.
                    (None, Some(_)) => Err(ConversionError::RestoringFpButNotLr),
                    (None, None) => {
                        if let RegisterRule::Undefined = lr_rule {
                            // If the return address is undefined, this could have two reasons:
                            // - The column for the return address may have been manually set to "undefined"
                            //   using DW_CFA_undefined. This usually means that the function never returns
                            //   and can be treated as the root of the stack.
                            // - The column for the return may have been omitted from the DWARF CFI table.
                            //   Per spec (at least as of DWARF >= 3), this means that it should be treated
                            //   as undefined. But it seems that compilers often do this when they really mean
                            //   "same value".
                            // Gimli follows DWARF 3 and does not differentiate between "omitted" and "undefined".
                            Ok(
                                UnwindRuleAarch64::OffsetSpIfFirstFrameOtherwiseStackEndsHere {
                                    sp_offset_by_16,
                                },
                            )
                        } else {
                            Ok(UnwindRuleAarch64::OffsetSp { sp_offset_by_16 })
                        }
                    }
                    (Some(lr_cfa_offset), None) => {
                        // lr storage location, in 8-byte units relative to
                        // the pre-unwind sp (CFA offset + register offset).
                        let lr_storage_offset_from_sp_by_8 =
                            i16::try_from((offset + lr_cfa_offset) / 8)
                                .map_err(|_| ConversionError::LrStorageOffsetDoesNotFit)?;
                        Ok(UnwindRuleAarch64::OffsetSpAndRestoreLr {
                            sp_offset_by_16,
                            lr_storage_offset_from_sp_by_8,
                        })
                    }
                    (Some(lr_cfa_offset), Some(fp_cfa_offset)) => {
                        let lr_storage_offset_from_sp_by_8 =
                            i16::try_from((offset + lr_cfa_offset) / 8)
                                .map_err(|_| ConversionError::LrStorageOffsetDoesNotFit)?;
                        let fp_storage_offset_from_sp_by_8 =
                            i16::try_from((offset + fp_cfa_offset) / 8)
                                .map_err(|_| ConversionError::FpStorageOffsetDoesNotFit)?;
                        Ok(UnwindRuleAarch64::OffsetSpAndRestoreFpAndLr {
                            sp_offset_by_16,
                            fp_storage_offset_from_sp_by_8,
                            lr_storage_offset_from_sp_by_8,
                        })
                    }
                }
            }
            AArch64::X29 => {
                // fp-based CFA: both fp and lr must be restored for this to
                // be representable.
                let lr_cfa_offset = register_rule_to_cfa_offset(lr_rule)?
                    .ok_or(ConversionError::FramePointerRuleDoesNotRestoreLr)?;
                let fp_cfa_offset = register_rule_to_cfa_offset(fp_rule)?
                    .ok_or(ConversionError::FramePointerRuleDoesNotRestoreFp)?;
                if *offset == 16 && fp_cfa_offset == -16 && lr_cfa_offset == -8 {
                    // The canonical `stp fp, lr, [sp, #-16]!; mov fp, sp`
                    // frame layout.
                    Ok(UnwindRuleAarch64::UseFramePointer)
                } else {
                    let sp_offset_from_fp_by_8 = u16::try_from(offset / 8)
                        .map_err(|_| ConversionError::SpOffsetFromFpDoesNotFit)?;
                    let lr_storage_offset_from_fp_by_8 =
                        i16::try_from((offset + lr_cfa_offset) / 8)
                            .map_err(|_| ConversionError::LrStorageOffsetDoesNotFit)?;
                    let fp_storage_offset_from_fp_by_8 =
                        i16::try_from((offset + fp_cfa_offset) / 8)
                            .map_err(|_| ConversionError::FpStorageOffsetDoesNotFit)?;
                    Ok(UnwindRuleAarch64::UseFramepointerWithOffsets {
                        sp_offset_from_fp_by_8,
                        fp_storage_offset_from_fp_by_8,
                        lr_storage_offset_from_fp_by_8,
                    })
                }
            }
            _ => Err(ConversionError::CfaIsOffsetFromUnknownRegister),
        },
        CfaRule::Expression(_) => Err(ConversionError::CfaIsExpression),
    }
}

Просмотреть файл

@ -0,0 +1,702 @@
use super::super::unwind_rule::UnwindRuleAarch64;
/// Incremental state for scanning aarch64 instructions to detect a
/// function epilogue. All offsets are in bytes, relative to the stack
/// pointer value at the point where the scan started.
struct EpilogueDetectorAarch64 {
    // Accumulated sp adjustment performed by the epilogue instructions seen
    // so far.
    sp_offset: i32,
    // Where fp (x29) is loaded from, if a restore was seen.
    fp_offset_from_initial_sp: Option<i32>,
    // Where lr (x30) is loaded from, if a restore was seen.
    lr_offset_from_initial_sp: Option<i32>,
}
/// Outcome of feeding a single instruction word to
/// `EpilogueDetectorAarch64::step_instruction`.
enum EpilogueStepResult {
    // Instruction is consistent with an epilogue; keep scanning forward.
    NeedMore,
    // Instruction cannot appear in an epilogue; we are still in the body.
    FoundBodyInstruction(UnexpectedInstructionType),
    // A `ret` / `retab` was found; the epilogue is complete.
    FoundReturn,
    // A branch with an already-adjusted sp; treated as a tail call.
    FoundTailCall,
    // An `autibsp` was found; the following bytes still need to be checked
    // with `is_auth_tail_call`.
    CouldBeAuthTailCall,
}
/// Final verdict of `EpilogueDetectorAarch64::analyze_slice`.
#[derive(Clone, Debug, PartialEq, Eq)]
enum EpilogueResult {
    // The address does not look like it is inside an epilogue.
    ProbablyStillInBody(UnexpectedInstructionType),
    // Ran out of function bytes before seeing a return or tail call.
    ReachedFunctionEndWithoutReturn,
    // A full epilogue was recognized; the fields describe how to undo the
    // not-yet-executed part of it (byte offsets from the initial sp).
    FoundReturnOrTailCall {
        sp_offset: i32,
        fp_offset_from_initial_sp: Option<i32>,
        lr_offset_from_initial_sp: Option<i32>,
    },
}
/// Reason why an instruction was rejected as part of an epilogue.
/// Currently only used for diagnostics inside `EpilogueResult`.
#[derive(Clone, Debug, PartialEq, Eq)]
enum UnexpectedInstructionType {
    // A load/store pair that is not a 64-bit load.
    LoadOfWrongSize,
    // A load whose base register is not sp (x31).
    LoadReferenceRegisterNotSp,
    // An add/sub immediate that does not read and write sp.
    AddSubNotOperatingOnSp,
    // `autibsp` seen, but the expected check + tail-call sequence did not
    // follow.
    AutibspNotFollowedByExpectedTailCall,
    // A branch before any sp adjustment — likely an in-body branch, not a
    // tail call.
    BranchWithUnadjustedStackPointer,
    Unknown,
}
/// Classification of the *first* instruction at the analyzed address,
/// produced by `EpilogueDetectorAarch64::analyze_instruction`. Unlike the
/// stepping classification, this one may require looking at preceding
/// bytes to disambiguate tail calls.
#[derive(Clone, Debug, PartialEq, Eq)]
enum EpilogueInstructionType {
    NotExpectedInEpilogue,
    CouldBeTailCall {
        /// If auth tail call, the offset in bytes where the autibsp would be.
        /// If regular tail call, we just check if the previous instruction
        /// adjusts the stack pointer.
        offset_of_expected_autibsp: u8,
    },
    CouldBePartOfAuthTailCall {
        /// In bytes
        offset_of_expected_autibsp: u8,
    },
    VeryLikelyPartOfEpilogue,
}
impl EpilogueDetectorAarch64 {
    /// Create a detector in its initial state: no sp adjustment seen,
    /// no fp/lr restore observed yet.
    pub fn new() -> Self {
        Self {
            sp_offset: 0,
            fp_offset_from_initial_sp: None,
            lr_offset_from_initial_sp: None,
        }
    }

    /// Scan forward from `pc_offset` through `function_bytes` and decide
    /// whether the remaining instructions form a plausible epilogue.
    ///
    /// The first instruction is classified with [`Self::analyze_instruction`],
    /// which may also require looking *backwards* in `function_bytes` to
    /// recognize tail calls and pointer-auth tail-call sequences. Subsequent
    /// instructions are fed through [`Self::step_instruction`], which
    /// accumulates the sp adjustment and the fp/lr restore locations.
    pub fn analyze_slice(&mut self, function_bytes: &[u8], pc_offset: usize) -> EpilogueResult {
        let mut bytes = &function_bytes[pc_offset..];
        if bytes.len() < 4 {
            return EpilogueResult::ReachedFunctionEndWithoutReturn;
        }
        // aarch64 instructions are fixed-size 4-byte little-endian words.
        let mut word = u32::from_le_bytes([bytes[0], bytes[1], bytes[2], bytes[3]]);
        bytes = &bytes[4..];
        match Self::analyze_instruction(word) {
            EpilogueInstructionType::NotExpectedInEpilogue => {
                return EpilogueResult::ProbablyStillInBody(UnexpectedInstructionType::Unknown)
            }
            EpilogueInstructionType::CouldBeTailCall {
                offset_of_expected_autibsp,
            } => {
                // Check whether the preceding bytes contain the autibsp +
                // check sequence that would make this an auth tail call.
                if pc_offset >= offset_of_expected_autibsp as usize {
                    let auth_tail_call_bytes =
                        &function_bytes[pc_offset - offset_of_expected_autibsp as usize..];
                    // 0xd50323ff little-endian == autibsp.
                    if auth_tail_call_bytes[0..4] == [0xff, 0x23, 0x03, 0xd5]
                        && Self::is_auth_tail_call(&auth_tail_call_bytes[4..])
                    {
                        return EpilogueResult::FoundReturnOrTailCall {
                            sp_offset: 0,
                            fp_offset_from_initial_sp: None,
                            lr_offset_from_initial_sp: None,
                        };
                    }
                }
                // Otherwise: a plain `b` is only a tail call if the previous
                // instruction already adjusted the stack pointer.
                if pc_offset >= 4 {
                    let prev_b = &function_bytes[pc_offset - 4..pc_offset];
                    let prev_word =
                        u32::from_le_bytes([prev_b[0], prev_b[1], prev_b[2], prev_b[3]]);
                    if Self::instruction_adjusts_stack_pointer(prev_word) {
                        return EpilogueResult::FoundReturnOrTailCall {
                            sp_offset: 0,
                            fp_offset_from_initial_sp: None,
                            lr_offset_from_initial_sp: None,
                        };
                    }
                }
                return EpilogueResult::ProbablyStillInBody(UnexpectedInstructionType::Unknown);
            }
            EpilogueInstructionType::CouldBePartOfAuthTailCall {
                offset_of_expected_autibsp,
            } => {
                // We may be in the middle of the autibsp / check / branch
                // sequence; verify it by looking back to where the autibsp
                // should be.
                if pc_offset >= offset_of_expected_autibsp as usize {
                    let auth_tail_call_bytes =
                        &function_bytes[pc_offset - offset_of_expected_autibsp as usize..];
                    if auth_tail_call_bytes[0..4] == [0xff, 0x23, 0x03, 0xd5]
                        && Self::is_auth_tail_call(&auth_tail_call_bytes[4..])
                    {
                        return EpilogueResult::FoundReturnOrTailCall {
                            sp_offset: 0,
                            fp_offset_from_initial_sp: None,
                            lr_offset_from_initial_sp: None,
                        };
                    }
                }
                return EpilogueResult::ProbablyStillInBody(UnexpectedInstructionType::Unknown);
            }
            EpilogueInstructionType::VeryLikelyPartOfEpilogue => {}
        }
        // Step through the remaining instructions, accumulating register
        // restores, until we hit a return / tail call or leave the epilogue.
        loop {
            match self.step_instruction(word) {
                EpilogueStepResult::NeedMore => {
                    if bytes.len() < 4 {
                        return EpilogueResult::ReachedFunctionEndWithoutReturn;
                    }
                    word = u32::from_le_bytes([bytes[0], bytes[1], bytes[2], bytes[3]]);
                    bytes = &bytes[4..];
                    continue;
                }
                EpilogueStepResult::FoundBodyInstruction(uit) => {
                    return EpilogueResult::ProbablyStillInBody(uit);
                }
                EpilogueStepResult::FoundReturn | EpilogueStepResult::FoundTailCall => {}
                EpilogueStepResult::CouldBeAuthTailCall => {
                    if !Self::is_auth_tail_call(bytes) {
                        return EpilogueResult::ProbablyStillInBody(
                            UnexpectedInstructionType::AutibspNotFollowedByExpectedTailCall,
                        );
                    }
                }
            }
            // Reached only for the terminating arms above (return /
            // tail call / verified auth tail call).
            return EpilogueResult::FoundReturnOrTailCall {
                sp_offset: self.sp_offset,
                fp_offset_from_initial_sp: self.fp_offset_from_initial_sp,
                lr_offset_from_initial_sp: self.lr_offset_from_initial_sp,
            };
        }
    }

    /// Returns true if `word` is an instruction commonly used to release
    /// stack space at the end of a function (see bit patterns below).
    fn instruction_adjusts_stack_pointer(word: u32) -> bool {
        // Detect load from sp-relative offset with writeback.
        if (word >> 22) & 0b1011111011 == 0b1010100011 && (word >> 5) & 0b11111 == 31 {
            return true;
        }
        // Detect sub sp, sp, 0xXXXX
        if (word >> 23) & 0b111111111 == 0b100100010
            && word & 0b11111 == 31
            && (word >> 5) & 0b11111 == 31
        {
            return true;
        }
        false
    }

    /// Check whether the bytes immediately following an `autibsp` form the
    /// known pointer-auth check + tail-call sequence (see the disassembly
    /// below for the exact instructions matched).
    fn is_auth_tail_call(bytes_after_autibsp: &[u8]) -> bool {
        // libsystem_malloc.dylib contains over a hundred of these.
        // At the end of the function, after restoring the registers from the stack,
        // there's an autibsp instruction, followed by some check (not sure what it
        // does), and then a tail call. These instructions should all be counted as
        // part of the epilogue; returning at this point is just "follow lr" instead
        // of "use the frame pointer".
        //
        // 180139058 ff 23 03 d5     autibsp
        //
        // 18013905c d0 07 1e ca     eor        x16, lr, lr, lsl #1
        // 180139060 50 00 f0 b6     tbz        x16, 0x3e, $+0x8
        // 180139064 20 8e 38 d4     brk        #0xc471             ; "breakpoint trap"
        //
        // and then a tail call, of one of these forms:
        //
        // 180139068 13 00 00 14     b          some_outside_function
        //
        // 18013a364 f0 36 88 d2     mov        x16, #0xXXXX
        // 18013a368 70 08 1f d7     braa       xX, x16
        //
        if bytes_after_autibsp.len() < 16 {
            return false;
        }
        let eor_tbz_brk = &bytes_after_autibsp[..12];
        if eor_tbz_brk
            != [
                0xd0, 0x07, 0x1e, 0xca, 0x50, 0x00, 0xf0, 0xb6, 0x20, 0x8e, 0x38, 0xd4,
            ]
        {
            return false;
        }
        let first_tail_call_instruction_opcode = u32::from_le_bytes([
            bytes_after_autibsp[12],
            bytes_after_autibsp[13],
            bytes_after_autibsp[14],
            bytes_after_autibsp[15],
        ]);
        let bits_26_to_32 = first_tail_call_instruction_opcode >> 26;
        if bits_26_to_32 == 0b000101 {
            // This is a `b` instruction. We've found the tail call.
            return true;
        }
        // If we get here, it's either not a recognized instruction sequence,
        // or the tail call is of the form `mov x16, #0xXXXX`, `braa xX, x16`.
        if bytes_after_autibsp.len() < 20 {
            return false;
        }
        let bits_23_to_32 = first_tail_call_instruction_opcode >> 23;
        let is_64_mov = (bits_23_to_32 & 0b111000111) == 0b110000101;
        let result_reg = first_tail_call_instruction_opcode & 0b11111;
        if !is_64_mov || result_reg != 16 {
            return false;
        }
        let braa_opcode = u32::from_le_bytes([
            bytes_after_autibsp[16],
            bytes_after_autibsp[17],
            bytes_after_autibsp[18],
            bytes_after_autibsp[19],
        ]);
        (braa_opcode & 0xff_ff_fc_00) == 0xd7_1f_08_00 && (braa_opcode & 0b11111) == 16
    }

    /// Classify a single instruction word for the *initial* analysis step.
    /// Unlike `step_instruction`, this is a pure classification with no
    /// state update; tail-call candidates are verified later by looking at
    /// surrounding bytes.
    pub fn analyze_instruction(word: u32) -> EpilogueInstructionType {
        // Detect ret and retab
        if word == 0xd65f03c0 || word == 0xd65f0fff {
            return EpilogueInstructionType::VeryLikelyPartOfEpilogue;
        }
        // Detect autibsp
        if word == 0xd50323ff {
            return EpilogueInstructionType::CouldBePartOfAuthTailCall {
                offset_of_expected_autibsp: 0,
            };
        }
        // Detect `eor x16, lr, lr, lsl #1`
        if word == 0xca1e07d0 {
            return EpilogueInstructionType::CouldBePartOfAuthTailCall {
                offset_of_expected_autibsp: 4,
            };
        }
        // Detect `tbz x16, 0x3e, $+0x8`
        if word == 0xb6f00050 {
            return EpilogueInstructionType::CouldBePartOfAuthTailCall {
                offset_of_expected_autibsp: 8,
            };
        }
        // Detect `brk #0xc471`
        if word == 0xd4388e20 {
            return EpilogueInstructionType::CouldBePartOfAuthTailCall {
                offset_of_expected_autibsp: 12,
            };
        }
        // Detect `b` and `br xX`
        if (word >> 26) == 0b000101 || word & 0xff_ff_fc_1f == 0xd6_1f_00_00 {
            // This could be a branch with a target inside this function, or
            // a tail call outside of this function.
            return EpilogueInstructionType::CouldBeTailCall {
                offset_of_expected_autibsp: 16,
            };
        }
        // Detect `mov x16, #0xXXXX`
        if (word >> 23) & 0b111000111 == 0b110000101 && word & 0b11111 == 16 {
            return EpilogueInstructionType::CouldBePartOfAuthTailCall {
                offset_of_expected_autibsp: 16,
            };
        }
        // Detect `braa xX, x16`
        if word & 0xff_ff_fc_00 == 0xd7_1f_08_00 && word & 0b11111 == 16 {
            return EpilogueInstructionType::CouldBePartOfAuthTailCall {
                offset_of_expected_autibsp: 20,
            };
        }
        if (word >> 22) & 0b1011111001 == 0b1010100001 {
            // Section C3.3, Loads and stores.
            // but only loads that are commonly seen in prologues / epilogues (bits 29 and 31 are set)
            let writeback_bits = (word >> 23) & 0b11;
            if writeback_bits == 0b00 {
                // Not 64-bit load.
                return EpilogueInstructionType::NotExpectedInEpilogue;
            }
            let reference_reg = ((word >> 5) & 0b11111) as u16;
            if reference_reg != 31 {
                return EpilogueInstructionType::NotExpectedInEpilogue;
            }
            return EpilogueInstructionType::VeryLikelyPartOfEpilogue;
        }
        if (word >> 23) & 0b111111111 == 0b100100010 {
            // Section C3.4, Data processing - immediate
            // unsigned add imm, size class X (8 bytes)
            let result_reg = (word & 0b11111) as u16;
            let input_reg = ((word >> 5) & 0b11111) as u16;
            if result_reg != 31 || input_reg != 31 {
                return EpilogueInstructionType::NotExpectedInEpilogue;
            }
            return EpilogueInstructionType::VeryLikelyPartOfEpilogue;
        }
        EpilogueInstructionType::NotExpectedInEpilogue
    }

    /// Consume one instruction word, updating the accumulated sp offset and
    /// the fp/lr restore locations, and report whether the epilogue
    /// continues, terminates, or turns out not to be an epilogue at all.
    pub fn step_instruction(&mut self, word: u32) -> EpilogueStepResult {
        // Detect ret and retab
        if word == 0xd65f03c0 || word == 0xd65f0fff {
            return EpilogueStepResult::FoundReturn;
        }
        // Detect autibsp
        if word == 0xd50323ff {
            return EpilogueStepResult::CouldBeAuthTailCall;
        }
        // Detect b
        if (word >> 26) == 0b000101 {
            // This could be a branch with a target inside this function, or
            // a tail call outside of this function.
            // Let's use the following heuristic: If this instruction is followed
            // by valid epilogue instructions which adjusted the stack pointer, then
            // we treat it as a tail call.
            if self.sp_offset != 0 {
                return EpilogueStepResult::FoundTailCall;
            }
            return EpilogueStepResult::FoundBodyInstruction(
                UnexpectedInstructionType::BranchWithUnadjustedStackPointer,
            );
        }
        if (word >> 22) & 0b1011111001 == 0b1010100001 {
            // Section C3.3, Loads and stores.
            // but only those that are commonly seen in prologues / epilogues (bits 29 and 31 are set)
            let writeback_bits = (word >> 23) & 0b11;
            if writeback_bits == 0b00 {
                // Not 64-bit load/store.
                return EpilogueStepResult::FoundBodyInstruction(
                    UnexpectedInstructionType::LoadOfWrongSize,
                );
            }
            let reference_reg = ((word >> 5) & 0b11111) as u16;
            if reference_reg != 31 {
                return EpilogueStepResult::FoundBodyInstruction(
                    UnexpectedInstructionType::LoadReferenceRegisterNotSp,
                );
            }
            let is_preindexed_writeback = writeback_bits == 0b11; // TODO: are there preindexed loads? What do they mean?
            let is_postindexed_writeback = writeback_bits == 0b01;
            // imm7 is a signed 7-bit field scaled by 8 (64-bit pair loads);
            // the shifts sign-extend and scale it in one go.
            let imm7 = (((((word >> 15) & 0b1111111) as i16) << 9) >> 6) as i32;
            let reg_loc = if is_postindexed_writeback {
                self.sp_offset
            } else {
                self.sp_offset + imm7
            };
            // Record where fp (x29) / lr (x30) are being loaded from, if
            // either is part of this load pair.
            let pair_reg_1 = (word & 0b11111) as u16;
            if pair_reg_1 == 29 {
                self.fp_offset_from_initial_sp = Some(reg_loc);
            } else if pair_reg_1 == 30 {
                self.lr_offset_from_initial_sp = Some(reg_loc);
            }
            let pair_reg_2 = ((word >> 10) & 0b11111) as u16;
            if pair_reg_2 == 29 {
                self.fp_offset_from_initial_sp = Some(reg_loc + 8);
            } else if pair_reg_2 == 30 {
                self.lr_offset_from_initial_sp = Some(reg_loc + 8);
            }
            if is_preindexed_writeback || is_postindexed_writeback {
                self.sp_offset += imm7;
            }
            return EpilogueStepResult::NeedMore;
        }
        if (word >> 23) & 0b111111111 == 0b100100010 {
            // Section C3.4, Data processing - immediate
            // unsigned add imm, size class X (8 bytes)
            let result_reg = (word & 0b11111) as u16;
            let input_reg = ((word >> 5) & 0b11111) as u16;
            if result_reg != 31 || input_reg != 31 {
                return EpilogueStepResult::FoundBodyInstruction(
                    UnexpectedInstructionType::AddSubNotOperatingOnSp,
                );
            }
            let mut imm12 = ((word >> 10) & 0b111111111111) as i32;
            let shift_immediate_by_12 = ((word >> 22) & 0b1) == 0b1;
            if shift_immediate_by_12 {
                imm12 <<= 12
            }
            self.sp_offset += imm12;
            return EpilogueStepResult::NeedMore;
        }
        EpilogueStepResult::FoundBodyInstruction(UnexpectedInstructionType::Unknown)
    }
}
/// Analyze the instructions at `pc_offset` in `bytes` and, if they look
/// like a function epilogue, return the compact unwind rule that undoes
/// the not-yet-executed register restores.
///
/// Returns `None` when the code does not form a recognizable epilogue, or
/// when a recovered offset does not fit the compact rule encoding.
pub fn unwind_rule_from_detected_epilogue(
    bytes: &[u8],
    pc_offset: usize,
) -> Option<UnwindRuleAarch64> {
    let analysis = EpilogueDetectorAarch64::new().analyze_slice(bytes, pc_offset);
    let (sp_offset, fp_offset, lr_offset) = match analysis {
        EpilogueResult::FoundReturnOrTailCall {
            sp_offset,
            fp_offset_from_initial_sp,
            lr_offset_from_initial_sp,
        } => (sp_offset, fp_offset_from_initial_sp, lr_offset_from_initial_sp),
        EpilogueResult::ProbablyStillInBody(_)
        | EpilogueResult::ReachedFunctionEndWithoutReturn => return None,
    };
    // Compact rules encode the sp adjustment in 16-byte units and register
    // storage slots in 8-byte units; bail out if anything doesn't fit.
    let sp_offset_by_16 = u16::try_from(sp_offset / 16).ok()?;
    let storage_slot = |offset: i32| i16::try_from(offset / 8).ok();
    match (fp_offset, lr_offset) {
        (None, None) if sp_offset_by_16 == 0 => Some(UnwindRuleAarch64::NoOp),
        (None, None) => Some(UnwindRuleAarch64::OffsetSp { sp_offset_by_16 }),
        (None, Some(lr_offset)) => Some(UnwindRuleAarch64::OffsetSpAndRestoreLr {
            sp_offset_by_16,
            lr_storage_offset_from_sp_by_8: storage_slot(lr_offset)?,
        }),
        // Restoring fp without lr has no compact representation.
        (Some(_), None) => None,
        (Some(fp_offset), Some(lr_offset)) => {
            Some(UnwindRuleAarch64::OffsetSpAndRestoreFpAndLr {
                sp_offset_by_16,
                fp_storage_offset_from_sp_by_8: storage_slot(fp_offset)?,
                lr_storage_offset_from_sp_by_8: storage_slot(lr_offset)?,
            })
        }
    }
}
#[cfg(test)]
mod test {
    use super::*;

    // Each test below feeds raw little-endian A64 instruction bytes to the
    // epilogue detector at several pc offsets (always multiples of 4, since
    // A64 instructions are fixed-width) and checks the derived unwind rule.
    // The disassembly in the comments documents what the byte fixtures encode.

    #[test]
    fn test_epilogue_1() {
        // 1000e0d18 fd 7b 44 a9     ldp        fp, lr, [sp, #0x40]
        // 1000e0d1c f4 4f 43 a9     ldp        x20, x19, [sp, #0x30]
        // 1000e0d20 f6 57 42 a9     ldp        x22, x21, [sp, #0x20]
        // 1000e0d24 ff 43 01 91     add        sp, sp, #0x50
        // 1000e0d28 c0 03 5f d6     ret
        let bytes = &[
            0xfd, 0x7b, 0x44, 0xa9, 0xf4, 0x4f, 0x43, 0xa9, 0xf6, 0x57, 0x42, 0xa9, 0xff, 0x43,
            0x01, 0x91, 0xc0, 0x03, 0x5f, 0xd6,
        ];
        // Before the fp/lr restore: sp moves by 0x50 and both registers come
        // from the stack.
        assert_eq!(
            unwind_rule_from_detected_epilogue(bytes, 0),
            Some(UnwindRuleAarch64::OffsetSpAndRestoreFpAndLr {
                sp_offset_by_16: 5,
                fp_storage_offset_from_sp_by_8: 8,
                lr_storage_offset_from_sp_by_8: 9,
            })
        );
        // After fp/lr are restored, only the sp adjustment remains.
        assert_eq!(
            unwind_rule_from_detected_epilogue(bytes, 4),
            Some(UnwindRuleAarch64::OffsetSp { sp_offset_by_16: 5 })
        );
        assert_eq!(
            unwind_rule_from_detected_epilogue(bytes, 8),
            Some(UnwindRuleAarch64::OffsetSp { sp_offset_by_16: 5 })
        );
        assert_eq!(
            unwind_rule_from_detected_epilogue(bytes, 12),
            Some(UnwindRuleAarch64::OffsetSp { sp_offset_by_16: 5 })
        );
        // At the `ret` itself, everything is already undone.
        assert_eq!(
            unwind_rule_from_detected_epilogue(bytes, 16),
            Some(UnwindRuleAarch64::NoOp)
        );
        // Past the `ret`: not an epilogue.
        assert_eq!(unwind_rule_from_detected_epilogue(bytes, 20), None);
    }

    #[test]
    fn test_epilogue_with_retab() {
        // An epilogue ending in `retab` (pointer-auth return) instead of `ret`.
        // _malloc_zone_realloc epilogue
        // 18012466c e0 03 16 aa     mov        x0,x22
        // 180124670 fd 7b 43 a9     ldp        x29=>local_10,x30,[sp, #0x30]
        // 180124674 f4 4f 42 a9     ldp        x20,x19,[sp, #local_20]
        // 180124678 f6 57 41 a9     ldp        x22,x21,[sp, #local_30]
        // 18012467c f8 5f c4 a8     ldp        x24,x23,[sp], #0x40
        // 180124680 ff 0f 5f d6     retab
        // 180124684 a0 01 80 52     mov        w0,#0xd
        // 180124688 20 60 a6 72     movk       w0,#0x3301, LSL #16
        let bytes = &[
            0xe0, 0x03, 0x16, 0xaa, 0xfd, 0x7b, 0x43, 0xa9, 0xf4, 0x4f, 0x42, 0xa9, 0xf6, 0x57,
            0x41, 0xa9, 0xf8, 0x5f, 0xc4, 0xa8, 0xff, 0x0f, 0x5f, 0xd6, 0xa0, 0x01, 0x80, 0x52,
            0x20, 0x60, 0xa6, 0x72,
        ];
        // At the `mov x0,x22` we're not in the epilogue yet.
        assert_eq!(unwind_rule_from_detected_epilogue(bytes, 0), None);
        assert_eq!(
            unwind_rule_from_detected_epilogue(bytes, 4),
            Some(UnwindRuleAarch64::OffsetSpAndRestoreFpAndLr {
                sp_offset_by_16: 4,
                fp_storage_offset_from_sp_by_8: 6,
                lr_storage_offset_from_sp_by_8: 7
            })
        );
        assert_eq!(
            unwind_rule_from_detected_epilogue(bytes, 8),
            Some(UnwindRuleAarch64::OffsetSp { sp_offset_by_16: 4 })
        );
        assert_eq!(
            unwind_rule_from_detected_epilogue(bytes, 12),
            Some(UnwindRuleAarch64::OffsetSp { sp_offset_by_16: 4 })
        );
        assert_eq!(
            unwind_rule_from_detected_epilogue(bytes, 16),
            Some(UnwindRuleAarch64::OffsetSp { sp_offset_by_16: 4 })
        );
        assert_eq!(
            unwind_rule_from_detected_epilogue(bytes, 20),
            Some(UnwindRuleAarch64::NoOp)
        );
        // Past the `retab`: function body of the next code, not an epilogue.
        assert_eq!(unwind_rule_from_detected_epilogue(bytes, 24), None);
    }

    #[test]
    fn test_epilogue_with_retab_2() {
        // A minimal retab epilogue: only fp/lr are restored (with writeback).
        // _tiny_free_list_add_ptr:
        // ...
        // 18013e114 28 01 00 79     strh       w8, [x9]
        // 18013e118 fd 7b c1 a8     ldp        fp, lr, [sp], #0x10
        // 18013e11c ff 0f 5f d6     retab
        // 18013e120 e2 03 08 aa     mov        x2, x8
        // 18013e124 38 76 00 94     bl         _free_list_checksum_botch
        // ...
        let bytes = &[
            0x28, 0x01, 0x00, 0x79, 0xfd, 0x7b, 0xc1, 0xa8, 0xff, 0x0f, 0x5f, 0xd6, 0xe2, 0x03,
            0x08, 0xaa, 0x38, 0x76, 0x00, 0x94,
        ];
        assert_eq!(unwind_rule_from_detected_epilogue(bytes, 0), None);
        assert_eq!(
            unwind_rule_from_detected_epilogue(bytes, 4),
            Some(UnwindRuleAarch64::OffsetSpAndRestoreFpAndLr {
                sp_offset_by_16: 1,
                fp_storage_offset_from_sp_by_8: 0,
                lr_storage_offset_from_sp_by_8: 1
            })
        );
        assert_eq!(
            unwind_rule_from_detected_epilogue(bytes, 8),
            Some(UnwindRuleAarch64::NoOp)
        );
        assert_eq!(unwind_rule_from_detected_epilogue(bytes, 12), None);
        assert_eq!(unwind_rule_from_detected_epilogue(bytes, 16), None);
    }

    #[test]
    fn test_epilogue_with_regular_tail_call() {
        // A `b` to another function after the stack restore counts as a
        // tail call, so the restore sequence is still an epilogue.
        // (in rustup) __ZN126_$LT$$LT$toml..value..Value$u20$as$u20$serde..de..Deserialize$GT$..deserialize..ValueVisitor$u20$as$u20$serde..de..Visitor$GT$9visit_map17h0afd4b269ef00eebE
        // ...
        // 1002566b4 fc 6f c6 a8     ldp        x28, x27, [sp], #0x60
        // 1002566b8 bc ba ff 17     b          __ZN4core3ptr41drop_in_place$LT$toml..de..MapVisitor$GT$17hd4556de1a4edab42E
        // ...
        let bytes = &[0xfc, 0x6f, 0xc6, 0xa8, 0xbc, 0xba, 0xff, 0x17];
        assert_eq!(
            unwind_rule_from_detected_epilogue(bytes, 0),
            Some(UnwindRuleAarch64::OffsetSp { sp_offset_by_16: 6 })
        );
    }

    // This test fails at the moment.
    // NOTE(review): despite the comment above, this test is not #[ignore]d;
    // verify whether lookbehind support has landed or the test is expected red.
    #[test]
    fn test_epilogue_with_register_tail_call() {
        // This test requires lookbehind in the epilogue detection.
        // We want to detect the `br` as a tail call. We should do this
        // based on the fact that the previous instruction adjusted the
        // stack pointer.
        //
        // (in rustup) __ZN4core3fmt9Formatter3pad17h3f40041e7f99f180E
        // ...
        // 1000500bc fa 67 c5 a8     ldp        x26, x25, [sp], #0x50
        // 1000500c0 60 00 1f d6     br         x3
        // ...
        let bytes = &[0xfa, 0x67, 0xc5, 0xa8, 0x60, 0x00, 0x1f, 0xd6];
        assert_eq!(
            unwind_rule_from_detected_epilogue(bytes, 4),
            Some(UnwindRuleAarch64::NoOp)
        );
    }

    #[test]
    fn test_epilogue_with_auth_tail_call() {
        // A pointer-auth epilogue: autibsp + lr check (eor/tbz/brk) before a
        // tail call via `b`. All of these post-restore instructions must be
        // recognized as part of the epilogue.
        // _nanov2_free_definite_size
        // ...
        // 180139048 e1 03 13 aa     mov        x1, x19
        // 18013904c fd 7b 42 a9     ldp        fp, lr, [sp, #0x20]
        // 180139050 f4 4f 41 a9     ldp        x20, x19, [sp, #0x10]
        // 180139054 f6 57 c3 a8     ldp        x22, x21, [sp], #0x30
        // 180139058 ff 23 03 d5     autibsp
        // 18013905c d0 07 1e ca     eor        x16, lr, lr, lsl #1
        // 180139060 50 00 f0 b6     tbz        x16, 0x3e, loc_180139068
        // 180139064 20 8e 38 d4     brk        #0xc471
        //                       loc_180139068:
        // 180139068 13 00 00 14     b          _nanov2_free_to_block
        //                       loc_18013906c:
        // 18013906c a0 16 78 f9     ldr        x0, [x21, #0x7028]
        // 180139070 03 3c 40 f9     ldr        x3, [x0, #0x78]
        // ...
        let bytes = &[
            0xe1, 0x03, 0x13, 0xaa, 0xfd, 0x7b, 0x42, 0xa9, 0xf4, 0x4f, 0x41, 0xa9, 0xf6, 0x57,
            0xc3, 0xa8, 0xff, 0x23, 0x03, 0xd5, 0xd0, 0x07, 0x1e, 0xca, 0x50, 0x00, 0xf0, 0xb6,
            0x20, 0x8e, 0x38, 0xd4, 0x13, 0x00, 0x00, 0x14, 0xa0, 0x16, 0x78, 0xf9, 0x03, 0x3c,
            0x40, 0xf9,
        ];
        assert_eq!(unwind_rule_from_detected_epilogue(bytes, 0), None);
        assert_eq!(
            unwind_rule_from_detected_epilogue(bytes, 4),
            Some(UnwindRuleAarch64::OffsetSpAndRestoreFpAndLr {
                sp_offset_by_16: 3,
                fp_storage_offset_from_sp_by_8: 4,
                lr_storage_offset_from_sp_by_8: 5
            })
        );
        assert_eq!(
            unwind_rule_from_detected_epilogue(bytes, 8),
            Some(UnwindRuleAarch64::OffsetSp { sp_offset_by_16: 3 })
        );
        assert_eq!(
            unwind_rule_from_detected_epilogue(bytes, 12),
            Some(UnwindRuleAarch64::OffsetSp { sp_offset_by_16: 3 })
        );
        assert_eq!(
            unwind_rule_from_detected_epilogue(bytes, 16),
            Some(UnwindRuleAarch64::NoOp)
        );
        assert_eq!(
            unwind_rule_from_detected_epilogue(bytes, 20),
            Some(UnwindRuleAarch64::NoOp)
        );
        assert_eq!(
            unwind_rule_from_detected_epilogue(bytes, 24),
            Some(UnwindRuleAarch64::NoOp)
        );
        assert_eq!(
            unwind_rule_from_detected_epilogue(bytes, 28),
            Some(UnwindRuleAarch64::NoOp)
        );
    }

    #[test]
    fn test_epilogue_with_auth_tail_call_2() {
        // Same as above but the tail call goes through `braa` (authenticated
        // branch via register) instead of a plain `b`.
        // _malloc_zone_claimed_addres
        // ...
        // 1801457ac e1 03 13 aa     mov        x1, x19
        // 1801457b0 fd 7b 41 a9     ldp        fp, lr, [sp, #0x10]
        // 1801457b4 f4 4f c2 a8     ldp        x20, x19, [sp], #0x20
        // 1801457b8 ff 23 03 d5     autibsp
        // 1801457bc d0 07 1e ca     eor        x16, lr, lr, lsl #1
        // 1801457c0 50 00 f0 b6     tbz        x16, 0x3e, loc_1801457c8
        // 1801457c4 20 8e 38 d4     brk        #0xc471
        //                      loc_1801457c8:
        // 1801457c8 f0 77 9c d2     mov        x16, #0xe3bf
        // 1801457cc 50 08 1f d7     braa       x2, x16
        // ...
        let bytes = &[
            0xe1, 0x03, 0x13, 0xaa, 0xfd, 0x7b, 0x41, 0xa9, 0xf4, 0x4f, 0xc2, 0xa8, 0xff, 0x23,
            0x03, 0xd5, 0xd0, 0x07, 0x1e, 0xca, 0x50, 0x00, 0xf0, 0xb6, 0x20, 0x8e, 0x38, 0xd4,
            0xf0, 0x77, 0x9c, 0xd2, 0x50, 0x08, 0x1f, 0xd7,
        ];
        assert_eq!(unwind_rule_from_detected_epilogue(bytes, 0), None);
        assert_eq!(
            unwind_rule_from_detected_epilogue(bytes, 4),
            Some(UnwindRuleAarch64::OffsetSpAndRestoreFpAndLr {
                sp_offset_by_16: 2,
                fp_storage_offset_from_sp_by_8: 2,
                lr_storage_offset_from_sp_by_8: 3
            })
        );
        assert_eq!(
            unwind_rule_from_detected_epilogue(bytes, 8),
            Some(UnwindRuleAarch64::OffsetSp { sp_offset_by_16: 2 })
        );
        assert_eq!(
            unwind_rule_from_detected_epilogue(bytes, 12),
            Some(UnwindRuleAarch64::NoOp)
        );
        assert_eq!(
            unwind_rule_from_detected_epilogue(bytes, 16),
            Some(UnwindRuleAarch64::NoOp)
        );
        assert_eq!(
            unwind_rule_from_detected_epilogue(bytes, 20),
            Some(UnwindRuleAarch64::NoOp)
        );
        assert_eq!(
            unwind_rule_from_detected_epilogue(bytes, 24),
            Some(UnwindRuleAarch64::NoOp)
        );
        assert_eq!(
            unwind_rule_from_detected_epilogue(bytes, 28),
            Some(UnwindRuleAarch64::NoOp)
        );
    }
}

Просмотреть файл

@ -0,0 +1,25 @@
use super::arch::ArchAarch64;
use crate::instruction_analysis::InstructionAnalysis;
mod epilogue;
mod prologue;
use epilogue::unwind_rule_from_detected_epilogue;
use prologue::unwind_rule_from_detected_prologue;
impl InstructionAnalysis for ArchAarch64 {
    /// Run prologue detection around `pc_offset` in `text_bytes` and return
    /// the unwind rule it derives, if we appear to be inside a prologue.
    fn rule_from_prologue_analysis(
        text_bytes: &[u8],
        pc_offset: usize,
    ) -> Option<Self::UnwindRule> {
        // The detector wants the already-executed bytes and the not-yet-executed
        // bytes as two separate slices, split at the program counter.
        let (bytes_before_pc, bytes_at_and_after_pc) = text_bytes.split_at(pc_offset);
        unwind_rule_from_detected_prologue(bytes_before_pc, bytes_at_and_after_pc)
    }

    /// Run epilogue detection at `pc_offset` in `text_bytes` and return the
    /// unwind rule it derives, if we appear to be inside an epilogue.
    fn rule_from_epilogue_analysis(
        text_bytes: &[u8],
        pc_offset: usize,
    ) -> Option<Self::UnwindRule> {
        unwind_rule_from_detected_epilogue(text_bytes, pc_offset)
    }
}

Просмотреть файл

@ -0,0 +1,400 @@
use super::super::unwind_rule::UnwindRuleAarch64;
/// Walks backwards over already-executed aarch64 prologue instructions,
/// accumulating the stack-pointer adjustment they performed.
struct PrologueDetectorAarch64 {
    /// Net stack-pointer adjustment (in bytes) undone so far, i.e. how far
    /// sp is below its value at function entry.
    sp_offset: i32,
}
/// Outcome of reverse-stepping over a single instruction during prologue
/// analysis.
#[derive(Clone, Debug, PartialEq, Eq)]
enum PrologueStepResult {
    /// The instruction is not one we expect inside a prologue; the backwards
    /// walk stops here.
    UnexpectedInstruction(UnexpectedInstructionType),
    /// The instruction looks like a legitimate prologue instruction.
    ValidPrologueInstruction,
}
/// Final verdict of the prologue analysis.
#[derive(Clone, Debug, PartialEq, Eq)]
enum PrologueResult {
    /// The pc does not appear to be inside a prologue; the payload records
    /// which instruction made us give up.
    ProbablyAlreadyInBody(UnexpectedInstructionType),
    /// We are inside a prologue; `sp_offset` is how many bytes the prologue
    /// has subtracted from sp so far.
    FoundFunctionStart { sp_offset: i32 },
}
/// Classification of the *next* (not yet executed) instruction, used to decide
/// whether the pc is plausibly inside a prologue at all.
#[derive(Clone, Debug, PartialEq, Eq)]
enum PrologueInstructionType {
    /// Instruction never appears in a prologue.
    NotExpectedInPrologue,
    /// A stack store without writeback: only prologue-like if a preceding
    /// `sub sp, sp, #imm` has been observed.
    CouldBePartOfPrologueIfThereIsAlsoAStackPointerSub,
    /// Instruction strongly suggests we are in a prologue.
    VeryLikelyPartOfPrologue,
}
/// Reasons why an instruction disqualified the surrounding code from being
/// a prologue. Carried for diagnostics in `PrologueResult` /
/// `PrologueStepResult`.
#[derive(Clone, Debug, PartialEq, Eq)]
enum UnexpectedInstructionType {
    /// A load/store pair that is not the 64-bit form used in prologues.
    StoreOfWrongSize,
    /// A register-pair store whose base register is not sp.
    StoreReferenceRegisterNotSp,
    /// An add/sub immediate whose operands are not sp (or fp for `add`).
    AddSubNotOperatingOnSp,
    /// Fewer than 4 bytes remain at the pc, so no next instruction exists.
    NoNextInstruction,
    /// A plain stack store was seen without the expected preceding
    /// `sub sp, sp, #imm`.
    NoStackPointerSubBeforeStore,
    /// Anything else.
    Unknown,
}
impl PrologueDetectorAarch64 {
    /// Creates a detector with no stack-pointer adjustment observed yet.
    pub fn new() -> Self {
        Self { sp_offset: 0 }
    }

    /// Analyzes the bytes around the pc. `slice_from_start` holds the bytes
    /// before the pc (already executed), `slice_to_end` the bytes at and
    /// after it (not yet executed).
    pub fn analyze_slices(
        &mut self,
        slice_from_start: &[u8],
        slice_to_end: &[u8],
    ) -> PrologueResult {
        // There are at least two options of what we could do here:
        //  - We could walk forwards from the function start to the instruction pointer.
        //  - We could walk backwards from the instruction pointer to the function start.
        // Walking backwards is fine on arm64 because instructions are fixed size.
        // Walking forwards requires that we have a useful function start address.
        //
        // Unfortunately, we can't rely on having a useful function start address.
        // We get the funcion start address from the __unwind_info, which often collapses
        // consecutive functions with the same unwind rules into a single entry, discarding
        // the original function start addresses.
        // Concretely, this means that `slice_from_start` may start much earlier than the
        // current function.
        //
        // So we walk backwards. We first check the next instruction, and then
        // go backwards from the instruction pointer to the function start.
        // If the instruction we're about to execute is one that we'd expect to find in a prologue,
        // then we assume that we're in a prologue. Then we single-step backwards until we
        // either run out of instructions (which means we've definitely hit the start of the
        // function), or until we find an instruction that we would not expect in a prologue.
        // At that point we guess that this instruction must be belonging to the previous
        // function, and that we've succesfully found the start of the current function.
        if slice_to_end.len() < 4 {
            return PrologueResult::ProbablyAlreadyInBody(
                UnexpectedInstructionType::NoNextInstruction,
            );
        }
        // A64 instructions are 4 bytes, little endian.
        let next_instruction = u32::from_le_bytes([
            slice_to_end[0],
            slice_to_end[1],
            slice_to_end[2],
            slice_to_end[3],
        ]);
        let next_instruction_type = Self::analyze_prologue_instruction_type(next_instruction);
        if next_instruction_type == PrologueInstructionType::NotExpectedInPrologue {
            return PrologueResult::ProbablyAlreadyInBody(UnexpectedInstructionType::Unknown);
        }
        // Walk backwards over the already-executed instructions, undoing their
        // effect on sp, until one of them doesn't look like prologue material.
        let instructions = slice_from_start
            .chunks_exact(4)
            .map(|c| u32::from_le_bytes([c[0], c[1], c[2], c[3]]))
            .rev();
        for instruction in instructions {
            if let PrologueStepResult::UnexpectedInstruction(_) =
                self.reverse_step_instruction(instruction)
            {
                break;
            }
        }
        // A plain stack store (no writeback) only counts as prologue activity
        // if we also saw the preceding `sub sp, sp, #imm`.
        if next_instruction_type
            == PrologueInstructionType::CouldBePartOfPrologueIfThereIsAlsoAStackPointerSub
            && self.sp_offset == 0
        {
            return PrologueResult::ProbablyAlreadyInBody(
                UnexpectedInstructionType::NoStackPointerSubBeforeStore,
            );
        }
        PrologueResult::FoundFunctionStart {
            sp_offset: self.sp_offset,
        }
    }

    /// Check if the instruction indicates that we're likely in a prologue.
    pub fn analyze_prologue_instruction_type(word: u32) -> PrologueInstructionType {
        // Detect pacibsp (verify stack pointer authentication) and `mov x29, sp`.
        if word == 0xd503237f || word == 0x910003fd {
            return PrologueInstructionType::VeryLikelyPartOfPrologue;
        }
        let bits_22_to_32 = word >> 22;

        // Detect stores of register pairs to the stack.
        if bits_22_to_32 & 0b1011111001 == 0b1010100000 {
            // Section C3.3, Loads and stores.
            // Only stores that are commonly seen in prologues (bits 22, 29 and 31 are set)
            let writeback_bits = bits_22_to_32 & 0b110;
            let reference_reg = ((word >> 5) & 0b11111) as u16;
            // Reg 31 is sp in this position.
            if writeback_bits == 0b000 || reference_reg != 31 {
                return PrologueInstructionType::NotExpectedInPrologue;
            }

            // We are storing a register pair to the stack. This is something that
            // can happen in a prologue but it can also happen in the body of a
            // function.
            if writeback_bits == 0b100 {
                // No writeback.
                return PrologueInstructionType::CouldBePartOfPrologueIfThereIsAlsoAStackPointerSub;
            }

            return PrologueInstructionType::VeryLikelyPartOfPrologue;
        }

        // Detect sub instructions operating on the stack pointer.
        // Detect `add fp, sp, #0xXX` instructions
        if bits_22_to_32 & 0b1011111110 == 0b1001000100 {
            // Section C3.4, Data processing - immediate
            // unsigned add / sub imm, size class X (8 bytes)
            let result_reg = (word & 0b11111) as u16;
            let input_reg = ((word >> 5) & 0b11111) as u16;
            let is_sub = ((word >> 30) & 0b1) == 0b1;
            // `sub sp, sp, #imm` or `add fp, sp, #imm` (fp is x29).
            let expected_result_reg = if is_sub { 31 } else { 29 };
            if input_reg != 31 || result_reg != expected_result_reg {
                return PrologueInstructionType::NotExpectedInPrologue;
            }

            return PrologueInstructionType::VeryLikelyPartOfPrologue;
        }
        PrologueInstructionType::NotExpectedInPrologue
    }

    /// Step backwards over one (already executed) instruction.
    pub fn reverse_step_instruction(&mut self, word: u32) -> PrologueStepResult {
        // Detect pacibsp (verify stack pointer authentication)
        if word == 0xd503237f {
            return PrologueStepResult::ValidPrologueInstruction;
        }

        // Detect stores of register pairs to the stack.
        if (word >> 22) & 0b1011111001 == 0b1010100000 {
            // Section C3.3, Loads and stores.
            // but only those that are commonly seen in prologues / prologues (bits 29 and 31 are set)
            let writeback_bits = (word >> 23) & 0b11;
            if writeback_bits == 0b00 {
                // Not 64-bit load/store.
                return PrologueStepResult::UnexpectedInstruction(
                    UnexpectedInstructionType::StoreOfWrongSize,
                );
            }
            let reference_reg = ((word >> 5) & 0b11111) as u16;
            if reference_reg != 31 {
                return PrologueStepResult::UnexpectedInstruction(
                    UnexpectedInstructionType::StoreReferenceRegisterNotSp,
                );
            }
            let is_preindexed_writeback = writeback_bits == 0b11;
            let is_postindexed_writeback = writeback_bits == 0b01; // TODO: are there postindexed stores? What do they mean?
            if is_preindexed_writeback || is_postindexed_writeback {
                // Sign-extend the 7-bit immediate and scale by 8:
                // shifting left 9 then arithmetic-right 6 nets a signed * 8.
                let imm7 = (((((word >> 15) & 0b1111111) as i16) << 9) >> 6) as i32;
                self.sp_offset -= imm7; // - to undo the instruction
            }
            return PrologueStepResult::ValidPrologueInstruction;
        }

        // Detect sub instructions operating on the stack pointer.
        if (word >> 23) & 0b111111111 == 0b110100010 {
            // Section C3.4, Data processing - immediate
            // unsigned sub imm, size class X (8 bytes)
            let result_reg = (word & 0b11111) as u16;
            let input_reg = ((word >> 5) & 0b11111) as u16;
            if result_reg != 31 || input_reg != 31 {
                return PrologueStepResult::UnexpectedInstruction(
                    UnexpectedInstructionType::AddSubNotOperatingOnSp,
                );
            }
            let mut imm12 = ((word >> 10) & 0b111111111111) as i32;
            // LSL #12 form of the immediate.
            let shift_immediate_by_12 = ((word >> 22) & 0b1) == 0b1;
            if shift_immediate_by_12 {
                imm12 <<= 12
            }
            self.sp_offset += imm12; // + to undo the sub instruction
            return PrologueStepResult::ValidPrologueInstruction;
        }
        PrologueStepResult::UnexpectedInstruction(UnexpectedInstructionType::Unknown)
    }
}
/// Runs the prologue detector on the bytes before (`slice_from_start`) and at
/// or after (`slice_to_end`) the pc, and converts its verdict into an unwind
/// rule. Returns `None` when the pc does not appear to be inside a prologue
/// or the sp adjustment doesn't fit the rule's encoding.
pub fn unwind_rule_from_detected_prologue(
    slice_from_start: &[u8],
    slice_to_end: &[u8],
) -> Option<UnwindRuleAarch64> {
    let mut detector = PrologueDetectorAarch64::new();
    // Extract the accumulated sp adjustment, bailing out if we're in the body.
    let sp_offset = match detector.analyze_slices(slice_from_start, slice_to_end) {
        PrologueResult::ProbablyAlreadyInBody(_) => return None,
        PrologueResult::FoundFunctionStart { sp_offset } => sp_offset,
    };
    // sp is 16-byte aligned; the rule stores the offset in units of 16 bytes.
    let sp_offset_by_16 = u16::try_from(sp_offset / 16).ok()?;
    Some(if sp_offset_by_16 != 0 {
        UnwindRuleAarch64::OffsetSp { sp_offset_by_16 }
    } else {
        UnwindRuleAarch64::NoOp
    })
}
#[cfg(test)]
mod test {
    use super::*;

    // Each test splits a fixed byte sequence at various pc offsets
    // (`&bytes[..k]` = executed, `&bytes[k..]` = upcoming) and checks the
    // rule derived by the prologue detector. Disassembly is in the comments.

    #[test]
    fn test_prologue_1() {
        // gimli::read::unit::parse_attribute
        // 1000dfeb8 ff 43 01 d1     sub        sp,sp,#0x50
        // 1000dfebc f6 57 02 a9     stp        x22,x21,[sp, #local_30]
        // 1000dfec0 f4 4f 03 a9     stp        x20,x19,[sp, #local_20]
        // 1000dfec4 fd 7b 04 a9     stp        x29,x30,[sp, #local_10]
        // 1000dfec8 fd 03 01 91     add        x29,sp,#0x40
        // 1000dfecc f4 03 04 aa     mov        x20,x4
        // 1000dfed0 f5 03 01 aa     mov        x21,x1
        let bytes = &[
            0xff, 0x43, 0x01, 0xd1, 0xf6, 0x57, 0x02, 0xa9, 0xf4, 0x4f, 0x03, 0xa9, 0xfd, 0x7b,
            0x04, 0xa9, 0xfd, 0x03, 0x01, 0x91, 0xf4, 0x03, 0x04, 0xaa, 0xf5, 0x03, 0x01, 0xaa,
        ];
        // Before the `sub`: sp untouched.
        assert_eq!(
            unwind_rule_from_detected_prologue(&bytes[..0], &bytes[0..]),
            Some(UnwindRuleAarch64::NoOp)
        );
        // After the `sub sp, sp, #0x50`: offset is 5 * 16 bytes.
        assert_eq!(
            unwind_rule_from_detected_prologue(&bytes[..4], &bytes[4..]),
            Some(UnwindRuleAarch64::OffsetSp { sp_offset_by_16: 5 })
        );
        assert_eq!(
            unwind_rule_from_detected_prologue(&bytes[..8], &bytes[8..]),
            Some(UnwindRuleAarch64::OffsetSp { sp_offset_by_16: 5 })
        );
        assert_eq!(
            unwind_rule_from_detected_prologue(&bytes[..12], &bytes[12..]),
            Some(UnwindRuleAarch64::OffsetSp { sp_offset_by_16: 5 })
        );
        assert_eq!(
            unwind_rule_from_detected_prologue(&bytes[..16], &bytes[16..]),
            Some(UnwindRuleAarch64::OffsetSp { sp_offset_by_16: 5 })
        );
        // `mov` instructions end the prologue; no rule is derived.
        assert_eq!(
            unwind_rule_from_detected_prologue(&bytes[..20], &bytes[20..]),
            None
        );
        assert_eq!(
            unwind_rule_from_detected_prologue(&bytes[..24], &bytes[24..]),
            None
        );
        assert_eq!(
            unwind_rule_from_detected_prologue(&bytes[..28], &bytes[28..]),
            None
        );
    }

    #[test]
    fn test_prologue_with_pacibsp() {
        // The fixture starts with the tail of the *previous* function, to
        // exercise the backwards walk stopping at its `str`/`ret`.
        // 1801245c4 08 58 29 b8     str        w8,[x0, w9, UXTW #0x2]
        // 1801245c8 c0 03 5f d6     ret
        // _malloc_zone_realloc
        // 1801245cc 7f 23 03 d5     pacibsp
        // 1801245d0 f8 5f bc a9     stp        x24,x23,[sp, #local_40]!
        // 1801245d4 f6 57 01 a9     stp        x22,x21,[sp, #local_30]
        // 1801245d8 f4 4f 02 a9     stp        x20,x19,[sp, #local_20]
        // 1801245dc fd 7b 03 a9     stp        x29,x30,[sp, #local_10]
        // 1801245e0 fd c3 00 91     add        x29,sp,#0x30
        // 1801245e4 f3 03 02 aa     mov        x19,x2
        // 1801245e8 f4 03 01 aa     mov        x20,x1
        let bytes = &[
            0x08, 0x58, 0x29, 0xb8, 0xc0, 0x03, 0x5f, 0xd6, 0x7f, 0x23, 0x03, 0xd5, 0xf8, 0x5f,
            0xbc, 0xa9, 0xf6, 0x57, 0x01, 0xa9, 0xf4, 0x4f, 0x02, 0xa9, 0xfd, 0x7b, 0x03, 0xa9,
            0xfd, 0xc3, 0x00, 0x91, 0xf3, 0x03, 0x02, 0xaa, 0xf4, 0x03, 0x01, 0xaa,
        ];
        // Inside the previous function's body: no prologue.
        assert_eq!(
            unwind_rule_from_detected_prologue(&bytes[..0], &bytes[0..]),
            None
        );
        assert_eq!(
            unwind_rule_from_detected_prologue(&bytes[..4], &bytes[4..]),
            None
        );
        // At / after the `pacibsp`, before any sp adjustment.
        assert_eq!(
            unwind_rule_from_detected_prologue(&bytes[..8], &bytes[8..]),
            Some(UnwindRuleAarch64::NoOp)
        );
        assert_eq!(
            unwind_rule_from_detected_prologue(&bytes[..12], &bytes[12..]),
            Some(UnwindRuleAarch64::NoOp)
        );
        // After the pre-indexed `stp ..., [sp, #-0x40]!`: sp moved by 4 * 16.
        assert_eq!(
            unwind_rule_from_detected_prologue(&bytes[..16], &bytes[16..]),
            Some(UnwindRuleAarch64::OffsetSp { sp_offset_by_16: 4 })
        );
        assert_eq!(
            unwind_rule_from_detected_prologue(&bytes[..20], &bytes[20..]),
            Some(UnwindRuleAarch64::OffsetSp { sp_offset_by_16: 4 })
        );
        assert_eq!(
            unwind_rule_from_detected_prologue(&bytes[..24], &bytes[24..]),
            Some(UnwindRuleAarch64::OffsetSp { sp_offset_by_16: 4 })
        );
        assert_eq!(
            unwind_rule_from_detected_prologue(&bytes[..28], &bytes[28..]),
            Some(UnwindRuleAarch64::OffsetSp { sp_offset_by_16: 4 })
        );
        // At the `add x29,sp,...` + `mov` sequence: prologue is over.
        assert_eq!(
            unwind_rule_from_detected_prologue(&bytes[..32], &bytes[32..]),
            None
        );
    }

    #[test]
    fn test_prologue_with_mov_fp_sp() {
        // _tiny_free_list_add_ptr
        // 180126e94 7f 23 03 d5     pacibsp
        // 180126e98 fd 7b bf a9     stp        x29,x30,[sp, #local_10]!
        // 180126e9c fd 03 00 91     mov        x29,sp
        // 180126ea0 68 04 00 51     sub        w8,w3,#0x1
        let bytes = &[
            0x7f, 0x23, 0x03, 0xd5, 0xfd, 0x7b, 0xbf, 0xa9, 0xfd, 0x03, 0x00, 0x91, 0x68, 0x04,
            0x00, 0x51,
        ];
        assert_eq!(
            unwind_rule_from_detected_prologue(&bytes[..0], &bytes[0..]),
            Some(UnwindRuleAarch64::NoOp)
        );
        assert_eq!(
            unwind_rule_from_detected_prologue(&bytes[..4], &bytes[4..]),
            Some(UnwindRuleAarch64::NoOp)
        );
        assert_eq!(
            unwind_rule_from_detected_prologue(&bytes[..8], &bytes[8..]),
            Some(UnwindRuleAarch64::OffsetSp { sp_offset_by_16: 1 })
        );
        // The 32-bit `sub w8,...` is not a prologue instruction.
        assert_eq!(
            unwind_rule_from_detected_prologue(&bytes[..12], &bytes[12..]),
            None
        );
    }

    #[test]
    fn test_no_prologue_despite_stack_store() {
        // We're in the middle of a function and are storing something to the stack.
        // But this is not a prologue, so it shouldn't be detected as one.
        //
        // 1004073d0 e8 17 00 f9     str        x8,[sp, #0x28]
        // 1004073d4 03 00 00 14     b          LAB_1004073e0
        // 1004073d8 ff ff 01 a9     stp        xzr,xzr,[sp, #0x18] ; <-- stores the pair xzr, xzr on the stack
        // 1004073dc ff 17 00 f9     str        xzr,[sp, #0x28]
        // 1004073e0 e0 03 00 91     mov        x0,sp
        let bytes = &[
            0xe8, 0x17, 0x00, 0xf9, 0x03, 0x00, 0x00, 0x14, 0xff, 0xff, 0x01, 0xa9, 0xff, 0x17,
            0x00, 0xf9, 0xe0, 0x03, 0x00, 0x91,
        ];
        assert_eq!(
            unwind_rule_from_detected_prologue(&bytes[..0], &bytes[0..]),
            None
        );
        assert_eq!(
            unwind_rule_from_detected_prologue(&bytes[..4], &bytes[4..]),
            None
        );
        assert_eq!(
            unwind_rule_from_detected_prologue(&bytes[..8], &bytes[8..]),
            None
        );
        assert_eq!(
            unwind_rule_from_detected_prologue(&bytes[..12], &bytes[12..]),
            None
        );
        assert_eq!(
            unwind_rule_from_detected_prologue(&bytes[..16], &bytes[16..]),
            None
        );
    }
}

96
third_party/rust/framehop/src/aarch64/macho.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,96 @@
use super::arch::ArchAarch64;
use super::unwind_rule::UnwindRuleAarch64;
use crate::instruction_analysis::InstructionAnalysis;
use crate::macho::{CompactUnwindInfoUnwinderError, CompactUnwindInfoUnwinding, CuiUnwindResult};
use macho_unwind_info::opcodes::OpcodeArm64;
use macho_unwind_info::Function;
impl CompactUnwindInfoUnwinding for ArchAarch64 {
    /// Derive an unwind action for the frame at `address_offset_within_function`
    /// inside `function`, using the function's compact unwind opcode and,
    /// for the topmost frame, prologue/epilogue instruction analysis.
    fn unwind_frame(
        function: Function,
        is_first_frame: bool,
        address_offset_within_function: usize,
        function_bytes: Option<&[u8]>,
    ) -> Result<CuiUnwindResult<UnwindRuleAarch64>, CompactUnwindInfoUnwinderError> {
        let parsed_opcode = OpcodeArm64::parse(function.opcode);
        if is_first_frame {
            // A null opcode on the first frame: nothing was pushed, just
            // follow lr.
            if parsed_opcode == OpcodeArm64::Null {
                return Ok(CuiUnwindResult::ExecRule(UnwindRuleAarch64::NoOp));
            }
            // The pc might be in a prologue or an epilogue. The compact unwind info format ignores
            // prologues and epilogues; the opcodes only describe the function body. So we do some
            // instruction analysis to check for prologues and epilogues.
            let analysis_rule = function_bytes.and_then(|bytes| {
                Self::rule_from_instruction_analysis(bytes, address_offset_within_function)
            });
            if let Some(rule) = analysis_rule {
                // We are inside a prologue / epilogue. Ignore the opcode and use the rule from
                // instruction analysis.
                return Ok(CuiUnwindResult::ExecRule(rule));
            }
        }

        // At this point we know with high certainty that we are in a function body.
        match parsed_opcode {
            OpcodeArm64::Null => Err(CompactUnwindInfoUnwinderError::FunctionHasNoInfo),
            OpcodeArm64::Frameless {
                stack_size_in_bytes,
            } => {
                // Frameless functions keep lr in the register; only the first
                // frame can rely on that.
                if !is_first_frame {
                    return Err(CompactUnwindInfoUnwinderError::CallerCannotBeFrameless);
                }
                let rule = if stack_size_in_bytes == 0 {
                    UnwindRuleAarch64::NoOp
                } else {
                    UnwindRuleAarch64::OffsetSp {
                        sp_offset_by_16: stack_size_in_bytes / 16,
                    }
                };
                Ok(CuiUnwindResult::ExecRule(rule))
            }
            OpcodeArm64::Dwarf { eh_frame_fde } => Ok(CuiUnwindResult::NeedDwarf(eh_frame_fde)),
            OpcodeArm64::FrameBased { .. } => {
                Ok(CuiUnwindResult::ExecRule(UnwindRuleAarch64::UseFramePointer))
            }
            OpcodeArm64::UnrecognizedKind(kind) => {
                Err(CompactUnwindInfoUnwinderError::BadOpcodeKind(kind))
            }
        }
    }

    /// Unwind rule for a pc inside dyld's `__stub_helper` section, derived
    /// from the fixed layout of that section (shown below).
    fn rule_for_stub_helper(
        offset: u32,
    ) -> Result<CuiUnwindResult<UnwindRuleAarch64>, CompactUnwindInfoUnwinderError> {
        // shared:
        //   +0x0  1d309c  B1 94 48 10        adr        x17, #0x100264330
        //   +0x4  1d30a0  1F 20 03 D5        nop
        //   +0x8  1d30a4  F0 47 BF A9        stp        x16, x17, [sp, #-0x10]!
        //   +0xc  1d30a8  1F 20 03 D5        nop
        //   +0x10 1d30ac  F0 7A 32 58        ldr        x16, #dyld_stub_binder_100238008
        //   +0x14 1d30b0  00 02 1F D6        br         x16
        // first stub:
        //   +0x18 1d30b4  50 00 00 18        ldr        w16, =0x1800005000000000
        //   +0x1c 1d30b8  F9 FF FF 17        b          0x1001d309c
        //   +0x20 1d30bc  00 00 00 00        (padding)
        // second stub:
        //   +0x24 1d30c0  50 00 00 18        ldr        w16, =0x1800005000000012
        //   +0x28 1d30c4  F6 FF FF 17        b          0x1001d309c
        //   +0x2c 1d30c8  00 00 00 00        (padding)
        let rule = match offset {
            // Before the `stp` writeback: stack untouched, just follow lr.
            0x0..=0xb => UnwindRuleAarch64::NoOp,
            // Between the `stp ..., [sp, #-0x10]!` and the stub entries:
            // undo the 0x10 sp adjustment, then follow lr.
            0xc..=0x17 => UnwindRuleAarch64::OffsetSp { sp_offset_by_16: 1 },
            // Inside the per-symbol stubs: stack untouched, just follow lr.
            _ => UnwindRuleAarch64::NoOp,
        };
        Ok(CuiUnwindResult::ExecRule(rule))
    }
}

17
third_party/rust/framehop/src/aarch64/mod.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,17 @@
mod arch;
mod cache;
mod dwarf;
mod instruction_analysis;
#[cfg(feature = "macho")]
mod macho;
#[cfg(feature = "pe")]
mod pe;
mod unwind_rule;
mod unwinder;
mod unwindregs;
pub use arch::*;
pub use cache::*;
pub use unwind_rule::*;
pub use unwinder::*;
pub use unwindregs::*;

19
third_party/rust/framehop/src/aarch64/pe.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,19 @@
use super::arch::ArchAarch64;
use crate::pe::{PeSections, PeUnwinderError, PeUnwinding};
use crate::unwind_result::UnwindResult;
/// PE (Windows) unwind support is not implemented for aarch64; every call
/// fails with `Aarch64Unsupported`.
impl PeUnwinding for ArchAarch64 {
    fn unwind_frame<F, D>(
        _sections: PeSections<D>,
        _address: u32,
        _regs: &mut Self::UnwindRegs,
        _is_first_frame: bool,
        _read_stack: &mut F,
    ) -> Result<UnwindResult<Self::UnwindRule>, PeUnwinderError>
    where
        F: FnMut(u64) -> Result<u64, ()>,
        D: core::ops::Deref<Target = [u8]>,
    {
        Err(PeUnwinderError::Aarch64Unsupported)
    }
}

264
third_party/rust/framehop/src/aarch64/unwind_rule.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,264 @@
use super::unwindregs::UnwindRegsAarch64;
use crate::add_signed::checked_add_signed;
use crate::error::Error;
use crate::unwind_rule::UnwindRule;
/// An unwind rule for aarch64: a compact recipe describing how to recover the
/// caller's (sp, fp, lr) from the current register values and stack memory.
/// Offsets are stored in units of 16 or 8 bytes to keep the rule small.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum UnwindRuleAarch64 {
    /// (sp, fp, lr) = (sp, fp, lr)
    /// Only possible for the first frame. Subsequent frames must get the
    /// return address from somewhere other than the lr register to avoid
    /// infinite loops.
    NoOp,
    /// (sp, fp, lr) = if is_first_frame (sp, fp, lr) else (fp + 16, *fp, *(fp + 8))
    /// Used as a fallback rule.
    NoOpIfFirstFrameOtherwiseFp,
    /// (sp, fp, lr) = (sp + 16x, fp, lr)
    /// Only possible for the first frame. Subsequent frames must get the
    /// return address from somewhere other than the lr register to avoid
    /// infinite loops.
    OffsetSp { sp_offset_by_16: u16 },
    /// (sp, fp, lr) = (sp + 16x, fp, lr) if is_first_frame
    /// This rule reflects an ambiguity in DWARF CFI information. When the
    /// return address is "undefined" because it was omitted, it could mean
    /// "same value", but this is only allowed for the first frame.
    OffsetSpIfFirstFrameOtherwiseStackEndsHere { sp_offset_by_16: u16 },
    /// (sp, fp, lr) = (sp + 16x, fp, *(sp + 8y))
    OffsetSpAndRestoreLr {
        sp_offset_by_16: u16,
        lr_storage_offset_from_sp_by_8: i16,
    },
    /// (sp, fp, lr) = (sp + 16x, *(sp + 8y), *(sp + 8z))
    OffsetSpAndRestoreFpAndLr {
        sp_offset_by_16: u16,
        fp_storage_offset_from_sp_by_8: i16,
        lr_storage_offset_from_sp_by_8: i16,
    },
    /// (sp, fp, lr) = (fp + 16, *fp, *(fp + 8))
    UseFramePointer,
    /// (sp, fp, lr) = (fp + 8x, *(fp + 8y), *(fp + 8z))
    UseFramepointerWithOffsets {
        sp_offset_from_fp_by_8: u16,
        fp_storage_offset_from_fp_by_8: i16,
        lr_storage_offset_from_fp_by_8: i16,
    },
}
impl UnwindRule for UnwindRuleAarch64 {
    type UnwindRegs = UnwindRegsAarch64;

    /// Stub functions don't adjust the stack; lr already holds the return
    /// address.
    fn rule_for_stub_functions() -> Self {
        UnwindRuleAarch64::NoOp
    }
    /// At the very first instruction of a function nothing has been pushed
    /// yet; lr already holds the return address.
    fn rule_for_function_start() -> Self {
        UnwindRuleAarch64::NoOp
    }
    /// When no better information is available, assume a frame-pointer chain.
    fn fallback_rule() -> Self {
        UnwindRuleAarch64::UseFramePointer
    }

    /// Applies this rule to `regs`, reading stack memory through `read_stack`.
    ///
    /// Returns `Ok(Some(return_address))` on success, `Ok(None)` when the
    /// stack legitimately ends here, or an `Err` when unwinding failed
    /// (unreadable stack, arithmetic overflow, or no forward progress).
    fn exec<F>(
        self,
        is_first_frame: bool,
        regs: &mut UnwindRegsAarch64,
        read_stack: &mut F,
    ) -> Result<Option<u64>, Error>
    where
        F: FnMut(u64) -> Result<u64, ()>,
    {
        let lr = regs.lr();
        let sp = regs.sp();
        let fp = regs.fp();

        let (new_lr, new_sp, new_fp) = match self {
            UnwindRuleAarch64::NoOp => {
                // Reusing lr on a non-first frame would loop forever.
                if !is_first_frame {
                    return Err(Error::DidNotAdvance);
                }
                (lr, sp, fp)
            }
            UnwindRuleAarch64::NoOpIfFirstFrameOtherwiseFp => {
                if is_first_frame {
                    (lr, sp, fp)
                } else {
                    // Fall back to a frame-pointer unwind (see UseFramePointer
                    // below for the stack layout).
                    let fp = regs.fp();
                    let new_sp = fp.checked_add(16).ok_or(Error::IntegerOverflow)?;
                    let new_lr =
                        read_stack(fp + 8).map_err(|_| Error::CouldNotReadStack(fp + 8))?;
                    let new_fp = read_stack(fp).map_err(|_| Error::CouldNotReadStack(fp))?;
                    if new_sp <= sp {
                        return Err(Error::FramepointerUnwindingMovedBackwards);
                    }
                    (new_lr, new_sp, new_fp)
                }
            }
            UnwindRuleAarch64::OffsetSpIfFirstFrameOtherwiseStackEndsHere { sp_offset_by_16 } => {
                // Not an error: "undefined return address" means the stack
                // ends at this frame.
                if !is_first_frame {
                    return Ok(None);
                }
                let sp_offset = u64::from(sp_offset_by_16) * 16;
                let new_sp = sp.checked_add(sp_offset).ok_or(Error::IntegerOverflow)?;
                (lr, new_sp, fp)
            }
            UnwindRuleAarch64::OffsetSp { sp_offset_by_16 } => {
                if !is_first_frame {
                    return Err(Error::DidNotAdvance);
                }
                let sp_offset = u64::from(sp_offset_by_16) * 16;
                let new_sp = sp.checked_add(sp_offset).ok_or(Error::IntegerOverflow)?;
                (lr, new_sp, fp)
            }
            UnwindRuleAarch64::OffsetSpAndRestoreLr {
                sp_offset_by_16,
                lr_storage_offset_from_sp_by_8,
            } => {
                let sp_offset = u64::from(sp_offset_by_16) * 16;
                let new_sp = sp.checked_add(sp_offset).ok_or(Error::IntegerOverflow)?;
                let lr_storage_offset = i64::from(lr_storage_offset_from_sp_by_8) * 8;
                let lr_location =
                    checked_add_signed(sp, lr_storage_offset).ok_or(Error::IntegerOverflow)?;
                let new_lr =
                    read_stack(lr_location).map_err(|_| Error::CouldNotReadStack(lr_location))?;
                (new_lr, new_sp, fp)
            }
            UnwindRuleAarch64::OffsetSpAndRestoreFpAndLr {
                sp_offset_by_16,
                fp_storage_offset_from_sp_by_8,
                lr_storage_offset_from_sp_by_8,
            } => {
                let sp_offset = u64::from(sp_offset_by_16) * 16;
                let new_sp = sp.checked_add(sp_offset).ok_or(Error::IntegerOverflow)?;
                let lr_storage_offset = i64::from(lr_storage_offset_from_sp_by_8) * 8;
                let lr_location =
                    checked_add_signed(sp, lr_storage_offset).ok_or(Error::IntegerOverflow)?;
                let new_lr =
                    read_stack(lr_location).map_err(|_| Error::CouldNotReadStack(lr_location))?;
                let fp_storage_offset = i64::from(fp_storage_offset_from_sp_by_8) * 8;
                let fp_location =
                    checked_add_signed(sp, fp_storage_offset).ok_or(Error::IntegerOverflow)?;
                let new_fp =
                    read_stack(fp_location).map_err(|_| Error::CouldNotReadStack(fp_location))?;
                (new_lr, new_sp, new_fp)
            }
            UnwindRuleAarch64::UseFramePointer => {
                // Do a frame pointer stack walk. Frame-based aarch64 functions store the caller's fp and lr
                // on the stack and then set fp to the address where the caller's fp is stored.
                //
                // Function prologue example (this one also stores x19, x20, x21 and x22):
                // stp  x22, x21, [sp, #-0x30]! ; subtracts 0x30 from sp, and then stores (x22, x21) at sp
                // stp  x20, x19, [sp, #0x10]   ; stores (x20, x19) at sp + 0x10 (== original sp - 0x20)
                // stp  fp, lr, [sp, #0x20]     ; stores (fp, lr) at sp + 0x20 (== original sp - 0x10)
                // add  fp, sp, #0x20           ; sets fp to the address where the old fp is stored on the stack
                //
                // Function epilogue:
                // ldp  fp, lr, [sp, #0x20]     ; restores fp and lr from the stack
                // ldp  x20, x19, [sp, #0x10]   ; restores x20 and x19
                // ldp  x22, x21, [sp], #0x30   ; restores x22 and x21, and then adds 0x30 to sp
                // ret                          ; follows lr to jump back to the caller
                //
                // Functions are called with bl ("branch with link"); bl puts the return address into the lr register.
                // When a function reaches its end, ret reads the return address from lr and jumps to it.
                // On aarch64, the stack pointer is always aligned to 16 bytes, and registers are usually written
                // to and read from the stack in pairs.
                // In frame-based functions, fp and lr are placed next to each other on the stack.
                // So when a function is called, we have the following stack layout:
                //
                //                                                                      [... rest of the stack]
                //                                                                      ^ sp           ^ fp
                //     bl some_function          ; jumps to the function and sets lr = return address
                //                                                                      [... rest of the stack]
                //                                                                      ^ sp           ^ fp
                //     adjust stack ptr, write some registers, and write fp and lr
                //       [more saved regs]  [caller's frame pointer]  [return address]  [... rest of the stack]
                //       ^ sp                                                           ^ fp
                //     add fp, sp, #0x20         ; sets fp to where the caller's fp is now stored
                //       [more saved regs]  [caller's frame pointer]  [return address]  [... rest of the stack]
                //       ^ sp               ^ fp
                //     <function contents>       ; can execute bl and overwrite lr with a new value
                //  ...  [more saved regs]  [caller's frame pointer]  [return address]  [... rest of the stack]
                //       ^ sp               ^ fp
                //
                // So: *fp is the caller's frame pointer, and *(fp + 8) is the return address.
                let fp = regs.fp();
                let new_sp = fp.checked_add(16).ok_or(Error::IntegerOverflow)?;
                let new_lr = read_stack(fp + 8).map_err(|_| Error::CouldNotReadStack(fp + 8))?;
                let new_fp = read_stack(fp).map_err(|_| Error::CouldNotReadStack(fp))?;
                // A zero saved fp terminates the frame-pointer chain.
                if new_fp == 0 {
                    return Ok(None);
                }
                // The chain must move towards higher addresses, or we'd loop.
                if new_fp <= fp || new_sp <= sp {
                    return Err(Error::FramepointerUnwindingMovedBackwards);
                }
                (new_lr, new_sp, new_fp)
            }
            UnwindRuleAarch64::UseFramepointerWithOffsets {
                sp_offset_from_fp_by_8,
                fp_storage_offset_from_fp_by_8,
                lr_storage_offset_from_fp_by_8,
            } => {
                let sp_offset_from_fp = u64::from(sp_offset_from_fp_by_8) * 8;
                let new_sp = fp
                    .checked_add(sp_offset_from_fp)
                    .ok_or(Error::IntegerOverflow)?;
                let lr_storage_offset = i64::from(lr_storage_offset_from_fp_by_8) * 8;
                let lr_location =
                    checked_add_signed(fp, lr_storage_offset).ok_or(Error::IntegerOverflow)?;
                let new_lr =
                    read_stack(lr_location).map_err(|_| Error::CouldNotReadStack(lr_location))?;
                let fp_storage_offset = i64::from(fp_storage_offset_from_fp_by_8) * 8;
                let fp_location =
                    checked_add_signed(fp, fp_storage_offset).ok_or(Error::IntegerOverflow)?;
                let new_fp =
                    read_stack(fp_location).map_err(|_| Error::CouldNotReadStack(fp_location))?;
                if new_fp == 0 {
                    return Ok(None);
                }
                if new_fp <= fp || new_sp <= sp {
                    return Err(Error::FramepointerUnwindingMovedBackwards);
                }
                (new_lr, new_sp, new_fp)
            }
        };
        // Strip pointer-authentication bits before treating lr as an address.
        let return_address = regs.lr_mask().strip_ptr_auth(new_lr);
        // A zero return address means we've reached the end of the stack.
        if return_address == 0 {
            return Ok(None);
        }
        // Require forward progress on all but the first frame.
        if !is_first_frame && new_sp == sp {
            return Err(Error::DidNotAdvance);
        }
        regs.set_lr(new_lr);
        regs.set_sp(new_sp);
        regs.set_fp(new_fp);
        Ok(Some(return_address))
    }
}
#[cfg(test)]
mod test {
    use super::*;
    // Walks a fake 16-word stack with the frame-pointer rules.
    // `read_stack(addr)` returns `stack[addr / 8]`, i.e. the stack is indexed
    // in 8-byte words starting at address 0.
    #[test]
    fn test_basic() {
        // Words 4/5 (addresses 0x20/0x28) hold the first frame record
        // (saved fp = 0x40, saved lr = 0x100200); words 8/9 (0x40/0x48) hold
        // the next one (saved fp = 0x70, saved lr = 0x100100).
        let stack = [
            1, 2, 3, 4, 0x40, 0x100200, 5, 6, 0x70, 0x100100, 7, 8, 9, 10, 0x0, 0x0,
        ];
        let mut read_stack = |addr| Ok(stack[(addr / 8) as usize]);
        // Initial registers: lr = 0x100300, sp = 0x10, fp = 0x20.
        let mut regs = UnwindRegsAarch64::new(0x100300, 0x10, 0x20);
        // NoOp on the first frame: reports the current lr and leaves sp alone.
        let res = UnwindRuleAarch64::NoOp.exec(true, &mut regs, &mut read_stack);
        assert_eq!(res, Ok(Some(0x100300)));
        assert_eq!(regs.sp(), 0x10);
        // UseFramePointer: new_lr = *(fp + 8), new_fp = *fp, new_sp = fp + 16.
        let res = UnwindRuleAarch64::UseFramePointer.exec(false, &mut regs, &mut read_stack);
        assert_eq!(res, Ok(Some(0x100200)));
        assert_eq!(regs.sp(), 0x30);
        assert_eq!(regs.fp(), 0x40);
        // Second frame-pointer step follows the chain to the next record.
        let res = UnwindRuleAarch64::UseFramePointer.exec(false, &mut regs, &mut read_stack);
        assert_eq!(res, Ok(Some(0x100100)));
        assert_eq!(regs.sp(), 0x50);
        assert_eq!(regs.fp(), 0x70);
        // The final record contains fp = 0, which terminates the walk (Ok(None)).
        let res = UnwindRuleAarch64::UseFramePointer.exec(false, &mut regs, &mut read_stack);
        assert_eq!(res, Ok(None));
    }
}

66
third_party/rust/framehop/src/aarch64/unwinder.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,66 @@
use core::ops::Deref;
use crate::{
unwinder::UnwinderInternal, AllocationPolicy, Error, FrameAddress, MayAllocateDuringUnwind,
Module, Unwinder,
};
use super::{ArchAarch64, CacheAarch64, UnwindRegsAarch64};
/// The unwinder for the Aarch64 CPU architecture. Use the [`Unwinder`] trait for unwinding.
///
/// Type arguments:
///
/// - `D`: The type for unwind section data in the modules. See [`Module`].
/// - `P`: The [`AllocationPolicy`].
pub struct UnwinderAarch64<D, P = MayAllocateDuringUnwind>(UnwinderInternal<D, ArchAarch64, P>);
impl<D, P> Default for UnwinderAarch64<D, P> {
    fn default() -> Self {
        Self::new()
    }
}
// Implemented by hand rather than derived — presumably to avoid the
// `D: Clone, P: Clone` bounds a derive would add; confirm against upstream.
impl<D, P> Clone for UnwinderAarch64<D, P> {
    fn clone(&self) -> Self {
        Self(self.0.clone())
    }
}
impl<D, P> UnwinderAarch64<D, P> {
    /// Create an unwinder for a process.
    pub fn new() -> Self {
        Self(UnwinderInternal::new())
    }
}
// Every trait method simply forwards to the architecture-generic internal
// unwinder, instantiated with `ArchAarch64`.
impl<D: Deref<Target = [u8]>, P: AllocationPolicy> Unwinder for UnwinderAarch64<D, P> {
    type UnwindRegs = UnwindRegsAarch64;
    type Cache = CacheAarch64<P>;
    type Module = Module<D>;
    fn add_module(&mut self, module: Module<D>) {
        self.0.add_module(module);
    }
    fn remove_module(&mut self, module_address_range_start: u64) {
        self.0.remove_module(module_address_range_start);
    }
    fn max_known_code_address(&self) -> u64 {
        self.0.max_known_code_address()
    }
    // Unwinds one frame, reading stack memory through `read_stack`. The
    // cache's inner storage is passed down to the internal unwinder.
    fn unwind_frame<F>(
        &self,
        address: FrameAddress,
        regs: &mut UnwindRegsAarch64,
        cache: &mut CacheAarch64<P>,
        read_stack: &mut F,
    ) -> Result<Option<u64>, Error>
    where
        F: FnMut(u64) -> Result<u64, ()>,
    {
        self.0.unwind_frame(address, regs, &mut cache.0, read_stack)
    }
}

182
third_party/rust/framehop/src/aarch64/unwindregs.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,182 @@
use core::fmt::Debug;
use crate::display_utils::HexNum;
/// The registers used for unwinding on Aarch64. We only need lr (x30), sp (x31),
/// and fp (x29).
///
/// We also have a [`PtrAuthMask`] which allows stripping off the pointer authentication
/// hash bits from the return address when unwinding through libraries which use pointer
/// authentication, e.g. in system libraries on macOS.
#[derive(Clone, Copy, PartialEq, Eq)]
pub struct UnwindRegsAarch64 {
    /// Mask applied to values stored in `lr` (see `set_lr`).
    lr_mask: PtrAuthMask,
    /// Link register (x30): the return address.
    lr: u64,
    /// Stack pointer.
    sp: u64,
    /// Frame pointer (x29).
    fp: u64,
}
/// A mask for stripping pointer authentication ("PAC") bits from aarch64
/// pointers.
///
/// Aarch64 CPUs can treat a pointer as a pair of an address and an encrypted
/// hash: the address lives in the low bits and the hash in the high bits.
/// Such pointers are called "authenticated" pointers, and dedicated
/// instructions verify them before they are dereferenced. Return addresses
/// can be authenticated too; functions return to them with the "retab"
/// instruction instead of the regular "ret".
///
/// A stack walker only wants the raw code address, so it must mask the
/// encrypted hash off of return addresses. On macOS arm64, system libraries
/// compiled for the arm64e target use pointer authentication for return
/// addresses.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub struct PtrAuthMask(pub u64);
impl PtrAuthMask {
    /// A no-op mask: every bit of the pointer is treated as an address bit,
    /// so nothing is stripped.
    pub fn new_no_strip() -> Self {
        Self(!0u64)
    }
    /// A mask for a 24-bit hash / 40-bit address split. This matches what
    /// macOS arm64e appears to use; it is unclear whether that split can be
    /// relied upon or whether it can change.
    ///
    /// On macOS arm64 this mask can also be applied to non-authenticated
    /// pointers without losing information; their top 24 bits appear to
    /// always be zero.
    pub fn new_24_40() -> Self {
        Self((1u64 << 40) - 1)
    }
    /// Derive a mask from the highest known valid address: all leading zero
    /// bits of that address are reserved for the hash.
    ///
    /// NOTE(review): `address == 0` makes `leading_zeros()` return 64, and a
    /// 64-bit shift on `u64` overflows (panics in debug builds) — callers
    /// presumably never pass 0; confirm.
    pub fn from_max_known_address(address: u64) -> Self {
        Self(u64::MAX >> address.leading_zeros())
    }
    /// Strip the hash bits from `ptr`, keeping only the address bits.
    #[inline(always)]
    pub fn strip_ptr_auth(&self, ptr: u64) -> u64 {
        ptr & self.0
    }
}
impl UnwindRegsAarch64 {
    /// Create a set of unwind register values which performs no pointer
    /// authentication stripping on `lr`.
    pub fn new(lr: u64, sp: u64, fp: u64) -> Self {
        Self::new_with_ptr_auth_mask(PtrAuthMask::new_no_strip(), lr, sp, fp)
    }
    /// Create a set of unwind register values; the given [`PtrAuthMask`] is
    /// applied to the initial `lr` value and to every value later stored via
    /// [`UnwindRegsAarch64::set_lr`].
    pub fn new_with_ptr_auth_mask(
        code_ptr_auth_mask: PtrAuthMask,
        lr: u64,
        sp: u64,
        fp: u64,
    ) -> Self {
        Self {
            lr_mask: code_ptr_auth_mask,
            lr: code_ptr_auth_mask.strip_ptr_auth(lr),
            sp,
            fp,
        }
    }
    /// The [`PtrAuthMask`] which is applied to every `lr` value.
    #[inline(always)]
    pub fn lr_mask(&self) -> PtrAuthMask {
        self.lr_mask
    }
    /// Get the lr register value.
    #[inline(always)]
    pub fn lr(&self) -> u64 {
        self.lr
    }
    /// Set the lr register value. The pointer authentication mask is applied
    /// before the value is stored.
    #[inline(always)]
    pub fn set_lr(&mut self, lr: u64) {
        self.lr = self.lr_mask.strip_ptr_auth(lr)
    }
    /// Get the stack pointer value.
    #[inline(always)]
    pub fn sp(&self) -> u64 {
        self.sp
    }
    /// Set the stack pointer value.
    #[inline(always)]
    pub fn set_sp(&mut self, sp: u64) {
        self.sp = sp
    }
    /// Get the frame pointer value (x29).
    #[inline(always)]
    pub fn fp(&self) -> u64 {
        self.fp
    }
    /// Set the frame pointer value (x29).
    #[inline(always)]
    pub fn set_fp(&mut self, fp: u64) {
        self.fp = fp
    }
}
// Custom Debug impl: prints lr/sp/fp in hexadecimal via `HexNum`. The
// `lr_mask` field is not included in the output.
impl Debug for UnwindRegsAarch64 {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        f.debug_struct("UnwindRegsAarch64")
            .field("lr", &HexNum(self.lr))
            .field("sp", &HexNum(self.sp))
            .field("fp", &HexNum(self.fp))
            .finish()
    }
}
#[cfg(test)]
mod test {
    use crate::aarch64::PtrAuthMask;
    #[test]
    fn test() {
        // The 24/40 split keeps exactly the low 40 bits.
        assert_eq!(PtrAuthMask::new_24_40().0, u64::MAX >> 24);
        assert_eq!(PtrAuthMask::new_24_40().0, (1 << 40) - 1);
        // (highest known address, expected derived mask) pairs.
        let cases: &[(u64, u64)] = &[
            (0x0000aaaab54f7000, 0x0000ffffffffffff),
            (0x0000ffffa3206000, 0x0000ffffffffffff),
            (0xffffffffc05a9000, 0xffffffffffffffff),
            (0x000055ba9f07e000, 0x00007fffffffffff),
            (0x00007f76b8019000, 0x00007fffffffffff),
            (0x000000022a3ccff7, 0x00000003ffffffff),
        ];
        for &(addr, mask) in cases {
            assert_eq!(PtrAuthMask::from_max_known_address(addr).0, mask);
        }
    }
}

99
third_party/rust/framehop/src/add_signed.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,99 @@
/// Add a signed integer to this unsigned integer, with wrapping.
#[allow(unused)]
pub fn wrapping_add_signed<T: AddSigned>(lhs: T, rhs: T::Signed) -> T {
lhs.wrapping_add_signed(rhs)
}
/// Add a signed integer to this unsigned integer, but only if doing so
/// does not cause underflow / overflow.
pub fn checked_add_signed<T: AddSigned>(lhs: T, rhs: T::Signed) -> Option<T> {
lhs.checked_add_signed(rhs)
}
/// A trait which adds methods to unsigned integers for checked and wrapping
/// addition of the corresponding signed integer type.
///
/// Unfortunately, these methods conflict with the (now stabilized) inherent
/// methods of the same names on the integer types, so calls through this
/// trait are written with qualified paths.
/// https://github.com/rust-lang/rust/issues/87840
pub trait AddSigned: Sized {
    type Signed;
    /// Add a signed integer to this unsigned integer, with wrapping.
    fn wrapping_add_signed(self, rhs: Self::Signed) -> Self;
    /// Add a signed integer to this unsigned integer, but only if doing so
    /// does not cause underflow / overflow.
    fn checked_add_signed(self, rhs: Self::Signed) -> Option<Self>;
}
// Both integer widths get the identical implementation, so generate them
// with a macro. The wrapping form relies on two's complement: reinterpreting
// the signed addend as unsigned and wrapping-adding produces the same bits.
macro_rules! impl_add_signed {
    ($u:ty, $s:ty) => {
        impl AddSigned for $u {
            type Signed = $s;
            fn wrapping_add_signed(self, rhs: $s) -> $u {
                self.wrapping_add(rhs as $u)
            }
            fn checked_add_signed(self, rhs: $s) -> Option<$u> {
                let res = AddSigned::wrapping_add_signed(self, rhs);
                // A non-negative addend must not wrap downwards, a negative
                // addend must wrap downwards; anything else over/underflowed.
                if rhs >= 0 {
                    (res >= self).then_some(res)
                } else {
                    (res < self).then_some(res)
                }
            }
        }
    };
}
impl_add_signed!(u64, i64);
impl_add_signed!(u32, i32);
#[cfg(test)]
mod test {
    use super::{checked_add_signed, wrapping_add_signed};
    #[test]
    fn test_wrapping() {
        // (lhs, rhs, expected wrapped sum)
        let cases: &[(u64, i64, u64)] = &[
            (1, 2, 3),
            (2, 1, 3),
            (5, -4, 1),
            (5, -5, 0),
            (u64::MAX - 5, 3, u64::MAX - 2),
            (u64::MAX - 5, 5, u64::MAX),
            (u64::MAX - 5, -5, u64::MAX - 10),
            (1, -2, u64::MAX),
            (2, -4, u64::MAX - 1),
            (u64::MAX, 1, 0),
            (u64::MAX - 5, 6, 0),
            (u64::MAX - 5, 9, 3),
        ];
        for &(lhs, rhs, expected) in cases {
            assert_eq!(wrapping_add_signed(lhs, rhs), expected, "{lhs} + {rhs}");
        }
    }
    #[test]
    fn test_checked() {
        // Rows which stay in range…
        let in_range: &[(u64, i64, u64)] = &[
            (1, 2, 3),
            (2, 1, 3),
            (5, -4, 1),
            (5, -5, 0),
            (u64::MAX - 5, 3, u64::MAX - 2),
            (u64::MAX - 5, 5, u64::MAX),
            (u64::MAX - 5, -5, u64::MAX - 10),
        ];
        for &(lhs, rhs, expected) in in_range {
            assert_eq!(checked_add_signed(lhs, rhs), Some(expected), "{lhs} + {rhs}");
        }
        // …and rows which underflow or overflow.
        let out_of_range: &[(u64, i64)] =
            &[(1, -2), (2, -4), (u64::MAX, 1), (u64::MAX - 5, 6), (u64::MAX - 5, 9)];
        for &(lhs, rhs) in out_of_range {
            assert_eq!(checked_add_signed(lhs, rhs), None, "{lhs} + {rhs}");
        }
    }
}

6
third_party/rust/framehop/src/arch.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,6 @@
use crate::unwind_rule::UnwindRule;
/// Groups the types which vary per CPU architecture: the register set used
/// during unwinding and the unwind-rule type which operates on that set.
pub trait Arch {
    /// The architecture's unwinding register set.
    type UnwindRegs;
    /// The architecture's unwind rule type; it must unwind using `UnwindRegs`.
    type UnwindRule: UnwindRule<UnwindRegs = Self::UnwindRegs>;
}

81
third_party/rust/framehop/src/cache.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,81 @@
use alloc::boxed::Box;
use crate::{rule_cache::RuleCache, unwind_rule::UnwindRule};
pub use crate::rule_cache::CacheStats;
/// A trait which lets you opt into allocation-free unwinding. The two implementations of
/// this trait are [`MustNotAllocateDuringUnwind`] and [`MayAllocateDuringUnwind`].
pub trait AllocationPolicy {
    /// The storage gimli uses while evaluating CFI unwind table rows.
    type GimliUnwindContextStorage<R: gimli::ReaderOffset>: gimli::UnwindContextStorage<R>;
    /// The storage gimli uses while evaluating DWARF expressions.
    type GimliEvaluationStorage<R: gimli::Reader>: gimli::EvaluationStorage<R>;
}
/// Require allocation-free unwinding. This is one of the two [`AllocationPolicy`]
/// implementations.
///
/// Using this means that the unwinder cache takes up more memory, because it preallocates
/// space for DWARF CFI unwind table row evaluation and for DWARF CFI expression evaluation.
/// And because those preallocations are of a fixed size, it is possible that this fixed
/// size is not large enough for certain DWARF unwinding tasks.
///
/// The fixed-size buffers are defined by [`StoreOnStack`].
pub struct MustNotAllocateDuringUnwind;
/// This is only used in the implementation of [MustNotAllocateDuringUnwind] and
/// is not intended to be used by the outside world.
#[doc(hidden)]
pub struct StoreOnStack;
// Fixed-size buffers for gimli's unwind table evaluation: room for up to 192
// register rules per row, and an unwind-table stack depth of 4.
impl<RO: gimli::ReaderOffset> gimli::UnwindContextStorage<RO> for StoreOnStack {
    type Rules = [(gimli::Register, gimli::RegisterRule<RO>); 192];
    type Stack = [gimli::UnwindTableRow<RO, Self>; 4];
}
// Fixed-size buffers for gimli's DWARF expression evaluation. Expressions
// needing more than this fail under the allocation-free policy.
impl<R: gimli::Reader> gimli::EvaluationStorage<R> for StoreOnStack {
    type Stack = [gimli::Value; 64];
    type ExpressionStack = [(R, R); 4];
    type Result = [gimli::Piece<R>; 1];
}
// The allocation-free policy uses the fixed-size stack buffers defined on
// `StoreOnStack` for both storages.
impl AllocationPolicy for MustNotAllocateDuringUnwind {
    type GimliUnwindContextStorage<R: gimli::ReaderOffset> = StoreOnStack;
    type GimliEvaluationStorage<R: gimli::Reader> = StoreOnStack;
}
/// Allow allocation during unwinding. This is one of the two [`AllocationPolicy`]
/// implementations.
///
/// This is the preferred policy because it saves memory and places no limitations on
/// DWARF CFI evaluation. It is also the default policy (see [`Cache`]).
pub struct MayAllocateDuringUnwind;
// The allocating policy uses gimli's heap-backed, growable storage.
impl AllocationPolicy for MayAllocateDuringUnwind {
    type GimliUnwindContextStorage<R: gimli::ReaderOffset> = gimli::StoreOnHeap;
    type GimliEvaluationStorage<R: gimli::Reader> = gimli::StoreOnHeap;
}
/// The unwinder cache. This needs to be created upfront before unwinding. During
/// unwinding, the unwinder needs exclusive access to this cache.
///
/// A single unwinder cache can be used with multiple unwinders alternatingly.
///
/// The cache stores unwind rules for addresses it has seen before, and it stores the
/// unwind context which gimli needs for DWARF CFI evaluation.
pub struct Cache<R: UnwindRule, P: AllocationPolicy = MayAllocateDuringUnwind> {
    /// Scratch space for gimli's DWARF CFI unwind table evaluation; its
    /// storage type is chosen by the allocation policy `P`.
    pub(crate) gimli_unwind_context:
        Box<gimli::UnwindContext<usize, P::GimliUnwindContextStorage<usize>>>,
    /// Previously-computed unwind rules (see [`RuleCache`]).
    pub(crate) rule_cache: RuleCache<R>,
}
impl<R: UnwindRule, P: AllocationPolicy> Cache<R, P> {
    /// Create a new cache.
    pub fn new() -> Self {
        Self {
            gimli_unwind_context: Box::new(gimli::UnwindContext::new_in()),
            rule_cache: RuleCache::new(),
        }
    }
}
impl<R: UnwindRule, P: AllocationPolicy> Default for Cache<R, P> {
    fn default() -> Self {
        Self::new()
    }
}

75
third_party/rust/framehop/src/code_address.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,75 @@
use core::num::NonZeroU64;
/// An absolute code address ("AVMA", i.e. an Actual Virtual Memory Address in
/// the profiled process) belonging to a stack frame. It comes either straight
/// from the instruction pointer ("program counter") or from a return address
/// found during unwinding.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum FrameAddress {
    /// The instruction pointer / program counter. Unwinding starts out with
    /// an address of this kind.
    InstructionPointer(u64),
    /// A return address: the address the CPU jumps to when the callee
    /// returns, i.e. the address of the instruction *after* the call
    /// instruction. All frames produced by unwinding carry this kind of
    /// address.
    ReturnAddress(NonZeroU64),
}
impl FrameAddress {
    /// Wrap an instruction pointer value.
    pub fn from_instruction_pointer(ip: u64) -> Self {
        FrameAddress::InstructionPointer(ip)
    }
    /// Wrap a return address value. A zero return address yields `None`.
    pub fn from_return_address(return_address: u64) -> Option<Self> {
        NonZeroU64::new(return_address).map(FrameAddress::ReturnAddress)
    }
    /// The raw address (AVMA).
    pub fn address(self) -> u64 {
        match self {
            FrameAddress::InstructionPointer(ip) => ip,
            FrameAddress::ReturnAddress(ra) => ra.get(),
        }
    }
    /// The address (AVMA) to use when looking up unwind or debug information.
    ///
    /// For an instruction pointer this is simply the raw address. For a
    /// return address it is the raw address **minus one byte**, which points
    /// inside the call instruction itself — the right place to look, because
    /// the information wanted is the caller's, at the call, not whatever
    /// instruction happens to follow it.
    ///
    /// This distinction matters in practice: when a function's final
    /// instruction is a call to a noreturn function, the return address
    /// points *past* the end of the function, possibly into the *next*
    /// function, and looking up unwind information there would unwind
    /// incorrectly. This has been observed with `+[NSThread exit]`.
    pub fn address_for_lookup(self) -> u64 {
        match self {
            FrameAddress::InstructionPointer(ip) => ip,
            FrameAddress::ReturnAddress(ra) => ra.get() - 1,
        }
    }
    /// Whether this is a [`FrameAddress::ReturnAddress`].
    pub fn is_return_address(self) -> bool {
        matches!(self, FrameAddress::ReturnAddress(_))
    }
}

17
third_party/rust/framehop/src/display_utils.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,17 @@
use core::fmt::{Binary, Debug, LowerHex};
/// Debug-formats the wrapped number in lowercase hexadecimal.
pub struct HexNum<N: LowerHex>(pub N);
impl<N: LowerHex> Debug for HexNum<N> {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        // Forward to the LowerHex impl so the caller's formatter flags
        // (width, `#`, fill, …) are respected.
        <N as LowerHex>::fmt(&self.0, f)
    }
}
/// Debug-formats the wrapped number in binary.
pub struct BinNum<N: Binary>(pub N);
impl<N: Binary> Debug for BinNum<N> {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        // Forward to the Binary impl so the caller's formatter flags are
        // respected.
        <N as Binary>::fmt(&self.0, f)
    }
}

478
third_party/rust/framehop/src/dwarf.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,478 @@
use core::marker::PhantomData;
use alloc::vec::Vec;
use gimli::{
CfaRule, CieOrFde, DebugFrame, EhFrame, EhFrameHdr, Encoding, EndianSlice, Evaluation,
EvaluationResult, EvaluationStorage, Expression, LittleEndian, Location, ParsedEhFrameHdr,
Reader, ReaderOffset, Register, RegisterRule, UnwindContext, UnwindContextStorage,
UnwindOffset, UnwindSection, UnwindTableRow, Value,
};
pub(crate) use gimli::BaseAddresses;
use crate::{arch::Arch, unwind_result::UnwindResult, ModuleSectionInfo};
/// Errors which can occur during DWARF CFI unwinding.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum DwarfUnwinderError {
    /// The FDE could not be parsed from the supplied offset.
    FdeFromOffsetFailed(gimli::Error),
    /// No unwind table row was found for the requested address.
    UnwindInfoForAddressFailed(gimli::Error),
    /// The stack pointer moved backwards during unwinding.
    StackPointerMovedBackwards,
    /// Nothing changed between frames; continuing would loop forever.
    DidNotAdvance,
    /// The canonical frame address (CFA) could not be computed.
    CouldNotRecoverCfa,
    /// The return address could not be recovered.
    CouldNotRecoverReturnAddress,
    /// The frame pointer could not be recovered.
    CouldNotRecoverFramePointer,
}
// Human-readable messages for each `DwarfUnwinderError` variant; the two
// gimli-wrapping variants include the underlying gimli error.
impl core::fmt::Display for DwarfUnwinderError {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        match self {
            Self::FdeFromOffsetFailed(err) => {
                write!(f, "Could not get the FDE for the supplied offset: {err}")
            }
            Self::UnwindInfoForAddressFailed(err) => write!(
                f,
                "Could not find DWARF unwind info for the requested address: {err}"
            ),
            Self::StackPointerMovedBackwards => write!(f, "Stack pointer moved backwards"),
            Self::DidNotAdvance => write!(f, "Did not advance"),
            Self::CouldNotRecoverCfa => write!(f, "Could not recover the CFA"),
            Self::CouldNotRecoverReturnAddress => write!(f, "Could not recover the return address"),
            Self::CouldNotRecoverFramePointer => write!(f, "Could not recover the frame pointer"),
        }
    }
}
// With the `std` feature, expose the wrapped gimli error as the source for
// the two variants which carry one.
#[cfg(feature = "std")]
impl std::error::Error for DwarfUnwinderError {
    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
        match self {
            Self::FdeFromOffsetFailed(e) => Some(e),
            Self::UnwindInfoForAddressFailed(e) => Some(e),
            _ => None,
        }
    }
}
/// Reasons why a DWARF unwind table row could not be converted into a
/// compact unwind rule.
// NOTE(review): the converters live in per-architecture code outside this
// file; the precise meaning of each variant is implied by its name — confirm
// against that code before relying on it.
#[derive(Clone, Debug)]
pub enum ConversionError {
    CfaIsExpression,
    CfaIsOffsetFromUnknownRegister,
    ReturnAddressRuleWithUnexpectedOffset,
    ReturnAddressRuleWasWeird,
    SpOffsetDoesNotFit,
    RegisterNotStoredRelativeToCfa,
    RestoringFpButNotLr,
    LrStorageOffsetDoesNotFit,
    FpStorageOffsetDoesNotFit,
    SpOffsetFromFpDoesNotFit,
    FramePointerRuleDoesNotRestoreLr,
    FramePointerRuleDoesNotRestoreFp,
    FramePointerRuleDoesNotRestoreBp,
    FramePointerRuleHasStrangeBpOffset,
}
/// Architecture-specific DWARF CFI unwinding.
pub trait DwarfUnwinding: Arch {
    /// Unwind one frame: recover the caller's register values in `regs`
    /// from the given unwind table row, reading stack memory via
    /// `read_stack`.
    fn unwind_frame<F, R, UCS, ES>(
        section: &impl UnwindSection<R>,
        unwind_info: &UnwindTableRow<R::Offset, UCS>,
        encoding: Encoding,
        regs: &mut Self::UnwindRegs,
        is_first_frame: bool,
        read_stack: &mut F,
    ) -> Result<UnwindResult<Self::UnwindRule>, DwarfUnwinderError>
    where
        F: FnMut(u64) -> Result<u64, ()>,
        R: Reader,
        UCS: UnwindContextStorage<R::Offset>,
        ES: EvaluationStorage<R>;
    /// The fallback unwind rule for addresses which the looked-up FDE turns
    /// out not to cover.
    fn rule_if_uncovered_by_fde() -> Self::UnwindRule;
}
/// Which DWARF unwind section format the unwind data comes from.
pub enum UnwindSectionType {
    /// `.eh_frame` / `__eh_frame` data.
    EhFrame,
    /// `.debug_frame` data.
    DebugFrame,
}
/// A DWARF CFI unwinder for one module, operating on either `.eh_frame` or
/// `.debug_frame` data.
pub struct DwarfUnwinder<'a, R, A, UCS>
where
    R: Reader,
    A: DwarfUnwinding,
    UCS: UnwindContextStorage<R::Offset>,
{
    /// The raw contents of the unwind section.
    unwind_section_data: R,
    /// Whether `unwind_section_data` is eh_frame or debug_frame data.
    unwind_section_type: UnwindSectionType,
    /// The parsed `.eh_frame_hdr`, if one was supplied and parsed cleanly.
    eh_frame_hdr: Option<ParsedEhFrameHdr<EndianSlice<'a, R::Endian>>>,
    /// Scratch space for gimli's unwind table evaluation.
    unwind_context: &'a mut UnwindContext<R::Offset, UCS>,
    /// The module's base address (SVMA).
    base_svma: u64,
    bases: BaseAddresses,
    _arch: PhantomData<A>,
}
impl<'a, R, A, UCS> DwarfUnwinder<'a, R, A, UCS>
where
    R: Reader,
    A: DwarfUnwinding,
    UCS: UnwindContextStorage<R::Offset>,
{
    /// Create an unwinder over the given unwind section data.
    /// `eh_frame_hdr_data` is optional; a header which fails to parse is
    /// treated the same as no header at all.
    pub fn new(
        unwind_section_data: R,
        unwind_section_type: UnwindSectionType,
        eh_frame_hdr_data: Option<&'a [u8]>,
        unwind_context: &'a mut UnwindContext<R::Offset, UCS>,
        bases: BaseAddresses,
        base_svma: u64,
    ) -> Self {
        let eh_frame_hdr = match eh_frame_hdr_data {
            Some(eh_frame_hdr_data) => {
                let hdr = EhFrameHdr::new(eh_frame_hdr_data, unwind_section_data.endian());
                // Address size 8: 64-bit addresses are assumed throughout.
                match hdr.parse(&bases, 8) {
                    Ok(hdr) => Some(hdr),
                    // Non-fatal: we simply won't have the lookup table.
                    Err(_) => None,
                }
            }
            None => None,
        };
        Self {
            unwind_section_data,
            unwind_section_type,
            eh_frame_hdr,
            unwind_context,
            bases,
            base_svma,
            _arch: PhantomData,
        }
    }
    /// Look up the FDE offset for a module-relative address via the
    /// `.eh_frame_hdr` binary search table, if one is available.
    pub fn get_fde_offset_for_relative_address(&self, rel_lookup_address: u32) -> Option<u32> {
        let lookup_svma = self.base_svma + rel_lookup_address as u64;
        let eh_frame_hdr = self.eh_frame_hdr.as_ref()?;
        let table = eh_frame_hdr.table()?;
        let fde_ptr = table.lookup(lookup_svma, &self.bases).ok()?;
        let fde_offset = table.pointer_to_offset(fde_ptr).ok()?;
        fde_offset.0.into_u64().try_into().ok()
    }
    /// Unwind one frame using the FDE at `fde_offset`. If the FDE turns out
    /// not to cover the address, fall back to the architecture's default
    /// rule instead of failing.
    pub fn unwind_frame_with_fde<F, ES>(
        &mut self,
        regs: &mut A::UnwindRegs,
        is_first_frame: bool,
        rel_lookup_address: u32,
        fde_offset: u32,
        read_stack: &mut F,
    ) -> Result<UnwindResult<A::UnwindRule>, DwarfUnwinderError>
    where
        F: FnMut(u64) -> Result<u64, ()>,
        ES: EvaluationStorage<R>,
    {
        let lookup_svma = self.base_svma + rel_lookup_address as u64;
        let unwind_section_data = self.unwind_section_data.clone();
        // The two branches differ only in the gimli section wrapper type.
        match self.unwind_section_type {
            UnwindSectionType::EhFrame => {
                let mut eh_frame = EhFrame::from(unwind_section_data);
                eh_frame.set_address_size(8);
                let unwind_info = self.unwind_info_for_fde(&eh_frame, lookup_svma, fde_offset);
                if let Err(DwarfUnwinderError::UnwindInfoForAddressFailed(_)) = unwind_info {
                    return Ok(UnwindResult::ExecRule(A::rule_if_uncovered_by_fde()));
                }
                let (unwind_info, encoding) = unwind_info?;
                A::unwind_frame::<F, R, UCS, ES>(
                    &eh_frame,
                    unwind_info,
                    encoding,
                    regs,
                    is_first_frame,
                    read_stack,
                )
            }
            UnwindSectionType::DebugFrame => {
                let mut debug_frame = DebugFrame::from(unwind_section_data);
                debug_frame.set_address_size(8);
                let unwind_info = self.unwind_info_for_fde(&debug_frame, lookup_svma, fde_offset);
                if let Err(DwarfUnwinderError::UnwindInfoForAddressFailed(_)) = unwind_info {
                    return Ok(UnwindResult::ExecRule(A::rule_if_uncovered_by_fde()));
                }
                let (unwind_info, encoding) = unwind_info?;
                A::unwind_frame::<F, R, UCS, ES>(
                    &debug_frame,
                    unwind_info,
                    encoding,
                    regs,
                    is_first_frame,
                    read_stack,
                )
            }
        }
    }
    /// Parse the FDE at `fde_offset` and evaluate its unwind table row for
    /// `lookup_svma`, returning the row together with the CIE's encoding.
    fn unwind_info_for_fde<US: UnwindSection<R>>(
        &mut self,
        unwind_section: &US,
        lookup_svma: u64,
        fde_offset: u32,
    ) -> Result<(&UnwindTableRow<R::Offset, UCS>, Encoding), DwarfUnwinderError> {
        let fde = unwind_section.fde_from_offset(
            &self.bases,
            US::Offset::from(R::Offset::from_u32(fde_offset)),
            US::cie_from_offset,
        );
        let fde = fde.map_err(DwarfUnwinderError::FdeFromOffsetFailed)?;
        let encoding = fde.cie().encoding();
        let unwind_info: &UnwindTableRow<_, _> = fde
            .unwind_info_for_address(
                unwind_section,
                &self.bases,
                self.unwind_context,
                lookup_svma,
            )
            .map_err(DwarfUnwinderError::UnwindInfoForAddressFailed)?;
        Ok((unwind_info, encoding))
    }
}
/// Gather gimli `BaseAddresses` from the module's section start addresses
/// (SVMAs). Sections which are absent get a base address of 0.
pub(crate) fn base_addresses_for_sections<D>(
    section_info: &mut impl ModuleSectionInfo<D>,
) -> BaseAddresses {
    // Each section is looked up under both its Mach-O ("__"-prefixed) and
    // its ELF ("."-prefixed) name; the first match wins.
    let mut start_addr = |names: &[&[u8]]| -> u64 {
        names
            .iter()
            .find_map(|name| section_info.section_svma_range(name))
            .map(|r| r.start)
            .unwrap_or_default()
    };
    BaseAddresses::default()
        .set_eh_frame(start_addr(&[b"__eh_frame", b".eh_frame"]))
        .set_eh_frame_hdr(start_addr(&[b"__eh_frame_hdr", b".eh_frame_hdr"]))
        .set_text(start_addr(&[b"__text", b".text"]))
        .set_got(start_addr(&[b"__got", b".got"]))
}
/// Errors which can occur while building a [`DwarfCfiIndex`].
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum DwarfCfiIndexError {
    /// gimli failed while parsing the unwind section entries.
    Gimli(gimli::Error),
    /// An FDE's start address was below the module's base address.
    CouldNotSubtractBaseAddress,
    /// A module-relative address did not fit into u32.
    RelativeAddressTooBig,
    /// An FDE offset did not fit into u32.
    FdeOffsetTooBig,
}
// Human-readable messages for each `DwarfCfiIndexError` variant.
impl core::fmt::Display for DwarfCfiIndexError {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        match self {
            Self::Gimli(e) => write!(f, "EhFrame processing failed: {e}"),
            Self::CouldNotSubtractBaseAddress => {
                write!(f, "Could not subtract base address to create relative pc")
            }
            Self::RelativeAddressTooBig => write!(f, "Relative address did not fit into u32"),
            Self::FdeOffsetTooBig => write!(f, "FDE offset did not fit into u32"),
        }
    }
}
// Lets `?` convert gimli errors directly while building the index.
impl From<gimli::Error> for DwarfCfiIndexError {
    fn from(e: gimli::Error) -> Self {
        Self::Gimli(e)
    }
}
// With the `std` feature, expose the wrapped gimli error as the source.
#[cfg(feature = "std")]
impl std::error::Error for DwarfCfiIndexError {
    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
        match self {
            Self::Gimli(e) => Some(e),
            _ => None,
        }
    }
}
/// A binary search table for eh_frame FDEs. We generate this whenever a module
/// without eh_frame_hdr is added.
pub struct DwarfCfiIndex {
    /// Contains the initial address for every FDE, relative to the base address.
    /// This vector is sorted so that it can be used for binary search.
    /// It has the same length as `fde_offsets`.
    sorted_fde_pc_starts: Vec<u32>,
    /// Contains the FDE offset for every FDE. The FDE at offset `fde_offsets[i]`
    /// has a PC range which starts at `sorted_fde_pc_starts[i]`.
    fde_offsets: Vec<u32>,
}
impl DwarfCfiIndex {
    /// Build the index by walking every CIE/FDE entry in the unwind section
    /// and recording each FDE's (relative start pc, FDE offset) pair.
    pub fn try_new<R, US>(
        unwind_section: US,
        bases: BaseAddresses,
        base_svma: u64,
    ) -> Result<Self, DwarfCfiIndexError>
    where
        R: Reader,
        R::Offset: TryInto<u32>,
        US: UnwindSection<R>,
    {
        let mut fde_pc_and_offset = Vec::new();
        // The most recently parsed CIE, kept around so that consecutive FDEs
        // referencing the same CIE don't have to re-parse it.
        let mut cur_cie = None;
        let mut entries_iter = unwind_section.entries(&bases);
        while let Some(entry) = entries_iter.next()? {
            let fde = match entry {
                CieOrFde::Cie(cie) => {
                    cur_cie = Some(cie);
                    continue;
                }
                CieOrFde::Fde(partial_fde) => {
                    partial_fde.parse(|unwind_section, bases, cie_offset| {
                        // Reuse the cached CIE when its offset matches.
                        if let Some(cie) = &cur_cie {
                            if cie.offset()
                                == <US::Offset as UnwindOffset<R::Offset>>::into(cie_offset)
                            {
                                return Ok(cie.clone());
                            }
                        }
                        let cie = unwind_section.cie_from_offset(bases, cie_offset);
                        if let Ok(cie) = &cie {
                            cur_cie = Some(cie.clone());
                        }
                        cie
                    })?
                }
            };
            let pc = fde.initial_address();
            // Convert the absolute start address into a module-relative u32.
            let relative_pc = pc
                .checked_sub(base_svma)
                .ok_or(DwarfCfiIndexError::CouldNotSubtractBaseAddress)?;
            let relative_pc = u32::try_from(relative_pc)
                .map_err(|_| DwarfCfiIndexError::RelativeAddressTooBig)?;
            let fde_offset = <R::Offset as TryInto<u32>>::try_into(fde.offset())
                .map_err(|_| DwarfCfiIndexError::FdeOffsetTooBig)?;
            fde_pc_and_offset.push((relative_pc, fde_offset));
        }
        // Sort by start pc so lookups can binary-search.
        fde_pc_and_offset.sort_by_key(|(pc, _)| *pc);
        let sorted_fde_pc_starts = fde_pc_and_offset.iter().map(|(pc, _)| *pc).collect();
        let fde_offsets = fde_pc_and_offset.into_iter().map(|(_, fde)| fde).collect();
        Ok(Self {
            sorted_fde_pc_starts,
            fde_offsets,
        })
    }
    /// Build an index from raw `.eh_frame` data. Assumes little-endian data
    /// and 8-byte addresses.
    pub fn try_new_eh_frame<D>(
        eh_frame_data: &[u8],
        section_info: &mut impl ModuleSectionInfo<D>,
    ) -> Result<Self, DwarfCfiIndexError> {
        let bases = base_addresses_for_sections(section_info);
        let mut eh_frame = EhFrame::from(EndianSlice::new(eh_frame_data, LittleEndian));
        eh_frame.set_address_size(8);
        Self::try_new(eh_frame, bases, section_info.base_svma())
    }
    /// Build an index from raw `.debug_frame` data. Assumes little-endian
    /// data and 8-byte addresses.
    pub fn try_new_debug_frame<D>(
        debug_frame_data: &[u8],
        section_info: &mut impl ModuleSectionInfo<D>,
    ) -> Result<Self, DwarfCfiIndexError> {
        let bases = base_addresses_for_sections(section_info);
        let mut debug_frame = DebugFrame::from(EndianSlice::new(debug_frame_data, LittleEndian));
        debug_frame.set_address_size(8);
        Self::try_new(debug_frame, bases, section_info.base_svma())
    }
    /// Return the offset of the FDE with the closest start pc at or below
    /// `rel_lookup_address`, or `None` if every FDE starts above it. The end
    /// of the FDE's pc range is not checked here; whether the address is
    /// actually covered is determined later, when the FDE is evaluated.
    pub fn fde_offset_for_relative_address(&self, rel_lookup_address: u32) -> Option<u32> {
        let i = match self.sorted_fde_pc_starts.binary_search(&rel_lookup_address) {
            // All FDEs start above the lookup address.
            Err(0) => return None,
            Ok(i) => i,
            Err(i) => i - 1,
        };
        Some(self.fde_offsets[i])
    }
}
/// Read access to register values, keyed by DWARF register number.
pub trait DwarfUnwindRegs {
    /// Return the value of `register`, or `None` if it is not available.
    fn get(&self, register: Register) -> Option<u64>;
}
/// Compute the canonical frame address (CFA) according to `rule`.
///
/// Returns `None` when a required register value is unavailable, when the
/// register + offset arithmetic overflows, or when the CFA expression fails
/// to evaluate.
pub fn eval_cfa_rule<R: Reader, UR: DwarfUnwindRegs, S: EvaluationStorage<R>>(
    section: &impl UnwindSection<R>,
    rule: &CfaRule<R::Offset>,
    encoding: Encoding,
    regs: &UR,
) -> Option<u64> {
    match rule {
        CfaRule::RegisterAndOffset { register, offset } => {
            // Signed addition with overflow checking, then back to u64.
            let val = regs.get(*register)?;
            u64::try_from(i64::try_from(val).ok()?.checked_add(*offset)?).ok()
        }
        CfaRule::Expression(expr) => {
            let expr = expr.get(section).ok()?;
            eval_expr::<R, UR, S>(expr, encoding, regs)
        }
    }
}
/// Evaluate a DWARF expression and return the address it resolves to.
///
/// Only the `RequiresRegister` resumption is handled; any other evaluation
/// requirement aborts with `None`. Likewise, only a final
/// `Location::Address` result is accepted.
fn eval_expr<R: Reader, UR: DwarfUnwindRegs, S: EvaluationStorage<R>>(
    expr: Expression<R>,
    encoding: Encoding,
    regs: &UR,
) -> Option<u64> {
    let mut eval = Evaluation::<R, S>::new_in(expr.0, encoding);
    let mut result = eval.evaluate().ok()?;
    loop {
        match result {
            EvaluationResult::Complete => break,
            EvaluationResult::RequiresRegister { register, .. } => {
                // Feed the requested register value back in and continue.
                let value = regs.get(register)?;
                result = eval.resume_with_register(Value::Generic(value as _)).ok()?;
            }
            _ => return None,
        }
    }
    // Use the location of the last piece of the evaluation result.
    let x = &eval.as_result().last()?.location;
    if let Location::Address { address } = x {
        Some(*address)
    } else {
        None
    }
}
/// Recover a single register's caller value according to the DWARF register
/// `rule`.
///
/// - `cfa`: the already-computed canonical frame address.
/// - `val`: the register's current value (returned as-is for `SameValue`).
/// - `read_stack`: reads one stack word at the given address.
///
/// Returns `None` for `Undefined`, for unsupported rules, and for any
/// arithmetic, evaluation, or memory-read failure.
pub fn eval_register_rule<R, F, UR, S>(
    section: &impl UnwindSection<R>,
    rule: RegisterRule<R::Offset>,
    cfa: u64,
    encoding: Encoding,
    val: u64,
    regs: &UR,
    read_stack: &mut F,
) -> Option<u64>
where
    R: Reader,
    F: FnMut(u64) -> Result<u64, ()>,
    UR: DwarfUnwindRegs,
    S: EvaluationStorage<R>,
{
    match rule {
        RegisterRule::Undefined => None,
        RegisterRule::SameValue => Some(val),
        RegisterRule::Offset(offset) => {
            // The value is stored in memory at cfa + offset.
            let cfa_plus_offset =
                u64::try_from(i64::try_from(cfa).ok()?.checked_add(offset)?).ok()?;
            read_stack(cfa_plus_offset).ok()
        }
        RegisterRule::ValOffset(offset) => {
            // The value *is* cfa + offset (no memory read).
            u64::try_from(i64::try_from(cfa).ok()?.checked_add(offset)?).ok()
        }
        RegisterRule::Register(register) => regs.get(register),
        RegisterRule::Expression(expr) => {
            // The expression computes the address where the value is stored.
            let expr = expr.get(section).ok()?;
            let val = eval_expr::<R, UR, S>(expr, encoding, regs)?;
            read_stack(val).ok()
        }
        RegisterRule::ValExpression(expr) => {
            // The expression computes the value itself.
            let expr = expr.get(section).ok()?;
            eval_expr::<R, UR, S>(expr, encoding, regs)
        }
        RegisterRule::Architectural => {
            // Unimplemented
            // TODO: Find out what the architectural rules for x86_64 and for aarch64 are, if any.
            None
        }
        _ => None,
    }
}

116
third_party/rust/framehop/src/error.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,116 @@
use crate::dwarf::DwarfUnwinderError;
#[cfg(feature = "macho")]
use crate::macho::CompactUnwindInfoUnwinderError;
#[cfg(feature = "pe")]
use crate::pe::PeUnwinderError;
/// The error type used in this crate.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Error {
    /// Reading stack memory at the contained address failed.
    CouldNotReadStack(u64),
    /// Frame pointer unwinding moved backwards instead of towards the
    /// stack base.
    FramepointerUnwindingMovedBackwards,
    /// Neither the code address nor the stack pointer changed, so continuing
    /// would loop forever.
    DidNotAdvance,
    /// An address computation during unwinding overflowed.
    IntegerOverflow,
    /// The recovered return address was null.
    ReturnAddressIsNull,
}
// Human-readable messages mirroring the variants above.
impl core::fmt::Display for Error {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        match self {
            Self::CouldNotReadStack(addr) => write!(f, "Could not read stack memory at 0x{addr:x}"),
            Self::FramepointerUnwindingMovedBackwards => {
                write!(f, "Frame pointer unwinding moved backwards")
            }
            Self::DidNotAdvance => write!(
                f,
                "Neither the code address nor the stack pointer changed, would loop"
            ),
            Self::IntegerOverflow => write!(f, "Unwinding caused integer overflow"),
            Self::ReturnAddressIsNull => write!(f, "Return address is null"),
        }
    }
}
// No wrapped error, so the default (source-less) impl suffices.
#[cfg(feature = "std")]
impl std::error::Error for Error {}
/// Internal error type describing why a single unwind step failed.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum UnwinderError {
    /// Compact Unwind Info (macOS `__unwind_info`) unwinding failed.
    #[cfg(feature = "macho")]
    CompactUnwindInfo(CompactUnwindInfoUnwinderError),
    /// DWARF CFI unwinding failed.
    Dwarf(DwarfUnwinderError),
    /// PE (Windows) unwinding failed.
    #[cfg(feature = "pe")]
    Pe(PeUnwinderError),
    /// `__unwind_info` referred to a DWARF FDE but no `__eh_frame` data
    /// is available.
    #[cfg(feature = "macho")]
    NoDwarfData,
    /// No unwind data is registered for the module containing the address.
    NoModuleUnwindData,
    /// The `.eh_frame_hdr` table lookup did not find the address.
    EhFrameHdrCouldNotFindAddress,
    /// The DwarfCfiIndex search table lookup did not find the address.
    DwarfCfiIndexCouldNotFindAddress,
}
impl core::fmt::Display for UnwinderError {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        match self {
            #[cfg(feature = "macho")]
            Self::CompactUnwindInfo(err) => {
                write!(f, "Compact Unwind Info unwinding failed: {err}")
            }
            Self::Dwarf(err) => write!(f, "DWARF unwinding failed: {err}"),
            #[cfg(feature = "pe")]
            Self::Pe(err) => write!(f, "PE unwinding failed: {err}"),
            #[cfg(feature = "macho")]
            Self::NoDwarfData => write!(
                f,
                "__unwind_info referred to DWARF FDE but we do not have __eh_frame data"
            ),
            Self::NoModuleUnwindData => {
                write!(f, "No unwind data for the module containing the address")
            }
            Self::EhFrameHdrCouldNotFindAddress => write!(
                f,
                ".eh_frame_hdr was not successful in looking up the address in the table"
            ),
            Self::DwarfCfiIndexCouldNotFindAddress => write!(
                f,
                "Failed to look up the address in the DwarfCfiIndex search table"
            ),
        }
    }
}
impl From<DwarfUnwinderError> for UnwinderError {
    fn from(e: DwarfUnwinderError) -> Self {
        Self::Dwarf(e)
    }
}
#[cfg(feature = "pe")]
impl From<PeUnwinderError> for UnwinderError {
    fn from(e: PeUnwinderError) -> Self {
        Self::Pe(e)
    }
}
#[cfg(feature = "macho")]
impl From<CompactUnwindInfoUnwinderError> for UnwinderError {
    fn from(e: CompactUnwindInfoUnwinderError) -> Self {
        match e {
            // Unwrap nested DWARF errors so they surface as Dwarf directly.
            CompactUnwindInfoUnwinderError::BadDwarfUnwinding(e) => UnwinderError::Dwarf(e),
            e => UnwinderError::CompactUnwindInfo(e),
        }
    }
}
#[cfg(feature = "std")]
impl std::error::Error for UnwinderError {
    // Expose the wrapped format-specific error as the error source, where
    // one exists.
    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
        match self {
            #[cfg(feature = "macho")]
            Self::CompactUnwindInfo(e) => Some(e),
            Self::Dwarf(e) => Some(e),
            #[cfg(feature = "pe")]
            Self::Pe(e) => Some(e),
            _ => None,
        }
    }
}

20
third_party/rust/framehop/src/instruction_analysis.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,20 @@
use crate::arch::Arch;
/// Architecture-specific analysis of raw instruction bytes, used to derive an
/// unwind rule when the program counter sits inside a function prologue or
/// epilogue.
pub trait InstructionAnalysis: Arch {
    /// Caller guarantees pc_offset <= text_bytes.len()
    fn rule_from_prologue_analysis(text_bytes: &[u8], pc_offset: usize)
        -> Option<Self::UnwindRule>;
    /// Caller guarantees pc_offset <= text_bytes.len()
    fn rule_from_epilogue_analysis(text_bytes: &[u8], pc_offset: usize)
        -> Option<Self::UnwindRule>;
    /// Caller guarantees pc_offset <= text_bytes.len()
    ///
    /// Default implementation: try prologue analysis first, then fall back
    /// to epilogue analysis.
    fn rule_from_instruction_analysis(
        text_bytes: &[u8],
        pc_offset: usize,
    ) -> Option<Self::UnwindRule> {
        Self::rule_from_prologue_analysis(text_bytes, pc_offset)
            .or_else(|| Self::rule_from_epilogue_analysis(text_bytes, pc_offset))
    }
}

166
third_party/rust/framehop/src/lib.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,166 @@
//! # framehop
//!
//! Framehop is a stack frame unwinder written in 100% Rust. It produces high quality stacks at high speed, on multiple platforms and architectures, without an expensive pre-processing step for unwind information. This makes it suitable for sampling profilers.
//!
//! It currently supports unwinding x86_64 and aarch64, with unwind information formats commonly used on Windows, macOS, Linux and Android.
//!
//! You give framehop register values, stack memory and unwind data, and framehop produces a list of return addresses.
//!
//! Framehop can be used in the following scenarios:
//!
//! - Live unwinding of a remote process. This is how [`samply`](https://github.com/mstange/samply/) uses it.
//! - Offline unwinding from saved registers and stack bytes, even on a different machine, a different OS, or a different CPU architecture.
//! - Live unwinding inside the same process. This is currently unproven, but should work as long as you can do heap allocation before sampling, in order to allocate a cache and to update the list of modules. The actual unwinding does not require any heap allocation and should work even inside a signal handler, as long as you use `MustNotAllocateDuringUnwind`.
//!
//! As a user of framehop, your responsibilities are the following:
//!
//! - You need to enumerate the modules (libraries) that are loaded in the sampled process ahead of time, or ideally maintain a live list which is updated whenever modules are loaded / unloaded.
//! - You need to provide address ranges and unwind section data for those modules.
//! - When sampling, you provide the register values and a callback to read arbitrary stack memory without segfaulting.
//! - On aarch64, picking the right bitmask to strip pointer authentication bits from return addresses is up to you.
//! - You will need to do symbol resolution yourself, if you want function names. Framehop only produces addresses, it does not do any symbolication.
//!
//! In turn, framehop solves the following problems:
//!
//! - It parses a number of different unwind information formats. At the moment, it supports the following:
//! - Apple's Compact Unwinding Format, in `__unwind_info` (macOS)
//! - DWARF CFI in `.eh_frame` (using `.eh_frame_hdr` as an index, if available)
//! - DWARF CFI in `.debug_frame`
//! - PE unwind info in `.pdata`, `.rdata` and `.xdata` (for Windows x86_64)
//! - It supports correct unwinding even when the program is interrupted inside a function prologue or epilogue. On macOS, it has to analyze assembly instructions in order to do this.
//! - On x86_64 and aarch64, it falls back to frame pointer unwinding if it cannot find unwind information for an address.
//! - It caches the unwind rule for each address in a fixed-size cache, so that repeated unwinding from the same address is even faster.
//! - It generates binary search indexes for unwind information formats which don't have them. Specifically, for `.debug_frame` and for `.eh_frame` without `.eh_frame_hdr`.
//! - It does a reasonable job of detecting the end of the stack, so that you can differentiate between properly terminated stacks and prematurely truncated stacks.
//!
//! Framehop is not suitable for debuggers or to implement exception handling. Debuggers usually need to recover all register values for every frame whereas framehop only cares about return addresses. And exception handling needs the ability to call destructors, which is also a non-goal for framehop.
//!
//! ## Speed
//!
//! Framehop achieves high speed in the following ways:
//!
//! 1. It only recovers registers which are needed for computing return addresses. On x86_64 that's `rip`, `rsp` and `rbp`, and on aarch64 that's `lr`, `sp` and `fp`. All other registers are not needed - in theory they could be used as inputs to DWARF CFI expressions, but in practice they are not.
//! 2. It uses zero-copy parsing wherever possible. For example, the bytes in `__unwind_info` are only accessed during unwinding, and the binary search happens right inside the original `__unwind_info` memory. For DWARF unwinding, framehop uses the excellent [`gimli` crate](https://github.com/gimli-rs/gimli/), which was written with performance in mind.
//! 3. It uses binary search to find the correct unwind rule in all supported unwind information formats. For formats without a built-in index, it creates an index when the module is added.
//! 4. It caches unwind rules based on address. In practice, the 509-slot cache achieves a hit rate of around 80% on complicated code like Firefox (with the cache being shared across all Firefox processes). When profiling simpler applications, the hit rate is likely much higher.
//!
//! Furthermore, adding a module is fast too because framehop only does minimal up-front parsing and processing - really, the only thing it does is to create the index of FDE offsets for `.eh_frame` / `.debug_frame`.
//!
//! ## Example
//!
//! ```
//! use core::ops::Range;
//! use framehop::aarch64::{CacheAarch64, UnwindRegsAarch64, UnwinderAarch64};
//! use framehop::{ExplicitModuleSectionInfo, FrameAddress, Module};
//!
//! let mut cache = CacheAarch64::<_>::new();
//! let mut unwinder = UnwinderAarch64::new();
//!
//! let module = Module::new(
//! "mybinary".to_string(),
//! 0x1003fc000..0x100634000,
//! 0x1003fc000,
//! ExplicitModuleSectionInfo {
//! base_svma: 0x100000000,
//! text_svma: Some(0x100000b64..0x1001d2d18),
//! text: Some(vec![/* __text */]),
//! stubs_svma: Some(0x1001d2d18..0x1001d309c),
//! stub_helper_svma: Some(0x1001d309c..0x1001d3438),
//! got_svma: Some(0x100238000..0x100238010),
//! unwind_info: Some(vec![/* __unwind_info */]),
//! eh_frame_svma: Some(0x100237f80..0x100237ffc),
//! eh_frame: Some(vec![/* __eh_frame */]),
//! text_segment_svma: Some(0x1003fc000..0x100634000),
//! text_segment: Some(vec![/* __TEXT */]),
//! ..Default::default()
//! },
//! );
//! unwinder.add_module(module);
//!
//! let pc = 0x1003fc000 + 0x1292c0;
//! let lr = 0x1003fc000 + 0xe4830;
//! let sp = 0x10;
//! let fp = 0x20;
//! let stack = [
//! 1, 2, 3, 4, 0x40, 0x1003fc000 + 0x100dc4,
//! 5, 6, 0x70, 0x1003fc000 + 0x12ca28,
//! 7, 8, 9, 10, 0x0, 0x0,
//! ];
//! let mut read_stack = |addr| stack.get((addr / 8) as usize).cloned().ok_or(());
//!
//! use framehop::Unwinder;
//! let mut iter = unwinder.iter_frames(
//! pc,
//! UnwindRegsAarch64::new(lr, sp, fp),
//! &mut cache,
//! &mut read_stack,
//! );
//!
//! let mut frames = Vec::new();
//! while let Ok(Some(frame)) = iter.next() {
//! frames.push(frame);
//! }
//!
//! assert_eq!(
//! frames,
//! vec![
//! FrameAddress::from_instruction_pointer(0x1003fc000 + 0x1292c0),
//! FrameAddress::from_return_address(0x1003fc000 + 0x100dc4).unwrap(),
//! FrameAddress::from_return_address(0x1003fc000 + 0x12ca28).unwrap()
//! ]
//! );
//! ```
#![cfg_attr(not(feature = "std"), no_std)]
// Heap allocation is used (caches, indexes) even in no_std builds.
extern crate alloc;
// Private implementation modules.
mod add_signed;
mod arch;
mod cache;
mod code_address;
mod display_utils;
mod dwarf;
mod error;
mod instruction_analysis;
#[cfg(feature = "macho")]
mod macho;
#[cfg(feature = "pe")]
mod pe;
mod rule_cache;
mod unwind_result;
mod unwind_rule;
mod unwinder;
/// Types for unwinding on the aarch64 CPU architecture.
pub mod aarch64;
/// Types for unwinding on the x86_64 CPU architecture.
pub mod x86_64;
// Public re-exports forming the crate's top-level API.
pub use cache::{AllocationPolicy, MayAllocateDuringUnwind, MustNotAllocateDuringUnwind};
pub use code_address::FrameAddress;
pub use error::Error;
pub use rule_cache::CacheStats;
pub use unwinder::{
    ExplicitModuleSectionInfo, Module, ModuleSectionInfo, UnwindIterator, Unwinder,
};
/// The unwinder cache for the native CPU architecture.
#[cfg(target_arch = "aarch64")]
pub type CacheNative<P> = aarch64::CacheAarch64<P>;
/// The unwind registers type for the native CPU architecture.
#[cfg(target_arch = "aarch64")]
pub type UnwindRegsNative = aarch64::UnwindRegsAarch64;
/// The unwinder type for the native CPU architecture.
#[cfg(target_arch = "aarch64")]
pub type UnwinderNative<D, P> = aarch64::UnwinderAarch64<D, P>;
/// The unwinder cache for the native CPU architecture.
#[cfg(target_arch = "x86_64")]
pub type CacheNative<P> = x86_64::CacheX86_64<P>;
/// The unwind registers type for the native CPU architecture.
#[cfg(target_arch = "x86_64")]
pub type UnwindRegsNative = x86_64::UnwindRegsX86_64;
/// The unwinder type for the native CPU architecture.
#[cfg(target_arch = "x86_64")]
pub type UnwinderNative<D, P> = x86_64::UnwinderX86_64<D, P>;

206
third_party/rust/framehop/src/macho.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,206 @@
use core::marker::PhantomData;
use crate::dwarf::DwarfUnwinderError;
use crate::{arch::Arch, unwind_rule::UnwindRule};
use macho_unwind_info::UnwindInfo;
/// Errors that can occur while unwinding via macOS Compact Unwind Info
/// (`__unwind_info`). The `Display` impl below carries the detailed
/// description for each variant.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum CompactUnwindInfoUnwinderError {
    BadFormat(macho_unwind_info::Error),
    AddressOutsideRange(u32),
    CallerCannotBeFrameless,
    FunctionHasNoInfo,
    BpOffsetDoesNotFit,
    BadOpcodeKind(u8),
    BadDwarfUnwinding(DwarfUnwinderError),
    NoTextBytesToLookUpIndirectStackOffset,
    IndirectStackOffsetOutOfBounds,
    StackAdjustOverflow,
    StackSizeDoesNotFit,
    StubFunctionCannotBeCaller,
    InvalidFrameless,
}
impl core::fmt::Display for CompactUnwindInfoUnwinderError {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        match self {
            Self::BadFormat(err) => write!(f, "Bad __unwind_info format: {err}"),
            Self::AddressOutsideRange(addr) => write!(f, "Address 0x{addr:x} outside of the range covered by __unwind_info"),
            Self::CallerCannotBeFrameless => write!(f, "Encountered a non-leaf function which was marked as frameless."),
            Self::FunctionHasNoInfo => write!(f, "No unwind info (null opcode) for this function in __unwind_info"),
            Self::BpOffsetDoesNotFit => write!(f, "rbp offset from the stack pointer divided by 8 does not fit into i16"),
            Self::BadOpcodeKind(kind) => write!(f, "Unrecognized __unwind_info opcode kind {kind}"),
            Self::BadDwarfUnwinding(err) => write!(f, "DWARF unwinding failed: {err}"),
            Self::NoTextBytesToLookUpIndirectStackOffset => write!(f, "Don't have the function bytes to look up the offset for frameless function with indirect stack offset"),
            Self::IndirectStackOffsetOutOfBounds => write!(f, "Stack offset not found inside the bounds of the text bytes"),
            Self::StackAdjustOverflow => write!(f, "Stack adjust addition overflowed"),
            Self::StackSizeDoesNotFit => write!(f, "Stack size does not fit into the rule representation"),
            Self::StubFunctionCannotBeCaller => write!(f, "A caller had its address in the __stubs section"),
            Self::InvalidFrameless => write!(f, "Encountered invalid unwind entry"),
        }
    }
}
// Allow `?` on macho_unwind_info parse results.
impl From<macho_unwind_info::Error> for CompactUnwindInfoUnwinderError {
    fn from(e: macho_unwind_info::Error) -> Self {
        Self::BadFormat(e)
    }
}
// Allow `?` on nested DWARF unwinding results.
impl From<DwarfUnwinderError> for CompactUnwindInfoUnwinderError {
    fn from(e: DwarfUnwinderError) -> Self {
        Self::BadDwarfUnwinding(e)
    }
}
#[cfg(feature = "std")]
impl std::error::Error for CompactUnwindInfoUnwinderError {
    // Expose the wrapped parse / DWARF error as the error source.
    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
        match self {
            Self::BadFormat(e) => Some(e),
            Self::BadDwarfUnwinding(e) => Some(e),
            _ => None,
        }
    }
}
/// Outcome of a Compact Unwind Info lookup: either a directly executable
/// unwind rule, or a deferral to DWARF CFI at the given `__eh_frame` offset.
#[derive(Clone, Debug)]
pub enum CuiUnwindResult<R: UnwindRule> {
    /// Execute this rule to unwind one frame.
    ExecRule(R),
    /// Consult DWARF CFI; the payload is the FDE offset in `__eh_frame`.
    NeedDwarf(u32),
}
/// Architecture-specific interpretation of Compact Unwind Info opcodes.
pub trait CompactUnwindInfoUnwinding: Arch {
    /// Turn the `__unwind_info` entry for `function` into an unwind action.
    /// `function_bytes`, when available, are the instruction bytes of the
    /// whole function (used for prologue/epilogue analysis).
    fn unwind_frame(
        function: macho_unwind_info::Function,
        is_first_frame: bool,
        address_offset_within_function: usize,
        function_bytes: Option<&[u8]>,
    ) -> Result<CuiUnwindResult<Self::UnwindRule>, CompactUnwindInfoUnwinderError>;
    /// Unwind rule for an address inside the `__stub_helper` section,
    /// given the offset from the section start.
    fn rule_for_stub_helper(
        offset: u32,
    ) -> Result<CuiUnwindResult<Self::UnwindRule>, CompactUnwindInfoUnwinderError>;
}
/// A slice of `__text` section bytes together with the section's offset from
/// the module base address, so function addresses can be mapped into it.
#[derive(Clone, Copy)]
pub struct TextBytes<'a> {
    offset_from_base_address: u32,
    bytes: &'a [u8],
}
impl<'a> TextBytes<'a> {
    /// Wrap `bytes`, recording where they start relative to the module base.
    pub fn new(offset_from_base_address: u32, bytes: &'a [u8]) -> Self {
        Self {
            offset_from_base_address,
            bytes,
        }
    }
}
/// Per-module unwinder for macOS Compact Unwind Info. Holds the raw
/// `__unwind_info` bytes, optional text bytes for instruction analysis, and
/// the `__stubs` / `__stub_helper` address ranges (as module-relative
/// start/end pairs) which must be special-cased during lookup.
pub struct CompactUnwindInfoUnwinder<'a, A: CompactUnwindInfoUnwinding> {
    unwind_info_data: &'a [u8],
    text_bytes: Option<TextBytes<'a>>,
    stubs_range: (u32, u32),
    stub_helper_range: (u32, u32),
    _arch: PhantomData<A>,
}
impl<'a, A: CompactUnwindInfoUnwinding> CompactUnwindInfoUnwinder<'a, A> {
    /// Create an unwinder over one module's `__unwind_info` data.
    pub fn new(
        unwind_info_data: &'a [u8],
        text_bytes: Option<TextBytes<'a>>,
        stubs_range: (u32, u32),
        stub_helper_range: (u32, u32),
    ) -> Self {
        Self {
            unwind_info_data,
            text_bytes,
            stubs_range,
            stub_helper_range,
            _arch: PhantomData,
        }
    }
    /// Look up the `__unwind_info` function entry covering the given
    /// module-relative address. Parses the data lazily on each call.
    pub fn function_for_address(
        &self,
        address: u32,
    ) -> Result<macho_unwind_info::Function, CompactUnwindInfoUnwinderError> {
        let unwind_info = UnwindInfo::parse(self.unwind_info_data)
            .map_err(CompactUnwindInfoUnwinderError::BadFormat)?;
        let function = unwind_info
            .lookup(address)
            .map_err(CompactUnwindInfoUnwinderError::BadFormat)?;
        function.ok_or(CompactUnwindInfoUnwinderError::AddressOutsideRange(address))
    }
    /// Unwind one frame at the given module-relative lookup address.
    /// `is_first_frame` distinguishes a sampled pc (may be mid-prologue /
    /// mid-epilogue, or in a stub) from a return address.
    pub fn unwind_frame(
        &mut self,
        rel_lookup_address: u32,
        is_first_frame: bool,
    ) -> Result<CuiUnwindResult<A::UnwindRule>, CompactUnwindInfoUnwinderError> {
        // Exclude __stubs and __stub_helper sections. The __unwind_info does not describe those
        // sections. These sections need to be manually excluded because the addresses in
        // __unwind_info can be both before and after the stubs/stub_helper sections, if there is
        // both a __text and a text_env section.
        if self.stubs_range.0 <= rel_lookup_address && rel_lookup_address < self.stubs_range.1 {
            if !is_first_frame {
                return Err(CompactUnwindInfoUnwinderError::StubFunctionCannotBeCaller);
            }
            // All stub functions are frameless.
            return Ok(CuiUnwindResult::ExecRule(
                A::UnwindRule::rule_for_stub_functions(),
            ));
        }
        if self.stub_helper_range.0 <= rel_lookup_address
            && rel_lookup_address < self.stub_helper_range.1
        {
            if !is_first_frame {
                return Err(CompactUnwindInfoUnwinderError::StubFunctionCannotBeCaller);
            }
            let lookup_address_relative_to_section = rel_lookup_address - self.stub_helper_range.0;
            return <A as CompactUnwindInfoUnwinding>::rule_for_stub_helper(
                lookup_address_relative_to_section,
            );
        }
        let function = match self.function_for_address(rel_lookup_address) {
            Ok(f) => f,
            Err(CompactUnwindInfoUnwinderError::AddressOutsideRange(_)) if is_first_frame => {
                // pc is falling into this module's address range, but it's not covered by __unwind_info.
                // This could mean that we're inside a stub function, in the __stubs section.
                // All stub functions are frameless.
                // TODO: Obtain the actual __stubs address range and do better checking here.
                return Ok(CuiUnwindResult::ExecRule(
                    A::UnwindRule::rule_for_stub_functions(),
                ));
            }
            Err(err) => return Err(err),
        };
        if is_first_frame && rel_lookup_address == function.start_address {
            // Sampled exactly at the function entry: the prologue has not run
            // yet, so use the function-start rule.
            return Ok(CuiUnwindResult::ExecRule(
                A::UnwindRule::rule_for_function_start(),
            ));
        }
        let address_offset_within_function =
            usize::try_from(rel_lookup_address - function.start_address).unwrap();
        // Slice out this function's instruction bytes from the text section,
        // if we have them; used by the arch impl for instruction analysis.
        let function_bytes = self.text_bytes.and_then(|text_bytes| {
            let TextBytes {
                offset_from_base_address,
                bytes,
            } = text_bytes;
            let function_start_relative_to_text = function
                .start_address
                .checked_sub(offset_from_base_address)?
                as usize;
            let function_end_relative_to_text =
                function.end_address.checked_sub(offset_from_base_address)? as usize;
            bytes.get(function_start_relative_to_text..function_end_relative_to_text)
        });
        <A as CompactUnwindInfoUnwinding>::unwind_frame(
            function,
            is_first_frame,
            address_offset_within_function,
            function_bytes,
        )
    }
}

100
third_party/rust/framehop/src/pe.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,100 @@
use crate::{arch::Arch, unwind_result::UnwindResult};
use core::ops::Range;
/// Errors that can occur while unwinding via Windows PE unwind data.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum PeUnwinderError {
    /// Unwind info memory at the given RVA could not be read.
    MissingUnwindInfoData(u32),
    /// Instruction memory at the given RVA could not be read.
    MissingInstructionData(u32),
    /// A stack read failed, optionally at a known address.
    MissingStackData(Option<u64>),
    /// The UnwindInfo structure could not be parsed.
    UnwindInfoParseError,
    /// PE unwinding is not implemented for AArch64.
    Aarch64Unsupported,
}
impl core::fmt::Display for PeUnwinderError {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        match self {
            Self::MissingUnwindInfoData(rva) => {
                write!(f, "failed to read unwind info memory at RVA {rva:x}")
            }
            Self::MissingInstructionData(rva) => {
                write!(f, "failed to read instruction memory at RVA {rva:x}")
            }
            Self::MissingStackData(addr) => {
                write!(f, "failed to read stack")?;
                if let Some(addr) = addr {
                    write!(f, " at address {addr:x}")?;
                }
                Ok(())
            }
            Self::UnwindInfoParseError => write!(f, "failed to parse UnwindInfo"),
            Self::Aarch64Unsupported => write!(f, "AArch64 is not yet supported"),
        }
    }
}
// No wrapped error, so the default (source-less) impl suffices.
#[cfg(feature = "std")]
impl std::error::Error for PeUnwinderError {}
/// Data and the related RVA range within the binary.
///
/// This is only used by PE unwinding.
///
/// Type arguments:
/// - `D`: The type for unwind section data. This allows carrying owned data on the
///   module, e.g. `Vec<u8>`. But it could also be a wrapper around mapped memory from
///   a file or a different process, for example. It just needs to provide a slice of
///   bytes via its `Deref` implementation.
pub struct DataAtRvaRange<D> {
    pub data: D,
    pub rva_range: Range<u32>,
}
/// The PE sections relevant to unwinding: `.pdata` (function table), and
/// optionally `.rdata` / `.xdata` (unwind info) and `.text` (instructions).
pub struct PeSections<'a, D> {
    pub pdata: &'a D,
    pub rdata: Option<&'a DataAtRvaRange<D>>,
    pub xdata: Option<&'a DataAtRvaRange<D>>,
    pub text: Option<&'a DataAtRvaRange<D>>,
}
impl<'a, D> PeSections<'a, D>
where
    D: core::ops::Deref<Target = [u8]>,
{
    /// Return the unwind info bytes starting at `rva`, checking `.rdata`
    /// first and then `.xdata`.
    pub fn unwind_info_memory_at_rva(&self, rva: u32) -> Result<&'a [u8], PeUnwinderError> {
        let from_rdata = self.rdata.and_then(|range| memory_at_rva(range, rva));
        from_rdata
            .or_else(|| self.xdata.and_then(|range| memory_at_rva(range, rva)))
            .ok_or(PeUnwinderError::MissingUnwindInfoData(rva))
    }
    /// Return the instruction bytes starting at `rva` within `.text`.
    pub fn text_memory_at_rva(&self, rva: u32) -> Result<&'a [u8], PeUnwinderError> {
        match self.text.and_then(|range| memory_at_rva(range, rva)) {
            Some(bytes) => Ok(bytes),
            None => Err(PeUnwinderError::MissingInstructionData(rva)),
        }
    }
}
/// If `address` falls inside this range, return the data slice starting at
/// the corresponding offset; otherwise `None`.
fn memory_at_rva<D: core::ops::Deref<Target = [u8]>>(
    DataAtRvaRange { data, rva_range }: &DataAtRvaRange<D>,
    address: u32,
) -> Option<&[u8]> {
    // checked_sub rejects addresses before the range start; the comparison
    // below rejects addresses at or past the range end.
    let relative = address.checked_sub(rva_range.start)?;
    if address < rva_range.end {
        Some(&data[(relative as usize)..])
    } else {
        None
    }
}
/// Architecture-specific PE unwinding: unwind one frame at the given RVA
/// using the module's PE sections, updating `regs` in place.
pub trait PeUnwinding: Arch {
    fn unwind_frame<F, D>(
        sections: PeSections<D>,
        address: u32,
        regs: &mut Self::UnwindRegs,
        is_first_frame: bool,
        read_stack: &mut F,
    ) -> Result<UnwindResult<Self::UnwindRule>, PeUnwinderError>
    where
        F: FnMut(u64) -> Result<u64, ()>,
        D: core::ops::Deref<Target = [u8]>;
}

146
third_party/rust/framehop/src/rule_cache.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,146 @@
use alloc::boxed::Box;
use crate::unwind_rule::UnwindRule;
// Number of direct-mapped cache slots. 509 is prime, which spreads addresses
// across slots; the slot index must also fit in u16 (checked below).
const CACHE_ENTRY_COUNT: usize = 509;
/// A direct-mapped cache from lookup address to unwind rule, with hit/miss
/// statistics.
pub struct RuleCache<R: UnwindRule> {
    entries: Box<[Option<CacheEntry<R>>; CACHE_ENTRY_COUNT]>,
    stats: CacheStats,
}
impl<R: UnwindRule> RuleCache<R> {
    /// Create an empty cache with zeroed statistics.
    pub fn new() -> Self {
        Self {
            entries: Box::new([None; CACHE_ENTRY_COUNT]),
            stats: CacheStats::new(),
        }
    }
    /// Look up the rule cached for `address`. A hit requires the slot's
    /// entry to match both the address and `modules_generation` (entries
    /// from a stale module list are treated as misses). On a miss, returns
    /// a handle that `insert` uses to fill the slot. Updates statistics.
    pub fn lookup(&mut self, address: u64, modules_generation: u16) -> CacheResult<R> {
        // Direct-mapped: the slot is determined by the address alone.
        let slot = (address % (CACHE_ENTRY_COUNT as u64)) as u16;
        match &self.entries[slot as usize] {
            None => {
                self.stats.miss_empty_slot_count += 1;
            }
            Some(entry) => {
                if entry.modules_generation == modules_generation {
                    if entry.address == address {
                        self.stats.hit_count += 1;
                        return CacheResult::Hit(entry.unwind_rule);
                    } else {
                        self.stats.miss_wrong_address_count += 1;
                    }
                } else {
                    self.stats.miss_wrong_modules_count += 1;
                }
            }
        }
        CacheResult::Miss(CacheHandle {
            slot,
            address,
            modules_generation,
        })
    }
    /// Store `unwind_rule` in the slot identified by a miss `handle`,
    /// overwriting whatever was there.
    pub fn insert(&mut self, handle: CacheHandle, unwind_rule: R) {
        let CacheHandle {
            slot,
            address,
            modules_generation,
        } = handle;
        self.entries[slot as usize] = Some(CacheEntry {
            address,
            modules_generation,
            unwind_rule,
        });
    }
    /// Returns a snapshot of the cache usage statistics.
    pub fn stats(&self) -> CacheStats {
        self.stats
    }
}
/// Result of a cache lookup: either the cached rule, or a handle with which
/// the computed rule can be inserted afterwards.
pub enum CacheResult<R: UnwindRule> {
    Miss(CacheHandle),
    Hit(R),
}
/// Identifies the slot (and the key data) for a pending insertion after a
/// cache miss.
pub struct CacheHandle {
    slot: u16,
    address: u64,
    modules_generation: u16,
}
const _: () = assert!(
    CACHE_ENTRY_COUNT as u64 <= u16::MAX as u64,
    "u16 should be sufficient to store the cache slot index"
);
/// One cache slot: the keyed address + module generation, and the rule.
#[derive(Clone, Copy, Debug)]
struct CacheEntry<R: UnwindRule> {
    address: u64,
    modules_generation: u16,
    unwind_rule: R,
}
/// Statistics about the effectiveness of the rule cache.
///
/// Counters are updated by `RuleCache::lookup`.
#[derive(Default, Debug, Clone, Copy)]
pub struct CacheStats {
    /// The number of successful cache hits.
    pub hit_count: u64,
    /// The number of cache misses that were due to an empty slot.
    pub miss_empty_slot_count: u64,
    /// The number of cache misses that were due to a filled slot whose module
    /// generation didn't match the unwinder's current module generation.
    /// (This means that either the unwinder's modules have changed since the
    /// rule in this slot was stored, or the same cache is used with multiple
    /// unwinders and the unwinders are stomping on each other's cache slots.)
    pub miss_wrong_modules_count: u64,
    /// The number of cache misses that were due to cache slot collisions of
    /// different addresses.
    pub miss_wrong_address_count: u64,
}
impl CacheStats {
    /// Create a new, zeroed statistics instance.
    pub fn new() -> Self {
        Self::default()
    }
    /// The total number of lookups (hits plus misses).
    pub fn total(&self) -> u64 {
        self.hits() + self.misses()
    }
    /// The total number of cache hits.
    pub fn hits(&self) -> u64 {
        self.hit_count
    }
    /// The total number of cache misses, summed over all miss reasons.
    pub fn misses(&self) -> u64 {
        [
            self.miss_empty_slot_count,
            self.miss_wrong_modules_count,
            self.miss_wrong_address_count,
        ]
        .iter()
        .sum()
    }
}
#[cfg(test)]
mod tests {
    use crate::{aarch64::UnwindRuleAarch64, x86_64::UnwindRuleX86_64};
    use super::*;
    // Ensure that the size of Option<CacheEntry<UnwindRuleX86_64>> doesn't change by accident.
    // The full cache allocates CACHE_ENTRY_COUNT of these, so entry size
    // directly determines the cache's memory footprint.
    #[test]
    fn test_cache_entry_size() {
        assert_eq!(
            core::mem::size_of::<Option<CacheEntry<UnwindRuleX86_64>>>(),
            16
        );
        assert_eq!(
            core::mem::size_of::<Option<CacheEntry<UnwindRuleAarch64>>>(),
            24 // <-- larger than we'd like
        );
    }
}

5
third_party/rust/framehop/src/unwind_result.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,5 @@
/// The outcome of unwinding one frame: either a cacheable rule to execute,
/// or an already-computed return address that must not be cached.
#[derive(Debug, Clone)]
pub enum UnwindResult<R> {
    /// Execute (and cache) this rule.
    ExecRule(R),
    /// The return address for this frame; not cacheable.
    Uncacheable(u64),
}

18
third_party/rust/framehop/src/unwind_rule.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,18 @@
use crate::error::Error;
pub trait UnwindRule: Copy + core::fmt::Debug {
type UnwindRegs;
fn exec<F>(
self,
is_first_frame: bool,
regs: &mut Self::UnwindRegs,
read_stack: &mut F,
) -> Result<Option<u64>, Error>
where
F: FnMut(u64) -> Result<u64, ()>;
fn rule_for_stub_functions() -> Self;
fn rule_for_function_start() -> Self;
fn fallback_rule() -> Self;
}

1013
third_party/rust/framehop/src/unwinder.rs поставляемый Normal file

Разница между файлами не показана из-за своего большого размера Загрузить разницу

10
third_party/rust/framehop/src/x86_64/arch.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,10 @@
use super::unwind_rule::UnwindRuleX86_64;
use super::unwindregs::UnwindRegsX86_64;
use crate::arch::Arch;
/// The x86_64 CPU architecture.
pub struct ArchX86_64;
// Wires up the x86_64-specific unwind rule and register types.
impl Arch for ArchX86_64 {
    type UnwindRule = UnwindRuleX86_64;
    type UnwindRegs = UnwindRegsX86_64;
}

32
third_party/rust/framehop/src/x86_64/cache.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,32 @@
use super::unwind_rule::*;
use crate::cache::*;
/// The unwinder cache type for [`UnwinderX86_64`](super::UnwinderX86_64).
pub struct CacheX86_64<P: AllocationPolicy = MayAllocateDuringUnwind>(
    pub Cache<UnwindRuleX86_64, P>,
);
impl CacheX86_64<MayAllocateDuringUnwind> {
    /// Create a new cache.
    pub fn new() -> Self {
        Self(Cache::new())
    }
}
impl<P: AllocationPolicy> CacheX86_64<P> {
    /// Create a new cache.
    ///
    /// Unlike `new`, this is available for any allocation policy `P`.
    pub fn new_in() -> Self {
        Self(Cache::new())
    }
    /// Returns a snapshot of the cache usage statistics.
    pub fn stats(&self) -> CacheStats {
        self.0.rule_cache.stats()
    }
}
impl<P: AllocationPolicy> Default for CacheX86_64<P> {
    fn default() -> Self {
        Self::new_in()
    }
}

164
third_party/rust/framehop/src/x86_64/dwarf.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,164 @@
use gimli::{
CfaRule, Encoding, EvaluationStorage, Reader, ReaderOffset, Register, RegisterRule,
UnwindContextStorage, UnwindSection, UnwindTableRow, X86_64,
};
use super::{arch::ArchX86_64, unwind_rule::UnwindRuleX86_64, unwindregs::UnwindRegsX86_64};
use crate::dwarf::{
eval_cfa_rule, eval_register_rule, ConversionError, DwarfUnwindRegs, DwarfUnwinderError,
DwarfUnwinding,
};
use crate::unwind_result::UnwindResult;
// Expose the three tracked x86_64 registers (return address, stack pointer,
// base pointer) to DWARF expression evaluation; all others are unavailable.
impl DwarfUnwindRegs for UnwindRegsX86_64 {
    fn get(&self, register: Register) -> Option<u64> {
        match register {
            X86_64::RA => Some(self.ip()),
            X86_64::RSP => Some(self.sp()),
            X86_64::RBP => Some(self.bp()),
            _ => None,
        }
    }
}
impl DwarfUnwinding for ArchX86_64 {
    /// Unwind one x86_64 frame from a DWARF CFI table row. Fast path: the
    /// row translates into a cacheable rule. Slow path: evaluate the CFA and
    /// register rules directly and update `regs` in place.
    fn unwind_frame<F, R, UCS, ES>(
        section: &impl UnwindSection<R>,
        unwind_info: &UnwindTableRow<R::Offset, UCS>,
        encoding: Encoding,
        regs: &mut Self::UnwindRegs,
        is_first_frame: bool,
        read_stack: &mut F,
    ) -> Result<UnwindResult<Self::UnwindRule>, DwarfUnwinderError>
    where
        F: FnMut(u64) -> Result<u64, ()>,
        R: Reader,
        UCS: UnwindContextStorage<R::Offset>,
        ES: EvaluationStorage<R>,
    {
        let cfa_rule = unwind_info.cfa();
        let bp_rule = unwind_info.register(X86_64::RBP);
        let ra_rule = unwind_info.register(X86_64::RA);
        match translate_into_unwind_rule(cfa_rule, &bp_rule, &ra_rule) {
            Ok(unwind_rule) => return Ok(UnwindResult::ExecRule(unwind_rule)),
            Err(_err) => {
                // Could not translate into a cacheable unwind rule. Fall back to the generic path.
                // eprintln!("Unwind rule translation failed: {:?}", err);
            }
        }
        let cfa = eval_cfa_rule::<R, _, ES>(section, cfa_rule, encoding, regs)
            .ok_or(DwarfUnwinderError::CouldNotRecoverCfa)?;
        let ip = regs.ip();
        let bp = regs.bp();
        let sp = regs.sp();
        // If rbp can't be recovered, keep the current value rather than fail.
        let new_bp = eval_register_rule::<R, F, _, ES>(
            section, bp_rule, cfa, encoding, bp, regs, read_stack,
        )
        .unwrap_or(bp);
        // If there is no usable RA rule, fall back to reading [CFA - 8],
        // where `call` pushes the return address.
        let return_address = match eval_register_rule::<R, F, _, ES>(
            section, ra_rule, cfa, encoding, ip, regs, read_stack,
        ) {
            Some(ra) => ra,
            None => {
                read_stack(cfa - 8).map_err(|_| DwarfUnwinderError::CouldNotRecoverReturnAddress)?
            }
        };
        // Sanity checks: refuse state that would loop or move the stack
        // pointer backwards (the latter is allowed for the first frame).
        if cfa == sp && return_address == ip {
            return Err(DwarfUnwinderError::DidNotAdvance);
        }
        if !is_first_frame && cfa < regs.sp() {
            return Err(DwarfUnwinderError::StackPointerMovedBackwards);
        }
        regs.set_ip(return_address);
        regs.set_bp(new_bp);
        regs.set_sp(cfa);
        Ok(UnwindResult::Uncacheable(return_address))
    }
    /// Rule used when the address has no covering FDE.
    fn rule_if_uncovered_by_fde() -> Self::UnwindRule {
        UnwindRuleX86_64::JustReturnIfFirstFrameOtherwiseFp
    }
}
fn register_rule_to_cfa_offset<RO: ReaderOffset>(
rule: &RegisterRule<RO>,
) -> Result<Option<i64>, ConversionError> {
match *rule {
RegisterRule::Undefined | RegisterRule::SameValue => Ok(None),
RegisterRule::Offset(offset) => Ok(Some(offset)),
_ => Err(ConversionError::RegisterNotStoredRelativeToCfa),
}
}
/// Try to translate one DWARF CFI table row (CFA rule + rbp rule + return
/// address rule) into a compact, cacheable [`UnwindRuleX86_64`]. Returns a
/// [`ConversionError`] for any row shape the rule representation cannot
/// express; the caller then falls back to full DWARF evaluation.
fn translate_into_unwind_rule<RO: ReaderOffset>(
    cfa_rule: &CfaRule<RO>,
    bp_rule: &RegisterRule<RO>,
    ra_rule: &RegisterRule<RO>,
) -> Result<UnwindRuleX86_64, ConversionError> {
    match ra_rule {
        RegisterRule::Undefined => {
            // No return address. This means that we've reached the end of the stack.
            return Ok(UnwindRuleX86_64::EndOfStack);
        }
        RegisterRule::Offset(offset) if *offset == -8 => {
            // This is the normal case. Return address is [CFA-8].
        }
        RegisterRule::Offset(_) => {
            // Unsupported, will have to use the slow path.
            return Err(ConversionError::ReturnAddressRuleWithUnexpectedOffset);
        }
        _ => {
            // Unsupported, will have to use the slow path.
            return Err(ConversionError::ReturnAddressRuleWasWeird);
        }
    }
    match cfa_rule {
        CfaRule::RegisterAndOffset { register, offset } => match *register {
            X86_64::RSP => {
                // CFA = rsp + offset. Offsets are stored divided by 8, so
                // they must fit the rule's u16/i16 fields after division.
                let sp_offset_by_8 =
                    u16::try_from(offset / 8).map_err(|_| ConversionError::SpOffsetDoesNotFit)?;
                let fp_cfa_offset = register_rule_to_cfa_offset(bp_rule)?;
                match fp_cfa_offset {
                    None => Ok(UnwindRuleX86_64::OffsetSp { sp_offset_by_8 }),
                    Some(bp_cfa_offset) => {
                        let bp_storage_offset_from_sp_by_8 =
                            i16::try_from((offset + bp_cfa_offset) / 8)
                                .map_err(|_| ConversionError::FpStorageOffsetDoesNotFit)?;
                        Ok(UnwindRuleX86_64::OffsetSpAndRestoreBp {
                            sp_offset_by_8,
                            bp_storage_offset_from_sp_by_8,
                        })
                    }
                }
            }
            X86_64::RBP => {
                // CFA = rbp + offset: only the standard frame-pointer layout
                // (CFA = rbp + 16, saved rbp at [CFA - 16]) is representable.
                let bp_cfa_offset = register_rule_to_cfa_offset(bp_rule)?
                    .ok_or(ConversionError::FramePointerRuleDoesNotRestoreBp)?;
                if *offset == 16 && bp_cfa_offset == -16 {
                    Ok(UnwindRuleX86_64::UseFramePointer)
                } else {
                    // TODO: Maybe handle this case. This case has been observed in _ffi_call_unix64,
                    // which has the following unwind table:
                    //
                    // 00000060 00000024 0000001c FDE cie=00000048 pc=000de548...000de6a6
                    //   0xde548: CFA=reg7+8: reg16=[CFA-8]
                    //   0xde562: CFA=reg6+32: reg6=[CFA-16], reg16=[CFA-8]
                    //   0xde5ad: CFA=reg7+8: reg16=[CFA-8]
                    //   0xde668: CFA=reg7+8: reg6=[CFA-16], reg16=[CFA-8]
                    Err(ConversionError::FramePointerRuleHasStrangeBpOffset)
                }
            }
            _ => Err(ConversionError::CfaIsOffsetFromUnknownRegister),
        },
        CfaRule::Expression(_) => Err(ConversionError::CfaIsExpression),
    }
}

Просмотреть файл

@ -0,0 +1,85 @@
use super::super::unwind_rule::UnwindRuleX86_64;
/// Check whether `pc_offset` points into a function epilogue and, if so,
/// return the unwind rule implied by the epilogue instructions that still
/// remain to be executed (pops followed by `ret` or a tail-call `jmp`).
pub fn unwind_rule_from_detected_epilogue(
    text_bytes: &[u8],
    pc_offset: usize,
) -> Option<UnwindRuleX86_64> {
    let (before_pc, from_pc) = text_bytes.split_at(pc_offset);

    // Number of 8-byte pops scanned past so far, and, if one of them was
    // `pop rbp`, the sp-relative slot index (in 8-byte units) it used.
    let mut pop_count: u16 = 0;
    let mut rbp_slot: Option<i16> = None;

    let mut remaining = from_pc;
    loop {
        match remaining {
            // Ran out of bytes without finding `ret` or a tail call.
            [] => return None,
            // `ret`: the epilogue ends here.
            [0xc3, ..] => break,
            // `jmp` (short / near / indirect).
            [0xeb, ..] | [0xe9, ..] | [0xff, ..] => {
                // This could be a tail call, or just a regular jump inside the
                // current function. Ideally we would check whether the jump
                // target is inside this function, but we don't have accurate
                // function bounds. Heuristic: a jmp directly following a
                // `pop` instruction is treated as a tail call.
                if pop_count != 0 {
                    // We already scanned past at least one pop.
                    break;
                }
                // This is the first scanned instruction; peek at the byte just
                // before pc. We don't know how long the previous instruction
                // is, so this might be the trailing byte of a wider
                // instruction - we accept that risk.
                match before_pc.last() {
                    // Looks like a preceding `pop rXX`.
                    Some(&prev) if prev & 0xf8 == 0x58 => break,
                    _ => return None,
                }
            }
            // `pop rbp`: remember which slot rbp gets restored from.
            [0x5d, rest @ ..] => {
                rbp_slot = Some(pop_count as i16);
                pop_count += 1;
                remaining = rest;
            }
            // `pop rXX`.
            [0x58..=0x5f, rest @ ..] => {
                pop_count += 1;
                remaining = rest;
            }
            // `pop rXX` with a REX prefix.
            [prefix, op, rest @ ..] if prefix & 0xfe == 0x40 && op & 0xf8 == 0x58 => {
                pop_count += 1;
                remaining = rest;
            }
            // Unexpected instruction - we probably weren't in an epilogue
            // after all.
            _ => return None,
        }
    }

    // We've found the return or the tail call.
    if pop_count == 0 {
        return Some(UnwindRuleX86_64::JustReturn);
    }
    // Add one for popping the return address itself.
    let sp_offset_by_8 = pop_count + 1;
    Some(match rbp_slot {
        Some(bp_storage_offset_from_sp_by_8) => UnwindRuleX86_64::OffsetSpAndRestoreBp {
            sp_offset_by_8,
            bp_storage_offset_from_sp_by_8,
        },
        None => UnwindRuleX86_64::OffsetSp { sp_offset_by_8 },
    })
}

Просмотреть файл

@ -0,0 +1,24 @@
use super::arch::ArchX86_64;
use crate::instruction_analysis::InstructionAnalysis;
mod epilogue;
mod prologue;
use epilogue::unwind_rule_from_detected_epilogue;
use prologue::unwind_rule_from_detected_prologue;
impl InstructionAnalysis for ArchX86_64 {
    /// If `pc_offset` lies inside a function prologue, return the unwind
    /// rule derived from the prologue bytes.
    fn rule_from_prologue_analysis(
        text_bytes: &[u8],
        pc_offset: usize,
    ) -> Option<Self::UnwindRule> {
        unwind_rule_from_detected_prologue(text_bytes, pc_offset)
    }

    /// If `pc_offset` lies inside a function epilogue, return the unwind
    /// rule derived from the epilogue bytes.
    fn rule_from_epilogue_analysis(
        text_bytes: &[u8],
        pc_offset: usize,
    ) -> Option<Self::UnwindRule> {
        unwind_rule_from_detected_epilogue(text_bytes, pc_offset)
    }
}

Просмотреть файл

@ -0,0 +1,100 @@
use super::super::unwind_rule::UnwindRuleX86_64;
/// Check whether `pc_offset` points into a function prologue and, if so,
/// return an unwind rule that undoes the pushes executed so far.
///
/// The byte at `pc_offset` must look like a prologue instruction; the stack
/// depth accumulated up to pc is then reconstructed by scanning backwards
/// over `push` instructions.
pub fn unwind_rule_from_detected_prologue(
    text_bytes: &[u8],
    pc_offset: usize,
) -> Option<UnwindRuleX86_64> {
    let (slice_from_start, slice_to_end) = text_bytes.split_at(pc_offset);
    if !is_next_instruction_expected_in_prologue(slice_to_end) {
        return None;
    }
    // We're in a prologue. Find the current stack depth of this frame by
    // walking backwards. This is risky business, because x86 is a variable
    // length encoding so you never know what you're looking at if you look
    // backwards.
    // Let's do it anyway and hope our heuristics are good enough so that
    // they work in more cases than they fail in.
    let mut cursor = slice_from_start.len();
    let mut sp_offset_by_8 = 0;
    loop {
        if cursor >= 4 {
            // Detect push rbp; mov rbp, rsp [0x55, 0x48 0x89 0xe5]
            if slice_from_start[cursor - 4..cursor] == [0x55, 0x48, 0x89, 0xe5] {
                // Frame pointer is already set up; no need to count pushes.
                return Some(UnwindRuleX86_64::UseFramePointer);
            }
        }
        if cursor >= 1 {
            // Detect push rXX with optional prefix
            let byte = slice_from_start[cursor - 1];
            if byte & 0xf8 == 0x50 {
                sp_offset_by_8 += 1;
                cursor -= 1;
                // Consume prefix, if present
                if cursor >= 1 && slice_from_start[cursor - 1] & 0xfe == 0x40 {
                    cursor -= 1;
                }
                continue;
            }
        }
        // Not a push: we've walked past the start of the prologue.
        break;
    }
    sp_offset_by_8 += 1; // Add one for popping the return address.
    Some(UnwindRuleX86_64::OffsetSp { sp_offset_by_8 })
}
}
/// Return whether `bytes` starts with an instruction that is typical for a
/// function prologue: a (possibly REX-prefixed) `push rXX`, a `sub rsp, imm`
/// with an 8-bit or 32-bit immediate, or `mov rbp, rsp`.
fn is_next_instruction_expected_in_prologue(bytes: &[u8]) -> bool {
    // Require at least 4 bytes so every pattern below can be matched without
    // bounds concerns; so close to the end of a function we would not be in
    // a prologue anyway.
    if bytes.len() < 4 {
        return false;
    }
    match bytes {
        // push rXX
        [0x50..=0x57, ..] => true,
        // push rXX with REX prefix
        [prefix, 0x50..=0x57, ..] if prefix & 0xfe == 0x40 => true,
        // sub rsp, imm8 — with or without REX.W prefix
        [0x83, 0xec, ..] | [0x48, 0x83, 0xec, ..] => true,
        // sub rsp, imm32 — with or without REX.W prefix
        [0x81, 0xec, ..] | [0x48, 0x81, 0xec, ..] => true,
        // mov rbp, rsp
        [0x48, 0x89, 0xe5, ..] => true,
        _ => false,
    }
}
// TODO: Write tests for different "sub" types
// 4e88e40 41 57 push r15
// 4e88e42 41 56 push r14
// 4e88e44 53 push rbx
// 4e88e45 48 81 EC 80 00 00 00 sub rsp, 0x80
// 4e88e4c 48 89 F3 mov rbx, rsi
//
//
// 4423f9 55 push rbp
// 4423fa 48 89 E5 mov rbp, rsp
// 4423fd 41 57 push r15
// 4423ff 41 56 push r14
// 442401 41 55 push r13
// 442403 41 54 push r12
// 442405 53 push rbx
// 442406 48 83 EC 18 sub rsp, 0x18
// 44240a 48 8B 07 mov rax, qword [rdi]

171
third_party/rust/framehop/src/x86_64/macho.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,171 @@
use super::arch::ArchX86_64;
use super::unwind_rule::UnwindRuleX86_64;
use crate::instruction_analysis::InstructionAnalysis;
use crate::macho::{CompactUnwindInfoUnwinderError, CompactUnwindInfoUnwinding, CuiUnwindResult};
use macho_unwind_info::opcodes::{OpcodeX86_64, RegisterNameX86_64};
use macho_unwind_info::Function;
impl CompactUnwindInfoUnwinding for ArchX86_64 {
    /// Unwind one frame using the compact unwind info opcode of `function`.
    ///
    /// For the first frame, pc may still be inside a prologue or epilogue
    /// (which the opcode does not describe), so instruction analysis on
    /// `function_bytes` takes precedence over the opcode.
    fn unwind_frame(
        function: Function,
        is_first_frame: bool,
        address_offset_within_function: usize,
        function_bytes: Option<&[u8]>,
    ) -> Result<CuiUnwindResult<UnwindRuleX86_64>, CompactUnwindInfoUnwinderError> {
        let opcode = OpcodeX86_64::parse(function.opcode);
        if is_first_frame {
            // The pc might be in a prologue or an epilogue. The compact unwind info format ignores
            // prologues and epilogues; the opcodes only describe the function body. So we do some
            // instruction analysis to check for prologues and epilogues.
            if let Some(function_bytes) = function_bytes {
                if let Some(rule) = Self::rule_from_instruction_analysis(
                    function_bytes,
                    address_offset_within_function,
                ) {
                    // We are inside a prologue / epilogue. Ignore the opcode and use the rule from
                    // instruction analysis.
                    return Ok(CuiUnwindResult::ExecRule(rule));
                }
                if opcode == OpcodeX86_64::Null
                    && function_bytes.starts_with(&[0x55, 0x48, 0x89, 0xe5])
                {
                    // The function is uncovered but it has a `push rbp; mov rbp, rsp` prologue.
                    return Ok(CuiUnwindResult::ExecRule(UnwindRuleX86_64::UseFramePointer));
                }
            }
            if opcode == OpcodeX86_64::Null {
                // Uncovered first-frame function without a recognizable
                // prologue: assume only the return address is on the stack.
                return Ok(CuiUnwindResult::ExecRule(UnwindRuleX86_64::JustReturn));
            }
        }
        // At this point we know with high certainty that we are in a function body.
        let r = match opcode {
            OpcodeX86_64::Null => {
                return Err(CompactUnwindInfoUnwinderError::FunctionHasNoInfo);
            }
            OpcodeX86_64::FramelessImmediate {
                stack_size_in_bytes,
                saved_regs,
            } => {
                if stack_size_in_bytes == 8 {
                    // Only the return address is on the stack.
                    CuiUnwindResult::ExecRule(UnwindRuleX86_64::JustReturn)
                } else {
                    // Position of rbp among the saved registers, counted from
                    // the end of the `saved_regs` array (skipping `None`s).
                    let bp_positon_from_outside = saved_regs
                        .iter()
                        .rev()
                        .flatten()
                        .position(|r| *r == RegisterNameX86_64::Rbp);
                    match bp_positon_from_outside {
                        Some(pos) => {
                            // Compute the sp-relative location rbp was saved at.
                            let bp_offset_from_sp =
                                stack_size_in_bytes as i32 - 2 * 8 - pos as i32 * 8;
                            let bp_storage_offset_from_sp_by_8 =
                                i16::try_from(bp_offset_from_sp / 8).map_err(|_| {
                                    CompactUnwindInfoUnwinderError::BpOffsetDoesNotFit
                                })?;
                            CuiUnwindResult::ExecRule(UnwindRuleX86_64::OffsetSpAndRestoreBp {
                                sp_offset_by_8: stack_size_in_bytes / 8,
                                bp_storage_offset_from_sp_by_8,
                            })
                        }
                        None => CuiUnwindResult::ExecRule(UnwindRuleX86_64::OffsetSp {
                            sp_offset_by_8: stack_size_in_bytes / 8,
                        }),
                    }
                }
            }
            OpcodeX86_64::FramelessIndirect {
                immediate_offset_from_function_start,
                stack_adjust_in_bytes,
                saved_regs,
            } => {
                // The frame size is not in the opcode; it's the 32-bit
                // immediate of a `sub` instruction inside the function, which
                // we must read from the function's text bytes.
                let function_bytes = function_bytes.ok_or(
                    CompactUnwindInfoUnwinderError::NoTextBytesToLookUpIndirectStackOffset,
                )?;
                let sub_immediate_bytes = function_bytes
                    .get(
                        immediate_offset_from_function_start as usize
                            ..immediate_offset_from_function_start as usize + 4,
                    )
                    .ok_or(CompactUnwindInfoUnwinderError::IndirectStackOffsetOutOfBounds)?;
                let sub_immediate = u32::from_le_bytes([
                    sub_immediate_bytes[0],
                    sub_immediate_bytes[1],
                    sub_immediate_bytes[2],
                    sub_immediate_bytes[3],
                ]);
                let stack_size_in_bytes =
                    sub_immediate
                        .checked_add(stack_adjust_in_bytes.into())
                        .ok_or(CompactUnwindInfoUnwinderError::StackAdjustOverflow)?;
                let sp_offset_by_8 = u16::try_from(stack_size_in_bytes / 8)
                    .map_err(|_| CompactUnwindInfoUnwinderError::StackSizeDoesNotFit)?;
                // Same rbp lookup as in the FramelessImmediate case above.
                let bp_positon_from_outside = saved_regs
                    .iter()
                    .rev()
                    .flatten()
                    .position(|r| *r == RegisterNameX86_64::Rbp);
                match bp_positon_from_outside {
                    Some(pos) => {
                        let bp_offset_from_sp = stack_size_in_bytes as i32 - 2 * 8 - pos as i32 * 8;
                        let bp_storage_offset_from_sp_by_8 =
                            i16::try_from(bp_offset_from_sp / 8)
                                .map_err(|_| CompactUnwindInfoUnwinderError::BpOffsetDoesNotFit)?;
                        CuiUnwindResult::ExecRule(UnwindRuleX86_64::OffsetSpAndRestoreBp {
                            sp_offset_by_8,
                            bp_storage_offset_from_sp_by_8,
                        })
                    }
                    None => {
                        CuiUnwindResult::ExecRule(UnwindRuleX86_64::OffsetSp { sp_offset_by_8 })
                    }
                }
            }
            // Defer to the DWARF unwinder for this frame.
            OpcodeX86_64::Dwarf { eh_frame_fde } => CuiUnwindResult::NeedDwarf(eh_frame_fde),
            OpcodeX86_64::FrameBased { .. } => {
                CuiUnwindResult::ExecRule(UnwindRuleX86_64::UseFramePointer)
            }
            OpcodeX86_64::UnrecognizedKind(kind) => {
                return Err(CompactUnwindInfoUnwinderError::BadOpcodeKind(kind))
            }
            OpcodeX86_64::InvalidFrameless => {
                return Err(CompactUnwindInfoUnwinderError::InvalidFrameless)
            }
        };
        Ok(r)
    }

    /// Hardcoded unwind rules for the dyld stub-helper code, selected by the
    /// pc's byte `offset` into the section (layout shown below).
    fn rule_for_stub_helper(
        offset: u32,
    ) -> Result<CuiUnwindResult<UnwindRuleX86_64>, CompactUnwindInfoUnwinderError> {
        // shared:
        //  +0x0  235cc4  4C 8D 1D 3D 03 04 00  lea r11, qword [dyld_stub_binder_276000+8]
        //  +0x7  235ccb  41 53                 push r11
        //  +0x9  235ccd  FF 25 2D 03 04 00     jmp qword [dyld_stub_binder_276000] ; tail call
        //  +0xf  235cd3  90                    nop
        // first stub:
        //  +0x10 235cd4  68 F1 61 00 00        push 0x61f1
        //  +0x15 235cd9  E9 E6 FF FF FF        jmp 0x235cc4 ; jump to shared
        // second stub:
        //  +0x1a 235cde  68 38 62 00 00        push 0x6238
        //  +0x1f 235ce3  E9 DC FF FF FF        jmp 0x235cc4 ; jump to shared
        let rule = if offset < 0x7 {
            // pop 1 and return
            UnwindRuleX86_64::OffsetSp { sp_offset_by_8: 2 }
        } else if offset < 0x10 {
            // pop 2 and return
            UnwindRuleX86_64::OffsetSp { sp_offset_by_8: 3 }
        } else {
            // Inside one of the 10-byte stubs that follow the shared part.
            let offset_after_shared = offset - 0x10;
            let offset_within_stub = offset_after_shared % 10;
            if offset_within_stub < 5 {
                UnwindRuleX86_64::JustReturn
                // just return
            } else {
                // pop 1 and return
                UnwindRuleX86_64::OffsetSp { sp_offset_by_8: 2 }
            }
        };
        Ok(CuiUnwindResult::ExecRule(rule))
    }
}

18
third_party/rust/framehop/src/x86_64/mod.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,18 @@
mod arch;
mod cache;
mod dwarf;
mod instruction_analysis;
#[cfg(feature = "macho")]
mod macho;
#[cfg(feature = "pe")]
mod pe;
mod register_ordering;
mod unwind_rule;
mod unwinder;
mod unwindregs;
pub use arch::*;
pub use cache::*;
pub use unwind_rule::*;
pub use unwinder::*;
pub use unwindregs::*;

224
third_party/rust/framehop/src/x86_64/pe.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,224 @@
use super::{
arch::ArchX86_64,
unwind_rule::{OffsetOrPop, UnwindRuleX86_64},
unwindregs::Reg,
};
use crate::arch::Arch;
use crate::pe::{PeSections, PeUnwinderError, PeUnwinding};
use crate::unwind_result::UnwindResult;
use core::ops::ControlFlow;
use alloc::vec::Vec;
use pe_unwind_info::x86_64::{
FunctionEpilogInstruction, FunctionTableEntries, Register, UnwindInfo, UnwindInfoTrailer,
UnwindOperation, UnwindState,
};
/// Adapter exposing framehop's register set and stack-read callback through
/// the `UnwindState` interface of the `pe_unwind_info` crate.
struct State<'a, F> {
    regs: &'a mut <ArchX86_64 as Arch>::UnwindRegs,
    read_stack: &'a mut F,
}
impl<F> UnwindState for State<'_, F>
where
    F: FnMut(u64) -> Result<u64, ()>,
{
    /// Read a general-purpose register from the unwind register set.
    fn read_register(&mut self, register: Register) -> u64 {
        self.regs.get(convert_pe_register(register))
    }

    /// Read a u64 from the stack; `None` if the stack data is unavailable.
    fn read_stack(&mut self, addr: u64) -> Option<u64> {
        (self.read_stack)(addr).ok()
    }

    /// Write a general-purpose register in the unwind register set.
    fn write_register(&mut self, register: Register, value: u64) {
        self.regs.set(convert_pe_register(register), value)
    }

    fn write_xmm_register(&mut self, _register: pe_unwind_info::x86_64::XmmRegister, _value: u128) {
        // Ignore: xmm registers are not tracked during unwinding.
    }
}
/// Map a `pe_unwind_info` register name onto framehop's `Reg` enum.
fn convert_pe_register(r: Register) -> Reg {
    match r {
        Register::RAX => Reg::RAX,
        Register::RCX => Reg::RCX,
        Register::RDX => Reg::RDX,
        Register::RBX => Reg::RBX,
        Register::RSP => Reg::RSP,
        Register::RBP => Reg::RBP,
        Register::RSI => Reg::RSI,
        Register::RDI => Reg::RDI,
        Register::R8 => Reg::R8,
        Register::R9 => Reg::R9,
        Register::R10 => Reg::R10,
        Register::R11 => Reg::R11,
        Register::R12 => Reg::R12,
        Register::R13 => Reg::R13,
        Register::R14 => Reg::R14,
        Register::R15 => Reg::R15,
    }
}
/// Translate a PE epilog instruction into the generic `OffsetOrPop` form
/// used by `UnwindRuleX86_64::for_sequence_of_offset_or_pop`.
impl From<&'_ FunctionEpilogInstruction> for OffsetOrPop {
    fn from(value: &'_ FunctionEpilogInstruction) -> Self {
        match value {
            FunctionEpilogInstruction::AddSP(offset) => {
                // Offsets that don't fit (not a multiple-of-8 representation
                // in u16) map to `None`, which makes the caller bail out.
                if let Ok(v) = (offset / 8).try_into() {
                    OffsetOrPop::OffsetBy8(v)
                } else {
                    OffsetOrPop::None
                }
            }
            FunctionEpilogInstruction::Pop(reg) => OffsetOrPop::Pop(convert_pe_register(*reg)),
            _ => OffsetOrPop::None,
        }
    }
}
/// Translate a PE unwind operation into the generic `OffsetOrPop` form
/// used by `UnwindRuleX86_64::for_sequence_of_offset_or_pop`.
impl From<&'_ UnwindOperation> for OffsetOrPop {
    fn from(value: &'_ UnwindOperation) -> Self {
        match value {
            UnwindOperation::UnStackAlloc(offset) => {
                // Offsets that don't fit map to `None`, making the caller bail.
                if let Ok(v) = (offset / 8).try_into() {
                    OffsetOrPop::OffsetBy8(v)
                } else {
                    OffsetOrPop::None
                }
            }
            UnwindOperation::PopNonVolatile(reg) => OffsetOrPop::Pop(convert_pe_register(*reg)),
            _ => OffsetOrPop::None,
        }
    }
}
impl PeUnwinding for ArchX86_64 {
    /// Unwind one frame using Windows x64 unwind info (.pdata/.xdata).
    ///
    /// Returns a cacheable `ExecRule` when the unwind operations can be
    /// expressed as a compact rule, otherwise resolves the operations against
    /// the live register state and returns an `Uncacheable` return address.
    fn unwind_frame<F, D>(
        sections: PeSections<D>,
        address: u32,
        regs: &mut Self::UnwindRegs,
        is_first_frame: bool,
        read_stack: &mut F,
    ) -> Result<UnwindResult<Self::UnwindRule>, PeUnwinderError>
    where
        F: FnMut(u64) -> Result<u64, ()>,
        D: core::ops::Deref<Target = [u8]>,
    {
        let entries = FunctionTableEntries::parse(sections.pdata);
        let Some(function) = entries.lookup(address) else {
            // Not covered by the function table: treat as a leaf function.
            return Ok(UnwindResult::ExecRule(UnwindRuleX86_64::JustReturn));
        };

        // Helper that converts a failed stack read into a PeUnwinderError.
        let read_stack_err = |read_stack: &mut F, addr| {
            read_stack(addr).map_err(|()| PeUnwinderError::MissingStackData(Some(addr)))
        };

        let unwind_info_address = function.unwind_info_address.get();
        let unwind_info =
            UnwindInfo::parse(sections.unwind_info_memory_at_rva(unwind_info_address)?)
                .ok_or(PeUnwinderError::UnwindInfoParseError)?;

        if is_first_frame {
            // Check whether the address is in the function epilog. If so, we need to
            // simulate the remaining epilog instructions (unwind codes don't account for
            // unwinding from the epilog). We only need to check this for the first unwind info (if
            // there are chained infos).
            let bytes = (function.end_address.get() - address) as usize;
            let instruction = &sections.text_memory_at_rva(address)?[..bytes];
            if let Ok(epilog_instructions) =
                FunctionEpilogInstruction::parse_sequence(instruction, unwind_info.frame_register())
            {
                // If the epilog is an optional AddSP followed by Pops, we can return a cache
                // rule.
                if let Some(rule) =
                    UnwindRuleX86_64::for_sequence_of_offset_or_pop(epilog_instructions.iter())
                {
                    return Ok(UnwindResult::ExecRule(rule));
                }

                // Otherwise simulate the remaining epilog instructions against
                // the live register state.
                for instruction in epilog_instructions.iter() {
                    match instruction {
                        FunctionEpilogInstruction::AddSP(offset) => {
                            let rsp = regs.get(Reg::RSP);
                            regs.set(Reg::RSP, rsp + *offset as u64);
                        }
                        FunctionEpilogInstruction::AddSPFromFP(offset) => {
                            let fp = unwind_info
                                .frame_register()
                                .expect("invalid fp register offset");
                            let fp = convert_pe_register(fp);
                            let fp = regs.get(fp);
                            regs.set(Reg::RSP, fp + *offset as u64);
                        }
                        FunctionEpilogInstruction::Pop(reg) => {
                            let rsp = regs.get(Reg::RSP);
                            let val = read_stack_err(read_stack, rsp)?;
                            regs.set(convert_pe_register(*reg), val);
                            regs.set(Reg::RSP, rsp + 8);
                        }
                    }
                }

                // After the simulated epilog, rsp points at the return address.
                let rsp = regs.get(Reg::RSP);
                let ra = read_stack_err(read_stack, rsp)?;
                regs.set(Reg::RSP, rsp + 8);

                return Ok(UnwindResult::Uncacheable(ra));
            }
        }

        // Get all chained UnwindInfo and resolve errors when collecting.
        let chained_info = core::iter::successors(Some(Ok(unwind_info)), |info| {
            let Ok(info) = info else {
                return None;
            };
            if let Some(UnwindInfoTrailer::ChainedUnwindInfo { chained }) = info.trailer() {
                let unwind_info_address = chained.unwind_info_address.get();
                Some(
                    sections
                        .unwind_info_memory_at_rva(unwind_info_address)
                        .and_then(|data| {
                            UnwindInfo::parse(data).ok_or(PeUnwinderError::UnwindInfoParseError)
                        }),
                )
            } else {
                None
            }
        })
        .collect::<Result<Vec<_>, _>>()?;

        // Get all operations across chained UnwindInfo. The first should be filtered to only those
        // operations which are before the offset in the function.
        let offset = address - function.begin_address.get();
        let operations = chained_info.into_iter().enumerate().flat_map(|(i, info)| {
            info.unwind_operations()
                .skip_while(move |(o, _)| i == 0 && *o as u32 > offset)
                .map(|(_, op)| op)
        });

        // We need to collect operations to first check (without losing ownership) whether an
        // unwind rule can be returned.
        let operations = operations.collect::<Vec<_>>();
        if let Some(rule) = UnwindRuleX86_64::for_sequence_of_offset_or_pop(operations.iter()) {
            return Ok(UnwindResult::ExecRule(rule));
        }

        // Resolve operations to get the return address.
        let mut state = State { regs, read_stack };
        for op in operations {
            if let ControlFlow::Break(ra) = unwind_info
                .resolve_operation(&mut state, &op)
                .ok_or(PeUnwinderError::MissingStackData(None))?
            {
                return Ok(UnwindResult::Uncacheable(ra));
            }
        }

        // No operation produced the return address; it is now at the top of
        // the stack.
        let rsp = regs.get(Reg::RSP);
        let ra = read_stack_err(read_stack, rsp)?;
        regs.set(Reg::RSP, rsp + 8);

        Ok(UnwindResult::Uncacheable(ra))
    }
}

84
third_party/rust/framehop/src/x86_64/register_ordering.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,84 @@
use super::unwindregs::Reg;
use arrayvec::ArrayVec;
/// The eight registers covered by the compressed register ordering, in their
/// canonical order. (RSP is not included; its restoration is implicit.)
const ENCODE_REGISTERS: [Reg; 8] = [
    Reg::RBX,
    Reg::RBP,
    Reg::RDI,
    Reg::RSI,
    Reg::R12,
    Reg::R13,
    Reg::R14,
    Reg::R15,
];
/// Decode a register ordering previously packed by [`encode`].
///
/// `encoded_ordering` is a mixed-radix number: each digit (radix 8, then 7,
/// then 6, ...) selects which of the not-yet-chosen registers comes next.
/// Only the first `count` registers of the decoded ordering are returned.
pub fn decode(count: u8, encoded_ordering: u16) -> ArrayVec<Reg, 8> {
    let mut regs: ArrayVec<Reg, 8> = ENCODE_REGISTERS.into();
    let mut r = encoded_ordering;
    let mut n: u16 = 8;
    while r != 0 {
        // The next digit picks an element from the remaining tail of `regs`
        // and moves it to the front of that tail.
        let index = r % n;
        if index != 0 {
            regs[(8 - n as usize)..].swap(index as usize, 0);
        }
        r /= n;
        n -= 1;
    }
    regs.truncate(count as usize);
    regs
}
/// Encode an ordering of distinct callee-save registers as a compact
/// `(count, encoded_ordering)` pair, the inverse of [`decode`].
///
/// Returns `None` if `registers` is longer than 8, contains a register
/// outside [`ENCODE_REGISTERS`], or contains a duplicate.
pub fn encode(registers: &[Reg]) -> Option<(u8, u16)> {
    if registers.len() > ENCODE_REGISTERS.len() {
        return None;
    }
    let count = registers.len() as u8;
    let mut r: u16 = 0;
    let mut reg_order: ArrayVec<Reg, 8> = ENCODE_REGISTERS.into();
    let mut scale: u16 = 1;
    for (i, reg) in registers.iter().enumerate() {
        // Digit i is the position of `reg` among the registers not yet used.
        // (A duplicate register is no longer present in the tail -> `None`.)
        let index = reg_order[i..].iter().position(|r| r == reg)?;
        if index as u16 != 0 {
            reg_order[i..].swap(index, 0);
        }
        r += index as u16 * scale;
        scale *= 8 - i as u16;
    }
    Some((count, r))
}
#[cfg(test)]
mod test {
    use super::*;

    // Orderings that intentionally have no encoding.
    #[test]
    fn unhandled_orderings() {
        use super::Reg::*;
        assert_eq!(encode(&[RAX]), None, "RAX is a volatile register, i.e. not a callee-save register, so it does not need to be restored during epilogs and is not covered by the encoding.");
        assert_eq!(encode(&[RSI, RSI]), None, "Valid register orderings only contain each register (at most) once, so there is no encoding for a sequence with repeated registers.");
    }

    #[test]
    fn roundtrip_all() {
        // Test all possible register orderings.
        // That is, for all permutations of length 0 to 8 of the ENCODE_REGISTERS array, check that
        // the register ordering rountrips successfully through encoding and decoding.
        use itertools::Itertools;
        for permutation in (0..=8).flat_map(|k| ENCODE_REGISTERS.iter().cloned().permutations(k)) {
            let permutation = permutation.as_slice();
            let encoding = encode(permutation);
            if let Some((count, encoded)) = encoding {
                assert_eq!(
                    decode(count, encoded).as_slice(),
                    permutation,
                    "Register permutation should roundtrip correctly",
                );
            } else {
                panic!("Register permutation failed to encode: {permutation:?}");
            }
        }
    }
}

309
third_party/rust/framehop/src/x86_64/unwind_rule.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,309 @@
use super::register_ordering;
use super::unwindregs::{Reg, UnwindRegsX86_64};
use crate::add_signed::checked_add_signed;
use crate::error::Error;
use crate::unwind_rule::UnwindRule;
use arrayvec::ArrayVec;
/// Compact, cacheable unwind rules for x86_64.
///
/// For all of these: return address is *(new_sp - 8)
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum UnwindRuleX86_64 {
    /// Stop unwinding: there is no caller frame.
    EndOfStack,
    /// (sp, bp) = (sp + 8, bp)
    JustReturn,
    /// (sp, bp) = if is_first_frame (sp + 8, bp) else (bp + 16, *bp)
    JustReturnIfFirstFrameOtherwiseFp,
    /// (sp, bp) = (sp + 8x, bp)
    OffsetSp {
        sp_offset_by_8: u16,
    },
    /// (sp, bp) = (sp + 8x, *(sp + 8y))
    OffsetSpAndRestoreBp {
        sp_offset_by_8: u16,
        bp_storage_offset_from_sp_by_8: i16,
    },
    /// (sp, bp) = (bp + 16, *bp)
    UseFramePointer,
    /// (sp, ...) = (sp + 8 * (offset + register count), ... popped according to encoded ordering)
    ///
    /// This supports the common case of pushed callee-saved registers followed by a stack
    /// allocation. Up to 8 registers can be stored, which covers all callee-saved registers (aside
    /// from RSP which is implicit).
    ///
    /// The registers are stored in a separate compressed ordering to facilitate restoring register
    /// values if desired. If not for this we could simply store the total offset.
    OffsetSpAndPopRegisters {
        /// The additional stack pointer offset to undo before popping the registers, divided by 8 bytes.
        sp_offset_by_8: u16,
        /// The number of registers to pop from the stack.
        register_count: u8,
        /// An encoded ordering of the callee-save registers to pop from the stack, see register_ordering.
        encoded_registers_to_pop: u16,
    },
}
/// One epilogue-style stack operation: either a stack-pointer adjustment
/// (in 8-byte units) or a pop of one register. Sequences of these are turned
/// into a compact rule by [`UnwindRuleX86_64::for_sequence_of_offset_or_pop`].
pub enum OffsetOrPop {
    /// An operation that cannot be represented; aborts rule construction.
    None,
    OffsetBy8(u16),
    Pop(Reg),
}
impl UnwindRuleX86_64 {
    /// Get the rule which represents the given operations, if possible.
    ///
    /// Only an optional leading stack-pointer offset followed by register
    /// pops can be represented; any other sequence yields `None`.
    pub fn for_sequence_of_offset_or_pop<I, T>(iter: I) -> Option<Self>
    where
        I: Iterator<Item = T>,
        T: Into<OffsetOrPop>,
    {
        let mut iter = iter.map(Into::into).peekable();
        // An offset is only accepted at the very start of the sequence.
        let sp_offset_by_8 = if let Some(&OffsetOrPop::OffsetBy8(offset)) = iter.peek() {
            iter.next();
            offset
        } else {
            0
        };

        let mut regs = ArrayVec::<Reg, 8>::new();
        for i in iter {
            if let OffsetOrPop::Pop(reg) = i {
                // If try_push errors we've exceeded the number of supported registers: there's no
                // way to encode these operations as an unwind rule.
                regs.try_push(reg).ok()?;
            } else {
                return None;
            }
        }

        if regs.is_empty() && sp_offset_by_8 == 0 {
            Some(Self::JustReturn)
        } else {
            // `encode` returns None for registers outside the encodable set.
            let (register_count, encoded_registers_to_pop) = register_ordering::encode(&regs)?;
            Some(Self::OffsetSpAndPopRegisters {
                sp_offset_by_8,
                register_count,
                encoded_registers_to_pop,
            })
        }
    }
}
impl UnwindRule for UnwindRuleX86_64 {
    type UnwindRegs = UnwindRegsX86_64;

    fn rule_for_stub_functions() -> Self {
        UnwindRuleX86_64::JustReturn
    }
    fn rule_for_function_start() -> Self {
        UnwindRuleX86_64::JustReturn
    }
    fn fallback_rule() -> Self {
        UnwindRuleX86_64::UseFramePointer
    }

    /// Apply this rule to `regs`: compute the caller's (sp, bp), read the
    /// return address at `*(new_sp - 8)`, and advance `regs` to the caller's
    /// frame. `Ok(None)` means the end of the stack was reached.
    fn exec<F>(
        self,
        is_first_frame: bool,
        regs: &mut UnwindRegsX86_64,
        read_stack: &mut F,
    ) -> Result<Option<u64>, Error>
    where
        F: FnMut(u64) -> Result<u64, ()>,
    {
        let sp = regs.sp();
        let (new_sp, new_bp) = match self {
            UnwindRuleX86_64::EndOfStack => return Ok(None),
            UnwindRuleX86_64::JustReturn => {
                let new_sp = sp.checked_add(8).ok_or(Error::IntegerOverflow)?;
                (new_sp, regs.bp())
            }
            UnwindRuleX86_64::JustReturnIfFirstFrameOtherwiseFp => {
                if is_first_frame {
                    let new_sp = sp.checked_add(8).ok_or(Error::IntegerOverflow)?;
                    (new_sp, regs.bp())
                } else {
                    // Same frame-pointer walk as UseFramePointer below.
                    let sp = regs.sp();
                    let bp = regs.bp();
                    let new_sp = bp.checked_add(16).ok_or(Error::IntegerOverflow)?;
                    if new_sp <= sp {
                        return Err(Error::FramepointerUnwindingMovedBackwards);
                    }
                    let new_bp = read_stack(bp).map_err(|_| Error::CouldNotReadStack(bp))?;
                    (new_sp, new_bp)
                }
            }
            UnwindRuleX86_64::OffsetSp { sp_offset_by_8 } => {
                let sp_offset = u64::from(sp_offset_by_8) * 8;
                let new_sp = sp.checked_add(sp_offset).ok_or(Error::IntegerOverflow)?;
                (new_sp, regs.bp())
            }
            UnwindRuleX86_64::OffsetSpAndRestoreBp {
                sp_offset_by_8,
                bp_storage_offset_from_sp_by_8,
            } => {
                let sp_offset = u64::from(sp_offset_by_8) * 8;
                let new_sp = sp.checked_add(sp_offset).ok_or(Error::IntegerOverflow)?;
                let bp_storage_offset_from_sp = i64::from(bp_storage_offset_from_sp_by_8) * 8;
                let bp_location = checked_add_signed(sp, bp_storage_offset_from_sp)
                    .ok_or(Error::IntegerOverflow)?;
                let new_bp = match read_stack(bp_location) {
                    Ok(new_bp) => new_bp,
                    Err(()) if is_first_frame && bp_location < sp => {
                        // Ignore errors when reading beyond the stack pointer in the first frame.
                        // These negative offsets are sometimes seen in x86_64 epilogues, where
                        // a bunch of registers are popped one after the other, and the compiler
                        // doesn't always set the already-popped register to "unchanged" (because
                        // doing so would take up extra space in the dwarf information).
                        // read_stack may legitimately refuse to read beyond the stack pointer,
                        // for example when the stack bytes are coming from a linux perf event
                        // sample record, where the ustack bytes are copied starting from sp.
                        regs.bp()
                    }
                    Err(()) => return Err(Error::CouldNotReadStack(bp_location)),
                };
                (new_sp, new_bp)
            }
            UnwindRuleX86_64::UseFramePointer => {
                // Do a frame pointer stack walk. Code that is compiled with frame pointers
                // has the following function prologues and epilogues:
                //
                // Function prologue:
                // pushq  %rbp
                // movq   %rsp, %rbp
                //
                // Function epilogue:
                // popq   %rbp
                // ret
                //
                // Functions are called with callq; callq pushes the return address onto the stack.
                // When a function reaches its end, ret pops the return address from the stack and jumps to it.
                // So when a function is called, we have the following stack layout:
                //
                //                                                                     [... rest of the stack]
                //                                                                     ^ rsp           ^ rbp
                //     callq some_function
                //                                                   [return address]  [... rest of the stack]
                //                                                   ^ rsp                             ^ rbp
                //     pushq %rbp
                //                         [caller's frame pointer]  [return address]  [... rest of the stack]
                //                         ^ rsp                                                       ^ rbp
                //     movq %rsp, %rbp
                //                         [caller's frame pointer]  [return address]  [... rest of the stack]
                //                         ^ rsp, rbp
                //     <other instructions>
                //       [... more stack]  [caller's frame pointer]  [return address]  [... rest of the stack]
                //       ^ rsp             ^ rbp
                //
                // So: *rbp is the caller's frame pointer, and *(rbp + 8) is the return address.
                //
                // Or, in other words, the following linked list is built up on the stack:
                // #[repr(C)]
                // struct CallFrameInfo {
                //     previous: *const CallFrameInfo,
                //     return_address: *const c_void,
                // }
                // and rbp is a *const CallFrameInfo.
                let sp = regs.sp();
                let bp = regs.bp();
                if bp == 0 {
                    // A zero rbp conventionally terminates the chain.
                    return Ok(None);
                }
                let new_sp = bp.checked_add(16).ok_or(Error::IntegerOverflow)?;
                if new_sp <= sp {
                    return Err(Error::FramepointerUnwindingMovedBackwards);
                }
                let new_bp = read_stack(bp).map_err(|_| Error::CouldNotReadStack(bp))?;
                // new_bp is the caller's bp. If the caller uses frame pointers, then bp should be
                // a valid frame pointer and we could do a coherency check on new_bp to make sure
                // it's moving in the right direction. But if the caller is using bp as a general
                // purpose register, then any value (including zero) would be a valid value.
                // At this point we don't know how the caller uses bp, so we leave new_bp unchecked.
                (new_sp, new_bp)
            }
            UnwindRuleX86_64::OffsetSpAndPopRegisters {
                sp_offset_by_8,
                register_count,
                encoded_registers_to_pop,
            } => {
                // Undo the stack allocation, then pop the encoded registers,
                // restoring each one's value as we go.
                let sp = regs.sp();
                let mut sp = sp
                    .checked_add(sp_offset_by_8 as u64 * 8)
                    .ok_or(Error::IntegerOverflow)?;
                for reg in register_ordering::decode(register_count, encoded_registers_to_pop) {
                    let value = read_stack(sp).map_err(|_| Error::CouldNotReadStack(sp))?;
                    sp = sp.checked_add(8).ok_or(Error::IntegerOverflow)?;
                    regs.set(reg, value);
                }
                // Final +8 accounts for popping the return address.
                (sp.checked_add(8).ok_or(Error::IntegerOverflow)?, regs.bp())
            }
        };
        let return_address =
            read_stack(new_sp - 8).map_err(|_| Error::CouldNotReadStack(new_sp - 8))?;
        if return_address == 0 {
            // A zero return address terminates the stack walk.
            return Ok(None);
        }
        if new_sp == sp && return_address == regs.ip() {
            return Err(Error::DidNotAdvance);
        }
        regs.set_ip(return_address);
        regs.set_sp(new_sp);
        regs.set_bp(new_bp);
        Ok(Some(return_address))
    }
}
#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn test_basic() {
        // Fake 128-byte stack; reads map address/8 to an index in this array.
        let stack = [
            1, 2, 0x100300, 4, 0x40, 0x100200, 5, 6, 0x70, 0x100100, 7, 8, 9, 10, 0x0, 0x0,
        ];
        let mut read_stack = |addr| Ok(stack[(addr / 8) as usize]);
        let mut regs = UnwindRegsX86_64::new(0x100400, 0x10, 0x20);
        // Pop one word: return address comes from stack[2].
        let res =
            UnwindRuleX86_64::OffsetSp { sp_offset_by_8: 1 }.exec(true, &mut regs, &mut read_stack);
        assert_eq!(res, Ok(Some(0x100300)));
        assert_eq!(regs.ip(), 0x100300);
        assert_eq!(regs.sp(), 0x18);
        assert_eq!(regs.bp(), 0x20);
        // Then walk the frame-pointer chain twice.
        let res = UnwindRuleX86_64::UseFramePointer.exec(true, &mut regs, &mut read_stack);
        assert_eq!(res, Ok(Some(0x100200)));
        assert_eq!(regs.ip(), 0x100200);
        assert_eq!(regs.sp(), 0x30);
        assert_eq!(regs.bp(), 0x40);
        let res = UnwindRuleX86_64::UseFramePointer.exec(false, &mut regs, &mut read_stack);
        assert_eq!(res, Ok(Some(0x100100)));
        assert_eq!(regs.ip(), 0x100100);
        assert_eq!(regs.sp(), 0x50);
        assert_eq!(regs.bp(), 0x70);
        // The final frame has a zero return address: end of stack.
        let res = UnwindRuleX86_64::UseFramePointer.exec(false, &mut regs, &mut read_stack);
        assert_eq!(res, Ok(None));
    }

    #[test]
    fn test_overflow() {
        // This test makes sure that debug builds don't panic when trying to use frame pointer
        // unwinding on code that was using the bp register as a general-purpose register and
        // storing -1 in it. -1 is u64::MAX, so an unchecked add panics in debug builds.
        let stack = [
            1, 2, 0x100300, 4, 0x40, 0x100200, 5, 6, 0x70, 0x100100, 7, 8, 9, 10, 0x0, 0x0,
        ];
        let mut read_stack = |addr| Ok(stack[(addr / 8) as usize]);
        let mut regs = UnwindRegsX86_64::new(0x100400, u64::MAX / 8 * 8, u64::MAX);
        let res = UnwindRuleX86_64::JustReturn.exec(true, &mut regs, &mut read_stack);
        assert_eq!(res, Err(Error::IntegerOverflow));
        let res =
            UnwindRuleX86_64::OffsetSp { sp_offset_by_8: 1 }.exec(true, &mut regs, &mut read_stack);
        assert_eq!(res, Err(Error::IntegerOverflow));
        let res = UnwindRuleX86_64::OffsetSpAndRestoreBp {
            sp_offset_by_8: 1,
            bp_storage_offset_from_sp_by_8: 2,
        }
        .exec(true, &mut regs, &mut read_stack);
        assert_eq!(res, Err(Error::IntegerOverflow));
        let res = UnwindRuleX86_64::UseFramePointer.exec(true, &mut regs, &mut read_stack);
        assert_eq!(res, Err(Error::IntegerOverflow));
    }
}

68
third_party/rust/framehop/src/x86_64/unwinder.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,68 @@
use core::ops::Deref;
use super::arch::ArchX86_64;
use super::cache::CacheX86_64;
use super::unwindregs::UnwindRegsX86_64;
use crate::cache::{AllocationPolicy, MayAllocateDuringUnwind};
use crate::error::Error;
use crate::unwinder::UnwinderInternal;
use crate::unwinder::{Module, Unwinder};
use crate::FrameAddress;
/// The unwinder for the x86_64 CPU architecture. Use the [`Unwinder`] trait for unwinding.
///
/// Type arguments:
///
/// - `D`: The type for unwind section data in the modules. See [`Module`].
/// - `P`: The [`AllocationPolicy`].
pub struct UnwinderX86_64<D, P = MayAllocateDuringUnwind>(UnwinderInternal<D, ArchX86_64, P>);

impl<D, P> Default for UnwinderX86_64<D, P> {
    fn default() -> Self {
        Self::new()
    }
}

// Implemented by hand so that `Clone` does not require `D: Clone` or
// `P: Clone` (a derive would add those bounds).
impl<D, P> Clone for UnwinderX86_64<D, P> {
    fn clone(&self) -> Self {
        Self(self.0.clone())
    }
}

impl<D, P> UnwinderX86_64<D, P> {
    /// Create an unwinder for a process.
    pub fn new() -> Self {
        Self(UnwinderInternal::new())
    }
}
// All trait methods delegate to the architecture-generic `UnwinderInternal`.
impl<D: Deref<Target = [u8]>, P: AllocationPolicy> Unwinder for UnwinderX86_64<D, P> {
    type UnwindRegs = UnwindRegsX86_64;
    type Cache = CacheX86_64<P>;
    type Module = Module<D>;

    fn add_module(&mut self, module: Module<D>) {
        self.0.add_module(module);
    }

    fn remove_module(&mut self, module_address_range_start: u64) {
        self.0.remove_module(module_address_range_start);
    }

    fn max_known_code_address(&self) -> u64 {
        self.0.max_known_code_address()
    }

    fn unwind_frame<F>(
        &self,
        address: FrameAddress,
        regs: &mut UnwindRegsX86_64,
        cache: &mut CacheX86_64<P>,
        read_stack: &mut F,
    ) -> Result<Option<u64>, Error>
    where
        F: FnMut(u64) -> Result<u64, ()>,
    {
        self.0.unwind_frame(address, regs, &mut cache.0, read_stack)
    }
}

102
third_party/rust/framehop/src/x86_64/unwindregs.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,102 @@
use core::fmt::Debug;
use crate::display_utils::HexNum;
/// The register state needed to unwind an x86_64 stack frame.
///
/// Holds the instruction pointer plus all 16 general-purpose registers,
/// stored in an array indexed by the `Reg` discriminants.
#[derive(Clone, Copy, PartialEq, Eq)]
pub struct UnwindRegsX86_64 {
    ip: u64,
    regs: [u64; 16],
}

/// An x86_64 general-purpose register.
///
/// The discriminants (0..=15) are used directly as indices into the
/// `UnwindRegsX86_64` register array, so the variant order must not change.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[repr(u8)]
pub enum Reg {
    RAX,
    RDX,
    RCX,
    RBX,
    RSI,
    RDI,
    RBP,
    RSP,
    R8,
    R9,
    R10,
    R11,
    R12,
    R13,
    R14,
    R15,
}

impl UnwindRegsX86_64 {
    /// Create a register state from the three registers that matter most for
    /// unwinding: instruction pointer, stack pointer and base pointer.
    ///
    /// All other registers start out as zero.
    pub fn new(ip: u64, sp: u64, bp: u64) -> Self {
        let mut regs = [0u64; 16];
        regs[Reg::RSP as usize] = sp;
        regs[Reg::RBP as usize] = bp;
        Self { ip, regs }
    }

    /// Read the value of an arbitrary general-purpose register.
    #[inline(always)]
    pub fn get(&self, reg: Reg) -> u64 {
        self.regs[reg as usize]
    }

    /// Write the value of an arbitrary general-purpose register.
    #[inline(always)]
    pub fn set(&mut self, reg: Reg, value: u64) {
        self.regs[reg as usize] = value;
    }

    /// The instruction pointer (rip).
    #[inline(always)]
    pub fn ip(&self) -> u64 {
        self.ip
    }

    /// Set the instruction pointer (rip).
    #[inline(always)]
    pub fn set_ip(&mut self, ip: u64) {
        self.ip = ip
    }

    /// The stack pointer; shorthand for `get(Reg::RSP)`.
    #[inline(always)]
    pub fn sp(&self) -> u64 {
        self.get(Reg::RSP)
    }

    /// Set the stack pointer; shorthand for `set(Reg::RSP, ..)`.
    #[inline(always)]
    pub fn set_sp(&mut self, sp: u64) {
        self.set(Reg::RSP, sp)
    }

    /// The base pointer; shorthand for `get(Reg::RBP)`.
    #[inline(always)]
    pub fn bp(&self) -> u64 {
        self.get(Reg::RBP)
    }

    /// Set the base pointer; shorthand for `set(Reg::RBP, ..)`.
    #[inline(always)]
    pub fn set_bp(&mut self, bp: u64) {
        self.set(Reg::RBP, bp)
    }
}
impl Debug for UnwindRegsX86_64 {
    // Formats `ip` first, then every general-purpose register as hex, in the
    // same order as the `Reg` discriminants (which is the layout of `regs`).
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        // Names listed in `Reg` discriminant order, so zipping with the
        // backing array pairs each value with its register name.
        const NAMES: [&str; 16] = [
            "rax", "rdx", "rcx", "rbx", "rsi", "rdi", "rbp", "rsp", "r8",
            "r9", "r10", "r11", "r12", "r13", "r14", "r15",
        ];
        let mut builder = f.debug_struct("UnwindRegsX86_64");
        builder.field("ip", &HexNum(self.ip()));
        for (name, &value) in NAMES.iter().zip(self.regs.iter()) {
            builder.field(name, &HexNum(value));
        }
        builder.finish()
    }
}

1
third_party/rust/gimli/.cargo-checksum.json поставляемый Normal file
Просмотреть файл

@ -0,0 +1 @@
{"files":{"CHANGELOG.md":"39644968fcea2bf6cf14f94047cc8b5e9785797631c0cd8033e4e2cdbcf27969","Cargo.toml":"1ecca3db954f8885686c1e3ca6b7222d500bc26926a46438eabd519569109c32","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"7b63ecd5f1902af1b63729947373683c32745c16a10e8e6292e2e2dcd7e90ae0","README.md":"6b55025491f62ca7dd19a7a6cdb9154b06db33c85247c88a19804ab1c1ba2b5e","src/arch.rs":"735a8e871479263ad2dd86c38cc68167da320800ca70b8b1d25a224e5f3d0bd8","src/common.rs":"92bb5bc1eebe0a1906389a75096288773bb86b8895b827851bfb082a3c4999f8","src/constants.rs":"33b74f752fc11aefa1f5ef36a08c2fac19453e6d16f8e490d15b2eabcd63a55c","src/endianity.rs":"1f7e62ae34f540c06bedf1e7948739211556eea7dd83731a5ca52c7d687ed0fc","src/leb128.rs":"996d5c79d027f97c010ca487bc4ff5f8265f4b9e63d62b4e4fa291383c259ee9","src/lib.rs":"538a8080f33a0641f831e883085425c36cbce2ae39e0cd5e0b6c7c062bca7712","src/read/abbrev.rs":"f937e45d151ac5073f2c526b792e86e5ba96d3d36cb0377682a596c272be589a","src/read/addr.rs":"a6a535b690793e4c8ec85127d558e796cb8f6272533cd0418886bbc44289039e","src/read/aranges.rs":"fd3ff965cfd23c8b425c555f8f34a190764ae993433f32c63f9452c6604806cd","src/read/cfi.rs":"93e7572e44d97d10977833cedab78d68b6f0fec643edda4a613ad8ae845a93ce","src/read/dwarf.rs":"0f30d814dfe067aa6fbd0b80dac8e1a2532e2c5cd5e584c151a8915356b6b2d7","src/read/endian_reader.rs":"25752b609d74ad7dc85df84d044d0e931024a95af72a760cd51f834016775b3e","src/read/endian_slice.rs":"5b44661714967780b8c9f52fdaf655a53e309c38cbd3daf11bf6b1d5f6d067bb","src/read/index.rs":"2a28d032bc3bc5235545ac526b367512ac0aa7807909b6c02c8d3f84f5beff87","src/read/line.rs":"463fedce39895af793cdce413d9593cfd3470939f9f944fd7814ded5946d5b7e","src/read/lists.rs":"67ca9e1a36a91feb4996d035211de845205212bfda02163685d217818567ff93","src/read/loclists.rs":"a05933e752d44c1d26e83c321dbc1b8a3616b1d76ad15f488858f7f74fd3aece","src/read/lookup.rs":"0cf89ba12b9d48b1fe035dd3a497730323acb9427a9457abbc2f7c58c4c71165","src/read/mod.rs":"1154
168832c544acd31f467668fb86536232138c84e5918ba3b1cc66d1554d05","src/read/op.rs":"8782f09332eea1a218aa524a67c9c1cc2e73a8210b30402519dbe8fcf21dcf6e","src/read/pubnames.rs":"ed752ee1a7017e6d3be42d81e4ddaaac960ef08081463a19106c9f041526d4a3","src/read/pubtypes.rs":"5e75b32c0923e827aff0bb2db456797a0e8d38ba46be992558a7990b3196bcf5","src/read/reader.rs":"afc9c2cfbfe0fce5b1825d029f8e841100f48b04b86181950a213fbb82e6ad63","src/read/relocate.rs":"6844b113eb8218152e29912accc54b26bc2498e97bfe4af824472ddb69b8601c","src/read/rnglists.rs":"d1afeb1779d145493a1fc665fa32820c63c539e40b10ecd5b5f343836da188e6","src/read/str.rs":"4dd98cc8d93ce6f06c194eae034bfe0a3d45a9f06fbeaca38d8f29a9c7cf15a5","src/read/unit.rs":"bcff85e55148bf141984a4cb20eb5983cfd85de6e8a4535cef2ab19e8e0f5103","src/read/util.rs":"61e41212f1c8336988c9a7a1523c2913af8c8a66d2dd59d3631ba179e801e3bd","src/read/value.rs":"1c0db3759c65ffda3520fcecd36118367dfb46845035d5d97fcba2f0ea780380","src/test_util.rs":"291eefa6b51c6d934ba2f4a4c9bc7c403046fc1cccf4d43487820f0154bb89e2","src/write/abbrev.rs":"fa02163389e92e804d139cf84f833ab6af932083f0eb2d74464b4a70bd3237ff","src/write/cfi.rs":"323ab703251a41fe83172d749c8afec7d869c5d52e8edd85d7b87450102e6e3a","src/write/dwarf.rs":"8a1a0893e31134ad68993994594f3024ad0c8af7c1188b29e0ffc26b42edef21","src/write/endian_vec.rs":"1d5811986648816a677580b22630f5059757a381487d73e9adbb3008c9ae0c58","src/write/line.rs":"80f7626f15467d69fb73a9d9fda7863fe343f236d5fcdbc353bdf2a2a4b1bb42","src/write/loc.rs":"2a58b0f57ab344f23de81e459f6fefa153e29e0384af31bbcbc80095af0fa703","src/write/mod.rs":"6e43a028baf73bf50ee276a3f08f31adc69cacdde25d56b55f14c0d48ca6f3aa","src/write/op.rs":"e599fa116366f273ca33da3428132f2b9da21c0cc50a0c0ccfd0f524ccb4e82e","src/write/range.rs":"28033849e7912f60d137c2f2e0065c5169a7f16896b179178c8e3674d7c2785e","src/write/relocate.rs":"117b97eae3ca2aad9d5b242652ebbdb333440e877be37873a7ef5ba1a39ced43","src/write/section.rs":"126a0202d606ea94d5b7ee4853afefb05f2546710210954fd0cc18af8674a511","src/wr
ite/str.rs":"4850cc2fee55980f9cbb6b4169f9861ab9d05c2b28a85c2b790480b83a66f514","src/write/unit.rs":"35419f917bd759ab026c9701ac0aef9a945ffb95a10f1c9c72608020206edf44","src/write/writer.rs":"7d5dd07b82ec3becebb060c106d4ea697cbd8b9b64a5de78403511a5244e08b1"},"package":"e2e1d97fbe9722ba9bbd0c97051c2956e726562b61f86a25a4360398a40edfc9"}

1102
third_party/rust/gimli/CHANGELOG.md поставляемый Normal file

Разница между файлами не показана из-за своего большого размера Загрузить разницу

109
third_party/rust/gimli/Cargo.toml поставляемый Normal file
Просмотреть файл

@ -0,0 +1,109 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.
[package]
edition = "2018"
rust-version = "1.60"
name = "gimli"
version = "0.30.0"
include = [
"/CHANGELOG.md",
"/Cargo.toml",
"/LICENSE-APACHE",
"/LICENSE-MIT",
"/README.md",
"/src",
]
description = "A library for reading and writing the DWARF debugging format."
documentation = "https://docs.rs/gimli"
readme = "./README.md"
keywords = [
"DWARF",
"debug",
"ELF",
"eh_frame",
]
categories = [
"development-tools::debugging",
"development-tools::profiling",
"parser-implementations",
]
license = "MIT OR Apache-2.0"
repository = "https://github.com/gimli-rs/gimli"
resolver = "2"
[profile.bench]
codegen-units = 1
debug = 2
split-debuginfo = "packed"
[profile.test]
split-debuginfo = "packed"
[dependencies.alloc]
version = "1.0.0"
optional = true
package = "rustc-std-workspace-alloc"
[dependencies.compiler_builtins]
version = "0.1.2"
optional = true
[dependencies.core]
version = "1.0.0"
optional = true
package = "rustc-std-workspace-core"
[dependencies.fallible-iterator]
version = "0.3.0"
optional = true
default-features = false
[dependencies.indexmap]
version = "2.0.0"
optional = true
[dependencies.stable_deref_trait]
version = "1.1.0"
optional = true
default-features = false
[dev-dependencies.test-assembler]
version = "0.1.3"
[features]
default = [
"read-all",
"write",
]
endian-reader = [
"read",
"dep:stable_deref_trait",
]
fallible-iterator = ["dep:fallible-iterator"]
read = ["read-core"]
read-all = [
"read",
"std",
"fallible-iterator",
"endian-reader",
]
read-core = []
rustc-dep-of-std = [
"dep:core",
"dep:alloc",
"dep:compiler_builtins",
]
std = [
"fallible-iterator?/std",
"stable_deref_trait?/std",
]
write = ["dep:indexmap"]

201
third_party/rust/gimli/LICENSE-APACHE поставляемый Normal file
Просмотреть файл

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

25
third_party/rust/gimli/LICENSE-MIT поставляемый Normal file
Просмотреть файл

@ -0,0 +1,25 @@
Copyright (c) 2015 The Rust Project Developers
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

81
third_party/rust/gimli/README.md поставляемый Normal file
Просмотреть файл

@ -0,0 +1,81 @@
# `gimli`
[![](https://img.shields.io/crates/v/gimli.svg) ![](https://img.shields.io/crates/d/gimli.svg)](https://crates.io/crates/gimli)
[![](https://docs.rs/gimli/badge.svg)](https://docs.rs/gimli/)
[![Build Status](https://github.com/gimli-rs/gimli/workflows/Rust/badge.svg)](https://github.com/gimli-rs/gimli/actions)
[![Coverage Status](https://coveralls.io/repos/github/gimli-rs/gimli/badge.svg?branch=master)](https://coveralls.io/github/gimli-rs/gimli?branch=master)
`gimli` is a library for reading and writing the
[DWARF debugging format](https://dwarfstd.org/).
* **Zero copy:** everything is just a reference to the original input buffer. No
copies of the input data get made.
* **Lazy:** you can iterate compilation units without parsing their
contents. Parse only as many debugging information entry (DIE) trees as you
iterate over. `gimli` also uses `DW_AT_sibling` references to avoid parsing a
DIE's children to find its next sibling, when possible.
* **Cross-platform:** `gimli` makes no assumptions about what kind of object
file you're working with. The flipside to that is that it's up to you to
provide an ELF loader on Linux or Mach-O loader on macOS.
* Unsure which object file parser to use? Try the cross-platform
[`object`](https://github.com/gimli-rs/object) crate. See the
[`gimli-examples`](./crates/examples/src/bin) crate for usage with `gimli`.
## Install
Add this to your `Cargo.toml`:
```toml
[dependencies]
gimli = "0.30.0"
```
The minimum supported Rust version is:
* 1.60.0 for the `read` feature and its dependencies.
* 1.65.0 for other features.
## Documentation
* [Documentation on docs.rs](https://docs.rs/gimli/)
* Example programs:
* [A simple `.debug_info` parser](./crates/examples/src/bin/simple.rs)
* [A simple `.debug_line` parser](./crates/examples/src/bin/simple_line.rs)
* [A `dwarfdump` clone](./crates/examples/src/bin/dwarfdump.rs)
* [An `addr2line` clone](https://github.com/gimli-rs/addr2line)
* [`ddbug`](https://github.com/gimli-rs/ddbug), a utility giving insight into
code generation by making debugging information readable.
* [`dwprod`](https://github.com/fitzgen/dwprod), a tiny utility to list the
compilers used to create each compilation unit within a shared library or
executable (via `DW_AT_producer`).
* [`dwarf-validate`](./crates/examples/src/bin/dwarf-validate.rs), a program to validate the
integrity of some DWARF and its references between sections and compilation
units.
## License
Licensed under either of
* Apache License, Version 2.0 ([`LICENSE-APACHE`](./LICENSE-APACHE) or https://www.apache.org/licenses/LICENSE-2.0)
* MIT license ([`LICENSE-MIT`](./LICENSE-MIT) or https://opensource.org/licenses/MIT)
at your option.
## Contribution
See [CONTRIBUTING.md](./CONTRIBUTING.md) for hacking.
Unless you explicitly state otherwise, any contribution intentionally submitted
for inclusion in the work by you, as defined in the Apache-2.0 license, shall be
dual licensed as above, without any additional terms or conditions.

1088
third_party/rust/gimli/src/arch.rs поставляемый Normal file

Разница между файлами не показана из-за своего большого размера Загрузить разницу

392
third_party/rust/gimli/src/common.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,392 @@
/// Whether the format of a compilation unit is 32- or 64-bit.
// The discriminants are deliberately the word size in bytes.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum Format {
    /// 64-bit DWARF
    Dwarf64 = 8,
    /// 32-bit DWARF
    Dwarf32 = 4,
}

impl Format {
    /// Return the serialized size of an initial length field for the format.
    ///
    /// 32-bit DWARF uses a plain 4-byte length; 64-bit DWARF uses a 4-byte
    /// escape marker followed by an 8-byte length, 12 bytes in total.
    #[inline]
    pub fn initial_length_size(self) -> u8 {
        match self {
            Format::Dwarf64 => 12,
            Format::Dwarf32 => 4,
        }
    }

    /// Return the natural word size for the format
    #[inline]
    pub fn word_size(self) -> u8 {
        // The enum discriminants are exactly the word sizes (8 and 4).
        self as u8
    }
}
/// Which vendor extensions to support.
// `#[non_exhaustive]`: more vendors can be added later without a breaking change.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[non_exhaustive]
pub enum Vendor {
    /// A default set of extensions, including some common GNU extensions.
    Default,
    /// AArch64 extensions.
    AArch64,
}
/// Encoding parameters that are commonly used for multiple DWARF sections.
///
/// This is intended to be small enough to pass by value.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
// `address_size` and `format` are used more often than `version`, so keep
// them first.
// `repr(C)` guarantees the fields are laid out in declaration order.
#[repr(C)]
pub struct Encoding {
    /// The size of an address.
    pub address_size: u8,

    // The size of a segment selector.
    // TODO: pub segment_size: u8,
    /// Whether the DWARF format is 32- or 64-bit.
    pub format: Format,

    /// The DWARF version of the header.
    pub version: u16,
}
/// Encoding parameters for a line number program.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct LineEncoding {
    /// The size in bytes of the smallest target machine instruction.
    pub minimum_instruction_length: u8,
    /// The maximum number of individual operations that may be encoded in an
    /// instruction.
    pub maximum_operations_per_instruction: u8,
    /// The initial value of the `is_stmt` register.
    pub default_is_stmt: bool,
    /// The minimum value which a special opcode can add to the line register.
    pub line_base: i8,
    /// The range of values which a special opcode can add to the line register.
    pub line_range: u8,
}

impl Default for LineEncoding {
    /// Default parameters, chosen to match the values LLVM emits.
    fn default() -> Self {
        Self {
            line_base: -5,
            line_range: 14,
            default_is_stmt: true,
            minimum_instruction_length: 1,
            maximum_operations_per_instruction: 1,
        }
    }
}
/// A DWARF register number.
///
/// The meaning of this value is ABI dependent. This is generally encoded as
/// a ULEB128, but supported architectures need 16 bits at most.
// Newtype over `u16` with a public field, so it can be constructed and
// pattern-matched freely.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub struct Register(pub u16);
// ---------------------------------------------------------------------------
// Typed offset/index newtypes, one per DWARF section concept, so that values
// belonging to different sections cannot be interchanged by accident.
// `T` defaults to `usize`.
// ---------------------------------------------------------------------------

/// An offset into the `.debug_abbrev` section.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct DebugAbbrevOffset<T = usize>(pub T);

/// An offset to a set of entries in the `.debug_addr` section.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct DebugAddrBase<T = usize>(pub T);

/// An index into a set of addresses in the `.debug_addr` section.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct DebugAddrIndex<T = usize>(pub T);

/// An offset into the `.debug_aranges` section.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct DebugArangesOffset<T = usize>(pub T);

/// An offset into the `.debug_info` section.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Ord, PartialOrd, Hash)]
pub struct DebugInfoOffset<T = usize>(pub T);

/// An offset into the `.debug_line` section.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct DebugLineOffset<T = usize>(pub T);

/// An offset into the `.debug_line_str` section.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct DebugLineStrOffset<T = usize>(pub T);

/// An offset into either the `.debug_loc` section or the `.debug_loclists` section,
/// depending on the version of the unit the offset was contained in.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct LocationListsOffset<T = usize>(pub T);

/// An offset to a set of location list offsets in the `.debug_loclists` section.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct DebugLocListsBase<T = usize>(pub T);

/// An index into a set of location list offsets in the `.debug_loclists` section.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct DebugLocListsIndex<T = usize>(pub T);

/// An offset into the `.debug_macinfo` section.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct DebugMacinfoOffset<T = usize>(pub T);

/// An offset into the `.debug_macro` section.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct DebugMacroOffset<T = usize>(pub T);

/// An offset into either the `.debug_ranges` section or the `.debug_rnglists` section,
/// depending on the version of the unit the offset was contained in.
///
/// If this is from a DWARF 4 DWO file, then it must additionally be offset by the
/// value of `DW_AT_GNU_ranges_base`. You can use `Dwarf::ranges_offset_from_raw` to do this.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct RawRangeListsOffset<T = usize>(pub T);

/// An offset into either the `.debug_ranges` section or the `.debug_rnglists` section,
/// depending on the version of the unit the offset was contained in.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct RangeListsOffset<T = usize>(pub T);

/// An offset to a set of range list offsets in the `.debug_rnglists` section.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct DebugRngListsBase<T = usize>(pub T);

/// An index into a set of range list offsets in the `.debug_rnglists` section.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct DebugRngListsIndex<T = usize>(pub T);

/// An offset into the `.debug_str` section.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct DebugStrOffset<T = usize>(pub T);

/// An offset to a set of entries in the `.debug_str_offsets` section.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct DebugStrOffsetsBase<T = usize>(pub T);

/// An index into a set of entries in the `.debug_str_offsets` section.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct DebugStrOffsetsIndex<T = usize>(pub T);

/// An offset into the `.debug_types` section.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Ord, PartialOrd, Hash)]
pub struct DebugTypesOffset<T = usize>(pub T);

/// A type signature as used in the `.debug_types` section.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct DebugTypeSignature(pub u64);
/// An offset into the `.debug_frame` section.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct DebugFrameOffset<T = usize>(pub T);

impl<T> From<T> for DebugFrameOffset<T> {
    /// Wrap a raw offset value.
    #[inline]
    fn from(offset: T) -> Self {
        Self(offset)
    }
}

/// An offset into the `.eh_frame` section.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct EhFrameOffset<T = usize>(pub T);

impl<T> From<T> for EhFrameOffset<T> {
    /// Wrap a raw offset value.
    #[inline]
    fn from(offset: T) -> Self {
        Self(offset)
    }
}
/// An offset into the `.debug_info` or `.debug_types` sections.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Ord, PartialOrd, Hash)]
pub enum UnitSectionOffset<T = usize> {
    /// An offset into the `.debug_info` section.
    DebugInfoOffset(DebugInfoOffset<T>),
    /// An offset into the `.debug_types` section.
    DebugTypesOffset(DebugTypesOffset<T>),
}

impl<T> From<DebugInfoOffset<T>> for UnitSectionOffset<T> {
    fn from(offset: DebugInfoOffset<T>) -> Self {
        Self::DebugInfoOffset(offset)
    }
}

impl<T> From<DebugTypesOffset<T>> for UnitSectionOffset<T> {
    fn from(offset: DebugTypesOffset<T>) -> Self {
        Self::DebugTypesOffset(offset)
    }
}

impl<T> UnitSectionOffset<T>
where
    T: Clone,
{
    /// Returns the `DebugInfoOffset` inside, or `None` otherwise.
    pub fn as_debug_info_offset(&self) -> Option<DebugInfoOffset<T>> {
        if let UnitSectionOffset::DebugInfoOffset(offset) = self {
            Some(offset.clone())
        } else {
            None
        }
    }

    /// Returns the `DebugTypesOffset` inside, or `None` otherwise.
    pub fn as_debug_types_offset(&self) -> Option<DebugTypesOffset<T>> {
        if let UnitSectionOffset::DebugTypesOffset(offset) = self {
            Some(offset.clone())
        } else {
            None
        }
    }
}
/// An identifier for a DWARF section.
///
/// Use [`SectionId::name`] to obtain the corresponding ELF section name.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Ord, PartialOrd, Hash)]
pub enum SectionId {
    /// The `.debug_abbrev` section.
    DebugAbbrev,
    /// The `.debug_addr` section.
    DebugAddr,
    /// The `.debug_aranges` section.
    DebugAranges,
    /// The `.debug_cu_index` section.
    DebugCuIndex,
    /// The `.debug_frame` section.
    DebugFrame,
    /// The `.eh_frame` section.
    EhFrame,
    /// The `.eh_frame_hdr` section.
    EhFrameHdr,
    /// The `.debug_info` section.
    DebugInfo,
    /// The `.debug_line` section.
    DebugLine,
    /// The `.debug_line_str` section.
    DebugLineStr,
    /// The `.debug_loc` section.
    DebugLoc,
    /// The `.debug_loclists` section.
    DebugLocLists,
    /// The `.debug_macinfo` section.
    DebugMacinfo,
    /// The `.debug_macro` section.
    DebugMacro,
    /// The `.debug_pubnames` section.
    DebugPubNames,
    /// The `.debug_pubtypes` section.
    DebugPubTypes,
    /// The `.debug_ranges` section.
    DebugRanges,
    /// The `.debug_rnglists` section.
    DebugRngLists,
    /// The `.debug_str` section.
    DebugStr,
    /// The `.debug_str_offsets` section.
    DebugStrOffsets,
    /// The `.debug_tu_index` section.
    DebugTuIndex,
    /// The `.debug_types` section.
    DebugTypes,
}
impl SectionId {
/// Returns the ELF section name for this kind.
pub fn name(self) -> &'static str {
match self {
SectionId::DebugAbbrev => ".debug_abbrev",
SectionId::DebugAddr => ".debug_addr",
SectionId::DebugAranges => ".debug_aranges",
SectionId::DebugCuIndex => ".debug_cu_index",
SectionId::DebugFrame => ".debug_frame",
SectionId::EhFrame => ".eh_frame",
SectionId::EhFrameHdr => ".eh_frame_hdr",
SectionId::DebugInfo => ".debug_info",
SectionId::DebugLine => ".debug_line",
SectionId::DebugLineStr => ".debug_line_str",
SectionId::DebugLoc => ".debug_loc",
SectionId::DebugLocLists => ".debug_loclists",
SectionId::DebugMacinfo => ".debug_macinfo",
SectionId::DebugMacro => ".debug_macro",
SectionId::DebugPubNames => ".debug_pubnames",
SectionId::DebugPubTypes => ".debug_pubtypes",
SectionId::DebugRanges => ".debug_ranges",
SectionId::DebugRngLists => ".debug_rnglists",
SectionId::DebugStr => ".debug_str",
SectionId::DebugStrOffsets => ".debug_str_offsets",
SectionId::DebugTuIndex => ".debug_tu_index",
SectionId::DebugTypes => ".debug_types",
}
}
/// Returns the ELF section name for this kind, when found in a .dwo or .dwp file.
pub fn dwo_name(self) -> Option<&'static str> {
Some(match self {
SectionId::DebugAbbrev => ".debug_abbrev.dwo",
SectionId::DebugCuIndex => ".debug_cu_index",
SectionId::DebugInfo => ".debug_info.dwo",
SectionId::DebugLine => ".debug_line.dwo",
// The debug_loc section can be present in the dwo when using the
// GNU split-dwarf extension to DWARF4.
SectionId::DebugLoc => ".debug_loc.dwo",
SectionId::DebugLocLists => ".debug_loclists.dwo",
SectionId::DebugMacinfo => ".debug_macinfo.dwo",
SectionId::DebugMacro => ".debug_macro.dwo",
SectionId::DebugRngLists => ".debug_rnglists.dwo",
SectionId::DebugStr => ".debug_str.dwo",
SectionId::DebugStrOffsets => ".debug_str_offsets.dwo",
SectionId::DebugTuIndex => ".debug_tu_index",
SectionId::DebugTypes => ".debug_types.dwo",
_ => return None,
})
}
/// Returns the XCOFF section name for this kind.
pub fn xcoff_name(self) -> Option<&'static str> {
Some(match self {
SectionId::DebugAbbrev => ".dwabrev",
SectionId::DebugAranges => ".dwarnge",
SectionId::DebugFrame => ".dwframe",
SectionId::DebugInfo => ".dwinfo",
SectionId::DebugLine => ".dwline",
SectionId::DebugLoc => ".dwloc",
SectionId::DebugMacinfo => ".dwmac",
SectionId::DebugPubNames => ".dwpbnms",
SectionId::DebugPubTypes => ".dwpbtyp",
SectionId::DebugRanges => ".dwrnges",
SectionId::DebugStr => ".dwstr",
_ => return None,
})
}
}
/// An optionally-provided implementation-defined compilation unit ID to enable
/// split DWARF and linking a split compilation unit back together.
///
/// Wraps the raw 64-bit identifier value.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct DwoId(pub u64);
/// The "type" of file with DWARF debugging information. This determines, among other things,
/// which files DWARF sections should be loaded from.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum DwarfFileType {
    /// A normal executable or object file.
    Main,
    /// A .dwo split DWARF file.
    Dwo,
    // TODO: Supplementary files, .dwps?
}

impl Default for DwarfFileType {
    /// The default is a normal executable or object file.
    fn default() -> Self {
        Self::Main
    }
}

1443
third_party/rust/gimli/src/constants.rs поставляемый Normal file

Разница между файлами не показана из-за своего большого размера Загрузить разницу

256
third_party/rust/gimli/src/endianity.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,256 @@
//! Types for compile-time and run-time endianity.
use core::convert::TryInto;
use core::fmt::Debug;
/// A trait describing the endianity of some buffer.
///
/// All read/write helpers have default implementations in terms of
/// `is_big_endian`, so implementors only need to provide that one method.
pub trait Endianity: Debug + Default + Clone + Copy + PartialEq + Eq {
    /// Return true for big endian byte order.
    fn is_big_endian(self) -> bool;
    /// Return true for little endian byte order.
    #[inline]
    fn is_little_endian(self) -> bool {
        !self.is_big_endian()
    }
    /// Reads an unsigned 16 bit integer from `buf`.
    ///
    /// # Panics
    ///
    /// Panics when `buf.len() < 2`.
    #[inline]
    fn read_u16(self, buf: &[u8]) -> u16 {
        let bytes: &[u8; 2] = buf[..2].try_into().unwrap();
        if self.is_big_endian() {
            u16::from_be_bytes(*bytes)
        } else {
            u16::from_le_bytes(*bytes)
        }
    }
    /// Reads an unsigned 32 bit integer from `buf`.
    ///
    /// # Panics
    ///
    /// Panics when `buf.len() < 4`.
    #[inline]
    fn read_u32(self, buf: &[u8]) -> u32 {
        let bytes: &[u8; 4] = buf[..4].try_into().unwrap();
        if self.is_big_endian() {
            u32::from_be_bytes(*bytes)
        } else {
            u32::from_le_bytes(*bytes)
        }
    }
    /// Reads an unsigned 64 bit integer from `buf`.
    ///
    /// # Panics
    ///
    /// Panics when `buf.len() < 8`.
    #[inline]
    fn read_u64(self, buf: &[u8]) -> u64 {
        let bytes: &[u8; 8] = buf[..8].try_into().unwrap();
        if self.is_big_endian() {
            u64::from_be_bytes(*bytes)
        } else {
            u64::from_le_bytes(*bytes)
        }
    }
    /// Read an unsigned n-bytes integer u64.
    ///
    /// # Panics
    ///
    /// Panics when `buf.len() < 1` or `buf.len() > 8`.
    // NOTE(review): takes `&mut self` unlike the other readers here; kept
    // as-is since changing it would alter the trait's public interface.
    #[inline]
    fn read_uint(&mut self, buf: &[u8]) -> u64 {
        // Left-pad (big endian) or right-pad (little endian) into an 8-byte
        // buffer, then reuse `read_u64`. An over-long `buf` panics via the
        // `8 - buf.len()` underflow or the slice-length mismatch.
        let mut tmp = [0; 8];
        if self.is_big_endian() {
            tmp[8 - buf.len()..].copy_from_slice(buf);
        } else {
            tmp[..buf.len()].copy_from_slice(buf);
        }
        self.read_u64(&tmp)
    }
    /// Reads a signed 16 bit integer from `buf`.
    ///
    /// # Panics
    ///
    /// Panics when `buf.len() < 2`.
    #[inline]
    fn read_i16(self, buf: &[u8]) -> i16 {
        self.read_u16(buf) as i16
    }
    /// Reads a signed 32 bit integer from `buf`.
    ///
    /// # Panics
    ///
    /// Panics when `buf.len() < 4`.
    #[inline]
    fn read_i32(self, buf: &[u8]) -> i32 {
        self.read_u32(buf) as i32
    }
    /// Reads a signed 64 bit integer from `buf`.
    ///
    /// # Panics
    ///
    /// Panics when `buf.len() < 8`.
    #[inline]
    fn read_i64(self, buf: &[u8]) -> i64 {
        self.read_u64(buf) as i64
    }
    /// Reads a 32 bit floating point number from `buf`.
    ///
    /// # Panics
    ///
    /// Panics when `buf.len() < 4`.
    #[inline]
    fn read_f32(self, buf: &[u8]) -> f32 {
        f32::from_bits(self.read_u32(buf))
    }
    /// Reads a 64 bit floating point number from `buf`.
    ///
    /// # Panics
    ///
    /// Panics when `buf.len() < 8`.
    #[inline]
    fn read_f64(self, buf: &[u8]) -> f64 {
        f64::from_bits(self.read_u64(buf))
    }
    /// Writes an unsigned 16 bit integer `n` to `buf`.
    ///
    /// # Panics
    ///
    /// Panics when `buf.len() < 2`.
    #[inline]
    fn write_u16(self, buf: &mut [u8], n: u16) {
        let bytes = if self.is_big_endian() {
            n.to_be_bytes()
        } else {
            n.to_le_bytes()
        };
        buf[..2].copy_from_slice(&bytes);
    }
    /// Writes an unsigned 32 bit integer `n` to `buf`.
    ///
    /// # Panics
    ///
    /// Panics when `buf.len() < 4`.
    #[inline]
    fn write_u32(self, buf: &mut [u8], n: u32) {
        let bytes = if self.is_big_endian() {
            n.to_be_bytes()
        } else {
            n.to_le_bytes()
        };
        buf[..4].copy_from_slice(&bytes);
    }
    /// Writes an unsigned 64 bit integer `n` to `buf`.
    ///
    /// # Panics
    ///
    /// Panics when `buf.len() < 8`.
    #[inline]
    fn write_u64(self, buf: &mut [u8], n: u64) {
        let bytes = if self.is_big_endian() {
            n.to_be_bytes()
        } else {
            n.to_le_bytes()
        };
        buf[..8].copy_from_slice(&bytes);
    }
}
/// Byte order that is selectable at runtime.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum RunTimeEndian {
    /// Little endian byte order.
    Little,
    /// Big endian byte order.
    Big,
}

impl Default for RunTimeEndian {
    /// Defaults to the byte order of the target platform.
    #[inline]
    fn default() -> RunTimeEndian {
        if cfg!(target_endian = "little") {
            RunTimeEndian::Little
        } else {
            RunTimeEndian::Big
        }
    }
}
impl Endianity for RunTimeEndian {
    /// Big endian exactly when the `Big` variant was selected.
    #[inline]
    fn is_big_endian(self) -> bool {
        matches!(self, RunTimeEndian::Big)
    }
}
/// Little endian byte order.
///
/// Zero-sized marker type; `Default` is derived since the type is a unit struct.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Default)]
pub struct LittleEndian;

impl Endianity for LittleEndian {
    /// `LittleEndian` is never big endian.
    #[inline]
    fn is_big_endian(self) -> bool {
        false
    }
}
/// Big endian byte order.
///
/// Zero-sized marker type; `Default` is derived since the type is a unit struct.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Default)]
pub struct BigEndian;

impl Endianity for BigEndian {
    /// `BigEndian` is always big endian.
    #[inline]
    fn is_big_endian(self) -> bool {
        true
    }
}
/// The native endianity for the target platform.
#[cfg(target_endian = "little")]
pub type NativeEndian = LittleEndian;
// Value-level counterpart of the type alias above, so `NativeEndian` can also
// be passed where an endianity *value* is expected; hidden from rustdoc.
#[cfg(target_endian = "little")]
#[allow(non_upper_case_globals)]
#[doc(hidden)]
pub const NativeEndian: LittleEndian = LittleEndian;
/// The native endianity for the target platform.
#[cfg(target_endian = "big")]
pub type NativeEndian = BigEndian;
// Value-level counterpart for big-endian targets; see above.
#[cfg(target_endian = "big")]
#[allow(non_upper_case_globals)]
#[doc(hidden)]
pub const NativeEndian: BigEndian = BigEndian;

612
third_party/rust/gimli/src/leb128.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,612 @@
//! Read and write DWARF's "Little Endian Base 128" (LEB128) variable length
//! integer encoding.
//!
//! The implementation is a direct translation of the psuedocode in the DWARF 4
//! standard's appendix C.
//!
//! Read and write signed integers:
//!
//! ```
//! # #[cfg(all(feature = "read", feature = "write"))] {
//! use gimli::{EndianSlice, NativeEndian, leb128};
//!
//! let mut buf = [0; 1024];
//!
//! // Write to anything that implements `std::io::Write`.
//! {
//! let mut writable = &mut buf[..];
//! leb128::write::signed(&mut writable, -12345).expect("Should write number");
//! }
//!
//! // Read from anything that implements `gimli::Reader`.
//! let mut readable = EndianSlice::new(&buf[..], NativeEndian);
//! let val = leb128::read::signed(&mut readable).expect("Should read number");
//! assert_eq!(val, -12345);
//! # }
//! ```
//!
//! Or read and write unsigned integers:
//!
//! ```
//! # #[cfg(all(feature = "read", feature = "write"))] {
//! use gimli::{EndianSlice, NativeEndian, leb128};
//!
//! let mut buf = [0; 1024];
//!
//! {
//! let mut writable = &mut buf[..];
//! leb128::write::unsigned(&mut writable, 98765).expect("Should write number");
//! }
//!
//! let mut readable = EndianSlice::new(&buf[..], NativeEndian);
//! let val = leb128::read::unsigned(&mut readable).expect("Should read number");
//! assert_eq!(val, 98765);
//! # }
//! ```
/// High bit of each LEB128 byte: set when another byte follows.
const CONTINUATION_BIT: u8 = 1 << 7;
/// Bit 6 of the final byte of a signed LEB128 value carries the sign.
#[cfg(feature = "read-core")]
const SIGN_BIT: u8 = 1 << 6;

/// Strip the continuation bit, leaving the seven payload bits.
#[inline]
fn low_bits_of_byte(byte: u8) -> u8 {
    byte & !CONTINUATION_BIT
}

/// The seven lowest payload bits of `val`, as a byte.
#[inline]
#[allow(dead_code)]
fn low_bits_of_u64(val: u64) -> u8 {
    // `as u8` truncates to the low byte, which is then masked down to 7 bits.
    low_bits_of_byte(val as u8)
}
/// A module for reading signed and unsigned integers that have been LEB128
/// encoded.
#[cfg(feature = "read-core")]
pub mod read {
    use super::{low_bits_of_byte, CONTINUATION_BIT, SIGN_BIT};
    use crate::read::{Error, Reader, Result};
    /// Read bytes until the LEB128 continuation bit is not set.
    ///
    /// Steps over an encoded value without decoding it.
    pub fn skip<R: Reader>(r: &mut R) -> Result<()> {
        loop {
            let byte = r.read_u8()?;
            if byte & CONTINUATION_BIT == 0 {
                return Ok(());
            }
        }
    }
    /// Read an unsigned LEB128 number from the given `Reader` and
    /// return it or an error if reading failed.
    ///
    /// Returns `Error::BadUnsignedLeb128` if the encoding does not fit in 64 bits.
    pub fn unsigned<R: Reader>(r: &mut R) -> Result<u64> {
        let mut result = 0;
        let mut shift = 0;
        loop {
            let byte = r.read_u8()?;
            // At shift 63 (the tenth byte) only the lowest bit may still be
            // set; anything else would overflow a u64.
            if shift == 63 && byte != 0x00 && byte != 0x01 {
                return Err(Error::BadUnsignedLeb128);
            }
            let low_bits = u64::from(low_bits_of_byte(byte));
            result |= low_bits << shift;
            if byte & CONTINUATION_BIT == 0 {
                return Ok(result);
            }
            shift += 7;
        }
    }
    /// Read an LEB128 u16 from the given `Reader` and
    /// return it or an error if reading failed.
    ///
    /// Unrolled decoder: a u16 needs at most three LEB128 bytes.
    pub fn u16<R: Reader>(r: &mut R) -> Result<u16> {
        let byte = r.read_u8()?;
        let mut result = u16::from(low_bits_of_byte(byte));
        if byte & CONTINUATION_BIT == 0 {
            return Ok(result);
        }
        let byte = r.read_u8()?;
        result |= u16::from(low_bits_of_byte(byte)) << 7;
        if byte & CONTINUATION_BIT == 0 {
            return Ok(result);
        }
        let byte = r.read_u8()?;
        // The third byte supplies bits 14-15 only; a larger value (or a
        // further continuation bit) would overflow a u16.
        if byte > 0x03 {
            return Err(Error::BadUnsignedLeb128);
        }
        result += u16::from(byte) << 14;
        Ok(result)
    }
    /// Read a signed LEB128 number from the given `Reader` and
    /// return it or an error if reading failed.
    ///
    /// Returns `Error::BadSignedLeb128` if the encoding does not fit in 64 bits.
    pub fn signed<R: Reader>(r: &mut R) -> Result<i64> {
        let mut result = 0;
        let mut shift = 0;
        let size = 64;
        let mut byte;
        loop {
            byte = r.read_u8()?;
            // At shift 63 the tenth byte may only be sign extension:
            // 0x00 for non-negative values, 0x7f for negative ones.
            if shift == 63 && byte != 0x00 && byte != 0x7f {
                return Err(Error::BadSignedLeb128);
            }
            let low_bits = i64::from(low_bits_of_byte(byte));
            result |= low_bits << shift;
            shift += 7;
            if byte & CONTINUATION_BIT == 0 {
                break;
            }
        }
        if shift < size && (SIGN_BIT & byte) == SIGN_BIT {
            // Sign extend the result.
            result |= !0 << shift;
        }
        Ok(result)
    }
}
/// A module for writing integers encoded as LEB128.
#[cfg(feature = "write")]
pub mod write {
    use super::{low_bits_of_u64, CONTINUATION_BIT};
    use std::io;
    /// Write the given unsigned number using the LEB128 encoding to the given
    /// `std::io::Write`able. Returns the number of bytes written to `w`, or an
    /// error if writing failed.
    pub fn unsigned<W>(w: &mut W, mut val: u64) -> Result<usize, io::Error>
    where
        W: io::Write,
    {
        let mut bytes_written = 0;
        loop {
            let mut byte = low_bits_of_u64(val);
            val >>= 7;
            if val != 0 {
                // More bytes to come, so set the continuation bit.
                byte |= CONTINUATION_BIT;
            }
            let buf = [byte];
            w.write_all(&buf)?;
            bytes_written += 1;
            if val == 0 {
                return Ok(bytes_written);
            }
        }
    }
    /// Return the size of the LEB128 encoding of the given unsigned number.
    ///
    /// Mirrors the loop in `unsigned`: one byte per 7 bits, at least one byte.
    pub fn uleb128_size(mut val: u64) -> usize {
        let mut size = 0;
        loop {
            val >>= 7;
            size += 1;
            if val == 0 {
                return size;
            }
        }
    }
    /// Write the given signed number using the LEB128 encoding to the given
    /// `std::io::Write`able. Returns the number of bytes written to `w`, or an
    /// error if writing failed.
    pub fn signed<W>(w: &mut W, mut val: i64) -> Result<usize, io::Error>
    where
        W: io::Write,
    {
        let mut bytes_written = 0;
        loop {
            let mut byte = val as u8;
            // Keep the sign bit for testing
            val >>= 6;
            // Encoding is complete once the remaining bits are pure sign
            // extension (all zeros or all ones).
            let done = val == 0 || val == -1;
            if done {
                byte &= !CONTINUATION_BIT;
            } else {
                // Remove the sign bit
                val >>= 1;
                // More bytes to come, so set the continuation bit.
                byte |= CONTINUATION_BIT;
            }
            let buf = [byte];
            w.write_all(&buf)?;
            bytes_written += 1;
            if done {
                return Ok(bytes_written);
            }
        }
    }
    /// Return the size of the LEB128 encoding of the given signed number.
    ///
    /// Mirrors the termination logic of `signed` without producing any bytes.
    pub fn sleb128_size(mut val: i64) -> usize {
        let mut size = 0;
        loop {
            val >>= 6;
            let done = val == 0 || val == -1;
            val >>= 1;
            size += 1;
            if done {
                return size;
            }
        }
    }
}
// Round-trip and edge-case tests for the LEB128 readers and writers.
// Only compiled when both the `read` and `write` features are enabled,
// since the round-trip ("dogfood") tests need both halves.
#[cfg(test)]
#[cfg(all(feature = "read", feature = "write"))]
mod tests {
    use super::{low_bits_of_byte, low_bits_of_u64, read, write, CONTINUATION_BIT};
    use crate::endianity::NativeEndian;
    use crate::read::{EndianSlice, Error, ReaderOffsetId};
    // Rewrites `UnexpectedEof` offsets (absolute reader ids derived from the
    // input's address) to be relative to `input`, so tests can assert on
    // stable offset values.
    trait ResultExt {
        fn map_eof(self, input: &[u8]) -> Self;
    }
    impl<T> ResultExt for Result<T, Error> {
        fn map_eof(self, input: &[u8]) -> Self {
            match self {
                Err(Error::UnexpectedEof(id)) => {
                    let id = ReaderOffsetId(id.0 - input.as_ptr() as u64);
                    Err(Error::UnexpectedEof(id))
                }
                r => r,
            }
        }
    }
    #[test]
    fn test_low_bits_of_byte() {
        for i in 0..127 {
            assert_eq!(i, low_bits_of_byte(i));
            assert_eq!(i, low_bits_of_byte(i | CONTINUATION_BIT));
        }
    }
    #[test]
    fn test_low_bits_of_u64() {
        for i in 0u64..127 {
            assert_eq!(i as u8, low_bits_of_u64(1 << 16 | i));
            assert_eq!(
                i as u8,
                low_bits_of_u64(i << 16 | i | (u64::from(CONTINUATION_BIT)))
            );
        }
    }
    // Examples from the DWARF 4 standard, section 7.6, figure 22.
    #[test]
    fn test_read_unsigned() {
        let buf = [2u8];
        let mut readable = EndianSlice::new(&buf[..], NativeEndian);
        assert_eq!(
            2,
            read::unsigned(&mut readable).expect("Should read number")
        );
        let buf = [127u8];
        let mut readable = EndianSlice::new(&buf[..], NativeEndian);
        assert_eq!(
            127,
            read::unsigned(&mut readable).expect("Should read number")
        );
        let buf = [CONTINUATION_BIT, 1];
        let mut readable = EndianSlice::new(&buf[..], NativeEndian);
        assert_eq!(
            128,
            read::unsigned(&mut readable).expect("Should read number")
        );
        let buf = [1u8 | CONTINUATION_BIT, 1];
        let mut readable = EndianSlice::new(&buf[..], NativeEndian);
        assert_eq!(
            129,
            read::unsigned(&mut readable).expect("Should read number")
        );
        let buf = [2u8 | CONTINUATION_BIT, 1];
        let mut readable = EndianSlice::new(&buf[..], NativeEndian);
        assert_eq!(
            130,
            read::unsigned(&mut readable).expect("Should read number")
        );
        let buf = [57u8 | CONTINUATION_BIT, 100];
        let mut readable = EndianSlice::new(&buf[..], NativeEndian);
        assert_eq!(
            12857,
            read::unsigned(&mut readable).expect("Should read number")
        );
    }
    // Examples from the DWARF 4 standard, section 7.6, figure 23.
    #[test]
    fn test_read_signed() {
        let buf = [2u8];
        let mut readable = EndianSlice::new(&buf[..], NativeEndian);
        assert_eq!(2, read::signed(&mut readable).expect("Should read number"));
        let buf = [0x7eu8];
        let mut readable = EndianSlice::new(&buf[..], NativeEndian);
        assert_eq!(-2, read::signed(&mut readable).expect("Should read number"));
        let buf = [127u8 | CONTINUATION_BIT, 0];
        let mut readable = EndianSlice::new(&buf[..], NativeEndian);
        assert_eq!(
            127,
            read::signed(&mut readable).expect("Should read number")
        );
        let buf = [1u8 | CONTINUATION_BIT, 0x7f];
        let mut readable = EndianSlice::new(&buf[..], NativeEndian);
        assert_eq!(
            -127,
            read::signed(&mut readable).expect("Should read number")
        );
        let buf = [CONTINUATION_BIT, 1];
        let mut readable = EndianSlice::new(&buf[..], NativeEndian);
        assert_eq!(
            128,
            read::signed(&mut readable).expect("Should read number")
        );
        let buf = [CONTINUATION_BIT, 0x7f];
        let mut readable = EndianSlice::new(&buf[..], NativeEndian);
        assert_eq!(
            -128,
            read::signed(&mut readable).expect("Should read number")
        );
        let buf = [1u8 | CONTINUATION_BIT, 1];
        let mut readable = EndianSlice::new(&buf[..], NativeEndian);
        assert_eq!(
            129,
            read::signed(&mut readable).expect("Should read number")
        );
        let buf = [0x7fu8 | CONTINUATION_BIT, 0x7e];
        let mut readable = EndianSlice::new(&buf[..], NativeEndian);
        assert_eq!(
            -129,
            read::signed(&mut readable).expect("Should read number")
        );
    }
    // The largest negative value whose final byte is pure sign bit (0x40).
    #[test]
    fn test_read_signed_63_bits() {
        let buf = [
            CONTINUATION_BIT,
            CONTINUATION_BIT,
            CONTINUATION_BIT,
            CONTINUATION_BIT,
            CONTINUATION_BIT,
            CONTINUATION_BIT,
            CONTINUATION_BIT,
            CONTINUATION_BIT,
            0x40,
        ];
        let mut readable = EndianSlice::new(&buf[..], NativeEndian);
        assert_eq!(
            -0x4000_0000_0000_0000,
            read::signed(&mut readable).expect("Should read number")
        );
    }
    #[test]
    fn test_read_unsigned_not_enough_data() {
        let buf = [CONTINUATION_BIT];
        let mut readable = EndianSlice::new(&buf[..], NativeEndian);
        assert_eq!(
            read::unsigned(&mut readable).map_eof(&buf),
            Err(Error::UnexpectedEof(ReaderOffsetId(1)))
        );
    }
    #[test]
    fn test_read_signed_not_enough_data() {
        let buf = [CONTINUATION_BIT];
        let mut readable = EndianSlice::new(&buf[..], NativeEndian);
        assert_eq!(
            read::signed(&mut readable).map_eof(&buf),
            Err(Error::UnexpectedEof(ReaderOffsetId(1)))
        );
    }
    #[test]
    fn test_write_unsigned_not_enough_space() {
        let mut buf = [0; 1];
        let mut writable = &mut buf[..];
        match write::unsigned(&mut writable, 128) {
            Err(e) => assert_eq!(e.kind(), std::io::ErrorKind::WriteZero),
            otherwise => panic!("Unexpected: {:?}", otherwise),
        }
    }
    #[test]
    fn test_write_signed_not_enough_space() {
        let mut buf = [0; 1];
        let mut writable = &mut buf[..];
        match write::signed(&mut writable, 128) {
            Err(e) => assert_eq!(e.kind(), std::io::ErrorKind::WriteZero),
            otherwise => panic!("Unexpected: {:?}", otherwise),
        }
    }
    // Round-trip: write then read back every value in a small range,
    // plus the extreme i64::MIN.
    #[test]
    fn dogfood_signed() {
        fn inner(i: i64) {
            let mut buf = [0u8; 1024];
            {
                let mut writable = &mut buf[..];
                write::signed(&mut writable, i).expect("Should write signed number");
            }
            let mut readable = EndianSlice::new(&buf[..], NativeEndian);
            let result = read::signed(&mut readable).expect("Should be able to read it back again");
            assert_eq!(i, result);
        }
        for i in -513..513 {
            inner(i);
        }
        inner(core::i64::MIN);
    }
    #[test]
    fn dogfood_unsigned() {
        for i in 0..1025 {
            let mut buf = [0u8; 1024];
            {
                let mut writable = &mut buf[..];
                write::unsigned(&mut writable, i).expect("Should write signed number");
            }
            let mut readable = EndianSlice::new(&buf[..], NativeEndian);
            let result =
                read::unsigned(&mut readable).expect("Should be able to read it back again");
            assert_eq!(i, result);
        }
    }
    // 31 bytes of continuation is far more than a u64 can hold.
    #[test]
    fn test_read_unsigned_overflow() {
        let buf = [
            2u8 | CONTINUATION_BIT,
            2 | CONTINUATION_BIT,
            2 | CONTINUATION_BIT,
            2 | CONTINUATION_BIT,
            2 | CONTINUATION_BIT,
            2 | CONTINUATION_BIT,
            2 | CONTINUATION_BIT,
            2 | CONTINUATION_BIT,
            2 | CONTINUATION_BIT,
            2 | CONTINUATION_BIT,
            2 | CONTINUATION_BIT,
            2 | CONTINUATION_BIT,
            2 | CONTINUATION_BIT,
            2 | CONTINUATION_BIT,
            2 | CONTINUATION_BIT,
            2 | CONTINUATION_BIT,
            2 | CONTINUATION_BIT,
            2 | CONTINUATION_BIT,
            2 | CONTINUATION_BIT,
            2 | CONTINUATION_BIT,
            2 | CONTINUATION_BIT,
            2 | CONTINUATION_BIT,
            2 | CONTINUATION_BIT,
            2 | CONTINUATION_BIT,
            2 | CONTINUATION_BIT,
            2 | CONTINUATION_BIT,
            2 | CONTINUATION_BIT,
            2 | CONTINUATION_BIT,
            2 | CONTINUATION_BIT,
            2 | CONTINUATION_BIT,
            1,
        ];
        let mut readable = EndianSlice::new(&buf[..], NativeEndian);
        assert!(read::unsigned(&mut readable).is_err());
    }
    // Same overflow input must be rejected by the signed decoder too.
    #[test]
    fn test_read_signed_overflow() {
        let buf = [
            2u8 | CONTINUATION_BIT,
            2 | CONTINUATION_BIT,
            2 | CONTINUATION_BIT,
            2 | CONTINUATION_BIT,
            2 | CONTINUATION_BIT,
            2 | CONTINUATION_BIT,
            2 | CONTINUATION_BIT,
            2 | CONTINUATION_BIT,
            2 | CONTINUATION_BIT,
            2 | CONTINUATION_BIT,
            2 | CONTINUATION_BIT,
            2 | CONTINUATION_BIT,
            2 | CONTINUATION_BIT,
            2 | CONTINUATION_BIT,
            2 | CONTINUATION_BIT,
            2 | CONTINUATION_BIT,
            2 | CONTINUATION_BIT,
            2 | CONTINUATION_BIT,
            2 | CONTINUATION_BIT,
            2 | CONTINUATION_BIT,
            2 | CONTINUATION_BIT,
            2 | CONTINUATION_BIT,
            2 | CONTINUATION_BIT,
            2 | CONTINUATION_BIT,
            2 | CONTINUATION_BIT,
            2 | CONTINUATION_BIT,
            2 | CONTINUATION_BIT,
            2 | CONTINUATION_BIT,
            2 | CONTINUATION_BIT,
            2 | CONTINUATION_BIT,
            1,
        ];
        let mut readable = EndianSlice::new(&buf[..], NativeEndian);
        assert!(read::signed(&mut readable).is_err());
    }
    // The reader must stop exactly after each value's final byte.
    #[test]
    fn test_read_multiple() {
        let buf = [2u8 | CONTINUATION_BIT, 1u8, 1u8];
        let mut readable = EndianSlice::new(&buf[..], NativeEndian);
        assert_eq!(
            read::unsigned(&mut readable).expect("Should read first number"),
            130u64
        );
        assert_eq!(
            read::unsigned(&mut readable).expect("Should read first number"),
            1u64
        );
    }
    #[test]
    fn test_read_u16() {
        for (buf, val) in [
            (&[2][..], 2),
            (&[0x7f][..], 0x7f),
            (&[0x80, 1][..], 0x80),
            (&[0x81, 1][..], 0x81),
            (&[0x82, 1][..], 0x82),
            (&[0xff, 0x7f][..], 0x3fff),
            (&[0x80, 0x80, 1][..], 0x4000),
            (&[0xff, 0xff, 1][..], 0x7fff),
            (&[0xff, 0xff, 3][..], 0xffff),
        ]
        .iter()
        {
            let mut readable = EndianSlice::new(buf, NativeEndian);
            assert_eq!(*val, read::u16(&mut readable).expect("Should read number"));
        }
        for buf in [
            &[0x80][..],
            &[0x80, 0x80][..],
            &[0x80, 0x80, 4][..],
            &[0x80, 0x80, 0x80, 3][..],
        ]
        .iter()
        {
            let mut readable = EndianSlice::new(buf, NativeEndian);
            assert!(read::u16(&mut readable).is_err(), "{:?}", buf);
        }
    }
}

79
third_party/rust/gimli/src/lib.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,79 @@
//! `gimli` is a library for reading and writing the
//! [DWARF debugging format](https://dwarfstd.org/).
//!
//! See the [read](./read/index.html) and [write](./write/index.html) modules
//! for examples and API documentation.
//!
//! ## Cargo Features
//!
//! Cargo features that can be enabled with `gimli`:
//!
//! * `std`: Enabled by default. Use the `std` library. Disabling this feature
//! allows using `gimli` in embedded environments that do not have access to
//! `std`. Note that even when `std` is disabled, `gimli` still requires an
//! implementation of the `alloc` crate.
//!
//! * `read`: Enabled by default. Enables the `read` module. Use of `std` is
//! optional.
//!
//! * `write`: Enabled by default. Enables the `write` module. Always uses
//! the `std` library.
#![deny(missing_docs)]
#![deny(missing_debug_implementations)]
// Selectively enable rust 2018 warnings
#![warn(bare_trait_objects)]
#![warn(unused_extern_crates)]
#![warn(ellipsis_inclusive_range_patterns)]
#![warn(elided_lifetimes_in_paths)]
#![warn(explicit_outlives_requirements)]
// Style.
#![allow(clippy::bool_to_int_with_if)]
#![allow(clippy::collapsible_else_if)]
#![allow(clippy::comparison_chain)]
#![allow(clippy::manual_range_contains)]
#![allow(clippy::needless_late_init)]
#![allow(clippy::too_many_arguments)]
// False positives with `fallible_iterator`.
#![allow(clippy::should_implement_trait)]
// False positives.
#![allow(clippy::derive_partial_eq_without_eq)]
#![no_std]
#[allow(unused_imports)]
#[cfg(any(feature = "read", feature = "write"))]
#[macro_use]
extern crate alloc;
#[cfg(any(feature = "std", feature = "write"))]
#[macro_use]
extern crate std;
#[cfg(feature = "endian-reader")]
pub use stable_deref_trait::{CloneStableDeref, StableDeref};
mod common;
pub use crate::common::*;
mod arch;
pub use crate::arch::*;
pub mod constants;
// For backwards compat.
pub use crate::constants::*;
mod endianity;
pub use crate::endianity::*;
pub mod leb128;
#[cfg(feature = "read-core")]
pub mod read;
// For backwards compat.
#[cfg(feature = "read-core")]
pub use crate::read::*;
#[cfg(feature = "write")]
pub mod write;
#[cfg(test)]
mod test_util;

1092
third_party/rust/gimli/src/read/abbrev.rs поставляемый Normal file

Разница между файлами не показана из-за своего большого размера Загрузить разницу

118
third_party/rust/gimli/src/read/addr.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,118 @@
use crate::common::{DebugAddrBase, DebugAddrIndex, SectionId};
use crate::read::{Reader, ReaderOffset, Result, Section};
/// The raw contents of the `.debug_addr` section.
#[derive(Debug, Default, Clone, Copy)]
pub struct DebugAddr<R> {
    // Reader over the raw section contents.
    section: R,
}
impl<R: Reader> DebugAddr<R> {
    // TODO: add an iterator over the sets of addresses in the section.
    // This is not needed for common usage of the section though.
    /// Returns the address at the given `base` and `index`.
    ///
    /// A set of addresses in the `.debug_addr` section consists of a header
    /// followed by a series of addresses.
    ///
    /// The `base` must be the `DW_AT_addr_base` value from the compilation unit DIE.
    /// This is an offset that points to the first address following the header.
    ///
    /// The `index` is the value of a `DW_FORM_addrx` attribute.
    ///
    /// The `address_size` must be the size of the address for the compilation unit.
    /// This value must also match the header. However, note that we do not parse the
    /// header to validate this, since locating the header is unreliable, and the GNU
    /// extensions do not emit it.
    pub fn get_address(
        &self,
        address_size: u8,
        base: DebugAddrBase<R::Offset>,
        index: DebugAddrIndex<R::Offset>,
    ) -> Result<u64> {
        // Work on a clone of the reader so `self` remains usable.
        let input = &mut self.section.clone();
        // Seek to the first address of this unit's set...
        input.skip(base.0)?;
        // ...then to the indexed entry; each entry is `address_size` bytes.
        input.skip(R::Offset::from_u64(
            index.0.into_u64() * u64::from(address_size),
        )?)?;
        input.read_address(address_size)
    }
}
impl<T> DebugAddr<T> {
/// Create a `DebugAddr` section that references the data in `self`.
///
/// This is useful when `R` implements `Reader` but `T` does not.
///
/// Used by `DwarfSections::borrow`.
pub fn borrow<'a, F, R>(&'a self, mut borrow: F) -> DebugAddr<R>
where
F: FnMut(&'a T) -> R,
{
borrow(&self.section).into()
}
}
impl<R> Section<R> for DebugAddr<R> {
    // Identifies this type as the `.debug_addr` section.
    fn id() -> SectionId {
        SectionId::DebugAddr
    }
    // Exposes the underlying reader for generic section handling.
    fn reader(&self) -> &R {
        &self.section
    }
}
impl<R> From<R> for DebugAddr<R> {
fn from(section: R) -> Self {
DebugAddr { section }
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::read::EndianSlice;
    use crate::test_util::GimliSectionMethods;
    use crate::{Format, LittleEndian};
    use test_assembler::{Endian, Label, LabelMaker, Section};
    // Builds a synthetic `.debug_addr` set (header + 20 addresses starting at
    // 1000) for each format/address-size combination, then reads entries back
    // by index relative to the first address.
    #[test]
    fn test_get_address() {
        for format in [Format::Dwarf32, Format::Dwarf64] {
            for address_size in [4, 8] {
                let zero = Label::new();
                let length = Label::new();
                let start = Label::new();
                let first = Label::new();
                let end = Label::new();
                // Header: unit length, version (5), address size, segment
                // selector size (0).
                let mut section = Section::with_endian(Endian::Little)
                    .mark(&zero)
                    .initial_length(format, &length, &start)
                    .D16(5)
                    .D8(address_size)
                    .D8(0)
                    .mark(&first);
                for i in 0..20 {
                    section = section.word(address_size, 1000 + i);
                }
                section = section.mark(&end);
                length.set_const((&end - &start) as u64);
                let section = section.get_contents().unwrap();
                let debug_addr = DebugAddr::from(EndianSlice::new(&section, LittleEndian));
                let base = DebugAddrBase((&first - &zero) as usize);
                assert_eq!(
                    debug_addr.get_address(address_size, base, DebugAddrIndex(0)),
                    Ok(1000)
                );
                assert_eq!(
                    debug_addr.get_address(address_size, base, DebugAddrIndex(19)),
                    Ok(1019)
                );
            }
        }
    }
}

650
third_party/rust/gimli/src/read/aranges.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,650 @@
use crate::common::{DebugArangesOffset, DebugInfoOffset, Encoding, SectionId};
use crate::endianity::Endianity;
use crate::read::{EndianSlice, Error, Range, Reader, ReaderOffset, Result, Section};
/// The `DebugAranges` struct represents the DWARF address range information
/// found in the `.debug_aranges` section.
#[derive(Debug, Default, Clone, Copy)]
pub struct DebugAranges<R> {
    // Reader over the raw section contents.
    section: R,
}
impl<'input, Endian> DebugAranges<EndianSlice<'input, Endian>>
where
    Endian: Endianity,
{
    /// Construct a new `DebugAranges` instance from the data in the `.debug_aranges`
    /// section.
    ///
    /// It is the caller's responsibility to read the `.debug_aranges` section and
    /// present it as a `&[u8]` slice. That means using some ELF loader on
    /// Linux, a Mach-O loader on macOS, etc.
    ///
    /// ```
    /// use gimli::{DebugAranges, LittleEndian};
    ///
    /// # let buf = [];
    /// # let read_debug_aranges_section = || &buf;
    /// let debug_aranges =
    ///     DebugAranges::new(read_debug_aranges_section(), LittleEndian);
    /// ```
    pub fn new(section: &'input [u8], endian: Endian) -> Self {
        Self {
            section: EndianSlice::new(section, endian),
        }
    }
}
impl<R: Reader> DebugAranges<R> {
    /// Iterate the sets of entries in the `.debug_aranges` section.
    ///
    /// Each set of entries belongs to a single unit.
    pub fn headers(&self) -> ArangeHeaderIter<R> {
        ArangeHeaderIter {
            input: self.section.clone(),
            // Iteration starts at the beginning of the section.
            offset: DebugArangesOffset(R::Offset::from_u8(0)),
        }
    }
    /// Get the header at the given offset.
    ///
    /// `offset` must point at the start of a header within the section.
    pub fn header(&self, offset: DebugArangesOffset<R::Offset>) -> Result<ArangeHeader<R>> {
        let mut input = self.section.clone();
        input.skip(offset.0)?;
        ArangeHeader::parse(&mut input, offset)
    }
}
impl<T> DebugAranges<T> {
/// Create a `DebugAranges` section that references the data in `self`.
///
/// This is useful when `R` implements `Reader` but `T` does not.
///
/// Used by `DwarfSections::borrow`.
pub fn borrow<'a, F, R>(&'a self, mut borrow: F) -> DebugAranges<R>
where
F: FnMut(&'a T) -> R,
{
borrow(&self.section).into()
}
}
impl<R> Section<R> for DebugAranges<R> {
    // Identifies this type as the `.debug_aranges` section.
    fn id() -> SectionId {
        SectionId::DebugAranges
    }
    // Exposes the underlying reader for generic section handling.
    fn reader(&self) -> &R {
        &self.section
    }
}
impl<R> From<R> for DebugAranges<R> {
fn from(section: R) -> Self {
DebugAranges { section }
}
}
/// An iterator over the headers of a `.debug_aranges` section.
#[derive(Clone, Debug)]
pub struct ArangeHeaderIter<R: Reader> {
    // The remaining, not-yet-parsed input.
    input: R,
    // Section offset of the next header to parse.
    offset: DebugArangesOffset<R::Offset>,
}
impl<R: Reader> ArangeHeaderIter<R> {
    /// Advance the iterator to the next header.
    ///
    /// Returns `Ok(None)` once the section is exhausted. After a parse
    /// error the input is emptied so iteration terminates.
    pub fn next(&mut self) -> Result<Option<ArangeHeader<R>>> {
        if self.input.is_empty() {
            return Ok(None);
        }
        let len = self.input.len();
        match ArangeHeader::parse(&mut self.input, self.offset) {
            Ok(header) => {
                // Advance the section offset by the number of bytes consumed.
                self.offset.0 += len - self.input.len();
                Ok(Some(header))
            }
            Err(e) => {
                // Don't try to resynchronize; stop yielding headers.
                self.input.empty();
                Err(e)
            }
        }
    }
}
#[cfg(feature = "fallible-iterator")]
impl<R: Reader> fallible_iterator::FallibleIterator for ArangeHeaderIter<R> {
type Item = ArangeHeader<R>;
type Error = Error;
fn next(&mut self) -> ::core::result::Result<Option<Self::Item>, Self::Error> {
ArangeHeaderIter::next(self)
}
}
/// A header for a set of entries in the `.debug_arange` section.
///
/// These entries all belong to a single unit.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct ArangeHeader<R, Offset = <R as Reader>::Offset>
where
    R: Reader<Offset = Offset>,
    Offset: ReaderOffset,
{
    // Offset of this header within the `.debug_aranges` section.
    offset: DebugArangesOffset<Offset>,
    // Format, version, and address size parsed from the header.
    encoding: Encoding,
    // Length of this set of entries, not including the initial length field.
    length: Offset,
    // Offset into `.debug_info` of the unit these entries belong to.
    debug_info_offset: DebugInfoOffset<Offset>,
    // Size in bytes of a segment selector; 0 when segments are not used.
    segment_size: u8,
    // Reader positioned at this set's (already padding-aligned) tuples.
    entries: R,
}
impl<R, Offset> ArangeHeader<R, Offset>
where
    R: Reader<Offset = Offset>,
    Offset: ReaderOffset,
{
    /// Parse one set header from `input`, leaving `input` positioned at the
    /// start of the next set.
    ///
    /// `offset` is the section offset of this header; it is recorded in the
    /// returned header, not used for reading.
    fn parse(input: &mut R, offset: DebugArangesOffset<Offset>) -> Result<Self> {
        let (length, format) = input.read_initial_length()?;
        // `rest` covers exactly this set; `input` now points past it.
        let mut rest = input.split(length)?;

        // Check the version. The DWARF 5 spec says that this is always 2, but version 3
        // has been observed in the wild, potentially due to a bug; see
        // https://github.com/gimli-rs/gimli/issues/559 for more information.
        // lldb allows versions 2 through 5, possibly by mistake.
        let version = rest.read_u16()?;
        if version != 2 && version != 3 {
            return Err(Error::UnknownVersion(u64::from(version)));
        }

        let debug_info_offset = rest.read_offset(format).map(DebugInfoOffset)?;
        let address_size = rest.read_u8()?;
        let segment_size = rest.read_u8()?;

        // unit_length + version + offset + address_size + segment_size
        let header_length = format.initial_length_size() + 2 + format.word_size() + 1 + 1;

        // The first tuple following the header in each set begins at an offset that is
        // a multiple of the size of a single tuple (that is, the size of a segment selector
        // plus twice the size of an address).
        // Checked arithmetic rejects headers whose tuple size exceeds `u8::MAX`.
        let tuple_length = address_size
            .checked_mul(2)
            .and_then(|x| x.checked_add(segment_size))
            .ok_or(Error::InvalidAddressRange)?;
        if tuple_length == 0 {
            // A zero tuple size would make the padding computation below
            // divide by zero.
            return Err(Error::InvalidAddressRange);
        }
        let padding = if header_length % tuple_length == 0 {
            0
        } else {
            tuple_length - header_length % tuple_length
        };
        rest.skip(R::Offset::from_u8(padding))?;

        let encoding = Encoding {
            format,
            version,
            address_size,
            // TODO: segment_size
        };
        Ok(ArangeHeader {
            offset,
            encoding,
            length,
            debug_info_offset,
            segment_size,
            entries: rest,
        })
    }

    /// Return the offset of this header within the `.debug_aranges` section.
    #[inline]
    pub fn offset(&self) -> DebugArangesOffset<Offset> {
        self.offset
    }

    /// Return the length of this set of entries, including the header.
    #[inline]
    pub fn length(&self) -> Offset {
        self.length
    }

    /// Return the encoding parameters for this set of entries.
    #[inline]
    pub fn encoding(&self) -> Encoding {
        self.encoding
    }

    /// Return the segment size for this set of entries.
    #[inline]
    pub fn segment_size(&self) -> u8 {
        self.segment_size
    }

    /// Return the offset into the .debug_info section for this set of arange entries.
    #[inline]
    pub fn debug_info_offset(&self) -> DebugInfoOffset<Offset> {
        self.debug_info_offset
    }

    /// Return the arange entries in this set.
    #[inline]
    pub fn entries(&self) -> ArangeEntryIter<R> {
        ArangeEntryIter {
            input: self.entries.clone(),
            encoding: self.encoding,
            segment_size: self.segment_size,
        }
    }
}
/// An iterator over the aranges from a `.debug_aranges` section.
///
/// Can be [used with
/// `FallibleIterator`](./index.html#using-with-fallibleiterator).
#[derive(Debug, Clone)]
pub struct ArangeEntryIter<R: Reader> {
    // Remaining tuple bytes for this set; emptied on error or terminator so
    // subsequent `next` calls return `Ok(None)`.
    input: R,
    // Encoding (notably the address size) taken from the set's header.
    encoding: Encoding,
    // Segment selector size in bytes from the set's header; 0 if unused.
    segment_size: u8,
}
impl<R: Reader> ArangeEntryIter<R> {
    /// Advance the iterator and return the next arange.
    ///
    /// Returns the newly parsed arange as `Ok(Some(arange))`. Returns `Ok(None)`
    /// when iteration is complete and all aranges have already been parsed and
    /// yielded. If an error occurs while parsing the next arange, then this error
    /// is returned as `Err(e)`, and all subsequent calls return `Ok(None)`.
    pub fn next(&mut self) -> Result<Option<ArangeEntry>> {
        if self.input.is_empty() {
            return Ok(None);
        }
        match ArangeEntry::parse(&mut self.input, self.encoding, self.segment_size) {
            Ok(Some(entry)) => Ok(Some(entry)),
            done_or_err => {
                // Either the set is exhausted (`Ok(None)`) or parsing failed
                // (`Err`); in both cases drain the input so the iterator is
                // fused and future calls return `Ok(None)`.
                self.input.empty();
                done_or_err
            }
        }
    }
}
#[cfg(feature = "fallible-iterator")]
impl<R: Reader> fallible_iterator::FallibleIterator for ArangeEntryIter<R> {
type Item = ArangeEntry;
type Error = Error;
fn next(&mut self) -> ::core::result::Result<Option<Self::Item>, Self::Error> {
ArangeEntryIter::next(self)
}
}
/// A single parsed arange.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub struct ArangeEntry {
    // Segment selector; `Some` only when the set's segment size is nonzero.
    segment: Option<u64>,
    // Beginning address of the range.
    address: u64,
    // Length of the range in bytes.
    length: u64,
}
impl ArangeEntry {
    /// Parse a single arange. Return `None` for the null arange, `Some` for an actual arange.
    ///
    /// All-zero tuples before the end of the set are skipped (see the comment
    /// in the body). This uses a loop rather than the previous tail recursion:
    /// Rust does not guarantee tail-call elimination, so a (possibly
    /// malicious) set consisting of many zero tuples could otherwise overflow
    /// the stack.
    fn parse<R: Reader>(
        input: &mut R,
        encoding: Encoding,
        segment_size: u8,
    ) -> Result<Option<Self>> {
        let address_size = encoding.address_size;
        // NOTE(review): assumes `2 * address_size + segment_size` fits in a
        // `u8`. `ArangeHeader::parse` computes the same value with checked
        // arithmetic and rejects headers where it overflows, and this private
        // function is only reached through such a validated header.
        let tuple_length = R::Offset::from_u8(2 * address_size + segment_size);
        loop {
            // A trailing partial tuple is treated the same as running out of
            // input: drain the reader and end iteration.
            if tuple_length > input.len() {
                input.empty();
                return Ok(None);
            }

            let segment = if segment_size != 0 {
                input.read_address(segment_size)?
            } else {
                0
            };
            let address = input.read_address(address_size)?;
            let length = input.read_address(address_size)?;

            if (segment, address, length) == (0, 0, 0) {
                // This is meant to be a null terminator, but in practice it can occur
                // before the end, possibly due to a linker omitting a function and
                // leaving an unrelocated entry. Skip it and keep parsing.
                continue;
            }

            return Ok(Some(ArangeEntry {
                segment: if segment_size != 0 {
                    Some(segment)
                } else {
                    None
                },
                address,
                length,
            }));
        }
    }

    /// Return the segment selector of this arange.
    #[inline]
    pub fn segment(&self) -> Option<u64> {
        self.segment
    }

    /// Return the beginning address of this arange.
    #[inline]
    pub fn address(&self) -> u64 {
        self.address
    }

    /// Return the length of this arange.
    #[inline]
    pub fn length(&self) -> u64 {
        self.length
    }

    /// Return the range.
    ///
    /// The end address wraps on overflow, matching the original arithmetic.
    #[inline]
    pub fn range(&self) -> Range {
        Range {
            begin: self.address,
            end: self.address.wrapping_add(self.length),
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::common::{DebugInfoOffset, Format};
    use crate::endianity::LittleEndian;
    use crate::read::EndianSlice;

    // Walk two back-to-back sets; each header must report the section offset
    // it was parsed at and the `.debug_info` offset encoded in its bytes.
    #[test]
    fn test_iterate_headers() {
        #[rustfmt::skip]
        let buf = [
            // 32-bit length = 28.
            0x1c, 0x00, 0x00, 0x00,
            // Version.
            0x02, 0x00,
            // Offset.
            0x01, 0x02, 0x03, 0x04,
            // Address size.
            0x04,
            // Segment size.
            0x00,
            // Dummy padding and arange tuples.
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
            // 32-bit length = 36.
            0x24, 0x00, 0x00, 0x00,
            // Version.
            0x02, 0x00,
            // Offset.
            0x11, 0x12, 0x13, 0x14,
            // Address size.
            0x04,
            // Segment size.
            0x00,
            // Dummy padding and arange tuples.
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        ];

        let debug_aranges = DebugAranges::new(&buf, LittleEndian);
        let mut headers = debug_aranges.headers();

        let header = headers
            .next()
            .expect("should parse header ok")
            .expect("should have a header");
        assert_eq!(header.offset(), DebugArangesOffset(0));
        assert_eq!(header.debug_info_offset(), DebugInfoOffset(0x0403_0201));

        let header = headers
            .next()
            .expect("should parse header ok")
            .expect("should have a header");
        assert_eq!(header.offset(), DebugArangesOffset(0x20));
        assert_eq!(header.debug_info_offset(), DebugInfoOffset(0x1413_1211));
    }

    // A well-formed header with nonzero segment size: parsing must consume
    // the header, padding, and entries, leaving the reader at the next set,
    // and the parsed fields must match the encoded bytes.
    #[test]
    fn test_parse_header_ok() {
        #[rustfmt::skip]
        let buf = [
            // 32-bit length = 32.
            0x20, 0x00, 0x00, 0x00,
            // Version.
            0x02, 0x00,
            // Offset.
            0x01, 0x02, 0x03, 0x04,
            // Address size.
            0x08,
            // Segment size.
            0x04,
            // Length to here = 12, tuple length = 20.
            // Padding to tuple length multiple = 4.
            0x10, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            // Dummy arange tuple data.
            0x20, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            // Dummy next arange.
            0x30, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
        ];

        let rest = &mut EndianSlice::new(&buf, LittleEndian);

        let header =
            ArangeHeader::parse(rest, DebugArangesOffset(0x10)).expect("should parse header ok");
        assert_eq!(
            *rest,
            EndianSlice::new(&buf[buf.len() - 16..], LittleEndian)
        );
        assert_eq!(
            header,
            ArangeHeader {
                offset: DebugArangesOffset(0x10),
                encoding: Encoding {
                    format: Format::Dwarf32,
                    version: 2,
                    address_size: 8,
                },
                length: 0x20,
                debug_info_offset: DebugInfoOffset(0x0403_0201),
                segment_size: 4,
                entries: EndianSlice::new(&buf[buf.len() - 32..buf.len() - 16], LittleEndian),
            }
        );
    }

    // `2 * 0xff + 0xff` overflows a `u8`; the checked arithmetic in
    // `ArangeHeader::parse` must reject it as `InvalidAddressRange`.
    #[test]
    fn test_parse_header_overflow_error() {
        #[rustfmt::skip]
        let buf = [
            // 32-bit length = 32.
            0x20, 0x00, 0x00, 0x00,
            // Version.
            0x02, 0x00,
            // Offset.
            0x01, 0x02, 0x03, 0x04,
            // Address size.
            0xff,
            // Segment size.
            0xff,
            // Length to here = 12, tuple length = 20.
            // Padding to tuple length multiple = 4.
            0x10, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            // Dummy arange tuple data.
            0x20, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            // Dummy next arange.
            0x30, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
        ];

        let rest = &mut EndianSlice::new(&buf, LittleEndian);

        let error = ArangeHeader::parse(rest, DebugArangesOffset(0x10))
            .expect_err("should fail to parse header");
        assert_eq!(error, Error::InvalidAddressRange);
    }

    // Address size and segment size both zero give a zero tuple length; the
    // explicit check in `ArangeHeader::parse` must error rather than divide
    // by zero when computing padding.
    #[test]
    fn test_parse_header_div_by_zero_error() {
        #[rustfmt::skip]
        let buf = [
            // 32-bit length = 32.
            0x20, 0x00, 0x00, 0x00,
            // Version.
            0x02, 0x00,
            // Offset.
            0x01, 0x02, 0x03, 0x04,
            // Address size = 0. Could cause a division by zero if we aren't
            // careful.
            0x00,
            // Segment size.
            0x00,
            // Length to here = 12, tuple length = 20.
            // Padding to tuple length multiple = 4.
            0x10, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            // Dummy arange tuple data.
            0x20, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            // Dummy next arange.
            0x30, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
        ];

        let rest = &mut EndianSlice::new(&buf, LittleEndian);

        let error = ArangeHeader::parse(rest, DebugArangesOffset(0x10))
            .expect_err("should fail to parse header");
        assert_eq!(error, Error::InvalidAddressRange);
    }

    // A plain tuple with no segment selector: address and length are read
    // little-endian and exactly one tuple's worth of bytes is consumed.
    #[test]
    fn test_parse_entry_ok() {
        let encoding = Encoding {
            format: Format::Dwarf32,
            version: 2,
            address_size: 4,
        };
        let segment_size = 0;
        let buf = [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09];
        let rest = &mut EndianSlice::new(&buf, LittleEndian);
        let entry =
            ArangeEntry::parse(rest, encoding, segment_size).expect("should parse entry ok");
        assert_eq!(*rest, EndianSlice::new(&buf[buf.len() - 1..], LittleEndian));
        assert_eq!(
            entry,
            Some(ArangeEntry {
                segment: None,
                address: 0x0403_0201,
                length: 0x0807_0605,
            })
        );
    }

    // With a nonzero segment size, the selector is read before the address
    // and surfaced as `Some(segment)` on the parsed entry.
    #[test]
    fn test_parse_entry_segment() {
        let encoding = Encoding {
            format: Format::Dwarf32,
            version: 2,
            address_size: 4,
        };
        let segment_size = 8;
        #[rustfmt::skip]
        let buf = [
            // Segment.
            0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
            // Address.
            0x01, 0x02, 0x03, 0x04,
            // Length.
            0x05, 0x06, 0x07, 0x08,
            // Next tuple.
            0x09
        ];
        let rest = &mut EndianSlice::new(&buf, LittleEndian);
        let entry =
            ArangeEntry::parse(rest, encoding, segment_size).expect("should parse entry ok");
        assert_eq!(*rest, EndianSlice::new(&buf[buf.len() - 1..], LittleEndian));
        assert_eq!(
            entry,
            Some(ArangeEntry {
                segment: Some(0x1817_1615_1413_1211),
                address: 0x0403_0201,
                length: 0x0807_0605,
            })
        );
    }

    // An all-zero tuple in the middle of the data is skipped, and parsing
    // continues with the following (real) tuple.
    #[test]
    fn test_parse_entry_zero() {
        let encoding = Encoding {
            format: Format::Dwarf32,
            version: 2,
            address_size: 4,
        };
        let segment_size = 0;
        #[rustfmt::skip]
        let buf = [
            // Zero tuple.
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
            // Address.
            0x01, 0x02, 0x03, 0x04,
            // Length.
            0x05, 0x06, 0x07, 0x08,
            // Next tuple.
            0x09
        ];
        let rest = &mut EndianSlice::new(&buf, LittleEndian);
        let entry =
            ArangeEntry::parse(rest, encoding, segment_size).expect("should parse entry ok");
        assert_eq!(*rest, EndianSlice::new(&buf[buf.len() - 1..], LittleEndian));
        assert_eq!(
            entry,
            Some(ArangeEntry {
                segment: None,
                address: 0x0403_0201,
                length: 0x0807_0605,
            })
        );
    }
}

7921
third_party/rust/gimli/src/read/cfi.rs поставляемый Normal file

Разница между файлами не показана из-за своего большого размера Загрузить разницу

1681
third_party/rust/gimli/src/read/dwarf.rs поставляемый Normal file

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Некоторые файлы не были показаны из-за слишком большого количества измененных файлов Показать больше