Merge autoland to mozilla-central. a=merge

Sandor Molnar 2021-09-16 18:58:41 +03:00
Parents: c60bc752d8 b6521a7e56
Commit: 7398f479b5
120 changed files: 18115 additions and 435 deletions

Cargo.lock (generated)
View file

@ -76,6 +76,15 @@ dependencies = [
"serde",
]
[[package]]
name = "arbitrary"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "237430fd6ed3740afe94eefcc278ae21e050285be882804e0d6e8695f0c94691"
dependencies = [
"derive_arbitrary",
]
[[package]]
name = "arrayref"
version = "0.3.6"
@ -1155,6 +1164,17 @@ dependencies = [
"wio",
]
[[package]]
name = "derive_arbitrary"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5f1281ee141df08871db9fe261ab5312179eac32d1e314134ceaa8dd7c042f5a"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "derive_common"
version = "0.0.1"
@ -2066,6 +2086,15 @@ dependencies = [
"cc",
]
[[package]]
name = "gluesmith"
version = "0.1.0"
dependencies = [
"arbitrary",
"libc",
"wasm-smith",
]
[[package]]
name = "goblin"
version = "0.1.3"
@ -2547,6 +2576,7 @@ dependencies = [
"baldrdash",
"encoding_c",
"encoding_c_mem",
"gluesmith",
"mozglue-static",
"mozilla-central-workspace-hack",
"smoosh",
@ -5409,6 +5439,27 @@ name = "wasm-bindgen"
version = "0.2.100"
source = "git+https://github.com/kvark/dummy-web#5731e569d865a1ebaf116f48dad781f355a99243"
[[package]]
name = "wasm-encoder"
version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2caacc74c68c74f0008c4055cdf509c43e623775eaf73323bb818dcf666ed9bd"
dependencies = [
"leb128",
]
[[package]]
name = "wasm-smith"
version = "0.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2b05fb86fe9042112d9659e3f56736b8ad4122023e568e5c55184677c84f66c9"
dependencies = [
"arbitrary",
"indexmap",
"leb128",
"wasm-encoder",
]
[[package]]
name = "wasmparser"
version = "0.78.2"

View file

@ -1782,6 +1782,9 @@ pref("browser.contentblocking.state-partitioning.mvp.ui.enabled", true);
// Restrict relaxing default referrer policy:
// "rp": Restrict relaxing default referrer policy enabled
// "-rp": Restrict relaxing default referrer policy disabled
// OCSP cache partitioning:
// "ocsp": OCSP cache partitioning enabled
// "-ocsp": OCSP cache partitioning disabled
// Cookie behavior:
// "cookieBehavior0": cookie behaviour BEHAVIOR_ACCEPT
// "cookieBehavior1": cookie behaviour BEHAVIOR_REJECT_FOREIGN
@ -1797,7 +1800,7 @@ pref("browser.contentblocking.state-partitioning.mvp.ui.enabled", true);
// "cookieBehaviorPBM4": cookie behaviour BEHAVIOR_REJECT_TRACKER
// "cookieBehaviorPBM5": cookie behaviour BEHAVIOR_REJECT_TRACKER_AND_PARTITION_FOREIGN
// One value from each section must be included in the browser.contentblocking.features.strict pref.
pref("browser.contentblocking.features.strict", "tp,tpPrivate,cookieBehavior5,cookieBehaviorPBM5,cm,fp,stp,lvl2,rp");
pref("browser.contentblocking.features.strict", "tp,tpPrivate,cookieBehavior5,cookieBehaviorPBM5,cm,fp,stp,lvl2,rp,ocsp");
// Hide the "Change Block List" link for trackers/tracking content in the custom
// Content Blocking/ETP panel. By default, it will not be visible. There is also

View file

@ -1321,6 +1321,10 @@ BrowserGlue.prototype = {
"network.http.referer.disallowCrossSiteRelaxingDefault",
this._matchCBCategory
);
Services.prefs.removeObserver(
"privacy.partition.network_state.ocsp_cache",
this._matchCBCategory
);
Services.prefs.removeObserver(
ContentBlockingCategoriesPrefs.PREF_CB_CATEGORY,
this._updateCBCategory
@ -1766,6 +1770,10 @@ BrowserGlue.prototype = {
"network.http.referer.disallowCrossSiteRelaxingDefault",
this._matchCBCategory
);
Services.prefs.addObserver(
"privacy.partition.network_state.ocsp_cache",
this._matchCBCategory
);
Services.prefs.addObserver(
ContentBlockingCategoriesPrefs.PREF_CB_CATEGORY,
this._updateCBCategory
@ -4370,6 +4378,7 @@ var ContentBlockingCategoriesPrefs = {
"privacy.trackingprotection.cryptomining.enabled": null,
"privacy.annotate_channels.strict_list.enabled": null,
"network.http.referer.disallowCrossSiteRelaxingDefault": null,
"privacy.partition.network_state.ocsp_cache": null,
},
standard: {
"network.cookie.cookieBehavior": null,
@ -4381,6 +4390,7 @@ var ContentBlockingCategoriesPrefs = {
"privacy.trackingprotection.cryptomining.enabled": null,
"privacy.annotate_channels.strict_list.enabled": null,
"network.http.referer.disallowCrossSiteRelaxingDefault": null,
"privacy.partition.network_state.ocsp_cache": null,
},
};
let type = "strict";
@ -4459,6 +4469,16 @@ var ContentBlockingCategoriesPrefs = {
"network.http.referer.disallowCrossSiteRelaxingDefault"
] = false;
break;
case "ocsp":
this.CATEGORY_PREFS[type][
"privacy.partition.network_state.ocsp_cache"
] = true;
break;
case "-ocsp":
this.CATEGORY_PREFS[type][
"privacy.partition.network_state.ocsp_cache"
] = false;
break;
case "cookieBehavior0":
this.CATEGORY_PREFS[type]["network.cookie.cookieBehavior"] =
Ci.nsICookieService.BEHAVIOR_ACCEPT;

View file

@ -16,6 +16,7 @@ const STP_PREF = "privacy.trackingprotection.socialtracking.enabled";
const CM_PREF = "privacy.trackingprotection.cryptomining.enabled";
const LEVEL2_PREF = "privacy.annotate_channels.strict_list.enabled";
const REFERRER_PREF = "network.http.referer.disallowCrossSiteRelaxingDefault";
const OCSP_PREF = "privacy.partition.network_state.ocsp_cache";
const PREF_TEST_NOTIFICATIONS =
"browser.safebrowsing.test-notifications.enabled";
const STRICT_PREF = "browser.contentblocking.features.strict";
@ -317,6 +318,7 @@ add_task(async function testContentBlockingStandardCategory() {
[CM_PREF]: null,
[LEVEL2_PREF]: null,
[REFERRER_PREF]: null,
[OCSP_PREF]: null,
};
for (let pref in prefs) {
@ -357,6 +359,7 @@ add_task(async function testContentBlockingStandardCategory() {
REFERRER_PREF,
!Services.prefs.getBoolPref(REFERRER_PREF)
);
Services.prefs.setBoolPref(OCSP_PREF, !Services.prefs.getBoolPref(OCSP_PREF));
for (let pref in prefs) {
switch (Services.prefs.getPrefType(pref)) {
@ -419,6 +422,7 @@ add_task(async function testContentBlockingStrictCategory() {
Services.prefs.setBoolPref(TP_PBM_PREF, false);
Services.prefs.setBoolPref(LEVEL2_PREF, false);
Services.prefs.setBoolPref(REFERRER_PREF, false);
Services.prefs.setBoolPref(OCSP_PREF, false);
Services.prefs.setIntPref(
NCB_PREF,
Ci.nsICookieService.BEHAVIOR_LIMIT_FOREIGN
@ -541,6 +545,20 @@ add_task(async function testContentBlockingStrictCategory() {
`${REFERRER_PREF} has been set to false`
);
break;
case "ocsp":
is(
Services.prefs.getBoolPref(OCSP_PREF),
true,
`${OCSP_PREF} has been set to true`
);
break;
case "-ocsp":
is(
Services.prefs.getBoolPref(OCSP_PREF),
false,
`${OCSP_PREF} has been set to false`
);
break;
case "cookieBehavior0":
is(
Services.prefs.getIntPref(NCB_PREF),
@ -645,6 +663,7 @@ add_task(async function testContentBlockingCustomCategory() {
STP_PREF,
CM_PREF,
REFERRER_PREF,
OCSP_PREF,
];
await openPreferencesViaOpenPreferencesAPI("privacy", { leaveOpen: true });
@ -692,6 +711,7 @@ add_task(async function testContentBlockingCustomCategory() {
TP_PREF,
TP_PBM_PREF,
REFERRER_PREF,
OCSP_PREF,
]) {
Services.prefs.setBoolPref(pref, !Services.prefs.getBoolPref(pref));
await TestUtils.waitForCondition(

View file

@ -19,6 +19,7 @@ const CM_PREF = "privacy.trackingprotection.cryptomining.enabled";
const STP_PREF = "privacy.trackingprotection.socialtracking.enabled";
const LEVEL2_PREF = "privacy.annotate_channels.strict_list.enabled";
const REFERRER_PREF = "network.http.referer.disallowCrossSiteRelaxingDefault";
const OCSP_PREF = "privacy.partition.network_state.ocsp_cache";
const STRICT_DEF_PREF = "browser.contentblocking.features.strict";
// Tests that the content blocking standard category definition is based on the default settings of
@ -69,6 +70,10 @@ add_task(async function testContentBlockingStandardDefinition() {
!Services.prefs.prefHasUserValue(REFERRER_PREF),
`${REFERRER_PREF} pref has the default value`
);
ok(
!Services.prefs.prefHasUserValue(OCSP_PREF),
`${OCSP_PREF} pref has the default value`
);
let defaults = Services.prefs.getDefaultBranch("");
let originalTP = defaults.getBoolPref(TP_PREF);
@ -80,6 +85,7 @@ add_task(async function testContentBlockingStandardDefinition() {
let originalNCBP = defaults.getIntPref(NCBP_PREF);
let originalLEVEL2 = defaults.getBoolPref(LEVEL2_PREF);
let originalREFERRER = defaults.getBoolPref(REFERRER_PREF);
let originalOCSP = defaults.getBoolPref(OCSP_PREF);
let nonDefaultNCB;
switch (originalNCB) {
@ -109,6 +115,7 @@ add_task(async function testContentBlockingStandardDefinition() {
defaults.setIntPref(NCB_PREF, !originalNCB);
defaults.setBoolPref(LEVEL2_PREF, !originalLEVEL2);
defaults.setBoolPref(REFERRER_PREF, !originalREFERRER);
defaults.setBoolPref(OCSP_PREF, !originalOCSP);
ok(
!Services.prefs.prefHasUserValue(TP_PREF),
@ -146,6 +153,10 @@ add_task(async function testContentBlockingStandardDefinition() {
!Services.prefs.prefHasUserValue(REFERRER_PREF),
`${REFERRER_PREF} pref has the default value`
);
ok(
!Services.prefs.prefHasUserValue(OCSP_PREF),
`${OCSP_PREF} pref has the default value`
);
// cleanup
defaults.setIntPref(NCB_PREF, originalNCB);
@ -158,6 +169,7 @@ add_task(async function testContentBlockingStandardDefinition() {
defaults.setIntPref(NCBP_PREF, originalNCBP);
defaults.setBoolPref(LEVEL2_PREF, originalLEVEL2);
defaults.setBoolPref(REFERRER_PREF, originalREFERRER);
defaults.setBoolPref(OCSP_PREF, originalOCSP);
});
// Tests that the content blocking strict category definition changes the behavior
@ -168,7 +180,7 @@ add_task(async function testContentBlockingStrictDefinition() {
let originalStrictPref = defaults.getStringPref(STRICT_DEF_PREF);
defaults.setStringPref(
STRICT_DEF_PREF,
"tp,tpPrivate,fp,cm,cookieBehavior0,cookieBehaviorPBM0,stp,lvl2,rp"
"tp,tpPrivate,fp,cm,cookieBehavior0,cookieBehaviorPBM0,stp,lvl2,rp,ocsp"
);
Services.prefs.setStringPref(CAT_PREF, "strict");
is(
@ -183,7 +195,7 @@ add_task(async function testContentBlockingStrictDefinition() {
);
is(
Services.prefs.getStringPref(STRICT_DEF_PREF),
"tp,tpPrivate,fp,cm,cookieBehavior0,cookieBehaviorPBM0,stp,lvl2,rp",
"tp,tpPrivate,fp,cm,cookieBehavior0,cookieBehaviorPBM0,stp,lvl2,rp,ocsp",
`${STRICT_DEF_PREF} changed to what we set.`
);
@ -232,6 +244,11 @@ add_task(async function testContentBlockingStrictDefinition() {
true,
`${REFERRER_PREF} pref has been set to true`
);
is(
Services.prefs.getBoolPref(OCSP_PREF),
true,
`${OCSP_PREF} pref has been set to true`
);
// Note, if a pref is not listed it will use the default value, however this is only meant as a
// backup if a mistake is made. The UI will not respond correctly.
@ -272,10 +289,14 @@ add_task(async function testContentBlockingStrictDefinition() {
!Services.prefs.prefHasUserValue(REFERRER_PREF),
`${REFERRER_PREF} pref has the default value`
);
ok(
!Services.prefs.prefHasUserValue(OCSP_PREF),
`${OCSP_PREF} pref has the default value`
);
defaults.setStringPref(
STRICT_DEF_PREF,
"-tpPrivate,-fp,-cm,-tp,cookieBehavior3,cookieBehaviorPBM2,-stp,-lvl2,-rp"
"-tpPrivate,-fp,-cm,-tp,cookieBehavior3,cookieBehaviorPBM2,-stp,-lvl2,-rp,-ocsp"
);
is(
Services.prefs.getBoolPref(TP_PREF),
@ -322,6 +343,11 @@ add_task(async function testContentBlockingStrictDefinition() {
false,
`${REFERRER_PREF} pref has been set to false`
);
is(
Services.prefs.getBoolPref(OCSP_PREF),
false,
`${OCSP_PREF} pref has been set to false`
);
// cleanup
defaults.setStringPref(STRICT_DEF_PREF, originalStrictPref);

View file

@ -106,7 +106,7 @@ add_task(async function runTests() {
// cookieBehaviors both are BEHAVIOR_REJECT_TRACKER_AND_PARTITION_FOREIGN in
// the strict feature value.
await testCookieBlockingInfoStrict(
"tp,tpPrivate,cookieBehavior5,cookieBehaviorPBM5,cm,fp,stp,lvl2,rp",
"tp,tpPrivate,cookieBehavior5,cookieBehaviorPBM5,cm,fp,stp,lvl2,rp,ocsp",
false
);
@ -114,7 +114,7 @@ add_task(async function runTests() {
// cookieBehavior is BEHAVIOR_REJECT_TRACKER and the private cookieBehavior is
// BEHAVIOR_REJECT_TRACKER_AND_PARTITION_FOREIGN
await testCookieBlockingInfoStrict(
"tp,tpPrivate,cookieBehavior4,cookieBehaviorPBM5,cm,fp,stp,lvl2,rp",
"tp,tpPrivate,cookieBehavior4,cookieBehaviorPBM5,cm,fp,stp,lvl2,rp,ocsp",
true
);

View file

@ -3660,6 +3660,8 @@ nsDocShell::DisplayLoadError(nsresult aError, nsIURI* aURI,
addHostPort = true;
break;
case NS_ERROR_BLOCKED_BY_POLICY:
case NS_ERROR_DOM_COOP_FAILED:
case NS_ERROR_DOM_COEP_FAILED:
// Page blocked by policy
error = "blockedByPolicy";
break;
@ -6148,7 +6150,9 @@ nsresult nsDocShell::FilterStatusForErrorPage(
aStatus == NS_ERROR_PROXY_AUTHENTICATION_FAILED ||
aStatus == NS_ERROR_PROXY_TOO_MANY_REQUESTS ||
aStatus == NS_ERROR_MALFORMED_URI ||
aStatus == NS_ERROR_BLOCKED_BY_POLICY) &&
aStatus == NS_ERROR_BLOCKED_BY_POLICY ||
aStatus == NS_ERROR_DOM_COOP_FAILED ||
aStatus == NS_ERROR_DOM_COEP_FAILED) &&
(aIsTopFrame || aUseErrorPages)) {
return aStatus;
}

View file

@ -4,6 +4,7 @@
https://bugzilla.mozilla.org/show_bug.cgi?id=448166
-->
<head>
<meta charset="utf-8" />
<title>Test for Bug 448166</title>
<script src="/tests/SimpleTest/SimpleTest.js"></script>
<link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
@ -23,8 +24,11 @@ https://bugzilla.mozilla.org/show_bug.cgi?id=448166
/** Test for Bug 448166 **/
isnot($("test").href, "http://www.mozilla.org/",
"Should notice unpaired surrogate");
is($("test").href, "http://www.xn--mozilla-2e14b.org/",
"Should replace unpaired surrogate with replacement char");
is($("test").href, "http://www.moz<6F>illa.org",
"URL parser fails. Href returns original input string");
SimpleTest.doesThrow(() => { new URL($("test").href);}, "URL parser rejects input");
is($("control").href, "http://www.mozilla.org/",
"Just making sure .href works");

View file

@ -0,0 +1,10 @@
[package]
name = "gluesmith"
version = "0.1.0"
authors = ["Christian Holler"]
license = "MPL 2.0"
[dependencies]
wasm-smith = "0.7.2"
arbitrary = { version = "1.0.0", features = ["derive"] }
libc = "0.2"

View file

@ -0,0 +1,15 @@
# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
# vim: set filetype=python:
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
FINAL_LIBRARY = "js"
# Includes should be relative to parent path
LOCAL_INCLUDES += ["!../..", "../.."]
include("../../js-config.mozbuild")
include("../../js-cxxflags.mozbuild")
DIRS += ["../../rust"]

View file

@ -0,0 +1,52 @@
/* Copyright 2021 Mozilla Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
extern crate arbitrary;
extern crate wasm_smith;
use arbitrary::{Arbitrary, Unstructured};
use wasm_smith::Module;
use std::ptr;
#[no_mangle]
pub unsafe extern "C" fn gluesmith(
data: *mut u8,
len: usize,
out: *mut u8,
maxlen: usize,
) -> usize {
let buf: &[u8] = std::slice::from_raw_parts(data, len);
let mut u = Unstructured::new(buf);
let module = match Module::arbitrary(&mut u) {
Ok(m) => m,
Err(_e) => return 0,
};
let wasm_bytes = module.to_bytes();
let src_len = wasm_bytes.len();
if src_len > maxlen {
return 0;
}
let src_ptr = wasm_bytes.as_ptr();
ptr::copy_nonoverlapping(src_ptr, out, src_len);
return src_len;
}

View file

@ -39,6 +39,7 @@ if CONFIG["FUZZING_INTERFACES"]:
USE_LIBS += [
"static:js",
"static:jsrust",
]
DEFINES["topsrcdir"] = "%s/js/src" % TOPSRCDIR

View file

@ -30,6 +30,11 @@ using namespace js::wasm;
extern JS::PersistentRootedObject gGlobal;
extern JSContext* gCx;
static bool gIsWasmSmith = false;
extern "C" {
size_t gluesmith(uint8_t* data, size_t size, uint8_t* out, size_t maxsize);
}
static int testWasmInit(int* argc, char*** argv) {
if (!wasm::HasSupport(gCx) ||
!GlobalObject::getOrCreateConstructor(gCx, JSProto_WebAssembly)) {
@ -39,6 +44,11 @@ static int testWasmInit(int* argc, char*** argv) {
return 0;
}
static int testWasmSmithInit(int* argc, char*** argv) {
gIsWasmSmith = true;
return testWasmInit(argc, argv);
}
static bool emptyNativeFunction(JSContext* cx, unsigned argc, Value* vp) {
CallArgs args = CallArgsFromVp(argc, vp);
args.rval().setUndefined();
@ -124,8 +134,22 @@ static int testWasmFuzz(const uint8_t* buf, size_t size) {
// Ensure we have no lingering exceptions from previous modules
gCx->clearPendingException();
unsigned char moduleLen = buf[currentIndex];
currentIndex++;
uint16_t moduleLen;
if (gIsWasmSmith) {
// Jump over the optByte. Unlike with the regular format, for
// wasm-smith we are fixing this and use byte 0 as opt-byte.
// Eventually this will also be changed for the regular format.
if (!currentIndex) {
currentIndex++;
}
// Caller ensures the structural soundness of the input here
moduleLen = *((uint16_t*)&buf[currentIndex]);
currentIndex += 2;
} else {
moduleLen = buf[currentIndex];
currentIndex++;
}
if (size - currentIndex < moduleLen) {
moduleLen = size - currentIndex;
@ -135,11 +159,16 @@ static int testWasmFuzz(const uint8_t* buf, size_t size) {
continue;
}
if (currentIndex == 1) {
if (currentIndex == 1 || (gIsWasmSmith && currentIndex == 3)) {
// If this is the first module we are reading, we use the first
// few bytes to tweak some settings. These are fixed anyway and
// overwritten later on.
uint8_t optByte = (uint8_t)buf[currentIndex];
uint8_t optByte;
if (gIsWasmSmith) {
optByte = (uint8_t)buf[0];
} else {
optByte = (uint8_t)buf[currentIndex];
}
// Note that IonPlatformSupport() and CraneliftPlatformSupport() do not
// take into account whether those compilers support particular features
@ -198,6 +227,15 @@ static int testWasmFuzz(const uint8_t* buf, size_t size) {
uint32_t magic_header = 0x6d736100;
uint32_t magic_version = 0x1;
if (gIsWasmSmith) {
// When using wasm-smith, magic values should already be there.
// Checking this to make sure the data passed is sane.
MOZ_RELEASE_ASSERT(*(uint32_t*)(&buf[currentIndex]) == magic_header,
"Magic header mismatch!");
MOZ_RELEASE_ASSERT(*(uint32_t*)(&buf[currentIndex + 4]) == magic_version,
"Magic version mismatch!");
}
// We just skip over the first 8 bytes now because we fill them
// with `magic_header` and `magic_version` anyway.
currentIndex += 8;
@ -451,4 +489,68 @@ static int testWasmFuzz(const uint8_t* buf, size_t size) {
return 0;
}
static int testWasmSmithFuzz(const uint8_t* buf, size_t size) {
// Define maximum sizes for the input to wasm-smith as well
// as the resulting modules. The input to output size factor
// of wasm-smith is somewhat variable but a factor of 4 seems
// to roughly work out. The logic below also assumes that these
// are powers of 2.
const size_t maxInputSize = 1024;
const size_t maxModuleSize = 4096;
size_t maxModules = size / maxInputSize + 1;
// We need 1 leading byte for options and 2 bytes for size per module
uint8_t* out =
new uint8_t[1 + maxModules * (maxModuleSize + sizeof(uint16_t))];
auto deleteGuard = mozilla::MakeScopeExit([&] { delete[] out; });
// Copy the opt-byte.
out[0] = buf[0];
size_t outIndex = 1;
size_t currentIndex = 1;
while (currentIndex < size) {
size_t remaining = size - currentIndex;
// We need to have at least a size and some byte to read.
if (remaining <= sizeof(uint16_t)) {
break;
}
// Determine size of the next input, limited to `maxInputSize`.
uint16_t inSize =
(*((uint16_t*)&buf[currentIndex]) & (maxInputSize - 1)) + 1;
remaining -= sizeof(uint16_t);
currentIndex += sizeof(uint16_t);
// Cap to remaining bytes.
inSize = remaining >= inSize ? inSize : remaining;
size_t outSize =
gluesmith((uint8_t*)&buf[currentIndex], inSize,
out + outIndex + sizeof(uint16_t), maxModuleSize);
if (!outSize) {
break;
}
currentIndex += inSize;
// Write the size of the resulting module to our output buffer.
*(uint16_t*)(&out[outIndex]) = (uint16_t)outSize;
outIndex += sizeof(uint16_t) + outSize;
}
// If we lack at least one module, don't do anything.
if (outIndex == 1) {
return 0;
}
return testWasmFuzz(out, outIndex);
}
MOZ_FUZZING_INTERFACE_RAW(testWasmInit, testWasmFuzz, Wasm);
MOZ_FUZZING_INTERFACE_RAW(testWasmSmithInit, testWasmSmithFuzz, WasmSmith);
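Below is a minimal, self-contained sketch (not part of the patch) of the buffer layout that testWasmSmithFuzz builds and testWasmFuzz consumes when gIsWasmSmith is set: byte 0 holds the opt-byte copied from the raw fuzzer input, and each module follows as a uint16 length plus that many bytes of wasm emitted by gluesmith. The walkModules helper name is purely illustrative.

#include <cstdint>
#include <cstring>
#include <utility>
#include <vector>

// Walk the stream produced by testWasmSmithFuzz: out[0] is the opt-byte,
// followed by (uint16 length, module bytes) pairs. Returns views into buf.
static std::vector<std::pair<const uint8_t*, uint16_t>> walkModules(
    const uint8_t* buf, size_t size) {
  std::vector<std::pair<const uint8_t*, uint16_t>> modules;
  size_t index = 1;  // Skip the opt-byte at offset 0.
  // Need at least a uint16 length plus one byte of module data, matching the
  // "remaining <= sizeof(uint16_t)" break in testWasmSmithFuzz.
  while (index + sizeof(uint16_t) < size) {
    uint16_t len;
    // Native byte order, matching the raw uint16 store in testWasmSmithFuzz.
    memcpy(&len, buf + index, sizeof(len));
    index += sizeof(len);
    if (size - index < len) {
      len = static_cast<uint16_t>(size - index);  // Clamp, as testWasmFuzz does.
    }
    modules.emplace_back(buf + index, len);
    index += len;
  }
  return modules;
}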

View file

@ -340,10 +340,10 @@ class ArenaLists {
Arena* takeSweptEmptyArenas();
bool foregroundFinalize(JSFreeOp* fop, AllocKind thingKind,
js::SliceBudget& sliceBudget,
SortedArenaList& sweepList);
static void backgroundFinalize(JSFreeOp* fop, Arena* listHead, Arena** empty);
void setIncrementalSweptArenas(AllocKind kind, SortedArenaList& arenas);
void clearIncrementalSweptArenas();
void mergeFinalizedArenas(AllocKind thingKind, SortedArenaList& finalizedArenas);
void setParallelAllocEnabled(bool enabled);
void setParallelUnmarkEnabled(bool enabled);
@ -379,12 +379,8 @@ class ArenaLists {
inline JSRuntime* runtime();
inline JSRuntime* runtimeFromAnyThread();
inline void queueForForegroundSweep(JSFreeOp* fop,
const FinalizePhase& phase);
inline void queueForBackgroundSweep(JSFreeOp* fop,
const FinalizePhase& phase);
inline void queueForForegroundSweep(AllocKind thingKind);
inline void queueForBackgroundSweep(AllocKind thingKind);
void queueForForegroundSweep(AllocKind thingKind);
void queueForBackgroundSweep(AllocKind thingKind);
TenuredCell* refillFreeListAndAllocate(FreeLists& freeLists,
AllocKind thingKind,

View file

@ -1590,11 +1590,11 @@ bool GCRuntime::isCompactingGCEnabled() const {
rt->mainContextFromOwnThread()->compactingDisabledCount == 0;
}
void ArenaLists::queueForForegroundSweep(JSFreeOp* fop,
const FinalizePhase& phase) {
gcstats::AutoPhase ap(fop->runtime()->gc.stats(), phase.statsPhase);
void GCRuntime::queueForForegroundSweep(Zone* zone, JSFreeOp* fop,
const FinalizePhase& phase) {
gcstats::AutoPhase ap(stats(), phase.statsPhase);
for (auto kind : phase.kinds) {
queueForForegroundSweep(kind);
zone->arenas.queueForForegroundSweep(kind);
}
}
@ -1607,20 +1607,19 @@ void ArenaLists::queueForForegroundSweep(AllocKind thingKind) {
arenaList(thingKind).clear();
}
void ArenaLists::queueForBackgroundSweep(JSFreeOp* fop,
const FinalizePhase& phase) {
gcstats::AutoPhase ap(fop->runtime()->gc.stats(), phase.statsPhase);
void GCRuntime::queueForBackgroundSweep(Zone* zone, JSFreeOp* fop,
const FinalizePhase& phase) {
gcstats::AutoPhase ap(stats(), phase.statsPhase);
for (auto kind : phase.kinds) {
queueForBackgroundSweep(kind);
zone->arenas.queueForBackgroundSweep(kind);
}
}
inline void ArenaLists::queueForBackgroundSweep(AllocKind thingKind) {
void ArenaLists::queueForBackgroundSweep(AllocKind thingKind) {
MOZ_ASSERT(IsBackgroundFinalized(thingKind));
MOZ_ASSERT(concurrentUse(thingKind) == ConcurrentUse::None);
ArenaList* al = &arenaList(thingKind);
arenasToSweep(thingKind) = al->head();
arenasToSweep(thingKind) = arenaList(thingKind).head();
arenaList(thingKind).clear();
if (arenasToSweep(thingKind)) {
@ -1630,9 +1629,8 @@ inline void ArenaLists::queueForBackgroundSweep(AllocKind thingKind) {
}
}
/*static*/
void ArenaLists::backgroundFinalize(JSFreeOp* fop, Arena* listHead,
Arena** empty) {
void GCRuntime::backgroundFinalize(JSFreeOp* fop, Arena* listHead,
Arena** empty) {
MOZ_ASSERT(listHead);
MOZ_ASSERT(empty);
@ -1653,10 +1651,6 @@ void ArenaLists::backgroundFinalize(JSFreeOp* fop, Arena* listHead,
// allocated before background finalization finishes; now that finalization is
// complete, we want to merge these lists back together.
ArenaLists* lists = &zone->arenas;
ArenaList& al = lists->arenaList(thingKind);
// Flatten |finalizedSorted| into a regular ArenaList.
ArenaList finalized = finalizedSorted.toArenaList();
// We must take the GC lock to be able to safely modify the ArenaList;
// however, this does not by itself make the changes visible to all threads,
@ -1664,21 +1658,30 @@ void ArenaLists::backgroundFinalize(JSFreeOp* fop, Arena* listHead,
// That safety is provided by the ReleaseAcquire memory ordering of the
// background finalize state, which we explicitly set as the final step.
{
AutoLockGC lock(lists->runtimeFromAnyThread());
AutoLockGC lock(rt);
MOZ_ASSERT(lists->concurrentUse(thingKind) ==
ConcurrentUse::BackgroundFinalize);
// Join |al| and |finalized| into a single list.
ArenaList allocatedDuringSweep = std::move(al);
al = std::move(finalized);
al.insertListWithCursorAtEnd(lists->newArenasInMarkPhase(thingKind));
al.insertListWithCursorAtEnd(allocatedDuringSweep);
lists->newArenasInMarkPhase(thingKind).clear();
ArenaLists::ConcurrentUse::BackgroundFinalize);
lists->mergeFinalizedArenas(thingKind, finalizedSorted);
lists->arenasToSweep(thingKind) = nullptr;
}
lists->concurrentUse(thingKind) = ConcurrentUse::None;
lists->concurrentUse(thingKind) = ArenaLists::ConcurrentUse::None;
}
// After finalizing arenas, merge the following to get the final state of an
// arena list:
// - arenas allocated during marking
// - arenas allocated during sweeping
// - finalized arenas
void ArenaLists::mergeFinalizedArenas(AllocKind thingKind, SortedArenaList& finalizedArenas) {
ArenaList& arenas = arenaList(thingKind);
ArenaList allocatedDuringSweep = std::move(arenas);
arenas = finalizedArenas.toArenaList();
arenas.insertListWithCursorAtEnd(newArenasInMarkPhase(thingKind));
arenas.insertListWithCursorAtEnd(allocatedDuringSweep);
newArenasInMarkPhase(thingKind).clear();
}
void ArenaLists::queueForegroundThingsForSweep() {
@ -2100,7 +2103,7 @@ void GCRuntime::sweepBackgroundThings(ZoneList& zones) {
Arena* arenas = zone->arenas.arenasToSweep(kind);
MOZ_RELEASE_ASSERT(uintptr_t(arenas) != uintptr_t(-1));
if (arenas) {
ArenaLists::backgroundFinalize(&fop, arenas, &emptyArenas);
backgroundFinalize(&fop, arenas, &emptyArenas);
}
}
}
@ -4199,10 +4202,10 @@ IncrementalProgress GCRuntime::beginSweepingSweepGroup(JSFreeOp* fop,
// or on the background thread.
for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
zone->arenas.queueForForegroundSweep(fop, ForegroundObjectFinalizePhase);
zone->arenas.queueForForegroundSweep(fop, ForegroundNonObjectFinalizePhase);
queueForForegroundSweep(zone, fop, ForegroundObjectFinalizePhase);
queueForForegroundSweep(zone, fop, ForegroundNonObjectFinalizePhase);
for (const auto& phase : BackgroundFinalizePhases) {
zone->arenas.queueForBackgroundSweep(fop, phase);
queueForBackgroundSweep(zone, fop, phase);
}
zone->arenas.queueForegroundThingsForSweep();
@ -4340,41 +4343,27 @@ void GCRuntime::beginSweepPhase(JS::GCReason reason, AutoGCSession& session) {
sweepActions->assertFinished();
}
bool ArenaLists::foregroundFinalize(JSFreeOp* fop, AllocKind thingKind,
SliceBudget& sliceBudget,
SortedArenaList& sweepList) {
checkNoArenasToUpdateForKind(thingKind);
bool GCRuntime::foregroundFinalize(JSFreeOp* fop, Zone* zone,
AllocKind thingKind,
SliceBudget& sliceBudget,
SortedArenaList& sweepList) {
ArenaLists& lists = zone->arenas;
lists.checkNoArenasToUpdateForKind(thingKind);
// Arenas are released for use for new allocations as soon as the finalizers
// for that allocation kind have run. This means that a cell's finalizer can
// safely use IsAboutToBeFinalized to check other cells of the same alloc
// kind, but not of different alloc kinds: the other arena may have already
// had new objects allocated in it, and since we allocate black,
// IsAboutToBeFinalized will return false even though the referent we intended
// to check is long gone.
if (!FinalizeArenas(fop, &arenasToSweep(thingKind), sweepList, thingKind,
sliceBudget)) {
// Non-empty arenas are reused for new allocations as soon as the
// finalizers for that allocation kind have run. Empty arenas are only
// released when everything in the zone has been swept (see
// GCRuntime::sweepBackgroundThings for more details).
if (!FinalizeArenas(fop, &lists.arenasToSweep(thingKind), sweepList,
thingKind, sliceBudget)) {
// Copy the current contents of sweepList so that ArenaIter can find them.
incrementalSweptArenaKind = thingKind;
incrementalSweptArenas.ref().clear();
incrementalSweptArenas = sweepList.toArenaList();
lists.setIncrementalSweptArenas(thingKind, sweepList);
return false;
}
// Clear the list of swept arenas now these are moving back to the main arena
// lists.
incrementalSweptArenaKind = AllocKind::LIMIT;
incrementalSweptArenas.ref().clear();
sweepList.extractEmpty(&savedEmptyArenas.ref());
ArenaList& al = arenaList(thingKind);
ArenaList allocatedDuringSweep = std::move(al);
al = sweepList.toArenaList();
al.insertListWithCursorAtEnd(newArenasInMarkPhase(thingKind));
al.insertListWithCursorAtEnd(allocatedDuringSweep);
newArenasInMarkPhase(thingKind).clear();
sweepList.extractEmpty(&lists.savedEmptyArenas.ref());
lists.mergeFinalizedArenas(thingKind, sweepList);
lists.clearIncrementalSweptArenas();
return true;
}
@ -4591,8 +4580,7 @@ IncrementalProgress GCRuntime::finalizeAllocKind(JSFreeOp* fop,
AutoSetThreadIsSweeping threadIsSweeping(sweepZone);
if (!sweepZone->arenas.foregroundFinalize(fop, sweepAllocKind, budget,
sweepList)) {
if (!foregroundFinalize(fop, sweepZone, sweepAllocKind, budget, sweepList)) {
return NotFinished;
}

View file

@ -518,6 +518,29 @@ static bool GCSliceCountGetter(JSContext* cx, unsigned argc, Value* vp) {
return true;
}
static bool GCCompartmentCount(JSContext* cx, unsigned argc, Value* vp) {
CallArgs args = CallArgsFromVp(argc, vp);
size_t count = 0;
for (ZonesIter zone(cx->runtime(), WithAtoms); !zone.done(); zone.next()) {
count += zone->compartments().length();
}
args.rval().setNumber(double(count));
return true;
}
static bool GCLastStartReason(JSContext* cx, unsigned argc, Value* vp) {
CallArgs args = CallArgsFromVp(argc, vp);
const char* reason = ExplainGCReason(cx->runtime()->gc.lastStartReason());
RootedString str(cx, JS_NewStringCopyZ(cx, reason));
if (!str) {
return false;
}
args.rval().setString(str);
return true;
}
static bool ZoneGCBytesGetter(JSContext* cx, unsigned argc, Value* vp) {
CallArgs args = CallArgsFromVp(argc, vp);
args.rval().setNumber(double(cx->zone()->gcHeapSize.bytes()));
@ -585,7 +608,9 @@ JSObject* NewMemoryInfoObject(JSContext* cx) {
{"gcNumber", GCNumberGetter},
{"majorGCCount", MajorGCCountGetter},
{"minorGCCount", MinorGCCountGetter},
{"sliceCount", GCSliceCountGetter}};
{"sliceCount", GCSliceCountGetter},
{"compartmentCount", GCCompartmentCount},
{"lastStartReason", GCLastStartReason}};
for (auto pair : getters) {
JSNative getter = pair.getter;

View file

@ -621,6 +621,8 @@ class GCRuntime {
bool unregisterWeakRefWrapper(JSObject* wrapper);
void traceKeptObjects(JSTracer* trc);
JS::GCReason lastStartReason() const { return initialReason; }
private:
enum IncrementalResult { ResetIncremental = 0, Ok };
@ -774,6 +776,11 @@ class GCRuntime {
void markIncomingGrayCrossCompartmentPointers();
IncrementalProgress beginSweepingSweepGroup(JSFreeOp* fop,
SliceBudget& budget);
void queueForForegroundSweep(Zone* zone, JSFreeOp* fop,
const FinalizePhase& phase);
void queueForBackgroundSweep(Zone* zone, JSFreeOp* fop,
const FinalizePhase& phase);
IncrementalProgress markDuringSweeping(JSFreeOp* fop, SliceBudget& budget);
void updateAtomsBitmap();
void sweepCCWrappers();
@ -793,6 +800,9 @@ class GCRuntime {
IncrementalProgress sweepAtomsTable(JSFreeOp* fop, SliceBudget& budget);
IncrementalProgress sweepWeakCaches(JSFreeOp* fop, SliceBudget& budget);
IncrementalProgress finalizeAllocKind(JSFreeOp* fop, SliceBudget& budget);
bool foregroundFinalize(JSFreeOp* fop, Zone* zone, AllocKind thingKind,
js::SliceBudget& sliceBudget,
SortedArenaList& sweepList);
IncrementalProgress sweepPropMapTree(JSFreeOp* fop, SliceBudget& budget);
void endSweepPhase(bool lastGC);
bool allCCVisibleZonesWereCollected();
@ -805,6 +815,7 @@ class GCRuntime {
void startBackgroundFree();
void freeFromBackgroundThread(AutoLockHelperThreadState& lock);
void sweepBackgroundThings(ZoneList& zones);
void backgroundFinalize(JSFreeOp* fop, Arena* listHead, Arena** empty);
void assertBackgroundSweepingFinished();
// Compacting GC. Implemented in Compacting.cpp.

View file

@ -241,6 +241,18 @@ Arena* ArenaLists::takeSweptEmptyArenas() {
return arenas;
}
void ArenaLists::setIncrementalSweptArenas(AllocKind kind,
SortedArenaList& arenas) {
incrementalSweptArenaKind = kind;
incrementalSweptArenas.ref().clear();
incrementalSweptArenas = arenas.toArenaList();
}
void ArenaLists::clearIncrementalSweptArenas() {
incrementalSweptArenaKind = AllocKind::LIMIT;
incrementalSweptArenas.ref().clear();
}
void ArenaLists::checkGCStateNotInUse() {
// Called before and after collection to check the state is as expected.
#ifdef DEBUG

View file

@ -0,0 +1,133 @@
// Test 'compartment revived' GCs, where we do an extra GC if there are
// compartments which we expected to die but were kept alive.
// A global used as the destination for transplants.
let transplantTargetGlobal = newGlobal();
function didCompartmentRevivedGC() {
return performance.mozMemory.gc.lastStartReason === "COMPARTMENT_REVIVED";
}
function compartmentCount() {
let r = performance.mozMemory.gc.compartmentCount;
return r;
}
function startIncrementalGC() {
startgc(1);
while (gcstate() === "Prepare") {
gcslice(100, {dontStart: true});
}
assertEq(gcstate(), "Mark");
}
function finishIncrementalGC() {
while (gcstate() !== "NotActive") {
gcslice(100, {dontStart: true});
}
assertEq(gcstate(), "NotActive");
}
// Create a new compartment and global and return the global.
function createCompartment() {
return newGlobal({newCompartment: true});
}
// Create a transplantable object and create a wrapper to it from a new
// compartment. Return a function to transplant the target object.
function createTransplantableWrapperTarget(wrapperGlobal) {
let {object: target, transplant} = transplantableObject();
wrapperGlobal.wrapper = target;
return transplant;
}
// Transplant an object to a new global by calling the transplant
// function. This remaps all wrappers pointing to the target object,
// potentially keeping dead compartments alive.
function transplantTargetAndRemapWrappers(transplant) {
transplant(transplantTargetGlobal);
}
// Test no compartment revived GC triggered in normal cases.
function testNormal() {
gc();
assertEq(didCompartmentRevivedGC(), false);
startIncrementalGC();
finishIncrementalGC();
assertEq(didCompartmentRevivedGC(), false);
let initialCount = compartmentCount();
createCompartment();
startIncrementalGC();
finishIncrementalGC();
assertEq(compartmentCount(), initialCount);
}
// Test compartment revived GC is triggered by wrapper remapping.
function testCompartmentRevived1() {
let initialCount = compartmentCount();
let compartment = createCompartment();
let transplant = createTransplantableWrapperTarget(compartment);
compartment = null;
startIncrementalGC();
transplantTargetAndRemapWrappers(transplant);
finishIncrementalGC();
assertEq(didCompartmentRevivedGC(), true);
assertEq(compartmentCount(), initialCount);
}
// Test no compartment revived GC is triggered for compartments transitively
// kept alive by black roots.
function testCompartmentRevived2() {
let initialCount = compartmentCount();
let compartment = createCompartment();
let transplant = createTransplantableWrapperTarget(compartment);
let liveCompartment = createCompartment();
liveCompartment.wrapper = compartment;
compartment = null;
startIncrementalGC();
transplantTargetAndRemapWrappers(transplant);
finishIncrementalGC();
assertEq(didCompartmentRevivedGC(), false);
assertEq(compartmentCount(), initialCount + 2);
liveCompartment = null;
gc();
assertEq(compartmentCount(), initialCount);
}
// Test no compartment revived GC is triggered for compartments transitively
// kept alive by gray roots.
function testCompartmentRevived3() {
let initialCount = compartmentCount();
let compartment = createCompartment();
let transplant = createTransplantableWrapperTarget(compartment);
let liveCompartment = createCompartment();
liveCompartment.wrapper = compartment;
liveCompartment.eval('grayRoot()[0] = this');
liveCompartment = null;
gc();
startIncrementalGC();
transplantTargetAndRemapWrappers(transplant);
finishIncrementalGC();
assertEq(didCompartmentRevivedGC(), false);
assertEq(compartmentCount(), initialCount + 2);
// There's no easy way to clear gray roots for a compartment we don't have
// any reference to.
}
gczeal(0);
testNormal();
testCompartmentRevived1();
testCompartmentRevived2();
testCompartmentRevived3();

View file

@ -4829,22 +4829,28 @@ template <typename Handler>
bool BaselineCodeGen<Handler>::emit_PopLexicalEnv() {
frame.syncStack(0);
masm.loadBaselineFramePtr(BaselineFrameReg, R0.scratchReg());
Register scratch1 = R0.scratchReg();
auto ifDebuggee = [this, scratch1]() {
masm.loadBaselineFramePtr(BaselineFrameReg, scratch1);
auto ifDebuggee = [this]() {
prepareVMCall();
pushBytecodePCArg();
pushArg(R0.scratchReg());
pushArg(scratch1);
using Fn = bool (*)(JSContext*, BaselineFrame*, jsbytecode*);
return callVM<Fn, jit::DebugLeaveThenPopLexicalEnv>();
};
auto ifNotDebuggee = [this]() {
prepareVMCall();
pushArg(R0.scratchReg());
using Fn = bool (*)(JSContext*, BaselineFrame*);
return callVM<Fn, jit::PopLexicalEnv>();
auto ifNotDebuggee = [this, scratch1]() {
Register scratch2 = R1.scratchReg();
masm.loadPtr(frame.addressOfEnvironmentChain(), scratch1);
masm.debugAssertObjectHasClass(scratch1, scratch2,
&LexicalEnvironmentObject::class_);
Address enclosingAddr(scratch1,
EnvironmentObject::offsetOfEnclosingEnvironment());
masm.unboxObject(enclosingAddr, scratch1);
masm.storePtr(scratch1, frame.addressOfEnvironmentChain());
return true;
};
return emitDebugInstrumentation(ifDebuggee, mozilla::Some(ifNotDebuggee));
}

View file

@ -4185,6 +4185,17 @@ void MacroAssembler::debugAssertObjHasFixedSlots(Register obj,
#endif
}
void MacroAssembler::debugAssertObjectHasClass(Register obj, Register scratch,
const JSClass* clasp) {
#ifdef DEBUG
Label done;
branchTestObjClassNoSpectreMitigations(Assembler::Equal, obj, clasp, scratch,
&done);
assumeUnreachable("Class check failed");
bind(&done);
#endif
}
void MacroAssembler::branchArrayIsNotPacked(Register array, Register temp1,
Register temp2, Label* label) {
loadPtr(Address(array, NativeObject::offsetOfElements()), temp1);

View file

@ -4652,6 +4652,9 @@ class MacroAssembler : public MacroAssemblerSpecific {
void debugAssertIsObject(const ValueOperand& val);
void debugAssertObjHasFixedSlots(Register obj, Register scratch);
void debugAssertObjectHasClass(Register obj, Register scratch,
const JSClass* clasp);
void branchArrayIsNotPacked(Register array, Register temp1, Register temp2,
Label* label);

View file

@ -214,7 +214,6 @@ namespace jit {
_(ObjectWithProtoOperation, js::ObjectWithProtoOperation) \
_(OnDebuggerStatement, js::jit::OnDebuggerStatement) \
_(OptimizeSpreadCall, js::OptimizeSpreadCall) \
_(PopLexicalEnv, js::jit::PopLexicalEnv) \
_(ProcessCallSiteObjOperation, js::ProcessCallSiteObjOperation) \
_(ProxyGetProperty, js::ProxyGetProperty) \
_(ProxyGetPropertyByValue, js::ProxyGetPropertyByValue) \

View file

@ -1492,11 +1492,6 @@ bool PushLexicalEnv(JSContext* cx, BaselineFrame* frame,
return frame->pushLexicalEnvironment(cx, scope);
}
bool PopLexicalEnv(JSContext* cx, BaselineFrame* frame) {
frame->popOffEnvironmentChain<ScopedLexicalEnvironmentObject>();
return true;
}
bool DebugLeaveThenPopLexicalEnv(JSContext* cx, BaselineFrame* frame,
jsbytecode* pc) {
MOZ_ALWAYS_TRUE(DebugLeaveLexicalEnv(cx, frame, pc));

View file

@ -478,7 +478,6 @@ JSObject* InitRestParameter(JSContext* cx, uint32_t length, Value* rest,
Handle<LexicalScope*> scope);
[[nodiscard]] bool PushClassBodyEnv(JSContext* cx, BaselineFrame* frame,
Handle<ClassBodyScope*> scope);
[[nodiscard]] bool PopLexicalEnv(JSContext* cx, BaselineFrame* frame);
[[nodiscard]] bool DebugLeaveThenPopLexicalEnv(JSContext* cx,
BaselineFrame* frame,
jsbytecode* pc);

View file

@ -16,6 +16,7 @@ cranelift_none = ['jsrust_shared/cranelift_none']
moz_memory = ['mozglue-static/moz_memory']
simd-accel = ['jsrust_shared/simd-accel']
smoosh = ['jsrust_shared/smoosh']
gluesmith = ['jsrust_shared/gluesmith']
[dependencies]
jsrust_shared = { path = "./shared" }

View file

@ -19,6 +19,9 @@ if CONFIG["ENABLE_WASM_CRANELIFT"]:
if CONFIG["MOZ_RUST_SIMD"]:
features += ["simd-accel"]
if CONFIG["FUZZING_INTERFACES"]:
features += ["gluesmith"]
if CONFIG["JS_ENABLE_SMOOSH"]:
features += ["smoosh"]

View file

@ -15,6 +15,7 @@ encoding_c_mem = "0.2.4"
smoosh = { path = "../../frontend/smoosh", optional = true }
mozilla-central-workspace-hack = { path = "../../../../build/workspace-hack" }
mozglue-static = { path = "../../../../mozglue/static/rust" }
gluesmith = { path = "../../fuzz-tests/gluesmith", optional = true }
[features]
cranelift_x86 = ['baldrdash/cranelift_x86']

View file

@ -21,3 +21,6 @@ extern crate mozglue_static;
#[cfg(feature = "smoosh")]
extern crate smoosh;
#[cfg(feature = "gluesmith")]
extern crate gluesmith;

View file

@ -521,6 +521,8 @@ bool wasm::CraneliftCompileFunctions(const ModuleEnvironment& moduleEnv,
TempAllocator alloc(&lifo);
JitContext jitContext(&alloc);
WasmMacroAssembler masm(alloc, moduleEnv);
AutoCreatedBy acb(masm, "wasm::CraneliftCompileFunctions");
MOZ_ASSERT(IsCompilingWasm());
// Swap in already-allocated empty vectors to avoid malloc/free.

View file

@ -10506,6 +10506,18 @@
value: true
mirror: always
# Partition the OCSP cache by the partitionKey.
- name: privacy.partition.network_state.ocsp_cache
type: RelaxedAtomicBool
value: @IS_NIGHTLY_BUILD@
mirror: always
# Partition the OCSP cache by the partitionKey for private browsing mode.
- name: privacy.partition.network_state.ocsp_cache.pbmode
type: RelaxedAtomicBool
value: true
mirror: always
- name: privacy.partition.bloburl_per_agent_cluster
type: RelaxedAtomicBool
value: false

View file

@ -4295,6 +4295,15 @@ pref("toolkit.aboutProcesses.showAllSubframes", false);
#else
pref("toolkit.aboutProcesses.showThreads", false);
#endif
// If `true`, about:processes will offer to profile processes.
#ifdef NIGHTLY_BUILD
pref("toolkit.aboutProcesses.showProfilerIcons", true);
#else
pref("toolkit.aboutProcesses.showProfilerIcons", false);
#endif
// Time in seconds between when the profiler is started and when the
// profile is captured.
pref("toolkit.aboutProcesses.profileDuration", 5);
// When a crash happens, whether to include heap regions of the crash context
// in the minidump. Enabled by default on nightly and aurora.

View file

@ -220,6 +220,7 @@ static const char* gCallbackPrefsForSocketProcess[] = {
"network.dns.disableIPv6",
"network.dns.skipTRR-when-parental-control-enabled",
"network.offline-mirrors-connectivity",
"network.disable-localhost-when-offline",
nullptr,
};

View file

@ -599,43 +599,51 @@ nsresult nsStandardURL::NormalizeIPv4(const nsACString& host,
return NS_OK;
}
nsresult nsStandardURL::NormalizeIDN(const nsACString& host,
nsCString& result) {
// If host is ACE, then convert to UTF-8. Else, if host is already UTF-8,
// then make sure it is normalized per IDN.
// this function returns true if normalization succeeds.
nsresult nsStandardURL::NormalizeIDN(const nsCString& host, nsCString& result) {
result.Truncate();
mDisplayHost.Truncate();
nsresult rv;
if (!gIDN) {
return NS_ERROR_UNEXPECTED;
}
bool isAscii;
nsAutoCString normalized;
rv = gIDN->ConvertToDisplayIDN(host, &isAscii, normalized);
// If the input is ASCII, and not ACE encoded, then there's no processing
// needed. This is needed because we want to allow ascii labels longer than
// 64 characters for some schemes.
bool isACE = false;
if (IsAscii(host) && NS_SUCCEEDED(gIDN->IsACE(host, &isACE)) && !isACE) {
mCheckedIfHostA = true;
result = host;
return NS_OK;
}
// Even if it's already ACE, we must still call ConvertUTF8toACE in order
// for the input normalization to take place.
rv = gIDN->ConvertUTF8toACE(host, result);
if (NS_FAILED(rv)) {
return rv;
}
// The result is ASCII. No need to convert to ACE.
if (isAscii) {
result = normalized;
// If the ASCII representation doesn't contain the xn-- token then we don't
// need to call ConvertToDisplayIDN as that would not change anything.
if (!StringBeginsWith(result, "xn--"_ns) &&
result.Find(".xn--"_ns) == kNotFound) {
mCheckedIfHostA = true;
mDisplayHost.Truncate();
return NS_OK;
}
rv = gIDN->ConvertUTF8toACE(normalized, result);
bool isAscii = true;
nsAutoCString displayHost;
rv = gIDN->ConvertToDisplayIDN(result, &isAscii, displayHost);
if (NS_FAILED(rv)) {
return rv;
}
mCheckedIfHostA = true;
mDisplayHost = normalized;
if (!isAscii) {
mDisplayHost = displayHost;
}
return NS_OK;
}

View file

@ -184,7 +184,7 @@ class nsStandardURL : public nsIFileURL,
bool ValidIPv6orHostname(const char* host, uint32_t length);
static bool IsValidOfBase(unsigned char c, const uint32_t base);
nsresult NormalizeIDN(const nsACString& host, nsCString& result);
nsresult NormalizeIDN(const nsCString& host, nsCString& result);
nsresult CheckIfHostIsAscii();
void CoalescePath(netCoalesceFlags coalesceFlag, char* path);

View file

@ -212,7 +212,13 @@ nsresult nsIDNService::IDNA2008StringPrep(const nsAString& input,
return NS_OK;
}
if (info.errors != 0) {
uint32_t ignoredErrors = 0;
if (flag == eStringPrepForDNS) {
ignoredErrors = UIDNA_ERROR_LEADING_HYPHEN | UIDNA_ERROR_TRAILING_HYPHEN |
UIDNA_ERROR_HYPHEN_3_4;
}
if ((info.errors & ~ignoredErrors) != 0) {
if (flag == eStringPrepForDNS) {
output.Truncate();
}
@ -325,16 +331,25 @@ nsresult nsIDNService::ACEtoUTF8(const nsACString& input, nsACString& _retval,
}
NS_IMETHODIMP nsIDNService::IsACE(const nsACString& input, bool* _retval) {
const char* data = input.BeginReading();
uint32_t dataLen = input.Length();
// look for the ACE prefix in the input string. it may occur
// at the beginning of any segment in the domain name. for
// example: "www.xn--ENCODED.com"
const char* p = PL_strncasestr(data, kACEPrefix, dataLen);
if (!IsAscii(input)) {
*_retval = false;
return NS_OK;
}
*_retval = p && (p == data || *(p - 1) == '.');
auto stringContains = [](const nsACString& haystack,
const nsACString& needle) {
return std::search(haystack.BeginReading(), haystack.EndReading(),
needle.BeginReading(),
needle.EndReading()) != haystack.EndReading();
};
*_retval = StringBeginsWith(input, "xn--"_ns) ||
(!input.IsEmpty() && input[0] != '.' &&
stringContains(input, ".xn--"_ns));
return NS_OK;
}

View file

@ -30,6 +30,8 @@ AutomaticAuth=You are about to log in to the site “%1$S” with the username
TrackerUriBlocked=The resource at “%1$S” was blocked because content blocking is enabled.
UnsafeUriBlocked=The resource at “%1$S” was blocked by Safe Browsing.
# LOCALIZATION NOTE (CORPBlocked): %1$S is the URL of the blocked resource. %2$S is the URL of the MDN page about CORP.
CORPBlocked=The resource at “%1$S” was blocked due to its Cross-Origin-Resource-Policy header (or lack thereof). See %2$S
CookieBlockedByPermission=Request to access cookies or storage on “%1$S” was blocked because of custom cookie permission.
CookieBlockedTracker=Request to access cookie or storage on “%1$S” was blocked because it came from a tracker and content blocking is enabled.
CookieBlockedAll=Request to access cookie or storage on “%1$S” was blocked because we are blocking all storage access requests.

View file

@ -2212,7 +2212,7 @@ nsresult HttpBaseChannel::ProcessCrossOriginEmbedderPolicyHeader() {
mLoadInfo->GetLoadingEmbedderPolicy() !=
nsILoadInfo::EMBEDDER_POLICY_NULL &&
resultPolicy != nsILoadInfo::EMBEDDER_POLICY_REQUIRE_CORP) {
return NS_ERROR_BLOCKED_BY_POLICY;
return NS_ERROR_DOM_COEP_FAILED;
}
return NS_OK;
@ -2440,7 +2440,7 @@ nsresult HttpBaseChannel::ComputeCrossOriginOpenerPolicyMismatch() {
LOG((
"HttpBaseChannel::ComputeCrossOriginOpenerPolicyMismatch network error "
"for non empty sandboxing and non null COOP"));
return NS_ERROR_BLOCKED_BY_POLICY;
return NS_ERROR_DOM_COOP_FAILED;
}
// In xpcshell-tests we don't always have a current window global
@ -2503,6 +2503,18 @@ nsresult HttpBaseChannel::ComputeCrossOriginOpenerPolicyMismatch() {
return NS_OK;
}
nsresult HttpBaseChannel::ProcessCrossOriginSecurityHeaders() {
nsresult rv = ProcessCrossOriginEmbedderPolicyHeader();
if (NS_FAILED(rv)) {
return rv;
}
rv = ProcessCrossOriginResourcePolicyHeader();
if (NS_FAILED(rv)) {
return rv;
}
return ComputeCrossOriginOpenerPolicyMismatch();
}
enum class Report { Error, Warning };
// Helper Function to report messages to the console when the loaded

View file

@ -609,6 +609,8 @@ class HttpBaseChannel : public nsHashPropertyBag,
nsresult ComputeCrossOriginOpenerPolicyMismatch();
nsresult ProcessCrossOriginSecurityHeaders();
nsresult ValidateMIMEType();
bool EnsureOpaqueResponseIsAllowed();

View file

@ -1010,6 +1010,8 @@ void HttpChannelChild::DoOnStopRequest(nsIRequest* aRequest,
};
checkForBlockedContent();
MaybeLogCOEPError(aChannelStatus);
// See bug 1587686. If the redirect setup is not completed, the post-redirect
// channel will be not opened and mListener will be null.
MOZ_ASSERT(mListener || !LoadWasOpened());
@ -2976,6 +2978,28 @@ HttpChannelChild::LogMimeTypeMismatch(const nsACString& aMessageName,
return NS_OK;
}
nsresult HttpChannelChild::MaybeLogCOEPError(nsresult aStatus) {
if (aStatus == NS_ERROR_DOM_CORP_FAILED) {
RefPtr<Document> doc;
mLoadInfo->GetLoadingDocument(getter_AddRefs(doc));
nsAutoCString url;
mURI->GetSpec(url);
AutoTArray<nsString, 2> params;
params.AppendElement(NS_ConvertUTF8toUTF16(url));
// The MDN URL intentionally ends with a # so the webconsole linkification
// doesn't ignore the final ) of the URL
params.AppendElement(
u"https://developer.mozilla.org/docs/Web/HTTP/Cross-Origin_Resource_Policy_(CORP)#"_ns);
nsContentUtils::ReportToConsole(nsIScriptError::errorFlag, "COEP"_ns, doc,
nsContentUtils::eNECKO_PROPERTIES,
"CORPBlocked", params);
}
return NS_OK;
}
nsresult HttpChannelChild::CrossProcessRedirectFinished(nsresult aStatus) {
if (!CanSend()) {
return NS_BINDING_FAILED;

View file

@ -257,6 +257,8 @@ class HttpChannelChild final : public PHttpChannelChild,
// ensure Cancel is processed before any other channel events.
void CancelOnMainThread(nsresult aRv);
nsresult MaybeLogCOEPError(nsresult aStatus);
private:
// this section is for main-thread-only object
// all the references need to be proxy released on main thread.

View file

@ -2076,7 +2076,7 @@ void nsHttpChannel::AsyncContinueProcessResponse() {
nsresult nsHttpChannel::ContinueProcessResponse1() {
MOZ_ASSERT(!mCallOnResume, "How did that happen?");
nsresult rv;
nsresult rv = NS_OK;
if (mSuspendCount) {
LOG(("Waiting until resume to finish processing response [this=%p]\n",
@ -2088,21 +2088,6 @@ nsresult nsHttpChannel::ContinueProcessResponse1() {
return NS_OK;
}
rv = ProcessCrossOriginResourcePolicyHeader();
if (NS_FAILED(rv)) {
mStatus = NS_ERROR_DOM_CORP_FAILED;
HandleAsyncAbort();
return NS_OK;
}
rv = ComputeCrossOriginOpenerPolicyMismatch();
if (rv == NS_ERROR_BLOCKED_BY_POLICY) {
// this navigates the doc's browsing context to a network error.
mStatus = NS_ERROR_BLOCKED_BY_POLICY;
HandleAsyncAbort();
return NS_OK;
}
// Check if request was cancelled during http-on-examine-response.
if (mCanceled) {
return CallOnStartRequest();
@ -2165,13 +2150,6 @@ nsresult nsHttpChannel::ContinueProcessResponse1() {
LOG((" continuation state has been reset"));
}
rv = ProcessCrossOriginEmbedderPolicyHeader();
if (NS_FAILED(rv)) {
mStatus = NS_ERROR_BLOCKED_BY_POLICY;
HandleAsyncAbort();
return NS_OK;
}
// No process switch needed, continue as normal.
return ContinueProcessResponse2(rv);
}
@ -2547,6 +2525,13 @@ nsresult nsHttpChannel::ContinueProcessNormal(nsresult rv) {
return rv;
}
rv = ProcessCrossOriginSecurityHeaders();
if (NS_FAILED(rv)) {
mStatus = rv;
HandleAsyncAbort();
return rv;
}
// if we're here, then any byte-range requests failed to result in a partial
// response. we must clear this flag to prevent BufferPartialContent from
// being called inside our OnDataAvailable (see bug 136678).
@ -5054,6 +5039,13 @@ nsresult nsHttpChannel::AsyncProcessRedirection(uint32_t redirectType) {
LOG(("nsHttpChannel::AsyncProcessRedirection [this=%p type=%u]\n", this,
redirectType));
nsresult rv = ProcessCrossOriginSecurityHeaders();
if (NS_FAILED(rv)) {
mStatus = rv;
HandleAsyncAbort();
return rv;
}
nsAutoCString location;
// if a location header was not given, then we can't perform the redirect,
@ -5080,7 +5072,7 @@ nsresult nsHttpChannel::AsyncProcessRedirection(uint32_t redirectType) {
LOG(("redirecting to: %s [redirection-limit=%u]\n", location.get(),
uint32_t(mRedirectionLimit)));
nsresult rv = CreateNewURI(location.get(), getter_AddRefs(mRedirectURI));
rv = CreateNewURI(location.get(), getter_AddRefs(mRedirectURI));
if (NS_FAILED(rv)) {
LOG(("Invalid URI for redirect: Location: %s\n", location.get()));
@ -6869,29 +6861,11 @@ nsHttpChannel::OnStartRequest(nsIRequest* request) {
return NS_OK;
}
rv = ProcessCrossOriginEmbedderPolicyHeader();
rv = ProcessCrossOriginSecurityHeaders();
if (NS_FAILED(rv)) {
mStatus = NS_ERROR_BLOCKED_BY_POLICY;
mStatus = rv;
HandleAsyncAbort();
return NS_OK;
}
rv = ProcessCrossOriginResourcePolicyHeader();
if (NS_FAILED(rv)) {
mStatus = NS_ERROR_DOM_CORP_FAILED;
HandleAsyncAbort();
return NS_OK;
}
// before we check for redirects, check if the load should be shifted into a
// new process.
rv = ComputeCrossOriginOpenerPolicyMismatch();
if (rv == NS_ERROR_BLOCKED_BY_POLICY) {
// this navigates the doc's browsing context to a network error.
mStatus = NS_ERROR_BLOCKED_BY_POLICY;
HandleAsyncAbort();
return NS_OK;
return rv;
}
// No process change is needed, so continue on to ContinueOnStartRequest1.

Просмотреть файл

@ -24,7 +24,7 @@ function run_test() {
.setSpec("http://%80.com")
.finalize();
},
/NS_ERROR_UNEXPECTED/,
/NS_ERROR_MALFORMED_URI/,
"illegal UTF character"
);

View file

@ -1164,7 +1164,7 @@ add_test(
() => {
stringToURL("https://b%9a/");
},
/NS_ERROR_UNEXPECTED/,
/NS_ERROR_MALFORMED_URI/,
"bad URI"
);

View file

@ -28,6 +28,7 @@
#include "NSSCertDBTrustDomain.h"
#include "pk11pub.h"
#include "mozilla/StaticPrefs_privacy.h"
#include "mozpkix/pkixnss.h"
#include "ScopedNSSTypes.h"
#include "secerr.h"
@ -63,8 +64,11 @@ static SECStatus DigestLength(UniquePK11Context& context, uint32_t length) {
// It is only non-empty when "privacy.firstParty.isolate" is enabled, in order
// to isolate OCSP cache by first party.
// Let firstPartyDomainLen be the number of bytes of firstPartyDomain.
// Let partitionKey be the partition key of originAttributes.
// Let partitionKeyLen be the number of bytes of partitionKey.
// The value calculated is SHA384(derIssuer || derPublicKey || serialNumberLen
// || serialNumber || firstPartyDomainLen || firstPartyDomain).
// || serialNumber || firstPartyDomainLen || firstPartyDomain || partitionKeyLen
// || partitionKey).
// Because the DER encodings include the length of the data encoded, and we also
// include the length of serialNumber and originAttributes, there do not exist
// A(derIssuerA, derPublicKeyA, serialNumberLenA, serialNumberA,
@ -108,17 +112,36 @@ static SECStatus CertIDHash(SHA384Buffer& buf, const CertID& certID,
return rv;
}
// OCSP should not be isolated by containers.
NS_ConvertUTF16toUTF8 firstPartyDomain(originAttributes.mFirstPartyDomain);
if (!firstPartyDomain.IsEmpty()) {
rv = DigestLength(context, firstPartyDomain.Length());
auto populateOriginAttributesKey = [&context](const nsString& aKey) {
NS_ConvertUTF16toUTF8 key(aKey);
if (key.IsEmpty()) {
return SECSuccess;
}
SECStatus rv = DigestLength(context, key.Length());
if (rv != SECSuccess) {
return rv;
}
rv =
PK11_DigestOp(context.get(),
BitwiseCast<const unsigned char*>(firstPartyDomain.get()),
firstPartyDomain.Length());
return PK11_DigestOp(context.get(),
BitwiseCast<const unsigned char*>(key.get()),
key.Length());
};
// OCSP should be isolated by firstPartyDomain and partitionKey, but not
// by containers.
rv = populateOriginAttributesKey(originAttributes.mFirstPartyDomain);
if (rv != SECSuccess) {
return rv;
}
bool isolateByPartitionKey =
originAttributes.mPrivateBrowsingId > 0
? StaticPrefs::privacy_partition_network_state_ocsp_cache_pbmode()
: StaticPrefs::privacy_partition_network_state_ocsp_cache();
if (isolateByPartitionKey) {
rv = populateOriginAttributesKey(originAttributes.mPartitionKey);
if (rv != SECSuccess) {
return rv;
}
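For reference, a minimal self-contained sketch (not taken from the patch) of the byte sequence that CertIDHash above digests with SHA-384 to form the cache key: DER issuer, DER public key, the serial number preceded by its length, then a length-prefixed firstPartyDomain and, when privacy.partition.network_state.ocsp_cache (or its .pbmode variant for private browsing) allows it, a length-prefixed partitionKey. The buildHashInput helper, std::vector inputs, and the 4-byte length prefix are illustrative assumptions; the real code streams these fields through PK11_DigestOp and uses its own DigestLength encoding.

#include <cstdint>
#include <string>
#include <vector>

// Illustrative 4-byte big-endian length prefix; the patch's DigestLength
// encoding is not reproduced here.
static void appendLength(std::vector<uint8_t>& out, uint32_t len) {
  for (int shift = 24; shift >= 0; shift -= 8) {
    out.push_back(static_cast<uint8_t>(len >> shift));
  }
}

static void appendLengthPrefixed(std::vector<uint8_t>& out,
                                 const std::string& value) {
  if (value.empty()) {
    return;  // Empty attributes contribute nothing, as in populateOriginAttributesKey.
  }
  appendLength(out, static_cast<uint32_t>(value.size()));
  out.insert(out.end(), value.begin(), value.end());
}

// Conceptual byte stream hashed to form the OCSP cache key.
static std::vector<uint8_t> buildHashInput(
    const std::vector<uint8_t>& derIssuer,
    const std::vector<uint8_t>& derPublicKey,
    const std::vector<uint8_t>& serialNumber,
    const std::string& firstPartyDomain,  // UTF-8
    const std::string& partitionKey,      // UTF-8
    bool isolateByPartitionKey) {
  std::vector<uint8_t> input;
  input.insert(input.end(), derIssuer.begin(), derIssuer.end());
  input.insert(input.end(), derPublicKey.begin(), derPublicKey.end());
  appendLength(input, static_cast<uint32_t>(serialNumber.size()));
  input.insert(input.end(), serialNumber.begin(), serialNumber.end());
  appendLengthPrefixed(input, firstPartyDomain);
  if (isolateByPartitionKey) {
    appendLengthPrefixed(input, partitionKey);
  }
  return input;
}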
@ -174,9 +197,11 @@ bool OCSPCache::FindInternal(const CertID& aCertID,
static inline void LogWithCertID(const char* aMessage, const CertID& aCertID,
const OriginAttributes& aOriginAttributes) {
NS_ConvertUTF16toUTF8 firstPartyDomain(aOriginAttributes.mFirstPartyDomain);
nsAutoString info = u"firstPartyDomain: "_ns +
aOriginAttributes.mFirstPartyDomain +
u", partitionKey: "_ns + aOriginAttributes.mPartitionKey;
MOZ_LOG(gCertVerifierLog, LogLevel::Debug,
(aMessage, &aCertID, firstPartyDomain.get()));
(aMessage, &aCertID, NS_ConvertUTF16toUTF8(info).get()));
}
void OCSPCache::MakeMostRecentlyUsed(size_t aIndex,


@@ -9,6 +9,7 @@
#include "gtest/gtest.h"
#include "mozilla/BasePrincipal.h"
#include "mozilla/Casting.h"
#include "mozilla/Preferences.h"
#include "mozilla/Sprintf.h"
#include "nss.h"
#include "mozpkix/pkixtypes.h"
@@ -304,6 +305,14 @@ TEST_F(psm_OCSPCacheTest, NetworkFailure) {
TEST_F(psm_OCSPCacheTest, TestOriginAttributes) {
CertID certID(fakeIssuer1, fakeKey000, fakeSerial0000);
// We test the two attributes, firstPartyDomain and partitionKey, separately.
// We don't create entries that have both attributes set, because the two
// features that use these attributes are mutually exclusive.
// Set pref for OCSP cache network partitioning.
mozilla::Preferences::SetBool("privacy.partition.network_state.ocsp_cache",
true);
SCOPED_TRACE("");
OriginAttributes attrs;
attrs.mFirstPartyDomain.AssignLiteral("foo.com");
@@ -314,8 +323,35 @@ TEST_F(psm_OCSPCacheTest, TestOriginAttributes) {
attrs.mFirstPartyDomain.AssignLiteral("bar.com");
ASSERT_FALSE(cache.Get(certID, attrs, resultOut, timeOut));
// OCSP cache should not be isolated by containers.
// OCSP cache should not be isolated by containers for firstPartyDomain.
attrs.mUserContextId = 1;
attrs.mFirstPartyDomain.AssignLiteral("foo.com");
ASSERT_TRUE(cache.Get(certID, attrs, resultOut, timeOut));
// Clear originAttributes.
attrs.mUserContextId = 0;
attrs.mFirstPartyDomain.Truncate();
// Add an OCSP cache entry for the partitionKey.
attrs.mPartitionKey.AssignLiteral("(https,foo.com)");
PutAndGet(cache, certID, Success, now, attrs);
// Check cache entry for the partitionKey.
attrs.mPartitionKey.AssignLiteral("(https,foo.com)");
ASSERT_TRUE(cache.Get(certID, attrs, resultOut, timeOut));
// OCSP cache entry should not exist for the other partitionKey.
attrs.mPartitionKey.AssignLiteral("(https,bar.com)");
ASSERT_FALSE(cache.Get(certID, attrs, resultOut, timeOut));
// OCSP cache should not be isolated by containers for partitionKey.
attrs.mUserContextId = 1;
attrs.mPartitionKey.AssignLiteral("(https,foo.com)");
ASSERT_TRUE(cache.Get(certID, attrs, resultOut, timeOut));
// An OCSP cache entry should not exist for an OA that has both attributes set.
attrs.mUserContextId = 0;
attrs.mFirstPartyDomain.AssignLiteral("foo.com");
attrs.mPartitionKey.AssignLiteral("(https,foo.com)");
ASSERT_FALSE(cache.Get(certID, attrs, resultOut, timeOut));
}
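In short, the test above expects the cache key to honor firstPartyDomain and partitionKey while ignoring userContextId, and it never stores an entry with both attributes set. A rough Rust model of that keying (a plain HashMap standing in for the real OCSPCache; names are illustrative):

```rust
use std::collections::HashMap;

/// Toy stand-in for the OCSP cache key: only the attributes that are meant to
/// partition the cache take part in the key; userContextId (containers) does not.
#[derive(PartialEq, Eq, Hash)]
struct Key {
    first_party_domain: String,
    partition_key: String,
}

struct ToyOcspCache {
    entries: HashMap<Key, &'static str>,
}

impl ToyOcspCache {
    fn new() -> Self {
        ToyOcspCache { entries: HashMap::new() }
    }

    fn put(&mut self, fpd: &str, pk: &str, result: &'static str) {
        let key = Key { first_party_domain: fpd.into(), partition_key: pk.into() };
        self.entries.insert(key, result);
    }

    // user_context_id is accepted but deliberately ignored, mirroring
    // "OCSP cache should not be isolated by containers".
    fn get(&self, fpd: &str, pk: &str, _user_context_id: u32) -> Option<&'static str> {
        let key = Key { first_party_domain: fpd.into(), partition_key: pk.into() };
        self.entries.get(&key).copied()
    }
}

fn main() {
    let mut cache = ToyOcspCache::new();
    cache.put("foo.com", "", "Success");
    assert!(cache.get("foo.com", "", 0).is_some()); // same firstPartyDomain
    assert!(cache.get("bar.com", "", 0).is_none()); // different firstPartyDomain
    assert!(cache.get("foo.com", "", 1).is_some()); // container id is ignored

    cache.put("", "(https,foo.com)", "Success");
    assert!(cache.get("", "(https,bar.com)", 0).is_none()); // different partitionKey
    assert!(cache.get("", "(https,foo.com)", 1).is_some()); // container id is ignored
}
```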


@@ -403,4 +403,94 @@ function add_tests() {
clearOCSPCache();
run_next_test();
});
// This test makes sure that the OCSP cache is isolated by partitionKey.
add_test(function() {
Services.prefs.setBoolPref(
"privacy.partition.network_state.ocsp_cache",
true
);
run_next_test();
});
// A good OCSP response will be cached.
add_ocsp_test(
"ocsp-stapling-none.example.com",
PRErrorCodeSuccess,
[respondWithGoodOCSP],
"No stapled response (partitionKey = (https,foo.com)) -> a fetch " +
"should have been attempted",
{ partitionKey: "(https,foo.com)" }
);
// The cache will prevent a fetch from happening.
add_ocsp_test(
"ocsp-stapling-none.example.com",
PRErrorCodeSuccess,
[],
"Noted OCSP server failure (partitionKey = (https,foo.com)) -> a " +
"fetch should not have been attempted",
{ partitionKey: "(https,foo.com)" }
);
// Using a different partitionKey should result in a fetch.
add_ocsp_test(
"ocsp-stapling-none.example.com",
PRErrorCodeSuccess,
[respondWithGoodOCSP],
"Noted OCSP server failure (partitionKey = (https,bar.com)) -> a " +
"fetch should have been attempted",
{ partitionKey: "(https,bar.com)" }
);
// ---------------------------------------------------------------------------
// Reset state
add_test(function() {
Services.prefs.clearUserPref("privacy.partition.network_state.ocsp_cache");
clearOCSPCache();
run_next_test();
});
// This test makes sure that the OCSP cache is isolated by partitionKey in
// private browsing mode.
// A good OCSP response will be cached.
add_ocsp_test(
"ocsp-stapling-none.example.com",
PRErrorCodeSuccess,
[respondWithGoodOCSP],
"No stapled response (partitionKey = (https,foo.com)) -> a fetch " +
"should have been attempted",
{ partitionKey: "(https,foo.com)", privateBrowsingId: 1 }
);
// The cache will prevent a fetch from happening.
add_ocsp_test(
"ocsp-stapling-none.example.com",
PRErrorCodeSuccess,
[],
"Noted OCSP server failure (partitionKey = (https,foo.com)) -> a " +
"fetch should not have been attempted",
{ partitionKey: "(https,foo.com)", privateBrowsingId: 1 }
);
// Using a different partitionKey should result in a fetch.
add_ocsp_test(
"ocsp-stapling-none.example.com",
PRErrorCodeSuccess,
[respondWithGoodOCSP],
"Noted OCSP server failure (partitionKey = (https,bar.com)) -> a " +
"fetch should have been attempted",
{ partitionKey: "(https,bar.com)", privateBrowsingId: 1 }
);
// ---------------------------------------------------------------------------
// Reset state
add_test(function() {
clearOCSPCache();
run_next_test();
});
}
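The two groups of tests above boil down to: one network fetch per (host, partitionKey) pair, with a cache hit suppressing the second fetch and a different partitionKey forcing a fresh one. A small Rust sketch of that behavior (hypothetical types, not the actual test harness):

```rust
use std::collections::HashMap;

/// Toy model of the behaviour exercised above: an OCSP response is fetched at
/// most once per (host, partitionKey) pair; a different partitionKey forces a
/// fresh fetch even for the same responder.
struct PartitionedFetcher {
    cache: HashMap<(String, String), &'static str>,
    fetches: u32,
}

impl PartitionedFetcher {
    fn new() -> Self {
        PartitionedFetcher { cache: HashMap::new(), fetches: 0 }
    }

    fn get_ocsp_response(&mut self, host: &str, partition_key: &str) -> &'static str {
        let key = (host.to_string(), partition_key.to_string());
        if let Some(&cached) = self.cache.get(&key) {
            return cached; // cache hit: no network fetch
        }
        self.fetches += 1; // cache miss: pretend to contact the OCSP responder
        self.cache.insert(key, "good");
        "good"
    }
}

fn main() {
    let mut f = PartitionedFetcher::new();
    f.get_ocsp_response("ocsp-stapling-none.example.com", "(https,foo.com)");
    f.get_ocsp_response("ocsp-stapling-none.example.com", "(https,foo.com)");
    assert_eq!(f.fetches, 1); // second request was served from the cache
    f.get_ocsp_response("ocsp-stapling-none.example.com", "(https,bar.com)");
    assert_eq!(f.fetches, 2); // new partition, new fetch
}
```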


@@ -3,8 +3,8 @@ Change log
All notable changes to this program are documented in this file.
0.30.0 (2021-09-15)
--------------------
0.30.0 (2021-09-16, `d372710b98a6`)
------------------------------------
### Known problems


@@ -146,12 +146,6 @@
[Parsing: <file:..> against <http://www.example.com/test>]
expected: FAIL
[Parsing: <http://﷐zyx.com> against <http://other.com/>]
expected: FAIL
[Parsing: <http://%ef%b7%90zyx.com> against <http://other.com/>]
expected: FAIL
[Parsing: <http://192.168.0.257> against <http://other.com/>]
expected: FAIL
@@ -281,12 +275,6 @@
[Parsing: <file://[example\]/> against <about:blank>]
expected: FAIL
[Parsing: <https://<2F>> against <about:blank>]
expected: FAIL
[Parsing: <https://%EF%BF%BD> against <about:blank>]
expected: FAIL
[Parsing: <sc://\x00/> against <about:blank>]
expected: FAIL
@@ -498,12 +486,6 @@
[Parsing: <x> against <sc://ñ>]
expected: FAIL
[Parsing: <http://﷐zyx.com> against <http://other.com/>]
expected: FAIL
[Parsing: <https://<2F>> against <about:blank>]
expected: FAIL
[Parsing: <data:test# »> against <about:blank>]
expected: FAIL


@@ -143,12 +143,6 @@
[Parsing: <http://www.@pple.com> against <about:blank>]
expected: FAIL
[Parsing: <http://﷐zyx.com> against <http://other.com/>]
expected: FAIL
[Parsing: <http://%ef%b7%90zyx.com> against <http://other.com/>]
expected: FAIL
[Parsing: <http://192.168.0.257> against <http://other.com/>]
expected: FAIL
@@ -281,12 +275,6 @@
[Parsing: <file://[example\]/> against <about:blank>]
expected: FAIL
[Parsing: <https://<2F>> against <about:blank>]
expected: FAIL
[Parsing: <https://%EF%BF%BD> against <about:blank>]
expected: FAIL
[Parsing: <sc://\x00/> against <about:blank>]
expected: FAIL
@@ -498,12 +486,6 @@
[Parsing: <x> against <sc://ñ>]
expected: FAIL
[Parsing: <http://﷐zyx.com> against <http://other.com/>]
expected: FAIL
[Parsing: <https://<2F>> against <about:blank>]
expected: FAIL
[Parsing: <data:test# »> against <about:blank>]
expected: FAIL
@@ -531,12 +513,6 @@
[Parsing: <x> against <sc://ñ>]
expected: FAIL
[Parsing: <http://﷐zyx.com> against <http://other.com/>]
expected: FAIL
[Parsing: <https://<2F>> against <about:blank>]
expected: FAIL
[Parsing: <data:test# »> against <about:blank>]
expected: FAIL


@@ -48,27 +48,9 @@
[Location's href: http://@:www.example.com should throw]
expected: FAIL
[URL's href: https://<2F> should throw]
expected: FAIL
[XHR: https://<2F> should throw]
expected: FAIL
[sendBeacon(): https://<2F> should throw]
expected: FAIL
[Location's href: https://<2F> should throw]
expected: FAIL
[URL's href: https://%EF%BF%BD should throw]
expected: FAIL
[XHR: https://%EF%BF%BD should throw]
expected: FAIL
[sendBeacon(): https://%EF%BF%BD should throw]
expected: FAIL
[Location's href: https://%EF%BF%BD should throw]
expected: FAIL
@@ -159,27 +141,15 @@
[Location's href: sc://\]/ should throw]
expected: FAIL
[XHR: ftp://example.com%80/ should throw]
expected: FAIL
[Location's href: ftp://example.com%80/ should throw]
expected: FAIL
[XHR: ftp://example.com%A0/ should throw]
expected: FAIL
[Location's href: ftp://example.com%A0/ should throw]
expected: FAIL
[XHR: https://example.com%80/ should throw]
expected: FAIL
[Location's href: https://example.com%80/ should throw]
expected: FAIL
[XHR: https://example.com%A0/ should throw]
expected: FAIL
[Location's href: https://example.com%A0/ should throw]
expected: FAIL
@@ -231,12 +201,6 @@
[window.open(): file://[example\]/ should throw]
expected: FAIL
[window.open(): https://<2F> should throw]
expected: FAIL
[window.open(): https://%EF%BF%BD should throw]
expected: FAIL
[window.open(): sc://\x00/ should throw]
expected: FAIL
@@ -312,12 +276,6 @@
[URL's constructor's base argument: file://[example\]/ should throw]
expected: FAIL
[URL's constructor's base argument: https://<2F> should throw]
expected: FAIL
[URL's constructor's base argument: https://%EF%BF%BD should throw]
expected: FAIL
[URL's constructor's base argument: sc://\x00/ should throw]
expected: FAIL
@@ -354,42 +312,12 @@
[URL's constructor's base argument: non-special://[:80/ should throw]
expected: FAIL
[sendBeacon(): https://<2F> should throw]
expected: FAIL
[window.open(): https://<2F> should throw]
expected: FAIL
[Location's href: https://<2F> should throw]
expected: FAIL
[XHR: https://<2F> should throw]
expected: FAIL
[URL's constructor's base argument: https://<2F> should throw]
expected: FAIL
[URL's href: https://<2F> should throw]
expected: FAIL
[sendBeacon(): https://<2F> should throw]
expected: FAIL
[window.open(): https://<2F> should throw]
expected: FAIL
[Location's href: https://<2F> should throw]
expected: FAIL
[XHR: https://<2F> should throw]
expected: FAIL
[URL's constructor's base argument: https://<2F> should throw]
expected: FAIL
[URL's href: https://<2F> should throw]
expected: FAIL
[URL's constructor's base argument: file://xn--/p should throw]
expected: FAIL


@@ -80,33 +80,6 @@
[xn--a.β (using <area>.hostname)]
expected: FAIL
[.example (using URL)]
expected: FAIL
[.example (using URL.host)]
expected: FAIL
[.example (using URL.hostname)]
expected: FAIL
[.example (using <a>)]
expected: FAIL
[.example (using <a>.host)]
expected: FAIL
[.example (using <a>.hostname)]
expected: FAIL
[.example (using <area>)]
expected: FAIL
[.example (using <area>.host)]
expected: FAIL
[.example (using <area>.hostname)]
expected: FAIL
[xn--1ug.example (using URL)]
expected: FAIL
@@ -134,33 +107,6 @@
[xn--1ug.example (using <area>.hostname)]
expected: FAIL
[يa (using URL)]
expected: FAIL
[يa (using URL.host)]
expected: FAIL
[يa (using URL.hostname)]
expected: FAIL
[يa (using <a>)]
expected: FAIL
[يa (using <a>.host)]
expected: FAIL
[يa (using <a>.hostname)]
expected: FAIL
[يa (using <area>)]
expected: FAIL
[يa (using <area>.host)]
expected: FAIL
[يa (using <area>.hostname)]
expected: FAIL
[xn--a-yoc (using URL)]
expected: FAIL
@@ -188,33 +134,6 @@
[xn--a-yoc (using <area>.hostname)]
expected: FAIL
[<5B>.com (using URL)]
expected: FAIL
[<5B>.com (using URL.host)]
expected: FAIL
[<5B>.com (using URL.hostname)]
expected: FAIL
[<5B>.com (using <a>)]
expected: FAIL
[<5B>.com (using <a>.host)]
expected: FAIL
[<5B>.com (using <a>.hostname)]
expected: FAIL
[<5B>.com (using <area>)]
expected: FAIL
[<5B>.com (using <area>.host)]
expected: FAIL
[<5B>.com (using <area>.hostname)]
expected: FAIL
[xn--zn7c.com (using URL)]
expected: FAIL
@@ -349,4 +268,3 @@
[xn-- (using URL.host)]
expected: FAIL


@@ -80,18 +80,6 @@
[Parsing: <file:..> against <http://www.example.com/test>]
expected: FAIL
[Parsing: <http://﷐zyx.com> against <http://other.com/>]
expected: FAIL
[Parsing: <http://%ef%b7%90zyx.com> against <http://other.com/>]
expected: FAIL
[Parsing: <https://<2F>> against <about:blank>]
expected: FAIL
[Parsing: <https://%EF%BF%BD> against <about:blank>]
expected: FAIL
[Parsing: <sc://faß.ExAmPlE/> against <about:blank>]
expected: FAIL
@@ -688,18 +676,6 @@
[Parsing: <file:..> against <http://www.example.com/test>]
expected: FAIL
[Parsing: <http://﷐zyx.com> against <http://other.com/>]
expected: FAIL
[Parsing: <http://%ef%b7%90zyx.com> against <http://other.com/>]
expected: FAIL
[Parsing: <https://<2F>> against <about:blank>]
expected: FAIL
[Parsing: <https://%EF%BF%BD> against <about:blank>]
expected: FAIL
[Parsing: <sc://faß.ExAmPlE/> against <about:blank>]
expected: FAIL


@@ -0,0 +1,73 @@
<!doctype html>
<html>
<title>Images on a page with Cross-Origin-Embedder-Policy: require-corp should load the same from the cache or network</title>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script src="/common/get-host-info.sub.js"></script>
<script>
function remote(path) {
const REMOTE_ORIGIN = get_host_info().HTTPS_REMOTE_ORIGIN;
return new URL(path, REMOTE_ORIGIN);
}
//
// This test loads a same-origin iframe, resources/load-corp-images.html, with
// Cross-Origin-Embedder-Policy: require-corp.
// The iframe loads two cross-origin images, one with a
// Cross-Origin-Resource-Policy: cross-origin header, and one without.
// We expect the image with the header to load successfully and the one without
// to fail to load.
// After the first load we reload the iframe, with the same expectations
// for the image loads when they are served from the cache.
//
const image_path = "/html/cross-origin-embedder-policy/resources/corp-image.py";
let EXPECTED_LOADS = {
[`NETWORK-${remote(image_path)}`]: false,
[`NETWORK-${remote(image_path)}?corp-cross-origin=1`]: true,
[`CACHED-${remote(image_path)}`]: false,
[`CACHED-${remote(image_path)}?corp-cross-origin=1`]: true,
}
let TESTS = {};
for (let t in EXPECTED_LOADS) {
TESTS[t] = async_test(t);
}
window.addEventListener("load", async () => {
let iframe = document.createElement("iframe");
let firstRun = true;
let t = async_test("main_test");
await new Promise((resolve, reject) => {
iframe.src = "resources/load-corp-images.html";
iframe.onload = () => { resolve() };
iframe.onerror = (e) => { reject(); };
window.addEventListener("message", (event) => {
// After the first done event we reload the iframe.
if (event.data.done) {
if (firstRun) {
firstRun = false;
iframe.contentWindow.location.reload();
} else {
// After the second done event the test is finished.
t.done();
}
} else {
// Check that each image either loads or doesn't based on the expectations
let testName = `${firstRun ? "NETWORK-" : "CACHED-"}${event.data.src}`;
let test = TESTS[testName];
test.step(() => {
assert_equals(event.data.loaded, EXPECTED_LOADS[testName], `${firstRun ? "NETWORK" : "CACHED"} load of ${event.data.src} should ${EXPECTED_LOADS[testName] ? "" : "not"} succeed`);
});
test.done();
}
}, false);
document.body.appendChild(iframe);
})
});
</script>
</html>
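The expectations table in the test above encodes the CORP-under-COEP rule it depends on: with Cross-Origin-Embedder-Policy: require-corp, a no-cors cross-origin image is only usable if it carries an opting-in Cross-Origin-Resource-Policy header, and the answer must be the same whether the response comes from the network or the HTTP cache. A simplified Rust sketch of that rule (it ignores CORS requests, same-site cases, and COEP: credentialless):

```rust
/// Simplified rule: under COEP: require-corp a no-cors cross-origin response
/// is only usable if it opts in with CORP: cross-origin.
fn may_load_cross_origin(embedder_requires_corp: bool, corp_header: Option<&str>) -> bool {
    if !embedder_requires_corp {
        return true;
    }
    matches!(corp_header, Some("cross-origin"))
}

fn main() {
    // Mirrors EXPECTED_LOADS: the image served with CORP: cross-origin loads,
    // the one without it does not, and the result must not depend on whether
    // the response came from the network or from the HTTP cache.
    assert!(may_load_cross_origin(true, Some("cross-origin")));
    assert!(!may_load_cross_origin(true, None));
    assert!(may_load_cross_origin(false, None)); // no COEP, no restriction
}
```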


@@ -0,0 +1 @@
Cross-Origin-Embedder-Policy: require-corp


@@ -0,0 +1,27 @@
import json
import base64
# A 1x1 PNG image.
# Source: https://commons.wikimedia.org/wiki/File:1x1.png (Public Domain)
IMAGE = "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABAQMAAAAl21bKAAAAA1BMVEUAAACnej3aAAAAAXRSTlMAQObYZgAAAApJREFUCNdjYAAAAAIAAeIhvDMAAAAASUVORK5CYII="
def main(request, response):
response.headers.set(b'Access-Control-Allow-Origin', b'*')
response.headers.set(b'Access-Control-Allow-Methods', b'OPTIONS, GET, POST')
response.headers.set(b'Access-Control-Allow-Headers', b'Content-Type')
response.headers.set(b"Cache-Control", b"max-age=3600")
# CORS preflight
if request.method == u'OPTIONS':
return u''
if b'some-etag' == request.headers.get(b"If-None-Match", None):
response.status = 304
return u''
if request.GET.first(b"corp-cross-origin", default=b""):
response.headers.set(b'Cross-Origin-Resource-Policy', b'cross-origin')
response.headers.set(b'Etag', b'some-etag')
response.headers.set(b'Content-Type', b'image/png')
return base64.b64decode(IMAGE)
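The handler above answers 304 when the client revalidates with the matching If-None-Match value, and only attaches Cross-Origin-Resource-Policy: cross-origin when the corp-cross-origin query parameter is present. A hedged Rust rendering of that decision (types and names are illustrative, not a wptserve API):

```rust
/// Illustrative model of the handler's decision, not a wptserve API.
struct Reply {
    status: u16,
    corp_cross_origin: bool,
    has_body: bool,
}

fn handle(if_none_match: Option<&str>, corp_param: bool) -> Reply {
    // A matching validator lets the client keep using its cached copy
    // (including the headers it was originally stored with).
    if if_none_match == Some("some-etag") {
        return Reply { status: 304, corp_cross_origin: false, has_body: false };
    }
    // Otherwise serve the PNG, optionally tagged with CORP: cross-origin.
    Reply { status: 200, corp_cross_origin: corp_param, has_body: true }
}

fn main() {
    assert_eq!(handle(None, true).status, 200);
    assert!(handle(None, true).corp_cross_origin);
    assert!(!handle(None, false).corp_cross_origin);
    let revalidated = handle(Some("some-etag"), true);
    assert_eq!(revalidated.status, 304);
    assert!(!revalidated.has_body);
}
```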


@@ -0,0 +1,34 @@
<!doctype html>
<html>
<script src="/common/get-host-info.sub.js"></script>
<script>
function remote(path) {
const REMOTE_ORIGIN = get_host_info().HTTPS_REMOTE_ORIGIN;
return new URL(path, REMOTE_ORIGIN);
}
const image_path = "/html/cross-origin-embedder-policy/resources/corp-image.py";
window.addEventListener("load", async () => {
await new Promise(resolve => {
let img = document.createElement("img");
img.src = remote(image_path);
img.onload = () => { window.parent.postMessage({loaded: true, src: img.src}, "*"); resolve(); };
img.onerror = (e) => { window.parent.postMessage({loaded: false, src: img.src}, "*"); resolve(); };
document.body.appendChild(img);
});
await new Promise(resolve => {
let img = document.createElement("img");
img.src = remote(image_path + "?corp-cross-origin=1");
img.onload = () => { window.parent.postMessage({loaded: true, src: img.src}, "*"); resolve(); };
img.onerror = (e) => { window.parent.postMessage({loaded: false, src: img.src}, "*"); resolve(); };
document.body.appendChild(img);
});
window.parent.postMessage({done: true}, "*")
});
</script>
</html>


@@ -0,0 +1 @@
cross-origin-embedder-policy: require-corp

1
third_party/rust/arbitrary/.cargo-checksum.json vendored Normal file

@@ -0,0 +1 @@
{"files":{"CHANGELOG.md":"edf3c8b74500275d5c9136f23c82eadf5356d4e804ef629ac5d15842e6859897","Cargo.lock":"7c30080ec598ac111d312a1e1ace0df52e346dd70ebce305df876906324810bb","Cargo.toml":"1bc1fc9183477e937309eb3c3d25a4aea2973bb858cac84b119a243d0a787be3","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"15656cc11a8331f28c0986b8ab97220d3e76f98e60ed388b5ffad37dfac4710c","README.md":"950e645eba942c01ae12aaf835b23f2b70bcc75c561e663fe483e000d067a57e","examples/derive_enum.rs":"4d399f1805c48780443182aa141be4e3bf773649b118eab245280799e67742f6","publish.sh":"752e221bdd960666b127df15effddd3d789ff3f1762498961fc79ae99f9a27f1","src/error.rs":"88293a722e314bcc05e4836167b49e76520c9b3d7f64d1ae711f7bd29280f480","src/lib.rs":"78ad87737c6f5cdf38f1f2ed797587c6e258739397f21109d3a969f0ee33a140","src/size_hint.rs":"9762b183f8277ee4955fe5b22552961744b6237286758161a551f904ef43e3eb","src/unstructured.rs":"990a08e3037c9cecf7395deecc27fd8c99ec27c8d80c6b5f85c2aca75b629d6e","tests/derive.rs":"6a4aaa87ee08ea2b67e97e7f6fc7c6247ef9a11144b5465a802631ed0ee5465e","tests/path.rs":"a9706f00ce95d5a11652ae926830756d9111837b55073a0bc6a1eadd25033387"},"package":"237430fd6ed3740afe94eefcc278ae21e050285be882804e0d6e8695f0c94691"}

298
third_party/rust/arbitrary/CHANGELOG.md vendored Normal file

@@ -0,0 +1,298 @@
## Unreleased
Released YYYY-MM-DD.
### Added
* TODO (or remove section if none)
### Changed
* TODO (or remove section if none)
### Deprecated
* TODO (or remove section if none)
### Removed
* TODO (or remove section if none)
### Fixed
* TODO (or remove section if none)
### Security
* TODO (or remove section if none)
## 1.0.1
Released 2021-05-20.
### Added
* `Arbitrary` impls for `NonZeroX` types [#79](https://github.com/rust-fuzz/arbitrary/pull/79)
* `Arbitrary` impls for all arrays using const generics [#55](https://github.com/rust-fuzz/arbitrary/pull/55)
* `Arbitrary` impls for `Ipv4Addr` and `Ipv6Addr` [#84](https://github.com/rust-fuzz/arbitrary/pull/84)
### Fixed
* Use fewer bytes for `Unstructured::int_in_range()` [#80](https://github.com/rust-fuzz/arbitrary/pull/80)
* Use correct range for `char` generation [#83](https://github.com/rust-fuzz/arbitrary/pull/83)
--------------------------------------------------------------------------------
## 1.0.0
Released 2020-02-24.
See 1.0.0-rc1 and 1.0.0-rc2 for changes since 0.4.7, which was the last main
line release.
--------------------------------------------------------------------------------
## 1.0.0-rc2
Released 2021-02-09.
### Added
* The `Arbitrary` trait is now implemented for `&[u8]`. [#67](https://github.com/rust-fuzz/arbitrary/pull/67)
### Changed
* Rename `Unstructured#get_bytes` to `Unstructured#bytes`. [#70](https://github.com/rust-fuzz/arbitrary/pull/70)
* Passing an empty slice of choices to `Unstructured#choose` returns an error. Previously it would panic. [71](https://github.com/rust-fuzz/arbitrary/pull/71)
--------------------------------------------------------------------------------
## 1.0.0-rc1
Released 2020-11-25.
### Added
* The `Arbitrary` trait is now implemented for `&str`. [#63](https://github.com/rust-fuzz/arbitrary/pull/63)
### Changed
* The `Arbitrary` trait now has a lifetime parameter, allowing `Arbitrary` implementations that borrow from the raw input (e.g. the new `&str` implementation). The `derive(Arbitrary)` macro also supports deriving `Arbitrary` on types with lifetimes now. [#63](https://github.com/rust-fuzz/arbitrary/pull/63)
### Removed
* The `shrink` method on the `Arbitrary` trait has been removed.
We have found that, in practice, using [internal reduction](https://drmaciver.github.io/papers/reduction-via-generation-preview.pdf) via approaches like `cargo fuzz tmin`, where the raw input bytes are reduced rather than the `T: Arbitrary` type constructed from those raw bytes, has the best efficiency-to-maintenance ratio. To the best of our knowledge, no one is relying on or using the `Arbitrary::shrink` method. If you *are* using and relying on the `Arbitrary::shrink` method, please reach out by [dropping a comment here](https://github.com/rust-fuzz/arbitrary/issues/62) and explaining how you're using it and what your use case is. We'll figure out what the best solution is, including potentially adding shrinking functionality back to the `arbitrary` crate.
--------------------------------------------------------------------------------
## 0.4.7
Released 2020-10-14.
### Added
* Added an optimization to avoid unnecessarily consuming bytes from the
underlying data when there is only one possible choice in
`Unstructured::{int_in_range, choose, etc..}`.
* Added license files to the derive crate.
### Changed
* The `Arbitrary` implementation for `std::time::Duration` should now be faster
and produce durations with a more-uniform distribution of nanoseconds.
--------------------------------------------------------------------------------
## 0.4.6
Released 2020-08-22.
### Added
* Added the `Unstructured::peek_bytes` method.
### Changed
* Test case reduction via `cargo fuzz tmin` should be much more effective at
reducing the sizes of collections now. (See
[#53](https://github.com/rust-fuzz/arbitrary/pull/53) and the commit messages
for details.)
* Fuzzing with mutation-based fuzzers (like libFuzzer) should be more efficient
now. (See [#53](https://github.com/rust-fuzz/arbitrary/pull/53) and the commit
messages for details)
--------------------------------------------------------------------------------
## 0.4.5
Released 2020-06-18.
### Added
* Implement `Arbitrary` for zero length arrays.
* Implement `Arbitrary` for `Range` and `RangeInclusive`.
--------------------------------------------------------------------------------
## 0.4.4
Released 2020-04-29.
### Fixed
* Fixed the custom derive for enums when used via its full path (like
`#[derive(arbitrary::Arbitrary)]` rather than like `#[derive(Arbitrary)]`).
## 0.4.3
Released 2020-04-28.
### Fixed
* Fixed the custom derive when used via its full path (like
`#[derive(arbitrary::Arbitrary)]` rather than like `#[derive(Arbitrary)]`).
--------------------------------------------------------------------------------
## 0.4.2
Released 2020-04-17.
### Changed
* We forgot to release a new version of the `derive_arbitrary` crate last
release. This release fixes that and so the `synstructure` dependency is
finally actually removed in the cargo releases.
--------------------------------------------------------------------------------
## 0.4.1
Released 2020-03-18.
### Removed
* Removed an internal dependency on the `synstructure` crate when the `derive`
feature is enabled. This should not have any visible downstream effects other
than faster build times!
--------------------------------------------------------------------------------
## 0.4.0
Released 2020-01-22.
This is technically a breaking change, but we expect that nearly everyone should
be able to upgrade without any compilation errors. The only exception is if you
were implementing the `Arbitrary::size_hint` method by hand. If so, see the
"changed" section below and the [API docs for
`Arbitrary::shrink`](https://docs.rs/arbitrary/0.4.0/arbitrary/trait.Arbitrary.html#method.size_hint)
for details.
### Added
* Added [the `arbitrary::size_hint::recursion_guard` helper
function][recursion_guard] for guarding against infinite recursion in
`size_hint` implementations for recursive types.
### Changed
* The `Arbitrary::size_hint` signature now takes a `depth: usize`
parameter. This should be passed along unmodified to any nested calls of other
`size_hint` methods. If you're implementing `size_hint` for a recursive type
(like a linked list or tree) or a generic type with type parameters, you
should use [the new `arbitrary::size_hint::recursion_guard` helper
function][recursion_guard].
### Fixed
* Fixed infinite recursion in generated `size_hint` implementations
from `#[derive(Arbitrary)]` for recursive types.
[recursion_guard]: https://docs.rs/arbitrary/0.4.0/arbitrary/size_hint/fn.recursion_guard.html
--------------------------------------------------------------------------------
## 0.3.2
Released 2020-01-16.
### Changed
* Updated the custom derive's dependencies.
--------------------------------------------------------------------------------
## 0.3.2
Released 2020-01-15.
### Fixed
* Fixed an over-eager assertion condition in `Unstructured::int_in_range` that
would incorrectly trigger when given valid ranges of length one.
--------------------------------------------------------------------------------
## 0.3.1
Released 2020-01-14.
### Fixed
* Fixed some links and version numbers in README.
--------------------------------------------------------------------------------
## 0.3.0
Released 2020-01-14.
### Added
* Added the `"derive"` cargo feature, to enable `#[derive(Arbitrary)]` for
custom types. Enabling this feature re-exports functionality from the
`derive_arbitrary` crate.
* The custom derive for `Arbitrary` implements the shrink method for you now.
* All implementations of `Arbitrary` for `std` types implement shrinking now.
* Added the `Arbitrary::arbitrary_take_rest` method allows an `Arbitrary`
implementation to consume all of the rest of the remaining raw input. It has a
default implementation that forwards to `Arbitrary::arbitrary` and the custom
derive creates a smart implementation for your custom types.
* Added the `Arbitrary::size_hint` method for hinting how many raw bytes an
implementation needs to construct itself. This has a default implementation,
but the custom derive creates a smart implementation for your custom types.
* Added the `Unstructured::choose` method to choose one thing among a set of
choices.
* Added the `Unstructured::arbitrary_len` method to get an arbitrary length for
a collection of some arbitrary type.
* Added the `Unstructured::arbitrary_iter` method to create an iterator of
arbitrary instance of some type.
### Changed
* The `Arbitrary` trait was simplified a bit.
* `Unstructured` is a concrete type now, not a trait.
* Switched to Rust 2018 edition.
### Removed
* `RingBuffer` and `FiniteBuffer` are removed. Use `Unstructured` instead.
### Fixed
* Better `Arbitrary` implementation for `char`.
* Better `Arbitrary` implementation for `String`.
--------------------------------------------------------------------------------
## 0.2.0
--------------------------------------------------------------------------------
## 0.1.0

56
third_party/rust/arbitrary/Cargo.lock generated vendored Normal file

@@ -0,0 +1,56 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3
[[package]]
name = "arbitrary"
version = "1.0.1"
dependencies = [
"derive_arbitrary",
]
[[package]]
name = "derive_arbitrary"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "df89dd0d075dea5cc5fdd6d5df6b8a61172a710b3efac1d6bdb9dd8b78f82c1a"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "proc-macro2"
version = "1.0.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f0d8caf72986c1a598726adc988bb5984792ef84f5ee5aa50209145ee8077038"
dependencies = [
"unicode-xid",
]
[[package]]
name = "quote"
version = "1.0.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c3d0b9745dc2debf507c8422de05d7226cc1f0644216dfdfead988f9b1ab32a7"
dependencies = [
"proc-macro2",
]
[[package]]
name = "syn"
version = "1.0.72"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a1e8cdbefb79a9a5a65e0db8b47b723ee907b7c7f8496c76a1770b5c310bab82"
dependencies = [
"proc-macro2",
"quote",
"unicode-xid",
]
[[package]]
name = "unicode-xid"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3"

41
third_party/rust/arbitrary/Cargo.toml vendored Normal file

@@ -0,0 +1,41 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies
#
# If you believe there's an error in this file please file an
# issue against the rust-lang/cargo repository. If you're
# editing this file be aware that the upstream Cargo.toml
# will likely look very different (and much more reasonable)
[package]
edition = "2018"
name = "arbitrary"
version = "1.0.1"
authors = ["The Rust-Fuzz Project Developers", "Nick Fitzgerald <fitzgen@gmail.com>", "Manish Goregaokar <manishsmail@gmail.com>", "Simonas Kazlauskas <arbitrary@kazlauskas.me>", "Brian L. Troutwine <brian@troutwine.us>", "Corey Farwell <coreyf@rwell.org>"]
description = "The trait for generating structured data from unstructured data"
documentation = "https://docs.rs/arbitrary/"
readme = "README.md"
keywords = ["arbitrary", "testing"]
categories = ["development-tools::testing"]
license = "MIT/Apache-2.0"
repository = "https://github.com/rust-fuzz/arbitrary/"
[[example]]
name = "derive_enum"
required-features = ["derive"]
[[test]]
name = "derive"
path = "./tests/derive.rs"
required-features = ["derive"]
[dependencies.derive_arbitrary]
version = "1.0.0"
optional = true
[dev-dependencies]
[features]
derive = ["derive_arbitrary"]

201
third_party/rust/arbitrary/LICENSE-APACHE vendored Normal file

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

27
third_party/rust/arbitrary/LICENSE-MIT vendored Normal file

@@ -0,0 +1,27 @@
MIT License
Copyright (c) 2019 Manish Goregaokar
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

96
third_party/rust/arbitrary/README.md vendored Normal file

@@ -0,0 +1,96 @@
<div align="center">
<h1><code>Arbitrary</code></h1>
<p><strong>The trait for generating structured data from arbitrary, unstructured input.</strong></p>
<img alt="GitHub Actions Status" src="https://github.com/rust-fuzz/rust_arbitrary/workflows/Rust/badge.svg"/>
</div>
## About
The `Arbitrary` crate lets you construct arbitrary instances of a type.
This crate is primarily intended to be combined with a fuzzer like [libFuzzer
and `cargo-fuzz`](https://github.com/rust-fuzz/cargo-fuzz) or
[AFL](https://github.com/rust-fuzz/afl.rs), and to help you turn the raw,
untyped byte buffers that they produce into well-typed, valid, structured
values. This allows you to combine structure-aware test case generation with
coverage-guided, mutation-based fuzzers.
## Documentation
[**Read the API documentation on `docs.rs`!**](https://docs.rs/arbitrary)
## Example
Say you're writing a color conversion library, and you have an `Rgb` struct to
represent RGB colors. You might want to implement `Arbitrary` for `Rgb` so that
you could take arbitrary `Rgb` instances in a test function that asserts some
property (for example, asserting that RGB converted to HSL and converted back to
RGB always ends up exactly where we started).
### Automatically Deriving `Arbitrary`
Automatically deriving the `Arbitrary` trait is the recommended way to implement
`Arbitrary` for your types.
Automatically deriving `Arbitrary` requires you to enable the `"derive"` cargo
feature:
```toml
# Cargo.toml
[dependencies]
arbitrary = { version = "1", features = ["derive"] }
```
And then you can simply add `#[derive(Arbitrary)]` annotations to your types:
```rust
// rgb.rs
use arbitrary::Arbitrary;
#[derive(Arbitrary)]
pub struct Rgb {
pub r: u8,
pub g: u8,
pub b: u8,
}
```
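For the cargo-fuzz / libFuzzer use case mentioned above, a derived type can be consumed directly by a typed fuzz target. A sketch of what such a target might look like (the crate name and the HSL conversion functions are hypothetical; it assumes libfuzzer-sys with its `Arbitrary` integration and that `Rgb` also derives `Debug` and `PartialEq`):

```rust
// fuzz/fuzz_targets/rgb_roundtrip.rs -- hypothetical cargo-fuzz target.
#![no_main]
use libfuzzer_sys::fuzz_target;
use my_color_crate::Rgb; // the #[derive(Arbitrary)] struct shown above

fuzz_target!(|color: Rgb| {
    // libFuzzer supplies raw bytes; the typed closure uses `Arbitrary` to
    // turn them into an `Rgb`, so the body only states the property we care
    // about: converting to HSL and back should round-trip every channel.
    let roundtripped = Rgb::from_hsl(color.to_hsl());
    assert_eq!(color, roundtripped);
});
```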
### Implementing `Arbitrary` By Hand
Alternatively, you can write an `Arbitrary` implementation by hand:
```rust
// rgb.rs
use arbitrary::{Arbitrary, Result, Unstructured};
#[derive(Copy, Clone, Debug)]
pub struct Rgb {
pub r: u8,
pub g: u8,
pub b: u8,
}
impl<'a> Arbitrary<'a> for Rgb {
fn arbitrary(u: &mut Unstructured<'a>) -> Result<Self> {
let r = u8::arbitrary(u)?;
let g = u8::arbitrary(u)?;
let b = u8::arbitrary(u)?;
Ok(Rgb { r, g, b })
}
}
```
## License
Licensed under dual MIT or Apache-2.0 at your choice.
Unless you explicitly state otherwise, any contribution intentionally submitted
for inclusion in this project by you, as defined in the Apache-2.0 license,
shall be dual licensed as above, without any additional terms or conditions.

23
third_party/rust/arbitrary/examples/derive_enum.rs vendored Normal file

@@ -0,0 +1,23 @@
//! A simple example of deriving the `Arbitrary` trait for an `enum`.
//!
//! Note that this requires enabling the "derive" cargo feature.
use arbitrary::{Arbitrary, Unstructured};
#[derive(Arbitrary, Debug)]
enum MyEnum {
UnitVariant,
TupleVariant(bool, u32),
StructVariant { x: i8, y: (u8, i32) },
}
fn main() {
let raw = b"This is some raw, unstructured data!";
let mut unstructured = Unstructured::new(raw);
let instance = MyEnum::arbitrary(&mut unstructured)
.expect("`unstructured` has enough underlying data to create all variants of `MyEnum`");
println!("Here is an arbitrary enum: {:?}", instance);
}

14
third_party/rust/arbitrary/publish.sh vendored Executable file

@@ -0,0 +1,14 @@
#!/usr/bin/env bash
set -eux
cd $(dirname $0)/derive
cargo publish
cd ..
# Let the crates.io index figure out we've published `derive_arbitrary` already.
sleep 5
cargo publish

40
third_party/rust/arbitrary/src/error.rs vendored Normal file

@@ -0,0 +1,40 @@
use std::{error, fmt};
/// An enumeration of buffer creation errors
#[derive(Debug, Clone, Copy)]
#[non_exhaustive]
pub enum Error {
/// No choices were provided to the Unstructured::choose call
EmptyChoose,
/// There was not enough underlying data to fulfill some request for raw
/// bytes.
NotEnoughData,
/// The input bytes were not of the right format
IncorrectFormat,
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Error::EmptyChoose => write!(
f,
"`arbitrary::Unstructured::choose` must be given a non-empty set of choices"
),
Error::NotEnoughData => write!(
f,
"There is not enough underlying raw data to construct an `Arbitrary` instance"
),
Error::IncorrectFormat => write!(
f,
"The raw data is not of the correct format to construct this type"
),
}
}
}
impl error::Error for Error {}
/// A `Result` with the error type fixed as `arbitrary::Error`.
///
/// Either an `Ok(T)` or `Err(arbitrary::Error)`.
pub type Result<T> = std::result::Result<T, Error>;

1191
third_party/rust/arbitrary/src/lib.rs vendored Normal file

File diff not shown because of its large size.

124
third_party/rust/arbitrary/src/size_hint.rs vendored Normal file

@@ -0,0 +1,124 @@
//! Utilities for working with and combining the results of
//! [`Arbitrary::size_hint`][crate::Arbitrary::size_hint].
/// Protects against potential infinite recursion when calculating size hints
/// due to indirect type recursion.
///
/// When the depth is not too deep, calls `f` with `depth + 1` to calculate the
/// size hint.
///
/// Otherwise, returns the default size hint: `(0, None)`.
#[inline]
pub fn recursion_guard(
depth: usize,
f: impl FnOnce(usize) -> (usize, Option<usize>),
) -> (usize, Option<usize>) {
const MAX_DEPTH: usize = 20;
if depth > MAX_DEPTH {
(0, None)
} else {
f(depth + 1)
}
}
/// Take the sum of the `lhs` and `rhs` size hints.
#[inline]
pub fn and(lhs: (usize, Option<usize>), rhs: (usize, Option<usize>)) -> (usize, Option<usize>) {
let lower = lhs.0 + rhs.0;
let upper = lhs.1.and_then(|lhs| rhs.1.map(|rhs| lhs + rhs));
(lower, upper)
}
/// Take the sum of all of the given size hints.
///
/// If `hints` is empty, returns `(0, Some(0))`, aka the size of consuming
/// nothing.
#[inline]
pub fn and_all(hints: &[(usize, Option<usize>)]) -> (usize, Option<usize>) {
hints.iter().copied().fold((0, Some(0)), and)
}
/// Take the minimum of the lower bounds and maximum of the upper bounds in the
/// `lhs` and `rhs` size hints.
#[inline]
pub fn or(lhs: (usize, Option<usize>), rhs: (usize, Option<usize>)) -> (usize, Option<usize>) {
let lower = std::cmp::min(lhs.0, rhs.0);
let upper = lhs
.1
.and_then(|lhs| rhs.1.map(|rhs| std::cmp::max(lhs, rhs)));
(lower, upper)
}
/// Take the maximum of all of the given size hints.
///
/// If `hints` is empty, returns `(0, Some(0))`, aka the size of consuming
/// nothing.
#[inline]
pub fn or_all(hints: &[(usize, Option<usize>)]) -> (usize, Option<usize>) {
if let Some(head) = hints.first().copied() {
hints[1..].iter().copied().fold(head, or)
} else {
(0, Some(0))
}
}
#[cfg(test)]
mod tests {
#[test]
fn and() {
assert_eq!((5, Some(5)), super::and((2, Some(2)), (3, Some(3))));
assert_eq!((5, None), super::and((2, Some(2)), (3, None)));
assert_eq!((5, None), super::and((2, None), (3, Some(3))));
assert_eq!((5, None), super::and((2, None), (3, None)));
}
#[test]
fn or() {
assert_eq!((2, Some(3)), super::or((2, Some(2)), (3, Some(3))));
assert_eq!((2, None), super::or((2, Some(2)), (3, None)));
assert_eq!((2, None), super::or((2, None), (3, Some(3))));
assert_eq!((2, None), super::or((2, None), (3, None)));
}
#[test]
fn and_all() {
assert_eq!((0, Some(0)), super::and_all(&[]));
assert_eq!(
(7, Some(7)),
super::and_all(&[(1, Some(1)), (2, Some(2)), (4, Some(4))])
);
assert_eq!(
(7, None),
super::and_all(&[(1, Some(1)), (2, Some(2)), (4, None)])
);
assert_eq!(
(7, None),
super::and_all(&[(1, Some(1)), (2, None), (4, Some(4))])
);
assert_eq!(
(7, None),
super::and_all(&[(1, None), (2, Some(2)), (4, Some(4))])
);
}
#[test]
fn or_all() {
assert_eq!((0, Some(0)), super::or_all(&[]));
assert_eq!(
(1, Some(4)),
super::or_all(&[(1, Some(1)), (2, Some(2)), (4, Some(4))])
);
assert_eq!(
(1, None),
super::or_all(&[(1, Some(1)), (2, Some(2)), (4, None)])
);
assert_eq!(
(1, None),
super::or_all(&[(1, Some(1)), (2, None), (4, Some(4))])
);
assert_eq!(
(1, None),
super::or_all(&[(1, None), (2, Some(2)), (4, Some(4))])
);
}
}

714
third_party/rust/arbitrary/src/unstructured.rs vendored Normal file

@@ -0,0 +1,714 @@
// Copyright © 2019 The Rust Fuzz Project Developers.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Wrappers around raw, unstructured bytes.
use crate::{Arbitrary, Error, Result};
use std::marker::PhantomData;
use std::{mem, ops};
/// A source of unstructured data.
///
/// An `Unstructured` helps `Arbitrary` implementations interpret raw data
/// (typically provided by a fuzzer) as a "DNA string" that describes how to
/// construct the `Arbitrary` type. The goal is that a small change to the "DNA
/// string" (the raw data wrapped by an `Unstructured`) results in a small
/// change to the generated `Arbitrary` instance. This helps a fuzzer
/// efficiently explore the `Arbitrary`'s input space.
///
/// `Unstructured` is deterministic: given the same raw data, the same series of
/// API calls will return the same results (modulo system resource constraints,
/// like running out of memory). However, `Unstructured` does not guarantee
/// anything beyond that: it makes no guarantee that it will yield bytes from
/// the underlying data in any particular order.
///
/// You shouldn't generally need to use an `Unstructured` unless you are writing
/// a custom `Arbitrary` implementation by hand, instead of deriving it. Mostly,
/// you should just be passing it through to nested `Arbitrary::arbitrary`
/// calls.
///
/// # Example
///
/// Imagine you were writing a color conversion crate. You might want to write
/// fuzz tests that take a random RGB color and assert various properties, run
/// functions and make sure nothing panics, etc.
///
/// Below is what translating the fuzzer's raw input into an `Unstructured` and
/// using that to generate an arbitrary RGB color might look like:
///
/// ```
/// # #[cfg(feature = "derive")] fn foo() {
/// use arbitrary::{Arbitrary, Unstructured};
///
/// /// An RGB color.
/// #[derive(Arbitrary)]
/// pub struct Rgb {
/// r: u8,
/// g: u8,
/// b: u8,
/// }
///
/// // Get the raw bytes from the fuzzer.
/// # let get_input_from_fuzzer = || &[];
/// let raw_data: &[u8] = get_input_from_fuzzer();
///
/// // Wrap it in an `Unstructured`.
/// let mut unstructured = Unstructured::new(raw_data);
///
/// // Generate an `Rgb` color and run our checks.
/// if let Ok(rgb) = Rgb::arbitrary(&mut unstructured) {
/// # let run_my_color_conversion_checks = |_| {};
/// run_my_color_conversion_checks(rgb);
/// }
/// # }
/// ```
pub struct Unstructured<'a> {
data: &'a [u8],
}
impl<'a> Unstructured<'a> {
/// Create a new `Unstructured` from the given raw data.
///
/// # Example
///
/// ```
/// use arbitrary::Unstructured;
///
/// let u = Unstructured::new(&[1, 2, 3, 4]);
/// ```
pub fn new(data: &'a [u8]) -> Self {
Unstructured { data }
}
/// Get the number of remaining bytes of underlying data that are still
/// available.
///
/// # Example
///
/// ```
/// use arbitrary::{Arbitrary, Unstructured};
///
/// let mut u = Unstructured::new(&[1, 2, 3]);
///
/// // Initially have three bytes of data.
/// assert_eq!(u.len(), 3);
///
/// // Generating a `bool` consumes one byte from the underlying data, so
/// // we are left with two bytes afterwards.
/// let _ = bool::arbitrary(&mut u);
/// assert_eq!(u.len(), 2);
/// ```
#[inline]
pub fn len(&self) -> usize {
self.data.len()
}
/// Is the underlying unstructured data exhausted?
///
/// `unstructured.is_empty()` is the same as `unstructured.len() == 0`.
///
/// # Example
///
/// ```
/// use arbitrary::{Arbitrary, Unstructured};
///
/// let mut u = Unstructured::new(&[1, 2, 3, 4]);
///
/// // Initially, we are not empty.
/// assert!(!u.is_empty());
///
/// // Generating a `u32` consumes all four bytes of the underlying data, so
/// // we become empty afterwards.
/// let _ = u32::arbitrary(&mut u);
/// assert!(u.is_empty());
/// ```
#[inline]
pub fn is_empty(&self) -> bool {
self.len() == 0
}
/// Generate an arbitrary instance of `A`.
///
/// This is simply a helper method that is equivalent to `<A as
/// Arbitrary>::arbitrary(self)`. This helper is a little bit more concise,
/// and can be used in situations where Rust's type inference will figure
/// out what `A` should be.
///
/// # Example
///
/// ```
/// # #[cfg(feature="derive")] fn foo() -> arbitrary::Result<()> {
/// use arbitrary::{Arbitrary, Unstructured};
///
/// #[derive(Arbitrary)]
/// struct MyType {
/// // ...
/// }
///
/// fn do_stuff(value: MyType) {
/// # let _ = value;
/// // ...
/// }
///
/// let mut u = Unstructured::new(&[1, 2, 3, 4]);
///
/// // Rust's type inference can figure out that `value` should be of type
/// // `MyType` here:
/// let value = u.arbitrary()?;
/// do_stuff(value);
/// # Ok(()) }
/// ```
pub fn arbitrary<A>(&mut self) -> Result<A>
where
A: Arbitrary<'a>,
{
<A as Arbitrary<'a>>::arbitrary(self)
}
/// Get the number of elements to insert when building up a collection of
/// arbitrary `ElementType`s.
///
/// This uses the [`<ElementType as
/// Arbitrary>::size_hint`][crate::Arbitrary::size_hint] method to smartly
/// choose a length such that we most likely have enough underlying bytes to
/// construct that many arbitrary `ElementType`s.
///
/// This should only be called within an `Arbitrary` implementation.
///
/// # Example
///
/// ```
/// use arbitrary::{Arbitrary, Result, Unstructured};
/// # pub struct MyCollection<T> { _t: std::marker::PhantomData<T> }
/// # impl<T> MyCollection<T> {
/// # pub fn with_capacity(capacity: usize) -> Self { MyCollection { _t: std::marker::PhantomData } }
/// # pub fn insert(&mut self, element: T) {}
/// # }
///
/// impl<'a, T> Arbitrary<'a> for MyCollection<T>
/// where
/// T: Arbitrary<'a>,
/// {
/// fn arbitrary(u: &mut Unstructured<'a>) -> Result<Self> {
/// // Get the number of `T`s we should insert into our collection.
/// let len = u.arbitrary_len::<T>()?;
///
/// // And then create a collection of that length!
/// let mut my_collection = MyCollection::with_capacity(len);
/// for _ in 0..len {
/// let element = T::arbitrary(u)?;
/// my_collection.insert(element);
/// }
///
/// Ok(my_collection)
/// }
/// }
/// ```
pub fn arbitrary_len<ElementType>(&mut self) -> Result<usize>
where
ElementType: Arbitrary<'a>,
{
let byte_size = self.arbitrary_byte_size()?;
let (lower, upper) = <ElementType as Arbitrary>::size_hint(0);
let elem_size = upper.unwrap_or_else(|| lower * 2);
let elem_size = std::cmp::max(1, elem_size);
Ok(byte_size / elem_size)
}
fn arbitrary_byte_size(&mut self) -> Result<usize> {
if self.data.is_empty() {
Ok(0)
} else if self.data.len() == 1 {
self.data = &[];
Ok(0)
} else {
// Take lengths from the end of the data, since the `libFuzzer` folks
// found that this lets fuzzers more efficiently explore the input
// space.
//
// https://github.com/rust-fuzz/libfuzzer-sys/blob/0c450753/libfuzzer/utils/FuzzedDataProvider.h#L92-L97
// We only consume as many bytes as necessary to cover the entire
// range of the byte string.
let len = if self.data.len() <= std::u8::MAX as usize + 1 {
let bytes = 1;
let max_size = self.data.len() - bytes;
let (rest, for_size) = self.data.split_at(max_size);
self.data = rest;
Self::int_in_range_impl(0..=max_size as u8, for_size.iter().copied())?.0 as usize
} else if self.data.len() <= std::u16::MAX as usize + 1 {
let bytes = 2;
let max_size = self.data.len() - bytes;
let (rest, for_size) = self.data.split_at(max_size);
self.data = rest;
Self::int_in_range_impl(0..=max_size as u16, for_size.iter().copied())?.0 as usize
} else if self.data.len() <= std::u32::MAX as usize + 1 {
let bytes = 4;
let max_size = self.data.len() - bytes;
let (rest, for_size) = self.data.split_at(max_size);
self.data = rest;
Self::int_in_range_impl(0..=max_size as u32, for_size.iter().copied())?.0 as usize
} else {
let bytes = 8;
let max_size = self.data.len() - bytes;
let (rest, for_size) = self.data.split_at(max_size);
self.data = rest;
Self::int_in_range_impl(0..=max_size as u64, for_size.iter().copied())?.0 as usize
};
Ok(len)
}
}
/// Generate an integer within the given range.
///
/// Do not use this to generate the size of a collection. Use
/// `arbitrary_len` instead.
///
/// # Panics
///
/// Panics if `range.start > range.end`. That is, the given range must be
/// non-empty.
///
/// # Example
///
/// ```
/// use arbitrary::{Arbitrary, Unstructured};
///
/// let mut u = Unstructured::new(&[1, 2, 3, 4]);
///
/// let x: i32 = u.int_in_range(-5_000..=-1_000)
/// .expect("constructed `u` with enough bytes to generate an `i32`");
///
/// assert!(-5_000 <= x);
/// assert!(x <= -1_000);
/// ```
pub fn int_in_range<T>(&mut self, range: ops::RangeInclusive<T>) -> Result<T>
where
T: Int,
{
let (result, bytes_consumed) = Self::int_in_range_impl(range, self.data.iter().cloned())?;
self.data = &self.data[bytes_consumed..];
Ok(result)
}
fn int_in_range_impl<T>(
range: ops::RangeInclusive<T>,
mut bytes: impl Iterator<Item = u8>,
) -> Result<(T, usize)>
where
T: Int,
{
let start = range.start();
let end = range.end();
assert!(
start <= end,
"`arbitrary::Unstructured::int_in_range` requires a non-empty range"
);
// When there is only one possible choice, don't waste any entropy from
// the underlying data.
if start == end {
return Ok((*start, 0));
}
let range: T::Widest = end.as_widest() - start.as_widest();
let mut result = T::Widest::ZERO;
let mut offset: usize = 0;
while offset < mem::size_of::<T>()
&& (range >> T::Widest::from_usize(offset * 8)) > T::Widest::ZERO
{
let byte = bytes.next().ok_or(Error::NotEnoughData)?;
result = (result << 8) | T::Widest::from_u8(byte);
offset += 1;
}
// Avoid division by zero.
if let Some(range) = range.checked_add(T::Widest::ONE) {
result = result % range;
}
Ok((
T::from_widest(start.as_widest().wrapping_add(result)),
offset,
))
}
/// Choose one of the given choices.
///
/// This should only be used inside of `Arbitrary` implementations.
///
/// Returns an error if there is not enough underlying data to make a
/// choice or if no choices are provided.
///
/// # Examples
///
/// Selecting from an array of choices:
///
/// ```
/// use arbitrary::Unstructured;
///
/// let mut u = Unstructured::new(&[1, 2, 3, 4, 5, 6, 7, 8, 9, 0]);
/// let choices = ['a', 'b', 'c', 'd', 'e', 'f', 'g'];
///
/// let choice = u.choose(&choices).unwrap();
///
/// println!("chose {}", choice);
/// ```
///
/// An error is returned if no choices are provided:
///
/// ```
/// use arbitrary::Unstructured;
///
/// let mut u = Unstructured::new(&[1, 2, 3, 4, 5, 6, 7, 8, 9, 0]);
/// let choices: [char; 0] = [];
///
/// let result = u.choose(&choices);
///
/// assert!(result.is_err());
/// ```
pub fn choose<'b, T>(&mut self, choices: &'b [T]) -> Result<&'b T> {
if choices.is_empty() {
return Err(Error::EmptyChoose);
}
let idx = self.int_in_range(0..=choices.len() - 1)?;
Ok(&choices[idx])
}
/// Fill a `buffer` with bytes from the underlying raw data.
///
/// This should only be called within an `Arbitrary` implementation. This is
/// a very low-level operation. You should generally prefer calling nested
/// `Arbitrary` implementations like `<Vec<u8>>::arbitrary` and
/// `String::arbitrary` over using this method directly.
///
/// If this `Unstructured` does not have enough data to fill the whole
/// `buffer`, an error is returned.
///
/// # Example
///
/// ```
/// use arbitrary::Unstructured;
///
/// let mut u = Unstructured::new(&[1, 2, 3, 4]);
///
/// let mut buf = [0; 2];
/// assert!(u.fill_buffer(&mut buf).is_ok());
/// assert!(u.fill_buffer(&mut buf).is_ok());
/// ```
pub fn fill_buffer(&mut self, buffer: &mut [u8]) -> Result<()> {
let n = std::cmp::min(buffer.len(), self.data.len());
buffer[..n].copy_from_slice(&self.data[..n]);
for byte in buffer[n..].iter_mut() {
*byte = 0;
}
self.data = &self.data[n..];
Ok(())
}
/// Provide `size` bytes from the underlying raw data.
///
/// This should only be called within an `Arbitrary` implementation. This is
/// a very low-level operation. You should generally prefer calling nested
/// `Arbitrary` implementations like `<Vec<u8>>::arbitrary` and
/// `String::arbitrary` over using this method directly.
///
/// # Example
///
/// ```
/// use arbitrary::Unstructured;
///
/// let mut u = Unstructured::new(&[1, 2, 3, 4]);
///
/// assert!(u.bytes(2).unwrap() == &[1, 2]);
/// assert!(u.bytes(2).unwrap() == &[3, 4]);
/// ```
pub fn bytes(&mut self, size: usize) -> Result<&'a [u8]> {
if self.data.len() < size {
return Err(Error::NotEnoughData);
}
let (for_buf, rest) = self.data.split_at(size);
self.data = rest;
Ok(for_buf)
}
/// Peek at `size` number of bytes of the underlying raw input.
///
/// Does not consume the bytes, only peeks at them.
///
/// Returns `None` if there are not `size` bytes left in the underlying raw
/// input.
///
/// # Example
///
/// ```
/// use arbitrary::Unstructured;
///
/// let u = Unstructured::new(&[1, 2, 3]);
///
/// assert_eq!(u.peek_bytes(0).unwrap(), []);
/// assert_eq!(u.peek_bytes(1).unwrap(), [1]);
/// assert_eq!(u.peek_bytes(2).unwrap(), [1, 2]);
/// assert_eq!(u.peek_bytes(3).unwrap(), [1, 2, 3]);
///
/// assert!(u.peek_bytes(4).is_none());
/// ```
pub fn peek_bytes(&self, size: usize) -> Option<&'a [u8]> {
self.data.get(..size)
}
/// Consume all of the rest of the remaining underlying bytes.
///
/// Returns a slice of all the remaining, unconsumed bytes.
///
/// # Example
///
/// ```
/// use arbitrary::Unstructured;
///
/// let mut u = Unstructured::new(&[1, 2, 3]);
///
/// let mut remaining = u.take_rest();
///
/// assert_eq!(remaining, [1, 2, 3]);
/// ```
pub fn take_rest(mut self) -> &'a [u8] {
mem::replace(&mut self.data, &[])
}
/// Provide an iterator over elements for constructing a collection
///
/// This is useful for implementing [`Arbitrary::arbitrary`] on collections
/// since the implementation is simply `u.arbitrary_iter()?.collect()`
pub fn arbitrary_iter<'b, ElementType: Arbitrary<'a>>(
&'b mut self,
) -> Result<ArbitraryIter<'a, 'b, ElementType>> {
Ok(ArbitraryIter {
u: &mut *self,
_marker: PhantomData,
})
}
/// Provide an iterator over elements for constructing a collection from
/// all the remaining bytes.
///
/// This is useful for implementing [`Arbitrary::arbitrary_take_rest`] on collections
/// since the implementation is simply `u.arbitrary_take_rest_iter()?.collect()`
pub fn arbitrary_take_rest_iter<ElementType: Arbitrary<'a>>(
self,
) -> Result<ArbitraryTakeRestIter<'a, ElementType>> {
let (lower, upper) = ElementType::size_hint(0);
let elem_size = upper.unwrap_or(lower * 2);
let elem_size = std::cmp::max(1, elem_size);
let size = self.len() / elem_size;
Ok(ArbitraryTakeRestIter {
size,
u: Some(self),
_marker: PhantomData,
})
}
}
/// Utility iterator produced by [`Unstructured::arbitrary_iter`]
pub struct ArbitraryIter<'a, 'b, ElementType> {
u: &'b mut Unstructured<'a>,
_marker: PhantomData<ElementType>,
}
impl<'a, 'b, ElementType: Arbitrary<'a>> Iterator for ArbitraryIter<'a, 'b, ElementType> {
type Item = Result<ElementType>;
fn next(&mut self) -> Option<Result<ElementType>> {
let keep_going = self.u.arbitrary().unwrap_or(false);
if keep_going {
Some(Arbitrary::arbitrary(self.u))
} else {
None
}
}
}
/// Utility iterator produced by [`Unstructured::arbitrary_take_rest_iter`]
pub struct ArbitraryTakeRestIter<'a, ElementType> {
u: Option<Unstructured<'a>>,
size: usize,
_marker: PhantomData<ElementType>,
}
impl<'a, ElementType: Arbitrary<'a>> Iterator for ArbitraryTakeRestIter<'a, ElementType> {
type Item = Result<ElementType>;
fn next(&mut self) -> Option<Result<ElementType>> {
if let Some(mut u) = self.u.take() {
if self.size == 1 {
Some(Arbitrary::arbitrary_take_rest(u))
} else if self.size == 0 {
None
} else {
self.size -= 1;
let ret = Arbitrary::arbitrary(&mut u);
self.u = Some(u);
Some(ret)
}
} else {
None
}
}
}
/// A trait that is implemented for all of the primitive integers:
///
/// * `u8`
/// * `u16`
/// * `u32`
/// * `u64`
/// * `u128`
/// * `usize`
/// * `i8`
/// * `i16`
/// * `i32`
/// * `i64`
/// * `i128`
/// * `isize`
///
/// Don't implement this trait yourself.
pub trait Int:
Copy
+ PartialOrd
+ Ord
+ ops::Sub<Self, Output = Self>
+ ops::Rem<Self, Output = Self>
+ ops::Shr<Self, Output = Self>
+ ops::Shl<usize, Output = Self>
+ ops::BitOr<Self, Output = Self>
{
#[doc(hidden)]
type Widest: Int;
#[doc(hidden)]
const ZERO: Self;
#[doc(hidden)]
const ONE: Self;
#[doc(hidden)]
fn as_widest(self) -> Self::Widest;
#[doc(hidden)]
fn from_widest(w: Self::Widest) -> Self;
#[doc(hidden)]
fn from_u8(b: u8) -> Self;
#[doc(hidden)]
fn from_usize(u: usize) -> Self;
#[doc(hidden)]
fn checked_add(self, rhs: Self) -> Option<Self>;
#[doc(hidden)]
fn wrapping_add(self, rhs: Self) -> Self;
}
macro_rules! impl_int {
( $( $ty:ty : $widest:ty ; )* ) => {
$(
impl Int for $ty {
type Widest = $widest;
const ZERO: Self = 0;
const ONE: Self = 1;
fn as_widest(self) -> Self::Widest {
self as $widest
}
fn from_widest(w: Self::Widest) -> Self {
let x = <$ty>::max_value().as_widest();
(w % x) as Self
}
fn from_u8(b: u8) -> Self {
b as Self
}
fn from_usize(u: usize) -> Self {
u as Self
}
fn checked_add(self, rhs: Self) -> Option<Self> {
<$ty>::checked_add(self, rhs)
}
fn wrapping_add(self, rhs: Self) -> Self {
<$ty>::wrapping_add(self, rhs)
}
}
)*
}
}
impl_int! {
u8: u128;
u16: u128;
u32: u128;
u64: u128;
u128: u128;
usize: u128;
i8: i128;
i16: i128;
i32: i128;
i64: i128;
i128: i128;
isize: i128;
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_byte_size() {
let mut u = Unstructured::new(&[1, 2, 3, 4, 5, 6, 7, 8, 9, 6]);
// Should take one byte off the end
assert_eq!(u.arbitrary_byte_size().unwrap(), 6);
assert_eq!(u.len(), 9);
let mut v = vec![];
v.resize(260, 0);
v.push(1);
v.push(4);
let mut u = Unstructured::new(&v);
// Should read two bytes off the end
assert_eq!(u.arbitrary_byte_size().unwrap(), 0x104);
assert_eq!(u.len(), 260);
}
#[test]
fn int_in_range_of_one() {
let mut u = Unstructured::new(&[1, 2, 3, 4, 5, 6, 7, 8, 9, 6]);
let x = u.int_in_range(0..=0).unwrap();
assert_eq!(x, 0);
let choice = *u.choose(&[42]).unwrap();
assert_eq!(choice, 42)
}
#[test]
fn int_in_range_uses_minimal_amount_of_bytes() {
let mut u = Unstructured::new(&[1]);
u.int_in_range::<u8>(0..=u8::MAX).unwrap();
let mut u = Unstructured::new(&[1]);
u.int_in_range::<u32>(0..=u8::MAX as u32).unwrap();
let mut u = Unstructured::new(&[1]);
u.int_in_range::<u32>(0..=u8::MAX as u32 + 1).unwrap_err();
}
}
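
For orientation, here is a minimal, hypothetical sketch (not part of the vendored crate) of how raw fuzzer bytes are typically fed through `Unstructured`. The `FuzzInput` type and the byte slice are made up for illustration; the only APIs used are the ones defined above.

```rust
use arbitrary::{Arbitrary, Unstructured};

// Hypothetical input type, used only to illustrate the flow of bytes.
#[derive(Debug)]
struct FuzzInput {
    id: u32,
    flag: bool,
}

impl<'a> Arbitrary<'a> for FuzzInput {
    fn arbitrary(u: &mut Unstructured<'a>) -> arbitrary::Result<Self> {
        Ok(FuzzInput {
            // Draw each field from the unstructured bytes in order.
            id: u.int_in_range(0..=1_000)?,
            flag: u.arbitrary()?,
        })
    }
}

fn main() -> arbitrary::Result<()> {
    // In a real fuzz target these bytes would come from the fuzzer.
    let raw = [7, 1, 2, 3];
    let mut u = Unstructured::new(&raw);
    let input: FuzzInput = u.arbitrary()?;
    println!("{:?}", input);
    Ok(())
}
```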

188
third_party/rust/arbitrary/tests/derive.rs vendored Normal file
View file

@@ -0,0 +1,188 @@
#![cfg(feature = "derive")]
use arbitrary::*;
fn arbitrary_from<'a, T: Arbitrary<'a>>(input: &'a [u8]) -> T {
let mut buf = Unstructured::new(input);
T::arbitrary(&mut buf).expect("can create arbitrary instance OK")
}
#[derive(Copy, Clone, Debug, Eq, PartialEq, Arbitrary)]
pub struct Rgb {
pub r: u8,
pub g: u8,
pub b: u8,
}
#[test]
fn struct_with_named_fields() {
let rgb: Rgb = arbitrary_from(&[4, 5, 6]);
assert_eq!(rgb.r, 4);
assert_eq!(rgb.g, 5);
assert_eq!(rgb.b, 6);
assert_eq!((3, Some(3)), <Rgb as Arbitrary>::size_hint(0));
}
#[derive(Copy, Clone, Debug, Arbitrary)]
struct MyTupleStruct(u8, bool);
#[test]
fn tuple_struct() {
let s: MyTupleStruct = arbitrary_from(&[43, 42]);
assert_eq!(s.0, 43);
assert_eq!(s.1, false);
let s: MyTupleStruct = arbitrary_from(&[42, 43]);
assert_eq!(s.0, 42);
assert_eq!(s.1, true);
assert_eq!((2, Some(2)), <MyTupleStruct as Arbitrary>::size_hint(0));
}
#[derive(Clone, Debug, Arbitrary)]
struct EndingInVec(u8, bool, u32, Vec<u16>);
#[derive(Clone, Debug, Arbitrary)]
struct EndingInString(u8, bool, u32, String);
#[test]
fn test_take_rest() {
let bytes = [1, 1, 1, 2, 3, 4, 5, 6, 7, 8];
let s1 = EndingInVec::arbitrary_take_rest(Unstructured::new(&bytes)).unwrap();
let s2 = EndingInString::arbitrary_take_rest(Unstructured::new(&bytes)).unwrap();
assert_eq!(s1.0, 1);
assert_eq!(s2.0, 1);
assert_eq!(s1.1, true);
assert_eq!(s2.1, true);
assert_eq!(s1.2, 0x4030201);
assert_eq!(s2.2, 0x4030201);
assert_eq!(s1.3, vec![0x605, 0x807]);
assert_eq!(s2.3, "\x05\x06\x07\x08");
}
#[derive(Copy, Clone, Debug, Arbitrary)]
enum MyEnum {
Unit,
Tuple(u8, u16),
Struct { a: u32, b: (bool, u64) },
}
#[test]
fn derive_enum() {
let mut raw = vec![
// The choice of which enum variant takes 4 bytes.
1, 2, 3, 4,
// And then we need up to 13 bytes for creating `MyEnum::Struct`, the
// largest variant.
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
];
let mut saw_unit = false;
let mut saw_tuple = false;
let mut saw_struct = false;
for i in 0..=255 {
// Choose different variants each iteration.
for el in &mut raw[..4] {
*el = i;
}
let e: MyEnum = arbitrary_from(&raw);
match e {
MyEnum::Unit => {
saw_unit = true;
}
MyEnum::Tuple(a, b) => {
saw_tuple = true;
assert_eq!(a, arbitrary_from(&raw[4..5]));
assert_eq!(b, arbitrary_from(&raw[5..]));
}
MyEnum::Struct { a, b } => {
saw_struct = true;
assert_eq!(a, arbitrary_from(&raw[4..8]));
assert_eq!(b, arbitrary_from(&raw[8..]));
}
}
}
assert!(saw_unit);
assert!(saw_tuple);
assert!(saw_struct);
assert_eq!((4, Some(17)), <MyEnum as Arbitrary>::size_hint(0));
}
#[derive(Arbitrary, Debug)]
enum RecursiveTree {
Leaf,
Node {
left: Box<RecursiveTree>,
right: Box<RecursiveTree>,
},
}
#[test]
fn recursive() {
let raw = vec![1, 2, 3, 4, 5, 6, 7, 8, 9];
let _rec: RecursiveTree = arbitrary_from(&raw);
let (lower, upper) = <RecursiveTree as Arbitrary>::size_hint(0);
assert_eq!(lower, 4, "need a u32 for the discriminant at minimum");
assert!(
upper.is_none(),
"potentially infinitely recursive, so no upper bound"
);
}
#[derive(Arbitrary, Debug)]
struct Generic<T> {
inner: T,
}
#[test]
fn generics() {
let raw = vec![1, 2, 3, 4, 5, 6, 7, 8, 9];
let gen: Generic<bool> = arbitrary_from(&raw);
assert!(gen.inner);
let (lower, upper) = <Generic<u32> as Arbitrary>::size_hint(0);
assert_eq!(lower, 4);
assert_eq!(upper, Some(4));
}
#[derive(Arbitrary, Debug)]
struct OneLifetime<'a> {
alpha: &'a str,
}
#[test]
fn one_lifetime() {
// Last byte is used for length
let raw: Vec<u8> = vec![97, 98, 99, 100, 3];
let lifetime: OneLifetime = arbitrary_from(&raw);
assert_eq!("abc", lifetime.alpha);
let (lower, upper) = <OneLifetime as Arbitrary>::size_hint(0);
assert_eq!(lower, 8);
assert_eq!(upper, None);
}
#[derive(Arbitrary, Debug)]
struct TwoLifetimes<'a, 'b> {
alpha: &'a str,
beta: &'b str,
}
#[test]
fn two_lifetimes() {
// Last byte is used for length
let raw: Vec<u8> = vec![97, 98, 99, 100, 101, 102, 103, 3];
let lifetime: TwoLifetimes = arbitrary_from(&raw);
assert_eq!("abc", lifetime.alpha);
assert_eq!("def", lifetime.beta);
let (lower, upper) = <TwoLifetimes as Arbitrary>::size_hint(0);
assert_eq!(lower, 16);
assert_eq!(upper, None);
}

29
third_party/rust/arbitrary/tests/path.rs vendored Normal file
View file

@@ -0,0 +1,29 @@
#![cfg(feature = "derive")]
// Regression test for ensuring the derives work without Arbitrary being imported
#[derive(arbitrary::Arbitrary, Clone, Debug)]
pub struct Struct {
x: u8,
y: u8,
}
#[derive(arbitrary::Arbitrary, Clone, Debug)]
pub struct Tuple(u8);
#[derive(arbitrary::Arbitrary, Clone, Debug)]
pub struct Unit(u8);
#[derive(arbitrary::Arbitrary, Clone, Debug)]
pub enum Enum {
X(u8),
Y(u8),
}
#[derive(arbitrary::Arbitrary, Clone, Debug)]
struct EndingInVec(u8, bool, u32, Vec<u16>);
#[derive(arbitrary::Arbitrary, Debug)]
struct Generic<T> {
inner: T,
}

1
third_party/rust/derive_arbitrary/.cargo-checksum.json vendored Normal file
View file

@@ -0,0 +1 @@
{"files":{"Cargo.toml":"0e723b19ec2f6d2ef9c0252720c70cad769c76349815721bce064c621e89a11e","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"15656cc11a8331f28c0986b8ab97220d3e76f98e60ed388b5ffad37dfac4710c","README.md":"7059db284b2016ba7355c63a2b14eb732c7b8952286ff1bc4fdde605018a39c4","src/lib.rs":"cdff09ae28184bc4bab1ff751ab9ea228c650463d4a08b359588efbae9d932f1"},"package":"5f1281ee141df08871db9fe261ab5312179eac32d1e314134ceaa8dd7c042f5a"}

36
third_party/rust/derive_arbitrary/Cargo.toml vendored Normal file
View file

@@ -0,0 +1,36 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies
#
# If you believe there's an error in this file please file an
# issue against the rust-lang/cargo repository. If you're
# editing this file be aware that the upstream Cargo.toml
# will likely look very different (and much more reasonable)
[package]
edition = "2018"
name = "derive_arbitrary"
version = "1.0.1"
authors = ["The Rust-Fuzz Project Developers", "Nick Fitzgerald <fitzgen@gmail.com>", "Manish Goregaokar <manishsmail@gmail.com>", "Andre Bogus <bogusandre@gmail.com>", "Corey Farwell <coreyf@rwell.org>"]
description = "Derives arbitrary traits"
documentation = "https://docs.rs/arbitrary/"
readme = "README.md"
keywords = ["arbitrary", "testing", "derive", "macro"]
categories = ["development-tools::testing"]
license = "MIT/Apache-2.0"
repository = "https://github.com/rust-fuzz/arbitrary"
[lib]
proc_macro = true
[dependencies.proc-macro2]
version = "1.0"
[dependencies.quote]
version = "1.0"
[dependencies.syn]
version = "1.0"
features = ["derive"]

201
third_party/rust/derive_arbitrary/LICENSE-APACHE vendored Normal file
View file

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

27
third_party/rust/derive_arbitrary/LICENSE-MIT vendored Normal file
View file

@@ -0,0 +1,27 @@
MIT License
Copyright (c) 2019 Manish Goregaokar
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

7
third_party/rust/derive_arbitrary/README.md vendored Normal file
View file

@@ -0,0 +1,7 @@
# `#[derive(Arbitrary)]`
This crate implements support for automatically deriving [the `Arbitrary`
trait](https://docs.rs/arbitrary/*/arbitrary/trait.Arbitrary.html).
Don't depend on this crate directly, though. Instead, enable the `"derive"`
feature of the `arbitrary` crate.
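
A minimal usage sketch, assuming `arbitrary` is declared in `Cargo.toml` with the `"derive"` feature enabled; the `Point` type below is hypothetical:

```rust
use arbitrary::{Arbitrary, Unstructured};

// Hypothetical type used only to illustrate the derive.
#[derive(Arbitrary, Debug)]
struct Point {
    x: u8,
    y: u8,
}

fn example(raw: &[u8]) -> arbitrary::Result<Point> {
    // The derived impl reads each field from the unstructured bytes in order.
    let mut u = Unstructured::new(raw);
    Point::arbitrary(&mut u)
}
```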

196
third_party/rust/derive_arbitrary/src/lib.rs vendored Normal file
View file

@@ -0,0 +1,196 @@
extern crate proc_macro;
use proc_macro2::{Span, TokenStream};
use quote::quote;
use syn::*;
static ARBITRARY_LIFETIME_NAME: &str = "'arbitrary";
#[proc_macro_derive(Arbitrary)]
pub fn derive_arbitrary(tokens: proc_macro::TokenStream) -> proc_macro::TokenStream {
let input = syn::parse_macro_input!(tokens as syn::DeriveInput);
let (lifetime_without_bounds, lifetime_with_bounds) =
build_arbitrary_lifetime(input.generics.clone());
let arbitrary_method = gen_arbitrary_method(&input, lifetime_without_bounds.clone());
let size_hint_method = gen_size_hint_method(&input);
let name = input.ident;
// Add a bound `T: Arbitrary` to every type parameter T.
let generics = add_trait_bounds(input.generics, lifetime_without_bounds.clone());
// Build ImplGeneric with a lifetime (https://github.com/dtolnay/syn/issues/90)
let mut generics_with_lifetime = generics.clone();
generics_with_lifetime
.params
.push(GenericParam::Lifetime(lifetime_with_bounds));
let (impl_generics, _, _) = generics_with_lifetime.split_for_impl();
// Build TypeGenerics and WhereClause without a lifetime
let (_, ty_generics, where_clause) = generics.split_for_impl();
(quote! {
impl #impl_generics arbitrary::Arbitrary<#lifetime_without_bounds> for #name #ty_generics #where_clause {
#arbitrary_method
#size_hint_method
}
})
.into()
}
// Returns: (lifetime without bounds, lifetime with bounds)
// Example: ("'arbitrary", "'arbitrary: 'a + 'b")
fn build_arbitrary_lifetime(generics: Generics) -> (LifetimeDef, LifetimeDef) {
let lifetime_without_bounds =
LifetimeDef::new(Lifetime::new(ARBITRARY_LIFETIME_NAME, Span::call_site()));
let mut lifetime_with_bounds = lifetime_without_bounds.clone();
for param in generics.params.iter() {
if let GenericParam::Lifetime(lifetime_def) = param {
lifetime_with_bounds
.bounds
.push(lifetime_def.lifetime.clone());
}
}
(lifetime_without_bounds, lifetime_with_bounds)
}
// Add a bound `T: Arbitrary` to every type parameter T.
fn add_trait_bounds(mut generics: Generics, lifetime: LifetimeDef) -> Generics {
for param in generics.params.iter_mut() {
if let GenericParam::Type(type_param) = param {
type_param
.bounds
.push(parse_quote!(arbitrary::Arbitrary<#lifetime>));
}
}
generics
}
fn gen_arbitrary_method(input: &DeriveInput, lifetime: LifetimeDef) -> TokenStream {
let ident = &input.ident;
let arbitrary_structlike = |fields| {
let arbitrary = construct(fields, |_, _| quote!(arbitrary::Arbitrary::arbitrary(u)?));
let arbitrary_take_rest = construct_take_rest(fields);
quote! {
fn arbitrary(u: &mut arbitrary::Unstructured<#lifetime>) -> arbitrary::Result<Self> {
Ok(#ident #arbitrary)
}
fn arbitrary_take_rest(mut u: arbitrary::Unstructured<#lifetime>) -> arbitrary::Result<Self> {
Ok(#ident #arbitrary_take_rest)
}
}
};
match &input.data {
Data::Struct(data) => arbitrary_structlike(&data.fields),
Data::Union(data) => arbitrary_structlike(&Fields::Named(data.fields.clone())),
Data::Enum(data) => {
let variants = data.variants.iter().enumerate().map(|(i, variant)| {
let idx = i as u64;
let ctor = construct(&variant.fields, |_, _| {
quote!(arbitrary::Arbitrary::arbitrary(u)?)
});
let variant_name = &variant.ident;
quote! { #idx => #ident::#variant_name #ctor }
});
let variants_take_rest = data.variants.iter().enumerate().map(|(i, variant)| {
let idx = i as u64;
let ctor = construct_take_rest(&variant.fields);
let variant_name = &variant.ident;
quote! { #idx => #ident::#variant_name #ctor }
});
let count = data.variants.len() as u64;
quote! {
fn arbitrary(u: &mut arbitrary::Unstructured<#lifetime>) -> arbitrary::Result<Self> {
// Use a multiply + shift to generate a ranged random number
// with slight bias. For details, see:
// https://lemire.me/blog/2016/06/30/fast-random-shuffling
Ok(match (u64::from(<u32 as arbitrary::Arbitrary>::arbitrary(u)?) * #count) >> 32 {
#(#variants,)*
_ => unreachable!()
})
}
fn arbitrary_take_rest(mut u: arbitrary::Unstructured<#lifetime>) -> arbitrary::Result<Self> {
// Use a multiply + shift to generate a ranged random number
// with slight bias. For details, see:
// https://lemire.me/blog/2016/06/30/fast-random-shuffling
Ok(match (u64::from(<u32 as arbitrary::Arbitrary>::arbitrary(&mut u)?) * #count) >> 32 {
#(#variants_take_rest,)*
_ => unreachable!()
})
}
}
}
}
}
fn construct(fields: &Fields, ctor: impl Fn(usize, &Field) -> TokenStream) -> TokenStream {
match fields {
Fields::Named(names) => {
let names = names.named.iter().enumerate().map(|(i, f)| {
let name = f.ident.as_ref().unwrap();
let ctor = ctor(i, f);
quote! { #name: #ctor }
});
quote! { { #(#names,)* } }
}
Fields::Unnamed(names) => {
let names = names.unnamed.iter().enumerate().map(|(i, f)| {
let ctor = ctor(i, f);
quote! { #ctor }
});
quote! { ( #(#names),* ) }
}
Fields::Unit => quote!(),
}
}
fn construct_take_rest(fields: &Fields) -> TokenStream {
construct(fields, |idx, _| {
if idx + 1 == fields.len() {
quote! { arbitrary::Arbitrary::arbitrary_take_rest(u)? }
} else {
quote! { arbitrary::Arbitrary::arbitrary(&mut u)? }
}
})
}
fn gen_size_hint_method(input: &DeriveInput) -> TokenStream {
let size_hint_fields = |fields: &Fields| {
let tys = fields.iter().map(|f| &f.ty);
quote! {
arbitrary::size_hint::and_all(&[
#( <#tys as arbitrary::Arbitrary>::size_hint(depth) ),*
])
}
};
let size_hint_structlike = |fields: &Fields| {
let hint = size_hint_fields(fields);
quote! {
#[inline]
fn size_hint(depth: usize) -> (usize, Option<usize>) {
arbitrary::size_hint::recursion_guard(depth, |depth| #hint)
}
}
};
match &input.data {
Data::Struct(data) => size_hint_structlike(&data.fields),
Data::Union(data) => size_hint_structlike(&Fields::Named(data.fields.clone())),
Data::Enum(data) => {
let variants = data.variants.iter().map(|v| size_hint_fields(&v.fields));
quote! {
#[inline]
fn size_hint(depth: usize) -> (usize, Option<usize>) {
arbitrary::size_hint::and(
<u32 as arbitrary::Arbitrary>::size_hint(depth),
arbitrary::size_hint::recursion_guard(depth, |depth| {
arbitrary::size_hint::or_all(&[ #( #variants ),* ])
}),
)
}
}
}
}
}
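
For reference, the multiply-and-shift variant selection used in the generated `arbitrary` method above can be isolated into a small standalone sketch; the helper name and the `count` value are illustrative only, not part of the crate.

```rust
// Map a u32 drawn from the input onto one of `count` enum variants.
// This is the same multiply+shift trick the derive emits; the result
// is close to uniform, with only slight bias.
fn variant_index(x: u32, count: u64) -> u64 {
    (u64::from(x) * count) >> 32
}

fn main() {
    let count = 3;
    assert_eq!(variant_index(0, count), 0);
    assert_eq!(variant_index(u32::MAX, count), 2);
    // A value in the middle of the u32 range lands on the middle variant.
    assert_eq!(variant_index(u32::MAX / 2 + 1, count), 1);
}
```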

1
third_party/rust/wasm-encoder/.cargo-checksum.json vendored Normal file
View file

@@ -0,0 +1 @@
{"files":{"Cargo.toml":"aed5fe39e7e69c8fa0aed88e1ec320b7b947321dbaa001329a3d07aec37106d5","LICENSE":"268872b9816f90fd8e85db5a28d33f8150ebb8dd016653fb39ef1f94f2686bc5","README.md":"8d8b48c8ed202bd18494ed37678a0712688909bc350e5423fc18d70447e31f56","src/aliases.rs":"13dfd1947ad7285c1bab5b2bd14703e0e0a9392519e4a3a709a6d160751dfd80","src/code.rs":"84dbc269ab1dca363b7b40a6dd4f8fc6d64e0fac6f56daa5fcab7ba0fd85cfc8","src/custom.rs":"0926d7c9b8418c316b2478abced1ddc0cf88f25c68c4c320e90fff237a033dd1","src/data.rs":"e89f9297f0e5cf2b784c59f445d6f2e7901eaaf7897731f4ef6282672fe7ae70","src/elements.rs":"8c68d1e0b176c643ba4d49705043d68ae063dfe7b790bbc2d0d8edf931981c16","src/encoders.rs":"c721ac7ad3080da42ff3e7397007e44471d1e47a8de117fbde7ab799b75d10a6","src/exports.rs":"4ef5ae7daa5f082499913538d88c4ad61ff293c7f5c4c414adb900a514ff361a","src/functions.rs":"e433d9199bad8c75609202a4282797e594002e3610d803e5732989f7b5edc2a6","src/globals.rs":"bb033493914c1c321f19fd3c974f174c13f877659ae7aae31a664790d839276a","src/imports.rs":"563184465211d7381f86d2040cc26a2322296fc966f0ba02251163a54100b8d4","src/instances.rs":"d790933204e530e1264a289ef2090193d19006d082fbeba37a204b2f811d10df","src/lib.rs":"91f84effdbe8f4dbeb8f3faa26c2b4979e6ee5ebc7683aba7a7784c7e79823c3","src/linking.rs":"830516e338fbe79dad86eab3476944bb810af57b3d1a3f215d3770327d379d9e","src/memories.rs":"950285bcf52e4dd6647b1efda05280a3c93c966ff0da33695e2da187dbff4a12","src/modules.rs":"9d92969c27f0fd59efa14c7bf9ac7b96c951c379de3f85907b387184dd571f3a","src/start.rs":"a2466aba18cd194dbd17ac9103f63639f8538cba765891f07b410107b752bafd","src/tables.rs":"6102a61c69046f826ca8cd4738120c7988f3b639d667ad453b9e1af9052c513e","src/types.rs":"9d268a4437922f607f23703b94b0ada05f6266b6c08a362fd4674649b3e68976","tests/linking.rs":"43025bd4a1270a6a925f421ba728e0ad180ac8e5ea6cb80f2fc443153a5c4ec4"},"package":"2caacc74c68c74f0008c4055cdf509c43e623775eaf73323bb818dcf666ed9bd"}

30
third_party/rust/wasm-encoder/Cargo.toml vendored Normal file
View file

@@ -0,0 +1,30 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies
#
# If you believe there's an error in this file please file an
# issue against the rust-lang/cargo repository. If you're
# editing this file be aware that the upstream Cargo.toml
# will likely look very different (and much more reasonable)
[package]
edition = "2018"
name = "wasm-encoder"
version = "0.6.0"
authors = ["Nick Fitzgerald <fitzgen@gmail.com>"]
description = "A low-level WebAssembly encoder.\n"
homepage = "https://github.com/bytecodealliance/wasm-tools/tree/main/crates/wasm-encoder"
documentation = "https://docs.rs/wasm-encoder"
readme = "README.md"
license = "Apache-2.0 WITH LLVM-exception"
repository = "https://github.com/bytecodealliance/wasm-tools/tree/main/crates/wasm-encoder"
[dependencies.leb128]
version = "0.2.4"
[dev-dependencies.anyhow]
version = "1.0.38"
[dev-dependencies.tempfile]
version = "3.2.0"

220
third_party/rust/wasm-encoder/LICENSE vendored Normal file
View file

@@ -0,0 +1,220 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
--- LLVM Exceptions to the Apache 2.0 License ----
As an exception, if, as a result of your compiling your source code, portions
of this Software are embedded into an Object form of such source code, you
may redistribute such embedded portions in such Object form without complying
with the conditions of Sections 4(a), 4(b) and 4(d) of the License.
In addition, if you combine or link compiled forms of this Software with
software that is licensed under the GPLv2 ("Combined Software") and if a
court of competent jurisdiction determines that the patent provision (Section
3), the indemnity provision (Section 9) or other Section of the License
conflicts with the conditions of the GPLv2, you may retroactively and
prospectively choose to deem waived or otherwise exclude such Section(s) of
the License, but only in their entirety and only with respect to the Combined
Software.

81
third_party/rust/wasm-encoder/README.md vendored Normal file
View file

@@ -0,0 +1,81 @@
<div align="center">
<h1><code>wasm-encoder</code></h1>
<strong>A <a href="https://bytecodealliance.org/">Bytecode Alliance</a> project</strong>
<p>
<strong>A WebAssembly encoder for Rust.</strong>
</p>
<p>
<a href="https://crates.io/crates/wasm-encoder"><img src="https://img.shields.io/crates/v/wasm-encoder.svg?style=flat-square" alt="Crates.io version" /></a>
<a href="https://crates.io/crates/wasm-encoder"><img src="https://img.shields.io/crates/d/wasm-encoder.svg?style=flat-square" alt="Download" /></a>
<a href="https://docs.rs/wasm-encoder/"><img src="https://img.shields.io/static/v1?label=docs&message=wasm-encoder&color=blue&style=flat-square" alt="docs.rs docs" /></a>
</p>
</div>
## Usage
Add this to your `Cargo.toml`:
```toml
[dependencies]
wasm-encoder = "0.3"
```
And then you can encode WebAssembly binaries via:
```rust
use wasm_encoder::{
CodeSection, Export, ExportSection, Function, FunctionSection, Instruction,
Module, TypeSection, ValType,
};
let mut module = Module::new();
// Encode the type section.
let mut types = TypeSection::new();
let params = vec![ValType::I32, ValType::I32];
let results = vec![ValType::I32];
types.function(params, results);
module.section(&types);
// Encode the function section.
let mut functions = FunctionSection::new();
let type_index = 0;
functions.function(type_index);
module.section(&functions);
// Encode the export section.
let mut exports = ExportSection::new();
exports.export("f", Export::Function(0));
module.section(&exports);
// Encode the code section.
let mut codes = CodeSection::new();
let locals = vec![];
let mut f = Function::new(locals);
f.instruction(Instruction::LocalGet(0));
f.instruction(Instruction::LocalGet(1));
f.instruction(Instruction::I32Add);
f.instruction(Instruction::End);
codes.function(&f);
module.section(&codes);
// Extract the encoded Wasm bytes for this module.
let wasm_bytes = module.finish();
// We generated a valid Wasm module!
assert!(wasmparser::validate(&wasm_bytes).is_ok());
```
# License
This project is licensed under the Apache 2.0 license with the LLVM exception.
See [LICENSE](LICENSE) for more details.
### Contribution
Unless you explicitly state otherwise, any contribution intentionally submitted
for inclusion in this project by you, as defined in the Apache-2.0 license,
shall be licensed as above, without any additional terms or conditions.

97
third_party/rust/wasm-encoder/src/aliases.rs vendored Normal file
View file

@@ -0,0 +1,97 @@
use super::*;
/// An encoder for the alias section.
///
/// Note that this is part of the [module linking proposal][proposal] and is not
/// currently part of stable WebAssembly.
///
/// [proposal]: https://github.com/webassembly/module-linking
///
/// # Example
///
/// ```
/// use wasm_encoder::{Module, AliasSection, ItemKind};
///
/// let mut aliases = AliasSection::new();
/// aliases.outer_type(0, 2);
/// aliases.instance_export(0, ItemKind::Function, "foo");
///
/// let mut module = Module::new();
/// module.section(&aliases);
///
/// let wasm_bytes = module.finish();
/// ```
#[derive(Clone, Debug)]
pub struct AliasSection {
bytes: Vec<u8>,
num_added: u32,
}
impl AliasSection {
/// Construct a new alias section encoder.
pub fn new() -> AliasSection {
AliasSection {
bytes: vec![],
num_added: 0,
}
}
/// How many aliases have been defined inside this section so far?
pub fn len(&self) -> u32 {
self.num_added
}
/// Define an alias that references the export of a defined instance.
pub fn instance_export(
&mut self,
instance: u32,
kind: crate::ItemKind,
name: &str,
) -> &mut Self {
self.bytes.push(0x00);
self.bytes.extend(encoders::u32(instance));
self.bytes.push(kind as u8);
self.bytes.extend(encoders::str(name));
self.num_added += 1;
self
}
/// Define an alias that references an outer module's type.
pub fn outer_type(&mut self, depth: u32, ty: u32) -> &mut Self {
self.bytes.push(0x01);
self.bytes.extend(encoders::u32(depth));
self.bytes.push(0x07);
self.bytes.extend(encoders::u32(ty));
self.num_added += 1;
self
}
/// Define an alias that references an outer module's module.
pub fn outer_module(&mut self, depth: u32, module: u32) -> &mut Self {
self.bytes.push(0x01);
self.bytes.extend(encoders::u32(depth));
self.bytes.push(ItemKind::Module as u8);
self.bytes.extend(encoders::u32(module));
self.num_added += 1;
self
}
}
impl Section for AliasSection {
fn id(&self) -> u8 {
SectionId::Alias.into()
}
fn encode<S>(&self, sink: &mut S)
where
S: Extend<u8>,
{
let num_added = encoders::u32(self.num_added);
let n = num_added.len();
sink.extend(
encoders::u32(u32::try_from(n + self.bytes.len()).unwrap())
.chain(num_added)
.chain(self.bytes.iter().copied()),
);
}
}

2116
third_party/rust/wasm-encoder/src/code.rs vendored Normal file

The diff for this file is not shown because of its large size. Load diff

59
third_party/rust/wasm-encoder/src/custom.rs vendored Normal file
View file

@@ -0,0 +1,59 @@
use super::*;
/// A custom section holding arbitrary data.
#[derive(Clone, Debug)]
pub struct CustomSection<'a> {
/// The name of this custom section.
pub name: &'a str,
/// This custom section's data.
pub data: &'a [u8],
}
impl Section for CustomSection<'_> {
fn id(&self) -> u8 {
SectionId::Custom.into()
}
fn encode<S>(&self, sink: &mut S)
where
S: Extend<u8>,
{
let name_len = encoders::u32(u32::try_from(self.name.len()).unwrap());
let n = name_len.len();
sink.extend(
encoders::u32(u32::try_from(n + self.name.len() + self.data.len()).unwrap())
.chain(name_len)
.chain(self.name.as_bytes().iter().copied())
.chain(self.data.iter().copied()),
);
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_custom_section() {
let custom = CustomSection {
name: "test",
data: &[11, 22, 33, 44],
};
let mut encoded = vec![];
custom.encode(&mut encoded);
#[rustfmt::skip]
assert_eq!(encoded, vec![
// LEB128 length of section.
9,
// LEB128 length of name.
4,
// Name.
b't', b'e', b's', b't',
// Data.
11, 22, 33, 44,
]);
}
}

190
third_party/rust/wasm-encoder/src/data.rs vendored Normal file

@@ -0,0 +1,190 @@
use super::*;
/// An encoder for the data section.
///
/// # Example
///
/// ```
/// use wasm_encoder::{
/// DataSection, Instruction, MemorySection, MemoryType,
/// Module,
/// };
///
/// let mut memory = MemorySection::new();
/// memory.memory(MemoryType {
/// minimum: 1,
/// maximum: None,
/// memory64: false,
/// });
///
/// let mut data = DataSection::new();
/// let memory_index = 0;
/// let offset = Instruction::I32Const(42);
/// let segment_data = b"hello";
/// data.active(memory_index, offset, segment_data.iter().copied());
///
/// let mut module = Module::new();
/// module
/// .section(&memory)
/// .section(&data);
///
/// let wasm_bytes = module.finish();
/// ```
#[derive(Clone, Debug)]
pub struct DataSection {
bytes: Vec<u8>,
num_added: u32,
}
/// A segment in the data section.
#[derive(Clone, Copy, Debug)]
pub struct DataSegment<'a, D> {
/// This data segment's mode.
pub mode: DataSegmentMode<'a>,
/// This data segment's data.
pub data: D,
}
/// A data segment's mode.
#[derive(Clone, Copy, Debug)]
pub enum DataSegmentMode<'a> {
/// An active data segment.
Active {
/// The memory this segment applies to.
memory_index: u32,
/// The offset at which this segment's data is initialized.
offset: Instruction<'a>,
},
/// A passive data segment.
///
/// Passive data segments are part of the bulk memory proposal.
Passive,
}
impl DataSection {
/// Create a new data section encoder.
pub fn new() -> DataSection {
DataSection {
bytes: vec![],
num_added: 0,
}
}
/// How many segments have been defined inside this section so far?
pub fn len(&self) -> u32 {
self.num_added
}
/// Define a data segment.
pub fn segment<D>(&mut self, segment: DataSegment<D>) -> &mut Self
where
D: IntoIterator<Item = u8>,
D::IntoIter: ExactSizeIterator,
{
match segment.mode {
DataSegmentMode::Passive => {
self.bytes.push(0x01);
}
DataSegmentMode::Active {
memory_index: 0,
offset,
} => {
self.bytes.push(0x00);
offset.encode(&mut self.bytes);
Instruction::End.encode(&mut self.bytes);
}
DataSegmentMode::Active {
memory_index,
offset,
} => {
self.bytes.push(0x02);
self.bytes.extend(encoders::u32(memory_index));
offset.encode(&mut self.bytes);
Instruction::End.encode(&mut self.bytes);
}
}
let data = segment.data.into_iter();
self.bytes
.extend(encoders::u32(u32::try_from(data.len()).unwrap()));
self.bytes.extend(data);
self.num_added += 1;
self
}
/// Define an active data segment.
pub fn active<'a, D>(
&mut self,
memory_index: u32,
offset: Instruction<'a>,
data: D,
) -> &mut Self
where
D: IntoIterator<Item = u8>,
D::IntoIter: ExactSizeIterator,
{
self.segment(DataSegment {
mode: DataSegmentMode::Active {
memory_index,
offset,
},
data,
})
}
/// Define a passive data segment.
///
/// Passive data segments are part of the bulk memory proposal.
pub fn passive<'a, D>(&mut self, data: D) -> &mut Self
where
D: IntoIterator<Item = u8>,
D::IntoIter: ExactSizeIterator,
{
self.segment(DataSegment {
mode: DataSegmentMode::Passive,
data,
})
}
}
impl Section for DataSection {
fn id(&self) -> u8 {
SectionId::Data.into()
}
fn encode<S>(&self, sink: &mut S)
where
S: Extend<u8>,
{
let num_added = encoders::u32(self.num_added);
let n = num_added.len();
sink.extend(
encoders::u32(u32::try_from(n + self.bytes.len()).unwrap())
.chain(num_added)
.chain(self.bytes.iter().copied()),
);
}
}
/// An encoder for the data count section.
#[derive(Clone, Copy, Debug)]
pub struct DataCountSection {
/// The number of segments in the data section.
pub count: u32,
}
impl Section for DataCountSection {
fn id(&self) -> u8 {
SectionId::DataCount.into()
}
fn encode<S>(&self, sink: &mut S)
where
S: Extend<u8>,
{
let count = encoders::u32(self.count);
let n = count.len();
sink.extend(encoders::u32(u32::try_from(n).unwrap()).chain(count));
}
}
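A minimal sketch of the mode flag handled by `DataSection::segment` above (assuming `wasm_encoder` is in scope): an active segment targeting memory 0 takes the compact `0x00` form, so the payload is the count, the mode flag, the offset expression plus `end`, and the length-prefixed bytes.

```rust
use wasm_encoder::{DataSection, Instruction, Section};

let mut data = DataSection::new();
data.active(0, Instruction::I32Const(0), b"hi".iter().copied());

let mut sink = Vec::new();
data.encode(&mut sink);
// For a payload this small the outer LEB128 size prefix is a single byte
// equal to the remaining length.
assert_eq!(sink[0] as usize, sink.len() - 1);
assert_eq!(sink[1], 1); // one segment
assert_eq!(sink[2], 0x00); // active mode, memory index 0
```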

239
third_party/rust/wasm-encoder/src/elements.rs vendored Normal file

@@ -0,0 +1,239 @@
use super::*;
/// An encoder for the element section.
///
/// # Example
///
/// ```
/// use wasm_encoder::{
/// Elements, ElementSection, Instruction, Module, TableSection, TableType,
/// ValType,
/// };
///
/// let mut tables = TableSection::new();
/// tables.table(TableType {
/// element_type: ValType::FuncRef,
/// minimum: 128,
/// maximum: None,
/// });
///
/// let mut elements = ElementSection::new();
/// let table_index = 0;
/// let offset = Instruction::I32Const(42);
/// let element_type = ValType::FuncRef;
/// let functions = Elements::Functions(&[
/// // Function indices...
/// ]);
/// elements.active(Some(table_index), offset, element_type, functions);
///
/// let mut module = Module::new();
/// module
/// .section(&tables)
/// .section(&elements);
///
/// let wasm_bytes = module.finish();
/// ```
#[derive(Clone, Debug)]
pub struct ElementSection {
bytes: Vec<u8>,
num_added: u32,
}
/// A sequence of elements in a segment in the element section.
#[derive(Clone, Copy, Debug)]
pub enum Elements<'a> {
/// A sequence of references to functions by their indices.
Functions(&'a [u32]),
/// A sequence of reference expressions.
Expressions(&'a [Element]),
}
/// An element in a segment in the element section.
#[derive(Clone, Copy, Debug)]
pub enum Element {
/// A null reference.
Null,
/// A `ref.func n`.
Func(u32),
}
/// An element segment's mode.
#[derive(Clone, Copy, Debug)]
pub enum ElementMode<'a> {
/// A passive element segment.
///
/// Passive segments are part of the bulk memory proposal.
Passive,
/// A declared element segment.
///
/// Declared segments are part of the bulk memory proposal.
Declared,
/// An active element segment.
Active {
/// The table index.
///
/// `None` is implicitly table `0`. Non-`None` tables are part of the
/// reference types proposal.
table: Option<u32>,
/// The offset within the table to place this segment.
offset: Instruction<'a>,
},
}
/// An element segment in the element section.
#[derive(Clone, Copy, Debug)]
pub struct ElementSegment<'a> {
/// The element segment's mode.
pub mode: ElementMode<'a>,
/// The element segment's type.
pub element_type: ValType,
/// This segment's elements.
pub elements: Elements<'a>,
}
impl ElementSection {
/// Create a new element section encoder.
pub fn new() -> ElementSection {
ElementSection {
bytes: vec![],
num_added: 0,
}
}
/// How many segments have been defined inside this section so far?
pub fn len(&self) -> u32 {
self.num_added
}
/// Define an element segment.
pub fn segment<'a>(&mut self, segment: ElementSegment<'a>) -> &mut Self {
let expr_bit = match segment.elements {
Elements::Expressions(_) => 0b100,
Elements::Functions(_) => 0b000,
};
match &segment.mode {
ElementMode::Active {
table: None,
offset,
} => {
self.bytes.extend(encoders::u32(0x00 | expr_bit));
offset.encode(&mut self.bytes);
Instruction::End.encode(&mut self.bytes);
}
ElementMode::Passive => {
self.bytes.extend(encoders::u32(0x01 | expr_bit));
if expr_bit == 0 {
self.bytes.push(0x00); // elemkind == funcref
} else {
self.bytes.push(segment.element_type.into());
}
}
ElementMode::Active {
table: Some(i),
offset,
} => {
self.bytes.extend(encoders::u32(0x02 | expr_bit));
self.bytes.extend(encoders::u32(*i));
offset.encode(&mut self.bytes);
Instruction::End.encode(&mut self.bytes);
if expr_bit == 0 {
self.bytes.push(0x00); // elemkind == funcref
} else {
self.bytes.push(segment.element_type.into());
}
}
ElementMode::Declared => {
self.bytes.extend(encoders::u32(0x03 | expr_bit));
if expr_bit == 0 {
self.bytes.push(0x00); // elemkind == funcref
} else {
self.bytes.push(segment.element_type.into());
}
}
}
match segment.elements {
Elements::Functions(fs) => {
self.bytes
.extend(encoders::u32(u32::try_from(fs.len()).unwrap()));
for f in fs {
self.bytes.extend(encoders::u32(*f));
}
}
Elements::Expressions(e) => {
self.bytes.extend(encoders::u32(e.len() as u32));
for expr in e {
match expr {
Element::Func(i) => Instruction::RefFunc(*i).encode(&mut self.bytes),
Element::Null => {
Instruction::RefNull(segment.element_type).encode(&mut self.bytes)
}
}
Instruction::End.encode(&mut self.bytes);
}
}
}
self.num_added += 1;
self
}
/// Define an active element segment.
pub fn active<'a>(
&mut self,
table_index: Option<u32>,
offset: Instruction,
element_type: ValType,
elements: Elements<'a>,
) -> &mut Self {
self.segment(ElementSegment {
mode: ElementMode::Active {
table: table_index,
offset,
},
element_type,
elements,
})
}
/// Encode a passive element segment.
///
/// Passive segments are part of the bulk memory proposal.
pub fn passive<'a>(&mut self, element_type: ValType, elements: Elements<'a>) -> &mut Self {
self.segment(ElementSegment {
mode: ElementMode::Passive,
element_type,
elements,
})
}
/// Encode a declared element segment.
///
/// Declared segments are part of the bulk memory proposal.
pub fn declared<'a>(&mut self, element_type: ValType, elements: Elements<'a>) -> &mut Self {
self.segment(ElementSegment {
mode: ElementMode::Declared,
element_type,
elements,
})
}
}
impl Section for ElementSection {
fn id(&self) -> u8 {
SectionId::Element.into()
}
fn encode<S>(&self, sink: &mut S)
where
S: Extend<u8>,
{
let num_added = encoders::u32(self.num_added);
let n = num_added.len();
sink.extend(
encoders::u32(u32::try_from(n + self.bytes.len()).unwrap())
.chain(num_added)
.chain(self.bytes.iter().copied()),
);
}
}
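A minimal sketch of the flag scheme in `ElementSection::segment` above: plain function indices leave the expression bit clear, so a passive segment encodes as flag `0x01`, the `0x00` funcref elemkind, and the index vector (assuming `wasm_encoder` is in scope).

```rust
use wasm_encoder::{ElementSection, Elements, Section, ValType};

let mut elements = ElementSection::new();
elements.passive(ValType::FuncRef, Elements::Functions(&[7]));

let mut sink = Vec::new();
elements.encode(&mut sink);
// size, count, flags (passive), elemkind (funcref), vec len, function index
assert_eq!(sink, vec![5, 1, 0x01, 0x00, 1, 7]);
```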

70
third_party/rust/wasm-encoder/src/encoders.rs vendored Normal file

@@ -0,0 +1,70 @@
//! Low-level encoders.
//!
//! This module provides low-level encoders that can be used (for example) to
//! define your own custom section encodings.
use std::convert::TryFrom;
/// Encode a `u32` as a ULEB128.
pub fn u32(n: u32) -> impl ExactSizeIterator<Item = u8> {
let mut buf = [0; 5];
let n = leb128::write::unsigned(&mut &mut buf[..], n.into()).unwrap();
<_>::into_iter(buf).take(n)
}
/// Encode a `u64` as a ULEB128.
pub fn u64(n: u64) -> impl ExactSizeIterator<Item = u8> {
let mut buf = [0; 10];
let n = leb128::write::unsigned(&mut &mut buf[..], n.into()).unwrap();
<_>::into_iter(buf).take(n)
}
/// Encode an `i32` as a SLEB128.
pub fn s32(x: i32) -> impl ExactSizeIterator<Item = u8> {
let mut buf = [0; 5];
let n = leb128::write::signed(&mut &mut buf[..], x.into()).unwrap();
<_>::into_iter(buf).take(n)
}
/// Encode an `i64` that uses at most 33 bits as a SLEB128.
///
/// # Panics
///
/// Panics if more than 33 bits are used.
///
/// ```
/// wasm_encoder::encoders::s33(1 << 32);
/// ```
///
/// ```should_panic
/// wasm_encoder::encoders::s33(1 << 33);
/// ```
///
/// ```
/// wasm_encoder::encoders::s33(-1 << 32);
/// ```
///
/// ```should_panic
/// wasm_encoder::encoders::s33(-1 << 33);
/// ```
pub fn s33(x: i64) -> impl ExactSizeIterator<Item = u8> {
assert!({
let mask = 1 << 33 << 30 >> 30;
x != mask && (x & mask == 0) == (x >= 0)
});
let mut buf = [0; 5];
let n = leb128::write::signed(&mut &mut buf[..], x).unwrap();
<_>::into_iter(buf).take(n)
}
/// Encode an `i64` as a SLEB128.
pub fn s64(x: i64) -> impl ExactSizeIterator<Item = u8> {
let mut buf = [0; 10];
let n = leb128::write::signed(&mut &mut buf[..], x).unwrap();
<_>::into_iter(buf).take(n)
}
/// Encode a length-prefixed UTF-8 string.
pub fn str<'a>(s: &'a str) -> impl Iterator<Item = u8> + 'a {
u32(u32::try_from(s.len()).unwrap()).chain(s.as_bytes().iter().copied())
}
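A few concrete values may help when reading the encoders above (a sketch; the multi-byte value is the standard LEB128 worked example):

```rust
use wasm_encoder::encoders;

// Values below 128 fit in one byte; larger values use continuation bytes
// with the high bit set.
assert_eq!(encoders::u32(64).collect::<Vec<_>>(), vec![0x40]);
assert_eq!(encoders::u32(624_485).collect::<Vec<_>>(), vec![0xE5, 0x8E, 0x26]);

// Strings are a LEB128 byte length followed by the UTF-8 bytes.
assert_eq!(encoders::str("hi").collect::<Vec<_>>(), vec![2, b'h', b'i']);
```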

147
third_party/rust/wasm-encoder/src/exports.rs vendored Normal file

@@ -0,0 +1,147 @@
use super::*;
/// An encoder for the export section.
///
/// # Example
///
/// ```
/// use wasm_encoder::{
/// Export, ExportSection, TableSection, TableType, Module, ValType,
/// };
///
/// let mut tables = TableSection::new();
/// tables.table(TableType {
/// element_type: ValType::FuncRef,
/// minimum: 128,
/// maximum: None,
/// });
///
/// let mut exports = ExportSection::new();
/// exports.export("my-table", Export::Table(0));
///
/// let mut module = Module::new();
/// module
/// .section(&tables)
/// .section(&exports);
///
/// let wasm_bytes = module.finish();
/// ```
#[derive(Clone, Debug)]
pub struct ExportSection {
bytes: Vec<u8>,
num_added: u32,
}
impl ExportSection {
/// Create a new export section encoder.
pub fn new() -> ExportSection {
ExportSection {
bytes: vec![],
num_added: 0,
}
}
/// How many exports have been defined inside this section so far?
pub fn len(&self) -> u32 {
self.num_added
}
/// Define an export.
pub fn export(&mut self, name: &str, export: Export) -> &mut Self {
self.bytes.extend(encoders::str(name));
export.encode(&mut self.bytes);
self.num_added += 1;
self
}
}
impl Section for ExportSection {
fn id(&self) -> u8 {
SectionId::Export.into()
}
fn encode<S>(&self, sink: &mut S)
where
S: Extend<u8>,
{
let num_added = encoders::u32(self.num_added);
let n = num_added.len();
sink.extend(
encoders::u32(u32::try_from(n + self.bytes.len()).unwrap())
.chain(num_added)
.chain(self.bytes.iter().copied()),
);
}
}
/// A WebAssembly export.
#[derive(Clone, Copy, Debug)]
pub enum Export {
/// An export of the `n`th function.
Function(u32),
/// An export of the `n`th table.
Table(u32),
/// An export of the `n`th memory.
Memory(u32),
/// An export of the `n`th global.
Global(u32),
/// An export of the `n`th instance.
///
/// Note that this is part of the [module linking proposal][proposal] and is
/// not currently part of stable WebAssembly.
///
/// [proposal]: https://github.com/webassembly/module-linking
Instance(u32),
/// An export of the `n`th module.
///
/// Note that this is part of the [module linking proposal][proposal] and is
/// not currently part of stable WebAssembly.
///
/// [proposal]: https://github.com/webassembly/module-linking
Module(u32),
}
impl Export {
pub(crate) fn encode(&self, bytes: &mut Vec<u8>) {
let idx = match *self {
Export::Function(x) => {
bytes.push(ItemKind::Function as u8);
x
}
Export::Table(x) => {
bytes.push(ItemKind::Table as u8);
x
}
Export::Memory(x) => {
bytes.push(ItemKind::Memory as u8);
x
}
Export::Global(x) => {
bytes.push(ItemKind::Global as u8);
x
}
Export::Instance(x) => {
bytes.push(ItemKind::Instance as u8);
x
}
Export::Module(x) => {
bytes.push(ItemKind::Module as u8);
x
}
};
bytes.extend(encoders::u32(idx));
}
}
/// Kinds of WebAssembly items
#[allow(missing_docs)]
#[repr(u8)]
#[derive(Clone, Copy, Debug)]
pub enum ItemKind {
Function = 0x00,
Table = 0x01,
Memory = 0x02,
Global = 0x03,
Module = 0x05,
Instance = 0x06,
}
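A minimal sketch of the export encoding above: each entry is a length-prefixed name, an `ItemKind` byte, and an index (assuming `wasm_encoder` is in scope).

```rust
use wasm_encoder::{Export, ExportSection, Section};

let mut exports = ExportSection::new();
exports.export("f", Export::Function(3));

let mut sink = Vec::new();
exports.encode(&mut sink);
// size, count, name len, name, kind (function = 0x00), function index
assert_eq!(sink, vec![5, 1, 1, b'f', 0x00, 3]);
```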

68
third_party/rust/wasm-encoder/src/functions.rs vendored Normal file

@@ -0,0 +1,68 @@
use super::*;
/// An encoder for the function section.
///
/// # Example
///
/// ```
/// use wasm_encoder::{Module, FunctionSection, ValType};
///
/// let mut functions = FunctionSection::new();
/// let type_index = 0;
/// functions.function(type_index);
///
/// let mut module = Module::new();
/// module.section(&functions);
///
/// // Note: this will generate an invalid module because we didn't generate a
/// // code section containing the function body. See the documentation for
/// // `CodeSection` for details.
///
/// let wasm_bytes = module.finish();
/// ```
#[derive(Clone, Debug)]
pub struct FunctionSection {
bytes: Vec<u8>,
num_added: u32,
}
impl FunctionSection {
/// Construct a new function section encoder.
pub fn new() -> FunctionSection {
FunctionSection {
bytes: vec![],
num_added: 0,
}
}
/// How many functions have been defined inside this section so far?
pub fn len(&self) -> u32 {
self.num_added
}
/// Define a function that uses the given type.
pub fn function(&mut self, type_index: u32) -> &mut Self {
self.bytes.extend(encoders::u32(type_index));
self.num_added += 1;
self
}
}
impl Section for FunctionSection {
fn id(&self) -> u8 {
SectionId::Function.into()
}
fn encode<S>(&self, sink: &mut S)
where
S: Extend<u8>,
{
let num_added = encoders::u32(self.num_added);
let n = num_added.len();
sink.extend(
encoders::u32(u32::try_from(n + self.bytes.len()).unwrap())
.chain(num_added)
.chain(self.bytes.iter().copied()),
);
}
}

87
third_party/rust/wasm-encoder/src/globals.rs vendored Normal file

@@ -0,0 +1,87 @@
use super::*;
/// An encoder for the global section.
///
/// # Example
///
/// ```
/// use wasm_encoder::{Module, GlobalSection, GlobalType, Instruction, ValType};
///
/// let mut globals = GlobalSection::new();
/// globals.global(
/// GlobalType {
/// val_type: ValType::I32,
/// mutable: false,
/// },
/// Instruction::I32Const(42),
/// );
///
/// let mut module = Module::new();
/// module.section(&globals);
///
/// let wasm_bytes = module.finish();
/// ```
#[derive(Clone, Debug)]
pub struct GlobalSection {
bytes: Vec<u8>,
num_added: u32,
}
impl GlobalSection {
/// Create a new global section encoder.
pub fn new() -> GlobalSection {
GlobalSection {
bytes: vec![],
num_added: 0,
}
}
/// How many globals have been defined inside this section so far?
pub fn len(&self) -> u32 {
self.num_added
}
/// Define a global.
pub fn global(&mut self, global_type: GlobalType, init_expr: Instruction) -> &mut Self {
global_type.encode(&mut self.bytes);
init_expr.encode(&mut self.bytes);
Instruction::End.encode(&mut self.bytes);
self.num_added += 1;
self
}
}
impl Section for GlobalSection {
fn id(&self) -> u8 {
SectionId::Global.into()
}
fn encode<S>(&self, sink: &mut S)
where
S: Extend<u8>,
{
let num_added = encoders::u32(self.num_added);
let n = num_added.len();
sink.extend(
encoders::u32(u32::try_from(n + self.bytes.len()).unwrap())
.chain(num_added)
.chain(self.bytes.iter().copied()),
);
}
}
/// A global's type.
#[derive(Clone, Copy, Debug)]
pub struct GlobalType {
/// This global's value type.
pub val_type: ValType,
/// Whether this global is mutable or not.
pub mutable: bool,
}
impl GlobalType {
pub(crate) fn encode(&self, bytes: &mut Vec<u8>) {
bytes.push(self.val_type.into());
bytes.push(self.mutable as u8);
}
}
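A minimal sketch of the `GlobalType` layout above: a value-type byte followed by a mutability byte, with the init expression (and an implicit `end`) appended by `GlobalSection::global` (assuming `wasm_encoder` is in scope).

```rust
use wasm_encoder::{GlobalSection, GlobalType, Instruction, Section, ValType};

let mut globals = GlobalSection::new();
globals.global(
    GlobalType { val_type: ValType::I32, mutable: true },
    Instruction::I32Const(42),
);

let mut sink = Vec::new();
globals.encode(&mut sink);
// sink = [size, count, value type, mutability, init expr..., end]
assert_eq!(sink[2], ValType::I32 as u8); // 0x7F
assert_eq!(sink[3], 1); // mutable
```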

155
third_party/rust/wasm-encoder/src/imports.rs vendored Normal file

@@ -0,0 +1,155 @@
use super::*;
use std::convert::TryFrom;
/// An encoder for the import section.
///
/// # Example
///
/// ```
/// use wasm_encoder::{Module, ImportSection, MemoryType};
///
/// let mut imports = ImportSection::new();
/// imports.import(
/// "env",
/// Some("memory"),
/// MemoryType {
/// minimum: 1,
/// maximum: None,
/// memory64: false,
/// }
/// );
///
/// let mut module = Module::new();
/// module.section(&imports);
///
/// let wasm_bytes = module.finish();
/// ```
#[derive(Clone, Debug)]
pub struct ImportSection {
bytes: Vec<u8>,
num_added: u32,
}
impl ImportSection {
/// Construct a new import section encoder.
pub fn new() -> ImportSection {
ImportSection {
bytes: vec![],
num_added: 0,
}
}
/// How many imports have been defined inside this section so far?
pub fn len(&self) -> u32 {
self.num_added
}
/// Define an import.
pub fn import(
&mut self,
module: &str,
name: Option<&str>,
ty: impl Into<EntityType>,
) -> &mut Self {
self.bytes.extend(encoders::str(module));
match name {
Some(name) => self.bytes.extend(encoders::str(name)),
None => {
self.bytes.push(0x00);
self.bytes.push(0xff);
}
}
ty.into().encode(&mut self.bytes);
self.num_added += 1;
self
}
}
impl Section for ImportSection {
fn id(&self) -> u8 {
SectionId::Import.into()
}
fn encode<S>(&self, sink: &mut S)
where
S: Extend<u8>,
{
let num_added = encoders::u32(self.num_added);
let n = num_added.len();
sink.extend(
encoders::u32(u32::try_from(n + self.bytes.len()).unwrap())
.chain(num_added)
.chain(self.bytes.iter().copied()),
);
}
}
/// The type of an entity.
#[derive(Clone, Copy, Debug)]
pub enum EntityType {
/// The `n`th type, which is a function.
Function(u32),
/// A table type.
Table(TableType),
/// A memory type.
Memory(MemoryType),
/// A global type.
Global(GlobalType),
/// The `n`th type, which is an instance.
Instance(u32),
/// The `n`th type, which is a module.
Module(u32),
}
// NB: no `impl From<u32> for EntityType` because instances and modules also use
// `u32` indices in module linking, so we would have to remove that impl when
// adding support for module linking anyways.
impl From<TableType> for EntityType {
fn from(t: TableType) -> Self {
EntityType::Table(t)
}
}
impl From<MemoryType> for EntityType {
fn from(m: MemoryType) -> Self {
EntityType::Memory(m)
}
}
impl From<GlobalType> for EntityType {
fn from(g: GlobalType) -> Self {
EntityType::Global(g)
}
}
impl EntityType {
pub(crate) fn encode(&self, dst: &mut Vec<u8>) {
match self {
EntityType::Function(x) => {
dst.push(0x00);
dst.extend(encoders::u32(*x));
}
EntityType::Table(ty) => {
dst.push(0x01);
ty.encode(dst);
}
EntityType::Memory(ty) => {
dst.push(0x02);
ty.encode(dst);
}
EntityType::Global(ty) => {
dst.push(0x03);
ty.encode(dst);
}
EntityType::Module(ty) => {
dst.push(0x05);
dst.extend(encoders::u32(*ty));
}
EntityType::Instance(ty) => {
dst.push(0x06);
dst.extend(encoders::u32(*ty));
}
}
}
}
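A minimal sketch of a two-level import as encoded above: the module and field strings, then the `EntityType` tag and its payload (assuming `wasm_encoder` is in scope).

```rust
use wasm_encoder::{ImportSection, MemoryType, Section};

let mut imports = ImportSection::new();
imports.import(
    "env",
    Some("mem"),
    MemoryType { minimum: 1, maximum: None, memory64: false },
);

let mut sink = Vec::new();
imports.encode(&mut sink);
// size, count, "env", "mem", entity tag 0x02 (memory), limits flags, minimum
assert_eq!(
    sink,
    vec![12, 1, 3, b'e', b'n', b'v', 3, b'm', b'e', b'm', 0x02, 0x00, 1],
);
```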

86
third_party/rust/wasm-encoder/src/instances.rs vendored Normal file

@@ -0,0 +1,86 @@
use super::*;
/// An encoder for the instance section.
///
/// Note that this is part of the [module linking proposal][proposal] and is not
/// currently part of stable WebAssembly.
///
/// [proposal]: https://github.com/webassembly/module-linking
///
/// # Example
///
/// ```
/// use wasm_encoder::{Module, InstanceSection, Export};
///
/// let mut instances = InstanceSection::new();
/// instances.instantiate(0, vec![
/// ("x", Export::Function(0)),
/// ("", Export::Module(2)),
/// ("foo", Export::Global(0)),
/// ]);
///
/// let mut module = Module::new();
/// module.section(&instances);
///
/// let wasm_bytes = module.finish();
/// ```
#[derive(Clone, Debug)]
pub struct InstanceSection {
bytes: Vec<u8>,
num_added: u32,
}
impl InstanceSection {
/// Construct a new instance section encoder.
pub fn new() -> InstanceSection {
InstanceSection {
bytes: vec![],
num_added: 0,
}
}
/// How many instances have been defined inside this section so far?
pub fn len(&self) -> u32 {
self.num_added
}
/// Define an instantiation of the given module with the given items as
/// arguments to the instantiation.
pub fn instantiate<'a, I>(&mut self, module: u32, args: I) -> &mut Self
where
I: IntoIterator<Item = (&'a str, Export)>,
I::IntoIter: ExactSizeIterator,
{
let args = args.into_iter();
self.bytes.push(0x00);
self.bytes.extend(encoders::u32(module));
self.bytes
.extend(encoders::u32(u32::try_from(args.len()).unwrap()));
for (name, export) in args {
self.bytes.extend(encoders::str(name));
export.encode(&mut self.bytes);
}
self.num_added += 1;
self
}
}
impl Section for InstanceSection {
fn id(&self) -> u8 {
SectionId::Instance.into()
}
fn encode<S>(&self, sink: &mut S)
where
S: Extend<u8>,
{
let num_added = encoders::u32(self.num_added);
let n = num_added.len();
sink.extend(
encoders::u32(u32::try_from(n + self.bytes.len()).unwrap())
.chain(num_added)
.chain(self.bytes.iter().copied()),
);
}
}

265
third_party/rust/wasm-encoder/src/lib.rs vendored Normal file

@@ -0,0 +1,265 @@
//! A WebAssembly encoder.
//!
//! The main builder is the [`Module`]. You can build a section with a
//! section-specific builder, like [`TypeSection`] or [`ImportSection`], and
//! then add it to the module with [`Module::section`]. When you are finished
//! building the module, call either [`Module::as_slice`] or [`Module::finish`]
//! to get the encoded bytes. The former gives a shared reference to the
//! underlying bytes as a slice, while the latter gives you ownership of them as
//! a vector.
//!
//! # Example
//!
//! If we wanted to build this module:
//!
//! ```wasm
//! (module
//! (type (func (param i32 i32) (result i32)))
//! (func (type 0)
//! local.get 0
//! local.get 1
//! i32.add)
//! (export "f" (func 0)))
//! ```
//!
//! then we would do this:
//!
//! ```
//! use wasm_encoder::{
//! CodeSection, Export, ExportSection, Function, FunctionSection, Instruction,
//! Module, TypeSection, ValType,
//! };
//!
//! let mut module = Module::new();
//!
//! // Encode the type section.
//! let mut types = TypeSection::new();
//! let params = vec![ValType::I32, ValType::I32];
//! let results = vec![ValType::I32];
//! types.function(params, results);
//! module.section(&types);
//!
//! // Encode the function section.
//! let mut functions = FunctionSection::new();
//! let type_index = 0;
//! functions.function(type_index);
//! module.section(&functions);
//!
//! // Encode the export section.
//! let mut exports = ExportSection::new();
//! exports.export("f", Export::Function(0));
//! module.section(&exports);
//!
//! // Encode the code section.
//! let mut codes = CodeSection::new();
//! let locals = vec![];
//! let mut f = Function::new(locals);
//! f.instruction(Instruction::LocalGet(0));
//! f.instruction(Instruction::LocalGet(1));
//! f.instruction(Instruction::I32Add);
//! f.instruction(Instruction::End);
//! codes.function(&f);
//! module.section(&codes);
//!
//! // Extract the encoded Wasm bytes for this module.
//! let wasm_bytes = module.finish();
//!
//! // We generated a valid Wasm module!
//! assert!(wasmparser::validate(&wasm_bytes).is_ok());
//! ```
#![deny(missing_docs, missing_debug_implementations)]
mod aliases;
mod code;
mod custom;
mod data;
mod elements;
mod exports;
mod functions;
mod globals;
mod imports;
mod instances;
mod linking;
mod memories;
mod modules;
mod start;
mod tables;
mod types;
pub use aliases::*;
pub use code::*;
pub use custom::*;
pub use data::*;
pub use elements::*;
pub use exports::*;
pub use functions::*;
pub use globals::*;
pub use imports::*;
pub use instances::*;
pub use linking::*;
pub use memories::*;
pub use modules::*;
pub use start::*;
pub use tables::*;
pub use types::*;
pub mod encoders;
use std::convert::TryFrom;
/// A Wasm module that is being encoded.
#[derive(Clone, Debug)]
pub struct Module {
bytes: Vec<u8>,
}
/// A WebAssembly section.
///
/// Various builders defined in this crate already implement this trait, but you
/// can also implement it yourself for your own custom section builders, or use
/// `RawSection` to use a bunch of raw bytes as a section.
pub trait Section {
/// This section's id.
///
/// See `SectionId` for known section ids.
fn id(&self) -> u8;
/// Write this section's data and data length prefix into the given sink.
fn encode<S>(&self, sink: &mut S)
where
S: Extend<u8>;
}
/// A section made up of uninterpreted, raw bytes.
///
/// Allows you to splat any data into a Wasm section.
#[derive(Clone, Copy, Debug)]
pub struct RawSection<'a> {
/// The id for this section.
pub id: u8,
/// The raw data for this section.
pub data: &'a [u8],
}
impl Section for RawSection<'_> {
fn id(&self) -> u8 {
self.id
}
fn encode<S>(&self, sink: &mut S)
where
S: Extend<u8>,
{
sink.extend(
encoders::u32(u32::try_from(self.data.len()).unwrap()).chain(self.data.iter().copied()),
);
}
}
impl Module {
/// Begin writing a new `Module`.
#[rustfmt::skip]
pub fn new() -> Self {
Module {
bytes: vec![
// Magic
0x00, 0x61, 0x73, 0x6D,
// Version
0x01, 0x00, 0x00, 0x00,
],
}
}
/// Write a section into this module.
///
/// It is your responsibility to define the sections in the [proper
/// order](https://webassembly.github.io/spec/core/binary/modules.html#binary-module),
/// and to ensure that each kind of section (other than custom sections) is
/// only defined once. While this is a potential footgun, it also allows you
/// to use this crate to easily construct test cases for bad Wasm module
/// encodings.
pub fn section(&mut self, section: &impl Section) -> &mut Self {
self.bytes.push(section.id());
section.encode(&mut self.bytes);
self
}
/// Get the encoded Wasm module as a slice.
pub fn as_slice(&self) -> &[u8] {
&self.bytes
}
/// Finish writing this Wasm module and extract ownership of the encoded
/// bytes.
pub fn finish(self) -> Vec<u8> {
self.bytes
}
}
/// Known section IDs.
///
/// Useful for implementing the `Section` trait, or for setting
/// `RawSection::id`.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd)]
#[repr(u8)]
#[allow(missing_docs)]
pub enum SectionId {
Custom = 0,
Type = 1,
Import = 2,
Function = 3,
Table = 4,
Memory = 5,
Global = 6,
Export = 7,
Start = 8,
Element = 9,
Code = 10,
Data = 11,
DataCount = 12,
Module = 14,
Instance = 15,
Alias = 16,
}
impl From<SectionId> for u8 {
#[inline]
fn from(id: SectionId) -> u8 {
id as u8
}
}
/// The type of a value.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[repr(u8)]
pub enum ValType {
/// The `i32` type.
I32 = 0x7F,
/// The `i64` type.
I64 = 0x7E,
/// The `f32` type.
F32 = 0x7D,
/// The `f64` type.
F64 = 0x7C,
/// The `v128` type.
///
/// Part of the SIMD proposal.
V128 = 0x7B,
/// The `funcref` type.
///
/// Part of the reference types proposal when used anywhere other than a
/// table's element type.
FuncRef = 0x70,
/// The `externref` type.
///
/// Part of the reference types proposal.
ExternRef = 0x6F,
}
impl From<ValType> for u8 {
#[inline]
fn from(t: ValType) -> u8 {
t as u8
}
}
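A minimal sketch of the pieces above: `Module::new` emits only the magic and version bytes, and `RawSection` splats arbitrary bytes behind a chosen section id (assuming `wasm_encoder` is in scope).

```rust
use wasm_encoder::{Module, RawSection, SectionId};

let mut module = Module::new();
module.section(&RawSection {
    id: SectionId::Custom.into(),
    data: &[1, 2, 3],
});

let wasm = module.finish();
// magic, version, custom section id, section size, raw data
assert_eq!(
    wasm,
    vec![0x00, 0x61, 0x73, 0x6D, 0x01, 0x00, 0x00, 0x00, 0x00, 3, 1, 2, 3],
);
```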

290
third_party/rust/wasm-encoder/src/linking.rs vendored Normal file

@@ -0,0 +1,290 @@
use super::*;
use std::convert::TryInto;
/// An encoder for the [linking custom
/// section](https://github.com/WebAssembly/tool-conventions/blob/master/Linking.md#linking-metadata-section).
///
/// This section is a non-standard convention that is supported by the LLVM
/// toolchain. It, along with associated "reloc.*" custom sections, allows you
/// to treat a Wasm module as a low-level object file that can be linked with
/// other Wasm object files to produce a final, complete Wasm module.
///
/// The linking section must come before the reloc sections.
///
/// # Example
///
/// ```
/// use wasm_encoder::{LinkingSection, Module, SymbolTable};
///
/// // Create a new linking section.
/// let mut linking = LinkingSection::new();
///
/// // Define a symbol table.
/// let mut sym_tab = SymbolTable::new();
///
/// // Define a function symbol in the symbol table.
/// let flags = SymbolTable::WASM_SYM_BINDING_LOCAL | SymbolTable::WASM_SYM_EXPORTED;
/// let func_index = 42;
/// let sym_name = "my_exported_func";
/// sym_tab.function(flags, func_index, Some(sym_name));
///
/// // Add the symbol table to our linking section.
/// linking.symbol_table(&sym_tab);
///
/// // Add the linking section to a new Wasm module and get the encoded bytes.
/// let mut module = Module::new();
/// module.section(&linking);
/// let wasm_bytes = module.finish();
/// ```
#[derive(Clone, Debug)]
pub struct LinkingSection {
bytes: Vec<u8>,
}
impl LinkingSection {
/// Construct a new encoder for the linking custom section.
pub fn new() -> Self {
LinkingSection { bytes: vec![] }
}
// TODO: `fn segment_info` for the `WASM_SEGMENT_INFO` linking subsection.
// TODO: `fn init_funcs` for the `WASM_INIT_FUNCS` linking subsection.
// TODO: `fn comdat_info` for the `WASM_COMDAT_INFO` linking subsection.
/// Add a symbol table subsection.
pub fn symbol_table(&mut self, symbol_table: &SymbolTable) -> &mut Self {
symbol_table.encode(&mut self.bytes);
self
}
}
impl Section for LinkingSection {
fn id(&self) -> u8 {
SectionId::Custom.into()
}
fn encode<S>(&self, sink: &mut S)
where
S: Extend<u8>,
{
let name_len = encoders::u32(u32::try_from("linking".len()).unwrap());
let name_len_len = name_len.len();
let version = 2;
sink.extend(
encoders::u32(
u32::try_from(name_len_len + "linking".len() + 1 + self.bytes.len()).unwrap(),
)
.chain(name_len)
.chain(b"linking".iter().copied())
.chain(encoders::u32(version))
.chain(self.bytes.iter().copied()),
);
}
}
#[allow(unused)]
const WASM_SEGMENT_INFO: u8 = 5;
#[allow(unused)]
const WASM_INIT_FUNCS: u8 = 6;
#[allow(unused)]
const WASM_COMDAT_INFO: u8 = 7;
const WASM_SYMBOL_TABLE: u8 = 8;
/// A subsection of the [linking custom section][crate::LinkingSection] that
/// provides extra information about the symbols present in this Wasm object
/// file.
#[derive(Clone, Debug)]
pub struct SymbolTable {
bytes: Vec<u8>,
num_added: u32,
}
const SYMTAB_FUNCTION: u32 = 0;
const SYMTAB_DATA: u32 = 1;
const SYMTAB_GLOBAL: u32 = 2;
#[allow(unused)]
const SYMTAB_SECTION: u32 = 3;
#[allow(unused)]
const SYMTAB_TAG: u32 = 4;
const SYMTAB_TABLE: u32 = 5;
impl SymbolTable {
/// Construct a new symbol table subsection encoder.
pub fn new() -> Self {
SymbolTable {
bytes: vec![],
num_added: 0,
}
}
/// Define a function symbol in this symbol table.
///
/// The `name` must be omitted if `index` references an imported function and
/// the `WASM_SYM_EXPLICIT_NAME` flag is not set.
pub fn function(&mut self, flags: u32, index: u32, name: Option<&str>) -> &mut Self {
self.bytes.extend(
encoders::u32(SYMTAB_FUNCTION)
.chain(encoders::u32(flags))
.chain(encoders::u32(index)),
);
if let Some(name) = name {
self.bytes.extend(
encoders::u32(name.len().try_into().unwrap())
.chain(name.as_bytes().iter().copied()),
);
}
self.num_added += 1;
self
}
/// Define a global symbol in this symbol table.
///
/// The `name` must be omitted if `index` references an imported global and
/// the `WASM_SYM_EXPLICIT_NAME` flag is not set.
pub fn global(&mut self, flags: u32, index: u32, name: Option<&str>) -> &mut Self {
self.bytes.extend(
encoders::u32(SYMTAB_GLOBAL)
.chain(encoders::u32(flags))
.chain(encoders::u32(index)),
);
if let Some(name) = name {
self.bytes.extend(
encoders::u32(name.len().try_into().unwrap())
.chain(name.as_bytes().iter().copied()),
);
}
self.num_added += 1;
self
}
// TODO: tags
/// Define a table symbol in this symbol table.
///
/// The `name` must be omitted if `index` references an imported table and
/// the `WASM_SYM_EXPLICIT_NAME` flag is not set.
pub fn table(&mut self, flags: u32, index: u32, name: Option<&str>) -> &mut Self {
self.bytes.extend(
encoders::u32(SYMTAB_TABLE)
.chain(encoders::u32(flags))
.chain(encoders::u32(index)),
);
if let Some(name) = name {
self.bytes.extend(
encoders::u32(name.len().try_into().unwrap())
.chain(name.as_bytes().iter().copied()),
);
}
self.num_added += 1;
self
}
/// Add a data symbol to this symbol table.
pub fn data(
&mut self,
flags: u32,
name: &str,
definition: Option<DataSymbolDefinition>,
) -> &mut Self {
self.bytes.extend(
encoders::u32(SYMTAB_DATA)
.chain(encoders::u32(flags))
.chain(encoders::u32(name.len().try_into().unwrap()))
.chain(name.as_bytes().iter().copied()),
);
if let Some(def) = definition {
self.bytes.extend(
encoders::u32(def.index)
.chain(encoders::u32(def.offset))
.chain(encoders::u32(def.size)),
);
}
self.num_added += 1;
self
}
// TODO: sections
fn encode(&self, bytes: &mut Vec<u8>) {
let num_added = encoders::u32(self.num_added);
let num_added_len = num_added.len();
let payload_len = num_added_len + self.bytes.len();
bytes.extend(
std::iter::once(WASM_SYMBOL_TABLE)
.chain(encoders::u32(payload_len.try_into().unwrap()))
.chain(num_added)
.chain(self.bytes.iter().copied()),
);
}
}
/// # Symbol definition flags.
impl SymbolTable {
/// This is a weak symbol.
///
/// This flag is mutually exclusive with `WASM_SYM_BINDING_LOCAL`.
///
/// When linking multiple modules defining the same symbol, all weak
/// definitions are discarded if any strong definitions exist; then if
/// multiple weak definitions exist all but one (unspecified) are discarded;
/// and finally it is an error if more than one definition remains.
pub const WASM_SYM_BINDING_WEAK: u32 = 0x1;
/// This is a local symbol.
///
/// This flag is mutually exclusive with `WASM_SYM_BINDING_WEAK`.
///
/// Local symbols are not to be exported, or linked to other
/// modules/sections. The names of all non-local symbols must be unique, but
/// the names of local symbols are not considered for uniqueness. A local
/// function or global symbol cannot reference an import.
pub const WASM_SYM_BINDING_LOCAL: u32 = 0x02;
/// This is a hidden symbol.
///
/// Hidden symbols are not to be exported when performing the final link,
/// but may be linked to other modules.
pub const WASM_SYM_VISIBILITY_HIDDEN: u32 = 0x04;
/// This symbol is not defined.
///
/// For non-data symbols, this must match whether the symbol is an import or
/// is defined; for data symbols, it determines whether a segment is specified.
pub const WASM_SYM_UNDEFINED: u32 = 0x10;
/// This symbol is intended to be exported from the wasm module to the host
/// environment.
///
/// This differs from the visibility flags in that it affects the static
/// linker.
pub const WASM_SYM_EXPORTED: u32 = 0x20;
/// This symbol uses an explicit symbol name, rather than reusing the name
/// from a wasm import.
///
/// This allows it to remap imports from foreign WebAssembly modules into
/// local symbols with different names.
pub const WASM_SYM_EXPLICIT_NAME: u32 = 0x40;
/// This symbol is intended to be included in the linker output, regardless
/// of whether it is used by the program.
pub const WASM_SYM_NO_STRIP: u32 = 0x80;
}
/// The definition of a data symbol within a symbol table.
#[derive(Clone, Debug)]
pub struct DataSymbolDefinition {
/// The index of the data segment that this symbol is in.
pub index: u32,
/// The offset of this symbol within its segment.
pub offset: u32,
/// The byte size (which can be zero) of this data symbol.
///
/// Note that `offset + size` must be less than or equal to the segment's
/// size.
pub size: u32,
}
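As a small addition to the doc example above, a sketch of how an undefined data symbol (one left for the linker to resolve) could be declared: the `WASM_SYM_UNDEFINED` flag is set and the segment definition is omitted. The symbol name here is an arbitrary placeholder.

```rust
use wasm_encoder::{LinkingSection, Module, SymbolTable};

let mut sym_tab = SymbolTable::new();
// Undefined symbols carry no DataSymbolDefinition; the flag alone marks them
// as imports to be resolved at link time. "external_blob" is a made-up name.
sym_tab.data(SymbolTable::WASM_SYM_UNDEFINED, "external_blob", None);

let mut linking = LinkingSection::new();
linking.symbol_table(&sym_tab);

let mut module = Module::new();
module.section(&linking);
let _wasm_bytes = module.finish();
```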

95
third_party/rust/wasm-encoder/src/memories.rs vendored Normal file

@@ -0,0 +1,95 @@
use super::*;
/// An encoder for the memory section.
///
/// # Example
///
/// ```
/// use wasm_encoder::{Module, MemorySection, MemoryType};
///
/// let mut memories = MemorySection::new();
/// memories.memory(MemoryType {
/// minimum: 1,
/// maximum: None,
/// memory64: false,
/// });
///
/// let mut module = Module::new();
/// module.section(&memories);
///
/// let wasm_bytes = module.finish();
/// ```
#[derive(Clone, Debug)]
pub struct MemorySection {
bytes: Vec<u8>,
num_added: u32,
}
impl MemorySection {
/// Create a new memory section encoder.
pub fn new() -> MemorySection {
MemorySection {
bytes: vec![],
num_added: 0,
}
}
/// How many memories have been defined inside this section so far?
pub fn len(&self) -> u32 {
self.num_added
}
/// Define a memory.
pub fn memory(&mut self, memory_type: MemoryType) -> &mut Self {
memory_type.encode(&mut self.bytes);
self.num_added += 1;
self
}
}
impl Section for MemorySection {
fn id(&self) -> u8 {
SectionId::Memory.into()
}
fn encode<S>(&self, sink: &mut S)
where
S: Extend<u8>,
{
let num_added = encoders::u32(self.num_added);
let n = num_added.len();
sink.extend(
encoders::u32(u32::try_from(n + self.bytes.len()).unwrap())
.chain(num_added)
.chain(self.bytes.iter().copied()),
);
}
}
/// A memory's type.
#[derive(Clone, Copy, Debug)]
pub struct MemoryType {
/// Minimum size, in pages, of this memory
pub minimum: u64,
/// Maximum size, in pages, of this memory
pub maximum: Option<u64>,
/// Whether or not this is a 64-bit memory.
pub memory64: bool,
}
impl MemoryType {
pub(crate) fn encode(&self, bytes: &mut Vec<u8>) {
let mut flags = 0;
if self.maximum.is_some() {
flags |= 0b001;
}
if self.memory64 {
flags |= 0b100;
}
bytes.push(flags);
bytes.extend(encoders::u64(self.minimum));
if let Some(max) = self.maximum {
bytes.extend(encoders::u64(max));
}
}
}
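A minimal sketch of the limits flags encoded by `MemoryType::encode` above: bit 0 marks a present maximum and bit 2 marks a 64-bit memory, so a bounded 32-bit memory encodes its type as `[0x01, min, max]` (assuming `wasm_encoder` is in scope).

```rust
use wasm_encoder::{MemorySection, MemoryType, Section};

let mut memories = MemorySection::new();
memories.memory(MemoryType { minimum: 1, maximum: Some(2), memory64: false });

let mut sink = Vec::new();
memories.encode(&mut sink);
// size, count, limits flags, minimum pages, maximum pages
assert_eq!(sink, vec![4, 1, 0x01, 1, 2]);
```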

72
third_party/rust/wasm-encoder/src/modules.rs vendored Normal file

@@ -0,0 +1,72 @@
use super::*;
/// An encoder for the module section.
///
/// Note that this is part of the [module linking proposal][proposal] and is
/// not currently part of stable WebAssembly.
///
/// [proposal]: https://github.com/webassembly/module-linking
///
/// # Example
///
/// ```
/// use wasm_encoder::{ModuleSection, Module};
///
/// let mut modules = ModuleSection::new();
/// modules.module(&Module::new());
/// modules.module(&Module::new());
///
/// let mut module = Module::new();
/// module.section(&modules);
///
/// let wasm_bytes = module.finish();
/// ```
#[derive(Clone, Debug)]
pub struct ModuleSection {
bytes: Vec<u8>,
num_added: u32,
}
impl ModuleSection {
/// Create a new module section encoder.
pub fn new() -> ModuleSection {
ModuleSection {
bytes: vec![],
num_added: 0,
}
}
/// How many modules have been defined inside this section so far?
pub fn len(&self) -> u32 {
self.num_added
}
/// Writes a module into this module section.
pub fn module(&mut self, module: &Module) -> &mut Self {
self.bytes.extend(
encoders::u32(u32::try_from(module.bytes.len()).unwrap())
.chain(module.bytes.iter().copied()),
);
self.num_added += 1;
self
}
}
impl Section for ModuleSection {
fn id(&self) -> u8 {
SectionId::Module.into()
}
fn encode<S>(&self, sink: &mut S)
where
S: Extend<u8>,
{
let num_added = encoders::u32(self.num_added);
let n = num_added.len();
sink.extend(
encoders::u32(u32::try_from(n + self.bytes.len()).unwrap())
.chain(num_added)
.chain(self.bytes.iter().copied()),
);
}
}

41
third_party/rust/wasm-encoder/src/start.rs vendored Normal file

@@ -0,0 +1,41 @@
use super::*;
/// An encoder for the start section.
///
/// # Example
///
/// Note: this doesn't actually define the function at index 0, its type, or its
/// code body, so the resulting Wasm module will be invalid. See `TypeSection`,
/// `FunctionSection`, and `CodeSection` for details on how to generate those
/// things.
///
/// ```
/// use wasm_encoder::{Module, StartSection};
///
/// let start = StartSection { function_index: 0 };
///
/// let mut module = Module::new();
/// module.section(&start);
///
/// let wasm_bytes = module.finish();
/// ```
#[derive(Clone, Copy, Debug)]
pub struct StartSection {
/// The index of the start function.
pub function_index: u32,
}
impl Section for StartSection {
fn id(&self) -> u8 {
SectionId::Start.into()
}
fn encode<S>(&self, sink: &mut S)
where
S: Extend<u8>,
{
let f = encoders::u32(self.function_index);
let n = f.len();
sink.extend(encoders::u32(n as u32).chain(f));
}
}
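A minimal sketch of the encoding above: the start section body is nothing more than the function index behind the usual size prefix (assuming `wasm_encoder` is in scope).

```rust
use wasm_encoder::{Section, StartSection};

let start = StartSection { function_index: 5 };
let mut sink = Vec::new();
start.encode(&mut sink);
// size, function index
assert_eq!(sink, vec![1, 5]);
```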

93
third_party/rust/wasm-encoder/src/tables.rs vendored Normal file

@@ -0,0 +1,93 @@
use super::*;
/// An encoder for the table section.
///
/// # Example
///
/// ```
/// use wasm_encoder::{Module, TableSection, TableType, ValType};
///
/// let mut tables = TableSection::new();
/// tables.table(TableType {
/// element_type: ValType::FuncRef,
/// minimum: 128,
/// maximum: None,
/// });
///
/// let mut module = Module::new();
/// module.section(&tables);
///
/// let wasm_bytes = module.finish();
/// ```
#[derive(Clone, Debug)]
pub struct TableSection {
bytes: Vec<u8>,
num_added: u32,
}
impl TableSection {
/// Construct a new table section encoder.
pub fn new() -> TableSection {
TableSection {
bytes: vec![],
num_added: 0,
}
}
/// How many tables have been defined inside this section so far?
pub fn len(&self) -> u32 {
self.num_added
}
/// Define a table.
pub fn table(&mut self, table_type: TableType) -> &mut Self {
table_type.encode(&mut self.bytes);
self.num_added += 1;
self
}
}
impl Section for TableSection {
fn id(&self) -> u8 {
SectionId::Table.into()
}
fn encode<S>(&self, sink: &mut S)
where
S: Extend<u8>,
{
let num_added = encoders::u32(self.num_added);
let n = num_added.len();
sink.extend(
encoders::u32(u32::try_from(n + self.bytes.len()).unwrap())
.chain(num_added)
.chain(self.bytes.iter().copied()),
);
}
}
/// A table's type.
#[derive(Clone, Copy, Debug)]
pub struct TableType {
/// The table's element type.
pub element_type: ValType,
/// Minimum size, in elements, of this table
pub minimum: u32,
/// Maximum size, in elements, of this table
pub maximum: Option<u32>,
}
impl TableType {
pub(crate) fn encode(&self, bytes: &mut Vec<u8>) {
bytes.push(self.element_type.into());
let mut flags = 0;
if self.maximum.is_some() {
flags |= 0b001;
}
bytes.push(flags);
bytes.extend(encoders::u32(self.minimum));
if let Some(max) = self.maximum {
bytes.extend(encoders::u32(max));
}
}
}
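A minimal sketch of the `TableType` layout above: the element type byte (`funcref` is `0x70`), a limits flags byte, and the minimum (plus the maximum when the flag bit is set), all behind the usual section framing (assuming `wasm_encoder` is in scope).

```rust
use wasm_encoder::{Section, TableSection, TableType, ValType};

let mut tables = TableSection::new();
tables.table(TableType {
    element_type: ValType::FuncRef,
    minimum: 1,
    maximum: None,
});

let mut sink = Vec::new();
tables.encode(&mut sink);
// size, count, element type, limits flags, minimum
assert_eq!(sink, vec![4, 1, 0x70, 0x00, 1]);
```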

Some files were not shown because too many files changed in this diff.