Bug 1885100 - add about:inference r=Mardak,fluent-reviewers,bolsson

Differential Revision: https://phabricator.services.mozilla.com/D215471
This commit is contained in:
Tarek Ziadé 2024-07-18 10:30:59 +00:00
Parent 9830be58cf
Commit 3b01b9a53e
17 changed files with 738 additions and 21 deletions

View file

@@ -118,6 +118,10 @@ static const RedirEntry kRedirMap[] = {
nsIAboutModule::URI_SAFE_FOR_UNTRUSTED_CONTENT |
nsIAboutModule::URI_CAN_LOAD_IN_CHILD | nsIAboutModule::ALLOW_SCRIPT |
nsIAboutModule::HIDE_FROM_ABOUTABOUT},
#if defined(NIGHTLY_BUILD)
{"inference", "chrome://global/content/aboutInference.html",
nsIAboutModule::ALLOW_SCRIPT | nsIAboutModule::IS_SECURE_CHROME_UI},
#endif
{"license", "chrome://global/content/license.html",
nsIAboutModule::URI_SAFE_FOR_UNTRUSTED_CONTENT |
nsIAboutModule::IS_SECURE_CHROME_UI},

View file

@@ -47,6 +47,8 @@ if not defined('MOZ_GLEAN_ANDROID'):
about_pages.append('glean')
if buildconfig.substs['MOZ_WIDGET_TOOLKIT'] != 'android' and buildconfig.substs['MOZ_WIDGET_TOOLKIT'] != 'windows':
about_pages.append('webauthn')
if defined('NIGHTLY_BUILD'):
about_pages.append('inference')
Headers = ['/docshell/build/nsDocShellModule.h']

View file

@@ -0,0 +1,60 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
@import url("chrome://global/skin/in-content/common.css");
html {
background-color: var(--in-content-page-background);
}
button {
min-width: 10px;
padding: 2px;
background: none;
}
button:hover {
background-color: var(--in-content-hover-background);
cursor: pointer;
}
body {
padding: 0 2em;
min-width: 45em;
margin: auto;
}
caption {
caption-side: top;
padding: 10px;
font-weight: bold;
}
table {
border-collapse: collapse;
font-family: sans-serif;
font-size: 0.8rem;
letter-spacing: 1px;
}
th,
td {
border: 1px solid var(--in-content-border-color);
padding: 8px 10px;
}
td {
text-align: center;
}
tr:nth-child(even) td {
background-color: var(--in-content-box-background);
}
tr:nth-child(odd) td {
background-color: var(--in-content-box-background-odd);
}
#warning {
display: none;
}

View file

@@ -0,0 +1,23 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
about-inference-title = Inference Manager
about-inference-header = ✨ Inference Manager
about-inference-description = This page gathers all information about local inference
about-inference-warning = browser.ml.enable is set to false!
about-inference-simd-supported = SIMD supported
about-inference-onnx-runtime-file = ONNX Runtime file
about-inference-downloads-description = You can delete models here to free up disk space. They will be downloaded again on first inference.
about-inference-delete-button = ❌
  .title = Delete model
about-inference-processes-title = Running Inference Processes
about-inference-models-title = Models downloaded
about-inference-yes = Yes
about-inference-no = No
about-inference-total = Total
about-inference-no-processes = No inference processes are running.
about-inference-memory = Memory
about-inference-pid = PID
about-inference-file = File
about-inference-size = Size
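
The indented .title line under about-inference-delete-button defines a Fluent attribute: when the page calls document.l10n.setAttributes on the button (as aboutInference.js does later in this commit), Fluent fills in both the button text and its title tooltip. A minimal sketch of the consuming side:

let deleteButton = document.createElement("button");
document.l10n.setAttributes(deleteButton, "about-inference-delete-button");
// After localization applies, the button shows "❌" with tooltip "Delete model".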

View file

@@ -0,0 +1,44 @@
<!-- This Source Code Form is subject to the terms of the Mozilla Public
- License, v. 2.0. If a copy of the MPL was not distributed with this
- file, You can obtain one at http://mozilla.org/MPL/2.0/. -->
<!DOCTYPE html>
<html>
<head>
<meta
http-equiv="Content-Security-Policy"
content="default-src chrome:;img-src data:; object-src 'none'"
/>
<meta name="color-scheme" content="light dark" />
<title data-l10n-id="about-inference-title"></title>
<link rel="stylesheet" href="chrome://global/skin/in-content/common.css" />
<link rel="localization" href="preview/aboutInference.ftl" />
<script src="chrome://global/content/aboutInference.js"></script>
<link rel="stylesheet" href="chrome://global/content/aboutInference.css" />
</head>
<body>
<h1 data-l10n-id="about-inference-header"></h1>
<p data-l10n-id="about-inference-description"></p>
<div id="warning" data-l10n-id="about-inference-warning"></div>
<div id="generalInfo">
<ul>
<li>
<span data-l10n-id="about-inference-simd-supported"></span>:
<span id="onnxSimd"></span>
</li>
<li>
<span data-l10n-id="about-inference-onnx-runtime-file"></span>:
<span id="onnxRuntime"></span>
</li>
</ul>
</div>
<h2 data-l10n-id="about-inference-processes-title"></h2>
<div id="runningInference"></div>
<h2 data-l10n-id="about-inference-models-title"></h2>
<p data-l10n-id="about-inference-downloads-description"></p>
<div id="modelFiles"></div>
</body>
</html>

View file

@@ -0,0 +1,233 @@
/* -*- indent-tabs-mode: nil; js-indent-level: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
"use strict";
/**
* Lazily imports the ES modules this page needs.
*/
const lazy = {};
ChromeUtils.defineESModuleGetters(lazy, {
IndexedDBCache: "chrome://global/content/ml/ModelHub.sys.mjs",
ModelHub: "chrome://global/content/ml/ModelHub.sys.mjs",
detectSimdSupport: "chrome://global/content/ml/Utils.sys.mjs",
getRuntimeWasmFilename: "chrome://global/content/ml/Utils.sys.mjs",
DownloadUtils: "resource://gre/modules/DownloadUtils.sys.mjs",
});
/**
* Preferences for machine learning enablement and model hub configuration.
*/
const ML_ENABLE = Services.prefs.getBoolPref("browser.ml.enable");
const MODEL_HUB_ROOT_URL = Services.prefs.getStringPref(
"browser.ml.modelHubRootUrl"
);
const MODEL_HUB_URL_TEMPLATE = Services.prefs.getStringPref(
"browser.ml.modelHubUrlTemplate"
);
let modelHub = null;
let modelCache = null;
/**
* Gets an instance of ModelHub. Initializes it if it doesn't already exist.
*
* @returns {ModelHub} The ModelHub instance.
*/
function getModelHub() {
if (!modelHub) {
modelHub = new lazy.ModelHub({
rootUrl: MODEL_HUB_ROOT_URL,
urlTemplate: MODEL_HUB_URL_TEMPLATE,
});
}
return modelHub;
}
/**
* Formats a number of bytes into a human-readable string.
*
* @param {number} bytes - The number of bytes to format.
* @returns {string} The formatted string.
*/
function formatBytes(bytes) {
const size = lazy.DownloadUtils.convertByteUnits(bytes);
return `${size[0]} ${size[1]}`;
}
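// For example (a sketch; the exact string is locale-dependent):
// formatBytes(1048576) -> "1.0 MB", via DownloadUtils.convertByteUnits.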
/**
* Displays process information in a table. Only includes processes of type "inference".
*
* @async
*/
async function displayProcessInfo() {
let info = await ChromeUtils.requestProcInfo();
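// info.children lists the child processes; each entry exposes at least
// type, pid and memory (in bytes), which is all this table needs.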
let tableContainer = document.getElementById("runningInference");
let fragment = document.createDocumentFragment();
let table = document.createElement("table");
table.border = "1";
let thead = document.createElement("thead");
let headerRow = document.createElement("tr");
let th1 = document.createElement("th");
document.l10n.setAttributes(th1, "about-inference-pid");
headerRow.appendChild(th1);
let th2 = document.createElement("th");
document.l10n.setAttributes(th2, "about-inference-memory");
headerRow.appendChild(th2);
thead.appendChild(headerRow);
table.appendChild(thead);
let foundInference = false;
let tbody = document.createElement("tbody");
for (const child of info.children) {
if (child.type === "inference") {
foundInference = true;
let row = document.createElement("tr");
let pidCell = document.createElement("td");
pidCell.textContent = child.pid;
row.appendChild(pidCell);
let memoryCell = document.createElement("td");
memoryCell.textContent = formatBytes(child.memory);
row.appendChild(memoryCell);
tbody.appendChild(row);
}
}
table.appendChild(tbody);
if (foundInference) {
fragment.appendChild(table);
} else {
let noneLabel = document.createElement("div");
document.l10n.setAttributes(noneLabel, "about-inference-no-processes");
fragment.appendChild(noneLabel);
}
tableContainer.innerHTML = "";
tableContainer.appendChild(fragment);
}
/**
* Displays information about the machine learning models and process info.
*
* @async
*/
async function displayInfo() {
if (!ML_ENABLE) {
let warning = document.getElementById("warning");
warning.style.display = "block";
}
let cache = await lazy.IndexedDBCache.init();
let models = await cache.listModels();
let modelFilesDiv = document.getElementById("modelFiles");
// Use DocumentFragment to avoid reflows
let fragment = document.createDocumentFragment();
for (const entry of models) {
let files = await cache.listFiles(entry.name, entry.revision);
// Create a new table for the current model
let table = document.createElement("table");
// caption block
let caption = document.createElement("caption");
let modelInfo = document.createElement("div");
modelInfo.textContent = `${entry.name} (${entry.revision})`;
let deleteButton = document.createElement("button");
document.l10n.setAttributes(deleteButton, "about-inference-delete-button");
deleteButton.onclick = async () => {
await cache.deleteModel(entry.name, entry.revision);
modelFilesDiv.removeChild(table); // Remove the table from the DOM
};
modelInfo.appendChild(deleteButton);
caption.appendChild(modelInfo);
table.appendChild(caption);
// Create table headers
let thead = document.createElement("thead");
let headerRow = document.createElement("tr");
let thFile = document.createElement("th");
document.l10n.setAttributes(thFile, "about-inference-file");
headerRow.appendChild(thFile);
thFile = document.createElement("th");
document.l10n.setAttributes(thFile, "about-inference-size");
headerRow.appendChild(thFile);
thead.appendChild(headerRow);
table.appendChild(thead);
// Create table body
let tbody = document.createElement("tbody");
let totalSize = 0;
for (const file of files) {
let row = document.createElement("tr");
let tdFile = document.createElement("td");
tdFile.textContent = file.path;
row.appendChild(tdFile);
const fileSize = parseInt(
file.headers.fileSize || file.headers["Content-Length"] || 0,
10
);
tdFile = document.createElement("td");
tdFile.textContent = formatBytes(fileSize);
row.appendChild(tdFile);
tbody.appendChild(row);
totalSize += fileSize;
}
// Append the total line
let totalRow = document.createElement("tr");
let tdTotalLabel = document.createElement("td");
document.l10n.setAttributes(tdTotalLabel, "about-inference-total");
totalRow.appendChild(tdTotalLabel);
let tdTotalValue = document.createElement("td");
tdTotalValue.textContent = formatBytes(totalSize);
totalRow.appendChild(tdTotalValue);
tbody.appendChild(totalRow);
table.appendChild(tbody);
fragment.appendChild(table);
}
modelFilesDiv.innerHTML = "";
modelFilesDiv.appendChild(fragment);
await displayProcessInfo();
document.getElementById("onnxRuntime").textContent =
lazy.getRuntimeWasmFilename();
if (lazy.detectSimdSupport()) {
document.l10n.setAttributes(
document.getElementById("onnxSimd"),
"about-inference-yes"
);
} else {
document.l10n.setAttributes(
document.getElementById("onnxSimd"),
"about-inference-no"
);
}
}
/**
* Initializes the display of information when the window loads and sets an interval to update it.
*
* @async
*/
window.onload = async function () {
await displayInfo();
setInterval(displayInfo, 5000);
};

View file

@@ -0,0 +1,8 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
toolkit.jar:
content/global/aboutInference.html (content/aboutInference.html)
content/global/aboutInference.js (content/aboutInference.js)
content/global/aboutInference.css (content/aboutInference.css)

View file

@@ -0,0 +1,12 @@
# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
# vim: set filetype=python:
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
with Files("**"):
BUG_COMPONENT = ("Core", "Machine Learning")
JAR_MANIFESTS += ["jar.mn"]
BROWSER_CHROME_MANIFESTS += ["tests/browser/browser.toml"]

View file

@@ -0,0 +1,7 @@
[DEFAULT]
support-files = [
"head.js"
]
["browser_aboutinference.js"]
https_first_disabled = true

View file

@@ -0,0 +1,83 @@
requestLongerTimeout(2);
/**
* Checks that the page renders without issue, and that the expected elements
* are there.
*/
add_task(async function test_about_inference_enabled() {
await openAboutInference({
runInPage: async ({ selectors }) => {
const { document, window } = content;
function checkElementIsVisible(expectVisible, name) {
const expected = expectVisible ? "visible" : "hidden";
const element = document.querySelector(selectors[name]);
ok(Boolean(element), `Element ${name} was found.`);
const { visibility } = window.getComputedStyle(element);
is(
visibility,
expected,
`Element ${name} should be ${expected}.`
);
}
checkElementIsVisible(true, "pageHeader");
const element = document.querySelector(selectors.warning);
const { display } = window.getComputedStyle(element);
is(display, "none", "The warning should be hidden");
},
});
});
/**
* Checks that the page renders with a warning when ml is disabled.
*/
add_task(async function test_about_inference_disabled() {
await openAboutInference({
prefs: [["browser.ml.enable", false]],
runInPage: async ({ selectors }) => {
const { document, window } = content;
const element = document.querySelector(selectors.warning);
const { display } = window.getComputedStyle(element);
is(display, "block", "The warning should be visible");
Assert.equal(element.textContent, "browser.ml.enable is set to false!");
},
});
});
/**
* Checks that the inference process is shown on the page
*/
add_task(async function test_about_inference_process() {
await openAboutInference({
runInference: true,
runInPage: async ({ selectors }) => {
function waitForInnerHTML(selector, substring, interval = 100) {
return new Promise((resolve, reject) => {
const { document } = content;
const element = document.querySelector(selector);
if (!element) {
reject(new Error(`No element found with selector: ${selector}`));
return;
}
const checkInnerHTML = () => {
console.log(
`Checking innerHTML of element with selector: ${selector}`
);
if (element.innerHTML.includes(substring)) {
console.log(
`Substring "${substring}" found in element with selector: ${selector}`
);
resolve();
} else {
// eslint-disable-next-line mozilla/no-arbitrary-setTimeout
setTimeout(checkInnerHTML, interval);
}
};
checkInnerHTML();
});
}
// When the process is shown, we display its memory size in MB
await waitForInnerHTML(selectors.processes, "MB");
},
});
});

View file

@@ -0,0 +1,125 @@
Services.scriptloader.loadSubScript(
"chrome://mochitests/content/browser/toolkit/components/ml/tests/browser/head.js",
this
);
async function setupRemoteClient() {
const { removeMocks, remoteClients } = await createAndMockMLRemoteSettings({
autoDownloadFromRemoteSettings: false,
});
return {
remoteClients,
async cleanup() {
await removeMocks();
await waitForCondition(
() => EngineProcess.areAllEnginesTerminated(),
"Waiting for all of the engines to be terminated.",
100,
200
);
},
};
}
async function runInferenceProcess(remoteClients) {
info("Building the egnine process");
const { PipelineOptions, EngineProcess } = ChromeUtils.importESModule(
"chrome://global/content/ml/EngineProcess.sys.mjs"
);
const options = new PipelineOptions({
taskName: "moz-echo",
});
const engineParent = await EngineProcess.getMLEngineParent();
const engine = engineParent.getEngine(options);
const inferencePromise = engine.run({ data: "This gets echoed." });
await remoteClients["ml-onnx-runtime"].resolvePendingDownloads(1);
Assert.equal(
(await inferencePromise).output,
"This gets echoed.",
"The text get echoed exercising the whole flow."
);
}
/**
* The mochitest runs in the parent process. This function opens up a new tab,
* navigates to about:inference, and passes the test requirements into the content process.
*
* @param {object} options - The options object.
* @param {boolean} options.disabled - Flag to disable the inference functionality.
* @param {Function} options.runInPage - The function to run in the content process.
* @param {Array} [options.prefs] - An array of additional preferences to set.
* @param {boolean} options.runInference - If true, runs an inference task
*
* @returns {Promise<void>} A promise that resolves when the test is complete.
*/
async function openAboutInference({
disabled,
runInPage,
prefs,
runInference = false,
}) {
await SpecialPowers.pushPrefEnv({
set: [
// Enabled by default.
["browser.ml.enable", !disabled],
["browser.ml.logLevel", "Debug"],
...(prefs ?? []),
],
});
let cleanup;
let remoteClients;
// run inference
if (runInference) {
let set = await setupRemoteClient();
cleanup = set.cleanup;
remoteClients = set.remoteClients;
await runInferenceProcess(remoteClients);
}
/**
* Collect any relevant selectors for the page here.
*/
const selectors = {
pageHeader: '[data-l10n-id="about-inference-header"]',
warning: "div#warning",
processes: "div#runningInference",
};
// Start the tab at a blank page.
let tab = await BrowserTestUtils.openNewForegroundTab(
gBrowser,
BLANK_PAGE,
true // waitForLoad
);
// Now load the about:inference page, since the actor could be mocked.
BrowserTestUtils.startLoadingURIString(tab.linkedBrowser, "about:inference");
await BrowserTestUtils.browserLoaded(tab.linkedBrowser);
await ContentTask.spawn(tab.linkedBrowser, { selectors }, runInPage);
await loadBlankPage();
BrowserTestUtils.removeTab(tab);
await SpecialPowers.popPrefEnv();
if (runInference) {
await EngineProcess.destroyMLEngine();
await cleanup();
}
}
/**
* Loads the blank-page URL.
*
* This is useful for resetting the state during cleanup, and also
* before starting a test, to further help ensure that there is no
* unintentional state left over from a previous test case.
*/
async function loadBlankPage() {
BrowserTestUtils.startLoadingURIString(gBrowser.selectedBrowser, BLANK_PAGE);
await BrowserTestUtils.browserLoaded(gBrowser.selectedBrowser);
}

View file

@@ -28,7 +28,14 @@ const ALLOWED_HUBS = [
"https://localhost",
"https://model-hub.mozilla.org",
];
const ALLOWED_HEADERS_KEYS = ["Content-Type", "ETag", "status"];
const ALLOWED_HEADERS_KEYS = [
"Content-Type",
"ETag",
"status",
"fileSize", // the size in bytes we store
"Content-Length", // the size we download (can be different when gzipped)
];
const DEFAULT_URL_TEMPLATE = "{model}/resolve/{revision}";
/**
@@ -278,7 +285,7 @@ export class IndexedDBCache {
const headersKey = `${model}/${revision}`;
const cacheKey = `${model}/${revision}/${file}`;
const headers = await this.#getData(this.headersStoreName, headersKey);
if (headers && headers.files[cacheKey]) {
if (headers?.files[cacheKey]) {
return headers.files[cacheKey];
}
return null; // Return null if no headers are found
@@ -308,27 +315,30 @@
* @param {string} model - The model name (organization/name).
* @param {string} revision - The model version.
* @param {string} file - The file name.
* @param {ArrayBuffer} arrayBuffer - The data to cache.
* @param {Blob} data - The data to cache.
* @param {object} [headers] - The headers for the file.
* @returns {Promise<void>}
*/
async put(model, revision, file, arrayBuffer, headers = {}) {
async put(model, revision, file, data, headers = {}) {
const fileSize = data.size;
const cacheKey = `${model}/${revision}/${file}`;
const newSize = this.totalSize + arrayBuffer.byteLength;
const newSize = this.totalSize + fileSize;
if (newSize > this.#maxSize) {
throw new Error("Exceeding total cache size limit of 1GB");
}
const headersKey = `${model}/${revision}`;
const data = { id: cacheKey, data: arrayBuffer };
const fileEntry = { id: cacheKey, data };
// Store the file data
await this.#updateData(this.fileStoreName, data);
lazy.console.debug(`Storing ${cacheKey} with size:`, fileSize);
await this.#updateData(this.fileStoreName, fileEntry);
// Update headers store - with defaults for ETag and Content-Type
headers = headers || {};
headers["Content-Type"] =
headers["Content-Type"] ?? "application/octet-stream";
headers.fileSize = fileSize;
headers.ETag = headers.ETag ?? NO_ETAG;
// filter out any keys that are not allowed
@@ -339,6 +349,7 @@
return obj;
}, {});
lazy.console.debug(`Storing ${cacheKey} with headers:`, headers);
const headersStore = (await this.#getData(
this.headersStoreName,
headersKey
@@ -350,7 +361,7 @@
await this.#updateData(this.headersStoreName, headersStore);
// Update size
await this.#updateTotalSize(arrayBuffer.byteLength);
await this.#updateTotalSize(fileSize);
}
/**
@@ -374,6 +385,7 @@
* @returns {Promise<void>}
*/
async deleteModel(model, revision) {
lazy.console.debug("Deleting model", model, revision);
const headersKey = `${model}/${revision}`;
const headers = await this.#getData(this.headersStoreName, headersKey);
if (headers) {
@@ -384,6 +396,49 @@
}
}
/**
* Lists all files for a given model and revision stored in the cache.
*
* @param {string} model - The model name (organization/name).
* @param {string} revision - The model version.
* @returns {Promise<Array<{path: string, headers: object}>>} An array of file entries, each with a path and its headers.
*/
async listFiles(model, revision) {
const headersKey = `${model}/${revision}`;
let files = [];
const headers = await this.#getData(this.headersStoreName, headersKey);
if (headers?.files) {
const prefix = `${headersKey}/`;
for (const file in headers.files) {
const filePath = file.startsWith(prefix)
? file.slice(prefix.length)
: file;
files.push({ path: filePath, headers: headers.files[file] });
}
}
return files;
}
/**
* Parses a string of the form 'organization/name/revision', like 'model/distilvit/main',
* and returns an object with name and revision.
*
* @param {string} str - The input string.
* @returns {{name: string, revision: string}} An object with name and revision.
*/
parseModelString(str) {
const parts = str.split("/");
if (parts.length === 3) {
return {
name: `${parts[0]}/${parts[1]}`,
revision: parts[2],
};
}
throw new Error(
"Invalid model string format. Expected format: 'model/name/revision'"
);
}
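// Example (sketch): parseModelString("model/distilvit/main")
// -> { name: "model/distilvit", revision: "main" }.
// Any string without exactly three '/'-separated parts throws.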
/**
* Lists all models stored in the cache.
*
@@ -401,8 +456,9 @@
request.onerror = event => reject(event.target.error);
request.onsuccess = event => {
const cursor = event.target.result;
if (cursor) {
models.push(cursor.value.id); // Assuming id is the organization/modelName
if (cursor) {
if (cursor.value.id !== "totalSize") {
const model = this.parseModelString(cursor.value.id);
models.push(model);
}
cursor.continue();
} else {
resolve(models);
@@ -778,6 +834,7 @@
// We don't store the boundary or the charset, just the content type,
// so we drop what's after the semicolon.
"Content-Type": response.headers.get("Content-Type").split(";")[0],
"Content-Length": response.headers.get("Content-Length"),
ETag: response.headers.get("ETag"),
};

View file

@@ -16,7 +16,7 @@ export function arrayBufferToBlobURL(buffer) {
/**
* Validate some simple Wasm that uses a SIMD operation.
*/
function detectSimdSupport() {
export function detectSimdSupport() {
return WebAssembly.validate(
new Uint8Array(
// ```

View file

@@ -176,3 +176,9 @@ In the code above, **progressData** is an object of type `ProgressAndStatusCallbackParams` with the following fields:
- **id**: An ID uniquely identifying the object/file being tracked.
- **ok**: A boolean indicating whether the operation was successful.
- **metadata**: Any additional metadata for the operation being tracked.
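
A minimal sketch of a callback consuming these fields (the handler name is illustrative):

.. code-block:: js

  const progressCallback = progressData => {
    const { id, ok, metadata } = progressData;
    if (!ok) {
      console.warn(`Tracking of ${id} failed`, metadata);
      return;
    }
    console.log(`Progress for ${id}`, metadata);
  };
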
about:inference
:::::::::::::::
In Nightly, you can use `about:inference <about:inference>`_ to check the status of the engine and manage downloaded models.

View file

@@ -43,6 +43,10 @@ const badHubs = [
"https://model-hub.mozilla.org.hack", // Domain that contains allowed domain
];
function createBlob(size = 8) {
return new Blob([new ArrayBuffer(size)]);
}
/**
* Make sure we reject bad model hub URLs.
*/
@@ -533,7 +537,7 @@ add_task(async function test_Init() {
*/
add_task(async function test_PutAndCheckExists() {
const cache = await initializeCache();
const testData = new ArrayBuffer(8); // Example data
const testData = createBlob();
const key = "file.txt";
await cache.put("org/model", "v1", "file.txt", testData, {
ETag: "ETAG123",
@@ -557,7 +561,7 @@
*/
add_task(async function test_PutAndGet() {
const cache = await initializeCache();
const testData = new ArrayBuffer(8); // Example data
const testData = createBlob();
await cache.put("org/model", "v1", "file.txt", testData, {
ETag: "ETAG123",
});
@@ -586,7 +590,7 @@
*/
add_task(async function test_GetHeaders() {
const cache = await initializeCache();
const testData = new ArrayBuffer(8);
const testData = createBlob();
const headers = {
ETag: "ETAG123",
status: 200,
@@ -605,6 +609,7 @@ add_task(async function test_GetHeaders() {
ETag: "ETAG123",
status: 200,
"Content-Type": "application/octet-stream",
fileSize: 8,
},
storedHeaders,
"The retrieved headers should match the stored headers."
@@ -617,14 +622,15 @@
*/
add_task(async function test_ListModels() {
const cache = await initializeCache();
await cache.put("org1/modelA", "v1", "file1.txt", new ArrayBuffer(8), null);
await cache.put("org2/modelB", "v1", "file2.txt", new ArrayBuffer(8), null);
await cache.put("org1/modelA", "v1", "file1.txt", createBlob(), null);
await cache.put("org2/modelB", "v2", "file2.txt", createBlob(), null);
const models = await cache.listModels();
Assert.ok(
models.includes("org1/modelA/v1") && models.includes("org2/modelB/v1"),
"All models should be listed."
);
const wanted = [
{ name: "org1/modelA", revision: "v1" },
{ name: "org2/modelB", revision: "v2" },
];
Assert.deepEqual(models, wanted, "All models should be listed");
await deleteCache(cache);
});
@@ -633,7 +639,7 @@ add_task(async function test_ListModels() {
*/
add_task(async function test_DeleteModel() {
const cache = await initializeCache();
await cache.put("org/model", "v1", "file.txt", new ArrayBuffer(8), null);
await cache.put("org/model", "v1", "file.txt", createBlob(), null);
await cache.deleteModel("org/model", "v1");
const dataAfterDelete = await cache.getFile("org/model", "v1", "file.txt");
@@ -644,3 +650,48 @@
);
await deleteCache(cache);
});
/**
* Test listing files
*/
add_task(async function test_listFiles() {
const cache = await initializeCache();
const headers = { "Content-Length": "12345", ETag: "XYZ" };
const blob = createBlob();
await cache.put("org/model", "v1", "file.txt", blob, null);
await cache.put("org/model", "v1", "file2.txt", blob, null);
await cache.put("org/model", "v1", "sub/file3.txt", createBlob(32), headers);
const files = await cache.listFiles("org/model", "v1");
const wanted = [
{
path: "file.txt",
headers: {
"Content-Type": "application/octet-stream",
fileSize: 8,
ETag: "NO_ETAG",
},
},
{
path: "file2.txt",
headers: {
"Content-Type": "application/octet-stream",
fileSize: 8,
ETag: "NO_ETAG",
},
},
{
path: "sub/file3.txt",
headers: {
"Content-Length": "12345",
"Content-Type": "application/octet-stream",
fileSize: 32,
ETag: "XYZ",
},
},
];
Assert.deepEqual(files, wanted);
await deleteCache(cache);
});

View file

@@ -13,6 +13,7 @@ if CONFIG["MOZ_HAS_REMOTE"]:
DIRS += [
"aboutcheckerboard",
"aboutinference",
"aboutmemory",
"aboutprocesses",
"alerts",

View file

@@ -13,6 +13,7 @@
#endif
#if defined(NIGHTLY_BUILD)
preview/megalist.ftl (../components/satchel/megalist/content/megalist.ftl)
preview/aboutInference.ftl (../components/aboutinference/content/aboutInference.ftl)
#endif
@AB_CD@.jar: