Squashed commit of the following:

commit 8e5e38cc0a
Author: Noel Bundick <noelbundick@gmail.com>
Date:   Mon Aug 17 18:48:25 2020 +0000

    Remove packer

commit bd3ff883dd
Author: Noel Bundick <noelbundick@gmail.com>
Date:   Mon Aug 17 18:37:55 2020 +0000

    Restore version as apiVersion

commit 8d717c09e5
Author: Noel Bundick <noelbundick@gmail.com>
Date:   Mon Aug 17 18:09:34 2020 +0000

    Remove VM worker

commit 029c600e95
Author: Noel Bundick <noelbundick@gmail.com>
Date:   Wed Aug 12 18:56:54 2020 +0000

    s/server/laboratory/g

commit a187b0978e
Author: Noel Bundick <noelbundick@gmail.com>
Date:   Wed Aug 12 18:44:48 2020 +0000

    Update azure deployments

commit 03fc892df9
Author: Noel Bundick <noelbundick@gmail.com>
Date:   Wed Aug 12 15:04:52 2020 +0000

    Update VSCode settings

commit 0c533c1803
Author: Noel Bundick <noelbundick@gmail.com>
Date:   Tue Aug 11 23:35:44 2020 +0000

    ci: wait before hitting laboratory

commit 2ecf45fa8d
Author: Noel Bundick <noelbundick@gmail.com>
Date:   Tue Aug 11 22:48:32 2020 +0000

    Remove benchmark mode, remove tslint comments, add Docker in CI

commit 3634f45360
Author: Noel Bundick <noelbundick@gmail.com>
Date:   Tue Aug 11 21:18:20 2020 +0000

    fix tsconfigs

commit 9664174ae4
Author: Noel Bundick <noelbundick@gmail.com>
Date:   Sat Aug 8 04:30:27 2020 +0000

    Updated Dockerfile & bin scripts

commit c184aa15ef
Author: Noel Bundick <noelbundick@gmail.com>
Date:   Fri Aug 7 20:44:03 2020 +0000

    Resolve README run-through issues

commit af5be66d9e
Author: Noel Bundick <noelbundick@gmail.com>
Date:   Fri Aug 7 15:51:29 2020 +0000

    better monorepo with TS package references

commit 7423673378
Author: Noel Bundick <noelbundick@gmail.com>
Date:   Mon Aug 3 21:42:48 2020 +0000

    WIP move server out of sds

commit 1f9d40a501
Author: Noel Bundick <noelbundick@gmail.com>
Date:   Sat Aug 1 03:14:21 2020 +0000

    WIP: moving services into their own projects

commit ea12c1c5f2
Author: Noel Bundick <noelbundick@gmail.com>
Date:   Fri Jul 31 19:02:46 2020 +0000

    Adds AAD auth to laboratory and CLI

commit 244416de22
Author: Noel Bundick <noelbundick@gmail.com>
Date:   Tue Jul 28 22:32:09 2020 +0000

    Remove static 'blob' field in favor of Suite property usage
    Update unit tests with status codes

commit bb7816dd6a
Author: Noel Bundick <noelbundick@gmail.com>
Date:   Tue Jul 28 22:04:31 2020 +0000

    RESTful status codes

commit a6559c35d9
Author: Noel Bundick <noelbundick@gmail.com>
Date:   Tue Jul 28 21:46:00 2020 +0000

    Update "runs for a benchmark/suite" to follow REST
    Use express-async-errors

commit 78027d28f1
Author: Noel Bundick <noelbundick@gmail.com>
Date:   Tue Jul 28 20:57:29 2020 +0000

    Normalize on main.js as entrypoint
    Fix/simplify error handling
    Streamline lab app setup

commit 32c5a7e9cb
Author: Noel Bundick <noelbundick@gmail.com>
Date:   Tue Jul 28 02:20:20 2020 +0000

    Remove extra internal id property

commit 765fab4c1f
Author: Noel Bundick <noelbundick@gmail.com>
Date:   Tue Jul 28 02:01:35 2020 +0000

    Update API spec
    Fix REST script

commit e6ab277655
Author: Noel Bundick <noelbundick@gmail.com>
Date:   Mon Jul 27 22:37:39 2020 +0000

    Add volumes to queue message
    Update README
    Allow both HTTP & HTTPS
    Fix POST of run results

commit ef9c1cc285
Author: Noel Bundick <noelbundick@gmail.com>
Date:   Mon Jul 27 17:45:31 2020 +0000

    Add suite volume definitions
    Use sqlite for laboratory tests

commit a62c0f34ef
Author: Noel Bundick <noelbundick@gmail.com>
Date:   Sat Jul 25 04:30:49 2020 +0000

    Update OpenAPI spec to current

commit a41444bd6b
Author: Noel Bundick <noelbundick@gmail.com>
Date:   Fri Jul 24 21:49:03 2020 +0000

    PATCH -> POST for run results
    Remove wildcard CORS headers
Simplify express routes

commit 095da1b1c0
Author: Noel Bundick <noelbundick@gmail.com>
Date:   Fri Jul 24 20:23:27 2020 +0000

    Flatten benchmarks. Remove pipelines, single mode per-benchmark

commit 0ffe57940a
Author: Noel Bundick <noelbundick@gmail.com>
Date:   Fri Jul 24 18:59:53 2020 +0000

    version cleanup

commit 60aed9f33f
Author: Noel Bundick <noelbundick@gmail.com>
Date:   Fri Jul 24 18:59:31 2020 +0000

    delete TODO

commit 9ec8b45180
Author: Noel Bundick <noelbundick@gmail.com>
Date:   Fri Jul 24 18:58:45 2020 +0000

    remove unused version field
Noel Bundick 2020-08-17 12:09:06 -07:00
Parent 8c26196d9f
Commit dcb922aa20
148 changed files: 26806 additions and 6077 deletions


@ -2,9 +2,9 @@
.git
.github
.vscode
build
dist
docs
*Dockerfile*
node_modules
scripts
**/dist
**/*Dockerfile*
**/node_modules


@ -1,32 +0,0 @@
# To use in bash:
# set -o allexport; source .env; set +o allexport
# Azure Service Principal - ex: from `az ad sp create-for-rbac`
AZURE_TENANT_ID=
AZURE_CLIENT_ID=
AZURE_CLIENT_SECRET=
# Packer
AZURE_SUBSCRIPTION_ID=
# Laboratory configuration
PORT=3000
LABORATORY_ENDPOINT=https://mylaboratory.azurewebsites.net
#WEBSITE_HOSTNAME=mylaboratory.azurewebsites.net # this is populated automatically by Azure App Service
# Blob configuration
BLOB_CONTAINER=https://mystorage.blob.core.windows.net/runs
# Queue configuration
QUEUE_MODE=azure
QUEUE_ENDPOINT=https://mystorage.queue.core.windows.net/myqueue
# Database configuration
SQL_MODE=azuresql
SQL_HOST=mydatabase.database.windows.net
SQL_DB=laboratory
AZURE_CLIENT_ID=00000000-0000-0000-0000-000000000000
# Functional test variables
TEST_QUEUE_SERVICE_URL=https://mystorage.queue.core.windows.net
TEST_BASE_VOLUME_PATH=/tmp/sds/volumes/

.eslintrc.json Normal file

@ -0,0 +1,16 @@
{
"extends": "./node_modules/gts/",
"ignorePatterns": [
"dist/**/*"
],
"overrides": [
{
"files": [
"test/**/*.ts"
],
"env": {
"mocha": true
}
}
]
}

.github/workflows/ci.yml (vendored)

@ -14,7 +14,7 @@ jobs:
cache-name: cache-node-modules
with:
path: ~/.npm
key: ${{ runner.os }}-build-${{ env.cache-name }}-${{ hashFiles('package-lock.json') }}
key: ${{ runner.os }}-build-${{ env.cache-name }}-${{ hashFiles('**/package-lock.json') }}
restore-keys: |
${{ runner.os }}-build-${{ env.cache-name }}-
${{ runner.os }}-build-
@ -23,3 +23,5 @@ jobs:
- run: npm install
- run: npm run test
- run: docker-compose up --exit-code-from cli

.gitignore (vendored)

@ -1,4 +1,6 @@
.env
*.tgz
*.tsbuildinfo
build
dist
node_modules


@ -1,4 +1,4 @@
spec: build/test
spec: test/**/*.ts
recursive: true
ignore:
- "**/functional/**/*.js" # don't run functional tests by default
- "**/functional/**/*.ts" # don't run functional tests by default


@ -1,5 +1,5 @@
tunnels:
deploy:
addr: 8080
addr: 9001
proto: http
bind_tls: true

.prettierrc.js Normal file

@ -0,0 +1,4 @@
module.exports = {
...require('gts/.prettierrc.json'),
bracketSpacing: true,
}

.vscode/extensions.json (vendored)

@ -1,6 +1,5 @@
{
"recommendations": [
"hbenl.vscode-mocha-test-adapter",
"humao.rest-client",
"ms-azuretools.vscode-azureappservice",
"msazurermtools.azurerm-vscode-tools",

.vscode/launch.json (vendored)

@ -1,29 +0,0 @@
{
// Use IntelliSense to learn about possible attributes.
// Hover to view descriptions of existing attributes.
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
"version": "0.2.0",
"configurations": [
{
"type": "node",
"request": "launch",
"name": "cli",
"program": "${workspaceFolder}/src/cli/sds.ts",
"args": ["demo"],
"preLaunchTask": "tsc: build - tsconfig.json",
"outFiles": [
"${workspaceFolder}/build/**/*.js"
]
},
{
"type": "node",
"request": "launch",
"name": "laboratory",
"program": "${workspaceFolder}/src/laboratory/server/main.ts",
"preLaunchTask": "tsc: build - tsconfig.json",
"outFiles": [
"${workspaceFolder}/build/**/*.js"
]
}
]
}

.vscode/settings.json (vendored)

@ -4,16 +4,9 @@
"files.insertFinalNewline": true,
"typescript.tsdk": "node_modules/typescript/lib",
// note: mochaExplorer captures only a subset of mocha options, so the
// configuration in .mocharc.yml used by CI and CLI tools needs to be replicated
"mochaExplorer.files": "build/test/unit/**/*.js",
"mochaExplorer.mochaPath": "./node_modules/mocha",
"mochaExplorer.require": "source-map-support/register",
"rest-client.environmentVariables": {
"$shared": {
"laboratoryHost": "http://localhost:3000",
"laboratoryRegistry": "sdslaboratory.azurecr.io"
"laboratoryHost": "http://localhost:3000"
}
}
}


@ -1,30 +1,32 @@
# Typescript compilation in a build container
FROM node:lts-slim AS build
RUN npm set unsafe-perm true
WORKDIR /app
COPY package*.json ./
RUN npm ci
COPY package*.json lerna.json tsconfig.json ./
RUN npm ci --ignore-scripts
COPY tsconfig.json .
COPY src src
RUN npm run compile
COPY packages ./packages
RUN npm run pack
# Run as a nonprivileged user in production mode
# Application images
FROM node:lts-slim AS app
ENV NODE_ENV=production
ENV NODE_ENV=production PATH="${PATH}:node_modules/.bin"
WORKDIR /app
RUN chown node:node .
USER node
COPY --from=build /app/package*.json ./
RUN npm install
COPY --from=build /app/build/src /app/build/src
# Custom targets for specific entrypoints
FROM app AS worker
CMD npm run worker
FROM app AS cli
COPY --from=build /app/packages/sds/*.tgz /app/packages/cli/*.tgz /packages/
RUN npm install /packages/*.tgz
CMD sds-cli
FROM app AS laboratory
CMD npm run laboratory
COPY --from=build /app/packages/sds/*.tgz /app/packages/laboratory/*.tgz /packages/
RUN npm install /packages/*.tgz
CMD sds-laboratory
FROM app AS worker
COPY --from=build /app/packages/sds/*.tgz /app/packages/worker/*.tgz /packages/
RUN npm install /packages/*.tgz
CMD sds-worker
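
The updated multi-stage Dockerfile compiles the monorepo once in the `build` stage, then produces three entrypoint-specific images (`cli`, `laboratory`, `worker`) from a shared `app` base by installing the packed tarballs. A minimal sketch of building each target locally; the image tags are illustrative, not defined by this repo:

~~~
# Build each entrypoint image from the repo root (tags are placeholders).
% docker build --target cli -t sds-cli .
% docker build --target laboratory -t sds-laboratory .
% docker build --target worker -t sds-worker .
~~~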

README.md

@ -20,7 +20,7 @@ With `SDS`, an organization can host machine learning challenges and invite thir
written in [TypeScript](https://www.typescriptlang.org/).
In order to use `SDS` you must have
[Node](https://nodejs.org/en/download/) installed on your machine.
`SDS` has been tested with Node version [13.7.0](https://nodejs.org/download/release/v13.7.0/).
`SDS` has been tested with Node version [12.16.3](https://nodejs.org/download/release/v12.16.3/).
Here are the steps for cloning and building `SDS`:
~~~
@ -35,47 +35,45 @@ This local instance does not have a worker pool, so it won't be able to actually
Open two shell windows. In the first window, start the laboratory service:
~~~
% node build/src/laboratory/server/main.js
% npm run laboratory
~~~
We can run the CLI in the second shell window. Let's start with the `--help` command:
We can run the CLI in the second shell window. Let's start with the `help` command:
~~~
% node build/src/cli/sds.js --help
% npm run cli help
Usage: sds [options] [command]
Secure Data Sandbox CLI
Options:
-V, --version output the version number
-h, --help display help for command
Commands:
connect <server> PARTIALLY IMPLEMENTED. Connect to a Laboratory service.
create <type> <spec> Create a benchmark, candidate, or suite from a specification where <type> is either "benchmark",
"candidate", or "suite".
connect [service] Connect to a Laboratory [service] or print connection info.
create <type> <spec> Create a benchmark, candidate, or suite from a specification where <type> is either "benchmark", "candidate", or
"suite".
demo Configures Laboratory service with demo data.
deploy <server> NOT YET IMPLEMENTED. Deploy a Laboratory service.
examples Show usage examples.
list <type> Display summary information about benchmarks, candidates, runs, and suites.
results <benchmark> <suite> Display the results of all runs against a named benchmark and suite.
run <candidate> <suite> Run a named <candidate> against a named <suite>.
show <type> [name] Display all benchmarks, candidates, suites, or runs. If optional [name] is specified, only show
matching items.
show <type> [name] Display all benchmarks, candidates, suites, or runs. If optional [name] is specified, only show matching items.
help [command] display help for command
For more information and examples, see https://github.com/microsoft/secure-data-sandbox/README.md
For more information and examples, see https://github.com/microsoft/secure-data-sandbox/blob/main/laboratory/README.md
~~~
The first thing we need to do is connect the CLI to the laboratory service that we just started. Currently the `build/src/laboratory/server/main.js` listens on port 3000 of localhost.
The first thing we need to do is connect the CLI to the laboratory service that we just started. Currently `packages/laboratory/dist/main.js` listens on port 3000 of localhost.
~~~
% node build/src/cli/sds.js connect localhost:3000
% npm run cli connect http://localhost:3000
Connected to http://localhost:3000/
Connected to http://localhost:3000/.
~~~
This writes the connection information to `~/.sds`, which is consulted every time the CLI is run. If you don't connect to a Laboratory, you will get the following error:
~~~
% node build/src/cli/sds.js list benchmark
% npm run cli list benchmark
Error: No laboratory connection. Use the "connect" command to specify a laboratory.
~~~
@ -88,56 +86,68 @@ we can use the `demo` command to populate the server with sample data, including
* Two `runs` with results.
~~~
% node build/src/cli/sds.js demo
% npm run cli demo
=== Sample benchmark ===
name: benchmark1
author: author1
version: 0.0.1
pipelines:
- mode: mode1
stages:
- {}
- image: benchmark-image-mode1
version: v1alpha1
stages:
- name: candidate
kind: candidate
volumes:
- volume: training
path: /input
- name: scoring
image: benchmark-image
kind: container
volumes:
- volume: reference
path: /reference
=== Sample candidate ===
name: candidate1
author: author1
version: 0.0.1
version: v1alpha1
benchmark: benchmark1
mode: mode1
image: candidate1-image
=== Sample suite ===
name: suite1
author: author1
version: 0.0.1
version: v1alpha1
benchmark: benchmark1
mode: mode1
volumes:
- name: training
type: AzureBlob
target: 'https://sample.blob.core.windows.net/training'
- name: reference
type: AzureBlob
target: 'https://sample.blob.core.windows.net/reference'
Initiated run f411c160-6bad-11ea-bd94-8fa64eaf2878
Initiated run f4156ae0-6bad-11ea-bd94-8fa64eaf2878
Initiated run 0db6c510-d059-11ea-ab64-31e44163fc86
Initiated run 0dba4780-d059-11ea-ab64-31e44163fc86
~~~
If we didn't want to use the built-in `demo` command, we could have created the benchmark, candidate, suite, and runs manually as follows:
~~~
% node build/src/cli/sds.js create benchmark sample-data/benchmark1.yaml
% npm run cli create benchmark sample-data/benchmark1.yaml
benchmark created
% node build/src/cli/sds.js create candidate sample-data/candidate1.yaml
% npm run cli create candidate sample-data/candidate1.yaml
candidate created
% node build/src/cli/sds.js create suite sample-data/suite1.yaml
% npm run cli create suite sample-data/suite1.yaml
suite created
% node build/src/cli/sds.js run candidate1 suite1
Scheduling run dfc8c5e0-6bae-11ea-bd94-8fa64eaf2878
% npm run cli run candidate1 suite1
Scheduling run 1dae9970-d059-11ea-ab64-31e44163fc86
% node build/src/cli/sds.js run candidate1 suite1
Scheduling run e152c140-6bae-11ea-bd94-8fa64eaf2878
% npm run cli run candidate1 suite1
Scheduling run 1fbe1880-d059-11ea-ab64-31e44163fc86
~~~
The `demo` command does one thing we can't do through the CLI, and that is to pretend to be a worker and report status for the runs.
@ -145,75 +155,84 @@ The `demo` command does one thing we can't do through the CLI, and that is to pr
**List benchmarks, candidates, suites**
~~~
% node build/src/cli/sds.js list benchmark
% npm run cli list benchmark
name submitter date
benchmark1 author1 2020-03-19 14:37:31 PDT
benchmark1 author1 2020-07-27 22:32:28 UTC
% node build/src/cli/sds.js list candidate
name submitter date
candidate1 author1 2020-03-19 14:37:31 PDT
% npm run cli list candidate
name submitter date
candidate1 author1 2020-07-27 22:32:28 UTC
% node build/src/cli/sds.js list suite
% npm run cli list suite
name submitter date
suite1 author1 2020-03-19 14:39:15 PDT
suite1 author1 2020-07-27 22:32:28 UTC
~~~
**Show benchmarks, candidates, suites**
~~~
% node build/src/cli/sds.js show benchmark benchmark1
pipelines:
- mode: mode1
stages:
- {}
- image: benchmark-image-mode1
id: 1
% npm run cli show benchmark benchmark1
stages:
- name: candidate
kind: candidate
volumes:
- volume: training
path: /input
- name: scoring
kind: container
image: benchmark-image
volumes:
- volume: reference
path: /reference
name: benchmark1
author: author1
version: 0.0.1
createdAt: 2020-03-19T21:37:31.437Z
updatedAt: 2020-03-21T20:00:04.907Z
version: v1alpha1
createdAt: 2020-07-27T22:32:28.865Z
updatedAt: 2020-07-27T22:32:43.284Z
% node build/src/cli/sds.js show candidate candidate1
id: 1
% npm run cli show candidate candidate1
name: candidate1
author: author1
version: 0.0.1
version: v1alpha1
benchmark: benchmark1
mode: mode1
image: candidate1-image
createdAt: 2020-03-19T21:37:31.452Z
updatedAt: 2020-03-21T20:00:37.772Z
createdAt: 2020-07-27T22:32:28.883Z
updatedAt: 2020-07-27T22:32:47.384Z
% node build/src/cli/sds.js show suite suite1
id: 1
% npm run cli show suite suite1
volumes:
- name: training
type: AzureBlob
target: 'https://sample.blob.core.windows.net/training'
- name: reference
type: AzureBlob
target: 'https://sample.blob.core.windows.net/reference'
name: suite1
author: author1
version: 0.0.1
version: v1alpha1
benchmark: benchmark1
mode: mode1
createdAt: 2020-03-19T21:39:15.634Z
updatedAt: 2020-03-21T20:00:48.302Z
createdAt: 2020-07-27T22:32:28.889Z
updatedAt: 2020-07-27T22:32:50.623Z
~~~
**List runs**
~~~
% node build/src/cli/sds.js list run
name submitter date candidate suite status
f411c160-6bad-11ea-bd94-8fa64eaf2878 unknown 2020-03-21 12:55:45 PDT candidate1 suite1 completed
f4156ae0-6bad-11ea-bd94-8fa64eaf2878 unknown 2020-03-21 12:55:45 PDT candidate1 suite1 completed
dfc8c5e0-6bae-11ea-bd94-8fa64eaf2878 unknown 2020-03-21 13:02:21 PDT candidate1 suite1 created
e152c140-6bae-11ea-bd94-8fa64eaf2878 unknown 2020-03-21 13:02:23 PDT candidate1 suite1 created
% npm run cli list run
name submitter date candidate suite status
0db6c510-d059-11ea-ab64-31e44163fc86 unknown 2020-07-27 22:32:28 UTC candidate1 suite1 completed
0dba4780-d059-11ea-ab64-31e44163fc86 unknown 2020-07-27 22:32:28 UTC candidate1 suite1 completed
1dae9970-d059-11ea-ab64-31e44163fc86 unknown 2020-07-27 22:32:55 UTC candidate1 suite1 created
1fbe1880-d059-11ea-ab64-31e44163fc86 unknown 2020-07-27 22:32:59 UTC candidate1 suite1 created
~~~
**Displaying Run Results**
~~~
% node build/src/cli/sds.js results benchmark1 suite1
% npm run cli results benchmark1 suite1
run submitter date passed failed skipped
f411c160-6bad-11ea-bd94-8fa64eaf2878 unknown 2020-03-21 12:55:45 PDT 5 6 ---
f4156ae0-6bad-11ea-bd94-8fa64eaf2878 unknown 2020-03-21 12:55:45 PDT 3 --- 7
0db6c510-d059-11ea-ab64-31e44163fc86 unknown 2020-07-27 22:32:28 UTC 5 6 ---
0dba4780-d059-11ea-ab64-31e44163fc86 unknown 2020-07-27 22:32:28 UTC 3 --- 7
~~~
## Deploying SDS to the cloud

api.yml

@ -1,490 +0,0 @@
openapi: 3.0.2
info:
title: secure-data-sandbox
version: 0.0.1
license:
name: MIT
url: https://github.com/microsoft/secure-data-sandbox/blob/main/LICENSE
contact:
name: microsoft/secure-data-sandbox
url: https://github.com/microsoft/secure-data-sandbox
description: A toolkit for conducting machine learning trials against confidential data
tags:
- name: Benchmarks
- name: Suites
- name: Candidates
paths:
/Benchmarks:
get:
tags: [Benchmarks]
responses:
200:
description: Benchmarks
content:
application/json:
schema:
type: array
items:
$ref: "#/components/schemas/Benchmark"
/benchmarks/{benchmarkName}:
parameters:
- name: benchmarkName
in: path
required: true
schema:
$ref: "#/components/schemas/EntityName"
get:
tags: [Benchmarks]
responses:
200:
$ref: "#/components/responses/Benchmark"
400:
$ref: "#/components/responses/BadRequest"
404:
$ref: "#/components/responses/NotFound"
put:
tags: [Benchmarks]
requestBody:
required: true
content:
application/json:
schema:
$ref: "#/components/schemas/Benchmark"
responses:
200:
$ref: "#/components/responses/Benchmark"
400:
$ref: "#/components/responses/BadRequest"
/benchmarks/{benchmarkName}/suites:
parameters:
- name: benchmarkName
in: path
required: true
schema:
$ref: "#/components/schemas/EntityName"
get:
tags: [Suites]
responses:
200:
description: Suites
content:
application/json:
schema:
type: array
items:
$ref: "#/components/schemas/Suite"
/benchmarks/{benchmarkName}/suites/{suiteName}:
parameters:
- name: benchmarkName
in: path
required: true
schema:
$ref: "#/components/schemas/EntityName"
- name: suiteName
in: path
required: true
schema:
$ref: "#/components/schemas/EntityName"
get:
tags: [Suites]
responses:
200:
$ref: "#/components/responses/Suite"
400:
$ref: "#/components/responses/BadRequest"
404:
$ref: "#/components/responses/NotFound"
put:
tags: [Suites]
requestBody:
required: true
content:
application/json:
schema:
$ref: "#/components/schemas/Suite"
responses:
200:
$ref: "#/components/responses/Suite"
400:
$ref: "#/components/responses/BadRequest"
/benchmarks/{benchmarkName}/candidates:
parameters:
- name: benchmarkName
in: path
required: true
schema:
$ref: "#/components/schemas/EntityName"
get:
tags: [Candidates]
responses:
200:
description: Candidates
content:
application/json:
schema:
type: array
items:
$ref: "#/components/schemas/Candidate"
/benchmarks/{benchmarkName}/candidates/{candidateName}:
parameters:
- name: benchmarkName
in: path
required: true
schema:
$ref: "#/components/schemas/EntityName"
- name: candidateName
in: path
required: true
schema:
$ref: "#/components/schemas/EntityName"
get:
tags: [Candidates]
responses:
200:
$ref: "#/components/responses/Candidate"
400:
$ref: "#/components/responses/BadRequest"
404:
$ref: "#/components/responses/NotFound"
put:
tags: [Candidates]
requestBody:
required: true
content:
application/json:
schema:
$ref: "#/components/schemas/Candidate"
responses:
200:
$ref: "#/components/responses/Candidate"
400:
$ref: "#/components/responses/BadRequest"
/benchmarks/{benchmarkName}/candidates/{candidateName}/runs:
parameters:
- name: benchmarkName
in: path
required: true
schema:
$ref: "#/components/schemas/EntityName"
- name: candidateName
in: path
required: true
schema:
$ref: "#/components/schemas/EntityName"
post:
tags: [Runs]
requestBody:
required: true
content:
application/json:
schema:
$ref: "#/components/schemas/PipelineRunRequest"
responses:
202:
description: Run requested
content:
application/json:
schema:
$ref: "#/components/schemas/PipelineRun"
get:
tags: [Runs]
responses:
200:
description: Candidate runs
content:
application/json:
schema:
type: array
items:
$ref: "#/components/schemas/PipelineRun"
/runs/{runId}:
parameters:
- name: runId
in: path
required: true
schema:
type: string
format: uuid
get:
tags: [Runs]
responses:
200:
description: Run details
content:
application/json:
schema:
$ref: "#/components/schemas/PipelineRun"
/runs/{runId}/replay:
parameters:
- name: runId
in: path
required: true
schema:
type: string
format: uuid
post:
tags: [Runs]
responses:
202:
description: Run requested
content:
application/json:
schema:
$ref: "#/components/schemas/PipelineRun"
components:
schemas:
EntityName:
type: string
minLength: 1
maxLength: 64
pattern: '^[a-zA-Z0-9\.\-\_]*$'
BaseEntity:
type: object
properties:
name:
$ref: "#/components/schemas/EntityName"
type:
type: string
readOnly: true
author:
type: string
createdDate:
type: string
format: date-time
readOnly: true
updatedDate:
type: string
format: date-time
readOnly: true
version:
type: string
required:
- name
- version
Error:
type: object
properties:
error:
type: string
required:
- error
readOnly: true
Suite:
type: object
allOf:
- $ref: "#/components/schemas/BaseEntity"
properties:
type:
enum: [suite]
target:
$ref: "#/components/schemas/Target"
volumes:
type: object
minProperties: 1
additionalProperties:
type: object
properties:
target:
type: string
required:
- target
required:
- target
Benchmark:
type: object
allOf:
- $ref: "#/components/schemas/BaseEntity"
properties:
type:
enum: [benchmark]
modes:
type: object
minProperties: 1
additionalProperties:
$ref: "#/components/schemas/PipelineMode"
required:
- name
- version
- modes
Candidate:
type: object
allOf:
- $ref: "#/components/schemas/BaseEntity"
properties:
type:
enum: [candidate]
target:
$ref: "#/components/schemas/Target"
secrets:
type: array
items:
type: object
properties:
type:
type: string
secretUri:
type: string
required:
- type
- secretUri
externalEndpoints:
type: array
items:
type: string
required:
- target
PipelineRunRequest:
type: object
properties:
suite:
$ref: "#/components/schemas/EntityName"
stages:
minProperties: 1
additionalProperties:
type: object
properties:
image:
type: string
required:
- suite
- stages
PipelineRun:
type: object
readOnly: true
allOf:
- $ref: "#/components/schemas/BaseEntity"
properties:
name:
type: string
format: uuid
status:
type: string
enum: ['created', 'running', 'completed', 'failed']
target:
$ref: "#/components/schemas/Target"
stages:
type: object
additionalProperties:
type: object
properties:
type:
type: string
image:
type: string
volumes:
additionalProperties:
type: object
properties:
path:
type: string
target:
type: string
writable:
type: boolean
PipelineMode:
type: object
properties:
type:
type: string
stages:
type: object
additionalProperties:
oneOf:
- $ref: "#/components/schemas/CandidatePipelineStage"
- $ref: "#/components/schemas/ContainerPipelineStage"
discriminator:
propertyName: type
mapping:
candidate: "#/components/schemas/CandidatePipelineStage"
container: "#/components/schemas/ContainerPipelineStage"
minProperties: 1
volumes:
type: object
additionalProperties:
type: object
required:
- stages
- type
BasePipelineStage:
type: object
properties:
type:
type: string
env:
type: object
minProperties: 1
additionalProperties:
type: string
volumeMounts:
type: object
minProperties: 1
additionalProperties:
type: object
properties:
path:
type: string
writable:
type: boolean
required:
- path
required:
- type
CandidatePipelineStage:
type: object
allOf:
- $ref: "#/components/schemas/BasePipelineStage"
properties:
type:
enum: [candidate]
ContainerPipelineStage:
type: object
allOf:
- $ref: "#/components/schemas/BasePipelineStage"
properties:
type:
enum: [container]
image:
type: string
required:
- image
Target:
type: object
properties:
contest:
$ref: "#/components/schemas/EntityName"
mode:
type: string
required:
- contest
- mode
responses:
BadRequest:
description: Not found
content:
application/json:
schema:
$ref: "#/components/schemas/Error"
NotFound:
description: Not found
content:
application/json:
schema:
$ref: "#/components/schemas/Error"
Candidate:
description: Candidate details
content:
application/json:
schema:
$ref: "#/components/schemas/Candidate"
Benchmark:
description: Contest details
content:
application/json:
schema:
$ref: "#/components/schemas/Benchmark"
Suite:
description: Suite details
content:
application/json:
schema:
$ref: "#/components/schemas/Suite"


@ -280,16 +280,12 @@
"serverFarmId": "[resourceId('Microsoft.Web/serverFarms', variables('appSvcPlan'))]",
"siteConfig": {
"alwaysOn": true,
"appCommandLine": "npm run laboratory",
"appCommandLine": "node_modules/@microsoft/sds-laboratory/dist/main.js",
"appSettings": [
{
"name": "AZURE_CLIENT_ID",
"value": "[reference(resourceId('Microsoft.ManagedIdentity/userAssignedIdentities', variables('identity'))).clientId]"
},
{
"name": "BLOB_CONTAINER",
"value": "[concat(reference(resourceId('Microsoft.Storage/storageAccounts', variables('metadataStorage'))).primaryEndpoints.blob, variables('runsContainer'))]"
},
{
"name": "QUEUE_MODE",
"value": "azure"


@ -1,102 +0,0 @@
{
"$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {
"asgId": {
"type": "string"
},
"identityId": {
"type": "string"
},
"subnetId": {
"type": "string"
},
"imageId": {
"type": "string",
"defaultValue": "[resourceId('Microsoft.Compute/images', 'sds-worker')]"
},
"vmPassword": {
"type": "securestring",
"defaultValue": "[newGuid()]"
},
"vmSize": {
"type": "string",
"defaultValue": "Standard_DS2_v2"
}
},
"variables": {
"name": "worker"
},
"resources": [
{
"type": "Microsoft.Network/networkInterfaces",
"apiVersion": "2019-11-01",
"location": "[resourceGroup().location]",
"name": "[variables('name')]",
"properties": {
"ipConfigurations": [
{
"name": "default",
"properties": {
"applicationSecurityGroups": [
{
"id": "[parameters('asgId')]"
}
],
"privateIPAllocationMethod": "Dynamic",
"subnet": {
"id": "[parameters('subnetId')]"
}
}
}
]
}
},
{
"dependsOn": [
"[resourceId('Microsoft.Network/networkInterfaces', variables('name'))]"
],
"type": "Microsoft.Compute/virtualMachines",
"apiVersion": "2019-07-01",
"location": "[resourceGroup().location]",
"name": "[variables('name')]",
"identity": {
"type": "UserAssigned",
"userAssignedIdentities": {
"[parameters('identityId')]": {
}
}
},
"properties": {
"hardwareProfile": {
"vmSize": "[parameters('vmSize')]"
},
"networkProfile": {
"networkInterfaces": [
{
"id": "[resourceId('Microsoft.Network/networkInterfaces', variables('name'))]"
}
]
},
"osProfile": {
"adminUsername": "azureuser",
"adminPassword": "[parameters('vmPassword')]",
"computerName": "[variables('name')]",
"linuxConfiguration": {
}
},
"storageProfile": {
"imageReference": {
"id": "[parameters('imageId')]"
},
"osDisk": {
"createOption": "FromImage",
"managedDisk": {
"storageAccountType": "Premium_LRS"
}
}
}
}
}
]
}


@ -61,32 +61,22 @@ validate_arguments() {
FORCE=${FORCE:-false}
}
deploy_environment() {
if ! az image show -g $RESOURCE_GROUP -n sds-worker &>/dev/null || [ "$FORCE" = true ]; then
TMP_DIR=$(mktemp -d)
(
pushd $TMP_DIR
curl -sL -O "${ASSETS_BASE}/worker/packer.json" -O "${ASSETS_BASE}/worker/setup.sh" -O "${ASSETS_BASE}/worker/start.sh"
packer build -force -var resource_group=$RESOURCE_GROUP packer.json
)
else
>&2 echo "Skipping worker VM image. Run with --force to recreate"
fi
deploy_environment() {
az deployment group create -g $RESOURCE_GROUP -u "${ASSETS_BASE}/azure/azuredeploy.json" -p "assetsBaseUrl=$ASSETS_BASE"
}
deploy_laboratory() {
SITE_ID=$(az deployment group show -g $RESOURCE_GROUP -n azuredeploy --query properties.outputs.laboratorySiteId.value -o tsv)
npm run laboratory:package:appservice
az webapp deployment source config-zip --ids $SITE_ID --src dist/secure-data-sandbox.zip
npm run pack:laboratory:appservice
az webapp deployment source config-zip --ids $SITE_ID --src dist/laboratory/sds-laboratory.zip
az webapp restart --ids $SITE_ID
}
deploy_worker() {
REGISTRY=$(az deployment group show -g $RESOURCE_GROUP -n azuredeploy --query properties.outputs.laboratoryRegistryName.value -o tsv)
# TODO: replace with a stable registry and/or always build during CI
az acr import -n $REGISTRY --source docker.io/acanthamoeba/sds-worker:latest -t worker:latest --force
}


@ -1,55 +0,0 @@
{
"variables": {
"name": "sds-worker",
"resource_group": null,
"tenant_id": "{{env `AZURE_TENANT_ID`}}",
"subscription_id": "{{env `AZURE_SUBSCRIPTION_ID`}}",
"client_id": "{{env `AZURE_CLIENT_ID`}}",
"client_secret": "{{env `AZURE_CLIENT_SECRET`}}",
"location": "southcentralus"
},
"sensitive-variables": [
"client_secret"
],
"builders": [
{
"type": "azure-arm",
"async_resourcegroup_delete": true,
"tenant_id": "{{user `tenant_id`}}",
"subscription_id": "{{user `subscription_id`}}",
"client_id": "{{user `client_id`}}",
"client_secret": "{{user `client_secret`}}",
"location": "{{user `location`}}",
"os_type": "Linux",
"image_publisher": "Canonical",
"image_offer": "UbuntuServer",
"image_sku": "18.04-LTS",
"managed_image_name": "{{user `name`}}",
"managed_image_resource_group_name": "{{user `resource_group`}}"
}
],
"provisioners": [
{
"type": "file",
"source": "{{template_dir}}/start.sh",
"destination": "/tmp/start.sh"
},
{
"type": "shell",
"execute_command": "chmod +x {{ .Path }}; {{ .Vars }} sudo -E bash '{{ .Path }}'",
"script": "{{template_dir}}/setup.sh"
},
{
"type": "shell",
"execute_command": "chmod +x {{ .Path }}; {{ .Vars }} sudo -E sh '{{ .Path }}'",
"inline_shebang": "/bin/sh -x",
"inline": [
"/usr/sbin/waagent -force -deprovision+user && export HISTSIZE=0 && sync"
]
}
]
}


@ -1,18 +0,0 @@
#!/bin/bash
set -euo pipefail
# Docker repository
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
# Microsoft repository
curl -fsSL https://packages.microsoft.com/keys/microsoft.asc | apt-key add -
apt-add-repository "deb [arch=amd64] https://packages.microsoft.com/ubuntu/$(lsb_release -rs)/prod $(lsb_release -cs) main"
# Install dependencies
apt-get update -y
apt-get install -y jq docker-ce docker-ce-cli containerd.io blobfuse fuse
# Move startup script
chmod +x /tmp/start.sh
mv /tmp/start.sh /var/lib/cloud/scripts/per-boot/start.sh


@ -1,36 +0,0 @@
#!/bin/bash
set -euo pipefail
# service discovery via DNS
SETTINGS=$(dig @168.63.129.16 +short laboratory.environment.private txt)
# blobfuse: one-time setup
mkdir -p /mnt/blobfusetmp
# blobfuse: mount runs container
mkdir -p /var/sds/runs
export AZURE_STORAGE_ACCOUNT=$(grep -Po 'storageAccount=\K[^"]*' <<< $SETTINGS)
export AZURE_STORAGE_AUTH_TYPE=MSI
CONTAINER=$(grep -Po 'runsContainer=\K[^"]*' <<< $SETTINGS)
blobfuse /var/sds/runs --container-name=$CONTAINER --tmp-path=/mnt/blobfusetmp
# pull latest worker
AAD_ACCESS_TOKEN=$(curl -s -H 'Metadata: true' 'http://169.254.169.254/metadata/identity/oauth2/token?api-version=2018-02-01&resource=https://management.azure.com/' | jq -r '.access_token')
CLAIMS=$(echo $AAD_ACCESS_TOKEN | cut -d '.' -f 2)
TENANT=$(echo $CLAIMS | base64 -d | jq -r '.iss | capture("https://(.*)/(?<tenant>.*)/").tenant')
REGISTRY=$(grep -Po 'registry=\K\K[^"]*' <<< $SETTINGS)
ACR_REFRESH_TOKEN=$(curl -s -X POST -H 'Content-Type: application/x-www-form-urlencoded' -d "grant_type=access_token&service=$REGISTRY&tenant=$TENANT&access_token=$AAD_ACCESS_TOKEN" https://$REGISTRY/oauth2/exchange | jq -r '.refresh_token')
docker login -u 00000000-0000-0000-0000-000000000000 -p $ACR_REFRESH_TOKEN $REGISTRY
# If the pull was unsuccessful (not yet provisioned, network blip, etc), wait a bit and keep trying
while ! docker pull $REGISTRY/worker; do
sleep 10
done
# run worker app
QUEUE_ENDPOINT=$(grep -Po 'runsQueueEndpoint=\K[^"]*' <<< $SETTINGS)
docker container rm worker --force || true
docker container run -d --name worker --restart always -v /var/run/docker.sock:/var/run/docker.sock -v /root/.docker/config.json:/home/node/.docker/config.json -e QUEUE_MODE=azure -e QUEUE_ENDPOINT=$QUEUE_ENDPOINT $REGISTRY/worker

docker-compose.yml Normal file

@ -0,0 +1,14 @@
version: '3.8'
services:
laboratory:
build:
context: .
target: laboratory
cli:
build:
context: .
target: cli
command: /bin/sh -c 'sleep 5 && sds-cli connect http://laboratory:3000 && sds-cli demo'
depends_on:
- laboratory
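
This new compose file builds the `laboratory` and `cli` targets from the Dockerfile above and has the CLI container sleep briefly, connect, and load the demo data. A hedged sketch of running it end to end; the `--exit-code-from cli` form matches the CI workflow earlier in this diff:

~~~
# Build both images and run the demo; compose exits with the cli container's status.
% docker-compose up --build --exit-code-from cli
~~~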

lerna.json Normal file

@ -0,0 +1,9 @@
{
"packages": [
"packages/sds",
"packages/cli",
"packages/worker",
"packages/laboratory"
],
"version": "0.0.0"
}
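
lerna.json declares the four packages (`sds`, `cli`, `worker`, `laboratory`) that are versioned and built together. A short sketch of the lerna commands the root scripts below rely on; invoking them through `npx` is an assumption:

~~~
% npx lerna bootstrap          # link local packages and install their dependencies
% npx lerna run compile        # run each package's compile script
% npx lerna exec npm pack      # produce the per-package tarballs consumed by the Dockerfile
~~~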

package-lock.json (generated) — file diff not shown because it is too large


@ -1,92 +1,27 @@
{
"name": "secure-data-sandbox",
"version": "0.0.1",
"description": "A toolkit for conducting machine learning trials against confidential data",
"license": "MIT",
"name": "root",
"private": true,
"scripts": {
"test": "mocha",
"pretest:functional": "npm run compile",
"test:functional": "mocha --config test/functional/.mocharc.yml",
"check": "gts check",
"clean": "gts clean && rimraf ./dist",
"cli": "node build/src/cli/sds.js",
"cli:dev": "npm run compile && node -r dotenv/config build/src/cli/sds.js",
"compile": "tsc -p .",
"fix": "gts fix",
"prepare": "npm run compile",
"pretest": "npm run compile",
"posttest": "npm run check",
"laboratory": "node build/src/laboratory/server/main.js",
"laboratory:dev": "npm run compile && node -r dotenv/config build/src/laboratory/server/main.js",
"prelaboratory:package:appservice": "rimraf ./dist",
"laboratory:package:appservice": "npm run compile && mkdir -p dist && cp -r --parents package*.json build/src dist && cd dist && npm install --ignore-scripts --only=prod && zip -r $npm_package_name.zip package.json package-lock.json build/src node_modules",
"sds": "node build/src/cli/sds.js",
"sds:dev": "npm run compile && node -r dotenv/config build/src/cli/sds.js",
"worker": "node build/src/worker/app.js",
"worker:dev": "npm run compile && node -r dotenv/config build/src/worker/app.js",
"preazure:dev": "serve -l 8080 deploy >/dev/null &",
"azure:dev": "ngrok start -config .ngrok.yml deploy",
"postazure:dev": "kill $(lsof -t -i:8080)"
},
"dependencies": {
"@azure/identity": "^1.0.3",
"@azure/storage-queue": "^12.0.4",
"axios": "^0.19.2",
"body-parser": "^1.19.0",
"commander": "^5.1.0",
"dockerode": "^3.2.0",
"env-var": "^6.1.1",
"express": "^4.17.1",
"fp-ts": "^2.5.4",
"io-ts": "^2.2.2",
"js-yaml": "^3.13.1",
"luxon": "^1.24.1",
"reflect-metadata": "^0.1.13",
"sequelize": "^5.21.7",
"sequelize-typescript": "^1.1.0",
"sqlite3": "^4.2.0",
"strong-error-handler": "^3.4.0",
"tedious": "^8.3.0",
"uuid": "^3.4.0"
"check": "lerna run check",
"cli": "npm run start --silent --prefix packages/cli",
"compile": "lerna run compile",
"predev:azure": "serve -l 9001 deploy >/dev/null &",
"dev:azure": "ngrok start -config .ngrok.yml deploy",
"postdev:azure": "kill $(lsof -t -i:9001)",
"fix": "lerna run fix",
"laboratory": "npm run start --silent --prefix packages/laboratory",
"pack": "lerna bootstrap --ignore-scripts && lerna exec npm pack",
"pack:laboratory:appservice": "rimraf ./dist/laboratory && mkdir -p ./dist/laboratory && npm run pack && cd ./dist/laboratory && npm init -y && npm install --only=prod ../../packages/sds/*.tgz ../../packages/laboratory/*.tgz && zip -r sds-laboratory.zip package*.json node_modules",
"postinstall": "lerna bootstrap",
"test": "lerna run test"
},
"devDependencies": {
"@sinonjs/fake-timers": "^6.0.1",
"@types/ajv": "^1.0.0",
"@types/bluebird": "^3.5.30",
"@types/body-parser": "^1.19.0",
"@types/chai": "^4.2.11",
"@types/chai-as-promised": "^7.1.2",
"@types/chai-http": "^4.2.0",
"@types/dockerode": "^2.5.28",
"@types/dotenv": "^8.2.0",
"@types/express": "^4.17.6",
"@types/js-yaml": "^3.12.3",
"@types/luxon": "^1.22.0",
"@types/mocha": "^7.0.2",
"@types/nock": "^11.1.0",
"@types/node": "^12.12.37",
"@types/request-promise": "^4.1.46",
"@types/sinon": "^7.5.2",
"@types/sinonjs__fake-timers": "^6.0.1",
"@types/tedious": "^4.0.0",
"@types/uuid": "^3.4.9",
"@types/validator": "^12.0.1",
"@types/yargs": "^15.0.4",
"chai": "^4.2.0",
"chai-as-promised": "^7.1.1",
"chai-exclude": "^2.0.2",
"chai-http": "^4.3.0",
"dotenv": "^8.2.0",
"gts": "^1.1.2",
"mocha": "^7.1.2",
"gts": "^2.0.2",
"lerna": "^3.22.1",
"ngrok": "^3.2.7",
"nock": "^12.0.3",
"rimraf": "^3.0.2",
"serve": "^11.3.2",
"sinon": "^9.0.2",
"source-map-support": "^0.5.19",
"typescript": "^3.8.3",
"typescript-json-schema": "^0.42.0",
"xmlhttprequest": "^1.8.0"
}
"typescript": "^3.9.7"
},
"dependencies": {}
}
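
Taken together, the root scripts delegate everything to lerna and the individual packages. A plausible local workflow, sketched from the scripts above (ordering and comments are mine):

~~~
% npm install            # postinstall runs `lerna bootstrap`
% npm run compile        # lerna run compile across all packages
% npm test               # lerna run test
% npm run laboratory     # start the laboratory service from packages/laboratory
% npm run cli help       # invoke the CLI from packages/cli, as shown in the README
~~~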


@ -0,0 +1,3 @@
{
"extends": "../../.eslintrc.json"
}


@ -0,0 +1,2 @@
dist
node_modules


@ -0,0 +1,3 @@
module.exports = {
...require('../../.prettierrc.js'),
}

packages/cli/package-lock.json (generated) Normal file — file diff not shown because it is too large

packages/cli/package.json Normal file

@ -0,0 +1,51 @@
{
"name": "@microsoft/sds-cli",
"version": "0.1.0",
"description": "A toolkit for conducting machine learning trials against confidential data",
"license": "MIT",
"main": "dist/index",
"typings": "dist/index",
"files": [
"dist"
],
"engines": {
"node": ">=12"
},
"bin": {
"sds-cli": "./dist/index.js"
},
"scripts": {
"check": "gts check",
"clean": "gts clean",
"compile": "tsc -p tsconfig.build.json",
"fix": "gts fix",
"prepare": "npm run compile",
"pretest": "npm run compile",
"posttest": "npm run check",
"start": "ts-node src/index.ts",
"test": "ts-mocha"
},
"devDependencies": {
"@types/chai": "^4.2.12",
"@types/js-yaml": "^3.12.5",
"@types/luxon": "^1.24.3",
"@types/mocha": "^8.0.1",
"@types/node": "^13.11.1",
"chai": "^4.2.0",
"gts": "^2.0.2",
"mocha": "^8.1.0",
"ts-mocha": "^7.0.0",
"typescript": "^3.8.3",
"uuid": "^8.3.0"
},
"dependencies": {
"@azure/msal-node": "^1.0.0-alpha.3",
"@microsoft/sds": "*",
"axios": "^0.19.2",
"commander": "^6.0.0",
"fp-ts": "^2.7.1",
"io-ts": "^2.2.9",
"js-yaml": "^3.14.0",
"luxon": "^1.24.1"
}
}

packages/cli/src/conn.ts Normal file

@ -0,0 +1,153 @@
import * as msal from '@azure/msal-node';
import { promises as fs } from 'fs';
import * as path from 'path';
import * as os from 'os';
import * as yaml from 'js-yaml';
import * as t from 'io-ts';
import * as url from 'url';
import {
IllegalOperationError,
LaboratoryClient,
validate,
ClientConnectionInfoType,
} from '@microsoft/sds';
// global client
let client: LaboratoryClient | undefined;
const configDir = path.join(os.homedir(), '.sds');
const connFilePath = 'sds.yaml';
const tokenCachePath = 'accessTokens.json';
const cachePlugin = {
async readFromStorage() {
return readConfig(tokenCachePath);
},
async writeToStorage(getMergedState: (oldState: string) => string) {
let oldFile = '';
try {
oldFile = await readConfig(tokenCachePath);
} finally {
const mergedState = getMergedState(oldFile);
await writeConfig(tokenCachePath, mergedState);
}
},
};
export async function initConnection(host: string) {
const labUrl = new url.URL(host);
const endpoint = labUrl.href;
const connectionInfo = await new LaboratoryClient(
endpoint
).negotiateConnection();
const config: IConnectConfiguration = {
endpoint,
...connectionInfo,
};
await writeConfig(connFilePath, yaml.safeDump(config));
const newClient = buildClient(config);
await newClient.validateConnection();
client = newClient;
}
export async function getLabClient(): Promise<LaboratoryClient> {
try {
if (client) {
return client;
}
const text = await readConfig(connFilePath);
const config = validate(ConnectConfigurationType, yaml.safeLoad(text));
client = buildClient(config);
return client;
} catch {
throw new IllegalOperationError(
'No laboratory connection. Use the "connect" command to specify a laboratory.'
);
}
}
const ConnectConfigurationType = t.intersection([
t.type({
endpoint: t.string,
}),
ClientConnectionInfoType,
t.partial({
// from msal-common/AccountInfo
account: t.type({
homeAccountId: t.string,
environment: t.string,
tenantId: t.string,
username: t.string,
}),
}),
]);
type IConnectConfiguration = t.TypeOf<typeof ConnectConfigurationType>;
function acquireAADAccessToken(config: IConnectConfiguration) {
if (config.type !== 'aad') {
throw new Error(
'Cannot retrieve an AAD access token for a non-AAD connection'
);
}
return async () => {
const pca = new msal.PublicClientApplication({
auth: {
clientId: config.clientId,
authority: config.authority,
},
cache: {
cachePlugin,
},
});
const cache = pca.getTokenCache();
try {
await cache.readFromPersistence();
const silentResult = await pca.acquireTokenSilent({
account: config.account!,
scopes: config.scopes,
});
cache.writeToPersistence();
return silentResult.accessToken;
} catch (e) {
const deviceCodeResult = await pca.acquireTokenByDeviceCode({
deviceCodeCallback: response => console.log(response.message),
scopes: config.scopes,
});
config.account = deviceCodeResult.account;
await writeConfig(connFilePath, yaml.safeDump(config));
cache.writeToPersistence();
return deviceCodeResult.accessToken;
}
};
}
function buildClient(config: IConnectConfiguration): LaboratoryClient {
const tokenRetriever =
config.type === 'aad' ? acquireAADAccessToken(config) : undefined;
return new LaboratoryClient(config.endpoint, tokenRetriever);
}
async function readConfig(filePath: string): Promise<string> {
const fullPath = path.join(configDir, filePath);
try {
return await fs.readFile(fullPath, 'utf8');
} catch (e) {
const err = e as NodeJS.ErrnoException;
if (err.code === 'ENOENT') {
return '';
}
throw e;
}
}
async function writeConfig(filePath: string, data: string): Promise<void> {
const fullPath = path.join(configDir, filePath);
await fs.mkdir(configDir, { recursive: true });
await fs.writeFile(fullPath, data);
}
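
conn.ts centralizes connection handling for the CLI: `initConnection` negotiates with the laboratory and persists the result to `~/.sds/sds.yaml`, and `getLabClient` lazily rebuilds a `LaboratoryClient`, attaching an AAD device-code token retriever when the stored connection type is `aad`. A minimal usage sketch, assuming the caller provides its own async wrapper:

~~~
import { initConnection, getLabClient } from './conn';

async function example() {
  // One-time: negotiate and persist connection info (written to ~/.sds/sds.yaml).
  await initConnection('http://localhost:3000');

  // Later invocations reuse the persisted configuration.
  const lab = await getLabClient();
  const benchmarks = await lab.allBenchmarks();
  console.log(benchmarks.map(b => b.name));
}
~~~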


@ -33,8 +33,7 @@ export function decodeError(e: NodeJS.ErrnoException | AxiosError): string {
if (e.code === 'ENOENT') {
// Most likely file not found.
// tslint:disable-next-line:no-any
const message = `cannot open file "${(e as any).path}".`;
const message = `cannot open file "${(e as NodeJS.ErrnoException).path}".`;
return message;
}


@ -1,52 +1,70 @@
import * as yaml from 'js-yaml';
import {
apiVersion,
IBenchmark,
ICandidate,
IPipeline,
ISuite,
LaboratoryClient,
RunStatus,
} from '../laboratory';
const pipelines: IPipeline[] = [
{
mode: 'mode1',
stages: [
{
// Candidate
},
{
// Benchmark
image: 'benchmark-image-mode1',
},
],
},
];
BenchmarkStageKind,
} from '@microsoft/sds';
const benchmark1: IBenchmark = {
name: 'benchmark1',
author: 'author1',
version: apiVersion,
pipelines,
apiVersion: 'v1alpha1',
stages: [
{
// Candidate
name: 'candidate',
kind: BenchmarkStageKind.CANDIDATE,
volumes: [
{
volume: 'training',
path: '/input',
},
],
},
{
// Benchmark
name: 'scoring',
image: 'benchmark-image',
kind: BenchmarkStageKind.CONTAINER,
volumes: [
{
volume: 'reference',
path: '/reference',
},
],
},
],
};
const candidate1: ICandidate = {
name: 'candidate1',
author: 'author1',
version: apiVersion,
apiVersion: 'v1alpha1',
benchmark: 'benchmark1',
mode: 'mode1',
image: 'candidate1-image',
};
const suite1: ISuite = {
name: 'suite1',
author: 'author1',
version: apiVersion,
apiVersion: 'v1alpha1',
benchmark: 'benchmark1',
mode: 'mode1',
volumes: [
{
name: 'training',
type: 'AzureBlob',
target: 'https://sample.blob.core.windows.net/training',
},
{
name: 'reference',
type: 'AzureBlob',
target: 'https://sample.blob.core.windows.net/reference',
},
],
};
export async function configureDemo(lab: LaboratoryClient) {


@ -1,14 +1,12 @@
#!/usr/bin/env node
import { Command } from 'commander';
import * as fs from 'fs';
import { Decoder } from 'io-ts';
import * as t from 'io-ts';
import * as yaml from 'js-yaml';
import { DateTime } from 'luxon';
import * as os from 'os';
import * as path from 'path';
import {
apiVersion,
BenchmarkType,
CandidateType,
IBenchmark,
@ -17,15 +15,14 @@ import {
IllegalOperationError,
IRun,
ISuite,
LaboratoryClient,
SuiteType,
validate,
} from '../laboratory';
} from '@microsoft/sds';
import { initConnection, getLabClient } from './conn';
import { decodeError } from './decode_error';
import { configureDemo } from './demo';
import { formatChoices, formatTable, Alignment } from './formatting';
import { URL } from 'url';
const readme =
'https://github.com/microsoft/secure-data-sandbox/blob/main/laboratory/README.md';
@ -34,7 +31,6 @@ function main(argv: string[]) {
const program = new Command();
program.description('Secure Data Sandbox CLI');
program.version(apiVersion);
program
.command('connect [service]')
@ -151,29 +147,11 @@ function examples(argv: string[]) {
///////////////////////////////////////////////////////////////////////////////
async function connect(host: string) {
if (host === undefined) {
if (connection) {
console.log(`Connected to ${connection!.endpoint}.`);
} else {
console.log(
'No laboratory connection. Use the "connect" command to specify a laboratory.'
);
}
} else {
// Hostname validation according to RFC 1123 and RFC 952.
// https://stackoverflow.com/questions/106179/regular-expression-to-match-dns-hostname-or-ip-address
const ipRE = /^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(:\d+)?$/;
const hostRE = /^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*(:\d+)?$/;
if (!host.match(ipRE) && !host.match(hostRE)) {
const message = `Illegal host "${host}"`;
throw new IllegalOperationError(message);
}
const url = new URL('https://' + host);
const endpoint = url.toString();
const config = yaml.safeDump({ endpoint });
fs.writeFileSync(sdsFile, config);
tryInitializeConnection();
console.log(`Connected to ${endpoint}.`);
throw new Error('You must specify a host.');
}
await initConnection(host);
console.log(`Connected to ${host}.`);
}
///////////////////////////////////////////////////////////////////////////////
@ -199,7 +177,7 @@ async function createHelper<T>(ops: ISpecOps<T>, specFile: string) {
//
///////////////////////////////////////////////////////////////////////////////
async function demo() {
configureDemo(getLab());
configureDemo(await getLabClient());
}
///////////////////////////////////////////////////////////////////////////////
@ -208,7 +186,7 @@ async function demo() {
//
///////////////////////////////////////////////////////////////////////////////
async function deploy() {
console.log(`deploy command not implemented.`);
console.log('deploy command not implemented.');
}
///////////////////////////////////////////////////////////////////////////////
@ -275,15 +253,15 @@ async function listHelper<T extends IEntityBase>(
//
///////////////////////////////////////////////////////////////////////////////
async function results(benchmark: string, suite: string) {
const results = await getLab().allRunResults(benchmark, suite);
const results = await (await getLabClient()).allRunResults(benchmark, suite);
if (results.length === 0) {
console.log(`No matching results`);
console.log('No matching results');
} else {
const columns = new Set<string>();
for (const result of results) {
for (const key in result.measures) {
if (!result.measures.hasOwnProperty(key)) continue;
if (!Object.prototype.hasOwnProperty.call(result, key)) continue;
columns.add(key);
}
}
@ -330,7 +308,10 @@ async function results(benchmark: string, suite: string) {
//
///////////////////////////////////////////////////////////////////////////////
async function run(candidate: string, suite: string) {
const run = await getLab().createRunRequest({ candidate, suite });
const run = await (await getLabClient()).createRunRequest({
candidate,
suite,
});
console.log(`Scheduling run ${run.name}`);
}
@ -367,7 +348,7 @@ async function showHelper<T>(ops: ISpecOps<T>, name?: string) {
//
///////////////////////////////////////////////////////////////////////////////
// tslint:disable-next-line: interface-name
// eslint-disable-next-line @typescript-eslint/interface-name-prefix
interface ISpecOps<T> {
name(): string;
load(specFile: string): T;
@ -381,45 +362,46 @@ const benchmarkOps: ISpecOps<IBenchmark> = {
name: () => 'benchmark',
load: (specFile: string) => load(BenchmarkType, specFile),
format: (spec: IBenchmark) => formatSpec(spec),
all: () => getLab().allBenchmarks(),
one: (name: string) => getLab().oneBenchmark(name),
upsert: (spec: IBenchmark, name?: string) =>
getLab().upsertBenchmark(spec, name),
all: async () => (await getLabClient()).allBenchmarks(),
one: async (name: string) => (await getLabClient()).oneBenchmark(name),
upsert: async (spec: IBenchmark, name?: string) =>
(await getLabClient()).upsertBenchmark(spec, name),
};
const candidateOps: ISpecOps<ICandidate> = {
name: () => 'candidate',
load: (specFile: string) => load(CandidateType, specFile),
format: (spec: ICandidate) => formatSpec(spec),
all: () => getLab().allCandidates(),
one: (name: string) => getLab().oneCandidate(name),
upsert: (spec: ICandidate, name?: string) =>
getLab().upsertCandidate(spec, name),
all: async () => (await getLabClient()).allCandidates(),
one: async (name: string) => (await getLabClient()).oneCandidate(name),
upsert: async (spec: ICandidate, name?: string) =>
(await getLabClient()).upsertCandidate(spec, name),
};
const suiteOps: ISpecOps<ISuite> = {
name: () => 'suite',
load: (specFile: string) => load(SuiteType, specFile),
format: (spec: ISuite) => formatSpec(spec),
all: () => getLab().allSuites(),
one: (name: string) => getLab().oneSuite(name),
upsert: (spec: ISuite, name?: string) => getLab().upsertSuite(spec, name),
all: async () => (await getLabClient()).allSuites(),
one: async (name: string) => (await getLabClient()).oneSuite(name),
upsert: async (spec: ISuite, name?: string) =>
(await getLabClient()).upsertSuite(spec, name),
};
const runOps: ISpecOps<IRun> = {
name: () => 'run',
load: (specFile: string) => {
throw new IllegalOperationError(`Load operation not supported for IRun.`);
throw new IllegalOperationError('Load operation not supported for IRun.');
},
format: (spec: IRun) => formatSpec(spec),
all: () => getLab().allRuns(),
one: (name: string) => getLab().oneRun(name),
all: async () => (await getLabClient()).allRuns(),
one: async (name: string) => (await getLabClient()).oneRun(name),
upsert: (spec: IRun, name?: string) => {
throw new IllegalOperationError(`Upsert operation not supported for IRun.`);
throw new IllegalOperationError('Upsert operation not supported for IRun.');
},
};
// tslint:disable-next-line: no-any
// eslint-disable-next-line @typescript-eslint/no-explicit-any
async function dispatch<FUNCTION extends Function, PARAMS extends any[]>(
rawType: string,
legalTypes: string[],
@ -447,16 +429,19 @@ async function dispatch<FUNCTION extends Function, PARAMS extends any[]>(
await f.apply(null, [suiteOps, ...p]);
break;
default:
const message = `Invalid entity "${type}". Expected ${formatChoices(
legalTypes
)}.`;
throw new IllegalOperationError(message);
throw new IllegalOperationError(
`Invalid entity "${type}". Expected ${formatChoices(legalTypes)}.`
);
}
}
}
function load<I, A>(decoder: Decoder<I, A>, specFile: string): A {
const yamlText = fs.readFileSync(specFile, 'utf8');
function load<A>(
decoder: Decoder<string | object | undefined, A>,
specFile: string
): A {
const baseDir = process.env.INIT_CWD ?? process.cwd();
const yamlText = fs.readFileSync(path.resolve(baseDir, specFile), 'utf8');
const spec = yaml.safeLoad(yamlText);
return validate(decoder, spec);
}
@ -465,47 +450,4 @@ function formatSpec(spec: object) {
return yaml.safeDump(spec, {});
}
///////////////////////////////////////////////////////////////////////////////
//
// Connection management
//
///////////////////////////////////////////////////////////////////////////////
// tslint:disable-next-line:variable-name
const ConnectConfigurationType = t.type({
endpoint: t.string,
});
export type IConnectConfiguration = t.TypeOf<typeof ConnectConfigurationType>;
const sdsFile = path.join(os.homedir(), '.sds');
let connection: IConnectConfiguration | undefined;
let lab: LaboratoryClient | undefined;
function getLab(): LaboratoryClient {
if (connection === undefined || lab === undefined) {
tryInitializeConnection();
}
if (connection === undefined || lab === undefined) {
const message =
'No laboratory connection. Use the "connect" command to specify a laboratory.';
throw new IllegalOperationError(message);
}
return lab;
}
function tryInitializeConnection() {
try {
const yamlText = fs.readFileSync(sdsFile, 'utf8');
const root = yaml.safeLoad(yamlText);
connection = validate(ConnectConfigurationType, root);
lab = new LaboratoryClient(connection.endpoint);
} catch (e) {
const err = e as NodeJS.ErrnoException;
if (err.code !== 'ENOENT') {
const message = `Invalid ~/.sds file: "${err.message}"`;
console.log(message);
}
}
}
main(process.argv);


@ -0,0 +1,10 @@
{
"extends": "../../tsconfig.json",
"compilerOptions": {
"outDir": "dist",
"rootDir": "src",
},
"include": [
"src"
]
}

Просмотреть файл

@ -0,0 +1,12 @@
{
"extends": "../../tsconfig.json",
"compilerOptions": {
"outDir": "dist",
"baseUrl": "../",
"paths": {
"@microsoft/sds": [
"sds/src"
]
}
},
}


@ -0,0 +1,17 @@
# To use in bash:
# set -o allexport; source .env; set +o allexport
# Laboratory server configuration
AZURE_CLIENT_ID=00000000-0000-0000-0000-000000000000
QUEUE_MODE=azure
QUEUE_ENDPOINT=https://mystorage.queue.core.windows.net/myqueue
SQL_MODE=azuresql
SQL_HOST=mydatabase.database.windows.net
SQL_DB=laboratory
# Laboratory AAD auth
# If AUTH_MODE is not set, auth is disabled
AUTH_MODE=aad
AUTH_TENANT_ID=00000000-0000-0000-0000-000000000000
AUTH_LABORATORY_CLIENT_ID=00000000-0000-0000-0000-000000000000
AUTH_CLI_CLIENT_ID=00000000-0000-0000-0000-000000000000
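
This sample .env for the laboratory package covers managed identity, queue, SQL, and optional AAD auth settings. Exporting it into the shell, as the header comment suggests, and then starting the service via the root script would look roughly like this (whether the service also reads a local .env directly is not shown here):

~~~
% set -o allexport; source .env; set +o allexport
% npm run laboratory
~~~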


@ -0,0 +1,3 @@
{
"extends": "../../.eslintrc.json"
}


@ -0,0 +1,3 @@
module.exports = {
...require('../../.prettierrc.js'),
}


@ -0,0 +1,389 @@
openapi: 3.0.2
info:
title: secure-data-sandbox
version: 0.0.1
license:
name: MIT
url: https://github.com/microsoft/secure-data-sandbox/blob/main/LICENSE
contact:
name: microsoft/secure-data-sandbox
url: https://github.com/microsoft/secure-data-sandbox
description: A toolkit for conducting machine learning trials against confidential data
tags:
- name: Benchmarks
- name: Suites
- name: Candidates
- name: Runs
paths:
/benchmarks:
get:
tags: [Benchmarks]
responses:
200:
description: Get all benchmarks
content:
application/json:
schema:
type: array
items:
$ref: "#/components/schemas/IBenchmark"
/benchmarks/{name}:
parameters:
- name: name
in: path
required: true
schema:
type: string
get:
tags: [Benchmarks]
responses:
200:
description: Get a benchmark
content:
application/json:
schema:
$ref: "#/components/schemas/IBenchmark"
put:
tags: [Benchmarks]
requestBody:
required: true
content:
application/json:
schema:
$ref: "#/components/schemas/IBenchmark"
responses:
200:
description: Create or update a benchmark
content:
application/json:
schema:
$ref: "#/components/schemas/IBenchmark"
/suites:
get:
tags: [Suites]
responses:
200:
description: Get all suites
content:
application/json:
schema:
type: array
items:
$ref: "#/components/schemas/ISuite"
/suites/{name}:
parameters:
- name: name
in: path
required: true
schema:
type: string
get:
tags: [Suites]
responses:
200:
description: Get a suite
content:
application/json:
schema:
$ref: "#/components/schemas/ISuite"
put:
tags: [Suites]
requestBody:
required: true
content:
application/json:
schema:
$ref: "#/components/schemas/ISuite"
responses:
200:
description: Create or update a suite
content:
application/json:
schema:
$ref: "#/components/schemas/ISuite"
/candidates:
get:
tags: [Candidates]
responses:
200:
description: Get all candidates
content:
application/json:
schema:
type: array
items:
$ref: "#/components/schemas/ICandidate"
/candidates/{name}:
parameters:
- name: name
in: path
required: true
schema:
type: string
get:
tags: [Candidates]
responses:
200:
description: Get a candidate
content:
application/json:
schema:
$ref: "#/components/schemas/ICandidate"
put:
tags: [Candidates]
requestBody:
required: true
content:
application/json:
schema:
$ref: "#/components/schemas/ICandidate"
responses:
200:
description: Create or update a candidate
content:
application/json:
schema:
$ref: "#/components/schemas/ICandidate"
/runs:
get:
tags: [Runs]
parameters:
- name: benchmark
in: query
schema:
type: string
- name: suite
in: query
schema:
type: string
responses:
200:
description: Get runs
content:
application/json:
schema:
type: array
items:
$ref: "#/components/schemas/IRun"
post:
tags: [Runs]
requestBody:
required: true
content:
application/json:
schema:
$ref: "#/components/schemas/IRunRequest"
responses:
202:
description: Schedule a run
content:
application/json:
schema:
$ref: "#/components/schemas/IRun"
/runs/{name}:
parameters:
- name: name
in: path
required: true
schema:
type: string
get:
tags: [Runs]
responses:
200:
description: Get a single run
content:
application/json:
schema:
$ref: "#/components/schemas/IRun"
patch:
tags: [Runs]
requestBody:
required: true
content:
application/json:
schema:
$ref: "#/components/schemas/UpdateRunStatus"
responses:
204:
description: Update a Run
/runs/{name}/results:
parameters:
- name: name
in: path
required: true
schema:
type: string
post:
requestBody:
required: true
content:
application/json:
schema:
$ref: "#/components/schemas/ReportRunResults"
tags: [Runs]
responses:
204:
description: Add results to a run
components:
schemas:
IEntityBase:
type: object
properties:
name:
type: string
author:
type: string
createdAt:
type: string
format: date-time
updatedAt:
type: string
format: date-time
required:
- name
- author
PipelineStage:
type: object
properties:
name:
type: string
kind:
type: string
enum:
- candidate
- container
image:
type: string
volumes:
type: array
items:
type: object
properties:
volume:
type: string
path:
type: string
required:
- volume
- path
required:
- name
- kind
IBenchmark:
type: object
allOf:
- $ref: "#/components/schemas/IEntityBase"
properties:
stages:
type: array
items:
$ref: "#/components/schemas/PipelineStage"
required:
- stages
ISuite:
type: object
allOf:
- $ref: "#/components/schemas/IEntityBase"
properties:
benchmark:
type: string
volumes:
type: array
items:
type: object
properties:
name:
type: string
type:
type: string
target:
type: string
required:
- name
- type
- target
required:
- benchmark
- volumes
ICandidate:
type: object
allOf:
- $ref: "#/components/schemas/IEntityBase"
properties:
benchmark:
type: string
image:
type: string
required:
- benchmark
- image
IRun:
type: object
allOf:
- $ref: "#/components/schemas/IEntityBase"
properties:
benchmark:
$ref: "#/components/schemas/IBenchmark"
suite:
$ref: "#/components/schemas/ISuite"
candidate:
$ref: "#/components/schemas/ICandidate"
blob:
type: string
status:
$ref: "#/components/schemas/RunStatus"
required:
- benchmark
- suite
- candidate
- blob
- status
IRunRequest:
type: object
properties:
candidate:
type: string
suite:
type: string
required:
- candidate
- suite
IResult:
type: object
properties:
benchmark:
type: string
suite:
type: string
candidate:
type: string
measures:
$ref: "#/components/schemas/Measures"
required:
- benchmark
- suite
- candidate
- measures
Measures:
type: object
additionalProperties: true
ReportRunResults:
type: object
properties:
measures:
$ref: "#/components/schemas/Measures"
required:
- measures
RunStatus:
type: string
enum:
- created
- running
- completed
- failed
UpdateRunStatus:
type: object
properties:
status:
$ref: "#/components/schemas/RunStatus"

packages/laboratory/package-lock.json (generated, 4984 lines; diff not shown due to size)


@ -0,0 +1,52 @@
{
"name": "@microsoft/sds-laboratory",
"version": "0.1.0",
"description": "A toolkit for conducting machine learning trials against confidential data",
"license": "MIT",
"main": "dist/index",
"typings": "dist/index",
"files": [
"dist"
],
"engines": {
"node": ">=12"
},
"bin": {
"sds-laboratory": "./dist/main.js"
},
"scripts": {
"check": "gts check",
"clean": "gts clean",
"compile": "tsc -p tsconfig.build.json",
"fix": "gts fix",
"prepare": "npm run compile",
"pretest": "npm run compile",
"posttest": "npm run check",
"start": "ts-node src/main.ts",
"test": "ts-mocha"
},
"devDependencies": {
"@types/chai": "^4.2.12",
"@types/chai-http": "^4.2.0",
"@types/express": "^4.17.7",
"@types/mocha": "^8.0.1",
"@types/node": "^13.11.1",
"@types/passport-azure-ad": "^4.0.7",
"chai": "^4.2.0",
"chai-http": "^4.3.0",
"gts": "^2.0.2",
"mocha": "^8.1.0",
"ts-mocha": "^7.0.0",
"typescript": "^3.8.3",
"xmlhttprequest": "^1.8.0"
},
"dependencies": {
"@microsoft/sds": "*",
"env-var": "^6.3.0",
"express": "^4.17.1",
"express-async-errors": "^3.1.1",
"passport": "^0.4.1",
"passport-azure-ad": "^4.2.1",
"strong-error-handler": "^3.5.0"
}
}


@ -0,0 +1,96 @@
import * as express from 'express';
import * as errorhandler from 'strong-error-handler';
import * as passport from 'passport';
import { BearerStrategy } from 'passport-azure-ad';
import { IClientConnectionInfo, ILaboratory } from '@microsoft/sds';
import { setErrorStatus } from './errors';
import {
createBenchmarkRouter,
createCandidateRouter,
createRunRouter,
createSuiteRouter,
} from './routes';
import {
AuthConfiguration,
AADConfiguration,
AuthMode,
NoAuthConfiguration,
} from './configuration';
function configureAADAuth(app: express.Express, config: AADConfiguration) {
passport.use(
new BearerStrategy(
{
identityMetadata: `https://login.microsoftonline.com/${config.tenantId}/v2.0/.well-known/openid-configuration`,
clientID: config.laboratoryClientId,
},
(token, done) => {
return done(null, token);
}
)
);
// unauthenticated endpoint for clients to retrieve connection info
app
.get('/connect', (req, res) => {
const connectionInfo: IClientConnectionInfo = {
type: 'aad',
clientId: config.cliClientId,
authority: `https://login.microsoftonline.com/${config.tenantId}`,
scopes: config.scopes,
};
res.json(connectionInfo);
})
// require all endpoints to be authenticated
.use(passport.initialize())
.all('*', passport.authenticate('oauth-bearer', { session: false }));
}
export async function createApp(
lab: ILaboratory,
auth: AuthConfiguration = NoAuthConfiguration
): Promise<express.Express> {
const app = express().use(express.json());
// configure authorization
switch (auth.mode) {
case AuthMode.AAD:
configureAADAuth(app, auth as AADConfiguration);
break;
case AuthMode.None:
default:
app.get('/connect', (req, res) => {
const connectionInfo: IClientConnectionInfo = {
type: 'unauthenticated',
};
res.json(connectionInfo);
});
break;
}
// Set up application routes
app
.get('/connect/validate', (req, res) => {
res.status(200).end();
})
.use(createBenchmarkRouter(lab))
.use(createCandidateRouter(lab))
.use(createRunRouter(lab))
.use(createSuiteRouter(lab))
// Handle known errors
.use(setErrorStatus)
// Hide details in error messages
.use(
errorhandler({
debug: process.env.NODE_ENV === 'development',
negotiateContentType: false,
})
);
return app;
}
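A hedged sketch of wiring createApp to an AAD configuration; the ILaboratory instance, port, and GUIDs below are placeholders rather than values from this change:

import { ILaboratory } from '@microsoft/sds';
import { createApp } from './app';
import { AADConfiguration, AuthMode } from './configuration';

async function start(lab: ILaboratory) {
  const auth: AADConfiguration = {
    mode: AuthMode.AAD,
    tenantId: '00000000-0000-0000-0000-000000000000',
    laboratoryClientId: '11111111-1111-1111-1111-111111111111',
    cliClientId: '22222222-2222-2222-2222-222222222222',
    scopes: ['api://11111111-1111-1111-1111-111111111111/laboratory'],
  };
  const app = await createApp(lab, auth);
  app.listen(3000, () => console.log('laboratory listening on port 3000'));
}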


@ -0,0 +1,129 @@
import * as os from 'os';
import {
QueueConfiguration,
AzureSqlDatabaseConfiguration,
DatabaseConfiguration,
DatabaseMode,
} from '@microsoft/sds';
import {
AzureCredential,
ParseQueueConfiguration,
} from '@microsoft/sds/dist/configuration';
import * as env from 'env-var';
export enum AuthMode {
AAD = 'AAD',
None = 'none',
}
export interface AuthConfiguration {
mode: AuthMode;
}
export interface AADConfiguration extends AuthConfiguration {
mode: AuthMode.AAD;
tenantId: string;
laboratoryClientId: string;
cliClientId: string;
scopes: string[];
}
export const NoAuthConfiguration = {
mode: AuthMode.None,
};
export interface LaboratoryConfiguration {
endpointBaseUrl: string;
port: number;
queue: QueueConfiguration;
database: DatabaseConfiguration;
auth: AuthConfiguration;
}
/**
* Retrieve a DatabaseConfiguration from the current execution environment.
*/
export function ParseDatabaseConfiguration(): DatabaseConfiguration {
const mode = env
.get('SQL_MODE')
.default(DatabaseMode.InMemory)
.asEnum(Object.values(DatabaseMode)) as DatabaseMode;
const host = env
.get('SQL_HOST')
.required(mode !== DatabaseMode.InMemory)
.asString();
switch (mode) {
case DatabaseMode.AzureSql:
return {
mode,
host,
database: env.get('SQL_DB').required().asString(),
credential: AzureCredential.getInstance(),
} as AzureSqlDatabaseConfiguration;
case DatabaseMode.InMemory:
return {
mode,
host: 'localhost',
};
}
}
function ParseAuthConfiguration(): AuthConfiguration {
const authMode = env.get('AUTH_MODE').asString();
if (authMode === 'aad') {
const tenantId = env.get('AUTH_TENANT_ID').required().asString();
const laboratoryClientId = env
.get('AUTH_LABORATORY_CLIENT_ID')
.required()
.asString();
const cliClientId = env.get('AUTH_CLI_CLIENT_ID').required().asString();
const scopes = env
.get('AUTH_SCOPES')
.default('laboratory')
.asArray(' ')
.map(s => `api://${laboratoryClientId}/${s}`);
// offline_access is required to use refresh tokens
scopes.push('offline_access');
const config: AADConfiguration = {
mode: AuthMode.AAD,
tenantId,
laboratoryClientId,
cliClientId,
scopes,
};
return config;
} else {
return NoAuthConfiguration;
}
}
export function ParseLaboratoryConfiguration(): LaboratoryConfiguration {
const port = env.get('PORT').default(3000).asPortNumber();
let endpointBaseUrl = env.get('LABORATORY_ENDPOINT').asUrlString();
// if endpoint is not explicitly specified, check for WEBSITE_HOSTNAME and assume HTTPS over 443
// this variable gets autowired by Azure App Service
if (!endpointBaseUrl) {
const hostname = env.get('WEBSITE_HOSTNAME').asString();
// if not found, fallback to machine hostname
endpointBaseUrl = hostname
? `https://${hostname}`
: `http://${os.hostname()}:${port}`;
}
return {
endpointBaseUrl,
port,
queue: ParseQueueConfiguration(),
database: ParseDatabaseConfiguration(),
auth: ParseAuthConfiguration(),
};
}
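The endpoint fallback above resolves in three steps; a small usage sketch (the environment variable names are the ones read in this file, the URL is illustrative):

import { ParseLaboratoryConfiguration } from './configuration';

// 1. An explicit LABORATORY_ENDPOINT wins.
process.env.LABORATORY_ENDPOINT = 'https://lab.example.com/';
// 2. Otherwise WEBSITE_HOSTNAME (set by Azure App Service) is used over HTTPS.
// 3. Otherwise the machine hostname and PORT are used over HTTP.

const config = ParseLaboratoryConfiguration();
console.log(config.endpointBaseUrl); // the explicit endpoint from step 1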


@ -1,9 +1,6 @@
import * as express from 'express';
import {
EntityNotFoundError,
IllegalOperationError,
} from '../logic/interfaces';
import { EntityNotFoundError, IllegalOperationError } from '@microsoft/sds';
export function setErrorStatus(
err: Error,
@ -11,14 +8,10 @@ export function setErrorStatus(
res: express.Response,
next: express.NextFunction
) {
// console.log(`xxx translateError(${err.message})`);
if (err instanceof EntityNotFoundError) {
// console.log('res.statusCode = 404;');
res.statusCode = 404;
} else if (err instanceof IllegalOperationError) {
// console.log('res.statusCode = 400;');
res.statusCode = 400;
}
// console.log('next(err)');
next(err);
}


@ -0,0 +1 @@
export * from './app';


@ -0,0 +1,31 @@
#!/usr/bin/env node
import {
initializeSequelize,
SequelizeLaboratory,
PipelineRun,
GetQueue,
GetSequelizeOptions,
} from '@microsoft/sds';
import { ParseLaboratoryConfiguration } from './configuration';
import { createApp } from './app';
async function main(argv: string[]) {
const config = ParseLaboratoryConfiguration();
const queue = GetQueue<PipelineRun>(config.queue);
// initializeSequelize binds Sequelize to the models, effectively becoming a singleton / service locator
const sequelizeOptions = GetSequelizeOptions(config.database);
await initializeSequelize(sequelizeOptions);
const lab = new SequelizeLaboratory(config.endpointBaseUrl, queue);
const app = await createApp(lab, config.auth);
app.listen(config.port, () => {
console.log('Starting SDS laboratory service.');
console.log(`Service url is ${config.endpointBaseUrl}.`);
console.info(`Laboratory service listening on port ${config.port}.`);
});
}
main(process.argv).catch(e => console.error(e));


@ -0,0 +1,25 @@
require('express-async-errors');
import { Router } from 'express';
import { BenchmarkType, ILaboratory, validate } from '@microsoft/sds';
export function createBenchmarkRouter(lab: ILaboratory): Router {
const router = Router();
router.get('/benchmarks', async (req, res) => {
res.json(await lab.allBenchmarks());
});
router
.route('/benchmarks/:name')
.get(async (req, res) => {
res.json(await lab.oneBenchmark(req.params['name']));
})
.put(async (req, res) => {
const benchmark = validate(BenchmarkType, req.body);
await lab.upsertBenchmark(benchmark);
res.json(await lab.oneBenchmark(benchmark.name));
});
return router;
}


@ -0,0 +1,25 @@
require('express-async-errors');
import { Router } from 'express';
import { CandidateType, ILaboratory, validate } from '@microsoft/sds';
export function createCandidateRouter(lab: ILaboratory): Router {
const router = Router();
router.get('/candidates', async (req, res) => {
res.json(await lab.allCandidates());
});
router
.route('/candidates/:name')
.get(async (req, res) => {
res.json(await lab.oneCandidate(req.params['name']));
})
.put(async (req, res) => {
const candidate = validate(CandidateType, req.body);
await lab.upsertCandidate(candidate);
res.json(await lab.oneCandidate(candidate.name));
});
return router;
}


@ -0,0 +1,56 @@
require('express-async-errors');
import { Router } from 'express';
import {
ILaboratory,
ReportRunResultsType,
RunRequestType,
UpdateRunStatusType,
validate,
} from '@microsoft/sds';
export function createRunRouter(lab: ILaboratory): Router {
const router = Router();
router
.route('/runs')
.get(async (req, res) => {
if (
typeof req.query.benchmark === 'string' &&
typeof req.query.suite === 'string'
) {
res.json(
await lab.allRunResults(req.query['benchmark'], req.query['suite'])
);
} else {
res.json(await lab.allRuns());
}
})
.post(async (req, res) => {
const runRequest = validate(RunRequestType, req.body);
const run = await lab.createRunRequest(runRequest);
res.status(202);
res.json(run);
});
router
.route('/runs/:name')
.get(async (req, res) => {
res.json(await lab.oneRun(req.params['name']));
})
.patch(async (req, res) => {
const { status } = validate(UpdateRunStatusType, req.body);
await lab.updateRunStatus(req.params['name'], status);
res.status(204);
res.end();
});
router.post('/runs/:name/results', async (req, res) => {
const { measures } = validate(ReportRunResultsType, req.body);
await lab.reportRunResults(req.params['name'], measures);
res.status(204);
res.end();
});
return router;
}


@ -0,0 +1,25 @@
require('express-async-errors');
import { Router } from 'express';
import { ILaboratory, SuiteType, validate } from '@microsoft/sds';
export function createSuiteRouter(lab: ILaboratory): Router {
const router = Router();
router.get('/suites', async (req, res) => {
res.json(await lab.allSuites());
});
router
.route('/suites/:name')
.get(async (req, res) => {
res.json(await lab.oneSuite(req.params['name']));
})
.put(async (req, res) => {
const suite = validate(SuiteType, req.body);
await lab.upsertSuite(suite);
res.json(await lab.oneSuite(suite.name));
});
return router;
}


@ -1,2 +1,5 @@
/* eslint-disable @typescript-eslint/no-empty-interface */
// superagent
// https://github.com/DefinitelyTyped/DefinitelyTyped/issues/12044
declare interface XMLHttpRequest {}


@ -16,11 +16,7 @@ import { assert } from 'chai';
import chaiHttp = require('chai-http');
chai.use(chaiHttp);
import { createApp } from '../../../../src/laboratory/server';
// TODO: Perhaps move this to shims.ts.
// https://github.com/DefinitelyTyped/DefinitelyTyped/issues/12044
interface XMLHttpRequest {}
import { createApp } from '../src';
import {
BenchmarkType,
@ -38,12 +34,24 @@ import {
CandidateType,
SuiteType,
RunType,
} from '../../../../src';
ILaboratory,
} from '@microsoft/sds';
import { benchmark1, candidate1, run1, suite1 } from '../data';
import { assertDeepEqual, MockLaboratory } from '../shared';
import {
benchmark1,
candidate1,
run1,
suite1,
} from '../../sds/test/laboratory/data';
import { initTestEnvironment } from '../../sds/test/laboratory/shared';
let lab: ILaboratory;
describe('laboratory/server', () => {
before(async () => {
lab = await initTestEnvironment();
});
///////////////////////////////////////////////////////////////////////////
//
// Benchmarks
@ -51,8 +59,6 @@ describe('laboratory/server', () => {
///////////////////////////////////////////////////////////////////////////
describe('benchmarks', () => {
it('allBenchmarks()', async () => {
const lab = new MockLaboratory();
const expected: IBenchmark[] = [];
let called = false;
@ -63,7 +69,7 @@ describe('laboratory/server', () => {
chai
.request(await createApp(lab))
.get(`/benchmarks`)
.get('/benchmarks')
.end((err, res) => {
assert.equal(res.status, 200);
assert.deepEqual(res.body, expected);
@ -72,8 +78,6 @@ describe('laboratory/server', () => {
});
it('oneBenchmark()', async () => {
const lab = new MockLaboratory();
let observedName: string | undefined;
lab.oneBenchmark = async (name: string): Promise<IBenchmark> => {
observedName = name;
@ -92,8 +96,6 @@ describe('laboratory/server', () => {
});
it('upsertBenchmark()', async () => {
const lab = new MockLaboratory();
let observed: IBenchmark;
lab.upsertBenchmark = async (
benchmark: IBenchmark,
@ -120,8 +122,6 @@ describe('laboratory/server', () => {
///////////////////////////////////////////////////////////////////////////
describe('candidates', () => {
it('allCandidates()', async () => {
const lab = new MockLaboratory();
const expected: ICandidate[] = [];
let called = false;
@ -132,7 +132,7 @@ describe('laboratory/server', () => {
chai
.request(await createApp(lab))
.get(`/candidates`)
.get('/candidates')
.end((err, res) => {
assert.equal(res.status, 200);
assert.deepEqual(res.body, expected);
@ -141,8 +141,6 @@ describe('laboratory/server', () => {
});
it('oneCandidate()', async () => {
const lab = new MockLaboratory();
let observedName: string | undefined;
lab.oneCandidate = async (name: string): Promise<ICandidate> => {
observedName = name;
@ -161,8 +159,6 @@ describe('laboratory/server', () => {
});
it('upsertCandidate()', async () => {
const lab = new MockLaboratory();
let observed: ICandidate;
lab.upsertCandidate = async (
candidate: ICandidate,
@ -189,8 +185,6 @@ describe('laboratory/server', () => {
///////////////////////////////////////////////////////////////////////////
describe('suites', () => {
it('allSuites()', async () => {
const lab = new MockLaboratory();
const expected: ISuite[] = [];
let called = false;
@ -201,7 +195,7 @@ describe('laboratory/server', () => {
chai
.request(await createApp(lab))
.get(`/suites`)
.get('/suites')
.end((err, res) => {
assert.equal(res.status, 200);
assert.deepEqual(res.body, expected);
@ -210,8 +204,6 @@ describe('laboratory/server', () => {
});
it('oneSuite()', async () => {
const lab = new MockLaboratory();
let observedName: string | undefined;
lab.oneSuite = async (name: string): Promise<ISuite> => {
observedName = name;
@ -230,8 +222,6 @@ describe('laboratory/server', () => {
});
it('upsertSuite()', async () => {
const lab = new MockLaboratory();
let observed: ISuite;
lab.upsertSuite = async (suite: ISuite, name?: string): Promise<void> => {
observed = suite;
@ -255,8 +245,6 @@ describe('laboratory/server', () => {
///////////////////////////////////////////////////////////////////////////
describe('runs', () => {
it('allRuns()', async () => {
const lab = new MockLaboratory();
const expected: IRun[] = [];
let called = false;
@ -267,7 +255,7 @@ describe('laboratory/server', () => {
chai
.request(await createApp(lab))
.get(`/runs`)
.get('/runs')
.end((err, res) => {
assert.equal(res.status, 200);
assert.deepEqual(res.body, expected);
@ -276,8 +264,6 @@ describe('laboratory/server', () => {
});
it('oneRun()', async () => {
const lab = new MockLaboratory();
let observedName: string | undefined;
lab.oneRun = async (name: string): Promise<IRun> => {
observedName = name;
@ -296,8 +282,6 @@ describe('laboratory/server', () => {
});
it('createRunRequest()', async () => {
const lab = new MockLaboratory();
const runRequest: IRunRequest = {
candidate: run1.candidate.name,
suite: run1.suite.name,
@ -311,10 +295,10 @@ describe('laboratory/server', () => {
chai
.request(await createApp(lab))
.post(`/runs`)
.post('/runs')
.send(runRequest)
.end((err, res) => {
assert.equal(res.status, 200);
assert.equal(res.status, 202);
assert.deepEqual(observedRequest, runRequest);
const observed = validate(RunType, res.body);
assert.deepEqual(observed, run1);
@ -324,8 +308,6 @@ describe('laboratory/server', () => {
// This test fails because RunStatus.COMPLETED is serialized and transported as
// Object {completed: ""}
it('updateRunStatus()', async () => {
const lab = new MockLaboratory();
const name = 'foobar';
const status = RunStatus.COMPLETED;
const body: IUpdateRunStatus = { status };
@ -345,15 +327,13 @@ describe('laboratory/server', () => {
.patch(`/runs/${name}`)
.send(body)
.end((err, res) => {
assert.equal(res.status, 200);
assert.equal(res.status, 204);
assert.equal(observedRawName, name);
assert.equal(observedStatus, status);
});
});
it('reportRunResults()', async () => {
const lab = new MockLaboratory();
const name = 'foobar';
const measures = { passed: 1, failed: 2 };
const body: IReportRunResults = { measures };
@ -370,18 +350,16 @@ describe('laboratory/server', () => {
chai
.request(await createApp(lab))
.patch(`/runs/${name}/results`)
.post(`/runs/${name}/results`)
.send(body)
.end((err, res) => {
assert.equal(res.status, 200);
assert.equal(res.status, 204);
assert.equal(observedName, name);
assert.deepEqual(observedMeasures, measures);
});
});
it('allRunResults()', async () => {
const lab = new MockLaboratory();
const benchmark = 'benchmark1';
const suite = 'suite1';
const expected: IResult[] = [];
@ -402,7 +380,7 @@ describe('laboratory/server', () => {
chai
.request(await createApp(lab))
.get(`/runs/${benchmark}/${suite}`)
.get(`/runs?benchmark=${benchmark}&suite=${suite}`)
.end((err, res) => {
assert.equal(res.status, 200);
assert.deepEqual(res.body, expected);


@ -0,0 +1,10 @@
{
"extends": "../../tsconfig.json",
"compilerOptions": {
"outDir": "dist",
"rootDir": "src",
},
"include": [
"src"
]
}


@ -0,0 +1,12 @@
{
"extends": "../../tsconfig.json",
"compilerOptions": {
"outDir": "dist",
"baseUrl": "../",
"paths": {
"@microsoft/sds": [
"sds/src"
]
}
}
}


@ -0,0 +1,3 @@
{
"extends": "../../.eslintrc.json"
}


@ -0,0 +1,2 @@
dist
node_modules


@ -0,0 +1,3 @@
module.exports = {
...require('../../.prettierrc.js'),
}

packages/sds/package-lock.json (generated, 5064 lines; diff not shown due to size)

packages/sds/package.json (new file, 64 lines)

@ -0,0 +1,64 @@
{
"name": "@microsoft/sds",
"version": "0.1.0",
"description": "A toolkit for conducting machine learning trials against confidential data",
"license": "MIT",
"main": "dist/index",
"typings": "dist/index",
"files": [
"dist"
],
"engines": {
"node": ">=12"
},
"scripts": {
"check": "gts check",
"clean": "gts clean",
"compile": "tsc -p tsconfig.build.json",
"fix": "gts fix",
"prepare": "npm run compile",
"pretest": "npm run compile",
"posttest": "npm run check",
"test": "ts-mocha"
},
"devDependencies": {
"@sinonjs/fake-timers": "^6.0.1",
"@types/bluebird": "^3.5.32",
"@types/chai": "^4.2.12",
"@types/chai-as-promised": "^7.1.3",
"@types/dotenv": "^8.2.0",
"@types/luxon": "^1.24.3",
"@types/mocha": "^8.0.1",
"@types/nock": "^11.1.0",
"@types/node": "^13.11.1",
"@types/sinonjs__fake-timers": "^6.0.1",
"@types/tedious": "^4.0.1",
"@types/uuid": "^8.0.0",
"@types/validator": "^13.1.0",
"chai": "^4.2.0",
"chai-as-promised": "^7.1.1",
"chai-exclude": "^2.0.2",
"dotenv": "^8.2.0",
"gts": "^2.0.2",
"mocha": "^8.1.0",
"nock": "^13.0.3",
"ts-mocha": "^7.0.0",
"typescript": "^3.8.3",
"xmlhttprequest": "^1.8.0"
},
"dependencies": {
"@azure/identity": "^1.0.3",
"@azure/storage-queue": "^12.1.0-preview.1",
"axios": "^0.19.2",
"env-var": "^6.3.0",
"fp-ts": "^2.7.1",
"io-ts": "^2.2.9",
"luxon": "^1.24.1",
"reflect-metadata": "^0.1.13",
"sequelize": "^5.22.3",
"sequelize-typescript": "^1.1.0",
"sqlite3": "^5.0.0",
"tedious": "^9.0.1",
"uuid": "^8.3.0"
}
}


@ -0,0 +1,66 @@
import * as env from 'env-var';
import {
DefaultAzureCredential,
ChainedTokenCredential,
ManagedIdentityCredential,
EnvironmentCredential,
TokenCredential,
} from '@azure/identity';
import {
QueueMode,
QueueConfiguration,
AzureStorageQueueConfiguration,
} from './queue';
export class AzureCredential {
private static instance: TokenCredential;
private constructor() {}
static getInstance(): TokenCredential {
if (!AzureCredential.instance) {
const clientId = env.get('AZURE_CLIENT_ID').asString();
// DefaultAzureCredential in JS doesn't yet support passing clientId, so we use our own credential as a fallback
AzureCredential.instance = clientId
? new ChainedTokenCredential(
new EnvironmentCredential(),
new ManagedIdentityCredential(clientId)
)
: new DefaultAzureCredential();
}
return AzureCredential.instance;
}
}
/**
* Retrieve a QueueConfiguration from the current execution environment.
*/
export function ParseQueueConfiguration(): QueueConfiguration {
const mode = env
.get('QUEUE_MODE')
.default(QueueMode.InMemory)
.asEnum(Object.values(QueueMode)) as QueueMode;
const endpoint = env
.get('QUEUE_ENDPOINT')
.required(mode !== QueueMode.InMemory)
.asUrlString();
switch (mode) {
case QueueMode.Azure:
return {
mode,
endpoint,
credential: AzureCredential.getInstance(),
shouldCreateQueue: false,
} as AzureStorageQueueConfiguration;
case QueueMode.InMemory:
return {
mode: QueueMode.InMemory,
endpoint: 'http://localhost',
};
}
}
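A hedged sketch of combining these helpers with GetQueue from the queue module (shown later in this diff); with QUEUE_MODE=azure and QUEUE_ENDPOINT set, the Azure path uses AzureCredential.getInstance(), otherwise it falls back to the in-memory queue:

import { GetQueue } from './queue';
import { ParseQueueConfiguration } from './configuration';

interface ExampleMessage {
  name: string;
}

// The queue implementation is selected by the environment-derived configuration.
const queue = GetQueue<ExampleMessage>(ParseQueueConfiguration());
queue.enqueue({ name: 'example' }).catch(console.error);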


@ -21,11 +21,9 @@ export interface AzureSqlDatabaseConfiguration extends DatabaseConfiguration {
export function GetSequelizeOptions(
config: DatabaseConfiguration
): SequelizeOptions {
// tsc ensures that all elements of the discriminated union are covered: https://www.typescriptlang.org/docs/handbook/advanced-types.html#exhaustiveness-checking
// The following is safe but tslint doesn't understand, so we suppress the rule: https://github.com/palantir/tslint/issues/2104
// tslint:disable:switch-default
switch (config.mode) {
case DatabaseMode.AzureSql:
// eslint-disable-next-line no-case-declarations
const azureConfig = config as AzureSqlDatabaseConfiguration;
return {
database: azureConfig.database,


@ -0,0 +1,5 @@
export * from './laboratory';
export * from './queue';
export * from './messages';
export * from './database';
export * as configuration from './configuration';


@ -6,8 +6,10 @@ import {
BenchmarkType,
CandidateArrayType,
CandidateType,
ClientConnectionInfoType,
IBenchmark,
ICandidate,
IClientConnectionInfo,
ILaboratory,
IllegalOperationError,
IReportRunResults,
@ -28,15 +30,48 @@ import {
validate,
} from '../logic';
const config: AxiosRequestConfig = {
// TODO: put credentials here.
};
// A TokenRetriever should return a valid OAuth2 Bearer token
type TokenRetriever = () => Promise<string>;
export class LaboratoryClient implements ILaboratory {
endpoint: string;
tokenRetriever?: TokenRetriever;
constructor(endpoint: string) {
constructor(endpoint: string, tokenRetriever?: TokenRetriever) {
this.endpoint = endpoint;
this.tokenRetriever = tokenRetriever;
}
private async getConfig(): Promise<AxiosRequestConfig> {
if (this.tokenRetriever) {
return {
headers: {
Authorization: `Bearer ${await this.tokenRetriever()}`,
},
};
}
return {};
}
/////////////////////////////////////////////////////////////////////////////
//
// Connection info
//
/////////////////////////////////////////////////////////////////////////////
async negotiateConnection(): Promise<IClientConnectionInfo> {
const url = new URL('connect', this.endpoint);
const response = await axios.get(url.toString());
const connectionInfo = validate(ClientConnectionInfoType, response.data);
return connectionInfo;
}
async validateConnection(): Promise<void> {
const url = new URL('connect/validate', this.endpoint);
const response = await axios.get(url.toString(), await this.getConfig());
if (response.status !== 200) {
throw new Error('There was something wrong with the connection');
}
}
/////////////////////////////////////////////////////////////////////////////
@ -46,7 +81,7 @@ export class LaboratoryClient implements ILaboratory {
/////////////////////////////////////////////////////////////////////////////
async allBenchmarks(): Promise<IBenchmark[]> {
const url = new URL('benchmarks', this.endpoint);
const response = await axios.get(url.toString(), config);
const response = await axios.get(url.toString(), await this.getConfig());
const benchmarks = validate(BenchmarkArrayType, response.data);
return benchmarks;
}
@ -54,7 +89,7 @@ export class LaboratoryClient implements ILaboratory {
async oneBenchmark(rawName: string): Promise<IBenchmark> {
const name = normalizeName(rawName);
const url = new URL(`benchmarks/${name}`, this.endpoint);
const response = await axios.get(url.toString(), config);
const response = await axios.get(url.toString(), await this.getConfig());
const benchmark = validate(BenchmarkType, response.data);
return benchmark;
}
@ -65,11 +100,11 @@ export class LaboratoryClient implements ILaboratory {
): Promise<void> {
const name = normalizeName(benchmark.name);
if (routeName && name !== normalizeName(routeName)) {
const message = `Route name, if specified, must equal benchmark.name.`;
const message = 'Route name, if specified, must equal benchmark.name.';
throw new IllegalOperationError(message);
}
const url = new URL(`benchmarks/${name}`, this.endpoint);
await axios.put(url.toString(), benchmark, config);
await axios.put(url.toString(), benchmark, await this.getConfig());
}
/////////////////////////////////////////////////////////////////////////////
@ -79,7 +114,7 @@ export class LaboratoryClient implements ILaboratory {
/////////////////////////////////////////////////////////////////////////////
async allCandidates(): Promise<ICandidate[]> {
const url = new URL('candidates', this.endpoint);
const response = await axios.get(url.toString(), config);
const response = await axios.get(url.toString(), await this.getConfig());
const candidates = validate(CandidateArrayType, response.data);
return candidates;
}
@ -87,7 +122,7 @@ export class LaboratoryClient implements ILaboratory {
async oneCandidate(rawName: string): Promise<ICandidate> {
const name = normalizeName(rawName);
const url = new URL(`candidates/${name}`, this.endpoint);
const response = await axios.get(url.toString(), config);
const response = await axios.get(url.toString(), await this.getConfig());
const candidate = validate(CandidateType, response.data);
return candidate;
}
@ -98,11 +133,11 @@ export class LaboratoryClient implements ILaboratory {
): Promise<void> {
const name = normalizeName(candidate.name);
if (routeName && name !== normalizeName(routeName)) {
const message = `Route name, if specified, must equal candidate.name.`;
const message = 'Route name, if specified, must equal candidate.name.';
throw new IllegalOperationError(message);
}
const url = new URL(`candidates/${name}`, this.endpoint);
await axios.put(url.toString(), candidate, config);
await axios.put(url.toString(), candidate, await this.getConfig());
}
/////////////////////////////////////////////////////////////////////////////
@ -112,7 +147,7 @@ export class LaboratoryClient implements ILaboratory {
/////////////////////////////////////////////////////////////////////////////
async allSuites(): Promise<ISuite[]> {
const url = new URL('suites', this.endpoint);
const response = await axios.get(url.toString(), config);
const response = await axios.get(url.toString(), await this.getConfig());
const suites = validate(SuiteArrayType, response.data);
return suites;
}
@ -120,7 +155,7 @@ export class LaboratoryClient implements ILaboratory {
async oneSuite(rawName: string): Promise<ISuite> {
const name = normalizeName(rawName);
const url = new URL(`suites/${name}`, this.endpoint);
const response = await axios.get(url.toString(), config);
const response = await axios.get(url.toString(), await this.getConfig());
const suite = validate(SuiteType, response.data);
return suite;
}
@ -128,11 +163,11 @@ export class LaboratoryClient implements ILaboratory {
async upsertSuite(suite: ISuite, routeName?: string): Promise<void> {
const name = normalizeName(suite.name);
if (routeName && name !== normalizeName(routeName)) {
const message = `Route name, if specified, must equal suite.name.`;
const message = 'Route name, if specified, must equal suite.name.';
throw new IllegalOperationError(message);
}
const url = new URL(`suites/${name}`, this.endpoint);
await axios.put(url.toString(), suite, config);
await axios.put(url.toString(), suite, await this.getConfig());
}
/////////////////////////////////////////////////////////////////////////////
@ -142,7 +177,7 @@ export class LaboratoryClient implements ILaboratory {
/////////////////////////////////////////////////////////////////////////////
async allRuns(): Promise<IRun[]> {
const url = new URL('runs', this.endpoint);
const response = await axios.get(url.toString(), config);
const response = await axios.get(url.toString(), await this.getConfig());
const runs = validate(RunArrayType, response.data);
return runs;
}
@ -150,14 +185,18 @@ export class LaboratoryClient implements ILaboratory {
async oneRun(rawName: string): Promise<IRun> {
const name = normalizeRunName(rawName);
const url = new URL(`runs/${name}`, this.endpoint);
const response = await axios.get(url.toString(), config);
const response = await axios.get(url.toString(), await this.getConfig());
const run = validate(RunType, response.data);
return run;
}
async createRunRequest(spec: IRunRequest): Promise<IRun> {
const url = new URL('runs', this.endpoint);
const response = await axios.post(url.toString(), spec, config);
const response = await axios.post(
url.toString(),
spec,
await this.getConfig()
);
const run = validate(RunType, response.data);
return run;
}
@ -166,21 +205,21 @@ export class LaboratoryClient implements ILaboratory {
const name = normalizeRunName(rawName);
const url = new URL(`runs/${name}`, this.endpoint);
const body: IUpdateRunStatus = { status };
await axios.patch(url.toString(), body, config);
await axios.patch(url.toString(), body, await this.getConfig());
}
async reportRunResults(rawName: string, measures: Measures): Promise<void> {
const name = normalizeRunName(rawName);
const url = new URL(`runs/${name}/results`, this.endpoint);
const body: IReportRunResults = { measures };
await axios.patch(url.toString(), body, config);
await axios.post(url.toString(), body, await this.getConfig());
}
async allRunResults(benchmark: string, suite: string): Promise<IResult[]> {
const b = normalizeName(benchmark);
const s = normalizeName(suite);
const url = new URL(`runs/${b}/${s}`, this.endpoint);
const response = await axios.get(url.toString(), config);
const url = new URL(`runs?benchmark=${b}&suite=${s}`, this.endpoint);
const response = await axios.get(url.toString(), await this.getConfig());
const results = validate(ResultArrayType, response.data);
return results;
}
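A hedged sketch of constructing LaboratoryClient with a token retriever, assuming the class is re-exported from the package root; how the bearer token is actually obtained (MSAL, device code flow, etc.) is outside this diff, so a stub stands in for it:

import { LaboratoryClient } from '@microsoft/sds';

// Placeholder: in practice this would call into an OAuth2 flow.
const getToken = async () => 'placeholder-bearer-token';

const client = new LaboratoryClient('https://lab.example.com', getToken);

async function listBenchmarks() {
  // Every request now carries an Authorization: Bearer header via getConfig().
  const benchmarks = await client.allBenchmarks();
  console.log(benchmarks.map(b => b.name));
}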


@ -1,3 +1,2 @@
export * from './client';
export * from './logic';
export * from './server';


@ -2,9 +2,6 @@ import { either } from 'fp-ts/lib/Either';
import * as t from 'io-ts';
import { DateTime } from 'luxon';
export const apiVersion = '0.0.1';
// tslint:disable-next-line:variable-name
const DateType = new t.Type<Date, string, unknown>(
'Date',
(u): u is Date => u instanceof Date,
@ -16,18 +13,48 @@ const DateType = new t.Type<Date, string, unknown>(
a => a.toISOString()
);
// createEnum() from https://github.com/gcanti/io-ts/issues/67
/* eslint-disable @typescript-eslint/no-explicit-any */
const createEnum = <E>(e: any, name: string): t.Type<E> => {
const keys: any = {};
Object.keys(e).forEach(k => {
keys[e[k]] = null;
});
return t.keyof(keys, name) as any;
};
/* eslint-enable */
///////////////////////////////////////////////////////////////////////////////
//
// IClientConnectionInfo
//
///////////////////////////////////////////////////////////////////////////////
export const ClientConnectionInfoType = t.union([
t.type({
type: t.literal('aad'),
clientId: t.string,
authority: t.string,
scopes: t.array(t.string),
}),
t.type({
type: t.literal('unauthenticated'),
}),
]);
export type IClientConnectionInfo = t.TypeOf<typeof ClientConnectionInfoType>;
///////////////////////////////////////////////////////////////////////////////
//
// EntityBase
//
///////////////////////////////////////////////////////////////////////////////
// tslint:disable-next-line:variable-name
export const EntityBaseType = t.intersection([
t.type({
name: t.string,
author: t.string,
version: t.string,
apiVersion: t.string,
}),
t.partial({
createdAt: DateType,
@ -42,29 +69,43 @@ export type IEntityBase = t.TypeOf<typeof EntityBaseType>;
//
///////////////////////////////////////////////////////////////////////////////
// tslint:disable-next-line:variable-name
const PipelineStageType = t.partial({
image: t.string,
});
type PipelineStage = t.TypeOf<typeof PipelineStageType>;
export enum BenchmarkStageKind {
CANDIDATE = 'candidate',
CONTAINER = 'container',
}
// tslint:disable-next-line:variable-name
export const PipelineType = t.type({
mode: t.string,
stages: t.array(PipelineStageType),
});
export type IPipeline = t.TypeOf<typeof PipelineType>;
const BenchmarkStageKindType = createEnum<BenchmarkStageKind>(
BenchmarkStageKind,
'BenchmarkStageKind'
);
const PipelineStageVolumeMountType = t.type({
volume: t.string,
path: t.string,
});
type PipelineStageVolumeMount = t.TypeOf<typeof PipelineStageVolumeMountType>;
// TODO: make 'image' required when kind == 'container'
const PipelineStageType = t.intersection([
t.type({
name: t.string,
kind: BenchmarkStageKindType,
}),
t.partial({
image: t.string,
volumes: t.array(PipelineStageVolumeMountType),
}),
]);
export type PipelineStage = t.TypeOf<typeof PipelineStageType>;
// tslint:disable-next-line:variable-name
export const BenchmarkType = t.intersection([
EntityBaseType,
t.interface({
pipelines: t.array(PipelineType),
stages: t.array(PipelineStageType),
}),
]);
export type IBenchmark = t.TypeOf<typeof BenchmarkType>;
// tslint:disable-next-line:variable-name
export const BenchmarkArrayType = t.array(BenchmarkType);
///////////////////////////////////////////////////////////////////////////////
@ -73,18 +114,15 @@ export const BenchmarkArrayType = t.array(BenchmarkType);
//
///////////////////////////////////////////////////////////////////////////////
// tslint:disable-next-line:variable-name
export const CandidateType = t.intersection([
EntityBaseType,
t.interface({
benchmark: t.string,
mode: t.string,
image: t.string,
}),
]);
export type ICandidate = t.TypeOf<typeof CandidateType>;
// tslint:disable-next-line:variable-name
export const CandidateArrayType = t.array(CandidateType);
///////////////////////////////////////////////////////////////////////////////
@ -93,17 +131,22 @@ export const CandidateArrayType = t.array(CandidateType);
//
///////////////////////////////////////////////////////////////////////////////
// tslint:disable-next-line:variable-name
const SuiteVolumeType = t.type({
name: t.string,
type: t.string,
target: t.string,
});
export type SuiteVolume = t.TypeOf<typeof SuiteVolumeType>;
export const SuiteType = t.intersection([
EntityBaseType,
t.interface({
benchmark: t.string,
mode: t.string,
volumes: t.array(SuiteVolumeType),
}),
]);
export type ISuite = t.TypeOf<typeof SuiteType>;
// tslint:disable-next-line:variable-name
export const SuiteArrayType = t.array(SuiteType);
///////////////////////////////////////////////////////////////////////////////
@ -112,19 +155,6 @@ export const SuiteArrayType = t.array(SuiteType);
//
///////////////////////////////////////////////////////////////////////////////
// createEnum() from https://github.com/gcanti/io-ts/issues/67
// tslint:disable-next-line:no-any
const createEnum = <E>(e: any, name: string): t.Type<E> => {
// tslint:disable-next-line:no-any
const keys: any = {};
Object.keys(e).forEach(k => {
keys[e[k]] = null;
});
// tslint:disable-next-line:no-any
return t.keyof(keys, name) as any;
};
export enum RunStatus {
CREATED = 'created',
RUNNING = 'running',
@ -132,23 +162,19 @@ export enum RunStatus {
FAILED = 'failed',
}
// tslint:disable-next-line:variable-name
const RunStatusType = createEnum<RunStatus>(RunStatus, 'RunStatus');
// tslint:disable-next-line:variable-name
export const RunType = t.intersection([
EntityBaseType,
t.interface({
benchmark: BenchmarkType,
suite: SuiteType,
candidate: CandidateType,
blob: t.string,
status: RunStatusType,
}),
]);
export type IRun = t.TypeOf<typeof RunType>;
// tslint:disable-next-line:variable-name
export const RunArrayType = t.array(RunType);
///////////////////////////////////////////////////////////////////////////////
@ -157,7 +183,6 @@ export const RunArrayType = t.array(RunType);
//
///////////////////////////////////////////////////////////////////////////////
// tslint:disable-next-line:variable-name
export const RunRequestType = t.type({
candidate: t.string,
suite: t.string,
@ -170,7 +195,6 @@ export type IRunRequest = t.TypeOf<typeof RunRequestType>;
//
///////////////////////////////////////////////////////////////////////////////
// tslint:disable-next-line:variable-name
export const UpdateRunStatusType = t.type({
status: RunStatusType,
});
@ -182,7 +206,6 @@ export type IUpdateRunStatus = t.TypeOf<typeof UpdateRunStatusType>;
//
///////////////////////////////////////////////////////////////////////////////
// tslint:disable-next-line:variable-name
export const MeasuresType = t.UnknownRecord;
export type Measures = t.TypeOf<typeof MeasuresType>;
@ -192,7 +215,6 @@ export type Measures = t.TypeOf<typeof MeasuresType>;
//
///////////////////////////////////////////////////////////////////////////////
// tslint:disable-next-line:variable-name
export const ReportRunResultsType = t.type({
measures: MeasuresType,
});
@ -204,12 +226,10 @@ export type IReportRunResults = t.TypeOf<typeof ReportRunResultsType>;
//
///////////////////////////////////////////////////////////////////////////////
// tslint:disable-next-line:variable-name
export const ResultType = t.intersection([
EntityBaseType,
t.type({
benchmark: t.string,
mode: t.string,
suite: t.string,
candidate: t.string,
measures: MeasuresType,
@ -217,7 +237,6 @@ export const ResultType = t.intersection([
]);
export type IResult = t.TypeOf<typeof ResultType>;
// tslint:disable-next-line:variable-name
export const ResultArrayType = t.array(ResultType);
///////////////////////////////////////////////////////////////////////////////
@ -226,7 +245,7 @@ export const ResultArrayType = t.array(ResultType);
//
///////////////////////////////////////////////////////////////////////////////
// tslint:disable-next-line:interface-name
// eslint-disable-next-line @typescript-eslint/interface-name-prefix
export interface ILaboratory {
allBenchmarks(): Promise<IBenchmark[]>;
oneBenchmark(name: string): Promise<IBenchmark>;


@ -0,0 +1,15 @@
import { Benchmark } from './models';
import { normalizeName } from './normalize';
import { IBenchmark } from '../interfaces';
export function normalizeBenchmark(benchmark: IBenchmark): IBenchmark {
return {
...benchmark,
name: normalizeName(benchmark.name),
};
}
export async function processBenchmark(benchmark: IBenchmark) {
// Upsert Benchmark
await Benchmark.upsert<Benchmark>(benchmark);
}


@ -7,7 +7,6 @@ export function normalizeCandidate(candidate: ICandidate): ICandidate {
...candidate,
name: normalizeName(candidate.name),
benchmark: normalizeName(candidate.benchmark),
mode: normalizeName(candidate.mode),
};
}
@ -21,12 +20,5 @@ export async function processCandidate(candidate: ICandidate): Promise<void> {
throw new IllegalOperationError(message);
}
// Verify that referenced model is provided by benchmark.
const modes = benchmark.pipelines.map(p => p.mode);
if (!modes.includes(candidate.mode)) {
const message = `Candidate references unknown mode "${candidate.mode}"`;
throw new IllegalOperationError(message);
}
await Candidate.upsert<Candidate>(candidate);
}


@ -30,11 +30,9 @@ import { normalizeSuite, processSuite } from './suite';
export class SequelizeLaboratory implements ILaboratory {
private readonly server: string;
private readonly runBlobBase: string;
private readonly queue: IQueue<PipelineRun>;
constructor(server: string, runBlobBase: string, queue: IQueue<PipelineRun>) {
this.runBlobBase = runBlobBase;
constructor(server: string, queue: IQueue<PipelineRun>) {
this.queue = queue;
this.server = server;
}
@ -173,12 +171,7 @@ export class SequelizeLaboratory implements ILaboratory {
async createRunRequest(r: IRunRequest): Promise<IRun> {
const runRequest = normalizeRunRequest(r);
return processRunRequest(
this.server,
runRequest,
this.runBlobBase,
this.queue
);
return processRunRequest(this.server, runRequest, this.queue);
}
async updateRunStatus(rawName: string, status: RunStatus): Promise<void> {


@ -1,6 +1,6 @@
import { DataType, Column, Model, Table } from 'sequelize-typescript';
import { IBenchmark, IPipeline } from '../../interfaces';
import { IBenchmark, PipelineStage } from '../../interfaces';
import { jsonColumn } from './decorators';
@ -8,7 +8,7 @@ import { jsonColumn } from './decorators';
export class Benchmark extends Model<Benchmark> implements IBenchmark {
@Column({
type: DataType.STRING,
unique: true,
primaryKey: true,
})
name!: string;
@ -16,9 +16,9 @@ export class Benchmark extends Model<Benchmark> implements IBenchmark {
author!: string;
@Column(DataType.STRING)
version!: string;
apiVersion!: string;
// TODO: REVIEW: magic number 1024
@Column(jsonColumn<IPipeline[]>('pipelines', 1024))
pipelines!: IPipeline[];
@Column(jsonColumn<PipelineStage[]>('stages', 1024))
stages!: PipelineStage[];
}


@ -6,7 +6,7 @@ import { ICandidate } from '../../interfaces';
export class Candidate extends Model<Candidate> implements ICandidate {
@Column({
type: DataType.STRING,
unique: true,
primaryKey: true,
})
name!: string;
@ -14,14 +14,11 @@ export class Candidate extends Model<Candidate> implements ICandidate {
author!: string;
@Column(DataType.STRING)
version!: string;
apiVersion!: string;
@Column(DataType.STRING)
benchmark!: string;
@Column(DataType.STRING)
mode!: string;
@Column(DataType.STRING)
image!: string;
}


@ -9,7 +9,7 @@ export function jsonColumn<T>(name: string, length: number) {
return {
type: DataType.STRING(length),
get(): T | undefined {
// tslint:disable-next-line:no-any
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const value = (this as any).getDataValue(name) as string;
// TODO: validate schema here.
// Will likely need to pass in a schema parameter or
@ -33,12 +33,13 @@ export function jsonColumn<T>(name: string, length: number) {
// Verify byte length of utf8 string fits in database field.
// Use >= to account for potential trailing \0.
if (buffer.byteLength >= length) {
const message = `serialized text too long in json column "${name}". ${buffer.byteLength +
1} exceeds limit of ${length}.`;
const message = `serialized text too long in json column "${name}". ${
buffer.byteLength + 1
} exceeds limit of ${length}.`;
throw new ValidationError(message);
}
// tslint:disable-next-line:no-any
// eslint-disable-next-line @typescript-eslint/no-explicit-any
(this as any).setDataValue(name, text);
},
};
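The >= check above compares UTF-8 byte length, not character count, against the column size, which matters for non-ASCII payloads; a small illustration (the numbers are plain arithmetic, not taken from the codebase):

// 6 characters, but the two 'é' code points take 2 bytes each in UTF-8.
Buffer.byteLength('résumé', 'utf8'); // 8

// 600 characters become 1200 bytes, which would exceed a 1024-byte column
// and trigger the ValidationError path above.
Buffer.byteLength('é'.repeat(600), 'utf8'); // 1200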


@ -8,7 +8,7 @@ import { jsonColumn } from './decorators';
export class Result extends Model<Result> implements IResult {
@Column({
type: DataType.STRING,
unique: true,
primaryKey: true,
})
name!: string;
@ -16,14 +16,11 @@ export class Result extends Model<Result> implements IResult {
author!: string;
@Column(DataType.STRING)
version!: string;
apiVersion!: string;
@Column(DataType.STRING)
benchmark!: string;
@Column(DataType.STRING)
mode!: string;
@Column(DataType.STRING)
suite!: string;


@ -14,7 +14,7 @@ import { jsonColumn } from './decorators';
export class Run extends Model<Run> implements IRun {
@Column({
type: DataType.STRING,
unique: true,
primaryKey: true,
})
name!: string;
@ -22,10 +22,7 @@ export class Run extends Model<Run> implements IRun {
author!: string;
@Column(DataType.STRING)
version!: string;
@Column(DataType.STRING)
blob!: string;
apiVersion!: string;
@Column(DataType.STRING)
status!: RunStatus;


@ -1,12 +1,14 @@
import { DataType, Column, Model, Table } from 'sequelize-typescript';
import { ISuite } from '../../interfaces';
import { ISuite, SuiteVolume } from '../../interfaces';
import { jsonColumn } from './decorators';
@Table
export class Suite extends Model<Suite> implements ISuite {
@Column({
type: DataType.STRING,
unique: true,
primaryKey: true,
})
name!: string;
@ -14,11 +16,12 @@ export class Suite extends Model<Suite> implements ISuite {
author!: string;
@Column(DataType.STRING)
version!: string;
apiVersion!: string;
@Column(DataType.STRING)
benchmark!: string;
@Column(DataType.STRING)
mode!: string;
// TODO: REVIEW: magic number 1024
@Column(jsonColumn<SuiteVolume[]>('volumes', 1024))
volumes!: SuiteVolume[];
}


@ -2,20 +2,20 @@ import { v1 } from 'uuid';
import { URL } from 'url';
import {
apiVersion,
EntityNotFoundError,
IBenchmark,
ICandidate,
IllegalOperationError,
IPipeline,
IResult,
IRun,
IRunRequest,
Measures,
RunStatus,
BenchmarkStageKind,
ISuite,
} from '../interfaces';
import { PipelineRun, PipelineStage } from '../../../messages';
import { PipelineRun, PipelineRunStage } from '../../../messages';
import { Benchmark, Candidate, Suite, Run, Result } from './models';
import { normalizeName } from './normalize';
import { IQueue } from '../../../queue';
@ -31,7 +31,6 @@ export function normalizeRunRequest(runRequest: IRunRequest): IRunRequest {
export async function processRunRequest(
server: string,
runRequest: IRunRequest,
runBlobBase: string,
queue: IQueue<PipelineRun>
): Promise<Run> {
// Verify that referenced candidate exists.
@ -56,12 +55,6 @@ export async function processRunRequest(
throw new IllegalOperationError(message);
}
// Verify that candidate and suite reference same mode.
if (candidate.mode !== suite.mode) {
const message = `Candidate mode "${candidate.mode}" doesn't match suite mode "${suite.mode}"`;
throw new IllegalOperationError(message);
}
// Verify that referenced benchmark exists.
const benchmark = await Benchmark.findOne({
where: { name: candidate.benchmark },
@ -71,19 +64,6 @@ export async function processRunRequest(
throw new IllegalOperationError(message);
}
// Find the pipeline for the candidate's mode.
let pipeline: IPipeline | undefined;
for (const p of benchmark.pipelines) {
if (p.mode === candidate.mode) {
pipeline = p;
break;
}
}
if (!pipeline) {
const message = `Candidate references unknown mode "${candidate.mode}"`;
throw new IllegalOperationError(message);
}
//
// All ok. Create the run.
//
@ -91,20 +71,11 @@ export async function processRunRequest(
// TODO: consider moving name generation to normalize.ts.
const name = v1();
// `new URL()` will clobber the final path segment unless the
// base ends with a trailing slash, so ensure it here
if (!runBlobBase.endsWith('/')) {
runBlobBase += '/';
}
const blobURI: URL = new URL(name, runBlobBase);
const run: IRun = {
name,
author: 'unknown', // TODO: fill in name
version: apiVersion,
apiVersion: 'v1alpha1',
status: RunStatus.CREATED,
blob: blobURI.toString(),
benchmark,
candidate,
suite,
@ -114,14 +85,7 @@ export async function processRunRequest(
const result = await Run.create(run);
// Queue the run request.
const message = createMessage(
server,
blobURI.toString(),
name,
benchmark,
candidate,
pipeline
);
const message = createMessage(server, name, benchmark, suite, candidate);
await queue.enqueue(message);
return result;
@ -171,9 +135,8 @@ export async function processRunResults(
const results: IResult = {
name: run.name,
author: run.author,
version: run.version,
apiVersion: 'v1alpha1',
benchmark: run.benchmark.name,
mode: run.suite.mode,
suite: run.suite.name,
candidate: run.candidate.name,
measures,
@ -184,29 +147,41 @@ export async function processRunResults(
function createMessage(
server: string,
blobPrefix: string,
name: string,
b: IBenchmark,
c: ICandidate,
pipeline: IPipeline
benchmark: IBenchmark,
suite: ISuite,
candidate: ICandidate
): PipelineRun {
const stages = pipeline.stages.map(s => {
const name = s.image ? 'benchmark' : 'candidate';
const image = s.image || c.image;
const stages = benchmark.stages.map(stage => {
const image =
stage.kind === BenchmarkStageKind.CANDIDATE
? candidate.image
: stage.image!;
const volumes = stage.volumes?.map(v => {
const sourceVolume = suite.volumes?.filter(sv => sv.name === v.volume)[0];
// TODO implement cmd, env, volumes.
return {
type: sourceVolume.type,
target: v.path,
source: sourceVolume.target,
readonly: true,
};
});
const stage: PipelineStage = { name, image };
return stage;
// TODO implement cmd, env.
return {
name: stage.name,
image,
volumes,
} as PipelineRunStage;
});
// const blobPrefix = `runs/${name}`;
const statusEndpoint = new URL(`runs/${name}`, server);
const resultsEndpoint = new URL(`runs/${name}/results`, server);
const message: PipelineRun = {
name,
blobPrefix,
statusEndpoint: statusEndpoint.toString(),
resultsEndpoint: resultsEndpoint.toString(),
stages,


@ -5,7 +5,6 @@ import { Benchmark, Candidate, Result, Run, Suite } from './models';
export async function initializeSequelize(
options: SequelizeOptions
): Promise<Sequelize> {
// const sequelize = new Sequelize('sqlite::memory:');
const sequelize = new Sequelize(options);
sequelize.addModels([Benchmark, Candidate, Result, Run, Suite]);


@ -7,7 +7,6 @@ export function normalizeSuite(suite: ISuite): ISuite {
...suite,
name: normalizeName(suite.name),
benchmark: normalizeName(suite.benchmark),
mode: normalizeName(suite.mode),
};
}
@ -21,12 +20,5 @@ export async function processSuite(suite: ISuite): Promise<void> {
throw new IllegalOperationError(message);
}
// Verify that referenced model is provided by benchmark.
const modes = benchmark.pipelines.map(p => p.mode);
if (!modes.includes(suite.mode)) {
const message = `Suite references unknown mode "${suite.mode}"`;
throw new IllegalOperationError(message);
}
await Suite.upsert<Suite>(suite);
}


@ -1,15 +1,13 @@
// Data contracts for queue messages
export interface PipelineRun
extends Readonly<{
name: string;
blobPrefix: string;
statusEndpoint: string;
resultsEndpoint: string;
stages: ReadonlyArray<PipelineStage>;
}> {}
export type PipelineRun = Readonly<{
name: string;
statusEndpoint: string;
resultsEndpoint: string;
stages: ReadonlyArray<PipelineRunStage>;
}>;
export interface PipelineStage {
export interface PipelineRunStage {
name: string;
image: string;
cmd?: string[];
@@ -17,6 +15,7 @@ export interface PipelineStage {
[key: string]: string;
}>;
volumes?: ReadonlyArray<{
type: string;
target: string;
source: string;
readonly: boolean;
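
Taken together, the renamed PipelineRunStage and the trimmed PipelineRun give the worker everything it needs in one queue message. An illustrative instance, assuming the types above are in scope; the values echo the test fixtures further down rather than anything the laboratory actually emits:

```ts
const exampleMessage: PipelineRun = {
  name: '69bd5df9-48a2-4fd0-81c5-0a7d6932eef2',
  statusEndpoint:
    'http://localhost:3000/runs/69bd5df9-48a2-4fd0-81c5-0a7d6932eef2',
  resultsEndpoint:
    'http://localhost:3000/runs/69bd5df9-48a2-4fd0-81c5-0a7d6932eef2/results',
  stages: [
    {
      name: 'candidate',
      image: 'candidate1-image',
      volumes: [
        {
          type: 'AzureBlob',
          target: '/input',
          source: 'https://sample.blob.core.windows.net/training',
          readonly: true,
        },
      ],
    },
    {
      name: 'scoring',
      image: 'benchmark-image',
      volumes: [
        {
          type: 'AzureBlob',
          target: '/reference',
          source: 'https://sample.blob.core.windows.net/reference',
          readonly: true,
        },
      ],
    },
  ],
};
```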



@@ -4,7 +4,7 @@ import { InMemoryQueue } from './inmemory';
/**
* Simple interface to send/receive messages from a queue.
*/
// tslint:disable:interface-name
// eslint-disable-next-line @typescript-eslint/interface-name-prefix
export interface IQueue<T> {
/**
* Enqueue a single message, serialized as JSON.
@@ -47,15 +47,11 @@ export interface QueueConfiguration {
* @param endpoint The location of the queue.
*/
export function GetQueue<T>(config: QueueConfiguration): IQueue<T> {
// tsc ensures that all elements of the discriminated union are covered: https://www.typescriptlang.org/docs/handbook/advanced-types.html#exhaustiveness-checking
// The following is safe but tslint doesn't understand, so we suppress the rule: https://github.com/palantir/tslint/issues/2104
// tslint:disable:switch-default
switch (config.mode) {
case QueueMode.InMemory:
return new InMemoryQueue<T>();
case QueueMode.Azure:
const azureConfig = config as AzureStorageQueueConfiguration;
return new AzureStorageQueue<T>(azureConfig);
return new AzureStorageQueue<T>(config as AzureStorageQueueConfiguration);
}
}
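
The switch above has no default branch because every QueueMode is handled and tsc verifies the exhaustiveness; the deleted tslint suppression existed only because tslint could not see that. A self-contained sketch of the pattern, with assumed, simplified config shapes rather than the actual QueueConfiguration:

```ts
enum QueueMode {
  InMemory = 'inmemory',
  Azure = 'azure',
}

interface InMemoryConfig {
  mode: QueueMode.InMemory;
}

interface AzureConfig {
  mode: QueueMode.Azure;
  endpoint: string;
}

type Config = InMemoryConfig | AzureConfig;

// tsc narrows `config` in each case and knows the switch covers every mode,
// so no default branch (and no lint suppression) is needed.
function queueKind(config: Config): string {
  switch (config.mode) {
    case QueueMode.InMemory:
      return 'in-memory queue';
    case QueueMode.Azure:
      return `Azure Storage queue at ${config.endpoint}`;
  }
}
```

The cast to AzureStorageQueueConfiguration in the real GetQueue suggests its QueueConfiguration is a base interface rather than a union like this one, which is why the cast remains in the diff.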


@@ -2,7 +2,7 @@ import { IQueue, QueueMessage } from '.';
export class InMemoryQueue<T> implements IQueue<T> {
// Explicitly allowing 'any' for this test helper object
// tslint:disable:no-any
// eslint-disable-next-line @typescript-eslint/no-explicit-any
readonly data: any[] = [];
readonly visibilityTimeout: number;


packages/sds/src/shims.d.ts (new file, 14 lines)

@@ -0,0 +1,14 @@
/* eslint-disable @typescript-eslint/no-empty-interface */
// @azure/storage-queue
// Workaround for Azure SDK Typescript errors related to fetch() api
// https://github.com/Azure/azure-sdk-for-js/issues/7462
interface Headers {}
interface Response {}
interface RequestInit {}
interface RequestInfo {}
// superagent
// https://github.com/DefinitelyTyped/DefinitelyTyped/issues/12044
declare interface XMLHttpRequest {}



@@ -9,10 +9,7 @@ import {
} from '../../src/queue';
export function getQueueConfiguration<T>() {
const serviceUrl = env
.get('TEST_QUEUE_SERVICE_URL')
.required()
.asUrlString();
const serviceUrl = env.get('TEST_QUEUE_SERVICE_URL').required().asUrlString();
const credential = new DefaultAzureCredential();
const queueEndpoint = `${serviceUrl}${v1()}`;
const config: AzureStorageQueueConfiguration = {
@@ -27,10 +24,3 @@ export function getQueueConfiguration<T>() {
queue: GetQueue<T>(config),
};
}
export function getDockerBaseVolumePath(): string {
return env
.get('TEST_BASE_VOLUME_PATH')
.required()
.asString();
}


@@ -3,7 +3,7 @@ import { assert } from 'chai';
import chaiAsPromised = require('chai-as-promised');
import * as nock from 'nock';
import { LaboratoryClient } from '../../../../src/laboratory/client';
import { LaboratoryClient } from '../../../src/laboratory/client';
import { benchmark1, candidate1, run1, suite1, runRequest1 } from '../data';
import {
@@ -15,7 +15,7 @@ import {
RunStatus,
SuiteType,
validate,
} from '../../../../src';
} from '../../../src';
chai.use(chaiAsPromised);
@@ -29,9 +29,7 @@ describe('laboratory/client', () => {
///////////////////////////////////////////////////////////////////////////
describe('benchmarks', () => {
it('allBenchmarks()', async () => {
nock(endpoint)
.get('/benchmarks')
.reply(200, []);
nock(endpoint).get('/benchmarks').reply(200, []);
const client = new LaboratoryClient(endpoint);
const benchmarks = await client.allBenchmarks();
@@ -83,9 +81,7 @@ describe('laboratory/client', () => {
///////////////////////////////////////////////////////////////////////////
describe('candidates', () => {
it('allCandidates()', async () => {
nock(endpoint)
.get('/candidates')
.reply(200, []);
nock(endpoint).get('/candidates').reply(200, []);
const client = new LaboratoryClient(endpoint);
const candidates = await client.allCandidates();
@@ -137,9 +133,7 @@ describe('laboratory/client', () => {
///////////////////////////////////////////////////////////////////////////
describe('suites', () => {
it('allSuites()', async () => {
nock(endpoint)
.get('/suites')
.reply(200, []);
nock(endpoint).get('/suites').reply(200, []);
const client = new LaboratoryClient(endpoint);
const suites = await client.allSuites();
@@ -147,9 +141,7 @@ describe('laboratory/client', () => {
});
it('oneSuite())', async () => {
nock(endpoint)
.get(`/suites/${suite1.name}`)
.reply(200, suite1);
nock(endpoint).get(`/suites/${suite1.name}`).reply(200, suite1);
const client = new LaboratoryClient(endpoint);
const observed = await client.oneSuite(suite1.name);
@@ -191,9 +183,7 @@ describe('laboratory/client', () => {
///////////////////////////////////////////////////////////////////////////
describe('runs', () => {
it('allRuns()', async () => {
nock(endpoint)
.get('/runs')
.reply(200, []);
nock(endpoint).get('/runs').reply(200, []);
const client = new LaboratoryClient(endpoint);
const runs = await client.allRuns();
@@ -201,9 +191,7 @@ describe('laboratory/client', () => {
});
it('oneRun())', async () => {
nock(endpoint)
.get(`/runs/${run1.name}`)
.reply(200, run1);
nock(endpoint).get(`/runs/${run1.name}`).reply(200, run1);
const client = new LaboratoryClient(endpoint);
const observed = await client.oneRun(run1.name);
@@ -214,7 +202,7 @@ describe('laboratory/client', () => {
let request: nock.Body;
nock(endpoint)
.post('/runs')
.reply(200, (uri: string, body: nock.Body) => {
.reply(202, (uri: string, body: nock.Body) => {
request = body;
return run1;
});
@@ -229,7 +217,7 @@ describe('laboratory/client', () => {
let request: nock.Body;
nock(endpoint)
.patch(`/runs/${run1.name}`)
.reply(200, (uri: string, body: nock.Body) => {
.reply(204, (uri: string, body: nock.Body) => {
request = body;
});
@@ -245,8 +233,8 @@ describe('laboratory/client', () => {
it('reportRunResults()', async () => {
let request: nock.Body;
nock(endpoint)
.patch(`/runs/${run1.name}/results`)
.reply(200, (uri: string, body: nock.Body) => {
.post(`/runs/${run1.name}/results`)
.reply(204, (uri: string, body: nock.Body) => {
request = body;
});
@@ -266,7 +254,7 @@ describe('laboratory/client', () => {
const suite = 'suite1';
nock(endpoint)
.get(`/runs/${benchmark}/${suite}`)
.get(`/runs?benchmark=${benchmark}&suite=${suite}`)
.reply(200, []);
const client = new LaboratoryClient(endpoint);
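
Every client test above follows the same shape: nock intercepts exactly one HTTP call against the laboratory endpoint, and the LaboratoryClient under test is pointed at that same URL. Reduced to its essentials (import paths assumed relative to this test file):

```ts
import * as nock from 'nock';
import { LaboratoryClient } from '../../../src/laboratory/client';

async function listRuns(endpoint: string): Promise<unknown> {
  // Stub GET /runs with an empty list, as the allRuns() test does above.
  nock(endpoint).get('/runs').reply(200, []);
  const client = new LaboratoryClient(endpoint);
  return client.allRuns();
}
```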


@@ -0,0 +1,223 @@
///////////////////////////////////////////////////////////////////////////////
//
// Data for tests in this directory.
//
///////////////////////////////////////////////////////////////////////////////
import { URL } from 'url';
import {
IBenchmark,
ICandidate,
IRun,
IRunRequest,
ISuite,
RunStatus,
BenchmarkStageKind,
} from '../../src';
export const serviceURL = 'http://localhost:3000'; // TODO: plumb real url.
export const timestamps = {
createdAt: new Date('2020-03-19T21:37:31.452Z'),
updatedAt: new Date('2020-03-20T22:37:31.452Z'),
};
export const benchmark1: IBenchmark = {
name: 'benchmark1',
author: 'author1',
apiVersion: 'v1alpha1',
stages: [
{
// Candidate
name: 'candidate',
kind: BenchmarkStageKind.CANDIDATE,
volumes: [
{
volume: 'training',
path: '/input',
},
],
},
{
// Benchmark
name: 'scoring',
image: 'benchmark-image',
kind: BenchmarkStageKind.CONTAINER,
volumes: [
{
volume: 'reference',
path: '/reference',
},
],
},
],
...timestamps,
};
export const benchmark2: IBenchmark = {
name: 'benchmark2',
author: 'author2',
apiVersion: 'v1alpha1',
stages: [
{
// Candidate
name: 'candidate',
kind: BenchmarkStageKind.CANDIDATE,
volumes: [
{
volume: 'training',
path: '/input',
},
],
},
{
// Benchmark
name: 'scoring',
image: 'benchmark-image',
kind: BenchmarkStageKind.CONTAINER,
volumes: [
{
volume: 'reference',
path: '/reference',
},
],
},
],
...timestamps,
};
export const benchmark3: IBenchmark = {
name: 'benchmark3',
author: 'author3',
apiVersion: 'v1alpha1',
stages: [
{
// Candidate
name: 'candidate',
kind: BenchmarkStageKind.CANDIDATE,
volumes: [
{
volume: 'training',
path: '/input',
},
],
},
{
// Benchmark
name: 'scoring',
image: 'benchmark-image',
kind: BenchmarkStageKind.CONTAINER,
volumes: [
{
volume: 'reference',
path: '/reference',
},
],
},
],
...timestamps,
};
export const candidate1: ICandidate = {
name: 'candidate1',
author: 'author1',
apiVersion: 'v1alpha1',
benchmark: 'benchmark1',
image: 'candidate1-image',
...timestamps,
};
export const candidate2: ICandidate = {
name: 'candidate2',
author: 'author2',
apiVersion: 'v1alpha1',
benchmark: 'benchmark1',
image: 'candidate2-image',
...timestamps,
};
export const candidate3: ICandidate = {
name: 'candidate3',
author: 'author3',
apiVersion: 'v1alpha1',
benchmark: 'benchmark1',
image: 'candidate3-image',
...timestamps,
};
export const suite1: ISuite = {
name: 'suite1',
author: 'author1',
apiVersion: 'v1alpha1',
benchmark: 'benchmark1',
volumes: [
{
name: 'training',
type: 'AzureBlob',
target: 'https://sample.blob.core.windows.net/training',
},
{
name: 'reference',
type: 'AzureBlob',
target: 'https://sample.blob.core.windows.net/reference',
},
],
...timestamps,
};
export const suite2: ISuite = {
name: 'suite2',
author: 'author2',
apiVersion: 'v1alpha1',
benchmark: 'benchmark1',
volumes: [
{
name: 'training',
type: 'AzureBlob',
target: 'https://sample.blob.core.windows.net/training',
},
{
name: 'reference',
type: 'AzureBlob',
target: 'https://sample.blob.core.windows.net/reference',
},
],
...timestamps,
};
export const suite3: ISuite = {
name: 'suite3',
author: 'author3',
apiVersion: 'v1alpha1',
benchmark: 'benchmark1',
volumes: [
{
name: 'training',
type: 'AzureBlob',
target: 'https://sample.blob.core.windows.net/training',
},
{
name: 'reference',
type: 'AzureBlob',
target: 'https://sample.blob.core.windows.net/reference',
},
],
...timestamps,
};
export const runRequest1: IRunRequest = {
candidate: 'candidate1',
suite: 'suite1',
};
const runid = '69bd5df9-48a2-4fd0-81c5-0a7d6932eef2';
export const run1: IRun = {
name: runid,
author: 'author1',
apiVersion: 'v1alpha1',
benchmark: benchmark1,
candidate: candidate1,
suite: suite1,
status: RunStatus.CREATED,
...timestamps,
};
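
The fixtures are built to hang together: candidate1 and suite1 both reference benchmark1, and suite1 declares exactly the volumes ('training', 'reference') that benchmark1's stages mount. A small, illustrative consistency check over them (relative import path assumed):

```ts
import { benchmark1, candidate1, suite1 } from './data';

// Returns true when every volume a benchmark stage mounts is declared by the
// suite and all three fixtures point at the same benchmark.
function fixturesAreConsistent(): boolean {
  const declared = new Set((suite1.volumes ?? []).map(v => v.name));
  const stagesSatisfied = benchmark1.stages.every(stage =>
    (stage.volumes ?? []).every(mount => declared.has(mount.volume))
  );
  return (
    stagesSatisfied &&
    candidate1.benchmark === benchmark1.name &&
    suite1.benchmark === benchmark1.name
  );
}
```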


@@ -57,7 +57,6 @@ describe('laboratory/benchmarks', () => {
const benchmark3 = {
...benchmark1,
version: benchmark1.version + 'x',
};
await lab.upsertBenchmark(benchmark3);
const results3 = await lab.allBenchmarks();


@@ -2,9 +2,6 @@ import * as chai from 'chai';
import { assert } from 'chai';
import chaiAsPromised = require('chai-as-promised');
import chaiExclude from 'chai-exclude';
import { Sequelize } from 'sequelize-typescript';
import { initializeSequelize, SequelizeLaboratory } from '../../../../src';
import { benchmark1, candidate1, candidate2, candidate3 } from '../data';
@@ -68,7 +65,7 @@ describe('laboratory/candidates', () => {
const candidate3 = {
...candidate1,
version: candidate1.version + 'x',
apiVersion: candidate1.apiVersion + 'x',
};
await lab.upsertCandidate(candidate3);
const results3 = await lab.allCandidates();
@@ -100,19 +97,11 @@ describe('laboratory/candidates', () => {
};
await assert.isRejected(lab.upsertCandidate(c2));
// Throws for invalid mode name
const c3 = {
...candidate3,
mode: '123_invalid_name',
};
await assert.isRejected(lab.upsertCandidate(c3));
// Lowercases name, benchmark, mode
// Lowercases name, benchmark
const c4 = {
...candidate3,
name: candidate3.name.toUpperCase(),
benchmark: candidate3.benchmark.toUpperCase(),
mode: candidate3.mode.toUpperCase(),
};
await lab.upsertCandidate(c4);
@@ -124,16 +113,7 @@ describe('laboratory/candidates', () => {
...candidate3,
name: candidate3.name.toUpperCase(),
benchmark: 'unknown',
mode: candidate3.mode.toUpperCase(),
};
await assert.isRejected(lab.upsertCandidate(c5));
// Throws on non-existant mode
const c6 = {
...candidate3,
name: candidate3.name.toUpperCase(),
mode: 'unknown',
};
await assert.isRejected(lab.upsertCandidate(c6));
});
});


@@ -4,14 +4,12 @@ import chaiAsPromised = require('chai-as-promised');
import chaiExclude from 'chai-exclude';
import { URL } from 'url';
import { apiVersion, IResult, IRun, RunStatus } from '../../../../src';
import { IResult, IRun, RunStatus } from '../../../src';
import {
benchmark1,
blobBase,
candidate1,
candidate2,
pipelines,
serviceURL,
suite1,
} from '../data';
@@ -48,11 +46,10 @@ describe('laboratory/runs', () => {
const expectedRun1: IRun = {
name: run1.name,
author: 'unknown',
version: apiVersion,
apiVersion: 'v1alpha1',
benchmark: benchmark1,
candidate: candidate1,
suite: suite1,
blob: `${blobBase}/${run1.name}`,
status: RunStatus.CREATED,
};
assertDeepEqual(run1, expectedRun1);
@@ -72,7 +69,6 @@ describe('laboratory/runs', () => {
await messages[0].complete();
const expectedMessage = {
blobPrefix: new URL(run1.name, blobBase).toString(),
name: run1.name,
resultsEndpoint: new URL(
`runs/${run1.name}/results`,
@@ -82,10 +78,26 @@ describe('laboratory/runs', () => {
{
image: candidate1.image,
name: 'candidate',
volumes: [
{
type: 'AzureBlob',
target: '/input',
source: 'https://sample.blob.core.windows.net/training',
readonly: true,
},
],
},
{
image: pipelines[0].stages[1].image!,
name: 'benchmark',
image: benchmark1.stages[1].image!,
name: 'scoring',
volumes: [
{
type: 'AzureBlob',
target: '/reference',
source: 'https://sample.blob.core.windows.net/reference',
readonly: true,
},
],
},
],
statusEndpoint: new URL(`runs/${run1.name}`, serviceURL).toString(),
@@ -111,13 +123,12 @@ describe('laboratory/runs', () => {
assert.equal(results.length, 1);
const expectedResults: IResult = {
author: 'unknown',
apiVersion: 'v1alpha1',
benchmark: benchmark1.name,
candidate: candidate1.name,
measures,
mode: suite1.mode,
name: run1.name,
suite: suite1.name,
version: apiVersion,
};
assertDeepEqual(results[0], expectedResults);
@@ -134,11 +145,10 @@ describe('laboratory/runs', () => {
const expected2: IRun = {
name: r2.name,
author: 'unknown',
version: apiVersion,
apiVersion: 'v1alpha1',
benchmark: benchmark1,
candidate: candidate2,
suite: suite1,
blob: `${blobBase}/${r2.name}`,
status: RunStatus.CREATED,
};
assertDeepEqual(bothRuns[1], expected2);

Some files were not shown because too many files changed in this diff.