Add mockAPI
This commit is contained in:
Parent
29885158c2
Commit
9ff02f59bb
devcontainer.json
@@ -25,8 +25,10 @@
		// Optionally mount k8s auth to existing cluster
		// "-v", "${env:HOME}${env:USERPROFILE}/.kube:/home/vscode/.kube",
	],
	"forwardPorts": [8085],
	"extensions": [
		"ms-azuretools.vscode-docker",
		"humao.rest-client",
		"ms-vscode.go"
	],
	"settings": {
@@ -60,4 +62,5 @@
			"ms-azuretools.vscode-docker": "workspace"
		}
	}

}
.gitignore
@@ -15,6 +15,7 @@
.env

!/bin
bin/mock-databricks-api
/bin/manager
cmd/manager/__debug_bin
22 Makefile
@@ -1,6 +1,11 @@
timestamp := $(shell /bin/date "+%Y%m%d-%H%M%S")

# Image URL to use all building/pushing image targets
IMG ?= controller:latest

# MockAPI image URL to use all building/pushing image targets
MOCKAPI_IMG ?= mockapi:${timestamp}

# Produce CRDs that work back to Kubernetes 1.11 (no version conversion)
CRD_OPTIONS ?= "crd:trivialVersions=true"

@@ -20,7 +25,6 @@ else
GOBIN=$(shell go env GOBIN)
endif

timestamp := $(shell /bin/date "+%Y%m%d-%H%M%S")
all: manager

# Run tests

@@ -215,3 +219,19 @@ install-test-dependency:
	&& go get github.com/onsi/ginkgo/ginkgo \
	&& go get golang.org/x/tools/cmd/cover \
	&& go get -u github.com/matm/gocov-html

build-mock-api:
	go build -o bin/mock-databricks-api ./mockapi

run-mock-api:
	go run ./mockapi

test-mock-api: lint
	go test ./mockapi/...

kind-deploy-mock-api: create-kindcluster install-prometheus
	docker build -t ${MOCKAPI_IMG} -f mockapi/Dockerfile .
	@echo "Loading mockAPI image into kind"
	kind load docker-image ${MOCKAPI_IMG} --name ${KIND_CLUSTER_NAME} -v 1
	cat ./mockapi/manifests/deployment.yaml | sed "s|mockapi:latest|${MOCKAPI_IMG}|" | kubectl apply -f -
	kubectl apply -f ./mockapi/manifests/service.yaml
@@ -0,0 +1,98 @@
# Mock Databricks API

The Databricks mock API can be found under `/databricks-mock-api`.

This is a mock API for the following success scenarios of the Databricks API:

- [Jobs/](https://docs.databricks.com/dev-tools/api/latest/jobs.html):
  - Create
  - Get
  - List
  - Delete
- Runs/
  - Submit
  - Get
  - GetOutput
  - List

In addition, each submitted run will cycle through its life states (PENDING -> RUNNING -> TERMINATING -> TERMINATED).
The length of each state is set in `run_repository.go` (`timePerRunLifeState`).
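
For illustration, that progression can be modelled as a pure function of the time elapsed since the run was submitted. The sketch below is not the repository's `run_repository.go`; the `timePerRunLifeState` value and function names here are stand-ins.

```go
package main

import (
	"fmt"
	"time"
)

// timePerRunLifeState is a stand-in for the value defined in run_repository.go.
const timePerRunLifeState = 10 * time.Second

// lifeCycleState maps the time elapsed since submission onto the
// PENDING -> RUNNING -> TERMINATING -> TERMINATED progression described above.
func lifeCycleState(submittedAt, now time.Time) string {
	elapsed := now.Sub(submittedAt)
	switch {
	case elapsed < timePerRunLifeState:
		return "PENDING"
	case elapsed < 2*timePerRunLifeState:
		return "RUNNING"
	case elapsed < 3*timePerRunLifeState:
		return "TERMINATING"
	default:
		return "TERMINATED"
	}
}

func main() {
	submitted := time.Now().Add(-25 * time.Second)
	// With the stand-in timings above, 25s of elapsed time lands in TERMINATING.
	fmt.Println(lifeCycleState(submitted, time.Now()))
}
```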

## Features

### Configurable API latency

To simulate the Databricks API more accurately, we've added an option to configure a range of latency for each request.

The latency range can be configured by setting minimum and maximum values (in milliseconds) for fast and slow requests using the environment variables:
```text
DATABRICKS_MOCK_API_LATENCY_MILLISECONDS_SLOW_REQUEST_MIN
DATABRICKS_MOCK_API_LATENCY_MILLISECONDS_SLOW_REQUEST_MAX
DATABRICKS_MOCK_API_LATENCY_MILLISECONDS_FAST_REQUEST_MIN
DATABRICKS_MOCK_API_LATENCY_MILLISECONDS_FAST_REQUEST_MAX
```
When set, each request will sleep for a time chosen at random between the min and max values.

If either of the variables is not set, the API defaults to running with no latency.
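
A minimal sketch of how such a per-request delay can be applied is shown below. It is an illustration only, not the repository's middleware: the helper names are invented, and it applies just the slow-request pair, whereas the real middleware presumably distinguishes fast and slow endpoints.

```go
package main

import (
	"math/rand"
	"net/http"
	"os"
	"strconv"
	"time"
)

// latencyFromEnv reads a min/max pair (in milliseconds) and returns a random
// duration in that range, or zero if either variable is unset or invalid.
func latencyFromEnv(minEnv, maxEnv string) time.Duration {
	min, errMin := strconv.Atoi(os.Getenv(minEnv))
	max, errMax := strconv.Atoi(os.Getenv(maxEnv))
	if errMin != nil || errMax != nil || max < min {
		return 0
	}
	return time.Duration(min+rand.Intn(max-min+1)) * time.Millisecond
}

// withLatency wraps a handler and sleeps before serving each request.
func withLatency(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		time.Sleep(latencyFromEnv(
			"DATABRICKS_MOCK_API_LATENCY_MILLISECONDS_SLOW_REQUEST_MIN",
			"DATABRICKS_MOCK_API_LATENCY_MILLISECONDS_SLOW_REQUEST_MAX"))
		next.ServeHTTP(w, r)
	})
}

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		_, _ = w.Write([]byte("ok"))
	})
	_ = http.ListenAndServe(":8081", withLatency(mux))
}
```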

### Configurable Rate Limiting

To allow rate-limiting requests to match Databricks API behaviour, a rate limit can be specified by setting the `DATABRICKS_MOCK_API_RATE_LIMIT` environment variable to the number of requests per second that should be allowed against the API.
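
Since `golang.org/x/time` is among the module's dependencies, a limiter along these lines is one plausible shape for that behaviour. The sketch below is an assumption rather than the repository's code, including the choice of rejecting excess requests with 429.

```go
package main

import (
	"net/http"
	"os"
	"strconv"

	"golang.org/x/time/rate"
)

// withRateLimit rejects requests over the configured requests-per-second with
// 429 Too Many Requests, roughly mirroring Databricks API rate-limit behaviour.
func withRateLimit(next http.Handler) http.Handler {
	rps, err := strconv.Atoi(os.Getenv("DATABRICKS_MOCK_API_RATE_LIMIT"))
	if err != nil || rps <= 0 {
		return next // no limit configured
	}
	limiter := rate.NewLimiter(rate.Limit(rps), rps)
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if !limiter.Allow() {
			http.Error(w, "Too many requests", http.StatusTooManyRequests)
			return
		}
		next.ServeHTTP(w, r)
	})
}

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { _, _ = w.Write([]byte("ok")) })
	_ = http.ListenAndServe(":8082", withRateLimit(mux))
}
```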

### Configurable Errors

To configure the percentage of responses that return a status code 500 in the mock API, set `DATABRICKS_MOCK_API_ERROR_500_PROBABILITY`.

E.g. setting `DATABRICKS_MOCK_API_ERROR_500_PROBABILITY` to `20` will return a status code 500 response for roughly 20% of responses.

To configure a percentage of calls that should sink-hole, i.e. return no response and keep the connection open for 10 minutes, set `DATABRICKS_MOCK_API_ERROR_SINKHOLE_PROBABILITY`. Probabilities are as for `DATABRICKS_MOCK_API_ERROR_500_PROBABILITY`.

To configure a percentage of calls that should return an XML response with status code 200, set `DATABRICKS_MOCK_API_ERROR_XML_RESPONSE_PROBABILITY`. Probabilities are as for `DATABRICKS_MOCK_API_ERROR_500_PROBABILITY`.

The combined probabilities must be <= 100.
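
One way to picture why the combined probabilities must stay at or below 100 is to roll a single number in [0, 100) per request and slice it into bands, as in this illustrative sketch (not the repository's actual middleware):

```go
package main

import (
	"fmt"
	"math/rand"
)

// errorMode decides what a request should do, given the three configured
// percentages. Rolling one number in [0,100) and slicing it into bands is why
// the combined probabilities must not exceed 100.
func errorMode(p500, pSinkhole, pXML int) string {
	roll := rand.Intn(100)
	switch {
	case roll < p500:
		return "return HTTP 500"
	case roll < p500+pSinkhole:
		return "sink-hole (hold the connection open)"
	case roll < p500+pSinkhole+pXML:
		return "return XML with HTTP 200"
	default:
		return "normal JSON response"
	}
}

func main() {
	// With 20/10/5 configured, roughly 65% of requests fall through to a normal response.
	for i := 0; i < 5; i++ {
		fmt.Println(errorMode(20, 10, 5))
	}
}
```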

### Dynamic Configuration

The API includes a `/config` endpoint that can be used to `GET`, `PUT` or `PATCH` configuration values. This allows configuration values to be retrieved from the API as well as changed without restarting the API (which would lose in-memory run data).

`GET` returns the full set of configurable values.

`PUT` expects a full set of configurable values to be specified and applies all the values.

`PATCH` allows one or more configurable values to be specified and only applies the values from the body.
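
The `*_sample.http` files later in this commit drive the endpoint from VS Code's REST Client; the same call can also be made programmatically, for example from a test. A small sketch, assuming the API is listening on the local default port 8080:

```go
package main

import (
	"bytes"
	"fmt"
	"log"
	"net/http"
)

func main() {
	// Patch a single configuration value without restarting the mock API.
	body := bytes.NewBufferString(`{"DATABRICKS_MOCK_API_ERROR_500_PROBABILITY": 10}`)
	req, err := http.NewRequest(http.MethodPatch, "http://localhost:8080/config", body)
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```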

## Running locally

Open this repo in VS Code and select "Remote-Containers: Reopen in Container".

Once the devcontainer has built and started, use `make run-mock-api` to run the API.

The `mockapi_samples/*_sample.http` files contain example calls that can be made against the running API endpoints.

## Running in Kind

To run the mock API in Kind, run `make kind-deploy-mock-api`. This will ensure a Kind cluster is created, deploy Prometheus with Helm, build and load a Docker image for the mock API into the Kind cluster, and then create a Deployment and Service.

To test, run `kubectl port-forward svc/databricks-mock-api 8085:8080 -n databricks-mock-api` and make a request to http://localhost:8085 to verify that the API is running.

## Running in a separate cluster

### Prerequisites

This assumes that you have a container registry and a Kubernetes cluster that is able to pull images from it.

> NB: For now, run the commands below outside the devcontainer, as permissions currently don't allow the config to be modified.

If you are using Azure Container Registry, run `az acr login -n your-azure-container-registry-name` to ensure that you are authenticated to push images to the registry.

Ensure your KUBECONFIG is set to point to the cluster you want to deploy to.

### Deploy to the cluster

Deploy to AKS with:
```bash
IMG=your-container-registry.azurecr.io/databricks-mock-api:vsomething make aks-deploy
```

To test, run `kubectl port-forward svc/databricks-mock-api 8085:8080` and make a request to http://localhost:8085 to verify that the API is running.

> NB: If you see the error 'unauthorized: authentication required', make sure that the image name matches the ACR login server casing, or try pushing the Docker image from outside the container.
@@ -0,0 +1,33 @@
@baseURL=http://localhost:8080/

# Get config values
# @name getConfig
GET {{baseURL}}config

###

# Set config values
# @name setConfig
PUT {{baseURL}}config
content-type: application/json

{
    "DATABRICKS_MOCK_API_LATENCY_MILLISECONDS_SLOW_REQUEST_MAX": 30000,
    "DATABRICKS_MOCK_API_LATENCY_MILLISECONDS_SLOW_REQUEST_MIN": 1000,
    "DATABRICKS_MOCK_API_LATENCY_MILLISECONDS_FAST_REQUEST_MAX": 500,
    "DATABRICKS_MOCK_API_LATENCY_MILLISECONDS_FAST_REQUEST_MIN": 200,
    "DATABRICKS_MOCK_API_RATE_LIMIT": 30,
    "DATABRICKS_MOCK_API_ERROR_500_PROBABILITY": 10
}

###

# Patch config values
# @name patchConfig
PATCH {{baseURL}}config
content-type: application/json

{
    "DATABRICKS_MOCK_API_ERROR_500_PROBABILITY": 10
}
@@ -0,0 +1,40 @@
@baseURL=http://localhost:8080/api/
# Get list of empty jobs
# @name getJobList
GET {{baseURL}}2.0/jobs/list

###

# Create a job and return a job ID
# @name createJob
POST {{baseURL}}2.0/jobs/create

< ./../api/integration_tests/test_data/job/job_create.json

###

# Get created job
# @name getCreatedJob
@jobID={{createJob.response.body.$.job_id}}
GET {{baseURL}}2.0/jobs/get?job_id={{jobID}}

###

# Job shows in the list of jobs
GET {{baseURL}}2.0/jobs/list

###

# Delete the job
# @name deleteJob
@jobID={{createJob.response.body.$.job_id}}
POST {{baseURL}}2.0/jobs/delete

{
    "job_id" : {{jobID}}
}

###

# Gone from the list
GET {{baseURL}}2.0/jobs/list
@@ -0,0 +1,54 @@
@baseURL=http://localhost:8080/api/
# Get list of empty runs
# @name getRunList
GET {{baseURL}}2.0/jobs/runs/list

###

# Submit a run and return a run ID
# @name submitRun
POST {{baseURL}}2.0/jobs/runs/submit

< ./../api/integration_tests/test_data/run/run_submit.json

###

# Get created run by ID
# @name getCreatedRun
@runID={{submitRun.response.body.$.run_id}}

GET {{baseURL}}2.0/jobs/runs/get?run_id={{runID}}

###

# Cancel the run by ID
# @name cancelRun
@runID={{submitRun.response.body.$.run_id}}
POST {{baseURL}}2.0/jobs/runs/cancel

{
    "run_id" : {{runID}}
}

###

# Delete the run by ID
# @name deleteRun
@runID={{submitRun.response.body.$.run_id}}
POST {{baseURL}}2.0/jobs/runs/delete

{
    "run_id" : {{runID}}
}

###

# Get submitted run output by ID
# @name getRunOutput
@runID={{submitRun.response.body.$.run_id}}

GET {{baseURL}}2.0/jobs/runs/get-output?run_id={{runID}}

###

# Run shows in the list of runs
GET {{baseURL}}2.0/jobs/runs/list
12 go.mod
@@ -3,20 +3,21 @@ module github.com/microsoft/azure-databricks-operator
go 1.12

require (
	github.com/AlekSi/gocov-xml v0.0.0-20190121064608-3a14fb1c4737 // indirect
	github.com/101loops/bdd v0.0.0-20161224202746-3e71f58e2cc3 // indirect
	github.com/go-logr/logr v0.1.0
	github.com/jstemmer/go-junit-report v0.9.1 // indirect
	github.com/matm/gocov-html v0.0.0-20191111163307-9ee104d84c82 // indirect
	github.com/google/uuid v1.1.1
	github.com/gorilla/mux v1.7.4
	github.com/onsi/ginkgo v1.10.3
	github.com/onsi/gomega v1.7.0
	github.com/prometheus/client_golang v0.9.2
	github.com/prometheus/common v0.0.0-20181126121408-4724e9255275
	github.com/spf13/pflag v1.0.5 // indirect
	github.com/stephanos/clock v0.0.0-20161224195152-e4ec0ab5053e
	github.com/stretchr/testify v1.4.0
	github.com/xinsnake/databricks-sdk-golang v0.1.3
	golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708 // indirect
	golang.org/x/net v0.0.0-20191112182307-2180aed22343
	golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea // indirect
	golang.org/x/tools v0.0.0-20191112232237-76a3b8da50ef // indirect
	golang.org/x/time v0.0.0-20181108054448-85acf8d2951c
	golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898 // indirect
	gopkg.in/yaml.v2 v2.2.4 // indirect
	k8s.io/api v0.0.0-20190918155943-95b840bb6a1f

@@ -24,5 +25,4 @@ require (
	k8s.io/client-go v0.0.0-20190918160344-1fbdaa4c8d90
	k8s.io/klog v1.0.0 // indirect
	sigs.k8s.io/controller-runtime v0.4.0
	sigs.k8s.io/controller-tools v0.2.4 // indirect
)
98 go.sum
@ -3,8 +3,8 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT
|
|||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.38.0 h1:ROfEUZz+Gh5pa62DJWXSaonyu3StP6EA6lPEXPI6mCo=
|
||||
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
|
||||
github.com/AlekSi/gocov-xml v0.0.0-20190121064608-3a14fb1c4737 h1:JZHBkt0GhM+ARQykshqpI49yaWCHQbJonH3XpDTwMZQ=
|
||||
github.com/AlekSi/gocov-xml v0.0.0-20190121064608-3a14fb1c4737/go.mod h1:w1KSuh2JgIL3nyRiZijboSUwbbxOrTzWwyWVFUHtXBQ=
|
||||
github.com/101loops/bdd v0.0.0-20161224202746-3e71f58e2cc3 h1:80KWhZZrnW3s/PAIvssF5pCBo50DtphX1Wad66iqGIs=
|
||||
github.com/101loops/bdd v0.0.0-20161224202746-3e71f58e2cc3/go.mod h1:1aIYTieozlN6BE05blV9fx2Ypktm88fAaom7rBFOVJ4=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
|
||||
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
|
||||
|
@ -24,8 +24,6 @@ github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdko
|
|||
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
|
||||
github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
|
||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
|
||||
github.com/axw/gocov v1.0.0 h1:YsqYR66hUmilVr23tu8USgnJIJvnwh3n7j5zRn7x4LU=
|
||||
github.com/axw/gocov v1.0.0/go.mod h1:LvQpEYiwwIb2nYkXY2fDWhg9/AsYqkhmrCshjlUJECE=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
|
||||
|
@ -55,8 +53,6 @@ github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT
|
|||
github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
||||
github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M=
|
||||
github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
||||
github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
|
||||
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
||||
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
|
@ -100,12 +96,6 @@ github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/
|
|||
github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
|
||||
github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4=
|
||||
github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA=
|
||||
github.com/gobuffalo/flect v0.1.5 h1:xpKq9ap8MbYfhuPCF0dBH854Gp9CxZjr/IocxELFflo=
|
||||
github.com/gobuffalo/flect v0.1.5/go.mod h1:W3K3X9ksuZfir8f/LrfVtWmCDQFfayuylOJ7sz/Fj80=
|
||||
github.com/gogo/protobuf v1.1.1 h1:72R+M5VuhED/KujmZVcIquuo8mBgX4oVda//DQb3PXo=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE=
|
||||
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
|
||||
github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d h1:3PaI8p3seN09VjbTYC/QWlUZdZ1qS1zGjy7LH2Wt07I=
|
||||
github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
|
@ -128,8 +118,6 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw
|
|||
github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
|
||||
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
|
||||
github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
|
||||
github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeqr2z77+8R2RKyh8PG66dcu1V0ck=
|
||||
github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
|
||||
github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
|
@ -139,18 +127,16 @@ github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
|
|||
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
|
||||
github.com/googleapis/gnostic v0.2.0 h1:l6N3VoaVzTncYYW+9yOz2LJJammFZGBO13sqgEhpy9g=
|
||||
github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
|
||||
github.com/googleapis/gnostic v0.3.1 h1:WeAefnSUHlBb0iJKwxFDZdbfGwkd7xRNuV+IpXMJhYk=
|
||||
github.com/googleapis/gnostic v0.3.1/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU=
|
||||
github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
|
||||
github.com/gorilla/mux v1.7.4 h1:VuZ8uybHlWmqV03+zRzdwKL4tUnIp1MAQtp1mIFE1bc=
|
||||
github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
|
||||
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
|
||||
github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v0.0.0-20190222133341-cfaf5686ec79/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.3.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
|
||||
github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47 h1:UnszMmmmm5vLwWzDjTFVIkfhvWF1NdrmChl8L2NUDCw=
|
||||
github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
|
||||
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
|
@ -163,16 +149,11 @@ github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJ
|
|||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
|
||||
github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/json-iterator/go v1.1.5 h1:gL2yXlmiIo4+t+y32d4WGwOjKGYcGOuyrg46vadswDE=
|
||||
github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs=
|
||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo=
|
||||
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
||||
github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o=
|
||||
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
|
||||
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
|
||||
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
|
@ -187,12 +168,6 @@ github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN
|
|||
github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/matm/gocov-html v0.0.0-20191111163307-9ee104d84c82 h1:pMb6HhXFlcC2qHIx7Z++2nRhgQ+5kQx8wLbMqBpuU6U=
|
||||
github.com/matm/gocov-html v0.0.0-20191111163307-9ee104d84c82/go.mod h1:zha4ZSIA/qviBBKx3j6tJG/Lx6aIdjOXPWuKAcJchQM=
|
||||
github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU=
|
||||
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
|
||||
github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE=
|
||||
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
|
@ -218,14 +193,10 @@ github.com/onsi/ginkgo v1.10.3 h1:OoxbjfXVZyod1fmWYhI7SEyaD8B00ynP3T+D5GiyHOY=
|
|||
github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
|
||||
github.com/onsi/gomega v1.3.0/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
|
||||
github.com/onsi/gomega v1.4.2 h1:3mYCb7aPxS/RU7TI1y4rkEn1oKmPRjNJLNEXgw7MH2I=
|
||||
github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo=
|
||||
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=
|
||||
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/pborman/uuid v0.0.0-20170612153648-e790cca94e6c h1:MUyE44mTvnI5A0xrxIxaMqoWFzPfQvtE2IWUollMDMs=
|
||||
github.com/pborman/uuid v0.0.0-20170612153648-e790cca94e6c/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34=
|
||||
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
|
||||
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
||||
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
|
||||
|
@ -236,18 +207,12 @@ github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77
|
|||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
|
||||
github.com/prometheus/client_golang v0.9.0 h1:tXuTFVHC03mW0D+Ua1Q2d1EAVqLTuggX50V0VLICCzY=
|
||||
github.com/prometheus/client_golang v0.9.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v0.9.2 h1:awm861/B8OKDd2I/6o1dy3ra4BamzKhYOiGItCeZ740=
|
||||
github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e h1:n/3MEhJQjQxrOUCzh1Y3Re6aJUUWRp2M9+Oc3eVn/54=
|
||||
github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||
github.com/prometheus/common v0.0.0-20181126121408-4724e9255275 h1:PnBWHBf+6L0jOqq0gIVUe6Yk0/QMZ640k6NvkxcBf+8=
|
||||
github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273 h1:agujYaXJSxSo18YNX3jzl+4G6Bstwt+kqv47GS12uL0=
|
||||
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a h1:9a8MnZMP0X2nLJdBg+pBmGgkJlSaKC2KaQmTCk1XDtE=
|
||||
github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M=
|
||||
|
@ -257,30 +222,29 @@ github.com/soheilhy/cmux v0.1.3/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4k
|
|||
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
|
||||
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
|
||||
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||
github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8=
|
||||
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
|
||||
github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s=
|
||||
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
|
||||
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
|
||||
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.2 h1:Fy0orTDgHdbnzHcsOgfCN4LtHf0ec3wwtiwJqwvf3Gc=
|
||||
github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
|
||||
github.com/stephanos/clock v0.0.0-20161224195152-e4ec0ab5053e h1:PQRvygw1P0KwOMoRQgWZDvEHHr71IrqffNBFx+/zF6g=
|
||||
github.com/stephanos/clock v0.0.0-20161224195152-e4ec0ab5053e/go.mod h1:dwToEiNfnifg5gO0zbbjCVMwfOpNQZ9IqmMdhpF94Xw=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=
|
||||
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
|
||||
github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
|
||||
github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||
github.com/xinsnake/databricks-sdk-golang v0.1.2 h1:G0tv1WRHL/XeZj8wYSJoPsqvF5EYUysOL1jRYQUR3ro=
|
||||
github.com/xinsnake/databricks-sdk-golang v0.1.2/go.mod h1:7/wZh5NJnaT7Cr9C0zMN11pWyZxuA3zml4yl7bDJrZo=
|
||||
github.com/xinsnake/databricks-sdk-golang v0.1.3 h1:H4IeTc9FQq/o8R1/Yx4hMEDKunt+/6CS2iBYnHqypsg=
|
||||
github.com/xinsnake/databricks-sdk-golang v0.1.3/go.mod h1:7/wZh5NJnaT7Cr9C0zMN11pWyZxuA3zml4yl7bDJrZo=
|
||||
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
|
||||
|
@ -294,15 +258,11 @@ go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/
|
|||
go.uber.org/zap v0.0.0-20180814183419-67bc79d13d15/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
go.uber.org/zap v1.9.1 h1:XCJQEf3W6eZaVwhRBof6ImoYGJSITeKWsyeh3HFu/5o=
|
||||
go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
golang.org/x/crypto v0.0.0-20180820150726-614d502a4dac h1:7d7lG9fHOLdL6jZPtnV4LpI41SbohIJ1Atq7U991dMg=
|
||||
golang.org/x/crypto v0.0.0-20180820150726-614d502a4dac/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708 h1:pXVtWnwHkrWD9ru3sDxY/qFK/bfc0egRovX91EjWjf4=
|
||||
golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
|
@ -327,12 +287,9 @@ golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73r
|
|||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20191021144547-ec77196f6094 h1:5O4U9trLjNpuhpynaDsqwCk+Tw6seqJz1EbqbnzHrc8=
|
||||
golang.org/x/net v0.0.0-20191021144547-ec77196f6094/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20191112182307-2180aed22343 h1:00ohfJ4K98s3m6BGUoBd8nyfp4Yl0GoIKvw5abItTjI=
|
||||
golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs=
|
||||
|
@ -355,15 +312,11 @@ golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5h
|
|||
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190429190828-d89cdac9e872/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7 h1:HmbHVPwrPEKPGLAcHSrMe6+hqSUlvZU0rab6x5EXfGU=
|
||||
golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea h1:Mz1TMnfJDRJLk8S8OPCoJYgrsp/Se/2TBre2+vwX128=
|
||||
golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
|
@ -373,11 +326,8 @@ golang.org/x/text v0.3.1-0.20171227012246-e19ae1496984/go.mod h1:NqM8EUOU14njkJ3
|
|||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2 h1:+DCIGbF/swA92ohVg0//6X2IVY3KZs6p9mix0ziNYJM=
|
||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c h1:fqgJT0MGcGpPgpWU7VRdRjuArfcOvC4AoJmILihzhDg=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
|
@ -387,19 +337,12 @@ golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3
|
|||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190501045030-23463209683d/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190617190820-da514acc4774 h1:CQVOmarCBFzTx0kbOU0ru54Cvot8SdSrNYjZPhQl+gk=
|
||||
golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20191112232237-76a3b8da50ef h1:B4rfsoFW4SQN/q7LIj1fTSRufTbcqUKY+LhKuslJ6JU=
|
||||
golang.org/x/tools v0.0.0-20191112232237-76a3b8da50ef/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898 h1:/atklqdjdhuosWIl6AIbOeHJjicWYPqR9bpxqxYG2pA=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
gomodules.xyz/jsonpatch/v2 v2.0.0 h1:OyHbl+7IOECpPKfVK42oFr6N7+Y2dR+Jsb/IiDV3hOo=
|
||||
gomodules.xyz/jsonpatch/v2 v2.0.0/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU=
|
||||
gomodules.xyz/jsonpatch/v2 v2.0.1 h1:xyiBuvkD2g5n7cYzx6u2sxQvsAy4QJsZFCzGVdzOXZ0=
|
||||
gomodules.xyz/jsonpatch/v2 v2.0.1/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU=
|
||||
gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0=
|
||||
|
@ -409,6 +352,7 @@ google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEt
|
|||
google.golang.org/appengine v1.1.0 h1:igQkv0AAhEIvTEpD5LIpAfav2eeVO9HBTjvKHVJPRSs=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c=
|
||||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
|
@ -436,45 +380,31 @@ gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
|||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v3 v3.0.0-20190905181640-827449938966/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
k8s.io/api v0.0.0-20190409021203-6e4e0e4f393b h1:aBGgKJUM9Hk/3AE8WaZIApnTxG35kbuQba2w+SXqezo=
|
||||
k8s.io/api v0.0.0-20190409021203-6e4e0e4f393b/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA=
|
||||
k8s.io/api v0.0.0-20190918155943-95b840bb6a1f h1:8FRUST8oUkEI45WYKyD8ed7Ad0Kg5v11zHyPkEVb2xo=
|
||||
k8s.io/api v0.0.0-20190918155943-95b840bb6a1f/go.mod h1:uWuOHnjmNrtQomJrvEBg0c0HRNyQ+8KTEERVsK0PW48=
|
||||
k8s.io/apiextensions-apiserver v0.0.0-20190409022649-727a075fdec8 h1:q1Qvjzs/iEdXF6A1a8H3AKVFDzJNcJn3nXMs6R6qFtA=
|
||||
k8s.io/apiextensions-apiserver v0.0.0-20190409022649-727a075fdec8/go.mod h1:IxkesAMoaCRoLrPJdZNZUQp9NfZnzqaVzLhb2VEQzXE=
|
||||
k8s.io/apiextensions-apiserver v0.0.0-20190918161926-8f644eb6e783 h1:V6ndwCPoao1yZ52agqOKaUAl7DYWVGiXjV7ePA2i610=
|
||||
k8s.io/apiextensions-apiserver v0.0.0-20190918161926-8f644eb6e783/go.mod h1:xvae1SZB3E17UpV59AWc271W/Ph25N+bjPyR63X6tPY=
|
||||
k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d h1:Jmdtdt1ZnoGfWWIIik61Z7nKYgO3J+swQJtPYsP9wHA=
|
||||
k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0=
|
||||
k8s.io/apimachinery v0.0.0-20190913080033-27d36303b655 h1:CS1tBQz3HOXiseWZu6ZicKX361CZLT97UFnnPx0aqBw=
|
||||
k8s.io/apimachinery v0.0.0-20190913080033-27d36303b655/go.mod h1:nL6pwRT8NgfF8TT68DBI8uEePRt89cSvoXUVqbkWHq4=
|
||||
k8s.io/apiserver v0.0.0-20190918160949-bfa5e2e684ad/go.mod h1:XPCXEwhjaFN29a8NldXA901ElnKeKLrLtREO9ZhFyhg=
|
||||
k8s.io/client-go v0.0.0-20190918160344-1fbdaa4c8d90 h1:mLmhKUm1X+pXu0zXMEzNsOF5E2kKFGe5o6BZBIIqA6A=
|
||||
k8s.io/client-go v0.0.0-20190918160344-1fbdaa4c8d90/go.mod h1:J69/JveO6XESwVgG53q3Uz5OSfgsv4uxpScmmyYOOlk=
|
||||
k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible h1:U5Bt+dab9K8qaUmXINrkXO135kA11/i5Kg1RUydgaMQ=
|
||||
k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s=
|
||||
k8s.io/code-generator v0.0.0-20190912054826-cd179ad6a269/go.mod h1:V5BD6M4CyaN5m+VthcclXWsVcT1Hu+glwa1bi3MIsyE=
|
||||
k8s.io/component-base v0.0.0-20190918160511-547f6c5d7090/go.mod h1:933PBGtQFJky3TEwYx4aEPZ4IxqhWh3R6DCmzqIn1hA=
|
||||
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
|
||||
k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
|
||||
k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
|
||||
k8s.io/klog v0.2.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
|
||||
k8s.io/klog v0.3.0 h1:0VPpR+sizsiivjIfIAQH/rl8tan6jvWkS7lU+0di3lE=
|
||||
k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
|
||||
k8s.io/klog v0.4.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
|
||||
k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
|
||||
k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
|
||||
k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c h1:3KSCztE7gPitlZmWbNwue/2U0YruD65DqX3INopDAQM=
|
||||
k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc=
|
||||
k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf h1:EYm5AW/UUDbnmnI+gK0TJDVK9qPLhM+sRHYanNKw0EQ=
|
||||
k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E=
|
||||
k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5 h1:VBM/0P5TWxwk+Nw6Z+lAw3DKgO76g90ETOiA6rfLV1Y=
|
||||
k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
|
||||
k8s.io/utils v0.0.0-20190801114015-581e00157fb1 h1:+ySTxfHnfzZb9ys375PXNlLhkJPLKgHajBU0N62BDvE=
|
||||
k8s.io/utils v0.0.0-20190801114015-581e00157fb1/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
|
||||
modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw=
|
||||
|
@ -482,18 +412,10 @@ modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk=
|
|||
modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k=
|
||||
modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs=
|
||||
modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I=
|
||||
sigs.k8s.io/controller-runtime v0.2.0-beta.4 h1:S1XVfRWR1MuIXZdkYx3jN8JDw+bbQxmWZroy0i87z/A=
|
||||
sigs.k8s.io/controller-runtime v0.2.0-beta.4/go.mod h1:HweyYKQ8fBuzdu2bdaeBJvsFgAi/OqBBnrVGXcqKhME=
|
||||
sigs.k8s.io/controller-runtime v0.4.0 h1:wATM6/m+3w8lj8FXNaO6Fs/rq/vqoOjO1Q116Z9NPsg=
|
||||
sigs.k8s.io/controller-runtime v0.4.0/go.mod h1:ApC79lpY3PHW9xj/w9pj+lYkLgwAAUZwfXkME1Lajns=
|
||||
sigs.k8s.io/controller-tools v0.2.0-beta.4 h1:W+coTe+nkVNclQrikwlRp6GJKwgcrHzvIQZ9kCaak5A=
|
||||
sigs.k8s.io/controller-tools v0.2.0-beta.4/go.mod h1:8t/X+FVWvk6TaBcsa+UKUBbn7GMtvyBKX30SGl4em6Y=
|
||||
sigs.k8s.io/controller-tools v0.2.4 h1:la1h46EzElvWefWLqfsXrnsO3lZjpkI0asTpX6h8PLA=
|
||||
sigs.k8s.io/controller-tools v0.2.4/go.mod h1:m/ztfQNocGYBgTTCmFdnK94uVvgxeZeE3LtJvd/jIzA=
|
||||
sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI=
|
||||
sigs.k8s.io/structured-merge-diff v0.0.0-20190817042607-6149e4549fca/go.mod h1:IIgPezJWb76P0hotTxzDbWsMYB8APh18qZnxkomBpxA=
|
||||
sigs.k8s.io/testing_frameworks v0.1.1 h1:cP2l8fkA3O9vekpy5Ks8mmA0NW/F7yBdXf8brkWhVrs=
|
||||
sigs.k8s.io/testing_frameworks v0.1.1/go.mod h1:VVBKrHmJ6Ekkfz284YKhQePcdycOzNH9qL6ht1zEr/U=
|
||||
sigs.k8s.io/testing_frameworks v0.1.2 h1:vK0+tvjF0BZ/RYFeZ1E6BYBwHJJXhjuZ3TdsEKH+UQM=
|
||||
sigs.k8s.io/testing_frameworks v0.1.2/go.mod h1:ToQrwSC3s8Xf/lADdZp3Mktcql9CG0UAmdJG9th5i0w=
|
||||
sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
|
||||
|
mockapi/Dockerfile
@@ -0,0 +1,28 @@
#-------------------------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
#-------------------------------------------------------------------------------------------------------------

FROM golang:1.12-stretch as builder

WORKDIR /workspace

# Copy go.mod etc and download dependencies (leverage docker layer caching)
COPY go.mod go.mod
COPY go.sum go.sum
ENV GO111MODULE=on
RUN go mod download

# Copy source code over
COPY mockapi/ mockapi/

# Build
RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -a -o mock-databricks-api ./mockapi/

# Refer to https://github.com/GoogleContainerTools/distroless for more details
FROM gcr.io/distroless/static:nonroot
WORKDIR /
COPY --from=builder /workspace/mock-databricks-api .
USER nonroot:nonroot

ENTRYPOINT [ "/mock-databricks-api" ]
@@ -0,0 +1,42 @@
package handler

import (
	"net/http"

	"github.com/microsoft/azure-databricks-operator/mockapi/repository"
)

// CreateCluster handles the cluster create endpoint
func CreateCluster(j *repository.ClusterRepository) func(w http.ResponseWriter, r *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {
		http.Error(w, "Not implemented", http.StatusNotImplemented)
	}
}

// ListClusters handles the cluster list endpoint
func ListClusters(j *repository.ClusterRepository) func(w http.ResponseWriter, r *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {
		http.Error(w, "Not implemented", http.StatusNotImplemented)
	}
}

// GetCluster handles the cluster get endpoint
func GetCluster(j *repository.ClusterRepository) func(w http.ResponseWriter, r *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {
		http.Error(w, "Not implemented", http.StatusNotImplemented)
	}
}

// EditCluster handles the cluster edit endpoint
func EditCluster(j *repository.ClusterRepository) func(w http.ResponseWriter, r *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {
		http.Error(w, "Not implemented", http.StatusNotImplemented)
	}
}

// DeleteCluster handles the cluster delete endpoint
func DeleteCluster(j *repository.ClusterRepository) func(w http.ResponseWriter, r *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {
		http.Error(w, "Not implemented", http.StatusNotImplemented)
	}
}
@@ -0,0 +1,26 @@
package handler

import (
	"log"
	"net/http"
)

// Index returns the index page
func Index(w http.ResponseWriter, r *http.Request) {
	requestID := getNewRequestID()
	log.Printf("RequestID:%6d: Index - starting\n", requestID)
	_, _ = w.Write([]byte("Databricks mock API is up"))
	log.Printf("RequestID:%6d: Index - completed\n", requestID)
}

// NotFoundPage returns a not found page
func NotFoundPage(w http.ResponseWriter, r *http.Request) {
	log.Printf("*** Not Found: %s", r.URL)
	http.Error(w, "404 page not found", http.StatusNotFound)
}

// MethodNotAllowed returns a method not allowed page
func MethodNotAllowed(w http.ResponseWriter, r *http.Request) {
	log.Printf("*** Method Not Allowed: %s - %s", r.Method, r.URL)
	http.Error(w, "405 method not allowed", http.StatusMethodNotAllowed)
}
@ -0,0 +1,170 @@
|
|||
package handler
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"strconv"
|
||||
|
||||
"github.com/microsoft/azure-databricks-operator/mockapi/middleware"
|
||||
)
|
||||
|
||||
// Config is used to represent the mock api config
|
||||
type Config struct {
|
||||
LatencySlowRequestMax *int `json:"DATABRICKS_MOCK_API_LATENCY_MILLISECONDS_SLOW_REQUEST_MAX"`
|
||||
LatencySlowRequestMin *int `json:"DATABRICKS_MOCK_API_LATENCY_MILLISECONDS_SLOW_REQUEST_MIN"`
|
||||
LatencyFastRequestMax *int `json:"DATABRICKS_MOCK_API_LATENCY_MILLISECONDS_FAST_REQUEST_MAX"`
|
||||
LatencyFastRequestMin *int `json:"DATABRICKS_MOCK_API_LATENCY_MILLISECONDS_FAST_REQUEST_MIN"`
|
||||
RateLimit *int `json:"DATABRICKS_MOCK_API_RATE_LIMIT"`
|
||||
Error500Probability *int `json:"DATABRICKS_MOCK_API_ERROR_500_PROBABILITY"`
|
||||
ErrorSinkHoleProbability *int `json:"DATABRICKS_MOCK_API_ERROR_SINKHOLE_PROBABILITY"`
|
||||
ErrorXMLResponseProbability *int `json:"DATABRICKS_MOCK_API_ERROR_XML_RESPONSE_PROBABILITY"`
|
||||
}
|
||||
|
||||
// GetConfig gets the current config
|
||||
func GetConfig() func(w http.ResponseWriter, r *http.Request) {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
config := Config{
|
||||
LatencySlowRequestMax: getIntSetting(middleware.LatencySlowRequestMaxEnvName),
|
||||
LatencySlowRequestMin: getIntSetting(middleware.LatencySlowRequestMinEnvName),
|
||||
LatencyFastRequestMax: getIntSetting(middleware.LatencyFastRequestMaxEnvName),
|
||||
LatencyFastRequestMin: getIntSetting(middleware.LatencyFastRequestMinEnvName),
|
||||
RateLimit: getIntSetting(middleware.RateLimitEnvName),
|
||||
Error500Probability: getIntSetting(middleware.Error500ResponseEnvName),
|
||||
ErrorSinkHoleProbability: getIntSetting(middleware.ErrorSinkHoleResponseEnvName),
|
||||
ErrorXMLResponseProbability: getIntSetting(middleware.ErrorXMLResponseEnvName),
|
||||
}
|
||||
w.Header().Set("Content-Type", "application/json; charset=UTF-8")
|
||||
if err := json.NewEncoder(w).Encode(config); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// SetConfig updates the config
|
||||
func SetConfig() func(w http.ResponseWriter, r *http.Request) {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
var config Config
|
||||
body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1048576))
|
||||
defer r.Body.Close() // nolint: errcheck
|
||||
if err != nil {
|
||||
log.Printf("Config: Error reading the body: %v", err)
|
||||
http.Error(w, "Error reading the body", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(body, &config); err != nil {
|
||||
log.Printf("Config: Error parsing the body: %v", err)
|
||||
http.Error(w, "Error parsing body", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
// TODO - would be nice to validate the config values here and return an error response
|
||||
// but that logic is currently buried in the middleware itself
|
||||
if err := updateIntSetting(w, middleware.LatencySlowRequestMaxEnvName, config.LatencySlowRequestMax); err != nil {
|
||||
return
|
||||
}
|
||||
if err := updateIntSetting(w, middleware.LatencySlowRequestMinEnvName, config.LatencySlowRequestMin); err != nil {
|
||||
return
|
||||
}
|
||||
if err := updateIntSetting(w, middleware.LatencyFastRequestMaxEnvName, config.LatencyFastRequestMax); err != nil {
|
||||
return
|
||||
}
|
||||
if err := updateIntSetting(w, middleware.LatencyFastRequestMinEnvName, config.LatencyFastRequestMin); err != nil {
|
||||
return
|
||||
}
|
||||
if err := updateIntSetting(w, middleware.RateLimitEnvName, config.RateLimit); err != nil {
|
||||
return
|
||||
}
|
||||
if err := updateIntSetting(w, middleware.Error500ResponseEnvName, config.Error500Probability); err != nil {
|
||||
return
|
||||
}
|
||||
if err := updateIntSetting(w, middleware.ErrorSinkHoleResponseEnvName, config.ErrorSinkHoleProbability); err != nil {
|
||||
return
|
||||
}
|
||||
if err := updateIntSetting(w, middleware.ErrorXMLResponseEnvName, config.ErrorXMLResponseProbability); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// PatchConfig allows updating a subset of the config
|
||||
func PatchConfig() func(w http.ResponseWriter, r *http.Request) {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1048576))
|
||||
defer r.Body.Close() // nolint: errcheck
|
||||
if err != nil {
|
||||
log.Printf("Config: Error reading the body: %v", err)
|
||||
http.Error(w, "Error reading the body", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
var configPatch map[string]*int
|
||||
if err := json.Unmarshal(body, &configPatch); err != nil {
|
||||
log.Printf("Config: Error parsing the body: %v", err)
|
||||
http.Error(w, "Error parsing body", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
for name, value := range configPatch {
|
||||
switch name {
|
||||
case middleware.LatencyFastRequestMaxEnvName,
|
||||
middleware.LatencyFastRequestMinEnvName,
|
||||
middleware.LatencySlowRequestMaxEnvName,
|
||||
middleware.LatencySlowRequestMinEnvName,
|
||||
middleware.Error500ResponseEnvName,
|
||||
middleware.ErrorSinkHoleResponseEnvName,
|
||||
middleware.ErrorXMLResponseEnvName,
|
||||
middleware.RateLimitEnvName:
|
||||
if err := setIntSetting(name, value); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
default:
|
||||
log.Printf("Error: unexepected value '%s'\n", name)
|
||||
http.Error(w, fmt.Sprintf("Error: unexepected value '%s'\n", name), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func getIntSetting(name string) *int {
|
||||
value := os.Getenv(name)
|
||||
if value == "" {
|
||||
return nil
|
||||
}
|
||||
intValue, err := strconv.Atoi(value)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
return &intValue
|
||||
}
|
||||
func setIntSetting(name string, value *int) error {
|
||||
if value == nil {
|
||||
err := os.Unsetenv(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
err := os.Setenv(name, fmt.Sprintf("%d", *value))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func updateIntSetting(w http.ResponseWriter, name string, value *int) error {
|
||||
if err := setIntSetting(name, value); err != nil {
|
||||
log.Printf("Config: Error updating %s: %v", name, err)
|
||||
http.Error(w, fmt.Sprintf("Error updating %s", name), http.StatusBadRequest)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
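// Illustrative only: assuming the router mounts PatchConfig on a config endpoint
// (the route itself is defined elsewhere), a PATCH body is a JSON object keyed by
// the middleware env var names; a null value unsets that setting via setIntSetting:
//
//   {
//     "DATABRICKS_MOCK_API_RATE_LIMIT": 50,
//     "DATABRICKS_MOCK_API_ERROR_500_PROBABILITY": 10,
//     "DATABRICKS_MOCK_API_LATENCY_MILLISECONDS_FAST_REQUEST_MIN": null
//   }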
|
|
@@ -0,0 +1,127 @@
|
|||
package handler
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"strconv"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/microsoft/azure-databricks-operator/mockapi/repository"
|
||||
dbmodel "github.com/xinsnake/databricks-sdk-golang/azure/models"
|
||||
)
|
||||
|
||||
// CreateJob handles the job create endpoint
|
||||
func CreateJob(j *repository.JobRepository) func(w http.ResponseWriter, r *http.Request) {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
requestID := getNewRequestID()
|
||||
log.Printf("RequestID:%6d: CreateJob - starting\n", requestID)
|
||||
|
||||
var job dbmodel.JobSettings
|
||||
body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1048576))
|
||||
defer r.Body.Close() // nolint: errcheck
|
||||
if err != nil {
|
||||
log.Printf("RequestID:%6d: CreateJob - Error reading the body: %v", requestID, err)
|
||||
http.Error(w, "Error reading the body", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(body, &job); err != nil {
|
||||
log.Printf("RequestID:%6d: CreateJob - Error parsing the body: %v", requestID, err)
|
||||
http.Error(w, "Error parsing body", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
response := dbmodel.Job{
|
||||
JobID: j.CreateJob(job),
|
||||
}
|
||||
w.Header().Set("Content-Type", "application/json; charset=UTF-8")
|
||||
if err := json.NewEncoder(w).Encode(response); err != nil {
|
||||
log.Printf("RequestID:%6d: CreateJob(%d) - Error writing the response: %v", requestID, response.JobID, err)
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
log.Printf("RequestID:%6d: CreateJob(%d) - completed\n", requestID, response.JobID)
|
||||
}
|
||||
}
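// Illustrative usage (assuming the router mounts this handler at /api/2.0/jobs/create,
// the path exercised by the integration tests): POSTing a JobSettings JSON body such as
// test_data/job/job_create.json returns a JSON-encoded dbmodel.Job carrying the new job ID.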
|
||||
|
||||
// ListJobs handles the job list endpoint
|
||||
func ListJobs(j *repository.JobRepository) func(w http.ResponseWriter, r *http.Request) {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
requestID := getNewRequestID()
|
||||
log.Printf("RequestID:%6d: ListJobs - starting\n", requestID)
|
||||
|
||||
w.Header().Set("Content-Type", "application/json; charset=UTF-8")
|
||||
if err := json.NewEncoder(w).Encode(j.GetJobs()); err != nil {
|
||||
log.Printf("RequestID:%6d: ListJobs - Error writing the response: %v", requestID, err)
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
log.Printf("RequestID:%6d: ListJobs - completed\n", requestID)
|
||||
}
|
||||
}
|
||||
|
||||
// GetJob handles the job get endpoint
|
||||
func GetJob(j *repository.JobRepository) func(w http.ResponseWriter, r *http.Request) {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
requestID := getNewRequestID()
|
||||
log.Printf("RequestID:%6d: GetJob - starting\n", requestID)
|
||||
|
||||
vars := mux.Vars(r)
|
||||
jobID, err := strconv.ParseInt(vars["job_id"], 10, 64)
|
||||
if err != nil {
|
||||
log.Printf("RequestID:%6d: GetJob - Invalid job_id: %v", requestID, err)
|
||||
http.Error(w, "Invalid job_id", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
job := j.GetJob(jobID)
|
||||
if job.JobID <= 0 {
|
||||
log.Printf("RequestID:%6d: GetJob(%d) - Not found", requestID, jobID)
|
||||
http.Error(w, "Not Found", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
w.Header().Set("Content-Type", "application/json; charset=UTF-8")
|
||||
if err := json.NewEncoder(w).Encode(job); err != nil {
|
||||
log.Printf("RequestID:%6d: GetJob(%d) - Error writing the response: %v", requestID, jobID, err)
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
log.Printf("RequestID:%6d: GetJob(%d) - completed\n", requestID, jobID)
|
||||
}
|
||||
}
|
||||
|
||||
// DeleteJob handles the job delete endpoint
|
||||
func DeleteJob(j *repository.JobRepository) func(w http.ResponseWriter, r *http.Request) {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
requestID := getNewRequestID()
|
||||
log.Printf("RequestID:%6d: DeleteJob - starting\n", requestID)
|
||||
|
||||
var request struct {
|
||||
JobID int64 `json:"job_id"`
|
||||
}
|
||||
body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1048576))
|
||||
defer r.Body.Close() // nolint: errcheck
|
||||
if err != nil {
|
||||
log.Printf("RequestID:%6d: DeleteJob - Error reading the body: %v", requestID, err)
|
||||
http.Error(w, "Error reading the body", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(body, &request); err != nil {
|
||||
log.Printf("RequestID:%6d: DeleteJob - Error parsing the body: %v", requestID, err)
|
||||
http.Error(w, "Error parsing body", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
if err := j.DeleteJob(request.JobID); err != nil {
|
||||
log.Printf("RequestID:%6d: DeleteJob(%d) - Not found", requestID, request.JobID)
|
||||
http.Error(w, "Not found", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
// Set status here as we're not writing to the response
|
||||
w.WriteHeader(http.StatusOK)
|
||||
log.Printf("RequestID:%6d: DeleteJob(%d) - completed\n", requestID, request.JobID)
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,197 @@
|
|||
package handler
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/microsoft/azure-databricks-operator/mockapi/model"
|
||||
"github.com/microsoft/azure-databricks-operator/mockapi/repository"
|
||||
dbmodel "github.com/xinsnake/databricks-sdk-golang/azure/models"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// SubmitRun handles the runs submit endpoint
|
||||
func SubmitRun(runRepo *repository.RunRepository, jobRepo *repository.JobRepository) func(w http.ResponseWriter, r *http.Request) {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
requestID := getNewRequestID()
|
||||
log.Printf("RequestID:%6d: SubmitRun - starting\n", requestID)
|
||||
|
||||
var run model.JobsRunsSubmitRequest
|
||||
body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1048576))
|
||||
defer r.Body.Close() // nolint: errcheck
|
||||
if err != nil {
|
||||
log.Printf("RequestID:%6d: SubmitRun - Error reading the body: %v", requestID, err)
|
||||
http.Error(w, "Error reading the body", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(body, &run); err != nil {
|
||||
log.Printf("RequestID:%6d: SubmitRun - Error parsing the body: %v", requestID, err)
|
||||
http.Error(w, "Error parsing body", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
job := dbmodel.JobSettings{
|
||||
NewCluster: &run.NewCluster,
|
||||
Libraries: run.Libraries,
|
||||
SparkJarTask: &run.SparkJarTask,
|
||||
}
|
||||
|
||||
response := dbmodel.Run{
|
||||
RunID: runRepo.CreateRun(run, jobRepo.CreateJob(job)),
|
||||
}
|
||||
w.Header().Set("Content-Type", "application/json; charset=UTF-8")
|
||||
if err := json.NewEncoder(w).Encode(response); err != nil {
|
||||
log.Printf("RequestID:%6d: SubmitRun(%d) - Error writing the response: %v", requestID, response.RunID, err)
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
log.Printf("RequestID:%6d: SubmitRun(%d) - completed\n", requestID, response.RunID)
|
||||
}
|
||||
}
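// Note: in this mock, submitting a run also registers a job built from the run's
// cluster/library/task settings (via jobRepo.CreateJob), so the submitted settings
// can be read back through the jobs/get endpoint - see TestAPI_RunsSubmit_JobIsCreated.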
|
||||
|
||||
// GetRun handles the runs get endpoint
|
||||
func GetRun(j *repository.RunRepository) func(w http.ResponseWriter, r *http.Request) {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
requestID := getNewRequestID()
|
||||
log.Printf("RequestID:%6d: GetRun - starting\n", requestID)
|
||||
|
||||
vars := mux.Vars(r)
|
||||
runID, err := strconv.ParseInt(vars["run_id"], 10, 64)
|
||||
if err != nil {
|
||||
log.Printf("RequestID:%6d: GetRun - Invalid run_id: %v", requestID, err)
|
||||
http.Error(w, "Invalid run_id", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
run := j.GetRun(runID)
|
||||
if run.RunID <= 0 {
|
||||
log.Printf("RequestID:%6d: GetRun(%d) - Not found", requestID, runID)
|
||||
http.Error(w, "Not Found", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
w.Header().Set("Content-Type", "application/json; charset=UTF-8")
|
||||
if err := json.NewEncoder(w).Encode(run); err != nil {
|
||||
log.Printf("RequestID:%6d: GetRun(%d) - Error writing the response: %v", requestID, runID, err)
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
log.Printf("RequestID:%6d: GetRun(%d) - completed\n", requestID, runID)
|
||||
}
|
||||
}
|
||||
|
||||
// GetRunOutput handles the runs get output endpoint
|
||||
func GetRunOutput(j *repository.RunRepository) func(w http.ResponseWriter, r *http.Request) {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
requestID := getNewRequestID()
|
||||
log.Printf("RequestID:%6d: GetRunOutput - starting\n", requestID)
|
||||
|
||||
vars := mux.Vars(r)
|
||||
runID, err := strconv.ParseInt(vars["run_id"], 10, 64)
|
||||
if err != nil {
|
||||
log.Printf("RequestID:%6d: GetRunOutput - Invalid run_id: %v", requestID, err)
|
||||
http.Error(w, "Invalid run_id", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
run := j.GetRunOutput(runID)
|
||||
if run.Metadata.RunID <= 0 {
|
||||
log.Printf("RequestID:%6d: GetRunOutput(%d) - Not found", requestID, runID)
|
||||
http.Error(w, "Not Found", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "application/json; charset=UTF-8")
|
||||
if err := json.NewEncoder(w).Encode(run); err != nil {
|
||||
log.Printf("RequestID:%6d: GetRunOutput(%d) - Error writing the response: %v", requestID, runID, err)
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
log.Printf("RequestID:%6d: GetRunOutput(%d) - completed\n", requestID, runID)
|
||||
}
|
||||
}
|
||||
|
||||
// ListRuns handles the runs list endpoint
|
||||
func ListRuns(j *repository.RunRepository) func(w http.ResponseWriter, r *http.Request) {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
requestID := getNewRequestID()
|
||||
log.Printf("RequestID:%6d: ListRuns - starting\n", requestID)
|
||||
|
||||
w.Header().Set("Content-Type", "application/json; charset=UTF-8")
|
||||
if err := json.NewEncoder(w).Encode(j.GetRuns()); err != nil {
|
||||
log.Printf("RequestID:%6d: ListRuns - Error writing the response: %v", requestID, err)
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
log.Printf("RequestID:%6d: ListRuns - completed\n", requestID)
|
||||
}
|
||||
}
|
||||
|
||||
// CancelRun handles the runs cancel endpoint
|
||||
func CancelRun(j *repository.RunRepository) func(w http.ResponseWriter, r *http.Request) {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
requestID := getNewRequestID()
|
||||
log.Printf("RequestID:%6d: CancelRun - starting\n", requestID)
|
||||
|
||||
var request struct {
|
||||
RunID int64 `json:"run_id"`
|
||||
}
|
||||
body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1048576))
|
||||
defer r.Body.Close() // nolint: errcheck
|
||||
if err != nil {
|
||||
log.Printf("RequestID:%6d: CancelRun - Error reading the body: %v", requestID, err)
|
||||
http.Error(w, "Error reading the body", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(body, &request); err != nil {
|
||||
log.Printf("RequestID:%6d: CancelRun - Error parsing the body: %v", requestID, err)
|
||||
http.Error(w, "Error parsing body", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
if err := j.CancelRun(request.RunID); err != nil {
|
||||
log.Printf("RequestID:%6d: CancelRun(%d) - Not found", requestID, request.RunID)
|
||||
http.Error(w, "Not found", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
// Set status here as we're not writing to the response
|
||||
w.WriteHeader(http.StatusOK)
|
||||
log.Printf("RequestID:%6d: CancelRun(%d) - completed\n", requestID, request.RunID)
|
||||
}
|
||||
}
|
||||
|
||||
// DeleteRun handles the runs delete endpoint
|
||||
func DeleteRun(j *repository.RunRepository) func(w http.ResponseWriter, r *http.Request) {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
requestID := getNewRequestID()
|
||||
log.Printf("RequestID:%6d: DeleteRun - starting\n", requestID)
|
||||
|
||||
var request struct {
|
||||
RunID int64 `json:"run_id"`
|
||||
}
|
||||
body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1048576))
|
||||
defer r.Body.Close() // nolint: errcheck
|
||||
if err != nil {
|
||||
log.Printf("RequestID:%6d: DeleteRun - Error reading the body: %v", requestID, err)
|
||||
http.Error(w, "Error reading the body", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(body, &request); err != nil {
|
||||
log.Printf("RequestID:%6d: DeleteRun - Error parsing the body: %v", requestID, err)
|
||||
http.Error(w, "Error parsing body", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
if err := j.DeleteRun(request.RunID); err != nil {
|
||||
log.Printf("RequestID:%6d: DeleteRun(%d) - Not found", requestID, request.RunID)
|
||||
http.Error(w, "Not found", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
// Set status here as we're not writing to the response
|
||||
w.WriteHeader(http.StatusOK)
|
||||
log.Printf("RequestID:%6d: DeleteRun(%d) - completed\n", requestID, request.RunID)
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,11 @@
|
|||
package handler
|
||||
|
||||
import (
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
var internalRequestCounter int64
|
||||
|
||||
func getNewRequestID() int64 {
|
||||
return atomic.AddInt64(&internalRequestCounter, 1)
|
||||
}
|
|
@@ -0,0 +1,265 @@
|
|||
package integration_test
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"io/ioutil"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/microsoft/azure-databricks-operator/mockapi/middleware"
|
||||
"github.com/microsoft/azure-databricks-operator/mockapi/model"
|
||||
"github.com/microsoft/azure-databricks-operator/mockapi/router"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func unsetLatencyEnvVars() {
|
||||
_ = os.Unsetenv("DATABRICKS_MOCK_API_LATENCY_MILLISECONDS_FAST_REQUEST_MIN")
|
||||
_ = os.Unsetenv("DATABRICKS_MOCK_API_LATENCY_MILLISECONDS_FAST_REQUEST_MAX")
|
||||
_ = os.Unsetenv("DATABRICKS_MOCK_API_LATENCY_MILLISECONDS_SLOW_REQUEST_MIN")
|
||||
_ = os.Unsetenv("DATABRICKS_MOCK_API_LATENCY_MILLISECONDS_SLOW_REQUEST_MAX")
|
||||
}
|
||||
|
||||
func TestAPI_Index(t *testing.T) {
|
||||
// Arrange
|
||||
server := httptest.NewServer(router.NewRouter())
|
||||
defer server.Close()
|
||||
|
||||
// Act
|
||||
response, err := server.Client().Get(server.URL + "/")
|
||||
|
||||
// Assert
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, 200, response.StatusCode)
|
||||
}
|
||||
|
||||
func TestAPI_JobList_WithFastLatency(t *testing.T) {
|
||||
// Arrange
|
||||
server := httptest.NewServer(router.NewRouter())
|
||||
defer server.Close()
|
||||
|
||||
_ = os.Setenv("DATABRICKS_MOCK_API_LATENCY_MILLISECONDS_FAST_REQUEST_MIN", "1000")
|
||||
_ = os.Setenv("DATABRICKS_MOCK_API_LATENCY_MILLISECONDS_FAST_REQUEST_MAX", "2000")
|
||||
_ = os.Setenv("DATABRICKS_MOCK_API_LATENCY_MILLISECONDS_SLOW_REQUEST_MIN", "0")
|
||||
_ = os.Setenv("DATABRICKS_MOCK_API_LATENCY_MILLISECONDS_SLOW_REQUEST_MAX", "0")
|
||||
|
||||
// Act
|
||||
start := time.Now()
|
||||
response, err := server.Client().Get(server.URL + "/api/2.0/jobs/list")
|
||||
elapsed := time.Since(start)
|
||||
|
||||
// Assert
|
||||
elapsedMilliseconds := int64(elapsed / time.Millisecond)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, 200, response.StatusCode)
|
||||
assert.GreaterOrEqual(t, elapsedMilliseconds, int64(1000))
|
||||
assert.LessOrEqual(t, elapsedMilliseconds, int64(3000))
|
||||
|
||||
// Cleanup
|
||||
unsetLatencyEnvVars()
|
||||
}
|
||||
|
||||
func TestAPI_JobList_WithSlowLatency(t *testing.T) {
|
||||
// Arrange
|
||||
server := httptest.NewServer(router.NewRouter())
|
||||
defer server.Close()
|
||||
|
||||
_ = os.Setenv("DATABRICKS_MOCK_API_LATENCY_MILLISECONDS_FAST_REQUEST_MIN", "0")
|
||||
_ = os.Setenv("DATABRICKS_MOCK_API_LATENCY_MILLISECONDS_FAST_REQUEST_MAX", "0")
|
||||
_ = os.Setenv("DATABRICKS_MOCK_API_LATENCY_MILLISECONDS_SLOW_REQUEST_MIN", "1000")
|
||||
_ = os.Setenv("DATABRICKS_MOCK_API_LATENCY_MILLISECONDS_SLOW_REQUEST_MAX", "2000")
|
||||
|
||||
// Act
|
||||
start := time.Now()
|
||||
response, err := createJob(server)
|
||||
elapsed := time.Since(start)
|
||||
|
||||
// Assert
|
||||
elapsedMilliseconds := int64(elapsed / time.Millisecond)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, 200, response.StatusCode)
|
||||
assert.GreaterOrEqual(t, elapsedMilliseconds, int64(1000))
|
||||
assert.LessOrEqual(t, elapsedMilliseconds, int64(3000))
|
||||
|
||||
// Cleanup
|
||||
unsetLatencyEnvVars()
|
||||
}
|
||||
func TestAPI_JobList_WithRateLimit(t *testing.T) {
|
||||
// Arrange
|
||||
_ = os.Setenv("DATABRICKS_MOCK_API_RATE_LIMIT", "10")
|
||||
server := httptest.NewServer(router.NewRouter())
|
||||
defer server.Close()
|
||||
|
||||
// Act
|
||||
start := time.Now()
|
||||
end := start.Add(time.Second)
|
||||
successCount := 0
|
||||
for time.Now().Before(end) {
|
||||
response, err := server.Client().Get(server.URL + "/api/2.0/jobs/list")
|
||||
if err != nil {
|
||||
t.Errorf("*** Get failed: %v\n", err)
|
||||
break
|
||||
}
|
||||
_, _ = ioutil.ReadAll(response.Body)
|
||||
_ = response.Body.Close()
|
||||
if response.StatusCode == 200 {
|
||||
successCount++
|
||||
}
|
||||
}
|
||||
|
||||
// Assert
|
||||
t.Logf("Success Count %d\n", successCount)
|
||||
assert.Equal(t, 10, successCount)
|
||||
|
||||
// Cleanup
|
||||
_ = os.Unsetenv("DATABRICKS_MOCK_API_RATE_LIMIT")
|
||||
}
|
||||
|
||||
func TestAPI_JobList_Error500_20PercentRate(t *testing.T) {
|
||||
|
||||
_ = os.Setenv("DATABRICKS_MOCK_API_ERROR_500_PROBABILITY", "20")
|
||||
server := httptest.NewServer(router.NewRouter())
|
||||
defer server.Close()
|
||||
|
||||
error500Count := 0
|
||||
for i := 0; i < 10000; i++ {
|
||||
response, err := server.Client().Get(server.URL + "/api/2.0/jobs/list")
|
||||
if err != nil {
|
||||
t.Errorf("*** Get failed: %v\n", err)
|
||||
break
|
||||
}
|
||||
_, _ = ioutil.ReadAll(response.Body)
|
||||
_ = response.Body.Close()
|
||||
if response.StatusCode == 500 {
|
||||
error500Count++
|
||||
}
|
||||
}
|
||||
|
||||
// 10000 requests with a 20% probability configured for Error500 responses
|
||||
// Treat 1900-2100 inclusive as success
|
||||
assert.GreaterOrEqual(t, error500Count, 1900, "For 10000 executions with 20%% Error500 rate, expected the error rate to be 1900-2100. Got %d", error500Count)
|
||||
assert.LessOrEqualf(t, error500Count, 2100, "For 10000 executions with 20%% Error500 rate, expected the error rate to be 1900-2100. Got %d", error500Count)
|
||||
|
||||
_ = os.Unsetenv("DATABRICKS_MOCK_API_ERROR_500_PROBABILITY")
|
||||
}
|
||||
|
||||
func TestAPI_JobList_Error500_100PercentRate(t *testing.T) {
|
||||
|
||||
_ = os.Setenv("DATABRICKS_MOCK_API_ERROR_500_PROBABILITY", "100")
|
||||
|
||||
server := httptest.NewServer(router.NewRouter())
|
||||
defer server.Close()
|
||||
|
||||
error500Count := 0
|
||||
for i := 0; i < 1000; i++ {
|
||||
response, err := server.Client().Get(server.URL + "/api/2.0/jobs/list")
|
||||
if err != nil {
|
||||
t.Errorf("*** Get failed: %v\n", err)
|
||||
break
|
||||
}
|
||||
_, _ = ioutil.ReadAll(response.Body)
|
||||
_ = response.Body.Close()
|
||||
if response.StatusCode == 500 {
|
||||
error500Count++
|
||||
}
|
||||
}
|
||||
|
||||
assert.Equal(t, error500Count, 1000)
|
||||
|
||||
_ = os.Unsetenv("DATABRICKS_MOCK_API_ERROR_500_PROBABILITY")
|
||||
}
|
||||
|
||||
func TestAPI_JobList_WithLatencyAndError500_EnsureLatencyIsApplied(t *testing.T) {
|
||||
|
||||
_ = os.Setenv("DATABRICKS_MOCK_API_LATENCY_MILLISECONDS_FAST_REQUEST_MIN", "1000")
|
||||
_ = os.Setenv("DATABRICKS_MOCK_API_LATENCY_MILLISECONDS_FAST_REQUEST_MAX", "1000")
|
||||
_ = os.Setenv("DATABRICKS_MOCK_API_ERROR_500_PROBABILITY", "100")
|
||||
|
||||
server := httptest.NewServer(router.NewRouter())
|
||||
defer server.Close()
|
||||
|
||||
start := time.Now()
|
||||
response, err := server.Client().Get(server.URL + "/api/2.0/jobs/list")
|
||||
if err != nil {
|
||||
t.Errorf("*** Get failed: %v\n", err)
|
||||
} else {
|
||||
_, _ = ioutil.ReadAll(response.Body)
|
||||
_ = response.Body.Close()
|
||||
|
||||
elapsed := time.Since(start)
|
||||
|
||||
// Assert
|
||||
elapsedMilliseconds := int64(elapsed / time.Millisecond)
|
||||
assert.GreaterOrEqual(t, elapsedMilliseconds, int64(1000))
|
||||
}
|
||||
|
||||
_ = os.Unsetenv("DATABRICKS_MOCK_API_ERROR_500_PROBABILITY")
|
||||
_ = os.Unsetenv("DATABRICKS_MOCK_API_LATENCY_MILLISECONDS_FAST_REQUEST_MIN")
|
||||
_ = os.Unsetenv("DATABRICKS_MOCK_API_LATENCY_MILLISECONDS_FAST_REQUEST_MAX")
|
||||
}
|
||||
|
||||
func TestAPI_JobList_WithSinkHole_EnsureClientRequestTimesOut(t *testing.T) {
|
||||
_ = os.Setenv("DATABRICKS_MOCK_API_ERROR_SINKHOLE_PROBABILITY", "100")
|
||||
middleware.SetErrorSinkHoleDuration(15 * time.Second)
|
||||
|
||||
server := httptest.NewServer(router.NewRouter())
|
||||
defer server.Close()
|
||||
|
||||
client := server.Client()
|
||||
|
||||
client.Timeout = 10 * time.Second
|
||||
_, err := client.Get(server.URL + "/api/2.0/jobs/list")
|
||||
|
||||
assert.NotNil(t, err)
|
||||
|
||||
_ = os.Unsetenv("DATABRICKS_MOCK_API_ERROR_SINKHOLE_PROBABILITY")
|
||||
}
|
||||
|
||||
func TestAPI_JobList_WithXMLResponse(t *testing.T) {
|
||||
_ = os.Setenv("DATABRICKS_MOCK_API_ERROR_XML_RESPONSE_PROBABILITY", "100")
|
||||
server := httptest.NewServer(router.NewRouter())
|
||||
defer server.Close()
|
||||
|
||||
client := server.Client()
|
||||
response, err := client.Get(server.URL + "/api/2.0/jobs/list")
|
||||
assert.Nil(t, err)
|
||||
|
||||
assert.Equal(t, 200, response.StatusCode)
|
||||
body, err := ioutil.ReadAll(response.Body)
|
||||
assert.Nil(t, err)
|
||||
var xmlResponse model.TestXMLResponse
|
||||
err = xml.Unmarshal(body, &xmlResponse)
|
||||
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, "TestJob", xmlResponse.Name)
|
||||
|
||||
_ = os.Unsetenv("DATABRICKS_MOCK_API_ERROR_XML_RESPONSE_PROBABILITY")
|
||||
|
||||
}
|
||||
|
||||
func TestAPI_PageNotFound(t *testing.T) {
|
||||
// Arrange
|
||||
server := httptest.NewServer(router.NewRouter())
|
||||
defer server.Close()
|
||||
|
||||
// Act
|
||||
response, err := server.Client().Get(server.URL + "/unknown")
|
||||
|
||||
// Assert
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, 404, response.StatusCode)
|
||||
}
|
||||
|
||||
func TestAPI_MethodNotFound(t *testing.T) {
|
||||
// Arrange
|
||||
server := httptest.NewServer(router.NewRouter())
|
||||
defer server.Close()
|
||||
|
||||
// Act
|
||||
response, err := server.Client().Get(server.URL + jobAPI + "create")
|
||||
|
||||
// Assert
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, 405, response.StatusCode)
|
||||
}
|
|
@@ -0,0 +1,193 @@
|
|||
package integration_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"github.com/microsoft/azure-databricks-operator/mockapi/model"
|
||||
"github.com/microsoft/azure-databricks-operator/mockapi/router"
|
||||
"github.com/stretchr/testify/assert"
|
||||
dbmodel "github.com/xinsnake/databricks-sdk-golang/azure/models"
|
||||
)
|
||||
|
||||
const jobFileLocation = "test_data/job/job_create.json"
|
||||
const jobAPI = "/api/2.0/jobs/"
|
||||
|
||||
func createJob(server *httptest.Server) (*http.Response, error) {
|
||||
jsonFile, _ := os.Open(jobFileLocation)
|
||||
return server.Client().Post(
|
||||
server.URL+jobAPI+"create",
|
||||
"application/json",
|
||||
jsonFile)
|
||||
}
|
||||
|
||||
func TestAPI_JobsCreate(t *testing.T) {
|
||||
// Arrange
|
||||
server := httptest.NewServer(router.NewRouter())
|
||||
defer server.Close()
|
||||
|
||||
// Act
|
||||
response, err := createJob(server)
|
||||
|
||||
// Assert
|
||||
assert.Equal(t, 200, response.StatusCode)
|
||||
assert.Nil(t, err)
|
||||
|
||||
body, err := ioutil.ReadAll(response.Body)
|
||||
assert.Nil(t, err)
|
||||
|
||||
var jobResponse dbmodel.Job
|
||||
err = json.Unmarshal(body, &jobResponse)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, int64(1), jobResponse.JobID)
|
||||
}
|
||||
|
||||
func TestAPI_JobsCreate_ConcurrentRequest(t *testing.T) {
|
||||
// Arrange
|
||||
server := httptest.NewServer(router.NewRouter())
|
||||
defer server.Close()
|
||||
|
||||
const numberToAdd = 50
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(numberToAdd)
|
||||
|
||||
// Act
|
||||
for index := 0; index < numberToAdd; index++ {
|
||||
go func() {
|
||||
_, err := createJob(server)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
wg.Done()
|
||||
}()
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
|
||||
// Assert
|
||||
response, err := server.Client().Get(server.URL + jobAPI + "list")
|
||||
|
||||
body, _ := ioutil.ReadAll(response.Body)
|
||||
var listResponse model.JobsListResponse
|
||||
_ = json.Unmarshal(body, &listResponse)
|
||||
|
||||
assert.Nil(t, err)
|
||||
|
||||
assert.Equal(t, numberToAdd, len(listResponse.Jobs))
|
||||
assert.Equal(t, 200, response.StatusCode)
|
||||
}
|
||||
|
||||
func TestAPI_JobsGet(t *testing.T) {
|
||||
// Arrange
|
||||
server := httptest.NewServer(router.NewRouter())
|
||||
defer server.Close()
|
||||
|
||||
_, _ = createJob(server)
|
||||
|
||||
// Act
|
||||
response, err := server.Client().Get(server.URL + jobAPI + "get?job_id=1")
|
||||
|
||||
// Assert
|
||||
assert.Equal(t, 200, response.StatusCode)
|
||||
assert.Nil(t, err)
|
||||
body, err := ioutil.ReadAll(response.Body)
|
||||
assert.Nil(t, err)
|
||||
|
||||
var jobCreateResponse dbmodel.Job
|
||||
err = json.Unmarshal(body, &jobCreateResponse)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, int64(1), jobCreateResponse.JobID)
|
||||
assert.Equal(t, "Nightly model training", jobCreateResponse.Settings.Name)
|
||||
|
||||
}
|
||||
|
||||
func TestAPI_JobsListTwoJobs(t *testing.T) {
|
||||
// Arrange
|
||||
server := httptest.NewServer(router.NewRouter())
|
||||
defer server.Close()
|
||||
|
||||
_, _ = createJob(server)
|
||||
_, _ = createJob(server)
|
||||
|
||||
// Act
|
||||
response, err := server.Client().Get(server.URL + jobAPI + "list")
|
||||
|
||||
// Assert
|
||||
assert.Equal(t, 200, response.StatusCode)
|
||||
assert.Nil(t, err)
|
||||
|
||||
body, err := ioutil.ReadAll(response.Body)
|
||||
assert.Nil(t, err)
|
||||
|
||||
var listResponse model.JobsListResponse
|
||||
err = json.Unmarshal(body, &listResponse)
|
||||
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, 2, len(listResponse.Jobs))
|
||||
}
|
||||
|
||||
func TestAPI_JobsList_EmptyList(t *testing.T) {
|
||||
// Arrange
|
||||
server := httptest.NewServer(router.NewRouter())
|
||||
defer server.Close()
|
||||
|
||||
// Act
|
||||
response, err := server.Client().Get(server.URL + jobAPI + "list")
|
||||
|
||||
// Assert
|
||||
assert.Equal(t, 200, response.StatusCode)
|
||||
assert.Nil(t, err)
|
||||
|
||||
body, err := ioutil.ReadAll(response.Body)
|
||||
assert.Nil(t, err)
|
||||
|
||||
var listResponse model.JobsListResponse
|
||||
err = json.Unmarshal(body, &listResponse)
|
||||
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, 0, len(listResponse.Jobs))
|
||||
}
|
||||
|
||||
func TestAPI_JobsDelete(t *testing.T) {
|
||||
// Arrange
|
||||
server := httptest.NewServer(router.NewRouter())
|
||||
defer server.Close()
|
||||
response, err := createJob(server)
|
||||
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, 200, response.StatusCode)
|
||||
|
||||
// Act
|
||||
response, err = server.Client().Post(
|
||||
server.URL+jobAPI+"delete",
|
||||
"application/json",
|
||||
bytes.NewBufferString("{\"job_id\":1}"))
|
||||
|
||||
// Assert
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, 200, response.StatusCode)
|
||||
}
|
||||
|
||||
func TestAPI_DeleteJob_WithInvalidJobID(t *testing.T) {
|
||||
// Arrange
|
||||
server := httptest.NewServer(router.NewRouter())
|
||||
defer server.Close()
|
||||
|
||||
// Act
|
||||
response, err := server.Client().Post(
|
||||
server.URL+jobAPI+"delete",
|
||||
"application/json",
|
||||
bytes.NewBufferString("{\"job_id\":1}"))
|
||||
|
||||
// Assert
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, 404, response.StatusCode)
|
||||
}
|
|
@@ -0,0 +1,22 @@
|
|||
package integration_test
|
||||
|
||||
import (
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"github.com/microsoft/azure-databricks-operator/mockapi/router"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestAPI_Metrics(t *testing.T) {
|
||||
// Arrange
|
||||
server := httptest.NewServer(router.NewRouter())
|
||||
defer server.Close()
|
||||
|
||||
// Act
|
||||
response, err := server.Client().Get(server.URL + "/metrics")
|
||||
|
||||
// Assert
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, 200, response.StatusCode)
|
||||
}
|
|
@@ -0,0 +1,294 @@
|
|||
package integration_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"github.com/microsoft/azure-databricks-operator/mockapi/router"
|
||||
"github.com/stretchr/testify/assert"
|
||||
azure "github.com/xinsnake/databricks-sdk-golang/azure"
|
||||
dbmodel "github.com/xinsnake/databricks-sdk-golang/azure/models"
|
||||
)
|
||||
|
||||
const runSubmitFileLocation = "test_data/run/run_submit.json"
|
||||
const runAPI = "/api/2.0/jobs/runs/"
|
||||
|
||||
func submitRun(server *httptest.Server) (*http.Response, error) {
|
||||
jsonFile, _ := os.Open(runSubmitFileLocation)
|
||||
return server.Client().Post(
|
||||
server.URL+runAPI+"submit",
|
||||
"application/json",
|
||||
jsonFile)
|
||||
}
|
||||
|
||||
func TestAPI_RunsSubmit(t *testing.T) {
|
||||
// Arrange
|
||||
server := httptest.NewServer(router.NewRouter())
|
||||
defer server.Close()
|
||||
|
||||
// Act
|
||||
response, err := submitRun(server)
|
||||
|
||||
// Assert
|
||||
assert.Equal(t, 200, response.StatusCode)
|
||||
assert.Nil(t, err)
|
||||
|
||||
body, err := ioutil.ReadAll(response.Body)
|
||||
assert.Nil(t, err)
|
||||
|
||||
var runResponse dbmodel.Run
|
||||
err = json.Unmarshal(body, &runResponse)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, int64(1), runResponse.RunID)
|
||||
}
|
||||
|
||||
func TestAPI_RunsSubmit_JobIsCreated(t *testing.T) {
|
||||
// Arrange
|
||||
server := httptest.NewServer(router.NewRouter())
|
||||
defer server.Close()
|
||||
|
||||
// Act
|
||||
_, _ = submitRun(server)
|
||||
|
||||
// Assert
|
||||
response, err := server.Client().Get(server.URL + jobAPI + "get?job_id=1")
|
||||
assert.Equal(t, 200, response.StatusCode)
|
||||
assert.Nil(t, err)
|
||||
|
||||
body, err := ioutil.ReadAll(response.Body)
|
||||
assert.Nil(t, err)
|
||||
|
||||
var job dbmodel.Job
|
||||
err = json.Unmarshal(body, &job)
|
||||
assert.Nil(t, err)
|
||||
|
||||
assert.Equal(t, int64(1), job.JobID)
|
||||
assert.Equal(t, "5.3.x-scala2.11", job.Settings.NewCluster.SparkVersion)
|
||||
assert.Equal(t, "com.databricks.ComputeModels", job.Settings.SparkJarTask.MainClassName)
|
||||
assert.Equal(t, "dbfs:/my-jar.jar", job.Settings.Libraries[0].Jar)
|
||||
|
||||
}
|
||||
|
||||
func TestAPI_RunsSubmit_Concurrently(t *testing.T) {
|
||||
// Arrange
|
||||
server := httptest.NewServer(router.NewRouter())
|
||||
defer server.Close()
|
||||
|
||||
const numberToAdd = 50
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(numberToAdd)
|
||||
|
||||
// Act
|
||||
for index := 0; index < numberToAdd; index++ {
|
||||
go func() {
|
||||
_, err := submitRun(server)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
wg.Done()
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
// Assert
|
||||
response, err := server.Client().Get(server.URL + runAPI + "list")
|
||||
assert.Equal(t, 200, response.StatusCode)
|
||||
|
||||
body, _ := ioutil.ReadAll(response.Body)
|
||||
var runsListResponse azure.JobsRunsListResponse
|
||||
_ = json.Unmarshal(body, &runsListResponse)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, numberToAdd, len(runsListResponse.Runs))
|
||||
}
|
||||
|
||||
func TestAPI_RunsGet(t *testing.T) {
|
||||
// Arrange
|
||||
server := httptest.NewServer(router.NewRouter())
|
||||
defer server.Close()
|
||||
_, _ = submitRun(server)
|
||||
|
||||
// Act
|
||||
response, _ := server.Client().Get(server.URL + runAPI + "get?run_id=1")
|
||||
|
||||
// Assert
|
||||
assert.Equal(t, 200, response.StatusCode)
|
||||
|
||||
body, err := ioutil.ReadAll(response.Body)
|
||||
assert.Nil(t, err)
|
||||
|
||||
var runResponse dbmodel.Run
|
||||
err = json.Unmarshal(body, &runResponse)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, int64(1), runResponse.RunID)
|
||||
}
|
||||
|
||||
func TestAPI_RunsGetWithNoRuns(t *testing.T) {
|
||||
// Arrange
|
||||
server := httptest.NewServer(router.NewRouter())
|
||||
defer server.Close()
|
||||
|
||||
// Act
|
||||
response, _ := server.Client().Get(server.URL + runAPI + "get?run_id=1")
|
||||
|
||||
// Assert
|
||||
assert.Equal(t, 404, response.StatusCode)
|
||||
}
|
||||
|
||||
func TestAPI_RunsGetOutput(t *testing.T) {
|
||||
// Arrange
|
||||
server := httptest.NewServer(router.NewRouter())
|
||||
defer server.Close()
|
||||
_, _ = submitRun(server)
|
||||
|
||||
// Act
|
||||
response, _ := server.Client().Get(server.URL + runAPI + "get-output?run_id=1")
|
||||
|
||||
// Assert
|
||||
assert.Equal(t, 200, response.StatusCode)
|
||||
|
||||
body, err := ioutil.ReadAll(response.Body)
|
||||
assert.Nil(t, err)
|
||||
|
||||
var runResponse azure.JobsRunsGetOutputResponse
|
||||
err = json.Unmarshal(body, &runResponse)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, int64(1), runResponse.Metadata.RunID)
|
||||
}
|
||||
|
||||
func TestAPI_RunsGetOutputWithNoRuns(t *testing.T) {
|
||||
// Arrange
|
||||
server := httptest.NewServer(router.NewRouter())
|
||||
defer server.Close()
|
||||
|
||||
// Act
|
||||
response, _ := server.Client().Get(server.URL + runAPI + "get-output?run_id=1")
|
||||
|
||||
// Assert
|
||||
assert.Equal(t, 404, response.StatusCode)
|
||||
}
|
||||
|
||||
func TestAPI_RunsListWithTwoRuns(t *testing.T) {
|
||||
// Arrange
|
||||
server := httptest.NewServer(router.NewRouter())
|
||||
defer server.Close()
|
||||
|
||||
_, _ = submitRun(server)
|
||||
_, _ = submitRun(server)
|
||||
|
||||
// Act
|
||||
response, err := server.Client().Get(server.URL + runAPI + "list")
|
||||
|
||||
// Assert
|
||||
assert.Equal(t, 200, response.StatusCode)
|
||||
assert.Nil(t, err)
|
||||
|
||||
body, err := ioutil.ReadAll(response.Body)
|
||||
assert.Nil(t, err)
|
||||
|
||||
var runsListResponse azure.JobsRunsListResponse
|
||||
err = json.Unmarshal(body, &runsListResponse)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, 2, len(runsListResponse.Runs))
|
||||
assert.Equal(t, false, runsListResponse.HasMore)
|
||||
}
|
||||
|
||||
func TestAPI_RunsListWithEmptyList(t *testing.T) {
|
||||
// Arrange
|
||||
server := httptest.NewServer(router.NewRouter())
|
||||
defer server.Close()
|
||||
|
||||
// Act
|
||||
response, err := server.Client().Get(server.URL + runAPI + "list")
|
||||
|
||||
// Assert
|
||||
assert.Equal(t, 200, response.StatusCode)
|
||||
assert.Nil(t, err)
|
||||
|
||||
body, err := ioutil.ReadAll(response.Body)
|
||||
assert.Nil(t, err)
|
||||
|
||||
var runsListResponse azure.JobsRunsListResponse
|
||||
err = json.Unmarshal(body, &runsListResponse)
|
||||
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, 0, len(runsListResponse.Runs))
|
||||
}
|
||||
|
||||
func TestAPI_RunsCancel(t *testing.T) {
|
||||
// Arrange
|
||||
server := httptest.NewServer(router.NewRouter())
|
||||
defer server.Close()
|
||||
response, err := submitRun(server)
|
||||
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, 200, response.StatusCode)
|
||||
|
||||
// Act
|
||||
response, err = server.Client().Post(
|
||||
server.URL+runAPI+"cancel",
|
||||
"application/json",
|
||||
bytes.NewBufferString("{\"run_id\":1}"))
|
||||
|
||||
// Assert
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, 200, response.StatusCode)
|
||||
}
|
||||
|
||||
func TestAPI_RunsCancel_WithInvalidJobID(t *testing.T) {
|
||||
// Arrange
|
||||
server := httptest.NewServer(router.NewRouter())
|
||||
defer server.Close()
|
||||
|
||||
// Act
|
||||
response, err := server.Client().Post(
|
||||
server.URL+runAPI+"cancel",
|
||||
"application/json",
|
||||
bytes.NewBufferString("{\"run_id\":1}"))
|
||||
|
||||
// Assert
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, 404, response.StatusCode)
|
||||
}
|
||||
|
||||
func TestAPI_RunsDelete(t *testing.T) {
|
||||
// Arrange
|
||||
server := httptest.NewServer(router.NewRouter())
|
||||
defer server.Close()
|
||||
response, err := submitRun(server)
|
||||
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, 200, response.StatusCode)
|
||||
|
||||
// Act
|
||||
response, err = server.Client().Post(
|
||||
server.URL+runAPI+"delete",
|
||||
"application/json",
|
||||
bytes.NewBufferString("{\"run_id\":1}"))
|
||||
|
||||
// Assert
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, 200, response.StatusCode)
|
||||
}
|
||||
|
||||
func TestAPI_RunsDelete_WithInvalidJobID(t *testing.T) {
|
||||
// Arrange
|
||||
server := httptest.NewServer(router.NewRouter())
|
||||
defer server.Close()
|
||||
|
||||
// Act
|
||||
response, err := server.Client().Post(
|
||||
server.URL+runAPI+"delete",
|
||||
"application/json",
|
||||
bytes.NewBufferString("{\"run_id\":1}"))
|
||||
|
||||
// Assert
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, 404, response.StatusCode)
|
||||
}
|
|
@@ -0,0 +1,148 @@
|
|||
package integration_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"math"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"runtime"
|
||||
"testing"
|
||||
|
||||
"github.com/microsoft/azure-databricks-operator/mockapi/router"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
const printMemEveryXItems = 50000
|
||||
|
||||
/*
|
||||
*
|
||||
* WARNING - THESE HAVE TO BE RUN INDEPENDENTLY SO MEMORY USAGE OF ONE BENCHMARK DOESN'T AFFECT THE OTHER. Use `make bench` to run them correctly.
|
||||
*
|
||||
*/
|
||||
|
||||
func BenchmarkRunsMemoryUsage_Cleanup(b *testing.B) {
|
||||
log.SetOutput(ioutil.Discard)
|
||||
http.DefaultTransport.(*http.Transport).MaxIdleConnsPerHost = 100
|
||||
|
||||
memLimitMb := 2000
|
||||
|
||||
// Arrange
|
||||
server := httptest.NewServer(router.NewRouter())
|
||||
defer server.Close()
|
||||
|
||||
// Act
|
||||
index := 0
|
||||
for {
|
||||
index++
|
||||
|
||||
submitAndCheckRun(server, b)
|
||||
|
||||
// Delete the run - this relies on the fact that calls are made synchronously and run IDs are incremented per call, so the ID matches the `index` value
|
||||
response, err := server.Client().Post(
|
||||
server.URL+runAPI+"delete",
|
||||
"application/json",
|
||||
bytes.NewBufferString(fmt.Sprintf("{\"run_id\":%v}", index)))
|
||||
|
||||
if err != nil {
|
||||
b.Error(err.Error())
|
||||
assert.Equal(b, nil, err)
|
||||
b.FailNow()
|
||||
}
|
||||
|
||||
// devnull the response
|
||||
io.Copy(ioutil.Discard, response.Body) //nolint
|
||||
response.Body.Close() //nolint
|
||||
|
||||
assert.Equal(b, 200, response.StatusCode)
|
||||
|
||||
// Only check the memory every x items
|
||||
if shouldCheckMemory(index) {
|
||||
shouldExit := checkAndPrintMemoryUsage(index, b, memLimitMb)
|
||||
if shouldExit {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func BenchmarkRunsMemoryUsage_NeverDelete(b *testing.B) {
|
||||
log.SetOutput(ioutil.Discard)
|
||||
http.DefaultTransport.(*http.Transport).MaxIdleConnsPerHost = 100
|
||||
|
||||
memLimitMb := 2000
|
||||
|
||||
// Arrange
|
||||
server := httptest.NewServer(router.NewRouter())
|
||||
defer server.Close()
|
||||
|
||||
// Act
|
||||
index := 0
|
||||
for {
|
||||
index++
|
||||
|
||||
submitAndCheckRun(server, b)
|
||||
|
||||
// Only check the memory every x items
|
||||
if shouldCheckMemory(index) {
|
||||
shouldExit := checkAndPrintMemoryUsage(index, b, memLimitMb)
|
||||
if shouldExit {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func shouldCheckMemory(index int) bool {
|
||||
return float64(index)/printMemEveryXItems == math.Trunc(float64(index)/printMemEveryXItems)
|
||||
}
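// Equivalent to checking index%printMemEveryXItems == 0: the float/Trunc form just
// tests whether index is an exact multiple of the print interval.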
|
||||
|
||||
func checkAndPrintMemoryUsage(index int, b *testing.B, memLimitMb int) bool {
|
||||
mbUsed := PrintMemUsage()
|
||||
fmt.Printf("-- InProgress - Used %vmb of memory for %v items \n\n", mbUsed, index)
|
||||
|
||||
// If we've used more than the configured memory limit, stop the test
|
||||
if int(mbUsed) > memLimitMb {
|
||||
fmt.Printf("\n\n Test completed - Used >%vmb of memory. Used %vmb of memory for %vk items \n %vmb per item\n\n", memLimitMb, mbUsed, index/1000, float64(mbUsed)/float64(index))
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func submitAndCheckRun(server *httptest.Server, b *testing.B) {
|
||||
response, err := submitRun(server)
|
||||
if err != nil {
|
||||
b.Error(err.Error())
|
||||
assert.Equal(b, nil, err)
|
||||
b.FailNow()
|
||||
}
|
||||
|
||||
// devnull the response
|
||||
io.Copy(ioutil.Discard, response.Body) //nolint
|
||||
response.Body.Close() //nolint
|
||||
|
||||
assert.Equal(b, 200, response.StatusCode)
|
||||
}
|
||||
|
||||
// PrintMemUsage outputs the current, total and OS memory being used, as well as the number
|
||||
// of garbage collection cycles completed.
|
||||
func PrintMemUsage() (mbUsed uint64) {
|
||||
var m runtime.MemStats
|
||||
runtime.ReadMemStats(&m)
|
||||
// For info on each, see: https://golang.org/pkg/runtime/#MemStats
|
||||
fmt.Printf("Alloc = %v MiB", bToMb(m.Alloc))
|
||||
fmt.Printf("\tTotalAlloc = %v MiB", bToMb(m.TotalAlloc))
|
||||
fmt.Printf("\tSys = %v MiB", bToMb(m.Sys))
|
||||
fmt.Printf("\tNumGC = %v\n", m.NumGC)
|
||||
|
||||
return bToMb(m.Sys)
|
||||
}
|
||||
|
||||
func bToMb(b uint64) uint64 {
|
||||
return b / 1024 / 1024
|
||||
}
|
|
@@ -0,0 +1,35 @@
|
|||
{
|
||||
"name": "Nightly model training",
|
||||
"new_cluster": {
|
||||
"spark_version": "5.3.x-scala2.11",
|
||||
"node_type_id": "r3.xlarge",
|
||||
"aws_attributes": {
|
||||
"availability": "ON_DEMAND"
|
||||
},
|
||||
"num_workers": 10
|
||||
},
|
||||
"libraries": [
|
||||
{
|
||||
"jar": "dbfs:/my-jar.jar"
|
||||
},
|
||||
{
|
||||
"maven": {
|
||||
"coordinates": "org.jsoup:jsoup:1.7.2"
|
||||
}
|
||||
}
|
||||
],
|
||||
"email_notifications": {
|
||||
"on_start": [],
|
||||
"on_success": [],
|
||||
"on_failure": []
|
||||
},
|
||||
"timeout_seconds": 3600,
|
||||
"max_retries": 1,
|
||||
"schedule": {
|
||||
"quartz_cron_expression": "0 15 22 ? * *",
|
||||
"timezone_id": "America/Los_Angeles"
|
||||
},
|
||||
"spark_jar_task": {
|
||||
"main_class_name": "com.databricks.ComputeModels"
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,24 @@
|
|||
{
|
||||
"run_name": "my spark task",
|
||||
"new_cluster": {
|
||||
"spark_version": "5.3.x-scala2.11",
|
||||
"node_type_id": "r3.xlarge",
|
||||
"aws_attributes": {
|
||||
"availability": "ON_DEMAND"
|
||||
},
|
||||
"num_workers": 10
|
||||
},
|
||||
"libraries": [
|
||||
{
|
||||
"jar": "dbfs:/my-jar.jar"
|
||||
},
|
||||
{
|
||||
"maven": {
|
||||
"coordinates": "org.jsoup:jsoup:1.7.2"
|
||||
}
|
||||
}
|
||||
],
|
||||
"spark_jar_task": {
|
||||
"main_class_name": "com.databricks.ComputeModels"
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,19 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"net/http"
|
||||
|
||||
"github.com/microsoft/azure-databricks-operator/mockapi/router"
|
||||
)
|
||||
|
||||
func main() {
|
||||
|
||||
router := router.NewRouter()
|
||||
|
||||
port := ":8085"
|
||||
fmt.Printf("API running on http://localhost%s\n", port)
|
||||
|
||||
log.Fatal(http.ListenAndServe(port, router))
|
||||
}
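// Quick smoke test once the server is up (the jobs/list route is assumed from the
// router wiring exercised by the integration tests):
//
//   go run ./mockapi
//   curl http://localhost:8085/api/2.0/jobs/list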
|
|
@@ -0,0 +1,66 @@
|
|||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: databricks-mock-api
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: databricks-mock-api
|
||||
namespace: databricks-mock-api
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: databricks-mock-api
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: databricks-mock-api
|
||||
spec:
|
||||
affinity:
|
||||
podAntiAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
- labelSelector:
|
||||
matchExpressions:
|
||||
- key: control-plane
|
||||
operator: In
|
||||
values:
|
||||
- controller-manager
|
||||
topologyKey: kubernetes.io/hostname
|
||||
namespaces:
|
||||
- azure-databricks-operator-system
|
||||
- labelSelector:
|
||||
matchExpressions:
|
||||
- key: app
|
||||
operator: In
|
||||
values:
|
||||
- locust-loadtest
|
||||
topologyKey: kubernetes.io/hostname
|
||||
containers:
|
||||
- name: databricks-mock-api
|
||||
image: mockapi:latest
|
||||
env:
|
||||
- name: DATABRICKS_MOCK_API_LATENCY_MILLISECONDS_SLOW_REQUEST_MIN
|
||||
value: '200'
|
||||
- name: DATABRICKS_MOCK_API_LATENCY_MILLISECONDS_SLOW_REQUEST_MAX
|
||||
value: '1000'
|
||||
- name: DATABRICKS_MOCK_API_LATENCY_MILLISECONDS_FAST_REQUEST_MIN
|
||||
value: '50'
|
||||
- name: DATABRICKS_MOCK_API_LATENCY_MILLISECONDS_FAST_REQUEST_MAX
|
||||
value: '200'
|
||||
- name: DATABRICKS_MOCK_API_RATE_LIMIT
|
||||
value: '200'
|
||||
- name: DATABRICKS_MOCK_API_ERROR_500_PROBABILITY
|
||||
value: '0'
|
||||
- name: DATABRICKS_MOCK_API_ERROR_XML_RESPONSE_PROBABILITY
|
||||
value: '0'
|
||||
- name: DATABRICKS_MOCK_API_ERROR_SINKHOLE_PROBABILITY
|
||||
value: '0'
|
||||
|
||||
resources:
|
||||
limits:
|
||||
memory: '4G'
|
||||
cpu: '1600m' # On a D2v2 instance you can't use 2 full CPUs as some capacity is reserved for system pods, so request slightly less than 2
|
||||
ports:
|
||||
- containerPort: 8085
|
||||
name: api
|
|
@@ -0,0 +1,32 @@
|
|||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: databricks-mock-api
|
||||
namespace: databricks-mock-api
|
||||
labels:
|
||||
app: databricks-mock-api
|
||||
spec:
|
||||
selector:
|
||||
app: databricks-mock-api
|
||||
ports:
|
||||
- port: 8080
|
||||
targetPort: api
|
||||
name: api
|
||||
---
|
||||
apiVersion: monitoring.coreos.com/v1
|
||||
kind: ServiceMonitor
|
||||
metadata:
|
||||
name: databricks-mockapi-servicemonitor
|
||||
namespace: databricks-mock-api
|
||||
labels:
|
||||
app: databricks-mock-api
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: databricks-mock-api
|
||||
namespaceSelector:
|
||||
matchNames:
|
||||
- databricks-mock-api
|
||||
endpoints:
|
||||
- port: api
|
||||
path: /metrics
|
|
@@ -0,0 +1,239 @@
|
|||
package middleware
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"errors"
|
||||
"log"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"os"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/microsoft/azure-databricks-operator/mockapi/model"
|
||||
"golang.org/x/time/rate"
|
||||
)
|
||||
|
||||
// LatencySlowRequestMaxEnvName is the name of the env var for the max latency setting of a slow request, e.g. a POST request
|
||||
const LatencySlowRequestMaxEnvName = "DATABRICKS_MOCK_API_LATENCY_MILLISECONDS_SLOW_REQUEST_MAX"
|
||||
|
||||
// LatencySlowRequestMinEnvName is the name of the env var for the min latency setting of a slow request, e.g. a POST request
|
||||
const LatencySlowRequestMinEnvName = "DATABRICKS_MOCK_API_LATENCY_MILLISECONDS_SLOW_REQUEST_MIN"
|
||||
|
||||
// LatencyFastRequestMaxEnvName is the name of the env var for the max latency setting of a fast request, e.g. a GET request
|
||||
const LatencyFastRequestMaxEnvName = "DATABRICKS_MOCK_API_LATENCY_MILLISECONDS_FAST_REQUEST_MAX"
|
||||
|
||||
// LatencyFastRequestMinEnvName is the name of the env var for the min latency setting of a fast request, e.g. a GET request
|
||||
const LatencyFastRequestMinEnvName = "DATABRICKS_MOCK_API_LATENCY_MILLISECONDS_FAST_REQUEST_MIN"
|
||||
|
||||
// RateLimitEnvName is the name of the env var for the rate limit setting
|
||||
const RateLimitEnvName = "DATABRICKS_MOCK_API_RATE_LIMIT"
|
||||
const rateLimitDefault = rate.Inf
|
||||
const rateBurstAmount = 10 // Burst of 10 seems to work from testing with a client
|
||||
|
||||
// Error500ResponseEnvName is the name of the env var for specifying the probability of returning a 500 response.
|
||||
// E.g. 20 => 20% chance of returning a 500 response
|
||||
const Error500ResponseEnvName = "DATABRICKS_MOCK_API_ERROR_500_PROBABILITY"
|
||||
|
||||
// ErrorSinkHoleResponseEnvName is the name of the env var for specifying the probability of a sink-hole response.
|
||||
// A sink-hole response is a request that writes nothing as part of the response for an extended period (10 minutes by default)
|
||||
// and holds open the connection during that time.
|
||||
// E.g. 20 => 20% chance of returning a sink-hole response
|
||||
const ErrorSinkHoleResponseEnvName = "DATABRICKS_MOCK_API_ERROR_SINKHOLE_PROBABILITY"
|
||||
const errorProbabilityDefault = 0
|
||||
const errorSinkHoleDurationDefault = 10 * time.Minute
|
||||
|
||||
// ErrorXMLResponseEnvName is the name of the env var for specifying the probability of returning a 200 status code with an XML body
|
||||
const ErrorXMLResponseEnvName = "DATABRICKS_MOCK_API_ERROR_XML_RESPONSE_PROBABILITY"
|
||||
|
||||
func init() {
|
||||
// From the docs, Seed should not be called concurrently with any other Rand method
|
||||
// so call Seed here to randomize on initialization rather than per-request
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
}
|
||||
|
||||
// Add applies a set of middleware to a http.Handler.
|
||||
// Middleware is applied so that it executes in the order specified.
|
||||
// E.g. middleware.Add(h, mw1, mw2) is equivalent to mw1(mw2(h)), so mw1 handles the request first
|
||||
func Add(h http.Handler, middleware ...func(http.Handler) http.Handler) http.Handler {
|
||||
// Reverse index to execute middleware in provided order
|
||||
for i := len(middleware) - 1; i >= 0; i-- {
|
||||
h = middleware[i](h)
|
||||
}
|
||||
|
||||
return h
|
||||
}
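// Usage sketch (illustrative - the real wiring lives in the router package): wrapping
// an API handler so requests are rate limited first, then run through error injection,
// then latency:
//
//   h := middleware.Add(apiHandler, middleware.RateLimit, middleware.ErrorResponse, middleware.AddLatency)
//
// which builds RateLimit(ErrorResponse(AddLatency(apiHandler))).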
|
||||
|
||||
// AddLatency applies random latency to the request
|
||||
func AddLatency(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
switch r.Method {
|
||||
case http.MethodGet:
|
||||
// Fast Request Sleep
|
||||
fastRequestMin, fastRequestMax, err := getLatencyValues(LatencyFastRequestMinEnvName, LatencyFastRequestMaxEnvName)
|
||||
if err == nil {
|
||||
log.Printf("Adding latency for GET requests between %v-%v ms", fastRequestMin, fastRequestMax)
|
||||
fastRequestSleepDuration := fastRequestMin + rand.Intn(fastRequestMax-fastRequestMin+1)
|
||||
time.Sleep(time.Duration(fastRequestSleepDuration) * time.Millisecond)
|
||||
}
|
||||
default:
|
||||
// Slow Request Sleep
|
||||
slowRequestMin, slowRequestMax, err := getLatencyValues(LatencySlowRequestMinEnvName, LatencySlowRequestMaxEnvName)
|
||||
if err == nil {
|
||||
log.Printf("Adding latency for default POST/PUT/DELETE requests between %v-%v ms", slowRequestMin, slowRequestMax)
|
||||
slowRequestSleepDuration := slowRequestMin + rand.Intn(slowRequestMax-slowRequestMin+1)
|
||||
time.Sleep(time.Duration(slowRequestSleepDuration) * time.Millisecond)
|
||||
}
|
||||
}
|
||||
|
||||
next.ServeHTTP(w, r)
|
||||
})
|
||||
}
|
||||
|
||||
func getLatencyValues(minName string, maxName string) (minValue int, maxValue int, errorValue error) {
|
||||
requestMin, minErr := strconv.Atoi(os.Getenv(minName))
|
||||
requestMax, maxErr := strconv.Atoi(os.Getenv(maxName))
|
||||
|
||||
// If either env var is missing or not a valid integer, log it and return an error
|
||||
if minErr != nil || maxErr != nil {
|
||||
log.Printf("Env variables are not set: %s, %s", minName, maxName)
|
||||
// errLatencyNotSet indicates the latency env variables are not set
|
||||
errLatencyNotSet := errors.New("latency env variables are not set")
|
||||
return 0, 0, errLatencyNotSet
|
||||
}
|
||||
return requestMin, requestMax, nil
|
||||
|
||||
}
|
||||
|
||||
var limiter = rate.NewLimiter(rateLimitDefault, rateBurstAmount)
|
||||
|
||||
// RateLimit applies rate-limiting to the requests
|
||||
func RateLimit(handler http.Handler) http.Handler {
|
||||
|
||||
setRateLimit := func() {
|
||||
configValue, rateErr := strconv.Atoi(os.Getenv(RateLimitEnvName))
|
||||
var configRate rate.Limit
|
||||
if rateErr == nil {
|
||||
configRate = rate.Limit(configValue)
|
||||
} else {
|
||||
configRate = rateLimitDefault
|
||||
}
|
||||
if configRate != limiter.Limit() {
|
||||
log.Printf("%s has changed - updating limit to %.0f\n", RateLimitEnvName, configRate)
|
||||
limiter.SetLimit(rate.Limit(configRate))
|
||||
}
|
||||
}
|
||||
setRateLimit()
|
||||
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
setRateLimit()
|
||||
|
||||
if limiter.Allow() {
|
||||
handler.ServeHTTP(w, r)
|
||||
} else {
|
||||
log.Printf("429 Response: %s\n", r.RequestURI)
|
||||
http.Error(w, http.StatusText(http.StatusTooManyRequests), http.StatusTooManyRequests)
|
||||
}
|
||||
})
|
||||
}
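// Note: the limiter is a single token bucket (golang.org/x/time/rate) shared by all
// requests. With DATABRICKS_MOCK_API_RATE_LIMIT=N it refills at roughly N tokens per
// second on top of the initial burst of rateBurstAmount, and anything beyond that
// gets a 429 Too Many Requests.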
|
||||
|
||||
var error500Probabilty = -1
|
||||
var errorSinkHoleProbability = -1
|
||||
var errorXMLResponseProbability = -1
|
||||
var errorSinkHoleDuration = errorSinkHoleDurationDefault
|
||||
|
||||
// GetErrorSinkHoleDuration returns the duration used for sink-hole requests
|
||||
func GetErrorSinkHoleDuration() time.Duration {
|
||||
return errorSinkHoleDuration
|
||||
}
|
||||
|
||||
// SetErrorSinkHoleDuration sets the duration used for sink-hole requests
|
||||
func SetErrorSinkHoleDuration(duration time.Duration) {
|
||||
errorSinkHoleDuration = duration
|
||||
}
|
||||
|
||||
// ErrorResponse injects error behavior based on config
|
||||
func ErrorResponse(handler http.Handler) http.Handler {
|
||||
getConfigValue := func(name string) int {
|
||||
configValue, err := strconv.Atoi(os.Getenv(name))
|
||||
if err != nil {
|
||||
configValue = errorProbabilityDefault
|
||||
}
|
||||
if configValue < 0 || configValue > 100 {
|
||||
log.Printf("Invalid value for %s (%d). Using default of %d\n", name, configValue, errorProbabilityDefault)
|
||||
configValue = errorProbabilityDefault
|
||||
}
|
||||
return configValue
|
||||
}
|
||||
setErrorLimit := func() {
|
||||
error500ConfigValue := getConfigValue(Error500ResponseEnvName)
|
||||
errorSinkHoleConfigValue := getConfigValue(ErrorSinkHoleResponseEnvName)
|
||||
errorXMLResponseConfigValue := getConfigValue(ErrorXMLResponseEnvName)
|
||||
if error500ConfigValue+errorSinkHoleConfigValue+errorXMLResponseConfigValue > 100 {
|
||||
log.Printf("Invalid value for error rates - must add up to 100 or less. Using defaults of %d\n", errorProbabilityDefault)
|
||||
error500ConfigValue = errorProbabilityDefault
|
||||
errorSinkHoleConfigValue = errorProbabilityDefault
|
||||
errorXMLResponseConfigValue = errorProbabilityDefault
|
||||
}
|
||||
if error500ConfigValue != error500Probabilty {
|
||||
log.Printf("%s has changed - setting to %d\n", Error500ResponseEnvName, error500ConfigValue)
|
||||
error500Probabilty = error500ConfigValue
|
||||
}
|
||||
if errorSinkHoleConfigValue != errorSinkHoleProbability {
|
||||
log.Printf("%s has changed - setting to %d\n", ErrorSinkHoleResponseEnvName, errorSinkHoleConfigValue)
|
||||
errorSinkHoleProbability = errorSinkHoleConfigValue
|
||||
}
|
||||
if errorXMLResponseConfigValue != errorXMLResponseProbability {
|
||||
log.Printf("%s has changed - setting to %d\n", ErrorXMLResponseEnvName, errorXMLResponseConfigValue)
|
||||
errorXMLResponseProbability = errorXMLResponseConfigValue
|
||||
}
|
||||
}
|
||||
setErrorLimit()
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
setErrorLimit()
|
||||
randVal := rand.Intn(100)
|
||||
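// randVal is uniform over [0,100); the three configured probabilities carve it into
// disjoint bands so each error mode fires with its configured percentage and the
// remainder falls through to the real handler.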
if randVal < error500Probabilty {
|
||||
sendStatus500Response(w, r)
|
||||
return
|
||||
}
|
||||
if randVal < (error500Probabilty + errorSinkHoleProbability) {
|
||||
handleSinkholeRequest(handler, w, r)
|
||||
return
|
||||
}
|
||||
if randVal < (error500Probabilty + errorSinkHoleProbability + errorXMLResponseProbability) {
|
||||
handleXMLResponseRequest(handler, w, r)
|
||||
return
|
||||
}
|
||||
// over the error threshold value so pass to inner handler
|
||||
handler.ServeHTTP(w, r)
|
||||
})
|
||||
}
|
||||
|
||||
func sendStatus500Response(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(500)
|
||||
}
|
||||
func handleSinkholeRequest(handler http.Handler, w http.ResponseWriter, r *http.Request) {
|
||||
time.Sleep(errorSinkHoleDuration)
|
||||
handler.ServeHTTP(w, r)
|
||||
}
|
||||
|
||||
func handleXMLResponseRequest(handler http.Handler, w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
testdata := model.TestXMLResponse{Name: "TestJob"}
|
||||
x, err := xml.MarshalIndent(testdata, "", " ")
|
||||
|
||||
if err != nil {
|
||||
log.Printf("Error marshaling xml data")
|
||||
return
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "application/xml")
|
||||
_, errWrite := w.Write(x)
|
||||
|
||||
if errWrite != nil {
|
||||
log.Printf("Error writing XML response")
|
||||
return
|
||||
}
|
||||
handler.ServeHTTP(w, r)
|
||||
}
|
|
@@ -0,0 +1,10 @@
package model

import (
	dbmodel "github.com/xinsnake/databricks-sdk-golang/azure/models"
)

// JobsListResponse represents Databricks jobs/list response object
type JobsListResponse struct {
	Jobs []dbmodel.Job `json:"jobs"`
}
@@ -0,0 +1,13 @@
package model

import (
	dbmodel "github.com/xinsnake/databricks-sdk-golang/azure/models"
)

// JobsRunsSubmitRequest represents a Databricks run submit request
type JobsRunsSubmitRequest struct {
	RunName      string               `json:"run_name"`
	NewCluster   dbmodel.NewCluster   `json:"new_cluster"`
	Libraries    []dbmodel.Library    `json:"libraries"`
	SparkJarTask dbmodel.SparkJarTask `json:"spark_jar_task"`
}
@@ -0,0 +1,6 @@
package model

// TestXMLResponse represents a test XML response
type TestXMLResponse struct {
	Name string `xml:"Test Job"`
}
@@ -0,0 +1,60 @@
package repository

import (
	"fmt"
	"sync"

	"github.com/google/uuid"
	dbmodel "github.com/xinsnake/databricks-sdk-golang/azure/models"
)

// ClusterRepository is a store for Cluster instances
type ClusterRepository struct {
	clusters  map[string]dbmodel.ClusterInfo
	writeLock sync.Mutex
}

// NewClusterRepository creates a new ClusterRepository
func NewClusterRepository() *ClusterRepository {
	return &ClusterRepository{
		clusters: map[string]dbmodel.ClusterInfo{},
	}
}

// GetCluster returns the Cluster with the specified ID or an empty Cluster
func (r *ClusterRepository) GetCluster(id string) dbmodel.ClusterInfo {
	if cluster, ok := r.clusters[id]; ok {
		return cluster
	}

	return dbmodel.ClusterInfo{}
}

// GetClusters returns all Clusters
func (r *ClusterRepository) GetClusters() []dbmodel.ClusterInfo {
	arr := []dbmodel.ClusterInfo{}
	for _, cluster := range r.clusters {
		arr = append(arr, cluster)
	}
	return arr
}

// CreateCluster adds an ID to the specified cluster and adds it to the collection
func (r *ClusterRepository) CreateCluster(cluster dbmodel.ClusterInfo) string {
	newID := uuid.New().String()
	cluster.ClusterID = newID

	r.writeLock.Lock()
	defer r.writeLock.Unlock()

	r.clusters[newID] = cluster
	return cluster.ClusterID
}

// DeleteCluster deletes the cluster with the specified ID
func (r *ClusterRepository) DeleteCluster(id string) error {
	if _, ok := r.clusters[id]; ok {
		delete(r.clusters, id)
		return nil
	}
	return fmt.Errorf("Could not find Cluster with id of %s to delete", id)
}
@@ -0,0 +1,74 @@
package repository

import (
	"fmt"
	"sync"
	"sync/atomic"

	"github.com/microsoft/azure-databricks-operator/mockapi/model"
	dbmodel "github.com/xinsnake/databricks-sdk-golang/azure/models"
)

// JobRepository is a store for Job instances
type JobRepository struct {
	currentID int64
	jobs      map[int64]dbmodel.Job
	writeLock sync.Mutex
}

// NewJobRepository creates a new JobRepository
func NewJobRepository() *JobRepository {
	return &JobRepository{
		jobs: map[int64]dbmodel.Job{},
	}
}

// GetJob returns the Job with the specified ID or an empty Job
func (r *JobRepository) GetJob(id int64) dbmodel.Job {
	if job, ok := r.jobs[id]; ok {
		return job
	}

	return dbmodel.Job{}
}

// GetJobs returns all Jobs
func (r *JobRepository) GetJobs() model.JobsListResponse {
	arr := []dbmodel.Job{}
	for _, job := range r.jobs {
		arr = append(arr, job)
	}

	result := model.JobsListResponse{
		Jobs: arr,
	}

	return result
}

// CreateJob adds an ID to the specified job and adds it to the collection
func (r *JobRepository) CreateJob(jobCreateRequest dbmodel.JobSettings) int64 {
	newID := atomic.AddInt64(&r.currentID, 1)

	job := dbmodel.Job{
		JobID:       newID,
		Settings:    &jobCreateRequest,
		CreatedTime: makeTimestamp(),
	}

	r.writeLock.Lock()
	defer r.writeLock.Unlock()

	r.jobs[newID] = job
	return job.JobID
}

// DeleteJob deletes the job with the specified ID
func (r *JobRepository) DeleteJob(id int64) error {
	if _, ok := r.jobs[id]; ok {
		delete(r.jobs, id)
		return nil
	}
	return fmt.Errorf("Could not find Job with id of %d to delete", id)
}
@@ -0,0 +1,144 @@
package repository

import (
	"fmt"
	"sync"
	"sync/atomic"

	"github.com/microsoft/azure-databricks-operator/mockapi/model"
	azure "github.com/xinsnake/databricks-sdk-golang/azure"
	dbmodel "github.com/xinsnake/databricks-sdk-golang/azure/models"
)

// RunRepository is a store for Run instances
type RunRepository struct {
	runID               int64
	runs                map[int64]dbmodel.Run
	writeLock           sync.Mutex
	timePerRunLifeState int64
}

// NewRunRepository creates a new RunRepository
func NewRunRepository(timePerRunLifeState int64) *RunRepository {
	return &RunRepository{
		runs:                map[int64]dbmodel.Run{},
		timePerRunLifeState: timePerRunLifeState,
	}
}

// CreateRun adds an ID to the specified run and adds it to the collection
func (r *RunRepository) CreateRun(runReq model.JobsRunsSubmitRequest, jobID int64) int64 {
	newID := atomic.AddInt64(&r.runID, 1)
	lifeCycleState := dbmodel.RunLifeCycleState(dbmodel.RunLifeCycleStatePending)
	trigger := dbmodel.TriggerType(dbmodel.TriggerTypePeriodic)
	run := dbmodel.Run{
		RunID:       newID,
		JobID:       jobID,
		NumberInJob: int64(runReq.NewCluster.NumWorkers),
		State: &dbmodel.RunState{
			LifeCycleState: &lifeCycleState,
			StateMessage:   "Starting action",
		},
		ClusterInstance: &dbmodel.ClusterInstance{
			ClusterID:      "1201-my-cluster",
			SparkContextID: "1102398-spark-context-id",
		},
		Task:                 &dbmodel.JobTask{NotebookTask: &dbmodel.NotebookTask{NotebookPath: "/Users/user@example.com/my-notebook"}},
		ClusterSpec:          &dbmodel.ClusterSpec{ExistingClusterID: "1201-my-cluster"},
		OverridingParameters: &dbmodel.RunParameters{JarParams: []string{"param1", "param2"}},
		StartTime:            makeTimestamp(),
		SetupDuration:        r.timePerRunLifeState,
		ExecutionDuration:    r.timePerRunLifeState,
		CleanupDuration:      r.timePerRunLifeState,
		Trigger:              &trigger,
	}

	r.writeLock.Lock()
	defer r.writeLock.Unlock()

	r.runs[newID] = run
	return newID
}

// GetRun returns the Run with the specified ID or an empty Run
func (r *RunRepository) GetRun(id int64) dbmodel.Run {
	if run, ok := r.runs[id]; ok {
		setRunState(&run)
		return run
	}

	return dbmodel.Run{}
}

// GetRunOutput returns the Run output along with the run as metadata or an empty run output
func (r *RunRepository) GetRunOutput(id int64) azure.JobsRunsGetOutputResponse {
	return azure.JobsRunsGetOutputResponse{
		Metadata: r.GetRun(id),
	}
}

// GetRuns returns all Runs
func (r *RunRepository) GetRuns() azure.JobsRunsListResponse {
	arr := []dbmodel.Run{}
	for _, run := range r.runs {
		setRunState(&run)
		arr = append(arr, run)
	}

	response := azure.JobsRunsListResponse{
		Runs: arr,
	}

	return response
}

// DeleteRun deletes the run with the specified ID
func (r *RunRepository) DeleteRun(id int64) error {
	if _, ok := r.runs[id]; ok {
		delete(r.runs, id)
		return nil
	}
	return fmt.Errorf("Could not find Run with id of %d to delete", id)
}

// CancelRun cancels the run with the specified ID
func (r *RunRepository) CancelRun(id int64) error {
	if run, ok := r.runs[id]; ok {
		setRunState(&run)
		// If the run has already completed, cancel is a no-op
		if run.State.ResultState != nil && *run.State.ResultState != "" {
			return nil
		}
		resultState := dbmodel.RunResultState(dbmodel.RunResultStateCanceled)
		run.State.ResultState = &resultState
		*run.State.LifeCycleState = dbmodel.RunLifeCycleStateTerminated
		r.runs[id] = run
		return nil
	}
	return fmt.Errorf("Could not find Run with id of %d to cancel", id)
}

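// setRunState derives the run's lifecycle state from the time elapsed since
// StartTime: the run stays PENDING until SetupDuration has passed, is RUNNING
// until ExecutionDuration has also passed, TERMINATING until CleanupDuration
// has passed, and is then TERMINATED with a SUCCESS result. Runs that are
// already TERMINATED (for example, cancelled runs) are left unchanged.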
func setRunState(run *dbmodel.Run) {
	currentTime := makeTimestamp()
	setupFinishedTime := run.StartTime + run.SetupDuration
	runFinishedTime := setupFinishedTime + run.ExecutionDuration
	cleanupFinishedTime := runFinishedTime + run.CleanupDuration

	if currentTime < setupFinishedTime || *run.State.LifeCycleState == dbmodel.RunLifeCycleStateTerminated {
		return
	}

	if currentTime < runFinishedTime {
		*run.State.LifeCycleState = dbmodel.RunLifeCycleStateRunning
		return
	}

	if currentTime < cleanupFinishedTime {
		*run.State.LifeCycleState = dbmodel.RunLifeCycleStateTerminating
		return
	}

	*run.State.LifeCycleState = dbmodel.RunLifeCycleStateTerminated
	resultState := dbmodel.RunResultState(dbmodel.RunResultStateSuccess)
	run.State.ResultState = &resultState
}
@@ -0,0 +1,156 @@
package repository

import (
	"testing"
	"time"

	"github.com/microsoft/azure-databricks-operator/mockapi/model"
	mockableClock "github.com/stephanos/clock"
	"github.com/stretchr/testify/assert"
	dbmodel "github.com/xinsnake/databricks-sdk-golang/azure/models"
)

var testLifeCyclePeriodLength int64 = 500
var mockClock = mockableClock.NewMock()
var testTime = time.Date(2019, 01, 01, 01, 00, 00, 00, time.UTC)

var lifeCycleTests = []struct {
	name                   string
	waitTime               int64
	expectedLifeCycleState string
	expectedResultState    string
}{
	{"LifeCycleState while run is PENDING", 0, dbmodel.RunLifeCycleStatePending, ""},
	{"LifeCycleState while run is RUNNING", testLifeCyclePeriodLength, dbmodel.RunLifeCycleStateRunning, ""},
	{"LifeCycleState while run is TERMINATING", testLifeCyclePeriodLength * 2, dbmodel.RunLifeCycleStateTerminating, ""},
	{"LifeCycleState while run is TERMINATED", testLifeCyclePeriodLength * 3, dbmodel.RunLifeCycleStateTerminated, dbmodel.RunResultStateSuccess},
}

func TestGetRun_LifeCycle(t *testing.T) {
	// Arrange
	repo, runID := setupLifeCycleTests()

	for _, tt := range lifeCycleTests {
		t.Run(tt.name, func(t *testing.T) {
			mockClock.Set(testTime.Add(time.Duration(tt.waitTime) * time.Millisecond))

			// Act
			run := repo.GetRun(runID)

			// Assert
			var lifeCycleState string
			if run.State.LifeCycleState != nil {
				lifeCycleState = string(*run.State.LifeCycleState)
			}
			assert.Equal(t, tt.expectedLifeCycleState, lifeCycleState)

			var resultState string
			if run.State.ResultState != nil {
				resultState = string(*run.State.ResultState)
			}
			assert.Equal(t, tt.expectedResultState, resultState)
		})
	}
}

func TestGetRunOutput_LifeCycle(t *testing.T) {
	// Arrange
	repo, runID := setupLifeCycleTests()

	for _, tt := range lifeCycleTests {
		t.Run(tt.name, func(t *testing.T) {
			mockClock.Set(testTime.Add(time.Duration(tt.waitTime) * time.Millisecond))

			// Act
			response := repo.GetRunOutput(runID)

			// Assert
			var lifeCycleState string
			if response.Metadata.State.LifeCycleState != nil {
				lifeCycleState = string(*response.Metadata.State.LifeCycleState)
			}
			assert.Equal(t, tt.expectedLifeCycleState, lifeCycleState)

			var resultState string
			if response.Metadata.State.ResultState != nil {
				resultState = string(*response.Metadata.State.ResultState)
			}
			assert.Equal(t, tt.expectedResultState, resultState)
		})
	}
}

func TestListRun_LifeCycle(t *testing.T) {
	// Arrange
	repo, _ := setupLifeCycleTests()

	for _, tt := range lifeCycleTests {
		t.Run(tt.name, func(t *testing.T) {
			mockClock.Set(testTime.Add(time.Duration(tt.waitTime) * time.Millisecond))

			// Act
			response := repo.GetRuns()

			// Assert
			var lifeCycleState string
			if response.Runs[0].State.LifeCycleState != nil {
				lifeCycleState = string(*response.Runs[0].State.LifeCycleState)
			}
			assert.Equal(t, tt.expectedLifeCycleState, lifeCycleState)

			var resultState string
			if response.Runs[0].State.ResultState != nil {
				resultState = string(*response.Runs[0].State.ResultState)
			}
			assert.Equal(t, tt.expectedResultState, resultState)
		})
	}
}

var cancelRunTests = []struct {
	name                string
	waitTime            int64
	expectedResultState string
}{
	{"Cancel run while PENDING", 0, dbmodel.RunResultStateCanceled},
	{"Cancel run while RUNNING", testLifeCyclePeriodLength * 2, dbmodel.RunResultStateCanceled},
	{"Cancel run while TERMINATING", testLifeCyclePeriodLength * 3, dbmodel.RunResultStateCanceled},
	{"Cancel run while TERMINATED", testLifeCyclePeriodLength * 6, dbmodel.RunResultStateSuccess},
}

func TestCancelRun(t *testing.T) {
	// Arrange
	repo := NewRunRepository(testLifeCyclePeriodLength)
	mockClock.Set(testTime)
	clock = mockClock

	for i, tt := range cancelRunTests {
		t.Run(tt.name, func(t *testing.T) {
			// Act
			id := repo.CreateRun(model.JobsRunsSubmitRequest{}, int64(i))
			mockClock.Set(testTime.Add(time.Duration(tt.waitTime) * time.Millisecond))
			_ = repo.CancelRun(id)

			// Assert
			run := repo.GetRun(id)

			var resultState string
			if run.State.ResultState != nil {
				resultState = string(*run.State.ResultState)
			}
			assert.Equal(t, tt.expectedResultState, resultState)
		})
	}
}

func setupLifeCycleTests() (*RunRepository, int64) {
	mockClock.Set(testTime)
	clock = mockClock

	repo := NewRunRepository(testLifeCyclePeriodLength)
	request := model.JobsRunsSubmitRequest{}
	id := repo.CreateRun(request, 1)
	return repo, id
}
@@ -0,0 +1,12 @@
package repository

import (
	"time"

	mockableClock "github.com/stephanos/clock"
)

var clock = mockableClock.New()

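// makeTimestamp returns the current time from the package-level clock as Unix
// milliseconds. Tests substitute a mock clock here so run lifecycle timing can
// be controlled deterministically.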
func makeTimestamp() int64 {
	return clock.Now().UnixNano() / (int64(time.Millisecond) / int64(time.Nanosecond))
}
@@ -0,0 +1,43 @@
package router

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

type prometheusHTTPMetric struct {
	Prefix                string
	ClientConnected       prometheus.Gauge
	ResponseTimeHistogram *prometheus.HistogramVec
	Buckets               []float64
}

func initPrometheusHTTPMetric(prefix string, buckets []float64) *prometheusHTTPMetric {
	phm := prometheusHTTPMetric{
		Prefix: prefix,
		ClientConnected: promauto.NewGauge(prometheus.GaugeOpts{
			Name: prefix + "_client_connected",
			Help: "Number of active client connections",
		}),
		ResponseTimeHistogram: promauto.NewHistogramVec(prometheus.HistogramOpts{
			Name:    prefix + "_response_time",
			Help:    "Histogram of response time for handler",
			Buckets: buckets,
		}, []string{"code", "type", "action", "method"}),
	}

	return &phm
}

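// createHandlerWrapper returns middleware that tracks in-flight requests via
// the ClientConnected gauge and records response durations in the
// ResponseTimeHistogram, curried with the supplied type and action labels.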
func (phm *prometheusHTTPMetric) createHandlerWrapper(typeLabel string, actionLabel string) func(http.Handler) http.Handler {
	return func(handler http.Handler) http.Handler {
		wrappedHandler := promhttp.InstrumentHandlerInFlight(phm.ClientConnected,
			promhttp.InstrumentHandlerDuration(phm.ResponseTimeHistogram.MustCurryWith(prometheus.Labels{"type": typeLabel, "action": actionLabel}),
				handler),
		)
		return wrappedHandler
	}
}
@@ -0,0 +1,218 @@
package router

import (
	"net/http"

	"github.com/microsoft/azure-databricks-operator/mockapi/handler"
	"github.com/microsoft/azure-databricks-operator/mockapi/middleware"
	"github.com/microsoft/azure-databricks-operator/mockapi/repository"

	"github.com/gorilla/mux"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

const apiPrefix = "/api/2.0/"

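// The response-time histogram uses 20 linear buckets (0, 5, 10, … 95).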
var phm = initPrometheusHTTPMetric("databricks_mock_api", prometheus.LinearBuckets(0, 5, 20))

// NewRouter creates a new Router with mappings configured
func NewRouter() *mux.Router {
	router := mux.NewRouter().StrictSlash(true)

	// Routes registered directly here don't have middleware applied
	router.Name("Index").Methods("GET").Path("/").Handler(http.HandlerFunc(handler.Index))
	router.Name("ConfigGet").Methods("GET").Path("/config").Handler(http.HandlerFunc(handler.GetConfig()))
	router.Name("ConfigSet").Methods("PUT").Path("/config").Handler(http.HandlerFunc(handler.SetConfig()))
	router.Name("ConfigPatch").Methods("PATCH").Path("/config").Handler(http.HandlerFunc(handler.PatchConfig()))
	router.Name("Metrics").Methods("GET").Path("/metrics").Handler(promhttp.Handler())

	// Build up API Routes and apply middleware
	jobRepo := repository.NewJobRepository()
	routes := append(getJobRoutes(jobRepo), getClusterRoutes()...)
	routes = append(routes, getRunRoutes(jobRepo)...)

	for _, route := range routes {
		promMiddleware := phm.createHandlerWrapper(route.TypeLabel, route.ActionLabel)
		handler := middleware.Add(route.HandlerFunc, promMiddleware, middleware.RateLimit, middleware.AddLatency, middleware.ErrorResponse)
		router.
			Name(route.Name).
			Methods(route.Method).
			Path(route.Pattern).
			Handler(handler).
			Queries(route.Queries...)
	}

	router.NotFoundHandler = http.HandlerFunc(handler.NotFoundPage)
	router.MethodNotAllowedHandler = http.HandlerFunc(handler.MethodNotAllowed)

	return router
}

func getRunRoutes(jobRepo *repository.JobRepository) Routes {
	runRepo := repository.NewRunRepository(1500)

	return Routes{
		Route{
			"RunsSubmit",
			"runs",
			"submit",
			"POST",
			apiPrefix + "jobs/runs/submit",
			http.HandlerFunc(handler.SubmitRun(runRepo, jobRepo)),
			nil,
		},
		Route{
			"RunsList",
			"runs",
			"list",
			"GET",
			apiPrefix + "jobs/runs/list",
			http.HandlerFunc(handler.ListRuns(runRepo)),
			nil,
		},
		Route{
			"RunsGet",
			"runs",
			"get",
			"GET",
			apiPrefix + "jobs/runs/get",
			http.HandlerFunc(handler.GetRun(runRepo)),
			[]string{"run_id", "{run_id:[0-9]+}"},
		},
		Route{
			"RunsGetOutput",
			"runs",
			"get_output",
			"GET",
			apiPrefix + "jobs/runs/get-output",
			http.HandlerFunc(handler.GetRunOutput(runRepo)),
			[]string{"run_id", "{run_id:[0-9]+}"},
		},
		Route{
			"RunsDelete",
			"runs",
			"delete",
			"POST",
			apiPrefix + "jobs/runs/delete",
			http.HandlerFunc(handler.DeleteRun(runRepo)),
			nil,
		},
		Route{
			"RunsCancel",
			"runs",
			"cancel",
			"POST",
			apiPrefix + "jobs/runs/cancel",
			http.HandlerFunc(handler.CancelRun(runRepo)),
			nil,
		},
	}
}

func getJobRoutes(jobRepo *repository.JobRepository) Routes {
	return Routes{
		Route{
			"JobsCreate",
			"jobs",
			"create",
			"POST",
			apiPrefix + "jobs/create",
			http.HandlerFunc(handler.CreateJob(jobRepo)),
			nil,
		},
		Route{
			"JobsList",
			"jobs",
			"list",
			"GET",
			apiPrefix + "jobs/list",
			http.HandlerFunc(handler.ListJobs(jobRepo)),
			nil,
		},
		Route{
			"JobsGet",
			"jobs",
			"get",
			"GET",
			apiPrefix + "jobs/get",
			http.HandlerFunc(handler.GetJob(jobRepo)),
			[]string{"job_id", "{job_id:[0-9]+}"},
		},
		Route{
			"JobsDelete",
			"jobs",
			"delete",
			"POST",
			apiPrefix + "jobs/delete",
			http.HandlerFunc(handler.DeleteJob(jobRepo)),
			nil,
		},
	}
}

func getClusterRoutes() Routes {
	clusterRepo := repository.NewClusterRepository()
	return Routes{
		Route{
			"ClustersCreate",
			"clusters",
			"create",
			"POST",
			apiPrefix + "clusters/create",
			http.HandlerFunc(handler.CreateCluster(clusterRepo)),
			nil,
		},
		Route{
			"ClustersList",
			"clusters",
			"list",
			"GET",
			apiPrefix + "clusters/list",
			http.HandlerFunc(handler.ListClusters(clusterRepo)),
			nil,
		},
		Route{
			"ClustersGet",
			"clusters",
			"get",
			"GET",
			apiPrefix + "clusters/get",
			http.HandlerFunc(handler.GetCluster(clusterRepo)),
			[]string{"job_id", "{job_id}"},
		},
		Route{
			"ClustersEdit",
			"clusters",
			"edit",
			"POST",
			apiPrefix + "clusters/edit",
			http.HandlerFunc(handler.EditCluster(clusterRepo)),
			nil,
		},
		Route{
			"ClustersDelete",
			"clusters",
			"delete",
			"POST",
			apiPrefix + "clusters/delete",
			http.HandlerFunc(handler.DeleteCluster(clusterRepo)),
			nil,
		},
	}
}

// Route represents an API route/endpoint
type Route struct {
	Name        string
	TypeLabel   string
	ActionLabel string
	Method      string
	Pattern     string
	HandlerFunc http.Handler
	Queries     []string
}

// Routes is an array of Routes
type Routes []Route