Seldon inference pipeline (#68)
Parent: 840ec0e5da
Commit: 2b5422a727
@@ -0,0 +1,13 @@
FROM python:3.7-slim

ENV APP_HOME /app
ENV WORKERS 1
ENV THREADS 8
ENV PREDICTIVE_UNIT_SERVICE_PORT 8080
WORKDIR $APP_HOME
COPY pipeline.py requirements.txt ./
ENV PYTHONUNBUFFERED=1

RUN pip install --no-cache-dir -r ./requirements.txt

CMD ["sh","-c","gunicorn --bind 0.0.0.0:$PREDICTIVE_UNIT_SERVICE_PORT --workers $WORKERS --threads $THREADS pipeline"]
@@ -0,0 +1,5 @@
docker build . -t kubeflowyoacr.azurecr.io/seldon-pipeline:latest

docker push kubeflowyoacr.azurecr.io/seldon-pipeline:latest

kubectl apply -f pipeline.yaml -n serving
@@ -0,0 +1 @@
<mxfile host="app.diagrams.net" modified="2020-07-17T18:50:07.556Z" agent="5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.1.2 Safari/605.1.15" etag="JktiPo-wsTHiWXRsLdoj" version="13.4.6" type="device"><diagram id="32LcnlI588N8-KOHWvop" name="Page-1">7VxRl6I2FP41PrYHCAF8nHG2uz09PdPT6Z52n3oyEJVTJDaGUfvrG5YgkIQVNQO4+jTmJkL8vu9e7r3GmYDZaveRovXyVxLhZOJY0W4CniaOY1tTn//JLfvC4ruwMCxoHBUmqzK8xP/h8p3CmsUR3ghbYWKEJCxeN40hSVMcsoYNUUq2zWVzkkQNwxotcGMbueElRAlWlv0ZR2xZWANYW/0Jx4tleWfbEjMrVC4Whs0SRWRbM4EPEzCjhLDi1Wo3w0kOXhOXn1pmDxujOGVd3hDa8+j58+pzHC62z5+g+wt5Bj+Iq7yhJBMf+Od0nTFu+oOidDMndIWp2D/bl6BQkqURzq9rT8Djdhkz/LJGYT675TLgtiVbJWJ6HifJjCSEfn0viBAO5iG3bxgl/+DajBcG+HXOZ9RPVm4TU4Z3NZP4pB8xWWFG93yJmAVTgbqQne2J8bYi0S6ZWdYILNchoZvF4dIVtPyFQPcEpB0F6eeMfY9Qe2BoqIEC9e8kY4bBxXYEsa8Dd+r5AHnvA64zHRpcVwF3RlavcWoY3nkQ4lCr3dcAujz8GoH3oFUBr2sNDS9U4C2fqQ9mYwPEQeTq8A2cV+AZkq8djA1frxXfx+8AXzh47PVb8X26QnwDR9Lv4GlE0Irv7ArxdR04MnynCr4KrDiNHvLKgo/CBG02cdhEksNB93/xgfUjLIdf8mE5eNo1RnsxaiKdkpTf7zHM6NuBwlbEGaILzL7xscQ6HJXVTgsvNdyhBvbSRnGCWPzWrJF0XIg7/EbilNUeu1LGKJG5IRkNsXhPvaKRLuPC5nWAfKECFuVCnD20ry1b5ws27du1oXa/bdtypKTCbyznL4r7Vxo94H9BdaEWcr3p1pBE4agkKifewDEkUp4DvYtIgSVtGPQhO7WqPV12u5jVVMdHX2ozlebyQSm5VpUVrBxX2VE1eqNSoyuReygD+1KjMcGo+Xcvgjk3tp0vtK5hD4xaaGUUuVxoQc9CUwuRk4XWSv5YyJIwliE+lyu7Yx5ljCu1qOk5KDRCQhUhLkp4jkYIr2OEcMYluqEjxKWJkQf7SIxM1JG9JkbjVhm0DKnM7/k5BNR+eb+xzR4ktrnXGdvkrwPguapr63sbjm2edB+vj6IPqN9RnC7pM/Px0hXsmiNUbqF3BUOSBh0l7Y5a0ucHUlnSsm+YkrTc1HP6kLSJsvRCSVsnSPqCVrGx0B3cdX6JzqGkc7cXnRuoivvU+V3SVyVpdwhJm2geXF820lXS/qglDc5uWUmSVroQhiQdyBVBLwm2webBKdKsuYHT8AP7iB8Y0nTQUdPjzrBHr2nPljQ97UHT5aPBUJj2T9LngKmHf9d0H5r2PX2u9L6a1hy68LlWvH+z/FD/Y4QYqkYT8FANrJrdf1I8geEda2pfW/HVz2wJE0riRZo7EFcq5vbH/DBWHKLkQUys4ijKb6M9D1adGDN0wNb1pa+WNAdAXY2O5dMW5o4vq4fDu3L2de83wprdYO1A0WCsabrod9Yk1hzJ14A9NGuaRvGdtSM/QRieNU0vtDNr9o2wZku+dshzBmNN09nrzJpzI6w5Us/VG/y5pmle3SOkxJrSVhw8Qmr6M/cIKbMmHaB2Nb9c7JW10vfvvvYN1nyZtaF9DZ5fY9+Or/nyd4lTd2DWnEtYux3iPMndoD+0ux1pj6wpjuKQxST9m+JNlrCbzymBfMDQGZrCI1V3dwpvyBGh7fTFIh9W//SlaEVX/zoHfPgf</diagram></mxfile>
Binary file not shown.
After | Width: | Height: | Size: 58 KiB
@@ -0,0 +1,81 @@
from flask import Flask, request
import random


application = Flask(__name__)


# Randomly route to either 1st or 2nd child
@application.route("/route", methods=['GET', 'POST'])
def route():
    if request.method == 'GET':
        return "Healthy"
    else:
        payload = request.get_json()
        print(f'Route message: {payload}')
        child = random.randint(0, 1)

        return '{"data":{"ndarray":[' + str(child) + ']}}'


# Prediction is emulated by incrementing the input payload
# e.g. {"data":"0"} -> {"data":"1"}
@application.route("/predict", methods=['GET', 'POST'])
def predict() -> str:
    if request.method == 'GET':
        return "Healthy"
    else:
        payload = request.get_json()
        print(f'Input message: {payload}')

        data = int(payload['data'])
        data = data + 1

        return '{"data":"' + str(data) + '"}'


# Aggregate the output of two children by concatenating
# e.g. {"data":"101"},{"data":"101"} -> {"data":"101101"}
@application.route("/aggregate", methods=['GET', 'POST'])
def aggregate():
    if request.method == 'GET':
        return "Healthy"
    else:
        payload = request.get_json()
        print(f'Combine message: {payload}')

        combined_data = payload[0]['data'] + payload[1]['data']
        return '{"data":"' + str(combined_data) + '"}'


# Transform the input data by adding 100 to the payload
# e.g. {"data":"0"} -> {"data":"100"}
@application.route("/transform-input", methods=['GET', 'POST'])
def transform_input():
    if request.method == 'GET':
        return "Healthy"
    else:
        payload = request.get_json()
        print(f'Transform input message: {payload}')

        data = int(payload['data'])
        data = data + 100

        return '{"data":"' + str(data) + '"}'


# Put the output data into the "prediction_result" JSON
# e.g. {"data":"102"} -> {"prediction_result":"102"}
@application.route("/transform-output", methods=['GET', 'POST'])
def transform_output():
    if request.method == 'GET':
        return "Healthy"
    else:
        payload = request.get_json()
        print(f'Transform output message: {payload}')

        return '{"prediction_result":"' + payload['data'] + '"}'


if __name__ == "__main__":
    application.run(host='0.0.0.0')
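
These endpoints can be exercised locally without Seldon. Below is a minimal smoke test (a sketch, not part of this commit; it assumes the app was started with `python pipeline.py`, which serves on Flask's default port 5000):

# smoke_test.py -- hypothetical helper, not included in this commit.
# Assumes pipeline.py is running locally via `python pipeline.py` (Flask default port 5000).
import requests

BASE = "http://localhost:5000"

# /transform-input adds 100: {"data":"0"} -> {"data":"100"}
r = requests.post(f"{BASE}/transform-input", json={"data": "0"})
print(r.text)  # {"data":"100"}

# /predict increments the payload: {"data":"100"} -> {"data":"101"}
r = requests.post(f"{BASE}/predict", json=r.json())
print(r.text)  # {"data":"101"}

# /aggregate concatenates two child payloads
r = requests.post(f"{BASE}/aggregate", json=[{"data": "101"}, {"data": "101"}])
print(r.text)  # {"data":"101101"}

# /transform-output wraps the result into "prediction_result"
r = requests.post(f"{BASE}/transform-output", json={"data": "101101"})
print(r.text)  # {"prediction_result":"101101"}
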
@@ -0,0 +1,104 @@
apiVersion: machinelearning.seldon.io/v1alpha2
kind: SeldonDeployment
metadata:
  name: pipeline
spec:
  name: pipeline-deployment
  predictors:
  - componentSpecs:
    - spec:
        containers:
        - image: kubeflowyoacr.azurecr.io/seldon-pipeline:latest
          name: router
          imagePullPolicy: Always
          ports:
          - containerPort: 8080
            name: http
        - image: kubeflowyoacr.azurecr.io/seldon-pipeline:latest
          name: model-a
          imagePullPolicy: Always
          ports:
          - containerPort: 8081
            name: http
        - image: kubeflowyoacr.azurecr.io/seldon-pipeline:latest
          name: model-b
          imagePullPolicy: Always
          ports:
          - containerPort: 8082
            name: http
        - image: kubeflowyoacr.azurecr.io/seldon-pipeline:latest
          name: model-c
          imagePullPolicy: Always
          ports:
          - containerPort: 8083
            name: http
        - image: kubeflowyoacr.azurecr.io/seldon-pipeline:latest
          name: model-d
          imagePullPolicy: Always
          ports:
          - containerPort: 8084
            name: http
        - image: kubeflowyoacr.azurecr.io/seldon-pipeline:latest
          name: combiner
          imagePullPolicy: Always
          ports:
          - containerPort: 8085
            name: http
        - image: kubeflowyoacr.azurecr.io/seldon-pipeline:latest
          name: input-transformer
          imagePullPolicy: Always
          ports:
          - containerPort: 8086
            name: http
        - image: kubeflowyoacr.azurecr.io/seldon-pipeline:latest
          name: output-transformer
          imagePullPolicy: Always
          ports:
          - containerPort: 8087
            name: http
    graph:
      name: output-transformer
      type: OUTPUT_TRANSFORMER
      endpoint:
        type: REST
      children:
      - name: input-transformer
        type: TRANSFORMER
        endpoint:
          type: REST
        children:
        - name: router
          type: ROUTER
          endpoint:
            type: REST
          children:
          - name: model-a
            endpoint:
              type: REST
            type: MODEL
            children:
            - name: model-b
              endpoint:
                type: REST
              type: MODEL
              children: []
          - name: combiner
            endpoint:
              type: REST
            type: COMBINER
            children:
            - name: model-c
              endpoint:
                type: REST
              type: MODEL
              children: []
            - name: model-d
              endpoint:
                type: REST
              type: MODEL
              children: []
    name: pipeline-predictor
    replicas: 1
@@ -0,0 +1,18 @@
## Seldon Inference pipeline

This sample demonstrates a simple inference pipeline containing all component types of a Seldon predictor:
* Model
* Router
* Combiner
* Transformer
* Output_Transformer

![Seldon Inference pipeline](diagrams/seldon-pipeline.png)

The input to the pipeline is a JSON message with an integer payload. The Input Transformer component adds 100 to the payload and forwards the request to the Router. The Router component randomly routes the request to either Model A or to the Combiner.
Every model emulates a prediction by incrementing its payload. For example, the prediction made by Model A for {"data":"100"} is {"data":"101"}. Model B takes the prediction from Model A as its input and returns the prediction {"data":"102"}.
The Combiner component aggregates the results of its children: it concatenates the predictions from Model C and Model D and returns {"data":"101101"}.
The Output Transformer wraps the result into a JSON message with a "prediction_result" attribute, as illustrated in the example below.

All components are implemented in a single [pipeline.py](pipeline.py) and packaged in a single [Docker image](Dockerfile). The Seldon Deployment with the inference pipeline is defined in [pipeline.yaml](pipeline.yaml).
@@ -0,0 +1,3 @@
Flask==1.1.1
gunicorn==20.0.4
requests
@@ -0,0 +1,8 @@
NAMESPACE=serving

kubectl port-forward svc/pipeline-pipeline-predictor 8000:8000 -n $NAMESPACE
curl -v -H "Content-Type: application/json" -d '{"data":"0"}' http://localhost:8000/api/v0.1/predictions

CLUSTER_IP=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
curl -v -H "Content-Type: application/json" -d '{"data":"0"}' $CLUSTER_IP/seldon/$NAMESPACE/pipeline/api/v0.1/predictions
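
The same request can be issued from Python (a sketch, not part of this commit; it assumes the port-forward above is already running):

# query_pipeline.py -- hypothetical helper script, not included in this commit.
# Assumes `kubectl port-forward svc/pipeline-pipeline-predictor 8000:8000 -n serving` is active.
import requests

resp = requests.post(
    "http://localhost:8000/api/v0.1/predictions",
    json={"data": "0"},
)
# Depending on the Router's random choice, the body is either
# {"prediction_result":"102"} or {"prediction_result":"101101"}.
print(resp.status_code, resp.text)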