зеркало из https://github.com/Azure/AzureVision.git
fixes to custom vision, add cusvis vignette
This commit is contained in:
Родитель
f769926541
Коммит
66c10b4e7f
|
@ -45,6 +45,7 @@ export(get_tag)
|
|||
export(identify_regions)
|
||||
export(list_computervision_domains)
|
||||
export(list_images)
|
||||
export(list_model_exports)
|
||||
export(list_models)
|
||||
export(list_projects)
|
||||
export(list_tags)
|
||||
|
@ -56,6 +57,7 @@ export(remove_image_regions)
|
|||
export(remove_image_tags)
|
||||
export(remove_images)
|
||||
export(remove_tags)
|
||||
export(rename_model)
|
||||
export(show_model)
|
||||
export(show_training_performance)
|
||||
export(tag)
|
||||
|
|
|
@ -10,6 +10,15 @@ AzureCognitive::call_cognitive_endpoint
|
|||
|
||||
utils::globalVariables("id")
|
||||
|
||||
|
||||
.onLoad <- function(libname, pkgname)
|
||||
{
|
||||
options(azure_computervision_api_version="v2.0")
|
||||
options(azure_customvision_training_api_version="v3.1")
|
||||
options(azure_customvision_prediction_api_version="v3.0")
|
||||
}
|
||||
|
||||
|
||||
confirm_delete <- function(msg, confirm)
|
||||
{
|
||||
if(!interactive() || !confirm)
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
print.customvision_model <- function(x, ...)
|
||||
{
|
||||
cat("Azure Custom Vision model\n")
|
||||
cat(" Project/iteration: ", x$project$project$name, "/", x$id, "\n", sep="")
|
||||
cat(" Project/iteration: ", x$project$project$name, "/", x$name, " (", x$id, ")", "\n", sep="")
|
||||
invisible(x)
|
||||
}
|
||||
|
||||
|
@ -10,13 +10,14 @@ print.customvision_model <- function(x, ...)
|
|||
#' Create, retrieve and delete a model iteration
|
||||
#'
|
||||
#' @param project A Custom Vision project.
|
||||
#' @param object For the `delete_model` methods, A Custom Vision project or model, as appropriate.
|
||||
#' @param model A Custom Vision model.
|
||||
#' @param object For the `delete_model` method, a Custom Vision project or model, as appropriate.
|
||||
#' @param training_method The training method to use. The default "quick" is faster but may be less accurate. The "advanced" method is slower but produces better results.
|
||||
#' @param max_time For advanced training, the maximum training time in hours.
|
||||
#' @param force For advanced training, whether to refit the model even if the data has not changed since the last iteration.
|
||||
#' @param email For advanced training, an email address to notify when the training is complete.
|
||||
#' @param wait whether to wait until training is complete (or the maximum training time has elapsed) before returning.
|
||||
#' @param iteration For `get_model` and `delete_model.customvision_project`, the iteration ID.
|
||||
#' @param iteration For `get_model` and `delete_model.customvision_project`, either the iteration name or ID.
|
||||
#' @param as For `list_models`, the format in which to return results: as a named vector of model iteration IDs, or a list of model objects.
|
||||
#' @param confirm For the `delete_model` methods, whether to ask for confirmation first.
|
||||
#' @param ... Arguments passed to lower-level functions.
|
||||
|
@ -27,7 +28,7 @@ print.customvision_model <- function(x, ...)
|
|||
#'
|
||||
#' By default, AzureVision will use the latest model iteration for actions such as prediction, showing performance statistics, and so on. You can list the model iterations with `list_models`, and retrieve a specific iteration by passing the iteration ID to `get_model`.
|
||||
#' @return
|
||||
#' For `train_model` and `get_model`, an object of class `customvision_model` which is a handle to the iteration.
|
||||
#' For `train_model`, `get_model` and `rename_model`, an object of class `customvision_model` which is a handle to the iteration.
|
||||
#'
|
||||
#' For `list_models`, based on the `as` argument: `as="ids"` returns a named vector of model iteration IDs, while `as="list"` returns a list of model objects.
|
||||
#' @seealso
|
||||
|
@ -90,14 +91,21 @@ list_models <- function(project, as=c("ids", "list"))
|
|||
#' @export
|
||||
get_model <- function(project, iteration=NULL)
|
||||
{
|
||||
if(is.null(iteration))
|
||||
iteration <- list_models(project)[1]
|
||||
|
||||
iteration <- find_model_iteration(iteration, project)
|
||||
res <- do_training_op(project, file.path("iterations", iteration))
|
||||
make_model_iteration(res, project)
|
||||
}
|
||||
|
||||
|
||||
#' @rdname customvision_train
|
||||
#' @export
|
||||
rename_model <- function(object, name, ...)
|
||||
{
|
||||
res <- do_training_op(object$project, file.path("iterations", object$id), body=list(name=name), http_verb="PATCH")
|
||||
make_model_iteration(res, object$project)
|
||||
}
|
||||
|
||||
|
||||
#' @rdname customvision_train
|
||||
#' @export
|
||||
delete_model <- function(object, ...)
|
||||
|
@ -113,9 +121,7 @@ delete_model.customvision_project <- function(object, iteration=NULL, confirm=TR
|
|||
if(!confirm_delete("Are you sure you want to delete this model iteration?", confirm))
|
||||
return(invisible(NULL))
|
||||
|
||||
if(is.null(iteration))
|
||||
iteration <- list_models(object)[[1]]$id
|
||||
|
||||
iteration <- find_model_iteration(iteration, project)
|
||||
do_training_op(object, file.path("iterations", iteration), http_verb="DELETE")
|
||||
invisible(NULL)
|
||||
}
|
||||
|
@ -190,13 +196,16 @@ summary.customvision_model <- function(object, ...)
|
|||
#' Publishing a model makes it available to clients as a predictive service. Exporting a model serialises it to a file of the given format in Azure storage, which can then be downloaded. Each iteration of the model can be published or exported separately.
|
||||
#'
|
||||
#' The `format` argument to `export_model` can be one of the following. Note that exporting a model requires that the project was created with support for it.
|
||||
#' - "onnx 1.0", "onnx 1.2": ONNX 1.0 or 1.2
|
||||
#' - "onnx": ONNX 1.2
|
||||
#' - "coreml": CoreML, for iOS 11 devices
|
||||
#' - "tensorflow": TensorFlow
|
||||
#' - "tensorflow lite": TensorFlow Lite for Android devices
|
||||
#' - "linux docker", "windows docker", "arm docker": A Docker image for the given platform (Raspberry Pi 3 in the case of ARM)
|
||||
#' - "vaidk": Vision AI Development Kit
|
||||
#' @return
|
||||
#' `export_model` returns the URL of the exported file, invisibly if `download=TRUE`.
|
||||
#'
|
||||
#' `list_model_exports` returns a data frame detailing the formats the current model has been exported to, along with their download URLs.
|
||||
#' @seealso
|
||||
#' [`train_model`], [`get_model`], [`customvision_predictive_service`], [`predict.classification_service`], [`predict.object_detection_service`]
|
||||
#' @rdname customvision_publish
|
||||
|
@ -209,6 +218,7 @@ publish_model <- function(model, name, prediction_resource)
|
|||
op <- file.path("iterations", model$id, "publish")
|
||||
options <- list(publishName=name, predictionId=prediction_resource)
|
||||
do_training_op(model$project, op, options=options, http_verb="POST")
|
||||
invisible(NULL)
|
||||
}
|
||||
|
||||
|
||||
|
@ -234,31 +244,54 @@ export_model <- function(model, format, download=TRUE, destfile=NULL)
|
|||
if(!is_compact_domain(settings$domainId))
|
||||
stop("Project was not created with support for exporting", call.=FALSE)
|
||||
|
||||
platform <- get_export_platform(format)
|
||||
if(platform$platform == "VAIDK" && is_empty(settings$targetExportPlatforms))
|
||||
plat <- get_export_platform(format)
|
||||
if(plat$platform == "VAIDK" && is_empty(settings$targetExportPlatforms))
|
||||
stop("Project does not support exporting to Vision AI Dev Kit format", call.=FALSE)
|
||||
|
||||
# if already exported, don't export again
|
||||
exports <- list_model_exports(model)
|
||||
this_exp <- find_model_export(plat, exports)
|
||||
if(is_empty(this_exp))
|
||||
{
|
||||
op <- file.path("iterations", model$id, "export")
|
||||
res <- do_training_op(model$project, op, options=plat, http_verb="POST")
|
||||
|
||||
# wait for it to appear in the list of exports
|
||||
for(i in 1:500)
|
||||
{
|
||||
res <- do_training_op(model$project, op, options=platform, http_verb="POST")
|
||||
status <- res$status
|
||||
if(res$status %in% c("Done", "Failed"))
|
||||
break
|
||||
Sys.sleep(10)
|
||||
}
|
||||
if(res$status != "Done")
|
||||
stop("Error exporting model", call.=FALSE)
|
||||
exports <- list_model_exports(model)
|
||||
this_exp <- find_model_export(plat, exports)
|
||||
if(is_empty(this_exp))
|
||||
stop("Exported model not found", call.=FALSE)
|
||||
|
||||
status <- exports$status[this_exp]
|
||||
if(status %in% c("Done", "Failed"))
|
||||
break
|
||||
Sys.sleep(5)
|
||||
}
|
||||
if(status != "Done")
|
||||
stop("Unable to export model", call.=FALSE)
|
||||
}
|
||||
|
||||
dl_link <- exports$downloadUri[this_exp]
|
||||
if(download)
|
||||
{
|
||||
if(is.null(destfile))
|
||||
destfile <- basename(httr::parse_url(res$downloadUri)$path)
|
||||
destfile <- basename(httr::parse_url(dl_link)$path)
|
||||
message("Downloading to ", destfile)
|
||||
utils::download.file(res$downloadUri, destfile)
|
||||
invisible(res$downloadUri)
|
||||
utils::download.file(dl_link, destfile)
|
||||
invisible(dl_link)
|
||||
}
|
||||
else res$downloadUri
|
||||
else dl_link
|
||||
}
|
||||
|
||||
|
||||
#' @rdname customvision_publish
|
||||
#' @export
|
||||
list_model_exports <- function(model)
|
||||
{
|
||||
op <- file.path("iterations", model$id, "export")
|
||||
do_training_op(model$project, op, simplifyVector=TRUE)
|
||||
}
|
||||
|
||||
|
||||
|
@ -271,7 +304,7 @@ as_datetime <- function(x, format="%Y-%m-%dT%H:%M:%S", tz="UTC")
|
|||
make_model_iteration <- function(iteration, project)
|
||||
{
|
||||
structure(
|
||||
list(project=project, id=iteration$id),
|
||||
list(project=project, id=iteration$id, name=iteration$name),
|
||||
class="customvision_model"
|
||||
)
|
||||
}
|
||||
|
@ -284,11 +317,44 @@ get_export_platform <- function(format)
|
|||
"arm docker"=list(platform="DockerFile", flavor="ARM"),
|
||||
"linux docker"=list(platform="DockerFile", flavor="Linux"),
|
||||
"windows docker"=list(platform="DockerFile", flavor="Windows"),
|
||||
"onnx 1.0"=list(platform="ONNX", flavor="ONNX10"),
|
||||
"onnx 1.2"=list(platform="ONNX", flavor="ONNX12"),
|
||||
"tensorflow"=list(platform="TensorFlow"),
|
||||
"onnx"=list(platform="ONNX"),
|
||||
"tensorflow"=list(platform="TensorFlow", flavor="TensorFlowNormal"),
|
||||
"tensorflow lite"=list(platform="TensorFlow", flavor="TensorFlowLite"),
|
||||
"vaidk"=list(platform="VAIDK"),
|
||||
stop("Unrecognised export format '", format, "'", call.=FALSE)
|
||||
)
|
||||
}
|
||||
|
||||
|
||||
find_model_export <- function(platform, exports)
|
||||
{
|
||||
this_plat <- exports$platform == platform$platform
|
||||
this_flav <- if(!is.null(platform$flavor))
|
||||
exports$flavor == platform$flavor
|
||||
else TRUE
|
||||
which(this_plat & this_flav)
|
||||
}
|
||||
|
||||
|
||||
find_model_iteration <- function(iteration=NULL, project)
|
||||
{
|
||||
iters <- list_models(project)
|
||||
|
||||
if(is.null(iteration))
|
||||
return(iters[1])
|
||||
|
||||
if(is_guid(iteration))
|
||||
{
|
||||
if(!(iteration %in% iters))
|
||||
stop("Invalid model iteration ID", call.=FALSE)
|
||||
return(iteration)
|
||||
}
|
||||
else
|
||||
{
|
||||
if(!(iteration %in% names(iters)))
|
||||
stop("Invalid model iteration name", call.=FALSE)
|
||||
return(iters[iteration])
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
|
22
R/endpoint.R
22
R/endpoint.R
|
@ -1,9 +1,11 @@
|
|||
#' Endpoint objects for computer vision services
|
||||
#'
|
||||
#' @param url The URL of the endpoint.
|
||||
#' @param key A subscription key. Can be single-service or multi-service.
|
||||
#' @param aad_token For the Computer Vision endpoint, an OAuth token object, of class [`AzureAuth::AzureToken`]. You can supply this as an alternative to a subscription key.
|
||||
#' @param ... Other arguments to pass to [`AzureCognitive::cognitive_endpoint`].
|
||||
#' @details
|
||||
#' These are functions to create service-specific endpoint objects. They handle differences between the services in how they perform authentication, endpoint paths, and so on.
|
||||
#' These are functions to create service-specific endpoint objects. Computer Vision supports authentication via either a subscription key or Azure Active Directory (AAD) token; Custom Vision only supports subscription key. Note that there are _two_ kinds of Custom Vision endpoint, one for training and the other for prediction.
|
||||
#' @return
|
||||
#' An object inheriting from `cognitive_endpoint`. The subclass indicates the type of service/endpoint: Computer Vision, Custom Vision training, or Custom Vision prediction.
|
||||
#' @seealso
|
||||
|
@ -18,25 +20,31 @@
|
|||
#' customvision_prediction_endpoint("https://westus.api.cognitive.microsoft.com", key="key")
|
||||
#'
|
||||
#' @export
|
||||
computervision_endpoint <- function(url, ...)
|
||||
computervision_endpoint <- function(url, key=NULL, aad_token=NULL, ...)
|
||||
{
|
||||
cognitive_endpoint(url, service_type="ComputerVision", ...)
|
||||
endp <- cognitive_endpoint(url, service_type="ComputerVision", key=key, aad_token=aad_token, ...)
|
||||
endp$url$path <- file.path("vision", getOption("azure_computervision_api_version"))
|
||||
endp
|
||||
}
|
||||
|
||||
|
||||
#' @rdname endpoint
|
||||
#' @export
|
||||
customvision_training_endpoint <- function(url, ...)
|
||||
customvision_training_endpoint <- function(url, key=NULL, ...)
|
||||
{
|
||||
cognitive_endpoint(url, service_type="CustomVision.Training", ..., auth_header="training-key")
|
||||
endp <- cognitive_endpoint(url, service_type="CustomVision.Training", key=key, ..., auth_header="training-key")
|
||||
endp$url$path <- file.path("customvision", getOption("azure_customvision_training_api_version"))
|
||||
endp
|
||||
}
|
||||
|
||||
|
||||
#' @rdname endpoint
|
||||
#' @export
|
||||
customvision_prediction_endpoint <- function(url, ...)
|
||||
customvision_prediction_endpoint <- function(url, key=NULL, ...)
|
||||
{
|
||||
cognitive_endpoint(url, service_type="CustomVision.Prediction", ..., auth_header="prediction-key")
|
||||
endp <- cognitive_endpoint(url, service_type="CustomVision.Prediction", key=key, ..., auth_header="prediction-key")
|
||||
endp$url$path <- file.path("customvision", getOption("azure_customvision_prediction_api_version"))
|
||||
endp
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -35,7 +35,7 @@ describe(vis, bill_url)
|
|||
|
||||
## Custom Vision
|
||||
|
||||
To communicate with the Custom Vision service, call the `customvision_endpoint` function with the service URL and key.
|
||||
To communicate with the Custom Vision service, call the `customvision_training_endpoint` function with the service URL and key.
|
||||
|
||||
----
|
||||
<p align="center"><a href="https://github.com/Azure/AzureR"><img src="https://github.com/Azure/AzureR/raw/master/images/logo2.png" width=800 /></a></p>
|
||||
|
|
|
@ -4,6 +4,7 @@
|
|||
\alias{publish_model}
|
||||
\alias{unpublish_model}
|
||||
\alias{export_model}
|
||||
\alias{list_model_exports}
|
||||
\title{Publish, export and unpublish a Custom Vision model iteration}
|
||||
\usage{
|
||||
publish_model(model, name, prediction_resource)
|
||||
|
@ -11,6 +12,8 @@ publish_model(model, name, prediction_resource)
|
|||
unpublish_model(model, confirm = TRUE)
|
||||
|
||||
export_model(model, format, download = TRUE, destfile = NULL)
|
||||
|
||||
list_model_exports(model)
|
||||
}
|
||||
\arguments{
|
||||
\item{model}{A Custom Vision model iteration object.}
|
||||
|
@ -29,6 +32,8 @@ export_model(model, format, download = TRUE, destfile = NULL)
|
|||
}
|
||||
\value{
|
||||
\code{export_model} returns the URL of the exported file, invisibly if \code{download=TRUE}.
|
||||
|
||||
\code{list_model_exports} returns a data frame detailing the formats the current model has been exported to, along with their download URLs.
|
||||
}
|
||||
\description{
|
||||
Publish, export and unpublish a Custom Vision model iteration
|
||||
|
@ -38,7 +43,7 @@ Publishing a model makes it available to clients as a predictive service. Export
|
|||
|
||||
The \code{format} argument to \code{export_model} can be one of the following. Note that exporting a model requires that the project was created with support for it.
|
||||
\itemize{
|
||||
\item "onnx 1.0", "onnx 1.2": ONNX 1.0 or 1.2
|
||||
\item "onnx": ONNX 1.2
|
||||
\item "coreml": CoreML, for iOS 11 devices
|
||||
\item "tensorflow": TensorFlow
|
||||
\item "linux docker", "windows docker", "arm docker": A Docker image for the given platform (Raspberry Pi 3 in the case of ARM)
|
||||
|
|
|
@ -4,6 +4,7 @@
|
|||
\alias{train_model}
|
||||
\alias{list_models}
|
||||
\alias{get_model}
|
||||
\alias{rename_model}
|
||||
\alias{delete_model}
|
||||
\alias{delete_model.customvision_project}
|
||||
\alias{delete_model.customvision_model}
|
||||
|
@ -17,6 +18,8 @@ list_models(project, as = c("ids", "list"))
|
|||
|
||||
get_model(project, iteration = NULL)
|
||||
|
||||
rename_model(object, name, ...)
|
||||
|
||||
delete_model(object, ...)
|
||||
|
||||
\method{delete_model}{customvision_project}(object, iteration = NULL,
|
||||
|
@ -39,16 +42,18 @@ delete_model(object, ...)
|
|||
|
||||
\item{as}{For \code{list_models}, the format in which to return results: as a named vector of model iteration IDs, or a list of model objects.}
|
||||
|
||||
\item{iteration}{For \code{get_model} and \code{delete_model.customvision_project}, the iteration ID.}
|
||||
\item{iteration}{For \code{get_model} and \code{delete_model.customvision_project}, either the iteration name or ID.}
|
||||
|
||||
\item{object}{For the \code{delete_model} methods, A Custom Vision project or model, as appropriate.}
|
||||
\item{object}{For the \code{delete_model} method, a Custom Vision project or model, as appropriate.}
|
||||
|
||||
\item{...}{Arguments passed to lower-level functions.}
|
||||
|
||||
\item{confirm}{For the \code{delete_model} methods, whether to ask for confirmation first.}
|
||||
|
||||
\item{model}{A Custom Vision model.}
|
||||
}
|
||||
\value{
|
||||
For \code{train_model} and \code{get_model}, an object of class \code{customvision_model} which is a handle to the iteration.
|
||||
For \code{train_model}, \code{get_model} and \code{rename_model}, an object of class \code{customvision_model} which is a handle to the iteration.
|
||||
|
||||
For \code{list_models}, based on the \code{as} argument: \code{as="ids"} returns a named vector of model iteration IDs, while \code{as="list"} returns a list of model objects.
|
||||
}
|
||||
|
|
|
@ -6,15 +6,19 @@
|
|||
\alias{customvision_prediction_endpoint}
|
||||
\title{Endpoint objects for computer vision services}
|
||||
\usage{
|
||||
computervision_endpoint(url, ...)
|
||||
computervision_endpoint(url, key = NULL, aad_token = NULL, ...)
|
||||
|
||||
customvision_training_endpoint(url, ...)
|
||||
customvision_training_endpoint(url, key = NULL, ...)
|
||||
|
||||
customvision_prediction_endpoint(url, ...)
|
||||
customvision_prediction_endpoint(url, key = NULL, ...)
|
||||
}
|
||||
\arguments{
|
||||
\item{url}{The URL of the endpoint.}
|
||||
|
||||
\item{key}{A subscription key. Can be single-service or multi-service.}
|
||||
|
||||
\item{aad_token}{For the Computer Vision endpoint, an OAuth token object, of class \code{\link[AzureAuth:AzureToken]{AzureAuth::AzureToken}}. You can supply this as an alternative to a subscription key.}
|
||||
|
||||
\item{...}{Other arguments to pass to \code{\link[AzureCognitive:cognitive_endpoint]{AzureCognitive::cognitive_endpoint}}.}
|
||||
}
|
||||
\value{
|
||||
|
@ -24,7 +28,7 @@ An object inheriting from \code{cognitive_endpoint}. The subclass indicates the
|
|||
Endpoint objects for computer vision services
|
||||
}
|
||||
\details{
|
||||
These are functions to create service-specific endpoint objects. They handle differences between the services in how they perform authentication, endpoint paths, and so on.
|
||||
These are functions to create service-specific endpoint objects. Computer Vision supports authentication via either a subscription key or Azure Active Directory (AAD) token; Custom Vision only supports subscription key. Note that there are \emph{two} kinds of Custom Vision endpoint, one for training and the other for prediction.
|
||||
}
|
||||
\examples{
|
||||
|
||||
|
|
|
@ -13,7 +13,7 @@ test_that("Computer Vision endpoint works with URL",
|
|||
endp <- computervision_endpoint(vision_url, key=vision_key)
|
||||
expect_is(endp, c("computervision_endpoint", "cognitive_endpoint"))
|
||||
|
||||
res_doms <- list_domains(endp)
|
||||
res_doms <- list_computervision_domains(endp)
|
||||
expect_type(res_doms, "character")
|
||||
|
||||
img <- httr::parse_url(storage)
|
||||
|
|
|
@ -70,7 +70,7 @@ test_that("Training endpoint prediction and export works",
|
|||
|
||||
expect_error(predict(mod, c(cans, "../resources/can1.jpg")))
|
||||
|
||||
exp_url <- export_model(mod, "tensorflow", download=FALSE)
|
||||
exp_url <- export_model(mod, "tensorflow lite", download=FALSE)
|
||||
expect_true(is_url(exp_url))
|
||||
})
|
||||
|
||||
|
|
|
@ -0,0 +1,290 @@
|
|||
---
|
||||
title: "Creating and deploying a Custom Vision predictive service"
|
||||
author: Hong Ooi
|
||||
output: rmarkdown::html_vignette
|
||||
vignette: >
|
||||
%\VignetteIndexEntry{Custom Vision}
|
||||
%\VignetteEngine{knitr::rmarkdown}
|
||||
%\VignetteEncoding{utf8}
|
||||
---
|
||||
|
||||
The basic idea behind Custom Vision is to take a pre-built image recognition model supplied by Azure, and customise it for your needs by supplying a set of images with which to update it. All model training and prediction is done in the cloud, so you don't need a powerful machine. Similarly, since you are starting with a model that has already been trained, you don't need a very large dataset or long training times to obtain good predictions (ideally). This vignette walks you through the process of creating and deploying a Custom Vision predictive service.
|
||||
|
||||
## Creating the resources
|
||||
|
||||
You can create the Custom Vision resources using the AzureRMR framework for interacting with Resource Manager. Note that Custom Vision requires at least _two_ resources to be created: one for training, and one for prediction. The available service tiers for Custom Vision are `F0` (free, limited to 10k transactions per month) and `S0`.
|
||||
|
||||
```r
|
||||
library(AzureVision)
|
||||
rg <- AzureRMR::get_azure_login("yourtenant")$
|
||||
get_subscription("sub_id")$
|
||||
get_resource_group("rgname")$
|
||||
|
||||
res <- rg$create_cognitive_service("mycustvis",
|
||||
service_type="CustomVision", service_tier="S0")
|
||||
pred_res <- rg$create_cognitive_service("mycustvispred",
|
||||
service_type="CustomVision.Prediction", service_tier="S0")
|
||||
```
|
||||
|
||||
## Training
|
||||
|
||||
Custom Vision defines two different types of endpoint: a training endpoint, and a prediction endpoint. Somewhat confusingly, they can both share the same host, but use different paths and authentication keys. To start, call the `customvision_training_endpoint` function with the service URL and key.
|
||||
|
||||
```r
|
||||
endp <- customvision_training_endpoint(
|
||||
url="https://australiaeast.api.cognitive.microsoft.com",
|
||||
key="training_key"
|
||||
)
|
||||
```
|
||||
|
||||
### Creating the project
|
||||
|
||||
Custom Vision is organised hierarchically. At the top level, we have a _project_, which represents the data and model for a specific task. Within a project, we have one or more _iterations_ of the model, built on different sets of training images. Each iteration in a project is independent: you can create (train) an iteration, deploy it, and delete it without affecting other iterations.
|
||||
|
||||
You can see the projects that currently exist on the endpoint by calling `list_projects`. This returns a named list of project objects:
|
||||
|
||||
```r
|
||||
list_projects(endp)
|
||||
```
|
||||
```
|
||||
$general_compact
|
||||
Azure Custom Vision project 'general_compact' (304fc776-d860-490a-b4ec-5964bb134743)
|
||||
Endpoint: https://australiaeast.api.cognitive.microsoft.com/customvision/v3.0
|
||||
Domain: classification.general.compact (0732100f-1a38-4e49-a514-c9b44c697ab5)
|
||||
Export target: standard
|
||||
Classification type: Multiclass
|
||||
|
||||
$general_multilabel
|
||||
Azure Custom Vision project 'general_multilabel' (c485f10b-cb54-47a3-b585-624488335f58)
|
||||
Endpoint: https://australiaeast.api.cognitive.microsoft.com/customvision/v3.0
|
||||
Domain: classification.general (ee85a74c-405e-4adc-bb47-ffa8ca0c9f31)
|
||||
Export target: none
|
||||
Classification type: Multilabel
|
||||
|
||||
$logo_obj
|
||||
Azure Custom Vision project 'logo_obj' (af82557f-6ead-401c-afd6-bb9d5a3b042b)
|
||||
Endpoint: https://australiaeast.api.cognitive.microsoft.com/customvision/v3.0
|
||||
Domain: object_detection.logo (1d8ffafe-ec40-4fb2-8f90-72b3b6cecea4)
|
||||
Export target: none
|
||||
Classification type: NA
|
||||
```
|
||||
|
||||
There are three different types of projects, as implied by the list above:
|
||||
|
||||
- A _multiclass classification_ project is for classifying images into a set of _tags_, or target labels. An image can be assigned to one tag only.
|
||||
- A _multilabel classification_ project is similar, but each image can have multiple tags assigned to it.
|
||||
- An _object detection_ project is for detecting which objects, if any, from a set of candidates are present in an image.
|
||||
|
||||
The functions to create these projects are `create_classification_project` (which is used to create both multiclass and multilabel projects) and `create_object_detection_project`. Let's create a classification project:
|
||||
|
||||
```r
|
||||
testproj <- create_classification_project(endp, "testproj", export_target="standard")
|
||||
testproj
|
||||
```
|
||||
```
|
||||
Azure Custom Vision project 'testproj' (db368447-e5da-4cd7-8799-0ccd8157323e)
|
||||
Endpoint: https://australiaeast.api.cognitive.microsoft.com/customvision/v3.0
|
||||
Domain: classification.general.compact (0732100f-1a38-4e49-a514-c9b44c697ab5)
|
||||
Export target: standard
|
||||
Classification type: Multiclass
|
||||
```
|
||||
|
||||
Here, we specify the export target to be `standard` to support exporting the final model to one of various standalone formats, eg TensorFlow, CoreML or ONNX. The default is `none`, in which case the model stays on the Custom Vision server. The advantage of `none` is that the model can be more complex, resulting in potentially better accuracy. The type of project is multiclass classification, and the domain (the initial model used as the basis for training) is `general`. Other possible domains for classification include `landmarks` and `retail`.
|
||||
|
||||
### Adding and tagging images
|
||||
|
||||
Since a Custom Vision model is trained in Azure and not locally, we need to upload some images. The data we'll use comes from the Microsoft [Computer Vision Best Practices](https://github.com/microsoft/ComputerVision) project. This is a simple set of images containing 4 kinds of objects one might find in a fridge: cans, cartons, milk bottles, and water bottles.
|
||||
|
||||
```r
|
||||
download.file(
|
||||
"https://cvbp.blob.core.windows.net/public/datasets/image_classification/fridgeObjects.zip",
|
||||
"fridgeObjects.zip"
|
||||
)
|
||||
unzip("fridgeObjects.zip")
|
||||
```
|
||||
|
||||
The generic function to add images to a project is `add_images`, which takes a vector of filenames, Internet URLs or raw vectors as the images to upload. The method for classification projects also has an argument `tags` which can be used to assign labels to the images as they are uploaded.
|
||||
|
||||
`add_images` returns a vector of _image IDs_, which are how Custom Vision keeps track of the images it uses. It should be noted that Custom Vision does not keep a record of the source filename or URL; it works _only_ with image IDs. A future release of AzureVision may automatically track the source metadata, allowing you to associate an ID with an actual image. For now, this must be done manually.
|
||||
|
||||
Let's upload the fridge objects to the project. We'll keep aside 5 images from each class of object to use as validation data.
|
||||
|
||||
```r
|
||||
cans <- dir("fridgeObjects/can", full.names=TRUE)
|
||||
cartons <- dir("fridgeObjects/carton", full.names=TRUE)
|
||||
milk <- dir("fridgeObjects/milk_bottle", full.names=TRUE)
|
||||
water <- dir("fridgeObjects/water_bottle", full.names=TRUE)
|
||||
|
||||
# upload all but 5 images from cans and cartons, and tag them
|
||||
can_ids <- add_images(testproj, cans[-(1:5)], tags="can")
|
||||
carton_ids <- add_images(testproj, cartons[-(1:5)], tags="carton")
|
||||
```
|
||||
|
||||
If you don't tag the images at upload time, you can do so later with `add_image_tags`:
|
||||
|
||||
```r
|
||||
# upload all but 5 images from milk and water bottles
|
||||
milk_ids <- add_images(testproj, milk[-(1:5)])
|
||||
water_ids <- add_images(testproj, water[-(1:5)])
|
||||
|
||||
add_image_tags(testproj, milk_ids, tags="milk_bottle")
|
||||
add_image_tags(testproj, water_ids, tags="water_bottle")
|
||||
```
|
||||
|
||||
Other image functions to be aware of include `list_images`, `remove_images`, and `add_image_regions` (which is for object detection projects). A useful one is `browse_images`, which takes a vector of IDs and displays the corresponding images in your browser.
|
||||
|
||||
```r
|
||||
browse_images(testproj, water_ids[1:5])
|
||||
```
|
||||
|
||||
### Training the model
|
||||
|
||||
Having uploaded the data, we can train the Custom Vision model with `train_model`. This trains the model on the server and returns a _model iteration_, which is the result of running the training algorithm on the current set of images. Each time you call `train_model`, for example to update the model after adding or removing images, you will obtain a different model iteration. In general, you can rely on AzureVision to keep track of the iterations for you, and automatically return the relevant results for the latest iteration.
|
||||
|
||||
```r
|
||||
mod <- train_model(testproj)
|
||||
mod
|
||||
```
|
||||
```
|
||||
Azure Custom Vision model
|
||||
Project/iteration: testproj/Iteration 1 (f243bb4c-e4f8-473e-9df0-190a407472be)
|
||||
```
|
||||
|
||||
Optional arguments to `train_model` include:
|
||||
|
||||
- `training_method`: Set this to "advanced" to force Custom Vision to do the training from scratch, rather than simply updating a pre-trained model. This also enables the other arguments below.
|
||||
- `max_time`: If `training_method == "advanced"`, the maximum runtime in hours for training the model. The default is 1 hour.
|
||||
- `force`: If `training_method == "advanced"`, whether to train the model anyway even if the images have not changed.
|
||||
- `email`: If `training_method == "advanced"`, an optional email address to send a notification to when the training is complete.
|
||||
- `wait`: Whether to wait until training completes before returning.
|
||||
|
||||
Other model iteration management functions are `get_model` (to retrieve a previously trained iteration), `list_models` (retrieve all previously trained iterations), and `delete_model`.
|
||||
|
||||
We can examine the model performance on the training data (which may be different to the current data!) with the `summary` method. For this toy problem, the model manages to obtain a perfect fit.
|
||||
|
||||
```r
summary(mod)
```
```
$perTagPerformance
                                    id         name precision precisionStdDeviation recall
1 22ddd4bc-2031-43a1-b0ef-eb6b219eb6f7          can         1                     0      1
2 301db6f9-b701-4dc6-8650-a9cf3fe4bb2e       carton         1                     0      1
3 594ad770-83e5-4c77-825d-9249dae4a2c6  milk_bottle         1                     0      1
4 eda5869a-cc75-41df-9c4c-717c10f79739 water_bottle         1                     0      1
  recallStdDeviation averagePrecision
1                  0                1
2                  0                1
3                  0                1
4                  0                1

$precision
[1] 1

$precisionStdDeviation
[1] 0

$recall
[1] 1

$recallStdDeviation
[1] 0

$averagePrecision
[1] 1
```

Obtaining predictions from the trained model is done with the `predict` method. By default, this returns the predicted tag (class label) for the image, but you can also get the predicted class probabilities by specifying `type="prob"`.

```r
validation_imgs <- c(cans[1:5], cartons[1:5], milk[1:5], water[1:5])
validation_tags <- rep(c("can", "carton", "milk_bottle", "water_bottle"), each=5)

predicted_tags <- predict(mod, validation_imgs)

table(predicted_tags, validation_tags)
```
```
              validation_tags
predicted_tags can carton milk_bottle water_bottle
  can            4      0           0            0
  carton         0      5           0            0
  milk_bottle    1      0           5            0
  water_bottle   0      0           0            5
```

```r
head(predict(mod, validation_imgs, type="prob"))
```
```
              can       carton  milk_bottle water_bottle
[1,] 9.999968e-01 8.977501e-08 5.855104e-11 3.154334e-06
[2,] 9.732912e-01 3.454168e-10 4.610847e-06 2.670425e-02
[3,] 3.019476e-01 5.779990e-04 6.974699e-01 4.506565e-06
[4,] 5.072662e-01 2.849253e-03 4.856858e-01 4.198686e-03
[5,] 9.962270e-01 5.411842e-07 3.540882e-03 2.316211e-04
[6,] 3.145034e-11 1.000000e+00 2.574793e-10 4.242047e-14
```

This shows that the model got 19 out of 20 predictions correct on the validation data, misclassifying one of the cans as a milk bottle.

## Deployment

### Publishing to a prediction endpoint

The code above demonstrates using the training endpoint to obtain predictions, which is really meant only for model testing and validation. For production purposes, we would normally _publish_ a trained model to a Custom Vision prediction endpoint. Among other things, a user with access to the training endpoint has complete freedom to modify the model and the data, whereas access to the prediction endpoint only allows getting predictions.

Publishing a model requires knowing the Azure resource ID of the prediction endpoint. Here, we'll use the resource object that was created earlier using AzureRMR; you can also obtain this information from the Azure Portal.

```r
# publish to the prediction resource we created above
publish_model(mod, "iteration1", pred_res)
```

Once a model has been published, we can obtain predictions from the prediction endpoint in a manner very similar to previously. Define a classification predictive service with `classification_service`, and then call the `predict` method:

```r
pred_endp <- customvision_prediction_endpoint(
    url="https://australiaeast.api.cognitive.microsoft.com/",
    key="prediction_key"
)

# service we published above
pred_svc <- classification_service(pred_endp, testproj, "iteration1")

# predictions from prediction endpoint -- same as before
predsvc_tags <- predict(pred_svc, validation_imgs)
table(predsvc_tags, validation_tags)
```
```
              validation_tags
predsvc_tags   can carton milk_bottle water_bottle
  can            4      0           0            0
  carton         0      5           0            0
  milk_bottle    1      0           5            0
  water_bottle   0      0           0            5
```

### Exporting as standalone

As an alternative to deploying the model to an online predictive service resource, you can also export the model to a standalone format. This is only possible if the project was created to support exporting. The formats supported include:

- ONNX 1.2
- CoreML
- TensorFlow or TensorFlow Lite
- A Docker image for either the Linux, Windows or Raspberry Pi environment
- Vision AI Development Kit (VAIDK)

To export the model, call `export_model` and specify the target format. By default, the model will be downloaded to your local machine, but `export_model` also (invisibly) returns a URL from where it can be downloaded independently.

```r
export_model(mod, "linux docker")
```
```
Downloading to f243bb4c-e4f8-473e-9df0-190a407472be.TensorFlow.zip
trying URL 'https://irisprodae...'
Content type 'application/octet-stream' length 4673656 bytes (4.5 MB)
downloaded 4.5 MB
```