Adds CV recipe (#2)
* documents code
* add cv readme
* update main readme
* update readme
This commit is contained in:
Parent: 6060068a9b
Commit: e694b56f72
@ -3,7 +3,9 @@
    "python.linting.flake8Enabled": true,
    "python.linting.enabled": true,
    "cSpell.ignoreWords": [
        "adafruit",
        "dotenv",
-       "getenv"
+       "getenv",
+       "imshow"
    ]
}
@ -59,6 +59,11 @@ First you'll provision the Azure resources needed for this sample. You're going
1. Give your resource group a name

1. Select a Location near you

1. Select *F1: Free* for the pricing tier
    > Note: You can only have one free tier active per account

1. The IoT Hub should now appear in the *Azure IoT Hub* tab

1. Open the command palette, search for and select **Azure IoT Hub: Copy IoT Hub Connection String**
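For orientation, a hub-level connection string copied this way follows the standard `HostName=...;SharedAccessKeyName=...;SharedAccessKey=...` format. A minimal sketch of checking that the copied value has the expected parts (the host name below is a placeholder, and this check is not part of the recipe):

```python
# Hypothetical sanity check of a copied IoT Hub connection string.
conn_str = "HostName=example.azure-devices.net;SharedAccessKeyName=iothubowner;SharedAccessKey=<key>"
parts = dict(pair.split("=", 1) for pair in conn_str.split(";"))
missing = {"HostName", "SharedAccessKeyName", "SharedAccessKey"} - parts.keys()
print("Connection string looks complete" if not missing else f"Missing fields: {missing}")
```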
@ -96,7 +96,7 @@ Now provision the Azure resources we'll need for this sample. We're going to use
1. In the client folder on your Pi type
    ```sh
-   source ./.venv/Scripts/activate
+   source ./.venv/bin/activate
    ```

1. Then type
@ -0,0 +1,5 @@
{
    "python.linting.pylintEnabled": false,
    "python.linting.flake8Enabled": true,
    "python.linting.enabled": true
}
@ -0,0 +1,139 @@
from azure.cognitiveservices.vision.computervision import ComputerVisionClient
from msrest.authentication import CognitiveServicesCredentials

import os
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from PIL import Image
from time import sleep
from dotenv import load_dotenv

from picamera import PiCamera

# Create a camera object
camera = PiCamera()

load_dotenv('.env')

# Add your Computer Vision subscription key and endpoint to your environment
# variables in the .env file.
subscription_key = os.getenv('SUBSCRIPTION_KEY')
endpoint = os.getenv('ENDPOINT')

computervision_client = ComputerVisionClient(
    endpoint,
    CognitiveServicesCredentials(subscription_key)
)


def take_picture(image_path='./image.jpg'):
    '''Returns path to picture, stores picture in the same directory as code.

    Parameters
    ----------
    image_path: str
        path to save the image to. Default is './image.jpg'

    Returns
    -------
    image_path: str
        path to the image
    '''

    camera.start_preview(alpha=200)
    # Pi Foundation recommends waiting 2s for light adjustment
    sleep(2)
    # Change or comment out as needed
    camera.rotation = 180
    # Capture the image to the given path
    camera.capture(image_path)
    # Stop the camera preview
    camera.stop_preview()

    return image_path


def detect_objects(image_path):
    '''Prints objects detected in an image and returns the detection result
    from the Computer Vision API.

    Parameters
    ----------
    image_path: str
        path to the location of the image file to use in object detection

    Returns
    -------
    detected_objects: DetectResult
        detection result returned by the Computer Vision API
    '''

    # Open the local image file in binary mode
    local_image = open(image_path, "rb")
    # Call the API with the local image stream
    detected_objects = computervision_client.detect_objects_in_stream(
        local_image
    )

    # Print the detected object labels
    print("Detecting objects in image:")
    if len(detected_objects.objects) == 0:
        print("No objects detected.")
    for detected_object in detected_objects.objects:
        print(detected_object.object_property)

    return detected_objects


def frame_objects(detected_objects, image_path):
    '''Draws frames around the objects detected in an image and plots the
    frames over that image.

    Parameters
    ----------
    detected_objects: DetectResult
        detection result that was returned from the Computer Vision API

    image_path: str
        path to the location of the image file that was used in object
        detection
    '''

    # Create figure and plot
    fig, ax = plt.subplots(1)

    if len(detected_objects.objects) == 0:
        print("No objects detected.")
    else:
        for detected_object in detected_objects.objects:
            # Red bounding box for each detected object
            rect = patches.Rectangle(
                (detected_object.rectangle.x, detected_object.rectangle.y),
                detected_object.rectangle.w, detected_object.rectangle.h,
                linewidth=1,
                edgecolor='r',
                facecolor='none'
            )
            # Label the box with the detected object's name
            plt.text(
                detected_object.rectangle.x,
                detected_object.rectangle.y,
                detected_object.object_property,
                color='w'
            )
            ax.add_patch(rect)

    # Show the captured image underneath the boxes
    local_image = open(image_path, "rb")
    image = Image.open(local_image)
    ax.imshow(image)
    plt.axis("off")
    plt.show()


if __name__ == "__main__":
    try:
        path_to_image = take_picture()
        detected_objects = detect_objects(path_to_image)
        frame_objects(detected_objects, path_to_image)
    except KeyboardInterrupt:
        print('Script Stopped')
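For reference, each entry in the `objects` list that `detect_objects` returns carries the label and bounding box that `frame_objects` draws. A minimal sketch of dumping those fields, assuming `detected_objects` came from a call to `detect_objects` above:

```python
# Print label and bounding box (x, y, width, height) for each detected object.
for obj in detected_objects.objects:
    box = obj.rectangle
    print(f"{obj.object_property}: x={box.x}, y={box.y}, w={box.w}, h={box.h}")
```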
@ -0,0 +1,12 @@
#!/bin/bash
# Set up the Python virtual environment
echo "Python virtual environment creation script"
python3 -m venv ./.venv --system-site-packages
echo "Virtual environment created"
source ./.venv/bin/activate
echo "Virtual environment activated"
pip install -r requirements.txt
echo SUBSCRIPTION_KEY= >> .env
echo ENDPOINT= >> .env
echo "Dependencies installed"
sleep 5
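Because the virtual environment is created with `--system-site-packages`, `picamera` and `Pillow` are expected to come from the OS packages rather than from `requirements.txt`. A small, hypothetical check that everything the client imports is visible inside the activated environment:

```python
# Hypothetical import check; run inside the activated .venv on the Pi.
import importlib

for module in ("azure.cognitiveservices.vision.computervision", "dotenv",
               "matplotlib", "PIL", "picamera"):
    try:
        importlib.import_module(module)
        print(f"{module}: OK")
    except ImportError as error:
        print(f"{module}: missing ({error})")
```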
@ -0,0 +1,93 @@
|
|||
# Using Azure Computer Vision on a Raspberry Pi
|
||||
|
||||
## Overview
|
||||
|
||||
## Prerequisites
|
||||
|
||||
1. An active Azure account. If you don't have one, you can sign up for a [free account](https://azure.microsoft.com/free/).
|
||||
1. [VS Code](https://code.visualstudio.com/Download)
|
||||
1. Hardware listed below
|
||||
|
||||
### Hardware
|
||||
|
||||
| Item | Description | Link |
|
||||
|-|-|-|
|
||||
| Raspberry Pi 3 or 4 | Single board computer | [Adafruit](https://www.adafruit.com/product/4292) |
|
||||
| Raspberry Pi Camera V2 | Ribbon camera for the Pi | [Adafruit](https://www.adafruit.com/product/3099) |
|
||||
| USB C power supply (Pi 4) | Power cable for Raspberry Pi 4 | [Adafruit](https://www.adafruit.com/product/4298) |
|
||||
| Micro USB USB power supply (Pi 3) | Power cable for Raspberry Pi 3 | [Adafruit](https://www.adafruit.com/product/1995) |
|
||||
| SD Card with Raspberry Pi OS | Operating system for the Pi | [Adafruit](https://www.adafruit.com/product/2820) |
|
||||
|
||||
## Setup Azure Resources
|
||||
|
||||
| Resource | Description | Link |
|
||||
|-|-|-|
|
||||
| Computer Vision | Computer vision API from Azure | [Azure](https://azure.microsoft.com/en-us/services/cognitive-services/computer-vision/) |
|
||||
|
||||
### Preparing your environment
|
||||
|
||||
1. If you haven't already, clone this repo to your computer
|
||||
|
||||
1. Open command prompt or terminal and navigate to *pi-azure-recipes*
|
||||
|
||||
1. In command prompt or terminal type and run ```code 03_cv```. This will open the project folder in VS Code
|
||||
|
||||
### Create a computer vision resource
|
||||
|
||||
1. First you'll need to create [a computer vision resource](https://ms.portal.azure.com/#create/Microsoft.CognitiveServicesComputerVision)
|
||||
|
||||
1. Select your subscription
|
||||
|
||||
1. For resource group select *Create new*, and give your resource group a name
|
||||
|
||||
1. Select a Location that is near you
|
||||
|
||||
1. Give your computer vision resource a name
|
||||
|
||||
1. Select *Free F0* for the pricing tier
|
||||
> Note: You can only have one free tier active per account
|
||||
|
||||
1. After your resource is deployed, select *Go to resource*
|
||||
|
||||
1. You will need the key and endpoint from this resource to connect your Raspberry Pi to it. Select *Keys and Endpoints* from the left navigation.
|
||||
|
||||
1. Make a note of the key and endpoint for your resource, you'll use these later on your Pi.
|
||||
> Rember to treat these like passwords
|
||||
|
||||
|
||||
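These two values are exactly what the client script reads from its *.env* file. For orientation, this is how *cv_pi_client.py* turns them into an authenticated client (shown here for convenience; it mirrors the code in the *client* folder):

```python
# How the key and endpoint from the portal are used by the client script.
import os
from azure.cognitiveservices.vision.computervision import ComputerVisionClient
from msrest.authentication import CognitiveServicesCredentials
from dotenv import load_dotenv

load_dotenv('.env')  # expects SUBSCRIPTION_KEY and ENDPOINT entries
client = ComputerVisionClient(
    os.getenv('ENDPOINT'),
    CognitiveServicesCredentials(os.getenv('SUBSCRIPTION_KEY'))
)
```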
### Set up your Raspberry Pi device

1. Connect your Raspberry Pi to a monitor and keyboard, or use the instructions [here](https://github.com/microsoft/rpi-resources/tree/master/headless-setup) to set up your Pi for SSH

1. Set up your Pi camera using [this guide](https://www.raspberrypi.org/documentation/configuration/camera.md)

1. Using a USB drive or SSH file transfer software, copy the *client* folder to the Pi

1. Run the *python_environment_setup.sh* shell script

1. Once the script finishes, navigate to the *client* folder and press **Ctrl + H** to show hidden files

1. Open the newly created *.env* file in a text editor and fill in your key and endpoint
    ```
    SUBSCRIPTION_KEY='YOUR-SUBSCRIPTION-KEY'
    ENDPOINT='YOUR-ENDPOINT'
    ```

1. Then type
    ```sh
    python3 cv_pi_client.py
    ```

1. You should see the picture the camera took and what the Computer Vision service was able to identify in it. If you're running headless over SSH, see the note after this list.
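If you run the client over SSH without a desktop session, `plt.show()` has no display to open a window on. One workaround (an assumption, not part of the recipe) is to save the annotated figure to a file instead and copy it off the Pi:

```python
# Hypothetical tweak for headless use: replace plt.show() at the end of
# frame_objects() with a call that writes the annotated image to disk.
plt.savefig('detected_objects.png', bbox_inches='tight')
print('Saved annotated image to detected_objects.png')
```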
## Clean up Resources

If you keep the resources you provisioned, you'll continue to incur costs on them. The steps below walk you through cleaning up your resources.

1. In the Azure portal, navigate to the resource group you created earlier

1. In the Resource group page, review the list of included resources and verify that they are the ones you want to delete

1. Select *Delete resource group* and follow the instructions

Deletion may take a couple of minutes. When it's done, a notification appears for a few seconds. You can also select the bell icon at the top of the page to view the notification.
@ -0,0 +1,3 @@
azure-cognitiveservices-vision-computervision
python-dotenv
matplotlib
@ -20,7 +20,8 @@ If there is something you'd like to know how to do using Azure and a Raspberry P
| Recipe | Description | Time | Prerequisites |
|--------|-------------|------|---------------|
| [01 IoT Hub d2c](./01_iot) | Send telemetry to Azure table storage using IoT Hub and Azure functions. | 25 Mins | None |
-| [02 IoT Hub c2d](./02_c2d_messages) | Trigger events on your Raspberry Pi using Azure functions and IoT Hub | 15 mins | None
+| [02 IoT Hub c2d](./02_c2d_messages) | Trigger events on your Raspberry Pi using Azure functions and IoT Hub | 15 mins | None |
+| [03 Computer Vision](./03_cv) | Use the computer vision cognitive service to detect objects in an image | 20 Mins | None |

## Resources